diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS deleted file mode 100644 index c364af1da0953..0000000000000 --- a/vendor/cloud.google.com/go/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of cloud authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as: -# Name or Organization -# The email address is not required for organizations. - -Filippo Valsorda -Google Inc. -Ingo Oeser -Palm Stone Games, Inc. -Paweł Knap -Péter Szilágyi -Tyler Treat diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS deleted file mode 100644 index 3b3cbed98e9a9..0000000000000 --- a/vendor/cloud.google.com/go/CONTRIBUTORS +++ /dev/null @@ -1,40 +0,0 @@ -# People who have agreed to one of the CLAs and can contribute patches. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# https://developers.google.com/open-source/cla/individual -# https://developers.google.com/open-source/cla/corporate -# -# Names should be added to this file as: -# Name - -# Keep the list alphabetically sorted. - -Alexis Hunt -Andreas Litt -Andrew Gerrand -Brad Fitzpatrick -Burcu Dogan -Dave Day -David Sansome -David Symonds -Filippo Valsorda -Glenn Lewis -Ingo Oeser -James Hall -Johan Euphrosine -Jonathan Amsterdam -Kunpei Sakai -Luna Duclos -Magnus Hiie -Mario Castro -Michael McGreevy -Omar Jarjur -Paweł Knap -Péter Szilágyi -Sarah Adams -Thanatat Tamtan -Toby Burress -Tuo Shan -Tyler Treat diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 0d929a6192111..125b7033c96b9 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -137,7 +137,7 @@ func testOnGCE() bool { resc := make(chan bool, 2) // Try two strategies in parallel. - // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 + // See https://github.com/googleapis/google-cloud-go/issues/194 go func() { req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) req.Header.Set("User-Agent", userAgent) @@ -300,8 +300,8 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { // being stable anyway. host = metadataIP } - url := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", url, nil) + u := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", u, nil) req.Header.Set("Metadata-Flavor", "Google") req.Header.Set("User-Agent", userAgent) res, err := c.hc.Do(req) @@ -312,13 +312,13 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { if res.StatusCode == http.StatusNotFound { return "", "", NotDefinedError(suffix) } - if res.StatusCode != 200 { - return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) - } all, err := ioutil.ReadAll(res.Body) if err != nil { return "", "", err } + if res.StatusCode != 200 { + return "", "", &Error{Code: res.StatusCode, Message: string(all)} + } return string(all), res.Header.Get("Etag"), nil } @@ -499,3 +499,15 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro } } } + +// Error contains an error response from the server. 
+type Error struct { + // Code is the HTTP response status code. + Code int + // Message is the server response message. + Message string +} + +func (e *Error) Error() string { + return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/BUILD.bazel b/vendor/github.com/Azure/go-autorest/autorest/BUILD.bazel index 65f2706e01240..4c77182f9145c 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/BUILD.bazel +++ b/vendor/github.com/Azure/go-autorest/autorest/BUILD.bazel @@ -22,6 +22,6 @@ go_library( deps = [ "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/logger:go_default_library", - "//vendor/github.com/Azure/go-autorest/version:go_default_library", + "//vendor/github.com/Azure/go-autorest/tracing:go_default_library", ], ) diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE similarity index 100% rename from vendor/github.com/Azure/go-autorest/LICENSE rename to vendor/github.com/Azure/go-autorest/autorest/LICENSE diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/BUILD.bazel b/vendor/github.com/Azure/go-autorest/autorest/adal/BUILD.bazel index 6585c4b15e949..c3dad65a5e7ac 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/BUILD.bazel +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/BUILD.bazel @@ -8,13 +8,14 @@ go_library( "persist.go", "sender.go", "token.go", + "version.go", ], importmap = "k8s.io/kops/vendor/github.com/Azure/go-autorest/autorest/adal", importpath = "github.com/Azure/go-autorest/autorest/adal", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/Azure/go-autorest/autorest/date:go_default_library", - "//vendor/github.com/Azure/go-autorest/version:go_default_library", + "//vendor/github.com/Azure/go-autorest/tracing:go_default_library", "//vendor/github.com/dgrijalva/jwt-go:go_default_library", ], ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE new file mode 100644 index 0000000000000..b9d6a27ea92ef --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md index 7b0c4bc4d21ac..fec416a9c415d 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -135,7 +135,7 @@ resource := "https://management.core.windows.net/" applicationSecret := "APPLICATION_SECRET" spt, err := adal.NewServicePrincipalToken( - oauthConfig, + *oauthConfig, appliationID, applicationSecret, resource, @@ -170,7 +170,7 @@ if err != nil { } spt, err := adal.NewServicePrincipalTokenFromCertificate( - oauthConfig, + *oauthConfig, applicationID, certificate, rsaPrivateKey, @@ -195,7 +195,7 @@ oauthClient := &http.Client{} // Acquire the device code deviceCode, err := adal.InitiateDeviceAuth( oauthClient, - oauthConfig, + *oauthConfig, applicationID, resource) if err != nil { @@ -212,7 +212,7 @@ if err != nil { } spt, err := adal.NewServicePrincipalTokenFromManualToken( - oauthConfig, + *oauthConfig, applicationID, resource, *token, @@ -227,7 +227,7 @@ if (err == nil) { ```Go spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( - oauthConfig, + *oauthConfig, applicationID, username, password, @@ -243,11 +243,11 @@ if (err == nil) { ``` Go spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( - oauthConfig, + *oauthConfig, applicationID, clientSecret, - authorizationCode, - redirectURI, + authorizationCode, + redirectURI, resource, callbacks...) 
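The README hunks above switch every constructor call from `oauthConfig` to `*oauthConfig`, because `adal.NewOAuthConfig` returns a `*OAuthConfig` while the token constructors take the config by value. The sketch below is illustrative only (it is not part of the vendored patch) and shows that client-credentials flow end to end; the tenant ID, application ID and secret are placeholders.

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	env := azure.PublicCloud

	// NewOAuthConfig returns *OAuthConfig; the token constructors want a value.
	oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, "my-tenant-id") // placeholder tenant
	if err != nil {
		panic(err)
	}

	spt, err := adal.NewServicePrincipalToken(
		*oauthConfig,             // dereferenced, matching the updated README
		"my-application-id",      // placeholder
		"my-application-secret",  // placeholder
		env.ResourceManagerEndpoint,
	)
	if err != nil {
		panic(err)
	}

	// Refresh performs the initial token request; OAuthToken exposes the raw bearer token.
	if err := spt.Refresh(); err != nil {
		panic(err)
	}
	fmt.Println("acquired a bearer token of length", len(spt.OAuthToken()))
}
```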
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go index 8c83a917ff7e7..fa5964742fc9a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -15,10 +15,15 @@ package adal // limitations under the License. import ( + "errors" "fmt" "net/url" ) +const ( + activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" +) + // OAuthConfig represents the endpoints needed // in OAuth operations type OAuthConfig struct { @@ -60,7 +65,6 @@ func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiV } api = fmt.Sprintf("?api-version=%s", *apiVersion) } - const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" u, err := url.Parse(activeDirectoryEndpoint) if err != nil { return nil, err @@ -89,3 +93,59 @@ func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiV DeviceCodeEndpoint: *deviceCodeURL, }, nil } + +// MultiTenantOAuthConfig provides endpoints for primary and aulixiary tenant IDs. +type MultiTenantOAuthConfig interface { + PrimaryTenant() *OAuthConfig + AuxiliaryTenants() []*OAuthConfig +} + +// OAuthOptions contains optional OAuthConfig creation arguments. +type OAuthOptions struct { + APIVersion string +} + +func (c OAuthOptions) apiVersion() string { + if c.APIVersion != "" { + return fmt.Sprintf("?api-version=%s", c.APIVersion) + } + return "1.0" +} + +// NewMultiTenantOAuthConfig creates an object that support multitenant OAuth configuration. +// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information. +func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { + if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { + return nil, errors.New("must specify one to three auxiliary tenants") + } + mtCfg := multiTenantOAuthConfig{ + cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), + } + apiVer := options.apiVersion() + pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) + } + mtCfg.cfgs[0] = pri + for i := range auxiliaryTenantIDs { + aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) + } + mtCfg.cfgs[i+1] = aux + } + return mtCfg, nil +} + +type multiTenantOAuthConfig struct { + // first config in the slice is the primary tenant + cfgs []*OAuthConfig +} + +func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { + return m.cfgs[0] +} + +func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { + return m.cfgs[1:] +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod new file mode 100644 index 0000000000000..66db8b9e2dc86 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest/adal + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest/date v0.1.0 + github.com/Azure/go-autorest/autorest/mocks v0.1.0 + github.com/Azure/go-autorest/tracing v0.5.0 + github.com/dgrijalva/jwt-go v3.2.0+incompatible + golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum new file mode 100644 index 0000000000000..9525ff736a587 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum @@ -0,0 +1,12 @@ +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go index 834401e00dec4..d7e4372bbc5a1 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -15,7 +15,12 @@ package adal // limitations under the License. import ( + "crypto/tls" "net/http" + "net/http/cookiejar" + "sync" + + "github.com/Azure/go-autorest/tracing" ) const ( @@ -23,6 +28,9 @@ const ( mimeTypeFormPost = "application/x-www-form-urlencoded" ) +var defaultSender Sender +var defaultSenderInit = &sync.Once{} + // Sender is the interface that wraps the Do method to send HTTP requests. // // The standard http.Client conforms to this interface. @@ -45,7 +53,7 @@ type SendDecorator func(Sender) Sender // CreateSender creates, decorates, and returns, as a Sender, the default http.Client. func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(&http.Client{}, decorators...) + return DecorateSender(sender(), decorators...) } // DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to @@ -58,3 +66,30 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender { } return s } + +func sender() Sender { + // note that we can't init defaultSender in init() since it will + // execute before calling code has had a chance to enable tracing + defaultSenderInit.Do(func() { + // Use behaviour compatible with DefaultTransport, but require TLS minimum version. 
+ defaultTransport := http.DefaultTransport.(*http.Transport) + transport := &http.Transport{ + Proxy: defaultTransport.Proxy, + DialContext: defaultTransport.DialContext, + MaxIdleConns: defaultTransport.MaxIdleConns, + IdleConnTimeout: defaultTransport.IdleConnTimeout, + TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + } + var roundTripper http.RoundTripper = transport + if tracing.IsEnabled() { + roundTripper = tracing.NewTransport(transport) + } + j, _ := cookiejar.New(nil) + defaultSender = &http.Client{Jar: j, Transport: roundTripper} + }) + return defaultSender +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 2fd340d6922f6..b72753498365d 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -34,7 +34,6 @@ import ( "time" "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/version" "github.com/dgrijalva/jwt-go" ) @@ -71,6 +70,12 @@ type OAuthTokenProvider interface { OAuthToken() string } +// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization. +type MultitenantOAuthTokenProvider interface { + PrimaryOAuthToken() string + AuxiliaryOAuthTokens() []string +} + // TokenRefreshError is an interface used by errors returned during token refresh. type TokenRefreshError interface { error @@ -385,8 +390,13 @@ func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { if err != nil { return err } - spt.refreshLock = &sync.RWMutex{} - spt.sender = &http.Client{} + // Don't override the refreshLock or the sender if those have been already set. + if spt.refreshLock == nil { + spt.refreshLock = &sync.RWMutex{} + } + if spt.sender == nil { + spt.sender = sender() + } return nil } @@ -433,7 +443,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso RefreshWithin: defaultRefresh, }, refreshLock: &sync.RWMutex{}, - sender: &http.Client{}, + sender: sender(), refreshCallbacks: callbacks, } return spt, nil @@ -674,7 +684,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI RefreshWithin: defaultRefresh, }, refreshLock: &sync.RWMutex{}, - sender: &http.Client{}, + sender: sender(), refreshCallbacks: callbacks, MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, } @@ -791,7 +801,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource if err != nil { return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) } - req.Header.Add("User-Agent", version.UserAgent()) + req.Header.Add("User-Agent", UserAgent()) req = req.WithContext(ctx) if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { v := url.Values{} @@ -978,3 +988,93 @@ func (spt *ServicePrincipalToken) Token() Token { defer spt.refreshLock.RUnlock() return spt.inner.Token } + +// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization. +type MultiTenantServicePrincipalToken struct { + PrimaryToken *ServicePrincipalToken + AuxiliaryTokens []*ServicePrincipalToken +} + +// PrimaryOAuthToken returns the primary authorization token. +func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string { + return mt.PrimaryToken.OAuthToken() +} + +// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens. 
+func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string { + tokens := make([]string, len(mt.AuxiliaryTokens)) + for i := range mt.AuxiliaryTokens { + tokens[i] = mt.AuxiliaryTokens[i].OAuthToken() + } + return tokens +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %v", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %v", err) + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %v", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %v", err) + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. +func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh primary token: %v", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %v", err) + } + } + return nil +} + +// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. 
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} diff --git a/vendor/github.com/Azure/go-autorest/version/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go similarity index 65% rename from vendor/github.com/Azure/go-autorest/version/version.go rename to vendor/github.com/Azure/go-autorest/autorest/adal/version.go index c9876a9b6aed5..c867b348439b1 100644 --- a/vendor/github.com/Azure/go-autorest/version/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -1,4 +1,9 @@ -package version +package adal + +import ( + "fmt" + "runtime" +) // Copyright 2017 Microsoft Corporation // @@ -14,24 +19,27 @@ package version // See the License for the specific language governing permissions and // limitations under the License. -import ( - "fmt" - "runtime" -) - -// Number contains the semantic version of this SDK. -const Number = "v11.1.2" +const number = "v1.0.0" var ( - userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", runtime.Version(), runtime.GOARCH, runtime.GOOS, - Number, + number, ) ) -// UserAgent returns a string containing the Go version, system archityecture and OS, and the go-autorest version. +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. func UserAgent() string { - return userAgent + return ua +} + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) } diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go index 77eff45bddb5f..54e87b5b648ce 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -15,6 +15,8 @@ package autorest // limitations under the License. 
import ( + "crypto/tls" + "encoding/base64" "fmt" "net/http" "net/url" @@ -30,6 +32,8 @@ const ( apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" bingAPISdkHeader = "X-BingApis-SDK-Client" golangBingAPISdkHeaderValue = "Go-SDK" + authorization = "Authorization" + basic = "Basic" ) // Authorizer is the interface that provides a PrepareDecorator used to supply request @@ -68,7 +72,7 @@ func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[str return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters} } -// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Paramaters +// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters. func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator { return func(p Preparer) Preparer { return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters)) @@ -145,11 +149,11 @@ type BearerAuthorizerCallback struct { // NewBearerAuthorizerCallback creates a bearer authorization callback. The callback // is invoked when the HTTP request is submitted. -func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { - if sender == nil { - sender = &http.Client{} +func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if s == nil { + s = sender(tls.RenegotiateNever) } - return &BearerAuthorizerCallback{sender: sender, callback: callback} + return &BearerAuthorizerCallback{sender: s, callback: callback} } // WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value @@ -257,3 +261,76 @@ func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { } return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() } + +// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header +// with the value "Basic " where is a base64-encoded username:password tuple. +type BasicAuthorizer struct { + userName string + password string +} + +// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password. +func NewBasicAuthorizer(userName, password string) *BasicAuthorizer { + return &BasicAuthorizer{ + userName: userName, + password: password, + } +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Basic " followed by the base64-encoded username:password tuple. +func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { + headers := make(map[string]interface{}) + headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password))) + + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants. 
+type MultiTenantServicePrincipalTokenAuthorizer interface { + WithAuthorization() PrepareDecorator +} + +// NewMultiTenantServicePrincipalTokenAuthorizer crates a BearerAuthorizer using the given token provider +func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer { + return &multiTenantSPTAuthorizer{tp: tp} +} + +type multiTenantSPTAuthorizer struct { + tp adal.MultitenantOAuthTokenProvider +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the +// primary token along with the auxiliary authorization header using the auxiliary tokens. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + if refresher, ok := mt.tp.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp, + "Failed to refresh one or more Tokens for request to %s", r.URL) + } + } + r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken()))) + if err != nil { + return r, err + } + auxTokens := mt.tp.AuxiliaryOAuthTokens() + for i := range auxTokens { + auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i]) + } + return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; "))) + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/BUILD.bazel b/vendor/github.com/Azure/go-autorest/autorest/azure/BUILD.bazel index e4b3d2b213e4d..32e1db2cff2e4 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/BUILD.bazel +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/BUILD.bazel @@ -12,5 +12,8 @@ go_library( importmap = "k8s.io/kops/vendor/github.com/Azure/go-autorest/autorest/azure", importpath = "github.com/Azure/go-autorest/autorest/azure", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/Azure/go-autorest/autorest:go_default_library"], + deps = [ + "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/tracing:go_default_library", + ], ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index 60679c1a4dfc0..1cb41cbeb1b8d 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -26,6 +26,7 @@ import ( "time" "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/tracing" ) const ( @@ -44,14 +45,7 @@ var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.Stat // Future provides a mechanism to access the status and results of an asynchronous request. // Since futures are stateful they should be passed by value to avoid race conditions. type Future struct { - req *http.Request // legacy - pt pollingTracker -} - -// NewFuture returns a new Future object initialized with the specified request. -// Deprecated: Please use NewFutureFromResponse instead. 
-func NewFuture(req *http.Request) Future { - return Future{req: req} + pt pollingTracker } // NewFutureFromResponse returns a new Future object initialized @@ -85,28 +79,18 @@ func (f Future) PollingMethod() PollingMethodType { return f.pt.pollingMethod() } -// Done queries the service to see if the operation has completed. -// Deprecated: Use DoneWithContext() -func (f *Future) Done(sender autorest.Sender) (bool, error) { - return f.DoneWithContext(context.Background(), sender) -} - // DoneWithContext queries the service to see if the operation has completed. func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { - // support for legacy Future implementation - if f.req != nil { - resp, err := sender.Do(f.req) - if err != nil { - return false, err - } - pt, err := createPollingTracker(resp) - if err != nil { - return false, err + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode } - f.pt = pt - f.req = nil - } - // end legacy + tracing.EndSpan(ctx, sc, err) + }() + if f.pt == nil { return false, autorest.NewError("Future", "Done", "future is not initialized") } @@ -157,15 +141,6 @@ func (f Future) GetPollingDelay() (time.Duration, bool) { return d, true } -// WaitForCompletion will return when one of the following conditions is met: the long -// running operation has completed, the provided context is cancelled, or the client's -// polling duration has been exceeded. It will retry failed polling attempts based on -// the retry value defined in the client up to the maximum retry attempts. -// Deprecated: Please use WaitForCompletionRef() instead. -func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error { - return f.WaitForCompletionRef(ctx, client) -} - // WaitForCompletionRef will return when one of the following conditions is met: the long // running operation has completed, the provided context is cancelled, or the client's // polling duration has been exceeded. It will retry failed polling attempts based on @@ -175,6 +150,15 @@ func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) e // If PollingDuration is greater than zero the value will be used as the context's timeout. // If PollingDuration is zero then no default deadline will be used. func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() cancelCtx := ctx // if the provided context already has a deadline don't override it _, hasDeadline := ctx.Deadline() @@ -433,6 +417,11 @@ func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest } req = req.WithContext(ctx) + preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) 
+ req, err = preparer.Prepare(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") + } pt.resp, err = sender.Do(req) if err != nil { return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") @@ -899,43 +888,6 @@ func isValidURL(s string) bool { return err == nil && u.IsAbs() } -// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure -// long-running operation. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled via -// the context associated with the http.Request. -// Deprecated: Prefer using Futures to allow for non-blocking async operations. -func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - return resp, err - } - if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) { - return resp, nil - } - future, err := NewFutureFromResponse(resp) - if err != nil { - return resp, err - } - // retry until either the LRO completes or we receive an error - var done bool - for done, err = future.Done(s); !done && err == nil; done, err = future.Done(s) { - // check for Retry-After delay, if not present use the specified polling delay - if pd, ok := future.GetPollingDelay(); ok { - delay = pd - } - // wait until the delay elapses or the context is cancelled - if delayElapsed := autorest.DelayForBackoff(delay, 0, r.Context().Done()); !delayElapsed { - return future.Response(), - autorest.NewErrorWithError(r.Context().Err(), "azure", "DoPollForAsynchronous", future.Response(), "context has been cancelled") - } - } - return future.Response(), err - }) - } -} - // PollingMethodType defines a type used for enumerating polling mechanisms. type PollingMethodType string diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 7e41f7fd99c92..6c20b8179ab03 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -22,9 +22,14 @@ import ( "strings" ) -// EnvironmentFilepathName captures the name of the environment variable containing the path to the file -// to be used while populating the Azure Environment. -const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" +const ( + // EnvironmentFilepathName captures the name of the environment variable containing the path to the file + // to be used while populating the Azure Environment. + EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + + // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. + NotAvailable = "N/A" +) var environments = map[string]Environment{ "AZURECHINACLOUD": ChinaCloud, @@ -33,28 +38,40 @@ var environments = map[string]Environment{ "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, } +// ResourceIdentifier contains a set of Azure resource IDs. 
+type ResourceIdentifier struct { + Graph string `json:"graph"` + KeyVault string `json:"keyVault"` + Datalake string `json:"datalake"` + Batch string `json:"batch"` + OperationalInsights string `json:"operationalInsights"` + Storage string `json:"storage"` +} + // Environment represents a set of endpoints for each of Azure's Clouds. type Environment struct { - Name string `json:"name"` - ManagementPortalURL string `json:"managementPortalURL"` - PublishSettingsURL string `json:"publishSettingsURL"` - ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` - ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` - ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` - GalleryEndpoint string `json:"galleryEndpoint"` - KeyVaultEndpoint string `json:"keyVaultEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - ServiceBusEndpoint string `json:"serviceBusEndpoint"` - BatchManagementEndpoint string `json:"batchManagementEndpoint"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` - SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` - TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` - KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` - ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` - ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` - ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` - ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` - TokenAudience string `json:"tokenAudience"` + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` + TokenAudience string `json:"tokenAudience"` + ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` } var ( @@ -79,7 +96,16 @@ var ( ServiceManagementVMDNSSuffix: "cloudapp.net", ResourceManagerVMDNSSuffix: "cloudapp.azure.com", ContainerRegistryDNSSuffix: "azurecr.io", + CosmosDBDNSSuffix: "documents.azure.com", TokenAudience: "https://management.azure.com/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.azure.net", + Datalake: "https://datalake.azure.net/", + Batch: "https://batch.core.windows.net/", + OperationalInsights: "https://api.loganalytics.io", + Storage: "https://storage.azure.com/", + }, } // USGovernmentCloud is the cloud environment for the US Government @@ -102,8 +128,17 @@ var ( 
ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", ServiceManagementVMDNSSuffix: "usgovcloudapp.net", ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", - ContainerRegistryDNSSuffix: "azurecr.io", + ContainerRegistryDNSSuffix: "azurecr.us", + CosmosDBDNSSuffix: "documents.azure.us", TokenAudience: "https://management.usgovcloudapi.net/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.usgovcloudapi.net", + Datalake: NotAvailable, + Batch: "https://batch.core.usgovcloudapi.net/", + OperationalInsights: "https://api.loganalytics.us", + Storage: "https://storage.azure.com/", + }, } // ChinaCloud is the cloud environment operated in China @@ -126,8 +161,17 @@ var ( ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", ServiceManagementVMDNSSuffix: "chinacloudapp.cn", ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", - ContainerRegistryDNSSuffix: "azurecr.io", + ContainerRegistryDNSSuffix: "azurecr.cn", + CosmosDBDNSSuffix: "documents.azure.cn", TokenAudience: "https://management.chinacloudapi.cn/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.chinacloudapi.cn/", + KeyVault: "https://vault.azure.cn", + Datalake: NotAvailable, + Batch: "https://batch.chinacloudapi.cn/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + }, } // GermanCloud is the cloud environment operated in Germany @@ -150,8 +194,17 @@ var ( ServiceBusEndpointSuffix: "servicebus.cloudapi.de", ServiceManagementVMDNSSuffix: "azurecloudapp.de", ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", - ContainerRegistryDNSSuffix: "azurecr.io", + ContainerRegistryDNSSuffix: NotAvailable, + CosmosDBDNSSuffix: "documents.microsoftazure.de", TokenAudience: "https://management.microsoftazure.de/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.cloudapi.de/", + KeyVault: "https://vault.microsoftazure.de", + Datalake: NotAvailable, + Batch: "https://batch.cloudapi.de/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + }, } ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go index 48eb0e8b73bc0..1c6a0617a1fe7 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -16,17 +16,16 @@ package autorest import ( "bytes" + "crypto/tls" "fmt" "io" "io/ioutil" "log" "net/http" - "net/http/cookiejar" "strings" "time" "github.com/Azure/go-autorest/logger" - "github.com/Azure/go-autorest/version" ) const ( @@ -72,6 +71,22 @@ type Response struct { *http.Response `json:"-"` } +// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code. +// If there was no response (i.e. the underlying http.Response is nil) the return value is false. +func (r Response) IsHTTPStatus(statusCode int) bool { + if r.Response == nil { + return false + } + return r.Response.StatusCode == statusCode +} + +// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes. +// If there was no response (i.e. the underlying http.Response is nil) or not status codes are provided +// the return value is false. +func (r Response) HasHTTPStatus(statusCodes ...int) bool { + return ResponseHasStatusCode(r.Response, statusCodes...) +} + // LoggingInspector implements request and response inspectors that log the full request and // response to a supplied log. 
type LoggingInspector struct { @@ -169,14 +184,32 @@ type Client struct { // NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed // string. func NewClientWithUserAgent(ua string) Client { + return newClient(ua, tls.RenegotiateNever) +} + +// ClientOptions contains various Client configuration options. +type ClientOptions struct { + // UserAgent is an optional user-agent string to append to the default user agent. + UserAgent string + + // Renegotiation is an optional setting to control client-side TLS renegotiation. + Renegotiation tls.RenegotiationSupport +} + +// NewClientWithOptions returns an instance of a Client with the specified values. +func NewClientWithOptions(options ClientOptions) Client { + return newClient(options.UserAgent, options.Renegotiation) +} + +func newClient(ua string, renegotiation tls.RenegotiationSupport) Client { c := Client{ PollingDelay: DefaultPollingDelay, PollingDuration: DefaultPollingDuration, RetryAttempts: DefaultRetryAttempts, RetryDuration: DefaultRetryDuration, - UserAgent: version.UserAgent(), + UserAgent: UserAgent(), } - c.Sender = c.sender() + c.Sender = c.sender(renegotiation) c.AddToUserAgent(ua) return c } @@ -220,17 +253,16 @@ func (c Client) Do(r *http.Request) (*http.Response, error) { return true, v }, }) - resp, err := SendWithSender(c.sender(), r) + resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r) logger.Instance.WriteResponse(resp, logger.Filter{}) Respond(resp, c.ByInspecting()) return resp, err } // sender returns the Sender to which to send requests. -func (c Client) sender() Sender { +func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender { if c.Sender == nil { - j, _ := cookiejar.New(nil) - return &http.Client{Jar: j} + return sender(renengotiation) } return c.Sender } diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE new file mode 100644 index 0000000000000..b9d6a27ea92ef --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
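Illustrative usage (not part of the diff): the client.go hunk earlier in this change adds a ClientOptions struct and a NewClientWithOptions constructor so callers can opt in to client-side TLS renegotiation instead of the default tls.RenegotiateNever. The sketch below assumes only the names shown in that hunk; the user-agent string and the renegotiation mode chosen here are arbitrary examples.

package main

import (
	"crypto/tls"
	"fmt"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Build a Client with an extra user-agent fragment and permissive
	// client-side TLS renegotiation (the default remains tls.RenegotiateNever).
	c := autorest.NewClientWithOptions(autorest.ClientOptions{
		UserAgent:     "example-operator/0.1.0",
		Renegotiation: tls.RenegotiateFreelyAsClient,
	})

	// Per the ClientOptions doc comment, the fragment is appended to the
	// default go-autorest user agent.
	fmt.Println(c.UserAgent)
}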
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod new file mode 100644 index 0000000000000..13a1e9803386f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod @@ -0,0 +1,3 @@ +module github.com/Azure/go-autorest/autorest/date + +go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod new file mode 100644 index 0000000000000..ab2ae66acec5a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest/adal v0.5.0 + github.com/Azure/go-autorest/autorest/mocks v0.2.0 + github.com/Azure/go-autorest/logger v0.1.0 + github.com/Azure/go-autorest/tracing v0.5.0 + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum new file mode 100644 index 0000000000000..729b99cd099c0 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum @@ -0,0 +1,18 @@ +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go index 6d67bd7337b7b..9f864ab1a15fa 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -16,7 +16,9 @@ package autorest import ( "bytes" + "context" "encoding/json" + "encoding/xml" "fmt" "io" "io/ioutil" @@ -31,11 +33,33 @@ const ( mimeTypeOctetStream = "application/octet-stream" mimeTypeFormPost = 
"application/x-www-form-urlencoded" - headerAuthorization = "Authorization" - headerContentType = "Content-Type" - headerUserAgent = "User-Agent" + headerAuthorization = "Authorization" + headerAuxAuthorization = "x-ms-authorization-auxiliary" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" ) +// used as a key type in context.WithValue() +type ctxPrepareDecorators struct{} + +// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. +// If no PrepareDecorators are provided the context is unchanged. +func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context { + if len(prepareDecorator) == 0 { + return ctx + } + return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator) +} + +// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators. +func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator { + inCtx := ctx.Value(ctxPrepareDecorators{}) + if pd, ok := inCtx.([]PrepareDecorator); ok { + return pd + } + return defaultPrepareDecorators +} + // Preparer is the interface that wraps the Prepare method. // // Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations @@ -190,6 +214,9 @@ func AsGet() PrepareDecorator { return WithMethod("GET") } // AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. func AsHead() PrepareDecorator { return WithMethod("HEAD") } +// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. +func AsMerge() PrepareDecorator { return WithMethod("MERGE") } + // AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } @@ -225,6 +252,25 @@ func WithBaseURL(baseURL string) PrepareDecorator { } } +// WithBytes returns a PrepareDecorator that takes a list of bytes +// which passes the bytes directly to the body +func WithBytes(input *[]byte) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if input == nil { + return r, fmt.Errorf("Input Bytes was nil") + } + + r.ContentLength = int64(len(*input)) + r.Body = ioutil.NopCloser(bytes.NewReader(*input)) + } + return r, err + }) + } +} + // WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the // request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { @@ -377,6 +423,28 @@ func WithJSON(v interface{}) PrepareDecorator { } } +// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the +// request and sets the Content-Length header. +func WithXML(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := xml.Marshal(v) + if err == nil { + // we have to tack on an XML header + withHeader := xml.Header + string(b) + bytesWithHeader := []byte(withHeader) + + r.ContentLength = int64(len(bytesWithHeader)) + r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) + } + } + return r, err + }) + } +} + // WithPath returns a PrepareDecorator that adds the supplied path to the request URL. 
If the path // is absolute (that is, it begins with a "/"), it replaces the existing path. func WithPath(path string) PrepareDecorator { diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go index a908a0adb708f..349e1963a2cf3 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -153,6 +153,25 @@ func ByClosingIfError() RespondDecorator { } } +// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingBytes(v *[]byte) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + bytes, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + *v = bytes + } + } + return err + }) + } +} + // ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the // response Body into the value pointed to by v. func ByUnmarshallingJSON(v interface{}) RespondDecorator { diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go index e8893a282e386..e582489b326b5 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -15,14 +15,40 @@ package autorest // limitations under the License. import ( + "context" + "crypto/tls" "fmt" "log" "math" "net/http" + "net/http/cookiejar" "strconv" "time" + + "github.com/Azure/go-autorest/tracing" ) +// used as a key type in context.WithValue() +type ctxSendDecorators struct{} + +// WithSendDecorators adds the specified SendDecorators to the provided context. +// If no SendDecorators are provided the context is unchanged. +func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context { + if len(sendDecorator) == 0 { + return ctx + } + return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator) +} + +// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators. +func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator { + inCtx := ctx.Value(ctxSendDecorators{}) + if sd, ok := inCtx.([]SendDecorator); ok { + return sd + } + return defaultSendDecorators +} + // Sender is the interface that wraps the Do method to send HTTP requests. // // The standard http.Client conforms to this interface. @@ -38,14 +64,14 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { return sf(r) } -// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the +// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the // http.Request and pass it along or, first, pass the http.Request along then react to the // http.Response result. type SendDecorator func(Sender) Sender // CreateSender creates, decorates, and returns, as a Sender, the default http.Client. func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(&http.Client{}, decorators...) + return DecorateSender(sender(tls.RenegotiateNever), decorators...) 
} // DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to @@ -68,7 +94,7 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender { // // Send will not poll or retry requests. func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return SendWithSender(&http.Client{}, r, decorators...) + return SendWithSender(sender(tls.RenegotiateNever), r, decorators...) } // SendWithSender sends the passed http.Request, through the provided Sender, returning the @@ -80,6 +106,29 @@ func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*ht return DecorateSender(s, decorators...).Do(r) } +func sender(renengotiation tls.RenegotiationSupport) Sender { + // Use behaviour compatible with DefaultTransport, but require TLS minimum version. + defaultTransport := http.DefaultTransport.(*http.Transport) + transport := &http.Transport{ + Proxy: defaultTransport.Proxy, + DialContext: defaultTransport.DialContext, + MaxIdleConns: defaultTransport.MaxIdleConns, + IdleConnTimeout: defaultTransport.IdleConnTimeout, + TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + Renegotiation: renengotiation, + }, + } + var roundTripper http.RoundTripper = transport + if tracing.IsEnabled() { + roundTripper = tracing.NewTransport(transport) + } + j, _ := cookiejar.New(nil) + return &http.Client{Jar: j, Transport: roundTripper} +} + // AfterDelay returns a SendDecorator that delays for the passed time.Duration before // invoking the Sender. The delay may be terminated by closing the optional channel on the // http.Request. If canceled, no further Senders are invoked. @@ -209,54 +258,77 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { // DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified // number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. +// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request. +// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts. func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - // Increment to add the first call (attempts denotes number of retries) - attempts++ - for attempt := 0; attempt < attempts; { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - // if the error isn't temporary don't bother retrying - if err != nil && !IsTemporaryNetworkError(err) { - return nil, err - } - // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication - // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. - if err == nil && !ResponseHasStatusCode(resp, codes...) 
|| IsTokenRefreshError(err) { - return resp, err - } - delayed := DelayWithRetryAfter(resp, r.Context().Done()) - if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return resp, r.Context().Err() - } - // don't count a 429 against the number of attempts - // so that we continue to retry until it succeeds - if resp == nil || resp.StatusCode != http.StatusTooManyRequests { - attempt++ - } - } - return resp, err + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...) }) } } -// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in -// responses with status code 429 +// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the +// specified number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater +// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request. +func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...) + }) + } +} + +func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + for attempt := 0; attempt < attempts+1; { + err = rr.Prepare() + if err != nil { + return + } + resp, err = s.Do(rr.Request()) + // if the error isn't temporary don't bother retrying + if err != nil && !IsTemporaryNetworkError(err) { + return + } + // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication + // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. + if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { + return resp, err + } + delayed := DelayWithRetryAfter(resp, r.Context().Done()) + if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) { + return resp, r.Context().Err() + } + // when count429 == false don't count a 429 against the number + // of attempts so that we continue to retry until it succeeds + if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) { + attempt++ + } + } + return resp, err +} + +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header. +// The value of Retry-After can be either the number of seconds or a date in RFC1123 format. +// The function returns true after successfully waiting for the specified duration. If there is +// no Retry-After header or the wait is cancelled the return value is false. 
func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { if resp == nil { return false } - retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) - if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { + var dur time.Duration + ra := resp.Header.Get("Retry-After") + if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { + dur = time.Duration(retryAfter) * time.Second + } else if t, err := time.Parse(time.RFC1123, ra); err == nil { + dur = t.Sub(time.Now()) + } + if dur > 0 { select { - case <-time.After(time.Duration(retryAfter) * time.Second): + case <-time.After(dur): return true case <-cancel: return false @@ -316,8 +388,22 @@ func WithLogging(logger *log.Logger) SendDecorator { // Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt // count. func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { + return DelayForBackoffWithCap(backoff, 0, attempt, cancel) +} + +// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap. +// The delay may be canceled by closing the passed channel. If terminated early, returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. +func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool { + d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second + if cap > 0 && d > cap { + d = cap + } select { - case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): + case <-time.After(d): return true case <-cancel: return false diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go index bfddd90b5bfa4..08cf11c118981 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -157,7 +157,7 @@ func AsStringSlice(s interface{}) ([]string, error) { } // String method converts interface v to string. If interface is a list, it -// joins list elements using the seperator. Note that only sep[0] will be used for +// joins list elements using the separator. Note that only sep[0] will be used for // joining if any separator is specified. func String(v interface{}, sep ...string) string { if len(sep) == 0 { diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index 3c6451546bae8..cb851937a11a4 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -1,7 +1,5 @@ package autorest -import "github.com/Azure/go-autorest/version" - // Copyright 2017 Microsoft Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +14,28 @@ import "github.com/Azure/go-autorest/version" // See the License for the specific language governing permissions and // limitations under the License. 
+import ( + "fmt" + "runtime" +) + +const number = "v13.0.0" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} + // Version returns the semantic version (see http://semver.org). func Version() string { - return version.Number + return number } diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE new file mode 100644 index 0000000000000..b9d6a27ea92ef --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod new file mode 100644 index 0000000000000..f22ed56bcde47 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go.mod @@ -0,0 +1,3 @@ +module github.com/Azure/go-autorest/logger + +go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go index 756fd80cab162..da09f394c5d0b 100644 --- a/vendor/github.com/Azure/go-autorest/logger/logger.go +++ b/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -162,7 +162,7 @@ type Writer interface { // WriteResponse writes the specified HTTP response to the logger if the log level is greater than // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no respone content is excluded. + // By default no response content is excluded. WriteResponse(resp *http.Response, filter Filter) } @@ -318,7 +318,7 @@ func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { // returns true if the provided body should be included in the log func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { ct := header.Get("Content-Type") - return fl.logLevel >= LogDebug && body != nil && strings.Index(ct, "application/octet-stream") == -1 + return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") } // creates standard header for log entries, it contains a timestamp and the log level diff --git a/vendor/github.com/Azure/go-autorest/version/BUILD.bazel b/vendor/github.com/Azure/go-autorest/tracing/BUILD.bazel similarity index 68% rename from vendor/github.com/Azure/go-autorest/version/BUILD.bazel rename to vendor/github.com/Azure/go-autorest/tracing/BUILD.bazel index 977627349b94b..4693ad02c4674 100644 --- a/vendor/github.com/Azure/go-autorest/version/BUILD.bazel +++ b/vendor/github.com/Azure/go-autorest/tracing/BUILD.bazel @@ -2,8 +2,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["version.go"], - importmap = "k8s.io/kops/vendor/github.com/Azure/go-autorest/version", - importpath = "github.com/Azure/go-autorest/version", + srcs = ["tracing.go"], + importmap = "k8s.io/kops/vendor/github.com/Azure/go-autorest/tracing", + importpath = "github.com/Azure/go-autorest/tracing", visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE new file mode 100644 index 0000000000000..b9d6a27ea92ef --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND 
DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
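Hedged sketch (not part of the diff) of the retry surface added in the sender.go hunk earlier in this change: DoRetryForStatusCodesWithCap caps the exponential backoff between attempts, Retry-After headers (seconds or an RFC1123 date) are honored when present, and, unlike DoRetryForStatusCodes, the capped variant also counts 429 responses against the attempt budget. The endpoint URL, status codes, and durations below are illustrative.

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "https://example.invalid/api", nil)
	if err != nil {
		panic(err)
	}

	// Retry up to 3 times on the listed status codes, doubling a 2s backoff
	// between attempts but never sleeping longer than 30s per attempt.
	// Retrying can be canceled by cancelling the request's context.
	resp, err := autorest.Send(req,
		autorest.DoRetryForStatusCodesWithCap(3, 2*time.Second, 30*time.Second,
			http.StatusTooManyRequests,
			http.StatusInternalServerError,
			http.StatusBadGateway,
		),
	)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}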
diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod new file mode 100644 index 0000000000000..25c34c1085a7a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod @@ -0,0 +1,3 @@ +module github.com/Azure/go-autorest/tracing + +go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 0000000000000..0e7a6e96254aa --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,67 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" +) + +// Tracer represents an HTTP tracing facility. +type Tracer interface { + NewTransport(base *http.Transport) http.RoundTripper + StartSpan(ctx context.Context, name string) context.Context + EndSpan(ctx context.Context, httpStatusCode int, err error) +} + +var ( + tracer Tracer +) + +// Register will register the provided Tracer. Pass nil to unregister a Tracer. +func Register(t Tracer) { + tracer = t +} + +// IsEnabled returns true if a Tracer has been registered. +func IsEnabled() bool { + return tracer != nil +} + +// NewTransport creates a new instrumenting http.RoundTripper for the +// registered Tracer. If no Tracer has been registered it returns nil. +func NewTransport(base *http.Transport) http.RoundTripper { + if tracer != nil { + return tracer.NewTransport(base) + } + return nil +} + +// StartSpan starts a trace span with the specified name, associating it with the +// provided context. Has no effect if a Tracer has not been registered. +func StartSpan(ctx context.Context, name string) context.Context { + if tracer != nil { + return tracer.StartSpan(ctx, name) + } + return ctx +} + +// EndSpan ends a previously started span stored in the context. +// Has no effect if a Tracer has not been registered. +func EndSpan(ctx context.Context, httpStatusCode int, err error) { + if tracer != nil { + tracer.EndSpan(ctx, httpStatusCode, err) + } +} diff --git a/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter/filter.go b/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter/filter.go index 2a58b9c14be48..e005c8fc3a685 100644 --- a/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter/filter.go +++ b/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter/filter.go @@ -43,32 +43,32 @@ var ( None *F ) -// Regexp returns a filter for fieldName matches regexp v. +// Regexp returns a filter for fieldName eq regexp v. func Regexp(fieldName, v string) *F { return (&F{}).AndRegexp(fieldName, v) } -// NotRegexp returns a filter for fieldName not matches regexp v. +// NotRegexp returns a filter for fieldName ne regexp v. func NotRegexp(fieldName, v string) *F { return (&F{}).AndNotRegexp(fieldName, v) } -// EqualInt returns a filter for fieldName ~ v. 
+// EqualInt returns a filter for fieldName eq v. func EqualInt(fieldName string, v int) *F { return (&F{}).AndEqualInt(fieldName, v) } -// NotEqualInt returns a filter for fieldName != v. +// NotEqualInt returns a filter for fieldName ne v. func NotEqualInt(fieldName string, v int) *F { return (&F{}).AndNotEqualInt(fieldName, v) } -// EqualBool returns a filter for fieldName == v. +// EqualBool returns a filter for fieldName eq v. func EqualBool(fieldName string, v bool) *F { return (&F{}).AndEqualBool(fieldName, v) } -// NotEqualBool returns a filter for fieldName != v. +// NotEqualBool returns a filter for fieldName ne v. func NotEqualBool(fieldName string, v bool) *F { return (&F{}).AndNotEqualBool(fieldName, v) } @@ -104,25 +104,27 @@ type F struct { predicates []filterPredicate } +// TODO(rramkumar): Support logical OR + // And joins two filters together. func (fl *F) And(rest *F) *F { fl.predicates = append(fl.predicates, rest.predicates...) return fl } -// AndRegexp adds a field match string predicate. +// AndRegexp adds a field ~ string predicate. func (fl *F) AndRegexp(fieldName, v string) *F { - fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: equals, s: &v}) + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: regexpEquals, s: &v}) return fl } -// AndNotRegexp adds a field not match string predicate. +// AndNotRegexp adds a field !~ string predicate. func (fl *F) AndNotRegexp(fieldName, v string) *F { - fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: notEquals, s: &v}) + fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: regexpNotEquals, s: &v}) return fl } -// AndEqualInt adds a field == int predicate. +// AndEqualInt adds a field = int predicate. func (fl *F) AndEqualInt(fieldName string, v int) *F { fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: equals, i: &v}) return fl @@ -134,7 +136,7 @@ func (fl *F) AndNotEqualInt(fieldName string, v int) *F { return fl } -// AndEqualBool adds a field == bool predicate. +// AndEqualBool adds a field = bool predicate. func (fl *F) AndEqualBool(fieldName string, v bool) *F { fl.predicates = append(fl.predicates, filterPredicate{fieldName: fieldName, op: equals, b: &v}) return fl @@ -177,8 +179,10 @@ func (fl *F) Match(obj interface{}) bool { type filterOp int const ( - equals filterOp = iota - notEquals filterOp = iota + regexpEquals filterOp = iota + regexpNotEquals filterOp = iota + equals filterOp = iota + notEquals filterOp = iota ) // filterPredicate is an individual predicate for a fieldName and value. 
@@ -194,6 +198,13 @@ type filterPredicate struct { func (fp *filterPredicate) String() string { var op string switch fp.op { + case regexpEquals: + // GCE API maps regexp comparison to 'eq' + op = "eq" + case regexpNotEquals: + op = "ne" + // Since GCE API does not allow using a mix of 'eq' and '=' operators, + // we use 'eq' everywhere case equals: op = "eq" case notEquals: @@ -237,7 +248,10 @@ func (fp *filterPredicate) match(o interface{}) bool { klog.Errorf("Match regexp %q is invalid: %v", *fp.s, err) return false } - match = re.Match([]byte(x)) + match = x == *fp.s + if fp.op < regexpNotEquals { + match = re.Match([]byte(x)) + } case int: if fp.i == nil { return false @@ -251,6 +265,10 @@ func (fp *filterPredicate) match(o interface{}) bool { } switch fp.op { + case regexpEquals: + return match + case regexpNotEquals: + return !match case equals: return match case notEquals: diff --git a/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/gen.go b/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/gen.go index 9c34936ffe959..da514d2d62a57 100644 --- a/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/gen.go +++ b/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/gen.go @@ -1,5 +1,5 @@ /* -Copyright 2018 Google LLC +Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -41,80 +41,140 @@ type Cloud interface { Addresses() Addresses AlphaAddresses() AlphaAddresses BetaAddresses() BetaAddresses + AlphaGlobalAddresses() AlphaGlobalAddresses GlobalAddresses() GlobalAddresses BackendServices() BackendServices BetaBackendServices() BetaBackendServices AlphaBackendServices() AlphaBackendServices RegionBackendServices() RegionBackendServices AlphaRegionBackendServices() AlphaRegionBackendServices + BetaRegionBackendServices() BetaRegionBackendServices Disks() Disks RegionDisks() RegionDisks Firewalls() Firewalls ForwardingRules() ForwardingRules AlphaForwardingRules() AlphaForwardingRules + BetaForwardingRules() BetaForwardingRules + AlphaGlobalForwardingRules() AlphaGlobalForwardingRules + BetaGlobalForwardingRules() BetaGlobalForwardingRules GlobalForwardingRules() GlobalForwardingRules HealthChecks() HealthChecks AlphaHealthChecks() AlphaHealthChecks BetaHealthChecks() BetaHealthChecks + AlphaRegionHealthChecks() AlphaRegionHealthChecks + BetaRegionHealthChecks() BetaRegionHealthChecks HttpHealthChecks() HttpHealthChecks HttpsHealthChecks() HttpsHealthChecks InstanceGroups() InstanceGroups Instances() Instances BetaInstances() BetaInstances AlphaInstances() AlphaInstances + AlphaNetworks() AlphaNetworks + BetaNetworks() BetaNetworks + Networks() Networks AlphaNetworkEndpointGroups() AlphaNetworkEndpointGroups BetaNetworkEndpointGroups() BetaNetworkEndpointGroups + NetworkEndpointGroups() NetworkEndpointGroups Projects() Projects Regions() Regions Routes() Routes BetaSecurityPolicies() BetaSecurityPolicies SslCertificates() SslCertificates + BetaSslCertificates() BetaSslCertificates + AlphaSslCertificates() AlphaSslCertificates + AlphaRegionSslCertificates() AlphaRegionSslCertificates + BetaRegionSslCertificates() BetaRegionSslCertificates + AlphaSubnetworks() AlphaSubnetworks + BetaSubnetworks() BetaSubnetworks + Subnetworks() Subnetworks + AlphaTargetHttpProxies() AlphaTargetHttpProxies + BetaTargetHttpProxies() BetaTargetHttpProxies TargetHttpProxies() TargetHttpProxies + AlphaRegionTargetHttpProxies() 
AlphaRegionTargetHttpProxies + BetaRegionTargetHttpProxies() BetaRegionTargetHttpProxies TargetHttpsProxies() TargetHttpsProxies + AlphaTargetHttpsProxies() AlphaTargetHttpsProxies + BetaTargetHttpsProxies() BetaTargetHttpsProxies + AlphaRegionTargetHttpsProxies() AlphaRegionTargetHttpsProxies + BetaRegionTargetHttpsProxies() BetaRegionTargetHttpsProxies TargetPools() TargetPools + AlphaUrlMaps() AlphaUrlMaps + BetaUrlMaps() BetaUrlMaps UrlMaps() UrlMaps + AlphaRegionUrlMaps() AlphaRegionUrlMaps + BetaRegionUrlMaps() BetaRegionUrlMaps Zones() Zones } // NewGCE returns a GCE. func NewGCE(s *Service) *GCE { g := &GCE{ - gceAddresses: &GCEAddresses{s}, - gceAlphaAddresses: &GCEAlphaAddresses{s}, - gceBetaAddresses: &GCEBetaAddresses{s}, - gceGlobalAddresses: &GCEGlobalAddresses{s}, - gceBackendServices: &GCEBackendServices{s}, - gceBetaBackendServices: &GCEBetaBackendServices{s}, - gceAlphaBackendServices: &GCEAlphaBackendServices{s}, - gceRegionBackendServices: &GCERegionBackendServices{s}, - gceAlphaRegionBackendServices: &GCEAlphaRegionBackendServices{s}, - gceDisks: &GCEDisks{s}, - gceRegionDisks: &GCERegionDisks{s}, - gceFirewalls: &GCEFirewalls{s}, - gceForwardingRules: &GCEForwardingRules{s}, - gceAlphaForwardingRules: &GCEAlphaForwardingRules{s}, - gceGlobalForwardingRules: &GCEGlobalForwardingRules{s}, - gceHealthChecks: &GCEHealthChecks{s}, - gceAlphaHealthChecks: &GCEAlphaHealthChecks{s}, - gceBetaHealthChecks: &GCEBetaHealthChecks{s}, - gceHttpHealthChecks: &GCEHttpHealthChecks{s}, - gceHttpsHealthChecks: &GCEHttpsHealthChecks{s}, - gceInstanceGroups: &GCEInstanceGroups{s}, - gceInstances: &GCEInstances{s}, - gceBetaInstances: &GCEBetaInstances{s}, - gceAlphaInstances: &GCEAlphaInstances{s}, - gceAlphaNetworkEndpointGroups: &GCEAlphaNetworkEndpointGroups{s}, - gceBetaNetworkEndpointGroups: &GCEBetaNetworkEndpointGroups{s}, - gceProjects: &GCEProjects{s}, - gceRegions: &GCERegions{s}, - gceRoutes: &GCERoutes{s}, - gceBetaSecurityPolicies: &GCEBetaSecurityPolicies{s}, - gceSslCertificates: &GCESslCertificates{s}, - gceTargetHttpProxies: &GCETargetHttpProxies{s}, - gceTargetHttpsProxies: &GCETargetHttpsProxies{s}, - gceTargetPools: &GCETargetPools{s}, - gceUrlMaps: &GCEUrlMaps{s}, - gceZones: &GCEZones{s}, + gceAddresses: &GCEAddresses{s}, + gceAlphaAddresses: &GCEAlphaAddresses{s}, + gceBetaAddresses: &GCEBetaAddresses{s}, + gceAlphaGlobalAddresses: &GCEAlphaGlobalAddresses{s}, + gceGlobalAddresses: &GCEGlobalAddresses{s}, + gceBackendServices: &GCEBackendServices{s}, + gceBetaBackendServices: &GCEBetaBackendServices{s}, + gceAlphaBackendServices: &GCEAlphaBackendServices{s}, + gceRegionBackendServices: &GCERegionBackendServices{s}, + gceAlphaRegionBackendServices: &GCEAlphaRegionBackendServices{s}, + gceBetaRegionBackendServices: &GCEBetaRegionBackendServices{s}, + gceDisks: &GCEDisks{s}, + gceRegionDisks: &GCERegionDisks{s}, + gceFirewalls: &GCEFirewalls{s}, + gceForwardingRules: &GCEForwardingRules{s}, + gceAlphaForwardingRules: &GCEAlphaForwardingRules{s}, + gceBetaForwardingRules: &GCEBetaForwardingRules{s}, + gceAlphaGlobalForwardingRules: &GCEAlphaGlobalForwardingRules{s}, + gceBetaGlobalForwardingRules: &GCEBetaGlobalForwardingRules{s}, + gceGlobalForwardingRules: &GCEGlobalForwardingRules{s}, + gceHealthChecks: &GCEHealthChecks{s}, + gceAlphaHealthChecks: &GCEAlphaHealthChecks{s}, + gceBetaHealthChecks: &GCEBetaHealthChecks{s}, + gceAlphaRegionHealthChecks: &GCEAlphaRegionHealthChecks{s}, + gceBetaRegionHealthChecks: &GCEBetaRegionHealthChecks{s}, + gceHttpHealthChecks: 
&GCEHttpHealthChecks{s}, + gceHttpsHealthChecks: &GCEHttpsHealthChecks{s}, + gceInstanceGroups: &GCEInstanceGroups{s}, + gceInstances: &GCEInstances{s}, + gceBetaInstances: &GCEBetaInstances{s}, + gceAlphaInstances: &GCEAlphaInstances{s}, + gceAlphaNetworks: &GCEAlphaNetworks{s}, + gceBetaNetworks: &GCEBetaNetworks{s}, + gceNetworks: &GCENetworks{s}, + gceAlphaNetworkEndpointGroups: &GCEAlphaNetworkEndpointGroups{s}, + gceBetaNetworkEndpointGroups: &GCEBetaNetworkEndpointGroups{s}, + gceNetworkEndpointGroups: &GCENetworkEndpointGroups{s}, + gceProjects: &GCEProjects{s}, + gceRegions: &GCERegions{s}, + gceRoutes: &GCERoutes{s}, + gceBetaSecurityPolicies: &GCEBetaSecurityPolicies{s}, + gceSslCertificates: &GCESslCertificates{s}, + gceBetaSslCertificates: &GCEBetaSslCertificates{s}, + gceAlphaSslCertificates: &GCEAlphaSslCertificates{s}, + gceAlphaRegionSslCertificates: &GCEAlphaRegionSslCertificates{s}, + gceBetaRegionSslCertificates: &GCEBetaRegionSslCertificates{s}, + gceAlphaSubnetworks: &GCEAlphaSubnetworks{s}, + gceBetaSubnetworks: &GCEBetaSubnetworks{s}, + gceSubnetworks: &GCESubnetworks{s}, + gceAlphaTargetHttpProxies: &GCEAlphaTargetHttpProxies{s}, + gceBetaTargetHttpProxies: &GCEBetaTargetHttpProxies{s}, + gceTargetHttpProxies: &GCETargetHttpProxies{s}, + gceAlphaRegionTargetHttpProxies: &GCEAlphaRegionTargetHttpProxies{s}, + gceBetaRegionTargetHttpProxies: &GCEBetaRegionTargetHttpProxies{s}, + gceTargetHttpsProxies: &GCETargetHttpsProxies{s}, + gceAlphaTargetHttpsProxies: &GCEAlphaTargetHttpsProxies{s}, + gceBetaTargetHttpsProxies: &GCEBetaTargetHttpsProxies{s}, + gceAlphaRegionTargetHttpsProxies: &GCEAlphaRegionTargetHttpsProxies{s}, + gceBetaRegionTargetHttpsProxies: &GCEBetaRegionTargetHttpsProxies{s}, + gceTargetPools: &GCETargetPools{s}, + gceAlphaUrlMaps: &GCEAlphaUrlMaps{s}, + gceBetaUrlMaps: &GCEBetaUrlMaps{s}, + gceUrlMaps: &GCEUrlMaps{s}, + gceAlphaRegionUrlMaps: &GCEAlphaRegionUrlMaps{s}, + gceBetaRegionUrlMaps: &GCEBetaRegionUrlMaps{s}, + gceZones: &GCEZones{s}, } return g } @@ -124,42 +184,72 @@ var _ Cloud = (*GCE)(nil) // GCE is the golang adapter for the compute APIs. 
type GCE struct { - gceAddresses *GCEAddresses - gceAlphaAddresses *GCEAlphaAddresses - gceBetaAddresses *GCEBetaAddresses - gceGlobalAddresses *GCEGlobalAddresses - gceBackendServices *GCEBackendServices - gceBetaBackendServices *GCEBetaBackendServices - gceAlphaBackendServices *GCEAlphaBackendServices - gceRegionBackendServices *GCERegionBackendServices - gceAlphaRegionBackendServices *GCEAlphaRegionBackendServices - gceDisks *GCEDisks - gceRegionDisks *GCERegionDisks - gceFirewalls *GCEFirewalls - gceForwardingRules *GCEForwardingRules - gceAlphaForwardingRules *GCEAlphaForwardingRules - gceGlobalForwardingRules *GCEGlobalForwardingRules - gceHealthChecks *GCEHealthChecks - gceAlphaHealthChecks *GCEAlphaHealthChecks - gceBetaHealthChecks *GCEBetaHealthChecks - gceHttpHealthChecks *GCEHttpHealthChecks - gceHttpsHealthChecks *GCEHttpsHealthChecks - gceInstanceGroups *GCEInstanceGroups - gceInstances *GCEInstances - gceBetaInstances *GCEBetaInstances - gceAlphaInstances *GCEAlphaInstances - gceAlphaNetworkEndpointGroups *GCEAlphaNetworkEndpointGroups - gceBetaNetworkEndpointGroups *GCEBetaNetworkEndpointGroups - gceProjects *GCEProjects - gceRegions *GCERegions - gceRoutes *GCERoutes - gceBetaSecurityPolicies *GCEBetaSecurityPolicies - gceSslCertificates *GCESslCertificates - gceTargetHttpProxies *GCETargetHttpProxies - gceTargetHttpsProxies *GCETargetHttpsProxies - gceTargetPools *GCETargetPools - gceUrlMaps *GCEUrlMaps - gceZones *GCEZones + gceAddresses *GCEAddresses + gceAlphaAddresses *GCEAlphaAddresses + gceBetaAddresses *GCEBetaAddresses + gceAlphaGlobalAddresses *GCEAlphaGlobalAddresses + gceGlobalAddresses *GCEGlobalAddresses + gceBackendServices *GCEBackendServices + gceBetaBackendServices *GCEBetaBackendServices + gceAlphaBackendServices *GCEAlphaBackendServices + gceRegionBackendServices *GCERegionBackendServices + gceAlphaRegionBackendServices *GCEAlphaRegionBackendServices + gceBetaRegionBackendServices *GCEBetaRegionBackendServices + gceDisks *GCEDisks + gceRegionDisks *GCERegionDisks + gceFirewalls *GCEFirewalls + gceForwardingRules *GCEForwardingRules + gceAlphaForwardingRules *GCEAlphaForwardingRules + gceBetaForwardingRules *GCEBetaForwardingRules + gceAlphaGlobalForwardingRules *GCEAlphaGlobalForwardingRules + gceBetaGlobalForwardingRules *GCEBetaGlobalForwardingRules + gceGlobalForwardingRules *GCEGlobalForwardingRules + gceHealthChecks *GCEHealthChecks + gceAlphaHealthChecks *GCEAlphaHealthChecks + gceBetaHealthChecks *GCEBetaHealthChecks + gceAlphaRegionHealthChecks *GCEAlphaRegionHealthChecks + gceBetaRegionHealthChecks *GCEBetaRegionHealthChecks + gceHttpHealthChecks *GCEHttpHealthChecks + gceHttpsHealthChecks *GCEHttpsHealthChecks + gceInstanceGroups *GCEInstanceGroups + gceInstances *GCEInstances + gceBetaInstances *GCEBetaInstances + gceAlphaInstances *GCEAlphaInstances + gceAlphaNetworks *GCEAlphaNetworks + gceBetaNetworks *GCEBetaNetworks + gceNetworks *GCENetworks + gceAlphaNetworkEndpointGroups *GCEAlphaNetworkEndpointGroups + gceBetaNetworkEndpointGroups *GCEBetaNetworkEndpointGroups + gceNetworkEndpointGroups *GCENetworkEndpointGroups + gceProjects *GCEProjects + gceRegions *GCERegions + gceRoutes *GCERoutes + gceBetaSecurityPolicies *GCEBetaSecurityPolicies + gceSslCertificates *GCESslCertificates + gceBetaSslCertificates *GCEBetaSslCertificates + gceAlphaSslCertificates *GCEAlphaSslCertificates + gceAlphaRegionSslCertificates *GCEAlphaRegionSslCertificates + gceBetaRegionSslCertificates *GCEBetaRegionSslCertificates + gceAlphaSubnetworks 
*GCEAlphaSubnetworks + gceBetaSubnetworks *GCEBetaSubnetworks + gceSubnetworks *GCESubnetworks + gceAlphaTargetHttpProxies *GCEAlphaTargetHttpProxies + gceBetaTargetHttpProxies *GCEBetaTargetHttpProxies + gceTargetHttpProxies *GCETargetHttpProxies + gceAlphaRegionTargetHttpProxies *GCEAlphaRegionTargetHttpProxies + gceBetaRegionTargetHttpProxies *GCEBetaRegionTargetHttpProxies + gceTargetHttpsProxies *GCETargetHttpsProxies + gceAlphaTargetHttpsProxies *GCEAlphaTargetHttpsProxies + gceBetaTargetHttpsProxies *GCEBetaTargetHttpsProxies + gceAlphaRegionTargetHttpsProxies *GCEAlphaRegionTargetHttpsProxies + gceBetaRegionTargetHttpsProxies *GCEBetaRegionTargetHttpsProxies + gceTargetPools *GCETargetPools + gceAlphaUrlMaps *GCEAlphaUrlMaps + gceBetaUrlMaps *GCEBetaUrlMaps + gceUrlMaps *GCEUrlMaps + gceAlphaRegionUrlMaps *GCEAlphaRegionUrlMaps + gceBetaRegionUrlMaps *GCEBetaRegionUrlMaps + gceZones *GCEZones } // Addresses returns the interface for the ga Addresses. @@ -177,6 +267,11 @@ func (gce *GCE) BetaAddresses() BetaAddresses { return gce.gceBetaAddresses } +// AlphaGlobalAddresses returns the interface for the alpha GlobalAddresses. +func (gce *GCE) AlphaGlobalAddresses() AlphaGlobalAddresses { + return gce.gceAlphaGlobalAddresses +} + // GlobalAddresses returns the interface for the ga GlobalAddresses. func (gce *GCE) GlobalAddresses() GlobalAddresses { return gce.gceGlobalAddresses @@ -207,6 +302,11 @@ func (gce *GCE) AlphaRegionBackendServices() AlphaRegionBackendServices { return gce.gceAlphaRegionBackendServices } +// BetaRegionBackendServices returns the interface for the beta RegionBackendServices. +func (gce *GCE) BetaRegionBackendServices() BetaRegionBackendServices { + return gce.gceBetaRegionBackendServices +} + // Disks returns the interface for the ga Disks. func (gce *GCE) Disks() Disks { return gce.gceDisks @@ -232,6 +332,21 @@ func (gce *GCE) AlphaForwardingRules() AlphaForwardingRules { return gce.gceAlphaForwardingRules } +// BetaForwardingRules returns the interface for the beta ForwardingRules. +func (gce *GCE) BetaForwardingRules() BetaForwardingRules { + return gce.gceBetaForwardingRules +} + +// AlphaGlobalForwardingRules returns the interface for the alpha GlobalForwardingRules. +func (gce *GCE) AlphaGlobalForwardingRules() AlphaGlobalForwardingRules { + return gce.gceAlphaGlobalForwardingRules +} + +// BetaGlobalForwardingRules returns the interface for the beta GlobalForwardingRules. +func (gce *GCE) BetaGlobalForwardingRules() BetaGlobalForwardingRules { + return gce.gceBetaGlobalForwardingRules +} + // GlobalForwardingRules returns the interface for the ga GlobalForwardingRules. func (gce *GCE) GlobalForwardingRules() GlobalForwardingRules { return gce.gceGlobalForwardingRules @@ -252,6 +367,16 @@ func (gce *GCE) BetaHealthChecks() BetaHealthChecks { return gce.gceBetaHealthChecks } +// AlphaRegionHealthChecks returns the interface for the alpha RegionHealthChecks. +func (gce *GCE) AlphaRegionHealthChecks() AlphaRegionHealthChecks { + return gce.gceAlphaRegionHealthChecks +} + +// BetaRegionHealthChecks returns the interface for the beta RegionHealthChecks. +func (gce *GCE) BetaRegionHealthChecks() BetaRegionHealthChecks { + return gce.gceBetaRegionHealthChecks +} + // HttpHealthChecks returns the interface for the ga HttpHealthChecks. 
func (gce *GCE) HttpHealthChecks() HttpHealthChecks { return gce.gceHttpHealthChecks @@ -282,6 +407,21 @@ func (gce *GCE) AlphaInstances() AlphaInstances { return gce.gceAlphaInstances } +// AlphaNetworks returns the interface for the alpha Networks. +func (gce *GCE) AlphaNetworks() AlphaNetworks { + return gce.gceAlphaNetworks +} + +// BetaNetworks returns the interface for the beta Networks. +func (gce *GCE) BetaNetworks() BetaNetworks { + return gce.gceBetaNetworks +} + +// Networks returns the interface for the ga Networks. +func (gce *GCE) Networks() Networks { + return gce.gceNetworks +} + // AlphaNetworkEndpointGroups returns the interface for the alpha NetworkEndpointGroups. func (gce *GCE) AlphaNetworkEndpointGroups() AlphaNetworkEndpointGroups { return gce.gceAlphaNetworkEndpointGroups @@ -292,6 +432,11 @@ func (gce *GCE) BetaNetworkEndpointGroups() BetaNetworkEndpointGroups { return gce.gceBetaNetworkEndpointGroups } +// NetworkEndpointGroups returns the interface for the ga NetworkEndpointGroups. +func (gce *GCE) NetworkEndpointGroups() NetworkEndpointGroups { + return gce.gceNetworkEndpointGroups +} + // Projects returns the interface for the ga Projects. func (gce *GCE) Projects() Projects { return gce.gceProjects @@ -317,26 +462,121 @@ func (gce *GCE) SslCertificates() SslCertificates { return gce.gceSslCertificates } +// BetaSslCertificates returns the interface for the beta SslCertificates. +func (gce *GCE) BetaSslCertificates() BetaSslCertificates { + return gce.gceBetaSslCertificates +} + +// AlphaSslCertificates returns the interface for the alpha SslCertificates. +func (gce *GCE) AlphaSslCertificates() AlphaSslCertificates { + return gce.gceAlphaSslCertificates +} + +// AlphaRegionSslCertificates returns the interface for the alpha RegionSslCertificates. +func (gce *GCE) AlphaRegionSslCertificates() AlphaRegionSslCertificates { + return gce.gceAlphaRegionSslCertificates +} + +// BetaRegionSslCertificates returns the interface for the beta RegionSslCertificates. +func (gce *GCE) BetaRegionSslCertificates() BetaRegionSslCertificates { + return gce.gceBetaRegionSslCertificates +} + +// AlphaSubnetworks returns the interface for the alpha Subnetworks. +func (gce *GCE) AlphaSubnetworks() AlphaSubnetworks { + return gce.gceAlphaSubnetworks +} + +// BetaSubnetworks returns the interface for the beta Subnetworks. +func (gce *GCE) BetaSubnetworks() BetaSubnetworks { + return gce.gceBetaSubnetworks +} + +// Subnetworks returns the interface for the ga Subnetworks. +func (gce *GCE) Subnetworks() Subnetworks { + return gce.gceSubnetworks +} + +// AlphaTargetHttpProxies returns the interface for the alpha TargetHttpProxies. +func (gce *GCE) AlphaTargetHttpProxies() AlphaTargetHttpProxies { + return gce.gceAlphaTargetHttpProxies +} + +// BetaTargetHttpProxies returns the interface for the beta TargetHttpProxies. +func (gce *GCE) BetaTargetHttpProxies() BetaTargetHttpProxies { + return gce.gceBetaTargetHttpProxies +} + // TargetHttpProxies returns the interface for the ga TargetHttpProxies. func (gce *GCE) TargetHttpProxies() TargetHttpProxies { return gce.gceTargetHttpProxies } +// AlphaRegionTargetHttpProxies returns the interface for the alpha RegionTargetHttpProxies. +func (gce *GCE) AlphaRegionTargetHttpProxies() AlphaRegionTargetHttpProxies { + return gce.gceAlphaRegionTargetHttpProxies +} + +// BetaRegionTargetHttpProxies returns the interface for the beta RegionTargetHttpProxies. 
+func (gce *GCE) BetaRegionTargetHttpProxies() BetaRegionTargetHttpProxies { + return gce.gceBetaRegionTargetHttpProxies +} + // TargetHttpsProxies returns the interface for the ga TargetHttpsProxies. func (gce *GCE) TargetHttpsProxies() TargetHttpsProxies { return gce.gceTargetHttpsProxies } +// AlphaTargetHttpsProxies returns the interface for the alpha TargetHttpsProxies. +func (gce *GCE) AlphaTargetHttpsProxies() AlphaTargetHttpsProxies { + return gce.gceAlphaTargetHttpsProxies +} + +// BetaTargetHttpsProxies returns the interface for the beta TargetHttpsProxies. +func (gce *GCE) BetaTargetHttpsProxies() BetaTargetHttpsProxies { + return gce.gceBetaTargetHttpsProxies +} + +// AlphaRegionTargetHttpsProxies returns the interface for the alpha RegionTargetHttpsProxies. +func (gce *GCE) AlphaRegionTargetHttpsProxies() AlphaRegionTargetHttpsProxies { + return gce.gceAlphaRegionTargetHttpsProxies +} + +// BetaRegionTargetHttpsProxies returns the interface for the beta RegionTargetHttpsProxies. +func (gce *GCE) BetaRegionTargetHttpsProxies() BetaRegionTargetHttpsProxies { + return gce.gceBetaRegionTargetHttpsProxies +} + // TargetPools returns the interface for the ga TargetPools. func (gce *GCE) TargetPools() TargetPools { return gce.gceTargetPools } +// AlphaUrlMaps returns the interface for the alpha UrlMaps. +func (gce *GCE) AlphaUrlMaps() AlphaUrlMaps { + return gce.gceAlphaUrlMaps +} + +// BetaUrlMaps returns the interface for the beta UrlMaps. +func (gce *GCE) BetaUrlMaps() BetaUrlMaps { + return gce.gceBetaUrlMaps +} + // UrlMaps returns the interface for the ga UrlMaps. func (gce *GCE) UrlMaps() UrlMaps { return gce.gceUrlMaps } +// AlphaRegionUrlMaps returns the interface for the alpha RegionUrlMaps. +func (gce *GCE) AlphaRegionUrlMaps() AlphaRegionUrlMaps { + return gce.gceAlphaRegionUrlMaps +} + +// BetaRegionUrlMaps returns the interface for the beta RegionUrlMaps. +func (gce *GCE) BetaRegionUrlMaps() BetaRegionUrlMaps { + return gce.gceBetaRegionUrlMaps +} + // Zones returns the interface for the ga Zones. 
func (gce *GCE) Zones() Zones { return gce.gceZones @@ -357,13 +597,20 @@ func NewMockGCE(projectRouter ProjectRouter) *MockGCE { mockInstanceGroupsObjs := map[meta.Key]*MockInstanceGroupsObj{} mockInstancesObjs := map[meta.Key]*MockInstancesObj{} mockNetworkEndpointGroupsObjs := map[meta.Key]*MockNetworkEndpointGroupsObj{} + mockNetworksObjs := map[meta.Key]*MockNetworksObj{} mockProjectsObjs := map[meta.Key]*MockProjectsObj{} mockRegionBackendServicesObjs := map[meta.Key]*MockRegionBackendServicesObj{} mockRegionDisksObjs := map[meta.Key]*MockRegionDisksObj{} + mockRegionHealthChecksObjs := map[meta.Key]*MockRegionHealthChecksObj{} + mockRegionSslCertificatesObjs := map[meta.Key]*MockRegionSslCertificatesObj{} + mockRegionTargetHttpProxiesObjs := map[meta.Key]*MockRegionTargetHttpProxiesObj{} + mockRegionTargetHttpsProxiesObjs := map[meta.Key]*MockRegionTargetHttpsProxiesObj{} + mockRegionUrlMapsObjs := map[meta.Key]*MockRegionUrlMapsObj{} mockRegionsObjs := map[meta.Key]*MockRegionsObj{} mockRoutesObjs := map[meta.Key]*MockRoutesObj{} mockSecurityPoliciesObjs := map[meta.Key]*MockSecurityPoliciesObj{} mockSslCertificatesObjs := map[meta.Key]*MockSslCertificatesObj{} + mockSubnetworksObjs := map[meta.Key]*MockSubnetworksObj{} mockTargetHttpProxiesObjs := map[meta.Key]*MockTargetHttpProxiesObj{} mockTargetHttpsProxiesObjs := map[meta.Key]*MockTargetHttpsProxiesObj{} mockTargetPoolsObjs := map[meta.Key]*MockTargetPoolsObj{} @@ -371,42 +618,72 @@ func NewMockGCE(projectRouter ProjectRouter) *MockGCE { mockZonesObjs := map[meta.Key]*MockZonesObj{} mock := &MockGCE{ - MockAddresses: NewMockAddresses(projectRouter, mockAddressesObjs), - MockAlphaAddresses: NewMockAlphaAddresses(projectRouter, mockAddressesObjs), - MockBetaAddresses: NewMockBetaAddresses(projectRouter, mockAddressesObjs), - MockGlobalAddresses: NewMockGlobalAddresses(projectRouter, mockGlobalAddressesObjs), - MockBackendServices: NewMockBackendServices(projectRouter, mockBackendServicesObjs), - MockBetaBackendServices: NewMockBetaBackendServices(projectRouter, mockBackendServicesObjs), - MockAlphaBackendServices: NewMockAlphaBackendServices(projectRouter, mockBackendServicesObjs), - MockRegionBackendServices: NewMockRegionBackendServices(projectRouter, mockRegionBackendServicesObjs), - MockAlphaRegionBackendServices: NewMockAlphaRegionBackendServices(projectRouter, mockRegionBackendServicesObjs), - MockDisks: NewMockDisks(projectRouter, mockDisksObjs), - MockRegionDisks: NewMockRegionDisks(projectRouter, mockRegionDisksObjs), - MockFirewalls: NewMockFirewalls(projectRouter, mockFirewallsObjs), - MockForwardingRules: NewMockForwardingRules(projectRouter, mockForwardingRulesObjs), - MockAlphaForwardingRules: NewMockAlphaForwardingRules(projectRouter, mockForwardingRulesObjs), - MockGlobalForwardingRules: NewMockGlobalForwardingRules(projectRouter, mockGlobalForwardingRulesObjs), - MockHealthChecks: NewMockHealthChecks(projectRouter, mockHealthChecksObjs), - MockAlphaHealthChecks: NewMockAlphaHealthChecks(projectRouter, mockHealthChecksObjs), - MockBetaHealthChecks: NewMockBetaHealthChecks(projectRouter, mockHealthChecksObjs), - MockHttpHealthChecks: NewMockHttpHealthChecks(projectRouter, mockHttpHealthChecksObjs), - MockHttpsHealthChecks: NewMockHttpsHealthChecks(projectRouter, mockHttpsHealthChecksObjs), - MockInstanceGroups: NewMockInstanceGroups(projectRouter, mockInstanceGroupsObjs), - MockInstances: NewMockInstances(projectRouter, mockInstancesObjs), - MockBetaInstances: NewMockBetaInstances(projectRouter, 
mockInstancesObjs), - MockAlphaInstances: NewMockAlphaInstances(projectRouter, mockInstancesObjs), - MockAlphaNetworkEndpointGroups: NewMockAlphaNetworkEndpointGroups(projectRouter, mockNetworkEndpointGroupsObjs), - MockBetaNetworkEndpointGroups: NewMockBetaNetworkEndpointGroups(projectRouter, mockNetworkEndpointGroupsObjs), - MockProjects: NewMockProjects(projectRouter, mockProjectsObjs), - MockRegions: NewMockRegions(projectRouter, mockRegionsObjs), - MockRoutes: NewMockRoutes(projectRouter, mockRoutesObjs), - MockBetaSecurityPolicies: NewMockBetaSecurityPolicies(projectRouter, mockSecurityPoliciesObjs), - MockSslCertificates: NewMockSslCertificates(projectRouter, mockSslCertificatesObjs), - MockTargetHttpProxies: NewMockTargetHttpProxies(projectRouter, mockTargetHttpProxiesObjs), - MockTargetHttpsProxies: NewMockTargetHttpsProxies(projectRouter, mockTargetHttpsProxiesObjs), - MockTargetPools: NewMockTargetPools(projectRouter, mockTargetPoolsObjs), - MockUrlMaps: NewMockUrlMaps(projectRouter, mockUrlMapsObjs), - MockZones: NewMockZones(projectRouter, mockZonesObjs), + MockAddresses: NewMockAddresses(projectRouter, mockAddressesObjs), + MockAlphaAddresses: NewMockAlphaAddresses(projectRouter, mockAddressesObjs), + MockBetaAddresses: NewMockBetaAddresses(projectRouter, mockAddressesObjs), + MockAlphaGlobalAddresses: NewMockAlphaGlobalAddresses(projectRouter, mockGlobalAddressesObjs), + MockGlobalAddresses: NewMockGlobalAddresses(projectRouter, mockGlobalAddressesObjs), + MockBackendServices: NewMockBackendServices(projectRouter, mockBackendServicesObjs), + MockBetaBackendServices: NewMockBetaBackendServices(projectRouter, mockBackendServicesObjs), + MockAlphaBackendServices: NewMockAlphaBackendServices(projectRouter, mockBackendServicesObjs), + MockRegionBackendServices: NewMockRegionBackendServices(projectRouter, mockRegionBackendServicesObjs), + MockAlphaRegionBackendServices: NewMockAlphaRegionBackendServices(projectRouter, mockRegionBackendServicesObjs), + MockBetaRegionBackendServices: NewMockBetaRegionBackendServices(projectRouter, mockRegionBackendServicesObjs), + MockDisks: NewMockDisks(projectRouter, mockDisksObjs), + MockRegionDisks: NewMockRegionDisks(projectRouter, mockRegionDisksObjs), + MockFirewalls: NewMockFirewalls(projectRouter, mockFirewallsObjs), + MockForwardingRules: NewMockForwardingRules(projectRouter, mockForwardingRulesObjs), + MockAlphaForwardingRules: NewMockAlphaForwardingRules(projectRouter, mockForwardingRulesObjs), + MockBetaForwardingRules: NewMockBetaForwardingRules(projectRouter, mockForwardingRulesObjs), + MockAlphaGlobalForwardingRules: NewMockAlphaGlobalForwardingRules(projectRouter, mockGlobalForwardingRulesObjs), + MockBetaGlobalForwardingRules: NewMockBetaGlobalForwardingRules(projectRouter, mockGlobalForwardingRulesObjs), + MockGlobalForwardingRules: NewMockGlobalForwardingRules(projectRouter, mockGlobalForwardingRulesObjs), + MockHealthChecks: NewMockHealthChecks(projectRouter, mockHealthChecksObjs), + MockAlphaHealthChecks: NewMockAlphaHealthChecks(projectRouter, mockHealthChecksObjs), + MockBetaHealthChecks: NewMockBetaHealthChecks(projectRouter, mockHealthChecksObjs), + MockAlphaRegionHealthChecks: NewMockAlphaRegionHealthChecks(projectRouter, mockRegionHealthChecksObjs), + MockBetaRegionHealthChecks: NewMockBetaRegionHealthChecks(projectRouter, mockRegionHealthChecksObjs), + MockHttpHealthChecks: NewMockHttpHealthChecks(projectRouter, mockHttpHealthChecksObjs), + MockHttpsHealthChecks: NewMockHttpsHealthChecks(projectRouter, 
mockHttpsHealthChecksObjs), + MockInstanceGroups: NewMockInstanceGroups(projectRouter, mockInstanceGroupsObjs), + MockInstances: NewMockInstances(projectRouter, mockInstancesObjs), + MockBetaInstances: NewMockBetaInstances(projectRouter, mockInstancesObjs), + MockAlphaInstances: NewMockAlphaInstances(projectRouter, mockInstancesObjs), + MockAlphaNetworks: NewMockAlphaNetworks(projectRouter, mockNetworksObjs), + MockBetaNetworks: NewMockBetaNetworks(projectRouter, mockNetworksObjs), + MockNetworks: NewMockNetworks(projectRouter, mockNetworksObjs), + MockAlphaNetworkEndpointGroups: NewMockAlphaNetworkEndpointGroups(projectRouter, mockNetworkEndpointGroupsObjs), + MockBetaNetworkEndpointGroups: NewMockBetaNetworkEndpointGroups(projectRouter, mockNetworkEndpointGroupsObjs), + MockNetworkEndpointGroups: NewMockNetworkEndpointGroups(projectRouter, mockNetworkEndpointGroupsObjs), + MockProjects: NewMockProjects(projectRouter, mockProjectsObjs), + MockRegions: NewMockRegions(projectRouter, mockRegionsObjs), + MockRoutes: NewMockRoutes(projectRouter, mockRoutesObjs), + MockBetaSecurityPolicies: NewMockBetaSecurityPolicies(projectRouter, mockSecurityPoliciesObjs), + MockSslCertificates: NewMockSslCertificates(projectRouter, mockSslCertificatesObjs), + MockBetaSslCertificates: NewMockBetaSslCertificates(projectRouter, mockSslCertificatesObjs), + MockAlphaSslCertificates: NewMockAlphaSslCertificates(projectRouter, mockSslCertificatesObjs), + MockAlphaRegionSslCertificates: NewMockAlphaRegionSslCertificates(projectRouter, mockRegionSslCertificatesObjs), + MockBetaRegionSslCertificates: NewMockBetaRegionSslCertificates(projectRouter, mockRegionSslCertificatesObjs), + MockAlphaSubnetworks: NewMockAlphaSubnetworks(projectRouter, mockSubnetworksObjs), + MockBetaSubnetworks: NewMockBetaSubnetworks(projectRouter, mockSubnetworksObjs), + MockSubnetworks: NewMockSubnetworks(projectRouter, mockSubnetworksObjs), + MockAlphaTargetHttpProxies: NewMockAlphaTargetHttpProxies(projectRouter, mockTargetHttpProxiesObjs), + MockBetaTargetHttpProxies: NewMockBetaTargetHttpProxies(projectRouter, mockTargetHttpProxiesObjs), + MockTargetHttpProxies: NewMockTargetHttpProxies(projectRouter, mockTargetHttpProxiesObjs), + MockAlphaRegionTargetHttpProxies: NewMockAlphaRegionTargetHttpProxies(projectRouter, mockRegionTargetHttpProxiesObjs), + MockBetaRegionTargetHttpProxies: NewMockBetaRegionTargetHttpProxies(projectRouter, mockRegionTargetHttpProxiesObjs), + MockTargetHttpsProxies: NewMockTargetHttpsProxies(projectRouter, mockTargetHttpsProxiesObjs), + MockAlphaTargetHttpsProxies: NewMockAlphaTargetHttpsProxies(projectRouter, mockTargetHttpsProxiesObjs), + MockBetaTargetHttpsProxies: NewMockBetaTargetHttpsProxies(projectRouter, mockTargetHttpsProxiesObjs), + MockAlphaRegionTargetHttpsProxies: NewMockAlphaRegionTargetHttpsProxies(projectRouter, mockRegionTargetHttpsProxiesObjs), + MockBetaRegionTargetHttpsProxies: NewMockBetaRegionTargetHttpsProxies(projectRouter, mockRegionTargetHttpsProxiesObjs), + MockTargetPools: NewMockTargetPools(projectRouter, mockTargetPoolsObjs), + MockAlphaUrlMaps: NewMockAlphaUrlMaps(projectRouter, mockUrlMapsObjs), + MockBetaUrlMaps: NewMockBetaUrlMaps(projectRouter, mockUrlMapsObjs), + MockUrlMaps: NewMockUrlMaps(projectRouter, mockUrlMapsObjs), + MockAlphaRegionUrlMaps: NewMockAlphaRegionUrlMaps(projectRouter, mockRegionUrlMapsObjs), + MockBetaRegionUrlMaps: NewMockBetaRegionUrlMaps(projectRouter, mockRegionUrlMapsObjs), + MockZones: NewMockZones(projectRouter, mockZonesObjs), } return mock } @@ 
-416,42 +693,72 @@ var _ Cloud = (*MockGCE)(nil) // MockGCE is the mock for the compute API. type MockGCE struct { - MockAddresses *MockAddresses - MockAlphaAddresses *MockAlphaAddresses - MockBetaAddresses *MockBetaAddresses - MockGlobalAddresses *MockGlobalAddresses - MockBackendServices *MockBackendServices - MockBetaBackendServices *MockBetaBackendServices - MockAlphaBackendServices *MockAlphaBackendServices - MockRegionBackendServices *MockRegionBackendServices - MockAlphaRegionBackendServices *MockAlphaRegionBackendServices - MockDisks *MockDisks - MockRegionDisks *MockRegionDisks - MockFirewalls *MockFirewalls - MockForwardingRules *MockForwardingRules - MockAlphaForwardingRules *MockAlphaForwardingRules - MockGlobalForwardingRules *MockGlobalForwardingRules - MockHealthChecks *MockHealthChecks - MockAlphaHealthChecks *MockAlphaHealthChecks - MockBetaHealthChecks *MockBetaHealthChecks - MockHttpHealthChecks *MockHttpHealthChecks - MockHttpsHealthChecks *MockHttpsHealthChecks - MockInstanceGroups *MockInstanceGroups - MockInstances *MockInstances - MockBetaInstances *MockBetaInstances - MockAlphaInstances *MockAlphaInstances - MockAlphaNetworkEndpointGroups *MockAlphaNetworkEndpointGroups - MockBetaNetworkEndpointGroups *MockBetaNetworkEndpointGroups - MockProjects *MockProjects - MockRegions *MockRegions - MockRoutes *MockRoutes - MockBetaSecurityPolicies *MockBetaSecurityPolicies - MockSslCertificates *MockSslCertificates - MockTargetHttpProxies *MockTargetHttpProxies - MockTargetHttpsProxies *MockTargetHttpsProxies - MockTargetPools *MockTargetPools - MockUrlMaps *MockUrlMaps - MockZones *MockZones + MockAddresses *MockAddresses + MockAlphaAddresses *MockAlphaAddresses + MockBetaAddresses *MockBetaAddresses + MockAlphaGlobalAddresses *MockAlphaGlobalAddresses + MockGlobalAddresses *MockGlobalAddresses + MockBackendServices *MockBackendServices + MockBetaBackendServices *MockBetaBackendServices + MockAlphaBackendServices *MockAlphaBackendServices + MockRegionBackendServices *MockRegionBackendServices + MockAlphaRegionBackendServices *MockAlphaRegionBackendServices + MockBetaRegionBackendServices *MockBetaRegionBackendServices + MockDisks *MockDisks + MockRegionDisks *MockRegionDisks + MockFirewalls *MockFirewalls + MockForwardingRules *MockForwardingRules + MockAlphaForwardingRules *MockAlphaForwardingRules + MockBetaForwardingRules *MockBetaForwardingRules + MockAlphaGlobalForwardingRules *MockAlphaGlobalForwardingRules + MockBetaGlobalForwardingRules *MockBetaGlobalForwardingRules + MockGlobalForwardingRules *MockGlobalForwardingRules + MockHealthChecks *MockHealthChecks + MockAlphaHealthChecks *MockAlphaHealthChecks + MockBetaHealthChecks *MockBetaHealthChecks + MockAlphaRegionHealthChecks *MockAlphaRegionHealthChecks + MockBetaRegionHealthChecks *MockBetaRegionHealthChecks + MockHttpHealthChecks *MockHttpHealthChecks + MockHttpsHealthChecks *MockHttpsHealthChecks + MockInstanceGroups *MockInstanceGroups + MockInstances *MockInstances + MockBetaInstances *MockBetaInstances + MockAlphaInstances *MockAlphaInstances + MockAlphaNetworks *MockAlphaNetworks + MockBetaNetworks *MockBetaNetworks + MockNetworks *MockNetworks + MockAlphaNetworkEndpointGroups *MockAlphaNetworkEndpointGroups + MockBetaNetworkEndpointGroups *MockBetaNetworkEndpointGroups + MockNetworkEndpointGroups *MockNetworkEndpointGroups + MockProjects *MockProjects + MockRegions *MockRegions + MockRoutes *MockRoutes + MockBetaSecurityPolicies *MockBetaSecurityPolicies + MockSslCertificates *MockSslCertificates + 
MockBetaSslCertificates *MockBetaSslCertificates + MockAlphaSslCertificates *MockAlphaSslCertificates + MockAlphaRegionSslCertificates *MockAlphaRegionSslCertificates + MockBetaRegionSslCertificates *MockBetaRegionSslCertificates + MockAlphaSubnetworks *MockAlphaSubnetworks + MockBetaSubnetworks *MockBetaSubnetworks + MockSubnetworks *MockSubnetworks + MockAlphaTargetHttpProxies *MockAlphaTargetHttpProxies + MockBetaTargetHttpProxies *MockBetaTargetHttpProxies + MockTargetHttpProxies *MockTargetHttpProxies + MockAlphaRegionTargetHttpProxies *MockAlphaRegionTargetHttpProxies + MockBetaRegionTargetHttpProxies *MockBetaRegionTargetHttpProxies + MockTargetHttpsProxies *MockTargetHttpsProxies + MockAlphaTargetHttpsProxies *MockAlphaTargetHttpsProxies + MockBetaTargetHttpsProxies *MockBetaTargetHttpsProxies + MockAlphaRegionTargetHttpsProxies *MockAlphaRegionTargetHttpsProxies + MockBetaRegionTargetHttpsProxies *MockBetaRegionTargetHttpsProxies + MockTargetPools *MockTargetPools + MockAlphaUrlMaps *MockAlphaUrlMaps + MockBetaUrlMaps *MockBetaUrlMaps + MockUrlMaps *MockUrlMaps + MockAlphaRegionUrlMaps *MockAlphaRegionUrlMaps + MockBetaRegionUrlMaps *MockBetaRegionUrlMaps + MockZones *MockZones } // Addresses returns the interface for the ga Addresses. @@ -469,6 +776,11 @@ func (mock *MockGCE) BetaAddresses() BetaAddresses { return mock.MockBetaAddresses } +// AlphaGlobalAddresses returns the interface for the alpha GlobalAddresses. +func (mock *MockGCE) AlphaGlobalAddresses() AlphaGlobalAddresses { + return mock.MockAlphaGlobalAddresses +} + // GlobalAddresses returns the interface for the ga GlobalAddresses. func (mock *MockGCE) GlobalAddresses() GlobalAddresses { return mock.MockGlobalAddresses @@ -499,6 +811,11 @@ func (mock *MockGCE) AlphaRegionBackendServices() AlphaRegionBackendServices { return mock.MockAlphaRegionBackendServices } +// BetaRegionBackendServices returns the interface for the beta RegionBackendServices. +func (mock *MockGCE) BetaRegionBackendServices() BetaRegionBackendServices { + return mock.MockBetaRegionBackendServices +} + // Disks returns the interface for the ga Disks. func (mock *MockGCE) Disks() Disks { return mock.MockDisks @@ -524,6 +841,21 @@ func (mock *MockGCE) AlphaForwardingRules() AlphaForwardingRules { return mock.MockAlphaForwardingRules } +// BetaForwardingRules returns the interface for the beta ForwardingRules. +func (mock *MockGCE) BetaForwardingRules() BetaForwardingRules { + return mock.MockBetaForwardingRules +} + +// AlphaGlobalForwardingRules returns the interface for the alpha GlobalForwardingRules. +func (mock *MockGCE) AlphaGlobalForwardingRules() AlphaGlobalForwardingRules { + return mock.MockAlphaGlobalForwardingRules +} + +// BetaGlobalForwardingRules returns the interface for the beta GlobalForwardingRules. +func (mock *MockGCE) BetaGlobalForwardingRules() BetaGlobalForwardingRules { + return mock.MockBetaGlobalForwardingRules +} + // GlobalForwardingRules returns the interface for the ga GlobalForwardingRules. func (mock *MockGCE) GlobalForwardingRules() GlobalForwardingRules { return mock.MockGlobalForwardingRules @@ -544,6 +876,16 @@ func (mock *MockGCE) BetaHealthChecks() BetaHealthChecks { return mock.MockBetaHealthChecks } +// AlphaRegionHealthChecks returns the interface for the alpha RegionHealthChecks. +func (mock *MockGCE) AlphaRegionHealthChecks() AlphaRegionHealthChecks { + return mock.MockAlphaRegionHealthChecks +} + +// BetaRegionHealthChecks returns the interface for the beta RegionHealthChecks. 
+func (mock *MockGCE) BetaRegionHealthChecks() BetaRegionHealthChecks { + return mock.MockBetaRegionHealthChecks +} + // HttpHealthChecks returns the interface for the ga HttpHealthChecks. func (mock *MockGCE) HttpHealthChecks() HttpHealthChecks { return mock.MockHttpHealthChecks @@ -574,6 +916,21 @@ func (mock *MockGCE) AlphaInstances() AlphaInstances { return mock.MockAlphaInstances } +// AlphaNetworks returns the interface for the alpha Networks. +func (mock *MockGCE) AlphaNetworks() AlphaNetworks { + return mock.MockAlphaNetworks +} + +// BetaNetworks returns the interface for the beta Networks. +func (mock *MockGCE) BetaNetworks() BetaNetworks { + return mock.MockBetaNetworks +} + +// Networks returns the interface for the ga Networks. +func (mock *MockGCE) Networks() Networks { + return mock.MockNetworks +} + // AlphaNetworkEndpointGroups returns the interface for the alpha NetworkEndpointGroups. func (mock *MockGCE) AlphaNetworkEndpointGroups() AlphaNetworkEndpointGroups { return mock.MockAlphaNetworkEndpointGroups @@ -584,6 +941,11 @@ func (mock *MockGCE) BetaNetworkEndpointGroups() BetaNetworkEndpointGroups { return mock.MockBetaNetworkEndpointGroups } +// NetworkEndpointGroups returns the interface for the ga NetworkEndpointGroups. +func (mock *MockGCE) NetworkEndpointGroups() NetworkEndpointGroups { + return mock.MockNetworkEndpointGroups +} + // Projects returns the interface for the ga Projects. func (mock *MockGCE) Projects() Projects { return mock.MockProjects @@ -609,26 +971,121 @@ func (mock *MockGCE) SslCertificates() SslCertificates { return mock.MockSslCertificates } +// BetaSslCertificates returns the interface for the beta SslCertificates. +func (mock *MockGCE) BetaSslCertificates() BetaSslCertificates { + return mock.MockBetaSslCertificates +} + +// AlphaSslCertificates returns the interface for the alpha SslCertificates. +func (mock *MockGCE) AlphaSslCertificates() AlphaSslCertificates { + return mock.MockAlphaSslCertificates +} + +// AlphaRegionSslCertificates returns the interface for the alpha RegionSslCertificates. +func (mock *MockGCE) AlphaRegionSslCertificates() AlphaRegionSslCertificates { + return mock.MockAlphaRegionSslCertificates +} + +// BetaRegionSslCertificates returns the interface for the beta RegionSslCertificates. +func (mock *MockGCE) BetaRegionSslCertificates() BetaRegionSslCertificates { + return mock.MockBetaRegionSslCertificates +} + +// AlphaSubnetworks returns the interface for the alpha Subnetworks. +func (mock *MockGCE) AlphaSubnetworks() AlphaSubnetworks { + return mock.MockAlphaSubnetworks +} + +// BetaSubnetworks returns the interface for the beta Subnetworks. +func (mock *MockGCE) BetaSubnetworks() BetaSubnetworks { + return mock.MockBetaSubnetworks +} + +// Subnetworks returns the interface for the ga Subnetworks. +func (mock *MockGCE) Subnetworks() Subnetworks { + return mock.MockSubnetworks +} + +// AlphaTargetHttpProxies returns the interface for the alpha TargetHttpProxies. +func (mock *MockGCE) AlphaTargetHttpProxies() AlphaTargetHttpProxies { + return mock.MockAlphaTargetHttpProxies +} + +// BetaTargetHttpProxies returns the interface for the beta TargetHttpProxies. +func (mock *MockGCE) BetaTargetHttpProxies() BetaTargetHttpProxies { + return mock.MockBetaTargetHttpProxies +} + // TargetHttpProxies returns the interface for the ga TargetHttpProxies. 
func (mock *MockGCE) TargetHttpProxies() TargetHttpProxies { return mock.MockTargetHttpProxies } +// AlphaRegionTargetHttpProxies returns the interface for the alpha RegionTargetHttpProxies. +func (mock *MockGCE) AlphaRegionTargetHttpProxies() AlphaRegionTargetHttpProxies { + return mock.MockAlphaRegionTargetHttpProxies +} + +// BetaRegionTargetHttpProxies returns the interface for the beta RegionTargetHttpProxies. +func (mock *MockGCE) BetaRegionTargetHttpProxies() BetaRegionTargetHttpProxies { + return mock.MockBetaRegionTargetHttpProxies +} + // TargetHttpsProxies returns the interface for the ga TargetHttpsProxies. func (mock *MockGCE) TargetHttpsProxies() TargetHttpsProxies { return mock.MockTargetHttpsProxies } +// AlphaTargetHttpsProxies returns the interface for the alpha TargetHttpsProxies. +func (mock *MockGCE) AlphaTargetHttpsProxies() AlphaTargetHttpsProxies { + return mock.MockAlphaTargetHttpsProxies +} + +// BetaTargetHttpsProxies returns the interface for the beta TargetHttpsProxies. +func (mock *MockGCE) BetaTargetHttpsProxies() BetaTargetHttpsProxies { + return mock.MockBetaTargetHttpsProxies +} + +// AlphaRegionTargetHttpsProxies returns the interface for the alpha RegionTargetHttpsProxies. +func (mock *MockGCE) AlphaRegionTargetHttpsProxies() AlphaRegionTargetHttpsProxies { + return mock.MockAlphaRegionTargetHttpsProxies +} + +// BetaRegionTargetHttpsProxies returns the interface for the beta RegionTargetHttpsProxies. +func (mock *MockGCE) BetaRegionTargetHttpsProxies() BetaRegionTargetHttpsProxies { + return mock.MockBetaRegionTargetHttpsProxies +} + // TargetPools returns the interface for the ga TargetPools. func (mock *MockGCE) TargetPools() TargetPools { return mock.MockTargetPools } +// AlphaUrlMaps returns the interface for the alpha UrlMaps. +func (mock *MockGCE) AlphaUrlMaps() AlphaUrlMaps { + return mock.MockAlphaUrlMaps +} + +// BetaUrlMaps returns the interface for the beta UrlMaps. +func (mock *MockGCE) BetaUrlMaps() BetaUrlMaps { + return mock.MockBetaUrlMaps +} + // UrlMaps returns the interface for the ga UrlMaps. func (mock *MockGCE) UrlMaps() UrlMaps { return mock.MockUrlMaps } +// AlphaRegionUrlMaps returns the interface for the alpha RegionUrlMaps. +func (mock *MockGCE) AlphaRegionUrlMaps() AlphaRegionUrlMaps { + return mock.MockAlphaRegionUrlMaps +} + +// BetaRegionUrlMaps returns the interface for the beta RegionUrlMaps. +func (mock *MockGCE) BetaRegionUrlMaps() BetaRegionUrlMaps { + return mock.MockBetaRegionUrlMaps +} + // Zones returns the interface for the ga Zones. func (mock *MockGCE) Zones() Zones { return mock.MockZones @@ -786,6 +1243,19 @@ func (m *MockForwardingRulesObj) ToAlpha() *alpha.ForwardingRule { return ret } +// ToBeta retrieves the given version of the object. +func (m *MockForwardingRulesObj) ToBeta() *beta.ForwardingRule { + if ret, ok := m.Obj.(*beta.ForwardingRule); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.ForwardingRule{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.ForwardingRule via JSON: %v", m.Obj, err) + } + return ret +} + // ToGA retrieves the given version of the object. func (m *MockForwardingRulesObj) ToGA() *ga.ForwardingRule { if ret, ok := m.Obj.(*ga.ForwardingRule); ok { @@ -806,6 +1276,19 @@ type MockGlobalAddressesObj struct { Obj interface{} } +// ToAlpha retrieves the given version of the object. 
+func (m *MockGlobalAddressesObj) ToAlpha() *alpha.Address { + if ret, ok := m.Obj.(*alpha.Address); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.Address{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *alpha.Address via JSON: %v", m.Obj, err) + } + return ret +} + // ToGA retrieves the given version of the object. func (m *MockGlobalAddressesObj) ToGA() *ga.Address { if ret, ok := m.Obj.(*ga.Address); ok { @@ -826,6 +1309,32 @@ type MockGlobalForwardingRulesObj struct { Obj interface{} } +// ToAlpha retrieves the given version of the object. +func (m *MockGlobalForwardingRulesObj) ToAlpha() *alpha.ForwardingRule { + if ret, ok := m.Obj.(*alpha.ForwardingRule); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.ForwardingRule{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *alpha.ForwardingRule via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. +func (m *MockGlobalForwardingRulesObj) ToBeta() *beta.ForwardingRule { + if ret, ok := m.Obj.(*beta.ForwardingRule); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.ForwardingRule{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.ForwardingRule via JSON: %v", m.Obj, err) + } + return ret +} + // ToGA retrieves the given version of the object. func (m *MockGlobalForwardingRulesObj) ToGA() *ga.ForwardingRule { if ret, ok := m.Obj.(*ga.ForwardingRule); ok { @@ -1024,6 +1533,65 @@ func (m *MockNetworkEndpointGroupsObj) ToBeta() *beta.NetworkEndpointGroup { return ret } +// ToGA retrieves the given version of the object. +func (m *MockNetworkEndpointGroupsObj) ToGA() *ga.NetworkEndpointGroup { + if ret, ok := m.Obj.(*ga.NetworkEndpointGroup); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.NetworkEndpointGroup{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *ga.NetworkEndpointGroup via JSON: %v", m.Obj, err) + } + return ret +} + +// MockNetworksObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockNetworksObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockNetworksObj) ToAlpha() *alpha.Network { + if ret, ok := m.Obj.(*alpha.Network); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.Network{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *alpha.Network via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. +func (m *MockNetworksObj) ToBeta() *beta.Network { + if ret, ok := m.Obj.(*beta.Network); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.Network{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.Network via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. 
+func (m *MockNetworksObj) ToGA() *ga.Network { + if ret, ok := m.Obj.(*ga.Network); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Network{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *ga.Network via JSON: %v", m.Obj, err) + } + return ret +} + // MockProjectsObj is used to store the various object versions in the shared // map of mocked objects. This allows for multiple API versions to co-exist and // share the same "view" of the objects in the backend. @@ -1064,6 +1632,19 @@ func (m *MockRegionBackendServicesObj) ToAlpha() *alpha.BackendService { return ret } +// ToBeta retrieves the given version of the object. +func (m *MockRegionBackendServicesObj) ToBeta() *beta.BackendService { + if ret, ok := m.Obj.(*beta.BackendService); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.BackendService{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.BackendService via JSON: %v", m.Obj, err) + } + return ret +} + // ToGA retrieves the given version of the object. func (m *MockRegionBackendServicesObj) ToGA() *ga.BackendService { if ret, ok := m.Obj.(*ga.BackendService); ok { @@ -1097,72 +1678,263 @@ func (m *MockRegionDisksObj) ToGA() *ga.Disk { return ret } -// MockRegionsObj is used to store the various object versions in the shared +// MockRegionHealthChecksObj is used to store the various object versions in the shared // map of mocked objects. This allows for multiple API versions to co-exist and // share the same "view" of the objects in the backend. -type MockRegionsObj struct { +type MockRegionHealthChecksObj struct { Obj interface{} } -// ToGA retrieves the given version of the object. -func (m *MockRegionsObj) ToGA() *ga.Region { - if ret, ok := m.Obj.(*ga.Region); ok { +// ToAlpha retrieves the given version of the object. +func (m *MockRegionHealthChecksObj) ToAlpha() *alpha.HealthCheck { + if ret, ok := m.Obj.(*alpha.HealthCheck); ok { return ret } // Convert the object via JSON copying to the type that was requested. - ret := &ga.Region{} + ret := &alpha.HealthCheck{} if err := copyViaJSON(ret, m.Obj); err != nil { - klog.Errorf("Could not convert %T to *ga.Region via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.HealthCheck via JSON: %v", m.Obj, err) } return ret } -// MockRoutesObj is used to store the various object versions in the shared -// map of mocked objects. This allows for multiple API versions to co-exist and -// share the same "view" of the objects in the backend. -type MockRoutesObj struct { - Obj interface{} -} - -// ToGA retrieves the given version of the object. -func (m *MockRoutesObj) ToGA() *ga.Route { - if ret, ok := m.Obj.(*ga.Route); ok { +// ToBeta retrieves the given version of the object. +func (m *MockRegionHealthChecksObj) ToBeta() *beta.HealthCheck { + if ret, ok := m.Obj.(*beta.HealthCheck); ok { return ret } // Convert the object via JSON copying to the type that was requested. 
- ret := &ga.Route{} + ret := &beta.HealthCheck{} if err := copyViaJSON(ret, m.Obj); err != nil { - klog.Errorf("Could not convert %T to *ga.Route via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.HealthCheck via JSON: %v", m.Obj, err) } return ret } -// MockSecurityPoliciesObj is used to store the various object versions in the shared +// MockRegionSslCertificatesObj is used to store the various object versions in the shared // map of mocked objects. This allows for multiple API versions to co-exist and // share the same "view" of the objects in the backend. -type MockSecurityPoliciesObj struct { +type MockRegionSslCertificatesObj struct { Obj interface{} } -// ToBeta retrieves the given version of the object. -func (m *MockSecurityPoliciesObj) ToBeta() *beta.SecurityPolicy { - if ret, ok := m.Obj.(*beta.SecurityPolicy); ok { +// ToAlpha retrieves the given version of the object. +func (m *MockRegionSslCertificatesObj) ToAlpha() *alpha.SslCertificate { + if ret, ok := m.Obj.(*alpha.SslCertificate); ok { return ret } // Convert the object via JSON copying to the type that was requested. - ret := &beta.SecurityPolicy{} + ret := &alpha.SslCertificate{} if err := copyViaJSON(ret, m.Obj); err != nil { - klog.Errorf("Could not convert %T to *beta.SecurityPolicy via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.SslCertificate via JSON: %v", m.Obj, err) } return ret } -// MockSslCertificatesObj is used to store the various object versions in the shared -// map of mocked objects. This allows for multiple API versions to co-exist and -// share the same "view" of the objects in the backend. -type MockSslCertificatesObj struct { - Obj interface{} -} +// ToBeta retrieves the given version of the object. +func (m *MockRegionSslCertificatesObj) ToBeta() *beta.SslCertificate { + if ret, ok := m.Obj.(*beta.SslCertificate); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.SslCertificate{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.SslCertificate via JSON: %v", m.Obj, err) + } + return ret +} + +// MockRegionTargetHttpProxiesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockRegionTargetHttpProxiesObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockRegionTargetHttpProxiesObj) ToAlpha() *alpha.TargetHttpProxy { + if ret, ok := m.Obj.(*alpha.TargetHttpProxy); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.TargetHttpProxy{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *alpha.TargetHttpProxy via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. +func (m *MockRegionTargetHttpProxiesObj) ToBeta() *beta.TargetHttpProxy { + if ret, ok := m.Obj.(*beta.TargetHttpProxy); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.TargetHttpProxy{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.TargetHttpProxy via JSON: %v", m.Obj, err) + } + return ret +} + +// MockRegionTargetHttpsProxiesObj is used to store the various object versions in the shared +// map of mocked objects. 
This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockRegionTargetHttpsProxiesObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockRegionTargetHttpsProxiesObj) ToAlpha() *alpha.TargetHttpsProxy { + if ret, ok := m.Obj.(*alpha.TargetHttpsProxy); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.TargetHttpsProxy{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *alpha.TargetHttpsProxy via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. +func (m *MockRegionTargetHttpsProxiesObj) ToBeta() *beta.TargetHttpsProxy { + if ret, ok := m.Obj.(*beta.TargetHttpsProxy); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.TargetHttpsProxy{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.TargetHttpsProxy via JSON: %v", m.Obj, err) + } + return ret +} + +// MockRegionUrlMapsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockRegionUrlMapsObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockRegionUrlMapsObj) ToAlpha() *alpha.UrlMap { + if ret, ok := m.Obj.(*alpha.UrlMap); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.UrlMap{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *alpha.UrlMap via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. +func (m *MockRegionUrlMapsObj) ToBeta() *beta.UrlMap { + if ret, ok := m.Obj.(*beta.UrlMap); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.UrlMap{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.UrlMap via JSON: %v", m.Obj, err) + } + return ret +} + +// MockRegionsObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockRegionsObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockRegionsObj) ToGA() *ga.Region { + if ret, ok := m.Obj.(*ga.Region); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Region{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *ga.Region via JSON: %v", m.Obj, err) + } + return ret +} + +// MockRoutesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockRoutesObj struct { + Obj interface{} +} + +// ToGA retrieves the given version of the object. +func (m *MockRoutesObj) ToGA() *ga.Route { + if ret, ok := m.Obj.(*ga.Route); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. 
+ ret := &ga.Route{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *ga.Route via JSON: %v", m.Obj, err) + } + return ret +} + +// MockSecurityPoliciesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockSecurityPoliciesObj struct { + Obj interface{} +} + +// ToBeta retrieves the given version of the object. +func (m *MockSecurityPoliciesObj) ToBeta() *beta.SecurityPolicy { + if ret, ok := m.Obj.(*beta.SecurityPolicy); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.SecurityPolicy{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.SecurityPolicy via JSON: %v", m.Obj, err) + } + return ret +} + +// MockSslCertificatesObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockSslCertificatesObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockSslCertificatesObj) ToAlpha() *alpha.SslCertificate { + if ret, ok := m.Obj.(*alpha.SslCertificate); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.SslCertificate{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *alpha.SslCertificate via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. +func (m *MockSslCertificatesObj) ToBeta() *beta.SslCertificate { + if ret, ok := m.Obj.(*beta.SslCertificate); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.SslCertificate{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.SslCertificate via JSON: %v", m.Obj, err) + } + return ret +} // ToGA retrieves the given version of the object. func (m *MockSslCertificatesObj) ToGA() *ga.SslCertificate { @@ -1177,6 +1949,52 @@ func (m *MockSslCertificatesObj) ToGA() *ga.SslCertificate { return ret } +// MockSubnetworksObj is used to store the various object versions in the shared +// map of mocked objects. This allows for multiple API versions to co-exist and +// share the same "view" of the objects in the backend. +type MockSubnetworksObj struct { + Obj interface{} +} + +// ToAlpha retrieves the given version of the object. +func (m *MockSubnetworksObj) ToAlpha() *alpha.Subnetwork { + if ret, ok := m.Obj.(*alpha.Subnetwork); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.Subnetwork{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *alpha.Subnetwork via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. +func (m *MockSubnetworksObj) ToBeta() *beta.Subnetwork { + if ret, ok := m.Obj.(*beta.Subnetwork); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.Subnetwork{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.Subnetwork via JSON: %v", m.Obj, err) + } + return ret +} + +// ToGA retrieves the given version of the object. 
+func (m *MockSubnetworksObj) ToGA() *ga.Subnetwork { + if ret, ok := m.Obj.(*ga.Subnetwork); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &ga.Subnetwork{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *ga.Subnetwork via JSON: %v", m.Obj, err) + } + return ret +} + // MockTargetHttpProxiesObj is used to store the various object versions in the shared // map of mocked objects. This allows for multiple API versions to co-exist and // share the same "view" of the objects in the backend. @@ -1184,6 +2002,32 @@ type MockTargetHttpProxiesObj struct { Obj interface{} } +// ToAlpha retrieves the given version of the object. +func (m *MockTargetHttpProxiesObj) ToAlpha() *alpha.TargetHttpProxy { + if ret, ok := m.Obj.(*alpha.TargetHttpProxy); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.TargetHttpProxy{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *alpha.TargetHttpProxy via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. +func (m *MockTargetHttpProxiesObj) ToBeta() *beta.TargetHttpProxy { + if ret, ok := m.Obj.(*beta.TargetHttpProxy); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.TargetHttpProxy{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.TargetHttpProxy via JSON: %v", m.Obj, err) + } + return ret +} + // ToGA retrieves the given version of the object. func (m *MockTargetHttpProxiesObj) ToGA() *ga.TargetHttpProxy { if ret, ok := m.Obj.(*ga.TargetHttpProxy); ok { @@ -1204,6 +2048,32 @@ type MockTargetHttpsProxiesObj struct { Obj interface{} } +// ToAlpha retrieves the given version of the object. +func (m *MockTargetHttpsProxiesObj) ToAlpha() *alpha.TargetHttpsProxy { + if ret, ok := m.Obj.(*alpha.TargetHttpsProxy); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.TargetHttpsProxy{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *alpha.TargetHttpsProxy via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. +func (m *MockTargetHttpsProxiesObj) ToBeta() *beta.TargetHttpsProxy { + if ret, ok := m.Obj.(*beta.TargetHttpsProxy); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.TargetHttpsProxy{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.TargetHttpsProxy via JSON: %v", m.Obj, err) + } + return ret +} + // ToGA retrieves the given version of the object. func (m *MockTargetHttpsProxiesObj) ToGA() *ga.TargetHttpsProxy { if ret, ok := m.Obj.(*ga.TargetHttpsProxy); ok { @@ -1244,6 +2114,32 @@ type MockUrlMapsObj struct { Obj interface{} } +// ToAlpha retrieves the given version of the object. +func (m *MockUrlMapsObj) ToAlpha() *alpha.UrlMap { + if ret, ok := m.Obj.(*alpha.UrlMap); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &alpha.UrlMap{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *alpha.UrlMap via JSON: %v", m.Obj, err) + } + return ret +} + +// ToBeta retrieves the given version of the object. 
+func (m *MockUrlMapsObj) ToBeta() *beta.UrlMap { + if ret, ok := m.Obj.(*beta.UrlMap); ok { + return ret + } + // Convert the object via JSON copying to the type that was requested. + ret := &beta.UrlMap{} + if err := copyViaJSON(ret, m.Obj); err != nil { + klog.Errorf("Could not convert %T to *beta.UrlMap via JSON: %v", m.Obj, err) + } + return ret +} + // ToGA retrieves the given version of the object. func (m *MockUrlMapsObj) ToGA() *ga.UrlMap { if ret, ok := m.Obj.(*ga.UrlMap); ok { @@ -1283,6 +2179,7 @@ type Addresses interface { List(ctx context.Context, region string, fl *filter.F) ([]*ga.Address, error) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error Delete(ctx context.Context, key *meta.Key) error + AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*ga.Address, error) } // NewMockAddresses returns a new mock for Addresses. @@ -1309,19 +2206,21 @@ type MockAddresses struct { // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. - GetError map[meta.Key]error - ListError *error - InsertError map[meta.Key]error - DeleteError map[meta.Key]error - - // xxxHook allow you to intercept the standard processing of the mock in - // order to add your own logic. Return (true, _, _) to prevent the normal - // execution flow of the mock. Return (false, nil, nil) to continue with + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + AggregatedListError *error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockAddresses) (bool, *ga.Address, error) - ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAddresses) (bool, []*ga.Address, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Address, m *MockAddresses) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockAddresses) (bool, error) + GetHook func(ctx context.Context, key *meta.Key, m *MockAddresses) (bool, *ga.Address, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAddresses) (bool, []*ga.Address, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Address, m *MockAddresses) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAddresses) (bool, error) + AggregatedListHook func(ctx context.Context, fl *filter.F, m *MockAddresses) (bool, map[string][]*ga.Address, error) // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -1465,6 +2364,41 @@ func (m *MockAddresses) Delete(ctx context.Context, key *meta.Key) error { return nil } +// AggregatedList is a mock for AggregatedList. 
+func (m *MockAddresses) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*ga.Address, error) { + if m.AggregatedListHook != nil { + if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockAddresses.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.AggregatedListError != nil { + err := *m.AggregatedListError + klog.V(5).Infof("MockAddresses.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + + objs := map[string][]*ga.Address{} + for _, obj := range m.Objects { + res, err := ParseResourceURL(obj.ToGA().SelfLink) + location := res.Key.Region + if err != nil { + klog.V(5).Infof("MockAddresses.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + if !fl.Match(obj.ToGA()) { + continue + } + objs[location] = append(objs[location], obj.ToGA()) + } + klog.V(5).Infof("MockAddresses.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + // Obj wraps the object for use in the mock. func (m *MockAddresses) Obj(o *ga.Address) *MockAddressesObj { return &MockAddressesObj{o} @@ -1611,12 +2545,61 @@ func (g *GCEAddresses) Delete(ctx context.Context, key *meta.Key) error { return err } +// AggregatedList lists all resources of the given type across all locations. +func (g *GCEAddresses) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*ga.Address, error) { + klog.V(5).Infof("GCEAddresses.AggregatedList(%v, %v) called", ctx, fl) + + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AggregatedList", + Version: meta.Version("ga"), + Service: "Addresses", + } + + klog.V(5).Infof("GCEAddresses.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(5).Infof("GCEAddresses.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + return nil, err + } + + call := g.s.GA.Addresses.AggregatedList(projectID) + call.Context(ctx) + if fl != filter.None { + call.Filter(fl.String()) + } + + all := map[string][]*ga.Address{} + f := func(l *ga.AddressAggregatedList) error { + for k, v := range l.Items { + klog.V(5).Infof("GCEAddresses.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) + all[k] = append(all[k], v.Addresses...) + } + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAddresses.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + if klog.V(4) { + klog.V(4).Infof("GCEAddresses.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAddresses.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + } + return all, nil +} + // AlphaAddresses is an interface that allows for mocking of Addresses. type AlphaAddresses interface { Get(ctx context.Context, key *meta.Key) (*alpha.Address, error) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Address, error) Insert(ctx context.Context, key *meta.Key, obj *alpha.Address) error Delete(ctx context.Context, key *meta.Key) error + AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.Address, error) } // NewMockAlphaAddresses returns a new mock for Addresses. 
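The AggregatedList additions above follow the same two-path pattern as the rest of the generated mocks: if AggregatedListHook is set it can short-circuit the call, otherwise the mock groups its stored Objects by the region parsed out of each SelfLink, and the GCE* adapter pages through the real AggregatedList API. The test-style sketch below exercises the hook path; the import paths, the package name, the nil ProjectRouter, and the region key string are assumptions for illustration and are not taken from this diff.

package cloudmock_test

import (
	"context"
	"testing"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"        // assumed upstream path of this generated file
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter" // assumed
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"   // assumed
	ga "google.golang.org/api/compute/v1"                                // the "ga" alias used by the generated code
)

func TestAggregatedListHook(t *testing.T) {
	// ProjectRouter is only consulted by Insert, so nil is acceptable for a
	// hook-only mock; no objects are pre-seeded either.
	mock := cloud.NewMockAddresses(nil, map[meta.Key]*cloud.MockAddressesObj{})

	// Returning true from the hook intercepts the call and bypasses the
	// default per-region grouping of mock.Objects.
	mock.AggregatedListHook = func(ctx context.Context, fl *filter.F, m *cloud.MockAddresses) (bool, map[string][]*ga.Address, error) {
		return true, map[string][]*ga.Address{
			"us-central1": {{Name: "addr-1"}, {Name: "addr-2"}},
		}, nil
	}

	byRegion, err := mock.AggregatedList(context.Background(), filter.None)
	if err != nil {
		t.Fatalf("AggregatedList: %v", err)
	}
	if got := len(byRegion["us-central1"]); got != 2 {
		t.Errorf("expected 2 addresses in us-central1, got %d", got)
	}
}

Production callers go through the Addresses interface, so swapping GCEAddresses for MockAddresses in a test requires no change to the code under test.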
@@ -1643,19 +2626,21 @@ type MockAlphaAddresses struct { // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. - GetError map[meta.Key]error - ListError *error - InsertError map[meta.Key]error - DeleteError map[meta.Key]error + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + AggregatedListError *error // xxxHook allow you to intercept the standard processing of the mock in // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaAddresses) (bool, *alpha.Address, error) - ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAlphaAddresses) (bool, []*alpha.Address, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.Address, m *MockAlphaAddresses) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaAddresses) (bool, error) + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaAddresses) (bool, *alpha.Address, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAlphaAddresses) (bool, []*alpha.Address, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.Address, m *MockAlphaAddresses) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaAddresses) (bool, error) + AggregatedListHook func(ctx context.Context, fl *filter.F, m *MockAlphaAddresses) (bool, map[string][]*alpha.Address, error) // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -1799,6 +2784,41 @@ func (m *MockAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { return nil } +// AggregatedList is a mock for AggregatedList. +func (m *MockAlphaAddresses) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.Address, error) { + if m.AggregatedListHook != nil { + if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockAlphaAddresses.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.AggregatedListError != nil { + err := *m.AggregatedListError + klog.V(5).Infof("MockAlphaAddresses.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + + objs := map[string][]*alpha.Address{} + for _, obj := range m.Objects { + res, err := ParseResourceURL(obj.ToAlpha().SelfLink) + location := res.Key.Region + if err != nil { + klog.V(5).Infof("MockAlphaAddresses.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs[location] = append(objs[location], obj.ToAlpha()) + } + klog.V(5).Infof("MockAlphaAddresses.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + // Obj wraps the object for use in the mock. func (m *MockAlphaAddresses) Obj(o *alpha.Address) *MockAddressesObj { return &MockAddressesObj{o} @@ -1945,12 +2965,61 @@ func (g *GCEAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { return err } +// AggregatedList lists all resources of the given type across all locations. 
+func (g *GCEAlphaAddresses) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.Address, error) { + klog.V(5).Infof("GCEAlphaAddresses.AggregatedList(%v, %v) called", ctx, fl) + + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AggregatedList", + Version: meta.Version("alpha"), + Service: "Addresses", + } + + klog.V(5).Infof("GCEAlphaAddresses.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(5).Infof("GCEAlphaAddresses.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + return nil, err + } + + call := g.s.Alpha.Addresses.AggregatedList(projectID) + call.Context(ctx) + if fl != filter.None { + call.Filter(fl.String()) + } + + all := map[string][]*alpha.Address{} + f := func(l *alpha.AddressAggregatedList) error { + for k, v := range l.Items { + klog.V(5).Infof("GCEAlphaAddresses.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) + all[k] = append(all[k], v.Addresses...) + } + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAlphaAddresses.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + if klog.V(4) { + klog.V(4).Infof("GCEAlphaAddresses.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAlphaAddresses.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + } + return all, nil +} + // BetaAddresses is an interface that allows for mocking of Addresses. type BetaAddresses interface { Get(ctx context.Context, key *meta.Key) (*beta.Address, error) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Address, error) Insert(ctx context.Context, key *meta.Key, obj *beta.Address) error Delete(ctx context.Context, key *meta.Key) error + AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.Address, error) } // NewMockBetaAddresses returns a new mock for Addresses. @@ -1977,19 +3046,21 @@ type MockBetaAddresses struct { // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. - GetError map[meta.Key]error - ListError *error - InsertError map[meta.Key]error - DeleteError map[meta.Key]error + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + AggregatedListError *error // xxxHook allow you to intercept the standard processing of the mock in // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. 
- GetHook func(ctx context.Context, key *meta.Key, m *MockBetaAddresses) (bool, *beta.Address, error) - ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaAddresses) (bool, []*beta.Address, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *beta.Address, m *MockBetaAddresses) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaAddresses) (bool, error) + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaAddresses) (bool, *beta.Address, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaAddresses) (bool, []*beta.Address, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.Address, m *MockBetaAddresses) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaAddresses) (bool, error) + AggregatedListHook func(ctx context.Context, fl *filter.F, m *MockBetaAddresses) (bool, map[string][]*beta.Address, error) // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -2133,6 +3204,41 @@ func (m *MockBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { return nil } +// AggregatedList is a mock for AggregatedList. +func (m *MockBetaAddresses) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.Address, error) { + if m.AggregatedListHook != nil { + if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockBetaAddresses.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.AggregatedListError != nil { + err := *m.AggregatedListError + klog.V(5).Infof("MockBetaAddresses.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + + objs := map[string][]*beta.Address{} + for _, obj := range m.Objects { + res, err := ParseResourceURL(obj.ToBeta().SelfLink) + location := res.Key.Region + if err != nil { + klog.V(5).Infof("MockBetaAddresses.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + if !fl.Match(obj.ToBeta()) { + continue + } + objs[location] = append(objs[location], obj.ToBeta()) + } + klog.V(5).Infof("MockBetaAddresses.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + // Obj wraps the object for use in the mock. func (m *MockBetaAddresses) Obj(o *beta.Address) *MockAddressesObj { return &MockAddressesObj{o} @@ -2279,17 +3385,65 @@ func (g *GCEBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { return err } -// GlobalAddresses is an interface that allows for mocking of GlobalAddresses. -type GlobalAddresses interface { - Get(ctx context.Context, key *meta.Key) (*ga.Address, error) - List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error +// AggregatedList lists all resources of the given type across all locations. 
+func (g *GCEBetaAddresses) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.Address, error) { + klog.V(5).Infof("GCEBetaAddresses.AggregatedList(%v, %v) called", ctx, fl) + + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AggregatedList", + Version: meta.Version("beta"), + Service: "Addresses", + } + + klog.V(5).Infof("GCEBetaAddresses.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(5).Infof("GCEBetaAddresses.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + return nil, err + } + + call := g.s.Beta.Addresses.AggregatedList(projectID) + call.Context(ctx) + if fl != filter.None { + call.Filter(fl.String()) + } + + all := map[string][]*beta.Address{} + f := func(l *beta.AddressAggregatedList) error { + for k, v := range l.Items { + klog.V(5).Infof("GCEBetaAddresses.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) + all[k] = append(all[k], v.Addresses...) + } + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEBetaAddresses.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + if klog.V(4) { + klog.V(4).Infof("GCEBetaAddresses.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEBetaAddresses.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + } + return all, nil +} + +// AlphaGlobalAddresses is an interface that allows for mocking of GlobalAddresses. +type AlphaGlobalAddresses interface { + Get(ctx context.Context, key *meta.Key) (*alpha.Address, error) + List(ctx context.Context, fl *filter.F) ([]*alpha.Address, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.Address) error Delete(ctx context.Context, key *meta.Key) error } -// NewMockGlobalAddresses returns a new mock for GlobalAddresses. -func NewMockGlobalAddresses(pr ProjectRouter, objs map[meta.Key]*MockGlobalAddressesObj) *MockGlobalAddresses { - mock := &MockGlobalAddresses{ +// NewMockAlphaGlobalAddresses returns a new mock for GlobalAddresses. +func NewMockAlphaGlobalAddresses(pr ProjectRouter, objs map[meta.Key]*MockGlobalAddressesObj) *MockAlphaGlobalAddresses { + mock := &MockAlphaGlobalAddresses{ ProjectRouter: pr, Objects: objs, @@ -2300,8 +3454,8 @@ func NewMockGlobalAddresses(pr ProjectRouter, objs map[meta.Key]*MockGlobalAddre return mock } -// MockGlobalAddresses is the mock for GlobalAddresses. -type MockGlobalAddresses struct { +// MockAlphaGlobalAddresses is the mock for GlobalAddresses. +type MockAlphaGlobalAddresses struct { Lock sync.Mutex ProjectRouter ProjectRouter @@ -2320,10 +3474,10 @@ type MockGlobalAddresses struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. 
- GetHook func(ctx context.Context, key *meta.Key, m *MockGlobalAddresses) (bool, *ga.Address, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockGlobalAddresses) (bool, []*ga.Address, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Address, m *MockGlobalAddresses) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockGlobalAddresses) (bool, error) + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaGlobalAddresses) (bool, *alpha.Address, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockAlphaGlobalAddresses) (bool, []*alpha.Address, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.Address, m *MockAlphaGlobalAddresses) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaGlobalAddresses) (bool, error) // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -2331,10 +3485,10 @@ type MockGlobalAddresses struct { } // Get returns the object from the mock. -func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) { +func (m *MockAlphaGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Address, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaGlobalAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -2346,28 +3500,28 @@ func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Addre defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToGA() - klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaGlobalAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockGlobalAddresses %v not found", key), + Message: fmt.Sprintf("MockAlphaGlobalAddresses %v not found", key), } - klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock. 
-func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) { +func (m *MockAlphaGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*alpha.Address, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockAlphaGlobalAddresses.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -2377,28 +3531,28 @@ func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Add if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaGlobalAddresses.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*ga.Address + var objs []*alpha.Address for _, obj := range m.Objects { - if !fl.Match(obj.ToGA()) { + if !fl.Match(obj.ToAlpha()) { continue } - objs = append(objs, obj.ToGA()) + objs = append(objs, obj.ToAlpha()) } - klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockAlphaGlobalAddresses.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error { +func (m *MockAlphaGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *alpha.Address) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -2410,32 +3564,32 @@ func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockGlobalAddresses %v exists", key), + Message: fmt.Sprintf("MockAlphaGlobalAddresses %v exists", key), } - klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "addresses") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "addresses", key) + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "addresses") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "addresses", key) m.Objects[*key] = &MockGlobalAddressesObj{obj} - klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaGlobalAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockAlphaGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -2447,184 +3601,181 @@ func (m *MockGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockGlobalAddresses %v not found", key), + Message: fmt.Sprintf("MockAlphaGlobalAddresses %v not found", key), } - klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaGlobalAddresses.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockGlobalAddresses) Obj(o *ga.Address) *MockGlobalAddressesObj { +func (m *MockAlphaGlobalAddresses) Obj(o *alpha.Address) *MockGlobalAddressesObj { return &MockGlobalAddressesObj{o} } -// GCEGlobalAddresses is a simplifying adapter for the GCE GlobalAddresses. -type GCEGlobalAddresses struct { +// GCEAlphaGlobalAddresses is a simplifying adapter for the GCE GlobalAddresses. +type GCEAlphaGlobalAddresses struct { s *Service } // Get the Address named by key. 
-func (g *GCEGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) { - klog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): called", ctx, key) +func (g *GCEAlphaGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Address, error) { + klog.V(5).Infof("GCEAlphaGlobalAddresses.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEGlobalAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaGlobalAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "GlobalAddresses") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("ga"), + Version: meta.Version("alpha"), Service: "GlobalAddresses", } - klog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaGlobalAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.GlobalAddresses.Get(projectID, key.Name) + call := g.s.Alpha.GlobalAddresses.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaGlobalAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Address objects. -func (g *GCEGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) { - klog.V(5).Infof("GCEGlobalAddresses.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") +func (g *GCEAlphaGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*alpha.Address, error) { + klog.V(5).Infof("GCEAlphaGlobalAddresses.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "GlobalAddresses") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("ga"), + Version: meta.Version("alpha"), Service: "GlobalAddresses", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEGlobalAddresses.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.GA.GlobalAddresses.List(projectID) + klog.V(5).Infof("GCEAlphaGlobalAddresses.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Alpha.GlobalAddresses.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.Address - f := func(l *ga.AddressList) error { - klog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*alpha.Address + f := func(l *alpha.AddressList) error { + klog.V(5).Infof("GCEAlphaGlobalAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEAlphaGlobalAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // Insert Address with key of value obj. -func (g *GCEGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error { - klog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) +func (g *GCEAlphaGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *alpha.Address) error { + klog.V(5).Infof("GCEAlphaGlobalAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaGlobalAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "GlobalAddresses") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("ga"), + Version: meta.Version("alpha"), Service: "GlobalAddresses", } - klog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaGlobalAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.GlobalAddresses.Insert(projectID, obj) + call := g.s.Alpha.GlobalAddresses.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaGlobalAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Address referenced by key. 
-func (g *GCEGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): called", ctx, key) +func (g *GCEAlphaGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaGlobalAddresses.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEGlobalAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaGlobalAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "GlobalAddresses") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("ga"), + Version: meta.Version("alpha"), Service: "GlobalAddresses", } - klog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaGlobalAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.GlobalAddresses.Delete(projectID, key.Name) + call := g.s.Alpha.GlobalAddresses.Delete(projectID, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } -// BackendServices is an interface that allows for mocking of BackendServices. -type BackendServices interface { - Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) - List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error +// GlobalAddresses is an interface that allows for mocking of GlobalAddresses. +type GlobalAddresses interface { + Get(ctx context.Context, key *meta.Key) (*ga.Address, error) + List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error Delete(ctx context.Context, key *meta.Key) error - GetHealth(context.Context, *meta.Key, *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) - Patch(context.Context, *meta.Key, *ga.BackendService) error - Update(context.Context, *meta.Key, *ga.BackendService) error } -// NewMockBackendServices returns a new mock for BackendServices. -func NewMockBackendServices(pr ProjectRouter, objs map[meta.Key]*MockBackendServicesObj) *MockBackendServices { - mock := &MockBackendServices{ +// NewMockGlobalAddresses returns a new mock for GlobalAddresses. +func NewMockGlobalAddresses(pr ProjectRouter, objs map[meta.Key]*MockGlobalAddressesObj) *MockGlobalAddresses { + mock := &MockGlobalAddresses{ ProjectRouter: pr, Objects: objs, @@ -2635,14 +3786,14 @@ func NewMockBackendServices(pr ProjectRouter, objs map[meta.Key]*MockBackendServ return mock } -// MockBackendServices is the mock for BackendServices. 
-type MockBackendServices struct { +// MockGlobalAddresses is the mock for GlobalAddresses. +type MockGlobalAddresses struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockBackendServicesObj + Objects map[meta.Key]*MockGlobalAddressesObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -2655,13 +3806,10 @@ type MockBackendServices struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockBackendServices) (bool, *ga.BackendService, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockBackendServices) (bool, []*ga.BackendService, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *MockBackendServices) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockBackendServices) (bool, error) - GetHealthHook func(context.Context, *meta.Key, *ga.ResourceGroupReference, *MockBackendServices) (*ga.BackendServiceGroupHealth, error) - PatchHook func(context.Context, *meta.Key, *ga.BackendService, *MockBackendServices) error - UpdateHook func(context.Context, *meta.Key, *ga.BackendService, *MockBackendServices) error + GetHook func(ctx context.Context, key *meta.Key, m *MockGlobalAddresses) (bool, *ga.Address, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockGlobalAddresses) (bool, []*ga.Address, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Address, m *MockGlobalAddresses) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockGlobalAddresses) (bool, error) // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -2669,10 +3817,10 @@ type MockBackendServices struct { } // Get returns the object from the mock. -func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { +func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -2684,28 +3832,28 @@ func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.Backe defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - klog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBackendServices %v not found", key), + Message: fmt.Sprintf("MockGlobalAddresses %v not found", key), } - klog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock. 
-func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { +func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -2715,12 +3863,12 @@ func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Bac if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*ga.BackendService + var objs []*ga.Address for _, obj := range m.Objects { if !fl.Match(obj.ToGA()) { continue @@ -2728,15 +3876,15 @@ func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Bac objs = append(objs, obj.ToGA()) } - klog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { +func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -2748,32 +3896,32 @@ func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockBackendServices %v exists", key), + Message: fmt.Sprintf("MockGlobalAddresses %v exists", key), } - klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "backendServices", key) + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "addresses") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "addresses", key) - m.Objects[*key] = &MockBackendServicesObj{obj} - klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockGlobalAddressesObj{obj} + klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockBackendServices) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -2785,301 +3933,184 @@ func (m *MockBackendServices) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBackendServices %v not found", key), + Message: fmt.Sprintf("MockGlobalAddresses %v not found", key), } - klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockBackendServices) Obj(o *ga.BackendService) *MockBackendServicesObj { - return &MockBackendServicesObj{o} -} - -// GetHealth is a mock for the corresponding method. -func (m *MockBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { - if m.GetHealthHook != nil { - return m.GetHealthHook(ctx, key, arg0, m) - } - return nil, fmt.Errorf("GetHealthHook must be set") -} - -// Patch is a mock for the corresponding method. -func (m *MockBackendServices) Patch(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - if m.PatchHook != nil { - return m.PatchHook(ctx, key, arg0, m) - } - return nil -} - -// Update is a mock for the corresponding method. -func (m *MockBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - if m.UpdateHook != nil { - return m.UpdateHook(ctx, key, arg0, m) - } - return nil +func (m *MockGlobalAddresses) Obj(o *ga.Address) *MockGlobalAddressesObj { + return &MockGlobalAddressesObj{o} } -// GCEBackendServices is a simplifying adapter for the GCE BackendServices. -type GCEBackendServices struct { +// GCEGlobalAddresses is a simplifying adapter for the GCE GlobalAddresses. +type GCEGlobalAddresses struct { s *Service } -// Get the BackendService named by key. -func (g *GCEBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { - klog.V(5).Infof("GCEBackendServices.Get(%v, %v): called", ctx, key) +// Get the Address named by key. 
+func (g *GCEGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) { + klog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", Version: meta.Version("ga"), - Service: "BackendServices", + Service: "GlobalAddresses", } - klog.V(5).Infof("GCEBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.BackendServices.Get(projectID, key.Name) + call := g.s.GA.GlobalAddresses.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all BackendService objects. -func (g *GCEBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { - klog.V(5).Infof("GCEBackendServices.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") +// List all Address objects. +func (g *GCEGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) { + klog.V(5).Infof("GCEGlobalAddresses.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", Version: meta.Version("ga"), - Service: "BackendServices", + Service: "GlobalAddresses", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.GA.BackendServices.List(projectID) + klog.V(5).Infof("GCEGlobalAddresses.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.GlobalAddresses.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.BackendService - f := func(l *ga.BackendServiceList) error { - klog.V(5).Infof("GCEBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*ga.Address + f := func(l *ga.AddressList) error { + klog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert BackendService with key of value obj. -func (g *GCEBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { - klog.V(5).Infof("GCEBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert Address with key of value obj. +func (g *GCEGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error { + klog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", Version: meta.Version("ga"), - Service: "BackendServices", + Service: "GlobalAddresses", } - klog.V(5).Infof("GCEBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.BackendServices.Insert(projectID, obj) + call := g.s.GA.GlobalAddresses.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the BackendService referenced by key. -func (g *GCEBackendServices) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEBackendServices.Delete(%v, %v): called", ctx, key) +// Delete the Address referenced by key. 
+func (g *GCEGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", Version: meta.Version("ga"), - Service: "BackendServices", + Service: "GlobalAddresses", } - klog.V(5).Infof("GCEBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.GA.BackendServices.Delete(projectID, key.Name) - - call.Context(ctx) - - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } + call := g.s.GA.GlobalAddresses.Delete(projectID, key.Name) - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) - return err -} - -// GetHealth is a method on GCEBackendServices. -func (g *GCEBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { - klog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCEBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return nil, fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "GetHealth", - Version: meta.Version("ga"), - Service: "BackendServices", - } - klog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return nil, err - } - call := g.s.GA.BackendServices.GetHealth(projectID, key.Name, arg0) call.Context(ctx) - v, err := call.Do() - klog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) - return v, err -} - -// Patch is a method on GCEBackendServices. 
-func (g *GCEBackendServices) Patch(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - klog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCEBackendServices.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "Patch", - Version: meta.Version("ga"), - Service: "BackendServices", - } - klog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.GA.BackendServices.Patch(projectID, key.Name, arg0) - call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) - return err -} - -// Update is a method on GCEBackendServices. -func (g *GCEBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - klog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCEBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "Update", - Version: meta.Version("ga"), - Service: "BackendServices", - } - klog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.GA.BackendServices.Update(projectID, key.Name, arg0) - call.Context(ctx) - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) - return err - } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } -// BetaBackendServices is an interface that allows for mocking of BackendServices. -type BetaBackendServices interface { - Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) - List(ctx context.Context, fl *filter.F) ([]*beta.BackendService, error) - Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error +// BackendServices is an interface that allows for mocking of BackendServices. 
+type BackendServices interface { + Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) + List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error Delete(ctx context.Context, key *meta.Key) error - SetSecurityPolicy(context.Context, *meta.Key, *beta.SecurityPolicyReference) error - Update(context.Context, *meta.Key, *beta.BackendService) error + GetHealth(context.Context, *meta.Key, *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) + Patch(context.Context, *meta.Key, *ga.BackendService) error + Update(context.Context, *meta.Key, *ga.BackendService) error } -// NewMockBetaBackendServices returns a new mock for BackendServices. -func NewMockBetaBackendServices(pr ProjectRouter, objs map[meta.Key]*MockBackendServicesObj) *MockBetaBackendServices { - mock := &MockBetaBackendServices{ +// NewMockBackendServices returns a new mock for BackendServices. +func NewMockBackendServices(pr ProjectRouter, objs map[meta.Key]*MockBackendServicesObj) *MockBackendServices { + mock := &MockBackendServices{ ProjectRouter: pr, Objects: objs, @@ -3090,8 +4121,8 @@ func NewMockBetaBackendServices(pr ProjectRouter, objs map[meta.Key]*MockBackend return mock } -// MockBetaBackendServices is the mock for BackendServices. -type MockBetaBackendServices struct { +// MockBackendServices is the mock for BackendServices. +type MockBackendServices struct { Lock sync.Mutex ProjectRouter ProjectRouter @@ -3110,12 +4141,13 @@ type MockBetaBackendServices struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockBetaBackendServices) (bool, *beta.BackendService, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockBetaBackendServices) (bool, []*beta.BackendService, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *beta.BackendService, m *MockBetaBackendServices) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaBackendServices) (bool, error) - SetSecurityPolicyHook func(context.Context, *meta.Key, *beta.SecurityPolicyReference, *MockBetaBackendServices) error - UpdateHook func(context.Context, *meta.Key, *beta.BackendService, *MockBetaBackendServices) error + GetHook func(ctx context.Context, key *meta.Key, m *MockBackendServices) (bool, *ga.BackendService, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockBackendServices) (bool, []*ga.BackendService, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *MockBackendServices) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBackendServices) (bool, error) + GetHealthHook func(context.Context, *meta.Key, *ga.ResourceGroupReference, *MockBackendServices) (*ga.BackendServiceGroupHealth, error) + PatchHook func(context.Context, *meta.Key, *ga.BackendService, *MockBackendServices) error + UpdateHook func(context.Context, *meta.Key, *ga.BackendService, *MockBackendServices) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -3123,10 +4155,10 @@ type MockBetaBackendServices struct { } // Get returns the object from the mock. 
-func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) { +func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -3138,28 +4170,28 @@ func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToBeta() - klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToGA() + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaBackendServices %v not found", key), + Message: fmt.Sprintf("MockBackendServices %v not found", key), } - klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock. -func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*beta.BackendService, error) { +func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -3169,28 +4201,28 @@ func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*be if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*beta.BackendService + var objs []*ga.BackendService for _, obj := range m.Objects { - if !fl.Match(obj.ToBeta()) { + if !fl.Match(obj.ToGA()) { continue } - objs = append(objs, obj.ToBeta()) + objs = append(objs, obj.ToGA()) } - klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error { +func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -3202,32 +4234,32 @@ func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockBetaBackendServices %v exists", key), + Message: fmt.Sprintf("MockBackendServices %v exists", key), } - klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "beta", "backendServices") - obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "backendServices", key) + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "backendServices", key) m.Objects[*key] = &MockBackendServicesObj{obj} - klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockBetaBackendServices) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -3239,265 +4271,301 @@ func (m *MockBetaBackendServices) Delete(ctx context.Context, key *meta.Key) err defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaBackendServices %v not found", key), + Message: fmt.Sprintf("MockBackendServices %v not found", key), } - klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockBetaBackendServices) Obj(o *beta.BackendService) *MockBackendServicesObj { +func (m *MockBackendServices) Obj(o *ga.BackendService) *MockBackendServicesObj { return &MockBackendServicesObj{o} } -// SetSecurityPolicy is a mock for the corresponding method. 
-func (m *MockBetaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyReference) error { - if m.SetSecurityPolicyHook != nil { - return m.SetSecurityPolicyHook(ctx, key, arg0, m) +// GetHealth is a mock for the corresponding method. +func (m *MockBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { + if m.GetHealthHook != nil { + return m.GetHealthHook(ctx, key, arg0, m) + } + return nil, fmt.Errorf("GetHealthHook must be set") +} + +// Patch is a mock for the corresponding method. +func (m *MockBackendServices) Patch(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { + if m.PatchHook != nil { + return m.PatchHook(ctx, key, arg0, m) } return nil } // Update is a mock for the corresponding method. -func (m *MockBetaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *beta.BackendService) error { +func (m *MockBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { if m.UpdateHook != nil { return m.UpdateHook(ctx, key, arg0, m) } return nil } -// GCEBetaBackendServices is a simplifying adapter for the GCE BackendServices. -type GCEBetaBackendServices struct { +// GCEBackendServices is a simplifying adapter for the GCE BackendServices. +type GCEBackendServices struct { s *Service } // Get the BackendService named by key. -func (g *GCEBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) { - klog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): called", ctx, key) +func (g *GCEBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { + klog.V(5).Infof("GCEBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "BackendServices", } - klog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Beta.BackendServices.Get(projectID, key.Name) + call := g.s.GA.BackendServices.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. 
-func (g *GCEBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*beta.BackendService, error) { - klog.V(5).Infof("GCEBetaBackendServices.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") +func (g *GCEBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { + klog.V(5).Infof("GCEBackendServices.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "BackendServices", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEBetaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.Beta.BackendServices.List(projectID) - if fl != filter.None { + klog.V(5).Infof("GCEBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.BackendServices.List(projectID) + if fl != filter.None { call.Filter(fl.String()) } - var all []*beta.BackendService - f := func(l *beta.BackendServiceList) error { - klog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*ga.BackendService + f := func(l *ga.BackendServiceList) error { + klog.V(5).Infof("GCEBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // Insert BackendService with key of value obj. 
-func (g *GCEBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error { - klog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) +func (g *GCEBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { + klog.V(5).Infof("GCEBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "BackendServices", } - klog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.Beta.BackendServices.Insert(projectID, obj) + call := g.s.GA.BackendServices.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
-func (g *GCEBetaBackendServices) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): called", ctx, key) +func (g *GCEBackendServices) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "BackendServices", } - klog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.BackendServices.Delete(projectID, key.Name) + call := g.s.GA.BackendServices.Delete(projectID, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } -// SetSecurityPolicy is a method on GCEBetaBackendServices. -func (g *GCEBetaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyReference) error { - klog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) +// GetHealth is a method on GCEBackendServices. +func (g *GCEBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { + klog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "GetHealth", + Version: meta.Version("ga"), + Service: "BackendServices", + } + klog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.BackendServices.GetHealth(projectID, key.Name, arg0) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) + return v, err +} + +// Patch is a method on GCEBackendServices. 
+func (g *GCEBackendServices) Patch(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { + klog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBackendServices.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "SetSecurityPolicy", - Version: meta.Version("beta"), + Operation: "Patch", + Version: meta.Version("ga"), Service: "BackendServices", } - klog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.BackendServices.SetSecurityPolicy(projectID, key.Name, arg0) + call := g.s.GA.BackendServices.Patch(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) return err } -// Update is a method on GCEBetaBackendServices. -func (g *GCEBetaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *beta.BackendService) error { - klog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): called", ctx, key) +// Update is a method on GCEBackendServices. 
+func (g *GCEBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { + klog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Update", - Version: meta.Version("beta"), + Version: meta.Version("ga"), Service: "BackendServices", } - klog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.BackendServices.Update(projectID, key.Name, arg0) + call := g.s.GA.BackendServices.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// AlphaBackendServices is an interface that allows for mocking of BackendServices. -type AlphaBackendServices interface { - Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) - List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) - Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error +// BetaBackendServices is an interface that allows for mocking of BackendServices. +type BetaBackendServices interface { + Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) + List(ctx context.Context, fl *filter.F) ([]*beta.BackendService, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error Delete(ctx context.Context, key *meta.Key) error - SetSecurityPolicy(context.Context, *meta.Key, *alpha.SecurityPolicyReference) error - Update(context.Context, *meta.Key, *alpha.BackendService) error + SetSecurityPolicy(context.Context, *meta.Key, *beta.SecurityPolicyReference) error + Update(context.Context, *meta.Key, *beta.BackendService) error } -// NewMockAlphaBackendServices returns a new mock for BackendServices. -func NewMockAlphaBackendServices(pr ProjectRouter, objs map[meta.Key]*MockBackendServicesObj) *MockAlphaBackendServices { - mock := &MockAlphaBackendServices{ +// NewMockBetaBackendServices returns a new mock for BackendServices. 
+func NewMockBetaBackendServices(pr ProjectRouter, objs map[meta.Key]*MockBackendServicesObj) *MockBetaBackendServices { + mock := &MockBetaBackendServices{ ProjectRouter: pr, Objects: objs, @@ -3508,8 +4576,8 @@ func NewMockAlphaBackendServices(pr ProjectRouter, objs map[meta.Key]*MockBacken return mock } -// MockAlphaBackendServices is the mock for BackendServices. -type MockAlphaBackendServices struct { +// MockBetaBackendServices is the mock for BackendServices. +type MockBetaBackendServices struct { Lock sync.Mutex ProjectRouter ProjectRouter @@ -3528,12 +4596,12 @@ type MockAlphaBackendServices struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaBackendServices) (bool, *alpha.BackendService, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockAlphaBackendServices) (bool, []*alpha.BackendService, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *MockAlphaBackendServices) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaBackendServices) (bool, error) - SetSecurityPolicyHook func(context.Context, *meta.Key, *alpha.SecurityPolicyReference, *MockAlphaBackendServices) error - UpdateHook func(context.Context, *meta.Key, *alpha.BackendService, *MockAlphaBackendServices) error + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaBackendServices) (bool, *beta.BackendService, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockBetaBackendServices) (bool, []*beta.BackendService, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.BackendService, m *MockBetaBackendServices) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaBackendServices) (bool, error) + SetSecurityPolicyHook func(context.Context, *meta.Key, *beta.SecurityPolicyReference, *MockBetaBackendServices) error + UpdateHook func(context.Context, *meta.Key, *beta.BackendService, *MockBetaBackendServices) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -3541,10 +4609,10 @@ type MockAlphaBackendServices struct { } // Get returns the object from the mock. 
-func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { +func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -3556,28 +4624,28 @@ func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alp defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToAlpha() - klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), + Message: fmt.Sprintf("MockBetaBackendServices %v not found", key), } - klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock. -func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) { +func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*beta.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -3587,28 +4655,28 @@ func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*a if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*alpha.BackendService + var objs []*beta.BackendService for _, obj := range m.Objects { - if !fl.Match(obj.ToAlpha()) { + if !fl.Match(obj.ToBeta()) { continue } - objs = append(objs, obj.ToAlpha()) + objs = append(objs, obj.ToBeta()) } - klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { +func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -3620,32 +4688,32 @@ func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, ob defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockAlphaBackendServices %v exists", key), + Message: fmt.Sprintf("MockBetaBackendServices %v exists", key), } - klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "backendServices") - obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "backendServices", key) + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "backendServices") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "backendServices", key) m.Objects[*key] = &MockBackendServicesObj{obj} - klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockBetaBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -3657,30 +4725,30 @@ func (m *MockAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) er defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), + Message: fmt.Sprintf("MockBetaBackendServices %v not found", key), } - klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. 
-func (m *MockAlphaBackendServices) Obj(o *alpha.BackendService) *MockBackendServicesObj { +func (m *MockBetaBackendServices) Obj(o *beta.BackendService) *MockBackendServicesObj { return &MockBackendServicesObj{o} } // SetSecurityPolicy is a mock for the corresponding method. -func (m *MockAlphaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *alpha.SecurityPolicyReference) error { +func (m *MockBetaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyReference) error { if m.SetSecurityPolicyHook != nil { return m.SetSecurityPolicyHook(ctx, key, arg0, m) } @@ -3688,234 +4756,234 @@ func (m *MockAlphaBackendServices) SetSecurityPolicy(ctx context.Context, key *m } // Update is a mock for the corresponding method. -func (m *MockAlphaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { +func (m *MockBetaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *beta.BackendService) error { if m.UpdateHook != nil { return m.UpdateHook(ctx, key, arg0, m) } return nil } -// GCEAlphaBackendServices is a simplifying adapter for the GCE BackendServices. -type GCEAlphaBackendServices struct { +// GCEBetaBackendServices is a simplifying adapter for the GCE BackendServices. +type GCEBetaBackendServices struct { s *Service } // Get the BackendService named by key. -func (g *GCEAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { - klog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): called", ctx, key) +func (g *GCEBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) { + klog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("alpha"), + Version: meta.Version("beta"), Service: "BackendServices", } - klog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Alpha.BackendServices.Get(projectID, key.Name) + call := g.s.Beta.BackendServices.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. 
-func (g *GCEAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) { - klog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") +func (g *GCEBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*beta.BackendService, error) { + klog.V(5).Infof("GCEBetaBackendServices.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("alpha"), + Version: meta.Version("beta"), Service: "BackendServices", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.Alpha.BackendServices.List(projectID) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Beta.BackendServices.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*alpha.BackendService - f := func(l *alpha.BackendServiceList) error { - klog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*beta.BackendService + f := func(l *beta.BackendServiceList) error { + klog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // Insert BackendService with key of value obj. 
-func (g *GCEAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { - klog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) +func (g *GCEBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error { + klog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("alpha"), + Version: meta.Version("beta"), Service: "BackendServices", } - klog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.Alpha.BackendServices.Insert(projectID, obj) + call := g.s.Beta.BackendServices.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
-func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): called", ctx, key) +func (g *GCEBetaBackendServices) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("alpha"), + Version: meta.Version("beta"), Service: "BackendServices", } - klog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Alpha.BackendServices.Delete(projectID, key.Name) + call := g.s.Beta.BackendServices.Delete(projectID, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } -// SetSecurityPolicy is a method on GCEAlphaBackendServices. -func (g *GCEAlphaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *alpha.SecurityPolicyReference) error { - klog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) +// SetSecurityPolicy is a method on GCEBetaBackendServices. 
+func (g *GCEBetaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyReference) error { + klog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "SetSecurityPolicy", - Version: meta.Version("alpha"), + Version: meta.Version("beta"), Service: "BackendServices", } - klog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Alpha.BackendServices.SetSecurityPolicy(projectID, key.Name, arg0) + call := g.s.Beta.BackendServices.SetSecurityPolicy(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } -// Update is a method on GCEAlphaBackendServices. -func (g *GCEAlphaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { - klog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): called", ctx, key) +// Update is a method on GCEBetaBackendServices. 
+func (g *GCEBetaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *beta.BackendService) error { + klog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Update", - Version: meta.Version("alpha"), + Version: meta.Version("beta"), Service: "BackendServices", } - klog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Alpha.BackendServices.Update(projectID, key.Name, arg0) + call := g.s.Beta.BackendServices.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// RegionBackendServices is an interface that allows for mocking of RegionBackendServices. -type RegionBackendServices interface { - Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) - List(ctx context.Context, region string, fl *filter.F) ([]*ga.BackendService, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error +// AlphaBackendServices is an interface that allows for mocking of BackendServices. +type AlphaBackendServices interface { + Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) + List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error Delete(ctx context.Context, key *meta.Key) error - GetHealth(context.Context, *meta.Key, *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) - Update(context.Context, *meta.Key, *ga.BackendService) error + SetSecurityPolicy(context.Context, *meta.Key, *alpha.SecurityPolicyReference) error + Update(context.Context, *meta.Key, *alpha.BackendService) error } -// NewMockRegionBackendServices returns a new mock for RegionBackendServices. -func NewMockRegionBackendServices(pr ProjectRouter, objs map[meta.Key]*MockRegionBackendServicesObj) *MockRegionBackendServices { - mock := &MockRegionBackendServices{ +// NewMockAlphaBackendServices returns a new mock for BackendServices. 
+func NewMockAlphaBackendServices(pr ProjectRouter, objs map[meta.Key]*MockBackendServicesObj) *MockAlphaBackendServices { + mock := &MockAlphaBackendServices{ ProjectRouter: pr, Objects: objs, @@ -3926,14 +4994,14 @@ func NewMockRegionBackendServices(pr ProjectRouter, objs map[meta.Key]*MockRegio return mock } -// MockRegionBackendServices is the mock for RegionBackendServices. -type MockRegionBackendServices struct { +// MockAlphaBackendServices is the mock for BackendServices. +type MockAlphaBackendServices struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockRegionBackendServicesObj + Objects map[meta.Key]*MockBackendServicesObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -3946,12 +5014,12 @@ type MockRegionBackendServices struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockRegionBackendServices) (bool, *ga.BackendService, error) - ListHook func(ctx context.Context, region string, fl *filter.F, m *MockRegionBackendServices) (bool, []*ga.BackendService, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *MockRegionBackendServices) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockRegionBackendServices) (bool, error) - GetHealthHook func(context.Context, *meta.Key, *ga.ResourceGroupReference, *MockRegionBackendServices) (*ga.BackendServiceGroupHealth, error) - UpdateHook func(context.Context, *meta.Key, *ga.BackendService, *MockRegionBackendServices) error + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaBackendServices) (bool, *alpha.BackendService, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockAlphaBackendServices) (bool, []*alpha.BackendService, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *MockAlphaBackendServices) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaBackendServices) (bool, error) + SetSecurityPolicyHook func(context.Context, *meta.Key, *alpha.SecurityPolicyReference, *MockAlphaBackendServices) error + UpdateHook func(context.Context, *meta.Key, *alpha.BackendService, *MockAlphaBackendServices) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -3959,10 +5027,10 @@ type MockRegionBackendServices struct { } // Get returns the object from the mock. 
-func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { +func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -3974,28 +5042,28 @@ func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToGA() - klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockRegionBackendServices %v not found", key), + Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), } - klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock in the given region. -func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*ga.BackendService, error) { +// List all of the objects in the mock. +func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -4005,31 +5073,28 @@ func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*ga.BackendService - for key, obj := range m.Objects { - if key.Region != region { - continue - } - if !fl.Match(obj.ToGA()) { + var objs []*alpha.BackendService + for _, obj := range m.Objects { + if !fl.Match(obj.ToAlpha()) { continue } - objs = append(objs, obj.ToGA()) + objs = append(objs, obj.ToAlpha()) } - klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { +func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -4041,32 +5106,32 @@ func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, o defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockRegionBackendServices %v exists", key), + Message: fmt.Sprintf("MockAlphaBackendServices %v exists", key), } - klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "backendServices", key) + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "backendServices") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "backendServices", key) - m.Objects[*key] = &MockRegionBackendServicesObj{obj} - klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockBackendServicesObj{obj} + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -4078,259 +5143,265 @@ func (m *MockRegionBackendServices) Delete(ctx context.Context, key *meta.Key) e defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockRegionBackendServices %v not found", key), + Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), } - klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. 
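The hook fields on MockAlphaBackendServices above are the extension point for these generated mocks: a GetHook (or InsertHook, DeleteHook, and so on) that returns true as its first value short-circuits the call, while (false, nil, nil) falls through to the default map-backed behavior. A minimal sketch of that pattern follows. It is illustrative only: it assumes it sits alongside gen.go in the same package, and it assumes the usual import paths for the alpha Compute API and the meta helper package (meta.GlobalKey is such an assumed helper), since none of those paths appear in this diff. A nil ProjectRouter is enough here because the Get path never consults it; Insert would need a real router to build the SelfLink.

// Illustrative sketch, not part of the generated file. Import paths below
// are assumed; only the mock types and hook signatures come from gen.go.
package cloud

import (
	"context"
	"fmt"

	alpha "google.golang.org/api/compute/v0.alpha"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
)

func ExampleMockAlphaBackendServicesGetHook() {
	ctx := context.Background()

	// ProjectRouter is nil: the Get path never touches it. Insert would
	// need one, since it derives obj.SelfLink via ProjectRouter.ProjectID.
	mock := NewMockAlphaBackendServices(nil, map[meta.Key]*MockBackendServicesObj{})

	// Intercept lookups for "bs-1"; any other key returns (false, nil, nil)
	// and falls through to the normal mock behavior (a 404 here, because
	// the Objects map is empty).
	mock.GetHook = func(ctx context.Context, key *meta.Key, m *MockAlphaBackendServices) (bool, *alpha.BackendService, error) {
		if key.Name == "bs-1" {
			return true, &alpha.BackendService{Name: "bs-1"}, nil
		}
		return false, nil, nil
	}

	bs, err := mock.Get(ctx, meta.GlobalKey("bs-1"))
	fmt.Println(bs.Name, err) // bs-1 <nil>

	_, err = mock.Get(ctx, meta.GlobalKey("unknown"))
	fmt.Println(err != nil) // true: not found in the mock's Objects map
}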
-func (m *MockRegionBackendServices) Obj(o *ga.BackendService) *MockRegionBackendServicesObj { - return &MockRegionBackendServicesObj{o} +func (m *MockAlphaBackendServices) Obj(o *alpha.BackendService) *MockBackendServicesObj { + return &MockBackendServicesObj{o} } -// GetHealth is a mock for the corresponding method. -func (m *MockRegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { - if m.GetHealthHook != nil { - return m.GetHealthHook(ctx, key, arg0, m) +// SetSecurityPolicy is a mock for the corresponding method. +func (m *MockAlphaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *alpha.SecurityPolicyReference) error { + if m.SetSecurityPolicyHook != nil { + return m.SetSecurityPolicyHook(ctx, key, arg0, m) } - return nil, fmt.Errorf("GetHealthHook must be set") + return nil } // Update is a mock for the corresponding method. -func (m *MockRegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { +func (m *MockAlphaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { if m.UpdateHook != nil { return m.UpdateHook(ctx, key, arg0, m) } return nil } -// GCERegionBackendServices is a simplifying adapter for the GCE RegionBackendServices. -type GCERegionBackendServices struct { +// GCEAlphaBackendServices is a simplifying adapter for the GCE BackendServices. +type GCEAlphaBackendServices struct { s *Service } // Get the BackendService named by key. -func (g *GCERegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { - klog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): called", ctx, key) +func (g *GCEAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { + klog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCERegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("ga"), - Service: "RegionBackendServices", + Version: meta.Version("alpha"), + Service: "BackendServices", } - klog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCERegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.RegionBackendServices.Get(projectID, key.Region, key.Name) + call := g.s.Alpha.BackendServices.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCERegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. 
-func (g *GCERegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*ga.BackendService, error) { - klog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") +func (g *GCEAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) { + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("ga"), - Service: "RegionBackendServices", + Version: meta.Version("alpha"), + Service: "BackendServices", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) - call := g.s.GA.RegionBackendServices.List(projectID, region) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Alpha.BackendServices.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.BackendService - f := func(l *ga.BackendServiceList) error { - klog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*alpha.BackendService + f := func(l *alpha.BackendServiceList) error { + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // Insert BackendService with key of value obj. 
-func (g *GCERegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { - klog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) +func (g *GCEAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { + klog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCERegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("ga"), - Service: "RegionBackendServices", + Version: meta.Version("alpha"), + Service: "BackendServices", } - klog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.RegionBackendServices.Insert(projectID, key.Region, obj) + call := g.s.Alpha.BackendServices.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
-func (g *GCERegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): called", ctx, key) +func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCERegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("ga"), - Service: "RegionBackendServices", + Version: meta.Version("alpha"), + Service: "BackendServices", } - klog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.RegionBackendServices.Delete(projectID, key.Region, key.Name) + call := g.s.Alpha.BackendServices.Delete(projectID, key.Name) + call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } -// GetHealth is a method on GCERegionBackendServices. -func (g *GCERegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { - klog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) +// SetSecurityPolicy is a method on GCEAlphaBackendServices. 
+func (g *GCEAlphaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *alpha.SecurityPolicyReference) error { + klog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return nil, fmt.Errorf("invalid GCE key (%+v)", key) + klog.V(2).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "GetHealth", - Version: meta.Version("ga"), - Service: "RegionBackendServices", + Operation: "SetSecurityPolicy", + Version: meta.Version("alpha"), + Service: "BackendServices", } - klog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return nil, err + klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err } - call := g.s.GA.RegionBackendServices.GetHealth(projectID, key.Region, key.Name, arg0) + call := g.s.Alpha.BackendServices.SetSecurityPolicy(projectID, key.Name, arg0) call.Context(ctx) - v, err := call.Do() - klog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) - return v, err + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + return err } -// Update is a method on GCERegionBackendServices. -func (g *GCERegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - klog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): called", ctx, key) +// Update is a method on GCEAlphaBackendServices. 
+func (g *GCEAlphaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { + klog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCERegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Update", - Version: meta.Version("ga"), - Service: "RegionBackendServices", + Version: meta.Version("alpha"), + Service: "BackendServices", } - klog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.RegionBackendServices.Update(projectID, key.Region, key.Name, arg0) + call := g.s.Alpha.BackendServices.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// AlphaRegionBackendServices is an interface that allows for mocking of RegionBackendServices. -type AlphaRegionBackendServices interface { - Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) - List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) - Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error +// RegionBackendServices is an interface that allows for mocking of RegionBackendServices. +type RegionBackendServices interface { + Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) + List(ctx context.Context, region string, fl *filter.F) ([]*ga.BackendService, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error Delete(ctx context.Context, key *meta.Key) error - GetHealth(context.Context, *meta.Key, *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) - Update(context.Context, *meta.Key, *alpha.BackendService) error + GetHealth(context.Context, *meta.Key, *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) + Update(context.Context, *meta.Key, *ga.BackendService) error } -// NewMockAlphaRegionBackendServices returns a new mock for RegionBackendServices. -func NewMockAlphaRegionBackendServices(pr ProjectRouter, objs map[meta.Key]*MockRegionBackendServicesObj) *MockAlphaRegionBackendServices { - mock := &MockAlphaRegionBackendServices{ +// NewMockRegionBackendServices returns a new mock for RegionBackendServices. 
+func NewMockRegionBackendServices(pr ProjectRouter, objs map[meta.Key]*MockRegionBackendServicesObj) *MockRegionBackendServices { + mock := &MockRegionBackendServices{ ProjectRouter: pr, Objects: objs, @@ -4341,8 +5412,8 @@ func NewMockAlphaRegionBackendServices(pr ProjectRouter, objs map[meta.Key]*Mock return mock } -// MockAlphaRegionBackendServices is the mock for RegionBackendServices. -type MockAlphaRegionBackendServices struct { +// MockRegionBackendServices is the mock for RegionBackendServices. +type MockRegionBackendServices struct { Lock sync.Mutex ProjectRouter ProjectRouter @@ -4361,12 +5432,12 @@ type MockAlphaRegionBackendServices struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionBackendServices) (bool, *alpha.BackendService, error) - ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAlphaRegionBackendServices) (bool, []*alpha.BackendService, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *MockAlphaRegionBackendServices) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionBackendServices) (bool, error) - GetHealthHook func(context.Context, *meta.Key, *alpha.ResourceGroupReference, *MockAlphaRegionBackendServices) (*alpha.BackendServiceGroupHealth, error) - UpdateHook func(context.Context, *meta.Key, *alpha.BackendService, *MockAlphaRegionBackendServices) error + GetHook func(ctx context.Context, key *meta.Key, m *MockRegionBackendServices) (bool, *ga.BackendService, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockRegionBackendServices) (bool, []*ga.BackendService, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *MockRegionBackendServices) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockRegionBackendServices) (bool, error) + GetHealthHook func(context.Context, *meta.Key, *ga.ResourceGroupReference, *MockRegionBackendServices) (*ga.BackendServiceGroupHealth, error) + UpdateHook func(context.Context, *meta.Key, *ga.BackendService, *MockRegionBackendServices) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -4374,10 +5445,10 @@ type MockAlphaRegionBackendServices struct { } // Get returns the object from the mock. 
-func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { +func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -4389,28 +5460,28 @@ func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToAlpha() - klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToGA() + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key), + Message: fmt.Sprintf("MockRegionBackendServices %v not found", key), } - klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock in the given region. -func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) { +func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*ga.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -4420,31 +5491,31 @@ func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*alpha.BackendService + var objs []*ga.BackendService for key, obj := range m.Objects { if key.Region != region { continue } - if !fl.Match(obj.ToAlpha()) { + if !fl.Match(obj.ToGA()) { continue } - objs = append(objs, obj.ToAlpha()) + objs = append(objs, obj.ToGA()) } - klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { +func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -4456,32 +5527,32 @@ func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockAlphaRegionBackendServices %v exists", key), + Message: fmt.Sprintf("MockRegionBackendServices %v exists", key), } - klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "backendServices") - obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "backendServices", key) + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "backendServices", key) m.Objects[*key] = &MockRegionBackendServicesObj{obj} - klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -4493,30 +5564,30 @@ func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key), + Message: fmt.Sprintf("MockRegionBackendServices %v not found", key), } - klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. 
-func (m *MockAlphaRegionBackendServices) Obj(o *alpha.BackendService) *MockRegionBackendServicesObj { +func (m *MockRegionBackendServices) Obj(o *ga.BackendService) *MockRegionBackendServicesObj { return &MockRegionBackendServicesObj{o} } // GetHealth is a mock for the corresponding method. -func (m *MockAlphaRegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) { +func (m *MockRegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { if m.GetHealthHook != nil { return m.GetHealthHook(ctx, key, arg0, m) } @@ -4524,227 +5595,228 @@ func (m *MockAlphaRegionBackendServices) GetHealth(ctx context.Context, key *met } // Update is a mock for the corresponding method. -func (m *MockAlphaRegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { +func (m *MockRegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { if m.UpdateHook != nil { return m.UpdateHook(ctx, key, arg0, m) } return nil } -// GCEAlphaRegionBackendServices is a simplifying adapter for the GCE RegionBackendServices. -type GCEAlphaRegionBackendServices struct { +// GCERegionBackendServices is a simplifying adapter for the GCE RegionBackendServices. +type GCERegionBackendServices struct { s *Service } // Get the BackendService named by key. -func (g *GCEAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { - klog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): called", ctx, key) +func (g *GCERegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { + klog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("alpha"), + Version: meta.Version("ga"), Service: "RegionBackendServices", } - klog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Alpha.RegionBackendServices.Get(projectID, key.Region, key.Name) + call := g.s.GA.RegionBackendServices.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. 
-func (g *GCEAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) { - klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") +func (g *GCERegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*ga.BackendService, error) { + klog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("alpha"), + Version: meta.Version("ga"), Service: "RegionBackendServices", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) - call := g.s.Alpha.RegionBackendServices.List(projectID, region) + klog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.GA.RegionBackendServices.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*alpha.BackendService - f := func(l *alpha.BackendServiceList) error { - klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*ga.BackendService + f := func(l *ga.BackendServiceList) error { + klog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // Insert BackendService with key of value obj. 
-func (g *GCEAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { - klog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) +func (g *GCERegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { + klog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("alpha"), + Version: meta.Version("ga"), Service: "RegionBackendServices", } - klog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.Alpha.RegionBackendServices.Insert(projectID, key.Region, obj) + call := g.s.GA.RegionBackendServices.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
-func (g *GCEAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): called", ctx, key) +func (g *GCERegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("alpha"), + Version: meta.Version("ga"), Service: "RegionBackendServices", } - klog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Alpha.RegionBackendServices.Delete(projectID, key.Region, key.Name) + call := g.s.GA.RegionBackendServices.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } -// GetHealth is a method on GCEAlphaRegionBackendServices. -func (g *GCEAlphaRegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) { - klog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) +// GetHealth is a method on GCERegionBackendServices. 
+func (g *GCERegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { + klog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "GetHealth", - Version: meta.Version("alpha"), + Version: meta.Version("ga"), Service: "RegionBackendServices", } - klog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Alpha.RegionBackendServices.GetHealth(projectID, key.Region, key.Name, arg0) + call := g.s.GA.RegionBackendServices.GetHealth(projectID, key.Region, key.Name, arg0) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err } -// Update is a method on GCEAlphaRegionBackendServices. -func (g *GCEAlphaRegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { - klog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): called", ctx, key) +// Update is a method on GCERegionBackendServices. 
+func (g *GCERegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { + klog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Update", - Version: meta.Version("alpha"), + Version: meta.Version("ga"), Service: "RegionBackendServices", } - klog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Alpha.RegionBackendServices.Update(projectID, key.Region, key.Name, arg0) + call := g.s.GA.RegionBackendServices.Update(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// Disks is an interface that allows for mocking of Disks. -type Disks interface { - Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) - List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error +// AlphaRegionBackendServices is an interface that allows for mocking of RegionBackendServices. +type AlphaRegionBackendServices interface { + Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error Delete(ctx context.Context, key *meta.Key) error - Resize(context.Context, *meta.Key, *ga.DisksResizeRequest) error + GetHealth(context.Context, *meta.Key, *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) + Update(context.Context, *meta.Key, *alpha.BackendService) error } -// NewMockDisks returns a new mock for Disks. -func NewMockDisks(pr ProjectRouter, objs map[meta.Key]*MockDisksObj) *MockDisks { - mock := &MockDisks{ +// NewMockAlphaRegionBackendServices returns a new mock for RegionBackendServices. +func NewMockAlphaRegionBackendServices(pr ProjectRouter, objs map[meta.Key]*MockRegionBackendServicesObj) *MockAlphaRegionBackendServices { + mock := &MockAlphaRegionBackendServices{ ProjectRouter: pr, Objects: objs, @@ -4755,14 +5827,14 @@ func NewMockDisks(pr ProjectRouter, objs map[meta.Key]*MockDisksObj) *MockDisks return mock } -// MockDisks is the mock for Disks. 
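The RegionBackendServices interface above exists so that callers can be written against the interface and exercised in tests with MockRegionBackendServices instead of the live GCERegionBackendServices adapter. A hedged sketch of that injection pattern follows, under the same package-placement and import-path assumptions as the earlier example; meta.RegionalKey and the deleteIfExists helper are assumptions introduced for illustration. The mock is seeded through its Objects map and given a nil ProjectRouter, which is safe because neither Get nor Delete consults the router.

// Illustrative sketch, not part of the generated file.
package cloud

import (
	"context"
	"fmt"

	ga "google.golang.org/api/compute/v1"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
)

// deleteIfExists is a hypothetical helper written against the
// RegionBackendServices interface rather than the concrete GCE adapter.
func deleteIfExists(ctx context.Context, svc RegionBackendServices, key *meta.Key) error {
	if _, err := svc.Get(ctx, key); err != nil {
		return nil // treat a failed lookup as "already gone" in this sketch
	}
	return svc.Delete(ctx, key)
}

func ExampleRegionBackendServicesInjection() {
	ctx := context.Background()
	key := meta.RegionalKey("bs-1", "us-central1")

	// Seed the mock directly; Get and Delete never consult ProjectRouter,
	// so nil is acceptable here (Insert would require a real router).
	mock := NewMockRegionBackendServices(nil, map[meta.Key]*MockRegionBackendServicesObj{
		*key: {&ga.BackendService{Name: "bs-1"}},
	})

	fmt.Println(deleteIfExists(ctx, mock, key)) // <nil>: found and deleted

	_, err := mock.Get(ctx, key)
	fmt.Println(err != nil) // true: the object is gone
}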
-type MockDisks struct { +// MockAlphaRegionBackendServices is the mock for RegionBackendServices. +type MockAlphaRegionBackendServices struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockDisksObj + Objects map[meta.Key]*MockRegionBackendServicesObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -4775,11 +5847,12 @@ type MockDisks struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockDisks) (bool, *ga.Disk, error) - ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockDisks) (bool, []*ga.Disk, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Disk, m *MockDisks) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockDisks) (bool, error) - ResizeHook func(context.Context, *meta.Key, *ga.DisksResizeRequest, *MockDisks) error + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionBackendServices) (bool, *alpha.BackendService, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAlphaRegionBackendServices) (bool, []*alpha.BackendService, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *MockAlphaRegionBackendServices) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionBackendServices) (bool, error) + GetHealthHook func(context.Context, *meta.Key, *alpha.ResourceGroupReference, *MockAlphaRegionBackendServices) (*alpha.BackendServiceGroupHealth, error) + UpdateHook func(context.Context, *meta.Key, *alpha.BackendService, *MockAlphaRegionBackendServices) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -4787,10 +5860,10 @@ type MockDisks struct { } // Get returns the object from the mock. 
-func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { +func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -4802,28 +5875,28 @@ func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToGA() - klog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockDisks %v not found", key), + Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key), } - klog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock in the given zone. -func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) { +// List all of the objects in the mock in the given region. +func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - klog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -4833,31 +5906,31 @@ func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga. if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockDisks.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*ga.Disk + var objs []*alpha.BackendService for key, obj := range m.Objects { - if key.Zone != zone { + if key.Region != region { continue } - if !fl.Match(obj.ToGA()) { + if !fl.Match(obj.ToAlpha()) { continue } - objs = append(objs, obj.ToGA()) + objs = append(objs, obj.ToAlpha()) } - klog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { +func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -4869,32 +5942,32 @@ func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) err defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockDisks %v exists", key), + Message: fmt.Sprintf("MockAlphaRegionBackendServices %v exists", key), } - klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "disks") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "disks", key) + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "backendServices") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "backendServices", key) - m.Objects[*key] = &MockDisksObj{obj} - klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockRegionBackendServicesObj{obj} + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockDisks) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -4906,222 +5979,259 @@ func (m *MockDisks) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockDisks %v not found", key), + Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key), } - klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockDisks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockDisks) Obj(o *ga.Disk) *MockDisksObj { - return &MockDisksObj{o} +func (m *MockAlphaRegionBackendServices) Obj(o *alpha.BackendService) *MockRegionBackendServicesObj { + return &MockRegionBackendServicesObj{o} } -// Resize is a mock for the corresponding method. 
-func (m *MockDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.DisksResizeRequest) error { - if m.ResizeHook != nil { - return m.ResizeHook(ctx, key, arg0, m) +// GetHealth is a mock for the corresponding method. +func (m *MockAlphaRegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) { + if m.GetHealthHook != nil { + return m.GetHealthHook(ctx, key, arg0, m) + } + return nil, fmt.Errorf("GetHealthHook must be set") +} + +// Update is a mock for the corresponding method. +func (m *MockAlphaRegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) } return nil } -// GCEDisks is a simplifying adapter for the GCE Disks. -type GCEDisks struct { +// GCEAlphaRegionBackendServices is a simplifying adapter for the GCE RegionBackendServices. +type GCEAlphaRegionBackendServices struct { s *Service } -// Get the Disk named by key. -func (g *GCEDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { - klog.V(5).Infof("GCEDisks.Get(%v, %v): called", ctx, key) +// Get the BackendService named by key. +func (g *GCEAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { + klog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("ga"), - Service: "Disks", + Version: meta.Version("alpha"), + Service: "RegionBackendServices", } - klog.V(5).Infof("GCEDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.Disks.Get(projectID, key.Zone, key.Name) + call := g.s.Alpha.RegionBackendServices.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all Disk objects. -func (g *GCEDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) { - klog.V(5).Infof("GCEDisks.List(%v, %v, %v) called", ctx, zone, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") +// List all BackendService objects. 
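As the GetHealth stub added above makes explicit, the regional mocks have no default GetHealth behavior: the call fails with "GetHealthHook must be set" unless a hook is installed, and the regional List only returns objects whose key matches the requested region. A sketch wiring both together follows, under the same package and import-path assumptions as the earlier examples; a nil ProjectRouter is again sufficient because neither path consults it.

// Illustrative sketch, not part of the generated file.
package cloud

import (
	"context"
	"fmt"

	alpha "google.golang.org/api/compute/v0.alpha"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter"
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
)

func ExampleMockAlphaRegionBackendServicesGetHealth() {
	ctx := context.Background()
	key := meta.RegionalKey("bs-1", "us-central1")

	// The regional List filters on key.Region, so only objects seeded under
	// a matching regional key are returned below.
	mock := NewMockAlphaRegionBackendServices(nil, map[meta.Key]*MockRegionBackendServicesObj{
		*key: {&alpha.BackendService{Name: "bs-1"}},
	})

	// Without this hook, GetHealth returns "GetHealthHook must be set".
	mock.GetHealthHook = func(ctx context.Context, key *meta.Key, ref *alpha.ResourceGroupReference, m *MockAlphaRegionBackendServices) (*alpha.BackendServiceGroupHealth, error) {
		return &alpha.BackendServiceGroupHealth{}, nil
	}

	objs, err := mock.List(ctx, "us-central1", filter.None)
	fmt.Println(len(objs), err) // 1 <nil>

	_, err = mock.GetHealth(ctx, key, &alpha.ResourceGroupReference{})
	fmt.Println(err) // <nil>, because the hook is installed
}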
+func (g *GCEAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) { + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("ga"), - Service: "Disks", + Version: meta.Version("alpha"), + Service: "RegionBackendServices", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) - call := g.s.GA.Disks.List(projectID, zone) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Alpha.RegionBackendServices.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.Disk - f := func(l *ga.DiskList) error { - klog.V(5).Infof("GCEDisks.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*alpha.BackendService + f := func(l *alpha.BackendServiceList) error { + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert Disk with key of value obj. -func (g *GCEDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { - klog.V(5).Infof("GCEDisks.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert BackendService with key of value obj. 
+func (g *GCEAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { + klog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("ga"), - Service: "Disks", + Version: meta.Version("alpha"), + Service: "RegionBackendServices", } - klog.V(5).Infof("GCEDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.Disks.Insert(projectID, key.Zone, obj) + call := g.s.Alpha.RegionBackendServices.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the Disk referenced by key. -func (g *GCEDisks) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEDisks.Delete(%v, %v): called", ctx, key) +// Delete the BackendService referenced by key. 
+func (g *GCEAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("ga"), - Service: "Disks", + Version: meta.Version("alpha"), + Service: "RegionBackendServices", } - klog.V(5).Infof("GCEDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.Disks.Delete(projectID, key.Zone, key.Name) + call := g.s.Alpha.RegionBackendServices.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } -// Resize is a method on GCEDisks. -func (g *GCEDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.DisksResizeRequest) error { - klog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): called", ctx, key) +// GetHealth is a method on GCEAlphaRegionBackendServices. +func (g *GCEAlphaRegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) { + klog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "GetHealth", + Version: meta.Version("alpha"), + Service: "RegionBackendServices", + } + klog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Alpha.RegionBackendServices.GetHealth(projectID, key.Region, key.Name, arg0) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) + return v, err +} + +// Update is a method on GCEAlphaRegionBackendServices. 
+func (g *GCEAlphaRegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { + klog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "Resize", - Version: meta.Version("ga"), - Service: "Disks", + Operation: "Update", + Version: meta.Version("alpha"), + Service: "RegionBackendServices", } - klog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.Disks.Resize(projectID, key.Zone, key.Name, arg0) + call := g.s.Alpha.RegionBackendServices.Update(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// RegionDisks is an interface that allows for mocking of RegionDisks. -type RegionDisks interface { - Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) - List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error +// BetaRegionBackendServices is an interface that allows for mocking of RegionBackendServices. +type BetaRegionBackendServices interface { + Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) + List(ctx context.Context, region string, fl *filter.F) ([]*beta.BackendService, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error Delete(ctx context.Context, key *meta.Key) error - Resize(context.Context, *meta.Key, *ga.RegionDisksResizeRequest) error + GetHealth(context.Context, *meta.Key, *beta.ResourceGroupReference) (*beta.BackendServiceGroupHealth, error) + Update(context.Context, *meta.Key, *beta.BackendService) error } -// NewMockRegionDisks returns a new mock for RegionDisks. -func NewMockRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisksObj) *MockRegionDisks { - mock := &MockRegionDisks{ +// NewMockBetaRegionBackendServices returns a new mock for RegionBackendServices. +func NewMockBetaRegionBackendServices(pr ProjectRouter, objs map[meta.Key]*MockRegionBackendServicesObj) *MockBetaRegionBackendServices { + mock := &MockBetaRegionBackendServices{ ProjectRouter: pr, Objects: objs, @@ -5132,14 +6242,14 @@ func NewMockRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisksObj) return mock } -// MockRegionDisks is the mock for RegionDisks. 
-type MockRegionDisks struct { +// MockBetaRegionBackendServices is the mock for RegionBackendServices. +type MockBetaRegionBackendServices struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockRegionDisksObj + Objects map[meta.Key]*MockRegionBackendServicesObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -5152,11 +6262,12 @@ type MockRegionDisks struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockRegionDisks) (bool, *ga.Disk, error) - ListHook func(ctx context.Context, region string, fl *filter.F, m *MockRegionDisks) (bool, []*ga.Disk, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Disk, m *MockRegionDisks) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockRegionDisks) (bool, error) - ResizeHook func(context.Context, *meta.Key, *ga.RegionDisksResizeRequest, *MockRegionDisks) error + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionBackendServices) (bool, *beta.BackendService, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaRegionBackendServices) (bool, []*beta.BackendService, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.BackendService, m *MockBetaRegionBackendServices) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionBackendServices) (bool, error) + GetHealthHook func(context.Context, *meta.Key, *beta.ResourceGroupReference, *MockBetaRegionBackendServices) (*beta.BackendServiceGroupHealth, error) + UpdateHook func(context.Context, *meta.Key, *beta.BackendService, *MockBetaRegionBackendServices) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -5164,10 +6275,10 @@ type MockRegionDisks struct { } // Get returns the object from the mock. 
-func (m *MockRegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { +func (m *MockBetaRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -5179,28 +6290,28 @@ func (m *MockRegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, err defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToGA() - klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockRegionDisks %v not found", key), + Message: fmt.Sprintf("MockBetaRegionBackendServices %v not found", key), } - klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock in the given region. -func (m *MockRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) { +func (m *MockBetaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*beta.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockBetaRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -5210,31 +6321,31 @@ func (m *MockRegionDisks) List(ctx context.Context, region string, fl *filter.F) if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockBetaRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*ga.Disk + var objs []*beta.BackendService for key, obj := range m.Objects { if key.Region != region { continue } - if !fl.Match(obj.ToGA()) { + if !fl.Match(obj.ToBeta()) { continue } - objs = append(objs, obj.ToGA()) + objs = append(objs, obj.ToBeta()) } - klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockBetaRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { +func (m *MockBetaRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -5246,32 +6357,32 @@ func (m *MockRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Dis defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockRegionDisks %v exists", key), + Message: fmt.Sprintf("MockBetaRegionBackendServices %v exists", key), } - klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "disks") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "disks", key) + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "backendServices") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "backendServices", key) - m.Objects[*key] = &MockRegionDisksObj{obj} - klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockRegionBackendServicesObj{obj} + klog.V(5).Infof("MockBetaRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockRegionDisks) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockBetaRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -5283,222 +6394,258 @@ func (m *MockRegionDisks) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockRegionDisks %v not found", key), + Message: fmt.Sprintf("MockBetaRegionBackendServices %v not found", key), } - klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaRegionBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. 
-func (m *MockRegionDisks) Obj(o *ga.Disk) *MockRegionDisksObj { - return &MockRegionDisksObj{o} +func (m *MockBetaRegionBackendServices) Obj(o *beta.BackendService) *MockRegionBackendServicesObj { + return &MockRegionBackendServicesObj{o} } -// Resize is a mock for the corresponding method. -func (m *MockRegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.RegionDisksResizeRequest) error { - if m.ResizeHook != nil { - return m.ResizeHook(ctx, key, arg0, m) +// GetHealth is a mock for the corresponding method. +func (m *MockBetaRegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *beta.ResourceGroupReference) (*beta.BackendServiceGroupHealth, error) { + if m.GetHealthHook != nil { + return m.GetHealthHook(ctx, key, arg0, m) + } + return nil, fmt.Errorf("GetHealthHook must be set") +} + +// Update is a mock for the corresponding method. +func (m *MockBetaRegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *beta.BackendService) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) } return nil } -// GCERegionDisks is a simplifying adapter for the GCE RegionDisks. -type GCERegionDisks struct { +// GCEBetaRegionBackendServices is a simplifying adapter for the GCE RegionBackendServices. +type GCEBetaRegionBackendServices struct { s *Service } -// Get the Disk named by key. -func (g *GCERegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { - klog.V(5).Infof("GCERegionDisks.Get(%v, %v): called", ctx, key) +// Get the BackendService named by key. +func (g *GCEBetaRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) { + klog.V(5).Infof("GCEBetaRegionBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCERegionDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("ga"), - Service: "RegionDisks", + Version: meta.Version("beta"), + Service: "RegionBackendServices", } - klog.V(5).Infof("GCERegionDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCERegionDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.RegionDisks.Get(projectID, key.Region, key.Name) + call := g.s.Beta.RegionBackendServices.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCERegionDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaRegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all Disk objects. -func (g *GCERegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) { - klog.V(5).Infof("GCERegionDisks.List(%v, %v, %v) called", ctx, region, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") +// List all BackendService objects. 
+func (g *GCEBetaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*beta.BackendService, error) { + klog.V(5).Infof("GCEBetaRegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("ga"), - Service: "RegionDisks", + Version: meta.Version("beta"), + Service: "RegionBackendServices", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCERegionDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) - call := g.s.GA.RegionDisks.List(projectID, region) + klog.V(5).Infof("GCEBetaRegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Beta.RegionBackendServices.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.Disk - f := func(l *ga.DiskList) error { - klog.V(5).Infof("GCERegionDisks.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*beta.BackendService + f := func(l *beta.BackendServiceList) error { + klog.V(5).Infof("GCEBetaRegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEBetaRegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert Disk with key of value obj. -func (g *GCERegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { - klog.V(5).Infof("GCERegionDisks.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert BackendService with key of value obj. 
+func (g *GCEBetaRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error { + klog.V(5).Infof("GCEBetaRegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCERegionDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("ga"), - Service: "RegionDisks", + Version: meta.Version("beta"), + Service: "RegionBackendServices", } - klog.V(5).Infof("GCERegionDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.RegionDisks.Insert(projectID, key.Region, obj) + call := g.s.Beta.RegionBackendServices.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaRegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the Disk referenced by key. -func (g *GCERegionDisks) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCERegionDisks.Delete(%v, %v): called", ctx, key) +// Delete the BackendService referenced by key. 
+func (g *GCEBetaRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaRegionBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCERegionDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("ga"), - Service: "RegionDisks", + Version: meta.Version("beta"), + Service: "RegionBackendServices", } - klog.V(5).Infof("GCERegionDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCERegionDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.RegionDisks.Delete(projectID, key.Region, key.Name) + call := g.s.Beta.RegionBackendServices.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } -// Resize is a method on GCERegionDisks. -func (g *GCERegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.RegionDisksResizeRequest) error { - klog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): called", ctx, key) +// GetHealth is a method on GCEBetaRegionBackendServices. 
+func (g *GCEBetaRegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *beta.ResourceGroupReference) (*beta.BackendServiceGroupHealth, error) { + klog.V(5).Infof("GCEBetaRegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCERegionDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) + klog.V(2).Infof("GCEBetaRegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "Resize", - Version: meta.Version("ga"), - Service: "RegionDisks", + Operation: "GetHealth", + Version: meta.Version("beta"), + Service: "RegionBackendServices", } - klog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Beta.RegionBackendServices.GetHealth(projectID, key.Region, key.Name, arg0) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEBetaRegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) + return v, err +} + +// Update is a method on GCEBetaRegionBackendServices. +func (g *GCEBetaRegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *beta.BackendService) error { + klog.V(5).Infof("GCEBetaRegionBackendServices.Update(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaRegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionBackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("beta"), + Service: "RegionBackendServices", + } + klog.V(5).Infof("GCEBetaRegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaRegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.RegionDisks.Resize(projectID, key.Region, key.Name, arg0) + call := g.s.Beta.RegionBackendServices.Update(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// Firewalls is an interface that allows for mocking of Firewalls. 
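For context, a minimal sketch of how a test might exercise the mock counterparts of the two adapter methods above. As the generated mock shows, GetHealth returns an error unless GetHealthHook is set, and Update is a no-op unless UpdateHook is set, so tests that rely on either behavior wire the hooks explicitly. The sketch assumes it lives in the same package as the generated code; passing nil for the ProjectRouter is only safe here because no hookless Insert is performed.

// newTestBetaRegionBackendServices is illustrative test setup, not part of the
// generated bindings: it returns a mock whose GetHealth reports an empty health
// set and whose Update persists the object so later Gets observe the change.
func newTestBetaRegionBackendServices() *MockBetaRegionBackendServices {
	mock := NewMockBetaRegionBackendServices(nil, map[meta.Key]*MockRegionBackendServicesObj{})
	mock.GetHealthHook = func(ctx context.Context, key *meta.Key, ref *beta.ResourceGroupReference, m *MockBetaRegionBackendServices) (*beta.BackendServiceGroupHealth, error) {
		return &beta.BackendServiceGroupHealth{}, nil
	}
	mock.UpdateHook = func(ctx context.Context, key *meta.Key, obj *beta.BackendService, m *MockBetaRegionBackendServices) error {
		m.Lock.Lock()
		defer m.Lock.Unlock()
		m.Objects[*key] = &MockRegionBackendServicesObj{obj}
		return nil
	}
	return mock
}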
-type Firewalls interface { - Get(ctx context.Context, key *meta.Key) (*ga.Firewall, error) - List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.Firewall) error +// Disks is an interface that allows for mocking of Disks. +type Disks interface { + Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error Delete(ctx context.Context, key *meta.Key) error - Update(context.Context, *meta.Key, *ga.Firewall) error + Resize(context.Context, *meta.Key, *ga.DisksResizeRequest) error } -// NewMockFirewalls returns a new mock for Firewalls. -func NewMockFirewalls(pr ProjectRouter, objs map[meta.Key]*MockFirewallsObj) *MockFirewalls { - mock := &MockFirewalls{ +// NewMockDisks returns a new mock for Disks. +func NewMockDisks(pr ProjectRouter, objs map[meta.Key]*MockDisksObj) *MockDisks { + mock := &MockDisks{ ProjectRouter: pr, Objects: objs, @@ -5509,14 +6656,14 @@ func NewMockFirewalls(pr ProjectRouter, objs map[meta.Key]*MockFirewallsObj) *Mo return mock } -// MockFirewalls is the mock for Firewalls. -type MockFirewalls struct { +// MockDisks is the mock for Disks. +type MockDisks struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockFirewallsObj + Objects map[meta.Key]*MockDisksObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -5529,11 +6676,11 @@ type MockFirewalls struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockFirewalls) (bool, *ga.Firewall, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockFirewalls) (bool, []*ga.Firewall, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *MockFirewalls) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockFirewalls) (bool, error) - UpdateHook func(context.Context, *meta.Key, *ga.Firewall, *MockFirewalls) error + GetHook func(ctx context.Context, key *meta.Key, m *MockDisks) (bool, *ga.Disk, error) + ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockDisks) (bool, []*ga.Disk, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Disk, m *MockDisks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockDisks) (bool, error) + ResizeHook func(context.Context, *meta.Key, *ga.DisksResizeRequest, *MockDisks) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -5541,10 +6688,10 @@ type MockFirewalls struct { } // Get returns the object from the mock. 
-func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, error) { +func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -5556,28 +6703,28 @@ func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, e defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - klog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockFirewalls %v not found", key), + Message: fmt.Sprintf("MockDisks %v not found", key), } - klog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock. -func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { +// List all of the objects in the mock in the given zone. +func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { + klog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -5587,28 +6734,31 @@ func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockFirewalls.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockDisks.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } - var objs []*ga.Firewall - for _, obj := range m.Objects { + var objs []*ga.Disk + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } if !fl.Match(obj.ToGA()) { continue } objs = append(objs, obj.ToGA()) } - klog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewall) error { +func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -5620,32 +6770,32 @@ func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firew defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockFirewalls %v exists", key), + Message: fmt.Sprintf("MockDisks %v exists", key), } - klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "firewalls") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "firewalls", key) + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "disks") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "disks", key) - m.Objects[*key] = &MockFirewallsObj{obj} - klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockDisksObj{obj} + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockFirewalls) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockDisks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -5657,222 +6807,222 @@ func (m *MockFirewalls) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockFirewalls %v not found", key), + Message: fmt.Sprintf("MockDisks %v not found", key), } - klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockFirewalls) Obj(o *ga.Firewall) *MockFirewallsObj { - return &MockFirewallsObj{o} +func (m *MockDisks) Obj(o *ga.Disk) *MockDisksObj { + return &MockDisksObj{o} } -// Update is a mock for the corresponding method. -func (m *MockFirewalls) Update(ctx context.Context, key *meta.Key, arg0 *ga.Firewall) error { - if m.UpdateHook != nil { - return m.UpdateHook(ctx, key, arg0, m) +// Resize is a mock for the corresponding method. 
+func (m *MockDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.DisksResizeRequest) error { + if m.ResizeHook != nil { + return m.ResizeHook(ctx, key, arg0, m) } return nil } -// GCEFirewalls is a simplifying adapter for the GCE Firewalls. -type GCEFirewalls struct { +// GCEDisks is a simplifying adapter for the GCE Disks. +type GCEDisks struct { s *Service } -// Get the Firewall named by key. -func (g *GCEFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, error) { - klog.V(5).Infof("GCEFirewalls.Get(%v, %v): called", ctx, key) +// Get the Disk named by key. +func (g *GCEDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { + klog.V(5).Infof("GCEDisks.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEFirewalls.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", Version: meta.Version("ga"), - Service: "Firewalls", + Service: "Disks", } - klog.V(5).Infof("GCEFirewalls.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEFirewalls.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.Firewalls.Get(projectID, key.Name) + call := g.s.GA.Disks.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEFirewalls.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all Firewall objects. -func (g *GCEFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { - klog.V(5).Infof("GCEFirewalls.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") +// List all Disk objects. +func (g *GCEDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) { + klog.V(5).Infof("GCEDisks.List(%v, %v, %v) called", ctx, zone, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", Version: meta.Version("ga"), - Service: "Firewalls", + Service: "Disks", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEFirewalls.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.GA.Firewalls.List(projectID) + klog.V(5).Infof("GCEDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + call := g.s.GA.Disks.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.Firewall - f := func(l *ga.FirewallList) error { - klog.V(5).Infof("GCEFirewalls.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*ga.Disk + f := func(l *ga.DiskList) error { + klog.V(5).Infof("GCEDisks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert Firewall with key of value obj. -func (g *GCEFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewall) error { - klog.V(5).Infof("GCEFirewalls.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert Disk with key of value obj. +func (g *GCEDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { + klog.V(5).Infof("GCEDisks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEFirewalls.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", Version: meta.Version("ga"), - Service: "Firewalls", + Service: "Disks", } - klog.V(5).Infof("GCEFirewalls.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.Firewalls.Insert(projectID, obj) + call := g.s.GA.Disks.Insert(projectID, key.Zone, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the Firewall referenced by key. -func (g *GCEFirewalls) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEFirewalls.Delete(%v, %v): called", ctx, key) +// Delete the Disk referenced by key. 
+func (g *GCEDisks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEDisks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEFirewalls.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", Version: meta.Version("ga"), - Service: "Firewalls", + Service: "Disks", } - klog.V(5).Infof("GCEFirewalls.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEFirewalls.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.Firewalls.Delete(projectID, key.Name) - + call := g.s.GA.Disks.Delete(projectID, key.Zone, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) return err } -// Update is a method on GCEFirewalls. -func (g *GCEFirewalls) Update(ctx context.Context, key *meta.Key, arg0 *ga.Firewall) error { - klog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): called", ctx, key) +// Resize is a method on GCEDisks. +func (g *GCEDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.DisksResizeRequest) error { + klog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEFirewalls.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "Update", + Operation: "Resize", Version: meta.Version("ga"), - Service: "Firewalls", + Service: "Disks", } - klog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.Firewalls.Update(projectID, key.Name, arg0) + call := g.s.GA.Disks.Resize(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) 
= %+v", ctx, key, err) return err } -// ForwardingRules is an interface that allows for mocking of ForwardingRules. -type ForwardingRules interface { - Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) - List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error +// RegionDisks is an interface that allows for mocking of RegionDisks. +type RegionDisks interface { + Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) + List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error Delete(ctx context.Context, key *meta.Key) error + Resize(context.Context, *meta.Key, *ga.RegionDisksResizeRequest) error } -// NewMockForwardingRules returns a new mock for ForwardingRules. -func NewMockForwardingRules(pr ProjectRouter, objs map[meta.Key]*MockForwardingRulesObj) *MockForwardingRules { - mock := &MockForwardingRules{ +// NewMockRegionDisks returns a new mock for RegionDisks. +func NewMockRegionDisks(pr ProjectRouter, objs map[meta.Key]*MockRegionDisksObj) *MockRegionDisks { + mock := &MockRegionDisks{ ProjectRouter: pr, Objects: objs, @@ -5883,14 +7033,14 @@ func NewMockForwardingRules(pr ProjectRouter, objs map[meta.Key]*MockForwardingR return mock } -// MockForwardingRules is the mock for ForwardingRules. -type MockForwardingRules struct { +// MockRegionDisks is the mock for RegionDisks. +type MockRegionDisks struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockForwardingRulesObj + Objects map[meta.Key]*MockRegionDisksObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -5903,10 +7053,11 @@ type MockForwardingRules struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockForwardingRules) (bool, *ga.ForwardingRule, error) - ListHook func(ctx context.Context, region string, fl *filter.F, m *MockForwardingRules) (bool, []*ga.ForwardingRule, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule, m *MockForwardingRules) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockForwardingRules) (bool, error) + GetHook func(ctx context.Context, key *meta.Key, m *MockRegionDisks) (bool, *ga.Disk, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockRegionDisks) (bool, []*ga.Disk, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Disk, m *MockRegionDisks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockRegionDisks) (bool, error) + ResizeHook func(context.Context, *meta.Key, *ga.RegionDisksResizeRequest, *MockRegionDisks) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -5914,10 +7065,10 @@ type MockForwardingRules struct { } // Get returns the object from the mock. 
-func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { +func (m *MockRegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -5929,28 +7080,28 @@ func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.Forwa defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockForwardingRules %v not found", key), + Message: fmt.Sprintf("MockRegionDisks %v not found", key), } - klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock in the given region. -func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { +func (m *MockRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -5960,12 +7111,12 @@ func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filte if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*ga.ForwardingRule + var objs []*ga.Disk for key, obj := range m.Objects { if key.Region != region { continue @@ -5976,15 +7127,15 @@ func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filte objs = append(objs, obj.ToGA()) } - klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { +func (m *MockRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -5996,32 +7147,32 @@ func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockForwardingRules %v exists", key), + Message: fmt.Sprintf("MockRegionDisks %v exists", key), } - klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "forwardingRules") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "forwardingRules", key) + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "disks") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "disks", key) - m.Objects[*key] = &MockForwardingRulesObj{obj} - klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockRegionDisksObj{obj} + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockForwardingRules) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockRegionDisks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -6033,180 +7184,222 @@ func (m *MockForwardingRules) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockForwardingRules %v not found", key), + Message: fmt.Sprintf("MockRegionDisks %v not found", key), } - klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockForwardingRules) Obj(o *ga.ForwardingRule) *MockForwardingRulesObj { - return &MockForwardingRulesObj{o} +func (m *MockRegionDisks) Obj(o *ga.Disk) *MockRegionDisksObj { + return &MockRegionDisksObj{o} } -// GCEForwardingRules is a simplifying adapter for the GCE ForwardingRules. -type GCEForwardingRules struct { +// Resize is a mock for the corresponding method. 
+func (m *MockRegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.RegionDisksResizeRequest) error { + if m.ResizeHook != nil { + return m.ResizeHook(ctx, key, arg0, m) + } + return nil +} + +// GCERegionDisks is a simplifying adapter for the GCE RegionDisks. +type GCERegionDisks struct { s *Service } -// Get the ForwardingRule named by key. -func (g *GCEForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { - klog.V(5).Infof("GCEForwardingRules.Get(%v, %v): called", ctx, key) +// Get the Disk named by key. +func (g *GCERegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { + klog.V(5).Infof("GCERegionDisks.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", Version: meta.Version("ga"), - Service: "ForwardingRules", + Service: "RegionDisks", } - klog.V(5).Infof("GCEForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.ForwardingRules.Get(projectID, key.Region, key.Name) + call := g.s.GA.RegionDisks.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegionDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all ForwardingRule objects. -func (g *GCEForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { - klog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v) called", ctx, region, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") +// List all Disk objects. 
+func (g *GCERegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) { + klog.V(5).Infof("GCERegionDisks.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", Version: meta.Version("ga"), - Service: "ForwardingRules", + Service: "RegionDisks", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) - call := g.s.GA.ForwardingRules.List(projectID, region) + klog.V(5).Infof("GCERegionDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.GA.RegionDisks.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.ForwardingRule - f := func(l *ga.ForwardingRuleList) error { - klog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*ga.Disk + f := func(l *ga.DiskList) error { + klog.V(5).Infof("GCERegionDisks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert ForwardingRule with key of value obj. -func (g *GCEForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { - klog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert Disk with key of value obj. 
+func (g *GCERegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { + klog.V(5).Infof("GCERegionDisks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", Version: meta.Version("ga"), - Service: "ForwardingRules", + Service: "RegionDisks", } - klog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.ForwardingRules.Insert(projectID, key.Region, obj) + call := g.s.GA.RegionDisks.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the ForwardingRule referenced by key. -func (g *GCEForwardingRules) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): called", ctx, key) +// Delete the Disk referenced by key. 
+func (g *GCERegionDisks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCERegionDisks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", Version: meta.Version("ga"), - Service: "ForwardingRules", + Service: "RegionDisks", } - klog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.ForwardingRules.Delete(projectID, key.Region, key.Name) + call := g.s.GA.RegionDisks.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } -// AlphaForwardingRules is an interface that allows for mocking of ForwardingRules. -type AlphaForwardingRules interface { - Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) - List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) - Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error +// Resize is a method on GCERegionDisks. +func (g *GCERegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.RegionDisksResizeRequest) error { + klog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCERegionDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Resize", + Version: meta.Version("ga"), + Service: "RegionDisks", + } + klog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.RegionDisks.Resize(projectID, key.Region, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// Firewalls is an interface that allows for mocking of Firewalls. 
+type Firewalls interface { + Get(ctx context.Context, key *meta.Key) (*ga.Firewall, error) + List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.Firewall) error Delete(ctx context.Context, key *meta.Key) error + Update(context.Context, *meta.Key, *ga.Firewall) error } -// NewMockAlphaForwardingRules returns a new mock for ForwardingRules. -func NewMockAlphaForwardingRules(pr ProjectRouter, objs map[meta.Key]*MockForwardingRulesObj) *MockAlphaForwardingRules { - mock := &MockAlphaForwardingRules{ +// NewMockFirewalls returns a new mock for Firewalls. +func NewMockFirewalls(pr ProjectRouter, objs map[meta.Key]*MockFirewallsObj) *MockFirewalls { + mock := &MockFirewalls{ ProjectRouter: pr, Objects: objs, @@ -6217,14 +7410,14 @@ func NewMockAlphaForwardingRules(pr ProjectRouter, objs map[meta.Key]*MockForwar return mock } -// MockAlphaForwardingRules is the mock for ForwardingRules. -type MockAlphaForwardingRules struct { +// MockFirewalls is the mock for Firewalls. +type MockFirewalls struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockForwardingRulesObj + Objects map[meta.Key]*MockFirewallsObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -6237,10 +7430,11 @@ type MockAlphaForwardingRules struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaForwardingRules) (bool, *alpha.ForwardingRule, error) - ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAlphaForwardingRules) (bool, []*alpha.ForwardingRule, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule, m *MockAlphaForwardingRules) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaForwardingRules) (bool, error) + GetHook func(ctx context.Context, key *meta.Key, m *MockFirewalls) (bool, *ga.Firewall, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockFirewalls) (bool, []*ga.Firewall, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *MockFirewalls) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockFirewalls) (bool, error) + UpdateHook func(context.Context, *meta.Key, *ga.Firewall, *MockFirewalls) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -6248,10 +7442,10 @@ type MockAlphaForwardingRules struct { } // Get returns the object from the mock. 
-func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) { +func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -6263,28 +7457,28 @@ func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alp defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToAlpha() - klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToGA() + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), + Message: fmt.Sprintf("MockFirewalls %v not found", key), } - klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock in the given region. -func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { +// List all of the objects in the mock. +func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -6294,31 +7488,28 @@ func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl * if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockFirewalls.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*alpha.ForwardingRule - for key, obj := range m.Objects { - if key.Region != region { - continue - } - if !fl.Match(obj.ToAlpha()) { + var objs []*ga.Firewall + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { continue } - objs = append(objs, obj.ToAlpha()) + objs = append(objs, obj.ToGA()) } - klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error { +func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewall) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -6330,32 +7521,32 @@ func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, ob defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockAlphaForwardingRules %v exists", key), + Message: fmt.Sprintf("MockFirewalls %v exists", key), } - klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "forwardingRules") - obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "forwardingRules", key) + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "firewalls") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "firewalls", key) - m.Objects[*key] = &MockForwardingRulesObj{obj} - klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockFirewallsObj{obj} + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockFirewalls) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -6367,181 +7558,223 @@ func (m *MockAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) er defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), + Message: fmt.Sprintf("MockFirewalls %v not found", key), } - klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockAlphaForwardingRules) Obj(o *alpha.ForwardingRule) *MockForwardingRulesObj { - return &MockForwardingRulesObj{o} +func (m *MockFirewalls) Obj(o *ga.Firewall) *MockFirewallsObj { + return &MockFirewallsObj{o} } -// GCEAlphaForwardingRules is a simplifying adapter for the GCE ForwardingRules. 
-type GCEAlphaForwardingRules struct { +// Update is a mock for the corresponding method. +func (m *MockFirewalls) Update(ctx context.Context, key *meta.Key, arg0 *ga.Firewall) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) + } + return nil +} + +// GCEFirewalls is a simplifying adapter for the GCE Firewalls. +type GCEFirewalls struct { s *Service } -// Get the ForwardingRule named by key. -func (g *GCEAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) { - klog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): called", ctx, key) +// Get the Firewall named by key. +func (g *GCEFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, error) { + klog.V(5).Infof("GCEFirewalls.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("alpha"), - Service: "ForwardingRules", + Version: meta.Version("ga"), + Service: "Firewalls", } - klog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Alpha.ForwardingRules.Get(projectID, key.Region, key.Name) + call := g.s.GA.Firewalls.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEFirewalls.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all ForwardingRule objects. -func (g *GCEAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { - klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v) called", ctx, region, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") +// List all Firewall objects. 
+func (g *GCEFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { + klog.V(5).Infof("GCEFirewalls.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("alpha"), - Service: "ForwardingRules", + Version: meta.Version("ga"), + Service: "Firewalls", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) - call := g.s.Alpha.ForwardingRules.List(projectID, region) + klog.V(5).Infof("GCEFirewalls.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.Firewalls.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*alpha.ForwardingRule - f := func(l *alpha.ForwardingRuleList) error { - klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*ga.Firewall + f := func(l *ga.FirewallList) error { + klog.V(5).Infof("GCEFirewalls.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert ForwardingRule with key of value obj. -func (g *GCEAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error { - klog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert Firewall with key of value obj. 
+func (g *GCEFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewall) error { + klog.V(5).Infof("GCEFirewalls.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("alpha"), - Service: "ForwardingRules", + Version: meta.Version("ga"), + Service: "Firewalls", } - klog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.Alpha.ForwardingRules.Insert(projectID, key.Region, obj) + call := g.s.GA.Firewalls.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the ForwardingRule referenced by key. -func (g *GCEAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): called", ctx, key) +// Delete the Firewall referenced by key. 
+func (g *GCEFirewalls) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEFirewalls.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("alpha"), - Service: "ForwardingRules", + Version: meta.Version("ga"), + Service: "Firewalls", } - klog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Alpha.ForwardingRules.Delete(projectID, key.Region, key.Name) + call := g.s.GA.Firewalls.Delete(projectID, key.Name) + call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } -// GlobalForwardingRules is an interface that allows for mocking of GlobalForwardingRules. -type GlobalForwardingRules interface { +// Update is a method on GCEFirewalls. +func (g *GCEFirewalls) Update(ctx context.Context, key *meta.Key, arg0 *ga.Firewall) error { + klog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEFirewalls.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("ga"), + Service: "Firewalls", + } + klog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.Firewalls.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// ForwardingRules is an interface that allows for mocking of ForwardingRules. 
+type ForwardingRules interface { Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) - List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) + List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error Delete(ctx context.Context, key *meta.Key) error SetTarget(context.Context, *meta.Key, *ga.TargetReference) error } -// NewMockGlobalForwardingRules returns a new mock for GlobalForwardingRules. -func NewMockGlobalForwardingRules(pr ProjectRouter, objs map[meta.Key]*MockGlobalForwardingRulesObj) *MockGlobalForwardingRules { - mock := &MockGlobalForwardingRules{ +// NewMockForwardingRules returns a new mock for ForwardingRules. +func NewMockForwardingRules(pr ProjectRouter, objs map[meta.Key]*MockForwardingRulesObj) *MockForwardingRules { + mock := &MockForwardingRules{ ProjectRouter: pr, Objects: objs, @@ -6552,14 +7785,14 @@ func NewMockGlobalForwardingRules(pr ProjectRouter, objs map[meta.Key]*MockGloba return mock } -// MockGlobalForwardingRules is the mock for GlobalForwardingRules. -type MockGlobalForwardingRules struct { +// MockForwardingRules is the mock for ForwardingRules. +type MockForwardingRules struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockGlobalForwardingRulesObj + Objects map[meta.Key]*MockForwardingRulesObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -6572,11 +7805,11 @@ type MockGlobalForwardingRules struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockGlobalForwardingRules) (bool, *ga.ForwardingRule, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockGlobalForwardingRules) (bool, []*ga.ForwardingRule, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule, m *MockGlobalForwardingRules) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockGlobalForwardingRules) (bool, error) - SetTargetHook func(context.Context, *meta.Key, *ga.TargetReference, *MockGlobalForwardingRules) error + GetHook func(ctx context.Context, key *meta.Key, m *MockForwardingRules) (bool, *ga.ForwardingRule, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockForwardingRules) (bool, []*ga.ForwardingRule, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule, m *MockForwardingRules) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockForwardingRules) (bool, error) + SetTargetHook func(context.Context, *meta.Key, *ga.TargetReference, *MockForwardingRules) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -6584,10 +7817,10 @@ type MockGlobalForwardingRules struct { } // Get returns the object from the mock. 
-func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { +func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -6599,28 +7832,28 @@ func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key), + Message: fmt.Sprintf("MockForwardingRules %v not found", key), } - klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock. -func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) { +// List all of the objects in the mock in the given region. +func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -6630,28 +7863,31 @@ func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]* if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } var objs []*ga.ForwardingRule - for _, obj := range m.Objects { - if !fl.Match(obj.ToGA()) { - continue + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToGA()) { + continue } objs = append(objs, obj.ToGA()) } - klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { +func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -6663,15 +7899,15 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockGlobalForwardingRules %v exists", key), + Message: fmt.Sprintf("MockForwardingRules %v exists", key), } - klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -6679,16 +7915,16 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o projectID := m.ProjectRouter.ProjectID(ctx, "ga", "forwardingRules") obj.SelfLink = SelfLink(meta.VersionGA, projectID, "forwardingRules", key) - m.Objects[*key] = &MockGlobalForwardingRulesObj{obj} - klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockForwardingRulesObj{obj} + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -6700,223 +7936,222 @@ func (m *MockGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) e defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key), + Message: fmt.Sprintf("MockForwardingRules %v not found", key), } - klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. 
-func (m *MockGlobalForwardingRules) Obj(o *ga.ForwardingRule) *MockGlobalForwardingRulesObj { - return &MockGlobalForwardingRulesObj{o} +func (m *MockForwardingRules) Obj(o *ga.ForwardingRule) *MockForwardingRulesObj { + return &MockForwardingRulesObj{o} } // SetTarget is a mock for the corresponding method. -func (m *MockGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *ga.TargetReference) error { +func (m *MockForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *ga.TargetReference) error { if m.SetTargetHook != nil { return m.SetTargetHook(ctx, key, arg0, m) } return nil } -// GCEGlobalForwardingRules is a simplifying adapter for the GCE GlobalForwardingRules. -type GCEGlobalForwardingRules struct { +// GCEForwardingRules is a simplifying adapter for the GCE ForwardingRules. +type GCEForwardingRules struct { s *Service } // Get the ForwardingRule named by key. -func (g *GCEGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { - klog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): called", ctx, key) +func (g *GCEForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { + klog.V(5).Infof("GCEForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEGlobalForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", Version: meta.Version("ga"), - Service: "GlobalForwardingRules", + Service: "ForwardingRules", } - klog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.GlobalForwardingRules.Get(projectID, key.Name) + call := g.s.GA.ForwardingRules.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all ForwardingRule objects. 
-func (g *GCEGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) { - klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") +func (g *GCEForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { + klog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", Version: meta.Version("ga"), - Service: "GlobalForwardingRules", + Service: "ForwardingRules", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.GA.GlobalForwardingRules.List(projectID) + klog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.GA.ForwardingRules.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.ForwardingRule f := func(l *ga.ForwardingRuleList) error { - klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // Insert ForwardingRule with key of value obj. 
-func (g *GCEGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { - klog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) +func (g *GCEForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { + klog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", Version: meta.Version("ga"), - Service: "GlobalForwardingRules", + Service: "ForwardingRules", } - klog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.GlobalForwardingRules.Insert(projectID, obj) + call := g.s.GA.ForwardingRules.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the ForwardingRule referenced by key. 
-func (g *GCEGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): called", ctx, key) +func (g *GCEForwardingRules) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEGlobalForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", Version: meta.Version("ga"), - Service: "GlobalForwardingRules", + Service: "ForwardingRules", } - klog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.GlobalForwardingRules.Delete(projectID, key.Name) - + call := g.s.GA.ForwardingRules.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } -// SetTarget is a method on GCEGlobalForwardingRules. -func (g *GCEGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *ga.TargetReference) error { - klog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): called", ctx, key) +// SetTarget is a method on GCEForwardingRules. 
+func (g *GCEForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *ga.TargetReference) error { + klog.V(5).Infof("GCEForwardingRules.SetTarget(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEForwardingRules.SetTarget(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "SetTarget", Version: meta.Version("ga"), - Service: "GlobalForwardingRules", + Service: "ForwardingRules", } - klog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.SetTarget(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.SetTarget(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.GlobalForwardingRules.SetTarget(projectID, key.Name, arg0) + call := g.s.GA.ForwardingRules.SetTarget(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } -// HealthChecks is an interface that allows for mocking of HealthChecks. -type HealthChecks interface { - Get(ctx context.Context, key *meta.Key) (*ga.HealthCheck, error) - List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.HealthCheck) error +// AlphaForwardingRules is an interface that allows for mocking of ForwardingRules. +type AlphaForwardingRules interface { + Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error Delete(ctx context.Context, key *meta.Key) error - Update(context.Context, *meta.Key, *ga.HealthCheck) error + SetTarget(context.Context, *meta.Key, *alpha.TargetReference) error } -// NewMockHealthChecks returns a new mock for HealthChecks. -func NewMockHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHealthChecksObj) *MockHealthChecks { - mock := &MockHealthChecks{ +// NewMockAlphaForwardingRules returns a new mock for ForwardingRules. +func NewMockAlphaForwardingRules(pr ProjectRouter, objs map[meta.Key]*MockForwardingRulesObj) *MockAlphaForwardingRules { + mock := &MockAlphaForwardingRules{ ProjectRouter: pr, Objects: objs, @@ -6927,14 +8162,14 @@ func NewMockHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHealthChecksOb return mock } -// MockHealthChecks is the mock for HealthChecks. -type MockHealthChecks struct { +// MockAlphaForwardingRules is the mock for ForwardingRules. 
+type MockAlphaForwardingRules struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockHealthChecksObj + Objects map[meta.Key]*MockForwardingRulesObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -6947,11 +8182,11 @@ type MockHealthChecks struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockHealthChecks) (bool, *ga.HealthCheck, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockHealthChecks) (bool, []*ga.HealthCheck, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.HealthCheck, m *MockHealthChecks) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockHealthChecks) (bool, error) - UpdateHook func(context.Context, *meta.Key, *ga.HealthCheck, *MockHealthChecks) error + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaForwardingRules) (bool, *alpha.ForwardingRule, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAlphaForwardingRules) (bool, []*alpha.ForwardingRule, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule, m *MockAlphaForwardingRules) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaForwardingRules) (bool, error) + SetTargetHook func(context.Context, *meta.Key, *alpha.TargetReference, *MockAlphaForwardingRules) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -6959,10 +8194,10 @@ type MockHealthChecks struct { } // Get returns the object from the mock. -func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCheck, error) { +func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -6974,28 +8209,28 @@ func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCh defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToGA() - klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockHealthChecks %v not found", key), + Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), } - klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock. -func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) { +// List all of the objects in the mock in the given region. 
+func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -7005,28 +8240,31 @@ func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Health if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*ga.HealthCheck - for _, obj := range m.Objects { - if !fl.Match(obj.ToGA()) { + var objs []*alpha.ForwardingRule + for key, obj := range m.Objects { + if key.Region != region { continue } - objs = append(objs, obj.ToGA()) + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) } - klog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HealthCheck) error { +func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -7038,32 +8276,32 @@ func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.He defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockHealthChecks %v exists", key), + Message: fmt.Sprintf("MockAlphaForwardingRules %v exists", key), } - klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "healthChecks") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "healthChecks", key) + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "forwardingRules") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "forwardingRules", key) - m.Objects[*key] = &MockHealthChecksObj{obj} - klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockForwardingRulesObj{obj} + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockHealthChecks) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -7075,223 +8313,222 @@ func (m *MockHealthChecks) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockHealthChecks %v not found", key), + Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), } - klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockHealthChecks) Obj(o *ga.HealthCheck) *MockHealthChecksObj { - return &MockHealthChecksObj{o} +func (m *MockAlphaForwardingRules) Obj(o *alpha.ForwardingRule) *MockForwardingRulesObj { + return &MockForwardingRulesObj{o} } -// Update is a mock for the corresponding method. -func (m *MockHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HealthCheck) error { - if m.UpdateHook != nil { - return m.UpdateHook(ctx, key, arg0, m) +// SetTarget is a mock for the corresponding method. +func (m *MockAlphaForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *alpha.TargetReference) error { + if m.SetTargetHook != nil { + return m.SetTargetHook(ctx, key, arg0, m) } return nil } -// GCEHealthChecks is a simplifying adapter for the GCE HealthChecks. -type GCEHealthChecks struct { +// GCEAlphaForwardingRules is a simplifying adapter for the GCE ForwardingRules. +type GCEAlphaForwardingRules struct { s *Service } -// Get the HealthCheck named by key. -func (g *GCEHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCheck, error) { - klog.V(5).Infof("GCEHealthChecks.Get(%v, %v): called", ctx, key) +// Get the ForwardingRule named by key. 
+func (g *GCEAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) { + klog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("ga"), - Service: "HealthChecks", + Version: meta.Version("alpha"), + Service: "ForwardingRules", } - klog.V(5).Infof("GCEHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.HealthChecks.Get(projectID, key.Name) + call := g.s.Alpha.ForwardingRules.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all HealthCheck objects. -func (g *GCEHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) { - klog.V(5).Infof("GCEHealthChecks.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") +// List all ForwardingRule objects. +func (g *GCEAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("ga"), - Service: "HealthChecks", + Version: meta.Version("alpha"), + Service: "ForwardingRules", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.GA.HealthChecks.List(projectID) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Alpha.ForwardingRules.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.HealthCheck - f := func(l *ga.HealthCheckList) error { - klog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*alpha.ForwardingRule + f := func(l *alpha.ForwardingRuleList) error { + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert HealthCheck with key of value obj. -func (g *GCEHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HealthCheck) error { - klog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert ForwardingRule with key of value obj. +func (g *GCEAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error { + klog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("ga"), - Service: "HealthChecks", + Version: meta.Version("alpha"), + Service: "ForwardingRules", } - klog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.HealthChecks.Insert(projectID, obj) + call := g.s.Alpha.ForwardingRules.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the HealthCheck referenced by key. -func (g *GCEHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): called", ctx, key) +// Delete the ForwardingRule referenced by key. 
+func (g *GCEAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("ga"), - Service: "HealthChecks", + Version: meta.Version("alpha"), + Service: "ForwardingRules", } - klog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.HealthChecks.Delete(projectID, key.Name) - + call := g.s.Alpha.ForwardingRules.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } -// Update is a method on GCEHealthChecks. -func (g *GCEHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HealthCheck) error { - klog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): called", ctx, key) +// SetTarget is a method on GCEAlphaForwardingRules. 
+func (g *GCEAlphaForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *alpha.TargetReference) error { + klog.V(5).Infof("GCEAlphaForwardingRules.SetTarget(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaForwardingRules.SetTarget(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "Update", - Version: meta.Version("ga"), - Service: "HealthChecks", + Operation: "SetTarget", + Version: meta.Version("alpha"), + Service: "ForwardingRules", } - klog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.SetTarget(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.SetTarget(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.HealthChecks.Update(projectID, key.Name, arg0) + call := g.s.Alpha.ForwardingRules.SetTarget(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } -// AlphaHealthChecks is an interface that allows for mocking of HealthChecks. -type AlphaHealthChecks interface { - Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) - List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) - Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error +// BetaForwardingRules is an interface that allows for mocking of ForwardingRules. +type BetaForwardingRules interface { + Get(ctx context.Context, key *meta.Key) (*beta.ForwardingRule, error) + List(ctx context.Context, region string, fl *filter.F) ([]*beta.ForwardingRule, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.ForwardingRule) error Delete(ctx context.Context, key *meta.Key) error - Update(context.Context, *meta.Key, *alpha.HealthCheck) error + SetTarget(context.Context, *meta.Key, *beta.TargetReference) error } -// NewMockAlphaHealthChecks returns a new mock for HealthChecks. -func NewMockAlphaHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHealthChecksObj) *MockAlphaHealthChecks { - mock := &MockAlphaHealthChecks{ +// NewMockBetaForwardingRules returns a new mock for ForwardingRules. +func NewMockBetaForwardingRules(pr ProjectRouter, objs map[meta.Key]*MockForwardingRulesObj) *MockBetaForwardingRules { + mock := &MockBetaForwardingRules{ ProjectRouter: pr, Objects: objs, @@ -7302,14 +8539,14 @@ func NewMockAlphaHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHealthChe return mock } -// MockAlphaHealthChecks is the mock for HealthChecks. -type MockAlphaHealthChecks struct { +// MockBetaForwardingRules is the mock for ForwardingRules. 
+type MockBetaForwardingRules struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockHealthChecksObj + Objects map[meta.Key]*MockForwardingRulesObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -7322,11 +8559,11 @@ type MockAlphaHealthChecks struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaHealthChecks) (bool, *alpha.HealthCheck, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockAlphaHealthChecks) (bool, []*alpha.HealthCheck, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck, m *MockAlphaHealthChecks) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaHealthChecks) (bool, error) - UpdateHook func(context.Context, *meta.Key, *alpha.HealthCheck, *MockAlphaHealthChecks) error + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaForwardingRules) (bool, *beta.ForwardingRule, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaForwardingRules) (bool, []*beta.ForwardingRule, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.ForwardingRule, m *MockBetaForwardingRules) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaForwardingRules) (bool, error) + SetTargetHook func(context.Context, *meta.Key, *beta.TargetReference, *MockBetaForwardingRules) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -7334,10 +8571,10 @@ type MockAlphaHealthChecks struct { } // Get returns the object from the mock. -func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) { +func (m *MockBetaForwardingRules) Get(ctx context.Context, key *meta.Key) (*beta.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -7349,28 +8586,28 @@ func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha. defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToAlpha() - klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key), + Message: fmt.Sprintf("MockBetaForwardingRules %v not found", key), } - klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock. 
-func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) { +// List all of the objects in the mock in the given region. +func (m *MockBetaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*beta.ForwardingRule, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockBetaForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -7380,28 +8617,31 @@ func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alph if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*alpha.HealthCheck - for _, obj := range m.Objects { - if !fl.Match(obj.ToAlpha()) { + var objs []*beta.ForwardingRule + for key, obj := range m.Objects { + if key.Region != region { continue } - objs = append(objs, obj.ToAlpha()) + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) } - klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error { +func (m *MockBetaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *beta.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -7413,32 +8653,32 @@ func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockAlphaHealthChecks %v exists", key), + Message: fmt.Sprintf("MockBetaForwardingRules %v exists", key), } - klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "healthChecks") - obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "healthChecks", key) + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "forwardingRules") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "forwardingRules", key) - m.Objects[*key] = &MockHealthChecksObj{obj} - klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockForwardingRulesObj{obj} + klog.V(5).Infof("MockBetaForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockBetaForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -7450,223 +8690,222 @@ func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key), + Message: fmt.Sprintf("MockBetaForwardingRules %v not found", key), } - klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockAlphaHealthChecks) Obj(o *alpha.HealthCheck) *MockHealthChecksObj { - return &MockHealthChecksObj{o} +func (m *MockBetaForwardingRules) Obj(o *beta.ForwardingRule) *MockForwardingRulesObj { + return &MockForwardingRulesObj{o} } -// Update is a mock for the corresponding method. -func (m *MockAlphaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *alpha.HealthCheck) error { - if m.UpdateHook != nil { - return m.UpdateHook(ctx, key, arg0, m) +// SetTarget is a mock for the corresponding method. +func (m *MockBetaForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *beta.TargetReference) error { + if m.SetTargetHook != nil { + return m.SetTargetHook(ctx, key, arg0, m) } return nil } -// GCEAlphaHealthChecks is a simplifying adapter for the GCE HealthChecks. -type GCEAlphaHealthChecks struct { +// GCEBetaForwardingRules is a simplifying adapter for the GCE ForwardingRules. +type GCEBetaForwardingRules struct { s *Service } -// Get the HealthCheck named by key. -func (g *GCEAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) { - klog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): called", ctx, key) +// Get the ForwardingRule named by key. 
+func (g *GCEBetaForwardingRules) Get(ctx context.Context, key *meta.Key) (*beta.ForwardingRule, error) { + klog.V(5).Infof("GCEBetaForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("alpha"), - Service: "HealthChecks", + Version: meta.Version("beta"), + Service: "ForwardingRules", } - klog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Alpha.HealthChecks.Get(projectID, key.Name) + call := g.s.Beta.ForwardingRules.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all HealthCheck objects. -func (g *GCEAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) { - klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") +// List all ForwardingRule objects. +func (g *GCEBetaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*beta.ForwardingRule, error) { + klog.V(5).Infof("GCEBetaForwardingRules.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("alpha"), - Service: "HealthChecks", + Version: meta.Version("beta"), + Service: "ForwardingRules", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.Alpha.HealthChecks.List(projectID) + klog.V(5).Infof("GCEBetaForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Beta.ForwardingRules.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*alpha.HealthCheck - f := func(l *alpha.HealthCheckList) error { - klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*beta.ForwardingRule + f := func(l *beta.ForwardingRuleList) error { + klog.V(5).Infof("GCEBetaForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEBetaForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert HealthCheck with key of value obj. -func (g *GCEAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error { - klog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert ForwardingRule with key of value obj. +func (g *GCEBetaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *beta.ForwardingRule) error { + klog.V(5).Infof("GCEBetaForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("alpha"), - Service: "HealthChecks", + Version: meta.Version("beta"), + Service: "ForwardingRules", } - klog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.Alpha.HealthChecks.Insert(projectID, obj) + call := g.s.Beta.ForwardingRules.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the HealthCheck referenced by key. -func (g *GCEAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): called", ctx, key) +// Delete the ForwardingRule referenced by key. 
+func (g *GCEBetaForwardingRules) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaForwardingRules.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("alpha"), - Service: "HealthChecks", + Version: meta.Version("beta"), + Service: "ForwardingRules", } - klog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Alpha.HealthChecks.Delete(projectID, key.Name) - + call := g.s.Beta.ForwardingRules.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } -// Update is a method on GCEAlphaHealthChecks. -func (g *GCEAlphaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *alpha.HealthCheck) error { - klog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): called", ctx, key) +// SetTarget is a method on GCEBetaForwardingRules. 
+func (g *GCEBetaForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *beta.TargetReference) error { + klog.V(5).Infof("GCEBetaForwardingRules.SetTarget(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaForwardingRules.SetTarget(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "Update", - Version: meta.Version("alpha"), - Service: "HealthChecks", + Operation: "SetTarget", + Version: meta.Version("beta"), + Service: "ForwardingRules", } - klog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaForwardingRules.SetTarget(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaForwardingRules.SetTarget(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Alpha.HealthChecks.Update(projectID, key.Name, arg0) + call := g.s.Beta.ForwardingRules.SetTarget(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } -// BetaHealthChecks is an interface that allows for mocking of HealthChecks. -type BetaHealthChecks interface { - Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) - List(ctx context.Context, fl *filter.F) ([]*beta.HealthCheck, error) - Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error +// AlphaGlobalForwardingRules is an interface that allows for mocking of GlobalForwardingRules. +type AlphaGlobalForwardingRules interface { + Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) + List(ctx context.Context, fl *filter.F) ([]*alpha.ForwardingRule, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error Delete(ctx context.Context, key *meta.Key) error - Update(context.Context, *meta.Key, *beta.HealthCheck) error + SetTarget(context.Context, *meta.Key, *alpha.TargetReference) error } -// NewMockBetaHealthChecks returns a new mock for HealthChecks. -func NewMockBetaHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHealthChecksObj) *MockBetaHealthChecks { - mock := &MockBetaHealthChecks{ +// NewMockAlphaGlobalForwardingRules returns a new mock for GlobalForwardingRules. +func NewMockAlphaGlobalForwardingRules(pr ProjectRouter, objs map[meta.Key]*MockGlobalForwardingRulesObj) *MockAlphaGlobalForwardingRules { + mock := &MockAlphaGlobalForwardingRules{ ProjectRouter: pr, Objects: objs, @@ -7677,14 +8916,14 @@ func NewMockBetaHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHealthChec return mock } -// MockBetaHealthChecks is the mock for HealthChecks. 
-type MockBetaHealthChecks struct { +// MockAlphaGlobalForwardingRules is the mock for GlobalForwardingRules. +type MockAlphaGlobalForwardingRules struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockHealthChecksObj + Objects map[meta.Key]*MockGlobalForwardingRulesObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -7697,11 +8936,11 @@ type MockBetaHealthChecks struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockBetaHealthChecks) (bool, *beta.HealthCheck, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockBetaHealthChecks) (bool, []*beta.HealthCheck, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *beta.HealthCheck, m *MockBetaHealthChecks) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaHealthChecks) (bool, error) - UpdateHook func(context.Context, *meta.Key, *beta.HealthCheck, *MockBetaHealthChecks) error + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaGlobalForwardingRules) (bool, *alpha.ForwardingRule, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockAlphaGlobalForwardingRules) (bool, []*alpha.ForwardingRule, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule, m *MockAlphaGlobalForwardingRules) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaGlobalForwardingRules) (bool, error) + SetTargetHook func(context.Context, *meta.Key, *alpha.TargetReference, *MockAlphaGlobalForwardingRules) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -7709,10 +8948,10 @@ type MockBetaHealthChecks struct { } // Get returns the object from the mock. 
-func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) { +func (m *MockAlphaGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaGlobalForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -7724,28 +8963,28 @@ func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.He defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToBeta() - klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaGlobalForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaHealthChecks %v not found", key), + Message: fmt.Sprintf("MockAlphaGlobalForwardingRules %v not found", key), } - klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock. -func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.HealthCheck, error) { +func (m *MockAlphaGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*alpha.ForwardingRule, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockAlphaGlobalForwardingRules.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -7755,28 +8994,28 @@ func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta. if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaGlobalForwardingRules.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*beta.HealthCheck + var objs []*alpha.ForwardingRule for _, obj := range m.Objects { - if !fl.Match(obj.ToBeta()) { + if !fl.Match(obj.ToAlpha()) { continue } - objs = append(objs, obj.ToBeta()) + objs = append(objs, obj.ToAlpha()) } - klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockAlphaGlobalForwardingRules.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error { +func (m *MockAlphaGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -7788,32 +9027,32 @@ func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *b defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockBetaHealthChecks %v exists", key), + Message: fmt.Sprintf("MockAlphaGlobalForwardingRules %v exists", key), } - klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "beta", "healthChecks") - obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "healthChecks", key) + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "forwardingRules") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "forwardingRules", key) - m.Objects[*key] = &MockHealthChecksObj{obj} - klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockGlobalForwardingRulesObj{obj} + klog.V(5).Infof("MockAlphaGlobalForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockAlphaGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -7825,223 +9064,223 @@ func (m *MockBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaHealthChecks %v not found", key), + Message: fmt.Sprintf("MockAlphaGlobalForwardingRules %v not found", key), } - klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaGlobalForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. 
-func (m *MockBetaHealthChecks) Obj(o *beta.HealthCheck) *MockHealthChecksObj { - return &MockHealthChecksObj{o} +func (m *MockAlphaGlobalForwardingRules) Obj(o *alpha.ForwardingRule) *MockGlobalForwardingRulesObj { + return &MockGlobalForwardingRulesObj{o} } -// Update is a mock for the corresponding method. -func (m *MockBetaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *beta.HealthCheck) error { - if m.UpdateHook != nil { - return m.UpdateHook(ctx, key, arg0, m) +// SetTarget is a mock for the corresponding method. +func (m *MockAlphaGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *alpha.TargetReference) error { + if m.SetTargetHook != nil { + return m.SetTargetHook(ctx, key, arg0, m) } return nil } -// GCEBetaHealthChecks is a simplifying adapter for the GCE HealthChecks. -type GCEBetaHealthChecks struct { +// GCEAlphaGlobalForwardingRules is a simplifying adapter for the GCE GlobalForwardingRules. +type GCEAlphaGlobalForwardingRules struct { s *Service } -// Get the HealthCheck named by key. -func (g *GCEBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) { - klog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): called", ctx, key) +// Get the ForwardingRule named by key. +func (g *GCEAlphaGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) { + klog.V(5).Infof("GCEAlphaGlobalForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaGlobalForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("beta"), - Service: "HealthChecks", + Version: meta.Version("alpha"), + Service: "GlobalForwardingRules", } - klog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaGlobalForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Beta.HealthChecks.Get(projectID, key.Name) + call := g.s.Alpha.GlobalForwardingRules.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaGlobalForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all HealthCheck objects. -func (g *GCEBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.HealthCheck, error) { - klog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") +// List all ForwardingRule objects. 
+func (g *GCEAlphaGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*alpha.ForwardingRule, error) { + klog.V(5).Infof("GCEAlphaGlobalForwardingRules.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("beta"), - Service: "HealthChecks", + Version: meta.Version("alpha"), + Service: "GlobalForwardingRules", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.Beta.HealthChecks.List(projectID) + klog.V(5).Infof("GCEAlphaGlobalForwardingRules.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Alpha.GlobalForwardingRules.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*beta.HealthCheck - f := func(l *beta.HealthCheckList) error { - klog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*alpha.ForwardingRule + f := func(l *alpha.ForwardingRuleList) error { + klog.V(5).Infof("GCEAlphaGlobalForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEAlphaGlobalForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert HealthCheck with key of value obj. -func (g *GCEBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error { - klog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert ForwardingRule with key of value obj. 
+func (g *GCEAlphaGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error { + klog.V(5).Infof("GCEAlphaGlobalForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaGlobalForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("beta"), - Service: "HealthChecks", + Version: meta.Version("alpha"), + Service: "GlobalForwardingRules", } - klog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaGlobalForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.Beta.HealthChecks.Insert(projectID, obj) + call := g.s.Alpha.GlobalForwardingRules.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaGlobalForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the HealthCheck referenced by key. -func (g *GCEBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): called", ctx, key) +// Delete the ForwardingRule referenced by key. 
+func (g *GCEAlphaGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaGlobalForwardingRules.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaGlobalForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("beta"), - Service: "HealthChecks", + Version: meta.Version("alpha"), + Service: "GlobalForwardingRules", } - klog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaGlobalForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.HealthChecks.Delete(projectID, key.Name) + call := g.s.Alpha.GlobalForwardingRules.Delete(projectID, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } -// Update is a method on GCEBetaHealthChecks. -func (g *GCEBetaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *beta.HealthCheck) error { - klog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): called", ctx, key) +// SetTarget is a method on GCEAlphaGlobalForwardingRules. 
+func (g *GCEAlphaGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *alpha.TargetReference) error { + klog.V(5).Infof("GCEAlphaGlobalForwardingRules.SetTarget(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaGlobalForwardingRules.SetTarget(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "Update", - Version: meta.Version("beta"), - Service: "HealthChecks", + Operation: "SetTarget", + Version: meta.Version("alpha"), + Service: "GlobalForwardingRules", } - klog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaGlobalForwardingRules.SetTarget(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalForwardingRules.SetTarget(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.HealthChecks.Update(projectID, key.Name, arg0) + call := g.s.Alpha.GlobalForwardingRules.SetTarget(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } -// HttpHealthChecks is an interface that allows for mocking of HttpHealthChecks. -type HttpHealthChecks interface { - Get(ctx context.Context, key *meta.Key) (*ga.HttpHealthCheck, error) - List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck) error +// BetaGlobalForwardingRules is an interface that allows for mocking of GlobalForwardingRules. +type BetaGlobalForwardingRules interface { + Get(ctx context.Context, key *meta.Key) (*beta.ForwardingRule, error) + List(ctx context.Context, fl *filter.F) ([]*beta.ForwardingRule, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.ForwardingRule) error Delete(ctx context.Context, key *meta.Key) error - Update(context.Context, *meta.Key, *ga.HttpHealthCheck) error + SetTarget(context.Context, *meta.Key, *beta.TargetReference) error } -// NewMockHttpHealthChecks returns a new mock for HttpHealthChecks. -func NewMockHttpHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHttpHealthChecksObj) *MockHttpHealthChecks { - mock := &MockHttpHealthChecks{ +// NewMockBetaGlobalForwardingRules returns a new mock for GlobalForwardingRules. 
+func NewMockBetaGlobalForwardingRules(pr ProjectRouter, objs map[meta.Key]*MockGlobalForwardingRulesObj) *MockBetaGlobalForwardingRules { + mock := &MockBetaGlobalForwardingRules{ ProjectRouter: pr, Objects: objs, @@ -8052,14 +9291,14 @@ func NewMockHttpHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHttpHealth return mock } -// MockHttpHealthChecks is the mock for HttpHealthChecks. -type MockHttpHealthChecks struct { +// MockBetaGlobalForwardingRules is the mock for GlobalForwardingRules. +type MockBetaGlobalForwardingRules struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockHttpHealthChecksObj + Objects map[meta.Key]*MockGlobalForwardingRulesObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -8072,11 +9311,11 @@ type MockHttpHealthChecks struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockHttpHealthChecks) (bool, *ga.HttpHealthCheck, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockHttpHealthChecks) (bool, []*ga.HttpHealthCheck, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck, m *MockHttpHealthChecks) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockHttpHealthChecks) (bool, error) - UpdateHook func(context.Context, *meta.Key, *ga.HttpHealthCheck, *MockHttpHealthChecks) error + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaGlobalForwardingRules) (bool, *beta.ForwardingRule, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockBetaGlobalForwardingRules) (bool, []*beta.ForwardingRule, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.ForwardingRule, m *MockBetaGlobalForwardingRules) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaGlobalForwardingRules) (bool, error) + SetTargetHook func(context.Context, *meta.Key, *beta.TargetReference, *MockBetaGlobalForwardingRules) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -8084,10 +9323,10 @@ type MockHttpHealthChecks struct { } // Get returns the object from the mock. 
-func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpHealthCheck, error) { +func (m *MockBetaGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*beta.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaGlobalForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -8099,28 +9338,28 @@ func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Http defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToGA() - klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaGlobalForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key), + Message: fmt.Sprintf("MockBetaGlobalForwardingRules %v not found", key), } - klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock. -func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) { +func (m *MockBetaGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*beta.ForwardingRule, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaGlobalForwardingRules.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -8130,28 +9369,28 @@ func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaGlobalForwardingRules.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*ga.HttpHealthCheck + var objs []*beta.ForwardingRule for _, obj := range m.Objects { - if !fl.Match(obj.ToGA()) { + if !fl.Match(obj.ToBeta()) { continue } - objs = append(objs, obj.ToGA()) + objs = append(objs, obj.ToBeta()) } - klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaGlobalForwardingRules.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck) error { +func (m *MockBetaGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *beta.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -8163,32 +9402,32 @@ func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockHttpHealthChecks %v exists", key), + Message: fmt.Sprintf("MockBetaGlobalForwardingRules %v exists", key), } - klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "httpHealthChecks") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "httpHealthChecks", key) + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "forwardingRules") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "forwardingRules", key) - m.Objects[*key] = &MockHttpHealthChecksObj{obj} - klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockGlobalForwardingRulesObj{obj} + klog.V(5).Infof("MockBetaGlobalForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockBetaGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -8200,223 +9439,223 @@ func (m *MockHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key), + Message: fmt.Sprintf("MockBetaGlobalForwardingRules %v not found", key), } - klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaGlobalForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. 
-func (m *MockHttpHealthChecks) Obj(o *ga.HttpHealthCheck) *MockHttpHealthChecksObj { - return &MockHttpHealthChecksObj{o} +func (m *MockBetaGlobalForwardingRules) Obj(o *beta.ForwardingRule) *MockGlobalForwardingRulesObj { + return &MockGlobalForwardingRulesObj{o} } -// Update is a mock for the corresponding method. -func (m *MockHttpHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpHealthCheck) error { - if m.UpdateHook != nil { - return m.UpdateHook(ctx, key, arg0, m) +// SetTarget is a mock for the corresponding method. +func (m *MockBetaGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *beta.TargetReference) error { + if m.SetTargetHook != nil { + return m.SetTargetHook(ctx, key, arg0, m) } return nil } -// GCEHttpHealthChecks is a simplifying adapter for the GCE HttpHealthChecks. -type GCEHttpHealthChecks struct { +// GCEBetaGlobalForwardingRules is a simplifying adapter for the GCE GlobalForwardingRules. +type GCEBetaGlobalForwardingRules struct { s *Service } -// Get the HttpHealthCheck named by key. -func (g *GCEHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpHealthCheck, error) { - klog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): called", ctx, key) +// Get the ForwardingRule named by key. +func (g *GCEBetaGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*beta.ForwardingRule, error) { + klog.V(5).Infof("GCEBetaGlobalForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEHttpHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaGlobalForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("ga"), - Service: "HttpHealthChecks", + Version: meta.Version("beta"), + Service: "GlobalForwardingRules", } - klog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaGlobalForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaGlobalForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.HttpHealthChecks.Get(projectID, key.Name) + call := g.s.Beta.GlobalForwardingRules.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaGlobalForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all HttpHealthCheck objects. -func (g *GCEHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) { - klog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") +// List all ForwardingRule objects. 
+func (g *GCEBetaGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*beta.ForwardingRule, error) { + klog.V(5).Infof("GCEBetaGlobalForwardingRules.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("ga"), - Service: "HttpHealthChecks", + Version: meta.Version("beta"), + Service: "GlobalForwardingRules", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.GA.HttpHealthChecks.List(projectID) + klog.V(5).Infof("GCEBetaGlobalForwardingRules.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Beta.GlobalForwardingRules.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.HttpHealthCheck - f := func(l *ga.HttpHealthCheckList) error { - klog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*beta.ForwardingRule + f := func(l *beta.ForwardingRuleList) error { + klog.V(5).Infof("GCEBetaGlobalForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEBetaGlobalForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert HttpHealthCheck with key of value obj. -func (g *GCEHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck) error { - klog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert ForwardingRule with key of value obj. 
+func (g *GCEBetaGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *beta.ForwardingRule) error { + klog.V(5).Infof("GCEBetaGlobalForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaGlobalForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("ga"), - Service: "HttpHealthChecks", + Version: meta.Version("beta"), + Service: "GlobalForwardingRules", } - klog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaGlobalForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaGlobalForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.HttpHealthChecks.Insert(projectID, obj) + call := g.s.Beta.GlobalForwardingRules.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaGlobalForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaGlobalForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the HttpHealthCheck referenced by key. -func (g *GCEHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): called", ctx, key) +// Delete the ForwardingRule referenced by key. 
+func (g *GCEBetaGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaGlobalForwardingRules.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEHttpHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaGlobalForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("ga"), - Service: "HttpHealthChecks", + Version: meta.Version("beta"), + Service: "GlobalForwardingRules", } - klog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaGlobalForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaGlobalForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.HttpHealthChecks.Delete(projectID, key.Name) + call := g.s.Beta.GlobalForwardingRules.Delete(projectID, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } -// Update is a method on GCEHttpHealthChecks. -func (g *GCEHttpHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpHealthCheck) error { - klog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): called", ctx, key) +// SetTarget is a method on GCEBetaGlobalForwardingRules. 
+func (g *GCEBetaGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *beta.TargetReference) error { + klog.V(5).Infof("GCEBetaGlobalForwardingRules.SetTarget(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaGlobalForwardingRules.SetTarget(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "Update", - Version: meta.Version("ga"), - Service: "HttpHealthChecks", + Operation: "SetTarget", + Version: meta.Version("beta"), + Service: "GlobalForwardingRules", } - klog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaGlobalForwardingRules.SetTarget(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaGlobalForwardingRules.SetTarget(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.HttpHealthChecks.Update(projectID, key.Name, arg0) + call := g.s.Beta.GlobalForwardingRules.SetTarget(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } -// HttpsHealthChecks is an interface that allows for mocking of HttpsHealthChecks. -type HttpsHealthChecks interface { - Get(ctx context.Context, key *meta.Key) (*ga.HttpsHealthCheck, error) - List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck) error +// GlobalForwardingRules is an interface that allows for mocking of GlobalForwardingRules. +type GlobalForwardingRules interface { + Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) + List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error Delete(ctx context.Context, key *meta.Key) error - Update(context.Context, *meta.Key, *ga.HttpsHealthCheck) error + SetTarget(context.Context, *meta.Key, *ga.TargetReference) error } -// NewMockHttpsHealthChecks returns a new mock for HttpsHealthChecks. -func NewMockHttpsHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHttpsHealthChecksObj) *MockHttpsHealthChecks { - mock := &MockHttpsHealthChecks{ +// NewMockGlobalForwardingRules returns a new mock for GlobalForwardingRules. 
+func NewMockGlobalForwardingRules(pr ProjectRouter, objs map[meta.Key]*MockGlobalForwardingRulesObj) *MockGlobalForwardingRules { + mock := &MockGlobalForwardingRules{ ProjectRouter: pr, Objects: objs, @@ -8427,14 +9666,14 @@ func NewMockHttpsHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHttpsHeal return mock } -// MockHttpsHealthChecks is the mock for HttpsHealthChecks. -type MockHttpsHealthChecks struct { +// MockGlobalForwardingRules is the mock for GlobalForwardingRules. +type MockGlobalForwardingRules struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockHttpsHealthChecksObj + Objects map[meta.Key]*MockGlobalForwardingRulesObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -8447,11 +9686,11 @@ type MockHttpsHealthChecks struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockHttpsHealthChecks) (bool, *ga.HttpsHealthCheck, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockHttpsHealthChecks) (bool, []*ga.HttpsHealthCheck, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck, m *MockHttpsHealthChecks) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockHttpsHealthChecks) (bool, error) - UpdateHook func(context.Context, *meta.Key, *ga.HttpsHealthCheck, *MockHttpsHealthChecks) error + GetHook func(ctx context.Context, key *meta.Key, m *MockGlobalForwardingRules) (bool, *ga.ForwardingRule, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockGlobalForwardingRules) (bool, []*ga.ForwardingRule, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule, m *MockGlobalForwardingRules) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockGlobalForwardingRules) (bool, error) + SetTargetHook func(context.Context, *meta.Key, *ga.TargetReference, *MockGlobalForwardingRules) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -8459,10 +9698,10 @@ type MockHttpsHealthChecks struct { } // Get returns the object from the mock. 
-func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpsHealthCheck, error) { +func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -8474,28 +9713,28 @@ func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Htt defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key), + Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key), } - klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock. -func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) { +func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -8505,12 +9744,12 @@ func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.H if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*ga.HttpsHealthCheck + var objs []*ga.ForwardingRule for _, obj := range m.Objects { if !fl.Match(obj.ToGA()) { continue @@ -8518,15 +9757,15 @@ func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.H objs = append(objs, obj.ToGA()) } - klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck) error { +func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -8538,32 +9777,32 @@ func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockHttpsHealthChecks %v exists", key), + Message: fmt.Sprintf("MockGlobalForwardingRules %v exists", key), } - klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "httpsHealthChecks") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "httpsHealthChecks", key) + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "forwardingRules") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "forwardingRules", key) - m.Objects[*key] = &MockHttpsHealthChecksObj{obj} - klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockGlobalForwardingRulesObj{obj} + klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -8575,226 +9814,223 @@ func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key), + Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key), } - klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. 
-func (m *MockHttpsHealthChecks) Obj(o *ga.HttpsHealthCheck) *MockHttpsHealthChecksObj { - return &MockHttpsHealthChecksObj{o} +func (m *MockGlobalForwardingRules) Obj(o *ga.ForwardingRule) *MockGlobalForwardingRulesObj { + return &MockGlobalForwardingRulesObj{o} } -// Update is a mock for the corresponding method. -func (m *MockHttpsHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpsHealthCheck) error { - if m.UpdateHook != nil { - return m.UpdateHook(ctx, key, arg0, m) +// SetTarget is a mock for the corresponding method. +func (m *MockGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *ga.TargetReference) error { + if m.SetTargetHook != nil { + return m.SetTargetHook(ctx, key, arg0, m) } return nil } -// GCEHttpsHealthChecks is a simplifying adapter for the GCE HttpsHealthChecks. -type GCEHttpsHealthChecks struct { +// GCEGlobalForwardingRules is a simplifying adapter for the GCE GlobalForwardingRules. +type GCEGlobalForwardingRules struct { s *Service } -// Get the HttpsHealthCheck named by key. -func (g *GCEHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpsHealthCheck, error) { - klog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): called", ctx, key) +// Get the ForwardingRule named by key. +func (g *GCEGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { + klog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEHttpsHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", Version: meta.Version("ga"), - Service: "HttpsHealthChecks", + Service: "GlobalForwardingRules", } - klog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.HttpsHealthChecks.Get(projectID, key.Name) + call := g.s.GA.GlobalForwardingRules.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all HttpsHealthCheck objects. -func (g *GCEHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) { - klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") +// List all ForwardingRule objects. 
+func (g *GCEGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) { + klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", Version: meta.Version("ga"), - Service: "HttpsHealthChecks", + Service: "GlobalForwardingRules", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.GA.HttpsHealthChecks.List(projectID) + klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.GlobalForwardingRules.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.HttpsHealthCheck - f := func(l *ga.HttpsHealthCheckList) error { - klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*ga.ForwardingRule + f := func(l *ga.ForwardingRuleList) error { + klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert HttpsHealthCheck with key of value obj. -func (g *GCEHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck) error { - klog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert ForwardingRule with key of value obj. 
+func (g *GCEGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { + klog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", Version: meta.Version("ga"), - Service: "HttpsHealthChecks", + Service: "GlobalForwardingRules", } - klog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.HttpsHealthChecks.Insert(projectID, obj) + call := g.s.GA.GlobalForwardingRules.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the HttpsHealthCheck referenced by key. -func (g *GCEHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): called", ctx, key) +// Delete the ForwardingRule referenced by key. 
+func (g *GCEGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEHttpsHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", Version: meta.Version("ga"), - Service: "HttpsHealthChecks", + Service: "GlobalForwardingRules", } - klog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.HttpsHealthChecks.Delete(projectID, key.Name) + call := g.s.GA.GlobalForwardingRules.Delete(projectID, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } -// Update is a method on GCEHttpsHealthChecks. -func (g *GCEHttpsHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpsHealthCheck) error { - klog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): called", ctx, key) +// SetTarget is a method on GCEGlobalForwardingRules. 
+func (g *GCEGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *ga.TargetReference) error { + klog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "Update", + Operation: "SetTarget", Version: meta.Version("ga"), - Service: "HttpsHealthChecks", + Service: "GlobalForwardingRules", } - klog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.HttpsHealthChecks.Update(projectID, key.Name, arg0) + call := g.s.GA.GlobalForwardingRules.SetTarget(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err) return err } -// InstanceGroups is an interface that allows for mocking of InstanceGroups. -type InstanceGroups interface { - Get(ctx context.Context, key *meta.Key) (*ga.InstanceGroup, error) - List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup) error +// HealthChecks is an interface that allows for mocking of HealthChecks. +type HealthChecks interface { + Get(ctx context.Context, key *meta.Key) (*ga.HealthCheck, error) + List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.HealthCheck) error Delete(ctx context.Context, key *meta.Key) error - AddInstances(context.Context, *meta.Key, *ga.InstanceGroupsAddInstancesRequest) error - ListInstances(context.Context, *meta.Key, *ga.InstanceGroupsListInstancesRequest, *filter.F) ([]*ga.InstanceWithNamedPorts, error) - RemoveInstances(context.Context, *meta.Key, *ga.InstanceGroupsRemoveInstancesRequest) error - SetNamedPorts(context.Context, *meta.Key, *ga.InstanceGroupsSetNamedPortsRequest) error + Update(context.Context, *meta.Key, *ga.HealthCheck) error } -// NewMockInstanceGroups returns a new mock for InstanceGroups. -func NewMockInstanceGroups(pr ProjectRouter, objs map[meta.Key]*MockInstanceGroupsObj) *MockInstanceGroups { - mock := &MockInstanceGroups{ +// NewMockHealthChecks returns a new mock for HealthChecks. 
+func NewMockHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHealthChecksObj) *MockHealthChecks { + mock := &MockHealthChecks{ ProjectRouter: pr, Objects: objs, @@ -8805,14 +10041,14 @@ func NewMockInstanceGroups(pr ProjectRouter, objs map[meta.Key]*MockInstanceGrou return mock } -// MockInstanceGroups is the mock for InstanceGroups. -type MockInstanceGroups struct { +// MockHealthChecks is the mock for HealthChecks. +type MockHealthChecks struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockInstanceGroupsObj + Objects map[meta.Key]*MockHealthChecksObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -8825,14 +10061,11 @@ type MockInstanceGroups struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockInstanceGroups) (bool, *ga.InstanceGroup, error) - ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockInstanceGroups) (bool, []*ga.InstanceGroup, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup, m *MockInstanceGroups) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockInstanceGroups) (bool, error) - AddInstancesHook func(context.Context, *meta.Key, *ga.InstanceGroupsAddInstancesRequest, *MockInstanceGroups) error - ListInstancesHook func(context.Context, *meta.Key, *ga.InstanceGroupsListInstancesRequest, *filter.F, *MockInstanceGroups) ([]*ga.InstanceWithNamedPorts, error) - RemoveInstancesHook func(context.Context, *meta.Key, *ga.InstanceGroupsRemoveInstancesRequest, *MockInstanceGroups) error - SetNamedPortsHook func(context.Context, *meta.Key, *ga.InstanceGroupsSetNamedPortsRequest, *MockInstanceGroups) error + GetHook func(ctx context.Context, key *meta.Key, m *MockHealthChecks) (bool, *ga.HealthCheck, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockHealthChecks) (bool, []*ga.HealthCheck, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.HealthCheck, m *MockHealthChecks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockHealthChecks) (bool, error) + UpdateHook func(context.Context, *meta.Key, *ga.HealthCheck, *MockHealthChecks) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -8840,10 +10073,10 @@ type MockInstanceGroups struct { } // Get returns the object from the mock. 
-func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.InstanceGroup, error) { +func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -8855,28 +10088,28 @@ func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.Instan defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockInstanceGroups %v not found", key), + Message: fmt.Sprintf("MockHealthChecks %v not found", key), } - klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock in the given zone. -func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) { +// List all of the objects in the mock. +func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -8886,31 +10119,28 @@ func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*ga.InstanceGroup - for key, obj := range m.Objects { - if key.Zone != zone { - continue - } + var objs []*ga.HealthCheck + for _, obj := range m.Objects { if !fl.Match(obj.ToGA()) { continue } objs = append(objs, obj.ToGA()) } - klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup) error { +func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -8922,32 +10152,32 @@ func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga. 
defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockInstanceGroups %v exists", key), + Message: fmt.Sprintf("MockHealthChecks %v exists", key), } - klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "instanceGroups") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "instanceGroups", key) + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "healthChecks") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "healthChecks", key) - m.Objects[*key] = &MockInstanceGroupsObj{obj} - klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockHealthChecksObj{obj} + klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -8959,357 +10189,223 @@ func (m *MockInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockInstanceGroups %v not found", key), + Message: fmt.Sprintf("MockHealthChecks %v not found", key), } - klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockInstanceGroups) Obj(o *ga.InstanceGroup) *MockInstanceGroupsObj { - return &MockInstanceGroupsObj{o} -} - -// AddInstances is a mock for the corresponding method. -func (m *MockInstanceGroups) AddInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsAddInstancesRequest) error { - if m.AddInstancesHook != nil { - return m.AddInstancesHook(ctx, key, arg0, m) - } - return nil -} - -// ListInstances is a mock for the corresponding method. -func (m *MockInstanceGroups) ListInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsListInstancesRequest, fl *filter.F) ([]*ga.InstanceWithNamedPorts, error) { - if m.ListInstancesHook != nil { - return m.ListInstancesHook(ctx, key, arg0, fl, m) - } - return nil, nil -} - -// RemoveInstances is a mock for the corresponding method. 
-func (m *MockInstanceGroups) RemoveInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsRemoveInstancesRequest) error { - if m.RemoveInstancesHook != nil { - return m.RemoveInstancesHook(ctx, key, arg0, m) - } - return nil +func (m *MockHealthChecks) Obj(o *ga.HealthCheck) *MockHealthChecksObj { + return &MockHealthChecksObj{o} } -// SetNamedPorts is a mock for the corresponding method. -func (m *MockInstanceGroups) SetNamedPorts(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsSetNamedPortsRequest) error { - if m.SetNamedPortsHook != nil { - return m.SetNamedPortsHook(ctx, key, arg0, m) +// Update is a mock for the corresponding method. +func (m *MockHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HealthCheck) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) } return nil } -// GCEInstanceGroups is a simplifying adapter for the GCE InstanceGroups. -type GCEInstanceGroups struct { +// GCEHealthChecks is a simplifying adapter for the GCE HealthChecks. +type GCEHealthChecks struct { s *Service } -// Get the InstanceGroup named by key. -func (g *GCEInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.InstanceGroup, error) { - klog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): called", ctx, key) +// Get the HealthCheck named by key. +func (g *GCEHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCheck, error) { + klog.V(5).Infof("GCEHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEInstanceGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", Version: meta.Version("ga"), - Service: "InstanceGroups", + Service: "HealthChecks", } - klog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEInstanceGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.InstanceGroups.Get(projectID, key.Zone, key.Name) + call := g.s.GA.HealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEInstanceGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all InstanceGroup objects. -func (g *GCEInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) { - klog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v) called", ctx, zone, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") +// List all HealthCheck objects. 
+func (g *GCEHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) { + klog.V(5).Infof("GCEHealthChecks.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", Version: meta.Version("ga"), - Service: "InstanceGroups", + Service: "HealthChecks", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) - call := g.s.GA.InstanceGroups.List(projectID, zone) + klog.V(5).Infof("GCEHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.HealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.InstanceGroup - f := func(l *ga.InstanceGroupList) error { - klog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*ga.HealthCheck + f := func(l *ga.HealthCheckList) error { + klog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert InstanceGroup with key of value obj. -func (g *GCEInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup) error { - klog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert HealthCheck with key of value obj. 
+func (g *GCEHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HealthCheck) error { + klog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEInstanceGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", Version: meta.Version("ga"), - Service: "InstanceGroups", + Service: "HealthChecks", } - klog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.InstanceGroups.Insert(projectID, key.Zone, obj) + call := g.s.GA.HealthChecks.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the InstanceGroup referenced by key. -func (g *GCEInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): called", ctx, key) +// Delete the HealthCheck referenced by key. 
+func (g *GCEHealthChecks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEInstanceGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", Version: meta.Version("ga"), - Service: "InstanceGroups", + Service: "HealthChecks", } - klog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.InstanceGroups.Delete(projectID, key.Zone, key.Name) + call := g.s.GA.HealthChecks.Delete(projectID, key.Name) + call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } -// AddInstances is a method on GCEInstanceGroups. -func (g *GCEInstanceGroups) AddInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsAddInstancesRequest) error { - klog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): called", ctx, key) +// Update is a method on GCEHealthChecks. +func (g *GCEHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HealthCheck) error { + klog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "AddInstances", + Operation: "Update", Version: meta.Version("ga"), - Service: "InstanceGroups", + Service: "HealthChecks", } - klog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.InstanceGroups.AddInstances(projectID, key.Zone, key.Name, arg0) + call := g.s.GA.HealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// ListInstances is a method on GCEInstanceGroups. -func (g *GCEInstanceGroups) ListInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsListInstancesRequest, fl *filter.F) ([]*ga.InstanceWithNamedPorts, error) { - klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return nil, fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "ListInstances", - Version: meta.Version("ga"), - Service: "InstanceGroups", - } - klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return nil, err - } - call := g.s.GA.InstanceGroups.ListInstances(projectID, key.Zone, key.Name, arg0) - var all []*ga.InstanceWithNamedPorts - f := func(l *ga.InstanceGroupsListInstances) error { - klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): page %+v", ctx, key, l) - all = append(all, l.Items...) - return nil - } - if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, nil, err) - return nil, err - } - if klog.V(4) { - klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) - } else if klog.V(5) { - var asStr []string - for _, o := range all { - asStr = append(asStr, fmt.Sprintf("%+v", o)) - } - klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) - } - return all, nil -} - -// RemoveInstances is a method on GCEInstanceGroups. -func (g *GCEInstanceGroups) RemoveInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsRemoveInstancesRequest) error { - klog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "RemoveInstances", - Version: meta.Version("ga"), - Service: "InstanceGroups", - } - klog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.GA.InstanceGroups.RemoveInstances(projectID, key.Zone, key.Name, arg0) - call.Context(ctx) - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) - return err - } - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) 
= %+v", ctx, key, err) - return err -} - -// SetNamedPorts is a method on GCEInstanceGroups. -func (g *GCEInstanceGroups) SetNamedPorts(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsSetNamedPortsRequest) error { - klog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "SetNamedPorts", - Version: meta.Version("ga"), - Service: "InstanceGroups", - } - klog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.GA.InstanceGroups.SetNamedPorts(projectID, key.Zone, key.Name, arg0) - call.Context(ctx) - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) = %+v", ctx, key, err) - return err - } - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) = %+v", ctx, key, err) - return err -} - -// Instances is an interface that allows for mocking of Instances. -type Instances interface { - Get(ctx context.Context, key *meta.Key) (*ga.Instance, error) - List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.Instance) error +// AlphaHealthChecks is an interface that allows for mocking of HealthChecks. +type AlphaHealthChecks interface { + Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) + List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error Delete(ctx context.Context, key *meta.Key) error - AttachDisk(context.Context, *meta.Key, *ga.AttachedDisk) error - DetachDisk(context.Context, *meta.Key, string) error + Update(context.Context, *meta.Key, *alpha.HealthCheck) error } -// NewMockInstances returns a new mock for Instances. -func NewMockInstances(pr ProjectRouter, objs map[meta.Key]*MockInstancesObj) *MockInstances { - mock := &MockInstances{ +// NewMockAlphaHealthChecks returns a new mock for HealthChecks. +func NewMockAlphaHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHealthChecksObj) *MockAlphaHealthChecks { + mock := &MockAlphaHealthChecks{ ProjectRouter: pr, Objects: objs, @@ -9320,14 +10416,14 @@ func NewMockInstances(pr ProjectRouter, objs map[meta.Key]*MockInstancesObj) *Mo return mock } -// MockInstances is the mock for Instances. -type MockInstances struct { +// MockAlphaHealthChecks is the mock for HealthChecks. +type MockAlphaHealthChecks struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockInstancesObj + Objects map[meta.Key]*MockHealthChecksObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -9340,12 +10436,11 @@ type MockInstances struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. 
- GetHook func(ctx context.Context, key *meta.Key, m *MockInstances) (bool, *ga.Instance, error) - ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockInstances) (bool, []*ga.Instance, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Instance, m *MockInstances) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockInstances) (bool, error) - AttachDiskHook func(context.Context, *meta.Key, *ga.AttachedDisk, *MockInstances) error - DetachDiskHook func(context.Context, *meta.Key, string, *MockInstances) error + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaHealthChecks) (bool, *alpha.HealthCheck, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockAlphaHealthChecks) (bool, []*alpha.HealthCheck, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck, m *MockAlphaHealthChecks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaHealthChecks) (bool, error) + UpdateHook func(context.Context, *meta.Key, *alpha.HealthCheck, *MockAlphaHealthChecks) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -9353,10 +10448,10 @@ type MockInstances struct { } // Get returns the object from the mock. -func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, error) { +func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -9368,28 +10463,28 @@ func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, e defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToGA() - klog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockInstances %v not found", key), + Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key), } - klog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock in the given zone. -func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { +// List all of the objects in the mock. 
+func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - klog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -9399,31 +10494,28 @@ func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([] if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*ga.Instance - for key, obj := range m.Objects { - if key.Zone != zone { - continue - } - if !fl.Match(obj.ToGA()) { + var objs []*alpha.HealthCheck + for _, obj := range m.Objects { + if !fl.Match(obj.ToAlpha()) { continue } - objs = append(objs, obj.ToGA()) + objs = append(objs, obj.ToAlpha()) } - klog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instance) error { +func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -9435,32 +10527,32 @@ func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Insta defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockInstances %v exists", key), + Message: fmt.Sprintf("MockAlphaHealthChecks %v exists", key), } - klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "instances") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "instances", key) + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "healthChecks") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "healthChecks", key) - m.Objects[*key] = &MockInstancesObj{obj} - klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockHealthChecksObj{obj} + klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockInstances) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -9472,265 +10564,223 @@ func (m *MockInstances) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockInstances %v not found", key), + Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key), } - klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockInstances.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockInstances) Obj(o *ga.Instance) *MockInstancesObj { - return &MockInstancesObj{o} -} - -// AttachDisk is a mock for the corresponding method. -func (m *MockInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *ga.AttachedDisk) error { - if m.AttachDiskHook != nil { - return m.AttachDiskHook(ctx, key, arg0, m) - } - return nil +func (m *MockAlphaHealthChecks) Obj(o *alpha.HealthCheck) *MockHealthChecksObj { + return &MockHealthChecksObj{o} } -// DetachDisk is a mock for the corresponding method. -func (m *MockInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { - if m.DetachDiskHook != nil { - return m.DetachDiskHook(ctx, key, arg0, m) +// Update is a mock for the corresponding method. +func (m *MockAlphaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *alpha.HealthCheck) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) } return nil } -// GCEInstances is a simplifying adapter for the GCE Instances. -type GCEInstances struct { +// GCEAlphaHealthChecks is a simplifying adapter for the GCE HealthChecks. +type GCEAlphaHealthChecks struct { s *Service } -// Get the Instance named by key. -func (g *GCEInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, error) { - klog.V(5).Infof("GCEInstances.Get(%v, %v): called", ctx, key) +// Get the HealthCheck named by key. 
+func (g *GCEAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) { + klog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("ga"), - Service: "Instances", + Version: meta.Version("alpha"), + Service: "HealthChecks", } - klog.V(5).Infof("GCEInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.Instances.Get(projectID, key.Zone, key.Name) + call := g.s.Alpha.HealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all Instance objects. -func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { - klog.V(5).Infof("GCEInstances.List(%v, %v, %v) called", ctx, zone, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "List", - Version: meta.Version("ga"), - Service: "Instances", +// List all HealthCheck objects. +func (g *GCEAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) { + klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "HealthChecks", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) - call := g.s.GA.Instances.List(projectID, zone) + klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Alpha.HealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.Instance - f := func(l *ga.InstanceList) error { - klog.V(5).Infof("GCEInstances.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*alpha.HealthCheck + f := func(l *alpha.HealthCheckList) error { + klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert Instance with key of value obj. -func (g *GCEInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instance) error { - klog.V(5).Infof("GCEInstances.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert HealthCheck with key of value obj. +func (g *GCEAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error { + klog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("ga"), - Service: "Instances", + Version: meta.Version("alpha"), + Service: "HealthChecks", } - klog.V(5).Infof("GCEInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.Instances.Insert(projectID, key.Zone, obj) + call := g.s.Alpha.HealthChecks.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the Instance referenced by key. -func (g *GCEInstances) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEInstances.Delete(%v, %v): called", ctx, key) +// Delete the HealthCheck referenced by key. 
+func (g *GCEAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("ga"), - Service: "Instances", + Version: meta.Version("alpha"), + Service: "HealthChecks", } - klog.V(5).Infof("GCEInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.Instances.Delete(projectID, key.Zone, key.Name) + call := g.s.Alpha.HealthChecks.Delete(projectID, key.Name) + call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err) - return err -} - -// AttachDisk is a method on GCEInstances. -func (g *GCEInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *ga.AttachedDisk) error { - klog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCEInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "AttachDisk", - Version: meta.Version("ga"), - Service: "Instances", - } - klog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.GA.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) - call.Context(ctx) - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) - return err - } - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } -// DetachDisk is a method on GCEInstances. -func (g *GCEInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { - klog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): called", ctx, key) +// Update is a method on GCEAlphaHealthChecks. 
+func (g *GCEAlphaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *alpha.HealthCheck) error { + klog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "DetachDisk", - Version: meta.Version("ga"), - Service: "Instances", + Operation: "Update", + Version: meta.Version("alpha"), + Service: "HealthChecks", } - klog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) + call := g.s.Alpha.HealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// BetaInstances is an interface that allows for mocking of Instances. -type BetaInstances interface { - Get(ctx context.Context, key *meta.Key) (*beta.Instance, error) - List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) - Insert(ctx context.Context, key *meta.Key, obj *beta.Instance) error +// BetaHealthChecks is an interface that allows for mocking of HealthChecks. +type BetaHealthChecks interface { + Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) + List(ctx context.Context, fl *filter.F) ([]*beta.HealthCheck, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error Delete(ctx context.Context, key *meta.Key) error - AttachDisk(context.Context, *meta.Key, *beta.AttachedDisk) error - DetachDisk(context.Context, *meta.Key, string) error - UpdateNetworkInterface(context.Context, *meta.Key, string, *beta.NetworkInterface) error + Update(context.Context, *meta.Key, *beta.HealthCheck) error } -// NewMockBetaInstances returns a new mock for Instances. -func NewMockBetaInstances(pr ProjectRouter, objs map[meta.Key]*MockInstancesObj) *MockBetaInstances { - mock := &MockBetaInstances{ +// NewMockBetaHealthChecks returns a new mock for HealthChecks. +func NewMockBetaHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHealthChecksObj) *MockBetaHealthChecks { + mock := &MockBetaHealthChecks{ ProjectRouter: pr, Objects: objs, @@ -9741,14 +10791,14 @@ func NewMockBetaInstances(pr ProjectRouter, objs map[meta.Key]*MockInstancesObj) return mock } -// MockBetaInstances is the mock for Instances. -type MockBetaInstances struct { +// MockBetaHealthChecks is the mock for HealthChecks. 
+type MockBetaHealthChecks struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockInstancesObj + Objects map[meta.Key]*MockHealthChecksObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -9761,13 +10811,11 @@ type MockBetaInstances struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockBetaInstances) (bool, *beta.Instance, error) - ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockBetaInstances) (bool, []*beta.Instance, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *beta.Instance, m *MockBetaInstances) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaInstances) (bool, error) - AttachDiskHook func(context.Context, *meta.Key, *beta.AttachedDisk, *MockBetaInstances) error - DetachDiskHook func(context.Context, *meta.Key, string, *MockBetaInstances) error - UpdateNetworkInterfaceHook func(context.Context, *meta.Key, string, *beta.NetworkInterface, *MockBetaInstances) error + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaHealthChecks) (bool, *beta.HealthCheck, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockBetaHealthChecks) (bool, []*beta.HealthCheck, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.HealthCheck, m *MockBetaHealthChecks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaHealthChecks) (bool, error) + UpdateHook func(context.Context, *meta.Key, *beta.HealthCheck, *MockBetaHealthChecks) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -9775,10 +10823,10 @@ type MockBetaInstances struct { } // Get returns the object from the mock. -func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Instance, error) { +func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -9790,28 +10838,28 @@ func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Insta defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaInstances %v not found", key), + Message: fmt.Sprintf("MockBetaHealthChecks %v not found", key), } - klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock in the given zone. 
-func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) { +// List all of the objects in the mock. +func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.HealthCheck, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -9821,31 +10869,28 @@ func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*beta.Instance - for key, obj := range m.Objects { - if key.Zone != zone { - continue - } + var objs []*beta.HealthCheck + for _, obj := range m.Objects { if !fl.Match(obj.ToBeta()) { continue } objs = append(objs, obj.ToBeta()) } - klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta.Instance) error { +func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -9857,32 +10902,32 @@ func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockBetaInstances %v exists", key), + Message: fmt.Sprintf("MockBetaHealthChecks %v exists", key), } - klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "beta", "instances") - obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "instances", key) + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "healthChecks") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "healthChecks", key) - m.Objects[*key] = &MockInstancesObj{obj} - klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockHealthChecksObj{obj} + klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockBetaInstances) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -9894,306 +10939,223 @@ func (m *MockBetaInstances) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaInstances %v not found", key), + Message: fmt.Sprintf("MockBetaHealthChecks %v not found", key), } - klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockBetaInstances) Obj(o *beta.Instance) *MockInstancesObj { - return &MockInstancesObj{o} +func (m *MockBetaHealthChecks) Obj(o *beta.HealthCheck) *MockHealthChecksObj { + return &MockHealthChecksObj{o} } -// AttachDisk is a mock for the corresponding method. -func (m *MockBetaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *beta.AttachedDisk) error { - if m.AttachDiskHook != nil { - return m.AttachDiskHook(ctx, key, arg0, m) +// Update is a mock for the corresponding method. +func (m *MockBetaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *beta.HealthCheck) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) } return nil } -// DetachDisk is a mock for the corresponding method. -func (m *MockBetaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { - if m.DetachDiskHook != nil { - return m.DetachDiskHook(ctx, key, arg0, m) - } - return nil +// GCEBetaHealthChecks is a simplifying adapter for the GCE HealthChecks. +type GCEBetaHealthChecks struct { + s *Service } -// UpdateNetworkInterface is a mock for the corresponding method. -func (m *MockBetaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *beta.NetworkInterface) error { - if m.UpdateNetworkInterfaceHook != nil { - return m.UpdateNetworkInterfaceHook(ctx, key, arg0, arg1, m) - } - return nil -} - -// GCEBetaInstances is a simplifying adapter for the GCE Instances. -type GCEBetaInstances struct { - s *Service -} - -// Get the Instance named by key. -func (g *GCEBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Instance, error) { - klog.V(5).Infof("GCEBetaInstances.Get(%v, %v): called", ctx, key) +// Get the HealthCheck named by key. 
+func (g *GCEBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) { + klog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", Version: meta.Version("beta"), - Service: "Instances", + Service: "HealthChecks", } - klog.V(5).Infof("GCEBetaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Beta.Instances.Get(projectID, key.Zone, key.Name) + call := g.s.Beta.HealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEBetaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all Instance objects. -func (g *GCEBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) { - klog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v) called", ctx, zone, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") +// List all HealthCheck objects. +func (g *GCEBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.HealthCheck, error) { + klog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", Version: meta.Version("beta"), - Service: "Instances", + Service: "HealthChecks", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) - call := g.s.Beta.Instances.List(projectID, zone) + klog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Beta.HealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*beta.Instance - f := func(l *beta.InstanceList) error { - klog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*beta.HealthCheck + f := func(l *beta.HealthCheckList) error { + klog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert Instance with key of value obj. -func (g *GCEBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta.Instance) error { - klog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert HealthCheck with key of value obj. +func (g *GCEBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error { + klog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEBetaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", Version: meta.Version("beta"), - Service: "Instances", + Service: "HealthChecks", } - klog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.Beta.Instances.Insert(projectID, key.Zone, obj) + call := g.s.Beta.HealthChecks.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the Instance referenced by key. -func (g *GCEBetaInstances) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): called", ctx, key) +// Delete the HealthCheck referenced by key. 
+func (g *GCEBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", Version: meta.Version("beta"), - Service: "Instances", + Service: "HealthChecks", } - klog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.Beta.Instances.Delete(projectID, key.Zone, key.Name) - call.Context(ctx) - - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } + call := g.s.Beta.HealthChecks.Delete(projectID, key.Name) - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err) - return err -} - -// AttachDisk is a method on GCEBetaInstances. -func (g *GCEBetaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *beta.AttachedDisk) error { - klog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "AttachDisk", - Version: meta.Version("beta"), - Service: "Instances", - } - klog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.Beta.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) call.Context(ctx) - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) - return err - } - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) - return err -} - -// DetachDisk is a method on GCEBetaInstances. 
-func (g *GCEBetaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { - klog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "DetachDisk", - Version: meta.Version("beta"), - Service: "Instances", - } - klog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.Beta.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) - call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } + err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } -// UpdateNetworkInterface is a method on GCEBetaInstances. -func (g *GCEBetaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *beta.NetworkInterface) error { - klog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key) +// Update is a method on GCEBetaHealthChecks. +func (g *GCEBetaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *beta.HealthCheck) error { + klog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "UpdateNetworkInterface", + Operation: "Update", Version: meta.Version("beta"), - Service: "Instances", + Service: "HealthChecks", } - klog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.Instances.UpdateNetworkInterface(projectID, key.Zone, key.Name, arg0, arg1) + call := g.s.Beta.HealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// AlphaInstances is an interface that allows for mocking of Instances. -type AlphaInstances interface { - Get(ctx context.Context, key *meta.Key) (*alpha.Instance, error) - List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) - Insert(ctx context.Context, key *meta.Key, obj *alpha.Instance) error +// AlphaRegionHealthChecks is an interface that allows for mocking of RegionHealthChecks. +type AlphaRegionHealthChecks interface { + Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.HealthCheck, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error Delete(ctx context.Context, key *meta.Key) error - AttachDisk(context.Context, *meta.Key, *alpha.AttachedDisk) error - DetachDisk(context.Context, *meta.Key, string) error - UpdateNetworkInterface(context.Context, *meta.Key, string, *alpha.NetworkInterface) error + Update(context.Context, *meta.Key, *alpha.HealthCheck) error } -// NewMockAlphaInstances returns a new mock for Instances. -func NewMockAlphaInstances(pr ProjectRouter, objs map[meta.Key]*MockInstancesObj) *MockAlphaInstances { - mock := &MockAlphaInstances{ +// NewMockAlphaRegionHealthChecks returns a new mock for RegionHealthChecks. +func NewMockAlphaRegionHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockRegionHealthChecksObj) *MockAlphaRegionHealthChecks { + mock := &MockAlphaRegionHealthChecks{ ProjectRouter: pr, Objects: objs, @@ -10204,14 +11166,14 @@ func NewMockAlphaInstances(pr ProjectRouter, objs map[meta.Key]*MockInstancesObj return mock } -// MockAlphaInstances is the mock for Instances. -type MockAlphaInstances struct { +// MockAlphaRegionHealthChecks is the mock for RegionHealthChecks. +type MockAlphaRegionHealthChecks struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockInstancesObj + Objects map[meta.Key]*MockRegionHealthChecksObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -10224,13 +11186,11 @@ type MockAlphaInstances struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. 
- GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaInstances) (bool, *alpha.Instance, error) - ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockAlphaInstances) (bool, []*alpha.Instance, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.Instance, m *MockAlphaInstances) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaInstances) (bool, error) - AttachDiskHook func(context.Context, *meta.Key, *alpha.AttachedDisk, *MockAlphaInstances) error - DetachDiskHook func(context.Context, *meta.Key, string, *MockAlphaInstances) error - UpdateNetworkInterfaceHook func(context.Context, *meta.Key, string, *alpha.NetworkInterface, *MockAlphaInstances) error + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionHealthChecks) (bool, *alpha.HealthCheck, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAlphaRegionHealthChecks) (bool, []*alpha.HealthCheck, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck, m *MockAlphaRegionHealthChecks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionHealthChecks) (bool, error) + UpdateHook func(context.Context, *meta.Key, *alpha.HealthCheck, *MockAlphaRegionHealthChecks) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -10238,10 +11198,10 @@ type MockAlphaInstances struct { } // Get returns the object from the mock. -func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Instance, error) { +func (m *MockAlphaRegionHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -10253,28 +11213,28 @@ func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Ins defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaRegionHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockAlphaInstances %v not found", key), + Message: fmt.Sprintf("MockAlphaRegionHealthChecks %v not found", key), } - klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock in the given zone. -func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) { +// List all of the objects in the mock in the given region. 
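// Illustrative sketch, not part of the vendored diff: how the xxxHook fields
// declared above are meant to be used. A test seeds the mock and intercepts
// individual calls; Get/List/Insert/Delete hooks return an intercept flag
// (true skips the built-in behavior, false falls through), while method hooks
// such as UpdateHook simply replace the call. Import paths are assumed to be
// the vendored k8s-cloud-provider package this generated file lives in, and
// pr is whatever ProjectRouter implementation the caller already has.
package example

import (
	"context"
	"fmt"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
	alpha "google.golang.org/api/compute/v0.alpha"
)

func interceptRegionHealthCheckCalls(ctx context.Context, pr cloud.ProjectRouter) error {
	mock := cloud.NewMockAlphaRegionHealthChecks(pr, map[meta.Key]*cloud.MockRegionHealthChecksObj{})

	// Fail Get for one specific (made-up) key; all other keys fall through to
	// the normal mock behavior because intercept == false.
	mock.GetHook = func(ctx context.Context, key *meta.Key, m *cloud.MockAlphaRegionHealthChecks) (bool, *alpha.HealthCheck, error) {
		if key.Name == "flaky-hc" {
			return true, nil, fmt.Errorf("injected failure for %v", key)
		}
		return false, nil, nil
	}

	// UpdateHook has no intercept flag; it replaces Update entirely.
	mock.UpdateHook = func(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck, m *cloud.MockAlphaRegionHealthChecks) error {
		return nil // pretend every update succeeds
	}

	_, err := mock.Get(ctx, meta.RegionalKey("flaky-hc", "us-central1"))
	return err // the error injected by GetHook
}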
+func (m *MockAlphaRegionHealthChecks) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.HealthCheck, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockAlphaRegionHealthChecks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -10284,14 +11244,14 @@ func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockAlphaRegionHealthChecks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*alpha.Instance + var objs []*alpha.HealthCheck for key, obj := range m.Objects { - if key.Zone != zone { + if key.Region != region { continue } if !fl.Match(obj.ToAlpha()) { @@ -10300,15 +11260,15 @@ func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F objs = append(objs, obj.ToAlpha()) } - klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockAlphaRegionHealthChecks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alpha.Instance) error { +func (m *MockAlphaRegionHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -10320,32 +11280,32 @@ func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alp defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockAlphaInstances %v exists", key), + Message: fmt.Sprintf("MockAlphaRegionHealthChecks %v exists", key), } - klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "instances") - obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "instances", key) + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "healthChecks") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "healthChecks", key) - m.Objects[*key] = &MockInstancesObj{obj} - klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockRegionHealthChecksObj{obj} + klog.V(5).Infof("MockAlphaRegionHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockAlphaRegionHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -10357,307 +11317,222 @@ func (m *MockAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockAlphaInstances %v not found", key), + Message: fmt.Sprintf("MockAlphaRegionHealthChecks %v not found", key), } - klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaRegionHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockAlphaInstances) Obj(o *alpha.Instance) *MockInstancesObj { - return &MockInstancesObj{o} +func (m *MockAlphaRegionHealthChecks) Obj(o *alpha.HealthCheck) *MockRegionHealthChecksObj { + return &MockRegionHealthChecksObj{o} } -// AttachDisk is a mock for the corresponding method. -func (m *MockAlphaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *alpha.AttachedDisk) error { - if m.AttachDiskHook != nil { - return m.AttachDiskHook(ctx, key, arg0, m) - } - return nil -} - -// DetachDisk is a mock for the corresponding method. -func (m *MockAlphaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { - if m.DetachDiskHook != nil { - return m.DetachDiskHook(ctx, key, arg0, m) - } - return nil -} - -// UpdateNetworkInterface is a mock for the corresponding method. -func (m *MockAlphaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *alpha.NetworkInterface) error { - if m.UpdateNetworkInterfaceHook != nil { - return m.UpdateNetworkInterfaceHook(ctx, key, arg0, arg1, m) +// Update is a mock for the corresponding method. +func (m *MockAlphaRegionHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *alpha.HealthCheck) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) } return nil } -// GCEAlphaInstances is a simplifying adapter for the GCE Instances. -type GCEAlphaInstances struct { +// GCEAlphaRegionHealthChecks is a simplifying adapter for the GCE RegionHealthChecks. +type GCEAlphaRegionHealthChecks struct { s *Service } -// Get the Instance named by key. -func (g *GCEAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Instance, error) { - klog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): called", ctx, key) +// Get the HealthCheck named by key. 
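// Illustrative sketch, not part of the vendored diff: two ways to seed the
// mock shown above. Insert goes through the normal mock path (it fills in
// Name and SelfLink and reports a 409 on duplicates, as in the generated code
// above), while Obj() wraps an object so it can be placed straight into the
// shared Objects map. Import paths are assumed; the region and names are
// made up for the example.
package example

import (
	"context"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter"
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
	alpha "google.golang.org/api/compute/v0.alpha"
)

func seedRegionHealthChecks(ctx context.Context, pr cloud.ProjectRouter) ([]*alpha.HealthCheck, error) {
	objs := map[meta.Key]*cloud.MockRegionHealthChecksObj{}
	mock := cloud.NewMockAlphaRegionHealthChecks(pr, objs)

	// Path 1: Insert fills in Name and SelfLink exactly like the mock above.
	if err := mock.Insert(ctx, meta.RegionalKey("hc-a", "us-east1"), &alpha.HealthCheck{Type: "TCP"}); err != nil {
		return nil, err
	}

	// Path 2: pre-populate the shared map directly with a wrapped object.
	objs[*meta.RegionalKey("hc-b", "us-east1")] = mock.Obj(&alpha.HealthCheck{Name: "hc-b"})

	// List filters on key.Region, so both entries come back for "us-east1".
	return mock.List(ctx, "us-east1", filter.None)
}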
+func (g *GCEAlphaRegionHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) { + klog.V(5).Infof("GCEAlphaRegionHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", Version: meta.Version("alpha"), - Service: "Instances", + Service: "RegionHealthChecks", } - klog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Alpha.Instances.Get(projectID, key.Zone, key.Name) + call := g.s.Alpha.RegionHealthChecks.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEAlphaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaRegionHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all Instance objects. -func (g *GCEAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) { - klog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v) called", ctx, zone, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") +// List all HealthCheck objects. +func (g *GCEAlphaRegionHealthChecks) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.HealthCheck, error) { + klog.V(5).Infof("GCEAlphaRegionHealthChecks.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", Version: meta.Version("alpha"), - Service: "Instances", + Service: "RegionHealthChecks", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) - call := g.s.Alpha.Instances.List(projectID, zone) + klog.V(5).Infof("GCEAlphaRegionHealthChecks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Alpha.RegionHealthChecks.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*alpha.Instance - f := func(l *alpha.InstanceList) error { - klog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*alpha.HealthCheck + f := func(l *alpha.HealthCheckList) error { + klog.V(5).Infof("GCEAlphaRegionHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaRegionHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEAlphaRegionHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaRegionHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert Instance with key of value obj. -func (g *GCEAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alpha.Instance) error { - klog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert HealthCheck with key of value obj. +func (g *GCEAlphaRegionHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error { + klog.V(5).Infof("GCEAlphaRegionHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEAlphaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", Version: meta.Version("alpha"), - Service: "Instances", + Service: "RegionHealthChecks", } - klog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.Alpha.Instances.Insert(projectID, key.Zone, obj) + call := g.s.Alpha.RegionHealthChecks.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaRegionHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the Instance referenced by key. -func (g *GCEAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): called", ctx, key) +// Delete the HealthCheck referenced by key. 
+func (g *GCEAlphaRegionHealthChecks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaRegionHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", Version: meta.Version("alpha"), - Service: "Instances", + Service: "RegionHealthChecks", } - klog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Alpha.Instances.Delete(projectID, key.Zone, key.Name) + call := g.s.Alpha.RegionHealthChecks.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) - return err - } - - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) - return err -} - -// AttachDisk is a method on GCEAlphaInstances. -func (g *GCEAlphaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *alpha.AttachedDisk) error { - klog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "AttachDisk", - Version: meta.Version("alpha"), - Service: "Instances", - } - klog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.Alpha.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) - call.Context(ctx) - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) - return err -} - -// DetachDisk is a method on GCEAlphaInstances. 
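// Illustrative sketch, not part of the vendored diff: every adapter method in
// this file follows the same shape (resolve the project via ProjectRouter,
// build a RateLimitKey, ask the RateLimiter to Accept, issue the API call,
// then WaitForCompletion for operations). Throttling policy therefore lives
// entirely in the RateLimiter the Service was built with. The Accept
// signature below is inferred from the g.s.RateLimiter.Accept(ctx, rk) calls
// above, not stated in this patch.
package example

import (
	"context"
	"time"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
)

// slowMutationsRateLimiter delays mutating calls but lets reads through
// immediately; it is only a sketch of the kind of policy this hook enables.
type slowMutationsRateLimiter struct {
	mutationDelay time.Duration
}

func (r *slowMutationsRateLimiter) Accept(ctx context.Context, key *cloud.RateLimitKey) error {
	switch key.Operation {
	case "Get", "List", "AggregatedList":
		return ctx.Err() // reads: only honor cancellation
	default:
		select {
		case <-time.After(r.mutationDelay): // Insert, Delete, Update, ...
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}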
-func (g *GCEAlphaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { - klog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "DetachDisk", - Version: meta.Version("alpha"), - Service: "Instances", - } - klog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.Alpha.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) - call.Context(ctx) - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) - return err - } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } -// UpdateNetworkInterface is a method on GCEAlphaInstances. -func (g *GCEAlphaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *alpha.NetworkInterface) error { - klog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key) +// Update is a method on GCEAlphaRegionHealthChecks. +func (g *GCEAlphaRegionHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *alpha.HealthCheck) error { + klog.V(5).Infof("GCEAlphaRegionHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "UpdateNetworkInterface", + Operation: "Update", Version: meta.Version("alpha"), - Service: "Instances", + Service: "RegionHealthChecks", } - klog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Alpha.Instances.UpdateNetworkInterface(projectID, key.Zone, key.Name, arg0, arg1) + call := g.s.Alpha.RegionHealthChecks.Update(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionHealthChecks.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// AlphaNetworkEndpointGroups is an interface that allows for mocking of NetworkEndpointGroups. -type AlphaNetworkEndpointGroups interface { - Get(ctx context.Context, key *meta.Key) (*alpha.NetworkEndpointGroup, error) - List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) - Insert(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup) error +// BetaRegionHealthChecks is an interface that allows for mocking of RegionHealthChecks. +type BetaRegionHealthChecks interface { + Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) + List(ctx context.Context, region string, fl *filter.F) ([]*beta.HealthCheck, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error Delete(ctx context.Context, key *meta.Key) error - AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) - AttachNetworkEndpoints(context.Context, *meta.Key, *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error - DetachNetworkEndpoints(context.Context, *meta.Key, *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error - ListNetworkEndpoints(context.Context, *meta.Key, *alpha.NetworkEndpointGroupsListEndpointsRequest, *filter.F) ([]*alpha.NetworkEndpointWithHealthStatus, error) + Update(context.Context, *meta.Key, *beta.HealthCheck) error } -// NewMockAlphaNetworkEndpointGroups returns a new mock for NetworkEndpointGroups. -func NewMockAlphaNetworkEndpointGroups(pr ProjectRouter, objs map[meta.Key]*MockNetworkEndpointGroupsObj) *MockAlphaNetworkEndpointGroups { - mock := &MockAlphaNetworkEndpointGroups{ +// NewMockBetaRegionHealthChecks returns a new mock for RegionHealthChecks. +func NewMockBetaRegionHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockRegionHealthChecksObj) *MockBetaRegionHealthChecks { + mock := &MockBetaRegionHealthChecks{ ProjectRouter: pr, Objects: objs, @@ -10668,35 +11543,31 @@ func NewMockAlphaNetworkEndpointGroups(pr ProjectRouter, objs map[meta.Key]*Mock return mock } -// MockAlphaNetworkEndpointGroups is the mock for NetworkEndpointGroups. -type MockAlphaNetworkEndpointGroups struct { +// MockBetaRegionHealthChecks is the mock for RegionHealthChecks. +type MockBetaRegionHealthChecks struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockNetworkEndpointGroupsObj + Objects map[meta.Key]*MockRegionHealthChecksObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. - GetError map[meta.Key]error - ListError *error - InsertError map[meta.Key]error - DeleteError map[meta.Key]error - AggregatedListError *error + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error // xxxHook allow you to intercept the standard processing of the mock in // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. 
- GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaNetworkEndpointGroups) (bool, *alpha.NetworkEndpointGroup, error) - ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockAlphaNetworkEndpointGroups) (bool, []*alpha.NetworkEndpointGroup, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup, m *MockAlphaNetworkEndpointGroups) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaNetworkEndpointGroups) (bool, error) - AggregatedListHook func(ctx context.Context, fl *filter.F, m *MockAlphaNetworkEndpointGroups) (bool, map[string][]*alpha.NetworkEndpointGroup, error) - AttachNetworkEndpointsHook func(context.Context, *meta.Key, *alpha.NetworkEndpointGroupsAttachEndpointsRequest, *MockAlphaNetworkEndpointGroups) error - DetachNetworkEndpointsHook func(context.Context, *meta.Key, *alpha.NetworkEndpointGroupsDetachEndpointsRequest, *MockAlphaNetworkEndpointGroups) error - ListNetworkEndpointsHook func(context.Context, *meta.Key, *alpha.NetworkEndpointGroupsListEndpointsRequest, *filter.F, *MockAlphaNetworkEndpointGroups) ([]*alpha.NetworkEndpointWithHealthStatus, error) + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionHealthChecks) (bool, *beta.HealthCheck, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaRegionHealthChecks) (bool, []*beta.HealthCheck, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.HealthCheck, m *MockBetaRegionHealthChecks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionHealthChecks) (bool, error) + UpdateHook func(context.Context, *meta.Key, *beta.HealthCheck, *MockBetaRegionHealthChecks) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -10704,10 +11575,10 @@ type MockAlphaNetworkEndpointGroups struct { } // Get returns the object from the mock. 
-func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*alpha.NetworkEndpointGroup, error) { +func (m *MockBetaRegionHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -10719,28 +11590,28 @@ func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToAlpha() - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaRegionHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key), + Message: fmt.Sprintf("MockBetaRegionHealthChecks %v not found", key), } - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock in the given zone. -func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) { +// List all of the objects in the mock in the given region. +func (m *MockBetaRegionHealthChecks) List(ctx context.Context, region string, fl *filter.F) ([]*beta.HealthCheck, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockBetaRegionHealthChecks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -10750,31 +11621,31 @@ func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockBetaRegionHealthChecks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*alpha.NetworkEndpointGroup + var objs []*beta.HealthCheck for key, obj := range m.Objects { - if key.Zone != zone { + if key.Region != region { continue } - if !fl.Match(obj.ToAlpha()) { + if !fl.Match(obj.ToBeta()) { continue } - objs = append(objs, obj.ToAlpha()) + objs = append(objs, obj.ToBeta()) } - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockBetaRegionHealthChecks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup) error { +func (m *MockBetaRegionHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -10786,32 +11657,32 @@ func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v exists", key), + Message: fmt.Sprintf("MockBetaRegionHealthChecks %v exists", key), } - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "networkEndpointGroups") - obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "networkEndpointGroups", key) + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "healthChecks") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "healthChecks", key) - m.Objects[*key] = &MockNetworkEndpointGroupsObj{obj} - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockRegionHealthChecksObj{obj} + klog.V(5).Infof("MockBetaRegionHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockBetaRegionHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -10823,440 +11694,257 @@ func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key), + Message: fmt.Sprintf("MockBetaRegionHealthChecks %v not found", key), } - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaRegionHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } -// AggregatedList is a mock for AggregatedList. 
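// Illustrative sketch, not part of the vendored diff: a single meta.Key type
// carries zonal, regional and global resources, and the code in this hunk
// switches on which field is set (the removed Instances/NetworkEndpointGroups
// methods read key.Zone, the new RegionHealthChecks methods read key.Region).
// The constructors below come from the meta package this file already
// imports; the resource names are made up.
package example

import "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"

func exampleKeys() []*meta.Key {
	return []*meta.Key{
		meta.ZonalKey("instance-1", "us-central1-b"), // key.Zone is set
		meta.RegionalKey("hc-1", "us-central1"),      // key.Region is set
		meta.GlobalKey("http-hc-1"),                  // neither is set
	}
}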
-func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) { - if m.AggregatedListHook != nil { - if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) - return objs, err - } +// Obj wraps the object for use in the mock. +func (m *MockBetaRegionHealthChecks) Obj(o *beta.HealthCheck) *MockRegionHealthChecksObj { + return &MockRegionHealthChecksObj{o} +} + +// Update is a mock for the corresponding method. +func (m *MockBetaRegionHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *beta.HealthCheck) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) } + return nil +} - m.Lock.Lock() - defer m.Lock.Unlock() +// GCEBetaRegionHealthChecks is a simplifying adapter for the GCE RegionHealthChecks. +type GCEBetaRegionHealthChecks struct { + s *Service +} - if m.AggregatedListError != nil { - err := *m.AggregatedListError - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) - return nil, err - } - - objs := map[string][]*alpha.NetworkEndpointGroup{} - for _, obj := range m.Objects { - res, err := ParseResourceURL(obj.ToAlpha().SelfLink) - location := res.Key.Zone - if err != nil { - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) - return nil, err - } - if !fl.Match(obj.ToAlpha()) { - continue - } - objs[location] = append(objs[location], obj.ToAlpha()) - } - klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) - return objs, nil -} - -// Obj wraps the object for use in the mock. -func (m *MockAlphaNetworkEndpointGroups) Obj(o *alpha.NetworkEndpointGroup) *MockNetworkEndpointGroupsObj { - return &MockNetworkEndpointGroupsObj{o} -} - -// AttachNetworkEndpoints is a mock for the corresponding method. -func (m *MockAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error { - if m.AttachNetworkEndpointsHook != nil { - return m.AttachNetworkEndpointsHook(ctx, key, arg0, m) - } - return nil -} - -// DetachNetworkEndpoints is a mock for the corresponding method. -func (m *MockAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error { - if m.DetachNetworkEndpointsHook != nil { - return m.DetachNetworkEndpointsHook(ctx, key, arg0, m) - } - return nil -} - -// ListNetworkEndpoints is a mock for the corresponding method. -func (m *MockAlphaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*alpha.NetworkEndpointWithHealthStatus, error) { - if m.ListNetworkEndpointsHook != nil { - return m.ListNetworkEndpointsHook(ctx, key, arg0, fl, m) - } - return nil, nil -} - -// GCEAlphaNetworkEndpointGroups is a simplifying adapter for the GCE NetworkEndpointGroups. -type GCEAlphaNetworkEndpointGroups struct { - s *Service -} - -// Get the NetworkEndpointGroup named by key. -func (g *GCEAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*alpha.NetworkEndpointGroup, error) { - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) +// Get the HealthCheck named by key. 
+func (g *GCEBetaRegionHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) { + klog.V(5).Infof("GCEBetaRegionHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("alpha"), - Service: "NetworkEndpointGroups", + Version: meta.Version("beta"), + Service: "RegionHealthChecks", } - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Alpha.NetworkEndpointGroups.Get(projectID, key.Zone, key.Name) + call := g.s.Beta.RegionHealthChecks.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaRegionHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all NetworkEndpointGroup objects. -func (g *GCEAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) { - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") +// List all HealthCheck objects. +func (g *GCEBetaRegionHealthChecks) List(ctx context.Context, region string, fl *filter.F) ([]*beta.HealthCheck, error) { + klog.V(5).Infof("GCEBetaRegionHealthChecks.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("alpha"), - Service: "NetworkEndpointGroups", + Version: meta.Version("beta"), + Service: "RegionHealthChecks", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) - call := g.s.Alpha.NetworkEndpointGroups.List(projectID, zone) + klog.V(5).Infof("GCEBetaRegionHealthChecks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Beta.RegionHealthChecks.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*alpha.NetworkEndpointGroup - f := func(l *alpha.NetworkEndpointGroupList) error { - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*beta.HealthCheck + f := func(l *beta.HealthCheckList) error { + klog.V(5).Infof("GCEBetaRegionHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaRegionHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEBetaRegionHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaRegionHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert NetworkEndpointGroup with key of value obj. -func (g *GCEAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup) error { - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert HealthCheck with key of value obj. +func (g *GCEBetaRegionHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error { + klog.V(5).Infof("GCEBetaRegionHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("alpha"), - Service: "NetworkEndpointGroups", + Version: meta.Version("beta"), + Service: "RegionHealthChecks", } - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.Alpha.NetworkEndpointGroups.Insert(projectID, key.Zone, obj) + call := g.s.Beta.RegionHealthChecks.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaRegionHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the NetworkEndpointGroup referenced by key. -func (g *GCEAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) +// Delete the HealthCheck referenced by key. 
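// Illustrative sketch, not part of the vendored diff: the List adapters above
// forward fl.String() to the Compute API as a server-side filter, while the
// mocks evaluate fl.Match(obj) client-side, so the same *filter.F works
// against either one when code is written against the interface. filter.Regexp
// is assumed to exist in the vendored filter package; use filter.None to list
// everything. The "k8s-" naming convention is made up.
package example

import (
	"context"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter"
	beta "google.golang.org/api/compute/v0.beta"
)

func listManagedHealthChecks(ctx context.Context, hcs cloud.BetaRegionHealthChecks, region string) ([]*beta.HealthCheck, error) {
	// Only health checks whose name starts with "k8s-"; hcs may be the real
	// GCEBetaRegionHealthChecks adapter or the MockBetaRegionHealthChecks.
	fl := filter.Regexp("name", "k8s-.*")
	return hcs.List(ctx, region, fl)
}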
+func (g *GCEBetaRegionHealthChecks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaRegionHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("alpha"), - Service: "NetworkEndpointGroups", + Version: meta.Version("beta"), + Service: "RegionHealthChecks", } - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Alpha.NetworkEndpointGroups.Delete(projectID, key.Zone, key.Name) + call := g.s.Beta.RegionHealthChecks.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } -// AggregatedList lists all resources of the given type across all locations. -func (g *GCEAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) { - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) - - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "AggregatedList", - Version: meta.Version("alpha"), - Service: "NetworkEndpointGroups", - } - - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) - return nil, err - } - - call := g.s.Alpha.NetworkEndpointGroups.AggregatedList(projectID) - call.Context(ctx) - if fl != filter.None { - call.Filter(fl.String()) - } - - all := map[string][]*alpha.NetworkEndpointGroup{} - f := func(l *alpha.NetworkEndpointGroupAggregatedList) error { - for k, v := range l.Items { - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) - all[k] = append(all[k], v.NetworkEndpointGroups...) 
- } - return nil - } - if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) - return nil, err - } - if klog.V(4) { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if klog.V(5) { - var asStr []string - for _, o := range all { - asStr = append(asStr, fmt.Sprintf("%+v", o)) - } - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) - } - return all, nil -} - -// AttachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. -func (g *GCEAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error { - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) +// Update is a method on GCEBetaRegionHealthChecks. +func (g *GCEBetaRegionHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *beta.HealthCheck) error { + klog.V(5).Infof("GCEBetaRegionHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "AttachNetworkEndpoints", - Version: meta.Version("alpha"), - Service: "NetworkEndpointGroups", + Operation: "Update", + Version: meta.Version("beta"), + Service: "RegionHealthChecks", } - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Alpha.NetworkEndpointGroups.AttachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + call := g.s.Beta.RegionHealthChecks.Update(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// DetachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. -func (g *GCEAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error { - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) +// HttpHealthChecks is an interface that allows for mocking of HttpHealthChecks. 
+type HttpHealthChecks interface { + Get(ctx context.Context, key *meta.Key) (*ga.HttpHealthCheck, error) + List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck) error + Delete(ctx context.Context, key *meta.Key) error + Update(context.Context, *meta.Key, *ga.HttpHealthCheck) error +} - if !key.Valid() { - klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "DetachNetworkEndpoints", - Version: meta.Version("alpha"), - Service: "NetworkEndpointGroups", - } - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) +// NewMockHttpHealthChecks returns a new mock for HttpHealthChecks. +func NewMockHttpHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHttpHealthChecksObj) *MockHttpHealthChecks { + mock := &MockHttpHealthChecks{ + ProjectRouter: pr, - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.Alpha.NetworkEndpointGroups.DetachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) - call.Context(ctx) - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) - return err + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, } - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) - return err + return mock } -// ListNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. -func (g *GCEAlphaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*alpha.NetworkEndpointWithHealthStatus, error) { - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) +// MockHttpHealthChecks is the mock for HttpHealthChecks. 
+type MockHttpHealthChecks struct { + Lock sync.Mutex - if !key.Valid() { - klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return nil, fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "ListNetworkEndpoints", - Version: meta.Version("alpha"), - Service: "NetworkEndpointGroups", - } - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return nil, err - } - call := g.s.Alpha.NetworkEndpointGroups.ListNetworkEndpoints(projectID, key.Zone, key.Name, arg0) - var all []*alpha.NetworkEndpointWithHealthStatus - f := func(l *alpha.NetworkEndpointGroupsListNetworkEndpoints) error { - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) - all = append(all, l.Items...) - return nil - } - if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) - return nil, err - } - if klog.V(4) { - klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) - } else if klog.V(5) { - var asStr []string - for _, o := range all { - asStr = append(asStr, fmt.Sprintf("%+v", o)) - } - klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) - } - return all, nil -} - -// BetaNetworkEndpointGroups is an interface that allows for mocking of NetworkEndpointGroups. -type BetaNetworkEndpointGroups interface { - Get(ctx context.Context, key *meta.Key) (*beta.NetworkEndpointGroup, error) - List(ctx context.Context, zone string, fl *filter.F) ([]*beta.NetworkEndpointGroup, error) - Insert(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup) error - Delete(ctx context.Context, key *meta.Key) error - AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.NetworkEndpointGroup, error) - AttachNetworkEndpoints(context.Context, *meta.Key, *beta.NetworkEndpointGroupsAttachEndpointsRequest) error - DetachNetworkEndpoints(context.Context, *meta.Key, *beta.NetworkEndpointGroupsDetachEndpointsRequest) error - ListNetworkEndpoints(context.Context, *meta.Key, *beta.NetworkEndpointGroupsListEndpointsRequest, *filter.F) ([]*beta.NetworkEndpointWithHealthStatus, error) -} - -// NewMockBetaNetworkEndpointGroups returns a new mock for NetworkEndpointGroups. -func NewMockBetaNetworkEndpointGroups(pr ProjectRouter, objs map[meta.Key]*MockNetworkEndpointGroupsObj) *MockBetaNetworkEndpointGroups { - mock := &MockBetaNetworkEndpointGroups{ - ProjectRouter: pr, - - Objects: objs, - GetError: map[meta.Key]error{}, - InsertError: map[meta.Key]error{}, - DeleteError: map[meta.Key]error{}, - } - return mock -} - -// MockBetaNetworkEndpointGroups is the mock for NetworkEndpointGroups. -type MockBetaNetworkEndpointGroups struct { - Lock sync.Mutex - - ProjectRouter ProjectRouter + ProjectRouter ProjectRouter // Objects maintained by the mock. 
- Objects map[meta.Key]*MockNetworkEndpointGroupsObj + Objects map[meta.Key]*MockHttpHealthChecksObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. - GetError map[meta.Key]error - ListError *error - InsertError map[meta.Key]error - DeleteError map[meta.Key]error - AggregatedListError *error + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error // xxxHook allow you to intercept the standard processing of the mock in // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockBetaNetworkEndpointGroups) (bool, *beta.NetworkEndpointGroup, error) - ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockBetaNetworkEndpointGroups) (bool, []*beta.NetworkEndpointGroup, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup, m *MockBetaNetworkEndpointGroups) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaNetworkEndpointGroups) (bool, error) - AggregatedListHook func(ctx context.Context, fl *filter.F, m *MockBetaNetworkEndpointGroups) (bool, map[string][]*beta.NetworkEndpointGroup, error) - AttachNetworkEndpointsHook func(context.Context, *meta.Key, *beta.NetworkEndpointGroupsAttachEndpointsRequest, *MockBetaNetworkEndpointGroups) error - DetachNetworkEndpointsHook func(context.Context, *meta.Key, *beta.NetworkEndpointGroupsDetachEndpointsRequest, *MockBetaNetworkEndpointGroups) error - ListNetworkEndpointsHook func(context.Context, *meta.Key, *beta.NetworkEndpointGroupsListEndpointsRequest, *filter.F, *MockBetaNetworkEndpointGroups) ([]*beta.NetworkEndpointWithHealthStatus, error) + GetHook func(ctx context.Context, key *meta.Key, m *MockHttpHealthChecks) (bool, *ga.HttpHealthCheck, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockHttpHealthChecks) (bool, []*ga.HttpHealthCheck, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck, m *MockHttpHealthChecks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockHttpHealthChecks) (bool, error) + UpdateHook func(context.Context, *meta.Key, *ga.HttpHealthCheck, *MockHttpHealthChecks) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -11264,10 +11952,10 @@ type MockBetaNetworkEndpointGroups struct { } // Get returns the object from the mock. 
-func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*beta.NetworkEndpointGroup, error) { +func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpHealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -11279,28 +11967,28 @@ func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToBeta() - klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToGA() + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v not found", key), + Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key), } - klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock in the given zone. -func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.NetworkEndpointGroup, error) { +// List all of the objects in the mock. +func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -11310,31 +11998,28 @@ func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, f if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*beta.NetworkEndpointGroup - for key, obj := range m.Objects { - if key.Zone != zone { - continue - } - if !fl.Match(obj.ToBeta()) { + var objs []*ga.HttpHealthCheck + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { continue } - objs = append(objs, obj.ToBeta()) + objs = append(objs, obj.ToGA()) } - klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
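The xxxHook fields declared for MockHttpHealthChecks above are the intended extension point: a hook runs before the mock's map-backed default, and returning true short-circuits it while returning false falls through. Below is a minimal usage sketch, not part of the vendored diff; it is written as if it sat in the same package as the generated code so the ga and meta aliases resolve the same way, and the function name demoGetHook and the health-check name are illustrative only.

    // demoGetHook intercepts Get for one well-known name and lets every
    // other key fall through to the normal map-backed lookup.
    func demoGetHook(ctx context.Context) (*ga.HttpHealthCheck, error) {
        // nil ProjectRouter: assumed safe here because the router is only
        // consulted by the mock's Insert (for SelfLink), which this sketch
        // never calls.
        mock := NewMockHttpHealthChecks(nil, map[meta.Key]*MockHttpHealthChecksObj{})
        mock.GetHook = func(ctx context.Context, key *meta.Key, m *MockHttpHealthChecks) (bool, *ga.HttpHealthCheck, error) {
            if key.Name == "hc-under-test" {
                // true => the mock returns exactly this object and error.
                return true, &ga.HttpHealthCheck{Name: key.Name}, nil
            }
            // false => continue with the default behavior (Objects / GetError).
            return false, nil, nil
        }
        return mock.Get(ctx, &meta.Key{Name: "hc-under-test"})
    }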
-func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup) error { +func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -11346,32 +12031,32 @@ func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v exists", key), + Message: fmt.Sprintf("MockHttpHealthChecks %v exists", key), } - klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "beta", "networkEndpointGroups") - obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "networkEndpointGroups", key) + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "httpHealthChecks") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "httpHealthChecks", key) - m.Objects[*key] = &MockNetworkEndpointGroupsObj{obj} - klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockHttpHealthChecksObj{obj} + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -11383,472 +12068,12082 @@ func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v not found", key), + Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key), } - klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } -// AggregatedList is a mock for AggregatedList. 
-func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.NetworkEndpointGroup, error) { - if m.AggregatedListHook != nil { - if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) - return objs, err - } - } - - m.Lock.Lock() - defer m.Lock.Unlock() - - if m.AggregatedListError != nil { - err := *m.AggregatedListError - klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) - return nil, err - } - - objs := map[string][]*beta.NetworkEndpointGroup{} - for _, obj := range m.Objects { - res, err := ParseResourceURL(obj.ToBeta().SelfLink) - location := res.Key.Zone - if err != nil { - klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) - return nil, err - } - if !fl.Match(obj.ToBeta()) { - continue - } - objs[location] = append(objs[location], obj.ToBeta()) - } - klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) - return objs, nil -} - // Obj wraps the object for use in the mock. -func (m *MockBetaNetworkEndpointGroups) Obj(o *beta.NetworkEndpointGroup) *MockNetworkEndpointGroupsObj { - return &MockNetworkEndpointGroupsObj{o} -} - -// AttachNetworkEndpoints is a mock for the corresponding method. -func (m *MockBetaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsAttachEndpointsRequest) error { - if m.AttachNetworkEndpointsHook != nil { - return m.AttachNetworkEndpointsHook(ctx, key, arg0, m) - } - return nil +func (m *MockHttpHealthChecks) Obj(o *ga.HttpHealthCheck) *MockHttpHealthChecksObj { + return &MockHttpHealthChecksObj{o} } -// DetachNetworkEndpoints is a mock for the corresponding method. -func (m *MockBetaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsDetachEndpointsRequest) error { - if m.DetachNetworkEndpointsHook != nil { - return m.DetachNetworkEndpointsHook(ctx, key, arg0, m) +// Update is a mock for the corresponding method. +func (m *MockHttpHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpHealthCheck) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) } return nil } -// ListNetworkEndpoints is a mock for the corresponding method. -func (m *MockBetaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*beta.NetworkEndpointWithHealthStatus, error) { - if m.ListNetworkEndpointsHook != nil { - return m.ListNetworkEndpointsHook(ctx, key, arg0, fl, m) - } - return nil, nil -} - -// GCEBetaNetworkEndpointGroups is a simplifying adapter for the GCE NetworkEndpointGroups. -type GCEBetaNetworkEndpointGroups struct { +// GCEHttpHealthChecks is a simplifying adapter for the GCE HttpHealthChecks. +type GCEHttpHealthChecks struct { s *Service } -// Get the NetworkEndpointGroup named by key. -func (g *GCEBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*beta.NetworkEndpointGroup, error) { - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) +// Get the HttpHealthCheck named by key. 
+func (g *GCEHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpHealthCheck, error) { + klog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("beta"), - Service: "NetworkEndpointGroups", + Version: meta.Version("ga"), + Service: "HttpHealthChecks", } - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Beta.NetworkEndpointGroups.Get(projectID, key.Zone, key.Name) + call := g.s.GA.HttpHealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all NetworkEndpointGroup objects. -func (g *GCEBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.NetworkEndpointGroup, error) { - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") +// List all HttpHealthCheck objects. +func (g *GCEHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) { + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("beta"), - Service: "NetworkEndpointGroups", + Version: meta.Version("ga"), + Service: "HttpHealthChecks", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) - call := g.s.Beta.NetworkEndpointGroups.List(projectID, zone) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.HttpHealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*beta.NetworkEndpointGroup - f := func(l *beta.NetworkEndpointGroupList) error { - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*ga.HttpHealthCheck + f := func(l *ga.HttpHealthCheckList) error { + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert NetworkEndpointGroup with key of value obj. -func (g *GCEBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup) error { - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert HttpHealthCheck with key of value obj. +func (g *GCEHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck) error { + klog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("beta"), + Version: meta.Version("ga"), + Service: "HttpHealthChecks", + } + klog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.GA.HttpHealthChecks.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the HttpHealthCheck referenced by key. 
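Each GCEHttpHealthChecks method above follows the same sequence: validate the key, resolve the project via ProjectRouter, build a RateLimitKey, ask the RateLimiter to Accept it, issue the compute call, then WaitForCompletion on the returned operation. The sketch below shows one shape a rate limiter behind that Accept call could take; it is not the package's own implementation, it assumes only the Accept(ctx, *RateLimitKey) error signature implied by the calls above plus the standard context, sync, and time imports, and the type and constant names are made up for illustration.

    // sketchRateLimiter lets read-only operations through immediately and
    // spaces out everything else by a fixed interval.
    type sketchRateLimiter struct {
        mu   sync.Mutex
        last time.Time
    }

    // minMutationGap is an arbitrary pacing interval for the sketch.
    const minMutationGap = 100 * time.Millisecond

    func (r *sketchRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
        if key.Operation == "Get" || key.Operation == "List" {
            return nil // reads are not throttled in this sketch
        }
        r.mu.Lock()
        defer r.mu.Unlock() // holding the lock serializes mutations on purpose
        if wait := minMutationGap - time.Since(r.last); wait > 0 {
            select {
            case <-time.After(wait):
            case <-ctx.Done():
                return ctx.Err()
            }
        }
        r.last = time.Now()
        return nil
    }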
+func (g *GCEHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEHttpHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "HttpHealthChecks", + } + klog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.HttpHealthChecks.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// Update is a method on GCEHttpHealthChecks. +func (g *GCEHttpHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpHealthCheck) error { + klog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("ga"), + Service: "HttpHealthChecks", + } + klog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.HttpHealthChecks.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// HttpsHealthChecks is an interface that allows for mocking of HttpsHealthChecks. +type HttpsHealthChecks interface { + Get(ctx context.Context, key *meta.Key) (*ga.HttpsHealthCheck, error) + List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck) error + Delete(ctx context.Context, key *meta.Key) error + Update(context.Context, *meta.Key, *ga.HttpsHealthCheck) error +} + +// NewMockHttpsHealthChecks returns a new mock for HttpsHealthChecks. +func NewMockHttpsHealthChecks(pr ProjectRouter, objs map[meta.Key]*MockHttpsHealthChecksObj) *MockHttpsHealthChecks { + mock := &MockHttpsHealthChecks{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockHttpsHealthChecks is the mock for HttpsHealthChecks. +type MockHttpsHealthChecks struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. 
+ Objects map[meta.Key]*MockHttpsHealthChecksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockHttpsHealthChecks) (bool, *ga.HttpsHealthCheck, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockHttpsHealthChecks) (bool, []*ga.HttpsHealthCheck, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck, m *MockHttpsHealthChecks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockHttpsHealthChecks) (bool, error) + UpdateHook func(context.Context, *meta.Key, *ga.HttpsHealthCheck, *MockHttpsHealthChecks) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpsHealthCheck, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToGA() + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key), + } + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.HttpsHealthCheck + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockHttpsHealthChecks %v exists", key), + } + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "httpsHealthChecks") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "httpsHealthChecks", key) + + m.Objects[*key] = &MockHttpsHealthChecksObj{obj} + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key), + } + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockHttpsHealthChecks) Obj(o *ga.HttpsHealthCheck) *MockHttpsHealthChecksObj { + return &MockHttpsHealthChecksObj{o} +} + +// Update is a mock for the corresponding method. +func (m *MockHttpsHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpsHealthCheck) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) + } + return nil +} + +// GCEHttpsHealthChecks is a simplifying adapter for the GCE HttpsHealthChecks. +type GCEHttpsHealthChecks struct { + s *Service +} + +// Get the HttpsHealthCheck named by key. 
+func (g *GCEHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpsHealthCheck, error) { + klog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEHttpsHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "HttpsHealthChecks", + } + klog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.HttpsHealthChecks.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all HttpsHealthCheck objects. +func (g *GCEHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) { + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "HttpsHealthChecks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.HttpsHealthChecks.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.HttpsHealthCheck + f := func(l *ga.HttpsHealthCheckList) error { + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert HttpsHealthCheck with key of value obj. 
+func (g *GCEHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck) error { + klog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "HttpsHealthChecks", + } + klog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.GA.HttpsHealthChecks.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the HttpsHealthCheck referenced by key. +func (g *GCEHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEHttpsHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "HttpsHealthChecks", + } + klog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.HttpsHealthChecks.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// Update is a method on GCEHttpsHealthChecks. 
+func (g *GCEHttpsHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpsHealthCheck) error { + klog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Update", + Version: meta.Version("ga"), + Service: "HttpsHealthChecks", + } + klog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.HttpsHealthChecks.Update(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// InstanceGroups is an interface that allows for mocking of InstanceGroups. +type InstanceGroups interface { + Get(ctx context.Context, key *meta.Key) (*ga.InstanceGroup, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup) error + Delete(ctx context.Context, key *meta.Key) error + AddInstances(context.Context, *meta.Key, *ga.InstanceGroupsAddInstancesRequest) error + ListInstances(context.Context, *meta.Key, *ga.InstanceGroupsListInstancesRequest, *filter.F) ([]*ga.InstanceWithNamedPorts, error) + RemoveInstances(context.Context, *meta.Key, *ga.InstanceGroupsRemoveInstancesRequest) error + SetNamedPorts(context.Context, *meta.Key, *ga.InstanceGroupsSetNamedPortsRequest) error +} + +// NewMockInstanceGroups returns a new mock for InstanceGroups. +func NewMockInstanceGroups(pr ProjectRouter, objs map[meta.Key]*MockInstanceGroupsObj) *MockInstanceGroups { + mock := &MockInstanceGroups{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockInstanceGroups is the mock for InstanceGroups. +type MockInstanceGroups struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockInstanceGroupsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. 
+ GetHook func(ctx context.Context, key *meta.Key, m *MockInstanceGroups) (bool, *ga.InstanceGroup, error) + ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockInstanceGroups) (bool, []*ga.InstanceGroup, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup, m *MockInstanceGroups) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockInstanceGroups) (bool, error) + AddInstancesHook func(context.Context, *meta.Key, *ga.InstanceGroupsAddInstancesRequest, *MockInstanceGroups) error + ListInstancesHook func(context.Context, *meta.Key, *ga.InstanceGroupsListInstancesRequest, *filter.F, *MockInstanceGroups) ([]*ga.InstanceWithNamedPorts, error) + RemoveInstancesHook func(context.Context, *meta.Key, *ga.InstanceGroupsRemoveInstancesRequest, *MockInstanceGroups) error + SetNamedPortsHook func(context.Context, *meta.Key, *ga.InstanceGroupsSetNamedPortsRequest, *MockInstanceGroups) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.InstanceGroup, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToGA() + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockInstanceGroups %v not found", key), + } + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. +func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { + klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.InstanceGroup + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockInstanceGroups %v exists", key), + } + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "instanceGroups") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "instanceGroups", key) + + m.Objects[*key] = &MockInstanceGroupsObj{obj} + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockInstanceGroups %v not found", key), + } + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockInstanceGroups) Obj(o *ga.InstanceGroup) *MockInstanceGroupsObj { + return &MockInstanceGroupsObj{o} +} + +// AddInstances is a mock for the corresponding method. +func (m *MockInstanceGroups) AddInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsAddInstancesRequest) error { + if m.AddInstancesHook != nil { + return m.AddInstancesHook(ctx, key, arg0, m) + } + return nil +} + +// ListInstances is a mock for the corresponding method. +func (m *MockInstanceGroups) ListInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsListInstancesRequest, fl *filter.F) ([]*ga.InstanceWithNamedPorts, error) { + if m.ListInstancesHook != nil { + return m.ListInstancesHook(ctx, key, arg0, fl, m) + } + return nil, nil +} + +// RemoveInstances is a mock for the corresponding method. +func (m *MockInstanceGroups) RemoveInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsRemoveInstancesRequest) error { + if m.RemoveInstancesHook != nil { + return m.RemoveInstancesHook(ctx, key, arg0, m) + } + return nil +} + +// SetNamedPorts is a mock for the corresponding method. 
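MockInstanceGroups above is a zonal mock: its Objects map is keyed by a full meta.Key, and List drops any key whose Zone differs before applying the field filter. A short seeding sketch follows, again assuming the generated file's ga/meta/filter aliases; the function name, group names, and zones are illustrative, and the direct map writes assume single-goroutine test setup (so mock.Lock is not taken).

    // demoZonalList seeds two instance groups in different zones and lists
    // one zone. A nil ProjectRouter is acceptable because the router is only
    // used by the mock's Insert (for SelfLink), which this sketch bypasses
    // by writing to Objects directly via the Obj helper.
    func demoZonalList(ctx context.Context) ([]*ga.InstanceGroup, error) {
        mock := NewMockInstanceGroups(nil, map[meta.Key]*MockInstanceGroupsObj{})
        mock.Objects[meta.Key{Name: "ig-a", Zone: "us-central1-a"}] = mock.Obj(&ga.InstanceGroup{Name: "ig-a"})
        mock.Objects[meta.Key{Name: "ig-b", Zone: "us-central1-b"}] = mock.Obj(&ga.InstanceGroup{Name: "ig-b"})
        // Only ig-a comes back: keys in other zones are skipped, and
        // filter.None applies no additional field filtering.
        return mock.List(ctx, "us-central1-a", filter.None)
    }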
+func (m *MockInstanceGroups) SetNamedPorts(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsSetNamedPortsRequest) error { + if m.SetNamedPortsHook != nil { + return m.SetNamedPortsHook(ctx, key, arg0, m) + } + return nil +} + +// GCEInstanceGroups is a simplifying adapter for the GCE InstanceGroups. +type GCEInstanceGroups struct { + s *Service +} + +// Get the InstanceGroup named by key. +func (g *GCEInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.InstanceGroup, error) { + klog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEInstanceGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + klog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEInstanceGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.InstanceGroups.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEInstanceGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all InstanceGroup objects. +func (g *GCEInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) { + klog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v) called", ctx, zone, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + call := g.s.GA.InstanceGroups.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.InstanceGroup + f := func(l *ga.InstanceGroupList) error { + klog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert InstanceGroup with key of value obj. 
+func (g *GCEInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup) error { + klog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEInstanceGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + klog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.GA.InstanceGroups.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the InstanceGroup referenced by key. +func (g *GCEInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEInstanceGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + klog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.InstanceGroups.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// AddInstances is a method on GCEInstanceGroups. 
+func (g *GCEInstanceGroups) AddInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsAddInstancesRequest) error { + klog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AddInstances", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + klog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.InstanceGroups.AddInstances(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// ListInstances is a method on GCEInstanceGroups. +func (g *GCEInstanceGroups) ListInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsListInstancesRequest, fl *filter.F) ([]*ga.InstanceWithNamedPorts, error) { + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "ListInstances", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.InstanceGroups.ListInstances(projectID, key.Zone, key.Name, arg0) + var all []*ga.InstanceWithNamedPorts + f := func(l *ga.InstanceGroupsListInstances) error { + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): page %+v", ctx, key, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, nil, err) + return nil, err + } + if klog.V(4) { + klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + } + return all, nil +} + +// RemoveInstances is a method on GCEInstanceGroups. 
+func (g *GCEInstanceGroups) RemoveInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsRemoveInstancesRequest) error { + klog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "RemoveInstances", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + klog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.InstanceGroups.RemoveInstances(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// SetNamedPorts is a method on GCEInstanceGroups. +func (g *GCEInstanceGroups) SetNamedPorts(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsSetNamedPortsRequest) error { + klog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetNamedPorts", + Version: meta.Version("ga"), + Service: "InstanceGroups", + } + klog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.InstanceGroups.SetNamedPorts(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// Instances is an interface that allows for mocking of Instances. +type Instances interface { + Get(ctx context.Context, key *meta.Key) (*ga.Instance, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.Instance) error + Delete(ctx context.Context, key *meta.Key) error + AttachDisk(context.Context, *meta.Key, *ga.AttachedDisk) error + DetachDisk(context.Context, *meta.Key, string) error +} + +// NewMockInstances returns a new mock for Instances. +func NewMockInstances(pr ProjectRouter, objs map[meta.Key]*MockInstancesObj) *MockInstances { + mock := &MockInstances{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockInstances is the mock for Instances. 
+type MockInstances struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockInstancesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockInstances) (bool, *ga.Instance, error) + ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockInstances) (bool, []*ga.Instance, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Instance, m *MockInstances) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockInstances) (bool, error) + AttachDiskHook func(context.Context, *meta.Key, *ga.AttachedDisk, *MockInstances) error + DetachDiskHook func(context.Context, *meta.Key, string, *MockInstances) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToGA() + klog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockInstances %v not found", key), + } + klog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. +func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { + klog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Instance + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + klog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instance) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockInstances %v exists", key), + } + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "instances") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "instances", key) + + m.Objects[*key] = &MockInstancesObj{obj} + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockInstances) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockInstances %v not found", key), + } + klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockInstances.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockInstances) Obj(o *ga.Instance) *MockInstancesObj { + return &MockInstancesObj{o} +} + +// AttachDisk is a mock for the corresponding method. +func (m *MockInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *ga.AttachedDisk) error { + if m.AttachDiskHook != nil { + return m.AttachDiskHook(ctx, key, arg0, m) + } + return nil +} + +// DetachDisk is a mock for the corresponding method. +func (m *MockInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { + if m.DetachDiskHook != nil { + return m.DetachDiskHook(ctx, key, arg0, m) + } + return nil +} + +// GCEInstances is a simplifying adapter for the GCE Instances. +type GCEInstances struct { + s *Service +} + +// Get the Instance named by key. 
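As the xxxHook comments on MockInstances describe, tests can either inject per-key errors or short-circuit a call entirely with a hook. A hypothetical test-setup sketch follows, not part of this diff; it reuses the imports from the previous sketch plus ga "google.golang.org/api/compute/v1" (assumed alias), and SingleProjectRouter is assumed to be the package's trivial ProjectRouter implementation.

func newFailingInstancesMock() *cloud.MockInstances {
	// Assumed: SingleProjectRouter satisfies ProjectRouter with a fixed project ID.
	pr := &cloud.SingleProjectRouter{ID: "my-project"}
	mock := cloud.NewMockInstances(pr, map[meta.Key]*cloud.MockInstancesObj{})

	// Per-key error injection: Get on this key fails before Objects is consulted.
	bad := meta.ZonalKey("node-1", "us-central1-b")
	mock.GetError[*bad] = &googleapi.Error{Code: http.StatusInternalServerError, Message: "injected"}

	// Hook interception: returning true bypasses the mock's normal flow entirely.
	mock.GetHook = func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {
		if key.Name == "synthetic" {
			return true, &ga.Instance{Name: key.Name}, nil
		}
		return false, nil, nil // fall through to normal mock behavior
	}
	return mock
}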
+func (g *GCEInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, error) { + klog.V(5).Infof("GCEInstances.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Instances", + } + klog.V(5).Infof("GCEInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.Instances.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all Instance objects. +func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { + klog.V(5).Infof("GCEInstances.List(%v, %v, %v) called", ctx, zone, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + call := g.s.GA.Instances.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Instance + f := func(l *ga.InstanceList) error { + klog.V(5).Infof("GCEInstances.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert Instance with key of value obj. +func (g *GCEInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instance) error { + klog.V(5).Infof("GCEInstances.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "Instances", + } + klog.V(5).Infof("GCEInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.GA.Instances.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEInstances.Insert(%v, %v, ...) 
= %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the Instance referenced by key. +func (g *GCEInstances) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEInstances.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "Instances", + } + klog.V(5).Infof("GCEInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.Instances.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// AttachDisk is a method on GCEInstances. +func (g *GCEInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *ga.AttachedDisk) error { + klog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachDisk", + Version: meta.Version("ga"), + Service: "Instances", + } + klog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// DetachDisk is a method on GCEInstances. 
+func (g *GCEInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { + klog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachDisk", + Version: meta.Version("ga"), + Service: "Instances", + } + klog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// BetaInstances is an interface that allows for mocking of Instances. +type BetaInstances interface { + Get(ctx context.Context, key *meta.Key) (*beta.Instance, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.Instance) error + Delete(ctx context.Context, key *meta.Key) error + AttachDisk(context.Context, *meta.Key, *beta.AttachedDisk) error + DetachDisk(context.Context, *meta.Key, string) error + UpdateNetworkInterface(context.Context, *meta.Key, string, *beta.NetworkInterface) error +} + +// NewMockBetaInstances returns a new mock for Instances. +func NewMockBetaInstances(pr ProjectRouter, objs map[meta.Key]*MockInstancesObj) *MockBetaInstances { + mock := &MockBetaInstances{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaInstances is the mock for Instances. +type MockBetaInstances struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockInstancesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. 
+ GetHook func(ctx context.Context, key *meta.Key, m *MockBetaInstances) (bool, *beta.Instance, error) + ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockBetaInstances) (bool, []*beta.Instance, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.Instance, m *MockBetaInstances) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaInstances) (bool, error) + AttachDiskHook func(context.Context, *meta.Key, *beta.AttachedDisk, *MockBetaInstances) error + DetachDiskHook func(context.Context, *meta.Key, string, *MockBetaInstances) error + UpdateNetworkInterfaceHook func(context.Context, *meta.Key, string, *beta.NetworkInterface, *MockBetaInstances) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Instance, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaInstances %v not found", key), + } + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. +func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { + klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.Instance + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta.Instance) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaInstances %v exists", key), + } + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "instances") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "instances", key) + + m.Objects[*key] = &MockInstancesObj{obj} + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockBetaInstances) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaInstances %v not found", key), + } + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBetaInstances) Obj(o *beta.Instance) *MockInstancesObj { + return &MockInstancesObj{o} +} + +// AttachDisk is a mock for the corresponding method. +func (m *MockBetaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *beta.AttachedDisk) error { + if m.AttachDiskHook != nil { + return m.AttachDiskHook(ctx, key, arg0, m) + } + return nil +} + +// DetachDisk is a mock for the corresponding method. +func (m *MockBetaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { + if m.DetachDiskHook != nil { + return m.DetachDiskHook(ctx, key, arg0, m) + } + return nil +} + +// UpdateNetworkInterface is a mock for the corresponding method. +func (m *MockBetaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *beta.NetworkInterface) error { + if m.UpdateNetworkInterfaceHook != nil { + return m.UpdateNetworkInterfaceHook(ctx, key, arg0, arg1, m) + } + return nil +} + +// GCEBetaInstances is a simplifying adapter for the GCE Instances. +type GCEBetaInstances struct { + s *Service +} + +// Get the Instance named by key. 
+func (g *GCEBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Instance, error) { + klog.V(5).Infof("GCEBetaInstances.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("beta"), + Service: "Instances", + } + klog.V(5).Infof("GCEBetaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Beta.Instances.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEBetaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all Instance objects. +func (g *GCEBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) { + klog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v) called", ctx, zone, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + call := g.s.Beta.Instances.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*beta.Instance + f := func(l *beta.InstanceList) error { + klog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert Instance with key of value obj. +func (g *GCEBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta.Instance) error { + klog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEBetaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("beta"), + Service: "Instances", + } + klog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Beta.Instances.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...) 
= %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the Instance referenced by key. +func (g *GCEBetaInstances) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEBetaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("beta"), + Service: "Instances", + } + klog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.Instances.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// AttachDisk is a method on GCEBetaInstances. +func (g *GCEBetaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *beta.AttachedDisk) error { + klog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachDisk", + Version: meta.Version("beta"), + Service: "Instances", + } + klog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// DetachDisk is a method on GCEBetaInstances. 
+func (g *GCEBetaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { + klog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachDisk", + Version: meta.Version("beta"), + Service: "Instances", + } + klog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// UpdateNetworkInterface is a method on GCEBetaInstances. +func (g *GCEBetaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *beta.NetworkInterface) error { + klog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "UpdateNetworkInterface", + Version: meta.Version("beta"), + Service: "Instances", + } + klog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.Instances.UpdateNetworkInterface(projectID, key.Zone, key.Name, arg0, arg1) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// AlphaInstances is an interface that allows for mocking of Instances. +type AlphaInstances interface { + Get(ctx context.Context, key *meta.Key) (*alpha.Instance, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.Instance) error + Delete(ctx context.Context, key *meta.Key) error + AttachDisk(context.Context, *meta.Key, *alpha.AttachedDisk) error + DetachDisk(context.Context, *meta.Key, string) error + UpdateNetworkInterface(context.Context, *meta.Key, string, *alpha.NetworkInterface) error +} + +// NewMockAlphaInstances returns a new mock for Instances. 
+func NewMockAlphaInstances(pr ProjectRouter, objs map[meta.Key]*MockInstancesObj) *MockAlphaInstances { + mock := &MockAlphaInstances{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaInstances is the mock for Instances. +type MockAlphaInstances struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockInstancesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaInstances) (bool, *alpha.Instance, error) + ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockAlphaInstances) (bool, []*alpha.Instance, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.Instance, m *MockAlphaInstances) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaInstances) (bool, error) + AttachDiskHook func(context.Context, *meta.Key, *alpha.AttachedDisk, *MockAlphaInstances) error + DetachDiskHook func(context.Context, *meta.Key, string, *MockAlphaInstances) error + UpdateNetworkInterfaceHook func(context.Context, *meta.Key, string, *alpha.NetworkInterface, *MockAlphaInstances) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Instance, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaInstances %v not found", key), + } + klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. 
+func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { + klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.Instance + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alpha.Instance) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaInstances %v exists", key), + } + klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "instances") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "instances", key) + + m.Objects[*key] = &MockInstancesObj{obj} + klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaInstances %v not found", key), + } + klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaInstances) Obj(o *alpha.Instance) *MockInstancesObj { + return &MockInstancesObj{o} +} + +// AttachDisk is a mock for the corresponding method. +func (m *MockAlphaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *alpha.AttachedDisk) error { + if m.AttachDiskHook != nil { + return m.AttachDiskHook(ctx, key, arg0, m) + } + return nil +} + +// DetachDisk is a mock for the corresponding method. 
+func (m *MockAlphaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { + if m.DetachDiskHook != nil { + return m.DetachDiskHook(ctx, key, arg0, m) + } + return nil +} + +// UpdateNetworkInterface is a mock for the corresponding method. +func (m *MockAlphaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *alpha.NetworkInterface) error { + if m.UpdateNetworkInterfaceHook != nil { + return m.UpdateNetworkInterfaceHook(ctx, key, arg0, arg1, m) + } + return nil +} + +// GCEAlphaInstances is a simplifying adapter for the GCE Instances. +type GCEAlphaInstances struct { + s *Service +} + +// Get the Instance named by key. +func (g *GCEAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Instance, error) { + klog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "Instances", + } + klog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Alpha.Instances.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEAlphaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all Instance objects. +func (g *GCEAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) { + klog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v) called", ctx, zone, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "Instances", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + call := g.s.Alpha.Instances.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.Instance + f := func(l *alpha.InstanceList) error { + klog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert Instance with key of value obj. 
+func (g *GCEAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alpha.Instance) error { + klog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "Instances", + } + klog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Alpha.Instances.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the Instance referenced by key. +func (g *GCEAlphaInstances) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "Instances", + } + klog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.Instances.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// AttachDisk is a method on GCEAlphaInstances. +func (g *GCEAlphaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *alpha.AttachedDisk) error { + klog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachDisk", + Version: meta.Version("alpha"), + Service: "Instances", + } + klog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) 
= %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// DetachDisk is a method on GCEAlphaInstances. +func (g *GCEAlphaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error { + klog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachDisk", + Version: meta.Version("alpha"), + Service: "Instances", + } + klog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// UpdateNetworkInterface is a method on GCEAlphaInstances. +func (g *GCEAlphaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *alpha.NetworkInterface) error { + klog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "UpdateNetworkInterface", + Version: meta.Version("alpha"), + Service: "Instances", + } + klog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.Instances.UpdateNetworkInterface(projectID, key.Zone, key.Name, arg0, arg1) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// AlphaNetworks is an interface that allows for mocking of Networks. +type AlphaNetworks interface { + Get(ctx context.Context, key *meta.Key) (*alpha.Network, error) + List(ctx context.Context, fl *filter.F) ([]*alpha.Network, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.Network) error + Delete(ctx context.Context, key *meta.Key) error +} + +// NewMockAlphaNetworks returns a new mock for Networks. 
+func NewMockAlphaNetworks(pr ProjectRouter, objs map[meta.Key]*MockNetworksObj) *MockAlphaNetworks { + mock := &MockAlphaNetworks{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaNetworks is the mock for Networks. +type MockAlphaNetworks struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockNetworksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaNetworks) (bool, *alpha.Network, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockAlphaNetworks) (bool, []*alpha.Network, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.Network, m *MockAlphaNetworks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaNetworks) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaNetworks) Get(ctx context.Context, key *meta.Key) (*alpha.Network, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaNetworks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockAlphaNetworks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaNetworks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaNetworks %v not found", key), + } + klog.V(5).Infof("MockAlphaNetworks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockAlphaNetworks) List(ctx context.Context, fl *filter.F) ([]*alpha.Network, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockAlphaNetworks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockAlphaNetworks.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.Network + for _, obj := range m.Objects { + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + klog.V(5).Infof("MockAlphaNetworks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockAlphaNetworks) Insert(ctx context.Context, key *meta.Key, obj *alpha.Network) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockAlphaNetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockAlphaNetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaNetworks %v exists", key), + } + klog.V(5).Infof("MockAlphaNetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "networks") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "networks", key) + + m.Objects[*key] = &MockNetworksObj{obj} + klog.V(5).Infof("MockAlphaNetworks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaNetworks) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaNetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockAlphaNetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaNetworks %v not found", key), + } + klog.V(5).Infof("MockAlphaNetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockAlphaNetworks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaNetworks) Obj(o *alpha.Network) *MockNetworksObj { + return &MockNetworksObj{o} +} + +// GCEAlphaNetworks is a simplifying adapter for the GCE Networks. +type GCEAlphaNetworks struct { + s *Service +} + +// Get the Network named by key. +func (g *GCEAlphaNetworks) Get(ctx context.Context, key *meta.Key) (*alpha.Network, error) { + klog.V(5).Infof("GCEAlphaNetworks.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaNetworks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Networks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "Networks", + } + klog.V(5).Infof("GCEAlphaNetworks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaNetworks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Alpha.Networks.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEAlphaNetworks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all Network objects. 
+func (g *GCEAlphaNetworks) List(ctx context.Context, fl *filter.F) ([]*alpha.Network, error) { + klog.V(5).Infof("GCEAlphaNetworks.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Networks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "Networks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEAlphaNetworks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Alpha.Networks.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.Network + f := func(l *alpha.NetworkList) error { + klog.V(5).Infof("GCEAlphaNetworks.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAlphaNetworks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEAlphaNetworks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAlphaNetworks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert Network with key of value obj. +func (g *GCEAlphaNetworks) Insert(ctx context.Context, key *meta.Key, obj *alpha.Network) error { + klog.V(5).Infof("GCEAlphaNetworks.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaNetworks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Networks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "Networks", + } + klog.V(5).Infof("GCEAlphaNetworks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaNetworks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Alpha.Networks.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaNetworks.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaNetworks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the Network referenced by key. 
+func (g *GCEAlphaNetworks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaNetworks.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaNetworks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Networks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "Networks", + } + klog.V(5).Infof("GCEAlphaNetworks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaNetworks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.Networks.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaNetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaNetworks.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// BetaNetworks is an interface that allows for mocking of Networks. +type BetaNetworks interface { + Get(ctx context.Context, key *meta.Key) (*beta.Network, error) + List(ctx context.Context, fl *filter.F) ([]*beta.Network, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.Network) error + Delete(ctx context.Context, key *meta.Key) error +} + +// NewMockBetaNetworks returns a new mock for Networks. +func NewMockBetaNetworks(pr ProjectRouter, objs map[meta.Key]*MockNetworksObj) *MockBetaNetworks { + mock := &MockBetaNetworks{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaNetworks is the mock for Networks. +type MockBetaNetworks struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockNetworksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaNetworks) (bool, *beta.Network, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockBetaNetworks) (bool, []*beta.Network, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.Network, m *MockBetaNetworks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaNetworks) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockBetaNetworks) Get(ctx context.Context, key *meta.Key) (*beta.Network, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaNetworks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockBetaNetworks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaNetworks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaNetworks %v not found", key), + } + klog.V(5).Infof("MockBetaNetworks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockBetaNetworks) List(ctx context.Context, fl *filter.F) ([]*beta.Network, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockBetaNetworks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockBetaNetworks.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.Network + for _, obj := range m.Objects { + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + klog.V(5).Infof("MockBetaNetworks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockBetaNetworks) Insert(ctx context.Context, key *meta.Key, obj *beta.Network) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockBetaNetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockBetaNetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaNetworks %v exists", key), + } + klog.V(5).Infof("MockBetaNetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "networks") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "networks", key) + + m.Objects[*key] = &MockNetworksObj{obj} + klog.V(5).Infof("MockBetaNetworks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockBetaNetworks) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaNetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockBetaNetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaNetworks %v not found", key), + } + klog.V(5).Infof("MockBetaNetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockBetaNetworks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBetaNetworks) Obj(o *beta.Network) *MockNetworksObj { + return &MockNetworksObj{o} +} + +// GCEBetaNetworks is a simplifying adapter for the GCE Networks. +type GCEBetaNetworks struct { + s *Service +} + +// Get the Network named by key. +func (g *GCEBetaNetworks) Get(ctx context.Context, key *meta.Key) (*beta.Network, error) { + klog.V(5).Infof("GCEBetaNetworks.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaNetworks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Networks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("beta"), + Service: "Networks", + } + klog.V(5).Infof("GCEBetaNetworks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaNetworks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Beta.Networks.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEBetaNetworks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all Network objects. +func (g *GCEBetaNetworks) List(ctx context.Context, fl *filter.F) ([]*beta.Network, error) { + klog.V(5).Infof("GCEBetaNetworks.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Networks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "Networks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEBetaNetworks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Beta.Networks.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*beta.Network + f := func(l *beta.NetworkList) error { + klog.V(5).Infof("GCEBetaNetworks.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) 
+ return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEBetaNetworks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEBetaNetworks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEBetaNetworks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert Network with key of value obj. +func (g *GCEBetaNetworks) Insert(ctx context.Context, key *meta.Key, obj *beta.Network) error { + klog.V(5).Infof("GCEBetaNetworks.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEBetaNetworks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Networks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("beta"), + Service: "Networks", + } + klog.V(5).Infof("GCEBetaNetworks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaNetworks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Beta.Networks.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaNetworks.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaNetworks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the Network referenced by key. +func (g *GCEBetaNetworks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaNetworks.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEBetaNetworks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Networks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("beta"), + Service: "Networks", + } + klog.V(5).Infof("GCEBetaNetworks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaNetworks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.Networks.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaNetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaNetworks.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// Networks is an interface that allows for mocking of Networks. +type Networks interface { + Get(ctx context.Context, key *meta.Key) (*ga.Network, error) + List(ctx context.Context, fl *filter.F) ([]*ga.Network, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.Network) error + Delete(ctx context.Context, key *meta.Key) error +} + +// NewMockNetworks returns a new mock for Networks. 
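+//
+// Illustrative sketch under the assumption that the versioned mocks are fed
+// the same object map: because MockNetworksObj wraps a single underlying
+// object, sharing the map lets an object inserted through one API version be
+// read back through another. pr and ctx are assumed to exist.
+//
+//	objs := map[meta.Key]*MockNetworksObj{}
+//	gaMock := NewMockNetworks(pr, objs)
+//	betaMock := NewMockBetaNetworks(pr, objs)
+//	_ = gaMock.Insert(ctx, meta.GlobalKey("net-1"), &ga.Network{})
+//	// betaMock.Get(ctx, meta.GlobalKey("net-1")) now returns the object via ToBeta().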
+func NewMockNetworks(pr ProjectRouter, objs map[meta.Key]*MockNetworksObj) *MockNetworks { + mock := &MockNetworks{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockNetworks is the mock for Networks. +type MockNetworks struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockNetworksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockNetworks) (bool, *ga.Network, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockNetworks) (bool, []*ga.Network, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Network, m *MockNetworks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockNetworks) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockNetworks) Get(ctx context.Context, key *meta.Key) (*ga.Network, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockNetworks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockNetworks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToGA() + klog.V(5).Infof("MockNetworks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockNetworks %v not found", key), + } + klog.V(5).Infof("MockNetworks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockNetworks) List(ctx context.Context, fl *filter.F) ([]*ga.Network, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockNetworks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockNetworks.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Network + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + klog.V(5).Infof("MockNetworks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockNetworks) Insert(ctx context.Context, key *meta.Key, obj *ga.Network) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockNetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockNetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockNetworks %v exists", key), + } + klog.V(5).Infof("MockNetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "networks") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "networks", key) + + m.Objects[*key] = &MockNetworksObj{obj} + klog.V(5).Infof("MockNetworks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockNetworks) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockNetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockNetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockNetworks %v not found", key), + } + klog.V(5).Infof("MockNetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockNetworks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockNetworks) Obj(o *ga.Network) *MockNetworksObj { + return &MockNetworksObj{o} +} + +// GCENetworks is a simplifying adapter for the GCE Networks. +type GCENetworks struct { + s *Service +} + +// Get the Network named by key. +func (g *GCENetworks) Get(ctx context.Context, key *meta.Key) (*ga.Network, error) { + klog.V(5).Infof("GCENetworks.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCENetworks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Networks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Networks", + } + klog.V(5).Infof("GCENetworks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCENetworks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.Networks.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCENetworks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all Network objects. 
+func (g *GCENetworks) List(ctx context.Context, fl *filter.F) ([]*ga.Network, error) { + klog.V(5).Infof("GCENetworks.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Networks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Networks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCENetworks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.Networks.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Network + f := func(l *ga.NetworkList) error { + klog.V(5).Infof("GCENetworks.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCENetworks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCENetworks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCENetworks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert Network with key of value obj. +func (g *GCENetworks) Insert(ctx context.Context, key *meta.Key, obj *ga.Network) error { + klog.V(5).Infof("GCENetworks.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCENetworks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Networks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "Networks", + } + klog.V(5).Infof("GCENetworks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCENetworks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.GA.Networks.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCENetworks.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCENetworks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the Network referenced by key. 
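+//
+// Illustrative sketch: like the other adapter methods, Delete consults the
+// rate limiter, issues the API call and then blocks in WaitForCompletion, so
+// the caller only sees a synchronous call. The svc and ctx values below are
+// assumed to be an already-constructed *Service and context.
+//
+//	networks := &GCENetworks{s: svc}
+//	if err := networks.Delete(ctx, meta.GlobalKey("old-network")); err != nil {
+//		// the delete request failed or the operation did not complete
+//	}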
+func (g *GCENetworks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCENetworks.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCENetworks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Networks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "Networks", + } + klog.V(5).Infof("GCENetworks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCENetworks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.Networks.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCENetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCENetworks.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// AlphaNetworkEndpointGroups is an interface that allows for mocking of NetworkEndpointGroups. +type AlphaNetworkEndpointGroups interface { + Get(ctx context.Context, key *meta.Key) (*alpha.NetworkEndpointGroup, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup) error + Delete(ctx context.Context, key *meta.Key) error + AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) + AttachNetworkEndpoints(context.Context, *meta.Key, *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error + DetachNetworkEndpoints(context.Context, *meta.Key, *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error + ListNetworkEndpoints(context.Context, *meta.Key, *alpha.NetworkEndpointGroupsListEndpointsRequest, *filter.F) ([]*alpha.NetworkEndpointWithHealthStatus, error) +} + +// NewMockAlphaNetworkEndpointGroups returns a new mock for NetworkEndpointGroups. +func NewMockAlphaNetworkEndpointGroups(pr ProjectRouter, objs map[meta.Key]*MockNetworkEndpointGroupsObj) *MockAlphaNetworkEndpointGroups { + mock := &MockAlphaNetworkEndpointGroups{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaNetworkEndpointGroups is the mock for NetworkEndpointGroups. +type MockAlphaNetworkEndpointGroups struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockNetworkEndpointGroupsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + AggregatedListError *error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. 
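+ //
+ // Illustrative sketch (assumed test code, not generated): a hook can
+ // short-circuit the mock entirely, e.g. to simulate a missing NEG no
+ // matter what the Objects map contains:
+ //
+ //	m.GetHook = func(ctx context.Context, key *meta.Key, m *MockAlphaNetworkEndpointGroups) (bool, *alpha.NetworkEndpointGroup, error) {
+ //		return true, nil, &googleapi.Error{Code: http.StatusNotFound}
+ //	}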
+ GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaNetworkEndpointGroups) (bool, *alpha.NetworkEndpointGroup, error) + ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockAlphaNetworkEndpointGroups) (bool, []*alpha.NetworkEndpointGroup, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup, m *MockAlphaNetworkEndpointGroups) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaNetworkEndpointGroups) (bool, error) + AggregatedListHook func(ctx context.Context, fl *filter.F, m *MockAlphaNetworkEndpointGroups) (bool, map[string][]*alpha.NetworkEndpointGroup, error) + AttachNetworkEndpointsHook func(context.Context, *meta.Key, *alpha.NetworkEndpointGroupsAttachEndpointsRequest, *MockAlphaNetworkEndpointGroups) error + DetachNetworkEndpointsHook func(context.Context, *meta.Key, *alpha.NetworkEndpointGroupsDetachEndpointsRequest, *MockAlphaNetworkEndpointGroups) error + ListNetworkEndpointsHook func(context.Context, *meta.Key, *alpha.NetworkEndpointGroupsListEndpointsRequest, *filter.F, *MockAlphaNetworkEndpointGroups) ([]*alpha.NetworkEndpointWithHealthStatus, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*alpha.NetworkEndpointGroup, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key), + } + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. +func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.NetworkEndpointGroup + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v exists", key), + } + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "networkEndpointGroups") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "networkEndpointGroups", key) + + m.Objects[*key] = &MockNetworkEndpointGroupsObj{obj} + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key), + } + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// AggregatedList is a mock for AggregatedList. +func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) { + if m.AggregatedListHook != nil { + if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.AggregatedListError != nil { + err := *m.AggregatedListError + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + + objs := map[string][]*alpha.NetworkEndpointGroup{} + for _, obj := range m.Objects { + res, err := ParseResourceURL(obj.ToAlpha().SelfLink) + location := res.Key.Zone + if err != nil { + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs[location] = append(objs[location], obj.ToAlpha()) + } + klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Obj wraps the object for use in the mock. 
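+//
+// Illustrative sketch: Obj is a convenience for seeding Objects when building
+// the mock by hand. The meta.ZonalKey helper, pr and the zone name are
+// assumptions for this example.
+//
+//	m := NewMockAlphaNetworkEndpointGroups(pr, map[meta.Key]*MockNetworkEndpointGroupsObj{})
+//	neg := &alpha.NetworkEndpointGroup{Name: "neg-1"}
+//	m.Objects[*meta.ZonalKey("neg-1", "us-central1-a")] = m.Obj(neg)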
+func (m *MockAlphaNetworkEndpointGroups) Obj(o *alpha.NetworkEndpointGroup) *MockNetworkEndpointGroupsObj { + return &MockNetworkEndpointGroupsObj{o} +} + +// AttachNetworkEndpoints is a mock for the corresponding method. +func (m *MockAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error { + if m.AttachNetworkEndpointsHook != nil { + return m.AttachNetworkEndpointsHook(ctx, key, arg0, m) + } + return nil +} + +// DetachNetworkEndpoints is a mock for the corresponding method. +func (m *MockAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error { + if m.DetachNetworkEndpointsHook != nil { + return m.DetachNetworkEndpointsHook(ctx, key, arg0, m) + } + return nil +} + +// ListNetworkEndpoints is a mock for the corresponding method. +func (m *MockAlphaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*alpha.NetworkEndpointWithHealthStatus, error) { + if m.ListNetworkEndpointsHook != nil { + return m.ListNetworkEndpointsHook(ctx, key, arg0, fl, m) + } + return nil, nil +} + +// GCEAlphaNetworkEndpointGroups is a simplifying adapter for the GCE NetworkEndpointGroups. +type GCEAlphaNetworkEndpointGroups struct { + s *Service +} + +// Get the NetworkEndpointGroup named by key. +func (g *GCEAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*alpha.NetworkEndpointGroup, error) { + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Alpha.NetworkEndpointGroups.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all NetworkEndpointGroup objects. 
+func (g *GCEAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) { + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + call := g.s.Alpha.NetworkEndpointGroups.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.NetworkEndpointGroup + f := func(l *alpha.NetworkEndpointGroupList) error { + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert NetworkEndpointGroup with key of value obj. +func (g *GCEAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup) error { + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Alpha.NetworkEndpointGroups.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the NetworkEndpointGroup referenced by key. 
+func (g *GCEAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.NetworkEndpointGroups.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// AggregatedList lists all resources of the given type across all locations. +func (g *GCEAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) { + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) + + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AggregatedList", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + return nil, err + } + + call := g.s.Alpha.NetworkEndpointGroups.AggregatedList(projectID) + call.Context(ctx) + if fl != filter.None { + call.Filter(fl.String()) + } + + all := map[string][]*alpha.NetworkEndpointGroup{} + f := func(l *alpha.NetworkEndpointGroupAggregatedList) error { + for k, v := range l.Items { + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) + all[k] = append(all[k], v.NetworkEndpointGroups...) + } + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + if klog.V(4) { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + } + return all, nil +} + +// AttachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. 
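+//
+// Illustrative sketch: the request names the endpoints to add to the group.
+// The NetworkEndpoints/Instance/IpAddress/Port field names come from the
+// alpha Compute API and are an assumption here, as are negs, ctx and the
+// zone used in the key.
+//
+//	req := &alpha.NetworkEndpointGroupsAttachEndpointsRequest{
+//		NetworkEndpoints: []*alpha.NetworkEndpoint{
+//			{Instance: "instance-1", IpAddress: "10.0.0.2", Port: 8080},
+//		},
+//	}
+//	err := negs.AttachNetworkEndpoints(ctx, meta.ZonalKey("neg-1", "us-central1-a"), req)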
+func (g *GCEAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error { + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachNetworkEndpoints", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.NetworkEndpointGroups.AttachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// DetachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. +func (g *GCEAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error { + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachNetworkEndpoints", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.NetworkEndpointGroups.DetachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// ListNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. 
+func (g *GCEAlphaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*alpha.NetworkEndpointWithHealthStatus, error) { + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "ListNetworkEndpoints", + Version: meta.Version("alpha"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Alpha.NetworkEndpointGroups.ListNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + var all []*alpha.NetworkEndpointWithHealthStatus + f := func(l *alpha.NetworkEndpointGroupsListNetworkEndpoints) error { + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) + return nil, err + } + if klog.V(4) { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + } + return all, nil +} + +// BetaNetworkEndpointGroups is an interface that allows for mocking of NetworkEndpointGroups. +type BetaNetworkEndpointGroups interface { + Get(ctx context.Context, key *meta.Key) (*beta.NetworkEndpointGroup, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*beta.NetworkEndpointGroup, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup) error + Delete(ctx context.Context, key *meta.Key) error + AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.NetworkEndpointGroup, error) + AttachNetworkEndpoints(context.Context, *meta.Key, *beta.NetworkEndpointGroupsAttachEndpointsRequest) error + DetachNetworkEndpoints(context.Context, *meta.Key, *beta.NetworkEndpointGroupsDetachEndpointsRequest) error + ListNetworkEndpoints(context.Context, *meta.Key, *beta.NetworkEndpointGroupsListEndpointsRequest, *filter.F) ([]*beta.NetworkEndpointWithHealthStatus, error) +} + +// NewMockBetaNetworkEndpointGroups returns a new mock for NetworkEndpointGroups. +func NewMockBetaNetworkEndpointGroups(pr ProjectRouter, objs map[meta.Key]*MockNetworkEndpointGroupsObj) *MockBetaNetworkEndpointGroups { + mock := &MockBetaNetworkEndpointGroups{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaNetworkEndpointGroups is the mock for NetworkEndpointGroups. 
+type MockBetaNetworkEndpointGroups struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockNetworkEndpointGroupsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + AggregatedListError *error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaNetworkEndpointGroups) (bool, *beta.NetworkEndpointGroup, error) + ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockBetaNetworkEndpointGroups) (bool, []*beta.NetworkEndpointGroup, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup, m *MockBetaNetworkEndpointGroups) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaNetworkEndpointGroups) (bool, error) + AggregatedListHook func(ctx context.Context, fl *filter.F, m *MockBetaNetworkEndpointGroups) (bool, map[string][]*beta.NetworkEndpointGroup, error) + AttachNetworkEndpointsHook func(context.Context, *meta.Key, *beta.NetworkEndpointGroupsAttachEndpointsRequest, *MockBetaNetworkEndpointGroups) error + DetachNetworkEndpointsHook func(context.Context, *meta.Key, *beta.NetworkEndpointGroupsDetachEndpointsRequest, *MockBetaNetworkEndpointGroups) error + ListNetworkEndpointsHook func(context.Context, *meta.Key, *beta.NetworkEndpointGroupsListEndpointsRequest, *filter.F, *MockBetaNetworkEndpointGroups) ([]*beta.NetworkEndpointWithHealthStatus, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*beta.NetworkEndpointGroup, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v not found", key), + } + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. 
+func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.NetworkEndpointGroup, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { + klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.NetworkEndpointGroup + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v exists", key), + } + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "networkEndpointGroups") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "networkEndpointGroups", key) + + m.Objects[*key] = &MockNetworkEndpointGroupsObj{obj} + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v not found", key), + } + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// AggregatedList is a mock for AggregatedList. 
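+//
+// Illustrative sketch: the mock groups results by the zone parsed from each
+// object's SelfLink, so the returned map is keyed by zone name. ctx is
+// assumed to exist.
+//
+//	byZone, err := m.AggregatedList(ctx, filter.None)
+//	if err == nil {
+//		negs := byZone["us-central1-a"] // all beta NEGs whose SelfLink points at that zone
+//		_ = negs
+//	}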
+func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.NetworkEndpointGroup, error) { + if m.AggregatedListHook != nil { + if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.AggregatedListError != nil { + err := *m.AggregatedListError + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + + objs := map[string][]*beta.NetworkEndpointGroup{} + for _, obj := range m.Objects { + res, err := ParseResourceURL(obj.ToBeta().SelfLink) + location := res.Key.Zone + if err != nil { + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + if !fl.Match(obj.ToBeta()) { + continue + } + objs[location] = append(objs[location], obj.ToBeta()) + } + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBetaNetworkEndpointGroups) Obj(o *beta.NetworkEndpointGroup) *MockNetworkEndpointGroupsObj { + return &MockNetworkEndpointGroupsObj{o} +} + +// AttachNetworkEndpoints is a mock for the corresponding method. +func (m *MockBetaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsAttachEndpointsRequest) error { + if m.AttachNetworkEndpointsHook != nil { + return m.AttachNetworkEndpointsHook(ctx, key, arg0, m) + } + return nil +} + +// DetachNetworkEndpoints is a mock for the corresponding method. +func (m *MockBetaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsDetachEndpointsRequest) error { + if m.DetachNetworkEndpointsHook != nil { + return m.DetachNetworkEndpointsHook(ctx, key, arg0, m) + } + return nil +} + +// ListNetworkEndpoints is a mock for the corresponding method. +func (m *MockBetaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*beta.NetworkEndpointWithHealthStatus, error) { + if m.ListNetworkEndpointsHook != nil { + return m.ListNetworkEndpointsHook(ctx, key, arg0, fl, m) + } + return nil, nil +} + +// GCEBetaNetworkEndpointGroups is a simplifying adapter for the GCE NetworkEndpointGroups. +type GCEBetaNetworkEndpointGroups struct { + s *Service +} + +// Get the NetworkEndpointGroup named by key. 
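+//
+// Illustrative sketch: NetworkEndpointGroups are zonal, so the key passed to
+// Get must carry a zone for the underlying call to be routed. negs, ctx and
+// the meta.ZonalKey helper are assumptions for this example.
+//
+//	neg, err := negs.Get(ctx, meta.ZonalKey("neg-1", "us-central1-a"))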
+func (g *GCEBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*beta.NetworkEndpointGroup, error) { + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("beta"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Beta.NetworkEndpointGroups.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all NetworkEndpointGroup objects. +func (g *GCEBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.NetworkEndpointGroup, error) { + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + call := g.s.Beta.NetworkEndpointGroups.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*beta.NetworkEndpointGroup + f := func(l *beta.NetworkEndpointGroupList) error { + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert NetworkEndpointGroup with key of value obj. 
+func (g *GCEBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup) error { + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("beta"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Beta.NetworkEndpointGroups.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the NetworkEndpointGroup referenced by key. +func (g *GCEBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("beta"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.NetworkEndpointGroups.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// AggregatedList lists all resources of the given type across all locations. 
+func (g *GCEBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.NetworkEndpointGroup, error) { + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) + + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AggregatedList", + Version: meta.Version("beta"), + Service: "NetworkEndpointGroups", + } + + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + return nil, err + } + + call := g.s.Beta.NetworkEndpointGroups.AggregatedList(projectID) + call.Context(ctx) + if fl != filter.None { + call.Filter(fl.String()) + } + + all := map[string][]*beta.NetworkEndpointGroup{} + f := func(l *beta.NetworkEndpointGroupAggregatedList) error { + for k, v := range l.Items { + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) + all[k] = append(all[k], v.NetworkEndpointGroups...) + } + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + if klog.V(4) { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + } + return all, nil +} + +// AttachNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. +func (g *GCEBetaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsAttachEndpointsRequest) error { + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachNetworkEndpoints", + Version: meta.Version("beta"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.NetworkEndpointGroups.AttachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// DetachNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. 
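AggregatedList copies the scope keys from the API response (l.Items) straight into the returned map, with each value holding the NetworkEndpointGroups found in that scope. A hedged sketch of consuming that map; the "zones/<zone>" key format is an assumption about the compute API, not something this code enforces:

```go
// Sketch only: count NEGs per scope returned by AggregatedList.
func countNEGsPerScope(ctx context.Context, negs *GCEBetaNetworkEndpointGroups) (map[string]int, error) {
	byScope, err := negs.AggregatedList(ctx, filter.None)
	if err != nil {
		return nil, err
	}
	counts := map[string]int{}
	for scope, groups := range byScope { // scope is typically of the form "zones/<zone>"
		counts[scope] = len(groups)
	}
	return counts, nil
}
```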
+func (g *GCEBetaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsDetachEndpointsRequest) error { + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachNetworkEndpoints", + Version: meta.Version("beta"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.NetworkEndpointGroups.DetachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// ListNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. +func (g *GCEBetaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*beta.NetworkEndpointWithHealthStatus, error) { + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "ListNetworkEndpoints", + Version: meta.Version("beta"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Beta.NetworkEndpointGroups.ListNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + var all []*beta.NetworkEndpointWithHealthStatus + f := func(l *beta.NetworkEndpointGroupsListNetworkEndpoints) error { + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) + return nil, err + } + if klog.V(4) { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) 
= [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + } + return all, nil +} + +// NetworkEndpointGroups is an interface that allows for mocking of NetworkEndpointGroups. +type NetworkEndpointGroups interface { + Get(ctx context.Context, key *meta.Key) (*ga.NetworkEndpointGroup, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*ga.NetworkEndpointGroup, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.NetworkEndpointGroup) error + Delete(ctx context.Context, key *meta.Key) error + AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*ga.NetworkEndpointGroup, error) + AttachNetworkEndpoints(context.Context, *meta.Key, *ga.NetworkEndpointGroupsAttachEndpointsRequest) error + DetachNetworkEndpoints(context.Context, *meta.Key, *ga.NetworkEndpointGroupsDetachEndpointsRequest) error + ListNetworkEndpoints(context.Context, *meta.Key, *ga.NetworkEndpointGroupsListEndpointsRequest, *filter.F) ([]*ga.NetworkEndpointWithHealthStatus, error) +} + +// NewMockNetworkEndpointGroups returns a new mock for NetworkEndpointGroups. +func NewMockNetworkEndpointGroups(pr ProjectRouter, objs map[meta.Key]*MockNetworkEndpointGroupsObj) *MockNetworkEndpointGroups { + mock := &MockNetworkEndpointGroups{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockNetworkEndpointGroups is the mock for NetworkEndpointGroups. +type MockNetworkEndpointGroups struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockNetworkEndpointGroupsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + AggregatedListError *error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. 
+ GetHook func(ctx context.Context, key *meta.Key, m *MockNetworkEndpointGroups) (bool, *ga.NetworkEndpointGroup, error) + ListHook func(ctx context.Context, zone string, fl *filter.F, m *MockNetworkEndpointGroups) (bool, []*ga.NetworkEndpointGroup, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.NetworkEndpointGroup, m *MockNetworkEndpointGroups) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockNetworkEndpointGroups) (bool, error) + AggregatedListHook func(ctx context.Context, fl *filter.F, m *MockNetworkEndpointGroups) (bool, map[string][]*ga.NetworkEndpointGroup, error) + AttachNetworkEndpointsHook func(context.Context, *meta.Key, *ga.NetworkEndpointGroupsAttachEndpointsRequest, *MockNetworkEndpointGroups) error + DetachNetworkEndpointsHook func(context.Context, *meta.Key, *ga.NetworkEndpointGroupsDetachEndpointsRequest, *MockNetworkEndpointGroups) error + ListNetworkEndpointsHook func(context.Context, *meta.Key, *ga.NetworkEndpointGroupsListEndpointsRequest, *filter.F, *MockNetworkEndpointGroups) ([]*ga.NetworkEndpointWithHealthStatus, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*ga.NetworkEndpointGroup, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToGA() + klog.V(5).Infof("MockNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockNetworkEndpointGroups %v not found", key), + } + klog.V(5).Infof("MockNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given zone. +func (m *MockNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.NetworkEndpointGroup, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { + klog.V(5).Infof("MockNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.NetworkEndpointGroup + for key, obj := range m.Objects { + if key.Zone != zone { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + klog.V(5).Infof("MockNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
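The xxxHook fields let a test take over a mock call entirely: returning true from a hook short-circuits the generated behaviour, while returning false falls through to the Objects map and the per-key error maps. A hedged sketch of intercepting Get; the ProjectRouter value pr comes from the surrounding test and is not shown here:

```go
// Sketch only: intercept Get for one key, fall through for everything else.
func newNEGMockWithGetHook(pr ProjectRouter) *MockNetworkEndpointGroups {
	mock := NewMockNetworkEndpointGroups(pr, map[meta.Key]*MockNetworkEndpointGroupsObj{})
	mock.GetHook = func(ctx context.Context, key *meta.Key, m *MockNetworkEndpointGroups) (bool, *ga.NetworkEndpointGroup, error) {
		if key.Name == "special-neg" { // placeholder name
			return true, &ga.NetworkEndpointGroup{Name: "special-neg"}, nil // intercepted
		}
		return false, nil, nil // continue with the normal mock flow
	}
	return mock
}
```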
+func (m *MockNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.NetworkEndpointGroup) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockNetworkEndpointGroups %v exists", key), + } + klog.V(5).Infof("MockNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "networkEndpointGroups") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "networkEndpointGroups", key) + + m.Objects[*key] = &MockNetworkEndpointGroupsObj{obj} + klog.V(5).Infof("MockNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockNetworkEndpointGroups %v not found", key), + } + klog.V(5).Infof("MockNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// AggregatedList is a mock for AggregatedList. +func (m *MockNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*ga.NetworkEndpointGroup, error) { + if m.AggregatedListHook != nil { + if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.AggregatedListError != nil { + err := *m.AggregatedListError + klog.V(5).Infof("MockNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + + objs := map[string][]*ga.NetworkEndpointGroup{} + for _, obj := range m.Objects { + res, err := ParseResourceURL(obj.ToGA().SelfLink) + location := res.Key.Zone + if err != nil { + klog.V(5).Infof("MockNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + return nil, err + } + if !fl.Match(obj.ToGA()) { + continue + } + objs[location] = append(objs[location], obj.ToGA()) + } + klog.V(5).Infof("MockNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Obj wraps the object for use in the mock. 
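Besides hooks, the per-key error maps inject failures without touching the stored objects, and the mock Insert fills in SelfLink via SelfLink(meta.VersionGA, ...) so AggregatedList can later recover the zone with ParseResourceURL. A hedged sketch of seeding one object and forcing an Insert failure for another key; names and the injected status code are placeholders:

```go
// Sketch only: pre-seed one NEG and force Insert to fail for another key.
func seedNEGMock(ctx context.Context, mock *MockNetworkEndpointGroups) error {
	good := &meta.Key{Name: "existing-neg", Zone: "us-central1-a"}      // placeholder
	bad := &meta.Key{Name: "quota-limited-neg", Zone: "us-central1-a"}  // placeholder

	// Inserting through the mock stores the object and fills in SelfLink.
	if err := mock.Insert(ctx, good, &ga.NetworkEndpointGroup{}); err != nil {
		return err
	}
	// Any later Insert for `bad` returns this error instead of storing anything.
	mock.InsertError[*bad] = &googleapi.Error{
		Code:    http.StatusTooManyRequests,
		Message: "injected quota error",
	}
	return nil
}
```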
+func (m *MockNetworkEndpointGroups) Obj(o *ga.NetworkEndpointGroup) *MockNetworkEndpointGroupsObj { + return &MockNetworkEndpointGroupsObj{o} +} + +// AttachNetworkEndpoints is a mock for the corresponding method. +func (m *MockNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *ga.NetworkEndpointGroupsAttachEndpointsRequest) error { + if m.AttachNetworkEndpointsHook != nil { + return m.AttachNetworkEndpointsHook(ctx, key, arg0, m) + } + return nil +} + +// DetachNetworkEndpoints is a mock for the corresponding method. +func (m *MockNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *ga.NetworkEndpointGroupsDetachEndpointsRequest) error { + if m.DetachNetworkEndpointsHook != nil { + return m.DetachNetworkEndpointsHook(ctx, key, arg0, m) + } + return nil +} + +// ListNetworkEndpoints is a mock for the corresponding method. +func (m *MockNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *ga.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*ga.NetworkEndpointWithHealthStatus, error) { + if m.ListNetworkEndpointsHook != nil { + return m.ListNetworkEndpointsHook(ctx, key, arg0, fl, m) + } + return nil, nil +} + +// GCENetworkEndpointGroups is a simplifying adapter for the GCE NetworkEndpointGroups. +type GCENetworkEndpointGroups struct { + s *Service +} + +// Get the NetworkEndpointGroup named by key. +func (g *GCENetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*ga.NetworkEndpointGroup, error) { + klog.V(5).Infof("GCENetworkEndpointGroups.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCENetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCENetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCENetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.NetworkEndpointGroups.Get(projectID, key.Zone, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCENetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all NetworkEndpointGroup objects. +func (g *GCENetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.NetworkEndpointGroup, error) { + klog.V(5).Infof("GCENetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "NetworkEndpointGroups", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCENetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + call := g.s.GA.NetworkEndpointGroups.List(projectID, zone) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.NetworkEndpointGroup + f := func(l *ga.NetworkEndpointGroupList) error { + klog.V(5).Infof("GCENetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) 
+ return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCENetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCENetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCENetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert NetworkEndpointGroup with key of value obj. +func (g *GCENetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.NetworkEndpointGroup) error { + klog.V(5).Infof("GCENetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCENetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCENetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCENetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.GA.NetworkEndpointGroups.Insert(projectID, key.Zone, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCENetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCENetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the NetworkEndpointGroup referenced by key. +func (g *GCENetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCENetworkEndpointGroups.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCENetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCENetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCENetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.NetworkEndpointGroups.Delete(projectID, key.Zone, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCENetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCENetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// AggregatedList lists all resources of the given type across all locations. 
+func (g *GCENetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*ga.NetworkEndpointGroup, error) { + klog.V(5).Infof("GCENetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) + + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AggregatedList", + Version: meta.Version("ga"), + Service: "NetworkEndpointGroups", + } + + klog.V(5).Infof("GCENetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(5).Infof("GCENetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + return nil, err + } + + call := g.s.GA.NetworkEndpointGroups.AggregatedList(projectID) + call.Context(ctx) + if fl != filter.None { + call.Filter(fl.String()) + } + + all := map[string][]*ga.NetworkEndpointGroup{} + f := func(l *ga.NetworkEndpointGroupAggregatedList) error { + for k, v := range l.Items { + klog.V(5).Infof("GCENetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) + all[k] = append(all[k], v.NetworkEndpointGroups...) + } + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCENetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + if klog.V(4) { + klog.V(4).Infof("GCENetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCENetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + } + return all, nil +} + +// AttachNetworkEndpoints is a method on GCENetworkEndpointGroups. +func (g *GCENetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *ga.NetworkEndpointGroupsAttachEndpointsRequest) error { + klog.V(5).Infof("GCENetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCENetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AttachNetworkEndpoints", + Version: meta.Version("ga"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCENetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCENetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.NetworkEndpointGroups.AttachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCENetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCENetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// DetachNetworkEndpoints is a method on GCENetworkEndpointGroups. 
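AttachNetworkEndpoints and DetachNetworkEndpoints wrap the corresponding compute calls and, like Insert and Delete, wait on the returned operation. A hedged sketch of attaching a single endpoint; the request field names (NetworkEndpoints, Instance, IpAddress, Port) are assumed from the GA compute client, and the concrete values are placeholders:

```go
// Sketch only: attach one endpoint to an existing GA NEG.
func attachOneEndpoint(ctx context.Context, negs *GCENetworkEndpointGroups) error {
	key := &meta.Key{Name: "example-neg", Zone: "us-central1-a"} // placeholder key
	req := &ga.NetworkEndpointGroupsAttachEndpointsRequest{
		NetworkEndpoints: []*ga.NetworkEndpoint{
			{Instance: "example-instance", IpAddress: "10.0.0.2", Port: 8080}, // placeholders
		},
	}
	return negs.AttachNetworkEndpoints(ctx, key, req)
}
```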
+func (g *GCENetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *ga.NetworkEndpointGroupsDetachEndpointsRequest) error { + klog.V(5).Infof("GCENetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCENetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "DetachNetworkEndpoints", + Version: meta.Version("ga"), + Service: "NetworkEndpointGroups", + } + klog.V(5).Infof("GCENetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCENetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.NetworkEndpointGroups.DetachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCENetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCENetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// ListNetworkEndpoints is a method on GCENetworkEndpointGroups. +func (g *GCENetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *ga.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*ga.NetworkEndpointWithHealthStatus, error) { + klog.V(5).Infof("GCENetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCENetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "NetworkEndpointGroups") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "ListNetworkEndpoints", + Version: meta.Version("ga"), Service: "NetworkEndpointGroups", } - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err + klog.V(5).Infof("GCENetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCENetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.NetworkEndpointGroups.ListNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + var all []*ga.NetworkEndpointWithHealthStatus + f := func(l *ga.NetworkEndpointGroupsListNetworkEndpoints) error { + klog.V(5).Infof("GCENetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCENetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) + return nil, err + } + if klog.V(4) { + klog.V(4).Infof("GCENetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) 
= [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCENetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + } + return all, nil +} + +// Projects is an interface that allows for mocking of Projects. +type Projects interface { + // ProjectsOps is an interface with additional non-CRUD type methods. + // This interface is expected to be implemented by hand (non-autogenerated). + ProjectsOps +} + +// NewMockProjects returns a new mock for Projects. +func NewMockProjects(pr ProjectRouter, objs map[meta.Key]*MockProjectsObj) *MockProjects { + mock := &MockProjects{ + ProjectRouter: pr, + + Objects: objs, + } + return mock +} + +// MockProjects is the mock for Projects. +type MockProjects struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockProjectsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Obj wraps the object for use in the mock. +func (m *MockProjects) Obj(o *ga.Project) *MockProjectsObj { + return &MockProjectsObj{o} +} + +// GCEProjects is a simplifying adapter for the GCE Projects. +type GCEProjects struct { + s *Service +} + +// Regions is an interface that allows for mocking of Regions. +type Regions interface { + Get(ctx context.Context, key *meta.Key) (*ga.Region, error) + List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) +} + +// NewMockRegions returns a new mock for Regions. +func NewMockRegions(pr ProjectRouter, objs map[meta.Key]*MockRegionsObj) *MockRegions { + mock := &MockRegions{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + } + return mock +} + +// MockRegions is the mock for Regions. +type MockRegions struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockRegionsObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockRegions) (bool, *ga.Region, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockRegions) (bool, []*ga.Region, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToGA() + klog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockRegions %v not found", key), + } + klog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockRegions.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Region + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + klog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Obj wraps the object for use in the mock. +func (m *MockRegions) Obj(o *ga.Region) *MockRegionsObj { + return &MockRegionsObj{o} +} + +// GCERegions is a simplifying adapter for the GCE Regions. +type GCERegions struct { + s *Service +} + +// Get the Region named by key. +func (g *GCERegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error) { + klog.V(5).Infof("GCERegions.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCERegions.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Regions", + } + klog.V(5).Infof("GCERegions.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCERegions.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.Regions.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCERegions.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all Region objects. 
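Regions is read-only (Get and List only), so its mock carries just Objects, GetError and ListError plus the two hooks. A hedged sketch of seeding it for a test; the name-only key for a non-zonal resource is an assumption, and pr is supplied by the caller:

```go
// Sketch only: a Regions mock pre-seeded with one region.
func newRegionsMock(pr ProjectRouter) *MockRegions {
	mock := NewMockRegions(pr, map[meta.Key]*MockRegionsObj{})
	regionKey := meta.Key{Name: "us-central1"} // placeholder; only Name set for a global-style key
	mock.Objects[regionKey] = mock.Obj(&ga.Region{Name: "us-central1"})
	return mock
}
```

Calling List(ctx, filter.None) on this mock then returns the seeded region, since the generated code only drops objects that fail fl.Match.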
+func (g *GCERegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) { + klog.V(5).Infof("GCERegions.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Regions", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCERegions.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.Regions.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Region + f := func(l *ga.RegionList) error { + klog.V(5).Infof("GCERegions.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCERegions.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Routes is an interface that allows for mocking of Routes. +type Routes interface { + Get(ctx context.Context, key *meta.Key) (*ga.Route, error) + List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.Route) error + Delete(ctx context.Context, key *meta.Key) error +} + +// NewMockRoutes returns a new mock for Routes. +func NewMockRoutes(pr ProjectRouter, objs map[meta.Key]*MockRoutesObj) *MockRoutes { + mock := &MockRoutes{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockRoutes is the mock for Routes. +type MockRoutes struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockRoutesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockRoutes) (bool, *ga.Route, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockRoutes) (bool, []*ga.Route, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Route, m *MockRoutes) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockRoutes) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
+func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToGA() + klog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockRoutes %v not found", key), + } + klog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockRoutes.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Route + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + klog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockRoutes %v exists", key), + } + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "routes") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "routes", key) + + m.Objects[*key] = &MockRoutesObj{obj} + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockRoutes) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockRoutes %v not found", key), + } + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockRoutes) Obj(o *ga.Route) *MockRoutesObj { + return &MockRoutesObj{o} +} + +// GCERoutes is a simplifying adapter for the GCE Routes. +type GCERoutes struct { + s *Service +} + +// Get the Route named by key. +func (g *GCERoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) { + klog.V(5).Infof("GCERoutes.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCERoutes.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Routes", + } + klog.V(5).Infof("GCERoutes.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCERoutes.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.Routes.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCERoutes.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all Route objects. +func (g *GCERoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) { + klog.V(5).Infof("GCERoutes.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Routes", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCERoutes.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.Routes.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Route + f := func(l *ga.RouteList) error { + klog.V(5).Infof("GCERoutes.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCERoutes.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert Route with key of value obj. 
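Routes are a global resource, so MockRoutes keys its Objects map on a name-only meta.Key and supports the full Insert/Get/Delete cycle. A hedged sketch of a round trip through the mock; the route name and DestRange are placeholders:

```go
// Sketch only: Insert, Get and Delete a route through the mock.
func routeRoundTrip(ctx context.Context, mock *MockRoutes) error {
	key := &meta.Key{Name: "example-route"} // placeholder global key (Name only)
	if err := mock.Insert(ctx, key, &ga.Route{DestRange: "10.0.0.0/24"}); err != nil {
		return err
	}
	if _, err := mock.Get(ctx, key); err != nil {
		return err
	}
	return mock.Delete(ctx, key)
}
```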
+func (g *GCERoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) error { + klog.V(5).Infof("GCERoutes.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCERoutes.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "Routes", + } + klog.V(5).Infof("GCERoutes.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCERoutes.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.GA.Routes.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCERoutes.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCERoutes.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the Route referenced by key. +func (g *GCERoutes) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCERoutes.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCERoutes.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "Routes", + } + klog.V(5).Infof("GCERoutes.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCERoutes.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.Routes.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// BetaSecurityPolicies is an interface that allows for mocking of SecurityPolicies. +type BetaSecurityPolicies interface { + Get(ctx context.Context, key *meta.Key) (*beta.SecurityPolicy, error) + List(ctx context.Context, fl *filter.F) ([]*beta.SecurityPolicy, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy) error + Delete(ctx context.Context, key *meta.Key) error + AddRule(context.Context, *meta.Key, *beta.SecurityPolicyRule) error + GetRule(context.Context, *meta.Key) (*beta.SecurityPolicyRule, error) + Patch(context.Context, *meta.Key, *beta.SecurityPolicy) error + PatchRule(context.Context, *meta.Key, *beta.SecurityPolicyRule) error + RemoveRule(context.Context, *meta.Key) error +} + +// NewMockBetaSecurityPolicies returns a new mock for SecurityPolicies. +func NewMockBetaSecurityPolicies(pr ProjectRouter, objs map[meta.Key]*MockSecurityPoliciesObj) *MockBetaSecurityPolicies { + mock := &MockBetaSecurityPolicies{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaSecurityPolicies is the mock for SecurityPolicies. 
+type MockBetaSecurityPolicies struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockSecurityPoliciesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaSecurityPolicies) (bool, *beta.SecurityPolicy, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockBetaSecurityPolicies) (bool, []*beta.SecurityPolicy, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy, m *MockBetaSecurityPolicies) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaSecurityPolicies) (bool, error) + AddRuleHook func(context.Context, *meta.Key, *beta.SecurityPolicyRule, *MockBetaSecurityPolicies) error + GetRuleHook func(context.Context, *meta.Key, *MockBetaSecurityPolicies) (*beta.SecurityPolicyRule, error) + PatchHook func(context.Context, *meta.Key, *beta.SecurityPolicy, *MockBetaSecurityPolicies) error + PatchRuleHook func(context.Context, *meta.Key, *beta.SecurityPolicyRule, *MockBetaSecurityPolicies) error + RemoveRuleHook func(context.Context, *meta.Key, *MockBetaSecurityPolicies) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*beta.SecurityPolicy, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaSecurityPolicies %v not found", key), + } + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. 
+func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*beta.SecurityPolicy, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.SecurityPolicy + for _, obj := range m.Objects { + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaSecurityPolicies %v exists", key), + } + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "securityPolicies") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "securityPolicies", key) + + m.Objects[*key] = &MockSecurityPoliciesObj{obj} + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaSecurityPolicies %v not found", key), + } + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBetaSecurityPolicies) Obj(o *beta.SecurityPolicy) *MockSecurityPoliciesObj { + return &MockSecurityPoliciesObj{o} +} + +// AddRule is a mock for the corresponding method. +func (m *MockBetaSecurityPolicies) AddRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { + if m.AddRuleHook != nil { + return m.AddRuleHook(ctx, key, arg0, m) + } + return nil +} + +// GetRule is a mock for the corresponding method. 
+func (m *MockBetaSecurityPolicies) GetRule(ctx context.Context, key *meta.Key) (*beta.SecurityPolicyRule, error) { + if m.GetRuleHook != nil { + return m.GetRuleHook(ctx, key, m) + } + return nil, fmt.Errorf("GetRuleHook must be set") +} + +// Patch is a mock for the corresponding method. +func (m *MockBetaSecurityPolicies) Patch(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicy) error { + if m.PatchHook != nil { + return m.PatchHook(ctx, key, arg0, m) + } + return nil +} + +// PatchRule is a mock for the corresponding method. +func (m *MockBetaSecurityPolicies) PatchRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { + if m.PatchRuleHook != nil { + return m.PatchRuleHook(ctx, key, arg0, m) + } + return nil +} + +// RemoveRule is a mock for the corresponding method. +func (m *MockBetaSecurityPolicies) RemoveRule(ctx context.Context, key *meta.Key) error { + if m.RemoveRuleHook != nil { + return m.RemoveRuleHook(ctx, key, m) + } + return nil +} + +// GCEBetaSecurityPolicies is a simplifying adapter for the GCE SecurityPolicies. +type GCEBetaSecurityPolicies struct { + s *Service +} + +// Get the SecurityPolicy named by key. +func (g *GCEBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*beta.SecurityPolicy, error) { + klog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaSecurityPolicies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + klog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Beta.SecurityPolicies.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all SecurityPolicy objects. +func (g *GCEBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*beta.SecurityPolicy, error) { + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Beta.SecurityPolicies.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*beta.SecurityPolicy + f := func(l *beta.SecurityPolicyList) error { + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) 
+ return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert SecurityPolicy with key of value obj. +func (g *GCEBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy) error { + klog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + klog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Beta.SecurityPolicies.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the SecurityPolicy referenced by key. +func (g *GCEBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + klog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.SecurityPolicies.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// AddRule is a method on GCEBetaSecurityPolicies. 
+func (g *GCEBetaSecurityPolicies) AddRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { + klog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AddRule", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + klog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.SecurityPolicies.AddRule(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// GetRule is a method on GCEBetaSecurityPolicies. +func (g *GCEBetaSecurityPolicies) GetRule(ctx context.Context, key *meta.Key) (*beta.SecurityPolicyRule, error) { + klog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "GetRule", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + klog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Beta.SecurityPolicies.GetRule(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...) = %+v, %v", ctx, key, v, err) + return v, err +} + +// Patch is a method on GCEBetaSecurityPolicies. 
+func (g *GCEBetaSecurityPolicies) Patch(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicy) error { + klog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Patch", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + klog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.SecurityPolicies.Patch(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// PatchRule is a method on GCEBetaSecurityPolicies. +func (g *GCEBetaSecurityPolicies) PatchRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { + klog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "PatchRule", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + klog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.SecurityPolicies.PatchRule(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// RemoveRule is a method on GCEBetaSecurityPolicies. 
+func (g *GCEBetaSecurityPolicies) RemoveRule(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "RemoveRule", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + klog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.SecurityPolicies.RemoveRule(projectID, key.Name) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// SslCertificates is an interface that allows for mocking of SslCertificates. +type SslCertificates interface { + Get(ctx context.Context, key *meta.Key) (*ga.SslCertificate, error) + List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.SslCertificate) error + Delete(ctx context.Context, key *meta.Key) error +} + +// NewMockSslCertificates returns a new mock for SslCertificates. +func NewMockSslCertificates(pr ProjectRouter, objs map[meta.Key]*MockSslCertificatesObj) *MockSslCertificates { + mock := &MockSslCertificates{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockSslCertificates is the mock for SslCertificates. +type MockSslCertificates struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockSslCertificatesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockSslCertificates) (bool, *ga.SslCertificate, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockSslCertificates) (bool, []*ga.SslCertificate, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.SslCertificate, m *MockSslCertificates) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockSslCertificates) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
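Before the mock's method bodies below, here is a minimal sketch of how a test might construct MockSslCertificates and use the hook fall-through convention (a hook that returns false hands control back to the built-in behavior). SingleProjectRouter and the import paths are assumptions; any ProjectRouter implementation returning a fixed project ID would serve the same purpose.

package sslcerttest // illustrative

import (
	"context"
	"testing"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"      // path assumed
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" // path assumed
	ga "google.golang.org/api/compute/v1"
)

func TestSslCertificatesMock(t *testing.T) {
	ctx := context.Background()
	// SingleProjectRouter is assumed here; the constructor only needs something
	// satisfying ProjectRouter so that Insert can build a SelfLink.
	mock := cloud.NewMockSslCertificates(
		&cloud.SingleProjectRouter{ID: "test-project"},
		map[meta.Key]*cloud.MockSslCertificatesObj{},
	)

	// Hooks run first; returning false falls through to the normal mock behavior.
	mock.GetHook = func(ctx context.Context, key *meta.Key, _ *cloud.MockSslCertificates) (bool, *ga.SslCertificate, error) {
		if key.Name == "intercepted" {
			return true, &ga.SslCertificate{Name: "intercepted"}, nil
		}
		return false, nil, nil
	}

	key := meta.GlobalKey("cert-1")
	if err := mock.Insert(ctx, key, &ga.SslCertificate{}); err != nil {
		t.Fatalf("Insert(%v) = %v", key, err)
	}
	got, err := mock.Get(ctx, key) // hook falls through; Insert already set Name and SelfLink
	if err != nil || got.Name != "cert-1" {
		t.Fatalf("Get(%v) = %+v, %v", key, got, err)
	}
}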
+func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCertificate, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToGA() + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockSslCertificates %v not found", key), + } + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockSslCertificates.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.SslCertificate + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + klog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga.SslCertificate) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockSslCertificates %v exists", key), + } + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "sslCertificates") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "sslCertificates", key) + + m.Objects[*key] = &MockSslCertificatesObj{obj} + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockSslCertificates) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockSslCertificates %v not found", key), + } + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockSslCertificates) Obj(o *ga.SslCertificate) *MockSslCertificatesObj { + return &MockSslCertificatesObj{o} +} + +// GCESslCertificates is a simplifying adapter for the GCE SslCertificates. +type GCESslCertificates struct { + s *Service +} + +// Get the SslCertificate named by key. +func (g *GCESslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCertificate, error) { + klog.V(5).Infof("GCESslCertificates.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCESslCertificates.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "SslCertificates", + } + klog.V(5).Infof("GCESslCertificates.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCESslCertificates.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.SslCertificates.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCESslCertificates.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all SslCertificate objects. +func (g *GCESslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { + klog.V(5).Infof("GCESslCertificates.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "SslCertificates", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCESslCertificates.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.SslCertificates.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.SslCertificate + f := func(l *ga.SslCertificateList) error { + klog.V(5).Infof("GCESslCertificates.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) 
+ return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert SslCertificate with key of value obj. +func (g *GCESslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga.SslCertificate) error { + klog.V(5).Infof("GCESslCertificates.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCESslCertificates.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "SslCertificates", + } + klog.V(5).Infof("GCESslCertificates.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.GA.SslCertificates.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the SslCertificate referenced by key. +func (g *GCESslCertificates) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCESslCertificates.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCESslCertificates.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "SslCertificates", + } + klog.V(5).Infof("GCESslCertificates.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCESslCertificates.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.SslCertificates.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// BetaSslCertificates is an interface that allows for mocking of SslCertificates. +type BetaSslCertificates interface { + Get(ctx context.Context, key *meta.Key) (*beta.SslCertificate, error) + List(ctx context.Context, fl *filter.F) ([]*beta.SslCertificate, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.SslCertificate) error + Delete(ctx context.Context, key *meta.Key) error +} + +// NewMockBetaSslCertificates returns a new mock for SslCertificates. 
+func NewMockBetaSslCertificates(pr ProjectRouter, objs map[meta.Key]*MockSslCertificatesObj) *MockBetaSslCertificates { + mock := &MockBetaSslCertificates{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaSslCertificates is the mock for SslCertificates. +type MockBetaSslCertificates struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockSslCertificatesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaSslCertificates) (bool, *beta.SslCertificate, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockBetaSslCertificates) (bool, []*beta.SslCertificate, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.SslCertificate, m *MockBetaSslCertificates) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaSslCertificates) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockBetaSslCertificates) Get(ctx context.Context, key *meta.Key) (*beta.SslCertificate, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockBetaSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaSslCertificates %v not found", key), + } + klog.V(5).Infof("MockBetaSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. 
+func (m *MockBetaSslCertificates) List(ctx context.Context, fl *filter.F) ([]*beta.SslCertificate, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockBetaSslCertificates.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockBetaSslCertificates.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.SslCertificate + for _, obj := range m.Objects { + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + klog.V(5).Infof("MockBetaSslCertificates.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockBetaSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *beta.SslCertificate) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockBetaSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockBetaSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaSslCertificates %v exists", key), + } + klog.V(5).Infof("MockBetaSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "sslCertificates") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "sslCertificates", key) + + m.Objects[*key] = &MockSslCertificatesObj{obj} + klog.V(5).Infof("MockBetaSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockBetaSslCertificates) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockBetaSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaSslCertificates %v not found", key), + } + klog.V(5).Infof("MockBetaSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockBetaSslCertificates.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBetaSslCertificates) Obj(o *beta.SslCertificate) *MockSslCertificatesObj { + return &MockSslCertificatesObj{o} +} + +// GCEBetaSslCertificates is a simplifying adapter for the GCE SslCertificates. +type GCEBetaSslCertificates struct { + s *Service +} + +// Get the SslCertificate named by key. 
+func (g *GCEBetaSslCertificates) Get(ctx context.Context, key *meta.Key) (*beta.SslCertificate, error) { + klog.V(5).Infof("GCEBetaSslCertificates.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaSslCertificates.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("beta"), + Service: "SslCertificates", + } + klog.V(5).Infof("GCEBetaSslCertificates.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSslCertificates.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Beta.SslCertificates.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEBetaSslCertificates.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all SslCertificate objects. +func (g *GCEBetaSslCertificates) List(ctx context.Context, fl *filter.F) ([]*beta.SslCertificate, error) { + klog.V(5).Infof("GCEBetaSslCertificates.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "SslCertificates", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEBetaSslCertificates.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Beta.SslCertificates.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*beta.SslCertificate + f := func(l *beta.SslCertificateList) error { + klog.V(5).Infof("GCEBetaSslCertificates.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEBetaSslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEBetaSslCertificates.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEBetaSslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert SslCertificate with key of value obj. 
+func (g *GCEBetaSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *beta.SslCertificate) error { + klog.V(5).Infof("GCEBetaSslCertificates.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEBetaSslCertificates.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("beta"), + Service: "SslCertificates", + } + klog.V(5).Infof("GCEBetaSslCertificates.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSslCertificates.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Beta.SslCertificates.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaSslCertificates.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaSslCertificates.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the SslCertificate referenced by key. +func (g *GCEBetaSslCertificates) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaSslCertificates.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEBetaSslCertificates.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("beta"), + Service: "SslCertificates", + } + klog.V(5).Infof("GCEBetaSslCertificates.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSslCertificates.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.SslCertificates.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// AlphaSslCertificates is an interface that allows for mocking of SslCertificates. +type AlphaSslCertificates interface { + Get(ctx context.Context, key *meta.Key) (*alpha.SslCertificate, error) + List(ctx context.Context, fl *filter.F) ([]*alpha.SslCertificate, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.SslCertificate) error + Delete(ctx context.Context, key *meta.Key) error +} + +// NewMockAlphaSslCertificates returns a new mock for SslCertificates. +func NewMockAlphaSslCertificates(pr ProjectRouter, objs map[meta.Key]*MockSslCertificatesObj) *MockAlphaSslCertificates { + mock := &MockAlphaSslCertificates{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaSslCertificates is the mock for SslCertificates. +type MockAlphaSslCertificates struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. 
+ Objects map[meta.Key]*MockSslCertificatesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaSslCertificates) (bool, *alpha.SslCertificate, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockAlphaSslCertificates) (bool, []*alpha.SslCertificate, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.SslCertificate, m *MockAlphaSslCertificates) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaSslCertificates) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaSslCertificates) Get(ctx context.Context, key *meta.Key) (*alpha.SslCertificate, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockAlphaSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaSslCertificates %v not found", key), + } + klog.V(5).Infof("MockAlphaSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockAlphaSslCertificates) List(ctx context.Context, fl *filter.F) ([]*alpha.SslCertificate, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockAlphaSslCertificates.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockAlphaSslCertificates.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.SslCertificate + for _, obj := range m.Objects { + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + klog.V(5).Infof("MockAlphaSslCertificates.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockAlphaSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *alpha.SslCertificate) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockAlphaSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockAlphaSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaSslCertificates %v exists", key), + } + klog.V(5).Infof("MockAlphaSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "sslCertificates") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "sslCertificates", key) + + m.Objects[*key] = &MockSslCertificatesObj{obj} + klog.V(5).Infof("MockAlphaSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaSslCertificates) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockAlphaSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaSslCertificates %v not found", key), + } + klog.V(5).Infof("MockAlphaSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockAlphaSslCertificates.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaSslCertificates) Obj(o *alpha.SslCertificate) *MockSslCertificatesObj { + return &MockSslCertificatesObj{o} +} + +// GCEAlphaSslCertificates is a simplifying adapter for the GCE SslCertificates. +type GCEAlphaSslCertificates struct { + s *Service +} + +// Get the SslCertificate named by key. 
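As with the other mocks, MockAlphaSslCertificates consults its per-key error maps before doing any other work, which makes it easy to simulate API failures. A short sketch follows; the import paths are assumptions, and a nil ProjectRouter is acceptable here because the injected error is returned before the router is ever consulted.

package sslcerttest // illustrative

import (
	"context"
	"net/http"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"      // path assumed
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" // path assumed
	alpha "google.golang.org/api/compute/v0.alpha"
	"google.golang.org/api/googleapi"
)

// insertQuotaFailure shows error injection: the mock's Insert checks
// InsertError before storing anything, so the call returns exactly the
// error planted for this key.
func insertQuotaFailure(ctx context.Context) error {
	mock := cloud.NewMockAlphaSslCertificates(nil, map[meta.Key]*cloud.MockSslCertificatesObj{})

	key := meta.GlobalKey("doomed-cert")
	mock.InsertError[*key] = &googleapi.Error{
		Code:    http.StatusForbidden,
		Message: "quota exceeded (injected)",
	}
	return mock.Insert(ctx, key, &alpha.SslCertificate{})
}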
+func (g *GCEAlphaSslCertificates) Get(ctx context.Context, key *meta.Key) (*alpha.SslCertificate, error) { + klog.V(5).Infof("GCEAlphaSslCertificates.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaSslCertificates.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "SslCertificates", + } + klog.V(5).Infof("GCEAlphaSslCertificates.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaSslCertificates.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Alpha.SslCertificates.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEAlphaSslCertificates.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all SslCertificate objects. +func (g *GCEAlphaSslCertificates) List(ctx context.Context, fl *filter.F) ([]*alpha.SslCertificate, error) { + klog.V(5).Infof("GCEAlphaSslCertificates.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "SslCertificates", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEAlphaSslCertificates.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Alpha.SslCertificates.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.SslCertificate + f := func(l *alpha.SslCertificateList) error { + klog.V(5).Infof("GCEAlphaSslCertificates.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAlphaSslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEAlphaSslCertificates.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAlphaSslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert SslCertificate with key of value obj. 
+func (g *GCEAlphaSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *alpha.SslCertificate) error { + klog.V(5).Infof("GCEAlphaSslCertificates.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaSslCertificates.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "SslCertificates", + } + klog.V(5).Infof("GCEAlphaSslCertificates.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaSslCertificates.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Alpha.SslCertificates.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaSslCertificates.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaSslCertificates.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the SslCertificate referenced by key. +func (g *GCEAlphaSslCertificates) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaSslCertificates.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaSslCertificates.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "SslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "SslCertificates", + } + klog.V(5).Infof("GCEAlphaSslCertificates.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaSslCertificates.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.SslCertificates.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// AlphaRegionSslCertificates is an interface that allows for mocking of RegionSslCertificates. +type AlphaRegionSslCertificates interface { + Get(ctx context.Context, key *meta.Key) (*alpha.SslCertificate, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.SslCertificate, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.SslCertificate) error + Delete(ctx context.Context, key *meta.Key) error +} + +// NewMockAlphaRegionSslCertificates returns a new mock for RegionSslCertificates. +func NewMockAlphaRegionSslCertificates(pr ProjectRouter, objs map[meta.Key]*MockRegionSslCertificatesObj) *MockAlphaRegionSslCertificates { + mock := &MockAlphaRegionSslCertificates{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaRegionSslCertificates is the mock for RegionSslCertificates. 
+type MockAlphaRegionSslCertificates struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockRegionSslCertificatesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionSslCertificates) (bool, *alpha.SslCertificate, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAlphaRegionSslCertificates) (bool, []*alpha.SslCertificate, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.SslCertificate, m *MockAlphaRegionSslCertificates) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionSslCertificates) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaRegionSslCertificates) Get(ctx context.Context, key *meta.Key) (*alpha.SslCertificate, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaRegionSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockAlphaRegionSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaRegionSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaRegionSslCertificates %v not found", key), + } + klog.V(5).Infof("MockAlphaRegionSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockAlphaRegionSslCertificates) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.SslCertificate, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockAlphaRegionSslCertificates.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockAlphaRegionSslCertificates.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.SslCertificate + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + klog.V(5).Infof("MockAlphaRegionSslCertificates.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
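Note that the regional mock's List above matches on key.Region before applying the filter, so objects are only visible when stored under regional keys. A brief sketch follows; import paths are assumptions, and the ProjectRouter is only needed by Insert, which this sketch does not call.

package sslcerttest // illustrative

import (
	"context"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"        // path assumed
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter" // path assumed
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"   // path assumed
	alpha "google.golang.org/api/compute/v0.alpha"
)

func listOneRegion(ctx context.Context) ([]*alpha.SslCertificate, error) {
	objs := map[meta.Key]*cloud.MockRegionSslCertificatesObj{}
	mock := cloud.NewMockAlphaRegionSslCertificates(nil, objs) // router is unused by Get/List/Delete

	// Pre-populate the shared object map under regional keys.
	objs[*meta.RegionalKey("cert-a", "us-central1")] = mock.Obj(&alpha.SslCertificate{Name: "cert-a"})
	objs[*meta.RegionalKey("cert-b", "europe-west1")] = mock.Obj(&alpha.SslCertificate{Name: "cert-b"})

	// Only "cert-a" is returned: List filters on key.Region before fl.Match.
	return mock.List(ctx, "us-central1", filter.None)
}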
+func (m *MockAlphaRegionSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *alpha.SslCertificate) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockAlphaRegionSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockAlphaRegionSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaRegionSslCertificates %v exists", key), + } + klog.V(5).Infof("MockAlphaRegionSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "sslCertificates") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "sslCertificates", key) + + m.Objects[*key] = &MockRegionSslCertificatesObj{obj} + klog.V(5).Infof("MockAlphaRegionSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaRegionSslCertificates) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaRegionSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockAlphaRegionSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaRegionSslCertificates %v not found", key), + } + klog.V(5).Infof("MockAlphaRegionSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockAlphaRegionSslCertificates.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaRegionSslCertificates) Obj(o *alpha.SslCertificate) *MockRegionSslCertificatesObj { + return &MockRegionSslCertificatesObj{o} +} + +// GCEAlphaRegionSslCertificates is a simplifying adapter for the GCE RegionSslCertificates. +type GCEAlphaRegionSslCertificates struct { + s *Service +} + +// Get the SslCertificate named by key. 
+func (g *GCEAlphaRegionSslCertificates) Get(ctx context.Context, key *meta.Key) (*alpha.SslCertificate, error) { + klog.V(5).Infof("GCEAlphaRegionSslCertificates.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaRegionSslCertificates.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionSslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "RegionSslCertificates", + } + klog.V(5).Infof("GCEAlphaRegionSslCertificates.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaRegionSslCertificates.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Alpha.RegionSslCertificates.Get(projectID, key.Region, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEAlphaRegionSslCertificates.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all SslCertificate objects. +func (g *GCEAlphaRegionSslCertificates) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.SslCertificate, error) { + klog.V(5).Infof("GCEAlphaRegionSslCertificates.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionSslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "RegionSslCertificates", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEAlphaRegionSslCertificates.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Alpha.RegionSslCertificates.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.SslCertificate + f := func(l *alpha.SslCertificateList) error { + klog.V(5).Infof("GCEAlphaRegionSslCertificates.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAlphaRegionSslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEAlphaRegionSslCertificates.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAlphaRegionSslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert SslCertificate with key of value obj. 
+func (g *GCEAlphaRegionSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *alpha.SslCertificate) error { + klog.V(5).Infof("GCEAlphaRegionSslCertificates.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaRegionSslCertificates.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionSslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "RegionSslCertificates", + } + klog.V(5).Infof("GCEAlphaRegionSslCertificates.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaRegionSslCertificates.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Alpha.RegionSslCertificates.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaRegionSslCertificates.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaRegionSslCertificates.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the SslCertificate referenced by key. +func (g *GCEAlphaRegionSslCertificates) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaRegionSslCertificates.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaRegionSslCertificates.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionSslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "RegionSslCertificates", + } + klog.V(5).Infof("GCEAlphaRegionSslCertificates.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaRegionSslCertificates.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.RegionSslCertificates.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaRegionSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaRegionSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// BetaRegionSslCertificates is an interface that allows for mocking of RegionSslCertificates. +type BetaRegionSslCertificates interface { + Get(ctx context.Context, key *meta.Key) (*beta.SslCertificate, error) + List(ctx context.Context, region string, fl *filter.F) ([]*beta.SslCertificate, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.SslCertificate) error + Delete(ctx context.Context, key *meta.Key) error +} + +// NewMockBetaRegionSslCertificates returns a new mock for RegionSslCertificates. 
+func NewMockBetaRegionSslCertificates(pr ProjectRouter, objs map[meta.Key]*MockRegionSslCertificatesObj) *MockBetaRegionSslCertificates { + mock := &MockBetaRegionSslCertificates{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaRegionSslCertificates is the mock for RegionSslCertificates. +type MockBetaRegionSslCertificates struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockRegionSslCertificatesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionSslCertificates) (bool, *beta.SslCertificate, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaRegionSslCertificates) (bool, []*beta.SslCertificate, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.SslCertificate, m *MockBetaRegionSslCertificates) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionSslCertificates) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockBetaRegionSslCertificates) Get(ctx context.Context, key *meta.Key) (*beta.SslCertificate, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaRegionSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockBetaRegionSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaRegionSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaRegionSslCertificates %v not found", key), + } + klog.V(5).Infof("MockBetaRegionSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. 
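+//
+// Illustrative sketch (not part of the generated code): ListError is a
+// *error, so a single injected value fails every List call; mock below is
+// assumed to be the *MockBetaRegionSslCertificates built above. Without an
+// injected error, List returns only objects whose key.Region matches the
+// requested region and that pass the filter.
+//
+//	listErr := fmt.Errorf("injected list failure")
+//	mock.ListError = &listErr
+//	_, err := mock.List(ctx, "us-central1", filter.None) // err == listErr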
+func (m *MockBetaRegionSslCertificates) List(ctx context.Context, region string, fl *filter.F) ([]*beta.SslCertificate, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockBetaRegionSslCertificates.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockBetaRegionSslCertificates.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.SslCertificate + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + klog.V(5).Infof("MockBetaRegionSslCertificates.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockBetaRegionSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *beta.SslCertificate) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockBetaRegionSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockBetaRegionSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaRegionSslCertificates %v exists", key), + } + klog.V(5).Infof("MockBetaRegionSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "sslCertificates") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "sslCertificates", key) + + m.Objects[*key] = &MockRegionSslCertificatesObj{obj} + klog.V(5).Infof("MockBetaRegionSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockBetaRegionSslCertificates) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaRegionSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockBetaRegionSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaRegionSslCertificates %v not found", key), + } + klog.V(5).Infof("MockBetaRegionSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockBetaRegionSslCertificates.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBetaRegionSslCertificates) Obj(o *beta.SslCertificate) *MockRegionSslCertificatesObj { + return &MockRegionSslCertificatesObj{o} +} + +// GCEBetaRegionSslCertificates is a simplifying adapter for the GCE RegionSslCertificates. 
+type GCEBetaRegionSslCertificates struct { + s *Service +} + +// Get the SslCertificate named by key. +func (g *GCEBetaRegionSslCertificates) Get(ctx context.Context, key *meta.Key) (*beta.SslCertificate, error) { + klog.V(5).Infof("GCEBetaRegionSslCertificates.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaRegionSslCertificates.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionSslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("beta"), + Service: "RegionSslCertificates", + } + klog.V(5).Infof("GCEBetaRegionSslCertificates.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaRegionSslCertificates.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Beta.RegionSslCertificates.Get(projectID, key.Region, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEBetaRegionSslCertificates.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all SslCertificate objects. +func (g *GCEBetaRegionSslCertificates) List(ctx context.Context, region string, fl *filter.F) ([]*beta.SslCertificate, error) { + klog.V(5).Infof("GCEBetaRegionSslCertificates.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionSslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "RegionSslCertificates", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEBetaRegionSslCertificates.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Beta.RegionSslCertificates.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*beta.SslCertificate + f := func(l *beta.SslCertificateList) error { + klog.V(5).Infof("GCEBetaRegionSslCertificates.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEBetaRegionSslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEBetaRegionSslCertificates.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEBetaRegionSslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert SslCertificate with key of value obj. 
+func (g *GCEBetaRegionSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *beta.SslCertificate) error { + klog.V(5).Infof("GCEBetaRegionSslCertificates.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEBetaRegionSslCertificates.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionSslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("beta"), + Service: "RegionSslCertificates", + } + klog.V(5).Infof("GCEBetaRegionSslCertificates.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaRegionSslCertificates.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Beta.RegionSslCertificates.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaRegionSslCertificates.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaRegionSslCertificates.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the SslCertificate referenced by key. +func (g *GCEBetaRegionSslCertificates) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaRegionSslCertificates.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEBetaRegionSslCertificates.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionSslCertificates") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("beta"), + Service: "RegionSslCertificates", + } + klog.V(5).Infof("GCEBetaRegionSslCertificates.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaRegionSslCertificates.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.RegionSslCertificates.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaRegionSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaRegionSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// AlphaSubnetworks is an interface that allows for mocking of Subnetworks. +type AlphaSubnetworks interface { + Get(ctx context.Context, key *meta.Key) (*alpha.Subnetwork, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Subnetwork, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.Subnetwork) error + Delete(ctx context.Context, key *meta.Key) error +} + +// NewMockAlphaSubnetworks returns a new mock for Subnetworks. +func NewMockAlphaSubnetworks(pr ProjectRouter, objs map[meta.Key]*MockSubnetworksObj) *MockAlphaSubnetworks { + mock := &MockAlphaSubnetworks{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaSubnetworks is the mock for Subnetworks. 
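+//
+// Illustrative sketch (not part of the generated code): the hook fields below
+// let a test intercept individual calls. Returning true short-circuits the
+// mock; returning (false, nil, nil) falls through to the normal map-backed
+// behavior. pr is an assumed ProjectRouter stub.
+//
+//	mock := NewMockAlphaSubnetworks(pr, map[meta.Key]*MockSubnetworksObj{})
+//	mock.GetHook = func(ctx context.Context, key *meta.Key, m *MockAlphaSubnetworks) (bool, *alpha.Subnetwork, error) {
+//		if key.Name == "special" {
+//			return true, &alpha.Subnetwork{Name: "special"}, nil
+//		}
+//		return false, nil, nil // use m.Objects as usual
+//	}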
+type MockAlphaSubnetworks struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockSubnetworksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaSubnetworks) (bool, *alpha.Subnetwork, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAlphaSubnetworks) (bool, []*alpha.Subnetwork, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.Subnetwork, m *MockAlphaSubnetworks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaSubnetworks) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaSubnetworks) Get(ctx context.Context, key *meta.Key) (*alpha.Subnetwork, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaSubnetworks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockAlphaSubnetworks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaSubnetworks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaSubnetworks %v not found", key), + } + klog.V(5).Infof("MockAlphaSubnetworks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockAlphaSubnetworks) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Subnetwork, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockAlphaSubnetworks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockAlphaSubnetworks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.Subnetwork + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + klog.V(5).Infof("MockAlphaSubnetworks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
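+//
+// Illustrative sketch (not part of the generated code): a successful mock
+// Insert stores the object under the key and fills in Name and SelfLink; a
+// second Insert with the same key then fails with an http.StatusConflict
+// googleapi.Error, mirroring the real API. mock is an assumed
+// *MockAlphaSubnetworks.
+//
+//	key := &meta.Key{Name: "subnet-1", Region: "us-central1"}
+//	_ = mock.Insert(ctx, key, &alpha.Subnetwork{IpCidrRange: "10.0.0.0/24"})
+//	err := mock.Insert(ctx, key, &alpha.Subnetwork{}) // googleapi.Error with Code 409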
+func (m *MockAlphaSubnetworks) Insert(ctx context.Context, key *meta.Key, obj *alpha.Subnetwork) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockAlphaSubnetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockAlphaSubnetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaSubnetworks %v exists", key), + } + klog.V(5).Infof("MockAlphaSubnetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "subnetworks") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "subnetworks", key) + + m.Objects[*key] = &MockSubnetworksObj{obj} + klog.V(5).Infof("MockAlphaSubnetworks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaSubnetworks) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaSubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockAlphaSubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaSubnetworks %v not found", key), + } + klog.V(5).Infof("MockAlphaSubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockAlphaSubnetworks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaSubnetworks) Obj(o *alpha.Subnetwork) *MockSubnetworksObj { + return &MockSubnetworksObj{o} +} + +// GCEAlphaSubnetworks is a simplifying adapter for the GCE Subnetworks. +type GCEAlphaSubnetworks struct { + s *Service +} + +// Get the Subnetwork named by key. +func (g *GCEAlphaSubnetworks) Get(ctx context.Context, key *meta.Key) (*alpha.Subnetwork, error) { + klog.V(5).Infof("GCEAlphaSubnetworks.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaSubnetworks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Subnetworks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "Subnetworks", + } + klog.V(5).Infof("GCEAlphaSubnetworks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaSubnetworks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Alpha.Subnetworks.Get(projectID, key.Region, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEAlphaSubnetworks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all Subnetwork objects. 
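+//
+// Illustrative sketch (not part of the generated code): listing the
+// subnetworks of one region through the adapter (g and ctx assumed). Pass
+// filter.None to list everything, or a constructed *filter.F to push a
+// server-side filter; pagination is handled internally by call.Pages.
+//
+//	subnets, err := g.List(ctx, "us-central1", filter.None)
+//	if err != nil {
+//		// rate-limiter or API error
+//	}
+//	for _, s := range subnets {
+//		_ = s.Name
+//	}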
+func (g *GCEAlphaSubnetworks) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Subnetwork, error) { + klog.V(5).Infof("GCEAlphaSubnetworks.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Subnetworks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "Subnetworks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEAlphaSubnetworks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Alpha.Subnetworks.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.Subnetwork + f := func(l *alpha.SubnetworkList) error { + klog.V(5).Infof("GCEAlphaSubnetworks.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAlphaSubnetworks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEAlphaSubnetworks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAlphaSubnetworks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert Subnetwork with key of value obj. +func (g *GCEAlphaSubnetworks) Insert(ctx context.Context, key *meta.Key, obj *alpha.Subnetwork) error { + klog.V(5).Infof("GCEAlphaSubnetworks.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaSubnetworks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Subnetworks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "Subnetworks", + } + klog.V(5).Infof("GCEAlphaSubnetworks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaSubnetworks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Alpha.Subnetworks.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaSubnetworks.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaSubnetworks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the Subnetwork referenced by key. 
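+//
+// Illustrative sketch (not part of the generated code): Delete issues the API
+// call and then waits for the returned operation, so a nil error means the
+// subnetwork is gone rather than merely that deletion was accepted (g and ctx
+// assumed).
+//
+//	err := g.Delete(ctx, &meta.Key{Name: "subnet-1", Region: "us-central1"})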
+func (g *GCEAlphaSubnetworks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaSubnetworks.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaSubnetworks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Subnetworks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "Subnetworks", + } + klog.V(5).Infof("GCEAlphaSubnetworks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaSubnetworks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.Subnetworks.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaSubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaSubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// BetaSubnetworks is an interface that allows for mocking of Subnetworks. +type BetaSubnetworks interface { + Get(ctx context.Context, key *meta.Key) (*beta.Subnetwork, error) + List(ctx context.Context, region string, fl *filter.F) ([]*beta.Subnetwork, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.Subnetwork) error + Delete(ctx context.Context, key *meta.Key) error +} + +// NewMockBetaSubnetworks returns a new mock for Subnetworks. +func NewMockBetaSubnetworks(pr ProjectRouter, objs map[meta.Key]*MockSubnetworksObj) *MockBetaSubnetworks { + mock := &MockBetaSubnetworks{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaSubnetworks is the mock for Subnetworks. +type MockBetaSubnetworks struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockSubnetworksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaSubnetworks) (bool, *beta.Subnetwork, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaSubnetworks) (bool, []*beta.Subnetwork, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.Subnetwork, m *MockBetaSubnetworks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaSubnetworks) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
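+//
+// Illustrative sketch (not part of the generated code): per-key error
+// injection for Get on an assumed *MockBetaSubnetworks. Without an injected
+// error, a missing key yields a googleapi.Error with Code
+// http.StatusNotFound, mirroring the real API.
+//
+//	key := meta.Key{Name: "subnet-1", Region: "us-central1"}
+//	mock.GetError[key] = fmt.Errorf("injected get failure")
+//	_, err := mock.Get(ctx, &key) // returns the injected error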
+func (m *MockBetaSubnetworks) Get(ctx context.Context, key *meta.Key) (*beta.Subnetwork, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaSubnetworks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockBetaSubnetworks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaSubnetworks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaSubnetworks %v not found", key), + } + klog.V(5).Infof("MockBetaSubnetworks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockBetaSubnetworks) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Subnetwork, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockBetaSubnetworks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockBetaSubnetworks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.Subnetwork + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + klog.V(5).Infof("MockBetaSubnetworks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockBetaSubnetworks) Insert(ctx context.Context, key *meta.Key, obj *beta.Subnetwork) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockBetaSubnetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockBetaSubnetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaSubnetworks %v exists", key), + } + klog.V(5).Infof("MockBetaSubnetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "subnetworks") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "subnetworks", key) + + m.Objects[*key] = &MockSubnetworksObj{obj} + klog.V(5).Infof("MockBetaSubnetworks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockBetaSubnetworks) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaSubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockBetaSubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaSubnetworks %v not found", key), + } + klog.V(5).Infof("MockBetaSubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockBetaSubnetworks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBetaSubnetworks) Obj(o *beta.Subnetwork) *MockSubnetworksObj { + return &MockSubnetworksObj{o} +} + +// GCEBetaSubnetworks is a simplifying adapter for the GCE Subnetworks. +type GCEBetaSubnetworks struct { + s *Service +} + +// Get the Subnetwork named by key. +func (g *GCEBetaSubnetworks) Get(ctx context.Context, key *meta.Key) (*beta.Subnetwork, error) { + klog.V(5).Infof("GCEBetaSubnetworks.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaSubnetworks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Subnetworks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("beta"), + Service: "Subnetworks", + } + klog.V(5).Infof("GCEBetaSubnetworks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSubnetworks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Beta.Subnetworks.Get(projectID, key.Region, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEBetaSubnetworks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all Subnetwork objects. +func (g *GCEBetaSubnetworks) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Subnetwork, error) { + klog.V(5).Infof("GCEBetaSubnetworks.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Subnetworks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "Subnetworks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEBetaSubnetworks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Beta.Subnetworks.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*beta.Subnetwork + f := func(l *beta.SubnetworkList) error { + klog.V(5).Infof("GCEBetaSubnetworks.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) 
+ return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEBetaSubnetworks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEBetaSubnetworks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEBetaSubnetworks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert Subnetwork with key of value obj. +func (g *GCEBetaSubnetworks) Insert(ctx context.Context, key *meta.Key, obj *beta.Subnetwork) error { + klog.V(5).Infof("GCEBetaSubnetworks.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEBetaSubnetworks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Subnetworks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("beta"), + Service: "Subnetworks", + } + klog.V(5).Infof("GCEBetaSubnetworks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSubnetworks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Beta.Subnetworks.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaSubnetworks.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaSubnetworks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the Subnetwork referenced by key. +func (g *GCEBetaSubnetworks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaSubnetworks.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEBetaSubnetworks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Subnetworks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("beta"), + Service: "Subnetworks", + } + klog.V(5).Infof("GCEBetaSubnetworks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaSubnetworks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.Subnetworks.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaSubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaSubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// Subnetworks is an interface that allows for mocking of Subnetworks. +type Subnetworks interface { + Get(ctx context.Context, key *meta.Key) (*ga.Subnetwork, error) + List(ctx context.Context, region string, fl *filter.F) ([]*ga.Subnetwork, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.Subnetwork) error + Delete(ctx context.Context, key *meta.Key) error +} + +// NewMockSubnetworks returns a new mock for Subnetworks. 
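+//
+// Illustrative sketch (not part of the generated code): the GA, alpha, and
+// beta mocks all accept the same map[meta.Key]*MockSubnetworksObj, so a test
+// can share one backing map and have an object inserted through one API
+// version become visible through the others (pr assumed).
+//
+//	objs := map[meta.Key]*MockSubnetworksObj{}
+//	gaMock := NewMockSubnetworks(pr, objs)
+//	betaMock := NewMockBetaSubnetworks(pr, objs)
+//	// An object stored via gaMock.Insert is then returned by betaMock.Get/List.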
+func NewMockSubnetworks(pr ProjectRouter, objs map[meta.Key]*MockSubnetworksObj) *MockSubnetworks { + mock := &MockSubnetworks{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockSubnetworks is the mock for Subnetworks. +type MockSubnetworks struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockSubnetworksObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockSubnetworks) (bool, *ga.Subnetwork, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockSubnetworks) (bool, []*ga.Subnetwork, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Subnetwork, m *MockSubnetworks) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockSubnetworks) (bool, error) + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockSubnetworks) Get(ctx context.Context, key *meta.Key) (*ga.Subnetwork, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockSubnetworks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockSubnetworks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToGA() + klog.V(5).Infof("MockSubnetworks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockSubnetworks %v not found", key), + } + klog.V(5).Infof("MockSubnetworks.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockSubnetworks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Subnetwork, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockSubnetworks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockSubnetworks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.Subnetwork + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + klog.V(5).Infof("MockSubnetworks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockSubnetworks) Insert(ctx context.Context, key *meta.Key, obj *ga.Subnetwork) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockSubnetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockSubnetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockSubnetworks %v exists", key), + } + klog.V(5).Infof("MockSubnetworks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "subnetworks") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "subnetworks", key) + + m.Objects[*key] = &MockSubnetworksObj{obj} + klog.V(5).Infof("MockSubnetworks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockSubnetworks) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockSubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockSubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockSubnetworks %v not found", key), + } + klog.V(5).Infof("MockSubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockSubnetworks.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockSubnetworks) Obj(o *ga.Subnetwork) *MockSubnetworksObj { + return &MockSubnetworksObj{o} +} + +// GCESubnetworks is a simplifying adapter for the GCE Subnetworks. +type GCESubnetworks struct { + s *Service +} + +// Get the Subnetwork named by key. +func (g *GCESubnetworks) Get(ctx context.Context, key *meta.Key) (*ga.Subnetwork, error) { + klog.V(5).Infof("GCESubnetworks.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCESubnetworks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Subnetworks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "Subnetworks", + } + klog.V(5).Infof("GCESubnetworks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCESubnetworks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.Subnetworks.Get(projectID, key.Region, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCESubnetworks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all Subnetwork objects. 
+func (g *GCESubnetworks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Subnetwork, error) { + klog.V(5).Infof("GCESubnetworks.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Subnetworks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "Subnetworks", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCESubnetworks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.GA.Subnetworks.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.Subnetwork + f := func(l *ga.SubnetworkList) error { + klog.V(5).Infof("GCESubnetworks.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCESubnetworks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCESubnetworks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCESubnetworks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert Subnetwork with key of value obj. +func (g *GCESubnetworks) Insert(ctx context.Context, key *meta.Key, obj *ga.Subnetwork) error { + klog.V(5).Infof("GCESubnetworks.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCESubnetworks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Subnetworks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "Subnetworks", + } + klog.V(5).Infof("GCESubnetworks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCESubnetworks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.GA.Subnetworks.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCESubnetworks.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCESubnetworks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the Subnetwork referenced by key. 
+func (g *GCESubnetworks) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCESubnetworks.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCESubnetworks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Subnetworks") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "Subnetworks", + } + klog.V(5).Infof("GCESubnetworks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCESubnetworks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.Subnetworks.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCESubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCESubnetworks.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// AlphaTargetHttpProxies is an interface that allows for mocking of TargetHttpProxies. +type AlphaTargetHttpProxies interface { + Get(ctx context.Context, key *meta.Key) (*alpha.TargetHttpProxy, error) + List(ctx context.Context, fl *filter.F) ([]*alpha.TargetHttpProxy, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpProxy) error + Delete(ctx context.Context, key *meta.Key) error + SetUrlMap(context.Context, *meta.Key, *alpha.UrlMapReference) error +} + +// NewMockAlphaTargetHttpProxies returns a new mock for TargetHttpProxies. +func NewMockAlphaTargetHttpProxies(pr ProjectRouter, objs map[meta.Key]*MockTargetHttpProxiesObj) *MockAlphaTargetHttpProxies { + mock := &MockAlphaTargetHttpProxies{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaTargetHttpProxies is the mock for TargetHttpProxies. +type MockAlphaTargetHttpProxies struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockTargetHttpProxiesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaTargetHttpProxies) (bool, *alpha.TargetHttpProxy, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockAlphaTargetHttpProxies) (bool, []*alpha.TargetHttpProxy, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpProxy, m *MockAlphaTargetHttpProxies) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaTargetHttpProxies) (bool, error) + SetUrlMapHook func(context.Context, *meta.Key, *alpha.UrlMapReference, *MockAlphaTargetHttpProxies) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. 
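+//
+// Illustrative sketch (not part of the generated code): TargetHttpProxies are
+// a global resource, so the mock is keyed by name only, with no Region or
+// Zone on the key (mock and objs assumed from NewMockAlphaTargetHttpProxies).
+//
+//	key := meta.Key{Name: "proxy-1"}
+//	objs[key] = mock.Obj(&alpha.TargetHttpProxy{Name: "proxy-1"})
+//	p, err := mock.Get(ctx, &key) // p.Name == "proxy-1", err == nil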
+func (m *MockAlphaTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*alpha.TargetHttpProxy, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockAlphaTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaTargetHttpProxies %v not found", key), + } + klog.V(5).Infof("MockAlphaTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockAlphaTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*alpha.TargetHttpProxy, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockAlphaTargetHttpProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockAlphaTargetHttpProxies.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.TargetHttpProxy + for _, obj := range m.Objects { + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + klog.V(5).Infof("MockAlphaTargetHttpProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockAlphaTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpProxy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockAlphaTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockAlphaTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaTargetHttpProxies %v exists", key), + } + klog.V(5).Infof("MockAlphaTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "targetHttpProxies") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "targetHttpProxies", key) + + m.Objects[*key] = &MockTargetHttpProxiesObj{obj} + klog.V(5).Infof("MockAlphaTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
+func (m *MockAlphaTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockAlphaTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaTargetHttpProxies %v not found", key), + } + klog.V(5).Infof("MockAlphaTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockAlphaTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaTargetHttpProxies) Obj(o *alpha.TargetHttpProxy) *MockTargetHttpProxiesObj { + return &MockTargetHttpProxiesObj{o} +} + +// SetUrlMap is a mock for the corresponding method. +func (m *MockAlphaTargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *alpha.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(ctx, key, arg0, m) + } + return nil +} + +// GCEAlphaTargetHttpProxies is a simplifying adapter for the GCE TargetHttpProxies. +type GCEAlphaTargetHttpProxies struct { + s *Service +} + +// Get the TargetHttpProxy named by key. +func (g *GCEAlphaTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*alpha.TargetHttpProxy, error) { + klog.V(5).Infof("GCEAlphaTargetHttpProxies.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaTargetHttpProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "TargetHttpProxies", + } + klog.V(5).Infof("GCEAlphaTargetHttpProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Alpha.TargetHttpProxies.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEAlphaTargetHttpProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all TargetHttpProxy objects. 
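+//
+// Illustrative sketch (not part of the generated code): unlike the regional
+// List methods above, this List takes no region argument; it enumerates the
+// project-wide collection (g and ctx assumed).
+//
+//	proxies, err := g.List(ctx, filter.None)
+//	// proxies holds every TargetHttpProxy in the project, unless err != nil.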
+func (g *GCEAlphaTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*alpha.TargetHttpProxy, error) { + klog.V(5).Infof("GCEAlphaTargetHttpProxies.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "TargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEAlphaTargetHttpProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Alpha.TargetHttpProxies.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.TargetHttpProxy + f := func(l *alpha.TargetHttpProxyList) error { + klog.V(5).Infof("GCEAlphaTargetHttpProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEAlphaTargetHttpProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAlphaTargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert TargetHttpProxy with key of value obj. +func (g *GCEAlphaTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpProxy) error { + klog.V(5).Infof("GCEAlphaTargetHttpProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaTargetHttpProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "TargetHttpProxies", + } + klog.V(5).Infof("GCEAlphaTargetHttpProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Alpha.TargetHttpProxies.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaTargetHttpProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the TargetHttpProxy referenced by key. 
+func (g *GCEAlphaTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaTargetHttpProxies.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaTargetHttpProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "TargetHttpProxies", + } + klog.V(5).Infof("GCEAlphaTargetHttpProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.TargetHttpProxies.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// SetUrlMap is a method on GCEAlphaTargetHttpProxies. +func (g *GCEAlphaTargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *alpha.UrlMapReference) error { + klog.V(5).Infof("GCEAlphaTargetHttpProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaTargetHttpProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("alpha"), + Service: "TargetHttpProxies", + } + klog.V(5).Infof("GCEAlphaTargetHttpProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.TargetHttpProxies.SetUrlMap(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaTargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// BetaTargetHttpProxies is an interface that allows for mocking of TargetHttpProxies. +type BetaTargetHttpProxies interface { + Get(ctx context.Context, key *meta.Key) (*beta.TargetHttpProxy, error) + List(ctx context.Context, fl *filter.F) ([]*beta.TargetHttpProxy, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.TargetHttpProxy) error + Delete(ctx context.Context, key *meta.Key) error + SetUrlMap(context.Context, *meta.Key, *beta.UrlMapReference) error +} + +// NewMockBetaTargetHttpProxies returns a new mock for TargetHttpProxies. +func NewMockBetaTargetHttpProxies(pr ProjectRouter, objs map[meta.Key]*MockTargetHttpProxiesObj) *MockBetaTargetHttpProxies { + mock := &MockBetaTargetHttpProxies{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaTargetHttpProxies is the mock for TargetHttpProxies. 
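Because callers can be written against interfaces such as BetaTargetHttpProxies rather than the concrete GCE types, the same reconciliation code can run against MockBetaTargetHttpProxies in tests and GCEBetaTargetHttpProxies in production. A sketch (the helper name is illustrative, not part of this file):

// deleteProxyIfExists deletes a target HTTP proxy but treats "already gone"
// as success, so callers can reconcile without a prior Get. Both the mock
// and the real adapter report missing objects as a *googleapi.Error with a
// 404 status code.
func deleteProxyIfExists(ctx context.Context, c BetaTargetHttpProxies, key *meta.Key) error {
	err := c.Delete(ctx, key)
	if apiErr, ok := err.(*googleapi.Error); ok && apiErr.Code == http.StatusNotFound {
		return nil
	}
	return err
}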
+type MockBetaTargetHttpProxies struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockTargetHttpProxiesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaTargetHttpProxies) (bool, *beta.TargetHttpProxy, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockBetaTargetHttpProxies) (bool, []*beta.TargetHttpProxy, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.TargetHttpProxy, m *MockBetaTargetHttpProxies) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaTargetHttpProxies) (bool, error) + SetUrlMapHook func(context.Context, *meta.Key, *beta.UrlMapReference, *MockBetaTargetHttpProxies) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockBetaTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*beta.TargetHttpProxy, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockBetaTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaTargetHttpProxies %v not found", key), + } + klog.V(5).Infof("MockBetaTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockBetaTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*beta.TargetHttpProxy, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockBetaTargetHttpProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockBetaTargetHttpProxies.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.TargetHttpProxy + for _, obj := range m.Objects { + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + klog.V(5).Infof("MockBetaTargetHttpProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
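The hook fields declared above intercept the mock before any of its own bookkeeping runs; returning true from a hook short-circuits the Objects and error maps entirely. A sketch, under the same in-package test-file assumptions as the earlier examples:

func TestMockBetaTargetHttpProxiesGetHook(t *testing.T) {
	ctx := context.Background()
	mock := NewMockBetaTargetHttpProxies(&SingleProjectRouter{ID: "test-project"}, map[meta.Key]*MockTargetHttpProxiesObj{})

	canned := &beta.TargetHttpProxy{Name: "hooked-proxy"}
	mock.GetHook = func(ctx context.Context, key *meta.Key, m *MockBetaTargetHttpProxies) (bool, *beta.TargetHttpProxy, error) {
		// Returning true means "intercepted": the mock returns exactly this.
		return true, canned, nil
	}

	got, err := mock.Get(ctx, meta.GlobalKey("anything"))
	if err != nil || got != canned {
		t.Fatalf("Get() = %+v, %v; want the hook's canned object", got, err)
	}
}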
+func (m *MockBetaTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *beta.TargetHttpProxy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockBetaTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockBetaTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaTargetHttpProxies %v exists", key), + } + klog.V(5).Infof("MockBetaTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "targetHttpProxies") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "targetHttpProxies", key) + + m.Objects[*key] = &MockTargetHttpProxiesObj{obj} + klog.V(5).Infof("MockBetaTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockBetaTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockBetaTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaTargetHttpProxies %v not found", key), + } + klog.V(5).Infof("MockBetaTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockBetaTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBetaTargetHttpProxies) Obj(o *beta.TargetHttpProxy) *MockTargetHttpProxiesObj { + return &MockTargetHttpProxiesObj{o} +} + +// SetUrlMap is a mock for the corresponding method. +func (m *MockBetaTargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *beta.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(ctx, key, arg0, m) + } + return nil +} + +// GCEBetaTargetHttpProxies is a simplifying adapter for the GCE TargetHttpProxies. +type GCEBetaTargetHttpProxies struct { + s *Service +} + +// Get the TargetHttpProxy named by key. 
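Insert and Get round-trip through the mock much like the real API: Insert rejects duplicate keys with a 409, copies the key's name onto the object, and fabricates a version-appropriate SelfLink via the ProjectRouter. A sketch under the same assumptions as above:

func TestMockBetaTargetHttpProxiesRoundTrip(t *testing.T) {
	ctx := context.Background()
	mock := NewMockBetaTargetHttpProxies(&SingleProjectRouter{ID: "test-project"}, map[meta.Key]*MockTargetHttpProxiesObj{})

	key := meta.GlobalKey("proxy-a")
	if err := mock.Insert(ctx, key, &beta.TargetHttpProxy{}); err != nil {
		t.Fatalf("Insert() = %v", err)
	}
	// A second Insert with the same key reports the generated 409 conflict.
	if err := mock.Insert(ctx, key, &beta.TargetHttpProxy{}); err == nil {
		t.Fatal("expected a conflict error on duplicate Insert")
	}

	got, err := mock.Get(ctx, key)
	if err != nil || got.Name != "proxy-a" || got.SelfLink == "" {
		t.Fatalf("Get() = %+v, %v; want Name and SelfLink populated", got, err)
	}
}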
+func (g *GCEBetaTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*beta.TargetHttpProxy, error) { + klog.V(5).Infof("GCEBetaTargetHttpProxies.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaTargetHttpProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("beta"), + Service: "TargetHttpProxies", + } + klog.V(5).Infof("GCEBetaTargetHttpProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaTargetHttpProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Beta.TargetHttpProxies.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEBetaTargetHttpProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all TargetHttpProxy objects. +func (g *GCEBetaTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*beta.TargetHttpProxy, error) { + klog.V(5).Infof("GCEBetaTargetHttpProxies.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "TargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEBetaTargetHttpProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Beta.TargetHttpProxies.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*beta.TargetHttpProxy + f := func(l *beta.TargetHttpProxyList) error { + klog.V(5).Infof("GCEBetaTargetHttpProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEBetaTargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEBetaTargetHttpProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEBetaTargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert TargetHttpProxy with key of value obj. 
+func (g *GCEBetaTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *beta.TargetHttpProxy) error { + klog.V(5).Infof("GCEBetaTargetHttpProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEBetaTargetHttpProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("beta"), + Service: "TargetHttpProxies", + } + klog.V(5).Infof("GCEBetaTargetHttpProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaTargetHttpProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Beta.TargetHttpProxies.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaTargetHttpProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaTargetHttpProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the TargetHttpProxy referenced by key. +func (g *GCEBetaTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaTargetHttpProxies.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEBetaTargetHttpProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("beta"), + Service: "TargetHttpProxies", + } + klog.V(5).Infof("GCEBetaTargetHttpProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaTargetHttpProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.TargetHttpProxies.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// SetUrlMap is a method on GCEBetaTargetHttpProxies. 
+func (g *GCEBetaTargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *beta.UrlMapReference) error { + klog.V(5).Infof("GCEBetaTargetHttpProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaTargetHttpProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("beta"), + Service: "TargetHttpProxies", + } + klog.V(5).Infof("GCEBetaTargetHttpProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaTargetHttpProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.TargetHttpProxies.SetUrlMap(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaTargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaTargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// TargetHttpProxies is an interface that allows for mocking of TargetHttpProxies. +type TargetHttpProxies interface { + Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpProxy, error) + List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy) error + Delete(ctx context.Context, key *meta.Key) error + SetUrlMap(context.Context, *meta.Key, *ga.UrlMapReference) error +} + +// NewMockTargetHttpProxies returns a new mock for TargetHttpProxies. +func NewMockTargetHttpProxies(pr ProjectRouter, objs map[meta.Key]*MockTargetHttpProxiesObj) *MockTargetHttpProxies { + mock := &MockTargetHttpProxies{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockTargetHttpProxies is the mock for TargetHttpProxies. +type MockTargetHttpProxies struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockTargetHttpProxiesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockTargetHttpProxies) (bool, *ga.TargetHttpProxy, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockTargetHttpProxies) (bool, []*ga.TargetHttpProxy, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy, m *MockTargetHttpProxies) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockTargetHttpProxies) (bool, error) + SetUrlMapHook func(context.Context, *meta.Key, *ga.UrlMapReference, *MockTargetHttpProxies) error + + // X is extra state that can be used as part of the mock. 
Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpProxy, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToGA() + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), + } + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.TargetHttpProxy + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockTargetHttpProxies %v exists", key), + } + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "targetHttpProxies") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetHttpProxies", key) + + m.Objects[*key] = &MockTargetHttpProxiesObj{obj} + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. 
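List on the mock applies the caller's filter.F to every stored object, mirroring the server-side filter of the real List call; filter.None matches everything. A sketch under the same in-package assumptions:

func TestMockTargetHttpProxiesList(t *testing.T) {
	ctx := context.Background()
	mock := NewMockTargetHttpProxies(&SingleProjectRouter{ID: "test-project"}, map[meta.Key]*MockTargetHttpProxiesObj{})

	for _, name := range []string{"proxy-a", "proxy-b"} {
		if err := mock.Insert(ctx, meta.GlobalKey(name), &ga.TargetHttpProxy{}); err != nil {
			t.Fatalf("Insert(%q) = %v", name, err)
		}
	}

	objs, err := mock.List(ctx, filter.None)
	if err != nil || len(objs) != 2 {
		t.Fatalf("List() = %d items, %v; want 2, nil", len(objs), err)
	}
}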
+func (m *MockTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), + } + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockTargetHttpProxies) Obj(o *ga.TargetHttpProxy) *MockTargetHttpProxiesObj { + return &MockTargetHttpProxiesObj{o} +} + +// SetUrlMap is a mock for the corresponding method. +func (m *MockTargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(ctx, key, arg0, m) + } + return nil +} + +// GCETargetHttpProxies is a simplifying adapter for the GCE TargetHttpProxies. +type GCETargetHttpProxies struct { + s *Service +} + +// Get the TargetHttpProxy named by key. +func (g *GCETargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpProxy, error) { + klog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCETargetHttpProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + klog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.TargetHttpProxies.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all TargetHttpProxy objects. +func (g *GCETargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { + klog.V(5).Infof("GCETargetHttpProxies.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCETargetHttpProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.TargetHttpProxies.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.TargetHttpProxy + f := func(l *ga.TargetHttpProxyList) error { + klog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) 
+ return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert TargetHttpProxy with key of value obj. +func (g *GCETargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy) error { + klog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + klog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.GA.TargetHttpProxies.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the TargetHttpProxy referenced by key. +func (g *GCETargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCETargetHttpProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + klog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.TargetHttpProxies.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// SetUrlMap is a method on GCETargetHttpProxies. 
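The GA adapter above follows the same fixed sequence as its alpha and beta counterparts: validate the key, resolve the project through ProjectRouter, ask the RateLimiter to admit the call, issue the compute API request, and block in WaitForCompletion for mutating calls. Code written against the TargetHttpProxies interface gets all of that for free and still runs against the mock; a sketch of an idempotent "ensure" helper (the helper name is illustrative, not part of this file):

// ensureTargetHttpProxy creates the proxy if it does not already exist and
// returns the stored object. Missing objects are reported as a 404
// *googleapi.Error by both the mock and the real adapter.
func ensureTargetHttpProxy(ctx context.Context, c TargetHttpProxies, key *meta.Key, desired *ga.TargetHttpProxy) (*ga.TargetHttpProxy, error) {
	existing, err := c.Get(ctx, key)
	if err == nil {
		return existing, nil
	}
	if apiErr, ok := err.(*googleapi.Error); !ok || apiErr.Code != http.StatusNotFound {
		return nil, err
	}
	// Not found: create it. Insert overwrites desired.Name with key.Name and,
	// on the real adapter, waits for the operation to complete.
	if err := c.Insert(ctx, key, desired); err != nil {
		return nil, err
	}
	return c.Get(ctx, key)
}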
+func (g *GCETargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { + klog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("ga"), + Service: "TargetHttpProxies", + } + klog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.TargetHttpProxies.SetUrlMap(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// AlphaRegionTargetHttpProxies is an interface that allows for mocking of RegionTargetHttpProxies. +type AlphaRegionTargetHttpProxies interface { + Get(ctx context.Context, key *meta.Key) (*alpha.TargetHttpProxy, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.TargetHttpProxy, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpProxy) error + Delete(ctx context.Context, key *meta.Key) error + SetUrlMap(context.Context, *meta.Key, *alpha.UrlMapReference) error +} + +// NewMockAlphaRegionTargetHttpProxies returns a new mock for RegionTargetHttpProxies. +func NewMockAlphaRegionTargetHttpProxies(pr ProjectRouter, objs map[meta.Key]*MockRegionTargetHttpProxiesObj) *MockAlphaRegionTargetHttpProxies { + mock := &MockAlphaRegionTargetHttpProxies{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaRegionTargetHttpProxies is the mock for RegionTargetHttpProxies. +type MockAlphaRegionTargetHttpProxies struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockRegionTargetHttpProxiesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. 
+ GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionTargetHttpProxies) (bool, *alpha.TargetHttpProxy, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAlphaRegionTargetHttpProxies) (bool, []*alpha.TargetHttpProxy, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpProxy, m *MockAlphaRegionTargetHttpProxies) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionTargetHttpProxies) (bool, error) + SetUrlMapHook func(context.Context, *meta.Key, *alpha.UrlMapReference, *MockAlphaRegionTargetHttpProxies) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaRegionTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*alpha.TargetHttpProxy, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaRegionTargetHttpProxies %v not found", key), + } + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockAlphaRegionTargetHttpProxies) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.TargetHttpProxy, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.TargetHttpProxy + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockAlphaRegionTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpProxy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaRegionTargetHttpProxies %v exists", key), + } + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "targetHttpProxies") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "targetHttpProxies", key) + + m.Objects[*key] = &MockRegionTargetHttpProxiesObj{obj} + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaRegionTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaRegionTargetHttpProxies %v not found", key), + } + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockAlphaRegionTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaRegionTargetHttpProxies) Obj(o *alpha.TargetHttpProxy) *MockRegionTargetHttpProxiesObj { + return &MockRegionTargetHttpProxiesObj{o} +} + +// SetUrlMap is a mock for the corresponding method. +func (m *MockAlphaRegionTargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *alpha.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(ctx, key, arg0, m) + } + return nil +} + +// GCEAlphaRegionTargetHttpProxies is a simplifying adapter for the GCE RegionTargetHttpProxies. +type GCEAlphaRegionTargetHttpProxies struct { + s *Service +} + +// Get the TargetHttpProxy named by key. 
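RegionTargetHttpProxies differ from the global variants only in their keys and List signature: objects are stored under regional keys, and List(ctx, region, fl) returns just the objects whose key matches the requested region. A sketch, assuming the meta package's RegionalKey helper and the same in-package test-file setup as above:

func TestMockAlphaRegionTargetHttpProxiesByRegion(t *testing.T) {
	ctx := context.Background()
	mock := NewMockAlphaRegionTargetHttpProxies(&SingleProjectRouter{ID: "test-project"}, map[meta.Key]*MockRegionTargetHttpProxiesObj{})

	if err := mock.Insert(ctx, meta.RegionalKey("proxy-central", "us-central1"), &alpha.TargetHttpProxy{}); err != nil {
		t.Fatalf("Insert() = %v", err)
	}
	if err := mock.Insert(ctx, meta.RegionalKey("proxy-east", "us-east1"), &alpha.TargetHttpProxy{}); err != nil {
		t.Fatalf("Insert() = %v", err)
	}

	// Only the us-central1 object is visible when listing that region.
	objs, err := mock.List(ctx, "us-central1", filter.None)
	if err != nil || len(objs) != 1 || objs[0].Name != "proxy-central" {
		t.Fatalf("List(us-central1) = %+v, %v; want just proxy-central", objs, err)
	}
}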
+func (g *GCEAlphaRegionTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*alpha.TargetHttpProxy, error) { + klog.V(5).Infof("GCEAlphaRegionTargetHttpProxies.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaRegionTargetHttpProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionTargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "RegionTargetHttpProxies", + } + klog.V(5).Infof("GCEAlphaRegionTargetHttpProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Alpha.RegionTargetHttpProxies.Get(projectID, key.Region, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEAlphaRegionTargetHttpProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all TargetHttpProxy objects. +func (g *GCEAlphaRegionTargetHttpProxies) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.TargetHttpProxy, error) { + klog.V(5).Infof("GCEAlphaRegionTargetHttpProxies.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionTargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "RegionTargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEAlphaRegionTargetHttpProxies.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Alpha.RegionTargetHttpProxies.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.TargetHttpProxy + f := func(l *alpha.TargetHttpProxyList) error { + klog.V(5).Infof("GCEAlphaRegionTargetHttpProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEAlphaRegionTargetHttpProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAlphaRegionTargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert TargetHttpProxy with key of value obj. 
+func (g *GCEAlphaRegionTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpProxy) error { + klog.V(5).Infof("GCEAlphaRegionTargetHttpProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaRegionTargetHttpProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionTargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "RegionTargetHttpProxies", + } + klog.V(5).Infof("GCEAlphaRegionTargetHttpProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Alpha.RegionTargetHttpProxies.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaRegionTargetHttpProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the TargetHttpProxy referenced by key. +func (g *GCEAlphaRegionTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaRegionTargetHttpProxies.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaRegionTargetHttpProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionTargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "RegionTargetHttpProxies", + } + klog.V(5).Infof("GCEAlphaRegionTargetHttpProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.RegionTargetHttpProxies.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaRegionTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// SetUrlMap is a method on GCEAlphaRegionTargetHttpProxies. 
+func (g *GCEAlphaRegionTargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *alpha.UrlMapReference) error { + klog.V(5).Infof("GCEAlphaRegionTargetHttpProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaRegionTargetHttpProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionTargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("alpha"), + Service: "RegionTargetHttpProxies", + } + klog.V(5).Infof("GCEAlphaRegionTargetHttpProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.RegionTargetHttpProxies.SetUrlMap(projectID, key.Region, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaRegionTargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// BetaRegionTargetHttpProxies is an interface that allows for mocking of RegionTargetHttpProxies. +type BetaRegionTargetHttpProxies interface { + Get(ctx context.Context, key *meta.Key) (*beta.TargetHttpProxy, error) + List(ctx context.Context, region string, fl *filter.F) ([]*beta.TargetHttpProxy, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.TargetHttpProxy) error + Delete(ctx context.Context, key *meta.Key) error + SetUrlMap(context.Context, *meta.Key, *beta.UrlMapReference) error +} + +// NewMockBetaRegionTargetHttpProxies returns a new mock for RegionTargetHttpProxies. +func NewMockBetaRegionTargetHttpProxies(pr ProjectRouter, objs map[meta.Key]*MockRegionTargetHttpProxiesObj) *MockBetaRegionTargetHttpProxies { + mock := &MockBetaRegionTargetHttpProxies{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaRegionTargetHttpProxies is the mock for RegionTargetHttpProxies. +type MockBetaRegionTargetHttpProxies struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockRegionTargetHttpProxiesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. 
+ GetHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionTargetHttpProxies) (bool, *beta.TargetHttpProxy, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaRegionTargetHttpProxies) (bool, []*beta.TargetHttpProxy, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.TargetHttpProxy, m *MockBetaRegionTargetHttpProxies) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionTargetHttpProxies) (bool, error) + SetUrlMapHook func(context.Context, *meta.Key, *beta.UrlMapReference, *MockBetaRegionTargetHttpProxies) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockBetaRegionTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*beta.TargetHttpProxy, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaRegionTargetHttpProxies %v not found", key), + } + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock in the given region. +func (m *MockBetaRegionTargetHttpProxies) List(ctx context.Context, region string, fl *filter.F) ([]*beta.TargetHttpProxy, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.TargetHttpProxy + for key, obj := range m.Objects { + if key.Region != region { + continue + } + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockBetaRegionTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *beta.TargetHttpProxy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaRegionTargetHttpProxies %v exists", key), + } + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "targetHttpProxies") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "targetHttpProxies", key) + + m.Objects[*key] = &MockRegionTargetHttpProxiesObj{obj} + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockBetaRegionTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaRegionTargetHttpProxies %v not found", key), + } + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockBetaRegionTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBetaRegionTargetHttpProxies) Obj(o *beta.TargetHttpProxy) *MockRegionTargetHttpProxiesObj { + return &MockRegionTargetHttpProxiesObj{o} +} + +// SetUrlMap is a mock for the corresponding method. +func (m *MockBetaRegionTargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *beta.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(ctx, key, arg0, m) + } + return nil +} + +// GCEBetaRegionTargetHttpProxies is a simplifying adapter for the GCE RegionTargetHttpProxies. +type GCEBetaRegionTargetHttpProxies struct { + s *Service +} + +// Get the TargetHttpProxy named by key. 
+func (g *GCEBetaRegionTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*beta.TargetHttpProxy, error) { + klog.V(5).Infof("GCEBetaRegionTargetHttpProxies.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaRegionTargetHttpProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionTargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("beta"), + Service: "RegionTargetHttpProxies", + } + klog.V(5).Infof("GCEBetaRegionTargetHttpProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaRegionTargetHttpProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Beta.RegionTargetHttpProxies.Get(projectID, key.Region, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEBetaRegionTargetHttpProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all TargetHttpProxy objects. +func (g *GCEBetaRegionTargetHttpProxies) List(ctx context.Context, region string, fl *filter.F) ([]*beta.TargetHttpProxy, error) { + klog.V(5).Infof("GCEBetaRegionTargetHttpProxies.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionTargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "RegionTargetHttpProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEBetaRegionTargetHttpProxies.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Beta.RegionTargetHttpProxies.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*beta.TargetHttpProxy + f := func(l *beta.TargetHttpProxyList) error { + klog.V(5).Infof("GCEBetaRegionTargetHttpProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEBetaRegionTargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEBetaRegionTargetHttpProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEBetaRegionTargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert TargetHttpProxy with key of value obj. 
+func (g *GCEBetaRegionTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *beta.TargetHttpProxy) error { + klog.V(5).Infof("GCEBetaRegionTargetHttpProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEBetaRegionTargetHttpProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionTargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("beta"), + Service: "RegionTargetHttpProxies", + } + klog.V(5).Infof("GCEBetaRegionTargetHttpProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaRegionTargetHttpProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Beta.RegionTargetHttpProxies.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaRegionTargetHttpProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaRegionTargetHttpProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the TargetHttpProxy referenced by key. +func (g *GCEBetaRegionTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaRegionTargetHttpProxies.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEBetaRegionTargetHttpProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionTargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("beta"), + Service: "RegionTargetHttpProxies", + } + klog.V(5).Infof("GCEBetaRegionTargetHttpProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaRegionTargetHttpProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.RegionTargetHttpProxies.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaRegionTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaRegionTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// SetUrlMap is a method on GCEBetaRegionTargetHttpProxies. 
+func (g *GCEBetaRegionTargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *beta.UrlMapReference) error { + klog.V(5).Infof("GCEBetaRegionTargetHttpProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaRegionTargetHttpProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionTargetHttpProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("beta"), + Service: "RegionTargetHttpProxies", + } + klog.V(5).Infof("GCEBetaRegionTargetHttpProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaRegionTargetHttpProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.RegionTargetHttpProxies.SetUrlMap(projectID, key.Region, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaRegionTargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaRegionTargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// TargetHttpsProxies is an interface that allows for mocking of TargetHttpsProxies. +type TargetHttpsProxies interface { + Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpsProxy, error) + List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy) error + Delete(ctx context.Context, key *meta.Key) error + SetSslCertificates(context.Context, *meta.Key, *ga.TargetHttpsProxiesSetSslCertificatesRequest) error + SetUrlMap(context.Context, *meta.Key, *ga.UrlMapReference) error +} + +// NewMockTargetHttpsProxies returns a new mock for TargetHttpsProxies. +func NewMockTargetHttpsProxies(pr ProjectRouter, objs map[meta.Key]*MockTargetHttpsProxiesObj) *MockTargetHttpsProxies { + mock := &MockTargetHttpsProxies{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockTargetHttpsProxies is the mock for TargetHttpsProxies. +type MockTargetHttpsProxies struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockTargetHttpsProxiesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. 
+ GetHook func(ctx context.Context, key *meta.Key, m *MockTargetHttpsProxies) (bool, *ga.TargetHttpsProxy, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockTargetHttpsProxies) (bool, []*ga.TargetHttpsProxy, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy, m *MockTargetHttpsProxies) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockTargetHttpsProxies) (bool, error) + SetSslCertificatesHook func(context.Context, *meta.Key, *ga.TargetHttpsProxiesSetSslCertificatesRequest, *MockTargetHttpsProxies) error + SetUrlMapHook func(context.Context, *meta.Key, *ga.UrlMapReference, *MockTargetHttpsProxies) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpsProxy, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToGA() + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), + } + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. +func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*ga.TargetHttpsProxy + for _, obj := range m.Objects { + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) + } + + klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. 
+func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockTargetHttpsProxies %v exists", key), + } + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "targetHttpsProxies") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetHttpsProxies", key) + + m.Objects[*key] = &MockTargetHttpsProxiesObj{obj} + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), + } + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockTargetHttpsProxies) Obj(o *ga.TargetHttpsProxy) *MockTargetHttpsProxiesObj { + return &MockTargetHttpsProxiesObj{o} +} + +// SetSslCertificates is a mock for the corresponding method. +func (m *MockTargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *ga.TargetHttpsProxiesSetSslCertificatesRequest) error { + if m.SetSslCertificatesHook != nil { + return m.SetSslCertificatesHook(ctx, key, arg0, m) + } + return nil +} + +// SetUrlMap is a mock for the corresponding method. +func (m *MockTargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(ctx, key, arg0, m) + } + return nil +} + +// GCETargetHttpsProxies is a simplifying adapter for the GCE TargetHttpsProxies. +type GCETargetHttpsProxies struct { + s *Service +} + +// Get the TargetHttpsProxy named by key. 
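The mock above is a plain in-memory fake: objects live in the Objects map keyed by meta.Key, the per-key GetError/InsertError/DeleteError maps inject failures, and the optional hook fields can take over a call entirely (a hook that returns true short-circuits the default behavior; returning false falls through to it). Below is a hedged sketch of exercising MockTargetHttpsProxies directly; the SingleProjectRouter helper and the import paths are assumptions based on the vendored module layout.

```go
package example

import (
	"context"
	"fmt"
	"net/http"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter"
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
	ga "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func mockProxiesExample(ctx context.Context) error {
	pr := &cloud.SingleProjectRouter{ID: "test-project"} // assumed helper: one project ID for all calls
	mock := cloud.NewMockTargetHttpsProxies(pr, map[meta.Key]*cloud.MockTargetHttpsProxiesObj{})

	// TargetHttpsProxies is a global resource, so seed it under a global key.
	key := meta.GlobalKey("proxy-1")
	if err := mock.Insert(ctx, key, &ga.TargetHttpsProxy{UrlMap: "url-map-1"}); err != nil {
		return err
	}

	// Inject a failure for one specific key, e.g. to simulate a 403.
	mock.GetError[*meta.GlobalKey("proxy-2")] = &googleapi.Error{Code: http.StatusForbidden}

	// Or intercept Get entirely; returning false defers to the Objects map.
	mock.GetHook = func(ctx context.Context, key *meta.Key, m *cloud.MockTargetHttpsProxies) (bool, *ga.TargetHttpsProxy, error) {
		if key.Name == "synthetic" {
			return true, &ga.TargetHttpsProxy{Name: "synthetic"}, nil
		}
		return false, nil, nil
	}

	objs, err := mock.List(ctx, filter.None)
	if err != nil {
		return err
	}
	fmt.Printf("mock holds %d proxies\n", len(objs))
	return nil
}
```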
+func (g *GCETargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpsProxy, error) { + klog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCETargetHttpsProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + klog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.GA.TargetHttpsProxies.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all TargetHttpsProxy objects. +func (g *GCETargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.TargetHttpsProxies.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*ga.TargetHttpsProxy + f := func(l *ga.TargetHttpsProxyList) error { + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert TargetHttpsProxy with key of value obj. 
+func (g *GCETargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy) error { + klog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + klog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.GA.TargetHttpsProxies.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the TargetHttpsProxy referenced by key. +func (g *GCETargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCETargetHttpsProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + klog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.TargetHttpsProxies.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// SetSslCertificates is a method on GCETargetHttpsProxies. 
+func (g *GCETargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *ga.TargetHttpsProxiesSetSslCertificatesRequest) error { + klog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetSslCertificates", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + klog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.TargetHttpsProxies.SetSslCertificates(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// SetUrlMap is a method on GCETargetHttpsProxies. +func (g *GCETargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { + klog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("ga"), + Service: "TargetHttpsProxies", + } + klog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.GA.TargetHttpsProxies.SetUrlMap(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// AlphaTargetHttpsProxies is an interface that allows for mocking of TargetHttpsProxies. +type AlphaTargetHttpsProxies interface { + Get(ctx context.Context, key *meta.Key) (*alpha.TargetHttpsProxy, error) + List(ctx context.Context, fl *filter.F) ([]*alpha.TargetHttpsProxy, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpsProxy) error + Delete(ctx context.Context, key *meta.Key) error + SetSslCertificates(context.Context, *meta.Key, *alpha.TargetHttpsProxiesSetSslCertificatesRequest) error + SetUrlMap(context.Context, *meta.Key, *alpha.UrlMapReference) error +} + +// NewMockAlphaTargetHttpsProxies returns a new mock for TargetHttpsProxies. 
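Because the mock and the GCETargetHttpsProxies adapter both satisfy the TargetHttpsProxies interface defined above, callers can be written once against the interface and exercised with either implementation. Below is a hypothetical ensureProxyURLMap helper written in that style; only the import paths are assumed from the vendored module layout, everything else uses the interface exactly as declared in this file.

```go
package example

import (
	"context"
	"fmt"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
	ga "google.golang.org/api/compute/v1"
)

// ensureProxyURLMap points an existing TargetHttpsProxy at the given URL map.
// It depends only on the TargetHttpsProxies interface, so tests can pass
// MockTargetHttpsProxies and production code can pass the GCE adapter.
func ensureProxyURLMap(ctx context.Context, proxies cloud.TargetHttpsProxies, name, urlMapLink string) error {
	key := meta.GlobalKey(name)
	proxy, err := proxies.Get(ctx, key)
	if err != nil {
		return fmt.Errorf("get %v: %w", key, err)
	}
	if proxy.UrlMap == urlMapLink {
		return nil // already up to date
	}
	return proxies.SetUrlMap(ctx, key, &ga.UrlMapReference{UrlMap: urlMapLink})
}
```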
+func NewMockAlphaTargetHttpsProxies(pr ProjectRouter, objs map[meta.Key]*MockTargetHttpsProxiesObj) *MockAlphaTargetHttpsProxies { + mock := &MockAlphaTargetHttpsProxies{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockAlphaTargetHttpsProxies is the mock for TargetHttpsProxies. +type MockAlphaTargetHttpsProxies struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockTargetHttpsProxiesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaTargetHttpsProxies) (bool, *alpha.TargetHttpsProxy, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockAlphaTargetHttpsProxies) (bool, []*alpha.TargetHttpsProxy, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpsProxy, m *MockAlphaTargetHttpsProxies) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaTargetHttpsProxies) (bool, error) + SetSslCertificatesHook func(context.Context, *meta.Key, *alpha.TargetHttpsProxiesSetSslCertificatesRequest, *MockAlphaTargetHttpsProxies) error + SetUrlMapHook func(context.Context, *meta.Key, *alpha.UrlMapReference, *MockAlphaTargetHttpsProxies) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockAlphaTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*alpha.TargetHttpsProxy, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockAlphaTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaTargetHttpsProxies %v not found", key), + } + klog.V(5).Infof("MockAlphaTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. 
+func (m *MockAlphaTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*alpha.TargetHttpsProxy, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockAlphaTargetHttpsProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockAlphaTargetHttpsProxies.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*alpha.TargetHttpsProxy + for _, obj := range m.Objects { + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) + } + + klog.V(5).Infof("MockAlphaTargetHttpsProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockAlphaTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpsProxy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockAlphaTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockAlphaTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaTargetHttpsProxies %v exists", key), + } + klog.V(5).Infof("MockAlphaTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "targetHttpsProxies") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "targetHttpsProxies", key) + + m.Objects[*key] = &MockTargetHttpsProxiesObj{obj} + klog.V(5).Infof("MockAlphaTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockAlphaTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaTargetHttpsProxies %v not found", key), + } + klog.V(5).Infof("MockAlphaTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockAlphaTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockAlphaTargetHttpsProxies) Obj(o *alpha.TargetHttpsProxy) *MockTargetHttpsProxiesObj { + return &MockTargetHttpsProxiesObj{o} +} + +// SetSslCertificates is a mock for the corresponding method. 
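The GA, alpha, and beta mocks all store *MockTargetHttpsProxiesObj values, and the ToGA/ToAlpha/ToBeta accessors convert whatever is stored into the requested API version on read. Sharing one Objects map across the versioned mocks, which appears to be what the generated composite mock does, therefore makes a resource inserted through one version's surface visible through the others. A hedged sketch, with the SingleProjectRouter helper and import paths assumed from the vendored module layout:

```go
package example

import (
	"context"
	"fmt"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
	alpha "google.golang.org/api/compute/v0.alpha"
)

func sharedStoreExample(ctx context.Context) error {
	pr := &cloud.SingleProjectRouter{ID: "test-project"} // assumed helper
	objs := map[meta.Key]*cloud.MockTargetHttpsProxiesObj{}

	// Back both versioned mocks with the same object map.
	alphaMock := cloud.NewMockAlphaTargetHttpsProxies(pr, objs)
	gaMock := cloud.NewMockTargetHttpsProxies(pr, objs)

	key := meta.GlobalKey("proxy-1")
	if err := alphaMock.Insert(ctx, key, &alpha.TargetHttpsProxy{UrlMap: "url-map-1"}); err != nil {
		return err
	}

	// The object inserted through the alpha surface is visible through GA;
	// ToGA() converts the stored value on the way out.
	proxy, err := gaMock.Get(ctx, key)
	if err != nil {
		return err
	}
	fmt.Println(proxy.UrlMap)
	return nil
}
```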
+func (m *MockAlphaTargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *alpha.TargetHttpsProxiesSetSslCertificatesRequest) error { + if m.SetSslCertificatesHook != nil { + return m.SetSslCertificatesHook(ctx, key, arg0, m) + } + return nil +} + +// SetUrlMap is a mock for the corresponding method. +func (m *MockAlphaTargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *alpha.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(ctx, key, arg0, m) + } + return nil +} + +// GCEAlphaTargetHttpsProxies is a simplifying adapter for the GCE TargetHttpsProxies. +type GCEAlphaTargetHttpsProxies struct { + s *Service +} + +// Get the TargetHttpsProxy named by key. +func (g *GCEAlphaTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*alpha.TargetHttpsProxy, error) { + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaTargetHttpsProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Get", + Version: meta.Version("alpha"), + Service: "TargetHttpsProxies", + } + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Alpha.TargetHttpsProxies.Get(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all TargetHttpsProxy objects. +func (g *GCEAlphaTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*alpha.TargetHttpsProxy, error) { + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Alpha.TargetHttpsProxies.List(projectID) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.TargetHttpsProxy + f := func(l *alpha.TargetHttpsProxyList) error { + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) + return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert TargetHttpsProxy with key of value obj. 
+func (g *GCEAlphaTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpsProxy) error { + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaTargetHttpsProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "TargetHttpsProxies", + } + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Alpha.TargetHttpsProxies.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the TargetHttpsProxy referenced by key. +func (g *GCEAlphaTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaTargetHttpsProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "TargetHttpsProxies", + } + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.TargetHttpsProxies.Delete(projectID, key.Name) + + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// SetSslCertificates is a method on GCEAlphaTargetHttpsProxies. 
+func (g *GCEAlphaTargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *alpha.TargetHttpsProxiesSetSslCertificatesRequest) error { + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.SetSslCertificates(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaTargetHttpsProxies.SetSslCertificates(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetSslCertificates", + Version: meta.Version("alpha"), + Service: "TargetHttpsProxies", + } + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.SetSslCertificates(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.SetSslCertificates(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.TargetHttpsProxies.SetSslCertificates(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// SetUrlMap is a method on GCEAlphaTargetHttpsProxies. +func (g *GCEAlphaTargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *alpha.UrlMapReference) error { + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaTargetHttpsProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("alpha"), + Service: "TargetHttpsProxies", + } + klog.V(5).Infof("GCEAlphaTargetHttpsProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.TargetHttpsProxies.SetUrlMap(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaTargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// BetaTargetHttpsProxies is an interface that allows for mocking of TargetHttpsProxies. +type BetaTargetHttpsProxies interface { + Get(ctx context.Context, key *meta.Key) (*beta.TargetHttpsProxy, error) + List(ctx context.Context, fl *filter.F) ([]*beta.TargetHttpsProxy, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.TargetHttpsProxy) error + Delete(ctx context.Context, key *meta.Key) error + SetSslCertificates(context.Context, *meta.Key, *beta.TargetHttpsProxiesSetSslCertificatesRequest) error + SetUrlMap(context.Context, *meta.Key, *beta.UrlMapReference) error +} + +// NewMockBetaTargetHttpsProxies returns a new mock for TargetHttpsProxies. 
+func NewMockBetaTargetHttpsProxies(pr ProjectRouter, objs map[meta.Key]*MockTargetHttpsProxiesObj) *MockBetaTargetHttpsProxies { + mock := &MockBetaTargetHttpsProxies{ + ProjectRouter: pr, + + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, + } + return mock +} + +// MockBetaTargetHttpsProxies is the mock for TargetHttpsProxies. +type MockBetaTargetHttpsProxies struct { + Lock sync.Mutex + + ProjectRouter ProjectRouter + + // Objects maintained by the mock. + Objects map[meta.Key]*MockTargetHttpsProxiesObj + + // If an entry exists for the given key and operation, then the error + // will be returned instead of the operation. + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error + + // xxxHook allow you to intercept the standard processing of the mock in + // order to add your own logic. Return (true, _, _) to prevent the normal + // execution flow of the mock. Return (false, nil, nil) to continue with + // normal mock behavior/ after the hook function executes. + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaTargetHttpsProxies) (bool, *beta.TargetHttpsProxy, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockBetaTargetHttpsProxies) (bool, []*beta.TargetHttpsProxy, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.TargetHttpsProxy, m *MockBetaTargetHttpsProxies) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaTargetHttpsProxies) (bool, error) + SetSslCertificatesHook func(context.Context, *meta.Key, *beta.TargetHttpsProxiesSetSslCertificatesRequest, *MockBetaTargetHttpsProxies) error + SetUrlMapHook func(context.Context, *meta.Key, *beta.UrlMapReference, *MockBetaTargetHttpsProxies) error + + // X is extra state that can be used as part of the mock. Generated code + // will not use this field. + X interface{} +} + +// Get returns the object from the mock. +func (m *MockBetaTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*beta.TargetHttpsProxy, error) { + if m.GetHook != nil { + if intercept, obj, err := m.GetHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + return obj, err + } + } + if !key.Valid() { + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.GetError[*key]; ok { + klog.V(5).Infof("MockBetaTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err + } + if obj, ok := m.Objects[*key]; ok { + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + return typedObj, nil + } + + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaTargetHttpsProxies %v not found", key), + } + klog.V(5).Infof("MockBetaTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + return nil, err +} + +// List all of the objects in the mock. 
+func (m *MockBetaTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*beta.TargetHttpsProxy, error) { + if m.ListHook != nil { + if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { + klog.V(5).Infof("MockBetaTargetHttpsProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + return objs, err + } + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if m.ListError != nil { + err := *m.ListError + klog.V(5).Infof("MockBetaTargetHttpsProxies.List(%v, %v) = nil, %v", ctx, fl, err) + + return nil, *m.ListError + } + + var objs []*beta.TargetHttpsProxy + for _, obj := range m.Objects { + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) + } + + klog.V(5).Infof("MockBetaTargetHttpsProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + return objs, nil +} + +// Insert is a mock for inserting/creating a new object. +func (m *MockBetaTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *beta.TargetHttpsProxy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockBetaTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) } - obj.Name = key.Name - call := g.s.Beta.NetworkEndpointGroups.Insert(projectID, key.Zone, obj) - call.Context(ctx) - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockBetaTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockBetaTargetHttpsProxies %v exists", key), + } + klog.V(5).Infof("MockBetaTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) - return err + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "targetHttpsProxies") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "targetHttpsProxies", key) + + m.Objects[*key] = &MockTargetHttpsProxiesObj{obj} + klog.V(5).Infof("MockBetaTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil } -// Delete the NetworkEndpointGroup referenced by key. -func (g *GCEBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) +// Delete is a mock for deleting the object. 
+func (m *MockBetaTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockBetaTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } if !key.Valid() { - klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "Delete", - Version: meta.Version("beta"), - Service: "NetworkEndpointGroups", + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockBetaTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err } - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockBetaTargetHttpsProxies %v not found", key), + } + klog.V(5).Infof("MockBetaTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } - call := g.s.Beta.NetworkEndpointGroups.Delete(projectID, key.Zone, key.Name) - call.Context(ctx) - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) - return err + delete(m.Objects, *key) + klog.V(5).Infof("MockBetaTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) + return nil +} + +// Obj wraps the object for use in the mock. +func (m *MockBetaTargetHttpsProxies) Obj(o *beta.TargetHttpsProxy) *MockTargetHttpsProxiesObj { + return &MockTargetHttpsProxiesObj{o} +} + +// SetSslCertificates is a mock for the corresponding method. +func (m *MockBetaTargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *beta.TargetHttpsProxiesSetSslCertificatesRequest) error { + if m.SetSslCertificatesHook != nil { + return m.SetSslCertificatesHook(ctx, key, arg0, m) } + return nil +} - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) - return err +// SetUrlMap is a mock for the corresponding method. +func (m *MockBetaTargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *beta.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(ctx, key, arg0, m) + } + return nil } -// AggregatedList lists all resources of the given type across all locations. -func (g *GCEBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.NetworkEndpointGroup, error) { - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) +// GCEBetaTargetHttpsProxies is a simplifying adapter for the GCE TargetHttpsProxies. +type GCEBetaTargetHttpsProxies struct { + s *Service +} - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") +// Get the TargetHttpsProxy named by key. 
+func (g *GCEBetaTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*beta.TargetHttpsProxy, error) { + klog.V(5).Infof("GCEBetaTargetHttpsProxies.Get(%v, %v): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaTargetHttpsProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%#v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "TargetHttpsProxies") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "AggregatedList", + Operation: "Get", Version: meta.Version("beta"), - Service: "NetworkEndpointGroups", + Service: "TargetHttpsProxies", } - - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBetaTargetHttpsProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + klog.V(4).Infof("GCEBetaTargetHttpsProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - - call := g.s.Beta.NetworkEndpointGroups.AggregatedList(projectID) + call := g.s.Beta.TargetHttpsProxies.Get(projectID, key.Name) call.Context(ctx) + v, err := call.Do() + klog.V(4).Infof("GCEBetaTargetHttpsProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + return v, err +} + +// List all TargetHttpsProxy objects. +func (g *GCEBetaTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*beta.TargetHttpsProxy, error) { + klog.V(5).Infof("GCEBetaTargetHttpsProxies.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("beta"), + Service: "TargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEBetaTargetHttpsProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Beta.TargetHttpsProxies.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - - all := map[string][]*beta.NetworkEndpointGroup{} - f := func(l *beta.NetworkEndpointGroupAggregatedList) error { - for k, v := range l.Items { - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) - all[k] = append(all[k], v.NetworkEndpointGroups...) - } + var all []*beta.TargetHttpsProxy + f := func(l *beta.TargetHttpsProxyList) error { + klog.V(5).Infof("GCEBetaTargetHttpsProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaTargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } + if klog.V(4) { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEBetaTargetHttpsProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaTargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } + return all, nil } -// AttachNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. -func (g *GCEBetaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsAttachEndpointsRequest) error { - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) - +// Insert TargetHttpsProxy with key of value obj. +func (g *GCEBetaTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *beta.TargetHttpsProxy) error { + klog.V(5).Infof("GCEBetaTargetHttpsProxies.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaTargetHttpsProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "TargetHttpsProxies") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "AttachNetworkEndpoints", + Operation: "Insert", Version: meta.Version("beta"), - Service: "NetworkEndpointGroups", + Service: "TargetHttpsProxies", } - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - + klog.V(5).Infof("GCEBetaTargetHttpsProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaTargetHttpsProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.NetworkEndpointGroups.AttachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + obj.Name = key.Name + call := g.s.Beta.TargetHttpsProxies.Insert(projectID, obj) call.Context(ctx) + op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaTargetHttpsProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } + err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaTargetHttpsProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// DetachNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. 
-func (g *GCEBetaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsDetachEndpointsRequest) error { - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) - +// Delete the TargetHttpsProxy referenced by key. +func (g *GCEBetaTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaTargetHttpsProxies.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaTargetHttpsProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "TargetHttpsProxies") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "DetachNetworkEndpoints", + Operation: "Delete", Version: meta.Version("beta"), - Service: "NetworkEndpointGroups", + Service: "TargetHttpsProxies", } - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - + klog.V(5).Infof("GCEBetaTargetHttpsProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaTargetHttpsProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.NetworkEndpointGroups.DetachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) + call := g.s.Beta.TargetHttpsProxies.Delete(projectID, key.Name) + call.Context(ctx) + op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } + err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } -// ListNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. -func (g *GCEBetaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*beta.NetworkEndpointWithHealthStatus, error) { - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) +// SetSslCertificates is a method on GCEBetaTargetHttpsProxies. 
+func (g *GCEBetaTargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *beta.TargetHttpsProxiesSetSslCertificatesRequest) error { + klog.V(5).Infof("GCEBetaTargetHttpsProxies.SetSslCertificates(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return nil, fmt.Errorf("invalid GCE key (%+v)", key) + klog.V(2).Infof("GCEBetaTargetHttpsProxies.SetSslCertificates(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "TargetHttpsProxies") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "ListNetworkEndpoints", + Operation: "SetSslCertificates", Version: meta.Version("beta"), - Service: "NetworkEndpointGroups", + Service: "TargetHttpsProxies", } - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaTargetHttpsProxies.SetSslCertificates(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return nil, err - } - call := g.s.Beta.NetworkEndpointGroups.ListNetworkEndpoints(projectID, key.Zone, key.Name, arg0) - var all []*beta.NetworkEndpointWithHealthStatus - f := func(l *beta.NetworkEndpointGroupsListNetworkEndpoints) error { - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) - all = append(all, l.Items...) - return nil - } - if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) - return nil, err - } - if klog.V(4) { - klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) - } else if klog.V(5) { - var asStr []string - for _, o := range all { - asStr = append(asStr, fmt.Sprintf("%+v", o)) - } - klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) - } - return all, nil -} - -// Projects is an interface that allows for mocking of Projects. -type Projects interface { - // ProjectsOps is an interface with additional non-CRUD type methods. - // This interface is expected to be implemented by hand (non-autogenerated). - ProjectsOps -} - -// NewMockProjects returns a new mock for Projects. -func NewMockProjects(pr ProjectRouter, objs map[meta.Key]*MockProjectsObj) *MockProjects { - mock := &MockProjects{ - ProjectRouter: pr, - - Objects: objs, - } - return mock -} - -// MockProjects is the mock for Projects. -type MockProjects struct { - Lock sync.Mutex - - ProjectRouter ProjectRouter - - // Objects maintained by the mock. - Objects map[meta.Key]*MockProjectsObj - - // If an entry exists for the given key and operation, then the error - // will be returned instead of the operation. - - // xxxHook allow you to intercept the standard processing of the mock in - // order to add your own logic. Return (true, _, _) to prevent the normal - // execution flow of the mock. Return (false, nil, nil) to continue with - // normal mock behavior/ after the hook function executes. 
- - // X is extra state that can be used as part of the mock. Generated code - // will not use this field. - X interface{} + klog.V(4).Infof("GCEBetaTargetHttpsProxies.SetSslCertificates(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.TargetHttpsProxies.SetSslCertificates(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaTargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaTargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) + return err } -// Obj wraps the object for use in the mock. -func (m *MockProjects) Obj(o *ga.Project) *MockProjectsObj { - return &MockProjectsObj{o} -} +// SetUrlMap is a method on GCEBetaTargetHttpsProxies. +func (g *GCEBetaTargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *beta.UrlMapReference) error { + klog.V(5).Infof("GCEBetaTargetHttpsProxies.SetUrlMap(%v, %v, ...): called", ctx, key) -// GCEProjects is a simplifying adapter for the GCE Projects. -type GCEProjects struct { - s *Service + if !key.Valid() { + klog.V(2).Infof("GCEBetaTargetHttpsProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "TargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("beta"), + Service: "TargetHttpsProxies", + } + klog.V(5).Infof("GCEBetaTargetHttpsProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaTargetHttpsProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.TargetHttpsProxies.SetUrlMap(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaTargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaTargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err } -// Regions is an interface that allows for mocking of Regions. -type Regions interface { - Get(ctx context.Context, key *meta.Key) (*ga.Region, error) - List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) +// AlphaRegionTargetHttpsProxies is an interface that allows for mocking of RegionTargetHttpsProxies. +type AlphaRegionTargetHttpsProxies interface { + Get(ctx context.Context, key *meta.Key) (*alpha.TargetHttpsProxy, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.TargetHttpsProxy, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpsProxy) error + Delete(ctx context.Context, key *meta.Key) error + SetSslCertificates(context.Context, *meta.Key, *alpha.RegionTargetHttpsProxiesSetSslCertificatesRequest) error + SetUrlMap(context.Context, *meta.Key, *alpha.UrlMapReference) error } -// NewMockRegions returns a new mock for Regions. -func NewMockRegions(pr ProjectRouter, objs map[meta.Key]*MockRegionsObj) *MockRegions { - mock := &MockRegions{ +// NewMockAlphaRegionTargetHttpsProxies returns a new mock for RegionTargetHttpsProxies. 
+func NewMockAlphaRegionTargetHttpsProxies(pr ProjectRouter, objs map[meta.Key]*MockRegionTargetHttpsProxiesObj) *MockAlphaRegionTargetHttpsProxies { + mock := &MockAlphaRegionTargetHttpsProxies{ ProjectRouter: pr, - Objects: objs, - GetError: map[meta.Key]error{}, + Objects: objs, + GetError: map[meta.Key]error{}, + InsertError: map[meta.Key]error{}, + DeleteError: map[meta.Key]error{}, } return mock } -// MockRegions is the mock for Regions. -type MockRegions struct { +// MockAlphaRegionTargetHttpsProxies is the mock for RegionTargetHttpsProxies. +type MockAlphaRegionTargetHttpsProxies struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockRegionsObj + Objects map[meta.Key]*MockRegionTargetHttpsProxiesObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. - GetError map[meta.Key]error - ListError *error + GetError map[meta.Key]error + ListError *error + InsertError map[meta.Key]error + DeleteError map[meta.Key]error // xxxHook allow you to intercept the standard processing of the mock in // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockRegions) (bool, *ga.Region, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockRegions) (bool, []*ga.Region, error) + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionTargetHttpsProxies) (bool, *alpha.TargetHttpsProxy, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAlphaRegionTargetHttpsProxies) (bool, []*alpha.TargetHttpsProxy, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpsProxy, m *MockAlphaRegionTargetHttpsProxies) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionTargetHttpsProxies) (bool, error) + SetSslCertificatesHook func(context.Context, *meta.Key, *alpha.RegionTargetHttpsProxiesSetSslCertificatesRequest, *MockAlphaRegionTargetHttpsProxies) error + SetUrlMapHook func(context.Context, *meta.Key, *alpha.UrlMapReference, *MockAlphaRegionTargetHttpsProxies) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -11856,10 +24151,10 @@ type MockRegions struct { } // Get returns the object from the mock. 
-func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error) { +func (m *MockAlphaRegionTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*alpha.TargetHttpsProxy, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -11871,28 +24166,28 @@ func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToGA() - klog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockRegions %v not found", key), + Message: fmt.Sprintf("MockAlphaRegionTargetHttpsProxies %v not found", key), } - klog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock. -func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) { +// List all of the objects in the mock in the given region. +func (m *MockAlphaRegionTargetHttpsProxies) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.TargetHttpsProxy, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -11902,113 +24197,337 @@ func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, err if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockRegions.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*ga.Region - for _, obj := range m.Objects { - if !fl.Match(obj.ToGA()) { + var objs []*alpha.TargetHttpsProxy + for key, obj := range m.Objects { + if key.Region != region { continue } - objs = append(objs, obj.ToGA()) + if !fl.Match(obj.ToAlpha()) { + continue + } + objs = append(objs, obj.ToAlpha()) } - klog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } +// Insert is a mock for inserting/creating a new object. 
+func (m *MockAlphaRegionTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpsProxy) error { + if m.InsertHook != nil { + if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.InsertError[*key]; ok { + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + if _, ok := m.Objects[*key]; ok { + err := &googleapi.Error{ + Code: http.StatusConflict, + Message: fmt.Sprintf("MockAlphaRegionTargetHttpsProxies %v exists", key), + } + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "targetHttpsProxies") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "targetHttpsProxies", key) + + m.Objects[*key] = &MockRegionTargetHttpsProxiesObj{obj} + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + return nil +} + +// Delete is a mock for deleting the object. +func (m *MockAlphaRegionTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { + if m.DeleteHook != nil { + if intercept, err := m.DeleteHook(ctx, key, m); intercept { + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + } + if !key.Valid() { + return fmt.Errorf("invalid GCE key (%+v)", key) + } + + m.Lock.Lock() + defer m.Lock.Unlock() + + if err, ok := m.DeleteError[*key]; ok { + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + if _, ok := m.Objects[*key]; !ok { + err := &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("MockAlphaRegionTargetHttpsProxies %v not found", key), + } + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + delete(m.Objects, *key) + klog.V(5).Infof("MockAlphaRegionTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) + return nil +} + // Obj wraps the object for use in the mock. -func (m *MockRegions) Obj(o *ga.Region) *MockRegionsObj { - return &MockRegionsObj{o} +func (m *MockAlphaRegionTargetHttpsProxies) Obj(o *alpha.TargetHttpsProxy) *MockRegionTargetHttpsProxiesObj { + return &MockRegionTargetHttpsProxiesObj{o} } -// GCERegions is a simplifying adapter for the GCE Regions. -type GCERegions struct { +// SetSslCertificates is a mock for the corresponding method. +func (m *MockAlphaRegionTargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *alpha.RegionTargetHttpsProxiesSetSslCertificatesRequest) error { + if m.SetSslCertificatesHook != nil { + return m.SetSslCertificatesHook(ctx, key, arg0, m) + } + return nil +} + +// SetUrlMap is a mock for the corresponding method. +func (m *MockAlphaRegionTargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *alpha.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(ctx, key, arg0, m) + } + return nil +} + +// GCEAlphaRegionTargetHttpsProxies is a simplifying adapter for the GCE RegionTargetHttpsProxies. +type GCEAlphaRegionTargetHttpsProxies struct { s *Service } -// Get the Region named by key. 
-func (g *GCERegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error) { - klog.V(5).Infof("GCERegions.Get(%v, %v): called", ctx, key) +// Get the TargetHttpsProxy named by key. +func (g *GCEAlphaRegionTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*alpha.TargetHttpsProxy, error) { + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCERegions.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionTargetHttpsProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionTargetHttpsProxies") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("ga"), - Service: "Regions", + Version: meta.Version("alpha"), + Service: "RegionTargetHttpsProxies", } - klog.V(5).Infof("GCERegions.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCERegions.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.Regions.Get(projectID, key.Name) + call := g.s.Alpha.RegionTargetHttpsProxies.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCERegions.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all Region objects. -func (g *GCERegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) { - klog.V(5).Infof("GCERegions.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions") +// List all TargetHttpsProxy objects. +func (g *GCEAlphaRegionTargetHttpsProxies) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.TargetHttpsProxy, error) { + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionTargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "List", + Version: meta.Version("alpha"), + Service: "RegionTargetHttpsProxies", + } + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + return nil, err + } + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Alpha.RegionTargetHttpsProxies.List(projectID, region) + if fl != filter.None { + call.Filter(fl.String()) + } + var all []*alpha.TargetHttpsProxy + f := func(l *alpha.TargetHttpsProxyList) error { + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + all = append(all, l.Items...) 
+ return nil + } + if err := call.Pages(ctx, f); err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + return nil, err + } + + if klog.V(4) { + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { + var asStr []string + for _, o := range all { + asStr = append(asStr, fmt.Sprintf("%+v", o)) + } + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + } + + return all, nil +} + +// Insert TargetHttpsProxy with key of value obj. +func (g *GCEAlphaRegionTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *alpha.TargetHttpsProxy) error { + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaRegionTargetHttpsProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionTargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "RegionTargetHttpsProxies", + } + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Alpha.RegionTargetHttpsProxies.Insert(projectID, key.Region, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the TargetHttpsProxy referenced by key. +func (g *GCEAlphaRegionTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.Delete(%v, %v): called", ctx, key) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaRegionTargetHttpsProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionTargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "RegionTargetHttpsProxies", + } + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.RegionTargetHttpsProxies.Delete(projectID, key.Region, key.Name) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + return err +} + +// SetSslCertificates is a method on GCEAlphaRegionTargetHttpsProxies. 
+func (g *GCEAlphaRegionTargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *alpha.RegionTargetHttpsProxiesSetSslCertificatesRequest) error { + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.SetSslCertificates(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaRegionTargetHttpsProxies.SetSslCertificates(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionTargetHttpsProxies") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "List", - Version: meta.Version("ga"), - Service: "Regions", + Operation: "SetSslCertificates", + Version: meta.Version("alpha"), + Service: "RegionTargetHttpsProxies", } + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.SetSslCertificates(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - return nil, err + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.SetSslCertificates(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err } - klog.V(5).Infof("GCERegions.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.GA.Regions.List(projectID) - if fl != filter.None { - call.Filter(fl.String()) + call := g.s.Alpha.RegionTargetHttpsProxies.SetSslCertificates(projectID, key.Region, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) + return err } - var all []*ga.Region - f := func(l *ga.RegionList) error { - klog.V(5).Infof("GCERegions.List(%v, ..., %v): page %+v", ctx, fl, l) - all = append(all, l.Items...) - return nil + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// SetUrlMap is a method on GCEAlphaRegionTargetHttpsProxies. 
+func (g *GCEAlphaRegionTargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *alpha.UrlMapReference) error { + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEAlphaRegionTargetHttpsProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) } - if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) - return nil, err + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionTargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("alpha"), + Service: "RegionTargetHttpsProxies", } + klog.V(5).Infof("GCEAlphaRegionTargetHttpsProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - if klog.V(4) { - klog.V(4).Infof("GCERegions.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if klog.V(5) { - var asStr []string - for _, o := range all { - asStr = append(asStr, fmt.Sprintf("%+v", o)) - } - klog.V(5).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err } - - return all, nil + call := g.s.Alpha.RegionTargetHttpsProxies.SetUrlMap(projectID, key.Region, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaRegionTargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err } -// Routes is an interface that allows for mocking of Routes. -type Routes interface { - Get(ctx context.Context, key *meta.Key) (*ga.Route, error) - List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.Route) error +// BetaRegionTargetHttpsProxies is an interface that allows for mocking of RegionTargetHttpsProxies. +type BetaRegionTargetHttpsProxies interface { + Get(ctx context.Context, key *meta.Key) (*beta.TargetHttpsProxy, error) + List(ctx context.Context, region string, fl *filter.F) ([]*beta.TargetHttpsProxy, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.TargetHttpsProxy) error Delete(ctx context.Context, key *meta.Key) error + SetSslCertificates(context.Context, *meta.Key, *beta.RegionTargetHttpsProxiesSetSslCertificatesRequest) error + SetUrlMap(context.Context, *meta.Key, *beta.UrlMapReference) error } -// NewMockRoutes returns a new mock for Routes. -func NewMockRoutes(pr ProjectRouter, objs map[meta.Key]*MockRoutesObj) *MockRoutes { - mock := &MockRoutes{ +// NewMockBetaRegionTargetHttpsProxies returns a new mock for RegionTargetHttpsProxies. +func NewMockBetaRegionTargetHttpsProxies(pr ProjectRouter, objs map[meta.Key]*MockRegionTargetHttpsProxiesObj) *MockBetaRegionTargetHttpsProxies { + mock := &MockBetaRegionTargetHttpsProxies{ ProjectRouter: pr, Objects: objs, @@ -12019,14 +24538,14 @@ func NewMockRoutes(pr ProjectRouter, objs map[meta.Key]*MockRoutesObj) *MockRout return mock } -// MockRoutes is the mock for Routes. -type MockRoutes struct { +// MockBetaRegionTargetHttpsProxies is the mock for RegionTargetHttpsProxies. 
+type MockBetaRegionTargetHttpsProxies struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockRoutesObj + Objects map[meta.Key]*MockRegionTargetHttpsProxiesObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -12039,10 +24558,12 @@ type MockRoutes struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockRoutes) (bool, *ga.Route, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockRoutes) (bool, []*ga.Route, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.Route, m *MockRoutes) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockRoutes) (bool, error) + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionTargetHttpsProxies) (bool, *beta.TargetHttpsProxy, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaRegionTargetHttpsProxies) (bool, []*beta.TargetHttpsProxy, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.TargetHttpsProxy, m *MockBetaRegionTargetHttpsProxies) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionTargetHttpsProxies) (bool, error) + SetSslCertificatesHook func(context.Context, *meta.Key, *beta.RegionTargetHttpsProxiesSetSslCertificatesRequest, *MockBetaRegionTargetHttpsProxies) error + SetUrlMapHook func(context.Context, *meta.Key, *beta.UrlMapReference, *MockBetaRegionTargetHttpsProxies) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -12050,10 +24571,10 @@ type MockRoutes struct { } // Get returns the object from the mock. -func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) { +func (m *MockBetaRegionTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*beta.TargetHttpsProxy, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -12065,28 +24586,28 @@ func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToGA() - klog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockRoutes %v not found", key), + Message: fmt.Sprintf("MockBetaRegionTargetHttpsProxies %v not found", key), } - klog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock. 
-func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) { +// List all of the objects in the mock in the given region. +func (m *MockBetaRegionTargetHttpsProxies) List(ctx context.Context, region string, fl *filter.F) ([]*beta.TargetHttpsProxy, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -12096,28 +24617,31 @@ func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockRoutes.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*ga.Route - for _, obj := range m.Objects { - if !fl.Match(obj.ToGA()) { + var objs []*beta.TargetHttpsProxy + for key, obj := range m.Objects { + if key.Region != region { continue } - objs = append(objs, obj.ToGA()) + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) } - klog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) error { +func (m *MockBetaRegionTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *beta.TargetHttpsProxy) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -12129,32 +24653,32 @@ func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) e defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockRoutes %v exists", key), + Message: fmt.Sprintf("MockBetaRegionTargetHttpsProxies %v exists", key), } - klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "routes") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "routes", key) + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "targetHttpsProxies") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "targetHttpsProxies", key) - m.Objects[*key] = &MockRoutesObj{obj} - klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockRegionTargetHttpsProxiesObj{obj} + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockRoutes) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockBetaRegionTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -12166,186 +24690,264 @@ func (m *MockRoutes) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockRoutes %v not found", key), + Message: fmt.Sprintf("MockBetaRegionTargetHttpsProxies %v not found", key), } - klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockRoutes.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaRegionTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockRoutes) Obj(o *ga.Route) *MockRoutesObj { - return &MockRoutesObj{o} +func (m *MockBetaRegionTargetHttpsProxies) Obj(o *beta.TargetHttpsProxy) *MockRegionTargetHttpsProxiesObj { + return &MockRegionTargetHttpsProxiesObj{o} } -// GCERoutes is a simplifying adapter for the GCE Routes. -type GCERoutes struct { +// SetSslCertificates is a mock for the corresponding method. +func (m *MockBetaRegionTargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *beta.RegionTargetHttpsProxiesSetSslCertificatesRequest) error { + if m.SetSslCertificatesHook != nil { + return m.SetSslCertificatesHook(ctx, key, arg0, m) + } + return nil +} + +// SetUrlMap is a mock for the corresponding method. +func (m *MockBetaRegionTargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *beta.UrlMapReference) error { + if m.SetUrlMapHook != nil { + return m.SetUrlMapHook(ctx, key, arg0, m) + } + return nil +} + +// GCEBetaRegionTargetHttpsProxies is a simplifying adapter for the GCE RegionTargetHttpsProxies. +type GCEBetaRegionTargetHttpsProxies struct { s *Service } -// Get the Route named by key. -func (g *GCERoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) { - klog.V(5).Infof("GCERoutes.Get(%v, %v): called", ctx, key) +// Get the TargetHttpsProxy named by key. 
+func (g *GCEBetaRegionTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*beta.TargetHttpsProxy, error) { + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCERoutes.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionTargetHttpsProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionTargetHttpsProxies") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("ga"), - Service: "Routes", + Version: meta.Version("beta"), + Service: "RegionTargetHttpsProxies", } - klog.V(5).Infof("GCERoutes.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCERoutes.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.Routes.Get(projectID, key.Name) + call := g.s.Beta.RegionTargetHttpsProxies.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCERoutes.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all Route objects. -func (g *GCERoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) { - klog.V(5).Infof("GCERoutes.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") +// List all TargetHttpsProxy objects. +func (g *GCEBetaRegionTargetHttpsProxies) List(ctx context.Context, region string, fl *filter.F) ([]*beta.TargetHttpsProxy, error) { + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionTargetHttpsProxies") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("ga"), - Service: "Routes", + Version: meta.Version("beta"), + Service: "RegionTargetHttpsProxies", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCERoutes.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.GA.Routes.List(projectID) + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Beta.RegionTargetHttpsProxies.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.Route - f := func(l *ga.RouteList) error { - klog.V(5).Infof("GCERoutes.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*beta.TargetHttpsProxy + f := func(l *beta.TargetHttpsProxyList) error { + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCERoutes.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert Route with key of value obj. -func (g *GCERoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) error { - klog.V(5).Infof("GCERoutes.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert TargetHttpsProxy with key of value obj. +func (g *GCEBetaRegionTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *beta.TargetHttpsProxy) error { + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCERoutes.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionTargetHttpsProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionTargetHttpsProxies") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("ga"), - Service: "Routes", + Version: meta.Version("beta"), + Service: "RegionTargetHttpsProxies", } - klog.V(5).Infof("GCERoutes.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCERoutes.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.Routes.Insert(projectID, obj) + call := g.s.Beta.RegionTargetHttpsProxies.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCERoutes.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCERoutes.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the Route referenced by key. -func (g *GCERoutes) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCERoutes.Delete(%v, %v): called", ctx, key) +// Delete the TargetHttpsProxy referenced by key. 
+func (g *GCEBetaRegionTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCERoutes.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionTargetHttpsProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionTargetHttpsProxies") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("ga"), - Service: "Routes", + Version: meta.Version("beta"), + Service: "RegionTargetHttpsProxies", } - klog.V(5).Infof("GCERoutes.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCERoutes.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.Routes.Delete(projectID, key.Name) - + call := g.s.Beta.RegionTargetHttpsProxies.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } -// BetaSecurityPolicies is an interface that allows for mocking of SecurityPolicies. -type BetaSecurityPolicies interface { - Get(ctx context.Context, key *meta.Key) (*beta.SecurityPolicy, error) - List(ctx context.Context, fl *filter.F) ([]*beta.SecurityPolicy, error) - Insert(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy) error +// SetSslCertificates is a method on GCEBetaRegionTargetHttpsProxies. +func (g *GCEBetaRegionTargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *beta.RegionTargetHttpsProxiesSetSslCertificatesRequest) error { + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.SetSslCertificates(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaRegionTargetHttpsProxies.SetSslCertificates(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionTargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetSslCertificates", + Version: meta.Version("beta"), + Service: "RegionTargetHttpsProxies", + } + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.SetSslCertificates(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.SetSslCertificates(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.RegionTargetHttpsProxies.SetSslCertificates(projectID, key.Region, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.SetSslCertificates(%v, %v, ...) 
= %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// SetUrlMap is a method on GCEBetaRegionTargetHttpsProxies. +func (g *GCEBetaRegionTargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *beta.UrlMapReference) error { + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + klog.V(2).Infof("GCEBetaRegionTargetHttpsProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionTargetHttpsProxies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetUrlMap", + Version: meta.Version("beta"), + Service: "RegionTargetHttpsProxies", + } + klog.V(5).Infof("GCEBetaRegionTargetHttpsProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.RegionTargetHttpsProxies.SetUrlMap(projectID, key.Region, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEBetaRegionTargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// TargetPools is an interface that allows for mocking of TargetPools. +type TargetPools interface { + Get(ctx context.Context, key *meta.Key) (*ga.TargetPool, error) + List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.TargetPool) error Delete(ctx context.Context, key *meta.Key) error - AddRule(context.Context, *meta.Key, *beta.SecurityPolicyRule) error - GetRule(context.Context, *meta.Key) (*beta.SecurityPolicyRule, error) - Patch(context.Context, *meta.Key, *beta.SecurityPolicy) error - PatchRule(context.Context, *meta.Key, *beta.SecurityPolicyRule) error - RemoveRule(context.Context, *meta.Key) error + AddInstance(context.Context, *meta.Key, *ga.TargetPoolsAddInstanceRequest) error + RemoveInstance(context.Context, *meta.Key, *ga.TargetPoolsRemoveInstanceRequest) error } -// NewMockBetaSecurityPolicies returns a new mock for SecurityPolicies. -func NewMockBetaSecurityPolicies(pr ProjectRouter, objs map[meta.Key]*MockSecurityPoliciesObj) *MockBetaSecurityPolicies { - mock := &MockBetaSecurityPolicies{ +// NewMockTargetPools returns a new mock for TargetPools. +func NewMockTargetPools(pr ProjectRouter, objs map[meta.Key]*MockTargetPoolsObj) *MockTargetPools { + mock := &MockTargetPools{ ProjectRouter: pr, Objects: objs, @@ -12356,14 +24958,14 @@ func NewMockBetaSecurityPolicies(pr ProjectRouter, objs map[meta.Key]*MockSecuri return mock } -// MockBetaSecurityPolicies is the mock for SecurityPolicies. -type MockBetaSecurityPolicies struct { +// MockTargetPools is the mock for TargetPools. +type MockTargetPools struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. 
- Objects map[meta.Key]*MockSecurityPoliciesObj + Objects map[meta.Key]*MockTargetPoolsObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -12376,15 +24978,12 @@ type MockBetaSecurityPolicies struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockBetaSecurityPolicies) (bool, *beta.SecurityPolicy, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockBetaSecurityPolicies) (bool, []*beta.SecurityPolicy, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy, m *MockBetaSecurityPolicies) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaSecurityPolicies) (bool, error) - AddRuleHook func(context.Context, *meta.Key, *beta.SecurityPolicyRule, *MockBetaSecurityPolicies) error - GetRuleHook func(context.Context, *meta.Key, *MockBetaSecurityPolicies) (*beta.SecurityPolicyRule, error) - PatchHook func(context.Context, *meta.Key, *beta.SecurityPolicy, *MockBetaSecurityPolicies) error - PatchRuleHook func(context.Context, *meta.Key, *beta.SecurityPolicyRule, *MockBetaSecurityPolicies) error - RemoveRuleHook func(context.Context, *meta.Key, *MockBetaSecurityPolicies) error + GetHook func(ctx context.Context, key *meta.Key, m *MockTargetPools) (bool, *ga.TargetPool, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockTargetPools) (bool, []*ga.TargetPool, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.TargetPool, m *MockTargetPools) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockTargetPools) (bool, error) + AddInstanceHook func(context.Context, *meta.Key, *ga.TargetPoolsAddInstanceRequest, *MockTargetPools) error + RemoveInstanceHook func(context.Context, *meta.Key, *ga.TargetPoolsRemoveInstanceRequest, *MockTargetPools) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -12392,10 +24991,10 @@ type MockBetaSecurityPolicies struct { } // Get returns the object from the mock. 
-func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*beta.SecurityPolicy, error) { +func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPool, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -12407,28 +25006,28 @@ func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*bet defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToBeta() - klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToGA() + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaSecurityPolicies %v not found", key), + Message: fmt.Sprintf("MockTargetPools %v not found", key), } - klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock. -func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*beta.SecurityPolicy, error) { +// List all of the objects in the mock in the given region. +func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -12438,28 +25037,31 @@ func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*b if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*beta.SecurityPolicy - for _, obj := range m.Objects { - if !fl.Match(obj.ToBeta()) { + var objs []*ga.TargetPool + for key, obj := range m.Objects { + if key.Region != region { continue } - objs = append(objs, obj.ToBeta()) + if !fl.Match(obj.ToGA()) { + continue + } + objs = append(objs, obj.ToGA()) } - klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. 
-func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy) error { +func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetPool) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -12471,32 +25073,32 @@ func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, ob defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockBetaSecurityPolicies %v exists", key), + Message: fmt.Sprintf("MockTargetPools %v exists", key), } - klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "beta", "securityPolicies") - obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "securityPolicies", key) + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "targetPools") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetPools", key) - m.Objects[*key] = &MockSecurityPoliciesObj{obj} - klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockTargetPoolsObj{obj} + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. -func (m *MockBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockTargetPools) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -12508,381 +25110,263 @@ func (m *MockBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) er defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockBetaSecurityPolicies %v not found", key), + Message: fmt.Sprintf("MockTargetPools %v not found", key), } - klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockBetaSecurityPolicies) Obj(o *beta.SecurityPolicy) *MockSecurityPoliciesObj { - return &MockSecurityPoliciesObj{o} -} - -// AddRule is a mock for the corresponding method. 
-func (m *MockBetaSecurityPolicies) AddRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { - if m.AddRuleHook != nil { - return m.AddRuleHook(ctx, key, arg0, m) - } - return nil -} - -// GetRule is a mock for the corresponding method. -func (m *MockBetaSecurityPolicies) GetRule(ctx context.Context, key *meta.Key) (*beta.SecurityPolicyRule, error) { - if m.GetRuleHook != nil { - return m.GetRuleHook(ctx, key, m) - } - return nil, fmt.Errorf("GetRuleHook must be set") -} - -// Patch is a mock for the corresponding method. -func (m *MockBetaSecurityPolicies) Patch(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicy) error { - if m.PatchHook != nil { - return m.PatchHook(ctx, key, arg0, m) - } - return nil +func (m *MockTargetPools) Obj(o *ga.TargetPool) *MockTargetPoolsObj { + return &MockTargetPoolsObj{o} } -// PatchRule is a mock for the corresponding method. -func (m *MockBetaSecurityPolicies) PatchRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { - if m.PatchRuleHook != nil { - return m.PatchRuleHook(ctx, key, arg0, m) +// AddInstance is a mock for the corresponding method. +func (m *MockTargetPools) AddInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsAddInstanceRequest) error { + if m.AddInstanceHook != nil { + return m.AddInstanceHook(ctx, key, arg0, m) } return nil } -// RemoveRule is a mock for the corresponding method. -func (m *MockBetaSecurityPolicies) RemoveRule(ctx context.Context, key *meta.Key) error { - if m.RemoveRuleHook != nil { - return m.RemoveRuleHook(ctx, key, m) +// RemoveInstance is a mock for the corresponding method. +func (m *MockTargetPools) RemoveInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsRemoveInstanceRequest) error { + if m.RemoveInstanceHook != nil { + return m.RemoveInstanceHook(ctx, key, arg0, m) } return nil } -// GCEBetaSecurityPolicies is a simplifying adapter for the GCE SecurityPolicies. -type GCEBetaSecurityPolicies struct { +// GCETargetPools is a simplifying adapter for the GCE TargetPools. +type GCETargetPools struct { s *Service } -// Get the SecurityPolicy named by key. -func (g *GCEBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*beta.SecurityPolicy, error) { - klog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): called", ctx, key) +// Get the TargetPool named by key. 
+func (g *GCETargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPool, error) { + klog.V(5).Infof("GCETargetPools.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaSecurityPolicies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("beta"), - Service: "SecurityPolicies", + Version: meta.Version("ga"), + Service: "TargetPools", } - klog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.Beta.SecurityPolicies.Get(projectID, key.Name) + call := g.s.GA.TargetPools.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCETargetPools.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all SecurityPolicy objects. -func (g *GCEBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*beta.SecurityPolicy, error) { - klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") +// List all TargetPool objects. +func (g *GCETargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { + klog.V(5).Infof("GCETargetPools.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("beta"), - Service: "SecurityPolicies", + Version: meta.Version("ga"), + Service: "TargetPools", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.Beta.SecurityPolicies.List(projectID) + klog.V(5).Infof("GCETargetPools.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.GA.TargetPools.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*beta.SecurityPolicy - f := func(l *beta.SecurityPolicyList) error { - klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*ga.TargetPool + f := func(l *ga.TargetPoolList) error { + klog.V(5).Infof("GCETargetPools.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert SecurityPolicy with key of value obj. -func (g *GCEBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy) error { - klog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v): called", ctx, key, obj) - if !key.Valid() { - klog.V(2).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "Insert", - Version: meta.Version("beta"), - Service: "SecurityPolicies", - } - klog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err - } - obj.Name = key.Name - call := g.s.Beta.SecurityPolicies.Insert(projectID, obj) - call.Context(ctx) - - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...) = %+v", ctx, key, err) - return err - } - - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) - return err -} - -// Delete the SecurityPolicy referenced by key. -func (g *GCEBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): called", ctx, key) - if !key.Valid() { - klog.V(2).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "Delete", - Version: meta.Version("beta"), - Service: "SecurityPolicies", - } - klog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.Beta.SecurityPolicies.Delete(projectID, key.Name) - - call.Context(ctx) - - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) - return err - } - - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) - return err -} - -// AddRule is a method on GCEBetaSecurityPolicies. 
-func (g *GCEBetaSecurityPolicies) AddRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { - klog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): called", ctx, key) - +// Insert TargetPool with key of value obj. +func (g *GCETargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetPool) error { + klog.V(5).Infof("GCETargetPools.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "AddRule", - Version: meta.Version("beta"), - Service: "SecurityPolicies", + Operation: "Insert", + Version: meta.Version("ga"), + Service: "TargetPools", } - klog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - + klog.V(5).Infof("GCETargetPools.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.SecurityPolicies.AddRule(projectID, key.Name, arg0) + obj.Name = key.Name + call := g.s.GA.TargetPools.Insert(projectID, key.Region, obj) call.Context(ctx) + op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } + err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// GetRule is a method on GCEBetaSecurityPolicies. -func (g *GCEBetaSecurityPolicies) GetRule(ctx context.Context, key *meta.Key) (*beta.SecurityPolicyRule, error) { - klog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return nil, fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "GetRule", - Version: meta.Version("beta"), - Service: "SecurityPolicies", - } - klog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return nil, err - } - call := g.s.Beta.SecurityPolicies.GetRule(projectID, key.Name) - call.Context(ctx) - v, err := call.Do() - klog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...) = %+v, %v", ctx, key, v, err) - return v, err -} - -// Patch is a method on GCEBetaSecurityPolicies. 
-func (g *GCEBetaSecurityPolicies) Patch(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicy) error { - klog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): called", ctx, key) - +// Delete the TargetPool referenced by key. +func (g *GCETargetPools) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCETargetPools.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "Patch", - Version: meta.Version("beta"), - Service: "SecurityPolicies", + Operation: "Delete", + Version: meta.Version("ga"), + Service: "TargetPools", } - klog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - + klog.V(5).Infof("GCETargetPools.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.SecurityPolicies.Patch(projectID, key.Name, arg0) + call := g.s.GA.TargetPools.Delete(projectID, key.Region, key.Name) call.Context(ctx) + op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } + err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } -// PatchRule is a method on GCEBetaSecurityPolicies. -func (g *GCEBetaSecurityPolicies) PatchRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { - klog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): called", ctx, key) +// AddInstance is a method on GCETargetPools. 
+func (g *GCETargetPools) AddInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsAddInstanceRequest) error { + klog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.AddInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "PatchRule", - Version: meta.Version("beta"), - Service: "SecurityPolicies", + Operation: "AddInstance", + Version: meta.Version("ga"), + Service: "TargetPools", } - klog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.SecurityPolicies.PatchRule(projectID, key.Name, arg0) + call := g.s.GA.TargetPools.AddInstance(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) return err } -// RemoveRule is a method on GCEBetaSecurityPolicies. -func (g *GCEBetaSecurityPolicies) RemoveRule(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): called", ctx, key) +// RemoveInstance is a method on GCETargetPools. 
+func (g *GCETargetPools) RemoveInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsRemoveInstanceRequest) error { + klog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "RemoveRule", - Version: meta.Version("beta"), - Service: "SecurityPolicies", + Operation: "RemoveInstance", + Version: meta.Version("ga"), + Service: "TargetPools", } - klog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.Beta.SecurityPolicies.RemoveRule(projectID, key.Name) + call := g.s.GA.TargetPools.RemoveInstance(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) = %+v", ctx, key, err) return err } -// SslCertificates is an interface that allows for mocking of SslCertificates. -type SslCertificates interface { - Get(ctx context.Context, key *meta.Key) (*ga.SslCertificate, error) - List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.SslCertificate) error +// AlphaUrlMaps is an interface that allows for mocking of UrlMaps. +type AlphaUrlMaps interface { + Get(ctx context.Context, key *meta.Key) (*alpha.UrlMap, error) + List(ctx context.Context, fl *filter.F) ([]*alpha.UrlMap, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.UrlMap) error Delete(ctx context.Context, key *meta.Key) error + Update(context.Context, *meta.Key, *alpha.UrlMap) error } -// NewMockSslCertificates returns a new mock for SslCertificates. -func NewMockSslCertificates(pr ProjectRouter, objs map[meta.Key]*MockSslCertificatesObj) *MockSslCertificates { - mock := &MockSslCertificates{ +// NewMockAlphaUrlMaps returns a new mock for UrlMaps. +func NewMockAlphaUrlMaps(pr ProjectRouter, objs map[meta.Key]*MockUrlMapsObj) *MockAlphaUrlMaps { + mock := &MockAlphaUrlMaps{ ProjectRouter: pr, Objects: objs, @@ -12893,14 +25377,14 @@ func NewMockSslCertificates(pr ProjectRouter, objs map[meta.Key]*MockSslCertific return mock } -// MockSslCertificates is the mock for SslCertificates. -type MockSslCertificates struct { +// MockAlphaUrlMaps is the mock for UrlMaps. +type MockAlphaUrlMaps struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. 
- Objects map[meta.Key]*MockSslCertificatesObj + Objects map[meta.Key]*MockUrlMapsObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -12913,10 +25397,11 @@ type MockSslCertificates struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockSslCertificates) (bool, *ga.SslCertificate, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockSslCertificates) (bool, []*ga.SslCertificate, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.SslCertificate, m *MockSslCertificates) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockSslCertificates) (bool, error) + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaUrlMaps) (bool, *alpha.UrlMap, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockAlphaUrlMaps) (bool, []*alpha.UrlMap, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.UrlMap, m *MockAlphaUrlMaps) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaUrlMaps) (bool, error) + UpdateHook func(context.Context, *meta.Key, *alpha.UrlMap, *MockAlphaUrlMaps) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -12924,10 +25409,10 @@ type MockSslCertificates struct { } // Get returns the object from the mock. -func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCertificate, error) { +func (m *MockAlphaUrlMaps) Get(ctx context.Context, key *meta.Key) (*alpha.UrlMap, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -12939,28 +25424,28 @@ func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCe defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToGA() - klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockSslCertificates %v not found", key), + Message: fmt.Sprintf("MockAlphaUrlMaps %v not found", key), } - klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock. 
-func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { +func (m *MockAlphaUrlMaps) List(ctx context.Context, fl *filter.F) ([]*alpha.UrlMap, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockAlphaUrlMaps.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -12970,28 +25455,28 @@ func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.Ssl if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockSslCertificates.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaUrlMaps.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*ga.SslCertificate + var objs []*alpha.UrlMap for _, obj := range m.Objects { - if !fl.Match(obj.ToGA()) { + if !fl.Match(obj.ToAlpha()) { continue } - objs = append(objs, obj.ToGA()) + objs = append(objs, obj.ToAlpha()) } - klog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockAlphaUrlMaps.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga.SslCertificate) error { +func (m *MockAlphaUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *alpha.UrlMap) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -13003,32 +25488,32 @@ func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockSslCertificates %v exists", key), + Message: fmt.Sprintf("MockAlphaUrlMaps %v exists", key), } - klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "sslCertificates") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "sslCertificates", key) + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "urlMaps") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "urlMaps", key) - m.Objects[*key] = &MockSslCertificatesObj{obj} - klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockUrlMapsObj{obj} + klog.V(5).Infof("MockAlphaUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockSslCertificates) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockAlphaUrlMaps) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -13040,182 +25525,223 @@ func (m *MockSslCertificates) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockSslCertificates %v not found", key), + Message: fmt.Sprintf("MockAlphaUrlMaps %v not found", key), } - klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaUrlMaps.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockSslCertificates) Obj(o *ga.SslCertificate) *MockSslCertificatesObj { - return &MockSslCertificatesObj{o} +func (m *MockAlphaUrlMaps) Obj(o *alpha.UrlMap) *MockUrlMapsObj { + return &MockUrlMapsObj{o} } -// GCESslCertificates is a simplifying adapter for the GCE SslCertificates. -type GCESslCertificates struct { +// Update is a mock for the corresponding method. +func (m *MockAlphaUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *alpha.UrlMap) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) + } + return nil +} + +// GCEAlphaUrlMaps is a simplifying adapter for the GCE UrlMaps. +type GCEAlphaUrlMaps struct { s *Service } -// Get the SslCertificate named by key. -func (g *GCESslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCertificate, error) { - klog.V(5).Infof("GCESslCertificates.Get(%v, %v): called", ctx, key) +// Get the UrlMap named by key. 
+func (g *GCEAlphaUrlMaps) Get(ctx context.Context, key *meta.Key) (*alpha.UrlMap, error) { + klog.V(5).Infof("GCEAlphaUrlMaps.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCESslCertificates.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaUrlMaps.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("ga"), - Service: "SslCertificates", + Version: meta.Version("alpha"), + Service: "UrlMaps", } - klog.V(5).Infof("GCESslCertificates.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaUrlMaps.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCESslCertificates.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaUrlMaps.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.SslCertificates.Get(projectID, key.Name) + call := g.s.Alpha.UrlMaps.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCESslCertificates.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaUrlMaps.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all SslCertificate objects. -func (g *GCESslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { - klog.V(5).Infof("GCESslCertificates.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") +// List all UrlMap objects. +func (g *GCEAlphaUrlMaps) List(ctx context.Context, fl *filter.F) ([]*alpha.UrlMap, error) { + klog.V(5).Infof("GCEAlphaUrlMaps.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("ga"), - Service: "SslCertificates", + Version: meta.Version("alpha"), + Service: "UrlMaps", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCESslCertificates.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.GA.SslCertificates.List(projectID) + klog.V(5).Infof("GCEAlphaUrlMaps.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Alpha.UrlMaps.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.SslCertificate - f := func(l *ga.SslCertificateList) error { - klog.V(5).Infof("GCESslCertificates.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*alpha.UrlMap + f := func(l *alpha.UrlMapList) error { + klog.V(5).Infof("GCEAlphaUrlMaps.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEAlphaUrlMaps.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert SslCertificate with key of value obj. -func (g *GCESslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga.SslCertificate) error { - klog.V(5).Infof("GCESslCertificates.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert UrlMap with key of value obj. +func (g *GCEAlphaUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *alpha.UrlMap) error { + klog.V(5).Infof("GCEAlphaUrlMaps.Insert(%v, %v, %+v): called", ctx, key, obj) + if !key.Valid() { + klog.V(2).Infof("GCEAlphaUrlMaps.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "UrlMaps") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Insert", + Version: meta.Version("alpha"), + Service: "UrlMaps", + } + klog.V(5).Infof("GCEAlphaUrlMaps.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + klog.V(4).Infof("GCEAlphaUrlMaps.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + obj.Name = key.Name + call := g.s.Alpha.UrlMaps.Insert(projectID, obj) + call.Context(ctx) + + op, err := call.Do() + if err != nil { + klog.V(4).Infof("GCEAlphaUrlMaps.Insert(%v, %v, ...) = %+v", ctx, key, err) + return err + } + + err = g.s.WaitForCompletion(ctx, op) + klog.V(4).Infof("GCEAlphaUrlMaps.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + return err +} + +// Delete the UrlMap referenced by key. 
+func (g *GCEAlphaUrlMaps) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaUrlMaps.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCESslCertificates.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaUrlMaps.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "Insert", - Version: meta.Version("ga"), - Service: "SslCertificates", + Operation: "Delete", + Version: meta.Version("alpha"), + Service: "UrlMaps", } - klog.V(5).Infof("GCESslCertificates.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaUrlMaps.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaUrlMaps.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - obj.Name = key.Name - call := g.s.GA.SslCertificates.Insert(projectID, obj) + call := g.s.Alpha.UrlMaps.Delete(projectID, key.Name) + call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } -// Delete the SslCertificate referenced by key. -func (g *GCESslCertificates) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCESslCertificates.Delete(%v, %v): called", ctx, key) +// Update is a method on GCEAlphaUrlMaps. 
+func (g *GCEAlphaUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *alpha.UrlMap) error { + klog.V(5).Infof("GCEAlphaUrlMaps.Update(%v, %v, ...): called", ctx, key) + if !key.Valid() { - klog.V(2).Infof("GCESslCertificates.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaUrlMaps.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "Delete", - Version: meta.Version("ga"), - Service: "SslCertificates", + Operation: "Update", + Version: meta.Version("alpha"), + Service: "UrlMaps", } - klog.V(5).Infof("GCESslCertificates.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaUrlMaps.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCESslCertificates.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaUrlMaps.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.SslCertificates.Delete(projectID, key.Name) - + call := g.s.Alpha.UrlMaps.Update(projectID, key.Name, arg0) call.Context(ctx) - op, err := call.Do() if err != nil { - klog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) return err } - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// TargetHttpProxies is an interface that allows for mocking of TargetHttpProxies. -type TargetHttpProxies interface { - Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpProxy, error) - List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy) error +// BetaUrlMaps is an interface that allows for mocking of UrlMaps. +type BetaUrlMaps interface { + Get(ctx context.Context, key *meta.Key) (*beta.UrlMap, error) + List(ctx context.Context, fl *filter.F) ([]*beta.UrlMap, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.UrlMap) error Delete(ctx context.Context, key *meta.Key) error - SetUrlMap(context.Context, *meta.Key, *ga.UrlMapReference) error + Update(context.Context, *meta.Key, *beta.UrlMap) error } -// NewMockTargetHttpProxies returns a new mock for TargetHttpProxies. -func NewMockTargetHttpProxies(pr ProjectRouter, objs map[meta.Key]*MockTargetHttpProxiesObj) *MockTargetHttpProxies { - mock := &MockTargetHttpProxies{ +// NewMockBetaUrlMaps returns a new mock for UrlMaps. +func NewMockBetaUrlMaps(pr ProjectRouter, objs map[meta.Key]*MockUrlMapsObj) *MockBetaUrlMaps { + mock := &MockBetaUrlMaps{ ProjectRouter: pr, Objects: objs, @@ -13226,14 +25752,14 @@ func NewMockTargetHttpProxies(pr ProjectRouter, objs map[meta.Key]*MockTargetHtt return mock } -// MockTargetHttpProxies is the mock for TargetHttpProxies. -type MockTargetHttpProxies struct { +// MockBetaUrlMaps is the mock for UrlMaps. +type MockBetaUrlMaps struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. 
- Objects map[meta.Key]*MockTargetHttpProxiesObj + Objects map[meta.Key]*MockUrlMapsObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -13246,11 +25772,11 @@ type MockTargetHttpProxies struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockTargetHttpProxies) (bool, *ga.TargetHttpProxy, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockTargetHttpProxies) (bool, []*ga.TargetHttpProxy, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy, m *MockTargetHttpProxies) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockTargetHttpProxies) (bool, error) - SetUrlMapHook func(context.Context, *meta.Key, *ga.UrlMapReference, *MockTargetHttpProxies) error + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaUrlMaps) (bool, *beta.UrlMap, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockBetaUrlMaps) (bool, []*beta.UrlMap, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.UrlMap, m *MockBetaUrlMaps) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaUrlMaps) (bool, error) + UpdateHook func(context.Context, *meta.Key, *beta.UrlMap, *MockBetaUrlMaps) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -13258,10 +25784,10 @@ type MockTargetHttpProxies struct { } // Get returns the object from the mock. -func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpProxy, error) { +func (m *MockBetaUrlMaps) Get(ctx context.Context, key *meta.Key) (*beta.UrlMap, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -13273,28 +25799,28 @@ func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.Tar defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToGA() - klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), + Message: fmt.Sprintf("MockBetaUrlMaps %v not found", key), } - klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock. 
-func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { +func (m *MockBetaUrlMaps) List(ctx context.Context, fl *filter.F) ([]*beta.UrlMap, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaUrlMaps.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -13304,28 +25830,28 @@ func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaUrlMaps.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*ga.TargetHttpProxy + var objs []*beta.UrlMap for _, obj := range m.Objects { - if !fl.Match(obj.ToGA()) { + if !fl.Match(obj.ToBeta()) { continue } - objs = append(objs, obj.ToGA()) + objs = append(objs, obj.ToBeta()) } - klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaUrlMaps.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy) error { +func (m *MockBetaUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *beta.UrlMap) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -13337,32 +25863,32 @@ func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj * defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockTargetHttpProxies %v exists", key), + Message: fmt.Sprintf("MockBetaUrlMaps %v exists", key), } - klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "targetHttpProxies") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetHttpProxies", key) + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "urlMaps") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "urlMaps", key) - m.Objects[*key] = &MockTargetHttpProxiesObj{obj} - klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockUrlMapsObj{obj} + klog.V(5).Infof("MockBetaUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockBetaUrlMaps) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -13374,224 +25900,223 @@ func (m *MockTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), + Message: fmt.Sprintf("MockBetaUrlMaps %v not found", key), } - klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaUrlMaps.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockTargetHttpProxies) Obj(o *ga.TargetHttpProxy) *MockTargetHttpProxiesObj { - return &MockTargetHttpProxiesObj{o} +func (m *MockBetaUrlMaps) Obj(o *beta.UrlMap) *MockUrlMapsObj { + return &MockUrlMapsObj{o} } -// SetUrlMap is a mock for the corresponding method. -func (m *MockTargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { - if m.SetUrlMapHook != nil { - return m.SetUrlMapHook(ctx, key, arg0, m) +// Update is a mock for the corresponding method. +func (m *MockBetaUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *beta.UrlMap) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) } return nil } -// GCETargetHttpProxies is a simplifying adapter for the GCE TargetHttpProxies. -type GCETargetHttpProxies struct { +// GCEBetaUrlMaps is a simplifying adapter for the GCE UrlMaps. +type GCEBetaUrlMaps struct { s *Service } -// Get the TargetHttpProxy named by key. -func (g *GCETargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpProxy, error) { - klog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): called", ctx, key) +// Get the UrlMap named by key. 
+func (g *GCEBetaUrlMaps) Get(ctx context.Context, key *meta.Key) (*beta.UrlMap, error) { + klog.V(5).Infof("GCEBetaUrlMaps.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCETargetHttpProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaUrlMaps.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("ga"), - Service: "TargetHttpProxies", + Version: meta.Version("beta"), + Service: "UrlMaps", } - klog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaUrlMaps.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaUrlMaps.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.TargetHttpProxies.Get(projectID, key.Name) + call := g.s.Beta.UrlMaps.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaUrlMaps.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all TargetHttpProxy objects. -func (g *GCETargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { - klog.V(5).Infof("GCETargetHttpProxies.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") +// List all UrlMap objects. +func (g *GCEBetaUrlMaps) List(ctx context.Context, fl *filter.F) ([]*beta.UrlMap, error) { + klog.V(5).Infof("GCEBetaUrlMaps.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("ga"), - Service: "TargetHttpProxies", + Version: meta.Version("beta"), + Service: "UrlMaps", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCETargetHttpProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.GA.TargetHttpProxies.List(projectID) + klog.V(5).Infof("GCEBetaUrlMaps.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.Beta.UrlMaps.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.TargetHttpProxy - f := func(l *ga.TargetHttpProxyList) error { - klog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*beta.UrlMap + f := func(l *beta.UrlMapList) error { + klog.V(5).Infof("GCEBetaUrlMaps.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEBetaUrlMaps.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert TargetHttpProxy with key of value obj. -func (g *GCETargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy) error { - klog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert UrlMap with key of value obj. +func (g *GCEBetaUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *beta.UrlMap) error { + klog.V(5).Infof("GCEBetaUrlMaps.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaUrlMaps.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("ga"), - Service: "TargetHttpProxies", + Version: meta.Version("beta"), + Service: "UrlMaps", } - klog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaUrlMaps.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaUrlMaps.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.TargetHttpProxies.Insert(projectID, obj) + call := g.s.Beta.UrlMaps.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaUrlMaps.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaUrlMaps.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the TargetHttpProxy referenced by key. -func (g *GCETargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): called", ctx, key) +// Delete the UrlMap referenced by key. 
+func (g *GCEBetaUrlMaps) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaUrlMaps.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCETargetHttpProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaUrlMaps.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("ga"), - Service: "TargetHttpProxies", + Version: meta.Version("beta"), + Service: "UrlMaps", } - klog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaUrlMaps.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaUrlMaps.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.TargetHttpProxies.Delete(projectID, key.Name) + call := g.s.Beta.UrlMaps.Delete(projectID, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } -// SetUrlMap is a method on GCETargetHttpProxies. -func (g *GCETargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { - klog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): called", ctx, key) +// Update is a method on GCEBetaUrlMaps. +func (g *GCEBetaUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *beta.UrlMap) error { + klog.V(5).Infof("GCEBetaUrlMaps.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaUrlMaps.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "SetUrlMap", - Version: meta.Version("ga"), - Service: "TargetHttpProxies", + Operation: "Update", + Version: meta.Version("beta"), + Service: "UrlMaps", } - klog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaUrlMaps.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaUrlMaps.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.TargetHttpProxies.SetUrlMap(projectID, key.Name, arg0) + call := g.s.Beta.UrlMaps.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// TargetHttpsProxies is an interface that allows for mocking of TargetHttpsProxies. -type TargetHttpsProxies interface { - Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpsProxy, error) - List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy) error +// UrlMaps is an interface that allows for mocking of UrlMaps. +type UrlMaps interface { + Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) + List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) + Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) error Delete(ctx context.Context, key *meta.Key) error - SetSslCertificates(context.Context, *meta.Key, *ga.TargetHttpsProxiesSetSslCertificatesRequest) error - SetUrlMap(context.Context, *meta.Key, *ga.UrlMapReference) error + Update(context.Context, *meta.Key, *ga.UrlMap) error } -// NewMockTargetHttpsProxies returns a new mock for TargetHttpsProxies. -func NewMockTargetHttpsProxies(pr ProjectRouter, objs map[meta.Key]*MockTargetHttpsProxiesObj) *MockTargetHttpsProxies { - mock := &MockTargetHttpsProxies{ +// NewMockUrlMaps returns a new mock for UrlMaps. +func NewMockUrlMaps(pr ProjectRouter, objs map[meta.Key]*MockUrlMapsObj) *MockUrlMaps { + mock := &MockUrlMaps{ ProjectRouter: pr, Objects: objs, @@ -13602,14 +26127,14 @@ func NewMockTargetHttpsProxies(pr ProjectRouter, objs map[meta.Key]*MockTargetHt return mock } -// MockTargetHttpsProxies is the mock for TargetHttpsProxies. -type MockTargetHttpsProxies struct { +// MockUrlMaps is the mock for UrlMaps. +type MockUrlMaps struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. - Objects map[meta.Key]*MockTargetHttpsProxiesObj + Objects map[meta.Key]*MockUrlMapsObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -13622,12 +26147,11 @@ type MockTargetHttpsProxies struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. 
- GetHook func(ctx context.Context, key *meta.Key, m *MockTargetHttpsProxies) (bool, *ga.TargetHttpsProxy, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockTargetHttpsProxies) (bool, []*ga.TargetHttpsProxy, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy, m *MockTargetHttpsProxies) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockTargetHttpsProxies) (bool, error) - SetSslCertificatesHook func(context.Context, *meta.Key, *ga.TargetHttpsProxiesSetSslCertificatesRequest, *MockTargetHttpsProxies) error - SetUrlMapHook func(context.Context, *meta.Key, *ga.UrlMapReference, *MockTargetHttpsProxies) error + GetHook func(ctx context.Context, key *meta.Key, m *MockUrlMaps) (bool, *ga.UrlMap, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockUrlMaps) (bool, []*ga.UrlMap, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *ga.UrlMap, m *MockUrlMaps) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockUrlMaps) (bool, error) + UpdateHook func(context.Context, *meta.Key, *ga.UrlMap, *MockUrlMaps) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -13635,10 +26159,10 @@ type MockTargetHttpsProxies struct { } // Get returns the object from the mock. -func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpsProxy, error) { +func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -13650,28 +26174,28 @@ func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.Ta defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), + Message: fmt.Sprintf("MockUrlMaps %v not found", key), } - klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock. -func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { +func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -13681,12 +26205,12 @@ func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga. 
if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockUrlMaps.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } - var objs []*ga.TargetHttpsProxy + var objs []*ga.UrlMap for _, obj := range m.Objects { if !fl.Match(obj.ToGA()) { continue @@ -13694,15 +26218,15 @@ func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga. objs = append(objs, obj.ToGA()) } - klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy) error { +func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -13714,32 +26238,32 @@ func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockTargetHttpsProxies %v exists", key), + Message: fmt.Sprintf("MockUrlMaps %v exists", key), } - klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "targetHttpsProxies") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetHttpsProxies", key) + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "urlMaps") + obj.SelfLink = SelfLink(meta.VersionGA, projectID, "urlMaps", key) - m.Objects[*key] = &MockTargetHttpsProxiesObj{obj} - klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockUrlMapsObj{obj} + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockUrlMaps) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -13751,265 +26275,223 @@ func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) erro defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), + Message: fmt.Sprintf("MockUrlMaps %v not found", key), } - klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockTargetHttpsProxies) Obj(o *ga.TargetHttpsProxy) *MockTargetHttpsProxiesObj { - return &MockTargetHttpsProxiesObj{o} -} - -// SetSslCertificates is a mock for the corresponding method. -func (m *MockTargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *ga.TargetHttpsProxiesSetSslCertificatesRequest) error { - if m.SetSslCertificatesHook != nil { - return m.SetSslCertificatesHook(ctx, key, arg0, m) - } - return nil +func (m *MockUrlMaps) Obj(o *ga.UrlMap) *MockUrlMapsObj { + return &MockUrlMapsObj{o} } -// SetUrlMap is a mock for the corresponding method. -func (m *MockTargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { - if m.SetUrlMapHook != nil { - return m.SetUrlMapHook(ctx, key, arg0, m) +// Update is a mock for the corresponding method. +func (m *MockUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *ga.UrlMap) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) } return nil } -// GCETargetHttpsProxies is a simplifying adapter for the GCE TargetHttpsProxies. -type GCETargetHttpsProxies struct { +// GCEUrlMaps is a simplifying adapter for the GCE UrlMaps. +type GCEUrlMaps struct { s *Service } -// Get the TargetHttpsProxy named by key. -func (g *GCETargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpsProxy, error) { - klog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): called", ctx, key) +// Get the UrlMap named by key. 
+func (g *GCEUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) { + klog.V(5).Infof("GCEUrlMaps.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCETargetHttpsProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", Version: meta.Version("ga"), - Service: "TargetHttpsProxies", + Service: "UrlMaps", } - klog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.TargetHttpsProxies.Get(projectID, key.Name) + call := g.s.GA.UrlMaps.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEUrlMaps.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all TargetHttpsProxy objects. -func (g *GCETargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { - klog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") +// List all UrlMap objects. +func (g *GCEUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) { + klog.V(5).Infof("GCEUrlMaps.List(%v, %v) called", ctx, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", Version: meta.Version("ga"), - Service: "TargetHttpsProxies", + Service: "UrlMaps", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.GA.TargetHttpsProxies.List(projectID) + klog.V(5).Infof("GCEUrlMaps.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + call := g.s.GA.UrlMaps.List(projectID) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.TargetHttpsProxy - f := func(l *ga.TargetHttpsProxyList) error { - klog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*ga.UrlMap + f := func(l *ga.UrlMapList) error { + klog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert TargetHttpsProxy with key of value obj. -func (g *GCETargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy) error { - klog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert UrlMap with key of value obj. +func (g *GCEUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) error { + klog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", Version: meta.Version("ga"), - Service: "TargetHttpsProxies", + Service: "UrlMaps", } - klog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.TargetHttpsProxies.Insert(projectID, obj) + call := g.s.GA.UrlMaps.Insert(projectID, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the TargetHttpsProxy referenced by key. -func (g *GCETargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): called", ctx, key) +// Delete the UrlMap referenced by key. 
+func (g *GCEUrlMaps) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCETargetHttpsProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", Version: meta.Version("ga"), - Service: "TargetHttpsProxies", + Service: "UrlMaps", } - klog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.TargetHttpsProxies.Delete(projectID, key.Name) + call := g.s.GA.UrlMaps.Delete(projectID, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) - return err -} - -// SetSslCertificates is a method on GCETargetHttpsProxies. -func (g *GCETargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *ga.TargetHttpsProxiesSetSslCertificatesRequest) error { - klog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "SetSslCertificates", - Version: meta.Version("ga"), - Service: "TargetHttpsProxies", - } - klog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.GA.TargetHttpsProxies.SetSslCertificates(projectID, key.Name, arg0) - call.Context(ctx) - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) - return err - } - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } -// SetUrlMap is a method on GCETargetHttpsProxies. -func (g *GCETargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { - klog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): called", ctx, key) +// Update is a method on GCEUrlMaps. 
+func (g *GCEUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *ga.UrlMap) error { + klog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") + projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "SetUrlMap", + Operation: "Update", Version: meta.Version("ga"), - Service: "TargetHttpsProxies", + Service: "UrlMaps", } - klog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.TargetHttpsProxies.SetUrlMap(projectID, key.Name, arg0) + call := g.s.GA.UrlMaps.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// TargetPools is an interface that allows for mocking of TargetPools. -type TargetPools interface { - Get(ctx context.Context, key *meta.Key) (*ga.TargetPool, error) - List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.TargetPool) error +// AlphaRegionUrlMaps is an interface that allows for mocking of RegionUrlMaps. +type AlphaRegionUrlMaps interface { + Get(ctx context.Context, key *meta.Key) (*alpha.UrlMap, error) + List(ctx context.Context, region string, fl *filter.F) ([]*alpha.UrlMap, error) + Insert(ctx context.Context, key *meta.Key, obj *alpha.UrlMap) error Delete(ctx context.Context, key *meta.Key) error - AddInstance(context.Context, *meta.Key, *ga.TargetPoolsAddInstanceRequest) error - RemoveInstance(context.Context, *meta.Key, *ga.TargetPoolsRemoveInstanceRequest) error + Update(context.Context, *meta.Key, *alpha.UrlMap) error } -// NewMockTargetPools returns a new mock for TargetPools. -func NewMockTargetPools(pr ProjectRouter, objs map[meta.Key]*MockTargetPoolsObj) *MockTargetPools { - mock := &MockTargetPools{ +// NewMockAlphaRegionUrlMaps returns a new mock for RegionUrlMaps. +func NewMockAlphaRegionUrlMaps(pr ProjectRouter, objs map[meta.Key]*MockRegionUrlMapsObj) *MockAlphaRegionUrlMaps { + mock := &MockAlphaRegionUrlMaps{ ProjectRouter: pr, Objects: objs, @@ -14020,14 +26502,14 @@ func NewMockTargetPools(pr ProjectRouter, objs map[meta.Key]*MockTargetPoolsObj) return mock } -// MockTargetPools is the mock for TargetPools. -type MockTargetPools struct { +// MockAlphaRegionUrlMaps is the mock for RegionUrlMaps. +type MockAlphaRegionUrlMaps struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. 
- Objects map[meta.Key]*MockTargetPoolsObj + Objects map[meta.Key]*MockRegionUrlMapsObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -14040,12 +26522,11 @@ type MockTargetPools struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockTargetPools) (bool, *ga.TargetPool, error) - ListHook func(ctx context.Context, region string, fl *filter.F, m *MockTargetPools) (bool, []*ga.TargetPool, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.TargetPool, m *MockTargetPools) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockTargetPools) (bool, error) - AddInstanceHook func(context.Context, *meta.Key, *ga.TargetPoolsAddInstanceRequest, *MockTargetPools) error - RemoveInstanceHook func(context.Context, *meta.Key, *ga.TargetPoolsRemoveInstanceRequest, *MockTargetPools) error + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionUrlMaps) (bool, *alpha.UrlMap, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockAlphaRegionUrlMaps) (bool, []*alpha.UrlMap, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.UrlMap, m *MockAlphaRegionUrlMaps) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaRegionUrlMaps) (bool, error) + UpdateHook func(context.Context, *meta.Key, *alpha.UrlMap, *MockAlphaRegionUrlMaps) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -14053,10 +26534,10 @@ type MockTargetPools struct { } // Get returns the object from the mock. -func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPool, error) { +func (m *MockAlphaRegionUrlMaps) Get(ctx context.Context, key *meta.Key) (*alpha.UrlMap, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -14068,28 +26549,28 @@ func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPoo defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToGA() - klog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToAlpha() + klog.V(5).Infof("MockAlphaRegionUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockTargetPools %v not found", key), + Message: fmt.Sprintf("MockAlphaRegionUrlMaps %v not found", key), } - klog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } // List all of the objects in the mock in the given region. 
-func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { +func (m *MockAlphaRegionUrlMaps) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.UrlMap, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockAlphaRegionUrlMaps.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -14099,31 +26580,31 @@ func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockAlphaRegionUrlMaps.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*ga.TargetPool + var objs []*alpha.UrlMap for key, obj := range m.Objects { if key.Region != region { continue } - if !fl.Match(obj.ToGA()) { + if !fl.Match(obj.ToAlpha()) { continue } - objs = append(objs, obj.ToGA()) + objs = append(objs, obj.ToAlpha()) } - klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockAlphaRegionUrlMaps.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetPool) error { +func (m *MockAlphaRegionUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *alpha.UrlMap) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -14135,32 +26616,32 @@ func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Tar defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockTargetPools %v exists", key), + Message: fmt.Sprintf("MockAlphaRegionUrlMaps %v exists", key), } - klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "targetPools") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetPools", key) + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "urlMaps") + obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "urlMaps", key) - m.Objects[*key] = &MockTargetPoolsObj{obj} - klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockRegionUrlMapsObj{obj} + klog.V(5).Infof("MockAlphaRegionUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockTargetPools) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockAlphaRegionUrlMaps) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -14172,263 +26653,222 @@ func (m *MockTargetPools) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockTargetPools %v not found", key), + Message: fmt.Sprintf("MockAlphaRegionUrlMaps %v not found", key), } - klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaRegionUrlMaps.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockTargetPools) Obj(o *ga.TargetPool) *MockTargetPoolsObj { - return &MockTargetPoolsObj{o} -} - -// AddInstance is a mock for the corresponding method. -func (m *MockTargetPools) AddInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsAddInstanceRequest) error { - if m.AddInstanceHook != nil { - return m.AddInstanceHook(ctx, key, arg0, m) - } - return nil +func (m *MockAlphaRegionUrlMaps) Obj(o *alpha.UrlMap) *MockRegionUrlMapsObj { + return &MockRegionUrlMapsObj{o} } -// RemoveInstance is a mock for the corresponding method. -func (m *MockTargetPools) RemoveInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsRemoveInstanceRequest) error { - if m.RemoveInstanceHook != nil { - return m.RemoveInstanceHook(ctx, key, arg0, m) +// Update is a mock for the corresponding method. +func (m *MockAlphaRegionUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *alpha.UrlMap) error { + if m.UpdateHook != nil { + return m.UpdateHook(ctx, key, arg0, m) } return nil } -// GCETargetPools is a simplifying adapter for the GCE TargetPools. -type GCETargetPools struct { +// GCEAlphaRegionUrlMaps is a simplifying adapter for the GCE RegionUrlMaps. +type GCEAlphaRegionUrlMaps struct { s *Service -} - -// Get the TargetPool named by key. -func (g *GCETargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPool, error) { - klog.V(5).Infof("GCETargetPools.Get(%v, %v): called", ctx, key) +} + +// Get the UrlMap named by key. 
+func (g *GCEAlphaRegionUrlMaps) Get(ctx context.Context, key *meta.Key) (*alpha.UrlMap, error) { + klog.V(5).Infof("GCEAlphaRegionUrlMaps.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCETargetPools.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionUrlMaps.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionUrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("ga"), - Service: "TargetPools", + Version: meta.Version("alpha"), + Service: "RegionUrlMaps", } - klog.V(5).Infof("GCETargetPools.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionUrlMaps.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetPools.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionUrlMaps.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.TargetPools.Get(projectID, key.Region, key.Name) + call := g.s.Alpha.RegionUrlMaps.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCETargetPools.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaRegionUrlMaps.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } -// List all TargetPool objects. -func (g *GCETargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { - klog.V(5).Infof("GCETargetPools.List(%v, %v, %v) called", ctx, region, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") +// List all UrlMap objects. +func (g *GCEAlphaRegionUrlMaps) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.UrlMap, error) { + klog.V(5).Infof("GCEAlphaRegionUrlMaps.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionUrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("ga"), - Service: "TargetPools", + Version: meta.Version("alpha"), + Service: "RegionUrlMaps", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCETargetPools.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) - call := g.s.GA.TargetPools.List(projectID, region) + klog.V(5).Infof("GCEAlphaRegionUrlMaps.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Alpha.RegionUrlMaps.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.TargetPool - f := func(l *ga.TargetPoolList) error { - klog.V(5).Infof("GCETargetPools.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*alpha.UrlMap + f := func(l *alpha.UrlMapList) error { + klog.V(5).Infof("GCEAlphaRegionUrlMaps.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaRegionUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEAlphaRegionUrlMaps.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaRegionUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } -// Insert TargetPool with key of value obj. -func (g *GCETargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetPool) error { - klog.V(5).Infof("GCETargetPools.Insert(%v, %v, %+v): called", ctx, key, obj) +// Insert UrlMap with key of value obj. +func (g *GCEAlphaRegionUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *alpha.UrlMap) error { + klog.V(5).Infof("GCEAlphaRegionUrlMaps.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCETargetPools.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionUrlMaps.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionUrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("ga"), - Service: "TargetPools", + Version: meta.Version("alpha"), + Service: "RegionUrlMaps", } - klog.V(5).Infof("GCETargetPools.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionUrlMaps.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionUrlMaps.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.TargetPools.Insert(projectID, key.Region, obj) + call := g.s.Alpha.RegionUrlMaps.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionUrlMaps.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCETargetPools.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaRegionUrlMaps.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } -// Delete the TargetPool referenced by key. -func (g *GCETargetPools) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCETargetPools.Delete(%v, %v): called", ctx, key) +// Delete the UrlMap referenced by key. 
+func (g *GCEAlphaRegionUrlMaps) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEAlphaRegionUrlMaps.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCETargetPools.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionUrlMaps.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionUrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("ga"), - Service: "TargetPools", + Version: meta.Version("alpha"), + Service: "RegionUrlMaps", } - klog.V(5).Infof("GCETargetPools.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionUrlMaps.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetPools.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionUrlMaps.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.TargetPools.Delete(projectID, key.Region, key.Name) + call := g.s.Alpha.RegionUrlMaps.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) - return err -} - -// AddInstance is a method on GCETargetPools. -func (g *GCETargetPools) AddInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsAddInstanceRequest) error { - klog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): called", ctx, key) - - if !key.Valid() { - klog.V(2).Infof("GCETargetPools.AddInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) - return fmt.Errorf("invalid GCE key (%+v)", key) - } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") - rk := &RateLimitKey{ - ProjectID: projectID, - Operation: "AddInstance", - Version: meta.Version("ga"), - Service: "TargetPools", - } - klog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) - - if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) - return err - } - call := g.s.GA.TargetPools.AddInstance(projectID, key.Region, key.Name, arg0) - call.Context(ctx) - op, err := call.Do() - if err != nil { - klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) - return err - } - err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } -// RemoveInstance is a method on GCETargetPools. -func (g *GCETargetPools) RemoveInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsRemoveInstanceRequest) error { - klog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): called", ctx, key) +// Update is a method on GCEAlphaRegionUrlMaps. 
+func (g *GCEAlphaRegionUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *alpha.UrlMap) error { + klog.V(5).Infof("GCEAlphaRegionUrlMaps.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionUrlMaps.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionUrlMaps") rk := &RateLimitKey{ ProjectID: projectID, - Operation: "RemoveInstance", - Version: meta.Version("ga"), - Service: "TargetPools", + Operation: "Update", + Version: meta.Version("alpha"), + Service: "RegionUrlMaps", } - klog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionUrlMaps.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionUrlMaps.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.TargetPools.RemoveInstance(projectID, key.Region, key.Name, arg0) + call := g.s.Alpha.RegionUrlMaps.Update(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) return err } -// UrlMaps is an interface that allows for mocking of UrlMaps. -type UrlMaps interface { - Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) - List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) - Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) error +// BetaRegionUrlMaps is an interface that allows for mocking of RegionUrlMaps. +type BetaRegionUrlMaps interface { + Get(ctx context.Context, key *meta.Key) (*beta.UrlMap, error) + List(ctx context.Context, region string, fl *filter.F) ([]*beta.UrlMap, error) + Insert(ctx context.Context, key *meta.Key, obj *beta.UrlMap) error Delete(ctx context.Context, key *meta.Key) error - Update(context.Context, *meta.Key, *ga.UrlMap) error + Update(context.Context, *meta.Key, *beta.UrlMap) error } -// NewMockUrlMaps returns a new mock for UrlMaps. -func NewMockUrlMaps(pr ProjectRouter, objs map[meta.Key]*MockUrlMapsObj) *MockUrlMaps { - mock := &MockUrlMaps{ +// NewMockBetaRegionUrlMaps returns a new mock for RegionUrlMaps. +func NewMockBetaRegionUrlMaps(pr ProjectRouter, objs map[meta.Key]*MockRegionUrlMapsObj) *MockBetaRegionUrlMaps { + mock := &MockBetaRegionUrlMaps{ ProjectRouter: pr, Objects: objs, @@ -14439,14 +26879,14 @@ func NewMockUrlMaps(pr ProjectRouter, objs map[meta.Key]*MockUrlMapsObj) *MockUr return mock } -// MockUrlMaps is the mock for UrlMaps. -type MockUrlMaps struct { +// MockBetaRegionUrlMaps is the mock for RegionUrlMaps. +type MockBetaRegionUrlMaps struct { Lock sync.Mutex ProjectRouter ProjectRouter // Objects maintained by the mock. 
- Objects map[meta.Key]*MockUrlMapsObj + Objects map[meta.Key]*MockRegionUrlMapsObj // If an entry exists for the given key and operation, then the error // will be returned instead of the operation. @@ -14459,11 +26899,11 @@ type MockUrlMaps struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockUrlMaps) (bool, *ga.UrlMap, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockUrlMaps) (bool, []*ga.UrlMap, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *ga.UrlMap, m *MockUrlMaps) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockUrlMaps) (bool, error) - UpdateHook func(context.Context, *meta.Key, *ga.UrlMap, *MockUrlMaps) error + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionUrlMaps) (bool, *beta.UrlMap, error) + ListHook func(ctx context.Context, region string, fl *filter.F, m *MockBetaRegionUrlMaps) (bool, []*beta.UrlMap, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.UrlMap, m *MockBetaRegionUrlMaps) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaRegionUrlMaps) (bool, error) + UpdateHook func(context.Context, *meta.Key, *beta.UrlMap, *MockBetaRegionUrlMaps) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -14471,10 +26911,10 @@ type MockUrlMaps struct { } // Get returns the object from the mock. -func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) { +func (m *MockBetaRegionUrlMaps) Get(ctx context.Context, key *meta.Key) (*beta.UrlMap, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -14486,28 +26926,28 @@ func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { - typedObj := obj.ToGA() - klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + typedObj := obj.ToBeta() + klog.V(5).Infof("MockBetaRegionUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockUrlMaps %v not found", key), + Message: fmt.Sprintf("MockBetaRegionUrlMaps %v not found", key), } - klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } -// List all of the objects in the mock. -func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) { +// List all of the objects in the mock in the given region. 
+func (m *MockBetaRegionUrlMaps) List(ctx context.Context, region string, fl *filter.F) ([]*beta.UrlMap, error) { if m.ListHook != nil { - if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - klog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { + klog.V(5).Infof("MockBetaRegionUrlMaps.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -14517,28 +26957,31 @@ func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, err if m.ListError != nil { err := *m.ListError - klog.V(5).Infof("MockUrlMaps.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaRegionUrlMaps.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } - var objs []*ga.UrlMap - for _, obj := range m.Objects { - if !fl.Match(obj.ToGA()) { + var objs []*beta.UrlMap + for key, obj := range m.Objects { + if key.Region != region { continue } - objs = append(objs, obj.ToGA()) + if !fl.Match(obj.ToBeta()) { + continue + } + objs = append(objs, obj.ToBeta()) } - klog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaRegionUrlMaps.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } // Insert is a mock for inserting/creating a new object. -func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) error { +func (m *MockBetaRegionUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *beta.UrlMap) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -14550,32 +26993,32 @@ func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { err := &googleapi.Error{ Code: http.StatusConflict, - Message: fmt.Sprintf("MockUrlMaps %v exists", key), + Message: fmt.Sprintf("MockBetaRegionUrlMaps %v exists", key), } - klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaRegionUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } obj.Name = key.Name - projectID := m.ProjectRouter.ProjectID(ctx, "ga", "urlMaps") - obj.SelfLink = SelfLink(meta.VersionGA, projectID, "urlMaps", key) + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "urlMaps") + obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "urlMaps", key) - m.Objects[*key] = &MockUrlMapsObj{obj} - klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj) + m.Objects[*key] = &MockRegionUrlMapsObj{obj} + klog.V(5).Infof("MockBetaRegionUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } // Delete is a mock for deleting the object. 
-func (m *MockUrlMaps) Delete(ctx context.Context, key *meta.Key) error { +func (m *MockBetaRegionUrlMaps) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -14587,208 +27030,207 @@ func (m *MockUrlMaps) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { err := &googleapi.Error{ Code: http.StatusNotFound, - Message: fmt.Sprintf("MockUrlMaps %v not found", key), + Message: fmt.Sprintf("MockBetaRegionUrlMaps %v not found", key), } - klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaRegionUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaRegionUrlMaps.Delete(%v, %v) = nil", ctx, key) return nil } // Obj wraps the object for use in the mock. -func (m *MockUrlMaps) Obj(o *ga.UrlMap) *MockUrlMapsObj { - return &MockUrlMapsObj{o} +func (m *MockBetaRegionUrlMaps) Obj(o *beta.UrlMap) *MockRegionUrlMapsObj { + return &MockRegionUrlMapsObj{o} } // Update is a mock for the corresponding method. -func (m *MockUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *ga.UrlMap) error { +func (m *MockBetaRegionUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *beta.UrlMap) error { if m.UpdateHook != nil { return m.UpdateHook(ctx, key, arg0, m) } return nil } -// GCEUrlMaps is a simplifying adapter for the GCE UrlMaps. -type GCEUrlMaps struct { +// GCEBetaRegionUrlMaps is a simplifying adapter for the GCE RegionUrlMaps. +type GCEBetaRegionUrlMaps struct { s *Service } // Get the UrlMap named by key. 
-func (g *GCEUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) { - klog.V(5).Infof("GCEUrlMaps.Get(%v, %v): called", ctx, key) +func (g *GCEBetaRegionUrlMaps) Get(ctx context.Context, key *meta.Key) (*beta.UrlMap, error) { + klog.V(5).Infof("GCEBetaRegionUrlMaps.Get(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEUrlMaps.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionUrlMaps.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionUrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Get", - Version: meta.Version("ga"), - Service: "UrlMaps", + Version: meta.Version("beta"), + Service: "RegionUrlMaps", } - klog.V(5).Infof("GCEUrlMaps.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionUrlMaps.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEUrlMaps.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionUrlMaps.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } - call := g.s.GA.UrlMaps.Get(projectID, key.Name) + call := g.s.Beta.RegionUrlMaps.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - klog.V(4).Infof("GCEUrlMaps.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaRegionUrlMaps.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all UrlMap objects. -func (g *GCEUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) { - klog.V(5).Infof("GCEUrlMaps.List(%v, %v) called", ctx, fl) - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") +func (g *GCEBetaRegionUrlMaps) List(ctx context.Context, region string, fl *filter.F) ([]*beta.UrlMap, error) { + klog.V(5).Infof("GCEBetaRegionUrlMaps.List(%v, %v, %v) called", ctx, region, fl) + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionUrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "List", - Version: meta.Version("ga"), - Service: "UrlMaps", + Version: meta.Version("beta"), + Service: "RegionUrlMaps", } if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - klog.V(5).Infof("GCEUrlMaps.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) - call := g.s.GA.UrlMaps.List(projectID) + klog.V(5).Infof("GCEBetaRegionUrlMaps.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + call := g.s.Beta.RegionUrlMaps.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } - var all []*ga.UrlMap - f := func(l *ga.UrlMapList) error { - klog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v): page %+v", ctx, fl, l) + var all []*beta.UrlMap + f := func(l *beta.UrlMapList) error { + klog.V(5).Infof("GCEBetaRegionUrlMaps.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - klog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaRegionUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } if klog.V(4) { - klog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + klog.V(4).Infof("GCEBetaRegionUrlMaps.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - klog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaRegionUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // Insert UrlMap with key of value obj. -func (g *GCEUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) error { - klog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, %+v): called", ctx, key, obj) +func (g *GCEBetaRegionUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *beta.UrlMap) error { + klog.V(5).Infof("GCEBetaRegionUrlMaps.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - klog.V(2).Infof("GCEUrlMaps.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionUrlMaps.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionUrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Insert", - Version: meta.Version("ga"), - Service: "UrlMaps", + Version: meta.Version("beta"), + Service: "RegionUrlMaps", } - klog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionUrlMaps.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionUrlMaps.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name - call := g.s.GA.UrlMaps.Insert(projectID, obj) + call := g.s.Beta.RegionUrlMaps.Insert(projectID, key.Region, obj) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionUrlMaps.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaRegionUrlMaps.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the UrlMap referenced by key. 
-func (g *GCEUrlMaps) Delete(ctx context.Context, key *meta.Key) error { - klog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): called", ctx, key) +func (g *GCEBetaRegionUrlMaps) Delete(ctx context.Context, key *meta.Key) error { + klog.V(5).Infof("GCEBetaRegionUrlMaps.Delete(%v, %v): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEUrlMaps.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionUrlMaps.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionUrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Delete", - Version: meta.Version("ga"), - Service: "UrlMaps", + Version: meta.Version("beta"), + Service: "RegionUrlMaps", } - klog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionUrlMaps.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionUrlMaps.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.UrlMaps.Delete(projectID, key.Name) - + call := g.s.Beta.RegionUrlMaps.Delete(projectID, key.Region, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } -// Update is a method on GCEUrlMaps. -func (g *GCEUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *ga.UrlMap) error { - klog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): called", ctx, key) +// Update is a method on GCEBetaRegionUrlMaps. 
+func (g *GCEBetaRegionUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *beta.UrlMap) error { + klog.V(5).Infof("GCEBetaRegionUrlMaps.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - klog.V(2).Infof("GCEUrlMaps.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaRegionUrlMaps.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } - projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "RegionUrlMaps") rk := &RateLimitKey{ ProjectID: projectID, Operation: "Update", - Version: meta.Version("ga"), - Service: "UrlMaps", + Version: meta.Version("beta"), + Service: "RegionUrlMaps", } - klog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaRegionUrlMaps.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionUrlMaps.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } - call := g.s.GA.UrlMaps.Update(projectID, key.Name, arg0) + call := g.s.Beta.RegionUrlMaps.Update(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaRegionUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -15056,6 +27498,12 @@ func NewNetworkEndpointGroupsResourceID(project, zone, name string) *ResourceID return &ResourceID{project, "networkEndpointGroups", key} } +// NewNetworksResourceID creates a ResourceID for the Networks resource. +func NewNetworksResourceID(project, name string) *ResourceID { + key := meta.GlobalKey(name) + return &ResourceID{project, "networks", key} +} + // NewProjectsResourceID creates a ResourceID for the Projects resource. func NewProjectsResourceID(project string) *ResourceID { var key *meta.Key @@ -15074,6 +27522,36 @@ func NewRegionDisksResourceID(project, region, name string) *ResourceID { return &ResourceID{project, "disks", key} } +// NewRegionHealthChecksResourceID creates a ResourceID for the RegionHealthChecks resource. +func NewRegionHealthChecksResourceID(project, region, name string) *ResourceID { + key := meta.RegionalKey(name, region) + return &ResourceID{project, "healthChecks", key} +} + +// NewRegionSslCertificatesResourceID creates a ResourceID for the RegionSslCertificates resource. +func NewRegionSslCertificatesResourceID(project, region, name string) *ResourceID { + key := meta.RegionalKey(name, region) + return &ResourceID{project, "sslCertificates", key} +} + +// NewRegionTargetHttpProxiesResourceID creates a ResourceID for the RegionTargetHttpProxies resource. +func NewRegionTargetHttpProxiesResourceID(project, region, name string) *ResourceID { + key := meta.RegionalKey(name, region) + return &ResourceID{project, "targetHttpProxies", key} +} + +// NewRegionTargetHttpsProxiesResourceID creates a ResourceID for the RegionTargetHttpsProxies resource. 
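The New*ResourceID helpers in this hunk only pair a resource name with the matching global or regional meta key. A small, hypothetical snippet exercising two of the constructors shown here:

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
)

func main() {
	// Global resource: the key carries only a name.
	network := cloud.NewNetworksResourceID("test-project", "default")
	// Regional resource: the key also records the region.
	hc := cloud.NewRegionHealthChecksResourceID("test-project", "us-central1", "example-hc")

	fmt.Printf("network: %+v\nhealth check: %+v\n", network, hc)
}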
+func NewRegionTargetHttpsProxiesResourceID(project, region, name string) *ResourceID { + key := meta.RegionalKey(name, region) + return &ResourceID{project, "targetHttpsProxies", key} +} + +// NewRegionUrlMapsResourceID creates a ResourceID for the RegionUrlMaps resource. +func NewRegionUrlMapsResourceID(project, region, name string) *ResourceID { + key := meta.RegionalKey(name, region) + return &ResourceID{project, "urlMaps", key} +} + // NewRegionsResourceID creates a ResourceID for the Regions resource. func NewRegionsResourceID(project, name string) *ResourceID { key := meta.GlobalKey(name) @@ -15098,6 +27576,12 @@ func NewSslCertificatesResourceID(project, name string) *ResourceID { return &ResourceID{project, "sslCertificates", key} } +// NewSubnetworksResourceID creates a ResourceID for the Subnetworks resource. +func NewSubnetworksResourceID(project, region, name string) *ResourceID { + key := meta.RegionalKey(name, region) + return &ResourceID{project, "subnetworks", key} +} + // NewTargetHttpProxiesResourceID creates a ResourceID for the TargetHttpProxies resource. func NewTargetHttpProxiesResourceID(project, name string) *ResourceID { key := meta.GlobalKey(name) diff --git a/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta/meta.go b/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta/meta.go index 1be6e1df57d2e..d5ef72a75f5af 100644 --- a/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta/meta.go +++ b/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta/meta.go @@ -71,6 +71,7 @@ var AllServices = []*ServiceInfo{ Resource: "addresses", keyType: Regional, serviceType: reflect.TypeOf(&ga.AddressesService{}), + options: AggregatedList, }, { Object: "Address", @@ -79,6 +80,7 @@ var AllServices = []*ServiceInfo{ version: VersionAlpha, keyType: Regional, serviceType: reflect.TypeOf(&alpha.AddressesService{}), + options: AggregatedList, }, { Object: "Address", @@ -87,6 +89,15 @@ var AllServices = []*ServiceInfo{ version: VersionBeta, keyType: Regional, serviceType: reflect.TypeOf(&beta.AddressesService{}), + options: AggregatedList, + }, + { + Object: "Address", + Service: "GlobalAddresses", + Resource: "addresses", + version: VersionAlpha, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.GlobalAddressesService{}), }, { Object: "Address", @@ -155,6 +166,18 @@ var AllServices = []*ServiceInfo{ "Update", }, }, + { + Object: "BackendService", + Service: "RegionBackendServices", + Resource: "backendServices", + version: VersionBeta, + keyType: Regional, + serviceType: reflect.TypeOf(&beta.RegionBackendServicesService{}), + additionalMethods: []string{ + "GetHealth", + "Update", + }, + }, { Object: "Disk", Service: "Disks", @@ -192,6 +215,9 @@ var AllServices = []*ServiceInfo{ Resource: "forwardingRules", keyType: Regional, serviceType: reflect.TypeOf(&ga.ForwardingRulesService{}), + additionalMethods: []string{ + "SetTarget", + }, }, { Object: "ForwardingRule", @@ -200,6 +226,42 @@ var AllServices = []*ServiceInfo{ version: VersionAlpha, keyType: Regional, serviceType: reflect.TypeOf(&alpha.ForwardingRulesService{}), + additionalMethods: []string{ + "SetTarget", + }, + }, + { + Object: "ForwardingRule", + Service: "ForwardingRules", + Resource: "forwardingRules", + version: VersionBeta, + keyType: Regional, + serviceType: reflect.TypeOf(&beta.ForwardingRulesService{}), + additionalMethods: []string{ + "SetTarget", + }, + }, + { + Object: "ForwardingRule", + Service: "GlobalForwardingRules", + 
Resource: "forwardingRules", + version: VersionAlpha, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.GlobalForwardingRulesService{}), + additionalMethods: []string{ + "SetTarget", + }, + }, + { + Object: "ForwardingRule", + Service: "GlobalForwardingRules", + Resource: "forwardingRules", + version: VersionBeta, + keyType: Global, + serviceType: reflect.TypeOf(&beta.GlobalForwardingRulesService{}), + additionalMethods: []string{ + "SetTarget", + }, }, { Object: "ForwardingRule", @@ -243,6 +305,28 @@ var AllServices = []*ServiceInfo{ "Update", }, }, + { + Object: "HealthCheck", + Service: "RegionHealthChecks", + Resource: "healthChecks", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.RegionHealthChecksService{}), + additionalMethods: []string{ + "Update", + }, + }, + { + Object: "HealthCheck", + Service: "RegionHealthChecks", + Resource: "healthChecks", + version: VersionBeta, + keyType: Regional, + serviceType: reflect.TypeOf(&beta.RegionHealthChecksService{}), + additionalMethods: []string{ + "Update", + }, + }, { Object: "HttpHealthCheck", Service: "HttpHealthChecks", @@ -313,6 +397,30 @@ var AllServices = []*ServiceInfo{ "UpdateNetworkInterface", }, }, + { + Object: "Network", + Service: "Networks", + Resource: "networks", + version: VersionAlpha, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.NetworksService{}), + }, + { + Object: "Network", + Service: "Networks", + Resource: "networks", + version: VersionBeta, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.NetworksService{}), + }, + { + Object: "Network", + Service: "Networks", + Resource: "networks", + version: VersionGA, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.NetworksService{}), + }, { Object: "NetworkEndpointGroup", Service: "NetworkEndpointGroups", @@ -341,6 +449,20 @@ var AllServices = []*ServiceInfo{ }, options: AggregatedList, }, + { + Object: "NetworkEndpointGroup", + Service: "NetworkEndpointGroups", + Resource: "networkEndpointGroups", + version: VersionGA, + keyType: Zonal, + serviceType: reflect.TypeOf(&ga.NetworkEndpointGroupsService{}), + additionalMethods: []string{ + "AttachNetworkEndpoints", + "DetachNetworkEndpoints", + "ListNetworkEndpoints", + }, + options: AggregatedList, + }, { Object: "Project", Service: "Projects", @@ -387,6 +509,84 @@ var AllServices = []*ServiceInfo{ keyType: Global, serviceType: reflect.TypeOf(&ga.SslCertificatesService{}), }, + { + Object: "SslCertificate", + Service: "SslCertificates", + Resource: "sslCertificates", + version: VersionBeta, + keyType: Global, + serviceType: reflect.TypeOf(&beta.SslCertificatesService{}), + }, + { + Object: "SslCertificate", + Service: "SslCertificates", + Resource: "sslCertificates", + version: VersionAlpha, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.SslCertificatesService{}), + }, + { + Object: "SslCertificate", + Service: "RegionSslCertificates", + Resource: "sslCertificates", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.RegionSslCertificatesService{}), + }, + { + Object: "SslCertificate", + Service: "RegionSslCertificates", + Resource: "sslCertificates", + version: VersionBeta, + keyType: Regional, + serviceType: reflect.TypeOf(&beta.RegionSslCertificatesService{}), + }, + { + Object: "Subnetwork", + Service: "Subnetworks", + Resource: "subnetworks", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.SubnetworksService{}), + }, + { + Object: "Subnetwork", + Service: "Subnetworks", + Resource: 
"subnetworks", + version: VersionBeta, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.SubnetworksService{}), + }, + { + Object: "Subnetwork", + Service: "Subnetworks", + Resource: "subnetworks", + version: VersionGA, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.SubnetworksService{}), + }, + { + Object: "TargetHttpProxy", + Service: "TargetHttpProxies", + Resource: "targetHttpProxies", + version: VersionAlpha, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.TargetHttpProxiesService{}), + additionalMethods: []string{ + "SetUrlMap", + }, + }, + { + Object: "TargetHttpProxy", + Service: "TargetHttpProxies", + Resource: "targetHttpProxies", + version: VersionBeta, + keyType: Global, + serviceType: reflect.TypeOf(&beta.TargetHttpProxiesService{}), + additionalMethods: []string{ + "SetUrlMap", + }, + }, { Object: "TargetHttpProxy", Service: "TargetHttpProxies", @@ -397,6 +597,28 @@ var AllServices = []*ServiceInfo{ "SetUrlMap", }, }, + { + Object: "TargetHttpProxy", + Service: "RegionTargetHttpProxies", + Resource: "targetHttpProxies", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.RegionTargetHttpProxiesService{}), + additionalMethods: []string{ + "SetUrlMap", + }, + }, + { + Object: "TargetHttpProxy", + Service: "RegionTargetHttpProxies", + Resource: "targetHttpProxies", + version: VersionBeta, + keyType: Regional, + serviceType: reflect.TypeOf(&beta.RegionTargetHttpProxiesService{}), + additionalMethods: []string{ + "SetUrlMap", + }, + }, { Object: "TargetHttpsProxy", Service: "TargetHttpsProxies", @@ -408,6 +630,54 @@ var AllServices = []*ServiceInfo{ "SetUrlMap", }, }, + { + Object: "TargetHttpsProxy", + Service: "TargetHttpsProxies", + Resource: "targetHttpsProxies", + version: VersionAlpha, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.TargetHttpsProxiesService{}), + additionalMethods: []string{ + "SetSslCertificates", + "SetUrlMap", + }, + }, + { + Object: "TargetHttpsProxy", + Service: "TargetHttpsProxies", + Resource: "targetHttpsProxies", + version: VersionBeta, + keyType: Global, + serviceType: reflect.TypeOf(&beta.TargetHttpsProxiesService{}), + additionalMethods: []string{ + "SetSslCertificates", + "SetUrlMap", + }, + }, + { + Object: "TargetHttpsProxy", + Service: "RegionTargetHttpsProxies", + Resource: "targetHttpsProxies", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.RegionTargetHttpsProxiesService{}), + additionalMethods: []string{ + "SetSslCertificates", + "SetUrlMap", + }, + }, + { + Object: "TargetHttpsProxy", + Service: "RegionTargetHttpsProxies", + Resource: "targetHttpsProxies", + version: VersionBeta, + keyType: Regional, + serviceType: reflect.TypeOf(&beta.RegionTargetHttpsProxiesService{}), + additionalMethods: []string{ + "SetSslCertificates", + "SetUrlMap", + }, + }, { Object: "TargetPool", Service: "TargetPools", @@ -419,6 +689,28 @@ var AllServices = []*ServiceInfo{ "RemoveInstance", }, }, + { + Object: "UrlMap", + Service: "UrlMaps", + Resource: "urlMaps", + version: VersionAlpha, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.UrlMapsService{}), + additionalMethods: []string{ + "Update", + }, + }, + { + Object: "UrlMap", + Service: "UrlMaps", + Resource: "urlMaps", + version: VersionBeta, + keyType: Global, + serviceType: reflect.TypeOf(&beta.UrlMapsService{}), + additionalMethods: []string{ + "Update", + }, + }, { Object: "UrlMap", Service: "UrlMaps", @@ -429,6 +721,28 @@ var AllServices = []*ServiceInfo{ "Update", }, }, + { + Object: 
"UrlMap", + Service: "RegionUrlMaps", + Resource: "urlMaps", + version: VersionAlpha, + keyType: Regional, + serviceType: reflect.TypeOf(&alpha.RegionUrlMapsService{}), + additionalMethods: []string{ + "Update", + }, + }, + { + Object: "UrlMap", + Service: "RegionUrlMaps", + Resource: "urlMaps", + version: VersionBeta, + keyType: Regional, + serviceType: reflect.TypeOf(&beta.RegionUrlMapsService{}), + additionalMethods: []string{ + "Update", + }, + }, { Object: "Zone", Service: "Zones", diff --git a/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock/mock.go b/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock/mock.go index fb908dfaa32cd..9be80d35feb8e 100644 --- a/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock/mock.go +++ b/vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock/mock.go @@ -53,6 +53,34 @@ type gceObject interface { MarshalJSON() ([]byte, error) } +// AttachDiskHook mocks attaching a disk to an instance +func AttachDiskHook(ctx context.Context, key *meta.Key, req *ga.AttachedDisk, m *cloud.MockInstances) error { + instance, err := m.Get(ctx, key) + if err != nil { + return err + } + instance.Disks = append(instance.Disks, req) + return nil +} + +// DetachDiskHook mocks detaching a disk from an instance +func DetachDiskHook(ctx context.Context, key *meta.Key, diskName string, m *cloud.MockInstances) error { + instance, err := m.Get(ctx, key) + if err != nil { + return err + } + for i, disk := range instance.Disks { + if disk.DeviceName == diskName { + instance.Disks = append(instance.Disks[:i], instance.Disks[i+1:]...) + return nil + } + } + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Disk: %s was not found in Instance %s", diskName, key.String()), + } +} + // AddInstanceHook mocks adding a Instance to MockTargetPools func AddInstanceHook(ctx context.Context, key *meta.Key, req *ga.TargetPoolsAddInstanceRequest, m *cloud.MockTargetPools) error { pool, err := m.Get(ctx, key) @@ -333,10 +361,7 @@ func (igAttrs *InstanceGroupAttributes) List(key *meta.Key) []*ga.InstanceWithNa func AddInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsAddInstancesRequest, m *cloud.MockInstanceGroups) error { _, err := m.Get(ctx, key) if err != nil { - return &googleapi.Error{ - Code: http.StatusNotFound, - Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()), - } + return err } var attrs InstanceGroupAttributes @@ -350,10 +375,7 @@ func AddInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroups func ListInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsListInstancesRequest, filter *filter.F, m *cloud.MockInstanceGroups) ([]*ga.InstanceWithNamedPorts, error) { _, err := m.Get(ctx, key) if err != nil { - return nil, &googleapi.Error{ - Code: http.StatusNotFound, - Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()), - } + return nil, err } var attrs InstanceGroupAttributes @@ -367,10 +389,7 @@ func ListInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroup func RemoveInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGroupsRemoveInstancesRequest, m *cloud.MockInstanceGroups) error { _, err := m.Get(ctx, key) if err != nil { - return &googleapi.Error{ - Code: http.StatusNotFound, - Message: fmt.Sprintf("Key: %s was not found in InstanceGroups", key.String()), - } + return err } var attrs InstanceGroupAttributes @@ -385,10 +404,7 @@ func 
RemoveInstancesHook(ctx context.Context, key *meta.Key, req *ga.InstanceGro func UpdateFirewallHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) error { _, err := m.Get(ctx, key) if err != nil { - return &googleapi.Error{ - Code: http.StatusNotFound, - Message: fmt.Sprintf("Key: %s was not found in Firewalls", key.String()), - } + return err } obj.Name = key.Name @@ -404,10 +420,7 @@ func UpdateFirewallHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m func UpdateHealthCheckHook(ctx context.Context, key *meta.Key, obj *ga.HealthCheck, m *cloud.MockHealthChecks) error { _, err := m.Get(ctx, key) if err != nil { - return &googleapi.Error{ - Code: http.StatusNotFound, - Message: fmt.Sprintf("Key: %s was not found in HealthChecks", key.String()), - } + return err } obj.Name = key.Name @@ -418,16 +431,74 @@ func UpdateHealthCheckHook(ctx context.Context, key *meta.Key, obj *ga.HealthChe return nil } +// UpdateAlphaHealthCheckHook defines the hook for updating an alpha HealthCheck. +// It replaces the object with the same key in the mock with the updated object. +func UpdateAlphaHealthCheckHook(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck, m *cloud.MockAlphaHealthChecks) error { + _, err := m.Get(ctx, key) + if err != nil { + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "healthChecks") + obj.SelfLink = cloud.SelfLink(meta.VersionAlpha, projectID, "healthChecks", key) + + m.Objects[*key] = &cloud.MockHealthChecksObj{Obj: obj} + return nil +} + +// UpdateAlphaRegionHealthCheckHook defines the hook for updating an alpha HealthCheck. +// It replaces the object with the same key in the mock with the updated object. +func UpdateAlphaRegionHealthCheckHook(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck, m *cloud.MockAlphaRegionHealthChecks) error { + if _, err := m.Get(ctx, key); err != nil { + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "healthChecks") + obj.SelfLink = cloud.SelfLink(meta.VersionAlpha, projectID, "healthChecks", key) + + m.Objects[*key] = &cloud.MockRegionHealthChecksObj{Obj: obj} + return nil +} + +// UpdateBetaHealthCheckHook defines the hook for updating a HealthCheck. It +// replaces the object with the same key in the mock with the updated object. +func UpdateBetaHealthCheckHook(ctx context.Context, key *meta.Key, obj *beta.HealthCheck, m *cloud.MockBetaHealthChecks) error { + if _, err := m.Get(ctx, key); err != nil { + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "healthChecks") + obj.SelfLink = cloud.SelfLink(meta.VersionBeta, projectID, "healthChecks", key) + + m.Objects[*key] = &cloud.MockHealthChecksObj{Obj: obj} + return nil +} + +// UpdateBetaRegionHealthCheckHook defines the hook for updating a HealthCheck. It +// replaces the object with the same key in the mock with the updated object. +func UpdateBetaRegionHealthCheckHook(ctx context.Context, key *meta.Key, obj *beta.HealthCheck, m *cloud.MockBetaRegionHealthChecks) error { + if _, err := m.Get(ctx, key); err != nil { + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "healthChecks") + obj.SelfLink = cloud.SelfLink(meta.VersionBeta, projectID, "healthChecks", key) + + m.Objects[*key] = &cloud.MockRegionHealthChecksObj{Obj: obj} + return nil +} + // UpdateRegionBackendServiceHook defines the hook for updating a Region // BackendsService. 
It replaces the object with the same key in the mock with // the updated object. func UpdateRegionBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *cloud.MockRegionBackendServices) error { _, err := m.Get(ctx, key) if err != nil { - return &googleapi.Error{ - Code: http.StatusNotFound, - Message: fmt.Sprintf("Key: %s was not found in RegionBackendServices", key.String()), - } + return err } obj.Name = key.Name @@ -438,15 +509,46 @@ func UpdateRegionBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga. return nil } +// UpdateRegionBackendServiceHook defines the hook for updating a Region +// BackendsService. It replaces the object with the same key in the mock with +// the updated object. +func UpdateAlphaRegionBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *cloud.MockAlphaRegionBackendServices) error { + _, err := m.Get(ctx, key) + if err != nil { + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices") + obj.SelfLink = cloud.SelfLink(meta.VersionAlpha, projectID, "backendServices", key) + + m.Objects[*key] = &cloud.MockRegionBackendServicesObj{Obj: obj} + return nil +} + +// UpdateBetaRegionBackendServiceHook defines the hook for updating a Region +// BackendsService. It replaces the object with the same key in the mock with +// the updated object. +func UpdateBetaRegionBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *cloud.MockBetaRegionBackendServices) error { + _, err := m.Get(ctx, key) + if err != nil { + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices") + obj.SelfLink = cloud.SelfLink(meta.VersionAlpha, projectID, "backendServices", key) + + m.Objects[*key] = &cloud.MockRegionBackendServicesObj{Obj: obj} + return nil +} + // UpdateBackendServiceHook defines the hook for updating a BackendService. // It replaces the object with the same key in the mock with the updated object. func UpdateBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *cloud.MockBackendServices) error { _, err := m.Get(ctx, key) if err != nil { - return &googleapi.Error{ - Code: http.StatusNotFound, - Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()), - } + return err } obj.Name = key.Name @@ -462,10 +564,7 @@ func UpdateBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.Backen func UpdateAlphaBackendServiceHook(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *cloud.MockAlphaBackendServices) error { _, err := m.Get(ctx, key) if err != nil { - return &googleapi.Error{ - Code: http.StatusNotFound, - Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()), - } + return err } obj.Name = key.Name @@ -481,10 +580,7 @@ func UpdateAlphaBackendServiceHook(ctx context.Context, key *meta.Key, obj *alph func UpdateBetaBackendServiceHook(ctx context.Context, key *meta.Key, obj *beta.BackendService, m *cloud.MockBetaBackendServices) error { _, err := m.Get(ctx, key) if err != nil { - return &googleapi.Error{ - Code: http.StatusNotFound, - Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()), - } + return err } obj.Name = key.Name @@ -500,10 +596,7 @@ func UpdateBetaBackendServiceHook(ctx context.Context, key *meta.Key, obj *beta. 
func UpdateURLMapHook(ctx context.Context, key *meta.Key, obj *ga.UrlMap, m *cloud.MockUrlMaps) error { _, err := m.Get(ctx, key) if err != nil { - return &googleapi.Error{ - Code: http.StatusNotFound, - Message: fmt.Sprintf("Key: %s was not found in UrlMaps", key.String()), - } + return err } obj.Name = key.Name @@ -514,6 +607,229 @@ func UpdateURLMapHook(ctx context.Context, key *meta.Key, obj *ga.UrlMap, m *clo return nil } +// UpdateAlphaURLMapHook defines the hook for updating an alpha UrlMap. +// It replaces the object with the same key in the mock with the updated object. +func UpdateAlphaURLMapHook(ctx context.Context, key *meta.Key, obj *alpha.UrlMap, m *cloud.MockAlphaUrlMaps) error { + _, err := m.Get(ctx, key) + if err != nil { + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "urlMaps") + obj.SelfLink = cloud.SelfLink(meta.VersionAlpha, projectID, "urlMaps", key) + + m.Objects[*key] = &cloud.MockUrlMapsObj{Obj: obj} + return nil +} + +// UpdateAlphaRegionURLMapHook defines the hook for updating an alpha UrlMap. +// It replaces the object with the same key in the mock with the updated object. +func UpdateAlphaRegionURLMapHook(ctx context.Context, key *meta.Key, obj *alpha.UrlMap, m *cloud.MockAlphaRegionUrlMaps) error { + _, err := m.Get(ctx, key) + if err != nil { + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "urlMaps") + obj.SelfLink = cloud.SelfLink(meta.VersionAlpha, projectID, "urlMaps", key) + + m.Objects[*key] = &cloud.MockRegionUrlMapsObj{Obj: obj} + return nil +} + +// UpdateBetaRegionURLMapHook defines the hook for updating an alpha UrlMap. +// It replaces the object with the same key in the mock with the updated object. +func UpdateBetaRegionURLMapHook(ctx context.Context, key *meta.Key, obj *alpha.UrlMap, m *cloud.MockBetaRegionUrlMaps) error { + _, err := m.Get(ctx, key) + if err != nil { + return err + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "urlMaps") + obj.SelfLink = cloud.SelfLink(meta.VersionAlpha, projectID, "urlMaps", key) + + m.Objects[*key] = &cloud.MockRegionUrlMapsObj{Obj: obj} + return nil +} + +// SetTargetGlobalForwardingRuleHook defines the hook for setting the target proxy for a GlobalForwardingRule. +func SetTargetGlobalForwardingRuleHook(ctx context.Context, key *meta.Key, obj *ga.TargetReference, m *cloud.MockGlobalForwardingRules) error { + fw, err := m.Get(ctx, key) + if err != nil { + return err + } + + fw.Target = obj.Target + return nil +} + +// SetTargetForwardingRuleHook defines the hook for setting the target proxy for a ForwardingRule. +func SetTargetForwardingRuleHook(ctx context.Context, key *meta.Key, obj *ga.TargetReference, m *cloud.MockForwardingRules) error { + fw, err := m.Get(ctx, key) + if err != nil { + return err + } + + fw.Target = obj.Target + return nil +} + +// SetTargetAlphaForwardingRuleHook defines the hook for setting the target proxy for an Alpha ForwardingRule. +func SetTargetAlphaForwardingRuleHook(ctx context.Context, key *meta.Key, obj *alpha.TargetReference, m *cloud.MockAlphaForwardingRules) error { + fw, err := m.Get(ctx, key) + if err != nil { + return err + } + + fw.Target = obj.Target + return nil +} + +// SetTargetBetaForwardingRuleHook defines the hook for setting the target proxy for an Alpha ForwardingRule. 
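These hooks are meant to be plugged into the generated mocks in tests. A hedged sketch of that wiring follows; the MockGCE field names and the <Method>Hook convention are assumptions based on the generator's conventions, not shown in this diff.

package main

import (
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock"
)

func main() {
	m := cloud.NewMockGCE(&cloud.SingleProjectRouter{ID: "test-project"})

	// Field and hook names follow the generator's <Service>.<Method>Hook pattern (assumption).
	m.MockUrlMaps.UpdateHook = mock.UpdateURLMapHook
	m.MockAlphaUrlMaps.UpdateHook = mock.UpdateAlphaURLMapHook
	m.MockForwardingRules.SetTargetHook = mock.SetTargetForwardingRuleHook
	m.MockGlobalForwardingRules.SetTargetHook = mock.SetTargetGlobalForwardingRuleHook

	// m would normally be handed to the code under test as its cloud.Cloud.
}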
+func SetTargetBetaForwardingRuleHook(ctx context.Context, key *meta.Key, obj *alpha.TargetReference, m *cloud.MockBetaForwardingRules) error { + fw, err := m.Get(ctx, key) + if err != nil { + return err + } + + fw.Target = obj.Target + return nil +} + +// SetTargetAlphaGlobalForwardingRuleHook defines the hook for setting the target proxy for an alpha GlobalForwardingRule. +func SetTargetAlphaGlobalForwardingRuleHook(ctx context.Context, key *meta.Key, ref *alpha.TargetReference, m *cloud.MockAlphaGlobalForwardingRules) error { + fw, err := m.Get(ctx, key) + if err != nil { + return err + } + + fw.Target = ref.Target + return nil +} + +// SetURLMapTargetHTTPProxyHook defines the hook for setting the url map for a TargetHttpProxy. +func SetURLMapTargetHTTPProxyHook(ctx context.Context, key *meta.Key, ref *ga.UrlMapReference, m *cloud.MockTargetHttpProxies) error { + tp, err := m.Get(ctx, key) + if err != nil { + return err + } + + tp.UrlMap = ref.UrlMap + return nil +} + +// SetURLMapTargetHTTPProxyHook defines the hook for setting the url map for a TargetHttpProxy. +func SetURLMapTargetHTTPSProxyHook(ctx context.Context, key *meta.Key, ref *ga.UrlMapReference, m *cloud.MockTargetHttpsProxies) error { + tp, err := m.Get(ctx, key) + if err != nil { + return err + } + + tp.UrlMap = ref.UrlMap + return nil +} + +// SetURLMapTargetHTTPProxyHook defines the hook for setting the url map for a TargetHttpProxy. +func SetURLMapAlphaRegionTargetHTTPSProxyHook(ctx context.Context, key *meta.Key, ref *alpha.UrlMapReference, m *cloud.MockAlphaRegionTargetHttpsProxies) error { + tp, err := m.Get(ctx, key) + if err != nil { + return err + } + + tp.UrlMap = ref.UrlMap + return nil +} + +// SetURLMapBetaRegionTargetHTTPProxyHook defines the hook for setting the url map for a TargetHttpProxy. +func SetURLMapBetaRegionTargetHTTPSProxyHook(ctx context.Context, key *meta.Key, ref *alpha.UrlMapReference, m *cloud.MockBetaRegionTargetHttpsProxies) error { + tp, err := m.Get(ctx, key) + if err != nil { + return err + } + + tp.UrlMap = ref.UrlMap + return nil +} + +// SetURLMapAlphaTargetHTTPProxyHook defines the hook for setting the url map for a TargetHttpProxy. +func SetURLMapAlphaTargetHTTPProxyHook(ctx context.Context, key *meta.Key, ref *alpha.UrlMapReference, m *cloud.MockAlphaTargetHttpProxies) error { + tp, err := m.Get(ctx, key) + if err != nil { + return err + } + + tp.UrlMap = ref.UrlMap + return nil +} + +// SetURLMapAlphaRegionTargetHTTPProxyHook defines the hook for setting the url map for a TargetHttpProxy. +func SetURLMapAlphaRegionTargetHTTPProxyHook(ctx context.Context, key *meta.Key, ref *alpha.UrlMapReference, m *cloud.MockAlphaRegionTargetHttpProxies) error { + tp, err := m.Get(ctx, key) + if err != nil { + return err + } + + tp.UrlMap = ref.UrlMap + return nil +} + +// SetURLMapBetaRegionTargetHTTPProxyHook defines the hook for setting the url map for a TargetHttpProxy. +func SetURLMapBetaRegionTargetHTTPProxyHook(ctx context.Context, key *meta.Key, ref *alpha.UrlMapReference, m *cloud.MockBetaRegionTargetHttpProxies) error { + tp, err := m.Get(ctx, key) + if err != nil { + return err + } + + tp.UrlMap = ref.UrlMap + return nil +} + +// SetSslCertificateTargetHTTPSProxyHook defines the hook for setting ssl certificates on a TargetHttpsProxy. 
+func SetSslCertificateTargetHTTPSProxyHook(ctx context.Context, key *meta.Key, req *ga.TargetHttpsProxiesSetSslCertificatesRequest, m *cloud.MockTargetHttpsProxies) error { + tp, err := m.Get(ctx, key) + if err != nil { + return err + } + + tp.SslCertificates = req.SslCertificates + return nil +} + +// SetSslCertificateAlphaTargetHTTPSProxyHook defines the hook for setting ssl certificates on a TargetHttpsProxy. +func SetSslCertificateAlphaTargetHTTPSProxyHook(ctx context.Context, key *meta.Key, req *alpha.TargetHttpsProxiesSetSslCertificatesRequest, m *cloud.MockAlphaTargetHttpsProxies) error { + tp, err := m.Get(ctx, key) + if err != nil { + return err + } + tp.SslCertificates = req.SslCertificates + return nil +} + +// SetSslCertificateAlphaRegionTargetHTTPSProxyHook defines the hook for setting ssl certificates on a TargetHttpsProxy. +func SetSslCertificateAlphaRegionTargetHTTPSProxyHook(ctx context.Context, key *meta.Key, req *alpha.TargetHttpsProxiesSetSslCertificatesRequest, m *cloud.MockAlphaRegionTargetHttpsProxies) error { + tp, err := m.Get(ctx, key) + if err != nil { + return err + } + + tp.SslCertificates = req.SslCertificates + return nil +} + +// SetSslCertificateBetaRegionTargetHTTPSProxyHook defines the hook for setting ssl certificates on a TargetHttpsProxy. +func SetSslCertificateBetaRegionTargetHTTPSProxyHook(ctx context.Context, key *meta.Key, req *alpha.TargetHttpsProxiesSetSslCertificatesRequest, m *cloud.MockBetaRegionTargetHttpsProxies) error { + tp, err := m.Get(ctx, key) + if err != nil { + return err + } + + tp.SslCertificates = req.SslCertificates + return nil +} + // InsertFirewallsUnauthorizedErrHook mocks firewall insertion. A forbidden error will be thrown as return. func InsertFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) (bool, error) { return true, &googleapi.Error{Code: http.StatusForbidden} diff --git a/vendor/github.com/Microsoft/go-winio/BUILD.bazel b/vendor/github.com/Microsoft/go-winio/BUILD.bazel index c6ef8bb862369..b2eb2623f495a 100644 --- a/vendor/github.com/Microsoft/go-winio/BUILD.bazel +++ b/vendor/github.com/Microsoft/go-winio/BUILD.bazel @@ -7,6 +7,7 @@ go_library( "ea.go", "file.go", "fileinfo.go", + "hvsock.go", "pipe.go", "privilege.go", "reparse.go", @@ -17,7 +18,9 @@ go_library( importmap = "k8s.io/kops/vendor/github.com/Microsoft/go-winio", importpath = "github.com/Microsoft/go-winio", visibility = ["//visibility:public"], - deps = select({ + deps = [ + "//vendor/github.com/Microsoft/go-winio/pkg/guid:go_default_library", + ] + select({ "@io_bazel_rules_go//go/platform:windows": [ "//vendor/golang.org/x/sys/windows:go_default_library", ], diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 4334ff1cbeeb1..0385e4108129b 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -16,6 +16,7 @@ import ( //sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort //sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus //sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = 
ws2_32.WSAGetOverlappedResult type atomicBool int32 @@ -79,6 +80,7 @@ type win32File struct { wg sync.WaitGroup wgLock sync.RWMutex closing atomicBool + socket bool readDeadline deadlineHandler writeDeadline deadlineHandler } @@ -109,7 +111,13 @@ func makeWin32File(h syscall.Handle) (*win32File, error) { } func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - return makeWin32File(h) + // If we return the result of makeWin32File directly, it can result in an + // interface-wrapped nil, rather than a nil interface value. + f, err := makeWin32File(h) + if err != nil { + return nil, err + } + return f, nil } // closeHandle closes the resources associated with a Win32 handle @@ -190,6 +198,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er if f.closing.isSet() { err = ErrFileClosed } + } else if err != nil && f.socket { + // err is from Win32. Query the overlapped structure to get the winsock error. + var bytes, flags uint32 + err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) } case <-timeout: cancelIoEx(f.handle, &c.o) @@ -265,6 +277,10 @@ func (f *win32File) Flush() error { return syscall.FlushFileBuffers(f.handle) } +func (f *win32File) Fd() uintptr { + return uintptr(f.handle) +} + func (d *deadlineHandler) set(deadline time.Time) error { d.setLock.Lock() defer d.setLock.Unlock() diff --git a/vendor/github.com/Microsoft/go-winio/go.mod b/vendor/github.com/Microsoft/go-winio/go.mod new file mode 100644 index 0000000000000..b3846826b40cf --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/go.mod @@ -0,0 +1,9 @@ +module github.com/Microsoft/go-winio + +go 1.12 + +require ( + github.com/pkg/errors v0.8.1 + github.com/sirupsen/logrus v1.4.1 + golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b +) diff --git a/vendor/github.com/Microsoft/go-winio/go.sum b/vendor/github.com/Microsoft/go-winio/go.sum new file mode 100644 index 0000000000000..babb4a70df918 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/go.sum @@ -0,0 +1,16 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git 
a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go new file mode 100644 index 0000000000000..dbfe790ee0049 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -0,0 +1,305 @@ +package winio + +import ( + "fmt" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const ( + afHvSock = 34 // AF_HYPERV + + socketError = ^uintptr(0) +) + +// An HvsockAddr is an address for a AF_HYPERV socket. +type HvsockAddr struct { + VMID guid.GUID + ServiceID guid.GUID +} + +type rawHvsockAddr struct { + Family uint16 + _ uint16 + VMID guid.GUID + ServiceID guid.GUID +} + +// Network returns the address's network name, "hvsock". +func (addr *HvsockAddr) Network() string { + return "hvsock" +} + +func (addr *HvsockAddr) String() string { + return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) +} + +// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. +func VsockServiceID(port uint32) guid.GUID { + g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g.Data1 = port + return g +} + +func (addr *HvsockAddr) raw() rawHvsockAddr { + return rawHvsockAddr{ + Family: afHvSock, + VMID: addr.VMID, + ServiceID: addr.ServiceID, + } +} + +func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { + addr.VMID = raw.VMID + addr.ServiceID = raw.ServiceID +} + +// HvsockListener is a socket listener for the AF_HYPERV address family. +type HvsockListener struct { + sock *win32File + addr HvsockAddr +} + +// HvsockConn is a connected socket of the AF_HYPERV address family. +type HvsockConn struct { + sock *win32File + local, remote HvsockAddr +} + +func newHvSocket() (*win32File, error) { + fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + f, err := makeWin32File(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + f.socket = true + return f, nil +} + +// ListenHvsock listens for connections on the specified hvsock address. +func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { + l := &HvsockListener{addr: *addr} + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("listen", err) + } + sa := addr.raw() + err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("socket", err)) + } + err = syscall.Listen(sock.handle, 16) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("listen", err)) + } + return &HvsockListener{sock: sock, addr: *addr}, nil +} + +func (l *HvsockListener) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} +} + +// Addr returns the listener's network address. +func (l *HvsockListener) Addr() net.Addr { + return &l.addr +} + +// Accept waits for the next connection and returns it. +func (l *HvsockListener) Accept() (_ net.Conn, err error) { + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("accept", err) + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := l.sock.prepareIo() + if err != nil { + return nil, l.opErr("accept", err) + } + defer l.sock.wg.Done() + + // AcceptEx, per documentation, requires an extra 16 bytes per address. 
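A minimal, Windows-only sketch of serving on an AF_HYPERV address with the new listener API. Treating the zero VMID as a wildcard and the choice of port 5000 are assumptions; this version of the package ships no dialer, as the commented-out DialHvsock further below notes.

// +build windows

package main

import (
	"fmt"
	"io"
	"os"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// VsockServiceID maps an AF_VSOCK-style port into the hvsock service GUID range.
	addr := &winio.HvsockAddr{
		// VMID left as the zero GUID, commonly treated as a wildcard (assumption).
		ServiceID: winio.VsockServiceID(5000),
	}

	l, err := winio.ListenHvsock(addr)
	if err != nil {
		fmt.Fprintln(os.Stderr, "listen:", err)
		return
	}
	defer l.Close()

	conn, err := l.Accept()
	if err != nil {
		fmt.Fprintln(os.Stderr, "accept:", err)
		return
	}
	defer conn.Close()

	// Echo until the peer closes its write side.
	io.Copy(conn, conn)
}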
+ const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) + var addrbuf [addrlen * 2]byte + + var bytes uint32 + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) + _, err = l.sock.asyncIo(c, nil, bytes, err) + if err != nil { + return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) + } + conn := &HvsockConn{ + sock: sock, + } + conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) + conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + sock = nil + return conn, nil +} + +// Close closes the listener, causing any pending Accept calls to fail. +func (l *HvsockListener) Close() error { + return l.sock.Close() +} + +/* Need to finish ConnectEx handling +func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { + sock, err := newHvSocket() + if err != nil { + return nil, err + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := sock.prepareIo() + if err != nil { + return nil, err + } + defer sock.wg.Done() + var bytes uint32 + err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) + _, err = sock.asyncIo(ctx, c, nil, bytes, err) + if err != nil { + return nil, err + } + conn := &HvsockConn{ + sock: sock, + remote: *addr, + } + sock = nil + return conn, nil +} +*/ + +func (conn *HvsockConn) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} +} + +func (conn *HvsockConn) Read(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("read", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var flags, bytes uint32 + err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsarecv", err) + } + return 0, conn.opErr("read", err) + } else if n == 0 { + err = io.EOF + } + return n, err +} + +func (conn *HvsockConn) Write(b []byte) (int, error) { + t := 0 + for len(b) != 0 { + n, err := conn.write(b) + if err != nil { + return t + n, err + } + t += n + b = b[n:] + } + return t, nil +} + +func (conn *HvsockConn) write(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("write", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var bytes uint32 + err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsasend", err) + } + return 0, conn.opErr("write", err) + } + return n, err +} + +// Close closes the socket connection, failing any pending read or write calls. +func (conn *HvsockConn) Close() error { + return conn.sock.Close() +} + +func (conn *HvsockConn) shutdown(how int) error { + err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) + if err != nil { + return os.NewSyscallError("shutdown", err) + } + return nil +} + +// CloseRead shuts down the read end of the socket. 
+func (conn *HvsockConn) CloseRead() error { + err := conn.shutdown(syscall.SHUT_RD) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// CloseWrite shuts down the write end of the socket, notifying the other endpoint that +// no more data will be written. +func (conn *HvsockConn) CloseWrite() error { + err := conn.shutdown(syscall.SHUT_WR) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// LocalAddr returns the local address of the connection. +func (conn *HvsockConn) LocalAddr() net.Addr { + return &conn.local +} + +// RemoteAddr returns the remote address of the connection. +func (conn *HvsockConn) RemoteAddr() net.Addr { + return &conn.remote +} + +// SetDeadline implements the net.Conn SetDeadline method. +func (conn *HvsockConn) SetDeadline(t time.Time) error { + conn.SetReadDeadline(t) + conn.SetWriteDeadline(t) + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (conn *HvsockConn) SetReadDeadline(t time.Time) error { + return conn.sock.SetReadDeadline(t) +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { + return conn.sock.SetWriteDeadline(t) +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index d99eedb6489b0..d6a46f6a246f8 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -3,10 +3,13 @@ package winio import ( + "context" "errors" + "fmt" "io" "net" "os" + "runtime" "syscall" "time" "unsafe" @@ -18,6 +21,48 @@ import ( //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl + +type ioStatusBlock struct { + Status, Information uintptr +} + +type objectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *unicodeString + Attributes uintptr + SecurityDescriptor *securityDescriptor + SecurityQoS uintptr +} + +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer uintptr +} + +type securityDescriptor struct { + Revision byte + Sbz1 byte + Control uint16 + Owner uintptr + Group uintptr + Sacl uintptr + Dacl uintptr +} + +type ntstatus int32 + +func (status ntstatus) Err() error { + if status >= 0 { + return nil + } + return rtlNtStatusToDosError(status) +} const ( cERROR_PIPE_BUSY = syscall.Errno(231) @@ -25,21 +70,20 @@ const ( cERROR_PIPE_CONNECTED = syscall.Errno(535) cERROR_SEM_TIMEOUT = 
syscall.Errno(121) - cPIPE_ACCESS_DUPLEX = 0x3 - cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000 - cSECURITY_SQOS_PRESENT = 0x100000 - cSECURITY_ANONYMOUS = 0 + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 - cPIPE_REJECT_REMOTE_CLIENTS = 0x8 + cPIPE_TYPE_MESSAGE = 4 - cPIPE_UNLIMITED_INSTANCES = 255 + cPIPE_READMODE_MESSAGE = 2 - cNMPWAIT_USE_DEFAULT_WAIT = 0 - cNMPWAIT_NOWAIT = 1 + cFILE_OPEN = 1 + cFILE_CREATE = 2 - cPIPE_TYPE_MESSAGE = 4 + cFILE_PIPE_MESSAGE_TYPE = 1 + cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 - cPIPE_READMODE_MESSAGE = 2 + cSE_DACL_PRESENT = 4 ) var ( @@ -137,9 +181,30 @@ func (s pipeAddress) String() string { return string(s) } +// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. +func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) { + for { + select { + case <-ctx.Done(): + return syscall.Handle(0), ctx.Err() + default: + h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err == nil { + return h, nil + } + if err != cERROR_PIPE_BUSY { + return h, &os.PathError{Err: err, Op: "open", Path: *path} + } + // Wait 10 msec and try again. This is a rather simplistic + // view, as we always try each 10 milliseconds. + time.Sleep(time.Millisecond * 10) + } + } +} + // DialPipe connects to a named pipe by path, timing out if the connection // takes longer than the specified duration. If timeout is nil, then we use -// a default timeout of 5 seconds. (We do not use WaitNamedPipe.) +// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { var absTimeout time.Time if timeout != nil { @@ -147,23 +212,22 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { } else { absTimeout = time.Now().Add(time.Second * 2) } + ctx, _ := context.WithDeadline(context.Background(), absTimeout) + conn, err := DialPipeContext(ctx, path) + if err == context.DeadlineExceeded { + return nil, ErrTimeout + } + return conn, err +} + +// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` +// cancellation or timeout. +func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { var err error var h syscall.Handle - for { - h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) - if err != cERROR_PIPE_BUSY { - break - } - if time.Now().After(absTimeout) { - return nil, ErrTimeout - } - - // Wait 10 msec and try again. This is a rather simplistic - // view, as we always try each 10 milliseconds. 
- time.Sleep(time.Millisecond * 10) - } + h, err = tryDialPipe(ctx, &path) if err != nil { - return nil, &os.PathError{Op: "open", Path: path, Err: err} + return nil, err } var flags uint32 @@ -194,43 +258,87 @@ type acceptResponse struct { } type win32PipeListener struct { - firstHandle syscall.Handle - path string - securityDescriptor []byte - config PipeConfig - acceptCh chan (chan acceptResponse) - closeCh chan int - doneCh chan int + firstHandle syscall.Handle + path string + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int } -func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) { - var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + path16, err := syscall.UTF16FromString(path) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + var oa objectAttributes + oa.Length = unsafe.Sizeof(oa) + + var ntPath unicodeString + if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + defer localFree(ntPath.Buffer) + oa.ObjectName = &ntPath + + // The security descriptor is only needed for the first pipe. if first { - flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE + if sd != nil { + len := uint32(len(sd)) + sdb := localAlloc(0, len) + defer localFree(sdb) + copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) + oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) + } else { + // Construct the default named pipe security descriptor. + var dacl uintptr + if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { + return 0, fmt.Errorf("getting default named pipe ACL: %s", err) + } + defer localFree(dacl) + + sdb := &securityDescriptor{ + Revision: 1, + Control: cSE_DACL_PRESENT, + Dacl: dacl, + } + oa.SecurityDescriptor = sdb + } } - var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS + typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) if c.MessageMode { - mode |= cPIPE_TYPE_MESSAGE + typ |= cFILE_PIPE_MESSAGE_TYPE } - sa := &syscall.SecurityAttributes{} - sa.Length = uint32(unsafe.Sizeof(*sa)) - if securityDescriptor != nil { - len := uint32(len(securityDescriptor)) - sa.SecurityDescriptor = localAlloc(0, len) - defer localFree(sa.SecurityDescriptor) - copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor) + disposition := uint32(cFILE_OPEN) + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + if first { + disposition = cFILE_CREATE + // By not asking for read or write access, the named pipe file system + // will put this pipe into an initially disconnected state, blocking + // client connections until the next call with first == false. 
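For contrast with the fixed-timeout DialPipe, a short Windows-only sketch of the new context-aware dial path; the pipe name is hypothetical.

// +build windows

package main

import (
	"context"
	"fmt"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// The caller, not the library, now owns cancellation.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	conn, err := winio.DialPipeContext(ctx, `\\.\pipe\example-pipe`)
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	fmt.Fprintln(conn, "hello")
}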
+ access = syscall.SYNCHRONIZE } - h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa) + + timeout := int64(-50 * 10000) // 50ms + + var ( + h syscall.Handle + iosb ioStatusBlock + ) + err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } + + runtime.KeepAlive(ntPath) return h, nil } func (l *win32PipeListener) makeServerPipe() (*win32File, error) { - h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false) + h, err := makeServerPipeHandle(l.path, nil, &l.config, false) if err != nil { return nil, err } @@ -341,32 +449,13 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { if err != nil { return nil, err } - // Create a client handle and connect it. This results in the pipe - // instance always existing, so that clients see ERROR_PIPE_BUSY - // rather than ERROR_FILE_NOT_FOUND. This ties the first instance - // up so that no other instances can be used. This would have been - // cleaner if the Win32 API matched CreateFile with ConnectNamedPipe - // instead of CreateNamedPipe. (Apparently created named pipes are - // considered to be in listening state regardless of whether any - // active calls to ConnectNamedPipe are outstanding.) - h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) - if err != nil { - syscall.Close(h) - return nil, err - } - // Close the client handle. The server side of the instance will - // still be busy, leading to ERROR_PIPE_BUSY instead of - // ERROR_NOT_FOUND, as long as we don't close the server handle, - // or disconnect the client with DisconnectNamedPipe. - syscall.Close(h2) l := &win32PipeListener{ - firstHandle: h, - path: path, - securityDescriptor: sd, - config: *c, - acceptCh: make(chan (chan acceptResponse)), - closeCh: make(chan int), - doneCh: make(chan int), + firstHandle: h, + path: path, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), } go l.listenerRoutine() return l, nil diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/BUILD.bazel b/vendor/github.com/Microsoft/go-winio/pkg/guid/BUILD.bazel new file mode 100644 index 0000000000000..23417954ef4fe --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["guid.go"], + importmap = "k8s.io/kops/vendor/github.com/Microsoft/go-winio/pkg/guid", + importpath = "github.com/Microsoft/go-winio/pkg/guid", + visibility = ["//visibility:public"], + deps = ["//vendor/golang.org/x/sys/windows:go_default_library"], +) diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go new file mode 100644 index 0000000000000..586406577043f --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -0,0 +1,235 @@ +// Package guid provides a GUID type. The backing structure for a GUID is +// identical to that used by the golang.org/x/sys/windows GUID type. +// There are two main binary encodings used for a GUID, the big-endian encoding, +// and the Windows (mixed-endian) encoding. 
See here for details: +// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding +package guid + +import ( + "crypto/rand" + "crypto/sha1" + "encoding" + "encoding/binary" + "fmt" + "strconv" + + "golang.org/x/sys/windows" +) + +// Variant specifies which GUID variant (or "type") of the GUID. It determines +// how the entirety of the rest of the GUID is interpreted. +type Variant uint8 + +// The variants specified by RFC 4122. +const ( + // VariantUnknown specifies a GUID variant which does not conform to one of + // the variant encodings specified in RFC 4122. + VariantUnknown Variant = iota + VariantNCS + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// Version specifies how the bits in the GUID were generated. For instance, a +// version 4 GUID is randomly generated, and a version 5 is generated from the +// hash of an input string. +type Version uint8 + +var _ = (encoding.TextMarshaler)(GUID{}) +var _ = (encoding.TextUnmarshaler)(&GUID{}) + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID + +// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. +func NewV4() (GUID, error) { + var b [16]byte + if _, err := rand.Read(b[:]); err != nil { + return GUID{}, err + } + + g := FromArray(b) + g.setVersion(4) // Version 4 means randomly generated. + g.setVariant(VariantRFC4122) + + return g, nil +} + +// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) +// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, +// and the sample code treats it as a series of bytes, so we do the same here. +// +// Some implementations, such as those found on Windows, treat the name as a +// big-endian UTF16 stream of bytes. If that is desired, the string can be +// encoded as such before being passed to this function. +func NewV5(namespace GUID, name []byte) (GUID, error) { + b := sha1.New() + namespaceBytes := namespace.ToArray() + b.Write(namespaceBytes[:]) + b.Write(name) + + a := [16]byte{} + copy(a[:], b.Sum(nil)) + + g := FromArray(a) + g.setVersion(5) // Version 5 means generated from a string. + g.setVariant(VariantRFC4122) + + return g, nil +} + +func fromArray(b [16]byte, order binary.ByteOrder) GUID { + var g GUID + g.Data1 = order.Uint32(b[0:4]) + g.Data2 = order.Uint16(b[4:6]) + g.Data3 = order.Uint16(b[6:8]) + copy(g.Data4[:], b[8:16]) + return g +} + +func (g GUID) toArray(order binary.ByteOrder) [16]byte { + b := [16]byte{} + order.PutUint32(b[0:4], g.Data1) + order.PutUint16(b[4:6], g.Data2) + order.PutUint16(b[6:8], g.Data3) + copy(b[8:16], g.Data4[:]) + return b +} + +// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. +func FromArray(b [16]byte) GUID { + return fromArray(b, binary.BigEndian) +} + +// ToArray returns an array of 16 bytes representing the GUID in big-endian +// encoding. +func (g GUID) ToArray() [16]byte { + return g.toArray(binary.BigEndian) +} + +// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. +func FromWindowsArray(b [16]byte) GUID { + return fromArray(b, binary.LittleEndian) +} + +// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows +// encoding. 
+func (g GUID) ToWindowsArray() [16]byte { + return g.toArray(binary.LittleEndian) +} + +func (g GUID) String() string { + return fmt.Sprintf( + "%08x-%04x-%04x-%04x-%012x", + g.Data1, + g.Data2, + g.Data3, + g.Data4[:2], + g.Data4[2:]) +} + +// FromString parses a string containing a GUID and returns the GUID. The only +// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +// format. +func FromString(s string) (GUID, error) { + if len(s) != 36 { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + + var g GUID + + data1, err := strconv.ParseUint(s[0:8], 16, 32) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data1 = uint32(data1) + + data2, err := strconv.ParseUint(s[9:13], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data2 = uint16(data2) + + data3, err := strconv.ParseUint(s[14:18], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data3 = uint16(data3) + + for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { + v, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data4[i] = uint8(v) + } + + return g, nil +} + +func (g *GUID) setVariant(v Variant) { + d := g.Data4[0] + switch v { + case VariantNCS: + d = (d & 0x7f) + case VariantRFC4122: + d = (d & 0x3f) | 0x80 + case VariantMicrosoft: + d = (d & 0x1f) | 0xc0 + case VariantFuture: + d = (d & 0x0f) | 0xe0 + case VariantUnknown: + fallthrough + default: + panic(fmt.Sprintf("invalid variant: %d", v)) + } + g.Data4[0] = d +} + +// Variant returns the GUID variant, as defined in RFC 4122. +func (g GUID) Variant() Variant { + b := g.Data4[0] + if b&0x80 == 0 { + return VariantNCS + } else if b&0xc0 == 0x80 { + return VariantRFC4122 + } else if b&0xe0 == 0xc0 { + return VariantMicrosoft + } else if b&0xe0 == 0xe0 { + return VariantFuture + } + return VariantUnknown +} + +func (g *GUID) setVersion(v Version) { + g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) +} + +// Version returns the GUID version, as defined in RFC 4122. +func (g GUID) Version() Version { + return Version((g.Data3 & 0xF000) >> 12) +} + +// MarshalText returns the textual representation of the GUID. +func (g GUID) MarshalText() ([]byte, error) { + return []byte(g.String()), nil +} + +// UnmarshalText takes the textual representation of a GUID, and unmarhals it +// into this GUID. 
+func (g *GUID) UnmarshalText(text []byte) error { + g2, err := FromString(string(text)) + if err != nil { + return err + } + *g = g2 + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go index 20d64cf41d0e7..5cb52bc746254 100644 --- a/vendor/github.com/Microsoft/go-winio/syscall.go +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -1,3 +1,3 @@ package winio -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 3f527639a47f6..e26b01fafb253 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -1,4 +1,4 @@ -// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT +// Code generated by 'go generate'; DO NOT EDIT. package winio @@ -38,19 +38,25 @@ func errnoErr(e syscall.Errno) error { var ( modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") procCancelIoEx = modkernel32.NewProc("CancelIoEx") procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") procCreateFileW = modkernel32.NewProc("CreateFileW") - procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") @@ -69,6 +75,7 @@ var ( procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") procBackupRead = modkernel32.NewProc("BackupRead") procBackupWrite = modkernel32.NewProc("BackupWrite") + procbind = modws2_32.NewProc("bind") ) func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { @@ -120,6 +127,24 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro return } +func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, 
uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) if r1 == 0 { @@ -176,27 +201,6 @@ func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityA return } -func waitNamedPipe(name string, timeout uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _waitNamedPipe(_p0, timeout) -} - -func _waitNamedPipe(name *uint16, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) if r1 == 0 { @@ -227,6 +231,32 @@ func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { return } +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + status = ntstatus(r0) + return +} + +func rtlNtStatusToDosError(status ntstatus) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + status = ntstatus(r0) + return +} + +func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + status = ntstatus(r0) + return +} + func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(accountName) @@ -518,3 +548,15 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p } return } + +func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + if e1 != 0 { + err = 
errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml index facfc91c65a42..cf31e6af6d50f 100644 --- a/vendor/github.com/PuerkitoBio/purell/.travis.yml +++ b/vendor/github.com/PuerkitoBio/purell/.travis.yml @@ -1,7 +1,12 @@ language: go go: - - 1.4 - - 1.5 - - 1.6 + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - "1.10.x" + - "1.11.x" - tip diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md index 09e8a32cbe995..07de0c4986684 100644 --- a/vendor/github.com/PuerkitoBio/purell/README.md +++ b/vendor/github.com/PuerkitoBio/purell/README.md @@ -4,7 +4,7 @@ Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. -[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell) +[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) ## Install @@ -12,6 +12,7 @@ Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. ## Changelog +* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor). * **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121). * **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). * **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go index 645e1b76f7945..6d0fc190a1881 100644 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ b/vendor/github.com/PuerkitoBio/purell/purell.go @@ -299,7 +299,7 @@ func sortQuery(u *url.URL) { if len(q) > 0 { arKeys := make([]string, len(q)) i := 0 - for k, _ := range q { + for k := range q { arKeys[i] = k i++ } diff --git a/vendor/github.com/client9/misspell/.gitignore b/vendor/github.com/client9/misspell/.gitignore index aaca9b8f905dd..b1b707e326071 100644 --- a/vendor/github.com/client9/misspell/.gitignore +++ b/vendor/github.com/client9/misspell/.gitignore @@ -1,5 +1,6 @@ dist/ bin/ +vendor/ # editor turds *~ diff --git a/vendor/github.com/client9/misspell/.travis.yml b/vendor/github.com/client9/misspell/.travis.yml index 5c9f465caea4b..e63e6c2bdcc22 100644 --- a/vendor/github.com/client9/misspell/.travis.yml +++ b/vendor/github.com/client9/misspell/.travis.yml @@ -3,10 +3,18 @@ dist: trusty group: edge language: go go: - - 1.8.3 + - "1.10" git: depth: 1 + script: - - make -e ci -after_success: - - test -n "$TRAVIS_TAG" && ./scripts/goreleaser.sh + - ./scripts/travis.sh + +# calls goreleaser when a new tag is pushed +deploy: +- provider: script + skip_cleanup: true + script: curl -sL http://git.io/goreleaser | bash + on: + tags: true + condition: $TRAVIS_OS_NAME = linux diff --git a/vendor/github.com/client9/misspell/Dockerfile b/vendor/github.com/client9/misspell/Dockerfile index 8b1207c61be98..b8ea37b4c5af6 100644 --- a/vendor/github.com/client9/misspell/Dockerfile +++ b/vendor/github.com/client9/misspell/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.8.3-alpine +FROM golang:1.10.0-alpine # cache buster RUN echo 4 diff --git a/vendor/github.com/client9/misspell/Gopkg.lock b/vendor/github.com/client9/misspell/Gopkg.lock index 
d58726a0b92b9..90ed45115f957 100644 --- a/vendor/github.com/client9/misspell/Gopkg.lock +++ b/vendor/github.com/client9/misspell/Gopkg.lock @@ -3,13 +3,22 @@ [[projects]] name = "github.com/gobwas/glob" - packages = [".","compiler","match","syntax","syntax/ast","syntax/lexer","util/runes","util/strings"] - revision = "bea32b9cd2d6f55753d94a28e959b13f0244797a" - version = "v0.2.2" + packages = [ + ".", + "compiler", + "match", + "syntax", + "syntax/ast", + "syntax/lexer", + "util/runes", + "util/strings" + ] + revision = "5ccd90ef52e1e632236f7326478d4faa74f99438" + version = "v0.2.3" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "e481c81c87260652d25840e1d95d6e530331c095d64d84422a166f37ae0a77d3" + inputs-digest = "087ea4c49358ea8258ad9edfe514cd5ce9975c889c258e5ec7b5d2b720aae113" solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/client9/misspell/Gopkg.toml b/vendor/github.com/client9/misspell/Gopkg.toml index efabaa461d4d5..e9b8e6a45a9f0 100644 --- a/vendor/github.com/client9/misspell/Gopkg.toml +++ b/vendor/github.com/client9/misspell/Gopkg.toml @@ -1,7 +1,6 @@ - # Gopkg.toml example # -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html # for detailed Gopkg.toml documentation. # # required = ["github.com/user/thing/cmd/thing"] @@ -17,10 +16,19 @@ # source = "github.com/myfork/project2" # # [[override]] -# name = "github.com/x/y" -# version = "2.4.0" +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true [[constraint]] name = "github.com/gobwas/glob" - version = "0.2.2" + version = "0.2.3" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/github.com/client9/misspell/Makefile b/vendor/github.com/client9/misspell/Makefile index 0ccf74862c2b4..862ab77b0d639 100644 --- a/vendor/github.com/client9/misspell/Makefile +++ b/vendor/github.com/client9/misspell/Makefile @@ -4,25 +4,15 @@ install: ## install misspell into GOPATH/bin go install ./cmd/misspell build: hooks ## build and lint misspell - go install ./cmd/misspell - gometalinter \ - --vendor \ - --deadline=60s \ - --disable-all \ - --enable=vet \ - --enable=golint \ - --enable=gofmt \ - --enable=goimports \ - --enable=gosimple \ - --enable=staticcheck \ - --enable=ineffassign \ - --exclude=/usr/local/go/src/net/lookup_unix.go \ - ./... - go test . + ./scripts/build.sh test: ## run all tests go test . +# real publishing is done only by travis +publish: ## test goreleaser + ./scripts/goreleaser-dryrun.sh + # the grep in line 2 is to remove misspellings in the spelling dictionary # that trigger false positives!! falsepositives: /scowl-wl diff --git a/vendor/github.com/client9/misspell/RELEASE-HOWTO.md b/vendor/github.com/client9/misspell/RELEASE-HOWTO.md index 7caf7ee16312c..55b52d962e943 100644 --- a/vendor/github.com/client9/misspell/RELEASE-HOWTO.md +++ b/vendor/github.com/client9/misspell/RELEASE-HOWTO.md @@ -5,21 +5,34 @@ since I forget. 1. Review existing tags and pick new release number - ```bash + ```sh git tag ``` 2. Tag locally - ```bash + ```sh git tag -a v0.1.0 -m "First release" ``` -3. Push + If things get screwed up, delete the tag with + + ```sh + git tag -d v0.1.0 + ``` + +3. Test goreleaser + + TODO: how to install goreleaser + + ```sh + ./scripts/goreleaser-dryrun.sh + ``` + +4. Push ```bash git push origin v0.1.0 ``` -4. Verify release and edit notes. 
See https://github.com/client9/misspell/releases - +5. Verify release and edit notes. See https://github.com/client9/misspell/releases diff --git a/vendor/github.com/client9/misspell/goreleaser.yml b/vendor/github.com/client9/misspell/goreleaser.yml index 2bd738f8b08fd..560cb3810c4a4 100644 --- a/vendor/github.com/client9/misspell/goreleaser.yml +++ b/vendor/github.com/client9/misspell/goreleaser.yml @@ -1,22 +1,26 @@ # goreleaser.yml # https://github.com/goreleaser/goreleaser -build: - main: cmd/misspell/main.go - binary: misspell - ldflags: -s -w -X main.version={{.Version}} - goos: - - darwin - - linux - - windows - goarch: - - amd64 - env: - - CGO_ENABLED=0 - ignore: - - goos: darwin - goarch: 386 - - goos: windows - goarch: 386 + +project_name: misspell + +builds: + - + main: cmd/misspell/main.go + binary: misspell + ldflags: -s -w -X main.version={{.Version}} + goos: + - darwin + - linux + - windows + goarch: + - amd64 + env: + - CGO_ENABLED=0 + ignore: + - goos: darwin + goarch: 386 + - goos: windows + goarch: 386 archive: name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}" @@ -24,6 +28,11 @@ archive: amd64: 64bit 386: 32bit darwin: mac + files: + - none* + +checksum: + name_template: "{{ .ProjectName }}_{{ .Version }}_checksums.txt" snapshot: - name_template: SNAPSHOT-{{.Commit}} + name_template: "SNAPSHOT-{{.Commit}}" diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go index c2bf7cfd5612c..60a7bc7ef3aab 100644 --- a/vendor/github.com/coreos/etcd/version/version.go +++ b/vendor/github.com/coreos/etcd/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. MinClusterVersion = "3.0.0" - Version = "3.3.13" + Version = "3.3.17" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE new file mode 100644 index 0000000000000..23a0ada2fbb56 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go index 110fc23e1520b..76cf4852c769e 100644 --- a/vendor/github.com/coreos/go-semver/semver/semver.go +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -19,6 +19,7 @@ import ( "bytes" "errors" "fmt" + "regexp" "strconv" "strings" ) @@ -76,6 +77,14 @@ func (v *Version) Set(version string) error { return fmt.Errorf("%s is not in dotted-tri format", version) } + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) + } + + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } + parsed := make([]int64, 3, 3) for i, v := range dotParts[:3] { @@ -224,6 +233,13 @@ func recursivePreReleaseCompare(versionA []string, versionB []string) int { bInt = true } + // Numeric identifiers always have lower precedence than non-numeric identifiers. 
+ if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + // Handle Integer Comparison if aInt && bInt { if aI > bI { @@ -266,3 +282,15 @@ func (v *Version) BumpPatch() { v.PreRelease = PreRelease("") v.Metadata = "" } + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml index bde823d8ab733..1027f56cd94db 100644 --- a/vendor/github.com/dgrijalva/jwt-go/.travis.yml +++ b/vendor/github.com/dgrijalva/jwt-go/.travis.yml @@ -1,8 +1,13 @@ language: go +script: + - go vet ./... + - go test -v ./... + go: - 1.3 - 1.4 - 1.5 - 1.6 + - 1.7 - tip diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md index fd62e94905586..7fc1f793cbc4e 100644 --- a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md +++ b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md @@ -56,8 +56,9 @@ This simple parsing example: is directly mapped to: ```go - if token, err := request.ParseFromRequest(tokenString, request.OAuth2Extractor, req, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) } ``` diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md index f48365fafbfda..d358d881b8dde 100644 --- a/vendor/github.com/dgrijalva/jwt-go/README.md +++ b/vendor/github.com/dgrijalva/jwt-go/README.md @@ -1,11 +1,15 @@ -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) +# jwt-go [![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) +[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) -**BREAKING CHANGES:*** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. +**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. 
If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3. -**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect. +**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. ## What the heck is a JWT? @@ -37,7 +41,7 @@ Here's an example of an extension that integrates with the Google App Engine sig ## Compliance -This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: +This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: * In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. @@ -47,7 +51,10 @@ This library is considered production ready. Feedback and feature requests are This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning. +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. + +**BREAKING CHANGES:*** +* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. ## Usage Tips @@ -68,18 +75,26 @@ Symmetric signing methods, such as HSA, use only a single secret. This is probab Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. 
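As a minimal sketch of the symmetric case, assuming a hypothetical `example-secret` key and illustrative claims (neither comes from this library's documentation), an HS256 token can be signed and then verified while checking the `alg` family as recommended above:

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// Hypothetical shared secret; with HMAC the same []byte both signs and verifies.
	secret := []byte("example-secret")

	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"user": "alice",
		"exp":  time.Now().Add(time.Hour).Unix(),
	})
	signed, err := token.SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Check the alg family before returning the key, per the security notice above.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return secret, nil
	})
	if err != nil || !parsed.Valid {
		panic(err)
	}
	fmt.Println(parsed.Claims.(jwt.MapClaims)["user"])
}
```

For the asymmetric methods the only change is the key material: sign with an `*rsa.PrivateKey` (or `*ecdsa.PrivateKey`) and return the matching public key from the key function.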
+### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + ### JWT and OAuth It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. Without going too far down the rabbit hole, here's a description of the interaction of these technologies: -* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. * OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. * Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. - + ## More Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in to documentation. +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md index b605b45093095..6370298313a63 100644 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -1,5 +1,18 @@ ## `jwt-go` Version History +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. 
+* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + #### 3.0.0 * **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go index 2f59a22236383..f977381240e39 100644 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go @@ -14,6 +14,7 @@ var ( ) // Implements the ECDSA family of signing methods signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification type SigningMethodECDSA struct { Name string Hash crypto.Hash diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go index 662df19d4e756..1c93024aad2ea 100644 --- a/vendor/github.com/dgrijalva/jwt-go/errors.go +++ b/vendor/github.com/dgrijalva/jwt-go/errors.go @@ -51,13 +51,9 @@ func (e ValidationError) Error() string { } else { return "token is invalid" } - return e.Inner.Error() } // No errors func (e *ValidationError) valid() bool { - if e.Errors > 0 { - return false - } - return true + return e.Errors == 0 } diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go index c229919254556..addbe5d401829 100644 --- a/vendor/github.com/dgrijalva/jwt-go/hmac.go +++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go @@ -7,6 +7,7 @@ import ( ) // Implements the HMAC-SHA family of signing methods signing methods +// Expects key type of []byte for both signing and validation type SigningMethodHMAC struct { Name string Hash crypto.Hash @@ -90,5 +91,5 @@ func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, return EncodeSegment(hasher.Sum(nil)), nil } - return "", ErrInvalidKey + return "", ErrInvalidKeyType } diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go index 7bf1c4ea08422..d6901d9adb52b 100644 --- a/vendor/github.com/dgrijalva/jwt-go/parser.go +++ b/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -21,55 +21,9 @@ func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { } func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - parts := strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) - } - - var err error - token := &Token{Raw: tokenString} - - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) - } - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - token.Claims = claims - - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.UseJSONNumber { - dec.UseNumber() - } - // JSON Decode. 
Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) - } else { - err = dec.Decode(&claims) - } - // Handle decode error + token, parts, err := p.ParseUnverified(tokenString, claims) if err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) - } - } else { - return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + return token, err } // Verify signing method is in the required set @@ -96,6 +50,9 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf } if key, err = keyFunc(token); err != nil { // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} } @@ -129,3 +86,63 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf return token, vErr } + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. +func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. 
Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go index 0ae0b1984e5ae..e4caf1ca4a116 100644 --- a/vendor/github.com/dgrijalva/jwt-go/rsa.go +++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go @@ -7,6 +7,7 @@ import ( ) // Implements the RSA family of signing methods signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation type SigningMethodRSA struct { Name string Hash crypto.Hash @@ -44,7 +45,7 @@ func (m *SigningMethodRSA) Alg() string { } // Implements the Verify method from SigningMethod -// For this signing method, must be an rsa.PublicKey structure. +// For this signing method, must be an *rsa.PublicKey structure. func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { var err error @@ -73,7 +74,7 @@ func (m *SigningMethodRSA) Verify(signingString, signature string, key interface } // Implements the Sign method from SigningMethod -// For this signing method, must be an rsa.PrivateKey structure. +// For this signing method, must be an *rsa.PrivateKey structure. 
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { var rsaKey *rsa.PrivateKey var ok bool diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go index 213a90dbbf8d5..a5ababf956c4f 100644 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -39,6 +39,38 @@ func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { return pkey, nil } +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + // Parse PEM encoded PKCS1 or PKCS8 public key func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { var err error diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS deleted file mode 100644 index 252ff8aa2a68d..0000000000000 --- a/vendor/github.com/docker/distribution/AUTHORS +++ /dev/null @@ -1,182 +0,0 @@ -a-palchikov -Aaron Lehmann -Aaron Schlesinger -Aaron Vinson -Adam Duke -Adam Enger -Adrian Mouat -Ahmet Alp Balkan -Alex Chan -Alex Elman -Alexey Gladkov -allencloud -amitshukla -Amy Lindburg -Andrew Hsu -Andrew Meredith -Andrew T Nguyen -Andrey Kostov -Andy Goldstein -Anis Elleuch -Anton Tiurin -Antonio Mercado -Antonio Murdaca -Anusha Ragunathan -Arien Holthuizen -Arnaud Porterie -Arthur Baars -Asuka Suzuki -Avi Miller -Ayose Cazorla -BadZen -Ben Bodenmiller -Ben Firshman -bin liu -Brian Bland -burnettk -Carson A -Cezar Sa Espinola -Charles Smith -Chris Dillon -cuiwei13 -cyli -Daisuke Fujita -Daniel Huhn -Darren Shepherd -Dave Trombley -Dave Tucker -David Lawrence -David Verhasselt -David Xia -davidli -Dejan Golja -Derek McGowan -Diogo Mónica -DJ Enriquez -Donald Huang -Doug Davis -Edgar Lee -Eric Yang -Fabio Berchtold -Fabio Huser -farmerworking -Felix Yan -Florentin Raud -Frank Chen -Frederick F. 
Kautz IV -gabriell nascimento -Gleb Schukin -harche -Henri Gomez -Hu Keping -Hua Wang -HuKeping -Ian Babrou -igayoso -Jack Griffin -James Findley -Jason Freidman -Jason Heiss -Jeff Nickoloff -Jess Frazelle -Jessie Frazelle -jhaohai -Jianqing Wang -Jihoon Chung -Joao Fernandes -John Mulhausen -John Starks -Jon Johnson -Jon Poler -Jonathan Boulle -Jordan Liggitt -Josh Chorlton -Josh Hawn -Julien Fernandez -Ke Xu -Keerthan Mala -Kelsey Hightower -Kenneth Lim -Kenny Leung -Li Yi -Liu Hua -liuchang0812 -Lloyd Ramey -Louis Kottmann -Luke Carpenter -Marcus Martins -Mary Anthony -Matt Bentley -Matt Duch -Matt Moore -Matt Robenolt -Matthew Green -Michael Prokop -Michal Minar -Michal Minář -Mike Brown -Miquel Sabaté -Misty Stanley-Jones -Misty Stanley-Jones -Morgan Bauer -moxiegirl -Nathan Sullivan -nevermosby -Nghia Tran -Nikita Tarasov -Noah Treuhaft -Nuutti Kotivuori -Oilbeater -Olivier Gambier -Olivier Jacques -Omer Cohen -Patrick Devine -Phil Estes -Philip Misiowiec -Pierre-Yves Ritschard -Qiao Anran -Randy Barlow -Richard Scothern -Rodolfo Carvalho -Rusty Conover -Sean Boran -Sebastiaan van Stijn -Sebastien Coavoux -Serge Dubrouski -Sharif Nassar -Shawn Falkner-Horine -Shreyas Karnik -Simon Thulbourn -spacexnice -Spencer Rinehart -Stan Hu -Stefan Majewsky -Stefan Weil -Stephen J Day -Sungho Moon -Sven Dowideit -Sylvain Baubeau -Ted Reed -tgic -Thomas Sjögren -Tianon Gravi -Tibor Vass -Tonis Tiigi -Tony Holdstock-Brown -Trevor Pounds -Troels Thomsen -Victor Vieux -Victoria Bialas -Vincent Batts -Vincent Demeester -Vincent Giersch -W. Trevor King -weiyuan.yl -xg.song -xiekeyang -Yann ROBERT -yaoyao.xyy -yuexiao-wang -yuzou -zhouhaibing089 -姜继忠 diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go index 4d5f5ae63afdf..bb7e4e336950b 100644 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -113,7 +113,7 @@ func SplitProtoPort(rawPort string) (string, string) { } func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp"} { + for _, availableProto := range []string{"tcp", "udp", "sctp"} { if availableProto == proto { return true } diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go index 9ca974539a774..1ff81c333c369 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go @@ -4,7 +4,6 @@ package tlsconfig import ( "crypto/x509" - ) // SystemCertPool returns an new empty cert pool, diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 1b31bbb8b1b96..0ef3fdcb46906 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -46,8 +46,6 @@ var acceptedCBCCiphers = []uint16{ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, } // DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls @@ -65,22 +63,34 @@ var allTLSVersions = map[uint16]struct{}{ } // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. 
-func ServerDefault() *tls.Config { - return &tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, +func ServerDefault(ops ...func(*tls.Config)) *tls.Config { + tlsconfig := &tls.Config{ + // Avoid fallback by default to SSL protocols < TLS1.2 + MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, CipherSuites: DefaultServerAcceptedCiphers, } + + for _, op := range ops { + op(tlsconfig) + } + + return tlsconfig } // ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. -func ClientDefault() *tls.Config { - return &tls.Config{ +func ClientDefault(ops ...func(*tls.Config)) *tls.Config { + tlsconfig := &tls.Config{ // Prefer TLS1.2 as the client minimum MinVersion: tls.VersionTLS12, CipherSuites: clientCipherSuites, } + + for _, op := range ops { + op(tlsconfig) + } + + return tlsconfig } // certPool returns an X.509 certificate pool from `caFile`, the certificate file. diff --git a/vendor/github.com/emicklei/go-restful/BUILD.bazel b/vendor/github.com/emicklei/go-restful/BUILD.bazel index 0a731741d0a82..d66c15bcf37cf 100644 --- a/vendor/github.com/emicklei/go-restful/BUILD.bazel +++ b/vendor/github.com/emicklei/go-restful/BUILD.bazel @@ -15,12 +15,14 @@ go_library( "doc.go", "entity_accessors.go", "filter.go", + "json.go", "jsr311.go", "logger.go", "mime.go", "options_filter.go", "parameter.go", "path_expression.go", + "path_processor.go", "request.go", "response.go", "route.go", diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md index 0adca766fb859..e525296313e36 100644 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/CHANGES.md @@ -1,6 +1,56 @@ -Change history of go-restful -= +## Change history of go-restful + + +v2.9.5 +- fix panic in Response.WriteError if err == nil + +v2.9.4 + +- fix issue #400 , parsing mime type quality +- Route Builder added option for contentEncodingEnabled (#398) + +v2.9.3 + +- Avoid return of 415 Unsupported Media Type when request body is empty (#396) + +v2.9.2 + +- Reduce allocations in per-request methods to improve performance (#395) + +v2.9.1 + +- Fix issue with default responses and invalid status code 0. (#393) + +v2.9.0 + +- add per Route content encoding setting (overrides container setting) + +v2.8.0 + +- add Request.QueryParameters() +- add json-iterator (via build tag) +- disable vgo module (until log is moved) + +v2.7.1 + +- add vgo module + +v2.6.1 + +- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+) + +v2.6.0 + +- Make JSR 311 routing and path param processing consistent +- Adding description to RouteBuilder.Reads() +- Update example for Swagger12 and OpenAPI + +2017-09-13 + +- added route condition functions using `.If(func)` in route building. 
+ 2017-02-16 + - solved issue #304, make operation names unique 2017-01-30 diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md index cd1f2d0cc2ce0..f52c25acf6e4f 100644 --- a/vendor/github.com/emicklei/go-restful/README.md +++ b/vendor/github.com/emicklei/go-restful/README.md @@ -56,19 +56,33 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo - Content encoding (gzip,deflate) of request and response payloads - Automatic responses on OPTIONS (using a filter) - Automatic CORS request handling (using a filter) -- API declaration for Swagger UI (see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12),[go-restful-openapi](https://github.com/emicklei/go-restful-openapi)) +- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12)) - Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) - Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) - Configurable (trace) logging - Customizable gzip/deflate readers and writers using CompressorProvider registration - -### Resources + +## How to customize +There are several hooks to customize the behavior of the go-restful package. + +- Router algorithm +- Panic recovery +- JSON decoder +- Trace logging +- Compression +- Encoders for other serializers +- Use [jsoniter](https://github.com/json-iterator/go) by build this package using a tag, e.g. `go build -tags=jsoniter .` + +TODO: write examples of these. + +## Resources - [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) - [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) - [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) +- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia) - [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora) Type ```git shortlog -s``` for a full list of contributors. -© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome. \ No newline at end of file +© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome. diff --git a/vendor/github.com/emicklei/go-restful/compressors.go b/vendor/github.com/emicklei/go-restful/compressors.go index cb32f7ef53258..9db4a8c8e979e 100644 --- a/vendor/github.com/emicklei/go-restful/compressors.go +++ b/vendor/github.com/emicklei/go-restful/compressors.go @@ -15,20 +15,20 @@ type CompressorProvider interface { // Before using it, call Reset(). AcquireGzipWriter() *gzip.Writer - // Releases an aqcuired *gzip.Writer. + // Releases an acquired *gzip.Writer. ReleaseGzipWriter(w *gzip.Writer) // Returns a *gzip.Reader which needs to be released later. AcquireGzipReader() *gzip.Reader - // Releases an aqcuired *gzip.Reader. + // Releases an acquired *gzip.Reader. ReleaseGzipReader(w *gzip.Reader) // Returns a *zlib.Writer which needs to be released later. // Before using it, call Reset(). AcquireZlibWriter() *zlib.Writer - // Releases an aqcuired *zlib.Writer. + // Releases an acquired *zlib.Writer. ReleaseZlibWriter(w *zlib.Writer) } @@ -45,7 +45,7 @@ func CurrentCompressorProvider() CompressorProvider { return currentCompressorProvider } -// CompressorProvider sets the actual provider of compressors (zlib or gzip). 
+// SetCompressorProvider sets the actual provider of compressors (zlib or gzip). func SetCompressorProvider(p CompressorProvider) { if p == nil { panic("cannot set compressor provider to nil") diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go index 657d5b6ddd00a..061a8d7189b6e 100644 --- a/vendor/github.com/emicklei/go-restful/container.go +++ b/vendor/github.com/emicklei/go-restful/container.go @@ -97,7 +97,7 @@ func (c *Container) Add(service *WebService) *Container { // cannot have duplicate root paths for _, each := range c.webServices { if each.RootPath() == service.RootPath() { - log.Printf("[restful] WebService with duplicate root path detected:['%v']", each) + log.Printf("WebService with duplicate root path detected:['%v']", each) os.Exit(1) } } @@ -139,8 +139,8 @@ func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) boo func (c *Container) Remove(ws *WebService) error { if c.ServeMux == http.DefaultServeMux { - errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) - log.Printf(errMsg) + errMsg := fmt.Sprintf("cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) + log.Print(errMsg) return errors.New(errMsg) } c.webServicesLock.Lock() @@ -168,7 +168,7 @@ func (c *Container) Remove(ws *WebService) error { // This may be a security issue as it exposes sourcecode information. func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) { var buffer bytes.Buffer - buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason)) + buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason)) for i := 2; ; i += 1 { _, file, line, ok := runtime.Caller(i) if !ok { @@ -220,31 +220,37 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R }() } + // Find best match Route ; err is non nil if no match was found + var webService *WebService + var route *Route + var err error + func() { + c.webServicesLock.RLock() + defer c.webServicesLock.RUnlock() + webService, route, err = c.router.SelectRoute( + c.webServices, + httpRequest) + }() + // Detect if compression is needed // assume without compression, test for override - if c.contentEncodingEnabled { + contentEncodingEnabled := c.contentEncodingEnabled + if route != nil && route.contentEncodingEnabled != nil { + contentEncodingEnabled = *route.contentEncodingEnabled + } + if contentEncodingEnabled { doCompress, encoding := wantsCompressedResponse(httpRequest) if doCompress { var err error writer, err = NewCompressingResponseWriter(httpWriter, encoding) if err != nil { - log.Print("[restful] unable to install compressor: ", err) + log.Print("unable to install compressor: ", err) httpWriter.WriteHeader(http.StatusInternalServerError) return } } } - // Find best match Route ; err is non nil if no match was found - var webService *WebService - var route *Route - var err error - func() { - c.webServicesLock.RLock() - defer c.webServicesLock.RUnlock() - webService, route, err = c.router.SelectRoute( - c.webServices, - httpRequest) - }() + if err != nil { // a non-200 response has already been written // run container filters anyway ; they should not touch the response... 
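For context, the dispatch logic above only ever sees routes that were registered on a container. A minimal sketch of such a registration, using a hypothetical `findUser` handler and `/users/{id}` path rather than anything from this repository, looks like this:

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

// findUser is an illustrative handler; the {id} path parameter has already been
// extracted by the router's path processing before this function runs.
func findUser(req *restful.Request, resp *restful.Response) {
	id := req.PathParameter("id")
	resp.WriteEntity(map[string]string{"id": id})
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)
	ws.Route(ws.GET("/{id}").To(findUser))

	restful.Add(ws) // registers on the default container and http.DefaultServeMux
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

Requests to such a route flow through `Container.dispatch` shown above: the router selects the route, the path parameters are produced by the router's path processor, and the wrapped request and response are handed to the filter chain and then to the route function.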
@@ -259,7 +265,12 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer)) return } - wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest) + pathProcessor, routerProcessesPath := c.router.(PathProcessor) + if !routerProcessesPath { + pathProcessor = defaultPathProcessor{} + } + pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path) + wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams) // pass through filters (if any) if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 { // compose filter chain diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/curly.go index 79f1f5aa200e4..14d5b76bf082f 100644 --- a/vendor/github.com/emicklei/go-restful/curly.go +++ b/vendor/github.com/emicklei/go-restful/curly.go @@ -45,14 +45,14 @@ func (c CurlyRouter) SelectRoute( // selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { - candidates := sortableCurlyRoutes{} + candidates := make(sortableCurlyRoutes, 0, 8) for _, each := range ws.routes { matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens) if matches { candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? } } - sort.Sort(sort.Reverse(candidates)) + sort.Sort(candidates) return candidates } diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/curly_route.go index 296f94650e655..403dd3be947dc 100644 --- a/vendor/github.com/emicklei/go-restful/curly_route.go +++ b/vendor/github.com/emicklei/go-restful/curly_route.go @@ -11,6 +11,7 @@ type curlyRoute struct { staticCount int } +// sortableCurlyRoutes orders by most parameters and path elements first. type sortableCurlyRoutes []curlyRoute func (s *sortableCurlyRoutes) add(route curlyRoute) { @@ -18,6 +19,7 @@ func (s *sortableCurlyRoutes) add(route curlyRoute) { } func (s sortableCurlyRoutes) routes() (routes []Route) { + routes = make([]Route, 0, len(s)) for _, each := range s { routes = append(routes, each.route) // TODO change return type } @@ -31,22 +33,22 @@ func (s sortableCurlyRoutes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s sortableCurlyRoutes) Less(i, j int) bool { - ci := s[i] - cj := s[j] + a := s[j] + b := s[i] // primary key - if ci.staticCount < cj.staticCount { + if a.staticCount < b.staticCount { return true } - if ci.staticCount > cj.staticCount { + if a.staticCount > b.staticCount { return false } // secundary key - if ci.paramCount < cj.paramCount { + if a.paramCount < b.paramCount { return true } - if ci.paramCount > cj.paramCount { + if a.paramCount > b.paramCount { return false } - return ci.route.Path < cj.route.Path + return a.route.Path < b.route.Path } diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/entity_accessors.go index 6ecf6c7f897b1..66dfc824f55b7 100644 --- a/vendor/github.com/emicklei/go-restful/entity_accessors.go +++ b/vendor/github.com/emicklei/go-restful/entity_accessors.go @@ -5,7 +5,6 @@ package restful // that can be found in the LICENSE file. 
import ( - "encoding/json" "encoding/xml" "strings" "sync" @@ -128,7 +127,7 @@ type entityJSONAccess struct { // Read unmarshalls the value from JSON func (e entityJSONAccess) Read(req *Request, v interface{}) error { - decoder := json.NewDecoder(req.Request.Body) + decoder := NewDecoder(req.Request.Body) decoder.UseNumber() return decoder.Decode(v) } @@ -147,7 +146,7 @@ func writeJSON(resp *Response, status int, contentType string, v interface{}) er } if resp.prettyPrint { // pretty output must be created and written explicitly - output, err := json.MarshalIndent(v, " ", " ") + output, err := MarshalIndent(v, "", " ") if err != nil { return err } @@ -159,5 +158,5 @@ func writeJSON(resp *Response, status int, contentType string, v interface{}) er // not-so-pretty resp.Header().Set(HEADER_ContentType, contentType) resp.WriteHeader(status) - return json.NewEncoder(resp).Encode(v) + return NewEncoder(resp).Encode(v) } diff --git a/vendor/github.com/emicklei/go-restful/json.go b/vendor/github.com/emicklei/go-restful/json.go new file mode 100644 index 0000000000000..871165166a169 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/json.go @@ -0,0 +1,11 @@ +// +build !jsoniter + +package restful + +import "encoding/json" + +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) diff --git a/vendor/github.com/emicklei/go-restful/jsoniter.go b/vendor/github.com/emicklei/go-restful/jsoniter.go new file mode 100644 index 0000000000000..11b8f8ae7f17e --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/jsoniter.go @@ -0,0 +1,12 @@ +// +build jsoniter + +package restful + +import "github.com/json-iterator/go" + +var ( + json = jsoniter.ConfigCompatibleWithStandardLibrary + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go index 511444ac6854a..3ede1891eca78 100644 --- a/vendor/github.com/emicklei/go-restful/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/jsr311.go @@ -39,57 +39,106 @@ func (r RouterJSR311) SelectRoute( return dispatcher, route, ok } +// ExtractParameters is used to obtain the path parameters from the route using the same matching +// engine as the JSR 311 router. 
+func (r RouterJSR311) ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string { + webServiceExpr := webService.pathExpr + webServiceMatches := webServiceExpr.Matcher.FindStringSubmatch(urlPath) + pathParameters := r.extractParams(webServiceExpr, webServiceMatches) + routeExpr := route.pathExpr + routeMatches := routeExpr.Matcher.FindStringSubmatch(webServiceMatches[len(webServiceMatches)-1]) + routeParams := r.extractParams(routeExpr, routeMatches) + for key, value := range routeParams { + pathParameters[key] = value + } + return pathParameters +} + +func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) map[string]string { + params := map[string]string{} + for i := 1; i < len(matches); i++ { + if len(pathExpr.VarNames) >= i { + params[pathExpr.VarNames[i-1]] = matches[i] + } + } + return params +} + // http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { + candidates := make([]*Route, 0, 8) + for i, each := range routes { + ok := true + for _, fn := range each.If { + if !fn(httpRequest) { + ok = false + break + } + } + if ok { + candidates = append(candidates, &routes[i]) + } + } + if len(candidates) == 0 { + if trace { + traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes)) + } + return nil, NewError(http.StatusNotFound, "404: Not Found") + } + // http method - methodOk := []Route{} - for _, each := range routes { + previous := candidates + candidates = candidates[:0] + for _, each := range previous { if httpRequest.Method == each.Method { - methodOk = append(methodOk, each) + candidates = append(candidates, each) } } - if len(methodOk) == 0 { + if len(candidates) == 0 { if trace { - traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method) + traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method) } return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed") } - inputMediaOk := methodOk // content-type contentType := httpRequest.Header.Get(HEADER_ContentType) - inputMediaOk = []Route{} - for _, each := range methodOk { + previous = candidates + candidates = candidates[:0] + for _, each := range previous { if each.matchesContentType(contentType) { - inputMediaOk = append(inputMediaOk, each) + candidates = append(candidates, each) } } - if len(inputMediaOk) == 0 { + if len(candidates) == 0 { if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType) + traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType) + } + if httpRequest.ContentLength > 0 { + return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") } - return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") } // accept - outputMediaOk := []Route{} + previous = candidates + candidates = candidates[:0] accept := httpRequest.Header.Get(HEADER_Accept) if len(accept) == 0 { accept = "*/*" } - for _, each := range inputMediaOk { + for _, each := range previous { if each.matchesAccept(accept) { - outputMediaOk = append(outputMediaOk, each) + candidates = append(candidates, each) } } - if len(outputMediaOk) == 0 { + if len(candidates) == 0 { if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: 
%s\n", len(inputMediaOk), accept) + traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept) } return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable") } // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil - return &outputMediaOk[0], nil + return candidates[0], nil } // http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 diff --git a/vendor/github.com/emicklei/go-restful/logger.go b/vendor/github.com/emicklei/go-restful/logger.go index 3f1c4db86bca6..6595df0029667 100644 --- a/vendor/github.com/emicklei/go-restful/logger.go +++ b/vendor/github.com/emicklei/go-restful/logger.go @@ -21,7 +21,7 @@ func TraceLogger(logger log.StdLogger) { EnableTracing(logger != nil) } -// expose the setter for the global logger on the top-level package +// SetLogger exposes the setter for the global logger on the top-level package func SetLogger(customLogger log.StdLogger) { log.SetLogger(customLogger) } diff --git a/vendor/github.com/emicklei/go-restful/mime.go b/vendor/github.com/emicklei/go-restful/mime.go index d7ea2b6157949..33014471b998a 100644 --- a/vendor/github.com/emicklei/go-restful/mime.go +++ b/vendor/github.com/emicklei/go-restful/mime.go @@ -22,7 +22,10 @@ func insertMime(l []mime, e mime) []mime { return append(l, e) } +const qFactorWeightingKey = "q" + // sortedMimes returns a list of mime sorted (desc) by its specified quality. +// e.g. text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3 func sortedMimes(accept string) (sorted []mime) { for _, each := range strings.Split(accept, ",") { typeAndQuality := strings.Split(strings.Trim(each, " "), ";") @@ -30,14 +33,16 @@ func sortedMimes(accept string) (sorted []mime) { sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) } else { // take factor - parts := strings.Split(typeAndQuality[1], "=") - if len(parts) == 2 { - f, err := strconv.ParseFloat(parts[1], 64) + qAndWeight := strings.Split(typeAndQuality[1], "=") + if len(qAndWeight) == 2 && strings.Trim(qAndWeight[0], " ") == qFactorWeightingKey { + f, err := strconv.ParseFloat(qAndWeight[1], 64) if err != nil { traceLogger.Printf("unable to parse quality in %s, %v", each, err) } else { sorted = insertMime(sorted, mime{typeAndQuality[0], f}) } + } else { + sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) } } } diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/options_filter.go index 4514eadcfaa9a..5c1b34251c166 100644 --- a/vendor/github.com/emicklei/go-restful/options_filter.go +++ b/vendor/github.com/emicklei/go-restful/options_filter.go @@ -15,7 +15,15 @@ func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterCha chain.ProcessFilter(req, resp) return } - resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ",")) + + archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) + methods := strings.Join(c.computeAllowedMethods(req), ",") + origin := req.Request.Header.Get(HEADER_Origin) + + resp.AddHeader(HEADER_Allow, methods) + resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) + resp.AddHeader(HEADER_AccessControlAllowHeaders, archs) + resp.AddHeader(HEADER_AccessControlAllowMethods, methods) } // OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/parameter.go index 
e11c8162a71de..e8793304b15da 100644 --- a/vendor/github.com/emicklei/go-restful/parameter.go +++ b/vendor/github.com/emicklei/go-restful/parameter.go @@ -19,8 +19,30 @@ const ( // FormParameterKind = indicator of Request parameter type "form" FormParameterKind + + // CollectionFormatCSV comma separated values `foo,bar` + CollectionFormatCSV = CollectionFormat("csv") + + // CollectionFormatSSV space separated values `foo bar` + CollectionFormatSSV = CollectionFormat("ssv") + + // CollectionFormatTSV tab separated values `foo\tbar` + CollectionFormatTSV = CollectionFormat("tsv") + + // CollectionFormatPipes pipe separated values `foo|bar` + CollectionFormatPipes = CollectionFormat("pipes") + + // CollectionFormatMulti corresponds to multiple parameter instances instead of multiple values for a single + // instance `foo=bar&foo=baz`. This is valid only for QueryParameters and FormParameters + CollectionFormatMulti = CollectionFormat("multi") ) +type CollectionFormat string + +func (cf CollectionFormat) String() string { + return string(cf) +} + // Parameter is for documententing the parameter used in a Http Request // ParameterData kinds are Path,Query and Body type Parameter struct { @@ -36,6 +58,7 @@ type ParameterData struct { AllowableValues map[string]string AllowMultiple bool DefaultValue string + CollectionFormat string } // Data returns the state of the Parameter @@ -112,3 +135,9 @@ func (p *Parameter) Description(doc string) *Parameter { p.data.Description = doc return p } + +// CollectionFormat sets the collection format for an array type +func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter { + p.data.CollectionFormat = format.String() + return p +} diff --git a/vendor/github.com/emicklei/go-restful/path_expression.go b/vendor/github.com/emicklei/go-restful/path_expression.go index a921e6f22454e..95a9a25450005 100644 --- a/vendor/github.com/emicklei/go-restful/path_expression.go +++ b/vendor/github.com/emicklei/go-restful/path_expression.go @@ -14,8 +14,9 @@ import ( // PathExpression holds a compiled path expression (RegExp) needed to match against // Http request paths and to extract path parameter values. type pathExpression struct { - LiteralCount int // the number of literal characters (means those not resulting from template variable substitution) - VarCount int // the number of named parameters (enclosed by {}) in the path + LiteralCount int // the number of literal characters (means those not resulting from template variable substitution) + VarNames []string // the names of parameters (enclosed by {}) in the path + VarCount int // the number of named parameters (enclosed by {}) in the path Matcher *regexp.Regexp Source string // Path as defined by the RouteBuilder tokens []string @@ -24,16 +25,16 @@ type pathExpression struct { // NewPathExpression creates a PathExpression from the input URL path. // Returns an error if the path is invalid. 
func newPathExpression(path string) (*pathExpression, error) { - expression, literalCount, varCount, tokens := templateToRegularExpression(path) + expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path) compiled, err := regexp.Compile(expression) if err != nil { return nil, err } - return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil + return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil } // http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3 -func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) { +func templateToRegularExpression(template string) (expression string, literalCount int, varNames []string, varCount int, tokens []string) { var buffer bytes.Buffer buffer.WriteString("^") //tokens = strings.Split(template, "/") @@ -46,8 +47,10 @@ func templateToRegularExpression(template string) (expression string, literalCou if strings.HasPrefix(each, "{") { // check for regular expression in variable colon := strings.Index(each, ":") + var varName string if colon != -1 { // extract expression + varName = strings.TrimSpace(each[1:colon]) paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1]) if paramExpr == "*" { // special case buffer.WriteString("(.*)") @@ -56,8 +59,10 @@ func templateToRegularExpression(template string) (expression string, literalCou } } else { // plain var + varName = strings.TrimSpace(each[1 : len(each)-1]) buffer.WriteString("([^/]+?)") } + varNames = append(varNames, varName) varCount += 1 } else { literalCount += len(each) @@ -65,5 +70,5 @@ func templateToRegularExpression(template string) (expression string, literalCou buffer.WriteString(regexp.QuoteMeta(encoded)) } } - return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens + return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens } diff --git a/vendor/github.com/emicklei/go-restful/path_processor.go b/vendor/github.com/emicklei/go-restful/path_processor.go new file mode 100644 index 0000000000000..357c723a7ae4c --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/path_processor.go @@ -0,0 +1,63 @@ +package restful + +import ( + "bytes" + "strings" +) + +// Copyright 2018 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// PathProcessor is extra behaviour that a Router can provide to extract path parameters from the path. +// If a Router does not implement this interface then the default behaviour will be used. 
+type PathProcessor interface { + // ExtractParameters gets the path parameters defined in the route and webService from the urlPath + ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string +} + +type defaultPathProcessor struct{} + +// Extract the parameters from the request url path +func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath string) map[string]string { + urlParts := tokenizePath(urlPath) + pathParameters := map[string]string{} + for i, key := range r.pathParts { + var value string + if i >= len(urlParts) { + value = "" + } else { + value = urlParts[i] + } + if strings.HasPrefix(key, "{") { // path-parameter + if colon := strings.Index(key, ":"); colon != -1 { + // extract by regex + regPart := key[colon+1 : len(key)-1] + keyPart := key[1:colon] + if regPart == "*" { + pathParameters[keyPart] = untokenizePath(i, urlParts) + break + } else { + pathParameters[keyPart] = value + } + } else { + // without enclosing {} + pathParameters[key[1:len(key)-1]] = value + } + } + } + return pathParameters +} + +// Untokenize back into an URL path using the slash separator +func untokenizePath(offset int, parts []string) string { + var buffer bytes.Buffer + for p := offset; p < len(parts); p++ { + buffer.WriteString(parts[p]) + // do not end + if p < len(parts)-1 { + buffer.WriteString("/") + } + } + return buffer.String() +} diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go index 8c23af12c0250..a20730febf3bf 100644 --- a/vendor/github.com/emicklei/go-restful/request.go +++ b/vendor/github.com/emicklei/go-restful/request.go @@ -51,6 +51,11 @@ func (r *Request) QueryParameter(name string) string { return r.Request.FormValue(name) } +// QueryParameters returns the all the query parameters values by name +func (r *Request) QueryParameters(name string) []string { + return r.Request.URL.Query()[name] +} + // BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error. func (r *Request) BodyParameter(name string) (string, error) { err := r.Request.ParseForm() diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/response.go index 3b33ab220abe3..fbb48f2da8804 100644 --- a/vendor/github.com/emicklei/go-restful/response.go +++ b/vendor/github.com/emicklei/go-restful/response.go @@ -5,7 +5,9 @@ package restful // that can be found in the LICENSE file. import ( + "bufio" "errors" + "net" "net/http" ) @@ -19,17 +21,19 @@ var PrettyPrintResponses = true // It provides several convenience methods to prepare and write response content. type Response struct { http.ResponseWriter - requestAccept string // mime-type what the Http Request says it wants to receive - routeProduces []string // mime-types what the Route says it can produce - statusCode int // HTTP status code that has been written explicity (if zero then net/http has written 200) - contentLength int // number of bytes written for the response body - prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. 
- err error // err property is kept when WriteError is called + requestAccept string // mime-type what the Http Request says it wants to receive + routeProduces []string // mime-types what the Route says it can produce + statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200) + contentLength int // number of bytes written for the response body + prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. + err error // err property is kept when WriteError is called + hijacker http.Hijacker // if underlying ResponseWriter supports it } // NewResponse creates a new response based on a http ResponseWriter. func NewResponse(httpWriter http.ResponseWriter) *Response { - return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types + hijacker, _ := httpWriter.(http.Hijacker) + return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker} } // DefaultResponseContentType set a default. @@ -48,6 +52,16 @@ func (r Response) InternalServerError() Response { return r } +// Hijack implements the http.Hijacker interface. This expands +// the Response to fulfill http.Hijacker if the underlying +// http.ResponseWriter supports it. +func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if r.hijacker == nil { + return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter") + } + return r.hijacker.Hijack() +} + // PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output. func (r *Response) PrettyPrint(bePretty bool) { r.prettyPrint = bePretty @@ -160,10 +174,15 @@ func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType return writeJSON(r, status, contentType, value) } -// WriteError write the http status and the error string on the response. +// WriteError write the http status and the error string on the response. err can be nil. func (r *Response) WriteError(httpStatus int, err error) error { r.err = err - return r.WriteErrorString(httpStatus, err.Error()) + if err == nil { + r.WriteErrorString(httpStatus, "") + } else { + r.WriteErrorString(httpStatus, err.Error()) + } + return err } // WriteServiceError is a convenience method for a responding with a status and a ServiceError diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go index 3dd520eec837b..6d15dbf66feba 100644 --- a/vendor/github.com/emicklei/go-restful/route.go +++ b/vendor/github.com/emicklei/go-restful/route.go @@ -5,7 +5,6 @@ package restful // that can be found in the LICENSE file. import ( - "bytes" "net/http" "strings" ) @@ -13,6 +12,11 @@ import ( // RouteFunction declares the signature of a function that can be bound to a Route. type RouteFunction func(*Request, *Response) +// RouteSelectionConditionFunction declares the signature of a function that +// can be used to add extra conditional logic when selecting whether the route +// matches the HTTP request. +type RouteSelectionConditionFunction func(httpRequest *http.Request) bool + // Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. 
type Route struct { Method string @@ -21,6 +25,7 @@ type Route struct { Path string // webservice root path + described path Function RouteFunction Filters []FilterFunction + If []RouteSelectionConditionFunction // cached values for dispatching relativePath string @@ -33,10 +38,17 @@ type Route struct { Operation string ParameterDocs []*Parameter ResponseErrors map[int]ResponseError + DefaultResponse *ResponseError ReadSample, WriteSample interface{} // structs that model an example request or response payload // Extra information used to store custom information about the route. Metadata map[string]interface{} + + // marks a route as deprecated + Deprecated bool + + //Overrides the container.contentEncodingEnabled + contentEncodingEnabled *bool } // Initialize for Route @@ -45,10 +57,9 @@ func (r *Route) postBuild() { } // Create Request and Response from their http versions -func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) { - params := r.extractParameters(httpRequest.URL.Path) +func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) { wrappedRequest := NewRequest(httpRequest) - wrappedRequest.pathParameters = params + wrappedRequest.pathParameters = pathParams wrappedRequest.selectedRoutePath = r.Path wrappedResponse := NewResponse(httpWriter) wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept) @@ -67,28 +78,36 @@ func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Re } } +func stringTrimSpaceCutset(r rune) bool { + return r == ' ' +} + // Return whether the mimeType matches to what this Route can produce. func (r Route) matchesAccept(mimeTypesWithQuality string) bool { - parts := strings.Split(mimeTypesWithQuality, ",") - for _, each := range parts { - var withoutQuality string - if strings.Contains(each, ";") { - withoutQuality = strings.Split(each, ";")[0] + remaining := mimeTypesWithQuality + for { + var mimeType string + if end := strings.Index(remaining, ","); end == -1 { + mimeType, remaining = remaining, "" } else { - withoutQuality = each + mimeType, remaining = remaining[:end], remaining[end+1:] } - // trim before compare - withoutQuality = strings.Trim(withoutQuality, " ") - if withoutQuality == "*/*" { + if quality := strings.Index(mimeType, ";"); quality != -1 { + mimeType = mimeType[:quality] + } + mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) + if mimeType == "*/*" { return true } for _, producibleType := range r.Produces { - if producibleType == "*/*" || producibleType == withoutQuality { + if producibleType == "*/*" || producibleType == mimeType { return true } } + if len(remaining) == 0 { + return false + } } - return false } // Return whether this Route can consume content with a type specified by mimeTypes (can be empty). 
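The new If field on Route (typed as RouteSelectionConditionFunction, declared just above) lets the JSR311 router drop a route before its method, consumes, and produces checks run, as seen in detectRoute earlier in this diff. A hedged sketch of wiring such a condition through the RouteBuilder.If method added further down; the header name and handler are made up for illustration:

```go
package main

import (
	"io"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/feature")

	// Only consider this route when the (hypothetical) opt-in header is set;
	// otherwise the router behaves as if the route did not exist.
	betaOnly := func(req *http.Request) bool {
		return req.Header.Get("X-Beta") == "true"
	}

	ws.Route(ws.GET("").
		If(betaOnly).
		To(func(req *restful.Request, resp *restful.Response) {
			io.WriteString(resp, "beta feature enabled")
		}))

	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```

Per the doc comment on RouteBuilder.If further down, the condition runs before any filters, so it should not rely on state that container or route filters set up.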
@@ -100,7 +119,7 @@ func (r Route) matchesContentType(mimeTypes string) bool { } if len(mimeTypes) == 0 { - // idempotent methods with (most-likely or garanteed) empty content match missing Content-Type + // idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type m := r.Method if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { return true @@ -109,73 +128,33 @@ func (r Route) matchesContentType(mimeTypes string) bool { mimeTypes = MIME_OCTET } - parts := strings.Split(mimeTypes, ",") - for _, each := range parts { - var contentType string - if strings.Contains(each, ";") { - contentType = strings.Split(each, ";")[0] + remaining := mimeTypes + for { + var mimeType string + if end := strings.Index(remaining, ","); end == -1 { + mimeType, remaining = remaining, "" } else { - contentType = each + mimeType, remaining = remaining[:end], remaining[end+1:] + } + if quality := strings.Index(mimeType, ";"); quality != -1 { + mimeType = mimeType[:quality] } - // trim before compare - contentType = strings.Trim(contentType, " ") + mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) for _, consumeableType := range r.Consumes { - if consumeableType == "*/*" || consumeableType == contentType { + if consumeableType == "*/*" || consumeableType == mimeType { return true } } - } - return false -} - -// Extract the parameters from the request url path -func (r Route) extractParameters(urlPath string) map[string]string { - urlParts := tokenizePath(urlPath) - pathParameters := map[string]string{} - for i, key := range r.pathParts { - var value string - if i >= len(urlParts) { - value = "" - } else { - value = urlParts[i] - } - if strings.HasPrefix(key, "{") { // path-parameter - if colon := strings.Index(key, ":"); colon != -1 { - // extract by regex - regPart := key[colon+1 : len(key)-1] - keyPart := key[1:colon] - if regPart == "*" { - pathParameters[keyPart] = untokenizePath(i, urlParts) - break - } else { - pathParameters[keyPart] = value - } - } else { - // without enclosing {} - pathParameters[key[1:len(key)-1]] = value - } - } - } - return pathParameters -} - -// Untokenize back into an URL path using the slash separator -func untokenizePath(offset int, parts []string) string { - var buffer bytes.Buffer - for p := offset; p < len(parts); p++ { - buffer.WriteString(parts[p]) - // do not end - if p < len(parts)-1 { - buffer.WriteString("/") + if len(remaining) == 0 { + return false } } - return buffer.String() } // Tokenize an URL path using the slash separator ; the result does not have empty tokens func tokenizePath(path string) []string { if "/" == path { - return []string{} + return nil } return strings.Split(strings.Trim(path, "/"), "/") } @@ -184,3 +163,8 @@ func tokenizePath(path string) []string { func (r Route) String() string { return r.Method + " " + r.Path } + +// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value. 
+func (r Route) EnableContentEncoding(enabled bool) { + r.contentEncodingEnabled = &enabled +} diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go index 5ad4a3a7cf78c..0fccf61e94ec8 100644 --- a/vendor/github.com/emicklei/go-restful/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/route_builder.go @@ -24,6 +24,7 @@ type RouteBuilder struct { httpMethod string // required function RouteFunction // required filters []FilterFunction + conditions []RouteSelectionConditionFunction typeNameHandleFunc TypeNameHandleFunction // required @@ -34,7 +35,10 @@ type RouteBuilder struct { readSample, writeSample interface{} parameters []*Parameter errorMap map[int]ResponseError + defaultResponse *ResponseError metadata map[string]interface{} + deprecated bool + contentEncodingEnabled *bool } // Do evaluates each argument with the RouteBuilder itself. @@ -89,7 +93,7 @@ func (b *RouteBuilder) Doc(documentation string) *RouteBuilder { return b } -// A verbose explanation of the operation behavior. Optional. +// Notes is a verbose explanation of the operation behavior. Optional. func (b *RouteBuilder) Notes(notes string) *RouteBuilder { b.notes = notes return b @@ -97,15 +101,18 @@ func (b *RouteBuilder) Notes(notes string) *RouteBuilder { // Reads tells what resource type will be read from the request payload. Optional. // A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type. -func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder { +func (b *RouteBuilder) Reads(sample interface{}, optionalDescription ...string) *RouteBuilder { fn := b.typeNameHandleFunc if fn == nil { fn = reflectTypeName } typeAsName := fn(sample) - + description := "" + if len(optionalDescription) > 0 { + description = optionalDescription[0] + } b.readSample = sample - bodyParameter := &Parameter{&ParameterData{Name: "body"}} + bodyParameter := &Parameter{&ParameterData{Name: "body", Description: description}} bodyParameter.beBody() bodyParameter.Required(true) bodyParameter.DataType(typeAsName) @@ -159,7 +166,7 @@ func (b *RouteBuilder) Returns(code int, message string, model interface{}) *Rou Code: code, Message: message, Model: model, - IsDefault: false, + IsDefault: false, // this field is deprecated, use default response instead. } // lazy init because there is no NewRouteBuilder (yet) if b.errorMap == nil { @@ -169,17 +176,11 @@ func (b *RouteBuilder) Returns(code int, message string, model interface{}) *Rou return b } -// DefaultReturns is a special Returns call that sets the default of the response ; the code is zero. +// DefaultReturns is a special Returns call that sets the default of the response. func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder { - b.Returns(0, message, model) - // Modify the ResponseError just added/updated - re := b.errorMap[0] - // errorMap is initialized - b.errorMap[0] = ResponseError{ - Code: re.Code, - Message: re.Message, - Model: re.Model, - IsDefault: true, + b.defaultResponse = &ResponseError{ + Message: message, + Model: model, } return b } @@ -193,6 +194,12 @@ func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder { return b } +// Deprecate sets the value of deprecated to true. 
Deprecated routes have a special UI treatment to warn against use +func (b *RouteBuilder) Deprecate() *RouteBuilder { + b.deprecated = true + return b +} + // ResponseError represents a response; not necessarily an error. type ResponseError struct { Code int @@ -212,6 +219,27 @@ func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder { return b } +// If sets a condition function that controls matching the Route based on custom logic. +// The condition function is provided the HTTP request and should return true if the route +// should be considered. +// +// Efficiency note: the condition function is called before checking the method, produces, and +// consumes criteria, so that the correct HTTP status code can be returned. +// +// Lifecycle note: no filter functions have been called prior to calling the condition function, +// so the condition function should not depend on any context that might be set up by container +// or route filters. +func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder { + b.conditions = append(b.conditions, condition) + return b +} + +// ContentEncodingEnabled allows you to override the Containers value for auto-compressing this route response. +func (b *RouteBuilder) ContentEncodingEnabled(enabled bool) *RouteBuilder { + b.contentEncodingEnabled = &enabled + return b +} + // If no specific Route path then set to rootPath // If no specific Produces then set to rootProduces // If no specific Consumes then set to rootConsumes @@ -235,11 +263,11 @@ func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBui func (b *RouteBuilder) Build() Route { pathExpr, err := newPathExpression(b.currentPath) if err != nil { - log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err) + log.Printf("Invalid path:%s because:%v", b.currentPath, err) os.Exit(1) } if b.function == nil { - log.Printf("[restful] No function specified for route:" + b.currentPath) + log.Printf("No function specified for route:" + b.currentPath) os.Exit(1) } operationName := b.operation @@ -248,22 +276,27 @@ func (b *RouteBuilder) Build() Route { operationName = nameOfFunction(b.function) } route := Route{ - Method: b.httpMethod, - Path: concatPath(b.rootPath, b.currentPath), - Produces: b.produces, - Consumes: b.consumes, - Function: b.function, - Filters: b.filters, - relativePath: b.currentPath, - pathExpr: pathExpr, - Doc: b.doc, - Notes: b.notes, - Operation: operationName, - ParameterDocs: b.parameters, - ResponseErrors: b.errorMap, - ReadSample: b.readSample, - WriteSample: b.writeSample, - Metadata: b.metadata} + Method: b.httpMethod, + Path: concatPath(b.rootPath, b.currentPath), + Produces: b.produces, + Consumes: b.consumes, + Function: b.function, + Filters: b.filters, + If: b.conditions, + relativePath: b.currentPath, + pathExpr: pathExpr, + Doc: b.doc, + Notes: b.notes, + Operation: operationName, + ParameterDocs: b.parameters, + ResponseErrors: b.errorMap, + DefaultResponse: b.defaultResponse, + ReadSample: b.readSample, + WriteSample: b.writeSample, + Metadata: b.metadata, + Deprecated: b.deprecated, + contentEncodingEnabled: b.contentEncodingEnabled, + } route.postBuild() return route } diff --git a/vendor/github.com/emicklei/go-restful/router.go b/vendor/github.com/emicklei/go-restful/router.go index 9b32fb67530aa..19078af1c06d8 100644 --- a/vendor/github.com/emicklei/go-restful/router.go +++ b/vendor/github.com/emicklei/go-restful/router.go @@ -7,6 +7,8 @@ package restful import "net/http" // A RouteSelector finds the 
best matching Route given the input HTTP Request +// RouteSelectors can optionally also implement the PathProcessor interface to also calculate the +// path parameters after the route has been selected. type RouteSelector interface { // SelectRoute finds a Route given the input HTTP Request and a list of WebServices. diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go index 7af60233a03b9..77ba9a8cfc536 100644 --- a/vendor/github.com/emicklei/go-restful/web_service.go +++ b/vendor/github.com/emicklei/go-restful/web_service.go @@ -60,7 +60,7 @@ func reflectTypeName(sample interface{}) string { func (w *WebService) compilePathExpression() { compiled, err := newPathExpression(w.rootPath) if err != nil { - log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err) + log.Printf("invalid path:%s because:%v", w.rootPath, err) os.Exit(1) } w.pathExpr = compiled @@ -118,7 +118,7 @@ func (w *WebService) QueryParameter(name, description string) *Parameter { // QueryParameter creates a new Parameter of kind Query for documentation purposes. // It is initialized as not required with string as its DataType. func QueryParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string", CollectionFormat: CollectionFormatCSV.String()}} p.beQuery() return p } @@ -233,7 +233,7 @@ func (w *WebService) RootPath() string { return w.rootPath } -// PathParameters return the path parameter names for (shared amoung its Routes) +// PathParameters return the path parameter names for (shared among its Routes) func (w *WebService) PathParameters() []*Parameter { return w.pathParameters } diff --git a/vendor/github.com/fatih/camelcase/.travis.yml b/vendor/github.com/fatih/camelcase/.travis.yml deleted file mode 100644 index 0244e464e6d02..0000000000000 --- a/vendor/github.com/fatih/camelcase/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go -go: 1.4 - diff --git a/vendor/github.com/fatih/camelcase/LICENSE.md b/vendor/github.com/fatih/camelcase/LICENSE.md deleted file mode 100644 index aa4a536caf080..0000000000000 --- a/vendor/github.com/fatih/camelcase/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Fatih Arslan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
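Several go-restful pieces above fit together: parameter.go introduces CollectionFormat constants, request.go adds QueryParameters for reading repeated query values, and QueryParameter in web_service.go now defaults to CSV. A hedged sketch combining them; the route path, parameter name, and handler are illustrative:

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/search").Produces(restful.MIME_JSON)

	ws.Route(ws.GET("").
		// Document a repeatable query parameter using the new
		// CollectionFormat support; "multi" means ?tag=a&tag=b.
		Param(ws.QueryParameter("tag", "tags to filter on").
			AllowMultiple(true).
			CollectionFormat(restful.CollectionFormatMulti)).
		To(func(req *restful.Request, resp *restful.Response) {
			// QueryParameters returns every value supplied for "tag".
			resp.WriteAsJson(req.QueryParameters("tag"))
		}))

	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```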
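route_builder.go above also grows several documentation-oriented builder calls: Reads now accepts an optional body description, DefaultReturns stores a separate Route.DefaultResponse instead of reusing error code 0, and Deprecate marks the route as deprecated. A hedged sketch putting them together; the Pet payload type and the handler are hypothetical:

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful"
)

// Pet is a hypothetical request/response payload used only for illustration.
type Pet struct {
	Name string `json:"name"`
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/pets").Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON)

	ws.Route(ws.POST("").
		Reads(Pet{}, "the pet to create"). // optional body description, new in this change
		Returns(http.StatusCreated, "created", Pet{}).
		DefaultReturns("unexpected error", nil). // stored on Route.DefaultResponse
		Deprecate().                             // sets Route.Deprecated
		To(func(req *restful.Request, resp *restful.Response) {
			pet := new(Pet)
			if err := req.ReadEntity(pet); err != nil {
				resp.WriteError(http.StatusBadRequest, err)
				return
			}
			resp.WriteHeaderAndJson(http.StatusCreated, pet, restful.MIME_JSON)
		}))

	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```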
diff --git a/vendor/github.com/fatih/camelcase/README.md b/vendor/github.com/fatih/camelcase/README.md deleted file mode 100644 index 105a6ae33deb3..0000000000000 --- a/vendor/github.com/fatih/camelcase/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# CamelCase [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/camelcase) [![Build Status](http://img.shields.io/travis/fatih/camelcase.svg?style=flat-square)](https://travis-ci.org/fatih/camelcase) - -CamelCase is a Golang (Go) package to split the words of a camelcase type -string into a slice of words. It can be used to convert a camelcase word (lower -or upper case) into any type of word. - -## Splitting rules: - -1. If string is not valid UTF-8, return it without splitting as - single item array. -2. Assign all unicode characters into one of 4 sets: lower case - letters, upper case letters, numbers, and all other characters. -3. Iterate through characters of string, introducing splits - between adjacent characters that belong to different sets. -4. Iterate through array of split strings, and if a given string - is upper case: - * if subsequent string is lower case: - * move last character of upper case string to beginning of - lower case string - -## Install - -```bash -go get github.com/fatih/camelcase -``` - -## Usage and examples - -```go -splitted := camelcase.Split("GolangPackage") - -fmt.Println(splitted[0], splitted[1]) // prints: "Golang", "Package" -``` - -Both lower camel case and upper camel case are supported. For more info please -check: [http://en.wikipedia.org/wiki/CamelCase](http://en.wikipedia.org/wiki/CamelCase) - -Below are some example cases: - -``` -"" => [] -"lowercase" => ["lowercase"] -"Class" => ["Class"] -"MyClass" => ["My", "Class"] -"MyC" => ["My", "C"] -"HTML" => ["HTML"] -"PDFLoader" => ["PDF", "Loader"] -"AString" => ["A", "String"] -"SimpleXMLParser" => ["Simple", "XML", "Parser"] -"vimRPCPlugin" => ["vim", "RPC", "Plugin"] -"GL11Version" => ["GL", "11", "Version"] -"99Bottles" => ["99", "Bottles"] -"May5" => ["May", "5"] -"BFG9000" => ["BFG", "9000"] -"BöseÜberraschung" => ["Böse", "Überraschung"] -"Two spaces" => ["Two", " ", "spaces"] -"BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] -``` diff --git a/vendor/github.com/fatih/camelcase/camelcase.go b/vendor/github.com/fatih/camelcase/camelcase.go deleted file mode 100644 index 02160c9a435d4..0000000000000 --- a/vendor/github.com/fatih/camelcase/camelcase.go +++ /dev/null @@ -1,90 +0,0 @@ -// Package camelcase is a micro package to split the words of a camelcase type -// string into a slice of words. -package camelcase - -import ( - "unicode" - "unicode/utf8" -) - -// Split splits the camelcase word and returns a list of words. It also -// supports digits. Both lower camel case and upper camel case are supported. 
-// For more info please check: http://en.wikipedia.org/wiki/CamelCase -// -// Examples -// -// "" => [""] -// "lowercase" => ["lowercase"] -// "Class" => ["Class"] -// "MyClass" => ["My", "Class"] -// "MyC" => ["My", "C"] -// "HTML" => ["HTML"] -// "PDFLoader" => ["PDF", "Loader"] -// "AString" => ["A", "String"] -// "SimpleXMLParser" => ["Simple", "XML", "Parser"] -// "vimRPCPlugin" => ["vim", "RPC", "Plugin"] -// "GL11Version" => ["GL", "11", "Version"] -// "99Bottles" => ["99", "Bottles"] -// "May5" => ["May", "5"] -// "BFG9000" => ["BFG", "9000"] -// "BöseÜberraschung" => ["Böse", "Überraschung"] -// "Two spaces" => ["Two", " ", "spaces"] -// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] -// -// Splitting rules -// -// 1) If string is not valid UTF-8, return it without splitting as -// single item array. -// 2) Assign all unicode characters into one of 4 sets: lower case -// letters, upper case letters, numbers, and all other characters. -// 3) Iterate through characters of string, introducing splits -// between adjacent characters that belong to different sets. -// 4) Iterate through array of split strings, and if a given string -// is upper case: -// if subsequent string is lower case: -// move last character of upper case string to beginning of -// lower case string -func Split(src string) (entries []string) { - // don't split invalid utf8 - if !utf8.ValidString(src) { - return []string{src} - } - entries = []string{} - var runes [][]rune - lastClass := 0 - class := 0 - // split into fields based on class of unicode character - for _, r := range src { - switch true { - case unicode.IsLower(r): - class = 1 - case unicode.IsUpper(r): - class = 2 - case unicode.IsDigit(r): - class = 3 - default: - class = 4 - } - if class == lastClass { - runes[len(runes)-1] = append(runes[len(runes)-1], r) - } else { - runes = append(runes, []rune{r}) - } - lastClass = class - } - // handle upper case -> lower case sequences, e.g. - // "PDFL", "oader" -> "PDF", "Loader" - for i := 0; i < len(runes)-1; i++ { - if unicode.IsUpper(runes[i][0]) && unicode.IsLower(runes[i+1][0]) { - runes[i+1] = append([]rune{runes[i][len(runes[i])-1]}, runes[i+1]...) 
- runes[i] = runes[i][:len(runes[i])-1] - } - } - // construct []string from results - for _, s := range runes { - if len(s) > 0 { - entries = append(entries, string(s)) - } - } - return -} diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml index 3436c4590c31a..9aef9184e86d2 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ b/vendor/github.com/go-openapi/jsonpointer/.travis.yml @@ -1,15 +1,15 @@ after_success: - bash <(curl -s https://codecov.io/bash) go: -- '1.9' -- 1.10.x - 1.11.x +- 1.12.x install: -- go get -u github.com/stretchr/testify/assert -- go get -u github.com/go-openapi/swag +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on language: go notifications: slack: secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... +- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonpointer/go.mod b/vendor/github.com/go-openapi/jsonpointer/go.mod index eb4d623c590c3..422045df2d867 100644 --- a/vendor/github.com/go-openapi/jsonpointer/go.mod +++ b/vendor/github.com/go-openapi/jsonpointer/go.mod @@ -1,10 +1,6 @@ module github.com/go-openapi/jsonpointer require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-openapi/swag v0.17.0 - github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - gopkg.in/yaml.v2 v2.2.1 // indirect + github.com/go-openapi/swag v0.19.2 + github.com/stretchr/testify v1.3.0 ) diff --git a/vendor/github.com/go-openapi/jsonpointer/go.sum b/vendor/github.com/go-openapi/jsonpointer/go.sum index c71f4d7a294fb..f5e28beb4bf75 100644 --- a/vendor/github.com/go-openapi/jsonpointer/go.sum +++ b/vendor/github.com/go-openapi/jsonpointer/go.sum @@ -1,11 +1,22 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= -github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= 
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml index 40034d28dc231..40b90757d8946 100644 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ b/vendor/github.com/go-openapi/jsonreference/.travis.yml @@ -1,16 +1,15 @@ after_success: - bash <(curl -s https://codecov.io/bash) go: -- '1.9' -- 1.10.x - 1.11.x +- 1.12.x install: -- go get -u github.com/stretchr/testify/assert -- go get -u github.com/PuerkitoBio/purell -- go get -u github.com/go-openapi/jsonpointer +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on language: go notifications: slack: secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... +- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/jsonreference/go.mod b/vendor/github.com/go-openapi/jsonreference/go.mod index 6d15a70503ec3..35adddfe40f03 100644 --- a/vendor/github.com/go-openapi/jsonreference/go.mod +++ b/vendor/github.com/go-openapi/jsonreference/go.mod @@ -1,15 +1,10 @@ module github.com/go-openapi/jsonreference require ( - github.com/PuerkitoBio/purell v1.1.0 + github.com/PuerkitoBio/purell v1.1.1 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-openapi/jsonpointer v0.17.0 - github.com/go-openapi/swag v0.17.0 // indirect - github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - golang.org/x/net v0.0.0-20181005035420-146acd28ed58 // indirect - golang.org/x/text v0.3.0 // indirect - gopkg.in/yaml.v2 v2.2.1 // indirect + github.com/go-openapi/jsonpointer v0.19.2 + github.com/stretchr/testify v1.3.0 + golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 // indirect + golang.org/x/text v0.3.2 // indirect ) diff --git a/vendor/github.com/go-openapi/jsonreference/go.sum b/vendor/github.com/go-openapi/jsonreference/go.sum index ec9bdbc286511..f1a7a34e3ce69 100644 --- a/vendor/github.com/go-openapi/jsonreference/go.sum +++ b/vendor/github.com/go-openapi/jsonreference/go.sum @@ -1,20 +1,36 @@ -github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/jsonpointer v0.17.0 h1:Bpl2DtZ6k7wKqfFs7e+4P08+M9I3FQgn09a1UsRUQbk= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= -github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml index ed53e5cd761e5..3e33f9f2e3ecc 100644 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -4,11 +4,11 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 25 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 min-occurrences: 2 @@ -19,3 +19,5 @@ linters: - maligned - unparam - lll + - gochecknoinits + - gochecknoglobals diff --git a/vendor/github.com/go-openapi/spec/.travis.yml b/vendor/github.com/go-openapi/spec/.travis.yml index a4f03484b76c4..aa26d8763aa38 100644 --- a/vendor/github.com/go-openapi/spec/.travis.yml +++ b/vendor/github.com/go-openapi/spec/.travis.yml @@ -1,18 +1,15 @@ after_success: - bash <(curl -s https://codecov.io/bash) go: -- '1.9' -- 1.10.x - 1.11.x +- 1.12.x install: -- go get -u github.com/stretchr/testify -- go get -u github.com/go-openapi/swag -- go get -u gopkg.in/yaml.v2 -- go get -u 
github.com/go-openapi/jsonpointer -- go get -u github.com/go-openapi/jsonreference +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on language: go notifications: slack: secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... +- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/spec/BUILD.bazel b/vendor/github.com/go-openapi/spec/BUILD.bazel index 01824089a69c3..dd6bd2e15662c 100644 --- a/vendor/github.com/go-openapi/spec/BUILD.bazel +++ b/vendor/github.com/go-openapi/spec/BUILD.bazel @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "bindata.go", + "cache.go", "contact_info.go", "debug.go", "expander.go", @@ -12,6 +13,7 @@ go_library( "info.go", "items.go", "license.go", + "normalizer.go", "operation.go", "parameter.go", "path_item.go", @@ -20,10 +22,12 @@ go_library( "response.go", "responses.go", "schema.go", + "schema_loader.go", "security_scheme.go", "spec.go", "swagger.go", "tag.go", + "unused.go", "xml_object.go", ], importmap = "k8s.io/kops/vendor/github.com/go-openapi/spec", diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go index 1717ea1052ebd..d5ec7b900a732 100644 --- a/vendor/github.com/go-openapi/spec/bindata.go +++ b/vendor/github.com/go-openapi/spec/bindata.go @@ -1,14 +1,14 @@ -// Code generated by go-bindata. +// Code generated by go-bindata. DO NOT EDIT. // sources: -// schemas/jsonschema-draft-04.json -// schemas/v2/schema.json -// DO NOT EDIT! 
+// schemas/jsonschema-draft-04.json (4.357kB) +// schemas/v2/schema.json (40.249kB) package spec import ( "bytes" "compress/gzip" + "crypto/sha256" "fmt" "io" "io/ioutil" @@ -39,8 +39,9 @@ func bindataRead(data []byte, name string) ([]byte, error) { } type asset struct { - bytes []byte - info os.FileInfo + bytes []byte + info os.FileInfo + digest [sha256.Size]byte } type bindataFileInfo struct { @@ -84,8 +85,8 @@ func jsonschemaDraft04JSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(420), modTime: time.Unix(1523760398, 0)} - a := &asset{bytes: bytes, info: info} + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}} return a, nil } @@ -104,8 +105,8 @@ func v2SchemaJSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(420), modTime: time.Unix(1523760397, 0)} - a := &asset{bytes: bytes, info: info} + info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcb, 0x25, 0x27, 0xe8, 0x46, 0xae, 0x22, 0xc4, 0xf4, 0x8b, 0x1, 0x32, 0x4d, 0x1f, 0xf8, 0xdf, 0x75, 0x15, 0xc8, 0x2d, 0xc7, 0xed, 0xe, 0x7e, 0x0, 0x75, 0xc0, 0xf9, 0xd2, 0x1f, 0x75, 0x57}} return a, nil } @@ -113,8 +114,8 @@ func v2SchemaJSON() (*asset, error) { // It returns an error if the asset could not be found or // could not be loaded. func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { a, err := f() if err != nil { return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) @@ -124,6 +125,12 @@ func Asset(name string) ([]byte, error) { return nil, fmt.Errorf("Asset %s not found", name) } +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + // MustAsset is like Asset but panics when Asset would return an error. // It simplifies safe initialization of global variables. func MustAsset(name string) []byte { @@ -135,12 +142,18 @@ func MustAsset(name string) []byte { return a } +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. +func MustAssetString(name string) string { + return string(MustAsset(name)) +} + // AssetInfo loads and returns the asset info for the given name. // It returns an error if the asset could not be found or // could not be loaded. 
func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { a, err := f() if err != nil { return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) @@ -150,6 +163,33 @@ func AssetInfo(name string) (os.FileInfo, error) { return nil, fmt.Errorf("AssetInfo %s not found", name) } +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. +func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + // AssetNames returns the names of the assets. func AssetNames() []string { names := make([]string, 0, len(_bindata)) @@ -162,7 +202,8 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ "jsonschema-draft-04.json": jsonschemaDraft04JSON, - "v2/schema.json": v2SchemaJSON, + + "v2/schema.json": v2SchemaJSON, } // AssetDir returns the file names below a certain @@ -174,15 +215,15 @@ var _bindata = map[string]func() (*asset, error){ // img/ // a.png // b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and // AssetDir("") will return []string{"data"}. func AssetDir(name string) ([]string, error) { node := _bintree if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") for _, p := range pathList { node = node.Children[p] if node == nil { @@ -212,7 +253,7 @@ var _bintree = &bintree{nil, map[string]*bintree{ }}, }} -// RestoreAsset restores an asset under the given directory +// RestoreAsset restores an asset under the given directory. func RestoreAsset(dir, name string) error { data, err := Asset(name) if err != nil { @@ -230,14 +271,10 @@ func RestoreAsset(dir, name string) error { if err != nil { return err } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) } -// RestoreAssets restores an asset under the given directory recursively +// RestoreAssets restores an asset under the given directory recursively. 
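Aside (illustration only, not part of the vendored patch): the regenerated bindata above adds digest accessors (AssetDigest, Digests) alongside AssetInfo and AssetNames. A minimal sketch of how client code might list the embedded schemas and their recorded checksums, assuming this revision of github.com/go-openapi/spec is on the import path:

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	// Digests maps every embedded asset name to its recorded SHA-256 checksum.
	sums, err := spec.Digests()
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range spec.AssetNames() {
		info, err := spec.AssetInfo(name)
		if err != nil {
			log.Fatal(err)
		}
		d := sums[name]
		fmt.Printf("%s: %d bytes, sha256 %x\n", name, info.Size(), d[:])
	}
}
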
func RestoreAssets(dir, name string) error { children, err := AssetDir(name) // File @@ -255,6 +292,6 @@ func RestoreAssets(dir, name string) error { } func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) } diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go new file mode 100644 index 0000000000000..3fada0daef1bd --- /dev/null +++ b/vendor/github.com/go-openapi/spec/cache.go @@ -0,0 +1,60 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import "sync" + +// ResolutionCache a cache for resolving urls +type ResolutionCache interface { + Get(string) (interface{}, bool) + Set(string, interface{}) +} + +type simpleCache struct { + lock sync.RWMutex + store map[string]interface{} +} + +// Get retrieves a cached URI +func (s *simpleCache) Get(uri string) (interface{}, bool) { + debugLog("getting %q from resolution cache", uri) + s.lock.RLock() + v, ok := s.store[uri] + debugLog("got %q from resolution cache: %t", uri, ok) + + s.lock.RUnlock() + return v, ok +} + +// Set caches a URI +func (s *simpleCache) Set(uri string, data interface{}) { + s.lock.Lock() + s.store[uri] = data + s.lock.Unlock() +} + +var resCache ResolutionCache + +func init() { + resCache = initResolutionCache() +} + +// initResolutionCache initializes the URI resolution cache +func initResolutionCache() ResolutionCache { + return &simpleCache{store: map[string]interface{}{ + "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), + "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), + }} +} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go index 7edb95a614275..389c528ff613c 100644 --- a/vendor/github.com/go-openapi/spec/debug.go +++ b/vendor/github.com/go-openapi/spec/debug.go @@ -24,9 +24,9 @@ import ( var ( // Debug is true when the SWAGGER_DEBUG env var is not empty. - // It enables a more verbose logging of validators. + // It enables a more verbose logging of this package. Debug = os.Getenv("SWAGGER_DEBUG") != "" - // validateLogger is a debug logger for this package + // specLogger is a debug logger for this package specLogger *log.Logger ) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index 456a9dd7efb9a..1e7fc8c490c64 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -17,20 +17,10 @@ package spec import ( "encoding/json" "fmt" - "log" - "net/url" - "os" - "path" - "path/filepath" - "reflect" "strings" - "sync" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" ) -// ExpandOptions provides options for expand. 
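Aside (illustration only, not part of the vendored patch): the ResolutionCache interface moved into cache.go above is a caller-visible extension point; ExpandParameterWithRoot and ExpandResponseWithRoot later in this diff accept one. A hedged sketch of a user-supplied implementation; the mapCache type, newMapCache helper, and package name are hypothetical.

package cachedemo // hypothetical

import (
	"sync"

	"github.com/go-openapi/spec"
)

// mapCache is a minimal, concurrency-safe resolution cache.
type mapCache struct {
	mu    sync.RWMutex
	store map[string]interface{}
}

// Compile-time check that mapCache satisfies spec.ResolutionCache.
var _ spec.ResolutionCache = (*mapCache)(nil)

func newMapCache() *mapCache {
	return &mapCache{store: map[string]interface{}{}}
}

func (c *mapCache) Get(uri string) (interface{}, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.store[uri]
	return v, ok
}

func (c *mapCache) Set(uri string, data interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.store[uri] = data
}

// expandWithCache hands the cache to the expander; response and root are
// assumed to come from the caller.
func expandWithCache(response *spec.Response, root interface{}) error {
	return spec.ExpandResponseWithRoot(response, root, newMapCache())
}
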
+// ExpandOptions provides options for spec expand type ExpandOptions struct { RelativeBase string SkipSchemas bool @@ -38,68 +28,6 @@ type ExpandOptions struct { AbsoluteCircularRef bool } -// ResolutionCache a cache for resolving urls -type ResolutionCache interface { - Get(string) (interface{}, bool) - Set(string, interface{}) -} - -type simpleCache struct { - lock sync.RWMutex - store map[string]interface{} -} - -var resCache ResolutionCache - -func init() { - resCache = initResolutionCache() -} - -// initResolutionCache initializes the URI resolution cache -func initResolutionCache() ResolutionCache { - return &simpleCache{store: map[string]interface{}{ - "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), - "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), - }} -} - -// resolverContext allows to share a context during spec processing. -// At the moment, it just holds the index of circular references found. -type resolverContext struct { - // circulars holds all visited circular references, which allows shortcuts. - // NOTE: this is not just a performance improvement: it is required to figure out - // circular references which participate several cycles. - // This structure is privately instantiated and needs not be locked against - // concurrent access, unless we chose to implement a parallel spec walking. - circulars map[string]bool - basePath string -} - -func newResolverContext(originalBasePath string) *resolverContext { - return &resolverContext{ - circulars: make(map[string]bool), - basePath: originalBasePath, // keep the root base path in context - } -} - -// Get retrieves a cached URI -func (s *simpleCache) Get(uri string) (interface{}, bool) { - debugLog("getting %q from resolution cache", uri) - s.lock.RLock() - v, ok := s.store[uri] - debugLog("got %q from resolution cache: %t", uri, ok) - - s.lock.RUnlock() - return v, ok -} - -// Set caches a URI -func (s *simpleCache) Set(uri string, data interface{}) { - s.lock.Lock() - s.store[uri] = data - s.lock.Unlock() -} - // ResolveRefWithBase resolves a reference against a context root with preservation of base path func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) { resolver, err := defaultSchemaLoader(root, opts, nil, nil) @@ -179,7 +107,10 @@ func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*R return result, nil } -// ResolveItems resolves header and parameter items reference against a context root and base path +// ResolveItems resolves parameter items reference against a context root and base path. +// +// NOTE: stricly speaking, this construct is not supported by Swagger 2.0. +// Similarly, $ref are forbidden in response headers. 
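Aside (illustration only, not part of the vendored patch): a hedged sketch of the resolver entry points kept in this file, ResolveRefWithBase together with ExpandOptions. The resolvePet helper, the "#/definitions/Pet" pointer, and the package name are hypothetical.

package resolvedemo // hypothetical

import "github.com/go-openapi/spec"

// resolvePet resolves a local, fragment-only $ref against an in-memory root document.
func resolvePet(root *spec.Swagger) (*spec.Schema, error) {
	ref, err := spec.NewRef("#/definitions/Pet") // hypothetical definition name
	if err != nil {
		return nil, err
	}
	// Fragment-only refs are resolved against root, so RelativeBase may stay empty.
	opts := &spec.ExpandOptions{SkipSchemas: false, ContinueOnError: false}
	return spec.ResolveRefWithBase(root, &ref, opts)
}
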
func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) { resolver, err := defaultSchemaLoader(root, opts, nil, nil) if err != nil { @@ -213,341 +144,11 @@ func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, return result, nil } -type schemaLoader struct { - root interface{} - options *ExpandOptions - cache ResolutionCache - context *resolverContext - loadDoc func(string) (json.RawMessage, error) -} - -var idPtr, _ = jsonpointer.New("/id") -var refPtr, _ = jsonpointer.New("/$ref") - -// PathLoader function to use when loading remote refs -var PathLoader func(string) (json.RawMessage, error) - -func init() { - PathLoader = func(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil - } -} - -func defaultSchemaLoader( - root interface{}, - expandOptions *ExpandOptions, - cache ResolutionCache, - context *resolverContext) (*schemaLoader, error) { - - if cache == nil { - cache = resCache - } - if expandOptions == nil { - expandOptions = &ExpandOptions{} - } - absBase, _ := absPath(expandOptions.RelativeBase) - if context == nil { - context = newResolverContext(absBase) - } - return &schemaLoader{ - root: root, - options: expandOptions, - cache: cache, - context: context, - loadDoc: func(path string) (json.RawMessage, error) { - debugLog("fetching document at %q", path) - return PathLoader(path) - }, - }, nil -} - -func idFromNode(node interface{}) (*Ref, error) { - if idValue, _, err := idPtr.Get(node); err == nil { - if refStr, ok := idValue.(string); ok && refStr != "" { - idRef, err := NewRef(refStr) - if err != nil { - return nil, err - } - return &idRef, nil - } - } - return nil, nil -} - -func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { - if startingRef == nil { - return nil - } - - if ptr == nil { - return startingRef - } - - ret := startingRef - var idRef *Ref - node := startingNode - - for _, tok := range ptr.DecodedTokens() { - node, _, _ = jsonpointer.GetForToken(node, tok) - if node == nil { - break - } - - idRef, _ = idFromNode(node) - if idRef != nil { - nw, err := ret.Inherits(*idRef) - if err != nil { - break - } - ret = nw - } - - refRef, _, _ := refPtr.Get(node) - if refRef != nil { - var rf Ref - switch value := refRef.(type) { - case string: - rf, _ = NewRef(value) - } - nw, err := ret.Inherits(rf) - if err != nil { - break - } - nwURL := nw.GetURL() - if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") { - nwpt := filepath.ToSlash(nwURL.Path) - if filepath.IsAbs(nwpt) { - _, err := os.Stat(nwpt) - if err != nil { - nwURL.Path = filepath.Join(".", nwpt) - } - } - } - - ret = nw - } - - } - - return ret -} - -// normalize absolute path for cache. 
-// on Windows, drive letters should be converted to lower as scheme in net/url.URL -func normalizeAbsPath(path string) string { - u, err := url.Parse(path) - if err != nil { - debugLog("normalize absolute path failed: %s", err) - return path - } - return u.String() -} - -// base or refPath could be a file path or a URL -// given a base absolute path and a ref path, return the absolute path of refPath -// 1) if refPath is absolute, return it -// 2) if refPath is relative, join it with basePath keeping the scheme, hosts, and ports if exists -// base could be a directory or a full file path -func normalizePaths(refPath, base string) string { - refURL, _ := url.Parse(refPath) - if path.IsAbs(refURL.Path) || filepath.IsAbs(refPath) { - // refPath is actually absolute - if refURL.Host != "" { - return refPath - } - parts := strings.Split(refPath, "#") - result := filepath.FromSlash(parts[0]) - if len(parts) == 2 { - result += "#" + parts[1] - } - return result - } - - // relative refPath - baseURL, _ := url.Parse(base) - if !strings.HasPrefix(refPath, "#") { - // combining paths - if baseURL.Host != "" { - baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) - } else { // base is a file - newBase := fmt.Sprintf("%s#%s", filepath.Join(filepath.Dir(base), filepath.FromSlash(refURL.Path)), refURL.Fragment) - return newBase - } - - } - // copying fragment from ref to base - baseURL.Fragment = refURL.Fragment - return baseURL.String() -} - -// denormalizePaths returns to simplest notation on file $ref, -// i.e. strips the absolute path and sets a path relative to the base path. -// -// This is currently used when we rewrite ref after a circular ref has been detected -func denormalizeFileRef(ref *Ref, relativeBase, originalRelativeBase string) *Ref { - debugLog("denormalizeFileRef for: %s", ref.String()) - - if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { - return ref - } - // strip relativeBase from URI - relativeBaseURL, _ := url.Parse(relativeBase) - relativeBaseURL.Fragment = "" - - if relativeBaseURL.IsAbs() && strings.HasPrefix(ref.String(), relativeBase) { - // this should work for absolute URI (e.g. http://...): we have an exact match, just trim prefix - r, _ := NewRef(strings.TrimPrefix(ref.String(), relativeBase)) - return &r - } - - if relativeBaseURL.IsAbs() { - // other absolute URL get unchanged (i.e. with a non-empty scheme) - return ref - } - - // for relative file URIs: - originalRelativeBaseURL, _ := url.Parse(originalRelativeBase) - originalRelativeBaseURL.Fragment = "" - if strings.HasPrefix(ref.String(), originalRelativeBaseURL.String()) { - // the resulting ref is in the expanded spec: return a local ref - r, _ := NewRef(strings.TrimPrefix(ref.String(), originalRelativeBaseURL.String())) - return &r - } - - // check if we may set a relative path, considering the original base path for this spec. - // Example: - // spec is located at /mypath/spec.json - // my normalized ref points to: /mypath/item.json#/target - // expected result: item.json#/target - parts := strings.Split(ref.String(), "#") - relativePath, err := filepath.Rel(path.Dir(originalRelativeBaseURL.String()), parts[0]) - if err != nil { - // there is no common ancestor (e.g. 
different drives on windows) - // leaves the ref unchanged - return ref - } - if len(parts) == 2 { - relativePath += "#" + parts[1] - } - r, _ := NewRef(relativePath) - return &r -} - -// relativeBase could be an ABSOLUTE file path or an ABSOLUTE URL -func normalizeFileRef(ref *Ref, relativeBase string) *Ref { - // This is important for when the reference is pointing to the root schema - if ref.String() == "" { - r, _ := NewRef(relativeBase) - return &r - } - - debugLog("normalizing %s against %s", ref.String(), relativeBase) - - s := normalizePaths(ref.String(), relativeBase) - r, _ := NewRef(s) - return &r -} - -func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { - tgt := reflect.ValueOf(target) - if tgt.Kind() != reflect.Ptr { - return fmt.Errorf("resolve ref: target needs to be a pointer") - } - - refURL := ref.GetURL() - if refURL == nil { - return nil - } - - var res interface{} - var data interface{} - var err error - // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means - // it is pointing somewhere in the root. - root := r.root - if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { - if baseRef, erb := NewRef(basePath); erb == nil { - root, _, _, _ = r.load(baseRef.GetURL()) - } - } - if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { - data = root - } else { - baseRef := normalizeFileRef(ref, basePath) - debugLog("current ref is: %s", ref.String()) - debugLog("current ref normalized file: %s", baseRef.String()) - data, _, _, err = r.load(baseRef.GetURL()) - if err != nil { - return err - } - } - - res = data - if ref.String() != "" { - res, _, err = ref.GetPointer().Get(data) - if err != nil { - return err - } - } - if err := swag.DynamicJSONToStruct(res, target); err != nil { - return err - } - - return nil -} - -func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { - debugLog("loading schema from url: %s", refURL) - toFetch := *refURL - toFetch.Fragment = "" - - normalized := normalizeAbsPath(toFetch.String()) - - data, fromCache := r.cache.Get(normalized) - if !fromCache { - b, err := r.loadDoc(normalized) - if err != nil { - return nil, url.URL{}, false, err - } - - if err := json.Unmarshal(b, &data); err != nil { - return nil, url.URL{}, false, err - } - r.cache.Set(normalized, data) - } - - return data, toFetch, fromCache, nil -} - -// Resolve resolves a reference against basePath and stores the result in target -// Resolve is not in charge of following references, it only resolves ref by following its URL -// if the schema that ref is referring to has more refs in it. Resolve doesn't resolve them -// if basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct -func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { - return r.resolveRef(ref, target, basePath) -} - -// absPath returns the absolute path of a file -func absPath(fname string) (string, error) { - if strings.HasPrefix(fname, "http") { - return fname, nil - } - if filepath.IsAbs(fname) { - return fname, nil - } - wd, err := os.Getwd() - return filepath.Join(wd, fname), err -} - // ExpandSpec expands the references in a swagger spec func ExpandSpec(spec *Swagger, options *ExpandOptions) error { resolver, err := defaultSchemaLoader(spec, options, nil, nil) // Just in case this ever returns an error. 
- if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return err } @@ -561,7 +162,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { for key, definition := range spec.Definitions { var def *Schema var err error - if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); shouldStopOnError(err, resolver.options) { + if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); resolver.shouldStopOnError(err) { return err } if def != nil { @@ -570,23 +171,26 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { } } - for key, parameter := range spec.Parameters { - if err := expandParameter(¶meter, resolver, specBasePath); shouldStopOnError(err, resolver.options) { + for key := range spec.Parameters { + parameter := spec.Parameters[key] + if err := expandParameterOrResponse(¶meter, resolver, specBasePath); resolver.shouldStopOnError(err) { return err } spec.Parameters[key] = parameter } - for key, response := range spec.Responses { - if err := expandResponse(&response, resolver, specBasePath); shouldStopOnError(err, resolver.options) { + for key := range spec.Responses { + response := spec.Responses[key] + if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) { return err } spec.Responses[key] = response } if spec.Paths != nil { - for key, path := range spec.Paths.Paths { - if err := expandPathItem(&path, resolver, specBasePath); shouldStopOnError(err, resolver.options) { + for key := range spec.Paths.Paths { + path := spec.Paths.Paths[key] + if err := expandPathItem(&path, resolver, specBasePath); resolver.shouldStopOnError(err) { return err } spec.Paths.Paths[key] = path @@ -596,18 +200,6 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { return nil } -func shouldStopOnError(err error, opts *ExpandOptions) bool { - if err != nil && !opts.ContinueOnError { - return true - } - - if err != nil { - log.Println(err) - } - - return false -} - // baseForRoot loads in the cache the root document and produces a fake "root" base path entry // for further $ref resolution func baseForRoot(root interface{}, cache ResolutionCache) string { @@ -686,52 +278,6 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, bas return &target, nil } -// basePathFromSchemaID returns a new basePath based on an existing basePath and a schema ID -func basePathFromSchemaID(oldBasePath, id string) string { - u, err := url.Parse(oldBasePath) - if err != nil { - panic(err) - } - uid, err := url.Parse(id) - if err != nil { - panic(err) - } - - if path.IsAbs(uid.Path) { - return id - } - u.Path = path.Join(path.Dir(u.Path), uid.Path) - return u.String() -} - -// isCircular detects cycles in sequences of $ref. -// It relies on a private context (which needs not be locked). 
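Aside (illustration only, not part of the vendored patch): a hedged sketch of calling ExpandSpec with the ExpandOptions shown earlier in this diff. The expandAll helper, docPath parameter, and package name are hypothetical.

package expanddemo // hypothetical

import "github.com/go-openapi/spec"

// expandAll expands every $ref in an already-unmarshalled document.
// docPath is the absolute path (or URL) the document was loaded from; it
// becomes the base for resolving relative $ref values.
func expandAll(doc *spec.Swagger, docPath string) error {
	opts := &spec.ExpandOptions{
		RelativeBase:    docPath,
		SkipSchemas:     false, // also expand schema $ref, not only parameters/responses/path items
		ContinueOnError: false, // stop at the first resolution error
	}
	return spec.ExpandSpec(doc, opts)
}
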
-func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { - normalizedRef := normalizePaths(ref.String(), basePath) - if _, ok := r.context.circulars[normalizedRef]; ok { - // circular $ref has been already detected in another explored cycle - foundCycle = true - return - } - foundCycle = swag.ContainsStringsCI(parentRefs, normalizedRef) - if foundCycle { - r.context.circulars[normalizedRef] = true - } - return -} - -func updateBasePath(transitive *schemaLoader, resolver *schemaLoader, basePath string) string { - if transitive != resolver { - debugLog("got a new resolver") - if transitive.options != nil && transitive.options.RelativeBase != "" { - basePath, _ = absPath(transitive.options.RelativeBase) - debugLog("new basePath = %s", basePath) - } - } - - return basePath -} - func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { if target.Ref.String() == "" && target.Ref.IsRoot() { // normalizing is important @@ -741,8 +287,8 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } - /* change the base path of resolution when an ID is encountered - otherwise the basePath should inherit the parent's */ + // change the base path of resolution when an ID is encountered + // otherwise the basePath should inherit the parent's // important: ID can be relative path if target.ID != "" { debugLog("schema has ID: %s", target.ID) @@ -756,12 +302,11 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba basePath = normalizePaths(refPath, basePath) } - /* Explain here what this function does */ var t *Schema - /* if Ref is found, everything else doesn't matter */ - /* Ref also changes the resolution scope of children expandSchema */ + // if Ref is found, everything else doesn't matter + // Ref also changes the resolution scope of children expandSchema if target.Ref.String() != "" { - /* Here the resolution scope is changed because a $ref was encountered */ + // here the resolution scope is changed because a $ref was encountered normalizedRef := normalizeFileRef(&target.Ref, basePath) normalizedBasePath := normalizedRef.RemoteURI() @@ -779,31 +324,27 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba return &target, nil } - debugLog("basePath: %s", basePath) - if Debug { - b, _ := json.Marshal(target) - debugLog("calling Resolve with target: %s", string(b)) - } - if err := resolver.Resolve(&target.Ref, &t, basePath); shouldStopOnError(err, resolver.options) { + debugLog("basePath: %s: calling Resolve with target: %#v", basePath, target) + if err := resolver.Resolve(&target.Ref, &t, basePath); resolver.shouldStopOnError(err) { return nil, err } if t != nil { parentRefs = append(parentRefs, normalizedRef.String()) var err error - transitiveResolver, err := transitiveResolver(basePath, target.Ref, resolver) - if shouldStopOnError(err, resolver.options) { + transitiveResolver, err := resolver.transitiveResolver(basePath, target.Ref) + if transitiveResolver.shouldStopOnError(err) { return nil, err } - basePath = updateBasePath(transitiveResolver, resolver, normalizedBasePath) + basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) return expandSchema(*t, parentRefs, transitiveResolver, basePath) } } t, err := expandItems(target, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -812,21 
+353,21 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba for i := range target.AllOf { t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } target.AllOf[i] = *t } for i := range target.AnyOf { t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } target.AnyOf[i] = *t } for i := range target.OneOf { t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -835,7 +376,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } if target.Not != nil { t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -844,7 +385,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } for k := range target.Properties { t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -853,7 +394,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -862,7 +403,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } for k := range target.PatternProperties { t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -872,7 +413,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba for k := range target.Dependencies { if target.Dependencies[k].Schema != nil { t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -882,7 +423,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -891,7 +432,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } for k := range target.Definitions { t, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if resolver.shouldStopOnError(err) { return &target, err } if t != nil { @@ -901,75 +442,42 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba return &target, nil } -func derefPathItem(pathItem *PathItem, parentRefs []string, resolver *schemaLoader, basePath string) error { - 
curRef := pathItem.Ref.String() - if curRef != "" { - normalizedRef := normalizeFileRef(&pathItem.Ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if resolver.isCircular(normalizedRef, basePath, parentRefs...) { - return nil - } - - if err := resolver.Resolve(&pathItem.Ref, pathItem, basePath); shouldStopOnError(err, resolver.options) { - return err - } - - if pathItem.Ref.String() != "" && pathItem.Ref.String() != curRef && basePath != normalizedBasePath { - parentRefs = append(parentRefs, normalizedRef.String()) - return derefPathItem(pathItem, parentRefs, resolver, normalizedBasePath) - } - } - - return nil -} - func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { if pathItem == nil { return nil } parentRefs := []string{} - if err := derefPathItem(pathItem, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { + if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { return err } if pathItem.Ref.String() != "" { var err error - resolver, err = transitiveResolver(basePath, pathItem.Ref, resolver) - if shouldStopOnError(err, resolver.options) { + resolver, err = resolver.transitiveResolver(basePath, pathItem.Ref) + if resolver.shouldStopOnError(err) { return err } } pathItem.Ref = Ref{} - // Currently unused: - //parentRefs = parentRefs[0:] - for idx := range pathItem.Parameters { - if err := expandParameter(&(pathItem.Parameters[idx]), resolver, basePath); shouldStopOnError(err, resolver.options) { + if err := expandParameterOrResponse(&(pathItem.Parameters[idx]), resolver, basePath); resolver.shouldStopOnError(err) { return err } } - if err := expandOperation(pathItem.Get, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err + ops := []*Operation{ + pathItem.Get, + pathItem.Head, + pathItem.Options, + pathItem.Put, + pathItem.Post, + pathItem.Patch, + pathItem.Delete, } - if err := expandOperation(pathItem.Head, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err - } - if err := expandOperation(pathItem.Options, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err - } - if err := expandOperation(pathItem.Put, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err - } - if err := expandOperation(pathItem.Post, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err - } - if err := expandOperation(pathItem.Patch, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err - } - if err := expandOperation(pathItem.Delete, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err + for _, op := range ops { + if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } } return nil } @@ -979,8 +487,9 @@ func expandOperation(op *Operation, resolver *schemaLoader, basePath string) err return nil } - for i, param := range op.Parameters { - if err := expandParameter(¶m, resolver, basePath); shouldStopOnError(err, resolver.options) { + for i := range op.Parameters { + param := op.Parameters[i] + if err := expandParameterOrResponse(¶m, resolver, basePath); resolver.shouldStopOnError(err) { return err } op.Parameters[i] = param @@ -988,11 +497,12 @@ func expandOperation(op *Operation, resolver *schemaLoader, basePath string) err if op.Responses != nil { responses := op.Responses - if err := expandResponse(responses.Default, resolver, basePath); shouldStopOnError(err, resolver.options) { + if err := 
expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { return err } - for code, response := range responses.StatusCodeResponses { - if err := expandResponse(&response, resolver, basePath); shouldStopOnError(err, resolver.options) { + for code := range responses.StatusCodeResponses { + response := responses.StatusCodeResponses[code] + if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { return err } responses.StatusCodeResponses[code] = response @@ -1001,34 +511,6 @@ func expandOperation(op *Operation, resolver *schemaLoader, basePath string) err return nil } -func transitiveResolver(basePath string, ref Ref, resolver *schemaLoader) (*schemaLoader, error) { - if ref.IsRoot() || ref.HasFragmentOnly { - return resolver, nil - } - - baseRef, _ := NewRef(basePath) - currentRef := normalizeFileRef(&ref, basePath) - // Set a new root to resolve against - if !strings.HasPrefix(currentRef.String(), baseRef.String()) { - rootURL := currentRef.GetURL() - rootURL.Fragment = "" - root, _ := resolver.cache.Get(rootURL.String()) - var err error - - // shallow copy of resolver options to set a new RelativeBase when - // traversing multiple documents - newOptions := resolver.options - newOptions.RelativeBase = rootURL.String() - debugLog("setting new root: %s", newOptions.RelativeBase) - resolver, err = defaultSchemaLoader(root, newOptions, resolver.cache, resolver.context) - if err != nil { - return nil, err - } - } - - return resolver, nil -} - // ExpandResponseWithRoot expands a response based on a root document, not a fetchable document func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { opts := &ExpandOptions{ @@ -1043,7 +525,7 @@ func ExpandResponseWithRoot(response *Response, root interface{}, cache Resoluti return err } - return expandResponse(response, resolver, opts.RelativeBase) + return expandParameterOrResponse(response, resolver, opts.RelativeBase) } // ExpandResponse expands a response based on a basepath @@ -1062,70 +544,7 @@ func ExpandResponse(response *Response, basePath string) error { return err } - return expandResponse(response, resolver, opts.RelativeBase) -} - -func derefResponse(response *Response, parentRefs []string, resolver *schemaLoader, basePath string) error { - curRef := response.Ref.String() - if curRef != "" { - /* Here the resolution scope is changed because a $ref was encountered */ - normalizedRef := normalizeFileRef(&response.Ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if resolver.isCircular(normalizedRef, basePath, parentRefs...) 
{ - return nil - } - - if err := resolver.Resolve(&response.Ref, response, basePath); shouldStopOnError(err, resolver.options) { - return err - } - - if response.Ref.String() != "" && response.Ref.String() != curRef && basePath != normalizedBasePath { - parentRefs = append(parentRefs, normalizedRef.String()) - return derefResponse(response, parentRefs, resolver, normalizedBasePath) - } - } - - return nil -} - -func expandResponse(response *Response, resolver *schemaLoader, basePath string) error { - if response == nil { - return nil - } - parentRefs := []string{} - if err := derefResponse(response, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { - return err - } - if response.Ref.String() != "" { - transitiveResolver, err := transitiveResolver(basePath, response.Ref, resolver) - if shouldStopOnError(err, transitiveResolver.options) { - return err - } - basePath = updateBasePath(transitiveResolver, resolver, basePath) - resolver = transitiveResolver - } - if response.Schema != nil && response.Schema.Ref.String() != "" { - // schema expanded to a $ref in another root - var ern error - response.Schema.Ref, ern = NewRef(normalizePaths(response.Schema.Ref.String(), response.Ref.RemoteURI())) - if ern != nil { - return ern - } - } - response.Ref = Ref{} - - parentRefs = parentRefs[0:] - if !resolver.options.SkipSchemas && response.Schema != nil { - // parentRefs = append(parentRefs, response.Schema.Ref.String()) - s, err := expandSchema(*response.Schema, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { - return err - } - *response.Schema = *s - } - - return nil + return expandParameterOrResponse(response, resolver, opts.RelativeBase) } // ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document @@ -1142,10 +561,10 @@ func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache Resol return err } - return expandParameter(parameter, resolver, opts.RelativeBase) + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) } -// ExpandParameter expands a parameter based on a basepath +// ExpandParameter expands a parameter based on a basepath. // This is the exported version of expandParameter // all refs inside parameter will be resolved relative to basePath func ExpandParameter(parameter *Parameter, basePath string) error { @@ -1161,67 +580,71 @@ func ExpandParameter(parameter *Parameter, basePath string) error { return err } - return expandParameter(parameter, resolver, opts.RelativeBase) + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) } -func derefParameter(parameter *Parameter, parentRefs []string, resolver *schemaLoader, basePath string) error { - curRef := parameter.Ref.String() - if curRef != "" { - normalizedRef := normalizeFileRef(¶meter.Ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if resolver.isCircular(normalizedRef, basePath, parentRefs...) 
{ - return nil - } - - if err := resolver.Resolve(¶meter.Ref, parameter, basePath); shouldStopOnError(err, resolver.options) { - return err +func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { + var ref *Ref + var sch *Schema + switch refable := input.(type) { + case *Parameter: + if refable == nil { + return nil, nil, nil } - - if parameter.Ref.String() != "" && parameter.Ref.String() != curRef && basePath != normalizedBasePath { - parentRefs = append(parentRefs, normalizedRef.String()) - return derefParameter(parameter, parentRefs, resolver, normalizedBasePath) + ref = &refable.Ref + sch = refable.Schema + case *Response: + if refable == nil { + return nil, nil, nil } + ref = &refable.Ref + sch = refable.Schema + default: + return nil, nil, fmt.Errorf("expand: unsupported type %T. Input should be of type *Parameter or *Response", input) } - - return nil + return ref, sch, nil } -func expandParameter(parameter *Parameter, resolver *schemaLoader, basePath string) error { - if parameter == nil { +func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { + ref, _, err := getRefAndSchema(input) + if err != nil { + return err + } + if ref == nil { return nil } - parentRefs := []string{} - if err := derefParameter(parameter, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { + if err := resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { return err } - if parameter.Ref.String() != "" { - transitiveResolver, err := transitiveResolver(basePath, parameter.Ref, resolver) - if shouldStopOnError(err, transitiveResolver.options) { + ref, sch, _ := getRefAndSchema(input) + if ref.String() != "" { + transitiveResolver, err := resolver.transitiveResolver(basePath, *ref) + if transitiveResolver.shouldStopOnError(err) { return err } - basePath = updateBasePath(transitiveResolver, resolver, basePath) + basePath = resolver.updateBasePath(transitiveResolver, basePath) resolver = transitiveResolver } - if parameter.Schema != nil && parameter.Schema.Ref.String() != "" { + if sch != nil && sch.Ref.String() != "" { // schema expanded to a $ref in another root var ern error - parameter.Schema.Ref, ern = NewRef(normalizePaths(parameter.Schema.Ref.String(), parameter.Ref.RemoteURI())) + sch.Ref, ern = NewRef(normalizePaths(sch.Ref.String(), ref.RemoteURI())) if ern != nil { return ern } } - parameter.Ref = Ref{} + if ref != nil { + *ref = Ref{} + } - parentRefs = parentRefs[0:] - if !resolver.options.SkipSchemas && parameter.Schema != nil { - s, err := expandSchema(*parameter.Schema, parentRefs, resolver, basePath) - if shouldStopOnError(err, resolver.options) { + if !resolver.options.SkipSchemas && sch != nil { + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { return err } - *parameter.Schema = *s + *sch = *s } return nil } diff --git a/vendor/github.com/go-openapi/spec/go.mod b/vendor/github.com/go-openapi/spec/go.mod index 5af64c10b5746..42073be007559 100644 --- a/vendor/github.com/go-openapi/spec/go.mod +++ b/vendor/github.com/go-openapi/spec/go.mod @@ -1,16 +1,14 @@ module github.com/go-openapi/spec require ( - github.com/PuerkitoBio/purell v1.1.0 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-openapi/jsonpointer v0.17.0 - github.com/go-openapi/jsonreference v0.17.0 - github.com/go-openapi/swag v0.17.0 - github.com/mailru/easyjson 
v0.0.0-20180823135443-60711f1a8329 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - golang.org/x/net v0.0.0-20181005035420-146acd28ed58 // indirect - golang.org/x/text v0.3.0 // indirect - gopkg.in/yaml.v2 v2.2.1 + github.com/go-openapi/jsonpointer v0.19.2 + github.com/go-openapi/jsonreference v0.19.2 + github.com/go-openapi/swag v0.19.2 + github.com/kr/pty v1.1.5 // indirect + github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/testify v1.3.0 + golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 // indirect + golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f // indirect + golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 // indirect + gopkg.in/yaml.v2 v2.2.2 ) diff --git a/vendor/github.com/go-openapi/spec/go.sum b/vendor/github.com/go-openapi/spec/go.sum index ab6bfb6086ad6..73e97a2d73e13 100644 --- a/vendor/github.com/go-openapi/spec/go.sum +++ b/vendor/github.com/go-openapi/spec/go.sum @@ -1,22 +1,66 @@ github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/jsonpointer v0.17.0 h1:Bpl2DtZ6k7wKqfFs7e+4P08+M9I3FQgn09a1UsRUQbk= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonreference v0.17.0 h1:d/o7/fsLWWQZACbihvZxcyLQ59jfUVs7WOJv/ak7T7A= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= -github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/jsonpointer v0.17.0 h1:nH6xp8XdXHx8dqveo0ZuJBluCO2qGrPbDNZ0dwoRHP0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8= +github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk= +github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/swag v0.17.0 h1:iqrgMg7Q7SvtbWLlltPrkMs0UBJI6oTSs79JFRUi880= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= 
+github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go index 82f77f7709b11..39efe452bb09f 100644 --- a/vendor/github.com/go-openapi/spec/header.go +++ b/vendor/github.com/go-openapi/spec/header.go @@ -22,6 +22,10 @@ import ( "github.com/go-openapi/swag" ) +const ( + jsonArray = "array" +) + // HeaderProps describes a response header type HeaderProps struct { Description string `json:"description,omitempty"` @@ -57,7 +61,7 @@ func (h *Header) Typed(tpe, format string) *Header { // CollectionOf a fluent builder method for an array item func (h *Header) CollectionOf(items *Items, format string) *Header { - h.Type = "array" + h.Type = jsonArray h.Items = items h.CollectionFormat = format return h diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go index cfb37ec12a646..c458b49b216a9 100644 --- a/vendor/github.com/go-openapi/spec/info.go +++ b/vendor/github.com/go-openapi/spec/info.go @@ -161,8 +161,5 @@ func (i *Info) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &i.InfoProps); err != nil { return err } - if err := json.Unmarshal(data, &i.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &i.VendorExtensible) } diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go index cf4298971664e..365d1631582a5 100644 --- a/vendor/github.com/go-openapi/spec/items.go +++ b/vendor/github.com/go-openapi/spec/items.go @@ -22,9 +22,14 @@ import ( "github.com/go-openapi/swag" ) +const ( + jsonRef = "$ref" +) + // SimpleSchema describe swagger simple schemas for parameters and headers type SimpleSchema struct { Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` Format string `json:"format,omitempty"` Items *Items `json:"items,omitempty"` CollectionFormat string `json:"collectionFormat,omitempty"` @@ -87,9 +92,15 @@ func (i *Items) Typed(tpe, format string) *Items { return i } +// AsNullable flags this schema as nullable. 
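Aside (illustration only, not part of the vendored patch): a hedged sketch of the fluent Items builders touched in this hunk (Typed, CollectionOf, and the new AsNullable). The csvDateTimeArray helper and package name are hypothetical.

package itemsdemo // hypothetical

import "github.com/go-openapi/spec"

// csvDateTimeArray builds Items for an array of nullable date-time strings
// serialized as CSV.
func csvDateTimeArray() *spec.Items {
	elem := (&spec.Items{}).Typed("string", "date-time").AsNullable()
	return (&spec.Items{}).CollectionOf(elem, "csv")
}
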
+func (i *Items) AsNullable() *Items { + i.Nullable = true + return i +} + // CollectionOf a fluent builder method for an array item func (i *Items) CollectionOf(items *Items, format string) *Items { - i.Type = "array" + i.Type = jsonArray i.Items = items i.CollectionFormat = format return i @@ -217,7 +228,7 @@ func (i Items) MarshalJSON() ([]byte, error) { // JSONLookup look up a value by the json property name func (i Items) JSONLookup(token string) (interface{}, error) { - if token == "$ref" { + if token == jsonRef { return &i.Ref, nil } diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go new file mode 100644 index 0000000000000..b8957e7c0c186 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -0,0 +1,152 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "fmt" + "net/url" + "os" + "path" + "path/filepath" + "strings" +) + +// normalize absolute path for cache. +// on Windows, drive letters should be converted to lower as scheme in net/url.URL +func normalizeAbsPath(path string) string { + u, err := url.Parse(path) + if err != nil { + debugLog("normalize absolute path failed: %s", err) + return path + } + return u.String() +} + +// base or refPath could be a file path or a URL +// given a base absolute path and a ref path, return the absolute path of refPath +// 1) if refPath is absolute, return it +// 2) if refPath is relative, join it with basePath keeping the scheme, hosts, and ports if exists +// base could be a directory or a full file path +func normalizePaths(refPath, base string) string { + refURL, _ := url.Parse(refPath) + if path.IsAbs(refURL.Path) || filepath.IsAbs(refPath) { + // refPath is actually absolute + if refURL.Host != "" { + return refPath + } + parts := strings.Split(refPath, "#") + result := filepath.FromSlash(parts[0]) + if len(parts) == 2 { + result += "#" + parts[1] + } + return result + } + + // relative refPath + baseURL, _ := url.Parse(base) + if !strings.HasPrefix(refPath, "#") { + // combining paths + if baseURL.Host != "" { + baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) + } else { // base is a file + newBase := fmt.Sprintf("%s#%s", filepath.Join(filepath.Dir(base), filepath.FromSlash(refURL.Path)), refURL.Fragment) + return newBase + } + + } + // copying fragment from ref to base + baseURL.Fragment = refURL.Fragment + return baseURL.String() +} + +// denormalizePaths returns to simplest notation on file $ref, +// i.e. strips the absolute path and sets a path relative to the base path. 
+// +// This is currently used when we rewrite ref after a circular ref has been detected +func denormalizeFileRef(ref *Ref, relativeBase, originalRelativeBase string) *Ref { + debugLog("denormalizeFileRef for: %s", ref.String()) + + if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { + return ref + } + // strip relativeBase from URI + relativeBaseURL, _ := url.Parse(relativeBase) + relativeBaseURL.Fragment = "" + + if relativeBaseURL.IsAbs() && strings.HasPrefix(ref.String(), relativeBase) { + // this should work for absolute URI (e.g. http://...): we have an exact match, just trim prefix + r, _ := NewRef(strings.TrimPrefix(ref.String(), relativeBase)) + return &r + } + + if relativeBaseURL.IsAbs() { + // other absolute URL get unchanged (i.e. with a non-empty scheme) + return ref + } + + // for relative file URIs: + originalRelativeBaseURL, _ := url.Parse(originalRelativeBase) + originalRelativeBaseURL.Fragment = "" + if strings.HasPrefix(ref.String(), originalRelativeBaseURL.String()) { + // the resulting ref is in the expanded spec: return a local ref + r, _ := NewRef(strings.TrimPrefix(ref.String(), originalRelativeBaseURL.String())) + return &r + } + + // check if we may set a relative path, considering the original base path for this spec. + // Example: + // spec is located at /mypath/spec.json + // my normalized ref points to: /mypath/item.json#/target + // expected result: item.json#/target + parts := strings.Split(ref.String(), "#") + relativePath, err := filepath.Rel(path.Dir(originalRelativeBaseURL.String()), parts[0]) + if err != nil { + // there is no common ancestor (e.g. different drives on windows) + // leaves the ref unchanged + return ref + } + if len(parts) == 2 { + relativePath += "#" + parts[1] + } + r, _ := NewRef(relativePath) + return &r +} + +// relativeBase could be an ABSOLUTE file path or an ABSOLUTE URL +func normalizeFileRef(ref *Ref, relativeBase string) *Ref { + // This is important for when the reference is pointing to the root schema + if ref.String() == "" { + r, _ := NewRef(relativeBase) + return &r + } + + debugLog("normalizing %s against %s", ref.String(), relativeBase) + + s := normalizePaths(ref.String(), relativeBase) + r, _ := NewRef(s) + return &r +} + +// absPath returns the absolute path of a file +func absPath(fname string) (string, error) { + if strings.HasPrefix(fname, "http") { + return fname, nil + } + if filepath.IsAbs(fname) { + return fname, nil + } + wd, err := os.Getwd() + return filepath.Join(wd, fname), err +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go index 32f7d8fe7251e..b1ebd59945b76 100644 --- a/vendor/github.com/go-openapi/spec/operation.go +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -15,24 +15,37 @@ package spec import ( + "bytes" + "encoding/gob" "encoding/json" + "sort" "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" ) +func init() { + //gob.Register(map[string][]interface{}{}) + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) +} + // OperationProps describes an operation +// +// NOTES: +// - schemes, when present must be from [http, https, ws, wss]: see validate +// - Security is handled as a special case: see MarshalJSON function type OperationProps struct { Description string `json:"description,omitempty"` Consumes []string `json:"consumes,omitempty"` Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, 
ws, wss] + Schemes []string `json:"schemes,omitempty"` Tags []string `json:"tags,omitempty"` Summary string `json:"summary,omitempty"` ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` ID string `json:"operationId,omitempty"` Deprecated bool `json:"deprecated,omitempty"` - Security []map[string][]string `json:"security,omitempty"` //Special case, see MarshalJSON function + Security []map[string][]string `json:"security,omitempty"` Parameters []Parameter `json:"parameters,omitempty"` Responses *Responses `json:"responses,omitempty"` } @@ -76,11 +89,17 @@ func (o *Operation) SuccessResponse() (*Response, int, bool) { return nil, 0, false } - for k, v := range o.Responses.StatusCodeResponses { - if k/100 == 2 { - return &v, k, true + responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) + for k := range o.Responses.StatusCodeResponses { + if k >= 200 && k < 300 { + responseCodes = append(responseCodes, k) } } + if len(responseCodes) > 0 { + sort.Ints(responseCodes) + v := o.Responses.StatusCodeResponses[responseCodes[0]] + return &v, responseCodes[0], true + } return o.Responses.Default, 0, false } @@ -99,10 +118,7 @@ func (o *Operation) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &o.OperationProps); err != nil { return err } - if err := json.Unmarshal(data, &o.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &o.VendorExtensible) } // MarshalJSON converts this items object to JSON @@ -216,7 +232,7 @@ func (o *Operation) AddParam(param *Parameter) *Operation { // RemoveParam removes a parameter from the operation func (o *Operation) RemoveParam(name, in string) *Operation { for i, p := range o.Parameters { - if p.Name == name && p.In == name { + if p.Name == name && p.In == in { o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) 
return o } @@ -257,3 +273,126 @@ func (o *Operation) RespondsWith(code int, response *Response) *Operation { o.Responses.StatusCodeResponses[code] = *response return o } + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) 
+ } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go index cb1a88d252a44..cecdff54568d5 100644 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -39,7 +39,8 @@ func PathParam(name string) *Parameter { // BodyParam creates a body parameter func BodyParam(name string, schema *Schema) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, SimpleSchema: SimpleSchema{Type: "object"}} + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, + SimpleSchema: SimpleSchema{Type: "object"}} } // FormDataParam creates a body parameter @@ -49,12 +50,15 @@ func FormDataParam(name string) *Parameter { // FileParam creates a body parameter func FileParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, SimpleSchema: SimpleSchema{Type: "file"}} + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}} } // SimpleArrayParam creates a param for a simple array (string, int, date etc) func SimpleArrayParam(name, tpe, fmt string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name}, SimpleSchema: SimpleSchema{Type: "array", CollectionFormat: "csv", Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} + return &Parameter{ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} } // ParamRef creates a parameter that's a json reference @@ -65,25 +69,43 @@ func ParamRef(uri string) *Parameter { } // ParamProps describes the specific attributes of an operation parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" type ParamProps struct { Description string `json:"description,omitempty"` Name string `json:"name,omitempty"` In string `json:"in,omitempty"` Required bool `json:"required,omitempty"` - Schema *Schema `json:"schema,omitempty"` // when in == "body" - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` // when in == "query" || "formData" + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` } // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). // // There are five possible parameter types. -// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, the path parameter is `itemId`. +// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. // * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. // * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be *one* body parameter. 
The name of the body parameter has no effect on the parameter itself and is used for documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or `multipart/form-data` are used as the content type of the request (in Swagger's definition, the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be declared together with a body parameter for the same operation. Form parameters have a different format based on the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4): -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is `submit-name`. This type of form parameters is more commonly used for file transfers. +// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
// // For more information: http://goo.gl/8us55a#parameterObject type Parameter struct { @@ -99,7 +121,7 @@ func (p Parameter) JSONLookup(token string) (interface{}, error) { if ex, ok := p.Extensions[token]; ok { return &ex, nil } - if token == "$ref" { + if token == jsonRef { return &p.Ref, nil } @@ -148,7 +170,7 @@ func (p *Parameter) Typed(tpe, format string) *Parameter { // CollectionOf a fluent builder method for an array parameter func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { - p.Type = "array" + p.Type = jsonArray p.Items = items p.CollectionFormat = format return p @@ -270,10 +292,7 @@ func (p *Parameter) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { return err } - if err := json.Unmarshal(data, &p.ParamProps); err != nil { - return err - } - return nil + return json.Unmarshal(data, &p.ParamProps) } // MarshalJSON converts this items object to JSON diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go index a8ae63ece52b2..68fc8e90144ed 100644 --- a/vendor/github.com/go-openapi/spec/path_item.go +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -50,7 +50,7 @@ func (p PathItem) JSONLookup(token string) (interface{}, error) { if ex, ok := p.Extensions[token]; ok { return &ex, nil } - if token == "$ref" { + if token == jsonRef { return &p.Ref, nil } r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) @@ -65,10 +65,7 @@ func (p *PathItem) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { return err } - if err := json.Unmarshal(data, &p.PathItemProps); err != nil { - return err - } - return nil + return json.Unmarshal(data, &p.PathItemProps) } // MarshalJSON converts this items object to JSON diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go index 1405bfd8ee2b0..08ff869b2fcd2 100644 --- a/vendor/github.com/go-openapi/spec/ref.go +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -15,6 +15,8 @@ package spec import ( + "bytes" + "encoding/gob" "encoding/json" "net/http" "os" @@ -148,6 +150,28 @@ func (r *Ref) UnmarshalJSON(d []byte) error { return r.fromMap(v) } +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + func (r *Ref) fromMap(v map[string]interface{}) error { if v == nil { return nil diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go index 586db0d780172..27729c1d93b13 100644 --- a/vendor/github.com/go-openapi/spec/response.go +++ b/vendor/github.com/go-openapi/spec/response.go @@ -58,10 +58,7 @@ func (r *Response) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.Refable); err != nil { return err } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &r.VendorExtensible) } // MarshalJSON converts this items object to JSON diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go index 
b9481e29bcdb7..37858ece9098f 100644 --- a/vendor/github.com/go-openapi/spec/schema.go +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -89,7 +89,8 @@ func DateTimeProperty() *Schema { // MapProperty creates a map property func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} } // RefProperty creates a ref property @@ -155,54 +156,6 @@ func (r *SchemaURL) fromMap(v map[string]interface{}) error { return nil } -// type ExtraSchemaProps map[string]interface{} - -// // JSONSchema represents a structure that is a json schema draft 04 -// type JSONSchema struct { -// SchemaProps -// ExtraSchemaProps -// } - -// // MarshalJSON marshal this to JSON -// func (s JSONSchema) MarshalJSON() ([]byte, error) { -// b1, err := json.Marshal(s.SchemaProps) -// if err != nil { -// return nil, err -// } -// b2, err := s.Ref.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b3, err := s.Schema.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b4, err := json.Marshal(s.ExtraSchemaProps) -// if err != nil { -// return nil, err -// } -// return swag.ConcatJSON(b1, b2, b3, b4), nil -// } - -// // UnmarshalJSON marshal this from JSON -// func (s *JSONSchema) UnmarshalJSON(data []byte) error { -// var sch JSONSchema -// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Ref); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Schema); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { -// return err -// } -// *s = sch -// return nil -// } - // SchemaProps describes a JSON schema (draft 4) type SchemaProps struct { ID string `json:"id,omitempty"` @@ -210,6 +163,7 @@ type SchemaProps struct { Schema SchemaURL `json:"-"` Description string `json:"description,omitempty"` Type StringOrArray `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` Format string `json:"format,omitempty"` Title string `json:"title,omitempty"` Default interface{} `json:"default,omitempty"` @@ -349,9 +303,15 @@ func (s *Schema) AddType(tpe, format string) *Schema { return s } +// AsNullable flags this schema as nullable. +func (s *Schema) AsNullable() *Schema { + s.Nullable = true + return s +} + // CollectionOf a fluent builder method for an array parameter func (s *Schema) CollectionOf(items Schema) *Schema { - s.Type = []string{"array"} + s.Type = []string{jsonArray} s.Items = &SchemaOrArray{Schema: &items} return s } diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go new file mode 100644 index 0000000000000..c34a96fa04e77 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -0,0 +1,275 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strings" + + "github.com/go-openapi/swag" +) + +// PathLoader function to use when loading remote refs +var PathLoader func(string) (json.RawMessage, error) + +func init() { + PathLoader = func(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil + } +} + +// resolverContext allows to share a context during spec processing. +// At the moment, it just holds the index of circular references found. +type resolverContext struct { + // circulars holds all visited circular references, which allows shortcuts. + // NOTE: this is not just a performance improvement: it is required to figure out + // circular references which participate several cycles. + // This structure is privately instantiated and needs not be locked against + // concurrent access, unless we chose to implement a parallel spec walking. + circulars map[string]bool + basePath string +} + +func newResolverContext(originalBasePath string) *resolverContext { + return &resolverContext{ + circulars: make(map[string]bool), + basePath: originalBasePath, // keep the root base path in context + } +} + +type schemaLoader struct { + root interface{} + options *ExpandOptions + cache ResolutionCache + context *resolverContext + loadDoc func(string) (json.RawMessage, error) +} + +func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) (*schemaLoader, error) { + if ref.IsRoot() || ref.HasFragmentOnly { + return r, nil + } + + baseRef, _ := NewRef(basePath) + currentRef := normalizeFileRef(&ref, basePath) + if strings.HasPrefix(currentRef.String(), baseRef.String()) { + return r, nil + } + + // Set a new root to resolve against + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := r.cache.Get(rootURL.String()) + + // shallow copy of resolver options to set a new RelativeBase when + // traversing multiple documents + newOptions := r.options + newOptions.RelativeBase = rootURL.String() + debugLog("setting new root: %s", newOptions.RelativeBase) + resolver, err := defaultSchemaLoader(root, newOptions, r.cache, r.context) + if err != nil { + return nil, err + } + + return resolver, nil +} + +func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { + if transitive != r { + debugLog("got a new resolver") + if transitive.options != nil && transitive.options.RelativeBase != "" { + basePath, _ = absPath(transitive.options.RelativeBase) + debugLog("new basePath = %s", basePath) + } + } + return basePath +} + +func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return fmt.Errorf("resolve ref: target needs to be a pointer") + } + + refURL := ref.GetURL() + if refURL == nil { + return nil + } + + var res interface{} + var data interface{} + var err error + // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means + // it is pointing somewhere in the root. 
+ root := r.root + if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { + if baseRef, erb := NewRef(basePath); erb == nil { + root, _, _, _ = r.load(baseRef.GetURL()) + } + } + if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { + data = root + } else { + baseRef := normalizeFileRef(ref, basePath) + debugLog("current ref is: %s", ref.String()) + debugLog("current ref normalized file: %s", baseRef.String()) + data, _, _, err = r.load(baseRef.GetURL()) + if err != nil { + return err + } + } + + res = data + if ref.String() != "" { + res, _, err = ref.GetPointer().Get(data) + if err != nil { + return err + } + } + return swag.DynamicJSONToStruct(res, target) +} + +func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { + debugLog("loading schema from url: %s", refURL) + toFetch := *refURL + toFetch.Fragment = "" + + normalized := normalizeAbsPath(toFetch.String()) + + data, fromCache := r.cache.Get(normalized) + if !fromCache { + b, err := r.loadDoc(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + if err := json.Unmarshal(b, &data); err != nil { + return nil, url.URL{}, false, err + } + r.cache.Set(normalized, data) + } + + return data, toFetch, fromCache, nil +} + +// isCircular detects cycles in sequences of $ref. +// It relies on a private context (which needs not be locked). +func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { + normalizedRef := normalizePaths(ref.String(), basePath) + if _, ok := r.context.circulars[normalizedRef]; ok { + // circular $ref has been already detected in another explored cycle + foundCycle = true + return + } + foundCycle = swag.ContainsStringsCI(parentRefs, normalizedRef) + if foundCycle { + r.context.circulars[normalizedRef] = true + } + return +} + +// Resolve resolves a reference against basePath and stores the result in target +// Resolve is not in charge of following references, it only resolves ref by following its URL +// if the schema that ref is referring to has more refs in it. Resolve doesn't resolve them +// if basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct +func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { + return r.resolveRef(ref, target, basePath) +} + +func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { + var ref *Ref + switch refable := input.(type) { + case *Schema: + ref = &refable.Ref + case *Parameter: + ref = &refable.Ref + case *Response: + ref = &refable.Ref + case *PathItem: + ref = &refable.Ref + default: + return fmt.Errorf("deref: unsupported type %T", input) + } + + curRef := ref.String() + if curRef != "" { + normalizedRef := normalizeFileRef(ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if r.isCircular(normalizedRef, basePath, parentRefs...) 
{ + return nil + } + + if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { + return err + } + + // NOTE(fredbi): removed basePath check => needs more testing + if ref.String() != "" && ref.String() != curRef { + parentRefs = append(parentRefs, normalizedRef.String()) + return r.deref(input, parentRefs, normalizedBasePath) + } + } + + return nil +} + +func (r *schemaLoader) shouldStopOnError(err error) bool { + if err != nil && !r.options.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + +func defaultSchemaLoader( + root interface{}, + expandOptions *ExpandOptions, + cache ResolutionCache, + context *resolverContext) (*schemaLoader, error) { + + if cache == nil { + cache = resCache + } + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } + absBase, _ := absPath(expandOptions.RelativeBase) + if context == nil { + context = newResolverContext(absBase) + } + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + loadDoc: func(path string) (json.RawMessage, error) { + debugLog("fetching document at %q", path) + return PathLoader(path) + }, + }, nil +} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go index 9f1b454eac91b..fe353842a6fc4 100644 --- a/vendor/github.com/go-openapi/spec/security_scheme.go +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -136,8 +136,5 @@ func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { return err } - if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &s.VendorExtensible) } diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index 4586a21c86023..44722ffd5adc8 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -15,6 +15,8 @@ package spec import ( + "bytes" + "encoding/gob" "encoding/json" "fmt" "strconv" @@ -24,7 +26,8 @@ import ( ) // Swagger this is the root document object for the API specification. -// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) together into one document. +// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) +// together into one document. 
// // For more information: http://goo.gl/8us55a#swagger-object- type Swagger struct { @@ -67,17 +70,52 @@ func (s *Swagger) UnmarshalJSON(data []byte) error { return nil } +// GobEncode provides a safe gob encoder for Swagger, including extensions +func (s Swagger) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw := struct { + Props SwaggerProps + Ext VendorExtensible + }{ + Props: s.SwaggerProps, + Ext: s.VendorExtensible, + } + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Swagger, including extensions +func (s *Swagger) GobDecode(b []byte) error { + var raw struct { + Props SwaggerProps + Ext VendorExtensible + } + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + s.SwaggerProps = raw.Props + s.VendorExtensible = raw.Ext + return nil +} + // SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required type SwaggerProps struct { ID string `json:"id,omitempty"` Consumes []string `json:"consumes,omitempty"` Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss] + Schemes []string `json:"schemes,omitempty"` Swagger string `json:"swagger,omitempty"` Info *Info `json:"info,omitempty"` Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` // must start with a leading "/" - Paths *Paths `json:"paths"` // required + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` Definitions Definitions `json:"definitions,omitempty"` Parameters map[string]Parameter `json:"parameters,omitempty"` Responses map[string]Response `json:"responses,omitempty"` @@ -87,6 +125,98 @@ type SwaggerProps struct { ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` } +type swaggerPropsAlias SwaggerProps + +type gobSwaggerPropsAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *swaggerPropsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +func (o SwaggerProps) GobEncode() ([]byte, error) { + raw := gobSwaggerPropsAlias{ + Alias: (*swaggerPropsAlias)(&o), + } + + var b bytes.Buffer + if o.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(o.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(o.Security)) + for _, req := range o.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements +func (o *SwaggerProps) GobDecode(b []byte) error { + var raw gobSwaggerPropsAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + 
switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *o = *(*SwaggerProps)(raw.Alias) + return nil +} + // Dependencies represent a dependencies property type Dependencies map[string]SchemaOrStringArray @@ -244,9 +374,9 @@ func (s *StringOrArray) UnmarshalJSON(data []byte) error { if single == nil { return nil } - switch single.(type) { + switch v := single.(type) { case string: - *s = StringOrArray([]string{single.(string)}) + *s = StringOrArray([]string{v}) return nil default: return fmt.Errorf("only string or array is allowed, not %T", single) diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go index 25256c4beca04..faa3d3de1eb49 100644 --- a/vendor/github.com/go-openapi/spec/tag.go +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -30,10 +30,11 @@ type TagProps struct { // NewTag creates a new tag func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { - return Tag{TagProps: TagProps{description, name, externalDocs}} + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} } -// Tag allows adding meta data to a single tag that is used by the [Operation Object](http://goo.gl/8us55a#operationObject). +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). // It is not mandatory to have a Tag Object per tag used there. // // For more information: http://goo.gl/8us55a#tagObject diff --git a/vendor/github.com/go-openapi/spec/unused.go b/vendor/github.com/go-openapi/spec/unused.go new file mode 100644 index 0000000000000..aa12b56f6e49d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/unused.go @@ -0,0 +1,174 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +/* + +import ( + "net/url" + "os" + "path" + "path/filepath" + + "github.com/go-openapi/jsonpointer" +) + + // Some currently unused functions and definitions that + // used to be part of the expander. 
+ + // Moved here for the record and possible future reuse + +var ( + idPtr, _ = jsonpointer.New("/id") + refPtr, _ = jsonpointer.New("/$ref") +) + +func idFromNode(node interface{}) (*Ref, error) { + if idValue, _, err := idPtr.Get(node); err == nil { + if refStr, ok := idValue.(string); ok && refStr != "" { + idRef, err := NewRef(refStr) + if err != nil { + return nil, err + } + return &idRef, nil + } + } + return nil, nil +} + +func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { + if startingRef == nil { + return nil + } + + if ptr == nil { + return startingRef + } + + ret := startingRef + var idRef *Ref + node := startingNode + + for _, tok := range ptr.DecodedTokens() { + node, _, _ = jsonpointer.GetForToken(node, tok) + if node == nil { + break + } + + idRef, _ = idFromNode(node) + if idRef != nil { + nw, err := ret.Inherits(*idRef) + if err != nil { + break + } + ret = nw + } + + refRef, _, _ := refPtr.Get(node) + if refRef != nil { + var rf Ref + switch value := refRef.(type) { + case string: + rf, _ = NewRef(value) + } + nw, err := ret.Inherits(rf) + if err != nil { + break + } + nwURL := nw.GetURL() + if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") { + nwpt := filepath.ToSlash(nwURL.Path) + if filepath.IsAbs(nwpt) { + _, err := os.Stat(nwpt) + if err != nil { + nwURL.Path = filepath.Join(".", nwpt) + } + } + } + + ret = nw + } + + } + + return ret +} + +// basePathFromSchemaID returns a new basePath based on an existing basePath and a schema ID +func basePathFromSchemaID(oldBasePath, id string) string { + u, err := url.Parse(oldBasePath) + if err != nil { + panic(err) + } + uid, err := url.Parse(id) + if err != nil { + panic(err) + } + + if path.IsAbs(uid.Path) { + return id + } + u.Path = path.Join(path.Dir(u.Path), uid.Path) + return u.String() +} +*/ + +// type ExtraSchemaProps map[string]interface{} + +// // JSONSchema represents a structure that is a json schema draft 04 +// type JSONSchema struct { +// SchemaProps +// ExtraSchemaProps +// } + +// // MarshalJSON marshal this to JSON +// func (s JSONSchema) MarshalJSON() ([]byte, error) { +// b1, err := json.Marshal(s.SchemaProps) +// if err != nil { +// return nil, err +// } +// b2, err := s.Ref.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b3, err := s.Schema.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b4, err := json.Marshal(s.ExtraSchemaProps) +// if err != nil { +// return nil, err +// } +// return swag.ConcatJSON(b1, b2, b3, b4), nil +// } + +// // UnmarshalJSON marshal this from JSON +// func (s *JSONSchema) UnmarshalJSON(data []byte) error { +// var sch JSONSchema +// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Ref); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Schema); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { +// return err +// } +// *s = sch +// return nil +// } diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 6b237e4a795bf..625c3d6affe1c 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -18,3 +18,5 @@ linters: disable: - maligned - lll + - gochecknoinits + - gochecknoglobals diff --git a/vendor/github.com/go-openapi/swag/.travis.yml b/vendor/github.com/go-openapi/swag/.travis.yml index bd3a2e527dd0a..aa26d8763aa38 
100644 --- a/vendor/github.com/go-openapi/swag/.travis.yml +++ b/vendor/github.com/go-openapi/swag/.travis.yml @@ -1,16 +1,15 @@ after_success: - bash <(curl -s https://codecov.io/bash) go: -- '1.9' -- 1.10.x - 1.11.x +- 1.12.x install: -- go get -u github.com/stretchr/testify -- go get -u github.com/mailru/easyjson -- go get -u gopkg.in/yaml.v2 +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on language: go notifications: slack: secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... +- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/swag/BUILD.bazel b/vendor/github.com/go-openapi/swag/BUILD.bazel index 0705c1fcd9d2a..325460bc78b8f 100644 --- a/vendor/github.com/go-openapi/swag/BUILD.bazel +++ b/vendor/github.com/go-openapi/swag/BUILD.bazel @@ -8,12 +8,14 @@ go_library( "doc.go", "json.go", "loading.go", + "name_lexem.go", "net.go", "path.go", "post_go18.go", "post_go19.go", "pre_go18.go", "pre_go19.go", + "split.go", "util.go", "yaml.go", ], diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index 459a3e18d7958..eb60ae80abe68 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -19,5 +19,4 @@ You may also use it standalone for your projects. This repo has only few dependencies outside of the standard library: -* JSON utilities depend on github.com/mailru/easyjson * YAML utilities depend on gopkg.in/yaml.v2 diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go index e01e1a02347b8..8d2c8c5014e2a 100644 --- a/vendor/github.com/go-openapi/swag/doc.go +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -27,7 +27,6 @@ You may also use it standalone for your projects. 
This repo has only few dependencies outside of the standard library: - * JSON utilities depend on github.com/mailru/easyjson * YAML utilities depend on gopkg.in/yaml.v2 */ package swag diff --git a/vendor/github.com/go-openapi/swag/go.mod b/vendor/github.com/go-openapi/swag/go.mod index 9eb936a19bb96..15bbb082227e8 100644 --- a/vendor/github.com/go-openapi/swag/go.mod +++ b/vendor/github.com/go-openapi/swag/go.mod @@ -2,8 +2,13 @@ module github.com/go-openapi/swag require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - gopkg.in/yaml.v2 v2.2.1 + github.com/kr/pretty v0.1.0 // indirect + github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 + github.com/stretchr/testify v1.3.0 + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/yaml.v2 v2.2.2 ) + +replace github.com/golang/lint => golang.org/x/lint v0.0.0-20190409202823-959b441ac422 + +replace sourcegraph.com/sourcegraph/go-diff => github.com/sourcegraph/go-diff v0.5.1 diff --git a/vendor/github.com/go-openapi/swag/go.sum b/vendor/github.com/go-openapi/swag/go.sum index d6e717bd4e185..33469f54acbd5 100644 --- a/vendor/github.com/go-openapi/swag/go.sum +++ b/vendor/github.com/go-openapi/swag/go.sum @@ -1,9 +1,20 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 
h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go index 33da5e4e7d6b9..62ab15e540598 100644 --- a/vendor/github.com/go-openapi/swag/json.go +++ b/vendor/github.com/go-openapi/swag/json.go @@ -68,15 +68,16 @@ func WriteJSON(data interface{}) ([]byte, error) { // ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaller // so it takes the fastes option available func ReadJSON(data []byte, value interface{}) error { + trimmedData := bytes.Trim(data, "\x00") if d, ok := value.(ejUnmarshaler); ok { - jl := &jlexer.Lexer{Data: data} + jl := &jlexer.Lexer{Data: trimmedData} d.UnmarshalEasyJSON(jl) return jl.Error() } if d, ok := value.(json.Unmarshaler); ok { - return d.UnmarshalJSON(data) + return d.UnmarshalJSON(trimmedData) } - return json.Unmarshal(data, value) + return json.Unmarshal(trimmedData, value) } // DynamicJSONToStruct converts an untyped json structure into a struct diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go new file mode 100644 index 0000000000000..aa7f6a9bb8eba --- /dev/null +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -0,0 +1,87 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package swag + +import "unicode" + +type ( + nameLexem interface { + GetUnsafeGoName() string + GetOriginal() string + IsInitialism() bool + } + + initialismNameLexem struct { + original string + matchedInitialism string + } + + casualNameLexem struct { + original string + } +) + +func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { + return &initialismNameLexem{ + original: original, + matchedInitialism: matchedInitialism, + } +} + +func newCasualNameLexem(original string) *casualNameLexem { + return &casualNameLexem{ + original: original, + } +} + +func (l *initialismNameLexem) GetUnsafeGoName() string { + return l.matchedInitialism +} + +func (l *casualNameLexem) GetUnsafeGoName() string { + var first rune + var rest string + for i, orig := range l.original { + if i == 0 { + first = orig + continue + } + if i > 0 { + rest = l.original[i:] + break + } + } + if len(l.original) > 1 { + return string(unicode.ToUpper(first)) + lower(rest) + } + + return l.original +} + +func (l *initialismNameLexem) GetOriginal() string { + return l.original +} + +func (l *casualNameLexem) GetOriginal() string { + return l.original +} + +func (l *initialismNameLexem) IsInitialism() bool { + return true +} + +func (l *casualNameLexem) IsInitialism() bool { + return false +} diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/net.go index 8323fa37b6649..821235f84d4a9 100644 --- a/vendor/github.com/go-openapi/swag/net.go +++ b/vendor/github.com/go-openapi/swag/net.go @@ -1,3 +1,17 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package swag import ( diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go index ef48086db38dc..c2e686d31337c 100644 --- a/vendor/github.com/go-openapi/swag/post_go18.go +++ b/vendor/github.com/go-openapi/swag/post_go18.go @@ -1,3 +1,17 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ // +build go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go index 567680c793a75..eb2f2d8bc7845 100644 --- a/vendor/github.com/go-openapi/swag/post_go19.go +++ b/vendor/github.com/go-openapi/swag/post_go19.go @@ -1,3 +1,17 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // +build go1.9 package swag @@ -48,6 +62,6 @@ func (m *indexOfInitialisms) sorted() (result []string) { result = append(result, k) return true }) - sort.Sort(sort.Reverse(byLength(result))) + sort.Sort(sort.Reverse(byInitialism(result))) return } diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go index 860bb2bbb1e56..6607f339357bf 100644 --- a/vendor/github.com/go-openapi/swag/pre_go18.go +++ b/vendor/github.com/go-openapi/swag/pre_go18.go @@ -1,3 +1,17 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // +build !go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go index 72c48ae7520b5..4bae187d1e432 100644 --- a/vendor/github.com/go-openapi/swag/pre_go19.go +++ b/vendor/github.com/go-openapi/swag/pre_go19.go @@ -1,3 +1,17 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ // +build !go1.9 package swag @@ -50,6 +64,6 @@ func (m *indexOfInitialisms) sorted() (result []string) { for k := range m.index { result = append(result, k) } - sort.Sort(sort.Reverse(byLength(result))) + sort.Sort(sort.Reverse(byInitialism(result))) return } diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go new file mode 100644 index 0000000000000..a1825fb7dc9ac --- /dev/null +++ b/vendor/github.com/go-openapi/swag/split.go @@ -0,0 +1,262 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "unicode" +) + +var nameReplaceTable = map[rune]string{ + '@': "At ", + '&': "And ", + '|': "Pipe ", + '$': "Dollar ", + '!': "Bang ", + '-': "", + '_': "", +} + +type ( + splitter struct { + postSplitInitialismCheck bool + initialisms []string + } + + splitterOption func(*splitter) *splitter +) + +// split calls the splitter; splitter provides more control and post options +func split(str string) []string { + lexems := newSplitter().split(str) + result := make([]string, 0, len(lexems)) + + for _, lexem := range lexems { + result = append(result, lexem.GetOriginal()) + } + + return result + +} + +func (s *splitter) split(str string) []nameLexem { + return s.toNameLexems(str) +} + +func newSplitter(options ...splitterOption) *splitter { + splitter := &splitter{ + postSplitInitialismCheck: false, + initialisms: initialisms, + } + + for _, option := range options { + splitter = option(splitter) + } + + return splitter +} + +// withPostSplitInitialismCheck allows to catch initialisms after main split process +func withPostSplitInitialismCheck(s *splitter) *splitter { + s.postSplitInitialismCheck = true + return s +} + +type ( + initialismMatch struct { + start, end int + body []rune + complete bool + } + initialismMatches []*initialismMatch +) + +func (s *splitter) toNameLexems(name string) []nameLexem { + nameRunes := []rune(name) + matches := s.gatherInitialismMatches(nameRunes) + return s.mapMatchesToNameLexems(nameRunes, matches) +} + +func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { + matches := make(initialismMatches, 0) + + for currentRunePosition, currentRune := range nameRunes { + newMatches := make(initialismMatches, 0, len(matches)) + + // check current initialism matches + for _, match := range matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + newMatches = append(newMatches, match) + continue + } + + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if !s.initialismRuneEqual(currentMatchRune, currentRune) { + continue + } + + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if 
newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } + } + + match.complete = true + match.end = currentRunePosition + } + + newMatches = append(newMatches, match) + } + + // check for new initialism matches + for _, initialism := range s.initialisms { + initialismRunes := []rune(initialism) + if s.initialismRuneEqual(initialismRunes[0], currentRune) { + newMatches = append(newMatches, &initialismMatch{ + start: currentRunePosition, + body: initialismRunes, + complete: false, + }) + } + } + + matches = newMatches + } + + return matches +} + +func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { + nameLexems := make([]nameLexem, 0) + + var lastAcceptedMatch *initialismMatch + for _, match := range matches { + if !match.complete { + continue + } + + if firstMatch := lastAcceptedMatch == nil; firstMatch { + nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) + nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + + lastAcceptedMatch = match + + continue + } + + if overlappedMatch := match.start <= lastAcceptedMatch.end; overlappedMatch { + continue + } + + middle := nameRunes[lastAcceptedMatch.end+1 : match.start] + nameLexems = append(nameLexems, s.breakCasualString(middle)...) + nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + + lastAcceptedMatch = match + } + + // we have not found any accepted matches + if lastAcceptedMatch == nil { + return s.breakCasualString(nameRunes) + } + + if lastAcceptedMatch.end+1 != len(nameRunes) { + rest := nameRunes[lastAcceptedMatch.end+1:] + nameLexems = append(nameLexems, s.breakCasualString(rest)...) + } + + return nameLexems +} + +func (s *splitter) initialismRuneEqual(a, b rune) bool { + return a == b +} + +func (s *splitter) breakInitialism(original string) nameLexem { + return newInitialismNameLexem(original, original) +} + +func (s *splitter) breakCasualString(str []rune) []nameLexem { + segments := make([]nameLexem, 0) + currentSegment := "" + + addCasualNameLexem := func(original string) { + segments = append(segments, newCasualNameLexem(original)) + } + + addInitialismNameLexem := func(original, match string) { + segments = append(segments, newInitialismNameLexem(original, match)) + } + + addNameLexem := func(original string) { + if s.postSplitInitialismCheck { + for _, initialism := range s.initialisms { + if upper(initialism) == upper(original) { + addInitialismNameLexem(original, initialism) + return + } + } + } + + addCasualNameLexem(original) + } + + for _, rn := range string(str) { + if replace, found := nameReplaceTable[rn]; found { + if currentSegment != "" { + addNameLexem(currentSegment) + currentSegment = "" + } + + if replace != "" { + addNameLexem(replace) + } + + continue + } + + if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { + if currentSegment != "" { + addNameLexem(currentSegment) + currentSegment = "" + } + + continue + } + + if unicode.IsUpper(rn) { + if currentSegment != "" { + addNameLexem(currentSegment) + } + currentSegment = "" + } + + currentSegment += string(rn) + } + + if currentSegment != "" { + addNameLexem(currentSegment) + } + + return segments +} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index e659968fb10c8..87488273dd61a 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -15,11 +15,8 @@ package swag import ( - "math" 
"reflect" - "regexp" "strings" - "sync" "unicode" ) @@ -29,8 +26,6 @@ var commonInitialisms *indexOfInitialisms // initialisms is a slice of sorted initialisms var initialisms []string -var once sync.Once - var isInitialism func(string) bool func init() { @@ -49,6 +44,8 @@ func init() { "HTTP": true, "ID": true, "IP": true, + "IPv4": true, + "IPv6": true, "JSON": true, "LHS": true, "OAI": true, @@ -79,15 +76,12 @@ func init() { // a thread-safe index of initialisms commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() // a test function isInitialism = commonInitialisms.isInitialism } -func ensureSorted() { - initialisms = commonInitialisms.sorted() -} - const ( //collectionFormatComma = "csv" collectionFormatSpace = "ssv" @@ -153,49 +147,20 @@ func SplitByFormat(data, format string) []string { return result } -type byLength []string +type byInitialism []string -func (s byLength) Len() int { +func (s byInitialism) Len() int { return len(s) } -func (s byLength) Swap(i, j int) { +func (s byInitialism) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byLength) Less(i, j int) bool { - return len(s[i]) < len(s[j]) -} - -// Prepares strings by splitting by caps, spaces, dashes, and underscore -func split(str string) []string { - repl := strings.NewReplacer( - "@", "At ", - "&", "And ", - "|", "Pipe ", - "$", "Dollar ", - "!", "Bang ", - "-", " ", - "_", " ", - ) - - rex1 := regexp.MustCompile(`(\p{Lu})`) - rex2 := regexp.MustCompile(`(\pL|\pM|\pN|\p{Pc})+`) - - str = trim(str) - - // Convert dash and underscore to spaces - str = repl.Replace(str) - - // Split when uppercase is found (needed for Snake) - str = rex1.ReplaceAllString(str, " $1") - - // check if consecutive single char things make up an initialism - once.Do(ensureSorted) - for _, k := range initialisms { - str = strings.Replace(str, rex1.ReplaceAllString(k, " $1"), " "+k, -1) +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) } - // Get the final list of words - //words = rex2.FindAllString(str, -1) - return rex2.FindAllString(str, -1) + + return strings.Compare(s[i], s[j]) > 0 } // Removes leading whitespaces @@ -215,7 +180,7 @@ func lower(str string) string { // Camelize an uppercased word func Camelize(word string) (camelized string) { - for pos, ru := range word { + for pos, ru := range []rune(word) { if pos > 0 { camelized += string(unicode.ToLower(ru)) } else { @@ -250,30 +215,31 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - in := split(name) + in := newSplitter(withPostSplitInitialismCheck).split(name) out := make([]string, 0, len(in)) for _, w := range in { - if !isInitialism(upper(w)) { - out = append(out, lower(w)) + if !w.IsInitialism() { + out = append(out, lower(w.GetOriginal())) } else { - out = append(out, w) + out = append(out, w.GetOriginal()) } } + return strings.Join(out, " ") } // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - in := split(name) - out := make([]string, 0, len(in)) + in := newSplitter(withPostSplitInitialismCheck).split(name) + out := make([]string, 0, len(in)) for _, w := range in { - uw := upper(w) - if !isInitialism(uw) { - out = append(out, upper(w[:1])+lower(w[1:])) + original := w.GetOriginal() + if !w.IsInitialism() { + out = append(out, Camelize(original)) } else { 
- out = append(out, w) + out = append(out, original) } } return strings.Join(out, " ") @@ -289,7 +255,7 @@ func ToJSONName(name string) string { out = append(out, lower(w)) continue } - out = append(out, upper(w[:1])+lower(w[1:])) + out = append(out, Camelize(w)) } return strings.Join(out, "") } @@ -308,28 +274,25 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - in := split(name) - out := make([]string, 0, len(in)) + lexems := newSplitter(withPostSplitInitialismCheck).split(name) - for _, w := range in { - uw := upper(w) - mod := int(math.Min(float64(len(uw)), 2)) - if !isInitialism(uw) && !isInitialism(uw[:len(uw)-mod]) { - uw = upper(w[:1]) + lower(w[1:]) + result := "" + for _, lexem := range lexems { + goName := lexem.GetUnsafeGoName() + + // to support old behavior + if lexem.IsInitialism() { + goName = upper(goName) } - out = append(out, uw) + result += goName } - result := strings.Join(out, "") if len(result) > 0 { - ud := upper(result[:1]) - ru := []rune(ud) - if unicode.IsUpper(ru[0]) { - result = ud + result[1:] - } else { - result = "X" + ud + result[1:] + if !unicode.IsUpper([]rune(result)[0]) { + result = "X" + result } } + return result } diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index f458c81a84a86..435e2948eb631 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -22,7 +22,6 @@ import ( "github.com/mailru/easyjson/jlexer" "github.com/mailru/easyjson/jwriter" - yaml "gopkg.in/yaml.v2" ) diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE index 7be0cc7b62cf0..f57de90da8ac3 100644 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -1,7 +1,6 @@ -Protocol Buffers for Go with Gadgets - Copyright (c) 2013, The GoGo Authors. All rights reserved. -http://github.com/gogo/protobuf + +Protocol Buffers for Go with Gadgets Go support for Protocol Buffers - Google's data interchange format diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go index 147b5ecc62fda..081c86fa8ecba 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/doc.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -162,7 +162,7 @@ The most complete way to see examples is to look at github.com/gogo/protobuf/test/thetest.proto Gogoprototest is a seperate project, -because we want to keep gogoprotobuf independant of goprotobuf, +because we want to keep gogoprotobuf independent of goprotobuf, but we still want to test it thoroughly. */ diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go index 97843b2448c75..e352808b90373 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -1,12 +1,14 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
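// Illustrative sketch (assumption: a standalone consumer program, outside the
// vendored tree) of the user-visible effect of the swag util.go rewrite
// earlier in this diff: the exported naming helpers now run on the lexem
// splitter with the post-split initialism check, and initialisms found that
// way are upper-cased to preserve the old ToGoName behaviour.

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// "http" is matched against the initialism index after splitting on "_".
	fmt.Println(swag.ToGoName("http_server")) // HTTPServer

	// Results that do not start with an upper-case rune are prefixed with "X"
	// so the generated identifier stays exported.
	fmt.Println(swag.ToGoName("1st_server")) // X1stServer
}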
// source: gogo.proto -package gogoproto // import "github.com/gogo/protobuf/gogoproto" +package gogoproto -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -24,7 +26,7 @@ var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 62001, Name: "gogoproto.goproto_enum_prefix", - Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", Filename: "gogo.proto", } @@ -33,7 +35,7 @@ var E_GoprotoEnumStringer = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 62021, Name: "gogoproto.goproto_enum_stringer", - Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", Filename: "gogo.proto", } @@ -42,7 +44,7 @@ var E_EnumStringer = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 62022, Name: "gogoproto.enum_stringer", - Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer", + Tag: "varint,62022,opt,name=enum_stringer", Filename: "gogo.proto", } @@ -51,7 +53,7 @@ var E_EnumCustomname = &proto.ExtensionDesc{ ExtensionType: (*string)(nil), Field: 62023, Name: "gogoproto.enum_customname", - Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname", + Tag: "bytes,62023,opt,name=enum_customname", Filename: "gogo.proto", } @@ -69,7 +71,7 @@ var E_EnumvalueCustomname = &proto.ExtensionDesc{ ExtensionType: (*string)(nil), Field: 66001, Name: "gogoproto.enumvalue_customname", - Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname", + Tag: "bytes,66001,opt,name=enumvalue_customname", Filename: "gogo.proto", } @@ -78,7 +80,7 @@ var E_GoprotoGettersAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63001, Name: "gogoproto.goproto_getters_all", - Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll", + Tag: "varint,63001,opt,name=goproto_getters_all", Filename: "gogo.proto", } @@ -87,7 +89,7 @@ var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63002, Name: "gogoproto.goproto_enum_prefix_all", - Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", Filename: "gogo.proto", } @@ -96,7 +98,7 @@ var E_GoprotoStringerAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63003, Name: "gogoproto.goproto_stringer_all", - Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll", + Tag: "varint,63003,opt,name=goproto_stringer_all", Filename: "gogo.proto", } @@ -105,7 +107,7 @@ var E_VerboseEqualAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63004, Name: "gogoproto.verbose_equal_all", - Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll", + Tag: "varint,63004,opt,name=verbose_equal_all", Filename: "gogo.proto", } @@ -114,7 +116,7 @@ var E_FaceAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63005, Name: "gogoproto.face_all", - Tag: "varint,63005,opt,name=face_all,json=faceAll", + Tag: "varint,63005,opt,name=face_all", Filename: "gogo.proto", } @@ -123,7 +125,7 @@ var E_GostringAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 
63006, Name: "gogoproto.gostring_all", - Tag: "varint,63006,opt,name=gostring_all,json=gostringAll", + Tag: "varint,63006,opt,name=gostring_all", Filename: "gogo.proto", } @@ -132,7 +134,7 @@ var E_PopulateAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63007, Name: "gogoproto.populate_all", - Tag: "varint,63007,opt,name=populate_all,json=populateAll", + Tag: "varint,63007,opt,name=populate_all", Filename: "gogo.proto", } @@ -141,7 +143,7 @@ var E_StringerAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63008, Name: "gogoproto.stringer_all", - Tag: "varint,63008,opt,name=stringer_all,json=stringerAll", + Tag: "varint,63008,opt,name=stringer_all", Filename: "gogo.proto", } @@ -150,7 +152,7 @@ var E_OnlyoneAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63009, Name: "gogoproto.onlyone_all", - Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll", + Tag: "varint,63009,opt,name=onlyone_all", Filename: "gogo.proto", } @@ -159,7 +161,7 @@ var E_EqualAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63013, Name: "gogoproto.equal_all", - Tag: "varint,63013,opt,name=equal_all,json=equalAll", + Tag: "varint,63013,opt,name=equal_all", Filename: "gogo.proto", } @@ -168,7 +170,7 @@ var E_DescriptionAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63014, Name: "gogoproto.description_all", - Tag: "varint,63014,opt,name=description_all,json=descriptionAll", + Tag: "varint,63014,opt,name=description_all", Filename: "gogo.proto", } @@ -177,7 +179,7 @@ var E_TestgenAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63015, Name: "gogoproto.testgen_all", - Tag: "varint,63015,opt,name=testgen_all,json=testgenAll", + Tag: "varint,63015,opt,name=testgen_all", Filename: "gogo.proto", } @@ -186,7 +188,7 @@ var E_BenchgenAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63016, Name: "gogoproto.benchgen_all", - Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll", + Tag: "varint,63016,opt,name=benchgen_all", Filename: "gogo.proto", } @@ -195,7 +197,7 @@ var E_MarshalerAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63017, Name: "gogoproto.marshaler_all", - Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll", + Tag: "varint,63017,opt,name=marshaler_all", Filename: "gogo.proto", } @@ -204,7 +206,7 @@ var E_UnmarshalerAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63018, Name: "gogoproto.unmarshaler_all", - Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll", + Tag: "varint,63018,opt,name=unmarshaler_all", Filename: "gogo.proto", } @@ -213,7 +215,7 @@ var E_StableMarshalerAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63019, Name: "gogoproto.stable_marshaler_all", - Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll", + Tag: "varint,63019,opt,name=stable_marshaler_all", Filename: "gogo.proto", } @@ -222,7 +224,7 @@ var E_SizerAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63020, Name: "gogoproto.sizer_all", - Tag: "varint,63020,opt,name=sizer_all,json=sizerAll", + Tag: "varint,63020,opt,name=sizer_all", Filename: "gogo.proto", } @@ -231,7 +233,7 @@ var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63021, Name: "gogoproto.goproto_enum_stringer_all", - Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", Filename: "gogo.proto", } @@ -240,7 +242,7 @@ var E_EnumStringerAll = 
&proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63022, Name: "gogoproto.enum_stringer_all", - Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll", + Tag: "varint,63022,opt,name=enum_stringer_all", Filename: "gogo.proto", } @@ -249,7 +251,7 @@ var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63023, Name: "gogoproto.unsafe_marshaler_all", - Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", Filename: "gogo.proto", } @@ -258,7 +260,7 @@ var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63024, Name: "gogoproto.unsafe_unmarshaler_all", - Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", Filename: "gogo.proto", } @@ -267,7 +269,7 @@ var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63025, Name: "gogoproto.goproto_extensions_map_all", - Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", Filename: "gogo.proto", } @@ -276,7 +278,7 @@ var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63026, Name: "gogoproto.goproto_unrecognized_all", - Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", Filename: "gogo.proto", } @@ -285,7 +287,7 @@ var E_GogoprotoImport = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63027, Name: "gogoproto.gogoproto_import", - Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport", + Tag: "varint,63027,opt,name=gogoproto_import", Filename: "gogo.proto", } @@ -294,7 +296,7 @@ var E_ProtosizerAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63028, Name: "gogoproto.protosizer_all", - Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll", + Tag: "varint,63028,opt,name=protosizer_all", Filename: "gogo.proto", } @@ -303,7 +305,7 @@ var E_CompareAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63029, Name: "gogoproto.compare_all", - Tag: "varint,63029,opt,name=compare_all,json=compareAll", + Tag: "varint,63029,opt,name=compare_all", Filename: "gogo.proto", } @@ -312,7 +314,7 @@ var E_TypedeclAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63030, Name: "gogoproto.typedecl_all", - Tag: "varint,63030,opt,name=typedecl_all,json=typedeclAll", + Tag: "varint,63030,opt,name=typedecl_all", Filename: "gogo.proto", } @@ -321,7 +323,7 @@ var E_EnumdeclAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63031, Name: "gogoproto.enumdecl_all", - Tag: "varint,63031,opt,name=enumdecl_all,json=enumdeclAll", + Tag: "varint,63031,opt,name=enumdecl_all", Filename: "gogo.proto", } @@ -330,7 +332,7 @@ var E_GoprotoRegistration = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63032, Name: "gogoproto.goproto_registration", - Tag: "varint,63032,opt,name=goproto_registration,json=goprotoRegistration", + Tag: "varint,63032,opt,name=goproto_registration", Filename: "gogo.proto", } @@ -339,7 +341,25 @@ var E_MessagenameAll = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 63033, Name: "gogoproto.messagename_all", - Tag: "varint,63033,opt,name=messagename_all,json=messagenameAll", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", Filename: "gogo.proto", } @@ -348,7 +368,7 @@ var E_GoprotoGetters = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 64001, Name: "gogoproto.goproto_getters", - Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters", + Tag: "varint,64001,opt,name=goproto_getters", Filename: "gogo.proto", } @@ -357,7 +377,7 @@ var E_GoprotoStringer = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 64003, Name: "gogoproto.goproto_stringer", - Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer", + Tag: "varint,64003,opt,name=goproto_stringer", Filename: "gogo.proto", } @@ -366,7 +386,7 @@ var E_VerboseEqual = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 64004, Name: "gogoproto.verbose_equal", - Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual", + Tag: "varint,64004,opt,name=verbose_equal", Filename: "gogo.proto", } @@ -474,7 +494,7 @@ var E_StableMarshaler = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 64019, Name: "gogoproto.stable_marshaler", - Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler", + Tag: "varint,64019,opt,name=stable_marshaler", Filename: "gogo.proto", } @@ -492,7 +512,7 @@ var E_UnsafeMarshaler = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 64023, Name: "gogoproto.unsafe_marshaler", - Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", Filename: "gogo.proto", } @@ -501,7 +521,7 @@ var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 64024, Name: "gogoproto.unsafe_unmarshaler", - Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", Filename: "gogo.proto", } @@ -510,7 +530,7 @@ var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 64025, Name: "gogoproto.goproto_extensions_map", - Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap", + Tag: "varint,64025,opt,name=goproto_extensions_map", Filename: "gogo.proto", } @@ -519,7 +539,7 @@ var E_GoprotoUnrecognized = &proto.ExtensionDesc{ ExtensionType: (*bool)(nil), Field: 64026, Name: "gogoproto.goproto_unrecognized", - Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", Filename: "gogo.proto", } @@ -559,6 +579,24 @@ var E_Messagename = &proto.ExtensionDesc{ Filename: "gogo.proto", } +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + var E_Nullable = &proto.ExtensionDesc{ ExtendedType: 
(*descriptor.FieldOptions)(nil), ExtensionType: (*bool)(nil), @@ -658,6 +696,15 @@ var E_Stdduration = &proto.ExtensionDesc{ Filename: "gogo.proto", } +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + func init() { proto.RegisterExtension(E_GoprotoEnumPrefix) proto.RegisterExtension(E_GoprotoEnumStringer) @@ -695,6 +742,8 @@ func init() { proto.RegisterExtension(E_EnumdeclAll) proto.RegisterExtension(E_GoprotoRegistration) proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) proto.RegisterExtension(E_GoprotoGetters) proto.RegisterExtension(E_GoprotoStringer) proto.RegisterExtension(E_VerboseEqual) @@ -719,6 +768,8 @@ func init() { proto.RegisterExtension(E_Compare) proto.RegisterExtension(E_Typedecl) proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) proto.RegisterExtension(E_Nullable) proto.RegisterExtension(E_Embed) proto.RegisterExtension(E_Customtype) @@ -730,88 +781,94 @@ func init() { proto.RegisterExtension(E_Castvalue) proto.RegisterExtension(E_Stdtime) proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) } -func init() { proto.RegisterFile("gogo.proto", fileDescriptor_gogo_68790841c0f79064) } +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } -var fileDescriptor_gogo_68790841c0f79064 = []byte{ - // 1246 bytes of a gzipped FileDescriptorProto +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, - 0x14, 0x80, 0x85, 0x70, 0x64, 0xcf, 0xf3, 0x86, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, - 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x06, 0x63, 0xe2, 0xb0, 0x1d, 0x46, - 0x3d, 0x33, 0xe5, 0x76, 0x43, 0x77, 0xd7, 0xd0, 0x5d, 0x1d, 0xc5, 0xb9, 0xa1, 0xb0, 0x08, 0x21, - 0x76, 0x24, 0x48, 0x48, 0x02, 0x39, 0xb0, 0xaf, 0x61, 0xe7, 0xc6, 0x85, 0xe5, 0xca, 0x7f, 0xe0, - 0x02, 0x98, 0xdd, 0x37, 0x5f, 0xa2, 0xd7, 0xfd, 0x5e, 0x4f, 0xcd, 0x78, 0xa4, 0xaa, 0xb9, 0xb5, - 0xed, 0xfa, 0x3e, 0x57, 0xbf, 0x57, 0xf5, 0xde, 0x9b, 0x01, 0xf0, 0x95, 0xaf, 0x66, 0x5a, 0x89, - 0xd2, 0xaa, 0x5a, 0xc1, 0xe7, 0xfc, 0xf1, 0xc0, 0x41, 0x5f, 0x29, 0x3f, 0x94, 0xb3, 0xf9, 0x4f, - 0xf5, 0x6c, 0x63, 0xb6, 0x29, 0xd3, 0x46, 0x12, 0xb4, 0xb4, 0x4a, 0x8a, 0xc5, 0xe2, 0x6e, 0x98, - 0xa4, 0xc5, 0x35, 0x19, 0x67, 0x51, 0xad, 0x95, 0xc8, 0x8d, 0xe0, 0x74, 0xf5, 0xa6, 0x99, 0x82, - 0x9c, 0x61, 0x72, 0x66, 0x29, 0xce, 0xa2, 0x7b, 0x5a, 0x3a, 0x50, 0x71, 0xba, 0xff, 0xca, 0xaf, - 0xd7, 0x1e, 0xbc, 0xe6, 0xf6, 0xa1, 0xb5, 0x09, 0x42, 0xf1, 0x6f, 0xab, 0x39, 0x28, 0xd6, 0xe0, - 0xfa, 0x0e, 0x5f, 0xaa, 0x93, 0x20, 0xf6, 0x65, 0x62, 0x31, 0xfe, 0x40, 0xc6, 0x49, 0xc3, 0x78, - 0x1f, 0xa1, 0x62, 0x11, 0x46, 0xfb, 0x71, 0xfd, 0x48, 0xae, 0x11, 0x69, 0x4a, 0x96, 0x61, 0x3c, - 0x97, 0x34, 0xb2, 0x54, 0xab, 0x28, 0xf6, 0x22, 0x69, 0xd1, 0xfc, 0x94, 0x6b, 0x2a, 0x6b, 0x63, - 0x88, 0x2d, 0x96, 0x94, 0x10, 0x30, 0x84, 0xbf, 0x69, 0xca, 0x46, 0x68, 0x31, 0xfc, 0x4c, 0x1b, - 0x29, 0xd7, 0x8b, 0x93, 0x30, 0x85, 0xcf, 0xa7, 0xbc, 0x30, 0x93, 0xe6, 0x4e, 0x6e, 0xed, 0xe9, - 0x39, 0x89, 0xcb, 0x58, 0xf6, 0xcb, 0xd9, 0x81, 0x7c, 0x3b, 0x93, 0xa5, 0xc0, 0xd8, 0x93, 0x91, - 0x45, 
0x5f, 0x6a, 0x2d, 0x93, 0xb4, 0xe6, 0x85, 0xbd, 0xb6, 0x77, 0x2c, 0x08, 0x4b, 0xe3, 0xb9, - 0xed, 0xce, 0x2c, 0x2e, 0x17, 0xe4, 0x42, 0x18, 0x8a, 0x75, 0xb8, 0xa1, 0xc7, 0xa9, 0x70, 0x70, - 0x9e, 0x27, 0xe7, 0xd4, 0x9e, 0x93, 0x81, 0xda, 0x55, 0xe0, 0xdf, 0x97, 0xb9, 0x74, 0x70, 0xbe, - 0x41, 0xce, 0x2a, 0xb1, 0x9c, 0x52, 0x34, 0xde, 0x09, 0x13, 0xa7, 0x64, 0x52, 0x57, 0xa9, 0xac, - 0xc9, 0xc7, 0x32, 0x2f, 0x74, 0xd0, 0x5d, 0x20, 0xdd, 0x38, 0x81, 0x4b, 0xc8, 0xa1, 0xeb, 0x30, - 0x0c, 0x6d, 0x78, 0x0d, 0xe9, 0xa0, 0xb8, 0x48, 0x8a, 0x41, 0x5c, 0x8f, 0xe8, 0x02, 0x8c, 0xf8, - 0xaa, 0x78, 0x25, 0x07, 0xfc, 0x12, 0xe1, 0xc3, 0xcc, 0x90, 0xa2, 0xa5, 0x5a, 0x59, 0xe8, 0x69, - 0x97, 0x1d, 0xbc, 0xc9, 0x0a, 0x66, 0x48, 0xd1, 0x47, 0x58, 0xdf, 0x62, 0x45, 0x6a, 0xc4, 0x73, - 0x1e, 0x86, 0x55, 0x1c, 0x6e, 0xa9, 0xd8, 0x65, 0x13, 0x97, 0xc9, 0x00, 0x84, 0xa0, 0x60, 0x0e, - 0x2a, 0xae, 0x89, 0x78, 0x7b, 0x9b, 0xaf, 0x07, 0x67, 0x60, 0x19, 0xc6, 0xb9, 0x40, 0x05, 0x2a, - 0x76, 0x50, 0xbc, 0x43, 0x8a, 0x31, 0x03, 0xa3, 0xd7, 0xd0, 0x32, 0xd5, 0xbe, 0x74, 0x91, 0xbc, - 0xcb, 0xaf, 0x41, 0x08, 0x85, 0xb2, 0x2e, 0xe3, 0xc6, 0xa6, 0x9b, 0xe1, 0x3d, 0x0e, 0x25, 0x33, - 0xa8, 0x58, 0x84, 0xd1, 0xc8, 0x4b, 0xd2, 0x4d, 0x2f, 0x74, 0x4a, 0xc7, 0xfb, 0xe4, 0x18, 0x29, - 0x21, 0x8a, 0x48, 0x16, 0xf7, 0xa3, 0xf9, 0x80, 0x23, 0x62, 0x60, 0x74, 0xf5, 0x52, 0xed, 0xd5, - 0x43, 0x59, 0xeb, 0xc7, 0xf6, 0x21, 0x5f, 0xbd, 0x82, 0x5d, 0x31, 0x8d, 0x73, 0x50, 0x49, 0x83, - 0x33, 0x4e, 0x9a, 0x8f, 0x38, 0xd3, 0x39, 0x80, 0xf0, 0x83, 0x70, 0x63, 0xcf, 0x36, 0xe1, 0x20, - 0xfb, 0x98, 0x64, 0xd3, 0x3d, 0x5a, 0x05, 0x95, 0x84, 0x7e, 0x95, 0x9f, 0x70, 0x49, 0x90, 0x5d, - 0xae, 0x55, 0x98, 0xca, 0xe2, 0xd4, 0xdb, 0xe8, 0x2f, 0x6a, 0x9f, 0x72, 0xd4, 0x0a, 0xb6, 0x23, - 0x6a, 0x27, 0x60, 0x9a, 0x8c, 0xfd, 0xe5, 0xf5, 0x33, 0x2e, 0xac, 0x05, 0xbd, 0xde, 0x99, 0xdd, - 0x87, 0xe1, 0x40, 0x19, 0xce, 0xd3, 0x5a, 0xc6, 0x29, 0x32, 0xb5, 0xc8, 0x6b, 0x39, 0x98, 0xaf, - 0x90, 0x99, 0x2b, 0xfe, 0x52, 0x29, 0x58, 0xf1, 0x5a, 0x28, 0x7f, 0x00, 0xf6, 0xb3, 0x3c, 0x8b, - 0x13, 0xd9, 0x50, 0x7e, 0x1c, 0x9c, 0x91, 0x4d, 0x07, 0xf5, 0xe7, 0x5d, 0xa9, 0x5a, 0x37, 0x70, - 0x34, 0x1f, 0x87, 0xeb, 0xca, 0x59, 0xa5, 0x16, 0x44, 0x2d, 0x95, 0x68, 0x8b, 0xf1, 0x0b, 0xce, - 0x54, 0xc9, 0x1d, 0xcf, 0x31, 0xb1, 0x04, 0x63, 0xf9, 0x8f, 0xae, 0x47, 0xf2, 0x4b, 0x12, 0x8d, - 0xb6, 0x29, 0x2a, 0x1c, 0x0d, 0x15, 0xb5, 0xbc, 0xc4, 0xa5, 0xfe, 0x7d, 0xc5, 0x85, 0x83, 0x10, - 0x2a, 0x1c, 0x7a, 0xab, 0x25, 0xb1, 0xdb, 0x3b, 0x18, 0xbe, 0xe6, 0xc2, 0xc1, 0x0c, 0x29, 0x78, - 0x60, 0x70, 0x50, 0x7c, 0xc3, 0x0a, 0x66, 0x50, 0x71, 0x6f, 0xbb, 0xd1, 0x26, 0xd2, 0x0f, 0x52, - 0x9d, 0x78, 0xb8, 0xda, 0xa2, 0xfa, 0x76, 0xbb, 0x73, 0x08, 0x5b, 0x33, 0x50, 0xac, 0x44, 0x91, - 0x4c, 0x53, 0xcf, 0x97, 0x38, 0x71, 0x38, 0x6c, 0xec, 0x3b, 0xae, 0x44, 0x06, 0x56, 0xdc, 0xcf, - 0xf1, 0xae, 0x59, 0xa5, 0x7a, 0xcb, 0x1e, 0xd1, 0x4a, 0xc1, 0xb0, 0xeb, 0xf1, 0x1d, 0x72, 0x75, - 0x8e, 0x2a, 0xe2, 0x2e, 0x3c, 0x40, 0x9d, 0x03, 0x85, 0x5d, 0x76, 0x76, 0xa7, 0x3c, 0x43, 0x1d, - 0xf3, 0x84, 0x38, 0x06, 0xa3, 0x1d, 0xc3, 0x84, 0x5d, 0xf5, 0x04, 0xa9, 0x46, 0xcc, 0x59, 0x42, - 0x1c, 0x82, 0x01, 0x1c, 0x0c, 0xec, 0xf8, 0x93, 0x84, 0xe7, 0xcb, 0xc5, 0x11, 0x18, 0xe2, 0x81, - 0xc0, 0x8e, 0x3e, 0x45, 0x68, 0x89, 0x20, 0xce, 0xc3, 0x80, 0x1d, 0x7f, 0x9a, 0x71, 0x46, 0x10, - 0x77, 0x0f, 0xe1, 0xf7, 0xcf, 0x0e, 0x50, 0x41, 0xe7, 0xd8, 0xcd, 0xc1, 0x20, 0x4d, 0x01, 0x76, - 0xfa, 0x19, 0xfa, 0xe7, 0x4c, 0x88, 0x3b, 0x60, 0x9f, 0x63, 0xc0, 0x9f, 0x23, 0xb4, 0x58, 0x2f, - 0x16, 0x61, 0xd8, 0xe8, 0xfc, 
0x76, 0xfc, 0x79, 0xc2, 0x4d, 0x0a, 0xb7, 0x4e, 0x9d, 0xdf, 0x2e, - 0x78, 0x81, 0xb7, 0x4e, 0x04, 0x86, 0x8d, 0x9b, 0xbe, 0x9d, 0x7e, 0x91, 0xa3, 0xce, 0x88, 0x98, - 0x87, 0x4a, 0x59, 0xc8, 0xed, 0xfc, 0x4b, 0xc4, 0xb7, 0x19, 0x8c, 0x80, 0xd1, 0x48, 0xec, 0x8a, - 0x97, 0x39, 0x02, 0x06, 0x85, 0xd7, 0xa8, 0x7b, 0x38, 0xb0, 0x9b, 0x5e, 0xe1, 0x6b, 0xd4, 0x35, - 0x1b, 0x60, 0x36, 0xf3, 0x7a, 0x6a, 0x57, 0xbc, 0xca, 0xd9, 0xcc, 0xd7, 0xe3, 0x36, 0xba, 0xbb, - 0xad, 0xdd, 0xf1, 0x1a, 0x6f, 0xa3, 0xab, 0xd9, 0x8a, 0x55, 0xa8, 0xee, 0xed, 0xb4, 0x76, 0xdf, - 0xeb, 0xe4, 0x9b, 0xd8, 0xd3, 0x68, 0xc5, 0xfd, 0x30, 0xdd, 0xbb, 0xcb, 0xda, 0xad, 0xe7, 0x76, - 0xba, 0x3e, 0x17, 0x99, 0x4d, 0x56, 0x9c, 0x68, 0x97, 0x6b, 0xb3, 0xc3, 0xda, 0xb5, 0xe7, 0x77, - 0x3a, 0x2b, 0xb6, 0xd9, 0x60, 0xc5, 0x02, 0x40, 0xbb, 0xb9, 0xd9, 0x5d, 0x17, 0xc8, 0x65, 0x40, - 0x78, 0x35, 0xa8, 0xb7, 0xd9, 0xf9, 0x8b, 0x7c, 0x35, 0x88, 0xc0, 0xab, 0xc1, 0x6d, 0xcd, 0x4e, - 0x5f, 0xe2, 0xab, 0xc1, 0x08, 0x9e, 0x6c, 0xa3, 0x73, 0xd8, 0x0d, 0x97, 0xf9, 0x64, 0x1b, 0x94, - 0x98, 0x83, 0xa1, 0x38, 0x0b, 0x43, 0x3c, 0xa0, 0xd5, 0x9b, 0x7b, 0xb4, 0x2b, 0x19, 0x36, 0x99, - 0xff, 0x6d, 0x97, 0x76, 0xc0, 0x80, 0x38, 0x04, 0xfb, 0x64, 0x54, 0x97, 0x4d, 0x1b, 0xf9, 0xfb, - 0x2e, 0x17, 0x25, 0x5c, 0x2d, 0xe6, 0x01, 0x8a, 0x8f, 0xf6, 0xf8, 0x2a, 0x36, 0xf6, 0x8f, 0xdd, - 0xe2, 0x5b, 0x06, 0x03, 0x69, 0x0b, 0xf2, 0x17, 0xb7, 0x08, 0xb6, 0x3b, 0x05, 0xf9, 0x5b, 0x1f, - 0x86, 0xc1, 0x47, 0x52, 0x15, 0x6b, 0xcf, 0xb7, 0xd1, 0x7f, 0x12, 0xcd, 0xeb, 0x31, 0x60, 0x91, - 0x4a, 0xa4, 0xf6, 0xfc, 0xd4, 0xc6, 0xfe, 0x45, 0x6c, 0x09, 0x20, 0xdc, 0xf0, 0x52, 0xed, 0xf2, - 0xde, 0x7f, 0x33, 0xcc, 0x00, 0x6e, 0x1a, 0x9f, 0x1f, 0x95, 0x5b, 0x36, 0xf6, 0x1f, 0xde, 0x34, - 0xad, 0x17, 0x47, 0xa0, 0x82, 0x8f, 0xf9, 0xb7, 0x22, 0x36, 0xf8, 0x5f, 0x82, 0xdb, 0x04, 0xfe, - 0xe7, 0x54, 0x37, 0x75, 0x60, 0x0f, 0xf6, 0x7f, 0x94, 0x69, 0x5e, 0x2f, 0x16, 0x60, 0x38, 0xd5, - 0xcd, 0x66, 0x46, 0xf3, 0x95, 0x05, 0xff, 0x7f, 0xb7, 0xfc, 0xc8, 0x5d, 0x32, 0x47, 0x97, 0x60, - 0xb2, 0xa1, 0xa2, 0x6e, 0xf0, 0x28, 0x2c, 0xab, 0x65, 0xb5, 0x9a, 0x5f, 0xc5, 0x87, 0x6e, 0xf3, - 0x03, 0xbd, 0x99, 0xd5, 0x67, 0x1a, 0x2a, 0x9a, 0xc5, 0xc1, 0xb7, 0xfd, 0x7d, 0x5e, 0x39, 0x06, - 0x5f, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x51, 0xf0, 0xa5, 0x95, 0x02, 0x14, 0x00, 0x00, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 
0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 
0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto index bc8d889f161a3..b80c85653f74a 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -84,6 +84,9 @@ extend 
google.protobuf.FileOptions { optional bool goproto_registration = 63032; optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; } extend google.protobuf.MessageOptions { @@ -118,6 +121,9 @@ extend google.protobuf.MessageOptions { optional bool typedecl = 64030; optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; } extend google.protobuf.FieldOptions { @@ -133,4 +139,6 @@ extend google.protobuf.FieldOptions { optional bool stdtime = 65010; optional bool stdduration = 65011; + optional bool wktpointer = 65012; + } diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go index 22910c6d4f1a9..390d4e4be6b83 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/helper.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -47,6 +47,55 @@ func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { return proto.GetBoolExtension(field.Options, E_Stdduration, false) } +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { nullable := IsNullable(field) if field.IsMessage() || IsCustomType(field) { @@ -356,3 +405,11 @@ func RegistersGolangProto(file 
*google_protobuf.FileDescriptorProto) bool { func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) } + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/BUILD.bazel b/vendor/github.com/gogo/protobuf/proto/BUILD.bazel index 041872e28cd97..0e75d34b22f59 100644 --- a/vendor/github.com/gogo/protobuf/proto/BUILD.bazel +++ b/vendor/github.com/gogo/protobuf/proto/BUILD.bazel @@ -6,6 +6,7 @@ go_library( "clone.go", "custom_gogo.go", "decode.go", + "deprecated.go", "discard.go", "duration.go", "duration_gogo.go", @@ -32,6 +33,8 @@ go_library( "text_parser.go", "timestamp.go", "timestamp_gogo.go", + "wrappers.go", + "wrappers_gogo.go", ], importmap = "k8s.io/kops/vendor/github.com/gogo/protobuf/proto", importpath = "github.com/gogo/protobuf/proto", diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go index d9aa3c42d666e..63b0f08bef2a0 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { if b&0x80 == 0 { goto done } - // x -= 0x80 << 63 // Always zero. return 0, errOverflow diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go new file mode 100644 index 0000000000000..35b882c09aaf9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go index c27d35f866bb0..3abfed2cff04b 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -37,27 +37,9 @@ package proto import ( "errors" - "fmt" "reflect" ) -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - var ( // errRepeatedHasNil is the error returned if Marshal is called with // a struct with a repeated field containing a nil element. diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go index 44ebd457cf618..341c6f57f5210 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -527,6 +527,7 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { if epb, ok := pb.(extensionsBytes); ok { + ClearExtension(pb, extension) newb, err := encodeExtension(extension, value) if err != nil { return err @@ -544,7 +545,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") + return fmt.Errorf("proto: bad extension value type. 
got: %T, want: %T", value, extension.ExtensionType) } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go index 53ebd8cca01c3..6f1ae120ece58 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -154,6 +154,10 @@ func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) return EncodeExtensionMap(m.extensionsWrite(), data) } +func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMapBackwards(m.extensionsWrite(), data) +} + func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { o := 0 for _, e := range m { @@ -169,6 +173,23 @@ func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { return o, nil } +func EncodeExtensionMapBackwards(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + end := len(data) + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[end-len(e.enc):], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + end -= n + o += n + } + return o, nil +} + func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { e := m[id] if err := e.Encode(); err != nil { diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go index 0f1950c67e485..d17f802092100 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -265,7 +265,6 @@ package proto import ( "encoding/json" - "errors" "fmt" "log" "reflect" @@ -274,34 +273,73 @@ import ( "sync" ) -var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true } -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. 
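// Illustrative sketch (assumption: a hypothetical caller outside this package)
// of how the two error kinds introduced above are meant to be recognised: by
// their RequiredNotSet / InvalidUTF8 marker methods rather than by concrete
// type, which is the same check the isNonFatal helper that follows relies on.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func describe(err error) string {
	if err == nil {
		return "ok"
	}
	if rns, ok := err.(interface{ RequiredNotSet() bool }); ok && rns.RequiredNotSet() {
		return "non-fatal: " + err.Error()
	}
	if iu, ok := err.(interface{ InvalidUTF8() bool }); ok && iu.InvalidUTF8() {
		return "non-fatal: " + err.Error()
	}
	return "fatal: " + err.Error()
}

func main() {
	fmt.Println(describe(&proto.RequiredNotSetError{})) // non-fatal: proto: required field not set
	fmt.Println(describe(fmt.Errorf("some other failure")))
}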
+var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false } -// Set to true to enable stats collection. -const collectStats = false +type nonFatal struct{ E error } -var stats Stats +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to @@ -570,9 +608,11 @@ func SetDefaults(pb Message) { setDefaults(reflect.ValueOf(pb), true, false) } -// v is a pointer to a struct. +// v is a struct. func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() + if v.Kind() == reflect.Ptr { + v = v.Elem() + } defaultMu.RLock() dm, ok := defaults[v.Type()] @@ -674,8 +714,11 @@ func setDefaults(v reflect.Value, recur, zeros bool) { for _, ni := range dm.nested { f := v.Field(ni) - // f is *T or []*T or map[T]*T + // f is *T or T or []*T or []T switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + case reflect.Ptr: if f.IsNil() { continue @@ -685,7 +728,7 @@ func setDefaults(v reflect.Value, recur, zeros bool) { case reflect.Slice: for i := 0; i < f.Len(); i++ { e := f.Index(i) - if e.IsNil() { + if e.Kind() == reflect.Ptr && e.IsNil() { continue } setDefaults(e, recur, zeros) @@ -757,6 +800,9 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { var canHaveDefault bool switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + case reflect.Ptr: if ft.Elem().Kind() == reflect.Struct { nestedMessage = true @@ -766,7 +812,7 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes case reflect.Slice: switch ft.Elem().Kind() { - case reflect.Ptr: + case reflect.Ptr, reflect.Struct: nestedMessage = true // repeated message case reflect.Uint8: canHaveDefault = true // bytes field diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go index 3b6ca41d5e554..f48a756761ead 100644 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -36,13 +36,7 @@ package proto */ import ( - "bytes" - "encoding/json" "errors" - "fmt" - "reflect" - "sort" - "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte { return buf[i+1:] } -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. 
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - return marshalMessageSet(exts, false) -} - -// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. -func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var u marshalInfo - siz := u.sizeMessageSet(exts) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, exts, deterministic) - - case map[int32]Extension: - // This is an old-style extension map. - // Wrap it in a new-style XXX_InternalExtensions. - ie := XXX_InternalExtensions{ - p: &struct { - mu sync.Mutex - extensionMap map[int32]Extension - }{ - extensionMap: exts, - }, - } - - var u marshalInfo - siz := u.sizeMessageSet(&ie) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, &ie, deterministic) - - default: - return nil, errors.New("proto: not an extension map") - } -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { +func unmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: @@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error { } return nil } - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var mu sync.Locker - m, mu = exts.extensionsRead() - if m != nil { - // Keep the extensions map locked until we're done marshaling to prevent - // races between marshaling and unmarshaling the lazily-{en,de}coded - // values. - mu.Lock() - defer mu.Unlock() - } - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - - if i > 0 && b.Len() > 1 { - b.WriteByte(',') - } - - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. 
- if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go index b354101b9c3d5..aca8eed02a11e 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -26,7 +26,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !purego !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go index 7a5e28efe5199..c9e5fa02072cb 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -144,7 +144,7 @@ type Properties struct { Repeated bool Packed bool // relevant for repeated primitives only Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only + proto3 bool // whether this is known to be a proto3 field oneof bool // whether this is a oneof field Default string // default value @@ -153,14 +153,15 @@ type Properties struct { CastType string StdTime bool StdDuration bool + WktPointer bool stype reflect.Type // set for struct types only ctype reflect.Type // set for custom types only sprop *StructProperties // set for struct types only - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only } // String formats the properties in the protobuf struct field tag style. 
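The hunk above turns the Properties map descriptors into the exported MapKeyProp and MapValProp fields (and adds the WktPointer flag), so code outside the package can now inspect a map field's key/value properties via proto.GetProperties. A minimal sketch of that usage; the Entry type and its tags are hypothetical stand-ins for generated code, not taken from this change:

    package main

    import (
        "fmt"
        "reflect"

        proto "github.com/gogo/protobuf/proto"
    )

    // Entry is a hand-written stand-in for a generated message with a map field.
    type Entry struct {
        Labels map[string]string `protobuf:"bytes,1,rep,name=labels" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
    }

    func (m *Entry) Reset()         { *m = Entry{} }
    func (m *Entry) String() string { return proto.CompactTextString(m) }
    func (*Entry) ProtoMessage()    {}

    func main() {
        sp := proto.GetProperties(reflect.TypeOf(Entry{}))
        for _, p := range sp.Prop {
            if p.MapKeyProp == nil {
                continue
            }
            // With this change the key/value descriptors are exported fields.
            fmt.Printf("map field %q: key wire=%q value wire=%q\n",
                p.OrigName, p.MapKeyProp.Wire, p.MapValProp.Wire)
        }
    }
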
@@ -274,6 +275,8 @@ outer: p.StdTime = true case f == "stdduration": p.StdDuration = true + case f == "wktptr": + p.WktPointer = true } } } @@ -296,6 +299,10 @@ func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, loc p.setTag(lockGetProp) return } + if p.WktPointer && !isMap { + p.setTag(lockGetProp) + return + } switch t1 := typ; t1.Kind() { case reflect.Struct: p.stype = typ @@ -317,9 +324,9 @@ func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, loc case reflect.Map: p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} vtype := p.mtype.Elem() if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { // The value type is not a message (*T) or bytes ([]byte), @@ -327,10 +334,11 @@ func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, loc vtype = reflect.PtrTo(vtype) } - p.mvalprop.CustomType = p.CustomType - p.mvalprop.StdDuration = p.StdDuration - p.mvalprop.StdTime = p.StdTime - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } p.setTag(lockGetProp) } @@ -383,9 +391,6 @@ func GetProperties(t reflect.Type) *StructProperties { sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { - if collectStats { - stats.Chit++ - } return sprop } @@ -398,14 +403,8 @@ func GetProperties(t reflect.Type) *StructProperties { // getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } return prop } - if collectStats { - stats.Cmiss++ - } prop := new(StructProperties) // in case of recursive protos, fill this in now. diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go index 255e7b508860c..9b1538d0559b2 100644 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -97,6 +97,8 @@ type marshalElemInfo struct { var ( marshalInfoMap = map[reflect.Type]*marshalInfo{} marshalInfoLock sync.Mutex + + uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() ) // getMarshalInfo returns the information to marshal a given type of message. @@ -246,16 +248,13 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte // If the message can marshal itself, let it do it, for compatibility. // NOTE: This is not efficient. if u.hasmarshaler { - if deterministic { - return nil, errors.New("proto: deterministic not supported by the Marshal method of " + u.typ.String()) - } m := ptr.asPointerTo(u.typ).Interface().(Marshaler) b1, err := m.Marshal() b = append(b, b1...) return b, err } - var err, errreq error + var err, errLater error // The old marshaler encodes extensions at beginning. if u.extensions.IsValid() { e := ptr.offset(u.extensions).toExtensions() @@ -280,11 +279,13 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte b = append(b, s...) 
} for _, f := range u.fields { - if f.required && errreq == nil { - if ptr.offset(f.field).getPointer().isNil() { + if f.required { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { // Required field is not set. // We record the error but keep going, to give a complete marshaling. - errreq = &RequiredNotSetError{f.name} + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } continue } } @@ -297,14 +298,21 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte if err1, ok := err.(*RequiredNotSetError); ok { // Required field in submessage is not set. // We record the error but keep going, to give a complete marshaling. - if errreq == nil { - errreq = &RequiredNotSetError{f.name + "." + err1.field} + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} } continue } if err == errRepeatedHasNil { err = errors.New("proto: repeated field " + f.name + " has nil element") } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } return b, err } } @@ -312,7 +320,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte s := *ptr.offset(u.unrecognized).toBytes() b = append(b, s...) } - return b, errreq + return b, errLater } // computeMarshalInfo initializes the marshal info. @@ -483,7 +491,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { fi.field = toField(f) - fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. 
fi.isPointer = true fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) @@ -577,6 +585,8 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma ctype := false isTime := false isDuration := false + isWktPointer := false + validateUTF8 := true for i := 2; i < len(tags); i++ { if tags[i] == "packed" { packed = true @@ -593,7 +603,11 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma if tags[i] == "stdduration" { isDuration = true } + if tags[i] == "wktptr" { + isWktPointer = true + } } + validateUTF8 = validateUTF8 && proto3 if !proto3 && !pointer && !slice { nozero = false } @@ -638,6 +652,112 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma return makeDurationMarshaler(getMarshalInfo(t)) } + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return 
makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + switch t.Kind() { case reflect.Bool: if pointer { @@ -834,6 +954,18 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma } return sizeFloat64Value, appendFloat64Value case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } if pointer { return sizeStringPtr, appendStringPtr } @@ -2089,52 +2221,105 @@ func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byt return b, nil } func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool v := *ptr.toString() if !utf8.ValidString(v) { - return nil, errInvalidUTF8 + invalidUTF8 = true } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } return b, nil } -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool v := *ptr.toString() if v == "" { return b, nil } if !utf8.ValidString(v) { - return nil, errInvalidUTF8 + invalidUTF8 = true } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } return b, nil } -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool p := *ptr.toStringPtr() if p == nil { return b, nil } v := *p if !utf8.ValidString(v) { - return nil, errInvalidUTF8 + invalidUTF8 = true } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) 
+ if invalidUTF8 { + return b, errInvalidUTF8 + } return b, nil } -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool s := *ptr.toStringSlice() for _, v := range s { if !utf8.ValidString(v) { - return nil, errInvalidUTF8 + invalidUTF8 = true } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) } + if invalidUTF8 { + return b, errInvalidUTF8 + } return b, nil } func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { @@ -2213,7 +2398,8 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { s := ptr.getPointerSlice() - var err, errreq error + var err error + var nerr nonFatal for _, v := range s { if v.isNil() { return b, errRepeatedHasNil @@ -2221,22 +2407,14 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { b = appendVarint(b, wiretag) // start group b, err = u.marshal(b, v, deterministic) b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errreq == nil { - errreq = err - } - continue - } + if !nerr.Merge(err) { if err == ErrNil { err = errRepeatedHasNil } return b, err } } - return b, errreq + return b, nerr.E } } @@ -2280,7 +2458,8 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { s := ptr.getPointerSlice() - var err, errreq error + var err error + var nerr nonFatal for _, v := range s { if v.isNil() { return b, errRepeatedHasNil @@ -2289,22 +2468,15 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { siz := u.cachedsize(v) b = appendVarint(b, uint64(siz)) b, err = u.marshal(b, v, deterministic) - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errreq == nil { - errreq = err - } - continue - } + + if !nerr.Merge(err) { if err == ErrNil { err = errRepeatedHasNil } return b, err } } - return b, errreq + return b, nerr.E } } @@ -2318,15 +2490,21 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { tags := strings.Split(f.Tag.Get("protobuf"), ",") keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false for _, t := range tags { if strings.HasPrefix(t, "customtype=") { valTags = append(valTags, t) } if t == "stdtime" { valTags = append(valTags, t) + stdOptions = true } if t == "stdduration" { valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) } } keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map @@ -2340,6 +2518,25 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { // value. // Key cannot be pointer-typed. valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). 
+ // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } return func(ptr pointer, tagsize int) int { m := ptr.asPointerTo(t).Elem() // the map n := 0 @@ -2360,24 +2557,26 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { if len(keys) > 1 && deterministic { sort.Sort(mapKeys(keys)) } + + var nerr nonFatal for _, k := range keys { ki := k.Interface() vi := m.MapIndex(k).Interface() kaddr := toAddrPointer(&ki, false) // pointer to key vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) b = appendVarint(b, uint64(siz)) b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if err != nil { + if !nerr.Merge(err) { return b, err } b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != nil && err != ErrNil { // allow nil value in map + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map return b, err } } - return b, nil + return b, nerr.E } } @@ -2450,6 +2649,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de defer mu.Unlock() var err error + var nerr nonFatal // Fast-path for common cases: zero or one extensions. // Don't bother sorting the keys. @@ -2469,11 +2669,11 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de v := e.value p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if err != nil { + if !nerr.Merge(err) { return b, err } } - return b, nil + return b, nerr.E } // Sort the keys to provide a deterministic encoding. @@ -2500,11 +2700,11 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de v := e.value p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if err != nil { + if !nerr.Merge(err) { return b, err } } - return b, nil + return b, nerr.E } // message set format is: @@ -2561,6 +2761,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de defer mu.Unlock() var err error + var nerr nonFatal // Fast-path for common cases: zero or one extensions. // Don't bother sorting the keys. @@ -2587,12 +2788,12 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de v := e.value p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if err != nil { + if !nerr.Merge(err) { return b, err } b = append(b, 1<<3|WireEndGroup) } - return b, nil + return b, nerr.E } // Sort the keys to provide a deterministic encoding. 
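Across these marshaling hunks, hard failures on missing required fields and invalid UTF-8 in proto3 strings are replaced by the nonFatal accumulator: encoding keeps going and the (possibly partial) bytes are returned together with the first such error. A rough sketch of what that means for a caller; Demo is a hypothetical hand-written proto2-style message, standing in for generated code:

    package main

    import (
        "fmt"

        proto "github.com/gogo/protobuf/proto"
    )

    // Demo mimics a message with a required field, for illustration only.
    type Demo struct {
        Name *string `protobuf:"bytes,1,req,name=name"`
    }

    func (m *Demo) Reset()         { *m = Demo{} }
    func (m *Demo) String() string { return proto.CompactTextString(m) }
    func (*Demo) ProtoMessage()    {}

    func main() {
        b, err := proto.Marshal(&Demo{}) // required field left unset
        if _, ok := err.(*proto.RequiredNotSetError); ok {
            // Non-fatal: the encoding produced so far is still returned in b.
            fmt.Printf("got %d bytes and non-fatal error: %v\n", len(b), err)
        } else if err != nil {
            panic(err)
        }
    }
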
@@ -2626,11 +2827,11 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) b = append(b, 1<<3|WireEndGroup) - if err != nil { + if !nerr.Merge(err) { return b, err } } - return b, nil + return b, nerr.E } // sizeV1Extensions computes the size of encoded data for a V1-API extension field. @@ -2673,6 +2874,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ sort.Ints(keys) var err error + var nerr nonFatal for _, k := range keys { e := m[int32(k)] if e.value == nil || e.desc == nil { @@ -2689,11 +2891,11 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ v := e.value p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if err != nil { + if !nerr.Merge(err) { return b, err } } - return b, nil + return b, nerr.E } // newMarshaler is the interface representing objects that can marshal themselves. @@ -2758,6 +2960,11 @@ func Marshal(pb Message) ([]byte, error) { // a Buffer for most applications. func (p *Buffer) Marshal(pb Message) error { var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } if m, ok := pb.(newMarshaler); ok { siz := m.XXX_Size() p.grow(siz) // make sure buf has enough capacity diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go index 910e2dd6ad31d..bb2622f28c399 100644 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -99,6 +99,8 @@ type unmarshalFieldInfo struct { // if a required field, contains a single set bit at this field's index in the required field list. reqMask uint64 + + name string // name of the field, for error reporting } var ( @@ -136,10 +138,10 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { u.computeUnmarshalInfo() } if u.isMessageSet { - return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) + return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) } - var reqMask uint64 // bitmask of required fields we've seen. - var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage. + var reqMask uint64 // bitmask of required fields we've seen. + var errLater error for len(b) > 0 { // Read tag and wire type. // Special case 1 and 2 byte varints. @@ -178,11 +180,20 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { if r, ok := err.(*RequiredNotSetError); ok { // Remember this error, but keep parsing. We need to produce // a full parse even if a required field is missing. - rnse = r + if errLater == nil { + errLater = r + } reqMask |= f.reqMask continue } if err != errInternalBadWireType { + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } return err } // Fragments with bad wire type are treated as unknown fields. @@ -244,20 +255,16 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { emap[int32(tag)] = e } } - if rnse != nil { - // A required field of a submessage/group is missing. Return that error. - return rnse - } - if reqMask != u.reqMask { + if reqMask != u.reqMask && errLater == nil { // A required field of this message is missing. 
for _, n := range u.reqFields { if reqMask&1 == 0 { - return &RequiredNotSetError{n} + errLater = &RequiredNotSetError{n} } reqMask >>= 1 } } - return nil + return errLater } // computeUnmarshalInfo fills in u with information for use @@ -360,7 +367,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { } // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask) + u.setTag(tag, toField(&f), unmarshal, reqMask, name) } // Find any types associated with oneof fields. @@ -376,10 +383,17 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { f := typ.Field(0) // oneof implementers have one field baseUnmarshal := fieldUnmarshaler(&f) - tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1] - tag, err := strconv.Atoi(tagstr) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) if err != nil { - panic("protobuf tag field not an integer: " + tagstr) + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } } // Find the oneof field that this struct implements. @@ -390,7 +404,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { // That lets us know where this struct should be stored // when we encounter it during unmarshaling. unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(tag, of.field, unmarshal, 0) + u.setTag(fieldNum, of.field, unmarshal, 0, name) } } } @@ -411,7 +425,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0) + }, 0, "") // Set mask for required field check. u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? 
for len(u.dense) <= tag { @@ -455,10 +470,16 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { ctype := false isTime := false isDuration := false + isWktPointer := false + proto3 := false + validateUTF8 := true for _, tag := range tagArray[3:] { if strings.HasPrefix(tag, "name=") { name = tag[5:] } + if tag == "proto3" { + proto3 = true + } if strings.HasPrefix(tag, "customtype=") { ctype = true } @@ -468,7 +489,11 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { if tag == "stdduration" { isDuration = true } + if tag == "wktptr" { + isWktPointer = true + } } + validateUTF8 = validateUTF8 && proto3 // Figure out packaging (pointer, slice, or both) slice := false @@ -522,6 +547,112 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { return makeUnmarshalDuration(getUnmarshalInfo(t), name) } + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), 
name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + // We'll never have both pointer and slice for basic types. if pointer && slice && t.Kind() != reflect.Struct { panic("both pointer and slice for basic type in " + t.Name()) @@ -656,6 +787,15 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { } return unmarshalBytesValue case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } if pointer { return unmarshalStringPtr } @@ -1516,9 +1656,6 @@ func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { return nil, io.ErrUnexpectedEOF } v := string(b[:x]) - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } *f.toString() = v return b[x:], nil } @@ -1536,9 +1673,6 @@ func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { return nil, io.ErrUnexpectedEOF } v := string(b[:x]) - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } *f.toStringPtr() = &v return b[x:], nil } @@ -1556,11 +1690,69 @@ func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { return nil, io.ErrUnexpectedEOF } v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v if !utf8.ValidString(v) { - return nil, errInvalidUTF8 + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) s := f.toStringSlice() *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } return b[x:], nil } @@ -1731,6 +1923,9 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { if t == "stdduration" { valTags = append(valTags, t) } + if t == "wktptr" { + valTags = append(valTags, t) + } } unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) @@ -1755,6 +1950,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { // Maps will be somewhat 
slow. Oh well. // Read key and value from data. + var nerr nonFatal k := reflect.New(kt) v := reflect.New(vt) for len(b) > 0 { @@ -1775,7 +1971,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { err = errInternalBadWireType // skip unknown tag } - if err == nil { + if nerr.Merge(err) { continue } if err != errInternalBadWireType { @@ -1798,7 +1994,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { // Insert into map. m.SetMapIndex(k.Elem(), v.Elem()) - return r, nil + return r, nerr.E } } @@ -1824,15 +2020,16 @@ func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshal // Unmarshal data into holder. // We unmarshal into the first field of the holder object. var err error + var nerr nonFatal b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if err != nil { + if !nerr.Merge(err) { return nil, err } // Write pointer to holder into target field. f.asPointerTo(ityp).Elem().Set(v) - return b, nil + return b, nerr.E } } @@ -1945,7 +2142,7 @@ func encodeVarint(b []byte, x uint64) []byte { // If there is an error, it returns 0,0. func decodeVarint(b []byte) (uint64, int) { var x, y uint64 - if len(b) <= 0 { + if len(b) == 0 { goto bad } x = uint64(b[0]) diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go index 4f5706dc5f369..0407ba85d01cc 100644 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -364,7 +364,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -381,7 +381,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { + if err := tm.writeAny(w, val, props.MapValProp); err != nil { return err } if err := w.WriteByte('\n'); err != nil { diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go index fbb000d3742ac..1ce0be2fa9bee 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -636,17 +636,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { if err := p.consumeToken(":"); err != nil { return err } - if err := p.readAny(key, props.mkeyprop); err != nil { + if err := p.readAny(key, props.MapKeyProp); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { return err } - if err := p.readAny(val, props.mvalprop); err != nil { + if err := p.readAny(val, props.MapValProp); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { @@ -923,6 +923,16 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { fv.SetFloat(f) return nil } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } case reflect.Int32: if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { fv.SetInt(x) @@ -970,6 +980,16 @@ func (p *textParser) 
readAny(v reflect.Value, props *Properties) error { } // TODO: Handle nested messages which implement encoding.TextUnmarshaler. return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { fv.SetUint(uint64(x)) diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go new file mode 100644 index 0000000000000..b175d1b642346 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go @@ -0,0 +1,1888 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
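The makeStd*Value helpers added in this new file all follow the same pattern shown here: wrap the scalar in a small wrapper message (float64Value and friends) and emit it length-delimited. A standalone sketch of the resulting wire layout, using a hypothetical outer field number 4 carrying a wrapped double (not part of the vendored code):

    package main

    import (
        "encoding/binary"
        "fmt"
        "math"
    )

    func main() {
        const fieldNum = 4 // hypothetical outer field number

        // Inner wrapper message: field 1, wire type 1 (fixed64), then the double 3.5.
        var fixed [8]byte
        binary.LittleEndian.PutUint64(fixed[:], math.Float64bits(3.5))
        inner := append([]byte{1<<3 | 1}, fixed[:]...)

        // Outer field: tag with wire type 2 (length-delimited), varint length, payload.
        wire := append([]byte{fieldNum<<3 | 2, byte(len(inner))}, inner...)

        fmt.Printf("% x\n", wire) // 22 09 09 00 00 00 00 00 00 0c 40
    }
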
+ return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 0000000000000..c1cf7bf85e9ad --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") + RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go index 44f893b777c4b..cacfa3923514c 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -3,9 +3,11 @@ package descriptor -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -72,6 +74,7 @@ var FieldDescriptorProto_Type_name = map[int32]string{ 17: "TYPE_SINT32", 18: "TYPE_SINT64", } + var FieldDescriptorProto_Type_value = map[string]int32{ "TYPE_DOUBLE": 1, "TYPE_FLOAT": 2, @@ -98,9 +101,11 @@ func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { *p = x return p } + func (x FieldDescriptorProto_Type) String() string { return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) } + func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") if err != nil { @@ -109,8 +114,9 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { *x = FieldDescriptorProto_Type(value) return nil } + func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4, 0} + return fileDescriptor_308767df5ffe18af, []int{4, 0} } type FieldDescriptorProto_Label int32 @@ -127,6 +133,7 @@ var FieldDescriptorProto_Label_name = map[int32]string{ 2: "LABEL_REQUIRED", 3: "LABEL_REPEATED", } + var FieldDescriptorProto_Label_value = map[string]int32{ "LABEL_OPTIONAL": 1, "LABEL_REQUIRED": 2, @@ -138,9 +145,11 @@ func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { *p = x return p } + func (x FieldDescriptorProto_Label) String() string { return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) } + func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") if err != nil { @@ -149,8 +158,9 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { *x = FieldDescriptorProto_Label(value) return nil } + func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4, 1} + return fileDescriptor_308767df5ffe18af, []int{4, 1} } // Generated classes can be optimized for speed or code size. 
@@ -168,6 +178,7 @@ var FileOptions_OptimizeMode_name = map[int32]string{ 2: "CODE_SIZE", 3: "LITE_RUNTIME", } + var FileOptions_OptimizeMode_value = map[string]int32{ "SPEED": 1, "CODE_SIZE": 2, @@ -179,9 +190,11 @@ func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { *p = x return p } + func (x FileOptions_OptimizeMode) String() string { return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) } + func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") if err != nil { @@ -190,8 +203,9 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { *x = FileOptions_OptimizeMode(value) return nil } + func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{10, 0} + return fileDescriptor_308767df5ffe18af, []int{10, 0} } type FieldOptions_CType int32 @@ -208,6 +222,7 @@ var FieldOptions_CType_name = map[int32]string{ 1: "CORD", 2: "STRING_PIECE", } + var FieldOptions_CType_value = map[string]int32{ "STRING": 0, "CORD": 1, @@ -219,9 +234,11 @@ func (x FieldOptions_CType) Enum() *FieldOptions_CType { *p = x return p } + func (x FieldOptions_CType) String() string { return proto.EnumName(FieldOptions_CType_name, int32(x)) } + func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") if err != nil { @@ -230,8 +247,9 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { *x = FieldOptions_CType(value) return nil } + func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12, 0} + return fileDescriptor_308767df5ffe18af, []int{12, 0} } type FieldOptions_JSType int32 @@ -250,6 +268,7 @@ var FieldOptions_JSType_name = map[int32]string{ 1: "JS_STRING", 2: "JS_NUMBER", } + var FieldOptions_JSType_value = map[string]int32{ "JS_NORMAL": 0, "JS_STRING": 1, @@ -261,9 +280,11 @@ func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { *p = x return p } + func (x FieldOptions_JSType) String() string { return proto.EnumName(FieldOptions_JSType_name, int32(x)) } + func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") if err != nil { @@ -272,8 +293,9 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { *x = FieldOptions_JSType(value) return nil } + func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12, 1} + return fileDescriptor_308767df5ffe18af, []int{12, 1} } // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, @@ -292,6 +314,7 @@ var MethodOptions_IdempotencyLevel_name = map[int32]string{ 1: "NO_SIDE_EFFECTS", 2: "IDEMPOTENT", } + var MethodOptions_IdempotencyLevel_value = map[string]int32{ "IDEMPOTENCY_UNKNOWN": 0, "NO_SIDE_EFFECTS": 1, @@ -303,9 +326,11 @@ func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { *p = x return p } + func (x MethodOptions_IdempotencyLevel) String() string { return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) } + func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") if err != nil { @@ -314,8 +339,9 @@ 
func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { *x = MethodOptions_IdempotencyLevel(value) return nil } + func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{17, 0} + return fileDescriptor_308767df5ffe18af, []int{17, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -331,7 +357,7 @@ func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } func (*FileDescriptorSet) ProtoMessage() {} func (*FileDescriptorSet) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{0} + return fileDescriptor_308767df5ffe18af, []int{0} } func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) @@ -339,8 +365,8 @@ func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) } -func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorSet.Merge(dst, src) +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) } func (m *FileDescriptorSet) XXX_Size() int { return xxx_messageInfo_FileDescriptorSet.Size(m) @@ -392,7 +418,7 @@ func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } func (*FileDescriptorProto) ProtoMessage() {} func (*FileDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{1} + return fileDescriptor_308767df5ffe18af, []int{1} } func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) @@ -400,8 +426,8 @@ func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) } -func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorProto.Merge(dst, src) +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) } func (m *FileDescriptorProto) XXX_Size() int { return xxx_messageInfo_FileDescriptorProto.Size(m) @@ -519,7 +545,7 @@ func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } func (*DescriptorProto) ProtoMessage() {} func (*DescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2} + return fileDescriptor_308767df5ffe18af, []int{2} } func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) @@ -527,8 +553,8 @@ func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) } -func (dst *DescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto.Merge(dst, src) +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) } func (m *DescriptorProto) XXX_Size() int { return 
xxx_messageInfo_DescriptorProto.Size(m) @@ -622,7 +648,7 @@ func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2, 0} + return fileDescriptor_308767df5ffe18af, []int{2, 0} } func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) @@ -630,8 +656,8 @@ func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) } -func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src) +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) } func (m *DescriptorProto_ExtensionRange) XXX_Size() int { return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) @@ -678,7 +704,7 @@ func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_R func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2, 1} + return fileDescriptor_308767df5ffe18af, []int{2, 1} } func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) @@ -686,8 +712,8 @@ func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) } -func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src) +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) } func (m *DescriptorProto_ReservedRange) XXX_Size() int { return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) @@ -725,7 +751,7 @@ func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } func (*ExtensionRangeOptions) ProtoMessage() {} func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{3} + return fileDescriptor_308767df5ffe18af, []int{3} } var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ @@ -735,14 +761,15 @@ var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ExtensionRangeOptions } + func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) } func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) } -func (dst *ExtensionRangeOptions) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src) +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) } func (m *ExtensionRangeOptions) XXX_Size() int { return xxx_messageInfo_ExtensionRangeOptions.Size(m) @@ -801,7 +828,7 @@ func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } func (*FieldDescriptorProto) ProtoMessage() {} func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4} + return fileDescriptor_308767df5ffe18af, []int{4} } func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) @@ -809,8 +836,8 @@ func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) } -func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldDescriptorProto.Merge(dst, src) +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) } func (m *FieldDescriptorProto) XXX_Size() int { return xxx_messageInfo_FieldDescriptorProto.Size(m) @@ -904,7 +931,7 @@ func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } func (*OneofDescriptorProto) ProtoMessage() {} func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{5} + return fileDescriptor_308767df5ffe18af, []int{5} } func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) @@ -912,8 +939,8 @@ func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) } -func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofDescriptorProto.Merge(dst, src) +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) } func (m *OneofDescriptorProto) XXX_Size() int { return xxx_messageInfo_OneofDescriptorProto.Size(m) @@ -959,7 +986,7 @@ func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumDescriptorProto) ProtoMessage() {} func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{6} + return fileDescriptor_308767df5ffe18af, []int{6} } func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) @@ -967,8 +994,8 @@ func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) } -func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto.Merge(dst, src) +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) } func (m *EnumDescriptorProto) XXX_Size() int { 
return xxx_messageInfo_EnumDescriptorProto.Size(m) @@ -1032,7 +1059,7 @@ func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescr func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{6, 0} + return fileDescriptor_308767df5ffe18af, []int{6, 0} } func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) @@ -1040,8 +1067,8 @@ func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) } -func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src) +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) } func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) @@ -1080,7 +1107,7 @@ func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorPro func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumValueDescriptorProto) ProtoMessage() {} func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{7} + return fileDescriptor_308767df5ffe18af, []int{7} } func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) @@ -1088,8 +1115,8 @@ func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) } -func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src) +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) } func (m *EnumValueDescriptorProto) XXX_Size() int { return xxx_messageInfo_EnumValueDescriptorProto.Size(m) @@ -1135,7 +1162,7 @@ func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } func (*ServiceDescriptorProto) ProtoMessage() {} func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{8} + return fileDescriptor_308767df5ffe18af, []int{8} } func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) @@ -1143,8 +1170,8 @@ func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) } -func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src) +func (m *ServiceDescriptorProto) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) } func (m *ServiceDescriptorProto) XXX_Size() int { return xxx_messageInfo_ServiceDescriptorProto.Size(m) @@ -1197,7 +1224,7 @@ func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } func (*MethodDescriptorProto) ProtoMessage() {} func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{9} + return fileDescriptor_308767df5ffe18af, []int{9} } func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) @@ -1205,8 +1232,8 @@ func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) } -func (dst *MethodDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodDescriptorProto.Merge(dst, src) +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) } func (m *MethodDescriptorProto) XXX_Size() int { return xxx_messageInfo_MethodDescriptorProto.Size(m) @@ -1336,6 +1363,14 @@ type FileOptions struct { // is empty. When this option is empty, the package name will be used for // determining the namespace. PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be used + // for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -1349,7 +1384,7 @@ func (m *FileOptions) Reset() { *m = FileOptions{} } func (m *FileOptions) String() string { return proto.CompactTextString(m) } func (*FileOptions) ProtoMessage() {} func (*FileOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{10} + return fileDescriptor_308767df5ffe18af, []int{10} } var extRange_FileOptions = []proto.ExtensionRange{ @@ -1359,14 +1394,15 @@ var extRange_FileOptions = []proto.ExtensionRange{ func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FileOptions } + func (m *FileOptions) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FileOptions.Unmarshal(m, b) } func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) } -func (dst *FileOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileOptions.Merge(dst, src) +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) } func (m *FileOptions) XXX_Size() int { return xxx_messageInfo_FileOptions.Size(m) @@ -1514,6 +1550,20 @@ func (m *FileOptions) GetPhpNamespace() string { return "" } +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { return m.UninterpretedOption @@ -1584,7 +1634,7 @@ func (m *MessageOptions) Reset() { *m = MessageOptions{} } func (m *MessageOptions) String() string { return proto.CompactTextString(m) } func (*MessageOptions) ProtoMessage() {} func (*MessageOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{11} + return fileDescriptor_308767df5ffe18af, []int{11} } var extRange_MessageOptions = []proto.ExtensionRange{ @@ -1594,14 +1644,15 @@ var extRange_MessageOptions = []proto.ExtensionRange{ func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MessageOptions } + func (m *MessageOptions) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MessageOptions.Unmarshal(m, b) } func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) } -func (dst *MessageOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MessageOptions.Merge(dst, src) +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) } func (m *MessageOptions) XXX_Size() int { return xxx_messageInfo_MessageOptions.Size(m) @@ -1723,7 +1774,7 @@ func (m *FieldOptions) Reset() { *m = FieldOptions{} } func (m *FieldOptions) String() string { return proto.CompactTextString(m) } func (*FieldOptions) ProtoMessage() {} func (*FieldOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12} + return fileDescriptor_308767df5ffe18af, []int{12} } var extRange_FieldOptions = []proto.ExtensionRange{ @@ -1733,14 +1784,15 @@ var extRange_FieldOptions = []proto.ExtensionRange{ func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FieldOptions } + func (m 
*FieldOptions) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FieldOptions.Unmarshal(m, b) } func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) } -func (dst *FieldOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldOptions.Merge(dst, src) +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) } func (m *FieldOptions) XXX_Size() int { return xxx_messageInfo_FieldOptions.Size(m) @@ -1819,7 +1871,7 @@ func (m *OneofOptions) Reset() { *m = OneofOptions{} } func (m *OneofOptions) String() string { return proto.CompactTextString(m) } func (*OneofOptions) ProtoMessage() {} func (*OneofOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{13} + return fileDescriptor_308767df5ffe18af, []int{13} } var extRange_OneofOptions = []proto.ExtensionRange{ @@ -1829,14 +1881,15 @@ var extRange_OneofOptions = []proto.ExtensionRange{ func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_OneofOptions } + func (m *OneofOptions) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_OneofOptions.Unmarshal(m, b) } func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) } -func (dst *OneofOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofOptions.Merge(dst, src) +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) } func (m *OneofOptions) XXX_Size() int { return xxx_messageInfo_OneofOptions.Size(m) @@ -1875,7 +1928,7 @@ func (m *EnumOptions) Reset() { *m = EnumOptions{} } func (m *EnumOptions) String() string { return proto.CompactTextString(m) } func (*EnumOptions) ProtoMessage() {} func (*EnumOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{14} + return fileDescriptor_308767df5ffe18af, []int{14} } var extRange_EnumOptions = []proto.ExtensionRange{ @@ -1885,14 +1938,15 @@ var extRange_EnumOptions = []proto.ExtensionRange{ func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumOptions } + func (m *EnumOptions) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnumOptions.Unmarshal(m, b) } func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) } -func (dst *EnumOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumOptions.Merge(dst, src) +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) } func (m *EnumOptions) XXX_Size() int { return xxx_messageInfo_EnumOptions.Size(m) @@ -1944,7 +1998,7 @@ func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } func (*EnumValueOptions) ProtoMessage() {} func (*EnumValueOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{15} + return fileDescriptor_308767df5ffe18af, []int{15} } var extRange_EnumValueOptions = []proto.ExtensionRange{ @@ -1954,14 +2008,15 @@ var extRange_EnumValueOptions = []proto.ExtensionRange{ func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumValueOptions } + func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) } func (m 
*EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) } -func (dst *EnumValueOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueOptions.Merge(dst, src) +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) } func (m *EnumValueOptions) XXX_Size() int { return xxx_messageInfo_EnumValueOptions.Size(m) @@ -2006,7 +2061,7 @@ func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } func (*ServiceOptions) ProtoMessage() {} func (*ServiceOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{16} + return fileDescriptor_308767df5ffe18af, []int{16} } var extRange_ServiceOptions = []proto.ExtensionRange{ @@ -2016,14 +2071,15 @@ var extRange_ServiceOptions = []proto.ExtensionRange{ func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ServiceOptions } + func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) } func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) } -func (dst *ServiceOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceOptions.Merge(dst, src) +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) } func (m *ServiceOptions) XXX_Size() int { return xxx_messageInfo_ServiceOptions.Size(m) @@ -2069,7 +2125,7 @@ func (m *MethodOptions) Reset() { *m = MethodOptions{} } func (m *MethodOptions) String() string { return proto.CompactTextString(m) } func (*MethodOptions) ProtoMessage() {} func (*MethodOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{17} + return fileDescriptor_308767df5ffe18af, []int{17} } var extRange_MethodOptions = []proto.ExtensionRange{ @@ -2079,14 +2135,15 @@ var extRange_MethodOptions = []proto.ExtensionRange{ func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MethodOptions } + func (m *MethodOptions) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MethodOptions.Unmarshal(m, b) } func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) } -func (dst *MethodOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodOptions.Merge(dst, src) +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) } func (m *MethodOptions) XXX_Size() int { return xxx_messageInfo_MethodOptions.Size(m) @@ -2146,7 +2203,7 @@ func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption) ProtoMessage() {} func (*UninterpretedOption) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{18} + return fileDescriptor_308767df5ffe18af, []int{18} } func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) @@ -2154,8 +2211,8 @@ func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_UninterpretedOption.Marshal(b, m, 
deterministic) } -func (dst *UninterpretedOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption.Merge(dst, src) +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) } func (m *UninterpretedOption) XXX_Size() int { return xxx_messageInfo_UninterpretedOption.Size(m) @@ -2232,7 +2289,7 @@ func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOptio func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{18, 0} + return fileDescriptor_308767df5ffe18af, []int{18, 0} } func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) @@ -2240,8 +2297,8 @@ func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) } -func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src) +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) } func (m *UninterpretedOption_NamePart) XXX_Size() int { return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) @@ -2322,7 +2379,7 @@ func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo) ProtoMessage() {} func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{19} + return fileDescriptor_308767df5ffe18af, []int{19} } func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) @@ -2330,8 +2387,8 @@ func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) } -func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo.Merge(dst, src) +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) } func (m *SourceCodeInfo) XXX_Size() int { return xxx_messageInfo_SourceCodeInfo.Size(m) @@ -2439,7 +2496,7 @@ func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo_Location) ProtoMessage() {} func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{19, 0} + return fileDescriptor_308767df5ffe18af, []int{19, 0} } func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) @@ -2447,8 +2504,8 @@ func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) } -func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src) +func (m 
*SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) } func (m *SourceCodeInfo_Location) XXX_Size() int { return xxx_messageInfo_SourceCodeInfo_Location.Size(m) @@ -2510,7 +2567,7 @@ func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo) ProtoMessage() {} func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{20} + return fileDescriptor_308767df5ffe18af, []int{20} } func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) @@ -2518,8 +2575,8 @@ func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) } -func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src) +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) } func (m *GeneratedCodeInfo) XXX_Size() int { return xxx_messageInfo_GeneratedCodeInfo.Size(m) @@ -2559,7 +2616,7 @@ func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_ func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor_descriptor_9588782fb9cbecd6, []int{20, 0} + return fileDescriptor_308767df5ffe18af, []int{20, 0} } func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) @@ -2567,8 +2624,8 @@ func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) } -func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src) +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) } func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) @@ -2608,6 +2665,12 @@ func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { } func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") 
proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") @@ -2635,172 +2698,168 @@ func init() { proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) - proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) - proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) - proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) - proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) } -func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_descriptor_9588782fb9cbecd6) } +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } -var fileDescriptor_descriptor_9588782fb9cbecd6 = []byte{ - // 2487 bytes of a gzipped FileDescriptorProto +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, - 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0xe5, 0xf1, 0xd8, 0x9b, 0x30, 0xde, 0x8f, 0x38, 0xda, 0x8f, - 0x38, 0x49, 0xab, 0x2c, 0x9c, 0xc4, 0xc9, 0x3a, 0xc5, 0xb6, 0xb2, 0xc4, 0x78, 0x95, 0xca, 0x92, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, - 0x83, 0x1e, 0x02, 0xf4, 0xd4, 0xff, 0xa0, 0x28, 0x8a, 0x1e, 0x7a, 0x59, 0xa0, 0xd7, 0x02, 0x05, - 0xda, 0x7b, 0xaf, 0x05, 0x7a, 0xef, 0xa1, 0x40, 0x0b, 0xb4, 0x7f, 0x42, 0x8f, 0xc5, 0xcc, 0x90, - 0x14, 0xf5, 0x95, 0x78, 0x17, 0x48, 0xf6, 0x64, 0xcf, 0xef, 0xfd, 0xde, 0xe3, 0x9b, 0x37, 0x6f, - 0xde, 0xbc, 0x19, 0x01, 0xd2, 0xa9, 0xa7, 0xb9, 0x86, 0xe3, 0xdb, 0x6e, 0xc5, 0x71, 0x6d, 0xdf, - 0xc6, 0x6b, 0x03, 0xdb, 0x1e, 0x98, 0x54, 0x8c, 0x4e, 0xc6, 0xfd, 0xf2, 0x11, 0xac, 0xdf, 0x33, - 0x4c, 0x5a, 0x8f, 0x88, 0x5d, 0xea, 0xe3, 0x3b, 0x90, 0xee, 0x1b, 0x26, 0x95, 0x12, 0xdb, 0xa9, - 0x9d, 0xc2, 0xee, 0x87, 0x95, 0x19, 0xa5, 0xca, 0xb4, 0x46, 0x87, 0xc1, 0x0a, 0xd7, 0x28, 0xff, - 0x3b, 0x0d, 0x1b, 0x0b, 0xa4, 0x18, 0x43, 0xda, 0x22, 0x23, 0x66, 0x31, 0xb1, 0x93, 0x57, 0xf8, - 0xff, 0x58, 0x82, 0x15, 0x87, 0x68, 0x8f, 0xc9, 0x80, 0x4a, 0x49, 0x0e, 0x87, 0x43, 0xfc, 0x3e, - 0x80, 0x4e, 0x1d, 0x6a, 0xe9, 0xd4, 0xd2, 0x4e, 0xa5, 0xd4, 0x76, 0x6a, 0x27, 0xaf, 0xc4, 0x10, - 0x7c, 0x0d, 0xd6, 0x9d, 0xf1, 0x89, 0x69, 0x68, 0x6a, 0x8c, 0x06, 0xdb, 0xa9, 0x9d, 0x8c, 0x82, - 0x84, 0xa0, 0x3e, 0x21, 0x5f, 0x86, 0xb5, 0xa7, 0x94, 0x3c, 0x8e, 0x53, 0x0b, 0x9c, 0x5a, 0x62, - 0x70, 0x8c, 0x58, 0x83, 0xe2, 0x88, 0x7a, 0x1e, 0x19, 0x50, 0xd5, 0x3f, 0x75, 0xa8, 0x94, 0xe6, - 0xb3, 0xdf, 0x9e, 0x9b, 0xfd, 0xec, 0xcc, 0x0b, 0x81, 0x56, 0xef, 0xd4, 0xa1, 0xb8, 0x0a, 0x79, - 0x6a, 0x8d, 0x47, 0xc2, 0x42, 0x66, 
0x49, 0xfc, 0x64, 0x6b, 0x3c, 0x9a, 0xb5, 0x92, 0x63, 0x6a, - 0x81, 0x89, 0x15, 0x8f, 0xba, 0x4f, 0x0c, 0x8d, 0x4a, 0x59, 0x6e, 0xe0, 0xf2, 0x9c, 0x81, 0xae, - 0x90, 0xcf, 0xda, 0x08, 0xf5, 0x70, 0x0d, 0xf2, 0xf4, 0x99, 0x4f, 0x2d, 0xcf, 0xb0, 0x2d, 0x69, - 0x85, 0x1b, 0xf9, 0x68, 0xc1, 0x2a, 0x52, 0x53, 0x9f, 0x35, 0x31, 0xd1, 0xc3, 0x7b, 0xb0, 0x62, - 0x3b, 0xbe, 0x61, 0x5b, 0x9e, 0x94, 0xdb, 0x4e, 0xec, 0x14, 0x76, 0xdf, 0x5d, 0x98, 0x08, 0x6d, - 0xc1, 0x51, 0x42, 0x32, 0x6e, 0x00, 0xf2, 0xec, 0xb1, 0xab, 0x51, 0x55, 0xb3, 0x75, 0xaa, 0x1a, - 0x56, 0xdf, 0x96, 0xf2, 0xdc, 0xc0, 0xc5, 0xf9, 0x89, 0x70, 0x62, 0xcd, 0xd6, 0x69, 0xc3, 0xea, - 0xdb, 0x4a, 0xc9, 0x9b, 0x1a, 0xe3, 0x73, 0x90, 0xf5, 0x4e, 0x2d, 0x9f, 0x3c, 0x93, 0x8a, 0x3c, - 0x43, 0x82, 0x51, 0xf9, 0xcf, 0x59, 0x58, 0x3b, 0x4b, 0x8a, 0xdd, 0x85, 0x4c, 0x9f, 0xcd, 0x52, - 0x4a, 0x7e, 0x93, 0x18, 0x08, 0x9d, 0xe9, 0x20, 0x66, 0xbf, 0x65, 0x10, 0xab, 0x50, 0xb0, 0xa8, - 0xe7, 0x53, 0x5d, 0x64, 0x44, 0xea, 0x8c, 0x39, 0x05, 0x42, 0x69, 0x3e, 0xa5, 0xd2, 0xdf, 0x2a, - 0xa5, 0x1e, 0xc0, 0x5a, 0xe4, 0x92, 0xea, 0x12, 0x6b, 0x10, 0xe6, 0xe6, 0xf5, 0x57, 0x79, 0x52, - 0x91, 0x43, 0x3d, 0x85, 0xa9, 0x29, 0x25, 0x3a, 0x35, 0xc6, 0x75, 0x00, 0xdb, 0xa2, 0x76, 0x5f, - 0xd5, 0xa9, 0x66, 0x4a, 0xb9, 0x25, 0x51, 0x6a, 0x33, 0xca, 0x5c, 0x94, 0x6c, 0x81, 0x6a, 0x26, - 0xfe, 0x74, 0x92, 0x6a, 0x2b, 0x4b, 0x32, 0xe5, 0x48, 0x6c, 0xb2, 0xb9, 0x6c, 0x3b, 0x86, 0x92, - 0x4b, 0x59, 0xde, 0x53, 0x3d, 0x98, 0x59, 0x9e, 0x3b, 0x51, 0x79, 0xe5, 0xcc, 0x94, 0x40, 0x4d, - 0x4c, 0x6c, 0xd5, 0x8d, 0x0f, 0xf1, 0x07, 0x10, 0x01, 0x2a, 0x4f, 0x2b, 0xe0, 0x55, 0xa8, 0x18, - 0x82, 0x2d, 0x32, 0xa2, 0x5b, 0xcf, 0xa1, 0x34, 0x1d, 0x1e, 0xbc, 0x09, 0x19, 0xcf, 0x27, 0xae, - 0xcf, 0xb3, 0x30, 0xa3, 0x88, 0x01, 0x46, 0x90, 0xa2, 0x96, 0xce, 0xab, 0x5c, 0x46, 0x61, 0xff, - 0xe2, 0x1f, 0x4d, 0x26, 0x9c, 0xe2, 0x13, 0xfe, 0x78, 0x7e, 0x45, 0xa7, 0x2c, 0xcf, 0xce, 0x7b, - 0xeb, 0x36, 0xac, 0x4e, 0x4d, 0xe0, 0xac, 0x9f, 0x2e, 0xff, 0x02, 0xde, 0x5e, 0x68, 0x1a, 0x3f, - 0x80, 0xcd, 0xb1, 0x65, 0x58, 0x3e, 0x75, 0x1d, 0x97, 0xb2, 0x8c, 0x15, 0x9f, 0x92, 0xfe, 0xb3, - 0xb2, 0x24, 0xe7, 0x8e, 0xe3, 0x6c, 0x61, 0x45, 0xd9, 0x18, 0xcf, 0x83, 0x57, 0xf3, 0xb9, 0xff, - 0xae, 0xa0, 0x17, 0x2f, 0x5e, 0xbc, 0x48, 0x96, 0x7f, 0x9d, 0x85, 0xcd, 0x45, 0x7b, 0x66, 0xe1, - 0xf6, 0x3d, 0x07, 0x59, 0x6b, 0x3c, 0x3a, 0xa1, 0x2e, 0x0f, 0x52, 0x46, 0x09, 0x46, 0xb8, 0x0a, - 0x19, 0x93, 0x9c, 0x50, 0x53, 0x4a, 0x6f, 0x27, 0x76, 0x4a, 0xbb, 0xd7, 0xce, 0xb4, 0x2b, 0x2b, - 0x4d, 0xa6, 0xa2, 0x08, 0x4d, 0xfc, 0x19, 0xa4, 0x83, 0x12, 0xcd, 0x2c, 0x5c, 0x3d, 0x9b, 0x05, - 0xb6, 0x97, 0x14, 0xae, 0x87, 0xdf, 0x81, 0x3c, 0xfb, 0x2b, 0x72, 0x23, 0xcb, 0x7d, 0xce, 0x31, - 0x80, 0xe5, 0x05, 0xde, 0x82, 0x1c, 0xdf, 0x26, 0x3a, 0x0d, 0x8f, 0xb6, 0x68, 0xcc, 0x12, 0x4b, - 0xa7, 0x7d, 0x32, 0x36, 0x7d, 0xf5, 0x09, 0x31, 0xc7, 0x94, 0x27, 0x7c, 0x5e, 0x29, 0x06, 0xe0, - 0x4f, 0x19, 0x86, 0x2f, 0x42, 0x41, 0xec, 0x2a, 0xc3, 0xd2, 0xe9, 0x33, 0x5e, 0x3d, 0x33, 0x8a, - 0xd8, 0x68, 0x0d, 0x86, 0xb0, 0xcf, 0x3f, 0xf2, 0x6c, 0x2b, 0x4c, 0x4d, 0xfe, 0x09, 0x06, 0xf0, - 0xcf, 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x9e, 0xde, 0x6c, 0x4e, 0x95, 0xff, 0x94, 0x84, 0x34, - 0xaf, 0x17, 0x6b, 0x50, 0xe8, 0x3d, 0xec, 0xc8, 0x6a, 0xbd, 0x7d, 0x7c, 0xd0, 0x94, 0x51, 0x02, - 0x97, 0x00, 0x38, 0x70, 0xaf, 0xd9, 0xae, 0xf6, 0x50, 0x32, 0x1a, 0x37, 0x5a, 0xbd, 0xbd, 0x9b, - 0x28, 0x15, 0x29, 0x1c, 0x0b, 0x20, 0x1d, 0x27, 0xdc, 0xd8, 0x45, 0x19, 0x8c, 0xa0, 0x28, 0x0c, - 0x34, 0x1e, 0xc8, 0xf5, 0xbd, 0x9b, 0x28, 0x3b, 0x8d, 0xdc, 
0xd8, 0x45, 0x2b, 0x78, 0x15, 0xf2, - 0x1c, 0x39, 0x68, 0xb7, 0x9b, 0x28, 0x17, 0xd9, 0xec, 0xf6, 0x94, 0x46, 0xeb, 0x10, 0xe5, 0x23, - 0x9b, 0x87, 0x4a, 0xfb, 0xb8, 0x83, 0x20, 0xb2, 0x70, 0x24, 0x77, 0xbb, 0xd5, 0x43, 0x19, 0x15, - 0x22, 0xc6, 0xc1, 0xc3, 0x9e, 0xdc, 0x45, 0xc5, 0x29, 0xb7, 0x6e, 0xec, 0xa2, 0xd5, 0xe8, 0x13, - 0x72, 0xeb, 0xf8, 0x08, 0x95, 0xf0, 0x3a, 0xac, 0x8a, 0x4f, 0x84, 0x4e, 0xac, 0xcd, 0x40, 0x7b, - 0x37, 0x11, 0x9a, 0x38, 0x22, 0xac, 0xac, 0x4f, 0x01, 0x7b, 0x37, 0x11, 0x2e, 0xd7, 0x20, 0xc3, - 0xb3, 0x0b, 0x63, 0x28, 0x35, 0xab, 0x07, 0x72, 0x53, 0x6d, 0x77, 0x7a, 0x8d, 0x76, 0xab, 0xda, - 0x44, 0x89, 0x09, 0xa6, 0xc8, 0x3f, 0x39, 0x6e, 0x28, 0x72, 0x1d, 0x25, 0xe3, 0x58, 0x47, 0xae, - 0xf6, 0xe4, 0x3a, 0x4a, 0x95, 0x35, 0xd8, 0x5c, 0x54, 0x27, 0x17, 0xee, 0x8c, 0xd8, 0x12, 0x27, - 0x97, 0x2c, 0x31, 0xb7, 0x35, 0xb7, 0xc4, 0xff, 0x4a, 0xc2, 0xc6, 0x82, 0xb3, 0x62, 0xe1, 0x47, - 0x7e, 0x08, 0x19, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb2, 0xf0, 0xd0, 0xe1, 0x09, 0x3b, 0x77, 0x82, - 0x72, 0xbd, 0x78, 0x07, 0x91, 0x5a, 0xd2, 0x41, 0x30, 0x13, 0x73, 0x35, 0xfd, 0xe7, 0x73, 0x35, - 0x5d, 0x1c, 0x7b, 0x7b, 0x67, 0x39, 0xf6, 0x38, 0xf6, 0xcd, 0x6a, 0x7b, 0x66, 0x41, 0x6d, 0xbf, - 0x0b, 0xeb, 0x73, 0x86, 0xce, 0x5c, 0x63, 0x7f, 0x99, 0x00, 0x69, 0x59, 0x70, 0x5e, 0x51, 0xe9, - 0x92, 0x53, 0x95, 0xee, 0xee, 0x6c, 0x04, 0x2f, 0x2d, 0x5f, 0x84, 0xb9, 0xb5, 0xfe, 0x3a, 0x01, - 0xe7, 0x16, 0x77, 0x8a, 0x0b, 0x7d, 0xf8, 0x0c, 0xb2, 0x23, 0xea, 0x0f, 0xed, 0xb0, 0x5b, 0xfa, - 0x78, 0xc1, 0x19, 0xcc, 0xc4, 0xb3, 0x8b, 0x1d, 0x68, 0xc5, 0x0f, 0xf1, 0xd4, 0xb2, 0x76, 0x4f, - 0x78, 0x33, 0xe7, 0xe9, 0xaf, 0x92, 0xf0, 0xf6, 0x42, 0xe3, 0x0b, 0x1d, 0x7d, 0x0f, 0xc0, 0xb0, - 0x9c, 0xb1, 0x2f, 0x3a, 0x22, 0x51, 0x60, 0xf3, 0x1c, 0xe1, 0xc5, 0x8b, 0x15, 0xcf, 0xb1, 0x1f, - 0xc9, 0x53, 0x5c, 0x0e, 0x02, 0xe2, 0x84, 0x3b, 0x13, 0x47, 0xd3, 0xdc, 0xd1, 0xf7, 0x97, 0xcc, - 0x74, 0x2e, 0x31, 0x3f, 0x01, 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x46, 0x86, - 0x35, 0xe0, 0x27, 0x48, 0x6e, 0x3f, 0xd3, 0x27, 0xa6, 0x47, 0x95, 0x35, 0x21, 0xee, 0x86, 0x52, - 0xa6, 0xc1, 0x13, 0xc8, 0x8d, 0x69, 0x64, 0xa7, 0x34, 0x84, 0x38, 0xd2, 0x28, 0xff, 0x31, 0x07, - 0x85, 0x58, 0x5f, 0x8d, 0x2f, 0x41, 0xf1, 0x11, 0x79, 0x42, 0xd4, 0xf0, 0xae, 0x24, 0x22, 0x51, - 0x60, 0x58, 0x27, 0xb8, 0x2f, 0x7d, 0x02, 0x9b, 0x9c, 0x62, 0x8f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, - 0x3c, 0x8f, 0x07, 0x2d, 0xc7, 0xa9, 0x98, 0xc9, 0xda, 0x4c, 0x54, 0x0b, 0x25, 0xf8, 0x16, 0x6c, - 0x70, 0x8d, 0xd1, 0xd8, 0xf4, 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xbd, 0x79, 0xfc, 0x24, 0x89, 0x3c, - 0x5b, 0x67, 0x8c, 0xa3, 0x80, 0xc0, 0x3c, 0xf2, 0x70, 0x1d, 0xde, 0xe3, 0x6a, 0x03, 0x6a, 0x51, - 0x97, 0xf8, 0x54, 0xa5, 0x5f, 0x8d, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x21, 0xf1, 0x86, 0xd2, - 0x26, 0x33, 0x70, 0x90, 0x94, 0x12, 0xca, 0x05, 0x46, 0x3c, 0x0c, 0x78, 0x32, 0xa7, 0x55, 0x2d, - 0xfd, 0x73, 0xe2, 0x0d, 0xf1, 0x3e, 0x9c, 0xe3, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x81, 0xaa, 0x0d, - 0xa9, 0xf6, 0x58, 0x1d, 0xfb, 0xfd, 0x3b, 0xd2, 0x3b, 0xf1, 0xef, 0x73, 0x0f, 0xbb, 0x9c, 0x53, - 0x63, 0x94, 0x63, 0xbf, 0x7f, 0x07, 0x77, 0xa1, 0xc8, 0x16, 0x63, 0x64, 0x3c, 0xa7, 0x6a, 0xdf, - 0x76, 0xf9, 0xd1, 0x58, 0x5a, 0x50, 0x9a, 0x62, 0x11, 0xac, 0xb4, 0x03, 0x85, 0x23, 0x5b, 0xa7, - 0xfb, 0x99, 0x6e, 0x47, 0x96, 0xeb, 0x4a, 0x21, 0xb4, 0x72, 0xcf, 0x76, 0x59, 0x42, 0x0d, 0xec, - 0x28, 0xc0, 0x05, 0x91, 0x50, 0x03, 0x3b, 0x0c, 0xef, 0x2d, 0xd8, 0xd0, 0x34, 0x31, 0x67, 0x43, - 0x53, 0x83, 0x3b, 0x96, 0x27, 0xa1, 0xa9, 0x60, 0x69, 0xda, 0xa1, 0x20, 0x04, 0x39, 
0xee, 0xe1, - 0x4f, 0xe1, 0xed, 0x49, 0xb0, 0xe2, 0x8a, 0xeb, 0x73, 0xb3, 0x9c, 0x55, 0xbd, 0x05, 0x1b, 0xce, - 0xe9, 0xbc, 0x22, 0x9e, 0xfa, 0xa2, 0x73, 0x3a, 0xab, 0x76, 0x1b, 0x36, 0x9d, 0xa1, 0x33, 0xaf, - 0x77, 0x35, 0xae, 0x87, 0x9d, 0xa1, 0x33, 0xab, 0xf8, 0x11, 0xbf, 0x70, 0xbb, 0x54, 0x23, 0x3e, - 0xd5, 0xa5, 0xf3, 0x71, 0x7a, 0x4c, 0x80, 0xaf, 0x03, 0xd2, 0x34, 0x95, 0x5a, 0xe4, 0xc4, 0xa4, - 0x2a, 0x71, 0xa9, 0x45, 0x3c, 0xe9, 0x62, 0x9c, 0x5c, 0xd2, 0x34, 0x99, 0x4b, 0xab, 0x5c, 0x88, - 0xaf, 0xc2, 0xba, 0x7d, 0xf2, 0x48, 0x13, 0x29, 0xa9, 0x3a, 0x2e, 0xed, 0x1b, 0xcf, 0xa4, 0x0f, - 0x79, 0x7c, 0xd7, 0x98, 0x80, 0x27, 0x64, 0x87, 0xc3, 0xf8, 0x0a, 0x20, 0xcd, 0x1b, 0x12, 0xd7, - 0xe1, 0x35, 0xd9, 0x73, 0x88, 0x46, 0xa5, 0x8f, 0x04, 0x55, 0xe0, 0xad, 0x10, 0x66, 0x5b, 0xc2, - 0x7b, 0x6a, 0xf4, 0xfd, 0xd0, 0xe2, 0x65, 0xb1, 0x25, 0x38, 0x16, 0x58, 0xdb, 0x01, 0xc4, 0x42, - 0x31, 0xf5, 0xe1, 0x1d, 0x4e, 0x2b, 0x39, 0x43, 0x27, 0xfe, 0xdd, 0x0f, 0x60, 0x95, 0x31, 0x27, - 0x1f, 0xbd, 0x22, 0x1a, 0x32, 0x67, 0x18, 0xfb, 0xe2, 0x6b, 0xeb, 0x8d, 0xcb, 0xfb, 0x50, 0x8c, - 0xe7, 0x27, 0xce, 0x83, 0xc8, 0x50, 0x94, 0x60, 0xcd, 0x4a, 0xad, 0x5d, 0x67, 0x6d, 0xc6, 0x97, - 0x32, 0x4a, 0xb2, 0x76, 0xa7, 0xd9, 0xe8, 0xc9, 0xaa, 0x72, 0xdc, 0xea, 0x35, 0x8e, 0x64, 0x94, - 0x8a, 0xf7, 0xd5, 0x7f, 0x4d, 0x42, 0x69, 0xfa, 0x8a, 0x84, 0x7f, 0x00, 0xe7, 0xc3, 0xf7, 0x0c, - 0x8f, 0xfa, 0xea, 0x53, 0xc3, 0xe5, 0x5b, 0x66, 0x44, 0xc4, 0xf1, 0x15, 0x2d, 0xda, 0x66, 0xc0, - 0xea, 0x52, 0xff, 0x0b, 0xc3, 0x65, 0x1b, 0x62, 0x44, 0x7c, 0xdc, 0x84, 0x8b, 0x96, 0xad, 0x7a, - 0x3e, 0xb1, 0x74, 0xe2, 0xea, 0xea, 0xe4, 0x25, 0x49, 0x25, 0x9a, 0x46, 0x3d, 0xcf, 0x16, 0x47, - 0x55, 0x64, 0xe5, 0x5d, 0xcb, 0xee, 0x06, 0xe4, 0x49, 0x0d, 0xaf, 0x06, 0xd4, 0x99, 0x04, 0x4b, - 0x2d, 0x4b, 0xb0, 0x77, 0x20, 0x3f, 0x22, 0x8e, 0x4a, 0x2d, 0xdf, 0x3d, 0xe5, 0x8d, 0x71, 0x4e, - 0xc9, 0x8d, 0x88, 0x23, 0xb3, 0xf1, 0x9b, 0xb9, 0x9f, 0xfc, 0x23, 0x05, 0xc5, 0x78, 0x73, 0xcc, - 0xee, 0x1a, 0x1a, 0x3f, 0x47, 0x12, 0xbc, 0xd2, 0x7c, 0xf0, 0xd2, 0x56, 0xba, 0x52, 0x63, 0x07, - 0xcc, 0x7e, 0x56, 0xb4, 0xac, 0x8a, 0xd0, 0x64, 0x87, 0x3b, 0xab, 0x2d, 0x54, 0xb4, 0x08, 0x39, - 0x25, 0x18, 0xe1, 0x43, 0xc8, 0x3e, 0xf2, 0xb8, 0xed, 0x2c, 0xb7, 0xfd, 0xe1, 0xcb, 0x6d, 0xdf, - 0xef, 0x72, 0xe3, 0xf9, 0xfb, 0x5d, 0xb5, 0xd5, 0x56, 0x8e, 0xaa, 0x4d, 0x25, 0x50, 0xc7, 0x17, - 0x20, 0x6d, 0x92, 0xe7, 0xa7, 0xd3, 0x47, 0x11, 0x87, 0xce, 0x1a, 0xf8, 0x0b, 0x90, 0x7e, 0x4a, - 0xc9, 0xe3, 0xe9, 0x03, 0x80, 0x43, 0xaf, 0x31, 0xf5, 0xaf, 0x43, 0x86, 0xc7, 0x0b, 0x03, 0x04, - 0x11, 0x43, 0x6f, 0xe1, 0x1c, 0xa4, 0x6b, 0x6d, 0x85, 0xa5, 0x3f, 0x82, 0xa2, 0x40, 0xd5, 0x4e, - 0x43, 0xae, 0xc9, 0x28, 0x59, 0xbe, 0x05, 0x59, 0x11, 0x04, 0xb6, 0x35, 0xa2, 0x30, 0xa0, 0xb7, - 0x82, 0x61, 0x60, 0x23, 0x11, 0x4a, 0x8f, 0x8f, 0x0e, 0x64, 0x05, 0x25, 0xe3, 0xcb, 0xeb, 0x41, - 0x31, 0xde, 0x17, 0xbf, 0x99, 0x9c, 0xfa, 0x4b, 0x02, 0x0a, 0xb1, 0x3e, 0x97, 0x35, 0x28, 0xc4, - 0x34, 0xed, 0xa7, 0x2a, 0x31, 0x0d, 0xe2, 0x05, 0x49, 0x01, 0x1c, 0xaa, 0x32, 0xe4, 0xac, 0x8b, - 0xf6, 0x46, 0x9c, 0xff, 0x5d, 0x02, 0xd0, 0x6c, 0x8b, 0x39, 0xe3, 0x60, 0xe2, 0x3b, 0x75, 0xf0, - 0xb7, 0x09, 0x28, 0x4d, 0xf7, 0x95, 0x33, 0xee, 0x5d, 0xfa, 0x4e, 0xdd, 0xfb, 0x67, 0x12, 0x56, - 0xa7, 0xba, 0xc9, 0xb3, 0x7a, 0xf7, 0x15, 0xac, 0x1b, 0x3a, 0x1d, 0x39, 0xb6, 0x4f, 0x2d, 0xed, - 0x54, 0x35, 0xe9, 0x13, 0x6a, 0x4a, 0x65, 0x5e, 0x28, 0xae, 0xbf, 0xbc, 0x5f, 0xad, 0x34, 0x26, - 0x7a, 0x4d, 0xa6, 0xb6, 0xbf, 0xd1, 0xa8, 0xcb, 0x47, 0x9d, 0x76, 0x4f, 0x6e, 0xd5, 0x1e, 0xaa, - 0xc7, 0xad, 
0x1f, 0xb7, 0xda, 0x5f, 0xb4, 0x14, 0x64, 0xcc, 0xd0, 0x5e, 0xe3, 0x56, 0xef, 0x00, - 0x9a, 0x75, 0x0a, 0x9f, 0x87, 0x45, 0x6e, 0xa1, 0xb7, 0xf0, 0x06, 0xac, 0xb5, 0xda, 0x6a, 0xb7, - 0x51, 0x97, 0x55, 0xf9, 0xde, 0x3d, 0xb9, 0xd6, 0xeb, 0x8a, 0x17, 0x88, 0x88, 0xdd, 0x9b, 0xde, - 0xd4, 0xbf, 0x49, 0xc1, 0xc6, 0x02, 0x4f, 0x70, 0x35, 0xb8, 0x3b, 0x88, 0xeb, 0xcc, 0xf7, 0xcf, - 0xe2, 0x7d, 0x85, 0x1d, 0xf9, 0x1d, 0xe2, 0xfa, 0xc1, 0x55, 0xe3, 0x0a, 0xb0, 0x28, 0x59, 0xbe, - 0xd1, 0x37, 0xa8, 0x1b, 0x3c, 0xd8, 0x88, 0x0b, 0xc5, 0xda, 0x04, 0x17, 0x6f, 0x36, 0xdf, 0x03, - 0xec, 0xd8, 0x9e, 0xe1, 0x1b, 0x4f, 0xa8, 0x6a, 0x58, 0xe1, 0xeb, 0x0e, 0xbb, 0x60, 0xa4, 0x15, - 0x14, 0x4a, 0x1a, 0x96, 0x1f, 0xb1, 0x2d, 0x3a, 0x20, 0x33, 0x6c, 0x56, 0xc0, 0x53, 0x0a, 0x0a, - 0x25, 0x11, 0xfb, 0x12, 0x14, 0x75, 0x7b, 0xcc, 0xba, 0x2e, 0xc1, 0x63, 0xe7, 0x45, 0x42, 0x29, - 0x08, 0x2c, 0xa2, 0x04, 0xfd, 0xf4, 0xe4, 0x59, 0xa9, 0xa8, 0x14, 0x04, 0x26, 0x28, 0x97, 0x61, - 0x8d, 0x0c, 0x06, 0x2e, 0x33, 0x1e, 0x1a, 0x12, 0x37, 0x84, 0x52, 0x04, 0x73, 0xe2, 0xd6, 0x7d, - 0xc8, 0x85, 0x71, 0x60, 0x47, 0x32, 0x8b, 0x84, 0xea, 0x88, 0x6b, 0x6f, 0x72, 0x27, 0xaf, 0xe4, - 0xac, 0x50, 0x78, 0x09, 0x8a, 0x86, 0xa7, 0x4e, 0x5e, 0xc9, 0x93, 0xdb, 0xc9, 0x9d, 0x9c, 0x52, - 0x30, 0xbc, 0xe8, 0x85, 0xb1, 0xfc, 0x75, 0x12, 0x4a, 0xd3, 0xaf, 0xfc, 0xb8, 0x0e, 0x39, 0xd3, - 0xd6, 0x08, 0x4f, 0x2d, 0xf1, 0x13, 0xd3, 0xce, 0x2b, 0x7e, 0x18, 0xa8, 0x34, 0x03, 0xbe, 0x12, - 0x69, 0x6e, 0xfd, 0x2d, 0x01, 0xb9, 0x10, 0xc6, 0xe7, 0x20, 0xed, 0x10, 0x7f, 0xc8, 0xcd, 0x65, - 0x0e, 0x92, 0x28, 0xa1, 0xf0, 0x31, 0xc3, 0x3d, 0x87, 0x58, 0x3c, 0x05, 0x02, 0x9c, 0x8d, 0xd9, - 0xba, 0x9a, 0x94, 0xe8, 0xfc, 0xfa, 0x61, 0x8f, 0x46, 0xd4, 0xf2, 0xbd, 0x70, 0x5d, 0x03, 0xbc, - 0x16, 0xc0, 0xf8, 0x1a, 0xac, 0xfb, 0x2e, 0x31, 0xcc, 0x29, 0x6e, 0x9a, 0x73, 0x51, 0x28, 0x88, - 0xc8, 0xfb, 0x70, 0x21, 0xb4, 0xab, 0x53, 0x9f, 0x68, 0x43, 0xaa, 0x4f, 0x94, 0xb2, 0xfc, 0x99, - 0xe1, 0x7c, 0x40, 0xa8, 0x07, 0xf2, 0x50, 0xb7, 0xfc, 0xf7, 0x04, 0xac, 0x87, 0x17, 0x26, 0x3d, - 0x0a, 0xd6, 0x11, 0x00, 0xb1, 0x2c, 0xdb, 0x8f, 0x87, 0x6b, 0x3e, 0x95, 0xe7, 0xf4, 0x2a, 0xd5, - 0x48, 0x49, 0x89, 0x19, 0xd8, 0x1a, 0x01, 0x4c, 0x24, 0x4b, 0xc3, 0x76, 0x11, 0x0a, 0xc1, 0x4f, - 0x38, 0xfc, 0x77, 0x40, 0x71, 0xc5, 0x06, 0x01, 0xb1, 0x9b, 0x15, 0xde, 0x84, 0xcc, 0x09, 0x1d, - 0x18, 0x56, 0xf0, 0x30, 0x2b, 0x06, 0xe1, 0x43, 0x48, 0x3a, 0x7a, 0x08, 0x39, 0xf8, 0x19, 0x6c, - 0x68, 0xf6, 0x68, 0xd6, 0xdd, 0x03, 0x34, 0x73, 0xcd, 0xf7, 0x3e, 0x4f, 0x7c, 0x09, 0x93, 0x16, - 0xf3, 0x7f, 0x89, 0xc4, 0xef, 0x93, 0xa9, 0xc3, 0xce, 0xc1, 0x1f, 0x92, 0x5b, 0x87, 0x42, 0xb5, - 0x13, 0xce, 0x54, 0xa1, 0x7d, 0x93, 0x6a, 0xcc, 0xfb, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xa3, - 0x58, 0x22, 0x30, 0xdf, 0x1c, 0x00, 0x00, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 
0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 
0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 
0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 
0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 
0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go index ec6eb168d6a79..165b2110df8ba 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -3,14 +3,16 @@ package descriptor -import fmt "fmt" -import strings "strings" -import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" -import sort "sort" -import strconv "strconv" -import reflect "reflect" -import proto "github.com/gogo/protobuf/proto" -import math "math" +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -358,7 +360,7 @@ func (this *FileOptions) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 23) + s := make([]string, 0, 25) s = append(s, "&descriptor.FileOptions{") if this.JavaPackage != nil { s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") @@ -414,6 +416,12 @@ func (this *FileOptions) GoString() string { if this.PhpNamespace != nil { s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") } diff --git a/vendor/github.com/google/gofuzz/go.mod b/vendor/github.com/google/gofuzz/go.mod new file mode 100644 index 0000000000000..8ec4fe9e9725a --- /dev/null +++ b/vendor/github.com/google/gofuzz/go.mod @@ -0,0 +1,3 @@ +module github.com/google/gofuzz + +go 1.12 diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go index 3e4e90dc444cd..d651a2b0619fa 100644 --- a/vendor/github.com/google/uuid/node.go +++ b/vendor/github.com/google/uuid/node.go @@ -48,6 +48,7 @@ func setNodeInterface(name string) bool { // does not specify a specific interface generate a random Node ID // (section 4.1.6) if name == "" { + ifname = "random" randomBits(nodeID[:]) return true } diff --git a/vendor/github.com/gophercloud/gophercloud/.travis.yml b/vendor/github.com/gophercloud/gophercloud/.travis.yml index 0955f170eae05..9153a00fc55f0 100644 --- a/vendor/github.com/gophercloud/gophercloud/.travis.yml +++ b/vendor/github.com/gophercloud/gophercloud/.travis.yml @@ -9,6 +9,7 @@ install: go: - "1.10" - "1.11" +- "1.12" - "tip" env: global: diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml index 8c31ea160e523..135e3b203a8d1 100644 --- a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml +++ b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml @@ -14,13 +14,20 @@ run: .zuul/playbooks/gophercloud-acceptance-test/run.yaml - job: - name: gophercloud-acceptance-test-queens + name: gophercloud-acceptance-test-ironic + parent: golang-test + 
description: | + Run gophercloud ironic acceptance test on master branch + run: .zuul/playbooks/gophercloud-acceptance-test-ironic/run.yaml + +- job: + name: gophercloud-acceptance-test-stein parent: gophercloud-acceptance-test description: | - Run gophercloud acceptance test on queens branch + Run gophercloud acceptance test on stein branch vars: global_env: - OS_BRANCH: stable/queens + OS_BRANCH: stable/stein - job: name: gophercloud-acceptance-test-rocky @@ -31,6 +38,15 @@ global_env: OS_BRANCH: stable/rocky +- job: + name: gophercloud-acceptance-test-queens + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on queens branch + vars: + global_env: + OS_BRANCH: stable/queens + - job: name: gophercloud-acceptance-test-pike parent: gophercloud-acceptance-test @@ -74,6 +90,7 @@ jobs: - gophercloud-unittest - gophercloud-acceptance-test + - gophercloud-acceptance-test-ironic recheck-mitaka: jobs: - gophercloud-acceptance-test-mitaka @@ -92,7 +109,6 @@ recheck-rocky: jobs: - gophercloud-acceptance-test-rocky - periodic: + recheck-stein: jobs: - - gophercloud-unittest - - gophercloud-acceptance-test + - gophercloud-acceptance-test-stein diff --git a/vendor/github.com/gophercloud/gophercloud/doc.go b/vendor/github.com/gophercloud/gophercloud/doc.go index 131cc8e303a18..953ca822a97a7 100644 --- a/vendor/github.com/gophercloud/gophercloud/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/doc.go @@ -9,20 +9,37 @@ Provider structs represent the cloud providers that offer and manage a collection of services. You will generally want to create one Provider client per OpenStack cloud. + It is now recommended to use the `clientconfig` package found at + https://github.com/gophercloud/utils/tree/master/openstack/clientconfig + for all authentication purposes. + + The below documentation is still relevant. clientconfig simply implements + the below and presents it in an easier and more flexible way. + Use your OpenStack credentials to create a Provider client. The IdentityEndpoint is typically refered to as "auth_url" or "OS_AUTH_URL" in information provided by the cloud operator. Additionally, the cloud may refer to TenantID or TenantName as project_id and project_name. Credentials are specified like so: - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", - TenantID: "{tenant_id}", - } + opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + Username: "{username}", + Password: "{password}", + TenantID: "{tenant_id}", + } + + provider, err := openstack.AuthenticatedClient(opts) - provider, err := openstack.AuthenticatedClient(opts) +You can authenticate with a token by doing: + + opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + TokenID: "{token_id}", + TenantID: "{tenant_id}", + } + + provider, err := openstack.AuthenticatedClient(opts) You may also use the openstack.AuthOptionsFromEnv() helper function. This function reads in standard environment variables frequently found in an @@ -39,16 +56,16 @@ operations for a particular OpenStack service. Examples of services include: Compute, Object Storage, Block Storage. 
In order to define one, you need to pass in the parent provider, like so: - opts := gophercloud.EndpointOpts{Region: "RegionOne"} + opts := gophercloud.EndpointOpts{Region: "RegionOne"} - client, err := openstack.NewComputeV2(provider, opts) + client, err := openstack.NewComputeV2(provider, opts) Resources Resource structs are the domain models that services make use of in order to work with and represent the state of API resources: - server, err := servers.Get(client, "{serverId}").Extract() + server, err := servers.Get(client, "{serverId}").Extract() Intermediate Result structs are returned for API operations, which allow generic access to the HTTP headers, response body, and any errors associated @@ -56,11 +73,11 @@ with the network transaction. To turn a result into a usable resource struct, you must call the Extract method which is chained to the response, or an Extract function from an applicable extension: - result := servers.Get(client, "{serverId}") + result := servers.Get(client, "{serverId}") - // Attempt to extract the disk configuration from the OS-DCF disk config - // extension: - config, err := diskconfig.ExtractGet(result) + // Attempt to extract the disk configuration from the OS-DCF disk config + // extension: + config, err := diskconfig.ExtractGet(result) All requests that enumerate a collection return a Pager struct that is used to iterate through the results one page at a time. Use the EachPage method on that @@ -68,17 +85,17 @@ Pager to handle each successive Page in a closure, then use the appropriate extraction method from that request's package to interpret that Page as a slice of results: - err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { - s, err := servers.ExtractServers(page) - if err != nil { - return false, err - } + err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { + s, err := servers.ExtractServers(page) + if err != nil { + return false, err + } - // Handle the []servers.Server slice. + // Handle the []servers.Server slice. - // Return "false" or an error to prematurely stop fetching new pages. - return true, nil - }) + // Return "false" or an error to prematurely stop fetching new pages. + return true, nil + }) If you want to obtain the entire collection of pages without doing any intermediary processing on each page, you can use the AllPages method: diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go index 4bf10246843f5..0bcb3af7f00c4 100644 --- a/vendor/github.com/gophercloud/gophercloud/errors.go +++ b/vendor/github.com/gophercloud/gophercloud/errors.go @@ -122,6 +122,11 @@ type ErrDefault408 struct { ErrUnexpectedResponseCode } +// ErrDefault409 is the default error type returned on a 409 HTTP response code. +type ErrDefault409 struct { + ErrUnexpectedResponseCode +} + // ErrDefault429 is the default error type returned on a 429 HTTP response code. type ErrDefault429 struct { ErrUnexpectedResponseCode @@ -211,6 +216,12 @@ type Err408er interface { Error408(ErrUnexpectedResponseCode) error } +// Err409er is the interface resource error types implement to override the error message +// from a 409 error. +type Err409er interface { + Error409(ErrUnexpectedResponseCode) error +} + // Err429er is the interface resource error types implement to override the error message // from a 429 error. 
type Err429er interface { diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go index 0bb1f483752c7..0e8d90ff8262a 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go @@ -13,15 +13,19 @@ AuthOptionsFromEnv fills out an identity.AuthOptions structure with the settings found on the various OpenStack OS_* environment variables. The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME, -OS_PASSWORD, OS_TENANT_ID, and OS_TENANT_NAME. +OS_PASSWORD and OS_PROJECT_ID. Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must have settings, -or an error will result. OS_TENANT_ID, OS_TENANT_NAME, OS_PROJECT_ID, and -OS_PROJECT_NAME are optional. +or an error will result. OS_PROJECT_ID, is optional. -OS_TENANT_ID and OS_TENANT_NAME are mutually exclusive to OS_PROJECT_ID and -OS_PROJECT_NAME. If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will -still be referred as "tenant" in Gophercloud. +OS_TENANT_ID and OS_TENANT_NAME are deprecated forms of OS_PROJECT_ID and +OS_PROJECT_NAME and the latter are expected against a v3 auth api. + +If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will still be referred +as "tenant" in Gophercloud. + +If OS_PROJECT_NAME is set, it requires OS_PROJECT_ID to be set as well to +handle projects not on the default domain. To use this function, first set the OS_* environment variables (for example, by sourcing an `openrc` file), then: @@ -83,6 +87,13 @@ func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { return nilOptions, err } + if domainID == "" && domainName == "" && tenantID == "" && tenantName != "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_PROJECT_ID", + } + return nilOptions, err + } + if applicationCredentialID == "" && applicationCredentialName != "" && applicationCredentialSecret != "" { if userID == "" && username == "" { return nilOptions, gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go index 064a70dbedb00..50f239711e72e 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/client.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/client.go @@ -310,6 +310,12 @@ func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointO return initClientOpts(client, eo, "baremetal") } +// NewBareMetalIntrospectionV1 creates a ServiceClient that may be used with the v1 +// bare metal introspection package. +func NewBareMetalIntrospectionV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "baremetal-inspector") +} + // NewObjectStorageV1 creates a ServiceClient that may be used with the v1 // object storage package. 
func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go index 4e5e499e3aa42..b72807770ee8a 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go @@ -68,7 +68,7 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r Create return } _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, + OkCodes: []int{200, 201}, }) return } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go index 539019e90d375..753024a18b728 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go @@ -148,7 +148,7 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r Create return } -// Get retrieves details of a single flavor. Use ExtractFlavor to convert its +// Get retrieves details of a single flavor. Use Extract to convert its // result into a Flavor. func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { _, r.Err = client.Get(getURL(client, id), &r.Body, nil) diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go index 646bd4ccf13d7..ee8e93b1dcbf1 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go @@ -183,9 +183,15 @@ type CreateOpts struct { // AccessIPv4 specifies an IPv4 address for the instance. AccessIPv4 string `json:"accessIPv4,omitempty"` - // AccessIPv6 pecifies an IPv6 address for the instance. + // AccessIPv6 specifies an IPv6 address for the instance. AccessIPv6 string `json:"accessIPv6,omitempty"` + // Min specifies Minimum number of servers to launch. + Min int `json:"min_count,omitempty"` + + // Max specifies Maximum number of servers to launch. + Max int `json:"max_count,omitempty"` + // ServiceClient will allow calls to be made to retrieve an image or // flavor ID by name. ServiceClient *gophercloud.ServiceClient `json:"-"` @@ -276,6 +282,14 @@ func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) { b["flavorRef"] = flavorID } + if opts.Min != 0 { + b["min_count"] = opts.Min + } + + if opts.Max != 0 { + b["max_count"] = opts.Max + } + return map[string]interface{}{"server": b}, nil } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go index 2bc4579727754..1d9a77bcf5759 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go @@ -122,7 +122,7 @@ type UpdateOpts struct { Description *string `json:"description,omitempty"` // TTL is the time to live of the RecordSet. 
- TTL int `json:"ttl,omitempty"` + TTL *int `json:"ttl,omitempty"` // Records are the DNS records of the RecordSet. Records []string `json:"records,omitempty"` @@ -135,10 +135,17 @@ func (opts UpdateOpts) ToRecordSetUpdateMap() (map[string]interface{}, error) { return nil, err } - if opts.TTL > 0 { - b["ttl"] = opts.TTL - } else { - b["ttl"] = nil + // If opts.TTL was actually set, use 0 as a special value to send "null", + // even though the result from the API is 0. + // + // Otherwise, don't send the TTL field. + if opts.TTL != nil { + ttl := *(opts.TTL) + if ttl > 0 { + b["ttl"] = ttl + } else { + b["ttl"] = nil + } } return b, nil diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go index 2d20fa6f4b833..e4d766b2327e4 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go @@ -134,9 +134,9 @@ func Get(c *gophercloud.ServiceClient, token string) (r GetResult) { OkCodes: []int{200, 203}, }) if resp != nil { - r.Err = err r.Header = resp.Header } + r.Err = err return } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/requests.go index 005bd1e79a4a2..c512e4d72f683 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/requests.go @@ -29,21 +29,23 @@ type ListOptsBuilder interface { // sort by a particular listener attribute. SortDir sets the direction, and is // either `asc' or `desc'. Marker and Limit are used for pagination. type ListOpts struct { - ID string `q:"id"` - Name string `q:"name"` - AdminStateUp *bool `q:"admin_state_up"` - ProjectID string `q:"project_id"` - LoadbalancerID string `q:"loadbalancer_id"` - DefaultPoolID string `q:"default_pool_id"` - Protocol string `q:"protocol"` - ProtocolPort int `q:"protocol_port"` - ConnectionLimit int `q:"connection_limit"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` - TimeoutClientData *int `q:"timeout_client_data"` - TimeoutMemberData *int `q:"timeout_member_data"` + ID string `q:"id"` + Name string `q:"name"` + AdminStateUp *bool `q:"admin_state_up"` + ProjectID string `q:"project_id"` + LoadbalancerID string `q:"loadbalancer_id"` + DefaultPoolID string `q:"default_pool_id"` + Protocol string `q:"protocol"` + ProtocolPort int `q:"protocol_port"` + ConnectionLimit int `q:"connection_limit"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` + TimeoutClientData *int `q:"timeout_client_data"` + TimeoutMemberData *int `q:"timeout_member_data"` + TimeoutMemberConnect *int `q:"timeout_member_connect"` + TimeoutTCPInspect *int `q:"timeout_tcp_inspect"` } // ToListenerListQuery formats a ListOpts into a query string. 
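For reference, a minimal sketch of what the pointer-valued TTL in the recordsets UpdateOpts change above means for callers (illustrative only; it assumes the github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets import and a made-up record value): leaving TTL nil omits the field and keeps the current TTL, while passing a pointer to 0 sends "ttl": null to clear it.

    ttl := 0

    // TTL left nil: the "ttl" key is omitted, so the existing TTL is preserved.
    keepTTL := recordsets.UpdateOpts{Records: []string{"10.0.0.1"}}

    // TTL explicitly set to 0: the builder sends "ttl": null to clear the TTL.
    clearTTL := recordsets.UpdateOpts{TTL: &ttl, Records: []string{"10.0.0.1"}}

    keepBody, _ := keepTTL.ToRecordSetUpdateMap()   // no "ttl" key in the request body
    clearBody, _ := clearTTL.ToRecordSetUpdateMap() // body contains "ttl": nil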
@@ -120,6 +122,15 @@ type CreateOpts struct { // Backend member inactivity timeout in milliseconds TimeoutMemberData *int `json:"timeout_member_data,omitempty"` + + // Backend member connection timeout in milliseconds + TimeoutMemberConnect *int `json:"timeout_member_connect,omitempty"` + + // Time, in milliseconds, to wait for additional TCP packets for content inspection + TimeoutTCPInspect *int `json:"timeout_tcp_inspect,omitempty"` + + // A dictionary of optional headers to insert into the request before it is sent to the backend member. + InsertHeaders map[string]string `json:"insert_headers,omitempty"` } // ToListenerCreateMap builds a request body from CreateOpts. @@ -185,6 +196,12 @@ type UpdateOpts struct { // Backend member inactivity timeout in milliseconds TimeoutMemberData *int `json:"timeout_member_data,omitempty"` + + // Backend member connection timeout in milliseconds + TimeoutMemberConnect *int `json:"timeout_member_connect,omitempty"` + + // Time, in milliseconds, to wait for additional TCP packets for content inspection + TimeoutTCPInspect *int `json:"timeout_tcp_inspect,omitempty"` } // ToListenerUpdateMap builds a request body from UpdateOpts. diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/results.go index 45164b333510b..a8f7682101842 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/results.go @@ -68,6 +68,15 @@ type Listener struct { // Backend member inactivity timeout in milliseconds TimeoutMemberData int `json:"timeout_member_data"` + + // Backend member connection timeout in milliseconds + TimeoutMemberConnect int `json:"timeout_member_connect"` + + // Time, in milliseconds, to wait for additional TCP packets for content inspection + TimeoutTCPInspect int `json:"timeout_tcp_inspect"` + + // A dictionary of optional headers to insert into the request before it is sent to the backend member. + InsertHeaders map[string]string `json:"insert_headers"` } type Stats struct { diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/requests.go index 05b0367f69165..4ff4210fa2505 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/requests.go @@ -81,10 +81,16 @@ type CreateOpts struct { // Human-readable description for the Loadbalancer. Description string `json:"description,omitempty"` + // Providing a neutron port ID for the vip_port_id tells Octavia to use this + // port for the VIP. If the port has more than one subnet you must specify + // either the vip_subnet_id or vip_address to clarify which address should + // be used for the VIP. + VipPortID string `json:"vip_port_id,omitempty"` + // The subnet on which to allocate the Loadbalancer's address. A project can // only create Loadbalancers on networks authorized by policy (e.g. networks // that belong to them or networks that are shared). - VipSubnetID string `json:"vip_subnet_id" required:"true"` + VipSubnetID string `json:"vip_subnet_id,omitempty"` // The network on which to allocate the Loadbalancer's address. 
A tenant can // only create Loadbalancers on networks authorized by policy (e.g. networks diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go index b7e6e9189dbc9..0c0db64d8d301 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go @@ -5,6 +5,12 @@ import ( "github.com/gophercloud/gophercloud/pagination" ) +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToFloatingIPListQuery() (string, error) +} + // ListOpts allows the filtering and sorting of paginated collections through // the API. Filtering is achieved by passing in struct field values that map to // the floating IP attributes you want to see returned. SortKey allows you to @@ -31,16 +37,25 @@ type ListOpts struct { NotTagsAny string `q:"not-tags-any"` } +// ToNetworkListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToFloatingIPListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + // List returns a Pager which allows you to iterate over a collection of // floating IP resources. It accepts a ListOpts struct, which allows you to // filter and sort the returned collection for greater efficiency. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToFloatingIPListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { return FloatingIPPage{pagination.LinkedPageBase{PageResult: r}} }) } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go index b0b25a9d66c20..a9709ccec3f7d 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go @@ -56,11 +56,13 @@ type commonResult struct { // Extract will extract a FloatingIP resource from a result. func (r commonResult) Extract() (*FloatingIP, error) { - var s struct { - FloatingIP *FloatingIP `json:"floatingip"` - } + var s FloatingIP err := r.ExtractInto(&s) - return s.FloatingIP, err + return &s, err +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "floatingip") } // CreateResult represents the result of a create operation. 
Call its Extract @@ -123,3 +125,7 @@ func ExtractFloatingIPs(r pagination.Page) ([]FloatingIP, error) { err := (r.(FloatingIPPage)).ExtractInto(&s) return s.FloatingIPs, err } + +func ExtractFloatingIPsInto(r pagination.Page, v interface{}) error { + return r.(FloatingIPPage).Result.ExtractIntoSlicePtr(v, "floatingips") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go index a6fa4a35ea4a1..857e1947e1f64 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go @@ -8,7 +8,7 @@ import ( // GatewayInfo represents the information of an external gateway for any // particular network router. type GatewayInfo struct { - NetworkID string `json:"network_id"` + NetworkID string `json:"network_id,omitempty"` EnableSNAT *bool `json:"enable_snat,omitempty"` ExternalFixedIPs []ExternalFixedIP `json:"external_fixed_ips,omitempty"` } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go index e768b71f82034..9d1dd5a7ea4ee 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go @@ -45,8 +45,9 @@ Example to Update a Network networkID := "484cda0e-106f-4f4b-bb3f-d413710bbe78" + name := "new_name" updateOpts := networks.UpdateOpts{ - Name: "new_name", + Name: &name, } network, err := networks.Update(networkClient, networkID, updateOpts).Extract() diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go index d52d099a67e41..8006c481679d6 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go @@ -112,7 +112,7 @@ type UpdateOptsBuilder interface { // UpdateOpts represents options used to update a network. 
type UpdateOpts struct { AdminStateUp *bool `json:"admin_state_up,omitempty"` - Name string `json:"name,omitempty"` + Name *string `json:"name,omitempty"` Description *string `json:"description,omitempty"` Shared *bool `json:"shared,omitempty"` } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go index d0ed8dff06a0b..7d3a1b9b65c32 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go @@ -97,10 +97,12 @@ Example to Create a Subnet With a Default Gateway Example to Update a Subnet subnetID := "db77d064-e34f-4d06-b060-f21e28a61c23" + dnsNameservers := []string{"8.8.8.8"} + name := "new_name" updateOpts := subnets.UpdateOpts{ - Name: "new_name", - DNSNameservers: []string{"8.8.8.8}, + Name: &name, + DNSNameservers: &dnsNameservers, } subnet, err := subnets.Update(networkClient, subnetID, updateOpts).Extract() diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go index d2c4a29f7ce2f..3e56bf389af32 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go @@ -173,7 +173,7 @@ type UpdateOptsBuilder interface { // UpdateOpts represents the attributes used when updating an existing subnet. type UpdateOpts struct { // Name is a human-readable name of the subnet. - Name string `json:"name,omitempty"` + Name *string `json:"name,omitempty"` // Description of the subnet. Description *string `json:"description,omitempty"` @@ -188,7 +188,7 @@ type UpdateOpts struct { GatewayIP *string `json:"gateway_ip,omitempty"` // DNSNameservers are the nameservers to be set via DHCP. - DNSNameservers []string `json:"dns_nameservers,omitempty"` + DNSNameservers *[]string `json:"dns_nameservers,omitempty"` // HostRoutes are any static host routes to be set via DHCP. HostRoutes *[]HostRoute `json:"host_routes,omitempty"` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go index 9e5f664198044..ffc4f05297b62 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go @@ -73,6 +73,9 @@ Example to Update a Container Metadata: map[string]string{ "bar": "baz", }, + RemoveMetadata: []string{ + "foo", + }, } container, err := containers.Update(objectStorageClient, containerName, updateOpts).Extract() diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go index 89aa996839d99..ca99bb2a6aae0 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go @@ -130,6 +130,7 @@ type UpdateOptsBuilder interface { // deleting a container's metadata. 
type UpdateOpts struct { Metadata map[string]string + RemoveMetadata []string ContainerRead string `h:"X-Container-Read"` ContainerSyncTo string `h:"X-Container-Sync-To"` ContainerSyncKey string `h:"X-Container-Sync-Key"` @@ -148,9 +149,15 @@ func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) { if err != nil { return nil, err } + for k, v := range opts.Metadata { h["X-Container-Meta-"+k] = v } + + for _, k := range opts.RemoveMetadata { + h["X-Remove-Container-Meta-"+k] = "remove" + } + return h, nil } diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go index 365b4cbcdf1d9..fce00462fd36b 100644 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -2,6 +2,7 @@ package gophercloud import ( "bytes" + "context" "encoding/json" "errors" "io" @@ -76,13 +77,21 @@ type ProviderClient struct { // with the token and reauth func zeroed. Such client can be used to perform reauthorization. Throwaway bool + // Context is the context passed to the HTTP request. + Context context.Context + + // mut is a mutex for the client. It protects read and write access to client attributes such as getting + // and setting the TokenID. mut *sync.RWMutex + // reauthmut is a mutex for reauthentication it attempts to ensure that only one reauthentication + // attempt happens at one time. reauthmut *reauthlock authResult AuthResult } +// reauthlock represents a set of attributes used to help in the reauthentication process. type reauthlock struct { sync.RWMutex reauthing bool @@ -219,7 +228,7 @@ func (client *ProviderClient) Reauthenticate(previousToken string) (err error) { return nil } - if client.mut == nil { + if client.reauthmut == nil { return client.ReauthFunc() } @@ -234,9 +243,6 @@ func (client *ProviderClient) Reauthenticate(previousToken string) (err error) { } client.reauthmut.Unlock() - client.mut.Lock() - defer client.mut.Unlock() - client.reauthmut.Lock() client.reauthmut.reauthing = true client.reauthmut.done = sync.NewCond(client.reauthmut) @@ -312,6 +318,9 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) if err != nil { return nil, err } + if client.Context != nil { + req = req.WithContext(client.Context) + } // Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to // modify or omit any header. 
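The Context field added to ProviderClient above lets callers bound every request made through a client. A minimal sketch, assuming the context, time and github.com/gophercloud/gophercloud/openstack imports and an AuthOptions value named opts as in the gophercloud doc.go examples earlier in this diff:

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    provider, err := openstack.AuthenticatedClient(opts)
    if err != nil {
        // handle the authentication failure
    }

    // Every request issued through this provider (and through service clients
    // derived from it) now carries ctx and is cancelled when ctx expires.
    provider.Context = ctx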
@@ -434,6 +443,11 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) if error408er, ok := errType.(Err408er); ok { err = error408er.Error408(respErr) } + case http.StatusConflict: + err = ErrDefault409{respErr} + if error409er, ok := errType.(Err409er); ok { + err = error409er.Error409(respErr) + } case 429: err = ErrDefault429{respErr} if error429er, ok := errType.(Err429er); ok { diff --git a/vendor/github.com/gophercloud/gophercloud/service_client.go b/vendor/github.com/gophercloud/gophercloud/service_client.go index c889201f95639..f222f05a66dec 100644 --- a/vendor/github.com/gophercloud/gophercloud/service_client.go +++ b/vendor/github.com/gophercloud/gophercloud/service_client.go @@ -131,6 +131,8 @@ func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { opts.MoreHeaders["X-OpenStack-Volume-API-Version"] = client.Microversion case "baremetal": opts.MoreHeaders["X-OpenStack-Ironic-API-Version"] = client.Microversion + case "baremetal-introspection": + opts.MoreHeaders["X-OpenStack-Ironic-Inspector-API-Version"] = client.Microversion } if client.Type != "" { diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go index c8d9b0a230f9f..1cbe04b7d0fc0 100644 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -40,31 +40,35 @@ func (c *Cache) Purge() { // Add adds a value to the cache. Returns true if an eviction occurred. func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() - defer c.lock.Unlock() - return c.lru.Add(key, value) + evicted = c.lru.Add(key, value) + c.lock.Unlock() + return evicted } // Get looks up a key's value from the cache. func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() - defer c.lock.Unlock() - return c.lru.Get(key) + value, ok = c.lru.Get(key) + c.lock.Unlock() + return value, ok } // Contains checks if a key is in the cache, without updating the // recent-ness or deleting it for being stale. func (c *Cache) Contains(key interface{}) bool { c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Contains(key) + containKey := c.lru.Contains(key) + c.lock.RUnlock() + return containKey } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Peek(key) + value, ok = c.lru.Peek(key) + c.lock.RUnlock() + return value, ok } // ContainsOrAdd checks if a key is in the cache without updating the @@ -98,13 +102,15 @@ func (c *Cache) RemoveOldest() { // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *Cache) Keys() []interface{} { c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Keys() + keys := c.lru.Keys() + c.lock.RUnlock() + return keys } // Len returns the number of items in the cache. 
func (c *Cache) Len() int { c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Len() + length := c.lru.Len() + c.lock.RUnlock() + return length } diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 54d5afe9576d1..50d56ffbf07e5 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -10,10 +10,6 @@ A high-performance 100% compatible drop-in replacement of "encoding/json" You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) -``` -Go开发者们请加入我们,滴滴出行平台技术部 taowen@didichuxing.com -``` - # Benchmark ![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go index e674d0f397edb..92d2cc4a3dd5c 100644 --- a/vendor/github.com/json-iterator/go/adapter.go +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -16,7 +16,7 @@ func Unmarshal(data []byte, v interface{}) error { return ConfigDefault.Unmarshal(data, v) } -// UnmarshalFromString convenient method to read from string instead of []byte +// UnmarshalFromString is a convenient method to read from string instead of []byte func UnmarshalFromString(str string, v interface{}) error { return ConfigDefault.UnmarshalFromString(str, v) } diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go index daecfed615e79..f6b8aeab0a12d 100644 --- a/vendor/github.com/json-iterator/go/any.go +++ b/vendor/github.com/json-iterator/go/any.go @@ -312,6 +312,10 @@ func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { any := *(*Any)(ptr) + if any == nil { + stream.WriteNil() + return + } any.WriteTo(stream) } diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod new file mode 100644 index 0000000000000..e05c42ff58b81 --- /dev/null +++ b/vendor/github.com/json-iterator/go/go.mod @@ -0,0 +1,11 @@ +module github.com/json-iterator/go + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 + github.com/google/gofuzz v1.0.0 + github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 + github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum new file mode 100644 index 0000000000000..d778b5a14d633 --- /dev/null +++ b/vendor/github.com/json-iterator/go/go.sum @@ -0,0 +1,14 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go index 4f883c0959f75..b9754638e8886 100644 --- a/vendor/github.com/json-iterator/go/iter_float.go +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -77,14 +77,12 @@ func (iter *Iterator) ReadFloat32() (ret float32) { } func (iter *Iterator) readPositiveFloat32() (ret float32) { - value := uint64(0) - c := byte(' ') i := iter.head // first char if i == iter.tail { return iter.readFloat32SlowPath() } - c = iter.buf[i] + c := iter.buf[i] i++ ind := floatDigits[c] switch ind { @@ -107,7 +105,7 @@ func (iter *Iterator) readPositiveFloat32() (ret float32) { return } } - value = uint64(ind) + value := uint64(ind) // chars before dot non_decimal_loop: for ; i < iter.tail; i++ { @@ -145,9 +143,7 @@ non_decimal_loop: } // too many decimal places return iter.readFloat32SlowPath() - case invalidCharForNumber: - fallthrough - case dotInNumber: + case invalidCharForNumber, dotInNumber: return iter.readFloat32SlowPath() } decimalPlaces++ @@ -218,14 +214,12 @@ func (iter *Iterator) ReadFloat64() (ret float64) { } func (iter *Iterator) readPositiveFloat64() (ret float64) { - value := uint64(0) - c := byte(' ') i := iter.head // first char if i == iter.tail { return iter.readFloat64SlowPath() } - c = iter.buf[i] + c := iter.buf[i] i++ ind := floatDigits[c] switch ind { @@ -248,7 +242,7 @@ func (iter *Iterator) readPositiveFloat64() (ret float64) { return } } - value = uint64(ind) + value := uint64(ind) // chars before dot non_decimal_loop: for ; i < iter.tail; i++ { @@ -286,9 +280,7 @@ non_decimal_loop: } // too many decimal places return iter.readFloat64SlowPath() - case invalidCharForNumber: - fallthrough - case dotInNumber: + case invalidCharForNumber, dotInNumber: return iter.readFloat64SlowPath() } decimalPlaces++ diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go index f58beb9137bf5..e91eefb15becf 100644 --- a/vendor/github.com/json-iterator/go/iter_skip.go +++ b/vendor/github.com/json-iterator/go/iter_skip.go @@ -37,17 +37,24 @@ func (iter *Iterator) SkipAndReturnBytes() []byte { return iter.stopCapture() } -type captureBuffer struct { - startedAt int - captured []byte +// SkipAndAppendBytes skips next JSON element and appends its content to +// buffer, returning the result. 
+func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { + iter.startCaptureTo(buf, iter.head) + iter.Skip() + return iter.stopCapture() } -func (iter *Iterator) startCapture(captureStartedAt int) { +func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { if iter.captured != nil { panic("already in capture mode") } iter.captureStartedAt = captureStartedAt - iter.captured = make([]byte, 0, 32) + iter.captured = buf +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) } func (iter *Iterator) stopCapture() []byte { @@ -58,13 +65,7 @@ func (iter *Iterator) stopCapture() []byte { remaining := iter.buf[iter.captureStartedAt:iter.head] iter.captureStartedAt = -1 iter.captured = nil - if len(captured) == 0 { - copied := make([]byte, len(remaining)) - copy(copied, remaining) - return copied - } - captured = append(captured, remaining...) - return captured + return append(captured, remaining...) } // Skip skips a json object and positions to relatively the next json object diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go index f67bc2e831510..6cf66d0438dbe 100644 --- a/vendor/github.com/json-iterator/go/iter_skip_strict.go +++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go @@ -2,12 +2,22 @@ package jsoniter -import "fmt" +import ( + "fmt" + "io" +) func (iter *Iterator) skipNumber() { if !iter.trySkipNumber() { iter.unreadByte() - iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + return + } + iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = nil + iter.ReadBigFloat() + } } } diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go index 04f68756bf35a..05e8fbf1fedd4 100644 --- a/vendor/github.com/json-iterator/go/reflect_extension.go +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -338,7 +338,7 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { for i := 0; i < structType.NumField(); i++ { field := structType.Field(i) tag, hastag := field.Tag().Lookup(ctx.getTagKey()) - if ctx.onlyTaggedField && !hastag { + if ctx.onlyTaggedField && !hastag && !field.Anonymous() { continue } tagParts := strings.Split(tag, ",") diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go index 7f66a88b043d4..547b4421e349e 100644 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -64,14 +64,26 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { return &numericMapKeyDecoder{decoderOfType(ctx, typ)} default: ptrType := reflect2.PtrTo(typ) - if ptrType.Implements(textMarshalerType) { + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { return &referenceDecoder{ &textUnmarshalerDecoder{ valType: ptrType, }, } } - if typ.Implements(textMarshalerType) { + if typ.Implements(textUnmarshalerType) { return &textUnmarshalerDecoder{ valType: typ, } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go index 58ac959ad8bf5..fea50719de9ea 100644 --- 
a/vendor/github.com/json-iterator/go/reflect_marshaler.go +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -93,8 +93,7 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteNil() return } - marshaler := obj.(json.Marshaler) - bytes, err := marshaler.MarshalJSON() + bytes, err := json.Marshal(obj) if err != nil { stream.Error = err } else { diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go index 9042eb0cb9894..f88722d14d198 100644 --- a/vendor/github.com/json-iterator/go/reflect_native.go +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -432,17 +432,19 @@ func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { } func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - src := *((*[]byte)(ptr)) - if len(src) == 0 { + if codec.sliceType.UnsafeIsNil(ptr) { stream.WriteNil() return } + src := *((*[]byte)(ptr)) encoding := base64.StdEncoding stream.writeByte('"') - size := encoding.EncodedLen(len(src)) - buf := make([]byte, size) - encoding.Encode(buf, src) - stream.buf = append(stream.buf, buf...) + if len(src) != 0 { + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + } stream.writeByte('"') } diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index 355d2d116b4c0..932641ac46ba3 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -530,8 +530,8 @@ func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *It } } if fieldDecoder == nil { - msg := "found unknown field: " + field if decoder.disallowUnknownFields { + msg := "found unknown field: " + field iter.ReportError("ReadObject", msg) } c := iter.nextToken() diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go index f318d2c59da39..826aa594ac6f3 100644 --- a/vendor/github.com/json-iterator/go/stream_float.go +++ b/vendor/github.com/json-iterator/go/stream_float.go @@ -1,6 +1,7 @@ package jsoniter import ( + "fmt" "math" "strconv" ) @@ -13,6 +14,10 @@ func init() { // WriteFloat32 write float32 to stream func (stream *Stream) WriteFloat32(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } abs := math.Abs(float64(val)) fmt := byte('f') // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. @@ -26,6 +31,10 @@ func (stream *Stream) WriteFloat32(val float32) { // WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster func (stream *Stream) WriteFloat32Lossy(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } if val < 0 { stream.writeByte('-') val = -val @@ -54,6 +63,10 @@ func (stream *Stream) WriteFloat32Lossy(val float32) { // WriteFloat64 write float64 to stream func (stream *Stream) WriteFloat64(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } abs := math.Abs(val) fmt := byte('f') // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
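As a quick illustration of the guards being added to the float writers in this file (a sketch only, assuming the usual jsoniter "github.com/json-iterator/go", math and fmt imports): NaN and ±Inf now surface as an error from Marshal instead of being written out as invalid JSON.

    // With the guard above, marshaling a NaN float fails cleanly.
    _, err := jsoniter.Marshal(math.NaN())
    fmt.Println(err) // expect a non-nil "unsupported value" error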
@@ -67,6 +80,10 @@ func (stream *Stream) WriteFloat64(val float64) { // WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster func (stream *Stream) WriteFloat64Lossy(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } if val < 0 { stream.writeByte('-') val = -val diff --git a/vendor/github.com/kr/fs/go.mod b/vendor/github.com/kr/fs/go.mod new file mode 100644 index 0000000000000..7c206e04c8adf --- /dev/null +++ b/vendor/github.com/kr/fs/go.mod @@ -0,0 +1 @@ +module "github.com/kr/fs" diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index 51f056615c3ad..ddd376b844cb5 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -521,11 +521,12 @@ func (r *Lexer) SkipRecursive() { r.scanToken() var start, end byte - if r.token.delimValue == '{' { + switch r.token.delimValue { + case '{': start, end = '{', '}' - } else if r.token.delimValue == '[' { + case '[': start, end = '[', ']' - } else { + default: r.consume() return } @@ -1151,7 +1152,7 @@ func (r *Lexer) Interface() interface{} { } else if r.token.delimValue == '[' { r.consume() - var ret []interface{} + ret := []interface{}{} for !r.IsDelim(']') { ret = append(ret, r.Interface()) r.WantComma() diff --git a/vendor/github.com/miekg/dns/BUILD.bazel b/vendor/github.com/miekg/dns/BUILD.bazel index 4e4cfd41ec7ca..1d85a28d700e2 100644 --- a/vendor/github.com/miekg/dns/BUILD.bazel +++ b/vendor/github.com/miekg/dns/BUILD.bazel @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "acceptfunc.go", "client.go", "clientconfig.go", "dane.go", @@ -24,7 +25,6 @@ go_library( "msg_helpers.go", "nsecx.go", "privaterr.go", - "rawmsg.go", "reverse.go", "sanitize.go", "scan.go", @@ -42,7 +42,6 @@ go_library( "update.go", "version.go", "xfr.go", - "zcompress.go", "zduplicate.go", "zmsg.go", "ztypes.go", diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 7f1aaa5de42df..39737c78aa5b3 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -7,10 +7,10 @@ > Less is more. -Complete and usable DNS library. All widely used Resource Records are supported, including the -DNSSEC types. It follows a lean and mean philosophy. If there is stuff you should know as a DNS -programmer there isn't a convenience function for it. Server side and client side programming is -supported, i.e. you can build servers and resolvers with it. +Complete and usable DNS library. All Resource Records are supported, including the DNSSEC types. +It follows a lean and mean philosophy. If there is stuff you should know as a DNS programmer there +isn't a convenience function for it. Server side and client side programming is supported, i.e. you +can build servers and resolvers with it. We try to keep the "master" branch as sane as possible and at the bleeding edge of standards, avoiding breaking changes wherever reasonable. We support the last two versions of Go. 
@@ -42,10 +42,9 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://github.com/tianon/rawdns * https://mesosphere.github.io/mesos-dns/ * https://pulse.turbobytes.com/ -* https://play.google.com/store/apps/details?id=com.turbobytes.dig * https://github.com/fcambus/statzone * https://github.com/benschw/dns-clb-go -* https://github.com/corny/dnscheck for http://public-dns.info/ +* https://github.com/corny/dnscheck for * https://namesmith.io * https://github.com/miekg/unbound * https://github.com/miekg/exdns @@ -56,7 +55,7 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://github.com/bamarni/dockness * https://github.com/fffaraz/microdns * http://kelda.io -* https://github.com/ipdcode/hades (JD.COM) +* https://github.com/ipdcode/hades * https://github.com/StackExchange/dnscontrol/ * https://www.dnsperf.com/ * https://dnssectest.net/ @@ -68,29 +67,28 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://github.com/rs/dnstrace * https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss)) * https://github.com/semihalev/sdns +* https://render.com Send pull request if you want to be listed here. # Features -* UDP/TCP queries, IPv4 and IPv6; -* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported; -* Fast: - * Reply speed around ~ 80K qps (faster hardware results in more qps); - * Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds; -* Server side programming (mimicking the net/http package); -* Client side programming; -* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519; -* EDNS0, NSID, Cookies; -* AXFR/IXFR; -* TSIG, SIG(0); -* DNS over TLS: optional encrypted connection between client and server; -* DNS name compression; -* Depends only on the standard library. +* UDP/TCP queries, IPv4 and IPv6 +* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported +* Fast +* Server side programming (mimicking the net/http package) +* Client side programming +* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519 +* EDNS0, NSID, Cookies +* AXFR/IXFR +* TSIG, SIG(0) +* DNS over TLS (DoT): encrypted connection between client and server over TCP +* DNS name compression Have fun! Miek Gieben - 2010-2012 - +DNS Authors 2012- # Building @@ -102,8 +100,8 @@ work: ## Examples -A short "how to use the API" is at the beginning of doc.go (this also will show -when you call `godoc github.com/miekg/dns`). +A short "how to use the API" is at the beginning of doc.go (this also will show when you call `godoc +github.com/miekg/dns`). Example programs can be found in the `github.com/miekg/exdns` repository. @@ -161,12 +159,13 @@ Example programs can be found in the `github.com/miekg/exdns` repository. 
* 7553 - URI record * 7858 - DNS over TLS: Initiation and Performance Considerations * 7871 - EDNS0 Client Subnet -* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies) +* 7873 - Domain Name System (DNS) Cookies * 8080 - EdDSA for DNSSEC +* 8499 - DNS Terminology -## Loosely based upon +## Loosely Based Upon -* `ldns` -* `NSD` -* `Net::DNS` -* `GRONG` +* ldns - +* NSD - +* Net::DNS - +* GRONG - diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go new file mode 100644 index 0000000000000..78c076c2535c9 --- /dev/null +++ b/vendor/github.com/miekg/dns/acceptfunc.go @@ -0,0 +1,56 @@ +package dns + +// MsgAcceptFunc is used early in the server code to accept or reject a message with RcodeFormatError. +// It returns a MsgAcceptAction to indicate what should happen with the message. +type MsgAcceptFunc func(dh Header) MsgAcceptAction + +// DefaultMsgAcceptFunc checks the request and will reject if: +// +// * isn't a request (don't respond in that case). +// * opcode isn't OpcodeQuery or OpcodeNotify +// * Zero bit isn't zero +// * has more than 1 question in the question section +// * has more than 1 RR in the Answer section +// * has more than 0 RRs in the Authority section +// * has more than 2 RRs in the Additional section +var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc + +// MsgAcceptAction represents the action to be taken. +type MsgAcceptAction int + +const ( + MsgAccept MsgAcceptAction = iota // Accept the message + MsgReject // Reject the message with a RcodeFormatError + MsgIgnore // Ignore the error and send nothing back. +) + +func defaultMsgAcceptFunc(dh Header) MsgAcceptAction { + if isResponse := dh.Bits&_QR != 0; isResponse { + return MsgIgnore + } + + // Don't allow dynamic updates, because then the sections can contain a whole bunch of RRs. + opcode := int(dh.Bits>>11) & 0xF + if opcode != OpcodeQuery && opcode != OpcodeNotify { + return MsgReject + } + + if isZero := dh.Bits&_Z != 0; isZero { + return MsgReject + } + if dh.Qdcount != 1 { + return MsgReject + } + // NOTIFY requests can have a SOA in the ANSWER section. See RFC 1996 Section 3.7 and 3.11. + if dh.Ancount > 1 { + return MsgReject + } + // IXFR request could have one SOA RR in the NS section. See RFC 1995, section 3. + if dh.Nscount > 1 { + return MsgReject + } + if dh.Arcount > 2 { + return MsgReject + } + return MsgAccept +} diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go index 63ced2bd0649a..2393564c52338 100644 --- a/vendor/github.com/miekg/dns/client.go +++ b/vendor/github.com/miekg/dns/client.go @@ -7,11 +7,8 @@ import ( "context" "crypto/tls" "encoding/binary" - "fmt" "io" - "io/ioutil" "net" - "net/http" "strings" "time" ) @@ -19,8 +16,6 @@ import ( const ( dnsTimeout time.Duration = 2 * time.Second tcpIdleTimeout time.Duration = 8 * time.Second - - dohMimeType = "application/dns-message" ) // A Conn represents a connection to a DNS server. 
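To make the rules documented in the new acceptfunc.go above concrete, here is an illustrative check against DefaultMsgAcceptFunc (a sketch only; it assumes the github.com/miekg/dns and fmt imports, and dns.Header is the wire-format header the function receives). How a server swaps in its own MsgAcceptFunc is handled elsewhere in the package and is not shown in this hunk.

    // A plain query with exactly one question passes the default checks...
    fmt.Println(dns.DefaultMsgAcceptFunc(dns.Header{Qdcount: 1}) == dns.MsgAccept) // true
    // ...while a header carrying two questions is rejected with FORMERR.
    fmt.Println(dns.DefaultMsgAcceptFunc(dns.Header{Qdcount: 2}) == dns.MsgReject) // true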
@@ -44,7 +39,6 @@ type Client struct { DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - HTTPClient *http.Client // The http.Client to use for DNS-over-HTTPS TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass group singleflight @@ -132,11 +126,6 @@ func (c *Client) Dial(address string) (conn *Conn, err error) { // attribute appropriately func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) { if !c.SingleInflight { - if c.Net == "https" { - // TODO(tmthrgd): pipe timeouts into exchangeDOH - return c.exchangeDOH(context.TODO(), m, address) - } - return c.exchange(m, address) } @@ -149,11 +138,6 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er cl = cl1 } r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { - if c.Net == "https" { - // TODO(tmthrgd): pipe timeouts into exchangeDOH - return c.exchangeDOH(context.TODO(), m, address) - } - return c.exchange(m, address) }) if r != nil && shared { @@ -199,67 +183,6 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro return r, rtt, err } -func (c *Client) exchangeDOH(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - p, err := m.Pack() - if err != nil { - return nil, 0, err - } - - req, err := http.NewRequest(http.MethodPost, a, bytes.NewReader(p)) - if err != nil { - return nil, 0, err - } - - req.Header.Set("Content-Type", dohMimeType) - req.Header.Set("Accept", dohMimeType) - - hc := http.DefaultClient - if c.HTTPClient != nil { - hc = c.HTTPClient - } - - if ctx != context.Background() && ctx != context.TODO() { - req = req.WithContext(ctx) - } - - t := time.Now() - - resp, err := hc.Do(req) - if err != nil { - return nil, 0, err - } - defer closeHTTPBody(resp.Body) - - if resp.StatusCode != http.StatusOK { - return nil, 0, fmt.Errorf("dns: server returned HTTP %d error: %q", resp.StatusCode, resp.Status) - } - - if ct := resp.Header.Get("Content-Type"); ct != dohMimeType { - return nil, 0, fmt.Errorf("dns: unexpected Content-Type %q; expected %q", ct, dohMimeType) - } - - p, err = ioutil.ReadAll(resp.Body) - if err != nil { - return nil, 0, err - } - - rtt = time.Since(t) - - r = new(Msg) - if err := r.Unpack(p); err != nil { - return r, 0, err - } - - // TODO: TSIG? Is it even supported over DoH? - - return r, rtt, nil -} - -func closeHTTPBody(r io.ReadCloser) error { - io.Copy(ioutil.Discard, io.LimitReader(r, 8<<20)) - return r.Close() -} - // ReadMsg reads a message from the connection co. // If the received message contains a TSIG record the transaction signature // is verified. 
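The hunk above removes the experimental DNS-over-HTTPS path from Client (the "https" network, exchangeDOH and the HTTPClient field). Callers that relied on it can still perform the same exchange by hand with net/http, which is essentially what the deleted code did; the sketch below only illustrates that idea, and the endpoint URL is a placeholder.

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"
        "log"
        "net/http"

        "github.com/miekg/dns"
    )

    const dohMimeType = "application/dns-message" // same MIME type the removed code used

    func main() {
        m := new(dns.Msg)
        m.SetQuestion(dns.Fqdn("example.org"), dns.TypeA)

        p, err := m.Pack()
        if err != nil {
            log.Fatal(err)
        }

        // POST the packed message to a DoH endpoint (placeholder URL).
        req, err := http.NewRequest(http.MethodPost, "https://dns.example/dns-query", bytes.NewReader(p))
        if err != nil {
            log.Fatal(err)
        }
        req.Header.Set("Content-Type", dohMimeType)
        req.Header.Set("Accept", dohMimeType)

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            log.Fatal(err)
        }

        r := new(dns.Msg)
        if err := r.Unpack(body); err != nil {
            log.Fatal(err)
        }
        fmt.Println(r.Answer)
    }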
This method always tries to return the message, however if an @@ -397,16 +320,12 @@ func (co *Conn) Read(p []byte) (n int, err error) { return 0, err } if l > len(p) { - return int(l), io.ErrShortBuffer + return l, io.ErrShortBuffer } return tcpRead(r, p[:l]) } // UDP connection - n, err = co.Conn.Read(p) - if err != nil { - return n, err - } - return n, err + return co.Conn.Read(p) } // WriteMsg sends a message through the connection co. @@ -428,10 +347,8 @@ func (co *Conn) WriteMsg(m *Msg) (err error) { if err != nil { return err } - if _, err = co.Write(out); err != nil { - return err - } - return nil + _, err = co.Write(out) + return err } // Write implements the net.Conn Write method. @@ -453,8 +370,7 @@ func (co *Conn) Write(p []byte) (n int, err error) { n, err := io.Copy(w, bytes.NewReader(p)) return int(n), err } - n, err = co.Conn.Write(p) - return n, err + return co.Conn.Write(p) } // Return the appropriate timeout for a specific request @@ -521,11 +437,7 @@ func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { // DialTimeout acts like Dial but takes a timeout. func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}} - conn, err = client.Dial(address) - if err != nil { - return nil, err - } - return conn, nil + return client.Dial(address) } // DialWithTLS connects to the address on the named network with TLS. @@ -534,12 +446,7 @@ func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, er network += "-tls" } client := Client{Net: network, TLSConfig: tlsConfig} - conn, err = client.Dial(address) - - if err != nil { - return nil, err - } - return conn, nil + return client.Dial(address) } // DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. @@ -548,21 +455,13 @@ func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout network += "-tls" } client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig} - conn, err = client.Dial(address) - if err != nil { - return nil, err - } - return conn, nil + return client.Dial(address) } // ExchangeContext acts like Exchange, but honors the deadline on the provided // context, if present. If there is both a context deadline and a configured // timeout on the client, the earliest of the two takes effect. func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - if !c.SingleInflight && c.Net == "https" { - return c.exchangeDOH(ctx, m, a) - } - var timeout time.Duration if deadline, ok := ctx.Deadline(); !ok { timeout = 0 @@ -571,7 +470,7 @@ func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, } // not passing the context to the underlying calls, as the API does not support // context. For timeouts you should set up Client.Dialer and call Client.Exchange. - // TODO(tmthrgd): this is a race condition + // TODO(tmthrgd,miekg): this is a race condition. c.Dialer = &net.Dialer{Timeout: timeout} return c.Exchange(m, a) } diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go index 14e18b0b38f6c..391d67a2c732a 100644 --- a/vendor/github.com/miekg/dns/defaults.go +++ b/vendor/github.com/miekg/dns/defaults.go @@ -4,6 +4,7 @@ import ( "errors" "net" "strconv" + "strings" ) const hexDigit = "0123456789abcdef" @@ -163,11 +164,72 @@ func (dns *Msg) IsEdns0() *OPT { // the number of labels. 
When false is returned the number of labels is not // defined. Also note that this function is extremely liberal; almost any // string is a valid domain name as the DNS is 8 bit protocol. It checks if each -// label fits in 63 characters, but there is no length check for the entire -// string s. I.e. a domain name longer than 255 characters is considered valid. +// label fits in 63 characters and that the entire name will fit into the 255 +// octet wire format limit. func IsDomainName(s string) (labels int, ok bool) { - _, labels, err := packDomainName(s, nil, 0, nil, false) - return labels, err == nil + // XXX: The logic in this function was copied from packDomainName and + // should be kept in sync with that function. + + const lenmsg = 256 + + if len(s) == 0 { // Ok, for instance when dealing with update RR without any rdata. + return 0, false + } + + s = Fqdn(s) + + // Each dot ends a segment of the name. Except for escaped dots (\.), which + // are normal dots. + + var ( + off int + begin int + wasDot bool + ) + for i := 0; i < len(s); i++ { + switch s[i] { + case '\\': + if off+1 > lenmsg { + return labels, false + } + + // check for \DDD + if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { + i += 3 + begin += 3 + } else { + i++ + begin++ + } + + wasDot = false + case '.': + if wasDot { + // two dots back to back is not legal + return labels, false + } + wasDot = true + + labelLen := i - begin + if labelLen >= 1<<6 { // top two bits of length must be clear + return labels, false + } + + // off can already (we're in a loop) be bigger than lenmsg + // this happens when a name isn't fully qualified + off += 1 + labelLen + if off > lenmsg { + return labels, false + } + + labels++ + begin = i + 1 + default: + wasDot = false + } + } + + return labels, true } // IsSubDomain checks if child is indeed a child of the parent. If child and parent @@ -181,7 +243,7 @@ func IsSubDomain(parent, child string) bool { // The checking is performed on the binary payload. func IsMsg(buf []byte) error { // Header - if len(buf) < 12 { + if len(buf) < headerSize { return errors.New("dns: bad message header") } // Header: Opcode @@ -191,11 +253,18 @@ func IsMsg(buf []byte) error { // IsFqdn checks if a domain name is fully qualified. func IsFqdn(s string) bool { - l := len(s) - if l == 0 { + s2 := strings.TrimSuffix(s, ".") + if s == s2 { return false } - return s[l-1] == '.' + + i := strings.LastIndexFunc(s2, func(r rune) bool { + return r != '\\' + }) + + // Test whether we have an even number of escape sequences before + // the dot or none. + return (len(s2)-i)%2 != 0 } // IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. @@ -244,12 +313,19 @@ func ReverseAddr(addr string) (arpa string, err error) { if ip == nil { return "", &Error{err: "unrecognized address: " + addr} } - if ip.To4() != nil { - return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." + - strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil + if v4 := ip.To4(); v4 != nil { + buf := make([]byte, 0, net.IPv4len*4+len("in-addr.arpa.")) + // Add it, in reverse, to the buffer + for i := len(v4) - 1; i >= 0; i-- { + buf = strconv.AppendInt(buf, int64(v4[i]), 10) + buf = append(buf, '.') + } + // Append "in-addr.arpa." and return (buf already has the final .) + buf = append(buf, "in-addr.arpa."...) 
+ return string(buf), nil } // Must be IPv6 - buf := make([]byte, 0, len(ip)*4+len("ip6.arpa.")) + buf := make([]byte, 0, net.IPv6len*4+len("ip6.arpa.")) // Add it, in reverse, to the buffer for i := len(ip) - 1; i >= 0; i-- { v := ip[i] diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go index e7557f51a8159..f57337b89eb83 100644 --- a/vendor/github.com/miekg/dns/dns.go +++ b/vendor/github.com/miekg/dns/dns.go @@ -34,10 +34,30 @@ type RR interface { // copy returns a copy of the RR copy() RR - // len returns the length (in octets) of the uncompressed RR in wire format. - len() int - // pack packs an RR into wire format. - pack([]byte, int, map[string]int, bool) (int, error) + + // len returns the length (in octets) of the compressed or uncompressed RR in wire format. + // + // If compression is nil, the uncompressed size will be returned, otherwise the compressed + // size will be returned and domain names will be added to the map for future compression. + len(off int, compression map[string]struct{}) int + + // pack packs the records RDATA into wire format. The header will + // already have been packed into msg. + pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) + + // unpack unpacks an RR from wire format. + // + // This will only be called on a new and empty RR type with only the header populated. It + // will only be called if the record's RDATA is non-empty. + unpack(msg []byte, off int) (off1 int, err error) + + // parse parses an RR from zone file format. + // + // This will only be called on a new and empty RR type with only the header populated. + parse(c *zlexer, origin, file string) *ParseError + + // isDuplicate returns whether the two RRs are duplicates. + isDuplicate(r2 RR) bool } // RR_Header is the header all DNS resource records share. @@ -70,28 +90,45 @@ func (h *RR_Header) String() string { return s } -func (h *RR_Header) len() int { - l := len(h.Name) + 1 +func (h *RR_Header) len(off int, compression map[string]struct{}) int { + l := domainNameLen(h.Name, off, compression, true) l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2) return l } +func (h *RR_Header) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + // RR_Header has no RDATA to pack. + return off, nil +} + +func (h *RR_Header) unpack(msg []byte, off int) (int, error) { + panic("dns: internal error: unpack should never be called on RR_Header") +} + +func (h *RR_Header) parse(c *zlexer, origin, file string) *ParseError { + panic("dns: internal error: parse should never be called on RR_Header") +} + // ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597. 
func (rr *RFC3597) ToRFC3597(r RR) error { - buf := make([]byte, r.len()*2) - off, err := PackRR(r, buf, 0, nil, false) + buf := make([]byte, Len(r)*2) + headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false) if err != nil { return err } buf = buf[:off] - if int(r.Header().Rdlength) > off { - return ErrBuf + + *rr = RFC3597{Hdr: *r.Header()} + rr.Hdr.Rdlength = uint16(off - headerEnd) + + if noRdata(rr.Hdr) { + return nil } - rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength)) + _, err = rr.unpack(buf, headerEnd) if err != nil { return err } - *rr = *rfc3597.(*RFC3597) + return nil } diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go index 26b512e7da566..3954d4198ed28 100644 --- a/vendor/github.com/miekg/dns/dnssec.go +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -67,9 +67,6 @@ var AlgorithmToString = map[uint8]string{ PRIVATEOID: "PRIVATEOID", } -// StringToAlgorithm is the reverse of AlgorithmToString. -var StringToAlgorithm = reverseInt8(AlgorithmToString) - // AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's. var AlgorithmToHash = map[uint8]crypto.Hash{ RSAMD5: crypto.MD5, // Deprecated in RFC 6725 @@ -102,9 +99,6 @@ var HashToString = map[uint8]string{ SHA512: "SHA512", } -// StringToHash is a map of names to hash IDs. -var StringToHash = reverseInt8(HashToString) - // DNSKEY flag values. const ( SEP = 1 @@ -268,16 +262,17 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { return ErrKey } + h0 := rrset[0].Header() rr.Hdr.Rrtype = TypeRRSIG - rr.Hdr.Name = rrset[0].Header().Name - rr.Hdr.Class = rrset[0].Header().Class + rr.Hdr.Name = h0.Name + rr.Hdr.Class = h0.Class if rr.OrigTtl == 0 { // If set don't override - rr.OrigTtl = rrset[0].Header().Ttl + rr.OrigTtl = h0.Ttl } - rr.TypeCovered = rrset[0].Header().Rrtype - rr.Labels = uint8(CountLabel(rrset[0].Header().Name)) + rr.TypeCovered = h0.Rrtype + rr.Labels = uint8(CountLabel(h0.Name)) - if strings.HasPrefix(rrset[0].Header().Name, "*") { + if strings.HasPrefix(h0.Name, "*") { rr.Labels-- // wildcard, remove from label count } @@ -401,7 +396,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { if rr.Algorithm != k.Algorithm { return ErrKey } - if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) { + if !strings.EqualFold(rr.SignerName, k.Hdr.Name) { return ErrKey } if k.Protocol != 3 { @@ -411,10 +406,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { // IsRRset checked that we have at least one RR and that the RRs in // the set have consistent type, class, and name. Also check that type and // class matches the RRSIG record. - if rrset[0].Header().Class != rr.Hdr.Class { - return ErrRRset - } - if rrset[0].Header().Rrtype != rr.TypeCovered { + if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered { return ErrRRset } @@ -563,7 +555,7 @@ func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey { pubkey := new(rsa.PublicKey) - expo := uint64(0) + var expo uint64 for i := 0; i < int(explen); i++ { expo <<= 8 expo |= uint64(keybuf[keyoff+i]) @@ -658,15 +650,16 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { wires := make(wireSlice, len(rrset)) for i, r := range rrset { r1 := r.copy() - r1.Header().Ttl = s.OrigTtl - labels := SplitDomainName(r1.Header().Name) + h := r1.Header() + h.Ttl = s.OrigTtl + labels := SplitDomainName(h.Name) // 6.2. Canonical RR Form. (4) - wildcards if len(labels) > int(s.Labels) { // Wildcard - r1.Header().Name = "*." 
+ strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." + h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." } // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase - r1.Header().Name = strings.ToLower(r1.Header().Name) + h.Name = strings.ToLower(h.Name) // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, @@ -724,7 +717,7 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { x.Target = strings.ToLower(x.Target) } // 6.2. Canonical RR Form. (5) - origTTL - wire := make([]byte, r1.len()+1) // +1 to be safe(r) + wire := make([]byte, Len(r1)+1) // +1 to be safe(r) off, err1 := PackRR(r1, wire, 0, nil, false) if err1 != nil { return nil, err1 diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go index 0389d7248ef17..d3d7cec9ef5ca 100644 --- a/vendor/github.com/miekg/dns/doc.go +++ b/vendor/github.com/miekg/dns/doc.go @@ -1,20 +1,20 @@ /* Package dns implements a full featured interface to the Domain Name System. -Server- and client-side programming is supported. -The package allows complete control over what is sent out to the DNS. The package -API follows the less-is-more principle, by presenting a small, clean interface. +Both server- and client-side programming is supported. The package allows +complete control over what is sent out to the DNS. The API follows the +less-is-more principle, by presenting a small, clean interface. -The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers, +It supports (asynchronous) querying/replying, incoming/outgoing zone transfers, TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing. -Note that domain names MUST be fully qualified, before sending them, unqualified + +Note that domain names MUST be fully qualified before sending them, unqualified names in a message will result in a packing failure. -Resource records are native types. They are not stored in wire format. -Basic usage pattern for creating a new resource record: +Resource records are native types. They are not stored in wire format. Basic +usage pattern for creating a new resource record: r := new(dns.MX) - r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, - Class: dns.ClassINET, Ttl: 3600} + r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600} r.Preference = 10 r.Mx = "mx.miek.nl." @@ -30,8 +30,8 @@ Or even: mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") -In the DNS messages are exchanged, these messages contain resource -records (sets). Use pattern for creating a message: +In the DNS messages are exchanged, these messages contain resource records +(sets). Use pattern for creating a message: m := new(dns.Msg) m.SetQuestion("miek.nl.", dns.TypeMX) @@ -40,8 +40,8 @@ Or when not certain if the domain name is fully qualified: m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX) -The message m is now a message with the question section set to ask -the MX records for the miek.nl. zone. +The message m is now a message with the question section set to ask the MX +records for the miek.nl. zone. The following is slightly more verbose, but more flexible: @@ -51,9 +51,8 @@ The following is slightly more verbose, but more flexible: m1.Question = make([]dns.Question, 1) m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} -After creating a message it can be sent. 
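The RRSIG.Sign changes in the dnssec.go hunk above only alter how the signature header is filled from rrset[0]; the calling convention is unchanged. As a reminder of that convention, here is a small signing sketch. The zone name, key parameters and validity window are illustrative and not taken from this diff.

    package main

    import (
        "crypto"
        "fmt"
        "log"
        "time"

        "github.com/miekg/dns"
    )

    func main() {
        // Generate an illustrative zone-signing key.
        key := &dns.DNSKEY{
            Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
            Flags:     dns.ZONE | dns.SEP,
            Protocol:  3,
            Algorithm: dns.ECDSAP256SHA256,
        }
        priv, err := key.Generate(256)
        if err != nil {
            log.Fatal(err)
        }

        // The RRset the signature will cover.
        mx, err := dns.NewRR("example.org. 3600 IN MX 10 mx.example.org.")
        if err != nil {
            log.Fatal(err)
        }
        rrset := []dns.RR{mx}

        // Fill in the RRSIG fields that Sign does not derive from the RRset itself.
        sig := &dns.RRSIG{
            Hdr:        dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeRRSIG, Class: dns.ClassINET, Ttl: 3600},
            KeyTag:     key.KeyTag(),
            SignerName: key.Hdr.Name,
            Algorithm:  key.Algorithm,
            Inception:  uint32(time.Now().Add(-time.Hour).Unix()),
            Expiration: uint32(time.Now().Add(14 * 24 * time.Hour).Unix()),
        }
        if err := sig.Sign(priv.(crypto.Signer), rrset); err != nil {
            log.Fatal(err)
        }
        fmt.Println(sig, sig.Verify(key, rrset))
    }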
-Basic use pattern for synchronous querying the DNS at a -server configured on 127.0.0.1 and port 53: +After creating a message it can be sent. Basic use pattern for synchronous +querying the DNS at a server configured on 127.0.0.1 and port 53: c := new(dns.Client) in, rtt, err := c.Exchange(m1, "127.0.0.1:53") @@ -99,25 +98,24 @@ the Answer section: Domain Name and TXT Character String Representations -Both domain names and TXT character strings are converted to presentation -form both when unpacked and when converted to strings. +Both domain names and TXT character strings are converted to presentation form +both when unpacked and when converted to strings. For TXT character strings, tabs, carriage returns and line feeds will be -converted to \t, \r and \n respectively. Back slashes and quotations marks -will be escaped. Bytes below 32 and above 127 will be converted to \DDD -form. +converted to \t, \r and \n respectively. Back slashes and quotations marks will +be escaped. Bytes below 32 and above 127 will be converted to \DDD form. -For domain names, in addition to the above rules brackets, periods, -spaces, semicolons and the at symbol are escaped. +For domain names, in addition to the above rules brackets, periods, spaces, +semicolons and the at symbol are escaped. DNSSEC -DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It -uses public key cryptography to sign resource records. The -public keys are stored in DNSKEY records and the signatures in RRSIG records. +DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses +public key cryptography to sign resource records. The public keys are stored in +DNSKEY records and the signatures in RRSIG records. -Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit -to a request. +Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) +bit to a request. m := new(dns.Msg) m.SetEdns0(4096, true) @@ -126,9 +124,9 @@ Signature generation, signature verification and key generation are all supporte DYNAMIC UPDATES -Dynamic updates reuses the DNS message format, but renames three of -the sections. Question is Zone, Answer is Prerequisite, Authority is -Update, only the Additional is not renamed. See RFC 2136 for the gory details. +Dynamic updates reuses the DNS message format, but renames three of the +sections. Question is Zone, Answer is Prerequisite, Authority is Update, only +the Additional is not renamed. See RFC 2136 for the gory details. You can set a rather complex set of rules for the existence of absence of certain resource records or names in a zone to specify if resource records @@ -145,10 +143,9 @@ DNS function shows which functions exist to specify the prerequisites. NONE rrset empty RRset does not exist dns.RRsetNotUsed zone rrset rr RRset exists (value dep) dns.Used -The prerequisite section can also be left empty. -If you have decided on the prerequisites you can tell what RRs should -be added or deleted. The next table shows the options you have and -what functions to call. +The prerequisite section can also be left empty. If you have decided on the +prerequisites you can tell what RRs should be added or deleted. The next table +shows the options you have and what functions to call. 3.4.2.6 - Table Of Metavalues Used In Update Section @@ -181,10 +178,10 @@ changes to the RRset after calling SetTsig() the signature will be incorrect. ... 
// When sending the TSIG RR is calculated and filled in before sending -When requesting an zone transfer (almost all TSIG usage is when requesting zone transfers), with -TSIG, this is the basic use pattern. In this example we request an AXFR for -miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A==" -and using the server 176.58.119.54: +When requesting an zone transfer (almost all TSIG usage is when requesting zone +transfers), with TSIG, this is the basic use pattern. In this example we +request an AXFR for miek.nl. with TSIG key named "axfr." and secret +"so6ZGir4GPAqINNh9U5c3A==" and using the server 176.58.119.54: t := new(dns.Transfer) m := new(dns.Msg) @@ -194,8 +191,8 @@ and using the server 176.58.119.54: c, err := t.In(m, "176.58.119.54:53") for r := range c { ... } -You can now read the records from the transfer as they come in. Each envelope is checked with TSIG. -If something is not correct an error is returned. +You can now read the records from the transfer as they come in. Each envelope +is checked with TSIG. If something is not correct an error is returned. Basic use pattern validating and replying to a message that has TSIG set. @@ -220,29 +217,30 @@ Basic use pattern validating and replying to a message that has TSIG set. PRIVATE RRS -RFC 6895 sets aside a range of type codes for private use. This range -is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these +RFC 6895 sets aside a range of type codes for private use. This range is 65,280 +- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these can be used, before requesting an official type code from IANA. -see http://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more +See https://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more information. EDNS0 -EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated -by RFC 6891. It defines an new RR type, the OPT RR, which is then completely +EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by +RFC 6891. It defines an new RR type, the OPT RR, which is then completely abused. + Basic use pattern for creating an (empty) OPT RR: o := new(dns.OPT) o.Hdr.Name = "." // MUST be the root zone, per definition. o.Hdr.Rrtype = dns.TypeOPT -The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) -interfaces. Currently only a few have been standardized: EDNS0_NSID -(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note -that these options may be combined in an OPT RR. -Basic use pattern for a server to check if (and which) options are set: +The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) interfaces. +Currently only a few have been standardized: EDNS0_NSID (RFC 5001) and +EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note that these options +may be combined in an OPT RR. Basic use pattern for a server to check if (and +which) options are set: // o is a dns.OPT for _, s := range o.Option { @@ -262,10 +260,9 @@ From RFC 2931: ... protection for glue records, DNS requests, protection for message headers on requests and responses, and protection of the overall integrity of a response. -It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared -secret approach in TSIG. -Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and -RSASHA512. 
+It works like TSIG, except that SIG(0) uses public key cryptography, instead of +the shared secret approach in TSIG. Supported algorithms: DSA, ECDSAP256SHA256, +ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512. Signing subsequent messages in multi-message sessions is not implemented. */ diff --git a/vendor/github.com/miekg/dns/duplicate.go b/vendor/github.com/miekg/dns/duplicate.go index 6372e8a194fd6..05c14aae3160e 100644 --- a/vendor/github.com/miekg/dns/duplicate.go +++ b/vendor/github.com/miekg/dns/duplicate.go @@ -7,18 +7,31 @@ package dns // is so, otherwise false. // It's is a protocol violation to have identical RRs in a message. func IsDuplicate(r1, r2 RR) bool { - if r1.Header().Class != r2.Header().Class { + // Check whether the record header is identical. + if !r1.Header().isDuplicate(r2.Header()) { return false } - if r1.Header().Rrtype != r2.Header().Rrtype { + + // Check whether the RDATA is identical. + return r1.isDuplicate(r2) +} + +func (r1 *RR_Header) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RR_Header) + if !ok { + return false + } + if r1.Class != r2.Class { + return false + } + if r1.Rrtype != r2.Rrtype { return false } - if !isDulicateName(r1.Header().Name, r2.Header().Name) { + if !isDulicateName(r1.Name, r2.Name) { return false } // ignore TTL - - return isDuplicateRdata(r1, r2) + return true } // isDulicateName checks if the domain names s1 and s2 are equal. diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index 18d054139b211..805641b267bb2 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -78,8 +78,8 @@ func (rr *OPT) String() string { return s } -func (rr *OPT) len() int { - l := rr.Hdr.len() +func (rr *OPT) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) for i := 0; i < len(rr.Option); i++ { l += 4 // Account for 2-byte option code and 2-byte option length. lo, _ := rr.Option[i].pack() @@ -88,6 +88,12 @@ func (rr *OPT) len() int { return l } +func (rr *OPT) parse(c *zlexer, origin, file string) *ParseError { + panic("dns: internal error: parse should never be called on OPT") +} + +func (r1 *OPT) isDuplicate(r2 RR) bool { return false } + // return the old value -> delete SetVersion? // Version returns the EDNS version used. Only zero is defined. @@ -102,15 +108,14 @@ func (rr *OPT) SetVersion(v uint8) { // ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). func (rr *OPT) ExtendedRcode() int { - return int(rr.Hdr.Ttl&0xFF000000>>24) + 15 + return int(rr.Hdr.Ttl&0xFF000000>>24) << 4 } // SetExtendedRcode sets the EDNS extended RCODE field. -func (rr *OPT) SetExtendedRcode(v uint8) { - if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have! - return - } - rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v-15)<<24 +// +// If the RCODE is not an extended RCODE, will reset the extended RCODE field to 0. +func (rr *OPT) SetExtendedRcode(v uint16) { + rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24 } // UDPSize returns the UDP buffer size. @@ -154,6 +159,8 @@ type EDNS0 interface { unpack([]byte) error // String returns the string representation of the option. String() string + // copy returns a deep-copy of the option. + copy() EDNS0 } // EDNS0_NSID option is used to retrieve a nameserver @@ -184,7 +191,8 @@ func (e *EDNS0_NSID) pack() ([]byte, error) { // Option implements the EDNS0 interface. func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code. 
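The edns.go changes above add a copy method to every EDNS0 option and rework SetExtendedRcode to take the full 12-bit RCODE as a uint16, so passing a small value now simply clears the extended bits. A brief, hedged sketch of building an OPT RR that carries an NSID request and an extended RCODE:

    package main

    import (
        "fmt"

        "github.com/miekg/dns"
    )

    func main() {
        o := new(dns.OPT)
        o.Hdr.Name = "." // MUST be the root zone
        o.Hdr.Rrtype = dns.TypeOPT
        o.SetUDPSize(dns.DefaultMsgSize)

        // Ask the server for its NSID (RFC 5001).
        o.Option = append(o.Option, &dns.EDNS0_NSID{Code: dns.EDNS0NSID})

        // SetExtendedRcode now takes the whole RCODE; the low four bits stay
        // in the message header, the upper bits are stored in the OPT TTL.
        o.SetExtendedRcode(dns.RcodeBadVers)
        fmt.Println(o.ExtendedRcode()) // prints 16 (BADVERS has no low bits set)
    }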
func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } -func (e *EDNS0_NSID) String() string { return string(e.Nsid) } +func (e *EDNS0_NSID) String() string { return e.Nsid } +func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} } // EDNS0_SUBNET is the subnet option that is used to give the remote nameserver // an idea of where the client lives. See RFC 7871. It can then give back a different @@ -274,22 +282,16 @@ func (e *EDNS0_SUBNET) unpack(b []byte) error { if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { return errors.New("dns: bad netmask") } - addr := make([]byte, net.IPv4len) - for i := 0; i < net.IPv4len && 4+i < len(b); i++ { - addr[i] = b[4+i] - } - e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3]) + addr := make(net.IP, net.IPv4len) + copy(addr, b[4:]) + e.Address = addr.To16() case 2: if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 { return errors.New("dns: bad netmask") } - addr := make([]byte, net.IPv6len) - for i := 0; i < net.IPv6len && 4+i < len(b); i++ { - addr[i] = b[4+i] - } - e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4], - addr[5], addr[6], addr[7], addr[8], addr[9], addr[10], - addr[11], addr[12], addr[13], addr[14], addr[15]} + addr := make(net.IP, net.IPv6len) + copy(addr, b[4:]) + e.Address = addr default: return errors.New("dns: bad address family") } @@ -308,6 +310,16 @@ func (e *EDNS0_SUBNET) String() (s string) { return } +func (e *EDNS0_SUBNET) copy() EDNS0 { + return &EDNS0_SUBNET{ + e.Code, + e.Family, + e.SourceNetmask, + e.SourceScope, + e.Address, + } +} + // The EDNS0_COOKIE option is used to add a DNS Cookie to a message. // // o := new(dns.OPT) @@ -343,6 +355,7 @@ func (e *EDNS0_COOKIE) pack() ([]byte, error) { func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } func (e *EDNS0_COOKIE) String() string { return e.Cookie } +func (e *EDNS0_COOKIE) copy() EDNS0 { return &EDNS0_COOKIE{e.Code, e.Cookie} } // The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set // an expiration on an update RR. This is helpful for clients that cannot clean @@ -364,6 +377,7 @@ type EDNS0_UL struct { // Option implements the EDNS0 interface. func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } +func (e *EDNS0_UL) copy() EDNS0 { return &EDNS0_UL{e.Code, e.Lease} } // Copied: http://golang.org/src/pkg/net/dnsmsg.go func (e *EDNS0_UL) pack() ([]byte, error) { @@ -418,10 +432,13 @@ func (e *EDNS0_LLQ) unpack(b []byte) error { func (e *EDNS0_LLQ) String() string { s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + - " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) + + " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(e.Id, 10) + " " + strconv.FormatUint(uint64(e.LeaseLife), 10) return s } +func (e *EDNS0_LLQ) copy() EDNS0 { + return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife} +} // EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. type EDNS0_DAU struct { @@ -445,6 +462,7 @@ func (e *EDNS0_DAU) String() string { } return s } +func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} } // EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975. 
type EDNS0_DHU struct { @@ -468,6 +486,7 @@ func (e *EDNS0_DHU) String() string { } return s } +func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} } // EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975. type EDNS0_N3U struct { @@ -492,6 +511,7 @@ func (e *EDNS0_N3U) String() string { } return s } +func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} } // EDNS0_EXPIRE implementes the EDNS0 option as described in RFC 7314. type EDNS0_EXPIRE struct { @@ -502,13 +522,11 @@ type EDNS0_EXPIRE struct { // Option implements the EDNS0 interface. func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } +func (e *EDNS0_EXPIRE) copy() EDNS0 { return &EDNS0_EXPIRE{e.Code, e.Expire} } func (e *EDNS0_EXPIRE) pack() ([]byte, error) { b := make([]byte, 4) - b[0] = byte(e.Expire >> 24) - b[1] = byte(e.Expire >> 16) - b[2] = byte(e.Expire >> 8) - b[3] = byte(e.Expire) + binary.BigEndian.PutUint32(b, e.Expire) return b, nil } @@ -543,6 +561,11 @@ func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } func (e *EDNS0_LOCAL) String() string { return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) } +func (e *EDNS0_LOCAL) copy() EDNS0 { + b := make([]byte, len(e.Data)) + copy(b, e.Data) + return &EDNS0_LOCAL{e.Code, b} +} func (e *EDNS0_LOCAL) pack() ([]byte, error) { b := make([]byte, len(e.Data)) @@ -615,6 +638,7 @@ func (e *EDNS0_TCP_KEEPALIVE) String() (s string) { } return } +func (e *EDNS0_TCP_KEEPALIVE) copy() EDNS0 { return &EDNS0_TCP_KEEPALIVE{e.Code, e.Length, e.Timeout} } // EDNS0_PADDING option is used to add padding to a request/response. The default // value of padding SHOULD be 0x0 but other values MAY be used, for instance if @@ -628,3 +652,8 @@ func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING } func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil } func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil } func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) } +func (e *EDNS0_PADDING) copy() EDNS0 { + b := make([]byte, len(e.Padding)) + copy(b, e.Padding) + return &EDNS0_PADDING{b} +} diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go index 3f5303c201382..86057f99b7bcc 100644 --- a/vendor/github.com/miekg/dns/format.go +++ b/vendor/github.com/miekg/dns/format.go @@ -20,7 +20,7 @@ func Field(r RR, i int) string { return "" } d := reflect.ValueOf(r).Elem().Field(i) - switch k := d.Kind(); k { + switch d.Kind() { case reflect.String: return d.String() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go index 577fc59d2cc43..ca8c204554c9d 100644 --- a/vendor/github.com/miekg/dns/labels.go +++ b/vendor/github.com/miekg/dns/labels.go @@ -16,7 +16,7 @@ func SplitDomainName(s string) (labels []string) { fqdnEnd := 0 // offset of the final '.' or the length of the name idx := Split(s) begin := 0 - if s[len(s)-1] == '.' 
{ + if IsFqdn(s) { fqdnEnd = len(s) - 1 } else { fqdnEnd = len(s) @@ -36,8 +36,7 @@ func SplitDomainName(s string) (labels []string) { } } - labels = append(labels, s[begin:fqdnEnd]) - return labels + return append(labels, s[begin:fqdnEnd]) } // CompareDomainName compares the names s1 and s2 and diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index 47ac6cf281314..5191fc06d09df 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -9,7 +9,6 @@ package dns //go:generate go run msg_generate.go -//go:generate go run compress_generate.go import ( crand "crypto/rand" @@ -18,12 +17,35 @@ import ( "math/big" "math/rand" "strconv" + "strings" "sync" ) const ( maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4 + + // This is the maximum number of compression pointers that should occur in a + // semantically valid message. Each label in a domain name must be at least one + // octet and is separated by a period. The root label won't be represented by a + // compression pointer to a compression pointer, hence the -2 to exclude the + // smallest valid root label. + // + // It is possible to construct a valid message that has more compression pointers + // than this, and still doesn't loop, by pointing to a previous pointer. This is + // not something a well written implementation should ever do, so we leave them + // to trip the maximum compression pointer check. + maxCompressionPointers = (maxDomainNameWireOctets+1)/2 - 2 + + // This is the maximum length of a domain name in presentation format. The + // maximum wire length of a domain name is 255 octets (see above), with the + // maximum label length being 63. The wire format requires one extra byte over + // the presentation format, reducing the number of octets by 1. Each label in + // the name will be separated by a single period, with each octet in the label + // expanding to at most 4 bytes (\DDD). If all other labels are of the maximum + // length, then the final label can only be 61 octets long to not exceed the + // maximum allowed wire length. + maxDomainNamePresentationLength = 61*4 + 1 + 63*4 + 1 + 63*4 + 1 + 63*4 + 1 ) // Errors defined in this package. @@ -46,10 +68,9 @@ var ( ErrRRset error = &Error{err: "bad rrset"} ErrSecret error = &Error{err: "no secrets defined"} ErrShortRead error = &Error{err: "short read"} - ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated. - ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers. - ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication. - ErrTruncated error = &Error{err: "failed to unpack truncated message"} // ErrTruncated indicates that we failed to unpack a truncated message. We unpacked as much as we had so Msg can still be used, if desired. + ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated. + ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers. + ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication. 
) // Id by default, returns a 16 bits random number to be used as a @@ -151,7 +172,7 @@ var RcodeToString = map[int]string{ RcodeFormatError: "FORMERR", RcodeServerFailure: "SERVFAIL", RcodeNameError: "NXDOMAIN", - RcodeNotImplemented: "NOTIMPL", + RcodeNotImplemented: "NOTIMP", RcodeRefused: "REFUSED", RcodeYXDomain: "YXDOMAIN", // See RFC 2136 RcodeYXRrset: "YXRRSET", @@ -169,6 +190,39 @@ var RcodeToString = map[int]string{ RcodeBadCookie: "BADCOOKIE", } +// compressionMap is used to allow a more efficient compression map +// to be used for internal packDomainName calls without changing the +// signature or functionality of public API. +// +// In particular, map[string]uint16 uses 25% less per-entry memory +// than does map[string]int. +type compressionMap struct { + ext map[string]int // external callers + int map[string]uint16 // internal callers +} + +func (m compressionMap) valid() bool { + return m.int != nil || m.ext != nil +} + +func (m compressionMap) insert(s string, pos int) { + if m.ext != nil { + m.ext[s] = pos + } else { + m.int[s] = uint16(pos) + } +} + +func (m compressionMap) find(s string) (int, bool) { + if m.ext != nil { + pos, ok := m.ext[s] + return pos, ok + } + + pos, ok := m.int[s] + return int(pos), ok +} + // Domain names are a sequence of counted strings // split at the dots. They end with a zero-length string. @@ -177,149 +231,156 @@ var RcodeToString = map[int]string{ // map needs to hold a mapping between domain names and offsets // pointing into msg. func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - off1, _, err = packDomainName(s, msg, off, compression, compress) - return + return packDomainName(s, msg, off, compressionMap{ext: compression}, compress) } -func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) { - // special case if msg == nil - lenmsg := 256 - if msg != nil { - lenmsg = len(msg) - } +func packDomainName(s string, msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + // XXX: A logical copy of this function exists in IsDomainName and + // should be kept in sync with this function. + ls := len(s) if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. - return off, 0, nil - } - // If not fully qualified, error out, but only if msg == nil #ugly - switch { - case msg == nil: - if s[ls-1] != '.' { - s += "." - ls++ - } - case msg != nil: - if s[ls-1] != '.' { - return lenmsg, 0, ErrFqdn - } + return off, nil + } + + // If not fully qualified, error out. + if !IsFqdn(s) { + return len(msg), ErrFqdn } + // Each dot ends a segment of the name. // We trade each dot byte for a length byte. // Except for escaped dots (\.), which are normal dots. // There is also a trailing zero. // Compression - nameoffset := -1 pointer := -1 + // Emit sequence of counted strings, chopping at dots. 
- begin := 0 - bs := []byte(s) - roBs, bsFresh, escapedDot := s, true, false + var ( + begin int + compBegin int + compOff int + bs []byte + wasDot bool + ) +loop: for i := 0; i < ls; i++ { - if bs[i] == '\\' { - for j := i; j < ls-1; j++ { - bs[j] = bs[j+1] + var c byte + if bs == nil { + c = s[i] + } else { + c = bs[i] + } + + switch c { + case '\\': + if off+1 > len(msg) { + return len(msg), ErrBuf } - ls-- - if off+1 > lenmsg { - return lenmsg, labels, ErrBuf + + if bs == nil { + bs = []byte(s) } + // check for \DDD - if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - bs[i] = dddToByte(bs[i:]) - for j := i + 1; j < ls-2; j++ { - bs[j] = bs[j+2] - } - ls -= 2 + if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) { + bs[i] = dddToByte(bs[i+1:]) + copy(bs[i+1:ls-3], bs[i+4:]) + ls -= 3 + compOff += 3 + } else { + copy(bs[i:ls-1], bs[i+1:]) + ls-- + compOff++ } - escapedDot = bs[i] == '.' - bsFresh = false - continue - } - if bs[i] == '.' { - if i > 0 && bs[i-1] == '.' && !escapedDot { + wasDot = false + case '.': + if wasDot { // two dots back to back is not legal - return lenmsg, labels, ErrRdata + return len(msg), ErrRdata } - if i-begin >= 1<<6 { // top two bits of length must be clear - return lenmsg, labels, ErrRdata + wasDot = true + + labelLen := i - begin + if labelLen >= 1<<6 { // top two bits of length must be clear + return len(msg), ErrRdata } + // off can already (we're in a loop) be bigger than len(msg) // this happens when a name isn't fully qualified - if off+1 > lenmsg { - return lenmsg, labels, ErrBuf - } - if msg != nil { - msg[off] = byte(i - begin) - } - offset := off - off++ - for j := begin; j < i; j++ { - if off+1 > lenmsg { - return lenmsg, labels, ErrBuf - } - if msg != nil { - msg[off] = bs[j] - } - off++ - } - if compress && !bsFresh { - roBs = string(bs) - bsFresh = true + if off+1+labelLen > len(msg) { + return len(msg), ErrBuf } + // Don't try to compress '.' - // We should only compress when compress it true, but we should also still pick + // We should only compress when compress is true, but we should also still pick // up names that can be used for *future* compression(s). - if compression != nil && roBs[begin:] != "." { - if p, ok := compression[roBs[begin:]]; !ok { - // Only offsets smaller than this can be used. - if offset < maxCompressionOffset { - compression[roBs[begin:]] = offset - } - } else { + if compression.valid() && !isRootLabel(s, bs, begin, ls) { + if p, ok := compression.find(s[compBegin:]); ok { // The first hit is the longest matching dname // keep the pointer offset we get back and store // the offset of the current name, because that's // where we need to insert the pointer later // If compress is true, we're allowed to compress this dname - if pointer == -1 && compress { - pointer = p // Where to point to - nameoffset = offset // Where to point from - break + if compress { + pointer = p // Where to point to + break loop } + } else if off < maxCompressionOffset { + // Only offsets smaller than maxCompressionOffset can be used. + compression.insert(s[compBegin:], off) } } - labels++ + + // The following is covered by the length check above. + msg[off] = byte(labelLen) + + if bs == nil { + copy(msg[off+1:], s[begin:i]) + } else { + copy(msg[off+1:], bs[begin:i]) + } + off += 1 + labelLen + begin = i + 1 + compBegin = begin + compOff + default: + wasDot = false } - escapedDot = false } + // Root label is special - if len(bs) == 1 && bs[0] == '.' 
{ - return off, labels, nil + if isRootLabel(s, bs, 0, ls) { + return off, nil } + // If we did compression and we find something add the pointer here if pointer != -1 { - // Clear the msg buffer after the pointer location, otherwise - // packDataNsec writes the wrong data to msg. - tainted := msg[nameoffset:off] - for i := range tainted { - tainted[i] = 0 - } // We have two bytes (14 bits) to put the pointer in - // if msg == nil, we will never do compression - binary.BigEndian.PutUint16(msg[nameoffset:], uint16(pointer^0xC000)) - off = nameoffset + 1 - goto End + binary.BigEndian.PutUint16(msg[off:], uint16(pointer^0xC000)) + return off + 2, nil } - if msg != nil && off < len(msg) { + + if off < len(msg) { msg[off] = 0 } -End: - off++ - return off, labels, nil + + return off + 1, nil +} + +// isRootLabel returns whether s or bs, from off to end, is the root +// label ".". +// +// If bs is nil, s will be checked, otherwise bs will be checked. +func isRootLabel(s string, bs []byte, off, end int) bool { + if bs == nil { + return s[off:end] == "." + } + + return end-off == 1 && bs[off] == '.' } // Unpack a domain name. @@ -336,12 +397,16 @@ End: // In theory, the pointers are only allowed to jump backward. // We let them jump anywhere and stop jumping after a while. -// UnpackDomainName unpacks a domain name into a string. +// UnpackDomainName unpacks a domain name into a string. It returns +// the name, the new offset into msg and any error that occurred. +// +// When an error is encountered, the unpacked name will be discarded +// and len(msg) will be returned as the offset. func UnpackDomainName(msg []byte, off int) (string, int, error) { - s := make([]byte, 0, 64) + s := make([]byte, 0, maxDomainNamePresentationLength) off1 := 0 lenmsg := len(msg) - maxLen := maxDomainNameWireOctets + budget := maxDomainNameWireOctets ptr := 0 // number of pointers followed Loop: for { @@ -360,25 +425,19 @@ Loop: if off+c > lenmsg { return "", lenmsg, ErrBuf } + budget -= c + 1 // +1 for the label separator + if budget <= 0 { + return "", lenmsg, ErrLongDomain + } for j := off; j < off+c; j++ { switch b := msg[j]; b { case '.', '(', ')', ';', ' ', '@': fallthrough case '"', '\\': s = append(s, '\\', b) - // presentation-format \X escapes add an extra byte - maxLen++ default: - if b < 32 || b >= 127 { // unprintable, use \DDD - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s = append(s, '\\') - for i := len(bufs); i < 3; i++ { - s = append(s, '0') - } - s = append(s, bufs...) - // presentation-format \DDD escapes add 3 extra bytes - maxLen += 3 + if b < ' ' || b > '~' { // unprintable, use \DDD + s = append(s, escapeByte(b)...) 
} else { s = append(s, b) } @@ -400,7 +459,7 @@ Loop: if ptr == 0 { off1 = off } - if ptr++; ptr > 10 { + if ptr++; ptr > maxCompressionPointers { return "", lenmsg, &Error{err: "too many compression pointers"} } // pointer should guarantee that it advances and points forwards at least @@ -416,10 +475,7 @@ Loop: off1 = off } if len(s) == 0 { - s = []byte(".") - } else if len(s) >= maxLen { - // error if the name is too long, but don't throw it away - return string(s), lenmsg, ErrLongDomain + return ".", off1, nil } return string(s), off1, nil } @@ -528,10 +584,12 @@ func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { func isDigit(b byte) bool { return b >= '0' && b <= '9' } func dddToByte(s []byte) byte { + _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) } func dddStringToByte(s string) byte { + _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) } @@ -549,19 +607,38 @@ func intToBytes(i *big.Int, length int) []byte { // PackRR packs a resource record rr into msg[off:]. // See PackDomainName for documentation about the compression. func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + headerEnd, off1, err := packRR(rr, msg, off, compressionMap{ext: compression}, compress) + if err == nil { + // packRR no longer sets the Rdlength field on the rr, but + // callers might be expecting it so we set it here. + rr.Header().Rdlength = uint16(off1 - headerEnd) + } + return off1, err +} + +func packRR(rr RR, msg []byte, off int, compression compressionMap, compress bool) (headerEnd int, off1 int, err error) { if rr == nil { - return len(msg), &Error{err: "nil rr"} + return len(msg), len(msg), &Error{err: "nil rr"} + } + + headerEnd, err = rr.Header().packHeader(msg, off, compression, compress) + if err != nil { + return headerEnd, len(msg), err } - off1, err = rr.pack(msg, off, compression, compress) + off1, err = rr.pack(msg, headerEnd, compression, compress) if err != nil { - return len(msg), err + return headerEnd, len(msg), err } - // TODO(miek): Not sure if this is needed? If removed we can remove rawmsg.go as well. - if rawSetRdlength(msg, off, off1) { - return off1, nil + + rdlength := off1 - headerEnd + if int(uint16(rdlength)) != rdlength { // overflow + return headerEnd, len(msg), ErrRdata } - return off, ErrRdata + + // The RDLENGTH field is the last field in the header and we set it here. + binary.BigEndian.PutUint16(msg[headerEnd-2:], uint16(rdlength)) + return headerEnd, off1, nil } // UnpackRR unpacks msg[off:] into an RR. @@ -577,17 +654,28 @@ func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { // UnpackRRWithHeader unpacks the record type specific payload given an existing // RR_Header. func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) { + if newFn, ok := TypeToRR[h.Rrtype]; ok { + rr = newFn() + *rr.Header() = h + } else { + rr = &RFC3597{Hdr: h} + } + + if noRdata(h) { + return rr, off, nil + } + end := off + int(h.Rdlength) - if fn, known := typeToUnpack[h.Rrtype]; !known { - rr, off, err = unpackRFC3597(h, msg, off) - } else { - rr, off, err = fn(h, msg, off) + off, err = rr.unpack(msg, off) + if err != nil { + return nil, end, err } if off != end { return &h, end, &Error{err: "bad rdlength"} } - return rr, off, err + + return rr, off, nil } // unpackRRslice unpacks msg[off:] into an []RR. 
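Despite the internal rewrite of name packing and unpacking in this hunk, PackDomainName and UnpackDomainName keep their public signatures, so a round trip still looks as follows (the buffer size and the example name are arbitrary):

    package main

    import (
        "fmt"
        "log"

        "github.com/miekg/dns"
    )

    func main() {
        buf := make([]byte, 64)

        // Pack a fully qualified name into wire format, without compression.
        off, err := dns.PackDomainName("www.example.org.", buf, 0, nil, false)
        if err != nil {
            log.Fatal(err)
        }

        // Unpack it again; on error the rewritten code now returns len(msg)
        // as the offset and discards the partially unpacked name.
        name, next, err := dns.UnpackDomainName(buf[:off], 0)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(name, off, next) // www.example.org. 17 17
    }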
@@ -668,32 +756,33 @@ func (dns *Msg) Pack() (msg []byte, err error) { // PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated. func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { - var compression map[string]int - if dns.Compress { - compression = make(map[string]int) // Compression pointer mappings. + // If this message can't be compressed, avoid filling the + // compression map and creating garbage. + if dns.Compress && dns.isCompressible() { + compression := make(map[string]uint16) // Compression pointer mappings. + return dns.packBufferWithCompressionMap(buf, compressionMap{int: compression}, true) } - return dns.packBufferWithCompressionMap(buf, compression) + + return dns.packBufferWithCompressionMap(buf, compressionMap{}, false) } // packBufferWithCompressionMap packs a Msg, using the given buffer buf. -func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string]int) (msg []byte, err error) { - // We use a similar function in tsig.go's stripTsig. - - var dh Header - +func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression compressionMap, compress bool) (msg []byte, err error) { if dns.Rcode < 0 || dns.Rcode > 0xFFF { return nil, ErrRcode } - if dns.Rcode > 0xF { - // Regular RCODE field is 4 bits - opt := dns.IsEdns0() - if opt == nil { - return nil, ErrExtendedRcode - } - opt.SetExtendedRcode(uint8(dns.Rcode >> 4)) + + // Set extended rcode unconditionally if we have an opt, this will allow + // reseting the extended rcode bits if they need to. + if opt := dns.IsEdns0(); opt != nil { + opt.SetExtendedRcode(uint16(dns.Rcode)) + } else if dns.Rcode > 0xF { + // If Rcode is an extended one and opt is nil, error out. + return nil, ErrExtendedRcode } // Convert convenient Msg into wire-like Header. + var dh Header dh.Id = dns.Id dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF) if dns.Response { @@ -721,50 +810,44 @@ func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string] dh.Bits |= _CD } - // Prepare variable sized arrays. - question := dns.Question - answer := dns.Answer - ns := dns.Ns - extra := dns.Extra - - dh.Qdcount = uint16(len(question)) - dh.Ancount = uint16(len(answer)) - dh.Nscount = uint16(len(ns)) - dh.Arcount = uint16(len(extra)) + dh.Qdcount = uint16(len(dns.Question)) + dh.Ancount = uint16(len(dns.Answer)) + dh.Nscount = uint16(len(dns.Ns)) + dh.Arcount = uint16(len(dns.Extra)) // We need the uncompressed length here, because we first pack it and then compress it. msg = buf - uncompressedLen := compressedLen(dns, false) + uncompressedLen := msgLenWithCompressionMap(dns, nil) if packLen := uncompressedLen + 1; len(msg) < packLen { msg = make([]byte, packLen) } // Pack it in: header and then the pieces. 
off := 0 - off, err = dh.pack(msg, off, compression, dns.Compress) + off, err = dh.pack(msg, off, compression, compress) if err != nil { return nil, err } - for i := 0; i < len(question); i++ { - off, err = question[i].pack(msg, off, compression, dns.Compress) + for _, r := range dns.Question { + off, err = r.pack(msg, off, compression, compress) if err != nil { return nil, err } } - for i := 0; i < len(answer); i++ { - off, err = PackRR(answer[i], msg, off, compression, dns.Compress) + for _, r := range dns.Answer { + _, off, err = packRR(r, msg, off, compression, compress) if err != nil { return nil, err } } - for i := 0; i < len(ns); i++ { - off, err = PackRR(ns[i], msg, off, compression, dns.Compress) + for _, r := range dns.Ns { + _, off, err = packRR(r, msg, off, compression, compress) if err != nil { return nil, err } } - for i := 0; i < len(extra); i++ { - off, err = PackRR(extra[i], msg, off, compression, dns.Compress) + for _, r := range dns.Extra { + _, off, err = packRR(r, msg, off, compression, compress) if err != nil { return nil, err } @@ -772,28 +855,7 @@ func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string] return msg[:off], nil } -// Unpack unpacks a binary message to a Msg structure. -func (dns *Msg) Unpack(msg []byte) (err error) { - var ( - dh Header - off int - ) - if dh, off, err = unpackMsgHdr(msg, off); err != nil { - return err - } - - dns.Id = dh.Id - dns.Response = dh.Bits&_QR != 0 - dns.Opcode = int(dh.Bits>>11) & 0xF - dns.Authoritative = dh.Bits&_AA != 0 - dns.Truncated = dh.Bits&_TC != 0 - dns.RecursionDesired = dh.Bits&_RD != 0 - dns.RecursionAvailable = dh.Bits&_RA != 0 - dns.Zero = dh.Bits&_Z != 0 - dns.AuthenticatedData = dh.Bits&_AD != 0 - dns.CheckingDisabled = dh.Bits&_CD != 0 - dns.Rcode = int(dh.Bits & 0xF) - +func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) { // If we are at the end of the message we should return *just* the // header. This can still be useful to the caller. 9.9.9.9 sends these // when responding with REFUSED for instance. @@ -812,8 +874,6 @@ func (dns *Msg) Unpack(msg []byte) (err error) { var q Question q, off, err = unpackQuestion(msg, off) if err != nil { - // Even if Truncated is set, we only will set ErrTruncated if we - // actually got the questions return err } if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie! @@ -837,16 +897,29 @@ func (dns *Msg) Unpack(msg []byte) (err error) { // The header counts might have been wrong so we need to update it dh.Arcount = uint16(len(dns.Extra)) + // Set extended Rcode + if opt := dns.IsEdns0(); opt != nil { + dns.Rcode |= opt.ExtendedRcode() + } + if off != len(msg) { // TODO(miek) make this an error? // use PackOpt to let people tell how detailed the error reporting should be? // println("dns: extra bytes in dns packet", off, "<", len(msg)) - } else if dns.Truncated { - // Whether we ran into a an error or not, we want to return that it - // was truncated - err = ErrTruncated } return err + +} + +// Unpack unpacks a binary message to a Msg structure. +func (dns *Msg) Unpack(msg []byte) (err error) { + dh, off, err := unpackMsgHdr(msg, 0) + if err != nil { + return err + } + + dns.setHdr(dh) + return dns.unpack(dh, msg, off) } // Convert a complete message to a string with dig-like output. @@ -892,151 +965,117 @@ func (dns *Msg) String() string { return s } +// isCompressible returns whether the msg may be compressible. 
+func (dns *Msg) isCompressible() bool { + // If we only have one question, there is nothing we can ever compress. + return len(dns.Question) > 1 || len(dns.Answer) > 0 || + len(dns.Ns) > 0 || len(dns.Extra) > 0 +} + // Len returns the message length when in (un)compressed wire format. // If dns.Compress is true compression it is taken into account. Len() // is provided to be a faster way to get the size of the resulting packet, // than packing it, measuring the size and discarding the buffer. -func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) } - -func compressedLenWithCompressionMap(dns *Msg, compression map[string]int) int { - l := 12 // Message header is always 12 bytes - for _, r := range dns.Question { - compressionLenHelper(compression, r.Name, l) - l += r.len() +func (dns *Msg) Len() int { + // If this message can't be compressed, avoid filling the + // compression map and creating garbage. + if dns.Compress && dns.isCompressible() { + compression := make(map[string]struct{}) + return msgLenWithCompressionMap(dns, compression) } - l += compressionLenSlice(l, compression, dns.Answer) - l += compressionLenSlice(l, compression, dns.Ns) - l += compressionLenSlice(l, compression, dns.Extra) - return l + + return msgLenWithCompressionMap(dns, nil) } -// compressedLen returns the message length when in compressed wire format -// when compress is true, otherwise the uncompressed length is returned. -func compressedLen(dns *Msg, compress bool) int { - // We always return one more than needed. - if compress { - compression := map[string]int{} - return compressedLenWithCompressionMap(dns, compression) - } - l := 12 // Message header is always 12 bytes +func msgLenWithCompressionMap(dns *Msg, compression map[string]struct{}) int { + l := headerSize for _, r := range dns.Question { - l += r.len() + l += r.len(l, compression) } for _, r := range dns.Answer { if r != nil { - l += r.len() + l += r.len(l, compression) } } for _, r := range dns.Ns { if r != nil { - l += r.len() + l += r.len(l, compression) } } for _, r := range dns.Extra { if r != nil { - l += r.len() + l += r.len(l, compression) } } return l } -func compressionLenSlice(lenp int, c map[string]int, rs []RR) int { - initLen := lenp - for _, r := range rs { - if r == nil { - continue - } - // TmpLen is to track len of record at 14bits boudaries - tmpLen := lenp - - x := r.len() - // track this length, and the global length in len, while taking compression into account for both. - k, ok, _ := compressionLenSearch(c, r.Header().Name) - if ok { - // Size of x is reduced by k, but we add 1 since k includes the '.' and label descriptor take 2 bytes - // so, basically x:= x - k - 1 + 2 - x += 1 - k - } +func domainNameLen(s string, off int, compression map[string]struct{}, compress bool) int { + if s == "" || s == "." { + return 1 + } - tmpLen += compressionLenHelper(c, r.Header().Name, tmpLen) - k, ok, _ = compressionLenSearchType(c, r) - if ok { - x += 1 - k + escaped := strings.Contains(s, "\\") + + if compression != nil && (compress || off < maxCompressionOffset) { + // compressionLenSearch will insert the entry into the compression + // map if it doesn't contain it. 
+ if l, ok := compressionLenSearch(compression, s, off); ok && compress { + if escaped { + return escapedNameLen(s[:l]) + 2 + } + + return l + 2 } - lenp += x - tmpLen = lenp - tmpLen += compressionLenHelperType(c, r, tmpLen) + } + if escaped { + return escapedNameLen(s) + 1 } - return lenp - initLen + + return len(s) + 1 } -// Put the parts of the name in the compression map, return the size in bytes added in payload -func compressionLenHelper(c map[string]int, s string, currentLen int) int { - if currentLen > maxCompressionOffset { - // We won't be able to add any label that could be re-used later anyway - return 0 - } - if _, ok := c[s]; ok { - return 0 - } - initLen := currentLen - pref := "" - prev := s - lbs := Split(s) - for j := 0; j < len(lbs); j++ { - pref = s[lbs[j]:] - currentLen += len(prev) - len(pref) - prev = pref - if _, ok := c[pref]; !ok { - // If first byte label is within the first 14bits, it might be re-used later - if currentLen < maxCompressionOffset { - c[pref] = currentLen - } +func escapedNameLen(s string) int { + nameLen := len(s) + for i := 0; i < len(s); i++ { + if s[i] != '\\' { + continue + } + + if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { + nameLen -= 3 + i += 3 } else { - added := currentLen - initLen - if j > 0 { - // We added a new PTR - added += 2 - } - return added + nameLen-- + i++ } } - return currentLen - initLen + + return nameLen } -// Look for each part in the compression map and returns its length, -// keep on searching so we get the longest match. -// Will return the size of compression found, whether a match has been -// found and the size of record if added in payload -func compressionLenSearch(c map[string]int, s string) (int, bool, int) { - off := 0 - end := false - if s == "" { // don't bork on bogus data - return 0, false, 0 - } - fullSize := 0 - for { +func compressionLenSearch(c map[string]struct{}, s string, msgOff int) (int, bool) { + for off, end := 0, false; !end; off, end = NextLabel(s, off) { if _, ok := c[s[off:]]; ok { - return len(s[off:]), true, fullSize + off + return off, true } - if end { - break + + if msgOff+off < maxCompressionOffset { + c[s[off:]] = struct{}{} } - // Each label descriptor takes 2 bytes, add it - fullSize += 2 - off, end = NextLabel(s, off) } - return 0, false, fullSize + len(s) + + return 0, false } // Copy returns a new RR which is a deep-copy of r. -func Copy(r RR) RR { r1 := r.copy(); return r1 } +func Copy(r RR) RR { return r.copy() } // Len returns the length (in octets) of the uncompressed RR in wire format. -func Len(r RR) int { return r.len() } +func Len(r RR) int { return r.len(0, nil) } // Copy returns a new *Msg which is a deep-copy of dns. 
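// A minimal usage sketch of the public packing API reworked above (the query below is
// made up, not taken from the diff): with Compress set, Pack only builds a compression
// map when the message is actually compressible, and Len reports the wire length the
// packed message would have, taking compression into account when Compress is set.
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeMX)
	m.Compress = true

	// Estimated wire length, computed without actually packing the message.
	fmt.Println("estimated length:", m.Len())

	wire, err := m.Pack()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("packed length:", len(wire))
}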
func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } @@ -1084,8 +1123,8 @@ func (dns *Msg) CopyTo(r1 *Msg) *Msg { return r1 } -func (q *Question) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := PackDomainName(q.Name, msg, off, compression, compress) +func (q *Question) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) { + off, err := packDomainName(q.Name, msg, off, compression, compress) if err != nil { return off, err } @@ -1126,7 +1165,7 @@ func unpackQuestion(msg []byte, off int) (Question, int, error) { return q, off, err } -func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { +func (dh *Header) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) { off, err := packUint16(dh.Id, msg, off) if err != nil { return off, err @@ -1148,7 +1187,10 @@ func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress return off, err } off, err = packUint16(dh.Arcount, msg, off) - return off, err + if err != nil { + return off, err + } + return off, nil } func unpackMsgHdr(msg []byte, off int) (Header, int, error) { @@ -1177,5 +1219,23 @@ func unpackMsgHdr(msg []byte, off int) (Header, int, error) { return dh, off, err } dh.Arcount, off, err = unpackUint16(msg, off) - return dh, off, err + if err != nil { + return dh, off, err + } + return dh, off, nil +} + +// setHdr set the header in the dns using the binary data in dh. +func (dns *Msg) setHdr(dh Header) { + dns.Id = dh.Id + dns.Response = dh.Bits&_QR != 0 + dns.Opcode = int(dh.Bits>>11) & 0xF + dns.Authoritative = dh.Bits&_AA != 0 + dns.Truncated = dh.Bits&_TC != 0 + dns.RecursionDesired = dh.Bits&_RD != 0 + dns.RecursionAvailable = dh.Bits&_RA != 0 + dns.Zero = dh.Bits&_Z != 0 // _Z covers the zero bit, which should be zero; not sure why we set it to the opposite. + dns.AuthenticatedData = dh.Bits&_AD != 0 + dns.CheckingDisabled = dh.Bits&_CD != 0 + dns.Rcode = int(dh.Bits & 0xF) } diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go index 81fc2b1be31ae..527621a00143c 100644 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -99,14 +99,14 @@ func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, return hdr, off, msg, err } -// pack packs an RR header, returning the offset to the end of the header. +// packHeader packs an RR header, returning the offset to the end of the header. // See PackDomainName for documentation about the compression. -func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { +func (hdr RR_Header) packHeader(msg []byte, off int, compression compressionMap, compress bool) (int, error) { if off == len(msg) { return off, nil } - off, err = PackDomainName(hdr.Name, msg, off, compression, compress) + off, err := packDomainName(hdr.Name, msg, off, compression, compress) if err != nil { return len(msg), err } @@ -122,7 +122,7 @@ func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compr if err != nil { return len(msg), err } - off, err = packUint16(hdr.Rdlength, msg, off) + off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR. 
if err != nil { return len(msg), err } @@ -177,14 +177,14 @@ func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) { if off+1 > len(msg) { return 0, len(msg), &Error{err: "overflow unpacking uint8"} } - return uint8(msg[off]), off + 1, nil + return msg[off], off + 1, nil } func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { if off+1 > len(msg) { return len(msg), &Error{err: "overflow packing uint8"} } - msg[off] = byte(i) + msg[off] = i return off + 1, nil } @@ -223,8 +223,8 @@ func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) { return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"} } // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) - i = uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | - uint64(msg[off+4])<<8 | uint64(msg[off+5])) + i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | + uint64(msg[off+4])<<8 | uint64(msg[off+5]) off += 6 return i, off, nil } @@ -275,7 +275,7 @@ func unpackString(msg []byte, off int) (string, int, error) { s.WriteByte('\\') s.WriteByte(b) case b < ' ' || b > '~': // unprintable - writeEscapedByte(&s, b) + s.WriteString(escapeByte(b)) default: s.WriteByte(b) } @@ -363,6 +363,22 @@ func packStringHex(s string, msg []byte, off int) (int, error) { return off, nil } +func unpackStringAny(msg []byte, off, end int) (string, int, error) { + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking anything"} + } + return string(msg[off:end]), end, nil +} + +func packStringAny(s string, msg []byte, off int) (int, error) { + if off+len(s) > len(msg) { + return len(msg), &Error{err: "overflow packing anything"} + } + copy(msg[off:off+len(s)], s) + off += len(s) + return off, nil +} + func unpackStringTxt(msg []byte, off int) ([]string, int, error) { txt, off, err := unpackTxt(msg, off) if err != nil { @@ -383,7 +399,7 @@ func packStringTxt(s []string, msg []byte, off int) (int, error) { func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { var edns []EDNS0 Option: - code := uint16(0) + var code uint16 if off+4 > len(msg) { return nil, len(msg), &Error{err: "overflow unpacking opt"} } @@ -621,10 +637,10 @@ func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) { return servers, off, nil } -func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) { +func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) { var err error for j := 0; j < len(names); j++ { - off, err = PackDomainName(names[j], msg, off, compression, false && compress) + off, err = packDomainName(names[j], msg, off, compression, compress) if err != nil { return len(msg), err } diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go index 9b908c4478636..8f071a47399df 100644 --- a/vendor/github.com/miekg/dns/nsecx.go +++ b/vendor/github.com/miekg/dns/nsecx.go @@ -2,49 +2,44 @@ package dns import ( "crypto/sha1" - "hash" + "encoding/hex" "strings" ) -type saltWireFmt struct { - Salt string `dns:"size-hex"` -} - // HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. 
func HashName(label string, ha uint8, iter uint16, salt string) string { - saltwire := new(saltWireFmt) - saltwire.Salt = salt - wire := make([]byte, DefaultMsgSize) - n, err := packSaltWire(saltwire, wire) + if ha != SHA1 { + return "" + } + + wireSalt := make([]byte, hex.DecodedLen(len(salt))) + n, err := packStringHex(salt, wireSalt, 0) if err != nil { return "" } - wire = wire[:n] + wireSalt = wireSalt[:n] + name := make([]byte, 255) off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false) if err != nil { return "" } name = name[:off] - var s hash.Hash - switch ha { - case SHA1: - s = sha1.New() - default: - return "" - } + s := sha1.New() // k = 0 s.Write(name) - s.Write(wire) + s.Write(wireSalt) nsec3 := s.Sum(nil) + // k > 0 for k := uint16(0); k < iter; k++ { s.Reset() s.Write(nsec3) - s.Write(wire) + s.Write(wireSalt) nsec3 = s.Sum(nsec3[:0]) } + return toBase32(nsec3) } @@ -63,8 +58,10 @@ func (rr *NSEC3) Cover(name string) bool { } nextHash := rr.NextDomain - if ownerHash == nextHash { // empty interval - return false + + // if empty interval found, try cover wildcard hashes so nameHash shouldn't match with ownerHash + if ownerHash == nextHash && nameHash != ownerHash { // empty interval + return true } if ownerHash > nextHash { // end of zone if nameHash > ownerHash { // covered since there is nothing after ownerHash @@ -96,11 +93,3 @@ func (rr *NSEC3) Match(name string) bool { } return false } - -func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) { - off, err := packStringHex(sw.Salt, msg, 0) - if err != nil { - return off, err - } - return off, nil -} diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go index 74544a74e50f6..d9c0d26774f05 100644 --- a/vendor/github.com/miekg/dns/privaterr.go +++ b/vendor/github.com/miekg/dns/privaterr.go @@ -39,11 +39,12 @@ func mkPrivateRR(rrtype uint16) *PrivateRR { } anyrr := rrfunc() - switch rr := anyrr.(type) { - case *PrivateRR: - return rr + rr, ok := anyrr.(*PrivateRR) + if !ok { + panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr)) } - panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr)) + + return rr } // Header return the RR header of r. @@ -52,7 +53,12 @@ func (r *PrivateRR) Header() *RR_Header { return &r.Hdr } func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() } // Private len and copy parts to satisfy RR interface. -func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() } +func (r *PrivateRR) len(off int, compression map[string]struct{}) int { + l := r.Hdr.len(off, compression) + l += r.Data.Len() + return l +} + func (r *PrivateRR) copy() RR { // make new RR like this: rr := mkPrivateRR(r.Hdr.Rrtype) @@ -64,74 +70,55 @@ func (r *PrivateRR) copy() RR { } return rr } -func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := r.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off + +func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) { n, err := r.Data.Pack(msg[off:]) if err != nil { return len(msg), err } off += n - r.Header().Rdlength = uint16(off - headerEnd) return off, nil } -// PrivateHandle registers a private resource record type. It requires -// string and numeric representation of private RR type and generator function as argument. 
-func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { - rtypestr = strings.ToUpper(rtypestr) - - TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} } - TypeToString[rtype] = rtypestr - StringToType[rtypestr] = rtype +func (r *PrivateRR) unpack(msg []byte, off int) (int, error) { + off1, err := r.Data.Unpack(msg[off:]) + off += off1 + return off, err +} - typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) { - if noRdata(h) { - return &h, off, nil +func (r *PrivateRR) parse(c *zlexer, origin, file string) *ParseError { + var l lex + text := make([]string, 0, 2) // could be 0..N elements, median is probably 1 +Fetch: + for { + // TODO(miek): we could also be returning _QUOTE, this might or might not + // be an issue (basically parsing TXT becomes hard) + switch l, _ = c.Next(); l.value { + case zNewline, zEOF: + break Fetch + case zString: + text = append(text, l.token) } - var err error - - rr := mkPrivateRR(h.Rrtype) - rr.Hdr = h + } - off1, err := rr.Data.Unpack(msg[off:]) - off += off1 - if err != nil { - return rr, off, err - } - return rr, off, err + err := r.Data.Parse(text) + if err != nil { + return &ParseError{file, err.Error(), l} } - setPrivateRR := func(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := mkPrivateRR(h.Rrtype) - rr.Hdr = h - - var l lex - text := make([]string, 0, 2) // could be 0..N elements, median is probably 1 - Fetch: - for { - // TODO(miek): we could also be returning _QUOTE, this might or might not - // be an issue (basically parsing TXT becomes hard) - switch l, _ = c.Next(); l.value { - case zNewline, zEOF: - break Fetch - case zString: - text = append(text, l.token) - } - } + return nil +} - err := rr.Data.Parse(text) - if err != nil { - return nil, &ParseError{f, err.Error(), l}, "" - } +func (r1 *PrivateRR) isDuplicate(r2 RR) bool { return false } - return rr, nil, "" - } +// PrivateHandle registers a private resource record type. It requires +// string and numeric representation of private RR type and generator function as argument. +func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { + rtypestr = strings.ToUpper(rtypestr) - typeToparserFunc[rtype] = parserFunc{setPrivateRR, true} + TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} } + TypeToString[rtype] = rtypestr + StringToType[rtypestr] = rtype } // PrivateHandleRemove removes definitions required to support private RR type. @@ -140,8 +127,6 @@ func PrivateHandleRemove(rtype uint16) { if ok { delete(TypeToRR, rtype) delete(TypeToString, rtype) - delete(typeToparserFunc, rtype) delete(StringToType, rtypestr) - delete(typeToUnpack, rtype) } } diff --git a/vendor/github.com/miekg/dns/rawmsg.go b/vendor/github.com/miekg/dns/rawmsg.go deleted file mode 100644 index 6e21fba7e1fb7..0000000000000 --- a/vendor/github.com/miekg/dns/rawmsg.go +++ /dev/null @@ -1,49 +0,0 @@ -package dns - -import "encoding/binary" - -// rawSetRdlength sets the rdlength in the header of -// the RR. The offset 'off' must be positioned at the -// start of the header of the RR, 'end' must be the -// end of the RR. 
-func rawSetRdlength(msg []byte, off, end int) bool { - l := len(msg) -Loop: - for { - if off+1 > l { - return false - } - c := int(msg[off]) - off++ - switch c & 0xC0 { - case 0x00: - if c == 0x00 { - // End of the domainname - break Loop - } - if off+c > l { - return false - } - off += c - - case 0xC0: - // pointer, next byte included, ends domainname - off++ - break Loop - } - } - // The domainname has been seen, we at the start of the fixed part in the header. - // Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length. - off += 2 + 2 + 4 - if off+2 > l { - return false - } - //off+1 is the end of the header, 'end' is the end of the rr - //so 'end' - 'off+2' is the length of the rdata - rdatalen := end - (off + 2) - if rdatalen > 0xFFFF { - return false - } - binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen)) - return true -} diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go index f6e7a47a6e8ab..28151af835983 100644 --- a/vendor/github.com/miekg/dns/reverse.go +++ b/vendor/github.com/miekg/dns/reverse.go @@ -12,6 +12,20 @@ var StringToOpcode = reverseInt(OpcodeToString) // StringToRcode is a map of rcodes to strings. var StringToRcode = reverseInt(RcodeToString) +func init() { + // Preserve previous NOTIMP typo, see github.com/miekg/dns/issues/733. + StringToRcode["NOTIMPL"] = RcodeNotImplemented +} + +// StringToAlgorithm is the reverse of AlgorithmToString. +var StringToAlgorithm = reverseInt8(AlgorithmToString) + +// StringToHash is a map of names to hash IDs. +var StringToHash = reverseInt8(HashToString) + +// StringToCertType is the reverseof CertTypeToString. +var StringToCertType = reverseInt16(CertTypeToString) + // Reverse a map func reverseInt8(m map[uint8]string) map[string]uint8 { n := make(map[string]uint8, len(m)) diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go index cac15787adf5e..a638e862e3ab2 100644 --- a/vendor/github.com/miekg/dns/sanitize.go +++ b/vendor/github.com/miekg/dns/sanitize.go @@ -15,10 +15,11 @@ func Dedup(rrs []RR, m map[string]RR) []RR { for _, r := range rrs { key := normalizedString(r) keys = append(keys, &key) - if _, ok := m[key]; ok { + if mr, ok := m[key]; ok { // Shortest TTL wins. - if m[key].Header().Ttl > r.Header().Ttl { - m[key].Header().Ttl = r.Header().Ttl + rh, mrh := r.Header(), mr.Header() + if mrh.Ttl > rh.Ttl { + mrh.Ttl = rh.Ttl } continue } diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go index 61ace121e5c16..a8691bca70ac8 100644 --- a/vendor/github.com/miekg/dns/scan.go +++ b/vendor/github.com/miekg/dns/scan.go @@ -79,13 +79,12 @@ func (e *ParseError) Error() (s string) { } type lex struct { - token string // text of the token - err bool // when true, token text has lexer error - value uint8 // value: zString, _BLANK, etc. - torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar - line int // line in the file - column int // column in the file - comment string // any comment text seen + token string // text of the token + err bool // when true, token text has lexer error + value uint8 // value: zString, _BLANK, etc. + torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar + line int // line in the file + column int // column in the file } // Token holds the token that are returned when a zone file is parsed. 
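// A minimal usage sketch for the scan.go comment handling changed here: the per-token
// comment field is dropped and callers read comments through (*ZoneParser).Comment,
// which reports any comment seen alongside the RR most recently returned by Next.
// The zone text and names below are made up.
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	const zone = `$ORIGIN example.org.
www  3600 IN A 192.0.2.1 ; web server
mail 3600 IN A 192.0.2.2 ; mail server
`

	zp := dns.NewZoneParser(strings.NewReader(zone), "example.org.", "db.example.org")
	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		// Comment returns the comment attached to the RR just returned, if any.
		fmt.Printf("%s\t%q\n", rr.String(), zp.Comment())
	}
	if err := zp.Err(); err != nil {
		log.Fatal(err)
	}
}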
@@ -244,8 +243,6 @@ type ZoneParser struct { sub *ZoneParser osFile *os.File - com string - includeDepth uint8 includeAllowed bool @@ -318,12 +315,19 @@ func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) { // Comment returns an optional text comment that occurred alongside // the RR. func (zp *ZoneParser) Comment() string { - return zp.com + if zp.parseErr != nil { + return "" + } + + if zp.sub != nil { + return zp.sub.Comment() + } + + return zp.c.Comment() } func (zp *ZoneParser) subNext() (RR, bool) { if rr, ok := zp.sub.Next(); ok { - zp.com = zp.sub.com return rr, true } @@ -347,8 +351,6 @@ func (zp *ZoneParser) subNext() (RR, bool) { // error. After Next returns (nil, false), the Err method will return // any error that occurred during parsing. func (zp *ZoneParser) Next() (RR, bool) { - zp.com = "" - if zp.parseErr != nil { return nil, false } @@ -501,7 +503,7 @@ func (zp *ZoneParser) Next() (RR, bool) { return zp.setParseError("expecting $TTL value, not this...", l) } - if e, _ := slurpRemainder(zp.c, zp.file); e != nil { + if e := slurpRemainder(zp.c, zp.file); e != nil { zp.parseErr = e return nil, false } @@ -525,7 +527,7 @@ func (zp *ZoneParser) Next() (RR, bool) { return zp.setParseError("expecting $ORIGIN value, not this...", l) } - if e, _ := slurpRemainder(zp.c, zp.file); e != nil { + if e := slurpRemainder(zp.c, zp.file); e != nil { zp.parseErr = e return nil, false } @@ -648,7 +650,7 @@ func (zp *ZoneParser) Next() (RR, bool) { st = zExpectRdata case zExpectRdata: - r, e, c1 := setRR(*h, zp.c, zp.origin, zp.file) + r, e := setRR(*h, zp.c, zp.origin, zp.file) if e != nil { // If e.lex is nil than we have encounter a unknown RR type // in that case we substitute our current lex token @@ -660,7 +662,6 @@ func (zp *ZoneParser) Next() (RR, bool) { return nil, false } - zp.com = c1 return r, true } } @@ -678,7 +679,8 @@ type zlexer struct { line int column int - com string + comBuf string + comment string l lex @@ -767,14 +769,15 @@ func (zl *zlexer) Next() (lex, bool) { escape bool ) - if zl.com != "" { - comi = copy(com[:], zl.com) - zl.com = "" + if zl.comBuf != "" { + comi = copy(com[:], zl.comBuf) + zl.comBuf = "" } + zl.comment = "" + for x, ok := zl.readByte(); ok; x, ok = zl.readByte() { l.line, l.column = zl.line, zl.column - l.comment = "" if stri >= len(str) { l.token = "token length insufficient for parsing" @@ -898,7 +901,7 @@ func (zl *zlexer) Next() (lex, bool) { } zl.commt = true - zl.com = "" + zl.comBuf = "" if comi > 1 { // A newline was previously seen inside a comment that @@ -911,7 +914,7 @@ func (zl *zlexer) Next() (lex, bool) { comi++ if stri > 0 { - zl.com = string(com[:comi]) + zl.comBuf = string(com[:comi]) l.value = zString l.token = string(str[:stri]) @@ -947,11 +950,11 @@ func (zl *zlexer) Next() (lex, bool) { l.value = zNewline l.token = "\n" - l.comment = string(com[:comi]) + zl.comment = string(com[:comi]) return *l, true } - zl.com = string(com[:comi]) + zl.comBuf = string(com[:comi]) break } @@ -977,9 +980,9 @@ func (zl *zlexer) Next() (lex, bool) { l.value = zNewline l.token = "\n" - l.comment = zl.com - zl.com = "" + zl.comment = zl.comBuf + zl.comBuf = "" zl.rrtype = false zl.owner = true @@ -1115,7 +1118,7 @@ func (zl *zlexer) Next() (lex, bool) { // Send remainder of com l.value = zNewline l.token = "\n" - l.comment = string(com[:comi]) + zl.comment = string(com[:comi]) if retL != (lex{}) { zl.nextL = true @@ -1126,7 +1129,6 @@ func (zl *zlexer) Next() (lex, bool) { } if zl.brace != 0 { - l.comment = "" // in case there was 
left over string and comment l.token = "unbalanced brace" l.err = true return *l, true @@ -1135,6 +1137,14 @@ func (zl *zlexer) Next() (lex, bool) { return lex{value: zEOF}, false } +func (zl *zlexer) Comment() string { + if zl.l.err { + return "" + } + + return zl.comment +} + // Extract the class number from CLASSxx func classToInt(token string) (uint16, bool) { offset := 5 @@ -1163,8 +1173,7 @@ func typeToInt(token string) (uint16, bool) { // stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds. func stringToTTL(token string) (uint32, bool) { - s := uint32(0) - i := uint32(0) + var s, i uint32 for _, c := range token { switch c { case 's', 'S': @@ -1252,7 +1261,7 @@ func toAbsoluteName(name, origin string) (absolute string, ok bool) { } // check if name is already absolute - if name[len(name)-1] == '.' { + if IsFqdn(name) { return name, true } @@ -1292,24 +1301,21 @@ func locCheckEast(token string, longitude uint32) (uint32, bool) { return longitude, false } -// "Eat" the rest of the "line". Return potential comments -func slurpRemainder(c *zlexer, f string) (*ParseError, string) { +// "Eat" the rest of the "line" +func slurpRemainder(c *zlexer, f string) *ParseError { l, _ := c.Next() - com := "" switch l.value { case zBlank: l, _ = c.Next() - com = l.comment if l.value != zNewline && l.value != zEOF { - return &ParseError{f, "garbage after rdata", l}, "" + return &ParseError{f, "garbage after rdata", l} } case zNewline: - com = l.comment case zEOF: default: - return &ParseError{f, "garbage after rdata", l}, "" + return &ParseError{f, "garbage after rdata", l} } - return nil, com + return nil } // Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64" diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index 935d22c3ff877..f48ff7890e8f8 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -7,70 +7,69 @@ import ( "strings" ) -type parserFunc struct { - // Func defines the function that parses the tokens and returns the RR - // or an error. The last string contains any comments in the line as - // they returned by the lexer as well. - Func func(h RR_Header, c *zlexer, origin string, file string) (RR, *ParseError, string) - // Signals if the RR ending is of variable length, like TXT or records - // that have Hexadecimal or Base64 as their last element in the Rdata. Records - // that have a fixed ending or for instance A, AAAA, SOA and etc. - Variable bool -} - // Parse the rdata of each rrtype. // All data from the channel c is either zString or zBlank. // After the rdata there may come a zBlank and then a zNewline // or immediately a zNewline. If this is not the case we flag // an *ParseError: garbage after rdata. -func setRR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - parserfunc, ok := typeToparserFunc[h.Rrtype] - if ok { - r, e, cm := parserfunc.Func(h, c, o, f) - if parserfunc.Variable { - return r, e, cm - } - if e != nil { - return nil, e, "" - } - e, cm = slurpRemainder(c, f) - if e != nil { - return nil, e, "" - } - return r, nil, cm +func setRR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError) { + var rr RR + if newFn, ok := TypeToRR[h.Rrtype]; ok && canParseAsRR(h.Rrtype) { + rr = newFn() + *rr.Header() = h + } else { + rr = &RFC3597{Hdr: h} + } + + err := rr.parse(c, o, f) + if err != nil { + return nil, err + } + + return rr, nil +} + +// canParseAsRR returns true if the record type can be parsed as a +// concrete RR. 
It blacklists certain record types that must be parsed +// according to RFC 3597 because they lack a presentation format. +func canParseAsRR(rrtype uint16) bool { + switch rrtype { + case TypeANY, TypeNULL, TypeOPT, TypeTSIG: + return false + default: + return true } - // RFC3957 RR (Unknown RR handling) - return setRFC3597(h, c, o, f) } // A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) // or an error -func endingToString(c *zlexer, errstr, f string) (string, *ParseError, string) { - s := "" +func endingToString(c *zlexer, errstr, f string) (string, *ParseError) { + var s string l, _ := c.Next() // zString for l.value != zNewline && l.value != zEOF { if l.err { - return s, &ParseError{f, errstr, l}, "" + return s, &ParseError{f, errstr, l} } switch l.value { case zString: s += l.token case zBlank: // Ok default: - return "", &ParseError{f, errstr, l}, "" + return "", &ParseError{f, errstr, l} } l, _ = c.Next() } - return s, nil, l.comment + + return s, nil } // A remainder of the rdata with embedded spaces, split on unquoted whitespace // and return the parsed string slice or an error -func endingToTxtSlice(c *zlexer, errstr, f string) ([]string, *ParseError, string) { +func endingToTxtSlice(c *zlexer, errstr, f string) ([]string, *ParseError) { // Get the remaining data until we see a zNewline l, _ := c.Next() if l.err { - return nil, &ParseError{f, errstr, l}, "" + return nil, &ParseError{f, errstr, l} } // Build the slice @@ -79,7 +78,7 @@ func endingToTxtSlice(c *zlexer, errstr, f string) ([]string, *ParseError, strin empty := false for l.value != zNewline && l.value != zEOF { if l.err { - return nil, &ParseError{f, errstr, l}, "" + return nil, &ParseError{f, errstr, l} } switch l.value { case zString: @@ -106,7 +105,7 @@ func endingToTxtSlice(c *zlexer, errstr, f string) ([]string, *ParseError, strin case zBlank: if quote { // zBlank can only be seen in between txt parts. - return nil, &ParseError{f, errstr, l}, "" + return nil, &ParseError{f, errstr, l} } case zQuote: if empty && quote { @@ -115,115 +114,99 @@ func endingToTxtSlice(c *zlexer, errstr, f string) ([]string, *ParseError, strin quote = !quote empty = true default: - return nil, &ParseError{f, errstr, l}, "" + return nil, &ParseError{f, errstr, l} } l, _ = c.Next() } + if quote { - return nil, &ParseError{f, errstr, l}, "" + return nil, &ParseError{f, errstr, l} } - return s, nil, l.comment -} -func setA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(A) - rr.Hdr = h + return s, nil +} +func (rr *A) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } rr.A = net.ParseIP(l.token) if rr.A == nil || l.err { - return nil, &ParseError{f, "bad A A", l}, "" + return &ParseError{f, "bad A A", l} } - return rr, nil, "" + return slurpRemainder(c, f) } -func setAAAA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(AAAA) - rr.Hdr = h - +func (rr *AAAA) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" + return slurpRemainder(c, f) } rr.AAAA = net.ParseIP(l.token) if rr.AAAA == nil || l.err { - return nil, &ParseError{f, "bad AAAA AAAA", l}, "" + return &ParseError{f, "bad AAAA AAAA", l} } - return rr, nil, "" + return slurpRemainder(c, f) } -func setNS(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NS) - rr.Hdr = h - +func (rr *NS) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.Ns = l.token if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad NS Ns", l}, "" + return &ParseError{f, "bad NS Ns", l} } rr.Ns = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setPTR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(PTR) - rr.Hdr = h - +func (rr *PTR) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.Ptr = l.token if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad PTR Ptr", l}, "" + return &ParseError{f, "bad PTR Ptr", l} } rr.Ptr = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setNSAPPTR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NSAPPTR) - rr.Hdr = h - +func (rr *NSAPPTR) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.Ptr = l.token if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, "" + return &ParseError{f, "bad NSAP-PTR Ptr", l} } rr.Ptr = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setRP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(RP) - rr.Hdr = h - +func (rr *RP) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.Mbox = l.token if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } mbox, mboxOk := toAbsoluteName(l.token, o) if l.err || !mboxOk { - return nil, &ParseError{f, "bad RP Mbox", l}, "" + return &ParseError{f, "bad RP Mbox", l} } rr.Mbox = mbox @@ -233,78 +216,66 @@ func setRP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { txt, txtOk := toAbsoluteName(l.token, o) if l.err || !txtOk { - return nil, &ParseError{f, "bad RP Txt", l}, "" + return &ParseError{f, "bad RP Txt", l} } rr.Txt = txt - return rr, nil, "" + return slurpRemainder(c, f) } -func setMR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MR) - rr.Hdr = h - +func (rr *MR) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.Mr = l.token if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad MR Mr", l}, "" + return &ParseError{f, "bad MR Mr", l} } rr.Mr = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setMB(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MB) - rr.Hdr = h - +func (rr *MB) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.Mb = l.token if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" + return slurpRemainder(c, f) } name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad MB Mb", l}, "" + return &ParseError{f, "bad MB Mb", l} } rr.Mb = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setMG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MG) - rr.Hdr = h - +func (rr *MG) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.Mg = l.token if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad MG Mg", l}, "" + return &ParseError{f, "bad MG Mg", l} } rr.Mg = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setHINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(HINFO) - rr.Hdr = h - - chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f) +func (rr *HINFO) parse(c *zlexer, o, f string) *ParseError { + chunks, e := endingToTxtSlice(c, "bad HINFO Fields", f) if e != nil { - return nil, e, c1 + return e } if ln := len(chunks); ln == 0 { - return rr, nil, "" + return nil } else if ln == 1 { // Can we split it? if out := strings.Fields(chunks[0]); len(out) > 1 { @@ -317,22 +288,19 @@ func setHINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr.Cpu = chunks[0] rr.Os = strings.Join(chunks[1:], " ") - return rr, nil, "" + return nil } -func setMINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MINFO) - rr.Hdr = h - +func (rr *MINFO) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.Rmail = l.token if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } rmail, rmailOk := toAbsoluteName(l.token, o) if l.err || !rmailOk { - return nil, &ParseError{f, "bad MINFO Rmail", l}, "" + return &ParseError{f, "bad MINFO Rmail", l} } rr.Rmail = rmail @@ -342,61 +310,52 @@ func setMINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { email, emailOk := toAbsoluteName(l.token, o) if l.err || !emailOk { - return nil, &ParseError{f, "bad MINFO Email", l}, "" + return &ParseError{f, "bad MINFO Email", l} } rr.Email = email - return rr, nil, "" + return slurpRemainder(c, f) } -func setMF(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MF) - rr.Hdr = h - +func (rr *MF) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.Mf = l.token if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad MF Mf", l}, "" + return &ParseError{f, "bad MF Mf", l} } rr.Mf = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setMD(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MD) - rr.Hdr = h - +func (rr *MD) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.Md = l.token if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" + return slurpRemainder(c, f) } name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad MD Md", l}, "" + return &ParseError{f, "bad MD Md", l} } rr.Md = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setMX(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MX) - rr.Hdr = h - +func (rr *MX) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad MX Pref", l}, "" + return &ParseError{f, "bad MX Pref", l} } rr.Preference = uint16(i) @@ -406,25 +365,22 @@ func setMX(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad MX Mx", l}, "" + return &ParseError{f, "bad MX Mx", l} } rr.Mx = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setRT(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(RT) - rr.Hdr = h - +func (rr *RT) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil { - return nil, &ParseError{f, "bad RT Preference", l}, "" + return &ParseError{f, "bad RT Preference", l} } rr.Preference = uint16(i) @@ -434,25 +390,22 @@ func setRT(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad RT Host", l}, "" + return &ParseError{f, "bad RT Host", l} } rr.Host = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setAFSDB(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(AFSDB) - rr.Hdr = h - +func (rr *AFSDB) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad AFSDB Subtype", l}, "" + return &ParseError{f, "bad AFSDB Subtype", l} } rr.Subtype = uint16(i) @@ -462,40 +415,34 @@ func setAFSDB(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad AFSDB Hostname", l}, "" + return &ParseError{f, "bad AFSDB Hostname", l} } rr.Hostname = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setX25(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(X25) - rr.Hdr = h - +func (rr *X25) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } if l.err { - return nil, &ParseError{f, "bad X25 PSDNAddress", l}, "" + return &ParseError{f, "bad X25 PSDNAddress", l} } rr.PSDNAddress = l.token - return rr, nil, "" + return slurpRemainder(c, f) } -func setKX(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(KX) - rr.Hdr = h - +func (rr *KX) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad KX Pref", l}, "" + return &ParseError{f, "bad KX Pref", l} } rr.Preference = uint16(i) @@ -505,61 +452,52 @@ func setKX(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad KX Exchanger", l}, "" + return &ParseError{f, "bad KX Exchanger", l} } rr.Exchanger = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setCNAME(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(CNAME) - rr.Hdr = h - +func (rr *CNAME) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.Target = l.token if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad CNAME Target", l}, "" + return &ParseError{f, "bad CNAME Target", l} } rr.Target = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setDNAME(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(DNAME) - rr.Hdr = h - +func (rr *DNAME) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.Target = l.token if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad DNAME Target", l}, "" + return &ParseError{f, "bad DNAME Target", l} } rr.Target = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setSOA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(SOA) - rr.Hdr = h - +func (rr *SOA) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.Ns = l.token if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } ns, nsOk := toAbsoluteName(l.token, o) if l.err || !nsOk { - return nil, &ParseError{f, "bad SOA Ns", l}, "" + return &ParseError{f, "bad SOA Ns", l} } rr.Ns = ns @@ -569,7 +507,7 @@ func setSOA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { mbox, mboxOk := toAbsoluteName(l.token, o) if l.err || !mboxOk { - return nil, &ParseError{f, "bad SOA Mbox", l}, "" + return &ParseError{f, "bad SOA Mbox", l} } rr.Mbox = mbox @@ -582,16 +520,16 @@ func setSOA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { for i := 0; i < 5; i++ { l, _ = c.Next() if l.err { - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + return &ParseError{f, "bad SOA zone parameter", l} } if j, e := strconv.ParseUint(l.token, 10, 32); e != nil { if i == 0 { // Serial must be a number - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + return &ParseError{f, "bad SOA zone parameter", l} } // We allow other fields to be unitful duration strings if v, ok = stringToTTL(l.token); !ok { - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + return &ParseError{f, "bad SOA zone parameter", l} } } else { @@ -614,21 +552,18 @@ func setSOA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr.Minttl = v } } - return rr, nil, "" + return slurpRemainder(c, f) } -func setSRV(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(SRV) - rr.Hdr = h - +func (rr *SRV) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad SRV Priority", l}, "" + return &ParseError{f, "bad SRV Priority", l} } rr.Priority = uint16(i) @@ -636,7 +571,7 @@ func setSRV(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad SRV Weight", l}, "" + return &ParseError{f, "bad SRV Weight", l} } rr.Weight = uint16(i) @@ -644,7 +579,7 @@ func setSRV(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad SRV Port", l}, "" + return &ParseError{f, "bad SRV Port", l} } rr.Port = uint16(i) @@ -654,24 +589,21 @@ func setSRV(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad SRV Target", l}, "" + return &ParseError{f, "bad SRV Target", l} } rr.Target = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setNAPTR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NAPTR) - rr.Hdr = h - +func (rr *NAPTR) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad NAPTR Order", l}, "" + return &ParseError{f, "bad NAPTR Order", l} } rr.Order = uint16(i) @@ -679,7 +611,7 @@ func setNAPTR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad NAPTR Preference", l}, "" + return &ParseError{f, "bad NAPTR Preference", l} } rr.Preference = uint16(i) @@ -687,57 +619,57 @@ func setNAPTR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { c.Next() // zBlank l, _ = c.Next() // _QUOTE if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + return &ParseError{f, "bad NAPTR Flags", l} } l, _ = c.Next() // Either String or Quote if l.value == zString { rr.Flags = l.token l, _ = c.Next() // _QUOTE if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + return &ParseError{f, "bad NAPTR Flags", l} } } else if l.value == zQuote { rr.Flags = "" } else { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + return &ParseError{f, "bad NAPTR Flags", l} } // Service c.Next() // zBlank l, _ = c.Next() // _QUOTE if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" + return &ParseError{f, "bad NAPTR Service", l} } l, _ = c.Next() // Either String or Quote if l.value == zString { rr.Service = l.token l, _ = c.Next() // _QUOTE if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" + return &ParseError{f, "bad NAPTR Service", l} } } else if l.value == zQuote { rr.Service = "" } else { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" + return &ParseError{f, "bad NAPTR Service", l} } // Regexp c.Next() // zBlank l, _ = c.Next() // _QUOTE if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + return &ParseError{f, "bad NAPTR Regexp", l} } l, _ = c.Next() // Either String or Quote if l.value == zString { rr.Regexp = l.token l, _ = c.Next() 
// _QUOTE if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + return &ParseError{f, "bad NAPTR Regexp", l} } } else if l.value == zQuote { rr.Regexp = "" } else { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + return &ParseError{f, "bad NAPTR Regexp", l} } // After quote no space?? @@ -747,25 +679,22 @@ func setNAPTR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad NAPTR Replacement", l}, "" + return &ParseError{f, "bad NAPTR Replacement", l} } rr.Replacement = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setTALINK(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(TALINK) - rr.Hdr = h - +func (rr *TALINK) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.PreviousName = l.token if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } previousName, previousNameOk := toAbsoluteName(l.token, o) if l.err || !previousNameOk { - return nil, &ParseError{f, "bad TALINK PreviousName", l}, "" + return &ParseError{f, "bad TALINK PreviousName", l} } rr.PreviousName = previousName @@ -775,16 +704,14 @@ func setTALINK(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { nextName, nextNameOk := toAbsoluteName(l.token, o) if l.err || !nextNameOk { - return nil, &ParseError{f, "bad TALINK NextName", l}, "" + return &ParseError{f, "bad TALINK NextName", l} } rr.NextName = nextName - return rr, nil, "" + return slurpRemainder(c, f) } -func setLOC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(LOC) - rr.Hdr = h +func (rr *LOC) parse(c *zlexer, o, f string) *ParseError { // Non zero defaults for LOC record, see RFC 1876, Section 3. rr.HorizPre = 165 // 10000 rr.VertPre = 162 // 10 @@ -794,11 +721,11 @@ func setLOC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { // North l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" + return nil } i, e := strconv.ParseUint(l.token, 10, 32) if e != nil || l.err { - return nil, &ParseError{f, "bad LOC Latitude", l}, "" + return &ParseError{f, "bad LOC Latitude", l} } rr.Latitude = 1000 * 60 * 60 * uint32(i) @@ -810,14 +737,14 @@ func setLOC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { } i, e = strconv.ParseUint(l.token, 10, 32) if e != nil || l.err { - return nil, &ParseError{f, "bad LOC Latitude minutes", l}, "" + return &ParseError{f, "bad LOC Latitude minutes", l} } rr.Latitude += 1000 * 60 * uint32(i) c.Next() // zBlank l, _ = c.Next() if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Latitude seconds", l}, "" + return &ParseError{f, "bad LOC Latitude seconds", l} } else { rr.Latitude += uint32(1000 * i) } @@ -828,14 +755,14 @@ func setLOC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { goto East } // If still alive, flag an error - return nil, &ParseError{f, "bad LOC Latitude North/South", l}, "" + return &ParseError{f, "bad LOC Latitude North/South", l} East: // East c.Next() // zBlank l, _ = c.Next() if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Longitude", l}, "" + return &ParseError{f, "bad LOC Longitude", l} } else { rr.Longitude = 1000 * 60 * 60 * uint32(i) } @@ -846,14 +773,14 @@ East: goto Altitude } if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Longitude minutes", l}, "" + return &ParseError{f, "bad LOC Longitude minutes", l} } else { rr.Longitude += 1000 * 60 * uint32(i) } c.Next() // zBlank l, _ = c.Next() if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Longitude seconds", l}, "" + return &ParseError{f, "bad LOC Longitude seconds", l} } else { rr.Longitude += uint32(1000 * i) } @@ -864,19 +791,19 @@ East: goto Altitude } // If still alive, flag an error - return nil, &ParseError{f, "bad LOC Longitude East/West", l}, "" + return &ParseError{f, "bad LOC Longitude East/West", l} Altitude: c.Next() // zBlank l, _ = c.Next() if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad LOC Altitude", l}, "" + return &ParseError{f, "bad LOC Altitude", l} } if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { l.token = l.token[0 : len(l.token)-1] } if i, e := strconv.ParseFloat(l.token, 32); e != nil { - return nil, &ParseError{f, "bad LOC Altitude", l}, "" + return &ParseError{f, "bad LOC Altitude", l} } else { rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) } @@ -891,19 +818,19 @@ Altitude: case 0: // Size e, m, ok := stringToCm(l.token) if !ok { - return nil, &ParseError{f, "bad LOC Size", l}, "" + return &ParseError{f, "bad LOC Size", l} } rr.Size = e&0x0f | m<<4&0xf0 case 1: // HorizPre e, m, ok := stringToCm(l.token) if !ok { - return nil, &ParseError{f, "bad LOC HorizPre", l}, "" + return &ParseError{f, "bad LOC HorizPre", l} } rr.HorizPre = e&0x0f | m<<4&0xf0 case 2: // VertPre e, m, ok := stringToCm(l.token) if !ok { - return nil, &ParseError{f, "bad LOC VertPre", l}, "" + return &ParseError{f, "bad LOC VertPre", l} } rr.VertPre = e&0x0f | m<<4&0xf0 } @@ -911,33 +838,30 @@ Altitude: case zBlank: // Ok default: - return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, "" + return &ParseError{f, "bad LOC Size, HorizPre or VertPre", l} } l, _ = c.Next() } - return rr, nil, "" + return nil } -func setHIP(h RR_Header, c *zlexer, o, f string) (RR, 
*ParseError, string) { - rr := new(HIP) - rr.Hdr = h - +func (rr *HIP) parse(c *zlexer, o, f string) *ParseError { // HitLength is not represented l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment + return nil } i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, "" + return &ParseError{f, "bad HIP PublicKeyAlgorithm", l} } rr.PublicKeyAlgorithm = uint8(i) c.Next() // zBlank l, _ = c.Next() // zString if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad HIP Hit", l}, "" + return &ParseError{f, "bad HIP Hit", l} } rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. rr.HitLength = uint8(len(rr.Hit)) / 2 @@ -945,7 +869,7 @@ func setHIP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { c.Next() // zBlank l, _ = c.Next() // zString if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad HIP PublicKey", l}, "" + return &ParseError{f, "bad HIP PublicKey", l} } rr.PublicKey = l.token // This cannot contain spaces rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) @@ -958,33 +882,31 @@ func setHIP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { case zString: name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + return &ParseError{f, "bad HIP RendezvousServers", l} } xs = append(xs, name) case zBlank: // Ok default: - return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + return &ParseError{f, "bad HIP RendezvousServers", l} } l, _ = c.Next() } + rr.RendezvousServers = xs - return rr, nil, l.comment + return nil } -func setCERT(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(CERT) - rr.Hdr = h - +func (rr *CERT) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, l.comment + return nil } if v, ok := StringToCertType[l.token]; ok { rr.Type = v } else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil { - return nil, &ParseError{f, "bad CERT Type", l}, "" + return &ParseError{f, "bad CERT Type", l} } else { rr.Type = uint16(i) } @@ -992,7 +914,7 @@ func setCERT(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { l, _ = c.Next() // zString i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad CERT KeyTag", l}, "" + return &ParseError{f, "bad CERT KeyTag", l} } rr.KeyTag = uint16(i) c.Next() // zBlank @@ -1000,42 +922,36 @@ func setCERT(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { if v, ok := StringToAlgorithm[l.token]; ok { rr.Algorithm = v } else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { - return nil, &ParseError{f, "bad CERT Algorithm", l}, "" + return &ParseError{f, "bad CERT Algorithm", l} } else { rr.Algorithm = uint8(i) } - s, e1, c1 := endingToString(c, "bad CERT Certificate", f) + s, e1 := endingToString(c, "bad CERT Certificate", f) if e1 != nil { - return nil, e1, c1 + return e1 } rr.Certificate = s - return rr, nil, c1 + return nil } -func setOPENPGPKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(OPENPGPKEY) - rr.Hdr = h - - s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f) +func (rr *OPENPGPKEY) parse(c *zlexer, o, f string) *ParseError { + s, e := endingToString(c, "bad OPENPGPKEY PublicKey", f) if e != nil { - return nil, e, c1 + return e } rr.PublicKey = s - return rr, nil, c1 + return nil } -func setCSYNC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(CSYNC) - rr.Hdr = h - +func (rr *CSYNC) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment + return nil } j, e := strconv.ParseUint(l.token, 10, 32) if e != nil { // Serial must be a number - return nil, &ParseError{f, "bad CSYNC serial", l}, "" + return &ParseError{f, "bad CSYNC serial", l} } rr.Serial = uint32(j) @@ -1045,7 +961,7 @@ func setCSYNC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { j, e = strconv.ParseUint(l.token, 10, 16) if e != nil { // Serial must be a number - return nil, &ParseError{f, "bad CSYNC flags", l}, "" + return &ParseError{f, "bad CSYNC flags", l} } rr.Flags = uint16(j) @@ -1063,33 +979,26 @@ func setCSYNC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { tokenUpper := strings.ToUpper(l.token) if k, ok = StringToType[tokenUpper]; !ok { if k, ok = typeToInt(l.token); !ok { - return nil, &ParseError{f, "bad CSYNC TypeBitMap", l}, "" + return &ParseError{f, "bad CSYNC TypeBitMap", l} } } rr.TypeBitMap = append(rr.TypeBitMap, k) default: - return nil, &ParseError{f, "bad CSYNC TypeBitMap", l}, "" + return &ParseError{f, "bad CSYNC TypeBitMap", l} } l, _ = c.Next() } - return rr, nil, l.comment + return nil } -func setSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - r, e, s := setRRSIG(h, c, o, f) - if r != nil { - return &SIG{*r.(*RRSIG)}, e, s - } - return nil, e, s +func (rr *SIG) parse(c *zlexer, o, f string) *ParseError { + return rr.RRSIG.parse(c, o, f) } -func setRRSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(RRSIG) - rr.Hdr = h - +func (rr *RRSIG) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, l.comment + return nil } tokenUpper := strings.ToUpper(l.token) @@ -1097,11 +1006,11 @@ func setRRSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { if strings.HasPrefix(tokenUpper, "TYPE") { t, ok = typeToInt(l.token) if !ok { - return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + return &ParseError{f, "bad RRSIG Typecovered", l} } rr.TypeCovered = t } else { - return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + return &ParseError{f, "bad RRSIG Typecovered", l} } } else { rr.TypeCovered = t @@ -1111,7 +1020,7 @@ func setRRSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { l, _ = c.Next() i, err := strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG Algorithm", l}, "" + return &ParseError{f, "bad RRSIG Algorithm", l} } rr.Algorithm = uint8(i) @@ -1119,7 +1028,7 @@ func setRRSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { l, _ = c.Next() i, err = strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG Labels", l}, "" + return &ParseError{f, "bad RRSIG Labels", l} } rr.Labels = uint8(i) @@ -1127,7 +1036,7 @@ func setRRSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { l, _ = c.Next() i, err = strconv.ParseUint(l.token, 10, 32) if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, "" + return &ParseError{f, "bad RRSIG OrigTtl", l} } rr.OrigTtl = uint32(i) @@ -1139,7 +1048,7 @@ func setRRSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { // TODO(miek): error out on > MAX_UINT32, same below rr.Expiration = uint32(i) } else { - return nil, &ParseError{f, "bad RRSIG Expiration", l}, "" + return &ParseError{f, "bad RRSIG Expiration", l} } } else { rr.Expiration = i @@ -1151,7 +1060,7 @@ func setRRSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { rr.Inception = uint32(i) } else { - return nil, &ParseError{f, "bad RRSIG Inception", l}, "" + return &ParseError{f, "bad RRSIG Inception", l} } } else { rr.Inception = i @@ -1161,7 +1070,7 @@ func setRRSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { l, _ = c.Next() i, err = strconv.ParseUint(l.token, 10, 16) if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG KeyTag", l}, "" + return &ParseError{f, "bad RRSIG KeyTag", l} } rr.KeyTag = uint16(i) @@ -1170,32 +1079,29 @@ func setRRSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr.SignerName = l.token name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad RRSIG SignerName", l}, "" + return &ParseError{f, "bad RRSIG SignerName", l} } rr.SignerName = name - s, e, c1 := endingToString(c, "bad RRSIG Signature", f) + s, e := endingToString(c, "bad RRSIG Signature", f) if e != nil { - return nil, e, c1 + return e } rr.Signature = s - return rr, nil, c1 + return nil } -func setNSEC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NSEC) - rr.Hdr = h - +func (rr *NSEC) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() rr.NextDomain = l.token if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, l.comment + return nil } name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad NSEC NextDomain", l}, "" + return &ParseError{f, "bad NSEC NextDomain", l} } rr.NextDomain = name @@ -1213,50 +1119,47 @@ func setNSEC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { tokenUpper := strings.ToUpper(l.token) if k, ok = StringToType[tokenUpper]; !ok { if k, ok = typeToInt(l.token); !ok { - return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + return &ParseError{f, "bad NSEC TypeBitMap", l} } } rr.TypeBitMap = append(rr.TypeBitMap, k) default: - return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + return &ParseError{f, "bad NSEC TypeBitMap", l} } l, _ = c.Next() } - return rr, nil, l.comment + return nil } -func setNSEC3(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NSEC3) - rr.Hdr = h - +func (rr *NSEC3) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment + return nil } i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3 Hash", l}, "" + return &ParseError{f, "bad NSEC3 Hash", l} } rr.Hash = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3 Flags", l}, "" + return &ParseError{f, "bad NSEC3 Flags", l} } rr.Flags = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3 Iterations", l}, "" + return &ParseError{f, "bad NSEC3 Iterations", l} } rr.Iterations = uint16(i) c.Next() l, _ = c.Next() if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" + return &ParseError{f, "bad NSEC3 Salt", l} } if l.token != "-" { rr.SaltLength = uint8(len(l.token)) / 2 @@ -1266,7 +1169,7 @@ func setNSEC3(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { c.Next() l, _ = c.Next() if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad NSEC3 NextDomain", l}, "" + return &ParseError{f, "bad NSEC3 NextDomain", l} } rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) rr.NextDomain = l.token @@ -1285,44 +1188,41 @@ func setNSEC3(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { tokenUpper := strings.ToUpper(l.token) if k, ok = StringToType[tokenUpper]; !ok { if k, ok = typeToInt(l.token); !ok { - return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + return &ParseError{f, "bad NSEC3 TypeBitMap", l} } } rr.TypeBitMap = append(rr.TypeBitMap, k) default: - return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + return &ParseError{f, "bad NSEC3 TypeBitMap", l} } l, _ = c.Next() } - return rr, nil, l.comment + return nil } -func setNSEC3PARAM(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NSEC3PARAM) - rr.Hdr = h - +func (rr *NSEC3PARAM) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, "" + return &ParseError{f, "bad NSEC3PARAM Hash", l} } rr.Hash = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, "" + return &ParseError{f, "bad NSEC3PARAM Flags", l} } rr.Flags = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, "" + return &ParseError{f, "bad NSEC3PARAM Iterations", l} } rr.Iterations = uint16(i) c.Next() @@ -1331,20 +1231,17 @@ func setNSEC3PARAM(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string rr.SaltLength = uint8(len(l.token)) rr.Salt = l.token } - return rr, nil, "" + return slurpRemainder(c, f) } -func setEUI48(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(EUI48) - rr.Hdr = h - +func (rr *EUI48) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } if len(l.token) != 17 || l.err { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" + return &ParseError{f, "bad EUI48 Address", l} } addr := make([]byte, 12) dash := 0 @@ -1353,7 +1250,7 @@ func setEUI48(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { addr[i+1] = l.token[i+1+dash] dash++ if l.token[i+1+dash] != '-' { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" + return &ParseError{f, "bad EUI48 Address", l} } } addr[10] = l.token[15] @@ -1361,23 +1258,20 @@ func setEUI48(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { i, e := strconv.ParseUint(string(addr), 16, 48) if e != nil { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" + return &ParseError{f, "bad EUI48 Address", l} } rr.Address = i - return rr, nil, "" + return slurpRemainder(c, f) } -func setEUI64(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(EUI64) - rr.Hdr = h - +func (rr *EUI64) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } if len(l.token) != 23 || l.err { - return nil, &ParseError{f, "bad EUI64 Address", l}, "" + return &ParseError{f, "bad EUI64 Address", l} } addr := make([]byte, 16) dash := 0 @@ -1386,7 +1280,7 @@ func setEUI64(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { addr[i+1] = l.token[i+1+dash] dash++ if l.token[i+1+dash] != '-' { - return nil, &ParseError{f, "bad EUI64 Address", l}, "" + return &ParseError{f, "bad EUI64 Address", l} } } addr[14] = l.token[21] @@ -1394,200 +1288,172 @@ func setEUI64(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { i, e := strconv.ParseUint(string(addr), 16, 64) if e != nil { - return nil, &ParseError{f, "bad EUI68 Address", l}, "" + return &ParseError{f, "bad EUI68 Address", l} } - rr.Address = uint64(i) - return rr, nil, "" + rr.Address = i + return slurpRemainder(c, f) } -func setSSHFP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(SSHFP) - rr.Hdr = h - +func (rr *SSHFP) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" + return nil } i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad SSHFP Algorithm", l}, "" + return &ParseError{f, "bad SSHFP Algorithm", l} } rr.Algorithm = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad SSHFP Type", l}, "" + return &ParseError{f, "bad SSHFP Type", l} } rr.Type = uint8(i) c.Next() // zBlank - s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f) + s, e1 := endingToString(c, "bad SSHFP Fingerprint", f) if e1 != nil { - return nil, e1, c1 + return e1 } rr.FingerPrint = s - return rr, nil, "" + return nil } -func setDNSKEYs(h RR_Header, c *zlexer, o, f, typ string) (RR, *ParseError, string) { - rr := new(DNSKEY) - rr.Hdr = h - +func (rr *DNSKEY) parseDNSKEY(c *zlexer, o, f, typ string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment + return nil } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" + return &ParseError{f, "bad " + typ + " Flags", l} } rr.Flags = uint16(i) c.Next() // zBlank l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" + return &ParseError{f, "bad " + typ + " Protocol", l} } rr.Protocol = uint8(i) c.Next() // zBlank l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + return &ParseError{f, "bad " + typ + " Algorithm", l} } rr.Algorithm = uint8(i) - s, e1, c1 := endingToString(c, "bad "+typ+" PublicKey", f) + s, e1 := endingToString(c, "bad "+typ+" PublicKey", f) if e1 != nil { - return nil, e1, c1 + return e1 } rr.PublicKey = s - return rr, nil, c1 + return nil } -func setKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "KEY") - if r != nil { - return &KEY{*r.(*DNSKEY)}, e, s - } - return nil, e, s +func (rr *DNSKEY) parse(c *zlexer, o, f string) *ParseError { + return rr.parseDNSKEY(c, o, f, "DNSKEY") } -func setDNSKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY") - return r, e, s +func (rr *KEY) parse(c *zlexer, o, f string) *ParseError { + return rr.parseDNSKEY(c, o, f, "KEY") } -func setCDNSKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY") - if r != nil { - return &CDNSKEY{*r.(*DNSKEY)}, e, s - } - return nil, e, s +func (rr *CDNSKEY) parse(c *zlexer, o, f string) *ParseError { + return rr.parseDNSKEY(c, o, f, "CDNSKEY") } -func setRKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(RKEY) - rr.Hdr = h - +func (rr *RKEY) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, l.comment + return nil } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad RKEY Flags", l}, "" + return &ParseError{f, "bad RKEY Flags", l} } rr.Flags = uint16(i) c.Next() // zBlank l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad RKEY Protocol", l}, "" + return &ParseError{f, "bad RKEY Protocol", l} } rr.Protocol = uint8(i) c.Next() // zBlank l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad RKEY Algorithm", l}, "" + return &ParseError{f, "bad RKEY Algorithm", l} } rr.Algorithm = uint8(i) - s, e1, c1 := endingToString(c, "bad RKEY PublicKey", f) + s, e1 := endingToString(c, "bad RKEY PublicKey", f) if e1 != nil { - return nil, e1, c1 + return e1 } rr.PublicKey = s - return rr, nil, c1 + return nil } -func setEID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(EID) - rr.Hdr = h - s, e, c1 := endingToString(c, "bad EID Endpoint", f) +func (rr *EID) parse(c *zlexer, o, f string) *ParseError { + s, e := endingToString(c, "bad EID Endpoint", f) if e != nil { - return nil, e, c1 + return e } rr.Endpoint = s - return rr, nil, c1 + return nil } -func setNIMLOC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NIMLOC) - rr.Hdr = h - s, e, c1 := endingToString(c, "bad NIMLOC Locator", f) +func (rr *NIMLOC) parse(c *zlexer, o, f string) *ParseError { + s, e := endingToString(c, "bad NIMLOC Locator", f) if e != nil { - return nil, e, c1 + return e } rr.Locator = s - return rr, nil, c1 + return nil } -func setGPOS(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(GPOS) - rr.Hdr = h - +func (rr *GPOS) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } _, e := strconv.ParseFloat(l.token, 64) if e != nil || l.err { - return nil, &ParseError{f, "bad GPOS Longitude", l}, "" + return &ParseError{f, "bad GPOS Longitude", l} } rr.Longitude = l.token c.Next() // zBlank l, _ = c.Next() _, e = strconv.ParseFloat(l.token, 64) if e != nil || l.err { - return nil, &ParseError{f, "bad GPOS Latitude", l}, "" + return &ParseError{f, "bad GPOS Latitude", l} } rr.Latitude = l.token c.Next() // zBlank l, _ = c.Next() _, e = strconv.ParseFloat(l.token, 64) if e != nil || l.err { - return nil, &ParseError{f, "bad GPOS Altitude", l}, "" + return &ParseError{f, "bad GPOS Altitude", l} } rr.Altitude = l.token - return rr, nil, "" + return slurpRemainder(c, f) } -func setDSs(h RR_Header, c *zlexer, o, f, typ string) (RR, *ParseError, string) { - rr := new(DS) - rr.Hdr = h - +func (rr *DS) parseDS(c *zlexer, o, f, typ string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, l.comment + return nil } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" + return &ParseError{f, "bad " + typ + " KeyTag", l} } rr.KeyTag = uint16(i) c.Next() // zBlank @@ -1596,7 +1462,7 @@ func setDSs(h RR_Header, c *zlexer, o, f, typ string) (RR, *ParseError, string) tokenUpper := strings.ToUpper(l.token) i, ok := StringToAlgorithm[tokenUpper] if !ok || l.err { - return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + return &ParseError{f, "bad " + typ + " Algorithm", l} } rr.Algorithm = i } else { @@ -1606,50 +1472,38 @@ func setDSs(h RR_Header, c *zlexer, o, f, typ string) (RR, *ParseError, string) l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" + return &ParseError{f, "bad " + typ + " DigestType", l} } rr.DigestType = uint8(i) - s, e1, c1 := endingToString(c, "bad "+typ+" Digest", f) + s, e1 := endingToString(c, "bad "+typ+" Digest", f) if e1 != nil { - return nil, e1, c1 + return e1 } rr.Digest = s - return rr, nil, c1 + return nil } -func setDS(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "DS") - return r, e, s +func (rr *DS) parse(c *zlexer, o, f string) *ParseError { + return rr.parseDS(c, o, f, "DS") } -func setDLV(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "DLV") - if r != nil { - return &DLV{*r.(*DS)}, e, s - } - return nil, e, s +func (rr *DLV) parse(c *zlexer, o, f string) *ParseError { + return rr.parseDS(c, o, f, "DLV") } -func setCDS(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "CDS") - if r != nil { - return &CDS{*r.(*DS)}, e, s - } - return nil, e, s +func (rr *CDS) parse(c *zlexer, o, f string) *ParseError { + return rr.parseDS(c, o, f, "CDS") } -func setTA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(TA) - rr.Hdr = h - +func (rr *TA) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment + return nil } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad TA KeyTag", l}, "" + return &ParseError{f, "bad TA KeyTag", l} } rr.KeyTag = uint16(i) c.Next() // zBlank @@ -1658,7 +1512,7 @@ func setTA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { tokenUpper := strings.ToUpper(l.token) i, ok := StringToAlgorithm[tokenUpper] if !ok || l.err { - return nil, &ParseError{f, "bad TA Algorithm", l}, "" + return &ParseError{f, "bad TA Algorithm", l} } rr.Algorithm = i } else { @@ -1668,274 +1522,238 @@ func setTA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad TA DigestType", l}, "" + return &ParseError{f, "bad TA DigestType", l} } rr.DigestType = uint8(i) - s, err, c1 := endingToString(c, "bad TA Digest", f) + s, err := endingToString(c, "bad TA Digest", f) if err != nil { - return nil, err, c1 + return err } rr.Digest = s - return rr, nil, c1 + return nil } -func setTLSA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(TLSA) - rr.Hdr = h - +func (rr *TLSA) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, l.comment + return nil } i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad TLSA Usage", l}, "" + return &ParseError{f, "bad TLSA Usage", l} } rr.Usage = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad TLSA Selector", l}, "" + return &ParseError{f, "bad TLSA Selector", l} } rr.Selector = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad TLSA MatchingType", l}, "" + return &ParseError{f, "bad TLSA MatchingType", l} } rr.MatchingType = uint8(i) // So this needs be e2 (i.e. different than e), because...??t - s, e2, c1 := endingToString(c, "bad TLSA Certificate", f) + s, e2 := endingToString(c, "bad TLSA Certificate", f) if e2 != nil { - return nil, e2, c1 + return e2 } rr.Certificate = s - return rr, nil, c1 + return nil } -func setSMIMEA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(SMIMEA) - rr.Hdr = h - +func (rr *SMIMEA) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment + return nil } i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA Usage", l}, "" + return &ParseError{f, "bad SMIMEA Usage", l} } rr.Usage = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA Selector", l}, "" + return &ParseError{f, "bad SMIMEA Selector", l} } rr.Selector = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, "" + return &ParseError{f, "bad SMIMEA MatchingType", l} } rr.MatchingType = uint8(i) // So this needs be e2 (i.e. 
different than e), because...??t - s, e2, c1 := endingToString(c, "bad SMIMEA Certificate", f) + s, e2 := endingToString(c, "bad SMIMEA Certificate", f) if e2 != nil { - return nil, e2, c1 + return e2 } rr.Certificate = s - return rr, nil, c1 + return nil } -func setRFC3597(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(RFC3597) - rr.Hdr = h - +func (rr *RFC3597) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if l.token != "\\#" { - return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" + return &ParseError{f, "bad RFC3597 Rdata", l} } c.Next() // zBlank l, _ = c.Next() rdlength, e := strconv.Atoi(l.token) if e != nil || l.err { - return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, "" + return &ParseError{f, "bad RFC3597 Rdata ", l} } - s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f) + s, e1 := endingToString(c, "bad RFC3597 Rdata", f) if e1 != nil { - return nil, e1, c1 + return e1 } if rdlength*2 != len(s) { - return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" + return &ParseError{f, "bad RFC3597 Rdata", l} } rr.Rdata = s - return rr, nil, c1 + return nil } -func setSPF(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(SPF) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f) +func (rr *SPF) parse(c *zlexer, o, f string) *ParseError { + s, e := endingToTxtSlice(c, "bad SPF Txt", f) if e != nil { - return nil, e, "" + return e } rr.Txt = s - return rr, nil, c1 + return nil } -func setAVC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(AVC) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad AVC Txt", f) +func (rr *AVC) parse(c *zlexer, o, f string) *ParseError { + s, e := endingToTxtSlice(c, "bad AVC Txt", f) if e != nil { - return nil, e, "" + return e } rr.Txt = s - return rr, nil, c1 + return nil } -func setTXT(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(TXT) - rr.Hdr = h - +func (rr *TXT) parse(c *zlexer, o, f string) *ParseError { // no zBlank reading here, because all this rdata is TXT - s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f) + s, e := endingToTxtSlice(c, "bad TXT Txt", f) if e != nil { - return nil, e, "" + return e } rr.Txt = s - return rr, nil, c1 + return nil } // identical to setTXT -func setNINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NINFO) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f) +func (rr *NINFO) parse(c *zlexer, o, f string) *ParseError { + s, e := endingToTxtSlice(c, "bad NINFO ZSData", f) if e != nil { - return nil, e, "" + return e } rr.ZSData = s - return rr, nil, c1 + return nil } -func setURI(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(URI) - rr.Hdr = h - +func (rr *URI) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" + return nil } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad URI Priority", l}, "" + return &ParseError{f, "bad URI Priority", l} } rr.Priority = uint16(i) c.Next() // zBlank l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad URI Weight", l}, "" + return &ParseError{f, "bad URI Weight", l} } rr.Weight = uint16(i) c.Next() // zBlank - s, err, c1 := endingToTxtSlice(c, "bad URI Target", f) + s, err := endingToTxtSlice(c, "bad URI Target", f) if err != nil { - return nil, err, "" + return err } if len(s) != 1 { - return nil, &ParseError{f, "bad URI Target", l}, "" + return &ParseError{f, "bad URI Target", l} } rr.Target = s[0] - return rr, nil, c1 + return nil } -func setDHCID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { +func (rr *DHCID) parse(c *zlexer, o, f string) *ParseError { // awesome record to parse! - rr := new(DHCID) - rr.Hdr = h - - s, e, c1 := endingToString(c, "bad DHCID Digest", f) + s, e := endingToString(c, "bad DHCID Digest", f) if e != nil { - return nil, e, c1 + return e } rr.Digest = s - return rr, nil, c1 + return nil } -func setNID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NID) - rr.Hdr = h - +func (rr *NID) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad NID Preference", l}, "" + return &ParseError{f, "bad NID Preference", l} } rr.Preference = uint16(i) c.Next() // zBlank l, _ = c.Next() // zString u, err := stringToNodeID(l) if err != nil || l.err { - return nil, err, "" + return err } rr.NodeID = u - return rr, nil, "" + return slurpRemainder(c, f) } -func setL32(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(L32) - rr.Hdr = h - +func (rr *L32) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad L32 Preference", l}, "" + return &ParseError{f, "bad L32 Preference", l} } rr.Preference = uint16(i) c.Next() // zBlank l, _ = c.Next() // zString rr.Locator32 = net.ParseIP(l.token) if rr.Locator32 == nil || l.err { - return nil, &ParseError{f, "bad L32 Locator", l}, "" + return &ParseError{f, "bad L32 Locator", l} } - return rr, nil, "" + return slurpRemainder(c, f) } -func setLP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(LP) - rr.Hdr = h - +func (rr *LP) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad LP Preference", l}, "" + return &ParseError{f, "bad LP Preference", l} } rr.Preference = uint16(i) @@ -1944,98 +1762,83 @@ func setLP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr.Fqdn = l.token name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return nil, &ParseError{f, "bad LP Fqdn", l}, "" + return &ParseError{f, "bad LP Fqdn", l} } rr.Fqdn = name - return rr, nil, "" + return slurpRemainder(c, f) } -func setL64(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(L64) - rr.Hdr = h - +func (rr *L64) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad L64 Preference", l}, "" + return &ParseError{f, "bad L64 Preference", l} } rr.Preference = uint16(i) c.Next() // zBlank l, _ = c.Next() // zString u, err := stringToNodeID(l) if err != nil || l.err { - return nil, err, "" + return err } rr.Locator64 = u - return rr, nil, "" + return slurpRemainder(c, f) } -func setUID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(UID) - rr.Hdr = h - +func (rr *UID) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 32) if e != nil || l.err { - return nil, &ParseError{f, "bad UID Uid", l}, "" + return &ParseError{f, "bad UID Uid", l} } rr.Uid = uint32(i) - return rr, nil, "" + return slurpRemainder(c, f) } -func setGID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(GID) - rr.Hdr = h - +func (rr *GID) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 32) if e != nil || l.err { - return nil, &ParseError{f, "bad GID Gid", l}, "" + return &ParseError{f, "bad GID Gid", l} } rr.Gid = uint32(i) - return rr, nil, "" + return slurpRemainder(c, f) } -func setUINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(UINFO) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f) +func (rr *UINFO) parse(c *zlexer, o, f string) *ParseError { + s, e := endingToTxtSlice(c, "bad UINFO Uinfo", f) if e != nil { - return nil, e, c1 + return e } if ln := len(s); ln == 0 { - return rr, nil, c1 + return nil } rr.Uinfo = s[0] // silently discard anything after the first character-string - return rr, nil, c1 + return nil } -func setPX(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(PX) - rr.Hdr = h - +func (rr *PX) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" + return slurpRemainder(c, f) } i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return nil, &ParseError{f, "bad PX Preference", l}, "" + return &ParseError{f, "bad PX Preference", l} } rr.Preference = uint16(i) @@ -2044,7 +1847,7 @@ func setPX(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr.Map822 = l.token map822, map822Ok := toAbsoluteName(l.token, o) if l.err || !map822Ok { - return nil, &ParseError{f, "bad PX Map822", l}, "" + return &ParseError{f, "bad PX Map822", l} } rr.Map822 = map822 @@ -2053,56 +1856,50 @@ func setPX(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr.Mapx400 = l.token mapx400, mapx400Ok := toAbsoluteName(l.token, o) if l.err || !mapx400Ok { - return nil, &ParseError{f, "bad PX Mapx400", l}, "" + return &ParseError{f, "bad PX Mapx400", l} } rr.Mapx400 = mapx400 - return rr, nil, "" + return slurpRemainder(c, f) } -func setCAA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(CAA) - rr.Hdr = h - +func (rr *CAA) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment + return nil } i, err := strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { - return nil, &ParseError{f, "bad CAA Flag", l}, "" + return &ParseError{f, "bad CAA Flag", l} } rr.Flag = uint8(i) c.Next() // zBlank l, _ = c.Next() // zString if l.value != zString { - return nil, &ParseError{f, "bad CAA Tag", l}, "" + return &ParseError{f, "bad CAA Tag", l} } rr.Tag = l.token c.Next() // zBlank - s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f) + s, e := endingToTxtSlice(c, "bad CAA Value", f) if e != nil { - return nil, e, "" + return e } if len(s) != 1 { - return nil, &ParseError{f, "bad CAA Value", l}, "" + return &ParseError{f, "bad CAA Value", l} } rr.Value = s[0] - return rr, nil, c1 + return nil } -func setTKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(TKEY) - rr.Hdr = h - +func (rr *TKEY) parse(c *zlexer, o, f string) *ParseError { l, _ := c.Next() // Algorithm if l.value != zString { - return nil, &ParseError{f, "bad TKEY algorithm", l}, "" + return &ParseError{f, "bad TKEY algorithm", l} } rr.Algorithm = l.token c.Next() // zBlank @@ -2111,13 +1908,13 @@ func setTKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { l, _ = c.Next() i, err := strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { - return nil, &ParseError{f, "bad TKEY key length", l}, "" + return &ParseError{f, "bad TKEY key length", l} } rr.KeySize = uint16(i) c.Next() // zBlank l, _ = c.Next() if l.value != zString { - return nil, &ParseError{f, "bad TKEY key", l}, "" + return &ParseError{f, "bad TKEY key", l} } rr.Key = l.token c.Next() // zBlank @@ -2126,84 +1923,15 @@ func setTKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { l, _ = c.Next() i, err = strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { - return nil, &ParseError{f, "bad TKEY otherdata length", l}, "" + return &ParseError{f, "bad TKEY otherdata length", l} } rr.OtherLen = uint16(i) c.Next() // zBlank l, _ = c.Next() if l.value != zString { - return nil, &ParseError{f, "bad TKEY otherday", l}, "" + return &ParseError{f, "bad TKEY otherday", l} } rr.OtherData = l.token - return rr, nil, "" -} - -var typeToparserFunc = map[uint16]parserFunc{ - TypeAAAA: {setAAAA, false}, - TypeAFSDB: {setAFSDB, false}, - TypeA: {setA, false}, - TypeCAA: {setCAA, true}, - TypeCDS: {setCDS, true}, - TypeCDNSKEY: 
{setCDNSKEY, true}, - TypeCERT: {setCERT, true}, - TypeCNAME: {setCNAME, false}, - TypeCSYNC: {setCSYNC, true}, - TypeDHCID: {setDHCID, true}, - TypeDLV: {setDLV, true}, - TypeDNAME: {setDNAME, false}, - TypeKEY: {setKEY, true}, - TypeDNSKEY: {setDNSKEY, true}, - TypeDS: {setDS, true}, - TypeEID: {setEID, true}, - TypeEUI48: {setEUI48, false}, - TypeEUI64: {setEUI64, false}, - TypeGID: {setGID, false}, - TypeGPOS: {setGPOS, false}, - TypeHINFO: {setHINFO, true}, - TypeHIP: {setHIP, true}, - TypeKX: {setKX, false}, - TypeL32: {setL32, false}, - TypeL64: {setL64, false}, - TypeLOC: {setLOC, true}, - TypeLP: {setLP, false}, - TypeMB: {setMB, false}, - TypeMD: {setMD, false}, - TypeMF: {setMF, false}, - TypeMG: {setMG, false}, - TypeMINFO: {setMINFO, false}, - TypeMR: {setMR, false}, - TypeMX: {setMX, false}, - TypeNAPTR: {setNAPTR, false}, - TypeNID: {setNID, false}, - TypeNIMLOC: {setNIMLOC, true}, - TypeNINFO: {setNINFO, true}, - TypeNSAPPTR: {setNSAPPTR, false}, - TypeNSEC3PARAM: {setNSEC3PARAM, false}, - TypeNSEC3: {setNSEC3, true}, - TypeNSEC: {setNSEC, true}, - TypeNS: {setNS, false}, - TypeOPENPGPKEY: {setOPENPGPKEY, true}, - TypePTR: {setPTR, false}, - TypePX: {setPX, false}, - TypeSIG: {setSIG, true}, - TypeRKEY: {setRKEY, true}, - TypeRP: {setRP, false}, - TypeRRSIG: {setRRSIG, true}, - TypeRT: {setRT, false}, - TypeSMIMEA: {setSMIMEA, true}, - TypeSOA: {setSOA, false}, - TypeSPF: {setSPF, true}, - TypeAVC: {setAVC, true}, - TypeSRV: {setSRV, false}, - TypeSSHFP: {setSSHFP, true}, - TypeTALINK: {setTALINK, false}, - TypeTA: {setTA, true}, - TypeTLSA: {setTLSA, true}, - TypeTXT: {setTXT, true}, - TypeUID: {setUID, false}, - TypeUINFO: {setUINFO, true}, - TypeURI: {setURI, true}, - TypeX25: {setX25, false}, - TypeTKEY: {setTKEY, true}, + return nil } diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index 4b4ec33c8d1cd..882403704afca 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -82,6 +82,7 @@ type ConnectionStater interface { type response struct { msg []byte + closed bool // connection has been closed hijacked bool // connection has been hijacked by handler tsigTimersOnly bool tsigStatus error @@ -161,11 +162,11 @@ type defaultReader struct { *Server } -func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { +func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { return dr.readTCP(conn, timeout) } -func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { +func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { return dr.readUDP(conn, timeout) } @@ -202,9 +203,6 @@ type Server struct { IdleTimeout func() time.Duration // Secret(s) for Tsig map[]. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2). TsigSecret map[string]string - // Unsafe instructs the server to disregard any sanity checks and directly hand the message to - // the handler. It will specifically not check if the query has the QR bit not set. - Unsafe bool // If NotifyStartedFunc is set it is called once the server has started listening. NotifyStartedFunc func() // DecorateReader is optional, allows customization of the process that reads raw DNS messages. @@ -216,6 +214,9 @@ type Server struct { // Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address. 
// It is only supported on go1.11+ and when using ListenAndServe. ReusePort bool + // AcceptMsgFunc will check the incoming message and will reject it early in the process. + // By default DefaultMsgAcceptFunc will be used. + MsgAcceptFunc MsgAcceptFunc // UDP packet or TCP connection queue queue chan *response @@ -299,6 +300,9 @@ func (srv *Server) init() { if srv.UDPSize == 0 { srv.UDPSize = MinMsgSize } + if srv.MsgAcceptFunc == nil { + srv.MsgAcceptFunc = defaultMsgAcceptFunc + } srv.udpPool.New = makeUDPBuffer(srv.UDPSize) } @@ -459,11 +463,10 @@ var testShutdownNotify *sync.Cond // getReadTimeout is a helper func to use system timeout if server did not intend to change it. func (srv *Server) getReadTimeout() time.Duration { - rtimeout := dnsTimeout if srv.ReadTimeout != 0 { - rtimeout = srv.ReadTimeout + return srv.ReadTimeout } - return rtimeout + return dnsTimeout } // serveTCP starts a TCP listener for the server. @@ -514,7 +517,7 @@ func (srv *Server) serveUDP(l *net.UDPConn) error { srv.NotifyStartedFunc() } - reader := Reader(&defaultReader{srv}) + reader := Reader(defaultReader{srv}) if srv.DecorateReader != nil { reader = srv.DecorateReader(reader) } @@ -584,7 +587,7 @@ func (srv *Server) serve(w *response) { w.wg.Done() }() - reader := Reader(&defaultReader{srv}) + reader := Reader(defaultReader{srv}) if srv.DecorateReader != nil { reader = srv.DecorateReader(reader) } @@ -629,14 +632,34 @@ func (srv *Server) disposeBuffer(w *response) { } func (srv *Server) serveDNS(w *response) { + dh, off, err := unpackMsgHdr(w.msg, 0) + if err != nil { + // Let client hang, they are sending crap; any reply can be used to amplify. + return + } + req := new(Msg) - err := req.Unpack(w.msg) - if err != nil { // Send a FormatError back - x := new(Msg) - x.SetRcodeFormatError(req) - w.WriteMsg(x) + req.setHdr(dh) + + switch srv.MsgAcceptFunc(dh) { + case MsgAccept: + case MsgIgnore: + return + case MsgReject: + req.SetRcodeFormatError(req) + // Are we allowed to delete any OPT records here? + req.Ns, req.Answer, req.Extra = nil, nil, nil + + w.WriteMsg(req) + srv.disposeBuffer(w) + return } - if err != nil || !srv.Unsafe && req.Response { + + if err := req.unpack(dh, w.msg, off); err != nil { + req.SetRcodeFormatError(req) + req.Ns, req.Answer, req.Extra = nil, nil, nil + + w.WriteMsg(req) srv.disposeBuffer(w) return } @@ -728,6 +751,10 @@ func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *S // WriteMsg implements the ResponseWriter.WriteMsg method. func (w *response) WriteMsg(m *Msg) (err error) { + if w.closed { + return &Error{err: "WriteMsg called after Close"} + } + var data []byte if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) if t := m.IsTsig(); t != nil { @@ -749,10 +776,13 @@ func (w *response) WriteMsg(m *Msg) (err error) { // Write implements the ResponseWriter.Write method. 
func (w *response) Write(m []byte) (int, error) { + if w.closed { + return 0, &Error{err: "Write called after Close"} + } + switch { case w.udp != nil: - n, err := WriteToSessionUDP(w.udp, m, w.udpSession) - return n, err + return WriteToSessionUDP(w.udp, m, w.udpSession) case w.tcp != nil: lm := len(m) if lm < 2 { @@ -768,7 +798,7 @@ func (w *response) Write(m []byte) (int, error) { n, err := io.Copy(w.tcp, bytes.NewReader(m)) return int(n), err default: - panic("dns: Write called after Close") + panic("dns: internal error: udp and tcp both nil") } } @@ -780,7 +810,7 @@ func (w *response) LocalAddr() net.Addr { case w.tcp != nil: return w.tcp.LocalAddr() default: - panic("dns: LocalAddr called after Close") + panic("dns: internal error: udp and tcp both nil") } } @@ -792,7 +822,7 @@ func (w *response) RemoteAddr() net.Addr { case w.tcp != nil: return w.tcp.RemoteAddr() default: - panic("dns: RemoteAddr called after Close") + panic("dns: internal error: udpSession and tcp both nil") } } @@ -807,13 +837,20 @@ func (w *response) Hijack() { w.hijacked = true } // Close implements the ResponseWriter.Close method func (w *response) Close() error { - // Can't close the udp conn, as that is actually the listener. - if w.tcp != nil { - e := w.tcp.Close() - w.tcp = nil - return e + if w.closed { + return &Error{err: "connection already closed"} + } + w.closed = true + + switch { + case w.udp != nil: + // Can't close the udp conn, as that is actually the listener. + return nil + case w.tcp != nil: + return w.tcp.Close() + default: + panic("dns: internal error: udp and tcp both nil") } - return nil } // ConnectionState() implements the ConnectionStater.ConnectionState() interface. diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go index 07c2acb196cc8..ec65dd7f95ddf 100644 --- a/vendor/github.com/miekg/dns/sig0.go +++ b/vendor/github.com/miekg/dns/sig0.go @@ -21,15 +21,11 @@ func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { return nil, ErrKey } - rr.Header().Rrtype = TypeSIG - rr.Header().Class = ClassANY - rr.Header().Ttl = 0 - rr.Header().Name = "." 
- rr.OrigTtl = 0 - rr.TypeCovered = 0 - rr.Labels = 0 - buf := make([]byte, m.Len()+rr.len()) + rr.Hdr = RR_Header{Name: ".", Rrtype: TypeSIG, Class: ClassANY, Ttl: 0} + rr.OrigTtl, rr.TypeCovered, rr.Labels = 0, 0, 0 + + buf := make([]byte, m.Len()+Len(rr)) mbuf, err := m.PackBuffer(buf) if err != nil { return nil, err @@ -107,7 +103,7 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error { anc := binary.BigEndian.Uint16(buf[6:]) auc := binary.BigEndian.Uint16(buf[8:]) adc := binary.BigEndian.Uint16(buf[10:]) - offset := 12 + offset := headerSize var err error for i := uint16(0); i < qdc && offset < buflen; i++ { _, offset, err = UnpackDomainName(buf, offset) @@ -167,7 +163,7 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error { } // If key has come from the DNS name compression might // have mangled the case of the name - if strings.ToLower(signername) != strings.ToLower(k.Header().Name) { + if !strings.EqualFold(signername, k.Header().Name) { return &Error{err: "signer name doesn't match key name"} } sigend := offset diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go index 9573c7d0b8c25..febcc300fe179 100644 --- a/vendor/github.com/miekg/dns/singleinflight.go +++ b/vendor/github.com/miekg/dns/singleinflight.go @@ -23,6 +23,8 @@ type call struct { type singleflight struct { sync.Mutex // protects m m map[string]*call // lazily initialized + + dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges } // Do executes and returns the results of the given function, making @@ -49,9 +51,11 @@ func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v c.val, c.rtt, c.err = fn() c.wg.Done() - g.Lock() - delete(g.m, key) - g.Unlock() + if !g.dontDeleteForTesting { + g.Lock() + delete(g.m, key) + g.Unlock() + } return c.val, c.rtt, c.err, c.dups > 0 } diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go index 4e7ded4b386e0..89f09f0d10c16 100644 --- a/vendor/github.com/miekg/dns/smimea.go +++ b/vendor/github.com/miekg/dns/smimea.go @@ -14,10 +14,7 @@ func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) r.MatchingType = uint8(matchingType) r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err - } - return nil + return err } // Verify verifies a SMIMEA record against an SSL certificate. If it is OK diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go index 431e2fb5afca8..4e07983b9787c 100644 --- a/vendor/github.com/miekg/dns/tlsa.go +++ b/vendor/github.com/miekg/dns/tlsa.go @@ -14,10 +14,7 @@ func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) ( r.MatchingType = uint8(matchingType) r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err - } - return nil + return err } // Verify verifies a TLSA record against an SSL certificate. If it is OK diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go index 4837b4ab1fd81..afa462fa0731a 100644 --- a/vendor/github.com/miekg/dns/tsig.go +++ b/vendor/github.com/miekg/dns/tsig.go @@ -54,6 +54,10 @@ func (rr *TSIG) String() string { return s } +func (rr *TSIG) parse(c *zlexer, origin, file string) *ParseError { + panic("dns: internal error: parse should never be called on TSIG") +} + // The following values must be put in wireformat, so that the MAC can be calculated. // RFC 2845, section 3.4.2. TSIG Variables. 
type tsigWireFmt struct { @@ -113,13 +117,13 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s var h hash.Hash switch strings.ToLower(rr.Algorithm) { case HmacMD5: - h = hmac.New(md5.New, []byte(rawsecret)) + h = hmac.New(md5.New, rawsecret) case HmacSHA1: - h = hmac.New(sha1.New, []byte(rawsecret)) + h = hmac.New(sha1.New, rawsecret) case HmacSHA256: - h = hmac.New(sha256.New, []byte(rawsecret)) + h = hmac.New(sha256.New, rawsecret) case HmacSHA512: - h = hmac.New(sha512.New, []byte(rawsecret)) + h = hmac.New(sha512.New, rawsecret) default: return nil, "", ErrKeyAlg } @@ -133,13 +137,12 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s t.Algorithm = rr.Algorithm t.OrigId = m.Id - tbuf := make([]byte, t.len()) - if off, err := PackRR(t, tbuf, 0, nil, false); err == nil { - tbuf = tbuf[:off] // reset to actual size used - } else { + tbuf := make([]byte, Len(t)) + off, err := PackRR(t, tbuf, 0, nil, false) + if err != nil { return nil, "", err } - mbuf = append(mbuf, tbuf...) + mbuf = append(mbuf, tbuf[:off]...) // Update the ArCount directly in the buffer. binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index 115f2c7bd0ec9..efa342443beae 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -205,9 +205,6 @@ var CertTypeToString = map[uint16]string{ CertOID: "OID", } -// StringToCertType is the reverseof CertTypeToString. -var StringToCertType = reverseInt16(CertTypeToString) - //go:generate go run types_generate.go // Question holds a DNS question. There can be multiple questions in the @@ -218,8 +215,10 @@ type Question struct { Qclass uint16 } -func (q *Question) len() int { - return len(q.Name) + 1 + 2 + 2 +func (q *Question) len(off int, compression map[string]struct{}) int { + l := domainNameLen(q.Name, off, compression, true) + l += 2 + 2 + return l } func (q *Question) String() (s string) { @@ -239,6 +238,25 @@ type ANY struct { func (rr *ANY) String() string { return rr.Hdr.String() } +func (rr *ANY) parse(c *zlexer, origin, file string) *ParseError { + panic("dns: internal error: parse should never be called on ANY") +} + +// NULL RR. See RFC 1035. +type NULL struct { + Hdr RR_Header + Data string `dns:"any"` +} + +func (rr *NULL) String() string { + // There is no presentation format; prefix string with a comment. + return ";" + rr.Hdr.String() + rr.Data +} + +func (rr *NULL) parse(c *zlexer, origin, file string) *ParseError { + panic("dns: internal error: parse should never be called on NULL") +} + // CNAME RR. See RFC 1034. type CNAME struct { Hdr RR_Header @@ -351,7 +369,7 @@ func (rr *X25) String() string { type RT struct { Hdr RR_Header Preference uint16 - Host string `dns:"cdomain-name"` + Host string `dns:"domain-name"` // RFC 3597 prohibits compressing records not defined in RFC 1035. 
} func (rr *RT) String() string { @@ -460,7 +478,7 @@ func sprintTxtOctet(s string) string { case b == '.': dst.WriteByte('.') case b < ' ' || b > '~': - writeEscapedByte(&dst, b) + dst.WriteString(escapeByte(b)) default: dst.WriteByte(b) } @@ -508,20 +526,44 @@ func writeTXTStringByte(s *strings.Builder, b byte) { s.WriteByte('\\') s.WriteByte(b) case b < ' ' || b > '~': - writeEscapedByte(s, b) + s.WriteString(escapeByte(b)) default: s.WriteByte(b) } } -func writeEscapedByte(s *strings.Builder, b byte) { - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s.WriteByte('\\') - for i := len(bufs); i < 3; i++ { - s.WriteByte('0') +const ( + escapedByteSmall = "" + + `\000\001\002\003\004\005\006\007\008\009` + + `\010\011\012\013\014\015\016\017\018\019` + + `\020\021\022\023\024\025\026\027\028\029` + + `\030\031` + escapedByteLarge = `\127\128\129` + + `\130\131\132\133\134\135\136\137\138\139` + + `\140\141\142\143\144\145\146\147\148\149` + + `\150\151\152\153\154\155\156\157\158\159` + + `\160\161\162\163\164\165\166\167\168\169` + + `\170\171\172\173\174\175\176\177\178\179` + + `\180\181\182\183\184\185\186\187\188\189` + + `\190\191\192\193\194\195\196\197\198\199` + + `\200\201\202\203\204\205\206\207\208\209` + + `\210\211\212\213\214\215\216\217\218\219` + + `\220\221\222\223\224\225\226\227\228\229` + + `\230\231\232\233\234\235\236\237\238\239` + + `\240\241\242\243\244\245\246\247\248\249` + + `\250\251\252\253\254\255` +) + +// escapeByte returns the \DDD escaping of b which must +// satisfy b < ' ' || b > '~'. +func escapeByte(b byte) string { + if b < ' ' { + return escapedByteSmall[b*4 : b*4+4] } - s.Write(bufs) + + b -= '~' + 1 + // The cast here is needed as b*4 may overflow byte. + return escapedByteLarge[int(b)*4 : int(b)*4+4] } func nextByte(s string, offset int) (byte, int) { @@ -809,8 +851,9 @@ func (rr *NSEC) String() string { return s } -func (rr *NSEC) len() int { - l := rr.Hdr.len() + len(rr.NextDomain) + 1 +func (rr *NSEC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.NextDomain, off+l, compression, false) lastwindow := uint32(2 ^ 32 + 1) for _, t := range rr.TypeBitMap { window := t / 256 @@ -974,8 +1017,9 @@ func (rr *NSEC3) String() string { return s } -func (rr *NSEC3) len() int { - l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 +func (rr *NSEC3) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 lastwindow := uint32(2 ^ 32 + 1) for _, t := range rr.TypeBitMap { window := t / 256 @@ -1022,10 +1066,16 @@ type TKEY struct { // TKEY has no official presentation format, but this will suffice. 
func (rr *TKEY) String() string { - s := "\n;; TKEY PSEUDOSECTION:\n" - s += rr.Hdr.String() + " " + rr.Algorithm + " " + - strconv.Itoa(int(rr.KeySize)) + " " + rr.Key + " " + - strconv.Itoa(int(rr.OtherLen)) + " " + rr.OtherData + s := ";" + rr.Hdr.String() + + " " + rr.Algorithm + + " " + TimeToString(rr.Inception) + + " " + TimeToString(rr.Expiration) + + " " + strconv.Itoa(int(rr.Mode)) + + " " + strconv.Itoa(int(rr.Error)) + + " " + strconv.Itoa(int(rr.KeySize)) + + " " + rr.Key + + " " + strconv.Itoa(int(rr.OtherLen)) + + " " + rr.OtherData return s } @@ -1291,8 +1341,9 @@ func (rr *CSYNC) String() string { return s } -func (rr *CSYNC) len() int { - l := rr.Hdr.len() + 4 + 2 +func (rr *CSYNC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 4 + 2 lastwindow := uint32(2 ^ 32 + 1) for _, t := range rr.TypeBitMap { window := t / 256 diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go index 6778c3c6cfe98..e7dd8ca313c09 100644 --- a/vendor/github.com/miekg/dns/udp_windows.go +++ b/vendor/github.com/miekg/dns/udp_windows.go @@ -20,15 +20,13 @@ func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { if err != nil { return n, nil, err } - session := &SessionUDP{raddr.(*net.UDPAddr)} - return n, session, err + return n, &SessionUDP{raddr.(*net.UDPAddr)}, err } // WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. // TODO(fastest963): Once go1.10 is released, use WriteMsgUDP. func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { - n, err := conn.WriteTo(b, session.raddr) - return n, err + return conn.WriteTo(b, session.raddr) } // TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go index e90c5c968ec0a..69dd38652224e 100644 --- a/vendor/github.com/miekg/dns/update.go +++ b/vendor/github.com/miekg/dns/update.go @@ -44,7 +44,8 @@ func (u *Msg) RRsetUsed(rr []RR) { u.Answer = make([]RR, 0, len(rr)) } for _, r := range rr { - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) + h := r.Header() + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}}) } } @@ -55,7 +56,8 @@ func (u *Msg) RRsetNotUsed(rr []RR) { u.Answer = make([]RR, 0, len(rr)) } for _, r := range rr { - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}}) + h := r.Header() + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassNONE}}) } } @@ -79,7 +81,8 @@ func (u *Msg) RemoveRRset(rr []RR) { u.Ns = make([]RR, 0, len(rr)) } for _, r := range rr { - u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) + h := r.Header() + u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}}) } } @@ -99,8 +102,9 @@ func (u *Msg) Remove(rr []RR) { u.Ns = make([]RR, 0, len(rr)) } for _, r := range rr { - r.Header().Class = ClassNONE - r.Header().Ttl = 0 + h := r.Header() + h.Class = ClassNONE + h.Ttl = 0 u.Ns = append(u.Ns, r) } } diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index 0b0e9b6d8316a..46d644c58c24b 100644 --- 
a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = V{1, 0, 14} +var Version = V{1, 1, 4} // V holds the version of this library. type V struct { diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go index 5d0ff5c8a2758..82afc52ea82e1 100644 --- a/vendor/github.com/miekg/dns/xfr.go +++ b/vendor/github.com/miekg/dns/xfr.go @@ -35,30 +35,36 @@ type Transfer struct { // channel, err := transfer.In(message, master) // func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { + switch q.Question[0].Qtype { + case TypeAXFR, TypeIXFR: + default: + return nil, &Error{"unsupported question type"} + } + timeout := dnsTimeout if t.DialTimeout != 0 { timeout = t.DialTimeout } + if t.Conn == nil { t.Conn, err = DialTimeout("tcp", a, timeout) if err != nil { return nil, err } } + if err := t.WriteMsg(q); err != nil { return nil, err } + env = make(chan *Envelope) - go func() { - if q.Question[0].Qtype == TypeAXFR { - go t.inAxfr(q, env) - return - } - if q.Question[0].Qtype == TypeIXFR { - go t.inIxfr(q, env) - return - } - }() + switch q.Question[0].Qtype { + case TypeAXFR: + go t.inAxfr(q, env) + case TypeIXFR: + go t.inIxfr(q, env) + } + return env, nil } @@ -111,7 +117,7 @@ func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) { } func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { - serial := uint32(0) // The first serial seen is the current server serial + var serial uint32 // The first serial seen is the current server serial axfr := true n := 0 qser := q.Ns[0].(*SOA).Serial @@ -237,24 +243,18 @@ func (t *Transfer) WriteMsg(m *Msg) (err error) { if err != nil { return err } - if _, err = t.Write(out); err != nil { - return err - } - return nil + _, err = t.Write(out) + return err } func isSOAFirst(in *Msg) bool { - if len(in.Answer) > 0 { - return in.Answer[0].Header().Rrtype == TypeSOA - } - return false + return len(in.Answer) > 0 && + in.Answer[0].Header().Rrtype == TypeSOA } func isSOALast(in *Msg) bool { - if len(in.Answer) > 0 { - return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA - } - return false + return len(in.Answer) > 0 && + in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA } const errXFR = "bad xfr rcode: %d" diff --git a/vendor/github.com/miekg/dns/zcompress.go b/vendor/github.com/miekg/dns/zcompress.go deleted file mode 100644 index 6391a3501fccf..0000000000000 --- a/vendor/github.com/miekg/dns/zcompress.go +++ /dev/null @@ -1,152 +0,0 @@ -// Code generated by "go run compress_generate.go"; DO NOT EDIT. 
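// Editor's note: an illustrative usage sketch (not part of the vendored diff) for the
// Transfer.In refactor above: the question type is now validated before dialing, so a
// message that is neither AXFR nor IXFR fails immediately with an error instead of
// yielding an envelope channel that never produces anything. The zone and server
// address below are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetAxfr("example.org.") // TypeAXFR; TypeIXFR is the only other accepted Qtype

	t := new(dns.Transfer)
	env, err := t.In(m, "ns1.example.org:53")
	if err != nil {
		log.Fatal(err) // e.g. "dns: unsupported question type" for other Qtypes
	}
	for e := range env {
		if e.Error != nil {
			log.Fatal(e.Error)
		}
		for _, rr := range e.RR {
			fmt.Println(rr)
		}
	}
}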
- -package dns - -func compressionLenHelperType(c map[string]int, r RR, initLen int) int { - currentLen := initLen - switch x := r.(type) { - case *AFSDB: - currentLen -= len(x.Hostname) + 1 - currentLen += compressionLenHelper(c, x.Hostname, currentLen) - case *CNAME: - currentLen -= len(x.Target) + 1 - currentLen += compressionLenHelper(c, x.Target, currentLen) - case *DNAME: - currentLen -= len(x.Target) + 1 - currentLen += compressionLenHelper(c, x.Target, currentLen) - case *HIP: - for i := range x.RendezvousServers { - currentLen -= len(x.RendezvousServers[i]) + 1 - } - for i := range x.RendezvousServers { - currentLen += compressionLenHelper(c, x.RendezvousServers[i], currentLen) - } - case *KX: - currentLen -= len(x.Exchanger) + 1 - currentLen += compressionLenHelper(c, x.Exchanger, currentLen) - case *LP: - currentLen -= len(x.Fqdn) + 1 - currentLen += compressionLenHelper(c, x.Fqdn, currentLen) - case *MB: - currentLen -= len(x.Mb) + 1 - currentLen += compressionLenHelper(c, x.Mb, currentLen) - case *MD: - currentLen -= len(x.Md) + 1 - currentLen += compressionLenHelper(c, x.Md, currentLen) - case *MF: - currentLen -= len(x.Mf) + 1 - currentLen += compressionLenHelper(c, x.Mf, currentLen) - case *MG: - currentLen -= len(x.Mg) + 1 - currentLen += compressionLenHelper(c, x.Mg, currentLen) - case *MINFO: - currentLen -= len(x.Rmail) + 1 - currentLen += compressionLenHelper(c, x.Rmail, currentLen) - currentLen -= len(x.Email) + 1 - currentLen += compressionLenHelper(c, x.Email, currentLen) - case *MR: - currentLen -= len(x.Mr) + 1 - currentLen += compressionLenHelper(c, x.Mr, currentLen) - case *MX: - currentLen -= len(x.Mx) + 1 - currentLen += compressionLenHelper(c, x.Mx, currentLen) - case *NAPTR: - currentLen -= len(x.Replacement) + 1 - currentLen += compressionLenHelper(c, x.Replacement, currentLen) - case *NS: - currentLen -= len(x.Ns) + 1 - currentLen += compressionLenHelper(c, x.Ns, currentLen) - case *NSAPPTR: - currentLen -= len(x.Ptr) + 1 - currentLen += compressionLenHelper(c, x.Ptr, currentLen) - case *NSEC: - currentLen -= len(x.NextDomain) + 1 - currentLen += compressionLenHelper(c, x.NextDomain, currentLen) - case *PTR: - currentLen -= len(x.Ptr) + 1 - currentLen += compressionLenHelper(c, x.Ptr, currentLen) - case *PX: - currentLen -= len(x.Map822) + 1 - currentLen += compressionLenHelper(c, x.Map822, currentLen) - currentLen -= len(x.Mapx400) + 1 - currentLen += compressionLenHelper(c, x.Mapx400, currentLen) - case *RP: - currentLen -= len(x.Mbox) + 1 - currentLen += compressionLenHelper(c, x.Mbox, currentLen) - currentLen -= len(x.Txt) + 1 - currentLen += compressionLenHelper(c, x.Txt, currentLen) - case *RRSIG: - currentLen -= len(x.SignerName) + 1 - currentLen += compressionLenHelper(c, x.SignerName, currentLen) - case *RT: - currentLen -= len(x.Host) + 1 - currentLen += compressionLenHelper(c, x.Host, currentLen) - case *SIG: - currentLen -= len(x.SignerName) + 1 - currentLen += compressionLenHelper(c, x.SignerName, currentLen) - case *SOA: - currentLen -= len(x.Ns) + 1 - currentLen += compressionLenHelper(c, x.Ns, currentLen) - currentLen -= len(x.Mbox) + 1 - currentLen += compressionLenHelper(c, x.Mbox, currentLen) - case *SRV: - currentLen -= len(x.Target) + 1 - currentLen += compressionLenHelper(c, x.Target, currentLen) - case *TALINK: - currentLen -= len(x.PreviousName) + 1 - currentLen += compressionLenHelper(c, x.PreviousName, currentLen) - currentLen -= len(x.NextName) + 1 - currentLen += compressionLenHelper(c, x.NextName, currentLen) - case *TKEY: - 
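// Editor's note: an illustrative usage sketch (not part of the vendored diff) for the
// dynamic-update helpers touched in the update.go hunks earlier in this diff
// (RRsetUsed, RRsetNotUsed, RemoveRRset, Remove): they only build the prerequisite and
// update sections of an RFC 2136 message; sending it is up to the caller. The zone,
// record and server below are placeholders.
package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("www.example.org. 300 IN A 192.0.2.1")
	if err != nil {
		log.Fatal(err)
	}

	m := new(dns.Msg)
	m.SetUpdate("example.org.") // zone the update applies to
	m.RemoveRRset([]dns.RR{rr}) // delete any existing www A RRset ...
	m.Insert([]dns.RR{rr})      // ... then add the new record

	c := new(dns.Client)
	if _, _, err := c.Exchange(m, "ns1.example.org:53"); err != nil {
		log.Fatal(err)
	}
}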
currentLen -= len(x.Algorithm) + 1 - currentLen += compressionLenHelper(c, x.Algorithm, currentLen) - case *TSIG: - currentLen -= len(x.Algorithm) + 1 - currentLen += compressionLenHelper(c, x.Algorithm, currentLen) - } - return currentLen - initLen -} - -func compressionLenSearchType(c map[string]int, r RR) (int, bool, int) { - switch x := r.(type) { - case *CNAME: - k1, ok1, sz1 := compressionLenSearch(c, x.Target) - return k1, ok1, sz1 - case *MB: - k1, ok1, sz1 := compressionLenSearch(c, x.Mb) - return k1, ok1, sz1 - case *MD: - k1, ok1, sz1 := compressionLenSearch(c, x.Md) - return k1, ok1, sz1 - case *MF: - k1, ok1, sz1 := compressionLenSearch(c, x.Mf) - return k1, ok1, sz1 - case *MG: - k1, ok1, sz1 := compressionLenSearch(c, x.Mg) - return k1, ok1, sz1 - case *MINFO: - k1, ok1, sz1 := compressionLenSearch(c, x.Rmail) - k2, ok2, sz2 := compressionLenSearch(c, x.Email) - return k1 + k2, ok1 && ok2, sz1 + sz2 - case *MR: - k1, ok1, sz1 := compressionLenSearch(c, x.Mr) - return k1, ok1, sz1 - case *MX: - k1, ok1, sz1 := compressionLenSearch(c, x.Mx) - return k1, ok1, sz1 - case *NS: - k1, ok1, sz1 := compressionLenSearch(c, x.Ns) - return k1, ok1, sz1 - case *PTR: - k1, ok1, sz1 := compressionLenSearch(c, x.Ptr) - return k1, ok1, sz1 - case *RT: - k1, ok1, sz1 := compressionLenSearch(c, x.Host) - return k1, ok1, sz1 - case *SOA: - k1, ok1, sz1 := compressionLenSearch(c, x.Ns) - k2, ok2, sz2 := compressionLenSearch(c, x.Mbox) - return k1 + k2, ok1 && ok2, sz1 + sz2 - } - return 0, false, 0 -} diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go index ba9863b233faf..81e99e0d4cd39 100644 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -2,164 +2,38 @@ package dns -// isDuplicateRdata calls the rdata specific functions -func isDuplicateRdata(r1, r2 RR) bool { - switch r1.Header().Rrtype { - case TypeA: - return isDuplicateA(r1.(*A), r2.(*A)) - case TypeAAAA: - return isDuplicateAAAA(r1.(*AAAA), r2.(*AAAA)) - case TypeAFSDB: - return isDuplicateAFSDB(r1.(*AFSDB), r2.(*AFSDB)) - case TypeAVC: - return isDuplicateAVC(r1.(*AVC), r2.(*AVC)) - case TypeCAA: - return isDuplicateCAA(r1.(*CAA), r2.(*CAA)) - case TypeCERT: - return isDuplicateCERT(r1.(*CERT), r2.(*CERT)) - case TypeCNAME: - return isDuplicateCNAME(r1.(*CNAME), r2.(*CNAME)) - case TypeCSYNC: - return isDuplicateCSYNC(r1.(*CSYNC), r2.(*CSYNC)) - case TypeDHCID: - return isDuplicateDHCID(r1.(*DHCID), r2.(*DHCID)) - case TypeDNAME: - return isDuplicateDNAME(r1.(*DNAME), r2.(*DNAME)) - case TypeDNSKEY: - return isDuplicateDNSKEY(r1.(*DNSKEY), r2.(*DNSKEY)) - case TypeDS: - return isDuplicateDS(r1.(*DS), r2.(*DS)) - case TypeEID: - return isDuplicateEID(r1.(*EID), r2.(*EID)) - case TypeEUI48: - return isDuplicateEUI48(r1.(*EUI48), r2.(*EUI48)) - case TypeEUI64: - return isDuplicateEUI64(r1.(*EUI64), r2.(*EUI64)) - case TypeGID: - return isDuplicateGID(r1.(*GID), r2.(*GID)) - case TypeGPOS: - return isDuplicateGPOS(r1.(*GPOS), r2.(*GPOS)) - case TypeHINFO: - return isDuplicateHINFO(r1.(*HINFO), r2.(*HINFO)) - case TypeHIP: - return isDuplicateHIP(r1.(*HIP), r2.(*HIP)) - case TypeKX: - return isDuplicateKX(r1.(*KX), r2.(*KX)) - case TypeL32: - return isDuplicateL32(r1.(*L32), r2.(*L32)) - case TypeL64: - return isDuplicateL64(r1.(*L64), r2.(*L64)) - case TypeLOC: - return isDuplicateLOC(r1.(*LOC), r2.(*LOC)) - case TypeLP: - return isDuplicateLP(r1.(*LP), r2.(*LP)) - case TypeMB: - return isDuplicateMB(r1.(*MB), r2.(*MB)) - case TypeMD: - return 
isDuplicateMD(r1.(*MD), r2.(*MD)) - case TypeMF: - return isDuplicateMF(r1.(*MF), r2.(*MF)) - case TypeMG: - return isDuplicateMG(r1.(*MG), r2.(*MG)) - case TypeMINFO: - return isDuplicateMINFO(r1.(*MINFO), r2.(*MINFO)) - case TypeMR: - return isDuplicateMR(r1.(*MR), r2.(*MR)) - case TypeMX: - return isDuplicateMX(r1.(*MX), r2.(*MX)) - case TypeNAPTR: - return isDuplicateNAPTR(r1.(*NAPTR), r2.(*NAPTR)) - case TypeNID: - return isDuplicateNID(r1.(*NID), r2.(*NID)) - case TypeNIMLOC: - return isDuplicateNIMLOC(r1.(*NIMLOC), r2.(*NIMLOC)) - case TypeNINFO: - return isDuplicateNINFO(r1.(*NINFO), r2.(*NINFO)) - case TypeNS: - return isDuplicateNS(r1.(*NS), r2.(*NS)) - case TypeNSAPPTR: - return isDuplicateNSAPPTR(r1.(*NSAPPTR), r2.(*NSAPPTR)) - case TypeNSEC: - return isDuplicateNSEC(r1.(*NSEC), r2.(*NSEC)) - case TypeNSEC3: - return isDuplicateNSEC3(r1.(*NSEC3), r2.(*NSEC3)) - case TypeNSEC3PARAM: - return isDuplicateNSEC3PARAM(r1.(*NSEC3PARAM), r2.(*NSEC3PARAM)) - case TypeOPENPGPKEY: - return isDuplicateOPENPGPKEY(r1.(*OPENPGPKEY), r2.(*OPENPGPKEY)) - case TypePTR: - return isDuplicatePTR(r1.(*PTR), r2.(*PTR)) - case TypePX: - return isDuplicatePX(r1.(*PX), r2.(*PX)) - case TypeRKEY: - return isDuplicateRKEY(r1.(*RKEY), r2.(*RKEY)) - case TypeRP: - return isDuplicateRP(r1.(*RP), r2.(*RP)) - case TypeRRSIG: - return isDuplicateRRSIG(r1.(*RRSIG), r2.(*RRSIG)) - case TypeRT: - return isDuplicateRT(r1.(*RT), r2.(*RT)) - case TypeSMIMEA: - return isDuplicateSMIMEA(r1.(*SMIMEA), r2.(*SMIMEA)) - case TypeSOA: - return isDuplicateSOA(r1.(*SOA), r2.(*SOA)) - case TypeSPF: - return isDuplicateSPF(r1.(*SPF), r2.(*SPF)) - case TypeSRV: - return isDuplicateSRV(r1.(*SRV), r2.(*SRV)) - case TypeSSHFP: - return isDuplicateSSHFP(r1.(*SSHFP), r2.(*SSHFP)) - case TypeTA: - return isDuplicateTA(r1.(*TA), r2.(*TA)) - case TypeTALINK: - return isDuplicateTALINK(r1.(*TALINK), r2.(*TALINK)) - case TypeTKEY: - return isDuplicateTKEY(r1.(*TKEY), r2.(*TKEY)) - case TypeTLSA: - return isDuplicateTLSA(r1.(*TLSA), r2.(*TLSA)) - case TypeTSIG: - return isDuplicateTSIG(r1.(*TSIG), r2.(*TSIG)) - case TypeTXT: - return isDuplicateTXT(r1.(*TXT), r2.(*TXT)) - case TypeUID: - return isDuplicateUID(r1.(*UID), r2.(*UID)) - case TypeUINFO: - return isDuplicateUINFO(r1.(*UINFO), r2.(*UINFO)) - case TypeURI: - return isDuplicateURI(r1.(*URI), r2.(*URI)) - case TypeX25: - return isDuplicateX25(r1.(*X25), r2.(*X25)) - } - return false -} - // isDuplicate() functions -func isDuplicateA(r1, r2 *A) bool { - if len(r1.A) != len(r2.A) { +func (r1 *A) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*A) + if !ok { return false } - for i := 0; i < len(r1.A); i++ { - if r1.A[i] != r2.A[i] { - return false - } + _ = r2 + if !r1.A.Equal(r2.A) { + return false } return true } -func isDuplicateAAAA(r1, r2 *AAAA) bool { - if len(r1.AAAA) != len(r2.AAAA) { +func (r1 *AAAA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*AAAA) + if !ok { return false } - for i := 0; i < len(r1.AAAA); i++ { - if r1.AAAA[i] != r2.AAAA[i] { - return false - } + _ = r2 + if !r1.AAAA.Equal(r2.AAAA) { + return false } return true } -func isDuplicateAFSDB(r1, r2 *AFSDB) bool { +func (r1 *AFSDB) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*AFSDB) + if !ok { + return false + } + _ = r2 if r1.Subtype != r2.Subtype { return false } @@ -169,7 +43,21 @@ func isDuplicateAFSDB(r1, r2 *AFSDB) bool { return true } -func isDuplicateAVC(r1, r2 *AVC) bool { +func (r1 *ANY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*ANY) + if !ok { + return false + } + _ = r2 + return true +} + +func (r1 
*AVC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*AVC) + if !ok { + return false + } + _ = r2 if len(r1.Txt) != len(r2.Txt) { return false } @@ -181,7 +69,12 @@ func isDuplicateAVC(r1, r2 *AVC) bool { return true } -func isDuplicateCAA(r1, r2 *CAA) bool { +func (r1 *CAA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CAA) + if !ok { + return false + } + _ = r2 if r1.Flag != r2.Flag { return false } @@ -194,7 +87,12 @@ func isDuplicateCAA(r1, r2 *CAA) bool { return true } -func isDuplicateCERT(r1, r2 *CERT) bool { +func (r1 *CERT) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CERT) + if !ok { + return false + } + _ = r2 if r1.Type != r2.Type { return false } @@ -210,14 +108,24 @@ func isDuplicateCERT(r1, r2 *CERT) bool { return true } -func isDuplicateCNAME(r1, r2 *CNAME) bool { +func (r1 *CNAME) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CNAME) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Target, r2.Target) { return false } return true } -func isDuplicateCSYNC(r1, r2 *CSYNC) bool { +func (r1 *CSYNC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CSYNC) + if !ok { + return false + } + _ = r2 if r1.Serial != r2.Serial { return false } @@ -235,21 +143,36 @@ func isDuplicateCSYNC(r1, r2 *CSYNC) bool { return true } -func isDuplicateDHCID(r1, r2 *DHCID) bool { +func (r1 *DHCID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*DHCID) + if !ok { + return false + } + _ = r2 if r1.Digest != r2.Digest { return false } return true } -func isDuplicateDNAME(r1, r2 *DNAME) bool { +func (r1 *DNAME) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*DNAME) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Target, r2.Target) { return false } return true } -func isDuplicateDNSKEY(r1, r2 *DNSKEY) bool { +func (r1 *DNSKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*DNSKEY) + if !ok { + return false + } + _ = r2 if r1.Flags != r2.Flags { return false } @@ -265,7 +188,12 @@ func isDuplicateDNSKEY(r1, r2 *DNSKEY) bool { return true } -func isDuplicateDS(r1, r2 *DS) bool { +func (r1 *DS) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*DS) + if !ok { + return false + } + _ = r2 if r1.KeyTag != r2.KeyTag { return false } @@ -281,35 +209,60 @@ func isDuplicateDS(r1, r2 *DS) bool { return true } -func isDuplicateEID(r1, r2 *EID) bool { +func (r1 *EID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*EID) + if !ok { + return false + } + _ = r2 if r1.Endpoint != r2.Endpoint { return false } return true } -func isDuplicateEUI48(r1, r2 *EUI48) bool { +func (r1 *EUI48) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*EUI48) + if !ok { + return false + } + _ = r2 if r1.Address != r2.Address { return false } return true } -func isDuplicateEUI64(r1, r2 *EUI64) bool { +func (r1 *EUI64) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*EUI64) + if !ok { + return false + } + _ = r2 if r1.Address != r2.Address { return false } return true } -func isDuplicateGID(r1, r2 *GID) bool { +func (r1 *GID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*GID) + if !ok { + return false + } + _ = r2 if r1.Gid != r2.Gid { return false } return true } -func isDuplicateGPOS(r1, r2 *GPOS) bool { +func (r1 *GPOS) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*GPOS) + if !ok { + return false + } + _ = r2 if r1.Longitude != r2.Longitude { return false } @@ -322,7 +275,12 @@ func isDuplicateGPOS(r1, r2 *GPOS) bool { return true } -func isDuplicateHINFO(r1, r2 *HINFO) bool { +func (r1 *HINFO) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*HINFO) + if !ok { + return false + } + _ = r2 if r1.Cpu != r2.Cpu { return false } @@ -332,7 +290,12 @@ func 
isDuplicateHINFO(r1, r2 *HINFO) bool { return true } -func isDuplicateHIP(r1, r2 *HIP) bool { +func (r1 *HIP) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*HIP) + if !ok { + return false + } + _ = r2 if r1.HitLength != r2.HitLength { return false } @@ -359,7 +322,12 @@ func isDuplicateHIP(r1, r2 *HIP) bool { return true } -func isDuplicateKX(r1, r2 *KX) bool { +func (r1 *KX) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*KX) + if !ok { + return false + } + _ = r2 if r1.Preference != r2.Preference { return false } @@ -369,22 +337,27 @@ func isDuplicateKX(r1, r2 *KX) bool { return true } -func isDuplicateL32(r1, r2 *L32) bool { - if r1.Preference != r2.Preference { +func (r1 *L32) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*L32) + if !ok { return false } - if len(r1.Locator32) != len(r2.Locator32) { + _ = r2 + if r1.Preference != r2.Preference { return false } - for i := 0; i < len(r1.Locator32); i++ { - if r1.Locator32[i] != r2.Locator32[i] { - return false - } + if !r1.Locator32.Equal(r2.Locator32) { + return false } return true } -func isDuplicateL64(r1, r2 *L64) bool { +func (r1 *L64) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*L64) + if !ok { + return false + } + _ = r2 if r1.Preference != r2.Preference { return false } @@ -394,7 +367,12 @@ func isDuplicateL64(r1, r2 *L64) bool { return true } -func isDuplicateLOC(r1, r2 *LOC) bool { +func (r1 *LOC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*LOC) + if !ok { + return false + } + _ = r2 if r1.Version != r2.Version { return false } @@ -419,7 +397,12 @@ func isDuplicateLOC(r1, r2 *LOC) bool { return true } -func isDuplicateLP(r1, r2 *LP) bool { +func (r1 *LP) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*LP) + if !ok { + return false + } + _ = r2 if r1.Preference != r2.Preference { return false } @@ -429,35 +412,60 @@ func isDuplicateLP(r1, r2 *LP) bool { return true } -func isDuplicateMB(r1, r2 *MB) bool { +func (r1 *MB) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MB) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Mb, r2.Mb) { return false } return true } -func isDuplicateMD(r1, r2 *MD) bool { +func (r1 *MD) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MD) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Md, r2.Md) { return false } return true } -func isDuplicateMF(r1, r2 *MF) bool { +func (r1 *MF) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MF) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Mf, r2.Mf) { return false } return true } -func isDuplicateMG(r1, r2 *MG) bool { +func (r1 *MG) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MG) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Mg, r2.Mg) { return false } return true } -func isDuplicateMINFO(r1, r2 *MINFO) bool { +func (r1 *MINFO) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MINFO) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Rmail, r2.Rmail) { return false } @@ -467,14 +475,24 @@ func isDuplicateMINFO(r1, r2 *MINFO) bool { return true } -func isDuplicateMR(r1, r2 *MR) bool { +func (r1 *MR) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MR) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Mr, r2.Mr) { return false } return true } -func isDuplicateMX(r1, r2 *MX) bool { +func (r1 *MX) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*MX) + if !ok { + return false + } + _ = r2 if r1.Preference != r2.Preference { return false } @@ -484,7 +502,12 @@ func isDuplicateMX(r1, r2 *MX) bool { return true } -func isDuplicateNAPTR(r1, r2 *NAPTR) bool { +func (r1 *NAPTR) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NAPTR) + 
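// Editor's note: a small standalone sketch (not vendored code) of the comparison change
// visible above: A, AAAA and L32 rdata are now compared with net.IP.Equal instead of a
// manual byte-by-byte loop, which also treats a 4-byte IPv4 address and its 16-byte
// IPv4-in-IPv6 form as equal.
package main

import (
	"bytes"
	"fmt"
	"net"
)

func main() {
	a := net.ParseIP("192.0.2.1")     // 16-byte IPv4-in-IPv6 representation
	b := net.IPv4(192, 0, 2, 1).To4() // 4-byte representation

	fmt.Println(a.Equal(b))        // true: Equal normalises both forms
	fmt.Println(bytes.Equal(a, b)) // false: the raw byte slices differ in length
}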
if !ok { + return false + } + _ = r2 if r1.Order != r2.Order { return false } @@ -506,7 +529,12 @@ func isDuplicateNAPTR(r1, r2 *NAPTR) bool { return true } -func isDuplicateNID(r1, r2 *NID) bool { +func (r1 *NID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NID) + if !ok { + return false + } + _ = r2 if r1.Preference != r2.Preference { return false } @@ -516,14 +544,24 @@ func isDuplicateNID(r1, r2 *NID) bool { return true } -func isDuplicateNIMLOC(r1, r2 *NIMLOC) bool { +func (r1 *NIMLOC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NIMLOC) + if !ok { + return false + } + _ = r2 if r1.Locator != r2.Locator { return false } return true } -func isDuplicateNINFO(r1, r2 *NINFO) bool { +func (r1 *NINFO) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NINFO) + if !ok { + return false + } + _ = r2 if len(r1.ZSData) != len(r2.ZSData) { return false } @@ -535,21 +573,36 @@ func isDuplicateNINFO(r1, r2 *NINFO) bool { return true } -func isDuplicateNS(r1, r2 *NS) bool { +func (r1 *NS) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NS) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Ns, r2.Ns) { return false } return true } -func isDuplicateNSAPPTR(r1, r2 *NSAPPTR) bool { +func (r1 *NSAPPTR) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NSAPPTR) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Ptr, r2.Ptr) { return false } return true } -func isDuplicateNSEC(r1, r2 *NSEC) bool { +func (r1 *NSEC) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NSEC) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.NextDomain, r2.NextDomain) { return false } @@ -564,7 +617,12 @@ func isDuplicateNSEC(r1, r2 *NSEC) bool { return true } -func isDuplicateNSEC3(r1, r2 *NSEC3) bool { +func (r1 *NSEC3) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NSEC3) + if !ok { + return false + } + _ = r2 if r1.Hash != r2.Hash { return false } @@ -597,7 +655,12 @@ func isDuplicateNSEC3(r1, r2 *NSEC3) bool { return true } -func isDuplicateNSEC3PARAM(r1, r2 *NSEC3PARAM) bool { +func (r1 *NSEC3PARAM) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NSEC3PARAM) + if !ok { + return false + } + _ = r2 if r1.Hash != r2.Hash { return false } @@ -616,21 +679,48 @@ func isDuplicateNSEC3PARAM(r1, r2 *NSEC3PARAM) bool { return true } -func isDuplicateOPENPGPKEY(r1, r2 *OPENPGPKEY) bool { +func (r1 *NULL) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NULL) + if !ok { + return false + } + _ = r2 + if r1.Data != r2.Data { + return false + } + return true +} + +func (r1 *OPENPGPKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*OPENPGPKEY) + if !ok { + return false + } + _ = r2 if r1.PublicKey != r2.PublicKey { return false } return true } -func isDuplicatePTR(r1, r2 *PTR) bool { +func (r1 *PTR) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*PTR) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Ptr, r2.Ptr) { return false } return true } -func isDuplicatePX(r1, r2 *PX) bool { +func (r1 *PX) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*PX) + if !ok { + return false + } + _ = r2 if r1.Preference != r2.Preference { return false } @@ -643,7 +733,24 @@ func isDuplicatePX(r1, r2 *PX) bool { return true } -func isDuplicateRKEY(r1, r2 *RKEY) bool { +func (r1 *RFC3597) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RFC3597) + if !ok { + return false + } + _ = r2 + if r1.Rdata != r2.Rdata { + return false + } + return true +} + +func (r1 *RKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RKEY) + if !ok { + return false + } + _ = r2 if r1.Flags != r2.Flags { return false } @@ -659,7 +766,12 @@ func isDuplicateRKEY(r1, r2 *RKEY) bool { 
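// Editor's note: a standalone sketch (illustrative names, not the vendored API) of the
// structural pattern this zduplicate.go rewrite moves to: the single central switch on
// Rrtype is replaced by a per-type method that starts with a type assertion on the other
// record, so records of different concrete types are never duplicates and new types do
// not require touching a central dispatcher.
package main

import "fmt"

type record interface {
	sameRdata(other record) bool
}

type aRecord struct{ addr string }

type mxRecord struct {
	pref uint16
	host string
}

func (r *aRecord) sameRdata(other record) bool {
	o, ok := other.(*aRecord) // different concrete type: not a duplicate
	if !ok {
		return false
	}
	return r.addr == o.addr
}

func (r *mxRecord) sameRdata(other record) bool {
	o, ok := other.(*mxRecord)
	if !ok {
		return false
	}
	return r.pref == o.pref && r.host == o.host
}

func main() {
	fmt.Println((&aRecord{"192.0.2.1"}).sameRdata(&aRecord{"192.0.2.1"}))        // true
	fmt.Println((&aRecord{"192.0.2.1"}).sameRdata(&mxRecord{10, "mx.example."})) // false
}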
return true } -func isDuplicateRP(r1, r2 *RP) bool { +func (r1 *RP) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RP) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Mbox, r2.Mbox) { return false } @@ -669,7 +781,12 @@ func isDuplicateRP(r1, r2 *RP) bool { return true } -func isDuplicateRRSIG(r1, r2 *RRSIG) bool { +func (r1 *RRSIG) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RRSIG) + if !ok { + return false + } + _ = r2 if r1.TypeCovered != r2.TypeCovered { return false } @@ -700,7 +817,12 @@ func isDuplicateRRSIG(r1, r2 *RRSIG) bool { return true } -func isDuplicateRT(r1, r2 *RT) bool { +func (r1 *RT) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*RT) + if !ok { + return false + } + _ = r2 if r1.Preference != r2.Preference { return false } @@ -710,7 +832,12 @@ func isDuplicateRT(r1, r2 *RT) bool { return true } -func isDuplicateSMIMEA(r1, r2 *SMIMEA) bool { +func (r1 *SMIMEA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SMIMEA) + if !ok { + return false + } + _ = r2 if r1.Usage != r2.Usage { return false } @@ -726,7 +853,12 @@ func isDuplicateSMIMEA(r1, r2 *SMIMEA) bool { return true } -func isDuplicateSOA(r1, r2 *SOA) bool { +func (r1 *SOA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SOA) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Ns, r2.Ns) { return false } @@ -751,7 +883,12 @@ func isDuplicateSOA(r1, r2 *SOA) bool { return true } -func isDuplicateSPF(r1, r2 *SPF) bool { +func (r1 *SPF) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SPF) + if !ok { + return false + } + _ = r2 if len(r1.Txt) != len(r2.Txt) { return false } @@ -763,7 +900,12 @@ func isDuplicateSPF(r1, r2 *SPF) bool { return true } -func isDuplicateSRV(r1, r2 *SRV) bool { +func (r1 *SRV) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SRV) + if !ok { + return false + } + _ = r2 if r1.Priority != r2.Priority { return false } @@ -779,7 +921,12 @@ func isDuplicateSRV(r1, r2 *SRV) bool { return true } -func isDuplicateSSHFP(r1, r2 *SSHFP) bool { +func (r1 *SSHFP) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SSHFP) + if !ok { + return false + } + _ = r2 if r1.Algorithm != r2.Algorithm { return false } @@ -792,7 +939,12 @@ func isDuplicateSSHFP(r1, r2 *SSHFP) bool { return true } -func isDuplicateTA(r1, r2 *TA) bool { +func (r1 *TA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TA) + if !ok { + return false + } + _ = r2 if r1.KeyTag != r2.KeyTag { return false } @@ -808,7 +960,12 @@ func isDuplicateTA(r1, r2 *TA) bool { return true } -func isDuplicateTALINK(r1, r2 *TALINK) bool { +func (r1 *TALINK) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TALINK) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.PreviousName, r2.PreviousName) { return false } @@ -818,7 +975,12 @@ func isDuplicateTALINK(r1, r2 *TALINK) bool { return true } -func isDuplicateTKEY(r1, r2 *TKEY) bool { +func (r1 *TKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TKEY) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Algorithm, r2.Algorithm) { return false } @@ -849,7 +1011,12 @@ func isDuplicateTKEY(r1, r2 *TKEY) bool { return true } -func isDuplicateTLSA(r1, r2 *TLSA) bool { +func (r1 *TLSA) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TLSA) + if !ok { + return false + } + _ = r2 if r1.Usage != r2.Usage { return false } @@ -865,7 +1032,12 @@ func isDuplicateTLSA(r1, r2 *TLSA) bool { return true } -func isDuplicateTSIG(r1, r2 *TSIG) bool { +func (r1 *TSIG) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TSIG) + if !ok { + return false + } + _ = r2 if !isDulicateName(r1.Algorithm, r2.Algorithm) { return false } @@ 
-896,7 +1068,12 @@ func isDuplicateTSIG(r1, r2 *TSIG) bool { return true } -func isDuplicateTXT(r1, r2 *TXT) bool { +func (r1 *TXT) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*TXT) + if !ok { + return false + } + _ = r2 if len(r1.Txt) != len(r2.Txt) { return false } @@ -908,21 +1085,36 @@ func isDuplicateTXT(r1, r2 *TXT) bool { return true } -func isDuplicateUID(r1, r2 *UID) bool { +func (r1 *UID) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*UID) + if !ok { + return false + } + _ = r2 if r1.Uid != r2.Uid { return false } return true } -func isDuplicateUINFO(r1, r2 *UINFO) bool { +func (r1 *UINFO) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*UINFO) + if !ok { + return false + } + _ = r2 if r1.Uinfo != r2.Uinfo { return false } return true } -func isDuplicateURI(r1, r2 *URI) bool { +func (r1 *URI) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*URI) + if !ok { + return false + } + _ = r2 if r1.Priority != r2.Priority { return false } @@ -935,7 +1127,12 @@ func isDuplicateURI(r1, r2 *URI) bool { return true } -func isDuplicateX25(r1, r2 *X25) bool { +func (r1 *X25) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*X25) + if !ok { + return false + } + _ = r2 if r1.PSDNAddress != r2.PSDNAddress { return false } diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go index 1a68f74da25e8..c4cf4757e8a67 100644 --- a/vendor/github.com/miekg/dns/zmsg.go +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -4,82 +4,47 @@ package dns // pack*() functions -func (rr *A) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *A) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packDataA(rr.A, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *AAAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *AAAA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packDataAAAA(rr.AAAA, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *AFSDB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *AFSDB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Subtype, msg, off) if err != nil { return off, err } - off, err = PackDomainName(rr.Hostname, msg, off, compression, false) + off, err = packDomainName(rr.Hostname, msg, off, compression, false) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *ANY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - rr.Header().Rdlength = uint16(off - headerEnd) +func (rr *ANY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { return off, nil } -func (rr *AVC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, 
error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *AVC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packStringTxt(rr.Txt, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *CAA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint8(rr.Flag, msg, off) if err != nil { return off, err @@ -92,16 +57,10 @@ func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *CDNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *CDNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Flags, msg, off) if err != nil { return off, err @@ -118,16 +77,10 @@ func (rr *CDNSKEY) pack(msg []byte, off int, compression map[string]int, compres if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *CDS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *CDS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.KeyTag, msg, off) if err != nil { return off, err @@ -144,16 +97,10 @@ func (rr *CDS) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *CERT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *CERT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Type, msg, off) if err != nil { return off, err @@ -170,30 +117,18 @@ func (rr *CERT) pack(msg []byte, off int, compression map[string]int, compress b if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *CNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Target, msg, off, compression, compress) +func (rr *CNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Target, msg, off, compression, compress) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *CSYNC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != 
nil { - return off, err - } - headerEnd := off +func (rr *CSYNC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint32(rr.Serial, msg, off) if err != nil { return off, err @@ -206,30 +141,18 @@ func (rr *CSYNC) pack(msg []byte, off int, compression map[string]int, compress if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *DHCID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *DHCID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packStringBase64(rr.Digest, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *DLV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *DLV) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.KeyTag, msg, off) if err != nil { return off, err @@ -246,30 +169,18 @@ func (rr *DLV) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *DNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Target, msg, off, compression, false) +func (rr *DNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Target, msg, off, compression, false) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *DNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *DNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Flags, msg, off) if err != nil { return off, err @@ -286,16 +197,10 @@ func (rr *DNSKEY) pack(msg []byte, off int, compression map[string]int, compress if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *DS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *DS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.KeyTag, msg, off) if err != nil { return off, err @@ -312,72 +217,42 @@ func (rr *DS) pack(msg []byte, off int, compression map[string]int, compress boo if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *EID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *EID) pack(msg 
[]byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packStringHex(rr.Endpoint, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *EUI48) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *EUI48) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint48(rr.Address, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *EUI64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *EUI64) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint64(rr.Address, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *GID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *GID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint32(rr.Gid, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *GPOS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *GPOS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packString(rr.Longitude, msg, off) if err != nil { return off, err @@ -390,16 +265,10 @@ func (rr *GPOS) pack(msg []byte, off int, compression map[string]int, compress b if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *HINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *HINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packString(rr.Cpu, msg, off) if err != nil { return off, err @@ -408,16 +277,10 @@ func (rr *HINFO) pack(msg []byte, off int, compression map[string]int, compress if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *HIP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *HIP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint8(rr.HitLength, msg, off) if err != nil { return off, err @@ -438,20 +301,14 @@ func (rr *HIP) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, compress) + off, err = packDataDomainNames(rr.RendezvousServers, 
msg, off, compression, false) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *KEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Flags, msg, off) if err != nil { return off, err @@ -468,34 +325,22 @@ func (rr *KEY) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *KX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *KX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err } - off, err = PackDomainName(rr.Exchanger, msg, off, compression, false) + off, err = packDomainName(rr.Exchanger, msg, off, compression, false) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *L32) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *L32) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err @@ -504,16 +349,10 @@ func (rr *L32) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *L64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *L64) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err @@ -522,16 +361,10 @@ func (rr *L64) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *LOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *LOC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint8(rr.Version, msg, off) if err != nil { return off, err @@ -560,140 +393,86 @@ func (rr *LOC) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *LP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *LP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = 
packUint16(rr.Preference, msg, off) if err != nil { return off, err } - off, err = PackDomainName(rr.Fqdn, msg, off, compression, false) + off, err = packDomainName(rr.Fqdn, msg, off, compression, false) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *MB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Mb, msg, off, compression, compress) +func (rr *MB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mb, msg, off, compression, compress) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *MD) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Md, msg, off, compression, compress) +func (rr *MD) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Md, msg, off, compression, compress) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *MF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Mf, msg, off, compression, compress) +func (rr *MF) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mf, msg, off, compression, compress) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *MG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) +func (rr *MG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mg, msg, off, compression, compress) if err != nil { return off, err } - headerEnd := off - off, err = PackDomainName(rr.Mg, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *MINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Rmail, msg, off, compression, compress) +func (rr *MINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Rmail, msg, off, compression, compress) if err != nil { return off, err } - off, err = PackDomainName(rr.Email, msg, off, compression, compress) + off, err = packDomainName(rr.Email, msg, off, compression, compress) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *MR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Mr, 
msg, off, compression, compress) +func (rr *MR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mr, msg, off, compression, compress) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *MX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *MX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err } - off, err = PackDomainName(rr.Mx, msg, off, compression, compress) + off, err = packDomainName(rr.Mx, msg, off, compression, compress) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *NAPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Order, msg, off) if err != nil { return off, err @@ -714,20 +493,14 @@ func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress if err != nil { return off, err } - off, err = PackDomainName(rr.Replacement, msg, off, compression, false) + off, err = packDomainName(rr.Replacement, msg, off, compression, false) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *NID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *NID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err @@ -736,73 +509,43 @@ func (rr *NID) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *NIMLOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *NIMLOC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packStringHex(rr.Locator, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *NINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *NINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packStringTxt(rr.ZSData, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *NS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) +func (rr *NS) pack(msg []byte, off int, compression compressionMap, compress bool) 
(off1 int, err error) { + off, err = packDomainName(rr.Ns, msg, off, compression, compress) if err != nil { return off, err } - headerEnd := off - off, err = PackDomainName(rr.Ns, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *NSAPPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Ptr, msg, off, compression, false) +func (rr *NSAPPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Ptr, msg, off, compression, false) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.NextDomain, msg, off, compression, false) +func (rr *NSEC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.NextDomain, msg, off, compression, false) if err != nil { return off, err } @@ -810,16 +553,10 @@ func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, compress b if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *NSEC3) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *NSEC3) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint8(rr.Hash, msg, off) if err != nil { return off, err @@ -855,16 +592,10 @@ func (rr *NSEC3) pack(msg []byte, off int, compression map[string]int, compress if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *NSEC3PARAM) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint8(rr.Hash, msg, off) if err != nil { return off, err @@ -888,94 +619,66 @@ func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, comp return off, err } } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *OPENPGPKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) +func (rr *NULL) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packStringAny(rr.Data, msg, off) if err != nil { return off, err } - headerEnd := off + return off, nil +} + +func (rr *OPENPGPKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packStringBase64(rr.PublicKey, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *OPT) pack(msg []byte, off int, compression map[string]int, 
compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *OPT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packDataOpt(rr.Option, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *PTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Ptr, msg, off, compression, compress) +func (rr *PTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Ptr, msg, off, compression, compress) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *PX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *PX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err } - off, err = PackDomainName(rr.Map822, msg, off, compression, false) + off, err = packDomainName(rr.Map822, msg, off, compression, false) if err != nil { return off, err } - off, err = PackDomainName(rr.Mapx400, msg, off, compression, false) + off, err = packDomainName(rr.Mapx400, msg, off, compression, false) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *RFC3597) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *RFC3597) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packStringHex(rr.Rdata, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *RKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *RKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Flags, msg, off) if err != nil { return off, err @@ -992,34 +695,22 @@ func (rr *RKEY) pack(msg []byte, off int, compression map[string]int, compress b if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *RP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Mbox, msg, off, compression, false) +func (rr *RP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Mbox, msg, off, compression, false) if err != nil { return off, err } - off, err = PackDomainName(rr.Txt, msg, off, compression, false) + off, err = packDomainName(rr.Txt, msg, off, compression, false) if err != nil { return off, err } - 
rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *RRSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.TypeCovered, msg, off) if err != nil { return off, err @@ -1048,7 +739,7 @@ func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, compress if err != nil { return off, err } - off, err = PackDomainName(rr.SignerName, msg, off, compression, false) + off, err = packDomainName(rr.SignerName, msg, off, compression, false) if err != nil { return off, err } @@ -1056,34 +747,22 @@ func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, compress if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *RT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *RT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Preference, msg, off) if err != nil { return off, err } - off, err = PackDomainName(rr.Host, msg, off, compression, compress) + off, err = packDomainName(rr.Host, msg, off, compression, false) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *SIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.TypeCovered, msg, off) if err != nil { return off, err @@ -1112,7 +791,7 @@ func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - off, err = PackDomainName(rr.SignerName, msg, off, compression, false) + off, err = packDomainName(rr.SignerName, msg, off, compression, false) if err != nil { return off, err } @@ -1120,16 +799,10 @@ func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *SMIMEA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *SMIMEA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint8(rr.Usage, msg, off) if err != nil { return off, err @@ -1146,21 +819,15 @@ func (rr *SMIMEA) pack(msg []byte, off int, compression map[string]int, compress if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *SOA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) +func (rr *SOA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Ns, msg, off, compression, compress) if err != nil { return off, err } - 
headerEnd := off - off, err = PackDomainName(rr.Ns, msg, off, compression, compress) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) + off, err = packDomainName(rr.Mbox, msg, off, compression, compress) if err != nil { return off, err } @@ -1184,30 +851,18 @@ func (rr *SOA) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *SPF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *SPF) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packStringTxt(rr.Txt, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *SRV) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Priority, msg, off) if err != nil { return off, err @@ -1220,20 +875,14 @@ func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - off, err = PackDomainName(rr.Target, msg, off, compression, false) + off, err = packDomainName(rr.Target, msg, off, compression, false) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *SSHFP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *SSHFP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint8(rr.Algorithm, msg, off) if err != nil { return off, err @@ -1246,16 +895,10 @@ func (rr *SSHFP) pack(msg []byte, off int, compression map[string]int, compress if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *TA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *TA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.KeyTag, msg, off) if err != nil { return off, err @@ -1272,35 +915,23 @@ func (rr *TA) pack(msg []byte, off int, compression map[string]int, compress boo if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *TALINK) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.PreviousName, msg, off, compression, false) +func (rr *TALINK) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.PreviousName, msg, off, compression, false) if err != nil { return off, err } - off, err = PackDomainName(rr.NextName, msg, off, compression, 
false) + off, err = packDomainName(rr.NextName, msg, off, compression, false) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Algorithm, msg, off, compression, false) +func (rr *TKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Algorithm, msg, off, compression, false) if err != nil { return off, err } @@ -1336,16 +967,10 @@ func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress b if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *TLSA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *TLSA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint8(rr.Usage, msg, off) if err != nil { return off, err @@ -1362,17 +987,11 @@ func (rr *TLSA) pack(msg []byte, off int, compression map[string]int, compress b if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Algorithm, msg, off, compression, false) +func (rr *TSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.Algorithm, msg, off, compression, false) if err != nil { return off, err } @@ -1408,58 +1027,34 @@ func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress b if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *TXT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *TXT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packStringTxt(rr.Txt, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *UID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *UID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint32(rr.Uid, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *UINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *UINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packString(rr.Uinfo, msg, off) if err != nil { return off, err 
} - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *URI) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *URI) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Priority, msg, off) if err != nil { return off, err @@ -1472,2144 +1067,1656 @@ func (rr *URI) pack(msg []byte, off int, compression map[string]int, compress bo if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } -func (rr *X25) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off +func (rr *X25) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packString(rr.PSDNAddress, msg, off) if err != nil { return off, err } - rr.Header().Rdlength = uint16(off - headerEnd) return off, nil } // unpack*() functions -func unpackA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(A) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *A) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.A, off, err = unpackDataA(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackAAAA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(AAAA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *AAAA) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.AAAA, off, err = unpackDataAAAA(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackAFSDB(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(AFSDB) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *AFSDB) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Subtype, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Hostname, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(ANY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *ANY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart - return rr, off, err + return off, nil } -func unpackAVC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(AVC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *AVC) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Txt, off, err = unpackStringTxt(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackCAA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CAA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *CAA) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Flag, off, err = unpackUint8(msg, off) if err != nil 
{ - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Tag, off, err = unpackString(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Value, off, err = unpackStringOctet(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackCDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CDNSKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *CDNSKEY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Flags, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Protocol, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackCDS(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CDS) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *CDS) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.DigestType, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackCERT(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CERT) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *CERT) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Type, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CNAME) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *CNAME) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Target, off, err = UnpackDomainName(msg, off) 
if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackCSYNC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CSYNC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *CSYNC) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Serial, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Flags, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.TypeBitMap, off, err = unpackDataNsec(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DHCID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *DHCID) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackDLV(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DLV) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *DLV) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.DigestType, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackDNAME(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DNAME) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *DNAME) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Target, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DNSKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *DNSKEY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Flags, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Protocol, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err 
+ return off, nil } -func unpackDS(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DS) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *DS) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.DigestType, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackEID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(EID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *EID) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackEUI48(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(EUI48) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *EUI48) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Address, off, err = unpackUint48(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackEUI64(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(EUI64) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *EUI64) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Address, off, err = unpackUint64(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackGID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(GID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *GID) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Gid, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackGPOS(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(GPOS) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *GPOS) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Longitude, off, err = unpackString(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Latitude, off, err = unpackString(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Altitude, off, err = unpackString(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackHINFO(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(HINFO) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *HINFO) unpack(msg []byte, off int) (off1 int, 
err error) { rdStart := off _ = rdStart rr.Cpu, off, err = unpackString(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Os, off, err = unpackString(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackHIP(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(HIP) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *HIP) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.HitLength, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.PublicKeyLength, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength)) if err != nil { - return rr, off, err + return off, err } rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength)) if err != nil { - return rr, off, err + return off, err } rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(KEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Flags, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Protocol, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackKX(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(KX) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *KX) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Exchanger, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackL32(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(L32) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *L32) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Locator32, off, err = 
unpackDataA(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackL64(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(L64) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *L64) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Locator64, off, err = unpackUint64(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackLOC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(LOC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *LOC) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Version, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Size, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.HorizPre, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.VertPre, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Latitude, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Longitude, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Altitude, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackLP(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(LP) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *LP) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Fqdn, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackMB(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MB) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *MB) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Mb, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackMD(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MD) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *MD) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Md, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackMF(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MF) - rr.Hdr = h - 
if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *MF) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Mf, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackMG(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MG) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *MG) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Mg, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackMINFO(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MINFO) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *MINFO) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Rmail, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Email, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackMR(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MR) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *MR) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Mr, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackMX(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MX) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *MX) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Mx, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackNAPTR(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NAPTR) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *NAPTR) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Order, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Preference, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Flags, off, err = unpackString(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Service, off, err = unpackString(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Regexp, off, err = unpackString(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Replacement, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackNID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr 
:= new(NID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *NID) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.NodeID, off, err = unpackUint64(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackNIMLOC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NIMLOC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *NIMLOC) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackNINFO(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NINFO) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *NINFO) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.ZSData, off, err = unpackStringTxt(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackNS(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NS) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *NS) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Ns, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackNSAPPTR(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NSAPPTR) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *NSAPPTR) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Ptr, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackNSEC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NSEC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *NSEC) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.NextDomain, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.TypeBitMap, off, err = unpackDataNsec(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackNSEC3(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NSEC3) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *NSEC3) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Hash, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Flags, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Iterations, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.SaltLength, off, err = unpackUint8(msg, off) if err != nil 
{ - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) if err != nil { - return rr, off, err + return off, err } rr.HashLength, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength)) if err != nil { - return rr, off, err + return off, err } rr.TypeBitMap, off, err = unpackDataNsec(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackNSEC3PARAM(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NSEC3PARAM) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *NSEC3PARAM) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Hash, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Flags, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Iterations, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.SaltLength, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackOPENPGPKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(OPENPGPKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil +func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Data, off, err = unpackStringAny(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err } - var err error + return off, nil +} + +func (rr *OPENPGPKEY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackOPT(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(OPT) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *OPT) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Option, off, err = unpackDataOpt(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackPTR(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(PTR) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *PTR) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Ptr, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackPX(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(PX) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *PX) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart 
rr.Preference, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Map822, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Mapx400, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackRFC3597(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RFC3597) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *RFC3597) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackRKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *RKEY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Flags, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Protocol, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackRP(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RP) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *RP) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Mbox, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Txt, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackRRSIG(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RRSIG) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *RRSIG) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.TypeCovered, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Labels, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.OrigTtl, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Expiration, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Inception, 
off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.SignerName, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackRT(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RT) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *RT) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Preference, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Host, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackSIG(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SIG) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *SIG) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.TypeCovered, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Labels, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.OrigTtl, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Expiration, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Inception, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.SignerName, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackSMIMEA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SMIMEA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *SMIMEA) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Usage, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Selector, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return 
off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.MatchingType, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackSOA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SOA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *SOA) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Ns, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Mbox, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Serial, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Refresh, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Retry, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Expire, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Minttl, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackSPF(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SPF) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *SPF) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Txt, off, err = unpackStringTxt(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackSRV(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SRV) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *SRV) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Priority, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Weight, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Port, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Target, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackSSHFP(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SSHFP) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *SSHFP) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } 
rr.Type, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackTA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *TA) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.KeyTag, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Algorithm, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.DigestType, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackTALINK(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TALINK) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *TALINK) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.PreviousName, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.NextName, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *TKEY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Algorithm, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Inception, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Expiration, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Mode, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Error, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.KeySize, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Key, off, err = unpackStringHex(msg, off, off+int(rr.KeySize)) if err != nil { - return rr, off, err + return off, err } rr.OtherLen, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) if err != nil { - return rr, off, err + return off, err 
} - return rr, off, err + return off, nil } -func unpackTLSA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TLSA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *TLSA) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Usage, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Selector, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.MatchingType, off, err = unpackUint8(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackTSIG(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TSIG) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *TSIG) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Algorithm, off, err = UnpackDomainName(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.TimeSigned, off, err = unpackUint48(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Fudge, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.MACSize, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize)) if err != nil { - return rr, off, err + return off, err } rr.OrigId, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Error, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.OtherLen, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackTXT(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TXT) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *TXT) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Txt, off, err = unpackStringTxt(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackUID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(UID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *UID) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Uid, off, err = unpackUint32(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackUINFO(h RR_Header, msg []byte, 
off int) (RR, int, error) { - rr := new(UINFO) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *UINFO) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Uinfo, off, err = unpackString(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackURI(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(URI) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *URI) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.Priority, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Weight, off, err = unpackUint16(msg, off) if err != nil { - return rr, off, err + return off, err } if off == len(msg) { - return rr, off, nil + return off, nil } rr.Target, off, err = unpackStringOctet(msg, off) if err != nil { - return rr, off, err + return off, err } - return rr, off, err + return off, nil } -func unpackX25(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(X25) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error +func (rr *X25) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart rr.PSDNAddress, off, err = unpackString(msg, off) if err != nil { - return rr, off, err - } - return rr, off, err -} - -var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){ - TypeA: unpackA, - TypeAAAA: unpackAAAA, - TypeAFSDB: unpackAFSDB, - TypeANY: unpackANY, - TypeAVC: unpackAVC, - TypeCAA: unpackCAA, - TypeCDNSKEY: unpackCDNSKEY, - TypeCDS: unpackCDS, - TypeCERT: unpackCERT, - TypeCNAME: unpackCNAME, - TypeCSYNC: unpackCSYNC, - TypeDHCID: unpackDHCID, - TypeDLV: unpackDLV, - TypeDNAME: unpackDNAME, - TypeDNSKEY: unpackDNSKEY, - TypeDS: unpackDS, - TypeEID: unpackEID, - TypeEUI48: unpackEUI48, - TypeEUI64: unpackEUI64, - TypeGID: unpackGID, - TypeGPOS: unpackGPOS, - TypeHINFO: unpackHINFO, - TypeHIP: unpackHIP, - TypeKEY: unpackKEY, - TypeKX: unpackKX, - TypeL32: unpackL32, - TypeL64: unpackL64, - TypeLOC: unpackLOC, - TypeLP: unpackLP, - TypeMB: unpackMB, - TypeMD: unpackMD, - TypeMF: unpackMF, - TypeMG: unpackMG, - TypeMINFO: unpackMINFO, - TypeMR: unpackMR, - TypeMX: unpackMX, - TypeNAPTR: unpackNAPTR, - TypeNID: unpackNID, - TypeNIMLOC: unpackNIMLOC, - TypeNINFO: unpackNINFO, - TypeNS: unpackNS, - TypeNSAPPTR: unpackNSAPPTR, - TypeNSEC: unpackNSEC, - TypeNSEC3: unpackNSEC3, - TypeNSEC3PARAM: unpackNSEC3PARAM, - TypeOPENPGPKEY: unpackOPENPGPKEY, - TypeOPT: unpackOPT, - TypePTR: unpackPTR, - TypePX: unpackPX, - TypeRKEY: unpackRKEY, - TypeRP: unpackRP, - TypeRRSIG: unpackRRSIG, - TypeRT: unpackRT, - TypeSIG: unpackSIG, - TypeSMIMEA: unpackSMIMEA, - TypeSOA: unpackSOA, - TypeSPF: unpackSPF, - TypeSRV: unpackSRV, - TypeSSHFP: unpackSSHFP, - TypeTA: unpackTA, - TypeTALINK: unpackTALINK, - TypeTKEY: unpackTKEY, - TypeTLSA: unpackTLSA, - TypeTSIG: unpackTSIG, - TypeTXT: unpackTXT, - TypeUID: unpackUID, - TypeUINFO: unpackUINFO, - TypeURI: unpackURI, - TypeX25: unpackX25, + return off, err + } + return off, nil } diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 965753b11b27e..19a542d33ca23 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -54,6 +54,7 @@ var TypeToRR = map[uint16]func() RR{ TypeNSEC: func() RR { return new(NSEC) }, TypeNSEC3: 
func() RR { return new(NSEC3) }, TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, + TypeNULL: func() RR { return new(NULL) }, TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, TypeOPT: func() RR { return new(OPT) }, TypePTR: func() RR { return new(PTR) }, @@ -209,6 +210,7 @@ func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } +func (rr *NULL) Header() *RR_Header { return &rr.Hdr } func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } func (rr *OPT) Header() *RR_Header { return &rr.Hdr } func (rr *PTR) Header() *RR_Header { return &rr.Hdr } @@ -236,144 +238,144 @@ func (rr *URI) Header() *RR_Header { return &rr.Hdr } func (rr *X25) Header() *RR_Header { return &rr.Hdr } // len() functions -func (rr *A) len() int { - l := rr.Hdr.len() +func (rr *A) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += net.IPv4len // A return l } -func (rr *AAAA) len() int { - l := rr.Hdr.len() +func (rr *AAAA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += net.IPv6len // AAAA return l } -func (rr *AFSDB) len() int { - l := rr.Hdr.len() +func (rr *AFSDB) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Subtype - l += len(rr.Hostname) + 1 + l += domainNameLen(rr.Hostname, off+l, compression, false) return l } -func (rr *ANY) len() int { - l := rr.Hdr.len() +func (rr *ANY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) return l } -func (rr *AVC) len() int { - l := rr.Hdr.len() +func (rr *AVC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) for _, x := range rr.Txt { l += len(x) + 1 } return l } -func (rr *CAA) len() int { - l := rr.Hdr.len() +func (rr *CAA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l++ // Flag l += len(rr.Tag) + 1 l += len(rr.Value) return l } -func (rr *CERT) len() int { - l := rr.Hdr.len() +func (rr *CERT) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Type l += 2 // KeyTag l++ // Algorithm l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) return l } -func (rr *CNAME) len() int { - l := rr.Hdr.len() - l += len(rr.Target) + 1 +func (rr *CNAME) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Target, off+l, compression, true) return l } -func (rr *DHCID) len() int { - l := rr.Hdr.len() +func (rr *DHCID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += base64.StdEncoding.DecodedLen(len(rr.Digest)) return l } -func (rr *DNAME) len() int { - l := rr.Hdr.len() - l += len(rr.Target) + 1 +func (rr *DNAME) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Target, off+l, compression, false) return l } -func (rr *DNSKEY) len() int { - l := rr.Hdr.len() +func (rr *DNSKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Flags l++ // Protocol l++ // Algorithm l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } -func (rr *DS) len() int { - l := rr.Hdr.len() +func (rr *DS) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // KeyTag 
l++ // Algorithm l++ // DigestType l += len(rr.Digest)/2 + 1 return l } -func (rr *EID) len() int { - l := rr.Hdr.len() +func (rr *EID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += len(rr.Endpoint)/2 + 1 return l } -func (rr *EUI48) len() int { - l := rr.Hdr.len() +func (rr *EUI48) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 6 // Address return l } -func (rr *EUI64) len() int { - l := rr.Hdr.len() +func (rr *EUI64) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 8 // Address return l } -func (rr *GID) len() int { - l := rr.Hdr.len() +func (rr *GID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 4 // Gid return l } -func (rr *GPOS) len() int { - l := rr.Hdr.len() +func (rr *GPOS) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += len(rr.Longitude) + 1 l += len(rr.Latitude) + 1 l += len(rr.Altitude) + 1 return l } -func (rr *HINFO) len() int { - l := rr.Hdr.len() +func (rr *HINFO) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += len(rr.Cpu) + 1 l += len(rr.Os) + 1 return l } -func (rr *HIP) len() int { - l := rr.Hdr.len() +func (rr *HIP) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l++ // HitLength l++ // PublicKeyAlgorithm l += 2 // PublicKeyLength l += len(rr.Hit) / 2 l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) for _, x := range rr.RendezvousServers { - l += len(x) + 1 + l += domainNameLen(x, off+l, compression, false) } return l } -func (rr *KX) len() int { - l := rr.Hdr.len() +func (rr *KX) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Preference - l += len(rr.Exchanger) + 1 + l += domainNameLen(rr.Exchanger, off+l, compression, false) return l } -func (rr *L32) len() int { - l := rr.Hdr.len() +func (rr *L32) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Preference l += net.IPv4len // Locator32 return l } -func (rr *L64) len() int { - l := rr.Hdr.len() +func (rr *L64) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Preference l += 8 // Locator64 return l } -func (rr *LOC) len() int { - l := rr.Hdr.len() +func (rr *LOC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l++ // Version l++ // Size l++ // HorizPre @@ -383,89 +385,89 @@ func (rr *LOC) len() int { l += 4 // Altitude return l } -func (rr *LP) len() int { - l := rr.Hdr.len() +func (rr *LP) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Preference - l += len(rr.Fqdn) + 1 + l += domainNameLen(rr.Fqdn, off+l, compression, false) return l } -func (rr *MB) len() int { - l := rr.Hdr.len() - l += len(rr.Mb) + 1 +func (rr *MB) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mb, off+l, compression, true) return l } -func (rr *MD) len() int { - l := rr.Hdr.len() - l += len(rr.Md) + 1 +func (rr *MD) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Md, off+l, compression, true) return l } -func (rr *MF) len() int { - l := rr.Hdr.len() - l += len(rr.Mf) + 1 +func (rr *MF) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) 
+ l += domainNameLen(rr.Mf, off+l, compression, true) return l } -func (rr *MG) len() int { - l := rr.Hdr.len() - l += len(rr.Mg) + 1 +func (rr *MG) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mg, off+l, compression, true) return l } -func (rr *MINFO) len() int { - l := rr.Hdr.len() - l += len(rr.Rmail) + 1 - l += len(rr.Email) + 1 +func (rr *MINFO) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Rmail, off+l, compression, true) + l += domainNameLen(rr.Email, off+l, compression, true) return l } -func (rr *MR) len() int { - l := rr.Hdr.len() - l += len(rr.Mr) + 1 +func (rr *MR) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mr, off+l, compression, true) return l } -func (rr *MX) len() int { - l := rr.Hdr.len() +func (rr *MX) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Preference - l += len(rr.Mx) + 1 + l += domainNameLen(rr.Mx, off+l, compression, true) return l } -func (rr *NAPTR) len() int { - l := rr.Hdr.len() +func (rr *NAPTR) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Order l += 2 // Preference l += len(rr.Flags) + 1 l += len(rr.Service) + 1 l += len(rr.Regexp) + 1 - l += len(rr.Replacement) + 1 + l += domainNameLen(rr.Replacement, off+l, compression, false) return l } -func (rr *NID) len() int { - l := rr.Hdr.len() +func (rr *NID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Preference l += 8 // NodeID return l } -func (rr *NIMLOC) len() int { - l := rr.Hdr.len() +func (rr *NIMLOC) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += len(rr.Locator)/2 + 1 return l } -func (rr *NINFO) len() int { - l := rr.Hdr.len() +func (rr *NINFO) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) for _, x := range rr.ZSData { l += len(x) + 1 } return l } -func (rr *NS) len() int { - l := rr.Hdr.len() - l += len(rr.Ns) + 1 +func (rr *NS) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Ns, off+l, compression, true) return l } -func (rr *NSAPPTR) len() int { - l := rr.Hdr.len() - l += len(rr.Ptr) + 1 +func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Ptr, off+l, compression, false) return l } -func (rr *NSEC3PARAM) len() int { - l := rr.Hdr.len() +func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l++ // Hash l++ // Flags l += 2 // Iterations @@ -473,44 +475,49 @@ func (rr *NSEC3PARAM) len() int { l += len(rr.Salt) / 2 return l } -func (rr *OPENPGPKEY) len() int { - l := rr.Hdr.len() +func (rr *NULL) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Data) + return l +} +func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } -func (rr *PTR) len() int { - l := rr.Hdr.len() - l += len(rr.Ptr) + 1 +func (rr *PTR) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Ptr, off+l, compression, true) return l } -func (rr *PX) len() int { - l := 
rr.Hdr.len() +func (rr *PX) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Preference - l += len(rr.Map822) + 1 - l += len(rr.Mapx400) + 1 + l += domainNameLen(rr.Map822, off+l, compression, false) + l += domainNameLen(rr.Mapx400, off+l, compression, false) return l } -func (rr *RFC3597) len() int { - l := rr.Hdr.len() +func (rr *RFC3597) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += len(rr.Rdata)/2 + 1 return l } -func (rr *RKEY) len() int { - l := rr.Hdr.len() +func (rr *RKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Flags l++ // Protocol l++ // Algorithm l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } -func (rr *RP) len() int { - l := rr.Hdr.len() - l += len(rr.Mbox) + 1 - l += len(rr.Txt) + 1 +func (rr *RP) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Mbox, off+l, compression, false) + l += domainNameLen(rr.Txt, off+l, compression, false) return l } -func (rr *RRSIG) len() int { - l := rr.Hdr.len() +func (rr *RRSIG) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // TypeCovered l++ // Algorithm l++ // Labels @@ -518,28 +525,28 @@ func (rr *RRSIG) len() int { l += 4 // Expiration l += 4 // Inception l += 2 // KeyTag - l += len(rr.SignerName) + 1 + l += domainNameLen(rr.SignerName, off+l, compression, false) l += base64.StdEncoding.DecodedLen(len(rr.Signature)) return l } -func (rr *RT) len() int { - l := rr.Hdr.len() +func (rr *RT) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Preference - l += len(rr.Host) + 1 + l += domainNameLen(rr.Host, off+l, compression, false) return l } -func (rr *SMIMEA) len() int { - l := rr.Hdr.len() +func (rr *SMIMEA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l++ // Usage l++ // Selector l++ // MatchingType l += len(rr.Certificate)/2 + 1 return l } -func (rr *SOA) len() int { - l := rr.Hdr.len() - l += len(rr.Ns) + 1 - l += len(rr.Mbox) + 1 +func (rr *SOA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Ns, off+l, compression, true) + l += domainNameLen(rr.Mbox, off+l, compression, true) l += 4 // Serial l += 4 // Refresh l += 4 // Retry @@ -547,45 +554,45 @@ func (rr *SOA) len() int { l += 4 // Minttl return l } -func (rr *SPF) len() int { - l := rr.Hdr.len() +func (rr *SPF) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) for _, x := range rr.Txt { l += len(x) + 1 } return l } -func (rr *SRV) len() int { - l := rr.Hdr.len() +func (rr *SRV) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Priority l += 2 // Weight l += 2 // Port - l += len(rr.Target) + 1 + l += domainNameLen(rr.Target, off+l, compression, false) return l } -func (rr *SSHFP) len() int { - l := rr.Hdr.len() +func (rr *SSHFP) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l++ // Algorithm l++ // Type l += len(rr.FingerPrint)/2 + 1 return l } -func (rr *TA) len() int { - l := rr.Hdr.len() +func (rr *TA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // KeyTag l++ // Algorithm l++ // DigestType l += len(rr.Digest)/2 + 1 return l } -func (rr *TALINK) len() int { - l := rr.Hdr.len() - 
l += len(rr.PreviousName) + 1 - l += len(rr.NextName) + 1 +func (rr *TALINK) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.PreviousName, off+l, compression, false) + l += domainNameLen(rr.NextName, off+l, compression, false) return l } -func (rr *TKEY) len() int { - l := rr.Hdr.len() - l += len(rr.Algorithm) + 1 +func (rr *TKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Algorithm, off+l, compression, false) l += 4 // Inception l += 4 // Expiration l += 2 // Mode @@ -596,17 +603,17 @@ func (rr *TKEY) len() int { l += len(rr.OtherData) / 2 return l } -func (rr *TLSA) len() int { - l := rr.Hdr.len() +func (rr *TLSA) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l++ // Usage l++ // Selector l++ // MatchingType l += len(rr.Certificate)/2 + 1 return l } -func (rr *TSIG) len() int { - l := rr.Hdr.len() - l += len(rr.Algorithm) + 1 +func (rr *TSIG) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += domainNameLen(rr.Algorithm, off+l, compression, false) l += 6 // TimeSigned l += 2 // Fudge l += 2 // MACSize @@ -617,32 +624,32 @@ func (rr *TSIG) len() int { l += len(rr.OtherData) / 2 return l } -func (rr *TXT) len() int { - l := rr.Hdr.len() +func (rr *TXT) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) for _, x := range rr.Txt { l += len(x) + 1 } return l } -func (rr *UID) len() int { - l := rr.Hdr.len() +func (rr *UID) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 4 // Uid return l } -func (rr *UINFO) len() int { - l := rr.Hdr.len() +func (rr *UINFO) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += len(rr.Uinfo) + 1 return l } -func (rr *URI) len() int { - l := rr.Hdr.len() +func (rr *URI) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += 2 // Priority l += 2 // Weight l += len(rr.Target) return l } -func (rr *X25) len() int { - l := rr.Hdr.len() +func (rr *X25) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) l += len(rr.PSDNAddress) + 1 return l } @@ -783,12 +790,17 @@ func (rr *NSEC3) copy() RR { func (rr *NSEC3PARAM) copy() RR { return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} } +func (rr *NULL) copy() RR { + return &NULL{rr.Hdr, rr.Data} +} func (rr *OPENPGPKEY) copy() RR { return &OPENPGPKEY{rr.Hdr, rr.PublicKey} } func (rr *OPT) copy() RR { Option := make([]EDNS0, len(rr.Option)) - copy(Option, rr.Option) + for i, e := range rr.Option { + Option[i] = e.copy() + } return &OPT{rr.Hdr, Option} } func (rr *PTR) copy() RR { diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap new file mode 100644 index 0000000000000..ba611cb21ca58 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.mailmap @@ -0,0 +1 @@ +Stephen J Day diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS index f5bd5a6ed7573..42a29795d7a00 100644 --- a/vendor/github.com/opencontainers/go-digest/MAINTAINERS +++ b/vendor/github.com/opencontainers/go-digest/MAINTAINERS @@ -1,5 +1,7 @@ +Aaron Lehmann (@aaronlehmann) Brandon Philips (@philips) Brendan Burns (@brendandburns) +Derek McGowan (@dmcgowan) Jason Bouzane (@jbouzane) 
John Starks (@jstarks) Jonathan Boulle (@jonboulle) diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md index 9d6174cfdcc5f..0f5a04092c42b 100644 --- a/vendor/github.com/opencontainers/go-digest/README.md +++ b/vendor/github.com/opencontainers/go-digest/README.md @@ -1,10 +1,10 @@ # go-digest -[![GoDoc](https://godoc.org/github.com/docker/go-digest?status.svg)](https://godoc.org/github.com/docker/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/docker/go-digest)](https://goreportcard.com/report/github.com/docker/go-digest) [![Build Status](https://travis-ci.org/docker/go-digest.svg?branch=master)](https://travis-ci.org/docker/go-digest) +[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest) Common digest package used across the container ecosystem. -Please see the [godoc](https://godoc.org/github.com/docker/go-digest) for more information. +Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information. # What is a digest? @@ -49,7 +49,7 @@ can power a rich, safe, content distribution system. # Usage -While the [godoc](https://godoc.org/github.com/docker/go-digest) is +While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package. @@ -76,7 +76,7 @@ out when using this package. The Go API, at this stage, is considered stable, unless otherwise noted. -As always, before using a package export, read the [godoc](https://godoc.org/github.com/docker/go-digest). +As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest). # Contributing @@ -88,16 +88,16 @@ the alternatives you tried before submitting a PR. # Reporting security issues -The maintainers take security seriously. If you discover a security -issue, please bring it to their attention right away! +Please DO NOT file a public issue, instead send your report privately to +security@opencontainers.org. -Please DO NOT file a public issue, instead send your report privately -to security@docker.com. +The maintainers take security seriously. If you discover a security issue, +please bring it to their attention right away! -Security reports are greatly appreciated and we will publicly thank you -for it. We also like to send gifts—if you're into Docker schwag, make -sure to let us know. We currently do not offer a paid security bounty -program, but are not ruling it out in the future. +If you are reporting a security issue, do not create an issue or file a pull +request on GitHub. Instead, disclose the issue responsibly by sending an email +to security@opencontainers.org (which is inhabited only by the maintainers of +the various OCI projects). 
# Copyright and license diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go index a3c44801d5e6f..8813bd26f1fbd 100644 --- a/vendor/github.com/opencontainers/go-digest/algorithm.go +++ b/vendor/github.com/opencontainers/go-digest/algorithm.go @@ -1,3 +1,17 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package digest import ( @@ -5,6 +19,7 @@ import ( "fmt" "hash" "io" + "regexp" ) // Algorithm identifies and implementation of a digester by an identifier. @@ -14,9 +29,9 @@ type Algorithm string // supported digest types const ( - SHA256 Algorithm = "sha256" // sha256 with hex encoding - SHA384 Algorithm = "sha384" // sha384 with hex encoding - SHA512 Algorithm = "sha512" // sha512 with hex encoding + SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only) + SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only) + SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only) // Canonical is the primary digest algorithm used with the distribution // project. Other digests may be used but this one is the primary storage @@ -36,6 +51,14 @@ var ( SHA384: crypto.SHA384, SHA512: crypto.SHA512, } + + // anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests. + // Note that /A-F/ disallowed. + anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{ + SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`), + SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`), + SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`), + } ) // Available returns true if the digest type is available for use. If this @@ -111,6 +134,14 @@ func (a Algorithm) Hash() hash.Hash { return algorithms[a].New() } +// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into +// the encoded portion of the digest. +func (a Algorithm) Encode(d []byte) string { + // TODO(stevvooe): Currently, all algorithms use a hex encoding. When we + // add support for back registration, we can modify this accordingly. + return fmt.Sprintf("%x", d) +} + // FromReader returns the digest of the reader using the algorithm. 
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { digester := a.Digester() @@ -142,3 +173,20 @@ func (a Algorithm) FromBytes(p []byte) Digest { func (a Algorithm) FromString(s string) Digest { return a.FromBytes([]byte(s)) } + +// Validate validates the encoded portion string +func (a Algorithm) Validate(encoded string) error { + r, ok := anchoredEncodedRegexps[a] + if !ok { + return ErrDigestUnsupported + } + // Digests much always be hex-encoded, ensuring that their hex portion will + // always be size*2 + if a.Size()*2 != len(encoded) { + return ErrDigestInvalidLength + } + if r.MatchString(encoded) { + return nil + } + return ErrDigestInvalidFormat +} diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go index 7c66c30c017a6..ad398cba2fb71 100644 --- a/vendor/github.com/opencontainers/go-digest/digest.go +++ b/vendor/github.com/opencontainers/go-digest/digest.go @@ -1,3 +1,17 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package digest import ( @@ -31,16 +45,21 @@ func NewDigest(alg Algorithm, h hash.Hash) Digest { // functions. This is also useful for rebuilding digests from binary // serializations. func NewDigestFromBytes(alg Algorithm, p []byte) Digest { - return Digest(fmt.Sprintf("%s:%x", alg, p)) + return NewDigestFromEncoded(alg, alg.Encode(p)) } -// NewDigestFromHex returns a Digest from alg and a the hex encoded digest. +// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded. func NewDigestFromHex(alg, hex string) Digest { - return Digest(fmt.Sprintf("%s:%s", alg, hex)) + return NewDigestFromEncoded(Algorithm(alg), hex) +} + +// NewDigestFromEncoded returns a Digest from alg and the encoded digest. +func NewDigestFromEncoded(alg Algorithm, encoded string) Digest { + return Digest(fmt.Sprintf("%s:%s", alg, encoded)) } // DigestRegexp matches valid digest types. -var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) +var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`) // DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) @@ -82,26 +101,18 @@ func FromString(s string) Digest { // error if not. 
func (d Digest) Validate() error { s := string(d) - i := strings.Index(s, ":") - - // validate i then run through regexp - if i < 0 || i+1 == len(s) || !DigestRegexpAnchored.MatchString(s) { + if i <= 0 || i+1 == len(s) { return ErrDigestInvalidFormat } - - algorithm := Algorithm(s[:i]) + algorithm, encoded := Algorithm(s[:i]), s[i+1:] if !algorithm.Available() { + if !DigestRegexpAnchored.MatchString(s) { + return ErrDigestInvalidFormat + } return ErrDigestUnsupported } - - // Digests much always be hex-encoded, ensuring that their hex portion will - // always be size*2 - if algorithm.Size()*2 != len(s[i+1:]) { - return ErrDigestInvalidLength - } - - return nil + return algorithm.Validate(encoded) } // Algorithm returns the algorithm portion of the digest. This will panic if @@ -119,12 +130,17 @@ func (d Digest) Verifier() Verifier { } } -// Hex returns the hex digest portion of the digest. This will panic if the +// Encoded returns the encoded portion of the digest. This will panic if the // underlying digest is not in a valid format. -func (d Digest) Hex() string { +func (d Digest) Encoded() string { return string(d[d.sepIndex()+1:]) } +// Hex is deprecated. Please use Digest.Encoded. +func (d Digest) Hex() string { + return d.Encoded() +} + func (d Digest) String() string { return string(d) } diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go index 918a3f9191eb5..36fa2728ef462 100644 --- a/vendor/github.com/opencontainers/go-digest/digester.go +++ b/vendor/github.com/opencontainers/go-digest/digester.go @@ -1,3 +1,17 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package digest import "hash" diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go index f64b0db32b4cf..491ea1ef1f81f 100644 --- a/vendor/github.com/opencontainers/go-digest/doc.go +++ b/vendor/github.com/opencontainers/go-digest/doc.go @@ -1,3 +1,17 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Package digest provides a generalized type to opaquely represent message // digests and their operations within the registry. The Digest type is // designed to serve as a flexible identifier in a content-addressable system. 
diff --git a/vendor/github.com/opencontainers/go-digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go index f1db6cda8427d..32125e9187857 100644 --- a/vendor/github.com/opencontainers/go-digest/verifiers.go +++ b/vendor/github.com/opencontainers/go-digest/verifiers.go @@ -1,3 +1,17 @@ +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package digest import ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel b/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel index 66ba28d66856f..6beb7fc09ba89 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel +++ b/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel @@ -3,6 +3,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "build_info.go", + "build_info_pre_1.12.go", "collector.go", "counter.go", "desc.go", @@ -12,11 +14,12 @@ go_library( "gauge.go", "go_collector.go", "histogram.go", - "http.go", "labels.go", "metric.go", "observer.go", "process_collector.go", + "process_collector_other.go", + "process_collector_windows.go", "registry.go", "summary.go", "timer.go", @@ -35,6 +38,43 @@ go_library( "//vendor/github.com/prometheus/client_model/go:go_default_library", "//vendor/github.com/prometheus/common/expfmt:go_default_library", "//vendor/github.com/prometheus/common/model:go_default_library", - "//vendor/github.com/prometheus/procfs:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/prometheus/procfs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/github.com/prometheus/procfs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/github.com/prometheus/procfs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/github.com/prometheus/procfs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "//vendor/github.com/prometheus/procfs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/prometheus/procfs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/github.com/prometheus/procfs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/github.com/prometheus/procfs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/github.com/prometheus/procfs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/github.com/prometheus/procfs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/github.com/prometheus/procfs:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/golang.org/x/sys/windows:go_default_library", + ], + "//conditions:default": [], + }), ) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go 
b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go new file mode 100644 index 0000000000000..288f0e854885c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.12 + +package prometheus + +import "runtime/debug" + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+. +func readBuildInfo() (path, version, sum string) { + path, version, sum = "unknown", "unknown", "unknown" + if bi, ok := debug.ReadBuildInfo(); ok { + path = bi.Main.Path + version = bi.Main.Version + sum = bi.Main.Sum + } + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go new file mode 100644 index 0000000000000..6609e2877c0b0 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.12 + +package prometheus + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before +// 1.12. Remove this whole file once the minimum supported Go version is 1.12. +func readBuildInfo() (path, version, sum string) { + return "unknown", "unknown", "unknown" +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index c0d70b2faf169..1e839650d4d93 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -79,7 +79,7 @@ type Collector interface { // of the Describe method. If a Collector sometimes collects no metrics at all // (for example vectors like CounterVec, GaugeVec, etc., which only collect // metrics after a metric with a fully specified label set has been accessed), -// it might even get registered as an unchecked Collecter (cf. the Register +// it might even get registered as an unchecked Collector (cf. the Register // method of the Registerer interface). Hence, only use this shortcut // implementation of Describe if you are certain to fulfill the contract. 
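Editor's illustrative aside (not part of the vendored diff): the collector.go comment fixed above describes the contract for custom Collectors, where Describe announces every Desc up front (or sends none to register as "unchecked"). A minimal checked Collector built only from the exported API is sketched below; the type name, metric name, and read callback are hypothetical.

package main

import "github.com/prometheus/client_golang/prometheus"

// queueLengthCollector is a hypothetical collector that reports one gauge
// value each time it is scraped.
type queueLengthCollector struct {
	desc *prometheus.Desc
	read func() float64 // hypothetical source of the current queue length
}

func newQueueLengthCollector(read func() float64) *queueLengthCollector {
	return &queueLengthCollector{
		desc: prometheus.NewDesc(
			"example_queue_length",
			"Current length of the example queue.",
			nil, nil,
		),
		read: read,
	}
}

// Describe sends the single Desc; sending nothing here instead would make the
// collector "unchecked", as described in the doc comment above.
func (c *queueLengthCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect creates a throw-away const metric on every scrape.
func (c *queueLengthCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.read())
}

func main() {
	// Register with the default registerer; a scrape would now emit the gauge.
	prometheus.MustRegister(newQueueLengthCollector(func() float64 { return 42 }))
}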
// diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 5d9525defc8df..01977de66140c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -122,13 +122,13 @@ // the Collect method. The Describe method has to return separate Desc // instances, representative of the “throw-away” metrics to be created later. // NewDesc comes in handy to create those Desc instances. Alternatively, you -// could return no Desc at all, which will marke the Collector “unchecked”. No -// checks are porformed at registration time, but metric consistency will still +// could return no Desc at all, which will mark the Collector “unchecked”. No +// checks are performed at registration time, but metric consistency will still // be ensured at scrape time, i.e. any inconsistencies will lead to scrape // errors. Thus, with unchecked Collectors, the responsibility to not collect // metrics that lead to inconsistencies in the total scrape result lies with the // implementer of the Collector. While this is not a desirable state, it is -// sometimes necessary. The typical use case is a situatios where the exact +// sometimes necessary. The typical use case is a situation where the exact // metrics to be returned by a Collector cannot be predicted at registration // time, but the implementer has sufficient knowledge of the whole system to // guarantee metric consistency. @@ -183,7 +183,6 @@ // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example // above. The tools to expose metrics via HTTP are in the promhttp sub-package. -// (The top-level functions in the prometheus package are deprecated.) // // Pushing to the Pushgateway // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ba3b9333edde0..dc9247fed976c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -14,9 +14,9 @@ package prometheus import ( - "fmt" "runtime" "runtime/debug" + "sync" "time" ) @@ -26,16 +26,41 @@ type goCollector struct { gcDesc *Desc goInfoDesc *Desc - // metrics to describe and collect - metrics memStatsMetrics + // ms... are memstats related. + msLast *runtime.MemStats // Previously collected memstats. + msLastTimestamp time.Time + msMtx sync.Mutex // Protects msLast and msLastTimestamp. + msMetrics memStatsMetrics + msRead func(*runtime.MemStats) // For mocking in tests. + msMaxWait time.Duration // Wait time for fresh memstats. + msMaxAge time.Duration // Maximum allowed age of old memstats. } -// NewGoCollector returns a collector which exports metrics about the current Go +// NewGoCollector returns a collector that exports metrics about the current Go // process. This includes memory stats. To collect those, runtime.ReadMemStats -// is called. This causes a stop-the-world, which is very short with Go1.9+ -// (~25µs). However, with older Go versions, the stop-the-world duration depends -// on the heap size and can be quite significant (~1.7 ms/GiB as per +// is called. This requires to “stop the world”, which usually only happens for +// garbage collection (GC). 
Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The performance impact of stopping the world is the more relevant the more +// frequently metrics are collected. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25µs) so that the +// performance impact will only matter in rare cases. However, with older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per // https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection is more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds. (The problem might be solved +// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go +// issue.) func NewGoCollector() Collector { return &goCollector{ goroutinesDesc: NewDesc( @@ -54,7 +79,11 @@ func NewGoCollector() Collector { "go_info", "Information about the Go environment.", nil, Labels{"version": runtime.Version()}), - metrics: memStatsMetrics{ + msLast: &runtime.MemStats{}, + msRead: runtime.ReadMemStats, + msMaxWait: time.Second, + msMaxAge: 5 * time.Minute, + msMetrics: memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), @@ -253,7 +282,7 @@ func NewGoCollector() Collector { } func memstatNamespace(s string) string { - return fmt.Sprintf("go_memstats_%s", s) + return "go_memstats_" + s } // Describe returns all descriptions of the collector. @@ -262,13 +291,27 @@ func (c *goCollector) Describe(ch chan<- *Desc) { ch <- c.threadsDesc ch <- c.gcDesc ch <- c.goInfoDesc - for _, i := range c.metrics { + for _, i := range c.msMetrics { ch <- i.desc } } // Collect returns the current state of all metrics of the collector. func (c *goCollector) Collect(ch chan<- Metric) { + var ( + ms = &runtime.MemStats{} + done = make(chan struct{}) + ) + // Start reading memstats first as it might take a while. + go func() { + c.msRead(ms) + c.msMtx.Lock() + c.msLast = ms + c.msLastTimestamp = time.Now() + c.msMtx.Unlock() + close(done) + }() + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) n, _ := runtime.ThreadCreateProfile(nil) ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) @@ -286,9 +329,31 @@ func (c *goCollector) Collect(ch chan<- Metric) { ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) - ms := &runtime.MemStats{} - runtime.ReadMemStats(ms) - for _, i := range c.metrics { + timer := time.NewTimer(c.msMaxWait) + select { + case <-done: // Our own ReadMemStats succeeded in time. Use it. + timer.Stop() // Important for high collection frequencies to not pile up timers. + c.msCollect(ch, ms) + return + case <-timer.C: // Time out, use last memstats if possible. Continue below. + } + c.msMtx.Lock() + if time.Since(c.msLastTimestamp) < c.msMaxAge { + // Last memstats are recent enough. Collect from them under the lock. 
+ c.msCollect(ch, c.msLast) + c.msMtx.Unlock() + return + } + // If we are here, the last memstats are too old or don't exist. We have + // to wait until our own ReadMemStats finally completes. For that to + // happen, we have to release the lock. + c.msMtx.Unlock() + <-done + c.msCollect(ch, ms) +} + +func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { + for _, i := range c.msMetrics { ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) } } @@ -299,3 +364,33 @@ type memStatsMetrics []struct { eval func(*runtime.MemStats) float64 valType ValueType } + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. +func NewBuildInfoCollector() Collector { + path, version, sum := readBuildInfo() + c := &selfCollector{MustNewConstMetric( + NewDesc( + "go_build_info", + "Build information about the main Go module.", + nil, Labels{"path": path, "version": version, "checksum": sum}, + ), + GaugeValue, 1)} + c.init(c.self) + return c +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index f88da707bc840..d7ea67bd2bafd 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -204,8 +204,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } } - // Finally we know the final length of h.upperBounds and can make counts - // for both states: + // Finally we know the final length of h.upperBounds and can make buckets + // for both counts: h.counts[0].buckets = make([]uint64, len(h.upperBounds)) h.counts[1].buckets = make([]uint64, len(h.upperBounds)) @@ -224,18 +224,21 @@ type histogramCounts struct { } type histogram struct { - // countAndHotIdx is a complicated one. For lock-free yet atomic - // observations, we need to save the total count of observations again, - // combined with the index of the currently-hot counts struct, so that - // we can perform the operation on both values atomically. The least - // significant bit defines the hot counts struct. The remaining 63 bits - // represent the total count of observations. This happens under the - // assumption that the 63bit count will never overflow. Rationale: An - // observations takes about 30ns. Let's assume it could happen in - // 10ns. Overflowing the counter will then take at least (2^63)*10ns, - // which is about 3000 years. 
+ // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // histogramCounts, as a marker for completion. // - // This has to be first in the struct for 64bit alignment. See + // Calls of the Write method (which are non-mutating reads from the + // perspective of the histogram) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + // + // Fields with atomic access first! See alignment constraint: // http://golang.org/pkg/sync/atomic/#pkg-note-BUG countAndHotIdx uint64 @@ -243,16 +246,14 @@ type histogram struct { desc *Desc writeMtx sync.Mutex // Only used in the Write method. - upperBounds []float64 - // Two counts, one is "hot" for lock-free observations, the other is // "cold" for writing out a dto.Metric. It has to be an array of // pointers to guarantee 64bit alignment of the histogramCounts, see // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts - hotIdx int // Index of currently-hot counts. Only used within Write. - labelPairs []*dto.LabelPair + upperBounds []float64 + labelPairs []*dto.LabelPair } func (h *histogram) Desc() *Desc { @@ -271,11 +272,11 @@ func (h *histogram) Observe(v float64) { // 300 buckets: 154 ns/op linear - binary 61.6 ns/op i := sort.SearchFloat64s(h.upperBounds, v) - // We increment h.countAndHotIdx by 2 so that the counter in the upper - // 63 bits gets incremented by 1. At the same time, we get the new value + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 2) - hotCounts := h.counts[n%2] + n := atomic.AddUint64(&h.countAndHotIdx, 1) + hotCounts := h.counts[n>>63] if i < len(h.upperBounds) { atomic.AddUint64(&hotCounts.buckets[i], 1) @@ -293,72 +294,43 @@ func (h *histogram) Observe(v float64) { } func (h *histogram) Write(out *dto.Metric) error { - var ( - his = &dto.Histogram{} - buckets = make([]*dto.Bucket, len(h.upperBounds)) - hotCounts, coldCounts *histogramCounts - count uint64 - ) - - // For simplicity, we mutex the rest of this method. It is not in the - // hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it. + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. h.writeMtx.Lock() defer h.writeMtx.Unlock() - // This is a bit arcane, which is why the following spells out this if - // clause in English: - // - // If the currently-hot counts struct is #0, we atomically increment - // h.countAndHotIdx by 1 so that from now on Observe will use the counts - // struct #1. 
Furthermore, the atomic increment gives us the new value, - // which, in its most significant 63 bits, tells us the count of - // observations done so far up to and including currently ongoing - // observations still using the counts struct just changed from hot to - // cold. To have a normal uint64 for the count, we bitshift by 1 and - // save the result in count. We also set h.hotIdx to 1 for the next - // Write call, and we will refer to counts #1 as hotCounts and to counts - // #0 as coldCounts. - // - // If the currently-hot counts struct is #1, we do the corresponding - // things the other way round. We have to _decrement_ h.countAndHotIdx - // (which is a bit arcane in itself, as we have to express -1 with an - // unsigned int...). - if h.hotIdx == 0 { - count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1 - h.hotIdx = 1 - hotCounts = h.counts[1] - coldCounts = h.counts[0] - } else { - count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement. - h.hotIdx = 0 - hotCounts = h.counts[0] - coldCounts = h.counts[1] - } - - // Now we have to wait for the now-declared-cold counts to actually cool - // down, i.e. wait for all observations still using it to finish. That's - // the case once the count in the cold counts struct is the same as the - // one atomically retrieved from the upper 63bits of h.countAndHotIdx. - for { - if count == atomic.LoadUint64(&coldCounts.count) { - break - } + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := h.counts[n>>63] + coldCounts := h.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { runtime.Gosched() // Let observations get work done. } - his.SampleCount = proto.Uint64(count) - his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))) + his := &dto.Histogram{ + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } var cumCount uint64 for i, upperBound := range h.upperBounds { cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) - buckets[i] = &dto.Bucket{ + his.Bucket[i] = &dto.Bucket{ CumulativeCount: proto.Uint64(cumCount), UpperBound: proto.Float64(upperBound), } } - his.Bucket = buckets out.Histogram = his out.Label = h.labelPairs diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go deleted file mode 100644 index 9f0875bfc811d..0000000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bufio" - "compress/gzip" - "io" - "net" - "net/http" - "strconv" - "strings" - "sync" - "time" - - "github.com/prometheus/common/expfmt" -) - -// TODO(beorn7): Remove this whole file. It is a partial mirror of -// promhttp/http.go (to avoid circular import chains) where everything HTTP -// related should live. The functions here are just for avoiding -// breakage. Everything is deprecated. - -const ( - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" -) - -var gzipPool = sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, -} - -// Handler returns an HTTP handler for the DefaultGatherer. It is -// already instrumented with InstrumentHandler (using "prometheus" as handler -// name). -// -// Deprecated: Please note the issues described in the doc comment of -// InstrumentHandler. You might want to consider using promhttp.Handler instead. -func Handler() http.Handler { - return InstrumentHandler("prometheus", UninstrumentedHandler()) -} - -// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. -// -// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{}) -// instead. See there for further documentation. -func UninstrumentedHandler() http.Handler { - return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { - mfs, err := DefaultGatherer.Gather() - if err != nil { - httpError(rsp, err) - return - } - - contentType := expfmt.Negotiate(req.Header) - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) - - w := io.Writer(rsp) - if gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) - - gz.Reset(w) - defer gz.Close() - - w = gz - } - - enc := expfmt.NewEncoder(w, contentType) - - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - httpError(rsp, err) - return - } - } - }) -} - -var instLabels = []string{"method", "code"} - -type nower interface { - Now() time.Time -} - -type nowFunc func() time.Time - -func (n nowFunc) Now() time.Time { - return n() -} - -var now nower = nowFunc(func() time.Time { - return time.Now() -}) - -// InstrumentHandler wraps the given HTTP handler for instrumentation. It -// registers four metric collectors (if not already done) and reports HTTP -// metrics to the (newly or already) registered collectors: http_requests_total -// (CounterVec), http_request_duration_microseconds (Summary), -// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each -// has a constant label named "handler" with the provided handlerName as -// value. http_requests_total is a metric vector partitioned by HTTP method -// (label name "method") and HTTP status code (label name "code"). -// -// Deprecated: InstrumentHandler has several issues. Use the tooling provided in -// package promhttp instead. The issues are the following: (1) It uses Summaries -// rather than Histograms. Summaries are not useful if aggregation across -// multiple instances is required. (2) It uses microseconds as unit, which is -// deprecated and should be replaced by seconds. (3) The size of the request is -// calculated in a separate goroutine. 
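The deprecation notices in the deleted file point at promhttp as the replacement. A small migration sketch, assuming the default gatherer (port and path are illustrative):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Stand-in for the removed prometheus.UninstrumentedHandler().
	http.Handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}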
Since this calculator requires access to -// the request header, it creates a race with any writes to the header performed -// during request handling. httputil.ReverseProxy is a prominent example for a -// handler performing such writes. (4) It has additional issues with HTTP/2, cf. -// https://github.com/prometheus/client_golang/issues/272. -func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) -} - -// InstrumentHandlerFunc wraps the given function for instrumentation. It -// otherwise works in the same way as InstrumentHandler (and shares the same -// issues). -// -// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. -func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts( - SummaryOpts{ - Subsystem: "http", - ConstLabels: Labels{"handler": handlerName}, - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - handlerFunc, - ) -} - -// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same -// issues) but provides more flexibility (at the cost of a more complex call -// syntax). As InstrumentHandler, this function registers four metric -// collectors, but it uses the provided SummaryOpts to create them. However, the -// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced -// by "requests_total", "request_duration_microseconds", "request_size_bytes", -// and "response_size_bytes", respectively. "Help" is replaced by an appropriate -// help string. The names of the variable labels of the http_requests_total -// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). -// -// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the -// behavior of InstrumentHandler: -// -// prometheus.InstrumentHandlerWithOpts( -// prometheus.SummaryOpts{ -// Subsystem: "http", -// ConstLabels: prometheus.Labels{"handler": handlerName}, -// }, -// handler, -// ) -// -// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it -// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, -// and all its fields are set to the equally named fields in the provided -// SummaryOpts. -// -// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. -func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) -} - -// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares -// the same issues) but provides more flexibility (at the cost of a more complex -// call syntax). See InstrumentHandlerWithOpts for details how the provided -// SummaryOpts are used. -// -// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons -// as InstrumentHandler is. Use the tooling provided in package promhttp instead. 
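The deprecation comments recommend the promhttp tooling instead of the Summary-in-microseconds metrics created here. A hedged sketch of one such setup, chaining promhttp.InstrumentHandlerCounter and promhttp.InstrumentHandlerDuration; the metric names are illustrative, not prescribed by the library:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reqCnt := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "http_requests_total", Help: "Total HTTP requests."},
		[]string{"code", "method"},
	)
	reqDur := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{Name: "http_request_duration_seconds", Help: "Request latency in seconds."},
		[]string{"method"},
	)
	prometheus.MustRegister(reqCnt, reqDur)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Histograms in seconds instead of Summaries in microseconds.
	http.Handle("/", promhttp.InstrumentHandlerCounter(reqCnt,
		promhttp.InstrumentHandlerDuration(reqDur, hello)))
	log.Fatal(http.ListenAndServe(":8080", nil))
}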
-func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - reqCnt := NewCounterVec( - CounterOpts{ - Namespace: opts.Namespace, - Subsystem: opts.Subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: opts.ConstLabels, - }, - instLabels, - ) - if err := Register(reqCnt); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqCnt = are.ExistingCollector.(*CounterVec) - } else { - panic(err) - } - } - - opts.Name = "request_duration_microseconds" - opts.Help = "The HTTP request latencies in microseconds." - reqDur := NewSummary(opts) - if err := Register(reqDur); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqDur = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - opts.Name = "request_size_bytes" - opts.Help = "The HTTP request sizes in bytes." - reqSz := NewSummary(opts) - if err := Register(reqSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - opts.Name = "response_size_bytes" - opts.Help = "The HTTP response sizes in bytes." - resSz := NewSummary(opts) - if err := Register(resSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - resSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - - delegate := &responseWriterDelegator{ResponseWriter: w} - out := computeApproximateRequestSize(r) - - _, cn := w.(http.CloseNotifier) - _, fl := w.(http.Flusher) - _, hj := w.(http.Hijacker) - _, rf := w.(io.ReaderFrom) - var rw http.ResponseWriter - if cn && fl && hj && rf { - rw = &fancyResponseWriterDelegator{delegate} - } else { - rw = delegate - } - handlerFunc(rw, r) - - elapsed := float64(time.Since(now)) / float64(time.Microsecond) - - method := sanitizeMethod(r.Method) - code := sanitizeCode(delegate.status) - reqCnt.WithLabelValues(method, code).Inc() - reqDur.Observe(elapsed) - resSz.Observe(float64(delegate.written)) - reqSz.Observe(float64(<-out)) - }) -} - -func computeApproximateRequestSize(r *http.Request) <-chan int { - // Get URL length in current goroutine for avoiding a race condition. - // HandlerFunc that runs in parallel may modify the URL. - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - out := make(chan int, 1) - - go func() { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
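The AlreadyRegisteredError handling repeated four times above is a general idiom: on a duplicate registration, adopt the collector that is already registered. Factored out as a small helper (a sketch; the helper name is ours, not the library's):

package main

import "github.com/prometheus/client_golang/prometheus"

// registerOrReuse registers c, or returns the identical collector that was
// registered earlier if the registry reports a duplicate.
func registerOrReuse(c *prometheus.CounterVec) *prometheus.CounterVec {
	if err := prometheus.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			return are.ExistingCollector.(*prometheus.CounterVec)
		}
		panic(err)
	}
	return c
}

func main() {
	c := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "requests_total", Help: "Total requests."},
		[]string{"code", "method"},
	)
	c = registerOrReuse(c)
	c.WithLabelValues("200", "get").Inc()
}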
- - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s - close(out) - }() - - return out -} - -type responseWriterDelegator struct { - http.ResponseWriter - - status int - written int64 - wroteHeader bool -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type fancyResponseWriterDelegator struct { - *responseWriterDelegator -} - -func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { - return f.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (f *fancyResponseWriterDelegator) Flush() { - f.ResponseWriter.(http.Flusher).Flush() -} - -func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return f.ResponseWriter.(http.Hijacker).Hijack() -} - -func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { - if !f.wroteHeader { - f.WriteHeader(http.StatusOK) - } - n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) - f.written += n - return n, err -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} - -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - -// httpError removes any content-encoding header and then calls http.Error with -// the provided error and http.StatusInternalServerErrer. Error contents is -// supposed to be uncompressed plain text. 
However, same as with a plain -// http.Error, any header settings will be void if the header has already been -// sent. The error message will still be written to the writer, but it will -// probably be of limited use. -func httpError(rsp http.ResponseWriter, err error) { - rsp.Header().Del(contentEncodingHeader) - http.Error( - rsp, - "An error has occurred while serving metrics:\n\n"+err.Error(), - http.StatusInternalServerError, - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 55176d58ce6a2..9b809794212d2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -16,8 +16,6 @@ package prometheus import ( "errors" "os" - - "github.com/prometheus/procfs" ) type processCollector struct { @@ -59,20 +57,9 @@ type ProcessCollectorOpts struct { // collector for the current process with an empty namespace string and no error // reporting. // -// Currently, the collector depends on a Linux-style proc filesystem and -// therefore only exports metrics for Linux. -// -// Note: An older version of this function had the following signature: -// -// NewProcessCollector(pid int, namespace string) Collector -// -// Most commonly, it was called as -// -// NewProcessCollector(os.Getpid(), "") -// -// The following call of the current version is equivalent to the above: -// -// NewProcessCollector(ProcessCollectorOpts{}) +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. func NewProcessCollector(opts ProcessCollectorOpts) Collector { ns := "" if len(opts.Namespace) > 0 { @@ -126,7 +113,7 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { } // Set up process metric collection if supported by the runtime. 
- if _, err := procfs.NewStat(); err == nil { + if canCollectProcess() { c.collectFn = c.processCollect } else { c.collectFn = func(ch chan<- Metric) { @@ -153,46 +140,6 @@ func (c *processCollector) Collect(ch chan<- Metric) { c.collectFn(ch) } -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - c.reportError(ch, nil, err) - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - c.reportError(ch, nil, err) - return - } - - if stat, err := p.NewStat(); err == nil { - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) - if startTime, err := stat.StartTime(); err == nil { - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } else { - c.reportError(ch, c.startTime, err) - } - } else { - c.reportError(ch, nil, err) - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) - } else { - c.reportError(ch, c.openFDs, err) - } - - if limits, err := p.NewLimits(); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) - ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) - } else { - c.reportError(ch, nil, err) - } -} - func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { if !c.reportErrors { return diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go new file mode 100644 index 0000000000000..3117461cde758 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !windows + +package prometheus + +import ( + "github.com/prometheus/procfs" +) + +func canCollectProcess() bool { + _, err := procfs.NewDefaultFS() + return err == nil +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + c.reportError(ch, nil, err) + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + c.reportError(ch, nil, err) + return + } + + if stat, err := p.Stat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, nil, err) + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } else { + c.reportError(ch, c.openFDs, err) + } + + if limits, err := p.Limits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) + } else { + c.reportError(ch, nil, err) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go new file mode 100644 index 0000000000000..e0b935d1fef70 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -0,0 +1,112 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
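Usage of the rewritten process collector stays the same on every platform; only the per-OS processCollect implementation differs. A minimal registration sketch (registry layout and port are illustrative):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// Collects process metrics via procfs on Linux-style systems and via
	// Windows process APIs; on other platforms it registers but reports nothing.
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}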
+ +package prometheus + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +func canCollectProcess() bool { + return true +} + +var ( + modpsapi = syscall.NewLazyDLL("psapi.dll") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") +) + +type processMemoryCounters struct { + // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex + _ uint32 + PageFaultCount uint32 + PeakWorkingSetSize uint64 + WorkingSetSize uint64 + QuotaPeakPagedPoolUsage uint64 + QuotaPagedPoolUsage uint64 + QuotaPeakNonPagedPoolUsage uint64 + QuotaNonPagedPoolUsage uint64 + PagefileUsage uint64 + PeakPagefileUsage uint64 + PrivateUsage uint64 +} + +func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { + mem := processMemoryCounters{} + r1, _, err := procGetProcessMemoryInfo.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&mem)), + uintptr(unsafe.Sizeof(mem)), + ) + if r1 != 1 { + return mem, err + } else { + return mem, nil + } +} + +func getProcessHandleCount(handle windows.Handle) (uint32, error) { + var count uint32 + r1, _, err := procGetProcessHandleCount.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&count)), + ) + if r1 != 1 { + return 0, err + } else { + return count, nil + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + h, err := windows.GetCurrentProcess() + if err != nil { + c.reportError(ch, nil, err) + return + } + + var startTime, exitTime, kernelTime, userTime windows.Filetime + err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) + + mem, err := getProcessMemoryInfo(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) + + handles, err := getProcessHandleCount(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. 
+} + +func fileTimeToSeconds(ft windows.Filetime) float64 { + return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/BUILD.bazel b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/BUILD.bazel index 8d45fbb0874e9..99e58ccf75ee3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/BUILD.bazel +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/BUILD.bazel @@ -4,11 +4,8 @@ go_library( name = "go_default_library", srcs = [ "delegator.go", - "delegator_1_8.go", - "delegator_pre_1_8.go", "http.go", "instrument_client.go", - "instrument_client_1_8.go", "instrument_server.go", ], importmap = "k8s.io/kops/vendor/github.com/prometheus/client_golang/prometheus/promhttp", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 67b56d37cfd72..fa535684f96cb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -38,7 +38,6 @@ type delegator interface { type responseWriterDelegator struct { http.ResponseWriter - handler, method string status int written int64 wroteHeader bool @@ -75,8 +74,11 @@ type closeNotifierDelegator struct{ *responseWriterDelegator } type flusherDelegator struct{ *responseWriterDelegator } type hijackerDelegator struct{ *responseWriterDelegator } type readerFromDelegator struct{ *responseWriterDelegator } +type pusherDelegator struct{ *responseWriterDelegator } func (d closeNotifierDelegator) CloseNotify() <-chan bool { + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. 
return d.ResponseWriter.(http.CloseNotifier).CloseNotify() } func (d flusherDelegator) Flush() { @@ -93,6 +95,9 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { d.written += n return n, err } +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) @@ -196,4 +201,157 @@ func init() { http.CloseNotifier }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 + return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, pusherDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, pusherDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, pusherDelegator{d}, readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + 
} + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go deleted file mode 100644 index 31a70695695c5..0000000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
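The index comments (// 16 through // 31) follow a simple bit scheme: each optional interface contributes one bit to the pickDelegator index. A standalone sketch of that arithmetic; the constant values are assumed from those index comments rather than copied from the file:

package main

import "fmt"

const (
	closeNotifier = 1 << iota // 1
	flusher                   // 2
	hijacker                  // 4
	readerFrom                // 8
	pusher                    // 16
)

func main() {
	// A ResponseWriter that is also a Pusher, ReaderFrom, Flusher and
	// CloseNotifier ends up at index 16+8+2+1.
	fmt.Println(pusher + readerFrom + flusher + closeNotifier) // 27
}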
- -// +build go1.8 - -package promhttp - -import ( - "io" - "net/http" -) - -type pusherDelegator struct{ *responseWriterDelegator } - -func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { - return d.ResponseWriter.(http.Pusher).Push(target, opts) -} - -func init() { - pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 - return pusherDelegator{d} - } - pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 - return struct { - *responseWriterDelegator - http.Pusher - http.CloseNotifier - }{d, pusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - }{d, pusherDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - }{d, pusherDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - }{d, pusherDelegator{d}, readerFromDelegator{d}} - } - pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} - } - 
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } -} - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - if _, ok := w.(http.Pusher); ok { - id += pusher - } - - return pickDelegator[id](d) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go deleted file mode 100644 index 8bb9b8b68f8b4..0000000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !go1.8 - -package promhttp - -import ( - "io" - "net/http" -) - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - - return pickDelegator[id](d) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 668eb6b3c93ae..cea5a90fd9dd2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -47,7 +47,6 @@ import ( const ( contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" contentEncodingHeader = "Content-Encoding" acceptEncodingHeader = "Accept-Encoding" ) @@ -85,10 +84,32 @@ func Handler() http.Handler { // instrumentation. Use the InstrumentMetricHandler function to apply the same // kind of instrumentation as it is used by the Handler function. func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { - var inFlightSem chan struct{} + var ( + inFlightSem chan struct{} + errCnt = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_errors_total", + Help: "Total number of internal errors encountered by the promhttp metric handler.", + }, + []string{"cause"}, + ) + ) + if opts.MaxRequestsInFlight > 0 { inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) } + if opts.Registry != nil { + // Initialize all possibilites that can occur below. + errCnt.WithLabelValues("gathering") + errCnt.WithLabelValues("encoding") + if err := opts.Registry.Register(errCnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + errCnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + } h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if inFlightSem != nil { @@ -107,6 +128,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { if opts.ErrorLog != nil { opts.ErrorLog.Println("error gathering metrics:", err) } + errCnt.WithLabelValues("gathering").Inc() switch opts.ErrorHandling { case PanicOnError: panic(err) @@ -147,6 +169,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { if opts.ErrorLog != nil { opts.ErrorLog.Println("error encoding and sending metric family:", err) } + errCnt.WithLabelValues("encoding").Inc() switch opts.ErrorHandling { case PanicOnError: panic(err) @@ -237,9 +260,12 @@ const ( // Ignore errors and try to serve as many metrics as possible. However, // if no metrics can be served, serve an HTTP status code 500 and the // last error message in the body. Only use this in deliberate "best - // effort" metrics collection scenarios. It is recommended to at least - // log errors (by providing an ErrorLog in HandlerOpts) to not mask - // errors completely. + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. 
By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. ContinueOnError // Panic upon the first error encountered (useful for "crash only" apps). PanicOnError @@ -262,6 +288,18 @@ type HandlerOpts struct { // logged regardless of the configured ErrorHandling provided ErrorLog // is not nil. ErrorHandling HandlerErrorHandling + // If Registry is not nil, it is used to register a metric + // "promhttp_metric_handler_errors_total", partitioned by "cause". A + // failed registration causes a panic. Note that this error counter is + // different from the instrumentation you get from the various + // InstrumentHandler... helpers. It counts errors that don't necessarily + // result in a non-2xx HTTP status code. There are two typical cases: + // (1) Encoding errors that only happen after streaming of the HTTP body + // has already started (and the status code 200 has been sent). This + // should only happen with custom collectors. (2) Collection errors with + // no effect on the HTTP status code because ErrorHandling is set to + // ContinueOnError. + Registry prometheus.Registerer // If DisableCompression is true, the handler will never compress the // response, even if requested by the client. DisableCompression bool diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 86fd564470f81..83c49b66a8105 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -14,7 +14,9 @@ package promhttp import ( + "crypto/tls" "net/http" + "net/http/httptrace" "time" "github.com/prometheus/client_golang/prometheus" @@ -95,3 +97,123 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT return resp, err }) } + +// InstrumentTrace is used to offer flexibility in instrumenting the available +// httptrace.ClientTrace hook functions. Each function is passed a float64 +// representing the time in seconds since the start of the http request. A user +// may choose to use separately buckets Histograms, or implement custom +// instance labels on a per function basis. +type InstrumentTrace struct { + GotConn func(float64) + PutIdleConn func(float64) + GotFirstResponseByte func(float64) + Got100Continue func(float64) + DNSStart func(float64) + DNSDone func(float64) + ConnectStart func(float64) + ConnectDone func(float64) + TLSHandshakeStart func(float64) + TLSHandshakeDone func(float64) + WroteHeaders func(float64) + Wait100Continue func(float64) + WroteRequest func(float64) +} + +// InstrumentRoundTripperTrace is a middleware that wraps the provided +// RoundTripper and reports times to hook functions provided in the +// InstrumentTrace struct. Hook functions that are not present in the provided +// InstrumentTrace struct are ignored. Times reported to the hook functions are +// time since the start of the request. Only with Go1.9+, those times are +// guaranteed to never be negative. (Earlier Go versions are not using a +// monotonic clock.) Note that partitioning of Histograms is expensive and +// should be used judiciously. +// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. 
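Wiring the new Registry field together with ContinueOnError, as the comment suggests, looks roughly like this (a sketch; the registry contents are illustrative):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		// Errors are counted in promhttp_metric_handler_errors_total
		// instead of silently vanishing while serving continues.
		Registry:      reg,
		ErrorHandling: promhttp.ContinueOnError,
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}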
+// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) + + return next.RoundTrip(r) + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go deleted file mode 100644 index a034d1ec0f189..0000000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.8 - -package promhttp - -import ( - "context" - "crypto/tls" - "net/http" - "net/http/httptrace" - "time" -) - -// InstrumentTrace is used to offer flexibility in instrumenting the available -// httptrace.ClientTrace hook functions. Each function is passed a float64 -// representing the time in seconds since the start of the http request. 
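A sketch of a client instrumented with the trace hooks above, observing only time-to-first-byte; every other hook is optional and left nil, and the metric name is illustrative:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	ttfb := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "http_client_time_to_first_byte_seconds",
		Help: "Time from request start to the first response byte.",
	})
	prometheus.MustRegister(ttfb)

	trace := &promhttp.InstrumentTrace{
		GotFirstResponseByte: func(t float64) { ttfb.Observe(t) },
	}
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}

	if resp, err := client.Get("https://example.org"); err == nil {
		resp.Body.Close()
	}
}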
A user -// may choose to use separately buckets Histograms, or implement custom -// instance labels on a per function basis. -type InstrumentTrace struct { - GotConn func(float64) - PutIdleConn func(float64) - GotFirstResponseByte func(float64) - Got100Continue func(float64) - DNSStart func(float64) - DNSDone func(float64) - ConnectStart func(float64) - ConnectDone func(float64) - TLSHandshakeStart func(float64) - TLSHandshakeDone func(float64) - WroteHeaders func(float64) - Wait100Continue func(float64) - WroteRequest func(float64) -} - -// InstrumentRoundTripperTrace is a middleware that wraps the provided -// RoundTripper and reports times to hook functions provided in the -// InstrumentTrace struct. Hook functions that are not present in the provided -// InstrumentTrace struct are ignored. Times reported to the hook functions are -// time since the start of the request. Only with Go1.9+, those times are -// guaranteed to never be negative. (Earlier Go versions are not using a -// monotonic clock.) Note that partitioning of Histograms is expensive and -// should be used judiciously. -// -// For hook functions that receive an error as an argument, no observations are -// made in the event of a non-nil error value. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - start := time.Now() - - trace := &httptrace.ClientTrace{ - GotConn: func(_ httptrace.GotConnInfo) { - if it.GotConn != nil { - it.GotConn(time.Since(start).Seconds()) - } - }, - PutIdleConn: func(err error) { - if err != nil { - return - } - if it.PutIdleConn != nil { - it.PutIdleConn(time.Since(start).Seconds()) - } - }, - DNSStart: func(_ httptrace.DNSStartInfo) { - if it.DNSStart != nil { - it.DNSStart(time.Since(start).Seconds()) - } - }, - DNSDone: func(_ httptrace.DNSDoneInfo) { - if it.DNSDone != nil { - it.DNSDone(time.Since(start).Seconds()) - } - }, - ConnectStart: func(_, _ string) { - if it.ConnectStart != nil { - it.ConnectStart(time.Since(start).Seconds()) - } - }, - ConnectDone: func(_, _ string, err error) { - if err != nil { - return - } - if it.ConnectDone != nil { - it.ConnectDone(time.Since(start).Seconds()) - } - }, - GotFirstResponseByte: func() { - if it.GotFirstResponseByte != nil { - it.GotFirstResponseByte(time.Since(start).Seconds()) - } - }, - Got100Continue: func() { - if it.Got100Continue != nil { - it.Got100Continue(time.Since(start).Seconds()) - } - }, - TLSHandshakeStart: func() { - if it.TLSHandshakeStart != nil { - it.TLSHandshakeStart(time.Since(start).Seconds()) - } - }, - TLSHandshakeDone: func(_ tls.ConnectionState, err error) { - if err != nil { - return - } - if it.TLSHandshakeDone != nil { - it.TLSHandshakeDone(time.Since(start).Seconds()) - } - }, - WroteHeaders: func() { - if it.WroteHeaders != nil { - it.WroteHeaders(time.Since(start).Seconds()) - } - }, - Wait100Continue: func() { - if it.Wait100Continue != nil { - it.Wait100Continue(time.Since(start).Seconds()) - } - }, - WroteRequest: func(_ httptrace.WroteRequestInfo) { - if it.WroteRequest != nil { - it.WroteRequest(time.Since(start).Seconds()) - } - }, - } - r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace)) - - return next.RoundTrip(r) - }) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 
b5e70b93fa138..6c32516aa2e31 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -325,9 +325,17 @@ func (r *Registry) Register(c Collector) error { return nil } if existing, exists := r.collectorsByID[collectorID]; exists { - return AlreadyRegisteredError{ - ExistingCollector: existing, - NewCollector: c, + switch e := existing.(type) { + case *wrappingCollector: + return AlreadyRegisteredError{ + ExistingCollector: e.unwrapRecursively(), + NewCollector: c, + } + default: + return AlreadyRegisteredError{ + ExistingCollector: e, + NewCollector: c, + } } } // If the collectorID is new, but at least one of the descs existed @@ -680,7 +688,7 @@ func processMetric( // Gatherers is a slice of Gatherer instances that implements the Gatherer // interface itself. Its Gather method calls Gather on all Gatherers in the // slice in order and returns the merged results. Errors returned from the -// Gather calles are all returned in a flattened MultiError. Duplicate and +// Gather calls are all returned in a flattened MultiError. Duplicate and // inconsistent Metrics are skipped (first occurrence in slice order wins) and // reported in the returned error. // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 2980614dff4bd..c970fdee0e4ff 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -16,8 +16,10 @@ package prometheus import ( "fmt" "math" + "runtime" "sort" "sync" + "sync/atomic" "time" "github.com/beorn7/perks/quantile" @@ -37,7 +39,7 @@ const quantileLabel = "quantile" // A typical use-case is the observation of request latencies. By default, a // Summary provides the median, the 90th and the 99th percentile of the latency // as rank estimations. However, the default behavior will change in the -// upcoming v0.10 of the library. There will be no rank estimations at all by +// upcoming v1.0.0 of the library. There will be no rank estimations at all by // default. For a sane transition, it is recommended to set the desired rank // estimations explicitly. // @@ -56,16 +58,8 @@ type Summary interface { Observe(float64) } -// DefObjectives are the default Summary quantile values. -// -// Deprecated: DefObjectives will not be used as the default objectives in -// v0.10 of the library. The default Summary will have no quantiles then. -var ( - DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} - - errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", quantileLabel, - ) +var errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, ) // Default values for SummaryOpts. @@ -84,7 +78,7 @@ const ( // mandatory to set Name to a non-empty string. While all other fields are // optional and can safely be left at their zero value, it is recommended to set // a help string and to explicitly set the Objectives field to the desired value -// as the default value will change in the upcoming v0.10 of the library. +// as the default value will change in the upcoming v1.0.0 of the library. 
type SummaryOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Summary (created by joining these components with @@ -121,13 +115,8 @@ type SummaryOpts struct { // Objectives defines the quantile rank estimates with their respective // absolute error. If Objectives[q] = e, then the value reported for q // will be the φ-quantile value for some φ between q-e and q+e. The - // default value is DefObjectives. It is used if Objectives is left at - // its zero value (i.e. nil). To create a Summary without Objectives, - // set it to an empty map (i.e. map[float64]float64{}). - // - // Deprecated: Note that the current value of DefObjectives is - // deprecated. It will be replaced by an empty map in v0.10 of the - // library. Please explicitly set Objectives to the desired value. + // default value is an empty map, resulting in a summary without + // quantiles. Objectives map[float64]float64 // MaxAge defines the duration for which an observation stays relevant @@ -151,7 +140,7 @@ type SummaryOpts struct { BufCap uint32 } -// Great fuck-up with the sliding-window decay algorithm... The Merge method of +// Problem with the sliding-window decay algorithm... The Merge method of // perk/quantile is actually not working as advertised - and it might be // unfixable, as the underlying algorithm is apparently not capable of merging // summaries in the first place. To avoid using Merge, we are currently adding @@ -196,7 +185,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { } if opts.Objectives == nil { - opts.Objectives = DefObjectives + opts.Objectives = map[float64]float64{} } if opts.MaxAge < 0 { @@ -214,6 +203,17 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { opts.BufCap = DefBufCap } + if len(opts.Objectives) == 0 { + // Use the lock-free implementation of a Summary without objectives. + s := &noObjectivesSummary{ + desc: desc, + labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}}, + } + s.init(s) // Init self-collection. + return s + } + s := &summary{ desc: desc, @@ -382,6 +382,116 @@ func (s *summary) swapBufs(now time.Time) { } } +type summaryCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 +} + +type noObjectivesSummary struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // summaryCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the summary) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + + // Fields with atomic access first! 
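Since the Objectives default is now an empty map, a Summary without an explicit Objectives field reports no quantiles at all. A sketch that restores the previously-default rank estimations explicitly:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "request_duration_seconds",
		Help: "Request latency in seconds.",
		// The set that DefObjectives used to provide implicitly.
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.42)
}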
See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*summaryCounts + + labelPairs []*dto.LabelPair +} + +func (s *noObjectivesSummary) Desc() *Desc { + return s.desc +} + +func (s *noObjectivesSummary) Observe(v float64) { + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&s.countAndHotIdx, 1) + hotCounts := s.counts[n>>63] + + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +func (s *noObjectivesSummary) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + s.writeMtx.Lock() + defer s.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := s.counts[n>>63] + coldCounts := s.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. + } + + sum := &dto.Summary{ + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + + out.Summary = sum + out.Label = s.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + return nil +} + type quantSort []*dto.Quantile func (s quantSort) Len() int { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 49159bf3eb05f..e303eef6d33be 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -32,6 +32,12 @@ import ( // WrapRegistererWith provides a way to add fixed labels to a subset of // Collectors. It should not be used to add fixed labels to all metrics exposed. 
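The countAndHotIdx scheme documented in the noObjectivesSummary hunk above is easier to follow outside the diff. The standalone sketch below (not part of the vendored code) reproduces the same idea with plain sync/atomic primitives: writers pick the hot slot from the top bit and bump the lower 63 bits as a "started" marker, while a reader flips the bit under a mutex, waits for in-flight writers on the now-cold slot to complete, and then folds the cold slot back into the new hot one. As in the diff, the per-slot structs are held by pointer to keep their uint64 fields 64-bit aligned.

```go
package main

import (
	"fmt"
	"math"
	"runtime"
	"sync"
	"sync/atomic"
)

// slot holds one half of the hot/cold pair.
type slot struct {
	sumBits uint64 // float64 bits of the running sum of observations
	count   uint64 // number of completed observations in this slot
}

// hotColdSum mirrors the scheme from noObjectivesSummary: the top bit of
// countAndHotIdx selects the hot slot, the lower 63 bits count started
// observations, and each slot's count field marks completed observations.
type hotColdSum struct {
	countAndHotIdx uint64   // keep first for 64-bit alignment of atomics
	counts         [2]*slot // pointers, as in the diff, for alignment
	readMtx        sync.Mutex
}

func newHotColdSum() *hotColdSum {
	return &hotColdSum{counts: [2]*slot{{}, {}}}
}

func (h *hotColdSum) Observe(v float64) {
	n := atomic.AddUint64(&h.countAndHotIdx, 1) // "started" marker + hot index
	hot := h.counts[n>>63]
	for {
		oldBits := atomic.LoadUint64(&hot.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(&hot.sumBits, oldBits, newBits) {
			break
		}
	}
	atomic.AddUint64(&hot.count, 1) // completion marker
}

func (h *hotColdSum) Snapshot() (count uint64, sum float64) {
	h.readMtx.Lock()
	defer h.readMtx.Unlock()
	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) // flip the hot index
	count = n & ((1 << 63) - 1)
	hot, cold := h.counts[n>>63], h.counts[(^n)>>63]
	for count != atomic.LoadUint64(&cold.count) { // await cooldown
		runtime.Gosched()
	}
	sum = math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
	// Fold the cold slot back into the new hot slot and reset it.
	atomic.AddUint64(&hot.count, count)
	atomic.StoreUint64(&cold.count, 0)
	for {
		oldBits := atomic.LoadUint64(&hot.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + sum)
		if atomic.CompareAndSwapUint64(&hot.sumBits, oldBits, newBits) {
			atomic.StoreUint64(&cold.sumBits, 0)
			break
		}
	}
	return count, sum
}

func main() {
	h := newHotColdSum()
	for i := 0; i < 1000; i++ {
		h.Observe(0.5)
	}
	c, s := h.Snapshot()
	fmt.Println(c, s) // 1000 500
}
```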
// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. +// // The Collector example demonstrates a use of WrapRegistererWith. func WrapRegistererWith(labels Labels, reg Registerer) Registerer { return &wrappingRegisterer{ @@ -54,6 +60,12 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer { // (see NewGoCollector) and the process collector (see NewProcessCollector). (In // fact, those metrics are already prefixed with “go_” or “process_”, // respectively.) +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { return &wrappingRegisterer{ wrappedRegisterer: reg, @@ -123,6 +135,15 @@ func (c *wrappingCollector) Describe(ch chan<- *Desc) { } } +func (c *wrappingCollector) unwrapRecursively() Collector { + switch wc := c.wrappedCollector.(type) { + case *wrappingCollector: + return wc.unwrapRecursively() + default: + return wc + } +} + type wrappingMetric struct { wrappedMetric Metric prefix string diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 648b38cb6546d..26e92288c7c09 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -1,12 +1,12 @@ /* +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + HTTP Content-Type Autonegotiation. The functions in this package implement the behaviour specified in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index f7250909b9fd3..00804b7fedb02 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -21,7 +21,6 @@ import ( ) var ( - separator = []byte{0} // MetricNameRE is a regular expression matching valid metric // names. Note that the IsValidMetricName function performs the same // check but faster than a match with this regular expression. diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 46259b1f10947..7b0064fdb2329 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -150,7 +150,13 @@ func (t *Time) UnmarshalJSON(b []byte) error { return err } - *t = Time(v + va) + // If the value was something like -0.1 the negative is lost in the + // parsing because of the leading zero, this ensures that we capture it. 
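The expanded wrap.go documentation above reads more concretely next to a call site. Below is a brief sketch of combining WrapRegistererWith and WrapRegistererWithPrefix, assuming only the API shown in this diff; the label, prefix, and metric names are illustrative. Conflicts with collectors registered directly on the underlying registry are still detected, and any AlreadyRegisteredError carries the unwrapped ExistingCollector, per the registry.go hunk earlier.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	// Collectors registered through `wrapped` get a fixed component label and
	// a name prefix; collectors registered directly on `reg` are unaffected.
	wrapped := prometheus.WrapRegistererWithPrefix("myapp_",
		prometheus.WrapRegistererWith(prometheus.Labels{"component": "api"}, reg))

	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "requests_total", // exposed as myapp_requests_total{component="api"}
		Help: "Handled requests.",
	})
	wrapped.MustRegister(requests)
	requests.Inc()

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName()) // myapp_requests_total
	}
}
```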
+ if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { + *t = Time(v+va) * -1 + } else { + *t = Time(v + va) + } default: return fmt.Errorf("invalid time %q", string(b)) diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml new file mode 100644 index 0000000000000..438ca92eca5d6 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -0,0 +1,6 @@ +# Run only staticcheck for now. Additional linters will be enabled one-by-one. +linters: + enable: + - staticcheck + - govet + disable-all: true diff --git a/vendor/github.com/prometheus/procfs/BUILD.bazel b/vendor/github.com/prometheus/procfs/BUILD.bazel index 7383ab742e3da..730e9b9706e82 100644 --- a/vendor/github.com/prometheus/procfs/BUILD.bazel +++ b/vendor/github.com/prometheus/procfs/BUILD.bazel @@ -10,19 +10,19 @@ go_library( "mdstat.go", "mountstats.go", "net_dev.go", + "net_unix.go", "proc.go", "proc_io.go", "proc_limits.go", "proc_ns.go", + "proc_psi.go", "proc_stat.go", + "proc_status.go", "stat.go", "xfrm.go", ], importmap = "k8s.io/kops/vendor/github.com/prometheus/procfs", importpath = "github.com/prometheus/procfs", visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/prometheus/procfs/nfs:go_default_library", - "//vendor/github.com/prometheus/procfs/xfs:go_default_library", - ], + deps = ["//vendor/github.com/prometheus/procfs/internal/fs:go_default_library"], ) diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md index 35993c41c277f..56ba67d3e3172 100644 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -1 +1,2 @@ -* Tobias Schmidt +* Johannes 'fish' Ziemke @discordianfish +* Paul Gier @pgier diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index 947d7d8fa7179..616a0d25eb0d2 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -14,17 +14,16 @@ include Makefile.common %/.unpacked: %.ttar + @echo ">> extracting fixtures" ./ttar -C $(dir $*) -x -f $*.ttar touch $@ -update_fixtures: fixtures.ttar sysfs/fixtures.ttar - -%fixtures.ttar: %/fixtures - rm -v $(dir $*)fixtures/.unpacked - ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/ +update_fixtures: + rm -vf fixtures/.unpacked + ./ttar -c -f fixtures.ttar fixtures/ .PHONY: build build: .PHONY: test -test: fixtures/.unpacked sysfs/fixtures/.unpacked common-test +test: fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 741579e60f27c..c7f9ea64ff591 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -29,12 +29,15 @@ GO ?= go GOFMT ?= $(GO)fmt FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) GOOPTS ?= +GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) +GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') -unexport GOVENDOR +GOVENDOR := +GO111MODULE := ifeq (, $(PRE_GO_111)) ifneq (,$(wildcard go.mod)) # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). 
@@ -55,32 +58,57 @@ $(warning Some recipes may not work as expected as the current Go runtime is '$( # This repository isn't using Go modules (yet). GOVENDOR := $(FIRST_GOPATH)/bin/govendor endif - - unexport GO111MODULE endif PROMU := $(FIRST_GOPATH)/bin/promu -STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck pkgs = ./... -GO_VERSION ?= $(shell $(GO) version) -GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION))) +ifeq (arm, $(GOHOSTARCH)) + GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) +else + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) +endif -PROMU_VERSION ?= 0.2.0 +PROMU_VERSION ?= 0.4.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz +GOLANGCI_LINT := +GOLANGCI_LINT_OPTS ?= +GOLANGCI_LINT_VERSION ?= v1.16.0 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +# windows isn't included here because of the path separator being different. +ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif +endif + PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKER_REPO ?= prom -.PHONY: all -all: precheck style staticcheck unused build test +DOCKER_ARCHS ?= amd64 + +BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) +PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) +TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) + +ifeq ($(GOHOSTARCH),amd64) + ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) + # Only supported on amd64 + test-flags := -race + endif +endif # This rule is used to forward a target like "build" to "common-build". This # allows a new "build" target to be defined in a Makefile which includes this # one and override "common-build" without override warnings. %: common-% ; +.PHONY: common-all +common-all: precheck style check_license lint unused build test + .PHONY: common-style common-style: @echo ">> checking code style" @@ -102,6 +130,15 @@ common-check_license: exit 1; \ fi +.PHONY: common-deps +common-deps: + @echo ">> getting dependencies" +ifdef GO111MODULE + GO111MODULE=$(GO111MODULE) $(GO) mod download +else + $(GO) get $(GOOPTS) -t ./... +endif + .PHONY: common-test-short common-test-short: @echo ">> running short tests" @@ -110,26 +147,35 @@ common-test-short: .PHONY: common-test common-test: @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs) + GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs) .PHONY: common-format common-format: @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(GOOPTS) $(pkgs) + GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) -.PHONY: common-staticcheck -common-staticcheck: $(STATICCHECK) - @echo ">> running staticcheck" +.PHONY: common-lint +common-lint: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint" ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs) +# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. +# Otherwise staticcheck might fail randomly for some reason not yet explained. 
+ GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null + GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) else - $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) + $(GOLANGCI_LINT) run $(pkgs) endif +endif + +# For backward-compatibility. +.PHONY: common-staticcheck +common-staticcheck: lint .PHONY: common-unused common-unused: $(GOVENDOR) @@ -140,8 +186,9 @@ else ifdef GO111MODULE @echo ">> running check for unused/missing packages in go.mod" GO111MODULE=$(GO111MODULE) $(GO) mod tidy +ifeq (,$(wildcard vendor)) @git diff --exit-code -- go.sum go.mod -ifneq (,$(wildcard vendor)) +else @echo ">> running check for unused packages in vendor/" GO111MODULE=$(GO111MODULE) $(GO) mod vendor @git diff --exit-code -- go.sum go.mod vendor/ @@ -159,45 +206,48 @@ common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) -.PHONY: common-docker -common-docker: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . - -.PHONY: common-docker-publish -common-docker-publish: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" - -.PHONY: common-docker-tag-latest -common-docker-tag-latest: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest" +.PHONY: common-docker $(BUILD_DOCKER_ARCHS) +common-docker: $(BUILD_DOCKER_ARCHS) +$(BUILD_DOCKER_ARCHS): common-docker-%: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + --build-arg ARCH="$*" \ + --build-arg OS="linux" \ + . + +.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) +common-docker-publish: $(PUBLISH_DOCKER_ARCHS) +$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + +.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) +common-docker-tag-latest: $(TAG_DOCKER_ARCHS) +$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + +.PHONY: common-docker-manifest +common-docker-manifest: + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) $(PROMU): - curl -s -L $(PROMU_URL) | tar -xvz -C /tmp - mkdir -v -p $(FIRST_GOPATH)/bin - cp -v /tmp/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(PROMU) + $(eval PROMU_TMP := $(shell mktemp -d)) + curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) + mkdir -p $(FIRST_GOPATH)/bin + cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu + rm -r $(PROMU_TMP) .PHONY: proto proto: @echo ">> generating code from proto files" @./scripts/genproto.sh -.PHONY: $(STATICCHECK) -$(STATICCHECK): -ifdef GO111MODULE -# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}. -# See https://github.com/golang/go/issues/27643. -# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules. 
- tmpModule=$$(mktemp -d 2>&1) && \ - mkdir -p $${tmpModule}/staticcheck && \ - cd "$${tmpModule}"/staticcheck && \ - GO111MODULE=on $(GO) mod init example.com/staticcheck && \ - GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \ - rm -rf $${tmpModule}; -else - GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck +ifdef GOLANGCI_LINT +$(GOLANGCI_LINT): + mkdir -p $(FIRST_GOPATH)/bin + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif ifdef GOVENDOR @@ -212,7 +262,6 @@ precheck:: define PRECHECK_COMMAND_template = precheck:: $(1)_precheck - PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) .PHONY: $(1)_precheck $(1)_precheck: diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 2095494719b08..6f8850feb6783 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -1,7 +1,7 @@ # procfs This procfs package provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. +metrics from the pseudo-filesystems /proc and /sys. *WARNING*: This package is a work in progress. Its API may still break in backwards-incompatible ways without warnings. Use it at your own risk. @@ -9,3 +9,45 @@ backwards-incompatible ways without warnings. Use it at your own risk. [![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) [![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) + +## Usage + +The procfs library is organized by packages based on whether the gathered data is coming from +/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, /sys, or both. For example, current cpu statistics are gathered from +`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount +point is initialized, and then the stat information is read. + +```go +fs, err := procfs.NewFS("/proc") +stats, err := fs.Stat() +``` + +Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems. + +```go + fs, err := blockdevice.NewFS("/proc", "/sys") + stats, err := fs.ProcDiskstats() +``` + +## Building and Testing + +The procfs library is normally built as part of another application. However, when making +changes to the library, the `make test` command can be used to run the API test suite. + +### Updating Test Fixtures + +The procfs library includes a set of test fixtures which include many example files from +the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file +which is extracted automatically during testing. To add/update the test fixtures, first +ensure the `fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make fixtures/.unpacked` or just `make test`. + +```bash +rm -rf fixtures +make test +``` + +Next, make the required changes to the extracted files in the `fixtures` directory. When +the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file +based on the updated `fixtures` directory. 
And finally, verify the changes using +`git diff fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index d3a8268078c7b..63d4229a45a0f 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -31,19 +31,9 @@ type BuddyInfo struct { Sizes []float64 } -// NewBuddyInfo reads the buddyinfo statistics. -func NewBuddyInfo() ([]BuddyInfo, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewBuddyInfo() -} - // NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. -func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { - file, err := os.Open(fs.Path("buddyinfo")) +func (fs FS) BuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.proc.Path("buddyinfo")) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar index 13c831ef59900..951d909af53a6 100644 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -1,45 +1,48 @@ # Archive created by ttar -c -f fixtures.ttar fixtures/ Directory: fixtures +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231 +Directory: fixtures/proc/26231 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/cmdline +Path: fixtures/proc/26231/cmdline Lines: 1 vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/comm +Path: fixtures/proc/26231/comm Lines: 1 vim Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/cwd +Path: fixtures/proc/26231/cwd SymlinkTo: /usr/bin # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/exe +Path: fixtures/proc/26231/exe SymlinkTo: /usr/bin/vim # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/fd +Directory: fixtures/proc/26231/fd Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/0 +Path: fixtures/proc/26231/fd/0 SymlinkTo: ../../symlinktargets/abc # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/1 +Path: fixtures/proc/26231/fd/1 SymlinkTo: ../../symlinktargets/def # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/10 +Path: fixtures/proc/26231/fd/10 SymlinkTo: ../../symlinktargets/xyz # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/2 +Path: fixtures/proc/26231/fd/2 SymlinkTo: ../../symlinktargets/ghi # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/3 +Path: fixtures/proc/26231/fd/3 SymlinkTo: ../../symlinktargets/uvw # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/io +Path: fixtures/proc/26231/io Lines: 7 rchar: 750339 wchar: 818609 @@ -50,7 +53,7 @@ write_bytes: 2048 cancelled_write_bytes: -1024 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - -Path: fixtures/26231/limits +Path: fixtures/proc/26231/limits Lines: 17 Limit Soft Limit Hard Limit Units Max cpu time unlimited unlimited seconds @@ -71,14 +74,14 @@ Max realtime priority 0 0 Max realtime timeout unlimited unlimited us Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/mountstats -Lines: 19 +Path: fixtures/proc/26231/mountstats +Lines: 20 device rootfs mounted on / with fstype rootfs device sysfs mounted on /sys with fstype sysfs device proc mounted on /proc with fstype proc device /dev/sda1 mounted on / with fstype ext4 device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none + opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none age: 13968 caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured @@ -91,13 +94,14 @@ device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers= NULL: 0 0 0 0 0 0 0 0 READ: 1298 1298 0 207680 1210292152 6 79386 79407 WRITE: 0 0 0 0 0 0 0 0 + ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/net +Directory: fixtures/proc/26231/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/net/dev +Path: fixtures/proc/26231/net/dev Lines: 4 Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed @@ -105,142 +109,226 @@ Inter-| Receive | Transmit eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/ns +Directory: fixtures/proc/26231/ns Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/ns/mnt +Path: fixtures/proc/26231/ns/mnt SymlinkTo: mnt:[4026531840] # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/ns/net +Path: fixtures/proc/26231/ns/net SymlinkTo: net:[4026531993] # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/root +Path: fixtures/proc/26231/root SymlinkTo: / # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/stat +Path: fixtures/proc/26231/stat Lines: 1 26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26232 +Path: fixtures/proc/26231/status +Lines: 53 + +Name: prometheus +Umask: 0022 +State: S (sleeping) +Tgid: 1 +Ngid: 0 +Pid: 1 +PPid: 0 +TracerPid: 0 +Uid: 0 0 0 0 +Gid: 0 0 0 0 
+FDSize: 128 +Groups: +NStgid: 1 +NSpid: 1 +NSpgid: 1 +NSsid: 1 +VmPeak: 58472 kB +VmSize: 58440 kB +VmLck: 0 kB +VmPin: 0 kB +VmHWM: 8028 kB +VmRSS: 6716 kB +RssAnon: 2092 kB +RssFile: 4624 kB +RssShmem: 0 kB +VmData: 2580 kB +VmStk: 136 kB +VmExe: 948 kB +VmLib: 6816 kB +VmPTE: 128 kB +VmPMD: 12 kB +VmSwap: 660 kB +HugetlbPages: 0 kB +Threads: 1 +SigQ: 8/63965 +SigPnd: 0000000000000000 +ShdPnd: 0000000000000000 +SigBlk: 7be3c0fe28014a03 +SigIgn: 0000000000001000 +SigCgt: 00000001800004ec +CapInh: 0000000000000000 +CapPrm: 0000003fffffffff +CapEff: 0000003fffffffff +CapBnd: 0000003fffffffff +CapAmb: 0000000000000000 +Seccomp: 0 +Cpus_allowed: ff +Cpus_allowed_list: 0-7 +Mems_allowed: 00000000,00000001 +Mems_allowed_list: 0 +voluntary_ctxt_switches: 4742839 +nonvoluntary_ctxt_switches: 1727500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26232 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/cmdline +Path: fixtures/proc/26232/cmdline Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/comm +Path: fixtures/proc/26232/comm Lines: 1 ata_sff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/cwd +Path: fixtures/proc/26232/cwd SymlinkTo: /does/not/exist # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26232/fd +Directory: fixtures/proc/26232/fd Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/0 +Path: fixtures/proc/26232/fd/0 SymlinkTo: ../../symlinktargets/abc # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/1 +Path: fixtures/proc/26232/fd/1 SymlinkTo: ../../symlinktargets/def # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/2 +Path: fixtures/proc/26232/fd/2 SymlinkTo: ../../symlinktargets/ghi # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/3 +Path: fixtures/proc/26232/fd/3 SymlinkTo: ../../symlinktargets/uvw # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/4 +Path: fixtures/proc/26232/fd/4 SymlinkTo: ../../symlinktargets/xyz # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/limits +Path: fixtures/proc/26232/limits Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/root +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 
unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 29436 29436 processes +Max open files 1024 4096 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 29436 29436 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/root SymlinkTo: /does/not/exist # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/stat +Path: fixtures/proc/26232/stat Lines: 1 33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26233 +Directory: fixtures/proc/26233 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26233/cmdline +Path: fixtures/proc/26233/cmdline Lines: 1 com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/584 +Directory: fixtures/proc/584 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/584/stat +Path: fixtures/proc/584/stat Lines: 2 1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 #!/bin/cat /proc/self/stat Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/short -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/short/buddyinfo -Lines: 3 -Node 0, zone -Node 0, zone -Node 0, zone -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/sizemismatch -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/sizemismatch/buddyinfo +Path: fixtures/proc/buddyinfo Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 +Node 0, zone Normal 4381 1093 185 
1530 567 102 4 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/valid -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/valid/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 -Mode: 644 +Path: fixtures/proc/diskstats +Lines: 49 + 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 + 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 + 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 + 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 + 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 + 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 + 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 + 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 + 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 + 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 + 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 + 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 + 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 + 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 + 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 + 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 + 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 + 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 + 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 + 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 + 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 + 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 + 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 + 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 + 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 + 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36 + 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 + 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 + 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 + 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 + 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 + 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 + 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 + 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 + 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 + 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 + 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 + 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 + 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 + 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 + 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 + 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 + 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 + 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 + 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 + 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 + 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 + 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 + 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 +Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/fs +Directory: fixtures/proc/fs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/fs/xfs +Directory: fixtures/proc/fs/xfs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/fs/xfs/stat +Path: fixtures/proc/fs/xfs/stat Lines: 23 extent_alloc 92447 97589 92448 93751 abt 0 0 0 0 @@ -267,18 +355,18 @@ xpc 399724544 92823103 86219234 debug 0 Mode: 644 # ttar - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/mdstat +Path: fixtures/proc/mdstat Lines: 26 Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - + md127 : active raid1 sdi2[0] sdj2[1] 312319552 blocks [2/2] [UU] - + md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] 248896 blocks [2/2] [UU] - + md4 : inactive raid1 sda3[0] sdb3[1] 4883648 blocks [2/2] [UU] @@ -297,10 +385,10 @@ md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] unused devices: Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/net +Directory: fixtures/proc/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/dev +Path: fixtures/proc/net/dev Lines: 6 Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed @@ -310,7 +398,7 @@ docker0: 2568 38 0 0 0 0 0 0 438 eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/ip_vs +Path: fixtures/proc/net/ip_vs Lines: 21 IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags @@ -335,7 +423,7 @@ FWM 10001000 wlc -> C0A83215:0CEA Route 0 0 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/ip_vs_stats +Path: fixtures/proc/net/ip_vs_stats Lines: 6 Total Incoming Outgoing Incoming Outgoing Conns Packets Packets Bytes Bytes @@ -345,10 +433,10 @@ Lines: 6 4 1FB3C 0 1282A8F 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/net/rpc +Directory: fixtures/proc/net/rpc Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/rpc/nfs +Path: fixtures/proc/net/rpc/nfs Lines: 5 net 18628 0 18628 6 rpc 4329785 0 4338291 @@ -357,7 +445,7 @@ proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/rpc/nfsd +Path: fixtures/proc/net/rpc/nfsd Lines: 11 rc 0 6 18622 fh 0 0 0 0 0 @@ -372,7 +460,27 @@ proc4 2 2 10853 proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/xfrm_stat +Path: fixtures/proc/net/unix +Lines: 6 +Num RefCount Protocol Flags Type St Inode Path +0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control +0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log +0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 00000003 00000000 00000000 0001 03 5091797 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/proc/net/unix_without_inode +Lines: 6 +Num RefCount Protocol Flags Type St Path +0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control +0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log +0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 00000003 00000000 00000000 0001 03 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/xfrm_stat Lines: 28 XfrmInError 1 XfrmInBufferError 2 @@ -404,10 +512,30 @@ XfrmOutStateInvalid 28765 XfrmAcquireError 24532 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/self +Directory: fixtures/proc/pressure +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/cpu +Lines: 1 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/io +Lines: 2 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +full avg10=0.20 avg60=3.00 avg300=4.95 total=25 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/memory +Lines: 2 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +full avg10=0.20 avg60=3.00 avg300=4.95 total=25 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/self SymlinkTo: 26231 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/stat +Path: fixtures/proc/stat Lines: 16 cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 @@ -427,36 +555,1254 @@ procs_blocked 1 softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/symlinktargets +Directory: fixtures/proc/symlinktargets Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/README +Path: fixtures/proc/symlinktargets/README Lines: 2 This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
They are otherwise ignored by the tests Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/abc +Path: fixtures/proc/symlinktargets/abc +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/def Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/def +Path: fixtures/proc/symlinktargets/ghi Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/ghi +Path: fixtures/proc/symlinktargets/uvw +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/symlinktargets/xyz +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/dm-0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/dm-0/stat +Lines: 1 +6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/stat +Lines: 1 +9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/net +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/net/eth0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/addr_assign_type +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/addr_len +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/address +Lines: 1 +01:01:01:01:01:01 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/broadcast +Lines: 1 +ff:ff:ff:ff:ff:ff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_changes +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_down_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_up_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/dev_id +Lines: 1 +0x20 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/dormant +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/duplex +Lines: 1 +full +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/flags +Lines: 1 +0x1303 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/ifalias Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/uvw +Path: fixtures/sys/class/net/eth0/ifindex +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/iflink +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/link_mode +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/mtu +Lines: 1 +1500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/name_assign_type +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/netdev_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/phys_port_id Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/xyz +Path: fixtures/sys/class/net/eth0/phys_port_name Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/.unpacked +Path: fixtures/sys/class/net/eth0/phys_switch_id Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/speed +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/tx_queue_len +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/type +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/power_supply +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/power_supply/AC +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/AC/online +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/AC/type +Lines: 1 +Mains +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/AC/uevent +Lines: 2 +POWER_SUPPLY_NAME=AC +POWER_SUPPLY_ONLINE=0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/power_supply/BAT0 +Mode: 755 +# ttar 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/alarm +Lines: 1 +2503000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/capacity +Lines: 1 +98 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/capacity_level +Lines: 1 +Normal +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/charge_start_threshold +Lines: 1 +95 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/charge_stop_threshold +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/cycle_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/energy_full +Lines: 1 +50060000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/energy_full_design +Lines: 1 +47520000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/energy_now +Lines: 1 +49450000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/manufacturer +Lines: 1 +LGC +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/model_name +Lines: 1 +LNV-45N1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/power_now +Lines: 1 +4830000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/present +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/serial_number +Lines: 1 +38109 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/status +Lines: 1 +Discharging +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/technology +Lines: 1 +Li-ion +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/type +Lines: 1 +Battery +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/uevent +Lines: 16 +POWER_SUPPLY_NAME=BAT0 +POWER_SUPPLY_STATUS=Discharging +POWER_SUPPLY_PRESENT=1 +POWER_SUPPLY_TECHNOLOGY=Li-ion +POWER_SUPPLY_CYCLE_COUNT=0 +POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 +POWER_SUPPLY_VOLTAGE_NOW=12229000 +POWER_SUPPLY_POWER_NOW=4830000 +POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 +POWER_SUPPLY_ENERGY_FULL=50060000 +POWER_SUPPLY_ENERGY_NOW=49450000 +POWER_SUPPLY_CAPACITY=98 +POWER_SUPPLY_CAPACITY_LEVEL=Normal +POWER_SUPPLY_MODEL_NAME=LNV-45N1 +POWER_SUPPLY_MANUFACTURER=LGC +POWER_SUPPLY_SERIAL_NUMBER=38109 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/power_supply/BAT0/voltage_min_design +Lines: 1 +10800000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0/voltage_now +Lines: 1 +12229000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/thermal_zone0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/policy +Lines: 1 +step_wise +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/temp +Lines: 1 +49925 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/type +Lines: 1 +bcm2835_thermal +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/thermal_zone1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/mode +Lines: 1 +enabled +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/passive +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/policy +Lines: 1 +step_wise +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/temp +Lines: 1 +44000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/type +Lines: 1 +acpitz Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written +Lines: 1 +512 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats +Lines: 5 +Unused: 99% +Metadata: 0% +Average: 10473 +Sectors per Q: 64 +Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 
20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/clocksource +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/clocksource/clocksource0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource +Lines: 1 +tsc hpet acpi_pm +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource +Lines: 1 +tsc +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq +SymlinkTo: ../cpufreq/policy0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq +Lines: 1 +1200195 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq +Lines: 1 +3300000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq +Lines: 1 +1200000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency +Lines: 1 +4294967295 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus +Lines: 1 +1 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors +Lines: 1 +performance powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver +Lines: 1 +intel_pstate +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor +Lines: 1 +powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq +Lines: 1 +3300000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq +Lines: 1 +1200000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed +Lines: 1 + +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq +Lines: 1 +2400000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq +Lines: 1 +800000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors +Lines: 1 +performance powersave +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq +Lines: 1 +1219917 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver +Lines: 1 +intel_pstate +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor +Lines: 1 +powersave +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq +Lines: 1 +2400000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq +Lines: 1 +800000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed +Lines: 1 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 +Mode: 777 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions +Lines: 1 +0 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 +Mode: 777 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written +Lines: 1 +512 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats +Lines: 5 +Unused: 99% +Metadata: 0% +Average: 10473 +Sectors per Q: 64 +Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us +Lines: 1 +1305 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sda1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sda1/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/xfs/sda1/stats/stats +Lines: 1 +extent_alloc 1 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sdb1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sdb1/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/xfs/sdb1/stats/stats +Lines: 1 +extent_alloc 2 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index b6c6b2ce1f06d..0102ab0fd856c 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -14,69 +14,30 @@ package procfs import ( - "fmt" - "os" - "path" - - "github.com/prometheus/procfs/nfs" - "github.com/prometheus/procfs/xfs" + "github.com/prometheus/procfs/internal/fs" ) -// FS represents the pseudo-filesystem proc, which provides an interface to +// FS represents the pseudo-filesystem sys, which provides an interface to // kernel data structures. -type FS string - -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = "/proc" - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %s: %s", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %s is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path returns the path of the given subsystem relative to the procfs root. -func (fs FS) Path(p ...string) string { - return path.Join(append([]string{string(fs)}, p...)...) +type FS struct { + proc fs.FS } -// XFSStats retrieves XFS filesystem runtime statistics. -func (fs FS) XFSStats() (*xfs.Stats, error) { - f, err := os.Open(fs.Path("fs/xfs/stat")) - if err != nil { - return nil, err - } - defer f.Close() - - return xfs.ParseStats(f) -} - -// NFSClientRPCStats retrieves NFS client RPC statistics. -func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { - f, err := os.Open(fs.Path("net/rpc/nfs")) - if err != nil { - return nil, err - } - defer f.Close() +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = fs.DefaultProcMountPoint - return nfs.ParseClientRPCStats(f) +// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. +// It will error if the mount point directory can't be read or is a file. +func NewDefaultFS() (FS, error) { + return NewFS(DefaultMountPoint) } -// NFSdServerRPCStats retrieves NFS daemon RPC statistics. 
-func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { - f, err := os.Open(fs.Path("net/rpc/nfsd")) +// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error +// if the mount point directory can't be read or is a file. +func NewFS(mountPoint string) (FS, error) { + fs, err := fs.NewFS(mountPoint) if err != nil { - return nil, err + return FS{}, err } - defer f.Close() - - return nfs.ParseServerRPCStats(f) + return FS{fs}, nil } diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod index e89ee6c90f608..8a1b839fd2327 100644 --- a/vendor/github.com/prometheus/procfs/go.mod +++ b/vendor/github.com/prometheus/procfs/go.mod @@ -1 +1,3 @@ module github.com/prometheus/procfs + +require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum new file mode 100644 index 0000000000000..7827dd3d56c53 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/prometheus/procfs/internal/util/BUILD.bazel b/vendor/github.com/prometheus/procfs/internal/fs/BUILD.bazel similarity index 61% rename from vendor/github.com/prometheus/procfs/internal/util/BUILD.bazel rename to vendor/github.com/prometheus/procfs/internal/fs/BUILD.bazel index a9b95eb98b64b..49043c0be305d 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/BUILD.bazel +++ b/vendor/github.com/prometheus/procfs/internal/fs/BUILD.bazel @@ -2,11 +2,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "parse.go", - "sysreadfile_linux.go", - ], - importmap = "k8s.io/kops/vendor/github.com/prometheus/procfs/internal/util", - importpath = "github.com/prometheus/procfs/internal/util", + srcs = ["fs.go"], + importmap = "k8s.io/kops/vendor/github.com/prometheus/procfs/internal/fs", + importpath = "github.com/prometheus/procfs/internal/fs", visibility = ["//vendor/github.com/prometheus/procfs:__subpackages__"], ) diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go new file mode 100644 index 0000000000000..c66a1cf80e481 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -0,0 +1,52 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "os" + "path/filepath" +) + +const ( + // DefaultProcMountPoint is the common mount point of the proc filesystem. + DefaultProcMountPoint = "/proc" + + // DefaultSysMountPoint is the common mount point of the sys filesystem. 
+ DefaultSysMountPoint = "/sys" +) + +// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an +// interface to kernel data structures. +type FS string + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path appends the given path elements to the filesystem path, adding separators +// as necessary. +func (fs FS) Path(p ...string) string { + return filepath.Join(append([]string{string(fs)}, p...)...) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go deleted file mode 100644 index 2ff228e9d1f3e..0000000000000 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "io/ioutil" - "strconv" - "strings" -) - -// ParseUint32s parses a slice of strings into a slice of uint32s. -func ParseUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} - -// ParseUint64s parses a slice of strings into a slice of uint64s. -func ParseUint64s(ss []string) ([]uint64, error) { - us := make([]uint64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, u) - } - - return us, nil -} - -// ReadUintFromFile reads a file and attempts to parse a uint64 from it. -func ReadUintFromFile(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go deleted file mode 100644 index df0d567b78069..0000000000000 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package util - -import ( - "bytes" - "os" - "syscall" -) - -// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. -// https://github.com/prometheus/node_exporter/pull/728/files -func SysReadFile(file string) (string, error) { - f, err := os.Open(file) - if err != nil { - return "", err - } - defer f.Close() - - // On some machines, hwmon drivers are broken and return EAGAIN. This causes - // Go's ioutil.ReadFile implementation to poll forever. - // - // Since we either want to read data or bail immediately, do the simplest - // possible read using syscall directly. - b := make([]byte, 128) - n, err := syscall.Read(int(f.Fd()), b) - if err != nil { - return "", err - } - - return string(bytes.TrimSpace(b[:n])), nil -} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index e36d4a3bd08e2..2d6cb8d1c6c8b 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -62,19 +62,9 @@ type IPVSBackendStatus struct { Weight uint64 } -// NewIPVSStats reads the IPVS statistics. -func NewIPVSStats() (IPVSStats, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return IPVSStats{}, err - } - - return fs.NewIPVSStats() -} - -// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) NewIPVSStats() (IPVSStats, error) { - file, err := os.Open(fs.Path("net/ip_vs_stats")) +// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) IPVSStats() (IPVSStats, error) { + file, err := os.Open(fs.proc.Path("net/ip_vs_stats")) if err != nil { return IPVSStats{}, err } @@ -131,19 +121,9 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) { return stats, nil } -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. -func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return []IPVSBackendStatus{}, err - } - - return fs.NewIPVSBackendStatus() -} - -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. -func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.Path("net/ip_vs")) +// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.proc.Path("net/ip_vs")) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 9dc19583d8d00..71c10678250d4 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -42,64 +42,64 @@ type MDStat struct { BlocksSynced int64 } -// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. -func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { - mdStatusFilePath := fs.Path("mdstat") - content, err := ioutil.ReadFile(mdStatusFilePath) +// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. 
More information available here: +// https://raid.wiki.kernel.org/index.php/Mdstat +func (fs FS) MDStat() ([]MDStat, error) { + data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) if err != nil { - return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) } + mdstat, err := parseMDStat(data) + if err != nil { + return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) + } + return mdstat, nil +} - mdStates := []MDStat{} - lines := strings.Split(string(content), "\n") +// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. +func parseMDStat(mdstatData []byte) ([]MDStat, error) { + mdStats := []MDStat{} + lines := strings.Split(string(mdstatData), "\n") for i, l := range lines { - if l == "" { - continue - } - if l[0] == ' ' { - continue - } - if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + if strings.TrimSpace(l) == "" || l[0] == ' ' || + strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { continue } - mainLine := strings.Split(l, " ") - if len(mainLine) < 3 { - return mdStates, fmt.Errorf("error parsing mdline: %s", l) + deviceFields := strings.Fields(l) + if len(deviceFields) < 3 { + return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", l) } - mdName := mainLine[0] - activityState := mainLine[2] + mdName := deviceFields[0] + activityState := deviceFields[2] if len(lines) <= i+3 { - return mdStates, fmt.Errorf( - "error parsing %s: too few lines for md device %s", - mdStatusFilePath, - mdName, - ) + return mdStats, fmt.Errorf("missing lines for md device %s", mdName) } - active, total, size, err := evalStatusline(lines[i+1]) + active, total, size, err := evalStatusLine(lines[i+1]) if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + return nil, err } - // j is the line number of the syncing-line. - j := i + 2 + syncLineIdx := i + 2 if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - j = i + 3 + syncLineIdx++ } - // If device is syncing at the moment, get the number of currently + // If device is recovering/syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. 
syncedBlocks := size - if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { - syncedBlocks, err = evalBuildline(lines[j]) + if strings.Contains(lines[syncLineIdx], "recovery") || strings.Contains(lines[syncLineIdx], "resync") { + syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + return nil, err } } - mdStates = append(mdStates, MDStat{ + mdStats = append(mdStats, MDStat{ Name: mdName, ActivityState: activityState, DisksActive: active, @@ -109,10 +109,10 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { }) } - return mdStates, nil + return mdStats, nil } -func evalStatusline(statusline string) (active, total, size int64, err error) { +func evalStatusLine(statusline string) (active, total, size int64, err error) { matches := statuslineRE.FindStringSubmatch(statusline) if len(matches) != 4 { return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) @@ -136,7 +136,7 @@ func evalStatusline(statusline string) (active, total, size int64, err error) { return active, total, size, nil } -func evalBuildline(buildline string) (syncedBlocks int64, err error) { +func evalRecoveryLine(buildline string) (syncedBlocks int64, err error) { matches := buildlineRE.FindStringSubmatch(buildline) if len(matches) != 2 { return 0, fmt.Errorf("unexpected buildline: %s", buildline) diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 7a8a1e0990143..35b2ef3513f91 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -69,6 +69,8 @@ type MountStats interface { type MountStatsNFS struct { // The version of statistics provided. StatVersion string + // The mount options of the NFS mount. + Opts map[string]string // The age of the NFS mount. Age time.Duration // Statistics related to byte counters for various operations. @@ -179,11 +181,11 @@ type NFSOperationStats struct { // Number of bytes received for this operation, including RPC headers and payload. BytesReceived uint64 // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueTime time.Duration + CumulativeQueueMilliseconds uint64 // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseTime time.Duration + CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestTime time.Duration + CumulativeTotalRequestMilliseconds uint64 } // A NFSTransportStats contains statistics for the NFS mount RPC requests and @@ -202,7 +204,7 @@ type NFSTransportStats struct { // spent waiting for connections to the server to be established. ConnectIdleTime uint64 // Duration since the NFS mount last saw any RPC traffic. - IdleTime time.Duration + IdleTimeSeconds uint64 // Number of RPC requests for this mount sent to the NFS server. Sends uint64 // Number of RPC responses for this mount received from the NFS server. 
@@ -317,6 +319,7 @@ func parseMount(ss []string) (*Mount, error) { func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { // Field indicators for parsing specific types of data const ( + fieldOpts = "opts:" fieldAge = "age:" fieldBytes = "bytes:" fieldEvents = "events:" @@ -338,6 +341,18 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e } switch ss[0] { + case fieldOpts: + if stats.Opts == nil { + stats.Opts = map[string]string{} + } + for _, opt := range strings.Split(ss[1], ",") { + split := strings.Split(opt, "=") + if len(split) == 2 { + stats.Opts[split[0]] = split[1] + } else { + stats.Opts[opt] = "" + } + } case fieldAge: // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") @@ -509,15 +524,15 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { } ops = append(ops, NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, - CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, - CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueMilliseconds: ns[5], + CumulativeTotalResponseMilliseconds: ns[6], + CumulativeTotalRequestMilliseconds: ns[7], }) } @@ -593,7 +608,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats Bind: ns[1], Connect: ns[2], ConnectIdleTime: ns[3], - IdleTime: time.Duration(ns[4]) * time.Second, + IdleTimeSeconds: ns[4], Sends: ns[5], Receives: ns[6], BadTransactionIDs: ns[7], diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go index 3f2523371ab10..a0b7a01196af7 100644 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -47,23 +47,13 @@ type NetDevLine struct { // are interface names. type NetDev map[string]NetDevLine -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func NewNetDev() (NetDev, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewNetDev() -} - -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func (fs FS) NewNetDev() (NetDev, error) { - return newNetDev(fs.Path("net/dev")) +// NetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NetDev() (NetDev, error) { + return newNetDev(fs.proc.Path("net/dev")) } -// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. -func (p Proc) NewNetDev() (NetDev, error) { +// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NetDev() (NetDev, error) { return newNetDev(p.path("net/dev")) } @@ -75,7 +65,7 @@ func newNetDev(file string) (NetDev, error) { } defer f.Close() - nd := NetDev{} + netDev := NetDev{} s := bufio.NewScanner(f) for n := 0; s.Scan(); n++ { // Skip the 2 header lines. 
@@ -83,20 +73,20 @@ func newNetDev(file string) (NetDev, error) { continue } - line, err := nd.parseLine(s.Text()) + line, err := netDev.parseLine(s.Text()) if err != nil { - return nd, err + return netDev, err } - nd[line.Name] = *line + netDev[line.Name] = *line } - return nd, s.Err() + return netDev, s.Err() } // parseLine parses a single line from the /proc/net/dev file. Header lines // must be filtered prior to calling this method. -func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { +func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { parts := strings.SplitN(rawLine, ":", 2) if len(parts) != 2 { return nil, errors.New("invalid net/dev line, missing colon") @@ -185,11 +175,11 @@ func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { // Total aggregates the values across interfaces and returns a new NetDevLine. // The Name field will be a sorted comma separated list of interface names. -func (nd NetDev) Total() NetDevLine { +func (netDev NetDev) Total() NetDevLine { total := NetDevLine{} - names := make([]string, 0, len(nd)) - for _, ifc := range nd { + names := make([]string, 0, len(netDev)) + for _, ifc := range netDev { names = append(names, ifc.Name) total.RxBytes += ifc.RxBytes total.RxPackets += ifc.RxPackets diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go new file mode 100644 index 0000000000000..240340a83ab91 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -0,0 +1,275 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// For the proc file format details, +// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 +// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. + +const ( + netUnixKernelPtrIdx = iota + netUnixRefCountIdx + _ + netUnixFlagsIdx + netUnixTypeIdx + netUnixStateIdx + netUnixInodeIdx + + // Inode and Path are optional. + netUnixStaticFieldsCnt = 6 +) + +const ( + netUnixTypeStream = 1 + netUnixTypeDgram = 2 + netUnixTypeSeqpacket = 5 + + netUnixFlagListen = 1 << 16 + + netUnixStateUnconnected = 1 + netUnixStateConnecting = 2 + netUnixStateConnected = 3 + netUnixStateDisconnected = 4 +) + +var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format") + +// NetUnixType is the type of the type field. +type NetUnixType uint64 + +// NetUnixFlags is the type of the flags field. +type NetUnixFlags uint64 + +// NetUnixState is the type of the state field. +type NetUnixState uint64 + +// NetUnixLine represents a line of /proc/net/unix. +type NetUnixLine struct { + KernelPtr string + RefCount uint64 + Protocol uint64 + Flags NetUnixFlags + Type NetUnixType + State NetUnixState + Inode uint64 + Path string +} + +// NetUnix holds the data read from /proc/net/unix. 
+type NetUnix struct { + Rows []*NetUnixLine +} + +// NewNetUnix returns data read from /proc/net/unix. +func NewNetUnix() (*NetUnix, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewNetUnix() +} + +// NewNetUnix returns data read from /proc/net/unix. +func (fs FS) NewNetUnix() (*NetUnix, error) { + return NewNetUnixByPath(fs.proc.Path("net/unix")) +} + +// NewNetUnixByPath returns data read from /proc/net/unix by file path. +// It might returns an error with partial parsed data, if an error occur after some data parsed. +func NewNetUnixByPath(path string) (*NetUnix, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + return NewNetUnixByReader(f) +} + +// NewNetUnixByReader returns data read from /proc/net/unix by a reader. +// It might returns an error with partial parsed data, if an error occur after some data parsed. +func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) { + nu := &NetUnix{ + Rows: make([]*NetUnixLine, 0, 32), + } + scanner := bufio.NewScanner(reader) + // Omit the header line. + scanner.Scan() + header := scanner.Text() + // From the man page of proc(5), it does not contain an Inode field, + // but in actually it exists. + // This code works for both cases. + hasInode := strings.Contains(header, "Inode") + + minFieldsCnt := netUnixStaticFieldsCnt + if hasInode { + minFieldsCnt++ + } + for scanner.Scan() { + line := scanner.Text() + item, err := nu.parseLine(line, hasInode, minFieldsCnt) + if err != nil { + return nu, err + } + nu.Rows = append(nu.Rows, item) + } + + return nu, scanner.Err() +} + +func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) { + fields := strings.Fields(line) + fieldsLen := len(fields) + if fieldsLen < minFieldsCnt { + return nil, fmt.Errorf( + "Parse Unix domain failed: expect at least %d fields but got %d", + minFieldsCnt, fieldsLen) + } + kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx]) + if err != nil { + return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err) + } + users, err := u.parseUsers(fields[netUnixRefCountIdx]) + if err != nil { + return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err) + } + flags, err := u.parseFlags(fields[netUnixFlagsIdx]) + if err != nil { + return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err) + } + typ, err := u.parseType(fields[netUnixTypeIdx]) + if err != nil { + return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err) + } + state, err := u.parseState(fields[netUnixStateIdx]) + if err != nil { + return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err) + } + var inode uint64 + if hasInode { + inodeStr := fields[netUnixInodeIdx] + inode, err = u.parseInode(inodeStr) + if err != nil { + return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err) + } + } + + nuLine := &NetUnixLine{ + KernelPtr: kernelPtr, + RefCount: users, + Type: typ, + Flags: flags, + State: state, + Inode: inode, + } + + // Path field is optional. 
+ if fieldsLen > minFieldsCnt { + pathIdx := netUnixInodeIdx + 1 + if !hasInode { + pathIdx-- + } + nuLine.Path = fields[pathIdx] + } + + return nuLine, nil +} + +func (u NetUnix) parseKernelPtr(str string) (string, error) { + if !strings.HasSuffix(str, ":") { + return "", errInvalidKernelPtrFmt + } + return str[:len(str)-1], nil +} + +func (u NetUnix) parseUsers(hexStr string) (uint64, error) { + return strconv.ParseUint(hexStr, 16, 32) +} + +func (u NetUnix) parseProtocol(hexStr string) (uint64, error) { + return strconv.ParseUint(hexStr, 16, 32) +} + +func (u NetUnix) parseType(hexStr string) (NetUnixType, error) { + typ, err := strconv.ParseUint(hexStr, 16, 16) + if err != nil { + return 0, err + } + return NetUnixType(typ), nil +} + +func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) { + flags, err := strconv.ParseUint(hexStr, 16, 32) + if err != nil { + return 0, err + } + return NetUnixFlags(flags), nil +} + +func (u NetUnix) parseState(hexStr string) (NetUnixState, error) { + st, err := strconv.ParseInt(hexStr, 16, 8) + if err != nil { + return 0, err + } + return NetUnixState(st), nil +} + +func (u NetUnix) parseInode(inodeStr string) (uint64, error) { + return strconv.ParseUint(inodeStr, 10, 64) +} + +func (t NetUnixType) String() string { + switch t { + case netUnixTypeStream: + return "stream" + case netUnixTypeDgram: + return "dgram" + case netUnixTypeSeqpacket: + return "seqpacket" + } + return "unknown" +} + +func (f NetUnixFlags) String() string { + switch f { + case netUnixFlagListen: + return "listen" + default: + return "default" + } +} + +func (s NetUnixState) String() string { + switch s { + case netUnixStateUnconnected: + return "unconnected" + case netUnixStateConnecting: + return "connecting" + case netUnixStateConnected: + return "connected" + case netUnixStateDisconnected: + return "disconnected" + } + return "unknown" +} diff --git a/vendor/github.com/prometheus/procfs/nfs/BUILD.bazel b/vendor/github.com/prometheus/procfs/nfs/BUILD.bazel deleted file mode 100644 index 2989d9a2bf003..0000000000000 --- a/vendor/github.com/prometheus/procfs/nfs/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "nfs.go", - "parse.go", - "parse_nfs.go", - "parse_nfsd.go", - ], - importmap = "k8s.io/kops/vendor/github.com/prometheus/procfs/nfs", - importpath = "github.com/prometheus/procfs/nfs", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/prometheus/procfs/internal/util:go_default_library"], -) diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go deleted file mode 100644 index 651bf68195284..0000000000000 --- a/vendor/github.com/prometheus/procfs/nfs/nfs.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package nfs implements parsing of /proc/net/rpc/nfsd. 
-// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ -package nfs - -// ReplyCache models the "rc" line. -type ReplyCache struct { - Hits uint64 - Misses uint64 - NoCache uint64 -} - -// FileHandles models the "fh" line. -type FileHandles struct { - Stale uint64 - TotalLookups uint64 - AnonLookups uint64 - DirNoCache uint64 - NoDirNoCache uint64 -} - -// InputOutput models the "io" line. -type InputOutput struct { - Read uint64 - Write uint64 -} - -// Threads models the "th" line. -type Threads struct { - Threads uint64 - FullCnt uint64 -} - -// ReadAheadCache models the "ra" line. -type ReadAheadCache struct { - CacheSize uint64 - CacheHistogram []uint64 - NotFound uint64 -} - -// Network models the "net" line. -type Network struct { - NetCount uint64 - UDPCount uint64 - TCPCount uint64 - TCPConnect uint64 -} - -// ClientRPC models the nfs "rpc" line. -type ClientRPC struct { - RPCCount uint64 - Retransmissions uint64 - AuthRefreshes uint64 -} - -// ServerRPC models the nfsd "rpc" line. -type ServerRPC struct { - RPCCount uint64 - BadCnt uint64 - BadFmt uint64 - BadAuth uint64 - BadcInt uint64 -} - -// V2Stats models the "proc2" line. -type V2Stats struct { - Null uint64 - GetAttr uint64 - SetAttr uint64 - Root uint64 - Lookup uint64 - ReadLink uint64 - Read uint64 - WrCache uint64 - Write uint64 - Create uint64 - Remove uint64 - Rename uint64 - Link uint64 - SymLink uint64 - MkDir uint64 - RmDir uint64 - ReadDir uint64 - FsStat uint64 -} - -// V3Stats models the "proc3" line. -type V3Stats struct { - Null uint64 - GetAttr uint64 - SetAttr uint64 - Lookup uint64 - Access uint64 - ReadLink uint64 - Read uint64 - Write uint64 - Create uint64 - MkDir uint64 - SymLink uint64 - MkNod uint64 - Remove uint64 - RmDir uint64 - Rename uint64 - Link uint64 - ReadDir uint64 - ReadDirPlus uint64 - FsStat uint64 - FsInfo uint64 - PathConf uint64 - Commit uint64 -} - -// ClientV4Stats models the nfs "proc4" line. -type ClientV4Stats struct { - Null uint64 - Read uint64 - Write uint64 - Commit uint64 - Open uint64 - OpenConfirm uint64 - OpenNoattr uint64 - OpenDowngrade uint64 - Close uint64 - Setattr uint64 - FsInfo uint64 - Renew uint64 - SetClientID uint64 - SetClientIDConfirm uint64 - Lock uint64 - Lockt uint64 - Locku uint64 - Access uint64 - Getattr uint64 - Lookup uint64 - LookupRoot uint64 - Remove uint64 - Rename uint64 - Link uint64 - Symlink uint64 - Create uint64 - Pathconf uint64 - StatFs uint64 - ReadLink uint64 - ReadDir uint64 - ServerCaps uint64 - DelegReturn uint64 - GetACL uint64 - SetACL uint64 - FsLocations uint64 - ReleaseLockowner uint64 - Secinfo uint64 - FsidPresent uint64 - ExchangeID uint64 - CreateSession uint64 - DestroySession uint64 - Sequence uint64 - GetLeaseTime uint64 - ReclaimComplete uint64 - LayoutGet uint64 - GetDeviceInfo uint64 - LayoutCommit uint64 - LayoutReturn uint64 - SecinfoNoName uint64 - TestStateID uint64 - FreeStateID uint64 - GetDeviceList uint64 - BindConnToSession uint64 - DestroyClientID uint64 - Seek uint64 - Allocate uint64 - DeAllocate uint64 - LayoutStats uint64 - Clone uint64 -} - -// ServerV4Stats models the nfsd "proc4" line. 
-type ServerV4Stats struct { - Null uint64 - Compound uint64 -} - -// V4Ops models the "proc4ops" line: NFSv4 operations -// Variable list, see: -// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) -// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) -// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) -type V4Ops struct { - //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? - Op0Unused uint64 - Op1Unused uint64 - Op2Future uint64 - Access uint64 - Close uint64 - Commit uint64 - Create uint64 - DelegPurge uint64 - DelegReturn uint64 - GetAttr uint64 - GetFH uint64 - Link uint64 - Lock uint64 - Lockt uint64 - Locku uint64 - Lookup uint64 - LookupRoot uint64 - Nverify uint64 - Open uint64 - OpenAttr uint64 - OpenConfirm uint64 - OpenDgrd uint64 - PutFH uint64 - PutPubFH uint64 - PutRootFH uint64 - Read uint64 - ReadDir uint64 - ReadLink uint64 - Remove uint64 - Rename uint64 - Renew uint64 - RestoreFH uint64 - SaveFH uint64 - SecInfo uint64 - SetAttr uint64 - Verify uint64 - Write uint64 - RelLockOwner uint64 -} - -// ClientRPCStats models all stats from /proc/net/rpc/nfs. -type ClientRPCStats struct { - Network Network - ClientRPC ClientRPC - V2Stats V2Stats - V3Stats V3Stats - ClientV4Stats ClientV4Stats -} - -// ServerRPCStats models all stats from /proc/net/rpc/nfsd. -type ServerRPCStats struct { - ReplyCache ReplyCache - FileHandles FileHandles - InputOutput InputOutput - Threads Threads - ReadAheadCache ReadAheadCache - Network Network - ServerRPC ServerRPC - V2Stats V2Stats - V3Stats V3Stats - ServerV4Stats ServerV4Stats - V4Ops V4Ops -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go deleted file mode 100644 index 95a83cc5bc5f9..0000000000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nfs - -import ( - "fmt" -) - -func parseReplyCache(v []uint64) (ReplyCache, error) { - if len(v) != 3 { - return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) - } - - return ReplyCache{ - Hits: v[0], - Misses: v[1], - NoCache: v[2], - }, nil -} - -func parseFileHandles(v []uint64) (FileHandles, error) { - if len(v) != 5 { - return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) - } - - return FileHandles{ - Stale: v[0], - TotalLookups: v[1], - AnonLookups: v[2], - DirNoCache: v[3], - NoDirNoCache: v[4], - }, nil -} - -func parseInputOutput(v []uint64) (InputOutput, error) { - if len(v) != 2 { - return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) - } - - return InputOutput{ - Read: v[0], - Write: v[1], - }, nil -} - -func parseThreads(v []uint64) (Threads, error) { - if len(v) != 2 { - return Threads{}, fmt.Errorf("invalid Threads line %q", v) - } - - return Threads{ - Threads: v[0], - FullCnt: v[1], - }, nil -} - -func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { - if len(v) != 12 { - return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) - } - - return ReadAheadCache{ - CacheSize: v[0], - CacheHistogram: v[1:11], - NotFound: v[11], - }, nil -} - -func parseNetwork(v []uint64) (Network, error) { - if len(v) != 4 { - return Network{}, fmt.Errorf("invalid Network line %q", v) - } - - return Network{ - NetCount: v[0], - UDPCount: v[1], - TCPCount: v[2], - TCPConnect: v[3], - }, nil -} - -func parseServerRPC(v []uint64) (ServerRPC, error) { - if len(v) != 5 { - return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) - } - - return ServerRPC{ - RPCCount: v[0], - BadCnt: v[1], - BadFmt: v[2], - BadAuth: v[3], - BadcInt: v[4], - }, nil -} - -func parseClientRPC(v []uint64) (ClientRPC, error) { - if len(v) != 3 { - return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) - } - - return ClientRPC{ - RPCCount: v[0], - Retransmissions: v[1], - AuthRefreshes: v[2], - }, nil -} - -func parseV2Stats(v []uint64) (V2Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 18 { - return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) - } - - return V2Stats{ - Null: v[1], - GetAttr: v[2], - SetAttr: v[3], - Root: v[4], - Lookup: v[5], - ReadLink: v[6], - Read: v[7], - WrCache: v[8], - Write: v[9], - Create: v[10], - Remove: v[11], - Rename: v[12], - Link: v[13], - SymLink: v[14], - MkDir: v[15], - RmDir: v[16], - ReadDir: v[17], - FsStat: v[18], - }, nil -} - -func parseV3Stats(v []uint64) (V3Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 22 { - return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) - } - - return V3Stats{ - Null: v[1], - GetAttr: v[2], - SetAttr: v[3], - Lookup: v[4], - Access: v[5], - ReadLink: v[6], - Read: v[7], - Write: v[8], - Create: v[9], - MkDir: v[10], - SymLink: v[11], - MkNod: v[12], - Remove: v[13], - RmDir: v[14], - Rename: v[15], - Link: v[16], - ReadDir: v[17], - ReadDirPlus: v[18], - FsStat: v[19], - FsInfo: v[20], - PathConf: v[21], - Commit: v[22], - }, nil -} - -func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { - values := int(v[0]) - if len(v[1:]) != values { - return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) - } - - // This function currently supports mapping 59 NFS v4 client stats. Older - // kernels may emit fewer stats, so we must detect this and pad out the - // values to match the expected slice size. 
- if values < 59 { - newValues := make([]uint64, 60) - copy(newValues, v) - v = newValues - } - - return ClientV4Stats{ - Null: v[1], - Read: v[2], - Write: v[3], - Commit: v[4], - Open: v[5], - OpenConfirm: v[6], - OpenNoattr: v[7], - OpenDowngrade: v[8], - Close: v[9], - Setattr: v[10], - FsInfo: v[11], - Renew: v[12], - SetClientID: v[13], - SetClientIDConfirm: v[14], - Lock: v[15], - Lockt: v[16], - Locku: v[17], - Access: v[18], - Getattr: v[19], - Lookup: v[20], - LookupRoot: v[21], - Remove: v[22], - Rename: v[23], - Link: v[24], - Symlink: v[25], - Create: v[26], - Pathconf: v[27], - StatFs: v[28], - ReadLink: v[29], - ReadDir: v[30], - ServerCaps: v[31], - DelegReturn: v[32], - GetACL: v[33], - SetACL: v[34], - FsLocations: v[35], - ReleaseLockowner: v[36], - Secinfo: v[37], - FsidPresent: v[38], - ExchangeID: v[39], - CreateSession: v[40], - DestroySession: v[41], - Sequence: v[42], - GetLeaseTime: v[43], - ReclaimComplete: v[44], - LayoutGet: v[45], - GetDeviceInfo: v[46], - LayoutCommit: v[47], - LayoutReturn: v[48], - SecinfoNoName: v[49], - TestStateID: v[50], - FreeStateID: v[51], - GetDeviceList: v[52], - BindConnToSession: v[53], - DestroyClientID: v[54], - Seek: v[55], - Allocate: v[56], - DeAllocate: v[57], - LayoutStats: v[58], - Clone: v[59], - }, nil -} - -func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 2 { - return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) - } - - return ServerV4Stats{ - Null: v[1], - Compound: v[2], - }, nil -} - -func parseV4Ops(v []uint64) (V4Ops, error) { - values := int(v[0]) - if len(v[1:]) != values || values < 39 { - return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) - } - - stats := V4Ops{ - Op0Unused: v[1], - Op1Unused: v[2], - Op2Future: v[3], - Access: v[4], - Close: v[5], - Commit: v[6], - Create: v[7], - DelegPurge: v[8], - DelegReturn: v[9], - GetAttr: v[10], - GetFH: v[11], - Link: v[12], - Lock: v[13], - Lockt: v[14], - Locku: v[15], - Lookup: v[16], - LookupRoot: v[17], - Nverify: v[18], - Open: v[19], - OpenAttr: v[20], - OpenConfirm: v[21], - OpenDgrd: v[22], - PutFH: v[23], - PutPubFH: v[24], - PutRootFH: v[25], - Read: v[26], - ReadDir: v[27], - ReadLink: v[28], - Remove: v[29], - Rename: v[30], - Renew: v[31], - RestoreFH: v[32], - SaveFH: v[33], - SecInfo: v[34], - SetAttr: v[35], - Verify: v[36], - Write: v[37], - RelLockOwner: v[38], - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go deleted file mode 100644 index c0d3a5ad9bdfe..0000000000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs -func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { - stats := &ClientRPCStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - return nil, fmt.Errorf("invalid NFS metric line %q", line) - } - - values, err := util.ParseUint64s(parts[1:]) - if err != nil { - return nil, fmt.Errorf("error parsing NFS metric line: %s", err) - } - - switch metricLine := parts[0]; metricLine { - case "net": - stats.Network, err = parseNetwork(values) - case "rpc": - stats.ClientRPC, err = parseClientRPC(values) - case "proc2": - stats.V2Stats, err = parseV2Stats(values) - case "proc3": - stats.V3Stats, err = parseV3Stats(values) - case "proc4": - stats.ClientV4Stats, err = parseClientV4Stats(values) - default: - return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) - } - if err != nil { - return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) - } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning NFS file: %s", err) - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go deleted file mode 100644 index 57bb4a35858c2..0000000000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd -func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { - stats := &ServerRPCStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - return nil, fmt.Errorf("invalid NFSd metric line %q", line) - } - label := parts[0] - - var values []uint64 - var err error - if label == "th" { - if len(parts) < 3 { - return nil, fmt.Errorf("invalid NFSd th metric line %q", line) - } - values, err = util.ParseUint64s(parts[1:3]) - } else { - values, err = util.ParseUint64s(parts[1:]) - } - if err != nil { - return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) - } - - switch metricLine := parts[0]; metricLine { - case "rc": - stats.ReplyCache, err = parseReplyCache(values) - case "fh": - stats.FileHandles, err = parseFileHandles(values) - case "io": - stats.InputOutput, err = parseInputOutput(values) - case "th": - stats.Threads, err = parseThreads(values) - case "ra": - stats.ReadAheadCache, err = parseReadAheadCache(values) - case "net": - stats.Network, err = parseNetwork(values) - case "rpc": - stats.ServerRPC, err = parseServerRPC(values) - case "proc2": - stats.V2Stats, err = parseV2Stats(values) - case "proc3": - stats.V3Stats, err = parseV3Stats(values) - case "proc4": - stats.ServerV4Stats, err = parseServerV4Stats(values) - case "proc4ops": - stats.V4Ops, err = parseV4Ops(values) - default: - return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) - } - if err != nil { - return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) - } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning NFSd file: %s", err) - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 06bed0ef4a3ad..8a8430147ea5c 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -20,6 +20,8 @@ import ( "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/fs" ) // Proc provides information about a running process. @@ -27,7 +29,7 @@ type Proc struct { // The process ID. PID int - fs FS + fs fs.FS } // Procs represents a list of Proc structs. @@ -52,7 +54,7 @@ func NewProc(pid int) (Proc, error) { if err != nil { return Proc{}, err } - return fs.NewProc(pid) + return fs.Proc(pid) } // AllProcs returns a list of all currently available processes under /proc. @@ -66,28 +68,35 @@ func AllProcs() (Procs, error) { // Self returns a process for the current process. func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.Path("self")) + p, err := os.Readlink(fs.proc.Path("self")) if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) if err != nil { return Proc{}, err } - return fs.NewProc(pid) + return fs.Proc(pid) } // NewProc returns a process for the given pid. +// +// Deprecated: use fs.Proc() instead func (fs FS) NewProc(pid int) (Proc, error) { - if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return fs.Proc(pid) +} + +// Proc returns a process for the given pid. 
+func (fs FS) Proc(pid int) (Proc, error) { + if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { return Proc{}, err } - return Proc{PID: pid, fs: fs}, nil + return Proc{PID: pid, fs: fs.proc}, nil } // AllProcs returns a list of all currently available processes. func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.Path()) + d, err := os.Open(fs.proc.Path()) if err != nil { return Procs{}, err } @@ -104,7 +113,7 @@ func (fs FS) AllProcs() (Procs, error) { if err != nil { continue } - p = append(p, Proc{PID: int(pid), fs: fs}) + p = append(p, Proc{PID: int(pid), fs: fs.proc}) } return p, nil diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index 0251c83bfe819..0ff89b1cef194 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -39,8 +39,8 @@ type ProcIO struct { CancelledWriteBytes int64 } -// NewIO creates a new ProcIO instance from a given Proc instance. -func (p Proc) NewIO() (ProcIO, error) { +// IO creates a new ProcIO instance from a given Proc instance. +func (p Proc) IO() (ProcIO, error) { pio := ProcIO{} f, err := os.Open(p.path("io")) diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index f04ba6fda8526..91ee24df8bdec 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -78,7 +78,14 @@ var ( ) // NewLimits returns the current soft limits of the process. +// +// Deprecated: use p.Limits() instead func (p Proc) NewLimits() (ProcLimits, error) { + return p.Limits() +} + +// Limits returns the current soft limits of the process. +func (p Proc) Limits() (ProcLimits, error) { f, err := os.Open(p.path("limits")) if err != nil { return ProcLimits{}, err diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index d06c26ebad9b2..c66740ff746a5 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -29,9 +29,9 @@ type Namespace struct { // Namespaces contains all of the namespaces that the process is contained in. type Namespaces map[string]Namespace -// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the +// Namespaces reads from /proc//ns/* to get the namespaces of which the // process is a member. -func (p Proc) NewNamespaces() (Namespaces, error) { +func (p Proc) Namespaces() (Namespaces, error) { d, err := os.Open(p.path("ns")) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go new file mode 100644 index 0000000000000..46fe266263764 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -0,0 +1,101 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +// The PSI / pressure interface is described at +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt +// Each resource (cpu, io, memory, ...) is exposed as a single file. +// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. +// Each line contains several averages (over n seconds) and a total in µs. +// +// Example io pressure file: +// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 +// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "strings" +) + +const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" + +// PSILine is a single line of values as returned by /proc/pressure/* +// The Avg entries are averages over n seconds, as a percentage +// The Total line is in microseconds +type PSILine struct { + Avg10 float64 + Avg60 float64 + Avg300 float64 + Total uint64 +} + +// PSIStats represent pressure stall information from /proc/pressure/* +// Some indicates the share of time in which at least some tasks are stalled +// Full indicates the share of time in which all non-idle tasks are stalled simultaneously +type PSIStats struct { + Some *PSILine + Full *PSILine +} + +// PSIStatsForResource reads pressure stall information for the specified +// resource from /proc/pressure/. At time of writing this can be +// either "cpu", "memory" or "io". +func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { + file, err := os.Open(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) + if err != nil { + return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource) + } + + defer file.Close() + return parsePSIStats(resource, file) +} + +// parsePSIStats parses the specified file for pressure stall information +func parsePSIStats(resource string, file io.Reader) (PSIStats, error) { + psiStats := PSIStats{} + stats, err := ioutil.ReadAll(file) + if err != nil { + return psiStats, fmt.Errorf("psi_stats: unable to read data for %s", resource) + } + + for _, l := range strings.Split(string(stats), "\n") { + prefix := strings.Split(l, " ")[0] + switch prefix { + case "some": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Some = &psi + case "full": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Full = &psi + default: + // If we encounter a line with an unknown prefix, ignore it and move on + // Should new measurement types be added in the future we'll simply ignore them instead + // of erroring on retrieval + continue + } + } + + return psiStats, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 3cf2a9f18f00f..6ed98a8ae3755 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -18,6 +18,8 @@ import ( "fmt" "io/ioutil" "os" + + "github.com/prometheus/procfs/internal/fs" ) // Originally, this USER_HZ value was dynamically retrieved via a sysconf call @@ -95,15 +97,22 @@ type ProcStat struct { // in clock ticks. Starttime uint64 // Virtual memory size in bytes. - VSize int + VSize uint // Resident set size in pages. 
RSS int - fs FS + proc fs.FS } // NewStat returns the current status information of the process. +// +// Deprecated: use NewStat() instead func (p Proc) NewStat() (ProcStat, error) { + return p.Stat() +} + +// Stat returns the current status information of the process. +func (p Proc) Stat() (ProcStat, error) { f, err := os.Open(p.path("stat")) if err != nil { return ProcStat{}, err @@ -118,7 +127,7 @@ func (p Proc) NewStat() (ProcStat, error) { var ( ignore int - s = ProcStat{PID: p.PID, fs: p.fs} + s = ProcStat{PID: p.PID, proc: p.fs} l = bytes.Index(data, []byte("(")) r = bytes.LastIndex(data, []byte(")")) ) @@ -164,7 +173,7 @@ func (p Proc) NewStat() (ProcStat, error) { } // VirtualMemory returns the virtual memory size in bytes. -func (s ProcStat) VirtualMemory() int { +func (s ProcStat) VirtualMemory() uint { return s.VSize } @@ -175,7 +184,8 @@ func (s ProcStat) ResidentMemory() int { // StartTime returns the unix timestamp of the process in seconds. func (s ProcStat) StartTime() (float64, error) { - stat, err := s.fs.NewStat() + fs := FS{proc: s.proc} + stat, err := fs.Stat() if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go new file mode 100644 index 0000000000000..6b4b61f71cd9d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -0,0 +1,162 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStatus struct { + // The process ID. + PID int + // The process name. + Name string + + // Peak virtual memory size. + VmPeak uint64 + // Virtual memory size. + VmSize uint64 + // Locked memory size. + VmLck uint64 + // Pinned memory size. + VmPin uint64 + // Peak resident set size. + VmHWM uint64 + // Resident set size (sum of RssAnnon RssFile and RssShmem). + VmRSS uint64 + // Size of resident anonymous memory. + RssAnon uint64 + // Size of resident file mappings. + RssFile uint64 + // Size of resident shared memory. + RssShmem uint64 + // Size of data segments. + VmData uint64 + // Size of stack segments. + VmStk uint64 + // Size of text segments. + VmExe uint64 + // Shared library code size. + VmLib uint64 + // Page table entries size. + VmPTE uint64 + // Size of second-level page tables. + VmPMD uint64 + // Swapped-out virtual memory size by anonymous private. + VmSwap uint64 + // Size of hugetlb memory portions + HugetlbPages uint64 + + // Number of voluntary context switches. + VoluntaryCtxtSwitches uint64 + // Number of involuntary context switches. + NonVoluntaryCtxtSwitches uint64 +} + +// NewStatus returns the current status information of the process. 
+func (p Proc) NewStatus() (ProcStatus, error) { + f, err := os.Open(p.path("status")) + if err != nil { + return ProcStatus{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStatus{}, err + } + + s := ProcStatus{PID: p.PID} + + lines := strings.Split(string(data), "\n") + for _, line := range lines { + if !bytes.Contains([]byte(line), []byte(":")) { + continue + } + + kv := strings.SplitN(line, ":", 2) + + // removes spaces + k := string(strings.TrimSpace(kv[0])) + v := string(strings.TrimSpace(kv[1])) + // removes "kB" + v = string(bytes.Trim([]byte(v), " kB")) + + // value to int when possible + // we can skip error check here, 'cause vKBytes is not used when value is a string + vKBytes, _ := strconv.ParseUint(v, 10, 64) + // convert kB to B + vBytes := vKBytes * 1024 + + s.fillStatus(k, v, vKBytes, vBytes) + } + + return s, nil +} + +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { + switch k { + case "Name": + s.Name = vString + case "VmPeak": + s.VmPeak = vUintBytes + case "VmSize": + s.VmSize = vUintBytes + case "VmLck": + s.VmLck = vUintBytes + case "VmPin": + s.VmPin = vUintBytes + case "VmHWM": + s.VmHWM = vUintBytes + case "VmRSS": + s.VmRSS = vUintBytes + case "RssAnon": + s.RssAnon = vUintBytes + case "RssFile": + s.RssFile = vUintBytes + case "RssShmem": + s.RssShmem = vUintBytes + case "VmData": + s.VmData = vUintBytes + case "VmStk": + s.VmStk = vUintBytes + case "VmExe": + s.VmExe = vUintBytes + case "VmLib": + s.VmLib = vUintBytes + case "VmPTE": + s.VmPTE = vUintBytes + case "VmPMD": + s.VmPMD = vUintBytes + case "VmSwap": + s.VmSwap = vUintBytes + case "HugetlbPages": + s.HugetlbPages = vUintBytes + case "voluntary_ctxt_switches": + s.VoluntaryCtxtSwitches = vUint + case "nonvoluntary_ctxt_switches": + s.NonVoluntaryCtxtSwitches = vUint + } +} + +// TotalCtxtSwitches returns the total context switch. +func (s ProcStatus) TotalCtxtSwitches() uint64 { + return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 61eb6b0e3ceaa..6661ee03a66f2 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -20,6 +20,8 @@ import ( "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/fs" ) // CPUStat shows how much time the cpu spend in various stages. @@ -78,16 +80,6 @@ type Stat struct { SoftIRQ SoftIRQStat } -// NewStat returns kernel/system statistics read from /proc/stat. -func NewStat() (Stat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Stat{}, err - } - - return fs.NewStat() -} - // Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). func parseCPUStat(line string) (CPUStat, int64, error) { cpuStat := CPUStat{} @@ -149,11 +141,31 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { return softIRQStat, total, nil } -// NewStat returns an information about current kernel/system statistics. +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead +func NewStat() (Stat, error) { + fs, err := NewFS(fs.DefaultProcMountPoint) + if err != nil { + return Stat{}, err + } + return fs.Stat() +} + +// NewStat returns information about current cpu/process statistics. 
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead func (fs FS) NewStat() (Stat, error) { - // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt + return fs.Stat() +} + +// Stat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Stat() (Stat, error) { - f, err := os.Open(fs.Path("stat")) + f, err := os.Open(fs.proc.Path("stat")) if err != nil { return Stat{}, err } diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar index b0171a12b59f3..19ef02b8d4e15 100644 --- a/vendor/github.com/prometheus/procfs/ttar +++ b/vendor/github.com/prometheus/procfs/ttar @@ -86,8 +86,10 @@ Usage: $bname [-C ] -c -f (create archive) $bname [-C ] -x -f (extract archive) Options: - -C (change directory) - -v (verbose) + -C (change directory) + -v (verbose) + --recursive-unlink (recursively delete existing directory if path + collides with file or directory to extract) Example: Change to sysfs directory, create ttar file from fixtures directory $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ @@ -111,8 +113,9 @@ function set_cmd { } unset VERBOSE +unset RECURSIVE_UNLINK -while getopts :cf:htxvC: opt; do +while getopts :cf:-:htxvC: opt; do case $opt in c) set_cmd "create" @@ -136,6 +139,18 @@ while getopts :cf:htxvC: opt; do C) CDIR=$OPTARG ;; + -) + case $OPTARG in + recursive-unlink) + RECURSIVE_UNLINK="yes" + ;; + *) + echo -e "Error: invalid option -$OPTARG" + echo + usage 1 + ;; + esac + ;; *) echo >&2 "ERROR: invalid option -$OPTARG" echo @@ -212,16 +227,16 @@ function extract { local eof_without_newline if [ "$size" -gt 0 ]; then if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceeded by a backslash indicates that the line + # An EOF not preceded by a backslash indicates that the line # does not end with a newline eof_without_newline=1 else eof_without_newline=0 fi # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceeded by backslash + # Replace NULLBYTE with null byte unless preceded by backslash # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceeded by backslash + # Remove EOF unless preceded by backslash # Remove one backslash in front of EOF if [ $USE_PYTHON -eq 1 ]; then echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" @@ -245,7 +260,16 @@ function extract { fi if [[ $line =~ ^Path:\ (.*)$ ]]; then path=${BASH_REMATCH[1]} - if [ -e "$path" ] || [ -L "$path" ]; then + if [ -L "$path" ]; then + rm "$path" + elif [ -d "$path" ]; then + if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then + rm -r "$path" + else + # Safe because symlinks to directories are dealt with above + rmdir "$path" + fi + elif [ -e "$path" ]; then rm "$path" fi elif [[ $line =~ ^Lines:\ (.*)$ ]]; then @@ -338,8 +362,8 @@ function _create { else < "$file" \ sed 's/EOF/\\EOF/g; - s/NULLBYTE/\\NULLBYTE/g; - s/\x0/NULLBYTE/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; ' fi if [[ "$eof_without_newline" -eq 1 ]]; then diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go index 8f1508f0fd114..30aa417d530f8 100644 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ b/vendor/github.com/prometheus/procfs/xfrm.go @@ -97,7 +97,7 @@ func NewXfrmStat() (XfrmStat, error) { // NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
func (fs FS) NewXfrmStat() (XfrmStat, error) { - file, err := os.Open(fs.Path("net/xfrm_stat")) + file, err := os.Open(fs.proc.Path("net/xfrm_stat")) if err != nil { return XfrmStat{}, err } diff --git a/vendor/github.com/prometheus/procfs/xfs/BUILD.bazel b/vendor/github.com/prometheus/procfs/xfs/BUILD.bazel deleted file mode 100644 index 0895452178b43..0000000000000 --- a/vendor/github.com/prometheus/procfs/xfs/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "parse.go", - "xfs.go", - ], - importmap = "k8s.io/kops/vendor/github.com/prometheus/procfs/xfs", - importpath = "github.com/prometheus/procfs/xfs", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/prometheus/procfs/internal/util:go_default_library"], -) diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go deleted file mode 100644 index 2bc0ef3427d2c..0000000000000 --- a/vendor/github.com/prometheus/procfs/xfs/parse.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package xfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseStats parses a Stats from an input io.Reader, using the format -// found in /proc/fs/xfs/stat. -func ParseStats(r io.Reader) (*Stats, error) { - const ( - // Fields parsed into stats structures. - fieldExtentAlloc = "extent_alloc" - fieldAbt = "abt" - fieldBlkMap = "blk_map" - fieldBmbt = "bmbt" - fieldDir = "dir" - fieldTrans = "trans" - fieldIg = "ig" - fieldLog = "log" - fieldRw = "rw" - fieldAttr = "attr" - fieldIcluster = "icluster" - fieldVnodes = "vnodes" - fieldBuf = "buf" - fieldXpc = "xpc" - - // Unimplemented at this time due to lack of documentation. - fieldPushAil = "push_ail" - fieldXstrat = "xstrat" - fieldAbtb2 = "abtb2" - fieldAbtc2 = "abtc2" - fieldBmbt2 = "bmbt2" - fieldIbt2 = "ibt2" - fieldFibt2 = "fibt2" - fieldQm = "qm" - fieldDebug = "debug" - ) - - var xfss Stats - - s := bufio.NewScanner(r) - for s.Scan() { - // Expect at least a string label and a single integer value, ex: - // - abt 0 - // - rw 1 2 - ss := strings.Fields(string(s.Bytes())) - if len(ss) < 2 { - continue - } - label := ss[0] - - // Extended precision counters are uint64 values. - if label == fieldXpc { - us, err := util.ParseUint64s(ss[1:]) - if err != nil { - return nil, err - } - - xfss.ExtendedPrecision, err = extendedPrecisionStats(us) - if err != nil { - return nil, err - } - - continue - } - - // All other counters are uint32 values. 
- us, err := util.ParseUint32s(ss[1:]) - if err != nil { - return nil, err - } - - switch label { - case fieldExtentAlloc: - xfss.ExtentAllocation, err = extentAllocationStats(us) - case fieldAbt: - xfss.AllocationBTree, err = btreeStats(us) - case fieldBlkMap: - xfss.BlockMapping, err = blockMappingStats(us) - case fieldBmbt: - xfss.BlockMapBTree, err = btreeStats(us) - case fieldDir: - xfss.DirectoryOperation, err = directoryOperationStats(us) - case fieldTrans: - xfss.Transaction, err = transactionStats(us) - case fieldIg: - xfss.InodeOperation, err = inodeOperationStats(us) - case fieldLog: - xfss.LogOperation, err = logOperationStats(us) - case fieldRw: - xfss.ReadWrite, err = readWriteStats(us) - case fieldAttr: - xfss.AttributeOperation, err = attributeOperationStats(us) - case fieldIcluster: - xfss.InodeClustering, err = inodeClusteringStats(us) - case fieldVnodes: - xfss.Vnode, err = vnodeStats(us) - case fieldBuf: - xfss.Buffer, err = bufferStats(us) - } - if err != nil { - return nil, err - } - } - - return &xfss, s.Err() -} - -// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. -func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { - if l := len(us); l != 4 { - return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) - } - - return ExtentAllocationStats{ - ExtentsAllocated: us[0], - BlocksAllocated: us[1], - ExtentsFreed: us[2], - BlocksFreed: us[3], - }, nil -} - -// btreeStats builds a BTreeStats from a slice of uint32s. -func btreeStats(us []uint32) (BTreeStats, error) { - if l := len(us); l != 4 { - return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) - } - - return BTreeStats{ - Lookups: us[0], - Compares: us[1], - RecordsInserted: us[2], - RecordsDeleted: us[3], - }, nil -} - -// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. -func blockMappingStats(us []uint32) (BlockMappingStats, error) { - if l := len(us); l != 7 { - return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) - } - - return BlockMappingStats{ - Reads: us[0], - Writes: us[1], - Unmaps: us[2], - ExtentListInsertions: us[3], - ExtentListDeletions: us[4], - ExtentListLookups: us[5], - ExtentListCompares: us[6], - }, nil -} - -// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. -func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { - if l := len(us); l != 4 { - return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) - } - - return DirectoryOperationStats{ - Lookups: us[0], - Creates: us[1], - Removes: us[2], - Getdents: us[3], - }, nil -} - -// TransactionStats builds a TransactionStats from a slice of uint32s. -func transactionStats(us []uint32) (TransactionStats, error) { - if l := len(us); l != 3 { - return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) - } - - return TransactionStats{ - Sync: us[0], - Async: us[1], - Empty: us[2], - }, nil -} - -// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. 
-func inodeOperationStats(us []uint32) (InodeOperationStats, error) { - if l := len(us); l != 7 { - return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) - } - - return InodeOperationStats{ - Attempts: us[0], - Found: us[1], - Recycle: us[2], - Missed: us[3], - Duplicate: us[4], - Reclaims: us[5], - AttributeChange: us[6], - }, nil -} - -// LogOperationStats builds a LogOperationStats from a slice of uint32s. -func logOperationStats(us []uint32) (LogOperationStats, error) { - if l := len(us); l != 5 { - return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) - } - - return LogOperationStats{ - Writes: us[0], - Blocks: us[1], - NoInternalBuffers: us[2], - Force: us[3], - ForceSleep: us[4], - }, nil -} - -// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. -func readWriteStats(us []uint32) (ReadWriteStats, error) { - if l := len(us); l != 2 { - return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) - } - - return ReadWriteStats{ - Read: us[0], - Write: us[1], - }, nil -} - -// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. -func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { - if l := len(us); l != 4 { - return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) - } - - return AttributeOperationStats{ - Get: us[0], - Set: us[1], - Remove: us[2], - List: us[3], - }, nil -} - -// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. -func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { - if l := len(us); l != 3 { - return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) - } - - return InodeClusteringStats{ - Iflush: us[0], - Flush: us[1], - FlushInode: us[2], - }, nil -} - -// VnodeStats builds a VnodeStats from a slice of uint32s. -func vnodeStats(us []uint32) (VnodeStats, error) { - // The attribute "Free" appears to not be available on older XFS - // stats versions. Therefore, 7 or 8 elements may appear in - // this slice. - l := len(us) - if l != 7 && l != 8 { - return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) - } - - s := VnodeStats{ - Active: us[0], - Allocate: us[1], - Get: us[2], - Hold: us[3], - Release: us[4], - Reclaim: us[5], - Remove: us[6], - } - - // Skip adding free, unless it is present. The zero value will - // be used in place of an actual count. - if l == 7 { - return s, nil - } - - s.Free = us[7] - return s, nil -} - -// BufferStats builds a BufferStats from a slice of uint32s. -func bufferStats(us []uint32) (BufferStats, error) { - if l := len(us); l != 9 { - return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) - } - - return BufferStats{ - Get: us[0], - Create: us[1], - GetLocked: us[2], - GetLockedWaited: us[3], - BusyLocked: us[4], - MissLocked: us[5], - PageRetries: us[6], - PageFound: us[7], - GetRead: us[8], - }, nil -} - -// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. 
-func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { - if l := len(us); l != 3 { - return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) - } - - return ExtendedPrecisionStats{ - FlushBytes: us[0], - WriteBytes: us[1], - ReadBytes: us[2], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go deleted file mode 100644 index d86794b7ca998..0000000000000 --- a/vendor/github.com/prometheus/procfs/xfs/xfs.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package xfs provides access to statistics exposed by the XFS filesystem. -package xfs - -// Stats contains XFS filesystem runtime statistics, parsed from -// /proc/fs/xfs/stat. -// -// The names and meanings of each statistic were taken from -// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux -// kernel source. Most counters are uint32s (same data types used in -// xfs_stats.h), but some of the "extended precision stats" are uint64s. -type Stats struct { - // The name of the filesystem used to source these statistics. - // If empty, this indicates aggregated statistics for all XFS - // filesystems on the host. - Name string - - ExtentAllocation ExtentAllocationStats - AllocationBTree BTreeStats - BlockMapping BlockMappingStats - BlockMapBTree BTreeStats - DirectoryOperation DirectoryOperationStats - Transaction TransactionStats - InodeOperation InodeOperationStats - LogOperation LogOperationStats - ReadWrite ReadWriteStats - AttributeOperation AttributeOperationStats - InodeClustering InodeClusteringStats - Vnode VnodeStats - Buffer BufferStats - ExtendedPrecision ExtendedPrecisionStats -} - -// ExtentAllocationStats contains statistics regarding XFS extent allocations. -type ExtentAllocationStats struct { - ExtentsAllocated uint32 - BlocksAllocated uint32 - ExtentsFreed uint32 - BlocksFreed uint32 -} - -// BTreeStats contains statistics regarding an XFS internal B-tree. -type BTreeStats struct { - Lookups uint32 - Compares uint32 - RecordsInserted uint32 - RecordsDeleted uint32 -} - -// BlockMappingStats contains statistics regarding XFS block maps. -type BlockMappingStats struct { - Reads uint32 - Writes uint32 - Unmaps uint32 - ExtentListInsertions uint32 - ExtentListDeletions uint32 - ExtentListLookups uint32 - ExtentListCompares uint32 -} - -// DirectoryOperationStats contains statistics regarding XFS directory entries. -type DirectoryOperationStats struct { - Lookups uint32 - Creates uint32 - Removes uint32 - Getdents uint32 -} - -// TransactionStats contains statistics regarding XFS metadata transactions. -type TransactionStats struct { - Sync uint32 - Async uint32 - Empty uint32 -} - -// InodeOperationStats contains statistics regarding XFS inode operations. 
-type InodeOperationStats struct { - Attempts uint32 - Found uint32 - Recycle uint32 - Missed uint32 - Duplicate uint32 - Reclaims uint32 - AttributeChange uint32 -} - -// LogOperationStats contains statistics regarding the XFS log buffer. -type LogOperationStats struct { - Writes uint32 - Blocks uint32 - NoInternalBuffers uint32 - Force uint32 - ForceSleep uint32 -} - -// ReadWriteStats contains statistics regarding the number of read and write -// system calls for XFS filesystems. -type ReadWriteStats struct { - Read uint32 - Write uint32 -} - -// AttributeOperationStats contains statistics regarding manipulation of -// XFS extended file attributes. -type AttributeOperationStats struct { - Get uint32 - Set uint32 - Remove uint32 - List uint32 -} - -// InodeClusteringStats contains statistics regarding XFS inode clustering -// operations. -type InodeClusteringStats struct { - Iflush uint32 - Flush uint32 - FlushInode uint32 -} - -// VnodeStats contains statistics regarding XFS vnode operations. -type VnodeStats struct { - Active uint32 - Allocate uint32 - Get uint32 - Hold uint32 - Release uint32 - Reclaim uint32 - Remove uint32 - Free uint32 -} - -// BufferStats contains statistics regarding XFS read/write I/O buffers. -type BufferStats struct { - Get uint32 - Create uint32 - GetLocked uint32 - GetLockedWaited uint32 - BusyLocked uint32 - MissLocked uint32 - PageRetries uint32 - PageFound uint32 - GetRead uint32 -} - -// ExtendedPrecisionStats contains high precision counters used to track the -// total number of bytes read, written, or flushed, during XFS operations. -type ExtendedPrecisionStats struct { - FlushBytes uint64 - WriteBytes uint64 - ReadBytes uint64 -} diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml index 1f953bebdc968..848938a6d4ed4 100644 --- a/vendor/github.com/sirupsen/logrus/.travis.yml +++ b/vendor/github.com/sirupsen/logrus/.travis.yml @@ -1,51 +1,25 @@ language: go +go_import_path: github.com/sirupsen/logrus +git: + depth: 1 env: - - GOMAXPROCS=4 GORACE=halt_on_error=1 + - GO111MODULE=on + - GO111MODULE=off +go: [ 1.11.x, 1.12.x ] +os: [ linux, osx ] matrix: - include: - - go: 1.10.x - install: - - go get github.com/stretchr/testify/assert - - go get golang.org/x/crypto/ssh/terminal - - go get golang.org/x/sys/unix - - go get golang.org/x/sys/windows - script: - - go test -race -v ./... - - go: 1.11.x - env: GO111MODULE=on - install: - - go mod download - script: - - go test -race -v ./... - - go: 1.11.x + exclude: + - go: 1.12.x env: GO111MODULE=off - install: - - go get github.com/stretchr/testify/assert - - go get golang.org/x/crypto/ssh/terminal - - go get golang.org/x/sys/unix - - go get golang.org/x/sys/windows - script: - - go test -race -v ./... - - go: 1.10.x - install: - - go get github.com/stretchr/testify/assert - - go get golang.org/x/crypto/ssh/terminal - - go get golang.org/x/sys/unix - - go get golang.org/x/sys/windows - script: - - go test -race -v -tags appengine ./... - go: 1.11.x - env: GO111MODULE=on - install: - - go mod download - script: - - go test -race -v -tags appengine ./... - - go: 1.11.x - env: GO111MODULE=off - install: - - go get github.com/stretchr/testify/assert - - go get golang.org/x/crypto/ssh/terminal - - go get golang.org/x/sys/unix - - go get golang.org/x/sys/windows - script: - - go test -race -v -tags appengine ./... 
+ os: osx +install: + - ./travis/install.sh + - if [[ "$GO111MODULE" == "on" ]]; then go mod download; fi + - if [[ "$GO111MODULE" == "off" ]]; then go get github.com/stretchr/testify/assert golang.org/x/sys/unix github.com/konsorten/go-windows-terminal-sequences; fi +script: + - ./travis/cross_build.sh + - export GOMAXPROCS=4 + - export GORACE=halt_on_error=1 + - go test -race -v ./... + - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi diff --git a/vendor/github.com/sirupsen/logrus/BUILD.bazel b/vendor/github.com/sirupsen/logrus/BUILD.bazel index 53b743b350ac9..e2d8543accd58 100644 --- a/vendor/github.com/sirupsen/logrus/BUILD.bazel +++ b/vendor/github.com/sirupsen/logrus/BUILD.bazel @@ -12,10 +12,12 @@ go_library( "json_formatter.go", "logger.go", "logrus.go", + "terminal_check_bsd.go", + "terminal_check_no_terminal.go", "terminal_check_notappengine.go", + "terminal_check_solaris.go", + "terminal_check_unix.go", "terminal_check_windows.go", - "terminal_notwindows.go", - "terminal_windows.go", "text_formatter.go", "writer.go", ], @@ -24,37 +26,31 @@ go_library( visibility = ["//visibility:public"], deps = select({ "@io_bazel_rules_go//go/platform:android": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", ], "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", ], "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", ], "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", ], "@io_bazel_rules_go//go/platform:ios": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", ], "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", ], "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", ], "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", ], "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/konsorten/go-windows-terminal-sequences:go_default_library", diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md index cb85d9f9f61a4..51a7ab0cab91f 100644 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -1,3 +1,38 @@ +# 1.4.2 + * Fixes build break for plan9, nacl, solaris +# 1.4.1 +This new release introduces: + * Enhance TextFormatter to not print caller information when they are empty (#944) + * Remove dependency on golang.org/x/crypto (#932, 
#943) + +Fixes: + * Fix Entry.WithContext method to return a copy of the initial entry (#941) + +# 1.4.0 +This new release introduces: + * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). + * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter (#909, #911) + * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). + +Fixes: + * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). + * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) + * Fix infinite recursion on unknown `Level.String()` (#907) + * Fix race condition in `getCaller` (#916). + + +# 1.3.0 +This new release introduces: + * Log, Logf, Logln functions for Logger and Entry that take a Level + +Fixes: + * Building prometheus node_exporter on AIX (#840) + * Race condition in TextFormatter (#468) + * Travis CI import path (#868) + * Remove coloured output on Windows (#862) + * Pointer to func as field in JSONFormatter (#870) + * Properly marshal Levels (#873) + # 1.2.0 This new release introduces: * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index 093bb13f8352c..a4796eb07d461 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -361,9 +361,11 @@ The built-in logging formatters are: Third party logging formatters: * [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. +* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. You can define your formatter by implementing the `Formatter` interface, requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go index 8af90637a99e6..8fd189e1ccae8 100644 --- a/vendor/github.com/sirupsen/logrus/alt_exit.go +++ b/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -51,9 +51,9 @@ func Exit(code int) { os.Exit(code) } -// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke -// all handlers. The handlers will also be invoked when any Fatal log entry is -// made. +// RegisterExitHandler appends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. // // This method is useful when a caller wishes to use logrus to log a fatal // message but also needs to gracefully shutdown. 
An example usecase could be @@ -62,3 +62,15 @@ func Exit(code int) { func RegisterExitHandler(handler func()) { handlers = append(handlers, handler) } + +// DeferExitHandler prepends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func DeferExitHandler(handler func()) { + handlers = append([]func(){handler}, handlers...) +} diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index cc85d3aab49e2..63e25583cb03f 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -2,6 +2,7 @@ package logrus import ( "bytes" + "context" "fmt" "os" "reflect" @@ -69,6 +70,9 @@ type Entry struct { // When formatter is called in entry.log(), a Buffer may be set to entry Buffer *bytes.Buffer + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + // err may contain a field formatting error err string } @@ -97,6 +101,11 @@ func (entry *Entry) WithError(err error) *Entry { return entry.WithField(ErrorKey, err) } +// Add a context to the Entry. +func (entry *Entry) WithContext(ctx context.Context) *Entry { + return &Entry{Logger: entry.Logger, Data: entry.Data, Time: entry.Time, err: entry.err, Context: ctx} +} + // Add a single field to the Entry. func (entry *Entry) WithField(key string, value interface{}) *Entry { return entry.WithFields(Fields{key: value}) @@ -108,23 +117,34 @@ func (entry *Entry) WithFields(fields Fields) *Entry { for k, v := range entry.Data { data[k] = v } - var field_err string + fieldErr := entry.err for k, v := range fields { - if t := reflect.TypeOf(v); t != nil && t.Kind() == reflect.Func { - field_err = fmt.Sprintf("can not add field %q", k) - if entry.err != "" { - field_err = entry.err + ", " + field_err + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch t.Kind() { + case reflect.Func: + isErrField = true + case reflect.Ptr: + isErrField = t.Elem().Kind() == reflect.Func + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp } } else { data[k] = v } } - return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: field_err} + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} } // Overrides the time of the Entry. 
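Two of the additions above are easy to miss in the noise: `DeferExitHandler`, which prepends a handler so it runs before previously registered ones, and `WithContext`/`Entry.WithContext`, which attach a `context.Context` that hooks can inspect. A minimal sketch of both; the handler bodies and the context key are invented for illustration:

```
package main

import (
	"context"

	log "github.com/sirupsen/logrus"
)

type ctxKey string

func main() {
	// Handlers run on logrus.Exit (and on any Fatal log). Registered
	// handlers are appended; deferred handlers are prepended, so
	// "close db" runs before "flush queue" here.
	log.RegisterExitHandler(func() { log.Info("flush queue") })
	log.DeferExitHandler(func() { log.Info("close db") })

	// Attach a context to an entry; hooks can read it from entry.Context.
	ctx := context.WithValue(context.Background(), ctxKey("request_id"), "r-123")
	log.WithContext(ctx).WithField("user", "demo").Info("handling request")

	log.Exit(0) // runs the handlers, then os.Exit(0)
}
```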
func (entry *Entry) WithTime(t time.Time) *Entry { - return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t} + return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context} } // getPackageName reduces a fully qualified function name to the package name @@ -145,20 +165,23 @@ func getPackageName(f string) string { // getCaller retrieves the name of the first non-logrus calling function func getCaller() *runtime.Frame { - // Restrict the lookback frames to avoid runaway lookups - pcs := make([]uintptr, maximumCallerDepth) - depth := runtime.Callers(minimumCallerDepth, pcs) - frames := runtime.CallersFrames(pcs[:depth]) // cache this package's fully-qualified name callerInitOnce.Do(func() { - logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name()) + pcs := make([]uintptr, 2) + _ = runtime.Callers(0, pcs) + logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name()) // now that we have the cache, we can skip a minimum count of known-logrus functions - // XXX this is dubious, the number of frames may vary store an entry in a logger interface + // XXX this is dubious, the number of frames may vary minimumCallerDepth = knownLogrusFrames }) + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + for f, again := frames.Next(); again; f, again = frames.Next() { pkg := getPackageName(f.Function) @@ -240,16 +263,18 @@ func (entry *Entry) write() { } } -func (entry *Entry) Trace(args ...interface{}) { - if entry.Logger.IsLevelEnabled(TraceLevel) { - entry.log(TraceLevel, fmt.Sprint(args...)) +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) } } +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.IsLevelEnabled(DebugLevel) { - entry.log(DebugLevel, fmt.Sprint(args...)) - } + entry.Log(DebugLevel, args...) } func (entry *Entry) Print(args ...interface{}) { @@ -257,15 +282,11 @@ func (entry *Entry) Print(args ...interface{}) { } func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.IsLevelEnabled(InfoLevel) { - entry.log(InfoLevel, fmt.Sprint(args...)) - } + entry.Log(InfoLevel, args...) } func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.IsLevelEnabled(WarnLevel) { - entry.log(WarnLevel, fmt.Sprint(args...)) - } + entry.Log(WarnLevel, args...) } func (entry *Entry) Warning(args ...interface{}) { @@ -273,43 +294,37 @@ func (entry *Entry) Warning(args ...interface{}) { } func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.IsLevelEnabled(ErrorLevel) { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } + entry.Log(ErrorLevel, args...) } func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.IsLevelEnabled(FatalLevel) { - entry.log(FatalLevel, fmt.Sprint(args...)) - } + entry.Log(FatalLevel, args...) entry.Logger.Exit(1) } func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.IsLevelEnabled(PanicLevel) { - entry.log(PanicLevel, fmt.Sprint(args...)) - } + entry.Log(PanicLevel, args...) 
panic(fmt.Sprint(args...)) } // Entry Printf family functions -func (entry *Entry) Tracef(format string, args ...interface{}) { - if entry.Logger.IsLevelEnabled(TraceLevel) { - entry.Trace(fmt.Sprintf(format, args...)) +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) } } +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.IsLevelEnabled(DebugLevel) { - entry.Debug(fmt.Sprintf(format, args...)) - } + entry.Logf(DebugLevel, format, args...) } func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.IsLevelEnabled(InfoLevel) { - entry.Info(fmt.Sprintf(format, args...)) - } + entry.Logf(InfoLevel, format, args...) } func (entry *Entry) Printf(format string, args ...interface{}) { @@ -317,9 +332,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) { } func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.IsLevelEnabled(WarnLevel) { - entry.Warn(fmt.Sprintf(format, args...)) - } + entry.Logf(WarnLevel, format, args...) } func (entry *Entry) Warningf(format string, args ...interface{}) { @@ -327,42 +340,36 @@ func (entry *Entry) Warningf(format string, args ...interface{}) { } func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.IsLevelEnabled(ErrorLevel) { - entry.Error(fmt.Sprintf(format, args...)) - } + entry.Logf(ErrorLevel, format, args...) } func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.IsLevelEnabled(FatalLevel) { - entry.Fatal(fmt.Sprintf(format, args...)) - } + entry.Logf(FatalLevel, format, args...) entry.Logger.Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.IsLevelEnabled(PanicLevel) { - entry.Panic(fmt.Sprintf(format, args...)) - } + entry.Logf(PanicLevel, format, args...) } // Entry Println family functions -func (entry *Entry) Traceln(args ...interface{}) { - if entry.Logger.IsLevelEnabled(TraceLevel) { - entry.Trace(entry.sprintlnn(args...)) +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) } } +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.IsLevelEnabled(DebugLevel) { - entry.Debug(entry.sprintlnn(args...)) - } + entry.Logln(DebugLevel, args...) } func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.IsLevelEnabled(InfoLevel) { - entry.Info(entry.sprintlnn(args...)) - } + entry.Logln(InfoLevel, args...) } func (entry *Entry) Println(args ...interface{}) { @@ -370,9 +377,7 @@ func (entry *Entry) Println(args ...interface{}) { } func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.IsLevelEnabled(WarnLevel) { - entry.Warn(entry.sprintlnn(args...)) - } + entry.Logln(WarnLevel, args...) } func (entry *Entry) Warningln(args ...interface{}) { @@ -380,22 +385,16 @@ func (entry *Entry) Warningln(args ...interface{}) { } func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.IsLevelEnabled(ErrorLevel) { - entry.Error(entry.sprintlnn(args...)) - } + entry.Logln(ErrorLevel, args...) 
} func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.IsLevelEnabled(FatalLevel) { - entry.Fatal(entry.sprintlnn(args...)) - } + entry.Logln(FatalLevel, args...) entry.Logger.Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.IsLevelEnabled(PanicLevel) { - entry.Panic(entry.sprintlnn(args...)) - } + entry.Logln(PanicLevel, args...) } // Sprintlnn => Sprint no newline. This is to get the behavior of how diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go index 7342613c37259..62fc2f2193cdd 100644 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -1,6 +1,7 @@ package logrus import ( + "context" "io" "time" ) @@ -55,6 +56,11 @@ func WithError(err error) *Entry { return std.WithField(ErrorKey, err) } +// WithContext creates an entry from the standard logger and adds a context to it. +func WithContext(ctx context.Context) *Entry { + return std.WithContext(ctx) +} + // WithField creates an entry from the standard logger and adds a field to // it. If you want multiple fields, use `WithFields`. // diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod index 94574cc635c59..12fdf98984748 100644 --- a/vendor/github.com/sirupsen/logrus/go.mod +++ b/vendor/github.com/sirupsen/logrus/go.mod @@ -6,6 +6,5 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.1.1 // indirect github.com/stretchr/testify v1.2.2 - golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 - golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 + golang.org/x/sys v0.0.0-20190422165155-953cdadca894 ) diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum index 133d34ae11654..596c318b9f701 100644 --- a/vendor/github.com/sirupsen/logrus/go.sum +++ b/vendor/github.com/sirupsen/logrus/go.sum @@ -2,6 +2,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -9,7 +10,7 @@ github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go index 2605753599faf..098a21a067958 100644 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "runtime" ) type fieldKey string @@ -42,6 +43,12 @@ type JSONFormatter struct { // } FieldMap FieldMap + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the json data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from json fields. + CallerPrettyfier func(*runtime.Frame) (function string, file string) + // PrettyPrint will indent all json logs PrettyPrint bool } @@ -82,8 +89,17 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() if entry.HasCaller() { - data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function - data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + funcVal := entry.Caller.Function + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + if funcVal != "" { + data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal + } + if fileVal != "" { + data[f.FieldMap.resolve(FieldKeyFile)] = fileVal + } } var b *bytes.Buffer @@ -98,7 +114,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { encoder.SetIndent("", " ") } if err := encoder.Encode(data); err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err) } return b.Bytes(), nil diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index 5ceca0eab427c..c0c0b1e5590a1 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -1,6 +1,7 @@ package logrus import ( + "context" "io" "os" "sync" @@ -124,6 +125,13 @@ func (logger *Logger) WithError(err error) *Entry { return entry.WithError(err) } +// Add a context to the log entry. +func (logger *Logger) WithContext(ctx context.Context) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithContext(ctx) +} + // Overrides the time of the log entry. func (logger *Logger) WithTime(t time.Time) *Entry { entry := logger.newEntry() @@ -131,28 +139,24 @@ func (logger *Logger) WithTime(t time.Time) *Entry { return entry.WithTime(t) } -func (logger *Logger) Tracef(format string, args ...interface{}) { - if logger.IsLevelEnabled(TraceLevel) { +func (logger *Logger) Logf(level Level, format string, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Tracef(format, args...) + entry.Logf(level, format, args...) logger.releaseEntry(entry) } } +func (logger *Logger) Tracef(format string, args ...interface{}) { + logger.Logf(TraceLevel, format, args...) 
+} + func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.IsLevelEnabled(DebugLevel) { - entry := logger.newEntry() - entry.Debugf(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(DebugLevel, format, args...) } func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.IsLevelEnabled(InfoLevel) { - entry := logger.newEntry() - entry.Infof(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(InfoLevel, format, args...) } func (logger *Logger) Printf(format string, args ...interface{}) { @@ -162,139 +166,91 @@ func (logger *Logger) Printf(format string, args ...interface{}) { } func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.IsLevelEnabled(WarnLevel) { - entry := logger.newEntry() - entry.Warnf(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(WarnLevel, format, args...) } func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.IsLevelEnabled(WarnLevel) { - entry := logger.newEntry() - entry.Warnf(format, args...) - logger.releaseEntry(entry) - } + logger.Warnf(format, args...) } func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.IsLevelEnabled(ErrorLevel) { - entry := logger.newEntry() - entry.Errorf(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(ErrorLevel, format, args...) } func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.IsLevelEnabled(FatalLevel) { - entry := logger.newEntry() - entry.Fatalf(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(FatalLevel, format, args...) logger.Exit(1) } func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.IsLevelEnabled(PanicLevel) { + logger.Logf(PanicLevel, format, args...) +} + +func (logger *Logger) Log(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Panicf(format, args...) + entry.Log(level, args...) logger.releaseEntry(entry) } } func (logger *Logger) Trace(args ...interface{}) { - if logger.IsLevelEnabled(TraceLevel) { - entry := logger.newEntry() - entry.Trace(args...) - logger.releaseEntry(entry) - } + logger.Log(TraceLevel, args...) } func (logger *Logger) Debug(args ...interface{}) { - if logger.IsLevelEnabled(DebugLevel) { - entry := logger.newEntry() - entry.Debug(args...) - logger.releaseEntry(entry) - } + logger.Log(DebugLevel, args...) } func (logger *Logger) Info(args ...interface{}) { - if logger.IsLevelEnabled(InfoLevel) { - entry := logger.newEntry() - entry.Info(args...) - logger.releaseEntry(entry) - } + logger.Log(InfoLevel, args...) } func (logger *Logger) Print(args ...interface{}) { entry := logger.newEntry() - entry.Info(args...) + entry.Print(args...) logger.releaseEntry(entry) } func (logger *Logger) Warn(args ...interface{}) { - if logger.IsLevelEnabled(WarnLevel) { - entry := logger.newEntry() - entry.Warn(args...) - logger.releaseEntry(entry) - } + logger.Log(WarnLevel, args...) } func (logger *Logger) Warning(args ...interface{}) { - if logger.IsLevelEnabled(WarnLevel) { - entry := logger.newEntry() - entry.Warn(args...) - logger.releaseEntry(entry) - } + logger.Warn(args...) } func (logger *Logger) Error(args ...interface{}) { - if logger.IsLevelEnabled(ErrorLevel) { - entry := logger.newEntry() - entry.Error(args...) - logger.releaseEntry(entry) - } + logger.Log(ErrorLevel, args...) 
} func (logger *Logger) Fatal(args ...interface{}) { - if logger.IsLevelEnabled(FatalLevel) { - entry := logger.newEntry() - entry.Fatal(args...) - logger.releaseEntry(entry) - } + logger.Log(FatalLevel, args...) logger.Exit(1) } func (logger *Logger) Panic(args ...interface{}) { - if logger.IsLevelEnabled(PanicLevel) { + logger.Log(PanicLevel, args...) +} + +func (logger *Logger) Logln(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Panic(args...) + entry.Logln(level, args...) logger.releaseEntry(entry) } } func (logger *Logger) Traceln(args ...interface{}) { - if logger.IsLevelEnabled(TraceLevel) { - entry := logger.newEntry() - entry.Traceln(args...) - logger.releaseEntry(entry) - } + logger.Logln(TraceLevel, args...) } func (logger *Logger) Debugln(args ...interface{}) { - if logger.IsLevelEnabled(DebugLevel) { - entry := logger.newEntry() - entry.Debugln(args...) - logger.releaseEntry(entry) - } + logger.Logln(DebugLevel, args...) } func (logger *Logger) Infoln(args ...interface{}) { - if logger.IsLevelEnabled(InfoLevel) { - entry := logger.newEntry() - entry.Infoln(args...) - logger.releaseEntry(entry) - } + logger.Logln(InfoLevel, args...) } func (logger *Logger) Println(args ...interface{}) { @@ -304,44 +260,24 @@ func (logger *Logger) Println(args ...interface{}) { } func (logger *Logger) Warnln(args ...interface{}) { - if logger.IsLevelEnabled(WarnLevel) { - entry := logger.newEntry() - entry.Warnln(args...) - logger.releaseEntry(entry) - } + logger.Logln(WarnLevel, args...) } func (logger *Logger) Warningln(args ...interface{}) { - if logger.IsLevelEnabled(WarnLevel) { - entry := logger.newEntry() - entry.Warnln(args...) - logger.releaseEntry(entry) - } + logger.Warnln(args...) } func (logger *Logger) Errorln(args ...interface{}) { - if logger.IsLevelEnabled(ErrorLevel) { - entry := logger.newEntry() - entry.Errorln(args...) - logger.releaseEntry(entry) - } + logger.Logln(ErrorLevel, args...) } func (logger *Logger) Fatalln(args ...interface{}) { - if logger.IsLevelEnabled(FatalLevel) { - entry := logger.newEntry() - entry.Fatalln(args...) - logger.releaseEntry(entry) - } + logger.Logln(FatalLevel, args...) logger.Exit(1) } func (logger *Logger) Panicln(args ...interface{}) { - if logger.IsLevelEnabled(PanicLevel) { - entry := logger.newEntry() - entry.Panicln(args...) - logger.releaseEntry(entry) - } + logger.Logln(PanicLevel, args...) } func (logger *Logger) Exit(code int) { diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go index 4ef45186620ca..8644761f73cf8 100644 --- a/vendor/github.com/sirupsen/logrus/logrus.go +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -14,24 +14,11 @@ type Level uint32 // Convert the Level to a string. E.g. PanicLevel becomes "panic". func (level Level) String() string { - switch level { - case TraceLevel: - return "trace" - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" } - - return "unknown" } // ParseLevel takes a string level and returns the Logrus log level constant. 
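The logger.go rewrite above funnels every per-level method through the new level-parameterized `Log`, `Logf`, and `Logln`, which check `IsLevelEnabled` before doing any formatting work. A small sketch of calling them directly; message text and values are arbitrary:

```
package main

import log "github.com/sirupsen/logrus"

func main() {
	logger := log.New()
	logger.SetLevel(log.InfoLevel)

	// Level-parameterized calls added in this release.
	logger.Log(log.WarnLevel, "disk usage high")
	logger.Logf(log.InfoLevel, "served %d requests", 42)

	// Below the configured level: Logf returns before formatting,
	// so the fmt.Sprintf work is never performed.
	logger.Logf(log.DebugLevel, "expensive %v", []int{1, 2, 3})
}
```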
@@ -69,6 +56,27 @@ func (level *Level) UnmarshalText(text []byte) error { return nil } +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid logrus level %d", level) +} + // A constant exposing all logging levels var AllLevels = []Level{ PanicLevel, diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go new file mode 100644 index 0000000000000..3c4f43f91cd11 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -0,0 +1,13 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} + diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go similarity index 79% rename from vendor/github.com/sirupsen/logrus/terminal_check_js.go rename to vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go index 0c209750a3346..97af92c68ea2d 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_js.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go @@ -1,4 +1,4 @@ -// +build js +// +build js nacl plan9 package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go index cf309d6fb6e66..3293fb3caad45 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -1,18 +1,16 @@ -// +build !appengine,!js,!windows +// +build !appengine,!js,!windows,!nacl,!plan9 package logrus import ( "io" "os" - - "golang.org/x/crypto/ssh/terminal" ) func checkIfTerminal(w io.Writer) bool { switch v := w.(type) { case *os.File: - return terminal.IsTerminal(int(v.Fd())) + return isTerminal(int(v.Fd())) default: return false } diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go new file mode 100644 index 0000000000000..f6710b3bd0b04 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go @@ -0,0 +1,11 @@ +package logrus + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
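With `MarshalText` added alongside the existing `UnmarshalText`, a `Level` now round-trips cleanly through `encoding/json` and other text-based encoders. A quick sketch; the `config` struct is invented for the example:

```
package main

import (
	"encoding/json"
	"fmt"

	log "github.com/sirupsen/logrus"
)

type config struct {
	// Encodes as "warning" and decodes from "debug" via
	// Level.MarshalText and Level.UnmarshalText.
	LogLevel log.Level `json:"log_level"`
}

func main() {
	out, _ := json.Marshal(config{LogLevel: log.WarnLevel})
	fmt.Println(string(out)) // {"log_level":"warning"}

	var c config
	_ = json.Unmarshal([]byte(`{"log_level":"debug"}`), &c)
	fmt.Println(c.LogLevel) // debug
}
```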
+func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go new file mode 100644 index 0000000000000..355dc966f00b3 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -0,0 +1,13 @@ +// +build linux aix + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} + diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go index 3b9d2864ca346..572889db21629 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go @@ -6,15 +6,29 @@ import ( "io" "os" "syscall" + + sequences "github.com/konsorten/go-windows-terminal-sequences" ) +func initTerminal(w io.Writer) { + switch v := w.(type) { + case *os.File: + sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true) + } +} + func checkIfTerminal(w io.Writer) bool { + var ret bool switch v := w.(type) { case *os.File: var mode uint32 err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode) - return err == nil + ret = (err == nil) default: - return false + ret = false + } + if ret { + initTerminal(w) } + return ret } diff --git a/vendor/github.com/sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index 3dbd237203097..0000000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package logrus - -import "io" - -func initTerminal(w io.Writer) { -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_windows.go b/vendor/github.com/sirupsen/logrus/terminal_windows.go deleted file mode 100644 index b4ef5286cd472..0000000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !appengine,!js,windows - -package logrus - -import ( - "io" - "os" - "syscall" - - sequences "github.com/konsorten/go-windows-terminal-sequences" -) - -func initTerminal(w io.Writer) { - switch v := w.(type) { - case *os.File: - sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true) - } -} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go index 49ec92f172c73..e01587c437de9 100644 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "os" + "runtime" "sort" "strings" "sync" @@ -11,18 +12,13 @@ import ( ) const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 36 - gray = 37 + red = 31 + yellow = 33 + blue = 36 + gray = 37 ) -var ( - baseTimestamp time.Time - emptyFieldMap FieldMap -) +var baseTimestamp time.Time func init() { baseTimestamp = time.Now() @@ -76,21 +72,23 @@ type TextFormatter struct { // FieldKeyMsg: "@message"}} FieldMap FieldMap + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from fields. 
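`CallerPrettyfier` (added to `JSONFormatter` above and to `TextFormatter` just below) lets callers rewrite the function and file values emitted when `SetReportCaller(true)` is on; returning an empty string drops that key. A sketch that shortens the caller output; the trimming scheme is only an example:

```
package main

import (
	"fmt"
	"path/filepath"
	"runtime"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.SetReportCaller(true)
	logger.SetFormatter(&log.TextFormatter{
		CallerPrettyfier: func(f *runtime.Frame) (string, string) {
			// Keep only the bare function name and file:line.
			fn := filepath.Base(f.Function)
			file := fmt.Sprintf("%s:%d", filepath.Base(f.File), f.Line)
			return fn, file
		},
	})
	logger.Info("caller info is shortened")
}
```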
+ CallerPrettyfier func(*runtime.Frame) (function string, file string) + terminalInitOnce sync.Once } func (f *TextFormatter) init(entry *Entry) { if entry.Logger != nil { f.isTerminal = checkIfTerminal(entry.Logger.Out) - - if f.isTerminal { - initTerminal(entry.Logger.Out) - } } } func (f *TextFormatter) isColored() bool { - isColored := f.ForceColors || f.isTerminal + isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) if f.EnvironmentOverrideColors { if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" { @@ -107,14 +105,19 @@ func (f *TextFormatter) isColored() bool { // Format renders a single log entry func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - prefixFieldClashes(entry.Data, f.FieldMap, entry.HasCaller()) - - keys := make([]string, 0, len(entry.Data)) - for k := range entry.Data { + data := make(Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { keys = append(keys, k) } - fixedKeys := make([]string, 0, 4+len(entry.Data)) + var funcVal, fileVal string + + fixedKeys := make([]string, 0, 4+len(data)) if !f.DisableTimestamp { fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) } @@ -126,8 +129,19 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) } if entry.HasCaller() { - fixedKeys = append(fixedKeys, - f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile)) + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } else { + funcVal = entry.Caller.Function + fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + if funcVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) + } + if fileVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) + } } if !f.DisableSorting { @@ -160,8 +174,9 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { timestampFormat = defaultTimestampFormat } if f.isColored() { - f.printColored(b, entry, keys, timestampFormat) + f.printColored(b, entry, keys, data, timestampFormat) } else { + for _, key := range fixedKeys { var value interface{} switch { @@ -174,11 +189,11 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { case key == f.FieldMap.resolve(FieldKeyLogrusError): value = entry.err case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): - value = entry.Caller.Function + value = funcVal case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): - value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + value = fileVal default: - value = entry.Data[key] + value = data[key] } f.appendKeyValue(b, key, value) } @@ -188,7 +203,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { return b.Bytes(), nil } -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { var levelColor int switch entry.Level { case DebugLevel, TraceLevel: @@ -211,10 +226,21 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin entry.Message = strings.TrimSuffix(entry.Message, "\n") caller := "" - if entry.HasCaller() { - caller = fmt.Sprintf("%s:%d %s()", - entry.Caller.File, entry.Caller.Line, entry.Caller.Function) + 
funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } } if f.DisableTimestamp { @@ -225,7 +251,7 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) } for _, k := range keys { - v := entry.Data[k] + v := data[k] fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) f.appendValue(b, v) } diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml index f8a63b308ba56..00d04cb9b0269 100644 --- a/vendor/github.com/spf13/pflag/.travis.yml +++ b/vendor/github.com/spf13/pflag/.travis.yml @@ -3,8 +3,9 @@ sudo: false language: go go: - - 1.7.3 - - 1.8.1 + - 1.9.x + - 1.10.x + - 1.11.x - tip matrix: @@ -12,7 +13,7 @@ matrix: - go: tip install: - - go get github.com/golang/lint/golint + - go get golang.org/x/lint/golint - export PATH=$GOPATH/bin:$PATH - go install ./... diff --git a/vendor/github.com/spf13/pflag/BUILD.bazel b/vendor/github.com/spf13/pflag/BUILD.bazel index f160aab61d993..dde682c1ae232 100644 --- a/vendor/github.com/spf13/pflag/BUILD.bazel +++ b/vendor/github.com/spf13/pflag/BUILD.bazel @@ -11,12 +11,16 @@ go_library( "duration_slice.go", "flag.go", "float32.go", + "float32_slice.go", "float64.go", + "float64_slice.go", "golangflag.go", "int.go", "int16.go", "int32.go", + "int32_slice.go", "int64.go", + "int64_slice.go", "int8.go", "int_slice.go", "ip.go", @@ -27,6 +31,7 @@ go_library( "string_array.go", "string_slice.go", "string_to_int.go", + "string_to_int64.go", "string_to_string.go", "uint.go", "uint16.go", diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md index b052414d12951..7eacc5bdbe5f4 100644 --- a/vendor/github.com/spf13/pflag/README.md +++ b/vendor/github.com/spf13/pflag/README.md @@ -86,8 +86,8 @@ fmt.Println("ip has value ", *ip) fmt.Println("flagvar has value ", flagvar) ``` -There are helpers function to get values later if you have the FlagSet but -it was difficult to keep up with all of the flag pointers in your code. +There are helper functions available to get the value stored in a Flag if you have a FlagSet but find +it difficult to keep up with all of the pointers in your code. If you have a pflag.FlagSet with a flag called 'flagname' of type int you can use GetInt() to get the int value. But notice that 'flagname' must exist and it must be an int. GetString("flagname") will fail. 
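The pflag README passage above describes the typed getters: `GetInt`, `GetString`, and friends succeed only when the flag exists and was registered with the matching type. A short sketch; the flag name and values are arbitrary:

```
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.Int("flagname", 1234, "help message for flagname")
	_ = fs.Parse([]string{"--flagname=7"})

	n, err := fs.GetInt("flagname") // 7, <nil>
	fmt.Println(n, err)

	_, err = fs.GetString("flagname") // fails: the flag is an int, not a string
	fmt.Println(err)
}
```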
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go index 5af02f1a75a9d..3731370d6a572 100644 --- a/vendor/github.com/spf13/pflag/bool_slice.go +++ b/vendor/github.com/spf13/pflag/bool_slice.go @@ -71,6 +71,44 @@ func (s *boolSliceValue) String() string { return "[" + out + "]" } +func (s *boolSliceValue) fromString(val string) (bool, error) { + return strconv.ParseBool(val) +} + +func (s *boolSliceValue) toString(val bool) string { + return strconv.FormatBool(val) +} + +func (s *boolSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *boolSliceValue) Replace(val []string) error { + out := make([]bool, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *boolSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func boolSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go index aa126e44d1c83..a0b2679f71c75 100644 --- a/vendor/github.com/spf13/pflag/count.go +++ b/vendor/github.com/spf13/pflag/count.go @@ -46,7 +46,7 @@ func (f *FlagSet) GetCount(name string) (int, error) { // CountVar defines a count flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func (f *FlagSet) CountVar(p *int, name string, usage string) { f.CountVarP(p, name, "", usage) } @@ -69,7 +69,7 @@ func CountVarP(p *int, name, shorthand string, usage string) { // Count defines a count flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. 
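The count.go hunks in this change only fix a doc typo, but the behaviour they describe is worth a concrete illustration: a count flag adds 1 for every occurrence on the command line. A minimal sketch:

```
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	verbosity := fs.CountP("verbose", "v", "increase verbosity")

	// Each occurrence adds 1, so -vvv parses to 3.
	_ = fs.Parse([]string{"-vvv"})
	fmt.Println(*verbosity) // 3
}
```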
-// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func (f *FlagSet) Count(name string, usage string) *int { p := new(int) f.CountVarP(p, name, "", usage) diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go index 52c6b6dc1041f..badadda53fdbc 100644 --- a/vendor/github.com/spf13/pflag/duration_slice.go +++ b/vendor/github.com/spf13/pflag/duration_slice.go @@ -51,6 +51,44 @@ func (s *durationSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *durationSliceValue) fromString(val string) (time.Duration, error) { + return time.ParseDuration(val) +} + +func (s *durationSliceValue) toString(val time.Duration) string { + return fmt.Sprintf("%s", val) +} + +func (s *durationSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *durationSliceValue) Replace(val []string) error { + out := make([]time.Duration, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *durationSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func durationSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 9beeda8ecca93..24a5036e95b61 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -57,9 +57,9 @@ that give one-letter shorthands for flags. You can use these by appending var ip = flag.IntP("flagname", "f", 1234, "help message") var flagvar bool func init() { - flag.BoolVarP("boolname", "b", true, "help message") + flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") } - flag.VarP(&flagVar, "varname", "v", 1234, "help message") + flag.VarP(&flagval, "varname", "v", "help message") Shorthand letters can be used with single dashes on the command line. Boolean shorthand flags can be combined with other shorthand flags. @@ -190,6 +190,18 @@ type Value interface { Type() string } +// SliceValue is a secondary interface to all flags which hold a list +// of values. This allows full control over the value of list flags, +// and avoids complicated marshalling and unmarshalling to csv. +type SliceValue interface { + // Append adds the specified value to the end of the flag value list. + Append(string) error + // Replace will fully overwrite any data currently in the flag value list. + Replace([]string) error + // GetSlice returns the flag value list as an array of strings. + GetSlice() []string +} + // sortFlags returns the flags as a slice in lexicographical sorted order. 
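The new `SliceValue` interface above exposes list-valued flags to programmatic manipulation without round-tripping through the CSV `Set` path. A sketch using a bool slice flag, whose value type gains `Append`, `Replace`, and `GetSlice` in this change:

```
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.BoolSlice("features", []bool{}, "feature toggles")

	// boolSliceValue implements the new SliceValue interface.
	sv, ok := fs.Lookup("features").Value.(pflag.SliceValue)
	if !ok {
		panic("flag value does not implement pflag.SliceValue")
	}
	_ = sv.Append("true")
	_ = sv.Replace([]string{"false", "true"})
	fmt.Println(sv.GetSlice()) // [false true]
}
```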
func sortFlags(flags map[NormalizedName]*Flag) []*Flag { list := make(sort.StringSlice, len(flags)) diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go new file mode 100644 index 0000000000000..caa352741a603 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float32Slice Value +type float32SliceValue struct { + value *[]float32 + changed bool +} + +func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue { + isv := new(float32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return err + } + out[i] = float32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *float32SliceValue) Type() string { + return "float32Slice" +} + +func (s *float32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float32SliceValue) fromString(val string) (float32, error) { + t64, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(t64), nil +} + +func (s *float32SliceValue) toString(val float32) string { + return fmt.Sprintf("%f", val) +} + +func (s *float32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float32SliceValue) Replace(val []string) error { + out := make([]float32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float32{}, nil + } + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return nil, err + } + out[i] = float32(temp64) + + } + return out, nil +} + +// GetFloat32Slice return the []float32 value of a flag with the given name +func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) { + val, err := f.getFlagType(name, "float32Slice", float32SliceConv) + if err != nil { + return []float32{}, err + } + return val.([]float32), nil +} + +// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string. +// The argument p points to a []float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. 
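float32_slice.go adds a complete flag family (`Float32Slice`, `Float32SliceVar`, `GetFloat32Slice`, plus the `P` shorthand variants). A sketch of the basic define-parse-read cycle; the flag name and values are arbitrary:

```
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	weights := fs.Float32Slice("weights", []float32{1.0}, "comma-separated weights")

	_ = fs.Parse([]string{"--weights=0.5,1.5"})
	fmt.Println(*weights) // [0.5 1.5]

	// The typed getter parses via the same float32SliceConv helper.
	ws, err := fs.GetFloat32Slice("weights")
	fmt.Println(ws, err) // [0.5 1.5] <nil>
}
```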
+func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32SliceVar defines a float32[] flag with specified name, default value, and usage string. +// The argument p points to a float32[] variable in which to store the value of the flag. +func Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func Float32Slice(name string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, "", value, usage) +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go new file mode 100644 index 0000000000000..85bf3073d5064 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float64Slice Value +type float64SliceValue struct { + value *[]float64 + changed bool +} + +func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue { + isv := new(float64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *float64SliceValue) Type() string { + return "float64Slice" +} + +func (s *float64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float64SliceValue) fromString(val string) (float64, error) { + return strconv.ParseFloat(val, 64) +} + +func (s *float64SliceValue) toString(val float64) string { + return fmt.Sprintf("%f", val) +} + +func (s *float64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float64SliceValue) Replace(val []string) error { + out := make([]float64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float64{}, nil + } + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetFloat64Slice return the []float64 value of a flag with the given name +func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) { + val, err := f.getFlagType(name, "float64Slice", float64SliceConv) + if err != nil { + return []float64{}, err + } + return val.([]float64), nil +} + +// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string. +// The argument p points to a []float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64SliceVar defines a float64[] flag with specified name, default value, and usage string. +// The argument p points to a float64[] variable in which to store the value of the flag. +func Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. 
+func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. +func Float64Slice(name string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, "", value, usage) +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/go.mod b/vendor/github.com/spf13/pflag/go.mod new file mode 100644 index 0000000000000..b2287eec13482 --- /dev/null +++ b/vendor/github.com/spf13/pflag/go.mod @@ -0,0 +1,3 @@ +module github.com/spf13/pflag + +go 1.12 diff --git a/vendor/github.com/spf13/pflag/go.sum b/vendor/github.com/spf13/pflag/go.sum new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go new file mode 100644 index 0000000000000..ff128ff06d869 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int32Slice Value +type int32SliceValue struct { + value *[]int32 + changed bool +} + +func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue { + isv := new(int32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return err + } + out[i] = int32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int32SliceValue) Type() string { + return "int32Slice" +} + +func (s *int32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int32SliceValue) fromString(val string) (int32, error) { + t64, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(t64), nil +} + +func (s *int32SliceValue) toString(val int32) string { + return fmt.Sprintf("%d", val) +} + +func (s *int32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int32SliceValue) Replace(val []string) error { + out := make([]int32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int32{}, nil + } + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return nil, err + } + out[i] = int32(temp64) + + } + return out, nil +} + +// GetInt32Slice return the []int32 value of a flag with the given name +func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) { + val, err := f.getFlagType(name, "int32Slice", int32SliceConv) + if err != nil { + return []int32{}, err + } + return val.([]int32), nil +} + +// Int32SliceVar defines a int32Slice flag with specified name, default value, and usage string. +// The argument p points to a []int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32SliceVar defines a int32[] flag with specified name, default value, and usage string. +// The argument p points to a int32[] variable in which to store the value of the flag. +func Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. 
+func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. +func Int32Slice(name string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, "", value, usage) +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go new file mode 100644 index 0000000000000..25464638f3ab6 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int64Slice Value +type int64SliceValue struct { + value *[]int64 + changed bool +} + +func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue { + isv := new(int64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int64SliceValue) Type() string { + return "int64Slice" +} + +func (s *int64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int64SliceValue) fromString(val string) (int64, error) { + return strconv.ParseInt(val, 0, 64) +} + +func (s *int64SliceValue) toString(val int64) string { + return fmt.Sprintf("%d", val) +} + +func (s *int64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int64SliceValue) Replace(val []string) error { + out := make([]int64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int64{}, nil + } + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetInt64Slice return the []int64 value of a flag with the given name +func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) { + val, err := f.getFlagType(name, "int64Slice", int64SliceConv) + if err != nil { + return []int64{}, err + } + return val.([]int64), nil +} + +// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string. +// The argument p points to a []int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64SliceVar defines a int64[] flag with specified name, default value, and usage string. +// The argument p points to a int64[] variable in which to store the value of the flag. +func Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func Int64Slice(name string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, "", value, usage) +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go index 1e7c9edde955c..e71c39d91aa71 100644 --- a/vendor/github.com/spf13/pflag/int_slice.go +++ b/vendor/github.com/spf13/pflag/int_slice.go @@ -51,6 +51,36 @@ func (s *intSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *intSliceValue) Append(val string) error { + i, err := strconv.Atoi(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *intSliceValue) Replace(val []string) error { + out := make([]int, len(val)) + for i, d := range val { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *intSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = strconv.Itoa(d) + } + return out +} + func intSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go index 7dd196fe3fb11..775faae4fd8d2 100644 --- a/vendor/github.com/spf13/pflag/ip_slice.go +++ b/vendor/github.com/spf13/pflag/ip_slice.go @@ -72,9 +72,47 @@ func (s *ipSliceValue) String() string { return "[" + out + "]" } +func (s *ipSliceValue) fromString(val string) (net.IP, error) { + return net.ParseIP(strings.TrimSpace(val)), nil +} + +func (s *ipSliceValue) toString(val net.IP) string { + return val.String() +} + +func (s *ipSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *ipSliceValue) Replace(val []string) error { + out := make([]net.IP, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *ipSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func ipSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry + // Empty string would cause a slice with one (empty) entry if len(val) == 0 { return []net.IP{}, nil } diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go index fa7bc60187a79..4894af818023b 100644 --- a/vendor/github.com/spf13/pflag/string_array.go +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -23,6 +23,32 @@ func (s *stringArrayValue) Set(val string) error { return nil } +func (s *stringArrayValue) Append(val string) error { + *s.value = 
append(*s.value, val) + return nil +} + +func (s *stringArrayValue) Replace(val []string) error { + out := make([]string, len(val)) + for i, d := range val { + var err error + out[i] = d + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *stringArrayValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = d + } + return out +} + func (s *stringArrayValue) Type() string { return "stringArray" } diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go index 0cd3ccc083e28..3cb2e69dba026 100644 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ b/vendor/github.com/spf13/pflag/string_slice.go @@ -62,6 +62,20 @@ func (s *stringSliceValue) String() string { return "[" + str + "]" } +func (s *stringSliceValue) Append(val string) error { + *s.value = append(*s.value, val) + return nil +} + +func (s *stringSliceValue) Replace(val []string) error { + *s.value = val + return nil +} + +func (s *stringSliceValue) GetSlice() []string { + return *s.value +} + func stringSliceConv(sval string) (interface{}, error) { sval = sval[1 : len(sval)-1] // An empty string would cause a slice with one (empty) string @@ -84,7 +98,7 @@ func (f *FlagSet) GetStringSlice(name string) ([]string, error) { // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { @@ -100,7 +114,7 @@ func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []s // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func StringSliceVar(p *[]string, name string, value []string, usage string) { @@ -116,7 +130,7 @@ func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { @@ -136,7 +150,7 @@ func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage str // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
// For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func StringSlice(name string, value []string, usage string) *[]string { diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go new file mode 100644 index 0000000000000..a807a04a0bad8 --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_to_int64.go @@ -0,0 +1,149 @@ +package pflag + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// -- stringToInt64 Value +type stringToInt64Value struct { + value *map[string]int64 + changed bool +} + +func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value { + ssv := new(stringToInt64Value) + ssv.value = p + *ssv.value = val + return ssv +} + +// Format: a=1,b=2 +func (s *stringToInt64Value) Set(val string) error { + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return err + } + } + if !s.changed { + *s.value = out + } else { + for k, v := range out { + (*s.value)[k] = v + } + } + s.changed = true + return nil +} + +func (s *stringToInt64Value) Type() string { + return "stringToInt64" +} + +func (s *stringToInt64Value) String() string { + var buf bytes.Buffer + i := 0 + for k, v := range *s.value { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteString(k) + buf.WriteRune('=') + buf.WriteString(strconv.FormatInt(v, 10)) + i++ + } + return "[" + buf.String() + "]" +} + +func stringToInt64Conv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]int64{}, nil + } + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetStringToInt64 return the map[string]int64 value of a flag with the given name +func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) { + val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv) + if err != nil { + return map[string]int64{}, err + } + return val.(map[string]int64), nil +} + +// StringToInt64Var defines a string flag with specified name, default value, and usage string. +// The argument p point64s to a map[string]int64 variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { + f.VarP(newStringToInt64Value(value, p), name, "", usage) +} + +// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { + f.VarP(newStringToInt64Value(value, p), name, shorthand, usage) +} + +// StringToInt64Var defines a string flag with specified name, default value, and usage string. +// The argument p point64s to a map[string]int64 variable in which to store the value of the flag. 
+// The value of each argument will not try to be separated by comma +func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage) +} + +// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. +func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage) +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, "", value, usage) + return &p +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, shorthand, value, usage) + return &p +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, "", value, usage) +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. 
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go index edd94c600af09..5fa924835ed3f 100644 --- a/vendor/github.com/spf13/pflag/uint_slice.go +++ b/vendor/github.com/spf13/pflag/uint_slice.go @@ -50,6 +50,48 @@ func (s *uintSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *uintSliceValue) fromString(val string) (uint, error) { + t, err := strconv.ParseUint(val, 10, 0) + if err != nil { + return 0, err + } + return uint(t), nil +} + +func (s *uintSliceValue) toString(val uint) string { + return fmt.Sprintf("%d", val) +} + +func (s *uintSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *uintSliceValue) Replace(val []string) error { + out := make([]uint, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *uintSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func uintSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/go.opencensus.io/.gitignore b/vendor/go.opencensus.io/.gitignore new file mode 100644 index 0000000000000..74a6db472e56a --- /dev/null +++ b/vendor/go.opencensus.io/.gitignore @@ -0,0 +1,9 @@ +/.idea/ + +# go.opencensus.io/exporter/aws +/exporter/aws/ + +# Exclude vendor, use dep ensure after checkout: +/vendor/github.com/ +/vendor/golang.org/ +/vendor/google.golang.org/ diff --git a/vendor/go.opencensus.io/.travis.yml b/vendor/go.opencensus.io/.travis.yml new file mode 100644 index 0000000000000..bd6b66ee83d34 --- /dev/null +++ b/vendor/go.opencensus.io/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go_import_path: go.opencensus.io + +go: + - 1.11.x + +env: + global: + GO111MODULE=on + +before_script: + - make install-tools + +script: + - make travis-ci + - go run internal/check/version.go # TODO move this to makefile diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS new file mode 100644 index 0000000000000..e491a9e7f7831 --- /dev/null +++ b/vendor/go.opencensus.io/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/go.opencensus.io/BUILD.bazel b/vendor/go.opencensus.io/BUILD.bazel new file mode 100644 index 0000000000000..ce4964ce587ea --- /dev/null +++ b/vendor/go.opencensus.io/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["opencensus.go"], + importmap = "k8s.io/kops/vendor/go.opencensus.io", + importpath = "go.opencensus.io", + visibility = ["//visibility:public"], +) diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md new file mode 100644 index 0000000000000..1ba3962c8bfaf --- /dev/null +++ b/vendor/go.opencensus.io/CONTRIBUTING.md @@ -0,0 +1,63 @@ +# How to contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. 
You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to https://cla.developers.google.com/ to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult [GitHub Help] for more +information on using pull requests. + +[GitHub Help]: https://help.github.com/articles/about-pull-requests/ + +## Instructions + +Fork the repo, checkout the upstream repo to your GOPATH by: + +``` +$ go get -d go.opencensus.io +``` + +Add your fork as an origin: + +``` +cd $(go env GOPATH)/src/go.opencensus.io +git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git +``` + +Run tests: + +``` +$ make install-tools # Only first time. +$ make +``` + +Checkout a new branch, make modifications and push the branch to your fork: + +``` +$ git checkout -b feature +# edit files +$ git commit +$ git push fork feature +``` + +Open a pull request against the main opencensus-go repo. + +## General Notes +This project uses Appveyor and Travis for CI. + +The dependencies are managed with `go mod` if you work with the sources under your +`$GOPATH` you need to set the environment variable `GO111MODULE=on`. \ No newline at end of file diff --git a/vendor/go.opencensus.io/Gopkg.lock b/vendor/go.opencensus.io/Gopkg.lock new file mode 100644 index 0000000000000..3be12ac8f2478 --- /dev/null +++ b/vendor/go.opencensus.io/Gopkg.lock @@ -0,0 +1,231 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ + +[[projects]] + branch = "master" + digest = "1:eee9386329f4fcdf8d6c0def0c9771b634bdd5ba460d888aa98c17d59b37a76c" + name = "git.apache.org/thrift.git" + packages = ["lib/go/thrift"] + pruneopts = "UT" + revision = "6e67faa92827ece022380b211c2caaadd6145bf5" + source = "github.com/apache/thrift" + +[[projects]] + branch = "master" + digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" + name = "github.com/beorn7/perks" + packages = ["quantile"] + pruneopts = "UT" + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + +[[projects]] + digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + ] + pruneopts = "UT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" + +[[projects]] + digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "UT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + +[[projects]] + digest = "1:824c8f3aa4c5f23928fa84ebbd5ed2e9443b3f0cb958a40c1f2fbed5cf5e64b1" + name = "github.com/openzipkin/zipkin-go" + packages = [ + ".", + "idgenerator", + "model", + "propagation", + "reporter", + "reporter/http", + ] + pruneopts = "UT" + revision = "d455a5674050831c1e187644faa4046d653433c2" + version = "v0.1.1" + +[[projects]] + digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/promhttp", + ] + pruneopts = "UT" + revision = "c5b7fccd204277076155f10851dad72b76a49317" + version = "v0.8.0" + +[[projects]] + branch = "master" + digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "UT" + revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" + +[[projects]] + branch = "master" + digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "UT" + revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" + +[[projects]] + branch = "master" + digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs", + ] + pruneopts = "UT" + revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" + +[[projects]] + branch = "master" + digest = "1:deafe4ab271911fec7de5b693d7faae3f38796d9eb8622e2b9e7df42bb3dfea9" + name = "golang.org/x/net" + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "922f4815f713f213882e8ef45e0d315b164d705c" + +[[projects]] + branch = "master" + digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" + +[[projects]] + branch = "master" + digest = "1:a3f00ac457c955fe86a41e1495e8f4c54cb5399d609374c5cc26aa7d72e542c8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "3b58ed4ad3395d483fc92d5d14123ce2c3581fec" + +[[projects]] + digest = 
"1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + digest = "1:c0c17c94fe8bc1ab34e7f586a4a8b788c5e1f4f9f750ff23395b8b2f5a523530" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "e21acd801f91da814261b938941d193bb036441a" + +[[projects]] + branch = "master" + digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + pruneopts = "UT" + revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4" + +[[projects]] + digest = "1:3dd7996ce6bf52dec6a2f69fa43e7c4cefea1d4dfa3c8ab7a5f8a9f7434e239d" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "codes", + "connectivity", + "credentials", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455" + version = "v1.14.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "git.apache.org/thrift.git/lib/go/thrift", + "github.com/golang/protobuf/proto", + "github.com/openzipkin/zipkin-go", + "github.com/openzipkin/zipkin-go/model", + "github.com/openzipkin/zipkin-go/reporter", + "github.com/openzipkin/zipkin-go/reporter/http", + "github.com/prometheus/client_golang/prometheus", + "github.com/prometheus/client_golang/prometheus/promhttp", + "golang.org/x/net/context", + "golang.org/x/net/http2", + "google.golang.org/api/support/bundler", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/grpclog", + "google.golang.org/grpc/metadata", + "google.golang.org/grpc/stats", + "google.golang.org/grpc/status", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/go.opencensus.io/Gopkg.toml b/vendor/go.opencensus.io/Gopkg.toml new file mode 100644 index 0000000000000..a9f3cd68eb302 --- /dev/null +++ b/vendor/go.opencensus.io/Gopkg.toml @@ -0,0 +1,36 @@ +# For v0.x.y dependencies, prefer adding a constraints of the form: version=">= 0.x.y" +# to avoid locking to a particular minor version which can cause dep to not be +# able to find a satisfying dependency graph. 
+ +[[constraint]] + branch = "master" + name = "git.apache.org/thrift.git" + source = "github.com/apache/thrift" + +[[constraint]] + name = "github.com/golang/protobuf" + version = "1.0.0" + +[[constraint]] + name = "github.com/openzipkin/zipkin-go" + version = ">=0.1.0" + +[[constraint]] + name = "github.com/prometheus/client_golang" + version = ">=0.8.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/net" + +[[constraint]] + branch = "master" + name = "google.golang.org/api" + +[[constraint]] + name = "google.golang.org/grpc" + version = "1.11.3" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE new file mode 100644 index 0000000000000..7a4a3ea2424c0 --- /dev/null +++ b/vendor/go.opencensus.io/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile new file mode 100644 index 0000000000000..457866cb1f2fd --- /dev/null +++ b/vendor/go.opencensus.io/Makefile @@ -0,0 +1,96 @@ +# TODO: Fix this on windows. +ALL_SRC := $(shell find . -name '*.go' \ + -not -path './vendor/*' \ + -not -path '*/gen-go/*' \ + -type f | sort) +ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) + +GOTEST_OPT?=-v -race -timeout 30s +GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic +GOTEST=go test +GOFMT=gofmt +GOLINT=golint +GOVET=go vet +EMBEDMD=embedmd +# TODO decide if we need to change these names. +TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages" +TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" +README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ') + +.DEFAULT_GOAL := fmt-lint-vet-embedmd-test + +.PHONY: fmt-lint-vet-embedmd-test +fmt-lint-vet-embedmd-test: fmt lint vet embedmd test + +# TODO enable test-with-coverage in tavis +.PHONY: travis-ci +travis-ci: fmt lint vet embedmd test test-386 + +all-pkgs: + @echo $(ALL_PKGS) | tr ' ' '\n' | sort + +all-srcs: + @echo $(ALL_SRC) | tr ' ' '\n' | sort + +.PHONY: test +test: + $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS) + +.PHONY: test-386 +test-386: + GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS) + +.PHONY: test-with-coverage +test-with-coverage: + $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) + +.PHONY: fmt +fmt: + @FMTOUT=`$(GOFMT) -s -l $(ALL_SRC) 2>&1`; \ + if [ "$$FMTOUT" ]; then \ + echo "$(GOFMT) FAILED => gofmt the following files:\n"; \ + echo "$$FMTOUT\n"; \ + exit 1; \ + else \ + echo "Fmt finished successfully"; \ + fi + +.PHONY: lint +lint: + @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \ + if [ "$$LINTOUT" ]; then \ + echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \ + echo "$$LINTOUT\n"; \ + exit 1; \ + else \ + echo "Lint finished successfully"; \ + fi + +.PHONY: vet +vet: + # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0" + @VETOUT=`$(GOVET) ./... 
| grep -v "go: downloading" 2>&1`; \ + if [ "$$VETOUT" ]; then \ + echo "$(GOVET) FAILED => go vet the following files:\n"; \ + echo "$$VETOUT\n"; \ + exit 1; \ + else \ + echo "Vet finished successfully"; \ + fi + +.PHONY: embedmd +embedmd: + @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ + if [ "$$EMBEDMDOUT" ]; then \ + echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ + echo "$$EMBEDMDOUT\n"; \ + exit 1; \ + else \ + echo "Embedmd finished successfully"; \ + fi + +.PHONY: install-tools +install-tools: + go get -u golang.org/x/tools/cmd/cover + go get -u golang.org/x/lint/golint + go get -u github.com/rakyll/embedmd diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md new file mode 100644 index 0000000000000..3f40ed5cbbeb6 --- /dev/null +++ b/vendor/go.opencensus.io/README.md @@ -0,0 +1,263 @@ +# OpenCensus Libraries for Go + +[![Build Status][travis-image]][travis-url] +[![Windows Build Status][appveyor-image]][appveyor-url] +[![GoDoc][godoc-image]][godoc-url] +[![Gitter chat][gitter-image]][gitter-url] + +OpenCensus Go is a Go implementation of OpenCensus, a toolkit for +collecting application performance and behavior monitoring data. +Currently it consists of three major components: tags, stats and tracing. + +## Installation + +``` +$ go get -u go.opencensus.io +``` + +The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy). +The use of vendoring or a dependency management tool is recommended. + +## Prerequisites + +OpenCensus Go libraries require Go 1.8 or later. + +## Getting Started + +The easiest way to get started using OpenCensus in your application is to use an existing +integration with your RPC framework: + +* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) +* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) +* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql) +* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) +* [Groupcache](https://godoc.org/github.com/orijtech/groupcache) +* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) +* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver) +* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo) +* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) +* [Memcache](https://godoc.org/github.com/orijtech/gomemcache) + +If you're using a framework not listed here, you could either implement your own middleware for your +framework or use [custom stats](#stats) and [spans](#spans) directly in your application. + +## Exporters + +OpenCensus can export instrumentation data to various backends. +OpenCensus has exporter implementations for the following, users +can implement their own exporters by implementing the exporter interfaces +([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter), +[trace](https://godoc.org/go.opencensus.io/trace#Exporter)): + +* [Prometheus][exporter-prom] for stats +* [OpenZipkin][exporter-zipkin] for traces +* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces +* [Jaeger][exporter-jaeger] for traces +* [AWS X-Ray][exporter-xray] for traces +* [Datadog][exporter-datadog] for stats and traces +* [Graphite][exporter-graphite] for stats +* [Honeycomb][exporter-honeycomb] for traces + +## Overview + +![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) + +In a microservices environment, a user request may go through +multiple services until there is a response. 
OpenCensus allows +you to instrument your services and collect diagnostics data all +through your services end-to-end. + +## Tags + +Tags represent propagated key-value pairs. They are propagated using `context.Context` +in the same process or can be encoded to be transmitted on the wire. Usually, this will +be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler` +for gRPC. + +Package `tag` allows adding or modifying tags in the current context. + +[embedmd]:# (internal/readme/tags.go new) +```go +ctx, err = tag.New(ctx, + tag.Insert(osKey, "macOS-10.12.5"), + tag.Upsert(userIDKey, "cde36753ed"), +) +if err != nil { + log.Fatal(err) +} +``` + +## Stats + +OpenCensus is a low-overhead framework even if instrumentation is always enabled. +In order to be so, it is optimized to make recording of data points fast +and separate from the data aggregation. + +OpenCensus stats collection happens in two stages: + +* Definition of measures and recording of data points +* Definition of views and aggregation of the recorded data + +### Recording + +Measurements are data points associated with a measure. +Recording implicitly tags the set of Measurements with the tags from the +provided context: + +[embedmd]:# (internal/readme/stats.go record) +```go +stats.Record(ctx, videoSize.M(102478)) +``` + +### Views + +Views are how Measures are aggregated. You can think of them as queries over the +set of recorded data points (measurements). + +Views have two parts: the tags to group by and the aggregation type used. + +Currently three types of aggregations are supported: +* CountAggregation is used to count the number of times a sample was recorded. +* DistributionAggregation is used to provide a histogram of the values of the samples. +* SumAggregation is used to sum up all sample values. + +[embedmd]:# (internal/readme/stats.go aggs) +```go +distAgg := view.Distribution(1<<32, 2<<32, 3<<32) +countAgg := view.Count() +sumAgg := view.Sum() +``` + +Here we create a view with the DistributionAggregation over our measure. + +[embedmd]:# (internal/readme/stats.go view) +```go +if err := view.Register(&view.View{ + Name: "example.com/video_size_distribution", + Description: "distribution of processed video size over time", + Measure: videoSize, + Aggregation: view.Distribution(1<<32, 2<<32, 3<<32), +}); err != nil { + log.Fatalf("Failed to register view: %v", err) +} +``` + +Register begins collecting data for the view. Registered views' data will be +exported via the registered exporters. + +## Traces + +A distributed trace tracks the progression of a single user request as +it is handled by the services and processes that make up an application. +Each step is called a span in the trace. Spans include metadata about the step, +including especially the time spent in the step, called the span’s latency. + +Below you see a trace and several spans underneath it. + +![Traces and spans](https://i.imgur.com/7hZwRVj.png) + +### Spans + +Span is the unit step in a trace. Each span has a name, latency, status and +additional metadata. + +Below we are starting a span for a cache read and ending it +when we are done: + +[embedmd]:# (internal/readme/trace.go startend) +```go +ctx, span := trace.StartSpan(ctx, "cache.Get") +defer span.End() + +// Do work to get from cache. +``` + +### Propagation + +Spans can have parents or can be root spans if they don't have any parents. +The current span is propagated in-process and across the network to allow associating +new child spans with the parent. 
+ +In the same process, `context.Context` is used to propagate spans. +`trace.StartSpan` creates a new span as a root if the current context +doesn't contain a span. Otherwise, it creates a child of the span that is +already in the current context. The returned context can be used to keep +propagating the newly created span in the current context. + +[embedmd]:# (internal/readme/trace.go startend) +```go +ctx, span := trace.StartSpan(ctx, "cache.Get") +defer span.End() + +// Do work to get from cache. +``` + +Across the network, OpenCensus provides different propagation +methods for different protocols. + +* gRPC integrations use OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). +* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation) + by default but can be configured to use a custom propagation method by setting another + [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat) (a configuration sketch appears at the end of this README). + +## Execution Tracer + +With Go 1.11, OpenCensus Go will support integration with the Go execution tracer. +See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68) +for an example of using them together. + +## Profiles + +OpenCensus tags can be applied as profiler labels +for users who are on Go 1.9 and above. + +[embedmd]:# (internal/readme/tags.go profiler) +```go +ctx, err = tag.New(ctx, + tag.Insert(osKey, "macOS-10.12.5"), + tag.Insert(userIDKey, "fff0989878"), +) +if err != nil { + log.Fatal(err) +} +tag.Do(ctx, func(ctx context.Context) { + // Do work. + // When profiling is on, samples will be + // recorded with the key/values from the tag map. +}) +``` + +A screenshot of the CPU profile from the program above: + +![CPU profile](https://i.imgur.com/jBKjlkw.png) + +## Deprecation Policy + +Before version 1.0.0, the following deprecation policy will be observed: + +No backwards-incompatible changes will be made except for the removal of symbols that have +been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release +removing the *Deprecated* functionality will be made no sooner than 28 days after the first +release in which the functionality was marked *Deprecated*.
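+ +As a worked illustration of the custom HTTP propagation mentioned in the Propagation +section above (a sketch rather than one of the embedded examples; the listen address and +the use of `http.DefaultServeMux` are placeholder choices), the `ochttp` plugin accepts +any `propagation.HTTPFormat`, here set explicitly to the B3 format that is already the +default: + +```go +package main + +import ( + "log" + "net/http" + + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/plugin/ochttp/propagation/b3" +) + +func main() { + // Wrap the default mux so every incoming request is traced and measured. + handler := &ochttp.Handler{ + Handler: http.DefaultServeMux, + Propagation: &b3.HTTPFormat{}, // explicit here; B3 is the default + } + log.Fatal(http.ListenAndServe(":8080", handler)) +} +```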
+ +[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master +[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go +[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true +[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master +[godoc-image]: https://godoc.org/go.opencensus.io?status.svg +[godoc-url]: https://godoc.org/go.opencensus.io +[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg +[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + + +[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap +[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace + +[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus +[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver +[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin +[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger +[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws +[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog +[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite +[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml new file mode 100644 index 0000000000000..12bd7c4c73d7f --- /dev/null +++ b/vendor/go.opencensus.io/appveyor.yml @@ -0,0 +1,25 @@ +version: "{build}" + +platform: x64 + +clone_folder: c:\gopath\src\go.opencensus.io + +environment: + GOPATH: 'c:\gopath' + GOVERSION: '1.11' + GO111MODULE: 'on' + CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 + +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - choco upgrade golang --version 1.11.5 # Temporary fix because of a go.sum bug in 1.11 + - go version + - go env + +build: false +deploy: false + +test_script: + - cd %APPVEYOR_BUILD_FOLDER% + - go build -v .\... + - go test -v .\... 
# No -race because cgo is disabled diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod new file mode 100644 index 0000000000000..8b7d38e91b701 --- /dev/null +++ b/vendor/go.opencensus.io/go.mod @@ -0,0 +1,10 @@ +module go.opencensus.io + +require ( + github.com/golang/protobuf v1.2.0 + github.com/google/go-cmp v0.2.0 + github.com/hashicorp/golang-lru v0.5.0 + golang.org/x/net v0.0.0-20190311183353-d8887717615a + google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 // indirect + google.golang.org/grpc v1.19.0 +) diff --git a/vendor/go.opencensus.io/go.sum b/vendor/go.opencensus.io/go.sum new file mode 100644 index 0000000000000..cbb37036d404f --- /dev/null +++ b/vendor/go.opencensus.io/go.sum @@ -0,0 +1,50 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/go.opencensus.io/internal/BUILD.bazel b/vendor/go.opencensus.io/internal/BUILD.bazel new file mode 100644 index 0000000000000..e416e8061ce46 --- /dev/null +++ b/vendor/go.opencensus.io/internal/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "internal.go", + "sanitize.go", + "traceinternals.go", + ], + importmap = "k8s.io/kops/vendor/go.opencensus.io/internal", + importpath = "go.opencensus.io/internal", + visibility = ["//vendor/go.opencensus.io:__subpackages__"], + deps = ["//vendor/go.opencensus.io:go_default_library"], +) diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go new file mode 100644 index 0000000000000..9a638781cf1df --- /dev/null +++ b/vendor/go.opencensus.io/internal/internal.go @@ -0,0 +1,37 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opencensus.io/internal" + +import ( + "fmt" + "time" + + opencensus "go.opencensus.io" +) + +// UserAgent is the user agent to be added to the outgoing +// requests from the exporters. +var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) + +// MonotonicEndTime returns the end time at present +// but offset from start, monotonically. +// +// The monotonic clock is used in subtractions, hence +// the duration since start added back to start gives +// end as a monotonic time. +// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func MonotonicEndTime(start time.Time) time.Time { + return start.Add(time.Now().Sub(start)) +} diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go new file mode 100644 index 0000000000000..de8ccf236c4b2 --- /dev/null +++ b/vendor/go.opencensus.io/internal/sanitize.go @@ -0,0 +1,50 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "strings" + "unicode" +) + +const labelKeySizeLimit = 100 + +// Sanitize returns a string that is truncated to 100 characters if it's too +// long, and replaces non-alphanumeric characters with underscores.
+func Sanitize(s string) string { + if len(s) == 0 { + return s + } + if len(s) > labelKeySizeLimit { + s = s[:labelKeySizeLimit] + } + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = "key_" + s + } + if s[0] == '_' { + s = "key" + s + } + return s +} + +// converts anything that is not a letter or digit to an underscore +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} diff --git a/vendor/go.opencensus.io/internal/tagencoding/BUILD.bazel b/vendor/go.opencensus.io/internal/tagencoding/BUILD.bazel new file mode 100644 index 0000000000000..08680e59cf46e --- /dev/null +++ b/vendor/go.opencensus.io/internal/tagencoding/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["tagencoding.go"], + importmap = "k8s.io/kops/vendor/go.opencensus.io/internal/tagencoding", + importpath = "go.opencensus.io/internal/tagencoding", + visibility = ["//vendor/go.opencensus.io:__subpackages__"], +) diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go new file mode 100644 index 0000000000000..41b2c3fc0387a --- /dev/null +++ b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go @@ -0,0 +1,75 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package tagencoding contains the tag encoding +// used internally by the stats collector. +package tagencoding // import "go.opencensus.io/internal/tagencoding" + +// Values represent the encoded buffer for the values. +type Values struct { + Buffer []byte + WriteIndex int + ReadIndex int +} + +func (vb *Values) growIfRequired(expected int) { + if len(vb.Buffer)-vb.WriteIndex < expected { + tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected) + copy(tmp, vb.Buffer) + vb.Buffer = tmp + } +} + +// WriteValue is the helper method to encode Values from map[Key][]byte. +func (vb *Values) WriteValue(v []byte) { + length := len(v) & 0xff + vb.growIfRequired(1 + length) + + // writing length of v + vb.Buffer[vb.WriteIndex] = byte(length) + vb.WriteIndex++ + + if length == 0 { + // No value was encoded for this key + return + } + + // writing v + copy(vb.Buffer[vb.WriteIndex:], v[:length]) + vb.WriteIndex += length +} + +// ReadValue is the helper method to decode Values to a map[Key][]byte. +func (vb *Values) ReadValue() []byte { + // read length of v + length := int(vb.Buffer[vb.ReadIndex]) + vb.ReadIndex++ + if length == 0 { + // No value was encoded for this key + return nil + } + + // read value of v + v := make([]byte, length) + endIdx := vb.ReadIndex + length + copy(v, vb.Buffer[vb.ReadIndex:endIdx]) + vb.ReadIndex = endIdx + return v +} + +// Bytes returns a reference to the already written bytes in the Buffer.
+func (vb *Values) Bytes() []byte { + return vb.Buffer[:vb.WriteIndex] +} diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go new file mode 100644 index 0000000000000..073af7b473a63 --- /dev/null +++ b/vendor/go.opencensus.io/internal/traceinternals.go @@ -0,0 +1,53 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "time" +) + +// Trace allows internal access to some trace functionality. +// TODO(#412): remove this +var Trace interface{} + +// LocalSpanStoreEnabled true if the local span store is enabled. +var LocalSpanStoreEnabled bool + +// BucketConfiguration stores the number of samples to store for span buckets +// for successful and failed spans for a particular span name. +type BucketConfiguration struct { + Name string + MaxRequestsSucceeded int + MaxRequestsErrors int +} + +// PerMethodSummary is a summary of the spans stored for a single span name. +type PerMethodSummary struct { + Active int + LatencyBuckets []LatencyBucketSummary + ErrorBuckets []ErrorBucketSummary +} + +// LatencyBucketSummary is a summary of a latency bucket. +type LatencyBucketSummary struct { + MinLatency, MaxLatency time.Duration + Size int +} + +// ErrorBucketSummary is a summary of an error bucket. +type ErrorBucketSummary struct { + ErrorCode int32 + Size int +} diff --git a/vendor/go.opencensus.io/metric/metricdata/BUILD.bazel b/vendor/go.opencensus.io/metric/metricdata/BUILD.bazel new file mode 100644 index 0000000000000..d61a5b054164e --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "exemplar.go", + "label.go", + "metric.go", + "point.go", + "type_string.go", + "unit.go", + ], + importmap = "k8s.io/kops/vendor/go.opencensus.io/metric/metricdata", + importpath = "go.opencensus.io/metric/metricdata", + visibility = ["//visibility:public"], + deps = ["//vendor/go.opencensus.io/resource:go_default_library"], +) diff --git a/vendor/go.opencensus.io/metric/metricdata/doc.go b/vendor/go.opencensus.io/metric/metricdata/doc.go new file mode 100644 index 0000000000000..52a7b3bf8509d --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package metricdata contains the metrics data model. +// +// This is an EXPERIMENTAL package, and may change in arbitrary ways without +// notice. +package metricdata // import "go.opencensus.io/metric/metricdata" diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go new file mode 100644 index 0000000000000..12695ce2dc741 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/exemplar.go @@ -0,0 +1,38 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Exemplars keys. +const ( + AttachmentKeySpanContext = "SpanContext" +) + +// Exemplar is an example data point associated with each bucket of a +// distribution type aggregation. +// +// Their purpose is to provide an example of the kind of thing +// (request, RPC, trace span, etc.) that resulted in that measurement. +type Exemplar struct { + Value float64 // the value that was recorded + Timestamp time.Time // the time the value was recorded + Attachments Attachments // attachments (if any) +} + +// Attachments is a map of extra values associated with a recorded data point. +type Attachments map[string]interface{} diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go new file mode 100644 index 0000000000000..aadae41e6a218 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/label.go @@ -0,0 +1,35 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// LabelKey represents key of a label. It has optional +// description attribute. +type LabelKey struct { + Key string + Description string +} + +// LabelValue represents the value of a label. +// The zero value represents a missing label value, which may be treated +// differently to an empty string value by some back ends. +type LabelValue struct { + Value string // string value of the label + Present bool // flag that indicated whether a value is present or not +} + +// NewLabelValue creates a new non-nil LabelValue that represents the given string. 
+func NewLabelValue(val string) LabelValue { + return LabelValue{Value: val, Present: true} +} diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go new file mode 100644 index 0000000000000..8293712c77f00 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/metric.go @@ -0,0 +1,46 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" + + "go.opencensus.io/resource" +) + +// Descriptor holds metadata about a metric. +type Descriptor struct { + Name string // full name of the metric + Description string // human-readable description + Unit Unit // units for the measure + Type Type // type of measure + LabelKeys []LabelKey // label keys +} + +// Metric represents a quantity measured against a resource with different +// label value combinations. +type Metric struct { + Descriptor Descriptor // metric descriptor + Resource *resource.Resource // resource against which this was measured + TimeSeries []*TimeSeries // one time series for each combination of label values +} + +// TimeSeries is a sequence of points associated with a combination of label +// values. +type TimeSeries struct { + LabelValues []LabelValue // label values, same order as keys in the metric descriptor + Points []Point // points sequence + StartTime time.Time // time we started recording this time series +} diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go new file mode 100644 index 0000000000000..7fe057b19cf7b --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/point.go @@ -0,0 +1,193 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Point is a single data point of a time series. +type Point struct { + // Time is the point in time that this point represents in a time series. + Time time.Time + // Value is the value of this point. Prefer using ReadValue to switching on + // the value type, since new value types might be added. + Value interface{} +} + +//go:generate stringer -type ValueType + +// NewFloat64Point creates a new Point holding a float64 value. +func NewFloat64Point(t time.Time, val float64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewInt64Point creates a new Point holding an int64 value. 
+func NewInt64Point(t time.Time, val int64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewDistributionPoint creates a new Point holding a Distribution value. +func NewDistributionPoint(t time.Time, val *Distribution) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewSummaryPoint creates a new Point holding a Summary value. +func NewSummaryPoint(t time.Time, val *Summary) Point { + return Point{ + Value: val, + Time: t, + } +} + +// ValueVisitor allows reading the value of a point. +type ValueVisitor interface { + VisitFloat64Value(float64) + VisitInt64Value(int64) + VisitDistributionValue(*Distribution) + VisitSummaryValue(*Summary) +} + +// ReadValue accepts a ValueVisitor and calls the appropriate method with the +// value of this point. +// Consumers of Point should use this in preference to switching on the type +// of the value directly, since new value types may be added. +func (p Point) ReadValue(vv ValueVisitor) { + switch v := p.Value.(type) { + case int64: + vv.VisitInt64Value(v) + case float64: + vv.VisitFloat64Value(v) + case *Distribution: + vv.VisitDistributionValue(v) + case *Summary: + vv.VisitSummaryValue(v) + default: + panic("unexpected value type") + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type Distribution struct { + // Count is the number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 + // Sum is the sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 + // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. + SumOfSquaredDeviation float64 + // BucketOptions describes the bounds of the histogram buckets in this + // distribution. + // + // A Distribution may optionally contain a histogram of the values in the + // population. + // + // If nil, there is no associated histogram. + BucketOptions *BucketOptions + // Bucket If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. + Buckets []Bucket +} + +// BucketOptions describes the bounds of the histogram buckets in this +// distribution. +type BucketOptions struct { + // Bounds specifies a set of bucket upper bounds. + // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket + // index i are: + // + // [0, Bounds[i]) for i == 0 + // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 + // [Bounds[i-1], +infinity) for i == N-1 + Bounds []float64 +} + +// Bucket represents a single bucket (value range) in a distribution. +type Bucket struct { + // Count is the number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 + // Exemplar associated with this bucket (if any). + Exemplar *Exemplar +} + +// Summary is a representation of percentiles. +type Summary struct { + // Count is the cumulative count (if available). 
+ Count int64 + // Sum is the cumulative sum of values (if available). + Sum float64 + // HasCountAndSum is true if Count and Sum are available. + HasCountAndSum bool + // Snapshot represents percentiles calculated over an arbitrary time window. + // The values in this struct can be reset at arbitrary unknown times, with + // the requirement that all of them are reset at the same time. + Snapshot Snapshot +} + +// Snapshot represents percentiles over an arbitrary time. +// The values in this struct can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. +type Snapshot struct { + // Count is the number of values in the snapshot. Optional since some systems don't + // expose this. Set to 0 if not available. + Count int64 + // Sum is the sum of values in the snapshot. Optional since some systems don't + // expose this. If count is 0 then this field must be zero. + Sum float64 + // Percentiles is a map from percentile (range (0-100.0]) to the value of + // the percentile. + Percentiles map[float64]float64 +} + +//go:generate stringer -type Type + +// Type is the overall type of metric, including its value type and whether it +// represents a cumulative total (since the start time) or if it represents a +// gauge value. +type Type int + +// Metric types. +const ( + TypeGaugeInt64 Type = iota + TypeGaugeFloat64 + TypeGaugeDistribution + TypeCumulativeInt64 + TypeCumulativeFloat64 + TypeCumulativeDistribution + TypeSummary +) diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go new file mode 100644 index 0000000000000..c3f8ec27b53c8 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/type_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type Type"; DO NOT EDIT. + +package metricdata + +import "strconv" + +const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" + +var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go new file mode 100644 index 0000000000000..b483a1371b09f --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/unit.go @@ -0,0 +1,27 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// Unit is a string encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +type Unit string + +// Predefined units. To record against a unit not represented here, create your +// own Unit type constant from a string. 
+const ( + UnitDimensionless Unit = "1" + UnitBytes Unit = "By" + UnitMilliseconds Unit = "ms" +) diff --git a/vendor/go.opencensus.io/metric/metricproducer/BUILD.bazel b/vendor/go.opencensus.io/metric/metricproducer/BUILD.bazel new file mode 100644 index 0000000000000..0c6247904a763 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "manager.go", + "producer.go", + ], + importmap = "k8s.io/kops/vendor/go.opencensus.io/metric/metricproducer", + importpath = "go.opencensus.io/metric/metricproducer", + visibility = ["//visibility:public"], + deps = ["//vendor/go.opencensus.io/metric/metricdata:go_default_library"], +) diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go new file mode 100644 index 0000000000000..ca1f390493857 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/manager.go @@ -0,0 +1,78 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricproducer + +import ( + "sync" +) + +// Manager maintains a list of active producers. Producers can register +// with the manager to allow readers to read all metrics provided by them. +// Readers can retrieve all producers registered with the manager, +// read metrics from the producers and export them. +type Manager struct { + mu sync.RWMutex + producers map[Producer]struct{} +} + +var prodMgr *Manager +var once sync.Once + +// GlobalManager is a single instance of producer manager +// that is used by all producers and all readers. +func GlobalManager() *Manager { + once.Do(func() { + prodMgr = &Manager{} + prodMgr.producers = make(map[Producer]struct{}) + }) + return prodMgr +} + +// AddProducer adds the producer to the Manager if it is not already present. +func (pm *Manager) AddProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + pm.producers[producer] = struct{}{} +} + +// DeleteProducer deletes the producer from the Manager if it is present. +func (pm *Manager) DeleteProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + delete(pm.producers, producer) +} + +// GetAll returns a slice of all producer currently registered with +// the Manager. For each call it generates a new slice. The slice +// should not be cached as registration may change at any time. It is +// typically called periodically by exporter to read metrics from +// the producers. 
+func (pm *Manager) GetAll() []Producer { + pm.mu.Lock() + defer pm.mu.Unlock() + producers := make([]Producer, len(pm.producers)) + i := 0 + for producer := range pm.producers { + producers[i] = producer + i++ + } + return producers +} diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go new file mode 100644 index 0000000000000..6cee9ed178335 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/producer.go @@ -0,0 +1,28 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricproducer + +import ( + "go.opencensus.io/metric/metricdata" +) + +// Producer is a source of metrics. +type Producer interface { + // Read should return the current values of all metrics supported by this + // metric provider. + // The returned metrics should be unique for each combination of name and + // resource. + Read() []*metricdata.Metric +} diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go new file mode 100644 index 0000000000000..d2565f1e2bf3a --- /dev/null +++ b/vendor/go.opencensus.io/opencensus.go @@ -0,0 +1,21 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package opencensus contains Go support for OpenCensus. +package opencensus // import "go.opencensus.io" + +// Version is the current release version of OpenCensus in use. 
+func Version() string { + return "0.21.0" +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/BUILD.bazel b/vendor/go.opencensus.io/plugin/ochttp/BUILD.bazel new file mode 100644 index 0000000000000..45348034a6712 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/BUILD.bazel @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "client.go", + "client_stats.go", + "doc.go", + "route.go", + "server.go", + "span_annotating_client_trace.go", + "stats.go", + "trace.go", + "wrapped_body.go", + ], + importmap = "k8s.io/kops/vendor/go.opencensus.io/plugin/ochttp", + importpath = "go.opencensus.io/plugin/ochttp", + visibility = ["//visibility:public"], + deps = [ + "//vendor/go.opencensus.io/plugin/ochttp/propagation/b3:go_default_library", + "//vendor/go.opencensus.io/stats:go_default_library", + "//vendor/go.opencensus.io/stats/view:go_default_library", + "//vendor/go.opencensus.io/tag:go_default_library", + "//vendor/go.opencensus.io/trace:go_default_library", + "//vendor/go.opencensus.io/trace/propagation:go_default_library", + ], +) diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go new file mode 100644 index 0000000000000..da815b2a73451 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client.go @@ -0,0 +1,117 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "net/http" + "net/http/httptrace" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Transport is an http.RoundTripper that instruments all outgoing requests with +// OpenCensus stats and tracing. +// +// The zero value is intended to be a useful default, but for +// now it's recommended that you explicitly set Propagation, since the default +// for this may change. +type Transport struct { + // Base may be set to wrap another http.RoundTripper that does the actual + // requests. By default http.DefaultTransport is used. + // + // If the base HTTP roundtripper implements CancelRequest, + // the returned round tripper will be cancelable. + Base http.RoundTripper + + // Propagation defines how traces are propagated. If unspecified, a default + // (currently B3 format) will be used. + Propagation propagation.HTTPFormat + + // StartOptions are applied to the span started by this Transport around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this transport. + StartOptions trace.StartOptions + + // GetStartOptions allows setting start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + + // FormatSpanName holds the function to use for generating the span name + // from the information found in the outgoing HTTP Request. By default the + // name equals the URL Path.
+ FormatSpanName func(*http.Request) string + + // NewClientTrace may be set to a function allowing the current *trace.Span + // to be annotated with HTTP request event information emitted by the + // httptrace package. + NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace + + // TODO: Implement tag propagation for HTTP. +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base() + if isHealthEndpoint(req.URL.Path) { + return rt.RoundTrip(req) + } + // TODO: remove excessive nesting of http.RoundTrippers here. + format := t.Propagation + if format == nil { + format = defaultFormat + } + spanNameFormatter := t.FormatSpanName + if spanNameFormatter == nil { + spanNameFormatter = spanNameFromURL + } + + startOpts := t.StartOptions + if t.GetStartOptions != nil { + startOpts = t.GetStartOptions(req) + } + + rt = &traceTransport{ + base: rt, + format: format, + startOptions: trace.StartOptions{ + Sampler: startOpts.Sampler, + SpanKind: trace.SpanKindClient, + }, + formatSpanName: spanNameFormatter, + newClientTrace: t.NewClientTrace, + } + rt = statsTransport{base: rt} + return rt.RoundTrip(req) +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + cr.CancelRequest(req) + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go new file mode 100644 index 0000000000000..17142aabe002a --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go @@ -0,0 +1,143 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +// statsTransport is an http.RoundTripper that collects stats for the outgoing requests. +type statsTransport struct { + base http.RoundTripper +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. +func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { + ctx, _ := tag.New(req.Context(), + tag.Upsert(KeyClientHost, req.Host), + tag.Upsert(Host, req.Host), + tag.Upsert(KeyClientPath, req.URL.Path), + tag.Upsert(Path, req.URL.Path), + tag.Upsert(KeyClientMethod, req.Method), + tag.Upsert(Method, req.Method)) + req = req.WithContext(ctx) + track := &tracker{ + start: time.Now(), + ctx: ctx, + } + if req.Body == nil { + // TODO: Handle cases where ContentLength is not set. 
+ track.reqSize = -1 + } else if req.ContentLength > 0 { + track.reqSize = req.ContentLength + } + stats.Record(ctx, ClientRequestCount.M(1)) + + // Perform request. + resp, err := t.base.RoundTrip(req) + + if err != nil { + track.statusCode = http.StatusInternalServerError + track.end() + } else { + track.statusCode = resp.StatusCode + if req.Method != "HEAD" { + track.respContentLength = resp.ContentLength + } + if resp.Body == nil { + track.end() + } else { + track.body = resp.Body + resp.Body = wrappedBody(track, resp.Body) + } + } + return resp, err +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t statsTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +type tracker struct { + ctx context.Context + respSize int64 + respContentLength int64 + reqSize int64 + start time.Time + body io.ReadCloser + statusCode int + endOnce sync.Once +} + +var _ io.ReadCloser = (*tracker)(nil) + +func (t *tracker) end() { + t.endOnce.Do(func() { + latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) + respSize := t.respSize + if t.respSize == 0 && t.respContentLength > 0 { + respSize = t.respContentLength + } + m := []stats.Measurement{ + ClientSentBytes.M(t.reqSize), + ClientReceivedBytes.M(respSize), + ClientRoundtripLatency.M(latencyMs), + ClientLatency.M(latencyMs), + ClientResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ClientRequestBytes.M(t.reqSize)) + } + + stats.RecordWithTags(t.ctx, []tag.Mutator{ + tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), + tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), + }, m...) + }) +} + +func (t *tracker) Read(b []byte) (int, error) { + n, err := t.body.Read(b) + t.respSize += int64(n) + switch err { + case nil: + return n, nil + case io.EOF: + t.end() + } + return n, err +} + +func (t *tracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. + t.end() + return t.body.Close() +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go new file mode 100644 index 0000000000000..10e626b16e610 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ochttp provides OpenCensus instrumentation for net/http package. +// +// For server instrumentation, see Handler. For client-side instrumentation, +// see Transport. 
+package ochttp // import "go.opencensus.io/plugin/ochttp" diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/BUILD.bazel b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/BUILD.bazel new file mode 100644 index 0000000000000..eef4777eef25f --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["b3.go"], + importmap = "k8s.io/kops/vendor/go.opencensus.io/plugin/ochttp/propagation/b3", + importpath = "go.opencensus.io/plugin/ochttp/propagation/b3", + visibility = ["//visibility:public"], + deps = [ + "//vendor/go.opencensus.io/trace:go_default_library", + "//vendor/go.opencensus.io/trace/propagation:go_default_library", + ], +) diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go new file mode 100644 index 0000000000000..2f1c7f0063ea8 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -0,0 +1,123 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package b3 contains a propagation.HTTPFormat implementation +// for B3 propagation. See https://github.com/openzipkin/b3-propagation +// for more details. +package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3" + +import ( + "encoding/hex" + "net/http" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// B3 headers that OpenCensus understands. +const ( + TraceIDHeader = "X-B3-TraceId" + SpanIDHeader = "X-B3-SpanId" + SampledHeader = "X-B3-Sampled" +) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers in B3 propagation format. +// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers +// because there are additional fields not represented in the +// OpenCensus span context. Spans created from the incoming +// header will be the direct children of the client-side span. +// Similarly, receiver of the outgoing spans should use client-side +// span created by OpenCensus as the parent. +type HTTPFormat struct{} + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// SpanContextFromRequest extracts a B3 span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader)) + if !ok { + return trace.SpanContext{}, false + } + sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader)) + if !ok { + return trace.SpanContext{}, false + } + sampled, _ := ParseSampled(req.Header.Get(SampledHeader)) + return trace.SpanContext{ + TraceID: tid, + SpanID: sid, + TraceOptions: sampled, + }, true +} + +// ParseTraceID parses the value of the X-B3-TraceId header. 
+func ParseTraceID(tid string) (trace.TraceID, bool) { + if tid == "" { + return trace.TraceID{}, false + } + b, err := hex.DecodeString(tid) + if err != nil { + return trace.TraceID{}, false + } + var traceID trace.TraceID + if len(b) <= 8 { + // The lower 64-bits. + start := 8 + (8 - len(b)) + copy(traceID[start:], b) + } else { + start := 16 - len(b) + copy(traceID[start:], b) + } + + return traceID, true +} + +// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers. +func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { + if sid == "" { + return trace.SpanID{}, false + } + b, err := hex.DecodeString(sid) + if err != nil { + return trace.SpanID{}, false + } + start := 8 - len(b) + copy(spanID[start:], b) + return spanID, true +} + +// ParseSampled parses the value of the X-B3-Sampled header. +func ParseSampled(sampled string) (trace.TraceOptions, bool) { + switch sampled { + case "true", "1": + return trace.TraceOptions(1), true + default: + return trace.TraceOptions(0), false + } +} + +// SpanContextToRequest modifies the given request to include B3 headers. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:])) + req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:])) + + var sampled string + if sc.IsSampled() { + sampled = "1" + } else { + sampled = "0" + } + req.Header.Set(SampledHeader, sampled) +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go new file mode 100644 index 0000000000000..5e6a3430760b6 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/route.go @@ -0,0 +1,61 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "net/http" + + "go.opencensus.io/tag" +) + +// SetRoute sets the http_server_route tag to the given value. +// It's useful when an HTTP framework does not support the http.Handler interface +// and using WithRouteTag is not an option, but provides a way to hook into the request flow. +func SetRoute(ctx context.Context, route string) { + if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) + } +} + +// WithRouteTag returns an http.Handler that records stats with the +// http_server_route tag set to the given value. +func WithRouteTag(handler http.Handler, route string) http.Handler { + return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { + addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} + ctx, _ := tag.New(r.Context(), addRoute...) + r = r.WithContext(ctx) + handler.ServeHTTP(w, r) + return addRoute + }) +} + +// taggedHandlerFunc is a http.Handler that returns tags describing the +// processing of the request. These tags will be recorded along with the +// measures in this package at the end of the request. 
+type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator + +func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { + tags := h(w, r) + if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tags...) + } +} + +type addedTagsKey struct{} + +type addedTags struct { + t []tag.Mutator +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go new file mode 100644 index 0000000000000..4f6404fa790ca --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -0,0 +1,449 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Handler is an http.Handler wrapper to instrument your HTTP server with +// OpenCensus. It supports both stats and tracing. +// +// Tracing +// +// This handler is aware of the incoming request's span, reading it from request +// headers as configured using the Propagation field. +// The extracted span can be accessed from the incoming request's +// context. +// +// span := trace.FromContext(r.Context()) +// +// The server span will be automatically ended at the end of ServeHTTP. +type Handler struct { + // Propagation defines how traces are propagated. If unspecified, + // B3 propagation will be used. + Propagation propagation.HTTPFormat + + // Handler is the handler used to handle the incoming request. + Handler http.Handler + + // StartOptions are applied to the span started by this Handler around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this transport. + StartOptions trace.StartOptions + + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + + // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) + // servers. If true, any trace metadata set on the incoming request will + // be added as a linked trace instead of being added as a parent of the + // current trace. + IsPublicEndpoint bool + + // FormatSpanName holds the function to use for generating the span name + // from the information found in the incoming HTTP Request. By default the + // name equals the URL Path. 
+ FormatSpanName func(*http.Request) string +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var tags addedTags + r, traceEnd := h.startTrace(w, r) + defer traceEnd() + w, statsEnd := h.startStats(w, r) + defer statsEnd(&tags) + handler := h.Handler + if handler == nil { + handler = http.DefaultServeMux + } + r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) + handler.ServeHTTP(w, r) +} + +func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { + if isHealthEndpoint(r.URL.Path) { + return r, func() {} + } + var name string + if h.FormatSpanName == nil { + name = spanNameFromURL(r) + } else { + name = h.FormatSpanName(r) + } + ctx := r.Context() + + startOpts := h.StartOptions + if h.GetStartOptions != nil { + startOpts = h.GetStartOptions(r) + } + + var span *trace.Span + sc, ok := h.extractSpanContext(r) + if ok && !h.IsPublicEndpoint { + ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, + trace.WithSampler(startOpts.Sampler), + trace.WithSpanKind(trace.SpanKindServer)) + } else { + ctx, span = trace.StartSpan(ctx, name, + trace.WithSampler(startOpts.Sampler), + trace.WithSpanKind(trace.SpanKindServer), + ) + if ok { + span.AddLink(trace.Link{ + TraceID: sc.TraceID, + SpanID: sc.SpanID, + Type: trace.LinkTypeParent, + Attributes: nil, + }) + } + } + span.AddAttributes(requestAttrs(r)...) + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. + } else if r.ContentLength > 0 { + span.AddMessageReceiveEvent(0, /* TODO: messageID */ + int64(r.ContentLength), -1) + } + return r.WithContext(ctx), span.End +} + +func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { + if h.Propagation == nil { + return defaultFormat.SpanContextFromRequest(r) + } + return h.Propagation.SpanContextFromRequest(r) +} + +func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { + ctx, _ := tag.New(r.Context(), + tag.Upsert(Host, r.Host), + tag.Upsert(Path, r.URL.Path), + tag.Upsert(Method, r.Method)) + track := &trackingResponseWriter{ + start: time.Now(), + ctx: ctx, + writer: w, + } + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. + track.reqSize = -1 + } else if r.ContentLength > 0 { + track.reqSize = r.ContentLength + } + stats.Record(ctx, ServerRequestCount.M(1)) + return track.wrappedResponseWriter(), track.end +} + +type trackingResponseWriter struct { + ctx context.Context + reqSize int64 + respSize int64 + start time.Time + statusCode int + statusLine string + endOnce sync.Once + writer http.ResponseWriter +} + +// Compile time assertion for ResponseWriter interface +var _ http.ResponseWriter = (*trackingResponseWriter)(nil) + +var logTagsErrorOnce sync.Once + +func (t *trackingResponseWriter) end(tags *addedTags) { + t.endOnce.Do(func() { + if t.statusCode == 0 { + t.statusCode = 200 + } + + span := trace.FromContext(t.ctx) + span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) + span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode))) + + m := []stats.Measurement{ + ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), + ServerResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ServerRequestBytes.M(t.reqSize)) + } + allTags := make([]tag.Mutator, len(tags.t)+1) + allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) + copy(allTags[1:], tags.t) + stats.RecordWithTags(t.ctx, allTags, m...) 
+ }) +} + +func (t *trackingResponseWriter) Header() http.Header { + return t.writer.Header() +} + +func (t *trackingResponseWriter) Write(data []byte) (int, error) { + n, err := t.writer.Write(data) + t.respSize += int64(n) + // Add message event for request bytes sent. + span := trace.FromContext(t.ctx) + span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1) + return n, err +} + +func (t *trackingResponseWriter) WriteHeader(statusCode int) { + t.writer.WriteHeader(statusCode) + t.statusCode = statusCode + t.statusLine = http.StatusText(t.statusCode) +} + +// wrappedResponseWriter returns a wrapped version of the original +// ResponseWriter and only implements the same combination of additional +// interfaces as the original. +// This implementation is based on https://github.com/felixge/httpsnoop. +func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { + var ( + hj, i0 = t.writer.(http.Hijacker) + cn, i1 = t.writer.(http.CloseNotifier) + pu, i2 = t.writer.(http.Pusher) + fl, i3 = t.writer.(http.Flusher) + rf, i4 = t.writer.(io.ReaderFrom) + ) + + switch { + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + }{t} + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + io.ReaderFrom + }{t, rf} + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Flusher + }{t, fl} + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{t, fl, rf} + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + }{t, pu} + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + io.ReaderFrom + }{t, pu, rf} + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + }{t, pu, fl} + case !i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + io.ReaderFrom + }{t, pu, fl, rf} + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + }{t, cn} + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{t, cn, rf} + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + }{t, cn, fl} + case !i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, cn, fl, rf} + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{t, cn, pu} + case !i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, cn, pu, rf} + case !i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + }{t, cn, pu, fl} + case !i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, cn, pu, fl, rf} + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + }{t, hj} + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{t, hj, rf} + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + }{t, hj, fl} + case i0 && !i1 && !i2 && i3 && i4: + return struct { + 
http.ResponseWriter + http.Hijacker + http.Flusher + io.ReaderFrom + }{t, hj, fl, rf} + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + }{t, hj, pu} + case i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + io.ReaderFrom + }{t, hj, pu, rf} + case i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + }{t, hj, pu, fl} + case i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, pu, fl, rf} + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + }{t, hj, cn} + case i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + io.ReaderFrom + }{t, hj, cn, rf} + case i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + }{t, hj, cn, fl} + case i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, hj, cn, fl, rf} + case i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + }{t, hj, cn, pu} + case i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, hj, cn, pu, rf} + case i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + }{t, hj, cn, pu, fl} + case i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, cn, pu, fl, rf} + default: + return struct { + http.ResponseWriter + }{t} + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go new file mode 100644 index 0000000000000..05c6c56cc790f --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go @@ -0,0 +1,169 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "strings" + + "go.opencensus.io/trace" +) + +type spanAnnotator struct { + sp *trace.Span +} + +// TODO: Remove NewSpanAnnotator at the next release. + +// NewSpanAnnotator returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. 
+// Deprecated: Use NewSpanAnnotatingClientTrace instead +func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { + return NewSpanAnnotatingClientTrace(r, s) +} + +// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. +func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { + sa := spanAnnotator{sp: s} + + return &httptrace.ClientTrace{ + GetConn: sa.getConn, + GotConn: sa.gotConn, + PutIdleConn: sa.putIdleConn, + GotFirstResponseByte: sa.gotFirstResponseByte, + Got100Continue: sa.got100Continue, + DNSStart: sa.dnsStart, + DNSDone: sa.dnsDone, + ConnectStart: sa.connectStart, + ConnectDone: sa.connectDone, + TLSHandshakeStart: sa.tlsHandshakeStart, + TLSHandshakeDone: sa.tlsHandshakeDone, + WroteHeaders: sa.wroteHeaders, + Wait100Continue: sa.wait100Continue, + WroteRequest: sa.wroteRequest, + } +} + +func (s spanAnnotator) getConn(hostPort string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.get_connection.host_port", hostPort), + } + s.sp.Annotate(attrs, "GetConn") +} + +func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { + attrs := []trace.Attribute{ + trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), + trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), + } + if info.WasIdle { + attrs = append(attrs, + trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) + } + s.sp.Annotate(attrs, "GotConn") +} + +// PutIdleConn implements a httptrace.ClientTrace hook +func (s spanAnnotator) putIdleConn(err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) + } + s.sp.Annotate(attrs, "PutIdleConn") +} + +func (s spanAnnotator) gotFirstResponseByte() { + s.sp.Annotate(nil, "GotFirstResponseByte") +} + +func (s spanAnnotator) got100Continue() { + s.sp.Annotate(nil, "Got100Continue") +} + +func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_start.host", info.Host), + } + s.sp.Annotate(attrs, "DNSStart") +} + +func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { + var addrs []string + for _, addr := range info.Addrs { + addrs = append(addrs, addr.String()) + } + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), + } + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "DNSDone") +} + +func (s spanAnnotator) connectStart(network, addr string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_start.network", network), + trace.StringAttribute("httptrace.connect_start.addr", addr), + } + s.sp.Annotate(attrs, "ConnectStart") +} + +func (s spanAnnotator) connectDone(network, addr string, err error) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_done.network", network), + trace.StringAttribute("httptrace.connect_done.addr", addr), + } + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.connect_done.error", err.Error())) + } + s.sp.Annotate(attrs, "ConnectDone") +} + +func (s spanAnnotator) tlsHandshakeStart() { + s.sp.Annotate(nil, "TLSHandshakeStart") +} + +func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { + var attrs []trace.Attribute 
+ if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) + } + s.sp.Annotate(attrs, "TLSHandshakeDone") +} + +func (s spanAnnotator) wroteHeaders() { + s.sp.Annotate(nil, "WroteHeaders") +} + +func (s spanAnnotator) wait100Continue() { + s.sp.Annotate(nil, "Wait100Continue") +} + +func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { + var attrs []trace.Attribute + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "WroteRequest") +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go new file mode 100644 index 0000000000000..63bbcda5e33f5 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -0,0 +1,292 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// Deprecated: client HTTP measures. +var ( + // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. + ClientRequestCount = stats.Int64( + "opencensus.io/http/client/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + // Deprecated: Use ClientSentBytes. + ClientRequestBytes = stats.Int64( + "opencensus.io/http/client/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientReceivedBytes. + ClientResponseBytes = stats.Int64( + "opencensus.io/http/client/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientRoundtripLatency. + ClientLatency = stats.Float64( + "opencensus.io/http/client/latency", + "End-to-end latency", + stats.UnitMilliseconds) +) + +// The following client HTTP measures are supported for use in custom views. 
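+// Illustrative usage sketch, not part of the vendored file: attaching the span-annotating
+// client trace to outgoing requests. This assumes the ochttp.Transport type and its
+// NewClientTrace field from this package's transport.go, which is outside this hunk.
+//
+//	client := &http.Client{
+//		Transport: &ochttp.Transport{
+//			NewClientTrace: ochttp.NewSpanAnnotatingClientTrace,
+//		},
+//	}
+//	resp, err := client.Get("https://example.com/")
+//	if err == nil {
+//		// Drain and close the body so the client span is ended.
+//		io.Copy(ioutil.Discard, resp.Body)
+//		resp.Body.Close()
+//	}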
+var ( + ClientSentBytes = stats.Int64( + "opencensus.io/http/client/sent_bytes", + "Total bytes sent in request body (not including headers)", + stats.UnitBytes, + ) + ClientReceivedBytes = stats.Int64( + "opencensus.io/http/client/received_bytes", + "Total bytes received in response bodies (not including headers but including error responses with bodies)", + stats.UnitBytes, + ) + ClientRoundtripLatency = stats.Float64( + "opencensus.io/http/client/roundtrip_latency", + "Time between first byte of request headers sent to last byte of response received, or terminal error", + stats.UnitMilliseconds, + ) +) + +// The following server HTTP measures are supported for use in custom views: +var ( + ServerRequestCount = stats.Int64( + "opencensus.io/http/server/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + ServerRequestBytes = stats.Int64( + "opencensus.io/http/server/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + ServerResponseBytes = stats.Int64( + "opencensus.io/http/server/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + ServerLatency = stats.Float64( + "opencensus.io/http/server/latency", + "End-to-end latency", + stats.UnitMilliseconds) +) + +// The following tags are applied to stats recorded by this package. Host, Path +// and Method are applied to all measures. StatusCode is not applied to +// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. +var ( + // Host is the value of the HTTP Host header. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Host, _ = tag.NewKey("http.host") + + // StatusCode is the numeric HTTP response status code, + // or "error" if a transport error occurred and no status code was read. + StatusCode, _ = tag.NewKey("http.status") + + // Path is the URL path (not including query string) in the request. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Path, _ = tag.NewKey("http.path") + + // Method is the HTTP method of the request, capitalized (GET, POST, etc.). + Method, _ = tag.NewKey("http.method") + + // KeyServerRoute is a low cardinality string representing the logical + // handler of the request. This is usually the pattern registered on the a + // ServeMux (or similar string). + KeyServerRoute, _ = tag.NewKey("http_server_route") +) + +// Client tag keys. +var ( + // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). + KeyClientMethod, _ = tag.NewKey("http_client_method") + // KeyClientPath is the URL path (not including query string). + KeyClientPath, _ = tag.NewKey("http_client_path") + // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received. + KeyClientStatus, _ = tag.NewKey("http_client_status") + // KeyClientHost is the value of the request Host header. + KeyClientHost, _ = tag.NewKey("http_client_host") +) + +// Default distributions used by views in this package. 
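+// Illustrative usage sketch, not part of the vendored file: a custom view that breaks
+// server latency down by the route tag populated via WithRouteTag or SetRoute. The view
+// name is a hypothetical placeholder.
+//
+//	latencyByRoute := &view.View{
+//		Name:        "example.com/views/http_server_latency_by_route",
+//		Measure:     ochttp.ServerLatency,
+//		Aggregation: ochttp.DefaultLatencyDistribution,
+//		TagKeys:     []tag.Key{ochttp.KeyServerRoute},
+//	}
+//	_ = view.Register(latencyByRoute)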
+var ( + DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +) + +// Package ochttp provides some convenience views for client measures. +// You still need to register these views for data to actually be collected. +var ( + ClientSentBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/sent_bytes", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientReceivedBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/received_bytes", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientRoundtripLatencyDistribution = &view.View{ + Name: "opencensus.io/http/client/roundtrip_latency", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + Description: "End-to-end latency, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientCompletedCount = &view.View{ + Name: "opencensus.io/http/client/completed_count", + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + Description: "Count of completed requests, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } +) + +// Deprecated: Old client Views. +var ( + // Deprecated: No direct replacement, but see ClientCompletedCount. + ClientRequestCountView = &view.View{ + Name: "opencensus.io/http/client/request_count", + Description: "Count of HTTP requests started", + Measure: ClientRequestCount, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientSentBytesDistribution. + ClientRequestBytesView = &view.View{ + Name: "opencensus.io/http/client/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientReceivedBytesDistribution instead. + ClientResponseBytesView = &view.View{ + Name: "opencensus.io/http/client/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientRoundtripLatencyDistribution instead. + ClientLatencyView = &view.View{ + Name: "opencensus.io/http/client/latency", + Description: "Latency distribution of HTTP requests", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + } + + // Deprecated: Use ClientCompletedCount instead. + ClientRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/client/request_count_by_method", + Description: "Client request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ClientSentBytes, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientCompletedCount instead. 
+ ClientResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/client/response_count_by_status_code", + Description: "Client response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + } +) + +// Package ochttp provides some convenience views for server measures. +// You still need to register these views for data to actually be collected. +var ( + ServerRequestCountView = &view.View{ + Name: "opencensus.io/http/server/request_count", + Description: "Count of HTTP requests started", + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerRequestBytesView = &view.View{ + Name: "opencensus.io/http/server/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ServerRequestBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerResponseBytesView = &view.View{ + Name: "opencensus.io/http/server/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ServerResponseBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerLatencyView = &view.View{ + Name: "opencensus.io/http/server/latency", + Description: "Latency distribution of HTTP requests", + Measure: ServerLatency, + Aggregation: DefaultLatencyDistribution, + } + + ServerRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/server/request_count_by_method", + Description: "Server request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/server/response_count_by_status_code", + Description: "Server response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ServerLatency, + Aggregation: view.Count(), + } +) + +// DefaultClientViews are the default client views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. +var DefaultClientViews = []*view.View{ + ClientRequestCountView, + ClientRequestBytesView, + ClientResponseBytesView, + ClientLatencyView, + ClientRequestCountByMethod, + ClientResponseCountByStatusCode, +} + +// DefaultServerViews are the default server views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. +var DefaultServerViews = []*view.View{ + ServerRequestCountView, + ServerRequestBytesView, + ServerResponseBytesView, + ServerLatencyView, + ServerRequestCountByMethod, + ServerResponseCountByStatusCode, +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go new file mode 100644 index 0000000000000..c23b97fb1fe22 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -0,0 +1,239 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
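+// Illustrative usage sketch, not part of the vendored file: registering the server views
+// individually at startup, as the Deprecated notes on DefaultClientViews and
+// DefaultServerViews suggest. A stats exporter is assumed to be configured elsewhere.
+//
+//	if err := view.Register(
+//		ochttp.ServerRequestCountView,
+//		ochttp.ServerLatencyView,
+//		ochttp.ServerResponseCountByStatusCode,
+//	); err != nil {
+//		log.Fatalf("failed to register ochttp server views: %v", err)
+//	}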
+ +package ochttp + +import ( + "io" + "net/http" + "net/http/httptrace" + + "go.opencensus.io/plugin/ochttp/propagation/b3" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// TODO(jbd): Add godoc examples. + +var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} + +// Attributes recorded on the span for the requests. +// Only trace exporters will need them. +const ( + HostAttribute = "http.host" + MethodAttribute = "http.method" + PathAttribute = "http.path" + URLAttribute = "http.url" + UserAgentAttribute = "http.user_agent" + StatusCodeAttribute = "http.status_code" +) + +type traceTransport struct { + base http.RoundTripper + startOptions trace.StartOptions + format propagation.HTTPFormat + formatSpanName func(*http.Request) string + newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace +} + +// TODO(jbd): Add message events for request and response size. + +// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. +// The created span can follow a parent span, if a parent is presented in +// the request's context. +func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { + name := t.formatSpanName(req) + // TODO(jbd): Discuss whether we want to prefix + // outgoing requests with Sent. + ctx, span := trace.StartSpan(req.Context(), name, + trace.WithSampler(t.startOptions.Sampler), + trace.WithSpanKind(trace.SpanKindClient)) + + if t.newClientTrace != nil { + req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) + } else { + req = req.WithContext(ctx) + } + + if t.format != nil { + // SpanContextToRequest will modify its Request argument, which is + // contrary to the contract for http.RoundTripper, so we need to + // pass it a copy of the Request. + // However, the Request struct itself was already copied by + // the WithContext calls above and so we just need to copy the header. + header := make(http.Header) + for k, v := range req.Header { + header[k] = v + } + req.Header = header + t.format.SpanContextToRequest(span.SpanContext(), req) + } + + span.AddAttributes(requestAttrs(req)...) + resp, err := t.base.RoundTrip(req) + if err != nil { + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) + span.End() + return resp, err + } + + span.AddAttributes(responseAttrs(resp)...) + span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) + + // span.End() will be invoked after + // a read from resp.Body returns io.EOF or when + // resp.Body.Close() is invoked. + bt := &bodyTracker{rc: resp.Body, span: span} + resp.Body = wrappedBody(bt, resp.Body) + return resp, err +} + +// bodyTracker wraps a response.Body and invokes +// trace.EndSpan on encountering io.EOF on reading +// the body of the original response. +type bodyTracker struct { + rc io.ReadCloser + span *trace.Span +} + +var _ io.ReadCloser = (*bodyTracker)(nil) + +func (bt *bodyTracker) Read(b []byte) (int, error) { + n, err := bt.rc.Read(b) + + switch err { + case nil: + return n, nil + case io.EOF: + bt.span.End() + default: + // For all other errors, set the span status + bt.span.SetStatus(trace.Status{ + // Code 2 is the error code for Internal server error. + Code: 2, + Message: err.Error(), + }) + } + return n, err +} + +func (bt *bodyTracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. 
+ bt.span.End() + return bt.rc.Close() +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *traceTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +func spanNameFromURL(req *http.Request) string { + return req.URL.Path +} + +func requestAttrs(r *http.Request) []trace.Attribute { + userAgent := r.UserAgent() + + attrs := make([]trace.Attribute, 0, 5) + attrs = append(attrs, + trace.StringAttribute(PathAttribute, r.URL.Path), + trace.StringAttribute(URLAttribute, r.URL.String()), + trace.StringAttribute(HostAttribute, r.Host), + trace.StringAttribute(MethodAttribute, r.Method), + ) + + if userAgent != "" { + attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) + } + + return attrs +} + +func responseAttrs(resp *http.Response) []trace.Attribute { + return []trace.Attribute{ + trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), + } +} + +// TraceStatus is a utility to convert the HTTP status code to a trace.Status that +// represents the outcome as closely as possible. +func TraceStatus(httpStatusCode int, statusLine string) trace.Status { + var code int32 + if httpStatusCode < 200 || httpStatusCode >= 400 { + code = trace.StatusCodeUnknown + } + switch httpStatusCode { + case 499: + code = trace.StatusCodeCancelled + case http.StatusBadRequest: + code = trace.StatusCodeInvalidArgument + case http.StatusGatewayTimeout: + code = trace.StatusCodeDeadlineExceeded + case http.StatusNotFound: + code = trace.StatusCodeNotFound + case http.StatusForbidden: + code = trace.StatusCodePermissionDenied + case http.StatusUnauthorized: // 401 is actually unauthenticated. + code = trace.StatusCodeUnauthenticated + case http.StatusTooManyRequests: + code = trace.StatusCodeResourceExhausted + case http.StatusNotImplemented: + code = trace.StatusCodeUnimplemented + case http.StatusServiceUnavailable: + code = trace.StatusCodeUnavailable + case http.StatusOK: + code = trace.StatusCodeOK + } + return trace.Status{Code: code, Message: codeToStr[code]} +} + +var codeToStr = map[int32]string{ + trace.StatusCodeOK: `OK`, + trace.StatusCodeCancelled: `CANCELLED`, + trace.StatusCodeUnknown: `UNKNOWN`, + trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, + trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, + trace.StatusCodeNotFound: `NOT_FOUND`, + trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, + trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, + trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, + trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, + trace.StatusCodeAborted: `ABORTED`, + trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, + trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, + trace.StatusCodeInternal: `INTERNAL`, + trace.StatusCodeUnavailable: `UNAVAILABLE`, + trace.StatusCodeDataLoss: `DATA_LOSS`, + trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, +} + +func isHealthEndpoint(path string) bool { + // Health checking is pretty frequent and + // traces collected for health endpoints + // can be extremely noisy and expensive. + // Disable canonical health checking endpoints + // like /healthz and /_ah/health for now. 
+ if path == "/healthz" || path == "/_ah/health" { + return true + } + return false +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go new file mode 100644 index 0000000000000..7d75cae2b1853 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go @@ -0,0 +1,44 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "io" +) + +// wrappedBody returns a wrapped version of the original +// Body and only implements the same combination of additional +// interfaces as the original. +func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { + var ( + wr, i0 = body.(io.Writer) + ) + switch { + case !i0: + return struct { + io.ReadCloser + }{wrapper} + + case i0: + return struct { + io.ReadCloser + io.Writer + }{wrapper, wr} + default: + return struct { + io.ReadCloser + }{wrapper} + } +} diff --git a/vendor/go.opencensus.io/resource/BUILD.bazel b/vendor/go.opencensus.io/resource/BUILD.bazel new file mode 100644 index 0000000000000..1c76b00a080a1 --- /dev/null +++ b/vendor/go.opencensus.io/resource/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["resource.go"], + importmap = "k8s.io/kops/vendor/go.opencensus.io/resource", + importpath = "go.opencensus.io/resource", + visibility = ["//visibility:public"], +) diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go new file mode 100644 index 0000000000000..b1764e1d3b942 --- /dev/null +++ b/vendor/go.opencensus.io/resource/resource.go @@ -0,0 +1,164 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package resource provides functionality for resource, which capture +// identifying information about the entities for which signals are exported. +package resource + +import ( + "context" + "fmt" + "os" + "regexp" + "sort" + "strconv" + "strings" +) + +// Environment variables used by FromEnv to decode a resource. +const ( + EnvVarType = "OC_RESOURCE_TYPE" + EnvVarLabels = "OC_RESOURCE_LABELS" +) + +// Resource describes an entity about which identifying information and metadata is exposed. +// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. 
+type Resource struct { + Type string + Labels map[string]string +} + +// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable. +func EncodeLabels(labels map[string]string) string { + sortedKeys := make([]string, 0, len(labels)) + for k := range labels { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + s := "" + for i, k := range sortedKeys { + if i > 0 { + s += "," + } + s += k + "=" + strconv.Quote(labels[k]) + } + return s +} + +var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`) + +// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable. +// A list of labels of the form `key1="value1",key2="value2",...` is accepted. +// Domain names and paths are accepted as label keys. +// Most users will want to use FromEnv instead. +func DecodeLabels(s string) (map[string]string, error) { + m := map[string]string{} + // Ensure a trailing comma, which allows us to keep the regex simpler + s = strings.TrimRight(strings.TrimSpace(s), ",") + "," + + for len(s) > 0 { + match := labelRegex.FindStringSubmatch(s) + if len(match) == 0 { + return nil, fmt.Errorf("invalid label formatting, remainder: %s", s) + } + v := match[2] + if v == "" { + v = match[3] + } else { + var err error + if v, err = strconv.Unquote(v); err != nil { + return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err) + } + } + m[match[1]] = v + + s = s[len(match[0]):] + } + return m, nil +} + +// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE +// and OC_RESOURCE_LABELS environment variables. +func FromEnv(context.Context) (*Resource, error) { + res := &Resource{ + Type: strings.TrimSpace(os.Getenv(EnvVarType)), + } + labels := strings.TrimSpace(os.Getenv(EnvVarLabels)) + if labels == "" { + return res, nil + } + var err error + if res.Labels, err = DecodeLabels(labels); err != nil { + return nil, err + } + return res, nil +} + +var _ Detector = FromEnv + +// merge resource information from b into a. In case of a collision, a takes precedence. +func merge(a, b *Resource) *Resource { + if a == nil { + return b + } + if b == nil { + return a + } + res := &Resource{ + Type: a.Type, + Labels: map[string]string{}, + } + if res.Type == "" { + res.Type = b.Type + } + for k, v := range b.Labels { + res.Labels[k] = v + } + // Labels from resource a overwrite labels from resource b. + for k, v := range a.Labels { + res.Labels[k] = v + } + return res +} + +// Detector attempts to detect resource information. +// If the detector cannot find resource information, the returned resource is nil but no +// error is returned. +// An error is only returned on unexpected failures. +type Detector func(context.Context) (*Resource, error) + +// MultiDetector returns a Detector that calls all input detectors in order and +// merges each result with the previous one. In case a type or label key is already set, +// the first value set takes precedence. +// It returns on the first error that a sub-detector encounters. +func MultiDetector(detectors ...Detector) Detector { + return func(ctx context.Context) (*Resource, error) { + return detectAll(ctx, detectors...) + } +} + +// detectAll calls all input detectors sequentially and merges each result with the previous one. +// It returns on the first error that a sub-detector encounters.
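+// Illustrative usage sketch, not part of the vendored file: reading resource information
+// from the environment. With OC_RESOURCE_TYPE="k8s.io/container" and
+// OC_RESOURCE_LABELS=`container="c1",namespace="default"`, FromEnv returns a *Resource
+// whose Labels map holds container=c1 and namespace=default.
+//
+//	res, err := resource.FromEnv(context.Background())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(res.Type, res.Labels)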
+func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) { + var res *Resource + for _, d := range detectors { + r, err := d(ctx) + if err != nil { + return nil, err + } + res = merge(res, r) + } + return res, nil +} diff --git a/vendor/go.opencensus.io/stats/BUILD.bazel b/vendor/go.opencensus.io/stats/BUILD.bazel new file mode 100644 index 0000000000000..6417be0c691f3 --- /dev/null +++ b/vendor/go.opencensus.io/stats/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "measure.go", + "measure_float64.go", + "measure_int64.go", + "record.go", + "units.go", + ], + importmap = "k8s.io/kops/vendor/go.opencensus.io/stats", + importpath = "go.opencensus.io/stats", + visibility = ["//visibility:public"], + deps = [ + "//vendor/go.opencensus.io/metric/metricdata:go_default_library", + "//vendor/go.opencensus.io/stats/internal:go_default_library", + "//vendor/go.opencensus.io/tag:go_default_library", + ], +) diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go new file mode 100644 index 0000000000000..00d473ee02989 --- /dev/null +++ b/vendor/go.opencensus.io/stats/doc.go @@ -0,0 +1,69 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package stats contains support for OpenCensus stats recording. + +OpenCensus allows users to create typed measures, record measurements, +aggregate the collected data, and export the aggregated data. + +Measures + +A measure represents a type of data point to be tracked and recorded. +For example, latency, request Mb/s, and response Mb/s are measures +to collect from a server. + +Measure constructors such as Int64 and Float64 automatically +register the measure by the given name. Each registered measure needs +to be unique by name. Measures also have a description and a unit. + +Libraries can define and export measures. Application authors can then +create views and collect and break down measures by the tags they are +interested in. + +Recording measurements + +Measurement is a data point to be collected for a measure. For example, +for a latency (ms) measure, 100 is a measurement that represents a 100ms +latency event. Measurements are created from measures with +the current context. Tags from the current context are recorded with the +measurements if they are any. + +Recorded measurements are dropped immediately if no views are registered for them. +There is usually no need to conditionally enable and disable +recording to reduce cost. Recording of measurements is cheap. + +Libraries can always record measurements, and applications can later decide +on which measurements they want to collect by registering views. This allows +libraries to turn on the instrumentation by default. + +Exemplars + +For a given recorded measurement, the associated exemplar is a diagnostic map +that gives more information about the measurement. 
+ +When aggregated using a Distribution aggregation, an exemplar is kept for each +bucket in the Distribution. This allows you to easily find an example of a +measurement that fell into each bucket. + +For example, if you also use the OpenCensus trace package and you +record a measurement with a context that contains a sampled trace span, +then the trace span will be added to the exemplar associated with the measurement. + +When exported to a supporting back end, you should be able to easily navigate +to example traces that fell into each bucket in the Distribution. + +*/ +package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/BUILD.bazel b/vendor/go.opencensus.io/stats/internal/BUILD.bazel new file mode 100644 index 0000000000000..5f08d2a4b93c8 --- /dev/null +++ b/vendor/go.opencensus.io/stats/internal/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["record.go"], + importmap = "k8s.io/kops/vendor/go.opencensus.io/stats/internal", + importpath = "go.opencensus.io/stats/internal", + visibility = ["//vendor/go.opencensus.io/stats:__subpackages__"], + deps = ["//vendor/go.opencensus.io/tag:go_default_library"], +) diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go new file mode 100644 index 0000000000000..36935e629b66c --- /dev/null +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "go.opencensus.io/tag" +) + +// DefaultRecorder will be called for each Record call. +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) + +// SubscriptionReporter reports when a view subscribed with a measure. +var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go new file mode 100644 index 0000000000000..1ffd3cefc730c --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure.go @@ -0,0 +1,109 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +import ( + "sync" + "sync/atomic" +) + +// Measure represents a single numeric value to be tracked and recorded. +// For example, latency, request bytes, and response bytes could be measures +// to collect from a server. 
+// +// Measures by themselves have no outside effects. In order to be exported, +// the measure needs to be used in a View. If no Views are defined over a +// measure, there is very little cost in recording it. +type Measure interface { + // Name returns the name of this measure. + // + // Measure names are globally unique (among all libraries linked into your program). + // We recommend prefixing the measure name with a domain name relevant to your + // project or application. + // + // Measure names are never sent over the wire or exported to backends. + // They are only used to create Views. + Name() string + + // Description returns the human-readable description of this measure. + Description() string + + // Unit returns the units for the values this measure takes on. + // + // Units are encoded according to the case-sensitive abbreviations from the + // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html + Unit() string +} + +// measureDescriptor is the untyped descriptor associated with each measure. +// Int64Measure and Float64Measure wrap measureDescriptor to provide typed +// recording APIs. +// Two Measures with the same name will have the same measureDescriptor. +type measureDescriptor struct { + subs int32 // access atomically + + name string + description string + unit string +} + +func (m *measureDescriptor) subscribe() { + atomic.StoreInt32(&m.subs, 1) +} + +func (m *measureDescriptor) subscribed() bool { + return atomic.LoadInt32(&m.subs) == 1 +} + +var ( + mu sync.RWMutex + measures = make(map[string]*measureDescriptor) +) + +func registerMeasureHandle(name, desc, unit string) *measureDescriptor { + mu.Lock() + defer mu.Unlock() + + if stored, ok := measures[name]; ok { + return stored + } + m := &measureDescriptor{ + name: name, + description: desc, + unit: unit, + } + measures[name] = m + return m +} + +// Measurement is the numeric value measured when recording stats. Each measure +// provides methods to create measurements of their kind. For example, Int64Measure +// provides M to convert an int64 into a measurement. +type Measurement struct { + v float64 + m Measure + desc *measureDescriptor +} + +// Value returns the value of the Measurement as a float64. +func (m Measurement) Value() float64 { + return m.v +} + +// Measure returns the Measure from which this Measurement was created. +func (m Measurement) Measure() Measure { + return m.m +} diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go new file mode 100644 index 0000000000000..f02c1eda845d2 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_float64.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Float64Measure is a measure for float64 values. +type Float64Measure struct { + desc *measureDescriptor +} + +// M creates a new float64 measurement. +// Use Record to record measurements. 
+func (m *Float64Measure) M(v float64) Measurement { + return Measurement{ + m: m, + desc: m.desc, + v: v, + } +} + +// Float64 creates a new measure for float64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. +func Float64(name, description, unit string) *Float64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Float64Measure{mi} +} + +// Name returns the name of the measure. +func (m *Float64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Float64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Float64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go new file mode 100644 index 0000000000000..d101d79735813 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_int64.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Int64Measure is a measure for int64 values. +type Int64Measure struct { + desc *measureDescriptor +} + +// M creates a new int64 measurement. +// Use Record to record measurements. +func (m *Int64Measure) M(v int64) Measurement { + return Measurement{ + m: m, + desc: m.desc, + v: float64(v), + } +} + +// Int64 creates a new measure for int64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. +func Int64(name, description, unit string) *Int64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Int64Measure{mi} +} + +// Name returns the name of the measure. +func (m *Int64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Int64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Int64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go new file mode 100644 index 0000000000000..ad4691184dfc5 --- /dev/null +++ b/vendor/go.opencensus.io/stats/record.go @@ -0,0 +1,117 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
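+// Illustrative usage sketch, not part of the vendored file: defining a measure and
+// recording a tagged value against it. The measure name and tag key are hypothetical,
+// and nothing is exported until a view over the measure is registered.
+//
+//	bytesIn := stats.Int64("example.com/measures/bytes_in", "Bytes received", stats.UnitBytes)
+//	methodKey, _ := tag.NewKey("method")
+//	ctx, _ := tag.New(context.Background(), tag.Upsert(methodKey, "GET"))
+//	stats.Record(ctx, bytesIn.M(512))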
+// + +package stats + +import ( + "context" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + internal.SubscriptionReporter = func(measure string) { + mu.Lock() + measures[measure].subscribe() + mu.Unlock() + } +} + +type recordOptions struct { + attachments metricdata.Attachments + mutators []tag.Mutator + measurements []Measurement +} + +// WithAttachments applies provided exemplar attachments. +func WithAttachments(attachments metricdata.Attachments) Options { + return func(ro *recordOptions) { + ro.attachments = attachments + } +} + +// WithTags applies provided tag mutators. +func WithTags(mutators ...tag.Mutator) Options { + return func(ro *recordOptions) { + ro.mutators = mutators + } +} + +// WithMeasurements applies provided measurements. +func WithMeasurements(measurements ...Measurement) Options { + return func(ro *recordOptions) { + ro.measurements = measurements + } +} + +// Options apply changes to recordOptions. +type Options func(*recordOptions) + +func createRecordOption(ros ...Options) *recordOptions { + o := &recordOptions{} + for _, ro := range ros { + ro(o) + } + return o +} + +// Record records one or multiple measurements with the same context at once. +// If there are any tags in the context, measurements will be tagged with them. +func Record(ctx context.Context, ms ...Measurement) { + RecordWithOptions(ctx, WithMeasurements(ms...)) +} + +// RecordWithTags records one or multiple measurements at once. +// +// Measurements will be tagged with the tags in the context mutated by the mutators. +// RecordWithTags is useful if you want to record with tag mutations but don't want +// to propagate the mutations in the context. +func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { + return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) +} + +// RecordWithOptions records measurements from the given options (if any) against context +// and tags and attachments in the options (if any). +// If there are any tags in the context, measurements will be tagged with them. +func RecordWithOptions(ctx context.Context, ros ...Options) error { + o := createRecordOption(ros...) + if len(o.measurements) == 0 { + return nil + } + recorder := internal.DefaultRecorder + if recorder == nil { + return nil + } + record := false + for _, m := range o.measurements { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return nil + } + if len(o.mutators) > 0 { + var err error + if ctx, err = tag.New(ctx, o.mutators...); err != nil { + return err + } + } + recorder(tag.FromContext(ctx), o.measurements, o.attachments) + return nil +} diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go new file mode 100644 index 0000000000000..6931a5f296616 --- /dev/null +++ b/vendor/go.opencensus.io/stats/units.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Units are encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +const ( + UnitNone = "1" // Deprecated: Use UnitDimensionless. + UnitDimensionless = "1" + UnitBytes = "By" + UnitMilliseconds = "ms" +) diff --git a/vendor/go.opencensus.io/stats/view/BUILD.bazel b/vendor/go.opencensus.io/stats/view/BUILD.bazel new file mode 100644 index 0000000000000..c409da2686dab --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/BUILD.bazel @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "aggregation.go", + "aggregation_data.go", + "collector.go", + "doc.go", + "export.go", + "view.go", + "view_to_metric.go", + "worker.go", + "worker_commands.go", + ], + importmap = "k8s.io/kops/vendor/go.opencensus.io/stats/view", + importpath = "go.opencensus.io/stats/view", + visibility = ["//visibility:public"], + deps = [ + "//vendor/go.opencensus.io/internal/tagencoding:go_default_library", + "//vendor/go.opencensus.io/metric/metricdata:go_default_library", + "//vendor/go.opencensus.io/metric/metricproducer:go_default_library", + "//vendor/go.opencensus.io/stats:go_default_library", + "//vendor/go.opencensus.io/stats/internal:go_default_library", + "//vendor/go.opencensus.io/tag:go_default_library", + ], +) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go new file mode 100644 index 0000000000000..b7f169b4a5f0d --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -0,0 +1,120 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +// AggType represents the type of aggregation function used on a View. +type AggType int + +// All available aggregation types. +const ( + AggTypeNone AggType = iota // no aggregation; reserved for future use. + AggTypeCount // the count aggregation, see Count. + AggTypeSum // the sum aggregation, see Sum. + AggTypeDistribution // the distribution aggregation, see Distribution. + AggTypeLastValue // the last value aggregation, see LastValue. +) + +func (t AggType) String() string { + return aggTypeName[t] +} + +var aggTypeName = map[AggType]string{ + AggTypeNone: "None", + AggTypeCount: "Count", + AggTypeSum: "Sum", + AggTypeDistribution: "Distribution", + AggTypeLastValue: "LastValue", +} + +// Aggregation represents a data aggregation method. Use one of the functions: +// Count, Sum, or Distribution to construct an Aggregation. +type Aggregation struct { + Type AggType // Type is the AggType of this Aggregation. + Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. 
+ + newData func() AggregationData +} + +var ( + aggCount = &Aggregation{ + Type: AggTypeCount, + newData: func() AggregationData { + return &CountData{} + }, + } + aggSum = &Aggregation{ + Type: AggTypeSum, + newData: func() AggregationData { + return &SumData{} + }, + } +) + +// Count indicates that data collected and aggregated +// with this method will be turned into a count value. +// For example, total number of accepted requests can be +// aggregated by using Count. +func Count() *Aggregation { + return aggCount +} + +// Sum indicates that data collected and aggregated +// with this method will be summed up. +// For example, accumulated request bytes can be aggregated by using +// Sum. +func Sum() *Aggregation { + return aggSum +} + +// Distribution indicates that the desired aggregation is +// a histogram distribution. +// +// An distribution aggregation may contain a histogram of the values in the +// population. The bucket boundaries for that histogram are described +// by the bounds. This defines len(bounds)+1 buckets. +// +// If len(bounds) >= 2 then the boundaries for bucket index i are: +// +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length +// +// If len(bounds) is 0 then there is no histogram associated with the +// distribution. There will be a single bucket with boundaries +// (-infinity, +infinity). +// +// If len(bounds) is 1 then there is no finite buckets, and that single +// element is the common boundary of the overflow and underflow buckets. +func Distribution(bounds ...float64) *Aggregation { + return &Aggregation{ + Type: AggTypeDistribution, + Buckets: bounds, + newData: func() AggregationData { + return newDistributionData(bounds) + }, + } +} + +// LastValue only reports the last value recorded using this +// aggregation. All other measurements will be dropped. +func LastValue() *Aggregation { + return &Aggregation{ + Type: AggTypeLastValue, + newData: func() AggregationData { + return &LastValueData{} + }, + } +} diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go new file mode 100644 index 0000000000000..d500e67f7335e --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -0,0 +1,293 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "math" + "time" + + "go.opencensus.io/metric/metricdata" +) + +// AggregationData represents an aggregated value from a collection. +// They are reported on the view data during exporting. +// Mosts users won't directly access aggregration data. 
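// Illustrative sketch of picking an aggregation for a view, based on the
// constructors above (the measure and bucket bounds are hypothetical, not
// part of this package):
//
//	var latencyMs = stats.Float64("example.com/latency", "request latency", stats.UnitMilliseconds)
//
//	// Distribution(5, 25, 100) defines len(bounds)+1 = 4 buckets:
//	// [-inf, 5), [5, 25), [25, 100), [100, +inf).
//	var latencyView = &view.View{
//		Name:        "example.com/latency_distribution",
//		Measure:     latencyMs,
//		Description: "distribution of request latency",
//		Aggregation: view.Distribution(5, 25, 100),
//	}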
+type AggregationData interface { + isAggregationData() bool + addSample(v float64, attachments map[string]interface{}, t time.Time) + clone() AggregationData + equal(other AggregationData) bool + toPoint(t metricdata.Type, time time.Time) metricdata.Point +} + +const epsilon = 1e-9 + +// CountData is the aggregated data for the Count aggregation. +// A count aggregation processes data and counts the recordings. +// +// Most users won't directly access count data. +type CountData struct { + Value int64 +} + +func (a *CountData) isAggregationData() bool { return true } + +func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { + a.Value = a.Value + 1 +} + +func (a *CountData) clone() AggregationData { + return &CountData{Value: a.Value} +} + +func (a *CountData) equal(other AggregationData) bool { + a2, ok := other.(*CountData) + if !ok { + return false + } + + return a.Value == a2.Value +} + +func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + +// SumData is the aggregated data for the Sum aggregation. +// A sum aggregation processes data and sums up the recordings. +// +// Most users won't directly access sum data. +type SumData struct { + Value float64 +} + +func (a *SumData) isAggregationData() bool { return true } + +func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { + a.Value += v +} + +func (a *SumData) clone() AggregationData { + return &SumData{Value: a.Value} +} + +func (a *SumData) equal(other AggregationData) bool { + a2, ok := other.(*SumData) + if !ok { + return false + } + return math.Pow(a.Value-a2.Value, 2) < epsilon +} + +func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, int64(a.Value)) + case metricdata.TypeCumulativeFloat64: + return metricdata.NewFloat64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + +// DistributionData is the aggregated data for the +// Distribution aggregation. +// +// Most users won't directly access distribution data. +// +// For a distribution with N bounds, the associated DistributionData will have +// N+1 buckets. +type DistributionData struct { + Count int64 // number of data points aggregated + Min float64 // minimum value in the distribution + Max float64 // max value in the distribution + Mean float64 // mean of the distribution + SumOfSquaredDev float64 // sum of the squared deviation from the mean + CountPerBucket []int64 // number of occurrences per bucket + // ExemplarsPerBucket is slice the same length as CountPerBucket containing + // an exemplar for the associated bucket, or nil. + ExemplarsPerBucket []*metricdata.Exemplar + bounds []float64 // histogram distribution of the values +} + +func newDistributionData(bounds []float64) *DistributionData { + bucketCount := len(bounds) + 1 + return &DistributionData{ + CountPerBucket: make([]int64, bucketCount), + ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), + bounds: bounds, + Min: math.MaxFloat64, + Max: math.SmallestNonzeroFloat64, + } +} + +// Sum returns the sum of all samples collected. 
+func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) } + +func (a *DistributionData) variance() float64 { + if a.Count <= 1 { + return 0 + } + return a.SumOfSquaredDev / float64(a.Count-1) +} + +func (a *DistributionData) isAggregationData() bool { return true } + +// TODO(songy23): support exemplar attachments. +func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { + if v < a.Min { + a.Min = v + } + if v > a.Max { + a.Max = v + } + a.Count++ + a.addToBucket(v, attachments, t) + + if a.Count == 1 { + a.Mean = v + return + } + + oldMean := a.Mean + a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) + a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) +} + +func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { + var count *int64 + var i int + var b float64 + for i, b = range a.bounds { + if v < b { + count = &a.CountPerBucket[i] + break + } + } + if count == nil { // Last bucket. + i = len(a.bounds) + count = &a.CountPerBucket[i] + } + *count++ + if exemplar := getExemplar(v, attachments, t); exemplar != nil { + a.ExemplarsPerBucket[i] = exemplar + } +} + +func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { + if len(attachments) == 0 { + return nil + } + return &metricdata.Exemplar{ + Value: v, + Timestamp: t, + Attachments: attachments, + } +} + +func (a *DistributionData) clone() AggregationData { + c := *a + c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) + c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) + return &c +} + +func (a *DistributionData) equal(other AggregationData) bool { + a2, ok := other.(*DistributionData) + if !ok { + return false + } + if a2 == nil { + return false + } + if len(a.CountPerBucket) != len(a2.CountPerBucket) { + return false + } + for i := range a.CountPerBucket { + if a.CountPerBucket[i] != a2.CountPerBucket[i] { + return false + } + } + return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon +} + +func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeDistribution: + buckets := []metricdata.Bucket{} + for i := 0; i < len(a.CountPerBucket); i++ { + buckets = append(buckets, metricdata.Bucket{ + Count: a.CountPerBucket[i], + Exemplar: a.ExemplarsPerBucket[i], + }) + } + bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} + + val := &metricdata.Distribution{ + Count: a.Count, + Sum: a.Sum(), + SumOfSquaredDeviation: a.SumOfSquaredDev, + BucketOptions: bucketOptions, + Buckets: buckets, + } + return metricdata.NewDistributionPoint(t, val) + + default: + // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. + panic("unsupported metricdata.Type") + } +} + +// LastValueData returns the last value recorded for LastValue aggregation. 
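// Illustrative sketch of a LastValue view, which keeps only the most recent
// recording (gauge-like semantics; the measure and view names below are
// hypothetical):
//
//	var queueDepth = stats.Int64("example.com/queue_depth", "current queue depth", stats.UnitDimensionless)
//
//	var queueDepthView = &view.View{
//		Name:        "example.com/queue_depth",
//		Measure:     queueDepth,
//		Description: "most recently recorded queue depth",
//		Aggregation: view.LastValue(),
//	}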
+type LastValueData struct { + Value float64 +} + +func (l *LastValueData) isAggregationData() bool { + return true +} + +func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { + l.Value = v +} + +func (l *LastValueData) clone() AggregationData { + return &LastValueData{l.Value} +} + +func (l *LastValueData) equal(other AggregationData) bool { + a2, ok := other.(*LastValueData) + if !ok { + return false + } + return l.Value == a2.Value +} + +func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeGaugeInt64: + return metricdata.NewInt64Point(t, int64(l.Value)) + case metricdata.TypeGaugeFloat64: + return metricdata.NewFloat64Point(t, l.Value) + default: + panic("unsupported metricdata.Type") + } +} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go new file mode 100644 index 0000000000000..8a6a2c0fdc9fb --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -0,0 +1,86 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "sort" + "time" + + "go.opencensus.io/internal/tagencoding" + "go.opencensus.io/tag" +) + +type collector struct { + // signatures holds the aggregations values for each unique tag signature + // (values for all keys) to its aggregator. + signatures map[string]AggregationData + // Aggregation is the description of the aggregation to perform for this + // view. + a *Aggregation +} + +func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { + aggregator, ok := c.signatures[s] + if !ok { + aggregator = c.a.newData() + c.signatures[s] = aggregator + } + aggregator.addSample(v, attachments, t) +} + +// collectRows returns a snapshot of the collected Row values. +func (c *collector) collectedRows(keys []tag.Key) []*Row { + rows := make([]*Row, 0, len(c.signatures)) + for sig, aggregator := range c.signatures { + tags := decodeTags([]byte(sig), keys) + row := &Row{Tags: tags, Data: aggregator.clone()} + rows = append(rows, row) + } + return rows +} + +func (c *collector) clearRows() { + c.signatures = make(map[string]AggregationData) +} + +// encodeWithKeys encodes the map by using values +// only associated with the keys provided. +func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + vb := &tagencoding.Values{ + Buffer: make([]byte, len(keys)), + } + for _, k := range keys { + v, _ := m.Value(k) + vb.WriteValue([]byte(v)) + } + return vb.Bytes() +} + +// decodeTags decodes tags from the buffer and +// orders them by the keys. 
+func decodeTags(buf []byte, keys []tag.Key) []tag.Tag { + vb := &tagencoding.Values{Buffer: buf} + var tags []tag.Tag + for _, k := range keys { + v := vb.ReadValue() + if v != nil { + tags = append(tags, tag.Tag{Key: k, Value: string(v)}) + } + } + vb.ReadIndex = 0 + sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() }) + return tags +} diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go new file mode 100644 index 0000000000000..dced225c3dc47 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -0,0 +1,47 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package view contains support for collecting and exposing aggregates over stats. +// +// In order to collect measurements, views need to be defined and registered. +// A view allows recorded measurements to be filtered and aggregated. +// +// All recorded measurements can be grouped by a list of tags. +// +// OpenCensus provides several aggregation methods: Count, Distribution and Sum. +// +// Count only counts the number of measurement points recorded. +// Distribution provides statistical summary of the aggregated data by counting +// how many recorded measurements fall into each bucket. +// Sum adds up the measurement values. +// LastValue just keeps track of the most recently recorded measurement value. +// All aggregations are cumulative. +// +// Views can be registerd and unregistered at any time during program execution. +// +// Libraries can define views but it is recommended that in most cases registering +// views be left up to applications. +// +// Exporting +// +// Collected and aggregated data can be exported to a metric collection +// backend by registering its exporter. +// +// Multiple exporters can be registered to upload the data to various +// different back ends. +package view // import "go.opencensus.io/stats/view" + +// TODO(acetechnologist): Add a link to the language independent OpenCensus +// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go new file mode 100644 index 0000000000000..7cb59718f5fea --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/export.go @@ -0,0 +1,58 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package view + +import "sync" + +var ( + exportersMu sync.RWMutex // guards exporters + exporters = make(map[Exporter]struct{}) +) + +// Exporter exports the collected records as view data. +// +// The ExportView method should return quickly; if an +// Exporter takes a significant amount of time to +// process a Data, that work should be done on another goroutine. +// +// It is safe to assume that ExportView will not be called concurrently from +// multiple goroutines. +// +// The Data should not be modified. +type Exporter interface { + ExportView(viewData *Data) +} + +// RegisterExporter registers an exporter. +// Collected data will be reported via all the +// registered exporters. Once you no longer +// want data to be exported, invoke UnregisterExporter +// with the previously registered exporter. +// +// Binaries can register exporters, libraries shouldn't register exporters. +func RegisterExporter(e Exporter) { + exportersMu.Lock() + defer exportersMu.Unlock() + + exporters[e] = struct{}{} +} + +// UnregisterExporter unregisters an exporter. +func UnregisterExporter(e Exporter) { + exportersMu.Lock() + defer exportersMu.Unlock() + + delete(exporters, e) +} diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go new file mode 100644 index 0000000000000..37f88e1d9fa1d --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -0,0 +1,221 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "sync/atomic" + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +// View allows users to aggregate the recorded stats.Measurements. +// Views need to be passed to the Register function to be before data will be +// collected and sent to Exporters. +type View struct { + Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. + Description string // Description is a human-readable description for this view. + + // TagKeys are the tag keys describing the grouping of this view. + // A single Row will be produced for each combination of associated tag values. + TagKeys []tag.Key + + // Measure is a stats.Measure to aggregate in this view. + Measure stats.Measure + + // Aggregation is the aggregation function tp apply to the set of Measurements. + Aggregation *Aggregation +} + +// WithName returns a copy of the View with a new name. This is useful for +// renaming views to cope with limitations placed on metric names by various +// backends. +func (v *View) WithName(name string) *View { + vNew := *v + vNew.Name = name + return &vNew +} + +// same compares two views and returns true if they represent the same aggregation. 
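// Illustrative sketch of a minimal Exporter and its registration (the
// log-based exporter below is hypothetical, not shipped with this package):
//
//	type printExporter struct{}
//
//	func (printExporter) ExportView(vd *view.Data) {
//		for _, row := range vd.Rows {
//			log.Printf("%s %v: %v", vd.View.Name, row.Tags, row.Data)
//		}
//	}
//
//	func init() {
//		view.RegisterExporter(printExporter{})
//	}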
+func (v *View) same(other *View) bool { + if v == other { + return true + } + if v == nil { + return false + } + return reflect.DeepEqual(v.Aggregation, other.Aggregation) && + v.Measure.Name() == other.Measure.Name() +} + +// ErrNegativeBucketBounds error returned if histogram contains negative bounds. +// +// Deprecated: this should not be public. +var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") + +// canonicalize canonicalizes v by setting explicit +// defaults for Name and Description and sorting the TagKeys +func (v *View) canonicalize() error { + if v.Measure == nil { + return fmt.Errorf("cannot register view %q: measure not set", v.Name) + } + if v.Aggregation == nil { + return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) + } + if v.Name == "" { + v.Name = v.Measure.Name() + } + if v.Description == "" { + v.Description = v.Measure.Description() + } + if err := checkViewName(v.Name); err != nil { + return err + } + sort.Slice(v.TagKeys, func(i, j int) bool { + return v.TagKeys[i].Name() < v.TagKeys[j].Name() + }) + sort.Float64s(v.Aggregation.Buckets) + for _, b := range v.Aggregation.Buckets { + if b < 0 { + return ErrNegativeBucketBounds + } + } + // drop 0 bucket silently. + v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) + + return nil +} + +func dropZeroBounds(bounds ...float64) []float64 { + for i, bound := range bounds { + if bound > 0 { + return bounds[i:] + } + } + return []float64{} +} + +// viewInternal is the internal representation of a View. +type viewInternal struct { + view *View // view is the canonicalized View definition associated with this view. + subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access + collector *collector + metricDescriptor *metricdata.Descriptor +} + +func newViewInternal(v *View) (*viewInternal, error) { + return &viewInternal{ + view: v, + collector: &collector{make(map[string]AggregationData), v.Aggregation}, + metricDescriptor: viewToMetricDescriptor(v), + }, nil +} + +func (v *viewInternal) subscribe() { + atomic.StoreUint32(&v.subscribed, 1) +} + +func (v *viewInternal) unsubscribe() { + atomic.StoreUint32(&v.subscribed, 0) +} + +// isSubscribed returns true if the view is exporting +// data by subscription. +func (v *viewInternal) isSubscribed() bool { + return atomic.LoadUint32(&v.subscribed) == 1 +} + +func (v *viewInternal) clearRows() { + v.collector.clearRows() +} + +func (v *viewInternal) collectedRows() []*Row { + return v.collector.collectedRows(v.view.TagKeys) +} + +func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { + if !v.isSubscribed() { + return + } + sig := string(encodeWithKeys(m, v.view.TagKeys)) + v.collector.addSample(sig, val, attachments, t) +} + +// A Data is a set of rows about usage of the single measure associated +// with the given view. Each row is specific to a unique set of tags. +type Data struct { + View *View + Start, End time.Time + Rows []*Row +} + +// Row is the collected value for a specific set of key value pairs a.k.a tags. 
+type Row struct { + Tags []tag.Tag + Data AggregationData +} + +func (r *Row) String() string { + var buffer bytes.Buffer + buffer.WriteString("{ ") + buffer.WriteString("{ ") + for _, t := range r.Tags { + buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value)) + } + buffer.WriteString(" }") + buffer.WriteString(fmt.Sprintf("%v", r.Data)) + buffer.WriteString(" }") + return buffer.String() +} + +// Equal returns true if both rows are equal. Tags are expected to be ordered +// by the key name. Even both rows have the same tags but the tags appear in +// different orders it will return false. +func (r *Row) Equal(other *Row) bool { + if r == other { + return true + } + return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) +} + +const maxNameLength = 255 + +// Returns true if the given string contains only printable characters. +func isPrintable(str string) bool { + for _, r := range str { + if !(r >= ' ' && r <= '~') { + return false + } + } + return true +} + +func checkViewName(name string) error { + if len(name) > maxNameLength { + return fmt.Errorf("view name cannot be larger than %v", maxNameLength) + } + if !isPrintable(name) { + return fmt.Errorf("view name needs to be an ASCII string") + } + return nil +} diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go new file mode 100644 index 0000000000000..010f81bab582a --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -0,0 +1,140 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package view + +import ( + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" +) + +func getUnit(unit string) metricdata.Unit { + switch unit { + case "1": + return metricdata.UnitDimensionless + case "ms": + return metricdata.UnitMilliseconds + case "By": + return metricdata.UnitBytes + } + return metricdata.UnitDimensionless +} + +func getType(v *View) metricdata.Type { + m := v.Measure + agg := v.Aggregation + + switch agg.Type { + case AggTypeSum: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeDistribution: + return metricdata.TypeCumulativeDistribution + case AggTypeLastValue: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeGaugeInt64 + case *stats.Float64Measure: + return metricdata.TypeGaugeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeCount: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeInt64 + default: + panic("unexpected measure type") + } + default: + panic("unexpected aggregation type") + } +} + +func getLableKeys(v *View) []metricdata.LabelKey { + labelKeys := []metricdata.LabelKey{} + for _, k := range v.TagKeys { + labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) + } + return labelKeys +} + +func viewToMetricDescriptor(v *View) *metricdata.Descriptor { + return &metricdata.Descriptor{ + Name: v.Name, + Description: v.Description, + Unit: getUnit(v.Measure.Unit()), + Type: getType(v), + LabelKeys: getLableKeys(v), + } +} + +func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { + labelValues := []metricdata.LabelValue{} + tagMap := make(map[string]string) + for _, tag := range row.Tags { + tagMap[tag.Key.Name()] = tag.Value + } + + for _, key := range expectedKeys { + if val, ok := tagMap[key.Key]; ok { + labelValues = append(labelValues, metricdata.NewLabelValue(val)) + } else { + labelValues = append(labelValues, metricdata.LabelValue{}) + } + } + return labelValues +} + +func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries { + return &metricdata.TimeSeries{ + Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, + LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), + StartTime: startTime, + } +} + +func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric { + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } + + rows := v.collectedRows() + if len(rows) == 0 { + return nil + } + + ts := []*metricdata.TimeSeries{} + for _, row := range rows { + ts = append(ts, rowToTimeseries(v, row, now, startTime)) + } + + m := &metricdata.Metric{ + Descriptor: *v.metricDescriptor, + TimeSeries: ts, + } + return m +} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go new file mode 100644 index 0000000000000..2f3c018af0e18 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -0,0 +1,281 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "fmt" + "sync" + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + defaultWorker = newWorker() + go defaultWorker.start() + internal.DefaultRecorder = record +} + +type measureRef struct { + measure string + views map[*viewInternal]struct{} +} + +type worker struct { + measures map[string]*measureRef + views map[string]*viewInternal + startTimes map[*viewInternal]time.Time + + timer *time.Ticker + c chan command + quit, done chan bool + mu sync.RWMutex +} + +var defaultWorker *worker + +var defaultReportingDuration = 10 * time.Second + +// Find returns a registered view associated with this name. +// If no registered view is found, nil is returned. +func Find(name string) (v *View) { + req := &getViewByNameReq{ + name: name, + c: make(chan *getViewByNameResp), + } + defaultWorker.c <- req + resp := <-req.c + return resp.v +} + +// Register begins collecting data for the given views. +// Once a view is registered, it reports data to the registered exporters. +func Register(views ...*View) error { + req := ®isterViewReq{ + views: views, + err: make(chan error), + } + defaultWorker.c <- req + return <-req.err +} + +// Unregister the given views. Data will not longer be exported for these views +// after Unregister returns. +// It is not necessary to unregister from views you expect to collect for the +// duration of your program execution. +func Unregister(views ...*View) { + names := make([]string, len(views)) + for i := range views { + names[i] = views[i].Name + } + req := &unregisterFromViewReq{ + views: names, + done: make(chan struct{}), + } + defaultWorker.c <- req + <-req.done +} + +// RetrieveData gets a snapshot of the data collected for the the view registered +// with the given name. It is intended for testing only. +func RetrieveData(viewName string) ([]*Row, error) { + req := &retrieveDataReq{ + now: time.Now(), + v: viewName, + c: make(chan *retrieveDataResp), + } + defaultWorker.c <- req + resp := <-req.c + return resp.rows, resp.err +} + +func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + req := &recordReq{ + tm: tags, + ms: ms.([]stats.Measurement), + attachments: attachments, + t: time.Now(), + } + defaultWorker.c <- req +} + +// SetReportingPeriod sets the interval between reporting aggregated views in +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. +func SetReportingPeriod(d time.Duration) { + // TODO(acetechnologist): ensure that the duration d is more than a certain + // value. e.g. 1s + req := &setReportingPeriodReq{ + d: d, + c: make(chan bool), + } + defaultWorker.c <- req + <-req.c // don't return until the timer is set to the new duration. 
+} + +func newWorker() *worker { + return &worker{ + measures: make(map[string]*measureRef), + views: make(map[string]*viewInternal), + startTimes: make(map[*viewInternal]time.Time), + timer: time.NewTicker(defaultReportingDuration), + c: make(chan command, 1024), + quit: make(chan bool), + done: make(chan bool), + } +} + +func (w *worker) start() { + prodMgr := metricproducer.GlobalManager() + prodMgr.AddProducer(w) + + for { + select { + case cmd := <-w.c: + cmd.handleCommand(w) + case <-w.timer.C: + w.reportUsage(time.Now()) + case <-w.quit: + w.timer.Stop() + close(w.c) + w.done <- true + return + } + } +} + +func (w *worker) stop() { + prodMgr := metricproducer.GlobalManager() + prodMgr.DeleteProducer(w) + + w.quit <- true + <-w.done +} + +func (w *worker) getMeasureRef(name string) *measureRef { + if mr, ok := w.measures[name]; ok { + return mr + } + mr := &measureRef{ + measure: name, + views: make(map[*viewInternal]struct{}), + } + w.measures[name] = mr + return mr +} + +func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { + w.mu.Lock() + defer w.mu.Unlock() + vi, err := newViewInternal(v) + if err != nil { + return nil, err + } + if x, ok := w.views[vi.view.Name]; ok { + if !x.view.same(vi.view) { + return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) + } + + // the view is already registered so there is nothing to do and the + // command is considered successful. + return x, nil + } + w.views[vi.view.Name] = vi + ref := w.getMeasureRef(vi.view.Measure.Name()) + ref.views[vi] = struct{}{} + return vi, nil +} + +func (w *worker) unregisterView(viewName string) { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.views, viewName) +} + +func (w *worker) reportView(v *viewInternal, now time.Time) { + if !v.isSubscribed() { + return + } + rows := v.collectedRows() + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + viewData := &Data{ + View: v.view, + Start: w.startTimes[v], + End: time.Now(), + Rows: rows, + } + exportersMu.Lock() + for e := range exporters { + e.ExportView(viewData) + } + exportersMu.Unlock() +} + +func (w *worker) reportUsage(now time.Time) { + w.mu.Lock() + defer w.mu.Unlock() + for _, v := range w.views { + w.reportView(v, now) + } +} + +func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { + if !v.isSubscribed() { + return nil + } + + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + + var startTime time.Time + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } else { + startTime = w.startTimes[v] + } + + return viewToMetric(v, now, startTime) +} + +// Read reads all view data and returns them as metrics. +// It is typically invoked by metric reader to export stats in metric format. 
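// Illustrative end-to-end sketch of registration, recording and reporting
// (latencyView, latencyMs and methodKey are hypothetical names, not defined
// by this package):
//
//	if err := view.Register(latencyView); err != nil {
//		log.Fatal(err)
//	}
//	view.SetReportingPeriod(10 * time.Second)
//
//	ctx, _ := tag.New(context.Background(), tag.Upsert(methodKey, "GET"))
//	stats.Record(ctx, latencyMs.M(17.3))
//
//	// Registered exporters then receive a *view.Data for latencyView
//	// roughly once per reporting period.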
+func (w *worker) Read() []*metricdata.Metric { + w.mu.Lock() + defer w.mu.Unlock() + now := time.Now() + metrics := make([]*metricdata.Metric, 0, len(w.views)) + for _, v := range w.views { + metric := w.toMetric(v, now) + if metric != nil { + metrics = append(metrics, metric) + } + } + return metrics +} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go new file mode 100644 index 0000000000000..0267e179aed4e --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -0,0 +1,186 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "errors" + "fmt" + "strings" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +type command interface { + handleCommand(w *worker) +} + +// getViewByNameReq is the command to get a view given its name. +type getViewByNameReq struct { + name string + c chan *getViewByNameResp +} + +type getViewByNameResp struct { + v *View +} + +func (cmd *getViewByNameReq) handleCommand(w *worker) { + v := w.views[cmd.name] + if v == nil { + cmd.c <- &getViewByNameResp{nil} + return + } + cmd.c <- &getViewByNameResp{v.view} +} + +// registerViewReq is the command to register a view. +type registerViewReq struct { + views []*View + err chan error +} + +func (cmd *registerViewReq) handleCommand(w *worker) { + for _, v := range cmd.views { + if err := v.canonicalize(); err != nil { + cmd.err <- err + return + } + } + var errstr []string + for _, view := range cmd.views { + vi, err := w.tryRegisterView(view) + if err != nil { + errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err)) + continue + } + internal.SubscriptionReporter(view.Measure.Name()) + vi.subscribe() + } + if len(errstr) > 0 { + cmd.err <- errors.New(strings.Join(errstr, "\n")) + } else { + cmd.err <- nil + } +} + +// unregisterFromViewReq is the command to unregister to a view. Has no +// impact on the data collection for client that are pulling data from the +// library. +type unregisterFromViewReq struct { + views []string + done chan struct{} +} + +func (cmd *unregisterFromViewReq) handleCommand(w *worker) { + for _, name := range cmd.views { + vi, ok := w.views[name] + if !ok { + continue + } + + // Report pending data for this view before removing it. + w.reportView(vi, time.Now()) + + vi.unsubscribe() + if !vi.isSubscribed() { + // this was the last subscription and view is not collecting anymore. + // The collected data can be cleared. + vi.clearRows() + } + w.unregisterView(name) + } + cmd.done <- struct{}{} +} + +// retrieveDataReq is the command to retrieve data for a view. 
+type retrieveDataReq struct { + now time.Time + v string + c chan *retrieveDataResp +} + +type retrieveDataResp struct { + rows []*Row + err error +} + +func (cmd *retrieveDataReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() + vi, ok := w.views[cmd.v] + if !ok { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v), + } + return + } + + if !vi.isSubscribed() { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v), + } + return + } + cmd.c <- &retrieveDataResp{ + vi.collectedRows(), + nil, + } +} + +// recordReq is the command to record data related to multiple measures +// at once. +type recordReq struct { + tm *tag.Map + ms []stats.Measurement + attachments map[string]interface{} + t time.Time +} + +func (cmd *recordReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() + for _, m := range cmd.ms { + if (m == stats.Measurement{}) { // not registered + continue + } + ref := w.getMeasureRef(m.Measure().Name()) + for v := range ref.views { + v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) + } + } +} + +// setReportingPeriodReq is the command to modify the duration between +// reporting the collected data to the registered clients. +type setReportingPeriodReq struct { + d time.Duration + c chan bool +} + +func (cmd *setReportingPeriodReq) handleCommand(w *worker) { + w.timer.Stop() + if cmd.d <= 0 { + w.timer = time.NewTicker(defaultReportingDuration) + } else { + w.timer = time.NewTicker(cmd.d) + } + cmd.c <- true +} diff --git a/vendor/go.opencensus.io/tag/BUILD.bazel b/vendor/go.opencensus.io/tag/BUILD.bazel new file mode 100644 index 0000000000000..90e461aedca30 --- /dev/null +++ b/vendor/go.opencensus.io/tag/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "context.go", + "doc.go", + "key.go", + "map.go", + "map_codec.go", + "metadata.go", + "profile_19.go", + "profile_not19.go", + "validate.go", + ], + importmap = "k8s.io/kops/vendor/go.opencensus.io/tag", + importpath = "go.opencensus.io/tag", + visibility = ["//visibility:public"], +) diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go new file mode 100644 index 0000000000000..b27d1b26b1323 --- /dev/null +++ b/vendor/go.opencensus.io/tag/context.go @@ -0,0 +1,43 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "context" +) + +// FromContext returns the tag map stored in the context. +func FromContext(ctx context.Context) *Map { + // The returned tag map shouldn't be mutated. + ts := ctx.Value(mapCtxKey) + if ts == nil { + return nil + } + return ts.(*Map) +} + +// NewContext creates a new context with the given tag map. +// To propagate a tag map to downstream methods and downstream RPCs, add a tag map +// to the current context. 
NewContext will return a copy of the current context, +// and put the tag map into the returned one. +// If there is already a tag map in the current context, it will be replaced with m. +func NewContext(ctx context.Context, m *Map) context.Context { + return context.WithValue(ctx, mapCtxKey, m) +} + +type ctxKey struct{} + +var mapCtxKey = ctxKey{} diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go new file mode 100644 index 0000000000000..da16b74e4debb --- /dev/null +++ b/vendor/go.opencensus.io/tag/doc.go @@ -0,0 +1,26 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package tag contains OpenCensus tags. + +Tags are key-value pairs. Tags provide additional cardinality to +the OpenCensus instrumentation data. + +Tags can be propagated on the wire and in the same +process via context.Context. Encode and Decode should be +used to represent tags into their binary propagation form. +*/ +package tag // import "go.opencensus.io/tag" diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go new file mode 100644 index 0000000000000..ebbed95000625 --- /dev/null +++ b/vendor/go.opencensus.io/tag/key.go @@ -0,0 +1,35 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +// Key represents a tag key. +type Key struct { + name string +} + +// NewKey creates or retrieves a string key identified by name. +// Calling NewKey consequently with the same name returns the same key. +func NewKey(name string) (Key, error) { + if !checkKeyName(name) { + return Key{}, errInvalidKeyName + } + return Key{name: name}, nil +} + +// Name returns the name of the key. +func (k Key) Name() string { + return k.name +} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go new file mode 100644 index 0000000000000..0272ef85a4ccf --- /dev/null +++ b/vendor/go.opencensus.io/tag/map.go @@ -0,0 +1,229 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "bytes" + "context" + "fmt" + "sort" +) + +// Tag is a key value pair that can be propagated on wire. +type Tag struct { + Key Key + Value string +} + +type tagContent struct { + value string + m metadatas +} + +// Map is a map of tags. Use New to create a context containing +// a new Map. +type Map struct { + m map[Key]tagContent +} + +// Value returns the value for the key if a value for the key exists. +func (m *Map) Value(k Key) (string, bool) { + if m == nil { + return "", false + } + v, ok := m.m[k] + return v.value, ok +} + +func (m *Map) String() string { + if m == nil { + return "nil" + } + keys := make([]Key, 0, len(m.m)) + for k := range m.m { + keys = append(keys, k) + } + sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() }) + + var buffer bytes.Buffer + buffer.WriteString("{ ") + for _, k := range keys { + buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k])) + } + buffer.WriteString(" }") + return buffer.String() +} + +func (m *Map) insert(k Key, v string, md metadatas) { + if _, ok := m.m[k]; ok { + return + } + m.m[k] = tagContent{value: v, m: md} +} + +func (m *Map) update(k Key, v string, md metadatas) { + if _, ok := m.m[k]; ok { + m.m[k] = tagContent{value: v, m: md} + } +} + +func (m *Map) upsert(k Key, v string, md metadatas) { + m.m[k] = tagContent{value: v, m: md} +} + +func (m *Map) delete(k Key) { + delete(m.m, k) +} + +func newMap() *Map { + return &Map{m: make(map[Key]tagContent)} +} + +// Mutator modifies a tag map. +type Mutator interface { + Mutate(t *Map) (*Map, error) +} + +// Insert returns a mutator that inserts a +// value associated with k. If k already exists in the tag map, +// mutator doesn't update the value. +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Insert(k Key, v string, mds ...Metadata) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.insert(k, v, createMetadatas(mds...)) + return m, nil + }, + } +} + +// Update returns a mutator that updates the +// value of the tag associated with k with v. If k doesn't +// exists in the tag map, the mutator doesn't insert the value. +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Update(k Key, v string, mds ...Metadata) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.update(k, v, createMetadatas(mds...)) + return m, nil + }, + } +} + +// Upsert returns a mutator that upserts the +// value of the tag associated with k with v. It inserts the +// value if k doesn't exist already. It mutates the value +// if k already exists. +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. 
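// Illustrative sketch of building a tag map with the mutators above and
// recording against it (the key and measure names are hypothetical):
//
//	methodKey, _ := tag.NewKey("method")
//	ctx, err := tag.New(context.Background(),
//		tag.Insert(methodKey, "GET"),  // no-op if "method" is already set
//		tag.Upsert(methodKey, "POST"), // always sets the value
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Update(methodKey, "PUT")}, requestCount.M(1))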
+func Upsert(k Key, v string, mds ...Metadata) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.upsert(k, v, createMetadatas(mds...)) + return m, nil + }, + } +} + +func createMetadatas(mds ...Metadata) metadatas { + var metas metadatas + if len(mds) > 0 { + for _, md := range mds { + if md != nil { + md(&metas) + } + } + } else { + WithTTL(TTLUnlimitedPropagation)(&metas) + } + return metas + +} + +// Delete returns a mutator that deletes +// the value associated with k. +func Delete(k Key) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + m.delete(k) + return m, nil + }, + } +} + +// New returns a new context that contains a tag map +// originated from the incoming context and modified +// with the provided mutators. +func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { + m := newMap() + orig := FromContext(ctx) + if orig != nil { + for k, v := range orig.m { + if !checkKeyName(k.Name()) { + return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) + } + if !checkValue(v.value) { + return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) + } + m.insert(k, v.value, v.m) + } + } + var err error + for _, mod := range mutator { + m, err = mod.Mutate(m) + if err != nil { + return ctx, err + } + } + return NewContext(ctx, m), nil +} + +// Do is similar to pprof.Do: a convenience for installing the tags +// from the context as Go profiler labels. This allows you to +// correlated runtime profiling with stats. +// +// It converts the key/values from the given map to Go profiler labels +// and calls pprof.Do. +// +// Do is going to do nothing if your Go version is below 1.9. +func Do(ctx context.Context, f func(ctx context.Context)) { + do(ctx, f) +} + +type mutator struct { + fn func(t *Map) (*Map, error) +} + +func (m *mutator) Mutate(t *Map) (*Map, error) { + return m.fn(t) +} diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go new file mode 100644 index 0000000000000..f8b58276153f0 --- /dev/null +++ b/vendor/go.opencensus.io/tag/map_codec.go @@ -0,0 +1,239 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "encoding/binary" + "fmt" +) + +// KeyType defines the types of keys allowed. Currently only keyTypeString is +// supported. +type keyType byte + +const ( + keyTypeString keyType = iota + keyTypeInt64 + keyTypeTrue + keyTypeFalse + + tagsVersionID = byte(0) +) + +type encoderGRPC struct { + buf []byte + writeIdx, readIdx int +} + +// writeKeyString writes the fieldID '0' followed by the key string and value +// string. 
+func (eg *encoderGRPC) writeTagString(k, v string) { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k) + eg.writeStringWithVarintLen(v) +} + +func (eg *encoderGRPC) writeTagUint64(k string, i uint64) { + eg.writeByte(byte(keyTypeInt64)) + eg.writeStringWithVarintLen(k) + eg.writeUint64(i) +} + +func (eg *encoderGRPC) writeTagTrue(k string) { + eg.writeByte(byte(keyTypeTrue)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeTagFalse(k string) { + eg.writeByte(byte(keyTypeFalse)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) { + length := len(bytes) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], bytes) + eg.writeIdx += length +} + +func (eg *encoderGRPC) writeStringWithVarintLen(s string) { + length := len(s) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], s) + eg.writeIdx += length +} + +func (eg *encoderGRPC) writeByte(v byte) { + eg.growIfRequired(1) + eg.buf[eg.writeIdx] = v + eg.writeIdx++ +} + +func (eg *encoderGRPC) writeUint32(i uint32) { + eg.growIfRequired(4) + binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 4 +} + +func (eg *encoderGRPC) writeUint64(i uint64) { + eg.growIfRequired(8) + binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 8 +} + +func (eg *encoderGRPC) readByte() byte { + b := eg.buf[eg.readIdx] + eg.readIdx++ + return b +} + +func (eg *encoderGRPC) readUint32() uint32 { + i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:]) + eg.readIdx += 4 + return i +} + +func (eg *encoderGRPC) readUint64() uint64 { + i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) + eg.readIdx += 8 + return i +} + +func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { + if eg.readEnded() { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) + if valueStart <= 0 { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + + valueStart += eg.readIdx + valueEnd := valueStart + int(length) + if valueEnd > len(eg.buf) { + return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) + } + + eg.readIdx = valueEnd + return eg.buf[valueStart:valueEnd], nil +} + +func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { + bytes, err := eg.readBytesWithVarintLen() + if err != nil { + return "", err + } + return string(bytes), nil +} + +func (eg *encoderGRPC) growIfRequired(expected int) { + if len(eg.buf)-eg.writeIdx < expected { + tmp := make([]byte, 2*(len(eg.buf)+1)+expected) + copy(tmp, eg.buf) + eg.buf = tmp + } +} + +func (eg *encoderGRPC) readEnded() bool { + return eg.readIdx >= len(eg.buf) +} + +func (eg *encoderGRPC) bytes() []byte { + return eg.buf[:eg.writeIdx] +} + +// Encode encodes the tag map into a []byte. It is useful to propagate +// the tag maps on wire in binary format. 
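// Illustrative round trip through the binary propagation format (the context
// and its key set are hypothetical):
//
//	m := tag.FromContext(ctx)
//	wire := tag.Encode(m)       // e.g. to place into outgoing metadata
//	m2, err := tag.Decode(wire) // on the receiving side
//	if err != nil {
//		// the payload was malformed or used an unsupported version
//	}
//	ctx2 := tag.NewContext(context.Background(), m2)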
+func Encode(m *Map) []byte { + if m == nil { + return nil + } + eg := &encoderGRPC{ + buf: make([]byte, len(m.m)), + } + eg.writeByte(byte(tagsVersionID)) + for k, v := range m.m { + if v.m.ttl.ttl == valueTTLUnlimitedPropagation { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k.name) + eg.writeBytesWithVarintLen([]byte(v.value)) + } + } + return eg.bytes() +} + +// Decode decodes the given []byte into a tag map. +func Decode(bytes []byte) (*Map, error) { + ts := newMap() + err := DecodeEach(bytes, ts.upsert) + if err != nil { + // no partial failures + return nil, err + } + return ts, nil +} + +// DecodeEach decodes the given serialized tag map, calling handler for each +// tag key and value decoded. +func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error { + eg := &encoderGRPC{ + buf: bytes, + } + if len(eg.buf) == 0 { + return nil + } + + version := eg.readByte() + if version > tagsVersionID { + return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) + } + + for !eg.readEnded() { + typ := keyType(eg.readByte()) + + if typ != keyTypeString { + return fmt.Errorf("cannot decode: invalid key type: %q", typ) + } + + k, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + v, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + key, err := NewKey(string(k)) + if err != nil { + return err + } + val := string(v) + if !checkValue(val) { + return errInvalidValue + } + fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation))) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go new file mode 100644 index 0000000000000..6571a583ea6c7 --- /dev/null +++ b/vendor/go.opencensus.io/tag/metadata.go @@ -0,0 +1,52 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +const ( + // valueTTLNoPropagation prevents tag from propagating. + valueTTLNoPropagation = 0 + + // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops. + valueTTLUnlimitedPropagation = -1 +) + +// TTL is metadata that specifies number of hops a tag can propagate. +// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata +type TTL struct { + ttl int +} + +var ( + // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops. + TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation} + + // TTLNoPropagation is TTL metadata that prevents tag from propagating. + TTLNoPropagation = TTL{ttl: valueTTLNoPropagation} +) + +type metadatas struct { + ttl TTL +} + +// Metadata applies metadatas specified by the function. +type Metadata func(*metadatas) + +// WithTTL applies metadata with provided ttl. 
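A small round-trip sketch for the Encode/Decode binary codec defined in map_codec.go above (hand-written, not part of the vendored file); it assumes ctx already carries a tag map, for example one built with tag.New as sketched earlier.

    // roundTrip serializes the tag map carried by ctx and decodes it again,
    // as a wire-level propagator would.
    func roundTrip(ctx context.Context) (*tag.Map, error) {
        wire := tag.Encode(tag.FromContext(ctx)) // a nil map encodes to nil
        return tag.Decode(wire)                  // fails closed: no partial tag maps
    }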
+func WithTTL(ttl TTL) Metadata { + return func(m *metadatas) { + m.ttl = ttl + } +} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go new file mode 100644 index 0000000000000..b34d95e34a2cf --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -0,0 +1,31 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.9 + +package tag + +import ( + "context" + "runtime/pprof" +) + +func do(ctx context.Context, f func(ctx context.Context)) { + m := FromContext(ctx) + keyvals := make([]string, 0, 2*len(m.m)) + for k, v := range m.m { + keyvals = append(keyvals, k.Name(), v.value) + } + pprof.Do(ctx, pprof.Labels(keyvals...), f) +} diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go new file mode 100644 index 0000000000000..83adbce56b72a --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -0,0 +1,23 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.9 + +package tag + +import "context" + +func do(ctx context.Context, f func(ctx context.Context)) { + f(ctx) +} diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go new file mode 100644 index 0000000000000..0939fc67483a1 --- /dev/null +++ b/vendor/go.opencensus.io/tag/validate.go @@ -0,0 +1,56 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tag + +import "errors" + +const ( + maxKeyLength = 255 + + // valid are restricted to US-ASCII subset (range 0x20 (' ') to 0x7e ('~')). 
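The TTL metadata above controls whether a tag leaves the process. A hand-written sketch (key names and values are illustrative); note that Encode in map_codec.go only writes tags whose TTL is unlimited, so the second tag below never goes on the wire.

    userKey, _ := tag.NewKey("user")       // illustrative key
    sessionKey, _ := tag.NewKey("session") // illustrative key
    ctx, err := tag.New(ctx,
        tag.Upsert(userKey, "alice"), // default metadata: unlimited propagation
        tag.Upsert(sessionKey, "s-123",
            tag.WithTTL(tag.TTLNoPropagation)), // kept in-process only
    )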
+ validKeyValueMin = 32 + validKeyValueMax = 126 +) + +var ( + errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters") + errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters") +) + +func checkKeyName(name string) bool { + if len(name) == 0 { + return false + } + if len(name) > maxKeyLength { + return false + } + return isASCII(name) +} + +func isASCII(s string) bool { + for _, c := range s { + if (c < validKeyValueMin) || (c > validKeyValueMax) { + return false + } + } + return true +} + +func checkValue(v string) bool { + if len(v) > maxKeyLength { + return false + } + return isASCII(v) +} diff --git a/vendor/go.opencensus.io/trace/BUILD.bazel b/vendor/go.opencensus.io/trace/BUILD.bazel new file mode 100644 index 0000000000000..139b7b9fb7c0f --- /dev/null +++ b/vendor/go.opencensus.io/trace/BUILD.bazel @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "basetypes.go", + "config.go", + "doc.go", + "evictedqueue.go", + "export.go", + "lrumap.go", + "sampling.go", + "spanbucket.go", + "spanstore.go", + "status_codes.go", + "trace.go", + "trace_go11.go", + "trace_nongo11.go", + ], + importmap = "k8s.io/kops/vendor/go.opencensus.io/trace", + importpath = "go.opencensus.io/trace", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/hashicorp/golang-lru/simplelru:go_default_library", + "//vendor/go.opencensus.io/internal:go_default_library", + "//vendor/go.opencensus.io/trace/internal:go_default_library", + "//vendor/go.opencensus.io/trace/tracestate:go_default_library", + ], +) diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go new file mode 100644 index 0000000000000..0c54492a2b1d0 --- /dev/null +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -0,0 +1,119 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "fmt" + "time" +) + +type ( + // TraceID is a 16-byte identifier for a set of spans. + TraceID [16]byte + + // SpanID is an 8-byte identifier for a single span. + SpanID [8]byte +) + +func (t TraceID) String() string { + return fmt.Sprintf("%02x", t[:]) +} + +func (s SpanID) String() string { + return fmt.Sprintf("%02x", s[:]) +} + +// Annotation represents a text annotation with a set of attributes and a timestamp. +type Annotation struct { + Time time.Time + Message string + Attributes map[string]interface{} +} + +// Attribute represents a key-value pair on a span, link or annotation. +// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. +type Attribute struct { + key string + value interface{} +} + +// BoolAttribute returns a bool-valued attribute. +func BoolAttribute(key string, value bool) Attribute { + return Attribute{key: key, value: value} +} + +// Int64Attribute returns an int64-valued attribute. 
+func Int64Attribute(key string, value int64) Attribute { + return Attribute{key: key, value: value} +} + +// Float64Attribute returns a float64-valued attribute. +func Float64Attribute(key string, value float64) Attribute { + return Attribute{key: key, value: value} +} + +// StringAttribute returns a string-valued attribute. +func StringAttribute(key string, value string) Attribute { + return Attribute{key: key, value: value} +} + +// LinkType specifies the relationship between the span that had the link +// added, and the linked span. +type LinkType int32 + +// LinkType values. +const ( + LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. + LinkTypeChild // The linked span is a child of the current span. + LinkTypeParent // The linked span is the parent of the current span. +) + +// Link represents a reference from one span to another span. +type Link struct { + TraceID TraceID + SpanID SpanID + Type LinkType + // Attributes is a set of attributes on the link. + Attributes map[string]interface{} +} + +// MessageEventType specifies the type of message event. +type MessageEventType int32 + +// MessageEventType values. +const ( + MessageEventTypeUnspecified MessageEventType = iota // Unknown event type. + MessageEventTypeSent // Indicates a sent RPC message. + MessageEventTypeRecv // Indicates a received RPC message. +) + +// MessageEvent represents an event describing a message sent or received on the network. +type MessageEvent struct { + Time time.Time + EventType MessageEventType + MessageID int64 + UncompressedByteSize int64 + CompressedByteSize int64 +} + +// Status is the status of a Span. +type Status struct { + // Code is a status code. Zero indicates success. + // + // If Code will be propagated to Google APIs, it ideally should be a value from + // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto . + Code int32 + Message string +} diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go new file mode 100644 index 0000000000000..775f8274faae2 --- /dev/null +++ b/vendor/go.opencensus.io/trace/config.go @@ -0,0 +1,86 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + + "go.opencensus.io/trace/internal" +) + +// Config represents the global tracing configuration. +type Config struct { + // DefaultSampler is the default sampler used when creating new spans. + DefaultSampler Sampler + + // IDGenerator is for internal use only. 
+ IDGenerator internal.IDGenerator + + // MaxAnnotationEventsPerSpan is max number of annotation events per span + MaxAnnotationEventsPerSpan int + + // MaxMessageEventsPerSpan is max number of message events per span + MaxMessageEventsPerSpan int + + // MaxAnnotationEventsPerSpan is max number of attributes per span + MaxAttributesPerSpan int + + // MaxLinksPerSpan is max number of links per span + MaxLinksPerSpan int +} + +var configWriteMu sync.Mutex + +const ( + // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span + DefaultMaxAnnotationEventsPerSpan = 32 + + // DefaultMaxMessageEventsPerSpan is default max number of message events per span + DefaultMaxMessageEventsPerSpan = 128 + + // DefaultMaxAttributesPerSpan is default max number of attributes per span + DefaultMaxAttributesPerSpan = 32 + + // DefaultMaxLinksPerSpan is default max number of links per span + DefaultMaxLinksPerSpan = 32 +) + +// ApplyConfig applies changes to the global tracing configuration. +// +// Fields not provided in the given config are going to be preserved. +func ApplyConfig(cfg Config) { + configWriteMu.Lock() + defer configWriteMu.Unlock() + c := *config.Load().(*Config) + if cfg.DefaultSampler != nil { + c.DefaultSampler = cfg.DefaultSampler + } + if cfg.IDGenerator != nil { + c.IDGenerator = cfg.IDGenerator + } + if cfg.MaxAnnotationEventsPerSpan > 0 { + c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan + } + if cfg.MaxMessageEventsPerSpan > 0 { + c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan + } + if cfg.MaxAttributesPerSpan > 0 { + c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan + } + if cfg.MaxLinksPerSpan > 0 { + c.MaxLinksPerSpan = cfg.MaxLinksPerSpan + } + config.Store(&c) +} diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go new file mode 100644 index 0000000000000..04b1ee4f38eab --- /dev/null +++ b/vendor/go.opencensus.io/trace/doc.go @@ -0,0 +1,53 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace contains support for OpenCensus distributed tracing. + +The following assumes a basic familiarity with OpenCensus concepts. +See http://opencensus.io + + +Exporting Traces + +To export collected tracing data, register at least one exporter. You can use +one of the provided exporters or write your own. + + trace.RegisterExporter(exporter) + +By default, traces will be sampled relatively rarely. To change the sampling +frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler +to sample a subset of traces, or use AlwaysSample to collect a trace on every run: + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + +Be careful about using trace.AlwaysSample in a production application with +significant traffic: a new trace will be started and exported for every request. + +Adding Spans to a Trace + +A trace consists of a tree of spans. In Go, the current span is carried in a +context.Context. 
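ApplyConfig, defined just above, merges field by field: zero-valued fields keep their current settings. A hand-written sketch (the limits chosen are arbitrary):

    trace.ApplyConfig(trace.Config{
        // Only these two fields change; the DefaultSampler and the other
        // per-span limits keep their previous values.
        MaxAttributesPerSpan: 64,
        MaxLinksPerSpan:      16,
    })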
+ +It is common to want to capture all the activity of a function call in a span. For +this to work, the function must take a context.Context as a parameter. Add these two +lines to the top of the function: + + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() + +StartSpan will create a new top-level span if the context +doesn't contain another span, otherwise it will create a child span. +*/ +package trace // import "go.opencensus.io/trace" diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go new file mode 100644 index 0000000000000..ffc264f23d2dd --- /dev/null +++ b/vendor/go.opencensus.io/trace/evictedqueue.go @@ -0,0 +1,38 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +type evictedQueue struct { + queue []interface{} + capacity int + droppedCount int +} + +func newEvictedQueue(capacity int) *evictedQueue { + eq := &evictedQueue{ + capacity: capacity, + queue: make([]interface{}, 0), + } + + return eq +} + +func (eq *evictedQueue) add(value interface{}) { + if len(eq.queue) == eq.capacity { + eq.queue = eq.queue[1:] + eq.droppedCount++ + } + eq.queue = append(eq.queue, value) +} diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go new file mode 100644 index 0000000000000..e0d9a4b99e96f --- /dev/null +++ b/vendor/go.opencensus.io/trace/export.go @@ -0,0 +1,97 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "sync/atomic" + "time" +) + +// Exporter is a type for functions that receive sampled trace spans. +// +// The ExportSpan method should be safe for concurrent use and should return +// quickly; if an Exporter takes a significant amount of time to process a +// SpanData, that work should be done on another goroutine. +// +// The SpanData should not be modified, but a pointer to it can be kept. +type Exporter interface { + ExportSpan(s *SpanData) +} + +type exportersMap map[Exporter]struct{} + +var ( + exporterMu sync.Mutex + exporters atomic.Value +) + +// RegisterExporter adds to the list of Exporters that will receive sampled +// trace spans. +// +// Binaries can register exporters, libraries shouldn't register exporters. 
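The Exporter interface above is the integration point for trace backends. A minimal, hand-written console exporter as a sketch (illustrative only; real exporters ship as separate packages):

    package main

    import (
        "log"

        "go.opencensus.io/trace"
    )

    // logExporter prints every sampled span it receives.
    type logExporter struct{}

    func (logExporter) ExportSpan(sd *trace.SpanData) {
        log.Printf("trace=%s span=%s name=%q latency=%v",
            sd.TraceID, sd.SpanID, sd.Name, sd.EndTime.Sub(sd.StartTime))
    }

    func main() {
        // Binaries, not libraries, register exporters.
        trace.RegisterExporter(logExporter{})
        // ... run the instrumented application ...
    }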
+func RegisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + new[e] = struct{}{} + exporters.Store(new) + exporterMu.Unlock() +} + +// UnregisterExporter removes from the list of Exporters the Exporter that was +// registered with the given name. +func UnregisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + delete(new, e) + exporters.Store(new) + exporterMu.Unlock() +} + +// SpanData contains all the information collected by a Span. +type SpanData struct { + SpanContext + ParentSpanID SpanID + SpanKind int + Name string + StartTime time.Time + // The wall clock time of EndTime will be adjusted to always be offset + // from StartTime by the duration of the span. + EndTime time.Time + // The values of Attributes each have type string, bool, or int64. + Attributes map[string]interface{} + Annotations []Annotation + MessageEvents []MessageEvent + Status + Links []Link + HasRemoteParent bool + DroppedAttributeCount int + DroppedAnnotationCount int + DroppedMessageEventCount int + DroppedLinkCount int + + // ChildSpanCount holds the number of child span created for this span. + ChildSpanCount int +} diff --git a/vendor/go.opencensus.io/trace/internal/BUILD.bazel b/vendor/go.opencensus.io/trace/internal/BUILD.bazel new file mode 100644 index 0000000000000..8d10a36486ffb --- /dev/null +++ b/vendor/go.opencensus.io/trace/internal/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["internal.go"], + importmap = "k8s.io/kops/vendor/go.opencensus.io/trace/internal", + importpath = "go.opencensus.io/trace/internal", + visibility = ["//vendor/go.opencensus.io/trace:__subpackages__"], +) diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go new file mode 100644 index 0000000000000..7e808d8f30e60 --- /dev/null +++ b/vendor/go.opencensus.io/trace/internal/internal.go @@ -0,0 +1,22 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides trace internals. +package internal + +// IDGenerator allows custom generators for TraceId and SpanId. +type IDGenerator interface { + NewTraceID() [16]byte + NewSpanID() [8]byte +} diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go new file mode 100644 index 0000000000000..3f80a3368139e --- /dev/null +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -0,0 +1,37 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "github.com/hashicorp/golang-lru/simplelru" +) + +type lruMap struct { + simpleLruMap *simplelru.LRU + droppedCount int +} + +func newLruMap(size int) *lruMap { + lm := &lruMap{} + lm.simpleLruMap, _ = simplelru.NewLRU(size, nil) + return lm +} + +func (lm *lruMap) add(key, value interface{}) { + evicted := lm.simpleLruMap.Add(key, value) + if evicted { + lm.droppedCount++ + } +} diff --git a/vendor/go.opencensus.io/trace/propagation/BUILD.bazel b/vendor/go.opencensus.io/trace/propagation/BUILD.bazel new file mode 100644 index 0000000000000..99b4b7cac82fc --- /dev/null +++ b/vendor/go.opencensus.io/trace/propagation/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["propagation.go"], + importmap = "k8s.io/kops/vendor/go.opencensus.io/trace/propagation", + importpath = "go.opencensus.io/trace/propagation", + visibility = ["//visibility:public"], + deps = ["//vendor/go.opencensus.io/trace:go_default_library"], +) diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go new file mode 100644 index 0000000000000..1eb190a96a3a8 --- /dev/null +++ b/vendor/go.opencensus.io/trace/propagation/propagation.go @@ -0,0 +1,108 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package propagation implements the binary trace context format. +package propagation // import "go.opencensus.io/trace/propagation" + +// TODO: link to external spec document. + +// BinaryFormat format: +// +// Binary value: +// version_id: 1 byte representing the version id. +// +// For version_id = 0: +// +// version_format: +// field_format: +// +// Fields: +// +// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. +// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. +// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. +// +// Fields MUST be encoded using the field id order (smaller to higher). 
+// +// Valid value example: +// +// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, +// 98, 99, 100, 101, 102, 103, 104, 2, 1} +// +// version_id = 0; +// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} +// span_id = {97, 98, 99, 100, 101, 102, 103, 104}; +// trace_options = {1}; + +import ( + "net/http" + + "go.opencensus.io/trace" +) + +// Binary returns the binary format representation of a SpanContext. +// +// If sc is the zero value, Binary returns nil. +func Binary(sc trace.SpanContext) []byte { + if sc == (trace.SpanContext{}) { + return nil + } + var b [29]byte + copy(b[2:18], sc.TraceID[:]) + b[18] = 1 + copy(b[19:27], sc.SpanID[:]) + b[27] = 2 + b[28] = uint8(sc.TraceOptions) + return b[:] +} + +// FromBinary returns the SpanContext represented by b. +// +// If b has an unsupported version ID or contains no TraceID, FromBinary +// returns with ok==false. +func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { + if len(b) == 0 || b[0] != 0 { + return trace.SpanContext{}, false + } + b = b[1:] + if len(b) >= 17 && b[0] == 0 { + copy(sc.TraceID[:], b[1:17]) + b = b[17:] + } else { + return trace.SpanContext{}, false + } + if len(b) >= 9 && b[0] == 1 { + copy(sc.SpanID[:], b[1:9]) + b = b[9:] + } + if len(b) >= 2 && b[0] == 2 { + sc.TraceOptions = trace.TraceOptions(b[1]) + } + return sc, true +} + +// HTTPFormat implementations propagate span contexts +// in HTTP requests. +// +// SpanContextFromRequest extracts a span context from incoming +// requests. +// +// SpanContextToRequest modifies the given request to include the given +// span context. +type HTTPFormat interface { + SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) + SpanContextToRequest(sc trace.SpanContext, req *http.Request) +} + +// TODO(jbd): Find a more representative but short name for HTTPFormat. diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go new file mode 100644 index 0000000000000..71c10f9e3b424 --- /dev/null +++ b/vendor/go.opencensus.io/trace/sampling.go @@ -0,0 +1,75 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "encoding/binary" +) + +const defaultSamplingProbability = 1e-4 + +// Sampler decides whether a trace should be sampled and exported. +type Sampler func(SamplingParameters) SamplingDecision + +// SamplingParameters contains the values passed to a Sampler. +type SamplingParameters struct { + ParentContext SpanContext + TraceID TraceID + SpanID SpanID + Name string + HasRemoteParent bool +} + +// SamplingDecision is the value returned by a Sampler. +type SamplingDecision struct { + Sample bool +} + +// ProbabilitySampler returns a Sampler that samples a given fraction of traces. +// +// It also samples spans whose parents are sampled. 
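A hand-written sketch of the Binary/FromBinary helpers in the propagation package above carrying a SpanContext across a process boundary; in real code the byte slice would travel in a transport header (for example a gRPC binary metadata key), and the span names here are illustrative.

    package main

    import (
        "context"

        "go.opencensus.io/trace"
        "go.opencensus.io/trace/propagation"
    )

    func handOff(ctx context.Context) {
        _, span := trace.StartSpan(ctx, "example.com/Client.Call")
        wire := propagation.Binary(span.SpanContext())
        span.End()

        // ...on the receiving side...
        if sc, ok := propagation.FromBinary(wire); ok {
            ctx, serverSpan := trace.StartSpanWithRemoteParent(context.Background(),
                "example.com/Server.Handle", sc)
            defer serverSpan.End()
            _ = ctx
        }
    }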
+func ProbabilitySampler(fraction float64) Sampler { + if !(fraction >= 0) { + fraction = 0 + } else if fraction >= 1 { + return AlwaysSample() + } + + traceIDUpperBound := uint64(fraction * (1 << 63)) + return Sampler(func(p SamplingParameters) SamplingDecision { + if p.ParentContext.IsSampled() { + return SamplingDecision{Sample: true} + } + x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 + return SamplingDecision{Sample: x < traceIDUpperBound} + }) +} + +// AlwaysSample returns a Sampler that samples every trace. +// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. +func AlwaysSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: true} + } +} + +// NeverSample returns a Sampler that samples no traces. +func NeverSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: false} + } +} diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go new file mode 100644 index 0000000000000..fbabad34c000d --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanbucket.go @@ -0,0 +1,130 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "time" +) + +// samplePeriod is the minimum time between accepting spans in a single bucket. +const samplePeriod = time.Second + +// defaultLatencies contains the default latency bucket bounds. +// TODO: consider defaults, make configurable +var defaultLatencies = [...]time.Duration{ + 10 * time.Microsecond, + 100 * time.Microsecond, + time.Millisecond, + 10 * time.Millisecond, + 100 * time.Millisecond, + time.Second, + 10 * time.Second, + time.Minute, +} + +// bucket is a container for a set of spans for a particular error code or latency range. +type bucket struct { + nextTime time.Time // next time we can accept a span + buffer []*SpanData // circular buffer of spans + nextIndex int // location next SpanData should be placed in buffer + overflow bool // whether the circular buffer has wrapped around +} + +func makeBucket(bufferSize int) bucket { + return bucket{ + buffer: make([]*SpanData, bufferSize), + } +} + +// add adds a span to the bucket, if nextTime has been reached. +func (b *bucket) add(s *SpanData) { + if s.EndTime.Before(b.nextTime) { + return + } + if len(b.buffer) == 0 { + return + } + b.nextTime = s.EndTime.Add(samplePeriod) + b.buffer[b.nextIndex] = s + b.nextIndex++ + if b.nextIndex == len(b.buffer) { + b.nextIndex = 0 + b.overflow = true + } +} + +// size returns the number of spans in the bucket. +func (b *bucket) size() int { + if b.overflow { + return len(b.buffer) + } + return b.nextIndex +} + +// span returns the ith span in the bucket. 
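A short sketch combining the samplers above with the per-span WithSampler option defined later in trace.go; the 1% fraction and span name are arbitrary.

    // Sample roughly 1% of traces rooted at this span, regardless of the
    // global DefaultSampler; children of sampled parents stay sampled.
    ctx, span := trace.StartSpan(ctx, "example.com/ExpensiveScan",
        trace.WithSampler(trace.ProbabilitySampler(0.01)))
    defer span.End()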
+func (b *bucket) span(i int) *SpanData { + if !b.overflow { + return b.buffer[i] + } + if i < len(b.buffer)-b.nextIndex { + return b.buffer[b.nextIndex+i] + } + return b.buffer[b.nextIndex+i-len(b.buffer)] +} + +// resize changes the size of the bucket to n, keeping up to n existing spans. +func (b *bucket) resize(n int) { + cur := b.size() + newBuffer := make([]*SpanData, n) + if cur < n { + for i := 0; i < cur; i++ { + newBuffer[i] = b.span(i) + } + b.buffer = newBuffer + b.nextIndex = cur + b.overflow = false + return + } + for i := 0; i < n; i++ { + newBuffer[i] = b.span(i + cur - n) + } + b.buffer = newBuffer + b.nextIndex = 0 + b.overflow = true +} + +// latencyBucket returns the appropriate bucket number for a given latency. +func latencyBucket(latency time.Duration) int { + i := 0 + for i < len(defaultLatencies) && latency >= defaultLatencies[i] { + i++ + } + return i +} + +// latencyBucketBounds returns the lower and upper bounds for a latency bucket +// number. +// +// The lower bound is inclusive, the upper bound is exclusive (except for the +// last bucket.) +func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { + if index == 0 { + return 0, defaultLatencies[index] + } + if index == len(defaultLatencies) { + return defaultLatencies[index-1], 1<<63 - 1 + } + return defaultLatencies[index-1], defaultLatencies[index] +} diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go new file mode 100644 index 0000000000000..c442d990218a3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanstore.go @@ -0,0 +1,306 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "time" + + "go.opencensus.io/internal" +) + +const ( + maxBucketSize = 100000 + defaultBucketSize = 10 +) + +var ( + ssmu sync.RWMutex // protects spanStores + spanStores = make(map[string]*spanStore) +) + +// This exists purely to avoid exposing internal methods used by z-Pages externally. +type internalOnly struct{} + +func init() { + //TODO(#412): remove + internal.Trace = &internalOnly{} +} + +// ReportActiveSpans returns the active spans for the given name. +func (i internalOnly) ReportActiveSpans(name string) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for span := range s.active { + out = append(out, span.makeSpanData()) + } + return out +} + +// ReportSpansByError returns a sample of error spans. +// +// If code is nonzero, only spans with that status code are returned. 
+func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + if code != 0 { + if b, ok := s.errors[code]; ok { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } else { + for _, b := range s.errors { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } + return out +} + +// ConfigureBucketSizes sets the number of spans to keep per latency and error +// bucket for different span names. +func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { + for _, bc := range bcs { + latencyBucketSize := bc.MaxRequestsSucceeded + if latencyBucketSize < 0 { + latencyBucketSize = 0 + } + if latencyBucketSize > maxBucketSize { + latencyBucketSize = maxBucketSize + } + errorBucketSize := bc.MaxRequestsErrors + if errorBucketSize < 0 { + errorBucketSize = 0 + } + if errorBucketSize > maxBucketSize { + errorBucketSize = maxBucketSize + } + spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) + } +} + +// ReportSpansPerMethod returns a summary of what spans are being stored for each span name. +func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { + out := make(map[string]internal.PerMethodSummary) + ssmu.RLock() + defer ssmu.RUnlock() + for name, s := range spanStores { + s.mu.Lock() + p := internal.PerMethodSummary{ + Active: len(s.active), + } + for code, b := range s.errors { + p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ + ErrorCode: code, + Size: b.size(), + }) + } + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ + MinLatency: min, + MaxLatency: max, + Size: b.size(), + }) + } + s.mu.Unlock() + out[name] = p + } + return out +} + +// ReportSpansByLatency returns a sample of successful spans. +// +// minLatency is the minimum latency of spans to be returned. +// maxLatency, if nonzero, is the maximum latency of spans to be returned. +func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + if i+1 != len(s.latency) && max <= minLatency { + continue + } + if maxLatency != 0 && maxLatency < min { + continue + } + for _, sd := range b.buffer { + if sd == nil { + break + } + if minLatency != 0 || maxLatency != 0 { + d := sd.EndTime.Sub(sd.StartTime) + if d < minLatency { + continue + } + if maxLatency != 0 && d > maxLatency { + continue + } + } + out = append(out, sd) + } + } + return out +} + +// spanStore keeps track of spans stored for a particular span name. +// +// It contains all active spans; a sample of spans for failed requests, +// categorized by error code; and a sample of spans for successful requests, +// bucketed by latency. +type spanStore struct { + mu sync.Mutex // protects everything below. + active map[*Span]struct{} + errors map[int32]*bucket + latency []bucket + maxSpansPerErrorBucket int +} + +// newSpanStore creates a span store. 
+func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { + s := &spanStore{ + active: make(map[*Span]struct{}), + latency: make([]bucket, len(defaultLatencies)+1), + maxSpansPerErrorBucket: errorBucketSize, + } + for i := range s.latency { + s.latency[i] = makeBucket(latencyBucketSize) + } + return s +} + +// spanStoreForName returns the spanStore for the given name. +// +// It returns nil if it doesn't exist. +func spanStoreForName(name string) *spanStore { + var s *spanStore + ssmu.RLock() + s, _ = spanStores[name] + ssmu.RUnlock() + return s +} + +// spanStoreForNameCreateIfNew returns the spanStore for the given name. +// +// It creates it if it didn't exist. +func spanStoreForNameCreateIfNew(name string) *spanStore { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + return s + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + return s + } + s = newSpanStore(name, defaultBucketSize, defaultBucketSize) + spanStores[name] = s + return s +} + +// spanStoreSetSize resizes the spanStore for the given name. +// +// It creates it if it didn't exist. +func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + s = newSpanStore(name, latencyBucketSize, errorBucketSize) + spanStores[name] = s +} + +func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { + s.mu.Lock() + for i := range s.latency { + s.latency[i].resize(latencyBucketSize) + } + for _, b := range s.errors { + b.resize(errorBucketSize) + } + s.maxSpansPerErrorBucket = errorBucketSize + s.mu.Unlock() +} + +// add adds a span to the active bucket of the spanStore. +func (s *spanStore) add(span *Span) { + s.mu.Lock() + s.active[span] = struct{}{} + s.mu.Unlock() +} + +// finished removes a span from the active set, and adds a corresponding +// SpanData to a latency or error bucket. +func (s *spanStore) finished(span *Span, sd *SpanData) { + latency := sd.EndTime.Sub(sd.StartTime) + if latency < 0 { + latency = 0 + } + code := sd.Status.Code + + s.mu.Lock() + delete(s.active, span) + if code == 0 { + s.latency[latencyBucket(latency)].add(sd) + } else { + if s.errors == nil { + s.errors = make(map[int32]*bucket) + } + if b := s.errors[code]; b != nil { + b.add(sd) + } else { + b := makeBucket(s.maxSpansPerErrorBucket) + s.errors[code] = &b + b.add(sd) + } + } + s.mu.Unlock() +} diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go new file mode 100644 index 0000000000000..ec60effd10882 --- /dev/null +++ b/vendor/go.opencensus.io/trace/status_codes.go @@ -0,0 +1,37 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace + +// Status codes for use with Span.SetStatus. These correspond to the status +// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +const ( + StatusCodeOK = 0 + StatusCodeCancelled = 1 + StatusCodeUnknown = 2 + StatusCodeInvalidArgument = 3 + StatusCodeDeadlineExceeded = 4 + StatusCodeNotFound = 5 + StatusCodeAlreadyExists = 6 + StatusCodePermissionDenied = 7 + StatusCodeResourceExhausted = 8 + StatusCodeFailedPrecondition = 9 + StatusCodeAborted = 10 + StatusCodeOutOfRange = 11 + StatusCodeUnimplemented = 12 + StatusCodeInternal = 13 + StatusCodeUnavailable = 14 + StatusCodeDataLoss = 15 + StatusCodeUnauthenticated = 16 +) diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go new file mode 100644 index 0000000000000..38ead7bf0ad07 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace.go @@ -0,0 +1,598 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "go.opencensus.io/internal" + "go.opencensus.io/trace/tracestate" +) + +// Span represents a span of a trace. It has an associated SpanContext, and +// stores data accumulated while the span is active. +// +// Ideally users should interact with Spans by calling the functions in this +// package that take a Context parameter. +type Span struct { + // data contains information recorded about the span. + // + // It will be non-nil if we are exporting the span or recording events for it. + // Otherwise, data is nil, and the Span is simply a carrier for the + // SpanContext, so that the trace ID is propagated. + data *SpanData + mu sync.Mutex // protects the contents of *data (but not the pointer value.) + spanContext SpanContext + + // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry + // is removed to create room for a new entry. + lruAttributes *lruMap + + // annotations are stored in FIFO queue capped by configured limit. + annotations *evictedQueue + + // messageEvents are stored in FIFO queue capped by configured limit. + messageEvents *evictedQueue + + // links are stored in FIFO queue capped by configured limit. + links *evictedQueue + + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. + *spanStore + endOnce sync.Once + + executionTracerTaskEnd func() // ends the execution tracer span +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *Span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.data != nil +} + +// TraceOptions contains options associated with a trace span. +type TraceOptions uint32 + +// IsSampled returns true if the span will be exported. 
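These constants mirror the gRPC status codes. A small hand-written sketch of how they pair with Span.SetStatus (defined later in trace.go); the message text is illustrative.

    if err != nil {
        span.SetStatus(trace.Status{
            Code:    trace.StatusCodeNotFound,
            Message: "user record not found", // illustrative
        })
    }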
+func (sc SpanContext) IsSampled() bool { + return sc.TraceOptions.IsSampled() +} + +// setIsSampled sets the TraceOptions bit that determines whether the span will be exported. +func (sc *SpanContext) setIsSampled(sampled bool) { + if sampled { + sc.TraceOptions |= 1 + } else { + sc.TraceOptions &= ^TraceOptions(1) + } +} + +// IsSampled returns true if the span will be exported. +func (t TraceOptions) IsSampled() bool { + return t&1 == 1 +} + +// SpanContext contains the state that must propagate across process boundaries. +// +// SpanContext is not an implementation of context.Context. +// TODO: add reference to external Census docs for SpanContext. +type SpanContext struct { + TraceID TraceID + SpanID SpanID + TraceOptions TraceOptions + Tracestate *tracestate.Tracestate +} + +type contextKey struct{} + +// FromContext returns the Span stored in a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Span { + s, _ := ctx.Value(contextKey{}).(*Span) + return s +} + +// NewContext returns a new context with the given Span attached. +func NewContext(parent context.Context, s *Span) context.Context { + return context.WithValue(parent, contextKey{}, s) +} + +// All available span kinds. Span kind must be either one of these values. +const ( + SpanKindUnspecified = iota + SpanKindServer + SpanKindClient +) + +// StartOptions contains options concerning how a span is started. +type StartOptions struct { + // Sampler to consult for this Span. If provided, it is always consulted. + // + // If not provided, then the behavior differs based on whether + // the parent of this Span is remote, local, or there is no parent. + // In the case of a remote parent or no parent, the + // default sampler (see Config) will be consulted. Otherwise, + // when there is a non-remote parent, no new sampling decision will be made: + // we will preserve the sampling of the parent. + Sampler Sampler + + // SpanKind represents the kind of a span. If none is set, + // SpanKindUnspecified is used. + SpanKind int +} + +// StartOption apply changes to StartOptions. +type StartOption func(*StartOptions) + +// WithSpanKind makes new spans to be created with the given kind. +func WithSpanKind(spanKind int) StartOption { + return func(o *StartOptions) { + o.SpanKind = spanKind + } +} + +// WithSampler makes new spans to be be created with a custom sampler. +// Otherwise, the global sampler is used. +func WithSampler(sampler Sampler) StartOption { + return func(o *StartOptions) { + o.Sampler = sampler + } +} + +// StartSpan starts a new child span of the current span in the context. If +// there is no span in the context, creates a new trace and span. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + var parent SpanContext + if p := FromContext(ctx); p != nil { + p.addChild() + parent = p.spanContext + } + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts) + + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + return NewContext(ctx, span), span +} + +// StartSpanWithRemoteParent starts a new child span of the span from the given parent. +// +// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is +// preferred for cases where the parent is propagated via an incoming request. 
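Pulling the pieces above together, a hand-written sketch of typical in-process instrumentation with StartSpan, span kinds, attributes, and annotations; the function, span name, and attribute keys are illustrative only.

    package main

    import (
        "context"

        "go.opencensus.io/trace"
    )

    func fetchUser(ctx context.Context, id string) error {
        ctx, span := trace.StartSpan(ctx, "example.com/FetchUser",
            trace.WithSpanKind(trace.SpanKindClient))
        defer span.End()

        span.AddAttributes(
            trace.StringAttribute("user.id", id),
            trace.BoolAttribute("cache.hit", false),
        )
        span.Annotatef(nil, "looking up user %s", id)

        // ... do the lookup using ctx ...
        return nil
    }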
+// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + return NewContext(ctx, span), span +} + +func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { + span := &Span{} + span.spanContext = parent + + cfg := config.Load().(*Config) + + if !hasParent { + span.spanContext.TraceID = cfg.IDGenerator.NewTraceID() + } + span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() + sampler := cfg.DefaultSampler + + if !hasParent || remoteParent || o.Sampler != nil { + // If this span is the child of a local span and no Sampler is set in the + // options, keep the parent's TraceOptions. + // + // Otherwise, consult the Sampler in the options if it is non-nil, otherwise + // the default sampler. + if o.Sampler != nil { + sampler = o.Sampler + } + span.spanContext.setIsSampled(sampler(SamplingParameters{ + ParentContext: parent, + TraceID: span.spanContext.TraceID, + SpanID: span.spanContext.SpanID, + Name: name, + HasRemoteParent: remoteParent}).Sample) + } + + if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() { + return span + } + + span.data = &SpanData{ + SpanContext: span.spanContext, + StartTime: time.Now(), + SpanKind: o.SpanKind, + Name: name, + HasRemoteParent: remoteParent, + } + span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + span.links = newEvictedQueue(cfg.MaxLinksPerSpan) + + if hasParent { + span.data.ParentSpanID = parent.SpanID + } + if internal.LocalSpanStoreEnabled { + var ss *spanStore + ss = spanStoreForNameCreateIfNew(name) + if ss != nil { + span.spanStore = ss + ss.add(span) + } + } + + return span +} + +// End ends the span. +func (s *Span) End() { + if s == nil { + return + } + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } + if !s.IsRecordingEvents() { + return + } + s.endOnce.Do(func() { + exp, _ := exporters.Load().(exportersMap) + mustExport := s.spanContext.IsSampled() && len(exp) > 0 + if s.spanStore != nil || mustExport { + sd := s.makeSpanData() + sd.EndTime = internal.MonotonicEndTime(sd.StartTime) + if s.spanStore != nil { + s.spanStore.finished(s, sd) + } + if mustExport { + for e := range exp { + e.ExportSpan(sd) + } + } + } + }) +} + +// makeSpanData produces a SpanData representing the current state of the Span. +// It requires that s.data is non-nil. 
+func (s *Span) makeSpanData() *SpanData { + var sd SpanData + s.mu.Lock() + sd = *s.data + if s.lruAttributes.simpleLruMap.Len() > 0 { + sd.Attributes = s.lruAttributesToAttributeMap() + sd.DroppedAttributeCount = s.lruAttributes.droppedCount + } + if len(s.annotations.queue) > 0 { + sd.Annotations = s.interfaceArrayToAnnotationArray() + sd.DroppedAnnotationCount = s.annotations.droppedCount + } + if len(s.messageEvents.queue) > 0 { + sd.MessageEvents = s.interfaceArrayToMessageEventArray() + sd.DroppedMessageEventCount = s.messageEvents.droppedCount + } + if len(s.links.queue) > 0 { + sd.Links = s.interfaceArrayToLinksArray() + sd.DroppedLinkCount = s.links.droppedCount + } + s.mu.Unlock() + return &sd +} + +// SpanContext returns the SpanContext of the span. +func (s *Span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.spanContext +} + +// SetName sets the name of the span, if it is recording events. +func (s *Span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Name = name + s.mu.Unlock() +} + +// SetStatus sets the status of the span, if it is recording events. +func (s *Span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Status = status + s.mu.Unlock() +} + +func (s *Span) interfaceArrayToLinksArray() []Link { + linksArr := make([]Link, 0) + for _, value := range s.links.queue { + linksArr = append(linksArr, value.(Link)) + } + return linksArr +} + +func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { + messageEventArr := make([]MessageEvent, 0) + for _, value := range s.messageEvents.queue { + messageEventArr = append(messageEventArr, value.(MessageEvent)) + } + return messageEventArr +} + +func (s *Span) interfaceArrayToAnnotationArray() []Annotation { + annotationArr := make([]Annotation, 0) + for _, value := range s.annotations.queue { + annotationArr = append(annotationArr, value.(Annotation)) + } + return annotationArr +} + +func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { + attributes := make(map[string]interface{}) + for _, key := range s.lruAttributes.simpleLruMap.Keys() { + value, ok := s.lruAttributes.simpleLruMap.Get(key) + if ok { + keyStr := key.(string) + attributes[keyStr] = value + } + } + return attributes +} + +func (s *Span) copyToCappedAttributes(attributes []Attribute) { + for _, a := range attributes { + s.lruAttributes.add(a.key, a.value) + } +} + +func (s *Span) addChild() { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.ChildSpanCount++ + s.mu.Unlock() +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *Span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.copyToCappedAttributes(attributes) + s.mu.Unlock() +} + +// copyAttributes copies a slice of Attributes into a map. +func copyAttributes(m map[string]interface{}, attributes []Attribute) { + for _, a := range attributes { + m[a.key] = a.value + } +} + +func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) { + now := time.Now() + msg := fmt.Sprintf(format, a...) 
+ var m map[string]interface{} + s.mu.Lock() + if len(attributes) != 0 { + m = make(map[string]interface{}) + copyAttributes(m, attributes) + } + s.annotations.add(Annotation{ + Time: now, + Message: msg, + Attributes: m, + }) + s.mu.Unlock() +} + +func (s *Span) printStringInternal(attributes []Attribute, str string) { + now := time.Now() + var a map[string]interface{} + s.mu.Lock() + if len(attributes) != 0 { + a = make(map[string]interface{}) + copyAttributes(a, attributes) + } + s.annotations.add(Annotation{ + Time: now, + Message: str, + Attributes: a, + }) + s.mu.Unlock() +} + +// Annotate adds an annotation with attributes. +// Attributes can be nil. +func (s *Span) Annotate(attributes []Attribute, str string) { + if !s.IsRecordingEvents() { + return + } + s.printStringInternal(attributes, str) +} + +// Annotatef adds an annotation with attributes. +func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { + if !s.IsRecordingEvents() { + return + } + s.lazyPrintfInternal(attributes, format, a...) +} + +// AddMessageSendEvent adds a message send event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.messageEvents.add(MessageEvent{ + Time: now, + EventType: MessageEventTypeSent, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddMessageReceiveEvent adds a message receive event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.messageEvents.add(MessageEvent{ + Time: now, + EventType: MessageEventTypeRecv, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddLink adds a link to the span. +func (s *Span) AddLink(l Link) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.links.add(l) + s.mu.Unlock() +} + +func (s *Span) String() string { + if s == nil { + return "" + } + if s.data == nil { + return fmt.Sprintf("span %s", s.spanContext.SpanID) + } + s.mu.Lock() + str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name) + s.mu.Unlock() + return str +} + +var config atomic.Value // access atomically + +func init() { + gen := &defaultIDGenerator{} + // initialize traceID and spanID generators. 
+ var rngSeed int64 + for _, p := range []interface{}{ + &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, + } { + binary.Read(crand.Reader, binary.LittleEndian, p) + } + gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) + gen.spanIDInc |= 1 + + config.Store(&Config{ + DefaultSampler: ProbabilitySampler(defaultSamplingProbability), + IDGenerator: gen, + MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, + MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, + MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, + MaxLinksPerSpan: DefaultMaxLinksPerSpan, + }) +} + +type defaultIDGenerator struct { + sync.Mutex + + // Please keep these as the first fields + // so that these 8 byte fields will be aligned on addresses + // divisible by 8, on both 32-bit and 64-bit machines when + // performing atomic increments and accesses. + // See: + // * https://github.com/census-instrumentation/opencensus-go/issues/587 + // * https://github.com/census-instrumentation/opencensus-go/issues/865 + // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG + nextSpanID uint64 + spanIDInc uint64 + + traceIDAdd [2]uint64 + traceIDRand *rand.Rand +} + +// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. +func (gen *defaultIDGenerator) NewSpanID() [8]byte { + var id uint64 + for id == 0 { + id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc) + } + var sid [8]byte + binary.LittleEndian.PutUint64(sid[:], id) + return sid +} + +// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence. +// mu should be held while this function is called. +func (gen *defaultIDGenerator) NewTraceID() [16]byte { + var tid [16]byte + // Construct the trace ID from two outputs of traceIDRand, with a constant + // added to each half for additional entropy. + gen.Lock() + binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0]) + binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1]) + gen.Unlock() + return tid +} diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go new file mode 100644 index 0000000000000..b7d8aaf284778 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -0,0 +1,32 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.11 + +package trace + +import ( + "context" + t "runtime/trace" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + if !t.IsEnabled() { + // Avoid additional overhead if + // runtime/trace is not enabled. 
+ return ctx, func() {} + } + nctx, task := t.NewTask(ctx, name) + return nctx, task.End +} diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go new file mode 100644 index 0000000000000..e25419859c02a --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.11 + +package trace + +import ( + "context" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + return ctx, func() {} +} diff --git a/vendor/go.opencensus.io/trace/tracestate/BUILD.bazel b/vendor/go.opencensus.io/trace/tracestate/BUILD.bazel new file mode 100644 index 0000000000000..99fce246ccbd7 --- /dev/null +++ b/vendor/go.opencensus.io/trace/tracestate/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["tracestate.go"], + importmap = "k8s.io/kops/vendor/go.opencensus.io/trace/tracestate", + importpath = "go.opencensus.io/trace/tracestate", + visibility = ["//visibility:public"], +) diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go new file mode 100644 index 0000000000000..2d6c713eb3a19 --- /dev/null +++ b/vendor/go.opencensus.io/trace/tracestate/tracestate.go @@ -0,0 +1,147 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracestate implements support for the Tracestate header of the +// W3C TraceContext propagation format. +package tracestate + +import ( + "fmt" + "regexp" +) + +const ( + keyMaxSize = 256 + valueMaxSize = 256 + maxKeyValuePairs = 32 +) + +const ( + keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` +) + +var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) +var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) + +// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different +// vendors propagate additional information and inter-operate with their legacy Id formats. 
+type Tracestate struct { + entries []Entry +} + +// Entry represents one key-value pair in a Tracestate's list of key-value pairs. +type Entry struct { + // Key is an opaque string of up to 256 printable characters. It MUST begin with a lowercase letter, + // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and + // forward slashes /. + Key string + + // Value is an opaque string of up to 256 printable ASCII (RFC0020) characters (i.e., the + // range 0x20 to 0x7E) except comma (,) and equals (=). + Value string +} + +// Entries returns a slice of Entry. +func (ts *Tracestate) Entries() []Entry { + if ts == nil { + return nil + } + return ts.entries +} + +func (ts *Tracestate) remove(key string) *Entry { + for index, entry := range ts.entries { + if entry.Key == key { + ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) + return &entry + } + } + return nil +} + +func (ts *Tracestate) add(entries []Entry) error { + for _, entry := range entries { + ts.remove(entry.Key) + } + if len(ts.entries)+len(entries) > maxKeyValuePairs { + return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", + len(entries), len(ts.entries), maxKeyValuePairs) + } + ts.entries = append(entries, ts.entries...) + return nil +} + +func isValid(entry Entry) bool { + return keyValidationRegExp.MatchString(entry.Key) && + valueValidationRegExp.MatchString(entry.Value) +} + +func containsDuplicateKey(entries ...Entry) (string, bool) { + keyMap := make(map[string]int) + for _, entry := range entries { + if _, ok := keyMap[entry.Key]; ok { + return entry.Key, true + } + keyMap[entry.Key] = 1 + } + return "", false +} + +func areEntriesValid(entries ...Entry) (*Entry, bool) { + for _, entry := range entries { + if !isValid(entry) { + return &entry, false + } + } + return nil, true +} + +// New creates a Tracestate object from a parent and/or entries (key-value pairs). +// Entries from the parent are copied if present. The entries passed to this function +// are inserted in front of those copied from the parent. If an entry copied from the +// parent contains the same key as one of the entries in entries, then the entry copied +// from the parent is removed. See the add func. +// +// An error is returned with a nil Tracestate if +// 1. one or more of the entries is invalid. +// 2. two or more entries in the input entries have the same key. +// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs +// (a duplicate entry is counted only once). +func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { + if parent == nil && len(entries) == 0 { + return nil, nil + } + if entry, ok := areEntriesValid(entries...); !ok { + return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) + } + + if key, duplicate := containsDuplicateKey(entries...); duplicate { + return nil, fmt.Errorf("contains duplicate keys (%s)", key) + } + + tracestate := Tracestate{} + + if parent != nil && len(parent.entries) > 0 { + tracestate.entries = append([]Entry{}, parent.entries...)
+ } + + err := tracestate.add(entries) + if err != nil { + return nil, err + } + return &tracestate, nil +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/BUILD.bazel b/vendor/golang.org/x/crypto/internal/chacha20/BUILD.bazel index 97fb2ddf5f764..081ba8161ae99 100644 --- a/vendor/golang.org/x/crypto/internal/chacha20/BUILD.bazel +++ b/vendor/golang.org/x/crypto/internal/chacha20/BUILD.bazel @@ -4,9 +4,11 @@ go_library( name = "go_default_library", srcs = [ "asm_arm64.s", + "asm_ppc64le.s", "chacha_arm64.go", "chacha_generic.go", "chacha_noasm.go", + "chacha_ppc64le.go", "chacha_s390x.go", "chacha_s390x.s", "xor.go", diff --git a/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s b/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s new file mode 100644 index 0000000000000..cde3fc989b729 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s @@ -0,0 +1,668 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on CRYPTOGAMS code with the following comment: +// # ==================================================================== +// # Written by Andy Polyakov for the OpenSSL +// # project. The module is, however, dual licensed under OpenSSL and +// # CRYPTOGAMS licenses depending on where you obtain it. For further +// # details see http://www.openssl.org/~appro/cryptogams/. +// # ==================================================================== + +// Original code can be found at the link below: +// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91e5c39ca79126a4a876d5d8ff + +// There are some differences between CRYPTOGAMS code and this one. The round +// loop for "_int" isn't the same as the original. Some adjustments were +// necessary because there are fewer vector registers available. For example, some +// X variables (r12, r13, r14, and r15) share the same register used by the +// counter. The original code uses ctr to name the counter. Here we use CNT +// because golang uses CTR as the counter register name.
+ +// +build ppc64le,!gccgo,!appengine + +#include "textflag.h" + +#define OUT R3 +#define INP R4 +#define LEN R5 +#define KEY R6 +#define CNT R7 + +#define TEMP R8 + +#define X0 R11 +#define X1 R12 +#define X2 R14 +#define X3 R15 +#define X4 R16 +#define X5 R17 +#define X6 R18 +#define X7 R19 +#define X8 R20 +#define X9 R21 +#define X10 R22 +#define X11 R23 +#define X12 R24 +#define X13 R25 +#define X14 R26 +#define X15 R27 + +#define CON0 X0 +#define CON1 X1 +#define CON2 X2 +#define CON3 X3 + +#define KEY0 X4 +#define KEY1 X5 +#define KEY2 X6 +#define KEY3 X7 +#define KEY4 X8 +#define KEY5 X9 +#define KEY6 X10 +#define KEY7 X11 + +#define CNT0 X12 +#define CNT1 X13 +#define CNT2 X14 +#define CNT3 X15 + +#define TMP0 R9 +#define TMP1 R10 +#define TMP2 R28 +#define TMP3 R29 + +#define CONSTS R8 + +#define A0 V0 +#define B0 V1 +#define C0 V2 +#define D0 V3 +#define A1 V4 +#define B1 V5 +#define C1 V6 +#define D1 V7 +#define A2 V8 +#define B2 V9 +#define C2 V10 +#define D2 V11 +#define T0 V12 +#define T1 V13 +#define T2 V14 + +#define K0 V15 +#define K1 V16 +#define K2 V17 +#define K3 V18 +#define K4 V19 +#define K5 V20 + +#define FOUR V21 +#define SIXTEEN V22 +#define TWENTY4 V23 +#define TWENTY V24 +#define TWELVE V25 +#define TWENTY5 V26 +#define SEVEN V27 + +#define INPPERM V28 +#define OUTPERM V29 +#define OUTMASK V30 + +#define DD0 V31 +#define DD1 SEVEN +#define DD2 T0 +#define DD3 T1 +#define DD4 T2 + +DATA ·consts+0x00(SB)/8, $0x3320646e61707865 +DATA ·consts+0x08(SB)/8, $0x6b20657479622d32 +DATA ·consts+0x10(SB)/8, $0x0000000000000001 +DATA ·consts+0x18(SB)/8, $0x0000000000000000 +DATA ·consts+0x20(SB)/8, $0x0000000000000004 +DATA ·consts+0x28(SB)/8, $0x0000000000000000 +DATA ·consts+0x30(SB)/8, $0x0a0b08090e0f0c0d +DATA ·consts+0x38(SB)/8, $0x0203000106070405 +DATA ·consts+0x40(SB)/8, $0x090a0b080d0e0f0c +DATA ·consts+0x48(SB)/8, $0x0102030005060704 +GLOBL ·consts(SB), RODATA, $80 + +//func chaCha20_ctr32_vmx(out, inp *byte, len int, key *[32]byte, counter *[16]byte) +TEXT ·chaCha20_ctr32_vmx(SB),NOSPLIT|NOFRAME,$0 + // Load the arguments inside the registers + MOVD out+0(FP), OUT + MOVD inp+8(FP), INP + MOVD len+16(FP), LEN + MOVD key+24(FP), KEY + MOVD counter+32(FP), CNT + + MOVD $·consts(SB), CONSTS // point to consts addr + + MOVD $16, X0 + MOVD $32, X1 + MOVD $48, X2 + MOVD $64, X3 + MOVD $31, X4 + MOVD $15, X5 + + // Load key + LVX (KEY)(R0), K1 + LVSR (KEY)(R0), T0 + LVX (KEY)(X0), K2 + LVX (KEY)(X4), DD0 + + // Load counter + LVX (CNT)(R0), K3 + LVSR (CNT)(R0), T1 + LVX (CNT)(X5), DD1 + + // Load constants + LVX (CONSTS)(R0), K0 + LVX (CONSTS)(X0), K5 + LVX (CONSTS)(X1), FOUR + LVX (CONSTS)(X2), SIXTEEN + LVX (CONSTS)(X3), TWENTY4 + + // Align key and counter + VPERM K2, K1, T0, K1 + VPERM DD0, K2, T0, K2 + VPERM DD1, K3, T1, K3 + + // Load counter to GPR + MOVWZ 0(CNT), CNT0 + MOVWZ 4(CNT), CNT1 + MOVWZ 8(CNT), CNT2 + MOVWZ 12(CNT), CNT3 + + // Adjust vectors for the initial state + VADDUWM K3, K5, K3 + VADDUWM K3, K5, K4 + VADDUWM K4, K5, K5 + + // Synthesized constants + VSPLTISW $-12, TWENTY + VSPLTISW $12, TWELVE + VSPLTISW $-7, TWENTY5 + + VXOR T0, T0, T0 + VSPLTISW $-1, OUTMASK + LVSR (INP)(R0), INPPERM + LVSL (OUT)(R0), OUTPERM + VPERM OUTMASK, T0, OUTPERM, OUTMASK + +loop_outer_vmx: + // Load constant + MOVD $0x61707865, CON0 + MOVD $0x3320646e, CON1 + MOVD $0x79622d32, CON2 + MOVD $0x6b206574, CON3 + + VOR K0, K0, A0 + VOR K0, K0, A1 + VOR K0, K0, A2 + VOR K1, K1, B0 + + MOVD $10, TEMP + + // Load key to GPR + MOVWZ 0(KEY), X4 + MOVWZ 4(KEY), X5 + MOVWZ 8(KEY), 
X6 + MOVWZ 12(KEY), X7 + VOR K1, K1, B1 + VOR K1, K1, B2 + MOVWZ 16(KEY), X8 + MOVWZ 0(CNT), X12 + MOVWZ 20(KEY), X9 + MOVWZ 4(CNT), X13 + VOR K2, K2, C0 + VOR K2, K2, C1 + MOVWZ 24(KEY), X10 + MOVWZ 8(CNT), X14 + VOR K2, K2, C2 + VOR K3, K3, D0 + MOVWZ 28(KEY), X11 + MOVWZ 12(CNT), X15 + VOR K4, K4, D1 + VOR K5, K5, D2 + + MOVD X4, TMP0 + MOVD X5, TMP1 + MOVD X6, TMP2 + MOVD X7, TMP3 + VSPLTISW $7, SEVEN + + MOVD TEMP, CTR + +loop_vmx: + // CRYPTOGAMS uses a macro to create a loop using perl. This isn't possible + // using assembly macros. Therefore, the macro expansion result was used + // in order to maintain the algorithm efficiency. + // This loop generates three keystream blocks using VMX instructions and, + // in parallel, one keystream block using scalar instructions. + ADD X4, X0, X0 + ADD X5, X1, X1 + VADDUWM A0, B0, A0 + VADDUWM A1, B1, A1 + ADD X6, X2, X2 + ADD X7, X3, X3 + VADDUWM A2, B2, A2 + VXOR D0, A0, D0 + XOR X0, X12, X12 + XOR X1, X13, X13 + VXOR D1, A1, D1 + VXOR D2, A2, D2 + XOR X2, X14, X14 + XOR X3, X15, X15 + VPERM D0, D0, SIXTEEN, D0 + VPERM D1, D1, SIXTEEN, D1 + ROTLW $16, X12, X12 + ROTLW $16, X13, X13 + VPERM D2, D2, SIXTEEN, D2 + VADDUWM C0, D0, C0 + ROTLW $16, X14, X14 + ROTLW $16, X15, X15 + VADDUWM C1, D1, C1 + VADDUWM C2, D2, C2 + ADD X12, X8, X8 + ADD X13, X9, X9 + VXOR B0, C0, T0 + VXOR B1, C1, T1 + ADD X14, X10, X10 + ADD X15, X11, X11 + VXOR B2, C2, T2 + VRLW T0, TWELVE, B0 + XOR X8, X4, X4 + XOR X9, X5, X5 + VRLW T1, TWELVE, B1 + VRLW T2, TWELVE, B2 + XOR X10, X6, X6 + XOR X11, X7, X7 + VADDUWM A0, B0, A0 + VADDUWM A1, B1, A1 + ROTLW $12, X4, X4 + ROTLW $12, X5, X5 + VADDUWM A2, B2, A2 + VXOR D0, A0, D0 + ROTLW $12, X6, X6 + ROTLW $12, X7, X7 + VXOR D1, A1, D1 + VXOR D2, A2, D2 + ADD X4, X0, X0 + ADD X5, X1, X1 + VPERM D0, D0, TWENTY4, D0 + VPERM D1, D1, TWENTY4, D1 + ADD X6, X2, X2 + ADD X7, X3, X3 + VPERM D2, D2, TWENTY4, D2 + VADDUWM C0, D0, C0 + XOR X0, X12, X12 + XOR X1, X13, X13 + VADDUWM C1, D1, C1 + VADDUWM C2, D2, C2 + XOR X2, X14, X14 + XOR X3, X15, X15 + VXOR B0, C0, T0 + VXOR B1, C1, T1 + ROTLW $8, X12, X12 + ROTLW $8, X13, X13 + VXOR B2, C2, T2 + VRLW T0, SEVEN, B0 + ROTLW $8, X14, X14 + ROTLW $8, X15, X15 + VRLW T1, SEVEN, B1 + VRLW T2, SEVEN, B2 + ADD X12, X8, X8 + ADD X13, X9, X9 + VSLDOI $8, C0, C0, C0 + VSLDOI $8, C1, C1, C1 + ADD X14, X10, X10 + ADD X15, X11, X11 + VSLDOI $8, C2, C2, C2 + VSLDOI $12, B0, B0, B0 + XOR X8, X4, X4 + XOR X9, X5, X5 + VSLDOI $12, B1, B1, B1 + VSLDOI $12, B2, B2, B2 + XOR X10, X6, X6 + XOR X11, X7, X7 + VSLDOI $4, D0, D0, D0 + VSLDOI $4, D1, D1, D1 + ROTLW $7, X4, X4 + ROTLW $7, X5, X5 + VSLDOI $4, D2, D2, D2 + VADDUWM A0, B0, A0 + ROTLW $7, X6, X6 + ROTLW $7, X7, X7 + VADDUWM A1, B1, A1 + VADDUWM A2, B2, A2 + ADD X5, X0, X0 + ADD X6, X1, X1 + VXOR D0, A0, D0 + VXOR D1, A1, D1 + ADD X7, X2, X2 + ADD X4, X3, X3 + VXOR D2, A2, D2 + VPERM D0, D0, SIXTEEN, D0 + XOR X0, X15, X15 + XOR X1, X12, X12 + VPERM D1, D1, SIXTEEN, D1 + VPERM D2, D2, SIXTEEN, D2 + XOR X2, X13, X13 + XOR X3, X14, X14 + VADDUWM C0, D0, C0 + VADDUWM C1, D1, C1 + ROTLW $16, X15, X15 + ROTLW $16, X12, X12 + VADDUWM C2, D2, C2 + VXOR B0, C0, T0 + ROTLW $16, X13, X13 + ROTLW $16, X14, X14 + VXOR B1, C1, T1 + VXOR B2, C2, T2 + ADD X15, X10, X10 + ADD X12, X11, X11 + VRLW T0, TWELVE, B0 + VRLW T1, TWELVE, B1 + ADD X13, X8, X8 + ADD X14, X9, X9 + VRLW T2, TWELVE, B2 + VADDUWM A0, B0, A0 + XOR X10, X5, X5 + XOR X11, X6, X6 + VADDUWM A1, B1, A1 + VADDUWM A2, B2, A2 + XOR X8, X7, X7 + XOR X9, X4, X4 + VXOR D0, A0, D0 + VXOR D1, A1, D1 + ROTLW $12, 
X5, X5 + ROTLW $12, X6, X6 + VXOR D2, A2, D2 + VPERM D0, D0, TWENTY4, D0 + ROTLW $12, X7, X7 + ROTLW $12, X4, X4 + VPERM D1, D1, TWENTY4, D1 + VPERM D2, D2, TWENTY4, D2 + ADD X5, X0, X0 + ADD X6, X1, X1 + VADDUWM C0, D0, C0 + VADDUWM C1, D1, C1 + ADD X7, X2, X2 + ADD X4, X3, X3 + VADDUWM C2, D2, C2 + VXOR B0, C0, T0 + XOR X0, X15, X15 + XOR X1, X12, X12 + VXOR B1, C1, T1 + VXOR B2, C2, T2 + XOR X2, X13, X13 + XOR X3, X14, X14 + VRLW T0, SEVEN, B0 + VRLW T1, SEVEN, B1 + ROTLW $8, X15, X15 + ROTLW $8, X12, X12 + VRLW T2, SEVEN, B2 + VSLDOI $8, C0, C0, C0 + ROTLW $8, X13, X13 + ROTLW $8, X14, X14 + VSLDOI $8, C1, C1, C1 + VSLDOI $8, C2, C2, C2 + ADD X15, X10, X10 + ADD X12, X11, X11 + VSLDOI $4, B0, B0, B0 + VSLDOI $4, B1, B1, B1 + ADD X13, X8, X8 + ADD X14, X9, X9 + VSLDOI $4, B2, B2, B2 + VSLDOI $12, D0, D0, D0 + XOR X10, X5, X5 + XOR X11, X6, X6 + VSLDOI $12, D1, D1, D1 + VSLDOI $12, D2, D2, D2 + XOR X8, X7, X7 + XOR X9, X4, X4 + ROTLW $7, X5, X5 + ROTLW $7, X6, X6 + ROTLW $7, X7, X7 + ROTLW $7, X4, X4 + BC 0x10, 0, loop_vmx + + SUB $256, LEN, LEN + + // Accumulate key block + ADD $0x61707865, X0, X0 + ADD $0x3320646e, X1, X1 + ADD $0x79622d32, X2, X2 + ADD $0x6b206574, X3, X3 + ADD TMP0, X4, X4 + ADD TMP1, X5, X5 + ADD TMP2, X6, X6 + ADD TMP3, X7, X7 + MOVWZ 16(KEY), TMP0 + MOVWZ 20(KEY), TMP1 + MOVWZ 24(KEY), TMP2 + MOVWZ 28(KEY), TMP3 + ADD TMP0, X8, X8 + ADD TMP1, X9, X9 + ADD TMP2, X10, X10 + ADD TMP3, X11, X11 + + MOVWZ 12(CNT), TMP0 + MOVWZ 8(CNT), TMP1 + MOVWZ 4(CNT), TMP2 + MOVWZ 0(CNT), TEMP + ADD TMP0, X15, X15 + ADD TMP1, X14, X14 + ADD TMP2, X13, X13 + ADD TEMP, X12, X12 + + // Accumulate key block + VADDUWM A0, K0, A0 + VADDUWM A1, K0, A1 + VADDUWM A2, K0, A2 + VADDUWM B0, K1, B0 + VADDUWM B1, K1, B1 + VADDUWM B2, K1, B2 + VADDUWM C0, K2, C0 + VADDUWM C1, K2, C1 + VADDUWM C2, K2, C2 + VADDUWM D0, K3, D0 + VADDUWM D1, K4, D1 + VADDUWM D2, K5, D2 + + // Increment counter + ADD $4, TEMP, TEMP + MOVW TEMP, 0(CNT) + + VADDUWM K3, FOUR, K3 + VADDUWM K4, FOUR, K4 + VADDUWM K5, FOUR, K5 + + // XOR the input slice (INP) with the keystream, which is stored in GPRs (X0-X3). 
+ + // Load input (aligned or not) + MOVWZ 0(INP), TMP0 + MOVWZ 4(INP), TMP1 + MOVWZ 8(INP), TMP2 + MOVWZ 12(INP), TMP3 + + // XOR with input + XOR TMP0, X0, X0 + XOR TMP1, X1, X1 + XOR TMP2, X2, X2 + XOR TMP3, X3, X3 + MOVWZ 16(INP), TMP0 + MOVWZ 20(INP), TMP1 + MOVWZ 24(INP), TMP2 + MOVWZ 28(INP), TMP3 + XOR TMP0, X4, X4 + XOR TMP1, X5, X5 + XOR TMP2, X6, X6 + XOR TMP3, X7, X7 + MOVWZ 32(INP), TMP0 + MOVWZ 36(INP), TMP1 + MOVWZ 40(INP), TMP2 + MOVWZ 44(INP), TMP3 + XOR TMP0, X8, X8 + XOR TMP1, X9, X9 + XOR TMP2, X10, X10 + XOR TMP3, X11, X11 + MOVWZ 48(INP), TMP0 + MOVWZ 52(INP), TMP1 + MOVWZ 56(INP), TMP2 + MOVWZ 60(INP), TMP3 + XOR TMP0, X12, X12 + XOR TMP1, X13, X13 + XOR TMP2, X14, X14 + XOR TMP3, X15, X15 + + // Store output (aligned or not) + MOVW X0, 0(OUT) + MOVW X1, 4(OUT) + MOVW X2, 8(OUT) + MOVW X3, 12(OUT) + + ADD $64, INP, INP // INP points to the end of the slice for the alignment code below + + MOVW X4, 16(OUT) + MOVD $16, TMP0 + MOVW X5, 20(OUT) + MOVD $32, TMP1 + MOVW X6, 24(OUT) + MOVD $48, TMP2 + MOVW X7, 28(OUT) + MOVD $64, TMP3 + MOVW X8, 32(OUT) + MOVW X9, 36(OUT) + MOVW X10, 40(OUT) + MOVW X11, 44(OUT) + MOVW X12, 48(OUT) + MOVW X13, 52(OUT) + MOVW X14, 56(OUT) + MOVW X15, 60(OUT) + ADD $64, OUT, OUT + + // Load input + LVX (INP)(R0), DD0 + LVX (INP)(TMP0), DD1 + LVX (INP)(TMP1), DD2 + LVX (INP)(TMP2), DD3 + LVX (INP)(TMP3), DD4 + ADD $64, INP, INP + + VPERM DD1, DD0, INPPERM, DD0 // Align input + VPERM DD2, DD1, INPPERM, DD1 + VPERM DD3, DD2, INPPERM, DD2 + VPERM DD4, DD3, INPPERM, DD3 + VXOR A0, DD0, A0 // XOR with input + VXOR B0, DD1, B0 + LVX (INP)(TMP0), DD1 // Keep loading input + VXOR C0, DD2, C0 + LVX (INP)(TMP1), DD2 + VXOR D0, DD3, D0 + LVX (INP)(TMP2), DD3 + LVX (INP)(TMP3), DD0 + ADD $64, INP, INP + MOVD $63, TMP3 // 63 is not a typo + VPERM A0, A0, OUTPERM, A0 + VPERM B0, B0, OUTPERM, B0 + VPERM C0, C0, OUTPERM, C0 + VPERM D0, D0, OUTPERM, D0 + + VPERM DD1, DD4, INPPERM, DD4 // Align input + VPERM DD2, DD1, INPPERM, DD1 + VPERM DD3, DD2, INPPERM, DD2 + VPERM DD0, DD3, INPPERM, DD3 + VXOR A1, DD4, A1 + VXOR B1, DD1, B1 + LVX (INP)(TMP0), DD1 // Keep loading + VXOR C1, DD2, C1 + LVX (INP)(TMP1), DD2 + VXOR D1, DD3, D1 + LVX (INP)(TMP2), DD3 + + // Note that the LVX address is always rounded down to the nearest 16-byte + // boundary, and that it always points to at most 15 bytes beyond the end of + // the slice, so we cannot cross a page boundary. + LVX (INP)(TMP3), DD4 // Redundant in aligned case. + ADD $64, INP, INP + VPERM A1, A1, OUTPERM, A1 // Pre-misalign output + VPERM B1, B1, OUTPERM, B1 + VPERM C1, C1, OUTPERM, C1 + VPERM D1, D1, OUTPERM, D1 + + VPERM DD1, DD0, INPPERM, DD0 // Align Input + VPERM DD2, DD1, INPPERM, DD1 + VPERM DD3, DD2, INPPERM, DD2 + VPERM DD4, DD3, INPPERM, DD3 + VXOR A2, DD0, A2 + VXOR B2, DD1, B2 + VXOR C2, DD2, C2 + VXOR D2, DD3, D2 + VPERM A2, A2, OUTPERM, A2 + VPERM B2, B2, OUTPERM, B2 + VPERM C2, C2, OUTPERM, C2 + VPERM D2, D2, OUTPERM, D2 + + ANDCC $15, OUT, X1 // Is out aligned? 
+ MOVD OUT, X0 + + VSEL A0, B0, OUTMASK, DD0 // Collect pre-misaligned output + VSEL B0, C0, OUTMASK, DD1 + VSEL C0, D0, OUTMASK, DD2 + VSEL D0, A1, OUTMASK, DD3 + VSEL A1, B1, OUTMASK, B0 + VSEL B1, C1, OUTMASK, C0 + VSEL C1, D1, OUTMASK, D0 + VSEL D1, A2, OUTMASK, A1 + VSEL A2, B2, OUTMASK, B1 + VSEL B2, C2, OUTMASK, C1 + VSEL C2, D2, OUTMASK, D1 + + STVX DD0, (OUT+TMP0) + STVX DD1, (OUT+TMP1) + STVX DD2, (OUT+TMP2) + ADD $64, OUT, OUT + STVX DD3, (OUT+R0) + STVX B0, (OUT+TMP0) + STVX C0, (OUT+TMP1) + STVX D0, (OUT+TMP2) + ADD $64, OUT, OUT + STVX A1, (OUT+R0) + STVX B1, (OUT+TMP0) + STVX C1, (OUT+TMP1) + STVX D1, (OUT+TMP2) + ADD $64, OUT, OUT + + BEQ aligned_vmx + + SUB X1, OUT, X2 // in misaligned case edges + MOVD $0, X3 // are written byte-by-byte + +unaligned_tail_vmx: + STVEBX D2, (X2+X3) + ADD $1, X3, X3 + CMPW X3, X1 + BNE unaligned_tail_vmx + SUB X1, X0, X2 + +unaligned_head_vmx: + STVEBX A0, (X2+X1) + CMPW X1, $15 + ADD $1, X1, X1 + BNE unaligned_head_vmx + + CMPU LEN, $255 // done with 256-byte block yet? + BGT loop_outer_vmx + + JMP done_vmx + +aligned_vmx: + STVX A0, (X0+R0) + CMPU LEN, $255 // done with 256-byte block yet? + BGT loop_outer_vmx + +done_vmx: + RET diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go index 47eac0314c9d0..bf8beba67084b 100644 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !arm64,!s390x arm64,!go1.11 gccgo appengine +// +build !ppc64le,!arm64,!s390x arm64,!go1.11 gccgo appengine package chacha20 diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go new file mode 100644 index 0000000000000..638cb5e5de53a --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go @@ -0,0 +1,52 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64le,!gccgo,!appengine + +package chacha20 + +import "encoding/binary" + +const ( + bufSize = 256 + haveAsm = true +) + +//go:noescape +func chaCha20_ctr32_vmx(out, inp *byte, len int, key *[8]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamAsm(dst, src []byte) { + if len(src) >= bufSize { + chaCha20_ctr32_vmx(&dst[0], &src[0], len(src)-len(src)%bufSize, &c.key, &c.counter) + } + if len(src)%bufSize != 0 { + chaCha20_ctr32_vmx(&c.buf[0], &c.buf[0], bufSize, &c.key, &c.counter) + start := len(src) - len(src)%bufSize + ts, td, tb := src[start:], dst[start:], c.buf[:] + // Unroll loop to XOR 32 bytes per iteration. 
+ for i := 0; i < len(ts)-32; i += 32 { + td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination + s0 := binary.LittleEndian.Uint64(ts[0:8]) + s1 := binary.LittleEndian.Uint64(ts[8:16]) + s2 := binary.LittleEndian.Uint64(ts[16:24]) + s3 := binary.LittleEndian.Uint64(ts[24:32]) + b0 := binary.LittleEndian.Uint64(tb[0:8]) + b1 := binary.LittleEndian.Uint64(tb[8:16]) + b2 := binary.LittleEndian.Uint64(tb[16:24]) + b3 := binary.LittleEndian.Uint64(tb[24:32]) + binary.LittleEndian.PutUint64(td[0:8], s0^b0) + binary.LittleEndian.PutUint64(td[8:16], s1^b1) + binary.LittleEndian.PutUint64(td[16:24], s2^b2) + binary.LittleEndian.PutUint64(td[24:32], s3^b3) + ts, td, tb = ts[32:], td[32:], tb[32:] + } + td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination + for i, v := range ts { + td[i] = tb[i] ^ v + } + c.len = bufSize - (len(src) % bufSize) + + } + +} diff --git a/vendor/golang.org/x/crypto/poly1305/BUILD.bazel b/vendor/golang.org/x/crypto/poly1305/BUILD.bazel index 66c5d2b9ab139..cc5bdca07c624 100644 --- a/vendor/golang.org/x/crypto/poly1305/BUILD.bazel +++ b/vendor/golang.org/x/crypto/poly1305/BUILD.bazel @@ -11,6 +11,8 @@ go_library( "sum_arm.s", "sum_generic.go", "sum_noasm.go", + "sum_ppc64le.go", + "sum_ppc64le.s", "sum_s390x.go", "sum_s390x.s", "sum_vmsl_s390x.s", diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go index 8387d29998feb..a8dd589ae3906 100644 --- a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 gccgo appengine +// +build !amd64,!ppc64le gccgo appengine package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go index fcdef46ab6e7c..8a9c2070b9f2f 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build s390x,!go1.11 !arm,!amd64,!s390x gccgo appengine nacl +// +build s390x,!go1.11 !arm,!amd64,!s390x,!ppc64le gccgo appengine nacl package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go new file mode 100644 index 0000000000000..2402b6371bfcc --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go @@ -0,0 +1,68 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64le,!gccgo,!appengine + +package poly1305 + +//go:noescape +func initialize(state *[7]uint64, key *[32]byte) + +//go:noescape +func update(state *[7]uint64, msg []byte) + +//go:noescape +func finalize(tag *[TagSize]byte, state *[7]uint64) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. 
+func Sum(out *[16]byte, m []byte, key *[32]byte) { + h := newMAC(key) + h.Write(m) + h.Sum(out) +} + +func newMAC(key *[32]byte) (h mac) { + initialize(&h.state, key) + return +} + +type mac struct { + state [7]uint64 // := uint64{ h0, h1, h2, r0, r1, pad0, pad1 } + + buffer [TagSize]byte + offset int +} + +func (h *mac) Write(p []byte) (n int, err error) { + n = len(p) + if h.offset > 0 { + remaining := TagSize - h.offset + if n < remaining { + h.offset += copy(h.buffer[h.offset:], p) + return n, nil + } + copy(h.buffer[h.offset:], p[:remaining]) + p = p[remaining:] + h.offset = 0 + update(&h.state, h.buffer[:]) + } + if nn := len(p) - (len(p) % TagSize); nn > 0 { + update(&h.state, p[:nn]) + p = p[nn:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return n, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.state + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s new file mode 100644 index 0000000000000..55c7167ec9813 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s @@ -0,0 +1,247 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64le,!gccgo,!appengine + +#include "textflag.h" + +// This was ported from the amd64 implementation. + +#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ + MOVD (msg), t0; \ + MOVD 8(msg), t1; \ + MOVD $1, t2; \ + ADDC t0, h0, h0; \ + ADDE t1, h1, h1; \ + ADDE t2, h2; \ + ADD $16, msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ + MULLD r0, h0, t0; \ + MULLD r0, h1, t4; \ + MULHDU r0, h0, t1; \ + MULHDU r0, h1, t5; \ + ADDC t4, t1, t1; \ + MULLD r0, h2, t2; \ + ADDZE t5; \ + MULHDU r1, h0, t4; \ + MULLD r1, h0, h0; \ + ADD t5, t2, t2; \ + ADDC h0, t1, t1; \ + MULLD h2, r1, t3; \ + ADDZE t4, h0; \ + MULHDU r1, h1, t5; \ + MULLD r1, h1, t4; \ + ADDC t4, t2, t2; \ + ADDE t5, t3, t3; \ + ADDC h0, t2, t2; \ + MOVD $-4, t4; \ + MOVD t0, h0; \ + MOVD t1, h1; \ + ADDZE t3; \ + ANDCC $3, t2, h2; \ + AND t2, t4, t0; \ + ADDC t0, h0, h0; \ + ADDE t3, h1, h1; \ + SLD $62, t3, t4; \ + SRD $2, t2; \ + ADDZE h2; \ + OR t4, t2, t2; \ + SRD $2, t3; \ + ADDC t2, h0, h0; \ + ADDE t3, h1, h1; \ + ADDZE h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func update(state *[7]uint64, msg []byte) + +TEXT ·update(SB), $0-32 + MOVD state+0(FP), R3 + MOVD msg_base+8(FP), R4 + MOVD msg_len+16(FP), R5 + + MOVD 0(R3), R8 // h0 + MOVD 8(R3), R9 // h1 + MOVD 16(R3), R10 // h2 + MOVD 24(R3), R11 // r0 + MOVD 32(R3), R12 // r1 + + CMP R5, $16 + BLT bytes_between_0_and_15 + +loop: + POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) + ADD $-16, R5 + CMP R5, $16 + BGE loop + +bytes_between_0_and_15: + CMP $0, R5 + BEQ done + MOVD $0, R16 // h0 + MOVD $0, R17 // h1 + +flush_buffer: + CMP R5, $8 + BLE just1 + + MOVD $8, R21 + SUB R21, R5, R21 + + // Greater than 8 -- load the rightmost remaining bytes in msg + // and put into R17 (h1) + MOVD (R4)(R21), R17 + MOVD $16, R22 + + // Find the offset to those bytes + SUB R5, R22, R22 + SLD $3, R22 + + // Shift to get only the bytes in msg + SRD R22, R17, R17 + + // Put 1 at high end + MOVD $1, R23 + SLD $3, R21 + SLD 
R21, R23, R23 + OR R23, R17, R17 + + // Remainder is 8 + MOVD $8, R5 + +just1: + CMP R5, $8 + BLT less8 + + // Exactly 8 + MOVD (R4), R16 + + CMP $0, R17 + + // Check if we've already set R17; if not + // set 1 to indicate end of msg. + BNE carry + MOVD $1, R17 + BR carry + +less8: + MOVD $0, R16 // h0 + MOVD $0, R22 // shift count + CMP R5, $4 + BLT less4 + MOVWZ (R4), R16 + ADD $4, R4 + ADD $-4, R5 + MOVD $32, R22 + +less4: + CMP R5, $2 + BLT less2 + MOVHZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $16, R22 + ADD $-2, R5 + ADD $2, R4 + +less2: + CMP $0, R5 + BEQ insert1 + MOVBZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $8, R22 + +insert1: + // Insert 1 at end of msg + MOVD $1, R21 + SLD R22, R21, R21 + OR R16, R21, R16 + +carry: + // Add new values to h0, h1, h2 + ADDC R16, R8 + ADDE R17, R9 + ADDE $0, R10 + MOVD $16, R5 + ADD R5, R4 + BR multiply + +done: + // Save h0, h1, h2 in state + MOVD R8, 0(R3) + MOVD R9, 8(R3) + MOVD R10, 16(R3) + RET + +// func initialize(state *[7]uint64, key *[32]byte) +TEXT ·initialize(SB), $0-16 + MOVD state+0(FP), R3 + MOVD key+8(FP), R4 + + // state[0...7] is initialized with zero + // Load key + MOVD 0(R4), R5 + MOVD 8(R4), R6 + MOVD 16(R4), R7 + MOVD 24(R4), R8 + + // Address of key mask + MOVD $·poly1305Mask<>(SB), R9 + + // Save original key in state + MOVD R7, 40(R3) + MOVD R8, 48(R3) + + // Get mask + MOVD (R9), R7 + MOVD 8(R9), R8 + + // And with key + AND R5, R7, R5 + AND R6, R8, R6 + + // Save masked key in state + MOVD R5, 24(R3) + MOVD R6, 32(R3) + RET + +// func finalize(tag *[TagSize]byte, state *[7]uint64) +TEXT ·finalize(SB), $0-16 + MOVD tag+0(FP), R3 + MOVD state+8(FP), R4 + + // Get h0, h1, h2 from state + MOVD 0(R4), R5 + MOVD 8(R4), R6 + MOVD 16(R4), R7 + + // Save h0, h1 + MOVD R5, R8 + MOVD R6, R9 + MOVD $3, R20 + MOVD $-1, R21 + SUBC $-5, R5 + SUBE R21, R6 + SUBE R20, R7 + MOVD $0, R21 + SUBZE R21 + + // Check for carry + CMP $0, R21 + ISEL $2, R5, R8, R5 + ISEL $2, R6, R9, R6 + MOVD 40(R4), R8 + MOVD 48(R4), R9 + ADDC R8, R5 + ADDE R9, R6 + MOVD R5, 0(R3) + MOVD R6, 8(R3) + RET diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go index f0d2e55bdc6e9..cf8947c33276b 100644 --- a/vendor/golang.org/x/net/bpf/vm_instructions.go +++ b/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -129,7 +129,8 @@ func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) { offset := int(ins.Off) - if !inBounds(len(in), offset, 0) { + // Size of LoadMemShift is always 1 byte + if !inBounds(len(in), offset, 1) { return 0, false } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index b46791d1da2c7..514c126c5f8f2 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -643,7 +643,7 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { return f.WriteDataPadded(streamID, endStream, data, nil) } -// WriteData writes a DATA frame with optional padding. +// WriteDataPadded writes a DATA frame with optional padding. // // If pad is nil, the padding bit is not sent. // The length of pad must not exceed 255 bytes. 
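As a rough usage sketch (illustrative only, not part of the vendored files above): the poly1305 package whose ppc64le implementation is added in this change exposes Sum with the signature shown in sum_ppc64le.go, and the package also provides a constant-time Verify; both calls below are assumed from the package's public API, and the key derivation shown is purely for demonstration.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	// A one-time key: Poly1305 keys must never be reused across messages.
	// Real protocols derive the key per message (e.g. from a ChaCha20 keystream);
	// here we simply draw random bytes for illustration.
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}

	msg := []byte("hello, world")

	// Sum writes the 16-byte authenticator for msg into tag.
	var tag [16]byte
	poly1305.Sum(&tag, msg, &key)

	// Verify recomputes the authenticator and compares it in constant time.
	fmt.Println(poly1305.Verify(&tag, msg, &key)) // true
}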
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 6f8e8b0d98b18..34d429320b60a 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -283,7 +283,20 @@ func ConfigureServer(s *http.Server, conf *Server) error { if testHookOnConn != nil { testHookOnConn() } + // The TLSNextProto interface predates contexts, so + // the net/http package passes down its per-connection + // base context via an exported but unadvertised + // method on the Handler. This is for internal + // net/http<=>http2 use only. + var ctx context.Context + type baseContexter interface { + BaseContext() context.Context + } + if bc, ok := h.(baseContexter); ok { + ctx = bc.BaseContext() + } conf.ServeConn(c, &ServeConnOpts{ + Context: ctx, Handler: h, BaseConfig: hs, }) @@ -294,6 +307,10 @@ func ConfigureServer(s *http.Server, conf *Server) error { // ServeConnOpts are options for the Server.ServeConn method. type ServeConnOpts struct { + // Context is the base context to use. + // If nil, context.Background is used. + Context context.Context + // BaseConfig optionally sets the base configuration // for values. If nil, defaults are used. BaseConfig *http.Server @@ -304,6 +321,13 @@ type ServeConnOpts struct { Handler http.Handler } +func (o *ServeConnOpts) context() context.Context { + if o != nil && o.Context != nil { + return o.Context + } + return context.Background() +} + func (o *ServeConnOpts) baseConfig() *http.Server { if o != nil && o.BaseConfig != nil { return o.BaseConfig @@ -449,7 +473,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { - ctx, cancel = context.WithCancel(context.Background()) + ctx, cancel = context.WithCancel(opts.context()) ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) if hs := opts.baseConfig(); hs != nil { ctx = context.WithValue(ctx, http.ServerContextKey, hs) @@ -2337,7 +2361,16 @@ type chunkWriter struct{ rws *responseWriterState } func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } -func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 } + +func (rws *responseWriterState) hasNonemptyTrailers() bool { + for _, trailer := range rws.trailers { + if _, ok := rws.handlerHeader[trailer]; ok { + return true + } + } + return false +} // declareTrailer is called for each Trailer header when the // response header is written. It notes that a header will need to be @@ -2437,7 +2470,10 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { rws.promoteUndeclaredTrailers() } - endStream := rws.handlerDone && !rws.hasTrailers() + // only send trailers if they have actually been defined by the + // server handler. + hasNonemptyTrailers := rws.hasNonemptyTrailers() + endStream := rws.handlerDone && !hasNonemptyTrailers if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. 
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { @@ -2446,7 +2482,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { } } - if rws.handlerDone && rws.hasTrailers() { + if rws.handlerDone && hasNonemptyTrailers { err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, h: rws.handlerHeader, diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f272e8f9fac20..aeac7d8a51a2f 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -28,6 +28,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" "golang.org/x/net/http/httpguts" @@ -199,6 +200,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls + reused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request // readLoop goroutine fields: @@ -440,7 +442,8 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - traceGotConn(req, cc) + reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + traceGotConn(req, cc, reused) res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) if err != nil && retry <= 6 { if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { @@ -989,7 +992,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf req.Method != "HEAD" { // Request gzip only, not deflate. Deflate is ambiguous and // not as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // See: https://zlib.net/zlib_faq.html#faq39 // // Note that we don't request this for HEAD requests, // due to a bug in nginx: @@ -1411,7 +1414,11 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // followed by the query production (see Sections 3.3 and 3.4 of // [RFC3986]). 
f(":authority", host) - f(":method", req.Method) + m := req.Method + if m == "" { + m = http.MethodGet + } + f(":method", m) if req.Method != "CONNECT" { f(":path", path) f(":scheme", req.URL.Scheme) @@ -2555,15 +2562,15 @@ func traceGetConn(req *http.Request, hostPort string) { trace.GetConn(hostPort) } -func traceGotConn(req *http.Request, cc *ClientConn) { +func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { trace := httptrace.ContextClientTrace(req.Context()) if trace == nil || trace.GotConn == nil { return } ci := httptrace.GotConnInfo{Conn: cc.tconn} + ci.Reused = reused cc.mu.Lock() - ci.Reused = cc.nextStreamID > 1 - ci.WasIdle = len(cc.streams) == 0 && ci.Reused + ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { ci.IdleTime = time.Now().Sub(cc.lastActive) } diff --git a/vendor/golang.org/x/net/idna/BUILD.bazel b/vendor/golang.org/x/net/idna/BUILD.bazel index 4e5cf6ffb8f96..b665b4ad8a48c 100644 --- a/vendor/golang.org/x/net/idna/BUILD.bazel +++ b/vendor/golang.org/x/net/idna/BUILD.bazel @@ -3,9 +3,12 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "idna.go", + "idna10.0.0.go", + "idna9.0.0.go", "punycode.go", - "tables.go", + "tables10.0.0.go", + "tables11.0.0.go", + "tables9.0.0.go", "trie.go", "trieval.go", ], diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna10.0.0.go similarity index 99% rename from vendor/golang.org/x/net/idna/idna.go rename to vendor/golang.org/x/net/idna/idna10.0.0.go index 346fe4423ed71..a98a31f403888 100644 --- a/vendor/golang.org/x/net/idna/idna.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -4,14 +4,16 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build go1.10 + // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to // deal with the transition from IDNA2003. // // IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC // 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. -// UTS #46 is defined in http://www.unicode.org/reports/tr46. -// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the +// UTS #46 is defined in https://www.unicode.org/reports/tr46. +// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the // differences between these two standards. package idna // import "golang.org/x/net/idna" @@ -297,7 +299,7 @@ func (e runeError) Error() string { } // process implements the algorithm described in section 4 of UTS #46, -// see http://www.unicode.org/reports/tr46. +// see https://www.unicode.org/reports/tr46. func (p *Profile) process(s string, toASCII bool) (string, error) { var err error var isBidi bool diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go new file mode 100644 index 0000000000000..8842146b5d992 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -0,0 +1,682 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.10 + +// Package idna implements IDNA2008 using the compatibility processing +// defined by UTS (Unicode Technical Standard) #46, which defines a standard to +// deal with the transition from IDNA2003. +// +// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC +// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. +// UTS #46 is defined in https://www.unicode.org/reports/tr46. +// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the +// differences between these two standards. +package idna // import "golang.org/x/net/idna" + +import ( + "fmt" + "strings" + "unicode/utf8" + + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/norm" +) + +// NOTE: Unlike common practice in Go APIs, the functions will return a +// sanitized domain name in case of errors. Browsers sometimes use a partially +// evaluated string as lookup. +// TODO: the current error handling is, in my opinion, the least opinionated. +// Other strategies are also viable, though: +// Option 1) Return an empty string in case of error, but allow the user to +// specify explicitly which errors to ignore. +// Option 2) Return the partially evaluated string if it is itself a valid +// string, otherwise return the empty string in case of error. +// Option 3) Option 1 and 2. +// Option 4) Always return an empty string for now and implement Option 1 as +// needed, and document that the return string may not be empty in case of +// error in the future. +// I think Option 1 is best, but it is quite opinionated. + +// ToASCII is a wrapper for Punycode.ToASCII. +func ToASCII(s string) (string, error) { + return Punycode.process(s, true) +} + +// ToUnicode is a wrapper for Punycode.ToUnicode. +func ToUnicode(s string) (string, error) { + return Punycode.process(s, false) +} + +// An Option configures a Profile at creation time. +type Option func(*options) + +// Transitional sets a Profile to use the Transitional mapping as defined in UTS +// #46. This will cause, for example, "ß" to be mapped to "ss". Using the +// transitional mapping provides a compromise between IDNA2003 and IDNA2008 +// compatibility. It is used by most browsers when resolving domain names. This +// option is only meaningful if combined with MapForLookup. +func Transitional(transitional bool) Option { + return func(o *options) { o.transitional = true } +} + +// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts +// are longer than allowed by the RFC. +func VerifyDNSLength(verify bool) Option { + return func(o *options) { o.verifyDNSLength = verify } +} + +// RemoveLeadingDots removes leading label separators. Leading runes that map to +// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. +// +// This is the behavior suggested by the UTS #46 and is adopted by some +// browsers. +func RemoveLeadingDots(remove bool) Option { + return func(o *options) { o.removeLeadingDots = remove } +} + +// ValidateLabels sets whether to check the mandatory label validation criteria +// as defined in Section 5.4 of RFC 5891. This includes testing for correct use +// of hyphens ('-'), normalization, validity of runes, and the context rules. +func ValidateLabels(enable bool) Option { + return func(o *options) { + // Don't override existing mappings, but set one that at least checks + // normalization if it is not set. 
+ if o.mapping == nil && enable { + o.mapping = normalize + } + o.trie = trie + o.validateLabels = enable + o.fromPuny = validateFromPunycode + } +} + +// StrictDomainName limits the set of permissible ASCII characters to those +// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the +// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// +// This option is useful, for instance, for browsers that allow characters +// outside this range, for example a '_' (U+005F LOW LINE). See +// http://www.rfc-editor.org/std/std3.txt for more details. This option +// corresponds to the UseSTD3ASCIIRules option in UTS #46. +func StrictDomainName(use bool) Option { + return func(o *options) { + o.trie = trie + o.useSTD3Rules = use + o.fromPuny = validateFromPunycode + } +} + +// NOTE: the following options pull in tables. The tables should not be linked +// in as long as the options are not used. + +// BidiRule enables the Bidi rule as defined in RFC 5893. Any application +// that relies on proper validation of labels should include this rule. +func BidiRule() Option { + return func(o *options) { o.bidirule = bidirule.ValidString } +} + +// ValidateForRegistration sets validation options to verify that a given IDN is +// properly formatted for registration as defined by Section 4 of RFC 5891. +func ValidateForRegistration() Option { + return func(o *options) { + o.mapping = validateRegistration + StrictDomainName(true)(o) + ValidateLabels(true)(o) + VerifyDNSLength(true)(o) + BidiRule()(o) + } +} + +// MapForLookup sets validation and mapping options such that a given IDN is +// transformed for domain name lookup according to the requirements set out in +// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, +// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option +// to add this check. +// +// The mappings include normalization and mapping case, width and other +// compatibility mappings. +func MapForLookup() Option { + return func(o *options) { + o.mapping = validateAndMap + StrictDomainName(true)(o) + ValidateLabels(true)(o) + RemoveLeadingDots(true)(o) + } +} + +type options struct { + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + removeLeadingDots bool + + trie *idnaTrie + + // fromPuny calls validation rules when converting A-labels to U-labels. + fromPuny func(p *Profile, s string) error + + // mapping implements a validation and mapping step as defined in RFC 5895 + // or UTS 46, tailored to, for example, domain registration or lookup. + mapping func(p *Profile, s string) (string, error) + + // bidirule, if specified, checks whether s conforms to the Bidi Rule + // defined in RFC 5893. + bidirule func(s string) bool +} + +// A Profile defines the configuration of an IDNA mapper. +type Profile struct { + options +} + +func apply(o *options, opts []Option) { + for _, f := range opts { + f(o) + } +} + +// New creates a new Profile. +// +// With no options, the returned Profile is the most permissive and equals the +// Punycode Profile. Options can be passed to further restrict the Profile. The +// MapForLookup and ValidateForRegistration options set a collection of options, +// for lookup and registration purposes respectively, which can be tailored by +// adding more fine-grained options, where later options override earlier +// options.
+func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. +func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" + } + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. + Display *Profile = display + + // Registration is the recommended profile for checking whether a given + // IDN is valid for registration, according to Section 4 of RFC 5891. + Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + removeLeadingDots: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + removeLeadingDots: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. +) + +type labelError struct{ label, code_ string } + +func (e labelError) code() string { return e.code_ } +func (e labelError) Error() string { + return fmt.Sprintf("idna: invalid label %q", e.label) +} + +type runeError rune + +func (e runeError) code() string { return "P1" } +func (e runeError) Error() string { + return fmt.Sprintf("idna: disallowed rune %U", e) +} + +// process implements the algorithm described in section 4 of UTS #46, +// see https://www.unicode.org/reports/tr46. +func (p *Profile) process(s string, toASCII bool) (string, error) { + var err error + if p.mapping != nil { + s, err = p.mapping(p, s) + } + // Remove leading empty labels. 
+ if p.removeLeadingDots { + for ; len(s) > 0 && s[0] == '.'; s = s[1:] { + } + } + // It seems like we should only create this error on ToASCII, but the + // UTS 46 conformance tests suggests we should always check this. + if err == nil && p.verifyDNSLength && s == "" { + err = &labelError{s, "A4"} + } + labels := labelIter{orig: s} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + // Empty labels are not okay. The label iterator skips the last + // label if it is empty. + if err == nil && p.verifyDNSLength { + err = &labelError{s, "A4"} + } + continue + } + if strings.HasPrefix(label, acePrefix) { + u, err2 := decode(label[len(acePrefix):]) + if err2 != nil { + if err == nil { + err = err2 + } + // Spec says keep the old label. + continue + } + labels.set(u) + if err == nil && p.validateLabels { + err = p.fromPuny(p, u) + } + if err == nil { + // This should be called on NonTransitional, according to the + // spec, but that currently does not have any effect. Use the + // original profile to preserve options. + err = p.validateLabel(u) + } + } else if err == nil { + err = p.validateLabel(label) + } + } + if toASCII { + for labels.reset(); !labels.done(); labels.next() { + label := labels.label() + if !ascii(label) { + a, err2 := encode(acePrefix, label) + if err == nil { + err = err2 + } + label = a + labels.set(a) + } + n := len(label) + if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { + err = &labelError{label, "A4"} + } + } + } + s = labels.result() + if toASCII && p.verifyDNSLength && err == nil { + // Compute the length of the domain name minus the root label and its dot. + n := len(s) + if n > 0 && s[n-1] == '.' { + n-- + } + if len(s) < 1 || n > 253 { + err = &labelError{s, "A4"} + } + } + return s, err +} + +func normalize(p *Profile, s string) (string, error) { + return norm.NFC.String(s), nil +} + +func validateRegistration(p *Profile, s string) (string, error) { + if !norm.NFC.IsNormalString(s) { + return s, &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, runeError(r) + } + i += sz + } + return s, nil +} + +func validateAndMap(p *Profile, s string) (string, error) { + var ( + err error + b []byte + k int + ) + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) + // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + s = norm.NFC.String(s) + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, err +} + +// A labelIter allows iterating over domain name labels. 
+type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. 
+func (p *Profile) validateLabel(s string) error { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if p.bidirule != nil && !p.bidirule(s) { + return &labelError{s, "B"} + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. + if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/tables.go b/vendor/golang.org/x/net/idna/tables10.0.0.go similarity index 99% rename from vendor/golang.org/x/net/idna/tables.go rename to vendor/golang.org/x/net/idna/tables10.0.0.go index f910b26914431..54fddb4b16cce 100644 --- a/vendor/golang.org/x/net/idna/tables.go +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,11 +1,13 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +// +build go1.10,!go1.13 + package idna // UnicodeVersion is the Unicode version from which the tables in this package are derived. const UnicodeVersion = "10.0.0" -var mappings string = "" + // Size: 8176 bytes +var mappings string = "" + // Size: 8175 bytes "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + @@ -4554,4 +4556,4 @@ var idnaSparseValues = [1915]valueRange{ {value: 0x0040, lo: 0xb0, hi: 0xbf}, } -// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E +// Total table size 42114 bytes (41KiB); checksum: 355A58A4 diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go new file mode 100644 index 0000000000000..c515d7ad2a20d --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -0,0 +1,4653 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "11.0.0" + +var mappings string = "" + // Size: 8175 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 29404 bytes (28.71 KiB). Checksum: 848c45acb5f7991c. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 125: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 125 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 127 blocks, 8128 entries, 16256 bytes +// The third block is the zero block. 
+var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 
0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 
0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 
0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 
0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 
0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 
0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 
0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 
0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 
0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 
0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, + // Block 0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 
0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 
0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0008, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0008, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 
0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 
0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, + // Block 0x56, offset 0x1580 + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0018, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, +} + +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var idnaIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0x126, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0x128, 0x3fd: 0x129, 
0x3fe: 0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x12a, 0x401: 0x12b, 0x402: 0x12c, 0x403: 0x12d, 0x404: 0x12e, 0x405: 0x12f, 0x406: 0x130, 0x407: 0x131, + 0x408: 0x132, 0x409: 0xba, 0x40a: 0x133, 0x40b: 0x134, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x135, 0x411: 0x136, 0x412: 0x137, 0x413: 0x138, 0x414: 0xba, 0x415: 0xba, 0x416: 0x139, 0x417: 0x13a, + 0x418: 0x13b, 0x419: 0x13c, 0x41a: 0x13d, 0x41b: 0x13e, 0x41c: 0x13f, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0x140, 0x421: 0xba, 0x422: 0x141, 0x423: 0x142, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x143, 0x429: 0x144, 0x42a: 0x145, 0x42b: 0x146, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x147, 0x431: 0x148, 0x432: 0x149, 0x433: 0xba, 0x434: 0x14a, 0x435: 0x14b, 0x436: 0x14c, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0x14d, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x14e, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x14f, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x150, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x151, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x152, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 
0x4fe: 0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x146, 0x529: 0x153, 0x52a: 0xba, 0x52b: 0x154, 0x52c: 0x155, 0x52d: 0x156, 0x52e: 0x157, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0x158, 0x53a: 0x159, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x15a, 0x53e: 0x15b, 0x53f: 0x15c, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x15d, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x15e, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x15f, 0x585: 0x160, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x161, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x162, 0x5b2: 0x163, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x164, 0x5c4: 0x165, 0x5c5: 0x166, 0x5c6: 0x167, 0x5c7: 0x168, + 0x5c8: 0x9b, 0x5c9: 0x169, 0x5ca: 0xba, 0x5cb: 0x16a, 0x5cc: 0x9b, 0x5cd: 0x16b, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x16c, 0x5e9: 0x16d, 0x5ea: 0x16e, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 
0x5ff: 0xba, + // Block 0x18, offset 0x600 + 0x600: 0x16f, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x170, 0x624: 0x6f, 0x625: 0x171, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0x172, 0x632: 0x173, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x174, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x175, 0x641: 0x9b, 0x642: 0x176, 0x643: 0x177, 0x644: 0x73, 0x645: 0x74, 0x646: 0x178, 0x647: 0x179, + 0x648: 0x75, 0x649: 0x17a, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x17b, 0x65c: 0x9b, 0x65d: 0x17c, 0x65e: 0x9b, 0x65f: 0x17d, + 0x660: 0x17e, 0x661: 0x17f, 0x662: 0x180, 0x663: 0xba, 0x664: 0x181, 0x665: 0x182, 0x666: 0x183, 0x667: 0x184, + 0x668: 0xba, 0x669: 0x185, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x186, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x187, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x188, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, 
+ // Block 0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x189, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, + // Block 0x1d, offset 0x740 + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x18a, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x18b, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x18c, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 
0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x18d, 0x841: 0x18e, 0x842: 0xba, 0x843: 0xba, 0x844: 0x18f, 0x845: 0x18f, 0x846: 0x18f, 0x847: 0x190, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, +} + +// idnaSparseOffset: 276 entries, 552 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x86, 0x8b, 0x94, 0xa4, 0xb2, 0xbe, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x224, 0x22e, 0x23a, 0x246, 0x252, 0x25a, 0x25f, 0x269, 0x27a, 0x27e, 0x289, 0x28d, 0x296, 0x29e, 0x2a4, 0x2a9, 0x2ac, 0x2b0, 0x2b6, 0x2ba, 0x2be, 0x2c2, 0x2c7, 0x2cd, 0x2d5, 0x2dc, 0x2e7, 0x2f1, 0x2f5, 0x2f8, 0x2fe, 0x302, 0x304, 0x307, 0x309, 0x30c, 0x316, 0x319, 0x328, 0x32c, 0x331, 0x334, 0x338, 0x33d, 0x342, 0x348, 0x34e, 0x35d, 0x363, 0x367, 0x376, 0x37b, 0x383, 0x38d, 0x398, 0x3a0, 0x3b1, 0x3ba, 0x3ca, 0x3d7, 0x3e1, 0x3e6, 
0x3f3, 0x3f7, 0x3fc, 0x3fe, 0x402, 0x404, 0x408, 0x411, 0x417, 0x41b, 0x42b, 0x435, 0x43a, 0x43d, 0x443, 0x44a, 0x44f, 0x453, 0x459, 0x45e, 0x467, 0x46c, 0x472, 0x479, 0x480, 0x487, 0x48b, 0x490, 0x493, 0x498, 0x4a4, 0x4aa, 0x4af, 0x4b6, 0x4be, 0x4c3, 0x4c7, 0x4d7, 0x4de, 0x4e2, 0x4e6, 0x4ed, 0x4ef, 0x4f2, 0x4f5, 0x4f9, 0x502, 0x506, 0x50e, 0x516, 0x51c, 0x525, 0x531, 0x538, 0x541, 0x54b, 0x552, 0x560, 0x56d, 0x57a, 0x583, 0x587, 0x596, 0x59e, 0x5a9, 0x5b2, 0x5b8, 0x5c0, 0x5c9, 0x5d3, 0x5d6, 0x5e2, 0x5eb, 0x5ee, 0x5f3, 0x5fe, 0x607, 0x613, 0x616, 0x620, 0x629, 0x635, 0x642, 0x64f, 0x65d, 0x664, 0x667, 0x66c, 0x66f, 0x672, 0x675, 0x67c, 0x683, 0x687, 0x692, 0x695, 0x698, 0x69b, 0x6a1, 0x6a6, 0x6aa, 0x6ad, 0x6b0, 0x6b3, 0x6b6, 0x6b9, 0x6be, 0x6c8, 0x6cb, 0x6cf, 0x6de, 0x6ea, 0x6ee, 0x6f3, 0x6f7, 0x6fc, 0x700, 0x705, 0x70e, 0x719, 0x71f, 0x727, 0x72a, 0x72d, 0x731, 0x735, 0x73b, 0x741, 0x746, 0x749, 0x759, 0x760, 0x763, 0x766, 0x76a, 0x770, 0x775, 0x77a, 0x782, 0x787, 0x78b, 0x78f, 0x792, 0x795, 0x799, 0x79d, 0x7a0, 0x7b0, 0x7c1, 0x7c6, 0x7c8, 0x7ca} + +// idnaSparseValues: 1997 entries, 7988 bytes +var idnaSparseValues = [1997]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x06}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x6, offset 0x33 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 
0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3e + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xae}, + {value: 0x0808, lo: 0xaf, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4a + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4e + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5d + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x62 + {value: 0x0000, lo: 0x09}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbf}, + // Block 0xc, offset 0x6c + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x78 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xe, offset 0x86 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8b + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 
0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, offset 0x94 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa4 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb2 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbe + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xca + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xdb + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe5 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, 
+ {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x17, offset 0xec + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x10a + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x111 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11c + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12b + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x139 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 
0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 0x143 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x145 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x14a + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14d + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x150 + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x152 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15e + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x169 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x171 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x177 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17d + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x182 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x187 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x18a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18e + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, 
+ // Block 0x2d, offset 0x194 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x199 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a5 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1af + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b5 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c6 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1d0 + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x34, offset 0x1d3 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1db + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1de + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, 
+ {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1eb + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f7 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fe + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x206 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x216 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x222 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x224 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22e + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x23a + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, 
+ {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x246 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x252 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x25a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x25f + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x269 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x27a + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27e + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x289 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x28d + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, 
offset 0x296 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x29e + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a4 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2a9 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2ac + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2b0 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2b6 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2ba + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2be + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xbf}, + // Block 0x53, offset 0x2c2 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x54, offset 0x2c7 + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2cd + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2d5 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2dc + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2e7 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 
0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f1 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5a, offset 0x2f5 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0xbf}, + // Block 0x5b, offset 0x2f8 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5c, offset 0x2fe + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5d, offset 0x302 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5e, offset 0x304 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x5f, offset 0x307 + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x309 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x61, offset 0x30c + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x62, offset 0x316 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x319 + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x64, offset 0x328 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x65, offset 0x32c + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x66, offset 0x331 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x67, offset 0x334 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x68, offset 0x338 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, 
hi: 0xbf}, + // Block 0x69, offset 0x33d + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6a, offset 0x342 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x348 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x34e + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x35d + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6e, offset 0x363 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x367 + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x70, offset 0x376 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x71, offset 0x37b + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x72, offset 0x383 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x38d + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + 
{value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x398 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3a0 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3b1 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3ba + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x78, offset 0x3ca + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3d7 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3e1 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3e6 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + 
{value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7c, offset 0x3f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7e, offset 0x3fc + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x3fe + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x402 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x404 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x408 + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x411 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x417 + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x41b + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x42b + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x435 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x43a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x89, 
offset 0x43d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x443 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x44a + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x44f + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x453 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x459 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x45e + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x467 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x46c + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x472 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x479 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x480 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x487 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x48b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 0x490 + {value: 0x0000, lo: 
0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x493 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x498 + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a4 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4aa + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4af + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b6 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4be + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c3 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4c7 + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4d7 + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4de + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4e2 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 
0xbf}, + // Block 0xa4, offset 0x4e6 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4ed + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4ef + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x4f2 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x4f5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4f9 + {value: 0x0000, lo: 0x08}, + {value: 0x0908, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0xa1}, + {value: 0x0c08, lo: 0xa2, hi: 0xa2}, + {value: 0x0a08, lo: 0xa3, hi: 0xa3}, + {value: 0x3308, lo: 0xa4, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xaa, offset 0x502 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xab, offset 0x506 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0xa6}, + {value: 0x0808, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb3}, + {value: 0x0a08, lo: 0xb4, hi: 0xbf}, + // Block 0xac, offset 0x50e + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x84}, + {value: 0x0808, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x90}, + {value: 0x0a18, lo: 0x91, hi: 0x93}, + {value: 0x0c18, lo: 0x94, hi: 0x94}, + {value: 0x0818, lo: 0x95, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xad, offset 0x516 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xae, offset 0x51c + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xaf, offset 0x525 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xb0, offset 0x531 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb1, offset 0x538 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, 
hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xb2, offset 0x541 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb3, offset 0x54b + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb4, offset 0x552 + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb5, offset 0x560 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb6, offset 0x56d + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb7, offset 0x57a + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb8, offset 0x583 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb9, offset 0x587 + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, 
hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xba, offset 0x596 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xbb, offset 0x59e + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xbc, offset 0x5a9 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbd, offset 0x5b2 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbe, offset 0x5b8 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbf, offset 0x5c0 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xc0, offset 0x5c9 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xc1, offset 0x5d3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xc2, offset 0x5d6 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc3, offset 0x5e2 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, 
hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xc4, offset 0x5eb + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc5, offset 0x5ee + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc6, offset 0x5f3 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x5fe + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc8, offset 0x607 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc9, offset 0x613 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xca, offset 0x616 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xcb, offset 0x620 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xcc, offset 0x629 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xcd, offset 0x635 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, 
hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xce, offset 0x642 + {value: 0x0000, lo: 0x0c}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xcf, offset 0x64f + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x3008, lo: 0x93, hi: 0x94}, + {value: 0x3308, lo: 0x95, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x96}, + {value: 0x3b08, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xbf}, + // Block 0xd0, offset 0x65d + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd1, offset 0x664 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xd2, offset 0x667 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xd3, offset 0x66c + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xd4, offset 0x66f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xd5, offset 0x672 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd6, offset 0x675 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xd7, offset 0x67c + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd8, offset 0x683 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd9, offset 0x687 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, 
hi: 0xbf}, + // Block 0xda, offset 0x692 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xdb, offset 0x695 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xdc, offset 0x698 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0xdd, offset 0x69b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xde, offset 0x6a1 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xdf, offset 0x6a6 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xe0, offset 0x6aa + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe1, offset 0x6ad + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xe2, offset 0x6b0 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xe3, offset 0x6b3 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xe4, offset 0x6b6 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xe5, offset 0x6b9 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xe6, offset 0x6be + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xe7, offset 0x6c8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe8, offset 0x6cb + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xe9, offset 0x6cf + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xea, offset 0x6de + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + 
{value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xeb, offset 0x6ea + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xec, offset 0x6ee + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xed, offset 0x6f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xee, offset 0x6f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xef, offset 0x6fc + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xf0, offset 0x700 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xf1, offset 0x705 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xf2, offset 0x70e + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xf3, offset 0x719 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xf4, offset 0x71f + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xf5, offset 0x727 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xb0}, + {value: 0x0818, lo: 0xb1, hi: 0xbf}, + // Block 0xf6, offset 0x72a + {value: 0x0000, lo: 0x02}, + {value: 0x0818, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xf7, offset 0x72d + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xf8, offset 0x731 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xf9, offset 0x735 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, 
lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xfa, offset 0x73b + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xfb, offset 0x741 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xfc, offset 0x746 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xfd, offset 0x749 + {value: 0x0000, lo: 0x0f}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xfe, offset 0x759 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xff, offset 0x760 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x100, offset 0x763 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0xbf}, + // Block 0x101, offset 0x766 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x102, offset 0x76a + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x103, offset 0x770 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0x104, offset 0x775 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x105, offset 0x77a + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb2}, + {value: 0x0018, lo: 0xb3, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x106, offset 0x782 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x107, offset 0x787 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // 
Block 0x108, offset 0x78b + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0x109, offset 0x78f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0x10a, offset 0x792 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x10b, offset 0x795 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x10c, offset 0x799 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x10d, offset 0x79d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x10e, offset 0x7a0 + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0x10f, offset 0x7b0 + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x110, offset 0x7c1 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x111, offset 0x7c6 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x112, offset 0x7c8 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x113, offset 0x7ca + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 42466 bytes (41KiB); checksum: 355A58A4 diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go new file mode 100644 index 0000000000000..8b65fa1678303 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -0,0 +1,4486 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build !go1.10 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "9.0.0" + +var mappings string = "" + // Size: 8175 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 28600 bytes (27.93 KiB). Checksum: 95575047b5d8fff. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 124: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 124 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 126 blocks, 8064 entries, 16128 bytes +// The third block is the zero block. 
+var idnaValues = [8064]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x3008, 0x541: 0x3308, 0x542: 0x3308, 0x543: 0x3308, 0x544: 0x3308, 0x545: 0x3308, + 0x546: 0x3308, 0x547: 0x3308, 0x548: 0x3308, 0x549: 0x3008, 0x54a: 0x3008, 0x54b: 0x3008, + 0x54c: 0x3008, 0x54d: 0x3b08, 0x54e: 0x3008, 0x54f: 0x3008, 0x550: 0x0008, 0x551: 0x3308, + 0x552: 0x3308, 0x553: 0x3308, 0x554: 0x3308, 0x555: 0x3308, 0x556: 0x3308, 0x557: 0x3308, + 0x558: 0x04c9, 0x559: 0x0501, 0x55a: 0x0539, 0x55b: 0x0571, 0x55c: 0x05a9, 0x55d: 0x05e1, + 0x55e: 0x0619, 0x55f: 0x0651, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x3308, 0x563: 0x3308, + 0x564: 0x0018, 0x565: 0x0018, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0008, + 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008, + 0x570: 0x0018, 0x571: 0x0008, 0x572: 0x0008, 0x573: 0x0008, 0x574: 0x0008, 0x575: 0x0008, + 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0008, 0x57b: 0x0008, + 0x57c: 0x0008, 0x57d: 0x0008, 0x57e: 0x0008, 0x57f: 0x0008, + // Block 0x16, offset 0x580 + 0x580: 0x0008, 0x581: 0x3308, 0x582: 0x3008, 0x583: 0x3008, 0x584: 0x0040, 0x585: 0x0008, + 0x586: 0x0008, 0x587: 0x0008, 0x588: 0x0008, 0x589: 0x0008, 0x58a: 0x0008, 0x58b: 0x0008, + 
0x58c: 0x0008, 0x58d: 0x0040, 0x58e: 0x0040, 0x58f: 0x0008, 0x590: 0x0008, 0x591: 0x0040, + 0x592: 0x0040, 0x593: 0x0008, 0x594: 0x0008, 0x595: 0x0008, 0x596: 0x0008, 0x597: 0x0008, + 0x598: 0x0008, 0x599: 0x0008, 0x59a: 0x0008, 0x59b: 0x0008, 0x59c: 0x0008, 0x59d: 0x0008, + 0x59e: 0x0008, 0x59f: 0x0008, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x0008, 0x5a3: 0x0008, + 0x5a4: 0x0008, 0x5a5: 0x0008, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0040, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0008, 0x5b1: 0x0040, 0x5b2: 0x0008, 0x5b3: 0x0040, 0x5b4: 0x0040, 0x5b5: 0x0040, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0040, 0x5bb: 0x0040, + 0x5bc: 0x3308, 0x5bd: 0x0008, 0x5be: 0x3008, 0x5bf: 0x3008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3008, 0x5c1: 0x3308, 0x5c2: 0x3308, 0x5c3: 0x3308, 0x5c4: 0x3308, 0x5c5: 0x0040, + 0x5c6: 0x0040, 0x5c7: 0x3008, 0x5c8: 0x3008, 0x5c9: 0x0040, 0x5ca: 0x0040, 0x5cb: 0x3008, + 0x5cc: 0x3008, 0x5cd: 0x3b08, 0x5ce: 0x0008, 0x5cf: 0x0040, 0x5d0: 0x0040, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0040, 0x5d4: 0x0040, 0x5d5: 0x0040, 0x5d6: 0x0040, 0x5d7: 0x3008, + 0x5d8: 0x0040, 0x5d9: 0x0040, 0x5da: 0x0040, 0x5db: 0x0040, 0x5dc: 0x0689, 0x5dd: 0x06c1, + 0x5de: 0x0040, 0x5df: 0x06f9, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x3308, 0x5e3: 0x3308, + 0x5e4: 0x0040, 0x5e5: 0x0040, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0008, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0008, 0x5f2: 0x0018, 0x5f3: 0x0018, 0x5f4: 0x0018, 0x5f5: 0x0018, + 0x5f6: 0x0018, 0x5f7: 0x0018, 0x5f8: 0x0018, 0x5f9: 0x0018, 0x5fa: 0x0018, 0x5fb: 0x0018, + 0x5fc: 0x0040, 0x5fd: 0x0040, 0x5fe: 0x0040, 0x5ff: 0x0040, + // Block 0x18, offset 0x600 + 0x600: 0x0040, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3008, 0x604: 0x0040, 0x605: 0x0008, + 0x606: 0x0008, 0x607: 0x0008, 0x608: 0x0008, 0x609: 0x0008, 0x60a: 0x0008, 0x60b: 0x0040, + 0x60c: 0x0040, 0x60d: 0x0040, 0x60e: 0x0040, 0x60f: 0x0008, 0x610: 0x0008, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0008, 0x614: 0x0008, 0x615: 0x0008, 0x616: 0x0008, 0x617: 0x0008, + 0x618: 0x0008, 0x619: 0x0008, 0x61a: 0x0008, 0x61b: 0x0008, 0x61c: 0x0008, 0x61d: 0x0008, + 0x61e: 0x0008, 0x61f: 0x0008, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x0008, 0x623: 0x0008, + 0x624: 0x0008, 0x625: 0x0008, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0040, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0040, 0x632: 0x0008, 0x633: 0x0731, 0x634: 0x0040, 0x635: 0x0008, + 0x636: 0x0769, 0x637: 0x0040, 0x638: 0x0008, 0x639: 0x0008, 0x63a: 0x0040, 0x63b: 0x0040, + 0x63c: 0x3308, 0x63d: 0x0040, 0x63e: 0x3008, 0x63f: 0x3008, + // Block 0x19, offset 0x640 + 0x640: 0x3008, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x0040, 0x644: 0x0040, 0x645: 0x0040, + 0x646: 0x0040, 0x647: 0x3308, 0x648: 0x3308, 0x649: 0x0040, 0x64a: 0x0040, 0x64b: 0x3308, + 0x64c: 0x3308, 0x64d: 0x3b08, 0x64e: 0x0040, 0x64f: 0x0040, 0x650: 0x0040, 0x651: 0x3308, + 0x652: 0x0040, 0x653: 0x0040, 0x654: 0x0040, 0x655: 0x0040, 0x656: 0x0040, 0x657: 0x0040, + 0x658: 0x0040, 0x659: 0x07a1, 0x65a: 0x07d9, 0x65b: 0x0811, 0x65c: 0x0008, 0x65d: 0x0040, + 0x65e: 0x0849, 0x65f: 0x0040, 0x660: 0x0040, 0x661: 0x0040, 0x662: 0x0040, 0x663: 0x0040, + 0x664: 0x0040, 0x665: 0x0040, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0008, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x3308, 0x671: 0x3308, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0008, 0x675: 0x3308, + 0x676: 0x0040, 0x677: 0x0040, 0x678: 0x0040, 0x679: 0x0040, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x0040, 0x67d: 0x0040, 0x67e: 0x0040, 0x67f: 0x0040, + // Block 0x1a, offset 0x680 + 0x680: 0x0040, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x3008, 0x684: 0x0040, 0x685: 0x0008, + 0x686: 0x0008, 0x687: 0x0008, 0x688: 0x0008, 0x689: 0x0008, 0x68a: 0x0008, 0x68b: 0x0008, + 0x68c: 0x0008, 0x68d: 0x0008, 0x68e: 0x0040, 0x68f: 0x0008, 0x690: 0x0008, 0x691: 0x0008, + 0x692: 0x0040, 0x693: 0x0008, 0x694: 0x0008, 0x695: 0x0008, 0x696: 0x0008, 0x697: 0x0008, + 0x698: 0x0008, 0x699: 0x0008, 0x69a: 0x0008, 0x69b: 0x0008, 0x69c: 0x0008, 0x69d: 0x0008, + 0x69e: 0x0008, 0x69f: 0x0008, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x0008, 0x6a3: 0x0008, + 0x6a4: 0x0008, 0x6a5: 0x0008, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0040, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x0008, 0x6b1: 0x0040, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0040, 0x6b5: 0x0008, + 0x6b6: 0x0008, 0x6b7: 0x0008, 0x6b8: 0x0008, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x3308, 0x6bd: 0x0008, 0x6be: 0x3008, 0x6bf: 0x3008, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3008, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3308, 0x6c4: 0x3308, 0x6c5: 0x3308, + 0x6c6: 0x0040, 0x6c7: 0x3308, 0x6c8: 0x3308, 0x6c9: 0x3008, 0x6ca: 0x0040, 0x6cb: 0x3008, + 0x6cc: 0x3008, 0x6cd: 0x3b08, 0x6ce: 0x0040, 0x6cf: 0x0040, 0x6d0: 0x0008, 0x6d1: 0x0040, + 0x6d2: 0x0040, 0x6d3: 0x0040, 0x6d4: 0x0040, 0x6d5: 0x0040, 0x6d6: 0x0040, 0x6d7: 0x0040, + 0x6d8: 0x0040, 0x6d9: 0x0040, 0x6da: 0x0040, 0x6db: 0x0040, 0x6dc: 0x0040, 0x6dd: 0x0040, + 0x6de: 0x0040, 0x6df: 0x0040, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x3308, 0x6e3: 0x3308, + 0x6e4: 0x0040, 0x6e5: 0x0040, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0008, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0018, 0x6f1: 0x0018, 0x6f2: 0x0040, 0x6f3: 0x0040, 0x6f4: 0x0040, 0x6f5: 0x0040, + 0x6f6: 0x0040, 0x6f7: 0x0040, 0x6f8: 0x0040, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x0040, 0x6fd: 0x0040, 0x6fe: 0x0040, 0x6ff: 0x0040, + // Block 0x1c, offset 0x700 + 0x700: 0x0040, 0x701: 0x3308, 0x702: 0x3008, 0x703: 0x3008, 0x704: 0x0040, 0x705: 0x0008, + 0x706: 0x0008, 0x707: 0x0008, 0x708: 0x0008, 0x709: 0x0008, 0x70a: 0x0008, 0x70b: 0x0008, + 0x70c: 0x0008, 0x70d: 0x0040, 0x70e: 0x0040, 0x70f: 0x0008, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0008, 0x714: 0x0008, 0x715: 0x0008, 0x716: 0x0008, 0x717: 0x0008, + 0x718: 0x0008, 0x719: 0x0008, 0x71a: 0x0008, 0x71b: 0x0008, 0x71c: 0x0008, 0x71d: 0x0008, + 0x71e: 0x0008, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x0008, 0x723: 0x0008, + 0x724: 0x0008, 0x725: 0x0008, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0040, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0008, 0x731: 0x0040, 0x732: 0x0008, 0x733: 0x0008, 0x734: 0x0040, 0x735: 0x0008, + 0x736: 0x0008, 0x737: 0x0008, 0x738: 0x0008, 0x739: 0x0008, 0x73a: 0x0040, 0x73b: 0x0040, + 0x73c: 0x3308, 0x73d: 0x0008, 0x73e: 0x3008, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x3008, 0x741: 0x3308, 0x742: 0x3308, 0x743: 0x3308, 0x744: 0x3308, 0x745: 0x0040, + 0x746: 0x0040, 0x747: 0x3008, 0x748: 0x3008, 0x749: 0x0040, 0x74a: 0x0040, 0x74b: 0x3008, + 0x74c: 0x3008, 
0x74d: 0x3b08, 0x74e: 0x0040, 0x74f: 0x0040, 0x750: 0x0040, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0040, 0x754: 0x0040, 0x755: 0x0040, 0x756: 0x3308, 0x757: 0x3008, + 0x758: 0x0040, 0x759: 0x0040, 0x75a: 0x0040, 0x75b: 0x0040, 0x75c: 0x0881, 0x75d: 0x08b9, + 0x75e: 0x0040, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x3308, 0x763: 0x3308, + 0x764: 0x0040, 0x765: 0x0040, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0008, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0018, 0x771: 0x0008, 0x772: 0x0018, 0x773: 0x0018, 0x774: 0x0018, 0x775: 0x0018, + 0x776: 0x0018, 0x777: 0x0018, 0x778: 0x0040, 0x779: 0x0040, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x0040, 0x77f: 0x0040, + // Block 0x1e, offset 0x780 + 0x780: 0x0040, 0x781: 0x0040, 0x782: 0x3308, 0x783: 0x0008, 0x784: 0x0040, 0x785: 0x0008, + 0x786: 0x0008, 0x787: 0x0008, 0x788: 0x0008, 0x789: 0x0008, 0x78a: 0x0008, 0x78b: 0x0040, + 0x78c: 0x0040, 0x78d: 0x0040, 0x78e: 0x0008, 0x78f: 0x0008, 0x790: 0x0008, 0x791: 0x0040, + 0x792: 0x0008, 0x793: 0x0008, 0x794: 0x0008, 0x795: 0x0008, 0x796: 0x0040, 0x797: 0x0040, + 0x798: 0x0040, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0008, 0x79d: 0x0040, + 0x79e: 0x0008, 0x79f: 0x0008, 0x7a0: 0x0040, 0x7a1: 0x0040, 0x7a2: 0x0040, 0x7a3: 0x0008, + 0x7a4: 0x0008, 0x7a5: 0x0040, 0x7a6: 0x0040, 0x7a7: 0x0040, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0040, 0x7ac: 0x0040, 0x7ad: 0x0040, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0008, 0x7b1: 0x0008, 0x7b2: 0x0008, 0x7b3: 0x0008, 0x7b4: 0x0008, 0x7b5: 0x0008, + 0x7b6: 0x0008, 0x7b7: 0x0008, 0x7b8: 0x0008, 0x7b9: 0x0008, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x3008, 0x7bf: 0x3008, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x3308, 0x7c1: 0x3008, 0x7c2: 0x3008, 0x7c3: 0x3008, 0x7c4: 0x3008, 0x7c5: 0x0040, + 0x7c6: 0x3308, 0x7c7: 0x3308, 0x7c8: 0x3308, 0x7c9: 0x0040, 0x7ca: 0x3308, 0x7cb: 0x3308, + 0x7cc: 0x3308, 0x7cd: 0x3b08, 0x7ce: 0x0040, 0x7cf: 0x0040, 0x7d0: 0x0040, 0x7d1: 0x0040, + 0x7d2: 0x0040, 0x7d3: 0x0040, 0x7d4: 0x0040, 0x7d5: 0x3308, 0x7d6: 0x3308, 0x7d7: 0x0040, + 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0040, 0x7dd: 0x0040, + 0x7de: 0x0040, 0x7df: 0x0040, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x3308, 0x7e3: 0x3308, + 0x7e4: 0x0040, 0x7e5: 0x0040, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0040, 0x7f1: 0x0040, 0x7f2: 0x0040, 0x7f3: 0x0040, 0x7f4: 0x0040, 0x7f5: 0x0040, + 0x7f6: 0x0040, 0x7f7: 0x0040, 0x7f8: 0x0018, 0x7f9: 0x0018, 0x7fa: 0x0018, 0x7fb: 0x0018, + 0x7fc: 0x0018, 0x7fd: 0x0018, 0x7fe: 0x0018, 0x7ff: 0x0018, + // Block 0x20, offset 0x800 + 0x800: 0x0008, 0x801: 0x3308, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x0040, 0x805: 0x0008, + 0x806: 0x0008, 0x807: 0x0008, 0x808: 0x0008, 0x809: 0x0008, 0x80a: 0x0008, 0x80b: 0x0008, + 0x80c: 0x0008, 0x80d: 0x0040, 0x80e: 0x0008, 0x80f: 0x0008, 0x810: 0x0008, 0x811: 0x0040, + 0x812: 0x0008, 0x813: 0x0008, 0x814: 0x0008, 0x815: 0x0008, 0x816: 0x0008, 0x817: 0x0008, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0008, 0x81c: 0x0008, 0x81d: 0x0008, + 0x81e: 0x0008, 0x81f: 0x0008, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x0008, 0x823: 0x0008, + 0x824: 0x0008, 0x825: 0x0008, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0040, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0008, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0008, 0x834: 0x0040, 0x835: 0x0008, + 0x836: 0x0008, 0x837: 0x0008, 0x838: 0x0008, 0x839: 0x0008, 0x83a: 0x0040, 0x83b: 0x0040, + 0x83c: 0x3308, 0x83d: 0x0008, 0x83e: 0x3008, 0x83f: 0x3308, + // Block 0x21, offset 0x840 + 0x840: 0x3008, 0x841: 0x3008, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x3008, 0x845: 0x0040, + 0x846: 0x3308, 0x847: 0x3008, 0x848: 0x3008, 0x849: 0x0040, 0x84a: 0x3008, 0x84b: 0x3008, + 0x84c: 0x3308, 0x84d: 0x3b08, 0x84e: 0x0040, 0x84f: 0x0040, 0x850: 0x0040, 0x851: 0x0040, + 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0040, 0x855: 0x3008, 0x856: 0x3008, 0x857: 0x0040, + 0x858: 0x0040, 0x859: 0x0040, 0x85a: 0x0040, 0x85b: 0x0040, 0x85c: 0x0040, 0x85d: 0x0040, + 0x85e: 0x0008, 0x85f: 0x0040, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x3308, 0x863: 0x3308, + 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0040, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0040, 0x874: 0x0040, 0x875: 0x0040, + 0x876: 0x0040, 0x877: 0x0040, 0x878: 0x0040, 0x879: 0x0040, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x0040, 0x87d: 0x0040, 0x87e: 0x0040, 0x87f: 0x0040, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3308, 0x882: 0x3308, 0x883: 0x3308, 0x884: 0x3308, 0x885: 0x0040, + 0x886: 0x3008, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3008, 0x88d: 0x3b08, 0x88e: 0x0008, 0x88f: 0x0018, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x3008, + 0x898: 0x0018, 0x899: 0x0018, 0x89a: 0x0018, 0x89b: 0x0018, 0x89c: 0x0018, 0x89d: 0x0018, + 0x89e: 0x0018, 0x89f: 0x0008, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0018, 0x8b1: 0x0018, 0x8b2: 0x0018, 0x8b3: 0x0018, 0x8b4: 0x0018, 0x8b5: 0x0018, + 0x8b6: 0x0018, 0x8b7: 0x0018, 0x8b8: 0x0018, 0x8b9: 0x0018, 0x8ba: 0x0008, 0x8bb: 0x0008, + 0x8bc: 0x0008, 0x8bd: 0x0008, 0x8be: 0x0008, 0x8bf: 0x0008, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0040, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x0040, 0x8c4: 0x0008, 0x8c5: 0x0040, + 0x8c6: 0x0040, 0x8c7: 0x0008, 0x8c8: 0x0008, 0x8c9: 0x0040, 0x8ca: 0x0008, 0x8cb: 0x0040, + 0x8cc: 0x0040, 0x8cd: 0x0008, 0x8ce: 0x0040, 0x8cf: 0x0040, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0008, + 0x8d8: 0x0040, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0008, 0x8dd: 0x0008, + 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0040, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008, + 0x8e4: 0x0040, 0x8e5: 0x0008, 0x8e6: 0x0040, 0x8e7: 0x0008, 0x8e8: 0x0040, 0x8e9: 0x0040, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0040, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0008, 0x8f1: 0x3308, 0x8f2: 0x0008, 0x8f3: 0x0929, 0x8f4: 0x3308, 0x8f5: 0x3308, + 0x8f6: 0x3308, 0x8f7: 0x3308, 0x8f8: 0x3308, 0x8f9: 0x3308, 0x8fa: 0x0040, 0x8fb: 0x3308, + 0x8fc: 0x3308, 0x8fd: 0x0008, 0x8fe: 0x0040, 0x8ff: 0x0040, + // Block 0x24, offset 0x900 + 0x900: 0x0008, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x09d1, 0x904: 0x0008, 0x905: 0x0008, + 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0040, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008, + 0x90c: 0x0008, 0x90d: 0x0a09, 
0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008, + 0x912: 0x0a41, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0a79, + 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0ab1, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0008, 0x925: 0x0008, 0x926: 0x0008, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0ae9, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0040, 0x92e: 0x0040, 0x92f: 0x0040, + 0x930: 0x0040, 0x931: 0x3308, 0x932: 0x3308, 0x933: 0x0b21, 0x934: 0x3308, 0x935: 0x0b59, + 0x936: 0x0b91, 0x937: 0x0bc9, 0x938: 0x0c19, 0x939: 0x0c51, 0x93a: 0x3308, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x3308, 0x93e: 0x3308, 0x93f: 0x3008, + // Block 0x25, offset 0x940 + 0x940: 0x3308, 0x941: 0x0ca1, 0x942: 0x3308, 0x943: 0x3308, 0x944: 0x3b08, 0x945: 0x0018, + 0x946: 0x3308, 0x947: 0x3308, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x3308, 0x94e: 0x3308, 0x94f: 0x3308, 0x950: 0x3308, 0x951: 0x3308, + 0x952: 0x3308, 0x953: 0x0cd9, 0x954: 0x3308, 0x955: 0x3308, 0x956: 0x3308, 0x957: 0x3308, + 0x958: 0x0040, 0x959: 0x3308, 0x95a: 0x3308, 0x95b: 0x3308, 0x95c: 0x3308, 0x95d: 0x0d11, + 0x95e: 0x3308, 0x95f: 0x3308, 0x960: 0x3308, 0x961: 0x3308, 0x962: 0x0d49, 0x963: 0x3308, + 0x964: 0x3308, 0x965: 0x3308, 0x966: 0x3308, 0x967: 0x0d81, 0x968: 0x3308, 0x969: 0x3308, + 0x96a: 0x3308, 0x96b: 0x3308, 0x96c: 0x0db9, 0x96d: 0x3308, 0x96e: 0x3308, 0x96f: 0x3308, + 0x970: 0x3308, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x3308, 0x974: 0x3308, 0x975: 0x3308, + 0x976: 0x3308, 0x977: 0x3308, 0x978: 0x3308, 0x979: 0x0df1, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x0040, 0x97e: 0x0018, 0x97f: 0x0018, + // Block 0x26, offset 0x980 + 0x980: 0x0008, 0x981: 0x0008, 0x982: 0x0008, 0x983: 0x0008, 0x984: 0x0008, 0x985: 0x0008, + 0x986: 0x0008, 0x987: 0x0008, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x0008, 0x98e: 0x0008, 0x98f: 0x0008, 0x990: 0x0008, 0x991: 0x0008, + 0x992: 0x0008, 0x993: 0x0008, 0x994: 0x0008, 0x995: 0x0008, 0x996: 0x0008, 0x997: 0x0008, + 0x998: 0x0008, 0x999: 0x0008, 0x99a: 0x0008, 0x99b: 0x0008, 0x99c: 0x0008, 0x99d: 0x0008, + 0x99e: 0x0008, 0x99f: 0x0008, 0x9a0: 0x0008, 0x9a1: 0x0008, 0x9a2: 0x0008, 0x9a3: 0x0008, + 0x9a4: 0x0008, 0x9a5: 0x0008, 0x9a6: 0x0008, 0x9a7: 0x0008, 0x9a8: 0x0008, 0x9a9: 0x0008, + 0x9aa: 0x0008, 0x9ab: 0x0008, 0x9ac: 0x0039, 0x9ad: 0x0ed1, 0x9ae: 0x0ee9, 0x9af: 0x0008, + 0x9b0: 0x0ef9, 0x9b1: 0x0f09, 0x9b2: 0x0f19, 0x9b3: 0x0f31, 0x9b4: 0x0249, 0x9b5: 0x0f41, + 0x9b6: 0x0259, 0x9b7: 0x0f51, 0x9b8: 0x0359, 0x9b9: 0x0f61, 0x9ba: 0x0f71, 0x9bb: 0x0008, + 0x9bc: 0x00d9, 0x9bd: 0x0f81, 0x9be: 0x0f99, 0x9bf: 0x0269, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0fa9, 0x9c1: 0x0fb9, 0x9c2: 0x0279, 0x9c3: 0x0039, 0x9c4: 0x0fc9, 0x9c5: 0x0fe1, + 0x9c6: 0x059d, 0x9c7: 0x0ee9, 0x9c8: 0x0ef9, 0x9c9: 0x0f09, 0x9ca: 0x0ff9, 0x9cb: 0x1011, + 0x9cc: 0x1029, 0x9cd: 0x0f31, 0x9ce: 0x0008, 0x9cf: 0x0f51, 0x9d0: 0x0f61, 0x9d1: 0x1041, + 0x9d2: 0x00d9, 0x9d3: 0x1059, 0x9d4: 0x05b5, 0x9d5: 0x05b5, 0x9d6: 0x0f99, 0x9d7: 0x0fa9, + 0x9d8: 0x0fb9, 0x9d9: 0x059d, 0x9da: 0x1071, 0x9db: 0x1089, 0x9dc: 0x05cd, 0x9dd: 0x1099, + 0x9de: 0x10b1, 0x9df: 0x10c9, 0x9e0: 0x10e1, 0x9e1: 0x10f9, 0x9e2: 0x0f41, 0x9e3: 0x0269, + 0x9e4: 0x0fb9, 0x9e5: 0x1089, 0x9e6: 0x1099, 0x9e7: 0x10b1, 0x9e8: 0x1111, 0x9e9: 0x10e1, + 0x9ea: 0x10f9, 0x9eb: 0x0008, 0x9ec: 0x0008, 0x9ed: 0x0008, 0x9ee: 0x0008, 0x9ef: 
0x0008, + 0x9f0: 0x0008, 0x9f1: 0x0008, 0x9f2: 0x0008, 0x9f3: 0x0008, 0x9f4: 0x0008, 0x9f5: 0x0008, + 0x9f6: 0x0008, 0x9f7: 0x0008, 0x9f8: 0x1129, 0x9f9: 0x0008, 0x9fa: 0x0008, 0x9fb: 0x0008, + 0x9fc: 0x0008, 0x9fd: 0x0008, 0x9fe: 0x0008, 0x9ff: 0x0008, + // Block 0x28, offset 0xa00 + 0xa00: 0x0008, 0xa01: 0x0008, 0xa02: 0x0008, 0xa03: 0x0008, 0xa04: 0x0008, 0xa05: 0x0008, + 0xa06: 0x0008, 0xa07: 0x0008, 0xa08: 0x0008, 0xa09: 0x0008, 0xa0a: 0x0008, 0xa0b: 0x0008, + 0xa0c: 0x0008, 0xa0d: 0x0008, 0xa0e: 0x0008, 0xa0f: 0x0008, 0xa10: 0x0008, 0xa11: 0x0008, + 0xa12: 0x0008, 0xa13: 0x0008, 0xa14: 0x0008, 0xa15: 0x0008, 0xa16: 0x0008, 0xa17: 0x0008, + 0xa18: 0x0008, 0xa19: 0x0008, 0xa1a: 0x0008, 0xa1b: 0x1141, 0xa1c: 0x1159, 0xa1d: 0x1169, + 0xa1e: 0x1181, 0xa1f: 0x1029, 0xa20: 0x1199, 0xa21: 0x11a9, 0xa22: 0x11c1, 0xa23: 0x11d9, + 0xa24: 0x11f1, 0xa25: 0x1209, 0xa26: 0x1221, 0xa27: 0x05e5, 0xa28: 0x1239, 0xa29: 0x1251, + 0xa2a: 0xe17d, 0xa2b: 0x1269, 0xa2c: 0x1281, 0xa2d: 0x1299, 0xa2e: 0x12b1, 0xa2f: 0x12c9, + 0xa30: 0x12e1, 0xa31: 0x12f9, 0xa32: 0x1311, 0xa33: 0x1329, 0xa34: 0x1341, 0xa35: 0x1359, + 0xa36: 0x1371, 0xa37: 0x1389, 0xa38: 0x05fd, 0xa39: 0x13a1, 0xa3a: 0x13b9, 0xa3b: 0x13d1, + 0xa3c: 0x13e1, 0xa3d: 0x13f9, 0xa3e: 0x1411, 0xa3f: 0x1429, + // Block 0x29, offset 0xa40 + 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008, + 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008, + 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008, + 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0xe00d, 0xa57: 0x0008, + 0xa58: 0xe00d, 0xa59: 0x0008, 0xa5a: 0xe00d, 0xa5b: 0x0008, 0xa5c: 0xe00d, 0xa5d: 0x0008, + 0xa5e: 0xe00d, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008, + 0xa64: 0xe00d, 0xa65: 0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008, + 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008, + 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008, + 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008, + 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008, + 0xa98: 0x0008, 0xa99: 0x0008, 0xa9a: 0x0615, 0xa9b: 0x0635, 0xa9c: 0x0008, 0xa9d: 0x0008, + 0xa9e: 0x1441, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0008, 0xac1: 0x0008, 0xac2: 0x0008, 0xac3: 0x0008, 0xac4: 0x0008, 0xac5: 0x0008, + 0xac6: 0x0040, 0xac7: 0x0040, 0xac8: 0xe045, 0xac9: 0xe045, 0xaca: 0xe045, 0xacb: 0xe045, + 0xacc: 0xe045, 0xacd: 0xe045, 0xace: 0x0040, 
0xacf: 0x0040, 0xad0: 0x0008, 0xad1: 0x0008, + 0xad2: 0x0008, 0xad3: 0x0008, 0xad4: 0x0008, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0040, 0xad9: 0xe045, 0xada: 0x0040, 0xadb: 0xe045, 0xadc: 0x0040, 0xadd: 0xe045, + 0xade: 0x0040, 0xadf: 0xe045, 0xae0: 0x0008, 0xae1: 0x0008, 0xae2: 0x0008, 0xae3: 0x0008, + 0xae4: 0x0008, 0xae5: 0x0008, 0xae6: 0x0008, 0xae7: 0x0008, 0xae8: 0xe045, 0xae9: 0xe045, + 0xaea: 0xe045, 0xaeb: 0xe045, 0xaec: 0xe045, 0xaed: 0xe045, 0xaee: 0xe045, 0xaef: 0xe045, + 0xaf0: 0x0008, 0xaf1: 0x1459, 0xaf2: 0x0008, 0xaf3: 0x1471, 0xaf4: 0x0008, 0xaf5: 0x1489, + 0xaf6: 0x0008, 0xaf7: 0x14a1, 0xaf8: 0x0008, 0xaf9: 0x14b9, 0xafa: 0x0008, 0xafb: 0x14d1, + 0xafc: 0x0008, 0xafd: 0x14e9, 0xafe: 0x0040, 0xaff: 0x0040, + // Block 0x2c, offset 0xb00 + 0xb00: 0x1501, 0xb01: 0x1531, 0xb02: 0x1561, 0xb03: 0x1591, 0xb04: 0x15c1, 0xb05: 0x15f1, + 0xb06: 0x1621, 0xb07: 0x1651, 0xb08: 0x1501, 0xb09: 0x1531, 0xb0a: 0x1561, 0xb0b: 0x1591, + 0xb0c: 0x15c1, 0xb0d: 0x15f1, 0xb0e: 0x1621, 0xb0f: 0x1651, 0xb10: 0x1681, 0xb11: 0x16b1, + 0xb12: 0x16e1, 0xb13: 0x1711, 0xb14: 0x1741, 0xb15: 0x1771, 0xb16: 0x17a1, 0xb17: 0x17d1, + 0xb18: 0x1681, 0xb19: 0x16b1, 0xb1a: 0x16e1, 0xb1b: 0x1711, 0xb1c: 0x1741, 0xb1d: 0x1771, + 0xb1e: 0x17a1, 0xb1f: 0x17d1, 0xb20: 0x1801, 0xb21: 0x1831, 0xb22: 0x1861, 0xb23: 0x1891, + 0xb24: 0x18c1, 0xb25: 0x18f1, 0xb26: 0x1921, 0xb27: 0x1951, 0xb28: 0x1801, 0xb29: 0x1831, + 0xb2a: 0x1861, 0xb2b: 0x1891, 0xb2c: 0x18c1, 0xb2d: 0x18f1, 0xb2e: 0x1921, 0xb2f: 0x1951, + 0xb30: 0x0008, 0xb31: 0x0008, 0xb32: 0x1981, 0xb33: 0x19b1, 0xb34: 0x19d9, 0xb35: 0x0040, + 0xb36: 0x0008, 0xb37: 0x1a01, 0xb38: 0xe045, 0xb39: 0xe045, 0xb3a: 0x064d, 0xb3b: 0x1459, + 0xb3c: 0x19b1, 0xb3d: 0x0666, 0xb3e: 0x1a31, 0xb3f: 0x0686, + // Block 0x2d, offset 0xb40 + 0xb40: 0x06a6, 0xb41: 0x1a4a, 0xb42: 0x1a79, 0xb43: 0x1aa9, 0xb44: 0x1ad1, 0xb45: 0x0040, + 0xb46: 0x0008, 0xb47: 0x1af9, 0xb48: 0x06c5, 0xb49: 0x1471, 0xb4a: 0x06dd, 0xb4b: 0x1489, + 0xb4c: 0x1aa9, 0xb4d: 0x1b2a, 0xb4e: 0x1b5a, 0xb4f: 0x1b8a, 0xb50: 0x0008, 0xb51: 0x0008, + 0xb52: 0x0008, 0xb53: 0x1bb9, 0xb54: 0x0040, 0xb55: 0x0040, 0xb56: 0x0008, 0xb57: 0x0008, + 0xb58: 0xe045, 0xb59: 0xe045, 0xb5a: 0x06f5, 0xb5b: 0x14a1, 0xb5c: 0x0040, 0xb5d: 0x1bd2, + 0xb5e: 0x1c02, 0xb5f: 0x1c32, 0xb60: 0x0008, 0xb61: 0x0008, 0xb62: 0x0008, 0xb63: 0x1c61, + 0xb64: 0x0008, 0xb65: 0x0008, 0xb66: 0x0008, 0xb67: 0x0008, 0xb68: 0xe045, 0xb69: 0xe045, + 0xb6a: 0x070d, 0xb6b: 0x14d1, 0xb6c: 0xe04d, 0xb6d: 0x1c7a, 0xb6e: 0x03d2, 0xb6f: 0x1caa, + 0xb70: 0x0040, 0xb71: 0x0040, 0xb72: 0x1cb9, 0xb73: 0x1ce9, 0xb74: 0x1d11, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1d39, 0xb78: 0x0725, 0xb79: 0x14b9, 0xb7a: 0x0515, 0xb7b: 0x14e9, + 0xb7c: 0x1ce9, 0xb7d: 0x073e, 0xb7e: 0x075e, 0xb7f: 0x0040, + // Block 0x2e, offset 0xb80 + 0xb80: 0x000a, 0xb81: 0x000a, 0xb82: 0x000a, 0xb83: 0x000a, 0xb84: 0x000a, 0xb85: 0x000a, + 0xb86: 0x000a, 0xb87: 0x000a, 0xb88: 0x000a, 0xb89: 0x000a, 0xb8a: 0x000a, 0xb8b: 0x03c0, + 0xb8c: 0x0003, 0xb8d: 0x0003, 0xb8e: 0x0340, 0xb8f: 0x0b40, 0xb90: 0x0018, 0xb91: 0xe00d, + 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x077e, + 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018, + 0xb9e: 0x0018, 0xb9f: 0x0018, 0xba0: 0x0018, 0xba1: 0x0018, 0xba2: 0x0018, 0xba3: 0x0018, + 0xba4: 0x0040, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0018, 0xba8: 0x0040, 0xba9: 0x0040, + 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x000a, + 0xbb0: 
0x0018, 0xbb1: 0x0018, 0xbb2: 0x0018, 0xbb3: 0x1d69, 0xbb4: 0x1da1, 0xbb5: 0x0018, + 0xbb6: 0x1df1, 0xbb7: 0x1e29, 0xbb8: 0x0018, 0xbb9: 0x0018, 0xbba: 0x0018, 0xbbb: 0x0018, + 0xbbc: 0x1e7a, 0xbbd: 0x0018, 0xbbe: 0x079e, 0xbbf: 0x0018, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x0018, 0xbc1: 0x0018, 0xbc2: 0x0018, 0xbc3: 0x0018, 0xbc4: 0x0018, 0xbc5: 0x0018, + 0xbc6: 0x0018, 0xbc7: 0x1e92, 0xbc8: 0x1eaa, 0xbc9: 0x1ec2, 0xbca: 0x0018, 0xbcb: 0x0018, + 0xbcc: 0x0018, 0xbcd: 0x0018, 0xbce: 0x0018, 0xbcf: 0x0018, 0xbd0: 0x0018, 0xbd1: 0x0018, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x1ed9, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x000a, 0xbe0: 0x03c0, 0xbe1: 0x0340, 0xbe2: 0x0340, 0xbe3: 0x0340, + 0xbe4: 0x03c0, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0040, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x0340, + 0xbf0: 0x1f41, 0xbf1: 0x0f41, 0xbf2: 0x0040, 0xbf3: 0x0040, 0xbf4: 0x1f51, 0xbf5: 0x1f61, + 0xbf6: 0x1f71, 0xbf7: 0x1f81, 0xbf8: 0x1f91, 0xbf9: 0x1fa1, 0xbfa: 0x1fb2, 0xbfb: 0x07bd, + 0xbfc: 0x1fc2, 0xbfd: 0x1fd2, 0xbfe: 0x1fe2, 0xbff: 0x0f71, + // Block 0x30, offset 0xc00 + 0xc00: 0x1f41, 0xc01: 0x00c9, 0xc02: 0x0069, 0xc03: 0x0079, 0xc04: 0x1f51, 0xc05: 0x1f61, + 0xc06: 0x1f71, 0xc07: 0x1f81, 0xc08: 0x1f91, 0xc09: 0x1fa1, 0xc0a: 0x1fb2, 0xc0b: 0x07d5, + 0xc0c: 0x1fc2, 0xc0d: 0x1fd2, 0xc0e: 0x1fe2, 0xc0f: 0x0040, 0xc10: 0x0039, 0xc11: 0x0f09, + 0xc12: 0x00d9, 0xc13: 0x0369, 0xc14: 0x0ff9, 0xc15: 0x0249, 0xc16: 0x0f51, 0xc17: 0x0359, + 0xc18: 0x0f61, 0xc19: 0x0f71, 0xc1a: 0x0f99, 0xc1b: 0x01d9, 0xc1c: 0x0fa9, 0xc1d: 0x0040, + 0xc1e: 0x0040, 0xc1f: 0x0040, 0xc20: 0x0018, 0xc21: 0x0018, 0xc22: 0x0018, 0xc23: 0x0018, + 0xc24: 0x0018, 0xc25: 0x0018, 0xc26: 0x0018, 0xc27: 0x0018, 0xc28: 0x1ff1, 0xc29: 0x0018, + 0xc2a: 0x0018, 0xc2b: 0x0018, 0xc2c: 0x0018, 0xc2d: 0x0018, 0xc2e: 0x0018, 0xc2f: 0x0018, + 0xc30: 0x0018, 0xc31: 0x0018, 0xc32: 0x0018, 0xc33: 0x0018, 0xc34: 0x0018, 0xc35: 0x0018, + 0xc36: 0x0018, 0xc37: 0x0018, 0xc38: 0x0018, 0xc39: 0x0018, 0xc3a: 0x0018, 0xc3b: 0x0018, + 0xc3c: 0x0018, 0xc3d: 0x0018, 0xc3e: 0x0018, 0xc3f: 0x0040, + // Block 0x31, offset 0xc40 + 0xc40: 0x07ee, 0xc41: 0x080e, 0xc42: 0x1159, 0xc43: 0x082d, 0xc44: 0x0018, 0xc45: 0x084e, + 0xc46: 0x086e, 0xc47: 0x1011, 0xc48: 0x0018, 0xc49: 0x088d, 0xc4a: 0x0f31, 0xc4b: 0x0249, + 0xc4c: 0x0249, 0xc4d: 0x0249, 0xc4e: 0x0249, 0xc4f: 0x2009, 0xc50: 0x0f41, 0xc51: 0x0f41, + 0xc52: 0x0359, 0xc53: 0x0359, 0xc54: 0x0018, 0xc55: 0x0f71, 0xc56: 0x2021, 0xc57: 0x0018, + 0xc58: 0x0018, 0xc59: 0x0f99, 0xc5a: 0x2039, 0xc5b: 0x0269, 0xc5c: 0x0269, 0xc5d: 0x0269, + 0xc5e: 0x0018, 0xc5f: 0x0018, 0xc60: 0x2049, 0xc61: 0x08ad, 0xc62: 0x2061, 0xc63: 0x0018, + 0xc64: 0x13d1, 0xc65: 0x0018, 0xc66: 0x2079, 0xc67: 0x0018, 0xc68: 0x13d1, 0xc69: 0x0018, + 0xc6a: 0x0f51, 0xc6b: 0x2091, 0xc6c: 0x0ee9, 0xc6d: 0x1159, 0xc6e: 0x0018, 0xc6f: 0x0f09, + 0xc70: 0x0f09, 0xc71: 0x1199, 0xc72: 0x0040, 0xc73: 0x0f61, 0xc74: 0x00d9, 0xc75: 0x20a9, + 0xc76: 0x20c1, 0xc77: 0x20d9, 0xc78: 0x20f1, 0xc79: 0x0f41, 0xc7a: 0x0018, 0xc7b: 0x08cd, + 0xc7c: 0x2109, 0xc7d: 0x10b1, 0xc7e: 0x10b1, 0xc7f: 0x2109, + // Block 0x32, offset 0xc80 + 0xc80: 0x08ed, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0ef9, + 0xc86: 0x0ef9, 0xc87: 0x0f09, 0xc88: 0x0f41, 0xc89: 0x0259, 0xc8a: 0x0018, 0xc8b: 0x0018, + 0xc8c: 0x0018, 0xc8d: 0x0018, 0xc8e: 0x0008, 0xc8f: 0x0018, 
0xc90: 0x2121, 0xc91: 0x2151, + 0xc92: 0x2181, 0xc93: 0x21b9, 0xc94: 0x21e9, 0xc95: 0x2219, 0xc96: 0x2249, 0xc97: 0x2279, + 0xc98: 0x22a9, 0xc99: 0x22d9, 0xc9a: 0x2309, 0xc9b: 0x2339, 0xc9c: 0x2369, 0xc9d: 0x2399, + 0xc9e: 0x23c9, 0xc9f: 0x23f9, 0xca0: 0x0f41, 0xca1: 0x2421, 0xca2: 0x0905, 0xca3: 0x2439, + 0xca4: 0x1089, 0xca5: 0x2451, 0xca6: 0x0925, 0xca7: 0x2469, 0xca8: 0x2491, 0xca9: 0x0369, + 0xcaa: 0x24a9, 0xcab: 0x0945, 0xcac: 0x0359, 0xcad: 0x1159, 0xcae: 0x0ef9, 0xcaf: 0x0f61, + 0xcb0: 0x0f41, 0xcb1: 0x2421, 0xcb2: 0x0965, 0xcb3: 0x2439, 0xcb4: 0x1089, 0xcb5: 0x2451, + 0xcb6: 0x0985, 0xcb7: 0x2469, 0xcb8: 0x2491, 0xcb9: 0x0369, 0xcba: 0x24a9, 0xcbb: 0x09a5, + 0xcbc: 0x0359, 0xcbd: 0x1159, 0xcbe: 0x0ef9, 0xcbf: 0x0f61, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x0018, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0018, + 0xcc6: 0x0018, 0xcc7: 0x0018, 0xcc8: 0x0018, 0xcc9: 0x0018, 0xcca: 0x0018, 0xccb: 0x0040, + 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040, + 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040, + 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0040, 0xcdd: 0x0040, + 0xcde: 0x0040, 0xcdf: 0x0040, 0xce0: 0x00c9, 0xce1: 0x0069, 0xce2: 0x0079, 0xce3: 0x1f51, + 0xce4: 0x1f61, 0xce5: 0x1f71, 0xce6: 0x1f81, 0xce7: 0x1f91, 0xce8: 0x1fa1, 0xce9: 0x2601, + 0xcea: 0x2619, 0xceb: 0x2631, 0xcec: 0x2649, 0xced: 0x2661, 0xcee: 0x2679, 0xcef: 0x2691, + 0xcf0: 0x26a9, 0xcf1: 0x26c1, 0xcf2: 0x26d9, 0xcf3: 0x26f1, 0xcf4: 0x0a06, 0xcf5: 0x0a26, + 0xcf6: 0x0a46, 0xcf7: 0x0a66, 0xcf8: 0x0a86, 0xcf9: 0x0aa6, 0xcfa: 0x0ac6, 0xcfb: 0x0ae6, + 0xcfc: 0x0b06, 0xcfd: 0x270a, 0xcfe: 0x2732, 0xcff: 0x275a, + // Block 0x34, offset 0xd00 + 0xd00: 0x2782, 0xd01: 0x27aa, 0xd02: 0x27d2, 0xd03: 0x27fa, 0xd04: 0x2822, 0xd05: 0x284a, + 0xd06: 0x2872, 0xd07: 0x289a, 0xd08: 0x0040, 0xd09: 0x0040, 0xd0a: 0x0040, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0b26, 0xd1d: 0x0b46, + 0xd1e: 0x0b66, 0xd1f: 0x0b86, 0xd20: 0x0ba6, 0xd21: 0x0bc6, 0xd22: 0x0be6, 0xd23: 0x0c06, + 0xd24: 0x0c26, 0xd25: 0x0c46, 0xd26: 0x0c66, 0xd27: 0x0c86, 0xd28: 0x0ca6, 0xd29: 0x0cc6, + 0xd2a: 0x0ce6, 0xd2b: 0x0d06, 0xd2c: 0x0d26, 0xd2d: 0x0d46, 0xd2e: 0x0d66, 0xd2f: 0x0d86, + 0xd30: 0x0da6, 0xd31: 0x0dc6, 0xd32: 0x0de6, 0xd33: 0x0e06, 0xd34: 0x0e26, 0xd35: 0x0e46, + 0xd36: 0x0039, 0xd37: 0x0ee9, 0xd38: 0x1159, 0xd39: 0x0ef9, 0xd3a: 0x0f09, 0xd3b: 0x1199, + 0xd3c: 0x0f31, 0xd3d: 0x0249, 0xd3e: 0x0f41, 0xd3f: 0x0259, + // Block 0x35, offset 0xd40 + 0xd40: 0x0f51, 0xd41: 0x0359, 0xd42: 0x0f61, 0xd43: 0x0f71, 0xd44: 0x00d9, 0xd45: 0x0f99, + 0xd46: 0x2039, 0xd47: 0x0269, 0xd48: 0x01d9, 0xd49: 0x0fa9, 0xd4a: 0x0fb9, 0xd4b: 0x1089, + 0xd4c: 0x0279, 0xd4d: 0x0369, 0xd4e: 0x0289, 0xd4f: 0x13d1, 0xd50: 0x0039, 0xd51: 0x0ee9, + 0xd52: 0x1159, 0xd53: 0x0ef9, 0xd54: 0x0f09, 0xd55: 0x1199, 0xd56: 0x0f31, 0xd57: 0x0249, + 0xd58: 0x0f41, 0xd59: 0x0259, 0xd5a: 0x0f51, 0xd5b: 0x0359, 0xd5c: 0x0f61, 0xd5d: 0x0f71, + 0xd5e: 0x00d9, 0xd5f: 0x0f99, 0xd60: 0x2039, 0xd61: 0x0269, 0xd62: 0x01d9, 0xd63: 0x0fa9, + 0xd64: 0x0fb9, 0xd65: 0x1089, 0xd66: 0x0279, 0xd67: 0x0369, 0xd68: 0x0289, 0xd69: 0x13d1, + 0xd6a: 0x1f41, 0xd6b: 0x0018, 0xd6c: 0x0018, 0xd6d: 0x0018, 0xd6e: 0x0018, 0xd6f: 0x0018, + 0xd70: 0x0018, 0xd71: 
0x0018, 0xd72: 0x0018, 0xd73: 0x0018, 0xd74: 0x0018, 0xd75: 0x0018, + 0xd76: 0x0018, 0xd77: 0x0018, 0xd78: 0x0018, 0xd79: 0x0018, 0xd7a: 0x0018, 0xd7b: 0x0018, + 0xd7c: 0x0018, 0xd7d: 0x0018, 0xd7e: 0x0018, 0xd7f: 0x0018, + // Block 0x36, offset 0xd80 + 0xd80: 0x0008, 0xd81: 0x0008, 0xd82: 0x0008, 0xd83: 0x0008, 0xd84: 0x0008, 0xd85: 0x0008, + 0xd86: 0x0008, 0xd87: 0x0008, 0xd88: 0x0008, 0xd89: 0x0008, 0xd8a: 0x0008, 0xd8b: 0x0008, + 0xd8c: 0x0008, 0xd8d: 0x0008, 0xd8e: 0x0008, 0xd8f: 0x0008, 0xd90: 0x0008, 0xd91: 0x0008, + 0xd92: 0x0008, 0xd93: 0x0008, 0xd94: 0x0008, 0xd95: 0x0008, 0xd96: 0x0008, 0xd97: 0x0008, + 0xd98: 0x0008, 0xd99: 0x0008, 0xd9a: 0x0008, 0xd9b: 0x0008, 0xd9c: 0x0008, 0xd9d: 0x0008, + 0xd9e: 0x0008, 0xd9f: 0x0040, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0x2971, 0xda3: 0x0ebd, + 0xda4: 0x2989, 0xda5: 0x0008, 0xda6: 0x0008, 0xda7: 0xe07d, 0xda8: 0x0008, 0xda9: 0xe01d, + 0xdaa: 0x0008, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0x0fe1, 0xdae: 0x1281, 0xdaf: 0x0fc9, + 0xdb0: 0x1141, 0xdb1: 0x0008, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0008, 0xdb5: 0xe01d, + 0xdb6: 0x0008, 0xdb7: 0x0008, 0xdb8: 0x0008, 0xdb9: 0x0008, 0xdba: 0x0008, 0xdbb: 0x0008, + 0xdbc: 0x0259, 0xdbd: 0x1089, 0xdbe: 0x29a1, 0xdbf: 0x29b9, + // Block 0x37, offset 0xdc0 + 0xdc0: 0xe00d, 0xdc1: 0x0008, 0xdc2: 0xe00d, 0xdc3: 0x0008, 0xdc4: 0xe00d, 0xdc5: 0x0008, + 0xdc6: 0xe00d, 0xdc7: 0x0008, 0xdc8: 0xe00d, 0xdc9: 0x0008, 0xdca: 0xe00d, 0xdcb: 0x0008, + 0xdcc: 0xe00d, 0xdcd: 0x0008, 0xdce: 0xe00d, 0xdcf: 0x0008, 0xdd0: 0xe00d, 0xdd1: 0x0008, + 0xdd2: 0xe00d, 0xdd3: 0x0008, 0xdd4: 0xe00d, 0xdd5: 0x0008, 0xdd6: 0xe00d, 0xdd7: 0x0008, + 0xdd8: 0xe00d, 0xdd9: 0x0008, 0xdda: 0xe00d, 0xddb: 0x0008, 0xddc: 0xe00d, 0xddd: 0x0008, + 0xdde: 0xe00d, 0xddf: 0x0008, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0xe00d, 0xde3: 0x0008, + 0xde4: 0x0008, 0xde5: 0x0018, 0xde6: 0x0018, 0xde7: 0x0018, 0xde8: 0x0018, 0xde9: 0x0018, + 0xdea: 0x0018, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0xe01d, 0xdee: 0x0008, 0xdef: 0x3308, + 0xdf0: 0x3308, 0xdf1: 0x3308, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0040, 0xdf5: 0x0040, + 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0018, 0xdfa: 0x0018, 0xdfb: 0x0018, + 0xdfc: 0x0018, 0xdfd: 0x0018, 0xdfe: 0x0018, 0xdff: 0x0018, + // Block 0x38, offset 0xe00 + 0xe00: 0x26fd, 0xe01: 0x271d, 0xe02: 0x273d, 0xe03: 0x275d, 0xe04: 0x277d, 0xe05: 0x279d, + 0xe06: 0x27bd, 0xe07: 0x27dd, 0xe08: 0x27fd, 0xe09: 0x281d, 0xe0a: 0x283d, 0xe0b: 0x285d, + 0xe0c: 0x287d, 0xe0d: 0x289d, 0xe0e: 0x28bd, 0xe0f: 0x28dd, 0xe10: 0x28fd, 0xe11: 0x291d, + 0xe12: 0x293d, 0xe13: 0x295d, 0xe14: 0x297d, 0xe15: 0x299d, 0xe16: 0x0040, 0xe17: 0x0040, + 0xe18: 0x0040, 0xe19: 0x0040, 0xe1a: 0x0040, 0xe1b: 0x0040, 0xe1c: 0x0040, 0xe1d: 0x0040, + 0xe1e: 0x0040, 0xe1f: 0x0040, 0xe20: 0x0040, 0xe21: 0x0040, 0xe22: 0x0040, 0xe23: 0x0040, + 0xe24: 0x0040, 0xe25: 0x0040, 0xe26: 0x0040, 0xe27: 0x0040, 0xe28: 0x0040, 0xe29: 0x0040, + 0xe2a: 0x0040, 0xe2b: 0x0040, 0xe2c: 0x0040, 0xe2d: 0x0040, 0xe2e: 0x0040, 0xe2f: 0x0040, + 0xe30: 0x0040, 0xe31: 0x0040, 0xe32: 0x0040, 0xe33: 0x0040, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0040, 0xe3a: 0x0040, 0xe3b: 0x0040, + 0xe3c: 0x0040, 0xe3d: 0x0040, 0xe3e: 0x0040, 0xe3f: 0x0040, + // Block 0x39, offset 0xe40 + 0xe40: 0x000a, 0xe41: 0x0018, 0xe42: 0x29d1, 0xe43: 0x0018, 0xe44: 0x0018, 0xe45: 0x0008, + 0xe46: 0x0008, 0xe47: 0x0008, 0xe48: 0x0018, 0xe49: 0x0018, 0xe4a: 0x0018, 0xe4b: 0x0018, + 0xe4c: 0x0018, 0xe4d: 0x0018, 0xe4e: 0x0018, 0xe4f: 0x0018, 0xe50: 0x0018, 
0xe51: 0x0018, + 0xe52: 0x0018, 0xe53: 0x0018, 0xe54: 0x0018, 0xe55: 0x0018, 0xe56: 0x0018, 0xe57: 0x0018, + 0xe58: 0x0018, 0xe59: 0x0018, 0xe5a: 0x0018, 0xe5b: 0x0018, 0xe5c: 0x0018, 0xe5d: 0x0018, + 0xe5e: 0x0018, 0xe5f: 0x0018, 0xe60: 0x0018, 0xe61: 0x0018, 0xe62: 0x0018, 0xe63: 0x0018, + 0xe64: 0x0018, 0xe65: 0x0018, 0xe66: 0x0018, 0xe67: 0x0018, 0xe68: 0x0018, 0xe69: 0x0018, + 0xe6a: 0x3308, 0xe6b: 0x3308, 0xe6c: 0x3308, 0xe6d: 0x3308, 0xe6e: 0x3018, 0xe6f: 0x3018, + 0xe70: 0x0018, 0xe71: 0x0018, 0xe72: 0x0018, 0xe73: 0x0018, 0xe74: 0x0018, 0xe75: 0x0018, + 0xe76: 0xe125, 0xe77: 0x0018, 0xe78: 0x29bd, 0xe79: 0x29dd, 0xe7a: 0x29fd, 0xe7b: 0x0018, + 0xe7c: 0x0008, 0xe7d: 0x0018, 0xe7e: 0x0018, 0xe7f: 0x0018, + // Block 0x3a, offset 0xe80 + 0xe80: 0x2b3d, 0xe81: 0x2b5d, 0xe82: 0x2b7d, 0xe83: 0x2b9d, 0xe84: 0x2bbd, 0xe85: 0x2bdd, + 0xe86: 0x2bdd, 0xe87: 0x2bdd, 0xe88: 0x2bfd, 0xe89: 0x2bfd, 0xe8a: 0x2bfd, 0xe8b: 0x2bfd, + 0xe8c: 0x2c1d, 0xe8d: 0x2c1d, 0xe8e: 0x2c1d, 0xe8f: 0x2c3d, 0xe90: 0x2c5d, 0xe91: 0x2c5d, + 0xe92: 0x2a7d, 0xe93: 0x2a7d, 0xe94: 0x2c5d, 0xe95: 0x2c5d, 0xe96: 0x2c7d, 0xe97: 0x2c7d, + 0xe98: 0x2c5d, 0xe99: 0x2c5d, 0xe9a: 0x2a7d, 0xe9b: 0x2a7d, 0xe9c: 0x2c5d, 0xe9d: 0x2c5d, + 0xe9e: 0x2c3d, 0xe9f: 0x2c3d, 0xea0: 0x2c9d, 0xea1: 0x2c9d, 0xea2: 0x2cbd, 0xea3: 0x2cbd, + 0xea4: 0x0040, 0xea5: 0x2cdd, 0xea6: 0x2cfd, 0xea7: 0x2d1d, 0xea8: 0x2d1d, 0xea9: 0x2d3d, + 0xeaa: 0x2d5d, 0xeab: 0x2d7d, 0xeac: 0x2d9d, 0xead: 0x2dbd, 0xeae: 0x2ddd, 0xeaf: 0x2dfd, + 0xeb0: 0x2e1d, 0xeb1: 0x2e3d, 0xeb2: 0x2e3d, 0xeb3: 0x2e5d, 0xeb4: 0x2e7d, 0xeb5: 0x2e7d, + 0xeb6: 0x2e9d, 0xeb7: 0x2ebd, 0xeb8: 0x2e5d, 0xeb9: 0x2edd, 0xeba: 0x2efd, 0xebb: 0x2edd, + 0xebc: 0x2e5d, 0xebd: 0x2f1d, 0xebe: 0x2f3d, 0xebf: 0x2f5d, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2f7d, 0xec1: 0x2f9d, 0xec2: 0x2cfd, 0xec3: 0x2cdd, 0xec4: 0x2fbd, 0xec5: 0x2fdd, + 0xec6: 0x2ffd, 0xec7: 0x301d, 0xec8: 0x303d, 0xec9: 0x305d, 0xeca: 0x307d, 0xecb: 0x309d, + 0xecc: 0x30bd, 0xecd: 0x30dd, 0xece: 0x30fd, 0xecf: 0x0040, 0xed0: 0x0018, 0xed1: 0x0018, + 0xed2: 0x311d, 0xed3: 0x313d, 0xed4: 0x315d, 0xed5: 0x317d, 0xed6: 0x319d, 0xed7: 0x31bd, + 0xed8: 0x31dd, 0xed9: 0x31fd, 0xeda: 0x321d, 0xedb: 0x323d, 0xedc: 0x315d, 0xedd: 0x325d, + 0xede: 0x327d, 0xedf: 0x329d, 0xee0: 0x0008, 0xee1: 0x0008, 0xee2: 0x0008, 0xee3: 0x0008, + 0xee4: 0x0008, 0xee5: 0x0008, 0xee6: 0x0008, 0xee7: 0x0008, 0xee8: 0x0008, 0xee9: 0x0008, + 0xeea: 0x0008, 0xeeb: 0x0008, 0xeec: 0x0008, 0xeed: 0x0008, 0xeee: 0x0008, 0xeef: 0x0008, + 0xef0: 0x0008, 0xef1: 0x0008, 0xef2: 0x0008, 0xef3: 0x0008, 0xef4: 0x0008, 0xef5: 0x0008, + 0xef6: 0x0008, 0xef7: 0x0008, 0xef8: 0x0008, 0xef9: 0x0008, 0xefa: 0x0008, 0xefb: 0x0040, + 0xefc: 0x0040, 0xefd: 0x0040, 0xefe: 0x0040, 0xeff: 0x0040, + // Block 0x3c, offset 0xf00 + 0xf00: 0x36a2, 0xf01: 0x36d2, 0xf02: 0x3702, 0xf03: 0x3732, 0xf04: 0x32bd, 0xf05: 0x32dd, + 0xf06: 0x32fd, 0xf07: 0x331d, 0xf08: 0x0018, 0xf09: 0x0018, 0xf0a: 0x0018, 0xf0b: 0x0018, + 0xf0c: 0x0018, 0xf0d: 0x0018, 0xf0e: 0x0018, 0xf0f: 0x0018, 0xf10: 0x333d, 0xf11: 0x3761, + 0xf12: 0x3779, 0xf13: 0x3791, 0xf14: 0x37a9, 0xf15: 0x37c1, 0xf16: 0x37d9, 0xf17: 0x37f1, + 0xf18: 0x3809, 0xf19: 0x3821, 0xf1a: 0x3839, 0xf1b: 0x3851, 0xf1c: 0x3869, 0xf1d: 0x3881, + 0xf1e: 0x3899, 0xf1f: 0x38b1, 0xf20: 0x335d, 0xf21: 0x337d, 0xf22: 0x339d, 0xf23: 0x33bd, + 0xf24: 0x33dd, 0xf25: 0x33dd, 0xf26: 0x33fd, 0xf27: 0x341d, 0xf28: 0x343d, 0xf29: 0x345d, + 0xf2a: 0x347d, 0xf2b: 0x349d, 0xf2c: 0x34bd, 0xf2d: 0x34dd, 0xf2e: 0x34fd, 0xf2f: 0x351d, + 0xf30: 0x353d, 0xf31: 0x355d, 0xf32: 
0x357d, 0xf33: 0x359d, 0xf34: 0x35bd, 0xf35: 0x35dd, + 0xf36: 0x35fd, 0xf37: 0x361d, 0xf38: 0x363d, 0xf39: 0x365d, 0xf3a: 0x367d, 0xf3b: 0x369d, + 0xf3c: 0x38c9, 0xf3d: 0x3901, 0xf3e: 0x36bd, 0xf3f: 0x0018, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36dd, 0xf41: 0x36fd, 0xf42: 0x371d, 0xf43: 0x373d, 0xf44: 0x375d, 0xf45: 0x377d, + 0xf46: 0x379d, 0xf47: 0x37bd, 0xf48: 0x37dd, 0xf49: 0x37fd, 0xf4a: 0x381d, 0xf4b: 0x383d, + 0xf4c: 0x385d, 0xf4d: 0x387d, 0xf4e: 0x389d, 0xf4f: 0x38bd, 0xf50: 0x38dd, 0xf51: 0x38fd, + 0xf52: 0x391d, 0xf53: 0x393d, 0xf54: 0x395d, 0xf55: 0x397d, 0xf56: 0x399d, 0xf57: 0x39bd, + 0xf58: 0x39dd, 0xf59: 0x39fd, 0xf5a: 0x3a1d, 0xf5b: 0x3a3d, 0xf5c: 0x3a5d, 0xf5d: 0x3a7d, + 0xf5e: 0x3a9d, 0xf5f: 0x3abd, 0xf60: 0x3add, 0xf61: 0x3afd, 0xf62: 0x3b1d, 0xf63: 0x3b3d, + 0xf64: 0x3b5d, 0xf65: 0x3b7d, 0xf66: 0x127d, 0xf67: 0x3b9d, 0xf68: 0x3bbd, 0xf69: 0x3bdd, + 0xf6a: 0x3bfd, 0xf6b: 0x3c1d, 0xf6c: 0x3c3d, 0xf6d: 0x3c5d, 0xf6e: 0x239d, 0xf6f: 0x3c7d, + 0xf70: 0x3c9d, 0xf71: 0x3939, 0xf72: 0x3951, 0xf73: 0x3969, 0xf74: 0x3981, 0xf75: 0x3999, + 0xf76: 0x39b1, 0xf77: 0x39c9, 0xf78: 0x39e1, 0xf79: 0x39f9, 0xf7a: 0x3a11, 0xf7b: 0x3a29, + 0xf7c: 0x3a41, 0xf7d: 0x3a59, 0xf7e: 0x3a71, 0xf7f: 0x3a89, + // Block 0x3e, offset 0xf80 + 0xf80: 0x3aa1, 0xf81: 0x3ac9, 0xf82: 0x3af1, 0xf83: 0x3b19, 0xf84: 0x3b41, 0xf85: 0x3b69, + 0xf86: 0x3b91, 0xf87: 0x3bb9, 0xf88: 0x3be1, 0xf89: 0x3c09, 0xf8a: 0x3c39, 0xf8b: 0x3c69, + 0xf8c: 0x3c99, 0xf8d: 0x3cbd, 0xf8e: 0x3cb1, 0xf8f: 0x3cdd, 0xf90: 0x3cfd, 0xf91: 0x3d15, + 0xf92: 0x3d2d, 0xf93: 0x3d45, 0xf94: 0x3d5d, 0xf95: 0x3d5d, 0xf96: 0x3d45, 0xf97: 0x3d75, + 0xf98: 0x07bd, 0xf99: 0x3d8d, 0xf9a: 0x3da5, 0xf9b: 0x3dbd, 0xf9c: 0x3dd5, 0xf9d: 0x3ded, + 0xf9e: 0x3e05, 0xf9f: 0x3e1d, 0xfa0: 0x3e35, 0xfa1: 0x3e4d, 0xfa2: 0x3e65, 0xfa3: 0x3e7d, + 0xfa4: 0x3e95, 0xfa5: 0x3e95, 0xfa6: 0x3ead, 0xfa7: 0x3ead, 0xfa8: 0x3ec5, 0xfa9: 0x3ec5, + 0xfaa: 0x3edd, 0xfab: 0x3ef5, 0xfac: 0x3f0d, 0xfad: 0x3f25, 0xfae: 0x3f3d, 0xfaf: 0x3f3d, + 0xfb0: 0x3f55, 0xfb1: 0x3f55, 0xfb2: 0x3f55, 0xfb3: 0x3f6d, 0xfb4: 0x3f85, 0xfb5: 0x3f9d, + 0xfb6: 0x3fb5, 0xfb7: 0x3f9d, 0xfb8: 0x3fcd, 0xfb9: 0x3fe5, 0xfba: 0x3f6d, 0xfbb: 0x3ffd, + 0xfbc: 0x4015, 0xfbd: 0x4015, 0xfbe: 0x4015, 0xfbf: 0x0040, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3cc9, 0xfc1: 0x3d31, 0xfc2: 0x3d99, 0xfc3: 0x3e01, 0xfc4: 0x3e51, 0xfc5: 0x3eb9, + 0xfc6: 0x3f09, 0xfc7: 0x3f59, 0xfc8: 0x3fd9, 0xfc9: 0x4041, 0xfca: 0x4091, 0xfcb: 0x40e1, + 0xfcc: 0x4131, 0xfcd: 0x4199, 0xfce: 0x4201, 0xfcf: 0x4251, 0xfd0: 0x42a1, 0xfd1: 0x42d9, + 0xfd2: 0x4329, 0xfd3: 0x4391, 0xfd4: 0x43f9, 0xfd5: 0x4431, 0xfd6: 0x44b1, 0xfd7: 0x4549, + 0xfd8: 0x45c9, 0xfd9: 0x4619, 0xfda: 0x4699, 0xfdb: 0x4719, 0xfdc: 0x4781, 0xfdd: 0x47d1, + 0xfde: 0x4821, 0xfdf: 0x4871, 0xfe0: 0x48d9, 0xfe1: 0x4959, 0xfe2: 0x49c1, 0xfe3: 0x4a11, + 0xfe4: 0x4a61, 0xfe5: 0x4ab1, 0xfe6: 0x4ae9, 0xfe7: 0x4b21, 0xfe8: 0x4b59, 0xfe9: 0x4b91, + 0xfea: 0x4be1, 0xfeb: 0x4c31, 0xfec: 0x4cb1, 0xfed: 0x4d01, 0xfee: 0x4d69, 0xfef: 0x4de9, + 0xff0: 0x4e39, 0xff1: 0x4e71, 0xff2: 0x4ea9, 0xff3: 0x4f29, 0xff4: 0x4f91, 0xff5: 0x5011, + 0xff6: 0x5061, 0xff7: 0x50e1, 0xff8: 0x5119, 0xff9: 0x5169, 0xffa: 0x51b9, 0xffb: 0x5209, + 0xffc: 0x5259, 0xffd: 0x52a9, 0xffe: 0x5311, 0xfff: 0x5361, + // Block 0x40, offset 0x1000 + 0x1000: 0x5399, 0x1001: 0x53e9, 0x1002: 0x5439, 0x1003: 0x5489, 0x1004: 0x54f1, 0x1005: 0x5541, + 0x1006: 0x5591, 0x1007: 0x55e1, 0x1008: 0x5661, 0x1009: 0x56c9, 0x100a: 0x5701, 0x100b: 0x5781, + 0x100c: 0x57b9, 0x100d: 0x5821, 0x100e: 0x5889, 0x100f: 0x58d9, 0x1010: 0x5929, 
0x1011: 0x5979, + 0x1012: 0x59e1, 0x1013: 0x5a19, 0x1014: 0x5a69, 0x1015: 0x5ad1, 0x1016: 0x5b09, 0x1017: 0x5b89, + 0x1018: 0x5bd9, 0x1019: 0x5c01, 0x101a: 0x5c29, 0x101b: 0x5c51, 0x101c: 0x5c79, 0x101d: 0x5ca1, + 0x101e: 0x5cc9, 0x101f: 0x5cf1, 0x1020: 0x5d19, 0x1021: 0x5d41, 0x1022: 0x5d69, 0x1023: 0x5d99, + 0x1024: 0x5dc9, 0x1025: 0x5df9, 0x1026: 0x5e29, 0x1027: 0x5e59, 0x1028: 0x5e89, 0x1029: 0x5eb9, + 0x102a: 0x5ee9, 0x102b: 0x5f19, 0x102c: 0x5f49, 0x102d: 0x5f79, 0x102e: 0x5fa9, 0x102f: 0x5fd9, + 0x1030: 0x6009, 0x1031: 0x402d, 0x1032: 0x6039, 0x1033: 0x6051, 0x1034: 0x404d, 0x1035: 0x6069, + 0x1036: 0x6081, 0x1037: 0x6099, 0x1038: 0x406d, 0x1039: 0x406d, 0x103a: 0x60b1, 0x103b: 0x60c9, + 0x103c: 0x6101, 0x103d: 0x6139, 0x103e: 0x6171, 0x103f: 0x61a9, + // Block 0x41, offset 0x1040 + 0x1040: 0x6211, 0x1041: 0x6229, 0x1042: 0x408d, 0x1043: 0x6241, 0x1044: 0x6259, 0x1045: 0x6271, + 0x1046: 0x6289, 0x1047: 0x62a1, 0x1048: 0x40ad, 0x1049: 0x62b9, 0x104a: 0x62e1, 0x104b: 0x62f9, + 0x104c: 0x40cd, 0x104d: 0x40cd, 0x104e: 0x6311, 0x104f: 0x6329, 0x1050: 0x6341, 0x1051: 0x40ed, + 0x1052: 0x410d, 0x1053: 0x412d, 0x1054: 0x414d, 0x1055: 0x416d, 0x1056: 0x6359, 0x1057: 0x6371, + 0x1058: 0x6389, 0x1059: 0x63a1, 0x105a: 0x63b9, 0x105b: 0x418d, 0x105c: 0x63d1, 0x105d: 0x63e9, + 0x105e: 0x6401, 0x105f: 0x41ad, 0x1060: 0x41cd, 0x1061: 0x6419, 0x1062: 0x41ed, 0x1063: 0x420d, + 0x1064: 0x422d, 0x1065: 0x6431, 0x1066: 0x424d, 0x1067: 0x6449, 0x1068: 0x6479, 0x1069: 0x6211, + 0x106a: 0x426d, 0x106b: 0x428d, 0x106c: 0x42ad, 0x106d: 0x42cd, 0x106e: 0x64b1, 0x106f: 0x64f1, + 0x1070: 0x6539, 0x1071: 0x6551, 0x1072: 0x42ed, 0x1073: 0x6569, 0x1074: 0x6581, 0x1075: 0x6599, + 0x1076: 0x430d, 0x1077: 0x65b1, 0x1078: 0x65c9, 0x1079: 0x65b1, 0x107a: 0x65e1, 0x107b: 0x65f9, + 0x107c: 0x432d, 0x107d: 0x6611, 0x107e: 0x6629, 0x107f: 0x6611, + // Block 0x42, offset 0x1080 + 0x1080: 0x434d, 0x1081: 0x436d, 0x1082: 0x0040, 0x1083: 0x6641, 0x1084: 0x6659, 0x1085: 0x6671, + 0x1086: 0x6689, 0x1087: 0x0040, 0x1088: 0x66c1, 0x1089: 0x66d9, 0x108a: 0x66f1, 0x108b: 0x6709, + 0x108c: 0x6721, 0x108d: 0x6739, 0x108e: 0x6401, 0x108f: 0x6751, 0x1090: 0x6769, 0x1091: 0x6781, + 0x1092: 0x438d, 0x1093: 0x6799, 0x1094: 0x6289, 0x1095: 0x43ad, 0x1096: 0x43cd, 0x1097: 0x67b1, + 0x1098: 0x0040, 0x1099: 0x43ed, 0x109a: 0x67c9, 0x109b: 0x67e1, 0x109c: 0x67f9, 0x109d: 0x6811, + 0x109e: 0x6829, 0x109f: 0x6859, 0x10a0: 0x6889, 0x10a1: 0x68b1, 0x10a2: 0x68d9, 0x10a3: 0x6901, + 0x10a4: 0x6929, 0x10a5: 0x6951, 0x10a6: 0x6979, 0x10a7: 0x69a1, 0x10a8: 0x69c9, 0x10a9: 0x69f1, + 0x10aa: 0x6a21, 0x10ab: 0x6a51, 0x10ac: 0x6a81, 0x10ad: 0x6ab1, 0x10ae: 0x6ae1, 0x10af: 0x6b11, + 0x10b0: 0x6b41, 0x10b1: 0x6b71, 0x10b2: 0x6ba1, 0x10b3: 0x6bd1, 0x10b4: 0x6c01, 0x10b5: 0x6c31, + 0x10b6: 0x6c61, 0x10b7: 0x6c91, 0x10b8: 0x6cc1, 0x10b9: 0x6cf1, 0x10ba: 0x6d21, 0x10bb: 0x6d51, + 0x10bc: 0x6d81, 0x10bd: 0x6db1, 0x10be: 0x6de1, 0x10bf: 0x440d, + // Block 0x43, offset 0x10c0 + 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, + 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, + 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, + 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, + 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0xe00d, 0x10dd: 0x0008, + 0x10de: 0xe00d, 0x10df: 0x0008, 0x10e0: 0xe00d, 0x10e1: 0x0008, 0x10e2: 0xe00d, 0x10e3: 0x0008, + 0x10e4: 
0xe00d, 0x10e5: 0x0008, 0x10e6: 0xe00d, 0x10e7: 0x0008, 0x10e8: 0xe00d, 0x10e9: 0x0008, + 0x10ea: 0xe00d, 0x10eb: 0x0008, 0x10ec: 0xe00d, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x3308, + 0x10f0: 0x3318, 0x10f1: 0x3318, 0x10f2: 0x3318, 0x10f3: 0x0018, 0x10f4: 0x3308, 0x10f5: 0x3308, + 0x10f6: 0x3308, 0x10f7: 0x3308, 0x10f8: 0x3308, 0x10f9: 0x3308, 0x10fa: 0x3308, 0x10fb: 0x3308, + 0x10fc: 0x3308, 0x10fd: 0x3308, 0x10fe: 0x0018, 0x10ff: 0x0008, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0x0ea1, 0x111d: 0x6e11, + 0x111e: 0x3308, 0x111f: 0x3308, 0x1120: 0x0008, 0x1121: 0x0008, 0x1122: 0x0008, 0x1123: 0x0008, + 0x1124: 0x0008, 0x1125: 0x0008, 0x1126: 0x0008, 0x1127: 0x0008, 0x1128: 0x0008, 0x1129: 0x0008, + 0x112a: 0x0008, 0x112b: 0x0008, 0x112c: 0x0008, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x0008, + 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0x0008, 0x1133: 0x0008, 0x1134: 0x0008, 0x1135: 0x0008, + 0x1136: 0x0008, 0x1137: 0x0008, 0x1138: 0x0008, 0x1139: 0x0008, 0x113a: 0x0008, 0x113b: 0x0008, + 0x113c: 0x0008, 0x113d: 0x0008, 0x113e: 0x0008, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0x0018, 0x1141: 0x0018, 0x1142: 0x0018, 0x1143: 0x0018, 0x1144: 0x0018, 0x1145: 0x0018, + 0x1146: 0x0018, 0x1147: 0x0018, 0x1148: 0x0018, 0x1149: 0x0018, 0x114a: 0x0018, 0x114b: 0x0018, + 0x114c: 0x0018, 0x114d: 0x0018, 0x114e: 0x0018, 0x114f: 0x0018, 0x1150: 0x0018, 0x1151: 0x0018, + 0x1152: 0x0018, 0x1153: 0x0018, 0x1154: 0x0018, 0x1155: 0x0018, 0x1156: 0x0018, 0x1157: 0x0008, + 0x1158: 0x0008, 0x1159: 0x0008, 0x115a: 0x0008, 0x115b: 0x0008, 0x115c: 0x0008, 0x115d: 0x0008, + 0x115e: 0x0008, 0x115f: 0x0008, 0x1160: 0x0018, 0x1161: 0x0018, 0x1162: 0xe00d, 0x1163: 0x0008, + 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, + 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0xe00d, 0x1173: 0x0008, 0x1174: 0xe00d, 0x1175: 0x0008, + 0x1176: 0xe00d, 0x1177: 0x0008, 0x1178: 0xe00d, 0x1179: 0x0008, 0x117a: 0xe00d, 0x117b: 0x0008, + 0x117c: 0xe00d, 0x117d: 0x0008, 0x117e: 0xe00d, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, + 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0xe00d, 0x1189: 0x0008, 0x118a: 0xe00d, 0x118b: 0x0008, + 0x118c: 0xe00d, 0x118d: 0x0008, 0x118e: 0xe00d, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, + 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0xe00d, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, + 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, + 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0xe0fd, 0x11b1: 0x0008, 0x11b2: 0x0008, 0x11b3: 0x0008, 0x11b4: 0x0008, 0x11b5: 0x0008, + 0x11b6: 0x0008, 0x11b7: 0x0008, 
0x11b8: 0x0008, 0x11b9: 0xe01d, 0x11ba: 0x0008, 0x11bb: 0xe03d, + 0x11bc: 0x0008, 0x11bd: 0x442d, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0x0008, 0x11c9: 0x0018, 0x11ca: 0x0018, 0x11cb: 0xe03d, + 0x11cc: 0x0008, 0x11cd: 0x11d9, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0x6e29, 0x11eb: 0x1029, 0x11ec: 0x11c1, 0x11ed: 0x6e41, 0x11ee: 0x1221, 0x11ef: 0x0040, + 0x11f0: 0x6e59, 0x11f1: 0x6e71, 0x11f2: 0x1239, 0x11f3: 0x444d, 0x11f4: 0xe00d, 0x11f5: 0x0008, + 0x11f6: 0xe00d, 0x11f7: 0x0008, 0x11f8: 0x0040, 0x11f9: 0x0040, 0x11fa: 0x0040, 0x11fb: 0x0040, + 0x11fc: 0x0040, 0x11fd: 0x0040, 0x11fe: 0x0040, 0x11ff: 0x0040, + // Block 0x48, offset 0x1200 + 0x1200: 0x64d5, 0x1201: 0x64f5, 0x1202: 0x6515, 0x1203: 0x6535, 0x1204: 0x6555, 0x1205: 0x6575, + 0x1206: 0x6595, 0x1207: 0x65b5, 0x1208: 0x65d5, 0x1209: 0x65f5, 0x120a: 0x6615, 0x120b: 0x6635, + 0x120c: 0x6655, 0x120d: 0x6675, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0x6695, 0x1211: 0x0008, + 0x1212: 0x66b5, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x66d5, 0x1216: 0x66f5, 0x1217: 0x6715, + 0x1218: 0x6735, 0x1219: 0x6755, 0x121a: 0x6775, 0x121b: 0x6795, 0x121c: 0x67b5, 0x121d: 0x67d5, + 0x121e: 0x67f5, 0x121f: 0x0008, 0x1220: 0x6815, 0x1221: 0x0008, 0x1222: 0x6835, 0x1223: 0x0008, + 0x1224: 0x0008, 0x1225: 0x6855, 0x1226: 0x6875, 0x1227: 0x0008, 0x1228: 0x0008, 0x1229: 0x0008, + 0x122a: 0x6895, 0x122b: 0x68b5, 0x122c: 0x68d5, 0x122d: 0x68f5, 0x122e: 0x6915, 0x122f: 0x6935, + 0x1230: 0x6955, 0x1231: 0x6975, 0x1232: 0x6995, 0x1233: 0x69b5, 0x1234: 0x69d5, 0x1235: 0x69f5, + 0x1236: 0x6a15, 0x1237: 0x6a35, 0x1238: 0x6a55, 0x1239: 0x6a75, 0x123a: 0x6a95, 0x123b: 0x6ab5, + 0x123c: 0x6ad5, 0x123d: 0x6af5, 0x123e: 0x6b15, 0x123f: 0x6b35, + // Block 0x49, offset 0x1240 + 0x1240: 0x7a95, 0x1241: 0x7ab5, 0x1242: 0x7ad5, 0x1243: 0x7af5, 0x1244: 0x7b15, 0x1245: 0x7b35, + 0x1246: 0x7b55, 0x1247: 0x7b75, 0x1248: 0x7b95, 0x1249: 0x7bb5, 0x124a: 0x7bd5, 0x124b: 0x7bf5, + 0x124c: 0x7c15, 0x124d: 0x7c35, 0x124e: 0x7c55, 0x124f: 0x6ec9, 0x1250: 0x6ef1, 0x1251: 0x6f19, + 0x1252: 0x7c75, 0x1253: 0x7c95, 0x1254: 0x7cb5, 0x1255: 0x6f41, 0x1256: 0x6f69, 0x1257: 0x6f91, + 0x1258: 0x7cd5, 0x1259: 0x7cf5, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x0040, + 0x125e: 0x0040, 0x125f: 0x0040, 0x1260: 0x0040, 0x1261: 0x0040, 0x1262: 0x0040, 0x1263: 0x0040, + 0x1264: 0x0040, 0x1265: 0x0040, 0x1266: 0x0040, 0x1267: 0x0040, 0x1268: 0x0040, 0x1269: 0x0040, + 0x126a: 0x0040, 0x126b: 0x0040, 0x126c: 0x0040, 0x126d: 0x0040, 0x126e: 0x0040, 0x126f: 0x0040, + 0x1270: 0x0040, 0x1271: 0x0040, 0x1272: 0x0040, 0x1273: 0x0040, 0x1274: 0x0040, 0x1275: 0x0040, + 0x1276: 0x0040, 0x1277: 0x0040, 0x1278: 0x0040, 0x1279: 0x0040, 0x127a: 0x0040, 0x127b: 0x0040, + 0x127c: 0x0040, 0x127d: 0x0040, 0x127e: 0x0040, 0x127f: 0x0040, + // Block 0x4a, offset 0x1280 + 0x1280: 0x6fb9, 0x1281: 0x6fd1, 0x1282: 0x6fe9, 0x1283: 0x7d15, 0x1284: 0x7d35, 0x1285: 0x7001, + 0x1286: 0x7001, 0x1287: 0x0040, 0x1288: 0x0040, 0x1289: 
0x0040, 0x128a: 0x0040, 0x128b: 0x0040, + 0x128c: 0x0040, 0x128d: 0x0040, 0x128e: 0x0040, 0x128f: 0x0040, 0x1290: 0x0040, 0x1291: 0x0040, + 0x1292: 0x0040, 0x1293: 0x7019, 0x1294: 0x7041, 0x1295: 0x7069, 0x1296: 0x7091, 0x1297: 0x70b9, + 0x1298: 0x0040, 0x1299: 0x0040, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x70e1, + 0x129e: 0x3308, 0x129f: 0x7109, 0x12a0: 0x7131, 0x12a1: 0x20a9, 0x12a2: 0x20f1, 0x12a3: 0x7149, + 0x12a4: 0x7161, 0x12a5: 0x7179, 0x12a6: 0x7191, 0x12a7: 0x71a9, 0x12a8: 0x71c1, 0x12a9: 0x1fb2, + 0x12aa: 0x71d9, 0x12ab: 0x7201, 0x12ac: 0x7229, 0x12ad: 0x7261, 0x12ae: 0x7299, 0x12af: 0x72c1, + 0x12b0: 0x72e9, 0x12b1: 0x7311, 0x12b2: 0x7339, 0x12b3: 0x7361, 0x12b4: 0x7389, 0x12b5: 0x73b1, + 0x12b6: 0x73d9, 0x12b7: 0x0040, 0x12b8: 0x7401, 0x12b9: 0x7429, 0x12ba: 0x7451, 0x12bb: 0x7479, + 0x12bc: 0x74a1, 0x12bd: 0x0040, 0x12be: 0x74c9, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x74f1, 0x12c1: 0x7519, 0x12c2: 0x0040, 0x12c3: 0x7541, 0x12c4: 0x7569, 0x12c5: 0x0040, + 0x12c6: 0x7591, 0x12c7: 0x75b9, 0x12c8: 0x75e1, 0x12c9: 0x7609, 0x12ca: 0x7631, 0x12cb: 0x7659, + 0x12cc: 0x7681, 0x12cd: 0x76a9, 0x12ce: 0x76d1, 0x12cf: 0x76f9, 0x12d0: 0x7721, 0x12d1: 0x7721, + 0x12d2: 0x7739, 0x12d3: 0x7739, 0x12d4: 0x7739, 0x12d5: 0x7739, 0x12d6: 0x7751, 0x12d7: 0x7751, + 0x12d8: 0x7751, 0x12d9: 0x7751, 0x12da: 0x7769, 0x12db: 0x7769, 0x12dc: 0x7769, 0x12dd: 0x7769, + 0x12de: 0x7781, 0x12df: 0x7781, 0x12e0: 0x7781, 0x12e1: 0x7781, 0x12e2: 0x7799, 0x12e3: 0x7799, + 0x12e4: 0x7799, 0x12e5: 0x7799, 0x12e6: 0x77b1, 0x12e7: 0x77b1, 0x12e8: 0x77b1, 0x12e9: 0x77b1, + 0x12ea: 0x77c9, 0x12eb: 0x77c9, 0x12ec: 0x77c9, 0x12ed: 0x77c9, 0x12ee: 0x77e1, 0x12ef: 0x77e1, + 0x12f0: 0x77e1, 0x12f1: 0x77e1, 0x12f2: 0x77f9, 0x12f3: 0x77f9, 0x12f4: 0x77f9, 0x12f5: 0x77f9, + 0x12f6: 0x7811, 0x12f7: 0x7811, 0x12f8: 0x7811, 0x12f9: 0x7811, 0x12fa: 0x7829, 0x12fb: 0x7829, + 0x12fc: 0x7829, 0x12fd: 0x7829, 0x12fe: 0x7841, 0x12ff: 0x7841, + // Block 0x4c, offset 0x1300 + 0x1300: 0x7841, 0x1301: 0x7841, 0x1302: 0x7859, 0x1303: 0x7859, 0x1304: 0x7871, 0x1305: 0x7871, + 0x1306: 0x7889, 0x1307: 0x7889, 0x1308: 0x78a1, 0x1309: 0x78a1, 0x130a: 0x78b9, 0x130b: 0x78b9, + 0x130c: 0x78d1, 0x130d: 0x78d1, 0x130e: 0x78e9, 0x130f: 0x78e9, 0x1310: 0x78e9, 0x1311: 0x78e9, + 0x1312: 0x7901, 0x1313: 0x7901, 0x1314: 0x7901, 0x1315: 0x7901, 0x1316: 0x7919, 0x1317: 0x7919, + 0x1318: 0x7919, 0x1319: 0x7919, 0x131a: 0x7931, 0x131b: 0x7931, 0x131c: 0x7931, 0x131d: 0x7931, + 0x131e: 0x7949, 0x131f: 0x7949, 0x1320: 0x7961, 0x1321: 0x7961, 0x1322: 0x7961, 0x1323: 0x7961, + 0x1324: 0x7979, 0x1325: 0x7979, 0x1326: 0x7991, 0x1327: 0x7991, 0x1328: 0x7991, 0x1329: 0x7991, + 0x132a: 0x79a9, 0x132b: 0x79a9, 0x132c: 0x79a9, 0x132d: 0x79a9, 0x132e: 0x79c1, 0x132f: 0x79c1, + 0x1330: 0x79d9, 0x1331: 0x79d9, 0x1332: 0x0818, 0x1333: 0x0818, 0x1334: 0x0818, 0x1335: 0x0818, + 0x1336: 0x0818, 0x1337: 0x0818, 0x1338: 0x0818, 0x1339: 0x0818, 0x133a: 0x0818, 0x133b: 0x0818, + 0x133c: 0x0818, 0x133d: 0x0818, 0x133e: 0x0818, 0x133f: 0x0818, + // Block 0x4d, offset 0x1340 + 0x1340: 0x0818, 0x1341: 0x0818, 0x1342: 0x0040, 0x1343: 0x0040, 0x1344: 0x0040, 0x1345: 0x0040, + 0x1346: 0x0040, 0x1347: 0x0040, 0x1348: 0x0040, 0x1349: 0x0040, 0x134a: 0x0040, 0x134b: 0x0040, + 0x134c: 0x0040, 0x134d: 0x0040, 0x134e: 0x0040, 0x134f: 0x0040, 0x1350: 0x0040, 0x1351: 0x0040, + 0x1352: 0x0040, 0x1353: 0x79f1, 0x1354: 0x79f1, 0x1355: 0x79f1, 0x1356: 0x79f1, 0x1357: 0x7a09, + 0x1358: 0x7a09, 0x1359: 0x7a21, 0x135a: 0x7a21, 0x135b: 0x7a39, 0x135c: 0x7a39, 
0x135d: 0x0479, + 0x135e: 0x7a51, 0x135f: 0x7a51, 0x1360: 0x7a69, 0x1361: 0x7a69, 0x1362: 0x7a81, 0x1363: 0x7a81, + 0x1364: 0x7a99, 0x1365: 0x7a99, 0x1366: 0x7a99, 0x1367: 0x7a99, 0x1368: 0x7ab1, 0x1369: 0x7ab1, + 0x136a: 0x7ac9, 0x136b: 0x7ac9, 0x136c: 0x7af1, 0x136d: 0x7af1, 0x136e: 0x7b19, 0x136f: 0x7b19, + 0x1370: 0x7b41, 0x1371: 0x7b41, 0x1372: 0x7b69, 0x1373: 0x7b69, 0x1374: 0x7b91, 0x1375: 0x7b91, + 0x1376: 0x7bb9, 0x1377: 0x7bb9, 0x1378: 0x7bb9, 0x1379: 0x7be1, 0x137a: 0x7be1, 0x137b: 0x7be1, + 0x137c: 0x7c09, 0x137d: 0x7c09, 0x137e: 0x7c09, 0x137f: 0x7c09, + // Block 0x4e, offset 0x1380 + 0x1380: 0x85f9, 0x1381: 0x8621, 0x1382: 0x8649, 0x1383: 0x8671, 0x1384: 0x8699, 0x1385: 0x86c1, + 0x1386: 0x86e9, 0x1387: 0x8711, 0x1388: 0x8739, 0x1389: 0x8761, 0x138a: 0x8789, 0x138b: 0x87b1, + 0x138c: 0x87d9, 0x138d: 0x8801, 0x138e: 0x8829, 0x138f: 0x8851, 0x1390: 0x8879, 0x1391: 0x88a1, + 0x1392: 0x88c9, 0x1393: 0x88f1, 0x1394: 0x8919, 0x1395: 0x8941, 0x1396: 0x8969, 0x1397: 0x8991, + 0x1398: 0x89b9, 0x1399: 0x89e1, 0x139a: 0x8a09, 0x139b: 0x8a31, 0x139c: 0x8a59, 0x139d: 0x8a81, + 0x139e: 0x8aaa, 0x139f: 0x8ada, 0x13a0: 0x8b0a, 0x13a1: 0x8b3a, 0x13a2: 0x8b6a, 0x13a3: 0x8b9a, + 0x13a4: 0x8bc9, 0x13a5: 0x8bf1, 0x13a6: 0x7c71, 0x13a7: 0x8c19, 0x13a8: 0x7be1, 0x13a9: 0x7c99, + 0x13aa: 0x8c41, 0x13ab: 0x8c69, 0x13ac: 0x7d39, 0x13ad: 0x8c91, 0x13ae: 0x7d61, 0x13af: 0x7d89, + 0x13b0: 0x8cb9, 0x13b1: 0x8ce1, 0x13b2: 0x7e29, 0x13b3: 0x8d09, 0x13b4: 0x7e51, 0x13b5: 0x7e79, + 0x13b6: 0x8d31, 0x13b7: 0x8d59, 0x13b8: 0x7ec9, 0x13b9: 0x8d81, 0x13ba: 0x7ef1, 0x13bb: 0x7f19, + 0x13bc: 0x83a1, 0x13bd: 0x83c9, 0x13be: 0x8441, 0x13bf: 0x8469, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x8491, 0x13c1: 0x8531, 0x13c2: 0x8559, 0x13c3: 0x8581, 0x13c4: 0x85a9, 0x13c5: 0x8649, + 0x13c6: 0x8671, 0x13c7: 0x8699, 0x13c8: 0x8da9, 0x13c9: 0x8739, 0x13ca: 0x8dd1, 0x13cb: 0x8df9, + 0x13cc: 0x8829, 0x13cd: 0x8e21, 0x13ce: 0x8851, 0x13cf: 0x8879, 0x13d0: 0x8a81, 0x13d1: 0x8e49, + 0x13d2: 0x8e71, 0x13d3: 0x89b9, 0x13d4: 0x8e99, 0x13d5: 0x89e1, 0x13d6: 0x8a09, 0x13d7: 0x7c21, + 0x13d8: 0x7c49, 0x13d9: 0x8ec1, 0x13da: 0x7c71, 0x13db: 0x8ee9, 0x13dc: 0x7cc1, 0x13dd: 0x7ce9, + 0x13de: 0x7d11, 0x13df: 0x7d39, 0x13e0: 0x8f11, 0x13e1: 0x7db1, 0x13e2: 0x7dd9, 0x13e3: 0x7e01, + 0x13e4: 0x7e29, 0x13e5: 0x8f39, 0x13e6: 0x7ec9, 0x13e7: 0x7f41, 0x13e8: 0x7f69, 0x13e9: 0x7f91, + 0x13ea: 0x7fb9, 0x13eb: 0x7fe1, 0x13ec: 0x8031, 0x13ed: 0x8059, 0x13ee: 0x8081, 0x13ef: 0x80a9, + 0x13f0: 0x80d1, 0x13f1: 0x80f9, 0x13f2: 0x8f61, 0x13f3: 0x8121, 0x13f4: 0x8149, 0x13f5: 0x8171, + 0x13f6: 0x8199, 0x13f7: 0x81c1, 0x13f8: 0x81e9, 0x13f9: 0x8239, 0x13fa: 0x8261, 0x13fb: 0x8289, + 0x13fc: 0x82b1, 0x13fd: 0x82d9, 0x13fe: 0x8301, 0x13ff: 0x8329, + // Block 0x50, offset 0x1400 + 0x1400: 0x8351, 0x1401: 0x8379, 0x1402: 0x83f1, 0x1403: 0x8419, 0x1404: 0x84b9, 0x1405: 0x84e1, + 0x1406: 0x8509, 0x1407: 0x8531, 0x1408: 0x8559, 0x1409: 0x85d1, 0x140a: 0x85f9, 0x140b: 0x8621, + 0x140c: 0x8649, 0x140d: 0x8f89, 0x140e: 0x86c1, 0x140f: 0x86e9, 0x1410: 0x8711, 0x1411: 0x8739, + 0x1412: 0x87b1, 0x1413: 0x87d9, 0x1414: 0x8801, 0x1415: 0x8829, 0x1416: 0x8fb1, 0x1417: 0x88a1, + 0x1418: 0x88c9, 0x1419: 0x8fd9, 0x141a: 0x8941, 0x141b: 0x8969, 0x141c: 0x8991, 0x141d: 0x89b9, + 0x141e: 0x9001, 0x141f: 0x7c71, 0x1420: 0x8ee9, 0x1421: 0x7d39, 0x1422: 0x8f11, 0x1423: 0x7e29, + 0x1424: 0x8f39, 0x1425: 0x7ec9, 0x1426: 0x9029, 0x1427: 0x80d1, 0x1428: 0x9051, 0x1429: 0x9079, + 0x142a: 0x90a1, 0x142b: 0x8531, 0x142c: 0x8559, 0x142d: 0x8649, 0x142e: 0x8829, 0x142f: 0x8fb1, + 0x1430: 
0x89b9, 0x1431: 0x9001, 0x1432: 0x90c9, 0x1433: 0x9101, 0x1434: 0x9139, 0x1435: 0x9171, + 0x1436: 0x9199, 0x1437: 0x91c1, 0x1438: 0x91e9, 0x1439: 0x9211, 0x143a: 0x9239, 0x143b: 0x9261, + 0x143c: 0x9289, 0x143d: 0x92b1, 0x143e: 0x92d9, 0x143f: 0x9301, + // Block 0x51, offset 0x1440 + 0x1440: 0x9329, 0x1441: 0x9351, 0x1442: 0x9379, 0x1443: 0x93a1, 0x1444: 0x93c9, 0x1445: 0x93f1, + 0x1446: 0x9419, 0x1447: 0x9441, 0x1448: 0x9469, 0x1449: 0x9491, 0x144a: 0x94b9, 0x144b: 0x94e1, + 0x144c: 0x9079, 0x144d: 0x9509, 0x144e: 0x9531, 0x144f: 0x9559, 0x1450: 0x9581, 0x1451: 0x9171, + 0x1452: 0x9199, 0x1453: 0x91c1, 0x1454: 0x91e9, 0x1455: 0x9211, 0x1456: 0x9239, 0x1457: 0x9261, + 0x1458: 0x9289, 0x1459: 0x92b1, 0x145a: 0x92d9, 0x145b: 0x9301, 0x145c: 0x9329, 0x145d: 0x9351, + 0x145e: 0x9379, 0x145f: 0x93a1, 0x1460: 0x93c9, 0x1461: 0x93f1, 0x1462: 0x9419, 0x1463: 0x9441, + 0x1464: 0x9469, 0x1465: 0x9491, 0x1466: 0x94b9, 0x1467: 0x94e1, 0x1468: 0x9079, 0x1469: 0x9509, + 0x146a: 0x9531, 0x146b: 0x9559, 0x146c: 0x9581, 0x146d: 0x9491, 0x146e: 0x94b9, 0x146f: 0x94e1, + 0x1470: 0x9079, 0x1471: 0x9051, 0x1472: 0x90a1, 0x1473: 0x8211, 0x1474: 0x8059, 0x1475: 0x8081, + 0x1476: 0x80a9, 0x1477: 0x9491, 0x1478: 0x94b9, 0x1479: 0x94e1, 0x147a: 0x8211, 0x147b: 0x8239, + 0x147c: 0x95a9, 0x147d: 0x95a9, 0x147e: 0x0018, 0x147f: 0x0018, + // Block 0x52, offset 0x1480 + 0x1480: 0x0040, 0x1481: 0x0040, 0x1482: 0x0040, 0x1483: 0x0040, 0x1484: 0x0040, 0x1485: 0x0040, + 0x1486: 0x0040, 0x1487: 0x0040, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040, + 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x95d1, 0x1491: 0x9609, + 0x1492: 0x9609, 0x1493: 0x9641, 0x1494: 0x9679, 0x1495: 0x96b1, 0x1496: 0x96e9, 0x1497: 0x9721, + 0x1498: 0x9759, 0x1499: 0x9759, 0x149a: 0x9791, 0x149b: 0x97c9, 0x149c: 0x9801, 0x149d: 0x9839, + 0x149e: 0x9871, 0x149f: 0x98a9, 0x14a0: 0x98a9, 0x14a1: 0x98e1, 0x14a2: 0x9919, 0x14a3: 0x9919, + 0x14a4: 0x9951, 0x14a5: 0x9951, 0x14a6: 0x9989, 0x14a7: 0x99c1, 0x14a8: 0x99c1, 0x14a9: 0x99f9, + 0x14aa: 0x9a31, 0x14ab: 0x9a31, 0x14ac: 0x9a69, 0x14ad: 0x9a69, 0x14ae: 0x9aa1, 0x14af: 0x9ad9, + 0x14b0: 0x9ad9, 0x14b1: 0x9b11, 0x14b2: 0x9b11, 0x14b3: 0x9b49, 0x14b4: 0x9b81, 0x14b5: 0x9bb9, + 0x14b6: 0x9bf1, 0x14b7: 0x9bf1, 0x14b8: 0x9c29, 0x14b9: 0x9c61, 0x14ba: 0x9c99, 0x14bb: 0x9cd1, + 0x14bc: 0x9d09, 0x14bd: 0x9d09, 0x14be: 0x9d41, 0x14bf: 0x9d79, + // Block 0x53, offset 0x14c0 + 0x14c0: 0xa949, 0x14c1: 0xa981, 0x14c2: 0xa9b9, 0x14c3: 0xa8a1, 0x14c4: 0x9bb9, 0x14c5: 0x9989, + 0x14c6: 0xa9f1, 0x14c7: 0xaa29, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x0040, 0x14d1: 0x0040, + 0x14d2: 0x0040, 0x14d3: 0x0040, 0x14d4: 0x0040, 0x14d5: 0x0040, 0x14d6: 0x0040, 0x14d7: 0x0040, + 0x14d8: 0x0040, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040, + 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x0040, 0x14e1: 0x0040, 0x14e2: 0x0040, 0x14e3: 0x0040, + 0x14e4: 0x0040, 0x14e5: 0x0040, 0x14e6: 0x0040, 0x14e7: 0x0040, 0x14e8: 0x0040, 0x14e9: 0x0040, + 0x14ea: 0x0040, 0x14eb: 0x0040, 0x14ec: 0x0040, 0x14ed: 0x0040, 0x14ee: 0x0040, 0x14ef: 0x0040, + 0x14f0: 0xaa61, 0x14f1: 0xaa99, 0x14f2: 0xaad1, 0x14f3: 0xab19, 0x14f4: 0xab61, 0x14f5: 0xaba9, + 0x14f6: 0xabf1, 0x14f7: 0xac39, 0x14f8: 0xac81, 0x14f9: 0xacc9, 0x14fa: 0xad02, 0x14fb: 0xae12, + 0x14fc: 0xae91, 0x14fd: 0x0018, 0x14fe: 0x0040, 0x14ff: 0x0040, + // Block 0x54, offset 0x1500 + 0x1500: 0x33c0, 0x1501: 0x33c0, 
0x1502: 0x33c0, 0x1503: 0x33c0, 0x1504: 0x33c0, 0x1505: 0x33c0, + 0x1506: 0x33c0, 0x1507: 0x33c0, 0x1508: 0x33c0, 0x1509: 0x33c0, 0x150a: 0x33c0, 0x150b: 0x33c0, + 0x150c: 0x33c0, 0x150d: 0x33c0, 0x150e: 0x33c0, 0x150f: 0x33c0, 0x1510: 0xaeda, 0x1511: 0x7d55, + 0x1512: 0x0040, 0x1513: 0xaeea, 0x1514: 0x03c2, 0x1515: 0xaefa, 0x1516: 0xaf0a, 0x1517: 0x7d75, + 0x1518: 0x7d95, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x3308, 0x1521: 0x3308, 0x1522: 0x3308, 0x1523: 0x3308, + 0x1524: 0x3308, 0x1525: 0x3308, 0x1526: 0x3308, 0x1527: 0x3308, 0x1528: 0x3308, 0x1529: 0x3308, + 0x152a: 0x3308, 0x152b: 0x3308, 0x152c: 0x3308, 0x152d: 0x3308, 0x152e: 0x3308, 0x152f: 0x3308, + 0x1530: 0x0040, 0x1531: 0x7db5, 0x1532: 0x7dd5, 0x1533: 0xaf1a, 0x1534: 0xaf1a, 0x1535: 0x1fd2, + 0x1536: 0x1fe2, 0x1537: 0xaf2a, 0x1538: 0xaf3a, 0x1539: 0x7df5, 0x153a: 0x7e15, 0x153b: 0x7e35, + 0x153c: 0x7df5, 0x153d: 0x7e55, 0x153e: 0x7e75, 0x153f: 0x7e55, + // Block 0x55, offset 0x1540 + 0x1540: 0x7e95, 0x1541: 0x7eb5, 0x1542: 0x7ed5, 0x1543: 0x7eb5, 0x1544: 0x7ef5, 0x1545: 0x0018, + 0x1546: 0x0018, 0x1547: 0xaf4a, 0x1548: 0xaf5a, 0x1549: 0x7f16, 0x154a: 0x7f36, 0x154b: 0x7f56, + 0x154c: 0x7f76, 0x154d: 0xaf1a, 0x154e: 0xaf1a, 0x154f: 0xaf1a, 0x1550: 0xaeda, 0x1551: 0x7f95, + 0x1552: 0x0040, 0x1553: 0x0040, 0x1554: 0x03c2, 0x1555: 0xaeea, 0x1556: 0xaf0a, 0x1557: 0xaefa, + 0x1558: 0x7fb5, 0x1559: 0x1fd2, 0x155a: 0x1fe2, 0x155b: 0xaf2a, 0x155c: 0xaf3a, 0x155d: 0x7e95, + 0x155e: 0x7ef5, 0x155f: 0xaf6a, 0x1560: 0xaf7a, 0x1561: 0xaf8a, 0x1562: 0x1fb2, 0x1563: 0xaf99, + 0x1564: 0xafaa, 0x1565: 0xafba, 0x1566: 0x1fc2, 0x1567: 0x0040, 0x1568: 0xafca, 0x1569: 0xafda, + 0x156a: 0xafea, 0x156b: 0xaffa, 0x156c: 0x0040, 0x156d: 0x0040, 0x156e: 0x0040, 0x156f: 0x0040, + 0x1570: 0x7fd6, 0x1571: 0xb009, 0x1572: 0x7ff6, 0x1573: 0x0808, 0x1574: 0x8016, 0x1575: 0x0040, + 0x1576: 0x8036, 0x1577: 0xb031, 0x1578: 0x8056, 0x1579: 0xb059, 0x157a: 0x8076, 0x157b: 0xb081, + 0x157c: 0x8096, 0x157d: 0xb0a9, 0x157e: 0x80b6, 0x157f: 0xb0d1, + // Block 0x56, offset 0x1580 + 0x1580: 0xb0f9, 0x1581: 0xb111, 0x1582: 0xb111, 0x1583: 0xb129, 0x1584: 0xb129, 0x1585: 0xb141, + 0x1586: 0xb141, 0x1587: 0xb159, 0x1588: 0xb159, 0x1589: 0xb171, 0x158a: 0xb171, 0x158b: 0xb171, + 0x158c: 0xb171, 0x158d: 0xb189, 0x158e: 0xb189, 0x158f: 0xb1a1, 0x1590: 0xb1a1, 0x1591: 0xb1a1, + 0x1592: 0xb1a1, 0x1593: 0xb1b9, 0x1594: 0xb1b9, 0x1595: 0xb1d1, 0x1596: 0xb1d1, 0x1597: 0xb1d1, + 0x1598: 0xb1d1, 0x1599: 0xb1e9, 0x159a: 0xb1e9, 0x159b: 0xb1e9, 0x159c: 0xb1e9, 0x159d: 0xb201, + 0x159e: 0xb201, 0x159f: 0xb201, 0x15a0: 0xb201, 0x15a1: 0xb219, 0x15a2: 0xb219, 0x15a3: 0xb219, + 0x15a4: 0xb219, 0x15a5: 0xb231, 0x15a6: 0xb231, 0x15a7: 0xb231, 0x15a8: 0xb231, 0x15a9: 0xb249, + 0x15aa: 0xb249, 0x15ab: 0xb261, 0x15ac: 0xb261, 0x15ad: 0xb279, 0x15ae: 0xb279, 0x15af: 0xb291, + 0x15b0: 0xb291, 0x15b1: 0xb2a9, 0x15b2: 0xb2a9, 0x15b3: 0xb2a9, 0x15b4: 0xb2a9, 0x15b5: 0xb2c1, + 0x15b6: 0xb2c1, 0x15b7: 0xb2c1, 0x15b8: 0xb2c1, 0x15b9: 0xb2d9, 0x15ba: 0xb2d9, 0x15bb: 0xb2d9, + 0x15bc: 0xb2d9, 0x15bd: 0xb2f1, 0x15be: 0xb2f1, 0x15bf: 0xb2f1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb2f1, 0x15c1: 0xb309, 0x15c2: 0xb309, 0x15c3: 0xb309, 0x15c4: 0xb309, 0x15c5: 0xb321, + 0x15c6: 0xb321, 0x15c7: 0xb321, 0x15c8: 0xb321, 0x15c9: 0xb339, 0x15ca: 0xb339, 0x15cb: 0xb339, + 0x15cc: 0xb339, 0x15cd: 0xb351, 0x15ce: 0xb351, 0x15cf: 0xb351, 0x15d0: 0xb351, 0x15d1: 0xb369, + 0x15d2: 0xb369, 0x15d3: 0xb369, 0x15d4: 0xb369, 0x15d5: 
0xb381, 0x15d6: 0xb381, 0x15d7: 0xb381, + 0x15d8: 0xb381, 0x15d9: 0xb399, 0x15da: 0xb399, 0x15db: 0xb399, 0x15dc: 0xb399, 0x15dd: 0xb3b1, + 0x15de: 0xb3b1, 0x15df: 0xb3b1, 0x15e0: 0xb3b1, 0x15e1: 0xb3c9, 0x15e2: 0xb3c9, 0x15e3: 0xb3c9, + 0x15e4: 0xb3c9, 0x15e5: 0xb3e1, 0x15e6: 0xb3e1, 0x15e7: 0xb3e1, 0x15e8: 0xb3e1, 0x15e9: 0xb3f9, + 0x15ea: 0xb3f9, 0x15eb: 0xb3f9, 0x15ec: 0xb3f9, 0x15ed: 0xb411, 0x15ee: 0xb411, 0x15ef: 0x7ab1, + 0x15f0: 0x7ab1, 0x15f1: 0xb429, 0x15f2: 0xb429, 0x15f3: 0xb429, 0x15f4: 0xb429, 0x15f5: 0xb441, + 0x15f6: 0xb441, 0x15f7: 0xb469, 0x15f8: 0xb469, 0x15f9: 0xb491, 0x15fa: 0xb491, 0x15fb: 0xb4b9, + 0x15fc: 0xb4b9, 0x15fd: 0x0040, 0x15fe: 0x0040, 0x15ff: 0x03c0, + // Block 0x58, offset 0x1600 + 0x1600: 0x0040, 0x1601: 0xaefa, 0x1602: 0xb4e2, 0x1603: 0xaf6a, 0x1604: 0xafda, 0x1605: 0xafea, + 0x1606: 0xaf7a, 0x1607: 0xb4f2, 0x1608: 0x1fd2, 0x1609: 0x1fe2, 0x160a: 0xaf8a, 0x160b: 0x1fb2, + 0x160c: 0xaeda, 0x160d: 0xaf99, 0x160e: 0x29d1, 0x160f: 0xb502, 0x1610: 0x1f41, 0x1611: 0x00c9, + 0x1612: 0x0069, 0x1613: 0x0079, 0x1614: 0x1f51, 0x1615: 0x1f61, 0x1616: 0x1f71, 0x1617: 0x1f81, + 0x1618: 0x1f91, 0x1619: 0x1fa1, 0x161a: 0xaeea, 0x161b: 0x03c2, 0x161c: 0xafaa, 0x161d: 0x1fc2, + 0x161e: 0xafba, 0x161f: 0xaf0a, 0x1620: 0xaffa, 0x1621: 0x0039, 0x1622: 0x0ee9, 0x1623: 0x1159, + 0x1624: 0x0ef9, 0x1625: 0x0f09, 0x1626: 0x1199, 0x1627: 0x0f31, 0x1628: 0x0249, 0x1629: 0x0f41, + 0x162a: 0x0259, 0x162b: 0x0f51, 0x162c: 0x0359, 0x162d: 0x0f61, 0x162e: 0x0f71, 0x162f: 0x00d9, + 0x1630: 0x0f99, 0x1631: 0x2039, 0x1632: 0x0269, 0x1633: 0x01d9, 0x1634: 0x0fa9, 0x1635: 0x0fb9, + 0x1636: 0x1089, 0x1637: 0x0279, 0x1638: 0x0369, 0x1639: 0x0289, 0x163a: 0x13d1, 0x163b: 0xaf4a, + 0x163c: 0xafca, 0x163d: 0xaf5a, 0x163e: 0xb512, 0x163f: 0xaf1a, + // Block 0x59, offset 0x1640 + 0x1640: 0x1caa, 0x1641: 0x0039, 0x1642: 0x0ee9, 0x1643: 0x1159, 0x1644: 0x0ef9, 0x1645: 0x0f09, + 0x1646: 0x1199, 0x1647: 0x0f31, 0x1648: 0x0249, 0x1649: 0x0f41, 0x164a: 0x0259, 0x164b: 0x0f51, + 0x164c: 0x0359, 0x164d: 0x0f61, 0x164e: 0x0f71, 0x164f: 0x00d9, 0x1650: 0x0f99, 0x1651: 0x2039, + 0x1652: 0x0269, 0x1653: 0x01d9, 0x1654: 0x0fa9, 0x1655: 0x0fb9, 0x1656: 0x1089, 0x1657: 0x0279, + 0x1658: 0x0369, 0x1659: 0x0289, 0x165a: 0x13d1, 0x165b: 0xaf2a, 0x165c: 0xb522, 0x165d: 0xaf3a, + 0x165e: 0xb532, 0x165f: 0x80d5, 0x1660: 0x80f5, 0x1661: 0x29d1, 0x1662: 0x8115, 0x1663: 0x8115, + 0x1664: 0x8135, 0x1665: 0x8155, 0x1666: 0x8175, 0x1667: 0x8195, 0x1668: 0x81b5, 0x1669: 0x81d5, + 0x166a: 0x81f5, 0x166b: 0x8215, 0x166c: 0x8235, 0x166d: 0x8255, 0x166e: 0x8275, 0x166f: 0x8295, + 0x1670: 0x82b5, 0x1671: 0x82d5, 0x1672: 0x82f5, 0x1673: 0x8315, 0x1674: 0x8335, 0x1675: 0x8355, + 0x1676: 0x8375, 0x1677: 0x8395, 0x1678: 0x83b5, 0x1679: 0x83d5, 0x167a: 0x83f5, 0x167b: 0x8415, + 0x167c: 0x81b5, 0x167d: 0x8435, 0x167e: 0x8455, 0x167f: 0x8215, + // Block 0x5a, offset 0x1680 + 0x1680: 0x8475, 0x1681: 0x8495, 0x1682: 0x84b5, 0x1683: 0x84d5, 0x1684: 0x84f5, 0x1685: 0x8515, + 0x1686: 0x8535, 0x1687: 0x8555, 0x1688: 0x84d5, 0x1689: 0x8575, 0x168a: 0x84d5, 0x168b: 0x8595, + 0x168c: 0x8595, 0x168d: 0x85b5, 0x168e: 0x85b5, 0x168f: 0x85d5, 0x1690: 0x8515, 0x1691: 0x85f5, + 0x1692: 0x8615, 0x1693: 0x85f5, 0x1694: 0x8635, 0x1695: 0x8615, 0x1696: 0x8655, 0x1697: 0x8655, + 0x1698: 0x8675, 0x1699: 0x8675, 0x169a: 0x8695, 0x169b: 0x8695, 0x169c: 0x8615, 0x169d: 0x8115, + 0x169e: 0x86b5, 0x169f: 0x86d5, 0x16a0: 0x0040, 0x16a1: 0x86f5, 0x16a2: 0x8715, 0x16a3: 0x8735, + 0x16a4: 0x8755, 0x16a5: 0x8735, 0x16a6: 0x8775, 0x16a7: 0x8795, 0x16a8: 0x87b5, 
0x16a9: 0x87b5, + 0x16aa: 0x87d5, 0x16ab: 0x87d5, 0x16ac: 0x87f5, 0x16ad: 0x87f5, 0x16ae: 0x87d5, 0x16af: 0x87d5, + 0x16b0: 0x8815, 0x16b1: 0x8835, 0x16b2: 0x8855, 0x16b3: 0x8875, 0x16b4: 0x8895, 0x16b5: 0x88b5, + 0x16b6: 0x88b5, 0x16b7: 0x88b5, 0x16b8: 0x88d5, 0x16b9: 0x88d5, 0x16ba: 0x88d5, 0x16bb: 0x88d5, + 0x16bc: 0x87b5, 0x16bd: 0x87b5, 0x16be: 0x87b5, 0x16bf: 0x0040, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x0040, 0x16c1: 0x0040, 0x16c2: 0x8715, 0x16c3: 0x86f5, 0x16c4: 0x88f5, 0x16c5: 0x86f5, + 0x16c6: 0x8715, 0x16c7: 0x86f5, 0x16c8: 0x0040, 0x16c9: 0x0040, 0x16ca: 0x8915, 0x16cb: 0x8715, + 0x16cc: 0x8935, 0x16cd: 0x88f5, 0x16ce: 0x8935, 0x16cf: 0x8715, 0x16d0: 0x0040, 0x16d1: 0x0040, + 0x16d2: 0x8955, 0x16d3: 0x8975, 0x16d4: 0x8875, 0x16d5: 0x8935, 0x16d6: 0x88f5, 0x16d7: 0x8935, + 0x16d8: 0x0040, 0x16d9: 0x0040, 0x16da: 0x8995, 0x16db: 0x89b5, 0x16dc: 0x8995, 0x16dd: 0x0040, + 0x16de: 0x0040, 0x16df: 0x0040, 0x16e0: 0xb541, 0x16e1: 0xb559, 0x16e2: 0xb571, 0x16e3: 0x89d6, + 0x16e4: 0xb589, 0x16e5: 0xb5a1, 0x16e6: 0x89f5, 0x16e7: 0x0040, 0x16e8: 0x8a15, 0x16e9: 0x8a35, + 0x16ea: 0x8a55, 0x16eb: 0x8a35, 0x16ec: 0x8a75, 0x16ed: 0x8a95, 0x16ee: 0x8ab5, 0x16ef: 0x0040, + 0x16f0: 0x0040, 0x16f1: 0x0040, 0x16f2: 0x0040, 0x16f3: 0x0040, 0x16f4: 0x0040, 0x16f5: 0x0040, + 0x16f6: 0x0040, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0340, 0x16fa: 0x0340, 0x16fb: 0x0340, + 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0a08, 0x1701: 0x0a08, 0x1702: 0x0a08, 0x1703: 0x0a08, 0x1704: 0x0a08, 0x1705: 0x0c08, + 0x1706: 0x0808, 0x1707: 0x0c08, 0x1708: 0x0818, 0x1709: 0x0c08, 0x170a: 0x0c08, 0x170b: 0x0808, + 0x170c: 0x0808, 0x170d: 0x0908, 0x170e: 0x0c08, 0x170f: 0x0c08, 0x1710: 0x0c08, 0x1711: 0x0c08, + 0x1712: 0x0c08, 0x1713: 0x0a08, 0x1714: 0x0a08, 0x1715: 0x0a08, 0x1716: 0x0a08, 0x1717: 0x0908, + 0x1718: 0x0a08, 0x1719: 0x0a08, 0x171a: 0x0a08, 0x171b: 0x0a08, 0x171c: 0x0a08, 0x171d: 0x0c08, + 0x171e: 0x0a08, 0x171f: 0x0a08, 0x1720: 0x0a08, 0x1721: 0x0c08, 0x1722: 0x0808, 0x1723: 0x0808, + 0x1724: 0x0c08, 0x1725: 0x3308, 0x1726: 0x3308, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0040, + 0x172a: 0x0040, 0x172b: 0x0a18, 0x172c: 0x0a18, 0x172d: 0x0a18, 0x172e: 0x0a18, 0x172f: 0x0c18, + 0x1730: 0x0818, 0x1731: 0x0818, 0x1732: 0x0818, 0x1733: 0x0818, 0x1734: 0x0818, 0x1735: 0x0818, + 0x1736: 0x0818, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0c08, 0x1742: 0x0a08, 0x1743: 0x0c08, 0x1744: 0x0c08, 0x1745: 0x0c08, + 0x1746: 0x0a08, 0x1747: 0x0a08, 0x1748: 0x0a08, 0x1749: 0x0c08, 0x174a: 0x0a08, 0x174b: 0x0a08, + 0x174c: 0x0c08, 0x174d: 0x0a08, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0a08, 0x1751: 0x0c08, + 0x1752: 0x0040, 0x1753: 0x0040, 0x1754: 0x0040, 0x1755: 0x0040, 0x1756: 0x0040, 0x1757: 0x0040, + 0x1758: 0x0040, 0x1759: 0x0818, 0x175a: 0x0818, 0x175b: 0x0818, 0x175c: 0x0818, 0x175d: 0x0040, + 0x175e: 0x0040, 0x175f: 0x0040, 0x1760: 0x0040, 0x1761: 0x0040, 0x1762: 0x0040, 0x1763: 0x0040, + 0x1764: 0x0040, 0x1765: 0x0040, 0x1766: 0x0040, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0c18, + 0x176a: 0x0c18, 0x176b: 0x0c18, 0x176c: 0x0c18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0818, + 0x1770: 0x0040, 0x1771: 0x0040, 0x1772: 0x0040, 0x1773: 0x0040, 0x1774: 0x0040, 0x1775: 0x0040, + 0x1776: 0x0040, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x3308, 0x1781: 0x3308, 0x1782: 0x3008, 0x1783: 0x3008, 0x1784: 0x0040, 0x1785: 0x0008, + 0x1786: 0x0008, 0x1787: 0x0008, 0x1788: 0x0008, 0x1789: 0x0008, 0x178a: 0x0008, 0x178b: 0x0008, + 0x178c: 0x0008, 0x178d: 0x0040, 0x178e: 0x0040, 0x178f: 0x0008, 0x1790: 0x0008, 0x1791: 0x0040, + 0x1792: 0x0040, 0x1793: 0x0008, 0x1794: 0x0008, 0x1795: 0x0008, 0x1796: 0x0008, 0x1797: 0x0008, + 0x1798: 0x0008, 0x1799: 0x0008, 0x179a: 0x0008, 0x179b: 0x0008, 0x179c: 0x0008, 0x179d: 0x0008, + 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x0008, 0x17a3: 0x0008, + 0x17a4: 0x0008, 0x17a5: 0x0008, 0x17a6: 0x0008, 0x17a7: 0x0008, 0x17a8: 0x0008, 0x17a9: 0x0040, + 0x17aa: 0x0008, 0x17ab: 0x0008, 0x17ac: 0x0008, 0x17ad: 0x0008, 0x17ae: 0x0008, 0x17af: 0x0008, + 0x17b0: 0x0008, 0x17b1: 0x0040, 0x17b2: 0x0008, 0x17b3: 0x0008, 0x17b4: 0x0040, 0x17b5: 0x0008, + 0x17b6: 0x0008, 0x17b7: 0x0008, 0x17b8: 0x0008, 0x17b9: 0x0008, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x3308, 0x17bd: 0x0008, 0x17be: 0x3008, 0x17bf: 0x3008, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3008, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x3008, 0x17c5: 0x0040, + 0x17c6: 0x0040, 0x17c7: 0x3008, 0x17c8: 0x3008, 0x17c9: 0x0040, 0x17ca: 0x0040, 0x17cb: 0x3008, + 0x17cc: 0x3008, 0x17cd: 0x3808, 0x17ce: 0x0040, 0x17cf: 0x0040, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0040, 0x17d4: 0x0040, 0x17d5: 0x0040, 0x17d6: 0x0040, 0x17d7: 0x3008, + 0x17d8: 0x0040, 0x17d9: 0x0040, 0x17da: 0x0040, 0x17db: 0x0040, 0x17dc: 0x0040, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x3008, 0x17e3: 0x3008, + 0x17e4: 0x0040, 0x17e5: 0x0040, 0x17e6: 0x3308, 0x17e7: 0x3308, 0x17e8: 0x3308, 0x17e9: 0x3308, + 0x17ea: 0x3308, 0x17eb: 0x3308, 0x17ec: 0x3308, 0x17ed: 0x0040, 0x17ee: 0x0040, 0x17ef: 0x0040, + 0x17f0: 0x3308, 0x17f1: 0x3308, 0x17f2: 0x3308, 0x17f3: 0x3308, 0x17f4: 0x3308, 0x17f5: 0x0040, + 0x17f6: 0x0040, 0x17f7: 0x0040, 0x17f8: 0x0040, 0x17f9: 0x0040, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x0040, 0x17fd: 0x0040, 0x17fe: 0x0040, 0x17ff: 0x0040, + // Block 0x60, offset 0x1800 + 0x1800: 0x0039, 0x1801: 0x0ee9, 0x1802: 0x1159, 0x1803: 0x0ef9, 0x1804: 0x0f09, 0x1805: 0x1199, + 0x1806: 0x0f31, 0x1807: 0x0249, 0x1808: 0x0f41, 0x1809: 0x0259, 0x180a: 0x0f51, 0x180b: 0x0359, + 0x180c: 0x0f61, 0x180d: 0x0f71, 0x180e: 0x00d9, 0x180f: 0x0f99, 0x1810: 0x2039, 0x1811: 0x0269, + 0x1812: 0x01d9, 0x1813: 0x0fa9, 0x1814: 0x0fb9, 0x1815: 0x1089, 0x1816: 0x0279, 0x1817: 0x0369, + 0x1818: 0x0289, 0x1819: 0x13d1, 0x181a: 0x0039, 0x181b: 0x0ee9, 0x181c: 0x1159, 0x181d: 0x0ef9, + 0x181e: 0x0f09, 0x181f: 0x1199, 0x1820: 0x0f31, 0x1821: 0x0249, 0x1822: 0x0f41, 0x1823: 0x0259, + 0x1824: 0x0f51, 0x1825: 0x0359, 0x1826: 0x0f61, 0x1827: 0x0f71, 0x1828: 0x00d9, 0x1829: 0x0f99, + 0x182a: 0x2039, 0x182b: 0x0269, 0x182c: 0x01d9, 0x182d: 0x0fa9, 0x182e: 0x0fb9, 0x182f: 0x1089, + 0x1830: 0x0279, 0x1831: 0x0369, 0x1832: 0x0289, 0x1833: 0x13d1, 0x1834: 0x0039, 0x1835: 0x0ee9, + 0x1836: 0x1159, 0x1837: 0x0ef9, 0x1838: 0x0f09, 0x1839: 0x1199, 0x183a: 0x0f31, 0x183b: 0x0249, + 0x183c: 0x0f41, 0x183d: 0x0259, 0x183e: 0x0f51, 0x183f: 0x0359, + // Block 0x61, offset 0x1840 + 0x1840: 0x0f61, 0x1841: 0x0f71, 0x1842: 0x00d9, 0x1843: 0x0f99, 0x1844: 0x2039, 0x1845: 0x0269, + 0x1846: 0x01d9, 0x1847: 0x0fa9, 0x1848: 0x0fb9, 0x1849: 0x1089, 0x184a: 0x0279, 0x184b: 0x0369, + 0x184c: 0x0289, 0x184d: 0x13d1, 
0x184e: 0x0039, 0x184f: 0x0ee9, 0x1850: 0x1159, 0x1851: 0x0ef9, + 0x1852: 0x0f09, 0x1853: 0x1199, 0x1854: 0x0f31, 0x1855: 0x0040, 0x1856: 0x0f41, 0x1857: 0x0259, + 0x1858: 0x0f51, 0x1859: 0x0359, 0x185a: 0x0f61, 0x185b: 0x0f71, 0x185c: 0x00d9, 0x185d: 0x0f99, + 0x185e: 0x2039, 0x185f: 0x0269, 0x1860: 0x01d9, 0x1861: 0x0fa9, 0x1862: 0x0fb9, 0x1863: 0x1089, + 0x1864: 0x0279, 0x1865: 0x0369, 0x1866: 0x0289, 0x1867: 0x13d1, 0x1868: 0x0039, 0x1869: 0x0ee9, + 0x186a: 0x1159, 0x186b: 0x0ef9, 0x186c: 0x0f09, 0x186d: 0x1199, 0x186e: 0x0f31, 0x186f: 0x0249, + 0x1870: 0x0f41, 0x1871: 0x0259, 0x1872: 0x0f51, 0x1873: 0x0359, 0x1874: 0x0f61, 0x1875: 0x0f71, + 0x1876: 0x00d9, 0x1877: 0x0f99, 0x1878: 0x2039, 0x1879: 0x0269, 0x187a: 0x01d9, 0x187b: 0x0fa9, + 0x187c: 0x0fb9, 0x187d: 0x1089, 0x187e: 0x0279, 0x187f: 0x0369, + // Block 0x62, offset 0x1880 + 0x1880: 0x0289, 0x1881: 0x13d1, 0x1882: 0x0039, 0x1883: 0x0ee9, 0x1884: 0x1159, 0x1885: 0x0ef9, + 0x1886: 0x0f09, 0x1887: 0x1199, 0x1888: 0x0f31, 0x1889: 0x0249, 0x188a: 0x0f41, 0x188b: 0x0259, + 0x188c: 0x0f51, 0x188d: 0x0359, 0x188e: 0x0f61, 0x188f: 0x0f71, 0x1890: 0x00d9, 0x1891: 0x0f99, + 0x1892: 0x2039, 0x1893: 0x0269, 0x1894: 0x01d9, 0x1895: 0x0fa9, 0x1896: 0x0fb9, 0x1897: 0x1089, + 0x1898: 0x0279, 0x1899: 0x0369, 0x189a: 0x0289, 0x189b: 0x13d1, 0x189c: 0x0039, 0x189d: 0x0040, + 0x189e: 0x1159, 0x189f: 0x0ef9, 0x18a0: 0x0040, 0x18a1: 0x0040, 0x18a2: 0x0f31, 0x18a3: 0x0040, + 0x18a4: 0x0040, 0x18a5: 0x0259, 0x18a6: 0x0f51, 0x18a7: 0x0040, 0x18a8: 0x0040, 0x18a9: 0x0f71, + 0x18aa: 0x00d9, 0x18ab: 0x0f99, 0x18ac: 0x2039, 0x18ad: 0x0040, 0x18ae: 0x01d9, 0x18af: 0x0fa9, + 0x18b0: 0x0fb9, 0x18b1: 0x1089, 0x18b2: 0x0279, 0x18b3: 0x0369, 0x18b4: 0x0289, 0x18b5: 0x13d1, + 0x18b6: 0x0039, 0x18b7: 0x0ee9, 0x18b8: 0x1159, 0x18b9: 0x0ef9, 0x18ba: 0x0040, 0x18bb: 0x1199, + 0x18bc: 0x0040, 0x18bd: 0x0249, 0x18be: 0x0f41, 0x18bf: 0x0259, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0f51, 0x18c1: 0x0359, 0x18c2: 0x0f61, 0x18c3: 0x0f71, 0x18c4: 0x0040, 0x18c5: 0x0f99, + 0x18c6: 0x2039, 0x18c7: 0x0269, 0x18c8: 0x01d9, 0x18c9: 0x0fa9, 0x18ca: 0x0fb9, 0x18cb: 0x1089, + 0x18cc: 0x0279, 0x18cd: 0x0369, 0x18ce: 0x0289, 0x18cf: 0x13d1, 0x18d0: 0x0039, 0x18d1: 0x0ee9, + 0x18d2: 0x1159, 0x18d3: 0x0ef9, 0x18d4: 0x0f09, 0x18d5: 0x1199, 0x18d6: 0x0f31, 0x18d7: 0x0249, + 0x18d8: 0x0f41, 0x18d9: 0x0259, 0x18da: 0x0f51, 0x18db: 0x0359, 0x18dc: 0x0f61, 0x18dd: 0x0f71, + 0x18de: 0x00d9, 0x18df: 0x0f99, 0x18e0: 0x2039, 0x18e1: 0x0269, 0x18e2: 0x01d9, 0x18e3: 0x0fa9, + 0x18e4: 0x0fb9, 0x18e5: 0x1089, 0x18e6: 0x0279, 0x18e7: 0x0369, 0x18e8: 0x0289, 0x18e9: 0x13d1, + 0x18ea: 0x0039, 0x18eb: 0x0ee9, 0x18ec: 0x1159, 0x18ed: 0x0ef9, 0x18ee: 0x0f09, 0x18ef: 0x1199, + 0x18f0: 0x0f31, 0x18f1: 0x0249, 0x18f2: 0x0f41, 0x18f3: 0x0259, 0x18f4: 0x0f51, 0x18f5: 0x0359, + 0x18f6: 0x0f61, 0x18f7: 0x0f71, 0x18f8: 0x00d9, 0x18f9: 0x0f99, 0x18fa: 0x2039, 0x18fb: 0x0269, + 0x18fc: 0x01d9, 0x18fd: 0x0fa9, 0x18fe: 0x0fb9, 0x18ff: 0x1089, + // Block 0x64, offset 0x1900 + 0x1900: 0x0279, 0x1901: 0x0369, 0x1902: 0x0289, 0x1903: 0x13d1, 0x1904: 0x0039, 0x1905: 0x0ee9, + 0x1906: 0x0040, 0x1907: 0x0ef9, 0x1908: 0x0f09, 0x1909: 0x1199, 0x190a: 0x0f31, 0x190b: 0x0040, + 0x190c: 0x0040, 0x190d: 0x0259, 0x190e: 0x0f51, 0x190f: 0x0359, 0x1910: 0x0f61, 0x1911: 0x0f71, + 0x1912: 0x00d9, 0x1913: 0x0f99, 0x1914: 0x2039, 0x1915: 0x0040, 0x1916: 0x01d9, 0x1917: 0x0fa9, + 0x1918: 0x0fb9, 0x1919: 0x1089, 0x191a: 0x0279, 0x191b: 0x0369, 0x191c: 0x0289, 0x191d: 0x0040, + 0x191e: 0x0039, 0x191f: 0x0ee9, 0x1920: 0x1159, 0x1921: 
0x0ef9, 0x1922: 0x0f09, 0x1923: 0x1199, + 0x1924: 0x0f31, 0x1925: 0x0249, 0x1926: 0x0f41, 0x1927: 0x0259, 0x1928: 0x0f51, 0x1929: 0x0359, + 0x192a: 0x0f61, 0x192b: 0x0f71, 0x192c: 0x00d9, 0x192d: 0x0f99, 0x192e: 0x2039, 0x192f: 0x0269, + 0x1930: 0x01d9, 0x1931: 0x0fa9, 0x1932: 0x0fb9, 0x1933: 0x1089, 0x1934: 0x0279, 0x1935: 0x0369, + 0x1936: 0x0289, 0x1937: 0x13d1, 0x1938: 0x0039, 0x1939: 0x0ee9, 0x193a: 0x0040, 0x193b: 0x0ef9, + 0x193c: 0x0f09, 0x193d: 0x1199, 0x193e: 0x0f31, 0x193f: 0x0040, + // Block 0x65, offset 0x1940 + 0x1940: 0x0f41, 0x1941: 0x0259, 0x1942: 0x0f51, 0x1943: 0x0359, 0x1944: 0x0f61, 0x1945: 0x0040, + 0x1946: 0x00d9, 0x1947: 0x0040, 0x1948: 0x0040, 0x1949: 0x0040, 0x194a: 0x01d9, 0x194b: 0x0fa9, + 0x194c: 0x0fb9, 0x194d: 0x1089, 0x194e: 0x0279, 0x194f: 0x0369, 0x1950: 0x0289, 0x1951: 0x0040, + 0x1952: 0x0039, 0x1953: 0x0ee9, 0x1954: 0x1159, 0x1955: 0x0ef9, 0x1956: 0x0f09, 0x1957: 0x1199, + 0x1958: 0x0f31, 0x1959: 0x0249, 0x195a: 0x0f41, 0x195b: 0x0259, 0x195c: 0x0f51, 0x195d: 0x0359, + 0x195e: 0x0f61, 0x195f: 0x0f71, 0x1960: 0x00d9, 0x1961: 0x0f99, 0x1962: 0x2039, 0x1963: 0x0269, + 0x1964: 0x01d9, 0x1965: 0x0fa9, 0x1966: 0x0fb9, 0x1967: 0x1089, 0x1968: 0x0279, 0x1969: 0x0369, + 0x196a: 0x0289, 0x196b: 0x13d1, 0x196c: 0x0039, 0x196d: 0x0ee9, 0x196e: 0x1159, 0x196f: 0x0ef9, + 0x1970: 0x0f09, 0x1971: 0x1199, 0x1972: 0x0f31, 0x1973: 0x0249, 0x1974: 0x0f41, 0x1975: 0x0259, + 0x1976: 0x0f51, 0x1977: 0x0359, 0x1978: 0x0f61, 0x1979: 0x0f71, 0x197a: 0x00d9, 0x197b: 0x0f99, + 0x197c: 0x2039, 0x197d: 0x0269, 0x197e: 0x01d9, 0x197f: 0x0fa9, + // Block 0x66, offset 0x1980 + 0x1980: 0x0fb9, 0x1981: 0x1089, 0x1982: 0x0279, 0x1983: 0x0369, 0x1984: 0x0289, 0x1985: 0x13d1, + 0x1986: 0x0039, 0x1987: 0x0ee9, 0x1988: 0x1159, 0x1989: 0x0ef9, 0x198a: 0x0f09, 0x198b: 0x1199, + 0x198c: 0x0f31, 0x198d: 0x0249, 0x198e: 0x0f41, 0x198f: 0x0259, 0x1990: 0x0f51, 0x1991: 0x0359, + 0x1992: 0x0f61, 0x1993: 0x0f71, 0x1994: 0x00d9, 0x1995: 0x0f99, 0x1996: 0x2039, 0x1997: 0x0269, + 0x1998: 0x01d9, 0x1999: 0x0fa9, 0x199a: 0x0fb9, 0x199b: 0x1089, 0x199c: 0x0279, 0x199d: 0x0369, + 0x199e: 0x0289, 0x199f: 0x13d1, 0x19a0: 0x0039, 0x19a1: 0x0ee9, 0x19a2: 0x1159, 0x19a3: 0x0ef9, + 0x19a4: 0x0f09, 0x19a5: 0x1199, 0x19a6: 0x0f31, 0x19a7: 0x0249, 0x19a8: 0x0f41, 0x19a9: 0x0259, + 0x19aa: 0x0f51, 0x19ab: 0x0359, 0x19ac: 0x0f61, 0x19ad: 0x0f71, 0x19ae: 0x00d9, 0x19af: 0x0f99, + 0x19b0: 0x2039, 0x19b1: 0x0269, 0x19b2: 0x01d9, 0x19b3: 0x0fa9, 0x19b4: 0x0fb9, 0x19b5: 0x1089, + 0x19b6: 0x0279, 0x19b7: 0x0369, 0x19b8: 0x0289, 0x19b9: 0x13d1, 0x19ba: 0x0039, 0x19bb: 0x0ee9, + 0x19bc: 0x1159, 0x19bd: 0x0ef9, 0x19be: 0x0f09, 0x19bf: 0x1199, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0f31, 0x19c1: 0x0249, 0x19c2: 0x0f41, 0x19c3: 0x0259, 0x19c4: 0x0f51, 0x19c5: 0x0359, + 0x19c6: 0x0f61, 0x19c7: 0x0f71, 0x19c8: 0x00d9, 0x19c9: 0x0f99, 0x19ca: 0x2039, 0x19cb: 0x0269, + 0x19cc: 0x01d9, 0x19cd: 0x0fa9, 0x19ce: 0x0fb9, 0x19cf: 0x1089, 0x19d0: 0x0279, 0x19d1: 0x0369, + 0x19d2: 0x0289, 0x19d3: 0x13d1, 0x19d4: 0x0039, 0x19d5: 0x0ee9, 0x19d6: 0x1159, 0x19d7: 0x0ef9, + 0x19d8: 0x0f09, 0x19d9: 0x1199, 0x19da: 0x0f31, 0x19db: 0x0249, 0x19dc: 0x0f41, 0x19dd: 0x0259, + 0x19de: 0x0f51, 0x19df: 0x0359, 0x19e0: 0x0f61, 0x19e1: 0x0f71, 0x19e2: 0x00d9, 0x19e3: 0x0f99, + 0x19e4: 0x2039, 0x19e5: 0x0269, 0x19e6: 0x01d9, 0x19e7: 0x0fa9, 0x19e8: 0x0fb9, 0x19e9: 0x1089, + 0x19ea: 0x0279, 0x19eb: 0x0369, 0x19ec: 0x0289, 0x19ed: 0x13d1, 0x19ee: 0x0039, 0x19ef: 0x0ee9, + 0x19f0: 0x1159, 0x19f1: 0x0ef9, 0x19f2: 0x0f09, 0x19f3: 0x1199, 0x19f4: 0x0f31, 
0x19f5: 0x0249, + 0x19f6: 0x0f41, 0x19f7: 0x0259, 0x19f8: 0x0f51, 0x19f9: 0x0359, 0x19fa: 0x0f61, 0x19fb: 0x0f71, + 0x19fc: 0x00d9, 0x19fd: 0x0f99, 0x19fe: 0x2039, 0x19ff: 0x0269, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x01d9, 0x1a01: 0x0fa9, 0x1a02: 0x0fb9, 0x1a03: 0x1089, 0x1a04: 0x0279, 0x1a05: 0x0369, + 0x1a06: 0x0289, 0x1a07: 0x13d1, 0x1a08: 0x0039, 0x1a09: 0x0ee9, 0x1a0a: 0x1159, 0x1a0b: 0x0ef9, + 0x1a0c: 0x0f09, 0x1a0d: 0x1199, 0x1a0e: 0x0f31, 0x1a0f: 0x0249, 0x1a10: 0x0f41, 0x1a11: 0x0259, + 0x1a12: 0x0f51, 0x1a13: 0x0359, 0x1a14: 0x0f61, 0x1a15: 0x0f71, 0x1a16: 0x00d9, 0x1a17: 0x0f99, + 0x1a18: 0x2039, 0x1a19: 0x0269, 0x1a1a: 0x01d9, 0x1a1b: 0x0fa9, 0x1a1c: 0x0fb9, 0x1a1d: 0x1089, + 0x1a1e: 0x0279, 0x1a1f: 0x0369, 0x1a20: 0x0289, 0x1a21: 0x13d1, 0x1a22: 0x0039, 0x1a23: 0x0ee9, + 0x1a24: 0x1159, 0x1a25: 0x0ef9, 0x1a26: 0x0f09, 0x1a27: 0x1199, 0x1a28: 0x0f31, 0x1a29: 0x0249, + 0x1a2a: 0x0f41, 0x1a2b: 0x0259, 0x1a2c: 0x0f51, 0x1a2d: 0x0359, 0x1a2e: 0x0f61, 0x1a2f: 0x0f71, + 0x1a30: 0x00d9, 0x1a31: 0x0f99, 0x1a32: 0x2039, 0x1a33: 0x0269, 0x1a34: 0x01d9, 0x1a35: 0x0fa9, + 0x1a36: 0x0fb9, 0x1a37: 0x1089, 0x1a38: 0x0279, 0x1a39: 0x0369, 0x1a3a: 0x0289, 0x1a3b: 0x13d1, + 0x1a3c: 0x0039, 0x1a3d: 0x0ee9, 0x1a3e: 0x1159, 0x1a3f: 0x0ef9, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x0f09, 0x1a41: 0x1199, 0x1a42: 0x0f31, 0x1a43: 0x0249, 0x1a44: 0x0f41, 0x1a45: 0x0259, + 0x1a46: 0x0f51, 0x1a47: 0x0359, 0x1a48: 0x0f61, 0x1a49: 0x0f71, 0x1a4a: 0x00d9, 0x1a4b: 0x0f99, + 0x1a4c: 0x2039, 0x1a4d: 0x0269, 0x1a4e: 0x01d9, 0x1a4f: 0x0fa9, 0x1a50: 0x0fb9, 0x1a51: 0x1089, + 0x1a52: 0x0279, 0x1a53: 0x0369, 0x1a54: 0x0289, 0x1a55: 0x13d1, 0x1a56: 0x0039, 0x1a57: 0x0ee9, + 0x1a58: 0x1159, 0x1a59: 0x0ef9, 0x1a5a: 0x0f09, 0x1a5b: 0x1199, 0x1a5c: 0x0f31, 0x1a5d: 0x0249, + 0x1a5e: 0x0f41, 0x1a5f: 0x0259, 0x1a60: 0x0f51, 0x1a61: 0x0359, 0x1a62: 0x0f61, 0x1a63: 0x0f71, + 0x1a64: 0x00d9, 0x1a65: 0x0f99, 0x1a66: 0x2039, 0x1a67: 0x0269, 0x1a68: 0x01d9, 0x1a69: 0x0fa9, + 0x1a6a: 0x0fb9, 0x1a6b: 0x1089, 0x1a6c: 0x0279, 0x1a6d: 0x0369, 0x1a6e: 0x0289, 0x1a6f: 0x13d1, + 0x1a70: 0x0039, 0x1a71: 0x0ee9, 0x1a72: 0x1159, 0x1a73: 0x0ef9, 0x1a74: 0x0f09, 0x1a75: 0x1199, + 0x1a76: 0x0f31, 0x1a77: 0x0249, 0x1a78: 0x0f41, 0x1a79: 0x0259, 0x1a7a: 0x0f51, 0x1a7b: 0x0359, + 0x1a7c: 0x0f61, 0x1a7d: 0x0f71, 0x1a7e: 0x00d9, 0x1a7f: 0x0f99, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x2039, 0x1a81: 0x0269, 0x1a82: 0x01d9, 0x1a83: 0x0fa9, 0x1a84: 0x0fb9, 0x1a85: 0x1089, + 0x1a86: 0x0279, 0x1a87: 0x0369, 0x1a88: 0x0289, 0x1a89: 0x13d1, 0x1a8a: 0x0039, 0x1a8b: 0x0ee9, + 0x1a8c: 0x1159, 0x1a8d: 0x0ef9, 0x1a8e: 0x0f09, 0x1a8f: 0x1199, 0x1a90: 0x0f31, 0x1a91: 0x0249, + 0x1a92: 0x0f41, 0x1a93: 0x0259, 0x1a94: 0x0f51, 0x1a95: 0x0359, 0x1a96: 0x0f61, 0x1a97: 0x0f71, + 0x1a98: 0x00d9, 0x1a99: 0x0f99, 0x1a9a: 0x2039, 0x1a9b: 0x0269, 0x1a9c: 0x01d9, 0x1a9d: 0x0fa9, + 0x1a9e: 0x0fb9, 0x1a9f: 0x1089, 0x1aa0: 0x0279, 0x1aa1: 0x0369, 0x1aa2: 0x0289, 0x1aa3: 0x13d1, + 0x1aa4: 0xba81, 0x1aa5: 0xba99, 0x1aa6: 0x0040, 0x1aa7: 0x0040, 0x1aa8: 0xbab1, 0x1aa9: 0x1099, + 0x1aaa: 0x10b1, 0x1aab: 0x10c9, 0x1aac: 0xbac9, 0x1aad: 0xbae1, 0x1aae: 0xbaf9, 0x1aaf: 0x1429, + 0x1ab0: 0x1a31, 0x1ab1: 0xbb11, 0x1ab2: 0xbb29, 0x1ab3: 0xbb41, 0x1ab4: 0xbb59, 0x1ab5: 0xbb71, + 0x1ab6: 0xbb89, 0x1ab7: 0x2109, 0x1ab8: 0x1111, 0x1ab9: 0x1429, 0x1aba: 0xbba1, 0x1abb: 0xbbb9, + 0x1abc: 0xbbd1, 0x1abd: 0x10e1, 0x1abe: 0x10f9, 0x1abf: 0xbbe9, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2079, 0x1ac1: 0xbc01, 0x1ac2: 0xbab1, 0x1ac3: 0x1099, 0x1ac4: 0x10b1, 0x1ac5: 0x10c9, + 0x1ac6: 
0xbac9, 0x1ac7: 0xbae1, 0x1ac8: 0xbaf9, 0x1ac9: 0x1429, 0x1aca: 0x1a31, 0x1acb: 0xbb11, + 0x1acc: 0xbb29, 0x1acd: 0xbb41, 0x1ace: 0xbb59, 0x1acf: 0xbb71, 0x1ad0: 0xbb89, 0x1ad1: 0x2109, + 0x1ad2: 0x1111, 0x1ad3: 0xbba1, 0x1ad4: 0xbba1, 0x1ad5: 0xbbb9, 0x1ad6: 0xbbd1, 0x1ad7: 0x10e1, + 0x1ad8: 0x10f9, 0x1ad9: 0xbbe9, 0x1ada: 0x2079, 0x1adb: 0xbc21, 0x1adc: 0xbac9, 0x1add: 0x1429, + 0x1ade: 0xbb11, 0x1adf: 0x10e1, 0x1ae0: 0x1111, 0x1ae1: 0x2109, 0x1ae2: 0xbab1, 0x1ae3: 0x1099, + 0x1ae4: 0x10b1, 0x1ae5: 0x10c9, 0x1ae6: 0xbac9, 0x1ae7: 0xbae1, 0x1ae8: 0xbaf9, 0x1ae9: 0x1429, + 0x1aea: 0x1a31, 0x1aeb: 0xbb11, 0x1aec: 0xbb29, 0x1aed: 0xbb41, 0x1aee: 0xbb59, 0x1aef: 0xbb71, + 0x1af0: 0xbb89, 0x1af1: 0x2109, 0x1af2: 0x1111, 0x1af3: 0x1429, 0x1af4: 0xbba1, 0x1af5: 0xbbb9, + 0x1af6: 0xbbd1, 0x1af7: 0x10e1, 0x1af8: 0x10f9, 0x1af9: 0xbbe9, 0x1afa: 0x2079, 0x1afb: 0xbc01, + 0x1afc: 0xbab1, 0x1afd: 0x1099, 0x1afe: 0x10b1, 0x1aff: 0x10c9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0xbac9, 0x1b01: 0xbae1, 0x1b02: 0xbaf9, 0x1b03: 0x1429, 0x1b04: 0x1a31, 0x1b05: 0xbb11, + 0x1b06: 0xbb29, 0x1b07: 0xbb41, 0x1b08: 0xbb59, 0x1b09: 0xbb71, 0x1b0a: 0xbb89, 0x1b0b: 0x2109, + 0x1b0c: 0x1111, 0x1b0d: 0xbba1, 0x1b0e: 0xbba1, 0x1b0f: 0xbbb9, 0x1b10: 0xbbd1, 0x1b11: 0x10e1, + 0x1b12: 0x10f9, 0x1b13: 0xbbe9, 0x1b14: 0x2079, 0x1b15: 0xbc21, 0x1b16: 0xbac9, 0x1b17: 0x1429, + 0x1b18: 0xbb11, 0x1b19: 0x10e1, 0x1b1a: 0x1111, 0x1b1b: 0x2109, 0x1b1c: 0xbab1, 0x1b1d: 0x1099, + 0x1b1e: 0x10b1, 0x1b1f: 0x10c9, 0x1b20: 0xbac9, 0x1b21: 0xbae1, 0x1b22: 0xbaf9, 0x1b23: 0x1429, + 0x1b24: 0x1a31, 0x1b25: 0xbb11, 0x1b26: 0xbb29, 0x1b27: 0xbb41, 0x1b28: 0xbb59, 0x1b29: 0xbb71, + 0x1b2a: 0xbb89, 0x1b2b: 0x2109, 0x1b2c: 0x1111, 0x1b2d: 0x1429, 0x1b2e: 0xbba1, 0x1b2f: 0xbbb9, + 0x1b30: 0xbbd1, 0x1b31: 0x10e1, 0x1b32: 0x10f9, 0x1b33: 0xbbe9, 0x1b34: 0x2079, 0x1b35: 0xbc01, + 0x1b36: 0xbab1, 0x1b37: 0x1099, 0x1b38: 0x10b1, 0x1b39: 0x10c9, 0x1b3a: 0xbac9, 0x1b3b: 0xbae1, + 0x1b3c: 0xbaf9, 0x1b3d: 0x1429, 0x1b3e: 0x1a31, 0x1b3f: 0xbb11, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbb29, 0x1b41: 0xbb41, 0x1b42: 0xbb59, 0x1b43: 0xbb71, 0x1b44: 0xbb89, 0x1b45: 0x2109, + 0x1b46: 0x1111, 0x1b47: 0xbba1, 0x1b48: 0xbba1, 0x1b49: 0xbbb9, 0x1b4a: 0xbbd1, 0x1b4b: 0x10e1, + 0x1b4c: 0x10f9, 0x1b4d: 0xbbe9, 0x1b4e: 0x2079, 0x1b4f: 0xbc21, 0x1b50: 0xbac9, 0x1b51: 0x1429, + 0x1b52: 0xbb11, 0x1b53: 0x10e1, 0x1b54: 0x1111, 0x1b55: 0x2109, 0x1b56: 0xbab1, 0x1b57: 0x1099, + 0x1b58: 0x10b1, 0x1b59: 0x10c9, 0x1b5a: 0xbac9, 0x1b5b: 0xbae1, 0x1b5c: 0xbaf9, 0x1b5d: 0x1429, + 0x1b5e: 0x1a31, 0x1b5f: 0xbb11, 0x1b60: 0xbb29, 0x1b61: 0xbb41, 0x1b62: 0xbb59, 0x1b63: 0xbb71, + 0x1b64: 0xbb89, 0x1b65: 0x2109, 0x1b66: 0x1111, 0x1b67: 0x1429, 0x1b68: 0xbba1, 0x1b69: 0xbbb9, + 0x1b6a: 0xbbd1, 0x1b6b: 0x10e1, 0x1b6c: 0x10f9, 0x1b6d: 0xbbe9, 0x1b6e: 0x2079, 0x1b6f: 0xbc01, + 0x1b70: 0xbab1, 0x1b71: 0x1099, 0x1b72: 0x10b1, 0x1b73: 0x10c9, 0x1b74: 0xbac9, 0x1b75: 0xbae1, + 0x1b76: 0xbaf9, 0x1b77: 0x1429, 0x1b78: 0x1a31, 0x1b79: 0xbb11, 0x1b7a: 0xbb29, 0x1b7b: 0xbb41, + 0x1b7c: 0xbb59, 0x1b7d: 0xbb71, 0x1b7e: 0xbb89, 0x1b7f: 0x2109, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x1111, 0x1b81: 0xbba1, 0x1b82: 0xbba1, 0x1b83: 0xbbb9, 0x1b84: 0xbbd1, 0x1b85: 0x10e1, + 0x1b86: 0x10f9, 0x1b87: 0xbbe9, 0x1b88: 0x2079, 0x1b89: 0xbc21, 0x1b8a: 0xbac9, 0x1b8b: 0x1429, + 0x1b8c: 0xbb11, 0x1b8d: 0x10e1, 0x1b8e: 0x1111, 0x1b8f: 0x2109, 0x1b90: 0xbab1, 0x1b91: 0x1099, + 0x1b92: 0x10b1, 0x1b93: 0x10c9, 0x1b94: 0xbac9, 0x1b95: 0xbae1, 0x1b96: 0xbaf9, 0x1b97: 0x1429, + 0x1b98: 0x1a31, 0x1b99: 0xbb11, 
0x1b9a: 0xbb29, 0x1b9b: 0xbb41, 0x1b9c: 0xbb59, 0x1b9d: 0xbb71, + 0x1b9e: 0xbb89, 0x1b9f: 0x2109, 0x1ba0: 0x1111, 0x1ba1: 0x1429, 0x1ba2: 0xbba1, 0x1ba3: 0xbbb9, + 0x1ba4: 0xbbd1, 0x1ba5: 0x10e1, 0x1ba6: 0x10f9, 0x1ba7: 0xbbe9, 0x1ba8: 0x2079, 0x1ba9: 0xbc01, + 0x1baa: 0xbab1, 0x1bab: 0x1099, 0x1bac: 0x10b1, 0x1bad: 0x10c9, 0x1bae: 0xbac9, 0x1baf: 0xbae1, + 0x1bb0: 0xbaf9, 0x1bb1: 0x1429, 0x1bb2: 0x1a31, 0x1bb3: 0xbb11, 0x1bb4: 0xbb29, 0x1bb5: 0xbb41, + 0x1bb6: 0xbb59, 0x1bb7: 0xbb71, 0x1bb8: 0xbb89, 0x1bb9: 0x2109, 0x1bba: 0x1111, 0x1bbb: 0xbba1, + 0x1bbc: 0xbba1, 0x1bbd: 0xbbb9, 0x1bbe: 0xbbd1, 0x1bbf: 0x10e1, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x10f9, 0x1bc1: 0xbbe9, 0x1bc2: 0x2079, 0x1bc3: 0xbc21, 0x1bc4: 0xbac9, 0x1bc5: 0x1429, + 0x1bc6: 0xbb11, 0x1bc7: 0x10e1, 0x1bc8: 0x1111, 0x1bc9: 0x2109, 0x1bca: 0xbc41, 0x1bcb: 0xbc41, + 0x1bcc: 0x0040, 0x1bcd: 0x0040, 0x1bce: 0x1f41, 0x1bcf: 0x00c9, 0x1bd0: 0x0069, 0x1bd1: 0x0079, + 0x1bd2: 0x1f51, 0x1bd3: 0x1f61, 0x1bd4: 0x1f71, 0x1bd5: 0x1f81, 0x1bd6: 0x1f91, 0x1bd7: 0x1fa1, + 0x1bd8: 0x1f41, 0x1bd9: 0x00c9, 0x1bda: 0x0069, 0x1bdb: 0x0079, 0x1bdc: 0x1f51, 0x1bdd: 0x1f61, + 0x1bde: 0x1f71, 0x1bdf: 0x1f81, 0x1be0: 0x1f91, 0x1be1: 0x1fa1, 0x1be2: 0x1f41, 0x1be3: 0x00c9, + 0x1be4: 0x0069, 0x1be5: 0x0079, 0x1be6: 0x1f51, 0x1be7: 0x1f61, 0x1be8: 0x1f71, 0x1be9: 0x1f81, + 0x1bea: 0x1f91, 0x1beb: 0x1fa1, 0x1bec: 0x1f41, 0x1bed: 0x00c9, 0x1bee: 0x0069, 0x1bef: 0x0079, + 0x1bf0: 0x1f51, 0x1bf1: 0x1f61, 0x1bf2: 0x1f71, 0x1bf3: 0x1f81, 0x1bf4: 0x1f91, 0x1bf5: 0x1fa1, + 0x1bf6: 0x1f41, 0x1bf7: 0x00c9, 0x1bf8: 0x0069, 0x1bf9: 0x0079, 0x1bfa: 0x1f51, 0x1bfb: 0x1f61, + 0x1bfc: 0x1f71, 0x1bfd: 0x1f81, 0x1bfe: 0x1f91, 0x1bff: 0x1fa1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0xe115, 0x1c01: 0xe115, 0x1c02: 0xe135, 0x1c03: 0xe135, 0x1c04: 0xe115, 0x1c05: 0xe115, + 0x1c06: 0xe175, 0x1c07: 0xe175, 0x1c08: 0xe115, 0x1c09: 0xe115, 0x1c0a: 0xe135, 0x1c0b: 0xe135, + 0x1c0c: 0xe115, 0x1c0d: 0xe115, 0x1c0e: 0xe1f5, 0x1c0f: 0xe1f5, 0x1c10: 0xe115, 0x1c11: 0xe115, + 0x1c12: 0xe135, 0x1c13: 0xe135, 0x1c14: 0xe115, 0x1c15: 0xe115, 0x1c16: 0xe175, 0x1c17: 0xe175, + 0x1c18: 0xe115, 0x1c19: 0xe115, 0x1c1a: 0xe135, 0x1c1b: 0xe135, 0x1c1c: 0xe115, 0x1c1d: 0xe115, + 0x1c1e: 0x8b05, 0x1c1f: 0x8b05, 0x1c20: 0x04b5, 0x1c21: 0x04b5, 0x1c22: 0x0a08, 0x1c23: 0x0a08, + 0x1c24: 0x0a08, 0x1c25: 0x0a08, 0x1c26: 0x0a08, 0x1c27: 0x0a08, 0x1c28: 0x0a08, 0x1c29: 0x0a08, + 0x1c2a: 0x0a08, 0x1c2b: 0x0a08, 0x1c2c: 0x0a08, 0x1c2d: 0x0a08, 0x1c2e: 0x0a08, 0x1c2f: 0x0a08, + 0x1c30: 0x0a08, 0x1c31: 0x0a08, 0x1c32: 0x0a08, 0x1c33: 0x0a08, 0x1c34: 0x0a08, 0x1c35: 0x0a08, + 0x1c36: 0x0a08, 0x1c37: 0x0a08, 0x1c38: 0x0a08, 0x1c39: 0x0a08, 0x1c3a: 0x0a08, 0x1c3b: 0x0a08, + 0x1c3c: 0x0a08, 0x1c3d: 0x0a08, 0x1c3e: 0x0a08, 0x1c3f: 0x0a08, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xb189, 0x1c41: 0xb1a1, 0x1c42: 0xb201, 0x1c43: 0xb249, 0x1c44: 0x0040, 0x1c45: 0xb411, + 0x1c46: 0xb291, 0x1c47: 0xb219, 0x1c48: 0xb309, 0x1c49: 0xb429, 0x1c4a: 0xb399, 0x1c4b: 0xb3b1, + 0x1c4c: 0xb3c9, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0xb369, 0x1c51: 0xb2d9, + 0x1c52: 0xb381, 0x1c53: 0xb279, 0x1c54: 0xb2c1, 0x1c55: 0xb1d1, 0x1c56: 0xb1e9, 0x1c57: 0xb231, + 0x1c58: 0xb261, 0x1c59: 0xb2f1, 0x1c5a: 0xb321, 0x1c5b: 0xb351, 0x1c5c: 0xbc59, 0x1c5d: 0x7949, + 0x1c5e: 0xbc71, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040, + 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0x0040, 0x1c69: 0xb429, + 0x1c6a: 0xb399, 0x1c6b: 0xb3b1, 0x1c6c: 0xb3c9, 0x1c6d: 
0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339, + 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1, + 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0x0040, 0x1c7b: 0xb351, + 0x1c7c: 0x0040, 0x1c7d: 0x0040, 0x1c7e: 0x0040, 0x1c7f: 0x0040, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x0040, 0x1c81: 0x0040, 0x1c82: 0xb201, 0x1c83: 0x0040, 0x1c84: 0x0040, 0x1c85: 0x0040, + 0x1c86: 0x0040, 0x1c87: 0xb219, 0x1c88: 0x0040, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1, + 0x1c8c: 0x0040, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0x0040, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0x0040, 0x1c94: 0xb2c1, 0x1c95: 0x0040, 0x1c96: 0x0040, 0x1c97: 0xb231, + 0x1c98: 0x0040, 0x1c99: 0xb2f1, 0x1c9a: 0x0040, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x7949, + 0x1c9e: 0x0040, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0x0040, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351, + 0x1cbc: 0xbc59, 0x1cbd: 0x0040, 0x1cbe: 0xbc71, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0xb189, 0x1cc1: 0xb1a1, 0x1cc2: 0xb201, 0x1cc3: 0xb249, 0x1cc4: 0xb3f9, 0x1cc5: 0xb411, + 0x1cc6: 0xb291, 0x1cc7: 0xb219, 0x1cc8: 0xb309, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0xb3c9, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0xb369, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0xb279, 0x1cd4: 0xb2c1, 0x1cd5: 0xb1d1, 0x1cd6: 0xb1e9, 0x1cd7: 0xb231, + 0x1cd8: 0xb261, 0x1cd9: 0xb2f1, 0x1cda: 0xb321, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x0040, + 0x1cde: 0x0040, 0x1cdf: 0x0040, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0xb249, + 0x1ce4: 0x0040, 0x1ce5: 0xb411, 0x1ce6: 0xb291, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0x0040, 0x1ceb: 0xb3b1, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0xb279, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0xb261, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0x0040, 0x1cfd: 0x0040, 0x1cfe: 0x0040, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0x0040, 0x1d01: 0xbca2, 0x1d02: 0xbcba, 0x1d03: 0xbcd2, 0x1d04: 0xbcea, 0x1d05: 0xbd02, + 0x1d06: 0xbd1a, 0x1d07: 0xbd32, 0x1d08: 0xbd4a, 0x1d09: 0xbd62, 0x1d0a: 0xbd7a, 0x1d0b: 0x0018, + 0x1d0c: 0x0018, 0x1d0d: 0x0040, 0x1d0e: 0x0040, 0x1d0f: 0x0040, 0x1d10: 0xbd92, 0x1d11: 0xbdb2, + 0x1d12: 0xbdd2, 0x1d13: 0xbdf2, 0x1d14: 0xbe12, 0x1d15: 0xbe32, 0x1d16: 0xbe52, 0x1d17: 0xbe72, + 0x1d18: 0xbe92, 0x1d19: 0xbeb2, 0x1d1a: 0xbed2, 0x1d1b: 0xbef2, 0x1d1c: 0xbf12, 0x1d1d: 0xbf32, + 0x1d1e: 0xbf52, 0x1d1f: 0xbf72, 0x1d20: 0xbf92, 0x1d21: 0xbfb2, 0x1d22: 0xbfd2, 0x1d23: 0xbff2, + 0x1d24: 0xc012, 0x1d25: 0xc032, 0x1d26: 0xc052, 0x1d27: 0xc072, 0x1d28: 0xc092, 0x1d29: 0xc0b2, + 0x1d2a: 0xc0d1, 0x1d2b: 0x1159, 0x1d2c: 0x0269, 0x1d2d: 0x6671, 0x1d2e: 0xc111, 0x1d2f: 0x0040, + 0x1d30: 0x0039, 0x1d31: 0x0ee9, 0x1d32: 0x1159, 0x1d33: 0x0ef9, 0x1d34: 0x0f09, 0x1d35: 0x1199, + 0x1d36: 0x0f31, 0x1d37: 0x0249, 0x1d38: 0x0f41, 0x1d39: 0x0259, 0x1d3a: 0x0f51, 0x1d3b: 0x0359, + 0x1d3c: 0x0f61, 0x1d3d: 0x0f71, 0x1d3e: 0x00d9, 0x1d3f: 0x0f99, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x2039, 0x1d41: 0x0269, 0x1d42: 0x01d9, 0x1d43: 0x0fa9, 0x1d44: 0x0fb9, 0x1d45: 0x1089, + 0x1d46: 0x0279, 0x1d47: 0x0369, 0x1d48: 0x0289, 0x1d49: 0x13d1, 0x1d4a: 0xc129, 0x1d4b: 0x65b1, + 0x1d4c: 0xc141, 0x1d4d: 0x1441, 0x1d4e: 0xc159, 0x1d4f: 0xc179, 0x1d50: 0x0018, 0x1d51: 0x0018, + 0x1d52: 0x0018, 0x1d53: 0x0018, 0x1d54: 0x0018, 0x1d55: 0x0018, 0x1d56: 0x0018, 0x1d57: 0x0018, + 0x1d58: 0x0018, 0x1d59: 0x0018, 0x1d5a: 0x0018, 0x1d5b: 0x0018, 0x1d5c: 0x0018, 0x1d5d: 0x0018, + 0x1d5e: 0x0018, 0x1d5f: 0x0018, 0x1d60: 0x0018, 0x1d61: 0x0018, 0x1d62: 0x0018, 0x1d63: 0x0018, + 0x1d64: 0x0018, 0x1d65: 0x0018, 0x1d66: 0x0018, 0x1d67: 0x0018, 0x1d68: 0x0018, 0x1d69: 0x0018, + 0x1d6a: 0xc191, 0x1d6b: 0xc1a9, 0x1d6c: 0x0040, 0x1d6d: 0x0040, 0x1d6e: 0x0040, 0x1d6f: 0x0040, + 0x1d70: 0x0018, 0x1d71: 0x0018, 0x1d72: 0x0018, 0x1d73: 0x0018, 0x1d74: 0x0018, 0x1d75: 0x0018, + 0x1d76: 0x0018, 0x1d77: 0x0018, 0x1d78: 0x0018, 0x1d79: 0x0018, 0x1d7a: 0x0018, 0x1d7b: 0x0018, + 0x1d7c: 0x0018, 0x1d7d: 0x0018, 0x1d7e: 0x0018, 0x1d7f: 0x0018, + // Block 0x76, offset 0x1d80 + 0x1d80: 0xc1d9, 0x1d81: 0xc211, 0x1d82: 0xc249, 0x1d83: 0x0040, 0x1d84: 0x0040, 0x1d85: 0x0040, + 0x1d86: 0x0040, 0x1d87: 0x0040, 0x1d88: 0x0040, 0x1d89: 0x0040, 0x1d8a: 0x0040, 0x1d8b: 0x0040, + 0x1d8c: 0x0040, 0x1d8d: 0x0040, 0x1d8e: 0x0040, 0x1d8f: 0x0040, 0x1d90: 0xc269, 0x1d91: 0xc289, + 0x1d92: 0xc2a9, 0x1d93: 0xc2c9, 0x1d94: 0xc2e9, 0x1d95: 0xc309, 0x1d96: 0xc329, 0x1d97: 0xc349, + 0x1d98: 0xc369, 0x1d99: 0xc389, 0x1d9a: 0xc3a9, 0x1d9b: 0xc3c9, 0x1d9c: 0xc3e9, 0x1d9d: 0xc409, + 0x1d9e: 0xc429, 0x1d9f: 0xc449, 0x1da0: 0xc469, 0x1da1: 0xc489, 0x1da2: 0xc4a9, 0x1da3: 0xc4c9, + 0x1da4: 0xc4e9, 0x1da5: 0xc509, 0x1da6: 0xc529, 0x1da7: 0xc549, 0x1da8: 0xc569, 0x1da9: 0xc589, + 0x1daa: 0xc5a9, 0x1dab: 0xc5c9, 0x1dac: 0xc5e9, 0x1dad: 0xc609, 0x1dae: 0xc629, 0x1daf: 0xc649, + 0x1db0: 0xc669, 0x1db1: 0xc689, 0x1db2: 0xc6a9, 0x1db3: 0xc6c9, 0x1db4: 0xc6e9, 0x1db5: 0xc709, + 0x1db6: 0xc729, 0x1db7: 0xc749, 0x1db8: 0xc769, 0x1db9: 0xc789, 0x1dba: 0xc7a9, 0x1dbb: 0xc7c9, + 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xcaf9, 0x1dc1: 0xcb19, 0x1dc2: 0xcb39, 0x1dc3: 0x8b1d, 0x1dc4: 0xcb59, 0x1dc5: 0xcb79, + 0x1dc6: 0xcb99, 0x1dc7: 0xcbb9, 0x1dc8: 0xcbd9, 0x1dc9: 0xcbf9, 0x1dca: 0xcc19, 0x1dcb: 0xcc39, + 0x1dcc: 0xcc59, 0x1dcd: 0x8b3d, 0x1dce: 0xcc79, 0x1dcf: 0xcc99, 0x1dd0: 0xccb9, 0x1dd1: 0xccd9, + 0x1dd2: 0x8b5d, 0x1dd3: 0xccf9, 0x1dd4: 0xcd19, 0x1dd5: 0xc429, 0x1dd6: 0x8b7d, 0x1dd7: 0xcd39, + 0x1dd8: 0xcd59, 0x1dd9: 0xcd79, 0x1dda: 0xcd99, 0x1ddb: 0xcdb9, 0x1ddc: 0x8b9d, 0x1ddd: 0xcdd9, + 0x1dde: 0xcdf9, 0x1ddf: 0xce19, 0x1de0: 0xce39, 0x1de1: 0xce59, 0x1de2: 0xc789, 0x1de3: 0xce79, + 0x1de4: 0xce99, 0x1de5: 0xceb9, 0x1de6: 0xced9, 0x1de7: 0xcef9, 0x1de8: 0xcf19, 0x1de9: 0xcf39, + 0x1dea: 0xcf59, 0x1deb: 0xcf79, 0x1dec: 0xcf99, 0x1ded: 0xcfb9, 0x1dee: 0xcfd9, 0x1def: 0xcff9, + 0x1df0: 0xd019, 0x1df1: 0xd039, 0x1df2: 0xd039, 0x1df3: 0xd039, 0x1df4: 0x8bbd, 0x1df5: 0xd059, + 0x1df6: 0xd079, 0x1df7: 0xd099, 0x1df8: 0x8bdd, 0x1df9: 0xd0b9, 0x1dfa: 0xd0d9, 0x1dfb: 0xd0f9, + 0x1dfc: 0xd119, 0x1dfd: 0xd139, 0x1dfe: 0xd159, 0x1dff: 0xd179, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xd199, 0x1e01: 0xd1b9, 0x1e02: 0xd1d9, 0x1e03: 0xd1f9, 0x1e04: 0xd219, 0x1e05: 0xd239, + 0x1e06: 0xd239, 0x1e07: 0xd259, 0x1e08: 0xd279, 0x1e09: 0xd299, 0x1e0a: 0xd2b9, 0x1e0b: 0xd2d9, + 0x1e0c: 0xd2f9, 0x1e0d: 0xd319, 0x1e0e: 0xd339, 0x1e0f: 0xd359, 0x1e10: 0xd379, 0x1e11: 0xd399, + 0x1e12: 
0xd3b9, 0x1e13: 0xd3d9, 0x1e14: 0xd3f9, 0x1e15: 0xd419, 0x1e16: 0xd439, 0x1e17: 0xd459, + 0x1e18: 0xd479, 0x1e19: 0x8bfd, 0x1e1a: 0xd499, 0x1e1b: 0xd4b9, 0x1e1c: 0xd4d9, 0x1e1d: 0xc309, + 0x1e1e: 0xd4f9, 0x1e1f: 0xd519, 0x1e20: 0x8c1d, 0x1e21: 0x8c3d, 0x1e22: 0xd539, 0x1e23: 0xd559, + 0x1e24: 0xd579, 0x1e25: 0xd599, 0x1e26: 0xd5b9, 0x1e27: 0xd5d9, 0x1e28: 0x2040, 0x1e29: 0xd5f9, + 0x1e2a: 0xd619, 0x1e2b: 0xd619, 0x1e2c: 0x8c5d, 0x1e2d: 0xd639, 0x1e2e: 0xd659, 0x1e2f: 0xd679, + 0x1e30: 0xd699, 0x1e31: 0x8c7d, 0x1e32: 0xd6b9, 0x1e33: 0xd6d9, 0x1e34: 0x2040, 0x1e35: 0xd6f9, + 0x1e36: 0xd719, 0x1e37: 0xd739, 0x1e38: 0xd759, 0x1e39: 0xd779, 0x1e3a: 0xd799, 0x1e3b: 0x8c9d, + 0x1e3c: 0xd7b9, 0x1e3d: 0x8cbd, 0x1e3e: 0xd7d9, 0x1e3f: 0xd7f9, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd819, 0x1e41: 0xd839, 0x1e42: 0xd859, 0x1e43: 0xd879, 0x1e44: 0xd899, 0x1e45: 0xd8b9, + 0x1e46: 0xd8d9, 0x1e47: 0xd8f9, 0x1e48: 0xd919, 0x1e49: 0x8cdd, 0x1e4a: 0xd939, 0x1e4b: 0xd959, + 0x1e4c: 0xd979, 0x1e4d: 0xd999, 0x1e4e: 0xd9b9, 0x1e4f: 0x8cfd, 0x1e50: 0xd9d9, 0x1e51: 0x8d1d, + 0x1e52: 0x8d3d, 0x1e53: 0xd9f9, 0x1e54: 0xda19, 0x1e55: 0xda19, 0x1e56: 0xda39, 0x1e57: 0x8d5d, + 0x1e58: 0x8d7d, 0x1e59: 0xda59, 0x1e5a: 0xda79, 0x1e5b: 0xda99, 0x1e5c: 0xdab9, 0x1e5d: 0xdad9, + 0x1e5e: 0xdaf9, 0x1e5f: 0xdb19, 0x1e60: 0xdb39, 0x1e61: 0xdb59, 0x1e62: 0xdb79, 0x1e63: 0xdb99, + 0x1e64: 0x8d9d, 0x1e65: 0xdbb9, 0x1e66: 0xdbd9, 0x1e67: 0xdbf9, 0x1e68: 0xdc19, 0x1e69: 0xdbf9, + 0x1e6a: 0xdc39, 0x1e6b: 0xdc59, 0x1e6c: 0xdc79, 0x1e6d: 0xdc99, 0x1e6e: 0xdcb9, 0x1e6f: 0xdcd9, + 0x1e70: 0xdcf9, 0x1e71: 0xdd19, 0x1e72: 0xdd39, 0x1e73: 0xdd59, 0x1e74: 0xdd79, 0x1e75: 0xdd99, + 0x1e76: 0xddb9, 0x1e77: 0xddd9, 0x1e78: 0x8dbd, 0x1e79: 0xddf9, 0x1e7a: 0xde19, 0x1e7b: 0xde39, + 0x1e7c: 0xde59, 0x1e7d: 0xde79, 0x1e7e: 0x8ddd, 0x1e7f: 0xde99, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xe599, 0x1e81: 0xe5b9, 0x1e82: 0xe5d9, 0x1e83: 0xe5f9, 0x1e84: 0xe619, 0x1e85: 0xe639, + 0x1e86: 0x8efd, 0x1e87: 0xe659, 0x1e88: 0xe679, 0x1e89: 0xe699, 0x1e8a: 0xe6b9, 0x1e8b: 0xe6d9, + 0x1e8c: 0xe6f9, 0x1e8d: 0x8f1d, 0x1e8e: 0xe719, 0x1e8f: 0xe739, 0x1e90: 0x8f3d, 0x1e91: 0x8f5d, + 0x1e92: 0xe759, 0x1e93: 0xe779, 0x1e94: 0xe799, 0x1e95: 0xe7b9, 0x1e96: 0xe7d9, 0x1e97: 0xe7f9, + 0x1e98: 0xe819, 0x1e99: 0xe839, 0x1e9a: 0xe859, 0x1e9b: 0x8f7d, 0x1e9c: 0xe879, 0x1e9d: 0x8f9d, + 0x1e9e: 0xe899, 0x1e9f: 0x2040, 0x1ea0: 0xe8b9, 0x1ea1: 0xe8d9, 0x1ea2: 0xe8f9, 0x1ea3: 0x8fbd, + 0x1ea4: 0xe919, 0x1ea5: 0xe939, 0x1ea6: 0x8fdd, 0x1ea7: 0x8ffd, 0x1ea8: 0xe959, 0x1ea9: 0xe979, + 0x1eaa: 0xe999, 0x1eab: 0xe9b9, 0x1eac: 0xe9d9, 0x1ead: 0xe9d9, 0x1eae: 0xe9f9, 0x1eaf: 0xea19, + 0x1eb0: 0xea39, 0x1eb1: 0xea59, 0x1eb2: 0xea79, 0x1eb3: 0xea99, 0x1eb4: 0xeab9, 0x1eb5: 0x901d, + 0x1eb6: 0xead9, 0x1eb7: 0x903d, 0x1eb8: 0xeaf9, 0x1eb9: 0x905d, 0x1eba: 0xeb19, 0x1ebb: 0x907d, + 0x1ebc: 0x909d, 0x1ebd: 0x90bd, 0x1ebe: 0xeb39, 0x1ebf: 0xeb59, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xeb79, 0x1ec1: 0x90dd, 0x1ec2: 0x90fd, 0x1ec3: 0x911d, 0x1ec4: 0x913d, 0x1ec5: 0xeb99, + 0x1ec6: 0xebb9, 0x1ec7: 0xebb9, 0x1ec8: 0xebd9, 0x1ec9: 0xebf9, 0x1eca: 0xec19, 0x1ecb: 0xec39, + 0x1ecc: 0xec59, 0x1ecd: 0x915d, 0x1ece: 0xec79, 0x1ecf: 0xec99, 0x1ed0: 0xecb9, 0x1ed1: 0xecd9, + 0x1ed2: 0x917d, 0x1ed3: 0xecf9, 0x1ed4: 0x919d, 0x1ed5: 0x91bd, 0x1ed6: 0xed19, 0x1ed7: 0xed39, + 0x1ed8: 0xed59, 0x1ed9: 0xed79, 0x1eda: 0xed99, 0x1edb: 0xedb9, 0x1edc: 0x91dd, 0x1edd: 0x91fd, + 0x1ede: 0x921d, 0x1edf: 0x2040, 0x1ee0: 0xedd9, 0x1ee1: 0x923d, 0x1ee2: 0xedf9, 0x1ee3: 0xee19, + 0x1ee4: 0xee39, 0x1ee5: 0x925d, 
0x1ee6: 0xee59, 0x1ee7: 0xee79, 0x1ee8: 0xee99, 0x1ee9: 0xeeb9, + 0x1eea: 0xeed9, 0x1eeb: 0x927d, 0x1eec: 0xeef9, 0x1eed: 0xef19, 0x1eee: 0xef39, 0x1eef: 0xef59, + 0x1ef0: 0xef79, 0x1ef1: 0xef99, 0x1ef2: 0x929d, 0x1ef3: 0x92bd, 0x1ef4: 0xefb9, 0x1ef5: 0x92dd, + 0x1ef6: 0xefd9, 0x1ef7: 0x92fd, 0x1ef8: 0xeff9, 0x1ef9: 0xf019, 0x1efa: 0xf039, 0x1efb: 0x931d, + 0x1efc: 0x933d, 0x1efd: 0xf059, 0x1efe: 0x935d, 0x1eff: 0xf079, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xf6b9, 0x1f01: 0xf6d9, 0x1f02: 0xf6f9, 0x1f03: 0xf719, 0x1f04: 0xf739, 0x1f05: 0x951d, + 0x1f06: 0xf759, 0x1f07: 0xf779, 0x1f08: 0xf799, 0x1f09: 0xf7b9, 0x1f0a: 0xf7d9, 0x1f0b: 0x953d, + 0x1f0c: 0x955d, 0x1f0d: 0xf7f9, 0x1f0e: 0xf819, 0x1f0f: 0xf839, 0x1f10: 0xf859, 0x1f11: 0xf879, + 0x1f12: 0xf899, 0x1f13: 0x957d, 0x1f14: 0xf8b9, 0x1f15: 0xf8d9, 0x1f16: 0xf8f9, 0x1f17: 0xf919, + 0x1f18: 0x959d, 0x1f19: 0x95bd, 0x1f1a: 0xf939, 0x1f1b: 0xf959, 0x1f1c: 0xf979, 0x1f1d: 0x95dd, + 0x1f1e: 0xf999, 0x1f1f: 0xf9b9, 0x1f20: 0x6815, 0x1f21: 0x95fd, 0x1f22: 0xf9d9, 0x1f23: 0xf9f9, + 0x1f24: 0xfa19, 0x1f25: 0x961d, 0x1f26: 0xfa39, 0x1f27: 0xfa59, 0x1f28: 0xfa79, 0x1f29: 0xfa99, + 0x1f2a: 0xfab9, 0x1f2b: 0xfad9, 0x1f2c: 0xfaf9, 0x1f2d: 0x963d, 0x1f2e: 0xfb19, 0x1f2f: 0xfb39, + 0x1f30: 0xfb59, 0x1f31: 0x965d, 0x1f32: 0xfb79, 0x1f33: 0xfb99, 0x1f34: 0xfbb9, 0x1f35: 0xfbd9, + 0x1f36: 0x7b35, 0x1f37: 0x967d, 0x1f38: 0xfbf9, 0x1f39: 0xfc19, 0x1f3a: 0xfc39, 0x1f3b: 0x969d, + 0x1f3c: 0xfc59, 0x1f3d: 0x96bd, 0x1f3e: 0xfc79, 0x1f3f: 0xfc79, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xfc99, 0x1f41: 0x96dd, 0x1f42: 0xfcb9, 0x1f43: 0xfcd9, 0x1f44: 0xfcf9, 0x1f45: 0xfd19, + 0x1f46: 0xfd39, 0x1f47: 0xfd59, 0x1f48: 0xfd79, 0x1f49: 0x96fd, 0x1f4a: 0xfd99, 0x1f4b: 0xfdb9, + 0x1f4c: 0xfdd9, 0x1f4d: 0xfdf9, 0x1f4e: 0xfe19, 0x1f4f: 0xfe39, 0x1f50: 0x971d, 0x1f51: 0xfe59, + 0x1f52: 0x973d, 0x1f53: 0x975d, 0x1f54: 0x977d, 0x1f55: 0xfe79, 0x1f56: 0xfe99, 0x1f57: 0xfeb9, + 0x1f58: 0xfed9, 0x1f59: 0xfef9, 0x1f5a: 0xff19, 0x1f5b: 0xff39, 0x1f5c: 0xff59, 0x1f5d: 0x979d, + 0x1f5e: 0x0040, 0x1f5f: 0x0040, 0x1f60: 0x0040, 0x1f61: 0x0040, 0x1f62: 0x0040, 0x1f63: 0x0040, + 0x1f64: 0x0040, 0x1f65: 0x0040, 0x1f66: 0x0040, 0x1f67: 0x0040, 0x1f68: 0x0040, 0x1f69: 0x0040, + 0x1f6a: 0x0040, 0x1f6b: 0x0040, 0x1f6c: 0x0040, 0x1f6d: 0x0040, 0x1f6e: 0x0040, 0x1f6f: 0x0040, + 0x1f70: 0x0040, 0x1f71: 0x0040, 0x1f72: 0x0040, 0x1f73: 0x0040, 0x1f74: 0x0040, 0x1f75: 0x0040, + 0x1f76: 0x0040, 0x1f77: 0x0040, 0x1f78: 0x0040, 0x1f79: 0x0040, 0x1f7a: 0x0040, 0x1f7b: 0x0040, + 0x1f7c: 0x0040, 0x1f7d: 0x0040, 0x1f7e: 0x0040, 0x1f7f: 0x0040, +} + +// idnaIndex: 35 blocks, 2240 entries, 4480 bytes +// Block 0 is the zero block. 
+var idnaIndex = [2240]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7c, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7d, 0xca: 0x7e, 0xcb: 0x07, 0xcc: 0x7f, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x80, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x81, 0xd6: 0x82, 0xd7: 0x83, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x84, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20, + // Block 0x4, offset 0x100 + 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x13, 0x126: 0x14, 0x127: 0x15, + 0x128: 0x16, 0x129: 0x17, 0x12a: 0x18, 0x12b: 0x19, 0x12c: 0x1a, 0x12d: 0x1b, 0x12e: 0x1c, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1d, 0x132: 0x1e, 0x133: 0x1f, 0x134: 0x8f, 0x135: 0x20, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x21, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x22, 0x13e: 0x23, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x24, 0x175: 0x25, 0x176: 0x26, 0x177: 0xc3, + 0x178: 0x27, 0x179: 0x27, 0x17a: 0x28, 0x17b: 0x27, 0x17c: 0xc4, 0x17d: 0x29, 0x17e: 0x2a, 0x17f: 0x2b, + // Block 0x6, offset 0x180 + 0x180: 0x2c, 0x181: 0x2d, 0x182: 0x2e, 0x183: 0xc5, 0x184: 0x2f, 0x185: 0x30, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xca, + 0x190: 0xcb, 0x191: 0x31, 0x192: 0x32, 0x193: 0x33, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcc, 0x1a9: 0xcd, 0x1aa: 0x9b, 0x1ab: 0xce, 0x1ac: 0x9b, 0x1ad: 0xcf, 0x1ae: 0xd0, 0x1af: 0xd1, + 0x1b0: 0xd2, 0x1b1: 0x34, 0x1b2: 0x27, 0x1b3: 0x35, 0x1b4: 0xd3, 0x1b5: 0xd4, 0x1b6: 0xd5, 0x1b7: 0xd6, + 0x1b8: 0xd7, 0x1b9: 0xd8, 0x1ba: 0xd9, 0x1bb: 0xda, 0x1bc: 0xdb, 0x1bd: 0xdc, 0x1be: 0xdd, 0x1bf: 0x36, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x37, 0x1c1: 0xde, 0x1c2: 0xdf, 0x1c3: 0xe0, 0x1c4: 0xe1, 0x1c5: 0x38, 0x1c6: 0x39, 0x1c7: 0xe2, + 0x1c8: 0xe3, 0x1c9: 0x3a, 0x1ca: 0x3b, 0x1cb: 0x3c, 0x1cc: 0x3d, 0x1cd: 0x3e, 0x1ce: 0x3f, 0x1cf: 0x40, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 
0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe4, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe5, 0x2d3: 0xe6, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe7, 0x2d9: 0x41, 0x2da: 0x42, 0x2db: 0xe8, 0x2dc: 0x43, 0x2dd: 0x44, 0x2de: 0x45, 0x2df: 0xe9, + 0x2e0: 0xea, 0x2e1: 0xeb, 0x2e2: 0xec, 0x2e3: 0xed, 0x2e4: 0xee, 0x2e5: 0xef, 0x2e6: 0xf0, 0x2e7: 0xf1, + 0x2e8: 0xf2, 0x2e9: 0xf3, 0x2ea: 0xf4, 0x2eb: 0xf5, 0x2ec: 
0xf6, 0x2ed: 0xf7, 0x2ee: 0xf8, 0x2ef: 0xf9, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xfa, 0x31f: 0xfb, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfc, 0x3a5: 0xfd, 0x3a6: 0xfe, 0x3a7: 0xff, + 0x3a8: 0x46, 0x3a9: 0x100, 0x3aa: 0x101, 0x3ab: 0x47, 0x3ac: 0x48, 0x3ad: 0x49, 0x3ae: 0x4a, 0x3af: 0x4b, + 0x3b0: 0x102, 0x3b1: 0x4c, 0x3b2: 0x4d, 0x3b3: 0x4e, 0x3b4: 0x4f, 0x3b5: 0x50, 0x3b6: 0x103, 0x3b7: 0x51, + 0x3b8: 0x52, 0x3b9: 0x53, 0x3ba: 0x54, 0x3bb: 0x55, 0x3bc: 0x56, 0x3bd: 0x57, 0x3be: 0x58, 0x3bf: 0x59, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x104, 0x3c1: 0x105, 0x3c2: 0x9f, 0x3c3: 0x106, 0x3c4: 0x107, 0x3c5: 0x9b, 0x3c6: 0x108, 0x3c7: 0x109, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x10a, 0x3cb: 0x10b, 0x3cc: 0x10c, 0x3cd: 0x10d, 0x3ce: 0x10e, 0x3cf: 0x10f, + 0x3d0: 0x110, 0x3d1: 0x9f, 0x3d2: 0x111, 0x3d3: 0x112, 0x3d4: 0x113, 0x3d5: 0x114, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x115, 0x3dd: 0x116, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x117, 0x3e1: 0x118, 0x3e2: 0x119, 0x3e3: 0x11a, 0x3e4: 0x11b, 0x3e5: 0xba, 0x3e6: 0x11c, 0x3e7: 0x11d, + 0x3e8: 0x11e, 0x3e9: 0x11f, 0x3ea: 0x120, 0x3eb: 0x5a, 0x3ec: 0x121, 0x3ed: 0x122, 0x3ee: 0x5b, 0x3ef: 0xba, + 0x3f0: 0x123, 0x3f1: 0x124, 0x3f2: 0x125, 0x3f3: 0x126, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x128, 0x401: 0x129, 0x402: 0x12a, 0x403: 0x12b, 0x404: 0x12c, 0x405: 0x12d, 0x406: 0x12e, 0x407: 0x12f, + 0x408: 0x130, 0x409: 0xba, 
0x40a: 0x131, 0x40b: 0x132, 0x40c: 0x5c, 0x40d: 0x5d, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x133, 0x411: 0x134, 0x412: 0x135, 0x413: 0x136, 0x414: 0xba, 0x415: 0xba, 0x416: 0x137, 0x417: 0x138, + 0x418: 0x139, 0x419: 0x13a, 0x41a: 0x13b, 0x41b: 0x13c, 0x41c: 0x13d, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13e, 0x423: 0x13f, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0xba, 0x429: 0xba, 0x42a: 0xba, 0x42b: 0x140, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x141, 0x431: 0x142, 0x432: 0x143, 0x433: 0xba, 0x434: 0xba, 0x435: 0xba, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x144, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x145, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x146, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x147, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x148, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 
0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x140, 0x529: 0x149, 0x52a: 0xba, 0x52b: 0x14a, 0x52c: 0x14b, 0x52d: 0x14c, 0x52e: 0x14d, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x14e, 0x53e: 0x14f, 0x53f: 0x150, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x151, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x152, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x153, 0x581: 0xba, 0x582: 0xba, 0x583: 0xba, 0x584: 0xba, 0x585: 0xba, 0x586: 0xba, 0x587: 0xba, + 0x588: 0xba, 0x589: 0xba, 0x58a: 0xba, 0x58b: 0xba, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x154, 0x5b2: 0x155, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x156, 0x5c4: 0x157, 0x5c5: 0x158, 0x5c6: 0x159, 0x5c7: 0x15a, + 0x5c8: 0x9b, 0x5c9: 0x15b, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x15c, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5e, 0x5d1: 0x5f, 0x5d2: 0x60, 0x5d3: 0x61, 0x5d4: 0x62, 0x5d5: 0x63, 0x5d6: 0x64, 0x5d7: 0x65, + 0x5d8: 0x66, 0x5d9: 0x67, 0x5da: 0x68, 0x5db: 0x69, 0x5dc: 0x6a, 0x5dd: 0x6b, 0x5de: 0x6c, 0x5df: 0x6d, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x15d, 0x5e9: 0x15e, 0x5ea: 0x15f, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba, + // Block 0x18, offset 0x600 + 0x600: 0x160, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 
0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x123, 0x621: 0x123, 0x622: 0x123, 0x623: 0x161, 0x624: 0x6e, 0x625: 0x162, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x6f, 0x639: 0x70, 0x63a: 0x71, 0x63b: 0x163, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x164, 0x641: 0x9b, 0x642: 0x165, 0x643: 0x166, 0x644: 0x72, 0x645: 0x73, 0x646: 0x167, 0x647: 0x168, + 0x648: 0x74, 0x649: 0x169, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x16a, 0x65c: 0x9b, 0x65d: 0x16b, 0x65e: 0x9b, 0x65f: 0x16c, + 0x660: 0x16d, 0x661: 0x16e, 0x662: 0x16f, 0x663: 0xba, 0x664: 0x170, 0x665: 0x171, 0x666: 0x172, 0x667: 0x173, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x174, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x175, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x176, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, + // Block 0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 
0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x177, 0x73b: 0xba, 0x73c: 0xba, 0x73d: 0xba, 0x73e: 0xba, 0x73f: 0xba, + // Block 0x1d, offset 0x740 + 0x740: 0xba, 0x741: 0xba, 0x742: 0xba, 0x743: 0xba, 0x744: 0xba, 0x745: 0xba, 0x746: 0xba, 0x747: 0xba, + 0x748: 0xba, 0x749: 0xba, 0x74a: 0xba, 0x74b: 0xba, 0x74c: 0xba, 0x74d: 0xba, 0x74e: 0xba, 0x74f: 0xba, + 0x750: 0xba, 0x751: 0xba, 0x752: 0xba, 0x753: 0xba, 0x754: 0xba, 0x755: 0xba, 0x756: 0xba, 0x757: 0xba, + 0x758: 0xba, 0x759: 0xba, 0x75a: 0xba, 0x75b: 0xba, 0x75c: 0xba, 0x75d: 0xba, 0x75e: 0xba, 0x75f: 0xba, + 0x760: 0x75, 0x761: 0x76, 0x762: 0x77, 0x763: 0x178, 0x764: 0x78, 0x765: 0x79, 0x766: 0x179, 0x767: 0x7a, + 0x768: 0x7b, 0x769: 0xba, 0x76a: 0xba, 0x76b: 0xba, 0x76c: 0xba, 0x76d: 0xba, 0x76e: 0xba, 0x76f: 0xba, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x790: 0x0d, 0x791: 0x0e, 0x792: 0x0f, 0x793: 0x10, 0x794: 0x11, 0x795: 0x0b, 0x796: 0x12, 0x797: 0x07, + 0x798: 0x13, 0x799: 0x0b, 0x79a: 0x0b, 0x79b: 0x14, 0x79c: 0x0b, 0x79d: 0x15, 0x79e: 0x16, 0x79f: 0x17, + 0x7a0: 0x07, 0x7a1: 0x07, 0x7a2: 0x07, 0x7a3: 0x07, 0x7a4: 0x07, 0x7a5: 0x07, 0x7a6: 0x07, 0x7a7: 0x07, + 0x7a8: 0x07, 0x7a9: 0x07, 0x7aa: 0x18, 0x7ab: 0x19, 0x7ac: 0x1a, 0x7ad: 0x0b, 0x7ae: 0x0b, 0x7af: 0x1b, + 0x7b0: 0x0b, 0x7b1: 0x0b, 0x7b2: 0x0b, 0x7b3: 0x0b, 0x7b4: 0x0b, 0x7b5: 0x0b, 0x7b6: 0x0b, 0x7b7: 0x0b, + 0x7b8: 0x0b, 0x7b9: 0x0b, 0x7ba: 0x0b, 0x7bb: 0x0b, 0x7bc: 0x0b, 0x7bd: 0x0b, 0x7be: 0x0b, 0x7bf: 0x0b, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0b, 0x7c1: 0x0b, 0x7c2: 0x0b, 0x7c3: 0x0b, 0x7c4: 0x0b, 0x7c5: 0x0b, 0x7c6: 0x0b, 0x7c7: 0x0b, + 0x7c8: 0x0b, 0x7c9: 0x0b, 0x7ca: 0x0b, 0x7cb: 0x0b, 0x7cc: 0x0b, 0x7cd: 0x0b, 0x7ce: 0x0b, 0x7cf: 0x0b, + 0x7d0: 0x0b, 0x7d1: 0x0b, 0x7d2: 0x0b, 0x7d3: 0x0b, 0x7d4: 0x0b, 0x7d5: 0x0b, 0x7d6: 0x0b, 0x7d7: 0x0b, + 0x7d8: 0x0b, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x0b, 0x7dc: 0x0b, 0x7dd: 0x0b, 0x7de: 0x0b, 0x7df: 0x0b, + 0x7e0: 0x0b, 0x7e1: 0x0b, 0x7e2: 0x0b, 0x7e3: 0x0b, 0x7e4: 0x0b, 0x7e5: 0x0b, 0x7e6: 0x0b, 0x7e7: 0x0b, + 0x7e8: 0x0b, 0x7e9: 0x0b, 0x7ea: 0x0b, 0x7eb: 0x0b, 0x7ec: 0x0b, 0x7ed: 0x0b, 0x7ee: 0x0b, 0x7ef: 0x0b, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x17a, 0x801: 0x17b, 0x802: 0xba, 0x803: 0xba, 0x804: 0x17c, 0x805: 0x17c, 0x806: 0x17c, 0x807: 0x17d, + 0x808: 0xba, 0x809: 0xba, 0x80a: 0xba, 0x80b: 0xba, 0x80c: 0xba, 0x80d: 0xba, 0x80e: 0xba, 0x80f: 0xba, + 0x810: 0xba, 0x811: 0xba, 0x812: 0xba, 0x813: 0xba, 0x814: 0xba, 0x815: 0xba, 0x816: 0xba, 0x817: 0xba, + 0x818: 0xba, 0x819: 0xba, 0x81a: 0xba, 0x81b: 0xba, 0x81c: 0xba, 0x81d: 0xba, 0x81e: 0xba, 0x81f: 
0xba, + 0x820: 0xba, 0x821: 0xba, 0x822: 0xba, 0x823: 0xba, 0x824: 0xba, 0x825: 0xba, 0x826: 0xba, 0x827: 0xba, + 0x828: 0xba, 0x829: 0xba, 0x82a: 0xba, 0x82b: 0xba, 0x82c: 0xba, 0x82d: 0xba, 0x82e: 0xba, 0x82f: 0xba, + 0x830: 0xba, 0x831: 0xba, 0x832: 0xba, 0x833: 0xba, 0x834: 0xba, 0x835: 0xba, 0x836: 0xba, 0x837: 0xba, + 0x838: 0xba, 0x839: 0xba, 0x83a: 0xba, 0x83b: 0xba, 0x83c: 0xba, 0x83d: 0xba, 0x83e: 0xba, 0x83f: 0xba, + // Block 0x21, offset 0x840 + 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b, + 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b, + 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b, + 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b, + 0x860: 0x1e, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b, + 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b, + 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, + 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, +} + +// idnaSparseOffset: 258 entries, 516 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x93, 0x98, 0xa1, 0xb1, 0xbf, 0xcc, 0xd8, 0xe9, 0xf3, 0xfa, 0x107, 0x118, 0x11f, 0x12a, 0x139, 0x147, 0x151, 0x153, 0x158, 0x15b, 0x15e, 0x160, 0x16c, 0x177, 0x17f, 0x185, 0x18b, 0x190, 0x195, 0x198, 0x19c, 0x1a2, 0x1a7, 0x1b3, 0x1bd, 0x1c3, 0x1d4, 0x1de, 0x1e1, 0x1e9, 0x1ec, 0x1f9, 0x201, 0x205, 0x20c, 0x214, 0x224, 0x230, 0x232, 0x23c, 0x248, 0x254, 0x260, 0x268, 0x26d, 0x277, 0x288, 0x28c, 0x297, 0x29b, 0x2a4, 0x2ac, 0x2b2, 0x2b7, 0x2ba, 0x2bd, 0x2c1, 0x2c7, 0x2cb, 0x2cf, 0x2d5, 0x2dc, 0x2e2, 0x2ea, 0x2f1, 0x2fc, 0x306, 0x30a, 0x30d, 0x313, 0x317, 0x319, 0x31c, 0x31e, 0x321, 0x32b, 0x32e, 0x33d, 0x341, 0x346, 0x349, 0x34d, 0x352, 0x357, 0x35d, 0x363, 0x372, 0x378, 0x37c, 0x38b, 0x390, 0x398, 0x3a2, 0x3ad, 0x3b5, 0x3c6, 0x3cf, 0x3df, 0x3ec, 0x3f6, 0x3fb, 0x408, 0x40c, 0x411, 0x413, 0x417, 0x419, 0x41d, 0x426, 0x42c, 0x430, 0x440, 0x44a, 0x44f, 0x452, 0x458, 0x45f, 0x464, 0x468, 0x46e, 0x473, 0x47c, 0x481, 0x487, 0x48e, 0x495, 0x49c, 0x4a0, 0x4a5, 0x4a8, 0x4ad, 0x4b9, 0x4bf, 0x4c4, 0x4cb, 0x4d3, 0x4d8, 0x4dc, 0x4ec, 0x4f3, 0x4f7, 0x4fb, 0x502, 0x504, 0x507, 0x50a, 0x50e, 0x512, 0x518, 0x521, 0x52d, 0x534, 0x53d, 0x545, 0x54c, 0x55a, 0x567, 0x574, 0x57d, 0x581, 0x58f, 0x597, 0x5a2, 0x5ab, 0x5b1, 0x5b9, 0x5c2, 0x5cc, 0x5cf, 0x5db, 0x5de, 0x5e3, 0x5e6, 0x5f0, 0x5f9, 0x605, 0x608, 0x60d, 0x610, 0x613, 0x616, 0x61d, 0x624, 0x628, 0x633, 0x636, 0x63c, 0x641, 0x645, 0x648, 0x64b, 0x64e, 0x653, 0x65d, 0x660, 0x664, 0x673, 0x67f, 0x683, 0x688, 0x68d, 0x691, 0x696, 0x69f, 0x6aa, 0x6b0, 0x6b8, 0x6bc, 0x6c0, 0x6c6, 0x6cc, 0x6d1, 0x6d4, 0x6e2, 0x6e9, 0x6ec, 0x6ef, 0x6f3, 0x6f9, 0x6fe, 0x708, 0x70d, 0x710, 0x713, 0x716, 0x719, 0x71d, 0x720, 0x730, 0x741, 0x746, 0x748, 0x74a} + +// idnaSparseValues: 1869 entries, 7476 bytes +var idnaSparseValues = [1869]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 
0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 
0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xc, offset 0x6b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x77 + {value: 0x0000, lo: 0x0d}, + {value: 0x0c08, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0x85}, + {value: 0x0c08, lo: 0x86, hi: 0x87}, + {value: 0x0a08, lo: 0x88, hi: 0x88}, + {value: 0x0c08, lo: 0x89, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0x93}, + {value: 0x0c08, lo: 0x94, hi: 0x94}, + {value: 0x0a08, lo: 0x95, hi: 0x95}, + {value: 0x0808, lo: 0x96, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xf, offset 0x93 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0x10, offset 0x98 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x11, offset 0xa1 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + 
{value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbf + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xcc + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x15, offset 0xd8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x16, offset 0xe9 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x17, offset 0xf3 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x18, offset 0xfa + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, 
hi: 0xbf}, + // Block 0x19, offset 0x107 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x1a, offset 0x118 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1b, offset 0x11f + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0x12a + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1d, offset 0x139 + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1e, offset 0x147 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1f, offset 0x151 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x20, offset 0x153 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x21, offset 0x158 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x22, offset 0x15b + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, 
hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x23, offset 0x15e + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x24, offset 0x160 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x25, offset 0x16c + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x26, offset 0x177 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x17f + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x28, offset 0x185 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x29, offset 0x18b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2a, offset 0x190 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2b, offset 0x195 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2c, offset 0x198 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2d, offset 0x19c + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2e, offset 0x1a2 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2f, offset 0x1a7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + 
{value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x30, offset 0x1b3 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x31, offset 0x1bd + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x32, offset 0x1c3 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x33, offset 0x1d4 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x34, offset 0x1de + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x35, offset 0x1e1 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x36, offset 0x1e9 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x37, offset 0x1ec + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x38, offset 0x1f9 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x39, offset 0x201 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x3a, offset 0x205 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 
0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3b, offset 0x20c + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3c, offset 0x214 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x224 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3e, offset 0x230 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3f, offset 0x232 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x23c + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x41, offset 0x248 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x42, offset 0x254 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 
0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x43, offset 0x260 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x44, offset 0x268 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x45, offset 0x26d + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x46, offset 0x277 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x47, offset 0x288 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x48, offset 0x28c + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x49, offset 0x297 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x4a, offset 0x29b + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4b, offset 0x2a4 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4c, offset 0x2ac + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4d, offset 0x2b2 + {value: 0x0000, lo: 0x04}, + 
{value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4e, offset 0x2b7 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x4f, offset 0x2ba + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x50, offset 0x2bd + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x51, offset 0x2c1 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x52, offset 0x2c7 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x53, offset 0x2cb + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x54, offset 0x2cf + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x55, offset 0x2d5 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2dc + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x57, offset 0x2e2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x58, offset 0x2ea + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x59, offset 0x2f1 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x5a, offset 0x2fc + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x5b, offset 0x306 + {value: 0x0000, lo: 0x03}, 
+ {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5c, offset 0x30a + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0xbf}, + // Block 0x5d, offset 0x30d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5e, offset 0x313 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5f, offset 0x317 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x319 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x61, offset 0x31c + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x62, offset 0x31e + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x63, offset 0x321 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x65, offset 0x32e + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x66, offset 0x33d + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x67, offset 0x341 + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x68, offset 0x346 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x69, offset 0x349 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x6a, offset 0x34d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x6b, offset 0x352 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6c, offset 0x357 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 
0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6d, offset 0x35d + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6e, offset 0x363 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6f, offset 0x372 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x70, offset 0x378 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x71, offset 0x37c + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x72, offset 0x38b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x73, offset 0x390 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x74, offset 0x398 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x75, offset 0x3a2 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x76, offset 0x3ad + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 
0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x77, offset 0x3b5 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x78, offset 0x3c6 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x79, offset 0x3cf + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x7a, offset 0x3df + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3ec + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7c, offset 0x3f6 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3fb + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 
0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7e, offset 0x408 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7f, offset 0x40c + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x80, offset 0x411 + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x413 + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x82, offset 0x417 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x83, offset 0x419 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x84, offset 0x41d + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x85, offset 0x426 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x86, offset 0x42c + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x87, offset 0x430 + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x88, offset 0x440 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x89, offset 0x44a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x8a, offset 0x44f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x8b, offset 0x452 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8c, offset 0x458 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 
0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8d, offset 0x45f + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8e, offset 0x464 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8f, offset 0x468 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x90, offset 0x46e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x91, offset 0x473 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x92, offset 0x47c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x93, offset 0x481 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x94, offset 0x487 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x95, offset 0x48e + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x96, offset 0x495 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x97, offset 0x49c + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x98, offset 0x4a0 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x99, offset 0x4a5 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x9a, offset 0x4a8 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x9b, offset 0x4ad + {value: 0x0000, 
lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9c, offset 0x4b9 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9d, offset 0x4bf + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9e, offset 0x4c4 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9f, offset 0x4cb + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0xa0, offset 0x4d3 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0xa1, offset 0x4d8 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa2, offset 0x4dc + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa3, offset 0x4ec + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa4, offset 0x4f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa5, offset 0x4f7 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa6, offset 0x4fb + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa7, offset 0x502 + {value: 0x0000, 
lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa8, offset 0x504 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa9, offset 0x507 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xaa, offset 0x50a + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xab, offset 0x50e + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x512 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xad, offset 0x518 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xae, offset 0x521 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xaf, offset 0x52d + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb0, offset 0x534 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xb1, offset 0x53d + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb2, offset 0x545 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x54c + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, 
hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb4, offset 0x55a + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb5, offset 0x567 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb6, offset 0x574 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb7, offset 0x57d + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb8, offset 0x581 + {value: 0x0000, lo: 0x0d}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb9, offset 0x58f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x597 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xbb, offset 0x5a2 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5ab + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 
0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbd, offset 0x5b1 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbe, offset 0x5b9 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbf, offset 0x5c2 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xc0, offset 0x5cc + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xc1, offset 0x5cf + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc2, offset 0x5db + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc4, offset 0x5e3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc5, offset 0x5e6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc6, offset 0x5f0 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc7, offset 0x5f9 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, 
hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc8, offset 0x605 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xc9, offset 0x608 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xca, offset 0x60d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xcb, offset 0x610 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xcc, offset 0x613 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xcd, offset 0x616 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xce, offset 0x61d + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xcf, offset 0x624 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd0, offset 0x628 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xd1, offset 0x633 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd2, offset 0x636 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd3, offset 0x63c + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd4, offset 0x641 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0xd5, offset 0x645 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd6, offset 0x648 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xd7, offset 0x64b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0xbf}, + // Block 0xd8, offset 0x64e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xd9, offset 0x653 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 
0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xda, offset 0x65d + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xdb, offset 0x660 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xdc, offset 0x664 + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xdd, offset 0x673 + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xde, offset 0x67f + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xdf, offset 0x683 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xe0, offset 0x688 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe1, offset 0x68d + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x691 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe3, offset 0x696 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe4, offset 0x69f + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xe5, offset 0x6aa + {value: 
0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xe6, offset 0x6b0 + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xe7, offset 0x6b8 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe8, offset 0x6bc + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xe9, offset 0x6c0 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xea, offset 0x6c6 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xeb, offset 0x6cc + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xec, offset 0x6d1 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xed, offset 0x6d4 + {value: 0x0000, lo: 0x0d}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xbf}, + // Block 0xee, offset 0x6e2 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xef, offset 0x6e9 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf0, offset 0x6ec + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xf1, offset 0x6ef + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf2, offset 0x6f3 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf3, offset 0x6f9 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf4, 
offset 0x6fe + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb2}, + {value: 0x0018, lo: 0xb3, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xf5, offset 0x708 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xf6, offset 0x70d + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xbf}, + // Block 0xf7, offset 0x710 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0xbf}, + // Block 0xf8, offset 0x713 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xf9, offset 0x716 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xfa, offset 0x719 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xfb, offset 0x71d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xfc, offset 0x720 + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0xfd, offset 0x730 + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0xfe, offset 0x741 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0xff, offset 0x746 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x100, offset 0x748 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x101, offset 0x74a + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 41662 bytes (40KiB); checksum: 355A58A4 diff --git a/vendor/golang.org/x/net/internal/socket/BUILD.bazel b/vendor/golang.org/x/net/internal/socket/BUILD.bazel index 
5c388383b0176..be18eb38e6d18 100644 --- a/vendor/golang.org/x/net/internal/socket/BUILD.bazel +++ b/vendor/golang.org/x/net/internal/socket/BUILD.bazel @@ -31,16 +31,15 @@ go_library( "rawconn_msg.go", "rawconn_nommsg.go", "rawconn_nomsg.go", - "rawconn_stub.go", - "reflect.go", "socket.go", "sys.go", "sys_bsd.go", "sys_bsdvar.go", + "sys_const_unix.go", "sys_darwin.go", "sys_dragonfly.go", "sys_go1_11_darwin.go", - "sys_go1_12_darwin.go", + "sys_linkname.go", "sys_linux.go", "sys_linux_386.go", "sys_linux_386.s", @@ -92,4 +91,37 @@ go_library( importmap = "k8s.io/kops/vendor/golang.org/x/net/internal/socket", importpath = "golang.org/x/net/internal/socket", visibility = ["//vendor/golang.org/x/net:__subpackages__"], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/golang.org/x/sys/windows:go_default_library", + ], + "//conditions:default": [], + }), ) diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/vendor/golang.org/x/net/internal/socket/cmsghdr.go index 1eb07d26dee6a..0a73e277e0950 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go index d1d0c2de54b9f..14dbb3ad42de6 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd +// +build aix darwin dragonfly freebsd netbsd openbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go index 63f0534fa7673..27be0efaca924 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x // +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go index a4e71226f809e..e581011b055a1 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/error_unix.go b/vendor/golang.org/x/net/internal/socket/error_unix.go index 93dff91802827..f14872d3d3575 100644 --- a/vendor/golang.org/x/net/internal/socket/error_unix.go +++ b/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go index afb34ad58eba8..dfeda752be28a 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x -// +build darwin dragonfly freebsd linux netbsd openbsd +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x +// +build aix darwin dragonfly freebsd linux netbsd openbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/vendor/golang.org/x/net/internal/socket/iovec_stub.go index c87d2a9339d9e..a746e90e3073f 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_stub.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go index 2e80a9cb747c8..1a7f2792f2548 100644 --- a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !linux,!netbsd +// +build !aix,!linux,!netbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go index 3c42ea7ad85a5..f1100683a5791 100644 --- a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build linux netbsd +// +build aix linux netbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go index 5567afc88da40..77f44c1f12933 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd +// +build aix darwin dragonfly freebsd netbsd openbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go index b8c87b72b9485..c5562dd66ad9b 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd +// +build aix darwin dragonfly freebsd netbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go index 610fc4f3bbaa6..e731833a262d9 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x // +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go index 64e8173352a4a..873490a7ae9d4 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn.go b/vendor/golang.org/x/net/internal/socket/rawconn.go index d6871d55f7268..b07b8900506b3 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.9 - package socket import ( diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go index 499164a3fbf1e..1f4cb3b36e867 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -2,7 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build go1.9 // +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go index b21d2e6418dfd..a972011812243 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.9 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go index f78832aa4a760..fe5bb942ba69d 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -2,17 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.9 // +build !linux package socket -import "errors" - func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go index 96733cbe1b96c..b8cea6fe5343c 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -2,17 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.9 -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package socket -import "errors" - func (c *Conn) recvMsg(m *Message, flags int) error { - return errors.New("not implemented") + return errNotImplemented } func (c *Conn) sendMsg(m *Message, flags int) error { - return errors.New("not implemented") + return errNotImplemented } diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_stub.go b/vendor/golang.org/x/net/internal/socket/rawconn_stub.go deleted file mode 100644 index d2add1a0aa978..0000000000000 --- a/vendor/golang.org/x/net/internal/socket/rawconn_stub.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package socket - -import "errors" - -func (c *Conn) recvMsg(m *Message, flags int) error { - return errors.New("not implemented") -} - -func (c *Conn) sendMsg(m *Message, flags int) error { - return errors.New("not implemented") -} - -func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { - return 0, errors.New("not implemented") -} - -func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { - return 0, errors.New("not implemented") -} diff --git a/vendor/golang.org/x/net/internal/socket/reflect.go b/vendor/golang.org/x/net/internal/socket/reflect.go deleted file mode 100644 index bb179f11d8c51..0000000000000 --- a/vendor/golang.org/x/net/internal/socket/reflect.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package socket - -import ( - "errors" - "net" - "os" - "reflect" - "runtime" -) - -// A Conn represents a raw connection. -type Conn struct { - c net.Conn -} - -// NewConn returns a new raw connection. -func NewConn(c net.Conn) (*Conn, error) { - return &Conn{c: c}, nil -} - -func (o *Option) get(c *Conn, b []byte) (int, error) { - s, err := socketOf(c.c) - if err != nil { - return 0, err - } - n, err := getsockopt(s, o.Level, o.Name, b) - return n, os.NewSyscallError("getsockopt", err) -} - -func (o *Option) set(c *Conn, b []byte) error { - s, err := socketOf(c.c) - if err != nil { - return err - } - return os.NewSyscallError("setsockopt", setsockopt(s, o.Level, o.Name, b)) -} - -func socketOf(c net.Conn) (uintptr, error) { - switch c.(type) { - case *net.TCPConn, *net.UDPConn, *net.IPConn: - v := reflect.ValueOf(c) - switch e := v.Elem(); e.Kind() { - case reflect.Struct: - fd := e.FieldByName("conn").FieldByName("fd") - switch e := fd.Elem(); e.Kind() { - case reflect.Struct: - sysfd := e.FieldByName("sysfd") - if runtime.GOOS == "windows" { - return uintptr(sysfd.Uint()), nil - } - return uintptr(sysfd.Int()), nil - } - } - } - return 0, errors.New("invalid type") -} diff --git a/vendor/golang.org/x/net/internal/socket/socket.go b/vendor/golang.org/x/net/internal/socket/socket.go index 5f9730e6d97f0..23571b8d4dc6e 100644 --- a/vendor/golang.org/x/net/internal/socket/socket.go +++ b/vendor/golang.org/x/net/internal/socket/socket.go @@ -9,9 +9,12 @@ package socket // import "golang.org/x/net/internal/socket" import ( "errors" "net" + "runtime" "unsafe" ) +var errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) + // An Option represents a sticky socket option. type Option struct { Level int // level diff --git a/vendor/golang.org/x/net/internal/socket/sys.go b/vendor/golang.org/x/net/internal/socket/sys.go index 4f0eead138aa7..ee492ba86b124 100644 --- a/vendor/golang.org/x/net/internal/socket/sys.go +++ b/vendor/golang.org/x/net/internal/socket/sys.go @@ -29,5 +29,5 @@ func init() { } func roundup(l int) int { - return (l + kernelAlign - 1) & ^(kernelAlign - 1) + return (l + kernelAlign - 1) &^ (kernelAlign - 1) } diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/vendor/golang.org/x/net/internal/socket/sys_bsd.go index f13e14ff36878..d432835b4195d 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_bsd.go +++ b/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -2,16 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd openbsd +// +build aix darwin dragonfly freebsd openbsd package socket -import "errors" - func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go index 164ddfce837ca..b4f41b5522f91 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go +++ b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build freebsd netbsd openbsd +// +build aix freebsd netbsd openbsd package socket @@ -12,9 +12,12 @@ import ( ) func probeProtocolStack() int { - if runtime.GOOS == "openbsd" && runtime.GOARCH == "arm" { + if (runtime.GOOS == "netbsd" || runtime.GOOS == "openbsd") && runtime.GOARCH == "arm" { return 8 } + if runtime.GOOS == "aix" { + return 1 + } var p uintptr return int(unsafe.Sizeof(p)) } diff --git a/vendor/golang.org/x/net/internal/socket/sys_const_unix.go b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go new file mode 100644 index 0000000000000..43797d6e53533 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "golang.org/x/sys/unix" + +const ( + sysAF_UNSPEC = unix.AF_UNSPEC + sysAF_INET = unix.AF_INET + sysAF_INET6 = unix.AF_INET6 + + sysSOCK_RAW = unix.SOCK_RAW +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_go1_12_darwin.go b/vendor/golang.org/x/net/internal/socket/sys_linkname.go similarity index 98% rename from vendor/golang.org/x/net/internal/socket/sys_go1_12_darwin.go rename to vendor/golang.org/x/net/internal/socket/sys_linkname.go index 0999a19fbdc23..61c3f38a51b34 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_go1_12_darwin.go +++ b/vendor/golang.org/x/net/internal/socket/sys_linkname.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.12 +// +build aix go1.12,darwin package socket diff --git a/vendor/golang.org/x/sys/windows/mkerrors.go b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go similarity index 65% rename from vendor/golang.org/x/sys/windows/mkerrors.go rename to vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go index cbf123ef89d2b..64f69f1dc559d 100644 --- a/vendor/golang.org/x/sys/windows/mkerrors.go +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go @@ -2,6 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package windows +// +build riscv64 -//go:generate ./mkerrors.bash zerrors_windows.go +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go index 9a9bc47621e5e..22eae809c9e12 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_posix.go +++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build go1.9 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package socket @@ -34,7 +33,7 @@ func marshalSockaddr(ip net.IP, port int, zone string) []byte { if ip4 := ip.To4(); ip4 != nil { b := make([]byte, sizeofSockaddrInet) switch runtime.GOOS { - case "android", "linux", "solaris", "windows": + case "android", "illumos", "linux", "solaris", "windows": NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)) default: b[0] = sizeofSockaddrInet @@ -47,7 +46,7 @@ func marshalSockaddr(ip net.IP, port int, zone string) []byte { if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { b := make([]byte, sizeofSockaddrInet6) switch runtime.GOOS { - case "android", "linux", "solaris", "windows": + case "android", "illumos", "linux", "solaris", "windows": NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6)) default: b[0] = sizeofSockaddrInet6 @@ -69,7 +68,7 @@ func parseInetAddr(b []byte, network string) (net.Addr, error) { } var af int switch runtime.GOOS { - case "android", "linux", "solaris", "windows": + case "android", "illumos", "linux", "solaris", "windows": af = int(NativeEndian.Uint16(b[:2])) default: af = int(b[1]) diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris.go b/vendor/golang.org/x/net/internal/socket/sys_solaris.go index cced74e60d588..66b5547868ccb 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_solaris.go +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris.go @@ -5,7 +5,6 @@ package socket import ( - "errors" "runtime" "syscall" "unsafe" @@ -63,9 +62,9 @@ func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { } func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go index d9f06d00e9b8d..0f61742628164 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_stub.go +++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -2,12 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package socket import ( - "errors" "net" "runtime" "unsafe" @@ -36,29 +35,29 @@ func marshalInetAddr(ip net.IP, port int, zone string) []byte { } func parseInetAddr(b []byte, network string) (net.Addr, error) { - return nil, errors.New("not implemented") + return nil, errNotImplemented } func getsockopt(s uintptr, level, name int, b []byte) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func setsockopt(s uintptr, level, name int, b []byte) error { - return errors.New("not implemented") + return errNotImplemented } func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } diff --git a/vendor/golang.org/x/net/internal/socket/sys_windows.go b/vendor/golang.org/x/net/internal/socket/sys_windows.go index 54a470ebe3435..d556a44615707 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_windows.go +++ b/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -5,9 +5,10 @@ package socket import ( - "errors" "syscall" "unsafe" + + "golang.org/x/sys/windows" ) func probeProtocolStack() int { @@ -16,11 +17,11 @@ func probeProtocolStack() int { } const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x17 + sysAF_UNSPEC = windows.AF_UNSPEC + sysAF_INET = windows.AF_INET + sysAF_INET6 = windows.AF_INET6 - sysSOCK_RAW = 0x3 + sysSOCK_RAW = windows.SOCK_RAW ) type sockaddrInet struct { @@ -54,17 +55,17 @@ func setsockopt(s uintptr, level, name int, b []byte) error { } func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { - return 0, errors.New("not implemented") + return 0, errNotImplemented } diff --git a/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go new file mode 100644 index 0000000000000..813385a98b101 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go @@ -0,0 +1,61 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x38 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go index 26f8feff3a198..083bda51c3c06 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_darwin.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1e - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go index e2987f7db8201..55c6c9f5770ce 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_darwin.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1e - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go index 26f8feff3a198..083bda51c3c06 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_darwin.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1e - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go index e2987f7db8201..55c6c9f5770ce 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_darwin.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1e - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go index c582abd57df8d..8b7d161d7d52a 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_dragonfly.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1c - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go index 04a24886c7a22..3e71ff57438e9 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1c - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go index 35c7cb9c953da..238d90de6266c 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1c - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go index 04a24886c7a22..3e71ff57438e9 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1c - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go new file mode 100644 index 0000000000000..238d90de6266c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go index 430206930b800..72d8b25421b27 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go index 1502f6c5529bc..3545319ae91ba 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go index 430206930b800..72d8b25421b27 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go index 1502f6c5529bc..3545319ae91ba 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go index 430206930b800..72d8b25421b27 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go index 1502f6c5529bc..3545319ae91ba 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go index 1502f6c5529bc..3545319ae91ba 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go index 430206930b800..72d8b25421b27 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go index 1502f6c5529bc..3545319ae91ba 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go index 1502f6c5529bc..3545319ae91ba 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go new file mode 100644 index 0000000000000..dbff234fbdb34 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go @@ -0,0 +1,59 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +// +build riscv64 + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_0 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go index 1502f6c5529bc..3545319ae91ba 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0xa - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go index db60491fe37b8..bf8f47c88c7fd 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_netbsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go index 2a1a79985a02d..a46eff9912865 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_netbsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go index db60491fe37b8..bf8f47c88c7fd 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_netbsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go new file mode 100644 index 0000000000000..a46eff9912865 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go @@ -0,0 +1,60 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go index 1c836361e8202..73655a14c4cf5 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_openbsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go index a6c0bf464a18c..0a4de80f23572 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_openbsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint64 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go index 1c836361e8202..73655a14c4cf5 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_openbsd.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x18 - - sysSOCK_RAW = 0x3 -) - type iovec struct { Base *byte Len uint32 diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go new file mode 100644 index 0000000000000..0a4de80f23572 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go index 327c63290cddb..353cd5fb4ec3f 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go @@ -1,16 +1,8 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_solaris.go package socket -const ( - sysAF_UNSPEC = 0x0 - sysAF_INET = 0x2 - sysAF_INET6 = 0x1a - - sysSOCK_RAW = 0x4 -) - type iovec struct { Base *int8 Len uint64 diff --git a/vendor/golang.org/x/net/internal/timeseries/BUILD.bazel b/vendor/golang.org/x/net/internal/timeseries/BUILD.bazel new file mode 100644 index 0000000000000..71209337ef017 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["timeseries.go"], + importmap = "k8s.io/kops/vendor/golang.org/x/net/internal/timeseries", + importpath = "golang.org/x/net/internal/timeseries", + visibility = ["//vendor/golang.org/x/net:__subpackages__"], +) diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 0000000000000..685f0e7ea236b --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. 
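
The hunk above defines the Observable interface, the Float implementation, and the level/bucket layout that the rest of timeseries.go builds on. As a rough illustration of how that API fits together, here is a minimal sketch using the exported constructors declared further down in this same file (NewTimeSeries, NewFloat, Recent, Total). Note that the package lives under internal/, so outside the golang.org/x/net module the import below would not compile; the snippet is illustrative only, not part of the patch.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/internal/timeseries"
)

func main() {
	// Bucketed counter: one Float observation per event, aggregated across
	// the resolutions in timeSeriesResolutions (1 second up to 16 weeks).
	ts := timeseries.NewTimeSeries(timeseries.NewFloat)

	for i := 0; i < 10; i++ {
		v := timeseries.Float(1)
		ts.Add(&v) // goes into the pending bucket, then cascades into each level
	}

	// Approximate sum over the last minute; exact only when the window
	// lines up with bucket boundaries at some level.
	fmt.Println("last minute:", ts.Recent(time.Minute))

	// Running total of everything ever observed.
	fmt.Println("total:", ts.Total())
}
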
+type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. 
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. 
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. 
+func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/ipv4/BUILD.bazel b/vendor/golang.org/x/net/ipv4/BUILD.bazel index b258eda2a5b66..6ce138a993444 100644 --- a/vendor/golang.org/x/net/ipv4/BUILD.bazel +++ b/vendor/golang.org/x/net/ipv4/BUILD.bazel @@ -21,12 +21,8 @@ go_library( "icmp_linux.go", "icmp_stub.go", "packet.go", - "packet_go1_8.go", - "packet_go1_9.go", "payload.go", "payload_cmsg.go", - "payload_cmsg_go1_8.go", - "payload_cmsg_go1_9.go", "payload_nocmsg.go", "sockopt.go", "sockopt_posix.go", diff --git a/vendor/golang.org/x/net/ipv4/batch.go b/vendor/golang.org/x/net/ipv4/batch.go index 5ce9b3583cc26..1a3a4fc0c1095 100644 --- a/vendor/golang.org/x/net/ipv4/batch.go +++ b/vendor/golang.org/x/net/ipv4/batch.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.9 - package ipv4 import ( @@ -91,6 +89,9 @@ func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { n = 0 err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} } + if compatFreeBSD32 && ms[0].NN > 0 { + adjustFreeBSD32(&ms[0]) + } return n, err } } @@ -154,6 +155,9 @@ func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { n = 0 err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} } + if compatFreeBSD32 && ms[0].NN > 0 { + adjustFreeBSD32(&ms[0]) + } return n, err } } diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go index 77e7ad5bed76b..19845c55b1545 100644 --- a/vendor/golang.org/x/net/ipv4/control_bsd.go +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd netbsd openbsd +// +build aix darwin dragonfly freebsd netbsd openbsd package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go index 5a2f7d8d3c9b7..a0c049d683ade 100644 --- a/vendor/golang.org/x/net/ipv4/control_stub.go +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -2,12 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv4 import "golang.org/x/net/internal/socket" func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { - return errOpNoSupport + return errNotImplemented } diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go index e1ae8167b3a4b..b27fa4903a996 100644 --- a/vendor/golang.org/x/net/ipv4/control_unix.go +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/control_windows.go b/vendor/golang.org/x/net/ipv4/control_windows.go index ce55c66447d07..82c6306421b9d 100644 --- a/vendor/golang.org/x/net/ipv4/control_windows.go +++ b/vendor/golang.org/x/net/ipv4/control_windows.go @@ -4,13 +4,9 @@ package ipv4 -import ( - "syscall" - - "golang.org/x/net/internal/socket" -) +import "golang.org/x/net/internal/socket" func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { // TODO(mikio): implement this - return syscall.EWINDOWS + return errNotImplemented } diff --git a/vendor/golang.org/x/net/ipv4/dgramopt.go b/vendor/golang.org/x/net/ipv4/dgramopt.go index 36764492d9de9..c191c22aba4ae 100644 --- a/vendor/golang.org/x/net/ipv4/dgramopt.go +++ b/vendor/golang.org/x/net/ipv4/dgramopt.go @@ -18,7 +18,7 @@ func (c *dgramOpt) MulticastTTL() (int, error) { } so, ok := sockOpts[ssoMulticastTTL] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } return so.GetInt(c.Conn) } @@ -31,7 +31,7 @@ func (c *dgramOpt) SetMulticastTTL(ttl int) error { } so, ok := sockOpts[ssoMulticastTTL] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, ttl) } @@ -44,7 +44,7 @@ func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { } so, ok := sockOpts[ssoMulticastInterface] if !ok { - return nil, errOpNoSupport + return nil, errNotImplemented } return so.getMulticastInterface(c.Conn) } @@ -57,7 +57,7 @@ func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { } so, ok := sockOpts[ssoMulticastInterface] if !ok { - return errOpNoSupport + return errNotImplemented } return so.setMulticastInterface(c.Conn, ifi) } @@ -70,7 +70,7 @@ func (c *dgramOpt) MulticastLoopback() (bool, error) { } so, ok := sockOpts[ssoMulticastLoopback] if !ok { - return false, errOpNoSupport + return false, errNotImplemented } on, err := so.GetInt(c.Conn) if err != nil { @@ -87,7 +87,7 @@ func (c *dgramOpt) SetMulticastLoopback(on bool) error { } so, ok := sockOpts[ssoMulticastLoopback] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, boolint(on)) } @@ -107,7 +107,7 @@ func (c *dgramOpt) 
JoinGroup(ifi *net.Interface, group net.Addr) error { } so, ok := sockOpts[ssoJoinGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP4(group) if grp == nil { @@ -125,7 +125,7 @@ func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { } so, ok := sockOpts[ssoLeaveGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP4(group) if grp == nil { @@ -146,7 +146,7 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net } so, ok := sockOpts[ssoJoinSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP4(group) if grp == nil { @@ -167,7 +167,7 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne } so, ok := sockOpts[ssoLeaveSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP4(group) if grp == nil { @@ -189,7 +189,7 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source } so, ok := sockOpts[ssoBlockSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP4(group) if grp == nil { @@ -210,7 +210,7 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source } so, ok := sockOpts[ssoUnblockSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP4(group) if grp == nil { @@ -231,7 +231,7 @@ func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { } so, ok := sockOpts[ssoICMPFilter] if !ok { - return nil, errOpNoSupport + return nil, errNotImplemented } return so.getICMPFilter(c.Conn) } @@ -244,7 +244,7 @@ func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { } so, ok := sockOpts[ssoICMPFilter] if !ok { - return errOpNoSupport + return errNotImplemented } return so.setICMPFilter(c.Conn, f) } @@ -258,7 +258,7 @@ func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { } so, ok := sockOpts[ssoAttachFilter] if !ok { - return errOpNoSupport + return errNotImplemented } return so.setBPF(c.Conn, filter) } diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go index 863d55b8d6778..2458349799382 100644 --- a/vendor/golang.org/x/net/ipv4/doc.go +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -209,7 +209,7 @@ // LeaveSourceSpecificGroup for the operation known as "include" mode, // // ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)} -// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)}) +// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)} // if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { // // error handling // } @@ -241,5 +241,4 @@ // IncludeSourceSpecificGroup may return an error. package ipv4 // import "golang.org/x/net/ipv4" -// BUG(mikio): This package is not implemented on AIX, JS, NaCl and -// Plan 9. +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. 
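
The doc.go hunk above removes the stray closing parenthesis from the source-specific multicast example and narrows the BUG note to JS, NaCl and Plan 9 now that AIX is supported. As a self-contained sketch of the "include"-mode join that doc comment describes — the interface name en0, the listen address, and the group/source addresses are placeholders taken from or modeled on that comment, not values mandated by the package — the flow looks roughly like this. With this change set, platforms lacking the underlying socket option report errNotImplemented ("not implemented on GOOS/GOARCH") instead of the old errOpNoSupport.

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	en0, err := net.InterfaceByName("en0") // placeholder interface name
	if err != nil {
		log.Fatal(err)
	}

	p := ipv4.NewPacketConn(c)

	// "Include" mode: accept group traffic only from the named source.
	ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)}
	ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)}
	if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {
		// On platforms without the join-source-group socket option this
		// now fails with errNotImplemented rather than "operation not
		// supported".
		log.Fatal(err)
	}
	defer p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource)
}
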
diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go index 5009463787ef2..4a6d7a85ee67b 100644 --- a/vendor/golang.org/x/net/ipv4/endpoint.go +++ b/vendor/golang.org/x/net/ipv4/endpoint.go @@ -177,7 +177,7 @@ func NewRawConn(c net.PacketConn) (*RawConn, error) { } so, ok := sockOpts[ssoHeaderPrepend] if !ok { - return nil, errOpNoSupport + return nil, errNotImplemented } if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil { return nil, err diff --git a/vendor/golang.org/x/net/ipv4/genericopt.go b/vendor/golang.org/x/net/ipv4/genericopt.go index 587ae4a1944dc..51c12371eb467 100644 --- a/vendor/golang.org/x/net/ipv4/genericopt.go +++ b/vendor/golang.org/x/net/ipv4/genericopt.go @@ -11,7 +11,7 @@ func (c *genericOpt) TOS() (int, error) { } so, ok := sockOpts[ssoTOS] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } return so.GetInt(c.Conn) } @@ -24,7 +24,7 @@ func (c *genericOpt) SetTOS(tos int) error { } so, ok := sockOpts[ssoTOS] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, tos) } @@ -36,7 +36,7 @@ func (c *genericOpt) TTL() (int, error) { } so, ok := sockOpts[ssoTTL] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } return so.GetInt(c.Conn) } @@ -49,7 +49,7 @@ func (c *genericOpt) SetTTL(ttl int) error { } so, ok := sockOpts[ssoTTL] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, ttl) } diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go index a8c8f7a6c858d..701bd4b22d816 100644 --- a/vendor/golang.org/x/net/ipv4/header.go +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -57,7 +57,7 @@ func (h *Header) String() string { // This may differ from the wire format, depending on the system. func (h *Header) Marshal() ([]byte, error) { if h == nil { - return nil, errInvalidConn + return nil, errNilHeader } if h.Len < HeaderLen { return nil, errHeaderTooShort @@ -107,12 +107,15 @@ func (h *Header) Marshal() ([]byte, error) { // local system. // This may differ from the wire format, depending on the system. 
func (h *Header) Parse(b []byte) error { - if h == nil || len(b) < HeaderLen { + if h == nil || b == nil { + return errNilHeader + } + if len(b) < HeaderLen { return errHeaderTooShort } hdrlen := int(b[0]&0x0f) << 2 - if hdrlen > len(b) { - return errBufferTooShort + if len(b) < hdrlen { + return errExtHeaderTooShort } h.Version = int(b[0] >> 4) h.Len = hdrlen diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go index 8d8ff98e94a6c..b494a2cde46be 100644 --- a/vendor/golang.org/x/net/ipv4/helper.go +++ b/vendor/golang.org/x/net/ipv4/helper.go @@ -7,23 +7,39 @@ package ipv4 import ( "errors" "net" + "runtime" + + "golang.org/x/net/internal/socket" ) var ( errInvalidConn = errors.New("invalid connection") errMissingAddress = errors.New("missing address") errMissingHeader = errors.New("missing header") + errNilHeader = errors.New("nil header") errHeaderTooShort = errors.New("header too short") - errBufferTooShort = errors.New("buffer too short") + errExtHeaderTooShort = errors.New("extension header too short") errInvalidConnType = errors.New("invalid conn type") - errOpNoSupport = errors.New("operation not supported") errNoSuchInterface = errors.New("no such interface") errNoSuchMulticastInterface = errors.New("no such multicast interface") + errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) - // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. - freebsdVersion uint32 + // See https://www.freebsd.org/doc/en/books/porters-handbook/versions.html. + freebsdVersion uint32 + compatFreeBSD32 bool // 386 emulation on amd64 ) +// See golang.org/issue/30899. +func adjustFreeBSD32(m *socket.Message) { + // FreeBSD 12.0-RELEASE is affected by https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236737 + if 1200086 <= freebsdVersion && freebsdVersion < 1201000 { + l := (m.NN + 4 - 1) &^ (4 - 1) + if m.NN < l && l <= len(m.OOB) { + m.NN = l + } + } +} + func boolint(b bool) int { if b { return 1 diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go index 966bb776f5608..7d784e06dd086 100644 --- a/vendor/golang.org/x/net/ipv4/packet.go +++ b/vendor/golang.org/x/net/ipv4/packet.go @@ -29,7 +29,35 @@ func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMess if !c.ok() { return nil, nil, nil, errInvalidConn } - return c.readFrom(b) + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + if err := c.RecvMsg(&m, 0); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + var hs []byte + if hs, p, err = slicePacket(b[:m.N]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if m.NN > 0 { + if compatFreeBSD32 { + adjustFreeBSD32(&m) + } + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + } + if src, ok := m.Addr.(*net.IPAddr); ok && cm != nil { + cm.Src = src.IP + } + return } func slicePacket(b []byte) (h, p []byte, err error) { @@ -64,5 +92,26 @@ func (c 
*packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { if !c.ok() { return errInvalidConn } - return c.writeTo(h, p, cm) + m := socket.Message{ + OOB: cm.Marshal(), + } + wh, err := h.Marshal() + if err != nil { + return err + } + m.Buffers = [][]byte{wh, p} + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + m.Addr = dst + if err := c.SendMsg(&m, 0); err != nil { + return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return nil } diff --git a/vendor/golang.org/x/net/ipv4/packet_go1_8.go b/vendor/golang.org/x/net/ipv4/packet_go1_8.go deleted file mode 100644 index b47d186834d8d..0000000000000 --- a/vendor/golang.org/x/net/ipv4/packet_go1_8.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package ipv4 - -import "net" - -func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { - c.rawOpt.RLock() - oob := NewControlMessage(c.rawOpt.cflags) - c.rawOpt.RUnlock() - n, nn, _, src, err := c.ReadMsgIP(b, oob) - if err != nil { - return nil, nil, nil, err - } - var hs []byte - if hs, p, err = slicePacket(b[:n]); err != nil { - return nil, nil, nil, err - } - if h, err = ParseHeader(hs); err != nil { - return nil, nil, nil, err - } - if nn > 0 { - cm = new(ControlMessage) - if err := cm.Parse(oob[:nn]); err != nil { - return nil, nil, nil, err - } - } - if src != nil && cm != nil { - cm.Src = src.IP - } - return -} - -func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { - oob := cm.Marshal() - wh, err := h.Marshal() - if err != nil { - return err - } - dst := new(net.IPAddr) - if cm != nil { - if ip := cm.Dst.To4(); ip != nil { - dst.IP = ip - } - } - if dst.IP == nil { - dst.IP = h.Dst - } - wh = append(wh, p...) - _, _, err = c.WriteMsgIP(wh, oob, dst) - return err -} diff --git a/vendor/golang.org/x/net/ipv4/packet_go1_9.go b/vendor/golang.org/x/net/ipv4/packet_go1_9.go deleted file mode 100644 index 082c36d73e19b..0000000000000 --- a/vendor/golang.org/x/net/ipv4/packet_go1_9.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
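
helper.go above introduces compatFreeBSD32 and adjustFreeBSD32 to work around the FreeBSD 12.0-RELEASE control-message length bug referenced in golang.org/issue/30899: when a 386 binary runs on an amd64 kernel, the reported length is rounded up to the next 4-byte boundary before the control messages are parsed. A minimal sketch of just that arithmetic is shown below; roundUp4 is a hypothetical helper name for illustration, and the real adjustFreeBSD32 additionally checks the affected freebsdVersion range and that the rounded length still fits within the OOB buffer.

package main

import "fmt"

// roundUp4 mirrors the expression (NN + 4 - 1) &^ (4 - 1) used by
// adjustFreeBSD32: round a control-message length up to the next
// multiple of four bytes.
func roundUp4(nn int) int {
	return (nn + 4 - 1) &^ (4 - 1)
}

func main() {
	for _, nn := range []int{12, 13, 14, 15, 16} {
		fmt.Printf("NN=%d -> %d\n", nn, roundUp4(nn))
	}
	// Prints:
	// NN=12 -> 12
	// NN=13 -> 16
	// NN=14 -> 16
	// NN=15 -> 16
	// NN=16 -> 16
}
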
- -// +build go1.9 - -package ipv4 - -import ( - "net" - - "golang.org/x/net/internal/socket" -) - -func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { - c.rawOpt.RLock() - m := socket.Message{ - Buffers: [][]byte{b}, - OOB: NewControlMessage(c.rawOpt.cflags), - } - c.rawOpt.RUnlock() - if err := c.RecvMsg(&m, 0); err != nil { - return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} - } - var hs []byte - if hs, p, err = slicePacket(b[:m.N]); err != nil { - return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} - } - if h, err = ParseHeader(hs); err != nil { - return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} - } - if m.NN > 0 { - cm = new(ControlMessage) - if err := cm.Parse(m.OOB[:m.NN]); err != nil { - return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} - } - } - if src, ok := m.Addr.(*net.IPAddr); ok && cm != nil { - cm.Src = src.IP - } - return -} - -func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { - m := socket.Message{ - OOB: cm.Marshal(), - } - wh, err := h.Marshal() - if err != nil { - return err - } - m.Buffers = [][]byte{wh, p} - dst := new(net.IPAddr) - if cm != nil { - if ip := cm.Dst.To4(); ip != nil { - dst.IP = ip - } - } - if dst.IP == nil { - dst.IP = h.Dst - } - m.Addr = dst - if err := c.SendMsg(&m, 0); err != nil { - return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} - } - return nil -} diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go index a7c892dc4448a..e7614661d7b75 100644 --- a/vendor/golang.org/x/net/ipv4/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -2,11 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package ipv4 -import "net" +import ( + "net" + + "golang.org/x/net/internal/socket" +) // ReadFrom reads a payload of the received IPv4 datagram, from the // endpoint c, copying the payload into b. It returns the number of @@ -16,7 +20,45 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. 
if !c.ok() { return 0, nil, nil, errInvalidConn } - return c.readFrom(b) + c.rawOpt.RLock() + m := socket.Message{ + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + m.Buffers = [][]byte{b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + h := make([]byte, HeaderLen) + m.Buffers = [][]byte{h, b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + hdrlen := int(h[0]&0x0f) << 2 + if hdrlen > len(h) { + d := hdrlen - len(h) + copy(b, b[d:]) + m.N -= d + } else { + m.N -= hdrlen + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + if m.NN > 0 { + if compatFreeBSD32 { + adjustFreeBSD32(&m) + } + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP4(m.Addr) + } + return m.N, cm, m.Addr, nil } // WriteTo writes a payload of the IPv4 datagram, to the destination @@ -29,5 +71,14 @@ func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n if !c.ok() { return 0, errInvalidConn } - return c.writeTo(b, cm, dst) + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err = c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go deleted file mode 100644 index 15a27b7a001f2..0000000000000 --- a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.9 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package ipv4 - -import "net" - -func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { - c.rawOpt.RLock() - oob := NewControlMessage(c.rawOpt.cflags) - c.rawOpt.RUnlock() - var nn int - switch c := c.PacketConn.(type) { - case *net.UDPConn: - if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { - return 0, nil, nil, err - } - case *net.IPConn: - nb := make([]byte, maxHeaderLen+len(b)) - if n, nn, _, src, err = c.ReadMsgIP(nb, oob); err != nil { - return 0, nil, nil, err - } - hdrlen := int(nb[0]&0x0f) << 2 - copy(b, nb[hdrlen:]) - n -= hdrlen - default: - return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} - } - if nn > 0 { - cm = new(ControlMessage) - if err = cm.Parse(oob[:nn]); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - } - if cm != nil { - cm.Src = netAddrToIP4(src) - } - return -} - -func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { - oob := cm.Marshal() - if dst == nil { - return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} - } - switch c := c.PacketConn.(type) { - case *net.UDPConn: - n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) - case *net.IPConn: - n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) - default: - return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} - } - return -} diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go deleted file mode 100644 index aab3b224e5781..0000000000000 --- a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.9 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package ipv4 - -import ( - "net" - - "golang.org/x/net/internal/socket" -) - -func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { - c.rawOpt.RLock() - m := socket.Message{ - OOB: NewControlMessage(c.rawOpt.cflags), - } - c.rawOpt.RUnlock() - switch c.PacketConn.(type) { - case *net.UDPConn: - m.Buffers = [][]byte{b} - if err := c.RecvMsg(&m, 0); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - case *net.IPConn: - h := make([]byte, HeaderLen) - m.Buffers = [][]byte{h, b} - if err := c.RecvMsg(&m, 0); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - hdrlen := int(h[0]&0x0f) << 2 - if hdrlen > len(h) { - d := hdrlen - len(h) - copy(b, b[d:]) - m.N -= d - } else { - m.N -= hdrlen - } - default: - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} - } - var cm *ControlMessage - if m.NN > 0 { - cm = new(ControlMessage) - if err := cm.Parse(m.OOB[:m.NN]); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - cm.Src = netAddrToIP4(m.Addr) - } - return m.N, cm, m.Addr, nil -} - -func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { - m := socket.Message{ - Buffers: [][]byte{b}, - OOB: cm.Marshal(), - Addr: dst, - } - err := c.SendMsg(&m, 0) - if err != nil { - err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} - } - return m.N, err -} diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go index d57f05c1077ec..1116256f24576 100644 --- a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go index e96955bc188b7..dea64519d8e26 100644 --- a/vendor/golang.org/x/net/ipv4/sockopt_posix.go +++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package ipv4 @@ -39,7 +39,7 @@ func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { return nil, err } if n != sizeofICMPFilter { - return nil, errOpNoSupport + return nil, errNotImplemented } return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil } @@ -58,7 +58,7 @@ func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) erro case ssoTypeGroupReq: return so.setGroupReq(c, ifi, grp) default: - return errOpNoSupport + return errNotImplemented } } diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go index 23249b782e335..37d4806b34297 100644 --- a/vendor/golang.org/x/net/ipv4/sockopt_stub.go +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv4 @@ -14,29 +14,29 @@ import ( ) func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { - return nil, errOpNoSupport + return nil, errNotImplemented } func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { - return nil, errOpNoSupport + return nil, errNotImplemented } func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { - return errOpNoSupport + return errNotImplemented } diff --git a/vendor/golang.org/x/net/ipv4/sys_aix.go b/vendor/golang.org/x/net/ipv4/sys_aix.go new file mode 100644 index 0000000000000..3d1201e6d7c76 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_aix.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Added for go1.11 compatibility +// +build aix + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq.go b/vendor/golang.org/x/net/ipv4/sys_asmreq.go index 0388cba00c38c..c5eaafe96be27 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreq.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd netbsd openbsd solaris windows package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go index f3919208b6eea..6dc339ce67a38 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows package ipv4 @@ -13,13 +13,13 @@ import ( ) func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { - return nil, errOpNoSupport + return nil, errNotImplemented } func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { - return errOpNoSupport + return errNotImplemented } diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go index 0711d3d786aa8..48ef55624ec40 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -13,9 +13,9 @@ import ( ) func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { - return nil, errOpNoSupport + return nil, errNotImplemented } func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go index 9a2132093dace..5c98642716b56 100644 --- a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -12,5 +12,5 @@ import ( ) func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { - return errOpNoSupport + return errNotImplemented } diff --git a/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/vendor/golang.org/x/net/ipv4/sys_freebsd.go index b80032454a5d5..482873d9a15d0 100644 --- a/vendor/golang.org/x/net/ipv4/sys_freebsd.go +++ b/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -50,7 +50,7 @@ func init() { archs, _ := syscall.Sysctl("kern.supported_archs") for _, s := range strings.Fields(archs) { if s == "amd64" { - freebsd32o64 = true + compatFreeBSD32 = true break } } diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go index ae5704e77a2da..eeced7f3138bf 100644 --- a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -13,8 +13,6 @@ import ( "golang.org/x/net/internal/socket" ) -var freebsd32o64 bool - func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { var gr groupReq if ifi != nil { @@ -22,7 +20,7 @@ func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) e } gr.setGroup(grp) var b []byte - if freebsd32o64 { + if compatFreeBSD32 { var d [sizeofGroupReq + 4]byte s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) copy(d[:4], s[:4]) @@ -41,7 +39,7 @@ func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, sr } gsr.setSourceGroup(grp, src) var b []byte - if freebsd32o64 { + if compatFreeBSD32 { var d [sizeofGroupSourceReq + 4]byte s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) copy(d[:4], s[:4]) diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go index e6b7623d0d553..c0921674b0506 100644 --- a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -13,9 +13,9 @@ import ( ) func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi 
*net.Interface, grp, src net.IP) error { - return errOpNoSupport + return errNotImplemented } diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go index 4f076473bd1dc..b9c85b334fcc4 100644 --- a/vendor/golang.org/x/net/ipv4/sys_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go new file mode 100644 index 0000000000000..c741d5c8ea91f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go @@ -0,0 +1,33 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x20 + sysIP_RECVTTL = 0x22 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/vendor/golang.org/x/net/ipv4/zsys_darwin.go index c07cc883fc34d..e05a251ba0761 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_darwin.go +++ b/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_darwin.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go index c4365e9e7121e..6d65e9fcb8155 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go +++ b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_dragonfly.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go index 8c4aec94c8a0b..136e2b8f1d615 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go index 4b10b7c575fe5..4f730f19ef349 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go index 4b10b7c575fe5..4f730f19ef349 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_freebsd.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go index c0260f0ce34f1..43ef8e59225fb 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_386.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go index 9c967eaa642d5..ee8204da4609e 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go index c0260f0ce34f1..43ef8e59225fb 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go index 9c967eaa642d5..ee8204da4609e 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go index c0260f0ce34f1..43ef8e59225fb 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go index 9c967eaa642d5..ee8204da4609e 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go index 9c967eaa642d5..ee8204da4609e 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go index c0260f0ce34f1..43ef8e59225fb 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_linux.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go index f65bd9a7a68de..fa1b6bc61deb1 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go index 9c967eaa642d5..ee8204da4609e 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go index 9c967eaa642d5..ee8204da4609e 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go new file mode 100644 index 0000000000000..0c0d48012f796 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go @@ -0,0 +1,151 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +// +build riscv64 + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + 
Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go index 9c967eaa642d5..ee8204da4609e 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go index fd3624d93c430..8cfc648ad7e8c 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_netbsd.go +++ b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_netbsd.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go index 12f36be759e00..37629cb0ab5f5 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_openbsd.go +++ b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_openbsd.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/vendor/golang.org/x/net/ipv4/zsys_solaris.go index 0a3875cc41a9e..cb80a308b0e64 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_solaris.go +++ b/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_solaris.go package ipv4 diff --git a/vendor/golang.org/x/net/ipv6/BUILD.bazel b/vendor/golang.org/x/net/ipv6/BUILD.bazel index decd1a52ddce1..57e664efe2cd6 100644 --- a/vendor/golang.org/x/net/ipv6/BUILD.bazel +++ b/vendor/golang.org/x/net/ipv6/BUILD.bazel @@ -25,8 +25,6 @@ go_library( "icmp_windows.go", "payload.go", "payload_cmsg.go", - "payload_cmsg_go1_8.go", - "payload_cmsg_go1_9.go", "payload_nocmsg.go", "sockopt.go", "sockopt_posix.go", diff --git a/vendor/golang.org/x/net/ipv6/batch.go b/vendor/golang.org/x/net/ipv6/batch.go index 10d6492451537..2ccb9849c7859 100644 --- a/vendor/golang.org/x/net/ipv6/batch.go +++ b/vendor/golang.org/x/net/ipv6/batch.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build go1.9 - package ipv6 import ( diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go index eec529c205e53..8c221b598957f 100644 --- a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go index a045f28f74c9d..1d773cbcc8e69 100644 --- a/vendor/golang.org/x/net/ipv6/control_stub.go +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -2,12 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 import "golang.org/x/net/internal/socket" func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { - return errOpNoSupport + return errNotImplemented } diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go index 66515060a88df..0971a008bf4f9 100644 --- a/vendor/golang.org/x/net/ipv6/control_unix.go +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/control_windows.go b/vendor/golang.org/x/net/ipv6/control_windows.go index ef2563b3fc602..8882d81934dd0 100644 --- a/vendor/golang.org/x/net/ipv6/control_windows.go +++ b/vendor/golang.org/x/net/ipv6/control_windows.go @@ -4,13 +4,9 @@ package ipv6 -import ( - "syscall" - - "golang.org/x/net/internal/socket" -) +import "golang.org/x/net/internal/socket" func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { // TODO(mikio): implement this - return syscall.EWINDOWS + return errNotImplemented } diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go index eea4fde25627b..1f422e71dc351 100644 --- a/vendor/golang.org/x/net/ipv6/dgramopt.go +++ b/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -18,7 +18,7 @@ func (c *dgramOpt) MulticastHopLimit() (int, error) { } so, ok := sockOpts[ssoMulticastHopLimit] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } return so.GetInt(c.Conn) } @@ -31,7 +31,7 @@ func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { } so, ok := sockOpts[ssoMulticastHopLimit] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, hoplim) } @@ -44,7 +44,7 @@ func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { } so, ok := sockOpts[ssoMulticastInterface] if !ok { - return nil, errOpNoSupport + return nil, errNotImplemented } return so.getMulticastInterface(c.Conn) } @@ -57,7 +57,7 @@ func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { } so, ok := sockOpts[ssoMulticastInterface] if !ok { - return errOpNoSupport + return errNotImplemented } return so.setMulticastInterface(c.Conn, ifi) } @@ 
-70,7 +70,7 @@ func (c *dgramOpt) MulticastLoopback() (bool, error) { } so, ok := sockOpts[ssoMulticastLoopback] if !ok { - return false, errOpNoSupport + return false, errNotImplemented } on, err := so.GetInt(c.Conn) if err != nil { @@ -87,7 +87,7 @@ func (c *dgramOpt) SetMulticastLoopback(on bool) error { } so, ok := sockOpts[ssoMulticastLoopback] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, boolint(on)) } @@ -107,7 +107,7 @@ func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { } so, ok := sockOpts[ssoJoinGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP16(group) if grp == nil { @@ -125,7 +125,7 @@ func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { } so, ok := sockOpts[ssoLeaveGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP16(group) if grp == nil { @@ -146,7 +146,7 @@ func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net } so, ok := sockOpts[ssoJoinSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP16(group) if grp == nil { @@ -167,7 +167,7 @@ func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source ne } so, ok := sockOpts[ssoLeaveSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP16(group) if grp == nil { @@ -189,7 +189,7 @@ func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source } so, ok := sockOpts[ssoBlockSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP16(group) if grp == nil { @@ -210,7 +210,7 @@ func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source } so, ok := sockOpts[ssoUnblockSourceGroup] if !ok { - return errOpNoSupport + return errNotImplemented } grp := netAddrToIP16(group) if grp == nil { @@ -233,7 +233,7 @@ func (c *dgramOpt) Checksum() (on bool, offset int, err error) { } so, ok := sockOpts[ssoChecksum] if !ok { - return false, 0, errOpNoSupport + return false, 0, errNotImplemented } offset, err = so.GetInt(c.Conn) if err != nil { @@ -254,7 +254,7 @@ func (c *dgramOpt) SetChecksum(on bool, offset int) error { } so, ok := sockOpts[ssoChecksum] if !ok { - return errOpNoSupport + return errNotImplemented } if !on { offset = -1 @@ -269,7 +269,7 @@ func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { } so, ok := sockOpts[ssoICMPFilter] if !ok { - return nil, errOpNoSupport + return nil, errNotImplemented } return so.getICMPFilter(c.Conn) } @@ -281,7 +281,7 @@ func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { } so, ok := sockOpts[ssoICMPFilter] if !ok { - return errOpNoSupport + return errNotImplemented } return so.setICMPFilter(c.Conn, f) } @@ -295,7 +295,7 @@ func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { } so, ok := sockOpts[ssoAttachFilter] if !ok { - return errOpNoSupport + return errNotImplemented } return so.setBPF(c.Conn, filter) } diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go index d38ea0da6c298..e0be9d50d70d3 100644 --- a/vendor/golang.org/x/net/ipv6/doc.go +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -240,5 +240,4 @@ // IncludeSourceSpecificGroup may return an error. package ipv6 // import "golang.org/x/net/ipv6" -// BUG(mikio): This package is not implemented on AIX, JS, NaCl and -// Plan 9. +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. 
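Across the ipv4 and ipv6 option stubs above, the sentinel error changes from errOpNoSupport ("operation not supported") to errNotImplemented, which now embeds runtime.GOOS and runtime.GOARCH in its message. The following is a minimal illustrative sketch (not part of the vendored change) of how a caller might treat such failures as non-fatal; the connection setup and log handling are assumptions for the example only:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("udp6", "[::1]:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv6.NewPacketConn(c)
	// On platforms where the underlying socket option is unavailable, this
	// now fails with "not implemented on <GOOS>/<GOARCH>" rather than the
	// older generic "operation not supported"; treat it as a soft failure.
	if err := p.SetMulticastHopLimit(16); err != nil {
		log.Printf("multicast hop limit not configurable: %v", err)
	}
}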
diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go index 932575639632a..f534a0bf38d3d 100644 --- a/vendor/golang.org/x/net/ipv6/endpoint.go +++ b/vendor/golang.org/x/net/ipv6/endpoint.go @@ -37,7 +37,7 @@ func (c *Conn) PathMTU() (int, error) { } so, ok := sockOpts[ssoPathMTU] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } _, mtu, err := so.getMTUInfo(c.Conn) if err != nil { diff --git a/vendor/golang.org/x/net/ipv6/genericopt.go b/vendor/golang.org/x/net/ipv6/genericopt.go index 1a18f7549abe5..0326aed6def8b 100644 --- a/vendor/golang.org/x/net/ipv6/genericopt.go +++ b/vendor/golang.org/x/net/ipv6/genericopt.go @@ -12,7 +12,7 @@ func (c *genericOpt) TrafficClass() (int, error) { } so, ok := sockOpts[ssoTrafficClass] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } return so.GetInt(c.Conn) } @@ -25,7 +25,7 @@ func (c *genericOpt) SetTrafficClass(tclass int) error { } so, ok := sockOpts[ssoTrafficClass] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, tclass) } @@ -37,7 +37,7 @@ func (c *genericOpt) HopLimit() (int, error) { } so, ok := sockOpts[ssoHopLimit] if !ok { - return 0, errOpNoSupport + return 0, errNotImplemented } return so.GetInt(c.Conn) } @@ -50,7 +50,7 @@ func (c *genericOpt) SetHopLimit(hoplim int) error { } so, ok := sockOpts[ssoHopLimit] if !ok { - return errOpNoSupport + return errNotImplemented } return so.SetInt(c.Conn, hoplim) } diff --git a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go index 7ac5352294bdb..f767b1f5dddb1 100644 --- a/vendor/golang.org/x/net/ipv6/helper.go +++ b/vendor/golang.org/x/net/ipv6/helper.go @@ -7,6 +7,7 @@ package ipv6 import ( "errors" "net" + "runtime" ) var ( @@ -14,8 +15,8 @@ var ( errMissingAddress = errors.New("missing address") errHeaderTooShort = errors.New("header too short") errInvalidConnType = errors.New("invalid conn type") - errOpNoSupport = errors.New("operation not supported") errNoSuchInterface = errors.New("no such interface") + errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) ) func boolint(b bool) int { diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go index e1a791de46ed3..b03025cdccfc5 100644 --- a/vendor/golang.org/x/net/ipv6/icmp_bsd.go +++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd +// +build aix darwin dragonfly freebsd netbsd openbsd package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go index c4b9be6dbffb7..370e51acd1fb3 100644 --- a/vendor/golang.org/x/net/ipv6/icmp_stub.go +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go index e17847d0be388..284a04278ed02 100644 --- a/vendor/golang.org/x/net/ipv6/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -2,11 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package ipv6 -import "net" +import ( + "net" + + "golang.org/x/net/internal/socket" +) // ReadFrom reads a payload of the received IPv6 datagram, from the // endpoint c, copying the payload into b. It returns the number of @@ -16,7 +20,32 @@ func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net. if !c.ok() { return 0, nil, nil, errInvalidConn } - return c.readFrom(b) + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP16(m.Addr) + } + return m.N, cm, m.Addr, nil } // WriteTo writes a payload of the IPv6 datagram, to the destination @@ -28,5 +57,14 @@ func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n if !c.ok() { return 0, errInvalidConn } - return c.writeTo(b, cm, dst) + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err = c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err } diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go deleted file mode 100644 index a48a6ed64af14..0000000000000 --- a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.9 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package ipv6 - -import "net" - -func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { - c.rawOpt.RLock() - oob := NewControlMessage(c.rawOpt.cflags) - c.rawOpt.RUnlock() - var nn int - switch c := c.PacketConn.(type) { - case *net.UDPConn: - if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { - return 0, nil, nil, err - } - case *net.IPConn: - if n, nn, _, src, err = c.ReadMsgIP(b, oob); err != nil { - return 0, nil, nil, err - } - default: - return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} - } - if nn > 0 { - cm = new(ControlMessage) - if err = cm.Parse(oob[:nn]); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - } - if cm != nil { - cm.Src = netAddrToIP16(src) - } - return -} - -func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { - oob := cm.Marshal() - if dst == nil { - return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} - } - switch c := c.PacketConn.(type) { - case *net.UDPConn: - n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) - case *net.IPConn: - n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) - default: - return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} - } - return -} diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go deleted file mode 100644 index fb196ed804b2e..0000000000000 --- a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.9 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package ipv6 - -import ( - "net" - - "golang.org/x/net/internal/socket" -) - -func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { - c.rawOpt.RLock() - m := socket.Message{ - Buffers: [][]byte{b}, - OOB: NewControlMessage(c.rawOpt.cflags), - } - c.rawOpt.RUnlock() - switch c.PacketConn.(type) { - case *net.UDPConn: - if err := c.RecvMsg(&m, 0); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - case *net.IPConn: - if err := c.RecvMsg(&m, 0); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - default: - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} - } - var cm *ControlMessage - if m.NN > 0 { - cm = new(ControlMessage) - if err := cm.Parse(m.OOB[:m.NN]); err != nil { - return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} - } - cm.Src = netAddrToIP16(m.Addr) - } - return m.N, cm, m.Addr, nil -} - -func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { - m := socket.Message{ - Buffers: [][]byte{b}, - OOB: cm.Marshal(), - Addr: dst, - } - err := c.SendMsg(&m, 0) - if err != nil { - err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} - } - return m.N, err -} diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go index bfb5447840733..c5a4c967527d7 100644 --- a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go index 0eac86eb8ccd7..824c623ccefd5 100644 --- a/vendor/golang.org/x/net/ipv6/sockopt_posix.go +++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go @@ -2,12 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package ipv6 import ( "net" + "runtime" "unsafe" "golang.org/x/net/bpf" @@ -37,7 +38,7 @@ func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { return nil, err } if n != sizeofICMPv6Filter { - return nil, errOpNoSupport + return nil, errNotImplemented } return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil } @@ -54,10 +55,11 @@ func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { return nil, 0, err } if n != sizeofIPv6Mtuinfo { - return nil, 0, errOpNoSupport + return nil, 0, errNotImplemented } mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) - if mi.Addr.Scope_id == 0 { + if mi.Addr.Scope_id == 0 || runtime.GOOS == "aix" { + // AIX kernel might return a wrong address. 
return nil, int(mi.Mtu), nil } ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) @@ -74,7 +76,7 @@ func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) erro case ssoTypeGroupReq: return so.setGroupReq(c, ifi, grp) default: - return errOpNoSupport + return errNotImplemented } } diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go index 1f4a273e44529..0a87a93bbd4db 100644 --- a/vendor/golang.org/x/net/ipv6/sockopt_stub.go +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 @@ -14,33 +14,33 @@ import ( ) func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { - return nil, errOpNoSupport + return nil, errNotImplemented } func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { - return nil, errOpNoSupport + return nil, errNotImplemented } func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { - return nil, 0, errOpNoSupport + return nil, 0, errNotImplemented } func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { - return errOpNoSupport + return errNotImplemented } diff --git a/vendor/golang.org/x/net/ipv6/sys_aix.go b/vendor/golang.org/x/net/ipv6/sys_aix.go new file mode 100644 index 0000000000000..bce7091fb0b3d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_aix.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Added for go1.11 compatibility +// +build aix + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/vendor/golang.org/x/net/ipv6/sys_asmreq.go index b0510c0b5d5e0..8c3934c3eecb0 100644 --- a/vendor/golang.org/x/net/ipv6/sys_asmreq.go +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go index eece96187b8f5..87ae4818144fc 100644 --- a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 @@ -13,5 +13,5 @@ import ( ) func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go index 676bea555f0f2..eb9f8316237a2 100644 --- a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -12,5 +12,5 @@ import ( ) func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { - return errOpNoSupport + return errNotImplemented } diff --git a/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/vendor/golang.org/x/net/ipv6/sys_freebsd.go index e9349dc2cc20d..85a9f5d07de48 100644 --- a/vendor/golang.org/x/net/ipv6/sys_freebsd.go +++ b/vendor/golang.org/x/net/ipv6/sys_freebsd.go @@ -51,7 +51,7 @@ func init() { archs, _ := syscall.Sysctl("kern.supported_archs") for _, s := range strings.Fields(archs) { if s == "amd64" { - freebsd32o64 = true + compatFreeBSD32 = true break } } diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go index add8ccc0b1b19..9b52e978cbd9d 100644 --- a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin freebsd linux solaris +// +build aix darwin freebsd linux solaris package ipv6 @@ -13,7 +13,7 @@ import ( "golang.org/x/net/internal/socket" ) -var freebsd32o64 bool +var compatFreeBSD32 bool // 386 emulation on amd64 func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { var gr groupReq @@ -22,7 +22,7 @@ func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) e } gr.setGroup(grp) var b []byte - if freebsd32o64 { + if compatFreeBSD32 { var d [sizeofGroupReq + 4]byte s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) copy(d[:4], s[:4]) @@ -41,7 +41,7 @@ func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, sr } gsr.setSourceGroup(grp, src) var b []byte - if freebsd32o64 { + if compatFreeBSD32 { var d [sizeofGroupSourceReq + 4]byte s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) copy(d[:4], s[:4]) diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go index 581ee490ff210..d5bc1108c5f2e 100644 --- a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !darwin,!freebsd,!linux,!solaris +// +build !aix,!darwin,!freebsd,!linux,!solaris package ipv6 @@ -13,9 +13,9 @@ import ( ) func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { - return errOpNoSupport + return errNotImplemented } func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { - return errOpNoSupport + return errNotImplemented } diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go index b845388ea4a3e..4f252d09f6cb7 100644 --- a/vendor/golang.org/x/net/ipv6/sys_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go new file mode 100644 index 0000000000000..bf44e338bd80c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go @@ -0,0 +1,103 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysICMP6_FILTER = 0x26 + + sysIPV6_CHECKSUM = 0x27 + sysIPV6_V6ONLY = 0x25 + + sysIPV6_RTHDRDSTOPTS = 0x37 + + sysIPV6_RECVPKTINFO = 0x23 + sysIPV6_RECVHOPLIMIT = 0x29 + sysIPV6_RECVRTHDR = 0x33 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_RECVDSTOPTS = 0x38 + + sysIPV6_USE_MIN_MTU = 0x2c + sysIPV6_RECVPATHMTU = 0x2f + sysIPV6_PATHMTU = 0x2e + + sysIPV6_PKTINFO = 0x21 + sysIPV6_HOPLIMIT = 0x28 + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x34 + sysIPV6_DSTOPTS = 0x36 + sysIPV6_RTHDR = 0x32 + + sysIPV6_RECVTCLASS = 0x2a + + sysIPV6_TCLASS = 0x2b + sysIPV6_DONTFRAG = 0x2d + + sizeofSockaddrStorage = 0x508 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x510 + sizeofGroupSourceReq = 0xa18 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + X__ss_len uint8 + Family uint8 + X__ss_pad1 [6]uint8 + X__ss_align int64 + X__ss_pad2 [1265]uint8 + Pad_cgo_0 [7]byte +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/vendor/golang.org/x/net/ipv6/zsys_darwin.go index 6aab1dfab7cb4..555744afd71ad 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_darwin.go +++ b/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_darwin.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go index d2de804d88c3b..cf3cc1024acb1 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go +++ b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_dragonfly.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go index 919e572d4a101..73f31b260ea18 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go index cb8141f9c65e3..490ce7cf1044c 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go index cb8141f9c65e3..490ce7cf1044c 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_freebsd.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go index 73aa8c6dfce03..14155dec21e82 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_386.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go index b64f0157d7a45..9566d7646857f 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go index 73aa8c6dfce03..14155dec21e82 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go index b64f0157d7a45..9566d7646857f 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs defs_linux.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go index 73aa8c6dfce03..14155dec21e82 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go index b64f0157d7a45..9566d7646857f 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go index b64f0157d7a45..9566d7646857f 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go index 73aa8c6dfce03..14155dec21e82 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go index c9bf6a87ef737..a51e142b42a5a 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go index b64f0157d7a45..9566d7646857f 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go index b64f0157d7a45..9566d7646857f 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go new file mode 100644 index 0000000000000..1ee237b2b76ab --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go @@ -0,0 +1,173 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +// +build riscv64 + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go 
b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go index b64f0157d7a45..9566d7646857f 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_linux.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go index bcada13b7a7fc..e39571e072509 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_netbsd.go +++ b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_netbsd.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go index 86cf3c63799e6..cc1899a630c2b 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_openbsd.go +++ b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_openbsd.go package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/vendor/golang.org/x/net/ipv6/zsys_solaris.go index cf1837dd2af63..690eef9341ac7 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_solaris.go +++ b/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs defs_solaris.go package ipv6 diff --git a/vendor/golang.org/x/net/proxy/BUILD.bazel b/vendor/golang.org/x/net/proxy/BUILD.bazel index 2e136bb61ee08..21d838f8781d1 100644 --- a/vendor/golang.org/x/net/proxy/BUILD.bazel +++ b/vendor/golang.org/x/net/proxy/BUILD.bazel @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "dial.go", "direct.go", "per_host.go", "proxy.go", diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go new file mode 100644 index 0000000000000..811c2e4e962ee --- /dev/null +++ b/vendor/golang.org/x/net/proxy/dial.go @@ -0,0 +1,54 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +// A ContextDialer dials using a context. +type ContextDialer interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment. +// +// The passed ctx is only used for returning the Conn, not the lifetime of the Conn. +// +// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer +// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout. +// +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func Dial(ctx context.Context, network, address string) (net.Conn, error) { + d := FromEnvironment() + if xd, ok := d.(ContextDialer); ok { + return xd.DialContext(ctx, network, address) + } + return dialContext(ctx, d, network, address) +} + +// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. 
+func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) { + var ( + conn net.Conn + done = make(chan struct{}, 1) + err error + ) + go func() { + conn, err = d.Dial(network, address) + close(done) + if conn != nil && ctx.Err() != nil { + conn.Close() + } + }() + select { + case <-ctx.Done(): + err = ctx.Err() + case <-done: + } + return conn, err +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go index 4c5ad88b1e759..3d66bdef9d749 100644 --- a/vendor/golang.org/x/net/proxy/direct.go +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -5,14 +5,27 @@ package proxy import ( + "context" "net" ) type direct struct{} -// Direct is a direct proxy: one that makes network connections directly. +// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext. var Direct = direct{} +var ( + _ Dialer = Direct + _ ContextDialer = Direct +) + +// Dial directly invokes net.Dial with the supplied parameters. func (direct) Dial(network, addr string) (net.Conn, error) { return net.Dial(network, addr) } + +// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters. +func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go index 0689bb6a70f66..573fe79e86ec5 100644 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -5,6 +5,7 @@ package proxy import ( + "context" "net" "strings" ) @@ -41,6 +42,20 @@ func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { return p.dialerForRequest(host).Dial(network, addr) } +// DialContext connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + d := p.dialerForRequest(host) + if x, ok := d.(ContextDialer); ok { + return x.DialContext(ctx, network, addr) + } + return dialContext(ctx, d, network, addr) +} + func (p *PerHost) dialerForRequest(host string) Dialer { if ip := net.ParseIP(host); ip != nil { for _, net := range p.bypassNetworks { diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go index f6026b902c894..9ff4b9a7767ac 100644 --- a/vendor/golang.org/x/net/proxy/proxy.go +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -15,6 +15,7 @@ import ( ) // A Dialer is a means to establish a connection. +// Custom dialers should also implement ContextDialer. type Dialer interface { // Dial connects to the given address via the proxy. Dial(network, addr string) (c net.Conn, err error) @@ -25,21 +26,30 @@ type Auth struct { User, Password string } -// FromEnvironment returns the dialer specified by the proxy related variables in -// the environment. +// FromEnvironment returns the dialer specified by the proxy-related +// variables in the environment and makes underlying connections +// directly. func FromEnvironment() Dialer { + return FromEnvironmentUsing(Direct) +} + +// FromEnvironmentUsing returns the dialer specify by the proxy-related +// variables in the environment and makes underlying connections +// using the provided forwarding Dialer (for instance, a *net.Dialer +// with desired configuration). 
+func FromEnvironmentUsing(forward Dialer) Dialer { allProxy := allProxyEnv.Get() if len(allProxy) == 0 { - return Direct + return forward } proxyURL, err := url.Parse(allProxy) if err != nil { - return Direct + return forward } - proxy, err := FromURL(proxyURL, Direct) + proxy, err := FromURL(proxyURL, forward) if err != nil { - return Direct + return forward } noProxy := noProxyEnv.Get() @@ -47,7 +57,7 @@ func FromEnvironment() Dialer { return proxy } - perHost := NewPerHost(proxy, Direct) + perHost := NewPerHost(proxy, forward) perHost.AddFromString(noProxy) return perHost } diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go index 56345ec8b6394..c91651f96db92 100644 --- a/vendor/golang.org/x/net/proxy/socks5.go +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -17,8 +17,14 @@ import ( func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { d := socks.NewDialer(network, address) if forward != nil { - d.ProxyDial = func(_ context.Context, network string, address string) (net.Conn, error) { - return forward.Dial(network, address) + if f, ok := forward.(ContextDialer); ok { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return f.DialContext(ctx, network, address) + } + } else { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return dialContext(ctx, forward, network, address) + } } } if auth != nil { diff --git a/vendor/golang.org/x/net/trace/BUILD.bazel b/vendor/golang.org/x/net/trace/BUILD.bazel new file mode 100644 index 0000000000000..b6a666d832dc8 --- /dev/null +++ b/vendor/golang.org/x/net/trace/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "events.go", + "histogram.go", + "trace.go", + ], + importmap = "k8s.io/kops/vendor/golang.org/x/net/trace", + importpath = "golang.org/x/net/trace", + visibility = ["//visibility:public"], + deps = ["//vendor/golang.org/x/net/internal/timeseries:go_default_library"], +) diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 0000000000000..c646a6952e5e3 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. 
+ Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. 
+func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. +func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
+ el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. 
+func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

/debug/events

+ + + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
+ +{{if $.EventLogs}} +
+

Family: {{$.Family}}

+ +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 0000000000000..9bf4286c794b8 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 0000000000000..3ebf6f2daa304 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
+ b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
+ if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index 6eb2aa95f5b69..81de32b360b27 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -194,9 +194,16 @@ func (cs computeSource) Token() (*oauth2.Token, error) { if res.ExpiresInSec == 0 || res.AccessToken == "" { return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") } - return &oauth2.Token{ + tok := &oauth2.Token{ AccessToken: res.AccessToken, TokenType: res.TokenType, Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), - }, nil + } + // NOTE(cbro): add hidden metadata about where the token is from. + // This is needed for detection by client libraries to know that credentials come from the metadata server. + // This may be removed in a future version of this library. + return tok.WithExtra(map[string]interface{}{ + "oauth2.google.tokenSource": "compute-metadata", + "oauth2.google.serviceAccount": acct, + }), nil } diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go index 99f3e0a32c8a4..b2bf18298b0c9 100644 --- a/vendor/golang.org/x/oauth2/jwt/jwt.go +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -66,6 +66,14 @@ type Config struct { // request. If empty, the value of TokenURL is used as the // intended audience. Audience string + + // PrivateClaims optionally specifies custom private claims in the JWT. + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + PrivateClaims map[string]interface{} + + // UseIDToken optionally specifies whether ID token should be used instead + // of access token when the server returns both. + UseIDToken bool } // TokenSource returns a JWT TokenSource using the configuration @@ -97,9 +105,10 @@ func (js jwtSource) Token() (*oauth2.Token, error) { } hc := oauth2.NewClient(js.ctx, nil) claimSet := &jws.ClaimSet{ - Iss: js.conf.Email, - Scope: strings.Join(js.conf.Scopes, " "), - Aud: js.conf.TokenURL, + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + PrivateClaims: js.conf.PrivateClaims, } if subject := js.conf.Subject; subject != "" { claimSet.Sub = subject @@ -166,5 +175,11 @@ func (js jwtSource) Token() (*oauth2.Token, error) { } token.Expiry = time.Unix(claimSet.Exp, 0) } + if js.conf.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("oauth2: response doesn't have JWT token") + } + token.AccessToken = tokenRes.IDToken + } return token, nil } diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 428283f0b0102..291df5c833f96 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -117,7 +117,7 @@ var ( // ApprovalForce forces the users to view the consent dialog // and confirm the permissions request at the URL returned // from AuthCodeURL, even if they've already done so. - ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") + ApprovalForce AuthCodeOption = SetAuthURLParam("prompt", "consent") ) // An AuthCodeOption is passed to Config.AuthCodeURL. 
diff --git a/vendor/golang.org/x/sys/cpu/BUILD.bazel b/vendor/golang.org/x/sys/cpu/BUILD.bazel index a3959f3a5daf8..6351ed8b148f4 100644 --- a/vendor/golang.org/x/sys/cpu/BUILD.bazel +++ b/vendor/golang.org/x/sys/cpu/BUILD.bazel @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "asm_aix_ppc64.s", "byteorder.go", "cpu.go", "cpu_arm.go", diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 0000000000000..06f84b8555800 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go b/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go index d8c26a048d73b..be6027224728b 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go @@ -6,8 +6,6 @@ package cpu -import "golang.org/x/sys/unix" - const cacheLineSize = 128 const ( @@ -18,7 +16,7 @@ const ( ) func init() { - impl := unix.Getsystemcfg(_SC_IMPL) + impl := getsystemcfg(_SC_IMPL) if impl&_IMPL_POWER8 != 0 { PPC64.IsPOWER8 = true } @@ -28,3 +26,9 @@ func init() { Initialized = true } + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go new file mode 100644 index 0000000000000..78fe25e86fbbe --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -0,0 +1,36 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on AIX without depending on x/sys/unix. +// (See golang.org/issue/32102) + +// +build aix,ppc64 +// +build !gccgo + +package cpu + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" + +//go:linkname libc_getsystemcfg libc_getsystemcfg + +type syscallFunc uintptr + +var libc_getsystemcfg syscallFunc + +type errno = syscall.Errno + +// Implemented in runtime/syscall_aix.go. 
+func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) + +func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} diff --git a/vendor/golang.org/x/sys/unix/BUILD.bazel b/vendor/golang.org/x/sys/unix/BUILD.bazel index f95df7764534c..16309e1077e1a 100644 --- a/vendor/golang.org/x/sys/unix/BUILD.bazel +++ b/vendor/golang.org/x/sys/unix/BUILD.bazel @@ -49,9 +49,8 @@ go_library( "fcntl_darwin.go", "fcntl_linux_32bit.go", "ioctl.go", - "openbsd_pledge.go", - "openbsd_unveil.go", "pagesize_unix.go", + "pledge_openbsd.go", "race.go", "race0.go", "sockcmsg_linux.go", @@ -97,6 +96,7 @@ go_library( "syscall_unix_gc.go", "syscall_unix_gc_ppc64x.go", "timestruct.go", + "unveil_openbsd.go", "xattr_bsd.go", "zerrors_darwin_386.go", "zerrors_darwin_amd64.go", diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s new file mode 100644 index 0000000000000..6db717de53c7f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -0,0 +1,54 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build riscv64,!gccgo + +#include "textflag.h" + +// +// System calls for linux/riscv64. +// +// Where available, just jump to package syscall's implementation of +// these functions. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + CALL runtime·entersyscall(SB) + MOV a1+8(FP), A0 + MOV a2+16(FP), A1 + MOV a3+24(FP), A2 + MOV $0, A3 + MOV $0, A4 + MOV $0, A5 + MOV $0, A6 + MOV trap+0(FP), A7 // syscall entry + ECALL + MOV A0, r1+32(FP) // r1 + MOV A1, r2+40(FP) // r2 + CALL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOV a1+8(FP), A0 + MOV a2+16(FP), A1 + MOV a3+24(FP), A2 + MOV ZERO, A3 + MOV ZERO, A4 + MOV ZERO, A5 + MOV trap+0(FP), A7 // syscall entry + ECALL + MOV A0, r1+32(FP) + MOV A1, r2+40(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s new file mode 100644 index 0000000000000..0cedea3d39d8e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System call support for arm64, OpenBSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. 
+ +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 1e5c59d0dd2ea..5a22eca967612 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -105,25 +105,25 @@ dragonfly_amd64) freebsd_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_amd64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_arm) mkerrors="$mkerrors" mksyscall="go run mksyscall.go -l32 -arm" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; freebsd_arm64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; netbsd_386) @@ -146,24 +146,39 @@ netbsd_arm) # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; +netbsd_arm64) + mkerrors="$mkerrors -m64" + mksyscall="go run mksyscall.go -netbsd" + mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; openbsd_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32 -openbsd" - mksysctl="./mksysctl_openbsd.pl" + mksysctl="go run mksysctl_openbsd.go" mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_amd64) mkerrors="$mkerrors -m64" mksyscall="go run mksyscall.go -openbsd" - mksysctl="./mksysctl_openbsd.pl" + mksysctl="go run mksysctl_openbsd.go" mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_arm) mkerrors="$mkerrors" mksyscall="go run mksyscall.go -l32 -openbsd -arm" - mksysctl="./mksysctl_openbsd.pl" + mksysctl="go run mksysctl_openbsd.go" + mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" + # Let the type of C char be signed for making the bare syscall + # API consistent across platforms. 
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +openbsd_arm64) + mkerrors="$mkerrors -m64" + mksyscall="go run mksyscall.go -openbsd" + mksysctl="go run mksysctl_openbsd.go" mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index cfb61ba049794..4c91159c12116 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -182,6 +182,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -222,6 +223,7 @@ struct ltchars { #include #include #include +#include #include #include @@ -432,7 +434,7 @@ ccflags="$@" $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|MCAST|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 == "ICMPV6_FILTER" || @@ -465,7 +467,7 @@ ccflags="$@" $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL)$/ && + $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && $2 ~ /^(BPF|DLT)_/ || $2 ~ /^(CLOCK|TIMER)_/ || $2 ~ /^CAN_/ || @@ -499,6 +501,7 @@ ccflags="$@" $2 ~ /^NFN/ || $2 ~ /^XDP_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || + $2 ~ /^CRYPTO_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~/^PPPIOC/ || diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl deleted file mode 100644 index 20632e14608e3..0000000000000 --- a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl +++ /dev/null @@ -1,265 +0,0 @@ -#!/usr/bin/env perl - -# Copyright 2011 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# -# Parse the header files for OpenBSD and generate a Go usable sysctl MIB. -# -# Build a MIB with each entry being an array containing the level, type and -# a hash that will contain additional entries if the current entry is a node. -# We then walk this MIB and create a flattened sysctl name to OID hash. 
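// The Perl generator deleted here (replaced by mksysctl_openbsd.go, per the
// mkall.sh change above) produces the flattened sysctl name-to-MIB table that
// the unix package consults at run time. A hedged sketch of the caller's side,
// assuming an OpenBSD host where "kern.ostype" appears in the generated
// zsysctl_openbsd_*.go table:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Sysctl resolves the dotted name to its MIB through the generated
	// table, then issues the sysctl(2) call.
	ostype, err := unix.Sysctl("kern.ostype")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ostype)
}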
-# - -use strict; - -if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") { - print STDERR "GOARCH or GOOS not defined in environment\n"; - exit 1; -} - -my $debug = 0; -my %ctls = (); - -my @headers = qw ( - sys/sysctl.h - sys/socket.h - sys/tty.h - sys/malloc.h - sys/mount.h - sys/namei.h - sys/sem.h - sys/shm.h - sys/vmmeter.h - uvm/uvmexp.h - uvm/uvm_param.h - uvm/uvm_swap_encrypt.h - ddb/db_var.h - net/if.h - net/if_pfsync.h - net/pipex.h - netinet/in.h - netinet/icmp_var.h - netinet/igmp_var.h - netinet/ip_ah.h - netinet/ip_carp.h - netinet/ip_divert.h - netinet/ip_esp.h - netinet/ip_ether.h - netinet/ip_gre.h - netinet/ip_ipcomp.h - netinet/ip_ipip.h - netinet/pim_var.h - netinet/tcp_var.h - netinet/udp_var.h - netinet6/in6.h - netinet6/ip6_divert.h - netinet6/pim6_var.h - netinet/icmp6.h - netmpls/mpls.h -); - -my @ctls = qw ( - kern - vm - fs - net - #debug # Special handling required - hw - #machdep # Arch specific - user - ddb - #vfs # Special handling required - fs.posix - kern.forkstat - kern.intrcnt - kern.malloc - kern.nchstats - kern.seminfo - kern.shminfo - kern.timecounter - kern.tty - kern.watchdog - net.bpf - net.ifq - net.inet - net.inet.ah - net.inet.carp - net.inet.divert - net.inet.esp - net.inet.etherip - net.inet.gre - net.inet.icmp - net.inet.igmp - net.inet.ip - net.inet.ip.ifq - net.inet.ipcomp - net.inet.ipip - net.inet.mobileip - net.inet.pfsync - net.inet.pim - net.inet.tcp - net.inet.udp - net.inet6 - net.inet6.divert - net.inet6.ip6 - net.inet6.icmp6 - net.inet6.pim6 - net.inet6.tcp6 - net.inet6.udp6 - net.mpls - net.mpls.ifq - net.key - net.pflow - net.pfsync - net.pipex - net.rt - vm.swapencrypt - #vfsgenctl # Special handling required -); - -# Node name "fixups" -my %ctl_map = ( - "ipproto" => "net.inet", - "net.inet.ipproto" => "net.inet", - "net.inet6.ipv6proto" => "net.inet6", - "net.inet6.ipv6" => "net.inet6.ip6", - "net.inet.icmpv6" => "net.inet6.icmp6", - "net.inet6.divert6" => "net.inet6.divert", - "net.inet6.tcp6" => "net.inet.tcp", - "net.inet6.udp6" => "net.inet.udp", - "mpls" => "net.mpls", - "swpenc" => "vm.swapencrypt" -); - -# Node mappings -my %node_map = ( - "net.inet.ip.ifq" => "net.ifq", - "net.inet.pfsync" => "net.pfsync", - "net.mpls.ifq" => "net.ifq" -); - -my $ctlname; -my %mib = (); -my %sysctl = (); -my $node; - -sub debug() { - print STDERR "$_[0]\n" if $debug; -} - -# Walk the MIB and build a sysctl name to OID mapping. -sub build_sysctl() { - my ($node, $name, $oid) = @_; - my %node = %{$node}; - my @oid = @{$oid}; - - foreach my $key (sort keys %node) { - my @node = @{$node{$key}}; - my $nodename = $name.($name ne '' ? '.' : '').$key; - my @nodeoid = (@oid, $node[0]); - if ($node[1] eq 'CTLTYPE_NODE') { - if (exists $node_map{$nodename}) { - $node = \%mib; - $ctlname = $node_map{$nodename}; - foreach my $part (split /\./, $ctlname) { - $node = \%{@{$$node{$part}}[2]}; - } - } else { - $node = $node[2]; - } - &build_sysctl($node, $nodename, \@nodeoid); - } elsif ($node[1] ne '') { - $sysctl{$nodename} = \@nodeoid; - } - } -} - -foreach my $ctl (@ctls) { - $ctls{$ctl} = $ctl; -} - -# Build MIB -foreach my $header (@headers) { - &debug("Processing $header..."); - open HEADER, "/usr/include/$header" || - print STDERR "Failed to open $header\n"; - while (
) { - if ($_ =~ /^#define\s+(CTL_NAMES)\s+{/ || - $_ =~ /^#define\s+(CTL_(.*)_NAMES)\s+{/ || - $_ =~ /^#define\s+((.*)CTL_NAMES)\s+{/) { - if ($1 eq 'CTL_NAMES') { - # Top level. - $node = \%mib; - } else { - # Node. - my $nodename = lc($2); - if ($header =~ /^netinet\//) { - $ctlname = "net.inet.$nodename"; - } elsif ($header =~ /^netinet6\//) { - $ctlname = "net.inet6.$nodename"; - } elsif ($header =~ /^net\//) { - $ctlname = "net.$nodename"; - } else { - $ctlname = "$nodename"; - $ctlname =~ s/^(fs|net|kern)_/$1\./; - } - if (exists $ctl_map{$ctlname}) { - $ctlname = $ctl_map{$ctlname}; - } - if (not exists $ctls{$ctlname}) { - &debug("Ignoring $ctlname..."); - next; - } - - # Walk down from the top of the MIB. - $node = \%mib; - foreach my $part (split /\./, $ctlname) { - if (not exists $$node{$part}) { - &debug("Missing node $part"); - $$node{$part} = [ 0, '', {} ]; - } - $node = \%{@{$$node{$part}}[2]}; - } - } - - # Populate current node with entries. - my $i = -1; - while (defined($_) && $_ !~ /^}/) { - $_ =
; - $i++ if $_ =~ /{.*}/; - next if $_ !~ /{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}/; - $$node{$1} = [ $i, $2, {} ]; - } - } - } - close HEADER; -} - -&build_sysctl(\%mib, "", []); - -print <>= 32 + stat.Mtim.Nsec >>= 32 + stat.Ctim.Nsec >>= 32 +} + +func Fstat(fd int, stat *Stat_t) error { + err := fstat(fd, stat) + if err != nil { + return err + } + fixStatTimFields(stat) + return nil +} + +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) error { + err := fstatat(dirfd, path, stat, flags) + if err != nil { + return err + } + fixStatTimFields(stat) + return nil +} + +func Lstat(path string, stat *Stat_t) error { + err := lstat(path, stat) + if err != nil { + return err + } + fixStatTimFields(stat) + return nil +} + +func Stat(path string, statptr *Stat_t) error { + err := stat(path, statptr) + if err != nil { + return err + } + fixStatTimFields(statptr) + return nil +} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index a7ca1ebea315e..1b6abe123b35b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -362,7 +362,21 @@ func Getdents(fd int, buf []byte) (n int, err error) { func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { if supportsABI(_ino64First) { - return getdirentries_freebsd12(fd, buf, basep) + if unsafe.Sizeof(*basep) == 8 { + return getdirentries_freebsd12(fd, buf, (*uint64)(unsafe.Pointer(basep))) + } + // The freebsd12 syscall needs a 64-bit base. On 32-bit machines + // we can't just use the basep passed in. See #32498. + var base uint64 = uint64(*basep) + n, err = getdirentries_freebsd12(fd, buf, &base) + *basep = uintptr(base) + if base>>32 != 0 { + // We can't stuff the base back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO is allowed by getdirentries. + err = EIO + } + return } // The old syscall entries are smaller than the new. 
Use 1/4 of the original @@ -404,22 +418,22 @@ func roundup(x, y int) int { func (s *Stat_t) convertFrom(old *stat_freebsd11_t) { *s = Stat_t{ - Dev: uint64(old.Dev), - Ino: uint64(old.Ino), - Nlink: uint64(old.Nlink), - Mode: old.Mode, - Uid: old.Uid, - Gid: old.Gid, - Rdev: uint64(old.Rdev), - Atim: old.Atim, - Mtim: old.Mtim, - Ctim: old.Ctim, - Birthtim: old.Birthtim, - Size: old.Size, - Blocks: old.Blocks, - Blksize: old.Blksize, - Flags: old.Flags, - Gen: uint64(old.Gen), + Dev: uint64(old.Dev), + Ino: uint64(old.Ino), + Nlink: uint64(old.Nlink), + Mode: old.Mode, + Uid: old.Uid, + Gid: old.Gid, + Rdev: uint64(old.Rdev), + Atim: old.Atim, + Mtim: old.Mtim, + Ctim: old.Ctim, + Btim: old.Btim, + Size: old.Size, + Blocks: old.Blocks, + Blksize: old.Blksize, + Flags: old.Flags, + Gen: uint64(old.Gen), } } @@ -555,7 +569,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) //sys getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) -//sys getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) +//sys getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 7e429ab2f9855..c92545ea51c1d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -109,6 +109,12 @@ func IoctlGetInt(fd int, req uint) (int, error) { return value, err } +func IoctlGetUint32(fd int, req uint) (uint32, error) { + var value uint32 + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { var value Winsize err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) @@ -1531,9 +1537,13 @@ func Setgid(uid int) (err error) { return EOPNOTSUPP } +func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { + return signalfd(fd, sigmask, _C__NSIG/8, flags) +} + //sys Setpriority(which int, who int, prio int) (err error) //sys Setxattr(path string, attr string, data []byte, flags int) (err error) -//sys Signalfd(fd int, mask *Sigset_t, flags int) = SYS_SIGNALFD4 +//sys signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) = SYS_SIGNALFD4 //sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) //sys Sync() //sys Syncfs(fd int) (err error) @@ -1662,6 +1672,82 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { return EACCES } +//sys nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) = SYS_NAME_TO_HANDLE_AT +//sys openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) = SYS_OPEN_BY_HANDLE_AT + +// fileHandle is the argument to nameToHandleAt and openByHandleAt. We +// originally tried to generate it via unix/linux/types.go with "type +// fileHandle C.struct_file_handle" but that generated empty structs +// for mips64 and mips64le. Instead, hard code it for now (it's the +// same everywhere else) until the mips64 generator issue is fixed. +type fileHandle struct { + Bytes uint32 + Type int32 +} + +// FileHandle represents the C struct file_handle used by +// name_to_handle_at (see NameToHandleAt) and open_by_handle_at (see +// OpenByHandleAt). 
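// Usage sketch for the FileHandle type declared just below together with the
// NameToHandleAt/OpenByHandleAt wrappers that follow (illustrative only; the
// path is arbitrary and open_by_handle_at normally needs CAP_DAC_READ_SEARCH):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Obtain a handle for a path; mountID identifies the filesystem it lives on.
	handle, mountID, err := unix.NameToHandleAt(unix.AT_FDCWD, "/etc/hostname", 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("handle size:", handle.Size(), "mount id:", mountID)

	// open_by_handle_at needs any open descriptor on the same mount.
	mountFD, err := unix.Open("/etc", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(mountFD)

	fd, err := unix.OpenByHandleAt(mountFD, handle, unix.O_RDONLY)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)
}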
+type FileHandle struct { + *fileHandle +} + +// NewFileHandle constructs a FileHandle. +func NewFileHandle(handleType int32, handle []byte) FileHandle { + const hdrSize = unsafe.Sizeof(fileHandle{}) + buf := make([]byte, hdrSize+uintptr(len(handle))) + copy(buf[hdrSize:], handle) + fh := (*fileHandle)(unsafe.Pointer(&buf[0])) + fh.Type = handleType + fh.Bytes = uint32(len(handle)) + return FileHandle{fh} +} + +func (fh *FileHandle) Size() int { return int(fh.fileHandle.Bytes) } +func (fh *FileHandle) Type() int32 { return fh.fileHandle.Type } +func (fh *FileHandle) Bytes() []byte { + n := fh.Size() + if n == 0 { + return nil + } + return (*[1 << 30]byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&fh.fileHandle.Type)) + 4))[:n:n] +} + +// NameToHandleAt wraps the name_to_handle_at system call; it obtains +// a handle for a path name. +func NameToHandleAt(dirfd int, path string, flags int) (handle FileHandle, mountID int, err error) { + var mid _C_int + // Try first with a small buffer, assuming the handle will + // only be 32 bytes. + size := uint32(32 + unsafe.Sizeof(fileHandle{})) + didResize := false + for { + buf := make([]byte, size) + fh := (*fileHandle)(unsafe.Pointer(&buf[0])) + fh.Bytes = size - uint32(unsafe.Sizeof(fileHandle{})) + err = nameToHandleAt(dirfd, path, fh, &mid, flags) + if err == EOVERFLOW { + if didResize { + // We shouldn't need to resize more than once + return + } + didResize = true + size = fh.Bytes + uint32(unsafe.Sizeof(fileHandle{})) + continue + } + if err != nil { + return + } + return FileHandle{fh}, int(mid), nil + } +} + +// OpenByHandleAt wraps the open_by_handle_at system call; it opens a +// file via a handle as previously returned by NameToHandleAt. +func OpenByHandleAt(mountFD int, handle FileHandle, flags int) (fd int, err error) { + return openByHandleAt(mountFD, handle.fileHandle, flags) +} + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 3a3c37b4c8b6d..f626794439bb3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -272,3 +272,16 @@ func SyncFileRange(fd int, off int64, n int64, flags int) error { // order of their arguments. return armSyncFileRange(fd, flags, off, n) } + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. 
+ cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 5240e16e4b34f..8f4c320eba8a2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -120,9 +120,30 @@ func Pipe(p []int) (err error) { return } -//sys getdents(fd int, buf []byte) (n int, err error) +//sys Getdents(fd int, buf []byte) (n int, err error) func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return getdents(fd, buf) + n, err = Getdents(fd, buf) + if err != nil || basep == nil { + return + } + + var off int64 + off, err = Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + *basep = ^uintptr(0) + return + } + *basep = uintptr(off) + if unsafe.Sizeof(*basep) == 8 { + return + } + if off>>4 != 0 { + // We can't stuff the offset back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO is allowed by getdirentries. + err = EIO + } + return } const ImplementsGetwd = true diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index c8648ec026787..276c93bef41ea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -89,9 +89,30 @@ func Pipe(p []int) (err error) { return } -//sys getdents(fd int, buf []byte) (n int, err error) +//sys Getdents(fd int, buf []byte) (n int, err error) func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return getdents(fd, buf) + n, err = Getdents(fd, buf) + if err != nil || basep == nil { + return + } + + var off int64 + off, err = Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + *basep = ^uintptr(0) + return + } + *basep = uintptr(off) + if unsafe.Sizeof(*basep) == 8 { + return + } + if off>>4 != 0 { + // We can't stuff the offset back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO was allowed by getdirentries. + err = EIO + } + return } const ImplementsGetwd = true diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go new file mode 100644 index 0000000000000..0fb39cf5eb121 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64,openbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of openbsd/amd64 the syscall is called sysctl instead of __sysctl. 
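// The NetBSD/OpenBSD Getdirentries emulation above now builds on the exported
// Getdents plus Seek. A hedged sketch of reading a directory straight through
// Getdents and ParseDirent ("/tmp" is an arbitrary example path):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/tmp", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	buf := make([]byte, 4096)
	var names []string
	for {
		n, err := unix.Getdents(fd, buf)
		if err != nil {
			log.Fatal(err)
		}
		if n == 0 {
			break
		}
		// ParseDirent extracts entry names, skipping "." and "..".
		_, _, names = unix.ParseDirent(buf[:n], -1, names)
	}
	fmt.Println(names)
}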
+const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/openbsd_unveil.go b/vendor/golang.org/x/sys/unix/unveil_openbsd.go similarity index 98% rename from vendor/golang.org/x/sys/unix/openbsd_unveil.go rename to vendor/golang.org/x/sys/unix/unveil_openbsd.go index aebc2dc5768f4..168d5ae77914b 100644 --- a/vendor/golang.org/x/sys/unix/openbsd_unveil.go +++ b/vendor/golang.org/x/sys/unix/unveil_openbsd.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build openbsd - package unix import ( diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index 4b7b965027da3..1def8a5812657 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -926,6 +926,8 @@ const ( TCSETSF = 0x5404 TCSETSW = 0x5403 TCXONC = 0x540b + TIMER_ABSTIME = 0x3e7 + TIMER_MAX = 0x20 TIOC = 0x5400 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index ed04fd1b77db9..03187dea98f39 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -3,7 +3,7 @@ // +build ppc64,aix -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -maix64 _const.go package unix @@ -926,6 +926,8 @@ const ( TCSETSF = 0x5404 TCSETSW = 0x5403 TCXONC = 0x540b + TIMER_ABSTIME = 0x3e7 + TIMER_MAX = 0x20 TIOC = 0x5400 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 9e99d67cb8a38..881e69f128720 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -197,10 +197,59 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -208,8 +257,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + 
BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -223,20 +280,33 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -320,6 +390,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -497,6 +571,7 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -505,8 +580,12 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -520,6 +599,10 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -528,6 +611,7 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1052,6 +1136,15 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1487,6 +1580,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1957,6 +2051,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2005,6 +2100,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2016,9 +2113,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2111,6 +2216,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ 
-2312,8 +2419,10 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2462,6 +2571,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index e3091f1e30b64..039b007d7b880 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -197,10 +197,59 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -208,8 +257,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -223,20 +280,33 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -320,6 +390,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -497,6 +571,7 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 
FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -505,8 +580,12 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -520,6 +599,10 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -528,6 +611,7 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1052,6 +1136,15 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1487,6 +1580,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1958,6 +2052,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2006,6 +2101,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2017,9 +2114,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2112,6 +2217,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2313,8 +2420,10 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2462,6 +2571,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a75dfebcc48cc..97ed569a2ac0b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -197,10 +197,59 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW 
= 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -208,8 +257,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -223,20 +280,33 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -320,6 +390,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -497,6 +571,7 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -505,8 +580,12 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -520,6 +599,10 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -528,6 +611,7 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1050,6 +1134,15 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 
MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1485,6 +1578,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1964,6 +2058,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2012,6 +2107,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2023,9 +2120,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2118,6 +2223,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2319,8 +2426,10 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2468,6 +2577,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 393ad7c91b28f..d47f3ba6a188e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -197,10 +197,59 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -208,8 +257,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 
0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -223,20 +280,33 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -320,6 +390,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -499,6 +573,7 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -507,8 +582,12 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -522,6 +601,10 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -530,6 +613,7 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1053,6 +1137,15 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1488,6 +1581,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1948,6 +2042,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -1996,6 +2091,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2007,9 +2104,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2103,6 +2208,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 
0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2304,8 +2411,10 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2453,6 +2562,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index ba1beb90935e0..0ae030ee4e072 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -197,10 +197,59 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -208,8 +257,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -223,20 +280,33 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -320,6 +390,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -497,6 +571,7 @@ const ( FAN_ALL_MARK_FLAGS 
= 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -505,8 +580,12 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -520,6 +599,10 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -528,6 +611,7 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1050,6 +1134,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1485,6 +1578,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1957,6 +2051,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe @@ -2005,6 +2100,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -2016,10 +2113,18 @@ const ( SO_SNDBUFFORCE = 0x1f SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 SO_STYLE = 0x1008 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2111,6 +2216,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2314,8 +2421,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2464,6 +2573,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index efba3e5c9df07..91b49dddbed75 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -197,10 +197,59 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 
0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -208,8 +257,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -223,20 +280,33 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -320,6 +390,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -497,6 +571,7 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -505,8 +580,12 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -520,6 +599,10 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -528,6 +611,7 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1050,6 +1134,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 
0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1485,6 +1578,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1957,6 +2051,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe @@ -2005,6 +2100,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -2016,10 +2113,18 @@ const ( SO_SNDBUFFORCE = 0x1f SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 SO_STYLE = 0x1008 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2111,6 +2216,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2314,8 +2421,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2464,6 +2573,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index d3f6e90652499..7f1ef04eba9d6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -197,10 +197,59 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + 
BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -208,8 +257,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -223,20 +280,33 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -320,6 +390,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -497,6 +571,7 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -505,8 +580,12 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -520,6 +599,10 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -528,6 +611,7 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1050,6 +1134,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1485,6 +1578,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1957,6 +2051,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe @@ -2005,6 +2100,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -2016,10 +2113,18 @@ const ( SO_SNDBUFFORCE = 0x1f SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 SO_STYLE = 0x1008 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 
SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2111,6 +2216,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2314,8 +2421,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2464,6 +2573,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7275cd876b317..724a244fd3600 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -197,10 +197,59 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -208,8 +257,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -223,20 +280,33 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 
0x2 BS0 = 0x0 @@ -320,6 +390,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -497,6 +571,7 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -505,8 +580,12 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -520,6 +599,10 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -528,6 +611,7 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1050,6 +1134,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1485,6 +1578,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1957,6 +2051,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe @@ -2005,6 +2100,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -2016,10 +2113,18 @@ const ( SO_SNDBUFFORCE = 0x1f SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 SO_STYLE = 0x1008 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2111,6 +2216,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2314,8 +2421,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2464,6 +2573,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 7586a134ef754..250446292b9d5 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -197,10 +197,59 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -208,8 +257,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -223,20 +280,33 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -320,6 +390,10 @@ const ( CRDLY = 0x3000 CREAD = 0x800 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -497,6 +571,7 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -505,8 +580,12 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -520,6 +599,10 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -528,6 +611,7 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 
FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1049,6 +1133,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 @@ -1487,6 +1580,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2015,6 +2109,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2063,6 +2158,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVTIMEO = 0x12 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x12 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2074,9 +2171,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x11 SO_SNDTIMEO = 0x13 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x13 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2167,6 +2272,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2374,8 +2481,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2523,6 +2632,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index b861ec7834724..e7c49911b4ce9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -197,10 +197,59 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + 
BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -208,8 +257,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -223,20 +280,33 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -320,6 +390,10 @@ const ( CRDLY = 0x3000 CREAD = 0x800 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -497,6 +571,7 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -505,8 +580,12 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -520,6 +599,10 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -528,6 +611,7 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1049,6 +1133,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 @@ -1487,6 +1580,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2015,6 +2109,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2063,6 +2158,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVTIMEO = 0x12 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x12 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf 
SO_RXQ_OVFL = 0x28 @@ -2074,9 +2171,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x11 SO_SNDTIMEO = 0x13 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x13 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2167,6 +2272,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2374,8 +2481,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2523,6 +2632,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index a321ec23f4662..0373d65ae790b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -197,10 +197,59 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -208,8 +257,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -223,20 +280,33 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -320,6 +390,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -497,6 +571,7 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -505,8 +580,12 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -520,6 +599,10 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -528,6 +611,7 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1050,6 +1134,15 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1485,6 +1578,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -1945,6 +2039,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -1993,6 +2088,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2004,9 +2101,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2099,6 +2204,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2300,8 +2407,10 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2449,6 +2558,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 
XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index f6c99164ffcca..b2ed7ee6a10a1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -197,10 +197,59 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -208,8 +257,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -223,20 +280,33 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -320,6 +390,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -497,6 +571,7 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -505,8 +580,12 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -520,6 +599,10 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + 
FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -528,6 +611,7 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1050,6 +1134,15 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1485,6 +1578,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2018,6 +2112,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2066,6 +2161,8 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2077,9 +2174,17 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2172,6 +2277,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2373,8 +2480,10 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2522,6 +2631,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c1e95e29cbcaf..58067c529a80e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -200,10 +200,59 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 + BPF_ALU64 = 0x7 BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 
0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -211,8 +260,16 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -226,20 +283,33 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 + BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 + BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -323,6 +393,10 @@ const ( CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 CS5 = 0x0 CS6 = 0x10 CS7 = 0x20 @@ -501,6 +575,7 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -509,8 +584,12 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -524,6 +603,10 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -532,6 +615,7 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1054,6 +1138,15 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 @@ -1489,6 +1582,7 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2010,6 +2104,7 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x35 SO_ATTACH_REUSEPORT_EBPF = 0x36 SO_BINDTODEVICE = 0xd + SO_BINDTOIFINDEX = 0x41 SO_BPF_EXTENSIONS = 0x32 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0x400 @@ -2058,6 +2153,8 
@@ const ( SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 SO_RCVTIMEO = 0x2000 + SO_RCVTIMEO_NEW = 0x44 + SO_RCVTIMEO_OLD = 0x2000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x24 @@ -2069,9 +2166,17 @@ const ( SO_SNDBUFFORCE = 0x100a SO_SNDLOWAT = 0x1000 SO_SNDTIMEO = 0x4000 + SO_SNDTIMEO_NEW = 0x45 + SO_SNDTIMEO_OLD = 0x4000 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x23 + SO_TIMESTAMPING_NEW = 0x43 + SO_TIMESTAMPING_OLD = 0x23 SO_TIMESTAMPNS = 0x21 + SO_TIMESTAMPNS_NEW = 0x42 + SO_TIMESTAMPNS_OLD = 0x21 + SO_TIMESTAMP_NEW = 0x46 + SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3f SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2163,6 +2268,8 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2362,8 +2469,10 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2511,6 +2620,7 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go new file mode 100644 index 0000000000000..ec5f92de8883f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -0,0 +1,1789 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm64,openbsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ALTWERASE = 0x200 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc010427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80104277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x8010426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 + CREAD = 0x800 + CRTSCTS = 0x10000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCOSFPFLUSH = 0x2000444e + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + 
DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 
0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x8 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + 
F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MBIM = 0xfa + 
IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a 
+ IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + IUCLC = 0x1000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0xfff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x0 + MAP_SHARED = 0x1 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_DOOMED = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXRDONLY = 0x80 + 
MNT_FORCE = 0x80000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x8000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 + MNT_SYNCHRONOUS = 0x2 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x400ffff + MNT_WAIT = 0x1 + MNT_WANTRDWR = 0x2000000 + MNT_WXALLOWED = 0x800 + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x7 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OLCUC = 0x20 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb + RTAX_BRD = 0x7 + RTAX_DNS = 0xc + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xf + RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd + RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 + RTA_BRD = 0x80 + RTA_DNS = 0x1000 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 + RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x110fc08 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + 
RTM_80211INFO = 0x15 + RTM_ADD = 0x1 + RTM_BFD = 0x12 + RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_INVALIDATE = 0x11 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e + SIOCBRDGGCACHE = 0xc0186941 + SIOCBRDGGFD = 0xc0186952 + SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGIFFLGS = 0xc060693e + SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGPARAM = 0xc0406958 + SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGRL = 0xc030694f + SIOCBRDGGTO = 0xc0186946 + SIOCBRDGIFS = 0xc0606942 + SIOCBRDGRTS = 0xc0206943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80186940 + SIOCBRDGSFD = 0x80186952 + SIOCBRDGSHT = 0x80186951 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a + SIOCBRDGSMA = 0x80186953 + SIOCBRDGSPRI = 0x80186950 + SIOCBRDGSPROTO = 0x8018695a + SIOCBRDGSTO = 0x80186945 + SIOCBRDGSTXHC = 0x80186959 + SIOCDELLABEL = 0x80206997 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPARENT = 0x802069b4 + SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGETVLAN = 0xc0206990 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc028698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0406938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 + SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 + SIOCGVNETID = 0xc02069a7 + 
SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8028698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFXFLAGS = 0x8020699d + SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf + SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 + SIOCSVNETID = 0x802069a6 + SIOCSWGDPID = 0xc018695b + SIOCSWGMAXFLOW = 0xc0186960 + SIOCSWGMAXGROUP = 0xc018695d + SIOCSWSDPID = 0x8018695c + SIOCSWSPORTNO = 0xc060695f + SOCK_CLOEXEC = 0x8000 + SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCP_MAXBURST = 0x4 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + 
TIOCGTSTAMP = 0x4010745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b + TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WUNTRACED = 0x2 + XCASE = 0x1000000 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5f) + ELOOP = syscall.Errno(0x3e) + EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = 
syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + 
{22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + 
name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index 79f6e0566e316..ed657ff1bc0bb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -83,6 +83,8 @@ int lstat(uintptr_t, uintptr_t); int pause(); int pread64(int, uintptr_t, size_t, long long); int pwrite64(int, uintptr_t, size_t, long long); +#define c_select select +int select(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t); int pselect(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t); int setregid(int, int); int setreuid(int, int); @@ -103,8 +105,8 @@ int getpeername(int, uintptr_t, uintptr_t); int getsockname(int, uintptr_t, uintptr_t); int recvfrom(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); int sendto(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); -int recvmsg(int, uintptr_t, int); -int sendmsg(int, uintptr_t, int); +int nrecvmsg(int, uintptr_t, int); +int nsendmsg(int, uintptr_t, int); int munmap(uintptr_t, uintptr_t); int madvise(uintptr_t, size_t, int); int mprotect(uintptr_t, size_t, int); @@ -118,6 +120,8 @@ int poll(uintptr_t, int, int); int gettimeofday(uintptr_t, uintptr_t); int time(uintptr_t); int utime(uintptr_t, uintptr_t); +unsigned long long getsystemcfg(int); +int umount(uintptr_t); int getrlimit64(int, uintptr_t); int setrlimit64(int, uintptr_t); long long lseek64(int, long long, int); @@ -855,7 +859,7 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { +func fstat(fd int, stat *Stat_t) (err error) { r0, er := C.fstat(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) if r0 == -1 && er != nil { err = er @@ -865,7 +869,7 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { +func fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) r0, er := C.fstatat(C.int(dirfd), C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat))), C.int(flags)) if r0 == -1 && er != nil { @@ -949,7 +953,7 @@ func Listen(s int, n int) (err error) { // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func lstat(path string, stat *Stat_t) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) r0, er := C.lstat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) if r0 == -1 && er != nil { @@ -1004,6 +1008,17 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, er := C.c_select(C.int(nfd), C.uintptr_t(uintptr(unsafe.Pointer(r))), C.uintptr_t(uintptr(unsafe.Pointer(w))), C.uintptr_t(uintptr(unsafe.Pointer(e))), C.uintptr_t(uintptr(unsafe.Pointer(timeout)))) + n = int(r0) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, er := C.pselect(C.int(nfd), C.uintptr_t(uintptr(unsafe.Pointer(r))), C.uintptr_t(uintptr(unsafe.Pointer(w))), C.uintptr_t(uintptr(unsafe.Pointer(e))), C.uintptr_t(uintptr(unsafe.Pointer(timeout))), C.uintptr_t(uintptr(unsafe.Pointer(sigmask)))) n = int(r0) @@ -1056,9 +1071,9 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { +func stat(path string, statptr *Stat_t) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.stat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) + r0, er := C.stat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(statptr)))) if r0 == -1 && er != nil { err = er } @@ -1225,7 +1240,7 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, er := C.recvmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) + r0, er := C.nrecvmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) n = int(r0) if r0 == -1 && er != nil { err = er @@ -1236,7 +1251,7 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, er := C.sendmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) + r0, er := C.nsendmsg(C.int(s), C.uintptr_t(uintptr(unsafe.Pointer(msg))), C.int(flags)) n = int(r0) if r0 == -1 && er != nil { err = er @@ -1409,6 +1424,25 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsystemcfg(label int) (n uint64) { + r0, _ := C.getsystemcfg(C.int(label)) + n = uint64(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func umount(target string) (err error) { + _p0 := uintptr(unsafe.Pointer(C.CString(target))) + r0, er := C.umount(C.uintptr_t(_p0)) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrlimit(resource int, rlim *Rlimit) (err error) { r0, er := C.getrlimit64(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) if r0 == -1 && er != nil { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go 
b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 52802bfc17a6c..664b293b43170 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -803,7 +803,7 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { +func fstat(fd int, stat *Stat_t) (err error) { _, e1 := callfstat(fd, uintptr(unsafe.Pointer(stat))) if e1 != 0 { err = errnoErr(e1) @@ -813,7 +813,7 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { +func fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -905,7 +905,7 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func lstat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -960,6 +960,17 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, e1 := callselect(nfd, uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, e1 := callpselect(nfd, uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) n = int(r0) @@ -1012,13 +1023,13 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { +func stat(path string, statptr *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, e1 := callstat(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat))) + _, e1 := callstat(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statptr))) if e1 != 0 { err = errnoErr(e1) } @@ -1189,7 +1200,7 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, e1 := callrecvmsg(s, uintptr(unsafe.Pointer(msg)), flags) + r0, e1 := callnrecvmsg(s, uintptr(unsafe.Pointer(msg)), flags) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1200,7 +1211,7 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, e1 := callsendmsg(s, uintptr(unsafe.Pointer(msg)), flags) + r0, e1 := callnsendmsg(s, uintptr(unsafe.Pointer(msg)), flags) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1375,6 +1386,21 @@ func Getsystemcfg(label int) (n uint64) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func umount(target string) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(target) + if err != nil { + return + } + _, e1 := callumount(uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrlimit(resource int, rlim *Rlimit) (err error) { _, e1 := callgetrlimit(resource, uintptr(unsafe.Pointer(rlim))) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index a2b24e4a430f1..4b3a8ad7bec18 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -85,6 +85,7 @@ import ( //go:cgo_import_dynamic libc_pause pause "libc.a/shr_64.o" //go:cgo_import_dynamic libc_pread64 pread64 "libc.a/shr_64.o" //go:cgo_import_dynamic libc_pwrite64 pwrite64 "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_select select "libc.a/shr_64.o" //go:cgo_import_dynamic libc_pselect pselect "libc.a/shr_64.o" //go:cgo_import_dynamic libc_setregid setregid "libc.a/shr_64.o" //go:cgo_import_dynamic libc_setreuid setreuid "libc.a/shr_64.o" @@ -105,8 +106,8 @@ import ( //go:cgo_import_dynamic libc_getsockname getsockname "libc.a/shr_64.o" //go:cgo_import_dynamic libc_recvfrom recvfrom "libc.a/shr_64.o" //go:cgo_import_dynamic libc_sendto sendto "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_nrecvmsg nrecvmsg "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_nsendmsg nsendmsg "libc.a/shr_64.o" //go:cgo_import_dynamic libc_munmap munmap "libc.a/shr_64.o" //go:cgo_import_dynamic libc_madvise madvise "libc.a/shr_64.o" //go:cgo_import_dynamic libc_mprotect mprotect "libc.a/shr_64.o" @@ -121,6 +122,7 @@ import ( //go:cgo_import_dynamic libc_time time "libc.a/shr_64.o" //go:cgo_import_dynamic libc_utime utime "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_umount umount "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.a/shr_64.o" //go:cgo_import_dynamic libc_setrlimit setrlimit "libc.a/shr_64.o" //go:cgo_import_dynamic libc_lseek lseek "libc.a/shr_64.o" @@ -201,6 +203,7 @@ import ( //go:linkname libc_pause libc_pause //go:linkname libc_pread64 libc_pread64 //go:linkname libc_pwrite64 libc_pwrite64 +//go:linkname libc_select libc_select //go:linkname libc_pselect libc_pselect //go:linkname libc_setregid libc_setregid //go:linkname libc_setreuid libc_setreuid @@ -221,8 +224,8 @@ import ( //go:linkname libc_getsockname libc_getsockname //go:linkname libc_recvfrom libc_recvfrom //go:linkname libc_sendto libc_sendto -//go:linkname libc_recvmsg libc_recvmsg -//go:linkname libc_sendmsg libc_sendmsg +//go:linkname libc_nrecvmsg libc_nrecvmsg +//go:linkname libc_nsendmsg libc_nsendmsg //go:linkname libc_munmap libc_munmap //go:linkname libc_madvise libc_madvise //go:linkname libc_mprotect libc_mprotect @@ -237,6 +240,7 @@ import ( //go:linkname libc_time libc_time //go:linkname libc_utime libc_utime //go:linkname libc_getsystemcfg libc_getsystemcfg +//go:linkname libc_umount libc_umount //go:linkname libc_getrlimit libc_getrlimit //go:linkname libc_setrlimit libc_setrlimit //go:linkname libc_lseek libc_lseek @@ -320,6 +324,7 @@ var ( libc_pause, libc_pread64, libc_pwrite64, + libc_select, libc_pselect, libc_setregid, libc_setreuid, @@ -340,8 +345,8 @@ var ( libc_getsockname, libc_recvfrom, libc_sendto, - libc_recvmsg, - 
libc_sendmsg, + libc_nrecvmsg, + libc_nsendmsg, libc_munmap, libc_madvise, libc_mprotect, @@ -356,6 +361,7 @@ var ( libc_time, libc_utime, libc_getsystemcfg, + libc_umount, libc_getrlimit, libc_setrlimit, libc_lseek, @@ -893,6 +899,13 @@ func callpwrite64(fd int, _p0 uintptr, _lenp0 int, offset int64) (r1 uintptr, e1 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_select)), 5, uintptr(nfd), r, w, e, timeout, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callpselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr, sigmask uintptr) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_pselect)), 6, uintptr(nfd), r, w, e, timeout, sigmask) return @@ -928,8 +941,8 @@ func callsplice(rfd int, roff uintptr, wfd int, woff uintptr, len int, flags int // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_stat)), 2, _p0, stat, 0, 0, 0, 0) +func callstat(_p0 uintptr, statptr uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_stat)), 2, _p0, statptr, 0, 0, 0, 0) return } @@ -1033,15 +1046,15 @@ func callsendto(s int, _p0 uintptr, _lenp0 int, flags int, to uintptr, addrlen u // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_recvmsg)), 3, uintptr(s), msg, uintptr(flags), 0, 0, 0) +func callnrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_nrecvmsg)), 3, uintptr(s), msg, uintptr(flags), 0, 0, 0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_sendmsg)), 3, uintptr(s), msg, uintptr(flags), 0, 0, 0) +func callnsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_nsendmsg)), 3, uintptr(s), msg, uintptr(flags), 0, 0, 0) return } @@ -1145,6 +1158,13 @@ func callgetsystemcfg(label int) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callumount(_p0 uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_umount)), 1, _p0, 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getrlimit)), 2, uintptr(resource), rlim, 0, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 5491c89e96826..cde4dbc5f543c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -83,6 +83,8 @@ int lstat(uintptr_t, uintptr_t); int pause(); int pread64(int, uintptr_t, size_t, long long); int pwrite64(int, uintptr_t, size_t, long long); +#define c_select select +int select(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t); int pselect(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t); int 
setregid(int, int); int setreuid(int, int); @@ -103,8 +105,8 @@ int getpeername(int, uintptr_t, uintptr_t); int getsockname(int, uintptr_t, uintptr_t); int recvfrom(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); int sendto(int, uintptr_t, size_t, int, uintptr_t, uintptr_t); -int recvmsg(int, uintptr_t, int); -int sendmsg(int, uintptr_t, int); +int nrecvmsg(int, uintptr_t, int); +int nsendmsg(int, uintptr_t, int); int munmap(uintptr_t, uintptr_t); int madvise(uintptr_t, size_t, int); int mprotect(uintptr_t, size_t, int); @@ -119,6 +121,7 @@ int gettimeofday(uintptr_t, uintptr_t); int time(uintptr_t); int utime(uintptr_t, uintptr_t); unsigned long long getsystemcfg(int); +int umount(uintptr_t); int getrlimit(int, uintptr_t); int setrlimit(int, uintptr_t); long long lseek(int, long long, int); @@ -732,6 +735,14 @@ func callpwrite64(fd int, _p0 uintptr, _lenp0 int, offset int64) (r1 uintptr, e1 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.c_select(C.int(nfd), C.uintptr_t(r), C.uintptr_t(w), C.uintptr_t(e), C.uintptr_t(timeout))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callpselect(nfd int, r uintptr, w uintptr, e uintptr, timeout uintptr, sigmask uintptr) (r1 uintptr, e1 Errno) { r1 = uintptr(C.pselect(C.int(nfd), C.uintptr_t(r), C.uintptr_t(w), C.uintptr_t(e), C.uintptr_t(timeout), C.uintptr_t(sigmask))) e1 = syscall.GetErrno() @@ -772,8 +783,8 @@ func callsplice(rfd int, roff uintptr, wfd int, woff uintptr, len int, flags int // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.stat(C.uintptr_t(_p0), C.uintptr_t(stat))) +func callstat(_p0 uintptr, statptr uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.stat(C.uintptr_t(_p0), C.uintptr_t(statptr))) e1 = syscall.GetErrno() return } @@ -892,16 +903,16 @@ func callsendto(s int, _p0 uintptr, _lenp0 int, flags int, to uintptr, addrlen u // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.recvmsg(C.int(s), C.uintptr_t(msg), C.int(flags))) +func callnrecvmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.nrecvmsg(C.int(s), C.uintptr_t(msg), C.int(flags))) e1 = syscall.GetErrno() return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.sendmsg(C.int(s), C.uintptr_t(msg), C.int(flags))) +func callnsendmsg(s int, msg uintptr, flags int) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.nsendmsg(C.int(s), C.uintptr_t(msg), C.int(flags))) e1 = syscall.GetErrno() return } @@ -1020,6 +1031,14 @@ func callgetsystemcfg(label int) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callumount(_p0 uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.umount(C.uintptr_t(_p0))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { r1 = uintptr(C.getrlimit(C.int(resource), C.uintptr_t(rlim))) e1 = syscall.GetErrno() diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 
80903e47b65c2..2707c0131794b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1019,7 +1019,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index cd250ff0e240e..8e3c0cea9a2d5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1019,7 +1019,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 290a9c2cb06c3..641f86a034cdb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1019,7 +1019,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index c6df9d2e8f62b..68fbccf7271b1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1019,7 +1019,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 9855afa767938..81d90a27e4c48 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -1381,8 +1381,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1679,6 +1683,32 @@ func 
faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 773e251185281..0c184586bf412 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -1381,8 +1381,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1679,6 +1683,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index ccea621d48818..18ef8a6268a95 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -1381,8 +1381,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - 
SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1679,6 +1683,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -2340,3 +2370,18 @@ func armSyncFileRange(fd int, flags int, off int64, n int64) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 712c7a3265be7..2fba25d05b9df 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -1381,8 +1381,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1679,6 +1683,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 68b325100b4b6..c330f4ffa0a5d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -1381,8 +1381,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1679,6 +1683,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index a8be4076cf6d4..8e9e0098a1128 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -1381,8 +1381,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1679,6 +1683,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, 
mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 1351028adc22f..c22d62607df2c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -1381,8 +1381,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1679,6 +1683,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 327b4f97a84f0..700a99e97960c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -1381,8 +1381,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err 
error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1679,6 +1683,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index c145bd3ceca23..cec4c106c6275 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1381,8 +1381,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1679,6 +1683,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index cd8179c7a5efe..677ef5a697deb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1381,8 +1381,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1679,6 +1683,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 2e90cf0f27c7c..565034c54bce9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -1381,8 +1381,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1679,6 +1683,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index fe9c7e12b09fd..7feb2c6b6ba57 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -1381,8 +1381,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1679,6 +1683,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index d4a2100b09f3d..07655c4554b79 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -1381,8 +1381,12 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Signalfd(fd int, mask *Sigset_t, flags int) { - SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1679,6 +1683,32 @@ func faccessat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), 
uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 642db7670a209..7e05826647e4e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 59585fee3541f..d94d076aa019e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 6ec31434b214b..cf5bf3d05467c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 603d144334966..243a9317cf287 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 6a489fac0a69e..a9532d07870b1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { 
_p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 30cba4347c121..0cb9f01774b1c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index fa1beda33e38d..6fc99b54947de 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdents(fd int, buf []byte) (n int, err error) { +func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go new file mode 100644 index 0000000000000..27878a72b83c0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -0,0 +1,1692 @@ +// go run mksyscall.go -openbsd -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build openbsd,arm64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } 
+ return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s 
int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := 
Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, 
uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil 
{ + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrtable() (rtable int, err error) { + r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 
{ + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), 
uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { + _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio 
int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrtable(rtable int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index b005031abed3c..37dcc74c2de52 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -1,6 +1,8 @@ // mksysctl_openbsd.pl // Code generated by the command above; DO NOT EDIT. +// +build 386,openbsd + package unix type mibentry struct { diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index d014451c9d8e9..fe6caa6eb7f2f 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -1,4 +1,4 @@ -// mksysctl_openbsd.pl +// go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. // +build amd64,openbsd diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index b005031abed3c..6eb8c0b086a62 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -1,6 +1,8 @@ -// mksysctl_openbsd.pl +// go run mksysctl_openbsd.go // Code generated by the command above; DO NOT EDIT. +// +build arm,openbsd + package unix type mibentry struct { diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go new file mode 100644 index 0000000000000..ba4304fd23388 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -0,0 +1,275 @@ +// go run mksysctl_openbsd.go +// Code generated by the command above; DO NOT EDIT. 
+ +// +build arm64,openbsd + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.allowkmem", []_C_int{1, 52}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cpustats", []_C_int{1, 85}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + 
{"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", 
[]_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + 
{"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 55c3a32945d35..9474974b657d7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build 386,freebsd @@ -118,8 +118,6 @@ const ( SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -133,10 +131,6 @@ const ( SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); } - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); } - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -164,6 +158,7 @@ const ( SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } @@ -197,13 +192,10 @@ const ( SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } - SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } + SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } - SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } - SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } - SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } @@ -236,7 +228,7 @@ const ( SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, 
const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } @@ -258,7 +250,7 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } @@ -293,8 +285,6 @@ const ( SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } @@ -400,4 +390,7 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } + SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } + SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index b39be6cb8f4a0..48a7beae7bb50 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build amd64,freebsd @@ -118,8 +118,6 @@ const ( SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -133,10 +131,6 @@ const ( SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); } - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); } - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -164,6 +158,7 @@ const ( SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } @@ -197,13 +192,10 @@ const ( SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } - SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } + SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } - SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } - SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } - SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } @@ -236,7 +228,7 @@ const ( SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, 
const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } @@ -258,7 +250,7 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } @@ -293,8 +285,6 @@ const ( SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } @@ -400,4 +390,7 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } + SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } + SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index 44ffd4ce5e980..4a6dfd4a74594 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build arm,freebsd @@ -118,8 +118,6 @@ const ( SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } - SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } - SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -133,10 +131,6 @@ const ( SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } - SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } - SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); } - SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); } - SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -164,6 +158,7 @@ const ( SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } @@ -197,13 +192,10 @@ const ( SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } - SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } + SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } - SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } - SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } - SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } @@ -236,7 +228,7 @@ const ( SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, 
const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } @@ -258,7 +250,7 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } @@ -293,8 +285,6 @@ const ( SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } - SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } - SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } @@ -400,4 +390,7 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } + SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } + SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 9f21e9550edd6..3e51af8edd210 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build arm64,freebsd @@ -7,13 +7,13 @@ package unix const ( // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int - SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ + SYS_EXIT = 1 // { void sys_exit(int rval); } exit sys_exit_args void SYS_FORK = 2 // { int fork(void); } - SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ - SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ + SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); } SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } SYS_CLOSE = 6 // { int close(int fd); } - SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); } SYS_LINK = 9 // { int link(char *path, char *link); } SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } @@ -21,20 +21,20 @@ const ( SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ + SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int SYS_GETPID = 20 // { pid_t getpid(void); } - SYS_MOUNT = 21 // { int mount(char *type, char *path, \ + SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } SYS_SETUID = 23 // { int setuid(uid_t uid); } SYS_GETUID = 24 // { uid_t getuid(void); } SYS_GETEUID = 25 // { uid_t geteuid(void); } - SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ - SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ - SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ - SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ - SYS_ACCEPT = 30 // { int accept(int s, \ - SYS_GETPEERNAME = 31 // { int getpeername(int fdes, \ - SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, struct sockaddr * __restrict from, __socklen_t * __restrict fromlenaddr); } + SYS_ACCEPT = 30 // { int accept(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen); } + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } SYS_ACCESS = 33 // { int access(char *path, int amode); } SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } @@ -42,56 +42,57 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } + SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } - SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ - SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int 
pid); } SYS_GETGID = 47 // { gid_t getgid(void); } - SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); } SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } SYS_ACCT = 51 // { int acct(char *path); } - SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ - SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); } + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); } SYS_REBOOT = 55 // { int reboot(int opt); } SYS_REVOKE = 56 // { int revoke(char *path); } SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } - SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ - SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int SYS_CHROOT = 61 // { int chroot(char *path); } - SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ - SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ - SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ - SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ - SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ + SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); } SYS_GETPGRP = 81 // { int getpgrp(void); } SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } - SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); } SYS_SWAPON = 85 // { int swapon(char *name); } - SYS_GETITIMER = 86 // { int getitimer(u_int which, \ + SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); } SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } - SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } SYS_FSYNC = 95 // { int fsync(int fd); } - SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ - SYS_SOCKET = 97 // { int socket(int domain, int type, \ - SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); } + SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); } 
SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } - SYS_BIND = 104 // { int bind(int s, caddr_t name, \ - SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ + SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); } + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); } SYS_LISTEN = 106 // { int listen(int s, int backlog); } - SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ - SYS_GETRUSAGE = 117 // { int getrusage(int who, \ - SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ - SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ - SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ - SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); } + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); } + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); } SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } @@ -99,24 +100,24 @@ const ( SYS_RENAME = 128 // { int rename(char *from, char *to); } SYS_FLOCK = 131 // { int flock(int fd, int how); } SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } - SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); } SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } - SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); } SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } SYS_RMDIR = 137 // { int rmdir(char *path); } - SYS_UTIMES = 138 // { int utimes(char *path, \ - SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ + SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); } + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); } SYS_SETSID = 147 // { int setsid(void); } - SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); } SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); } SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); } - SYS_LGETFH = 160 // { int lgetfh(char *fname, \ - SYS_GETFH = 161 // { int getfh(char *fname, \ + SYS_LGETFH = 160 // { int lgetfh(char *fname, struct fhandle *fhp); } + SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); } SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } - SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ - SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, \ - SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, \ - SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, \ + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); } + SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } + 
SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } + SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -127,269 +128,269 @@ const ( SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } - SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ - SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ - SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } SYS_UNDELETE = 205 // { int undelete(char *path); } SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } SYS_GETPGID = 207 // { int getpgid(pid_t pid); } - SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ - SYS_SEMGET = 221 // { int semget(key_t key, int nsems, \ - SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, \ + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); } + SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } - SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, \ - SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, \ - SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, \ + SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } - SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, \ - SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ - SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ - SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ - SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ + SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } - SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ - SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ + 
SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); } + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct itimerspec *value); } SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } - SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ - SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, \ - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } - SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, \ - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } - SYS_LUTIMES = 276 // { int lutimes(char *path, \ + SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } - SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ - SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ - SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, \ + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int 
kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } - SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ - SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( \ - SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, \ + SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } - SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ - SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ - SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } SYS_SCHED_YIELD = 331 // { int sched_yield (void); } SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } - SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); } SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } - SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); } SYS_JAIL = 338 // { int jail(struct jail *jail); } - SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, \ + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, sigset_t *oset); } SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } - SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ - SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ - SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ - SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ - SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ - SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ - SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ - SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ - SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ - SYS___ACL_ACLCHECK_FD = 354 // { int 
__acl_aclcheck_fd(int filedes, \ - SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ - SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( \ - SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ - SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec *timeout); } + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, siginfo_t *info); } + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, acl_type_t type); } + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); } + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, \ - SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ - SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ - SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ + SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } SYS___SETUGID = 374 // { int __setugid(int flag); } SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } - SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, unsigned int iovcnt, int flags); } SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } SYS___MAC_SET_PROC = 385 // { int 
__mac_set_proc(struct mac *mac_p); } - SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ - SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ - SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ - SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ - SYS_KENV = 390 // { int kenv(int what, const char *name, \ - SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ - SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ - SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ - SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ - SYS_STATFS = 396 // { int statfs(char *path, \ + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, struct mac *mac_p); } + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, struct mac *mac_p); } + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, struct mac *mac_p); } + SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); } + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); } + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); } - SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, \ - SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, \ + SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, unsigned int value); } + SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, const char *name, int oflag, mode_t mode, unsigned int value); } SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); } SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); } SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); } - SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ - SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ - SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ - SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ - SYS_SIGACTION = 416 // { int sigaction(int sig, \ - SYS_SIGRETURN = 417 // { int sigreturn( \ + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int 
attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } + SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } + SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( \ - SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ + SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } - SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ - SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ - SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ - SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ - SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ - SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, acl_type_t type); } + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, int *sig); } + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, int flags); } SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } - SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ - SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, \ - SYS_THR_SUSPEND = 442 // { int thr_suspend( \ + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } + SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } - SYS_AUDIT = 445 // { int audit(const void *record, \ - SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ + SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, u_int length); } SYS_GETAUID = 447 // { int 
getauid(uid_t *auid); } SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } - SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ - SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } - SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, \ - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, \ - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, \ - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, \ - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, \ + SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); } - SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, lwpid_t lwpid, struct rtprio *rtp); } SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } - SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ - SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ - SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ - SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ - SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ - SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_PREAD = 475 // { ssize_t 
pread(int fd, void *buf, size_t nbyte, off_t offset); } + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, int whence); } SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } - SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, mode_t mode); } SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } - SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ - SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ - SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ - SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ - SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ - SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ - SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ - SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ - SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ - SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, cpusetid_t setid); } + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); } + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); } + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *mask); } + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, int flag); } + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ - SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ - SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ - SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } + SYS_SYMLINKAT = 502 // { int 
symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); } - SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ - SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, unsigned int iovcnt, int flags); } SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } - SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, \ - SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, \ - SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, \ + SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, struct shmid_ds *buf); } SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } - SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, int fd, cap_rights_t *rightsp); } SYS_CAP_ENTER = 516 // { int cap_enter(void); } SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } - SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ - SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *sm); } + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, size_t namelen); } SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } - SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ - SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ - SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ - SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ - SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ - SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ - SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ - SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ - SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ - SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ - SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ - SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ - SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ - SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ - SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ - SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ - SYS_ACCEPT4 = 541 // { int accept4(int s, \ + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_ADD_RULE = 528 // 
{ int rctl_add_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, off_t offset, off_t len); } + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, off_t len, int advice); } + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); } + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, cap_rights_t *rightsp); } + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, const u_long *cmds, size_t ncmds); } + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, u_long *cmds, size_t maxcmds); } + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, uint32_t fcntlrights); } + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, uint32_t *fcntlrightsp); } + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, int namelen); } + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, int namelen); } + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, u_long flags, int atflag); } + SYS_ACCEPT4 = 541 // { int accept4(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen, int flags); } SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); } - SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ - SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ - SYS_FUTIMENS = 546 // { int futimens(int fd, \ - SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, \ - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, \ + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, int com, void *data); } + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } + SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } + SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } + SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 8d17873de0fe0..33b6e4d1afcf8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -6,387 +6,421 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAITPID = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_TIME = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_BREAK = 17 - SYS_OLDSTAT = 18 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_UMOUNT = 22 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_STIME = 25 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_OLDFSTAT = 28 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_STTY = 31 - SYS_GTTY = 32 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_FTIME = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - 
SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_PROF = 44 - SYS_BRK = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_SIGNAL = 48 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_LOCK = 53 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_MPX = 56 - SYS_SETPGID = 57 - SYS_ULIMIT = 58 - SYS_OLDOLDUNAME = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SGETMASK = 68 - SYS_SSETMASK = 69 - SYS_SETREUID = 70 - SYS_SETREGID = 71 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRLIMIT = 76 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_GETGROUPS = 80 - SYS_SETGROUPS = 81 - SYS_SELECT = 82 - SYS_SYMLINK = 83 - SYS_OLDLSTAT = 84 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_READDIR = 89 - SYS_MMAP = 90 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_FCHOWN = 95 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_PROFIL = 98 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_IOPERM = 101 - SYS_SOCKETCALL = 102 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_OLDUNAME = 109 - SYS_IOPL = 110 - SYS_VHANGUP = 111 - SYS_IDLE = 112 - SYS_VM86OLD = 113 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_IPC = 117 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_MODIFY_LDT = 123 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_CREATE_MODULE = 127 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_GET_KERNEL_SYMS = 130 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_AFS_SYSCALL = 137 - SYS_SETFSUID = 138 - SYS_SETFSGID = 139 - SYS__LLSEEK = 140 - SYS_GETDENTS = 141 - SYS__NEWSELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_SETRESUID = 164 - SYS_GETRESUID = 165 - SYS_VM86 = 166 - SYS_QUERY_MODULE = 167 - SYS_POLL = 168 - SYS_NFSSERVCTL = 169 - SYS_SETRESGID = 170 - SYS_GETRESGID = 171 - SYS_PRCTL = 172 - SYS_RT_SIGRETURN = 173 - SYS_RT_SIGACTION = 174 - SYS_RT_SIGPROCMASK = 175 - SYS_RT_SIGPENDING = 176 - SYS_RT_SIGTIMEDWAIT = 177 - SYS_RT_SIGQUEUEINFO = 178 - SYS_RT_SIGSUSPEND = 179 - SYS_PREAD64 = 180 - SYS_PWRITE64 = 181 - SYS_CHOWN = 182 - SYS_GETCWD = 183 - SYS_CAPGET = 184 - SYS_CAPSET = 185 - SYS_SIGALTSTACK = 186 - SYS_SENDFILE = 187 - SYS_GETPMSG = 188 - SYS_PUTPMSG = 189 - SYS_VFORK = 190 - SYS_UGETRLIMIT = 191 - SYS_MMAP2 = 192 - SYS_TRUNCATE64 = 193 - SYS_FTRUNCATE64 = 194 - SYS_STAT64 = 195 - SYS_LSTAT64 = 196 - SYS_FSTAT64 = 197 - SYS_LCHOWN32 = 198 - SYS_GETUID32 = 199 - SYS_GETGID32 = 200 - SYS_GETEUID32 = 201 - SYS_GETEGID32 = 202 - SYS_SETREUID32 = 203 - SYS_SETREGID32 = 204 - SYS_GETGROUPS32 = 205 - SYS_SETGROUPS32 = 206 - SYS_FCHOWN32 = 207 - SYS_SETRESUID32 = 208 - SYS_GETRESUID32 = 209 
- SYS_SETRESGID32 = 210 - SYS_GETRESGID32 = 211 - SYS_CHOWN32 = 212 - SYS_SETUID32 = 213 - SYS_SETGID32 = 214 - SYS_SETFSUID32 = 215 - SYS_SETFSGID32 = 216 - SYS_PIVOT_ROOT = 217 - SYS_MINCORE = 218 - SYS_MADVISE = 219 - SYS_GETDENTS64 = 220 - SYS_FCNTL64 = 221 - SYS_GETTID = 224 - SYS_READAHEAD = 225 - SYS_SETXATTR = 226 - SYS_LSETXATTR = 227 - SYS_FSETXATTR = 228 - SYS_GETXATTR = 229 - SYS_LGETXATTR = 230 - SYS_FGETXATTR = 231 - SYS_LISTXATTR = 232 - SYS_LLISTXATTR = 233 - SYS_FLISTXATTR = 234 - SYS_REMOVEXATTR = 235 - SYS_LREMOVEXATTR = 236 - SYS_FREMOVEXATTR = 237 - SYS_TKILL = 238 - SYS_SENDFILE64 = 239 - SYS_FUTEX = 240 - SYS_SCHED_SETAFFINITY = 241 - SYS_SCHED_GETAFFINITY = 242 - SYS_SET_THREAD_AREA = 243 - SYS_GET_THREAD_AREA = 244 - SYS_IO_SETUP = 245 - SYS_IO_DESTROY = 246 - SYS_IO_GETEVENTS = 247 - SYS_IO_SUBMIT = 248 - SYS_IO_CANCEL = 249 - SYS_FADVISE64 = 250 - SYS_EXIT_GROUP = 252 - SYS_LOOKUP_DCOOKIE = 253 - SYS_EPOLL_CREATE = 254 - SYS_EPOLL_CTL = 255 - SYS_EPOLL_WAIT = 256 - SYS_REMAP_FILE_PAGES = 257 - SYS_SET_TID_ADDRESS = 258 - SYS_TIMER_CREATE = 259 - SYS_TIMER_SETTIME = 260 - SYS_TIMER_GETTIME = 261 - SYS_TIMER_GETOVERRUN = 262 - SYS_TIMER_DELETE = 263 - SYS_CLOCK_SETTIME = 264 - SYS_CLOCK_GETTIME = 265 - SYS_CLOCK_GETRES = 266 - SYS_CLOCK_NANOSLEEP = 267 - SYS_STATFS64 = 268 - SYS_FSTATFS64 = 269 - SYS_TGKILL = 270 - SYS_UTIMES = 271 - SYS_FADVISE64_64 = 272 - SYS_VSERVER = 273 - SYS_MBIND = 274 - SYS_GET_MEMPOLICY = 275 - SYS_SET_MEMPOLICY = 276 - SYS_MQ_OPEN = 277 - SYS_MQ_UNLINK = 278 - SYS_MQ_TIMEDSEND = 279 - SYS_MQ_TIMEDRECEIVE = 280 - SYS_MQ_NOTIFY = 281 - SYS_MQ_GETSETATTR = 282 - SYS_KEXEC_LOAD = 283 - SYS_WAITID = 284 - SYS_ADD_KEY = 286 - SYS_REQUEST_KEY = 287 - SYS_KEYCTL = 288 - SYS_IOPRIO_SET = 289 - SYS_IOPRIO_GET = 290 - SYS_INOTIFY_INIT = 291 - SYS_INOTIFY_ADD_WATCH = 292 - SYS_INOTIFY_RM_WATCH = 293 - SYS_MIGRATE_PAGES = 294 - SYS_OPENAT = 295 - SYS_MKDIRAT = 296 - SYS_MKNODAT = 297 - SYS_FCHOWNAT = 298 - SYS_FUTIMESAT = 299 - SYS_FSTATAT64 = 300 - SYS_UNLINKAT = 301 - SYS_RENAMEAT = 302 - SYS_LINKAT = 303 - SYS_SYMLINKAT = 304 - SYS_READLINKAT = 305 - SYS_FCHMODAT = 306 - SYS_FACCESSAT = 307 - SYS_PSELECT6 = 308 - SYS_PPOLL = 309 - SYS_UNSHARE = 310 - SYS_SET_ROBUST_LIST = 311 - SYS_GET_ROBUST_LIST = 312 - SYS_SPLICE = 313 - SYS_SYNC_FILE_RANGE = 314 - SYS_TEE = 315 - SYS_VMSPLICE = 316 - SYS_MOVE_PAGES = 317 - SYS_GETCPU = 318 - SYS_EPOLL_PWAIT = 319 - SYS_UTIMENSAT = 320 - SYS_SIGNALFD = 321 - SYS_TIMERFD_CREATE = 322 - SYS_EVENTFD = 323 - SYS_FALLOCATE = 324 - SYS_TIMERFD_SETTIME = 325 - SYS_TIMERFD_GETTIME = 326 - SYS_SIGNALFD4 = 327 - SYS_EVENTFD2 = 328 - SYS_EPOLL_CREATE1 = 329 - SYS_DUP3 = 330 - SYS_PIPE2 = 331 - SYS_INOTIFY_INIT1 = 332 - SYS_PREADV = 333 - SYS_PWRITEV = 334 - SYS_RT_TGSIGQUEUEINFO = 335 - SYS_PERF_EVENT_OPEN = 336 - SYS_RECVMMSG = 337 - SYS_FANOTIFY_INIT = 338 - SYS_FANOTIFY_MARK = 339 - SYS_PRLIMIT64 = 340 - SYS_NAME_TO_HANDLE_AT = 341 - SYS_OPEN_BY_HANDLE_AT = 342 - SYS_CLOCK_ADJTIME = 343 - SYS_SYNCFS = 344 - SYS_SENDMMSG = 345 - SYS_SETNS = 346 - SYS_PROCESS_VM_READV = 347 - SYS_PROCESS_VM_WRITEV = 348 - SYS_KCMP = 349 - SYS_FINIT_MODULE = 350 - SYS_SCHED_SETATTR = 351 - SYS_SCHED_GETATTR = 352 - SYS_RENAMEAT2 = 353 - SYS_SECCOMP = 354 - SYS_GETRANDOM = 355 - SYS_MEMFD_CREATE = 356 - SYS_BPF = 357 - SYS_EXECVEAT = 358 - SYS_SOCKET = 359 - SYS_SOCKETPAIR = 360 - SYS_BIND = 361 - SYS_CONNECT = 362 - SYS_LISTEN = 363 - SYS_ACCEPT4 = 364 - SYS_GETSOCKOPT = 365 - SYS_SETSOCKOPT = 366 - SYS_GETSOCKNAME = 367 - SYS_GETPEERNAME 
= 368 - SYS_SENDTO = 369 - SYS_SENDMSG = 370 - SYS_RECVFROM = 371 - SYS_RECVMSG = 372 - SYS_SHUTDOWN = 373 - SYS_USERFAULTFD = 374 - SYS_MEMBARRIER = 375 - SYS_MLOCK2 = 376 - SYS_COPY_FILE_RANGE = 377 - SYS_PREADV2 = 378 - SYS_PWRITEV2 = 379 - SYS_PKEY_MPROTECT = 380 - SYS_PKEY_ALLOC = 381 - SYS_PKEY_FREE = 382 - SYS_STATX = 383 - SYS_ARCH_PRCTL = 384 - SYS_IO_PGETEVENTS = 385 - SYS_RSEQ = 386 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86OLD = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + 
SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_VM86 = 166 + SYS_QUERY_MODULE = 167 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_SETRESGID = 170 + SYS_GETRESGID = 171 + SYS_PRCTL = 172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + SYS_CHOWN = 182 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 187 + SYS_GETPMSG = 188 + SYS_PUTPMSG = 189 + SYS_VFORK = 190 + SYS_UGETRLIMIT = 191 + SYS_MMAP2 = 192 + SYS_TRUNCATE64 = 193 + SYS_FTRUNCATE64 = 194 + SYS_STAT64 = 195 + SYS_LSTAT64 = 196 + SYS_FSTAT64 = 197 + SYS_LCHOWN32 = 198 + SYS_GETUID32 = 199 + SYS_GETGID32 = 200 + SYS_GETEUID32 = 201 + SYS_GETEGID32 = 202 + SYS_SETREUID32 = 203 + SYS_SETREGID32 = 204 + SYS_GETGROUPS32 = 205 + SYS_SETGROUPS32 = 206 + SYS_FCHOWN32 = 207 + SYS_SETRESUID32 = 208 + SYS_GETRESUID32 = 209 + SYS_SETRESGID32 = 210 + SYS_GETRESGID32 = 211 + SYS_CHOWN32 = 212 + SYS_SETUID32 = 213 + SYS_SETGID32 = 214 + SYS_SETFSUID32 = 215 + SYS_SETFSGID32 = 216 + SYS_PIVOT_ROOT = 217 + SYS_MINCORE = 218 + SYS_MADVISE = 219 + SYS_GETDENTS64 = 220 + SYS_FCNTL64 = 221 + SYS_GETTID = 224 + SYS_READAHEAD = 225 + SYS_SETXATTR = 226 + SYS_LSETXATTR = 227 + SYS_FSETXATTR = 228 + SYS_GETXATTR = 229 + SYS_LGETXATTR = 230 + SYS_FGETXATTR = 231 + SYS_LISTXATTR = 232 + SYS_LLISTXATTR = 233 + SYS_FLISTXATTR = 234 + SYS_REMOVEXATTR = 235 + SYS_LREMOVEXATTR = 236 + SYS_FREMOVEXATTR = 237 + SYS_TKILL = 238 + SYS_SENDFILE64 = 239 + SYS_FUTEX = 240 + SYS_SCHED_SETAFFINITY = 241 + SYS_SCHED_GETAFFINITY = 242 + SYS_SET_THREAD_AREA = 243 + SYS_GET_THREAD_AREA = 244 + SYS_IO_SETUP = 245 + SYS_IO_DESTROY = 246 + SYS_IO_GETEVENTS = 247 + SYS_IO_SUBMIT = 248 + SYS_IO_CANCEL = 249 + SYS_FADVISE64 = 250 + SYS_EXIT_GROUP = 252 + SYS_LOOKUP_DCOOKIE = 253 + SYS_EPOLL_CREATE = 254 + SYS_EPOLL_CTL = 255 + SYS_EPOLL_WAIT = 256 + SYS_REMAP_FILE_PAGES = 257 + SYS_SET_TID_ADDRESS = 258 + SYS_TIMER_CREATE = 259 + SYS_TIMER_SETTIME = 260 + SYS_TIMER_GETTIME = 261 + SYS_TIMER_GETOVERRUN = 262 + SYS_TIMER_DELETE = 263 + SYS_CLOCK_SETTIME = 264 + SYS_CLOCK_GETTIME = 265 + SYS_CLOCK_GETRES = 266 + SYS_CLOCK_NANOSLEEP = 267 + SYS_STATFS64 = 268 + SYS_FSTATFS64 = 269 + SYS_TGKILL = 270 + SYS_UTIMES = 271 + SYS_FADVISE64_64 = 272 + SYS_VSERVER = 273 + SYS_MBIND = 274 + SYS_GET_MEMPOLICY = 275 + SYS_SET_MEMPOLICY = 276 + SYS_MQ_OPEN = 277 + SYS_MQ_UNLINK = 278 + SYS_MQ_TIMEDSEND = 279 + SYS_MQ_TIMEDRECEIVE = 280 + SYS_MQ_NOTIFY = 281 + SYS_MQ_GETSETATTR = 282 + SYS_KEXEC_LOAD = 283 + SYS_WAITID = 284 + SYS_ADD_KEY = 286 + SYS_REQUEST_KEY = 287 + SYS_KEYCTL = 288 + SYS_IOPRIO_SET = 289 + SYS_IOPRIO_GET = 290 + SYS_INOTIFY_INIT = 291 + SYS_INOTIFY_ADD_WATCH = 292 + SYS_INOTIFY_RM_WATCH = 293 + SYS_MIGRATE_PAGES = 294 + SYS_OPENAT = 295 + SYS_MKDIRAT = 296 + SYS_MKNODAT = 297 + SYS_FCHOWNAT = 298 + SYS_FUTIMESAT = 299 + SYS_FSTATAT64 = 300 + SYS_UNLINKAT = 301 + SYS_RENAMEAT = 302 + SYS_LINKAT = 303 + SYS_SYMLINKAT = 304 + SYS_READLINKAT = 305 + SYS_FCHMODAT = 306 + SYS_FACCESSAT = 307 + SYS_PSELECT6 = 308 + SYS_PPOLL = 309 + SYS_UNSHARE = 310 + SYS_SET_ROBUST_LIST = 311 + SYS_GET_ROBUST_LIST = 312 + SYS_SPLICE = 313 + SYS_SYNC_FILE_RANGE = 314 + SYS_TEE = 315 + SYS_VMSPLICE = 316 + SYS_MOVE_PAGES = 317 + SYS_GETCPU = 318 + SYS_EPOLL_PWAIT = 319 + SYS_UTIMENSAT = 320 + SYS_SIGNALFD 
= 321 + SYS_TIMERFD_CREATE = 322 + SYS_EVENTFD = 323 + SYS_FALLOCATE = 324 + SYS_TIMERFD_SETTIME = 325 + SYS_TIMERFD_GETTIME = 326 + SYS_SIGNALFD4 = 327 + SYS_EVENTFD2 = 328 + SYS_EPOLL_CREATE1 = 329 + SYS_DUP3 = 330 + SYS_PIPE2 = 331 + SYS_INOTIFY_INIT1 = 332 + SYS_PREADV = 333 + SYS_PWRITEV = 334 + SYS_RT_TGSIGQUEUEINFO = 335 + SYS_PERF_EVENT_OPEN = 336 + SYS_RECVMMSG = 337 + SYS_FANOTIFY_INIT = 338 + SYS_FANOTIFY_MARK = 339 + SYS_PRLIMIT64 = 340 + SYS_NAME_TO_HANDLE_AT = 341 + SYS_OPEN_BY_HANDLE_AT = 342 + SYS_CLOCK_ADJTIME = 343 + SYS_SYNCFS = 344 + SYS_SENDMMSG = 345 + SYS_SETNS = 346 + SYS_PROCESS_VM_READV = 347 + SYS_PROCESS_VM_WRITEV = 348 + SYS_KCMP = 349 + SYS_FINIT_MODULE = 350 + SYS_SCHED_SETATTR = 351 + SYS_SCHED_GETATTR = 352 + SYS_RENAMEAT2 = 353 + SYS_SECCOMP = 354 + SYS_GETRANDOM = 355 + SYS_MEMFD_CREATE = 356 + SYS_BPF = 357 + SYS_EXECVEAT = 358 + SYS_SOCKET = 359 + SYS_SOCKETPAIR = 360 + SYS_BIND = 361 + SYS_CONNECT = 362 + SYS_LISTEN = 363 + SYS_ACCEPT4 = 364 + SYS_GETSOCKOPT = 365 + SYS_SETSOCKOPT = 366 + SYS_GETSOCKNAME = 367 + SYS_GETPEERNAME = 368 + SYS_SENDTO = 369 + SYS_SENDMSG = 370 + SYS_RECVFROM = 371 + SYS_RECVMSG = 372 + SYS_SHUTDOWN = 373 + SYS_USERFAULTFD = 374 + SYS_MEMBARRIER = 375 + SYS_MLOCK2 = 376 + SYS_COPY_FILE_RANGE = 377 + SYS_PREADV2 = 378 + SYS_PWRITEV2 = 379 + SYS_PKEY_MPROTECT = 380 + SYS_PKEY_ALLOC = 381 + SYS_PKEY_FREE = 382 + SYS_STATX = 383 + SYS_ARCH_PRCTL = 384 + SYS_IO_PGETEVENTS = 385 + SYS_RSEQ = 386 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_CLOCK_GETTIME64 = 403 + SYS_CLOCK_SETTIME64 = 404 + SYS_CLOCK_ADJTIME64 = 405 + SYS_CLOCK_GETRES_TIME64 = 406 + SYS_CLOCK_NANOSLEEP_TIME64 = 407 + SYS_TIMER_GETTIME64 = 408 + SYS_TIMER_SETTIME64 = 409 + SYS_TIMERFD_GETTIME64 = 410 + SYS_TIMERFD_SETTIME64 = 411 + SYS_UTIMENSAT_TIME64 = 412 + SYS_PSELECT6_TIME64 = 413 + SYS_PPOLL_TIME64 = 414 + SYS_IO_PGETEVENTS_TIME64 = 416 + SYS_RECVMMSG_TIME64 = 417 + SYS_MQ_TIMEDSEND_TIME64 = 418 + SYS_MQ_TIMEDRECEIVE_TIME64 = 419 + SYS_SEMTIMEDOP_TIME64 = 420 + SYS_RT_SIGTIMEDWAIT_TIME64 = 421 + SYS_FUTEX_TIME64 = 422 + SYS_SCHED_RR_GET_INTERVAL_TIME64 = 423 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index b3d8ad79d425b..9ba207847616b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,4 +341,8 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index e092822fbadd1..94f68f101b348 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -6,359 +6,385 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_PTRACE = 26 - SYS_PAUSE = 29 
- SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_BRK = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_SETPGID = 57 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SETREUID = 70 - SYS_SETREGID = 71 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_GETGROUPS = 80 - SYS_SETGROUPS = 81 - SYS_SYMLINK = 83 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_FCHOWN = 95 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_VHANGUP = 111 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_SETFSUID = 138 - SYS_SETFSGID = 139 - SYS__LLSEEK = 140 - SYS_GETDENTS = 141 - SYS__NEWSELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_SETRESUID = 164 - SYS_GETRESUID = 165 - SYS_POLL = 168 - SYS_NFSSERVCTL = 169 - SYS_SETRESGID = 170 - SYS_GETRESGID = 171 - SYS_PRCTL = 172 - SYS_RT_SIGRETURN = 173 - SYS_RT_SIGACTION = 174 - SYS_RT_SIGPROCMASK = 175 - SYS_RT_SIGPENDING = 176 - SYS_RT_SIGTIMEDWAIT = 177 - SYS_RT_SIGQUEUEINFO = 178 - SYS_RT_SIGSUSPEND = 179 - SYS_PREAD64 = 180 - SYS_PWRITE64 = 181 - SYS_CHOWN = 182 - SYS_GETCWD = 183 - SYS_CAPGET = 184 - SYS_CAPSET = 185 - SYS_SIGALTSTACK = 186 - SYS_SENDFILE = 187 - SYS_VFORK = 190 - SYS_UGETRLIMIT = 191 - SYS_MMAP2 = 192 - SYS_TRUNCATE64 = 193 - SYS_FTRUNCATE64 = 194 - SYS_STAT64 = 195 - SYS_LSTAT64 = 196 - SYS_FSTAT64 = 197 - SYS_LCHOWN32 = 198 - SYS_GETUID32 = 199 - SYS_GETGID32 = 200 - SYS_GETEUID32 = 201 - SYS_GETEGID32 = 202 - SYS_SETREUID32 = 203 - SYS_SETREGID32 = 204 - SYS_GETGROUPS32 = 205 - SYS_SETGROUPS32 = 206 - SYS_FCHOWN32 = 207 - SYS_SETRESUID32 = 208 - SYS_GETRESUID32 = 209 - SYS_SETRESGID32 = 210 - SYS_GETRESGID32 = 211 - SYS_CHOWN32 = 212 - SYS_SETUID32 = 213 - SYS_SETGID32 = 214 - SYS_SETFSUID32 = 215 - SYS_SETFSGID32 = 216 - SYS_GETDENTS64 = 217 - SYS_PIVOT_ROOT = 218 - SYS_MINCORE = 219 - SYS_MADVISE = 220 - SYS_FCNTL64 = 221 - SYS_GETTID = 224 - SYS_READAHEAD = 225 - SYS_SETXATTR = 226 - SYS_LSETXATTR = 227 - SYS_FSETXATTR = 228 - SYS_GETXATTR = 229 - SYS_LGETXATTR = 230 - SYS_FGETXATTR = 231 - SYS_LISTXATTR = 232 - SYS_LLISTXATTR = 233 - 
SYS_FLISTXATTR = 234 - SYS_REMOVEXATTR = 235 - SYS_LREMOVEXATTR = 236 - SYS_FREMOVEXATTR = 237 - SYS_TKILL = 238 - SYS_SENDFILE64 = 239 - SYS_FUTEX = 240 - SYS_SCHED_SETAFFINITY = 241 - SYS_SCHED_GETAFFINITY = 242 - SYS_IO_SETUP = 243 - SYS_IO_DESTROY = 244 - SYS_IO_GETEVENTS = 245 - SYS_IO_SUBMIT = 246 - SYS_IO_CANCEL = 247 - SYS_EXIT_GROUP = 248 - SYS_LOOKUP_DCOOKIE = 249 - SYS_EPOLL_CREATE = 250 - SYS_EPOLL_CTL = 251 - SYS_EPOLL_WAIT = 252 - SYS_REMAP_FILE_PAGES = 253 - SYS_SET_TID_ADDRESS = 256 - SYS_TIMER_CREATE = 257 - SYS_TIMER_SETTIME = 258 - SYS_TIMER_GETTIME = 259 - SYS_TIMER_GETOVERRUN = 260 - SYS_TIMER_DELETE = 261 - SYS_CLOCK_SETTIME = 262 - SYS_CLOCK_GETTIME = 263 - SYS_CLOCK_GETRES = 264 - SYS_CLOCK_NANOSLEEP = 265 - SYS_STATFS64 = 266 - SYS_FSTATFS64 = 267 - SYS_TGKILL = 268 - SYS_UTIMES = 269 - SYS_ARM_FADVISE64_64 = 270 - SYS_PCICONFIG_IOBASE = 271 - SYS_PCICONFIG_READ = 272 - SYS_PCICONFIG_WRITE = 273 - SYS_MQ_OPEN = 274 - SYS_MQ_UNLINK = 275 - SYS_MQ_TIMEDSEND = 276 - SYS_MQ_TIMEDRECEIVE = 277 - SYS_MQ_NOTIFY = 278 - SYS_MQ_GETSETATTR = 279 - SYS_WAITID = 280 - SYS_SOCKET = 281 - SYS_BIND = 282 - SYS_CONNECT = 283 - SYS_LISTEN = 284 - SYS_ACCEPT = 285 - SYS_GETSOCKNAME = 286 - SYS_GETPEERNAME = 287 - SYS_SOCKETPAIR = 288 - SYS_SEND = 289 - SYS_SENDTO = 290 - SYS_RECV = 291 - SYS_RECVFROM = 292 - SYS_SHUTDOWN = 293 - SYS_SETSOCKOPT = 294 - SYS_GETSOCKOPT = 295 - SYS_SENDMSG = 296 - SYS_RECVMSG = 297 - SYS_SEMOP = 298 - SYS_SEMGET = 299 - SYS_SEMCTL = 300 - SYS_MSGSND = 301 - SYS_MSGRCV = 302 - SYS_MSGGET = 303 - SYS_MSGCTL = 304 - SYS_SHMAT = 305 - SYS_SHMDT = 306 - SYS_SHMGET = 307 - SYS_SHMCTL = 308 - SYS_ADD_KEY = 309 - SYS_REQUEST_KEY = 310 - SYS_KEYCTL = 311 - SYS_SEMTIMEDOP = 312 - SYS_VSERVER = 313 - SYS_IOPRIO_SET = 314 - SYS_IOPRIO_GET = 315 - SYS_INOTIFY_INIT = 316 - SYS_INOTIFY_ADD_WATCH = 317 - SYS_INOTIFY_RM_WATCH = 318 - SYS_MBIND = 319 - SYS_GET_MEMPOLICY = 320 - SYS_SET_MEMPOLICY = 321 - SYS_OPENAT = 322 - SYS_MKDIRAT = 323 - SYS_MKNODAT = 324 - SYS_FCHOWNAT = 325 - SYS_FUTIMESAT = 326 - SYS_FSTATAT64 = 327 - SYS_UNLINKAT = 328 - SYS_RENAMEAT = 329 - SYS_LINKAT = 330 - SYS_SYMLINKAT = 331 - SYS_READLINKAT = 332 - SYS_FCHMODAT = 333 - SYS_FACCESSAT = 334 - SYS_PSELECT6 = 335 - SYS_PPOLL = 336 - SYS_UNSHARE = 337 - SYS_SET_ROBUST_LIST = 338 - SYS_GET_ROBUST_LIST = 339 - SYS_SPLICE = 340 - SYS_ARM_SYNC_FILE_RANGE = 341 - SYS_TEE = 342 - SYS_VMSPLICE = 343 - SYS_MOVE_PAGES = 344 - SYS_GETCPU = 345 - SYS_EPOLL_PWAIT = 346 - SYS_KEXEC_LOAD = 347 - SYS_UTIMENSAT = 348 - SYS_SIGNALFD = 349 - SYS_TIMERFD_CREATE = 350 - SYS_EVENTFD = 351 - SYS_FALLOCATE = 352 - SYS_TIMERFD_SETTIME = 353 - SYS_TIMERFD_GETTIME = 354 - SYS_SIGNALFD4 = 355 - SYS_EVENTFD2 = 356 - SYS_EPOLL_CREATE1 = 357 - SYS_DUP3 = 358 - SYS_PIPE2 = 359 - SYS_INOTIFY_INIT1 = 360 - SYS_PREADV = 361 - SYS_PWRITEV = 362 - SYS_RT_TGSIGQUEUEINFO = 363 - SYS_PERF_EVENT_OPEN = 364 - SYS_RECVMMSG = 365 - SYS_ACCEPT4 = 366 - SYS_FANOTIFY_INIT = 367 - SYS_FANOTIFY_MARK = 368 - SYS_PRLIMIT64 = 369 - SYS_NAME_TO_HANDLE_AT = 370 - SYS_OPEN_BY_HANDLE_AT = 371 - SYS_CLOCK_ADJTIME = 372 - SYS_SYNCFS = 373 - SYS_SENDMMSG = 374 - SYS_SETNS = 375 - SYS_PROCESS_VM_READV = 376 - SYS_PROCESS_VM_WRITEV = 377 - SYS_KCMP = 378 - SYS_FINIT_MODULE = 379 - SYS_SCHED_SETATTR = 380 - SYS_SCHED_GETATTR = 381 - SYS_RENAMEAT2 = 382 - SYS_SECCOMP = 383 - SYS_GETRANDOM = 384 - SYS_MEMFD_CREATE = 385 - SYS_BPF = 386 - SYS_EXECVEAT = 387 - SYS_USERFAULTFD = 388 - SYS_MEMBARRIER = 389 - SYS_MLOCK2 = 390 - SYS_COPY_FILE_RANGE = 391 - 
SYS_PREADV2 = 392 - SYS_PWRITEV2 = 393 - SYS_PKEY_MPROTECT = 394 - SYS_PKEY_ALLOC = 395 - SYS_PKEY_FREE = 396 - SYS_STATX = 397 - SYS_RSEQ = 398 - SYS_IO_PGETEVENTS = 399 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_PTRACE = 26 + SYS_PAUSE = 29 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_SETPGID = 57 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SYMLINK = 83 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_VHANGUP = 111 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_SETRESGID = 170 + SYS_GETRESGID = 171 + SYS_PRCTL = 172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + SYS_CHOWN = 182 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 187 + SYS_VFORK = 190 + SYS_UGETRLIMIT = 191 + SYS_MMAP2 = 192 + SYS_TRUNCATE64 = 193 + SYS_FTRUNCATE64 = 194 + SYS_STAT64 = 195 + SYS_LSTAT64 = 196 + SYS_FSTAT64 = 197 + SYS_LCHOWN32 = 198 + SYS_GETUID32 = 199 + SYS_GETGID32 = 200 + SYS_GETEUID32 = 201 + SYS_GETEGID32 = 202 + SYS_SETREUID32 = 203 + SYS_SETREGID32 = 204 + SYS_GETGROUPS32 = 205 + SYS_SETGROUPS32 = 206 + 
SYS_FCHOWN32 = 207 + SYS_SETRESUID32 = 208 + SYS_GETRESUID32 = 209 + SYS_SETRESGID32 = 210 + SYS_GETRESGID32 = 211 + SYS_CHOWN32 = 212 + SYS_SETUID32 = 213 + SYS_SETGID32 = 214 + SYS_SETFSUID32 = 215 + SYS_SETFSGID32 = 216 + SYS_GETDENTS64 = 217 + SYS_PIVOT_ROOT = 218 + SYS_MINCORE = 219 + SYS_MADVISE = 220 + SYS_FCNTL64 = 221 + SYS_GETTID = 224 + SYS_READAHEAD = 225 + SYS_SETXATTR = 226 + SYS_LSETXATTR = 227 + SYS_FSETXATTR = 228 + SYS_GETXATTR = 229 + SYS_LGETXATTR = 230 + SYS_FGETXATTR = 231 + SYS_LISTXATTR = 232 + SYS_LLISTXATTR = 233 + SYS_FLISTXATTR = 234 + SYS_REMOVEXATTR = 235 + SYS_LREMOVEXATTR = 236 + SYS_FREMOVEXATTR = 237 + SYS_TKILL = 238 + SYS_SENDFILE64 = 239 + SYS_FUTEX = 240 + SYS_SCHED_SETAFFINITY = 241 + SYS_SCHED_GETAFFINITY = 242 + SYS_IO_SETUP = 243 + SYS_IO_DESTROY = 244 + SYS_IO_GETEVENTS = 245 + SYS_IO_SUBMIT = 246 + SYS_IO_CANCEL = 247 + SYS_EXIT_GROUP = 248 + SYS_LOOKUP_DCOOKIE = 249 + SYS_EPOLL_CREATE = 250 + SYS_EPOLL_CTL = 251 + SYS_EPOLL_WAIT = 252 + SYS_REMAP_FILE_PAGES = 253 + SYS_SET_TID_ADDRESS = 256 + SYS_TIMER_CREATE = 257 + SYS_TIMER_SETTIME = 258 + SYS_TIMER_GETTIME = 259 + SYS_TIMER_GETOVERRUN = 260 + SYS_TIMER_DELETE = 261 + SYS_CLOCK_SETTIME = 262 + SYS_CLOCK_GETTIME = 263 + SYS_CLOCK_GETRES = 264 + SYS_CLOCK_NANOSLEEP = 265 + SYS_STATFS64 = 266 + SYS_FSTATFS64 = 267 + SYS_TGKILL = 268 + SYS_UTIMES = 269 + SYS_ARM_FADVISE64_64 = 270 + SYS_PCICONFIG_IOBASE = 271 + SYS_PCICONFIG_READ = 272 + SYS_PCICONFIG_WRITE = 273 + SYS_MQ_OPEN = 274 + SYS_MQ_UNLINK = 275 + SYS_MQ_TIMEDSEND = 276 + SYS_MQ_TIMEDRECEIVE = 277 + SYS_MQ_NOTIFY = 278 + SYS_MQ_GETSETATTR = 279 + SYS_WAITID = 280 + SYS_SOCKET = 281 + SYS_BIND = 282 + SYS_CONNECT = 283 + SYS_LISTEN = 284 + SYS_ACCEPT = 285 + SYS_GETSOCKNAME = 286 + SYS_GETPEERNAME = 287 + SYS_SOCKETPAIR = 288 + SYS_SEND = 289 + SYS_SENDTO = 290 + SYS_RECV = 291 + SYS_RECVFROM = 292 + SYS_SHUTDOWN = 293 + SYS_SETSOCKOPT = 294 + SYS_GETSOCKOPT = 295 + SYS_SENDMSG = 296 + SYS_RECVMSG = 297 + SYS_SEMOP = 298 + SYS_SEMGET = 299 + SYS_SEMCTL = 300 + SYS_MSGSND = 301 + SYS_MSGRCV = 302 + SYS_MSGGET = 303 + SYS_MSGCTL = 304 + SYS_SHMAT = 305 + SYS_SHMDT = 306 + SYS_SHMGET = 307 + SYS_SHMCTL = 308 + SYS_ADD_KEY = 309 + SYS_REQUEST_KEY = 310 + SYS_KEYCTL = 311 + SYS_SEMTIMEDOP = 312 + SYS_VSERVER = 313 + SYS_IOPRIO_SET = 314 + SYS_IOPRIO_GET = 315 + SYS_INOTIFY_INIT = 316 + SYS_INOTIFY_ADD_WATCH = 317 + SYS_INOTIFY_RM_WATCH = 318 + SYS_MBIND = 319 + SYS_GET_MEMPOLICY = 320 + SYS_SET_MEMPOLICY = 321 + SYS_OPENAT = 322 + SYS_MKDIRAT = 323 + SYS_MKNODAT = 324 + SYS_FCHOWNAT = 325 + SYS_FUTIMESAT = 326 + SYS_FSTATAT64 = 327 + SYS_UNLINKAT = 328 + SYS_RENAMEAT = 329 + SYS_LINKAT = 330 + SYS_SYMLINKAT = 331 + SYS_READLINKAT = 332 + SYS_FCHMODAT = 333 + SYS_FACCESSAT = 334 + SYS_PSELECT6 = 335 + SYS_PPOLL = 336 + SYS_UNSHARE = 337 + SYS_SET_ROBUST_LIST = 338 + SYS_GET_ROBUST_LIST = 339 + SYS_SPLICE = 340 + SYS_ARM_SYNC_FILE_RANGE = 341 + SYS_TEE = 342 + SYS_VMSPLICE = 343 + SYS_MOVE_PAGES = 344 + SYS_GETCPU = 345 + SYS_EPOLL_PWAIT = 346 + SYS_KEXEC_LOAD = 347 + SYS_UTIMENSAT = 348 + SYS_SIGNALFD = 349 + SYS_TIMERFD_CREATE = 350 + SYS_EVENTFD = 351 + SYS_FALLOCATE = 352 + SYS_TIMERFD_SETTIME = 353 + SYS_TIMERFD_GETTIME = 354 + SYS_SIGNALFD4 = 355 + SYS_EVENTFD2 = 356 + SYS_EPOLL_CREATE1 = 357 + SYS_DUP3 = 358 + SYS_PIPE2 = 359 + SYS_INOTIFY_INIT1 = 360 + SYS_PREADV = 361 + SYS_PWRITEV = 362 + SYS_RT_TGSIGQUEUEINFO = 363 + SYS_PERF_EVENT_OPEN = 364 + SYS_RECVMMSG = 365 + SYS_ACCEPT4 = 366 + SYS_FANOTIFY_INIT = 367 + SYS_FANOTIFY_MARK = 368 
+ SYS_PRLIMIT64 = 369 + SYS_NAME_TO_HANDLE_AT = 370 + SYS_OPEN_BY_HANDLE_AT = 371 + SYS_CLOCK_ADJTIME = 372 + SYS_SYNCFS = 373 + SYS_SENDMMSG = 374 + SYS_SETNS = 375 + SYS_PROCESS_VM_READV = 376 + SYS_PROCESS_VM_WRITEV = 377 + SYS_KCMP = 378 + SYS_FINIT_MODULE = 379 + SYS_SCHED_SETATTR = 380 + SYS_SCHED_GETATTR = 381 + SYS_RENAMEAT2 = 382 + SYS_SECCOMP = 383 + SYS_GETRANDOM = 384 + SYS_MEMFD_CREATE = 385 + SYS_BPF = 386 + SYS_EXECVEAT = 387 + SYS_USERFAULTFD = 388 + SYS_MEMBARRIER = 389 + SYS_MLOCK2 = 390 + SYS_COPY_FILE_RANGE = 391 + SYS_PREADV2 = 392 + SYS_PWRITEV2 = 393 + SYS_PKEY_MPROTECT = 394 + SYS_PKEY_ALLOC = 395 + SYS_PKEY_FREE = 396 + SYS_STATX = 397 + SYS_RSEQ = 398 + SYS_IO_PGETEVENTS = 399 + SYS_MIGRATE_PAGES = 400 + SYS_KEXEC_FILE_LOAD = 401 + SYS_CLOCK_GETTIME64 = 403 + SYS_CLOCK_SETTIME64 = 404 + SYS_CLOCK_ADJTIME64 = 405 + SYS_CLOCK_GETRES_TIME64 = 406 + SYS_CLOCK_NANOSLEEP_TIME64 = 407 + SYS_TIMER_GETTIME64 = 408 + SYS_TIMER_SETTIME64 = 409 + SYS_TIMERFD_GETTIME64 = 410 + SYS_TIMERFD_SETTIME64 = 411 + SYS_UTIMENSAT_TIME64 = 412 + SYS_PSELECT6_TIME64 = 413 + SYS_PPOLL_TIME64 = 414 + SYS_IO_PGETEVENTS_TIME64 = 416 + SYS_RECVMMSG_TIME64 = 417 + SYS_MQ_TIMEDSEND_TIME64 = 418 + SYS_MQ_TIMEDRECEIVE_TIME64 = 419 + SYS_SEMTIMEDOP_TIME64 = 420 + SYS_RT_SIGTIMEDWAIT_TIME64 = 421 + SYS_FUTEX_TIME64 = 422 + SYS_SCHED_RR_GET_INTERVAL_TIME64 = 423 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index b81d508a730fa..15c413516eb8e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -286,4 +286,8 @@ const ( SYS_IO_PGETEVENTS = 292 SYS_RSEQ = 293 SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 6893a5bd055b1..638465b1428ab 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -6,372 +6,406 @@ package unix const ( - SYS_SYSCALL = 4000 - SYS_EXIT = 4001 - SYS_FORK = 4002 - SYS_READ = 4003 - SYS_WRITE = 4004 - SYS_OPEN = 4005 - SYS_CLOSE = 4006 - SYS_WAITPID = 4007 - SYS_CREAT = 4008 - SYS_LINK = 4009 - SYS_UNLINK = 4010 - SYS_EXECVE = 4011 - SYS_CHDIR = 4012 - SYS_TIME = 4013 - SYS_MKNOD = 4014 - SYS_CHMOD = 4015 - SYS_LCHOWN = 4016 - SYS_BREAK = 4017 - SYS_UNUSED18 = 4018 - SYS_LSEEK = 4019 - SYS_GETPID = 4020 - SYS_MOUNT = 4021 - SYS_UMOUNT = 4022 - SYS_SETUID = 4023 - SYS_GETUID = 4024 - SYS_STIME = 4025 - SYS_PTRACE = 4026 - SYS_ALARM = 4027 - SYS_UNUSED28 = 4028 - SYS_PAUSE = 4029 - SYS_UTIME = 4030 - SYS_STTY = 4031 - SYS_GTTY = 4032 - SYS_ACCESS = 4033 - SYS_NICE = 4034 - SYS_FTIME = 4035 - SYS_SYNC = 4036 - SYS_KILL = 4037 - SYS_RENAME = 4038 - SYS_MKDIR = 4039 - SYS_RMDIR = 4040 - SYS_DUP = 4041 - SYS_PIPE = 4042 - SYS_TIMES = 4043 - SYS_PROF = 4044 - SYS_BRK = 4045 - SYS_SETGID = 4046 - SYS_GETGID = 4047 - SYS_SIGNAL = 4048 - SYS_GETEUID = 4049 - SYS_GETEGID = 4050 - SYS_ACCT = 4051 - SYS_UMOUNT2 = 4052 - SYS_LOCK = 4053 - SYS_IOCTL = 4054 - SYS_FCNTL = 4055 - SYS_MPX = 4056 - SYS_SETPGID = 4057 - SYS_ULIMIT = 4058 - SYS_UNUSED59 = 4059 - SYS_UMASK = 4060 - SYS_CHROOT = 4061 - SYS_USTAT = 4062 - SYS_DUP2 = 4063 - SYS_GETPPID = 4064 - SYS_GETPGRP 
= 4065 - SYS_SETSID = 4066 - SYS_SIGACTION = 4067 - SYS_SGETMASK = 4068 - SYS_SSETMASK = 4069 - SYS_SETREUID = 4070 - SYS_SETREGID = 4071 - SYS_SIGSUSPEND = 4072 - SYS_SIGPENDING = 4073 - SYS_SETHOSTNAME = 4074 - SYS_SETRLIMIT = 4075 - SYS_GETRLIMIT = 4076 - SYS_GETRUSAGE = 4077 - SYS_GETTIMEOFDAY = 4078 - SYS_SETTIMEOFDAY = 4079 - SYS_GETGROUPS = 4080 - SYS_SETGROUPS = 4081 - SYS_RESERVED82 = 4082 - SYS_SYMLINK = 4083 - SYS_UNUSED84 = 4084 - SYS_READLINK = 4085 - SYS_USELIB = 4086 - SYS_SWAPON = 4087 - SYS_REBOOT = 4088 - SYS_READDIR = 4089 - SYS_MMAP = 4090 - SYS_MUNMAP = 4091 - SYS_TRUNCATE = 4092 - SYS_FTRUNCATE = 4093 - SYS_FCHMOD = 4094 - SYS_FCHOWN = 4095 - SYS_GETPRIORITY = 4096 - SYS_SETPRIORITY = 4097 - SYS_PROFIL = 4098 - SYS_STATFS = 4099 - SYS_FSTATFS = 4100 - SYS_IOPERM = 4101 - SYS_SOCKETCALL = 4102 - SYS_SYSLOG = 4103 - SYS_SETITIMER = 4104 - SYS_GETITIMER = 4105 - SYS_STAT = 4106 - SYS_LSTAT = 4107 - SYS_FSTAT = 4108 - SYS_UNUSED109 = 4109 - SYS_IOPL = 4110 - SYS_VHANGUP = 4111 - SYS_IDLE = 4112 - SYS_VM86 = 4113 - SYS_WAIT4 = 4114 - SYS_SWAPOFF = 4115 - SYS_SYSINFO = 4116 - SYS_IPC = 4117 - SYS_FSYNC = 4118 - SYS_SIGRETURN = 4119 - SYS_CLONE = 4120 - SYS_SETDOMAINNAME = 4121 - SYS_UNAME = 4122 - SYS_MODIFY_LDT = 4123 - SYS_ADJTIMEX = 4124 - SYS_MPROTECT = 4125 - SYS_SIGPROCMASK = 4126 - SYS_CREATE_MODULE = 4127 - SYS_INIT_MODULE = 4128 - SYS_DELETE_MODULE = 4129 - SYS_GET_KERNEL_SYMS = 4130 - SYS_QUOTACTL = 4131 - SYS_GETPGID = 4132 - SYS_FCHDIR = 4133 - SYS_BDFLUSH = 4134 - SYS_SYSFS = 4135 - SYS_PERSONALITY = 4136 - SYS_AFS_SYSCALL = 4137 - SYS_SETFSUID = 4138 - SYS_SETFSGID = 4139 - SYS__LLSEEK = 4140 - SYS_GETDENTS = 4141 - SYS__NEWSELECT = 4142 - SYS_FLOCK = 4143 - SYS_MSYNC = 4144 - SYS_READV = 4145 - SYS_WRITEV = 4146 - SYS_CACHEFLUSH = 4147 - SYS_CACHECTL = 4148 - SYS_SYSMIPS = 4149 - SYS_UNUSED150 = 4150 - SYS_GETSID = 4151 - SYS_FDATASYNC = 4152 - SYS__SYSCTL = 4153 - SYS_MLOCK = 4154 - SYS_MUNLOCK = 4155 - SYS_MLOCKALL = 4156 - SYS_MUNLOCKALL = 4157 - SYS_SCHED_SETPARAM = 4158 - SYS_SCHED_GETPARAM = 4159 - SYS_SCHED_SETSCHEDULER = 4160 - SYS_SCHED_GETSCHEDULER = 4161 - SYS_SCHED_YIELD = 4162 - SYS_SCHED_GET_PRIORITY_MAX = 4163 - SYS_SCHED_GET_PRIORITY_MIN = 4164 - SYS_SCHED_RR_GET_INTERVAL = 4165 - SYS_NANOSLEEP = 4166 - SYS_MREMAP = 4167 - SYS_ACCEPT = 4168 - SYS_BIND = 4169 - SYS_CONNECT = 4170 - SYS_GETPEERNAME = 4171 - SYS_GETSOCKNAME = 4172 - SYS_GETSOCKOPT = 4173 - SYS_LISTEN = 4174 - SYS_RECV = 4175 - SYS_RECVFROM = 4176 - SYS_RECVMSG = 4177 - SYS_SEND = 4178 - SYS_SENDMSG = 4179 - SYS_SENDTO = 4180 - SYS_SETSOCKOPT = 4181 - SYS_SHUTDOWN = 4182 - SYS_SOCKET = 4183 - SYS_SOCKETPAIR = 4184 - SYS_SETRESUID = 4185 - SYS_GETRESUID = 4186 - SYS_QUERY_MODULE = 4187 - SYS_POLL = 4188 - SYS_NFSSERVCTL = 4189 - SYS_SETRESGID = 4190 - SYS_GETRESGID = 4191 - SYS_PRCTL = 4192 - SYS_RT_SIGRETURN = 4193 - SYS_RT_SIGACTION = 4194 - SYS_RT_SIGPROCMASK = 4195 - SYS_RT_SIGPENDING = 4196 - SYS_RT_SIGTIMEDWAIT = 4197 - SYS_RT_SIGQUEUEINFO = 4198 - SYS_RT_SIGSUSPEND = 4199 - SYS_PREAD64 = 4200 - SYS_PWRITE64 = 4201 - SYS_CHOWN = 4202 - SYS_GETCWD = 4203 - SYS_CAPGET = 4204 - SYS_CAPSET = 4205 - SYS_SIGALTSTACK = 4206 - SYS_SENDFILE = 4207 - SYS_GETPMSG = 4208 - SYS_PUTPMSG = 4209 - SYS_MMAP2 = 4210 - SYS_TRUNCATE64 = 4211 - SYS_FTRUNCATE64 = 4212 - SYS_STAT64 = 4213 - SYS_LSTAT64 = 4214 - SYS_FSTAT64 = 4215 - SYS_PIVOT_ROOT = 4216 - SYS_MINCORE = 4217 - SYS_MADVISE = 4218 - SYS_GETDENTS64 = 4219 - SYS_FCNTL64 = 4220 - SYS_RESERVED221 = 4221 - SYS_GETTID = 4222 - SYS_READAHEAD 
= 4223 - SYS_SETXATTR = 4224 - SYS_LSETXATTR = 4225 - SYS_FSETXATTR = 4226 - SYS_GETXATTR = 4227 - SYS_LGETXATTR = 4228 - SYS_FGETXATTR = 4229 - SYS_LISTXATTR = 4230 - SYS_LLISTXATTR = 4231 - SYS_FLISTXATTR = 4232 - SYS_REMOVEXATTR = 4233 - SYS_LREMOVEXATTR = 4234 - SYS_FREMOVEXATTR = 4235 - SYS_TKILL = 4236 - SYS_SENDFILE64 = 4237 - SYS_FUTEX = 4238 - SYS_SCHED_SETAFFINITY = 4239 - SYS_SCHED_GETAFFINITY = 4240 - SYS_IO_SETUP = 4241 - SYS_IO_DESTROY = 4242 - SYS_IO_GETEVENTS = 4243 - SYS_IO_SUBMIT = 4244 - SYS_IO_CANCEL = 4245 - SYS_EXIT_GROUP = 4246 - SYS_LOOKUP_DCOOKIE = 4247 - SYS_EPOLL_CREATE = 4248 - SYS_EPOLL_CTL = 4249 - SYS_EPOLL_WAIT = 4250 - SYS_REMAP_FILE_PAGES = 4251 - SYS_SET_TID_ADDRESS = 4252 - SYS_RESTART_SYSCALL = 4253 - SYS_FADVISE64 = 4254 - SYS_STATFS64 = 4255 - SYS_FSTATFS64 = 4256 - SYS_TIMER_CREATE = 4257 - SYS_TIMER_SETTIME = 4258 - SYS_TIMER_GETTIME = 4259 - SYS_TIMER_GETOVERRUN = 4260 - SYS_TIMER_DELETE = 4261 - SYS_CLOCK_SETTIME = 4262 - SYS_CLOCK_GETTIME = 4263 - SYS_CLOCK_GETRES = 4264 - SYS_CLOCK_NANOSLEEP = 4265 - SYS_TGKILL = 4266 - SYS_UTIMES = 4267 - SYS_MBIND = 4268 - SYS_GET_MEMPOLICY = 4269 - SYS_SET_MEMPOLICY = 4270 - SYS_MQ_OPEN = 4271 - SYS_MQ_UNLINK = 4272 - SYS_MQ_TIMEDSEND = 4273 - SYS_MQ_TIMEDRECEIVE = 4274 - SYS_MQ_NOTIFY = 4275 - SYS_MQ_GETSETATTR = 4276 - SYS_VSERVER = 4277 - SYS_WAITID = 4278 - SYS_ADD_KEY = 4280 - SYS_REQUEST_KEY = 4281 - SYS_KEYCTL = 4282 - SYS_SET_THREAD_AREA = 4283 - SYS_INOTIFY_INIT = 4284 - SYS_INOTIFY_ADD_WATCH = 4285 - SYS_INOTIFY_RM_WATCH = 4286 - SYS_MIGRATE_PAGES = 4287 - SYS_OPENAT = 4288 - SYS_MKDIRAT = 4289 - SYS_MKNODAT = 4290 - SYS_FCHOWNAT = 4291 - SYS_FUTIMESAT = 4292 - SYS_FSTATAT64 = 4293 - SYS_UNLINKAT = 4294 - SYS_RENAMEAT = 4295 - SYS_LINKAT = 4296 - SYS_SYMLINKAT = 4297 - SYS_READLINKAT = 4298 - SYS_FCHMODAT = 4299 - SYS_FACCESSAT = 4300 - SYS_PSELECT6 = 4301 - SYS_PPOLL = 4302 - SYS_UNSHARE = 4303 - SYS_SPLICE = 4304 - SYS_SYNC_FILE_RANGE = 4305 - SYS_TEE = 4306 - SYS_VMSPLICE = 4307 - SYS_MOVE_PAGES = 4308 - SYS_SET_ROBUST_LIST = 4309 - SYS_GET_ROBUST_LIST = 4310 - SYS_KEXEC_LOAD = 4311 - SYS_GETCPU = 4312 - SYS_EPOLL_PWAIT = 4313 - SYS_IOPRIO_SET = 4314 - SYS_IOPRIO_GET = 4315 - SYS_UTIMENSAT = 4316 - SYS_SIGNALFD = 4317 - SYS_TIMERFD = 4318 - SYS_EVENTFD = 4319 - SYS_FALLOCATE = 4320 - SYS_TIMERFD_CREATE = 4321 - SYS_TIMERFD_GETTIME = 4322 - SYS_TIMERFD_SETTIME = 4323 - SYS_SIGNALFD4 = 4324 - SYS_EVENTFD2 = 4325 - SYS_EPOLL_CREATE1 = 4326 - SYS_DUP3 = 4327 - SYS_PIPE2 = 4328 - SYS_INOTIFY_INIT1 = 4329 - SYS_PREADV = 4330 - SYS_PWRITEV = 4331 - SYS_RT_TGSIGQUEUEINFO = 4332 - SYS_PERF_EVENT_OPEN = 4333 - SYS_ACCEPT4 = 4334 - SYS_RECVMMSG = 4335 - SYS_FANOTIFY_INIT = 4336 - SYS_FANOTIFY_MARK = 4337 - SYS_PRLIMIT64 = 4338 - SYS_NAME_TO_HANDLE_AT = 4339 - SYS_OPEN_BY_HANDLE_AT = 4340 - SYS_CLOCK_ADJTIME = 4341 - SYS_SYNCFS = 4342 - SYS_SENDMMSG = 4343 - SYS_SETNS = 4344 - SYS_PROCESS_VM_READV = 4345 - SYS_PROCESS_VM_WRITEV = 4346 - SYS_KCMP = 4347 - SYS_FINIT_MODULE = 4348 - SYS_SCHED_SETATTR = 4349 - SYS_SCHED_GETATTR = 4350 - SYS_RENAMEAT2 = 4351 - SYS_SECCOMP = 4352 - SYS_GETRANDOM = 4353 - SYS_MEMFD_CREATE = 4354 - SYS_BPF = 4355 - SYS_EXECVEAT = 4356 - SYS_USERFAULTFD = 4357 - SYS_MEMBARRIER = 4358 - SYS_MLOCK2 = 4359 - SYS_COPY_FILE_RANGE = 4360 - SYS_PREADV2 = 4361 - SYS_PWRITEV2 = 4362 - SYS_PKEY_MPROTECT = 4363 - SYS_PKEY_ALLOC = 4364 - SYS_PKEY_FREE = 4365 - SYS_STATX = 4366 - SYS_RSEQ = 4367 - SYS_IO_PGETEVENTS = 4368 + SYS_SYSCALL = 4000 + SYS_EXIT = 4001 + SYS_FORK = 4002 + SYS_READ = 4003 
+ SYS_WRITE = 4004 + SYS_OPEN = 4005 + SYS_CLOSE = 4006 + SYS_WAITPID = 4007 + SYS_CREAT = 4008 + SYS_LINK = 4009 + SYS_UNLINK = 4010 + SYS_EXECVE = 4011 + SYS_CHDIR = 4012 + SYS_TIME = 4013 + SYS_MKNOD = 4014 + SYS_CHMOD = 4015 + SYS_LCHOWN = 4016 + SYS_BREAK = 4017 + SYS_UNUSED18 = 4018 + SYS_LSEEK = 4019 + SYS_GETPID = 4020 + SYS_MOUNT = 4021 + SYS_UMOUNT = 4022 + SYS_SETUID = 4023 + SYS_GETUID = 4024 + SYS_STIME = 4025 + SYS_PTRACE = 4026 + SYS_ALARM = 4027 + SYS_UNUSED28 = 4028 + SYS_PAUSE = 4029 + SYS_UTIME = 4030 + SYS_STTY = 4031 + SYS_GTTY = 4032 + SYS_ACCESS = 4033 + SYS_NICE = 4034 + SYS_FTIME = 4035 + SYS_SYNC = 4036 + SYS_KILL = 4037 + SYS_RENAME = 4038 + SYS_MKDIR = 4039 + SYS_RMDIR = 4040 + SYS_DUP = 4041 + SYS_PIPE = 4042 + SYS_TIMES = 4043 + SYS_PROF = 4044 + SYS_BRK = 4045 + SYS_SETGID = 4046 + SYS_GETGID = 4047 + SYS_SIGNAL = 4048 + SYS_GETEUID = 4049 + SYS_GETEGID = 4050 + SYS_ACCT = 4051 + SYS_UMOUNT2 = 4052 + SYS_LOCK = 4053 + SYS_IOCTL = 4054 + SYS_FCNTL = 4055 + SYS_MPX = 4056 + SYS_SETPGID = 4057 + SYS_ULIMIT = 4058 + SYS_UNUSED59 = 4059 + SYS_UMASK = 4060 + SYS_CHROOT = 4061 + SYS_USTAT = 4062 + SYS_DUP2 = 4063 + SYS_GETPPID = 4064 + SYS_GETPGRP = 4065 + SYS_SETSID = 4066 + SYS_SIGACTION = 4067 + SYS_SGETMASK = 4068 + SYS_SSETMASK = 4069 + SYS_SETREUID = 4070 + SYS_SETREGID = 4071 + SYS_SIGSUSPEND = 4072 + SYS_SIGPENDING = 4073 + SYS_SETHOSTNAME = 4074 + SYS_SETRLIMIT = 4075 + SYS_GETRLIMIT = 4076 + SYS_GETRUSAGE = 4077 + SYS_GETTIMEOFDAY = 4078 + SYS_SETTIMEOFDAY = 4079 + SYS_GETGROUPS = 4080 + SYS_SETGROUPS = 4081 + SYS_RESERVED82 = 4082 + SYS_SYMLINK = 4083 + SYS_UNUSED84 = 4084 + SYS_READLINK = 4085 + SYS_USELIB = 4086 + SYS_SWAPON = 4087 + SYS_REBOOT = 4088 + SYS_READDIR = 4089 + SYS_MMAP = 4090 + SYS_MUNMAP = 4091 + SYS_TRUNCATE = 4092 + SYS_FTRUNCATE = 4093 + SYS_FCHMOD = 4094 + SYS_FCHOWN = 4095 + SYS_GETPRIORITY = 4096 + SYS_SETPRIORITY = 4097 + SYS_PROFIL = 4098 + SYS_STATFS = 4099 + SYS_FSTATFS = 4100 + SYS_IOPERM = 4101 + SYS_SOCKETCALL = 4102 + SYS_SYSLOG = 4103 + SYS_SETITIMER = 4104 + SYS_GETITIMER = 4105 + SYS_STAT = 4106 + SYS_LSTAT = 4107 + SYS_FSTAT = 4108 + SYS_UNUSED109 = 4109 + SYS_IOPL = 4110 + SYS_VHANGUP = 4111 + SYS_IDLE = 4112 + SYS_VM86 = 4113 + SYS_WAIT4 = 4114 + SYS_SWAPOFF = 4115 + SYS_SYSINFO = 4116 + SYS_IPC = 4117 + SYS_FSYNC = 4118 + SYS_SIGRETURN = 4119 + SYS_CLONE = 4120 + SYS_SETDOMAINNAME = 4121 + SYS_UNAME = 4122 + SYS_MODIFY_LDT = 4123 + SYS_ADJTIMEX = 4124 + SYS_MPROTECT = 4125 + SYS_SIGPROCMASK = 4126 + SYS_CREATE_MODULE = 4127 + SYS_INIT_MODULE = 4128 + SYS_DELETE_MODULE = 4129 + SYS_GET_KERNEL_SYMS = 4130 + SYS_QUOTACTL = 4131 + SYS_GETPGID = 4132 + SYS_FCHDIR = 4133 + SYS_BDFLUSH = 4134 + SYS_SYSFS = 4135 + SYS_PERSONALITY = 4136 + SYS_AFS_SYSCALL = 4137 + SYS_SETFSUID = 4138 + SYS_SETFSGID = 4139 + SYS__LLSEEK = 4140 + SYS_GETDENTS = 4141 + SYS__NEWSELECT = 4142 + SYS_FLOCK = 4143 + SYS_MSYNC = 4144 + SYS_READV = 4145 + SYS_WRITEV = 4146 + SYS_CACHEFLUSH = 4147 + SYS_CACHECTL = 4148 + SYS_SYSMIPS = 4149 + SYS_UNUSED150 = 4150 + SYS_GETSID = 4151 + SYS_FDATASYNC = 4152 + SYS__SYSCTL = 4153 + SYS_MLOCK = 4154 + SYS_MUNLOCK = 4155 + SYS_MLOCKALL = 4156 + SYS_MUNLOCKALL = 4157 + SYS_SCHED_SETPARAM = 4158 + SYS_SCHED_GETPARAM = 4159 + SYS_SCHED_SETSCHEDULER = 4160 + SYS_SCHED_GETSCHEDULER = 4161 + SYS_SCHED_YIELD = 4162 + SYS_SCHED_GET_PRIORITY_MAX = 4163 + SYS_SCHED_GET_PRIORITY_MIN = 4164 + SYS_SCHED_RR_GET_INTERVAL = 4165 + SYS_NANOSLEEP = 4166 + SYS_MREMAP = 4167 + SYS_ACCEPT = 4168 + SYS_BIND = 4169 + SYS_CONNECT = 
4170 + SYS_GETPEERNAME = 4171 + SYS_GETSOCKNAME = 4172 + SYS_GETSOCKOPT = 4173 + SYS_LISTEN = 4174 + SYS_RECV = 4175 + SYS_RECVFROM = 4176 + SYS_RECVMSG = 4177 + SYS_SEND = 4178 + SYS_SENDMSG = 4179 + SYS_SENDTO = 4180 + SYS_SETSOCKOPT = 4181 + SYS_SHUTDOWN = 4182 + SYS_SOCKET = 4183 + SYS_SOCKETPAIR = 4184 + SYS_SETRESUID = 4185 + SYS_GETRESUID = 4186 + SYS_QUERY_MODULE = 4187 + SYS_POLL = 4188 + SYS_NFSSERVCTL = 4189 + SYS_SETRESGID = 4190 + SYS_GETRESGID = 4191 + SYS_PRCTL = 4192 + SYS_RT_SIGRETURN = 4193 + SYS_RT_SIGACTION = 4194 + SYS_RT_SIGPROCMASK = 4195 + SYS_RT_SIGPENDING = 4196 + SYS_RT_SIGTIMEDWAIT = 4197 + SYS_RT_SIGQUEUEINFO = 4198 + SYS_RT_SIGSUSPEND = 4199 + SYS_PREAD64 = 4200 + SYS_PWRITE64 = 4201 + SYS_CHOWN = 4202 + SYS_GETCWD = 4203 + SYS_CAPGET = 4204 + SYS_CAPSET = 4205 + SYS_SIGALTSTACK = 4206 + SYS_SENDFILE = 4207 + SYS_GETPMSG = 4208 + SYS_PUTPMSG = 4209 + SYS_MMAP2 = 4210 + SYS_TRUNCATE64 = 4211 + SYS_FTRUNCATE64 = 4212 + SYS_STAT64 = 4213 + SYS_LSTAT64 = 4214 + SYS_FSTAT64 = 4215 + SYS_PIVOT_ROOT = 4216 + SYS_MINCORE = 4217 + SYS_MADVISE = 4218 + SYS_GETDENTS64 = 4219 + SYS_FCNTL64 = 4220 + SYS_RESERVED221 = 4221 + SYS_GETTID = 4222 + SYS_READAHEAD = 4223 + SYS_SETXATTR = 4224 + SYS_LSETXATTR = 4225 + SYS_FSETXATTR = 4226 + SYS_GETXATTR = 4227 + SYS_LGETXATTR = 4228 + SYS_FGETXATTR = 4229 + SYS_LISTXATTR = 4230 + SYS_LLISTXATTR = 4231 + SYS_FLISTXATTR = 4232 + SYS_REMOVEXATTR = 4233 + SYS_LREMOVEXATTR = 4234 + SYS_FREMOVEXATTR = 4235 + SYS_TKILL = 4236 + SYS_SENDFILE64 = 4237 + SYS_FUTEX = 4238 + SYS_SCHED_SETAFFINITY = 4239 + SYS_SCHED_GETAFFINITY = 4240 + SYS_IO_SETUP = 4241 + SYS_IO_DESTROY = 4242 + SYS_IO_GETEVENTS = 4243 + SYS_IO_SUBMIT = 4244 + SYS_IO_CANCEL = 4245 + SYS_EXIT_GROUP = 4246 + SYS_LOOKUP_DCOOKIE = 4247 + SYS_EPOLL_CREATE = 4248 + SYS_EPOLL_CTL = 4249 + SYS_EPOLL_WAIT = 4250 + SYS_REMAP_FILE_PAGES = 4251 + SYS_SET_TID_ADDRESS = 4252 + SYS_RESTART_SYSCALL = 4253 + SYS_FADVISE64 = 4254 + SYS_STATFS64 = 4255 + SYS_FSTATFS64 = 4256 + SYS_TIMER_CREATE = 4257 + SYS_TIMER_SETTIME = 4258 + SYS_TIMER_GETTIME = 4259 + SYS_TIMER_GETOVERRUN = 4260 + SYS_TIMER_DELETE = 4261 + SYS_CLOCK_SETTIME = 4262 + SYS_CLOCK_GETTIME = 4263 + SYS_CLOCK_GETRES = 4264 + SYS_CLOCK_NANOSLEEP = 4265 + SYS_TGKILL = 4266 + SYS_UTIMES = 4267 + SYS_MBIND = 4268 + SYS_GET_MEMPOLICY = 4269 + SYS_SET_MEMPOLICY = 4270 + SYS_MQ_OPEN = 4271 + SYS_MQ_UNLINK = 4272 + SYS_MQ_TIMEDSEND = 4273 + SYS_MQ_TIMEDRECEIVE = 4274 + SYS_MQ_NOTIFY = 4275 + SYS_MQ_GETSETATTR = 4276 + SYS_VSERVER = 4277 + SYS_WAITID = 4278 + SYS_ADD_KEY = 4280 + SYS_REQUEST_KEY = 4281 + SYS_KEYCTL = 4282 + SYS_SET_THREAD_AREA = 4283 + SYS_INOTIFY_INIT = 4284 + SYS_INOTIFY_ADD_WATCH = 4285 + SYS_INOTIFY_RM_WATCH = 4286 + SYS_MIGRATE_PAGES = 4287 + SYS_OPENAT = 4288 + SYS_MKDIRAT = 4289 + SYS_MKNODAT = 4290 + SYS_FCHOWNAT = 4291 + SYS_FUTIMESAT = 4292 + SYS_FSTATAT64 = 4293 + SYS_UNLINKAT = 4294 + SYS_RENAMEAT = 4295 + SYS_LINKAT = 4296 + SYS_SYMLINKAT = 4297 + SYS_READLINKAT = 4298 + SYS_FCHMODAT = 4299 + SYS_FACCESSAT = 4300 + SYS_PSELECT6 = 4301 + SYS_PPOLL = 4302 + SYS_UNSHARE = 4303 + SYS_SPLICE = 4304 + SYS_SYNC_FILE_RANGE = 4305 + SYS_TEE = 4306 + SYS_VMSPLICE = 4307 + SYS_MOVE_PAGES = 4308 + SYS_SET_ROBUST_LIST = 4309 + SYS_GET_ROBUST_LIST = 4310 + SYS_KEXEC_LOAD = 4311 + SYS_GETCPU = 4312 + SYS_EPOLL_PWAIT = 4313 + SYS_IOPRIO_SET = 4314 + SYS_IOPRIO_GET = 4315 + SYS_UTIMENSAT = 4316 + SYS_SIGNALFD = 4317 + SYS_TIMERFD = 4318 + SYS_EVENTFD = 4319 + SYS_FALLOCATE = 4320 + SYS_TIMERFD_CREATE = 4321 + 
SYS_TIMERFD_GETTIME = 4322 + SYS_TIMERFD_SETTIME = 4323 + SYS_SIGNALFD4 = 4324 + SYS_EVENTFD2 = 4325 + SYS_EPOLL_CREATE1 = 4326 + SYS_DUP3 = 4327 + SYS_PIPE2 = 4328 + SYS_INOTIFY_INIT1 = 4329 + SYS_PREADV = 4330 + SYS_PWRITEV = 4331 + SYS_RT_TGSIGQUEUEINFO = 4332 + SYS_PERF_EVENT_OPEN = 4333 + SYS_ACCEPT4 = 4334 + SYS_RECVMMSG = 4335 + SYS_FANOTIFY_INIT = 4336 + SYS_FANOTIFY_MARK = 4337 + SYS_PRLIMIT64 = 4338 + SYS_NAME_TO_HANDLE_AT = 4339 + SYS_OPEN_BY_HANDLE_AT = 4340 + SYS_CLOCK_ADJTIME = 4341 + SYS_SYNCFS = 4342 + SYS_SENDMMSG = 4343 + SYS_SETNS = 4344 + SYS_PROCESS_VM_READV = 4345 + SYS_PROCESS_VM_WRITEV = 4346 + SYS_KCMP = 4347 + SYS_FINIT_MODULE = 4348 + SYS_SCHED_SETATTR = 4349 + SYS_SCHED_GETATTR = 4350 + SYS_RENAMEAT2 = 4351 + SYS_SECCOMP = 4352 + SYS_GETRANDOM = 4353 + SYS_MEMFD_CREATE = 4354 + SYS_BPF = 4355 + SYS_EXECVEAT = 4356 + SYS_USERFAULTFD = 4357 + SYS_MEMBARRIER = 4358 + SYS_MLOCK2 = 4359 + SYS_COPY_FILE_RANGE = 4360 + SYS_PREADV2 = 4361 + SYS_PWRITEV2 = 4362 + SYS_PKEY_MPROTECT = 4363 + SYS_PKEY_ALLOC = 4364 + SYS_PKEY_FREE = 4365 + SYS_STATX = 4366 + SYS_RSEQ = 4367 + SYS_IO_PGETEVENTS = 4368 + SYS_SEMGET = 4393 + SYS_SEMCTL = 4394 + SYS_SHMGET = 4395 + SYS_SHMCTL = 4396 + SYS_SHMAT = 4397 + SYS_SHMDT = 4398 + SYS_MSGGET = 4399 + SYS_MSGSND = 4400 + SYS_MSGRCV = 4401 + SYS_MSGCTL = 4402 + SYS_CLOCK_GETTIME64 = 4403 + SYS_CLOCK_SETTIME64 = 4404 + SYS_CLOCK_ADJTIME64 = 4405 + SYS_CLOCK_GETRES_TIME64 = 4406 + SYS_CLOCK_NANOSLEEP_TIME64 = 4407 + SYS_TIMER_GETTIME64 = 4408 + SYS_TIMER_SETTIME64 = 4409 + SYS_TIMERFD_GETTIME64 = 4410 + SYS_TIMERFD_SETTIME64 = 4411 + SYS_UTIMENSAT_TIME64 = 4412 + SYS_PSELECT6_TIME64 = 4413 + SYS_PPOLL_TIME64 = 4414 + SYS_IO_PGETEVENTS_TIME64 = 4416 + SYS_RECVMMSG_TIME64 = 4417 + SYS_MQ_TIMEDSEND_TIME64 = 4418 + SYS_MQ_TIMEDRECEIVE_TIME64 = 4419 + SYS_SEMTIMEDOP_TIME64 = 4420 + SYS_RT_SIGTIMEDWAIT_TIME64 = 4421 + SYS_FUTEX_TIME64 = 4422 + SYS_SCHED_RR_GET_INTERVAL_TIME64 = 4423 + SYS_PIDFD_SEND_SIGNAL = 4424 + SYS_IO_URING_SETUP = 4425 + SYS_IO_URING_ENTER = 4426 + SYS_IO_URING_REGISTER = 4427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 40164cacdf5c3..57ec82aac4a4d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -334,4 +334,8 @@ const ( SYS_STATX = 5326 SYS_RSEQ = 5327 SYS_IO_PGETEVENTS = 5328 + SYS_PIDFD_SEND_SIGNAL = 5424 + SYS_IO_URING_SETUP = 5425 + SYS_IO_URING_ENTER = 5426 + SYS_IO_URING_REGISTER = 5427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 8a909738bc0ef..825a3e3b02461 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -334,4 +334,8 @@ const ( SYS_STATX = 5326 SYS_RSEQ = 5327 SYS_IO_PGETEVENTS = 5328 + SYS_PIDFD_SEND_SIGNAL = 5424 + SYS_IO_URING_SETUP = 5425 + SYS_IO_URING_ENTER = 5426 + SYS_IO_URING_REGISTER = 5427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 8d78184224586..f152dfdd05414 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -6,372 +6,406 @@ package unix const ( - SYS_SYSCALL = 4000 - SYS_EXIT = 4001 - SYS_FORK = 4002 - SYS_READ = 4003 - SYS_WRITE = 4004 - SYS_OPEN = 4005 - SYS_CLOSE = 4006 - SYS_WAITPID = 4007 - SYS_CREAT = 4008 
- SYS_LINK = 4009 - SYS_UNLINK = 4010 - SYS_EXECVE = 4011 - SYS_CHDIR = 4012 - SYS_TIME = 4013 - SYS_MKNOD = 4014 - SYS_CHMOD = 4015 - SYS_LCHOWN = 4016 - SYS_BREAK = 4017 - SYS_UNUSED18 = 4018 - SYS_LSEEK = 4019 - SYS_GETPID = 4020 - SYS_MOUNT = 4021 - SYS_UMOUNT = 4022 - SYS_SETUID = 4023 - SYS_GETUID = 4024 - SYS_STIME = 4025 - SYS_PTRACE = 4026 - SYS_ALARM = 4027 - SYS_UNUSED28 = 4028 - SYS_PAUSE = 4029 - SYS_UTIME = 4030 - SYS_STTY = 4031 - SYS_GTTY = 4032 - SYS_ACCESS = 4033 - SYS_NICE = 4034 - SYS_FTIME = 4035 - SYS_SYNC = 4036 - SYS_KILL = 4037 - SYS_RENAME = 4038 - SYS_MKDIR = 4039 - SYS_RMDIR = 4040 - SYS_DUP = 4041 - SYS_PIPE = 4042 - SYS_TIMES = 4043 - SYS_PROF = 4044 - SYS_BRK = 4045 - SYS_SETGID = 4046 - SYS_GETGID = 4047 - SYS_SIGNAL = 4048 - SYS_GETEUID = 4049 - SYS_GETEGID = 4050 - SYS_ACCT = 4051 - SYS_UMOUNT2 = 4052 - SYS_LOCK = 4053 - SYS_IOCTL = 4054 - SYS_FCNTL = 4055 - SYS_MPX = 4056 - SYS_SETPGID = 4057 - SYS_ULIMIT = 4058 - SYS_UNUSED59 = 4059 - SYS_UMASK = 4060 - SYS_CHROOT = 4061 - SYS_USTAT = 4062 - SYS_DUP2 = 4063 - SYS_GETPPID = 4064 - SYS_GETPGRP = 4065 - SYS_SETSID = 4066 - SYS_SIGACTION = 4067 - SYS_SGETMASK = 4068 - SYS_SSETMASK = 4069 - SYS_SETREUID = 4070 - SYS_SETREGID = 4071 - SYS_SIGSUSPEND = 4072 - SYS_SIGPENDING = 4073 - SYS_SETHOSTNAME = 4074 - SYS_SETRLIMIT = 4075 - SYS_GETRLIMIT = 4076 - SYS_GETRUSAGE = 4077 - SYS_GETTIMEOFDAY = 4078 - SYS_SETTIMEOFDAY = 4079 - SYS_GETGROUPS = 4080 - SYS_SETGROUPS = 4081 - SYS_RESERVED82 = 4082 - SYS_SYMLINK = 4083 - SYS_UNUSED84 = 4084 - SYS_READLINK = 4085 - SYS_USELIB = 4086 - SYS_SWAPON = 4087 - SYS_REBOOT = 4088 - SYS_READDIR = 4089 - SYS_MMAP = 4090 - SYS_MUNMAP = 4091 - SYS_TRUNCATE = 4092 - SYS_FTRUNCATE = 4093 - SYS_FCHMOD = 4094 - SYS_FCHOWN = 4095 - SYS_GETPRIORITY = 4096 - SYS_SETPRIORITY = 4097 - SYS_PROFIL = 4098 - SYS_STATFS = 4099 - SYS_FSTATFS = 4100 - SYS_IOPERM = 4101 - SYS_SOCKETCALL = 4102 - SYS_SYSLOG = 4103 - SYS_SETITIMER = 4104 - SYS_GETITIMER = 4105 - SYS_STAT = 4106 - SYS_LSTAT = 4107 - SYS_FSTAT = 4108 - SYS_UNUSED109 = 4109 - SYS_IOPL = 4110 - SYS_VHANGUP = 4111 - SYS_IDLE = 4112 - SYS_VM86 = 4113 - SYS_WAIT4 = 4114 - SYS_SWAPOFF = 4115 - SYS_SYSINFO = 4116 - SYS_IPC = 4117 - SYS_FSYNC = 4118 - SYS_SIGRETURN = 4119 - SYS_CLONE = 4120 - SYS_SETDOMAINNAME = 4121 - SYS_UNAME = 4122 - SYS_MODIFY_LDT = 4123 - SYS_ADJTIMEX = 4124 - SYS_MPROTECT = 4125 - SYS_SIGPROCMASK = 4126 - SYS_CREATE_MODULE = 4127 - SYS_INIT_MODULE = 4128 - SYS_DELETE_MODULE = 4129 - SYS_GET_KERNEL_SYMS = 4130 - SYS_QUOTACTL = 4131 - SYS_GETPGID = 4132 - SYS_FCHDIR = 4133 - SYS_BDFLUSH = 4134 - SYS_SYSFS = 4135 - SYS_PERSONALITY = 4136 - SYS_AFS_SYSCALL = 4137 - SYS_SETFSUID = 4138 - SYS_SETFSGID = 4139 - SYS__LLSEEK = 4140 - SYS_GETDENTS = 4141 - SYS__NEWSELECT = 4142 - SYS_FLOCK = 4143 - SYS_MSYNC = 4144 - SYS_READV = 4145 - SYS_WRITEV = 4146 - SYS_CACHEFLUSH = 4147 - SYS_CACHECTL = 4148 - SYS_SYSMIPS = 4149 - SYS_UNUSED150 = 4150 - SYS_GETSID = 4151 - SYS_FDATASYNC = 4152 - SYS__SYSCTL = 4153 - SYS_MLOCK = 4154 - SYS_MUNLOCK = 4155 - SYS_MLOCKALL = 4156 - SYS_MUNLOCKALL = 4157 - SYS_SCHED_SETPARAM = 4158 - SYS_SCHED_GETPARAM = 4159 - SYS_SCHED_SETSCHEDULER = 4160 - SYS_SCHED_GETSCHEDULER = 4161 - SYS_SCHED_YIELD = 4162 - SYS_SCHED_GET_PRIORITY_MAX = 4163 - SYS_SCHED_GET_PRIORITY_MIN = 4164 - SYS_SCHED_RR_GET_INTERVAL = 4165 - SYS_NANOSLEEP = 4166 - SYS_MREMAP = 4167 - SYS_ACCEPT = 4168 - SYS_BIND = 4169 - SYS_CONNECT = 4170 - SYS_GETPEERNAME = 4171 - SYS_GETSOCKNAME = 4172 - SYS_GETSOCKOPT = 4173 - SYS_LISTEN = 4174 
- SYS_RECV = 4175 - SYS_RECVFROM = 4176 - SYS_RECVMSG = 4177 - SYS_SEND = 4178 - SYS_SENDMSG = 4179 - SYS_SENDTO = 4180 - SYS_SETSOCKOPT = 4181 - SYS_SHUTDOWN = 4182 - SYS_SOCKET = 4183 - SYS_SOCKETPAIR = 4184 - SYS_SETRESUID = 4185 - SYS_GETRESUID = 4186 - SYS_QUERY_MODULE = 4187 - SYS_POLL = 4188 - SYS_NFSSERVCTL = 4189 - SYS_SETRESGID = 4190 - SYS_GETRESGID = 4191 - SYS_PRCTL = 4192 - SYS_RT_SIGRETURN = 4193 - SYS_RT_SIGACTION = 4194 - SYS_RT_SIGPROCMASK = 4195 - SYS_RT_SIGPENDING = 4196 - SYS_RT_SIGTIMEDWAIT = 4197 - SYS_RT_SIGQUEUEINFO = 4198 - SYS_RT_SIGSUSPEND = 4199 - SYS_PREAD64 = 4200 - SYS_PWRITE64 = 4201 - SYS_CHOWN = 4202 - SYS_GETCWD = 4203 - SYS_CAPGET = 4204 - SYS_CAPSET = 4205 - SYS_SIGALTSTACK = 4206 - SYS_SENDFILE = 4207 - SYS_GETPMSG = 4208 - SYS_PUTPMSG = 4209 - SYS_MMAP2 = 4210 - SYS_TRUNCATE64 = 4211 - SYS_FTRUNCATE64 = 4212 - SYS_STAT64 = 4213 - SYS_LSTAT64 = 4214 - SYS_FSTAT64 = 4215 - SYS_PIVOT_ROOT = 4216 - SYS_MINCORE = 4217 - SYS_MADVISE = 4218 - SYS_GETDENTS64 = 4219 - SYS_FCNTL64 = 4220 - SYS_RESERVED221 = 4221 - SYS_GETTID = 4222 - SYS_READAHEAD = 4223 - SYS_SETXATTR = 4224 - SYS_LSETXATTR = 4225 - SYS_FSETXATTR = 4226 - SYS_GETXATTR = 4227 - SYS_LGETXATTR = 4228 - SYS_FGETXATTR = 4229 - SYS_LISTXATTR = 4230 - SYS_LLISTXATTR = 4231 - SYS_FLISTXATTR = 4232 - SYS_REMOVEXATTR = 4233 - SYS_LREMOVEXATTR = 4234 - SYS_FREMOVEXATTR = 4235 - SYS_TKILL = 4236 - SYS_SENDFILE64 = 4237 - SYS_FUTEX = 4238 - SYS_SCHED_SETAFFINITY = 4239 - SYS_SCHED_GETAFFINITY = 4240 - SYS_IO_SETUP = 4241 - SYS_IO_DESTROY = 4242 - SYS_IO_GETEVENTS = 4243 - SYS_IO_SUBMIT = 4244 - SYS_IO_CANCEL = 4245 - SYS_EXIT_GROUP = 4246 - SYS_LOOKUP_DCOOKIE = 4247 - SYS_EPOLL_CREATE = 4248 - SYS_EPOLL_CTL = 4249 - SYS_EPOLL_WAIT = 4250 - SYS_REMAP_FILE_PAGES = 4251 - SYS_SET_TID_ADDRESS = 4252 - SYS_RESTART_SYSCALL = 4253 - SYS_FADVISE64 = 4254 - SYS_STATFS64 = 4255 - SYS_FSTATFS64 = 4256 - SYS_TIMER_CREATE = 4257 - SYS_TIMER_SETTIME = 4258 - SYS_TIMER_GETTIME = 4259 - SYS_TIMER_GETOVERRUN = 4260 - SYS_TIMER_DELETE = 4261 - SYS_CLOCK_SETTIME = 4262 - SYS_CLOCK_GETTIME = 4263 - SYS_CLOCK_GETRES = 4264 - SYS_CLOCK_NANOSLEEP = 4265 - SYS_TGKILL = 4266 - SYS_UTIMES = 4267 - SYS_MBIND = 4268 - SYS_GET_MEMPOLICY = 4269 - SYS_SET_MEMPOLICY = 4270 - SYS_MQ_OPEN = 4271 - SYS_MQ_UNLINK = 4272 - SYS_MQ_TIMEDSEND = 4273 - SYS_MQ_TIMEDRECEIVE = 4274 - SYS_MQ_NOTIFY = 4275 - SYS_MQ_GETSETATTR = 4276 - SYS_VSERVER = 4277 - SYS_WAITID = 4278 - SYS_ADD_KEY = 4280 - SYS_REQUEST_KEY = 4281 - SYS_KEYCTL = 4282 - SYS_SET_THREAD_AREA = 4283 - SYS_INOTIFY_INIT = 4284 - SYS_INOTIFY_ADD_WATCH = 4285 - SYS_INOTIFY_RM_WATCH = 4286 - SYS_MIGRATE_PAGES = 4287 - SYS_OPENAT = 4288 - SYS_MKDIRAT = 4289 - SYS_MKNODAT = 4290 - SYS_FCHOWNAT = 4291 - SYS_FUTIMESAT = 4292 - SYS_FSTATAT64 = 4293 - SYS_UNLINKAT = 4294 - SYS_RENAMEAT = 4295 - SYS_LINKAT = 4296 - SYS_SYMLINKAT = 4297 - SYS_READLINKAT = 4298 - SYS_FCHMODAT = 4299 - SYS_FACCESSAT = 4300 - SYS_PSELECT6 = 4301 - SYS_PPOLL = 4302 - SYS_UNSHARE = 4303 - SYS_SPLICE = 4304 - SYS_SYNC_FILE_RANGE = 4305 - SYS_TEE = 4306 - SYS_VMSPLICE = 4307 - SYS_MOVE_PAGES = 4308 - SYS_SET_ROBUST_LIST = 4309 - SYS_GET_ROBUST_LIST = 4310 - SYS_KEXEC_LOAD = 4311 - SYS_GETCPU = 4312 - SYS_EPOLL_PWAIT = 4313 - SYS_IOPRIO_SET = 4314 - SYS_IOPRIO_GET = 4315 - SYS_UTIMENSAT = 4316 - SYS_SIGNALFD = 4317 - SYS_TIMERFD = 4318 - SYS_EVENTFD = 4319 - SYS_FALLOCATE = 4320 - SYS_TIMERFD_CREATE = 4321 - SYS_TIMERFD_GETTIME = 4322 - SYS_TIMERFD_SETTIME = 4323 - SYS_SIGNALFD4 = 4324 - SYS_EVENTFD2 = 4325 - 
SYS_EPOLL_CREATE1 = 4326 - SYS_DUP3 = 4327 - SYS_PIPE2 = 4328 - SYS_INOTIFY_INIT1 = 4329 - SYS_PREADV = 4330 - SYS_PWRITEV = 4331 - SYS_RT_TGSIGQUEUEINFO = 4332 - SYS_PERF_EVENT_OPEN = 4333 - SYS_ACCEPT4 = 4334 - SYS_RECVMMSG = 4335 - SYS_FANOTIFY_INIT = 4336 - SYS_FANOTIFY_MARK = 4337 - SYS_PRLIMIT64 = 4338 - SYS_NAME_TO_HANDLE_AT = 4339 - SYS_OPEN_BY_HANDLE_AT = 4340 - SYS_CLOCK_ADJTIME = 4341 - SYS_SYNCFS = 4342 - SYS_SENDMMSG = 4343 - SYS_SETNS = 4344 - SYS_PROCESS_VM_READV = 4345 - SYS_PROCESS_VM_WRITEV = 4346 - SYS_KCMP = 4347 - SYS_FINIT_MODULE = 4348 - SYS_SCHED_SETATTR = 4349 - SYS_SCHED_GETATTR = 4350 - SYS_RENAMEAT2 = 4351 - SYS_SECCOMP = 4352 - SYS_GETRANDOM = 4353 - SYS_MEMFD_CREATE = 4354 - SYS_BPF = 4355 - SYS_EXECVEAT = 4356 - SYS_USERFAULTFD = 4357 - SYS_MEMBARRIER = 4358 - SYS_MLOCK2 = 4359 - SYS_COPY_FILE_RANGE = 4360 - SYS_PREADV2 = 4361 - SYS_PWRITEV2 = 4362 - SYS_PKEY_MPROTECT = 4363 - SYS_PKEY_ALLOC = 4364 - SYS_PKEY_FREE = 4365 - SYS_STATX = 4366 - SYS_RSEQ = 4367 - SYS_IO_PGETEVENTS = 4368 + SYS_SYSCALL = 4000 + SYS_EXIT = 4001 + SYS_FORK = 4002 + SYS_READ = 4003 + SYS_WRITE = 4004 + SYS_OPEN = 4005 + SYS_CLOSE = 4006 + SYS_WAITPID = 4007 + SYS_CREAT = 4008 + SYS_LINK = 4009 + SYS_UNLINK = 4010 + SYS_EXECVE = 4011 + SYS_CHDIR = 4012 + SYS_TIME = 4013 + SYS_MKNOD = 4014 + SYS_CHMOD = 4015 + SYS_LCHOWN = 4016 + SYS_BREAK = 4017 + SYS_UNUSED18 = 4018 + SYS_LSEEK = 4019 + SYS_GETPID = 4020 + SYS_MOUNT = 4021 + SYS_UMOUNT = 4022 + SYS_SETUID = 4023 + SYS_GETUID = 4024 + SYS_STIME = 4025 + SYS_PTRACE = 4026 + SYS_ALARM = 4027 + SYS_UNUSED28 = 4028 + SYS_PAUSE = 4029 + SYS_UTIME = 4030 + SYS_STTY = 4031 + SYS_GTTY = 4032 + SYS_ACCESS = 4033 + SYS_NICE = 4034 + SYS_FTIME = 4035 + SYS_SYNC = 4036 + SYS_KILL = 4037 + SYS_RENAME = 4038 + SYS_MKDIR = 4039 + SYS_RMDIR = 4040 + SYS_DUP = 4041 + SYS_PIPE = 4042 + SYS_TIMES = 4043 + SYS_PROF = 4044 + SYS_BRK = 4045 + SYS_SETGID = 4046 + SYS_GETGID = 4047 + SYS_SIGNAL = 4048 + SYS_GETEUID = 4049 + SYS_GETEGID = 4050 + SYS_ACCT = 4051 + SYS_UMOUNT2 = 4052 + SYS_LOCK = 4053 + SYS_IOCTL = 4054 + SYS_FCNTL = 4055 + SYS_MPX = 4056 + SYS_SETPGID = 4057 + SYS_ULIMIT = 4058 + SYS_UNUSED59 = 4059 + SYS_UMASK = 4060 + SYS_CHROOT = 4061 + SYS_USTAT = 4062 + SYS_DUP2 = 4063 + SYS_GETPPID = 4064 + SYS_GETPGRP = 4065 + SYS_SETSID = 4066 + SYS_SIGACTION = 4067 + SYS_SGETMASK = 4068 + SYS_SSETMASK = 4069 + SYS_SETREUID = 4070 + SYS_SETREGID = 4071 + SYS_SIGSUSPEND = 4072 + SYS_SIGPENDING = 4073 + SYS_SETHOSTNAME = 4074 + SYS_SETRLIMIT = 4075 + SYS_GETRLIMIT = 4076 + SYS_GETRUSAGE = 4077 + SYS_GETTIMEOFDAY = 4078 + SYS_SETTIMEOFDAY = 4079 + SYS_GETGROUPS = 4080 + SYS_SETGROUPS = 4081 + SYS_RESERVED82 = 4082 + SYS_SYMLINK = 4083 + SYS_UNUSED84 = 4084 + SYS_READLINK = 4085 + SYS_USELIB = 4086 + SYS_SWAPON = 4087 + SYS_REBOOT = 4088 + SYS_READDIR = 4089 + SYS_MMAP = 4090 + SYS_MUNMAP = 4091 + SYS_TRUNCATE = 4092 + SYS_FTRUNCATE = 4093 + SYS_FCHMOD = 4094 + SYS_FCHOWN = 4095 + SYS_GETPRIORITY = 4096 + SYS_SETPRIORITY = 4097 + SYS_PROFIL = 4098 + SYS_STATFS = 4099 + SYS_FSTATFS = 4100 + SYS_IOPERM = 4101 + SYS_SOCKETCALL = 4102 + SYS_SYSLOG = 4103 + SYS_SETITIMER = 4104 + SYS_GETITIMER = 4105 + SYS_STAT = 4106 + SYS_LSTAT = 4107 + SYS_FSTAT = 4108 + SYS_UNUSED109 = 4109 + SYS_IOPL = 4110 + SYS_VHANGUP = 4111 + SYS_IDLE = 4112 + SYS_VM86 = 4113 + SYS_WAIT4 = 4114 + SYS_SWAPOFF = 4115 + SYS_SYSINFO = 4116 + SYS_IPC = 4117 + SYS_FSYNC = 4118 + SYS_SIGRETURN = 4119 + SYS_CLONE = 4120 + SYS_SETDOMAINNAME = 4121 + SYS_UNAME = 4122 + SYS_MODIFY_LDT = 4123 + 
SYS_ADJTIMEX = 4124 + SYS_MPROTECT = 4125 + SYS_SIGPROCMASK = 4126 + SYS_CREATE_MODULE = 4127 + SYS_INIT_MODULE = 4128 + SYS_DELETE_MODULE = 4129 + SYS_GET_KERNEL_SYMS = 4130 + SYS_QUOTACTL = 4131 + SYS_GETPGID = 4132 + SYS_FCHDIR = 4133 + SYS_BDFLUSH = 4134 + SYS_SYSFS = 4135 + SYS_PERSONALITY = 4136 + SYS_AFS_SYSCALL = 4137 + SYS_SETFSUID = 4138 + SYS_SETFSGID = 4139 + SYS__LLSEEK = 4140 + SYS_GETDENTS = 4141 + SYS__NEWSELECT = 4142 + SYS_FLOCK = 4143 + SYS_MSYNC = 4144 + SYS_READV = 4145 + SYS_WRITEV = 4146 + SYS_CACHEFLUSH = 4147 + SYS_CACHECTL = 4148 + SYS_SYSMIPS = 4149 + SYS_UNUSED150 = 4150 + SYS_GETSID = 4151 + SYS_FDATASYNC = 4152 + SYS__SYSCTL = 4153 + SYS_MLOCK = 4154 + SYS_MUNLOCK = 4155 + SYS_MLOCKALL = 4156 + SYS_MUNLOCKALL = 4157 + SYS_SCHED_SETPARAM = 4158 + SYS_SCHED_GETPARAM = 4159 + SYS_SCHED_SETSCHEDULER = 4160 + SYS_SCHED_GETSCHEDULER = 4161 + SYS_SCHED_YIELD = 4162 + SYS_SCHED_GET_PRIORITY_MAX = 4163 + SYS_SCHED_GET_PRIORITY_MIN = 4164 + SYS_SCHED_RR_GET_INTERVAL = 4165 + SYS_NANOSLEEP = 4166 + SYS_MREMAP = 4167 + SYS_ACCEPT = 4168 + SYS_BIND = 4169 + SYS_CONNECT = 4170 + SYS_GETPEERNAME = 4171 + SYS_GETSOCKNAME = 4172 + SYS_GETSOCKOPT = 4173 + SYS_LISTEN = 4174 + SYS_RECV = 4175 + SYS_RECVFROM = 4176 + SYS_RECVMSG = 4177 + SYS_SEND = 4178 + SYS_SENDMSG = 4179 + SYS_SENDTO = 4180 + SYS_SETSOCKOPT = 4181 + SYS_SHUTDOWN = 4182 + SYS_SOCKET = 4183 + SYS_SOCKETPAIR = 4184 + SYS_SETRESUID = 4185 + SYS_GETRESUID = 4186 + SYS_QUERY_MODULE = 4187 + SYS_POLL = 4188 + SYS_NFSSERVCTL = 4189 + SYS_SETRESGID = 4190 + SYS_GETRESGID = 4191 + SYS_PRCTL = 4192 + SYS_RT_SIGRETURN = 4193 + SYS_RT_SIGACTION = 4194 + SYS_RT_SIGPROCMASK = 4195 + SYS_RT_SIGPENDING = 4196 + SYS_RT_SIGTIMEDWAIT = 4197 + SYS_RT_SIGQUEUEINFO = 4198 + SYS_RT_SIGSUSPEND = 4199 + SYS_PREAD64 = 4200 + SYS_PWRITE64 = 4201 + SYS_CHOWN = 4202 + SYS_GETCWD = 4203 + SYS_CAPGET = 4204 + SYS_CAPSET = 4205 + SYS_SIGALTSTACK = 4206 + SYS_SENDFILE = 4207 + SYS_GETPMSG = 4208 + SYS_PUTPMSG = 4209 + SYS_MMAP2 = 4210 + SYS_TRUNCATE64 = 4211 + SYS_FTRUNCATE64 = 4212 + SYS_STAT64 = 4213 + SYS_LSTAT64 = 4214 + SYS_FSTAT64 = 4215 + SYS_PIVOT_ROOT = 4216 + SYS_MINCORE = 4217 + SYS_MADVISE = 4218 + SYS_GETDENTS64 = 4219 + SYS_FCNTL64 = 4220 + SYS_RESERVED221 = 4221 + SYS_GETTID = 4222 + SYS_READAHEAD = 4223 + SYS_SETXATTR = 4224 + SYS_LSETXATTR = 4225 + SYS_FSETXATTR = 4226 + SYS_GETXATTR = 4227 + SYS_LGETXATTR = 4228 + SYS_FGETXATTR = 4229 + SYS_LISTXATTR = 4230 + SYS_LLISTXATTR = 4231 + SYS_FLISTXATTR = 4232 + SYS_REMOVEXATTR = 4233 + SYS_LREMOVEXATTR = 4234 + SYS_FREMOVEXATTR = 4235 + SYS_TKILL = 4236 + SYS_SENDFILE64 = 4237 + SYS_FUTEX = 4238 + SYS_SCHED_SETAFFINITY = 4239 + SYS_SCHED_GETAFFINITY = 4240 + SYS_IO_SETUP = 4241 + SYS_IO_DESTROY = 4242 + SYS_IO_GETEVENTS = 4243 + SYS_IO_SUBMIT = 4244 + SYS_IO_CANCEL = 4245 + SYS_EXIT_GROUP = 4246 + SYS_LOOKUP_DCOOKIE = 4247 + SYS_EPOLL_CREATE = 4248 + SYS_EPOLL_CTL = 4249 + SYS_EPOLL_WAIT = 4250 + SYS_REMAP_FILE_PAGES = 4251 + SYS_SET_TID_ADDRESS = 4252 + SYS_RESTART_SYSCALL = 4253 + SYS_FADVISE64 = 4254 + SYS_STATFS64 = 4255 + SYS_FSTATFS64 = 4256 + SYS_TIMER_CREATE = 4257 + SYS_TIMER_SETTIME = 4258 + SYS_TIMER_GETTIME = 4259 + SYS_TIMER_GETOVERRUN = 4260 + SYS_TIMER_DELETE = 4261 + SYS_CLOCK_SETTIME = 4262 + SYS_CLOCK_GETTIME = 4263 + SYS_CLOCK_GETRES = 4264 + SYS_CLOCK_NANOSLEEP = 4265 + SYS_TGKILL = 4266 + SYS_UTIMES = 4267 + SYS_MBIND = 4268 + SYS_GET_MEMPOLICY = 4269 + SYS_SET_MEMPOLICY = 4270 + SYS_MQ_OPEN = 4271 + SYS_MQ_UNLINK = 4272 + SYS_MQ_TIMEDSEND = 4273 + 
SYS_MQ_TIMEDRECEIVE = 4274 + SYS_MQ_NOTIFY = 4275 + SYS_MQ_GETSETATTR = 4276 + SYS_VSERVER = 4277 + SYS_WAITID = 4278 + SYS_ADD_KEY = 4280 + SYS_REQUEST_KEY = 4281 + SYS_KEYCTL = 4282 + SYS_SET_THREAD_AREA = 4283 + SYS_INOTIFY_INIT = 4284 + SYS_INOTIFY_ADD_WATCH = 4285 + SYS_INOTIFY_RM_WATCH = 4286 + SYS_MIGRATE_PAGES = 4287 + SYS_OPENAT = 4288 + SYS_MKDIRAT = 4289 + SYS_MKNODAT = 4290 + SYS_FCHOWNAT = 4291 + SYS_FUTIMESAT = 4292 + SYS_FSTATAT64 = 4293 + SYS_UNLINKAT = 4294 + SYS_RENAMEAT = 4295 + SYS_LINKAT = 4296 + SYS_SYMLINKAT = 4297 + SYS_READLINKAT = 4298 + SYS_FCHMODAT = 4299 + SYS_FACCESSAT = 4300 + SYS_PSELECT6 = 4301 + SYS_PPOLL = 4302 + SYS_UNSHARE = 4303 + SYS_SPLICE = 4304 + SYS_SYNC_FILE_RANGE = 4305 + SYS_TEE = 4306 + SYS_VMSPLICE = 4307 + SYS_MOVE_PAGES = 4308 + SYS_SET_ROBUST_LIST = 4309 + SYS_GET_ROBUST_LIST = 4310 + SYS_KEXEC_LOAD = 4311 + SYS_GETCPU = 4312 + SYS_EPOLL_PWAIT = 4313 + SYS_IOPRIO_SET = 4314 + SYS_IOPRIO_GET = 4315 + SYS_UTIMENSAT = 4316 + SYS_SIGNALFD = 4317 + SYS_TIMERFD = 4318 + SYS_EVENTFD = 4319 + SYS_FALLOCATE = 4320 + SYS_TIMERFD_CREATE = 4321 + SYS_TIMERFD_GETTIME = 4322 + SYS_TIMERFD_SETTIME = 4323 + SYS_SIGNALFD4 = 4324 + SYS_EVENTFD2 = 4325 + SYS_EPOLL_CREATE1 = 4326 + SYS_DUP3 = 4327 + SYS_PIPE2 = 4328 + SYS_INOTIFY_INIT1 = 4329 + SYS_PREADV = 4330 + SYS_PWRITEV = 4331 + SYS_RT_TGSIGQUEUEINFO = 4332 + SYS_PERF_EVENT_OPEN = 4333 + SYS_ACCEPT4 = 4334 + SYS_RECVMMSG = 4335 + SYS_FANOTIFY_INIT = 4336 + SYS_FANOTIFY_MARK = 4337 + SYS_PRLIMIT64 = 4338 + SYS_NAME_TO_HANDLE_AT = 4339 + SYS_OPEN_BY_HANDLE_AT = 4340 + SYS_CLOCK_ADJTIME = 4341 + SYS_SYNCFS = 4342 + SYS_SENDMMSG = 4343 + SYS_SETNS = 4344 + SYS_PROCESS_VM_READV = 4345 + SYS_PROCESS_VM_WRITEV = 4346 + SYS_KCMP = 4347 + SYS_FINIT_MODULE = 4348 + SYS_SCHED_SETATTR = 4349 + SYS_SCHED_GETATTR = 4350 + SYS_RENAMEAT2 = 4351 + SYS_SECCOMP = 4352 + SYS_GETRANDOM = 4353 + SYS_MEMFD_CREATE = 4354 + SYS_BPF = 4355 + SYS_EXECVEAT = 4356 + SYS_USERFAULTFD = 4357 + SYS_MEMBARRIER = 4358 + SYS_MLOCK2 = 4359 + SYS_COPY_FILE_RANGE = 4360 + SYS_PREADV2 = 4361 + SYS_PWRITEV2 = 4362 + SYS_PKEY_MPROTECT = 4363 + SYS_PKEY_ALLOC = 4364 + SYS_PKEY_FREE = 4365 + SYS_STATX = 4366 + SYS_RSEQ = 4367 + SYS_IO_PGETEVENTS = 4368 + SYS_SEMGET = 4393 + SYS_SEMCTL = 4394 + SYS_SHMGET = 4395 + SYS_SHMCTL = 4396 + SYS_SHMAT = 4397 + SYS_SHMDT = 4398 + SYS_MSGGET = 4399 + SYS_MSGSND = 4400 + SYS_MSGRCV = 4401 + SYS_MSGCTL = 4402 + SYS_CLOCK_GETTIME64 = 4403 + SYS_CLOCK_SETTIME64 = 4404 + SYS_CLOCK_ADJTIME64 = 4405 + SYS_CLOCK_GETRES_TIME64 = 4406 + SYS_CLOCK_NANOSLEEP_TIME64 = 4407 + SYS_TIMER_GETTIME64 = 4408 + SYS_TIMER_SETTIME64 = 4409 + SYS_TIMERFD_GETTIME64 = 4410 + SYS_TIMERFD_SETTIME64 = 4411 + SYS_UTIMENSAT_TIME64 = 4412 + SYS_PSELECT6_TIME64 = 4413 + SYS_PPOLL_TIME64 = 4414 + SYS_IO_PGETEVENTS_TIME64 = 4416 + SYS_RECVMMSG_TIME64 = 4417 + SYS_MQ_TIMEDSEND_TIME64 = 4418 + SYS_MQ_TIMEDRECEIVE_TIME64 = 4419 + SYS_SEMTIMEDOP_TIME64 = 4420 + SYS_RT_SIGTIMEDWAIT_TIME64 = 4421 + SYS_FUTEX_TIME64 = 4422 + SYS_SCHED_RR_GET_INTERVAL_TIME64 = 4423 + SYS_PIDFD_SEND_SIGNAL = 4424 + SYS_IO_URING_SETUP = 4425 + SYS_IO_URING_ENTER = 4426 + SYS_IO_URING_REGISTER = 4427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index ec5bde3d56345..7cbe78b196ea0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -372,4 +372,19 @@ const ( SYS_PKEY_MPROTECT = 386 SYS_RSEQ = 387 SYS_IO_PGETEVENTS = 388 + 
SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index bdbabdbcdb179..51a2f1236ae2e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -372,4 +372,19 @@ const ( SYS_PKEY_MPROTECT = 386 SYS_RSEQ = 387 SYS_IO_PGETEVENTS = 388 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 2c8c46a2fc15b..323432ae3011b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -285,4 +285,8 @@ const ( SYS_IO_PGETEVENTS = 292 SYS_RSEQ = 293 SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 6eb7c257f8caf..9dca974849b92 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -334,4 +334,22 @@ const ( SYS_KEXEC_FILE_LOAD = 381 SYS_IO_PGETEVENTS = 382 SYS_RSEQ = 383 + SYS_PKEY_MPROTECT = 384 + SYS_PKEY_ALLOC = 385 + SYS_PKEY_FREE = 386 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 6ed306370a5e5..d3da46f0de738 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -348,4 +348,23 @@ const ( SYS_PWRITEV2 = 359 SYS_STATX = 360 SYS_IO_PGETEVENTS = 361 + SYS_PKEY_MPROTECT = 362 + SYS_PKEY_ALLOC = 363 + SYS_PKEY_FREE = 364 + SYS_RSEQ = 365 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go new file mode 100644 index 0000000000000..fe2b689b6375b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -0,0 +1,217 @@ +// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build arm64,openbsd + +package unix + +const ( + SYS_EXIT = 1 // { void sys_exit(int rval); } + SYS_FORK = 2 // { int sys_fork(void); } + SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int sys_open(const char *path, int flags, ... mode_t mode); } + SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } + SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, size_t psize); } + SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int sys_unlink(const char *path); } + SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_CHDIR = 12 // { int sys_chdir(const char *path); } + SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } + SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, dev_t dev); } + SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, gid_t gid); } + SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break + SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } + SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, struct rusage *rusage); } + SYS_GETPID = 20 // { pid_t sys_getpid(void); } + SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, int flags, void *data); } + SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int sys_setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t sys_getuid(void); } + SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } + SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } + SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } + SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } + SYS_SYNC = 36 // { void sys_sync(void); } + SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } + SYS_GETPPID = 39 // { pid_t sys_getppid(void); } + SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } + SYS_DUP = 41 // { int sys_dup(int fd); } + SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, struct stat *buf, int flag); } + SYS_GETEGID = 43 // { gid_t sys_getegid(void); } + SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_SIGACTION = 46 // { int sys_sigaction(int signum, const struct sigaction *nsa, struct sigaction *osa); } + SYS_GETGID = 47 // { gid_t sys_getgid(void); } + SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } + SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } + 
SYS_ACCT = 51 // { int sys_acct(const char *path); } + SYS_SIGPENDING = 52 // { int sys_sigpending(void); } + SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } + SYS_IOCTL = 54 // { int sys_ioctl(int fd, u_long com, ... void *data); } + SYS_REBOOT = 55 // { int sys_reboot(int opt); } + SYS_REVOKE = 56 // { int sys_revoke(const char *path); } + SYS_SYMLINK = 57 // { int sys_symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int sys_execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } + SYS_CHROOT = 61 // { int sys_chroot(const char *path); } + SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, int flags); } + SYS_STATFS = 63 // { int sys_statfs(const char *path, struct statfs *buf); } + SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, struct statfs *buf); } + SYS_VFORK = 66 // { int sys_vfork(void); } + SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, const struct timezone *tzp); } + SYS_SETITIMER = 69 // { int sys_setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 70 // { int sys_getitimer(int which, struct itimerval *itv); } + SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_KEVENT = 72 // { int sys_kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, int behav); } + SYS_UTIMES = 76 // { int sys_utimes(const char *path, const struct timeval *tptr); } + SYS_FUTIMES = 77 // { int sys_futimes(int fd, const struct timeval *tptr); } + SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int sys_getpgrp(void); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } + SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, const struct timespec *timeout, uint32_t *g); } + SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, const struct timespec *times, int flag); } + SYS_FUTIMENS = 85 // { int sys_futimens(int fd, const struct timespec *times); } + SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, size_t psize, int64_t proc_cookie); } + SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } + SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... 
void *arg); } + SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, socklen_t *anamelen, int flags); } + SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, clockid_t clock_id, const struct timespec *tp, void *lock, const int *abort); } + SYS_FSYNC = 95 // { int sys_fsync(int fd); } + SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } + SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } + SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } + SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } + SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } + SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, u_int flags, int atflags); } + SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, const char *execpromises); } + SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, int flags); } + SYS_UNVEIL = 114 // { int sys_unveil(const char *path, const char *permissions); } + SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } + SYS_READV = 120 // { ssize_t sys_readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_KILL = 122 // { int sys_kill(int pid, int signum); } + SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } + SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } + SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); } + SYS_SETSID = 147 // { int sys_setsid(void); } + SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, int uid, char *arg); } + SYS_NFSSVC 
= 155 // { int sys_nfssvc(int flag, void *argp); } + SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } + SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } + SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } + SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } + SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } + SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } + SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } + SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, off_t length); } + SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } + SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } + SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } + SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, size_t len); } + SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } + SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } + SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, int inherit); } + SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_ISSETUGID = 253 // { int sys_issetugid(void); } + SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); } + SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } + SYS_PIPE = 263 // { int sys_pipe(int *fdp); } + SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } + SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_KQUEUE = 269 // { int sys_kqueue(void); } + SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } + SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } + SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_MQUERY = 286 // { void 
*sys_mquery(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } + SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, struct sigaltstack *oss); } + SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } + SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, struct stat *sb); } + SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } + SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } + SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, int n); } + SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); } + SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, siginfo_t *info, const struct timespec *timeout); } + SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } + SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, int64_t *oldfreq); } + SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } + SYS_GETRTABLE = 311 // { int sys_getrtable(void); } + SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, uid_t uid, gid_t gid, int flag); } + SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, const char *path2, int flag); } + SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, mode_t mode); } + SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, mode_t mode, dev_t dev); } + SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, ... 
mode_t mode); } + SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, char *buf, size_t count); } + SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, const char *link); } + SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, int flag); } + SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } + SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index cedc9b0f26d73..2c1f815e6f92c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -30,11 +30,6 @@ type Timespec struct { Nsec int32 } -type StTimespec struct { - Sec int32 - Nsec int32 -} - type Timeval struct { Sec int32 Usec int32 @@ -101,9 +96,9 @@ type Stat_t struct { Gid uint32 Rdev uint32 Size int32 - Atim StTimespec - Mtim StTimespec - Ctim StTimespec + Atim Timespec + Mtim Timespec + Ctim Timespec Blksize int32 Blocks int32 Vfstype int32 @@ -148,6 +143,17 @@ type RawSockaddrUnix struct { Path [1023]uint8 } +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [120]uint8 +} + type RawSockaddr struct { Len uint8 Family uint8 @@ -207,17 +213,18 @@ type Msghdr struct { } const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x404 - SizeofSockaddrUnix = 0x401 - SizeofLinger = 0x8 - SizeofIPMreq = 0x8 - SizeofIPv6Mreq = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x404 + SizeofSockaddrUnix = 0x401 + SizeofSockaddrDatalink = 0x80 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofICMPv6Filter = 0x20 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index f46482d272e62..b4a069ecbdff0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -30,12 +30,6 @@ type Timespec struct { Nsec int64 } -type StTimespec struct { - Sec int64 - Nsec int32 - _ [4]byte -} - type Timeval struct { Sec int64 Usec int32 @@ -103,10 +97,9 @@ type Stat_t struct { Gid uint32 Rdev uint64 Ssize int32 - _ [4]byte - Atim StTimespec - Mtim StTimespec - Ctim StTimespec + Atim Timespec + Mtim Timespec + Ctim Timespec Blksize int64 Blocks int64 Vfstype int32 @@ -154,6 +147,17 @@ type RawSockaddrUnix struct { Path [1023]uint8 } +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [120]uint8 +} + type RawSockaddr struct { Len uint8 Family uint8 @@ -205,27 +209,26 @@ type Linger struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Control *byte Controllen uint32 Flags int32 } const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x404 - SizeofSockaddrUnix = 0x401 - SizeofLinger = 0x8 - SizeofIPMreq = 0x8 - SizeofIPv6Mreq = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 
0x404 + SizeofSockaddrUnix = 0x401 + SizeofSockaddrDatalink = 0x80 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofICMPv6Filter = 0x20 ) const ( @@ -339,7 +342,6 @@ type Statfs_t struct { Ffree uint64 Fsid Fsid64_t Vfstype int32 - _ [4]byte Fsize uint64 Vfsnumber int32 Vfsoff int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index cefe2f8eaeb10..9f47b87c507a8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -59,24 +59,24 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index c6bb883c3962d..966798a870976 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -63,25 +63,25 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - _ [4]byte - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + _ [4]byte + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 94c23bc2d4ae8..4fe4c9cd73e6e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -60,24 +60,24 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index c82a930cdc59b..21999e4b0a2a3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -63,25 +63,25 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev int32 - 
Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - _ [4]byte - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + _ [4]byte + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 7b34e2e2c6868..c206f2b053424 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -57,25 +57,25 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Ino uint64 - Nlink uint32 - Dev uint32 - Mode uint16 - Padding1 uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare1 int64 - Qspare2 int64 + Ino uint64 + Nlink uint32 + Dev uint32 + Mode uint16 + _1 uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare1 int64 + Qspare2 int64 } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index c146c1ad35441..0edc5409a014f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -62,50 +62,50 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Nlink uint64 - Mode uint16 - _0 int16 - Uid uint32 - Gid uint32 - _1 int32 - Rdev uint64 - Atim_ext int32 - Atim Timespec - Mtim_ext int32 - Mtim Timespec - Ctim_ext int32 - Ctim Timespec - Btim_ext int32 - Birthtim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint64 - Spare [10]uint64 + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + _ int32 + Atim Timespec + _ int32 + Mtim Timespec + _ int32 + Ctim Timespec + _ int32 + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 } type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Birthtim Timespec - _ [8]byte + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Btim Timespec + _ [8]byte } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index ac33a8dd4a6c2..8881ce8428ffc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -62,45 +62,45 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Nlink uint64 - Mode uint16 - _0 int16 - Uid uint32 - Gid uint32 - _1 int32 - Rdev uint64 - Atim 
Timespec - Mtim Timespec - Ctim Timespec - Birthtim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint64 - Spare [10]uint64 + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 } type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Birthtim Timespec + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Btim Timespec } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index e27511a642f11..fc713999cc58e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -64,45 +64,45 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Nlink uint64 - Mode uint16 - _0 int16 - Uid uint32 - Gid uint32 - _1 int32 - Rdev uint64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Birthtim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint64 - Spare [10]uint64 + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 } type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Birthtim Timespec + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Btim Timespec } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 2aadc1a4d8ffd..5a0753ee4b8e8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -62,45 +62,45 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Nlink uint64 - Mode uint16 - _0 int16 - Uid uint32 - Gid uint32 - _1 int32 - Rdev uint64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Birthtim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint64 - Spare [10]uint64 + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 } type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - 
Birthtim Timespec + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Btim Timespec } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index a908f259e7d7c..06e3a3f4dea8c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -829,6 +829,8 @@ type Sigset_t struct { Val [32]uint32 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1452,6 +1454,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2133,3 +2150,320 @@ type FanotifyResponse struct { Fd int32 Response uint32 } + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize 
uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + 
BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index e63fa7415a1a8..cef25e732dae4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -842,6 +842,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1464,6 +1466,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2146,3 +2163,320 @@ type FanotifyResponse struct { Fd int32 Response uint32 } + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen 
uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 
0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 34e4e6db07874..c4369361e3452 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -818,6 +818,8 @@ type Sigset_t struct { Val [32]uint32 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1442,6 +1444,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2124,3 +2141,320 @@ type FanotifyResponse struct { Fd int32 Response uint32 } + +const 
( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]uint8 + Driver_name [64]uint8 + Module_name [64]uint8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]uint8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]uint8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]uint8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]uint8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]uint8 +} + +type CryptoReportLarval struct { + Type [64]uint8 +} + +type CryptoReportHash struct { + Type [64]uint8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]uint8 +} + +type CryptoReportRNG struct { + Type [64]uint8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]uint8 +} + +type CryptoReportKPP struct { + Type [64]uint8 +} + +type CryptoReportAcomp struct { + Type [64]uint8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + 
BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + 
BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 7f2e26f156d75..76c55e05390da 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -821,6 +821,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1443,6 +1445,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2125,3 +2142,320 @@ type FanotifyResponse struct { Fd int32 Response uint32 } + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + 
Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + 
BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 66e408fa0c967..4302d574ffcf0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -823,6 +823,8 @@ type Sigset_t struct { Val [32]uint32 } +const _C__NSIG = 0x80 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1448,6 +1450,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2130,3 +2147,320 @@ type FanotifyResponse struct { Fd int32 Response uint32 } + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type 
[64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + 
BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index e60575a35aee4..7ea742be6a897 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -823,6 +823,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x80 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1445,6 +1447,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 
Block_nr uint32 @@ -2127,3 +2144,320 @@ type FanotifyResponse struct { Fd int32 Response uint32 } + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + 
BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + 
BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index af5836a41a374..8f2b8ad4eeafa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -823,6 +823,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x80 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1445,6 +1447,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2127,3 +2144,320 @@ type FanotifyResponse struct { Fd int32 Response uint32 } + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 
+} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd 
+ BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 471706ac921f9..865bf57dacbfb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -823,6 +823,8 @@ type Sigset_t struct { Val [32]uint32 } +const _C__NSIG = 0x80 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1448,6 +1450,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2130,3 +2147,320 @@ type FanotifyResponse struct { Fd int32 Response uint32 } + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + 
Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 
0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 6cfa149fa2644..2b68027d5c46e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -831,6 +831,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1453,6 +1455,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt 
TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2135,3 +2152,320 @@ type FanotifyResponse struct { Fd int32 Response uint32 } + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]uint8 + Driver_name [64]uint8 + Module_name [64]uint8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]uint8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]uint8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]uint8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]uint8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]uint8 +} + +type CryptoReportLarval struct { + Type [64]uint8 +} + +type CryptoReportHash struct { + Type [64]uint8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]uint8 +} + +type CryptoReportRNG struct { + Type [64]uint8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]uint8 +} + +type CryptoReportKPP struct { + Type [64]uint8 +} + +type CryptoReportAcomp struct { + Type [64]uint8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + 
BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + 
BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index acb3773ff9bdb..76cd7e643d5f7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -831,6 +831,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1453,6 +1455,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2135,3 +2152,320 @@ type FanotifyResponse struct { Fd int32 Response uint32 } + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]uint8 + Driver_name [64]uint8 + Module_name [64]uint8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]uint8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]uint8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]uint8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]uint8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + 
+type CryptoStatLarval struct { + Type [64]uint8 +} + +type CryptoReportLarval struct { + Type [64]uint8 +} + +type CryptoReportHash struct { + Type [64]uint8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]uint8 +} + +type CryptoReportRNG struct { + Type [64]uint8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]uint8 +} + +type CryptoReportKPP struct { + Type [64]uint8 +} + +type CryptoReportAcomp struct { + Type [64]uint8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + 
BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 9735b2576ef63..f99f061557223 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -848,6 +848,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1470,6 +1472,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2152,3 +2169,320 @@ type FanotifyResponse struct { Fd int32 Response uint32 } + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + 
CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]uint8 + Driver_name [64]uint8 + Module_name [64]uint8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]uint8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]uint8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]uint8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]uint8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]uint8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]uint8 +} + +type CryptoReportLarval struct { + Type [64]uint8 +} + +type CryptoReportHash struct { + Type [64]uint8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]uint8 + Geniv [64]uint8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]uint8 +} + +type CryptoReportRNG struct { + Type [64]uint8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]uint8 +} + +type CryptoReportKPP struct { + Type [64]uint8 +} + +type CryptoReportAcomp struct { + Type [64]uint8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 
0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 5369f652f45a3..d9d03ae49a400 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -844,6 +844,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1467,6 +1469,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec 
uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2149,3 +2166,320 @@ type FanotifyResponse struct { Fd int32 Response uint32 } + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + 
BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + 
BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 552dbe51e8c15..b247fe94bab86 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -826,6 +826,8 @@ type Sigset_t struct { Val [16]uint64 } +const _C__NSIG = 0x41 + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1448,6 +1450,21 @@ type TpacketBlockDesc struct { Hdr [40]byte } +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2130,3 +2147,320 @@ type FanotifyResponse struct { Fd int32 Response uint32 } + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt 
uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + 
BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 2dae0c17a3c20..a2268b4f62bfe 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -57,23 +57,23 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - Mode uint32 - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev uint64 - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Spare [2]uint32 + Dev uint64 + Mode uint32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 } type Statfs_t [0]byte diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 1f0e76c0ccc6e..59e1da0a6ac7d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -58,26 +58,26 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - Mode uint32 - Pad_cgo_0 [4]byte - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Pad_cgo_1 [4]byte - Rdev uint64 - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Spare [2]uint32 - Pad_cgo_2 [4]byte + Dev uint64 + Mode uint32 + _ [4]byte + Ino uint64 + Nlink uint32 + Uid uint32 
+ Gid uint32 + _ [4]byte + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 + _ [4]byte } type Statfs_t [0]byte diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 53f2159c7d2b0..1f1f0f3819df1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -59,26 +59,26 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - Mode uint32 - Pad_cgo_0 [4]byte - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Pad_cgo_1 [4]byte - Rdev uint64 - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Spare [2]uint32 - Pad_cgo_2 [4]byte + Dev uint64 + Mode uint32 + _ [4]byte + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + _ [4]byte + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 + _ [4]byte } type Statfs_t [0]byte diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 43da2c41c5036..8dca204a9c61f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -58,26 +58,26 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - Mode uint32 - Pad_cgo_0 [4]byte - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Pad_cgo_1 [4]byte - Rdev uint64 - Atimespec Timespec - Mtimespec Timespec - Ctimespec Timespec - Birthtimespec Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Spare [2]uint32 - Pad_cgo_2 [4]byte + Dev uint64 + Mode uint32 + _ [4]byte + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + _ [4]byte + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 + _ [4]byte } type Statfs_t [0]byte diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go new file mode 100644 index 0000000000000..fa369a32a2df0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -0,0 +1,564 @@ +// cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build arm64,openbsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]int8 + F_mntonname [90]int8 + F_mntfromname [90]int8 + F_mntfromspec [90]int8 + _ [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags 
uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xa8 + SizeofIfData = 0x90 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Rdomain uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Capabilities uint32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct{} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x14 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_SYMLINK_NOFOLLOW = 0x2 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Sigset_t uint32 + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofUvmexp = 0x158 + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Unused01 int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + Nswget int32 + Nanon int32 + 
Unused05 int32 + Unused06 int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Unused07 int32 + Unused08 int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Unused09 int32 + Fltnoram int32 + Fltnoanon int32 + Fltnoamap int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 + Fpswtch int32 + Kmapent int32 +} + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Tickadj int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/windows/BUILD.bazel b/vendor/golang.org/x/sys/windows/BUILD.bazel index 3316b465af556..e9e5210db72cc 100644 --- a/vendor/golang.org/x/sys/windows/BUILD.bazel +++ b/vendor/golang.org/x/sys/windows/BUILD.bazel @@ -11,8 +11,6 @@ go_library( "eventlog.go", "exec_windows.go", "memory_windows.go", - "mkerrors.go", - "mksyscall.go", "race.go", "race0.go", "security_windows.go", @@ -24,6 +22,7 @@ go_library( "types_windows_386.go", "types_windows_amd64.go", "zerrors_windows.go", + "zknownfolderids_windows.go", "zsyscall_windows.go", ], importmap = "k8s.io/kops/vendor/golang.org/x/sys/windows", diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index bdc71e241e0c6..f482a9fab3b33 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -6,7 +6,11 @@ package windows -import "syscall" +import ( + "syscall" + "unicode/utf16" + "unsafe" +) func Getenv(key string) (value string, found bool) { return syscall.Getenv(key) @@ -24,6 +28,34 @@ func Environ() []string { return syscall.Environ() } +// Returns a default environment associated with the token, rather than the current +// process. If inheritExisting is true, then this environment also inherits the +// environment of the current process. 
+func (token Token) Environ(inheritExisting bool) (env []string, err error) { + var block *uint16 + err = CreateEnvironmentBlock(&block, token, inheritExisting) + if err != nil { + return nil, err + } + defer DestroyEnvironmentBlock(block) + blockp := uintptr(unsafe.Pointer(block)) + for { + entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:] + for i, v := range entry { + if v == 0 { + entry = entry[:i] + break + } + } + if len(entry) == 0 { + break + } + env = append(env, string(utf16.Decode(entry))) + blockp += 2 * (uintptr(len(entry)) + 1) + } + return env, nil +} + func Unsetenv(key string) error { return syscall.Unsetenv(key) } diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash index a70b24f398806..2163843a11dff 100644 --- a/vendor/golang.org/x/sys/windows/mkerrors.bash +++ b/vendor/golang.org/x/sys/windows/mkerrors.bash @@ -7,14 +7,13 @@ set -e shopt -s nullglob -[[ $# -eq 1 ]] || { echo "Usage: $0 OUTPUT_FILE.go" >&2; exit 1; } winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)" [[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; } declare -A errors { - echo "// Code generated by 'go generate'; DO NOT EDIT." + echo "// Code generated by 'mkerrors.bash'; DO NOT EDIT." echo echo "package windows" echo "import \"syscall\"" @@ -61,4 +60,4 @@ declare -A errors done < "$winerror" echo ")" -} | gofmt > "$1" +} | gofmt > "zerrors_windows.go" diff --git a/vendor/golang.org/x/sys/windows/mkknownfolderids.bash b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash new file mode 100644 index 0000000000000..ab8924e936f5c --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright 2019 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +set -e +shopt -s nullglob + +knownfolders="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/um/KnownFolders.h | sort -Vr | head -n 1)" +[[ -n $knownfolders ]] || { echo "Unable to find KnownFolders.h" >&2; exit 1; } + +{ + echo "// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT." 
+ echo + echo "package windows" + echo "type KNOWNFOLDERID GUID" + echo "var (" + while read -r line; do + [[ $line =~ DEFINE_KNOWN_FOLDER\((FOLDERID_[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+)\) ]] || continue + printf "%s = &KNOWNFOLDERID{0x%08x, 0x%04x, 0x%04x, [8]byte{0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x}}\n" \ + "${BASH_REMATCH[1]}" $(( "${BASH_REMATCH[2]}" )) $(( "${BASH_REMATCH[3]}" )) $(( "${BASH_REMATCH[4]}" )) \ + $(( "${BASH_REMATCH[5]}" )) $(( "${BASH_REMATCH[6]}" )) $(( "${BASH_REMATCH[7]}" )) $(( "${BASH_REMATCH[8]}" )) \ + $(( "${BASH_REMATCH[9]}" )) $(( "${BASH_REMATCH[10]}" )) $(( "${BASH_REMATCH[11]}" )) $(( "${BASH_REMATCH[12]}" )) + done < "$knownfolders" + echo ")" +} | gofmt > "zknownfolderids_windows.go" diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index fb7db0ef8d4af..62770572747ea 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build generate + package windows //go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index da06406c44e1e..61b49647b9aed 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -170,15 +170,20 @@ const ( //sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid //sys AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid //sys createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) = advapi32.CreateWellKnownSid +//sys isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) = advapi32.IsWellKnownSid //sys FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid //sys EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid +//sys getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) = advapi32.GetSidIdentifierAuthority +//sys getSidSubAuthorityCount(sid *SID) (count *uint8) = advapi32.GetSidSubAuthorityCount +//sys getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) = advapi32.GetSidSubAuthority +//sys isValidSid(sid *SID) (isValid bool) = advapi32.IsValidSid // The security identifier (SID) structure is a variable-length // structure used to uniquely identify users or groups. type SID struct{} // StringToSid converts a string-format security identifier -// sid into a valid, functional sid. +// SID into a valid, functional SID. func StringToSid(s string) (*SID, error) { var sid *SID p, e := UTF16PtrFromString(s) @@ -193,7 +198,7 @@ func StringToSid(s string) (*SID, error) { return sid.Copy() } -// LookupSID retrieves a security identifier sid for the account +// LookupSID retrieves a security identifier SID for the account // and the name of the domain on which the account was found. 
// System specify target computer to search. func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) { @@ -230,7 +235,7 @@ func LookupSID(system, account string) (sid *SID, domain string, accType uint32, } } -// String converts sid to a string format +// String converts SID to a string format // suitable for display, storage, or transmission. func (sid *SID) String() (string, error) { var s *uint16 @@ -242,12 +247,12 @@ func (sid *SID) String() (string, error) { return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]), nil } -// Len returns the length, in bytes, of a valid security identifier sid. +// Len returns the length, in bytes, of a valid security identifier SID. func (sid *SID) Len() int { return int(GetLengthSid(sid)) } -// Copy creates a duplicate of security identifier sid. +// Copy creates a duplicate of security identifier SID. func (sid *SID) Copy() (*SID, error) { b := make([]byte, sid.Len()) sid2 := (*SID)(unsafe.Pointer(&b[0])) @@ -258,8 +263,42 @@ func (sid *SID) Copy() (*SID, error) { return sid2, nil } -// LookupAccount retrieves the name of the account for this sid -// and the name of the first domain on which this sid is found. +// IdentifierAuthority returns the identifier authority of the SID. +func (sid *SID) IdentifierAuthority() SidIdentifierAuthority { + return *getSidIdentifierAuthority(sid) +} + +// SubAuthorityCount returns the number of sub-authorities in the SID. +func (sid *SID) SubAuthorityCount() uint8 { + return *getSidSubAuthorityCount(sid) +} + +// SubAuthority returns the sub-authority of the SID as specified by +// the index, which must be less than sid.SubAuthorityCount(). +func (sid *SID) SubAuthority(idx uint32) uint32 { + if idx >= uint32(sid.SubAuthorityCount()) { + panic("sub-authority index out of range") + } + return *getSidSubAuthority(sid, idx) +} + +// IsValid returns whether the SID has a valid revision and length. +func (sid *SID) IsValid() bool { + return isValidSid(sid) +} + +// Equals compares two SIDs for equality. +func (sid *SID) Equals(sid2 *SID) bool { + return EqualSid(sid, sid2) +} + +// IsWellKnown determines whether the SID matches the well-known sidType. +func (sid *SID) IsWellKnown(sidType WELL_KNOWN_SID_TYPE) bool { + return isWellKnownSid(sid, sidType) +} + +// LookupAccount retrieves the name of the account for this SID +// and the name of the first domain on which this SID is found. // System specify target computer to search for. func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) { var sys *uint16 @@ -287,7 +326,7 @@ func (sid *SID) LookupAccount(system string) (account, domain string, accType ui } } -// Various types of pre-specified sids that can be synthesized at runtime. +// Various types of pre-specified SIDs that can be synthesized and compared at runtime. type WELL_KNOWN_SID_TYPE uint32 const ( @@ -413,13 +452,13 @@ const ( WinBuiltinDeviceOwnersSid = 119 ) -// Creates a sid for a well-known predefined alias, generally using the constants of the form +// Creates a SID for a well-known predefined alias, generally using the constants of the form // Win*Sid, for the local machine. 
func CreateWellKnownSid(sidType WELL_KNOWN_SID_TYPE) (*SID, error) { return CreateWellKnownDomainSid(sidType, nil) } -// Creates a sid for a well-known predefined alias, generally using the constants of the form +// Creates a SID for a well-known predefined alias, generally using the constants of the form // Win*Sid, for the domain specified by the domainSid parameter. func CreateWellKnownDomainSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID) (*SID, error) { n := uint32(50) @@ -502,6 +541,53 @@ const ( MaxTokenInfoClass ) +// Group attributes inside of Tokengroups.Groups[i].Attributes +const ( + SE_GROUP_MANDATORY = 0x00000001 + SE_GROUP_ENABLED_BY_DEFAULT = 0x00000002 + SE_GROUP_ENABLED = 0x00000004 + SE_GROUP_OWNER = 0x00000008 + SE_GROUP_USE_FOR_DENY_ONLY = 0x00000010 + SE_GROUP_INTEGRITY = 0x00000020 + SE_GROUP_INTEGRITY_ENABLED = 0x00000040 + SE_GROUP_LOGON_ID = 0xC0000000 + SE_GROUP_RESOURCE = 0x20000000 + SE_GROUP_VALID_ATTRIBUTES = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED | SE_GROUP_OWNER | SE_GROUP_USE_FOR_DENY_ONLY | SE_GROUP_LOGON_ID | SE_GROUP_RESOURCE | SE_GROUP_INTEGRITY | SE_GROUP_INTEGRITY_ENABLED +) + +// Privilege attributes +const ( + SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001 + SE_PRIVILEGE_ENABLED = 0x00000002 + SE_PRIVILEGE_REMOVED = 0x00000004 + SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000 + SE_PRIVILEGE_VALID_ATTRIBUTES = SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS +) + +// Token types +const ( + TokenPrimary = 1 + TokenImpersonation = 2 +) + +// Impersonation levels +const ( + SecurityAnonymous = 0 + SecurityIdentification = 1 + SecurityImpersonation = 2 + SecurityDelegation = 3 +) + +type LUID struct { + LowPart uint32 + HighPart int32 +} + +type LUIDAndAttributes struct { + Luid LUID + Attributes uint32 +} + type SIDAndAttributes struct { Sid *SID Attributes uint32 @@ -517,13 +603,45 @@ type Tokenprimarygroup struct { type Tokengroups struct { GroupCount uint32 - Groups [1]SIDAndAttributes + Groups [1]SIDAndAttributes // Use AllGroups() for iterating. +} + +// AllGroups returns a slice that can be used to iterate over the groups in g. +func (g *Tokengroups) AllGroups() []SIDAndAttributes { + return (*[(1 << 28) - 1]SIDAndAttributes)(unsafe.Pointer(&g.Groups[0]))[:g.GroupCount:g.GroupCount] +} + +type Tokenprivileges struct { + PrivilegeCount uint32 + Privileges [1]LUIDAndAttributes // Use AllPrivileges() for iterating. +} + +// AllPrivileges returns a slice that can be used to iterate over the privileges in p. 
+func (p *Tokenprivileges) AllPrivileges() []LUIDAndAttributes { + return (*[(1 << 27) - 1]LUIDAndAttributes)(unsafe.Pointer(&p.Privileges[0]))[:p.PrivilegeCount:p.PrivilegeCount] +} + +type Tokenmandatorylabel struct { + Label SIDAndAttributes +} + +func (tml *Tokenmandatorylabel) Size() uint32 { + return uint32(unsafe.Sizeof(Tokenmandatorylabel{})) + GetLengthSid(tml.Label.Sid) } // Authorization Functions -//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership -//sys OpenProcessToken(h Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken -//sys GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation +//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership +//sys OpenProcessToken(process Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken +//sys OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) = advapi32.OpenThreadToken +//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf +//sys RevertToSelf() (err error) = advapi32.RevertToSelf +//sys SetThreadToken(thread *Handle, token Token) (err error) = advapi32.SetThreadToken +//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW +//sys AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) = advapi32.AdjustTokenPrivileges +//sys AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) = advapi32.AdjustTokenGroups +//sys GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation +//sys SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) = advapi32.SetTokenInformation +//sys DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx //sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW //sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW @@ -537,7 +655,9 @@ type Tokengroups struct { type Token Handle // OpenCurrentProcessToken opens the access token -// associated with current process. +// associated with current process. It is a real +// token that needs to be closed, unlike +// GetCurrentProcessToken. func OpenCurrentProcessToken() (Token, error) { p, e := GetCurrentProcess() if e != nil { @@ -551,6 +671,27 @@ func OpenCurrentProcessToken() (Token, error) { return t, nil } +// GetCurrentProcessToken returns the access token associated with +// the current process. It is a pseudo token that does not need +// to be closed. +func GetCurrentProcessToken() Token { + return Token(^uintptr(4 - 1)) +} + +// GetCurrentThreadToken return the access token associated with +// the current thread. It is a pseudo token that does not need +// to be closed. 
+func GetCurrentThreadToken() Token { + return Token(^uintptr(5 - 1)) +} + +// GetCurrentThreadEffectiveToken returns the effective access token +// associated with the current thread. It is a pseudo token that does +// not need to be closed. +func GetCurrentThreadEffectiveToken() Token { + return Token(^uintptr(6 - 1)) +} + // Close releases access to access token. func (t Token) Close() error { return CloseHandle(Handle(t)) @@ -622,6 +763,28 @@ func (t Token) GetUserProfileDirectory() (string, error) { } } +// IsElevated returns whether the current token is elevated from a UAC perspective. +func (token Token) IsElevated() bool { + var isElevated uint32 + var outLen uint32 + err := GetTokenInformation(token, TokenElevation, (*byte)(unsafe.Pointer(&isElevated)), uint32(unsafe.Sizeof(isElevated)), &outLen) + if err != nil { + return false + } + return outLen == uint32(unsafe.Sizeof(isElevated)) && isElevated != 0 +} + +// GetLinkedToken returns the linked token, which may be an elevated UAC token. +func (token Token) GetLinkedToken() (Token, error) { + var linkedToken Token + var outLen uint32 + err := GetTokenInformation(token, TokenLinkedToken, (*byte)(unsafe.Pointer(&linkedToken)), uint32(unsafe.Sizeof(linkedToken)), &outLen) + if err != nil { + return Token(0), err + } + return linkedToken, nil +} + // GetSystemDirectory retrieves path to current location of the system // directory, which is typically, though not always, C:\Windows\System32. func GetSystemDirectory() (string, error) { @@ -647,3 +810,45 @@ func (t Token) IsMember(sid *SID) (bool, error) { } return b != 0, nil } + +const ( + WTS_CONSOLE_CONNECT = 0x1 + WTS_CONSOLE_DISCONNECT = 0x2 + WTS_REMOTE_CONNECT = 0x3 + WTS_REMOTE_DISCONNECT = 0x4 + WTS_SESSION_LOGON = 0x5 + WTS_SESSION_LOGOFF = 0x6 + WTS_SESSION_LOCK = 0x7 + WTS_SESSION_UNLOCK = 0x8 + WTS_SESSION_REMOTE_CONTROL = 0x9 + WTS_SESSION_CREATE = 0xa + WTS_SESSION_TERMINATE = 0xb +) + +const ( + WTSActive = 0 + WTSConnected = 1 + WTSConnectQuery = 2 + WTSShadow = 3 + WTSDisconnected = 4 + WTSIdle = 5 + WTSListen = 6 + WTSReset = 7 + WTSDown = 8 + WTSInit = 9 +) + +type WTSSESSION_NOTIFICATION struct { + Size uint32 + SessionID uint32 +} + +type WTS_SESSION_INFO struct { + SessionID uint32 + WindowStationName *uint16 + State uint32 +} + +//sys WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken +//sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW +//sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index 994b1290ced73..03383f1dfdfa0 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -85,21 +85,47 @@ const ( SERVICE_INACTIVE = 2 SERVICE_STATE_ALL = 3 - SERVICE_QUERY_CONFIG = 1 - SERVICE_CHANGE_CONFIG = 2 - SERVICE_QUERY_STATUS = 4 - SERVICE_ENUMERATE_DEPENDENTS = 8 - SERVICE_START = 16 - SERVICE_STOP = 32 - SERVICE_PAUSE_CONTINUE = 64 - SERVICE_INTERROGATE = 128 - SERVICE_USER_DEFINED_CONTROL = 256 - SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL + SERVICE_QUERY_CONFIG = 1 + SERVICE_CHANGE_CONFIG = 2 + SERVICE_QUERY_STATUS = 4 + SERVICE_ENUMERATE_DEPENDENTS = 8 
+ SERVICE_START = 16 + SERVICE_STOP = 32 + SERVICE_PAUSE_CONTINUE = 64 + SERVICE_INTERROGATE = 128 + SERVICE_USER_DEFINED_CONTROL = 256 + SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL + SERVICE_RUNS_IN_SYSTEM_PROCESS = 1 - SERVICE_CONFIG_DESCRIPTION = 1 - SERVICE_CONFIG_FAILURE_ACTIONS = 2 + + SERVICE_CONFIG_DESCRIPTION = 1 + SERVICE_CONFIG_FAILURE_ACTIONS = 2 + SERVICE_CONFIG_DELAYED_AUTO_START_INFO = 3 + SERVICE_CONFIG_FAILURE_ACTIONS_FLAG = 4 + SERVICE_CONFIG_SERVICE_SID_INFO = 5 + SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 6 + SERVICE_CONFIG_PRESHUTDOWN_INFO = 7 + SERVICE_CONFIG_TRIGGER_INFO = 8 + SERVICE_CONFIG_PREFERRED_NODE = 9 + SERVICE_CONFIG_LAUNCH_PROTECTED = 12 + + SERVICE_SID_TYPE_NONE = 0 + SERVICE_SID_TYPE_UNRESTRICTED = 1 + SERVICE_SID_TYPE_RESTRICTED = 2 | SERVICE_SID_TYPE_UNRESTRICTED SC_ENUM_PROCESS_INFO = 0 + + SERVICE_NOTIFY_STATUS_CHANGE = 2 + SERVICE_NOTIFY_STOPPED = 0x00000001 + SERVICE_NOTIFY_START_PENDING = 0x00000002 + SERVICE_NOTIFY_STOP_PENDING = 0x00000004 + SERVICE_NOTIFY_RUNNING = 0x00000008 + SERVICE_NOTIFY_CONTINUE_PENDING = 0x00000010 + SERVICE_NOTIFY_PAUSE_PENDING = 0x00000020 + SERVICE_NOTIFY_PAUSED = 0x00000040 + SERVICE_NOTIFY_CREATED = 0x00000080 + SERVICE_NOTIFY_DELETED = 0x00000100 + SERVICE_NOTIFY_DELETE_PENDING = 0x00000200 ) type SERVICE_STATUS struct { @@ -151,6 +177,16 @@ type ENUM_SERVICE_STATUS_PROCESS struct { ServiceStatusProcess SERVICE_STATUS_PROCESS } +type SERVICE_NOTIFY struct { + Version uint32 + NotifyCallback uintptr + Context uintptr + NotificationStatus uint32 + ServiceStatus SERVICE_STATUS_PROCESS + NotificationTriggered uint32 + ServiceNames *uint16 +} + type SERVICE_FAILURE_ACTIONS struct { ResetPeriod uint32 RebootMsg *uint16 @@ -164,12 +200,19 @@ type SC_ACTION struct { Delay uint32 } +type QUERY_SERVICE_LOCK_STATUS struct { + IsLocked uint32 + LockOwner *uint16 + LockDuration uint32 +} + //sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle //sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW //sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW //sys DeleteService(service Handle) (err error) = advapi32.DeleteService //sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW //sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus +//sys QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceLockStatusW //sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService //sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW //sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus @@ -178,4 +221,5 @@ type SC_ACTION struct { //sys ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) 
(err error) = advapi32.ChangeServiceConfig2W //sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W //sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW -//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx +//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx +//sys NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) = advapi32.NotifyServiceStatusChangeW diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 7aff0d0225254..92ce02bbca2b3 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -10,6 +10,7 @@ import ( errorspkg "errors" "sync" "syscall" + "time" "unicode/utf16" "unsafe" ) @@ -55,6 +56,10 @@ const ( FILE_UNICODE_ON_DISK = 0x00000004 FILE_VOLUME_IS_COMPRESSED = 0x00008000 FILE_VOLUME_QUOTAS = 0x00000020 + + // Return values of SleepEx and other APC functions + STATUS_USER_APC = 0x000000C0 + WAIT_IO_COMPLETION = STATUS_USER_APC ) // StringToUTF16 is deprecated. Use UTF16FromString instead. @@ -134,7 +139,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW //sys ExitProcess(exitcode uint32) -//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW +//sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process +//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) @@ -146,6 +152,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW //sys FindClose(handle Handle) (err error) //sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) +//sys GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) //sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW //sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW //sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW @@ -165,11 +172,14 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CancelIo(s Handle) (err error) //sys CancelIoEx(s Handle, o *Overlapped) (err error) //sys CreateProcess(appName *uint16, 
commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW -//sys OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) +//sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) +//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) = shell32.ShellExecuteW +//sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) //sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW //sys GetCurrentProcess() (pseudoHandle Handle, err error) +//sys GetCurrentThread() (pseudoHandle Handle, err error) //sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) //sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) //sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] @@ -184,6 +194,9 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW //sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW //sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW +//sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock +//sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock +//sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW @@ -222,7 +235,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW //sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW //sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW -//sys getCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId +//sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys 
GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo @@ -231,6 +244,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW //sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW +//sys Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) +//sys Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) //sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) // This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. //sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW @@ -242,6 +257,18 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetEvent(event Handle) (err error) = kernel32.SetEvent //sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent //sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent +//sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx +//sys CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW +//sys AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject +//sys TerminateJobObject(job Handle, exitCode uint32) (err error) = kernel32.TerminateJobObject +//sys SetErrorMode(mode uint32) (ret uint32) = kernel32.SetErrorMode +//sys ResumeThread(thread Handle) (ret uint32, err error) [failretval==0xffffffff] = kernel32.ResumeThread +//sys SetPriorityClass(process Handle, priorityClass uint32) (err error) = kernel32.SetPriorityClass +//sys GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass +//sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) +//sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) +//sys GetProcessId(process Handle) (id uint32, err error) +//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -263,6 +290,11 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW //sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW //sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW +//sys MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW +//sys clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString +//sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 +//sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid +//sys coTaskMemFree(address unsafe.Pointer) = 
ole32.CoTaskMemFree // syscall interface implementation for other packages @@ -477,6 +509,10 @@ func ComputerName() (name string, err error) { return string(utf16.Decode(b[0:n])), nil } +func DurationSinceBoot() time.Duration { + return time.Duration(getTickCount64()) * time.Millisecond +} + func Ftruncate(fd Handle, length int64) (err error) { curoffset, e := Seek(fd, 0, 1) if e != nil { @@ -560,9 +596,6 @@ func Fsync(fd Handle) (err error) { } func Chmod(path string, mode uint32) (err error) { - if mode == 0 { - return syscall.EINVAL - } p, e := UTF16PtrFromString(path) if e != nil { return e @@ -1089,7 +1122,7 @@ func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } -func Getpid() (pid int) { return int(getCurrentProcessId()) } +func Getpid() (pid int) { return int(GetCurrentProcessId()) } func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { // NOTE(rsc): The Win32finddata struct is wrong for the system call: @@ -1217,3 +1250,57 @@ func Readlink(path string, buf []byte) (n int, err error) { return n, nil } + +// GUIDFromString parses a string in the form of +// "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" into a GUID. +func GUIDFromString(str string) (GUID, error) { + guid := GUID{} + str16, err := syscall.UTF16PtrFromString(str) + if err != nil { + return guid, err + } + err = clsidFromString(str16, &guid) + if err != nil { + return guid, err + } + return guid, nil +} + +// GenerateGUID creates a new random GUID. +func GenerateGUID() (GUID, error) { + guid := GUID{} + err := coCreateGuid(&guid) + if err != nil { + return guid, err + } + return guid, nil +} + +// String returns the canonical string form of the GUID, +// in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". +func (guid GUID) String() string { + var str [100]uint16 + chars := stringFromGUID2(&guid, &str[0], int32(len(str))) + if chars <= 1 { + return "" + } + return string(utf16.Decode(str[:chars-1])) +} + +// KnownFolderPath returns a well-known folder path for the current user, specified by one of +// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. +func KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { + return Token(0).KnownFolderPath(folderID, flags) +} + +// KnownFolderPath returns a well-known folder path for the user token, specified by one of +// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. +func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { + var p *uint16 + err := shGetKnownFolderPath(folderID, flags, t, &p) + if err != nil { + return "", err + } + defer coTaskMemFree(unsafe.Pointer(p)) + return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:]), nil +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index ee27936840103..1cba11ed59f90 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -4,7 +4,11 @@ package windows -import "syscall" +import ( + "net" + "syscall" + "unsafe" +) const ( // Invented values to support what package os expects. @@ -154,9 +158,39 @@ const ( WAIT_OBJECT_0 = 0x00000000 WAIT_FAILED = 0xFFFFFFFF - PROCESS_TERMINATE = 1 - PROCESS_QUERY_INFORMATION = 0x00000400 - SYNCHRONIZE = 0x00100000 + // Standard access rights. 
+ DELETE = 0x00010000 + READ_CONTROL = 0x00020000 + SYNCHRONIZE = 0x00100000 + WRITE_DAC = 0x00040000 + WRITE_OWNER = 0x00080000 + + // Access rights for process. + PROCESS_CREATE_PROCESS = 0x0080 + PROCESS_CREATE_THREAD = 0x0002 + PROCESS_DUP_HANDLE = 0x0040 + PROCESS_QUERY_INFORMATION = 0x0400 + PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 + PROCESS_SET_INFORMATION = 0x0200 + PROCESS_SET_QUOTA = 0x0100 + PROCESS_SUSPEND_RESUME = 0x0800 + PROCESS_TERMINATE = 0x0001 + PROCESS_VM_OPERATION = 0x0008 + PROCESS_VM_READ = 0x0010 + PROCESS_VM_WRITE = 0x0020 + + // Access rights for thread. + THREAD_DIRECT_IMPERSONATION = 0x0200 + THREAD_GET_CONTEXT = 0x0008 + THREAD_IMPERSONATE = 0x0100 + THREAD_QUERY_INFORMATION = 0x0040 + THREAD_QUERY_LIMITED_INFORMATION = 0x0800 + THREAD_SET_CONTEXT = 0x0010 + THREAD_SET_INFORMATION = 0x0020 + THREAD_SET_LIMITED_INFORMATION = 0x0400 + THREAD_SET_THREAD_TOKEN = 0x0080 + THREAD_SUSPEND_RESUME = 0x0002 + THREAD_TERMINATE = 0x0001 FILE_MAP_COPY = 0x01 FILE_MAP_WRITE = 0x02 @@ -396,6 +430,26 @@ const ( SECURITY_FLAG_IGNORE_CERT_DATE_INVALID = 0x00002000 ) +const ( + // flags for SetErrorMode + SEM_FAILCRITICALERRORS = 0x0001 + SEM_NOALIGNMENTFAULTEXCEPT = 0x0004 + SEM_NOGPFAULTERRORBOX = 0x0002 + SEM_NOOPENFILEERRORBOX = 0x8000 +) + +const ( + // Priority class. + ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000 + BELOW_NORMAL_PRIORITY_CLASS = 0x00004000 + HIGH_PRIORITY_CLASS = 0x00000080 + IDLE_PRIORITY_CLASS = 0x00000040 + NORMAL_PRIORITY_CLASS = 0x00000020 + PROCESS_MODE_BACKGROUND_BEGIN = 0x00100000 + PROCESS_MODE_BACKGROUND_END = 0x00200000 + REALTIME_PRIORITY_CLASS = 0x00000100 +) + var ( OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00") OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00") @@ -605,6 +659,16 @@ type ProcessEntry32 struct { ExeFile [MAX_PATH]uint16 } +type ThreadEntry32 struct { + Size uint32 + Usage uint32 + ThreadID uint32 + OwnerProcessID uint32 + BasePri int32 + DeltaPri int32 + Flags uint32 +} + type Systemtime struct { Year uint16 Month uint16 @@ -1286,6 +1350,41 @@ const ( ComputerNameMax = 8 ) +// For MessageBox() +const ( + MB_OK = 0x00000000 + MB_OKCANCEL = 0x00000001 + MB_ABORTRETRYIGNORE = 0x00000002 + MB_YESNOCANCEL = 0x00000003 + MB_YESNO = 0x00000004 + MB_RETRYCANCEL = 0x00000005 + MB_CANCELTRYCONTINUE = 0x00000006 + MB_ICONHAND = 0x00000010 + MB_ICONQUESTION = 0x00000020 + MB_ICONEXCLAMATION = 0x00000030 + MB_ICONASTERISK = 0x00000040 + MB_USERICON = 0x00000080 + MB_ICONWARNING = MB_ICONEXCLAMATION + MB_ICONERROR = MB_ICONHAND + MB_ICONINFORMATION = MB_ICONASTERISK + MB_ICONSTOP = MB_ICONHAND + MB_DEFBUTTON1 = 0x00000000 + MB_DEFBUTTON2 = 0x00000100 + MB_DEFBUTTON3 = 0x00000200 + MB_DEFBUTTON4 = 0x00000300 + MB_APPLMODAL = 0x00000000 + MB_SYSTEMMODAL = 0x00001000 + MB_TASKMODAL = 0x00002000 + MB_HELP = 0x00004000 + MB_NOFOCUS = 0x00008000 + MB_SETFOREGROUND = 0x00010000 + MB_DEFAULT_DESKTOP_ONLY = 0x00020000 + MB_TOPMOST = 0x00040000 + MB_RIGHT = 0x00080000 + MB_RTLREADING = 0x00100000 + MB_SERVICE_NOTIFICATION = 0x00200000 +) + const ( MOVEFILE_REPLACE_EXISTING = 0x1 MOVEFILE_COPY_ALLOWED = 0x2 @@ -1314,6 +1413,16 @@ type SocketAddress struct { SockaddrLength int32 } +// IP returns an IPv4 or IPv6 address, or nil if the underlying SocketAddress is neither. 
+func (addr *SocketAddress) IP() net.IP { + if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet4{}) && addr.Sockaddr.Addr.Family == AF_INET { + return (*RawSockaddrInet4)(unsafe.Pointer(addr.Sockaddr)).Addr[:] + } else if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet6{}) && addr.Sockaddr.Addr.Family == AF_INET6 { + return (*RawSockaddrInet6)(unsafe.Pointer(addr.Sockaddr)).Addr[:] + } + return nil +} + type IpAdapterUnicastAddress struct { Length uint32 Flags uint32 @@ -1439,3 +1548,104 @@ type ConsoleScreenBufferInfo struct { } const UNIX_PATH_MAX = 108 // defined in afunix.h + +const ( + // flags for JOBOBJECT_BASIC_LIMIT_INFORMATION.LimitFlags + JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 0x00000008 + JOB_OBJECT_LIMIT_AFFINITY = 0x00000010 + JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800 + JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400 + JOB_OBJECT_LIMIT_JOB_MEMORY = 0x00000200 + JOB_OBJECT_LIMIT_JOB_TIME = 0x00000004 + JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000 + JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME = 0x00000040 + JOB_OBJECT_LIMIT_PRIORITY_CLASS = 0x00000020 + JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x00000100 + JOB_OBJECT_LIMIT_PROCESS_TIME = 0x00000002 + JOB_OBJECT_LIMIT_SCHEDULING_CLASS = 0x00000080 + JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x00001000 + JOB_OBJECT_LIMIT_SUBSET_AFFINITY = 0x00004000 + JOB_OBJECT_LIMIT_WORKINGSET = 0x00000001 +) + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} + +type IO_COUNTERS struct { + ReadOperationCount uint64 + WriteOperationCount uint64 + OtherOperationCount uint64 + ReadTransferCount uint64 + WriteTransferCount uint64 + OtherTransferCount uint64 +} + +type JOBOBJECT_EXTENDED_LIMIT_INFORMATION struct { + BasicLimitInformation JOBOBJECT_BASIC_LIMIT_INFORMATION + IoInfo IO_COUNTERS + ProcessMemoryLimit uintptr + JobMemoryLimit uintptr + PeakProcessMemoryUsed uintptr + PeakJobMemoryUsed uintptr +} + +const ( + // UIRestrictionsClass + JOB_OBJECT_UILIMIT_DESKTOP = 0x00000040 + JOB_OBJECT_UILIMIT_DISPLAYSETTINGS = 0x00000010 + JOB_OBJECT_UILIMIT_EXITWINDOWS = 0x00000080 + JOB_OBJECT_UILIMIT_GLOBALATOMS = 0x00000020 + JOB_OBJECT_UILIMIT_HANDLES = 0x00000001 + JOB_OBJECT_UILIMIT_READCLIPBOARD = 0x00000002 + JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS = 0x00000008 + JOB_OBJECT_UILIMIT_WRITECLIPBOARD = 0x00000004 +) + +type JOBOBJECT_BASIC_UI_RESTRICTIONS struct { + UIRestrictionsClass uint32 +} + +const ( + // JobObjectInformationClass + JobObjectAssociateCompletionPortInformation = 7 + JobObjectBasicLimitInformation = 2 + JobObjectBasicUIRestrictions = 4 + JobObjectCpuRateControlInformation = 15 + JobObjectEndOfJobTimeInformation = 6 + JobObjectExtendedLimitInformation = 9 + JobObjectGroupInformation = 11 + JobObjectGroupInformationEx = 14 + JobObjectLimitViolationInformation2 = 35 + JobObjectNetRateControlInformation = 32 + JobObjectNotificationLimitInformation = 12 + JobObjectNotificationLimitInformation2 = 34 + JobObjectSecurityLimitInformation = 5 +) + +const ( + KF_FLAG_DEFAULT = 0x00000000 + KF_FLAG_FORCE_APP_DATA_REDIRECTION = 0x00080000 + KF_FLAG_RETURN_FILTER_REDIRECTION_TARGET = 0x00040000 + KF_FLAG_FORCE_PACKAGE_REDIRECTION = 0x00020000 + KF_FLAG_NO_PACKAGE_REDIRECTION = 0x00010000 + KF_FLAG_FORCE_APPCONTAINER_REDIRECTION = 0x00020000 + KF_FLAG_NO_APPCONTAINER_REDIRECTION = 0x00010000 + 
KF_FLAG_CREATE = 0x00008000 + KF_FLAG_DONT_VERIFY = 0x00004000 + KF_FLAG_DONT_UNEXPAND = 0x00002000 + KF_FLAG_NO_ALIAS = 0x00001000 + KF_FLAG_INIT = 0x00000800 + KF_FLAG_DEFAULT_PATH = 0x00000400 + KF_FLAG_NOT_PARENT_RELATIVE = 0x00000200 + KF_FLAG_SIMPLE_IDLIST = 0x00000100 + KF_FLAG_ALIAS_ONLY = 0x80000000 +) diff --git a/vendor/golang.org/x/sys/windows/zerrors_windows.go b/vendor/golang.org/x/sys/windows/zerrors_windows.go index 2b4cea5b94a1c..f0212003520d0 100644 --- a/vendor/golang.org/x/sys/windows/zerrors_windows.go +++ b/vendor/golang.org/x/sys/windows/zerrors_windows.go @@ -1,4 +1,4 @@ -// Code generated by 'go generate'; DO NOT EDIT. +// Code generated by 'mkerrors.bash'; DO NOT EDIT. package windows diff --git a/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go new file mode 100644 index 0000000000000..6048ac679fa53 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go @@ -0,0 +1,149 @@ +// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT. + +package windows + +type KNOWNFOLDERID GUID + +var ( + FOLDERID_NetworkFolder = &KNOWNFOLDERID{0xd20beec4, 0x5ca8, 0x4905, [8]byte{0xae, 0x3b, 0xbf, 0x25, 0x1e, 0xa0, 0x9b, 0x53}} + FOLDERID_ComputerFolder = &KNOWNFOLDERID{0x0ac0837c, 0xbbf8, 0x452a, [8]byte{0x85, 0x0d, 0x79, 0xd0, 0x8e, 0x66, 0x7c, 0xa7}} + FOLDERID_InternetFolder = &KNOWNFOLDERID{0x4d9f7874, 0x4e0c, 0x4904, [8]byte{0x96, 0x7b, 0x40, 0xb0, 0xd2, 0x0c, 0x3e, 0x4b}} + FOLDERID_ControlPanelFolder = &KNOWNFOLDERID{0x82a74aeb, 0xaeb4, 0x465c, [8]byte{0xa0, 0x14, 0xd0, 0x97, 0xee, 0x34, 0x6d, 0x63}} + FOLDERID_PrintersFolder = &KNOWNFOLDERID{0x76fc4e2d, 0xd6ad, 0x4519, [8]byte{0xa6, 0x63, 0x37, 0xbd, 0x56, 0x06, 0x81, 0x85}} + FOLDERID_SyncManagerFolder = &KNOWNFOLDERID{0x43668bf8, 0xc14e, 0x49b2, [8]byte{0x97, 0xc9, 0x74, 0x77, 0x84, 0xd7, 0x84, 0xb7}} + FOLDERID_SyncSetupFolder = &KNOWNFOLDERID{0x0f214138, 0xb1d3, 0x4a90, [8]byte{0xbb, 0xa9, 0x27, 0xcb, 0xc0, 0xc5, 0x38, 0x9a}} + FOLDERID_ConflictFolder = &KNOWNFOLDERID{0x4bfefb45, 0x347d, 0x4006, [8]byte{0xa5, 0xbe, 0xac, 0x0c, 0xb0, 0x56, 0x71, 0x92}} + FOLDERID_SyncResultsFolder = &KNOWNFOLDERID{0x289a9a43, 0xbe44, 0x4057, [8]byte{0xa4, 0x1b, 0x58, 0x7a, 0x76, 0xd7, 0xe7, 0xf9}} + FOLDERID_RecycleBinFolder = &KNOWNFOLDERID{0xb7534046, 0x3ecb, 0x4c18, [8]byte{0xbe, 0x4e, 0x64, 0xcd, 0x4c, 0xb7, 0xd6, 0xac}} + FOLDERID_ConnectionsFolder = &KNOWNFOLDERID{0x6f0cd92b, 0x2e97, 0x45d1, [8]byte{0x88, 0xff, 0xb0, 0xd1, 0x86, 0xb8, 0xde, 0xdd}} + FOLDERID_Fonts = &KNOWNFOLDERID{0xfd228cb7, 0xae11, 0x4ae3, [8]byte{0x86, 0x4c, 0x16, 0xf3, 0x91, 0x0a, 0xb8, 0xfe}} + FOLDERID_Desktop = &KNOWNFOLDERID{0xb4bfcc3a, 0xdb2c, 0x424c, [8]byte{0xb0, 0x29, 0x7f, 0xe9, 0x9a, 0x87, 0xc6, 0x41}} + FOLDERID_Startup = &KNOWNFOLDERID{0xb97d20bb, 0xf46a, 0x4c97, [8]byte{0xba, 0x10, 0x5e, 0x36, 0x08, 0x43, 0x08, 0x54}} + FOLDERID_Programs = &KNOWNFOLDERID{0xa77f5d77, 0x2e2b, 0x44c3, [8]byte{0xa6, 0xa2, 0xab, 0xa6, 0x01, 0x05, 0x4a, 0x51}} + FOLDERID_StartMenu = &KNOWNFOLDERID{0x625b53c3, 0xab48, 0x4ec1, [8]byte{0xba, 0x1f, 0xa1, 0xef, 0x41, 0x46, 0xfc, 0x19}} + FOLDERID_Recent = &KNOWNFOLDERID{0xae50c081, 0xebd2, 0x438a, [8]byte{0x86, 0x55, 0x8a, 0x09, 0x2e, 0x34, 0x98, 0x7a}} + FOLDERID_SendTo = &KNOWNFOLDERID{0x8983036c, 0x27c0, 0x404b, [8]byte{0x8f, 0x08, 0x10, 0x2d, 0x10, 0xdc, 0xfd, 0x74}} + FOLDERID_Documents = &KNOWNFOLDERID{0xfdd39ad0, 0x238f, 0x46af, [8]byte{0xad, 0xb4, 0x6c, 0x85, 0x48, 0x03, 0x69, 0xc7}} + FOLDERID_Favorites = 
&KNOWNFOLDERID{0x1777f761, 0x68ad, 0x4d8a, [8]byte{0x87, 0xbd, 0x30, 0xb7, 0x59, 0xfa, 0x33, 0xdd}} + FOLDERID_NetHood = &KNOWNFOLDERID{0xc5abbf53, 0xe17f, 0x4121, [8]byte{0x89, 0x00, 0x86, 0x62, 0x6f, 0xc2, 0xc9, 0x73}} + FOLDERID_PrintHood = &KNOWNFOLDERID{0x9274bd8d, 0xcfd1, 0x41c3, [8]byte{0xb3, 0x5e, 0xb1, 0x3f, 0x55, 0xa7, 0x58, 0xf4}} + FOLDERID_Templates = &KNOWNFOLDERID{0xa63293e8, 0x664e, 0x48db, [8]byte{0xa0, 0x79, 0xdf, 0x75, 0x9e, 0x05, 0x09, 0xf7}} + FOLDERID_CommonStartup = &KNOWNFOLDERID{0x82a5ea35, 0xd9cd, 0x47c5, [8]byte{0x96, 0x29, 0xe1, 0x5d, 0x2f, 0x71, 0x4e, 0x6e}} + FOLDERID_CommonPrograms = &KNOWNFOLDERID{0x0139d44e, 0x6afe, 0x49f2, [8]byte{0x86, 0x90, 0x3d, 0xaf, 0xca, 0xe6, 0xff, 0xb8}} + FOLDERID_CommonStartMenu = &KNOWNFOLDERID{0xa4115719, 0xd62e, 0x491d, [8]byte{0xaa, 0x7c, 0xe7, 0x4b, 0x8b, 0xe3, 0xb0, 0x67}} + FOLDERID_PublicDesktop = &KNOWNFOLDERID{0xc4aa340d, 0xf20f, 0x4863, [8]byte{0xaf, 0xef, 0xf8, 0x7e, 0xf2, 0xe6, 0xba, 0x25}} + FOLDERID_ProgramData = &KNOWNFOLDERID{0x62ab5d82, 0xfdc1, 0x4dc3, [8]byte{0xa9, 0xdd, 0x07, 0x0d, 0x1d, 0x49, 0x5d, 0x97}} + FOLDERID_CommonTemplates = &KNOWNFOLDERID{0xb94237e7, 0x57ac, 0x4347, [8]byte{0x91, 0x51, 0xb0, 0x8c, 0x6c, 0x32, 0xd1, 0xf7}} + FOLDERID_PublicDocuments = &KNOWNFOLDERID{0xed4824af, 0xdce4, 0x45a8, [8]byte{0x81, 0xe2, 0xfc, 0x79, 0x65, 0x08, 0x36, 0x34}} + FOLDERID_RoamingAppData = &KNOWNFOLDERID{0x3eb685db, 0x65f9, 0x4cf6, [8]byte{0xa0, 0x3a, 0xe3, 0xef, 0x65, 0x72, 0x9f, 0x3d}} + FOLDERID_LocalAppData = &KNOWNFOLDERID{0xf1b32785, 0x6fba, 0x4fcf, [8]byte{0x9d, 0x55, 0x7b, 0x8e, 0x7f, 0x15, 0x70, 0x91}} + FOLDERID_LocalAppDataLow = &KNOWNFOLDERID{0xa520a1a4, 0x1780, 0x4ff6, [8]byte{0xbd, 0x18, 0x16, 0x73, 0x43, 0xc5, 0xaf, 0x16}} + FOLDERID_InternetCache = &KNOWNFOLDERID{0x352481e8, 0x33be, 0x4251, [8]byte{0xba, 0x85, 0x60, 0x07, 0xca, 0xed, 0xcf, 0x9d}} + FOLDERID_Cookies = &KNOWNFOLDERID{0x2b0f765d, 0xc0e9, 0x4171, [8]byte{0x90, 0x8e, 0x08, 0xa6, 0x11, 0xb8, 0x4f, 0xf6}} + FOLDERID_History = &KNOWNFOLDERID{0xd9dc8a3b, 0xb784, 0x432e, [8]byte{0xa7, 0x81, 0x5a, 0x11, 0x30, 0xa7, 0x59, 0x63}} + FOLDERID_System = &KNOWNFOLDERID{0x1ac14e77, 0x02e7, 0x4e5d, [8]byte{0xb7, 0x44, 0x2e, 0xb1, 0xae, 0x51, 0x98, 0xb7}} + FOLDERID_SystemX86 = &KNOWNFOLDERID{0xd65231b0, 0xb2f1, 0x4857, [8]byte{0xa4, 0xce, 0xa8, 0xe7, 0xc6, 0xea, 0x7d, 0x27}} + FOLDERID_Windows = &KNOWNFOLDERID{0xf38bf404, 0x1d43, 0x42f2, [8]byte{0x93, 0x05, 0x67, 0xde, 0x0b, 0x28, 0xfc, 0x23}} + FOLDERID_Profile = &KNOWNFOLDERID{0x5e6c858f, 0x0e22, 0x4760, [8]byte{0x9a, 0xfe, 0xea, 0x33, 0x17, 0xb6, 0x71, 0x73}} + FOLDERID_Pictures = &KNOWNFOLDERID{0x33e28130, 0x4e1e, 0x4676, [8]byte{0x83, 0x5a, 0x98, 0x39, 0x5c, 0x3b, 0xc3, 0xbb}} + FOLDERID_ProgramFilesX86 = &KNOWNFOLDERID{0x7c5a40ef, 0xa0fb, 0x4bfc, [8]byte{0x87, 0x4a, 0xc0, 0xf2, 0xe0, 0xb9, 0xfa, 0x8e}} + FOLDERID_ProgramFilesCommonX86 = &KNOWNFOLDERID{0xde974d24, 0xd9c6, 0x4d3e, [8]byte{0xbf, 0x91, 0xf4, 0x45, 0x51, 0x20, 0xb9, 0x17}} + FOLDERID_ProgramFilesX64 = &KNOWNFOLDERID{0x6d809377, 0x6af0, 0x444b, [8]byte{0x89, 0x57, 0xa3, 0x77, 0x3f, 0x02, 0x20, 0x0e}} + FOLDERID_ProgramFilesCommonX64 = &KNOWNFOLDERID{0x6365d5a7, 0x0f0d, 0x45e5, [8]byte{0x87, 0xf6, 0x0d, 0xa5, 0x6b, 0x6a, 0x4f, 0x7d}} + FOLDERID_ProgramFiles = &KNOWNFOLDERID{0x905e63b6, 0xc1bf, 0x494e, [8]byte{0xb2, 0x9c, 0x65, 0xb7, 0x32, 0xd3, 0xd2, 0x1a}} + FOLDERID_ProgramFilesCommon = &KNOWNFOLDERID{0xf7f1ed05, 0x9f6d, 0x47a2, [8]byte{0xaa, 0xae, 0x29, 0xd3, 0x17, 0xc6, 0xf0, 0x66}} + FOLDERID_UserProgramFiles = 
&KNOWNFOLDERID{0x5cd7aee2, 0x2219, 0x4a67, [8]byte{0xb8, 0x5d, 0x6c, 0x9c, 0xe1, 0x56, 0x60, 0xcb}} + FOLDERID_UserProgramFilesCommon = &KNOWNFOLDERID{0xbcbd3057, 0xca5c, 0x4622, [8]byte{0xb4, 0x2d, 0xbc, 0x56, 0xdb, 0x0a, 0xe5, 0x16}} + FOLDERID_AdminTools = &KNOWNFOLDERID{0x724ef170, 0xa42d, 0x4fef, [8]byte{0x9f, 0x26, 0xb6, 0x0e, 0x84, 0x6f, 0xba, 0x4f}} + FOLDERID_CommonAdminTools = &KNOWNFOLDERID{0xd0384e7d, 0xbac3, 0x4797, [8]byte{0x8f, 0x14, 0xcb, 0xa2, 0x29, 0xb3, 0x92, 0xb5}} + FOLDERID_Music = &KNOWNFOLDERID{0x4bd8d571, 0x6d19, 0x48d3, [8]byte{0xbe, 0x97, 0x42, 0x22, 0x20, 0x08, 0x0e, 0x43}} + FOLDERID_Videos = &KNOWNFOLDERID{0x18989b1d, 0x99b5, 0x455b, [8]byte{0x84, 0x1c, 0xab, 0x7c, 0x74, 0xe4, 0xdd, 0xfc}} + FOLDERID_Ringtones = &KNOWNFOLDERID{0xc870044b, 0xf49e, 0x4126, [8]byte{0xa9, 0xc3, 0xb5, 0x2a, 0x1f, 0xf4, 0x11, 0xe8}} + FOLDERID_PublicPictures = &KNOWNFOLDERID{0xb6ebfb86, 0x6907, 0x413c, [8]byte{0x9a, 0xf7, 0x4f, 0xc2, 0xab, 0xf0, 0x7c, 0xc5}} + FOLDERID_PublicMusic = &KNOWNFOLDERID{0x3214fab5, 0x9757, 0x4298, [8]byte{0xbb, 0x61, 0x92, 0xa9, 0xde, 0xaa, 0x44, 0xff}} + FOLDERID_PublicVideos = &KNOWNFOLDERID{0x2400183a, 0x6185, 0x49fb, [8]byte{0xa2, 0xd8, 0x4a, 0x39, 0x2a, 0x60, 0x2b, 0xa3}} + FOLDERID_PublicRingtones = &KNOWNFOLDERID{0xe555ab60, 0x153b, 0x4d17, [8]byte{0x9f, 0x04, 0xa5, 0xfe, 0x99, 0xfc, 0x15, 0xec}} + FOLDERID_ResourceDir = &KNOWNFOLDERID{0x8ad10c31, 0x2adb, 0x4296, [8]byte{0xa8, 0xf7, 0xe4, 0x70, 0x12, 0x32, 0xc9, 0x72}} + FOLDERID_LocalizedResourcesDir = &KNOWNFOLDERID{0x2a00375e, 0x224c, 0x49de, [8]byte{0xb8, 0xd1, 0x44, 0x0d, 0xf7, 0xef, 0x3d, 0xdc}} + FOLDERID_CommonOEMLinks = &KNOWNFOLDERID{0xc1bae2d0, 0x10df, 0x4334, [8]byte{0xbe, 0xdd, 0x7a, 0xa2, 0x0b, 0x22, 0x7a, 0x9d}} + FOLDERID_CDBurning = &KNOWNFOLDERID{0x9e52ab10, 0xf80d, 0x49df, [8]byte{0xac, 0xb8, 0x43, 0x30, 0xf5, 0x68, 0x78, 0x55}} + FOLDERID_UserProfiles = &KNOWNFOLDERID{0x0762d272, 0xc50a, 0x4bb0, [8]byte{0xa3, 0x82, 0x69, 0x7d, 0xcd, 0x72, 0x9b, 0x80}} + FOLDERID_Playlists = &KNOWNFOLDERID{0xde92c1c7, 0x837f, 0x4f69, [8]byte{0xa3, 0xbb, 0x86, 0xe6, 0x31, 0x20, 0x4a, 0x23}} + FOLDERID_SamplePlaylists = &KNOWNFOLDERID{0x15ca69b3, 0x30ee, 0x49c1, [8]byte{0xac, 0xe1, 0x6b, 0x5e, 0xc3, 0x72, 0xaf, 0xb5}} + FOLDERID_SampleMusic = &KNOWNFOLDERID{0xb250c668, 0xf57d, 0x4ee1, [8]byte{0xa6, 0x3c, 0x29, 0x0e, 0xe7, 0xd1, 0xaa, 0x1f}} + FOLDERID_SamplePictures = &KNOWNFOLDERID{0xc4900540, 0x2379, 0x4c75, [8]byte{0x84, 0x4b, 0x64, 0xe6, 0xfa, 0xf8, 0x71, 0x6b}} + FOLDERID_SampleVideos = &KNOWNFOLDERID{0x859ead94, 0x2e85, 0x48ad, [8]byte{0xa7, 0x1a, 0x09, 0x69, 0xcb, 0x56, 0xa6, 0xcd}} + FOLDERID_PhotoAlbums = &KNOWNFOLDERID{0x69d2cf90, 0xfc33, 0x4fb7, [8]byte{0x9a, 0x0c, 0xeb, 0xb0, 0xf0, 0xfc, 0xb4, 0x3c}} + FOLDERID_Public = &KNOWNFOLDERID{0xdfdf76a2, 0xc82a, 0x4d63, [8]byte{0x90, 0x6a, 0x56, 0x44, 0xac, 0x45, 0x73, 0x85}} + FOLDERID_ChangeRemovePrograms = &KNOWNFOLDERID{0xdf7266ac, 0x9274, 0x4867, [8]byte{0x8d, 0x55, 0x3b, 0xd6, 0x61, 0xde, 0x87, 0x2d}} + FOLDERID_AppUpdates = &KNOWNFOLDERID{0xa305ce99, 0xf527, 0x492b, [8]byte{0x8b, 0x1a, 0x7e, 0x76, 0xfa, 0x98, 0xd6, 0xe4}} + FOLDERID_AddNewPrograms = &KNOWNFOLDERID{0xde61d971, 0x5ebc, 0x4f02, [8]byte{0xa3, 0xa9, 0x6c, 0x82, 0x89, 0x5e, 0x5c, 0x04}} + FOLDERID_Downloads = &KNOWNFOLDERID{0x374de290, 0x123f, 0x4565, [8]byte{0x91, 0x64, 0x39, 0xc4, 0x92, 0x5e, 0x46, 0x7b}} + FOLDERID_PublicDownloads = &KNOWNFOLDERID{0x3d644c9b, 0x1fb8, 0x4f30, [8]byte{0x9b, 0x45, 0xf6, 0x70, 0x23, 0x5f, 0x79, 0xc0}} + FOLDERID_SavedSearches = 
&KNOWNFOLDERID{0x7d1d3a04, 0xdebb, 0x4115, [8]byte{0x95, 0xcf, 0x2f, 0x29, 0xda, 0x29, 0x20, 0xda}} + FOLDERID_QuickLaunch = &KNOWNFOLDERID{0x52a4f021, 0x7b75, 0x48a9, [8]byte{0x9f, 0x6b, 0x4b, 0x87, 0xa2, 0x10, 0xbc, 0x8f}} + FOLDERID_Contacts = &KNOWNFOLDERID{0x56784854, 0xc6cb, 0x462b, [8]byte{0x81, 0x69, 0x88, 0xe3, 0x50, 0xac, 0xb8, 0x82}} + FOLDERID_SidebarParts = &KNOWNFOLDERID{0xa75d362e, 0x50fc, 0x4fb7, [8]byte{0xac, 0x2c, 0xa8, 0xbe, 0xaa, 0x31, 0x44, 0x93}} + FOLDERID_SidebarDefaultParts = &KNOWNFOLDERID{0x7b396e54, 0x9ec5, 0x4300, [8]byte{0xbe, 0x0a, 0x24, 0x82, 0xeb, 0xae, 0x1a, 0x26}} + FOLDERID_PublicGameTasks = &KNOWNFOLDERID{0xdebf2536, 0xe1a8, 0x4c59, [8]byte{0xb6, 0xa2, 0x41, 0x45, 0x86, 0x47, 0x6a, 0xea}} + FOLDERID_GameTasks = &KNOWNFOLDERID{0x054fae61, 0x4dd8, 0x4787, [8]byte{0x80, 0xb6, 0x09, 0x02, 0x20, 0xc4, 0xb7, 0x00}} + FOLDERID_SavedGames = &KNOWNFOLDERID{0x4c5c32ff, 0xbb9d, 0x43b0, [8]byte{0xb5, 0xb4, 0x2d, 0x72, 0xe5, 0x4e, 0xaa, 0xa4}} + FOLDERID_Games = &KNOWNFOLDERID{0xcac52c1a, 0xb53d, 0x4edc, [8]byte{0x92, 0xd7, 0x6b, 0x2e, 0x8a, 0xc1, 0x94, 0x34}} + FOLDERID_SEARCH_MAPI = &KNOWNFOLDERID{0x98ec0e18, 0x2098, 0x4d44, [8]byte{0x86, 0x44, 0x66, 0x97, 0x93, 0x15, 0xa2, 0x81}} + FOLDERID_SEARCH_CSC = &KNOWNFOLDERID{0xee32e446, 0x31ca, 0x4aba, [8]byte{0x81, 0x4f, 0xa5, 0xeb, 0xd2, 0xfd, 0x6d, 0x5e}} + FOLDERID_Links = &KNOWNFOLDERID{0xbfb9d5e0, 0xc6a9, 0x404c, [8]byte{0xb2, 0xb2, 0xae, 0x6d, 0xb6, 0xaf, 0x49, 0x68}} + FOLDERID_UsersFiles = &KNOWNFOLDERID{0xf3ce0f7c, 0x4901, 0x4acc, [8]byte{0x86, 0x48, 0xd5, 0xd4, 0x4b, 0x04, 0xef, 0x8f}} + FOLDERID_UsersLibraries = &KNOWNFOLDERID{0xa302545d, 0xdeff, 0x464b, [8]byte{0xab, 0xe8, 0x61, 0xc8, 0x64, 0x8d, 0x93, 0x9b}} + FOLDERID_SearchHome = &KNOWNFOLDERID{0x190337d1, 0xb8ca, 0x4121, [8]byte{0xa6, 0x39, 0x6d, 0x47, 0x2d, 0x16, 0x97, 0x2a}} + FOLDERID_OriginalImages = &KNOWNFOLDERID{0x2c36c0aa, 0x5812, 0x4b87, [8]byte{0xbf, 0xd0, 0x4c, 0xd0, 0xdf, 0xb1, 0x9b, 0x39}} + FOLDERID_DocumentsLibrary = &KNOWNFOLDERID{0x7b0db17d, 0x9cd2, 0x4a93, [8]byte{0x97, 0x33, 0x46, 0xcc, 0x89, 0x02, 0x2e, 0x7c}} + FOLDERID_MusicLibrary = &KNOWNFOLDERID{0x2112ab0a, 0xc86a, 0x4ffe, [8]byte{0xa3, 0x68, 0x0d, 0xe9, 0x6e, 0x47, 0x01, 0x2e}} + FOLDERID_PicturesLibrary = &KNOWNFOLDERID{0xa990ae9f, 0xa03b, 0x4e80, [8]byte{0x94, 0xbc, 0x99, 0x12, 0xd7, 0x50, 0x41, 0x04}} + FOLDERID_VideosLibrary = &KNOWNFOLDERID{0x491e922f, 0x5643, 0x4af4, [8]byte{0xa7, 0xeb, 0x4e, 0x7a, 0x13, 0x8d, 0x81, 0x74}} + FOLDERID_RecordedTVLibrary = &KNOWNFOLDERID{0x1a6fdba2, 0xf42d, 0x4358, [8]byte{0xa7, 0x98, 0xb7, 0x4d, 0x74, 0x59, 0x26, 0xc5}} + FOLDERID_HomeGroup = &KNOWNFOLDERID{0x52528a6b, 0xb9e3, 0x4add, [8]byte{0xb6, 0x0d, 0x58, 0x8c, 0x2d, 0xba, 0x84, 0x2d}} + FOLDERID_HomeGroupCurrentUser = &KNOWNFOLDERID{0x9b74b6a3, 0x0dfd, 0x4f11, [8]byte{0x9e, 0x78, 0x5f, 0x78, 0x00, 0xf2, 0xe7, 0x72}} + FOLDERID_DeviceMetadataStore = &KNOWNFOLDERID{0x5ce4a5e9, 0xe4eb, 0x479d, [8]byte{0xb8, 0x9f, 0x13, 0x0c, 0x02, 0x88, 0x61, 0x55}} + FOLDERID_Libraries = &KNOWNFOLDERID{0x1b3ea5dc, 0xb587, 0x4786, [8]byte{0xb4, 0xef, 0xbd, 0x1d, 0xc3, 0x32, 0xae, 0xae}} + FOLDERID_PublicLibraries = &KNOWNFOLDERID{0x48daf80b, 0xe6cf, 0x4f4e, [8]byte{0xb8, 0x00, 0x0e, 0x69, 0xd8, 0x4e, 0xe3, 0x84}} + FOLDERID_UserPinned = &KNOWNFOLDERID{0x9e3995ab, 0x1f9c, 0x4f13, [8]byte{0xb8, 0x27, 0x48, 0xb2, 0x4b, 0x6c, 0x71, 0x74}} + FOLDERID_ImplicitAppShortcuts = &KNOWNFOLDERID{0xbcb5256f, 0x79f6, 0x4cee, [8]byte{0xb7, 0x25, 0xdc, 0x34, 0xe4, 0x02, 0xfd, 0x46}} + FOLDERID_AccountPictures = 
&KNOWNFOLDERID{0x008ca0b1, 0x55b4, 0x4c56, [8]byte{0xb8, 0xa8, 0x4d, 0xe4, 0xb2, 0x99, 0xd3, 0xbe}} + FOLDERID_PublicUserTiles = &KNOWNFOLDERID{0x0482af6c, 0x08f1, 0x4c34, [8]byte{0x8c, 0x90, 0xe1, 0x7e, 0xc9, 0x8b, 0x1e, 0x17}} + FOLDERID_AppsFolder = &KNOWNFOLDERID{0x1e87508d, 0x89c2, 0x42f0, [8]byte{0x8a, 0x7e, 0x64, 0x5a, 0x0f, 0x50, 0xca, 0x58}} + FOLDERID_StartMenuAllPrograms = &KNOWNFOLDERID{0xf26305ef, 0x6948, 0x40b9, [8]byte{0xb2, 0x55, 0x81, 0x45, 0x3d, 0x09, 0xc7, 0x85}} + FOLDERID_CommonStartMenuPlaces = &KNOWNFOLDERID{0xa440879f, 0x87a0, 0x4f7d, [8]byte{0xb7, 0x00, 0x02, 0x07, 0xb9, 0x66, 0x19, 0x4a}} + FOLDERID_ApplicationShortcuts = &KNOWNFOLDERID{0xa3918781, 0xe5f2, 0x4890, [8]byte{0xb3, 0xd9, 0xa7, 0xe5, 0x43, 0x32, 0x32, 0x8c}} + FOLDERID_RoamingTiles = &KNOWNFOLDERID{0x00bcfc5a, 0xed94, 0x4e48, [8]byte{0x96, 0xa1, 0x3f, 0x62, 0x17, 0xf2, 0x19, 0x90}} + FOLDERID_RoamedTileImages = &KNOWNFOLDERID{0xaaa8d5a5, 0xf1d6, 0x4259, [8]byte{0xba, 0xa8, 0x78, 0xe7, 0xef, 0x60, 0x83, 0x5e}} + FOLDERID_Screenshots = &KNOWNFOLDERID{0xb7bede81, 0xdf94, 0x4682, [8]byte{0xa7, 0xd8, 0x57, 0xa5, 0x26, 0x20, 0xb8, 0x6f}} + FOLDERID_CameraRoll = &KNOWNFOLDERID{0xab5fb87b, 0x7ce2, 0x4f83, [8]byte{0x91, 0x5d, 0x55, 0x08, 0x46, 0xc9, 0x53, 0x7b}} + FOLDERID_SkyDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} + FOLDERID_OneDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} + FOLDERID_SkyDriveDocuments = &KNOWNFOLDERID{0x24d89e24, 0x2f19, 0x4534, [8]byte{0x9d, 0xde, 0x6a, 0x66, 0x71, 0xfb, 0xb8, 0xfe}} + FOLDERID_SkyDrivePictures = &KNOWNFOLDERID{0x339719b5, 0x8c47, 0x4894, [8]byte{0x94, 0xc2, 0xd8, 0xf7, 0x7a, 0xdd, 0x44, 0xa6}} + FOLDERID_SkyDriveMusic = &KNOWNFOLDERID{0xc3f2459e, 0x80d6, 0x45dc, [8]byte{0xbf, 0xef, 0x1f, 0x76, 0x9f, 0x2b, 0xe7, 0x30}} + FOLDERID_SkyDriveCameraRoll = &KNOWNFOLDERID{0x767e6811, 0x49cb, 0x4273, [8]byte{0x87, 0xc2, 0x20, 0xf3, 0x55, 0xe1, 0x08, 0x5b}} + FOLDERID_SearchHistory = &KNOWNFOLDERID{0x0d4c3db6, 0x03a3, 0x462f, [8]byte{0xa0, 0xe6, 0x08, 0x92, 0x4c, 0x41, 0xb5, 0xd4}} + FOLDERID_SearchTemplates = &KNOWNFOLDERID{0x7e636bfe, 0xdfa9, 0x4d5e, [8]byte{0xb4, 0x56, 0xd7, 0xb3, 0x98, 0x51, 0xd8, 0xa9}} + FOLDERID_CameraRollLibrary = &KNOWNFOLDERID{0x2b20df75, 0x1eda, 0x4039, [8]byte{0x80, 0x97, 0x38, 0x79, 0x82, 0x27, 0xd5, 0xb7}} + FOLDERID_SavedPictures = &KNOWNFOLDERID{0x3b193882, 0xd3ad, 0x4eab, [8]byte{0x96, 0x5a, 0x69, 0x82, 0x9d, 0x1f, 0xb5, 0x9f}} + FOLDERID_SavedPicturesLibrary = &KNOWNFOLDERID{0xe25b5812, 0xbe88, 0x4bd9, [8]byte{0x94, 0xb0, 0x29, 0x23, 0x34, 0x77, 0xb6, 0xc3}} + FOLDERID_RetailDemo = &KNOWNFOLDERID{0x12d4c69e, 0x24ad, 0x4923, [8]byte{0xbe, 0x19, 0x31, 0x32, 0x1c, 0x43, 0xa7, 0x67}} + FOLDERID_Device = &KNOWNFOLDERID{0x1c2ac1dc, 0x4358, 0x4b6c, [8]byte{0x97, 0x33, 0xaf, 0x21, 0x15, 0x65, 0x76, 0xf0}} + FOLDERID_DevelopmentFiles = &KNOWNFOLDERID{0xdbe8e08e, 0x3053, 0x4bbc, [8]byte{0xb1, 0x83, 0x2a, 0x7b, 0x2b, 0x19, 0x1e, 0x59}} + FOLDERID_Objects3D = &KNOWNFOLDERID{0x31c0dd25, 0x9439, 0x4f12, [8]byte{0xbf, 0x41, 0x7f, 0xf4, 0xed, 0xa3, 0x87, 0x22}} + FOLDERID_AppCaptures = &KNOWNFOLDERID{0xedc0fe71, 0x98d8, 0x4f4a, [8]byte{0xb9, 0x20, 0xc8, 0xdc, 0x13, 0x3c, 0xb1, 0x65}} + FOLDERID_LocalDocuments = &KNOWNFOLDERID{0xf42ee2d3, 0x909f, 0x4907, [8]byte{0x88, 0x71, 0x4c, 0x22, 0xfc, 0x0b, 0xf7, 0x56}} + FOLDERID_LocalPictures = &KNOWNFOLDERID{0x0ddd015d, 0xb06c, 0x45d5, [8]byte{0x8c, 0x4c, 0xf5, 0x97, 0x13, 0x85, 0x46, 0x39}} + 
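The FOLDERID_* values in this new file are GUID pointers meant to be handed to shell32's SHGetKnownFolderPath, with the result freed via ole32's CoTaskMemFree. A rough, self-contained sketch of that call path, using only lazy-proc plumbing plus the FOLDERID_Downloads and KF_FLAG_DEFAULT names introduced in this change (the package's own generated shGetKnownFolderPath wrapper above does the same work internally):

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

var (
	shell32               = windows.NewLazySystemDLL("shell32.dll")
	ole32                 = windows.NewLazySystemDLL("ole32.dll")
	procGetKnownFolder    = shell32.NewProc("SHGetKnownFolderPath")
	procCoTaskMemFreeDemo = ole32.NewProc("CoTaskMemFree")
)

func main() {
	var p *uint16
	// SHGetKnownFolderPath(REFKNOWNFOLDERID, dwFlags, hToken, *PWSTR); hToken 0 means the current user.
	hr, _, _ := procGetKnownFolder.Call(
		uintptr(unsafe.Pointer(windows.FOLDERID_Downloads)),
		uintptr(windows.KF_FLAG_DEFAULT),
		0,
		uintptr(unsafe.Pointer(&p)),
	)
	if hr != 0 { // S_OK is 0
		fmt.Printf("SHGetKnownFolderPath failed: HRESULT %#x\n", hr)
		return
	}
	// The returned buffer is allocated by the shell and must be released with CoTaskMemFree.
	defer procCoTaskMemFreeDemo.Call(uintptr(unsafe.Pointer(p)))

	// Copy the NUL-terminated UTF-16 path into a Go string.
	buf := (*[1 << 15]uint16)(unsafe.Pointer(p))[:]
	n := 0
	for buf[n] != 0 {
		n++
	}
	fmt.Println(windows.UTF16ToString(buf[:n]))
}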
FOLDERID_LocalVideos = &KNOWNFOLDERID{0x35286a68, 0x3c57, 0x41a1, [8]byte{0xbb, 0xb1, 0x0e, 0xae, 0x73, 0xd7, 0x6c, 0x95}} + FOLDERID_LocalMusic = &KNOWNFOLDERID{0xa0c69a99, 0x21c8, 0x4671, [8]byte{0x87, 0x03, 0x79, 0x34, 0x16, 0x2f, 0xcf, 0x1d}} + FOLDERID_LocalDownloads = &KNOWNFOLDERID{0x7d83ee9b, 0x2244, 0x4e70, [8]byte{0xb1, 0xf5, 0x53, 0x93, 0x04, 0x2a, 0xf1, 0xe4}} + FOLDERID_RecordedCalls = &KNOWNFOLDERID{0x2f8b40c2, 0x83ed, 0x48ee, [8]byte{0xb3, 0x83, 0xa1, 0xf1, 0x57, 0xec, 0x6f, 0x9a}} + FOLDERID_AllAppMods = &KNOWNFOLDERID{0x7ad67899, 0x66af, 0x43ba, [8]byte{0x91, 0x56, 0x6a, 0xad, 0x42, 0xe6, 0xc5, 0x96}} + FOLDERID_CurrentAppMods = &KNOWNFOLDERID{0x3db40b20, 0x2a30, 0x4dbe, [8]byte{0x91, 0x7e, 0x77, 0x1d, 0xd2, 0x1d, 0xd0, 0x99}} + FOLDERID_AppDataDesktop = &KNOWNFOLDERID{0xb2c5e279, 0x7add, 0x439f, [8]byte{0xb2, 0x8c, 0xc4, 0x1f, 0xe1, 0xbb, 0xf6, 0x72}} + FOLDERID_AppDataDocuments = &KNOWNFOLDERID{0x7be16610, 0x1f7f, 0x44ac, [8]byte{0xbf, 0xf0, 0x83, 0xe1, 0x5f, 0x2f, 0xfc, 0xa1}} + FOLDERID_AppDataFavorites = &KNOWNFOLDERID{0x7cfbefbc, 0xde1f, 0x45aa, [8]byte{0xb8, 0x43, 0xa5, 0x42, 0xac, 0x53, 0x6c, 0xc9}} + FOLDERID_AppDataProgramData = &KNOWNFOLDERID{0x559d40a3, 0xa036, 0x40fa, [8]byte{0xaf, 0x61, 0x84, 0xcb, 0x43, 0x0a, 0x4d, 0x34}} +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index eb9f06296e42e..e66ab049491ee 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -38,14 +38,17 @@ var ( modadvapi32 = NewLazySystemDLL("advapi32.dll") modkernel32 = NewLazySystemDLL("kernel32.dll") modshell32 = NewLazySystemDLL("shell32.dll") + moduserenv = NewLazySystemDLL("userenv.dll") modmswsock = NewLazySystemDLL("mswsock.dll") modcrypt32 = NewLazySystemDLL("crypt32.dll") + moduser32 = NewLazySystemDLL("user32.dll") + modole32 = NewLazySystemDLL("ole32.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll") moddnsapi = NewLazySystemDLL("dnsapi.dll") modiphlpapi = NewLazySystemDLL("iphlpapi.dll") modsecur32 = NewLazySystemDLL("secur32.dll") modnetapi32 = NewLazySystemDLL("netapi32.dll") - moduserenv = NewLazySystemDLL("userenv.dll") + modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") @@ -57,6 +60,7 @@ var ( procDeleteService = modadvapi32.NewProc("DeleteService") procStartServiceW = modadvapi32.NewProc("StartServiceW") procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") procControlService = modadvapi32.NewProc("ControlService") procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") @@ -66,6 +70,7 @@ var ( procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW") procGetLastError = modkernel32.NewProc("GetLastError") procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") @@ -74,6 +79,7 @@ var ( procGetVersion = modkernel32.NewProc("GetVersion") procFormatMessageW = modkernel32.NewProc("FormatMessageW") procExitProcess = 
modkernel32.NewProc("ExitProcess") + procIsWow64Process = modkernel32.NewProc("IsWow64Process") procCreateFileW = modkernel32.NewProc("CreateFileW") procReadFile = modkernel32.NewProc("ReadFile") procWriteFile = modkernel32.NewProc("WriteFile") @@ -86,6 +92,7 @@ var ( procFindNextFileW = modkernel32.NewProc("FindNextFileW") procFindClose = modkernel32.NewProc("FindClose") procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") @@ -106,10 +113,13 @@ var ( procCancelIoEx = modkernel32.NewProc("CancelIoEx") procCreateProcessW = modkernel32.NewProc("CreateProcessW") procOpenProcess = modkernel32.NewProc("OpenProcess") + procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") procTerminateProcess = modkernel32.NewProc("TerminateProcess") procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") procGetCurrentProcess = modkernel32.NewProc("GetCurrentProcess") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") @@ -124,6 +134,9 @@ var ( procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") + procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") + procGetTickCount64 = modkernel32.NewProc("GetTickCount64") procSetFileTime = modkernel32.NewProc("SetFileTime") procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") @@ -171,6 +184,8 @@ var ( procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") procProcess32FirstW = modkernel32.NewProc("Process32FirstW") procProcess32NextW = modkernel32.NewProc("Process32NextW") + procThread32First = modkernel32.NewProc("Thread32First") + procThread32Next = modkernel32.NewProc("Thread32Next") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") @@ -181,6 +196,18 @@ var ( procSetEvent = modkernel32.NewProc("SetEvent") procResetEvent = modkernel32.NewProc("ResetEvent") procPulseEvent = modkernel32.NewProc("PulseEvent") + procSleepEx = modkernel32.NewProc("SleepEx") + procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") + procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") + procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") + procSetErrorMode = modkernel32.NewProc("SetErrorMode") + procResumeThread = modkernel32.NewProc("ResumeThread") + procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") + procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") + procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") + 
procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") + procGetProcessId = modkernel32.NewProc("GetProcessId") + procOpenThread = modkernel32.NewProc("OpenThread") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") @@ -200,6 +227,11 @@ var ( procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procMessageBoxW = moduser32.NewProc("MessageBoxW") + procCLSIDFromString = modole32.NewProc("CLSIDFromString") + procStringFromGUID2 = modole32.NewProc("StringFromGUID2") + procCoCreateGuid = modole32.NewProc("CoCreateGuid") + procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") procWSAStartup = modws2_32.NewProc("WSAStartup") procWSACleanup = modws2_32.NewProc("WSACleanup") procWSAIoctl = modws2_32.NewProc("WSAIoctl") @@ -248,13 +280,30 @@ var ( procCopySid = modadvapi32.NewProc("CopySid") procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") + procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") procFreeSid = modadvapi32.NewProc("FreeSid") procEqualSid = modadvapi32.NewProc("EqualSid") + procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") + procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") + procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") + procIsValidSid = modadvapi32.NewProc("IsValidSid") procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procSetThreadToken = modadvapi32.NewProc("SetThreadToken") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") + procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") + procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") + procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") + procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") + procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") ) func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { @@ -381,6 +430,18 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { return } +func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err 
error) { r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { @@ -489,6 +550,14 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize return } +func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { + r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + func GetLastError() (lasterr error) { r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) if r0 != 0 { @@ -610,7 +679,19 @@ func ExitProcess(exitcode uint32) { return } -func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) { +func IsWow64Process(handle Handle, isWow64 *bool) (err error) { + r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(isWow64)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) handle = Handle(r0) if handle == InvalidHandle { @@ -772,6 +853,18 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e return } +func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) n = uint32(r0) @@ -995,14 +1088,14 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA return } -func OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) { +func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { var _p0 uint32 if inheritHandle { _p0 = 1 } else { _p0 = 0 } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(da), uintptr(_p0), uintptr(pid)) + r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) handle = Handle(r0) if handle == 0 { if e1 != 0 { @@ -1014,6 +1107,26 @@ func OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err return } +func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { + r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func 
shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { + r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + func TerminateProcess(handle Handle, exitcode uint32) (err error) { r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) if r1 == 0 { @@ -1063,6 +1176,19 @@ func GetCurrentProcess() (pseudoHandle Handle, err error) { return } +func GetCurrentThread() (pseudoHandle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + pseudoHandle = Handle(r0) + if pseudoHandle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) if r1 == 0 { @@ -1249,6 +1375,42 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { return } +func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { + var _p0 uint32 + if inheritExisting { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DestroyEnvironmentBlock(block *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getTickCount64() (ms uint64) { + r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + ms = uint64(r0) + return +} + func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) if r1 == 0 { @@ -1691,7 +1853,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32 return } -func getCurrentProcessId() (pid uint32) { +func GetCurrentProcessId() (pid uint32) { r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) pid = uint32(r0) return @@ -1794,6 +1956,30 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { return } +func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize 
uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { @@ -1917,6 +2103,156 @@ func PulseEvent(event Handle) (err error) { return } +func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { + var _p0 uint32 + if alertable { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + ret = uint32(r0) + return +} + +func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AssignProcessToJobObject(job Handle, process Handle) (err error) { + r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func TerminateJobObject(job Handle, exitCode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetErrorMode(mode uint32) (ret uint32) { + r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + ret = uint32(r0) + return +} + +func ResumeThread(thread Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + ret = uint32(r0) + if ret == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetPriorityClass(process Handle, priorityClass uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetPriorityClass(process Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + ret = uint32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { + r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + ret = int(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcessId(process Handle) (id uint32, err error) { + r0, _, e1 := 
syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + id = uint32(r0) + if id == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { @@ -2144,6 +2480,46 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { + r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + ret = int32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { + r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + chars = int32(r0) + return +} + +func coCreateGuid(pguid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func coTaskMemFree(address unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) + return +} + func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) if r0 != 0 { @@ -2686,6 +3062,12 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s return } +func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { + r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + isWellKnown = r0 != 0 + return +} + func FreeSid(sid *SID) (err error) { r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) if r1 != 0 { @@ -2704,6 +3086,30 @@ func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { return } +func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { + r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) + return +} + +func getSidSubAuthorityCount(sid *SID) (count *uint8) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + count = (*uint8)(unsafe.Pointer(r0)) + return +} + +func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { + r0, _, _ := 
syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + subAuthority = (*uint32)(unsafe.Pointer(r0)) + return +} + +func isValidSid(sid *SID) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + isValid = r0 != 0 + return +} + func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) if r1 == 0 { @@ -2716,8 +3122,8 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) ( return } -func OpenProcessToken(h Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(h), uintptr(access), uintptr(unsafe.Pointer(token))) +func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { if e1 != 0 { err = errnoErr(e1) @@ -2728,8 +3134,134 @@ func OpenProcessToken(h Handle, access uint32, token *Token) (err error) { return } -func GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(t), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) +func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ImpersonateSelf(impersonationlevel uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RevertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetThreadToken(thread *Handle, token Token) (err error) { + r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { + var _p0 uint32 + if disableAllPrivileges { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), 
uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { + var _p0 uint32 + if resetToDefault { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { + r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) if r1 == 0 { if e1 != 0 { err = errnoErr(e1) @@ -2764,3 +3296,32 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } return } + +func WTSQueryUserToken(session uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WTSFreeMemory(ptr uintptr) { + syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + return +} diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/mib.go b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go index 8cc29021c8526..fc7df1bc716e1 100644 --- a/vendor/golang.org/x/text/encoding/internal/identifier/mib.go +++ b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go @@ -538,8 +538,6 @@ const ( // ISO111ECMACyrillic is the MIB identifier with IANA name ECMA-cyrillic. // // ISO registry - // (formerly ECMA - // registry ) ISO111ECMACyrillic MIB = 77 // ISO121Canadian1 is the MIB identifier with IANA name CSA_Z243.4-1985-1. 
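Beyond the http-to-https comment churn in the mib.go hunks below, these MIB identifiers are normally consumed indirectly through a name index rather than referenced directly. A small sketch using the ianaindex package from the same x/text module (assumed available; it is not touched by this diff):

package main

import (
	"fmt"

	"golang.org/x/text/encoding/ianaindex"
)

func main() {
	// Resolve an encoding by its IANA charset name; the identifier/mib.go
	// constants exist so that indexes like this one can map names to encodings.
	enc, err := ianaindex.IANA.Encoding("windows-1252")
	if err != nil || enc == nil {
		fmt.Println("unknown charset:", err)
		return
	}

	// Decode a few CP1252 bytes (smart quotes around "Hi") into UTF-8.
	utf8, err := enc.NewDecoder().Bytes([]byte{0x93, 0x48, 0x69, 0x94})
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(string(utf8))

	if name, err := ianaindex.IANA.Name(enc); err == nil {
		fmt.Println("canonical name:", name)
	}
}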
@@ -732,18 +730,18 @@ const ( // ISO885913 is the MIB identifier with IANA name ISO-8859-13. // - // ISO See http://www.iana.org/assignments/charset-reg/ISO-8859-13 http://www.iana.org/assignments/charset-reg/ISO-8859-13 + // ISO See https://www.iana.org/assignments/charset-reg/ISO-8859-13 https://www.iana.org/assignments/charset-reg/ISO-8859-13 ISO885913 MIB = 109 // ISO885914 is the MIB identifier with IANA name ISO-8859-14. // - // ISO See http://www.iana.org/assignments/charset-reg/ISO-8859-14 + // ISO See https://www.iana.org/assignments/charset-reg/ISO-8859-14 ISO885914 MIB = 110 // ISO885915 is the MIB identifier with IANA name ISO-8859-15. // // ISO - // Please see: http://www.iana.org/assignments/charset-reg/ISO-8859-15 + // Please see: https://www.iana.org/assignments/charset-reg/ISO-8859-15 ISO885915 MIB = 111 // ISO885916 is the MIB identifier with IANA name ISO-8859-16. @@ -754,41 +752,41 @@ const ( // GBK is the MIB identifier with IANA name GBK. // // Chinese IT Standardization Technical Committee - // Please see: http://www.iana.org/assignments/charset-reg/GBK + // Please see: https://www.iana.org/assignments/charset-reg/GBK GBK MIB = 113 // GB18030 is the MIB identifier with IANA name GB18030. // // Chinese IT Standardization Technical Committee - // Please see: http://www.iana.org/assignments/charset-reg/GB18030 + // Please see: https://www.iana.org/assignments/charset-reg/GB18030 GB18030 MIB = 114 // OSDEBCDICDF0415 is the MIB identifier with IANA name OSD_EBCDIC_DF04_15. // // Fujitsu-Siemens standard mainframe EBCDIC encoding - // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-15 + // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-15 OSDEBCDICDF0415 MIB = 115 // OSDEBCDICDF03IRV is the MIB identifier with IANA name OSD_EBCDIC_DF03_IRV. // // Fujitsu-Siemens standard mainframe EBCDIC encoding - // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF03-IRV + // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF03-IRV OSDEBCDICDF03IRV MIB = 116 // OSDEBCDICDF041 is the MIB identifier with IANA name OSD_EBCDIC_DF04_1. // // Fujitsu-Siemens standard mainframe EBCDIC encoding - // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-1 + // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-1 OSDEBCDICDF041 MIB = 117 // ISO115481 is the MIB identifier with IANA name ISO-11548-1. // - // See http://www.iana.org/assignments/charset-reg/ISO-11548-1 + // See https://www.iana.org/assignments/charset-reg/ISO-11548-1 ISO115481 MIB = 118 // KZ1048 is the MIB identifier with IANA name KZ-1048. // - // See http://www.iana.org/assignments/charset-reg/KZ-1048 + // See https://www.iana.org/assignments/charset-reg/KZ-1048 KZ1048 MIB = 119 // Unicode is the MIB identifier with IANA name ISO-10646-UCS-2. @@ -855,7 +853,7 @@ const ( // SCSU is the MIB identifier with IANA name SCSU. // - // SCSU See http://www.iana.org/assignments/charset-reg/SCSU + // SCSU See https://www.iana.org/assignments/charset-reg/SCSU SCSU MIB = 1011 // UTF7 is the MIB identifier with IANA name UTF-7. @@ -884,22 +882,22 @@ const ( // CESU8 is the MIB identifier with IANA name CESU-8. // - // https://www.unicode.org/unicode/reports/tr26 + // https://www.unicode.org/reports/tr26 CESU8 MIB = 1016 // UTF32 is the MIB identifier with IANA name UTF-32. 
// - // https://www.unicode.org/unicode/reports/tr19/ + // https://www.unicode.org/reports/tr19/ UTF32 MIB = 1017 // UTF32BE is the MIB identifier with IANA name UTF-32BE. // - // https://www.unicode.org/unicode/reports/tr19/ + // https://www.unicode.org/reports/tr19/ UTF32BE MIB = 1018 // UTF32LE is the MIB identifier with IANA name UTF-32LE. // - // https://www.unicode.org/unicode/reports/tr19/ + // https://www.unicode.org/reports/tr19/ UTF32LE MIB = 1019 // BOCU1 is the MIB identifier with IANA name BOCU-1. @@ -1461,152 +1459,152 @@ const ( // IBM00858 is the MIB identifier with IANA name IBM00858. // - // IBM See http://www.iana.org/assignments/charset-reg/IBM00858 + // IBM See https://www.iana.org/assignments/charset-reg/IBM00858 IBM00858 MIB = 2089 // IBM00924 is the MIB identifier with IANA name IBM00924. // - // IBM See http://www.iana.org/assignments/charset-reg/IBM00924 + // IBM See https://www.iana.org/assignments/charset-reg/IBM00924 IBM00924 MIB = 2090 // IBM01140 is the MIB identifier with IANA name IBM01140. // - // IBM See http://www.iana.org/assignments/charset-reg/IBM01140 + // IBM See https://www.iana.org/assignments/charset-reg/IBM01140 IBM01140 MIB = 2091 // IBM01141 is the MIB identifier with IANA name IBM01141. // - // IBM See http://www.iana.org/assignments/charset-reg/IBM01141 + // IBM See https://www.iana.org/assignments/charset-reg/IBM01141 IBM01141 MIB = 2092 // IBM01142 is the MIB identifier with IANA name IBM01142. // - // IBM See http://www.iana.org/assignments/charset-reg/IBM01142 + // IBM See https://www.iana.org/assignments/charset-reg/IBM01142 IBM01142 MIB = 2093 // IBM01143 is the MIB identifier with IANA name IBM01143. // - // IBM See http://www.iana.org/assignments/charset-reg/IBM01143 + // IBM See https://www.iana.org/assignments/charset-reg/IBM01143 IBM01143 MIB = 2094 // IBM01144 is the MIB identifier with IANA name IBM01144. // - // IBM See http://www.iana.org/assignments/charset-reg/IBM01144 + // IBM See https://www.iana.org/assignments/charset-reg/IBM01144 IBM01144 MIB = 2095 // IBM01145 is the MIB identifier with IANA name IBM01145. // - // IBM See http://www.iana.org/assignments/charset-reg/IBM01145 + // IBM See https://www.iana.org/assignments/charset-reg/IBM01145 IBM01145 MIB = 2096 // IBM01146 is the MIB identifier with IANA name IBM01146. // - // IBM See http://www.iana.org/assignments/charset-reg/IBM01146 + // IBM See https://www.iana.org/assignments/charset-reg/IBM01146 IBM01146 MIB = 2097 // IBM01147 is the MIB identifier with IANA name IBM01147. // - // IBM See http://www.iana.org/assignments/charset-reg/IBM01147 + // IBM See https://www.iana.org/assignments/charset-reg/IBM01147 IBM01147 MIB = 2098 // IBM01148 is the MIB identifier with IANA name IBM01148. // - // IBM See http://www.iana.org/assignments/charset-reg/IBM01148 + // IBM See https://www.iana.org/assignments/charset-reg/IBM01148 IBM01148 MIB = 2099 // IBM01149 is the MIB identifier with IANA name IBM01149. // - // IBM See http://www.iana.org/assignments/charset-reg/IBM01149 + // IBM See https://www.iana.org/assignments/charset-reg/IBM01149 IBM01149 MIB = 2100 // Big5HKSCS is the MIB identifier with IANA name Big5-HKSCS. // - // See http://www.iana.org/assignments/charset-reg/Big5-HKSCS + // See https://www.iana.org/assignments/charset-reg/Big5-HKSCS Big5HKSCS MIB = 2101 // IBM1047 is the MIB identifier with IANA name IBM1047. 
// - // IBM1047 (EBCDIC Latin 1/Open Systems) http://www-1.ibm.com/servers/eserver/iseries/software/globalization/pdf/cp01047z.pdf + // IBM1047 (EBCDIC Latin 1/Open Systems) https://www-1.ibm.com/servers/eserver/iseries/software/globalization/pdf/cp01047z.pdf IBM1047 MIB = 2102 // PTCP154 is the MIB identifier with IANA name PTCP154. // - // See http://www.iana.org/assignments/charset-reg/PTCP154 + // See https://www.iana.org/assignments/charset-reg/PTCP154 PTCP154 MIB = 2103 // Amiga1251 is the MIB identifier with IANA name Amiga-1251. // - // See http://www.amiga.ultranet.ru/Amiga-1251.html + // See https://www.amiga.ultranet.ru/Amiga-1251.html Amiga1251 MIB = 2104 // KOI7switched is the MIB identifier with IANA name KOI7-switched. // - // See http://www.iana.org/assignments/charset-reg/KOI7-switched + // See https://www.iana.org/assignments/charset-reg/KOI7-switched KOI7switched MIB = 2105 // BRF is the MIB identifier with IANA name BRF. // - // See http://www.iana.org/assignments/charset-reg/BRF + // See https://www.iana.org/assignments/charset-reg/BRF BRF MIB = 2106 // TSCII is the MIB identifier with IANA name TSCII. // - // See http://www.iana.org/assignments/charset-reg/TSCII + // See https://www.iana.org/assignments/charset-reg/TSCII TSCII MIB = 2107 // CP51932 is the MIB identifier with IANA name CP51932. // - // See http://www.iana.org/assignments/charset-reg/CP51932 + // See https://www.iana.org/assignments/charset-reg/CP51932 CP51932 MIB = 2108 // Windows874 is the MIB identifier with IANA name windows-874. // - // See http://www.iana.org/assignments/charset-reg/windows-874 + // See https://www.iana.org/assignments/charset-reg/windows-874 Windows874 MIB = 2109 // Windows1250 is the MIB identifier with IANA name windows-1250. // - // Microsoft http://www.iana.org/assignments/charset-reg/windows-1250 + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1250 Windows1250 MIB = 2250 // Windows1251 is the MIB identifier with IANA name windows-1251. // - // Microsoft http://www.iana.org/assignments/charset-reg/windows-1251 + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1251 Windows1251 MIB = 2251 // Windows1252 is the MIB identifier with IANA name windows-1252. // - // Microsoft http://www.iana.org/assignments/charset-reg/windows-1252 + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1252 Windows1252 MIB = 2252 // Windows1253 is the MIB identifier with IANA name windows-1253. // - // Microsoft http://www.iana.org/assignments/charset-reg/windows-1253 + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1253 Windows1253 MIB = 2253 // Windows1254 is the MIB identifier with IANA name windows-1254. // - // Microsoft http://www.iana.org/assignments/charset-reg/windows-1254 + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1254 Windows1254 MIB = 2254 // Windows1255 is the MIB identifier with IANA name windows-1255. // - // Microsoft http://www.iana.org/assignments/charset-reg/windows-1255 + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1255 Windows1255 MIB = 2255 // Windows1256 is the MIB identifier with IANA name windows-1256. // - // Microsoft http://www.iana.org/assignments/charset-reg/windows-1256 + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1256 Windows1256 MIB = 2256 // Windows1257 is the MIB identifier with IANA name windows-1257. 
// - // Microsoft http://www.iana.org/assignments/charset-reg/windows-1257 + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1257 Windows1257 MIB = 2257 // Windows1258 is the MIB identifier with IANA name windows-1258. // - // Microsoft http://www.iana.org/assignments/charset-reg/windows-1258 + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1258 Windows1258 MIB = 2258 // TIS620 is the MIB identifier with IANA name TIS-620. @@ -1616,6 +1614,6 @@ const ( // CP50220 is the MIB identifier with IANA name CP50220. // - // See http://www.iana.org/assignments/charset-reg/CP50220 + // See https://www.iana.org/assignments/charset-reg/CP50220 CP50220 MIB = 2260 ) diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go index 919e3d950e98b..520b9ada0e28e 100644 --- a/vendor/golang.org/x/text/transform/transform.go +++ b/vendor/golang.org/x/text/transform/transform.go @@ -493,7 +493,7 @@ func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err erro return dstL.n, srcL.p, err } -// Deprecated: use runes.Remove instead. +// Deprecated: Use runes.Remove instead. func RemoveFunc(f func(r rune) bool) Transformer { return removeF(f) } diff --git a/vendor/golang.org/x/text/unicode/bidi/BUILD.bazel b/vendor/golang.org/x/text/unicode/bidi/BUILD.bazel index 59f950a51dc47..6334d3f9ce8ae 100644 --- a/vendor/golang.org/x/text/unicode/bidi/BUILD.bazel +++ b/vendor/golang.org/x/text/unicode/bidi/BUILD.bazel @@ -8,6 +8,7 @@ go_library( "core.go", "prop.go", "tables10.0.0.go", + "tables11.0.0.go", "tables9.0.0.go", "trieval.go", ], diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index 2e1ff19599dca..d8c94e1bd1a65 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.10 +// +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go new file mode 100644 index 0000000000000..022e3c69092df --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -0,0 +1,1887 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "11.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 16512 bytes (16.12 KiB). Checksum: 2a9cf1317f2ffaa. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 234 blocks, 14976 entries, 14976 bytes +// The third block is the zero block. +var bidiValues = [14976]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 
0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 
0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 
0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 
0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x000c, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d, + 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d, + 
0x5ea: 0x000d, 0x5eb: 0x000d, 0x5ec: 0x000d, 0x5ed: 0x000d, 0x5ee: 0x000d, 0x5ef: 0x000d, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000c, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + 0x77e: 0x000c, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + 0x83a: 
0x000c, 0x83b: 0x000c, + 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, 0x944: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, + 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa00: 0x000c, 0xa01: 0x000c, + 0xa3b: 0x000c, + 0xa3c: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa8a: 0x000c, + 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c, + 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c, + 0xaff: 0x0004, + // Block 0x2c, offset 0xb00 + 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c, + 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c, + // Block 0x2d, offset 0xb40 + 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c, + 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7b: 0x000c, + 0xb7c: 0x000c, + // Block 0x2e, offset 0xb80 + 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c, + 0xb8c: 0x000c, 0xb8d: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbd8: 0x000c, 0xbd9: 0x000c, + 0xbf5: 0x000c, + 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a, + 0xbfc: 0x003a, 0xbfd: 0x002a, + // Block 0x30, offset 0xc00 + 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c, + 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c, + 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c, + // Block 0x31, offset 0xc40 + 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c, + 0xc46: 0x000c, 0xc47: 0x000c, + 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c, + 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c, + 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c, + 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c, + 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c, + 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c, + 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, + // Block 0x32, offset 0xc80 + 0xc86: 0x000c, + // Block 0x33, offset 0xcc0 + 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c, + 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c, + 0xcf6: 0x000c, 0xcf7: 0x000c, 
0xcf9: 0x000c, 0xcfa: 0x000c, + 0xcfd: 0x000c, 0xcfe: 0x000c, + // Block 0x34, offset 0xd00 + 0xd18: 0x000c, 0xd19: 0x000c, + 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c, + 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, + // Block 0x35, offset 0xd40 + 0xd42: 0x000c, 0xd45: 0x000c, + 0xd46: 0x000c, + 0xd4d: 0x000c, + 0xd5d: 0x000c, + // Block 0x36, offset 0xd80 + 0xd9d: 0x000c, + 0xd9e: 0x000c, 0xd9f: 0x000c, + // Block 0x37, offset 0xdc0 + 0xdd0: 0x000a, 0xdd1: 0x000a, + 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a, + 0xdd8: 0x000a, 0xdd9: 0x000a, + // Block 0x38, offset 0xe00 + 0xe00: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0009, + 0xe5b: 0x007a, 0xe5c: 0x006a, + // Block 0x3a, offset 0xe80 + 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c, + 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, + 0xef2: 0x000c, 0xef3: 0x000c, + // Block 0x3c, offset 0xf00 + 0xf34: 0x000c, 0xf35: 0x000c, + 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c, + 0xf3c: 0x000c, 0xf3d: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c, + 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c, + 0xf52: 0x000c, 0xf53: 0x000c, + 0xf5b: 0x0004, 0xf5d: 0x000c, + 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a, + 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a, + // Block 0x3e, offset 0xf80 + 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a, + 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b, + // Block 0x3f, offset 0xfc0 + 0xfc5: 0x000c, + 0xfc6: 0x000c, + 0xfe9: 0x000c, + // Block 0x40, offset 0x1000 + 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c, + 0x1027: 0x000c, 0x1028: 0x000c, + 0x1032: 0x000c, + 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c, + // Block 0x41, offset 0x1040 + 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a, + // Block 0x42, offset 0x1080 + 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a, + 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a, + 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a, + 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a, + 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a, + 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10d7: 0x000c, + 0x10d8: 0x000c, 0x10db: 0x000c, + // Block 0x44, offset 0x1100 + 0x1116: 0x000c, + 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c, + 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c, + 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c, + 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c, + 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c, + 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c, + 0x113c: 0x000c, 0x113f: 0x000c, + // Block 0x45, offset 0x1140 + 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 
0x000c, 0x117d: 0x000c, 0x117e: 0x000c, + // Block 0x46, offset 0x1180 + 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c, + 0x11b4: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, + 0x11bc: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c2: 0x000c, + 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c, + 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, + 0x1222: 0x000c, 0x1223: 0x000c, + 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c, + 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c, + // Block 0x49, offset 0x1240 + 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c, + 0x126d: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, + // Block 0x4a, offset 0x1280 + 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c, + 0x12b0: 0x000c, 0x12b1: 0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c, + 0x12b6: 0x000c, 0x12b7: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x000c, 0x12d1: 0x000c, + 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c, + 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c, + 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c, + 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c, + 0x12ed: 0x000c, + 0x12f4: 0x000c, + 0x12f8: 0x000c, 0x12f9: 0x000c, + // Block 0x4c, offset 0x1300 + 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c, + 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c, + 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c, + 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c, + 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c, + 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c, + 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c, + 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c, + 0x1336: 0x000c, 0x1337: 0x000c, 0x1338: 0x000c, 0x1339: 0x000c, 0x133b: 0x000c, + 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c, + // Block 0x4d, offset 0x1340 + 0x137d: 0x000a, 0x137f: 0x000a, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000a, 0x1381: 0x000a, + 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a, + 0x139d: 0x000a, + 0x139e: 0x000a, 0x139f: 0x000a, + 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a, + 0x13bd: 0x000a, 0x13be: 0x000a, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009, + 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b, + 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a, + 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a, + 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a, + 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a, + 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 
0x13e9: 0x0007, + 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006, + 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a, + 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a, + 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a, + 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a, + 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a, + 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, + 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x0009, 0x1420: 0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b, + 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e, + 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 0x000b, + 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002, + 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003, + 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002, + 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003, + 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a, + 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004, + 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004, + 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004, + 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004, + 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004, + // Block 0x52, offset 0x1480 + 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004, + 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004, + 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c, + 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c, + 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c, + 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c, + 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c, + 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c, + 0x14b0: 0x000c, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a, + 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a, + 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a, + 0x14d8: 0x000a, + 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a, + 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a, + 0x14ee: 0x0004, + 0x14fa: 0x000a, 0x14fb: 0x000a, + // Block 0x54, offset 0x1500 + 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 
0x1504: 0x000a, + 0x150a: 0x000a, 0x150b: 0x000a, + 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a, + 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, + 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a, + 0x151e: 0x000a, 0x151f: 0x000a, + // Block 0x55, offset 0x1540 + 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a, + 0x1550: 0x000a, 0x1551: 0x000a, + 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, + 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a, + 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 0x000a, 0x156f: 0x000a, + 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a, + 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a, + 0x157c: 0x000a, 0x157d: 0x000a, 0x157e: 0x000a, 0x157f: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a, + 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, + 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, + 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, + 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, + 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, + 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, + 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 
0x002a, 0x160a: 0x003a, 0x160b: 0x002a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a, + 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + // Block 0x59, offset 0x1640 + 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a, + 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a, + 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, + 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, + 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, + 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, + 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, + 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, + 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, + 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, 0x1727: 0x000a, 0x1728: 0x000a, 0x1729: 0x000a, + 0x172a: 0x000a, 0x172b: 0x000a, 0x172c: 0x000a, 0x172d: 0x000a, 0x172e: 0x000a, 0x172f: 0x000a, + 0x1730: 0x000a, 0x1731: 0x000a, 0x1732: 0x000a, 0x1733: 0x000a, 0x1734: 0x000a, 0x1735: 0x000a, + 0x1736: 0x000a, 0x1737: 0x000a, 0x1738: 0x000a, 0x1739: 0x000a, 0x173a: 0x000a, 0x173b: 0x000a, + 0x173c: 0x000a, 0x173d: 0x000a, 0x173e: 0x000a, 0x173f: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x0002, 0x1749: 0x0002, 0x174a: 0x0002, 0x174b: 0x0002, + 0x174c: 0x0002, 
0x174d: 0x0002, 0x174e: 0x0002, 0x174f: 0x0002, 0x1750: 0x0002, 0x1751: 0x0002, + 0x1752: 0x0002, 0x1753: 0x0002, 0x1754: 0x0002, 0x1755: 0x0002, 0x1756: 0x0002, 0x1757: 0x0002, + 0x1758: 0x0002, 0x1759: 0x0002, 0x175a: 0x0002, 0x175b: 0x0002, + // Block 0x5e, offset 0x1780 + 0x17aa: 0x000a, 0x17ab: 0x000a, 0x17ac: 0x000a, 0x17ad: 0x000a, 0x17ae: 0x000a, 0x17af: 0x000a, + 0x17b0: 0x000a, 0x17b1: 0x000a, 0x17b2: 0x000a, 0x17b3: 0x000a, 0x17b4: 0x000a, 0x17b5: 0x000a, + 0x17b6: 0x000a, 0x17b7: 0x000a, 0x17b8: 0x000a, 0x17b9: 0x000a, 0x17ba: 0x000a, 0x17bb: 0x000a, + 0x17bc: 0x000a, 0x17bd: 0x000a, 0x17be: 0x000a, 0x17bf: 0x000a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x000a, 0x17c1: 0x000a, 0x17c2: 0x000a, 0x17c3: 0x000a, 0x17c4: 0x000a, 0x17c5: 0x000a, + 0x17c6: 0x000a, 0x17c7: 0x000a, 0x17c8: 0x000a, 0x17c9: 0x000a, 0x17ca: 0x000a, 0x17cb: 0x000a, + 0x17cc: 0x000a, 0x17cd: 0x000a, 0x17ce: 0x000a, 0x17cf: 0x000a, 0x17d0: 0x000a, 0x17d1: 0x000a, + 0x17d2: 0x000a, 0x17d3: 0x000a, 0x17d4: 0x000a, 0x17d5: 0x000a, 0x17d6: 0x000a, 0x17d7: 0x000a, + 0x17d8: 0x000a, 0x17d9: 0x000a, 0x17da: 0x000a, 0x17db: 0x000a, 0x17dc: 0x000a, 0x17dd: 0x000a, + 0x17de: 0x000a, 0x17df: 0x000a, 0x17e0: 0x000a, 0x17e1: 0x000a, 0x17e2: 0x000a, 0x17e3: 0x000a, + 0x17e4: 0x000a, 0x17e5: 0x000a, 0x17e6: 0x000a, 0x17e7: 0x000a, 0x17e8: 0x000a, 0x17e9: 0x000a, + 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, + 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, + 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, + 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, + // Block 0x60, offset 0x1800 + 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, + 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, + 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, + 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, + 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, + 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, + 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x003a, 0x1829: 0x002a, + 0x182a: 0x003a, 0x182b: 0x002a, 0x182c: 0x003a, 0x182d: 0x002a, 0x182e: 0x003a, 0x182f: 0x002a, + 0x1830: 0x003a, 0x1831: 0x002a, 0x1832: 0x003a, 0x1833: 0x002a, 0x1834: 0x003a, 0x1835: 0x002a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x009a, + 0x1846: 0x008a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x003a, 0x1867: 0x002a, 0x1868: 0x003a, 0x1869: 0x002a, + 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 
0x002a, 0x186e: 0x003a, 0x186f: 0x002a, + 0x1870: 0x000a, 0x1871: 0x000a, 0x1872: 0x000a, 0x1873: 0x000a, 0x1874: 0x000a, 0x1875: 0x000a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x007a, 0x1884: 0x006a, 0x1885: 0x009a, + 0x1886: 0x008a, 0x1887: 0x00ba, 0x1888: 0x00aa, 0x1889: 0x009a, 0x188a: 0x008a, 0x188b: 0x007a, + 0x188c: 0x006a, 0x188d: 0x00da, 0x188e: 0x002a, 0x188f: 0x003a, 0x1890: 0x00ca, 0x1891: 0x009a, + 0x1892: 0x008a, 0x1893: 0x007a, 0x1894: 0x006a, 0x1895: 0x009a, 0x1896: 0x008a, 0x1897: 0x00ba, + 0x1898: 0x00aa, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x000a, 0x18a7: 0x000a, 0x18a8: 0x000a, 0x18a9: 0x000a, + 0x18aa: 0x000a, 0x18ab: 0x000a, 0x18ac: 0x000a, 0x18ad: 0x000a, 0x18ae: 0x000a, 0x18af: 0x000a, + 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x000a, 0x18c4: 0x000a, 0x18c5: 0x000a, + 0x18c6: 0x000a, 0x18c7: 0x000a, 0x18c8: 0x000a, 0x18c9: 0x000a, 0x18ca: 0x000a, 0x18cb: 0x000a, + 0x18cc: 0x000a, 0x18cd: 0x000a, 0x18ce: 0x000a, 0x18cf: 0x000a, 0x18d0: 0x000a, 0x18d1: 0x000a, + 0x18d2: 0x000a, 0x18d3: 0x000a, 0x18d4: 0x000a, 0x18d5: 0x000a, 0x18d6: 0x000a, 0x18d7: 0x000a, + 0x18d8: 0x003a, 0x18d9: 0x002a, 0x18da: 0x003a, 0x18db: 0x002a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, + 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x003a, 0x18fd: 0x002a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, + 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, + 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, + 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, + 0x1918: 0x000a, 0x1919: 0x000a, 0x191a: 0x000a, 0x191b: 0x000a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x000a, 0x193d: 0x000a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 
0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, + 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1974: 0x000a, 0x1975: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, + 0x197c: 0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, 0x1996: 0x000a, 0x1997: 0x000a, + 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, + 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, + 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, 0x19b4: 0x000a, 0x19b5: 0x000a, + 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, 0x19ba: 0x000a, 0x19bb: 0x000a, + 0x19bc: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19e5: 0x000a, 0x19e6: 0x000a, 0x19e7: 0x000a, 0x19e8: 0x000a, 0x19e9: 0x000a, + 0x19ea: 0x000a, 0x19ef: 0x000c, + 0x19f0: 0x000c, 0x19f1: 0x000c, + 0x19f9: 0x000a, 0x19fa: 0x000a, 0x19fb: 0x000a, + 0x19fc: 0x000a, 0x19fd: 0x000a, 0x19fe: 0x000a, 0x19ff: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a3f: 0x000c, + // Block 0x69, offset 0x1a40 + 0x1a60: 0x000c, 0x1a61: 0x000c, 0x1a62: 0x000c, 0x1a63: 0x000c, + 0x1a64: 0x000c, 0x1a65: 0x000c, 0x1a66: 0x000c, 0x1a67: 0x000c, 0x1a68: 0x000c, 0x1a69: 0x000c, + 0x1a6a: 0x000c, 0x1a6b: 0x000c, 0x1a6c: 0x000c, 0x1a6d: 0x000c, 0x1a6e: 0x000c, 0x1a6f: 0x000c, + 0x1a70: 0x000c, 0x1a71: 0x000c, 0x1a72: 0x000c, 0x1a73: 0x000c, 0x1a74: 0x000c, 0x1a75: 0x000c, + 0x1a76: 0x000c, 0x1a77: 0x000c, 0x1a78: 0x000c, 0x1a79: 0x000c, 0x1a7a: 0x000c, 0x1a7b: 0x000c, + 0x1a7c: 0x000c, 0x1a7d: 0x000c, 0x1a7e: 0x000c, 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x000a, 0x1a81: 0x000a, 0x1a82: 0x000a, 0x1a83: 0x000a, 0x1a84: 0x000a, 0x1a85: 0x000a, + 0x1a86: 0x000a, 0x1a87: 0x000a, 0x1a88: 0x000a, 0x1a89: 0x000a, 0x1a8a: 0x000a, 0x1a8b: 0x000a, + 0x1a8c: 0x000a, 0x1a8d: 0x000a, 0x1a8e: 0x000a, 0x1a8f: 0x000a, 0x1a90: 0x000a, 0x1a91: 0x000a, + 0x1a92: 0x000a, 0x1a93: 0x000a, 0x1a94: 0x000a, 0x1a95: 0x000a, 0x1a96: 0x000a, 0x1a97: 0x000a, + 0x1a98: 0x000a, 0x1a99: 0x000a, 0x1a9a: 0x000a, 0x1a9b: 0x000a, 0x1a9c: 0x000a, 0x1a9d: 0x000a, + 0x1a9e: 0x000a, 0x1a9f: 0x000a, 0x1aa0: 0x000a, 0x1aa1: 0x000a, 0x1aa2: 0x003a, 0x1aa3: 
0x002a, + 0x1aa4: 0x003a, 0x1aa5: 0x002a, 0x1aa6: 0x003a, 0x1aa7: 0x002a, 0x1aa8: 0x003a, 0x1aa9: 0x002a, + 0x1aaa: 0x000a, 0x1aab: 0x000a, 0x1aac: 0x000a, 0x1aad: 0x000a, 0x1aae: 0x000a, 0x1aaf: 0x000a, + 0x1ab0: 0x000a, 0x1ab1: 0x000a, 0x1ab2: 0x000a, 0x1ab3: 0x000a, 0x1ab4: 0x000a, 0x1ab5: 0x000a, + 0x1ab6: 0x000a, 0x1ab7: 0x000a, 0x1ab8: 0x000a, 0x1ab9: 0x000a, 0x1aba: 0x000a, 0x1abb: 0x000a, + 0x1abc: 0x000a, 0x1abd: 0x000a, 0x1abe: 0x000a, 0x1abf: 0x000a, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a, + 0x1b06: 0x000a, 0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 0x1b0a: 0x000a, 0x1b0b: 0x000a, + 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a, + 0x1b12: 0x000a, 0x1b13: 0x000a, 0x1b14: 0x000a, 0x1b15: 0x000a, 0x1b16: 0x000a, 0x1b17: 0x000a, + 0x1b18: 0x000a, 0x1b19: 0x000a, 0x1b1b: 0x000a, 0x1b1c: 0x000a, 0x1b1d: 0x000a, + 0x1b1e: 0x000a, 0x1b1f: 0x000a, 0x1b20: 0x000a, 0x1b21: 0x000a, 0x1b22: 0x000a, 0x1b23: 0x000a, + 0x1b24: 0x000a, 0x1b25: 0x000a, 0x1b26: 0x000a, 0x1b27: 0x000a, 0x1b28: 0x000a, 0x1b29: 0x000a, + 0x1b2a: 0x000a, 0x1b2b: 0x000a, 0x1b2c: 0x000a, 0x1b2d: 0x000a, 0x1b2e: 0x000a, 0x1b2f: 0x000a, + 0x1b30: 0x000a, 0x1b31: 0x000a, 0x1b32: 0x000a, 0x1b33: 0x000a, 0x1b34: 0x000a, 0x1b35: 0x000a, + 0x1b36: 0x000a, 0x1b37: 0x000a, 0x1b38: 0x000a, 0x1b39: 0x000a, 0x1b3a: 0x000a, 0x1b3b: 0x000a, + 0x1b3c: 0x000a, 0x1b3d: 0x000a, 0x1b3e: 0x000a, 0x1b3f: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5a: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, 0x1bb4: 0x000a, 0x1bb5: 0x000a, + 0x1bb6: 0x000a, 0x1bb7: 0x000a, 0x1bb8: 0x000a, 0x1bb9: 0x000a, 0x1bba: 0x000a, 0x1bbb: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x0009, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, + 0x1bc8: 0x003a, 0x1bc9: 0x002a, 0x1bca: 0x003a, 0x1bcb: 0x002a, + 0x1bcc: 0x003a, 0x1bcd: 0x002a, 0x1bce: 0x003a, 0x1bcf: 0x002a, 0x1bd0: 0x003a, 0x1bd1: 0x002a, + 
0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x003a, 0x1bd5: 0x002a, 0x1bd6: 0x003a, 0x1bd7: 0x002a, + 0x1bd8: 0x003a, 0x1bd9: 0x002a, 0x1bda: 0x003a, 0x1bdb: 0x002a, 0x1bdc: 0x000a, 0x1bdd: 0x000a, + 0x1bde: 0x000a, 0x1bdf: 0x000a, 0x1be0: 0x000a, + 0x1bea: 0x000c, 0x1beb: 0x000c, 0x1bec: 0x000c, 0x1bed: 0x000c, + 0x1bf0: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, + 0x1bfd: 0x000a, 0x1bfe: 0x000a, 0x1bff: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c19: 0x000c, 0x1c1a: 0x000c, 0x1c1b: 0x000a, 0x1c1c: 0x000a, + 0x1c20: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c7b: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x000a, 0x1c81: 0x000a, 0x1c82: 0x000a, 0x1c83: 0x000a, 0x1c84: 0x000a, 0x1c85: 0x000a, + 0x1c86: 0x000a, 0x1c87: 0x000a, 0x1c88: 0x000a, 0x1c89: 0x000a, 0x1c8a: 0x000a, 0x1c8b: 0x000a, + 0x1c8c: 0x000a, 0x1c8d: 0x000a, 0x1c8e: 0x000a, 0x1c8f: 0x000a, 0x1c90: 0x000a, 0x1c91: 0x000a, + 0x1c92: 0x000a, 0x1c93: 0x000a, 0x1c94: 0x000a, 0x1c95: 0x000a, 0x1c96: 0x000a, 0x1c97: 0x000a, + 0x1c98: 0x000a, 0x1c99: 0x000a, 0x1c9a: 0x000a, 0x1c9b: 0x000a, 0x1c9c: 0x000a, 0x1c9d: 0x000a, + 0x1c9e: 0x000a, 0x1c9f: 0x000a, 0x1ca0: 0x000a, 0x1ca1: 0x000a, 0x1ca2: 0x000a, 0x1ca3: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cdd: 0x000a, + 0x1cde: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d10: 0x000a, 0x1d11: 0x000a, + 0x1d12: 0x000a, 0x1d13: 0x000a, 0x1d14: 0x000a, 0x1d15: 0x000a, 0x1d16: 0x000a, 0x1d17: 0x000a, + 0x1d18: 0x000a, 0x1d19: 0x000a, 0x1d1a: 0x000a, 0x1d1b: 0x000a, 0x1d1c: 0x000a, 0x1d1d: 0x000a, + 0x1d1e: 0x000a, 0x1d1f: 0x000a, + 0x1d3c: 0x000a, 0x1d3d: 0x000a, 0x1d3e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d71: 0x000a, 0x1d72: 0x000a, 0x1d73: 0x000a, 0x1d74: 0x000a, 0x1d75: 0x000a, + 0x1d76: 0x000a, 0x1d77: 0x000a, 0x1d78: 0x000a, 0x1d79: 0x000a, 0x1d7a: 0x000a, 0x1d7b: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, 0x1d7f: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1d8c: 0x000a, 0x1d8d: 0x000a, 0x1d8e: 0x000a, 0x1d8f: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1df7: 0x000a, 0x1df8: 0x000a, 0x1df9: 0x000a, 0x1dfa: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e1e: 0x000a, 0x1e1f: 0x000a, + 0x1e3f: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e50: 0x000a, 0x1e51: 0x000a, + 0x1e52: 0x000a, 0x1e53: 0x000a, 0x1e54: 0x000a, 0x1e55: 0x000a, 0x1e56: 0x000a, 0x1e57: 0x000a, + 0x1e58: 0x000a, 0x1e59: 0x000a, 0x1e5a: 0x000a, 0x1e5b: 0x000a, 0x1e5c: 0x000a, 0x1e5d: 0x000a, + 0x1e5e: 0x000a, 0x1e5f: 0x000a, 0x1e60: 0x000a, 0x1e61: 0x000a, 0x1e62: 0x000a, 0x1e63: 0x000a, + 0x1e64: 0x000a, 0x1e65: 0x000a, 0x1e66: 0x000a, 0x1e67: 0x000a, 0x1e68: 0x000a, 0x1e69: 0x000a, + 0x1e6a: 0x000a, 0x1e6b: 0x000a, 0x1e6c: 0x000a, 0x1e6d: 0x000a, 0x1e6e: 0x000a, 0x1e6f: 0x000a, + 0x1e70: 0x000a, 0x1e71: 0x000a, 0x1e72: 0x000a, 0x1e73: 0x000a, 0x1e74: 0x000a, 0x1e75: 0x000a, + 0x1e76: 0x000a, 0x1e77: 0x000a, 0x1e78: 0x000a, 0x1e79: 0x000a, 0x1e7a: 0x000a, 0x1e7b: 0x000a, + 0x1e7c: 0x000a, 0x1e7d: 0x000a, 0x1e7e: 0x000a, 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0x000a, 0x1e81: 0x000a, 0x1e82: 0x000a, 0x1e83: 0x000a, 0x1e84: 0x000a, 0x1e85: 0x000a, + 0x1e86: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ecd: 0x000a, 0x1ece: 0x000a, 0x1ecf: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f2f: 0x000c, + 0x1f30: 0x000c, 0x1f31: 0x000c, 0x1f32: 0x000c, 0x1f33: 0x000a, 0x1f34: 0x000c, 0x1f35: 0x000c, + 0x1f36: 0x000c, 0x1f37: 0x000c, 0x1f38: 0x000c, 0x1f39: 0x000c, 0x1f3a: 0x000c, 0x1f3b: 0x000c, + 0x1f3c: 0x000c, 0x1f3d: 0x000c, 0x1f3e: 0x000a, 0x1f3f: 0x000a, + // Block 0x7d, offset 0x1f40 + 
0x1f5e: 0x000c, 0x1f5f: 0x000c, + // Block 0x7e, offset 0x1f80 + 0x1fb0: 0x000c, 0x1fb1: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0x000a, 0x1fc1: 0x000a, 0x1fc2: 0x000a, 0x1fc3: 0x000a, 0x1fc4: 0x000a, 0x1fc5: 0x000a, + 0x1fc6: 0x000a, 0x1fc7: 0x000a, 0x1fc8: 0x000a, 0x1fc9: 0x000a, 0x1fca: 0x000a, 0x1fcb: 0x000a, + 0x1fcc: 0x000a, 0x1fcd: 0x000a, 0x1fce: 0x000a, 0x1fcf: 0x000a, 0x1fd0: 0x000a, 0x1fd1: 0x000a, + 0x1fd2: 0x000a, 0x1fd3: 0x000a, 0x1fd4: 0x000a, 0x1fd5: 0x000a, 0x1fd6: 0x000a, 0x1fd7: 0x000a, + 0x1fd8: 0x000a, 0x1fd9: 0x000a, 0x1fda: 0x000a, 0x1fdb: 0x000a, 0x1fdc: 0x000a, 0x1fdd: 0x000a, + 0x1fde: 0x000a, 0x1fdf: 0x000a, 0x1fe0: 0x000a, 0x1fe1: 0x000a, + // Block 0x80, offset 0x2000 + 0x2008: 0x000a, + // Block 0x81, offset 0x2040 + 0x2042: 0x000c, + 0x2046: 0x000c, 0x204b: 0x000c, + 0x2065: 0x000c, 0x2066: 0x000c, 0x2068: 0x000a, 0x2069: 0x000a, + 0x206a: 0x000a, 0x206b: 0x000a, + 0x2078: 0x0004, 0x2079: 0x0004, + // Block 0x82, offset 0x2080 + 0x20b4: 0x000a, 0x20b5: 0x000a, + 0x20b6: 0x000a, 0x20b7: 0x000a, + // Block 0x83, offset 0x20c0 + 0x20c4: 0x000c, 0x20c5: 0x000c, + 0x20e0: 0x000c, 0x20e1: 0x000c, 0x20e2: 0x000c, 0x20e3: 0x000c, + 0x20e4: 0x000c, 0x20e5: 0x000c, 0x20e6: 0x000c, 0x20e7: 0x000c, 0x20e8: 0x000c, 0x20e9: 0x000c, + 0x20ea: 0x000c, 0x20eb: 0x000c, 0x20ec: 0x000c, 0x20ed: 0x000c, 0x20ee: 0x000c, 0x20ef: 0x000c, + 0x20f0: 0x000c, 0x20f1: 0x000c, + 0x20ff: 0x000c, + // Block 0x84, offset 0x2100 + 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, + // Block 0x85, offset 0x2140 + 0x2147: 0x000c, 0x2148: 0x000c, 0x2149: 0x000c, 0x214a: 0x000c, 0x214b: 0x000c, + 0x214c: 0x000c, 0x214d: 0x000c, 0x214e: 0x000c, 0x214f: 0x000c, 0x2150: 0x000c, 0x2151: 0x000c, + // Block 0x86, offset 0x2180 + 0x2180: 0x000c, 0x2181: 0x000c, 0x2182: 0x000c, + 0x21b3: 0x000c, + 0x21b6: 0x000c, 0x21b7: 0x000c, 0x21b8: 0x000c, 0x21b9: 0x000c, + 0x21bc: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21e5: 0x000c, + // Block 0x88, offset 0x2200 + 0x2229: 0x000c, + 0x222a: 0x000c, 0x222b: 0x000c, 0x222c: 0x000c, 0x222d: 0x000c, 0x222e: 0x000c, + 0x2231: 0x000c, 0x2232: 0x000c, 0x2235: 0x000c, + 0x2236: 0x000c, + // Block 0x89, offset 0x2240 + 0x2243: 0x000c, + 0x224c: 0x000c, + 0x227c: 0x000c, + // Block 0x8a, offset 0x2280 + 0x22b0: 0x000c, 0x22b2: 0x000c, 0x22b3: 0x000c, 0x22b4: 0x000c, + 0x22b7: 0x000c, 0x22b8: 0x000c, + 0x22be: 0x000c, 0x22bf: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22c1: 0x000c, + 0x22ec: 0x000c, 0x22ed: 0x000c, + 0x22f6: 0x000c, + // Block 0x8c, offset 0x2300 + 0x2325: 0x000c, 0x2328: 0x000c, + 0x232d: 0x000c, + // Block 0x8d, offset 0x2340 + 0x235d: 0x0001, + 0x235e: 0x000c, 0x235f: 0x0001, 0x2360: 0x0001, 0x2361: 0x0001, 0x2362: 0x0001, 0x2363: 0x0001, + 0x2364: 0x0001, 0x2365: 0x0001, 0x2366: 0x0001, 0x2367: 0x0001, 0x2368: 0x0001, 0x2369: 0x0003, + 0x236a: 0x0001, 0x236b: 0x0001, 0x236c: 0x0001, 0x236d: 0x0001, 0x236e: 0x0001, 0x236f: 0x0001, + 0x2370: 0x0001, 0x2371: 0x0001, 0x2372: 0x0001, 0x2373: 0x0001, 0x2374: 0x0001, 0x2375: 0x0001, + 0x2376: 0x0001, 0x2377: 0x0001, 0x2378: 0x0001, 0x2379: 0x0001, 0x237a: 0x0001, 0x237b: 0x0001, + 0x237c: 0x0001, 0x237d: 0x0001, 0x237e: 0x0001, 0x237f: 0x0001, + // Block 0x8e, offset 0x2380 + 0x2380: 0x0001, 0x2381: 0x0001, 0x2382: 0x0001, 0x2383: 0x0001, 0x2384: 0x0001, 0x2385: 0x0001, + 0x2386: 0x0001, 0x2387: 0x0001, 0x2388: 0x0001, 0x2389: 0x0001, 0x238a: 0x0001, 0x238b: 0x0001, + 0x238c: 0x0001, 0x238d: 0x0001, 0x238e: 0x0001, 
0x238f: 0x0001, 0x2390: 0x000d, 0x2391: 0x000d, + 0x2392: 0x000d, 0x2393: 0x000d, 0x2394: 0x000d, 0x2395: 0x000d, 0x2396: 0x000d, 0x2397: 0x000d, + 0x2398: 0x000d, 0x2399: 0x000d, 0x239a: 0x000d, 0x239b: 0x000d, 0x239c: 0x000d, 0x239d: 0x000d, + 0x239e: 0x000d, 0x239f: 0x000d, 0x23a0: 0x000d, 0x23a1: 0x000d, 0x23a2: 0x000d, 0x23a3: 0x000d, + 0x23a4: 0x000d, 0x23a5: 0x000d, 0x23a6: 0x000d, 0x23a7: 0x000d, 0x23a8: 0x000d, 0x23a9: 0x000d, + 0x23aa: 0x000d, 0x23ab: 0x000d, 0x23ac: 0x000d, 0x23ad: 0x000d, 0x23ae: 0x000d, 0x23af: 0x000d, + 0x23b0: 0x000d, 0x23b1: 0x000d, 0x23b2: 0x000d, 0x23b3: 0x000d, 0x23b4: 0x000d, 0x23b5: 0x000d, + 0x23b6: 0x000d, 0x23b7: 0x000d, 0x23b8: 0x000d, 0x23b9: 0x000d, 0x23ba: 0x000d, 0x23bb: 0x000d, + 0x23bc: 0x000d, 0x23bd: 0x000d, 0x23be: 0x000d, 0x23bf: 0x000d, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x000d, 0x23c1: 0x000d, 0x23c2: 0x000d, 0x23c3: 0x000d, 0x23c4: 0x000d, 0x23c5: 0x000d, + 0x23c6: 0x000d, 0x23c7: 0x000d, 0x23c8: 0x000d, 0x23c9: 0x000d, 0x23ca: 0x000d, 0x23cb: 0x000d, + 0x23cc: 0x000d, 0x23cd: 0x000d, 0x23ce: 0x000d, 0x23cf: 0x000d, 0x23d0: 0x000d, 0x23d1: 0x000d, + 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d, + 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d, + 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d, + 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d, + 0x23ea: 0x000d, 0x23eb: 0x000d, 0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d, + 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, + 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, + 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000a, 0x23ff: 0x000a, + // Block 0x90, offset 0x2400 + 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d, + 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 0x000d, + 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000b, 0x2411: 0x000b, + 0x2412: 0x000b, 0x2413: 0x000b, 0x2414: 0x000b, 0x2415: 0x000b, 0x2416: 0x000b, 0x2417: 0x000b, + 0x2418: 0x000b, 0x2419: 0x000b, 0x241a: 0x000b, 0x241b: 0x000b, 0x241c: 0x000b, 0x241d: 0x000b, + 0x241e: 0x000b, 0x241f: 0x000b, 0x2420: 0x000b, 0x2421: 0x000b, 0x2422: 0x000b, 0x2423: 0x000b, + 0x2424: 0x000b, 0x2425: 0x000b, 0x2426: 0x000b, 0x2427: 0x000b, 0x2428: 0x000b, 0x2429: 0x000b, + 0x242a: 0x000b, 0x242b: 0x000b, 0x242c: 0x000b, 0x242d: 0x000b, 0x242e: 0x000b, 0x242f: 0x000b, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, + 0x243c: 0x000d, 0x243d: 0x000a, 0x243e: 0x000d, 0x243f: 0x000d, + // Block 0x91, offset 0x2440 + 0x2440: 0x000c, 0x2441: 0x000c, 0x2442: 0x000c, 0x2443: 0x000c, 0x2444: 0x000c, 0x2445: 0x000c, + 0x2446: 0x000c, 0x2447: 0x000c, 0x2448: 0x000c, 0x2449: 0x000c, 0x244a: 0x000c, 0x244b: 0x000c, + 0x244c: 0x000c, 0x244d: 0x000c, 0x244e: 0x000c, 0x244f: 0x000c, 0x2450: 0x000a, 0x2451: 0x000a, + 0x2452: 0x000a, 0x2453: 0x000a, 0x2454: 0x000a, 0x2455: 0x000a, 0x2456: 0x000a, 0x2457: 0x000a, + 0x2458: 0x000a, 0x2459: 0x000a, + 0x2460: 0x000c, 0x2461: 0x000c, 0x2462: 0x000c, 0x2463: 0x000c, + 0x2464: 0x000c, 0x2465: 0x000c, 0x2466: 0x000c, 0x2467: 0x000c, 0x2468: 
0x000c, 0x2469: 0x000c, + 0x246a: 0x000c, 0x246b: 0x000c, 0x246c: 0x000c, 0x246d: 0x000c, 0x246e: 0x000c, 0x246f: 0x000c, + 0x2470: 0x000a, 0x2471: 0x000a, 0x2472: 0x000a, 0x2473: 0x000a, 0x2474: 0x000a, 0x2475: 0x000a, + 0x2476: 0x000a, 0x2477: 0x000a, 0x2478: 0x000a, 0x2479: 0x000a, 0x247a: 0x000a, 0x247b: 0x000a, + 0x247c: 0x000a, 0x247d: 0x000a, 0x247e: 0x000a, 0x247f: 0x000a, + // Block 0x92, offset 0x2480 + 0x2480: 0x000a, 0x2481: 0x000a, 0x2482: 0x000a, 0x2483: 0x000a, 0x2484: 0x000a, 0x2485: 0x000a, + 0x2486: 0x000a, 0x2487: 0x000a, 0x2488: 0x000a, 0x2489: 0x000a, 0x248a: 0x000a, 0x248b: 0x000a, + 0x248c: 0x000a, 0x248d: 0x000a, 0x248e: 0x000a, 0x248f: 0x000a, 0x2490: 0x0006, 0x2491: 0x000a, + 0x2492: 0x0006, 0x2494: 0x000a, 0x2495: 0x0006, 0x2496: 0x000a, 0x2497: 0x000a, + 0x2498: 0x000a, 0x2499: 0x009a, 0x249a: 0x008a, 0x249b: 0x007a, 0x249c: 0x006a, 0x249d: 0x009a, + 0x249e: 0x008a, 0x249f: 0x0004, 0x24a0: 0x000a, 0x24a1: 0x000a, 0x24a2: 0x0003, 0x24a3: 0x0003, + 0x24a4: 0x000a, 0x24a5: 0x000a, 0x24a6: 0x000a, 0x24a8: 0x000a, 0x24a9: 0x0004, + 0x24aa: 0x0004, 0x24ab: 0x000a, + 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d, + 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d, + 0x24bc: 0x000d, 0x24bd: 0x000d, 0x24be: 0x000d, 0x24bf: 0x000d, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000d, 0x24c1: 0x000d, 0x24c2: 0x000d, 0x24c3: 0x000d, 0x24c4: 0x000d, 0x24c5: 0x000d, + 0x24c6: 0x000d, 0x24c7: 0x000d, 0x24c8: 0x000d, 0x24c9: 0x000d, 0x24ca: 0x000d, 0x24cb: 0x000d, + 0x24cc: 0x000d, 0x24cd: 0x000d, 0x24ce: 0x000d, 0x24cf: 0x000d, 0x24d0: 0x000d, 0x24d1: 0x000d, + 0x24d2: 0x000d, 0x24d3: 0x000d, 0x24d4: 0x000d, 0x24d5: 0x000d, 0x24d6: 0x000d, 0x24d7: 0x000d, + 0x24d8: 0x000d, 0x24d9: 0x000d, 0x24da: 0x000d, 0x24db: 0x000d, 0x24dc: 0x000d, 0x24dd: 0x000d, + 0x24de: 0x000d, 0x24df: 0x000d, 0x24e0: 0x000d, 0x24e1: 0x000d, 0x24e2: 0x000d, 0x24e3: 0x000d, + 0x24e4: 0x000d, 0x24e5: 0x000d, 0x24e6: 0x000d, 0x24e7: 0x000d, 0x24e8: 0x000d, 0x24e9: 0x000d, + 0x24ea: 0x000d, 0x24eb: 0x000d, 0x24ec: 0x000d, 0x24ed: 0x000d, 0x24ee: 0x000d, 0x24ef: 0x000d, + 0x24f0: 0x000d, 0x24f1: 0x000d, 0x24f2: 0x000d, 0x24f3: 0x000d, 0x24f4: 0x000d, 0x24f5: 0x000d, + 0x24f6: 0x000d, 0x24f7: 0x000d, 0x24f8: 0x000d, 0x24f9: 0x000d, 0x24fa: 0x000d, 0x24fb: 0x000d, + 0x24fc: 0x000d, 0x24fd: 0x000d, 0x24fe: 0x000d, 0x24ff: 0x000b, + // Block 0x94, offset 0x2500 + 0x2501: 0x000a, 0x2502: 0x000a, 0x2503: 0x0004, 0x2504: 0x0004, 0x2505: 0x0004, + 0x2506: 0x000a, 0x2507: 0x000a, 0x2508: 0x003a, 0x2509: 0x002a, 0x250a: 0x000a, 0x250b: 0x0003, + 0x250c: 0x0006, 0x250d: 0x0003, 0x250e: 0x0006, 0x250f: 0x0006, 0x2510: 0x0002, 0x2511: 0x0002, + 0x2512: 0x0002, 0x2513: 0x0002, 0x2514: 0x0002, 0x2515: 0x0002, 0x2516: 0x0002, 0x2517: 0x0002, + 0x2518: 0x0002, 0x2519: 0x0002, 0x251a: 0x0006, 0x251b: 0x000a, 0x251c: 0x000a, 0x251d: 0x000a, + 0x251e: 0x000a, 0x251f: 0x000a, 0x2520: 0x000a, + 0x253b: 0x005a, + 0x253c: 0x000a, 0x253d: 0x004a, 0x253e: 0x000a, 0x253f: 0x000a, + // Block 0x95, offset 0x2540 + 0x2540: 0x000a, + 0x255b: 0x005a, 0x255c: 0x000a, 0x255d: 0x004a, + 0x255e: 0x000a, 0x255f: 0x00fa, 0x2560: 0x00ea, 0x2561: 0x000a, 0x2562: 0x003a, 0x2563: 0x002a, + 0x2564: 0x000a, 0x2565: 0x000a, + // Block 0x96, offset 0x2580 + 0x25a0: 0x0004, 0x25a1: 0x0004, 0x25a2: 0x000a, 0x25a3: 0x000a, + 0x25a4: 0x000a, 0x25a5: 0x0004, 0x25a6: 0x0004, 0x25a8: 0x000a, 0x25a9: 0x000a, + 0x25aa: 0x000a, 0x25ab: 0x000a, 0x25ac: 0x000a, 0x25ad: 
0x000a, 0x25ae: 0x000a, + 0x25b0: 0x000b, 0x25b1: 0x000b, 0x25b2: 0x000b, 0x25b3: 0x000b, 0x25b4: 0x000b, 0x25b5: 0x000b, + 0x25b6: 0x000b, 0x25b7: 0x000b, 0x25b8: 0x000b, 0x25b9: 0x000a, 0x25ba: 0x000a, 0x25bb: 0x000a, + 0x25bc: 0x000a, 0x25bd: 0x000a, 0x25be: 0x000b, 0x25bf: 0x000b, + // Block 0x97, offset 0x25c0 + 0x25c1: 0x000a, + // Block 0x98, offset 0x2600 + 0x2600: 0x000a, 0x2601: 0x000a, 0x2602: 0x000a, 0x2603: 0x000a, 0x2604: 0x000a, 0x2605: 0x000a, + 0x2606: 0x000a, 0x2607: 0x000a, 0x2608: 0x000a, 0x2609: 0x000a, 0x260a: 0x000a, 0x260b: 0x000a, + 0x260c: 0x000a, 0x2610: 0x000a, 0x2611: 0x000a, + 0x2612: 0x000a, 0x2613: 0x000a, 0x2614: 0x000a, 0x2615: 0x000a, 0x2616: 0x000a, 0x2617: 0x000a, + 0x2618: 0x000a, 0x2619: 0x000a, 0x261a: 0x000a, 0x261b: 0x000a, + 0x2620: 0x000a, + // Block 0x99, offset 0x2640 + 0x267d: 0x000c, + // Block 0x9a, offset 0x2680 + 0x26a0: 0x000c, 0x26a1: 0x0002, 0x26a2: 0x0002, 0x26a3: 0x0002, + 0x26a4: 0x0002, 0x26a5: 0x0002, 0x26a6: 0x0002, 0x26a7: 0x0002, 0x26a8: 0x0002, 0x26a9: 0x0002, + 0x26aa: 0x0002, 0x26ab: 0x0002, 0x26ac: 0x0002, 0x26ad: 0x0002, 0x26ae: 0x0002, 0x26af: 0x0002, + 0x26b0: 0x0002, 0x26b1: 0x0002, 0x26b2: 0x0002, 0x26b3: 0x0002, 0x26b4: 0x0002, 0x26b5: 0x0002, + 0x26b6: 0x0002, 0x26b7: 0x0002, 0x26b8: 0x0002, 0x26b9: 0x0002, 0x26ba: 0x0002, 0x26bb: 0x0002, + // Block 0x9b, offset 0x26c0 + 0x26f6: 0x000c, 0x26f7: 0x000c, 0x26f8: 0x000c, 0x26f9: 0x000c, 0x26fa: 0x000c, + // Block 0x9c, offset 0x2700 + 0x2700: 0x0001, 0x2701: 0x0001, 0x2702: 0x0001, 0x2703: 0x0001, 0x2704: 0x0001, 0x2705: 0x0001, + 0x2706: 0x0001, 0x2707: 0x0001, 0x2708: 0x0001, 0x2709: 0x0001, 0x270a: 0x0001, 0x270b: 0x0001, + 0x270c: 0x0001, 0x270d: 0x0001, 0x270e: 0x0001, 0x270f: 0x0001, 0x2710: 0x0001, 0x2711: 0x0001, + 0x2712: 0x0001, 0x2713: 0x0001, 0x2714: 0x0001, 0x2715: 0x0001, 0x2716: 0x0001, 0x2717: 0x0001, + 0x2718: 0x0001, 0x2719: 0x0001, 0x271a: 0x0001, 0x271b: 0x0001, 0x271c: 0x0001, 0x271d: 0x0001, + 0x271e: 0x0001, 0x271f: 0x0001, 0x2720: 0x0001, 0x2721: 0x0001, 0x2722: 0x0001, 0x2723: 0x0001, + 0x2724: 0x0001, 0x2725: 0x0001, 0x2726: 0x0001, 0x2727: 0x0001, 0x2728: 0x0001, 0x2729: 0x0001, + 0x272a: 0x0001, 0x272b: 0x0001, 0x272c: 0x0001, 0x272d: 0x0001, 0x272e: 0x0001, 0x272f: 0x0001, + 0x2730: 0x0001, 0x2731: 0x0001, 0x2732: 0x0001, 0x2733: 0x0001, 0x2734: 0x0001, 0x2735: 0x0001, + 0x2736: 0x0001, 0x2737: 0x0001, 0x2738: 0x0001, 0x2739: 0x0001, 0x273a: 0x0001, 0x273b: 0x0001, + 0x273c: 0x0001, 0x273d: 0x0001, 0x273e: 0x0001, 0x273f: 0x0001, + // Block 0x9d, offset 0x2740 + 0x2740: 0x0001, 0x2741: 0x0001, 0x2742: 0x0001, 0x2743: 0x0001, 0x2744: 0x0001, 0x2745: 0x0001, + 0x2746: 0x0001, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001, + 0x274c: 0x0001, 0x274d: 0x0001, 0x274e: 0x0001, 0x274f: 0x0001, 0x2750: 0x0001, 0x2751: 0x0001, + 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001, + 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001, + 0x275e: 0x0001, 0x275f: 0x000a, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001, + 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001, + 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001, + 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001, + 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x0001, 0x2779: 0x0001, 0x277a: 0x0001, 0x277b: 0x0001, + 0x277c: 0x0001, 
0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x0001, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x000c, 0x2782: 0x000c, 0x2783: 0x000c, 0x2784: 0x0001, 0x2785: 0x000c, + 0x2786: 0x000c, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x000c, 0x278d: 0x000c, 0x278e: 0x000c, 0x278f: 0x000c, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x000c, 0x27b9: 0x000c, 0x27ba: 0x000c, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x000c, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001, + 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x000c, 0x27e6: 0x000c, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x0001, 0x27fa: 0x0001, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x0001, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x0001, 0x2802: 0x0001, 0x2803: 0x0001, 0x2804: 0x0001, 0x2805: 0x0001, + 0x2806: 0x0001, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x0001, 0x280d: 0x0001, 0x280e: 0x0001, 0x280f: 0x0001, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x0001, 0x2826: 0x0001, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x0001, 0x2839: 0x000a, 0x283a: 0x000a, 0x283b: 0x000a, + 0x283c: 0x000a, 0x283d: 0x000a, 0x283e: 0x000a, 0x283f: 0x000a, + // Block 0xa1, offset 0x2840 + 0x2840: 0x000d, 0x2841: 0x000d, 0x2842: 0x000d, 0x2843: 0x000d, 0x2844: 0x000d, 0x2845: 0x000d, + 0x2846: 0x000d, 0x2847: 0x000d, 0x2848: 0x000d, 0x2849: 0x000d, 0x284a: 0x000d, 0x284b: 0x000d, + 0x284c: 0x000d, 0x284d: 0x000d, 0x284e: 
0x000d, 0x284f: 0x000d, 0x2850: 0x000d, 0x2851: 0x000d, + 0x2852: 0x000d, 0x2853: 0x000d, 0x2854: 0x000d, 0x2855: 0x000d, 0x2856: 0x000d, 0x2857: 0x000d, + 0x2858: 0x000d, 0x2859: 0x000d, 0x285a: 0x000d, 0x285b: 0x000d, 0x285c: 0x000d, 0x285d: 0x000d, + 0x285e: 0x000d, 0x285f: 0x000d, 0x2860: 0x000d, 0x2861: 0x000d, 0x2862: 0x000d, 0x2863: 0x000d, + 0x2864: 0x000c, 0x2865: 0x000c, 0x2866: 0x000c, 0x2867: 0x000c, 0x2868: 0x000d, 0x2869: 0x000d, + 0x286a: 0x000d, 0x286b: 0x000d, 0x286c: 0x000d, 0x286d: 0x000d, 0x286e: 0x000d, 0x286f: 0x000d, + 0x2870: 0x0005, 0x2871: 0x0005, 0x2872: 0x0005, 0x2873: 0x0005, 0x2874: 0x0005, 0x2875: 0x0005, + 0x2876: 0x0005, 0x2877: 0x0005, 0x2878: 0x0005, 0x2879: 0x0005, 0x287a: 0x000d, 0x287b: 0x000d, + 0x287c: 0x000d, 0x287d: 0x000d, 0x287e: 0x000d, 0x287f: 0x000d, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0005, 0x28a1: 0x0005, 0x28a2: 0x0005, 0x28a3: 0x0005, + 0x28a4: 0x0005, 0x28a5: 0x0005, 0x28a6: 0x0005, 0x28a7: 0x0005, 0x28a8: 0x0005, 0x28a9: 0x0005, + 0x28aa: 0x0005, 0x28ab: 0x0005, 0x28ac: 0x0005, 0x28ad: 0x0005, 0x28ae: 0x0005, 0x28af: 0x0005, + 0x28b0: 0x0005, 0x28b1: 0x0005, 0x28b2: 0x0005, 0x28b3: 0x0005, 0x28b4: 0x0005, 0x28b5: 0x0005, + 0x28b6: 0x0005, 0x28b7: 0x0005, 0x28b8: 0x0005, 0x28b9: 0x0005, 0x28ba: 0x0005, 0x28bb: 0x0005, + 0x28bc: 0x0005, 0x28bd: 0x0005, 0x28be: 0x0005, 0x28bf: 0x0001, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x0001, 0x28c1: 0x0001, 0x28c2: 0x0001, 0x28c3: 0x0001, 0x28c4: 0x0001, 0x28c5: 0x0001, + 0x28c6: 0x0001, 0x28c7: 0x0001, 0x28c8: 0x0001, 0x28c9: 0x0001, 0x28ca: 0x0001, 0x28cb: 0x0001, + 0x28cc: 0x0001, 0x28cd: 0x0001, 0x28ce: 0x0001, 0x28cf: 0x0001, 0x28d0: 0x0001, 0x28d1: 0x0001, + 0x28d2: 0x0001, 0x28d3: 0x0001, 0x28d4: 0x0001, 0x28d5: 0x0001, 0x28d6: 0x0001, 0x28d7: 0x0001, + 0x28d8: 0x0001, 0x28d9: 0x0001, 0x28da: 0x0001, 0x28db: 0x0001, 0x28dc: 0x0001, 0x28dd: 0x0001, + 0x28de: 0x0001, 0x28df: 0x0001, 0x28e0: 0x0001, 0x28e1: 0x0001, 0x28e2: 0x0001, 0x28e3: 0x0001, + 0x28e4: 0x0001, 0x28e5: 0x0001, 0x28e6: 0x0001, 0x28e7: 0x0001, 0x28e8: 0x0001, 0x28e9: 0x0001, + 0x28ea: 0x0001, 0x28eb: 0x0001, 0x28ec: 0x0001, 0x28ed: 0x0001, 0x28ee: 0x0001, 0x28ef: 0x0001, + 0x28f0: 0x000d, 0x28f1: 0x000d, 0x28f2: 0x000d, 0x28f3: 0x000d, 0x28f4: 0x000d, 0x28f5: 0x000d, + 0x28f6: 0x000d, 0x28f7: 0x000d, 0x28f8: 0x000d, 0x28f9: 0x000d, 0x28fa: 0x000d, 0x28fb: 0x000d, + 0x28fc: 0x000d, 0x28fd: 0x000d, 0x28fe: 0x000d, 0x28ff: 0x000d, + // Block 0xa4, offset 0x2900 + 0x2900: 0x000d, 0x2901: 0x000d, 0x2902: 0x000d, 0x2903: 0x000d, 0x2904: 0x000d, 0x2905: 0x000d, + 0x2906: 0x000c, 0x2907: 0x000c, 0x2908: 0x000c, 0x2909: 0x000c, 0x290a: 0x000c, 0x290b: 0x000c, + 0x290c: 0x000c, 0x290d: 0x000c, 0x290e: 0x000c, 0x290f: 0x000c, 0x2910: 0x000c, 0x2911: 0x000d, + 0x2912: 0x000d, 0x2913: 0x000d, 0x2914: 0x000d, 0x2915: 0x000d, 0x2916: 0x000d, 0x2917: 0x000d, + 0x2918: 0x000d, 0x2919: 0x000d, 0x291a: 0x000d, 0x291b: 0x000d, 0x291c: 0x000d, 0x291d: 0x000d, + 0x291e: 0x000d, 0x291f: 0x000d, 0x2920: 0x000d, 0x2921: 0x000d, 
0x2922: 0x000d, 0x2923: 0x000d, + 0x2924: 0x000d, 0x2925: 0x000d, 0x2926: 0x000d, 0x2927: 0x000d, 0x2928: 0x000d, 0x2929: 0x000d, + 0x292a: 0x000d, 0x292b: 0x000d, 0x292c: 0x000d, 0x292d: 0x000d, 0x292e: 0x000d, 0x292f: 0x000d, + 0x2930: 0x0001, 0x2931: 0x0001, 0x2932: 0x0001, 0x2933: 0x0001, 0x2934: 0x0001, 0x2935: 0x0001, + 0x2936: 0x0001, 0x2937: 0x0001, 0x2938: 0x0001, 0x2939: 0x0001, 0x293a: 0x0001, 0x293b: 0x0001, + 0x293c: 0x0001, 0x293d: 0x0001, 0x293e: 0x0001, 0x293f: 0x0001, + // Block 0xa5, offset 0x2940 + 0x2941: 0x000c, + 0x2978: 0x000c, 0x2979: 0x000c, 0x297a: 0x000c, 0x297b: 0x000c, + 0x297c: 0x000c, 0x297d: 0x000c, 0x297e: 0x000c, 0x297f: 0x000c, + // Block 0xa6, offset 0x2980 + 0x2980: 0x000c, 0x2981: 0x000c, 0x2982: 0x000c, 0x2983: 0x000c, 0x2984: 0x000c, 0x2985: 0x000c, + 0x2986: 0x000c, + 0x2992: 0x000a, 0x2993: 0x000a, 0x2994: 0x000a, 0x2995: 0x000a, 0x2996: 0x000a, 0x2997: 0x000a, + 0x2998: 0x000a, 0x2999: 0x000a, 0x299a: 0x000a, 0x299b: 0x000a, 0x299c: 0x000a, 0x299d: 0x000a, + 0x299e: 0x000a, 0x299f: 0x000a, 0x29a0: 0x000a, 0x29a1: 0x000a, 0x29a2: 0x000a, 0x29a3: 0x000a, + 0x29a4: 0x000a, 0x29a5: 0x000a, + 0x29bf: 0x000c, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x000c, 0x29c1: 0x000c, + 0x29f3: 0x000c, 0x29f4: 0x000c, 0x29f5: 0x000c, + 0x29f6: 0x000c, 0x29f9: 0x000c, 0x29fa: 0x000c, + // Block 0xa8, offset 0x2a00 + 0x2a00: 0x000c, 0x2a01: 0x000c, 0x2a02: 0x000c, + 0x2a27: 0x000c, 0x2a28: 0x000c, 0x2a29: 0x000c, + 0x2a2a: 0x000c, 0x2a2b: 0x000c, 0x2a2d: 0x000c, 0x2a2e: 0x000c, 0x2a2f: 0x000c, + 0x2a30: 0x000c, 0x2a31: 0x000c, 0x2a32: 0x000c, 0x2a33: 0x000c, 0x2a34: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a73: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2a80: 0x000c, 0x2a81: 0x000c, + 0x2ab6: 0x000c, 0x2ab7: 0x000c, 0x2ab8: 0x000c, 0x2ab9: 0x000c, 0x2aba: 0x000c, 0x2abb: 0x000c, + 0x2abc: 0x000c, 0x2abd: 0x000c, 0x2abe: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2ac9: 0x000c, 0x2aca: 0x000c, 0x2acb: 0x000c, + 0x2acc: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b2f: 0x000c, + 0x2b30: 0x000c, 0x2b31: 0x000c, 0x2b34: 0x000c, + 0x2b36: 0x000c, 0x2b37: 0x000c, + 0x2b3e: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b5f: 0x000c, 0x2b63: 0x000c, + 0x2b64: 0x000c, 0x2b65: 0x000c, 0x2b66: 0x000c, 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c, + 0x2b6a: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2b80: 0x000c, + 0x2ba6: 0x000c, 0x2ba7: 0x000c, 0x2ba8: 0x000c, 0x2ba9: 0x000c, + 0x2baa: 0x000c, 0x2bab: 0x000c, 0x2bac: 0x000c, + 0x2bb0: 0x000c, 0x2bb1: 0x000c, 0x2bb2: 0x000c, 0x2bb3: 0x000c, 0x2bb4: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bf8: 0x000c, 0x2bf9: 0x000c, 0x2bfa: 0x000c, 0x2bfb: 0x000c, + 0x2bfc: 0x000c, 0x2bfd: 0x000c, 0x2bfe: 0x000c, 0x2bff: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c02: 0x000c, 0x2c03: 0x000c, 0x2c04: 0x000c, + 0x2c06: 0x000c, + 0x2c1e: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c73: 0x000c, 0x2c74: 0x000c, 0x2c75: 0x000c, + 0x2c76: 0x000c, 0x2c77: 0x000c, 0x2c78: 0x000c, 0x2c7a: 0x000c, + 0x2c7f: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2c80: 0x000c, 0x2c82: 0x000c, 0x2c83: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cf2: 0x000c, 0x2cf3: 0x000c, 0x2cf4: 0x000c, 0x2cf5: 0x000c, + 0x2cfc: 0x000c, 0x2cfd: 0x000c, 0x2cff: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d00: 0x000c, + 0x2d1c: 0x000c, 0x2d1d: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d73: 0x000c, 0x2d74: 0x000c, 0x2d75: 0x000c, + 0x2d76: 0x000c, 0x2d77: 0x000c, 0x2d78: 0x000c, 0x2d79: 0x000c, 0x2d7a: 0x000c, + 0x2d7d: 0x000c, 0x2d7f: 0x000c, + // Block 0xb6, offset 0x2d80 + 
0x2d80: 0x000c, + 0x2da0: 0x000a, 0x2da1: 0x000a, 0x2da2: 0x000a, 0x2da3: 0x000a, + 0x2da4: 0x000a, 0x2da5: 0x000a, 0x2da6: 0x000a, 0x2da7: 0x000a, 0x2da8: 0x000a, 0x2da9: 0x000a, + 0x2daa: 0x000a, 0x2dab: 0x000a, 0x2dac: 0x000a, + // Block 0xb7, offset 0x2dc0 + 0x2deb: 0x000c, 0x2ded: 0x000c, + 0x2df0: 0x000c, 0x2df1: 0x000c, 0x2df2: 0x000c, 0x2df3: 0x000c, 0x2df4: 0x000c, 0x2df5: 0x000c, + 0x2df7: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e1d: 0x000c, + 0x2e1e: 0x000c, 0x2e1f: 0x000c, 0x2e22: 0x000c, 0x2e23: 0x000c, + 0x2e24: 0x000c, 0x2e25: 0x000c, 0x2e27: 0x000c, 0x2e28: 0x000c, 0x2e29: 0x000c, + 0x2e2a: 0x000c, 0x2e2b: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e6f: 0x000c, + 0x2e70: 0x000c, 0x2e71: 0x000c, 0x2e72: 0x000c, 0x2e73: 0x000c, 0x2e74: 0x000c, 0x2e75: 0x000c, + 0x2e76: 0x000c, 0x2e77: 0x000c, 0x2e79: 0x000c, 0x2e7a: 0x000c, + // Block 0xba, offset 0x2e80 + 0x2e81: 0x000c, 0x2e82: 0x000c, 0x2e83: 0x000c, 0x2e84: 0x000c, 0x2e85: 0x000c, + 0x2e86: 0x000c, 0x2e89: 0x000c, 0x2e8a: 0x000c, + 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c, + 0x2eb6: 0x000c, 0x2eb7: 0x000c, 0x2eb8: 0x000c, 0x2ebb: 0x000c, + 0x2ebc: 0x000c, 0x2ebd: 0x000c, 0x2ebe: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2ec7: 0x000c, + 0x2ed1: 0x000c, + 0x2ed2: 0x000c, 0x2ed3: 0x000c, 0x2ed4: 0x000c, 0x2ed5: 0x000c, 0x2ed6: 0x000c, + 0x2ed9: 0x000c, 0x2eda: 0x000c, 0x2edb: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f0a: 0x000c, 0x2f0b: 0x000c, + 0x2f0c: 0x000c, 0x2f0d: 0x000c, 0x2f0e: 0x000c, 0x2f0f: 0x000c, 0x2f10: 0x000c, 0x2f11: 0x000c, + 0x2f12: 0x000c, 0x2f13: 0x000c, 0x2f14: 0x000c, 0x2f15: 0x000c, 0x2f16: 0x000c, + 0x2f18: 0x000c, 0x2f19: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f70: 0x000c, 0x2f71: 0x000c, 0x2f72: 0x000c, 0x2f73: 0x000c, 0x2f74: 0x000c, 0x2f75: 0x000c, + 0x2f76: 0x000c, 0x2f78: 0x000c, 0x2f79: 0x000c, 0x2f7a: 0x000c, 0x2f7b: 0x000c, + 0x2f7c: 0x000c, 0x2f7d: 0x000c, + // Block 0xbe, offset 0x2f80 + 0x2f92: 0x000c, 0x2f93: 0x000c, 0x2f94: 0x000c, 0x2f95: 0x000c, 0x2f96: 0x000c, 0x2f97: 0x000c, + 0x2f98: 0x000c, 0x2f99: 0x000c, 0x2f9a: 0x000c, 0x2f9b: 0x000c, 0x2f9c: 0x000c, 0x2f9d: 0x000c, + 0x2f9e: 0x000c, 0x2f9f: 0x000c, 0x2fa0: 0x000c, 0x2fa1: 0x000c, 0x2fa2: 0x000c, 0x2fa3: 0x000c, + 0x2fa4: 0x000c, 0x2fa5: 0x000c, 0x2fa6: 0x000c, 0x2fa7: 0x000c, + 0x2faa: 0x000c, 0x2fab: 0x000c, 0x2fac: 0x000c, 0x2fad: 0x000c, 0x2fae: 0x000c, 0x2faf: 0x000c, + 0x2fb0: 0x000c, 0x2fb2: 0x000c, 0x2fb3: 0x000c, 0x2fb5: 0x000c, + 0x2fb6: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2ff1: 0x000c, 0x2ff2: 0x000c, 0x2ff3: 0x000c, 0x2ff4: 0x000c, 0x2ff5: 0x000c, + 0x2ff6: 0x000c, 0x2ffa: 0x000c, + 0x2ffc: 0x000c, 0x2ffd: 0x000c, 0x2fff: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3000: 0x000c, 0x3001: 0x000c, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000c, + 0x3007: 0x000c, + // Block 0xc1, offset 0x3040 + 0x3050: 0x000c, 0x3051: 0x000c, + 0x3055: 0x000c, 0x3057: 0x000c, + // Block 0xc2, offset 0x3080 + 0x30b3: 0x000c, 0x30b4: 0x000c, + // Block 0xc3, offset 0x30c0 + 0x30f0: 0x000c, 0x30f1: 0x000c, 0x30f2: 0x000c, 0x30f3: 0x000c, 0x30f4: 0x000c, + // Block 0xc4, offset 0x3100 + 0x3130: 0x000c, 0x3131: 0x000c, 0x3132: 0x000c, 0x3133: 0x000c, 0x3134: 0x000c, 0x3135: 0x000c, + 0x3136: 0x000c, + // Block 0xc5, offset 0x3140 + 0x314f: 0x000c, 0x3150: 0x000c, 0x3151: 0x000c, + 0x3152: 0x000c, + // Block 0xc6, offset 0x3180 + 0x319d: 0x000c, + 0x319e: 0x000c, 0x31a0: 0x000b, 0x31a1: 0x000b, 0x31a2: 0x000b, 0x31a3: 0x000b, + // Block 0xc7, offset 0x31c0 + 0x31e7: 0x000c, 0x31e8: 0x000c, 0x31e9: 
0x000c, + 0x31f3: 0x000b, 0x31f4: 0x000b, 0x31f5: 0x000b, + 0x31f6: 0x000b, 0x31f7: 0x000b, 0x31f8: 0x000b, 0x31f9: 0x000b, 0x31fa: 0x000b, 0x31fb: 0x000c, + 0x31fc: 0x000c, 0x31fd: 0x000c, 0x31fe: 0x000c, 0x31ff: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3200: 0x000c, 0x3201: 0x000c, 0x3202: 0x000c, 0x3205: 0x000c, + 0x3206: 0x000c, 0x3207: 0x000c, 0x3208: 0x000c, 0x3209: 0x000c, 0x320a: 0x000c, 0x320b: 0x000c, + 0x322a: 0x000c, 0x322b: 0x000c, 0x322c: 0x000c, 0x322d: 0x000c, + // Block 0xc9, offset 0x3240 + 0x3240: 0x000a, 0x3241: 0x000a, 0x3242: 0x000c, 0x3243: 0x000c, 0x3244: 0x000c, 0x3245: 0x000a, + // Block 0xca, offset 0x3280 + 0x3280: 0x000a, 0x3281: 0x000a, 0x3282: 0x000a, 0x3283: 0x000a, 0x3284: 0x000a, 0x3285: 0x000a, + 0x3286: 0x000a, 0x3287: 0x000a, 0x3288: 0x000a, 0x3289: 0x000a, 0x328a: 0x000a, 0x328b: 0x000a, + 0x328c: 0x000a, 0x328d: 0x000a, 0x328e: 0x000a, 0x328f: 0x000a, 0x3290: 0x000a, 0x3291: 0x000a, + 0x3292: 0x000a, 0x3293: 0x000a, 0x3294: 0x000a, 0x3295: 0x000a, 0x3296: 0x000a, + // Block 0xcb, offset 0x32c0 + 0x32db: 0x000a, + // Block 0xcc, offset 0x3300 + 0x3315: 0x000a, + // Block 0xcd, offset 0x3340 + 0x334f: 0x000a, + // Block 0xce, offset 0x3380 + 0x3389: 0x000a, + // Block 0xcf, offset 0x33c0 + 0x33c3: 0x000a, + 0x33ce: 0x0002, 0x33cf: 0x0002, 0x33d0: 0x0002, 0x33d1: 0x0002, + 0x33d2: 0x0002, 0x33d3: 0x0002, 0x33d4: 0x0002, 0x33d5: 0x0002, 0x33d6: 0x0002, 0x33d7: 0x0002, + 0x33d8: 0x0002, 0x33d9: 0x0002, 0x33da: 0x0002, 0x33db: 0x0002, 0x33dc: 0x0002, 0x33dd: 0x0002, + 0x33de: 0x0002, 0x33df: 0x0002, 0x33e0: 0x0002, 0x33e1: 0x0002, 0x33e2: 0x0002, 0x33e3: 0x0002, + 0x33e4: 0x0002, 0x33e5: 0x0002, 0x33e6: 0x0002, 0x33e7: 0x0002, 0x33e8: 0x0002, 0x33e9: 0x0002, + 0x33ea: 0x0002, 0x33eb: 0x0002, 0x33ec: 0x0002, 0x33ed: 0x0002, 0x33ee: 0x0002, 0x33ef: 0x0002, + 0x33f0: 0x0002, 0x33f1: 0x0002, 0x33f2: 0x0002, 0x33f3: 0x0002, 0x33f4: 0x0002, 0x33f5: 0x0002, + 0x33f6: 0x0002, 0x33f7: 0x0002, 0x33f8: 0x0002, 0x33f9: 0x0002, 0x33fa: 0x0002, 0x33fb: 0x0002, + 0x33fc: 0x0002, 0x33fd: 0x0002, 0x33fe: 0x0002, 0x33ff: 0x0002, + // Block 0xd0, offset 0x3400 + 0x3400: 0x000c, 0x3401: 0x000c, 0x3402: 0x000c, 0x3403: 0x000c, 0x3404: 0x000c, 0x3405: 0x000c, + 0x3406: 0x000c, 0x3407: 0x000c, 0x3408: 0x000c, 0x3409: 0x000c, 0x340a: 0x000c, 0x340b: 0x000c, + 0x340c: 0x000c, 0x340d: 0x000c, 0x340e: 0x000c, 0x340f: 0x000c, 0x3410: 0x000c, 0x3411: 0x000c, + 0x3412: 0x000c, 0x3413: 0x000c, 0x3414: 0x000c, 0x3415: 0x000c, 0x3416: 0x000c, 0x3417: 0x000c, + 0x3418: 0x000c, 0x3419: 0x000c, 0x341a: 0x000c, 0x341b: 0x000c, 0x341c: 0x000c, 0x341d: 0x000c, + 0x341e: 0x000c, 0x341f: 0x000c, 0x3420: 0x000c, 0x3421: 0x000c, 0x3422: 0x000c, 0x3423: 0x000c, + 0x3424: 0x000c, 0x3425: 0x000c, 0x3426: 0x000c, 0x3427: 0x000c, 0x3428: 0x000c, 0x3429: 0x000c, + 0x342a: 0x000c, 0x342b: 0x000c, 0x342c: 0x000c, 0x342d: 0x000c, 0x342e: 0x000c, 0x342f: 0x000c, + 0x3430: 0x000c, 0x3431: 0x000c, 0x3432: 0x000c, 0x3433: 0x000c, 0x3434: 0x000c, 0x3435: 0x000c, + 0x3436: 0x000c, 0x343b: 0x000c, + 0x343c: 0x000c, 0x343d: 0x000c, 0x343e: 0x000c, 0x343f: 0x000c, + // Block 0xd1, offset 0x3440 + 0x3440: 0x000c, 0x3441: 0x000c, 0x3442: 0x000c, 0x3443: 0x000c, 0x3444: 0x000c, 0x3445: 0x000c, + 0x3446: 0x000c, 0x3447: 0x000c, 0x3448: 0x000c, 0x3449: 0x000c, 0x344a: 0x000c, 0x344b: 0x000c, + 0x344c: 0x000c, 0x344d: 0x000c, 0x344e: 0x000c, 0x344f: 0x000c, 0x3450: 0x000c, 0x3451: 0x000c, + 0x3452: 0x000c, 0x3453: 0x000c, 0x3454: 0x000c, 0x3455: 0x000c, 0x3456: 0x000c, 0x3457: 0x000c, + 0x3458: 0x000c, 0x3459: 0x000c, 
0x345a: 0x000c, 0x345b: 0x000c, 0x345c: 0x000c, 0x345d: 0x000c, + 0x345e: 0x000c, 0x345f: 0x000c, 0x3460: 0x000c, 0x3461: 0x000c, 0x3462: 0x000c, 0x3463: 0x000c, + 0x3464: 0x000c, 0x3465: 0x000c, 0x3466: 0x000c, 0x3467: 0x000c, 0x3468: 0x000c, 0x3469: 0x000c, + 0x346a: 0x000c, 0x346b: 0x000c, 0x346c: 0x000c, + 0x3475: 0x000c, + // Block 0xd2, offset 0x3480 + 0x3484: 0x000c, + 0x349b: 0x000c, 0x349c: 0x000c, 0x349d: 0x000c, + 0x349e: 0x000c, 0x349f: 0x000c, 0x34a1: 0x000c, 0x34a2: 0x000c, 0x34a3: 0x000c, + 0x34a4: 0x000c, 0x34a5: 0x000c, 0x34a6: 0x000c, 0x34a7: 0x000c, 0x34a8: 0x000c, 0x34a9: 0x000c, + 0x34aa: 0x000c, 0x34ab: 0x000c, 0x34ac: 0x000c, 0x34ad: 0x000c, 0x34ae: 0x000c, 0x34af: 0x000c, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000c, 0x34c1: 0x000c, 0x34c2: 0x000c, 0x34c3: 0x000c, 0x34c4: 0x000c, 0x34c5: 0x000c, + 0x34c6: 0x000c, 0x34c8: 0x000c, 0x34c9: 0x000c, 0x34ca: 0x000c, 0x34cb: 0x000c, + 0x34cc: 0x000c, 0x34cd: 0x000c, 0x34ce: 0x000c, 0x34cf: 0x000c, 0x34d0: 0x000c, 0x34d1: 0x000c, + 0x34d2: 0x000c, 0x34d3: 0x000c, 0x34d4: 0x000c, 0x34d5: 0x000c, 0x34d6: 0x000c, 0x34d7: 0x000c, + 0x34d8: 0x000c, 0x34db: 0x000c, 0x34dc: 0x000c, 0x34dd: 0x000c, + 0x34de: 0x000c, 0x34df: 0x000c, 0x34e0: 0x000c, 0x34e1: 0x000c, 0x34e3: 0x000c, + 0x34e4: 0x000c, 0x34e6: 0x000c, 0x34e7: 0x000c, 0x34e8: 0x000c, 0x34e9: 0x000c, + 0x34ea: 0x000c, + // Block 0xd4, offset 0x3500 + 0x3500: 0x0001, 0x3501: 0x0001, 0x3502: 0x0001, 0x3503: 0x0001, 0x3504: 0x0001, 0x3505: 0x0001, + 0x3506: 0x0001, 0x3507: 0x0001, 0x3508: 0x0001, 0x3509: 0x0001, 0x350a: 0x0001, 0x350b: 0x0001, + 0x350c: 0x0001, 0x350d: 0x0001, 0x350e: 0x0001, 0x350f: 0x0001, 0x3510: 0x000c, 0x3511: 0x000c, + 0x3512: 0x000c, 0x3513: 0x000c, 0x3514: 0x000c, 0x3515: 0x000c, 0x3516: 0x000c, 0x3517: 0x0001, + 0x3518: 0x0001, 0x3519: 0x0001, 0x351a: 0x0001, 0x351b: 0x0001, 0x351c: 0x0001, 0x351d: 0x0001, + 0x351e: 0x0001, 0x351f: 0x0001, 0x3520: 0x0001, 0x3521: 0x0001, 0x3522: 0x0001, 0x3523: 0x0001, + 0x3524: 0x0001, 0x3525: 0x0001, 0x3526: 0x0001, 0x3527: 0x0001, 0x3528: 0x0001, 0x3529: 0x0001, + 0x352a: 0x0001, 0x352b: 0x0001, 0x352c: 0x0001, 0x352d: 0x0001, 0x352e: 0x0001, 0x352f: 0x0001, + 0x3530: 0x0001, 0x3531: 0x0001, 0x3532: 0x0001, 0x3533: 0x0001, 0x3534: 0x0001, 0x3535: 0x0001, + 0x3536: 0x0001, 0x3537: 0x0001, 0x3538: 0x0001, 0x3539: 0x0001, 0x353a: 0x0001, 0x353b: 0x0001, + 0x353c: 0x0001, 0x353d: 0x0001, 0x353e: 0x0001, 0x353f: 0x0001, + // Block 0xd5, offset 0x3540 + 0x3540: 0x0001, 0x3541: 0x0001, 0x3542: 0x0001, 0x3543: 0x0001, 0x3544: 0x000c, 0x3545: 0x000c, + 0x3546: 0x000c, 0x3547: 0x000c, 0x3548: 0x000c, 0x3549: 0x000c, 0x354a: 0x000c, 0x354b: 0x0001, + 0x354c: 0x0001, 0x354d: 0x0001, 0x354e: 0x0001, 0x354f: 0x0001, 0x3550: 0x0001, 0x3551: 0x0001, + 0x3552: 0x0001, 0x3553: 0x0001, 0x3554: 0x0001, 0x3555: 0x0001, 0x3556: 0x0001, 0x3557: 0x0001, + 0x3558: 0x0001, 0x3559: 0x0001, 0x355a: 0x0001, 0x355b: 0x0001, 0x355c: 0x0001, 0x355d: 0x0001, + 0x355e: 0x0001, 0x355f: 0x0001, 0x3560: 0x0001, 0x3561: 0x0001, 0x3562: 0x0001, 0x3563: 0x0001, + 0x3564: 0x0001, 0x3565: 0x0001, 0x3566: 0x0001, 0x3567: 0x0001, 0x3568: 0x0001, 0x3569: 0x0001, + 0x356a: 0x0001, 0x356b: 0x0001, 0x356c: 0x0001, 0x356d: 0x0001, 0x356e: 0x0001, 0x356f: 0x0001, + 0x3570: 0x0001, 0x3571: 0x0001, 0x3572: 0x0001, 0x3573: 0x0001, 0x3574: 0x0001, 0x3575: 0x0001, + 0x3576: 0x0001, 0x3577: 0x0001, 0x3578: 0x0001, 0x3579: 0x0001, 0x357a: 0x0001, 0x357b: 0x0001, + 0x357c: 0x0001, 0x357d: 0x0001, 0x357e: 0x0001, 0x357f: 0x0001, + // Block 0xd6, offset 0x3580 + 0x3580: 
0x000d, 0x3581: 0x000d, 0x3582: 0x000d, 0x3583: 0x000d, 0x3584: 0x000d, 0x3585: 0x000d, + 0x3586: 0x000d, 0x3587: 0x000d, 0x3588: 0x000d, 0x3589: 0x000d, 0x358a: 0x000d, 0x358b: 0x000d, + 0x358c: 0x000d, 0x358d: 0x000d, 0x358e: 0x000d, 0x358f: 0x000d, 0x3590: 0x000d, 0x3591: 0x000d, + 0x3592: 0x000d, 0x3593: 0x000d, 0x3594: 0x000d, 0x3595: 0x000d, 0x3596: 0x000d, 0x3597: 0x000d, + 0x3598: 0x000d, 0x3599: 0x000d, 0x359a: 0x000d, 0x359b: 0x000d, 0x359c: 0x000d, 0x359d: 0x000d, + 0x359e: 0x000d, 0x359f: 0x000d, 0x35a0: 0x000d, 0x35a1: 0x000d, 0x35a2: 0x000d, 0x35a3: 0x000d, + 0x35a4: 0x000d, 0x35a5: 0x000d, 0x35a6: 0x000d, 0x35a7: 0x000d, 0x35a8: 0x000d, 0x35a9: 0x000d, + 0x35aa: 0x000d, 0x35ab: 0x000d, 0x35ac: 0x000d, 0x35ad: 0x000d, 0x35ae: 0x000d, 0x35af: 0x000d, + 0x35b0: 0x000a, 0x35b1: 0x000a, 0x35b2: 0x000d, 0x35b3: 0x000d, 0x35b4: 0x000d, 0x35b5: 0x000d, + 0x35b6: 0x000d, 0x35b7: 0x000d, 0x35b8: 0x000d, 0x35b9: 0x000d, 0x35ba: 0x000d, 0x35bb: 0x000d, + 0x35bc: 0x000d, 0x35bd: 0x000d, 0x35be: 0x000d, 0x35bf: 0x000d, + // Block 0xd7, offset 0x35c0 + 0x35c0: 0x000a, 0x35c1: 0x000a, 0x35c2: 0x000a, 0x35c3: 0x000a, 0x35c4: 0x000a, 0x35c5: 0x000a, + 0x35c6: 0x000a, 0x35c7: 0x000a, 0x35c8: 0x000a, 0x35c9: 0x000a, 0x35ca: 0x000a, 0x35cb: 0x000a, + 0x35cc: 0x000a, 0x35cd: 0x000a, 0x35ce: 0x000a, 0x35cf: 0x000a, 0x35d0: 0x000a, 0x35d1: 0x000a, + 0x35d2: 0x000a, 0x35d3: 0x000a, 0x35d4: 0x000a, 0x35d5: 0x000a, 0x35d6: 0x000a, 0x35d7: 0x000a, + 0x35d8: 0x000a, 0x35d9: 0x000a, 0x35da: 0x000a, 0x35db: 0x000a, 0x35dc: 0x000a, 0x35dd: 0x000a, + 0x35de: 0x000a, 0x35df: 0x000a, 0x35e0: 0x000a, 0x35e1: 0x000a, 0x35e2: 0x000a, 0x35e3: 0x000a, + 0x35e4: 0x000a, 0x35e5: 0x000a, 0x35e6: 0x000a, 0x35e7: 0x000a, 0x35e8: 0x000a, 0x35e9: 0x000a, + 0x35ea: 0x000a, 0x35eb: 0x000a, + 0x35f0: 0x000a, 0x35f1: 0x000a, 0x35f2: 0x000a, 0x35f3: 0x000a, 0x35f4: 0x000a, 0x35f5: 0x000a, + 0x35f6: 0x000a, 0x35f7: 0x000a, 0x35f8: 0x000a, 0x35f9: 0x000a, 0x35fa: 0x000a, 0x35fb: 0x000a, + 0x35fc: 0x000a, 0x35fd: 0x000a, 0x35fe: 0x000a, 0x35ff: 0x000a, + // Block 0xd8, offset 0x3600 + 0x3600: 0x000a, 0x3601: 0x000a, 0x3602: 0x000a, 0x3603: 0x000a, 0x3604: 0x000a, 0x3605: 0x000a, + 0x3606: 0x000a, 0x3607: 0x000a, 0x3608: 0x000a, 0x3609: 0x000a, 0x360a: 0x000a, 0x360b: 0x000a, + 0x360c: 0x000a, 0x360d: 0x000a, 0x360e: 0x000a, 0x360f: 0x000a, 0x3610: 0x000a, 0x3611: 0x000a, + 0x3612: 0x000a, 0x3613: 0x000a, + 0x3620: 0x000a, 0x3621: 0x000a, 0x3622: 0x000a, 0x3623: 0x000a, + 0x3624: 0x000a, 0x3625: 0x000a, 0x3626: 0x000a, 0x3627: 0x000a, 0x3628: 0x000a, 0x3629: 0x000a, + 0x362a: 0x000a, 0x362b: 0x000a, 0x362c: 0x000a, 0x362d: 0x000a, 0x362e: 0x000a, + 0x3631: 0x000a, 0x3632: 0x000a, 0x3633: 0x000a, 0x3634: 0x000a, 0x3635: 0x000a, + 0x3636: 0x000a, 0x3637: 0x000a, 0x3638: 0x000a, 0x3639: 0x000a, 0x363a: 0x000a, 0x363b: 0x000a, + 0x363c: 0x000a, 0x363d: 0x000a, 0x363e: 0x000a, 0x363f: 0x000a, + // Block 0xd9, offset 0x3640 + 0x3641: 0x000a, 0x3642: 0x000a, 0x3643: 0x000a, 0x3644: 0x000a, 0x3645: 0x000a, + 0x3646: 0x000a, 0x3647: 0x000a, 0x3648: 0x000a, 0x3649: 0x000a, 0x364a: 0x000a, 0x364b: 0x000a, + 0x364c: 0x000a, 0x364d: 0x000a, 0x364e: 0x000a, 0x364f: 0x000a, 0x3651: 0x000a, + 0x3652: 0x000a, 0x3653: 0x000a, 0x3654: 0x000a, 0x3655: 0x000a, 0x3656: 0x000a, 0x3657: 0x000a, + 0x3658: 0x000a, 0x3659: 0x000a, 0x365a: 0x000a, 0x365b: 0x000a, 0x365c: 0x000a, 0x365d: 0x000a, + 0x365e: 0x000a, 0x365f: 0x000a, 0x3660: 0x000a, 0x3661: 0x000a, 0x3662: 0x000a, 0x3663: 0x000a, + 0x3664: 0x000a, 0x3665: 0x000a, 0x3666: 0x000a, 0x3667: 0x000a, 
0x3668: 0x000a, 0x3669: 0x000a, + 0x366a: 0x000a, 0x366b: 0x000a, 0x366c: 0x000a, 0x366d: 0x000a, 0x366e: 0x000a, 0x366f: 0x000a, + 0x3670: 0x000a, 0x3671: 0x000a, 0x3672: 0x000a, 0x3673: 0x000a, 0x3674: 0x000a, 0x3675: 0x000a, + // Block 0xda, offset 0x3680 + 0x3680: 0x0002, 0x3681: 0x0002, 0x3682: 0x0002, 0x3683: 0x0002, 0x3684: 0x0002, 0x3685: 0x0002, + 0x3686: 0x0002, 0x3687: 0x0002, 0x3688: 0x0002, 0x3689: 0x0002, 0x368a: 0x0002, 0x368b: 0x000a, + 0x368c: 0x000a, + 0x36af: 0x000a, + // Block 0xdb, offset 0x36c0 + 0x36ea: 0x000a, 0x36eb: 0x000a, + // Block 0xdc, offset 0x3700 + 0x3720: 0x000a, 0x3721: 0x000a, 0x3722: 0x000a, 0x3723: 0x000a, + 0x3724: 0x000a, 0x3725: 0x000a, + // Block 0xdd, offset 0x3740 + 0x3740: 0x000a, 0x3741: 0x000a, 0x3742: 0x000a, 0x3743: 0x000a, 0x3744: 0x000a, 0x3745: 0x000a, + 0x3746: 0x000a, 0x3747: 0x000a, 0x3748: 0x000a, 0x3749: 0x000a, 0x374a: 0x000a, 0x374b: 0x000a, + 0x374c: 0x000a, 0x374d: 0x000a, 0x374e: 0x000a, 0x374f: 0x000a, 0x3750: 0x000a, 0x3751: 0x000a, + 0x3752: 0x000a, 0x3753: 0x000a, 0x3754: 0x000a, + 0x3760: 0x000a, 0x3761: 0x000a, 0x3762: 0x000a, 0x3763: 0x000a, + 0x3764: 0x000a, 0x3765: 0x000a, 0x3766: 0x000a, 0x3767: 0x000a, 0x3768: 0x000a, 0x3769: 0x000a, + 0x376a: 0x000a, 0x376b: 0x000a, 0x376c: 0x000a, + 0x3770: 0x000a, 0x3771: 0x000a, 0x3772: 0x000a, 0x3773: 0x000a, 0x3774: 0x000a, 0x3775: 0x000a, + 0x3776: 0x000a, 0x3777: 0x000a, 0x3778: 0x000a, 0x3779: 0x000a, + // Block 0xde, offset 0x3780 + 0x3780: 0x000a, 0x3781: 0x000a, 0x3782: 0x000a, 0x3783: 0x000a, 0x3784: 0x000a, 0x3785: 0x000a, + 0x3786: 0x000a, 0x3787: 0x000a, 0x3788: 0x000a, 0x3789: 0x000a, 0x378a: 0x000a, 0x378b: 0x000a, + 0x378c: 0x000a, 0x378d: 0x000a, 0x378e: 0x000a, 0x378f: 0x000a, 0x3790: 0x000a, 0x3791: 0x000a, + 0x3792: 0x000a, 0x3793: 0x000a, 0x3794: 0x000a, 0x3795: 0x000a, 0x3796: 0x000a, 0x3797: 0x000a, + 0x3798: 0x000a, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x000a, 0x37c1: 0x000a, 0x37c2: 0x000a, 0x37c3: 0x000a, 0x37c4: 0x000a, 0x37c5: 0x000a, + 0x37c6: 0x000a, 0x37c7: 0x000a, 0x37c8: 0x000a, 0x37c9: 0x000a, 0x37ca: 0x000a, 0x37cb: 0x000a, + 0x37d0: 0x000a, 0x37d1: 0x000a, + 0x37d2: 0x000a, 0x37d3: 0x000a, 0x37d4: 0x000a, 0x37d5: 0x000a, 0x37d6: 0x000a, 0x37d7: 0x000a, + 0x37d8: 0x000a, 0x37d9: 0x000a, 0x37da: 0x000a, 0x37db: 0x000a, 0x37dc: 0x000a, 0x37dd: 0x000a, + 0x37de: 0x000a, 0x37df: 0x000a, 0x37e0: 0x000a, 0x37e1: 0x000a, 0x37e2: 0x000a, 0x37e3: 0x000a, + 0x37e4: 0x000a, 0x37e5: 0x000a, 0x37e6: 0x000a, 0x37e7: 0x000a, 0x37e8: 0x000a, 0x37e9: 0x000a, + 0x37ea: 0x000a, 0x37eb: 0x000a, 0x37ec: 0x000a, 0x37ed: 0x000a, 0x37ee: 0x000a, 0x37ef: 0x000a, + 0x37f0: 0x000a, 0x37f1: 0x000a, 0x37f2: 0x000a, 0x37f3: 0x000a, 0x37f4: 0x000a, 0x37f5: 0x000a, + 0x37f6: 0x000a, 0x37f7: 0x000a, 0x37f8: 0x000a, 0x37f9: 0x000a, 0x37fa: 0x000a, 0x37fb: 0x000a, + 0x37fc: 0x000a, 0x37fd: 0x000a, 0x37fe: 0x000a, 0x37ff: 0x000a, + // Block 0xe0, offset 0x3800 + 0x3800: 0x000a, 0x3801: 0x000a, 0x3802: 0x000a, 0x3803: 0x000a, 0x3804: 0x000a, 0x3805: 0x000a, + 0x3806: 0x000a, 0x3807: 0x000a, + 0x3810: 0x000a, 0x3811: 0x000a, + 0x3812: 0x000a, 0x3813: 0x000a, 0x3814: 0x000a, 0x3815: 0x000a, 0x3816: 0x000a, 0x3817: 0x000a, + 0x3818: 0x000a, 0x3819: 0x000a, + 0x3820: 0x000a, 0x3821: 0x000a, 0x3822: 0x000a, 0x3823: 0x000a, + 0x3824: 0x000a, 0x3825: 0x000a, 0x3826: 0x000a, 0x3827: 0x000a, 0x3828: 0x000a, 0x3829: 0x000a, + 0x382a: 0x000a, 0x382b: 0x000a, 0x382c: 0x000a, 0x382d: 0x000a, 0x382e: 0x000a, 0x382f: 0x000a, + 0x3830: 0x000a, 0x3831: 0x000a, 0x3832: 0x000a, 0x3833: 0x000a, 
0x3834: 0x000a, 0x3835: 0x000a, + 0x3836: 0x000a, 0x3837: 0x000a, 0x3838: 0x000a, 0x3839: 0x000a, 0x383a: 0x000a, 0x383b: 0x000a, + 0x383c: 0x000a, 0x383d: 0x000a, 0x383e: 0x000a, 0x383f: 0x000a, + // Block 0xe1, offset 0x3840 + 0x3840: 0x000a, 0x3841: 0x000a, 0x3842: 0x000a, 0x3843: 0x000a, 0x3844: 0x000a, 0x3845: 0x000a, + 0x3846: 0x000a, 0x3847: 0x000a, + 0x3850: 0x000a, 0x3851: 0x000a, + 0x3852: 0x000a, 0x3853: 0x000a, 0x3854: 0x000a, 0x3855: 0x000a, 0x3856: 0x000a, 0x3857: 0x000a, + 0x3858: 0x000a, 0x3859: 0x000a, 0x385a: 0x000a, 0x385b: 0x000a, 0x385c: 0x000a, 0x385d: 0x000a, + 0x385e: 0x000a, 0x385f: 0x000a, 0x3860: 0x000a, 0x3861: 0x000a, 0x3862: 0x000a, 0x3863: 0x000a, + 0x3864: 0x000a, 0x3865: 0x000a, 0x3866: 0x000a, 0x3867: 0x000a, 0x3868: 0x000a, 0x3869: 0x000a, + 0x386a: 0x000a, 0x386b: 0x000a, 0x386c: 0x000a, 0x386d: 0x000a, + // Block 0xe2, offset 0x3880 + 0x3880: 0x000a, 0x3881: 0x000a, 0x3882: 0x000a, 0x3883: 0x000a, 0x3884: 0x000a, 0x3885: 0x000a, + 0x3886: 0x000a, 0x3887: 0x000a, 0x3888: 0x000a, 0x3889: 0x000a, 0x388a: 0x000a, 0x388b: 0x000a, + 0x3890: 0x000a, 0x3891: 0x000a, + 0x3892: 0x000a, 0x3893: 0x000a, 0x3894: 0x000a, 0x3895: 0x000a, 0x3896: 0x000a, 0x3897: 0x000a, + 0x3898: 0x000a, 0x3899: 0x000a, 0x389a: 0x000a, 0x389b: 0x000a, 0x389c: 0x000a, 0x389d: 0x000a, + 0x389e: 0x000a, 0x389f: 0x000a, 0x38a0: 0x000a, 0x38a1: 0x000a, 0x38a2: 0x000a, 0x38a3: 0x000a, + 0x38a4: 0x000a, 0x38a5: 0x000a, 0x38a6: 0x000a, 0x38a7: 0x000a, 0x38a8: 0x000a, 0x38a9: 0x000a, + 0x38aa: 0x000a, 0x38ab: 0x000a, 0x38ac: 0x000a, 0x38ad: 0x000a, 0x38ae: 0x000a, 0x38af: 0x000a, + 0x38b0: 0x000a, 0x38b1: 0x000a, 0x38b2: 0x000a, 0x38b3: 0x000a, 0x38b4: 0x000a, 0x38b5: 0x000a, + 0x38b6: 0x000a, 0x38b7: 0x000a, 0x38b8: 0x000a, 0x38b9: 0x000a, 0x38ba: 0x000a, 0x38bb: 0x000a, + 0x38bc: 0x000a, 0x38bd: 0x000a, 0x38be: 0x000a, + // Block 0xe3, offset 0x38c0 + 0x38c0: 0x000a, 0x38c1: 0x000a, 0x38c2: 0x000a, 0x38c3: 0x000a, 0x38c4: 0x000a, 0x38c5: 0x000a, + 0x38c6: 0x000a, 0x38c7: 0x000a, 0x38c8: 0x000a, 0x38c9: 0x000a, 0x38ca: 0x000a, 0x38cb: 0x000a, + 0x38cc: 0x000a, 0x38cd: 0x000a, 0x38ce: 0x000a, 0x38cf: 0x000a, 0x38d0: 0x000a, 0x38d1: 0x000a, + 0x38d2: 0x000a, 0x38d3: 0x000a, 0x38d4: 0x000a, 0x38d5: 0x000a, 0x38d6: 0x000a, 0x38d7: 0x000a, + 0x38d8: 0x000a, 0x38d9: 0x000a, 0x38da: 0x000a, 0x38db: 0x000a, 0x38dc: 0x000a, 0x38dd: 0x000a, + 0x38de: 0x000a, 0x38df: 0x000a, 0x38e0: 0x000a, 0x38e1: 0x000a, 0x38e2: 0x000a, 0x38e3: 0x000a, + 0x38e4: 0x000a, 0x38e5: 0x000a, 0x38e6: 0x000a, 0x38e7: 0x000a, 0x38e8: 0x000a, 0x38e9: 0x000a, + 0x38ea: 0x000a, 0x38eb: 0x000a, 0x38ec: 0x000a, 0x38ed: 0x000a, 0x38ee: 0x000a, 0x38ef: 0x000a, + 0x38f0: 0x000a, 0x38f3: 0x000a, 0x38f4: 0x000a, 0x38f5: 0x000a, + 0x38f6: 0x000a, 0x38fa: 0x000a, + 0x38fc: 0x000a, 0x38fd: 0x000a, 0x38fe: 0x000a, 0x38ff: 0x000a, + // Block 0xe4, offset 0x3900 + 0x3900: 0x000a, 0x3901: 0x000a, 0x3902: 0x000a, 0x3903: 0x000a, 0x3904: 0x000a, 0x3905: 0x000a, + 0x3906: 0x000a, 0x3907: 0x000a, 0x3908: 0x000a, 0x3909: 0x000a, 0x390a: 0x000a, 0x390b: 0x000a, + 0x390c: 0x000a, 0x390d: 0x000a, 0x390e: 0x000a, 0x390f: 0x000a, 0x3910: 0x000a, 0x3911: 0x000a, + 0x3912: 0x000a, 0x3913: 0x000a, 0x3914: 0x000a, 0x3915: 0x000a, 0x3916: 0x000a, 0x3917: 0x000a, + 0x3918: 0x000a, 0x3919: 0x000a, 0x391a: 0x000a, 0x391b: 0x000a, 0x391c: 0x000a, 0x391d: 0x000a, + 0x391e: 0x000a, 0x391f: 0x000a, 0x3920: 0x000a, 0x3921: 0x000a, 0x3922: 0x000a, + 0x3930: 0x000a, 0x3931: 0x000a, 0x3932: 0x000a, 0x3933: 0x000a, 0x3934: 0x000a, 0x3935: 0x000a, + 0x3936: 0x000a, 
0x3937: 0x000a, 0x3938: 0x000a, 0x3939: 0x000a, + // Block 0xe5, offset 0x3940 + 0x3940: 0x000a, 0x3941: 0x000a, 0x3942: 0x000a, + 0x3950: 0x000a, 0x3951: 0x000a, + 0x3952: 0x000a, 0x3953: 0x000a, 0x3954: 0x000a, 0x3955: 0x000a, 0x3956: 0x000a, 0x3957: 0x000a, + 0x3958: 0x000a, 0x3959: 0x000a, 0x395a: 0x000a, 0x395b: 0x000a, 0x395c: 0x000a, 0x395d: 0x000a, + 0x395e: 0x000a, 0x395f: 0x000a, 0x3960: 0x000a, 0x3961: 0x000a, 0x3962: 0x000a, 0x3963: 0x000a, + 0x3964: 0x000a, 0x3965: 0x000a, 0x3966: 0x000a, 0x3967: 0x000a, 0x3968: 0x000a, 0x3969: 0x000a, + 0x396a: 0x000a, 0x396b: 0x000a, 0x396c: 0x000a, 0x396d: 0x000a, 0x396e: 0x000a, 0x396f: 0x000a, + 0x3970: 0x000a, 0x3971: 0x000a, 0x3972: 0x000a, 0x3973: 0x000a, 0x3974: 0x000a, 0x3975: 0x000a, + 0x3976: 0x000a, 0x3977: 0x000a, 0x3978: 0x000a, 0x3979: 0x000a, 0x397a: 0x000a, 0x397b: 0x000a, + 0x397c: 0x000a, 0x397d: 0x000a, 0x397e: 0x000a, 0x397f: 0x000a, + // Block 0xe6, offset 0x3980 + 0x39a0: 0x000a, 0x39a1: 0x000a, 0x39a2: 0x000a, 0x39a3: 0x000a, + 0x39a4: 0x000a, 0x39a5: 0x000a, 0x39a6: 0x000a, 0x39a7: 0x000a, 0x39a8: 0x000a, 0x39a9: 0x000a, + 0x39aa: 0x000a, 0x39ab: 0x000a, 0x39ac: 0x000a, 0x39ad: 0x000a, + // Block 0xe7, offset 0x39c0 + 0x39fe: 0x000b, 0x39ff: 0x000b, + // Block 0xe8, offset 0x3a00 + 0x3a00: 0x000b, 0x3a01: 0x000b, 0x3a02: 0x000b, 0x3a03: 0x000b, 0x3a04: 0x000b, 0x3a05: 0x000b, + 0x3a06: 0x000b, 0x3a07: 0x000b, 0x3a08: 0x000b, 0x3a09: 0x000b, 0x3a0a: 0x000b, 0x3a0b: 0x000b, + 0x3a0c: 0x000b, 0x3a0d: 0x000b, 0x3a0e: 0x000b, 0x3a0f: 0x000b, 0x3a10: 0x000b, 0x3a11: 0x000b, + 0x3a12: 0x000b, 0x3a13: 0x000b, 0x3a14: 0x000b, 0x3a15: 0x000b, 0x3a16: 0x000b, 0x3a17: 0x000b, + 0x3a18: 0x000b, 0x3a19: 0x000b, 0x3a1a: 0x000b, 0x3a1b: 0x000b, 0x3a1c: 0x000b, 0x3a1d: 0x000b, + 0x3a1e: 0x000b, 0x3a1f: 0x000b, 0x3a20: 0x000b, 0x3a21: 0x000b, 0x3a22: 0x000b, 0x3a23: 0x000b, + 0x3a24: 0x000b, 0x3a25: 0x000b, 0x3a26: 0x000b, 0x3a27: 0x000b, 0x3a28: 0x000b, 0x3a29: 0x000b, + 0x3a2a: 0x000b, 0x3a2b: 0x000b, 0x3a2c: 0x000b, 0x3a2d: 0x000b, 0x3a2e: 0x000b, 0x3a2f: 0x000b, + 0x3a30: 0x000b, 0x3a31: 0x000b, 0x3a32: 0x000b, 0x3a33: 0x000b, 0x3a34: 0x000b, 0x3a35: 0x000b, + 0x3a36: 0x000b, 0x3a37: 0x000b, 0x3a38: 0x000b, 0x3a39: 0x000b, 0x3a3a: 0x000b, 0x3a3b: 0x000b, + 0x3a3c: 0x000b, 0x3a3d: 0x000b, 0x3a3e: 0x000b, 0x3a3f: 0x000b, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x000c, 0x3a41: 0x000c, 0x3a42: 0x000c, 0x3a43: 0x000c, 0x3a44: 0x000c, 0x3a45: 0x000c, + 0x3a46: 0x000c, 0x3a47: 0x000c, 0x3a48: 0x000c, 0x3a49: 0x000c, 0x3a4a: 0x000c, 0x3a4b: 0x000c, + 0x3a4c: 0x000c, 0x3a4d: 0x000c, 0x3a4e: 0x000c, 0x3a4f: 0x000c, 0x3a50: 0x000c, 0x3a51: 0x000c, + 0x3a52: 0x000c, 0x3a53: 0x000c, 0x3a54: 0x000c, 0x3a55: 0x000c, 0x3a56: 0x000c, 0x3a57: 0x000c, + 0x3a58: 0x000c, 0x3a59: 0x000c, 0x3a5a: 0x000c, 0x3a5b: 0x000c, 0x3a5c: 0x000c, 0x3a5d: 0x000c, + 0x3a5e: 0x000c, 0x3a5f: 0x000c, 0x3a60: 0x000c, 0x3a61: 0x000c, 0x3a62: 0x000c, 0x3a63: 0x000c, + 0x3a64: 0x000c, 0x3a65: 0x000c, 0x3a66: 0x000c, 0x3a67: 0x000c, 0x3a68: 0x000c, 0x3a69: 0x000c, + 0x3a6a: 0x000c, 0x3a6b: 0x000c, 0x3a6c: 0x000c, 0x3a6d: 0x000c, 0x3a6e: 0x000c, 0x3a6f: 0x000c, + 0x3a70: 0x000b, 0x3a71: 0x000b, 0x3a72: 0x000b, 0x3a73: 0x000b, 0x3a74: 0x000b, 0x3a75: 0x000b, + 0x3a76: 0x000b, 0x3a77: 0x000b, 0x3a78: 0x000b, 0x3a79: 0x000b, 0x3a7a: 0x000b, 0x3a7b: 0x000b, + 0x3a7c: 0x000b, 0x3a7d: 0x000b, 0x3a7e: 0x000b, 0x3a7f: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. 
+var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28, + 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30, + // Block 0x5, offset 0x140 + 0x140: 0x31, 0x141: 0x32, 0x142: 0x33, + 0x14d: 0x34, 0x14e: 0x35, + 0x150: 0x36, + 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b, + 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40, + 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47, + 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a, + 0x17e: 0x4b, 0x17f: 0x4c, + // Block 0x6, offset 0x180 + 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, + 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x54, + 0x190: 0x59, 0x191: 0x5a, 0x192: 0x5b, 0x193: 0x5c, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54, + 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5d, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5e, 0x19e: 0x54, 0x19f: 0x5f, + 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x60, 0x1a7: 0x61, + 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x62, 0x1ae: 0x63, 0x1af: 0x64, + 0x1b3: 0x65, 0x1b5: 0x66, 0x1b7: 0x67, + 0x1b8: 0x68, 0x1b9: 0x69, 0x1ba: 0x6a, 0x1bb: 0x6b, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6d, 0x1c2: 0x6e, 0x1c3: 0x6f, 0x1c7: 0x70, + 0x1c8: 0x71, 0x1c9: 0x72, 0x1ca: 0x73, 0x1cb: 0x74, 0x1cd: 0x75, 0x1cf: 0x76, + // Block 0x8, offset 0x200 + 0x237: 0x54, + // Block 0x9, offset 0x240 + 0x252: 0x77, 0x253: 0x78, + 0x258: 0x79, 0x259: 0x7a, 0x25a: 0x7b, 0x25b: 0x7c, 0x25c: 0x7d, 0x25e: 0x7e, + 0x260: 0x7f, 0x261: 0x80, 0x263: 0x81, 0x264: 0x82, 0x265: 0x83, 0x266: 0x84, 0x267: 0x85, + 0x268: 0x86, 0x269: 0x87, 0x26a: 0x88, 0x26b: 0x89, 0x26f: 0x8a, + // Block 0xa, offset 0x280 + 0x2ac: 0x8b, 0x2ad: 0x8c, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8d, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8e, + 0x2b8: 0x8f, 0x2b9: 0x90, 0x2ba: 0x0e, 0x2bb: 0x91, 0x2bc: 0x92, 0x2bd: 0x93, 0x2bf: 0x94, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x95, 0x2c5: 0x54, 0x2c6: 0x96, 0x2c7: 0x97, + 0x2cb: 0x98, 0x2cd: 0x99, + 0x2e0: 0x9a, 0x2e1: 0x9a, 0x2e2: 0x9a, 0x2e3: 0x9a, 0x2e4: 0x9b, 0x2e5: 0x9a, 0x2e6: 0x9a, 0x2e7: 0x9a, + 0x2e8: 0x9c, 0x2e9: 0x9a, 0x2ea: 0x9a, 0x2eb: 0x9d, 0x2ec: 0x9e, 0x2ed: 0x9a, 0x2ee: 0x9a, 0x2ef: 0x9a, + 0x2f0: 0x9a, 0x2f1: 0x9a, 0x2f2: 0x9a, 0x2f3: 0x9a, 0x2f4: 0x9f, 0x2f5: 0x9a, 0x2f6: 0x9a, 0x2f7: 0x9a, + 0x2f8: 0x9a, 0x2f9: 0xa0, 0x2fa: 0x9a, 0x2fb: 0x9a, 0x2fc: 0xa1, 0x2fd: 0xa2, 0x2fe: 0x9a, 0x2ff: 0x9a, + // Block 0xc, offset 0x300 + 0x300: 0xa3, 0x301: 0xa4, 0x302: 0xa5, 0x304: 0xa6, 0x305: 0xa7, 
0x306: 0xa8, 0x307: 0xa9, + 0x308: 0xaa, 0x30b: 0xab, 0x30c: 0x26, 0x30d: 0xac, + 0x310: 0xad, 0x311: 0xae, 0x312: 0xaf, 0x313: 0xb0, 0x316: 0xb1, 0x317: 0xb2, + 0x318: 0xb3, 0x319: 0xb4, 0x31a: 0xb5, 0x31c: 0xb6, + 0x320: 0xb7, + 0x328: 0xb8, 0x329: 0xb9, 0x32a: 0xba, + 0x330: 0xbb, 0x332: 0xbc, 0x334: 0xbd, 0x335: 0xbe, 0x336: 0xbf, + 0x33b: 0xc0, + // Block 0xd, offset 0x340 + 0x36b: 0xc1, 0x36c: 0xc2, + 0x37e: 0xc3, + // Block 0xe, offset 0x380 + 0x3b2: 0xc4, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xc5, 0x3c6: 0xc6, + 0x3c8: 0x54, 0x3c9: 0xc7, 0x3cc: 0x54, 0x3cd: 0xc8, + 0x3db: 0xc9, 0x3dc: 0xca, 0x3dd: 0xcb, 0x3de: 0xcc, 0x3df: 0xcd, + 0x3e8: 0xce, 0x3e9: 0xcf, 0x3ea: 0xd0, + // Block 0x10, offset 0x400 + 0x400: 0xd1, + 0x420: 0x9a, 0x421: 0x9a, 0x422: 0x9a, 0x423: 0xd2, 0x424: 0x9a, 0x425: 0xd3, 0x426: 0x9a, 0x427: 0x9a, + 0x428: 0x9a, 0x429: 0x9a, 0x42a: 0x9a, 0x42b: 0x9a, 0x42c: 0x9a, 0x42d: 0x9a, 0x42e: 0x9a, 0x42f: 0x9a, + 0x430: 0x9a, 0x431: 0xa1, 0x432: 0x0e, 0x433: 0x9a, 0x434: 0x9a, 0x435: 0x9a, 0x436: 0x9a, 0x437: 0x9a, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xd4, 0x43c: 0x9a, 0x43d: 0x9a, 0x43e: 0x9a, 0x43f: 0x9a, + // Block 0x11, offset 0x440 + 0x440: 0xd5, 0x441: 0x54, 0x442: 0xd6, 0x443: 0xd7, 0x444: 0xd8, 0x445: 0xd9, + 0x449: 0xda, 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54, + 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54, + 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xdb, 0x45c: 0x54, 0x45d: 0x6b, 0x45e: 0x54, 0x45f: 0xdc, + 0x460: 0xdd, 0x461: 0xde, 0x462: 0xdf, 0x464: 0xe0, 0x465: 0xe1, 0x466: 0xe2, 0x467: 0xe3, + 0x469: 0xe4, + 0x47f: 0xe5, + // Block 0x12, offset 0x480 + 0x4bf: 0xe5, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xe6, 0x541: 0xe6, 0x542: 0xe6, 0x543: 0xe6, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xe7, + 0x548: 0xe6, 0x549: 0xe6, 0x54a: 0xe6, 0x54b: 0xe6, 0x54c: 0xe6, 0x54d: 0xe6, 0x54e: 0xe6, 0x54f: 0xe6, + 0x550: 0xe6, 0x551: 0xe6, 0x552: 0xe6, 0x553: 0xe6, 0x554: 0xe6, 0x555: 0xe6, 0x556: 0xe6, 0x557: 0xe6, + 0x558: 0xe6, 0x559: 0xe6, 0x55a: 0xe6, 0x55b: 0xe6, 0x55c: 0xe6, 0x55d: 0xe6, 0x55e: 0xe6, 0x55f: 0xe6, + 0x560: 0xe6, 0x561: 0xe6, 0x562: 0xe6, 0x563: 0xe6, 0x564: 0xe6, 0x565: 0xe6, 0x566: 0xe6, 0x567: 0xe6, + 0x568: 0xe6, 0x569: 0xe6, 0x56a: 0xe6, 0x56b: 0xe6, 0x56c: 0xe6, 0x56d: 0xe6, 0x56e: 0xe6, 0x56f: 0xe6, + 0x570: 0xe6, 0x571: 0xe6, 0x572: 0xe6, 0x573: 0xe6, 0x574: 0xe6, 0x575: 0xe6, 0x576: 0xe6, 0x577: 0xe6, + 0x578: 0xe6, 0x579: 0xe6, 0x57a: 0xe6, 0x57b: 0xe6, 0x57c: 0xe6, 0x57d: 0xe6, 0x57e: 0xe6, 0x57f: 0xe6, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 16568 bytes (16KiB); checksum: F50EF68C diff --git a/vendor/golang.org/x/text/unicode/norm/BUILD.bazel b/vendor/golang.org/x/text/unicode/norm/BUILD.bazel index abf488772a014..732fd95a7bd8e 100644 --- a/vendor/golang.org/x/text/unicode/norm/BUILD.bazel +++ b/vendor/golang.org/x/text/unicode/norm/BUILD.bazel @@ -10,6 +10,7 @@ go_library( "normalize.go", "readwriter.go", "tables10.0.0.go", + "tables11.0.0.go", "tables9.0.0.go", "transform.go", "trie.go", diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go 
b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index c48a97b0c28d9..26fbd55a1243d 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.10 +// +build go1.10,!go1.13 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go new file mode 100644 index 0000000000000..7297cce32b79d --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -0,0 +1,7693 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package norm + +import "sync" + +const ( + // Version is the Unicode edition from which the tables are derived. + Version = "11.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. + MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [55]uint8{ + 0, 1, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 84, 91, 103, 107, 118, 122, 129, 130, + 132, 202, 214, 216, 218, 220, 222, 224, + 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x186D + firstCCC = 0x2C9E + endMulti = 0x2F60 + firstLeadingCCC = 0x49AE + firstCCCZeroExcept = 0x4A78 + firstStarterWithNLead = 0x4A9F + lastDecomp = 0x4AA1 + maxDecomp = 0x8000 +) + +// decomps: 19105 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42, + 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, + 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, + 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, + 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, + 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, + // Bytes 100 - 13f + 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42, + 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 
0xC9, 0x9F, + 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9, + 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42, + 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB, + 0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, + 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42, + 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, 0x42, + 0xCA, 0x95, 0x42, 0xCA, 0x9D, 0x42, 0xCA, 0x9F, + 0x42, 0xCA, 0xB9, 0x42, 0xCE, 0x91, 0x42, 0xCE, + 0x92, 0x42, 0xCE, 0x93, 0x42, 0xCE, 0x94, 0x42, + // Bytes 180 - 1bf + 0xCE, 0x95, 0x42, 0xCE, 0x96, 0x42, 0xCE, 0x97, + 0x42, 0xCE, 0x98, 0x42, 0xCE, 0x99, 0x42, 0xCE, + 0x9A, 0x42, 0xCE, 0x9B, 0x42, 0xCE, 0x9C, 0x42, + 0xCE, 0x9D, 0x42, 0xCE, 0x9E, 0x42, 0xCE, 0x9F, + 0x42, 0xCE, 0xA0, 0x42, 0xCE, 0xA1, 0x42, 0xCE, + 0xA3, 0x42, 0xCE, 0xA4, 0x42, 0xCE, 0xA5, 0x42, + 0xCE, 0xA6, 0x42, 0xCE, 0xA7, 0x42, 0xCE, 0xA8, + 0x42, 0xCE, 0xA9, 0x42, 0xCE, 0xB1, 0x42, 0xCE, + // Bytes 1c0 - 1ff + 0xB2, 0x42, 0xCE, 0xB3, 0x42, 0xCE, 0xB4, 0x42, + 0xCE, 0xB5, 0x42, 0xCE, 0xB6, 0x42, 0xCE, 0xB7, + 0x42, 0xCE, 0xB8, 0x42, 0xCE, 0xB9, 0x42, 0xCE, + 0xBA, 0x42, 0xCE, 0xBB, 0x42, 0xCE, 0xBC, 0x42, + 0xCE, 0xBD, 0x42, 0xCE, 0xBE, 0x42, 0xCE, 0xBF, + 0x42, 0xCF, 0x80, 0x42, 0xCF, 0x81, 0x42, 0xCF, + 0x82, 0x42, 0xCF, 0x83, 0x42, 0xCF, 0x84, 0x42, + 0xCF, 0x85, 0x42, 0xCF, 0x86, 0x42, 0xCF, 0x87, + // Bytes 200 - 23f + 0x42, 0xCF, 0x88, 0x42, 0xCF, 0x89, 0x42, 0xCF, + 0x9C, 0x42, 0xCF, 0x9D, 0x42, 0xD0, 0xBD, 0x42, + 0xD1, 0x8A, 0x42, 0xD1, 0x8C, 0x42, 0xD7, 0x90, + 0x42, 0xD7, 0x91, 0x42, 0xD7, 0x92, 0x42, 0xD7, + 0x93, 0x42, 0xD7, 0x94, 0x42, 0xD7, 0x9B, 0x42, + 0xD7, 0x9C, 0x42, 0xD7, 0x9D, 0x42, 0xD7, 0xA2, + 0x42, 0xD7, 0xA8, 0x42, 0xD7, 0xAA, 0x42, 0xD8, + 0xA1, 0x42, 0xD8, 0xA7, 0x42, 0xD8, 0xA8, 0x42, + // Bytes 240 - 27f + 0xD8, 0xA9, 0x42, 0xD8, 0xAA, 0x42, 0xD8, 0xAB, + 0x42, 0xD8, 0xAC, 0x42, 0xD8, 0xAD, 0x42, 0xD8, + 0xAE, 0x42, 0xD8, 0xAF, 0x42, 0xD8, 0xB0, 0x42, + 0xD8, 0xB1, 0x42, 0xD8, 0xB2, 0x42, 0xD8, 0xB3, + 0x42, 0xD8, 0xB4, 0x42, 0xD8, 0xB5, 0x42, 0xD8, + 0xB6, 0x42, 0xD8, 0xB7, 0x42, 0xD8, 0xB8, 0x42, + 0xD8, 0xB9, 0x42, 0xD8, 0xBA, 0x42, 0xD9, 0x81, + 0x42, 0xD9, 0x82, 0x42, 0xD9, 0x83, 0x42, 0xD9, + // Bytes 280 - 2bf + 0x84, 0x42, 0xD9, 0x85, 0x42, 0xD9, 0x86, 0x42, + 0xD9, 0x87, 0x42, 0xD9, 0x88, 0x42, 0xD9, 0x89, + 0x42, 0xD9, 0x8A, 0x42, 0xD9, 0xAE, 0x42, 0xD9, + 0xAF, 0x42, 0xD9, 0xB1, 0x42, 0xD9, 0xB9, 0x42, + 0xD9, 0xBA, 0x42, 0xD9, 0xBB, 0x42, 0xD9, 0xBE, + 0x42, 0xD9, 0xBF, 0x42, 0xDA, 0x80, 0x42, 0xDA, + 0x83, 0x42, 0xDA, 0x84, 0x42, 0xDA, 0x86, 0x42, + 0xDA, 0x87, 0x42, 0xDA, 0x88, 0x42, 0xDA, 0x8C, + // Bytes 2c0 - 2ff + 0x42, 0xDA, 0x8D, 0x42, 0xDA, 0x8E, 0x42, 0xDA, + 0x91, 0x42, 0xDA, 0x98, 0x42, 0xDA, 0xA1, 0x42, + 0xDA, 0xA4, 0x42, 0xDA, 0xA6, 0x42, 0xDA, 0xA9, + 0x42, 0xDA, 0xAD, 0x42, 0xDA, 0xAF, 0x42, 0xDA, + 0xB1, 0x42, 0xDA, 0xB3, 0x42, 0xDA, 0xBA, 0x42, + 0xDA, 0xBB, 0x42, 0xDA, 0xBE, 0x42, 0xDB, 0x81, + 0x42, 0xDB, 0x85, 0x42, 0xDB, 0x86, 0x42, 0xDB, + 0x87, 0x42, 0xDB, 0x88, 0x42, 0xDB, 0x89, 0x42, + // Bytes 300 - 33f + 0xDB, 0x8B, 0x42, 0xDB, 0x8C, 0x42, 0xDB, 0x90, + 0x42, 0xDB, 0x92, 0x43, 0xE0, 0xBC, 0x8B, 0x43, + 0xE1, 0x83, 0x9C, 0x43, 0xE1, 0x84, 0x80, 0x43, + 0xE1, 0x84, 0x81, 0x43, 0xE1, 0x84, 0x82, 0x43, + 0xE1, 0x84, 0x83, 0x43, 0xE1, 0x84, 0x84, 0x43, + 0xE1, 0x84, 0x85, 
0x43, 0xE1, 0x84, 0x86, 0x43, + 0xE1, 0x84, 0x87, 0x43, 0xE1, 0x84, 0x88, 0x43, + 0xE1, 0x84, 0x89, 0x43, 0xE1, 0x84, 0x8A, 0x43, + // Bytes 340 - 37f + 0xE1, 0x84, 0x8B, 0x43, 0xE1, 0x84, 0x8C, 0x43, + 0xE1, 0x84, 0x8D, 0x43, 0xE1, 0x84, 0x8E, 0x43, + 0xE1, 0x84, 0x8F, 0x43, 0xE1, 0x84, 0x90, 0x43, + 0xE1, 0x84, 0x91, 0x43, 0xE1, 0x84, 0x92, 0x43, + 0xE1, 0x84, 0x94, 0x43, 0xE1, 0x84, 0x95, 0x43, + 0xE1, 0x84, 0x9A, 0x43, 0xE1, 0x84, 0x9C, 0x43, + 0xE1, 0x84, 0x9D, 0x43, 0xE1, 0x84, 0x9E, 0x43, + 0xE1, 0x84, 0xA0, 0x43, 0xE1, 0x84, 0xA1, 0x43, + // Bytes 380 - 3bf + 0xE1, 0x84, 0xA2, 0x43, 0xE1, 0x84, 0xA3, 0x43, + 0xE1, 0x84, 0xA7, 0x43, 0xE1, 0x84, 0xA9, 0x43, + 0xE1, 0x84, 0xAB, 0x43, 0xE1, 0x84, 0xAC, 0x43, + 0xE1, 0x84, 0xAD, 0x43, 0xE1, 0x84, 0xAE, 0x43, + 0xE1, 0x84, 0xAF, 0x43, 0xE1, 0x84, 0xB2, 0x43, + 0xE1, 0x84, 0xB6, 0x43, 0xE1, 0x85, 0x80, 0x43, + 0xE1, 0x85, 0x87, 0x43, 0xE1, 0x85, 0x8C, 0x43, + 0xE1, 0x85, 0x97, 0x43, 0xE1, 0x85, 0x98, 0x43, + // Bytes 3c0 - 3ff + 0xE1, 0x85, 0x99, 0x43, 0xE1, 0x85, 0xA0, 0x43, + 0xE1, 0x86, 0x84, 0x43, 0xE1, 0x86, 0x85, 0x43, + 0xE1, 0x86, 0x88, 0x43, 0xE1, 0x86, 0x91, 0x43, + 0xE1, 0x86, 0x92, 0x43, 0xE1, 0x86, 0x94, 0x43, + 0xE1, 0x86, 0x9E, 0x43, 0xE1, 0x86, 0xA1, 0x43, + 0xE1, 0x87, 0x87, 0x43, 0xE1, 0x87, 0x88, 0x43, + 0xE1, 0x87, 0x8C, 0x43, 0xE1, 0x87, 0x8E, 0x43, + 0xE1, 0x87, 0x93, 0x43, 0xE1, 0x87, 0x97, 0x43, + // Bytes 400 - 43f + 0xE1, 0x87, 0x99, 0x43, 0xE1, 0x87, 0x9D, 0x43, + 0xE1, 0x87, 0x9F, 0x43, 0xE1, 0x87, 0xB1, 0x43, + 0xE1, 0x87, 0xB2, 0x43, 0xE1, 0xB4, 0x82, 0x43, + 0xE1, 0xB4, 0x96, 0x43, 0xE1, 0xB4, 0x97, 0x43, + 0xE1, 0xB4, 0x9C, 0x43, 0xE1, 0xB4, 0x9D, 0x43, + 0xE1, 0xB4, 0xA5, 0x43, 0xE1, 0xB5, 0xBB, 0x43, + 0xE1, 0xB6, 0x85, 0x43, 0xE2, 0x80, 0x82, 0x43, + 0xE2, 0x80, 0x83, 0x43, 0xE2, 0x80, 0x90, 0x43, + // Bytes 440 - 47f + 0xE2, 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43, + 0xE2, 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43, + 0xE2, 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43, + 0xE2, 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43, + 0xE2, 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43, + 0xE2, 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43, + 0xE2, 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43, + 0xE2, 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43, + // Bytes 480 - 4bf + 0xE2, 0xB5, 0xA1, 0x43, 0xE3, 0x80, 0x81, 0x43, + 0xE3, 0x80, 0x82, 0x43, 0xE3, 0x80, 0x88, 0x43, + 0xE3, 0x80, 0x89, 0x43, 0xE3, 0x80, 0x8A, 0x43, + 0xE3, 0x80, 0x8B, 0x43, 0xE3, 0x80, 0x8C, 0x43, + 0xE3, 0x80, 0x8D, 0x43, 0xE3, 0x80, 0x8E, 0x43, + 0xE3, 0x80, 0x8F, 0x43, 0xE3, 0x80, 0x90, 0x43, + 0xE3, 0x80, 0x91, 0x43, 0xE3, 0x80, 0x92, 0x43, + 0xE3, 0x80, 0x94, 0x43, 0xE3, 0x80, 0x95, 0x43, + // Bytes 4c0 - 4ff + 0xE3, 0x80, 0x96, 0x43, 0xE3, 0x80, 0x97, 0x43, + 0xE3, 0x82, 0xA1, 0x43, 0xE3, 0x82, 0xA2, 0x43, + 0xE3, 0x82, 0xA3, 0x43, 0xE3, 0x82, 0xA4, 0x43, + 0xE3, 0x82, 0xA5, 0x43, 0xE3, 0x82, 0xA6, 0x43, + 0xE3, 0x82, 0xA7, 0x43, 0xE3, 0x82, 0xA8, 0x43, + 0xE3, 0x82, 0xA9, 0x43, 0xE3, 0x82, 0xAA, 0x43, + 0xE3, 0x82, 0xAB, 0x43, 0xE3, 0x82, 0xAD, 0x43, + 0xE3, 0x82, 0xAF, 0x43, 0xE3, 0x82, 0xB1, 0x43, + // Bytes 500 - 53f + 0xE3, 0x82, 0xB3, 0x43, 0xE3, 0x82, 0xB5, 0x43, + 0xE3, 0x82, 0xB7, 0x43, 0xE3, 0x82, 0xB9, 0x43, + 0xE3, 0x82, 0xBB, 0x43, 0xE3, 0x82, 0xBD, 0x43, + 0xE3, 0x82, 0xBF, 0x43, 0xE3, 0x83, 0x81, 0x43, + 0xE3, 0x83, 0x83, 0x43, 0xE3, 0x83, 0x84, 0x43, + 0xE3, 0x83, 0x86, 0x43, 0xE3, 0x83, 0x88, 0x43, + 0xE3, 0x83, 0x8A, 0x43, 0xE3, 0x83, 0x8B, 0x43, + 0xE3, 0x83, 0x8C, 0x43, 0xE3, 0x83, 0x8D, 0x43, + // Bytes 540 - 57f + 0xE3, 0x83, 0x8E, 0x43, 0xE3, 
0x83, 0x8F, 0x43, + 0xE3, 0x83, 0x92, 0x43, 0xE3, 0x83, 0x95, 0x43, + 0xE3, 0x83, 0x98, 0x43, 0xE3, 0x83, 0x9B, 0x43, + 0xE3, 0x83, 0x9E, 0x43, 0xE3, 0x83, 0x9F, 0x43, + 0xE3, 0x83, 0xA0, 0x43, 0xE3, 0x83, 0xA1, 0x43, + 0xE3, 0x83, 0xA2, 0x43, 0xE3, 0x83, 0xA3, 0x43, + 0xE3, 0x83, 0xA4, 0x43, 0xE3, 0x83, 0xA5, 0x43, + 0xE3, 0x83, 0xA6, 0x43, 0xE3, 0x83, 0xA7, 0x43, + // Bytes 580 - 5bf + 0xE3, 0x83, 0xA8, 0x43, 0xE3, 0x83, 0xA9, 0x43, + 0xE3, 0x83, 0xAA, 0x43, 0xE3, 0x83, 0xAB, 0x43, + 0xE3, 0x83, 0xAC, 0x43, 0xE3, 0x83, 0xAD, 0x43, + 0xE3, 0x83, 0xAF, 0x43, 0xE3, 0x83, 0xB0, 0x43, + 0xE3, 0x83, 0xB1, 0x43, 0xE3, 0x83, 0xB2, 0x43, + 0xE3, 0x83, 0xB3, 0x43, 0xE3, 0x83, 0xBB, 0x43, + 0xE3, 0x83, 0xBC, 0x43, 0xE3, 0x92, 0x9E, 0x43, + 0xE3, 0x92, 0xB9, 0x43, 0xE3, 0x92, 0xBB, 0x43, + // Bytes 5c0 - 5ff + 0xE3, 0x93, 0x9F, 0x43, 0xE3, 0x94, 0x95, 0x43, + 0xE3, 0x9B, 0xAE, 0x43, 0xE3, 0x9B, 0xBC, 0x43, + 0xE3, 0x9E, 0x81, 0x43, 0xE3, 0xA0, 0xAF, 0x43, + 0xE3, 0xA1, 0xA2, 0x43, 0xE3, 0xA1, 0xBC, 0x43, + 0xE3, 0xA3, 0x87, 0x43, 0xE3, 0xA3, 0xA3, 0x43, + 0xE3, 0xA4, 0x9C, 0x43, 0xE3, 0xA4, 0xBA, 0x43, + 0xE3, 0xA8, 0xAE, 0x43, 0xE3, 0xA9, 0xAC, 0x43, + 0xE3, 0xAB, 0xA4, 0x43, 0xE3, 0xAC, 0x88, 0x43, + // Bytes 600 - 63f + 0xE3, 0xAC, 0x99, 0x43, 0xE3, 0xAD, 0x89, 0x43, + 0xE3, 0xAE, 0x9D, 0x43, 0xE3, 0xB0, 0x98, 0x43, + 0xE3, 0xB1, 0x8E, 0x43, 0xE3, 0xB4, 0xB3, 0x43, + 0xE3, 0xB6, 0x96, 0x43, 0xE3, 0xBA, 0xAC, 0x43, + 0xE3, 0xBA, 0xB8, 0x43, 0xE3, 0xBC, 0x9B, 0x43, + 0xE3, 0xBF, 0xBC, 0x43, 0xE4, 0x80, 0x88, 0x43, + 0xE4, 0x80, 0x98, 0x43, 0xE4, 0x80, 0xB9, 0x43, + 0xE4, 0x81, 0x86, 0x43, 0xE4, 0x82, 0x96, 0x43, + // Bytes 640 - 67f + 0xE4, 0x83, 0xA3, 0x43, 0xE4, 0x84, 0xAF, 0x43, + 0xE4, 0x88, 0x82, 0x43, 0xE4, 0x88, 0xA7, 0x43, + 0xE4, 0x8A, 0xA0, 0x43, 0xE4, 0x8C, 0x81, 0x43, + 0xE4, 0x8C, 0xB4, 0x43, 0xE4, 0x8D, 0x99, 0x43, + 0xE4, 0x8F, 0x95, 0x43, 0xE4, 0x8F, 0x99, 0x43, + 0xE4, 0x90, 0x8B, 0x43, 0xE4, 0x91, 0xAB, 0x43, + 0xE4, 0x94, 0xAB, 0x43, 0xE4, 0x95, 0x9D, 0x43, + 0xE4, 0x95, 0xA1, 0x43, 0xE4, 0x95, 0xAB, 0x43, + // Bytes 680 - 6bf + 0xE4, 0x97, 0x97, 0x43, 0xE4, 0x97, 0xB9, 0x43, + 0xE4, 0x98, 0xB5, 0x43, 0xE4, 0x9A, 0xBE, 0x43, + 0xE4, 0x9B, 0x87, 0x43, 0xE4, 0xA6, 0x95, 0x43, + 0xE4, 0xA7, 0xA6, 0x43, 0xE4, 0xA9, 0xAE, 0x43, + 0xE4, 0xA9, 0xB6, 0x43, 0xE4, 0xAA, 0xB2, 0x43, + 0xE4, 0xAC, 0xB3, 0x43, 0xE4, 0xAF, 0x8E, 0x43, + 0xE4, 0xB3, 0x8E, 0x43, 0xE4, 0xB3, 0xAD, 0x43, + 0xE4, 0xB3, 0xB8, 0x43, 0xE4, 0xB5, 0x96, 0x43, + // Bytes 6c0 - 6ff + 0xE4, 0xB8, 0x80, 0x43, 0xE4, 0xB8, 0x81, 0x43, + 0xE4, 0xB8, 0x83, 0x43, 0xE4, 0xB8, 0x89, 0x43, + 0xE4, 0xB8, 0x8A, 0x43, 0xE4, 0xB8, 0x8B, 0x43, + 0xE4, 0xB8, 0x8D, 0x43, 0xE4, 0xB8, 0x99, 0x43, + 0xE4, 0xB8, 0xA6, 0x43, 0xE4, 0xB8, 0xA8, 0x43, + 0xE4, 0xB8, 0xAD, 0x43, 0xE4, 0xB8, 0xB2, 0x43, + 0xE4, 0xB8, 0xB6, 0x43, 0xE4, 0xB8, 0xB8, 0x43, + 0xE4, 0xB8, 0xB9, 0x43, 0xE4, 0xB8, 0xBD, 0x43, + // Bytes 700 - 73f + 0xE4, 0xB8, 0xBF, 0x43, 0xE4, 0xB9, 0x81, 0x43, + 0xE4, 0xB9, 0x99, 0x43, 0xE4, 0xB9, 0x9D, 0x43, + 0xE4, 0xBA, 0x82, 0x43, 0xE4, 0xBA, 0x85, 0x43, + 0xE4, 0xBA, 0x86, 0x43, 0xE4, 0xBA, 0x8C, 0x43, + 0xE4, 0xBA, 0x94, 0x43, 0xE4, 0xBA, 0xA0, 0x43, + 0xE4, 0xBA, 0xA4, 0x43, 0xE4, 0xBA, 0xAE, 0x43, + 0xE4, 0xBA, 0xBA, 0x43, 0xE4, 0xBB, 0x80, 0x43, + 0xE4, 0xBB, 0x8C, 0x43, 0xE4, 0xBB, 0xA4, 0x43, + // Bytes 740 - 77f + 0xE4, 0xBC, 0x81, 0x43, 0xE4, 0xBC, 0x91, 0x43, + 0xE4, 0xBD, 0xA0, 0x43, 0xE4, 0xBE, 0x80, 0x43, + 0xE4, 0xBE, 0x86, 0x43, 0xE4, 0xBE, 0x8B, 0x43, + 0xE4, 0xBE, 0xAE, 0x43, 0xE4, 0xBE, 0xBB, 0x43, + 0xE4, 0xBE, 
0xBF, 0x43, 0xE5, 0x80, 0x82, 0x43, + 0xE5, 0x80, 0xAB, 0x43, 0xE5, 0x81, 0xBA, 0x43, + 0xE5, 0x82, 0x99, 0x43, 0xE5, 0x83, 0x8F, 0x43, + 0xE5, 0x83, 0x9A, 0x43, 0xE5, 0x83, 0xA7, 0x43, + // Bytes 780 - 7bf + 0xE5, 0x84, 0xAA, 0x43, 0xE5, 0x84, 0xBF, 0x43, + 0xE5, 0x85, 0x80, 0x43, 0xE5, 0x85, 0x85, 0x43, + 0xE5, 0x85, 0x8D, 0x43, 0xE5, 0x85, 0x94, 0x43, + 0xE5, 0x85, 0xA4, 0x43, 0xE5, 0x85, 0xA5, 0x43, + 0xE5, 0x85, 0xA7, 0x43, 0xE5, 0x85, 0xA8, 0x43, + 0xE5, 0x85, 0xA9, 0x43, 0xE5, 0x85, 0xAB, 0x43, + 0xE5, 0x85, 0xAD, 0x43, 0xE5, 0x85, 0xB7, 0x43, + 0xE5, 0x86, 0x80, 0x43, 0xE5, 0x86, 0x82, 0x43, + // Bytes 7c0 - 7ff + 0xE5, 0x86, 0x8D, 0x43, 0xE5, 0x86, 0x92, 0x43, + 0xE5, 0x86, 0x95, 0x43, 0xE5, 0x86, 0x96, 0x43, + 0xE5, 0x86, 0x97, 0x43, 0xE5, 0x86, 0x99, 0x43, + 0xE5, 0x86, 0xA4, 0x43, 0xE5, 0x86, 0xAB, 0x43, + 0xE5, 0x86, 0xAC, 0x43, 0xE5, 0x86, 0xB5, 0x43, + 0xE5, 0x86, 0xB7, 0x43, 0xE5, 0x87, 0x89, 0x43, + 0xE5, 0x87, 0x8C, 0x43, 0xE5, 0x87, 0x9C, 0x43, + 0xE5, 0x87, 0x9E, 0x43, 0xE5, 0x87, 0xA0, 0x43, + // Bytes 800 - 83f + 0xE5, 0x87, 0xB5, 0x43, 0xE5, 0x88, 0x80, 0x43, + 0xE5, 0x88, 0x83, 0x43, 0xE5, 0x88, 0x87, 0x43, + 0xE5, 0x88, 0x97, 0x43, 0xE5, 0x88, 0x9D, 0x43, + 0xE5, 0x88, 0xA9, 0x43, 0xE5, 0x88, 0xBA, 0x43, + 0xE5, 0x88, 0xBB, 0x43, 0xE5, 0x89, 0x86, 0x43, + 0xE5, 0x89, 0x8D, 0x43, 0xE5, 0x89, 0xB2, 0x43, + 0xE5, 0x89, 0xB7, 0x43, 0xE5, 0x8A, 0x89, 0x43, + 0xE5, 0x8A, 0x9B, 0x43, 0xE5, 0x8A, 0xA3, 0x43, + // Bytes 840 - 87f + 0xE5, 0x8A, 0xB3, 0x43, 0xE5, 0x8A, 0xB4, 0x43, + 0xE5, 0x8B, 0x87, 0x43, 0xE5, 0x8B, 0x89, 0x43, + 0xE5, 0x8B, 0x92, 0x43, 0xE5, 0x8B, 0x9E, 0x43, + 0xE5, 0x8B, 0xA4, 0x43, 0xE5, 0x8B, 0xB5, 0x43, + 0xE5, 0x8B, 0xB9, 0x43, 0xE5, 0x8B, 0xBA, 0x43, + 0xE5, 0x8C, 0x85, 0x43, 0xE5, 0x8C, 0x86, 0x43, + 0xE5, 0x8C, 0x95, 0x43, 0xE5, 0x8C, 0x97, 0x43, + 0xE5, 0x8C, 0x9A, 0x43, 0xE5, 0x8C, 0xB8, 0x43, + // Bytes 880 - 8bf + 0xE5, 0x8C, 0xBB, 0x43, 0xE5, 0x8C, 0xBF, 0x43, + 0xE5, 0x8D, 0x81, 0x43, 0xE5, 0x8D, 0x84, 0x43, + 0xE5, 0x8D, 0x85, 0x43, 0xE5, 0x8D, 0x89, 0x43, + 0xE5, 0x8D, 0x91, 0x43, 0xE5, 0x8D, 0x94, 0x43, + 0xE5, 0x8D, 0x9A, 0x43, 0xE5, 0x8D, 0x9C, 0x43, + 0xE5, 0x8D, 0xA9, 0x43, 0xE5, 0x8D, 0xB0, 0x43, + 0xE5, 0x8D, 0xB3, 0x43, 0xE5, 0x8D, 0xB5, 0x43, + 0xE5, 0x8D, 0xBD, 0x43, 0xE5, 0x8D, 0xBF, 0x43, + // Bytes 8c0 - 8ff + 0xE5, 0x8E, 0x82, 0x43, 0xE5, 0x8E, 0xB6, 0x43, + 0xE5, 0x8F, 0x83, 0x43, 0xE5, 0x8F, 0x88, 0x43, + 0xE5, 0x8F, 0x8A, 0x43, 0xE5, 0x8F, 0x8C, 0x43, + 0xE5, 0x8F, 0x9F, 0x43, 0xE5, 0x8F, 0xA3, 0x43, + 0xE5, 0x8F, 0xA5, 0x43, 0xE5, 0x8F, 0xAB, 0x43, + 0xE5, 0x8F, 0xAF, 0x43, 0xE5, 0x8F, 0xB1, 0x43, + 0xE5, 0x8F, 0xB3, 0x43, 0xE5, 0x90, 0x86, 0x43, + 0xE5, 0x90, 0x88, 0x43, 0xE5, 0x90, 0x8D, 0x43, + // Bytes 900 - 93f + 0xE5, 0x90, 0x8F, 0x43, 0xE5, 0x90, 0x9D, 0x43, + 0xE5, 0x90, 0xB8, 0x43, 0xE5, 0x90, 0xB9, 0x43, + 0xE5, 0x91, 0x82, 0x43, 0xE5, 0x91, 0x88, 0x43, + 0xE5, 0x91, 0xA8, 0x43, 0xE5, 0x92, 0x9E, 0x43, + 0xE5, 0x92, 0xA2, 0x43, 0xE5, 0x92, 0xBD, 0x43, + 0xE5, 0x93, 0xB6, 0x43, 0xE5, 0x94, 0x90, 0x43, + 0xE5, 0x95, 0x8F, 0x43, 0xE5, 0x95, 0x93, 0x43, + 0xE5, 0x95, 0x95, 0x43, 0xE5, 0x95, 0xA3, 0x43, + // Bytes 940 - 97f + 0xE5, 0x96, 0x84, 0x43, 0xE5, 0x96, 0x87, 0x43, + 0xE5, 0x96, 0x99, 0x43, 0xE5, 0x96, 0x9D, 0x43, + 0xE5, 0x96, 0xAB, 0x43, 0xE5, 0x96, 0xB3, 0x43, + 0xE5, 0x96, 0xB6, 0x43, 0xE5, 0x97, 0x80, 0x43, + 0xE5, 0x97, 0x82, 0x43, 0xE5, 0x97, 0xA2, 0x43, + 0xE5, 0x98, 0x86, 0x43, 0xE5, 0x99, 0x91, 0x43, + 0xE5, 0x99, 0xA8, 0x43, 0xE5, 0x99, 0xB4, 0x43, + 0xE5, 0x9B, 0x97, 0x43, 0xE5, 0x9B, 0x9B, 0x43, 
+ // Bytes 980 - 9bf + 0xE5, 0x9B, 0xB9, 0x43, 0xE5, 0x9C, 0x96, 0x43, + 0xE5, 0x9C, 0x97, 0x43, 0xE5, 0x9C, 0x9F, 0x43, + 0xE5, 0x9C, 0xB0, 0x43, 0xE5, 0x9E, 0x8B, 0x43, + 0xE5, 0x9F, 0x8E, 0x43, 0xE5, 0x9F, 0xB4, 0x43, + 0xE5, 0xA0, 0x8D, 0x43, 0xE5, 0xA0, 0xB1, 0x43, + 0xE5, 0xA0, 0xB2, 0x43, 0xE5, 0xA1, 0x80, 0x43, + 0xE5, 0xA1, 0x9A, 0x43, 0xE5, 0xA1, 0x9E, 0x43, + 0xE5, 0xA2, 0xA8, 0x43, 0xE5, 0xA2, 0xAC, 0x43, + // Bytes 9c0 - 9ff + 0xE5, 0xA2, 0xB3, 0x43, 0xE5, 0xA3, 0x98, 0x43, + 0xE5, 0xA3, 0x9F, 0x43, 0xE5, 0xA3, 0xAB, 0x43, + 0xE5, 0xA3, 0xAE, 0x43, 0xE5, 0xA3, 0xB0, 0x43, + 0xE5, 0xA3, 0xB2, 0x43, 0xE5, 0xA3, 0xB7, 0x43, + 0xE5, 0xA4, 0x82, 0x43, 0xE5, 0xA4, 0x86, 0x43, + 0xE5, 0xA4, 0x8A, 0x43, 0xE5, 0xA4, 0x95, 0x43, + 0xE5, 0xA4, 0x9A, 0x43, 0xE5, 0xA4, 0x9C, 0x43, + 0xE5, 0xA4, 0xA2, 0x43, 0xE5, 0xA4, 0xA7, 0x43, + // Bytes a00 - a3f + 0xE5, 0xA4, 0xA9, 0x43, 0xE5, 0xA5, 0x84, 0x43, + 0xE5, 0xA5, 0x88, 0x43, 0xE5, 0xA5, 0x91, 0x43, + 0xE5, 0xA5, 0x94, 0x43, 0xE5, 0xA5, 0xA2, 0x43, + 0xE5, 0xA5, 0xB3, 0x43, 0xE5, 0xA7, 0x98, 0x43, + 0xE5, 0xA7, 0xAC, 0x43, 0xE5, 0xA8, 0x9B, 0x43, + 0xE5, 0xA8, 0xA7, 0x43, 0xE5, 0xA9, 0xA2, 0x43, + 0xE5, 0xA9, 0xA6, 0x43, 0xE5, 0xAA, 0xB5, 0x43, + 0xE5, 0xAC, 0x88, 0x43, 0xE5, 0xAC, 0xA8, 0x43, + // Bytes a40 - a7f + 0xE5, 0xAC, 0xBE, 0x43, 0xE5, 0xAD, 0x90, 0x43, + 0xE5, 0xAD, 0x97, 0x43, 0xE5, 0xAD, 0xA6, 0x43, + 0xE5, 0xAE, 0x80, 0x43, 0xE5, 0xAE, 0x85, 0x43, + 0xE5, 0xAE, 0x97, 0x43, 0xE5, 0xAF, 0x83, 0x43, + 0xE5, 0xAF, 0x98, 0x43, 0xE5, 0xAF, 0xA7, 0x43, + 0xE5, 0xAF, 0xAE, 0x43, 0xE5, 0xAF, 0xB3, 0x43, + 0xE5, 0xAF, 0xB8, 0x43, 0xE5, 0xAF, 0xBF, 0x43, + 0xE5, 0xB0, 0x86, 0x43, 0xE5, 0xB0, 0x8F, 0x43, + // Bytes a80 - abf + 0xE5, 0xB0, 0xA2, 0x43, 0xE5, 0xB0, 0xB8, 0x43, + 0xE5, 0xB0, 0xBF, 0x43, 0xE5, 0xB1, 0xA0, 0x43, + 0xE5, 0xB1, 0xA2, 0x43, 0xE5, 0xB1, 0xA4, 0x43, + 0xE5, 0xB1, 0xA5, 0x43, 0xE5, 0xB1, 0xAE, 0x43, + 0xE5, 0xB1, 0xB1, 0x43, 0xE5, 0xB2, 0x8D, 0x43, + 0xE5, 0xB3, 0x80, 0x43, 0xE5, 0xB4, 0x99, 0x43, + 0xE5, 0xB5, 0x83, 0x43, 0xE5, 0xB5, 0x90, 0x43, + 0xE5, 0xB5, 0xAB, 0x43, 0xE5, 0xB5, 0xAE, 0x43, + // Bytes ac0 - aff + 0xE5, 0xB5, 0xBC, 0x43, 0xE5, 0xB6, 0xB2, 0x43, + 0xE5, 0xB6, 0xBA, 0x43, 0xE5, 0xB7, 0x9B, 0x43, + 0xE5, 0xB7, 0xA1, 0x43, 0xE5, 0xB7, 0xA2, 0x43, + 0xE5, 0xB7, 0xA5, 0x43, 0xE5, 0xB7, 0xA6, 0x43, + 0xE5, 0xB7, 0xB1, 0x43, 0xE5, 0xB7, 0xBD, 0x43, + 0xE5, 0xB7, 0xBE, 0x43, 0xE5, 0xB8, 0xA8, 0x43, + 0xE5, 0xB8, 0xBD, 0x43, 0xE5, 0xB9, 0xA9, 0x43, + 0xE5, 0xB9, 0xB2, 0x43, 0xE5, 0xB9, 0xB4, 0x43, + // Bytes b00 - b3f + 0xE5, 0xB9, 0xBA, 0x43, 0xE5, 0xB9, 0xBC, 0x43, + 0xE5, 0xB9, 0xBF, 0x43, 0xE5, 0xBA, 0xA6, 0x43, + 0xE5, 0xBA, 0xB0, 0x43, 0xE5, 0xBA, 0xB3, 0x43, + 0xE5, 0xBA, 0xB6, 0x43, 0xE5, 0xBB, 0x89, 0x43, + 0xE5, 0xBB, 0x8A, 0x43, 0xE5, 0xBB, 0x92, 0x43, + 0xE5, 0xBB, 0x93, 0x43, 0xE5, 0xBB, 0x99, 0x43, + 0xE5, 0xBB, 0xAC, 0x43, 0xE5, 0xBB, 0xB4, 0x43, + 0xE5, 0xBB, 0xBE, 0x43, 0xE5, 0xBC, 0x84, 0x43, + // Bytes b40 - b7f + 0xE5, 0xBC, 0x8B, 0x43, 0xE5, 0xBC, 0x93, 0x43, + 0xE5, 0xBC, 0xA2, 0x43, 0xE5, 0xBD, 0x90, 0x43, + 0xE5, 0xBD, 0x93, 0x43, 0xE5, 0xBD, 0xA1, 0x43, + 0xE5, 0xBD, 0xA2, 0x43, 0xE5, 0xBD, 0xA9, 0x43, + 0xE5, 0xBD, 0xAB, 0x43, 0xE5, 0xBD, 0xB3, 0x43, + 0xE5, 0xBE, 0x8B, 0x43, 0xE5, 0xBE, 0x8C, 0x43, + 0xE5, 0xBE, 0x97, 0x43, 0xE5, 0xBE, 0x9A, 0x43, + 0xE5, 0xBE, 0xA9, 0x43, 0xE5, 0xBE, 0xAD, 0x43, + // Bytes b80 - bbf + 0xE5, 0xBF, 0x83, 0x43, 0xE5, 0xBF, 0x8D, 0x43, + 0xE5, 0xBF, 0x97, 0x43, 0xE5, 0xBF, 0xB5, 0x43, + 0xE5, 0xBF, 0xB9, 0x43, 0xE6, 0x80, 0x92, 0x43, + 0xE6, 0x80, 
0x9C, 0x43, 0xE6, 0x81, 0xB5, 0x43, + 0xE6, 0x82, 0x81, 0x43, 0xE6, 0x82, 0x94, 0x43, + 0xE6, 0x83, 0x87, 0x43, 0xE6, 0x83, 0x98, 0x43, + 0xE6, 0x83, 0xA1, 0x43, 0xE6, 0x84, 0x88, 0x43, + 0xE6, 0x85, 0x84, 0x43, 0xE6, 0x85, 0x88, 0x43, + // Bytes bc0 - bff + 0xE6, 0x85, 0x8C, 0x43, 0xE6, 0x85, 0x8E, 0x43, + 0xE6, 0x85, 0xA0, 0x43, 0xE6, 0x85, 0xA8, 0x43, + 0xE6, 0x85, 0xBA, 0x43, 0xE6, 0x86, 0x8E, 0x43, + 0xE6, 0x86, 0x90, 0x43, 0xE6, 0x86, 0xA4, 0x43, + 0xE6, 0x86, 0xAF, 0x43, 0xE6, 0x86, 0xB2, 0x43, + 0xE6, 0x87, 0x9E, 0x43, 0xE6, 0x87, 0xB2, 0x43, + 0xE6, 0x87, 0xB6, 0x43, 0xE6, 0x88, 0x80, 0x43, + 0xE6, 0x88, 0x88, 0x43, 0xE6, 0x88, 0x90, 0x43, + // Bytes c00 - c3f + 0xE6, 0x88, 0x9B, 0x43, 0xE6, 0x88, 0xAE, 0x43, + 0xE6, 0x88, 0xB4, 0x43, 0xE6, 0x88, 0xB6, 0x43, + 0xE6, 0x89, 0x8B, 0x43, 0xE6, 0x89, 0x93, 0x43, + 0xE6, 0x89, 0x9D, 0x43, 0xE6, 0x8A, 0x95, 0x43, + 0xE6, 0x8A, 0xB1, 0x43, 0xE6, 0x8B, 0x89, 0x43, + 0xE6, 0x8B, 0x8F, 0x43, 0xE6, 0x8B, 0x93, 0x43, + 0xE6, 0x8B, 0x94, 0x43, 0xE6, 0x8B, 0xBC, 0x43, + 0xE6, 0x8B, 0xBE, 0x43, 0xE6, 0x8C, 0x87, 0x43, + // Bytes c40 - c7f + 0xE6, 0x8C, 0xBD, 0x43, 0xE6, 0x8D, 0x90, 0x43, + 0xE6, 0x8D, 0x95, 0x43, 0xE6, 0x8D, 0xA8, 0x43, + 0xE6, 0x8D, 0xBB, 0x43, 0xE6, 0x8E, 0x83, 0x43, + 0xE6, 0x8E, 0xA0, 0x43, 0xE6, 0x8E, 0xA9, 0x43, + 0xE6, 0x8F, 0x84, 0x43, 0xE6, 0x8F, 0x85, 0x43, + 0xE6, 0x8F, 0xA4, 0x43, 0xE6, 0x90, 0x9C, 0x43, + 0xE6, 0x90, 0xA2, 0x43, 0xE6, 0x91, 0x92, 0x43, + 0xE6, 0x91, 0xA9, 0x43, 0xE6, 0x91, 0xB7, 0x43, + // Bytes c80 - cbf + 0xE6, 0x91, 0xBE, 0x43, 0xE6, 0x92, 0x9A, 0x43, + 0xE6, 0x92, 0x9D, 0x43, 0xE6, 0x93, 0x84, 0x43, + 0xE6, 0x94, 0xAF, 0x43, 0xE6, 0x94, 0xB4, 0x43, + 0xE6, 0x95, 0x8F, 0x43, 0xE6, 0x95, 0x96, 0x43, + 0xE6, 0x95, 0xAC, 0x43, 0xE6, 0x95, 0xB8, 0x43, + 0xE6, 0x96, 0x87, 0x43, 0xE6, 0x96, 0x97, 0x43, + 0xE6, 0x96, 0x99, 0x43, 0xE6, 0x96, 0xA4, 0x43, + 0xE6, 0x96, 0xB0, 0x43, 0xE6, 0x96, 0xB9, 0x43, + // Bytes cc0 - cff + 0xE6, 0x97, 0x85, 0x43, 0xE6, 0x97, 0xA0, 0x43, + 0xE6, 0x97, 0xA2, 0x43, 0xE6, 0x97, 0xA3, 0x43, + 0xE6, 0x97, 0xA5, 0x43, 0xE6, 0x98, 0x93, 0x43, + 0xE6, 0x98, 0xA0, 0x43, 0xE6, 0x99, 0x89, 0x43, + 0xE6, 0x99, 0xB4, 0x43, 0xE6, 0x9A, 0x88, 0x43, + 0xE6, 0x9A, 0x91, 0x43, 0xE6, 0x9A, 0x9C, 0x43, + 0xE6, 0x9A, 0xB4, 0x43, 0xE6, 0x9B, 0x86, 0x43, + 0xE6, 0x9B, 0xB0, 0x43, 0xE6, 0x9B, 0xB4, 0x43, + // Bytes d00 - d3f + 0xE6, 0x9B, 0xB8, 0x43, 0xE6, 0x9C, 0x80, 0x43, + 0xE6, 0x9C, 0x88, 0x43, 0xE6, 0x9C, 0x89, 0x43, + 0xE6, 0x9C, 0x97, 0x43, 0xE6, 0x9C, 0x9B, 0x43, + 0xE6, 0x9C, 0xA1, 0x43, 0xE6, 0x9C, 0xA8, 0x43, + 0xE6, 0x9D, 0x8E, 0x43, 0xE6, 0x9D, 0x93, 0x43, + 0xE6, 0x9D, 0x96, 0x43, 0xE6, 0x9D, 0x9E, 0x43, + 0xE6, 0x9D, 0xBB, 0x43, 0xE6, 0x9E, 0x85, 0x43, + 0xE6, 0x9E, 0x97, 0x43, 0xE6, 0x9F, 0xB3, 0x43, + // Bytes d40 - d7f + 0xE6, 0x9F, 0xBA, 0x43, 0xE6, 0xA0, 0x97, 0x43, + 0xE6, 0xA0, 0x9F, 0x43, 0xE6, 0xA0, 0xAA, 0x43, + 0xE6, 0xA1, 0x92, 0x43, 0xE6, 0xA2, 0x81, 0x43, + 0xE6, 0xA2, 0x85, 0x43, 0xE6, 0xA2, 0x8E, 0x43, + 0xE6, 0xA2, 0xA8, 0x43, 0xE6, 0xA4, 0x94, 0x43, + 0xE6, 0xA5, 0x82, 0x43, 0xE6, 0xA6, 0xA3, 0x43, + 0xE6, 0xA7, 0xAA, 0x43, 0xE6, 0xA8, 0x82, 0x43, + 0xE6, 0xA8, 0x93, 0x43, 0xE6, 0xAA, 0xA8, 0x43, + // Bytes d80 - dbf + 0xE6, 0xAB, 0x93, 0x43, 0xE6, 0xAB, 0x9B, 0x43, + 0xE6, 0xAC, 0x84, 0x43, 0xE6, 0xAC, 0xA0, 0x43, + 0xE6, 0xAC, 0xA1, 0x43, 0xE6, 0xAD, 0x94, 0x43, + 0xE6, 0xAD, 0xA2, 0x43, 0xE6, 0xAD, 0xA3, 0x43, + 0xE6, 0xAD, 0xB2, 0x43, 0xE6, 0xAD, 0xB7, 0x43, + 0xE6, 0xAD, 0xB9, 0x43, 0xE6, 0xAE, 0x9F, 0x43, + 0xE6, 0xAE, 0xAE, 0x43, 0xE6, 0xAE, 0xB3, 0x43, 
+ 0xE6, 0xAE, 0xBA, 0x43, 0xE6, 0xAE, 0xBB, 0x43, + // Bytes dc0 - dff + 0xE6, 0xAF, 0x8B, 0x43, 0xE6, 0xAF, 0x8D, 0x43, + 0xE6, 0xAF, 0x94, 0x43, 0xE6, 0xAF, 0x9B, 0x43, + 0xE6, 0xB0, 0x8F, 0x43, 0xE6, 0xB0, 0x94, 0x43, + 0xE6, 0xB0, 0xB4, 0x43, 0xE6, 0xB1, 0x8E, 0x43, + 0xE6, 0xB1, 0xA7, 0x43, 0xE6, 0xB2, 0x88, 0x43, + 0xE6, 0xB2, 0xBF, 0x43, 0xE6, 0xB3, 0x8C, 0x43, + 0xE6, 0xB3, 0x8D, 0x43, 0xE6, 0xB3, 0xA5, 0x43, + 0xE6, 0xB3, 0xA8, 0x43, 0xE6, 0xB4, 0x96, 0x43, + // Bytes e00 - e3f + 0xE6, 0xB4, 0x9B, 0x43, 0xE6, 0xB4, 0x9E, 0x43, + 0xE6, 0xB4, 0xB4, 0x43, 0xE6, 0xB4, 0xBE, 0x43, + 0xE6, 0xB5, 0x81, 0x43, 0xE6, 0xB5, 0xA9, 0x43, + 0xE6, 0xB5, 0xAA, 0x43, 0xE6, 0xB5, 0xB7, 0x43, + 0xE6, 0xB5, 0xB8, 0x43, 0xE6, 0xB6, 0x85, 0x43, + 0xE6, 0xB7, 0x8B, 0x43, 0xE6, 0xB7, 0x9A, 0x43, + 0xE6, 0xB7, 0xAA, 0x43, 0xE6, 0xB7, 0xB9, 0x43, + 0xE6, 0xB8, 0x9A, 0x43, 0xE6, 0xB8, 0xAF, 0x43, + // Bytes e40 - e7f + 0xE6, 0xB9, 0xAE, 0x43, 0xE6, 0xBA, 0x80, 0x43, + 0xE6, 0xBA, 0x9C, 0x43, 0xE6, 0xBA, 0xBA, 0x43, + 0xE6, 0xBB, 0x87, 0x43, 0xE6, 0xBB, 0x8B, 0x43, + 0xE6, 0xBB, 0x91, 0x43, 0xE6, 0xBB, 0x9B, 0x43, + 0xE6, 0xBC, 0x8F, 0x43, 0xE6, 0xBC, 0x94, 0x43, + 0xE6, 0xBC, 0xA2, 0x43, 0xE6, 0xBC, 0xA3, 0x43, + 0xE6, 0xBD, 0xAE, 0x43, 0xE6, 0xBF, 0x86, 0x43, + 0xE6, 0xBF, 0xAB, 0x43, 0xE6, 0xBF, 0xBE, 0x43, + // Bytes e80 - ebf + 0xE7, 0x80, 0x9B, 0x43, 0xE7, 0x80, 0x9E, 0x43, + 0xE7, 0x80, 0xB9, 0x43, 0xE7, 0x81, 0x8A, 0x43, + 0xE7, 0x81, 0xAB, 0x43, 0xE7, 0x81, 0xB0, 0x43, + 0xE7, 0x81, 0xB7, 0x43, 0xE7, 0x81, 0xBD, 0x43, + 0xE7, 0x82, 0x99, 0x43, 0xE7, 0x82, 0xAD, 0x43, + 0xE7, 0x83, 0x88, 0x43, 0xE7, 0x83, 0x99, 0x43, + 0xE7, 0x84, 0xA1, 0x43, 0xE7, 0x85, 0x85, 0x43, + 0xE7, 0x85, 0x89, 0x43, 0xE7, 0x85, 0xAE, 0x43, + // Bytes ec0 - eff + 0xE7, 0x86, 0x9C, 0x43, 0xE7, 0x87, 0x8E, 0x43, + 0xE7, 0x87, 0x90, 0x43, 0xE7, 0x88, 0x90, 0x43, + 0xE7, 0x88, 0x9B, 0x43, 0xE7, 0x88, 0xA8, 0x43, + 0xE7, 0x88, 0xAA, 0x43, 0xE7, 0x88, 0xAB, 0x43, + 0xE7, 0x88, 0xB5, 0x43, 0xE7, 0x88, 0xB6, 0x43, + 0xE7, 0x88, 0xBB, 0x43, 0xE7, 0x88, 0xBF, 0x43, + 0xE7, 0x89, 0x87, 0x43, 0xE7, 0x89, 0x90, 0x43, + 0xE7, 0x89, 0x99, 0x43, 0xE7, 0x89, 0x9B, 0x43, + // Bytes f00 - f3f + 0xE7, 0x89, 0xA2, 0x43, 0xE7, 0x89, 0xB9, 0x43, + 0xE7, 0x8A, 0x80, 0x43, 0xE7, 0x8A, 0x95, 0x43, + 0xE7, 0x8A, 0xAC, 0x43, 0xE7, 0x8A, 0xAF, 0x43, + 0xE7, 0x8B, 0x80, 0x43, 0xE7, 0x8B, 0xBC, 0x43, + 0xE7, 0x8C, 0xAA, 0x43, 0xE7, 0x8D, 0xB5, 0x43, + 0xE7, 0x8D, 0xBA, 0x43, 0xE7, 0x8E, 0x84, 0x43, + 0xE7, 0x8E, 0x87, 0x43, 0xE7, 0x8E, 0x89, 0x43, + 0xE7, 0x8E, 0x8B, 0x43, 0xE7, 0x8E, 0xA5, 0x43, + // Bytes f40 - f7f + 0xE7, 0x8E, 0xB2, 0x43, 0xE7, 0x8F, 0x9E, 0x43, + 0xE7, 0x90, 0x86, 0x43, 0xE7, 0x90, 0x89, 0x43, + 0xE7, 0x90, 0xA2, 0x43, 0xE7, 0x91, 0x87, 0x43, + 0xE7, 0x91, 0x9C, 0x43, 0xE7, 0x91, 0xA9, 0x43, + 0xE7, 0x91, 0xB1, 0x43, 0xE7, 0x92, 0x85, 0x43, + 0xE7, 0x92, 0x89, 0x43, 0xE7, 0x92, 0x98, 0x43, + 0xE7, 0x93, 0x8A, 0x43, 0xE7, 0x93, 0x9C, 0x43, + 0xE7, 0x93, 0xA6, 0x43, 0xE7, 0x94, 0x86, 0x43, + // Bytes f80 - fbf + 0xE7, 0x94, 0x98, 0x43, 0xE7, 0x94, 0x9F, 0x43, + 0xE7, 0x94, 0xA4, 0x43, 0xE7, 0x94, 0xA8, 0x43, + 0xE7, 0x94, 0xB0, 0x43, 0xE7, 0x94, 0xB2, 0x43, + 0xE7, 0x94, 0xB3, 0x43, 0xE7, 0x94, 0xB7, 0x43, + 0xE7, 0x94, 0xBB, 0x43, 0xE7, 0x94, 0xBE, 0x43, + 0xE7, 0x95, 0x99, 0x43, 0xE7, 0x95, 0xA5, 0x43, + 0xE7, 0x95, 0xB0, 0x43, 0xE7, 0x96, 0x8B, 0x43, + 0xE7, 0x96, 0x92, 0x43, 0xE7, 0x97, 0xA2, 0x43, + // Bytes fc0 - fff + 0xE7, 0x98, 0x90, 0x43, 0xE7, 0x98, 0x9D, 0x43, + 0xE7, 0x98, 0x9F, 0x43, 0xE7, 0x99, 0x82, 0x43, + 0xE7, 0x99, 
0xA9, 0x43, 0xE7, 0x99, 0xB6, 0x43, + 0xE7, 0x99, 0xBD, 0x43, 0xE7, 0x9A, 0xAE, 0x43, + 0xE7, 0x9A, 0xBF, 0x43, 0xE7, 0x9B, 0x8A, 0x43, + 0xE7, 0x9B, 0x9B, 0x43, 0xE7, 0x9B, 0xA3, 0x43, + 0xE7, 0x9B, 0xA7, 0x43, 0xE7, 0x9B, 0xAE, 0x43, + 0xE7, 0x9B, 0xB4, 0x43, 0xE7, 0x9C, 0x81, 0x43, + // Bytes 1000 - 103f + 0xE7, 0x9C, 0x9E, 0x43, 0xE7, 0x9C, 0x9F, 0x43, + 0xE7, 0x9D, 0x80, 0x43, 0xE7, 0x9D, 0x8A, 0x43, + 0xE7, 0x9E, 0x8B, 0x43, 0xE7, 0x9E, 0xA7, 0x43, + 0xE7, 0x9F, 0x9B, 0x43, 0xE7, 0x9F, 0xA2, 0x43, + 0xE7, 0x9F, 0xB3, 0x43, 0xE7, 0xA1, 0x8E, 0x43, + 0xE7, 0xA1, 0xAB, 0x43, 0xE7, 0xA2, 0x8C, 0x43, + 0xE7, 0xA2, 0x91, 0x43, 0xE7, 0xA3, 0x8A, 0x43, + 0xE7, 0xA3, 0x8C, 0x43, 0xE7, 0xA3, 0xBB, 0x43, + // Bytes 1040 - 107f + 0xE7, 0xA4, 0xAA, 0x43, 0xE7, 0xA4, 0xBA, 0x43, + 0xE7, 0xA4, 0xBC, 0x43, 0xE7, 0xA4, 0xBE, 0x43, + 0xE7, 0xA5, 0x88, 0x43, 0xE7, 0xA5, 0x89, 0x43, + 0xE7, 0xA5, 0x90, 0x43, 0xE7, 0xA5, 0x96, 0x43, + 0xE7, 0xA5, 0x9D, 0x43, 0xE7, 0xA5, 0x9E, 0x43, + 0xE7, 0xA5, 0xA5, 0x43, 0xE7, 0xA5, 0xBF, 0x43, + 0xE7, 0xA6, 0x81, 0x43, 0xE7, 0xA6, 0x8D, 0x43, + 0xE7, 0xA6, 0x8E, 0x43, 0xE7, 0xA6, 0x8F, 0x43, + // Bytes 1080 - 10bf + 0xE7, 0xA6, 0xAE, 0x43, 0xE7, 0xA6, 0xB8, 0x43, + 0xE7, 0xA6, 0xBE, 0x43, 0xE7, 0xA7, 0x8A, 0x43, + 0xE7, 0xA7, 0x98, 0x43, 0xE7, 0xA7, 0xAB, 0x43, + 0xE7, 0xA8, 0x9C, 0x43, 0xE7, 0xA9, 0x80, 0x43, + 0xE7, 0xA9, 0x8A, 0x43, 0xE7, 0xA9, 0x8F, 0x43, + 0xE7, 0xA9, 0xB4, 0x43, 0xE7, 0xA9, 0xBA, 0x43, + 0xE7, 0xAA, 0x81, 0x43, 0xE7, 0xAA, 0xB1, 0x43, + 0xE7, 0xAB, 0x8B, 0x43, 0xE7, 0xAB, 0xAE, 0x43, + // Bytes 10c0 - 10ff + 0xE7, 0xAB, 0xB9, 0x43, 0xE7, 0xAC, 0xA0, 0x43, + 0xE7, 0xAE, 0x8F, 0x43, 0xE7, 0xAF, 0x80, 0x43, + 0xE7, 0xAF, 0x86, 0x43, 0xE7, 0xAF, 0x89, 0x43, + 0xE7, 0xB0, 0xBE, 0x43, 0xE7, 0xB1, 0xA0, 0x43, + 0xE7, 0xB1, 0xB3, 0x43, 0xE7, 0xB1, 0xBB, 0x43, + 0xE7, 0xB2, 0x92, 0x43, 0xE7, 0xB2, 0xBE, 0x43, + 0xE7, 0xB3, 0x92, 0x43, 0xE7, 0xB3, 0x96, 0x43, + 0xE7, 0xB3, 0xA3, 0x43, 0xE7, 0xB3, 0xA7, 0x43, + // Bytes 1100 - 113f + 0xE7, 0xB3, 0xA8, 0x43, 0xE7, 0xB3, 0xB8, 0x43, + 0xE7, 0xB4, 0x80, 0x43, 0xE7, 0xB4, 0x90, 0x43, + 0xE7, 0xB4, 0xA2, 0x43, 0xE7, 0xB4, 0xAF, 0x43, + 0xE7, 0xB5, 0x82, 0x43, 0xE7, 0xB5, 0x9B, 0x43, + 0xE7, 0xB5, 0xA3, 0x43, 0xE7, 0xB6, 0xA0, 0x43, + 0xE7, 0xB6, 0xBE, 0x43, 0xE7, 0xB7, 0x87, 0x43, + 0xE7, 0xB7, 0xB4, 0x43, 0xE7, 0xB8, 0x82, 0x43, + 0xE7, 0xB8, 0x89, 0x43, 0xE7, 0xB8, 0xB7, 0x43, + // Bytes 1140 - 117f + 0xE7, 0xB9, 0x81, 0x43, 0xE7, 0xB9, 0x85, 0x43, + 0xE7, 0xBC, 0xB6, 0x43, 0xE7, 0xBC, 0xBE, 0x43, + 0xE7, 0xBD, 0x91, 0x43, 0xE7, 0xBD, 0xB2, 0x43, + 0xE7, 0xBD, 0xB9, 0x43, 0xE7, 0xBD, 0xBA, 0x43, + 0xE7, 0xBE, 0x85, 0x43, 0xE7, 0xBE, 0x8A, 0x43, + 0xE7, 0xBE, 0x95, 0x43, 0xE7, 0xBE, 0x9A, 0x43, + 0xE7, 0xBE, 0xBD, 0x43, 0xE7, 0xBF, 0xBA, 0x43, + 0xE8, 0x80, 0x81, 0x43, 0xE8, 0x80, 0x85, 0x43, + // Bytes 1180 - 11bf + 0xE8, 0x80, 0x8C, 0x43, 0xE8, 0x80, 0x92, 0x43, + 0xE8, 0x80, 0xB3, 0x43, 0xE8, 0x81, 0x86, 0x43, + 0xE8, 0x81, 0xA0, 0x43, 0xE8, 0x81, 0xAF, 0x43, + 0xE8, 0x81, 0xB0, 0x43, 0xE8, 0x81, 0xBE, 0x43, + 0xE8, 0x81, 0xBF, 0x43, 0xE8, 0x82, 0x89, 0x43, + 0xE8, 0x82, 0x8B, 0x43, 0xE8, 0x82, 0xAD, 0x43, + 0xE8, 0x82, 0xB2, 0x43, 0xE8, 0x84, 0x83, 0x43, + 0xE8, 0x84, 0xBE, 0x43, 0xE8, 0x87, 0x98, 0x43, + // Bytes 11c0 - 11ff + 0xE8, 0x87, 0xA3, 0x43, 0xE8, 0x87, 0xA8, 0x43, + 0xE8, 0x87, 0xAA, 0x43, 0xE8, 0x87, 0xAD, 0x43, + 0xE8, 0x87, 0xB3, 0x43, 0xE8, 0x87, 0xBC, 0x43, + 0xE8, 0x88, 0x81, 0x43, 0xE8, 0x88, 0x84, 0x43, + 0xE8, 0x88, 0x8C, 0x43, 0xE8, 0x88, 0x98, 0x43, + 0xE8, 0x88, 0x9B, 0x43, 0xE8, 
0x88, 0x9F, 0x43, + 0xE8, 0x89, 0xAE, 0x43, 0xE8, 0x89, 0xAF, 0x43, + 0xE8, 0x89, 0xB2, 0x43, 0xE8, 0x89, 0xB8, 0x43, + // Bytes 1200 - 123f + 0xE8, 0x89, 0xB9, 0x43, 0xE8, 0x8A, 0x8B, 0x43, + 0xE8, 0x8A, 0x91, 0x43, 0xE8, 0x8A, 0x9D, 0x43, + 0xE8, 0x8A, 0xB1, 0x43, 0xE8, 0x8A, 0xB3, 0x43, + 0xE8, 0x8A, 0xBD, 0x43, 0xE8, 0x8B, 0xA5, 0x43, + 0xE8, 0x8B, 0xA6, 0x43, 0xE8, 0x8C, 0x9D, 0x43, + 0xE8, 0x8C, 0xA3, 0x43, 0xE8, 0x8C, 0xB6, 0x43, + 0xE8, 0x8D, 0x92, 0x43, 0xE8, 0x8D, 0x93, 0x43, + 0xE8, 0x8D, 0xA3, 0x43, 0xE8, 0x8E, 0xAD, 0x43, + // Bytes 1240 - 127f + 0xE8, 0x8E, 0xBD, 0x43, 0xE8, 0x8F, 0x89, 0x43, + 0xE8, 0x8F, 0x8A, 0x43, 0xE8, 0x8F, 0x8C, 0x43, + 0xE8, 0x8F, 0x9C, 0x43, 0xE8, 0x8F, 0xA7, 0x43, + 0xE8, 0x8F, 0xAF, 0x43, 0xE8, 0x8F, 0xB1, 0x43, + 0xE8, 0x90, 0xBD, 0x43, 0xE8, 0x91, 0x89, 0x43, + 0xE8, 0x91, 0x97, 0x43, 0xE8, 0x93, 0xAE, 0x43, + 0xE8, 0x93, 0xB1, 0x43, 0xE8, 0x93, 0xB3, 0x43, + 0xE8, 0x93, 0xBC, 0x43, 0xE8, 0x94, 0x96, 0x43, + // Bytes 1280 - 12bf + 0xE8, 0x95, 0xA4, 0x43, 0xE8, 0x97, 0x8D, 0x43, + 0xE8, 0x97, 0xBA, 0x43, 0xE8, 0x98, 0x86, 0x43, + 0xE8, 0x98, 0x92, 0x43, 0xE8, 0x98, 0xAD, 0x43, + 0xE8, 0x98, 0xBF, 0x43, 0xE8, 0x99, 0x8D, 0x43, + 0xE8, 0x99, 0x90, 0x43, 0xE8, 0x99, 0x9C, 0x43, + 0xE8, 0x99, 0xA7, 0x43, 0xE8, 0x99, 0xA9, 0x43, + 0xE8, 0x99, 0xAB, 0x43, 0xE8, 0x9A, 0x88, 0x43, + 0xE8, 0x9A, 0xA9, 0x43, 0xE8, 0x9B, 0xA2, 0x43, + // Bytes 12c0 - 12ff + 0xE8, 0x9C, 0x8E, 0x43, 0xE8, 0x9C, 0xA8, 0x43, + 0xE8, 0x9D, 0xAB, 0x43, 0xE8, 0x9D, 0xB9, 0x43, + 0xE8, 0x9E, 0x86, 0x43, 0xE8, 0x9E, 0xBA, 0x43, + 0xE8, 0x9F, 0xA1, 0x43, 0xE8, 0xA0, 0x81, 0x43, + 0xE8, 0xA0, 0x9F, 0x43, 0xE8, 0xA1, 0x80, 0x43, + 0xE8, 0xA1, 0x8C, 0x43, 0xE8, 0xA1, 0xA0, 0x43, + 0xE8, 0xA1, 0xA3, 0x43, 0xE8, 0xA3, 0x82, 0x43, + 0xE8, 0xA3, 0x8F, 0x43, 0xE8, 0xA3, 0x97, 0x43, + // Bytes 1300 - 133f + 0xE8, 0xA3, 0x9E, 0x43, 0xE8, 0xA3, 0xA1, 0x43, + 0xE8, 0xA3, 0xB8, 0x43, 0xE8, 0xA3, 0xBA, 0x43, + 0xE8, 0xA4, 0x90, 0x43, 0xE8, 0xA5, 0x81, 0x43, + 0xE8, 0xA5, 0xA4, 0x43, 0xE8, 0xA5, 0xBE, 0x43, + 0xE8, 0xA6, 0x86, 0x43, 0xE8, 0xA6, 0x8B, 0x43, + 0xE8, 0xA6, 0x96, 0x43, 0xE8, 0xA7, 0x92, 0x43, + 0xE8, 0xA7, 0xA3, 0x43, 0xE8, 0xA8, 0x80, 0x43, + 0xE8, 0xAA, 0xA0, 0x43, 0xE8, 0xAA, 0xAA, 0x43, + // Bytes 1340 - 137f + 0xE8, 0xAA, 0xBF, 0x43, 0xE8, 0xAB, 0x8B, 0x43, + 0xE8, 0xAB, 0x92, 0x43, 0xE8, 0xAB, 0x96, 0x43, + 0xE8, 0xAB, 0xAD, 0x43, 0xE8, 0xAB, 0xB8, 0x43, + 0xE8, 0xAB, 0xBE, 0x43, 0xE8, 0xAC, 0x81, 0x43, + 0xE8, 0xAC, 0xB9, 0x43, 0xE8, 0xAD, 0x98, 0x43, + 0xE8, 0xAE, 0x80, 0x43, 0xE8, 0xAE, 0x8A, 0x43, + 0xE8, 0xB0, 0xB7, 0x43, 0xE8, 0xB1, 0x86, 0x43, + 0xE8, 0xB1, 0x88, 0x43, 0xE8, 0xB1, 0x95, 0x43, + // Bytes 1380 - 13bf + 0xE8, 0xB1, 0xB8, 0x43, 0xE8, 0xB2, 0x9D, 0x43, + 0xE8, 0xB2, 0xA1, 0x43, 0xE8, 0xB2, 0xA9, 0x43, + 0xE8, 0xB2, 0xAB, 0x43, 0xE8, 0xB3, 0x81, 0x43, + 0xE8, 0xB3, 0x82, 0x43, 0xE8, 0xB3, 0x87, 0x43, + 0xE8, 0xB3, 0x88, 0x43, 0xE8, 0xB3, 0x93, 0x43, + 0xE8, 0xB4, 0x88, 0x43, 0xE8, 0xB4, 0x9B, 0x43, + 0xE8, 0xB5, 0xA4, 0x43, 0xE8, 0xB5, 0xB0, 0x43, + 0xE8, 0xB5, 0xB7, 0x43, 0xE8, 0xB6, 0xB3, 0x43, + // Bytes 13c0 - 13ff + 0xE8, 0xB6, 0xBC, 0x43, 0xE8, 0xB7, 0x8B, 0x43, + 0xE8, 0xB7, 0xAF, 0x43, 0xE8, 0xB7, 0xB0, 0x43, + 0xE8, 0xBA, 0xAB, 0x43, 0xE8, 0xBB, 0x8A, 0x43, + 0xE8, 0xBB, 0x94, 0x43, 0xE8, 0xBC, 0xA6, 0x43, + 0xE8, 0xBC, 0xAA, 0x43, 0xE8, 0xBC, 0xB8, 0x43, + 0xE8, 0xBC, 0xBB, 0x43, 0xE8, 0xBD, 0xA2, 0x43, + 0xE8, 0xBE, 0x9B, 0x43, 0xE8, 0xBE, 0x9E, 0x43, + 0xE8, 0xBE, 0xB0, 0x43, 0xE8, 0xBE, 0xB5, 0x43, + // Bytes 1400 - 143f + 0xE8, 0xBE, 0xB6, 0x43, 
0xE9, 0x80, 0xA3, 0x43, + 0xE9, 0x80, 0xB8, 0x43, 0xE9, 0x81, 0x8A, 0x43, + 0xE9, 0x81, 0xA9, 0x43, 0xE9, 0x81, 0xB2, 0x43, + 0xE9, 0x81, 0xBC, 0x43, 0xE9, 0x82, 0x8F, 0x43, + 0xE9, 0x82, 0x91, 0x43, 0xE9, 0x82, 0x94, 0x43, + 0xE9, 0x83, 0x8E, 0x43, 0xE9, 0x83, 0x9E, 0x43, + 0xE9, 0x83, 0xB1, 0x43, 0xE9, 0x83, 0xBD, 0x43, + 0xE9, 0x84, 0x91, 0x43, 0xE9, 0x84, 0x9B, 0x43, + // Bytes 1440 - 147f + 0xE9, 0x85, 0x89, 0x43, 0xE9, 0x85, 0x8D, 0x43, + 0xE9, 0x85, 0xAA, 0x43, 0xE9, 0x86, 0x99, 0x43, + 0xE9, 0x86, 0xB4, 0x43, 0xE9, 0x87, 0x86, 0x43, + 0xE9, 0x87, 0x8C, 0x43, 0xE9, 0x87, 0x8F, 0x43, + 0xE9, 0x87, 0x91, 0x43, 0xE9, 0x88, 0xB4, 0x43, + 0xE9, 0x88, 0xB8, 0x43, 0xE9, 0x89, 0xB6, 0x43, + 0xE9, 0x89, 0xBC, 0x43, 0xE9, 0x8B, 0x97, 0x43, + 0xE9, 0x8B, 0x98, 0x43, 0xE9, 0x8C, 0x84, 0x43, + // Bytes 1480 - 14bf + 0xE9, 0x8D, 0x8A, 0x43, 0xE9, 0x8F, 0xB9, 0x43, + 0xE9, 0x90, 0x95, 0x43, 0xE9, 0x95, 0xB7, 0x43, + 0xE9, 0x96, 0x80, 0x43, 0xE9, 0x96, 0x8B, 0x43, + 0xE9, 0x96, 0xAD, 0x43, 0xE9, 0x96, 0xB7, 0x43, + 0xE9, 0x98, 0x9C, 0x43, 0xE9, 0x98, 0xAE, 0x43, + 0xE9, 0x99, 0x8B, 0x43, 0xE9, 0x99, 0x8D, 0x43, + 0xE9, 0x99, 0xB5, 0x43, 0xE9, 0x99, 0xB8, 0x43, + 0xE9, 0x99, 0xBC, 0x43, 0xE9, 0x9A, 0x86, 0x43, + // Bytes 14c0 - 14ff + 0xE9, 0x9A, 0xA3, 0x43, 0xE9, 0x9A, 0xB6, 0x43, + 0xE9, 0x9A, 0xB7, 0x43, 0xE9, 0x9A, 0xB8, 0x43, + 0xE9, 0x9A, 0xB9, 0x43, 0xE9, 0x9B, 0x83, 0x43, + 0xE9, 0x9B, 0xA2, 0x43, 0xE9, 0x9B, 0xA3, 0x43, + 0xE9, 0x9B, 0xA8, 0x43, 0xE9, 0x9B, 0xB6, 0x43, + 0xE9, 0x9B, 0xB7, 0x43, 0xE9, 0x9C, 0xA3, 0x43, + 0xE9, 0x9C, 0xB2, 0x43, 0xE9, 0x9D, 0x88, 0x43, + 0xE9, 0x9D, 0x91, 0x43, 0xE9, 0x9D, 0x96, 0x43, + // Bytes 1500 - 153f + 0xE9, 0x9D, 0x9E, 0x43, 0xE9, 0x9D, 0xA2, 0x43, + 0xE9, 0x9D, 0xA9, 0x43, 0xE9, 0x9F, 0x8B, 0x43, + 0xE9, 0x9F, 0x9B, 0x43, 0xE9, 0x9F, 0xA0, 0x43, + 0xE9, 0x9F, 0xAD, 0x43, 0xE9, 0x9F, 0xB3, 0x43, + 0xE9, 0x9F, 0xBF, 0x43, 0xE9, 0xA0, 0x81, 0x43, + 0xE9, 0xA0, 0x85, 0x43, 0xE9, 0xA0, 0x8B, 0x43, + 0xE9, 0xA0, 0x98, 0x43, 0xE9, 0xA0, 0xA9, 0x43, + 0xE9, 0xA0, 0xBB, 0x43, 0xE9, 0xA1, 0x9E, 0x43, + // Bytes 1540 - 157f + 0xE9, 0xA2, 0xA8, 0x43, 0xE9, 0xA3, 0x9B, 0x43, + 0xE9, 0xA3, 0x9F, 0x43, 0xE9, 0xA3, 0xA2, 0x43, + 0xE9, 0xA3, 0xAF, 0x43, 0xE9, 0xA3, 0xBC, 0x43, + 0xE9, 0xA4, 0xA8, 0x43, 0xE9, 0xA4, 0xA9, 0x43, + 0xE9, 0xA6, 0x96, 0x43, 0xE9, 0xA6, 0x99, 0x43, + 0xE9, 0xA6, 0xA7, 0x43, 0xE9, 0xA6, 0xAC, 0x43, + 0xE9, 0xA7, 0x82, 0x43, 0xE9, 0xA7, 0xB1, 0x43, + 0xE9, 0xA7, 0xBE, 0x43, 0xE9, 0xA9, 0xAA, 0x43, + // Bytes 1580 - 15bf + 0xE9, 0xAA, 0xA8, 0x43, 0xE9, 0xAB, 0x98, 0x43, + 0xE9, 0xAB, 0x9F, 0x43, 0xE9, 0xAC, 0x92, 0x43, + 0xE9, 0xAC, 0xA5, 0x43, 0xE9, 0xAC, 0xAF, 0x43, + 0xE9, 0xAC, 0xB2, 0x43, 0xE9, 0xAC, 0xBC, 0x43, + 0xE9, 0xAD, 0x9A, 0x43, 0xE9, 0xAD, 0xAF, 0x43, + 0xE9, 0xB1, 0x80, 0x43, 0xE9, 0xB1, 0x97, 0x43, + 0xE9, 0xB3, 0xA5, 0x43, 0xE9, 0xB3, 0xBD, 0x43, + 0xE9, 0xB5, 0xA7, 0x43, 0xE9, 0xB6, 0xB4, 0x43, + // Bytes 15c0 - 15ff + 0xE9, 0xB7, 0xBA, 0x43, 0xE9, 0xB8, 0x9E, 0x43, + 0xE9, 0xB9, 0xB5, 0x43, 0xE9, 0xB9, 0xBF, 0x43, + 0xE9, 0xBA, 0x97, 0x43, 0xE9, 0xBA, 0x9F, 0x43, + 0xE9, 0xBA, 0xA5, 0x43, 0xE9, 0xBA, 0xBB, 0x43, + 0xE9, 0xBB, 0x83, 0x43, 0xE9, 0xBB, 0x8D, 0x43, + 0xE9, 0xBB, 0x8E, 0x43, 0xE9, 0xBB, 0x91, 0x43, + 0xE9, 0xBB, 0xB9, 0x43, 0xE9, 0xBB, 0xBD, 0x43, + 0xE9, 0xBB, 0xBE, 0x43, 0xE9, 0xBC, 0x85, 0x43, + // Bytes 1600 - 163f + 0xE9, 0xBC, 0x8E, 0x43, 0xE9, 0xBC, 0x8F, 0x43, + 0xE9, 0xBC, 0x93, 0x43, 0xE9, 0xBC, 0x96, 0x43, + 0xE9, 0xBC, 0xA0, 0x43, 0xE9, 0xBC, 0xBB, 0x43, + 0xE9, 0xBD, 0x83, 0x43, 0xE9, 0xBD, 0x8A, 
0x43, + 0xE9, 0xBD, 0x92, 0x43, 0xE9, 0xBE, 0x8D, 0x43, + 0xE9, 0xBE, 0x8E, 0x43, 0xE9, 0xBE, 0x9C, 0x43, + 0xE9, 0xBE, 0x9F, 0x43, 0xE9, 0xBE, 0xA0, 0x43, + 0xEA, 0x9C, 0xA7, 0x43, 0xEA, 0x9D, 0xAF, 0x43, + // Bytes 1640 - 167f + 0xEA, 0xAC, 0xB7, 0x43, 0xEA, 0xAD, 0x92, 0x44, + 0xF0, 0xA0, 0x84, 0xA2, 0x44, 0xF0, 0xA0, 0x94, + 0x9C, 0x44, 0xF0, 0xA0, 0x94, 0xA5, 0x44, 0xF0, + 0xA0, 0x95, 0x8B, 0x44, 0xF0, 0xA0, 0x98, 0xBA, + 0x44, 0xF0, 0xA0, 0xA0, 0x84, 0x44, 0xF0, 0xA0, + 0xA3, 0x9E, 0x44, 0xF0, 0xA0, 0xA8, 0xAC, 0x44, + 0xF0, 0xA0, 0xAD, 0xA3, 0x44, 0xF0, 0xA1, 0x93, + 0xA4, 0x44, 0xF0, 0xA1, 0x9A, 0xA8, 0x44, 0xF0, + // Bytes 1680 - 16bf + 0xA1, 0x9B, 0xAA, 0x44, 0xF0, 0xA1, 0xA7, 0x88, + 0x44, 0xF0, 0xA1, 0xAC, 0x98, 0x44, 0xF0, 0xA1, + 0xB4, 0x8B, 0x44, 0xF0, 0xA1, 0xB7, 0xA4, 0x44, + 0xF0, 0xA1, 0xB7, 0xA6, 0x44, 0xF0, 0xA2, 0x86, + 0x83, 0x44, 0xF0, 0xA2, 0x86, 0x9F, 0x44, 0xF0, + 0xA2, 0x8C, 0xB1, 0x44, 0xF0, 0xA2, 0x9B, 0x94, + 0x44, 0xF0, 0xA2, 0xA1, 0x84, 0x44, 0xF0, 0xA2, + 0xA1, 0x8A, 0x44, 0xF0, 0xA2, 0xAC, 0x8C, 0x44, + // Bytes 16c0 - 16ff + 0xF0, 0xA2, 0xAF, 0xB1, 0x44, 0xF0, 0xA3, 0x80, + 0x8A, 0x44, 0xF0, 0xA3, 0x8A, 0xB8, 0x44, 0xF0, + 0xA3, 0x8D, 0x9F, 0x44, 0xF0, 0xA3, 0x8E, 0x93, + 0x44, 0xF0, 0xA3, 0x8E, 0x9C, 0x44, 0xF0, 0xA3, + 0x8F, 0x83, 0x44, 0xF0, 0xA3, 0x8F, 0x95, 0x44, + 0xF0, 0xA3, 0x91, 0xAD, 0x44, 0xF0, 0xA3, 0x9A, + 0xA3, 0x44, 0xF0, 0xA3, 0xA2, 0xA7, 0x44, 0xF0, + 0xA3, 0xAA, 0x8D, 0x44, 0xF0, 0xA3, 0xAB, 0xBA, + // Bytes 1700 - 173f + 0x44, 0xF0, 0xA3, 0xB2, 0xBC, 0x44, 0xF0, 0xA3, + 0xB4, 0x9E, 0x44, 0xF0, 0xA3, 0xBB, 0x91, 0x44, + 0xF0, 0xA3, 0xBD, 0x9E, 0x44, 0xF0, 0xA3, 0xBE, + 0x8E, 0x44, 0xF0, 0xA4, 0x89, 0xA3, 0x44, 0xF0, + 0xA4, 0x8B, 0xAE, 0x44, 0xF0, 0xA4, 0x8E, 0xAB, + 0x44, 0xF0, 0xA4, 0x98, 0x88, 0x44, 0xF0, 0xA4, + 0x9C, 0xB5, 0x44, 0xF0, 0xA4, 0xA0, 0x94, 0x44, + 0xF0, 0xA4, 0xB0, 0xB6, 0x44, 0xF0, 0xA4, 0xB2, + // Bytes 1740 - 177f + 0x92, 0x44, 0xF0, 0xA4, 0xBE, 0xA1, 0x44, 0xF0, + 0xA4, 0xBE, 0xB8, 0x44, 0xF0, 0xA5, 0x81, 0x84, + 0x44, 0xF0, 0xA5, 0x83, 0xB2, 0x44, 0xF0, 0xA5, + 0x83, 0xB3, 0x44, 0xF0, 0xA5, 0x84, 0x99, 0x44, + 0xF0, 0xA5, 0x84, 0xB3, 0x44, 0xF0, 0xA5, 0x89, + 0x89, 0x44, 0xF0, 0xA5, 0x90, 0x9D, 0x44, 0xF0, + 0xA5, 0x98, 0xA6, 0x44, 0xF0, 0xA5, 0x9A, 0x9A, + 0x44, 0xF0, 0xA5, 0x9B, 0x85, 0x44, 0xF0, 0xA5, + // Bytes 1780 - 17bf + 0xA5, 0xBC, 0x44, 0xF0, 0xA5, 0xAA, 0xA7, 0x44, + 0xF0, 0xA5, 0xAE, 0xAB, 0x44, 0xF0, 0xA5, 0xB2, + 0x80, 0x44, 0xF0, 0xA5, 0xB3, 0x90, 0x44, 0xF0, + 0xA5, 0xBE, 0x86, 0x44, 0xF0, 0xA6, 0x87, 0x9A, + 0x44, 0xF0, 0xA6, 0x88, 0xA8, 0x44, 0xF0, 0xA6, + 0x89, 0x87, 0x44, 0xF0, 0xA6, 0x8B, 0x99, 0x44, + 0xF0, 0xA6, 0x8C, 0xBE, 0x44, 0xF0, 0xA6, 0x93, + 0x9A, 0x44, 0xF0, 0xA6, 0x94, 0xA3, 0x44, 0xF0, + // Bytes 17c0 - 17ff + 0xA6, 0x96, 0xA8, 0x44, 0xF0, 0xA6, 0x9E, 0xA7, + 0x44, 0xF0, 0xA6, 0x9E, 0xB5, 0x44, 0xF0, 0xA6, + 0xAC, 0xBC, 0x44, 0xF0, 0xA6, 0xB0, 0xB6, 0x44, + 0xF0, 0xA6, 0xB3, 0x95, 0x44, 0xF0, 0xA6, 0xB5, + 0xAB, 0x44, 0xF0, 0xA6, 0xBC, 0xAC, 0x44, 0xF0, + 0xA6, 0xBE, 0xB1, 0x44, 0xF0, 0xA7, 0x83, 0x92, + 0x44, 0xF0, 0xA7, 0x8F, 0x8A, 0x44, 0xF0, 0xA7, + 0x99, 0xA7, 0x44, 0xF0, 0xA7, 0xA2, 0xAE, 0x44, + // Bytes 1800 - 183f + 0xF0, 0xA7, 0xA5, 0xA6, 0x44, 0xF0, 0xA7, 0xB2, + 0xA8, 0x44, 0xF0, 0xA7, 0xBB, 0x93, 0x44, 0xF0, + 0xA7, 0xBC, 0xAF, 0x44, 0xF0, 0xA8, 0x97, 0x92, + 0x44, 0xF0, 0xA8, 0x97, 0xAD, 0x44, 0xF0, 0xA8, + 0x9C, 0xAE, 0x44, 0xF0, 0xA8, 0xAF, 0xBA, 0x44, + 0xF0, 0xA8, 0xB5, 0xB7, 0x44, 0xF0, 0xA9, 0x85, + 0x85, 0x44, 0xF0, 0xA9, 0x87, 0x9F, 0x44, 0xF0, + 0xA9, 0x88, 
0x9A, 0x44, 0xF0, 0xA9, 0x90, 0x8A, + // Bytes 1840 - 187f + 0x44, 0xF0, 0xA9, 0x92, 0x96, 0x44, 0xF0, 0xA9, + 0x96, 0xB6, 0x44, 0xF0, 0xA9, 0xAC, 0xB0, 0x44, + 0xF0, 0xAA, 0x83, 0x8E, 0x44, 0xF0, 0xAA, 0x84, + 0x85, 0x44, 0xF0, 0xAA, 0x88, 0x8E, 0x44, 0xF0, + 0xAA, 0x8A, 0x91, 0x44, 0xF0, 0xAA, 0x8E, 0x92, + 0x44, 0xF0, 0xAA, 0x98, 0x80, 0x42, 0x21, 0x21, + 0x42, 0x21, 0x3F, 0x42, 0x2E, 0x2E, 0x42, 0x30, + 0x2C, 0x42, 0x30, 0x2E, 0x42, 0x31, 0x2C, 0x42, + // Bytes 1880 - 18bf + 0x31, 0x2E, 0x42, 0x31, 0x30, 0x42, 0x31, 0x31, + 0x42, 0x31, 0x32, 0x42, 0x31, 0x33, 0x42, 0x31, + 0x34, 0x42, 0x31, 0x35, 0x42, 0x31, 0x36, 0x42, + 0x31, 0x37, 0x42, 0x31, 0x38, 0x42, 0x31, 0x39, + 0x42, 0x32, 0x2C, 0x42, 0x32, 0x2E, 0x42, 0x32, + 0x30, 0x42, 0x32, 0x31, 0x42, 0x32, 0x32, 0x42, + 0x32, 0x33, 0x42, 0x32, 0x34, 0x42, 0x32, 0x35, + 0x42, 0x32, 0x36, 0x42, 0x32, 0x37, 0x42, 0x32, + // Bytes 18c0 - 18ff + 0x38, 0x42, 0x32, 0x39, 0x42, 0x33, 0x2C, 0x42, + 0x33, 0x2E, 0x42, 0x33, 0x30, 0x42, 0x33, 0x31, + 0x42, 0x33, 0x32, 0x42, 0x33, 0x33, 0x42, 0x33, + 0x34, 0x42, 0x33, 0x35, 0x42, 0x33, 0x36, 0x42, + 0x33, 0x37, 0x42, 0x33, 0x38, 0x42, 0x33, 0x39, + 0x42, 0x34, 0x2C, 0x42, 0x34, 0x2E, 0x42, 0x34, + 0x30, 0x42, 0x34, 0x31, 0x42, 0x34, 0x32, 0x42, + 0x34, 0x33, 0x42, 0x34, 0x34, 0x42, 0x34, 0x35, + // Bytes 1900 - 193f + 0x42, 0x34, 0x36, 0x42, 0x34, 0x37, 0x42, 0x34, + 0x38, 0x42, 0x34, 0x39, 0x42, 0x35, 0x2C, 0x42, + 0x35, 0x2E, 0x42, 0x35, 0x30, 0x42, 0x36, 0x2C, + 0x42, 0x36, 0x2E, 0x42, 0x37, 0x2C, 0x42, 0x37, + 0x2E, 0x42, 0x38, 0x2C, 0x42, 0x38, 0x2E, 0x42, + 0x39, 0x2C, 0x42, 0x39, 0x2E, 0x42, 0x3D, 0x3D, + 0x42, 0x3F, 0x21, 0x42, 0x3F, 0x3F, 0x42, 0x41, + 0x55, 0x42, 0x42, 0x71, 0x42, 0x43, 0x44, 0x42, + // Bytes 1940 - 197f + 0x44, 0x4A, 0x42, 0x44, 0x5A, 0x42, 0x44, 0x7A, + 0x42, 0x47, 0x42, 0x42, 0x47, 0x79, 0x42, 0x48, + 0x50, 0x42, 0x48, 0x56, 0x42, 0x48, 0x67, 0x42, + 0x48, 0x7A, 0x42, 0x49, 0x49, 0x42, 0x49, 0x4A, + 0x42, 0x49, 0x55, 0x42, 0x49, 0x56, 0x42, 0x49, + 0x58, 0x42, 0x4B, 0x42, 0x42, 0x4B, 0x4B, 0x42, + 0x4B, 0x4D, 0x42, 0x4C, 0x4A, 0x42, 0x4C, 0x6A, + 0x42, 0x4D, 0x42, 0x42, 0x4D, 0x43, 0x42, 0x4D, + // Bytes 1980 - 19bf + 0x44, 0x42, 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42, + 0x4E, 0x4A, 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F, + 0x42, 0x50, 0x48, 0x42, 0x50, 0x52, 0x42, 0x50, + 0x61, 0x42, 0x52, 0x73, 0x42, 0x53, 0x44, 0x42, + 0x53, 0x4D, 0x42, 0x53, 0x53, 0x42, 0x53, 0x76, + 0x42, 0x54, 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57, + 0x43, 0x42, 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42, + 0x58, 0x49, 0x42, 0x63, 0x63, 0x42, 0x63, 0x64, + // Bytes 19c0 - 19ff + 0x42, 0x63, 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64, + 0x61, 0x42, 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42, + 0x64, 0x7A, 0x42, 0x65, 0x56, 0x42, 0x66, 0x66, + 0x42, 0x66, 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66, + 0x6D, 0x42, 0x68, 0x61, 0x42, 0x69, 0x69, 0x42, + 0x69, 0x6A, 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76, + 0x42, 0x69, 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B, + 0x56, 0x42, 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42, + // Bytes 1a00 - 1a3f + 0x6B, 0x6C, 0x42, 0x6B, 0x6D, 0x42, 0x6B, 0x74, + 0x42, 0x6C, 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C, + 0x6E, 0x42, 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42, + 0x6D, 0x33, 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56, + 0x42, 0x6D, 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D, + 0x67, 0x42, 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42, + 0x6D, 0x73, 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46, + 0x42, 0x6E, 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E, + // Bytes 1a40 - 1a7f + 0x6A, 0x42, 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42, + 0x6F, 0x56, 0x42, 0x70, 0x41, 0x42, 0x70, 0x46, + 0x42, 
0x70, 0x56, 0x42, 0x70, 0x57, 0x42, 0x70, + 0x63, 0x42, 0x70, 0x73, 0x42, 0x73, 0x72, 0x42, + 0x73, 0x74, 0x42, 0x76, 0x69, 0x42, 0x78, 0x69, + 0x43, 0x28, 0x31, 0x29, 0x43, 0x28, 0x32, 0x29, + 0x43, 0x28, 0x33, 0x29, 0x43, 0x28, 0x34, 0x29, + 0x43, 0x28, 0x35, 0x29, 0x43, 0x28, 0x36, 0x29, + // Bytes 1a80 - 1abf + 0x43, 0x28, 0x37, 0x29, 0x43, 0x28, 0x38, 0x29, + 0x43, 0x28, 0x39, 0x29, 0x43, 0x28, 0x41, 0x29, + 0x43, 0x28, 0x42, 0x29, 0x43, 0x28, 0x43, 0x29, + 0x43, 0x28, 0x44, 0x29, 0x43, 0x28, 0x45, 0x29, + 0x43, 0x28, 0x46, 0x29, 0x43, 0x28, 0x47, 0x29, + 0x43, 0x28, 0x48, 0x29, 0x43, 0x28, 0x49, 0x29, + 0x43, 0x28, 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29, + 0x43, 0x28, 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29, + // Bytes 1ac0 - 1aff + 0x43, 0x28, 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29, + 0x43, 0x28, 0x50, 0x29, 0x43, 0x28, 0x51, 0x29, + 0x43, 0x28, 0x52, 0x29, 0x43, 0x28, 0x53, 0x29, + 0x43, 0x28, 0x54, 0x29, 0x43, 0x28, 0x55, 0x29, + 0x43, 0x28, 0x56, 0x29, 0x43, 0x28, 0x57, 0x29, + 0x43, 0x28, 0x58, 0x29, 0x43, 0x28, 0x59, 0x29, + 0x43, 0x28, 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29, + 0x43, 0x28, 0x62, 0x29, 0x43, 0x28, 0x63, 0x29, + // Bytes 1b00 - 1b3f + 0x43, 0x28, 0x64, 0x29, 0x43, 0x28, 0x65, 0x29, + 0x43, 0x28, 0x66, 0x29, 0x43, 0x28, 0x67, 0x29, + 0x43, 0x28, 0x68, 0x29, 0x43, 0x28, 0x69, 0x29, + 0x43, 0x28, 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29, + 0x43, 0x28, 0x6C, 0x29, 0x43, 0x28, 0x6D, 0x29, + 0x43, 0x28, 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29, + 0x43, 0x28, 0x70, 0x29, 0x43, 0x28, 0x71, 0x29, + 0x43, 0x28, 0x72, 0x29, 0x43, 0x28, 0x73, 0x29, + // Bytes 1b40 - 1b7f + 0x43, 0x28, 0x74, 0x29, 0x43, 0x28, 0x75, 0x29, + 0x43, 0x28, 0x76, 0x29, 0x43, 0x28, 0x77, 0x29, + 0x43, 0x28, 0x78, 0x29, 0x43, 0x28, 0x79, 0x29, + 0x43, 0x28, 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E, + 0x43, 0x31, 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E, + 0x43, 0x31, 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E, + 0x43, 0x31, 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E, + 0x43, 0x31, 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E, + // Bytes 1b80 - 1bbf + 0x43, 0x31, 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E, + 0x43, 0x32, 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D, + 0x43, 0x3D, 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E, + 0x43, 0x46, 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A, + 0x43, 0x47, 0x50, 0x61, 0x43, 0x49, 0x49, 0x49, + 0x43, 0x4C, 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7, + 0x43, 0x4D, 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61, + 0x43, 0x4D, 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D, + // Bytes 1bc0 - 1bff + 0x43, 0x50, 0x50, 0x56, 0x43, 0x50, 0x54, 0x45, + 0x43, 0x54, 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A, + 0x43, 0x56, 0x49, 0x49, 0x43, 0x58, 0x49, 0x49, + 0x43, 0x61, 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73, + 0x43, 0x61, 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72, + 0x43, 0x63, 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75, + 0x43, 0x63, 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32, + 0x43, 0x63, 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32, + // Bytes 1c00 - 1c3f + 0x43, 0x64, 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67, + 0x43, 0x66, 0x66, 0x69, 0x43, 0x66, 0x66, 0x6C, + 0x43, 0x67, 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61, + 0x43, 0x69, 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A, + 0x43, 0x6B, 0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32, + 0x43, 0x6B, 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9, + 0x43, 0x6C, 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7, + 0x43, 0x6D, 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32, + // Bytes 1c40 - 1c7f + 0x43, 0x6D, 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C, + 0x43, 0x72, 0x61, 0x64, 0x43, 0x76, 0x69, 0x69, + 0x43, 0x78, 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43, + 0x43, 0xC2, 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E, + 0x43, 0xCE, 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46, + 0x43, 0xCE, 0xBC, 0x56, 
0x43, 0xCE, 0xBC, 0x57, + 0x43, 0xCE, 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C, + 0x43, 0xCE, 0xBC, 0x6D, 0x43, 0xCE, 0xBC, 0x73, + // Bytes 1c80 - 1cbf + 0x44, 0x28, 0x31, 0x30, 0x29, 0x44, 0x28, 0x31, + 0x31, 0x29, 0x44, 0x28, 0x31, 0x32, 0x29, 0x44, + 0x28, 0x31, 0x33, 0x29, 0x44, 0x28, 0x31, 0x34, + 0x29, 0x44, 0x28, 0x31, 0x35, 0x29, 0x44, 0x28, + 0x31, 0x36, 0x29, 0x44, 0x28, 0x31, 0x37, 0x29, + 0x44, 0x28, 0x31, 0x38, 0x29, 0x44, 0x28, 0x31, + 0x39, 0x29, 0x44, 0x28, 0x32, 0x30, 0x29, 0x44, + 0x30, 0xE7, 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81, + // Bytes 1cc0 - 1cff + 0x84, 0x44, 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31, + 0xE6, 0x9C, 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9, + 0x44, 0x32, 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6, + 0x9C, 0x88, 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44, + 0x33, 0xE6, 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C, + 0x88, 0x44, 0x33, 0xE7, 0x82, 0xB9, 0x44, 0x34, + 0xE6, 0x97, 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88, + 0x44, 0x34, 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6, + // Bytes 1d00 - 1d3f + 0x97, 0xA5, 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44, + 0x35, 0xE7, 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97, + 0xA5, 0x44, 0x36, 0xE6, 0x9C, 0x88, 0x44, 0x36, + 0xE7, 0x82, 0xB9, 0x44, 0x37, 0xE6, 0x97, 0xA5, + 0x44, 0x37, 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7, + 0x82, 0xB9, 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44, + 0x38, 0xE6, 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82, + 0xB9, 0x44, 0x39, 0xE6, 0x97, 0xA5, 0x44, 0x39, + // Bytes 1d40 - 1d7f + 0xE6, 0x9C, 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9, + 0x44, 0x56, 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E, + 0x6D, 0x2E, 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44, + 0x70, 0x2E, 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69, + 0x69, 0x44, 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5, + 0xB4, 0xD5, 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB, + 0x44, 0xD5, 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4, + 0xD5, 0xB6, 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44, + // Bytes 1d80 - 1dbf + 0xD7, 0x90, 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9, + 0xB4, 0x44, 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8, + 0xA8, 0xD8, 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE, + 0x44, 0xD8, 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8, + 0xD8, 0xB2, 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44, + 0xD8, 0xA8, 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9, + 0x87, 0x44, 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8, + 0xA8, 0xD9, 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC, + // Bytes 1dc0 - 1dff + 0x44, 0xD8, 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA, + 0xD8, 0xAE, 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44, + 0xD8, 0xAA, 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9, + 0x85, 0x44, 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8, + 0xAA, 0xD9, 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89, + 0x44, 0xD8, 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB, + 0xD8, 0xAC, 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44, + 0xD8, 0xAB, 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9, + // Bytes 1e00 - 1e3f + 0x85, 0x44, 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8, + 0xAB, 0xD9, 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89, + 0x44, 0xD8, 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC, + 0xD8, 0xAD, 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44, + 0xD8, 0xAC, 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9, + 0x8A, 0x44, 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8, + 0xAD, 0xD9, 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89, + 0x44, 0xD8, 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE, + // Bytes 1e40 - 1e7f + 0xD8, 0xAC, 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44, + 0xD8, 0xAE, 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9, + 0x89, 0x44, 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8, + 0xB3, 0xD8, 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD, + 0x44, 0xD8, 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3, + 0xD8, 0xB1, 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44, + 0xD8, 0xB3, 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9, + 0x89, 0x44, 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8, + // Bytes 1e80 - 1ebf + 0xB4, 0xD8, 0xAC, 
0x44, 0xD8, 0xB4, 0xD8, 0xAD, + 0x44, 0xD8, 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4, + 0xD8, 0xB1, 0x44, 0xD8, 0xB4, 0xD9, 0x85, 0x44, + 0xD8, 0xB4, 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9, + 0x89, 0x44, 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8, + 0xB5, 0xD8, 0xAD, 0x44, 0xD8, 0xB5, 0xD8, 0xAE, + 0x44, 0xD8, 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5, + 0xD9, 0x85, 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44, + // Bytes 1ec0 - 1eff + 0xD8, 0xB5, 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8, + 0xAC, 0x44, 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8, + 0xB6, 0xD8, 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1, + 0x44, 0xD8, 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6, + 0xD9, 0x89, 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44, + 0xD8, 0xB7, 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9, + 0x85, 0x44, 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8, + 0xB7, 0xD9, 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85, + // Bytes 1f00 - 1f3f + 0x44, 0xD8, 0xB9, 0xD8, 0xAC, 0x44, 0xD8, 0xB9, + 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44, + 0xD8, 0xB9, 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8, + 0xAC, 0x44, 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8, + 0xBA, 0xD9, 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A, + 0x44, 0xD9, 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81, + 0xD8, 0xAD, 0x44, 0xD9, 0x81, 0xD8, 0xAE, 0x44, + 0xD9, 0x81, 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9, + // Bytes 1f40 - 1f7f + 0x89, 0x44, 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9, + 0x82, 0xD8, 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85, + 0x44, 0xD9, 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82, + 0xD9, 0x8A, 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44, + 0xD9, 0x83, 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8, + 0xAD, 0x44, 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9, + 0x83, 0xD9, 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85, + 0x44, 0xD9, 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83, + // Bytes 1f80 - 1fbf + 0xD9, 0x8A, 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44, + 0xD9, 0x84, 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8, + 0xAD, 0x44, 0xD9, 0x84, 0xD8, 0xAE, 0x44, 0xD9, + 0x84, 0xD9, 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87, + 0x44, 0xD9, 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84, + 0xD9, 0x8A, 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44, + 0xD9, 0x85, 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8, + 0xAD, 0x44, 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9, + // Bytes 1fc0 - 1fff + 0x85, 0xD9, 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89, + 0x44, 0xD9, 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86, + 0xD8, 0xAC, 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44, + 0xD9, 0x86, 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8, + 0xB1, 0x44, 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9, + 0x86, 0xD9, 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86, + 0x44, 0xD9, 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86, + 0xD9, 0x89, 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44, + // Bytes 2000 - 203f + 0xD9, 0x87, 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9, + 0x85, 0x44, 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9, + 0x87, 0xD9, 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4, + 0x44, 0xD9, 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A, + 0xD8, 0xAD, 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44, + 0xD9, 0x8A, 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8, + 0xB2, 0x44, 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9, + 0x8A, 0xD9, 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87, + // Bytes 2040 - 207f + 0x44, 0xD9, 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A, + 0xD9, 0x8A, 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44, + 0xDB, 0x87, 0xD9, 0xB4, 0x45, 0x28, 0xE1, 0x84, + 0x80, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x86, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28, + // Bytes 2080 - 20bf + 0xE1, 0x84, 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x8C, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x90, 0x29, 0x45, 0x28, 
0xE1, 0x84, + 0x91, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29, + 0x45, 0x28, 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28, + 0xE4, 0xB8, 0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8, + 0x89, 0x29, 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29, + // Bytes 20c0 - 20ff + 0x45, 0x28, 0xE4, 0xBA, 0x8C, 0x29, 0x45, 0x28, + 0xE4, 0xBA, 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB, + 0xA3, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29, + 0x45, 0x28, 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28, + 0xE5, 0x85, 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85, + 0xAD, 0x29, 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29, + 0x45, 0x28, 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28, + 0xE5, 0x8D, 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90, + // Bytes 2100 - 213f + 0x8D, 0x29, 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29, + 0x45, 0x28, 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28, + 0xE5, 0x9C, 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD, + 0xA6, 0x29, 0x45, 0x28, 0xE6, 0x97, 0xA5, 0x29, + 0x45, 0x28, 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28, + 0xE6, 0x9C, 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C, + 0xA8, 0x29, 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29, + 0x45, 0x28, 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28, + // Bytes 2140 - 217f + 0xE7, 0x81, 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89, + 0xB9, 0x29, 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29, + 0x45, 0x28, 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28, + 0xE7, 0xA5, 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5, + 0xAD, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29, + 0x45, 0x28, 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28, + 0xE8, 0xB2, 0xA1, 0x29, 0x45, 0x28, 0xE8, 0xB3, + 0x87, 0x29, 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29, + // Bytes 2180 - 21bf + 0x45, 0x30, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6, + 0x9C, 0x88, 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x31, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6, + // Bytes 21c0 - 21ff + 0x97, 0xA5, 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x36, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x37, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31, + // Bytes 2200 - 223f + 0x38, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x34, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39, + // Bytes 2240 - 227f + 0x45, 0x32, 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6, + // Bytes 2280 - 22bf + 0x97, 0xA5, 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33, + 0x45, 0x32, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34, + 0x45, 
0x33, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + // Bytes 22c0 - 22ff + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81, + 0x84, 0x35, 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36, + 0x45, 0x35, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x41, 0xE2, 0x88, + 0x95, 0x6D, 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D, + 0x45, 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31, + 0xE2, 0x81, 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2, + 0x88, 0x95, 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88, + // Bytes 2300 - 233f + 0x95, 0x73, 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85, + 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46, + 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAA, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, + // Bytes 2340 - 237f + 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC, + 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, + 0xAA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8, + // Bytes 2380 - 23bf + 0xAD, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD8, 0xAD, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAD, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, + 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8, + 0xAC, 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89, + // Bytes 23c0 - 23ff + 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, + 0xB3, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, + 0x8A, 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE, + 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46, + // Bytes 2400 - 243f + 0xD8, 0xB5, 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8, + 0xB5, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5, + 0xD9, 0x84, 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9, + 0x84, 0xDB, 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46, + 0xD8, 0xB7, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, + // Bytes 2440 - 247f + 0xB7, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8, + 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, + 0xBA, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81, + // Bytes 2480 - 24bf + 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x81, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84, + 0xDB, 0x92, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8, + 0xAD, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x83, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAC, 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8, + // Bytes 24c0 - 24ff + 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC, + 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, + 
0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89, + 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD9, 0x84, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, + 0x84, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC, + // Bytes 2500 - 253f + 0xD8, 0xAE, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, + 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, + 0x85, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85, + 0xD8, 0xAE, 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE, + 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9, + // Bytes 2540 - 257f + 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD, + 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46, + 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9, + 0x86, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x8A, + // Bytes 2580 - 25bf + 0x46, 0xD9, 0x87, 0xD9, 0x85, 0xD8, 0xAC, 0x46, + 0xD9, 0x87, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x8A, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, + 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, + 0xA7, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46, + // Bytes 25c0 - 25ff + 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD9, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9, + // Bytes 2600 - 263f + 0x8A, 0xD9, 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xDB, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xDB, 0x90, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, + 0x95, 0x46, 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2, + 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46, + 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0, + 0xBB, 0x8D, 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD, + // Bytes 2640 - 267f + 0x80, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46, + 0xE0, 0xBE, 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, + 0xBE, 0x92, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0x9C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1, + // Bytes 2680 - 26bf + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE, + 0xB7, 0x46, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x46, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46, + 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2, + 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x46, 0xE3, 0x81, + 0xBB, 0xE3, 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88, + 0xE3, 0x82, 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3, + // Bytes 26c0 - 26ff + 0x83, 0xAD, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82, + 0xB3, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88, + 0x46, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46, + 0xE3, 0x83, 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3, + 0x83, 0x9B, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, + 0x9F, 0xE3, 0x83, 
0xAA, 0x46, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3, + 0x83, 0xA0, 0x46, 0xE5, 0xA4, 0xA7, 0xE6, 0xAD, + // Bytes 2700 - 273f + 0xA3, 0x46, 0xE5, 0xB9, 0xB3, 0xE6, 0x88, 0x90, + 0x46, 0xE6, 0x98, 0x8E, 0xE6, 0xB2, 0xBB, 0x46, + 0xE6, 0x98, 0xAD, 0xE5, 0x92, 0x8C, 0x47, 0x72, + 0x61, 0x64, 0xE2, 0x88, 0x95, 0x73, 0x47, 0xE3, + 0x80, 0x94, 0x53, 0xE3, 0x80, 0x95, 0x48, 0x28, + 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, + // Bytes 2740 - 277f + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x85, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x86, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x87, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x89, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0x29, + // Bytes 2780 - 27bf + 0x48, 0x28, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8F, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x90, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x91, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x92, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x72, 0x61, + 0x64, 0xE2, 0x88, 0x95, 0x73, 0x32, 0x48, 0xD8, + 0xA7, 0xD9, 0x83, 0xD8, 0xA8, 0xD8, 0xB1, 0x48, + // Bytes 27c0 - 27ff + 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87, + 0x48, 0xD8, 0xB1, 0xD8, 0xB3, 0xD9, 0x88, 0xD9, + 0x84, 0x48, 0xD8, 0xB1, 0xDB, 0x8C, 0xD8, 0xA7, + 0xD9, 0x84, 0x48, 0xD8, 0xB5, 0xD9, 0x84, 0xD8, + 0xB9, 0xD9, 0x85, 0x48, 0xD8, 0xB9, 0xD9, 0x84, + 0xD9, 0x8A, 0xD9, 0x87, 0x48, 0xD9, 0x85, 0xD8, + 0xAD, 0xD9, 0x85, 0xD8, 0xAF, 0x48, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x49, 0xE2, + // Bytes 2800 - 283f + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x49, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0xE2, + 0x80, 0xB5, 0x49, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0x49, 0xE2, 0x88, 0xAE, + 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x49, 0xE3, + 0x80, 0x94, 0xE4, 0xB8, 0x89, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE4, 0xBA, 0x8C, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0x8B, + // Bytes 2840 - 287f + 0x9D, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE5, 0xAE, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE6, 0x89, 0x93, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x95, 0x97, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x9C, + 0xAC, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE7, 0x82, 0xB9, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE7, 0x9B, 0x97, 0xE3, 0x80, 0x95, + // Bytes 2880 - 28bf + 0x49, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82, 0xA6, + 0xE3, 0x82, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3, + 0x82, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9, + 0x49, 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xA0, 0x49, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAA, 0x49, 0xE3, 0x82, 0xB1, + // Bytes 28c0 - 28ff + 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x49, 0xE3, + 0x82, 0xB3, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x8A, + 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, + 0x83, 0x81, 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xB7, 0x49, 0xE3, + 0x83, 0x88, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x8E, 0xE3, 0x83, 0x83, 0xE3, + // Bytes 2900 - 293f + 0x83, 0x88, 
0x49, 0xE3, 0x83, 0x8F, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x92, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xB3, + 0x49, 0xE3, 0x83, 0x95, 0xE3, 0x83, 0xA9, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xBD, 0x49, 0xE3, 0x83, 0x98, + 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x84, 0x49, 0xE3, + // Bytes 2940 - 297f + 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9E, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x8F, 0x49, 0xE3, + 0x83, 0x9E, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xAF, + 0x49, 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x83, 0xA6, 0xE3, 0x82, + // Bytes 2980 - 29bf + 0xA2, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83, 0xAF, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE2, + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0xE2, 0x80, 0xB2, 0x4C, 0xE2, 0x88, 0xAB, 0xE2, + 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, + 0x4C, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA1, 0x4C, 0xE3, 0x82, + 0xA8, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xAB, 0xE3, + // Bytes 29c0 - 29ff + 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x9E, 0x4C, 0xE3, 0x82, 0xAB, + 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xAD, + 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x8B, + // Bytes 2a00 - 2a3f + 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xA5, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, + 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, + 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0x4C, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x8D, 0x4C, 0xE3, 0x82, 0xB5, 0xE3, 0x82, + 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2a40 - 2a7f + 0xBC, 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x84, 0x4C, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA3, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x98, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBF, + 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, + // Bytes 2a80 - 2abf + 0x83, 0x8B, 0xE3, 0x83, 0x92, 0x4C, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, 0xE3, + 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x9B, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x88, 0x4C, + 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0x4C, 0xE3, 0x83, 0x9F, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, + 0xB3, 0x4C, 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, + // Bytes 2ac0 - 2aff + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, + 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0x4C, 0xE6, 0xA0, 0xAA, 0xE5, 0xBC, 0x8F, 0xE4, + 0xBC, 0x9A, 0xE7, 0xA4, 0xBE, 0x4E, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x92, + 0xE1, 0x85, 0xAE, 0x29, 0x4F, 0xD8, 0xAC, 0xD9, + // Bytes 2b00 - 2b3f + 0x84, 0x20, 0xD8, 0xAC, 0xD9, 0x84, 0xD8, 0xA7, + 0xD9, 0x84, 0xD9, 0x87, 0x4F, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0x4F, 
0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xA2, 0x4F, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAF, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xB5, + // Bytes 2b40 - 2b7f + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x81, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xA0, 0x4F, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x98, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0xBF, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x9B, + 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x83, 0x9E, + // Bytes 2b80 - 2bbf + 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB7, 0xE3, 0x83, + 0xA7, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xA1, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0x88, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xAB, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x95, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0x51, 0x28, 0xE1, 0x84, + 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xA5, 0xE1, 0x86, 0xAB, 0x29, 0x52, 0xE3, + // Bytes 2bc0 - 2bff + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xBC, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xA9, 0xE3, 0x83, 0xA0, 0x52, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xA1, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2c00 - 2c3f + 0xA9, 0xE3, 0x83, 0xA0, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xB3, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAB, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0xE3, + 0x82, 0xA4, 0xE3, 0x83, 0xAD, 0x52, 0xE3, 0x83, + 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, + 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, + 0x52, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, + 0x82, 0xA2, 0xE3, 0x82, 0xB9, 0xE3, 0x83, 0x88, + // Bytes 2c40 - 2c7f + 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0x83, 0xE3, 0x82, 0xB7, + 0xE3, 0x82, 0xA7, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x83, 0x9F, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAB, 0x52, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0xB1, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xB3, 0x61, 0xD8, 0xB5, 0xD9, + // Bytes 2c80 - 2cbf + 0x84, 0xD9, 0x89, 0x20, 0xD8, 0xA7, 0xD9, 0x84, + 0xD9, 0x84, 0xD9, 0x87, 0x20, 0xD8, 0xB9, 0xD9, + 0x84, 0xD9, 0x8A, 0xD9, 0x87, 0x20, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA6, 0xBE, 0x01, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA7, 0x97, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAC, 0xBE, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAD, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2cc0 - 2cff + 0xAD, 0x87, 0xE0, 0xAD, 0x97, 0x01, 0x06, 0xE0, + 0xAE, 0x92, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x87, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xB2, 0xBF, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2d00 - 2d3f + 0xB5, 0x86, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB5, 0x86, 0xE0, 0xB5, 0x97, 0x01, 0x06, 0xE0, + 0xB5, 0x87, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB7, 0x99, 0xE0, 0xB7, 0x9F, 0x01, 0x06, 0xE1, + 0x80, 0xA5, 0xE1, 0x80, 0xAE, 0x01, 0x06, 0xE1, + 0xAC, 0x85, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x87, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 
0xAC, 0x89, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + // Bytes 2d40 - 2d7f + 0xAC, 0x8B, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x8D, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x91, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBA, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBC, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBE, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBF, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAD, 0x82, 0xE1, 0xAC, 0xB5, 0x01, 0x08, 0xF0, + // Bytes 2d80 - 2dbf + 0x91, 0x84, 0xB1, 0xF0, 0x91, 0x84, 0xA7, 0x01, + 0x08, 0xF0, 0x91, 0x84, 0xB2, 0xF0, 0x91, 0x84, + 0xA7, 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0, + 0x91, 0x8C, 0xBE, 0x01, 0x08, 0xF0, 0x91, 0x8D, + 0x87, 0xF0, 0x91, 0x8D, 0x97, 0x01, 0x08, 0xF0, + 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xB0, 0x01, + 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, + 0xBA, 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, + // Bytes 2dc0 - 2dff + 0x91, 0x92, 0xBD, 0x01, 0x08, 0xF0, 0x91, 0x96, + 0xB8, 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0, + 0x91, 0x96, 0xB9, 0xF0, 0x91, 0x96, 0xAF, 0x01, + 0x09, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0xE0, + 0xB3, 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, 0xE0, + 0xB7, 0x8F, 0xE0, 0xB7, 0x8A, 0x12, 0x44, 0x44, + 0x5A, 0xCC, 0x8C, 0xC9, 0x44, 0x44, 0x7A, 0xCC, + 0x8C, 0xC9, 0x44, 0x64, 0x7A, 0xCC, 0x8C, 0xC9, + // Bytes 2e00 - 2e3f + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x46, 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e40 - 2e7f + 0x46, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, 0x01, + 0x46, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e80 - 2ebf + 0x46, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x01, + 0x49, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3, + 0x82, 0x99, 0x0D, 0x4C, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xB4, + 0x01, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, 0x4C, + 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + // Bytes 2ec0 - 2eff + 0x9B, 0xE3, 0x82, 0x9A, 0x0D, 0x4C, 0xE3, 0x83, + 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x0D, 0x4F, 0xE1, 0x84, 0x8E, 0xE1, + 0x85, 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, 0x80, + 0xE1, 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, 0xA4, + 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, 0x82, + 0xB7, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, + // Bytes 2f00 - 2f3f + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, + 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, 0x4F, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x52, 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, 0xE3, + 0x82, 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0xE3, 0x82, 0x99, 0x0D, 0x52, 0xE3, 0x83, 0x95, + // Bytes 2f40 - 2f7f + 0xE3, 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, 
+ 0x86, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0x01, + 0x86, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, 0x01, + 0x03, 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, 0xCC, + 0xB8, 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, 0x03, + 0x41, 0xCC, 0x80, 0xC9, 0x03, 0x41, 0xCC, 0x81, + 0xC9, 0x03, 0x41, 0xCC, 0x83, 0xC9, 0x03, 0x41, + // Bytes 2f80 - 2fbf + 0xCC, 0x84, 0xC9, 0x03, 0x41, 0xCC, 0x89, 0xC9, + 0x03, 0x41, 0xCC, 0x8C, 0xC9, 0x03, 0x41, 0xCC, + 0x8F, 0xC9, 0x03, 0x41, 0xCC, 0x91, 0xC9, 0x03, + 0x41, 0xCC, 0xA5, 0xB5, 0x03, 0x41, 0xCC, 0xA8, + 0xA5, 0x03, 0x42, 0xCC, 0x87, 0xC9, 0x03, 0x42, + 0xCC, 0xA3, 0xB5, 0x03, 0x42, 0xCC, 0xB1, 0xB5, + 0x03, 0x43, 0xCC, 0x81, 0xC9, 0x03, 0x43, 0xCC, + 0x82, 0xC9, 0x03, 0x43, 0xCC, 0x87, 0xC9, 0x03, + // Bytes 2fc0 - 2fff + 0x43, 0xCC, 0x8C, 0xC9, 0x03, 0x44, 0xCC, 0x87, + 0xC9, 0x03, 0x44, 0xCC, 0x8C, 0xC9, 0x03, 0x44, + 0xCC, 0xA3, 0xB5, 0x03, 0x44, 0xCC, 0xA7, 0xA5, + 0x03, 0x44, 0xCC, 0xAD, 0xB5, 0x03, 0x44, 0xCC, + 0xB1, 0xB5, 0x03, 0x45, 0xCC, 0x80, 0xC9, 0x03, + 0x45, 0xCC, 0x81, 0xC9, 0x03, 0x45, 0xCC, 0x83, + 0xC9, 0x03, 0x45, 0xCC, 0x86, 0xC9, 0x03, 0x45, + 0xCC, 0x87, 0xC9, 0x03, 0x45, 0xCC, 0x88, 0xC9, + // Bytes 3000 - 303f + 0x03, 0x45, 0xCC, 0x89, 0xC9, 0x03, 0x45, 0xCC, + 0x8C, 0xC9, 0x03, 0x45, 0xCC, 0x8F, 0xC9, 0x03, + 0x45, 0xCC, 0x91, 0xC9, 0x03, 0x45, 0xCC, 0xA8, + 0xA5, 0x03, 0x45, 0xCC, 0xAD, 0xB5, 0x03, 0x45, + 0xCC, 0xB0, 0xB5, 0x03, 0x46, 0xCC, 0x87, 0xC9, + 0x03, 0x47, 0xCC, 0x81, 0xC9, 0x03, 0x47, 0xCC, + 0x82, 0xC9, 0x03, 0x47, 0xCC, 0x84, 0xC9, 0x03, + 0x47, 0xCC, 0x86, 0xC9, 0x03, 0x47, 0xCC, 0x87, + // Bytes 3040 - 307f + 0xC9, 0x03, 0x47, 0xCC, 0x8C, 0xC9, 0x03, 0x47, + 0xCC, 0xA7, 0xA5, 0x03, 0x48, 0xCC, 0x82, 0xC9, + 0x03, 0x48, 0xCC, 0x87, 0xC9, 0x03, 0x48, 0xCC, + 0x88, 0xC9, 0x03, 0x48, 0xCC, 0x8C, 0xC9, 0x03, + 0x48, 0xCC, 0xA3, 0xB5, 0x03, 0x48, 0xCC, 0xA7, + 0xA5, 0x03, 0x48, 0xCC, 0xAE, 0xB5, 0x03, 0x49, + 0xCC, 0x80, 0xC9, 0x03, 0x49, 0xCC, 0x81, 0xC9, + 0x03, 0x49, 0xCC, 0x82, 0xC9, 0x03, 0x49, 0xCC, + // Bytes 3080 - 30bf + 0x83, 0xC9, 0x03, 0x49, 0xCC, 0x84, 0xC9, 0x03, + 0x49, 0xCC, 0x86, 0xC9, 0x03, 0x49, 0xCC, 0x87, + 0xC9, 0x03, 0x49, 0xCC, 0x89, 0xC9, 0x03, 0x49, + 0xCC, 0x8C, 0xC9, 0x03, 0x49, 0xCC, 0x8F, 0xC9, + 0x03, 0x49, 0xCC, 0x91, 0xC9, 0x03, 0x49, 0xCC, + 0xA3, 0xB5, 0x03, 0x49, 0xCC, 0xA8, 0xA5, 0x03, + 0x49, 0xCC, 0xB0, 0xB5, 0x03, 0x4A, 0xCC, 0x82, + 0xC9, 0x03, 0x4B, 0xCC, 0x81, 0xC9, 0x03, 0x4B, + // Bytes 30c0 - 30ff + 0xCC, 0x8C, 0xC9, 0x03, 0x4B, 0xCC, 0xA3, 0xB5, + 0x03, 0x4B, 0xCC, 0xA7, 0xA5, 0x03, 0x4B, 0xCC, + 0xB1, 0xB5, 0x03, 0x4C, 0xCC, 0x81, 0xC9, 0x03, + 0x4C, 0xCC, 0x8C, 0xC9, 0x03, 0x4C, 0xCC, 0xA7, + 0xA5, 0x03, 0x4C, 0xCC, 0xAD, 0xB5, 0x03, 0x4C, + 0xCC, 0xB1, 0xB5, 0x03, 0x4D, 0xCC, 0x81, 0xC9, + 0x03, 0x4D, 0xCC, 0x87, 0xC9, 0x03, 0x4D, 0xCC, + 0xA3, 0xB5, 0x03, 0x4E, 0xCC, 0x80, 0xC9, 0x03, + // Bytes 3100 - 313f + 0x4E, 0xCC, 0x81, 0xC9, 0x03, 0x4E, 0xCC, 0x83, + 0xC9, 0x03, 0x4E, 0xCC, 0x87, 0xC9, 0x03, 0x4E, + 0xCC, 0x8C, 0xC9, 0x03, 0x4E, 0xCC, 0xA3, 0xB5, + 0x03, 0x4E, 0xCC, 0xA7, 0xA5, 0x03, 0x4E, 0xCC, + 0xAD, 0xB5, 0x03, 0x4E, 0xCC, 0xB1, 0xB5, 0x03, + 0x4F, 0xCC, 0x80, 0xC9, 0x03, 0x4F, 0xCC, 0x81, + 0xC9, 0x03, 0x4F, 0xCC, 0x86, 0xC9, 0x03, 0x4F, + 0xCC, 0x89, 0xC9, 0x03, 0x4F, 0xCC, 0x8B, 0xC9, + // Bytes 3140 - 317f + 0x03, 0x4F, 0xCC, 0x8C, 0xC9, 0x03, 0x4F, 0xCC, + 0x8F, 0xC9, 0x03, 0x4F, 0xCC, 0x91, 0xC9, 0x03, + 0x50, 0xCC, 0x81, 0xC9, 0x03, 0x50, 0xCC, 0x87, + 0xC9, 0x03, 0x52, 0xCC, 0x81, 0xC9, 0x03, 0x52, + 0xCC, 0x87, 0xC9, 0x03, 0x52, 0xCC, 0x8C, 0xC9, + 0x03, 0x52, 0xCC, 
0x8F, 0xC9, 0x03, 0x52, 0xCC, + 0x91, 0xC9, 0x03, 0x52, 0xCC, 0xA7, 0xA5, 0x03, + 0x52, 0xCC, 0xB1, 0xB5, 0x03, 0x53, 0xCC, 0x82, + // Bytes 3180 - 31bf + 0xC9, 0x03, 0x53, 0xCC, 0x87, 0xC9, 0x03, 0x53, + 0xCC, 0xA6, 0xB5, 0x03, 0x53, 0xCC, 0xA7, 0xA5, + 0x03, 0x54, 0xCC, 0x87, 0xC9, 0x03, 0x54, 0xCC, + 0x8C, 0xC9, 0x03, 0x54, 0xCC, 0xA3, 0xB5, 0x03, + 0x54, 0xCC, 0xA6, 0xB5, 0x03, 0x54, 0xCC, 0xA7, + 0xA5, 0x03, 0x54, 0xCC, 0xAD, 0xB5, 0x03, 0x54, + 0xCC, 0xB1, 0xB5, 0x03, 0x55, 0xCC, 0x80, 0xC9, + 0x03, 0x55, 0xCC, 0x81, 0xC9, 0x03, 0x55, 0xCC, + // Bytes 31c0 - 31ff + 0x82, 0xC9, 0x03, 0x55, 0xCC, 0x86, 0xC9, 0x03, + 0x55, 0xCC, 0x89, 0xC9, 0x03, 0x55, 0xCC, 0x8A, + 0xC9, 0x03, 0x55, 0xCC, 0x8B, 0xC9, 0x03, 0x55, + 0xCC, 0x8C, 0xC9, 0x03, 0x55, 0xCC, 0x8F, 0xC9, + 0x03, 0x55, 0xCC, 0x91, 0xC9, 0x03, 0x55, 0xCC, + 0xA3, 0xB5, 0x03, 0x55, 0xCC, 0xA4, 0xB5, 0x03, + 0x55, 0xCC, 0xA8, 0xA5, 0x03, 0x55, 0xCC, 0xAD, + 0xB5, 0x03, 0x55, 0xCC, 0xB0, 0xB5, 0x03, 0x56, + // Bytes 3200 - 323f + 0xCC, 0x83, 0xC9, 0x03, 0x56, 0xCC, 0xA3, 0xB5, + 0x03, 0x57, 0xCC, 0x80, 0xC9, 0x03, 0x57, 0xCC, + 0x81, 0xC9, 0x03, 0x57, 0xCC, 0x82, 0xC9, 0x03, + 0x57, 0xCC, 0x87, 0xC9, 0x03, 0x57, 0xCC, 0x88, + 0xC9, 0x03, 0x57, 0xCC, 0xA3, 0xB5, 0x03, 0x58, + 0xCC, 0x87, 0xC9, 0x03, 0x58, 0xCC, 0x88, 0xC9, + 0x03, 0x59, 0xCC, 0x80, 0xC9, 0x03, 0x59, 0xCC, + 0x81, 0xC9, 0x03, 0x59, 0xCC, 0x82, 0xC9, 0x03, + // Bytes 3240 - 327f + 0x59, 0xCC, 0x83, 0xC9, 0x03, 0x59, 0xCC, 0x84, + 0xC9, 0x03, 0x59, 0xCC, 0x87, 0xC9, 0x03, 0x59, + 0xCC, 0x88, 0xC9, 0x03, 0x59, 0xCC, 0x89, 0xC9, + 0x03, 0x59, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, 0xCC, + 0x81, 0xC9, 0x03, 0x5A, 0xCC, 0x82, 0xC9, 0x03, + 0x5A, 0xCC, 0x87, 0xC9, 0x03, 0x5A, 0xCC, 0x8C, + 0xC9, 0x03, 0x5A, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, + 0xCC, 0xB1, 0xB5, 0x03, 0x61, 0xCC, 0x80, 0xC9, + // Bytes 3280 - 32bf + 0x03, 0x61, 0xCC, 0x81, 0xC9, 0x03, 0x61, 0xCC, + 0x83, 0xC9, 0x03, 0x61, 0xCC, 0x84, 0xC9, 0x03, + 0x61, 0xCC, 0x89, 0xC9, 0x03, 0x61, 0xCC, 0x8C, + 0xC9, 0x03, 0x61, 0xCC, 0x8F, 0xC9, 0x03, 0x61, + 0xCC, 0x91, 0xC9, 0x03, 0x61, 0xCC, 0xA5, 0xB5, + 0x03, 0x61, 0xCC, 0xA8, 0xA5, 0x03, 0x62, 0xCC, + 0x87, 0xC9, 0x03, 0x62, 0xCC, 0xA3, 0xB5, 0x03, + 0x62, 0xCC, 0xB1, 0xB5, 0x03, 0x63, 0xCC, 0x81, + // Bytes 32c0 - 32ff + 0xC9, 0x03, 0x63, 0xCC, 0x82, 0xC9, 0x03, 0x63, + 0xCC, 0x87, 0xC9, 0x03, 0x63, 0xCC, 0x8C, 0xC9, + 0x03, 0x64, 0xCC, 0x87, 0xC9, 0x03, 0x64, 0xCC, + 0x8C, 0xC9, 0x03, 0x64, 0xCC, 0xA3, 0xB5, 0x03, + 0x64, 0xCC, 0xA7, 0xA5, 0x03, 0x64, 0xCC, 0xAD, + 0xB5, 0x03, 0x64, 0xCC, 0xB1, 0xB5, 0x03, 0x65, + 0xCC, 0x80, 0xC9, 0x03, 0x65, 0xCC, 0x81, 0xC9, + 0x03, 0x65, 0xCC, 0x83, 0xC9, 0x03, 0x65, 0xCC, + // Bytes 3300 - 333f + 0x86, 0xC9, 0x03, 0x65, 0xCC, 0x87, 0xC9, 0x03, + 0x65, 0xCC, 0x88, 0xC9, 0x03, 0x65, 0xCC, 0x89, + 0xC9, 0x03, 0x65, 0xCC, 0x8C, 0xC9, 0x03, 0x65, + 0xCC, 0x8F, 0xC9, 0x03, 0x65, 0xCC, 0x91, 0xC9, + 0x03, 0x65, 0xCC, 0xA8, 0xA5, 0x03, 0x65, 0xCC, + 0xAD, 0xB5, 0x03, 0x65, 0xCC, 0xB0, 0xB5, 0x03, + 0x66, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, 0x81, + 0xC9, 0x03, 0x67, 0xCC, 0x82, 0xC9, 0x03, 0x67, + // Bytes 3340 - 337f + 0xCC, 0x84, 0xC9, 0x03, 0x67, 0xCC, 0x86, 0xC9, + 0x03, 0x67, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, + 0x8C, 0xC9, 0x03, 0x67, 0xCC, 0xA7, 0xA5, 0x03, + 0x68, 0xCC, 0x82, 0xC9, 0x03, 0x68, 0xCC, 0x87, + 0xC9, 0x03, 0x68, 0xCC, 0x88, 0xC9, 0x03, 0x68, + 0xCC, 0x8C, 0xC9, 0x03, 0x68, 0xCC, 0xA3, 0xB5, + 0x03, 0x68, 0xCC, 0xA7, 0xA5, 0x03, 0x68, 0xCC, + 0xAE, 0xB5, 0x03, 0x68, 0xCC, 0xB1, 0xB5, 0x03, + // Bytes 3380 - 33bf + 0x69, 0xCC, 
0x80, 0xC9, 0x03, 0x69, 0xCC, 0x81, + 0xC9, 0x03, 0x69, 0xCC, 0x82, 0xC9, 0x03, 0x69, + 0xCC, 0x83, 0xC9, 0x03, 0x69, 0xCC, 0x84, 0xC9, + 0x03, 0x69, 0xCC, 0x86, 0xC9, 0x03, 0x69, 0xCC, + 0x89, 0xC9, 0x03, 0x69, 0xCC, 0x8C, 0xC9, 0x03, + 0x69, 0xCC, 0x8F, 0xC9, 0x03, 0x69, 0xCC, 0x91, + 0xC9, 0x03, 0x69, 0xCC, 0xA3, 0xB5, 0x03, 0x69, + 0xCC, 0xA8, 0xA5, 0x03, 0x69, 0xCC, 0xB0, 0xB5, + // Bytes 33c0 - 33ff + 0x03, 0x6A, 0xCC, 0x82, 0xC9, 0x03, 0x6A, 0xCC, + 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0x81, 0xC9, 0x03, + 0x6B, 0xCC, 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0xA3, + 0xB5, 0x03, 0x6B, 0xCC, 0xA7, 0xA5, 0x03, 0x6B, + 0xCC, 0xB1, 0xB5, 0x03, 0x6C, 0xCC, 0x81, 0xC9, + 0x03, 0x6C, 0xCC, 0x8C, 0xC9, 0x03, 0x6C, 0xCC, + 0xA7, 0xA5, 0x03, 0x6C, 0xCC, 0xAD, 0xB5, 0x03, + 0x6C, 0xCC, 0xB1, 0xB5, 0x03, 0x6D, 0xCC, 0x81, + // Bytes 3400 - 343f + 0xC9, 0x03, 0x6D, 0xCC, 0x87, 0xC9, 0x03, 0x6D, + 0xCC, 0xA3, 0xB5, 0x03, 0x6E, 0xCC, 0x80, 0xC9, + 0x03, 0x6E, 0xCC, 0x81, 0xC9, 0x03, 0x6E, 0xCC, + 0x83, 0xC9, 0x03, 0x6E, 0xCC, 0x87, 0xC9, 0x03, + 0x6E, 0xCC, 0x8C, 0xC9, 0x03, 0x6E, 0xCC, 0xA3, + 0xB5, 0x03, 0x6E, 0xCC, 0xA7, 0xA5, 0x03, 0x6E, + 0xCC, 0xAD, 0xB5, 0x03, 0x6E, 0xCC, 0xB1, 0xB5, + 0x03, 0x6F, 0xCC, 0x80, 0xC9, 0x03, 0x6F, 0xCC, + // Bytes 3440 - 347f + 0x81, 0xC9, 0x03, 0x6F, 0xCC, 0x86, 0xC9, 0x03, + 0x6F, 0xCC, 0x89, 0xC9, 0x03, 0x6F, 0xCC, 0x8B, + 0xC9, 0x03, 0x6F, 0xCC, 0x8C, 0xC9, 0x03, 0x6F, + 0xCC, 0x8F, 0xC9, 0x03, 0x6F, 0xCC, 0x91, 0xC9, + 0x03, 0x70, 0xCC, 0x81, 0xC9, 0x03, 0x70, 0xCC, + 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x81, 0xC9, 0x03, + 0x72, 0xCC, 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x8C, + 0xC9, 0x03, 0x72, 0xCC, 0x8F, 0xC9, 0x03, 0x72, + // Bytes 3480 - 34bf + 0xCC, 0x91, 0xC9, 0x03, 0x72, 0xCC, 0xA7, 0xA5, + 0x03, 0x72, 0xCC, 0xB1, 0xB5, 0x03, 0x73, 0xCC, + 0x82, 0xC9, 0x03, 0x73, 0xCC, 0x87, 0xC9, 0x03, + 0x73, 0xCC, 0xA6, 0xB5, 0x03, 0x73, 0xCC, 0xA7, + 0xA5, 0x03, 0x74, 0xCC, 0x87, 0xC9, 0x03, 0x74, + 0xCC, 0x88, 0xC9, 0x03, 0x74, 0xCC, 0x8C, 0xC9, + 0x03, 0x74, 0xCC, 0xA3, 0xB5, 0x03, 0x74, 0xCC, + 0xA6, 0xB5, 0x03, 0x74, 0xCC, 0xA7, 0xA5, 0x03, + // Bytes 34c0 - 34ff + 0x74, 0xCC, 0xAD, 0xB5, 0x03, 0x74, 0xCC, 0xB1, + 0xB5, 0x03, 0x75, 0xCC, 0x80, 0xC9, 0x03, 0x75, + 0xCC, 0x81, 0xC9, 0x03, 0x75, 0xCC, 0x82, 0xC9, + 0x03, 0x75, 0xCC, 0x86, 0xC9, 0x03, 0x75, 0xCC, + 0x89, 0xC9, 0x03, 0x75, 0xCC, 0x8A, 0xC9, 0x03, + 0x75, 0xCC, 0x8B, 0xC9, 0x03, 0x75, 0xCC, 0x8C, + 0xC9, 0x03, 0x75, 0xCC, 0x8F, 0xC9, 0x03, 0x75, + 0xCC, 0x91, 0xC9, 0x03, 0x75, 0xCC, 0xA3, 0xB5, + // Bytes 3500 - 353f + 0x03, 0x75, 0xCC, 0xA4, 0xB5, 0x03, 0x75, 0xCC, + 0xA8, 0xA5, 0x03, 0x75, 0xCC, 0xAD, 0xB5, 0x03, + 0x75, 0xCC, 0xB0, 0xB5, 0x03, 0x76, 0xCC, 0x83, + 0xC9, 0x03, 0x76, 0xCC, 0xA3, 0xB5, 0x03, 0x77, + 0xCC, 0x80, 0xC9, 0x03, 0x77, 0xCC, 0x81, 0xC9, + 0x03, 0x77, 0xCC, 0x82, 0xC9, 0x03, 0x77, 0xCC, + 0x87, 0xC9, 0x03, 0x77, 0xCC, 0x88, 0xC9, 0x03, + 0x77, 0xCC, 0x8A, 0xC9, 0x03, 0x77, 0xCC, 0xA3, + // Bytes 3540 - 357f + 0xB5, 0x03, 0x78, 0xCC, 0x87, 0xC9, 0x03, 0x78, + 0xCC, 0x88, 0xC9, 0x03, 0x79, 0xCC, 0x80, 0xC9, + 0x03, 0x79, 0xCC, 0x81, 0xC9, 0x03, 0x79, 0xCC, + 0x82, 0xC9, 0x03, 0x79, 0xCC, 0x83, 0xC9, 0x03, + 0x79, 0xCC, 0x84, 0xC9, 0x03, 0x79, 0xCC, 0x87, + 0xC9, 0x03, 0x79, 0xCC, 0x88, 0xC9, 0x03, 0x79, + 0xCC, 0x89, 0xC9, 0x03, 0x79, 0xCC, 0x8A, 0xC9, + 0x03, 0x79, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, 0xCC, + // Bytes 3580 - 35bf + 0x81, 0xC9, 0x03, 0x7A, 0xCC, 0x82, 0xC9, 0x03, + 0x7A, 0xCC, 0x87, 0xC9, 0x03, 0x7A, 0xCC, 0x8C, + 0xC9, 0x03, 0x7A, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, + 0xCC, 0xB1, 0xB5, 0x04, 0xC2, 
0xA8, 0xCC, 0x80, + 0xCA, 0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x04, + 0xC2, 0xA8, 0xCD, 0x82, 0xCA, 0x04, 0xC3, 0x86, + 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0x86, 0xCC, 0x84, + 0xC9, 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xC9, 0x04, + // Bytes 35c0 - 35ff + 0xC3, 0xA6, 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0xA6, + 0xCC, 0x84, 0xC9, 0x04, 0xC3, 0xB8, 0xCC, 0x81, + 0xC9, 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xC9, 0x04, + 0xC6, 0xB7, 0xCC, 0x8C, 0xC9, 0x04, 0xCA, 0x92, + 0xCC, 0x8C, 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0x91, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0x91, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0x91, 0xCD, 0x85, + // Bytes 3600 - 363f + 0xD9, 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0x95, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x97, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x97, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0x97, 0xCD, 0x85, 0xD9, 0x04, + 0xCE, 0x99, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x99, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0x9F, + // Bytes 3640 - 367f + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x9F, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xC9, 0x04, + 0xCE, 0xA5, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA5, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0xA5, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0xA9, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA9, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xD9, 0x04, + // Bytes 3680 - 36bf + 0xCE, 0xB1, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB1, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB1, 0xCD, 0x85, + 0xD9, 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xB5, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xB7, + 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0xB9, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0xB9, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB9, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB9, 0xCD, 0x82, + // Bytes 36c0 - 36ff + 0xC9, 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xBF, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x81, + 0xCC, 0x93, 0xC9, 0x04, 0xCF, 0x81, 0xCC, 0x94, + 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xC9, 0x04, + 0xCF, 0x85, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x85, + 0xCC, 0x84, 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x86, + 0xC9, 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xC9, 0x04, + 0xCF, 0x89, 0xCD, 0x85, 0xD9, 0x04, 0xCF, 0x92, + // Bytes 3700 - 373f + 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x92, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0x90, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x90, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x93, 0xCC, 0x81, + 0xC9, 0x04, 0xD0, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0x95, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x95, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3740 - 377f + 0xD0, 0x97, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x98, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x84, + 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xC9, 0x04, + 0xD0, 0x98, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x9A, + 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0x9E, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xC9, 0x04, + 0xD0, 0xA3, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xA3, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x8B, + // Bytes 3780 - 37bf + 0xC9, 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xAB, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xAD, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xB3, 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0xB5, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xC9, 0x04, + 
0xD0, 0xB6, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB6, + // Bytes 37c0 - 37ff + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB7, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0xB8, 0xCC, 0x84, 0xC9, 0x04, 0xD0, 0xB8, + 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xC9, 0x04, + 0xD0, 0xBE, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x83, + 0xCC, 0x84, 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x86, + 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3800 - 383f + 0xD1, 0x83, 0xCC, 0x8B, 0xC9, 0x04, 0xD1, 0x87, + 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x8B, 0xCC, 0x88, + 0xC9, 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xC9, 0x04, + 0xD1, 0x96, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0xB4, + 0xCC, 0x8F, 0xC9, 0x04, 0xD1, 0xB5, 0xCC, 0x8F, + 0xC9, 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xC9, 0x04, + 0xD3, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA8, + 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA9, 0xCC, 0x88, + // Bytes 3840 - 387f + 0xC9, 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, 0x04, + 0xD8, 0xA7, 0xD9, 0x94, 0xC9, 0x04, 0xD8, 0xA7, + 0xD9, 0x95, 0xB5, 0x04, 0xD9, 0x88, 0xD9, 0x94, + 0xC9, 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, 0x04, + 0xDB, 0x81, 0xD9, 0x94, 0xC9, 0x04, 0xDB, 0x92, + 0xD9, 0x94, 0xC9, 0x04, 0xDB, 0x95, 0xD9, 0x94, + 0xC9, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + // Bytes 3880 - 38bf + 0x41, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x41, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x41, 0xCC, + 0x86, 0xCC, 0x80, 0xCA, 0x05, 0x41, 0xCC, 0x86, + 0xCC, 0x81, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, + 0x83, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x89, + 0xCA, 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, 0xCA, + 0x05, 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, + 0x41, 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x41, + // Bytes 38c0 - 38ff + 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x41, 0xCC, + 0xA3, 0xCC, 0x86, 0xCA, 0x05, 0x43, 0xCC, 0xA7, + 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, + 0x80, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x81, + 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, 0xCA, + 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, + 0x45, 0xCC, 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x45, + 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, + // Bytes 3900 - 393f + 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x45, 0xCC, 0xA7, + 0xCC, 0x86, 0xCA, 0x05, 0x49, 0xCC, 0x88, 0xCC, + 0x81, 0xCA, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, 0x84, + 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + 0x4F, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x4F, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x4F, 0xCC, + 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x83, + // Bytes 3940 - 397f + 0xCC, 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x83, 0xCC, + 0x88, 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x80, + 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, + 0x05, 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + 0x4F, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x4F, + 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC, + 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, + 0xCC, 0x83, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, + // Bytes 3980 - 39bf + 0x89, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0xA3, + 0xB6, 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, + 0x05, 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, + 0x52, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x53, + 0xCC, 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, + 0x8C, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, 0xA3, + 0xCC, 0x87, 0xCA, 0x05, 0x55, 0xCC, 0x83, 0xCC, + 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x84, 0xCC, 0x88, + // Bytes 39c0 - 39ff + 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, 
+ 0x55, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x55, + 0xCC, 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x55, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x55, 0xCC, 0x9B, + 0xCC, 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x89, + 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + // Bytes 3a00 - 3a3f + 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x61, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x61, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x61, 0xCC, + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x61, 0xCC, 0x86, + 0xCC, 0x80, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, + 0x81, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x83, + 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, 0xCA, + 0x05, 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + // Bytes 3a40 - 3a7f + 0x61, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x61, + 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x61, 0xCC, + 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x61, 0xCC, 0xA3, + 0xCC, 0x86, 0xCA, 0x05, 0x63, 0xCC, 0xA7, 0xCC, + 0x81, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x80, + 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x81, 0xCA, + 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, + 0x65, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x65, + // Bytes 3a80 - 3abf + 0xCC, 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x65, 0xCC, + 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC, 0xA3, + 0xCC, 0x82, 0xCA, 0x05, 0x65, 0xCC, 0xA7, 0xCC, + 0x86, 0xCA, 0x05, 0x69, 0xCC, 0x88, 0xCC, 0x81, + 0xCA, 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, + 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x6F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x6F, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x6F, 0xCC, + // Bytes 3ac0 - 3aff + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x6F, 0xCC, 0x83, + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, + 0x84, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x88, + 0xCA, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, 0xCA, + 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, + 0x6F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, 0x6F, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, + // Bytes 3b00 - 3b3f + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x89, + 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + 0x05, 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, + 0x6F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, 0x72, + 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x73, 0xCC, + 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0x8C, + 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0xA3, 0xCC, + // Bytes 3b40 - 3b7f + 0x87, 0xCA, 0x05, 0x75, 0xCC, 0x83, 0xCC, 0x81, + 0xCA, 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, 0xCA, + 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCA, 0x05, + 0x75, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x75, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x75, 0xCC, + 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x75, 0xCC, 0x9B, + 0xCC, 0x80, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + 0x81, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x83, + // Bytes 3b80 - 3bbf + 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, 0xCA, + 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, 0x05, + 0xE1, 0xBE, 0xBF, 0xCC, 0x80, 0xCA, 0x05, 0xE1, + 0xBE, 0xBF, 0xCC, 0x81, 0xCA, 0x05, 0xE1, 0xBE, + 0xBF, 0xCD, 0x82, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, + 0xCC, 0x80, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCC, + 0x81, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, 0x82, + 0xCA, 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, 0x05, + // Bytes 3bc0 - 3bff + 0x05, 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x87, 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, + 0x92, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x94, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, 0xCC, + 0xB8, 0x05, 0x05, 
0xE2, 0x88, 0x88, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3c00 - 3c3f + 0xE2, 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x88, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x85, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + // Bytes 3c40 - 3c7f + 0x89, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB6, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xBC, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + // Bytes 3c80 - 3cbf + 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x86, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0x92, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0xAB, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB2, + // Bytes 3cc0 - 3cff + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, 0x05, + 0x06, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + // Bytes 3d00 - 3d3f + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3d40 - 3d7f + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + // Bytes 3d80 - 3dbf + 0x06, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + // Bytes 3dc0 - 3dff + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + // Bytes 3e00 - 3e3f + 0x06, 0xCE, 
0xB9, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3e40 - 3e7f + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + // Bytes 3e80 - 3ebf + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + // Bytes 3ec0 - 3eff + 0x06, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, 0x85, + 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, 0x11, + // Bytes 3f00 - 3f3f + 0x06, 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f40 - 3f7f + 0x06, 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA1, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f80 - 3fbf + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, 0x0D, + // Bytes 3fc0 - 3fff + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4000 - 403f + 0x06, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB9, 0xE3, 
0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4040 - 407f + 0x06, 0xE3, 0x83, 0x84, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4080 - 40bf + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 40c0 - 40ff + 0x06, 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, 0x0D, + 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + // Bytes 4100 - 413f + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + // Bytes 4140 - 417f + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, + // Bytes 4180 - 41bf + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + // Bytes 41c0 - 41ff + 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + // Bytes 4200 - 423f + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 
0x08, 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, 0x82, + // Bytes 4240 - 427f + 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, 0x9B, 0xF0, + 0x91, 0x82, 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, + 0xA5, 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x42, 0xC2, + 0xB4, 0x01, 0x43, 0x20, 0xCC, 0x81, 0xC9, 0x43, + 0x20, 0xCC, 0x83, 0xC9, 0x43, 0x20, 0xCC, 0x84, + 0xC9, 0x43, 0x20, 0xCC, 0x85, 0xC9, 0x43, 0x20, + 0xCC, 0x86, 0xC9, 0x43, 0x20, 0xCC, 0x87, 0xC9, + 0x43, 0x20, 0xCC, 0x88, 0xC9, 0x43, 0x20, 0xCC, + // Bytes 4280 - 42bf + 0x8A, 0xC9, 0x43, 0x20, 0xCC, 0x8B, 0xC9, 0x43, + 0x20, 0xCC, 0x93, 0xC9, 0x43, 0x20, 0xCC, 0x94, + 0xC9, 0x43, 0x20, 0xCC, 0xA7, 0xA5, 0x43, 0x20, + 0xCC, 0xA8, 0xA5, 0x43, 0x20, 0xCC, 0xB3, 0xB5, + 0x43, 0x20, 0xCD, 0x82, 0xC9, 0x43, 0x20, 0xCD, + 0x85, 0xD9, 0x43, 0x20, 0xD9, 0x8B, 0x59, 0x43, + 0x20, 0xD9, 0x8C, 0x5D, 0x43, 0x20, 0xD9, 0x8D, + 0x61, 0x43, 0x20, 0xD9, 0x8E, 0x65, 0x43, 0x20, + // Bytes 42c0 - 42ff + 0xD9, 0x8F, 0x69, 0x43, 0x20, 0xD9, 0x90, 0x6D, + 0x43, 0x20, 0xD9, 0x91, 0x71, 0x43, 0x20, 0xD9, + 0x92, 0x75, 0x43, 0x41, 0xCC, 0x8A, 0xC9, 0x43, + 0x73, 0xCC, 0x87, 0xC9, 0x44, 0x20, 0xE3, 0x82, + 0x99, 0x0D, 0x44, 0x20, 0xE3, 0x82, 0x9A, 0x0D, + 0x44, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x44, 0xCE, + 0x91, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x95, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0x97, 0xCC, 0x81, 0xC9, + // Bytes 4300 - 433f + 0x44, 0xCE, 0x99, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0x9F, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, 0x88, 0xC9, + 0x44, 0xCE, 0xA9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xB1, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, + 0x44, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xBF, 0xCC, 0x81, 0xC9, 0x44, 0xCF, 0x85, 0xCC, + // Bytes 4340 - 437f + 0x81, 0xC9, 0x44, 0xCF, 0x89, 0xCC, 0x81, 0xC9, + 0x44, 0xD7, 0x90, 0xD6, 0xB7, 0x31, 0x44, 0xD7, + 0x90, 0xD6, 0xB8, 0x35, 0x44, 0xD7, 0x90, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x91, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x91, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x92, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x93, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x94, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x95, 0xD6, 0xB9, 0x39, 0x44, 0xD7, + // Bytes 4380 - 43bf + 0x95, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x96, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x98, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x99, 0xD6, 0xB4, 0x25, 0x44, 0xD7, + 0x99, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9A, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x9B, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x9C, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9E, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, 0x41, + // Bytes 43c0 - 43ff + 0x44, 0xD7, 0xA1, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA3, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, 0x49, + 0x44, 0xD7, 0xA6, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA7, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA8, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0xA9, 0xD7, 0x81, 0x4D, 0x44, 0xD7, + 0xA9, 0xD7, 0x82, 0x51, 0x44, 0xD7, 0xAA, 0xD6, + // Bytes 4400 - 443f + 0xBC, 0x41, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, 0x31, + 0x44, 0xD8, 0xA7, 0xD9, 0x8B, 0x59, 0x44, 0xD8, + 0xA7, 0xD9, 0x93, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, + 0x94, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x44, 0xD8, 0xB0, 0xD9, 0xB0, 0x79, 0x44, 0xD8, + 0xB1, 0xD9, 0xB0, 0x79, 0x44, 0xD9, 0x80, 0xD9, + 0x8B, 0x59, 0x44, 0xD9, 0x80, 0xD9, 0x8E, 0x65, + 0x44, 0xD9, 0x80, 0xD9, 0x8F, 0x69, 0x44, 0xD9, + // Bytes 4440 - 447f + 0x80, 0xD9, 0x90, 0x6D, 0x44, 0xD9, 0x80, 0xD9, + 0x91, 0x71, 0x44, 0xD9, 0x80, 0xD9, 0x92, 0x75, 
+ 0x44, 0xD9, 0x87, 0xD9, 0xB0, 0x79, 0x44, 0xD9, + 0x88, 0xD9, 0x94, 0xC9, 0x44, 0xD9, 0x89, 0xD9, + 0xB0, 0x79, 0x44, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, + 0x44, 0xDB, 0x92, 0xD9, 0x94, 0xC9, 0x44, 0xDB, + 0x95, 0xD9, 0x94, 0xC9, 0x45, 0x20, 0xCC, 0x88, + 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCC, + // Bytes 4480 - 44bf + 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCD, 0x82, + 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x45, + 0x20, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x45, 0x20, + 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, + 0x94, 0xCC, 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x45, 0x20, 0xD9, 0x8C, 0xD9, + 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8D, 0xD9, 0x91, + // Bytes 44c0 - 44ff + 0x72, 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, 0x72, + 0x45, 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x72, 0x45, + 0x20, 0xD9, 0x90, 0xD9, 0x91, 0x72, 0x45, 0x20, + 0xD9, 0x91, 0xD9, 0xB0, 0x7A, 0x45, 0xE2, 0xAB, + 0x9D, 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xCF, 0x85, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xD7, 0xA9, 0xD6, + 0xBC, 0xD7, 0x81, 0x4E, 0x46, 0xD7, 0xA9, 0xD6, + // Bytes 4500 - 453f + 0xBC, 0xD7, 0x82, 0x52, 0x46, 0xD9, 0x80, 0xD9, + 0x8E, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x8F, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x90, 0xD9, 0x91, 0x72, 0x46, 0xE0, 0xA4, 0x95, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x96, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x97, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x9C, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA1, + // Bytes 4540 - 457f + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA2, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAB, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAF, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA1, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA2, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xAF, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x96, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x97, + // Bytes 4580 - 45bf + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x9C, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xAB, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB2, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB8, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA1, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA2, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE0, 0xBE, 0xB3, + // Bytes 45c0 - 45ff + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0x0D, 0x48, 0xF0, 0x9D, 0x85, + 0x97, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, + 0x48, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xAD, 0x48, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + 0x9D, 0x85, 0xA5, 0xAD, 0x49, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x49, + // Bytes 4600 - 463f + 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, + 0x80, 0x9E, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, + 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xB0, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, + 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + // Bytes 4640 - 467f + 0xB1, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, 0xAE, + 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0, + 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xAF, 
0xAE, 0x4C, 0xF0, 0x9D, 0x86, + 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + // Bytes 4680 - 46bf + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, + 0x83, 0x41, 0xCC, 0x82, 0xC9, 0x83, 0x41, 0xCC, + 0x86, 0xC9, 0x83, 0x41, 0xCC, 0x87, 0xC9, 0x83, + 0x41, 0xCC, 0x88, 0xC9, 0x83, 0x41, 0xCC, 0x8A, + 0xC9, 0x83, 0x41, 0xCC, 0xA3, 0xB5, 0x83, 0x43, + 0xCC, 0xA7, 0xA5, 0x83, 0x45, 0xCC, 0x82, 0xC9, + 0x83, 0x45, 0xCC, 0x84, 0xC9, 0x83, 0x45, 0xCC, + 0xA3, 0xB5, 0x83, 0x45, 0xCC, 0xA7, 0xA5, 0x83, + // Bytes 46c0 - 46ff + 0x49, 0xCC, 0x88, 0xC9, 0x83, 0x4C, 0xCC, 0xA3, + 0xB5, 0x83, 0x4F, 0xCC, 0x82, 0xC9, 0x83, 0x4F, + 0xCC, 0x83, 0xC9, 0x83, 0x4F, 0xCC, 0x84, 0xC9, + 0x83, 0x4F, 0xCC, 0x87, 0xC9, 0x83, 0x4F, 0xCC, + 0x88, 0xC9, 0x83, 0x4F, 0xCC, 0x9B, 0xAD, 0x83, + 0x4F, 0xCC, 0xA3, 0xB5, 0x83, 0x4F, 0xCC, 0xA8, + 0xA5, 0x83, 0x52, 0xCC, 0xA3, 0xB5, 0x83, 0x53, + 0xCC, 0x81, 0xC9, 0x83, 0x53, 0xCC, 0x8C, 0xC9, + // Bytes 4700 - 473f + 0x83, 0x53, 0xCC, 0xA3, 0xB5, 0x83, 0x55, 0xCC, + 0x83, 0xC9, 0x83, 0x55, 0xCC, 0x84, 0xC9, 0x83, + 0x55, 0xCC, 0x88, 0xC9, 0x83, 0x55, 0xCC, 0x9B, + 0xAD, 0x83, 0x61, 0xCC, 0x82, 0xC9, 0x83, 0x61, + 0xCC, 0x86, 0xC9, 0x83, 0x61, 0xCC, 0x87, 0xC9, + 0x83, 0x61, 0xCC, 0x88, 0xC9, 0x83, 0x61, 0xCC, + 0x8A, 0xC9, 0x83, 0x61, 0xCC, 0xA3, 0xB5, 0x83, + 0x63, 0xCC, 0xA7, 0xA5, 0x83, 0x65, 0xCC, 0x82, + // Bytes 4740 - 477f + 0xC9, 0x83, 0x65, 0xCC, 0x84, 0xC9, 0x83, 0x65, + 0xCC, 0xA3, 0xB5, 0x83, 0x65, 0xCC, 0xA7, 0xA5, + 0x83, 0x69, 0xCC, 0x88, 0xC9, 0x83, 0x6C, 0xCC, + 0xA3, 0xB5, 0x83, 0x6F, 0xCC, 0x82, 0xC9, 0x83, + 0x6F, 0xCC, 0x83, 0xC9, 0x83, 0x6F, 0xCC, 0x84, + 0xC9, 0x83, 0x6F, 0xCC, 0x87, 0xC9, 0x83, 0x6F, + 0xCC, 0x88, 0xC9, 0x83, 0x6F, 0xCC, 0x9B, 0xAD, + 0x83, 0x6F, 0xCC, 0xA3, 0xB5, 0x83, 0x6F, 0xCC, + // Bytes 4780 - 47bf + 0xA8, 0xA5, 0x83, 0x72, 0xCC, 0xA3, 0xB5, 0x83, + 0x73, 0xCC, 0x81, 0xC9, 0x83, 0x73, 0xCC, 0x8C, + 0xC9, 0x83, 0x73, 0xCC, 0xA3, 0xB5, 0x83, 0x75, + 0xCC, 0x83, 0xC9, 0x83, 0x75, 0xCC, 0x84, 0xC9, + 0x83, 0x75, 0xCC, 0x88, 0xC9, 0x83, 0x75, 0xCC, + 0x9B, 0xAD, 0x84, 0xCE, 0x91, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x91, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x95, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x95, 0xCC, + // Bytes 47c0 - 47ff + 0x94, 0xC9, 0x84, 0xCE, 0x97, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x97, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x99, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x99, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0x9F, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x9F, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0xA5, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCC, 0x80, 0xC9, 0x84, 0xCE, + // Bytes 4800 - 483f + 0xB1, 0xCC, 0x81, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCD, 0x82, 0xC9, 0x84, 0xCE, + 0xB5, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB5, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x80, 0xC9, + 0x84, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, 0x84, 0xCE, + 0xB7, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCD, 0x82, 0xC9, + // Bytes 4840 - 487f + 0x84, 0xCE, 0xB9, 0xCC, 0x88, 0xC9, 0x84, 0xCE, + 0xB9, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB9, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xBF, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0xBF, 0xCC, 0x94, 0xC9, 0x84, 0xCF, + 0x85, 0xCC, 0x88, 0xC9, 0x84, 0xCF, 0x85, 0xCC, + 0x93, 0xC9, 0x84, 0xCF, 0x85, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCC, 0x80, 0xC9, 0x84, 0xCF, + 0x89, 0xCC, 0x81, 0xC9, 0x84, 0xCF, 0x89, 0xCC, + // Bytes 4880 - 48bf + 0x93, 0xC9, 
0x84, 0xCF, 0x89, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCD, 0x82, 0xC9, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + // Bytes 48c0 - 48ff + 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + // Bytes 4900 - 493f + 0xA9, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + // Bytes 4940 - 497f + 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + // Bytes 4980 - 49bf + 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x42, 0xCC, + 0x80, 0xC9, 0x32, 0x42, 0xCC, 0x81, 0xC9, 0x32, + 0x42, 0xCC, 0x93, 0xC9, 0x32, 0x43, 0xE1, 0x85, + // Bytes 49c0 - 49ff + 0xA1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA5, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA9, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, 0x43, + // Bytes 4a00 - 4a3f + 0xE1, 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB5, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, 0x01, + // Bytes 4a40 - 4a7f + 0x00, 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, 0x01, + 0x00, 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, 0x01, + 0x00, 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x32, + 0x43, 0xE3, 0x82, 0x99, 0x0D, 0x03, 0x43, 0xE3, + // Bytes 4a80 - 4abf + 0x82, 0x9A, 0x0D, 0x03, 0x46, 0xE0, 0xBD, 0xB1, + 0xE0, 0xBD, 0xB2, 0x9E, 0x26, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBD, 0xB4, 0xA2, 0x26, 0x46, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 
0x9E, 0x26, 0x00, + 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. Total size: 10586 bytes (10.34 KiB). Checksum: dd926e82067bee11. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 46: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 46 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 48 blocks, 3072 entries, 6144 bytes +// The third block is the zero block. +var nfcValues = [3072]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 
0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, + // Block 0x5, offset 0x140 + 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 
0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x35a1, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3721, 0x2c1: 0x372d, 0x2c3: 0x371b, + 0x2c6: 0xa000, 0x2c7: 0x3709, + 0x2cc: 0x375d, 0x2cd: 0x3745, 0x2ce: 0x376f, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3751, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37d5, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x3733, 0x302: 0x37b7, + 0x310: 0x370f, 0x311: 0x3793, + 0x312: 0x3715, 0x313: 0x3799, 0x316: 0x3727, 0x317: 0x37ab, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x3829, 0x31b: 0x382f, 0x31c: 0x3739, 0x31d: 0x37bd, + 0x31e: 0x373f, 0x31f: 0x37c3, 0x322: 0x374b, 0x323: 0x37cf, + 0x324: 0x3757, 0x325: 0x37db, 0x326: 0x3763, 0x327: 0x37e7, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x3835, 0x32b: 0x383b, 0x32c: 0x378d, 0x32d: 0x3811, 0x32e: 0x3769, 0x32f: 0x37ed, + 0x330: 0x3775, 0x331: 0x37f9, 0x332: 0x377b, 0x333: 0x37ff, 0x334: 0x3781, 0x335: 0x3805, + 0x338: 0x3787, 0x339: 0x380b, + // Block 0xd, offset 0x340 + 0x351: 0x812d, + 0x352: 0x8132, 0x353: 0x8132, 0x354: 0x8132, 0x355: 0x8132, 0x356: 0x812d, 0x357: 0x8132, + 0x358: 0x8132, 0x359: 0x8132, 0x35a: 0x812e, 0x35b: 0x812d, 0x35c: 0x8132, 0x35d: 0x8132, + 0x35e: 0x8132, 0x35f: 0x8132, 0x360: 0x8132, 0x361: 0x8132, 0x362: 0x812d, 0x363: 0x812d, + 0x364: 0x812d, 0x365: 0x812d, 0x366: 0x812d, 0x367: 0x812d, 0x368: 0x8132, 0x369: 0x8132, + 0x36a: 0x812d, 0x36b: 0x8132, 0x36c: 0x8132, 0x36d: 0x812e, 0x36e: 0x8131, 0x36f: 0x8132, + 0x370: 0x8105, 
0x371: 0x8106, 0x372: 0x8107, 0x373: 0x8108, 0x374: 0x8109, 0x375: 0x810a, + 0x376: 0x810b, 0x377: 0x810c, 0x378: 0x810d, 0x379: 0x810e, 0x37a: 0x810e, 0x37b: 0x810f, + 0x37c: 0x8110, 0x37d: 0x8111, 0x37f: 0x8112, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8116, + 0x38c: 0x8117, 0x38d: 0x8118, 0x38e: 0x8119, 0x38f: 0x811a, 0x390: 0x811b, 0x391: 0x811c, + 0x392: 0x811d, 0x393: 0x9932, 0x394: 0x9932, 0x395: 0x992d, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x8132, 0x39b: 0x8132, 0x39c: 0x812d, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x812d, + 0x3b0: 0x811e, + // Block 0xf, offset 0x3c0 + 0x3d3: 0x812d, 0x3d4: 0x8132, 0x3d5: 0x8132, 0x3d6: 0x8132, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x8132, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x8132, 0x3e0: 0x8132, 0x3e1: 0x8132, 0x3e3: 0x812d, + 0x3e4: 0x8132, 0x3e5: 0x8132, 0x3e6: 0x812d, 0x3e7: 0x8132, 0x3e8: 0x8132, 0x3e9: 0x812d, + 0x3ea: 0x8132, 0x3eb: 0x8132, 0x3ec: 0x8132, 0x3ed: 0x812d, 0x3ee: 0x812d, 0x3ef: 0x812d, + 0x3f0: 0x8116, 0x3f1: 0x8117, 0x3f2: 0x8118, 0x3f3: 0x8132, 0x3f4: 0x8132, 0x3f5: 0x8132, + 0x3f6: 0x812d, 0x3f7: 0x8132, 0x3f8: 0x8132, 0x3f9: 0x812d, 0x3fa: 0x812d, 0x3fb: 0x8132, + 0x3fc: 0x8132, 0x3fd: 0x8132, 0x3fe: 0x8132, 0x3ff: 0x8132, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2d26, 0x407: 0xa000, 0x408: 0x2d2e, 0x409: 0xa000, 0x40a: 0x2d36, 0x40b: 0xa000, + 0x40c: 0x2d3e, 0x40d: 0xa000, 0x40e: 0x2d46, 0x411: 0xa000, + 0x412: 0x2d4e, + 0x434: 0x8102, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2d56, + 0x43c: 0xa000, 0x43d: 0x2d5e, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x8132, 0x441: 0x8132, 0x442: 0x812d, 0x443: 0x8132, 0x444: 0x8132, 0x445: 0x8132, + 0x446: 0x8132, 0x447: 0x8132, 0x448: 0x8132, 0x449: 0x8132, 0x44a: 0x812d, 0x44b: 0x8132, + 0x44c: 0x8132, 0x44d: 0x8135, 0x44e: 0x812a, 0x44f: 0x812d, 0x450: 0x8129, 0x451: 0x8132, + 0x452: 0x8132, 0x453: 0x8132, 0x454: 0x8132, 0x455: 0x8132, 0x456: 0x8132, 0x457: 0x8132, + 0x458: 0x8132, 0x459: 0x8132, 0x45a: 0x8132, 0x45b: 0x8132, 0x45c: 0x8132, 0x45d: 0x8132, + 0x45e: 0x8132, 0x45f: 0x8132, 0x460: 0x8132, 0x461: 0x8132, 0x462: 0x8132, 0x463: 0x8132, + 0x464: 0x8132, 0x465: 0x8132, 0x466: 0x8132, 0x467: 0x8132, 0x468: 0x8132, 0x469: 0x8132, + 0x46a: 0x8132, 0x46b: 0x8132, 0x46c: 0x8132, 0x46d: 0x8132, 0x46e: 0x8132, 0x46f: 0x8132, + 0x470: 0x8132, 0x471: 0x8132, 0x472: 0x8132, 0x473: 0x8132, 0x474: 0x8132, 0x475: 0x8132, + 0x476: 0x8133, 0x477: 0x8131, 0x478: 0x8131, 0x479: 0x812d, 0x47b: 0x8132, + 0x47c: 0x8134, 0x47d: 0x812d, 0x47e: 0x8132, 0x47f: 0x812d, + // Block 0x12, offset 0x480 + 0x480: 0x2f97, 0x481: 0x32a3, 0x482: 0x2fa1, 0x483: 0x32ad, 0x484: 0x2fa6, 0x485: 0x32b2, + 0x486: 0x2fab, 0x487: 0x32b7, 0x488: 0x38cc, 0x489: 0x3a5b, 0x48a: 0x2fc4, 0x48b: 0x32d0, + 0x48c: 0x2fce, 0x48d: 0x32da, 0x48e: 0x2fdd, 0x48f: 0x32e9, 0x490: 0x2fd3, 0x491: 0x32df, + 0x492: 0x2fd8, 0x493: 0x32e4, 0x494: 0x38ef, 0x495: 0x3a7e, 0x496: 0x38f6, 0x497: 0x3a85, + 0x498: 0x3019, 0x499: 0x3325, 0x49a: 0x301e, 0x49b: 0x332a, 0x49c: 0x3904, 0x49d: 0x3a93, + 0x49e: 0x3023, 0x49f: 0x332f, 0x4a0: 0x3032, 0x4a1: 0x333e, 0x4a2: 0x3050, 0x4a3: 0x335c, + 0x4a4: 0x305f, 0x4a5: 0x336b, 0x4a6: 0x3055, 0x4a7: 0x3361, 0x4a8: 0x3064, 0x4a9: 0x3370, + 0x4aa: 0x3069, 0x4ab: 0x3375, 0x4ac: 0x30af, 0x4ad: 0x33bb, 0x4ae: 0x390b, 0x4af: 0x3a9a, + 0x4b0: 0x30b9, 0x4b1: 0x33ca, 0x4b2: 0x30c3, 0x4b3: 0x33d4, 0x4b4: 0x30cd, 0x4b5: 0x33de, + 0x4b6: 0x46c4, 0x4b7: 0x4755, 
0x4b8: 0x3912, 0x4b9: 0x3aa1, 0x4ba: 0x30e6, 0x4bb: 0x33f7, + 0x4bc: 0x30e1, 0x4bd: 0x33f2, 0x4be: 0x30eb, 0x4bf: 0x33fc, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x30f0, 0x4c1: 0x3401, 0x4c2: 0x30f5, 0x4c3: 0x3406, 0x4c4: 0x3109, 0x4c5: 0x341a, + 0x4c6: 0x3113, 0x4c7: 0x3424, 0x4c8: 0x3122, 0x4c9: 0x3433, 0x4ca: 0x311d, 0x4cb: 0x342e, + 0x4cc: 0x3935, 0x4cd: 0x3ac4, 0x4ce: 0x3943, 0x4cf: 0x3ad2, 0x4d0: 0x394a, 0x4d1: 0x3ad9, + 0x4d2: 0x3951, 0x4d3: 0x3ae0, 0x4d4: 0x314f, 0x4d5: 0x3460, 0x4d6: 0x3154, 0x4d7: 0x3465, + 0x4d8: 0x315e, 0x4d9: 0x346f, 0x4da: 0x46f1, 0x4db: 0x4782, 0x4dc: 0x3997, 0x4dd: 0x3b26, + 0x4de: 0x3177, 0x4df: 0x3488, 0x4e0: 0x3181, 0x4e1: 0x3492, 0x4e2: 0x4700, 0x4e3: 0x4791, + 0x4e4: 0x399e, 0x4e5: 0x3b2d, 0x4e6: 0x39a5, 0x4e7: 0x3b34, 0x4e8: 0x39ac, 0x4e9: 0x3b3b, + 0x4ea: 0x3190, 0x4eb: 0x34a1, 0x4ec: 0x319a, 0x4ed: 0x34b0, 0x4ee: 0x31ae, 0x4ef: 0x34c4, + 0x4f0: 0x31a9, 0x4f1: 0x34bf, 0x4f2: 0x31ea, 0x4f3: 0x3500, 0x4f4: 0x31f9, 0x4f5: 0x350f, + 0x4f6: 0x31f4, 0x4f7: 0x350a, 0x4f8: 0x39b3, 0x4f9: 0x3b42, 0x4fa: 0x39ba, 0x4fb: 0x3b49, + 0x4fc: 0x31fe, 0x4fd: 0x3514, 0x4fe: 0x3203, 0x4ff: 0x3519, + // Block 0x14, offset 0x500 + 0x500: 0x3208, 0x501: 0x351e, 0x502: 0x320d, 0x503: 0x3523, 0x504: 0x321c, 0x505: 0x3532, + 0x506: 0x3217, 0x507: 0x352d, 0x508: 0x3221, 0x509: 0x353c, 0x50a: 0x3226, 0x50b: 0x3541, + 0x50c: 0x322b, 0x50d: 0x3546, 0x50e: 0x3249, 0x50f: 0x3564, 0x510: 0x3262, 0x511: 0x3582, + 0x512: 0x3271, 0x513: 0x3591, 0x514: 0x3276, 0x515: 0x3596, 0x516: 0x337a, 0x517: 0x34a6, + 0x518: 0x3537, 0x519: 0x3573, 0x51b: 0x35d1, + 0x520: 0x46a1, 0x521: 0x4732, 0x522: 0x2f83, 0x523: 0x328f, + 0x524: 0x3878, 0x525: 0x3a07, 0x526: 0x3871, 0x527: 0x3a00, 0x528: 0x3886, 0x529: 0x3a15, + 0x52a: 0x387f, 0x52b: 0x3a0e, 0x52c: 0x38be, 0x52d: 0x3a4d, 0x52e: 0x3894, 0x52f: 0x3a23, + 0x530: 0x388d, 0x531: 0x3a1c, 0x532: 0x38a2, 0x533: 0x3a31, 0x534: 0x389b, 0x535: 0x3a2a, + 0x536: 0x38c5, 0x537: 0x3a54, 0x538: 0x46b5, 0x539: 0x4746, 0x53a: 0x3000, 0x53b: 0x330c, + 0x53c: 0x2fec, 0x53d: 0x32f8, 0x53e: 0x38da, 0x53f: 0x3a69, + // Block 0x15, offset 0x540 + 0x540: 0x38d3, 0x541: 0x3a62, 0x542: 0x38e8, 0x543: 0x3a77, 0x544: 0x38e1, 0x545: 0x3a70, + 0x546: 0x38fd, 0x547: 0x3a8c, 0x548: 0x3091, 0x549: 0x339d, 0x54a: 0x30a5, 0x54b: 0x33b1, + 0x54c: 0x46e7, 0x54d: 0x4778, 0x54e: 0x3136, 0x54f: 0x3447, 0x550: 0x3920, 0x551: 0x3aaf, + 0x552: 0x3919, 0x553: 0x3aa8, 0x554: 0x392e, 0x555: 0x3abd, 0x556: 0x3927, 0x557: 0x3ab6, + 0x558: 0x3989, 0x559: 0x3b18, 0x55a: 0x396d, 0x55b: 0x3afc, 0x55c: 0x3966, 0x55d: 0x3af5, + 0x55e: 0x397b, 0x55f: 0x3b0a, 0x560: 0x3974, 0x561: 0x3b03, 0x562: 0x3982, 0x563: 0x3b11, + 0x564: 0x31e5, 0x565: 0x34fb, 0x566: 0x31c7, 0x567: 0x34dd, 0x568: 0x39e4, 0x569: 0x3b73, + 0x56a: 0x39dd, 0x56b: 0x3b6c, 0x56c: 0x39f2, 0x56d: 0x3b81, 0x56e: 0x39eb, 0x56f: 0x3b7a, + 0x570: 0x39f9, 0x571: 0x3b88, 0x572: 0x3230, 0x573: 0x354b, 0x574: 0x3258, 0x575: 0x3578, + 0x576: 0x3253, 0x577: 0x356e, 0x578: 0x323f, 0x579: 0x355a, + // Block 0x16, offset 0x580 + 0x580: 0x4804, 0x581: 0x480a, 0x582: 0x491e, 0x583: 0x4936, 0x584: 0x4926, 0x585: 0x493e, + 0x586: 0x492e, 0x587: 0x4946, 0x588: 0x47aa, 0x589: 0x47b0, 0x58a: 0x488e, 0x58b: 0x48a6, + 0x58c: 0x4896, 0x58d: 0x48ae, 0x58e: 0x489e, 0x58f: 0x48b6, 0x590: 0x4816, 0x591: 0x481c, + 0x592: 0x3db8, 0x593: 0x3dc8, 0x594: 0x3dc0, 0x595: 0x3dd0, + 0x598: 0x47b6, 0x599: 0x47bc, 0x59a: 0x3ce8, 0x59b: 0x3cf8, 0x59c: 0x3cf0, 0x59d: 0x3d00, + 0x5a0: 0x482e, 0x5a1: 0x4834, 0x5a2: 0x494e, 0x5a3: 0x4966, + 0x5a4: 0x4956, 0x5a5: 0x496e, 0x5a6: 
0x495e, 0x5a7: 0x4976, 0x5a8: 0x47c2, 0x5a9: 0x47c8, + 0x5aa: 0x48be, 0x5ab: 0x48d6, 0x5ac: 0x48c6, 0x5ad: 0x48de, 0x5ae: 0x48ce, 0x5af: 0x48e6, + 0x5b0: 0x4846, 0x5b1: 0x484c, 0x5b2: 0x3e18, 0x5b3: 0x3e30, 0x5b4: 0x3e20, 0x5b5: 0x3e38, + 0x5b6: 0x3e28, 0x5b7: 0x3e40, 0x5b8: 0x47ce, 0x5b9: 0x47d4, 0x5ba: 0x3d18, 0x5bb: 0x3d30, + 0x5bc: 0x3d20, 0x5bd: 0x3d38, 0x5be: 0x3d28, 0x5bf: 0x3d40, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x4852, 0x5c1: 0x4858, 0x5c2: 0x3e48, 0x5c3: 0x3e58, 0x5c4: 0x3e50, 0x5c5: 0x3e60, + 0x5c8: 0x47da, 0x5c9: 0x47e0, 0x5ca: 0x3d48, 0x5cb: 0x3d58, + 0x5cc: 0x3d50, 0x5cd: 0x3d60, 0x5d0: 0x4864, 0x5d1: 0x486a, + 0x5d2: 0x3e80, 0x5d3: 0x3e98, 0x5d4: 0x3e88, 0x5d5: 0x3ea0, 0x5d6: 0x3e90, 0x5d7: 0x3ea8, + 0x5d9: 0x47e6, 0x5db: 0x3d68, 0x5dd: 0x3d70, + 0x5df: 0x3d78, 0x5e0: 0x487c, 0x5e1: 0x4882, 0x5e2: 0x497e, 0x5e3: 0x4996, + 0x5e4: 0x4986, 0x5e5: 0x499e, 0x5e6: 0x498e, 0x5e7: 0x49a6, 0x5e8: 0x47ec, 0x5e9: 0x47f2, + 0x5ea: 0x48ee, 0x5eb: 0x4906, 0x5ec: 0x48f6, 0x5ed: 0x490e, 0x5ee: 0x48fe, 0x5ef: 0x4916, + 0x5f0: 0x47f8, 0x5f1: 0x431e, 0x5f2: 0x3691, 0x5f3: 0x4324, 0x5f4: 0x4822, 0x5f5: 0x432a, + 0x5f6: 0x36a3, 0x5f7: 0x4330, 0x5f8: 0x36c1, 0x5f9: 0x4336, 0x5fa: 0x36d9, 0x5fb: 0x433c, + 0x5fc: 0x4870, 0x5fd: 0x4342, + // Block 0x18, offset 0x600 + 0x600: 0x3da0, 0x601: 0x3da8, 0x602: 0x4184, 0x603: 0x41a2, 0x604: 0x418e, 0x605: 0x41ac, + 0x606: 0x4198, 0x607: 0x41b6, 0x608: 0x3cd8, 0x609: 0x3ce0, 0x60a: 0x40d0, 0x60b: 0x40ee, + 0x60c: 0x40da, 0x60d: 0x40f8, 0x60e: 0x40e4, 0x60f: 0x4102, 0x610: 0x3de8, 0x611: 0x3df0, + 0x612: 0x41c0, 0x613: 0x41de, 0x614: 0x41ca, 0x615: 0x41e8, 0x616: 0x41d4, 0x617: 0x41f2, + 0x618: 0x3d08, 0x619: 0x3d10, 0x61a: 0x410c, 0x61b: 0x412a, 0x61c: 0x4116, 0x61d: 0x4134, + 0x61e: 0x4120, 0x61f: 0x413e, 0x620: 0x3ec0, 0x621: 0x3ec8, 0x622: 0x41fc, 0x623: 0x421a, + 0x624: 0x4206, 0x625: 0x4224, 0x626: 0x4210, 0x627: 0x422e, 0x628: 0x3d80, 0x629: 0x3d88, + 0x62a: 0x4148, 0x62b: 0x4166, 0x62c: 0x4152, 0x62d: 0x4170, 0x62e: 0x415c, 0x62f: 0x417a, + 0x630: 0x3685, 0x631: 0x367f, 0x632: 0x3d90, 0x633: 0x368b, 0x634: 0x3d98, + 0x636: 0x4810, 0x637: 0x3db0, 0x638: 0x35f5, 0x639: 0x35ef, 0x63a: 0x35e3, 0x63b: 0x42ee, + 0x63c: 0x35fb, 0x63d: 0x8100, 0x63e: 0x01d3, 0x63f: 0xa100, + // Block 0x19, offset 0x640 + 0x640: 0x8100, 0x641: 0x35a7, 0x642: 0x3dd8, 0x643: 0x369d, 0x644: 0x3de0, + 0x646: 0x483a, 0x647: 0x3df8, 0x648: 0x3601, 0x649: 0x42f4, 0x64a: 0x360d, 0x64b: 0x42fa, + 0x64c: 0x3619, 0x64d: 0x3b8f, 0x64e: 0x3b96, 0x64f: 0x3b9d, 0x650: 0x36b5, 0x651: 0x36af, + 0x652: 0x3e00, 0x653: 0x44e4, 0x656: 0x36bb, 0x657: 0x3e10, + 0x658: 0x3631, 0x659: 0x362b, 0x65a: 0x361f, 0x65b: 0x4300, 0x65d: 0x3ba4, + 0x65e: 0x3bab, 0x65f: 0x3bb2, 0x660: 0x36eb, 0x661: 0x36e5, 0x662: 0x3e68, 0x663: 0x44ec, + 0x664: 0x36cd, 0x665: 0x36d3, 0x666: 0x36f1, 0x667: 0x3e78, 0x668: 0x3661, 0x669: 0x365b, + 0x66a: 0x364f, 0x66b: 0x430c, 0x66c: 0x3649, 0x66d: 0x359b, 0x66e: 0x42e8, 0x66f: 0x0081, + 0x672: 0x3eb0, 0x673: 0x36f7, 0x674: 0x3eb8, + 0x676: 0x4888, 0x677: 0x3ed0, 0x678: 0x363d, 0x679: 0x4306, 0x67a: 0x366d, 0x67b: 0x4318, + 0x67c: 0x3679, 0x67d: 0x4256, 0x67e: 0xa100, + // Block 0x1a, offset 0x680 + 0x681: 0x3c06, 0x683: 0xa000, 0x684: 0x3c0d, 0x685: 0xa000, + 0x687: 0x3c14, 0x688: 0xa000, 0x689: 0x3c1b, + 0x68d: 0xa000, + 0x6a0: 0x2f65, 0x6a1: 0xa000, 0x6a2: 0x3c29, + 0x6a4: 0xa000, 0x6a5: 0xa000, + 0x6ad: 0x3c22, 0x6ae: 0x2f60, 0x6af: 0x2f6a, + 0x6b0: 0x3c30, 0x6b1: 0x3c37, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3c3e, 0x6b5: 0x3c45, + 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 
0x3c4c, 0x6b9: 0x3c53, 0x6ba: 0xa000, 0x6bb: 0xa000, + 0x6bc: 0xa000, 0x6bd: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3c5a, 0x6c1: 0x3c61, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3c76, 0x6c5: 0x3c7d, + 0x6c6: 0xa000, 0x6c7: 0xa000, 0x6c8: 0x3c84, 0x6c9: 0x3c8b, + 0x6d1: 0xa000, + 0x6d2: 0xa000, + 0x6e2: 0xa000, + 0x6e8: 0xa000, 0x6e9: 0xa000, + 0x6eb: 0xa000, 0x6ec: 0x3ca0, 0x6ed: 0x3ca7, 0x6ee: 0x3cae, 0x6ef: 0x3cb5, + 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000, + // Block 0x1c, offset 0x700 + 0x706: 0xa000, 0x70b: 0xa000, + 0x70c: 0x3f08, 0x70d: 0xa000, 0x70e: 0x3f10, 0x70f: 0xa000, 0x710: 0x3f18, 0x711: 0xa000, + 0x712: 0x3f20, 0x713: 0xa000, 0x714: 0x3f28, 0x715: 0xa000, 0x716: 0x3f30, 0x717: 0xa000, + 0x718: 0x3f38, 0x719: 0xa000, 0x71a: 0x3f40, 0x71b: 0xa000, 0x71c: 0x3f48, 0x71d: 0xa000, + 0x71e: 0x3f50, 0x71f: 0xa000, 0x720: 0x3f58, 0x721: 0xa000, 0x722: 0x3f60, + 0x724: 0xa000, 0x725: 0x3f68, 0x726: 0xa000, 0x727: 0x3f70, 0x728: 0xa000, 0x729: 0x3f78, + 0x72f: 0xa000, + 0x730: 0x3f80, 0x731: 0x3f88, 0x732: 0xa000, 0x733: 0x3f90, 0x734: 0x3f98, 0x735: 0xa000, + 0x736: 0x3fa0, 0x737: 0x3fa8, 0x738: 0xa000, 0x739: 0x3fb0, 0x73a: 0x3fb8, 0x73b: 0xa000, + 0x73c: 0x3fc0, 0x73d: 0x3fc8, + // Block 0x1d, offset 0x740 + 0x754: 0x3f00, + 0x759: 0x9903, 0x75a: 0x9903, 0x75b: 0x8100, 0x75c: 0x8100, 0x75d: 0xa000, + 0x75e: 0x3fd0, + 0x766: 0xa000, + 0x76b: 0xa000, 0x76c: 0x3fe0, 0x76d: 0xa000, 0x76e: 0x3fe8, 0x76f: 0xa000, + 0x770: 0x3ff0, 0x771: 0xa000, 0x772: 0x3ff8, 0x773: 0xa000, 0x774: 0x4000, 0x775: 0xa000, + 0x776: 0x4008, 0x777: 0xa000, 0x778: 0x4010, 0x779: 0xa000, 0x77a: 0x4018, 0x77b: 0xa000, + 0x77c: 0x4020, 0x77d: 0xa000, 0x77e: 0x4028, 0x77f: 0xa000, + // Block 0x1e, offset 0x780 + 0x780: 0x4030, 0x781: 0xa000, 0x782: 0x4038, 0x784: 0xa000, 0x785: 0x4040, + 0x786: 0xa000, 0x787: 0x4048, 0x788: 0xa000, 0x789: 0x4050, + 0x78f: 0xa000, 0x790: 0x4058, 0x791: 0x4060, + 0x792: 0xa000, 0x793: 0x4068, 0x794: 0x4070, 0x795: 0xa000, 0x796: 0x4078, 0x797: 0x4080, + 0x798: 0xa000, 0x799: 0x4088, 0x79a: 0x4090, 0x79b: 0xa000, 0x79c: 0x4098, 0x79d: 0x40a0, + 0x7af: 0xa000, + 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x3fd8, + 0x7b7: 0x40a8, 0x7b8: 0x40b0, 0x7b9: 0x40b8, 0x7ba: 0x40c0, + 0x7bd: 0xa000, 0x7be: 0x40c8, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x1377, 0x7c1: 0x0cfb, 0x7c2: 0x13d3, 0x7c3: 0x139f, 0x7c4: 0x0e57, 0x7c5: 0x06eb, + 0x7c6: 0x08df, 0x7c7: 0x162b, 0x7c8: 0x162b, 0x7c9: 0x0a0b, 0x7ca: 0x145f, 0x7cb: 0x0943, + 0x7cc: 0x0a07, 0x7cd: 0x0bef, 0x7ce: 0x0fcf, 0x7cf: 0x115f, 0x7d0: 0x1297, 0x7d1: 0x12d3, + 0x7d2: 0x1307, 0x7d3: 0x141b, 0x7d4: 0x0d73, 0x7d5: 0x0dff, 0x7d6: 0x0eab, 0x7d7: 0x0f43, + 0x7d8: 0x125f, 0x7d9: 0x1447, 0x7da: 0x1573, 0x7db: 0x070f, 0x7dc: 0x08b3, 0x7dd: 0x0d87, + 0x7de: 0x0ecf, 0x7df: 0x1293, 0x7e0: 0x15c3, 0x7e1: 0x0ab3, 0x7e2: 0x0e77, 0x7e3: 0x1283, + 0x7e4: 0x1317, 0x7e5: 0x0c23, 0x7e6: 0x11bb, 0x7e7: 0x12df, 0x7e8: 0x0b1f, 0x7e9: 0x0d0f, + 0x7ea: 0x0e17, 0x7eb: 0x0f1b, 0x7ec: 0x1427, 0x7ed: 0x074f, 0x7ee: 0x07e7, 0x7ef: 0x0853, + 0x7f0: 0x0c8b, 0x7f1: 0x0d7f, 0x7f2: 0x0ecb, 0x7f3: 0x0fef, 0x7f4: 0x1177, 0x7f5: 0x128b, + 0x7f6: 0x12a3, 0x7f7: 0x13c7, 0x7f8: 0x14ef, 0x7f9: 0x15a3, 0x7fa: 0x15bf, 0x7fb: 0x102b, + 0x7fc: 0x106b, 0x7fd: 0x1123, 0x7fe: 0x1243, 0x7ff: 0x147b, + // Block 0x20, offset 0x800 + 0x800: 0x15cb, 0x801: 0x134b, 0x802: 0x09c7, 0x803: 0x0b3b, 0x804: 0x10db, 0x805: 0x119b, + 0x806: 0x0eff, 0x807: 0x1033, 0x808: 0x1397, 0x809: 0x14e7, 0x80a: 0x09c3, 0x80b: 0x0a8f, + 0x80c: 0x0d77, 0x80d: 0x0e2b, 0x80e: 
0x0e5f, 0x80f: 0x1113, 0x810: 0x113b, 0x811: 0x14a7, + 0x812: 0x084f, 0x813: 0x11a7, 0x814: 0x07f3, 0x815: 0x07ef, 0x816: 0x1097, 0x817: 0x1127, + 0x818: 0x125b, 0x819: 0x14af, 0x81a: 0x1367, 0x81b: 0x0c27, 0x81c: 0x0d73, 0x81d: 0x1357, + 0x81e: 0x06f7, 0x81f: 0x0a63, 0x820: 0x0b93, 0x821: 0x0f2f, 0x822: 0x0faf, 0x823: 0x0873, + 0x824: 0x103b, 0x825: 0x075f, 0x826: 0x0b77, 0x827: 0x06d7, 0x828: 0x0deb, 0x829: 0x0ca3, + 0x82a: 0x110f, 0x82b: 0x08c7, 0x82c: 0x09b3, 0x82d: 0x0ffb, 0x82e: 0x1263, 0x82f: 0x133b, + 0x830: 0x0db7, 0x831: 0x13f7, 0x832: 0x0de3, 0x833: 0x0c37, 0x834: 0x121b, 0x835: 0x0c57, + 0x836: 0x0fab, 0x837: 0x072b, 0x838: 0x07a7, 0x839: 0x07eb, 0x83a: 0x0d53, 0x83b: 0x10fb, + 0x83c: 0x11f3, 0x83d: 0x1347, 0x83e: 0x145b, 0x83f: 0x085b, + // Block 0x21, offset 0x840 + 0x840: 0x090f, 0x841: 0x0a17, 0x842: 0x0b2f, 0x843: 0x0cbf, 0x844: 0x0e7b, 0x845: 0x103f, + 0x846: 0x1497, 0x847: 0x157b, 0x848: 0x15cf, 0x849: 0x15e7, 0x84a: 0x0837, 0x84b: 0x0cf3, + 0x84c: 0x0da3, 0x84d: 0x13eb, 0x84e: 0x0afb, 0x84f: 0x0bd7, 0x850: 0x0bf3, 0x851: 0x0c83, + 0x852: 0x0e6b, 0x853: 0x0eb7, 0x854: 0x0f67, 0x855: 0x108b, 0x856: 0x112f, 0x857: 0x1193, + 0x858: 0x13db, 0x859: 0x126b, 0x85a: 0x1403, 0x85b: 0x147f, 0x85c: 0x080f, 0x85d: 0x083b, + 0x85e: 0x0923, 0x85f: 0x0ea7, 0x860: 0x12f3, 0x861: 0x133b, 0x862: 0x0b1b, 0x863: 0x0b8b, + 0x864: 0x0c4f, 0x865: 0x0daf, 0x866: 0x10d7, 0x867: 0x0f23, 0x868: 0x073b, 0x869: 0x097f, + 0x86a: 0x0a63, 0x86b: 0x0ac7, 0x86c: 0x0b97, 0x86d: 0x0f3f, 0x86e: 0x0f5b, 0x86f: 0x116b, + 0x870: 0x118b, 0x871: 0x1463, 0x872: 0x14e3, 0x873: 0x14f3, 0x874: 0x152f, 0x875: 0x0753, + 0x876: 0x107f, 0x877: 0x144f, 0x878: 0x14cb, 0x879: 0x0baf, 0x87a: 0x0717, 0x87b: 0x0777, + 0x87c: 0x0a67, 0x87d: 0x0a87, 0x87e: 0x0caf, 0x87f: 0x0d73, + // Block 0x22, offset 0x880 + 0x880: 0x0ec3, 0x881: 0x0fcb, 0x882: 0x1277, 0x883: 0x1417, 0x884: 0x1623, 0x885: 0x0ce3, + 0x886: 0x14a3, 0x887: 0x0833, 0x888: 0x0d2f, 0x889: 0x0d3b, 0x88a: 0x0e0f, 0x88b: 0x0e47, + 0x88c: 0x0f4b, 0x88d: 0x0fa7, 0x88e: 0x1027, 0x88f: 0x110b, 0x890: 0x153b, 0x891: 0x07af, + 0x892: 0x0c03, 0x893: 0x14b3, 0x894: 0x0767, 0x895: 0x0aab, 0x896: 0x0e2f, 0x897: 0x13df, + 0x898: 0x0b67, 0x899: 0x0bb7, 0x89a: 0x0d43, 0x89b: 0x0f2f, 0x89c: 0x14bb, 0x89d: 0x0817, + 0x89e: 0x08ff, 0x89f: 0x0a97, 0x8a0: 0x0cd3, 0x8a1: 0x0d1f, 0x8a2: 0x0d5f, 0x8a3: 0x0df3, + 0x8a4: 0x0f47, 0x8a5: 0x0fbb, 0x8a6: 0x1157, 0x8a7: 0x12f7, 0x8a8: 0x1303, 0x8a9: 0x1457, + 0x8aa: 0x14d7, 0x8ab: 0x0883, 0x8ac: 0x0e4b, 0x8ad: 0x0903, 0x8ae: 0x0ec7, 0x8af: 0x0f6b, + 0x8b0: 0x1287, 0x8b1: 0x14bf, 0x8b2: 0x15ab, 0x8b3: 0x15d3, 0x8b4: 0x0d37, 0x8b5: 0x0e27, + 0x8b6: 0x11c3, 0x8b7: 0x10b7, 0x8b8: 0x10c3, 0x8b9: 0x10e7, 0x8ba: 0x0f17, 0x8bb: 0x0e9f, + 0x8bc: 0x1363, 0x8bd: 0x0733, 0x8be: 0x122b, 0x8bf: 0x081b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x080b, 0x8c1: 0x0b0b, 0x8c2: 0x0c2b, 0x8c3: 0x10f3, 0x8c4: 0x0a53, 0x8c5: 0x0e03, + 0x8c6: 0x0cef, 0x8c7: 0x13e7, 0x8c8: 0x12e7, 0x8c9: 0x14ab, 0x8ca: 0x1323, 0x8cb: 0x0b27, + 0x8cc: 0x0787, 0x8cd: 0x095b, 0x8d0: 0x09af, + 0x8d2: 0x0cdf, 0x8d5: 0x07f7, 0x8d6: 0x0f1f, 0x8d7: 0x0fe3, + 0x8d8: 0x1047, 0x8d9: 0x1063, 0x8da: 0x1067, 0x8db: 0x107b, 0x8dc: 0x14fb, 0x8dd: 0x10eb, + 0x8de: 0x116f, 0x8e0: 0x128f, 0x8e2: 0x1353, + 0x8e5: 0x1407, 0x8e6: 0x1433, + 0x8ea: 0x154f, 0x8eb: 0x1553, 0x8ec: 0x1557, 0x8ed: 0x15bb, 0x8ee: 0x142b, 0x8ef: 0x14c7, + 0x8f0: 0x0757, 0x8f1: 0x077b, 0x8f2: 0x078f, 0x8f3: 0x084b, 0x8f4: 0x0857, 0x8f5: 0x0897, + 0x8f6: 0x094b, 0x8f7: 0x0967, 0x8f8: 0x096f, 0x8f9: 0x09ab, 0x8fa: 0x09b7, 0x8fb: 0x0a93, + 
0x8fc: 0x0a9b, 0x8fd: 0x0ba3, 0x8fe: 0x0bcb, 0x8ff: 0x0bd3, + // Block 0x24, offset 0x900 + 0x900: 0x0beb, 0x901: 0x0c97, 0x902: 0x0cc7, 0x903: 0x0ce7, 0x904: 0x0d57, 0x905: 0x0e1b, + 0x906: 0x0e37, 0x907: 0x0e67, 0x908: 0x0ebb, 0x909: 0x0edb, 0x90a: 0x0f4f, 0x90b: 0x102f, + 0x90c: 0x104b, 0x90d: 0x1053, 0x90e: 0x104f, 0x90f: 0x1057, 0x910: 0x105b, 0x911: 0x105f, + 0x912: 0x1073, 0x913: 0x1077, 0x914: 0x109b, 0x915: 0x10af, 0x916: 0x10cb, 0x917: 0x112f, + 0x918: 0x1137, 0x919: 0x113f, 0x91a: 0x1153, 0x91b: 0x117b, 0x91c: 0x11cb, 0x91d: 0x11ff, + 0x91e: 0x11ff, 0x91f: 0x1267, 0x920: 0x130f, 0x921: 0x1327, 0x922: 0x135b, 0x923: 0x135f, + 0x924: 0x13a3, 0x925: 0x13a7, 0x926: 0x13ff, 0x927: 0x1407, 0x928: 0x14db, 0x929: 0x151f, + 0x92a: 0x1537, 0x92b: 0x0b9b, 0x92c: 0x171e, 0x92d: 0x11e3, + 0x930: 0x06df, 0x931: 0x07e3, 0x932: 0x07a3, 0x933: 0x074b, 0x934: 0x078b, 0x935: 0x07b7, + 0x936: 0x0847, 0x937: 0x0863, 0x938: 0x094b, 0x939: 0x0937, 0x93a: 0x0947, 0x93b: 0x0963, + 0x93c: 0x09af, 0x93d: 0x09bf, 0x93e: 0x0a03, 0x93f: 0x0a0f, + // Block 0x25, offset 0x940 + 0x940: 0x0a2b, 0x941: 0x0a3b, 0x942: 0x0b23, 0x943: 0x0b2b, 0x944: 0x0b5b, 0x945: 0x0b7b, + 0x946: 0x0bab, 0x947: 0x0bc3, 0x948: 0x0bb3, 0x949: 0x0bd3, 0x94a: 0x0bc7, 0x94b: 0x0beb, + 0x94c: 0x0c07, 0x94d: 0x0c5f, 0x94e: 0x0c6b, 0x94f: 0x0c73, 0x950: 0x0c9b, 0x951: 0x0cdf, + 0x952: 0x0d0f, 0x953: 0x0d13, 0x954: 0x0d27, 0x955: 0x0da7, 0x956: 0x0db7, 0x957: 0x0e0f, + 0x958: 0x0e5b, 0x959: 0x0e53, 0x95a: 0x0e67, 0x95b: 0x0e83, 0x95c: 0x0ebb, 0x95d: 0x1013, + 0x95e: 0x0edf, 0x95f: 0x0f13, 0x960: 0x0f1f, 0x961: 0x0f5f, 0x962: 0x0f7b, 0x963: 0x0f9f, + 0x964: 0x0fc3, 0x965: 0x0fc7, 0x966: 0x0fe3, 0x967: 0x0fe7, 0x968: 0x0ff7, 0x969: 0x100b, + 0x96a: 0x1007, 0x96b: 0x1037, 0x96c: 0x10b3, 0x96d: 0x10cb, 0x96e: 0x10e3, 0x96f: 0x111b, + 0x970: 0x112f, 0x971: 0x114b, 0x972: 0x117b, 0x973: 0x122f, 0x974: 0x1257, 0x975: 0x12cb, + 0x976: 0x1313, 0x977: 0x131f, 0x978: 0x1327, 0x979: 0x133f, 0x97a: 0x1353, 0x97b: 0x1343, + 0x97c: 0x135b, 0x97d: 0x1357, 0x97e: 0x134f, 0x97f: 0x135f, + // Block 0x26, offset 0x980 + 0x980: 0x136b, 0x981: 0x13a7, 0x982: 0x13e3, 0x983: 0x1413, 0x984: 0x144b, 0x985: 0x146b, + 0x986: 0x14b7, 0x987: 0x14db, 0x988: 0x14fb, 0x989: 0x150f, 0x98a: 0x151f, 0x98b: 0x152b, + 0x98c: 0x1537, 0x98d: 0x158b, 0x98e: 0x162b, 0x98f: 0x16b5, 0x990: 0x16b0, 0x991: 0x16e2, + 0x992: 0x0607, 0x993: 0x062f, 0x994: 0x0633, 0x995: 0x1764, 0x996: 0x1791, 0x997: 0x1809, + 0x998: 0x1617, 0x999: 0x1627, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x06fb, 0x9c1: 0x06f3, 0x9c2: 0x0703, 0x9c3: 0x1647, 0x9c4: 0x0747, 0x9c5: 0x0757, + 0x9c6: 0x075b, 0x9c7: 0x0763, 0x9c8: 0x076b, 0x9c9: 0x076f, 0x9ca: 0x077b, 0x9cb: 0x0773, + 0x9cc: 0x05b3, 0x9cd: 0x165b, 0x9ce: 0x078f, 0x9cf: 0x0793, 0x9d0: 0x0797, 0x9d1: 0x07b3, + 0x9d2: 0x164c, 0x9d3: 0x05b7, 0x9d4: 0x079f, 0x9d5: 0x07bf, 0x9d6: 0x1656, 0x9d7: 0x07cf, + 0x9d8: 0x07d7, 0x9d9: 0x0737, 0x9da: 0x07df, 0x9db: 0x07e3, 0x9dc: 0x1831, 0x9dd: 0x07ff, + 0x9de: 0x0807, 0x9df: 0x05bf, 0x9e0: 0x081f, 0x9e1: 0x0823, 0x9e2: 0x082b, 0x9e3: 0x082f, + 0x9e4: 0x05c3, 0x9e5: 0x0847, 0x9e6: 0x084b, 0x9e7: 0x0857, 0x9e8: 0x0863, 0x9e9: 0x0867, + 0x9ea: 0x086b, 0x9eb: 0x0873, 0x9ec: 0x0893, 0x9ed: 0x0897, 0x9ee: 0x089f, 0x9ef: 0x08af, + 0x9f0: 0x08b7, 0x9f1: 0x08bb, 0x9f2: 0x08bb, 0x9f3: 0x08bb, 0x9f4: 0x166a, 0x9f5: 0x0e93, + 0x9f6: 0x08cf, 0x9f7: 0x08d7, 0x9f8: 0x166f, 0x9f9: 0x08e3, 0x9fa: 0x08eb, 0x9fb: 0x08f3, + 0x9fc: 0x091b, 0x9fd: 0x0907, 0x9fe: 0x0913, 0x9ff: 0x0917, + // Block 0x28, offset 0xa00 + 0xa00: 0x091f, 0xa01: 
0x0927, 0xa02: 0x092b, 0xa03: 0x0933, 0xa04: 0x093b, 0xa05: 0x093f, + 0xa06: 0x093f, 0xa07: 0x0947, 0xa08: 0x094f, 0xa09: 0x0953, 0xa0a: 0x095f, 0xa0b: 0x0983, + 0xa0c: 0x0967, 0xa0d: 0x0987, 0xa0e: 0x096b, 0xa0f: 0x0973, 0xa10: 0x080b, 0xa11: 0x09cf, + 0xa12: 0x0997, 0xa13: 0x099b, 0xa14: 0x099f, 0xa15: 0x0993, 0xa16: 0x09a7, 0xa17: 0x09a3, + 0xa18: 0x09bb, 0xa19: 0x1674, 0xa1a: 0x09d7, 0xa1b: 0x09db, 0xa1c: 0x09e3, 0xa1d: 0x09ef, + 0xa1e: 0x09f7, 0xa1f: 0x0a13, 0xa20: 0x1679, 0xa21: 0x167e, 0xa22: 0x0a1f, 0xa23: 0x0a23, + 0xa24: 0x0a27, 0xa25: 0x0a1b, 0xa26: 0x0a2f, 0xa27: 0x05c7, 0xa28: 0x05cb, 0xa29: 0x0a37, + 0xa2a: 0x0a3f, 0xa2b: 0x0a3f, 0xa2c: 0x1683, 0xa2d: 0x0a5b, 0xa2e: 0x0a5f, 0xa2f: 0x0a63, + 0xa30: 0x0a6b, 0xa31: 0x1688, 0xa32: 0x0a73, 0xa33: 0x0a77, 0xa34: 0x0b4f, 0xa35: 0x0a7f, + 0xa36: 0x05cf, 0xa37: 0x0a8b, 0xa38: 0x0a9b, 0xa39: 0x0aa7, 0xa3a: 0x0aa3, 0xa3b: 0x1692, + 0xa3c: 0x0aaf, 0xa3d: 0x1697, 0xa3e: 0x0abb, 0xa3f: 0x0ab7, + // Block 0x29, offset 0xa40 + 0xa40: 0x0abf, 0xa41: 0x0acf, 0xa42: 0x0ad3, 0xa43: 0x05d3, 0xa44: 0x0ae3, 0xa45: 0x0aeb, + 0xa46: 0x0aef, 0xa47: 0x0af3, 0xa48: 0x05d7, 0xa49: 0x169c, 0xa4a: 0x05db, 0xa4b: 0x0b0f, + 0xa4c: 0x0b13, 0xa4d: 0x0b17, 0xa4e: 0x0b1f, 0xa4f: 0x1863, 0xa50: 0x0b37, 0xa51: 0x16a6, + 0xa52: 0x16a6, 0xa53: 0x11d7, 0xa54: 0x0b47, 0xa55: 0x0b47, 0xa56: 0x05df, 0xa57: 0x16c9, + 0xa58: 0x179b, 0xa59: 0x0b57, 0xa5a: 0x0b5f, 0xa5b: 0x05e3, 0xa5c: 0x0b73, 0xa5d: 0x0b83, + 0xa5e: 0x0b87, 0xa5f: 0x0b8f, 0xa60: 0x0b9f, 0xa61: 0x05eb, 0xa62: 0x05e7, 0xa63: 0x0ba3, + 0xa64: 0x16ab, 0xa65: 0x0ba7, 0xa66: 0x0bbb, 0xa67: 0x0bbf, 0xa68: 0x0bc3, 0xa69: 0x0bbf, + 0xa6a: 0x0bcf, 0xa6b: 0x0bd3, 0xa6c: 0x0be3, 0xa6d: 0x0bdb, 0xa6e: 0x0bdf, 0xa6f: 0x0be7, + 0xa70: 0x0beb, 0xa71: 0x0bef, 0xa72: 0x0bfb, 0xa73: 0x0bff, 0xa74: 0x0c17, 0xa75: 0x0c1f, + 0xa76: 0x0c2f, 0xa77: 0x0c43, 0xa78: 0x16ba, 0xa79: 0x0c3f, 0xa7a: 0x0c33, 0xa7b: 0x0c4b, + 0xa7c: 0x0c53, 0xa7d: 0x0c67, 0xa7e: 0x16bf, 0xa7f: 0x0c6f, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0c63, 0xa81: 0x0c5b, 0xa82: 0x05ef, 0xa83: 0x0c77, 0xa84: 0x0c7f, 0xa85: 0x0c87, + 0xa86: 0x0c7b, 0xa87: 0x05f3, 0xa88: 0x0c97, 0xa89: 0x0c9f, 0xa8a: 0x16c4, 0xa8b: 0x0ccb, + 0xa8c: 0x0cff, 0xa8d: 0x0cdb, 0xa8e: 0x05ff, 0xa8f: 0x0ce7, 0xa90: 0x05fb, 0xa91: 0x05f7, + 0xa92: 0x07c3, 0xa93: 0x07c7, 0xa94: 0x0d03, 0xa95: 0x0ceb, 0xa96: 0x11ab, 0xa97: 0x0663, + 0xa98: 0x0d0f, 0xa99: 0x0d13, 0xa9a: 0x0d17, 0xa9b: 0x0d2b, 0xa9c: 0x0d23, 0xa9d: 0x16dd, + 0xa9e: 0x0603, 0xa9f: 0x0d3f, 0xaa0: 0x0d33, 0xaa1: 0x0d4f, 0xaa2: 0x0d57, 0xaa3: 0x16e7, + 0xaa4: 0x0d5b, 0xaa5: 0x0d47, 0xaa6: 0x0d63, 0xaa7: 0x0607, 0xaa8: 0x0d67, 0xaa9: 0x0d6b, + 0xaaa: 0x0d6f, 0xaab: 0x0d7b, 0xaac: 0x16ec, 0xaad: 0x0d83, 0xaae: 0x060b, 0xaaf: 0x0d8f, + 0xab0: 0x16f1, 0xab1: 0x0d93, 0xab2: 0x060f, 0xab3: 0x0d9f, 0xab4: 0x0dab, 0xab5: 0x0db7, + 0xab6: 0x0dbb, 0xab7: 0x16f6, 0xab8: 0x168d, 0xab9: 0x16fb, 0xaba: 0x0ddb, 0xabb: 0x1700, + 0xabc: 0x0de7, 0xabd: 0x0def, 0xabe: 0x0ddf, 0xabf: 0x0dfb, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0e0b, 0xac1: 0x0e1b, 0xac2: 0x0e0f, 0xac3: 0x0e13, 0xac4: 0x0e1f, 0xac5: 0x0e23, + 0xac6: 0x1705, 0xac7: 0x0e07, 0xac8: 0x0e3b, 0xac9: 0x0e3f, 0xaca: 0x0613, 0xacb: 0x0e53, + 0xacc: 0x0e4f, 0xacd: 0x170a, 0xace: 0x0e33, 0xacf: 0x0e6f, 0xad0: 0x170f, 0xad1: 0x1714, + 0xad2: 0x0e73, 0xad3: 0x0e87, 0xad4: 0x0e83, 0xad5: 0x0e7f, 0xad6: 0x0617, 0xad7: 0x0e8b, + 0xad8: 0x0e9b, 0xad9: 0x0e97, 0xada: 0x0ea3, 0xadb: 0x1651, 0xadc: 0x0eb3, 0xadd: 0x1719, + 0xade: 0x0ebf, 0xadf: 0x1723, 0xae0: 0x0ed3, 0xae1: 0x0edf, 0xae2: 0x0ef3, 
0xae3: 0x1728, + 0xae4: 0x0f07, 0xae5: 0x0f0b, 0xae6: 0x172d, 0xae7: 0x1732, 0xae8: 0x0f27, 0xae9: 0x0f37, + 0xaea: 0x061b, 0xaeb: 0x0f3b, 0xaec: 0x061f, 0xaed: 0x061f, 0xaee: 0x0f53, 0xaef: 0x0f57, + 0xaf0: 0x0f5f, 0xaf1: 0x0f63, 0xaf2: 0x0f6f, 0xaf3: 0x0623, 0xaf4: 0x0f87, 0xaf5: 0x1737, + 0xaf6: 0x0fa3, 0xaf7: 0x173c, 0xaf8: 0x0faf, 0xaf9: 0x16a1, 0xafa: 0x0fbf, 0xafb: 0x1741, + 0xafc: 0x1746, 0xafd: 0x174b, 0xafe: 0x0627, 0xaff: 0x062b, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0ff7, 0xb01: 0x1755, 0xb02: 0x1750, 0xb03: 0x175a, 0xb04: 0x175f, 0xb05: 0x0fff, + 0xb06: 0x1003, 0xb07: 0x1003, 0xb08: 0x100b, 0xb09: 0x0633, 0xb0a: 0x100f, 0xb0b: 0x0637, + 0xb0c: 0x063b, 0xb0d: 0x1769, 0xb0e: 0x1023, 0xb0f: 0x102b, 0xb10: 0x1037, 0xb11: 0x063f, + 0xb12: 0x176e, 0xb13: 0x105b, 0xb14: 0x1773, 0xb15: 0x1778, 0xb16: 0x107b, 0xb17: 0x1093, + 0xb18: 0x0643, 0xb19: 0x109b, 0xb1a: 0x109f, 0xb1b: 0x10a3, 0xb1c: 0x177d, 0xb1d: 0x1782, + 0xb1e: 0x1782, 0xb1f: 0x10bb, 0xb20: 0x0647, 0xb21: 0x1787, 0xb22: 0x10cf, 0xb23: 0x10d3, + 0xb24: 0x064b, 0xb25: 0x178c, 0xb26: 0x10ef, 0xb27: 0x064f, 0xb28: 0x10ff, 0xb29: 0x10f7, + 0xb2a: 0x1107, 0xb2b: 0x1796, 0xb2c: 0x111f, 0xb2d: 0x0653, 0xb2e: 0x112b, 0xb2f: 0x1133, + 0xb30: 0x1143, 0xb31: 0x0657, 0xb32: 0x17a0, 0xb33: 0x17a5, 0xb34: 0x065b, 0xb35: 0x17aa, + 0xb36: 0x115b, 0xb37: 0x17af, 0xb38: 0x1167, 0xb39: 0x1173, 0xb3a: 0x117b, 0xb3b: 0x17b4, + 0xb3c: 0x17b9, 0xb3d: 0x118f, 0xb3e: 0x17be, 0xb3f: 0x1197, + // Block 0x2d, offset 0xb40 + 0xb40: 0x16ce, 0xb41: 0x065f, 0xb42: 0x11af, 0xb43: 0x11b3, 0xb44: 0x0667, 0xb45: 0x11b7, + 0xb46: 0x0a33, 0xb47: 0x17c3, 0xb48: 0x17c8, 0xb49: 0x16d3, 0xb4a: 0x16d8, 0xb4b: 0x11d7, + 0xb4c: 0x11db, 0xb4d: 0x13f3, 0xb4e: 0x066b, 0xb4f: 0x1207, 0xb50: 0x1203, 0xb51: 0x120b, + 0xb52: 0x083f, 0xb53: 0x120f, 0xb54: 0x1213, 0xb55: 0x1217, 0xb56: 0x121f, 0xb57: 0x17cd, + 0xb58: 0x121b, 0xb59: 0x1223, 0xb5a: 0x1237, 0xb5b: 0x123b, 0xb5c: 0x1227, 0xb5d: 0x123f, + 0xb5e: 0x1253, 0xb5f: 0x1267, 0xb60: 0x1233, 0xb61: 0x1247, 0xb62: 0x124b, 0xb63: 0x124f, + 0xb64: 0x17d2, 0xb65: 0x17dc, 0xb66: 0x17d7, 0xb67: 0x066f, 0xb68: 0x126f, 0xb69: 0x1273, + 0xb6a: 0x127b, 0xb6b: 0x17f0, 0xb6c: 0x127f, 0xb6d: 0x17e1, 0xb6e: 0x0673, 0xb6f: 0x0677, + 0xb70: 0x17e6, 0xb71: 0x17eb, 0xb72: 0x067b, 0xb73: 0x129f, 0xb74: 0x12a3, 0xb75: 0x12a7, + 0xb76: 0x12ab, 0xb77: 0x12b7, 0xb78: 0x12b3, 0xb79: 0x12bf, 0xb7a: 0x12bb, 0xb7b: 0x12cb, + 0xb7c: 0x12c3, 0xb7d: 0x12c7, 0xb7e: 0x12cf, 0xb7f: 0x067f, + // Block 0x2e, offset 0xb80 + 0xb80: 0x12d7, 0xb81: 0x12db, 0xb82: 0x0683, 0xb83: 0x12eb, 0xb84: 0x12ef, 0xb85: 0x17f5, + 0xb86: 0x12fb, 0xb87: 0x12ff, 0xb88: 0x0687, 0xb89: 0x130b, 0xb8a: 0x05bb, 0xb8b: 0x17fa, + 0xb8c: 0x17ff, 0xb8d: 0x068b, 0xb8e: 0x068f, 0xb8f: 0x1337, 0xb90: 0x134f, 0xb91: 0x136b, + 0xb92: 0x137b, 0xb93: 0x1804, 0xb94: 0x138f, 0xb95: 0x1393, 0xb96: 0x13ab, 0xb97: 0x13b7, + 0xb98: 0x180e, 0xb99: 0x1660, 0xb9a: 0x13c3, 0xb9b: 0x13bf, 0xb9c: 0x13cb, 0xb9d: 0x1665, + 0xb9e: 0x13d7, 0xb9f: 0x13e3, 0xba0: 0x1813, 0xba1: 0x1818, 0xba2: 0x1423, 0xba3: 0x142f, + 0xba4: 0x1437, 0xba5: 0x181d, 0xba6: 0x143b, 0xba7: 0x1467, 0xba8: 0x1473, 0xba9: 0x1477, + 0xbaa: 0x146f, 0xbab: 0x1483, 0xbac: 0x1487, 0xbad: 0x1822, 0xbae: 0x1493, 0xbaf: 0x0693, + 0xbb0: 0x149b, 0xbb1: 0x1827, 0xbb2: 0x0697, 0xbb3: 0x14d3, 0xbb4: 0x0ac3, 0xbb5: 0x14eb, + 0xbb6: 0x182c, 0xbb7: 0x1836, 0xbb8: 0x069b, 0xbb9: 0x069f, 0xbba: 0x1513, 0xbbb: 0x183b, + 0xbbc: 0x06a3, 0xbbd: 0x1840, 0xbbe: 0x152b, 0xbbf: 0x152b, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x1533, 0xbc1: 0x1845, 0xbc2: 
0x154b, 0xbc3: 0x06a7, 0xbc4: 0x155b, 0xbc5: 0x1567, + 0xbc6: 0x156f, 0xbc7: 0x1577, 0xbc8: 0x06ab, 0xbc9: 0x184a, 0xbca: 0x158b, 0xbcb: 0x15a7, + 0xbcc: 0x15b3, 0xbcd: 0x06af, 0xbce: 0x06b3, 0xbcf: 0x15b7, 0xbd0: 0x184f, 0xbd1: 0x06b7, + 0xbd2: 0x1854, 0xbd3: 0x1859, 0xbd4: 0x185e, 0xbd5: 0x15db, 0xbd6: 0x06bb, 0xbd7: 0x15ef, + 0xbd8: 0x15f7, 0xbd9: 0x15fb, 0xbda: 0x1603, 0xbdb: 0x160b, 0xbdc: 0x1613, 0xbdd: 0x1868, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32, + 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35, + 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3b, 0x121: 0x3c, 0x123: 0x0d, 0x124: 0x3d, 0x125: 0x3e, 0x126: 0x3f, 0x127: 0x40, + 0x128: 0x41, 0x129: 0x42, 0x12a: 0x43, 0x12b: 0x44, 0x12c: 0x3f, 0x12d: 0x45, 0x12e: 0x46, 0x12f: 0x47, + 0x131: 0x48, 0x132: 0x49, 0x133: 0x4a, 0x134: 0x4b, 0x135: 0x4c, 0x137: 0x4d, + 0x138: 0x4e, 0x139: 0x4f, 0x13a: 0x50, 0x13b: 0x51, 0x13c: 0x52, 0x13d: 0x53, 0x13e: 0x54, 0x13f: 0x55, + // Block 0x5, offset 0x140 + 0x140: 0x56, 0x142: 0x57, 0x144: 0x58, 0x145: 0x59, 0x146: 0x5a, 0x147: 0x5b, + 0x14d: 0x5c, + 0x15c: 0x5d, 0x15f: 0x5e, + 0x162: 0x5f, 0x164: 0x60, + 0x168: 0x61, 0x169: 0x62, 0x16a: 0x63, 0x16c: 0x0e, 0x16d: 0x64, 0x16e: 0x65, 0x16f: 0x66, + 0x170: 0x67, 0x173: 0x68, 0x177: 0x0f, + 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17, + // Block 0x6, offset 0x180 + 0x180: 0x69, 0x183: 0x6a, 0x184: 0x6b, 0x186: 0x6c, 0x187: 0x6d, + 0x188: 0x6e, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x6f, 0x18c: 0x70, + 0x1ab: 0x71, + 0x1b3: 0x72, 0x1b5: 0x73, 0x1b7: 0x74, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x75, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x76, 0x1c5: 0x77, + 0x1c9: 0x78, 0x1cc: 0x79, 0x1cd: 0x7a, + // Block 0x8, offset 0x200 + 0x219: 0x7b, 0x21a: 0x7c, 0x21b: 0x7d, + 0x220: 0x7e, 0x223: 0x7f, 0x224: 0x80, 0x225: 0x81, 0x226: 0x82, 0x227: 0x83, + 0x22a: 0x84, 0x22b: 0x85, 0x22f: 0x86, + 0x230: 0x87, 0x231: 0x88, 0x232: 0x89, 0x233: 0x8a, 0x234: 0x8b, 0x235: 0x8c, 0x236: 0x8d, 0x237: 0x87, + 0x238: 0x88, 0x239: 0x89, 0x23a: 0x8a, 0x23b: 0x8b, 0x23c: 0x8c, 0x23d: 0x8d, 0x23e: 0x87, 0x23f: 0x88, + // Block 0x9, offset 0x240 + 0x240: 0x89, 0x241: 0x8a, 0x242: 0x8b, 0x243: 0x8c, 0x244: 0x8d, 0x245: 0x87, 0x246: 0x88, 0x247: 0x89, + 0x248: 0x8a, 0x249: 0x8b, 0x24a: 0x8c, 0x24b: 0x8d, 0x24c: 0x87, 0x24d: 0x88, 0x24e: 0x89, 0x24f: 0x8a, + 0x250: 0x8b, 0x251: 0x8c, 0x252: 0x8d, 0x253: 0x87, 0x254: 0x88, 0x255: 0x89, 0x256: 0x8a, 0x257: 0x8b, + 0x258: 0x8c, 0x259: 0x8d, 0x25a: 0x87, 0x25b: 0x88, 0x25c: 0x89, 0x25d: 0x8a, 0x25e: 0x8b, 0x25f: 0x8c, + 0x260: 0x8d, 0x261: 0x87, 0x262: 0x88, 0x263: 0x89, 0x264: 0x8a, 0x265: 0x8b, 0x266: 0x8c, 0x267: 0x8d, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26c: 0x8b, 0x26d: 0x8c, 0x26e: 0x8d, 0x26f: 0x87, + 0x270: 0x88, 0x271: 0x89, 0x272: 0x8a, 0x273: 0x8b, 0x274: 0x8c, 0x275: 0x8d, 0x276: 0x87, 0x277: 0x88, + 0x278: 0x89, 0x279: 0x8a, 0x27a: 0x8b, 0x27b: 0x8c, 0x27c: 0x8d, 0x27d: 0x87, 0x27e: 0x88, 
0x27f: 0x89, + // Block 0xa, offset 0x280 + 0x280: 0x8a, 0x281: 0x8b, 0x282: 0x8c, 0x283: 0x8d, 0x284: 0x87, 0x285: 0x88, 0x286: 0x89, 0x287: 0x8a, + 0x288: 0x8b, 0x289: 0x8c, 0x28a: 0x8d, 0x28b: 0x87, 0x28c: 0x88, 0x28d: 0x89, 0x28e: 0x8a, 0x28f: 0x8b, + 0x290: 0x8c, 0x291: 0x8d, 0x292: 0x87, 0x293: 0x88, 0x294: 0x89, 0x295: 0x8a, 0x296: 0x8b, 0x297: 0x8c, + 0x298: 0x8d, 0x299: 0x87, 0x29a: 0x88, 0x29b: 0x89, 0x29c: 0x8a, 0x29d: 0x8b, 0x29e: 0x8c, 0x29f: 0x8d, + 0x2a0: 0x87, 0x2a1: 0x88, 0x2a2: 0x89, 0x2a3: 0x8a, 0x2a4: 0x8b, 0x2a5: 0x8c, 0x2a6: 0x8d, 0x2a7: 0x87, + 0x2a8: 0x88, 0x2a9: 0x89, 0x2aa: 0x8a, 0x2ab: 0x8b, 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x87, 0x2af: 0x88, + 0x2b0: 0x89, 0x2b1: 0x8a, 0x2b2: 0x8b, 0x2b3: 0x8c, 0x2b4: 0x8d, 0x2b5: 0x87, 0x2b6: 0x88, 0x2b7: 0x89, + 0x2b8: 0x8a, 0x2b9: 0x8b, 0x2ba: 0x8c, 0x2bb: 0x8d, 0x2bc: 0x87, 0x2bd: 0x88, 0x2be: 0x89, 0x2bf: 0x8a, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8b, 0x2c1: 0x8c, 0x2c2: 0x8d, 0x2c3: 0x87, 0x2c4: 0x88, 0x2c5: 0x89, 0x2c6: 0x8a, 0x2c7: 0x8b, + 0x2c8: 0x8c, 0x2c9: 0x8d, 0x2ca: 0x87, 0x2cb: 0x88, 0x2cc: 0x89, 0x2cd: 0x8a, 0x2ce: 0x8b, 0x2cf: 0x8c, + 0x2d0: 0x8d, 0x2d1: 0x87, 0x2d2: 0x88, 0x2d3: 0x89, 0x2d4: 0x8a, 0x2d5: 0x8b, 0x2d6: 0x8c, 0x2d7: 0x8d, + 0x2d8: 0x87, 0x2d9: 0x88, 0x2da: 0x89, 0x2db: 0x8a, 0x2dc: 0x8b, 0x2dd: 0x8c, 0x2de: 0x8e, + // Block 0xc, offset 0x300 + 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20, + 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x8f, 0x32d: 0x90, 0x32e: 0x91, + 0x331: 0x92, 0x332: 0x93, 0x333: 0x94, 0x334: 0x95, + 0x338: 0x96, 0x339: 0x97, 0x33a: 0x98, 0x33b: 0x99, 0x33e: 0x9a, 0x33f: 0x9b, + // Block 0xd, offset 0x340 + 0x347: 0x9c, + 0x34b: 0x9d, 0x34d: 0x9e, + 0x368: 0x9f, 0x36b: 0xa0, + 0x374: 0xa1, + 0x37d: 0xa2, + // Block 0xe, offset 0x380 + 0x381: 0xa3, 0x382: 0xa4, 0x384: 0xa5, 0x385: 0x82, 0x387: 0xa6, + 0x388: 0xa7, 0x38b: 0xa8, 0x38c: 0xa9, 0x38d: 0xaa, + 0x391: 0xab, 0x392: 0xac, 0x393: 0xad, 0x396: 0xae, 0x397: 0xaf, + 0x398: 0x73, 0x39a: 0xb0, 0x39c: 0xb1, + 0x3a0: 0xb2, + 0x3a8: 0xb3, 0x3a9: 0xb4, 0x3aa: 0xb5, + 0x3b0: 0x73, 0x3b5: 0xb6, 0x3b6: 0xb7, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xb8, 0x3ec: 0xb9, + // Block 0x10, offset 0x400 + 0x432: 0xba, + // Block 0x11, offset 0x440 + 0x445: 0xbb, 0x446: 0xbc, 0x447: 0xbd, + 0x449: 0xbe, + // Block 0x12, offset 0x480 + 0x480: 0xbf, + 0x4a3: 0xc0, 0x4a5: 0xc1, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xc2, + // Block 0x14, offset 0x500 + 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c, + 0x528: 0x2d, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 149 entries, 298 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x72, 0x79, 0x7c, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x99, 0x9d, 0xa4, 0xa9, 0xac, 0xb6, 0xb9, 0xc0, 0xc8, 0xcb, 0xcd, 0xcf, 0xd1, 0xd6, 0xe7, 0xf3, 0xf5, 0xfb, 0xfd, 0xff, 0x101, 0x103, 0x105, 0x107, 0x10a, 0x10d, 0x10f, 0x112, 0x115, 0x119, 0x11e, 0x127, 0x129, 0x12c, 0x12e, 0x139, 0x13d, 0x14b, 0x14e, 0x154, 0x15a, 0x165, 0x169, 0x16b, 0x16d, 0x16f, 0x171, 0x173, 0x179, 0x17d, 0x17f, 0x181, 0x189, 0x18d, 0x190, 0x192, 0x194, 0x196, 0x199, 0x19b, 0x19d, 0x19f, 0x1a1, 0x1a7, 0x1aa, 0x1ac, 0x1b3, 0x1b9, 0x1bf, 0x1c7, 0x1cd, 0x1d3, 0x1d9, 0x1dd, 0x1eb, 0x1f4, 0x1f7, 0x1fa, 0x1fc, 0x1ff, 0x201, 0x205, 0x20a, 0x20c, 0x20e, 0x213, 0x219, 0x21b, 0x21d, 0x21f, 
0x225, 0x228, 0x22a, 0x230, 0x233, 0x23b, 0x242, 0x245, 0x248, 0x24a, 0x24d, 0x255, 0x259, 0x260, 0x263, 0x269, 0x26b, 0x26e, 0x270, 0x273, 0x275, 0x277, 0x279, 0x27c, 0x27e, 0x280, 0x282, 0x284, 0x291, 0x29b, 0x29d, 0x29f, 0x2a5, 0x2a7, 0x2aa} + +// nfcSparseValues: 684 entries, 2736 bytes +var nfcSparseValues = [684]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, lo: 0x03}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4840, lo: 0x8a, hi: 0x8a}, + {value: 0x485e, lo: 0x8b, hi: 0x8b}, + {value: 0x36c7, lo: 0x8c, hi: 0x8c}, + {value: 0x36df, lo: 0x8d, hi: 0x8d}, + {value: 0x4876, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x36fd, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a5, lo: 0x90, hi: 0x90}, + {value: 0x37b1, lo: 0x91, hi: 0x91}, + {value: 0x379f, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3817, lo: 0x97, hi: 0x97}, + {value: 0x37e1, lo: 0x9c, hi: 0x9c}, + {value: 0x37c9, lo: 0x9d, hi: 0x9d}, + {value: 0x37f3, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x381d, lo: 0xb6, hi: 0xb6}, + {value: 0x3823, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3841, lo: 0xa2, hi: 0xa2}, + {value: 0x3847, lo: 0xa3, hi: 0xa3}, + {value: 0x3853, lo: 0xa4, hi: 0xa4}, + {value: 0x384d, lo: 0xa5, hi: 0xa5}, + {value: 0x3859, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x386b, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x385f, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3865, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 
0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0xd, offset 0x63 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x68 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x6a + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3ed8, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee0, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3ee8, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x72 + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x79 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x7c + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 0x9f}, + {value: 0x8132, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x84 + {value: 0x0000, lo: 0x03}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0x88 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0x8c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0x8e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0x90 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb6, lo: 0x88, hi: 0x88}, + {value: 0x2cae, lo: 0x8b, hi: 0x8b}, + {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + {value: 0x45ac, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0x99 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc6, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0x9d + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cce, lo: 0x8a, hi: 0x8a}, + {value: 0x2cde, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd6, 
lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xa4 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef0, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xa9 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0xac + {value: 0x0000, lo: 0x09}, + {value: 0x2ce6, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cee, lo: 0x87, hi: 0x87}, + {value: 0x2cf6, lo: 0x88, hi: 0x88}, + {value: 0x2f50, lo: 0x8a, hi: 0x8a}, + {value: 0x2dd8, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xb9 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cfe, lo: 0x8a, hi: 0x8a}, + {value: 0x2d0e, lo: 0x8b, hi: 0x8b}, + {value: 0x2d06, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xc0 + {value: 0x6bea, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3ef8, lo: 0x9a, hi: 0x9a}, + {value: 0x2f58, lo: 0x9c, hi: 0x9c}, + {value: 0x2de3, lo: 0x9d, hi: 0x9d}, + {value: 0x2d16, lo: 0x9e, hi: 0x9f}, + // Block 0x20, offset 0xc8 + {value: 0x0000, lo: 0x02}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xcb + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x22, offset 0xcd + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + // Block 0x23, offset 0xcf + {value: 0x0000, lo: 0x01}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + // Block 0x24, offset 0xd1 + {value: 0x0000, lo: 0x04}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xd6 + {value: 0x0000, lo: 0x10}, + {value: 0x2644, lo: 0x83, hi: 0x83}, + {value: 0x264b, lo: 0x8d, hi: 0x8d}, + {value: 0x2652, lo: 0x92, hi: 0x92}, + {value: 0x2659, lo: 0x97, hi: 0x97}, + {value: 0x2660, lo: 0x9c, hi: 0x9c}, + {value: 0x263d, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0xe7 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x266e, lo: 0x93, hi: 0x93}, + {value: 0x2675, lo: 0x9d, hi: 0x9d}, + {value: 0x267c, lo: 0xa2, hi: 0xa2}, + {value: 0x2683, lo: 0xa7, hi: 0xa7}, + {value: 0x268a, lo: 0xac, hi: 0xac}, + {value: 0x2667, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0xf3 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0xf5 + 
{value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d1e, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0xfb + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x2a, offset 0xfd + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0xff + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x101 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x103 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x105 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x107 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x10a + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x10d + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x10f + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x112 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x115 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x119 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x36, offset 0x11e + {value: 0x0000, lo: 0x08}, + {value: 0x2d66, lo: 0x80, hi: 0x80}, + {value: 0x2d6e, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d76, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x37, offset 0x127 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x38, offset 0x129 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x39, offset 0x12c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3a, offset 0x12e + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3b, offset 0x139 + {value: 0x0004, lo: 0x03}, + {value: 0x0433, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3c, offset 0x13d + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 
0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x3d, offset 0x14b + {value: 0x427b, lo: 0x02}, + {value: 0x01b8, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x3e, offset 0x14e + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bb9, lo: 0x9a, hi: 0x9b}, + {value: 0x3bc7, lo: 0xae, hi: 0xae}, + // Block 0x3f, offset 0x154 + {value: 0x000e, lo: 0x05}, + {value: 0x3bce, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd5, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x40, offset 0x15a + {value: 0x6408, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be3, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bea, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf1, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bf8, lo: 0xa4, hi: 0xa5}, + {value: 0x3bff, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x41, offset 0x165 + {value: 0x0007, lo: 0x03}, + {value: 0x3c68, lo: 0xa0, hi: 0xa1}, + {value: 0x3c92, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbc, lo: 0xaa, hi: 0xad}, + // Block 0x42, offset 0x169 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x43, offset 0x16b + {value: 0x0000, lo: 0x01}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, + // Block 0x44, offset 0x16d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x45, offset 0x16f + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x46, offset 0x171 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x47, offset 0x173 + {value: 0x0000, lo: 0x05}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xaf}, + // Block 0x48, offset 0x179 + {value: 0x0000, lo: 0x03}, + {value: 0x4a9f, lo: 0xb3, hi: 0xb3}, + {value: 0x4a9f, lo: 0xb5, hi: 0xb6}, + {value: 0x4a9f, lo: 0xba, hi: 0xbf}, + // Block 0x49, offset 0x17d + {value: 0x0000, lo: 0x01}, + {value: 0x4a9f, lo: 0x8f, hi: 0xa3}, + // Block 0x4a, offset 0x17f + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4b, offset 0x181 + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4c, offset 0x189 + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4d, offset 0x18d + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x4e, offset 0x190 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x4f, offset 0x192 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x50, offset 0x194 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x51, offset 0x196 + {value: 
0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x52, offset 0x199 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x53, offset 0x19b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x54, offset 0x19d + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x55, offset 0x19f + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x56, offset 0x1a1 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x57, offset 0x1a7 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x58, offset 0x1aa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x59, offset 0x1ac + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5a, offset 0x1b3 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5b, offset 0x1b9 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5c, offset 0x1bf + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5d, offset 0x1c7 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x5e, offset 0x1cd + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x5f, offset 0x1d3 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x60, offset 0x1d9 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x61, offset 0x1dd + {value: 0x0006, lo: 0x0d}, + {value: 0x4390, lo: 0x9d, hi: 0x9d}, + {value: 0x8115, lo: 0x9e, hi: 0x9e}, + {value: 0x4402, lo: 0x9f, hi: 0x9f}, + {value: 0x43f0, lo: 0xaa, hi: 0xab}, + {value: 0x44f4, lo: 0xac, hi: 0xac}, + {value: 0x44fc, lo: 0xad, hi: 0xad}, + {value: 0x4348, lo: 0xae, hi: 0xb1}, + {value: 0x4366, lo: 0xb2, hi: 0xb4}, + {value: 0x437e, lo: 0xb5, hi: 0xb6}, + {value: 0x438a, lo: 0xb8, hi: 0xb8}, + {value: 0x4396, lo: 0xb9, hi: 0xbb}, + {value: 0x43ae, 
lo: 0xbc, hi: 0xbc}, + {value: 0x43b4, lo: 0xbe, hi: 0xbe}, + // Block 0x62, offset 0x1eb + {value: 0x0006, lo: 0x08}, + {value: 0x43ba, lo: 0x80, hi: 0x81}, + {value: 0x43c6, lo: 0x83, hi: 0x84}, + {value: 0x43d8, lo: 0x86, hi: 0x89}, + {value: 0x43fc, lo: 0x8a, hi: 0x8a}, + {value: 0x4378, lo: 0x8b, hi: 0x8b}, + {value: 0x4360, lo: 0x8c, hi: 0x8c}, + {value: 0x43a8, lo: 0x8d, hi: 0x8d}, + {value: 0x43d2, lo: 0x8e, hi: 0x8e}, + // Block 0x63, offset 0x1f4 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x64, offset 0x1f7 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x65, offset 0x1fa + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x66, offset 0x1fc + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x67, offset 0x1ff + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x68, offset 0x201 + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xa0, hi: 0xa6}, + {value: 0x812d, lo: 0xa7, hi: 0xad}, + {value: 0x8132, lo: 0xae, hi: 0xaf}, + // Block 0x69, offset 0x205 + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6a, offset 0x20a + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6b, offset 0x20c + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6c, offset 0x20e + {value: 0x0000, lo: 0x04}, + {value: 0x4a9f, lo: 0x9e, hi: 0x9f}, + {value: 0x4a9f, lo: 0xa3, hi: 0xa3}, + {value: 0x4a9f, lo: 0xa5, hi: 0xa6}, + {value: 0x4a9f, lo: 0xaa, hi: 0xaf}, + // Block 0x6d, offset 0x213 + {value: 0x0000, lo: 0x05}, + {value: 0x4a9f, lo: 0x82, hi: 0x87}, + {value: 0x4a9f, lo: 0x8a, hi: 0x8f}, + {value: 0x4a9f, lo: 0x92, hi: 0x97}, + {value: 0x4a9f, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x6e, offset 0x219 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6f, offset 0x21b + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x70, offset 0x21d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x71, offset 0x21f + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x72, offset 0x225 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x73, offset 0x228 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa4, hi: 0xa7}, + // Block 0x74, offset 0x22a + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x8132, lo: 0x88, hi: 0x8a}, + {value: 0x812d, lo: 0x8b, hi: 0x8b}, + {value: 0x8132, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x8d, hi: 0x90}, + // Block 0x75, offset 0x230 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x76, offset 0x233 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4238, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4242, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424c, lo: 0xab, hi: 
0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x77, offset 0x23b + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d7e, lo: 0xae, hi: 0xae}, + {value: 0x2d88, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x78, offset 0x242 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x79, offset 0x245 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x7a, offset 0x248 + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x7b, offset 0x24a + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7c, offset 0x24d + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x7d, offset 0x255 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + {value: 0x8132, lo: 0x9e, hi: 0x9e}, + // Block 0x7e, offset 0x259 + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x7f, offset 0x260 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x80, offset 0x263 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x269 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x82, offset 0x26b + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x83, offset 0x26e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x84, offset 0x270 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb9, hi: 0xb9}, + {value: 0x8102, lo: 0xba, hi: 0xba}, + // Block 0x85, offset 0x273 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x86, offset 0x275 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x87, offset 0x277 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x88, offset 0x279 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x89, offset 0x27c + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x97, hi: 0x97}, + // Block 0x8a, offset 0x27e + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x8b, offset 0x280 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x8c, offset 0x282 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x8d, offset 0x284 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + 
{value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x8e, offset 0x291 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, + // Block 0x8f, offset 0x29b + {value: 0x0000, lo: 0x01}, + {value: 0x467a, lo: 0x80, hi: 0x80}, + // Block 0x90, offset 0x29d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x91, offset 0x29f + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x92, offset 0x2a5 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0x93, offset 0x2a7 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x94, offset 0x2aa + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x93, hi: 0x93}, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfkcTrie. Total size: 17248 bytes (16.84 KiB). Checksum: 4fb368372b6b1b27. +type nfkcTrie struct{} + +func newNfkcTrie(i int) *nfkcTrie { + return &nfkcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 92: + return uint16(nfkcValues[n<<6+uint32(b)]) + default: + n -= 92 + return uint16(nfkcSparse.lookup(n, b)) + } +} + +// nfkcValues: 94 blocks, 6016 entries, 12032 bytes +// The third block is the zero block. 
+var nfkcValues = [6016]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x132: 0x195d, 0x133: 0x19e7, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, 0x13f: 0x1bac, + // Block 0x5, offset 0x140 + 0x140: 0x1c34, 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, 0x149: 0x1c5c, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0x00a7, + // Block 0x6, 
offset 0x180 + 0x184: 0x2dee, 0x185: 0x2df4, + 0x186: 0x2dfa, 0x187: 0x1972, 0x188: 0x1975, 0x189: 0x1a08, 0x18a: 0x1987, 0x18b: 0x198a, + 0x18c: 0x1a3e, 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b1: 0x1942, 0x1b2: 0x1945, 0x1b3: 0x19cf, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x42a5, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x425a, 0x285: 0x447b, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 
0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4840, 0x2cb: 0x485e, + 0x2cc: 0x36c7, 0x2cd: 0x36df, 0x2ce: 0x4876, 0x2d0: 0x01be, 0x2d1: 0x01d0, + 0x2d2: 0x01ac, 0x2d3: 0x430c, 0x2d4: 0x4312, 0x2d5: 0x01fa, 0x2d6: 0x01e8, + 0x2f0: 0x01d6, 0x2f1: 0x01eb, 0x2f2: 0x01ee, 0x2f4: 0x0188, 0x2f5: 0x01c7, + 0x2f9: 0x01a6, + // Block 0xc, offset 0x300 + 0x300: 0x3721, 0x301: 0x372d, 0x303: 0x371b, + 0x306: 0xa000, 0x307: 0x3709, + 0x30c: 0x375d, 0x30d: 0x3745, 0x30e: 0x376f, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3751, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37d5, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x3733, 0x342: 0x37b7, + 0x350: 0x370f, 0x351: 0x3793, + 0x352: 0x3715, 0x353: 0x3799, 0x356: 0x3727, 0x357: 0x37ab, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x3829, 0x35b: 0x382f, 0x35c: 0x3739, 0x35d: 0x37bd, + 0x35e: 0x373f, 0x35f: 0x37c3, 0x362: 0x374b, 0x363: 0x37cf, + 0x364: 0x3757, 0x365: 0x37db, 0x366: 0x3763, 0x367: 0x37e7, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x3835, 0x36b: 0x383b, 0x36c: 0x378d, 0x36d: 0x3811, 0x36e: 0x3769, 0x36f: 0x37ed, + 0x370: 0x3775, 0x371: 0x37f9, 0x372: 0x377b, 0x373: 0x37ff, 0x374: 0x3781, 0x375: 0x3805, + 0x378: 0x3787, 0x379: 0x380b, + // Block 0xe, offset 0x380 + 0x387: 0x1d61, + 0x391: 0x812d, + 0x392: 0x8132, 0x393: 0x8132, 0x394: 0x8132, 0x395: 0x8132, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x812e, 0x39b: 0x812d, 0x39c: 0x8132, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x8132, 0x3a0: 0x8132, 0x3a1: 0x8132, 0x3a2: 0x812d, 0x3a3: 0x812d, + 0x3a4: 0x812d, 0x3a5: 0x812d, 0x3a6: 0x812d, 0x3a7: 0x812d, 0x3a8: 0x8132, 0x3a9: 0x8132, + 0x3aa: 0x812d, 0x3ab: 0x8132, 0x3ac: 0x8132, 0x3ad: 0x812e, 0x3ae: 0x8131, 0x3af: 0x8132, + 0x3b0: 0x8105, 0x3b1: 0x8106, 0x3b2: 0x8107, 0x3b3: 0x8108, 0x3b4: 0x8109, 0x3b5: 0x810a, + 0x3b6: 0x810b, 0x3b7: 0x810c, 0x3b8: 0x810d, 0x3b9: 0x810e, 0x3ba: 0x810e, 0x3bb: 0x810f, + 0x3bc: 0x8110, 0x3bd: 0x8111, 0x3bf: 0x8112, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8116, + 0x3cc: 0x8117, 0x3cd: 0x8118, 0x3ce: 0x8119, 0x3cf: 0x811a, 0x3d0: 0x811b, 0x3d1: 0x811c, + 0x3d2: 0x811d, 0x3d3: 0x9932, 0x3d4: 0x9932, 0x3d5: 0x992d, 0x3d6: 0x812d, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x812d, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x812d, + 0x3f0: 0x811e, 0x3f5: 0x1d84, + 0x3f6: 0x2013, 0x3f7: 0x204f, 0x3f8: 0x204a, + // Block 0x10, offset 0x400 + 0x413: 0x812d, 0x414: 0x8132, 0x415: 0x8132, 0x416: 0x8132, 0x417: 0x8132, + 0x418: 0x8132, 0x419: 0x8132, 0x41a: 0x8132, 0x41b: 0x8132, 0x41c: 0x8132, 0x41d: 0x8132, + 0x41e: 0x8132, 0x41f: 0x8132, 0x420: 0x8132, 0x421: 0x8132, 0x423: 0x812d, + 0x424: 0x8132, 0x425: 0x8132, 0x426: 0x812d, 0x427: 0x8132, 0x428: 0x8132, 0x429: 0x812d, + 0x42a: 0x8132, 0x42b: 0x8132, 0x42c: 0x8132, 0x42d: 0x812d, 0x42e: 0x812d, 0x42f: 0x812d, + 0x430: 0x8116, 0x431: 0x8117, 0x432: 0x8118, 0x433: 
0x8132, 0x434: 0x8132, 0x435: 0x8132, + 0x436: 0x812d, 0x437: 0x8132, 0x438: 0x8132, 0x439: 0x812d, 0x43a: 0x812d, 0x43b: 0x8132, + 0x43c: 0x8132, 0x43d: 0x8132, 0x43e: 0x8132, 0x43f: 0x8132, + // Block 0x11, offset 0x440 + 0x445: 0xa000, + 0x446: 0x2d26, 0x447: 0xa000, 0x448: 0x2d2e, 0x449: 0xa000, 0x44a: 0x2d36, 0x44b: 0xa000, + 0x44c: 0x2d3e, 0x44d: 0xa000, 0x44e: 0x2d46, 0x451: 0xa000, + 0x452: 0x2d4e, + 0x474: 0x8102, 0x475: 0x9900, + 0x47a: 0xa000, 0x47b: 0x2d56, + 0x47c: 0xa000, 0x47d: 0x2d5e, 0x47e: 0xa000, 0x47f: 0xa000, + // Block 0x12, offset 0x480 + 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x00f5, 0x485: 0x00f8, + 0x486: 0x0413, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x0104, 0x48b: 0x0107, + 0x48c: 0x010a, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e0, + 0x492: 0x009f, 0x493: 0x00fe, 0x494: 0x0417, 0x495: 0x041b, 0x496: 0x00a1, 0x497: 0x00a9, + 0x498: 0x00ab, 0x499: 0x0423, 0x49a: 0x012b, 0x49b: 0x00ad, 0x49c: 0x0427, 0x49d: 0x01be, + 0x49e: 0x01c1, 0x49f: 0x01c4, 0x4a0: 0x01fa, 0x4a1: 0x01fd, 0x4a2: 0x0093, 0x4a3: 0x00a5, + 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x01be, 0x4a7: 0x01c1, 0x4a8: 0x01eb, 0x4a9: 0x01fa, + 0x4aa: 0x01fd, + 0x4b8: 0x020c, + // Block 0x13, offset 0x4c0 + 0x4db: 0x00fb, 0x4dc: 0x0087, 0x4dd: 0x0101, + 0x4de: 0x00d4, 0x4df: 0x010a, 0x4e0: 0x008d, 0x4e1: 0x010d, 0x4e2: 0x0110, 0x4e3: 0x0116, + 0x4e4: 0x011c, 0x4e5: 0x011f, 0x4e6: 0x0122, 0x4e7: 0x042b, 0x4e8: 0x016a, 0x4e9: 0x0128, + 0x4ea: 0x042f, 0x4eb: 0x016d, 0x4ec: 0x0131, 0x4ed: 0x012e, 0x4ee: 0x0134, 0x4ef: 0x0137, + 0x4f0: 0x013a, 0x4f1: 0x013d, 0x4f2: 0x0140, 0x4f3: 0x014c, 0x4f4: 0x014f, 0x4f5: 0x00ec, + 0x4f6: 0x0152, 0x4f7: 0x0155, 0x4f8: 0x041f, 0x4f9: 0x0158, 0x4fa: 0x015b, 0x4fb: 0x00b5, + 0x4fc: 0x015e, 0x4fd: 0x0161, 0x4fe: 0x0164, 0x4ff: 0x01d0, + // Block 0x14, offset 0x500 + 0x500: 0x8132, 0x501: 0x8132, 0x502: 0x812d, 0x503: 0x8132, 0x504: 0x8132, 0x505: 0x8132, + 0x506: 0x8132, 0x507: 0x8132, 0x508: 0x8132, 0x509: 0x8132, 0x50a: 0x812d, 0x50b: 0x8132, + 0x50c: 0x8132, 0x50d: 0x8135, 0x50e: 0x812a, 0x50f: 0x812d, 0x510: 0x8129, 0x511: 0x8132, + 0x512: 0x8132, 0x513: 0x8132, 0x514: 0x8132, 0x515: 0x8132, 0x516: 0x8132, 0x517: 0x8132, + 0x518: 0x8132, 0x519: 0x8132, 0x51a: 0x8132, 0x51b: 0x8132, 0x51c: 0x8132, 0x51d: 0x8132, + 0x51e: 0x8132, 0x51f: 0x8132, 0x520: 0x8132, 0x521: 0x8132, 0x522: 0x8132, 0x523: 0x8132, + 0x524: 0x8132, 0x525: 0x8132, 0x526: 0x8132, 0x527: 0x8132, 0x528: 0x8132, 0x529: 0x8132, + 0x52a: 0x8132, 0x52b: 0x8132, 0x52c: 0x8132, 0x52d: 0x8132, 0x52e: 0x8132, 0x52f: 0x8132, + 0x530: 0x8132, 0x531: 0x8132, 0x532: 0x8132, 0x533: 0x8132, 0x534: 0x8132, 0x535: 0x8132, + 0x536: 0x8133, 0x537: 0x8131, 0x538: 0x8131, 0x539: 0x812d, 0x53b: 0x8132, + 0x53c: 0x8134, 0x53d: 0x812d, 0x53e: 0x8132, 0x53f: 0x812d, + // Block 0x15, offset 0x540 + 0x540: 0x2f97, 0x541: 0x32a3, 0x542: 0x2fa1, 0x543: 0x32ad, 0x544: 0x2fa6, 0x545: 0x32b2, + 0x546: 0x2fab, 0x547: 0x32b7, 0x548: 0x38cc, 0x549: 0x3a5b, 0x54a: 0x2fc4, 0x54b: 0x32d0, + 0x54c: 0x2fce, 0x54d: 0x32da, 0x54e: 0x2fdd, 0x54f: 0x32e9, 0x550: 0x2fd3, 0x551: 0x32df, + 0x552: 0x2fd8, 0x553: 0x32e4, 0x554: 0x38ef, 0x555: 0x3a7e, 0x556: 0x38f6, 0x557: 0x3a85, + 0x558: 0x3019, 0x559: 0x3325, 0x55a: 0x301e, 0x55b: 0x332a, 0x55c: 0x3904, 0x55d: 0x3a93, + 0x55e: 0x3023, 0x55f: 0x332f, 0x560: 0x3032, 0x561: 0x333e, 0x562: 0x3050, 0x563: 0x335c, + 0x564: 0x305f, 0x565: 0x336b, 0x566: 0x3055, 0x567: 0x3361, 0x568: 0x3064, 0x569: 0x3370, + 0x56a: 0x3069, 0x56b: 0x3375, 0x56c: 0x30af, 0x56d: 
0x33bb, 0x56e: 0x390b, 0x56f: 0x3a9a, + 0x570: 0x30b9, 0x571: 0x33ca, 0x572: 0x30c3, 0x573: 0x33d4, 0x574: 0x30cd, 0x575: 0x33de, + 0x576: 0x46c4, 0x577: 0x4755, 0x578: 0x3912, 0x579: 0x3aa1, 0x57a: 0x30e6, 0x57b: 0x33f7, + 0x57c: 0x30e1, 0x57d: 0x33f2, 0x57e: 0x30eb, 0x57f: 0x33fc, + // Block 0x16, offset 0x580 + 0x580: 0x30f0, 0x581: 0x3401, 0x582: 0x30f5, 0x583: 0x3406, 0x584: 0x3109, 0x585: 0x341a, + 0x586: 0x3113, 0x587: 0x3424, 0x588: 0x3122, 0x589: 0x3433, 0x58a: 0x311d, 0x58b: 0x342e, + 0x58c: 0x3935, 0x58d: 0x3ac4, 0x58e: 0x3943, 0x58f: 0x3ad2, 0x590: 0x394a, 0x591: 0x3ad9, + 0x592: 0x3951, 0x593: 0x3ae0, 0x594: 0x314f, 0x595: 0x3460, 0x596: 0x3154, 0x597: 0x3465, + 0x598: 0x315e, 0x599: 0x346f, 0x59a: 0x46f1, 0x59b: 0x4782, 0x59c: 0x3997, 0x59d: 0x3b26, + 0x59e: 0x3177, 0x59f: 0x3488, 0x5a0: 0x3181, 0x5a1: 0x3492, 0x5a2: 0x4700, 0x5a3: 0x4791, + 0x5a4: 0x399e, 0x5a5: 0x3b2d, 0x5a6: 0x39a5, 0x5a7: 0x3b34, 0x5a8: 0x39ac, 0x5a9: 0x3b3b, + 0x5aa: 0x3190, 0x5ab: 0x34a1, 0x5ac: 0x319a, 0x5ad: 0x34b0, 0x5ae: 0x31ae, 0x5af: 0x34c4, + 0x5b0: 0x31a9, 0x5b1: 0x34bf, 0x5b2: 0x31ea, 0x5b3: 0x3500, 0x5b4: 0x31f9, 0x5b5: 0x350f, + 0x5b6: 0x31f4, 0x5b7: 0x350a, 0x5b8: 0x39b3, 0x5b9: 0x3b42, 0x5ba: 0x39ba, 0x5bb: 0x3b49, + 0x5bc: 0x31fe, 0x5bd: 0x3514, 0x5be: 0x3203, 0x5bf: 0x3519, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3208, 0x5c1: 0x351e, 0x5c2: 0x320d, 0x5c3: 0x3523, 0x5c4: 0x321c, 0x5c5: 0x3532, + 0x5c6: 0x3217, 0x5c7: 0x352d, 0x5c8: 0x3221, 0x5c9: 0x353c, 0x5ca: 0x3226, 0x5cb: 0x3541, + 0x5cc: 0x322b, 0x5cd: 0x3546, 0x5ce: 0x3249, 0x5cf: 0x3564, 0x5d0: 0x3262, 0x5d1: 0x3582, + 0x5d2: 0x3271, 0x5d3: 0x3591, 0x5d4: 0x3276, 0x5d5: 0x3596, 0x5d6: 0x337a, 0x5d7: 0x34a6, + 0x5d8: 0x3537, 0x5d9: 0x3573, 0x5da: 0x1be0, 0x5db: 0x42d7, + 0x5e0: 0x46a1, 0x5e1: 0x4732, 0x5e2: 0x2f83, 0x5e3: 0x328f, + 0x5e4: 0x3878, 0x5e5: 0x3a07, 0x5e6: 0x3871, 0x5e7: 0x3a00, 0x5e8: 0x3886, 0x5e9: 0x3a15, + 0x5ea: 0x387f, 0x5eb: 0x3a0e, 0x5ec: 0x38be, 0x5ed: 0x3a4d, 0x5ee: 0x3894, 0x5ef: 0x3a23, + 0x5f0: 0x388d, 0x5f1: 0x3a1c, 0x5f2: 0x38a2, 0x5f3: 0x3a31, 0x5f4: 0x389b, 0x5f5: 0x3a2a, + 0x5f6: 0x38c5, 0x5f7: 0x3a54, 0x5f8: 0x46b5, 0x5f9: 0x4746, 0x5fa: 0x3000, 0x5fb: 0x330c, + 0x5fc: 0x2fec, 0x5fd: 0x32f8, 0x5fe: 0x38da, 0x5ff: 0x3a69, + // Block 0x18, offset 0x600 + 0x600: 0x38d3, 0x601: 0x3a62, 0x602: 0x38e8, 0x603: 0x3a77, 0x604: 0x38e1, 0x605: 0x3a70, + 0x606: 0x38fd, 0x607: 0x3a8c, 0x608: 0x3091, 0x609: 0x339d, 0x60a: 0x30a5, 0x60b: 0x33b1, + 0x60c: 0x46e7, 0x60d: 0x4778, 0x60e: 0x3136, 0x60f: 0x3447, 0x610: 0x3920, 0x611: 0x3aaf, + 0x612: 0x3919, 0x613: 0x3aa8, 0x614: 0x392e, 0x615: 0x3abd, 0x616: 0x3927, 0x617: 0x3ab6, + 0x618: 0x3989, 0x619: 0x3b18, 0x61a: 0x396d, 0x61b: 0x3afc, 0x61c: 0x3966, 0x61d: 0x3af5, + 0x61e: 0x397b, 0x61f: 0x3b0a, 0x620: 0x3974, 0x621: 0x3b03, 0x622: 0x3982, 0x623: 0x3b11, + 0x624: 0x31e5, 0x625: 0x34fb, 0x626: 0x31c7, 0x627: 0x34dd, 0x628: 0x39e4, 0x629: 0x3b73, + 0x62a: 0x39dd, 0x62b: 0x3b6c, 0x62c: 0x39f2, 0x62d: 0x3b81, 0x62e: 0x39eb, 0x62f: 0x3b7a, + 0x630: 0x39f9, 0x631: 0x3b88, 0x632: 0x3230, 0x633: 0x354b, 0x634: 0x3258, 0x635: 0x3578, + 0x636: 0x3253, 0x637: 0x356e, 0x638: 0x323f, 0x639: 0x355a, + // Block 0x19, offset 0x640 + 0x640: 0x4804, 0x641: 0x480a, 0x642: 0x491e, 0x643: 0x4936, 0x644: 0x4926, 0x645: 0x493e, + 0x646: 0x492e, 0x647: 0x4946, 0x648: 0x47aa, 0x649: 0x47b0, 0x64a: 0x488e, 0x64b: 0x48a6, + 0x64c: 0x4896, 0x64d: 0x48ae, 0x64e: 0x489e, 0x64f: 0x48b6, 0x650: 0x4816, 0x651: 0x481c, + 0x652: 0x3db8, 0x653: 0x3dc8, 0x654: 0x3dc0, 0x655: 0x3dd0, + 0x658: 0x47b6, 
0x659: 0x47bc, 0x65a: 0x3ce8, 0x65b: 0x3cf8, 0x65c: 0x3cf0, 0x65d: 0x3d00, + 0x660: 0x482e, 0x661: 0x4834, 0x662: 0x494e, 0x663: 0x4966, + 0x664: 0x4956, 0x665: 0x496e, 0x666: 0x495e, 0x667: 0x4976, 0x668: 0x47c2, 0x669: 0x47c8, + 0x66a: 0x48be, 0x66b: 0x48d6, 0x66c: 0x48c6, 0x66d: 0x48de, 0x66e: 0x48ce, 0x66f: 0x48e6, + 0x670: 0x4846, 0x671: 0x484c, 0x672: 0x3e18, 0x673: 0x3e30, 0x674: 0x3e20, 0x675: 0x3e38, + 0x676: 0x3e28, 0x677: 0x3e40, 0x678: 0x47ce, 0x679: 0x47d4, 0x67a: 0x3d18, 0x67b: 0x3d30, + 0x67c: 0x3d20, 0x67d: 0x3d38, 0x67e: 0x3d28, 0x67f: 0x3d40, + // Block 0x1a, offset 0x680 + 0x680: 0x4852, 0x681: 0x4858, 0x682: 0x3e48, 0x683: 0x3e58, 0x684: 0x3e50, 0x685: 0x3e60, + 0x688: 0x47da, 0x689: 0x47e0, 0x68a: 0x3d48, 0x68b: 0x3d58, + 0x68c: 0x3d50, 0x68d: 0x3d60, 0x690: 0x4864, 0x691: 0x486a, + 0x692: 0x3e80, 0x693: 0x3e98, 0x694: 0x3e88, 0x695: 0x3ea0, 0x696: 0x3e90, 0x697: 0x3ea8, + 0x699: 0x47e6, 0x69b: 0x3d68, 0x69d: 0x3d70, + 0x69f: 0x3d78, 0x6a0: 0x487c, 0x6a1: 0x4882, 0x6a2: 0x497e, 0x6a3: 0x4996, + 0x6a4: 0x4986, 0x6a5: 0x499e, 0x6a6: 0x498e, 0x6a7: 0x49a6, 0x6a8: 0x47ec, 0x6a9: 0x47f2, + 0x6aa: 0x48ee, 0x6ab: 0x4906, 0x6ac: 0x48f6, 0x6ad: 0x490e, 0x6ae: 0x48fe, 0x6af: 0x4916, + 0x6b0: 0x47f8, 0x6b1: 0x431e, 0x6b2: 0x3691, 0x6b3: 0x4324, 0x6b4: 0x4822, 0x6b5: 0x432a, + 0x6b6: 0x36a3, 0x6b7: 0x4330, 0x6b8: 0x36c1, 0x6b9: 0x4336, 0x6ba: 0x36d9, 0x6bb: 0x433c, + 0x6bc: 0x4870, 0x6bd: 0x4342, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3da0, 0x6c1: 0x3da8, 0x6c2: 0x4184, 0x6c3: 0x41a2, 0x6c4: 0x418e, 0x6c5: 0x41ac, + 0x6c6: 0x4198, 0x6c7: 0x41b6, 0x6c8: 0x3cd8, 0x6c9: 0x3ce0, 0x6ca: 0x40d0, 0x6cb: 0x40ee, + 0x6cc: 0x40da, 0x6cd: 0x40f8, 0x6ce: 0x40e4, 0x6cf: 0x4102, 0x6d0: 0x3de8, 0x6d1: 0x3df0, + 0x6d2: 0x41c0, 0x6d3: 0x41de, 0x6d4: 0x41ca, 0x6d5: 0x41e8, 0x6d6: 0x41d4, 0x6d7: 0x41f2, + 0x6d8: 0x3d08, 0x6d9: 0x3d10, 0x6da: 0x410c, 0x6db: 0x412a, 0x6dc: 0x4116, 0x6dd: 0x4134, + 0x6de: 0x4120, 0x6df: 0x413e, 0x6e0: 0x3ec0, 0x6e1: 0x3ec8, 0x6e2: 0x41fc, 0x6e3: 0x421a, + 0x6e4: 0x4206, 0x6e5: 0x4224, 0x6e6: 0x4210, 0x6e7: 0x422e, 0x6e8: 0x3d80, 0x6e9: 0x3d88, + 0x6ea: 0x4148, 0x6eb: 0x4166, 0x6ec: 0x4152, 0x6ed: 0x4170, 0x6ee: 0x415c, 0x6ef: 0x417a, + 0x6f0: 0x3685, 0x6f1: 0x367f, 0x6f2: 0x3d90, 0x6f3: 0x368b, 0x6f4: 0x3d98, + 0x6f6: 0x4810, 0x6f7: 0x3db0, 0x6f8: 0x35f5, 0x6f9: 0x35ef, 0x6fa: 0x35e3, 0x6fb: 0x42ee, + 0x6fc: 0x35fb, 0x6fd: 0x4287, 0x6fe: 0x01d3, 0x6ff: 0x4287, + // Block 0x1c, offset 0x700 + 0x700: 0x42a0, 0x701: 0x4482, 0x702: 0x3dd8, 0x703: 0x369d, 0x704: 0x3de0, + 0x706: 0x483a, 0x707: 0x3df8, 0x708: 0x3601, 0x709: 0x42f4, 0x70a: 0x360d, 0x70b: 0x42fa, + 0x70c: 0x3619, 0x70d: 0x4489, 0x70e: 0x4490, 0x70f: 0x4497, 0x710: 0x36b5, 0x711: 0x36af, + 0x712: 0x3e00, 0x713: 0x44e4, 0x716: 0x36bb, 0x717: 0x3e10, + 0x718: 0x3631, 0x719: 0x362b, 0x71a: 0x361f, 0x71b: 0x4300, 0x71d: 0x449e, + 0x71e: 0x44a5, 0x71f: 0x44ac, 0x720: 0x36eb, 0x721: 0x36e5, 0x722: 0x3e68, 0x723: 0x44ec, + 0x724: 0x36cd, 0x725: 0x36d3, 0x726: 0x36f1, 0x727: 0x3e78, 0x728: 0x3661, 0x729: 0x365b, + 0x72a: 0x364f, 0x72b: 0x430c, 0x72c: 0x3649, 0x72d: 0x4474, 0x72e: 0x447b, 0x72f: 0x0081, + 0x732: 0x3eb0, 0x733: 0x36f7, 0x734: 0x3eb8, + 0x736: 0x4888, 0x737: 0x3ed0, 0x738: 0x363d, 0x739: 0x4306, 0x73a: 0x366d, 0x73b: 0x4318, + 0x73c: 0x3679, 0x73d: 0x425a, 0x73e: 0x428c, + // Block 0x1d, offset 0x740 + 0x740: 0x1bd8, 0x741: 0x1bdc, 0x742: 0x0047, 0x743: 0x1c54, 0x745: 0x1be8, + 0x746: 0x1bec, 0x747: 0x00e9, 0x749: 0x1c58, 0x74a: 0x008f, 0x74b: 0x0051, + 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 
0x74f: 0x00da, 0x750: 0x0053, 0x751: 0x0053, + 0x752: 0x0059, 0x753: 0x0099, 0x755: 0x005d, 0x756: 0x198d, + 0x759: 0x0061, 0x75a: 0x0063, 0x75b: 0x0065, 0x75c: 0x0065, 0x75d: 0x0065, + 0x760: 0x199f, 0x761: 0x1bc8, 0x762: 0x19a8, + 0x764: 0x0075, 0x766: 0x01b8, 0x768: 0x0075, + 0x76a: 0x0057, 0x76b: 0x42d2, 0x76c: 0x0045, 0x76d: 0x0047, 0x76f: 0x008b, + 0x770: 0x004b, 0x771: 0x004d, 0x773: 0x005b, 0x774: 0x009f, 0x775: 0x0215, + 0x776: 0x0218, 0x777: 0x021b, 0x778: 0x021e, 0x779: 0x0093, 0x77b: 0x1b98, + 0x77c: 0x01e8, 0x77d: 0x01c1, 0x77e: 0x0179, 0x77f: 0x01a0, + // Block 0x1e, offset 0x780 + 0x780: 0x0463, 0x785: 0x0049, + 0x786: 0x0089, 0x787: 0x008b, 0x788: 0x0093, 0x789: 0x0095, + 0x790: 0x222e, 0x791: 0x223a, + 0x792: 0x22ee, 0x793: 0x2216, 0x794: 0x229a, 0x795: 0x2222, 0x796: 0x22a0, 0x797: 0x22b8, + 0x798: 0x22c4, 0x799: 0x2228, 0x79a: 0x22ca, 0x79b: 0x2234, 0x79c: 0x22be, 0x79d: 0x22d0, + 0x79e: 0x22d6, 0x79f: 0x1cbc, 0x7a0: 0x0053, 0x7a1: 0x195a, 0x7a2: 0x1ba4, 0x7a3: 0x1963, + 0x7a4: 0x006d, 0x7a5: 0x19ab, 0x7a6: 0x1bd0, 0x7a7: 0x1d48, 0x7a8: 0x1966, 0x7a9: 0x0071, + 0x7aa: 0x19b7, 0x7ab: 0x1bd4, 0x7ac: 0x0059, 0x7ad: 0x0047, 0x7ae: 0x0049, 0x7af: 0x005b, + 0x7b0: 0x0093, 0x7b1: 0x19e4, 0x7b2: 0x1c18, 0x7b3: 0x19ed, 0x7b4: 0x00ad, 0x7b5: 0x1a62, + 0x7b6: 0x1c4c, 0x7b7: 0x1d5c, 0x7b8: 0x19f0, 0x7b9: 0x00b1, 0x7ba: 0x1a65, 0x7bb: 0x1c50, + 0x7bc: 0x0099, 0x7bd: 0x0087, 0x7be: 0x0089, 0x7bf: 0x009b, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x3c06, 0x7c3: 0xa000, 0x7c4: 0x3c0d, 0x7c5: 0xa000, + 0x7c7: 0x3c14, 0x7c8: 0xa000, 0x7c9: 0x3c1b, + 0x7cd: 0xa000, + 0x7e0: 0x2f65, 0x7e1: 0xa000, 0x7e2: 0x3c29, + 0x7e4: 0xa000, 0x7e5: 0xa000, + 0x7ed: 0x3c22, 0x7ee: 0x2f60, 0x7ef: 0x2f6a, + 0x7f0: 0x3c30, 0x7f1: 0x3c37, 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0x3c3e, 0x7f5: 0x3c45, + 0x7f6: 0xa000, 0x7f7: 0xa000, 0x7f8: 0x3c4c, 0x7f9: 0x3c53, 0x7fa: 0xa000, 0x7fb: 0xa000, + 0x7fc: 0xa000, 0x7fd: 0xa000, + // Block 0x20, offset 0x800 + 0x800: 0x3c5a, 0x801: 0x3c61, 0x802: 0xa000, 0x803: 0xa000, 0x804: 0x3c76, 0x805: 0x3c7d, + 0x806: 0xa000, 0x807: 0xa000, 0x808: 0x3c84, 0x809: 0x3c8b, + 0x811: 0xa000, + 0x812: 0xa000, + 0x822: 0xa000, + 0x828: 0xa000, 0x829: 0xa000, + 0x82b: 0xa000, 0x82c: 0x3ca0, 0x82d: 0x3ca7, 0x82e: 0x3cae, 0x82f: 0x3cb5, + 0x832: 0xa000, 0x833: 0xa000, 0x834: 0xa000, 0x835: 0xa000, + // Block 0x21, offset 0x840 + 0x860: 0x0023, 0x861: 0x0025, 0x862: 0x0027, 0x863: 0x0029, + 0x864: 0x002b, 0x865: 0x002d, 0x866: 0x002f, 0x867: 0x0031, 0x868: 0x0033, 0x869: 0x1882, + 0x86a: 0x1885, 0x86b: 0x1888, 0x86c: 0x188b, 0x86d: 0x188e, 0x86e: 0x1891, 0x86f: 0x1894, + 0x870: 0x1897, 0x871: 0x189a, 0x872: 0x189d, 0x873: 0x18a6, 0x874: 0x1a68, 0x875: 0x1a6c, + 0x876: 0x1a70, 0x877: 0x1a74, 0x878: 0x1a78, 0x879: 0x1a7c, 0x87a: 0x1a80, 0x87b: 0x1a84, + 0x87c: 0x1a88, 0x87d: 0x1c80, 0x87e: 0x1c85, 0x87f: 0x1c8a, + // Block 0x22, offset 0x880 + 0x880: 0x1c8f, 0x881: 0x1c94, 0x882: 0x1c99, 0x883: 0x1c9e, 0x884: 0x1ca3, 0x885: 0x1ca8, + 0x886: 0x1cad, 0x887: 0x1cb2, 0x888: 0x187f, 0x889: 0x18a3, 0x88a: 0x18c7, 0x88b: 0x18eb, + 0x88c: 0x190f, 0x88d: 0x1918, 0x88e: 0x191e, 0x88f: 0x1924, 0x890: 0x192a, 0x891: 0x1b60, + 0x892: 0x1b64, 0x893: 0x1b68, 0x894: 0x1b6c, 0x895: 0x1b70, 0x896: 0x1b74, 0x897: 0x1b78, + 0x898: 0x1b7c, 0x899: 0x1b80, 0x89a: 0x1b84, 0x89b: 0x1b88, 0x89c: 0x1af4, 0x89d: 0x1af8, + 0x89e: 0x1afc, 0x89f: 0x1b00, 0x8a0: 0x1b04, 0x8a1: 0x1b08, 0x8a2: 0x1b0c, 0x8a3: 0x1b10, + 0x8a4: 0x1b14, 0x8a5: 0x1b18, 0x8a6: 0x1b1c, 0x8a7: 0x1b20, 0x8a8: 0x1b24, 0x8a9: 0x1b28, + 0x8aa: 0x1b2c, 
0x8ab: 0x1b30, 0x8ac: 0x1b34, 0x8ad: 0x1b38, 0x8ae: 0x1b3c, 0x8af: 0x1b40, + 0x8b0: 0x1b44, 0x8b1: 0x1b48, 0x8b2: 0x1b4c, 0x8b3: 0x1b50, 0x8b4: 0x1b54, 0x8b5: 0x1b58, + 0x8b6: 0x0043, 0x8b7: 0x0045, 0x8b8: 0x0047, 0x8b9: 0x0049, 0x8ba: 0x004b, 0x8bb: 0x004d, + 0x8bc: 0x004f, 0x8bd: 0x0051, 0x8be: 0x0053, 0x8bf: 0x0055, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x06bf, 0x8c1: 0x06e3, 0x8c2: 0x06ef, 0x8c3: 0x06ff, 0x8c4: 0x0707, 0x8c5: 0x0713, + 0x8c6: 0x071b, 0x8c7: 0x0723, 0x8c8: 0x072f, 0x8c9: 0x0783, 0x8ca: 0x079b, 0x8cb: 0x07ab, + 0x8cc: 0x07bb, 0x8cd: 0x07cb, 0x8ce: 0x07db, 0x8cf: 0x07fb, 0x8d0: 0x07ff, 0x8d1: 0x0803, + 0x8d2: 0x0837, 0x8d3: 0x085f, 0x8d4: 0x086f, 0x8d5: 0x0877, 0x8d6: 0x087b, 0x8d7: 0x0887, + 0x8d8: 0x08a3, 0x8d9: 0x08a7, 0x8da: 0x08bf, 0x8db: 0x08c3, 0x8dc: 0x08cb, 0x8dd: 0x08db, + 0x8de: 0x0977, 0x8df: 0x098b, 0x8e0: 0x09cb, 0x8e1: 0x09df, 0x8e2: 0x09e7, 0x8e3: 0x09eb, + 0x8e4: 0x09fb, 0x8e5: 0x0a17, 0x8e6: 0x0a43, 0x8e7: 0x0a4f, 0x8e8: 0x0a6f, 0x8e9: 0x0a7b, + 0x8ea: 0x0a7f, 0x8eb: 0x0a83, 0x8ec: 0x0a9b, 0x8ed: 0x0a9f, 0x8ee: 0x0acb, 0x8ef: 0x0ad7, + 0x8f0: 0x0adf, 0x8f1: 0x0ae7, 0x8f2: 0x0af7, 0x8f3: 0x0aff, 0x8f4: 0x0b07, 0x8f5: 0x0b33, + 0x8f6: 0x0b37, 0x8f7: 0x0b3f, 0x8f8: 0x0b43, 0x8f9: 0x0b4b, 0x8fa: 0x0b53, 0x8fb: 0x0b63, + 0x8fc: 0x0b7f, 0x8fd: 0x0bf7, 0x8fe: 0x0c0b, 0x8ff: 0x0c0f, + // Block 0x24, offset 0x900 + 0x900: 0x0c8f, 0x901: 0x0c93, 0x902: 0x0ca7, 0x903: 0x0cab, 0x904: 0x0cb3, 0x905: 0x0cbb, + 0x906: 0x0cc3, 0x907: 0x0ccf, 0x908: 0x0cf7, 0x909: 0x0d07, 0x90a: 0x0d1b, 0x90b: 0x0d8b, + 0x90c: 0x0d97, 0x90d: 0x0da7, 0x90e: 0x0db3, 0x90f: 0x0dbf, 0x910: 0x0dc7, 0x911: 0x0dcb, + 0x912: 0x0dcf, 0x913: 0x0dd3, 0x914: 0x0dd7, 0x915: 0x0e8f, 0x916: 0x0ed7, 0x917: 0x0ee3, + 0x918: 0x0ee7, 0x919: 0x0eeb, 0x91a: 0x0eef, 0x91b: 0x0ef7, 0x91c: 0x0efb, 0x91d: 0x0f0f, + 0x91e: 0x0f2b, 0x91f: 0x0f33, 0x920: 0x0f73, 0x921: 0x0f77, 0x922: 0x0f7f, 0x923: 0x0f83, + 0x924: 0x0f8b, 0x925: 0x0f8f, 0x926: 0x0fb3, 0x927: 0x0fb7, 0x928: 0x0fd3, 0x929: 0x0fd7, + 0x92a: 0x0fdb, 0x92b: 0x0fdf, 0x92c: 0x0ff3, 0x92d: 0x1017, 0x92e: 0x101b, 0x92f: 0x101f, + 0x930: 0x1043, 0x931: 0x1083, 0x932: 0x1087, 0x933: 0x10a7, 0x934: 0x10b7, 0x935: 0x10bf, + 0x936: 0x10df, 0x937: 0x1103, 0x938: 0x1147, 0x939: 0x114f, 0x93a: 0x1163, 0x93b: 0x116f, + 0x93c: 0x1177, 0x93d: 0x117f, 0x93e: 0x1183, 0x93f: 0x1187, + // Block 0x25, offset 0x940 + 0x940: 0x119f, 0x941: 0x11a3, 0x942: 0x11bf, 0x943: 0x11c7, 0x944: 0x11cf, 0x945: 0x11d3, + 0x946: 0x11df, 0x947: 0x11e7, 0x948: 0x11eb, 0x949: 0x11ef, 0x94a: 0x11f7, 0x94b: 0x11fb, + 0x94c: 0x129b, 0x94d: 0x12af, 0x94e: 0x12e3, 0x94f: 0x12e7, 0x950: 0x12ef, 0x951: 0x131b, + 0x952: 0x1323, 0x953: 0x132b, 0x954: 0x1333, 0x955: 0x136f, 0x956: 0x1373, 0x957: 0x137b, + 0x958: 0x137f, 0x959: 0x1383, 0x95a: 0x13af, 0x95b: 0x13b3, 0x95c: 0x13bb, 0x95d: 0x13cf, + 0x95e: 0x13d3, 0x95f: 0x13ef, 0x960: 0x13f7, 0x961: 0x13fb, 0x962: 0x141f, 0x963: 0x143f, + 0x964: 0x1453, 0x965: 0x1457, 0x966: 0x145f, 0x967: 0x148b, 0x968: 0x148f, 0x969: 0x149f, + 0x96a: 0x14c3, 0x96b: 0x14cf, 0x96c: 0x14df, 0x96d: 0x14f7, 0x96e: 0x14ff, 0x96f: 0x1503, + 0x970: 0x1507, 0x971: 0x150b, 0x972: 0x1517, 0x973: 0x151b, 0x974: 0x1523, 0x975: 0x153f, + 0x976: 0x1543, 0x977: 0x1547, 0x978: 0x155f, 0x979: 0x1563, 0x97a: 0x156b, 0x97b: 0x157f, + 0x97c: 0x1583, 0x97d: 0x1587, 0x97e: 0x158f, 0x97f: 0x1593, + // Block 0x26, offset 0x980 + 0x986: 0xa000, 0x98b: 0xa000, + 0x98c: 0x3f08, 0x98d: 0xa000, 0x98e: 0x3f10, 0x98f: 0xa000, 0x990: 0x3f18, 0x991: 0xa000, + 0x992: 0x3f20, 0x993: 0xa000, 0x994: 
0x3f28, 0x995: 0xa000, 0x996: 0x3f30, 0x997: 0xa000, + 0x998: 0x3f38, 0x999: 0xa000, 0x99a: 0x3f40, 0x99b: 0xa000, 0x99c: 0x3f48, 0x99d: 0xa000, + 0x99e: 0x3f50, 0x99f: 0xa000, 0x9a0: 0x3f58, 0x9a1: 0xa000, 0x9a2: 0x3f60, + 0x9a4: 0xa000, 0x9a5: 0x3f68, 0x9a6: 0xa000, 0x9a7: 0x3f70, 0x9a8: 0xa000, 0x9a9: 0x3f78, + 0x9af: 0xa000, + 0x9b0: 0x3f80, 0x9b1: 0x3f88, 0x9b2: 0xa000, 0x9b3: 0x3f90, 0x9b4: 0x3f98, 0x9b5: 0xa000, + 0x9b6: 0x3fa0, 0x9b7: 0x3fa8, 0x9b8: 0xa000, 0x9b9: 0x3fb0, 0x9ba: 0x3fb8, 0x9bb: 0xa000, + 0x9bc: 0x3fc0, 0x9bd: 0x3fc8, + // Block 0x27, offset 0x9c0 + 0x9d4: 0x3f00, + 0x9d9: 0x9903, 0x9da: 0x9903, 0x9db: 0x42dc, 0x9dc: 0x42e2, 0x9dd: 0xa000, + 0x9de: 0x3fd0, 0x9df: 0x26b4, + 0x9e6: 0xa000, + 0x9eb: 0xa000, 0x9ec: 0x3fe0, 0x9ed: 0xa000, 0x9ee: 0x3fe8, 0x9ef: 0xa000, + 0x9f0: 0x3ff0, 0x9f1: 0xa000, 0x9f2: 0x3ff8, 0x9f3: 0xa000, 0x9f4: 0x4000, 0x9f5: 0xa000, + 0x9f6: 0x4008, 0x9f7: 0xa000, 0x9f8: 0x4010, 0x9f9: 0xa000, 0x9fa: 0x4018, 0x9fb: 0xa000, + 0x9fc: 0x4020, 0x9fd: 0xa000, 0x9fe: 0x4028, 0x9ff: 0xa000, + // Block 0x28, offset 0xa00 + 0xa00: 0x4030, 0xa01: 0xa000, 0xa02: 0x4038, 0xa04: 0xa000, 0xa05: 0x4040, + 0xa06: 0xa000, 0xa07: 0x4048, 0xa08: 0xa000, 0xa09: 0x4050, + 0xa0f: 0xa000, 0xa10: 0x4058, 0xa11: 0x4060, + 0xa12: 0xa000, 0xa13: 0x4068, 0xa14: 0x4070, 0xa15: 0xa000, 0xa16: 0x4078, 0xa17: 0x4080, + 0xa18: 0xa000, 0xa19: 0x4088, 0xa1a: 0x4090, 0xa1b: 0xa000, 0xa1c: 0x4098, 0xa1d: 0x40a0, + 0xa2f: 0xa000, + 0xa30: 0xa000, 0xa31: 0xa000, 0xa32: 0xa000, 0xa34: 0x3fd8, + 0xa37: 0x40a8, 0xa38: 0x40b0, 0xa39: 0x40b8, 0xa3a: 0x40c0, + 0xa3d: 0xa000, 0xa3e: 0x40c8, 0xa3f: 0x26c9, + // Block 0x29, offset 0xa40 + 0xa40: 0x0367, 0xa41: 0x032b, 0xa42: 0x032f, 0xa43: 0x0333, 0xa44: 0x037b, 0xa45: 0x0337, + 0xa46: 0x033b, 0xa47: 0x033f, 0xa48: 0x0343, 0xa49: 0x0347, 0xa4a: 0x034b, 0xa4b: 0x034f, + 0xa4c: 0x0353, 0xa4d: 0x0357, 0xa4e: 0x035b, 0xa4f: 0x49bd, 0xa50: 0x49c3, 0xa51: 0x49c9, + 0xa52: 0x49cf, 0xa53: 0x49d5, 0xa54: 0x49db, 0xa55: 0x49e1, 0xa56: 0x49e7, 0xa57: 0x49ed, + 0xa58: 0x49f3, 0xa59: 0x49f9, 0xa5a: 0x49ff, 0xa5b: 0x4a05, 0xa5c: 0x4a0b, 0xa5d: 0x4a11, + 0xa5e: 0x4a17, 0xa5f: 0x4a1d, 0xa60: 0x4a23, 0xa61: 0x4a29, 0xa62: 0x4a2f, 0xa63: 0x4a35, + 0xa64: 0x03c3, 0xa65: 0x035f, 0xa66: 0x0363, 0xa67: 0x03e7, 0xa68: 0x03eb, 0xa69: 0x03ef, + 0xa6a: 0x03f3, 0xa6b: 0x03f7, 0xa6c: 0x03fb, 0xa6d: 0x03ff, 0xa6e: 0x036b, 0xa6f: 0x0403, + 0xa70: 0x0407, 0xa71: 0x036f, 0xa72: 0x0373, 0xa73: 0x0377, 0xa74: 0x037f, 0xa75: 0x0383, + 0xa76: 0x0387, 0xa77: 0x038b, 0xa78: 0x038f, 0xa79: 0x0393, 0xa7a: 0x0397, 0xa7b: 0x039b, + 0xa7c: 0x039f, 0xa7d: 0x03a3, 0xa7e: 0x03a7, 0xa7f: 0x03ab, + // Block 0x2a, offset 0xa80 + 0xa80: 0x03af, 0xa81: 0x03b3, 0xa82: 0x040b, 0xa83: 0x040f, 0xa84: 0x03b7, 0xa85: 0x03bb, + 0xa86: 0x03bf, 0xa87: 0x03c7, 0xa88: 0x03cb, 0xa89: 0x03cf, 0xa8a: 0x03d3, 0xa8b: 0x03d7, + 0xa8c: 0x03db, 0xa8d: 0x03df, 0xa8e: 0x03e3, + 0xa92: 0x06bf, 0xa93: 0x071b, 0xa94: 0x06cb, 0xa95: 0x097b, 0xa96: 0x06cf, 0xa97: 0x06e7, + 0xa98: 0x06d3, 0xa99: 0x0f93, 0xa9a: 0x0707, 0xa9b: 0x06db, 0xa9c: 0x06c3, 0xa9d: 0x09ff, + 0xa9e: 0x098f, 0xa9f: 0x072f, + // Block 0x2b, offset 0xac0 + 0xac0: 0x2054, 0xac1: 0x205a, 0xac2: 0x2060, 0xac3: 0x2066, 0xac4: 0x206c, 0xac5: 0x2072, + 0xac6: 0x2078, 0xac7: 0x207e, 0xac8: 0x2084, 0xac9: 0x208a, 0xaca: 0x2090, 0xacb: 0x2096, + 0xacc: 0x209c, 0xacd: 0x20a2, 0xace: 0x2726, 0xacf: 0x272f, 0xad0: 0x2738, 0xad1: 0x2741, + 0xad2: 0x274a, 0xad3: 0x2753, 0xad4: 0x275c, 0xad5: 0x2765, 0xad6: 0x276e, 0xad7: 0x2780, + 0xad8: 0x2789, 0xad9: 0x2792, 
0xada: 0x279b, 0xadb: 0x27a4, 0xadc: 0x2777, 0xadd: 0x2bac, + 0xade: 0x2aed, 0xae0: 0x20a8, 0xae1: 0x20c0, 0xae2: 0x20b4, 0xae3: 0x2108, + 0xae4: 0x20c6, 0xae5: 0x20e4, 0xae6: 0x20ae, 0xae7: 0x20de, 0xae8: 0x20ba, 0xae9: 0x20f0, + 0xaea: 0x2120, 0xaeb: 0x213e, 0xaec: 0x2138, 0xaed: 0x212c, 0xaee: 0x217a, 0xaef: 0x210e, + 0xaf0: 0x211a, 0xaf1: 0x2132, 0xaf2: 0x2126, 0xaf3: 0x2150, 0xaf4: 0x20fc, 0xaf5: 0x2144, + 0xaf6: 0x216e, 0xaf7: 0x2156, 0xaf8: 0x20ea, 0xaf9: 0x20cc, 0xafa: 0x2102, 0xafb: 0x2114, + 0xafc: 0x214a, 0xafd: 0x20d2, 0xafe: 0x2174, 0xaff: 0x20f6, + // Block 0x2c, offset 0xb00 + 0xb00: 0x215c, 0xb01: 0x20d8, 0xb02: 0x2162, 0xb03: 0x2168, 0xb04: 0x092f, 0xb05: 0x0b03, + 0xb06: 0x0ca7, 0xb07: 0x10c7, + 0xb10: 0x1bc4, 0xb11: 0x18a9, + 0xb12: 0x18ac, 0xb13: 0x18af, 0xb14: 0x18b2, 0xb15: 0x18b5, 0xb16: 0x18b8, 0xb17: 0x18bb, + 0xb18: 0x18be, 0xb19: 0x18c1, 0xb1a: 0x18ca, 0xb1b: 0x18cd, 0xb1c: 0x18d0, 0xb1d: 0x18d3, + 0xb1e: 0x18d6, 0xb1f: 0x18d9, 0xb20: 0x0313, 0xb21: 0x031b, 0xb22: 0x031f, 0xb23: 0x0327, + 0xb24: 0x032b, 0xb25: 0x032f, 0xb26: 0x0337, 0xb27: 0x033f, 0xb28: 0x0343, 0xb29: 0x034b, + 0xb2a: 0x034f, 0xb2b: 0x0353, 0xb2c: 0x0357, 0xb2d: 0x035b, 0xb2e: 0x2e18, 0xb2f: 0x2e20, + 0xb30: 0x2e28, 0xb31: 0x2e30, 0xb32: 0x2e38, 0xb33: 0x2e40, 0xb34: 0x2e48, 0xb35: 0x2e50, + 0xb36: 0x2e60, 0xb37: 0x2e68, 0xb38: 0x2e70, 0xb39: 0x2e78, 0xb3a: 0x2e80, 0xb3b: 0x2e88, + 0xb3c: 0x2ed3, 0xb3d: 0x2e9b, 0xb3e: 0x2e58, + // Block 0x2d, offset 0xb40 + 0xb40: 0x06bf, 0xb41: 0x071b, 0xb42: 0x06cb, 0xb43: 0x097b, 0xb44: 0x071f, 0xb45: 0x07af, + 0xb46: 0x06c7, 0xb47: 0x07ab, 0xb48: 0x070b, 0xb49: 0x0887, 0xb4a: 0x0d07, 0xb4b: 0x0e8f, + 0xb4c: 0x0dd7, 0xb4d: 0x0d1b, 0xb4e: 0x145f, 0xb4f: 0x098b, 0xb50: 0x0ccf, 0xb51: 0x0d4b, + 0xb52: 0x0d0b, 0xb53: 0x104b, 0xb54: 0x08fb, 0xb55: 0x0f03, 0xb56: 0x1387, 0xb57: 0x105f, + 0xb58: 0x0843, 0xb59: 0x108f, 0xb5a: 0x0f9b, 0xb5b: 0x0a17, 0xb5c: 0x140f, 0xb5d: 0x077f, + 0xb5e: 0x08ab, 0xb5f: 0x0df7, 0xb60: 0x1527, 0xb61: 0x0743, 0xb62: 0x07d3, 0xb63: 0x0d9b, + 0xb64: 0x06cf, 0xb65: 0x06e7, 0xb66: 0x06d3, 0xb67: 0x0adb, 0xb68: 0x08ef, 0xb69: 0x087f, + 0xb6a: 0x0a57, 0xb6b: 0x0a4b, 0xb6c: 0x0feb, 0xb6d: 0x073f, 0xb6e: 0x139b, 0xb6f: 0x089b, + 0xb70: 0x09f3, 0xb71: 0x18dc, 0xb72: 0x18df, 0xb73: 0x18e2, 0xb74: 0x18e5, 0xb75: 0x18ee, + 0xb76: 0x18f1, 0xb77: 0x18f4, 0xb78: 0x18f7, 0xb79: 0x18fa, 0xb7a: 0x18fd, 0xb7b: 0x1900, + 0xb7c: 0x1903, 0xb7d: 0x1906, 0xb7e: 0x1909, 0xb7f: 0x1912, + // Block 0x2e, offset 0xb80 + 0xb80: 0x1cc6, 0xb81: 0x1cd5, 0xb82: 0x1ce4, 0xb83: 0x1cf3, 0xb84: 0x1d02, 0xb85: 0x1d11, + 0xb86: 0x1d20, 0xb87: 0x1d2f, 0xb88: 0x1d3e, 0xb89: 0x218c, 0xb8a: 0x219e, 0xb8b: 0x21b0, + 0xb8c: 0x1954, 0xb8d: 0x1c04, 0xb8e: 0x19d2, 0xb8f: 0x1ba8, 0xb90: 0x04cb, 0xb91: 0x04d3, + 0xb92: 0x04db, 0xb93: 0x04e3, 0xb94: 0x04eb, 0xb95: 0x04ef, 0xb96: 0x04f3, 0xb97: 0x04f7, + 0xb98: 0x04fb, 0xb99: 0x04ff, 0xb9a: 0x0503, 0xb9b: 0x0507, 0xb9c: 0x050b, 0xb9d: 0x050f, + 0xb9e: 0x0513, 0xb9f: 0x0517, 0xba0: 0x051b, 0xba1: 0x0523, 0xba2: 0x0527, 0xba3: 0x052b, + 0xba4: 0x052f, 0xba5: 0x0533, 0xba6: 0x0537, 0xba7: 0x053b, 0xba8: 0x053f, 0xba9: 0x0543, + 0xbaa: 0x0547, 0xbab: 0x054b, 0xbac: 0x054f, 0xbad: 0x0553, 0xbae: 0x0557, 0xbaf: 0x055b, + 0xbb0: 0x055f, 0xbb1: 0x0563, 0xbb2: 0x0567, 0xbb3: 0x056f, 0xbb4: 0x0577, 0xbb5: 0x057f, + 0xbb6: 0x0583, 0xbb7: 0x0587, 0xbb8: 0x058b, 0xbb9: 0x058f, 0xbba: 0x0593, 0xbbb: 0x0597, + 0xbbc: 0x059b, 0xbbd: 0x059f, 0xbbe: 0x05a3, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x2b0c, 0xbc1: 0x29a8, 0xbc2: 0x2b1c, 0xbc3: 0x2880, 0xbc4: 
0x2ee4, 0xbc5: 0x288a, + 0xbc6: 0x2894, 0xbc7: 0x2f28, 0xbc8: 0x29b5, 0xbc9: 0x289e, 0xbca: 0x28a8, 0xbcb: 0x28b2, + 0xbcc: 0x29dc, 0xbcd: 0x29e9, 0xbce: 0x29c2, 0xbcf: 0x29cf, 0xbd0: 0x2ea9, 0xbd1: 0x29f6, + 0xbd2: 0x2a03, 0xbd3: 0x2bbe, 0xbd4: 0x26bb, 0xbd5: 0x2bd1, 0xbd6: 0x2be4, 0xbd7: 0x2b2c, + 0xbd8: 0x2a10, 0xbd9: 0x2bf7, 0xbda: 0x2c0a, 0xbdb: 0x2a1d, 0xbdc: 0x28bc, 0xbdd: 0x28c6, + 0xbde: 0x2eb7, 0xbdf: 0x2a2a, 0xbe0: 0x2b3c, 0xbe1: 0x2ef5, 0xbe2: 0x28d0, 0xbe3: 0x28da, + 0xbe4: 0x2a37, 0xbe5: 0x28e4, 0xbe6: 0x28ee, 0xbe7: 0x26d0, 0xbe8: 0x26d7, 0xbe9: 0x28f8, + 0xbea: 0x2902, 0xbeb: 0x2c1d, 0xbec: 0x2a44, 0xbed: 0x2b4c, 0xbee: 0x2c30, 0xbef: 0x2a51, + 0xbf0: 0x2916, 0xbf1: 0x290c, 0xbf2: 0x2f3c, 0xbf3: 0x2a5e, 0xbf4: 0x2c43, 0xbf5: 0x2920, + 0xbf6: 0x2b5c, 0xbf7: 0x292a, 0xbf8: 0x2a78, 0xbf9: 0x2934, 0xbfa: 0x2a85, 0xbfb: 0x2f06, + 0xbfc: 0x2a6b, 0xbfd: 0x2b6c, 0xbfe: 0x2a92, 0xbff: 0x26de, + // Block 0x30, offset 0xc00 + 0xc00: 0x2f17, 0xc01: 0x293e, 0xc02: 0x2948, 0xc03: 0x2a9f, 0xc04: 0x2952, 0xc05: 0x295c, + 0xc06: 0x2966, 0xc07: 0x2b7c, 0xc08: 0x2aac, 0xc09: 0x26e5, 0xc0a: 0x2c56, 0xc0b: 0x2e90, + 0xc0c: 0x2b8c, 0xc0d: 0x2ab9, 0xc0e: 0x2ec5, 0xc0f: 0x2970, 0xc10: 0x297a, 0xc11: 0x2ac6, + 0xc12: 0x26ec, 0xc13: 0x2ad3, 0xc14: 0x2b9c, 0xc15: 0x26f3, 0xc16: 0x2c69, 0xc17: 0x2984, + 0xc18: 0x1cb7, 0xc19: 0x1ccb, 0xc1a: 0x1cda, 0xc1b: 0x1ce9, 0xc1c: 0x1cf8, 0xc1d: 0x1d07, + 0xc1e: 0x1d16, 0xc1f: 0x1d25, 0xc20: 0x1d34, 0xc21: 0x1d43, 0xc22: 0x2192, 0xc23: 0x21a4, + 0xc24: 0x21b6, 0xc25: 0x21c2, 0xc26: 0x21ce, 0xc27: 0x21da, 0xc28: 0x21e6, 0xc29: 0x21f2, + 0xc2a: 0x21fe, 0xc2b: 0x220a, 0xc2c: 0x2246, 0xc2d: 0x2252, 0xc2e: 0x225e, 0xc2f: 0x226a, + 0xc30: 0x2276, 0xc31: 0x1c14, 0xc32: 0x19c6, 0xc33: 0x1936, 0xc34: 0x1be4, 0xc35: 0x1a47, + 0xc36: 0x1a56, 0xc37: 0x19cc, 0xc38: 0x1bfc, 0xc39: 0x1c00, 0xc3a: 0x1960, 0xc3b: 0x2701, + 0xc3c: 0x270f, 0xc3d: 0x26fa, 0xc3e: 0x2708, 0xc3f: 0x2ae0, + // Block 0x31, offset 0xc40 + 0xc40: 0x1a4a, 0xc41: 0x1a32, 0xc42: 0x1c60, 0xc43: 0x1a1a, 0xc44: 0x19f3, 0xc45: 0x1969, + 0xc46: 0x1978, 0xc47: 0x1948, 0xc48: 0x1bf0, 0xc49: 0x1d52, 0xc4a: 0x1a4d, 0xc4b: 0x1a35, + 0xc4c: 0x1c64, 0xc4d: 0x1c70, 0xc4e: 0x1a26, 0xc4f: 0x19fc, 0xc50: 0x1957, 0xc51: 0x1c1c, + 0xc52: 0x1bb0, 0xc53: 0x1b9c, 0xc54: 0x1bcc, 0xc55: 0x1c74, 0xc56: 0x1a29, 0xc57: 0x19c9, + 0xc58: 0x19ff, 0xc59: 0x19de, 0xc5a: 0x1a41, 0xc5b: 0x1c78, 0xc5c: 0x1a2c, 0xc5d: 0x19c0, + 0xc5e: 0x1a02, 0xc5f: 0x1c3c, 0xc60: 0x1bf4, 0xc61: 0x1a14, 0xc62: 0x1c24, 0xc63: 0x1c40, + 0xc64: 0x1bf8, 0xc65: 0x1a17, 0xc66: 0x1c28, 0xc67: 0x22e8, 0xc68: 0x22fc, 0xc69: 0x1996, + 0xc6a: 0x1c20, 0xc6b: 0x1bb4, 0xc6c: 0x1ba0, 0xc6d: 0x1c48, 0xc6e: 0x2716, 0xc6f: 0x27ad, + 0xc70: 0x1a59, 0xc71: 0x1a44, 0xc72: 0x1c7c, 0xc73: 0x1a2f, 0xc74: 0x1a50, 0xc75: 0x1a38, + 0xc76: 0x1c68, 0xc77: 0x1a1d, 0xc78: 0x19f6, 0xc79: 0x1981, 0xc7a: 0x1a53, 0xc7b: 0x1a3b, + 0xc7c: 0x1c6c, 0xc7d: 0x1a20, 0xc7e: 0x19f9, 0xc7f: 0x1984, + // Block 0x32, offset 0xc80 + 0xc80: 0x1c2c, 0xc81: 0x1bb8, 0xc82: 0x1d4d, 0xc83: 0x1939, 0xc84: 0x19ba, 0xc85: 0x19bd, + 0xc86: 0x22f5, 0xc87: 0x1b94, 0xc88: 0x19c3, 0xc89: 0x194b, 0xc8a: 0x19e1, 0xc8b: 0x194e, + 0xc8c: 0x19ea, 0xc8d: 0x196c, 0xc8e: 0x196f, 0xc8f: 0x1a05, 0xc90: 0x1a0b, 0xc91: 0x1a0e, + 0xc92: 0x1c30, 0xc93: 0x1a11, 0xc94: 0x1a23, 0xc95: 0x1c38, 0xc96: 0x1c44, 0xc97: 0x1990, + 0xc98: 0x1d57, 0xc99: 0x1bbc, 0xc9a: 0x1993, 0xc9b: 0x1a5c, 0xc9c: 0x19a5, 0xc9d: 0x19b4, + 0xc9e: 0x22e2, 0xc9f: 0x22dc, 0xca0: 0x1cc1, 0xca1: 0x1cd0, 0xca2: 0x1cdf, 0xca3: 0x1cee, + 0xca4: 0x1cfd, 0xca5: 0x1d0c, 
0xca6: 0x1d1b, 0xca7: 0x1d2a, 0xca8: 0x1d39, 0xca9: 0x2186, + 0xcaa: 0x2198, 0xcab: 0x21aa, 0xcac: 0x21bc, 0xcad: 0x21c8, 0xcae: 0x21d4, 0xcaf: 0x21e0, + 0xcb0: 0x21ec, 0xcb1: 0x21f8, 0xcb2: 0x2204, 0xcb3: 0x2240, 0xcb4: 0x224c, 0xcb5: 0x2258, + 0xcb6: 0x2264, 0xcb7: 0x2270, 0xcb8: 0x227c, 0xcb9: 0x2282, 0xcba: 0x2288, 0xcbb: 0x228e, + 0xcbc: 0x2294, 0xcbd: 0x22a6, 0xcbe: 0x22ac, 0xcbf: 0x1c10, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x1377, 0xcc1: 0x0cfb, 0xcc2: 0x13d3, 0xcc3: 0x139f, 0xcc4: 0x0e57, 0xcc5: 0x06eb, + 0xcc6: 0x08df, 0xcc7: 0x162b, 0xcc8: 0x162b, 0xcc9: 0x0a0b, 0xcca: 0x145f, 0xccb: 0x0943, + 0xccc: 0x0a07, 0xccd: 0x0bef, 0xcce: 0x0fcf, 0xccf: 0x115f, 0xcd0: 0x1297, 0xcd1: 0x12d3, + 0xcd2: 0x1307, 0xcd3: 0x141b, 0xcd4: 0x0d73, 0xcd5: 0x0dff, 0xcd6: 0x0eab, 0xcd7: 0x0f43, + 0xcd8: 0x125f, 0xcd9: 0x1447, 0xcda: 0x1573, 0xcdb: 0x070f, 0xcdc: 0x08b3, 0xcdd: 0x0d87, + 0xcde: 0x0ecf, 0xcdf: 0x1293, 0xce0: 0x15c3, 0xce1: 0x0ab3, 0xce2: 0x0e77, 0xce3: 0x1283, + 0xce4: 0x1317, 0xce5: 0x0c23, 0xce6: 0x11bb, 0xce7: 0x12df, 0xce8: 0x0b1f, 0xce9: 0x0d0f, + 0xcea: 0x0e17, 0xceb: 0x0f1b, 0xcec: 0x1427, 0xced: 0x074f, 0xcee: 0x07e7, 0xcef: 0x0853, + 0xcf0: 0x0c8b, 0xcf1: 0x0d7f, 0xcf2: 0x0ecb, 0xcf3: 0x0fef, 0xcf4: 0x1177, 0xcf5: 0x128b, + 0xcf6: 0x12a3, 0xcf7: 0x13c7, 0xcf8: 0x14ef, 0xcf9: 0x15a3, 0xcfa: 0x15bf, 0xcfb: 0x102b, + 0xcfc: 0x106b, 0xcfd: 0x1123, 0xcfe: 0x1243, 0xcff: 0x147b, + // Block 0x34, offset 0xd00 + 0xd00: 0x15cb, 0xd01: 0x134b, 0xd02: 0x09c7, 0xd03: 0x0b3b, 0xd04: 0x10db, 0xd05: 0x119b, + 0xd06: 0x0eff, 0xd07: 0x1033, 0xd08: 0x1397, 0xd09: 0x14e7, 0xd0a: 0x09c3, 0xd0b: 0x0a8f, + 0xd0c: 0x0d77, 0xd0d: 0x0e2b, 0xd0e: 0x0e5f, 0xd0f: 0x1113, 0xd10: 0x113b, 0xd11: 0x14a7, + 0xd12: 0x084f, 0xd13: 0x11a7, 0xd14: 0x07f3, 0xd15: 0x07ef, 0xd16: 0x1097, 0xd17: 0x1127, + 0xd18: 0x125b, 0xd19: 0x14af, 0xd1a: 0x1367, 0xd1b: 0x0c27, 0xd1c: 0x0d73, 0xd1d: 0x1357, + 0xd1e: 0x06f7, 0xd1f: 0x0a63, 0xd20: 0x0b93, 0xd21: 0x0f2f, 0xd22: 0x0faf, 0xd23: 0x0873, + 0xd24: 0x103b, 0xd25: 0x075f, 0xd26: 0x0b77, 0xd27: 0x06d7, 0xd28: 0x0deb, 0xd29: 0x0ca3, + 0xd2a: 0x110f, 0xd2b: 0x08c7, 0xd2c: 0x09b3, 0xd2d: 0x0ffb, 0xd2e: 0x1263, 0xd2f: 0x133b, + 0xd30: 0x0db7, 0xd31: 0x13f7, 0xd32: 0x0de3, 0xd33: 0x0c37, 0xd34: 0x121b, 0xd35: 0x0c57, + 0xd36: 0x0fab, 0xd37: 0x072b, 0xd38: 0x07a7, 0xd39: 0x07eb, 0xd3a: 0x0d53, 0xd3b: 0x10fb, + 0xd3c: 0x11f3, 0xd3d: 0x1347, 0xd3e: 0x145b, 0xd3f: 0x085b, + // Block 0x35, offset 0xd40 + 0xd40: 0x090f, 0xd41: 0x0a17, 0xd42: 0x0b2f, 0xd43: 0x0cbf, 0xd44: 0x0e7b, 0xd45: 0x103f, + 0xd46: 0x1497, 0xd47: 0x157b, 0xd48: 0x15cf, 0xd49: 0x15e7, 0xd4a: 0x0837, 0xd4b: 0x0cf3, + 0xd4c: 0x0da3, 0xd4d: 0x13eb, 0xd4e: 0x0afb, 0xd4f: 0x0bd7, 0xd50: 0x0bf3, 0xd51: 0x0c83, + 0xd52: 0x0e6b, 0xd53: 0x0eb7, 0xd54: 0x0f67, 0xd55: 0x108b, 0xd56: 0x112f, 0xd57: 0x1193, + 0xd58: 0x13db, 0xd59: 0x126b, 0xd5a: 0x1403, 0xd5b: 0x147f, 0xd5c: 0x080f, 0xd5d: 0x083b, + 0xd5e: 0x0923, 0xd5f: 0x0ea7, 0xd60: 0x12f3, 0xd61: 0x133b, 0xd62: 0x0b1b, 0xd63: 0x0b8b, + 0xd64: 0x0c4f, 0xd65: 0x0daf, 0xd66: 0x10d7, 0xd67: 0x0f23, 0xd68: 0x073b, 0xd69: 0x097f, + 0xd6a: 0x0a63, 0xd6b: 0x0ac7, 0xd6c: 0x0b97, 0xd6d: 0x0f3f, 0xd6e: 0x0f5b, 0xd6f: 0x116b, + 0xd70: 0x118b, 0xd71: 0x1463, 0xd72: 0x14e3, 0xd73: 0x14f3, 0xd74: 0x152f, 0xd75: 0x0753, + 0xd76: 0x107f, 0xd77: 0x144f, 0xd78: 0x14cb, 0xd79: 0x0baf, 0xd7a: 0x0717, 0xd7b: 0x0777, + 0xd7c: 0x0a67, 0xd7d: 0x0a87, 0xd7e: 0x0caf, 0xd7f: 0x0d73, + // Block 0x36, offset 0xd80 + 0xd80: 0x0ec3, 0xd81: 0x0fcb, 0xd82: 0x1277, 0xd83: 0x1417, 0xd84: 0x1623, 0xd85: 
0x0ce3, + 0xd86: 0x14a3, 0xd87: 0x0833, 0xd88: 0x0d2f, 0xd89: 0x0d3b, 0xd8a: 0x0e0f, 0xd8b: 0x0e47, + 0xd8c: 0x0f4b, 0xd8d: 0x0fa7, 0xd8e: 0x1027, 0xd8f: 0x110b, 0xd90: 0x153b, 0xd91: 0x07af, + 0xd92: 0x0c03, 0xd93: 0x14b3, 0xd94: 0x0767, 0xd95: 0x0aab, 0xd96: 0x0e2f, 0xd97: 0x13df, + 0xd98: 0x0b67, 0xd99: 0x0bb7, 0xd9a: 0x0d43, 0xd9b: 0x0f2f, 0xd9c: 0x14bb, 0xd9d: 0x0817, + 0xd9e: 0x08ff, 0xd9f: 0x0a97, 0xda0: 0x0cd3, 0xda1: 0x0d1f, 0xda2: 0x0d5f, 0xda3: 0x0df3, + 0xda4: 0x0f47, 0xda5: 0x0fbb, 0xda6: 0x1157, 0xda7: 0x12f7, 0xda8: 0x1303, 0xda9: 0x1457, + 0xdaa: 0x14d7, 0xdab: 0x0883, 0xdac: 0x0e4b, 0xdad: 0x0903, 0xdae: 0x0ec7, 0xdaf: 0x0f6b, + 0xdb0: 0x1287, 0xdb1: 0x14bf, 0xdb2: 0x15ab, 0xdb3: 0x15d3, 0xdb4: 0x0d37, 0xdb5: 0x0e27, + 0xdb6: 0x11c3, 0xdb7: 0x10b7, 0xdb8: 0x10c3, 0xdb9: 0x10e7, 0xdba: 0x0f17, 0xdbb: 0x0e9f, + 0xdbc: 0x1363, 0xdbd: 0x0733, 0xdbe: 0x122b, 0xdbf: 0x081b, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x080b, 0xdc1: 0x0b0b, 0xdc2: 0x0c2b, 0xdc3: 0x10f3, 0xdc4: 0x0a53, 0xdc5: 0x0e03, + 0xdc6: 0x0cef, 0xdc7: 0x13e7, 0xdc8: 0x12e7, 0xdc9: 0x14ab, 0xdca: 0x1323, 0xdcb: 0x0b27, + 0xdcc: 0x0787, 0xdcd: 0x095b, 0xdd0: 0x09af, + 0xdd2: 0x0cdf, 0xdd5: 0x07f7, 0xdd6: 0x0f1f, 0xdd7: 0x0fe3, + 0xdd8: 0x1047, 0xdd9: 0x1063, 0xdda: 0x1067, 0xddb: 0x107b, 0xddc: 0x14fb, 0xddd: 0x10eb, + 0xdde: 0x116f, 0xde0: 0x128f, 0xde2: 0x1353, + 0xde5: 0x1407, 0xde6: 0x1433, + 0xdea: 0x154f, 0xdeb: 0x1553, 0xdec: 0x1557, 0xded: 0x15bb, 0xdee: 0x142b, 0xdef: 0x14c7, + 0xdf0: 0x0757, 0xdf1: 0x077b, 0xdf2: 0x078f, 0xdf3: 0x084b, 0xdf4: 0x0857, 0xdf5: 0x0897, + 0xdf6: 0x094b, 0xdf7: 0x0967, 0xdf8: 0x096f, 0xdf9: 0x09ab, 0xdfa: 0x09b7, 0xdfb: 0x0a93, + 0xdfc: 0x0a9b, 0xdfd: 0x0ba3, 0xdfe: 0x0bcb, 0xdff: 0x0bd3, + // Block 0x38, offset 0xe00 + 0xe00: 0x0beb, 0xe01: 0x0c97, 0xe02: 0x0cc7, 0xe03: 0x0ce7, 0xe04: 0x0d57, 0xe05: 0x0e1b, + 0xe06: 0x0e37, 0xe07: 0x0e67, 0xe08: 0x0ebb, 0xe09: 0x0edb, 0xe0a: 0x0f4f, 0xe0b: 0x102f, + 0xe0c: 0x104b, 0xe0d: 0x1053, 0xe0e: 0x104f, 0xe0f: 0x1057, 0xe10: 0x105b, 0xe11: 0x105f, + 0xe12: 0x1073, 0xe13: 0x1077, 0xe14: 0x109b, 0xe15: 0x10af, 0xe16: 0x10cb, 0xe17: 0x112f, + 0xe18: 0x1137, 0xe19: 0x113f, 0xe1a: 0x1153, 0xe1b: 0x117b, 0xe1c: 0x11cb, 0xe1d: 0x11ff, + 0xe1e: 0x11ff, 0xe1f: 0x1267, 0xe20: 0x130f, 0xe21: 0x1327, 0xe22: 0x135b, 0xe23: 0x135f, + 0xe24: 0x13a3, 0xe25: 0x13a7, 0xe26: 0x13ff, 0xe27: 0x1407, 0xe28: 0x14db, 0xe29: 0x151f, + 0xe2a: 0x1537, 0xe2b: 0x0b9b, 0xe2c: 0x171e, 0xe2d: 0x11e3, + 0xe30: 0x06df, 0xe31: 0x07e3, 0xe32: 0x07a3, 0xe33: 0x074b, 0xe34: 0x078b, 0xe35: 0x07b7, + 0xe36: 0x0847, 0xe37: 0x0863, 0xe38: 0x094b, 0xe39: 0x0937, 0xe3a: 0x0947, 0xe3b: 0x0963, + 0xe3c: 0x09af, 0xe3d: 0x09bf, 0xe3e: 0x0a03, 0xe3f: 0x0a0f, + // Block 0x39, offset 0xe40 + 0xe40: 0x0a2b, 0xe41: 0x0a3b, 0xe42: 0x0b23, 0xe43: 0x0b2b, 0xe44: 0x0b5b, 0xe45: 0x0b7b, + 0xe46: 0x0bab, 0xe47: 0x0bc3, 0xe48: 0x0bb3, 0xe49: 0x0bd3, 0xe4a: 0x0bc7, 0xe4b: 0x0beb, + 0xe4c: 0x0c07, 0xe4d: 0x0c5f, 0xe4e: 0x0c6b, 0xe4f: 0x0c73, 0xe50: 0x0c9b, 0xe51: 0x0cdf, + 0xe52: 0x0d0f, 0xe53: 0x0d13, 0xe54: 0x0d27, 0xe55: 0x0da7, 0xe56: 0x0db7, 0xe57: 0x0e0f, + 0xe58: 0x0e5b, 0xe59: 0x0e53, 0xe5a: 0x0e67, 0xe5b: 0x0e83, 0xe5c: 0x0ebb, 0xe5d: 0x1013, + 0xe5e: 0x0edf, 0xe5f: 0x0f13, 0xe60: 0x0f1f, 0xe61: 0x0f5f, 0xe62: 0x0f7b, 0xe63: 0x0f9f, + 0xe64: 0x0fc3, 0xe65: 0x0fc7, 0xe66: 0x0fe3, 0xe67: 0x0fe7, 0xe68: 0x0ff7, 0xe69: 0x100b, + 0xe6a: 0x1007, 0xe6b: 0x1037, 0xe6c: 0x10b3, 0xe6d: 0x10cb, 0xe6e: 0x10e3, 0xe6f: 0x111b, + 0xe70: 0x112f, 0xe71: 0x114b, 0xe72: 0x117b, 0xe73: 0x122f, 0xe74: 0x1257, 
0xe75: 0x12cb, + 0xe76: 0x1313, 0xe77: 0x131f, 0xe78: 0x1327, 0xe79: 0x133f, 0xe7a: 0x1353, 0xe7b: 0x1343, + 0xe7c: 0x135b, 0xe7d: 0x1357, 0xe7e: 0x134f, 0xe7f: 0x135f, + // Block 0x3a, offset 0xe80 + 0xe80: 0x136b, 0xe81: 0x13a7, 0xe82: 0x13e3, 0xe83: 0x1413, 0xe84: 0x144b, 0xe85: 0x146b, + 0xe86: 0x14b7, 0xe87: 0x14db, 0xe88: 0x14fb, 0xe89: 0x150f, 0xe8a: 0x151f, 0xe8b: 0x152b, + 0xe8c: 0x1537, 0xe8d: 0x158b, 0xe8e: 0x162b, 0xe8f: 0x16b5, 0xe90: 0x16b0, 0xe91: 0x16e2, + 0xe92: 0x0607, 0xe93: 0x062f, 0xe94: 0x0633, 0xe95: 0x1764, 0xe96: 0x1791, 0xe97: 0x1809, + 0xe98: 0x1617, 0xe99: 0x1627, + // Block 0x3b, offset 0xec0 + 0xec0: 0x19d5, 0xec1: 0x19d8, 0xec2: 0x19db, 0xec3: 0x1c08, 0xec4: 0x1c0c, 0xec5: 0x1a5f, + 0xec6: 0x1a5f, + 0xed3: 0x1d75, 0xed4: 0x1d66, 0xed5: 0x1d6b, 0xed6: 0x1d7a, 0xed7: 0x1d70, + 0xedd: 0x4390, + 0xede: 0x8115, 0xedf: 0x4402, 0xee0: 0x022d, 0xee1: 0x0215, 0xee2: 0x021e, 0xee3: 0x0221, + 0xee4: 0x0224, 0xee5: 0x0227, 0xee6: 0x022a, 0xee7: 0x0230, 0xee8: 0x0233, 0xee9: 0x0017, + 0xeea: 0x43f0, 0xeeb: 0x43f6, 0xeec: 0x44f4, 0xeed: 0x44fc, 0xeee: 0x4348, 0xeef: 0x434e, + 0xef0: 0x4354, 0xef1: 0x435a, 0xef2: 0x4366, 0xef3: 0x436c, 0xef4: 0x4372, 0xef5: 0x437e, + 0xef6: 0x4384, 0xef8: 0x438a, 0xef9: 0x4396, 0xefa: 0x439c, 0xefb: 0x43a2, + 0xefc: 0x43ae, 0xefe: 0x43b4, + // Block 0x3c, offset 0xf00 + 0xf00: 0x43ba, 0xf01: 0x43c0, 0xf03: 0x43c6, 0xf04: 0x43cc, + 0xf06: 0x43d8, 0xf07: 0x43de, 0xf08: 0x43e4, 0xf09: 0x43ea, 0xf0a: 0x43fc, 0xf0b: 0x4378, + 0xf0c: 0x4360, 0xf0d: 0x43a8, 0xf0e: 0x43d2, 0xf0f: 0x1d7f, 0xf10: 0x0299, 0xf11: 0x0299, + 0xf12: 0x02a2, 0xf13: 0x02a2, 0xf14: 0x02a2, 0xf15: 0x02a2, 0xf16: 0x02a5, 0xf17: 0x02a5, + 0xf18: 0x02a5, 0xf19: 0x02a5, 0xf1a: 0x02ab, 0xf1b: 0x02ab, 0xf1c: 0x02ab, 0xf1d: 0x02ab, + 0xf1e: 0x029f, 0xf1f: 0x029f, 0xf20: 0x029f, 0xf21: 0x029f, 0xf22: 0x02a8, 0xf23: 0x02a8, + 0xf24: 0x02a8, 0xf25: 0x02a8, 0xf26: 0x029c, 0xf27: 0x029c, 0xf28: 0x029c, 0xf29: 0x029c, + 0xf2a: 0x02cf, 0xf2b: 0x02cf, 0xf2c: 0x02cf, 0xf2d: 0x02cf, 0xf2e: 0x02d2, 0xf2f: 0x02d2, + 0xf30: 0x02d2, 0xf31: 0x02d2, 0xf32: 0x02b1, 0xf33: 0x02b1, 0xf34: 0x02b1, 0xf35: 0x02b1, + 0xf36: 0x02ae, 0xf37: 0x02ae, 0xf38: 0x02ae, 0xf39: 0x02ae, 0xf3a: 0x02b4, 0xf3b: 0x02b4, + 0xf3c: 0x02b4, 0xf3d: 0x02b4, 0xf3e: 0x02b7, 0xf3f: 0x02b7, + // Block 0x3d, offset 0xf40 + 0xf40: 0x02b7, 0xf41: 0x02b7, 0xf42: 0x02c0, 0xf43: 0x02c0, 0xf44: 0x02bd, 0xf45: 0x02bd, + 0xf46: 0x02c3, 0xf47: 0x02c3, 0xf48: 0x02ba, 0xf49: 0x02ba, 0xf4a: 0x02c9, 0xf4b: 0x02c9, + 0xf4c: 0x02c6, 0xf4d: 0x02c6, 0xf4e: 0x02d5, 0xf4f: 0x02d5, 0xf50: 0x02d5, 0xf51: 0x02d5, + 0xf52: 0x02db, 0xf53: 0x02db, 0xf54: 0x02db, 0xf55: 0x02db, 0xf56: 0x02e1, 0xf57: 0x02e1, + 0xf58: 0x02e1, 0xf59: 0x02e1, 0xf5a: 0x02de, 0xf5b: 0x02de, 0xf5c: 0x02de, 0xf5d: 0x02de, + 0xf5e: 0x02e4, 0xf5f: 0x02e4, 0xf60: 0x02e7, 0xf61: 0x02e7, 0xf62: 0x02e7, 0xf63: 0x02e7, + 0xf64: 0x446e, 0xf65: 0x446e, 0xf66: 0x02ed, 0xf67: 0x02ed, 0xf68: 0x02ed, 0xf69: 0x02ed, + 0xf6a: 0x02ea, 0xf6b: 0x02ea, 0xf6c: 0x02ea, 0xf6d: 0x02ea, 0xf6e: 0x0308, 0xf6f: 0x0308, + 0xf70: 0x4468, 0xf71: 0x4468, + // Block 0x3e, offset 0xf80 + 0xf93: 0x02d8, 0xf94: 0x02d8, 0xf95: 0x02d8, 0xf96: 0x02d8, 0xf97: 0x02f6, + 0xf98: 0x02f6, 0xf99: 0x02f3, 0xf9a: 0x02f3, 0xf9b: 0x02f9, 0xf9c: 0x02f9, 0xf9d: 0x204f, + 0xf9e: 0x02ff, 0xf9f: 0x02ff, 0xfa0: 0x02f0, 0xfa1: 0x02f0, 0xfa2: 0x02fc, 0xfa3: 0x02fc, + 0xfa4: 0x0305, 0xfa5: 0x0305, 0xfa6: 0x0305, 0xfa7: 0x0305, 0xfa8: 0x028d, 0xfa9: 0x028d, + 0xfaa: 0x25aa, 0xfab: 0x25aa, 0xfac: 0x261a, 0xfad: 0x261a, 0xfae: 0x25e9, 
0xfaf: 0x25e9, + 0xfb0: 0x2605, 0xfb1: 0x2605, 0xfb2: 0x25fe, 0xfb3: 0x25fe, 0xfb4: 0x260c, 0xfb5: 0x260c, + 0xfb6: 0x2613, 0xfb7: 0x2613, 0xfb8: 0x2613, 0xfb9: 0x25f0, 0xfba: 0x25f0, 0xfbb: 0x25f0, + 0xfbc: 0x0302, 0xfbd: 0x0302, 0xfbe: 0x0302, 0xfbf: 0x0302, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x25b1, 0xfc1: 0x25b8, 0xfc2: 0x25d4, 0xfc3: 0x25f0, 0xfc4: 0x25f7, 0xfc5: 0x1d89, + 0xfc6: 0x1d8e, 0xfc7: 0x1d93, 0xfc8: 0x1da2, 0xfc9: 0x1db1, 0xfca: 0x1db6, 0xfcb: 0x1dbb, + 0xfcc: 0x1dc0, 0xfcd: 0x1dc5, 0xfce: 0x1dd4, 0xfcf: 0x1de3, 0xfd0: 0x1de8, 0xfd1: 0x1ded, + 0xfd2: 0x1dfc, 0xfd3: 0x1e0b, 0xfd4: 0x1e10, 0xfd5: 0x1e15, 0xfd6: 0x1e1a, 0xfd7: 0x1e29, + 0xfd8: 0x1e2e, 0xfd9: 0x1e3d, 0xfda: 0x1e42, 0xfdb: 0x1e47, 0xfdc: 0x1e56, 0xfdd: 0x1e5b, + 0xfde: 0x1e60, 0xfdf: 0x1e6a, 0xfe0: 0x1ea6, 0xfe1: 0x1eb5, 0xfe2: 0x1ec4, 0xfe3: 0x1ec9, + 0xfe4: 0x1ece, 0xfe5: 0x1ed8, 0xfe6: 0x1ee7, 0xfe7: 0x1eec, 0xfe8: 0x1efb, 0xfe9: 0x1f00, + 0xfea: 0x1f05, 0xfeb: 0x1f14, 0xfec: 0x1f19, 0xfed: 0x1f28, 0xfee: 0x1f2d, 0xfef: 0x1f32, + 0xff0: 0x1f37, 0xff1: 0x1f3c, 0xff2: 0x1f41, 0xff3: 0x1f46, 0xff4: 0x1f4b, 0xff5: 0x1f50, + 0xff6: 0x1f55, 0xff7: 0x1f5a, 0xff8: 0x1f5f, 0xff9: 0x1f64, 0xffa: 0x1f69, 0xffb: 0x1f6e, + 0xffc: 0x1f73, 0xffd: 0x1f78, 0xffe: 0x1f7d, 0xfff: 0x1f87, + // Block 0x40, offset 0x1000 + 0x1000: 0x1f8c, 0x1001: 0x1f91, 0x1002: 0x1f96, 0x1003: 0x1fa0, 0x1004: 0x1fa5, 0x1005: 0x1faf, + 0x1006: 0x1fb4, 0x1007: 0x1fb9, 0x1008: 0x1fbe, 0x1009: 0x1fc3, 0x100a: 0x1fc8, 0x100b: 0x1fcd, + 0x100c: 0x1fd2, 0x100d: 0x1fd7, 0x100e: 0x1fe6, 0x100f: 0x1ff5, 0x1010: 0x1ffa, 0x1011: 0x1fff, + 0x1012: 0x2004, 0x1013: 0x2009, 0x1014: 0x200e, 0x1015: 0x2018, 0x1016: 0x201d, 0x1017: 0x2022, + 0x1018: 0x2031, 0x1019: 0x2040, 0x101a: 0x2045, 0x101b: 0x4420, 0x101c: 0x4426, 0x101d: 0x445c, + 0x101e: 0x44b3, 0x101f: 0x44ba, 0x1020: 0x44c1, 0x1021: 0x44c8, 0x1022: 0x44cf, 0x1023: 0x44d6, + 0x1024: 0x25c6, 0x1025: 0x25cd, 0x1026: 0x25d4, 0x1027: 0x25db, 0x1028: 0x25f0, 0x1029: 0x25f7, + 0x102a: 0x1d98, 0x102b: 0x1d9d, 0x102c: 0x1da2, 0x102d: 0x1da7, 0x102e: 0x1db1, 0x102f: 0x1db6, + 0x1030: 0x1dca, 0x1031: 0x1dcf, 0x1032: 0x1dd4, 0x1033: 0x1dd9, 0x1034: 0x1de3, 0x1035: 0x1de8, + 0x1036: 0x1df2, 0x1037: 0x1df7, 0x1038: 0x1dfc, 0x1039: 0x1e01, 0x103a: 0x1e0b, 0x103b: 0x1e10, + 0x103c: 0x1f3c, 0x103d: 0x1f41, 0x103e: 0x1f50, 0x103f: 0x1f55, + // Block 0x41, offset 0x1040 + 0x1040: 0x1f5a, 0x1041: 0x1f6e, 0x1042: 0x1f73, 0x1043: 0x1f78, 0x1044: 0x1f7d, 0x1045: 0x1f96, + 0x1046: 0x1fa0, 0x1047: 0x1fa5, 0x1048: 0x1faa, 0x1049: 0x1fbe, 0x104a: 0x1fdc, 0x104b: 0x1fe1, + 0x104c: 0x1fe6, 0x104d: 0x1feb, 0x104e: 0x1ff5, 0x104f: 0x1ffa, 0x1050: 0x445c, 0x1051: 0x2027, + 0x1052: 0x202c, 0x1053: 0x2031, 0x1054: 0x2036, 0x1055: 0x2040, 0x1056: 0x2045, 0x1057: 0x25b1, + 0x1058: 0x25b8, 0x1059: 0x25bf, 0x105a: 0x25d4, 0x105b: 0x25e2, 0x105c: 0x1d89, 0x105d: 0x1d8e, + 0x105e: 0x1d93, 0x105f: 0x1da2, 0x1060: 0x1dac, 0x1061: 0x1dbb, 0x1062: 0x1dc0, 0x1063: 0x1dc5, + 0x1064: 0x1dd4, 0x1065: 0x1dde, 0x1066: 0x1dfc, 0x1067: 0x1e15, 0x1068: 0x1e1a, 0x1069: 0x1e29, + 0x106a: 0x1e2e, 0x106b: 0x1e3d, 0x106c: 0x1e47, 0x106d: 0x1e56, 0x106e: 0x1e5b, 0x106f: 0x1e60, + 0x1070: 0x1e6a, 0x1071: 0x1ea6, 0x1072: 0x1eab, 0x1073: 0x1eb5, 0x1074: 0x1ec4, 0x1075: 0x1ec9, + 0x1076: 0x1ece, 0x1077: 0x1ed8, 0x1078: 0x1ee7, 0x1079: 0x1efb, 0x107a: 0x1f00, 0x107b: 0x1f05, + 0x107c: 0x1f14, 0x107d: 0x1f19, 0x107e: 0x1f28, 0x107f: 0x1f2d, + // Block 0x42, offset 0x1080 + 0x1080: 0x1f32, 0x1081: 0x1f37, 0x1082: 0x1f46, 0x1083: 0x1f4b, 0x1084: 0x1f5f, 0x1085: 
0x1f64, + 0x1086: 0x1f69, 0x1087: 0x1f6e, 0x1088: 0x1f73, 0x1089: 0x1f87, 0x108a: 0x1f8c, 0x108b: 0x1f91, + 0x108c: 0x1f96, 0x108d: 0x1f9b, 0x108e: 0x1faf, 0x108f: 0x1fb4, 0x1090: 0x1fb9, 0x1091: 0x1fbe, + 0x1092: 0x1fcd, 0x1093: 0x1fd2, 0x1094: 0x1fd7, 0x1095: 0x1fe6, 0x1096: 0x1ff0, 0x1097: 0x1fff, + 0x1098: 0x2004, 0x1099: 0x4450, 0x109a: 0x2018, 0x109b: 0x201d, 0x109c: 0x2022, 0x109d: 0x2031, + 0x109e: 0x203b, 0x109f: 0x25d4, 0x10a0: 0x25e2, 0x10a1: 0x1da2, 0x10a2: 0x1dac, 0x10a3: 0x1dd4, + 0x10a4: 0x1dde, 0x10a5: 0x1dfc, 0x10a6: 0x1e06, 0x10a7: 0x1e6a, 0x10a8: 0x1e6f, 0x10a9: 0x1e92, + 0x10aa: 0x1e97, 0x10ab: 0x1f6e, 0x10ac: 0x1f73, 0x10ad: 0x1f96, 0x10ae: 0x1fe6, 0x10af: 0x1ff0, + 0x10b0: 0x2031, 0x10b1: 0x203b, 0x10b2: 0x4504, 0x10b3: 0x450c, 0x10b4: 0x4514, 0x10b5: 0x1ef1, + 0x10b6: 0x1ef6, 0x10b7: 0x1f0a, 0x10b8: 0x1f0f, 0x10b9: 0x1f1e, 0x10ba: 0x1f23, 0x10bb: 0x1e74, + 0x10bc: 0x1e79, 0x10bd: 0x1e9c, 0x10be: 0x1ea1, 0x10bf: 0x1e33, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x1e38, 0x10c1: 0x1e1f, 0x10c2: 0x1e24, 0x10c3: 0x1e4c, 0x10c4: 0x1e51, 0x10c5: 0x1eba, + 0x10c6: 0x1ebf, 0x10c7: 0x1edd, 0x10c8: 0x1ee2, 0x10c9: 0x1e7e, 0x10ca: 0x1e83, 0x10cb: 0x1e88, + 0x10cc: 0x1e92, 0x10cd: 0x1e8d, 0x10ce: 0x1e65, 0x10cf: 0x1eb0, 0x10d0: 0x1ed3, 0x10d1: 0x1ef1, + 0x10d2: 0x1ef6, 0x10d3: 0x1f0a, 0x10d4: 0x1f0f, 0x10d5: 0x1f1e, 0x10d6: 0x1f23, 0x10d7: 0x1e74, + 0x10d8: 0x1e79, 0x10d9: 0x1e9c, 0x10da: 0x1ea1, 0x10db: 0x1e33, 0x10dc: 0x1e38, 0x10dd: 0x1e1f, + 0x10de: 0x1e24, 0x10df: 0x1e4c, 0x10e0: 0x1e51, 0x10e1: 0x1eba, 0x10e2: 0x1ebf, 0x10e3: 0x1edd, + 0x10e4: 0x1ee2, 0x10e5: 0x1e7e, 0x10e6: 0x1e83, 0x10e7: 0x1e88, 0x10e8: 0x1e92, 0x10e9: 0x1e8d, + 0x10ea: 0x1e65, 0x10eb: 0x1eb0, 0x10ec: 0x1ed3, 0x10ed: 0x1e7e, 0x10ee: 0x1e83, 0x10ef: 0x1e88, + 0x10f0: 0x1e92, 0x10f1: 0x1e6f, 0x10f2: 0x1e97, 0x10f3: 0x1eec, 0x10f4: 0x1e56, 0x10f5: 0x1e5b, + 0x10f6: 0x1e60, 0x10f7: 0x1e7e, 0x10f8: 0x1e83, 0x10f9: 0x1e88, 0x10fa: 0x1eec, 0x10fb: 0x1efb, + 0x10fc: 0x4408, 0x10fd: 0x4408, + // Block 0x44, offset 0x1100 + 0x1110: 0x2311, 0x1111: 0x2326, + 0x1112: 0x2326, 0x1113: 0x232d, 0x1114: 0x2334, 0x1115: 0x2349, 0x1116: 0x2350, 0x1117: 0x2357, + 0x1118: 0x237a, 0x1119: 0x237a, 0x111a: 0x239d, 0x111b: 0x2396, 0x111c: 0x23b2, 0x111d: 0x23a4, + 0x111e: 0x23ab, 0x111f: 0x23ce, 0x1120: 0x23ce, 0x1121: 0x23c7, 0x1122: 0x23d5, 0x1123: 0x23d5, + 0x1124: 0x23ff, 0x1125: 0x23ff, 0x1126: 0x241b, 0x1127: 0x23e3, 0x1128: 0x23e3, 0x1129: 0x23dc, + 0x112a: 0x23f1, 0x112b: 0x23f1, 0x112c: 0x23f8, 0x112d: 0x23f8, 0x112e: 0x2422, 0x112f: 0x2430, + 0x1130: 0x2430, 0x1131: 0x2437, 0x1132: 0x2437, 0x1133: 0x243e, 0x1134: 0x2445, 0x1135: 0x244c, + 0x1136: 0x2453, 0x1137: 0x2453, 0x1138: 0x245a, 0x1139: 0x2468, 0x113a: 0x2476, 0x113b: 0x246f, + 0x113c: 0x247d, 0x113d: 0x247d, 0x113e: 0x2492, 0x113f: 0x2499, + // Block 0x45, offset 0x1140 + 0x1140: 0x24ca, 0x1141: 0x24d8, 0x1142: 0x24d1, 0x1143: 0x24b5, 0x1144: 0x24b5, 0x1145: 0x24df, + 0x1146: 0x24df, 0x1147: 0x24e6, 0x1148: 0x24e6, 0x1149: 0x2510, 0x114a: 0x2517, 0x114b: 0x251e, + 0x114c: 0x24f4, 0x114d: 0x2502, 0x114e: 0x2525, 0x114f: 0x252c, + 0x1152: 0x24fb, 0x1153: 0x2580, 0x1154: 0x2587, 0x1155: 0x255d, 0x1156: 0x2564, 0x1157: 0x2548, + 0x1158: 0x2548, 0x1159: 0x254f, 0x115a: 0x2579, 0x115b: 0x2572, 0x115c: 0x259c, 0x115d: 0x259c, + 0x115e: 0x230a, 0x115f: 0x231f, 0x1160: 0x2318, 0x1161: 0x2342, 0x1162: 0x233b, 0x1163: 0x2365, + 0x1164: 0x235e, 0x1165: 0x2388, 0x1166: 0x236c, 0x1167: 0x2381, 0x1168: 0x23b9, 0x1169: 0x2406, + 0x116a: 0x23ea, 0x116b: 0x2429, 0x116c: 0x24c3, 
0x116d: 0x24ed, 0x116e: 0x2595, 0x116f: 0x258e, + 0x1170: 0x25a3, 0x1171: 0x253a, 0x1172: 0x24a0, 0x1173: 0x256b, 0x1174: 0x2492, 0x1175: 0x24ca, + 0x1176: 0x2461, 0x1177: 0x24ae, 0x1178: 0x2541, 0x1179: 0x2533, 0x117a: 0x24bc, 0x117b: 0x24a7, + 0x117c: 0x24bc, 0x117d: 0x2541, 0x117e: 0x2373, 0x117f: 0x238f, + // Block 0x46, offset 0x1180 + 0x1180: 0x2509, 0x1181: 0x2484, 0x1182: 0x2303, 0x1183: 0x24a7, 0x1184: 0x244c, 0x1185: 0x241b, + 0x1186: 0x23c0, 0x1187: 0x2556, + 0x11b0: 0x2414, 0x11b1: 0x248b, 0x11b2: 0x27bf, 0x11b3: 0x27b6, 0x11b4: 0x27ec, 0x11b5: 0x27da, + 0x11b6: 0x27c8, 0x11b7: 0x27e3, 0x11b8: 0x27f5, 0x11b9: 0x240d, 0x11ba: 0x2c7c, 0x11bb: 0x2afc, + 0x11bc: 0x27d1, + // Block 0x47, offset 0x11c0 + 0x11d0: 0x0019, 0x11d1: 0x0483, + 0x11d2: 0x0487, 0x11d3: 0x0035, 0x11d4: 0x0037, 0x11d5: 0x0003, 0x11d6: 0x003f, 0x11d7: 0x04bf, + 0x11d8: 0x04c3, 0x11d9: 0x1b5c, + 0x11e0: 0x8132, 0x11e1: 0x8132, 0x11e2: 0x8132, 0x11e3: 0x8132, + 0x11e4: 0x8132, 0x11e5: 0x8132, 0x11e6: 0x8132, 0x11e7: 0x812d, 0x11e8: 0x812d, 0x11e9: 0x812d, + 0x11ea: 0x812d, 0x11eb: 0x812d, 0x11ec: 0x812d, 0x11ed: 0x812d, 0x11ee: 0x8132, 0x11ef: 0x8132, + 0x11f0: 0x1873, 0x11f1: 0x0443, 0x11f2: 0x043f, 0x11f3: 0x007f, 0x11f4: 0x007f, 0x11f5: 0x0011, + 0x11f6: 0x0013, 0x11f7: 0x00b7, 0x11f8: 0x00bb, 0x11f9: 0x04b7, 0x11fa: 0x04bb, 0x11fb: 0x04ab, + 0x11fc: 0x04af, 0x11fd: 0x0493, 0x11fe: 0x0497, 0x11ff: 0x048b, + // Block 0x48, offset 0x1200 + 0x1200: 0x048f, 0x1201: 0x049b, 0x1202: 0x049f, 0x1203: 0x04a3, 0x1204: 0x04a7, + 0x1207: 0x0077, 0x1208: 0x007b, 0x1209: 0x4269, 0x120a: 0x4269, 0x120b: 0x4269, + 0x120c: 0x4269, 0x120d: 0x007f, 0x120e: 0x007f, 0x120f: 0x007f, 0x1210: 0x0019, 0x1211: 0x0483, + 0x1212: 0x001d, 0x1214: 0x0037, 0x1215: 0x0035, 0x1216: 0x003f, 0x1217: 0x0003, + 0x1218: 0x0443, 0x1219: 0x0011, 0x121a: 0x0013, 0x121b: 0x00b7, 0x121c: 0x00bb, 0x121d: 0x04b7, + 0x121e: 0x04bb, 0x121f: 0x0007, 0x1220: 0x000d, 0x1221: 0x0015, 0x1222: 0x0017, 0x1223: 0x001b, + 0x1224: 0x0039, 0x1225: 0x003d, 0x1226: 0x003b, 0x1228: 0x0079, 0x1229: 0x0009, + 0x122a: 0x000b, 0x122b: 0x0041, + 0x1230: 0x42aa, 0x1231: 0x442c, 0x1232: 0x42af, 0x1234: 0x42b4, + 0x1236: 0x42b9, 0x1237: 0x4432, 0x1238: 0x42be, 0x1239: 0x4438, 0x123a: 0x42c3, 0x123b: 0x443e, + 0x123c: 0x42c8, 0x123d: 0x4444, 0x123e: 0x42cd, 0x123f: 0x444a, + // Block 0x49, offset 0x1240 + 0x1240: 0x0236, 0x1241: 0x440e, 0x1242: 0x440e, 0x1243: 0x4414, 0x1244: 0x4414, 0x1245: 0x4456, + 0x1246: 0x4456, 0x1247: 0x441a, 0x1248: 0x441a, 0x1249: 0x4462, 0x124a: 0x4462, 0x124b: 0x4462, + 0x124c: 0x4462, 0x124d: 0x0239, 0x124e: 0x0239, 0x124f: 0x023c, 0x1250: 0x023c, 0x1251: 0x023c, + 0x1252: 0x023c, 0x1253: 0x023f, 0x1254: 0x023f, 0x1255: 0x0242, 0x1256: 0x0242, 0x1257: 0x0242, + 0x1258: 0x0242, 0x1259: 0x0245, 0x125a: 0x0245, 0x125b: 0x0245, 0x125c: 0x0245, 0x125d: 0x0248, + 0x125e: 0x0248, 0x125f: 0x0248, 0x1260: 0x0248, 0x1261: 0x024b, 0x1262: 0x024b, 0x1263: 0x024b, + 0x1264: 0x024b, 0x1265: 0x024e, 0x1266: 0x024e, 0x1267: 0x024e, 0x1268: 0x024e, 0x1269: 0x0251, + 0x126a: 0x0251, 0x126b: 0x0254, 0x126c: 0x0254, 0x126d: 0x0257, 0x126e: 0x0257, 0x126f: 0x025a, + 0x1270: 0x025a, 0x1271: 0x025d, 0x1272: 0x025d, 0x1273: 0x025d, 0x1274: 0x025d, 0x1275: 0x0260, + 0x1276: 0x0260, 0x1277: 0x0260, 0x1278: 0x0260, 0x1279: 0x0263, 0x127a: 0x0263, 0x127b: 0x0263, + 0x127c: 0x0263, 0x127d: 0x0266, 0x127e: 0x0266, 0x127f: 0x0266, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0266, 0x1281: 0x0269, 0x1282: 0x0269, 0x1283: 0x0269, 0x1284: 0x0269, 0x1285: 0x026c, + 0x1286: 0x026c, 
0x1287: 0x026c, 0x1288: 0x026c, 0x1289: 0x026f, 0x128a: 0x026f, 0x128b: 0x026f, + 0x128c: 0x026f, 0x128d: 0x0272, 0x128e: 0x0272, 0x128f: 0x0272, 0x1290: 0x0272, 0x1291: 0x0275, + 0x1292: 0x0275, 0x1293: 0x0275, 0x1294: 0x0275, 0x1295: 0x0278, 0x1296: 0x0278, 0x1297: 0x0278, + 0x1298: 0x0278, 0x1299: 0x027b, 0x129a: 0x027b, 0x129b: 0x027b, 0x129c: 0x027b, 0x129d: 0x027e, + 0x129e: 0x027e, 0x129f: 0x027e, 0x12a0: 0x027e, 0x12a1: 0x0281, 0x12a2: 0x0281, 0x12a3: 0x0281, + 0x12a4: 0x0281, 0x12a5: 0x0284, 0x12a6: 0x0284, 0x12a7: 0x0284, 0x12a8: 0x0284, 0x12a9: 0x0287, + 0x12aa: 0x0287, 0x12ab: 0x0287, 0x12ac: 0x0287, 0x12ad: 0x028a, 0x12ae: 0x028a, 0x12af: 0x028d, + 0x12b0: 0x028d, 0x12b1: 0x0290, 0x12b2: 0x0290, 0x12b3: 0x0290, 0x12b4: 0x0290, 0x12b5: 0x2e00, + 0x12b6: 0x2e00, 0x12b7: 0x2e08, 0x12b8: 0x2e08, 0x12b9: 0x2e10, 0x12ba: 0x2e10, 0x12bb: 0x1f82, + 0x12bc: 0x1f82, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x0081, 0x12c1: 0x0083, 0x12c2: 0x0085, 0x12c3: 0x0087, 0x12c4: 0x0089, 0x12c5: 0x008b, + 0x12c6: 0x008d, 0x12c7: 0x008f, 0x12c8: 0x0091, 0x12c9: 0x0093, 0x12ca: 0x0095, 0x12cb: 0x0097, + 0x12cc: 0x0099, 0x12cd: 0x009b, 0x12ce: 0x009d, 0x12cf: 0x009f, 0x12d0: 0x00a1, 0x12d1: 0x00a3, + 0x12d2: 0x00a5, 0x12d3: 0x00a7, 0x12d4: 0x00a9, 0x12d5: 0x00ab, 0x12d6: 0x00ad, 0x12d7: 0x00af, + 0x12d8: 0x00b1, 0x12d9: 0x00b3, 0x12da: 0x00b5, 0x12db: 0x00b7, 0x12dc: 0x00b9, 0x12dd: 0x00bb, + 0x12de: 0x00bd, 0x12df: 0x0477, 0x12e0: 0x047b, 0x12e1: 0x0487, 0x12e2: 0x049b, 0x12e3: 0x049f, + 0x12e4: 0x0483, 0x12e5: 0x05ab, 0x12e6: 0x05a3, 0x12e7: 0x04c7, 0x12e8: 0x04cf, 0x12e9: 0x04d7, + 0x12ea: 0x04df, 0x12eb: 0x04e7, 0x12ec: 0x056b, 0x12ed: 0x0573, 0x12ee: 0x057b, 0x12ef: 0x051f, + 0x12f0: 0x05af, 0x12f1: 0x04cb, 0x12f2: 0x04d3, 0x12f3: 0x04db, 0x12f4: 0x04e3, 0x12f5: 0x04eb, + 0x12f6: 0x04ef, 0x12f7: 0x04f3, 0x12f8: 0x04f7, 0x12f9: 0x04fb, 0x12fa: 0x04ff, 0x12fb: 0x0503, + 0x12fc: 0x0507, 0x12fd: 0x050b, 0x12fe: 0x050f, 0x12ff: 0x0513, + // Block 0x4c, offset 0x1300 + 0x1300: 0x0517, 0x1301: 0x051b, 0x1302: 0x0523, 0x1303: 0x0527, 0x1304: 0x052b, 0x1305: 0x052f, + 0x1306: 0x0533, 0x1307: 0x0537, 0x1308: 0x053b, 0x1309: 0x053f, 0x130a: 0x0543, 0x130b: 0x0547, + 0x130c: 0x054b, 0x130d: 0x054f, 0x130e: 0x0553, 0x130f: 0x0557, 0x1310: 0x055b, 0x1311: 0x055f, + 0x1312: 0x0563, 0x1313: 0x0567, 0x1314: 0x056f, 0x1315: 0x0577, 0x1316: 0x057f, 0x1317: 0x0583, + 0x1318: 0x0587, 0x1319: 0x058b, 0x131a: 0x058f, 0x131b: 0x0593, 0x131c: 0x0597, 0x131d: 0x05a7, + 0x131e: 0x4a78, 0x131f: 0x4a7e, 0x1320: 0x03c3, 0x1321: 0x0313, 0x1322: 0x0317, 0x1323: 0x4a3b, + 0x1324: 0x031b, 0x1325: 0x4a41, 0x1326: 0x4a47, 0x1327: 0x031f, 0x1328: 0x0323, 0x1329: 0x0327, + 0x132a: 0x4a4d, 0x132b: 0x4a53, 0x132c: 0x4a59, 0x132d: 0x4a5f, 0x132e: 0x4a65, 0x132f: 0x4a6b, + 0x1330: 0x0367, 0x1331: 0x032b, 0x1332: 0x032f, 0x1333: 0x0333, 0x1334: 0x037b, 0x1335: 0x0337, + 0x1336: 0x033b, 0x1337: 0x033f, 0x1338: 0x0343, 0x1339: 0x0347, 0x133a: 0x034b, 0x133b: 0x034f, + 0x133c: 0x0353, 0x133d: 0x0357, 0x133e: 0x035b, + // Block 0x4d, offset 0x1340 + 0x1342: 0x49bd, 0x1343: 0x49c3, 0x1344: 0x49c9, 0x1345: 0x49cf, + 0x1346: 0x49d5, 0x1347: 0x49db, 0x134a: 0x49e1, 0x134b: 0x49e7, + 0x134c: 0x49ed, 0x134d: 0x49f3, 0x134e: 0x49f9, 0x134f: 0x49ff, + 0x1352: 0x4a05, 0x1353: 0x4a0b, 0x1354: 0x4a11, 0x1355: 0x4a17, 0x1356: 0x4a1d, 0x1357: 0x4a23, + 0x135a: 0x4a29, 0x135b: 0x4a2f, 0x135c: 0x4a35, + 0x1360: 0x00bf, 0x1361: 0x00c2, 0x1362: 0x00cb, 0x1363: 0x4264, + 0x1364: 0x00c8, 0x1365: 0x00c5, 0x1366: 0x0447, 0x1368: 0x046b, 0x1369: 0x044b, + 0x136a: 
0x044f, 0x136b: 0x0453, 0x136c: 0x0457, 0x136d: 0x046f, 0x136e: 0x0473, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0063, 0x1381: 0x0065, 0x1382: 0x0067, 0x1383: 0x0069, 0x1384: 0x006b, 0x1385: 0x006d, + 0x1386: 0x006f, 0x1387: 0x0071, 0x1388: 0x0073, 0x1389: 0x0075, 0x138a: 0x0083, 0x138b: 0x0085, + 0x138c: 0x0087, 0x138d: 0x0089, 0x138e: 0x008b, 0x138f: 0x008d, 0x1390: 0x008f, 0x1391: 0x0091, + 0x1392: 0x0093, 0x1393: 0x0095, 0x1394: 0x0097, 0x1395: 0x0099, 0x1396: 0x009b, 0x1397: 0x009d, + 0x1398: 0x009f, 0x1399: 0x00a1, 0x139a: 0x00a3, 0x139b: 0x00a5, 0x139c: 0x00a7, 0x139d: 0x00a9, + 0x139e: 0x00ab, 0x139f: 0x00ad, 0x13a0: 0x00af, 0x13a1: 0x00b1, 0x13a2: 0x00b3, 0x13a3: 0x00b5, + 0x13a4: 0x00dd, 0x13a5: 0x00f2, 0x13a8: 0x0173, 0x13a9: 0x0176, + 0x13aa: 0x0179, 0x13ab: 0x017c, 0x13ac: 0x017f, 0x13ad: 0x0182, 0x13ae: 0x0185, 0x13af: 0x0188, + 0x13b0: 0x018b, 0x13b1: 0x018e, 0x13b2: 0x0191, 0x13b3: 0x0194, 0x13b4: 0x0197, 0x13b5: 0x019a, + 0x13b6: 0x019d, 0x13b7: 0x01a0, 0x13b8: 0x01a3, 0x13b9: 0x0188, 0x13ba: 0x01a6, 0x13bb: 0x01a9, + 0x13bc: 0x01ac, 0x13bd: 0x01af, 0x13be: 0x01b2, 0x13bf: 0x01b5, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x01fd, 0x13c1: 0x0200, 0x13c2: 0x0203, 0x13c3: 0x045b, 0x13c4: 0x01c7, 0x13c5: 0x01d0, + 0x13c6: 0x01d6, 0x13c7: 0x01fa, 0x13c8: 0x01eb, 0x13c9: 0x01e8, 0x13ca: 0x0206, 0x13cb: 0x0209, + 0x13ce: 0x0021, 0x13cf: 0x0023, 0x13d0: 0x0025, 0x13d1: 0x0027, + 0x13d2: 0x0029, 0x13d3: 0x002b, 0x13d4: 0x002d, 0x13d5: 0x002f, 0x13d6: 0x0031, 0x13d7: 0x0033, + 0x13d8: 0x0021, 0x13d9: 0x0023, 0x13da: 0x0025, 0x13db: 0x0027, 0x13dc: 0x0029, 0x13dd: 0x002b, + 0x13de: 0x002d, 0x13df: 0x002f, 0x13e0: 0x0031, 0x13e1: 0x0033, 0x13e2: 0x0021, 0x13e3: 0x0023, + 0x13e4: 0x0025, 0x13e5: 0x0027, 0x13e6: 0x0029, 0x13e7: 0x002b, 0x13e8: 0x002d, 0x13e9: 0x002f, + 0x13ea: 0x0031, 0x13eb: 0x0033, 0x13ec: 0x0021, 0x13ed: 0x0023, 0x13ee: 0x0025, 0x13ef: 0x0027, + 0x13f0: 0x0029, 0x13f1: 0x002b, 0x13f2: 0x002d, 0x13f3: 0x002f, 0x13f4: 0x0031, 0x13f5: 0x0033, + 0x13f6: 0x0021, 0x13f7: 0x0023, 0x13f8: 0x0025, 0x13f9: 0x0027, 0x13fa: 0x0029, 0x13fb: 0x002b, + 0x13fc: 0x002d, 0x13fd: 0x002f, 0x13fe: 0x0031, 0x13ff: 0x0033, + // Block 0x50, offset 0x1400 + 0x1400: 0x0239, 0x1401: 0x023c, 0x1402: 0x0248, 0x1403: 0x0251, 0x1405: 0x028a, + 0x1406: 0x025a, 0x1407: 0x024b, 0x1408: 0x0269, 0x1409: 0x0290, 0x140a: 0x027b, 0x140b: 0x027e, + 0x140c: 0x0281, 0x140d: 0x0284, 0x140e: 0x025d, 0x140f: 0x026f, 0x1410: 0x0275, 0x1411: 0x0263, + 0x1412: 0x0278, 0x1413: 0x0257, 0x1414: 0x0260, 0x1415: 0x0242, 0x1416: 0x0245, 0x1417: 0x024e, + 0x1418: 0x0254, 0x1419: 0x0266, 0x141a: 0x026c, 0x141b: 0x0272, 0x141c: 0x0293, 0x141d: 0x02e4, + 0x141e: 0x02cc, 0x141f: 0x0296, 0x1421: 0x023c, 0x1422: 0x0248, + 0x1424: 0x0287, 0x1427: 0x024b, 0x1429: 0x0290, + 0x142a: 0x027b, 0x142b: 0x027e, 0x142c: 0x0281, 0x142d: 0x0284, 0x142e: 0x025d, 0x142f: 0x026f, + 0x1430: 0x0275, 0x1431: 0x0263, 0x1432: 0x0278, 0x1434: 0x0260, 0x1435: 0x0242, + 0x1436: 0x0245, 0x1437: 0x024e, 0x1439: 0x0266, 0x143b: 0x0272, + // Block 0x51, offset 0x1440 + 0x1442: 0x0248, + 0x1447: 0x024b, 0x1449: 0x0290, 0x144b: 0x027e, + 0x144d: 0x0284, 0x144e: 0x025d, 0x144f: 0x026f, 0x1451: 0x0263, + 0x1452: 0x0278, 0x1454: 0x0260, 0x1457: 0x024e, + 0x1459: 0x0266, 0x145b: 0x0272, 0x145d: 0x02e4, + 0x145f: 0x0296, 0x1461: 0x023c, 0x1462: 0x0248, + 0x1464: 0x0287, 0x1467: 0x024b, 0x1468: 0x0269, 0x1469: 0x0290, + 0x146a: 0x027b, 0x146c: 0x0281, 0x146d: 0x0284, 0x146e: 0x025d, 0x146f: 0x026f, + 0x1470: 0x0275, 0x1471: 0x0263, 0x1472: 0x0278, 0x1474: 
0x0260, 0x1475: 0x0242, + 0x1476: 0x0245, 0x1477: 0x024e, 0x1479: 0x0266, 0x147a: 0x026c, 0x147b: 0x0272, + 0x147c: 0x0293, 0x147e: 0x02cc, + // Block 0x52, offset 0x1480 + 0x1480: 0x0239, 0x1481: 0x023c, 0x1482: 0x0248, 0x1483: 0x0251, 0x1484: 0x0287, 0x1485: 0x028a, + 0x1486: 0x025a, 0x1487: 0x024b, 0x1488: 0x0269, 0x1489: 0x0290, 0x148b: 0x027e, + 0x148c: 0x0281, 0x148d: 0x0284, 0x148e: 0x025d, 0x148f: 0x026f, 0x1490: 0x0275, 0x1491: 0x0263, + 0x1492: 0x0278, 0x1493: 0x0257, 0x1494: 0x0260, 0x1495: 0x0242, 0x1496: 0x0245, 0x1497: 0x024e, + 0x1498: 0x0254, 0x1499: 0x0266, 0x149a: 0x026c, 0x149b: 0x0272, + 0x14a1: 0x023c, 0x14a2: 0x0248, 0x14a3: 0x0251, + 0x14a5: 0x028a, 0x14a6: 0x025a, 0x14a7: 0x024b, 0x14a8: 0x0269, 0x14a9: 0x0290, + 0x14ab: 0x027e, 0x14ac: 0x0281, 0x14ad: 0x0284, 0x14ae: 0x025d, 0x14af: 0x026f, + 0x14b0: 0x0275, 0x14b1: 0x0263, 0x14b2: 0x0278, 0x14b3: 0x0257, 0x14b4: 0x0260, 0x14b5: 0x0242, + 0x14b6: 0x0245, 0x14b7: 0x024e, 0x14b8: 0x0254, 0x14b9: 0x0266, 0x14ba: 0x026c, 0x14bb: 0x0272, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x1879, 0x14c1: 0x1876, 0x14c2: 0x187c, 0x14c3: 0x18a0, 0x14c4: 0x18c4, 0x14c5: 0x18e8, + 0x14c6: 0x190c, 0x14c7: 0x1915, 0x14c8: 0x191b, 0x14c9: 0x1921, 0x14ca: 0x1927, + 0x14d0: 0x1a8c, 0x14d1: 0x1a90, + 0x14d2: 0x1a94, 0x14d3: 0x1a98, 0x14d4: 0x1a9c, 0x14d5: 0x1aa0, 0x14d6: 0x1aa4, 0x14d7: 0x1aa8, + 0x14d8: 0x1aac, 0x14d9: 0x1ab0, 0x14da: 0x1ab4, 0x14db: 0x1ab8, 0x14dc: 0x1abc, 0x14dd: 0x1ac0, + 0x14de: 0x1ac4, 0x14df: 0x1ac8, 0x14e0: 0x1acc, 0x14e1: 0x1ad0, 0x14e2: 0x1ad4, 0x14e3: 0x1ad8, + 0x14e4: 0x1adc, 0x14e5: 0x1ae0, 0x14e6: 0x1ae4, 0x14e7: 0x1ae8, 0x14e8: 0x1aec, 0x14e9: 0x1af0, + 0x14ea: 0x271e, 0x14eb: 0x0047, 0x14ec: 0x0065, 0x14ed: 0x193c, 0x14ee: 0x19b1, + 0x14f0: 0x0043, 0x14f1: 0x0045, 0x14f2: 0x0047, 0x14f3: 0x0049, 0x14f4: 0x004b, 0x14f5: 0x004d, + 0x14f6: 0x004f, 0x14f7: 0x0051, 0x14f8: 0x0053, 0x14f9: 0x0055, 0x14fa: 0x0057, 0x14fb: 0x0059, + 0x14fc: 0x005b, 0x14fd: 0x005d, 0x14fe: 0x005f, 0x14ff: 0x0061, + // Block 0x54, offset 0x1500 + 0x1500: 0x26ad, 0x1501: 0x26c2, 0x1502: 0x0503, + 0x1510: 0x0c0f, 0x1511: 0x0a47, + 0x1512: 0x08d3, 0x1513: 0x45c4, 0x1514: 0x071b, 0x1515: 0x09ef, 0x1516: 0x132f, 0x1517: 0x09ff, + 0x1518: 0x0727, 0x1519: 0x0cd7, 0x151a: 0x0eaf, 0x151b: 0x0caf, 0x151c: 0x0827, 0x151d: 0x0b6b, + 0x151e: 0x07bf, 0x151f: 0x0cb7, 0x1520: 0x0813, 0x1521: 0x1117, 0x1522: 0x0f83, 0x1523: 0x138b, + 0x1524: 0x09d3, 0x1525: 0x090b, 0x1526: 0x0e63, 0x1527: 0x0c1b, 0x1528: 0x0c47, 0x1529: 0x06bf, + 0x152a: 0x06cb, 0x152b: 0x140b, 0x152c: 0x0adb, 0x152d: 0x06e7, 0x152e: 0x08ef, 0x152f: 0x0c3b, + 0x1530: 0x13b3, 0x1531: 0x0c13, 0x1532: 0x106f, 0x1533: 0x10ab, 0x1534: 0x08f7, 0x1535: 0x0e43, + 0x1536: 0x0d0b, 0x1537: 0x0d07, 0x1538: 0x0f97, 0x1539: 0x082b, 0x153a: 0x0957, 0x153b: 0x1443, + // Block 0x55, offset 0x1540 + 0x1540: 0x06fb, 0x1541: 0x06f3, 0x1542: 0x0703, 0x1543: 0x1647, 0x1544: 0x0747, 0x1545: 0x0757, + 0x1546: 0x075b, 0x1547: 0x0763, 0x1548: 0x076b, 0x1549: 0x076f, 0x154a: 0x077b, 0x154b: 0x0773, + 0x154c: 0x05b3, 0x154d: 0x165b, 0x154e: 0x078f, 0x154f: 0x0793, 0x1550: 0x0797, 0x1551: 0x07b3, + 0x1552: 0x164c, 0x1553: 0x05b7, 0x1554: 0x079f, 0x1555: 0x07bf, 0x1556: 0x1656, 0x1557: 0x07cf, + 0x1558: 0x07d7, 0x1559: 0x0737, 0x155a: 0x07df, 0x155b: 0x07e3, 0x155c: 0x1831, 0x155d: 0x07ff, + 0x155e: 0x0807, 0x155f: 0x05bf, 0x1560: 0x081f, 0x1561: 0x0823, 0x1562: 0x082b, 0x1563: 0x082f, + 0x1564: 0x05c3, 0x1565: 0x0847, 0x1566: 0x084b, 0x1567: 0x0857, 0x1568: 0x0863, 0x1569: 0x0867, + 0x156a: 0x086b, 0x156b: 
0x0873, 0x156c: 0x0893, 0x156d: 0x0897, 0x156e: 0x089f, 0x156f: 0x08af, + 0x1570: 0x08b7, 0x1571: 0x08bb, 0x1572: 0x08bb, 0x1573: 0x08bb, 0x1574: 0x166a, 0x1575: 0x0e93, + 0x1576: 0x08cf, 0x1577: 0x08d7, 0x1578: 0x166f, 0x1579: 0x08e3, 0x157a: 0x08eb, 0x157b: 0x08f3, + 0x157c: 0x091b, 0x157d: 0x0907, 0x157e: 0x0913, 0x157f: 0x0917, + // Block 0x56, offset 0x1580 + 0x1580: 0x091f, 0x1581: 0x0927, 0x1582: 0x092b, 0x1583: 0x0933, 0x1584: 0x093b, 0x1585: 0x093f, + 0x1586: 0x093f, 0x1587: 0x0947, 0x1588: 0x094f, 0x1589: 0x0953, 0x158a: 0x095f, 0x158b: 0x0983, + 0x158c: 0x0967, 0x158d: 0x0987, 0x158e: 0x096b, 0x158f: 0x0973, 0x1590: 0x080b, 0x1591: 0x09cf, + 0x1592: 0x0997, 0x1593: 0x099b, 0x1594: 0x099f, 0x1595: 0x0993, 0x1596: 0x09a7, 0x1597: 0x09a3, + 0x1598: 0x09bb, 0x1599: 0x1674, 0x159a: 0x09d7, 0x159b: 0x09db, 0x159c: 0x09e3, 0x159d: 0x09ef, + 0x159e: 0x09f7, 0x159f: 0x0a13, 0x15a0: 0x1679, 0x15a1: 0x167e, 0x15a2: 0x0a1f, 0x15a3: 0x0a23, + 0x15a4: 0x0a27, 0x15a5: 0x0a1b, 0x15a6: 0x0a2f, 0x15a7: 0x05c7, 0x15a8: 0x05cb, 0x15a9: 0x0a37, + 0x15aa: 0x0a3f, 0x15ab: 0x0a3f, 0x15ac: 0x1683, 0x15ad: 0x0a5b, 0x15ae: 0x0a5f, 0x15af: 0x0a63, + 0x15b0: 0x0a6b, 0x15b1: 0x1688, 0x15b2: 0x0a73, 0x15b3: 0x0a77, 0x15b4: 0x0b4f, 0x15b5: 0x0a7f, + 0x15b6: 0x05cf, 0x15b7: 0x0a8b, 0x15b8: 0x0a9b, 0x15b9: 0x0aa7, 0x15ba: 0x0aa3, 0x15bb: 0x1692, + 0x15bc: 0x0aaf, 0x15bd: 0x1697, 0x15be: 0x0abb, 0x15bf: 0x0ab7, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x0abf, 0x15c1: 0x0acf, 0x15c2: 0x0ad3, 0x15c3: 0x05d3, 0x15c4: 0x0ae3, 0x15c5: 0x0aeb, + 0x15c6: 0x0aef, 0x15c7: 0x0af3, 0x15c8: 0x05d7, 0x15c9: 0x169c, 0x15ca: 0x05db, 0x15cb: 0x0b0f, + 0x15cc: 0x0b13, 0x15cd: 0x0b17, 0x15ce: 0x0b1f, 0x15cf: 0x1863, 0x15d0: 0x0b37, 0x15d1: 0x16a6, + 0x15d2: 0x16a6, 0x15d3: 0x11d7, 0x15d4: 0x0b47, 0x15d5: 0x0b47, 0x15d6: 0x05df, 0x15d7: 0x16c9, + 0x15d8: 0x179b, 0x15d9: 0x0b57, 0x15da: 0x0b5f, 0x15db: 0x05e3, 0x15dc: 0x0b73, 0x15dd: 0x0b83, + 0x15de: 0x0b87, 0x15df: 0x0b8f, 0x15e0: 0x0b9f, 0x15e1: 0x05eb, 0x15e2: 0x05e7, 0x15e3: 0x0ba3, + 0x15e4: 0x16ab, 0x15e5: 0x0ba7, 0x15e6: 0x0bbb, 0x15e7: 0x0bbf, 0x15e8: 0x0bc3, 0x15e9: 0x0bbf, + 0x15ea: 0x0bcf, 0x15eb: 0x0bd3, 0x15ec: 0x0be3, 0x15ed: 0x0bdb, 0x15ee: 0x0bdf, 0x15ef: 0x0be7, + 0x15f0: 0x0beb, 0x15f1: 0x0bef, 0x15f2: 0x0bfb, 0x15f3: 0x0bff, 0x15f4: 0x0c17, 0x15f5: 0x0c1f, + 0x15f6: 0x0c2f, 0x15f7: 0x0c43, 0x15f8: 0x16ba, 0x15f9: 0x0c3f, 0x15fa: 0x0c33, 0x15fb: 0x0c4b, + 0x15fc: 0x0c53, 0x15fd: 0x0c67, 0x15fe: 0x16bf, 0x15ff: 0x0c6f, + // Block 0x58, offset 0x1600 + 0x1600: 0x0c63, 0x1601: 0x0c5b, 0x1602: 0x05ef, 0x1603: 0x0c77, 0x1604: 0x0c7f, 0x1605: 0x0c87, + 0x1606: 0x0c7b, 0x1607: 0x05f3, 0x1608: 0x0c97, 0x1609: 0x0c9f, 0x160a: 0x16c4, 0x160b: 0x0ccb, + 0x160c: 0x0cff, 0x160d: 0x0cdb, 0x160e: 0x05ff, 0x160f: 0x0ce7, 0x1610: 0x05fb, 0x1611: 0x05f7, + 0x1612: 0x07c3, 0x1613: 0x07c7, 0x1614: 0x0d03, 0x1615: 0x0ceb, 0x1616: 0x11ab, 0x1617: 0x0663, + 0x1618: 0x0d0f, 0x1619: 0x0d13, 0x161a: 0x0d17, 0x161b: 0x0d2b, 0x161c: 0x0d23, 0x161d: 0x16dd, + 0x161e: 0x0603, 0x161f: 0x0d3f, 0x1620: 0x0d33, 0x1621: 0x0d4f, 0x1622: 0x0d57, 0x1623: 0x16e7, + 0x1624: 0x0d5b, 0x1625: 0x0d47, 0x1626: 0x0d63, 0x1627: 0x0607, 0x1628: 0x0d67, 0x1629: 0x0d6b, + 0x162a: 0x0d6f, 0x162b: 0x0d7b, 0x162c: 0x16ec, 0x162d: 0x0d83, 0x162e: 0x060b, 0x162f: 0x0d8f, + 0x1630: 0x16f1, 0x1631: 0x0d93, 0x1632: 0x060f, 0x1633: 0x0d9f, 0x1634: 0x0dab, 0x1635: 0x0db7, + 0x1636: 0x0dbb, 0x1637: 0x16f6, 0x1638: 0x168d, 0x1639: 0x16fb, 0x163a: 0x0ddb, 0x163b: 0x1700, + 0x163c: 0x0de7, 0x163d: 0x0def, 0x163e: 0x0ddf, 
0x163f: 0x0dfb, + // Block 0x59, offset 0x1640 + 0x1640: 0x0e0b, 0x1641: 0x0e1b, 0x1642: 0x0e0f, 0x1643: 0x0e13, 0x1644: 0x0e1f, 0x1645: 0x0e23, + 0x1646: 0x1705, 0x1647: 0x0e07, 0x1648: 0x0e3b, 0x1649: 0x0e3f, 0x164a: 0x0613, 0x164b: 0x0e53, + 0x164c: 0x0e4f, 0x164d: 0x170a, 0x164e: 0x0e33, 0x164f: 0x0e6f, 0x1650: 0x170f, 0x1651: 0x1714, + 0x1652: 0x0e73, 0x1653: 0x0e87, 0x1654: 0x0e83, 0x1655: 0x0e7f, 0x1656: 0x0617, 0x1657: 0x0e8b, + 0x1658: 0x0e9b, 0x1659: 0x0e97, 0x165a: 0x0ea3, 0x165b: 0x1651, 0x165c: 0x0eb3, 0x165d: 0x1719, + 0x165e: 0x0ebf, 0x165f: 0x1723, 0x1660: 0x0ed3, 0x1661: 0x0edf, 0x1662: 0x0ef3, 0x1663: 0x1728, + 0x1664: 0x0f07, 0x1665: 0x0f0b, 0x1666: 0x172d, 0x1667: 0x1732, 0x1668: 0x0f27, 0x1669: 0x0f37, + 0x166a: 0x061b, 0x166b: 0x0f3b, 0x166c: 0x061f, 0x166d: 0x061f, 0x166e: 0x0f53, 0x166f: 0x0f57, + 0x1670: 0x0f5f, 0x1671: 0x0f63, 0x1672: 0x0f6f, 0x1673: 0x0623, 0x1674: 0x0f87, 0x1675: 0x1737, + 0x1676: 0x0fa3, 0x1677: 0x173c, 0x1678: 0x0faf, 0x1679: 0x16a1, 0x167a: 0x0fbf, 0x167b: 0x1741, + 0x167c: 0x1746, 0x167d: 0x174b, 0x167e: 0x0627, 0x167f: 0x062b, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0ff7, 0x1681: 0x1755, 0x1682: 0x1750, 0x1683: 0x175a, 0x1684: 0x175f, 0x1685: 0x0fff, + 0x1686: 0x1003, 0x1687: 0x1003, 0x1688: 0x100b, 0x1689: 0x0633, 0x168a: 0x100f, 0x168b: 0x0637, + 0x168c: 0x063b, 0x168d: 0x1769, 0x168e: 0x1023, 0x168f: 0x102b, 0x1690: 0x1037, 0x1691: 0x063f, + 0x1692: 0x176e, 0x1693: 0x105b, 0x1694: 0x1773, 0x1695: 0x1778, 0x1696: 0x107b, 0x1697: 0x1093, + 0x1698: 0x0643, 0x1699: 0x109b, 0x169a: 0x109f, 0x169b: 0x10a3, 0x169c: 0x177d, 0x169d: 0x1782, + 0x169e: 0x1782, 0x169f: 0x10bb, 0x16a0: 0x0647, 0x16a1: 0x1787, 0x16a2: 0x10cf, 0x16a3: 0x10d3, + 0x16a4: 0x064b, 0x16a5: 0x178c, 0x16a6: 0x10ef, 0x16a7: 0x064f, 0x16a8: 0x10ff, 0x16a9: 0x10f7, + 0x16aa: 0x1107, 0x16ab: 0x1796, 0x16ac: 0x111f, 0x16ad: 0x0653, 0x16ae: 0x112b, 0x16af: 0x1133, + 0x16b0: 0x1143, 0x16b1: 0x0657, 0x16b2: 0x17a0, 0x16b3: 0x17a5, 0x16b4: 0x065b, 0x16b5: 0x17aa, + 0x16b6: 0x115b, 0x16b7: 0x17af, 0x16b8: 0x1167, 0x16b9: 0x1173, 0x16ba: 0x117b, 0x16bb: 0x17b4, + 0x16bc: 0x17b9, 0x16bd: 0x118f, 0x16be: 0x17be, 0x16bf: 0x1197, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x16ce, 0x16c1: 0x065f, 0x16c2: 0x11af, 0x16c3: 0x11b3, 0x16c4: 0x0667, 0x16c5: 0x11b7, + 0x16c6: 0x0a33, 0x16c7: 0x17c3, 0x16c8: 0x17c8, 0x16c9: 0x16d3, 0x16ca: 0x16d8, 0x16cb: 0x11d7, + 0x16cc: 0x11db, 0x16cd: 0x13f3, 0x16ce: 0x066b, 0x16cf: 0x1207, 0x16d0: 0x1203, 0x16d1: 0x120b, + 0x16d2: 0x083f, 0x16d3: 0x120f, 0x16d4: 0x1213, 0x16d5: 0x1217, 0x16d6: 0x121f, 0x16d7: 0x17cd, + 0x16d8: 0x121b, 0x16d9: 0x1223, 0x16da: 0x1237, 0x16db: 0x123b, 0x16dc: 0x1227, 0x16dd: 0x123f, + 0x16de: 0x1253, 0x16df: 0x1267, 0x16e0: 0x1233, 0x16e1: 0x1247, 0x16e2: 0x124b, 0x16e3: 0x124f, + 0x16e4: 0x17d2, 0x16e5: 0x17dc, 0x16e6: 0x17d7, 0x16e7: 0x066f, 0x16e8: 0x126f, 0x16e9: 0x1273, + 0x16ea: 0x127b, 0x16eb: 0x17f0, 0x16ec: 0x127f, 0x16ed: 0x17e1, 0x16ee: 0x0673, 0x16ef: 0x0677, + 0x16f0: 0x17e6, 0x16f1: 0x17eb, 0x16f2: 0x067b, 0x16f3: 0x129f, 0x16f4: 0x12a3, 0x16f5: 0x12a7, + 0x16f6: 0x12ab, 0x16f7: 0x12b7, 0x16f8: 0x12b3, 0x16f9: 0x12bf, 0x16fa: 0x12bb, 0x16fb: 0x12cb, + 0x16fc: 0x12c3, 0x16fd: 0x12c7, 0x16fe: 0x12cf, 0x16ff: 0x067f, + // Block 0x5c, offset 0x1700 + 0x1700: 0x12d7, 0x1701: 0x12db, 0x1702: 0x0683, 0x1703: 0x12eb, 0x1704: 0x12ef, 0x1705: 0x17f5, + 0x1706: 0x12fb, 0x1707: 0x12ff, 0x1708: 0x0687, 0x1709: 0x130b, 0x170a: 0x05bb, 0x170b: 0x17fa, + 0x170c: 0x17ff, 0x170d: 0x068b, 0x170e: 0x068f, 0x170f: 0x1337, 0x1710: 
0x134f, 0x1711: 0x136b, + 0x1712: 0x137b, 0x1713: 0x1804, 0x1714: 0x138f, 0x1715: 0x1393, 0x1716: 0x13ab, 0x1717: 0x13b7, + 0x1718: 0x180e, 0x1719: 0x1660, 0x171a: 0x13c3, 0x171b: 0x13bf, 0x171c: 0x13cb, 0x171d: 0x1665, + 0x171e: 0x13d7, 0x171f: 0x13e3, 0x1720: 0x1813, 0x1721: 0x1818, 0x1722: 0x1423, 0x1723: 0x142f, + 0x1724: 0x1437, 0x1725: 0x181d, 0x1726: 0x143b, 0x1727: 0x1467, 0x1728: 0x1473, 0x1729: 0x1477, + 0x172a: 0x146f, 0x172b: 0x1483, 0x172c: 0x1487, 0x172d: 0x1822, 0x172e: 0x1493, 0x172f: 0x0693, + 0x1730: 0x149b, 0x1731: 0x1827, 0x1732: 0x0697, 0x1733: 0x14d3, 0x1734: 0x0ac3, 0x1735: 0x14eb, + 0x1736: 0x182c, 0x1737: 0x1836, 0x1738: 0x069b, 0x1739: 0x069f, 0x173a: 0x1513, 0x173b: 0x183b, + 0x173c: 0x06a3, 0x173d: 0x1840, 0x173e: 0x152b, 0x173f: 0x152b, + // Block 0x5d, offset 0x1740 + 0x1740: 0x1533, 0x1741: 0x1845, 0x1742: 0x154b, 0x1743: 0x06a7, 0x1744: 0x155b, 0x1745: 0x1567, + 0x1746: 0x156f, 0x1747: 0x1577, 0x1748: 0x06ab, 0x1749: 0x184a, 0x174a: 0x158b, 0x174b: 0x15a7, + 0x174c: 0x15b3, 0x174d: 0x06af, 0x174e: 0x06b3, 0x174f: 0x15b7, 0x1750: 0x184f, 0x1751: 0x06b7, + 0x1752: 0x1854, 0x1753: 0x1859, 0x1754: 0x185e, 0x1755: 0x15db, 0x1756: 0x06bb, 0x1757: 0x15ef, + 0x1758: 0x15f7, 0x1759: 0x15fb, 0x175a: 0x1603, 0x175b: 0x160b, 0x175c: 0x1613, 0x175d: 0x1868, +} + +// nfkcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var nfkcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x5c, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x5d, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x5e, 0xcb: 0x5f, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09, + 0xd0: 0x0a, 0xd1: 0x60, 0xd2: 0x61, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x62, + 0xd8: 0x63, 0xd9: 0x0d, 0xdb: 0x64, 0xdc: 0x65, 0xdd: 0x66, 0xdf: 0x67, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x68, 0x121: 0x69, 0x123: 0x0e, 0x124: 0x6a, 0x125: 0x6b, 0x126: 0x6c, 0x127: 0x6d, + 0x128: 0x6e, 0x129: 0x6f, 0x12a: 0x70, 0x12b: 0x71, 0x12c: 0x6c, 0x12d: 0x72, 0x12e: 0x73, 0x12f: 0x74, + 0x131: 0x75, 0x132: 0x76, 0x133: 0x77, 0x134: 0x78, 0x135: 0x79, 0x137: 0x7a, + 0x138: 0x7b, 0x139: 0x7c, 0x13a: 0x7d, 0x13b: 0x7e, 0x13c: 0x7f, 0x13d: 0x80, 0x13e: 0x81, 0x13f: 0x82, + // Block 0x5, offset 0x140 + 0x140: 0x83, 0x142: 0x84, 0x143: 0x85, 0x144: 0x86, 0x145: 0x87, 0x146: 0x88, 0x147: 0x89, + 0x14d: 0x8a, + 0x15c: 0x8b, 0x15f: 0x8c, + 0x162: 0x8d, 0x164: 0x8e, + 0x168: 0x8f, 0x169: 0x90, 0x16a: 0x91, 0x16c: 0x0f, 0x16d: 0x92, 0x16e: 0x93, 0x16f: 0x94, + 0x170: 0x95, 0x173: 0x96, 0x174: 0x97, 0x175: 0x10, 0x176: 0x11, 0x177: 0x12, + 0x178: 0x13, 0x179: 0x14, 0x17a: 0x15, 0x17b: 0x16, 0x17c: 0x17, 0x17d: 0x18, 0x17e: 0x19, 0x17f: 0x1a, + // Block 0x6, offset 0x180 + 0x180: 0x98, 0x181: 0x99, 0x182: 0x9a, 0x183: 0x9b, 0x184: 0x1b, 0x185: 0x1c, 0x186: 0x9c, 0x187: 0x9d, + 0x188: 0x9e, 0x189: 0x1d, 0x18a: 0x1e, 0x18b: 0x9f, 0x18c: 0xa0, + 0x191: 0x1f, 0x192: 0x20, 0x193: 0xa1, + 0x1a8: 0xa2, 0x1a9: 0xa3, 0x1ab: 0xa4, + 0x1b1: 0xa5, 0x1b3: 0xa6, 0x1b5: 0xa7, 0x1b7: 0xa8, + 0x1ba: 0xa9, 0x1bb: 0xaa, 0x1bc: 0x21, 0x1bd: 0x22, 0x1be: 0x23, 0x1bf: 0xab, + // Block 0x7, offset 0x1c0 + 0x1c0: 0xac, 0x1c1: 0x24, 0x1c2: 0x25, 0x1c3: 0x26, 0x1c4: 0xad, 0x1c5: 0x27, 0x1c6: 0x28, + 0x1c8: 0x29, 0x1c9: 0x2a, 0x1ca: 0x2b, 0x1cb: 0x2c, 0x1cc: 0x2d, 0x1cd: 0x2e, 0x1ce: 0x2f, 0x1cf: 0x30, + // Block 0x8, offset 0x200 + 0x219: 0xae, 0x21a: 0xaf, 0x21b: 
0xb0, 0x21d: 0xb1, 0x21f: 0xb2, + 0x220: 0xb3, 0x223: 0xb4, 0x224: 0xb5, 0x225: 0xb6, 0x226: 0xb7, 0x227: 0xb8, + 0x22a: 0xb9, 0x22b: 0xba, 0x22d: 0xbb, 0x22f: 0xbc, + 0x230: 0xbd, 0x231: 0xbe, 0x232: 0xbf, 0x233: 0xc0, 0x234: 0xc1, 0x235: 0xc2, 0x236: 0xc3, 0x237: 0xbd, + 0x238: 0xbe, 0x239: 0xbf, 0x23a: 0xc0, 0x23b: 0xc1, 0x23c: 0xc2, 0x23d: 0xc3, 0x23e: 0xbd, 0x23f: 0xbe, + // Block 0x9, offset 0x240 + 0x240: 0xbf, 0x241: 0xc0, 0x242: 0xc1, 0x243: 0xc2, 0x244: 0xc3, 0x245: 0xbd, 0x246: 0xbe, 0x247: 0xbf, + 0x248: 0xc0, 0x249: 0xc1, 0x24a: 0xc2, 0x24b: 0xc3, 0x24c: 0xbd, 0x24d: 0xbe, 0x24e: 0xbf, 0x24f: 0xc0, + 0x250: 0xc1, 0x251: 0xc2, 0x252: 0xc3, 0x253: 0xbd, 0x254: 0xbe, 0x255: 0xbf, 0x256: 0xc0, 0x257: 0xc1, + 0x258: 0xc2, 0x259: 0xc3, 0x25a: 0xbd, 0x25b: 0xbe, 0x25c: 0xbf, 0x25d: 0xc0, 0x25e: 0xc1, 0x25f: 0xc2, + 0x260: 0xc3, 0x261: 0xbd, 0x262: 0xbe, 0x263: 0xbf, 0x264: 0xc0, 0x265: 0xc1, 0x266: 0xc2, 0x267: 0xc3, + 0x268: 0xbd, 0x269: 0xbe, 0x26a: 0xbf, 0x26b: 0xc0, 0x26c: 0xc1, 0x26d: 0xc2, 0x26e: 0xc3, 0x26f: 0xbd, + 0x270: 0xbe, 0x271: 0xbf, 0x272: 0xc0, 0x273: 0xc1, 0x274: 0xc2, 0x275: 0xc3, 0x276: 0xbd, 0x277: 0xbe, + 0x278: 0xbf, 0x279: 0xc0, 0x27a: 0xc1, 0x27b: 0xc2, 0x27c: 0xc3, 0x27d: 0xbd, 0x27e: 0xbe, 0x27f: 0xbf, + // Block 0xa, offset 0x280 + 0x280: 0xc0, 0x281: 0xc1, 0x282: 0xc2, 0x283: 0xc3, 0x284: 0xbd, 0x285: 0xbe, 0x286: 0xbf, 0x287: 0xc0, + 0x288: 0xc1, 0x289: 0xc2, 0x28a: 0xc3, 0x28b: 0xbd, 0x28c: 0xbe, 0x28d: 0xbf, 0x28e: 0xc0, 0x28f: 0xc1, + 0x290: 0xc2, 0x291: 0xc3, 0x292: 0xbd, 0x293: 0xbe, 0x294: 0xbf, 0x295: 0xc0, 0x296: 0xc1, 0x297: 0xc2, + 0x298: 0xc3, 0x299: 0xbd, 0x29a: 0xbe, 0x29b: 0xbf, 0x29c: 0xc0, 0x29d: 0xc1, 0x29e: 0xc2, 0x29f: 0xc3, + 0x2a0: 0xbd, 0x2a1: 0xbe, 0x2a2: 0xbf, 0x2a3: 0xc0, 0x2a4: 0xc1, 0x2a5: 0xc2, 0x2a6: 0xc3, 0x2a7: 0xbd, + 0x2a8: 0xbe, 0x2a9: 0xbf, 0x2aa: 0xc0, 0x2ab: 0xc1, 0x2ac: 0xc2, 0x2ad: 0xc3, 0x2ae: 0xbd, 0x2af: 0xbe, + 0x2b0: 0xbf, 0x2b1: 0xc0, 0x2b2: 0xc1, 0x2b3: 0xc2, 0x2b4: 0xc3, 0x2b5: 0xbd, 0x2b6: 0xbe, 0x2b7: 0xbf, + 0x2b8: 0xc0, 0x2b9: 0xc1, 0x2ba: 0xc2, 0x2bb: 0xc3, 0x2bc: 0xbd, 0x2bd: 0xbe, 0x2be: 0xbf, 0x2bf: 0xc0, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xc1, 0x2c1: 0xc2, 0x2c2: 0xc3, 0x2c3: 0xbd, 0x2c4: 0xbe, 0x2c5: 0xbf, 0x2c6: 0xc0, 0x2c7: 0xc1, + 0x2c8: 0xc2, 0x2c9: 0xc3, 0x2ca: 0xbd, 0x2cb: 0xbe, 0x2cc: 0xbf, 0x2cd: 0xc0, 0x2ce: 0xc1, 0x2cf: 0xc2, + 0x2d0: 0xc3, 0x2d1: 0xbd, 0x2d2: 0xbe, 0x2d3: 0xbf, 0x2d4: 0xc0, 0x2d5: 0xc1, 0x2d6: 0xc2, 0x2d7: 0xc3, + 0x2d8: 0xbd, 0x2d9: 0xbe, 0x2da: 0xbf, 0x2db: 0xc0, 0x2dc: 0xc1, 0x2dd: 0xc2, 0x2de: 0xc4, + // Block 0xc, offset 0x300 + 0x324: 0x31, 0x325: 0x32, 0x326: 0x33, 0x327: 0x34, + 0x328: 0x35, 0x329: 0x36, 0x32a: 0x37, 0x32b: 0x38, 0x32c: 0x39, 0x32d: 0x3a, 0x32e: 0x3b, 0x32f: 0x3c, + 0x330: 0x3d, 0x331: 0x3e, 0x332: 0x3f, 0x333: 0x40, 0x334: 0x41, 0x335: 0x42, 0x336: 0x43, 0x337: 0x44, + 0x338: 0x45, 0x339: 0x46, 0x33a: 0x47, 0x33b: 0x48, 0x33c: 0xc5, 0x33d: 0x49, 0x33e: 0x4a, 0x33f: 0x4b, + // Block 0xd, offset 0x340 + 0x347: 0xc6, + 0x34b: 0xc7, 0x34d: 0xc8, + 0x368: 0xc9, 0x36b: 0xca, + 0x374: 0xcb, + 0x37d: 0xcc, + // Block 0xe, offset 0x380 + 0x381: 0xcd, 0x382: 0xce, 0x384: 0xcf, 0x385: 0xb7, 0x387: 0xd0, + 0x388: 0xd1, 0x38b: 0xd2, 0x38c: 0xd3, 0x38d: 0xd4, + 0x391: 0xd5, 0x392: 0xd6, 0x393: 0xd7, 0x396: 0xd8, 0x397: 0xd9, + 0x398: 0xda, 0x39a: 0xdb, 0x39c: 0xdc, + 0x3a0: 0xdd, + 0x3a8: 0xde, 0x3a9: 0xdf, 0x3aa: 0xe0, + 0x3b0: 0xda, 0x3b5: 0xe1, 0x3b6: 0xe2, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xe3, 0x3ec: 0xe4, + // Block 0x10, offset 0x400 + 0x432: 
0xe5, + // Block 0x11, offset 0x440 + 0x445: 0xe6, 0x446: 0xe7, 0x447: 0xe8, + 0x449: 0xe9, + 0x450: 0xea, 0x451: 0xeb, 0x452: 0xec, 0x453: 0xed, 0x454: 0xee, 0x455: 0xef, 0x456: 0xf0, 0x457: 0xf1, + 0x458: 0xf2, 0x459: 0xf3, 0x45a: 0x4c, 0x45b: 0xf4, 0x45c: 0xf5, 0x45d: 0xf6, 0x45e: 0xf7, 0x45f: 0x4d, + // Block 0x12, offset 0x480 + 0x480: 0xf8, + 0x4a3: 0xf9, 0x4a5: 0xfa, + 0x4b8: 0x4e, 0x4b9: 0x4f, 0x4ba: 0x50, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x51, 0x4c5: 0xfb, 0x4c6: 0xfc, + 0x4c8: 0x52, 0x4c9: 0xfd, + // Block 0x14, offset 0x500 + 0x520: 0x53, 0x521: 0x54, 0x522: 0x55, 0x523: 0x56, 0x524: 0x57, 0x525: 0x58, 0x526: 0x59, 0x527: 0x5a, + 0x528: 0x5b, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfkcSparseOffset: 162 entries, 324 bytes +var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1b, 0x25, 0x35, 0x37, 0x3c, 0x47, 0x56, 0x63, 0x6b, 0x70, 0x75, 0x77, 0x7f, 0x86, 0x89, 0x91, 0x95, 0x99, 0x9b, 0x9d, 0xa6, 0xaa, 0xb1, 0xb6, 0xb9, 0xc3, 0xc6, 0xcd, 0xd5, 0xd9, 0xdb, 0xde, 0xe2, 0xe8, 0xf9, 0x105, 0x107, 0x10d, 0x10f, 0x111, 0x113, 0x115, 0x117, 0x119, 0x11b, 0x11e, 0x121, 0x123, 0x126, 0x129, 0x12d, 0x132, 0x13b, 0x13d, 0x140, 0x142, 0x14d, 0x158, 0x166, 0x174, 0x184, 0x192, 0x199, 0x19f, 0x1ae, 0x1b2, 0x1b4, 0x1b8, 0x1ba, 0x1bd, 0x1bf, 0x1c2, 0x1c4, 0x1c7, 0x1c9, 0x1cb, 0x1cd, 0x1d9, 0x1e3, 0x1ed, 0x1f0, 0x1f4, 0x1f6, 0x1f8, 0x1fa, 0x1fc, 0x1ff, 0x201, 0x203, 0x205, 0x207, 0x20d, 0x210, 0x214, 0x216, 0x21d, 0x223, 0x229, 0x231, 0x237, 0x23d, 0x243, 0x247, 0x249, 0x24b, 0x24d, 0x24f, 0x255, 0x258, 0x25a, 0x260, 0x263, 0x26b, 0x272, 0x275, 0x278, 0x27a, 0x27d, 0x285, 0x289, 0x290, 0x293, 0x299, 0x29b, 0x29d, 0x2a0, 0x2a2, 0x2a5, 0x2a7, 0x2a9, 0x2ab, 0x2ae, 0x2b0, 0x2b2, 0x2b4, 0x2b6, 0x2c3, 0x2cd, 0x2cf, 0x2d1, 0x2d5, 0x2da, 0x2e6, 0x2eb, 0x2f4, 0x2fa, 0x2ff, 0x303, 0x308, 0x30c, 0x31c, 0x32a, 0x338, 0x346, 0x34c, 0x34e, 0x351, 0x35b, 0x35d} + +// nfkcSparseValues: 871 entries, 3484 bytes +var nfkcSparseValues = [871]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0002, lo: 0x0d}, + {value: 0x0001, lo: 0xa0, hi: 0xa0}, + {value: 0x4278, lo: 0xa8, hi: 0xa8}, + {value: 0x0083, lo: 0xaa, hi: 0xaa}, + {value: 0x4264, lo: 0xaf, hi: 0xaf}, + {value: 0x0025, lo: 0xb2, hi: 0xb3}, + {value: 0x425a, lo: 0xb4, hi: 0xb4}, + {value: 0x01dc, lo: 0xb5, hi: 0xb5}, + {value: 0x4291, lo: 0xb8, hi: 0xb8}, + {value: 0x0023, lo: 0xb9, hi: 0xb9}, + {value: 0x009f, lo: 0xba, hi: 0xba}, + {value: 0x221c, lo: 0xbc, hi: 0xbc}, + {value: 0x2210, lo: 0xbd, hi: 0xbd}, + {value: 0x22b2, lo: 0xbe, hi: 0xbe}, + // Block 0x1, offset 0xe + {value: 0x0091, lo: 0x03}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x12 + {value: 0x0003, lo: 0x08}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x0091, lo: 0xb0, hi: 0xb0}, + {value: 0x0119, lo: 0xb1, hi: 0xb1}, + {value: 0x0095, lo: 0xb2, hi: 0xb2}, + {value: 0x00a5, lo: 0xb3, hi: 0xb3}, + {value: 0x0143, lo: 0xb4, hi: 0xb6}, + {value: 0x00af, lo: 0xb7, hi: 0xb7}, + {value: 0x00b3, lo: 0xb8, hi: 0xb8}, + // Block 0x3, offset 0x1b + {value: 0x000a, lo: 0x09}, + {value: 0x426e, lo: 0x98, hi: 0x98}, + {value: 0x4273, lo: 0x99, hi: 0x9a}, + {value: 0x4296, lo: 0x9b, hi: 0x9b}, + {value: 0x425f, lo: 0x9c, hi: 0x9c}, + {value: 0x4282, lo: 0x9d, hi: 0x9d}, + {value: 0x0113, lo: 0xa0, hi: 0xa0}, + {value: 0x0099, lo: 0xa1, hi: 0xa1}, + {value: 0x00a7, lo: 0xa2, hi: 0xa3}, + 
{value: 0x0167, lo: 0xa4, hi: 0xa4}, + // Block 0x4, offset 0x25 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a5, lo: 0x90, hi: 0x90}, + {value: 0x37b1, lo: 0x91, hi: 0x91}, + {value: 0x379f, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3817, lo: 0x97, hi: 0x97}, + {value: 0x37e1, lo: 0x9c, hi: 0x9c}, + {value: 0x37c9, lo: 0x9d, hi: 0x9d}, + {value: 0x37f3, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x381d, lo: 0xb6, hi: 0xb6}, + {value: 0x3823, lo: 0xb7, hi: 0xb7}, + // Block 0x5, offset 0x35 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x6, offset 0x37 + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x7, offset 0x3c + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3841, lo: 0xa2, hi: 0xa2}, + {value: 0x3847, lo: 0xa3, hi: 0xa3}, + {value: 0x3853, lo: 0xa4, hi: 0xa4}, + {value: 0x384d, lo: 0xa5, hi: 0xa5}, + {value: 0x3859, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x8, offset 0x47 + {value: 0x0000, lo: 0x0e}, + {value: 0x386b, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x385f, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3865, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0x9, offset 0x56 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xa, offset 0x63 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xb, offset 0x6b + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0xc, offset 0x70 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xd, offset 0x75 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xe, offset 0x77 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3ed8, lo: 0xa9, hi: 0xa9}, + 
{value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee0, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3ee8, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0xf, offset 0x7f + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, + // Block 0x10, offset 0x86 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x11, offset 0x89 + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 0x9f}, + {value: 0x8132, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x91 + {value: 0x0000, lo: 0x03}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x13, offset 0x95 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, + // Block 0x14, offset 0x99 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x9b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x16, offset 0x9d + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb6, lo: 0x88, hi: 0x88}, + {value: 0x2cae, lo: 0x8b, hi: 0x8b}, + {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + {value: 0x45ac, lo: 0x9d, hi: 0x9d}, + // Block 0x17, offset 0xa6 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc6, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x18, offset 0xaa + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cce, lo: 0x8a, hi: 0x8a}, + {value: 0x2cde, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd6, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x19, offset 0xb1 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef0, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1a, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0xb9 + {value: 0x0000, lo: 0x09}, + {value: 0x2ce6, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cee, lo: 0x87, hi: 0x87}, + {value: 0x2cf6, lo: 0x88, hi: 0x88}, + {value: 0x2f50, lo: 0x8a, hi: 0x8a}, + {value: 0x2dd8, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xc3 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1d, offset 0xc6 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cfe, lo: 0x8a, hi: 0x8a}, + {value: 0x2d0e, lo: 0x8b, hi: 0x8b}, + {value: 0x2d06, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1e, offset 0xcd + 
{value: 0x6bea, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3ef8, lo: 0x9a, hi: 0x9a}, + {value: 0x2f58, lo: 0x9c, hi: 0x9c}, + {value: 0x2de3, lo: 0x9d, hi: 0x9d}, + {value: 0x2d16, lo: 0x9e, hi: 0x9f}, + // Block 0x1f, offset 0xd5 + {value: 0x0000, lo: 0x03}, + {value: 0x2621, lo: 0xb3, hi: 0xb3}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x20, offset 0xd9 + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x21, offset 0xdb + {value: 0x0000, lo: 0x02}, + {value: 0x2636, lo: 0xb3, hi: 0xb3}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + // Block 0x22, offset 0xde + {value: 0x0000, lo: 0x03}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + {value: 0x2628, lo: 0x9c, hi: 0x9c}, + {value: 0x262f, lo: 0x9d, hi: 0x9d}, + // Block 0x23, offset 0xe2 + {value: 0x0000, lo: 0x05}, + {value: 0x030b, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x24, offset 0xe8 + {value: 0x0000, lo: 0x10}, + {value: 0x2644, lo: 0x83, hi: 0x83}, + {value: 0x264b, lo: 0x8d, hi: 0x8d}, + {value: 0x2652, lo: 0x92, hi: 0x92}, + {value: 0x2659, lo: 0x97, hi: 0x97}, + {value: 0x2660, lo: 0x9c, hi: 0x9c}, + {value: 0x263d, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x45f4, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x45ff, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x25, offset 0xf9 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x266e, lo: 0x93, hi: 0x93}, + {value: 0x2675, lo: 0x9d, hi: 0x9d}, + {value: 0x267c, lo: 0xa2, hi: 0xa2}, + {value: 0x2683, lo: 0xa7, hi: 0xa7}, + {value: 0x268a, lo: 0xac, hi: 0xac}, + {value: 0x2667, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0x105 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x27, offset 0x107 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d1e, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x28, offset 0x10d + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x29, offset 0x10f + {value: 0x0000, lo: 0x01}, + {value: 0x030f, lo: 0xbc, hi: 0xbc}, + // Block 0x2a, offset 0x111 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0x113 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x115 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x117 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x119 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x11b + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x11e + {value: 0x0000, lo: 0x02}, + {value: 
0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x121 + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x123 + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x126 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x129 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x12d + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x36, offset 0x132 + {value: 0x0000, lo: 0x08}, + {value: 0x2d66, lo: 0x80, hi: 0x80}, + {value: 0x2d6e, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d76, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x37, offset 0x13b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x38, offset 0x13d + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x39, offset 0x140 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3a, offset 0x142 + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3b, offset 0x14d + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00e6, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00ef, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x158 + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x043b, lo: 0x91, hi: 0x91}, + {value: 0x429b, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, hi: 0xa4}, + {value: 0x1873, lo: 0xa5, hi: 0xa5}, + {value: 0x1b5c, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x2691, lo: 0xb3, hi: 0xb3}, + {value: 0x27fe, lo: 0xb4, hi: 0xb4}, + {value: 0x2698, lo: 0xb6, hi: 0xb6}, + {value: 0x2808, lo: 0xb7, hi: 0xb7}, + {value: 0x186d, lo: 0xbc, hi: 0xbc}, + {value: 0x4269, lo: 0xbe, hi: 0xbe}, + // Block 0x3d, offset 0x166 + {value: 0x0002, lo: 0x0d}, + {value: 0x1933, lo: 0x87, hi: 0x87}, + {value: 0x1930, lo: 0x88, hi: 0x88}, + {value: 0x1870, lo: 0x89, hi: 0x89}, + {value: 0x298e, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x0467, lo: 0xbb, hi: 0xbb}, + {value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, 
hi: 0xbf}, + // Block 0x3e, offset 0x174 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x0467, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, + {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 0x0104, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x1999, lo: 0xa8, hi: 0xa8}, + // Block 0x3f, offset 0x184 + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x40, offset 0x192 + {value: 0x0007, lo: 0x06}, + {value: 0x2180, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bb9, lo: 0x9a, hi: 0x9b}, + {value: 0x3bc7, lo: 0xae, hi: 0xae}, + // Block 0x41, offset 0x199 + {value: 0x000e, lo: 0x05}, + {value: 0x3bce, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd5, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x42, offset 0x19f + {value: 0x0173, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be3, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bea, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf1, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bf8, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3bff, lo: 0xa6, hi: 0xa6}, + {value: 0x269f, lo: 0xac, hi: 0xad}, + {value: 0x26a6, lo: 0xaf, hi: 0xaf}, + {value: 0x281c, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x43, offset 0x1ae + {value: 0x0007, lo: 0x03}, + {value: 0x3c68, lo: 0xa0, hi: 0xa1}, + {value: 0x3c92, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbc, lo: 0xaa, hi: 0xad}, + // Block 0x44, offset 0x1b2 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x45, offset 0x1b4 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x46, offset 0x1b8 + {value: 0x0000, lo: 0x01}, + {value: 0x299b, lo: 0x8c, hi: 0x8c}, + // Block 0x47, offset 0x1ba + {value: 0x0263, lo: 0x02}, + {value: 0x1b8c, lo: 0xb4, hi: 0xb4}, + {value: 0x192d, lo: 0xb5, hi: 0xb6}, + // Block 0x48, offset 0x1bd + {value: 0x0000, lo: 0x01}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, + // Block 0x49, offset 0x1bf + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4a, offset 0x1c2 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x4b, offset 0x1c4 + {value: 0x0000, lo: 0x02}, + {value: 0x047f, lo: 0xaf, hi: 0xaf}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x4c, offset 
0x1c7 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x4d, offset 0x1c9 + {value: 0x0000, lo: 0x01}, + {value: 0x0dc3, lo: 0x9f, hi: 0x9f}, + // Block 0x4e, offset 0x1cb + {value: 0x0000, lo: 0x01}, + {value: 0x162f, lo: 0xb3, hi: 0xb3}, + // Block 0x4f, offset 0x1cd + {value: 0x0004, lo: 0x0b}, + {value: 0x1597, lo: 0x80, hi: 0x82}, + {value: 0x15af, lo: 0x83, hi: 0x83}, + {value: 0x15c7, lo: 0x84, hi: 0x85}, + {value: 0x15d7, lo: 0x86, hi: 0x89}, + {value: 0x15eb, lo: 0x8a, hi: 0x8c}, + {value: 0x15ff, lo: 0x8d, hi: 0x8d}, + {value: 0x1607, lo: 0x8e, hi: 0x8e}, + {value: 0x160f, lo: 0x8f, hi: 0x90}, + {value: 0x161b, lo: 0x91, hi: 0x93}, + {value: 0x162b, lo: 0x94, hi: 0x94}, + {value: 0x1633, lo: 0x95, hi: 0x95}, + // Block 0x50, offset 0x1d9 + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xae}, + {value: 0x812f, lo: 0xaf, hi: 0xaf}, + {value: 0x04b3, lo: 0xb6, hi: 0xb6}, + {value: 0x0887, lo: 0xb8, hi: 0xba}, + // Block 0x51, offset 0x1e3 + {value: 0x0006, lo: 0x09}, + {value: 0x0313, lo: 0xb1, hi: 0xb1}, + {value: 0x0317, lo: 0xb2, hi: 0xb2}, + {value: 0x4a3b, lo: 0xb3, hi: 0xb3}, + {value: 0x031b, lo: 0xb4, hi: 0xb4}, + {value: 0x4a41, lo: 0xb5, hi: 0xb6}, + {value: 0x031f, lo: 0xb7, hi: 0xb7}, + {value: 0x0323, lo: 0xb8, hi: 0xb8}, + {value: 0x0327, lo: 0xb9, hi: 0xb9}, + {value: 0x4a4d, lo: 0xba, hi: 0xbf}, + // Block 0x52, offset 0x1ed + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x53, offset 0x1f0 + {value: 0x0000, lo: 0x03}, + {value: 0x020f, lo: 0x9c, hi: 0x9c}, + {value: 0x0212, lo: 0x9d, hi: 0x9d}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x54, offset 0x1f4 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x55, offset 0x1f6 + {value: 0x0000, lo: 0x01}, + {value: 0x163b, lo: 0xb0, hi: 0xb0}, + // Block 0x56, offset 0x1f8 + {value: 0x000c, lo: 0x01}, + {value: 0x00d7, lo: 0xb8, hi: 0xb9}, + // Block 0x57, offset 0x1fa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x58, offset 0x1fc + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x59, offset 0x1ff + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x5a, offset 0x201 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x5b, offset 0x203 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x5c, offset 0x205 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x5d, offset 0x207 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x5e, offset 0x20d + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x5f, offset 0x210 + {value: 0x0008, lo: 0x03}, + {value: 0x1637, lo: 0x9c, hi: 0x9d}, + {value: 0x0125, lo: 0x9e, hi: 0x9e}, + {value: 0x1643, lo: 0x9f, hi: 0x9f}, + // Block 0x60, offset 0x214 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x61, offset 0x216 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, 
lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x62, offset 0x21d + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x63, offset 0x223 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x64, offset 0x229 + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x65, offset 0x231 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x66, offset 0x237 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x67, offset 0x23d + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x68, offset 0x243 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x69, offset 0x247 + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6a, offset 0x249 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6b, offset 0x24b + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x6c, offset 0x24d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x6d, offset 0x24f + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x6e, offset 0x255 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x6f, offset 0x258 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa4, hi: 0xa7}, + // Block 0x70, offset 0x25a + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x8132, lo: 0x88, hi: 0x8a}, + {value: 0x812d, lo: 0x8b, hi: 0x8b}, + {value: 0x8132, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x8d, hi: 0x90}, + // Block 0x71, offset 0x260 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x72, offset 0x263 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4238, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4242, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424c, lo: 0xab, hi: 
0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x73, offset 0x26b + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d7e, lo: 0xae, hi: 0xae}, + {value: 0x2d88, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x74, offset 0x272 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x75, offset 0x275 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x76, offset 0x278 + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x77, offset 0x27a + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x78, offset 0x27d + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x79, offset 0x285 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + {value: 0x8132, lo: 0x9e, hi: 0x9e}, + // Block 0x7a, offset 0x289 + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x7b, offset 0x290 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x7c, offset 0x293 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7d, offset 0x299 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x7e, offset 0x29b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7f, offset 0x29d + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x80, offset 0x2a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x81, offset 0x2a2 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb9, hi: 0xb9}, + {value: 0x8102, lo: 0xba, hi: 0xba}, + // Block 0x82, offset 0x2a5 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x83, offset 0x2a7 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x84, offset 0x2a9 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x85, offset 0x2ab + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x86, offset 0x2ae + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x97, hi: 0x97}, + // Block 0x87, offset 0x2b0 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x88, offset 0x2b2 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x89, offset 0x2b4 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x8a, offset 0x2b6 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 
0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x8b, offset 0x2c3 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, + // Block 0x8c, offset 0x2cd + {value: 0x0000, lo: 0x01}, + {value: 0x467a, lo: 0x80, hi: 0x80}, + // Block 0x8d, offset 0x2cf + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x8e, offset 0x2d1 + {value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x8f, offset 0x2d5 + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x90, offset 0x2da + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x91, offset 0x2e6 + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x92, offset 0x2eb + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x93, offset 0x2f4 + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x94, offset 0x2fa + {value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x95, offset 0x2ff + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0x96, offset 0x303 + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // Block 0x97, offset 0x308 + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0x98, offset 0x30c + {value: 
0x0003, lo: 0x0f}, + {value: 0x01b8, lo: 0x80, hi: 0x80}, + {value: 0x045f, lo: 0x81, hi: 0x81}, + {value: 0x01bb, lo: 0x82, hi: 0x9a}, + {value: 0x045b, lo: 0x9b, hi: 0x9b}, + {value: 0x01c7, lo: 0x9c, hi: 0x9c}, + {value: 0x01d0, lo: 0x9d, hi: 0x9d}, + {value: 0x01d6, lo: 0x9e, hi: 0x9e}, + {value: 0x01fa, lo: 0x9f, hi: 0x9f}, + {value: 0x01eb, lo: 0xa0, hi: 0xa0}, + {value: 0x01e8, lo: 0xa1, hi: 0xa1}, + {value: 0x0173, lo: 0xa2, hi: 0xb2}, + {value: 0x0188, lo: 0xb3, hi: 0xb3}, + {value: 0x01a6, lo: 0xb4, hi: 0xba}, + {value: 0x045f, lo: 0xbb, hi: 0xbb}, + {value: 0x01bb, lo: 0xbc, hi: 0xbf}, + // Block 0x99, offset 0x31c + {value: 0x0003, lo: 0x0d}, + {value: 0x01c7, lo: 0x80, hi: 0x94}, + {value: 0x045b, lo: 0x95, hi: 0x95}, + {value: 0x01c7, lo: 0x96, hi: 0x96}, + {value: 0x01d0, lo: 0x97, hi: 0x97}, + {value: 0x01d6, lo: 0x98, hi: 0x98}, + {value: 0x01fa, lo: 0x99, hi: 0x99}, + {value: 0x01eb, lo: 0x9a, hi: 0x9a}, + {value: 0x01e8, lo: 0x9b, hi: 0x9b}, + {value: 0x0173, lo: 0x9c, hi: 0xac}, + {value: 0x0188, lo: 0xad, hi: 0xad}, + {value: 0x01a6, lo: 0xae, hi: 0xb4}, + {value: 0x045f, lo: 0xb5, hi: 0xb5}, + {value: 0x01bb, lo: 0xb6, hi: 0xbf}, + // Block 0x9a, offset 0x32a + {value: 0x0003, lo: 0x0d}, + {value: 0x01d9, lo: 0x80, hi: 0x8e}, + {value: 0x045b, lo: 0x8f, hi: 0x8f}, + {value: 0x01c7, lo: 0x90, hi: 0x90}, + {value: 0x01d0, lo: 0x91, hi: 0x91}, + {value: 0x01d6, lo: 0x92, hi: 0x92}, + {value: 0x01fa, lo: 0x93, hi: 0x93}, + {value: 0x01eb, lo: 0x94, hi: 0x94}, + {value: 0x01e8, lo: 0x95, hi: 0x95}, + {value: 0x0173, lo: 0x96, hi: 0xa6}, + {value: 0x0188, lo: 0xa7, hi: 0xa7}, + {value: 0x01a6, lo: 0xa8, hi: 0xae}, + {value: 0x045f, lo: 0xaf, hi: 0xaf}, + {value: 0x01bb, lo: 0xb0, hi: 0xbf}, + // Block 0x9b, offset 0x338 + {value: 0x0003, lo: 0x0d}, + {value: 0x01eb, lo: 0x80, hi: 0x88}, + {value: 0x045b, lo: 0x89, hi: 0x89}, + {value: 0x01c7, lo: 0x8a, hi: 0x8a}, + {value: 0x01d0, lo: 0x8b, hi: 0x8b}, + {value: 0x01d6, lo: 0x8c, hi: 0x8c}, + {value: 0x01fa, lo: 0x8d, hi: 0x8d}, + {value: 0x01eb, lo: 0x8e, hi: 0x8e}, + {value: 0x01e8, lo: 0x8f, hi: 0x8f}, + {value: 0x0173, lo: 0x90, hi: 0xa0}, + {value: 0x0188, lo: 0xa1, hi: 0xa1}, + {value: 0x01a6, lo: 0xa2, hi: 0xa8}, + {value: 0x045f, lo: 0xa9, hi: 0xa9}, + {value: 0x01bb, lo: 0xaa, hi: 0xbf}, + // Block 0x9c, offset 0x346 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x9d, offset 0x34c + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0x9e, offset 0x34e + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x9f, offset 0x351 + {value: 0x0002, lo: 0x09}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1951, lo: 0x8a, hi: 0x8a}, + {value: 0x1981, lo: 0x8b, hi: 0x8b}, + {value: 0x199c, lo: 0x8c, hi: 0x8c}, + {value: 0x19a2, lo: 0x8d, hi: 0x8d}, + {value: 0x1bc0, lo: 0x8e, hi: 0x8e}, + {value: 0x19ae, lo: 0x8f, hi: 0x8f}, + {value: 0x197b, lo: 0xaa, hi: 0xaa}, + {value: 0x197e, lo: 0xab, hi: 0xab}, + // Block 0xa0, offset 0x35b + {value: 0x0000, lo: 0x01}, + {value: 0x193f, lo: 0x90, hi: 0x90}, + // Block 0xa1, offset 0x35d + {value: 0x0028, lo: 0x09}, + {value: 0x2862, lo: 0x80, hi: 0x80}, + {value: 0x2826, lo: 0x81, hi: 0x81}, + {value: 0x2830, lo: 0x82, hi: 0x82}, + {value: 0x2844, lo: 0x83, hi: 0x84}, + {value: 0x284e, lo: 0x85, hi: 0x86}, + 
{value: 0x283a, lo: 0x87, hi: 0x87}, + {value: 0x2858, lo: 0x88, hi: 0x88}, + {value: 0x0b6f, lo: 0x90, hi: 0x90}, + {value: 0x08e7, lo: 0x91, hi: 0x91}, +} + +// recompMap: 7520 bytes (entries only) +var recompMap map[uint32]rune +var recompMapOnce sync.Once + +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + 
"\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." 
+ // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + 
"\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + 
"\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + 
"\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + 
"\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." 
+ // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "" + // Total size of tables: 53KB (54514 bytes) diff --git a/vendor/golang.org/x/text/width/BUILD.bazel b/vendor/golang.org/x/text/width/BUILD.bazel index 09ea81d4a2e55..98a38b0168b2a 100644 --- a/vendor/golang.org/x/text/width/BUILD.bazel +++ b/vendor/golang.org/x/text/width/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "kind_string.go", "tables10.0.0.go", + "tables11.0.0.go", "tables9.0.0.go", "transform.go", "trieval.go", diff --git a/vendor/golang.org/x/text/width/kind_string.go b/vendor/golang.org/x/text/width/kind_string.go index edbdc54bc1e1a..dd3febd43b272 100644 --- a/vendor/golang.org/x/text/width/kind_string.go +++ b/vendor/golang.org/x/text/width/kind_string.go @@ -4,6 +4,18 @@ package width import "strconv" +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Neutral-0] + _ = x[EastAsianAmbiguous-1] + _ = x[EastAsianWide-2] + _ = x[EastAsianNarrow-3] + _ = x[EastAsianFullwidth-4] + _ = x[EastAsianHalfwidth-5] +} + const _Kind_name = "NeutralEastAsianAmbiguousEastAsianWideEastAsianNarrowEastAsianFullwidthEastAsianHalfwidth" var _Kind_index = [...]uint8{0, 7, 25, 38, 53, 71, 89} diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go index f498862673134..decb8e480939e 100644 --- a/vendor/golang.org/x/text/width/tables10.0.0.go +++ b/vendor/golang.org/x/text/width/tables10.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.10 +// +build go1.10,!go1.13 package width diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go new file mode 100644 index 0000000000000..d6def0e7be5c7 --- /dev/null +++ b/vendor/golang.org/x/text/width/tables11.0.0.go @@ -0,0 +1,1330 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "11.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c0f7712776e71cd4. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 101 blocks, 6464 entries, 12928 bytes +// The third block is the zero block. +var widthValues = [6464]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 
0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 
0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 
0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 
0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 
0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 
0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 
0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 
0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 
0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 0x4000, + // Block 0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 
0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 
0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, 0x11e1: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000, + 0x1230: 0x4000, 0x1231: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 
0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 
0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 
0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, 
+ 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bd: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, + 0x16fa: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1715: 0x4000, 0x1716: 0x4000, + 0x1724: 0x4000, + // Block 0x5d, offset 0x1740 + 0x177b: 0x4000, + 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, + // Block 0x5e, offset 0x1780 + 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, + 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, + 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, + 0x17eb: 0x4000, 0x17ec: 0x4000, + 0x17f4: 0x4000, 0x17f5: 0x4000, + 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000, + // Block 0x60, offset 0x1800 + 0x1810: 0x4000, 0x1811: 0x4000, + 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, + 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, + 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, + 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, + 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, + 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, + 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, + 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, + // Block 0x61, offset 0x1840 + 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, + 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000, + 0x184c: 0x4000, 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, + 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, + 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, + 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000, + 0x1870: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000, + 0x1876: 0x4000, 0x187a: 0x4000, + 0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, + 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 
0x188b: 0x4000, + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, + 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, + 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, + 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, + 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, + 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, + 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, + 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, + 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, + 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, + 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, + 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, + 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, + 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, + 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 0x1929: 0x2000, + 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, + 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, + 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, + 0x193c: 0x2000, 0x193d: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, 
offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, + 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, + 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. 
+var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 
0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 14936 bytes (14KiB) diff --git a/vendor/golang.org/x/time/rate/BUILD.bazel b/vendor/golang.org/x/time/rate/BUILD.bazel index 2b8a7917ab43a..3a0f8ec04876f 100644 --- a/vendor/golang.org/x/time/rate/BUILD.bazel +++ b/vendor/golang.org/x/time/rate/BUILD.bazel @@ -2,13 +2,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = [ - "rate.go", - "rate_go16.go", - "rate_go17.go", - ], + srcs = ["rate.go"], importmap = "k8s.io/kops/vendor/golang.org/x/time/rate", importpath = "golang.org/x/time/rate", visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/net/context:go_default_library"], ) diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 7228d97e96370..ae93e24719742 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -6,6 +6,7 @@ package rate import ( + "context" "fmt" "math" "sync" @@ -212,19 +213,8 @@ func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { return &r } -// contextContext is a temporary(?) copy of the context.Context type -// to support both Go 1.6 using golang.org/x/net/context and Go 1.7+ -// with the built-in context package. If people ever stop using Go 1.6 -// we can remove this. -type contextContext interface { - Deadline() (deadline time.Time, ok bool) - Done() <-chan struct{} - Err() error - Value(key interface{}) interface{} -} - // Wait is shorthand for WaitN(ctx, 1). -func (lim *Limiter) wait(ctx contextContext) (err error) { +func (lim *Limiter) Wait(ctx context.Context) (err error) { return lim.WaitN(ctx, 1) } @@ -232,7 +222,7 @@ func (lim *Limiter) wait(ctx contextContext) (err error) { // It returns an error if n exceeds the Limiter's burst size, the Context is // canceled, or the expected wait time exceeds the Context's Deadline. // The burst limit is ignored if the rate limit is Inf. -func (lim *Limiter) waitN(ctx contextContext, n int) (err error) { +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { if n > lim.burst && lim.limit != Inf { return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) } diff --git a/vendor/golang.org/x/time/rate/rate_go16.go b/vendor/golang.org/x/time/rate/rate_go16.go deleted file mode 100644 index 6bab1850f881e..0000000000000 --- a/vendor/golang.org/x/time/rate/rate_go16.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package rate - -import "golang.org/x/net/context" - -// Wait is shorthand for WaitN(ctx, 1). -func (lim *Limiter) Wait(ctx context.Context) (err error) { - return lim.waitN(ctx, 1) -} - -// WaitN blocks until lim permits n events to happen. 
-// It returns an error if n exceeds the Limiter's burst size, the Context is -// canceled, or the expected wait time exceeds the Context's Deadline. -func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { - return lim.waitN(ctx, n) -} diff --git a/vendor/golang.org/x/time/rate/rate_go17.go b/vendor/golang.org/x/time/rate/rate_go17.go deleted file mode 100644 index f90d85f51e706..0000000000000 --- a/vendor/golang.org/x/time/rate/rate_go17.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package rate - -import "context" - -// Wait is shorthand for WaitN(ctx, 1). -func (lim *Limiter) Wait(ctx context.Context) (err error) { - return lim.waitN(ctx, 1) -} - -// WaitN blocks until lim permits n events to happen. -// It returns an error if n exceeds the Limiter's burst size, the Context is -// canceled, or the expected wait time exceeds the Context's Deadline. -func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { - return lim.waitN(ctx, n) -} diff --git a/vendor/golang.org/x/xerrors/BUILD.bazel b/vendor/golang.org/x/xerrors/BUILD.bazel new file mode 100644 index 0000000000000..c7b36415f1e11 --- /dev/null +++ b/vendor/golang.org/x/xerrors/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "adaptor.go", + "doc.go", + "errors.go", + "fmt.go", + "format.go", + "frame.go", + "wrap.go", + ], + importmap = "k8s.io/kops/vendor/golang.org/x/xerrors", + importpath = "golang.org/x/xerrors", + visibility = ["//visibility:public"], + deps = ["//vendor/golang.org/x/xerrors/internal:go_default_library"], +) diff --git a/vendor/golang.org/x/xerrors/LICENSE b/vendor/golang.org/x/xerrors/LICENSE new file mode 100644 index 0000000000000..e4a47e17f143b --- /dev/null +++ b/vendor/golang.org/x/xerrors/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2019 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
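Not part of the patch itself: a minimal sketch of how the reworked golang.org/x/time/rate Limiter above is driven now that Wait and WaitN take the standard library's context.Context directly (the Go 1.6 shims rate_go16.go/rate_go17.go are deleted). rate.NewLimiter belongs to the same package but does not appear in this hunk, and the limit/burst values below are purely illustrative.

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Permit roughly 5 events per second with a burst of 1.
	lim := rate.NewLimiter(rate.Limit(5), 1)

	// WaitN returns an error if n exceeds the burst size, the context is
	// canceled, or the wait would outlast the context's deadline.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	for i := 0; i < 3; i++ {
		if err := lim.WaitN(ctx, 1); err != nil {
			fmt.Println("rate limit wait failed:", err)
			return
		}
		fmt.Println("event", i, time.Now().Format(time.StampMilli))
	}
}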
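Also not part of the patch: a minimal sketch of typical use of the newly vendored golang.org/x/xerrors package whose sources are added below. It exercises New, Errorf with a trailing ": %w", Is, As, Unwrap, and %+v detail formatting; the sentinel error and function names are illustrative only. (Custom error types can opt into the same %+v detail output by providing a Format method that calls xerrors.FormatError, as described in doc.go below.)

package main

import (
	"fmt"
	"os"

	"golang.org/x/xerrors"
)

// errNotFound is a sentinel created with xerrors.New, which records the
// caller's frame so %+v can show where it was constructed.
var errNotFound = xerrors.New("config not found")

// loadConfig wraps the sentinel. Because the format string ends in ": %w",
// the error returned by Errorf implements Unwrap and keeps errNotFound
// reachable in the chain.
func loadConfig(path string) error {
	return xerrors.Errorf("load %s: %w", path, errNotFound)
}

func main() {
	err := loadConfig("/etc/example.conf")

	fmt.Println(xerrors.Is(err, errNotFound))       // true: Is walks the Unwrap chain
	fmt.Println(xerrors.Unwrap(err) == errNotFound) // true: one level of wrapping

	var pathErr *os.PathError
	fmt.Println(xerrors.As(err, &pathErr)) // false: no *os.PathError in the chain

	// %v prints a single-line message; %+v adds per-error file:line detail.
	fmt.Printf("%v\n", err)
	fmt.Printf("%+v\n", err)
}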
diff --git a/vendor/golang.org/x/xerrors/PATENTS b/vendor/golang.org/x/xerrors/PATENTS new file mode 100644 index 0000000000000..733099041f84f --- /dev/null +++ b/vendor/golang.org/x/xerrors/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/xerrors/README b/vendor/golang.org/x/xerrors/README new file mode 100644 index 0000000000000..aac7867a560b8 --- /dev/null +++ b/vendor/golang.org/x/xerrors/README @@ -0,0 +1,2 @@ +This repository holds the transition packages for the new Go 1.13 error values. +See golang.org/design/29934-error-values. diff --git a/vendor/golang.org/x/xerrors/adaptor.go b/vendor/golang.org/x/xerrors/adaptor.go new file mode 100644 index 0000000000000..4317f24833131 --- /dev/null +++ b/vendor/golang.org/x/xerrors/adaptor.go @@ -0,0 +1,193 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strconv" +) + +// FormatError calls the FormatError method of f with an errors.Printer +// configured according to s and verb, and writes the result to s. +func FormatError(f Formatter, s fmt.State, verb rune) { + // Assuming this function is only called from the Format method, and given + // that FormatError takes precedence over Format, it cannot be called from + // any package that supports errors.Formatter. It is therefore safe to + // disregard that State may be a specific printer implementation and use one + // of our choice instead. + + // limitations: does not support printing error as Go struct. + + var ( + sep = " " // separator before next error + p = &state{State: s} + direct = true + ) + + var err error = f + + switch verb { + // Note that this switch must match the preference order + // for ordinary string printing (%#v before %+v, and so on). 
+ + case 'v': + if s.Flag('#') { + if stringer, ok := err.(fmt.GoStringer); ok { + io.WriteString(&p.buf, stringer.GoString()) + goto exit + } + // proceed as if it were %v + } else if s.Flag('+') { + p.printDetail = true + sep = "\n - " + } + case 's': + case 'q', 'x', 'X': + // Use an intermediate buffer in the rare cases that precision, + // truncation, or one of the alternative verbs (q, x, and X) are + // specified. + direct = false + + default: + p.buf.WriteString("%!") + p.buf.WriteRune(verb) + p.buf.WriteByte('(') + switch { + case err != nil: + p.buf.WriteString(reflect.TypeOf(f).String()) + default: + p.buf.WriteString("") + } + p.buf.WriteByte(')') + io.Copy(s, &p.buf) + return + } + +loop: + for { + switch v := err.(type) { + case Formatter: + err = v.FormatError((*printer)(p)) + case fmt.Formatter: + v.Format(p, 'v') + break loop + default: + io.WriteString(&p.buf, v.Error()) + break loop + } + if err == nil { + break + } + if p.needColon || !p.printDetail { + p.buf.WriteByte(':') + p.needColon = false + } + p.buf.WriteString(sep) + p.inDetail = false + p.needNewline = false + } + +exit: + width, okW := s.Width() + prec, okP := s.Precision() + + if !direct || (okW && width > 0) || okP { + // Construct format string from State s. + format := []byte{'%'} + if s.Flag('-') { + format = append(format, '-') + } + if s.Flag('+') { + format = append(format, '+') + } + if s.Flag(' ') { + format = append(format, ' ') + } + if okW { + format = strconv.AppendInt(format, int64(width), 10) + } + if okP { + format = append(format, '.') + format = strconv.AppendInt(format, int64(prec), 10) + } + format = append(format, string(verb)...) + fmt.Fprintf(s, string(format), p.buf.String()) + } else { + io.Copy(s, &p.buf) + } +} + +var detailSep = []byte("\n ") + +// state tracks error printing state. It implements fmt.State. +type state struct { + fmt.State + buf bytes.Buffer + + printDetail bool + inDetail bool + needColon bool + needNewline bool +} + +func (s *state) Write(b []byte) (n int, err error) { + if s.printDetail { + if len(b) == 0 { + return 0, nil + } + if s.inDetail && s.needColon { + s.needNewline = true + if b[0] == '\n' { + b = b[1:] + } + } + k := 0 + for i, c := range b { + if s.needNewline { + if s.inDetail && s.needColon { + s.buf.WriteByte(':') + s.needColon = false + } + s.buf.Write(detailSep) + s.needNewline = false + } + if c == '\n' { + s.buf.Write(b[k:i]) + k = i + 1 + s.needNewline = true + } + } + s.buf.Write(b[k:]) + if !s.inDetail { + s.needColon = true + } + } else if !s.inDetail { + s.buf.Write(b) + } + return len(b), nil +} + +// printer wraps a state to implement an xerrors.Printer. +type printer state + +func (s *printer) Print(args ...interface{}) { + if !s.inDetail || s.printDetail { + fmt.Fprint((*state)(s), args...) + } +} + +func (s *printer) Printf(format string, args ...interface{}) { + if !s.inDetail || s.printDetail { + fmt.Fprintf((*state)(s), format, args...) + } +} + +func (s *printer) Detail() bool { + s.inDetail = true + return s.printDetail +} diff --git a/vendor/golang.org/x/xerrors/codereview.cfg b/vendor/golang.org/x/xerrors/codereview.cfg new file mode 100644 index 0000000000000..3f8b14b64e83f --- /dev/null +++ b/vendor/golang.org/x/xerrors/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/xerrors/doc.go b/vendor/golang.org/x/xerrors/doc.go new file mode 100644 index 0000000000000..1ad48f50b5b35 --- /dev/null +++ b/vendor/golang.org/x/xerrors/doc.go @@ -0,0 +1,25 @@ +// Copyright 2019 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xerrors implements functions to manipulate errors. +// +// This package supports transitioning to the Go 2 proposal for error values: +// https://golang.org/design/29934-error-values +// +// Most of the functions and types in this package will be incorporated into the +// standard library's errors package in Go 1.13; the behavior of this package's +// Errorf function will be incorporated into the standard library's fmt.Errorf. +// Use this package to get equivalent behavior in all supported Go versions. For +// example, create errors using +// +// xerrors.New("write failed") +// +// or +// +// xerrors.Errorf("while reading: %v", err) +// +// If you want your error type to participate in the new formatting +// implementation for %v and %+v, provide it with a Format method that calls +// xerrors.FormatError, as shown in the example for FormatError. +package xerrors // import "golang.org/x/xerrors" diff --git a/vendor/golang.org/x/xerrors/errors.go b/vendor/golang.org/x/xerrors/errors.go new file mode 100644 index 0000000000000..e88d3772d8611 --- /dev/null +++ b/vendor/golang.org/x/xerrors/errors.go @@ -0,0 +1,33 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import "fmt" + +// errorString is a trivial implementation of error. +type errorString struct { + s string + frame Frame +} + +// New returns an error that formats as the given text. +// +// The returned error contains a Frame set to the caller's location and +// implements Formatter to show this information when printed with details. +func New(text string) error { + return &errorString{text, Caller(1)} +} + +func (e *errorString) Error() string { + return e.s +} + +func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *errorString) FormatError(p Printer) (next error) { + p.Print(e.s) + e.frame.Format(p) + return nil +} diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go new file mode 100644 index 0000000000000..74c1c93ec9c64 --- /dev/null +++ b/vendor/golang.org/x/xerrors/fmt.go @@ -0,0 +1,109 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "fmt" + "strings" + + "golang.org/x/xerrors/internal" +) + +// Errorf formats according to a format specifier and returns the string as a +// value that satisfies error. +// +// The returned error includes the file and line number of the caller when +// formatted with additional detail enabled. If the last argument is an error +// the returned error's Format method will return it if the format string ends +// with ": %s", ": %v", or ": %w". If the last argument is an error and the +// format string ends with ": %w", the returned error implements Wrapper +// with an Unwrap method returning it. +func Errorf(format string, a ...interface{}) error { + err, wrap := lastError(format, a) + format = formatPlusW(format) + if err == nil { + return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)} + } + + // TODO: this is not entirely correct. The error value could be + // printed elsewhere in format if it mixes numbered with unnumbered + // substitutions. 
With relatively small changes to doPrintf we can + // have it optionally ignore extra arguments and pass the argument + // list in its entirety. + msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...) + frame := Frame{} + if internal.EnableTrace { + frame = Caller(1) + } + if wrap { + return &wrapError{msg, err, frame} + } + return &noWrapError{msg, err, frame} +} + +// formatPlusW is used to avoid the vet check that will barf at %w. +func formatPlusW(s string) string { + return s +} + +func lastError(format string, a []interface{}) (err error, wrap bool) { + wrap = strings.HasSuffix(format, ": %w") + if !wrap && + !strings.HasSuffix(format, ": %s") && + !strings.HasSuffix(format, ": %v") { + return nil, false + } + + if len(a) == 0 { + return nil, false + } + + err, ok := a[len(a)-1].(error) + if !ok { + return nil, false + } + + return err, wrap +} + +type noWrapError struct { + msg string + err error + frame Frame +} + +func (e *noWrapError) Error() string { + return fmt.Sprint(e) +} + +func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *noWrapError) FormatError(p Printer) (next error) { + p.Print(e.msg) + e.frame.Format(p) + return e.err +} + +type wrapError struct { + msg string + err error + frame Frame +} + +func (e *wrapError) Error() string { + return fmt.Sprint(e) +} + +func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *wrapError) FormatError(p Printer) (next error) { + p.Print(e.msg) + e.frame.Format(p) + return e.err +} + +func (e *wrapError) Unwrap() error { + return e.err +} diff --git a/vendor/golang.org/x/xerrors/format.go b/vendor/golang.org/x/xerrors/format.go new file mode 100644 index 0000000000000..1bc9c26b97fdf --- /dev/null +++ b/vendor/golang.org/x/xerrors/format.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +// A Formatter formats error messages. +type Formatter interface { + error + + // FormatError prints the receiver's first error and returns the next error in + // the error chain, if any. + FormatError(p Printer) (next error) +} + +// A Printer formats error messages. +// +// The most common implementation of Printer is the one provided by package fmt +// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message +// typically provide their own implementations. +type Printer interface { + // Print appends args to the message output. + Print(args ...interface{}) + + // Printf writes a formatted string. + Printf(format string, args ...interface{}) + + // Detail reports whether error detail is requested. + // After the first call to Detail, all text written to the Printer + // is formatted as additional detail, or ignored when + // detail has not been requested. + // If Detail returns false, the caller can avoid printing the detail at all. + Detail() bool +} diff --git a/vendor/golang.org/x/xerrors/frame.go b/vendor/golang.org/x/xerrors/frame.go new file mode 100644 index 0000000000000..0de628ec501f6 --- /dev/null +++ b/vendor/golang.org/x/xerrors/frame.go @@ -0,0 +1,56 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "runtime" +) + +// A Frame contains part of a call stack. 
+type Frame struct { + // Make room for three PCs: the one we were asked for, what it called, + // and possibly a PC for skipPleaseUseCallersFrames. See: + // https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169 + frames [3]uintptr +} + +// Caller returns a Frame that describes a frame on the caller's stack. +// The argument skip is the number of frames to skip over. +// Caller(0) returns the frame for the caller of Caller. +func Caller(skip int) Frame { + var s Frame + runtime.Callers(skip+1, s.frames[:]) + return s +} + +// location reports the file, line, and function of a frame. +// +// The returned function may be "" even if file and line are not. +func (f Frame) location() (function, file string, line int) { + frames := runtime.CallersFrames(f.frames[:]) + if _, ok := frames.Next(); !ok { + return "", "", 0 + } + fr, ok := frames.Next() + if !ok { + return "", "", 0 + } + return fr.Function, fr.File, fr.Line +} + +// Format prints the stack as error detail. +// It should be called from an error's Format implementation +// after printing any other error detail. +func (f Frame) Format(p Printer) { + if p.Detail() { + function, file, line := f.location() + if function != "" { + p.Printf("%s\n ", function) + } + if file != "" { + p.Printf("%s:%d\n", file, line) + } + } +} diff --git a/vendor/golang.org/x/xerrors/go.mod b/vendor/golang.org/x/xerrors/go.mod new file mode 100644 index 0000000000000..870d4f612dbf6 --- /dev/null +++ b/vendor/golang.org/x/xerrors/go.mod @@ -0,0 +1,3 @@ +module golang.org/x/xerrors + +go 1.11 diff --git a/vendor/golang.org/x/xerrors/internal/BUILD.bazel b/vendor/golang.org/x/xerrors/internal/BUILD.bazel new file mode 100644 index 0000000000000..2f319b74ab52a --- /dev/null +++ b/vendor/golang.org/x/xerrors/internal/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["internal.go"], + importmap = "k8s.io/kops/vendor/golang.org/x/xerrors/internal", + importpath = "golang.org/x/xerrors/internal", + visibility = ["//vendor/golang.org/x/xerrors:__subpackages__"], +) diff --git a/vendor/golang.org/x/xerrors/internal/internal.go b/vendor/golang.org/x/xerrors/internal/internal.go new file mode 100644 index 0000000000000..89f4eca5df7bc --- /dev/null +++ b/vendor/golang.org/x/xerrors/internal/internal.go @@ -0,0 +1,8 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// EnableTrace indicates whether stack information should be recorded in errors. +var EnableTrace = true diff --git a/vendor/golang.org/x/xerrors/wrap.go b/vendor/golang.org/x/xerrors/wrap.go new file mode 100644 index 0000000000000..9a3b510374ec8 --- /dev/null +++ b/vendor/golang.org/x/xerrors/wrap.go @@ -0,0 +1,106 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "reflect" +) + +// A Wrapper provides context around another error. +type Wrapper interface { + // Unwrap returns the next error in the error chain. + // If there is no next error, Unwrap returns nil. + Unwrap() error +} + +// Opaque returns an error with the same error formatting as err +// but that does not match err and cannot be unwrapped. 
+func Opaque(err error) error { + return noWrapper{err} +} + +type noWrapper struct { + error +} + +func (e noWrapper) FormatError(p Printer) (next error) { + if f, ok := e.error.(Formatter); ok { + return f.FormatError(p) + } + p.Print(e.error) + return nil +} + +// Unwrap returns the result of calling the Unwrap method on err, if err implements +// Unwrap. Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + u, ok := err.(Wrapper) + if !ok { + return nil + } + return u.Unwrap() +} + +// Is reports whether any error in err's chain matches target. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { + if target == nil { + return err == target + } + + isComparable := reflect.TypeOf(target).Comparable() + for { + if isComparable && err == target { + return true + } + if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { + return true + } + // TODO: consider supporing target.Is(err). This would allow + // user-definable predicates, but also may allow for coping with sloppy + // APIs, thereby making it easier to get away with them. + if err = Unwrap(err); err == nil { + return false + } + } +} + +// As finds the first error in err's chain that matches the type to which target +// points, and if so, sets the target to its value and returns true. An error +// matches a type if it is assignable to the target type, or if it has a method +// As(interface{}) bool such that As(target) returns true. As will panic if target +// is not a non-nil pointer to a type which implements error or is of interface type. +// +// The As method should set the target to its value and return true if err +// matches the type to which target points. 
+func As(err error, target interface{}) bool { + if target == nil { + panic("errors: target cannot be nil") + } + val := reflect.ValueOf(target) + typ := val.Type() + if typ.Kind() != reflect.Ptr || val.IsNil() { + panic("errors: target must be a non-nil pointer") + } + if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) { + panic("errors: *target must be interface or implement error") + } + targetType := typ.Elem() + for err != nil { + if reflect.TypeOf(err).AssignableTo(targetType) { + val.Elem().Set(reflect.ValueOf(err)) + return true + } + if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) { + return true + } + err = Unwrap(err) + } + return false +} + +var errorType = reflect.TypeOf((*error)(nil)).Elem() diff --git a/vendor/google.golang.org/api/compute/v0.alpha/BUILD.bazel b/vendor/google.golang.org/api/compute/v0.alpha/BUILD.bazel index 7be53e60515fe..cca76f7e7470c 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/BUILD.bazel +++ b/vendor/google.golang.org/api/compute/v0.alpha/BUILD.bazel @@ -9,5 +9,7 @@ go_library( deps = [ "//vendor/google.golang.org/api/gensupport:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/google.golang.org/api/option:go_default_library", + "//vendor/google.golang.org/api/transport/http:go_default_library", ], ) diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json index 2b613a87c0356..f2867149af157 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json @@ -29,7 +29,7 @@ "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/Pblb0-q42Qtp25GnNdbN5oVoa5U\"", + "etag": "\"VPK3KBfpaEgZ16pozGOoMYfKc0U/CGOuQXfe0oClVxpR-J11Xr1lOIU\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -150,7 +150,7 @@ "acceleratorType": { "description": "Name of the accelerator type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -302,7 +302,7 @@ "address": { "description": "Name of the address resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -348,7 +348,7 @@ "address": { "description": "Name of the address resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -571,12 +571,12 @@ } } }, - "allocations": { + "autoscalers": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of allocations.", + "description": "Retrieves an aggregated list of autoscalers.", "httpMethod": "GET", - "id": "compute.allocations.aggregatedList", + "id": "compute.autoscalers.aggregatedList", "parameterOrder": [ "project" ], @@ -612,9 +612,9 @@ "type": "string" } }, - "path": "{project}/aggregated/allocations", + "path": "{project}/aggregated/autoscalers", "response": { - "$ref": 
"AllocationAggregatedList" + "$ref": "AutoscalerAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -623,19 +623,19 @@ ] }, "delete": { - "description": "Deletes the specified allocation.", + "description": "Deletes the specified autoscaler.", "httpMethod": "DELETE", - "id": "compute.allocations.delete", + "id": "compute.autoscalers.delete", "parameterOrder": [ "project", "zone", - "allocation" + "autoscaler" ], "parameters": { - "allocation": { - "description": "Name of the allocation to delete.", + "autoscaler": { + "description": "Name of the autoscaler to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -659,7 +659,7 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/allocations/{allocation}", + "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", "response": { "$ref": "Operation" }, @@ -669,19 +669,19 @@ ] }, "get": { - "description": "Retrieves all information of the specified allocation.", + "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", "httpMethod": "GET", - "id": "compute.allocations.get", + "id": "compute.autoscalers.get", "parameterOrder": [ "project", "zone", - "allocation" + "autoscaler" ], "parameters": { - "allocation": { - "description": "Name of the allocation to retrieve.", + "autoscaler": { + "description": "Name of the autoscaler to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -700,51 +700,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/allocations/{allocation}", - "response": { - "$ref": "Allocation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - "httpMethod": "GET", - "id": "compute.allocations.getIamPolicy", - "parameterOrder": [ - "project", - "zone", - "resource" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/allocations/{resource}/getIamPolicy", + "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", "response": { - "$ref": "Policy" + "$ref": "Autoscaler" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -753,9 +711,9 @@ ] }, "insert": { - "description": "Creates a new allocation.", + "description": "Creates an autoscaler in the specified project using the data included in the request.", "httpMethod": "POST", - "id": "compute.allocations.insert", + "id": "compute.autoscalers.insert", "parameterOrder": [ "project", "zone" @@ -781,9 +739,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/allocations", + "path": "{project}/zones/{zone}/autoscalers", "request": { - "$ref": "Allocation" + "$ref": "Autoscaler" }, "response": { "$ref": "Operation" @@ -794,9 +752,9 @@ ] }, "list": { - "description": "A list all the allocations that have been configured for the specified project in specified zone.", + "description": "Retrieves a list of autoscalers contained within the specified zone.", "httpMethod": "GET", - "id": "compute.allocations.list", + "id": "compute.autoscalers.list", "parameterOrder": [ "project", "zone" @@ -840,9 +798,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/allocations", + "path": "{project}/zones/{zone}/autoscalers", "response": { - "$ref": "AllocationList" + "$ref": "AutoscalerList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -850,16 +808,21 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - "httpMethod": "POST", - "id": "compute.allocations.setIamPolicy", + "patch": { + "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.autoscalers.patch", "parameterOrder": [ "project", - "zone", - "resource" + "zone" ], "parameters": { + "autoscaler": { + "description": "Name of the autoscaler to patch.", + "location": "query", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -867,27 +830,25 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" }, "zone": { - "description": "The name of the zone for this request.", + "description": "Name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/zones/{zone}/allocations/{resource}/setIamPolicy", + "path": "{project}/zones/{zone}/autoscalers", "request": { - "$ref": "ZoneSetPolicyRequest" + "$ref": "Autoscaler" }, "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -897,7 +858,7 @@ "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.allocations.testIamPermissions", + "id": "compute.autoscalers.testIamPermissions", "parameterOrder": [ "project", "zone", @@ -926,7 +887,7 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/allocations/{resource}/testIamPermissions", + "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", "request": { "$ref": "TestPermissionsRequest" }, @@ -939,399 +900,19 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "updateResourceShape": { - "description": "Updates the shape of an allocation.", - "httpMethod": "POST", - "id": "compute.allocations.updateResourceShape", - "parameterOrder": [ - "project", - "zone", - "allocation" - ], - "parameters": { - "allocation": { - "description": "Name of the allocation to update.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "zone": { - "description": "Name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/allocations/{allocation}/updateResourceShape", - "request": { - "$ref": "AllocationsUpdateResourceShapeRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "autoscalers": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of autoscalers.", - "httpMethod": "GET", - "id": "compute.autoscalers.aggregatedList", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - } - }, - "path": "{project}/aggregated/autoscalers", - "response": { - "$ref": "AutoscalerAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "description": "Deletes the specified autoscaler.", - "httpMethod": "DELETE", - "id": "compute.autoscalers.delete", + "update": { + "description": "Updates an autoscaler in the specified project using the data included in the request.", + "httpMethod": "PUT", + "id": "compute.autoscalers.update", "parameterOrder": [ "project", - "zone", - "autoscaler" + "zone" ], "parameters": { "autoscaler": { - "description": "Name of the autoscaler to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "description": "Name of the autoscaler to update.", "location": "query", - "type": "string" - }, - "zone": { - "description": "Name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Returns the specified autoscaler resource. 
Gets a list of available autoscalers by making a list() request.", - "httpMethod": "GET", - "id": "compute.autoscalers.get", - "parameterOrder": [ - "project", - "zone", - "autoscaler" - ], - "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "zone": { - "description": "Name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", - "response": { - "$ref": "Autoscaler" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates an autoscaler in the specified project using the data included in the request.", - "httpMethod": "POST", - "id": "compute.autoscalers.insert", - "parameterOrder": [ - "project", - "zone" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "zone": { - "description": "Name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/autoscalers", - "request": { - "$ref": "Autoscaler" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "description": "Retrieves a list of autoscalers contained within the specified zone.", - "httpMethod": "GET", - "id": "compute.autoscalers.list", - "parameterOrder": [ - "project", - "zone" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "zone": { - "description": "Name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/autoscalers", - "response": { - "$ref": "AutoscalerList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "httpMethod": "PATCH", - "id": "compute.autoscalers.patch", - "parameterOrder": [ - "project", - "zone" - ], - "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to patch.", - "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "zone": { - "description": "Name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/autoscalers", - "request": { - "$ref": "Autoscaler" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.autoscalers.testIamPermissions", - "parameterOrder": [ - "project", - "zone", - "resource" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "description": "Updates an autoscaler in the specified project using the data included in the request.", - "httpMethod": "PUT", - "id": "compute.autoscalers.update", - "parameterOrder": [ - "project", - "zone" - ], - "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to update.", - "location": "query", - "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "type": "string" }, "project": { @@ -1422,7 +1003,7 @@ "backendBucket": { "description": "Name of the BackendBucket resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1504,7 +1085,7 @@ "backendBucket": { "description": "Name of the BackendBucket resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1654,7 +1235,7 @@ "backendBucket": { "description": "Name of the BackendBucket resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1768,7 +1349,7 @@ "backendBucket": { "description": "Name of the BackendBucket resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1902,7 +1483,7 @@ "backendService": { "description": "Name of the BackendService resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1984,7 +1565,7 @@ "backendService": { "description": "Name of the BackendService resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -2018,7 +1599,7 @@ "backendService": { "description": "Name of the BackendService resource to which the queried instance belongs.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -2254,7 +1835,7 @@ "backendService": { "description": "Name of the BackendService resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -2349,7 +1930,7 @@ "diskType": { "description": "Name of the disk type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -2555,6 +2136,7 @@ "type": "string" }, "guestFlush": { + "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. 
Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", "location": "query", "type": "boolean" }, @@ -3060,21 +2642,21 @@ } } }, - "firewalls": { + "externalVpnGateways": { "methods": { "delete": { - "description": "Deletes the specified firewall.", + "description": "Deletes the specified externalVpnGateway.", "httpMethod": "DELETE", - "id": "compute.firewalls.delete", + "id": "compute.externalVpnGateways.delete", "parameterOrder": [ "project", - "firewall" + "externalVpnGateway" ], "parameters": { - "firewall": { - "description": "Name of the firewall rule to delete.", + "externalVpnGateway": { + "description": "Name of the externalVpnGateways to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -3091,7 +2673,7 @@ "type": "string" } }, - "path": "{project}/global/firewalls/{firewall}", + "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", "response": { "$ref": "Operation" }, @@ -3101,18 +2683,18 @@ ] }, "get": { - "description": "Returns the specified firewall.", + "description": "Returns the specified externalVpnGateway. Get a list of available externalVpnGateways by making a list() request.", "httpMethod": "GET", - "id": "compute.firewalls.get", + "id": "compute.externalVpnGateways.get", "parameterOrder": [ "project", - "firewall" + "externalVpnGateway" ], "parameters": { - "firewall": { - "description": "Name of the firewall rule to return.", + "externalVpnGateway": { + "description": "Name of the externalVpnGateway to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -3124,9 +2706,9 @@ "type": "string" } }, - "path": "{project}/global/firewalls/{firewall}", + "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", "response": { - "$ref": "Firewall" + "$ref": "ExternalVpnGateway" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -3135,9 +2717,9 @@ ] }, "insert": { - "description": "Creates a firewall rule in the specified project using the data included in the request.", + "description": "Creates a ExternalVpnGateway in the specified project using the data included in the request.", "httpMethod": "POST", - "id": "compute.firewalls.insert", + "id": "compute.externalVpnGateways.insert", "parameterOrder": [ "project" ], @@ -3155,9 +2737,9 @@ "type": "string" } }, - "path": "{project}/global/firewalls", + "path": "{project}/global/externalVpnGateways", "request": { - "$ref": "Firewall" + "$ref": "ExternalVpnGateway" }, "response": { "$ref": "Operation" @@ -3168,9 +2750,9 @@ ] }, "list": { - "description": "Retrieves the list of firewall rules available to the specified project.", + "description": "Retrieves the list of ExternalVpnGateway available to the specified project.", "httpMethod": "GET", - "id": "compute.firewalls.list", + "id": "compute.externalVpnGateways.list", "parameterOrder": [ "project" ], @@ -3206,9 +2788,9 @@ "type": "string" } }, - "path": "{project}/global/firewalls", + "path": "{project}/global/externalVpnGateways", "response": { - "$ref": "FirewallList" + "$ref": "ExternalVpnGatewayList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -3216,22 +2798,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Updates the specified firewall rule with the data 
included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "httpMethod": "PATCH", - "id": "compute.firewalls.patch", + "setLabels": { + "description": "Sets the labels on an ExternalVpnGateway. To learn more about labels, read the Labeling Resources documentation.", + "httpMethod": "POST", + "id": "compute.externalVpnGateways.setLabels", "parameterOrder": [ "project", - "firewall" + "resource" ], "parameters": { - "firewall": { - "description": "Name of the firewall rule to patch.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -3239,15 +2814,17 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" } }, - "path": "{project}/global/firewalls/{firewall}", + "path": "{project}/global/externalVpnGateways/{resource}/setLabels", "request": { - "$ref": "Firewall" + "$ref": "GlobalSetLabelsRequest" }, "response": { "$ref": "Operation" @@ -3260,7 +2837,7 @@ "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.firewalls.testIamPermissions", + "id": "compute.externalVpnGateways.testIamPermissions", "parameterOrder": [ "project", "resource" @@ -3281,7 +2858,7 @@ "type": "string" } }, - "path": "{project}/global/firewalls/{resource}/testIamPermissions", + "path": "{project}/global/externalVpnGateways/{resource}/testIamPermissions", "request": { "$ref": "TestPermissionsRequest" }, @@ -3293,18 +2870,22 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "update": { - "description": "Updates the specified firewall rule with the data included in the request. 
The PUT method can only update the following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", - "httpMethod": "PUT", - "id": "compute.firewalls.update", + } + } + }, + "firewalls": { + "methods": { + "delete": { + "description": "Deletes the specified firewall.", + "httpMethod": "DELETE", + "id": "compute.firewalls.delete", "parameterOrder": [ "project", "firewall" ], "parameters": { "firewall": { - "description": "Name of the firewall rule to update.", + "description": "Name of the firewall rule to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -3324,6 +2905,70 @@ } }, "path": "{project}/global/firewalls/{firewall}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified firewall.", + "httpMethod": "GET", + "id": "compute.firewalls.get", + "parameterOrder": [ + "project", + "firewall" + ], + "parameters": { + "firewall": { + "description": "Name of the firewall rule to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/firewalls/{firewall}", + "response": { + "$ref": "Firewall" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a firewall rule in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.firewalls.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/firewalls", "request": { "$ref": "Firewall" }, @@ -3334,15 +2979,183 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "forwardingRules": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of forwarding rules.", + }, + "list": { + "description": "Retrieves the list of firewall rules available to the specified project.", "httpMethod": "GET", - "id": "compute.forwardingRules.aggregatedList", + "id": "compute.firewalls.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/firewalls", + "response": { + "$ref": "FirewallList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.firewalls.patch", + "parameterOrder": [ + "project", + "firewall" + ], + "parameters": { + "firewall": { + "description": "Name of the firewall rule to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/firewalls/{firewall}", + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.firewalls.testIamPermissions", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/firewalls/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "update": { + "description": "Updates the specified firewall rule with the data included in the request. The PUT method can only update the following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", + "httpMethod": "PUT", + "id": "compute.firewalls.update", + "parameterOrder": [ + "project", + "firewall" + ], + "parameters": { + "firewall": { + "description": "Name of the firewall rule to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
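As an illustrative aside to the firewall hunks above, which document the optional requestId query parameter for idempotent retries: a minimal Go sketch of how a caller might exercise compute.firewalls.insert with a request ID, assuming the public google.golang.org/api/compute/v1 client (the vendored discovery document here may correspond to a beta/alpha surface) and placeholder project and rule names.

package main

import (
	"context"
	"log"

	"github.com/google/uuid"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	rule := &compute.Firewall{
		Name:         "allow-ssh-example", // placeholder name, for illustration only
		Network:      "global/networks/default",
		SourceRanges: []string{"0.0.0.0/0"},
		Allowed:      []*compute.FirewallAllowed{{IPProtocol: "tcp", Ports: []string{"22"}}},
	}

	// One request ID per logical mutation: a retry that reuses the same ID
	// is ignored by the server once the original operation was accepted.
	reqID := uuid.New().String()
	op, err := svc.Firewalls.Insert("my-project", rule).RequestId(reqID).Context(ctx).Do()
	if err != nil {
		// Safe to retry with the same reqID; the server deduplicates.
		log.Fatal(err)
	}
	log.Printf("insert operation: %s (%s)", op.Name, op.Status)
}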
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/firewalls/{firewall}", + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "forwardingRules": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of forwarding rules.", + "httpMethod": "GET", + "id": "compute.forwardingRules.aggregatedList", "parameterOrder": [ "project" ], @@ -3401,7 +3214,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3447,7 +3260,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3587,7 +3400,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3685,7 +3498,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource in which target is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3782,7 +3595,7 @@ "address": { "description": "Name of the address resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3820,7 +3633,7 @@ "address": { "description": "Name of the address resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4013,7 +3826,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4051,7 +3864,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4167,7 +3980,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4244,7 +4057,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource in which target is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": 
true, "type": "string" }, @@ -4312,38 +4125,62 @@ } } }, - "globalOperations": { + "globalNetworkEndpointGroups": { "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of all operations.", - "httpMethod": "GET", - "id": "compute.globalOperations.aggregatedList", + "attachNetworkEndpoints": { + "description": "Attach a network endpoint to the specified network endpoint group.", + "httpMethod": "POST", + "id": "compute.globalNetworkEndpointGroups.attachNetworkEndpoints", "parameterOrder": [ - "project" + "project", + "networkEndpointGroup" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", + "networkEndpointGroup": { + "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", + "location": "path", + "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + } + }, + "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + "request": { + "$ref": "GlobalNetworkEndpointGroupsAttachEndpointsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "delete": { + "description": "Deletes the specified network endpoint group.Note that the NEG cannot be deleted if there are backend services referencing it.", + "httpMethod": "DELETE", + "id": "compute.globalNetworkEndpointGroups.delete", + "parameterOrder": [ + "project", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" }, "project": { "description": "Project ID for this request.", @@ -4351,31 +4188,34 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "{project}/aggregated/operations", + "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}", "response": { - "$ref": "OperationAggregatedList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "delete": { - "description": "Deletes the specified Operations resource.", - "httpMethod": "DELETE", - "id": "compute.globalOperations.delete", + "detachNetworkEndpoints": { + "description": "Detach the network endpoint from the specified network endpoint group.", + "httpMethod": "POST", + "id": "compute.globalNetworkEndpointGroups.detachNetworkEndpoints", "parameterOrder": [ "project", - "operation" + "networkEndpointGroup" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to delete.", + "networkEndpointGroup": { + "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -4385,27 +4225,37 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "{project}/global/operations/{operation}", + "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + "request": { + "$ref": "GlobalNetworkEndpointGroupsDetachEndpointsRequest" + }, + "response": { + "$ref": "Operation" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, "get": { - "description": "Retrieves the specified Operations resource. Gets a list of operations by making a list() request.", + "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", "httpMethod": "GET", - "id": "compute.globalOperations.get", + "id": "compute.globalNetworkEndpointGroups.get", "parameterOrder": [ "project", - "operation" + "networkEndpointGroup" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to return.", + "networkEndpointGroup": { + "description": "The name of the network endpoint group. 
It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -4417,9 +4267,9 @@ "type": "string" } }, - "path": "{project}/global/operations/{operation}", + "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}", "response": { - "$ref": "Operation" + "$ref": "NetworkEndpointGroup" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -4427,10 +4277,265 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "insert": { + "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + "httpMethod": "POST", + "id": "compute.globalNetworkEndpointGroups.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/networkEndpointGroups", + "request": { + "$ref": "NetworkEndpointGroup" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "list": { - "description": "Retrieves a list of Operation resources contained within the specified project.", + "description": "Retrieves the list of network endpoint groups that are located in the specified project.", "httpMethod": "GET", - "id": "compute.globalOperations.list", + "id": "compute.globalNetworkEndpointGroups.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/networkEndpointGroups", + "response": { + "$ref": "NetworkEndpointGroupList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "listNetworkEndpoints": { + "description": "Lists the network endpoints in the specified network endpoint group.", + "httpMethod": "POST", + "id": "compute.globalNetworkEndpointGroups.listNetworkEndpoints", + "parameterOrder": [ + "project", + "networkEndpointGroup" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "networkEndpointGroup": { + "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + "response": { + "$ref": "NetworkEndpointGroupsListNetworkEndpoints" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "globalOperations": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of all operations.", + "httpMethod": "GET", + "id": "compute.globalOperations.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
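The list methods in these hunks share the filter, maxResults, orderBy, and pageToken parameters described above. As a hedged sketch only, here is how a Go caller might consume a filtered, paginated firewall listing with the generated v1 client, letting the library follow nextPageToken; the filter string and project name are illustrative assumptions.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Filter and ordering strings follow the grammar quoted in the
	// discovery document; the concrete values are placeholders.
	call := svc.Firewalls.List("my-project").
		Filter(`name != "default-allow-ssh"`).
		MaxResults(100).
		OrderBy("creationTimestamp desc")

	// Pages follows nextPageToken until the listing is exhausted.
	if err := call.Pages(ctx, func(page *compute.FirewallList) error {
		for _, fw := range page.Items {
			log.Printf("%s -> %d allow rules", fw.Name, len(fw.Allowed))
		}
		return nil // returning a non-nil error stops iteration early
	}); err != nil {
		log.Fatal(err)
	}
}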
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/operations", + "response": { + "$ref": "OperationAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified Operations resource.", + "httpMethod": "DELETE", + "id": "compute.globalOperations.delete", + "parameterOrder": [ + "project", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/operations/{operation}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Retrieves the specified Operations resource. 
Gets a list of operations by making a list() request.", + "httpMethod": "GET", + "id": "compute.globalOperations.get", + "parameterOrder": [ + "project", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/operations/{operation}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "description": "Retrieves a list of Operation resources contained within the specified project.", + "httpMethod": "GET", + "id": "compute.globalOperations.list", "parameterOrder": [ "project" ], @@ -4488,7 +4593,7 @@ "operation": { "description": "Name of the Operations resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4525,7 +4630,7 @@ "operation": { "description": "Name of the Operations resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4552,7 +4657,7 @@ "operation": { "description": "Name of the Operations resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4627,7 +4732,7 @@ "operation": { "description": "Name of the Operations resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4712,7 +4817,7 @@ "healthCheck": { "description": "Name of the HealthCheck resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4750,7 +4855,7 @@ "healthCheck": { "description": "Name of the HealthCheck resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4866,7 +4971,7 @@ "healthCheck": { "description": "Name of the HealthCheck resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4944,7 +5049,7 @@ "healthCheck": { "description": "Name of the HealthCheck resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4989,7 +5094,7 @@ "httpHealthCheck": { "description": "Name of the HttpHealthCheck resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -5027,7 +5132,7 @@ "httpHealthCheck": { "description": "Name of the HttpHealthCheck resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -5143,7 +5248,7 @@ "httpHealthCheck": { "description": "Name of the HttpHealthCheck resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -5221,7 +5326,7 @@ "httpHealthCheck": { "description": "Name of the HttpHealthCheck resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -5266,7 +5371,7 @@ "httpsHealthCheck": { "description": "Name of the HttpsHealthCheck resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -5304,7 +5409,7 @@ "httpsHealthCheck": { "description": "Name of the HttpsHealthCheck resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -5420,7 +5525,7 @@ "httpsHealthCheck": { "description": "Name of the HttpsHealthCheck resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -5498,7 +5603,7 @@ "httpsHealthCheck": { "description": "Name of the HttpsHealthCheck resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -6542,10 +6647,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "recreateInstances": { - "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "patchPerInstanceConfigs": { + "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", "httpMethod": "POST", - "id": "compute.instanceGroupManagers.recreateInstances", + "id": "compute.instanceGroupManagers.patchPerInstanceConfigs", "parameterOrder": [ "project", "zone", @@ -6553,7 +6658,7 @@ ], "parameters": { "instanceGroupManager": { - "description": "The name of the managed instance group.", + "description": "The name of the managed instance group. 
It should conform to RFC1035.", "location": "path", "required": true, "type": "string" @@ -6571,15 +6676,15 @@ "type": "string" }, "zone": { - "description": "The name of the zone where the managed instance group is located.", + "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", "location": "path", "required": true, "type": "string" } }, - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", "request": { - "$ref": "InstanceGroupManagersRecreateInstancesRequest" + "$ref": "InstanceGroupManagersPatchPerInstanceConfigsReq" }, "response": { "$ref": "Operation" @@ -6589,15 +6694,14 @@ "https://www.googleapis.com/auth/compute" ] }, - "resize": { - "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + "recreateInstances": { + "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "httpMethod": "POST", - "id": "compute.instanceGroupManagers.resize", + "id": "compute.instanceGroupManagers.recreateInstances", "parameterOrder": [ "project", "zone", - "instanceGroupManager", - "size" + "instanceGroupManager" ], "parameters": { "instanceGroupManager": { @@ -6618,13 +6722,6 @@ "location": "query", "type": "string" }, - "size": { - "description": "The number of running instances that the managed instance group should maintain at any given time. 
The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - "format": "int32", - "location": "query", - "required": true, - "type": "integer" - }, "zone": { "description": "The name of the zone where the managed instance group is located.", "location": "path", @@ -6632,7 +6729,10 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "request": { + "$ref": "InstanceGroupManagersRecreateInstancesRequest" + }, "response": { "$ref": "Operation" }, @@ -6641,14 +6741,66 @@ "https://www.googleapis.com/auth/compute" ] }, - "resizeAdvanced": { - "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + "resize": { + "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", "httpMethod": "POST", - "id": "compute.instanceGroupManagers.resizeAdvanced", + "id": "compute.instanceGroupManagers.resize", "parameterOrder": [ "project", "zone", - "instanceGroupManager" + "instanceGroupManager", + "size" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "size": { + "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + "format": "int32", + "location": "query", + "required": true, + "type": "integer" + }, + "zone": { + "description": "The name of the zone where the managed instance group is located.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "resizeAdvanced": { + "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + "httpMethod": "POST", + "id": "compute.instanceGroupManagers.resizeAdvanced", + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" ], "parameters": { "instanceGroupManager": { @@ -6922,7 +7074,7 @@ ] }, "updatePerInstanceConfigs": { - "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", + "description": "Insert or update (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.updatePerInstanceConfigs", "parameterOrder": [ @@ -7458,7 +7610,7 @@ "instanceTemplates": { "methods": { "delete": { - "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It's not possible to delete templates which are in use by an instance group.", + "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. 
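The instanceGroupManagers.resize hunks above note that the operation is marked DONE as soon as the resize actions are scheduled, so callers typically poll the returned Operation (and, if needed, listManagedInstances) afterwards. A minimal Go sketch under the same assumptions as the earlier examples (public v1 client, placeholder project/zone/group names):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	compute "google.golang.org/api/compute/v1"
)

// resizeAndWait resizes a zonal managed instance group and polls the
// returned zonal Operation until it reports DONE.
func resizeAndWait(ctx context.Context, svc *compute.Service, project, zone, igm string, size int64) error {
	op, err := svc.InstanceGroupManagers.Resize(project, zone, igm, size).Context(ctx).Do()
	if err != nil {
		return err
	}
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.ZoneOperations.Get(project, zone, op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("resize failed: %+v", op.Error.Errors)
	}
	// Per the API description, verify individual instance actions with
	// listManagedInstances if you need per-VM status.
	return nil
}

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := resizeAndWait(ctx, svc, "my-project", "us-central1-a", "my-igm", 3); err != nil {
		log.Fatal(err)
	}
}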
It is not possible to delete templates that are already in use by a managed instance group.", "httpMethod": "DELETE", "id": "compute.instanceTemplates.delete", "parameterOrder": [ @@ -7469,7 +7621,7 @@ "instanceTemplate": { "description": "The name of the instance template to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -7507,7 +7659,7 @@ "instanceTemplate": { "description": "The name of the instance template.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -7597,7 +7749,7 @@ ] }, "list": { - "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", + "description": "Retrieves a list of instance templates that are contained within the specified project.", "httpMethod": "GET", "id": "compute.instanceTemplates.list", "parameterOrder": [ @@ -7887,7 +8039,7 @@ ], "parameters": { "forceAttach": { - "description": "Whether to force attach the disk even if it's currently attached to another instance. This is only available for regional disks.", + "description": "Whether to force attach the disk even if it's currently attached to another instance.", "location": "query", "type": "boolean" }, @@ -8131,6 +8283,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getEffectiveFirewalls": { + "description": "Returns effective firewalls applied to an interface of the instance.", + "httpMethod": "GET", + "id": "compute.instances.getEffectiveFirewalls", + "parameterOrder": [ + "project", + "zone", + "instance", + "networkInterface" + ], + "parameters": { + "instance": { + "description": "Name of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "networkInterface": { + "description": "The name of the network interface to get the effective firewalls.", + "location": "query", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls", + "response": { + "$ref": "InstancesGetEffectiveFirewallsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "getGuestAttributes": { "description": "Returns the specified guest attributes entry.", "httpMethod": "GET", @@ -8282,6 +8483,48 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getShieldedInstanceIdentity": { + "description": "Returns the Shielded Instance Identity of an instance", + "httpMethod": "GET", + "id": "compute.instances.getShieldedInstanceIdentity", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name or id of the instance scoping this request.", + "location": "path", + "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity", + "response": { + "$ref": "ShieldedInstanceIdentity" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "getShieldedVmIdentity": { "description": "Returns the Shielded VM Identity of an instance", "httpMethod": "GET", @@ -8547,7 +8790,7 @@ ] }, "reset": { - "description": "Performs a reset on the instance. For more information, see Resetting an instance.", + "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance.", "httpMethod": "POST", "id": "compute.instances.reset", "parameterOrder": [ @@ -9141,6 +9384,55 @@ "https://www.googleapis.com/auth/compute" ] }, + "setShieldedInstanceIntegrityPolicy": { + "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.instances.setShieldedInstanceIntegrityPolicy", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name or id of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + "request": { + "$ref": "ShieldedInstanceIntegrityPolicy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setShieldedVmIntegrityPolicy": { "description": "Sets the Shielded VM integrity policy for a VM instance. You can only use this method on a running VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", @@ -9683,6 +9975,55 @@ "https://www.googleapis.com/auth/compute" ] }, + "updateShieldedInstanceConfig": { + "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.instances.updateShieldedInstanceConfig", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name or id of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig", + "request": { + "$ref": "ShieldedInstanceConfig" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "updateShieldedVmConfig": { "description": "Updates the Shielded VM config for a VM instance. You can only use this method on a stopped VM instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", @@ -9798,7 +10139,7 @@ "interconnectAttachment": { "description": "Name of the interconnect attachment to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9844,7 +10185,7 @@ "interconnectAttachment": { "description": "Name of the interconnect attachment to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9942,6 +10283,11 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "validateOnly": { + "description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/interconnectAttachments", @@ -10026,7 +10372,7 @@ "interconnectAttachment": { "description": "Name of the interconnect attachment to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10216,7 +10562,7 @@ "interconnectLocation": { "description": "Name of the interconnect location to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10340,7 +10686,7 @@ "interconnect": { "description": "Name of the interconnect to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10378,7 +10724,7 @@ "interconnect": { "description": "Name of the interconnect to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10412,7 +10758,7 @@ "interconnect": { "description": "Name of the interconnect resource to query.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10562,7 +10908,7 @@ "interconnect": { "description": "Name of the interconnect to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10861,7 +11207,7 @@ "license": { "description": "Name of the license resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10899,7 +11245,7 @@ "license": { "description": "Name of the License resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -11129,7 +11475,7 @@ "machineImage": { "description": "The name of the machine image to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -11167,7 +11513,7 @@ "machineImage": { "description": "The name of the machine image.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -11449,7 +11795,7 @@ "machineType": { "description": "Name of the machine type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12090,6 +12436,40 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getEffectiveFirewalls": { + "description": "Returns the effective firewalls on a given network.", + "httpMethod": "GET", + "id": "compute.networks.getEffectiveFirewalls", + "parameterOrder": [ + "project", + "network" + ], + "parameters": { + "network": { + "description": "Name of the network for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/networks/{network}/getEffectiveFirewalls", + "response": { + "$ref": "NetworksGetEffectiveFirewallsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "description": "Creates a network in the specified project using the data included in the request.", "httpMethod": "POST", @@ -12356,7 +12736,7 @@ "network": { "description": "Name of the network for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -12613,7 +12993,7 @@ "nodeGroup": { "description": "Name of the NodeGroup resource.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12711,7 +13091,7 @@ "nodeGroup": { "description": "Name of the NodeGroup resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12757,7 +13137,7 @@ "nodeGroup": { "description": "Name of the NodeGroup resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12806,7 +13186,7 @@ "nodeGroup": { "description": "Name of 
the node group to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13009,7 +13389,7 @@ "nodeGroup": { "description": "Name of the NodeGroup resource whose nodes you want to list.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13048,6 +13428,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "setAutoscalingPolicy": { + "description": "Sets the autoscaling policy of the node group.", + "httpMethod": "POST", + "id": "compute.nodeGroups.setAutoscalingPolicy", + "parameterOrder": [ + "project", + "zone", + "nodeGroup" + ], + "parameters": { + "nodeGroup": { + "description": "Name of the NodeGroup resource to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setAutoscalingPolicy", + "request": { + "$ref": "NodeGroupsSetAutoscalingPolicyRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", "httpMethod": "POST", @@ -13103,9 +13532,9 @@ ], "parameters": { "nodeGroup": { - "description": "Name of the NodeGroup resource to delete.", + "description": "Name of the NodeGroup resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13252,7 +13681,7 @@ "nodeTemplate": { "description": "Name of the NodeTemplate resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13298,7 +13727,7 @@ "nodeTemplate": { "description": "Name of the node template to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13622,7 +14051,7 @@ "nodeType": { "description": "Name of the node type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13720,6 +14149,11 @@ "securityPolicy" ], "parameters": { + "replaceExistingAssociation": { + "description": "Indicates whether or not to replace it if an association of the attachment already exists. This is false by default, in which case an error will be returned if an assocation already exists.", + "location": "query", + "type": "boolean" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", @@ -13778,6 +14212,41 @@ "https://www.googleapis.com/auth/compute" ] }, + "copyRules": { + "description": "Copies rules to the specified security policy.", + "httpMethod": "POST", + "id": "compute.organizationSecurityPolicies.copyRules", + "parameterOrder": [ + "securityPolicy" + ], + "parameters": { + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "securityPolicy": { + "description": "Name of the security policy to update.", + "location": "path", + "pattern": "[0-9]{0,20}", + "required": true, + "type": "string" + }, + "sourceSecurityPolicy": { + "description": "The security policy from which to copy rules.", + "location": "query", + "type": "string" + } + }, + "path": "locations/global/securityPolicies/{securityPolicy}/copyRules", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "delete": { "description": "Deletes the specified policy.", "httpMethod": "DELETE", @@ -13969,6 +14438,94 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "listAssociations": { + "description": "Lists associations of a specified target, i.e., organization or folder.", + "httpMethod": "GET", + "id": "compute.organizationSecurityPolicies.listAssociations", + "parameters": { + "targetResource": { + "description": "The target resource to list associations. It is an organization, or a folder.", + "location": "query", + "type": "string" + } + }, + "path": "locations/global/securityPolicies/listAssociations", + "response": { + "$ref": "OrganizationSecurityPoliciesListAssociationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "move": { + "description": "Moves the specified security policy.", + "httpMethod": "POST", + "id": "compute.organizationSecurityPolicies.move", + "parameterOrder": [ + "securityPolicy" + ], + "parameters": { + "parentId": { + "description": "The new parent of the security policy.", + "location": "query", + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "securityPolicy": { + "description": "Name of the security policy to update.", + "location": "path", + "pattern": "[0-9]{0,20}", + "required": true, + "type": "string" + } + }, + "path": "locations/global/securityPolicies/{securityPolicy}/move", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "patch": { + "description": "Patches the specified policy with the data included in the request.", + "httpMethod": "PATCH", + "id": "compute.organizationSecurityPolicies.patch", + "parameterOrder": [ + "securityPolicy" + ], + "parameters": { + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "securityPolicy": { + "description": "Name of the security policy to update.", + "location": "path", + "pattern": "[0-9]{0,20}", + "required": true, + "type": "string" + } + }, + "path": "locations/global/securityPolicies/{securityPolicy}", + "request": { + "$ref": "SecurityPolicy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "patchRule": { "description": "Patches a rule at the specified priority.", "httpMethod": "POST", @@ -14577,7 +15134,7 @@ "autoscaler": { "description": "Name of the autoscaler to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -14623,7 +15180,7 @@ "autoscaler": { "description": "Name of the autoscaler to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -14762,99 +15319,99 @@ "autoscaler": { "description": "Name of the autoscaler to patch.", "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "{project}/regions/{region}/autoscalers", - "request": { - "$ref": "Autoscaler" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.regionAutoscalers.testIamPermissions", - "parameterOrder": [ - "project", - "region", - "resource" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "description": "Updates an autoscaler in the specified project using the data included in the request.", - "httpMethod": "PUT", - "id": "compute.regionAutoscalers.update", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to update.", - "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/autoscalers", + "request": { + "$ref": "Autoscaler" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.regionAutoscalers.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "update": { + "description": "Updates an autoscaler in the specified project using the data included in the request.", + "httpMethod": "PUT", + "id": "compute.regionAutoscalers.update", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "autoscaler": { + "description": "Name of the autoscaler to update.", + "location": "query", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "type": "string" }, "project": { @@ -14906,7 +15463,7 @@ "backendService": { "description": "Name of the BackendService resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -14952,7 +15509,7 @@ "backendService": { "description": "Name of the BackendService resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -14994,7 +15551,7 @@ "backendService": { "description": "Name of the BackendService resource for which to get health.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -15230,7 +15787,7 @@ "backendService": { "description": "Name of the BackendService resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -15332,7 +15889,7 @@ "commitment": { "description": "Name of the commitment to return.", "location": "path", - 
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -15503,6 +16060,55 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "updateReservations": { + "description": "Update the shape of reservations for GPUS/Local SSDs of reservations within the commitments.", + "httpMethod": "POST", + "id": "compute.regionCommitments.updateReservations", + "parameterOrder": [ + "project", + "region", + "commitment" + ], + "parameters": { + "commitment": { + "description": "Name of the commitment of which the reservation's capacities are being updated.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/commitments/{commitment}/updateReservations", + "request": { + "$ref": "RegionCommitmentsUpdateReservationsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -15521,7 +16127,7 @@ "diskType": { "description": "Name of the disk type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -15678,6 +16284,7 @@ "type": "string" }, "guestFlush": { + "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", "location": "query", "type": "boolean" }, @@ -15800,6 +16407,48 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + "httpMethod": "GET", + "id": "compute.regionDisks.getIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/disks/{resource}/getIamPolicy", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "description": "Creates a persistent regional disk in the specified project using the data included in the request.", "httpMethod": "POST", @@ -16001,6 +16650,50 @@ "https://www.googleapis.com/auth/compute" ] }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "httpMethod": "POST", + "id": "compute.regionDisks.setIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/disks/{resource}/setIamPolicy", + "request": { + "$ref": "RegionSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setLabels": { "description": "Sets the labels on the target regional disk.", "httpMethod": "POST", @@ -16097,22 +16790,21 @@ } } }, - "regionHealthChecks": { + "regionHealthCheckServices": { "methods": { "delete": { - "description": "Deletes the specified HealthCheck resource.", + "description": "Deletes the specified regional HealthCheckService.", "httpMethod": "DELETE", - "id": "compute.regionHealthChecks.delete", + "id": "compute.regionHealthCheckServices.delete", "parameterOrder": [ "project", "region", - "healthCheck" + "healthCheckService" ], "parameters": { - "healthCheck": { - "description": "Name of the HealthCheck resource to delete.", + "healthCheckService": { + "description": "Name of the HealthCheckService to delete. 
The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -16136,7 +16828,7 @@ "type": "string" } }, - "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + "path": "{project}/regions/{region}/healthCheckServices/{healthCheckService}", "response": { "$ref": "Operation" }, @@ -16146,19 +16838,18 @@ ] }, "get": { - "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + "description": "Returns the specified regional HealthCheckService resource.", "httpMethod": "GET", - "id": "compute.regionHealthChecks.get", + "id": "compute.regionHealthCheckServices.get", "parameterOrder": [ "project", "region", - "healthCheck" + "healthCheckService" ], "parameters": { - "healthCheck": { - "description": "Name of the HealthCheck resource to return.", + "healthCheckService": { + "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -16177,9 +16868,9 @@ "type": "string" } }, - "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + "path": "{project}/regions/{region}/healthCheckServices/{healthCheckService}", "response": { - "$ref": "HealthCheck" + "$ref": "HealthCheckService" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16188,9 +16879,9 @@ ] }, "insert": { - "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", "httpMethod": "POST", - "id": "compute.regionHealthChecks.insert", + "id": "compute.regionHealthCheckServices.insert", "parameterOrder": [ "project", "region" @@ -16216,9 +16907,9 @@ "type": "string" } }, - "path": "{project}/regions/{region}/healthChecks", + "path": "{project}/regions/{region}/healthCheckServices", "request": { - "$ref": "HealthCheck" + "$ref": "HealthCheckService" }, "response": { "$ref": "Operation" @@ -16229,9 +16920,9 @@ ] }, "list": { - "description": "Retrieves the list of HealthCheck resources available to the specified project.", + "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", "httpMethod": "GET", - "id": "compute.regionHealthChecks.list", + "id": "compute.regionHealthCheckServices.list", "parameterOrder": [ "project", "region" @@ -16275,9 +16966,9 @@ "type": "string" } }, - "path": "{project}/regions/{region}/healthChecks", + "path": "{project}/regions/{region}/healthCheckServices", "response": { - "$ref": "HealthCheckList" + "$ref": "HealthCheckServicesList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16285,59 +16976,294 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "httpMethod": "PATCH", - "id": "compute.regionHealthChecks.patch", - "parameterOrder": [ - "project", - "region", - "healthCheck" - ], - "parameters": { - "healthCheck": { - "description": "Name of the HealthCheck resource to patch.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "{project}/regions/{region}/healthChecks/{healthCheck}", - "request": { - "$ref": "HealthCheck" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.regionHealthChecks.testIamPermissions", + "id": "compute.regionHealthCheckServices.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthCheckServices/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "regionHealthChecks": { + "methods": { + "delete": { + "description": "Deletes the specified HealthCheck resource.", + "httpMethod": "DELETE", + "id": "compute.regionHealthChecks.delete", + "parameterOrder": [ + 
"project", + "region", + "healthCheck" + ], + "parameters": { + "healthCheck": { + "description": "Name of the HealthCheck resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified HealthCheck resource. 
Gets a list of available health checks by making a list() request.", + "httpMethod": "GET", + "id": "compute.regionHealthChecks.get", + "parameterOrder": [ + "project", + "region", + "healthCheck" + ], + "parameters": { + "healthCheck": { + "description": "Name of the HealthCheck resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + "response": { + "$ref": "HealthCheck" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.regionHealthChecks.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks", + "request": { + "$ref": "HealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of HealthCheck resources available to the specified project.", + "httpMethod": "GET", + "id": "compute.regionHealthChecks.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks", + "response": { + "$ref": "HealthCheckList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.regionHealthChecks.patch", + "parameterOrder": [ + "project", + "region", + "healthCheck" + ], + "parameters": { + "healthCheck": { + "description": "Name of the HealthCheck resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + "request": { + "$ref": "HealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.regionHealthChecks.testIamPermissions", "parameterOrder": [ "project", "region", @@ -16392,7 +17318,7 @@ "healthCheck": { "description": "Name of the HealthCheck resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -16521,6 +17447,53 @@ "https://www.googleapis.com/auth/compute" ] }, + "createInstances": { + "description": "Creates instances with per-instance configs in this regional managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.createInstances", + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group. 
It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where the managed instance group is located. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", + "request": { + "$ref": "RegionInstanceGroupManagersCreateInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "delete": { "description": "Deletes the specified managed instance group and all of the instances in that group.", "httpMethod": "DELETE", @@ -16963,6 +17936,53 @@ "https://www.googleapis.com/auth/compute" ] }, + "patchPerInstanceConfigs": { + "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group. It should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request, should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", + "request": { + "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "recreateInstances": { "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "httpMethod": "POST", @@ -17297,7 +18317,7 @@ ] }, "updatePerInstanceConfigs": { - "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", + "description": "Insert or update (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", "parameterOrder": [ @@ -17603,22 +18623,22 @@ } } }, - "regionOperations": { + "regionNotificationEndpoints": { "methods": { "delete": { - "description": "Deletes the specified region-specific Operations resource.", + "description": "Deletes the specified NotificationEndpoint in the given region", "httpMethod": "DELETE", - "id": "compute.regionOperations.delete", + "id": "compute.regionNotificationEndpoints.delete", "parameterOrder": [ "project", "region", - "operation" + "notificationEndpoint" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to delete.", + "notificationEndpoint": { + "description": "Name of the NotificationEndpoint resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -17630,33 +18650,41 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "{project}/regions/{region}/operations/{operation}", + "path": "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + "response": { + "$ref": "Operation" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, "get": { - "description": "Retrieves the specified region-specific Operations resource.", + "description": "Returns the specified NotificationEndpoint resource in the given region.", "httpMethod": "GET", - "id": "compute.regionOperations.get", + "id": "compute.regionNotificationEndpoints.get", "parameterOrder": [ "project", "region", - "operation" + "notificationEndpoint" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to return.", + "notificationEndpoint": { + "description": "Name of the NotificationEndpoint resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -17668,16 +18696,16 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/operations/{operation}", + "path": "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", "response": { - "$ref": "Operation" + "$ref": "NotificationEndpoint" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17685,10 +18713,51 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "insert": { + "description": "Create a NotificationEndpoint in the specified project in the given region using the parameters that are included in the request.", + "httpMethod": "POST", + "id": "compute.regionNotificationEndpoints.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/notificationEndpoints", + "request": { + "$ref": "NotificationEndpoint" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "list": { - "description": "Retrieves a list of Operation resources contained within the specified region.", + "description": "Lists the NotificationEndpoints for a project in the given region.", "httpMethod": "GET", - "id": "compute.regionOperations.list", + "id": "compute.regionNotificationEndpoints.list", "parameterOrder": [ "project", "region" @@ -17725,16 +18794,16 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/operations", + "path": "{project}/regions/{region}/notificationEndpoints", "response": { - "$ref": "OperationList" + "$ref": "NotificationEndpointList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17742,23 +18811,16 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "wait": { - "description": "Waits for the specified region-specific Operations resource until it is done or timeout, and retrieves the specified Operations resource. 1. Immediately returns when the operation is already done. 2. Waits for no more than the default deadline (2 minutes, subject to change) and then returns the current state of the operation, which may be DONE or still in progress. 3. Is best-effort: a. The server can wait less than the default deadline or zero seconds, in overload situations. b. There is no guarantee that the operation is actually done when returns. 4. 
User should be prepared to retry if the operation is not DONE.", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.regionOperations.wait", + "id": "compute.regionNotificationEndpoints.testIamPermissions", "parameterOrder": [ "project", "region", - "operation" + "resource" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -17767,16 +18829,26 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "{project}/regions/{region}/operations/{operation}/wait", + "path": "{project}/regions/{region}/notificationEndpoints/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17786,105 +18858,63 @@ } } }, - "regionSslCertificates": { + "regionOperations": { "methods": { "delete": { - "description": "Deletes the specified SslCertificate resource in the region.", + "description": "Deletes the specified region-specific Operations resource.", "httpMethod": "DELETE", - "id": "compute.regionSslCertificates.delete", + "id": "compute.regionOperations.delete", "parameterOrder": [ "project", "region", - "sslCertificate" + "operation" ], "parameters": { - "project": { - "description": "Project ID for this request.", + "operation": { + "description": "Name of the Operations resource to delete.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "sslCertificate": { - "description": "Name of the SslCertificate resource to delete.", + "region": { + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", - "response": { - "$ref": "Operation" - }, + "path": "{project}/regions/{region}/operations/{operation}", "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, "get": { - "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", + "description": "Retrieves the specified region-specific Operations resource.", "httpMethod": "GET", - "id": "compute.regionSslCertificates.get", + "id": "compute.regionOperations.get", "parameterOrder": [ "project", "region", - "sslCertificate" + "operation" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", + "operation": { + "description": "Name of the Operations resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "sslCertificate": { - "description": "Name of the SslCertificate resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", - "response": { - "$ref": "SslCertificate" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", - "httpMethod": "POST", - "id": "compute.regionSslCertificates.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -17893,34 +18923,27 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "{project}/regions/{region}/sslCertificates", - "request": { - "$ref": "SslCertificate" - }, + "path": "{project}/regions/{region}/operations/{operation}", "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "list": { - "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", + "description": "Retrieves a list of Operation resources contained within the specified region.", "httpMethod": "GET", - "id": "compute.regionSslCertificates.list", + "id": "compute.regionOperations.list", "parameterOrder": [ "project", "region" @@ -17957,16 +18980,16 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/sslCertificates", + "path": "{project}/regions/{region}/operations", "response": { - "$ref": "SslCertificateList" + "$ref": "OperationList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17974,16 +18997,23 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource and region.", + "wait": { + "description": "Waits for the specified region-specific Operations resource until it is done or timeout, and retrieves the specified Operations resource. 1. Immediately returns when the operation is already done. 2. Waits for no more than the default deadline (2 minutes, subject to change) and then returns the current state of the operation, which may be DONE or still in progress. 3. Is best-effort: a. The server can wait less than the default deadline or zero seconds, in overload situations. b. There is no guarantee that the operation is actually done when returns. 4. 
User should be prepared to retry if the operation is not DONE.", "httpMethod": "POST", - "id": "compute.regionSslCertificates.testIamPermissions", + "id": "compute.regionOperations.wait", "parameterOrder": [ "project", "region", - "resource" + "operation" ], "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -17992,26 +19022,16 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "{project}/regions/{region}/sslCertificates/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "{project}/regions/{region}/operations/{operation}/wait", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -18021,16 +19041,16 @@ } } }, - "regionTargetHttpProxies": { + "regionSslCertificates": { "methods": { "delete": { - "description": "Deletes the specified TargetHttpProxy resource.", + "description": "Deletes the specified SslCertificate resource in the region.", "httpMethod": "DELETE", - "id": "compute.regionTargetHttpProxies.delete", + "id": "compute.regionSslCertificates.delete", "parameterOrder": [ "project", "region", - "targetHttpProxy" + "sslCertificate" ], "parameters": { "project": { @@ -18052,15 +19072,15 @@ "location": "query", "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to delete.", + "sslCertificate": { + "description": "Name of the SslCertificate resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", "response": { "$ref": "Operation" }, @@ -18070,13 +19090,13 @@ ] }, "get": { - "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", + "description": "Returns the specified SslCertificate resource in the specified region. 
Get a list of available SSL certificates by making a list() request.", "httpMethod": "GET", - "id": "compute.regionTargetHttpProxies.get", + "id": "compute.regionSslCertificates.get", "parameterOrder": [ "project", "region", - "targetHttpProxy" + "sslCertificate" ], "parameters": { "project": { @@ -18093,17 +19113,17 @@ "required": true, "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to return.", + "sslCertificate": { + "description": "Name of the SslCertificate resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", "response": { - "$ref": "TargetHttpProxy" + "$ref": "SslCertificate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -18112,9 +19132,244 @@ ] }, "insert": { - "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", + "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", "httpMethod": "POST", - "id": "compute.regionTargetHttpProxies.insert", + "id": "compute.regionSslCertificates.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/sslCertificates", + "request": { + "$ref": "SslCertificate" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", + "httpMethod": "GET", + "id": "compute.regionSslCertificates.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/sslCertificates", + "response": { + "$ref": "SslCertificateList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource and region.", + "httpMethod": "POST", + "id": "compute.regionSslCertificates.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/sslCertificates/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "regionTargetHttpProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetHttpProxy resource.", + "httpMethod": "DELETE", + "id": "compute.regionTargetHttpProxies.delete", + "parameterOrder": [ + "project", + "region", + "targetHttpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", + "httpMethod": "GET", + "id": "compute.regionTargetHttpProxies.get", + "parameterOrder": [ + "project", + "region", + "targetHttpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "response": { + "$ref": "TargetHttpProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.regionTargetHttpProxies.insert", "parameterOrder": [ "project", "region" @@ -18241,7 +19496,7 @@ "targetHttpProxy": { "description": "Name of the TargetHttpProxy to set a URL map for.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -18339,7 +19594,7 @@ "targetHttpsProxy": { "description": "Name of the TargetHttpsProxy resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -18380,7 +19635,7 @@ "targetHttpsProxy": { "description": "Name of the TargetHttpsProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -18525,7 +19780,7 @@ "targetHttpsProxy": { "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -18574,7 +19829,7 @@ "targetHttpsProxy": { "description": "Name of the 
TargetHttpsProxy to set a URL map for.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -18672,7 +19927,7 @@ "urlMap": { "description": "Name of the UrlMap resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -18713,7 +19968,7 @@ "urlMap": { "description": "Name of the UrlMap resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -18801,7 +20056,7 @@ "urlMap": { "description": "Name of the UrlMap scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -18907,7 +20162,7 @@ "urlMap": { "description": "Name of the UrlMap resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -19001,7 +20256,7 @@ "urlMap": { "description": "Name of the UrlMap resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -19045,7 +20300,7 @@ "urlMap": { "description": "Name of the UrlMap resource to be validated as.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -19085,7 +20340,7 @@ "region": { "description": "Name of the region resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -19151,12 +20406,12 @@ } } }, - "resourcePolicies": { + "reservations": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of resource policies.", + "description": "Retrieves an aggregated list of reservations.", "httpMethod": "GET", - "id": "compute.resourcePolicies.aggregatedList", + "id": "compute.reservations.aggregatedList", "parameterOrder": [ "project" ], @@ -19192,9 +20447,9 @@ "type": "string" } }, - "path": "{project}/aggregated/resourcePolicies", + "path": "{project}/aggregated/reservations", "response": { - "$ref": "ResourcePolicyAggregatedList" + "$ref": "ReservationAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19203,13 +20458,13 @@ ] }, "delete": { - "description": "Deletes the specified resource policy.", + "description": "Deletes the specified reservation.", "httpMethod": "DELETE", - "id": "compute.resourcePolicies.delete", + "id": "compute.reservations.delete", "parameterOrder": [ "project", - "region", - "resourcePolicy" + "zone", + "reservation" ], "parameters": { "project": { @@ -19219,27 +20474,27 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "resourcePolicy": { - "description": "Name of the resource policy to delete.", + "reservation": { + "description": "Name of the reservation to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", + "path": "{project}/zones/{zone}/reservations/{reservation}", "response": { "$ref": "Operation" }, @@ -19249,13 +20504,13 @@ ] }, "get": { - "description": "Retrieves all information of the specified resource policy.", + "description": "Retrieves all information of the specified reservation.", "httpMethod": "GET", - "id": "compute.resourcePolicies.get", + "id": "compute.reservations.get", "parameterOrder": [ "project", - "region", - "resourcePolicy" + "zone", + "reservation" ], "parameters": { "project": { @@ -19265,24 +20520,24 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", + "reservation": { + "description": "Name of the reservation to retrieve.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "resourcePolicy": { - "description": "Name of the resource policy to retrieve.", + "zone": { + "description": "Name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", + "path": "{project}/zones/{zone}/reservations/{reservation}", "response": { - "$ref": "ResourcePolicy" + "$ref": "Reservation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19293,10 +20548,10 @@ "getIamPolicy": { "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", "httpMethod": "GET", - "id": "compute.resourcePolicies.getIamPolicy", + "id": "compute.reservations.getIamPolicy", "parameterOrder": [ "project", - "region", + "zone", "resource" ], "parameters": { @@ -19307,22 +20562,22 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "zone": { + "description": "The name of the zone for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy", + "path": "{project}/zones/{zone}/reservations/{resource}/getIamPolicy", "response": { "$ref": "Policy" }, @@ -19333,12 +20588,12 @@ ] }, "insert": { - "description": "Creates a new resource policy.", + "description": "Creates a new reservation.", "httpMethod": "POST", - "id": "compute.resourcePolicies.insert", + "id": "compute.reservations.insert", "parameterOrder": [ "project", - "region" + "zone" ], "parameters": { "project": { @@ -19348,22 +20603,22 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "{project}/regions/{region}/resourcePolicies", + "path": "{project}/zones/{zone}/reservations", "request": { - "$ref": "ResourcePolicy" + "$ref": "Reservation" }, "response": { "$ref": "Operation" @@ -19374,12 +20629,12 @@ ] }, "list": { - "description": "A list all the resource policies that have been configured for the specified project in specified region.", + "description": "A list all the reservations that have been configured for the specified project in specified zone.", "httpMethod": "GET", - "id": "compute.resourcePolicies.list", + "id": "compute.reservations.list", "parameterOrder": [ "project", - "region" + "zone" ], "parameters": { "filter": { @@ -19412,17 +20667,17 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", + "zone": { + "description": "Name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/resourcePolicies", + "path": "{project}/zones/{zone}/reservations", "response": { - "$ref": "ResourcePolicyList" + "$ref": "ReservationList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19430,14 +20685,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "resize": { + "description": "Resizes the reservation (applicable to standalone reservations only)", "httpMethod": "POST", - "id": "compute.resourcePolicies.setIamPolicy", + "id": "compute.reservations.resize", "parameterOrder": [ "project", - "region", - "resource" + "zone", + "reservation" ], "parameters": { "project": { @@ -19447,24 +20702,73 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "reservation": { + "description": "Name of the reservation to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/reservations/{reservation}/resize", + "request": { + "$ref": "ReservationsResizeRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "httpMethod": "POST", + "id": "compute.reservations.setIamPolicy", + "parameterOrder": [ + "project", + "zone", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, "resource": { "description": "Name or id of the resource for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy", + "path": "{project}/zones/{zone}/reservations/{resource}/setIamPolicy", "request": { - "$ref": "RegionSetPolicyRequest" + "$ref": "ZoneSetPolicyRequest" }, "response": { "$ref": "Policy" @@ -19477,10 +20781,10 @@ "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.resourcePolicies.testIamPermissions", + "id": "compute.reservations.testIamPermissions", "parameterOrder": [ "project", - "region", + "zone", "resource" ], "parameters": { @@ -19491,22 +20795,22 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "zone": { + "description": "The name of the zone for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions", + "path": "{project}/zones/{zone}/reservations/{resource}/testIamPermissions", "request": { "$ref": "TestPermissionsRequest" }, @@ -19521,12 +20825,12 @@ } } }, - "routers": { + "resourcePolicies": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of 
routers.", + "description": "Retrieves an aggregated list of resource policies.", "httpMethod": "GET", - "id": "compute.routers.aggregatedList", + "id": "compute.resourcePolicies.aggregatedList", "parameterOrder": [ "project" ], @@ -19562,9 +20866,9 @@ "type": "string" } }, - "path": "{project}/aggregated/routers", + "path": "{project}/aggregated/resourcePolicies", "response": { - "$ref": "RouterAggregatedList" + "$ref": "ResourcePolicyAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19573,13 +20877,13 @@ ] }, "delete": { - "description": "Deletes the specified Router resource.", + "description": "Deletes the specified resource policy.", "httpMethod": "DELETE", - "id": "compute.routers.delete", + "id": "compute.resourcePolicies.delete", "parameterOrder": [ "project", "region", - "router" + "resourcePolicy" ], "parameters": { "project": { @@ -19601,15 +20905,15 @@ "location": "query", "type": "string" }, - "router": { - "description": "Name of the Router resource to delete.", + "resourcePolicy": { + "description": "Name of the resource policy to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/routers/{router}", + "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", "response": { "$ref": "Operation" }, @@ -19619,80 +20923,15 @@ ] }, "get": { - "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request.", - "httpMethod": "GET", - "id": "compute.routers.get", - "parameterOrder": [ - "project", - "region", - "router" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "router": { - "description": "Name of the Router resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/regions/{region}/routers/{router}", - "response": { - "$ref": "Router" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getNatMappingInfo": { - "description": "Retrieves runtime Nat mapping information of VM endpoints.", + "description": "Retrieves all information of the specified resource policy.", "httpMethod": "GET", - "id": "compute.routers.getNatMappingInfo", + "id": "compute.resourcePolicies.get", "parameterOrder": [ "project", "region", - "router" + "resourcePolicy" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -19707,17 +20946,17 @@ "required": true, "type": "string" }, - "router": { - "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", + "resourcePolicy": { + "description": "Name of the resource policy to retrieve.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/routers/{router}/getNatMappingInfo", + "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", "response": { - "$ref": "VmEndpointNatMappingsList" + "$ref": "ResourcePolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19725,14 +20964,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getRouterStatus": { - "description": "Retrieves runtime information of the specified router.", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", "httpMethod": "GET", - "id": "compute.routers.getRouterStatus", + "id": "compute.resourcePolicies.getIamPolicy", "parameterOrder": [ "project", "region", - "router" + "resource" ], "parameters": { "project": { @@ -19743,23 +20982,23 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "router": { - "description": "Name of the Router resource to query.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", + "path": "{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy", "response": { - "$ref": "RouterStatusResponse" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19768,9 +21007,449 @@ ] }, "insert": { - "description": "Creates a Router resource in the specified project and region using the data included in the request.", + "description": "Creates a new resource policy.", "httpMethod": "POST", - "id": "compute.routers.insert", + "id": "compute.resourcePolicies.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/resourcePolicies", + "request": { + "$ref": "ResourcePolicy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "A list all the resource policies that have been configured for the specified project in specified region.", + "httpMethod": "GET", + "id": "compute.resourcePolicies.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/resourcePolicies", + "response": { + "$ref": "ResourcePolicyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "httpMethod": "POST", + "id": "compute.resourcePolicies.setIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy", + "request": { + "$ref": "RegionSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.resourcePolicies.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "routers": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of routers.", + "httpMethod": "GET", + "id": "compute.routers.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/routers", + "response": { + "$ref": "RouterAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified Router resource.", + "httpMethod": "DELETE", + "id": "compute.routers.delete", + "parameterOrder": [ + "project", + "region", + "router" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "router": { + "description": "Name of the Router resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/routers/{router}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request.", + "httpMethod": "GET", + "id": "compute.routers.get", + "parameterOrder": [ + "project", + "region", + "router" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "router": { + "description": "Name of the Router resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/routers/{router}", + "response": { + "$ref": "Router" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getNatMappingInfo": { + "description": "Retrieves runtime Nat mapping information of VM endpoints.", + "httpMethod": "GET", + "id": "compute.routers.getNatMappingInfo", + "parameterOrder": [ + "project", + "region", + "router" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "natName": { + "description": "Name of the nat service to filter the Nat Mapping information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", + "location": "query", + "type": "string" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "router": { + "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/routers/{router}/getNatMappingInfo", + "response": { + "$ref": "VmEndpointNatMappingsList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getRouterStatus": { + "description": "Retrieves runtime information of the specified router.", + "httpMethod": "GET", + "id": "compute.routers.getRouterStatus", + "parameterOrder": [ + "project", + "region", + "router" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "router": { + "description": "Name of the Router resource to query.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", + "response": { + "$ref": "RouterStatusResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + 
"https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a Router resource in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.routers.insert", "parameterOrder": [ "project", "region" @@ -19897,7 +21576,7 @@ "router": { "description": "Name of the Router resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -19941,7 +21620,7 @@ "router": { "description": "Name of the Router resource to query.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -20036,7 +21715,7 @@ "router": { "description": "Name of the Router resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -20271,7 +21950,7 @@ "securityPolicy": { "description": "Name of the security policy to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -20317,7 +21996,7 @@ "securityPolicy": { "description": "Name of the security policy to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -20350,7 +22029,7 @@ "securityPolicy": { "description": "Name of the security policy to get.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -20390,7 +22069,7 @@ "securityPolicy": { "description": "Name of the security policy to which the queried rule belongs.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -20564,7 +22243,7 @@ "securityPolicy": { "description": "Name of the security policy to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -20606,7 +22285,7 @@ "securityPolicy": { "description": "Name of the security policy to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -20653,7 +22332,7 @@ "securityPolicy": { "description": "Name of the security policy to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -21085,7 +22764,7 @@ "sslCertificate": { "description": "Name of the SslCertificate resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -21118,7 +22797,7 @@ "sslCertificate": { "description": "Name of the SslCertificate resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -21619,7 +23298,7 @@ "subnetwork": { "description": "Name of the Subnetwork resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -21665,7 +23344,7 @@ "subnetwork": { "description": "Name of the Subnetwork resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -21709,7 +23388,7 @@ "subnetwork": { "description": "Name of the Subnetwork resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -21951,7 +23630,7 @@ "subnetwork": { "description": "Name of the Subnetwork resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -22044,7 +23723,7 @@ "subnetwork": { "description": "Name of the Subnetwork resource.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -22183,7 +23862,7 @@ "targetHttpProxy": { "description": "Name of the TargetHttpProxy resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -22216,7 +23895,7 @@ "targetHttpProxy": { "description": "Name of the TargetHttpProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -22337,7 +24016,7 @@ "targetHttpProxy": { "description": "Name of the TargetHttpProxy to set a URL map for.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -22468,7 +24147,7 @@ "targetHttpsProxy": { "description": "Name of the TargetHttpsProxy resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -22501,7 +24180,7 @@ "targetHttpsProxy": { "description": "Name of the TargetHttpsProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -22662,7 +24341,7 @@ "targetHttpsProxy": { "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -22743,7 +24422,7 @@ "targetHttpsProxy": { "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -22875,7 +24554,7 @@ "targetInstance": { 
"description": "Name of the TargetInstance resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -22916,7 +24595,7 @@ "targetInstance": { "description": "Name of the TargetInstance resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -23117,7 +24796,7 @@ "targetPool": { "description": "Name of the target pool to add a health check to.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -23166,7 +24845,7 @@ "targetPool": { "description": "Name of the TargetPool resource to add instances to.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -23264,7 +24943,7 @@ "targetPool": { "description": "Name of the TargetPool resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -23305,7 +24984,7 @@ "targetPool": { "description": "Name of the TargetPool resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -23347,7 +25026,7 @@ "targetPool": { "description": "Name of the TargetPool resource to which the queried instance belongs.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -23495,7 +25174,7 @@ "targetPool": { "description": "Name of the target pool to remove health checks from.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -23544,7 +25223,7 @@ "targetPool": { "description": "Name of the TargetPool resource to remove instances from.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -23599,7 +25278,7 @@ "targetPool": { "description": "Name of the TargetPool resource to set a backup pool for.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -23689,7 +25368,7 @@ "targetSslProxy": { "description": "Name of the TargetSslProxy resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -23722,7 +25401,7 @@ "targetSslProxy": { "description": "Name of the TargetSslProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -23843,7 +25522,7 @@ "targetSslProxy": { "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", "location": "path", - "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -23884,7 +25563,7 @@ "targetSslProxy": { "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -23925,7 +25604,7 @@ "targetSslProxy": { "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -24047,7 +25726,7 @@ "targetTcpProxy": { "description": "Name of the TargetTcpProxy resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -24080,7 +25759,7 @@ "targetTcpProxy": { "description": "Name of the TargetTcpProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -24201,7 +25880,7 @@ "targetTcpProxy": { "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -24242,7 +25921,7 @@ "targetTcpProxy": { "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -24381,7 +26060,7 @@ "targetVpnGateway": { "description": "Name of the target VPN gateway to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -24422,7 +26101,7 @@ "targetVpnGateway": { "description": "Name of the target VPN gateway to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -24706,7 +26385,7 @@ "urlMap": { "description": "Name of the UrlMap resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -24739,7 +26418,7 @@ "urlMap": { "description": "Name of the UrlMap resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -24811,7 +26490,7 @@ "urlMap": { "description": "Name of the UrlMap scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -24901,7 +26580,7 @@ "urlMap": { "description": "Name of the UrlMap resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": 
true, "type": "string" } @@ -24979,7 +26658,7 @@ "urlMap": { "description": "Name of the UrlMap resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -25015,7 +26694,7 @@ "urlMap": { "description": "Name of the UrlMap resource to be validated as.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -25117,7 +26796,7 @@ "vpnGateway": { "description": "Name of the VPN gateway to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -25158,7 +26837,7 @@ "vpnGateway": { "description": "Name of the VPN gateway to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -25173,305 +26852,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a VPN gateway in the specified project and region using the data included in the request.", - "httpMethod": "POST", - "id": "compute.vpnGateways.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "{project}/regions/{region}/vpnGateways", - "request": { - "$ref": "VpnGateway" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "description": "Retrieves a list of VPN gateways available to the specified project and region.", - "httpMethod": "GET", - "id": "compute.vpnGateways.list", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/regions/{region}/vpnGateways", - "response": { - "$ref": "VpnGatewayList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "setLabels": { - "description": "Sets the labels on a VpnGateway. 
To learn more about labels, read the Labeling Resources documentation.", - "httpMethod": "POST", - "id": "compute.vpnGateways.setLabels", - "parameterOrder": [ - "project", - "region", - "resource" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "The region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "{project}/regions/{region}/vpnGateways/{resource}/setLabels", - "request": { - "$ref": "RegionSetLabelsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.vpnGateways.testIamPermissions", - "parameterOrder": [ - "project", - "region", - "resource" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "{project}/regions/{region}/vpnGateways/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "vpnTunnels": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of VPN tunnels.", - "httpMethod": "GET", - "id": "compute.vpnTunnels.aggregatedList", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that 
filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - } - }, - "path": "{project}/aggregated/vpnTunnels", - "response": { - "$ref": "VpnTunnelAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "delete": { - "description": "Deletes the specified VpnTunnel resource.", - "httpMethod": "DELETE", - "id": "compute.vpnTunnels.delete", - "parameterOrder": [ - "project", - "region", - "vpnTunnel" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "vpnTunnel": { - "description": "Name of the VpnTunnel resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Returns the specified VpnTunnel resource. 
Gets a list of available VPN tunnels by making a list() request.", + "getStatus": { + "description": "Returns the status for the specified VPN gateway.", "httpMethod": "GET", - "id": "compute.vpnTunnels.get", + "id": "compute.vpnGateways.getStatus", "parameterOrder": [ "project", "region", - "vpnTunnel" + "vpnGateway" ], "parameters": { "project": { @@ -25488,17 +26876,17 @@ "required": true, "type": "string" }, - "vpnTunnel": { - "description": "Name of the VpnTunnel resource to return.", + "vpnGateway": { + "description": "Name of the VPN gateway to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}/getStatus", "response": { - "$ref": "VpnTunnel" + "$ref": "VpnGatewaysGetStatusResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -25507,9 +26895,342 @@ ] }, "insert": { - "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", + "description": "Creates a VPN gateway in the specified project and region using the data included in the request.", "httpMethod": "POST", - "id": "compute.vpnTunnels.insert", + "id": "compute.vpnGateways.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways", + "request": { + "$ref": "VpnGateway" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of VPN gateways available to the specified project and region.", + "httpMethod": "GET", + "id": "compute.vpnGateways.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways", + "response": { + "$ref": "VpnGatewayList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setLabels": { + "description": "Sets the labels on a VpnGateway. 
To learn more about labels, read the Labeling Resources documentation.", + "httpMethod": "POST", + "id": "compute.vpnGateways.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.vpnGateways.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "vpnTunnels": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of VPN tunnels.", + "httpMethod": "GET", + "id": "compute.vpnTunnels.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that 
filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/vpnTunnels", + "response": { + "$ref": "VpnTunnelAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified VpnTunnel resource.", + "httpMethod": "DELETE", + "id": "compute.vpnTunnels.delete", + "parameterOrder": [ + "project", + "region", + "vpnTunnel" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "vpnTunnel": { + "description": "Name of the VpnTunnel resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified VpnTunnel resource. 
Gets a list of available VPN tunnels by making a list() request.", + "httpMethod": "GET", + "id": "compute.vpnTunnels.get", + "parameterOrder": [ + "project", + "region", + "vpnTunnel" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "vpnTunnel": { + "description": "Name of the VpnTunnel resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "response": { + "$ref": "VpnTunnel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.vpnTunnels.insert", "parameterOrder": [ "project", "region" @@ -25715,7 +27436,7 @@ "operation": { "description": "Name of the Operations resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -25753,7 +27474,7 @@ "operation": { "description": "Name of the Operations resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -25852,7 +27573,7 @@ "operation": { "description": "Name of the Operations resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -25904,7 +27625,7 @@ "zone": { "description": "Name of the zone resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -25971,7 +27692,7 @@ } } }, - "revision": "20181130", + "revision": "20190528", "rootUrl": "https://www.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -25984,7 +27705,7 @@ "type": "integer" }, "acceleratorType": { - "description": "Full or partial URL of the accelerator type resource to attach to this instance. If you are creating an instance template, specify only the accelerator name.", + "description": "Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. 
See GPUs on Compute Engine for a full list of accelerator types.", "type": "string" } }, @@ -26030,6 +27751,10 @@ "description": "[Output Only] Server-defined fully-qualified URL for this resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource's resource id.", + "type": "string" + }, "zone": { "description": "[Output Only] The name of the zone where the accelerator type resides, such as us-central1-a. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" @@ -26541,6 +28266,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "status": { "description": "[Output Only] The status of the address, which can be one of RESERVING, RESERVED, or IN_USE. An address that is RESERVING is currently in the process of being reserved. A RESERVED address is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", "enum": [ @@ -26902,338 +28631,34 @@ }, "type": "object" }, - "Allocation": { - "description": "Allocation resource", - "id": "Allocation", + "AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk": { + "id": "AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk", "properties": { - "commitment": { - "description": "Full or partial url for commitment in which this allocation is to be created. This field is ignored when allocations are created during committment creation.", - "type": "string" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#allocation", - "description": "[Output Only] Type of the resource. Always compute#allocations for allocations.", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "compute.instances.insert" - ] - }, - "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined fully-qualified URL for this resource.", + "diskSizeGb": { + "description": "Specifies the size of the disk in base-2 GB.", + "format": "int64", "type": "string" }, - "specificAllocation": { - "$ref": "AllocationSpecificSKUAllocation" - }, - "specificAllocationRequired": { - "description": "Indicates whether the allocation can be consumed by VMs with \"any allocation\" defined. 
If the field is set, then only VMs that target the allocation by name using --allocation-affinity can consume this allocation.", - "type": "boolean" - }, - "zone": { - "type": "string" - } - }, - "type": "object" - }, - "AllocationAffinity": { - "description": "AllocationAffinity is the configuration of desired allocation which this instance could take capacity from.", - "id": "AllocationAffinity", - "properties": { - "consumeAllocationType": { + "interface": { + "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.", "enum": [ - "ANY_ALLOCATION", - "NO_ALLOCATION", - "SPECIFIC_ALLOCATION", - "UNSPECIFIED" + "NVDIMM", + "NVME", + "SCSI" ], "enumDescriptions": [ - "", "", "", "" ], "type": "string" - }, - "key": { - "description": "Corresponds to the label key of allocation resource.", - "type": "string" - }, - "values": { - "description": "Corresponds to the label values of allocation resource.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "AllocationAggregatedList": { - "description": "Contains a list of allocations.", - "id": "AllocationAggregatedList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "additionalProperties": { - "$ref": "AllocationsScopedList", - "description": "Name of the scope containing this set of allocations." - }, - "description": "A list of Allocation resources.", - "type": "object" - }, - "kind": { - "default": "compute#allocationAggregatedList", - "description": "Type of resource.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "AllocationList": { - "id": "AllocationList", - "properties": { - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "type": "string" - }, - "items": { - "description": "[Output Only] A list of Allocation resources.", - "items": { - "$ref": "Allocation" - }, - "type": "array" - }, - "kind": { - "default": "compute#allocationList", - "description": "[Output Only] Type of resource.Always compute#allocationsList for listsof allocations", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "AllocationSpecificSKUAllocation": { - "description": "This allocation type allows to pre allocate specific instance configuration.", - "id": "AllocationSpecificSKUAllocation", - "properties": { - "count": { - "description": "Specifies number of resources that are allocated.", - "format": "int64", - "type": "string" - }, - "inUseCount": { - "description": "[OutputOnly] Indicates how many resource are in use.", - "format": "int64", - "type": "string" - }, - "instanceProperties": { - "$ref": "AllocationSpecificSKUAllocationAllocatedInstanceProperties" } }, "type": "object" }, - "AllocationSpecificSKUAllocationAllocatedInstanceProperties": { + "AllocationSpecificSKUAllocationReservedInstanceProperties": { "description": "Properties of the SKU instances being reserved.", - "id": "AllocationSpecificSKUAllocationAllocatedInstanceProperties", + "id": "AllocationSpecificSKUAllocationReservedInstanceProperties", "properties": { "guestAccelerators": { "description": "Specifies accelerator type and count.", @@ -27245,7 +28670,7 @@ "localSsds": { "description": "Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd.", "items": { - "$ref": "AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk" + "$ref": "AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk" }, "type": "array" }, @@ -27254,145 +28679,29 @@ "type": "string" }, "minCpuPlatform": { - "description": "Minimum cpu platform the allocation.", + "description": "Minimum cpu platform the reservation.", "type": "string" } }, "type": "object" }, - "AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk": { - "id": "AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk", + "AllocationSpecificSKUReservation": { + "description": "This reservation type allows to pre allocate specific instance configuration.", + "id": "AllocationSpecificSKUReservation", "properties": { - "diskSizeGb": { - "description": "Specifies the size of the disk in base-2 GB.", + "count": { + "description": "Specifies number of resources that are allocated.", "format": "int64", "type": "string" }, - "interface": { - "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. 
For performance characteristics of SCSI over NVMe, see Local SSD performance.", - "enum": [ - "NVDIMM", - "NVME", - "SCSI" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "AllocationsScopedList": { - "id": "AllocationsScopedList", - "properties": { - "allocations": { - "description": "A list of allocations contained in this scope.", - "items": { - "$ref": "Allocation" - }, - "type": "array" - }, - "warning": { - "description": "Informational warning which replaces the list of allocations when the list is empty.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "AllocationsUpdateResourceShapeRequest": { - "id": "AllocationsUpdateResourceShapeRequest", - "properties": { - "count": { - "description": "Number of allocated resources which are to be updated with minimum = 1 and maximum = 100.", - "format": "int32", - "type": "integer" - }, - "destinationAllocation": { - "description": "The name of destination allocation where the modified machines are added. If existing, its machine spec must match the modified machine spec. 
If non existing, new allocation with this name and modified machine spec is created automatically.", + "inUseCount": { + "description": "[OutputOnly] Indicates how many resource are in use.", + "format": "int64", "type": "string" }, - "updatedResourceProperties": { - "$ref": "AllocationSpecificSKUAllocationAllocatedInstanceProperties" + "instanceProperties": { + "$ref": "AllocationSpecificSKUAllocationReservedInstanceProperties", + "description": "The instance properties for this specific sku reservation." } }, "type": "object" @@ -27410,7 +28719,7 @@ "type": "boolean" }, "deviceName": { - "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks.", + "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks.", "type": "string" }, "diskEncryptionKey": { @@ -27488,6 +28797,10 @@ ], "type": "string" }, + "shieldedInstanceInitialState": { + "$ref": "InitialStateConfig", + "description": "[Output Only] shielded vm initial state stored on disk" + }, "source": { "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk.", "type": "string" @@ -27528,6 +28841,13 @@ "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example:\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/pd-standard\n\n\nOther values include pd-ssd and local-ssd. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType \n- projects/project/zones/zone/diskTypes/diskType \n- zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL.", "type": "string" }, + "guestOsFeatures": { + "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.\n\nGuest OS features are applied by merging initializeParams.guestOsFeatures and disks.guestOsFeatures", + "items": { + "$ref": "GuestOsFeature" + }, + "type": "array" + }, "labels": { "additionalProperties": { "type": "string" @@ -27535,6 +28855,20 @@ "description": "Labels to apply to this disk. 
These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks.", "type": "object" }, + "replicaZones": { + "description": "URLs of the zones where the disk should be replicated to. Only applicable for regional resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "resourcePolicies": { + "description": "Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name.", + "items": { + "type": "string" + }, + "type": "array" + }, "sourceImage": { "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image:\nprojects/debian-cloud/global/images/family/debian-9\n\n\nAlternatively, use a specific version of a public operating system image:\nprojects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD\n\n\nTo create a disk with a custom image that you created, specify the image name in the following format:\nglobal/images/my-custom-image\n\n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\nglobal/images/family/my-image-family\n\n\nIf the source image is deleted later, this field will not be set.", "type": "string" @@ -27609,6 +28943,59 @@ }, "type": "object" }, + "AuthenticationPolicy": { + "description": "The authentication settings for the backend service.", + "id": "AuthenticationPolicy", + "properties": { + "origins": { + "description": "List of authentication methods that can be used for origin authentication. Similar to peers, these will be evaluated in order the first valid one will be used to set origin identity. If none of these methods pass, the request will be rejected with authentication failed error (401). Leave the list empty if origin authentication is not required.", + "items": { + "$ref": "OriginAuthenticationMethod" + }, + "type": "array" + }, + "peers": { + "description": "List of authentication methods that can be used for peer authentication. They will be evaluated in order the first valid one will be used to set peer identity. If none of these methods pass, the request will be rejected with authentication failed error (401). Leave the list empty if peer authentication is not required.", + "items": { + "$ref": "PeerAuthenticationMethod" + }, + "type": "array" + }, + "principalBinding": { + "description": "Define whether peer or origin identity should be used for principal. Default value is USE_PEER. If peer (or origin) identity is not available, either because peer/origin authentication is not defined, or failed, principal will be left unset. In other words, binding rule does not affect the decision to accept or reject request. This field can be set to one of the following: USE_PEER: Principal will be set to the identity from peer authentication. 
USE_ORIGIN: Principal will be set to the identity from origin authentication.", + "enum": [ + "INVALID", + "USE_ORIGIN", + "USE_PEER" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "serverTlsContext": { + "$ref": "TlsContext", + "description": "Configures the mechanism to obtain server-side security certificates and identity information." + } + }, + "type": "object" + }, + "AuthorizationConfig": { + "description": "Authorization configuration provides service-level and method-level access control for a service.", + "id": "AuthorizationConfig", + "properties": { + "policies": { + "description": "List of RbacPolicies.", + "items": { + "$ref": "RbacPolicy" + }, + "type": "array" + } + }, + "type": "object" + }, "AuthorizationLoggingOptions": { "description": "Authorization-related information used by Cloud Audit Logging.", "id": "AuthorizationLoggingOptions", @@ -27683,6 +29070,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "status": { "description": "[Output Only] The status of the autoscaler configuration.", "enum": [ @@ -28196,7 +29587,7 @@ "id": "AutoscalingPolicyLoadBalancingUtilization", "properties": { "utilizationTarget": { - "description": "Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.", + "description": "Fraction of backend capacity utilization (set in HTTP(S) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.", "format": "double", "type": "number" } @@ -28354,6 +29745,10 @@ "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" + }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" } }, "type": "object" @@ -28513,6 +29908,10 @@ "$ref": "BackendServiceCdnPolicy", "description": "Cloud CDN configuration for this BackendService." }, + "circuitBreakers": { + "$ref": "CircuitBreakers", + "description": "Settings controlling the volume of connections to a backend service.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + }, "cloudFunctionBackend": { "$ref": "BackendServiceCloudFunctionBackend", "description": "Directs request to a cloud function. appEngineBackend and backends[] must be empty if this is set." @@ -28520,6 +29919,10 @@ "connectionDraining": { "$ref": "ConnectionDraining" }, + "consistentHash": { + "$ref": "ConsistentHashLoadBalancerSettings", + "description": "Consistent Hash-based load balancing can be used to provide soft session affinity based on HTTP headers, cookies or other properties. This load balancing policy is applicable only for HTTP connections. The affinity to a particular destination host will be lost when one or more hosts are added/removed from the destination service. This field specifies parameters that control consistent hashing. 
This field is only applicable when localityLbPolicy is set to MAGLEV or RING_HASH.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -28585,6 +29988,28 @@ ], "type": "string" }, + "localityLbPolicy": { + "description": "The load balancing algorithm used within the scope of the locality. The possible values are: \n- ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. \n- LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. \n- RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. \n- RANDOM: The load balancer selects a random healthy host. \n- ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. \n- MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 \n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.", + "enum": [ + "INVALID_LB_POLICY", + "LEAST_REQUEST", + "MAGLEV", + "ORIGINAL_DESTINATION", + "RANDOM", + "RING_HASH", + "ROUND_ROBIN" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, "logConfig": { "$ref": "BackendServiceLogConfig", "description": "This field denotes the logging options for the load balancer traffic served by this backend service. If logging is enabled, logs will be exported to Stackdriver." @@ -28594,6 +30019,10 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "outlierDetection": { + "$ref": "OutlierDetection", + "description": "Settings controlling eviction of unhealthy hosts from the load balancing pool. This field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + }, "port": { "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.\n\nThis cannot be used for internal load balancing.", "format": "int32", @@ -28631,10 +30060,18 @@ "description": "[Output Only] The resource URL for the security policy associated with this backend service.", "type": "string" }, + "securitySettings": { + "$ref": "SecuritySettings", + "description": "This field specifies the security policy that applies to this backend service. 
This field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "sessionAffinity": { "description": "Type of session affinity to use. The default is NONE.\n\nWhen the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, or GENERATED_COOKIE.\n\nWhen the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the protocol is UDP, this field is not used.", "enum": [ @@ -28642,6 +30079,8 @@ "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", + "HEADER_FIELD", + "HTTP_COOKIE", "NONE" ], "enumDescriptions": [ @@ -28649,6 +30088,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -29154,10 +30595,10 @@ "properties": { "condition": { "$ref": "Expr", - "description": "Unimplemented. The condition that is associated with this binding. NOTE: an unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." + "description": "The condition that is associated with this binding. NOTE: An unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@gmail.com` .\n\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`.\n\n\n\n* `domain:{domain}`: A Google Apps domain name that represents all the users of that domain. For example, `google.com` or `example.com`.", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@gmail.com` .\n\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`.\n\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. 
For example, `google.com` or `example.com`.", "items": { "type": "string" }, @@ -29216,17 +30657,146 @@ }, "type": "object" }, - "Commitment": { - "description": "Represents a Commitment resource. Creating a Commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts.\n\nCommitted use discounts are subject to Google Cloud Platform's Service Specific Terms. By purchasing a committed use discount, you agree to these terms. Committed use discounts will not renew, so you must purchase a new commitment to continue receiving discounts. (== resource_for beta.commitments ==) (== resource_for v1.commitments ==)", - "id": "Commitment", + "CallCredentials": { + "description": "gRPC call credentials to access the SDS server.", + "id": "CallCredentials", + "properties": { + "accessToken": { + "description": "The access token that is used as call credential for the SDS server. This field is used only if callCredentialType is ACCESS_TOKEN.", + "type": "string" + }, + "callCredentialType": { + "description": "The type of call credentials to use for GRPC requests to the SDS server. This field can be set to one of the following: ACCESS_TOKEN: An access token is used as call credentials for the SDS server. GCE_VM: The local GCE VM service account credentials are used to access the SDS server. JWT_SERVICE_TOKEN: The user provisioned service account credentials are used to access the SDS server. FROM_PLUGIN: Custom authenticator credentials are used to access the SDS server.", + "enum": [ + "ACCESS_TOKEN", + "FROM_PLUGIN", + "GCE_VM", + "INVALID", + "JWT_SERVICE_ACCOUNT" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "fromPlugin": { + "$ref": "MetadataCredentialsFromPlugin", + "description": "Custom authenticator credentials." + }, + "jwtServiceAccount": { + "$ref": "ServiceAccountJwtAccessCredentials", + "description": "This service account credentials are used as call credentials for the SDS server. This field is used only if callCredentialType is JWT_SERVICE_ACCOUNT." + } + }, + "type": "object" + }, + "ChannelCredentials": { + "description": "gRPC channel credentials to access the SDS server.", + "id": "ChannelCredentials", "properties": { - "allocations": { - "description": "List of allocations for this commitment.", + "certificates": { + "$ref": "TlsCertificatePaths", + "description": "The call credentials to access the SDS server." + }, + "channelCredentialType": { + "description": "The channel credentials to access the SDS server. This field can be set to one of the following: CERTIFICATES: Use TLS certificates to access the SDS server. GCE_VM: Use local GCE VM credentials to access the SDS server.", + "enum": [ + "CERTIFICATES", + "GCE_VM", + "INVALID" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "CircuitBreakers": { + "description": "Settings controlling the volume of connections to a backend service.", + "id": "CircuitBreakers", + "properties": { + "connectTimeout": { + "$ref": "Duration", + "description": "The timeout for new network connections to hosts." + }, + "maxConnections": { + "description": "The maximum number of connections to the backend cluster. 
If not specified, the default is 1024.", + "format": "int32", + "type": "integer" + }, + "maxPendingRequests": { + "description": "The maximum number of pending requests allowed to the backend cluster. If not specified, the default is 1024.", + "format": "int32", + "type": "integer" + }, + "maxRequests": { + "description": "The maximum number of parallel requests that allowed to the backend cluster. If not specified, the default is 1024.", + "format": "int32", + "type": "integer" + }, + "maxRequestsPerConnection": { + "description": "Maximum requests for a single backend connection. This parameter is respected by both the HTTP/1.1 and HTTP/2 implementations. If not specified, there is no limit. Setting this parameter to 1 will effectively disable keep alive.", + "format": "int32", + "type": "integer" + }, + "maxRetries": { + "description": "The maximum number of parallel retries allowed to the backend cluster. If not specified, the default is 3.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "ClientTlsSettings": { + "description": "The client side authentication settings for connection originating from the backend service.", + "id": "ClientTlsSettings", + "properties": { + "clientTlsContext": { + "$ref": "TlsContext", + "description": "Configures the mechanism to obtain client-side security certificates and identity information. This field is only applicable when mode is set to MUTUAL." + }, + "mode": { + "description": "Indicates whether connections to this port should be secured using TLS. The value of this field determines how TLS is enforced. This can be set to one of the following values: DISABLE: Do not setup a TLS connection to the backends. SIMPLE: Originate a TLS connection to the backends. MUTUAL: Secure connections to the backends using mutual TLS by presenting client certificates for authentication.", + "enum": [ + "DISABLE", + "INVALID", + "MUTUAL", + "SIMPLE" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "sni": { + "description": "SNI string to present to the server during TLS handshake. This field is applicable only when mode is SIMPLE or MUTUAL.", + "type": "string" + }, + "subjectAltNames": { + "description": "A list of alternate names to verify the subject identity in the certificate.If specified, the proxy will verify that the server certificate's subject alt name matches one of the specified values. This field is applicable only when mode is SIMPLE or MUTUAL.", "items": { - "$ref": "Allocation" + "type": "string" }, "type": "array" - }, + } + }, + "type": "object" + }, + "Commitment": { + "description": "Represents a Commitment resource. Creating a Commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts.\n\nCommitted use discounts are subject to Google Cloud Platform's Service Specific Terms. By purchasing a committed use discount, you agree to these terms. Committed use discounts will not renew, so you must purchase a new commitment to continue receiving discounts. 
(== resource_for beta.commitments ==) (== resource_for v1.commitments ==)", + "id": "Commitment", + "properties": { "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -29272,6 +30842,13 @@ "description": "[Output Only] URL of the region where this commitment may be used.", "type": "string" }, + "reservations": { + "description": "List of reservations for this commitment.", + "items": { + "$ref": "Reservation" + }, + "type": "array" + }, "resources": { "description": "A list of commitment amounts for particular resources. Note that VCPU and MEMORY resource commitments must occur together.", "items": { @@ -29283,6 +30860,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "startTimestamp": { "description": "[Output Only] Commitment start time in RFC3339 text format.", "type": "string" @@ -29310,11 +30891,13 @@ "type": { "description": "The type of commitment, which affects the discount rate and the eligible resources. Type MEMORY_OPTIMIZED specifies a commitment that will only apply to memory optimized machines.", "enum": [ + "COMPUTE_OPTIMIZED", "GENERAL_PURPOSE", "MEMORY_OPTIMIZED", "TYPE_UNSPECIFIED" ], "enumDescriptions": [ + "", "", "", "" @@ -29710,12 +31293,8 @@ ], "type": "string" }, - "value": { - "description": "DEPRECATED. Use 'values' instead.", - "type": "string" - }, "values": { - "description": "The objects of the condition. This is mutually exclusive with 'value'.", + "description": "The objects of the condition.", "items": { "type": "string" }, @@ -29736,6 +31315,100 @@ }, "type": "object" }, + "ConsistentHashLoadBalancerSettings": { + "description": "This message defines settings for a consistent hash style load balancer.", + "id": "ConsistentHashLoadBalancerSettings", + "properties": { + "httpCookie": { + "$ref": "ConsistentHashLoadBalancerSettingsHttpCookie", + "description": "Hash is based on HTTP Cookie. This field describes a HTTP cookie that will be used as the hash key for the consistent hash load balancer. If the cookie is not present, it will be generated. This field is applicable if the sessionAffinity is set to HTTP_COOKIE." + }, + "httpHeaderName": { + "description": "The hash based on the value of the specified header field. This field is applicable if the sessionAffinity is set to HEADER_FIELD.", + "type": "string" + }, + "minimumRingSize": { + "description": "The minimum number of virtual nodes to use for the hash ring. Defaults to 1024. Larger ring sizes result in more granular load distributions. If the number of hosts in the load balancing pool is larger than the ring size, each host will be assigned a single virtual node.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "ConsistentHashLoadBalancerSettingsHttpCookie": { + "description": "The information about the HTTP Cookie on which the hash function is based for load balancing policies that use a consistent hash.", + "id": "ConsistentHashLoadBalancerSettingsHttpCookie", + "properties": { + "name": { + "description": "Name of the cookie.", + "type": "string" + }, + "path": { + "description": "Path to set for the cookie.", + "type": "string" + }, + "ttl": { + "$ref": "Duration", + "description": "Lifetime of the cookie." 
+ } + }, + "type": "object" + }, + "CorsPolicy": { + "description": "The specification for allowing client side cross-origin requests. Please see W3C Recommendation for Cross Origin Resource Sharing", + "id": "CorsPolicy", + "properties": { + "allowCredentials": { + "description": "In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This translates to the Access-Control-Allow-Credentials header.\nDefault is false.", + "type": "boolean" + }, + "allowHeaders": { + "description": "Specifies the content for the Access-Control-Allow-Headers header.", + "items": { + "type": "string" + }, + "type": "array" + }, + "allowMethods": { + "description": "Specifies the content for the Access-Control-Allow-Methods header.", + "items": { + "type": "string" + }, + "type": "array" + }, + "allowOriginRegexes": { + "description": "Specifies the regualar expression patterns that match allowed origins. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript \nAn origin is allowed if it matches either allow_origins or allow_origin_regex.", + "items": { + "type": "string" + }, + "type": "array" + }, + "allowOrigins": { + "description": "Specifies the list of origins that will be allowed to do CORS requests.\nAn origin is allowed if it matches either allow_origins or allow_origin_regex.", + "items": { + "type": "string" + }, + "type": "array" + }, + "disabled": { + "description": "If true, specifies the CORS policy is disabled. The default value of false, which indicates that the CORS policy is in effect.", + "type": "boolean" + }, + "exposeHeaders": { + "description": "Specifies the content for the Access-Control-Expose-Headers header.", + "items": { + "type": "string" + }, + "type": "array" + }, + "maxAge": { + "description": "Specifies how long the results of a preflight request can be cached. This translates to the content for the Access-Control-Max-Age header.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "CustomerEncryptionKey": { "description": "Represents a customer-supplied encryption key", "id": "CustomerEncryptionKey", @@ -29744,6 +31417,9 @@ "description": "The name of the encryption key that is stored in Google Cloud KMS.", "type": "string" }, + "kmsKeyServiceAccount": { + "type": "string" + }, "rawKey": { "description": "Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource.", "type": "string" @@ -29794,13 +31470,15 @@ "type": "string" }, "state": { - "description": "The deprecation state of this resource. This can be DEPRECATED, OBSOLETE, or DELETED. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error.", + "description": "The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end of life date for an image, can use ACTIVE. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. 
Operations which use OBSOLETE or DELETED resources will be rejected and result in an error.", "enum": [ + "ACTIVE", "DELETED", "DEPRECATED", "OBSOLETE" ], "enumDescriptions": [ + "", "", "", "" @@ -29824,7 +31502,7 @@ }, "diskEncryptionKey": { "$ref": "CustomerEncryptionKey", - "description": "Encrypts the disk using a customer-supplied encryption key.\n\nAfter you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later (e.g. to create a disk snapshot or an image, or to attach the disk to a virtual machine).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later." + "description": "Encrypts the disk using a customer-supplied encryption key.\n\nAfter you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later (e.g. to create a disk snapshot, to create a disk image, to create a machine image, or to attach the disk to a virtual machine).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later." }, "guestOsFeatures": { "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", @@ -29878,6 +31556,10 @@ }, "type": "array" }, + "multiWriter": { + "description": "Indicates whether or not the disk can be read/write attached to more than one instance.", + "type": "boolean" + }, "name": { "annotations": { "required": [ @@ -29924,6 +31606,14 @@ "format": "int64", "type": "string" }, + "sourceDisk": { + "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk", + "type": "string" + }, + "sourceDiskId": { + "description": "[Output Only] The unique ID of the disk used to create this disk. This value identifies the exact disk that was used to create this persistent disk. For example, if you created the persistent disk from a disk that was later deleted and recreated under the same name, the source disk ID would identify the exact version of the disk that was used.", + "type": "string" + }, "sourceImage": { "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image:\nprojects/debian-cloud/global/images/family/debian-9\n\n\nAlternatively, use a specific version of a public operating system image:\nprojects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD\n\n\nTo create a disk with a custom image that you created, specify the image name in the following format:\nglobal/images/my-custom-image\n\n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. 
Replace the image name with family/family-name:\nglobal/images/family/my-image-family", "type": "string" @@ -29952,6 +31642,7 @@ "description": "[Output Only] The status of disk creation.", "enum": [ "CREATING", + "DELETING", "FAILED", "READY", "RESTORING" @@ -29960,6 +31651,7 @@ "", "", "", + "", "" ], "type": "string" @@ -29977,11 +31669,11 @@ "type": "string" }, "type": { - "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. For example: project/zones/zone/diskTypes/pd-standard or pd-ssd", + "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. For example: projects/project/zones/zone/diskTypes/pd-standard or pd-ssd", "type": "string" }, "users": { - "description": "[Output Only] Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance", + "description": "[Output Only] Links to the users of the disk (attached instances) in form: projects/project/zones/zone/instances/instance", "items": { "type": "string" }, @@ -30317,6 +32009,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "validDiskSize": { "description": "[Output Only] An optional textual description of the valid disk size, such as \"10GB-10TB\".", "type": "string" @@ -30817,6 +32513,23 @@ }, "type": "object" }, + "Duration": { + "description": "A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like \"day\" or \"month\". Range is approximately 10,000 years.", + "id": "Duration", + "properties": { + "nanos": { + "description": "Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive.", + "format": "int32", + "type": "integer" + }, + "seconds": { + "description": "Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "ExchangedPeeringRoute": { "id": "ExchangedPeeringRoute", "properties": { @@ -30988,179 +32701,410 @@ }, "type": "object" }, - "Firewall": { - "description": "Represents a Firewall resource.", - "id": "Firewall", + "ExternalVpnGateway": { + "description": "External VPN gateway is the on-premises VPN gateway(s) or another cloud provider?s VPN gateway that connects to your Google Cloud VPN gateway. To create a highly available VPN from Google Cloud to your on-premises side or another Cloud provider's VPN gateway, you must create a external VPN gateway resource in GCP, which provides the information to GCP about your external VPN gateway.", + "id": "ExternalVpnGateway", "properties": { - "allowed": { - "description": "The list of ALLOW rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", - "items": { - "properties": { - "IPProtocol": { - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. 
This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", - "type": "string" - }, - "ports": { - "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "type": "array" - }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "denied": { - "description": "The list of DENY rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a denied connection.", - "items": { - "properties": { - "IPProtocol": { - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", - "type": "string" - }, - "ports": { - "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "type": "array" - }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "destinationRanges": { - "description": "If destination ranges are specified, the firewall will apply only to traffic that has destination IP address in these ranges. These ranges must be expressed in CIDR format. Only IPv4 is supported.", - "items": { - "type": "string" - }, - "type": "array" - }, - "direction": { - "description": "Direction of traffic to which this firewall applies; default is INGRESS. Note: For INGRESS traffic, it is NOT supported to specify destinationRanges; For EGRESS traffic, it is NOT supported to specify sourceRanges OR sourceTags.", - "enum": [ - "EGRESS", - "INGRESS" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "disabled": { - "description": "Denotes whether the firewall rule is disabled, i.e not applied to the network it is associated with. When set to true, the firewall rule is not enforced and the network behaves as if it did not exist. If this is unspecified, the firewall rule will be enabled.", - "type": "boolean" - }, - "enableLogging": { - "description": "Deprecated in favor of enable in LogConfig. This field denotes whether to enable logging for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver.", - "type": "boolean" - }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", "type": "string" }, + "interfaces": { + "description": "List of interfaces for this external VPN gateway.", + "items": { + "$ref": "ExternalVpnGatewayInterface" + }, + "type": "array" + }, "kind": { - "default": "compute#firewall", - "description": "[Output Only] Type of the resource. 
Always compute#firewall for firewall rules.", + "default": "compute#externalVpnGateway", + "description": "[Output Only] Type of the resource. Always compute#externalVpnGateway for externalVpnGateways.", "type": "string" }, - "logConfig": { - "$ref": "FirewallLogConfig", - "description": "This field denotes the logging options for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver." + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this ExternalVpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an ExternalVpnGateway.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this ExternalVpnGateway resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", + "type": "object" }, "name": { "annotations": { "required": [ - "compute.firewalls.insert", - "compute.firewalls.patch" + "compute.externalVpnGateways.insert" ] }, - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "network": { - "description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used:\nglobal/networks/default\nIf you choose to specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network \n- projects/myproject/global/networks/my-network \n- global/networks/default", + "redundancyType": { + "description": "Indicates the user-supplied redundancy type of this external VPN gateway.", + "enum": [ + "FOUR_IPS_REDUNDANCY", + "SINGLE_IP_INTERNALLY_REDUNDANT", + "TWO_IPS_REDUNDANCY" + ], + "enumDescriptions": [ + "", + "", + "" + ], "type": "string" }, - "priority": { - "description": "Priority for this rule. This is an integer between 0 and 65535, both inclusive. When not specified, the value assumed is 1000. Relative priorities determine precedence of conflicting rules. 
Lower value of priority implies higher precedence (eg, a rule with priority 0 has higher precedence than a rule with priority 1). DENY rules take precedence over ALLOW rules having equal priority.", - "format": "int32", - "type": "integer" - }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" + } + }, + "type": "object" + }, + "ExternalVpnGatewayInterface": { + "description": "The interface for the external VPN gateway.", + "id": "ExternalVpnGatewayInterface", + "properties": { + "id": { + "description": "The numeric ID of this interface. The allowed input values for this id for different redundancy types of external VPN gateway: SINGLE_IP_INTERNALLY_REDUNDANT - 0 TWO_IPS_REDUNDANCY - 0, 1 FOUR_IPS_REDUNDANCY - 0, 1, 2, 3", + "format": "uint32", + "type": "integer" }, - "sourceRanges": { - "description": "If source ranges are specified, the firewall will apply only to traffic that has source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply. Only IPv4 is supported.", - "items": { - "type": "string" - }, - "type": "array" - }, - "sourceServiceAccounts": { - "description": "If source service accounts are specified, the firewall will apply only to traffic originating from an instance with a service account in this list. Source service accounts cannot be used to control traffic to an instance's external IP address because service accounts are associated with an instance, not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. If both are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP belongs to an instance with service account listed in sourceServiceAccount. The connection does not need to match both properties for the firewall to apply. sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags.", - "items": { - "type": "string" - }, - "type": "array" - }, - "sourceTags": { - "description": "If source tags are specified, the firewall rule applies only to traffic with source IPs that match the primary network interfaces of VM instances that have the tag and are in the same VPC network. Source tags cannot be used to control traffic to an instance's external IP address, it only applies to traffic between instances in the same virtual network. Because tags are associated with instances, not IP addresses. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.", - "items": { - "type": "string" - }, - "type": "array" - }, - "targetServiceAccounts": { - "description": "A list of service accounts indicating sets of instances located in the network that may make network connections as specified in allowed[]. targetServiceAccounts cannot be used at the same time as targetTags or sourceTags. 
If neither targetServiceAccounts nor targetTags are specified, the firewall rule applies to all instances on the specified network.", - "items": { - "type": "string" - }, - "type": "array" - }, - "targetTags": { - "description": "A list of tags that controls which instances the firewall rule applies to. If targetTags are specified, then the firewall rule applies only to instances in the VPC network that have one of those tags. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", - "items": { - "type": "string" - }, - "type": "array" + "ipAddress": { + "description": "IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider?s VPN gateway, it cannot be an IP address from Google Compute Engine.", + "type": "string" } }, "type": "object" }, - "FirewallList": { - "description": "Contains a list of firewalls.", - "id": "FirewallList", + "ExternalVpnGatewayList": { + "description": "Response to the list request, and contains a list of externalVpnGateways.", + "id": "ExternalVpnGatewayList", "properties": { + "etag": { + "type": "string" + }, "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Firewall resources.", + "description": "A list of ExternalVpnGateway resources.", "items": { - "$ref": "Firewall" + "$ref": "ExternalVpnGateway" }, "type": "array" }, "kind": { - "default": "compute#firewallList", - "description": "[Output Only] Type of resource. Always compute#firewallList for lists of firewalls.", + "default": "compute#externalVpnGatewayList", + "description": "[Output Only] Type of resource. Always compute#externalVpnGatewayList for lists of externalVpnGateways.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "FileContentBuffer": { + "id": "FileContentBuffer", + "properties": { + "content": { + "description": "The raw content in the secure keys file.", + "format": "byte", + "type": "string" + }, + "fileType": { + "enum": [ + "BIN", + "UNDEFINED", + "X509" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "Firewall": { + "description": "Represents a Firewall resource.", + "id": "Firewall", + "properties": { + "allowed": { + "description": "The list of ALLOW rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", + "items": { + "properties": { + "IPProtocol": { + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", + "type": "string" + }, + "ports": { + "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. 
If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "array" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "denied": { + "description": "The list of DENY rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a denied connection.", + "items": { + "properties": { + "IPProtocol": { + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", + "type": "string" + }, + "ports": { + "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "array" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "destinationRanges": { + "description": "If destination ranges are specified, the firewall will apply only to traffic that has destination IP address in these ranges. These ranges must be expressed in CIDR format. Only IPv4 is supported.", + "items": { + "type": "string" + }, + "type": "array" + }, + "direction": { + "description": "Direction of traffic to which this firewall applies; default is INGRESS. Note: For INGRESS traffic, it is NOT supported to specify destinationRanges; For EGRESS traffic, it is NOT supported to specify sourceRanges OR sourceTags.", + "enum": [ + "EGRESS", + "INGRESS" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "disabled": { + "description": "Denotes whether the firewall rule is disabled, i.e not applied to the network it is associated with. When set to true, the firewall rule is not enforced and the network behaves as if it did not exist. If this is unspecified, the firewall rule will be enabled.", + "type": "boolean" + }, + "enableLogging": { + "description": "Deprecated in favor of enable in LogConfig. This field denotes whether to enable logging for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver.", + "type": "boolean" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#firewall", + "description": "[Output Only] Type of the resource. Always compute#firewall for firewall rules.", + "type": "string" + }, + "logConfig": { + "$ref": "FirewallLogConfig", + "description": "This field denotes the logging options for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver." + }, + "name": { + "annotations": { + "required": [ + "compute.firewalls.insert", + "compute.firewalls.patch" + ] + }, + "description": "Name of the resource; provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "network": { + "description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used:\nglobal/networks/default\nIf you choose to specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network \n- projects/myproject/global/networks/my-network \n- global/networks/default", + "type": "string" + }, + "priority": { + "description": "Priority for this rule. This is an integer between 0 and 65535, both inclusive. When not specified, the value assumed is 1000. Relative priorities determine precedence of conflicting rules. Lower value of priority implies higher precedence (eg, a rule with priority 0 has higher precedence than a rule with priority 1). DENY rules take precedence over ALLOW rules having equal priority.", + "format": "int32", + "type": "integer" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, + "sourceRanges": { + "description": "If source ranges are specified, the firewall will apply only to traffic that has source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply. Only IPv4 is supported.", + "items": { + "type": "string" + }, + "type": "array" + }, + "sourceServiceAccounts": { + "description": "If source service accounts are specified, the firewall will apply only to traffic originating from an instance with a service account in this list. Source service accounts cannot be used to control traffic to an instance's external IP address because service accounts are associated with an instance, not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. If both are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP belongs to an instance with service account listed in sourceServiceAccount. The connection does not need to match both properties for the firewall to apply. sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags.", + "items": { + "type": "string" + }, + "type": "array" + }, + "sourceTags": { + "description": "If source tags are specified, the firewall rule applies only to traffic with source IPs that match the primary network interfaces of VM instances that have the tag and are in the same VPC network. Source tags cannot be used to control traffic to an instance's external IP address, it only applies to traffic between instances in the same virtual network. 
Because tags are associated with instances, not IP addresses. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.", + "items": { + "type": "string" + }, + "type": "array" + }, + "targetServiceAccounts": { + "description": "A list of service accounts indicating sets of instances located in the network that may make network connections as specified in allowed[]. targetServiceAccounts cannot be used at the same time as targetTags or sourceTags. If neither targetServiceAccounts nor targetTags are specified, the firewall rule applies to all instances on the specified network.", + "items": { + "type": "string" + }, + "type": "array" + }, + "targetTags": { + "description": "A list of tags that controls which instances the firewall rule applies to. If targetTags are specified, then the firewall rule applies only to instances in the VPC network that have one of those tags. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "FirewallList": { + "description": "Contains a list of firewalls.", + "id": "FirewallList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of Firewall resources.", + "items": { + "$ref": "Firewall" + }, + "type": "array" + }, + "kind": { + "default": "compute#firewallList", + "description": "[Output Only] Type of resource. Always compute#firewallList for lists of firewalls.", "type": "string" }, "nextPageToken": { @@ -31262,6 +33206,18 @@ "enable": { "description": "This field denotes whether to enable logging for a particular firewall rule.", "type": "boolean" + }, + "metadata": { + "description": "This field can only be specified for a particular firewall rule if logging is enabled for that rule. This field denotes whether to include or exclude metadata for firewall logs.", + "enum": [ + "EXCLUDE_ALL_METADATA", + "INCLUDE_ALL_METADATA" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" } }, "type": "object" @@ -31271,7 +33227,7 @@ "id": "FixedOrPercent", "properties": { "calculated": { - "description": "[Output Only] Absolute value of VM instances calculated based on the specific mode.\n\n \n- If the value is fixed, then the caculated value is equal to the fixed value. \n- If the value is a percent, then the calculated value is percent/100 * targetSize. For example, the calculated value of a 80% of a managed instance group with 150 instances would be (80/100 * 150) = 120 VM instances. If there is a remainder, the number is rounded up.", + "description": "[Output Only] Absolute value of VM instances calculated based on the specific mode.\n\n \n- If the value is fixed, then the calculated value is equal to the fixed value. \n- If the value is a percent, then the calculated value is percent/100 * targetSize. For example, the calculated value of a 80% of a managed instance group with 150 instances would be (80/100 * 150) = 120 VM instances. 
If there is a remainder, the number is rounded up.", "format": "int32", "type": "integer" }, @@ -31395,6 +33351,13 @@ ], "type": "string" }, + "metadataFilters": { + "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels in the provided metadata.\nmetadataFilters specified here can be overridden by those specified in the UrlMap that this ForwardingRule references.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "items": { + "$ref": "MetadataFilter" + }, + "type": "array" + }, "name": { "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -31437,6 +33400,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "serviceLabel": { "description": "An optional prefix to the service name for this Forwarding Rule. If specified, will be the first label of the fully qualified service name.\n\nThe label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.\n\nThis field is only used for internal load balancing.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -31451,7 +33418,7 @@ "type": "string" }, "target": { - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For INTERNAL_SELF_MANAGED\" load balancing, only HTTP and HTTPS targets are valid.", + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. 
For INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are valid.", "type": "string" } }, @@ -31784,6 +33751,32 @@ }, "type": "object" }, + "GlobalNetworkEndpointGroupsAttachEndpointsRequest": { + "id": "GlobalNetworkEndpointGroupsAttachEndpointsRequest", + "properties": { + "networkEndpoints": { + "description": "The list of network endpoints to be attached.", + "items": { + "$ref": "NetworkEndpoint" + }, + "type": "array" + } + }, + "type": "object" + }, + "GlobalNetworkEndpointGroupsDetachEndpointsRequest": { + "id": "GlobalNetworkEndpointGroupsDetachEndpointsRequest", + "properties": { + "networkEndpoints": { + "description": "The list of network endpoints to be detached.", + "items": { + "$ref": "NetworkEndpoint" + }, + "type": "array" + } + }, + "type": "object" + }, "GlobalSetLabelsRequest": { "id": "GlobalSetLabelsRequest", "properties": { @@ -31824,6 +33817,25 @@ }, "type": "object" }, + "GrpcServiceConfig": { + "description": "gRPC config to access the SDS server.", + "id": "GrpcServiceConfig", + "properties": { + "callCredentials": { + "$ref": "CallCredentials", + "description": "The call credentials to access the SDS server." + }, + "channelCredentials": { + "$ref": "ChannelCredentials", + "description": "The channel credentials to access the SDS server." + }, + "targetUri": { + "description": "The target URI of the SDS server.", + "type": "string" + } + }, + "type": "object" + }, "GuestAttributes": { "description": "A guest attributes entry.", "id": "GuestAttributes", @@ -31896,6 +33908,7 @@ "description": "The ID of a supported feature. Read Enabling guest operating system features to see a list of available options.", "enum": [ "FEATURE_TYPE_UNSPECIFIED", + "GVNIC", "MULTI_IP_SUBNET", "SECURE_BOOT", "UEFI_COMPATIBLE", @@ -31908,6 +33921,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32128,6 +34142,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "sslHealthCheck": { "$ref": "SSLHealthCheck" }, @@ -32140,7 +34158,7 @@ "type": "integer" }, "type": { - "description": "Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If not specified, the default is TCP. Exactly one of the protocol-specific health check field must be specified, which must match type field.", + "description": "Specifies the type of the healthCheck, either TCP, SSL, HTTP, HTTPS or HTTP2. If not specified, the default is TCP. Exactly one of the protocol-specific health check field must be specified, which must match type field.", "enum": [ "HTTP", "HTTP2", @@ -32294,6 +34312,192 @@ }, "type": "object" }, + "HealthCheckService": { + "description": "A HealthCheckService defines a set of backends on which to perform periodic health checks and an endpoint to which to send notification of changes in the health status of the backends.", + "id": "HealthCheckService", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "healthChecks": { + "description": "List of URLs to the HealthCheck resources. Must have at least one HealthCheck, and not more than 10. HealthCheck resources must have portSpecification=USE_SERVING_PORT. 
For regional HealthCheckService, the HealthCheck must be regional and in the same region. For global HealthCheckService, HealthCheck must be global. Mix of regional and global HealthChecks is not supported. Multiple regional HealthChecks must belong to the same region. Regional HealthChecks\u003c/code? must belong to the same region as zones of NEGs.", + "items": { + "type": "string" + }, + "type": "array" + }, + "healthStatusAggregationStrategy": { + "description": "Policy for how the results from multiple health checks for the same endpoint are aggregated. \n- NO_AGGREGATION. An EndpointHealth message is returned for each backend in the health check service. \n- AND. If any backend's health check reports UNHEALTHY, then UNHEALTHY is the HealthState of the entire health check service. If all backend's are healthy, the HealthState of the health check service is HEALTHY. .", + "enum": [ + "AND", + "NO_AGGREGATION" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#healthCheckService", + "description": "[Output only] Type of the resource. Always compute#healthCheckServicefor health check services.", + "type": "string" + }, + "name": { + "description": "Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "networkEndpointGroups": { + "description": "List of URLs to the NetworkEndpointGroup resources. Must not have more than 100. For regional HealthCheckService, NEGs must be in zones in the region of the HealthCheckService.", + "items": { + "type": "string" + }, + "type": "array" + }, + "notificationEndpoints": { + "description": "List of URLs to the NotificationEndpoint resources. Must not have more than 10. A list of endpoints for receiving notifications of change in health status. For regional HealthCheckService, NotificationEndpoint must be regional and in the same region. For global HealthCheckService, NotificationEndpoint must be global.", + "items": { + "type": "string" + }, + "type": "array" + }, + "region": { + "description": "[Output Only] URL of the region where the health check service resides. This field is not applicable to global health check services. You must specify this field as part of the HTTP request URL. 
It is not settable as a field in the request body.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL with id for the resource.", + "type": "string" + } + }, + "type": "object" + }, + "HealthCheckServicesList": { + "id": "HealthCheckServicesList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of HealthCheckService resources.", + "items": { + "$ref": "HealthCheckService" + }, + "type": "array" + }, + "kind": { + "default": "compute#healthCheckServicesList", + "description": "[Output Only] Type of the resource. Always compute#healthCheckServicesList for lists of HealthCheckServices.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "HealthChecksAggregatedList": { "id": "HealthChecksAggregatedList", "properties": { @@ -32594,6 +34798,147 @@ }, "type": "object" }, + "HttpFaultAbort": { + "description": "Specification for how requests are aborted as part of fault injection.", + "id": "HttpFaultAbort", + "properties": { + "httpStatus": { + "description": "The HTTP status code used to abort the request.\nThe value must be between 200 and 599 inclusive.", + "format": "uint32", + "type": "integer" + }, + "percentage": { + "description": "The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection.\nThe value must be between 0.0 and 100.0 inclusive.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, + "HttpFaultDelay": { + "description": "Specifies the delay introduced by Loadbalancer before forwarding the request to the backend service as part of fault injection.", + "id": "HttpFaultDelay", + "properties": { + "fixedDelay": { + "$ref": "Duration", + "description": "Specifies the value of the fixed delay interval." + }, + "percentage": { + "description": "The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection.\nThe value must be between 0.0 and 100.0 inclusive.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, + "HttpFaultInjection": { + "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the Loadbalancer for a percentage of requests.", + "id": "HttpFaultInjection", + "properties": { + "abort": { + "$ref": "HttpFaultAbort", + "description": "The specification for how client requests are aborted as part of fault injection." + }, + "delay": { + "$ref": "HttpFaultDelay", + "description": "The specification for how client requests are delayed as part of fault injection, before being sent to a backend service." 
+ } + }, + "type": "object" + }, + "HttpHeaderAction": { + "description": "The request and response header transformations that take effect before the request is passed along to the selected backendService.", + "id": "HttpHeaderAction", + "properties": { + "requestHeadersToAdd": { + "description": "Headers to add to a matching request prior to forwarding the request to the backendService.", + "items": { + "$ref": "HttpHeaderOption" + }, + "type": "array" + }, + "requestHeadersToRemove": { + "description": "A list of header names for headers that need to be removed from the request prior to forwarding the request to the backendService.", + "items": { + "type": "string" + }, + "type": "array" + }, + "responseHeadersToAdd": { + "description": "Headers to add the response prior to sending the response back to the client.", + "items": { + "$ref": "HttpHeaderOption" + }, + "type": "array" + }, + "responseHeadersToRemove": { + "description": "A list of header names for headers that need to be removed from the response prior to sending the response back to the client.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "HttpHeaderMatch": { + "description": "matchRule criteria for request header matches.", + "id": "HttpHeaderMatch", + "properties": { + "exactMatch": { + "description": "The value should exactly match contents of exactMatch.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "string" + }, + "headerName": { + "description": "The name of the HTTP header to match.\nFor matching against the HTTP request's authority, use a headerMatch with the header name \":authority\".\nFor matching a request's method, use the headerName \":method\".", + "type": "string" + }, + "invertMatch": { + "description": "If set to false, the headerMatch is considered a match if the match criteria above are met. If set to true, the headerMatch is considered a match if the match criteria above are NOT met.\nThe default setting is false.", + "type": "boolean" + }, + "prefixMatch": { + "description": "The value of the header must start with the contents of prefixMatch.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "string" + }, + "presentMatch": { + "description": "A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value or not.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "boolean" + }, + "rangeMatch": { + "$ref": "Int64RangeMatch", + "description": "The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer, number or is empty, the match fails.\nFor example for a range [-5, 0] \n- -3 will match. \n- 0 will not match. \n- 0.25 will not match. \n- -3someString will not match. \nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set." + }, + "regexMatch": { + "description": "The value of the header must match the regualar expression specified in regexMatch. 
For regular expression grammar, please see: en.cppreference.com/w/cpp/regex/ecmascript \nFor matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "string" + }, + "suffixMatch": { + "description": "The value of the header must end with the contents of suffixMatch.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "string" + } + }, + "type": "object" + }, + "HttpHeaderOption": { + "description": "Specification determining how headers are added to requests or responses.", + "id": "HttpHeaderOption", + "properties": { + "headerName": { + "description": "The name of the header.", + "type": "string" + }, + "headerValue": { + "description": "The value of the header to add.", + "type": "string" + }, + "replace": { + "description": "If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header.\nThe default value is false.", + "type": "boolean" + } + }, + "type": "object" + }, "HttpHealthCheck": { "description": "An HttpHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTP.", "id": "HttpHealthCheck", @@ -32648,6 +34993,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "timeoutSec": { "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec.", "format": "int32", @@ -32773,6 +35122,207 @@ }, "type": "object" }, + "HttpQueryParameterMatch": { + "description": "HttpRouteRuleMatch criteria for a request's query parameter.", + "id": "HttpQueryParameterMatch", + "properties": { + "exactMatch": { + "description": "The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch.\nOnly one of presentMatch, exactMatch and regexMatch must be set.", + "type": "string" + }, + "name": { + "description": "The name of the query parameter to match. The query parameter must exist in the request, in the absence of which the request match fails.", + "type": "string" + }, + "presentMatch": { + "description": "Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not.\nOnly one of presentMatch, exactMatch and regexMatch must be set.", + "type": "boolean" + }, + "regexMatch": { + "description": "The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. 
For the regular expression grammar, please see en.cppreference.com/w/cpp/regex/ecmascript \nOnly one of presentMatch, exactMatch and regexMatch must be set.", + "type": "string" + } + }, + "type": "object" + }, + "HttpRedirectAction": { + "description": "Specifies settings for an HTTP redirect.", + "id": "HttpRedirectAction", + "properties": { + "hostRedirect": { + "description": "The host that will be used in the redirect response instead of the one that was supplied in the request.\nThe value must be between 1 and 255 characters.", + "type": "string" + }, + "httpsRedirect": { + "description": "If set to true, the URL scheme in the redirected request is set to https. If set to false, the URL scheme of the redirected request will remain the same as that of the request.\nThis must only be set for UrlMaps used in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted.\nThe default is set to false.", + "type": "boolean" + }, + "pathRedirect": { + "description": "The path that will be used in the redirect response instead of the one that was supplied in the request.\nOnly one of pathRedirect or prefixRedirect must be specified.\nThe value must be between 1 and 1024 characters.", + "type": "string" + }, + "prefixRedirect": { + "description": "The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request.", + "type": "string" + }, + "redirectResponseCode": { + "description": "The HTTP Status code to use for this RedirectAction.\nSupported values are: \n- MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. \n- FOUND, which corresponds to 302. \n- SEE_OTHER which corresponds to 303. \n- TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method will be retained. \n- PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method will be retained.", + "enum": [ + "FOUND", + "MOVED_PERMANENTLY_DEFAULT", + "PERMANENT_REDIRECT", + "SEE_OTHER", + "TEMPORARY_REDIRECT" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "stripQuery": { + "description": "If set to true, any accompanying query portion of the original URL is removed prior to redirecting the request. If set to false, the query portion of the original URL is retained.\nThe default is set to false.", + "type": "boolean" + } + }, + "type": "object" + }, + "HttpRetryPolicy": { + "description": "The retry policy associates with HttpRouteRule", + "id": "HttpRetryPolicy", + "properties": { + "numRetries": { + "description": "Specifies the allowed number retries. This number must be \u003e 0.", + "format": "uint32", + "type": "integer" + }, + "perTryTimeout": { + "$ref": "Duration", + "description": "Specifies a non-zero timeout per retry attempt." + }, + "retryConditions": { + "description": "Specfies one or more conditions when this retry rule applies. Valid values are: \n- 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, or if the backend service does not respond at all, example: disconnects, reset, read timeout, connection failure, and refused streams. \n- gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504.\n- \n- connect-failure: Loadbalancer will retry on failures connecting to backend services, for example due to connection timeouts. \n- retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. 
Currently the only retriable error supported is 409. \n- refused-stream: Loadbalancer will retry if the backend service resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. \n- cancelled: Loadbalancer will retry if the gRPC status code in the response header is set to cancelled \n- deadline-exceeded: Loadbalancer will retry if the gRPC status code in the response header is set to deadline-exceeded \n- resource-exhausted: Loadbalancer will retry if the gRPC status code in the response header is set to resource-exhausted \n- unavailable: Loadbalancer will retry if the gRPC status code in the response header is set to unavailable", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "HttpRouteAction": { + "id": "HttpRouteAction", + "properties": { + "corsPolicy": { + "$ref": "CorsPolicy", + "description": "The specification for allowing client side cross-origin requests. Please see W3C Recommendation for Cross Origin Resource Sharing" + }, + "faultInjectionPolicy": { + "$ref": "HttpFaultInjection", + "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a percentage of requests before sending those requests to the backend service. Similarly requests from clients can be aborted by the Loadbalancer for a percentage of requests.\ntimeout and retry_policy will be ignored by clients that are configured with a fault_injection_policy." + }, + "requestMirrorPolicy": { + "$ref": "RequestMirrorPolicy", + "description": "Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, the host / authority header is suffixed with -shadow." + }, + "retryPolicy": { + "$ref": "HttpRetryPolicy", + "description": "Specifies the retry policy associated with this route." + }, + "timeout": { + "$ref": "Duration", + "description": "Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries.\nIf not specified, the default value is 15 seconds." + }, + "urlRewrite": { + "$ref": "UrlRewrite", + "description": "The spec to modify the URL of the request, prior to forwarding the request to the matched service" + }, + "weightedBackendServices": { + "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. 
If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number.\nOnce a backendService is identified and before forwarding the request to the backend service, advanced routing actions like URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", + "items": { + "$ref": "WeightedBackendService" + }, + "type": "array" + } + }, + "type": "object" + }, + "HttpRouteRule": { + "description": "An HttpRouteRule specifies how to match an HTTP request and the corresponding routing action that load balancing proxies will perform.", + "id": "HttpRouteRule", + "properties": { + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here is applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction" + }, + "matchRules": { + "items": { + "$ref": "HttpRouteRuleMatch" + }, + "type": "array" + }, + "routeAction": { + "$ref": "HttpRouteAction", + "description": "In response to a matching matchRule, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set." + }, + "service": { + "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", + "type": "string" + }, + "urlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When this rule is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set." + } + }, + "type": "object" + }, + "HttpRouteRuleMatch": { + "description": "HttpRouteRuleMatch specifies a set of criteria for matching requests to an HttpRouteRule. 
All specified criteria must be satisfied for a match to occur.", + "id": "HttpRouteRuleMatch", + "properties": { + "fullPathMatch": { + "description": "For satisfying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL.\nFullPathMatch must be between 1 and 1024 characters.\nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "type": "string" + }, + "headerMatches": { + "description": "Specifies a list of header match criteria, all of which must match corresponding headers in the request.", + "items": { + "$ref": "HttpHeaderMatch" + }, + "type": "array" + }, + "ignoreCase": { + "description": "Specifies that prefixMatch and fullPathMatch matches are case sensitive.\nThe default value is false.\ncaseSensitive must not be used with regexMatch.", + "type": "boolean" + }, + "metadataFilters": { + "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels in the provided metadata.\nmetadataFilters specified here can override those specified in ForwardingRule that refers to this UrlMap.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "items": { + "$ref": "MetadataFilter" + }, + "type": "array" + }, + "prefixMatch": { + "description": "For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /.\nThe value must be between 1 and 1024 characters.\nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "type": "string" + }, + "queryParameterMatches": { + "description": "Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request.", + "items": { + "$ref": "HttpQueryParameterMatch" + }, + "type": "array" + }, + "regexMatch": { + "description": "For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript \nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "type": "string" + } + }, + "type": "object" + }, "HttpsHealthCheck": { "description": "An HttpsHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTPS.", "id": "HttpsHealthCheck", @@ -32827,6 +35377,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "timeoutSec": { "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. 
It is invalid for timeoutSec to have a greater value than checkIntervalSec.", "format": "int32", @@ -33054,7 +35608,7 @@ "type": "string" }, "sha1Checksum": { - "description": "An optional SHA1 checksum of the disk image before unpackaging; provided by the client when the disk image is created.", + "description": "[Deprecated] This field is deprecated. An optional SHA1 checksum of the disk image before unpackaging provided by the client when the disk image is created.", "pattern": "[a-f0-9]{40}", "type": "string" }, @@ -33074,6 +35628,14 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource's resource id.", + "type": "string" + }, + "shieldedInstanceInitialState": { + "$ref": "InitialStateConfig", + "description": "Set the secure boot keys of shielded instance." + }, "sourceDisk": { "description": "URL of the source disk used to create this image. This can be a full or valid partial URL. You must provide either this property or the rawDisk.source property but not both to create an image. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk", "type": "string" @@ -33124,16 +35686,25 @@ "status": { "description": "[Output Only] The status of the image. An image can be used to create other resources, such as instances, only after the image has been successfully created and the status is set to READY. Possible values are FAILED, PENDING, or READY.", "enum": [ + "DELETING", "FAILED", "PENDING", "READY" ], "enumDescriptions": [ + "", "", "", "" ], "type": "string" + }, + "storageLocations": { + "description": "GCS bucket storage location of the image (regional or multi-regional).", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -33250,14 +35821,42 @@ }, "type": "object" }, + "InitialStateConfig": { + "description": "Initial State for shielded instance, these are public keys which are safe to store in public", + "id": "InitialStateConfig", + "properties": { + "dbs": { + "description": "The Key Database (db).", + "items": { + "$ref": "FileContentBuffer" + }, + "type": "array" + }, + "dbxs": { + "description": "The forbidden key database (dbx).", + "items": { + "$ref": "FileContentBuffer" + }, + "type": "array" + }, + "keks": { + "description": "The Key Exchange Key (KEK).", + "items": { + "$ref": "FileContentBuffer" + }, + "type": "array" + }, + "pk": { + "$ref": "FileContentBuffer", + "description": "The Platform Key (PK)." + } + }, + "type": "object" + }, "Instance": { "description": "An Instance resource. (== resource_for beta.instances ==) (== resource_for v1.instances ==)", "id": "Instance", "properties": { - "allocationAffinity": { - "$ref": "AllocationAffinity", - "description": "The configuration of desired allocations which this Instance could consume capacity from." - }, "canIpForward": { "description": "Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. For more information, see Enabling IP Forwarding.", "type": "boolean" @@ -33364,6 +35963,10 @@ "format": "int64", "type": "string" }, + "reservationAffinity": { + "$ref": "ReservationAffinity", + "description": "The configuration of desired reservations from which this Instance can consume capacity from." 
+ }, "resourcePolicies": { "description": "Resource policies applied to this instance.", "items": { @@ -33379,6 +35982,10 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "serviceAccounts": { "description": "A list of service accounts, with their specified scopes, authorized for this instance. Only one service account per VM instance is supported.\n\nService accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. See Service Accounts for more information.", "items": { @@ -33386,6 +35993,12 @@ }, "type": "array" }, + "shieldedInstanceConfig": { + "$ref": "ShieldedInstanceConfig" + }, + "shieldedInstanceIntegrityPolicy": { + "$ref": "ShieldedInstanceIntegrityPolicy" + }, "shieldedVmConfig": { "$ref": "ShieldedVmConfig" }, @@ -33396,6 +36009,10 @@ "description": "Source machine image", "type": "string" }, + "sourceMachineImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Source GMI encryption key when creating an instance from GMI." + }, "startRestricted": { "description": "[Output Only] Whether a VM has been restricted for start because Compute Engine has detected suspicious activity.", "type": "boolean" @@ -33404,6 +36021,7 @@ "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, STOPPED, SUSPENDING, SUSPENDED, and TERMINATED.", "enum": [ "PROVISIONING", + "REPAIRING", "RUNNING", "STAGING", "STOPPED", @@ -33420,6 +36038,7 @@ "", "", "", + "", "" ], "type": "string" @@ -33607,6 +36226,10 @@ "description": "[Output Only] The URL for this instance group. The server generates this URL.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "size": { "description": "[Output Only] The total number of instances in the instance group.", "format": "int32", @@ -33939,7 +36562,7 @@ }, "pendingActions": { "$ref": "InstanceGroupManagerPendingActionsSummary", - "description": "[Output Only] The list of instance actions and the number of instances in this managed instance group that are pending for each of those actions." + "description": "[Deprecated] This field is deprecated and will be removed. Prefer using the status field instead. Please contact cloud-updater-feedback@google.com to leave feedback if your workload relies on this field. [Output Only] The list of instance actions and the number of instances in this managed instance group that are pending for each of those actions." }, "region": { "description": "[Output Only] The URL of the region where the managed instance group resides (for regional resources).", @@ -33949,6 +36572,10 @@ "description": "[Output Only] The URL for this managed instance group. The server defines this URL.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "serviceAccount": { "description": "The service account to be used as credentials for all operations performed by the managed instance group on instances. The service accounts needs all permissions required to create and delete instances. 
By default, the service account {projectNumber}@cloudservices.gserviceaccount.com is used.", "type": "string" @@ -33984,7 +36611,7 @@ "description": "The update policy for this managed instance group." }, "versions": { - "description": "Specifies the instance templates used by this managed instance group to create instances.\n\nEach version is defined by an instanceTemplate. Every template can appear at most once per instance group. This field overrides the top-level instanceTemplate field. Read more about the relationships between these fields. Exactly one version must leave the targetSize field unset. That version will be applied to all remaining instances. For more information, read about canary updates.", + "description": "Specifies the instance templates used by this managed instance group to create instances.\n\nEach version is defined by an instanceTemplate and a name. Every version can appear at most once per instance group. This field overrides the top-level instanceTemplate field. Read more about the relationships between these fields. Exactly one version must leave the targetSize field unset. That version will be applied to all remaining instances. For more information, read about canary updates.", "items": { "$ref": "InstanceGroupManagerVersion" }, @@ -34175,7 +36802,7 @@ }, "maxUnavailable": { "$ref": "FixedOrPercent", - "description": "Maximum number of instances that can be unavailable when autohealing. The instance is considered available if all of the following conditions are satisfied: 1. Instance's status is RUNNING. 2. Instance's liveness health check result was observed to be HEALTHY at least once. By default, a percent value of 100% is used." + "description": "Maximum number of instances that can be unavailable when autohealing. When 'percent' is used, the value is rounded UP. The instance is considered available if all of the following conditions are satisfied: 1. Instance's status is RUNNING. 2. Instance's currentAction is NONE (in particular its liveness health check result was observed to be HEALTHY at least once as it passed VERIFYING). 3. There is no outgoing action on an instance triggered by IGM.\n\nBy default, number of concurrently autohealed instances is smaller than the managed instance group target size. However, if a zonal managed instance group has only one instance, or a regional managed instance group has only one instance per zone, autohealing will recreate these instances when they become unhealthy." } }, "type": "object" @@ -34296,22 +36923,22 @@ "id": "InstanceGroupManagerPendingActionsSummary", "properties": { "creating": { - "description": "[Output Only] The number of instances in the managed instance group that are pending to be created.", + "description": "[Deprecated] This field is deprecated and will be removed. Prefer using the status field instead. Please contact cloud-updater-feedback@google.com to leave feedback if your workload relies on this field. [Output Only] The number of instances in the managed instance group that are pending to be created.", "format": "int32", "type": "integer" }, "deleting": { - "description": "[Output Only] The number of instances in the managed instance group that are pending to be deleted.", + "description": "[Deprecated] This field is deprecated and will be removed. Prefer using the status field instead. Please contact cloud-updater-feedback@google.com to leave feedback if your workload relies on this field. 
[Output Only] The number of instances in the managed instance group that are pending to be deleted.", "format": "int32", "type": "integer" }, "recreating": { - "description": "[Output Only] The number of instances in the managed instance group that are pending to be recreated.", + "description": "[Deprecated] This field is deprecated and will be removed. Prefer using the status field instead. Please contact cloud-updater-feedback@google.com to leave feedback if your workload relies on this field. [Output Only] The number of instances in the managed instance group that are pending to be recreated.", "format": "int32", "type": "integer" }, "restarting": { - "description": "[Output Only] The number of instances in the managed instance group that are pending to be restarted.", + "description": "[Deprecated] This field is deprecated and will be removed. Prefer using the status field instead. Please contact cloud-updater-feedback@google.com to leave feedback if your workload relies on this field. [Output Only] The number of instances in the managed instance group that are pending to be restarted.", "format": "int32", "type": "integer" } @@ -34325,6 +36952,10 @@ "description": "[Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.", "type": "boolean" }, + "stateful": { + "$ref": "InstanceGroupManagerStatusStateful", + "description": "[Output Only] Stateful status of the given Instance Group Manager." + }, "versionTarget": { "$ref": "InstanceGroupManagerStatusVersionTarget", "description": "[Output Only] A status of consistency of Instances' versions with their target version specified by version field on Instance Group Manager." @@ -34332,6 +36963,16 @@ }, "type": "object" }, + "InstanceGroupManagerStatusStateful": { + "id": "InstanceGroupManagerStatusStateful", + "properties": { + "isStateful": { + "description": "[Output Only] A bit indicating whether the managed instance group is stateful, i.e. has any disks in Stateful Policy or at least one per-instance config. This is determined based on the user intent, the group may be reported as not stateful even when there is still some preserved state on managed instances.", + "type": "boolean" + } + }, + "type": "object" + }, "InstanceGroupManagerStatusVersionTarget": { "id": "InstanceGroupManagerStatusVersionTarget", "properties": { @@ -34385,6 +37026,18 @@ ], "type": "string" }, + "replacementMethod": { + "description": "What action should be used to replace instances. See minimal_action.REPLACE", + "enum": [ + "RECREATE", + "SUBSTITUTE" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, "type": { "enum": [ "OPPORTUNISTIC", @@ -34403,6 +37056,7 @@ "id": "InstanceGroupManagerVersion", "properties": { "instanceTemplate": { + "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create new instances in the managed instance group until the `targetSize` for this version is reached.", "type": "string" }, "name": { @@ -34445,7 +37099,7 @@ "type": "array" }, "maximalAction": { - "description": "The maximal action that should be perfomed on the instances. 
By default REPLACE.", + "description": "The maximal action that should be performed on the instances. By default REPLACE. This field is deprecated, please use most_disruptive_allowed_action.", "enum": [ "NONE", "REFRESH", @@ -34475,6 +37129,22 @@ "" ], "type": "string" + }, + "mostDisruptiveAllowedAction": { + "description": "The most disruptive action that allowed to be performed on the instances. By default REPLACE.", + "enum": [ + "NONE", + "REFRESH", + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" } }, "type": "object" @@ -34642,6 +37312,20 @@ }, "type": "object" }, + "InstanceGroupManagersPatchPerInstanceConfigsReq": { + "description": "InstanceGroupManagers.patchPerInstanceConfigs", + "id": "InstanceGroupManagersPatchPerInstanceConfigsReq", + "properties": { + "perInstanceConfigs": { + "description": "The list of per-instance configs to insert or patch on this managed instance group.", + "items": { + "$ref": "PerInstanceConfig" + }, + "type": "array" + } + }, + "type": "object" + }, "InstanceGroupManagersRecreateInstancesRequest": { "id": "InstanceGroupManagersRecreateInstancesRequest", "properties": { @@ -35327,10 +38011,6 @@ "description": "", "id": "InstanceProperties", "properties": { - "allocationAffinity": { - "$ref": "AllocationAffinity", - "description": "The configuration of desired allocations which this Instance could consume capacity from." - }, "canIpForward": { "description": "Enables instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information.", "type": "boolean" @@ -35346,6 +38026,10 @@ }, "type": "array" }, + "displayDevice": { + "$ref": "DisplayDevice", + "description": "Display Device properties to enable support for remote display products like: Teradici, VNC and TeamViewer" + }, "guestAccelerators": { "description": "A list of guest accelerator cards' type and count to use for instances created from the instance template.", "items": { @@ -35384,6 +38068,10 @@ }, "type": "array" }, + "reservationAffinity": { + "$ref": "ReservationAffinity", + "description": "The configuration of desired reservations which this Instance could consume capacity from." + }, "scheduling": { "$ref": "Scheduling", "description": "Specifies the scheduling options for the instances that are created from this template." @@ -35395,6 +38083,9 @@ }, "type": "array" }, + "shieldedInstanceConfig": { + "$ref": "ShieldedInstanceConfig" + }, "shieldedVmConfig": { "$ref": "ShieldedVmConfig", "description": "Specifies the Shielded VM options for the instances that are created from this template." @@ -35456,6 +38147,10 @@ "description": "[Output Only] The URL for this instance template. The server defines this URL.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "sourceInstance": { "description": "The source instance used to create the template. You can provide this as a partial or full URL to the resource. 
For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance", "type": "string" @@ -35597,6 +38292,7 @@ "description": "[Output Only] The status of the instance.", "enum": [ "PROVISIONING", + "REPAIRING", "RUNNING", "STAGING", "STOPPED", @@ -35613,6 +38309,7 @@ "", "", "", + "", "" ], "type": "string" @@ -35633,6 +38330,45 @@ }, "type": "object" }, + "InstancesGetEffectiveFirewallsResponse": { + "id": "InstancesGetEffectiveFirewallsResponse", + "properties": { + "firewalls": { + "description": "Effective firewalls on the instance.", + "items": { + "$ref": "Firewall" + }, + "type": "array" + }, + "organizationFirewalls": { + "description": "Effective firewalls from organization policies.", + "items": { + "$ref": "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy": { + "description": "A pruned SecurityPolicy containing ID and any applicable firewall rules.", + "id": "InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy", + "properties": { + "id": { + "description": "The unique identifier for the security policy. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "rules": { + "description": "The rules that apply to the network.", + "items": { + "$ref": "SecurityPolicyRule" + }, + "type": "array" + } + }, + "type": "object" + }, "InstancesRemoveResourcePoliciesRequest": { "id": "InstancesRemoveResourcePoliciesRequest", "properties": { @@ -35841,6 +38577,23 @@ }, "type": "object" }, + "Int64RangeMatch": { + "description": "HttpRouteRuleMatch criteria for field values that must stay within the specified integer range.", + "id": "Int64RangeMatch", + "properties": { + "rangeEnd": { + "description": "The end of the range (exclusive) in signed long integer format.", + "format": "int64", + "type": "string" + }, + "rangeStart": { + "description": "The start of the range (inclusive) in signed long integer format.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "Interconnect": { "description": "Represents an Interconnects resource. The Interconnects resource is a dedicated connection between Google's network and your on-premises network. For more information, see the Dedicated overview page. (== resource_for v1.interconnects ==) (== resource_for beta.interconnects ==)", "id": "Interconnect", @@ -35880,7 +38633,7 @@ "type": "string" }, "googleReferenceId": { - "description": "[Output Only] Google reference ID; to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.", + "description": "[Output Only] Google reference ID to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.", "type": "string" }, "id": { @@ -35896,7 +38649,7 @@ "type": "array" }, "interconnectType": { - "description": "Type of interconnect. Note that \"IT_PRIVATE\" has been deprecated in favor of \"DEDICATED\"", + "description": "Type of interconnect, which can take one of the following values: \n- PARTNER: A partner-managed interconnection shared between customers though a partner. \n- DEDICATED: A dedicated physical interconnection with the customer. 
Note that a value IT_PRIVATE has been deprecated in favor of DEDICATED.", "enum": [ "DEDICATED", "IT_PRIVATE", @@ -35927,11 +38680,13 @@ "type": "object" }, "linkType": { - "description": "Type of link requested. This field indicates speed of each of the links in the bundle, not the entire bundle. Only 10G per link is allowed for a dedicated interconnect. Options: Ethernet_10G_LR", + "description": "Type of link requested, which can take one of the following values: \n- LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics \n- LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.", "enum": [ + "LINK_TYPE_ETHERNET_100G_LR", "LINK_TYPE_ETHERNET_10G_LR" ], "enumDescriptions": [ + "", "" ], "type": "string" @@ -35955,7 +38710,7 @@ "type": "string" }, "operationalStatus": { - "description": "[Output Only] The current status of whether or not this Interconnect is functional.", + "description": "[Output Only] The current status of this Interconnect's functionality, which can take one of the following values: \n- OS_ACTIVE: A valid Interconnect, which is turned up and is ready to use. Attachments may be provisioned on this Interconnect. \n- OS_UNPROVISIONED: An Interconnect that has not completed turnup. No attachments may be provisioned on this Interconnect. \n- OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect.", "enum": [ "OS_ACTIVE", "OS_UNPROVISIONED" @@ -35984,8 +38739,12 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "state": { - "description": "[Output Only] The current state of whether or not this Interconnect is functional.", + "description": "[Output Only] The current state of Interconnect functionality, which can take one of the following values: \n- ACTIVE: The Interconnect is valid, turned up and ready to use. Attachments may be provisioned on this Interconnect. \n- UNPROVISIONED: The Interconnect has not completed turnup. No attachments may be provisioned on this Interconnect. \n- UNDER_MAINTENANCE: The Interconnect is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect.", "enum": [ "ACTIVE", "UNPROVISIONED" @@ -36008,16 +38767,18 @@ "type": "boolean" }, "bandwidth": { - "description": "Provisioned bandwidth capacity for the interconnectAttachment. Can be set by the partner to update the customer's provisioned bandwidth. Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED.", + "description": "Provisioned bandwidth capacity for the interconnect attachment. For attachments of type DEDICATED, the user can set the bandwidth. For attachments of type PARTNER, the Google Partner that is operating the interconnect must set the bandwidth. 
Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, and can take one of the following values: \n- BPS_50M: 50 Mbit/s \n- BPS_100M: 100 Mbit/s \n- BPS_200M: 200 Mbit/s \n- BPS_300M: 300 Mbit/s \n- BPS_400M: 400 Mbit/s \n- BPS_500M: 500 Mbit/s \n- BPS_1G: 1 Gbit/s \n- BPS_2G: 2 Gbit/s \n- BPS_5G: 5 Gbit/s \n- BPS_10G: 10 Gbit/s", "enum": [ "BPS_100M", "BPS_10G", "BPS_1G", "BPS_200M", + "BPS_20G", "BPS_2G", "BPS_300M", "BPS_400M", "BPS_500M", + "BPS_50G", "BPS_50M", "BPS_5G" ], @@ -36031,6 +38792,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -36059,7 +38822,7 @@ "type": "string" }, "edgeAvailabilityDomain": { - "description": "Desired availability domain for the attachment. Only available for type PARTNER, at creation time. For improved reliability, customers should configure a pair of attachments with one per availability domain. The selected availability domain will be provided to the Partner via the pairing key so that the provisioned circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.", + "description": "Desired availability domain for the attachment. Only available for type PARTNER, at creation time, and can take one of the following values: \n- AVAILABILITY_DOMAIN_ANY \n- AVAILABILITY_DOMAIN_1 \n- AVAILABILITY_DOMAIN_2 For improved reliability, customers should configure a pair of attachments, one per availability domain. The selected availability domain will be provided to the Partner via the pairing key, so that the provisioned circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.", "enum": [ "AVAILABILITY_DOMAIN_1", "AVAILABILITY_DOMAIN_2", @@ -36108,7 +38871,7 @@ "type": "string" }, "operationalStatus": { - "description": "[Output Only] The current status of whether or not this interconnect attachment is functional.", + "description": "[Output Only] The current status of whether or not this interconnect attachment is functional, which can take one of the following values: \n- OS_ACTIVE: The attachment has been turned up and is ready to use. \n- OS_UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete.", "enum": [ "OS_ACTIVE", "OS_UNPROVISIONED" @@ -36124,7 +38887,7 @@ "type": "string" }, "partnerAsn": { - "description": "Optional BGP ASN for the router that should be supplied by a layer 3 Partner if they configured BGP on behalf of the customer. Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED.", + "description": "Optional BGP ASN for the router supplied by a Layer 3 Partner if they configured BGP on behalf of the customer. Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED.", "format": "int64", "type": "string" }, @@ -36148,8 +38911,12 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "state": { - "description": "[Output Only] The current state of this attachment's functionality.", + "description": "[Output Only] The current state of this attachment's functionality. Enum values ACTIVE and UNPROVISIONED are shared by DEDICATED/PRIVATE, PARTNER, and PARTNER_PROVIDER interconnect attachments, while enum values PENDING_PARTNER, PARTNER_REQUEST_RECEIVED, and PENDING_CUSTOMER are used for only PARTNER and PARTNER_PROVIDER interconnect attachments. 
This state can take one of the following values: \n- ACTIVE: The attachment has been turned up and is ready to use. \n- UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete. \n- PENDING_PARTNER: A newly-created PARTNER attachment that has not yet been configured on the Partner side. \n- PARTNER_REQUEST_RECEIVED: A PARTNER attachment is in the process of provisioning after a PARTNER_PROVIDER attachment was created that references it. \n- PENDING_CUSTOMER: A PARTNER or PARTNER_PROVIDER attachment that is waiting for a customer to activate it. \n- DEFUNCT: The attachment was deleted externally and is no longer functional. This could be because the associated Interconnect was removed, or because the other side of a Partner attachment was deleted.", "enum": [ "ACTIVE", "DEFUNCT", @@ -36171,6 +38938,7 @@ "type": "string" }, "type": { + "description": "The type of interconnect attachment this is, which can take one of the following values: \n- DEDICATED: an attachment to a Dedicated Interconnect. \n- PARTNER: an attachment to a Partner Interconnect, created by the customer. \n- PARTNER_PROVIDER: an attachment to a Partner Interconnect, created by the partner.", "enum": [ "DEDICATED", "PARTNER", @@ -36428,7 +39196,7 @@ "type": "string" }, "portalUrl": { - "description": "URL of the Partner?s portal for this Attachment. Partners may customise this to be a deep-link to the specific resource on the Partner portal. This value may be validated to match approved Partner values.", + "description": "URL of the Partner?s portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values.", "type": "string" } }, @@ -36611,6 +39379,7 @@ "type": "string" }, "state": { + "description": "The state of a LACP link, which can take one of the following values: \n- ACTIVE: The link is configured and active within the bundle. \n- DETACHED: The link is not configured within the bundle. This means that the rest of the object should be empty.", "enum": [ "ACTIVE", "DETACHED" @@ -36628,6 +39397,7 @@ "id": "InterconnectDiagnosticsLinkOpticalPower", "properties": { "state": { + "description": "The status of the current value when compared to the warning and alarm levels for the receiving or transmitting transceiver. Possible states include: \n- OK: The value has not crossed a warning threshold. \n- LOW_WARNING: The value has crossed below the low warning threshold. \n- HIGH_WARNING: The value has crossed above the high warning threshold. \n- LOW_ALARM: The value has crossed below the low alarm threshold. \n- HIGH_ALARM: The value has crossed above the high alarm threshold.", "enum": [ "HIGH_ALARM", "HIGH_WARNING", @@ -36645,7 +39415,7 @@ "type": "string" }, "value": { - "description": "Value of the current optical power, read in dBm. Take a known good optical value, give it a 10% margin and trigger warnings relative to that value. In general, a -7dBm warning and a -11dBm alarm are good optical value estimates for most links.", + "description": "Value of the current receiving or transmitting optical power, read in dBm. Take a known good optical value, give it a 10% margin and trigger warnings relative to that value. 
In general, a -7dBm warning and a -11dBm alarm are good optical value estimates for most links.", "format": "float", "type": "number" } @@ -36674,10 +39444,12 @@ "$ref": "InterconnectDiagnosticsLinkLACPStatus" }, "receivingOpticalPower": { - "$ref": "InterconnectDiagnosticsLinkOpticalPower" + "$ref": "InterconnectDiagnosticsLinkOpticalPower", + "description": "An InterconnectDiagnostics.LinkOpticalPower object, describing the current value and status of the received light level." }, "transmittingOpticalPower": { - "$ref": "InterconnectDiagnosticsLinkOpticalPower" + "$ref": "InterconnectDiagnosticsLinkOpticalPower", + "description": "An InterconnectDiagnostics.LinkOpticalPower object, describing the current value and status of the transmitted light level." } }, "type": "object" @@ -36811,7 +39583,7 @@ "type": "string" }, "continent": { - "description": "[Output Only] Continent for this location.", + "description": "[Output Only] Continent for this location, which can take one of the following values: \n- AFRICA \n- ASIA_PAC \n- EUROPE \n- NORTH_AMERICA \n- SOUTH_AMERICA", "enum": [ "AFRICA", "ASIA_PAC", @@ -36883,8 +39655,12 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "status": { - "description": "[Output Only] The status of this InterconnectLocation. If the status is AVAILABLE, new Interconnects may be provisioned in this InterconnectLocation. Otherwise, no new Interconnects may be provisioned.", + "description": "[Output Only] The status of this InterconnectLocation, which can take one of the following values: \n- CLOSED: The InterconnectLocation is closed and is unavailable for provisioning new Interconnects. \n- AVAILABLE: The InterconnectLocation is available for provisioning new Interconnects.", "enum": [ "AVAILABLE", "CLOSED" @@ -37063,7 +39839,7 @@ "type": "string" }, "issueType": { - "description": "Form this outage is expected to take. Note that the \"IT_\" versions of this enum have been deprecated in favor of the unprefixed values.", + "description": "Form this outage is expected to take, which can take one of the following values: \n- OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. \n- PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", "enum": [ "IT_OUTAGE", "IT_PARTIAL_OUTAGE", @@ -37083,7 +39859,7 @@ "type": "string" }, "source": { - "description": "The party that generated this notification. Note that \"NSRC_GOOGLE\" has been deprecated in favor of \"GOOGLE\"", + "description": "The party that generated this notification, which can take the following value: \n- GOOGLE: this notification as generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", "enum": [ "GOOGLE", "NSRC_GOOGLE" @@ -37100,7 +39876,7 @@ "type": "string" }, "state": { - "description": "State of this notification. Note that the \"NS_\" versions of this enum have been deprecated in favor of the unprefixed values.", + "description": "State of this notification, which can take one of the following values: \n- ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. 
\n- CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", "enum": [ "ACTIVE", "CANCELLED", @@ -37416,6 +40192,57 @@ }, "type": "object" }, + "Jwt": { + "description": "JWT configuration for origin authentication.", + "id": "Jwt", + "properties": { + "audiences": { + "description": "A JWT containing any of these audiences will be accepted. The service name will be accepted if audiences is empty. Examples: bookstore_android.apps.googleusercontent.com, bookstore_web.apps.googleusercontent.com", + "items": { + "type": "string" + }, + "type": "array" + }, + "issuer": { + "description": "Identifies the issuer that issued the JWT, which is usually a URL or an email address. Examples: https://securetoken.google.com, 1234567-compute@developer.gserviceaccount.com", + "type": "string" + }, + "jwksPublicKeys": { + "description": "The provider?s public key set to validate the signature of the JWT.", + "type": "string" + }, + "jwtHeaders": { + "description": "jwt_headers and jwt_params define where to extract the JWT from an HTTP request. If no explicit location is specified, the following default locations are tried in order:\n\n1. The Authorization header using the Bearer schema. See `here `_. Example:\n\nAuthorization: Bearer .\n\n2. `access_token` query parameter. See `this `_\n\nMultiple JWTs can be verified for a request. Each JWT has to be extracted from the locations its issuer specified or from the default locations.\n\nThis field is set if JWT is sent in a request header. This field specifies the header name. For example, if `header=x-goog-iap-jwt-assertion`, the header format will be x-goog-iap-jwt-assertion: .", + "items": { + "$ref": "JwtHeader" + }, + "type": "array" + }, + "jwtParams": { + "description": "This field is set if JWT is sent in a query parameter. This field specifies the query parameter name. For example, if jwt_params[0] is jwt_token, the JWT format in the query parameter is /path?jwt_token=.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "JwtHeader": { + "description": "This message specifies a header location to extract JWT token.", + "id": "JwtHeader", + "properties": { + "name": { + "description": "The HTTP header name.", + "type": "string" + }, + "valuePrefix": { + "description": "The value prefix. 
The value format is \"value_prefix\" For example, for \"Authorization: Bearer \", value_prefix=\"Bearer \" with a space at the end.", + "type": "string" + } + }, + "type": "object" + }, "License": { "description": "A license resource.", "id": "License", @@ -37464,6 +40291,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "transferable": { "description": "If false, licenses will not be copied from the source resource when creating an image from a disk, disk from snapshot, or snapshot from disk.", "type": "boolean" @@ -37674,6 +40505,24 @@ }, "type": "object" }, + "LocalDisk": { + "id": "LocalDisk", + "properties": { + "diskCount": { + "description": "Specifies the number of such disks.", + "type": "string" + }, + "diskSizeGb": { + "description": "Specifies the size of the disk in base-2 GB.", + "type": "string" + }, + "diskType": { + "description": "Specifies the desired disk type on the node. This disk type must be a local storage type (e.g.: local-ssd). Note that for nodeTemplates, this should be the name of the disk type and not its URL.", + "type": "string" + } + }, + "type": "object" + }, "LogConfig": { "description": "Specifies what kind of log the caller must write", "id": "LogConfig", @@ -37738,7 +40587,7 @@ "id": "LogConfigDataAccessOptions", "properties": { "logMode": { - "description": "Whether Gin logging should happen in a fail-closed manner at the caller. This is relevant only in the LocalIAM implementation, for now.\n\nNOTE: Logging to Gin in a fail-closed manner is currently unsupported while work is being done to satisfy the requirements of go/345. Currently, setting LOG_FAIL_CLOSED mode will have no effect, but still exists because there is active work being done to support it (b/115874152).", + "description": "Whether Gin logging should happen in a fail-closed manner at the caller. This is relevant only in the LocalIAM implementation, for now.", "enum": [ "LOG_FAIL_CLOSED", "LOG_MODE_UNSPECIFIED" @@ -37774,6 +40623,10 @@ "description": "[Output Only] The resource type, which is always compute#machineImage for machine image.", "type": "string" }, + "machineImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the machine image using a customer-supplied encryption key.\n\nAfter you encrypt a machine image using a customer-supplied key, you must provide the same key if you use the machine image later. For example, you must provide the encryption key when you create an instance from the encrypted machine image in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the machine image.\n\nIf you do not provide an encryption key when creating the machine image, then the machine image will be encrypted using an automatically generated key and you do not need to provide a key to use the machine image later." + }, "name": { "annotations": { "required": [ @@ -37788,16 +40641,20 @@ "description": "[Output Only] The URL for this machine image. The server defines this URL.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "sourceInstance": { "description": "The source instance used to create the machine image. You can provide this as a partial or full URL to the resource. 
For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance", "type": "string" }, "sourceInstanceProperties": { "$ref": "SourceInstanceProperties", - "description": "Properties of source instance." + "description": "[Output Only] Properties of source instance." }, "status": { - "description": "[Output Only] The status of disk creation.", + "description": "[Output Only] The status of the machine image. One of the following values: INVALID, CREATING, READY, DELETING, and UPLOADING.", "enum": [ "CREATING", "DELETING", @@ -37815,7 +40672,7 @@ "type": "string" }, "storageLocations": { - "description": "GCS bucket storage location of the snapshot (regional or multi-regional).", + "description": "GCS bucket storage location of the machine image (regional or multi-regional).", "items": { "type": "string" }, @@ -37998,6 +40855,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "zone": { "description": "[Output Only] The name of the zone where the machine type resides, such as us-central1-a.", "type": "string" @@ -38373,6 +41234,7 @@ "description": "[Output Only] The status of the instance. This field is empty when the instance does not exist.", "enum": [ "PROVISIONING", + "REPAIRING", "RUNNING", "STAGING", "STOPPED", @@ -38389,6 +41251,7 @@ "", "", "", + "", "" ], "type": "string" @@ -38427,6 +41290,24 @@ "ManagedInstanceInstanceHealth": { "id": "ManagedInstanceInstanceHealth", "properties": { + "detailedHealthState": { + "description": "[Output Only] The current detailed instance health state.", + "enum": [ + "DRAINING", + "HEALTHY", + "TIMEOUT", + "UNHEALTHY", + "UNKNOWN" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, "healthCheck": { "description": "[Output Only] The URL for the health check that verifies whether the instance is healthy.", "type": "string" @@ -38622,6 +41503,85 @@ }, "type": "object" }, + "MetadataCredentialsFromPlugin": { + "description": "Custom authenticator credentials.", + "id": "MetadataCredentialsFromPlugin", + "properties": { + "name": { + "description": "Plugin name.", + "type": "string" + }, + "structConfig": { + "description": "A text proto that conforms to a Struct type definition interpreted by the plugin.", + "type": "string" + } + }, + "type": "object" + }, + "MetadataFilter": { + "description": "Opaque filter criteria used by loadbalancers to restrict routing configuration to a limited set of loadbalancing proxies. Proxies and sidecars involved in loadbalancing would typically present metadata to the loadbalancers which need to match criteria specified here. If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. 
If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels in the provided metadata.\nAn example for using metadataFilters would be: if loadbalancing involves Envoys, they will only receive routing configuration when values in metadataFilters match values supplied in \u003ca href=\"https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/base.proto#envoy-api-msg-core-node\" Node metadata of their XDS requests to loadbalancers.", + "id": "MetadataFilter", + "properties": { + "filterLabels": { + "description": "The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria \nThis list must not be empty and can have at the most 64 entries.", + "items": { + "$ref": "MetadataFilterLabelMatch" + }, + "type": "array" + }, + "filterMatchCriteria": { + "description": "Specifies how individual filterLabel matches within the list of filterLabels contribute towards the overall metadataFilter match.\nSupported values are: \n- MATCH_ANY: At least one of the filterLabels must have a matching label in the provided metadata. \n- MATCH_ALL: All filterLabels must have matching labels in the provided metadata.", + "enum": [ + "MATCH_ALL", + "MATCH_ANY", + "NOT_SET" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "MetadataFilterLabelMatch": { + "description": "MetadataFilter label name value pairs that are expected to match corresponding labels presented as metadata to the loadbalancer.", + "id": "MetadataFilterLabelMatch", + "properties": { + "name": { + "description": "Name of metadata label.\nThe name can have a maximum length of 1024 characters and must be at least 1 character long.", + "type": "string" + }, + "value": { + "description": "The value of the label must match the specified value.\nvalue can have a maximum length of 1024 characters.", + "type": "string" + } + }, + "type": "object" + }, + "MutualTls": { + "description": "Configuration for the mutual Tls mode for peer authentication.", + "id": "MutualTls", + "properties": { + "mode": { + "description": "Specifies if the server TLS is configured to be strict or permissive. This field can be set to one of the following: STRICT: Client certificate must be presented, connection is in TLS. PERMISSIVE: Client certificate can be omitted, connection can be either plaintext or TLS.", + "enum": [ + "INVALID", + "PERMISSIVE", + "STRICT" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "NamedPort": { "description": "The named port. For example: .", "id": "NamedPort", @@ -38643,7 +41603,7 @@ "id": "Network", "properties": { "IPv4Range": { - "description": "The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", + "description": "Deprecated in favor of subnet mode networks. The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}", "type": "string" }, @@ -38698,6 +41658,23 @@ ], "type": "string" }, + "mtu": { + "description": "Maximum Transmission Unit in bytes. 
The minimum value for this field is 1460 and the maximum value is 1600 bytes.", + "format": "int32", + "type": "integer" + }, + "multicastMode": { + "description": "The multicast mode for this network. If set to ZONAL, multicast is allowed within a zone. If set to DISABLED, multicast is disabled for this network. The default is DISABLED.", + "enum": [ + "DISABLED", + "ZONAL" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, "name": { "annotations": { "required": [ @@ -38723,6 +41700,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "subnetworks": { "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this VPC network.", "items": { @@ -38744,6 +41725,10 @@ "description": "Metadata defined as annotations on the network endpoint.", "type": "object" }, + "fqdn": { + "description": "Optional fully qualified domain name of network endpoint. This can only be specified when NetworkEndpointGroup.network_endpoint_type is NON_GCP_FQDN_PORT.", + "type": "string" + }, "instance": { "description": "The name for a specific VM instance that the IP address belongs to. This is required for network endpoints of type GCE_VM_IP_PORT. The instance must be in the same zone of network endpoint group.\n\nThe name must be 1-63 characters long, and comply with RFC1035.", "type": "string" @@ -38809,9 +41794,15 @@ "networkEndpointType": { "description": "Type of network endpoints in this network endpoint group. Currently the only supported value is GCE_VM_IP_PORT.", "enum": [ - "GCE_VM_IP_PORT" + "GCE_VM_IP_PORT", + "INTERNET_FQDN_PORT", + "INTERNET_IP_PORT", + "NON_GCP_PRIVATE_IP_PORT" ], "enumDescriptions": [ + "", + "", + "", "" ], "type": "string" @@ -38820,6 +41811,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "size": { "description": "[Output only] Number of network endpoints in the network endpoint group.", "format": "int32", @@ -39122,6 +42117,13 @@ "NetworkEndpointGroupsListEndpointsRequest": { "id": "NetworkEndpointGroupsListEndpointsRequest", "properties": { + "endpointFilters": { + "description": "Optional list of endpoints to query. This is a more efficient but also limited version of filter parameter. Endpoints in the filter must have ip_address and port fields populated, other fields are not supported.", + "items": { + "$ref": "NetworkEndpointGroupsListEndpointsRequestNetworkEndpointFilter" + }, + "type": "array" + }, "healthStatus": { "description": "Optional query parameter for showing the health status of each network endpoint. Valid options are SKIP or SHOW. 
If you don't specify this parameter, the health status of network endpoints will not be provided.", "enum": [ @@ -39137,6 +42139,15 @@ }, "type": "object" }, + "NetworkEndpointGroupsListEndpointsRequestNetworkEndpointFilter": { + "id": "NetworkEndpointGroupsListEndpointsRequestNetworkEndpointFilter", + "properties": { + "networkEndpoint": { + "$ref": "NetworkEndpoint" + } + }, + "type": "object" + }, "NetworkEndpointGroupsListNetworkEndpoints": { "id": "NetworkEndpointGroupsListNetworkEndpoints", "properties": { @@ -39378,6 +42389,10 @@ "format": "byte", "type": "string" }, + "ipv6Address": { + "description": "[Output Only] An IPv6 internal network address for this network interface.", + "type": "string" + }, "kind": { "default": "compute#networkInterface", "description": "[Output Only] Type of the resource. Always compute#networkInterface for network interfaces.", "type": "string" }, @@ -39523,7 +42538,7 @@ "type": "boolean" }, "autoCreateRoutes": { - "description": "Indicates whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network.", + "description": "This field will be deprecated soon. Prefer using exchange_subnet_routes instead. Indicates whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network.", "type": "boolean" }, "exchangeSubnetRoutes": { @@ -39534,10 +42549,18 @@ "description": "Whether to export the custom routes to peer network.", "type": "boolean" }, + "exportSubnetRoutesWithPublicIp": { + "description": "Whether subnet routes with public IP range are exported. The default value is true, all subnet routes are exported. The IPv4 special-use ranges (https://en.wikipedia.org/wiki/IPv4#Special_addresses) are always exported to peers and are not controlled by this field.", + "type": "boolean" + }, "importCustomRoutes": { "description": "Whether to import the custom routes from peer network.", "type": "boolean" }, + "importSubnetRoutesWithPublicIp": { + "description": "Whether subnet routes with public IP range are imported. The default value is false. The IPv4 special-use ranges (https://en.wikipedia.org/wiki/IPv4#Special_addresses) are always imported from peers and are not controlled by this field.", + "type": "boolean" + }, "name": { "description": "Name of this peering. Provided by the client when the peering is created. The name must comply with RFC1035. Specifically, the name must be 1-63 characters long and match regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all the following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "type": "string" }, @@ -39546,6 +42569,11 @@ "description": "The URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project.
If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network.", "type": "string" }, + "peerMtu": { + "description": "Maximum Transmission Unit in bytes.", + "format": "int32", + "type": "integer" + }, "state": { "description": "[Output Only] State for the peering.", "enum": [ @@ -39588,7 +42616,7 @@ "id": "NetworksAddPeeringRequest", "properties": { "autoCreateRoutes": { - "description": "Whether Google Compute Engine manages the routes automatically.", + "description": "This field will be deprecated soon. Prefer using exchange_subnet_routes in network_peering instead. Whether Google Compute Engine manages the routes automatically.", "type": "boolean" }, "exportCustomRoutes": { @@ -39619,6 +42647,45 @@ }, "type": "object" }, + "NetworksGetEffectiveFirewallsResponse": { + "id": "NetworksGetEffectiveFirewallsResponse", + "properties": { + "firewalls": { + "description": "Effective firewalls on the network.", + "items": { + "$ref": "Firewall" + }, + "type": "array" + }, + "organizationFirewalls": { + "description": "Effective firewalls from organization policies.", + "items": { + "$ref": "NetworksGetEffectiveFirewallsResponseOrganizationFirewallPolicy" + }, + "type": "array" + } + }, + "type": "object" + }, + "NetworksGetEffectiveFirewallsResponseOrganizationFirewallPolicy": { + "description": "A pruned SecurityPolicy containing ID and any applicable firewall rules.", + "id": "NetworksGetEffectiveFirewallsResponseOrganizationFirewallPolicy", + "properties": { + "id": { + "description": "[Output Only] The unique identifier for the security policy. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "rules": { + "description": "The rules that apply to the network.", + "items": { + "$ref": "SecurityPolicyRule" + }, + "type": "array" + } + }, + "type": "object" + }, "NetworksRemovePeeringRequest": { "id": "NetworksRemovePeeringRequest", "properties": { @@ -39639,7 +42706,7 @@ "type": "object" }, "NodeGroup": { - "description": "A NodeGroup resource.", + "description": "A NodeGroup resource. To create a node group, you must first create a node templates. To learn more about node groups and sole-tenant nodes, read the Sole-tenant nodes documentation. (== resource_for beta.nodeGroups ==) (== resource_for v1.nodeGroups ==)", "id": "NodeGroup", "properties": { "autoscalingPolicy": { @@ -39663,6 +42730,19 @@ "description": "[Output Only] The type of the resource. Always compute#nodeGroup for node group.", "type": "string" }, + "managedHoldback": { + "enum": [ + "MANAGED_HOLDBACK_UNSPECIFIED", + "OFF", + "ON" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "name": { "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "type": "string" @@ -39675,6 +42755,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "size": { "description": "[Output Only] The total number of nodes in the node group.", "format": "int32", @@ -39943,6 +43027,13 @@ "NodeGroupNode": { "id": "NodeGroupNode", "properties": { + "disks": { + "description": "Local disk configurations.", + "items": { + "$ref": "LocalDisk" + }, + "type": "array" + }, "instances": { "description": "Instances scheduled on this node.", "items": { @@ -40210,6 +43301,15 @@ }, "type": "object" }, + "NodeGroupsSetAutoscalingPolicyRequest": { + "id": "NodeGroupsSetAutoscalingPolicyRequest", + "properties": { + "autoscalingPolicy": { + "$ref": "NodeGroupAutoscalingPolicy" + } + }, + "type": "object" + }, "NodeGroupsSetNodeTemplateRequest": { "id": "NodeGroupsSetNodeTemplateRequest", "properties": { @@ -40221,7 +43321,7 @@ "type": "object" }, "NodeTemplate": { - "description": "A Node Template resource.", + "description": "A Node Template resource. To learn more about node templates and sole-tenant nodes, read the Sole-tenant nodes documentation. (== resource_for beta.nodeTemplates ==) (== resource_for v1.nodeTemplates ==)", "id": "NodeTemplate", "properties": { "creationTimestamp": { @@ -40232,6 +43332,12 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "disks": { + "items": { + "$ref": "LocalDisk" + }, + "type": "array" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -40269,6 +43375,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "serverBinding": { "$ref": "ServerBinding", "description": "Binding properties for the physical server." @@ -40683,6 +43793,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "zone": { "description": "[Output Only] The name of the zone where the node type resides, such as us-central1-a.", "type": "string" @@ -41008,241 +44122,422 @@ }, "type": "object" }, - "Operation": { - "description": "An Operation resource, used to manage asynchronous API requests. 
(== resource_for v1.globalOperations ==) (== resource_for beta.globalOperations ==) (== resource_for v1.regionOperations ==) (== resource_for beta.regionOperations ==) (== resource_for v1.zoneOperations ==) (== resource_for beta.zoneOperations ==)", - "id": "Operation", + "NotificationEndpoint": { + "description": "A notification endpoint resource defines an endpoint to receive notifications when there are status changes detected by the associated health check service.", + "id": "NotificationEndpoint", "properties": { - "clientOperationId": { - "description": "[Output Only] The value of `requestId` if you provided it in the request. Not present otherwise.", - "type": "string" - }, "creationTimestamp": { - "description": "[Deprecated] This field is deprecated.", + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, "description": { - "description": "[Output Only] A textual description of the operation, which is set when the operation is created.", - "type": "string" - }, - "endTime": { - "description": "[Output Only] The time that this operation was completed. This value is in RFC3339 text format.", - "type": "string" - }, - "error": { - "description": "[Output Only] If errors are generated during processing of the operation, this field will be populated.", - "properties": { - "errors": { - "description": "[Output Only] The array of errors encountered while processing this operation.", - "items": { - "properties": { - "code": { - "description": "[Output Only] The error type identifier for this error.", - "type": "string" - }, - "location": { - "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional.", - "type": "string" - }, - "message": { - "description": "[Output Only] An optional, human-readable error message.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "httpErrorMessage": { - "description": "[Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as NOT FOUND.", + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "httpErrorStatusCode": { - "description": "[Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a 404 means the resource was not found.", - "format": "int32", - "type": "integer" + "grpcSettings": { + "$ref": "NotificationEndpointGrpcSettings", + "description": "Settings of the gRPC notification endpoint including the endpoint URL and the retry duration." }, "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier.", "format": "uint64", "type": "string" }, - "insertTime": { - "description": "[Output Only] The time that this operation was requested. This value is in RFC3339 text format.", - "type": "string" - }, "kind": { - "default": "compute#operation", - "description": "[Output Only] Type of the resource. Always compute#operation for Operation resources.", + "default": "compute#notificationEndpoint", + "description": "[Output Only] Type of the resource. 
Always compute#notificationEndpoint for notification endpoints.", "type": "string" }, "name": { - "description": "[Output Only] Name of the resource.", - "type": "string" - }, - "operationType": { - "description": "[Output Only] The type of operation, such as insert, update, or delete, and so on.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "progress": { - "description": "[Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. This number should monotonically increase as the operation progresses.", - "format": "int32", - "type": "integer" - }, "region": { - "description": "[Output Only] The URL of the region where the operation resides. Only available when performing regional operations. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "description": "[Output Only] URL of the region where the notification endpoint resides. This field applies only to the regional resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" - }, - "startTime": { - "description": "[Output Only] The time that this operation was started by the server. This value is in RFC3339 text format.", - "type": "string" - }, - "status": { - "description": "[Output Only] The status of the operation, which can be one of the following: PENDING, RUNNING, or DONE.", - "enum": [ - "DONE", - "PENDING", - "RUNNING" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, - "statusMessage": { - "description": "[Output Only] An optional textual description of the current status of the operation.", - "type": "string" - }, - "targetId": { - "description": "[Output Only] The unique target ID, which identifies a specific incarnation of the target resource.", - "format": "uint64", + } + }, + "type": "object" + }, + "NotificationEndpointGrpcSettings": { + "description": "Represents a gRPC setting that describes one gRPC notification endpoint and the retry duration attempting to send notification to this endpoint.", + "id": "NotificationEndpointGrpcSettings", + "properties": { + "authority": { + "description": "Optional. If specified, this field is used to set the authority header by the sender of notifications. See https://tools.ietf.org/html/rfc7540#section-8.1.2.3", "type": "string" }, - "targetLink": { - "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from.", + "endpoint": { + "description": "Endpoint to which gRPC notifications are sent. 
This must be a valid gRPCLB DNS name.", "type": "string" }, - "user": { - "description": "[Output Only] User who requested the operation, for example: user@example.com.", + "payloadName": { + "description": "Optional. If specified, this field is used to populate the ?name? field in gRPC requests.", "type": "string" }, - "warnings": { - "description": "[Output Only] If warning messages are generated during processing of the operation, this field will be populated.", - "items": { - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "zone": { - "description": "[Output Only] The URL of the zone where the operation resides. Only available when performing per-zone operations. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", - "type": "string" + "retryDurationSec": { + "description": "How much time (in seconds) is spent attempting notification retries until a successful response is received. Default is 30s. Limit is 20m (1200s). Must be a positive number.", + "format": "uint32", + "type": "integer" } }, "type": "object" }, - "OperationAggregatedList": { - "id": "OperationAggregatedList", + "NotificationEndpointList": { + "id": "NotificationEndpointList", "properties": { "id": { - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "OperationsScopedList", - "description": "[Output Only] Name of the scope containing this set of operations." + "description": "A list of NotificationEndpoint resources.", + "items": { + "$ref": "NotificationEndpoint" }, - "description": "[Output Only] A map of scoped operation lists.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#operationAggregatedList", - "description": "[Output Only] Type of resource. Always compute#operationAggregatedList for aggregated lists of operations.", + "default": "compute#notificationEndpointList", + "description": "[Output Only] Type of the resource. Always compute#notificationEndpoint for notification endpoints.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "Operation": { + "description": "An Operation resource, used to manage asynchronous API requests. (== resource_for v1.globalOperations ==) (== resource_for beta.globalOperations ==) (== resource_for v1.regionOperations ==) (== resource_for beta.regionOperations ==) (== resource_for v1.zoneOperations ==) (== resource_for beta.zoneOperations ==)", + "id": "Operation", + "properties": { + "clientOperationId": { + "description": "[Output Only] The value of `requestId` if you provided it in the request. Not present otherwise.", + "type": "string" + }, + "creationTimestamp": { + "description": "[Deprecated] This field is deprecated.", + "type": "string" + }, + "description": { + "description": "[Output Only] A textual description of the operation, which is set when the operation is created.", + "type": "string" + }, + "endTime": { + "description": "[Output Only] The time that this operation was completed. This value is in RFC3339 text format.", + "type": "string" + }, + "error": { + "description": "[Output Only] If errors are generated during processing of the operation, this field will be populated.", + "properties": { + "errors": { + "description": "[Output Only] The array of errors encountered while processing this operation.", + "items": { + "properties": { + "code": { + "description": "[Output Only] The error type identifier for this error.", + "type": "string" + }, + "location": { + "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional.", + "type": "string" + }, + "message": { + "description": "[Output Only] An optional, human-readable error message.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "httpErrorMessage": { + "description": "[Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as NOT FOUND.", + "type": "string" + }, + "httpErrorStatusCode": { + "description": "[Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a 404 means the resource was not found.", + "format": "int32", + "type": "integer" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "insertTime": { + "description": "[Output Only] The time that this operation was requested. This value is in RFC3339 text format.", + "type": "string" + }, + "kind": { + "default": "compute#operation", + "description": "[Output Only] Type of the resource. 
Always compute#operation for Operation resources.", + "type": "string" + }, + "name": { + "description": "[Output Only] Name of the resource.", + "type": "string" + }, + "operationType": { + "description": "[Output Only] The type of operation, such as insert, update, or delete, and so on.", + "type": "string" + }, + "progress": { + "description": "[Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. This number should monotonically increase as the operation progresses.", + "format": "int32", + "type": "integer" + }, + "region": { + "description": "[Output Only] The URL of the region where the operation resides. Only available when performing regional operations. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, + "startTime": { + "description": "[Output Only] The time that this operation was started by the server. This value is in RFC3339 text format.", + "type": "string" + }, + "status": { + "description": "[Output Only] The status of the operation, which can be one of the following: PENDING, RUNNING, or DONE.", + "enum": [ + "DONE", + "PENDING", + "RUNNING" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "statusMessage": { + "description": "[Output Only] An optional textual description of the current status of the operation.", + "type": "string" + }, + "targetId": { + "description": "[Output Only] The unique target ID, which identifies a specific incarnation of the target resource.", + "format": "uint64", + "type": "string" + }, + "targetLink": { + "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from.", + "type": "string" + }, + "user": { + "description": "[Output Only] User who requested the operation, for example: user@example.com.", + "type": "string" + }, + "warnings": { + "description": "[Output Only] If warning messages are generated during processing of the operation, this field will be populated.", + "items": { + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "zone": { + "description": "[Output Only] The URL of the zone where the operation resides. Only available when performing per-zone operations. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + } + }, + "type": "object" + }, + "OperationAggregatedList": { + "id": "OperationAggregatedList", + "properties": { + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "OperationsScopedList", + "description": "[Output Only] Name of the scope containing this set of operations." + }, + "description": "[Output Only] A map of scoped operation lists.", + "type": "object" + }, + "kind": { + "default": "compute#operationAggregatedList", + "description": "[Output Only] Type of resource. 
Always compute#operationAggregatedList for aggregated lists of operations.", "type": "string" }, "nextPageToken": { @@ -41543,18 +44838,118 @@ }, "type": "object" }, + "OrganizationSecurityPoliciesListAssociationsResponse": { + "id": "OrganizationSecurityPoliciesListAssociationsResponse", + "properties": { + "associations": { + "description": "A list of associations.", + "items": { + "$ref": "SecurityPolicyAssociation" + }, + "type": "array" + }, + "kind": { + "default": "compute#organizationSecurityPoliciesListAssociationsResponse", + "description": "[Output Only] Type of securityPolicy associations. Always compute#organizationSecurityPoliciesListAssociations for lists of securityPolicy associations.", + "type": "string" + } + }, + "type": "object" + }, + "OriginAuthenticationMethod": { + "description": "Configuration for the origin authentication method.", + "id": "OriginAuthenticationMethod", + "properties": { + "jwt": { + "$ref": "Jwt" + } + }, + "type": "object" + }, + "OutlierDetection": { + "description": "Settings controlling eviction of unhealthy hosts from the load balancing pool.", + "id": "OutlierDetection", + "properties": { + "baseEjectionTime": { + "$ref": "Duration", + "description": "The base time that a host is ejected for. The real time is equal to the base time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s." + }, + "consecutiveErrors": { + "description": "Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5.", + "format": "int32", + "type": "integer" + }, + "consecutiveGatewayFailure": { + "description": "The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 5.", + "format": "int32", + "type": "integer" + }, + "enforcingConsecutiveErrors": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "enforcingConsecutiveGatewayFailure": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0.", + "format": "int32", + "type": "integer" + }, + "enforcingSuccessRate": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "interval": { + "$ref": "Duration", + "description": "Time interval between ejection sweep analysis. This can result in both new ejections as well as hosts being returned to service. Defaults to 10 seconds." + }, + "maxEjectionPercent": { + "description": "Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 10%.", + "format": "int32", + "type": "integer" + }, + "successRateMinimumHosts": { + "description": "The number of hosts in a cluster that must have enough request volume to detect success rate outliers. 
If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. Defaults to 5.", + "format": "int32", + "type": "integer" + }, + "successRateRequestVolume": { + "description": "The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "successRateStdevFactor": { + "description": "This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "PathMatcher": { "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service will be used.", "id": "PathMatcher", "properties": { + "defaultRouteAction": { + "$ref": "HttpRouteAction", + "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set." + }, "defaultService": { - "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService \nUse defaultService instead of defaultRouteAction when simple routing to a backend service is desired and other advanced capabilities like traffic splitting and URL rewrites are not required.\nOnly one of defaultService, defaultRouteAction or defaultUrlRedirect must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use", + "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. 
However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use", "type": "string" }, + "defaultUrlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set." + }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nHeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap" + }, "name": { "description": "The name to which this PathMatcher is referred by the HostRule.", "type": "string" }, @@ -41565,6 +44960,13 @@ "$ref": "PathRule" }, "type": "array" + }, + "routeRules": { + "description": "The list of ordered HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. The order of specifying routeRules matters: the first rule that matches will cause its specified routing action to take effect.\nOnly one of pathRules or routeRules must be set.", + "items": { + "$ref": "HttpRouteRule" + }, + "type": "array" + } } }, "type": "object" @@ -41580,9 +44982,28 @@ }, "type": "array" }, + "routeAction": { + "$ref": "HttpRouteAction", + "description": "In response to a matching path, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set." + }, "service": { - "description": "The URL of the backend service resource if this rule is matched.\nUse service instead of routeAction when simple routing to a backend service is desired and other advanced capabilities like traffic splitting and rewrites are not required.\nOnly one of service, routeAction or urlRedirect should must be set.", + "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices.
Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", "type": "string" + }, + "urlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When a path pattern is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set." + } + }, + "type": "object" + }, + "PeerAuthenticationMethod": { + "description": "Configuration for the peer authentication method.", + "id": "PeerAuthenticationMethod", + "properties": { + "mtls": { + "$ref": "MutualTls", + "description": "Set if mTLS is used for peer authentication." } }, "type": "object" @@ -41613,6 +45034,94 @@ }, "type": "object" }, + "Permission": { + "description": "All fields defined in a permission are ANDed.", + "id": "Permission", + "properties": { + "constraints": { + "description": "Extra custom constraints. The constraints are ANDed together.", + "items": { + "$ref": "PermissionConstraint" + }, + "type": "array" + }, + "hosts": { + "description": "Used in Ingress or Egress Gateway cases to specify hosts that the policy applies to. Exact match, prefix match, and suffix match are supported.", + "items": { + "type": "string" + }, + "type": "array" + }, + "methods": { + "description": "HTTP method.", + "items": { + "type": "string" + }, + "type": "array" + }, + "notHosts": { + "description": "Negate of hosts. Specifies exclusions.", + "items": { + "type": "string" + }, + "type": "array" + }, + "notMethods": { + "description": "Negate of methods. Specifies exclusions.", + "items": { + "type": "string" + }, + "type": "array" + }, + "notPaths": { + "description": "Negate of paths. Specifies exclusions.", + "items": { + "type": "string" + }, + "type": "array" + }, + "notPorts": { + "description": "Negate of ports. Specifies exclusions.", + "items": { + "type": "string" + }, + "type": "array" + }, + "paths": { + "description": "HTTP request paths or gRPC methods. Exact match, prefix match, and suffix match are supported.", + "items": { + "type": "string" + }, + "type": "array" + }, + "ports": { + "description": "Port names or numbers.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "PermissionConstraint": { + "description": "Custom constraint that specifies a key and a list of allowed values for Istio attributes.", + "id": "PermissionConstraint", + "properties": { + "key": { + "description": "Key of the constraint.", + "type": "string" + }, + "values": { + "description": "A list of allowed values.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "Policy": { "description": "Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources.\n\n\n\nA `Policy` consists of a list of `bindings`. A `binding` binds a list of `members` to a `role`, where the members can be user accounts, Google groups, Google domains, and service accounts. 
A `role` is a named list of permissions defined by IAM.\n\n**JSON Example**\n\n{ \"bindings\": [ { \"role\": \"roles/owner\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-other-app@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/viewer\", \"members\": [\"user:sean@example.com\"] } ] }\n\n**YAML Example**\n\nbindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-other-app@appspot.gserviceaccount.com role: roles/owner - members: - user:sean@example.com role: roles/viewer\n\n\n\nFor a description of IAM and its features, see the [IAM developer's guide](https://cloud.google.com/iam/docs).", "id": "Policy", @@ -41696,9 +45205,11 @@ "description": "These stateful disks will never be deleted during autohealing, update, instance recreate operations. This flag is used to configure if the disk should be deleted after it is no longer used by the group, e.g. when the given instance or the whole MIG is deleted. Note: disks attached in READ_ONLY mode cannot be auto-deleted.", "enum": [ "NEVER", + "ON_PERMANENT_INSTANCE_DELETION", "WHEN_NOT_IN_USE" ], "enumDescriptions": [ + "", "", "" ], @@ -41723,6 +45234,80 @@ }, "type": "object" }, + "Principal": { + "description": "All fields defined in a principal are ANDed.", + "id": "Principal", + "properties": { + "condition": { + "description": "An expression to specify custom condition.", + "type": "string" + }, + "groups": { + "description": "The groups the principal belongs to. Exact match, prefix match, and suffix match are supported.", + "items": { + "type": "string" + }, + "type": "array" + }, + "ips": { + "description": "IPv4 or IPv6 address or range (In CIDR format)", + "items": { + "type": "string" + }, + "type": "array" + }, + "namespaces": { + "description": "The namespaces. Exact match, prefix match, and suffix match are supported.", + "items": { + "type": "string" + }, + "type": "array" + }, + "notGroups": { + "description": "Negate of groups. Specifies exclusions.", + "items": { + "type": "string" + }, + "type": "array" + }, + "notIps": { + "description": "Negate of IPs. Specifies exclusions.", + "items": { + "type": "string" + }, + "type": "array" + }, + "notNamespaces": { + "description": "Negate of namespaces. Specifies exclusions.", + "items": { + "type": "string" + }, + "type": "array" + }, + "notUsers": { + "description": "Negate of users. Specifies exclusions.", + "items": { + "type": "string" + }, + "type": "array" + }, + "properties": { + "additionalProperties": { + "type": "string" + }, + "description": "A map of Istio attribute to expected values. Exact match, prefix match, and suffix match are supported for values. For example, `request.headers[version]: ?v1?`. The properties are ANDed together.", + "type": "object" + }, + "users": { + "description": "The user names/IDs or service accounts. Exact match, prefix match, and suffix match are supported.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "Project": { "description": "A Project resource. For an overview of projects, see Cloud Platform Resource Hierarchy. 
(== resource_for v1.projects ==) (== resource_for beta.projects ==)", "id": "Project", @@ -41902,6 +45487,7 @@ "metric": { "description": "[Output Only] Name of the quota metric.", "enum": [ + "ALIASES_PER_NETWORK_GLOBAL", "AMD_S9300_GPUS", "AUTOSCALERS", "BACKEND_BUCKETS", @@ -41910,6 +45496,7 @@ "CPUS", "CPUS_ALL_REGIONS", "DISKS_TOTAL_GB", + "EXTERNAL_VPN_GATEWAYS", "FIREWALLS", "FORWARDING_RULES", "GLOBAL_INTERNAL_ADDRESSES", @@ -41917,6 +45504,7 @@ "HEALTH_CHECKS", "IMAGES", "INSTANCES", + "INSTANCES_PER_NETWORK_GLOBAL", "INSTANCE_GROUPS", "INSTANCE_GROUP_MANAGERS", "INSTANCE_TEMPLATES", @@ -41924,9 +45512,14 @@ "INTERCONNECT_ATTACHMENTS_PER_REGION", "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS", "INTERNAL_ADDRESSES", + "INTERNAL_FORWARDING_RULES_PER_NETWORK", + "INTERNAL_FORWARDING_RULES_WITH_GLOBAL_ACCESS_PER_NETWORK", + "INTERNAL_FORWARDING_RULES_WITH_TARGET_INSTANCE_PER_NETWORK", + "INTERNAL_TARGET_INSTANCE_WITH_GLOBAL_ACCESS_PER_NETWORK", "IN_USE_ADDRESSES", "IN_USE_BACKUP_SCHEDULES", "IN_USE_MAINTENANCE_WINDOWS", + "IN_USE_SNAPSHOT_SCHEDULES", "LOCAL_SSD_TOTAL_GB", "MACHINE_IMAGES", "NETWORKS", @@ -41971,7 +45564,8 @@ "TARGET_VPN_GATEWAYS", "URL_MAPS", "VPN_GATEWAYS", - "VPN_TUNNELS" + "VPN_TUNNELS", + "XPN_SERVICE_PROJECTS" ], "enumDescriptions": [ "", @@ -42043,10 +45637,23 @@ "", "", "", + "", + "", + "", + "", + "", + "", + "", + "", + "", "" ], "type": "string" }, + "owner": { + "description": "[Output Only] Owning resource. This is the resource on which this quota is applied.", + "type": "string" + }, "usage": { "description": "[Output Only] Current usage of this metric.", "format": "double", @@ -42055,6 +45662,30 @@ }, "type": "object" }, + "RbacPolicy": { + "id": "RbacPolicy", + "properties": { + "name": { + "description": "Name of the RbacPolicy.", + "type": "string" + }, + "permissions": { + "description": "The list of permissions.", + "items": { + "$ref": "Permission" + }, + "type": "array" + }, + "principals": { + "description": "The list of principals.", + "items": { + "$ref": "Principal" + }, + "type": "array" + } + }, + "type": "object" + }, "Reference": { "description": "Represents a reference to a resource.", "id": "Reference", @@ -42120,6 +45751,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "status": { "description": "[Output Only] Status of the region, either UP or DOWN.", "enum": [ @@ -42254,6 +45889,19 @@ }, "type": "object" }, + "RegionCommitmentsUpdateReservationsRequest": { + "id": "RegionCommitmentsUpdateReservationsRequest", + "properties": { + "reservations": { + "description": "List of reservations for the capacity move of VMs with accelerators and local ssds.", + "items": { + "$ref": "Reservation" + }, + "type": "array" + } + }, + "type": "object" + }, "RegionDiskTypeList": { "id": "RegionDiskTypeList", "properties": { @@ -42647,6 +46295,20 @@ }, "type": "object" }, + "RegionInstanceGroupManagerPatchInstanceConfigReq": { + "description": "RegionInstanceGroupManagers.patchPerInstanceConfigs", + "id": "RegionInstanceGroupManagerPatchInstanceConfigReq", + "properties": { + "perInstanceConfigs": { + "description": "The list of per-instance configs to insert or patch on this managed instance group.", + "items": { + "$ref": "PerInstanceConfig" + }, + "type": "array" + } + }, + "type": "object" + }, "RegionInstanceGroupManagerUpdateInstanceConfigReq": { "description": 
"RegionInstanceGroupManagers.updatePerInstanceConfigs", "id": "RegionInstanceGroupManagerUpdateInstanceConfigReq", @@ -42686,7 +46348,7 @@ "type": "array" }, "maximalAction": { - "description": "The maximal action that should be perfomed on the instances. By default REPLACE.", + "description": "The maximal action that should be performed on the instances. By default REPLACE. This field is deprecated, please use most_disruptive_allowed_action.", "enum": [ "NONE", "REFRESH", @@ -42716,6 +46378,36 @@ "" ], "type": "string" + }, + "mostDisruptiveAllowedAction": { + "description": "The most disruptive action that allowed to be performed on the instances. By default REPLACE.", + "enum": [ + "NONE", + "REFRESH", + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "RegionInstanceGroupManagersCreateInstancesRequest": { + "description": "RegionInstanceGroupManagers.createInstances", + "id": "RegionInstanceGroupManagersCreateInstancesRequest", + "properties": { + "instances": { + "description": "[Required] List of specifications of per-instance configs.", + "items": { + "$ref": "PerInstanceConfig" + }, + "type": "array" } }, "type": "object" @@ -43012,65 +46704,456 @@ }, "type": "object" }, - "RegionInstanceGroupsListInstancesRequest": { - "id": "RegionInstanceGroupsListInstancesRequest", - "properties": { - "instanceState": { - "description": "Instances in which state should be returned. Valid options are: 'ALL', 'RUNNING'. By default, it lists all instances.", - "enum": [ - "ALL", - "RUNNING" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "portName": { - "description": "Name of port user is interested in. It is optional. If it is set, only information about this ports will be returned. If it is not set, all the named ports will be returned. Always lists all instances.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - } - }, - "type": "object" - }, - "RegionInstanceGroupsSetNamedPortsRequest": { - "id": "RegionInstanceGroupsSetNamedPortsRequest", - "properties": { - "fingerprint": { - "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", - "format": "byte", - "type": "string" - }, - "namedPorts": { - "description": "The list of named ports to set for this instance group.", - "items": { - "$ref": "NamedPort" - }, - "type": "array" - } - }, - "type": "object" - }, - "RegionList": { - "description": "Contains a list of region resources.", - "id": "RegionList", + "RegionInstanceGroupsListInstancesRequest": { + "id": "RegionInstanceGroupsListInstancesRequest", + "properties": { + "instanceState": { + "description": "Instances in which state should be returned. Valid options are: 'ALL', 'RUNNING'. By default, it lists all instances.", + "enum": [ + "ALL", + "RUNNING" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "portName": { + "description": "Name of port user is interested in. It is optional. If it is set, only information about this ports will be returned. If it is not set, all the named ports will be returned. 
Always lists all instances.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + } + }, + "type": "object" + }, + "RegionInstanceGroupsSetNamedPortsRequest": { + "id": "RegionInstanceGroupsSetNamedPortsRequest", + "properties": { + "fingerprint": { + "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", + "format": "byte", + "type": "string" + }, + "namedPorts": { + "description": "The list of named ports to set for this instance group.", + "items": { + "$ref": "NamedPort" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionList": { + "description": "Contains a list of region resources.", + "id": "RegionList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of Region resources.", + "items": { + "$ref": "Region" + }, + "type": "array" + }, + "kind": { + "default": "compute#regionList", + "description": "[Output Only] Type of resource. Always compute#regionList for lists of regions.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "RegionSetLabelsRequest": { + "id": "RegionSetLabelsRequest", + "properties": { + "labelFingerprint": { + "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. Make a get() request to the resource to get the latest fingerprint.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "The labels to set for this resource.", + "type": "object" + } + }, + "type": "object" + }, + "RegionSetPolicyRequest": { + "id": "RegionSetPolicyRequest", + "properties": { + "bindings": { + "description": "Flatten Policy to create a backwacd compatible wire-format. Deprecated. Use 'policy' to specify bindings.", + "items": { + "$ref": "Binding" + }, + "type": "array" + }, + "etag": { + "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify the etag.", + "format": "byte", + "type": "string" + }, + "policy": { + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the 'resource'. The size of the policy is limited to a few 10s of KB. An empty policy is in general a valid policy but certain services (like Projects) might reject them." + } + }, + "type": "object" + }, + "RegionTargetHttpsProxiesSetSslCertificatesRequest": { + "id": "RegionTargetHttpsProxiesSetSslCertificatesRequest", + "properties": { + "sslCertificates": { + "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionUrlMapsValidateRequest": { + "id": "RegionUrlMapsValidateRequest", + "properties": { + "resource": { + "$ref": "UrlMap", + "description": "Content of the UrlMap to be validated." + } + }, + "type": "object" + }, + "RequestMirrorPolicy": { + "description": "A policy that specifies how requests intended for the route's backends are shadowed to a separate mirrored backend service. Loadbalancer does not wait for responses from the shadow service. 
Prior to sending traffic to the shadow service, the host / authority header is suffixed with -shadow.", + "id": "RequestMirrorPolicy", + "properties": { + "backendService": { + "description": "The full or partial URL to the BackendService resource being mirrored to.", + "type": "string" + } + }, + "type": "object" + }, + "Reservation": { + "description": "Reservation resource", + "id": "Reservation", + "properties": { + "commitment": { + "description": "[OutputOnly] Full or partial url for parent commitment for reservations which are tied to a commitment.", + "type": "string" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#reservation", + "description": "[Output Only] Type of the resource. Always compute#reservations for reservations.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.instances.insert" + ] + }, + "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined fully-qualified URL for this resource.", + "type": "string" + }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, + "specificReservation": { + "$ref": "AllocationSpecificSKUReservation", + "description": "Reservation for instances with specific machine shapes." + }, + "specificReservationRequired": { + "description": "Indicates whether the reservation can be consumed by VMs with \"any reservation\" defined. If the field is set, then only VMs that target the reservation by name using --reservation-affinity can consume this reservation.", + "type": "boolean" + }, + "zone": { + "description": "Zone in which the reservation resides, must be provided if reservation is created with commitment creation.", + "type": "string" + } + }, + "type": "object" + }, + "ReservationAffinity": { + "description": "AllocationAffinity is the configuration of desired allocation which this instance could take capacity from.", + "id": "ReservationAffinity", + "properties": { + "consumeReservationType": { + "description": "Specifies the type of reservation from which this instance can consume resources: ANY_RESERVATION (default), SPECIFIC_RESERVATION, or NO_RESERVATION. 
See Consuming reserved instances for examples.", + "enum": [ + "ANY_RESERVATION", + "NO_RESERVATION", + "SPECIFIC_RESERVATION", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "key": { + "description": "Corresponds to the label key of reservation resource.", + "type": "string" + }, + "values": { + "description": "Corresponds to the label values of reservation resource.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "ReservationAggregatedList": { + "description": "Contains a list of reservations.", + "id": "ReservationAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "ReservationsScopedList", + "description": "Name of the scope containing this set of reservations." + }, + "description": "A list of Allocation resources.", + "type": "object" + }, + "kind": { + "default": "compute#reservationAggregatedList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ReservationList": { + "id": "ReservationList", "properties": { "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "type": "string" }, "items": { - "description": "A list of Region resources.", + "description": "[Output Only] A list of Allocation resources.", "items": { - "$ref": "Region" + "$ref": "Reservation" }, "type": "array" }, "kind": { - "default": "compute#regionList", - "description": "[Output Only] Type of resource. Always compute#regionList for lists of regions.", + "default": "compute#reservationList", + "description": "[Output Only] Type of resource.Always compute#reservationsList for listsof reservations", "type": "string" }, "nextPageToken": { @@ -43165,65 +47248,107 @@ }, "type": "object" }, - "RegionSetLabelsRequest": { - "id": "RegionSetLabelsRequest", + "ReservationsResizeRequest": { + "id": "ReservationsResizeRequest", "properties": { - "labelFingerprint": { - "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. Make a get() request to the resource to get the latest fingerprint.", - "format": "byte", + "specificSkuCount": { + "description": "Number of allocated resources can be resized with minimum = 1 and maximum = 1000.", + "format": "int64", "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "The labels to set for this resource.", - "type": "object" } }, "type": "object" }, - "RegionSetPolicyRequest": { - "id": "RegionSetPolicyRequest", + "ReservationsScopedList": { + "id": "ReservationsScopedList", "properties": { - "bindings": { - "description": "Flatten Policy to create a backwacd compatible wire-format. Deprecated. Use 'policy' to specify bindings.", + "reservations": { + "description": "A list of reservations contained in this scope.", "items": { - "$ref": "Binding" + "$ref": "Reservation" }, "type": "array" }, - "etag": { - "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify the etag.", - "format": "byte", - "type": "string" - }, - "policy": { - "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the 'resource'. The size of the policy is limited to a few 10s of KB. An empty policy is in general a valid policy but certain services (like Projects) might reject them." 
- } - }, - "type": "object" - }, - "RegionTargetHttpsProxiesSetSslCertificatesRequest": { - "id": "RegionTargetHttpsProxiesSetSslCertificatesRequest", - "properties": { - "sslCertificates": { - "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", - "items": { - "type": "string" + "warning": { + "description": "Informational warning which replaces the list of reservations when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } }, - "type": "array" - } - }, - "type": "object" - }, - "RegionUrlMapsValidateRequest": { - "id": "RegionUrlMapsValidateRequest", - "properties": { - "resource": { - "$ref": "UrlMap", - "description": "Content of the UrlMap to be validated." + "type": "object" } }, "type": "object" @@ -43369,10 +47494,6 @@ "ResourcePolicy": { "id": "ResourcePolicy", "properties": { - "backupSchedulePolicy": { - "$ref": "ResourcePolicyBackupSchedulePolicy", - "description": "Resource policy for persistent disks for creating snapshots." - }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -43380,6 +47501,10 @@ "description": { "type": "string" }, + "groupPlacementPolicy": { + "$ref": "ResourcePolicyGroupPlacementPolicy", + "description": "Resource policy for instacnes for placement configuration." 
+ }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -43407,6 +47532,30 @@ "description": "[Output Only] Server-defined fully-qualified URL for this resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, + "snapshotSchedulePolicy": { + "$ref": "ResourcePolicySnapshotSchedulePolicy", + "description": "Resource policy for persistent disks for creating snapshots." + }, + "status": { + "description": "[Output Only] The status of resource policy creation.", + "enum": [ + "CREATING", + "DELETING", + "INVALID", + "READY" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, "vmMaintenancePolicy": { "$ref": "ResourcePolicyVmMaintenancePolicy", "description": "Resource policy applicable to VMs for infrastructure maintenance." @@ -43530,40 +47679,36 @@ }, "type": "object" }, - "ResourcePolicyBackupSchedulePolicy": { - "description": "A backup schedule policy specifies when and how frequently snapshots are to be created for the target disk. Also specifies how many and how long these scheduled snapshots should be retained.", - "id": "ResourcePolicyBackupSchedulePolicy", + "ResourcePolicyDailyCycle": { + "description": "Time window specified for daily operations.", + "id": "ResourcePolicyDailyCycle", "properties": { - "retentionPolicy": { - "$ref": "ResourcePolicyBackupSchedulePolicyRetentionPolicy", - "description": "Retention policy applied to snapshots created by this resource policy." + "daysInCycle": { + "description": "Defines a schedule that runs every nth day of the month.", + "format": "int32", + "type": "integer" }, - "schedule": { - "$ref": "ResourcePolicyBackupSchedulePolicySchedule", - "description": "A Vm Maintenance Policy specifies what kind of infrastructure maintenance we are allowed to perform on this VM and when. Schedule that is applied to disks covered by this policy." + "duration": { + "description": "[Output only] A predetermined duration for the window, automatically chosen to be the smallest possible in the given scenario.", + "type": "string" }, - "snapshotProperties": { - "$ref": "ResourcePolicyBackupSchedulePolicySnapshotProperties", - "description": "Properties with which snapshots are created such as labels, encryption keys." + "startTime": { + "description": "Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.", + "type": "string" } }, "type": "object" }, - "ResourcePolicyBackupSchedulePolicyRetentionPolicy": { - "description": "Policy for retention of scheduled snapshots.", - "id": "ResourcePolicyBackupSchedulePolicyRetentionPolicy", + "ResourcePolicyGroupPlacementPolicy": { + "description": "A GroupPlacementPolicy specifies resource placement configuration. 
It specifies the failure bucket separation as well as network locality", + "id": "ResourcePolicyGroupPlacementPolicy", "properties": { - "maxRetentionDays": { - "description": "Maximum age of the snapshot that is allowed to be kept.", - "format": "int32", - "type": "integer" - }, - "onPolicySwitch": { - "description": "Specifies the behavior to apply to existing, scheduled snapshots snapshots if the policy is changed.", + "locality": { + "description": "Specifies network locality", "enum": [ - "DO_NOT_RETROACTIVELY_APPLY", - "RETROACTIVELY_APPLY", - "UNSPECIFIED_ON_POLICY_SWITCH" + "BEST_EFFORT", + "STRICT", + "UNSPECIFIED_LOCALITY" ], "enumDescriptions": [ "", @@ -43572,12 +47717,12 @@ ], "type": "string" }, - "onSourceDiskDelete": { - "description": "Specifies the behavior to apply to scheduled snapshots when the source disk is deleted.", + "style": { + "description": "Specifies instances to hosts placement relationship", "enum": [ - "APPLY_RETENTION_POLICY", - "KEEP_AUTO_SNAPSHOTS", - "UNSPECIFIED_ON_SOURCE_DISK_DELETE" + "COMPACT", + "FULLY_SPREAD", + "UNSPECIFIED_PLACEMENT_TYPE" ], "enumDescriptions": [ "", @@ -43585,67 +47730,11 @@ "" ], "type": "string" - } - }, - "type": "object" - }, - "ResourcePolicyBackupSchedulePolicySchedule": { - "description": "A schedule for disks where the schedueled operations are performed.", - "id": "ResourcePolicyBackupSchedulePolicySchedule", - "properties": { - "dailySchedule": { - "$ref": "ResourcePolicyDailyCycle" - }, - "hourlySchedule": { - "$ref": "ResourcePolicyHourlyCycle" }, - "weeklySchedule": { - "$ref": "ResourcePolicyWeeklyCycle" - } - }, - "type": "object" - }, - "ResourcePolicyBackupSchedulePolicySnapshotProperties": { - "description": "Specified snapshot properties for scheduled snapshots created by this policy.", - "id": "ResourcePolicyBackupSchedulePolicySnapshotProperties", - "properties": { - "guestFlush": { - "description": "Indication to perform a ?guest aware? snapshot.", - "type": "boolean" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to apply to scheduled snapshots. These can be later modified by the setLabels method. Label values may be empty.", - "type": "object" - }, - "storageLocations": { - "description": "GCS bucket storage location of the auto snapshot (regional or multi-regional).", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "ResourcePolicyDailyCycle": { - "description": "Time window specified for daily operations.", - "id": "ResourcePolicyDailyCycle", - "properties": { - "daysInCycle": { - "description": "Allows to define schedule that runs every nth day of the month.", + "vmCount": { + "description": "Number of vms in this placement group", "format": "int32", "type": "integer" - }, - "duration": { - "description": "[Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario.", - "type": "string" - }, - "startTime": { - "description": "Time within the window to start the operations. It must be in format \"HH:MM\", where HH : [00-23] and MM : [00-00] GMT.", - "type": "string" } }, "type": "object" @@ -43784,6 +47873,106 @@ }, "type": "object" }, + "ResourcePolicySnapshotSchedulePolicy": { + "description": "A snapshot schedule policy specifies when and how frequently snapshots are to be created for the target disk. 
Also specifies how many and how long these scheduled snapshots should be retained.", + "id": "ResourcePolicySnapshotSchedulePolicy", + "properties": { + "retentionPolicy": { + "$ref": "ResourcePolicySnapshotSchedulePolicyRetentionPolicy", + "description": "Retention policy applied to snapshots created by this resource policy." + }, + "schedule": { + "$ref": "ResourcePolicySnapshotSchedulePolicySchedule", + "description": "A Vm Maintenance Policy specifies what kind of infrastructure maintenance we are allowed to perform on this VM and when. Schedule that is applied to disks covered by this policy." + }, + "snapshotProperties": { + "$ref": "ResourcePolicySnapshotSchedulePolicySnapshotProperties", + "description": "Properties with which snapshots are created such as labels, encryption keys." + } + }, + "type": "object" + }, + "ResourcePolicySnapshotSchedulePolicyRetentionPolicy": { + "description": "Policy for retention of scheduled snapshots.", + "id": "ResourcePolicySnapshotSchedulePolicyRetentionPolicy", + "properties": { + "maxRetentionDays": { + "description": "Maximum age of the snapshot that is allowed to be kept.", + "format": "int32", + "type": "integer" + }, + "onPolicySwitch": { + "description": "Specifies the behavior to apply to existing, scheduled snapshots snapshots if the policy is changed.", + "enum": [ + "DO_NOT_RETROACTIVELY_APPLY", + "RETROACTIVELY_APPLY", + "UNSPECIFIED_ON_POLICY_SWITCH" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "onSourceDiskDelete": { + "description": "Specifies the behavior to apply to scheduled snapshots when the source disk is deleted.", + "enum": [ + "APPLY_RETENTION_POLICY", + "KEEP_AUTO_SNAPSHOTS", + "UNSPECIFIED_ON_SOURCE_DISK_DELETE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "ResourcePolicySnapshotSchedulePolicySchedule": { + "description": "A schedule for disks where the schedueled operations are performed.", + "id": "ResourcePolicySnapshotSchedulePolicySchedule", + "properties": { + "dailySchedule": { + "$ref": "ResourcePolicyDailyCycle" + }, + "hourlySchedule": { + "$ref": "ResourcePolicyHourlyCycle" + }, + "weeklySchedule": { + "$ref": "ResourcePolicyWeeklyCycle" + } + }, + "type": "object" + }, + "ResourcePolicySnapshotSchedulePolicySnapshotProperties": { + "description": "Specified snapshot properties for scheduled snapshots created by this policy.", + "id": "ResourcePolicySnapshotSchedulePolicySnapshotProperties", + "properties": { + "guestFlush": { + "description": "Indication to perform a ?guest aware? snapshot.", + "type": "boolean" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to scheduled snapshots. These can be later modified by the setLabels method. Label values may be empty.", + "type": "object" + }, + "storageLocations": { + "description": "GCS bucket storage location of the auto snapshot (regional or multi-regional).", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "ResourcePolicyVmMaintenancePolicy": { "id": "ResourcePolicyVmMaintenancePolicy", "properties": { @@ -43918,6 +48107,10 @@ "description": "The URL to an instance that should handle matching packets. You can specify this as a full or partial URL. 
For example:\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/", "type": "string" }, + "nextHopInterconnectAttachment": { + "description": "[Output Only] The URL to an InterconnectAttachment which is the next hop for the route. This field will only be populated for the dynamic routes generated by Cloud Router with a linked interconnectAttachment.", + "type": "string" + }, "nextHopIp": { "description": "The network IP address of an instance that should handle matching packets. Only IPv4 is supported.", "type": "string" @@ -43948,6 +48141,10 @@ "description": "[Output Only] Server-defined fully-qualified URL for this resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "tags": { "annotations": { "required": [ @@ -44168,7 +48365,7 @@ "description": "BGP information specific to this router." }, "bgpPeers": { - "description": "BGP information that needs to be configured into the routing stack to establish the BGP peering. It must specify peer ASN and either interface name, IP, or peer IP. Please refer to RFC4273.", + "description": "BGP information that must be configured into the routing stack to establish BGP peering. This information must specify the peer ASN and either the interface name, IP address, or peer IP address. Please refer to RFC4273.", "items": { "$ref": "RouterBgpPeer" }, @@ -44188,7 +48385,7 @@ "type": "string" }, "interfaces": { - "description": "Router interfaces. Each interface requires either one linked resource (e.g. linkedVpnTunnel), or IP address and IP address range (e.g. ipRange), or both.", + "description": "Router interfaces. Each interface requires either one linked resource, (for example, linkedVpnTunnel), or IP address and IP address range (for example, ipRange), or both.", "items": { "$ref": "RouterInterface" }, @@ -44210,7 +48407,7 @@ "type": "string" }, "nats": { - "description": "A list of Nat services created in this router.", + "description": "A list of NAT services created in this router.", "items": { "$ref": "RouterNat" }, @@ -44232,6 +48429,10 @@ "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" + }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" } }, "type": "object" @@ -44368,7 +48569,7 @@ "id": "RouterBgp", "properties": { "advertiseMode": { - "description": "User-specified flag to indicate which mode to use for advertisement.", + "description": "User-specified flag to indicate which mode to use for advertisement. The options are DEFAULT or CUSTOM.", "enum": [ "CUSTOM", "DEFAULT" @@ -44407,6 +48608,11 @@ "description": "Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. All VPN tunnels that link to this router will have the same local ASN.", "format": "uint32", "type": "integer" + }, + "keepaliveInterval": { + "description": "The interval in seconds between BGP keepalive messages that are sent to the peer. Hold time is three times the interval at which keepalive messages are sent, and the hold time is the maximum number of seconds allowed to elapse between successive keepalive messages that BGP receives from a peer. BGP will use the smaller of either the local hold time value or the peer?s hold time value as the hold time for the BGP connection between the two peers. 
If set, this value must be between 1 and 120. The default is 20.", + "format": "uint32", + "type": "integer" } }, "type": "object" @@ -44427,7 +48633,7 @@ "type": "string" }, "advertisedGroups": { - "description": "User-specified list of prefix groups to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in Bgp message). These groups will be advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", + "description": "User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: \n- ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. \n- ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. \n- ALL_PEER_VPC_SUBNETS: Advertises peer subnets of the router's VPC network. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", "items": { "enum": [ "ALL_PEER_VPC_SUBNETS", @@ -44444,17 +48650,33 @@ "type": "array" }, "advertisedIpRanges": { - "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in Bgp message). These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges.", + "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). These IP ranges are advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges.", "items": { "$ref": "RouterAdvertisedIpRange" }, "type": "array" }, "advertisedRoutePriority": { - "description": "The priority of routes advertised to this BGP peer. In the case where there is more than one matching route of maximum length, the routes with lowest priority value win.", + "description": "The priority of routes advertised to this BGP peer. Where there is more than one matching route of maximum length, the routes with the lowest priority value win.", "format": "uint32", "type": "integer" }, + "bfd": { + "$ref": "RouterBgpPeerBfd", + "description": "BFD configuration for the BGP peering." + }, + "enable": { + "description": "The status of the BGP peer connection. If set to FALSE, any active session with the peer is terminated and all associated routing information is removed. If set to TRUE, the peer connection can be established with routing information. The default is TRUE.", + "enum": [ + "FALSE", + "TRUE" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, "interfaceName": { "description": "Name of the interface the BGP peer is associated with.", "type": "string" @@ -44464,7 +48686,7 @@ "type": "string" }, "managementType": { - "description": "[Output Only] The resource that configures and manages this BGP peer. MANAGED_BY_USER is the default value and can be managed by you or other users; MANAGED_BY_ATTACHMENT is a BGP peer that is configured and managed by Cloud Interconnect, specifically by an InterconnectAttachment of type PARTNER. 
Google will automatically create, update, and delete this type of BGP peer when the PARTNER InterconnectAttachment is created, updated, or deleted.", + "description": "[Output Only] The resource that configures and manages this BGP peer. \n- MANAGED_BY_USER is the default value and can be managed by you or other users \n- MANAGED_BY_ATTACHMENT is a BGP peer that is configured and managed by Cloud Interconnect, specifically by an InterconnectAttachment of type PARTNER. Google automatically creates, updates, and deletes this type of BGP peer when the PARTNER InterconnectAttachment is created, updated, or deleted.", "enum": [ "MANAGED_BY_ATTACHMENT", "MANAGED_BY_USER" @@ -44481,34 +48703,100 @@ "type": "string" }, "peerAsn": { - "description": "Peer BGP Autonomous System Number (ASN). For VPN use case, this value can be different for every tunnel.", + "description": "Peer BGP Autonomous System Number (ASN). Each BGP interface may use a different value.", "format": "uint32", "type": "integer" }, "peerIpAddress": { - "description": "IP address of the BGP interface outside Google cloud. Only IPv4 is supported.", + "description": "IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported.", "type": "string" } }, "type": "object" }, + "RouterBgpPeerBfd": { + "id": "RouterBgpPeerBfd", + "properties": { + "minReceiveInterval": { + "description": "The minimum interval, in milliseconds, between BFD packets received from the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the transmit interval of the other router. If BFD echo mode is enabled on this router and the peer router, this value is used to negotiate the interval between BFD echo packets transmitted by the peer router. Otherwise, it will be used to determine the interval between BFD control packets. If set, this value must be between 33 and 30000. The default is 300.", + "format": "uint32", + "type": "integer" + }, + "minTransmitInterval": { + "description": "The minimum interval, in milliseconds, between BFD packets transmitted to the peer router. The actual value is negotiated between the two routers and is equal to the greater of this value and the corresponding receive interval of the other router. If BFD echo mode is enabled on this router and the peer router, this value is used to negotiate the interval between BFD echo packets transmitted by this router. Otherwise, it will be used to determine the interval between BFD control packets. If set, this value must be between 33 and 30000. The default is 300.", + "format": "uint32", + "type": "integer" + }, + "mode": { + "description": "The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE.", + "enum": [ + "ACTIVE", + "DISABLED", + "PASSIVE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "multiplier": { + "description": "The number of consecutive BFD packets that must be missed before BFD declares that a peer is unavailable. If set, the value must be a value between 2 and 16. The default is 3.", + "format": "uint32", + "type": "integer" + }, + "packetMode": { + "description": "The BFD packet mode for this BGP peer. If set to CONTROL_AND_ECHO, BFD echo mode is enabled for this BGP peer. 
In this mode, if the peer router also has BFD echo mode enabled, BFD echo packets will be sent to the other router. If the peer router does not have BFD echo mode enabled, only control packets will be sent. If set to CONTROL_ONLY, BFD echo mode is disabled for this BGP peer. If this router and the peer router have a multihop connection, this should be set to CONTROL_ONLY as BFD echo mode is only supported on singlehop connections. The default is CONTROL_AND_ECHO.", + "enum": [ + "CONTROL_AND_ECHO", + "CONTROL_ONLY" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "sessionInitializationMode": { + "description": "The BFD session initialization mode for this BGP peer. If set to ACTIVE, the Cloud Router will initiate the BFD session for this BGP peer. If set to PASSIVE, the Cloud Router will wait for the peer router to initiate the BFD session for this BGP peer. If set to DISABLED, BFD is disabled for this BGP peer. The default is PASSIVE.", + "enum": [ + "ACTIVE", + "DISABLED", + "PASSIVE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "slowTimerInterval": { + "description": "The minimum interval, in milliseconds, between BFD control packets transmitted to and received from the peer router when BFD echo mode is enabled on both routers. The actual transmit and receive intervals are negotiated between the two routers and are equal to the greater of this value and the corresponding interval on the other router. If set, this value must be between 1000 and 30000. The default is 5000.", + "format": "uint32", + "type": "integer" + } + }, + "type": "object" + }, "RouterInterface": { "id": "RouterInterface", "properties": { "ipRange": { - "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface.", + "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP address space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface.", "type": "string" }, "linkedInterconnectAttachment": { - "description": "URI of the linked interconnect attachment. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment.", + "description": "URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be either be a VPN tunnel or an Interconnect attachment.", "type": "string" }, "linkedVpnTunnel": { - "description": "URI of the linked VPN tunnel. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment.", + "description": "URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have one linked resource, which can be either a VPN tunnel or an Interconnect attachment.", "type": "string" }, "managementType": { - "description": "[Output Only] The resource that configures and manages this interface. 
MANAGED_BY_USER is the default value and can be managed by you or other users; MANAGED_BY_ATTACHMENT is an interface that is configured and managed by Cloud Interconnect, specifically by an InterconnectAttachment of type PARTNER. Google will automatically create, update, and delete this type of interface when the PARTNER InterconnectAttachment is created, updated, or deleted.", + "description": "[Output Only] The resource that configures and manages this interface. \n- MANAGED_BY_USER is the default value and can be managed directly by users. \n- MANAGED_BY_ATTACHMENT is an interface that is configured and managed by Cloud Interconnect, specifically, by an InterconnectAttachment of type PARTNER. Google automatically creates, updates, and deletes this type of interface when the PARTNER InterconnectAttachment is created, updated, or deleted.", "enum": [ "MANAGED_BY_ATTACHMENT", "MANAGED_BY_USER" @@ -44643,6 +48931,13 @@ "description": "Represents a Nat resource. It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided.", "id": "RouterNat", "properties": { + "drainNatIps": { + "description": "A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT only.", + "items": { + "type": "string" + }, + "type": "array" + }, "icmpIdleTimeoutSec": { "description": "Timeout (in seconds) for ICMP connections. Defaults to 30s if not set.", "format": "int32", @@ -44653,7 +48948,7 @@ "description": "Configure logging on this NAT." }, "minPortsPerVm": { - "description": "Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. This gets rounded up to the nearest power of 2. Eg. if the value of this field is 50, at least 64 ports will be allocated to a VM.", + "description": "Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. This is rounded up to the nearest power of 2. For example, if the value of this field is 50, at least 64 ports are allocated to a VM.", "format": "int32", "type": "integer" }, @@ -44663,7 +48958,7 @@ "type": "string" }, "natIpAllocateOption": { - "description": "Specify the NatIpAllocateOption. If it is AUTO_ONLY, then nat_ip should be empty.", + "description": "Specify the NatIpAllocateOption, which can take one of the following values: \n- MANUAL_ONLY: Uses only Nat IP addresses provided by customers. When there are not enough specified Nat IPs, the Nat service fails for new VMs. \n- AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; customers can't specify any Nat IPs. When choosing AUTO_ONLY, then nat_ip should be empty.", "enum": [ "AUTO_ONLY", "MANUAL_ONLY" @@ -44675,14 +48970,14 @@ "type": "string" }, "natIps": { - "description": "A list of URLs of the IP resources used for this Nat service. These IPs must be valid static external IP addresses assigned to the project. max_length is subject to change post alpha.", + "description": "A list of URLs of the IP resources used for this Nat service. 
These IP addresses must be valid static external IP addresses assigned to the project.", "items": { "type": "string" }, "type": "array" }, "sourceSubnetworkIpRangesToNat": { - "description": "Specify the Nat option. If this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", + "description": "Specify the Nat option, which can take one of the following values: \n- ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. \n- ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. \n- LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", "enum": [ "ALL_SUBNETWORKS_ALL_IP_RANGES", "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", @@ -44724,17 +49019,19 @@ "description": "Configuration of logging on a NAT.", "id": "RouterNatLogConfig", "properties": { - "enabled": { + "enable": { "description": "Indicates whether or not to export logs. This is false by default.", "type": "boolean" }, "filter": { "description": "Specifies the desired filtering of logs on this NAT. If unspecified, logs are exported for all connections handled by this NAT.", "enum": [ + "ALL", "ERRORS_ONLY", "TRANSLATIONS_ONLY" ], "enumDescriptions": [ + "", "", "" ], @@ -44748,7 +49045,7 @@ "id": "RouterNatSubnetworkToNat", "properties": { "name": { - "description": "URL for the subnetwork resource to use NAT.", + "description": "URL for the subnetwork resource that will use NAT.", "type": "string" }, "secondaryIpRangeNames": { @@ -44759,7 +49056,7 @@ "type": "array" }, "sourceIpRangesToNat": { - "description": "Specify the options for NAT ranges in the Subnetwork. All usages of single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple values is: [\"PRIMARY_IP_RANGE\", \"LIST_OF_SECONDARY_IP_RANGES\"] Default: [ALL_IP_RANGES]", + "description": "Specify the options for NAT ranges in the Subnetwork. All options of a single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple values is: [\"PRIMARY_IP_RANGE\", \"LIST_OF_SECONDARY_IP_RANGES\"] Default: [ALL_IP_RANGES]", "items": { "enum": [ "ALL_IP_RANGES", @@ -44875,7 +49172,7 @@ "type": "object" }, "RouterStatusNatStatus": { - "description": "Status of a NAT contained in this router.", + "description": "Status of a NAT contained in this router. Next tag: 9", "id": "RouterStatusNatStatus", "properties": { "autoAllocatedNatIps": { @@ -44885,6 +49182,20 @@ }, "type": "array" }, + "drainAutoAllocatedNatIps": { + "description": "A list of IPs auto-allocated for NAT that are in drain mode. Example: [\"1.1.1.1\", ?179.12.26.133?].", + "items": { + "type": "string" + }, + "type": "array" + }, + "drainUserAllocatedNatIps": { + "description": "A list of IPs user-allocated for NAT that are in drain mode. Example: [\"1.1.1.1\", ?179.12.26.133?].", + "items": { + "type": "string" + }, + "type": "array" + }, "minExtraNatIpsNeeded": { "description": "The number of extra IPs to allocate. 
This will be greater than 0 only if user-specified IPs are NOT enough to allow all configured VMs to use NAT. This value is meaningful only when auto-allocation of NAT IPs is *not* used.", "format": "int32", @@ -45162,15 +49473,15 @@ "type": "boolean" }, "deviceName": { - "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks.", + "description": "Specifies the name of the disk attached to the source instance.", "type": "string" }, "diskEncryptionKey": { "$ref": "CustomerEncryptionKey", - "description": "Encrypts or decrypts a disk using a customer-supplied encryption key.\n\nIf you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key.\n\nIf you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance.\n\nIf you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later.\n\nMachine Images do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group." + "description": "The encryption key for the disk." }, "diskSizeGb": { - "description": "The size of the disk in base-2 GB. This supersedes disk_size_gb in InitializeParams.", + "description": "The size of the disk in base-2 GB.", "format": "int64", "type": "string" }, @@ -45182,16 +49493,12 @@ "type": "array" }, "index": { - "description": "[Output Only] A zero-based index to this disk, where 0 is reserved for the boot disk. If you have many disks attached to an instance, each disk would have a unique index number.", + "description": "Specifies zero-based index of the disk that is attached to the source instance.", "format": "int32", "type": "integer" }, - "initializeParams": { - "$ref": "AttachedDiskInitializeParams", - "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance.\n\nThis property is mutually exclusive with the source property; you can only define one or the other, but not both." - }, "interface": { - "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. 
For performance characteristics of SCSI over NVMe, see Local SSD performance.", + "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME.", "enum": [ "NVDIMM", "NVME", @@ -45217,7 +49524,7 @@ "type": "array" }, "mode": { - "description": "The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode.", + "description": "The mode in which this disk is attached to the source instance, either READ_WRITE or READ_ONLY.", "enum": [ "READ_ONLY", "READ_WRITE" @@ -45228,24 +49535,12 @@ ], "type": "string" }, - "savedState": { - "description": "For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request. (see the discard_local_ssd option on Stop/Suspend). Read-only in the api.", - "enum": [ - "DISK_SAVED_STATE_UNSPECIFIED", - "PRESERVED" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, "source": { - "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for sourceMachineImage, specify the disk name, not the URL for the disk.", + "description": "Specifies a URL of the disk attached to the source instance.", "type": "string" }, "storageBytes": { - "description": "[Output Only] A size of the storage used by the disk's snapshot.", + "description": "[Output Only] A size of the storage used by the disk's snapshot by this machine image.", "format": "int64", "type": "string" }, @@ -45262,7 +49557,7 @@ "type": "string" }, "type": { - "description": "Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT.", + "description": "Specifies the type of the attached disk, either SCRATCH or PERSISTENT.", "enum": [ "PERSISTENT", "SCRATCH" @@ -45284,6 +49579,10 @@ "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted.\n\nBy default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine.", "type": "boolean" }, + "latencyTolerant": { + "description": "Defines whether the instance is tolerant of higher cpu latency. This can only be set during instance creation, or when the instance is not currently running. It must not be set if the preemptible option is also set.", + "type": "boolean" + }, "minNodeCpus": { "description": "The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node.", "format": "int32", @@ -45347,6 +49646,17 @@ }, "type": "object" }, + "SdsConfig": { + "description": "The configuration to access the SDS server.", + "id": "SdsConfig", + "properties": { + "grpcServiceConfig": { + "$ref": "GrpcServiceConfig", + "description": "The configuration to access the SDS server over GRPC." 
+ } + }, + "type": "object" + }, "SecurityPoliciesListPreconfiguredExpressionSetsResponse": { "id": "SecurityPoliciesListPreconfiguredExpressionSetsResponse", "properties": { @@ -45416,6 +49726,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "ruleTupleCount": { + "description": "[Output Only] Total count of all security policy rule tuples. A security policy can not exceed a set number of tuples.", + "format": "int32", + "type": "integer" + }, "rules": { "description": "A list of rules that belong to this policy. There must always be a default rule (rule with priority 2147483647 and match \"*\"). If no rules are provided when creating a security policy, a default rule with action \"allow\" will be added.", "items": { @@ -45427,6 +49742,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "type": { "description": "The type indicates the intended use of the security policy. CLOUD_ARMOR policies apply to backend services. FIREWALL policies apply to organizations.", "enum": [ @@ -45452,6 +49771,10 @@ "name": { "description": "The name for an association.", "type": "string" + }, + "securityPolicyId": { + "description": "[Output Only] The security policy ID of the association.", + "type": "string" } }, "type": "object" @@ -45618,6 +49941,15 @@ "format": "int32", "type": "integer" }, + "rateLimitOptions": { + "$ref": "SecurityPolicyRuleRateLimitOptions", + "description": "Must be specified if the action is \"rate_based_blacklist\" or \"throttle\". Cannot be specified for any other actions." + }, + "ruleTupleCount": { + "description": "[Output Only] Calculation of the complexity of a single firewall security policy rule.", + "format": "int32", + "type": "integer" + }, "targetResources": { "description": "A list of network resource URLs to which this rule applies. This field allows you to control which network?s VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule.\n\nThis field may only be specified when versioned_expr is set to FIREWALL.", "items": { @@ -45699,6 +50031,65 @@ }, "type": "object" }, + "SecurityPolicyRuleRateLimitOptions": { + "id": "SecurityPolicyRuleRateLimitOptions", + "properties": { + "blockDuration": { + "description": "Can only be specifed if the action for the rule is \"rate_based_blacklist\" If specified, determines the time (in seconds) the traffic will continue to be blocked by the rate limit after the rate falls below the threshold. The default value is 0 seconds.", + "format": "int32", + "type": "integer" + }, + "conformAction": { + "description": "Action to take when requests are under the given threshold. When requests are throttled, this is also the action for all requests which are not dropped. Valid options are \"allow\", \"fairshare\", and \"drop_overload\".", + "type": "string" + }, + "enforceOnKey": { + "description": "Determines the key to enforce the threshold_rps limit on. If key is \"IP\", each IP has this limit enforced separately, whereas \"ALL_IPs\" means a single limit is applied to all requests matching this rule.", + "enum": [ + "ALL_IPS", + "IP" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "exceedAction": { + "description": "When a request is denied, returns the HTTP response code specified. 
Valid options are \"deny()\" where valid values for status are 403, 404, 429, and 502.", + "type": "string" + }, + "thresholdRps": { + "description": "Rate in requests per second at which to begin ratelimiting.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "SecuritySettings": { + "description": "The authentication and authorization settings for a BackendService.", + "id": "SecuritySettings", + "properties": { + "authenticationPolicy": { + "$ref": "AuthenticationPolicy", + "description": "Authentication policy defines what authentication methods can be accepted on backends, and if authenticated, which method/certificate will set the request principal." + }, + "authorizationConfig": { + "$ref": "AuthorizationConfig", + "description": "Authorization config defines the Role Based Access Control (RBAC) config." + }, + "clientTlsSettings": { + "$ref": "ClientTlsSettings", + "description": "TLS Settings for the backend service." + }, + "serverSettingsSelector": { + "$ref": "ServerSecuritySettingsSelector", + "description": "The listener config of the XDS client is generated if the selector matches the client." + } + }, + "type": "object" + }, "SerialPortOutput": { "description": "An instance's serial console output.", "id": "SerialPortOutput", @@ -45748,6 +50139,57 @@ }, "type": "object" }, + "ServerSecuritySettingsSelector": { + "description": "A selector associated with the SecuritySettings. If the labels and port in this selector match the Envoy's label and port, the server side authentication and authorization settings are applied to the Envoy.", + "id": "ServerSecuritySettingsSelector", + "properties": { + "labelMatches": { + "description": "The labels associated with the XDS client.", + "items": { + "$ref": "MetadataFilterLabelMatch" + }, + "type": "array" + }, + "port": { + "description": "The listener port of the XDS client.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "ServerTlsSettings": { + "description": "The TLS settings for the server.", + "id": "ServerTlsSettings", + "properties": { + "proxyTlsContext": { + "$ref": "TlsContext", + "description": "Configures the mechanism to obtain security certificates and identity information." + }, + "subjectAltNames": { + "description": "A list of alternate names to verify the subject identity in the certificate presented by the client.", + "items": { + "type": "string" + }, + "type": "array" + }, + "tlsMode": { + "description": "Indicates whether connections should be secured using TLS. The value of this field determines how TLS is enforced. This field can be set to one of the following: \n- SIMPLE Secure connections with standard TLS semantics. 
\n- MUTUAL Secure connections to the backends using mutual TLS by presenting client certificates for authentication.", + "enum": [ + "INVALID", + "MUTUAL", + "SIMPLE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "ServiceAccount": { "description": "A service account.", "id": "ServiceAccount", @@ -45766,6 +50208,87 @@ }, "type": "object" }, + "ServiceAccountJwtAccessCredentials": { + "description": "JWT credentials for a service account.", + "id": "ServiceAccountJwtAccessCredentials", + "properties": { + "jsonKey": { + "description": "Service account key.", + "type": "string" + }, + "tokenLifetimeSeconds": { + "description": "The token lifetime seconds.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "ShieldedInstanceConfig": { + "description": "A set of Shielded Instance options.", + "id": "ShieldedInstanceConfig", + "properties": { + "enableIntegrityMonitoring": { + "description": "Defines whether the instance has integrity monitoring enabled.", + "type": "boolean" + }, + "enableSecureBoot": { + "description": "Defines whether the instance has Secure Boot enabled.", + "type": "boolean" + }, + "enableVtpm": { + "description": "Defines whether the instance has the vTPM enabled.", + "type": "boolean" + } + }, + "type": "object" + }, + "ShieldedInstanceIdentity": { + "description": "A shielded Instance identity entry.", + "id": "ShieldedInstanceIdentity", + "properties": { + "encryptionKey": { + "$ref": "ShieldedInstanceIdentityEntry", + "description": "An Endorsement Key (EK) issued to the Shielded Instance's vTPM." + }, + "kind": { + "default": "compute#shieldedInstanceIdentity", + "description": "[Output Only] Type of the resource. Always compute#shieldedInstanceIdentity for shielded Instance identity entry.", + "type": "string" + }, + "signingKey": { + "$ref": "ShieldedInstanceIdentityEntry", + "description": "An Attestation Key (AK) issued to the Shielded Instance's vTPM." + } + }, + "type": "object" + }, + "ShieldedInstanceIdentityEntry": { + "description": "A Shielded Instance Identity Entry.", + "id": "ShieldedInstanceIdentityEntry", + "properties": { + "ekCert": { + "description": "A PEM-encoded X.509 certificate. 
This field can be empty.", + "type": "string" + }, + "ekPub": { + "description": "A PEM-encoded public key.", + "type": "string" + } + }, + "type": "object" + }, + "ShieldedInstanceIntegrityPolicy": { + "description": "The policy describes the baseline against which Instance boot integrity is measured.", + "id": "ShieldedInstanceIntegrityPolicy", + "properties": { + "updateAutoLearnPolicy": { + "description": "Updates the integrity policy baseline using the measurements from the VM instance's most recent boot.", + "type": "boolean" + } + }, + "type": "object" + }, "ShieldedVmConfig": { "description": "A set of Shielded VM options.", "id": "ShieldedVmConfig", @@ -45806,7 +50329,7 @@ "type": "object" }, "ShieldedVmIdentityEntry": { - "description": "A Shielded VM Identity Entry.", + "description": "A Shielded Instance Identity Entry.", "id": "ShieldedVmIdentityEntry", "properties": { "ekCert": { @@ -45921,9 +50444,13 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource's resource id.", + "type": "string" + }, "snapshotEncryptionKey": { "$ref": "CustomerEncryptionKey", - "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the image later For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." + "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the snapshot later. For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the snapshot.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." 
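Editorial note, not part of the vendored patch: the snapshotEncryptionKey description above says the same customer-supplied key must be presented again whenever the snapshot is used. A minimal sketch of that flow with the generated Go client follows; the project, zone, snapshot name, and raw key value are placeholders.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.alpha"
)

// Editorial sketch: create a disk from a snapshot that was encrypted
// with a customer-supplied key by passing the same raw key back as
// sourceSnapshotEncryptionKey. Placeholder values throughout.
func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	disk := &compute.Disk{
		Name:           "restored-disk",
		SourceSnapshot: "global/snapshots/my-encrypted-snapshot",
		SourceSnapshotEncryptionKey: &compute.CustomerEncryptionKey{
			// 256-bit key, base64-encoded (placeholder value).
			RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
		},
	}
	op, err := svc.Disks.Insert("my-project", "us-central1-a", disk).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("disk insert operation: %s", op.Name)
}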
}, "sourceDisk": { "description": "[Output Only] The source disk used to create this snapshot.", @@ -46117,7 +50644,7 @@ "type": "boolean" }, "deletionProtection": { - "description": "Whether the instance created from the machine image should be protected against deletion.", + "description": "Whether the instance created from this machine image should be protected against deletion.", "type": "boolean" }, "description": { @@ -46132,7 +50659,7 @@ "type": "array" }, "guestAccelerators": { - "description": "A list of guest accelerator cards' type and count to use for instances created from the machine image.", + "description": "A list of guest accelerator cards' type and count to use for instances created from this machine image.", "items": { "$ref": "AcceleratorConfig" }, @@ -46146,11 +50673,6 @@ "type": "object" }, "machineType": { - "annotations": { - "required": [ - "compute.machineImages.insert" - ] - }, "description": "The machine type to use for instances that are created from this machine image.", "type": "string" }, @@ -46159,7 +50681,7 @@ "description": "The metadata key/value pairs to assign to instances that are created from this machine image. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." }, "minCpuPlatform": { - "description": "Minimum cpu/platform to be used by this instance. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". For more information, read Specifying a Minimum CPU Platform.", + "description": "Minimum cpu/platform to be used by instances created from this machine image. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". For more information, read Specifying a Minimum CPU Platform.", "type": "string" }, "networkInterfaces": { @@ -46238,6 +50760,10 @@ "description": "[Output only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "selfManaged": { "$ref": "SslCertificateSelfManagedSslCertificate", "description": "Configuration and status of a self-managed SSL certificate." @@ -46861,6 +51387,14 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, + "tlsSettings": { + "$ref": "ServerTlsSettings", + "description": "Security settings for the proxy. This field is only applicable to a global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED." + }, "warnings": { "description": "[Output Only] If potential misconfigurations are detected for this SSL policy, this field will be populated with warning messages.", "items": { @@ -47015,9 +51549,11 @@ "description": "These stateful disks will never be deleted during autohealing, update or VM instance recreate operations. This flag is used to configure if the disk should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. 
Note: disks attached in READ_ONLY mode cannot be auto-deleted.", "enum": [ "NEVER", + "ON_PERMANENT_INSTANCE_DELETION", "WHEN_NOT_IN_USE" ], "enumDescriptions": [ + "", "", "" ], @@ -47031,7 +51567,7 @@ "id": "Subnetwork", "properties": { "aggregationInterval": { - "description": "Can only be specified if VPC flow logging for this subnetwork is enabled. Toggles the aggregation interval for collecting flow logs. Increasing the interval time will reduce the amount of generated flow logs for long lasting connections. Default is an interval of 5 seconds per connection.", + "description": "Can only be specified if VPC flow logging for this subnetwork is enabled. Sets the aggregation interval for collecting flow logs. Increasing the interval time reduces the amount of generated flow logs for long-lasting connections. Default is an interval of 5 seconds per connection. Valid values: INTERVAL_5_SEC, INTERVAL_30_SEC, INTERVAL_1_MIN, INTERVAL_5_MIN, INTERVAL_10_MIN, INTERVAL_15_MIN.", "enum": [ "INTERVAL_10_MIN", "INTERVAL_15_MIN", @@ -47067,7 +51603,7 @@ "type": "boolean" }, "enablePrivateV6Access": { - "description": "Whether the VMs in this subnet can directly access Google services via internal IPv6 addresses. This field can be both set at resource creation time and updated using patch.", + "description": "Deprecated in favor of enable in PrivateIpv6GoogleAccess. Whether the VMs in this subnet can directly access Google services via internal IPv6 addresses. This field can be both set at resource creation time and updated using patch.", "type": "boolean" }, "fingerprint": { @@ -47076,7 +51612,7 @@ "type": "string" }, "flowSampling": { - "description": "Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5 which means half of all collected logs are reported.", + "description": "Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5, which means half of all collected logs are reported.", "format": "float", "type": "number" }, @@ -47104,7 +51640,7 @@ }, "logConfig": { "$ref": "SubnetworkLogConfig", - "description": "This field denotes the logging options for the load balancer traffic served by this backend service. If logging is enabled, logs will be exported to Stackdriver." + "description": "This field denotes the VPC flow logging options for this subnetwork. If logging is enabled, logs are exported to Stackdriver." }, "metadata": { "description": "Can only be specified if VPC flow logging for this subnetwork is enabled. Configures whether metadata fields should be added to the reported VPC flow logs. Default is INCLUDE_ALL_METADATA.", @@ -47131,6 +51667,20 @@ "description": "Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess.", "type": "boolean" }, + "privateIpv6GoogleAccess": { + "description": "The private IPv6 google access type for the VMs in this subnet. This is an expanded field of enablePrivateV6Access. 
If both fields are set, privateIpv6GoogleAccess will take priority.\n\nThis field can be both set at resource creation time and updated using patch.", + "enum": [ + "DISABLE_GOOGLE_ACCESS", + "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE", + "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "purpose": { "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918.", "enum": [ @@ -47145,6 +51695,18 @@ ], "type": "string" }, + "rangeType": { + "description": "The type of IP CIDR range to associate with this subnetwork. The default is RFC_1918. When creating a subnetwork in non-RFC 1918 range, this field must be set to NON_RFC_1918.", + "enum": [ + "NON_RFC_1918", + "RFC_1918" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, "region": { "description": "URL of the region where the Subnetwork resides. This field can be set only at resource creation time.", "type": "string" @@ -47172,6 +51734,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "state": { "description": "[Output Only] The state of the subnetwork, which can be one of READY or DRAINING. A subnetwork that is READY is ready to be used. The state of DRAINING is only applicable to subnetworks that have the purpose set to INTERNAL_HTTPS_LOAD_BALANCER and indicates that connections to the load balancer are being drained. A subnetwork that is draining cannot be used or modified until it reaches a status of READY.", "enum": [ @@ -47440,7 +52006,7 @@ "type": "boolean" }, "flowSampling": { - "description": "Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5 which means half of all collected logs are reported.", + "description": "Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5, which means half of all collected logs are reported.", "format": "float", "type": "number" }, @@ -47470,6 +52036,18 @@ "rangeName": { "description": "The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork.", "type": "string" + }, + "rangeType": { + "description": "The type of IP CIDR range to associate with this subnetwork secondary range. The default is RFC_1918. When creating a subnetwork in non-RFC 1918 range, this field must be set to NON_RFC_1918.", + "enum": [ + "NON_RFC_1918", + "RFC_1918" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" } }, "type": "object" @@ -47480,6 +52058,18 @@ "ipCidrRange": { "description": "The IP (in CIDR format or netmask) of internal addresses that are legal on this Subnetwork. 
This range should be disjoint from other subnetworks within this network. This range can only be larger than (i.e. a superset of) the range previously defined before the update.", "type": "string" + }, + "rangeType": { + "description": "The type of IP CIDR range to associate with this subnetwork. The default is RFC_1918. When expanding to a non-RFC 1918 range, this field must be be set to NON_RFC_1918.", + "enum": [ + "NON_RFC_1918", + "RFC_1918" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" } }, "type": "object" @@ -47776,6 +52366,10 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "proxyBind": { + "description": "This field only applies when the loadBalancingScheme is INTERNAL_SELF_MANAGED. When set to true the Envoy binds on the IP address specified by the forwarding rule. Default is false.", + "type": "boolean" + }, "region": { "description": "[Output Only] URL of the region where the regional Target HTTP Proxy resides. This field is not applicable to global Target HTTP Proxies.", "type": "string" @@ -47784,6 +52378,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "urlMap": { "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService.", "type": "string" @@ -48169,6 +52767,10 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "proxyBind": { + "description": "This field only applies when the loadBalancingScheme is INTERNAL_SELF_MANAGED. When set to true the Envoy binds on the IP address specified by the forwarding rule. Default is false.", + "type": "boolean" + }, "quicOverride": { "description": "Specifies the QUIC override policy for this TargetHttpsProxy resource. This determines whether the load balancer will attempt to negotiate QUIC with clients or not. Can specify one of NONE, ENABLE, or DISABLE. Specify ENABLE to always enable QUIC, Enables QUIC when set to ENABLE, and disables QUIC when set to DISABLE. If NONE is specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. Not specifying this field is equivalent to specifying NONE.", "enum": [ @@ -48191,6 +52793,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "sslCertificates": { "description": "URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates.", "items": { @@ -48478,6 +53084,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "zone": { "description": "[Output Only] URL of the zone where the target instance resides. You must specify this field as part of the HTTP request URL. 
It is not settable as a field in the request body.", "type": "string" @@ -48861,6 +53471,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, "sessionAffinity": { "description": "Session affinity option, must be one of the following values:\nNONE: Connections from the same client IP may go to any instance in the pool.\nCLIENT_IP: Connections from the same client IP will go to the same instance in the pool while that instance remains healthy.\nCLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol will go to the same instance in the pool while that instance remains healthy.", "enum": [ @@ -48868,6 +53482,8 @@ "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", + "HEADER_FIELD", + "HTTP_COOKIE", "NONE" ], "enumDescriptions": [ @@ -48875,6 +53491,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -49695,7 +54313,7 @@ "type": "string" }, "forwardingRules": { - "description": "[Output Only] A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated to a VPN gateway.", + "description": "[Output Only] A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated with a VPN gateway.", "items": { "type": "string" }, @@ -49751,7 +54369,7 @@ "type": "string" }, "status": { - "description": "[Output Only] The status of the VPN gateway.", + "description": "[Output Only] The status of the VPN gateway, which can be one of the following: CREATING, READY, FAILED, or DELETING.", "enum": [ "CREATING", "DELETING", @@ -49767,7 +54385,7 @@ "type": "string" }, "tunnels": { - "description": "[Output Only] A list of URLs to VpnTunnel resources. VpnTunnels are created using compute.vpntunnels.insert method and associated to a VPN gateway.", + "description": "[Output Only] A list of URLs to VpnTunnel resources. VpnTunnels are created using the compute.vpntunnels.insert method and associated with a VPN gateway.", "items": { "type": "string" }, @@ -50004,7 +54622,7 @@ "id": "TargetVpnGatewaysScopedList", "properties": { "targetVpnGateways": { - "description": "[Output Only] A list of target vpn gateways contained in this scope.", + "description": "[Output Only] A list of target VPN gateways contained in this scope.", "items": { "$ref": "TargetVpnGateway" }, @@ -50138,6 +54756,94 @@ }, "type": "object" }, + "TlsCertificateContext": { + "description": "Defines the mechanism to obtain the client or server certificate.", + "id": "TlsCertificateContext", + "properties": { + "certificatePaths": { + "$ref": "TlsCertificatePaths", + "description": "Specifies the certificate and private key paths. This field is applicable only if tlsCertificateSource is set to USE_PATH." + }, + "certificateSource": { + "description": "Defines how TLS certificates are obtained.", + "enum": [ + "INVALID", + "USE_PATH", + "USE_SDS" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "sdsConfig": { + "$ref": "SdsConfig", + "description": "Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS." 
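Editorial note, not part of the vendored patch: TlsCertificateContext selects between file-based (USE_PATH) and SDS-based (USE_SDS) certificate sources, using the TlsCertificatePaths message defined just below. A minimal sketch of the USE_PATH flavour is shown here; the Go type and field names are assumed generated counterparts of this schema, and the file paths are placeholders.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v0.alpha"
)

// Editorial sketch: a file-based certificate context. Type and field
// names (TlsCertificateContext, TlsCertificatePaths) are assumptions
// derived from the JSON schema in this hunk.
func main() {
	certCtx := &compute.TlsCertificateContext{
		// USE_SDS would populate SdsConfig instead of CertificatePaths.
		CertificateSource: "USE_PATH",
		CertificatePaths: &compute.TlsCertificatePaths{
			CertificatePath: "/etc/envoy/certs/server.crt",
			PrivateKeyPath:  "/etc/envoy/certs/server.key",
		},
	}
	b, _ := json.MarshalIndent(certCtx, "", "  ")
	fmt.Println(string(b))
}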
+ } + }, + "type": "object" + }, + "TlsCertificatePaths": { + "description": "The paths to the mounted TLS Certificates and private key.", + "id": "TlsCertificatePaths", + "properties": { + "certificatePath": { + "description": "The path to the file holding the client or server TLS certificate to use.", + "type": "string" + }, + "privateKeyPath": { + "description": "The path to the file holding the client or server private key.", + "type": "string" + } + }, + "type": "object" + }, + "TlsContext": { + "description": "The TLS settings for the client or server.", + "id": "TlsContext", + "properties": { + "certificateContext": { + "$ref": "TlsCertificateContext", + "description": "Defines the mechanism to obtain the client or server certificate." + }, + "validationContext": { + "$ref": "TlsValidationContext", + "description": "Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate. If omitted, the proxy will not validate the server or client certificate." + } + }, + "type": "object" + }, + "TlsValidationContext": { + "description": "Defines the mechanism to obtain the Certificate Authority certificate to validate the client/server certificate.", + "id": "TlsValidationContext", + "properties": { + "certificatePath": { + "description": "The path to the file holding the CA certificate to validate the client or server certificate.", + "type": "string" + }, + "sdsConfig": { + "$ref": "SdsConfig", + "description": "Specifies the config to retrieve certificates through SDS. This field is applicable only if tlsCertificateSource is set to USE_SDS." + }, + "validationSource": { + "description": "Defines how TLS certificates are obtained.", + "enum": [ + "INVALID", + "USE_PATH", + "USE_SDS" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "UDPHealthCheck": { "id": "UDPHealthCheck", "properties": { @@ -50169,10 +54875,18 @@ "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, + "defaultRouteAction": { + "$ref": "HttpRouteAction", + "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set." + }, "defaultService": { - "description": "The URL of the backendService resource if none of the hostRules match.\nUse defaultService instead of defaultRouteAction when simple routing to a backendService is desired and other advanced capabilities like traffic splitting and rewrites are not required.\nOnly one of defaultService, defaultRouteAction or defaultUrlRedirect should must be set.", + "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. 
Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.", "type": "string" }, + "defaultUrlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When none of the specified hostRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set." + }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" @@ -50182,6 +54896,10 @@ "format": "byte", "type": "string" }, + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here take effect after headerAction specified under pathMatcher." + }, "hostRules": { "description": "The list of HostRules to use against the URL.", "items": { @@ -50354,10 +55072,19 @@ "description": "Message for the expected URL mappings.", "id": "UrlMapTest", "properties": { + "backendServiceWeight": { + "description": "The weight to use for the supplied host and path when using advanced routing rules that involve traffic splitting.", + "format": "uint32", + "type": "integer" + }, "description": { "description": "Description of this test case.", "type": "string" }, + "expectedUrlRedirect": { + "description": "The expected URL that should be redirected to for the host and path being tested.", + "type": "string" + }, "host": { "description": "Host portion of the URL.", "type": "string" @@ -50625,6 +55352,21 @@ }, "type": "object" }, + "UrlRewrite": { + "description": "The spec for modifying the path before sending the request to the matched backend service.", + "id": "UrlRewrite", + "properties": { + "hostRewrite": { + "description": "Prior to forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite.\nThe value must be between 1 and 255 characters.", + "type": "string" + }, + "pathPrefixRewrite": { + "description": "Prior to forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite.\nThe value must be between 1 and 1024 characters.", + "type": "string" + } + }, + "type": "object" + }, "UsableSubnetwork": { "description": "Subnetwork which the current user has compute.subnetworks.use permission on.", "id": "UsableSubnetwork", @@ -50813,6 +55555,13 @@ "description": "Contain information of Nat mapping for an interface of this endpoint.", "id": "VmEndpointNatMappingsInterfaceNatMappings", "properties": { + "drainNatIpPortRanges": { + "description": "List of all drain IP:port-range mappings assigned to this interface. These ranges are inclusive, that is, both the first and the last ports can be used for NAT. Example: [\"2.2.2.2:12345-12355\", \"1.1.1.1:2234-2234\"].", + "items": { + "type": "string" + }, + "type": "array" + }, "natIpPortRanges": { "description": "A list of all IP:port-range mappings assigned to this interface. These ranges are inclusive, that is, both the first and the last ports can be used for NAT. Example: [\"2.2.2.2:12345-12355\", \"1.1.1.1:2234-2234\"].", "items": { @@ -50820,6 +55569,11 @@ }, "type": "array" }, + "numTotalDrainNatPorts": { + "description": "Total number of drain ports across all NAT IPs allocated to this interface. 
It equals to the aggregated port number in the field drain_nat_ip_port_ranges.", + "format": "int32", + "type": "integer" + }, "numTotalNatPorts": { "description": "Total number of ports across all NAT IPs allocated to this interface. It equals to the aggregated port number in the field nat_ip_port_ranges.", "format": "int32", @@ -51243,6 +55997,95 @@ }, "type": "object" }, + "VpnGatewayStatus": { + "id": "VpnGatewayStatus", + "properties": { + "vpnConnections": { + "description": "List of VPN connection for this VpnGateway.", + "items": { + "$ref": "VpnGatewayStatusVpnConnection" + }, + "type": "array" + } + }, + "type": "object" + }, + "VpnGatewayStatusHighAvailabilityRequirementState": { + "description": "Describes the high availability requirement state for the VPN connection between this Cloud VPN gateway and a peer gateway.", + "id": "VpnGatewayStatusHighAvailabilityRequirementState", + "properties": { + "state": { + "description": "Indicates the high availability requirement state for the VPN connection. Valid values are CONNECTION_REDUNDANCY_MET, CONNECTION_REDUNDANCY_NOT_MET.", + "enum": [ + "CONNECTION_REDUNDANCY_MET", + "CONNECTION_REDUNDANCY_NOT_MET" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "unsatisfiedReason": { + "description": "Indicates the reason why the VPN connection does not meet the high availability redundancy criteria/requirement. Valid values is INCOMPLETE_TUNNELS_COVERAGE.", + "enum": [ + "INCOMPLETE_TUNNELS_COVERAGE" + ], + "enumDescriptions": [ + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "VpnGatewayStatusTunnel": { + "description": "Contains some information about a VPN tunnel.", + "id": "VpnGatewayStatusTunnel", + "properties": { + "localGatewayInterface": { + "description": "The VPN gateway interface this VPN tunnel is associated with.", + "format": "uint32", + "type": "integer" + }, + "peerGatewayInterface": { + "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or GCP VPN gateway.", + "format": "uint32", + "type": "integer" + }, + "tunnelUrl": { + "description": "URL reference to the VPN tunnel.", + "type": "string" + } + }, + "type": "object" + }, + "VpnGatewayStatusVpnConnection": { + "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be a external VPN gateway or GCP VPN gateway.", + "id": "VpnGatewayStatusVpnConnection", + "properties": { + "peerExternalGateway": { + "description": "URL reference to the peer external VPN gateways to which the VPN tunnels in this VPN connection are connected. This field is mutually exclusive with peer_gcp_gateway.", + "type": "string" + }, + "peerGcpGateway": { + "description": "URL reference to the peer side VPN gateways to which the VPN tunnels in this VPN connection are connected. This field is mutually exclusive with peer_gcp_gateway.", + "type": "string" + }, + "state": { + "$ref": "VpnGatewayStatusHighAvailabilityRequirementState", + "description": "HighAvailabilityRequirementState for the VPN connection." 
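Editorial note, not part of the vendored patch: the VpnGatewaysGetStatusResponse/VpnGatewayStatus schemas added here expose a per-connection high-availability state. A minimal sketch of checking that state via the generated Go client follows; the VpnGateways.GetStatus call and the response field names are assumptions derived from this schema, and the project, region, and gateway name are placeholders.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.alpha"
)

// Editorial sketch: flag VPN connections that do not meet the
// CONNECTION_REDUNDANCY_MET requirement. Method and field names are
// assumptions derived from the JSON schema in this hunk.
func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := svc.VpnGateways.GetStatus("my-project", "us-central1", "my-ha-gateway").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	if resp.Result == nil {
		return
	}
	for _, conn := range resp.Result.VpnConnections {
		if conn.State != nil && conn.State.State != "CONNECTION_REDUNDANCY_MET" {
			log.Printf("VPN connection is not redundant: %s", conn.State.UnsatisfiedReason)
		}
	}
}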
+ }, + "tunnels": { + "description": "List of VPN tunnels that are in this VPN connection.", + "items": { + "$ref": "VpnGatewayStatusTunnel" + }, + "type": "array" + } + }, + "type": "object" + }, "VpnGatewayVpnGatewayInterface": { "description": "A VPN gateway interface.", "id": "VpnGatewayVpnGatewayInterface", @@ -51259,6 +56102,15 @@ }, "type": "object" }, + "VpnGatewaysGetStatusResponse": { + "id": "VpnGatewaysGetStatusResponse", + "properties": { + "result": { + "$ref": "VpnGatewayStatus" + } + }, + "type": "object" + }, "VpnGatewaysScopedList": { "id": "VpnGatewaysScopedList", "properties": { @@ -51375,7 +56227,7 @@ "type": "string" }, "ikeVersion": { - "description": "IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. Default version is 2.", + "description": "IKE protocol version to use when establishing the VPN tunnel with the peer VPN gateway. Acceptable IKE versions are 1 or 2. The default version is 2.", "format": "int32", "type": "integer" }, @@ -51397,7 +56249,7 @@ "type": "object" }, "localTrafficSelector": { - "description": "Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", + "description": "Local traffic selector to use when establishing the VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges must be disjoint. Only IPv4 is supported.", "items": { "type": "string" }, @@ -51413,6 +56265,19 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "peerExternalGateway": { + "description": "URL of the peer side external VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created. This field is exclusive with the field peerGcpGateway.", + "type": "string" + }, + "peerExternalGatewayInterface": { + "description": "The interface ID of the external VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created.", + "format": "int32", + "type": "integer" + }, + "peerGcpGateway": { + "description": "URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created. This field can be used when creating highly available VPN from VPC network to VPC network, the field is exclusive with the field peerExternalGateway. If provided, the VPN tunnel will automatically use the same vpnGatewayInterface ID in the peer GCP VPN gateway.", + "type": "string" + }, "peerIp": { "description": "IP address of the peer VPN gateway. Only IPv4 is supported.", "type": "string" @@ -51422,14 +56287,14 @@ "type": "string" }, "remoteTrafficSelector": { - "description": "Remote traffic selectors to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", + "description": "Remote traffic selectors to use when establishing the VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. 
Only IPv4 is supported.", "items": { "type": "string" }, "type": "array" }, "router": { - "description": "URL of router resource to be used for dynamic routing.", + "description": "URL of the router resource to be used for dynamic routing.", "type": "string" }, "selfLink": { @@ -51445,7 +56310,7 @@ "type": "string" }, "status": { - "description": "[Output Only] The status of the VPN tunnel.", + "description": "[Output Only] The status of the VPN tunnel, which can be one of the following: \n- PROVISIONING: Resource is being allocated for the VPN tunnel. \n- WAITING_FOR_FULL_CONFIG: Waiting to receive all VPN-related configs from the user. Network, TargetVpnGateway, VpnTunnel, ForwardingRule, and Route resources are needed to setup the VPN tunnel. \n- FIRST_HANDSHAKE: Successful first handshake with the peer VPN. \n- ESTABLISHED: Secure session is successfully established with the peer VPN. \n- NETWORK_ERROR: Deprecated, replaced by NO_INCOMING_PACKETS \n- AUTHORIZATION_ERROR: Auth error (for example, bad shared secret). \n- NEGOTIATION_FAILURE: Handshake failed. \n- DEPROVISIONING: Resources are being deallocated for the VPN tunnel. \n- FAILED: Tunnel creation has failed and the tunnel is not ready to be used.", "enum": [ "ALLOCATING_RESOURCES", "AUTHORIZATION_ERROR", @@ -51502,7 +56367,7 @@ "items": { "additionalProperties": { "$ref": "VpnTunnelsScopedList", - "description": "Name of the scope containing this set of vpn tunnels." + "description": "Name of the scope containing this set of VPN tunnels." }, "description": "A list of VpnTunnelsScopedList resources.", "type": "object" @@ -51720,7 +56585,7 @@ "id": "VpnTunnelsScopedList", "properties": { "vpnTunnels": { - "description": "A list of vpn tunnels contained in this scope.", + "description": "A list of VPN tunnels contained in this scope.", "items": { "$ref": "VpnTunnel" }, @@ -51844,6 +56709,26 @@ }, "type": "object" }, + "WeightedBackendService": { + "description": "In contrast to a single BackendService in HttpRouteAction to which all matching traffic is directed to, WeightedBackendService allows traffic to be split across multiple BackendServices. The volume of traffic for each BackendService is proportional to the weight specified in each WeightedBackendService", + "id": "WeightedBackendService", + "properties": { + "backendService": { + "description": "The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the loadbalancer applies any relevant headerActions specified as part of this backendServiceWeight.", + "type": "string" + }, + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nheaderAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap." + }, + "weight": { + "description": "Specifies the fraction of traffic sent to backendService, computed as weight / (sum of all weightedBackendService weights in routeAction) .\nThe selection of a backend service is determined only for new traffic. 
Once a user's request has been directed to a backendService, subsequent requests will be sent to the same backendService as determined by the BackendService's session affinity policy.\nThe value must be between 0 and 1000", + "format": "uint32", + "type": "integer" + } + }, + "type": "object" + }, "XpnHostList": { "id": "XpnHostList", "properties": { @@ -51979,7 +56864,7 @@ "type": "object" }, "Zone": { - "description": "A Zone resource. (== resource_for beta.zones ==) (== resource_for v1.zones ==)", + "description": "A Zone resource. (== resource_for beta.zones ==) (== resource_for v1.zones ==) Next ID: 17", "id": "Zone", "properties": { "availableCpuPlatforms": { diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go index 9a4786b70e9a2..187a2f021e73e 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All rights reserved. +// Copyright 2019 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,13 +6,39 @@ // Package compute provides access to the Compute Engine API. // -// See https://developers.google.com/compute/docs/reference/latest/ +// For product documentation, see: https://developers.google.com/compute/docs/reference/latest/ +// +// Creating a client // // Usage example: // // import "google.golang.org/api/compute/v0.alpha" // ... -// computeService, err := compute.New(oauthHttpClient) +// ctx := context.Background() +// computeService, err := compute.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// Other authentication options +// +// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// +// computeService, err := compute.NewService(ctx, option.WithScopes(compute.DevstorageReadWriteScope)) +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// computeService, err := compute.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// computeService, err := compute.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. package compute // import "google.golang.org/api/compute/v0.alpha" import ( @@ -29,6 +55,8 @@ import ( gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + option "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code @@ -71,6 +99,37 @@ const ( DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" ) +// NewService creates a new Service. 
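// (Editorial sketch, not part of the vendored patch.) A typical call under
// Application Default Credentials, optionally narrowing the scopes as the
// package comment above describes; all identifiers below appear elsewhere
// in this file:
//
//	ctx := context.Background()
//	svc, err := compute.NewService(ctx, option.WithScopes(compute.DevstorageReadWriteScope))
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = svc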
+func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") @@ -78,16 +137,17 @@ func New(client *http.Client) (*Service, error) { s := &Service{client: client, BasePath: basePath} s.AcceleratorTypes = NewAcceleratorTypesService(s) s.Addresses = NewAddressesService(s) - s.Allocations = NewAllocationsService(s) s.Autoscalers = NewAutoscalersService(s) s.BackendBuckets = NewBackendBucketsService(s) s.BackendServices = NewBackendServicesService(s) s.DiskTypes = NewDiskTypesService(s) s.Disks = NewDisksService(s) + s.ExternalVpnGateways = NewExternalVpnGatewaysService(s) s.Firewalls = NewFirewallsService(s) s.ForwardingRules = NewForwardingRulesService(s) s.GlobalAddresses = NewGlobalAddressesService(s) s.GlobalForwardingRules = NewGlobalForwardingRulesService(s) + s.GlobalNetworkEndpointGroups = NewGlobalNetworkEndpointGroupsService(s) s.GlobalOperations = NewGlobalOperationsService(s) s.GlobalOrganizationOperations = NewGlobalOrganizationOperationsService(s) s.HealthChecks = NewHealthChecksService(s) @@ -117,15 +177,18 @@ func New(client *http.Client) (*Service, error) { s.RegionCommitments = NewRegionCommitmentsService(s) s.RegionDiskTypes = NewRegionDiskTypesService(s) s.RegionDisks = NewRegionDisksService(s) + s.RegionHealthCheckServices = NewRegionHealthCheckServicesService(s) s.RegionHealthChecks = NewRegionHealthChecksService(s) s.RegionInstanceGroupManagers = NewRegionInstanceGroupManagersService(s) s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) + s.RegionNotificationEndpoints = NewRegionNotificationEndpointsService(s) s.RegionOperations = NewRegionOperationsService(s) s.RegionSslCertificates = NewRegionSslCertificatesService(s) s.RegionTargetHttpProxies = NewRegionTargetHttpProxiesService(s) s.RegionTargetHttpsProxies = NewRegionTargetHttpsProxiesService(s) s.RegionUrlMaps = NewRegionUrlMapsService(s) s.Regions = NewRegionsService(s) + s.Reservations = NewReservationsService(s) s.ResourcePolicies = NewResourcePoliciesService(s) s.Routers = NewRoutersService(s) s.Routes = NewRoutesService(s) @@ -158,8 +221,6 @@ type Service struct { Addresses *AddressesService - Allocations *AllocationsService - Autoscalers *AutoscalersService BackendBuckets *BackendBucketsService @@ -170,6 +231,8 @@ type Service struct { Disks *DisksService + ExternalVpnGateways *ExternalVpnGatewaysService + Firewalls *FirewallsService 
ForwardingRules *ForwardingRulesService @@ -178,6 +241,8 @@ type Service struct { GlobalForwardingRules *GlobalForwardingRulesService + GlobalNetworkEndpointGroups *GlobalNetworkEndpointGroupsService + GlobalOperations *GlobalOperationsService GlobalOrganizationOperations *GlobalOrganizationOperationsService @@ -236,12 +301,16 @@ type Service struct { RegionDisks *RegionDisksService + RegionHealthCheckServices *RegionHealthCheckServicesService + RegionHealthChecks *RegionHealthChecksService RegionInstanceGroupManagers *RegionInstanceGroupManagersService RegionInstanceGroups *RegionInstanceGroupsService + RegionNotificationEndpoints *RegionNotificationEndpointsService + RegionOperations *RegionOperationsService RegionSslCertificates *RegionSslCertificatesService @@ -254,6 +323,8 @@ type Service struct { Regions *RegionsService + Reservations *ReservationsService + ResourcePolicies *ResourcePoliciesService Routers *RoutersService @@ -320,15 +391,6 @@ type AddressesService struct { s *Service } -func NewAllocationsService(s *Service) *AllocationsService { - rs := &AllocationsService{s: s} - return rs -} - -type AllocationsService struct { - s *Service -} - func NewAutoscalersService(s *Service) *AutoscalersService { rs := &AutoscalersService{s: s} return rs @@ -374,6 +436,15 @@ type DisksService struct { s *Service } +func NewExternalVpnGatewaysService(s *Service) *ExternalVpnGatewaysService { + rs := &ExternalVpnGatewaysService{s: s} + return rs +} + +type ExternalVpnGatewaysService struct { + s *Service +} + func NewFirewallsService(s *Service) *FirewallsService { rs := &FirewallsService{s: s} return rs @@ -410,6 +481,15 @@ type GlobalForwardingRulesService struct { s *Service } +func NewGlobalNetworkEndpointGroupsService(s *Service) *GlobalNetworkEndpointGroupsService { + rs := &GlobalNetworkEndpointGroupsService{s: s} + return rs +} + +type GlobalNetworkEndpointGroupsService struct { + s *Service +} + func NewGlobalOperationsService(s *Service) *GlobalOperationsService { rs := &GlobalOperationsService{s: s} return rs @@ -671,6 +751,15 @@ type RegionDisksService struct { s *Service } +func NewRegionHealthCheckServicesService(s *Service) *RegionHealthCheckServicesService { + rs := &RegionHealthCheckServicesService{s: s} + return rs +} + +type RegionHealthCheckServicesService struct { + s *Service +} + func NewRegionHealthChecksService(s *Service) *RegionHealthChecksService { rs := &RegionHealthChecksService{s: s} return rs @@ -698,6 +787,15 @@ type RegionInstanceGroupsService struct { s *Service } +func NewRegionNotificationEndpointsService(s *Service) *RegionNotificationEndpointsService { + rs := &RegionNotificationEndpointsService{s: s} + return rs +} + +type RegionNotificationEndpointsService struct { + s *Service +} + func NewRegionOperationsService(s *Service) *RegionOperationsService { rs := &RegionOperationsService{s: s} return rs @@ -752,6 +850,15 @@ type RegionsService struct { s *Service } +func NewReservationsService(s *Service) *ReservationsService { + rs := &ReservationsService{s: s} + return rs +} + +type ReservationsService struct { + s *Service +} + func NewResourcePoliciesService(s *Service) *ResourcePoliciesService { rs := &ResourcePoliciesService{s: s} return rs @@ -940,8 +1047,11 @@ type AcceleratorConfig struct { AcceleratorCount int64 `json:"acceleratorCount,omitempty"` // AcceleratorType: Full or partial URL of the accelerator type resource - // to attach to this instance. If you are creating an instance template, - // specify only the accelerator name. 
+ // to attach to this instance. For example: + // projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla- + // p100 If you are creating an instance template, specify only the + // accelerator name. See GPUs on Compute Engine for a full list of + // accelerator types. AcceleratorType string `json:"acceleratorType,omitempty"` // ForceSendFields is a list of field names (e.g. "AcceleratorCount") to @@ -1002,6 +1112,10 @@ type AcceleratorType struct { // resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource's + // resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // Zone: [Output Only] The name of the zone where the accelerator type // resides, such as us-central1-a. You must specify this field as part // of the HTTP request URL. It is not settable as a field in the request @@ -1677,6 +1791,10 @@ type Address struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // Status: [Output Only] The status of the address, which can be one of // RESERVING, RESERVED, or IN_USE. An address that is RESERVING is // currently in the process of being reserved. A RESERVED address is @@ -2208,305 +2326,22 @@ func (s *AliasIpRange) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Allocation: Allocation resource -type Allocation struct { - // Commitment: Full or partial url for commitment in which this - // allocation is to be created. This field is ignored when allocations - // are created during committment creation. - Commitment string `json:"commitment,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#allocations - // for allocations. - Kind string `json:"kind,omitempty"` - - // Name: The name of the resource, provided by the client when initially - // creating the resource. The resource name must be 1-63 characters - // long, and comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined fully-qualified URL for this - // resource. - SelfLink string `json:"selfLink,omitempty"` - - SpecificAllocation *AllocationSpecificSKUAllocation `json:"specificAllocation,omitempty"` - - // SpecificAllocationRequired: Indicates whether the allocation can be - // consumed by VMs with "any allocation" defined. If the field is set, - // then only VMs that target the allocation by name using - // --allocation-affinity can consume this allocation. 
- SpecificAllocationRequired bool `json:"specificAllocationRequired,omitempty"` - - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Commitment") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Commitment") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Allocation) MarshalJSON() ([]byte, error) { - type NoMethod Allocation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AllocationAffinity: AllocationAffinity is the configuration of -// desired allocation which this instance could take capacity from. -type AllocationAffinity struct { - // Possible values: - // "ANY_ALLOCATION" - // "NO_ALLOCATION" - // "SPECIFIC_ALLOCATION" - // "UNSPECIFIED" - ConsumeAllocationType string `json:"consumeAllocationType,omitempty"` - - // Key: Corresponds to the label key of allocation resource. - Key string `json:"key,omitempty"` - - // Values: Corresponds to the label values of allocation resource. - Values []string `json:"values,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "ConsumeAllocationType") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ConsumeAllocationType") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AllocationAffinity) MarshalJSON() ([]byte, error) { - type NoMethod AllocationAffinity - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AllocationAggregatedList: Contains a list of allocations. -type AllocationAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Allocation resources. - Items map[string]AllocationsScopedList `json:"items,omitempty"` - - // Kind: Type of resource. 
- Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *AllocationAggregatedListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod AllocationAggregatedList - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +type AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk struct { + // DiskSizeGb: Specifies the size of the disk in base-2 GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` -// AllocationAggregatedListWarning: [Output Only] Informational warning -// message. -type AllocationAggregatedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. + // Interface: Specifies the disk interface to use for attaching this + // disk, which is either SCSI or NVME. The default is SCSI. For + // performance characteristics of SCSI over NVMe, see Local SSD + // performance. // // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. 
- // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*AllocationAggregatedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod AllocationAggregatedListWarning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AllocationAggregatedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod AllocationAggregatedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AllocationList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of Allocation resources. 
- Items []*Allocation `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource.Always compute#allocationsList - // for listsof allocations - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *AllocationListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // "NVDIMM" + // "NVME" + // "SCSI" + Interface string `json:"interface,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "DiskSizeGb") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2514,8 +2349,8 @@ type AllocationList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "DiskSizeGb") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -2523,123 +2358,67 @@ type AllocationList struct { NullFields []string `json:"-"` } -func (s *AllocationList) MarshalJSON() ([]byte, error) { - type NoMethod AllocationList +func (s *AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk) MarshalJSON() ([]byte, error) { + type NoMethod AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AllocationListWarning: [Output Only] Informational warning message. -type AllocationListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. 
- // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*AllocationListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} +// AllocationSpecificSKUAllocationReservedInstanceProperties: Properties +// of the SKU instances being reserved. +type AllocationSpecificSKUAllocationReservedInstanceProperties struct { + // GuestAccelerators: Specifies accelerator type and count. + GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` -func (s *AllocationListWarning) MarshalJSON() ([]byte, error) { - type NoMethod AllocationListWarning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // LocalSsds: Specifies amount of local ssd to reserve with each + // instance. The type of disk is local-ssd. + LocalSsds []*AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk `json:"localSsds,omitempty"` -type AllocationListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` + // MachineType: Specifies type of machine (name only) which has fixed + // number of vCPUs and fixed amount of memory. 
This also includes + // specifying custom machine type following + // custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. + MachineType string `json:"machineType,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // MinCpuPlatform: Minimum cpu platform the reservation. + MinCpuPlatform string `json:"minCpuPlatform,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "GuestAccelerators") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "GuestAccelerators") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *AllocationListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod AllocationListWarningData +func (s *AllocationSpecificSKUAllocationReservedInstanceProperties) MarshalJSON() ([]byte, error) { + type NoMethod AllocationSpecificSKUAllocationReservedInstanceProperties raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AllocationSpecificSKUAllocation: This allocation type allows to pre +// AllocationSpecificSKUReservation: This reservation type allows to pre // allocate specific instance configuration. -type AllocationSpecificSKUAllocation struct { +type AllocationSpecificSKUReservation struct { // Count: Specifies number of resources that are allocated. Count int64 `json:"count,omitempty,string"` // InUseCount: [OutputOnly] Indicates how many resource are in use. InUseCount int64 `json:"inUseCount,omitempty,string"` - InstanceProperties *AllocationSpecificSKUAllocationAllocatedInstanceProperties `json:"instanceProperties,omitempty"` + // InstanceProperties: The instance properties for this specific sku + // reservation. + InstanceProperties *AllocationSpecificSKUAllocationReservedInstanceProperties `json:"instanceProperties,omitempty"` // ForceSendFields is a list of field names (e.g. "Count") to // unconditionally include in API requests. 
By default, fields with @@ -2658,258 +2437,8 @@ type AllocationSpecificSKUAllocation struct { NullFields []string `json:"-"` } -func (s *AllocationSpecificSKUAllocation) MarshalJSON() ([]byte, error) { - type NoMethod AllocationSpecificSKUAllocation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AllocationSpecificSKUAllocationAllocatedInstanceProperties: -// Properties of the SKU instances being reserved. -type AllocationSpecificSKUAllocationAllocatedInstanceProperties struct { - // GuestAccelerators: Specifies accelerator type and count. - GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` - - // LocalSsds: Specifies amount of local ssd to reserve with each - // instance. The type of disk is local-ssd. - LocalSsds []*AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk `json:"localSsds,omitempty"` - - // MachineType: Specifies type of machine (name only) which has fixed - // number of vCPUs and fixed amount of memory. This also includes - // specifying custom machine type following - // custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. - MachineType string `json:"machineType,omitempty"` - - // MinCpuPlatform: Minimum cpu platform the allocation. - MinCpuPlatform string `json:"minCpuPlatform,omitempty"` - - // ForceSendFields is a list of field names (e.g. "GuestAccelerators") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "GuestAccelerators") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AllocationSpecificSKUAllocationAllocatedInstanceProperties) MarshalJSON() ([]byte, error) { - type NoMethod AllocationSpecificSKUAllocationAllocatedInstanceProperties - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk struct { - // DiskSizeGb: Specifies the size of the disk in base-2 GB. - DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - - // Interface: Specifies the disk interface to use for attaching this - // disk, which is either SCSI or NVME. The default is SCSI. For - // performance characteristics of SCSI over NVMe, see Local SSD - // performance. - // - // Possible values: - // "NVDIMM" - // "NVME" - // "SCSI" - Interface string `json:"interface,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DiskSizeGb") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
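The Allocation* types removed here are superseded by the reservation-flavored types added above (AllocationSpecificSKUReservation and friends). A minimal sketch of how those new types compose, assuming the code sits alongside the same generated package; the helper name and all concrete values are invented for illustration:

// buildSpecificSKUReservation sketches how the reservation types added in
// this revision fit together. Field names come from the generated structs
// above; the values are placeholders.
func buildSpecificSKUReservation() *AllocationSpecificSKUReservation {
	return &AllocationSpecificSKUReservation{
		// Reserve four identical instances.
		Count: 4,
		InstanceProperties: &AllocationSpecificSKUAllocationReservedInstanceProperties{
			MachineType:    "n1-standard-8",
			MinCpuPlatform: "Intel Skylake",
			GuestAccelerators: []*AcceleratorConfig{
				{AcceleratorType: "nvidia-tesla-p100", AcceleratorCount: 1},
			},
			// One 375 GB NVMe local SSD per reserved instance.
			LocalSsds: []*AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk{
				{DiskSizeGb: 375, Interface: "NVME"},
			},
		},
	}
}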
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DiskSizeGb") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk) MarshalJSON() ([]byte, error) { - type NoMethod AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AllocationsScopedList struct { - // Allocations: A list of allocations contained in this scope. - Allocations []*Allocation `json:"allocations,omitempty"` - - // Warning: Informational warning which replaces the list of allocations - // when the list is empty. - Warning *AllocationsScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Allocations") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Allocations") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod AllocationsScopedList - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AllocationsScopedListWarning: Informational warning which replaces -// the list of allocations when the list is empty. -type AllocationsScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. 
- // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*AllocationsScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod AllocationsScopedListWarning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AllocationsScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod AllocationsScopedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AllocationsUpdateResourceShapeRequest struct { - // Count: Number of allocated resources which are to be updated with - // minimum = 1 and maximum = 100. 
- Count int64 `json:"count,omitempty"` - - // DestinationAllocation: The name of destination allocation where the - // modified machines are added. If existing, its machine spec must match - // the modified machine spec. If non existing, new allocation with this - // name and modified machine spec is created automatically. - DestinationAllocation string `json:"destinationAllocation,omitempty"` - - UpdatedResourceProperties *AllocationSpecificSKUAllocationAllocatedInstanceProperties `json:"updatedResourceProperties,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Count") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Count") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationsUpdateResourceShapeRequest) MarshalJSON() ([]byte, error) { - type NoMethod AllocationsUpdateResourceShapeRequest +func (s *AllocationSpecificSKUReservation) MarshalJSON() ([]byte, error) { + type NoMethod AllocationSpecificSKUReservation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2932,7 +2461,7 @@ type AttachedDisk struct { // the instance. // // If not specified, the server chooses a default device name to apply - // to this disk, in the form persistent-disks-x, where x is a number + // to this disk, in the form persistent-disk-x, where x is a number // assigned by Google Compute Engine. This field is only applicable for // persistent disks. DeviceName string `json:"deviceName,omitempty"` @@ -3022,6 +2551,10 @@ type AttachedDisk struct { // "PRESERVED" SavedState string `json:"savedState,omitempty"` + // ShieldedInstanceInitialState: [Output Only] shielded vm initial state + // stored on disk + ShieldedInstanceInitialState *InitialStateConfig `json:"shieldedInstanceInitialState,omitempty"` + // Source: Specifies a valid partial or full URL to an existing // Persistent Disk resource. When creating a new instance, one of // initializeParams.sourceImage or disks.source is required except for @@ -3105,11 +2638,28 @@ type AttachedDiskInitializeParams struct { // is the name of the disk type, not URL. DiskType string `json:"diskType,omitempty"` + // GuestOsFeatures: A list of features to enable on the guest operating + // system. Applicable only for bootable images. Read Enabling guest + // operating system features to see a list of available options. + // + // Guest OS features are applied by merging + // initializeParams.guestOsFeatures and disks.guestOsFeatures + GuestOsFeatures []*GuestOsFeature `json:"guestOsFeatures,omitempty"` + // Labels: Labels to apply to this disk. These can be later modified by // the disks.setLabels method. This field is only applicable for // persistent disks. 
Labels map[string]string `json:"labels,omitempty"` + // ReplicaZones: URLs of the zones where the disk should be replicated + // to. Only applicable for regional resources. + ReplicaZones []string `json:"replicaZones,omitempty"` + + // ResourcePolicies: Resource policies applied to this disk for + // automatic snapshot creations. Specified using the full or partial + // URL. For instance template, specify only the resource policy name. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + // SourceImage: The source image to create this disk. When creating a // new instance, one of initializeParams.sourceImage or disks.source is // required except for local SSD. @@ -3300,6 +2850,96 @@ func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AuthenticationPolicy: The authentication settings for the backend +// service. +type AuthenticationPolicy struct { + // Origins: List of authentication methods that can be used for origin + // authentication. Similar to peers, these will be evaluated in order + // the first valid one will be used to set origin identity. If none of + // these methods pass, the request will be rejected with authentication + // failed error (401). Leave the list empty if origin authentication is + // not required. + Origins []*OriginAuthenticationMethod `json:"origins,omitempty"` + + // Peers: List of authentication methods that can be used for peer + // authentication. They will be evaluated in order the first valid one + // will be used to set peer identity. If none of these methods pass, the + // request will be rejected with authentication failed error (401). + // Leave the list empty if peer authentication is not required. + Peers []*PeerAuthenticationMethod `json:"peers,omitempty"` + + // PrincipalBinding: Define whether peer or origin identity should be + // used for principal. Default value is USE_PEER. If peer (or origin) + // identity is not available, either because peer/origin authentication + // is not defined, or failed, principal will be left unset. In other + // words, binding rule does not affect the decision to accept or reject + // request. This field can be set to one of the following: USE_PEER: + // Principal will be set to the identity from peer authentication. + // USE_ORIGIN: Principal will be set to the identity from origin + // authentication. + // + // Possible values: + // "INVALID" + // "USE_ORIGIN" + // "USE_PEER" + PrincipalBinding string `json:"principalBinding,omitempty"` + + // ServerTlsContext: Configures the mechanism to obtain server-side + // security certificates and identity information. + ServerTlsContext *TlsContext `json:"serverTlsContext,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Origins") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Origins") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
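The ForceSendFields / NullFields convention repeated in every generated struct is what makes sparse Patch payloads work: gensupport.MarshalJSON only emits a zero-valued field if it is named in ForceSendFields, and emits an explicit JSON null for anything named in NullFields. A minimal sketch of that mechanism using the reservation type defined earlier; the helper function is hypothetical:

// marshalSparseReservation forces the zero-valued Count field into the
// request body and sends InstanceProperties as an explicit null, which is
// how "clear this field" is expressed in Patch requests.
func marshalSparseReservation() ([]byte, error) {
	r := &AllocationSpecificSKUReservation{
		Count:           0,
		ForceSendFields: []string{"Count"},              // emit "count" despite the zero value
		NullFields:      []string{"InstanceProperties"}, // emit "instanceProperties": null
	}
	return r.MarshalJSON()
}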
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AuthenticationPolicy) MarshalJSON() ([]byte, error) { + type NoMethod AuthenticationPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AuthorizationConfig: Authorization configuration provides +// service-level and method-level access control for a service. +type AuthorizationConfig struct { + // Policies: List of RbacPolicies. + Policies []*RbacPolicy `json:"policies,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Policies") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Policies") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AuthorizationConfig) MarshalJSON() ([]byte, error) { + type NoMethod AuthorizationConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AuthorizationLoggingOptions: Authorization-related information used // by Cloud Audit Logging. type AuthorizationLoggingOptions struct { @@ -3393,6 +3033,10 @@ type Autoscaler struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // Status: [Output Only] The status of the autoscaler configuration. // // Possible values: @@ -4190,7 +3834,7 @@ func (s *AutoscalingPolicyCustomMetricUtilization) UnmarshalJSON(data []byte) er // of autoscaling based on load balancing. type AutoscalingPolicyLoadBalancingUtilization struct { // UtilizationTarget: Fraction of backend capacity utilization (set in - // HTTP(s) load balancing configuration) that autoscaler should + // HTTP(S) load balancing configuration) that autoscaler should // maintain. Must be a positive float value. If not defined, the default // is 0.8. UtilizationTarget float64 `json:"utilizationTarget,omitempty"` @@ -4529,6 +4173,10 @@ type BackendBucket struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4776,12 +4424,40 @@ type BackendService struct { // CdnPolicy: Cloud CDN configuration for this BackendService. CdnPolicy *BackendServiceCdnPolicy `json:"cdnPolicy,omitempty"` + // CircuitBreakers: Settings controlling the volume of connections to a + // backend service. 
+ // + // This field is applicable to either: + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. + CircuitBreakers *CircuitBreakers `json:"circuitBreakers,omitempty"` + // CloudFunctionBackend: Directs request to a cloud function. // appEngineBackend and backends[] must be empty if this is set. CloudFunctionBackend *BackendServiceCloudFunctionBackend `json:"cloudFunctionBackend,omitempty"` ConnectionDraining *ConnectionDraining `json:"connectionDraining,omitempty"` + // ConsistentHash: Consistent Hash-based load balancing can be used to + // provide soft session affinity based on HTTP headers, cookies or other + // properties. This load balancing policy is applicable only for HTTP + // connections. The affinity to a particular destination host will be + // lost when one or more hosts are added/removed from the destination + // service. This field specifies parameters that control consistent + // hashing. This field is only applicable when localityLbPolicy is set + // to MAGLEV or RING_HASH. + // + // This field is applicable to either: + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. + ConsistentHash *ConsistentHashLoadBalancerSettings `json:"consistentHash,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -4845,6 +4521,44 @@ type BackendService struct { // "INVALID_LOAD_BALANCING_SCHEME" LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` + // LocalityLbPolicy: The load balancing algorithm used within the scope + // of the locality. The possible values are: + // - ROUND_ROBIN: This is a simple policy in which each healthy backend + // is selected in round robin order. This is the default. + // - LEAST_REQUEST: An O(1) algorithm which selects two random healthy + // hosts and picks the host which has fewer active requests. + // - RING_HASH: The ring/modulo hash load balancer implements consistent + // hashing to backends. The algorithm has the property that the + // addition/removal of a host from a set of N hosts only affects 1/N of + // the requests. + // - RANDOM: The load balancer selects a random healthy host. + // - ORIGINAL_DESTINATION: Backend host is selected based on the client + // connection metadata, i.e., connections are opened to the same address + // as the destination address of the incoming connection before the + // connection was redirected to the load balancer. + // - MAGLEV: used as a drop in replacement for the ring hash load + // balancer. Maglev is not as stable as ring hash but has faster table + // lookup build times and host selection times. For more information + // about Maglev, refer to https://ai.google/research/pubs/pub44824 + // + // + // This field is applicable to either: + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. 
+ // + // Possible values: + // "INVALID_LB_POLICY" + // "LEAST_REQUEST" + // "MAGLEV" + // "ORIGINAL_DESTINATION" + // "RANDOM" + // "RING_HASH" + // "ROUND_ROBIN" + LocalityLbPolicy string `json:"localityLbPolicy,omitempty"` + // LogConfig: This field denotes the logging options for the load // balancer traffic served by this backend service. If logging is // enabled, logs will be exported to Stackdriver. @@ -4859,6 +4573,15 @@ type BackendService struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // OutlierDetection: Settings controlling eviction of unhealthy hosts + // from the load balancing pool. This field is applicable to either: + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. + OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` + // Port: Deprecated in favor of portName. The TCP port to connect on the // backend. The default value is 80. // @@ -4900,9 +4623,23 @@ type BackendService struct { // policy associated with this backend service. SecurityPolicy string `json:"securityPolicy,omitempty"` + // SecuritySettings: This field specifies the security policy that + // applies to this backend service. This field is applicable to either: + // + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. + SecuritySettings *SecuritySettings `json:"securitySettings,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // SessionAffinity: Type of session affinity to use. The default is // NONE. // @@ -4919,6 +4656,8 @@ type BackendService struct { // "CLIENT_IP_PORT_PROTO" // "CLIENT_IP_PROTO" // "GENERATED_COOKIE" + // "HEADER_FIELD" + // "HTTP_COOKIE" // "NONE" SessionAffinity string `json:"sessionAffinity,omitempty"` @@ -5789,10 +5528,10 @@ func (s *BackendServicesScopedListWarningData) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: Unimplemented. The condition that is associated with this - // binding. NOTE: an unsatisfied condition will not allow user access - // via current binding. Different bindings, including their conditions, - // are examined independently. + // Condition: The condition that is associated with this binding. NOTE: + // An unsatisfied condition will not allow user access via current + // binding. Different bindings, including their conditions, are examined + // independently. Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud @@ -5819,7 +5558,7 @@ type Binding struct { // // // - // * `domain:{domain}`: A Google Apps domain name that represents all + // * `domain:{domain}`: The G Suite domain (primary) that represents all // the users of that domain. For example, `google.com` or `example.com`. 
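The new localityLbPolicy, consistentHash, circuitBreakers and outlierDetection fields only apply to the INTERNAL_MANAGED / INTERNAL_SELF_MANAGED schemes described above. A hedged sketch of how they might be combined on a BackendService literal; every value is invented and unrelated required fields are omitted:

// exampleSelfManagedBackendService wires the ring-hash locality policy to
// cookie-based consistent hashing and a basic circuit-breaker cap.
func exampleSelfManagedBackendService() *BackendService {
	return &BackendService{
		Name:                "td-ring-hash-backend",
		LoadBalancingScheme: "INTERNAL_SELF_MANAGED",
		SessionAffinity:     "HTTP_COOKIE",
		LocalityLbPolicy:    "RING_HASH",
		// Hash on an HTTP cookie; the cookie is generated if absent.
		ConsistentHash: &ConsistentHashLoadBalancerSettings{
			MinimumRingSize: 1024,
			HttpCookie: &ConsistentHashLoadBalancerSettingsHttpCookie{
				Name: "session-id",
				Path: "/",
			},
		},
		// Cap the connection volume to the backend cluster.
		CircuitBreakers: &CircuitBreakers{
			MaxConnections: 1024,
			MaxRetries:     3,
		},
	}
}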
Members []string `json:"members,omitempty"` @@ -5933,6 +5672,210 @@ func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CallCredentials: gRPC call credentials to access the SDS server. +type CallCredentials struct { + // AccessToken: The access token that is used as call credential for the + // SDS server. This field is used only if callCredentialType is + // ACCESS_TOKEN. + AccessToken string `json:"accessToken,omitempty"` + + // CallCredentialType: The type of call credentials to use for GRPC + // requests to the SDS server. This field can be set to one of the + // following: ACCESS_TOKEN: An access token is used as call credentials + // for the SDS server. GCE_VM: The local GCE VM service account + // credentials are used to access the SDS server. JWT_SERVICE_TOKEN: The + // user provisioned service account credentials are used to access the + // SDS server. FROM_PLUGIN: Custom authenticator credentials are used to + // access the SDS server. + // + // Possible values: + // "ACCESS_TOKEN" + // "FROM_PLUGIN" + // "GCE_VM" + // "INVALID" + // "JWT_SERVICE_ACCOUNT" + CallCredentialType string `json:"callCredentialType,omitempty"` + + // FromPlugin: Custom authenticator credentials. + FromPlugin *MetadataCredentialsFromPlugin `json:"fromPlugin,omitempty"` + + // JwtServiceAccount: This service account credentials are used as call + // credentials for the SDS server. This field is used only if + // callCredentialType is JWT_SERVICE_ACCOUNT. + JwtServiceAccount *ServiceAccountJwtAccessCredentials `json:"jwtServiceAccount,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AccessToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccessToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CallCredentials) MarshalJSON() ([]byte, error) { + type NoMethod CallCredentials + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ChannelCredentials: gRPC channel credentials to access the SDS +// server. +type ChannelCredentials struct { + // Certificates: The call credentials to access the SDS server. + Certificates *TlsCertificatePaths `json:"certificates,omitempty"` + + // ChannelCredentialType: The channel credentials to access the SDS + // server. This field can be set to one of the following: CERTIFICATES: + // Use TLS certificates to access the SDS server. GCE_VM: Use local GCE + // VM credentials to access the SDS server. + // + // Possible values: + // "CERTIFICATES" + // "GCE_VM" + // "INVALID" + ChannelCredentialType string `json:"channelCredentialType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Certificates") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Certificates") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ChannelCredentials) MarshalJSON() ([]byte, error) { + type NoMethod ChannelCredentials + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CircuitBreakers: Settings controlling the volume of connections to a +// backend service. +type CircuitBreakers struct { + // ConnectTimeout: The timeout for new network connections to hosts. + ConnectTimeout *Duration `json:"connectTimeout,omitempty"` + + // MaxConnections: The maximum number of connections to the backend + // cluster. If not specified, the default is 1024. + MaxConnections int64 `json:"maxConnections,omitempty"` + + // MaxPendingRequests: The maximum number of pending requests allowed to + // the backend cluster. If not specified, the default is 1024. + MaxPendingRequests int64 `json:"maxPendingRequests,omitempty"` + + // MaxRequests: The maximum number of parallel requests that allowed to + // the backend cluster. If not specified, the default is 1024. + MaxRequests int64 `json:"maxRequests,omitempty"` + + // MaxRequestsPerConnection: Maximum requests for a single backend + // connection. This parameter is respected by both the HTTP/1.1 and + // HTTP/2 implementations. If not specified, there is no limit. Setting + // this parameter to 1 will effectively disable keep alive. + MaxRequestsPerConnection int64 `json:"maxRequestsPerConnection,omitempty"` + + // MaxRetries: The maximum number of parallel retries allowed to the + // backend cluster. If not specified, the default is 3. + MaxRetries int64 `json:"maxRetries,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConnectTimeout") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConnectTimeout") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *CircuitBreakers) MarshalJSON() ([]byte, error) { + type NoMethod CircuitBreakers + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ClientTlsSettings: The client side authentication settings for +// connection originating from the backend service. +type ClientTlsSettings struct { + // ClientTlsContext: Configures the mechanism to obtain client-side + // security certificates and identity information. This field is only + // applicable when mode is set to MUTUAL. + ClientTlsContext *TlsContext `json:"clientTlsContext,omitempty"` + + // Mode: Indicates whether connections to this port should be secured + // using TLS. The value of this field determines how TLS is enforced. + // This can be set to one of the following values: DISABLE: Do not setup + // a TLS connection to the backends. SIMPLE: Originate a TLS connection + // to the backends. MUTUAL: Secure connections to the backends using + // mutual TLS by presenting client certificates for authentication. + // + // Possible values: + // "DISABLE" + // "INVALID" + // "MUTUAL" + // "SIMPLE" + Mode string `json:"mode,omitempty"` + + // Sni: SNI string to present to the server during TLS handshake. This + // field is applicable only when mode is SIMPLE or MUTUAL. + Sni string `json:"sni,omitempty"` + + // SubjectAltNames: A list of alternate names to verify the subject + // identity in the certificate.If specified, the proxy will verify that + // the server certificate's subject alt name matches one of the + // specified values. This field is applicable only when mode is SIMPLE + // or MUTUAL. + SubjectAltNames []string `json:"subjectAltNames,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClientTlsContext") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClientTlsContext") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ClientTlsSettings) MarshalJSON() ([]byte, error) { + type NoMethod ClientTlsSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Commitment: Represents a Commitment resource. Creating a Commitment // resource means that you are purchasing a committed use contract with // an explicit start and end time. You can create commitments based on @@ -5945,9 +5888,6 @@ func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { // must purchase a new commitment to continue receiving discounts. (== // resource_for beta.commitments ==) (== resource_for v1.commitments ==) type Commitment struct { - // Allocations: List of allocations for this commitment. - Allocations []*Allocation `json:"allocations,omitempty"` - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
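ClientTlsSettings above controls how the proxy originates TLS toward the backends. A minimal sketch under SIMPLE mode (server-side TLS only); the hostnames are placeholders and clientTlsContext is omitted because it is only consulted when mode is MUTUAL:

// exampleSimpleClientTls originates TLS to the backends and pins the
// expected server identity via SNI and subject alternative names.
func exampleSimpleClientTls() *ClientTlsSettings {
	return &ClientTlsSettings{
		Mode: "SIMPLE", // originate TLS, no client certificate
		Sni:  "backend.internal.example.com",
		// The proxy verifies the server certificate's subject alt names
		// against this list.
		SubjectAltNames: []string{"backend.internal.example.com"},
	}
}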
CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -5991,6 +5931,9 @@ type Commitment struct { // used. Region string `json:"region,omitempty"` + // Reservations: List of reservations for this commitment. + Reservations []*Reservation `json:"reservations,omitempty"` + // Resources: A list of commitment amounts for particular resources. // Note that VCPU and MEMORY resource commitments must occur together. Resources []*ResourceCommitment `json:"resources,omitempty"` @@ -5998,6 +5941,10 @@ type Commitment struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // StartTimestamp: [Output Only] Commitment start time in RFC3339 text // format. StartTimestamp string `json:"startTimestamp,omitempty"` @@ -6022,6 +5969,7 @@ type Commitment struct { // will only apply to memory optimized machines. // // Possible values: + // "COMPUTE_OPTIMIZED" // "GENERAL_PURPOSE" // "MEMORY_OPTIMIZED" // "TYPE_UNSPECIFIED" @@ -6031,20 +5979,21 @@ type Commitment struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Allocations") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Allocations") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -6537,11 +6486,7 @@ type Condition struct { // "SERVICE" Sys string `json:"sys,omitempty"` - // Value: DEPRECATED. Use 'values' instead. - Value string `json:"value,omitempty"` - - // Values: The objects of the condition. This is mutually exclusive with - // 'value'. + // Values: The objects of the condition. Values []string `json:"values,omitempty"` // ForceSendFields is a list of field names (e.g. "Iam") to @@ -6598,12 +6543,163 @@ func (s *ConnectionDraining) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ConsistentHashLoadBalancerSettings: This message defines settings for +// a consistent hash style load balancer. +type ConsistentHashLoadBalancerSettings struct { + // HttpCookie: Hash is based on HTTP Cookie. 
This field describes a HTTP + // cookie that will be used as the hash key for the consistent hash load + // balancer. If the cookie is not present, it will be generated. This + // field is applicable if the sessionAffinity is set to HTTP_COOKIE. + HttpCookie *ConsistentHashLoadBalancerSettingsHttpCookie `json:"httpCookie,omitempty"` + + // HttpHeaderName: The hash based on the value of the specified header + // field. This field is applicable if the sessionAffinity is set to + // HEADER_FIELD. + HttpHeaderName string `json:"httpHeaderName,omitempty"` + + // MinimumRingSize: The minimum number of virtual nodes to use for the + // hash ring. Defaults to 1024. Larger ring sizes result in more + // granular load distributions. If the number of hosts in the load + // balancing pool is larger than the ring size, each host will be + // assigned a single virtual node. + MinimumRingSize int64 `json:"minimumRingSize,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "HttpCookie") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HttpCookie") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ConsistentHashLoadBalancerSettings) MarshalJSON() ([]byte, error) { + type NoMethod ConsistentHashLoadBalancerSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ConsistentHashLoadBalancerSettingsHttpCookie: The information about +// the HTTP Cookie on which the hash function is based for load +// balancing policies that use a consistent hash. +type ConsistentHashLoadBalancerSettingsHttpCookie struct { + // Name: Name of the cookie. + Name string `json:"name,omitempty"` + + // Path: Path to set for the cookie. + Path string `json:"path,omitempty"` + + // Ttl: Lifetime of the cookie. + Ttl *Duration `json:"ttl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ConsistentHashLoadBalancerSettingsHttpCookie) MarshalJSON() ([]byte, error) { + type NoMethod ConsistentHashLoadBalancerSettingsHttpCookie + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CorsPolicy: The specification for allowing client side cross-origin +// requests. Please see W3C Recommendation for Cross Origin Resource +// Sharing +type CorsPolicy struct { + // AllowCredentials: In response to a preflight request, setting this to + // true indicates that the actual request can include user credentials. + // This translates to the Access-Control-Allow-Credentials + // header. + // Default is false. + AllowCredentials bool `json:"allowCredentials,omitempty"` + + // AllowHeaders: Specifies the content for the + // Access-Control-Allow-Headers header. + AllowHeaders []string `json:"allowHeaders,omitempty"` + + // AllowMethods: Specifies the content for the + // Access-Control-Allow-Methods header. + AllowMethods []string `json:"allowMethods,omitempty"` + + // AllowOriginRegexes: Specifies the regualar expression patterns that + // match allowed origins. For regular expression grammar please see + // en.cppreference.com/w/cpp/regex/ecmascript + // An origin is allowed if it matches either allow_origins or + // allow_origin_regex. + AllowOriginRegexes []string `json:"allowOriginRegexes,omitempty"` + + // AllowOrigins: Specifies the list of origins that will be allowed to + // do CORS requests. + // An origin is allowed if it matches either allow_origins or + // allow_origin_regex. + AllowOrigins []string `json:"allowOrigins,omitempty"` + + // Disabled: If true, specifies the CORS policy is disabled. The default + // value of false, which indicates that the CORS policy is in effect. + Disabled bool `json:"disabled,omitempty"` + + // ExposeHeaders: Specifies the content for the + // Access-Control-Expose-Headers header. + ExposeHeaders []string `json:"exposeHeaders,omitempty"` + + // MaxAge: Specifies how long the results of a preflight request can be + // cached. This translates to the content for the Access-Control-Max-Age + // header. + MaxAge int64 `json:"maxAge,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AllowCredentials") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowCredentials") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *CorsPolicy) MarshalJSON() ([]byte, error) { + type NoMethod CorsPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // CustomerEncryptionKey: Represents a customer-supplied encryption key type CustomerEncryptionKey struct { // KmsKeyName: The name of the encryption key that is stored in Google // Cloud KMS. 
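// Illustrative sketch (hypothetical values): a CorsPolicy as defined above that
// allows credentialed GET/POST requests from a single origin and caches
// preflight results for an hour.
func exampleCorsPolicy() *CorsPolicy {
	return &CorsPolicy{
		AllowOrigins:     []string{"https://app.example.com"},
		AllowMethods:     []string{"GET", "POST"},
		AllowHeaders:     []string{"Content-Type", "Authorization"},
		ExposeHeaders:    []string{"X-Request-Id"},
		AllowCredentials: true, // emits Access-Control-Allow-Credentials: true
		MaxAge:           3600, // Access-Control-Max-Age, in seconds
	}
}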
KmsKeyName string `json:"kmsKeyName,omitempty"` + KmsKeyServiceAccount string `json:"kmsKeyServiceAccount,omitempty"` + // RawKey: Specifies a 256-bit customer-supplied encryption key, encoded // in RFC 4648 base64 to either encrypt or decrypt this resource. RawKey string `json:"rawKey,omitempty"` @@ -6709,14 +6805,16 @@ type DeprecationStatus struct { // resource as the deprecated resource. Replacement string `json:"replacement,omitempty"` - // State: The deprecation state of this resource. This can be - // DEPRECATED, OBSOLETE, or DELETED. Operations which create a new - // resource using a DEPRECATED resource will return successfully, but - // with a warning indicating the deprecated resource and recommending - // its replacement. Operations which use OBSOLETE or DELETED resources - // will be rejected and result in an error. + // State: The deprecation state of this resource. This can be ACTIVE, + // DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the + // end of life date for an image, can use ACTIVE. Operations which + // create a new resource using a DEPRECATED resource will return + // successfully, but with a warning indicating the deprecated resource + // and recommending its replacement. Operations which use OBSOLETE or + // DELETED resources will be rejected and result in an error. // // Possible values: + // "ACTIVE" // "DELETED" // "DEPRECATED" // "OBSOLETE" @@ -6761,8 +6859,8 @@ type Disk struct { // // After you encrypt a disk with a customer-supplied key, you must // provide the same key if you use the disk later (e.g. to create a disk - // snapshot or an image, or to attach the disk to a virtual - // machine). + // snapshot, to create a disk image, to create a machine image, or to + // attach the disk to a virtual machine). // // Customer-supplied encryption keys do not protect access to metadata // of the disk. @@ -6817,6 +6915,10 @@ type Disk struct { // use. Licenses []string `json:"licenses,omitempty"` + // MultiWriter: Indicates whether or not the disk can be read/write + // attached to more than one instance. + MultiWriter bool `json:"multiWriter,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -6865,6 +6967,23 @@ type Disk struct { // inclusive. SizeGb int64 `json:"sizeGb,omitempty,string"` + // SourceDisk: The source disk used to create this disk. You can provide + // this as a partial or full URL to the resource. For example, the + // following are valid values: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk + // - projects/project/zones/zone/disks/disk + // - zones/zone/disks/disk + SourceDisk string `json:"sourceDisk,omitempty"` + + // SourceDiskId: [Output Only] The unique ID of the disk used to create + // this disk. This value identifies the exact disk that was used to + // create this persistent disk. For example, if you created the + // persistent disk from a disk that was later deleted and recreated + // under the same name, the source disk ID would identify the exact + // version of the disk that was used. + SourceDiskId string `json:"sourceDiskId,omitempty"` + // SourceImage: The source image used to create this disk. If the source // image is deleted, this field will not be set. 
// @@ -6934,6 +7053,7 @@ type Disk struct { // // Possible values: // "CREATING" + // "DELETING" // "FAILED" // "READY" // "RESTORING" @@ -6948,11 +7068,11 @@ type Disk struct { // Type: URL of the disk type resource describing which disk type to use // to create the disk. Provide this when creating the disk. For example: - // project/zones/zone/diskTypes/pd-standard or pd-ssd + // projects/project/zones/zone/diskTypes/pd-standard or pd-ssd Type string `json:"type,omitempty"` // Users: [Output Only] Links to the users of the disk (attached - // instances) in form: project/zones/zone/instances/instance + // instances) in form: projects/project/zones/zone/instances/instance Users []string `json:"users,omitempty"` // Zone: [Output Only] URL of the zone where the disk resides. You must @@ -7444,6 +7564,10 @@ type DiskType struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // ValidDiskSize: [Output Only] An optional textual description of the // valid disk size, such as "10GB-10TB". ValidDiskSize string `json:"validDiskSize,omitempty"` @@ -8226,6 +8350,45 @@ func (s *DistributionPolicyZoneConfiguration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Duration: A Duration represents a fixed-length span of time +// represented as a count of seconds and fractions of seconds at +// nanosecond resolution. It is independent of any calendar and concepts +// like "day" or "month". Range is approximately 10,000 years. +type Duration struct { + // Nanos: Span of time that's a fraction of a second at nanosecond + // resolution. Durations less than one second are represented with a 0 + // `seconds` field and a positive `nanos` field. Must be from 0 to + // 999,999,999 inclusive. + Nanos int64 `json:"nanos,omitempty"` + + // Seconds: Span of time at a resolution of a second. Must be from 0 to + // 315,576,000,000 inclusive. Note: these bounds are computed from: 60 + // sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `json:"seconds,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "Nanos") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Nanos") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Duration) MarshalJSON() ([]byte, error) { + type NoMethod Duration + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ExchangedPeeringRoute struct { // DestRange: The destination range of the route. 
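// Illustrative sketch: a Duration of 1.5 seconds as defined above. Because of
// the ",string" tag, Seconds marshals as a quoted string while Nanos stays a
// JSON number, e.g. {"nanos":500000000,"seconds":"1"}.
func exampleDuration() ([]byte, error) {
	d := &Duration{
		Seconds: 1,         // whole seconds, 0 to 315,576,000,000
		Nanos:   500000000, // fractional part, 0 to 999,999,999
	}
	return d.MarshalJSON()
}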
DestRange string `json:"destRange,omitempty"` @@ -8478,6 +8641,329 @@ func (s *Expr) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ExternalVpnGateway: External VPN gateway is the on-premises VPN +// gateway(s) or another cloud provider?s VPN gateway that connects to +// your Google Cloud VPN gateway. To create a highly available VPN from +// Google Cloud to your on-premises side or another Cloud provider's VPN +// gateway, you must create a external VPN gateway resource in GCP, +// which provides the information to GCP about your external VPN +// gateway. +type ExternalVpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Interfaces: List of interfaces for this external VPN gateway. + Interfaces []*ExternalVpnGatewayInterface `json:"interfaces,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#externalVpnGateway for externalVpnGateways. + Kind string `json:"kind,omitempty"` + + // LabelFingerprint: A fingerprint for the labels being applied to this + // ExternalVpnGateway, which is essentially a hash of the labels set + // used for optimistic locking. The fingerprint is initially generated + // by Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels, otherwise the request will fail + // with error 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve an + // ExternalVpnGateway. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this ExternalVpnGateway resource. These + // can be later modified by the setLabels method. Each label key/value + // must comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // RedundancyType: Indicates the user-supplied redundancy type of this + // external VPN gateway. + // + // Possible values: + // "FOUR_IPS_REDUNDANCY" + // "SINGLE_IP_INTERNALLY_REDUNDANT" + // "TWO_IPS_REDUNDANCY" + RedundancyType string `json:"redundancyType,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGateway) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGateway + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ExternalVpnGatewayInterface: The interface for the external VPN +// gateway. +type ExternalVpnGatewayInterface struct { + // Id: The numeric ID of this interface. The allowed input values for + // this id for different redundancy types of external VPN gateway: + // SINGLE_IP_INTERNALLY_REDUNDANT - 0 TWO_IPS_REDUNDANCY - 0, 1 + // FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 + Id int64 `json:"id,omitempty"` + + // IpAddress: IP address of the interface in the external VPN gateway. + // Only IPv4 is supported. This IP address can be either from your + // on-premise gateway or another Cloud provider?s VPN gateway, it cannot + // be an IP address from Google Compute Engine. + IpAddress string `json:"ipAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGatewayInterface) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayInterface + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ExternalVpnGatewayList: Response to the list request, and contains a +// list of externalVpnGateways. +type ExternalVpnGatewayList struct { + Etag string `json:"etag,omitempty"` + + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of ExternalVpnGateway resources. + Items []*ExternalVpnGateway `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#externalVpnGatewayList for lists of externalVpnGateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
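// Illustrative sketch (hypothetical names, documentation-range addresses): an
// ExternalVpnGateway as defined above with TWO_IPS_REDUNDANCY, which permits
// interface ids 0 and 1.
func exampleExternalVpnGateway() *ExternalVpnGateway {
	return &ExternalVpnGateway{
		Name:           "on-prem-gateway",
		Description:    "HA VPN peer on the on-premises side",
		RedundancyType: "TWO_IPS_REDUNDANCY",
		Interfaces: []*ExternalVpnGatewayInterface{
			{Id: 0, IpAddress: "203.0.113.10"},
			{Id: 1, IpAddress: "203.0.113.11"},
		},
	}
}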
If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *ExternalVpnGatewayListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGatewayList) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ExternalVpnGatewayListWarning: [Output Only] Informational warning +// message. +type ExternalVpnGatewayListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ExternalVpnGatewayListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGatewayListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ExternalVpnGatewayListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type FileContentBuffer struct { + // Content: The raw content in the secure keys file. + Content string `json:"content,omitempty"` + + // Possible values: + // "BIN" + // "UNDEFINED" + // "X509" + FileType string `json:"fileType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Content") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Content") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FileContentBuffer) MarshalJSON() ([]byte, error) { + type NoMethod FileContentBuffer + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Firewall: Represents a Firewall resource. type Firewall struct { // Allowed: The list of ALLOW rules specified by this firewall. Each @@ -8571,6 +9057,10 @@ type Firewall struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // SourceRanges: If source ranges are specified, the firewall will apply // only to traffic that has source IP address in these ranges. These // ranges must be expressed in CIDR format. One or both of sourceRanges @@ -8888,6 +9378,15 @@ type FirewallLogConfig struct { // firewall rule. Enable bool `json:"enable,omitempty"` + // Metadata: This field can only be specified for a particular firewall + // rule if logging is enabled for that rule. This field denotes whether + // to include or exclude metadata for firewall logs. + // + // Possible values: + // "EXCLUDE_ALL_METADATA" + // "INCLUDE_ALL_METADATA" + Metadata string `json:"metadata,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enable") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -8918,7 +9417,7 @@ type FixedOrPercent struct { // based on the specific mode. // // - // - If the value is fixed, then the caculated value is equal to the + // - If the value is fixed, then the calculated value is equal to the // fixed value. // - If the value is a percent, then the calculated value is percent/100 // * targetSize. For example, the calculated value of a 80% of a managed @@ -9114,6 +9613,23 @@ type ForwardingRule struct { // "INVALID" LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` + // MetadataFilters: Opaque filter criteria used by Loadbalancer to + // restrict routing configuration to a limited set xDS compliant + // clients. In their xDS requests to Loadbalancer, xDS clients present + // node metadata. If a match takes place, the relevant routing + // configuration is made available to those proxies. + // For each metadataFilter in this list, if its filterMatchCriteria is + // set to MATCH_ANY, at least one of the filterLabels must match the + // corresponding label provided in the metadata. If its + // filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels + // must match with corresponding labels in the provided + // metadata. + // metadataFilters specified here can be overridden by those specified + // in the UrlMap that this ForwardingRule references. + // metadataFilters only applies to Loadbalancers that have their + // loadBalancingScheme set to INTERNAL_SELF_MANAGED. + MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` + // Name: Name of the resource; provided by the client when the resource // is created. 
The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -9188,6 +9704,10 @@ type ForwardingRule struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // ServiceLabel: An optional prefix to the service name for this // Forwarding Rule. If specified, will be the first label of the fully // qualified service name. @@ -9224,8 +9744,8 @@ type ForwardingRule struct { // same region as the forwarding rule. For global forwarding rules, this // target must be a global load balancing resource. The forwarded // traffic must be of a type appropriate to the target object. For - // INTERNAL_SELF_MANAGED" load balancing, only HTTP and HTTPS targets - // are valid. + // INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are + // valid. Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -9726,6 +10246,62 @@ func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type GlobalNetworkEndpointGroupsAttachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be attached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NetworkEndpoints") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GlobalNetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { + type NoMethod GlobalNetworkEndpointGroupsAttachEndpointsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GlobalNetworkEndpointGroupsDetachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be detached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NetworkEndpoints") to + // include in API requests with the JSON null value. 
By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GlobalNetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { + type NoMethod GlobalNetworkEndpointGroupsDetachEndpointsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type GlobalSetLabelsRequest struct { // LabelFingerprint: The fingerprint of the previous set of labels for // this resource, used to detect conflicts. The fingerprint is initially @@ -9808,6 +10384,41 @@ func (s *GlobalSetPolicyRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GrpcServiceConfig: gRPC config to access the SDS server. +type GrpcServiceConfig struct { + // CallCredentials: The call credentials to access the SDS server. + CallCredentials *CallCredentials `json:"callCredentials,omitempty"` + + // ChannelCredentials: The channel credentials to access the SDS server. + ChannelCredentials *ChannelCredentials `json:"channelCredentials,omitempty"` + + // TargetUri: The target URI of the SDS server. + TargetUri string `json:"targetUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CallCredentials") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CallCredentials") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GrpcServiceConfig) MarshalJSON() ([]byte, error) { + type NoMethod GrpcServiceConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GuestAttributes: A guest attributes entry. type GuestAttributes struct { // Kind: [Output Only] Type of the resource. Always @@ -9926,6 +10537,7 @@ type GuestOsFeature struct { // // Possible values: // "FEATURE_TYPE_UNSPECIFIED" + // "GVNIC" // "MULTI_IP_SUBNET" // "SECURE_BOOT" // "UEFI_COMPATIBLE" @@ -10251,6 +10863,10 @@ type HealthCheck struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + SslHealthCheck *SSLHealthCheck `json:"sslHealthCheck,omitempty"` TcpHealthCheck *TCPHealthCheck `json:"tcpHealthCheck,omitempty"` @@ -10260,10 +10876,10 @@ type HealthCheck struct { // greater value than checkIntervalSec. TimeoutSec int64 `json:"timeoutSec,omitempty"` - // Type: Specifies the type of the healthCheck, either TCP, SSL, HTTP or - // HTTPS. 
If not specified, the default is TCP. Exactly one of the - // protocol-specific health check field must be specified, which must - // match type field. + // Type: Specifies the type of the healthCheck, either TCP, SSL, HTTP, + // HTTPS or HTTP2. If not specified, the default is TCP. Exactly one of + // the protocol-specific health check field must be specified, which + // must match type field. // // Possible values: // "HTTP" @@ -10496,138 +11112,145 @@ func (s *HealthCheckReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type HealthChecksAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +// HealthCheckService: A HealthCheckService defines a set of backends on +// which to perform periodic health checks and an endpoint to which to +// send notification of changes in the health status of the backends. +type HealthCheckService struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Items: A list of HealthChecksScopedList resources. - Items map[string]HealthChecksScopedList `json:"items,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // Kind: Type of resource. + // HealthChecks: List of URLs to the HealthCheck resources. Must have at + // least one HealthCheck, and not more than 10. HealthCheck resources + // must have portSpecification=USE_SERVING_PORT. For regional + // HealthCheckService, the HealthCheck must be regional and in the same + // region. For global HealthCheckService, HealthCheck must be global. + // Mix of regional and global HealthChecks is not supported. Multiple + // regional HealthChecks must belong to the same region. Regional + // HealthChecks 0. + NumRetries int64 `json:"numRetries,omitempty"` + + // PerTryTimeout: Specifies a non-zero timeout per retry attempt. + PerTryTimeout *Duration `json:"perTryTimeout,omitempty"` + + // RetryConditions: Specfies one or more conditions when this retry rule + // applies. Valid values are: + // - 5xx: Loadbalancer will attempt a retry if the backend service + // responds with any 5xx response code, or if the backend service does + // not respond at all, example: disconnects, reset, read timeout, + // connection failure, and refused streams. + // - gateway-error: Similar to 5xx, but only applies to response codes + // 502, 503 or 504. + // - + // - connect-failure: Loadbalancer will retry on failures connecting to + // backend services, for example due to connection timeouts. + // - retriable-4xx: Loadbalancer will retry for retriable 4xx response + // codes. Currently the only retriable error supported is 409. + // - refused-stream:Loadbalancer will retry if the backend service + // resets the stream with a REFUSED_STREAM error code. This reset type + // indicates that it is safe to retry. 
+ // - cancelledLoadbalancer will retry if the gRPC status code in the + // response header is set to cancelled + // - deadline-exceeded: Loadbalancer will retry if the gRPC status code + // in the response header is set to deadline-exceeded + // - resource-exhausted: Loadbalancer will retry if the gRPC status code + // in the response header is set to resource-exhausted + // - unavailable: Loadbalancer will retry if the gRPC status code in the + // response header is set to unavailable + RetryConditions []string `json:"retryConditions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NumRetries") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NumRetries") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpRetryPolicy) MarshalJSON() ([]byte, error) { + type NoMethod HttpRetryPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HttpRouteAction struct { + // CorsPolicy: The specification for allowing client side cross-origin + // requests. Please see W3C Recommendation for Cross Origin Resource + // Sharing + CorsPolicy *CorsPolicy `json:"corsPolicy,omitempty"` + + // FaultInjectionPolicy: The specification for fault injection + // introduced into traffic to test the resiliency of clients to backend + // service failure. As part of fault injection, when clients send + // requests to a backend service, delays can be introduced by + // Loadbalancer on a percentage of requests before sending those request + // to the backend service. Similarly requests from clients can be + // aborted by the Loadbalancer for a percentage of requests. + // timeout and retry_policy will be ignored by clients that are + // configured with a fault_injection_policy. + FaultInjectionPolicy *HttpFaultInjection `json:"faultInjectionPolicy,omitempty"` + + // RequestMirrorPolicy: Specifies the policy on how requests intended + // for the route's backends are shadowed to a separate mirrored backend + // service. Loadbalancer does not wait for responses from the shadow + // service. Prior to sending traffic to the shadow service, the host / + // authority header is suffixed with -shadow. + RequestMirrorPolicy *RequestMirrorPolicy `json:"requestMirrorPolicy,omitempty"` + + // RetryPolicy: Specifies the retry policy associated with this route. + RetryPolicy *HttpRetryPolicy `json:"retryPolicy,omitempty"` + + // Timeout: Specifies the timeout for the selected route. Timeout is + // computed from the time the request is has been fully processed (i.e. + // end-of-stream) up until the response has been completely processed. + // Timeout includes all retries. + // If not specified, the default value is 15 seconds. 
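// Illustrative sketch (hypothetical values): an HttpRetryPolicy as defined
// above that retries up to three times on connection-level failures and 5xx
// responses, with a two-second per-try timeout.
func exampleRetryPolicy() *HttpRetryPolicy {
	return &HttpRetryPolicy{
		NumRetries:      3,
		PerTryTimeout:   &Duration{Seconds: 2},
		RetryConditions: []string{"5xx", "connect-failure", "refused-stream"},
	}
}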
+ Timeout *Duration `json:"timeout,omitempty"` + + // UrlRewrite: The spec to modify the URL of the request, prior to + // forwarding the request to the matched service + UrlRewrite *UrlRewrite `json:"urlRewrite,omitempty"` + + // WeightedBackendServices: A list of weighted backend services to send + // traffic to when a route match occurs. The weights determine the + // fraction of traffic that flows to their corresponding backend + // service. If all traffic needs to go to a single backend service, + // there must be one weightedBackendService with weight set to a non 0 + // number. + // Once a backendService is identified and before forwarding the request + // to the backend service, advanced routing actions like Url rewrites + // and header transformations are applied depending on additional + // settings specified in this HttpRouteAction. + WeightedBackendServices []*WeightedBackendService `json:"weightedBackendServices,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CorsPolicy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CorsPolicy") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpRouteAction) MarshalJSON() ([]byte, error) { + type NoMethod HttpRouteAction + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpRouteRule: An HttpRouteRule specifies how to match an HTTP +// request and the corresponding routing action that load balancing +// proxies will perform. +type HttpRouteRule struct { + // HeaderAction: Specifies changes to request and response headers that + // need to take effect for the selected backendService. + // The headerAction specified here are applied before the matching + // pathMatchers[].headerAction and after + // pathMatchers[].routeRules[].routeAction.weightedBackendService.backend + // ServiceWeightAction[].headerAction + HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` + + MatchRules []*HttpRouteRuleMatch `json:"matchRules,omitempty"` + + // RouteAction: In response to a matching matchRule, the load balancer + // performs advanced routing actions like URL rewrites, header + // transformations, etc. prior to forwarding the request to the selected + // backend. If routeAction specifies any weightedBackendServices, + // service must not be set. Conversely if service is set, routeAction + // cannot contain any weightedBackendServices. + // Only one of routeAction or urlRedirect must be set. + RouteAction *HttpRouteAction `json:"routeAction,omitempty"` + + // Service: The full or partial URL of the backend service resource to + // which traffic is directed if this rule is matched. If routeAction is + // additionally specified, advanced routing actions like URL Rewrites, + // etc. take effect prior to sending the request to the backend. 
+ // However, if service is specified, routeAction cannot contain any + // weightedBackendService s. Conversely, if routeAction specifies any + // weightedBackendServices, service must not be specified. + // Only one of urlRedirect, service or + // routeAction.weightedBackendService must be set. + Service string `json:"service,omitempty"` + + // UrlRedirect: When this rule is matched, the request is redirected to + // a URL specified by urlRedirect. + // If urlRedirect is specified, service or routeAction must not be set. + UrlRedirect *HttpRedirectAction `json:"urlRedirect,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HeaderAction") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HeaderAction") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpRouteRule) MarshalJSON() ([]byte, error) { + type NoMethod HttpRouteRule + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpRouteRuleMatch: HttpRouteRuleMatch specifies a set of criteria +// for matching requests to an HttpRouteRule. All specified criteria +// must be satisfied for a match to occur. +type HttpRouteRuleMatch struct { + // FullPathMatch: For satifying the matchRule condition, the path of the + // request must exactly match the value specified in fullPathMatch after + // removing any query parameters and anchor that may be part of the + // original URL. + // FullPathMatch must be between 1 and 1024 characters. + // Only one of prefixMatch, fullPathMatch or regexMatch must be + // specified. + FullPathMatch string `json:"fullPathMatch,omitempty"` + + // HeaderMatches: Specifies a list of header match criteria, all of + // which must match corresponding headers in the request. + HeaderMatches []*HttpHeaderMatch `json:"headerMatches,omitempty"` + + // IgnoreCase: Specifies that prefixMatch and fullPathMatch matches are + // case sensitive. + // The default value is false. + // caseSensitive must not be used with regexMatch. + IgnoreCase bool `json:"ignoreCase,omitempty"` + + // MetadataFilters: Opaque filter criteria used by Loadbalancer to + // restrict routing configuration to a limited set xDS compliant + // clients. In their xDS requests to Loadbalancer, xDS clients present + // node metadata. If a match takes place, the relevant routing + // configuration is made available to those proxies. + // For each metadataFilter in this list, if its filterMatchCriteria is + // set to MATCH_ANY, at least one of the filterLabels must match the + // corresponding label provided in the metadata. If its + // filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels + // must match with corresponding labels in the provided + // metadata. 
+ // metadataFilters specified here can be overrides those specified in + // ForwardingRule that refers to this UrlMap. + // metadataFilters only applies to Loadbalancers that have their + // loadBalancingScheme set to INTERNAL_SELF_MANAGED. + MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` + + // PrefixMatch: For satifying the matchRule condition, the request's + // path must begin with the specified prefixMatch. prefixMatch must + // begin with a /. + // The value must be between 1 and 1024 characters. + // Only one of prefixMatch, fullPathMatch or regexMatch must be + // specified. + PrefixMatch string `json:"prefixMatch,omitempty"` + + // QueryParameterMatches: Specifies a list of query parameter match + // criteria, all of which must match corresponding query parameters in + // the request. + QueryParameterMatches []*HttpQueryParameterMatch `json:"queryParameterMatches,omitempty"` + + // RegexMatch: For satifying the matchRule condition, the path of the + // request must satisfy the regular expression specified in regexMatch + // after removing any query parameters and anchor supplied with the + // original URL. For regular expression grammar please see + // en.cppreference.com/w/cpp/regex/ecmascript + // Only one of prefixMatch, fullPathMatch or regexMatch must be + // specified. + RegexMatch string `json:"regexMatch,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FullPathMatch") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FullPathMatch") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpRouteRuleMatch) MarshalJSON() ([]byte, error) { + type NoMethod HttpRouteRuleMatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HttpsHealthCheck: An HttpsHealthCheck resource. This resource defines // a template for how individual instances should be checked for health, // via HTTPS. @@ -11211,6 +12803,10 @@ type HttpsHealthCheck struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // TimeoutSec: How long (in seconds) to wait before claiming failure. // The default value is 5 seconds. It is invalid for timeoutSec to have // a greater value than checkIntervalSec. @@ -11499,6 +13095,14 @@ type Image struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource's + // resource id. 
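// Illustrative sketch (the backend service URL is a hypothetical partial URL):
// an HttpRouteRule as defined above that matches requests under /api/ and sends
// them to a single backend service. service and
// routeAction.weightedBackendServices are mutually exclusive, as are
// prefixMatch, fullPathMatch and regexMatch.
func exampleRouteRule() *HttpRouteRule {
	return &HttpRouteRule{
		MatchRules: []*HttpRouteRuleMatch{
			{PrefixMatch: "/api/"}, // prefixMatch must begin with a "/"
		},
		Service: "projects/project/global/backendServices/api-backend",
	}
}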
+ SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + + // ShieldedInstanceInitialState: Set the secure boot keys of shielded + // instance. + ShieldedInstanceInitialState *InitialStateConfig `json:"shieldedInstanceInitialState,omitempty"` + // SourceDisk: URL of the source disk used to create this image. This // can be a full or valid partial URL. You must provide either this // property or the rawDisk.source property but not both to create an @@ -11570,11 +13174,16 @@ type Image struct { // Possible values are FAILED, PENDING, or READY. // // Possible values: + // "DELETING" // "FAILED" // "PENDING" // "READY" Status string `json:"status,omitempty"` + // StorageLocations: GCS bucket storage location of the image (regional + // or multi-regional). + StorageLocations []string `json:"storageLocations,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11614,8 +13223,9 @@ type ImageRawDisk struct { // "TAR" ContainerType string `json:"containerType,omitempty"` - // Sha1Checksum: An optional SHA1 checksum of the disk image before - // unpackaging; provided by the client when the disk image is created. + // Sha1Checksum: [Deprecated] This field is deprecated. An optional SHA1 + // checksum of the disk image before unpackaging provided by the client + // when the disk image is created. Sha1Checksum string `json:"sha1Checksum,omitempty"` // Source: The full Google Cloud Storage URL where the disk image is @@ -11800,13 +13410,47 @@ func (s *ImageListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InitialStateConfig: Initial State for shielded instance, these are +// public keys which are safe to store in public +type InitialStateConfig struct { + // Dbs: The Key Database (db). + Dbs []*FileContentBuffer `json:"dbs,omitempty"` + + // Dbxs: The forbidden key database (dbx). + Dbxs []*FileContentBuffer `json:"dbxs,omitempty"` + + // Keks: The Key Exchange Key (KEK). + Keks []*FileContentBuffer `json:"keks,omitempty"` + + // Pk: The Platform Key (PK). + Pk *FileContentBuffer `json:"pk,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Dbs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Dbs") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InitialStateConfig) MarshalJSON() ([]byte, error) { + type NoMethod InitialStateConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Instance: An Instance resource. (== resource_for beta.instances ==) // (== resource_for v1.instances ==) type Instance struct { - // AllocationAffinity: The configuration of desired allocations which - // this Instance could consume capacity from. 
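// Illustrative sketch (placeholder key material, exact content encoding not
// specified here): an InitialStateConfig as defined above, wiring the
// shielded-instance Platform Key and one Key Exchange Key as X509
// FileContentBuffers.
func exampleInitialState() *InitialStateConfig {
	return &InitialStateConfig{
		Pk: &FileContentBuffer{
			Content:  "PLACEHOLDER-PK-CERTIFICATE", // raw content of the secure keys file
			FileType: "X509",
		},
		Keks: []*FileContentBuffer{
			{Content: "PLACEHOLDER-KEK-CERTIFICATE", FileType: "X509"},
		},
	}
}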
- AllocationAffinity *AllocationAffinity `json:"allocationAffinity,omitempty"` - // CanIpForward: Allows this instance to send and receive packets with // non-matching destination or source IPs. This is required if you plan // to use this instance to forward routes. For more information, see @@ -11938,6 +13582,10 @@ type Instance struct { // instances. Read-only in the api. PreservedStateSizeGb int64 `json:"preservedStateSizeGb,omitempty,string"` + // ReservationAffinity: The configuration of desired reservations from + // which this Instance can consume capacity from. + ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` + // ResourcePolicies: Resource policies applied to this instance. ResourcePolicies []string `json:"resourcePolicies,omitempty"` @@ -11947,6 +13595,10 @@ type Instance struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // ServiceAccounts: A list of service accounts, with their specified // scopes, authorized for this instance. Only one service account per VM // instance is supported. @@ -11956,6 +13608,10 @@ type Instance struct { // instance. See Service Accounts for more information. ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` + ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` + + ShieldedInstanceIntegrityPolicy *ShieldedInstanceIntegrityPolicy `json:"shieldedInstanceIntegrityPolicy,omitempty"` + ShieldedVmConfig *ShieldedVmConfig `json:"shieldedVmConfig,omitempty"` ShieldedVmIntegrityPolicy *ShieldedVmIntegrityPolicy `json:"shieldedVmIntegrityPolicy,omitempty"` @@ -11963,6 +13619,10 @@ type Instance struct { // SourceMachineImage: Source machine image SourceMachineImage string `json:"sourceMachineImage,omitempty"` + // SourceMachineImageEncryptionKey: Source GMI encryption key when + // creating an instance from GMI. + SourceMachineImageEncryptionKey *CustomerEncryptionKey `json:"sourceMachineImageEncryptionKey,omitempty"` + // StartRestricted: [Output Only] Whether a VM has been restricted for // start because Compute Engine has detected suspicious activity. StartRestricted bool `json:"startRestricted,omitempty"` @@ -11973,6 +13633,7 @@ type Instance struct { // // Possible values: // "PROVISIONING" + // "REPAIRING" // "RUNNING" // "STAGING" // "STOPPED" @@ -12002,21 +13663,20 @@ type Instance struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "AllocationAffinity") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CanIpForward") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AllocationAffinity") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. 
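The ForceSendFields/NullFields boilerplate repeated through these structs is what lets a Patch body carry an explicitly empty value. A small sketch using the CanIpForward field that the comment itself cites as its example (compute alias and "encoding/json" assumed to be imported).

func exampleForceSend() ([]byte, []byte, error) {
    inst := &compute.Instance{CanIpForward: false}
    omitted, err := json.Marshal(inst) // "{}": the empty bool is dropped
    if err != nil {
        return nil, nil, err
    }
    inst.ForceSendFields = []string{"CanIpForward"}
    forced, err := json.Marshal(inst) // {"canIpForward":false}: empty value sent anyway
    return omitted, forced, err
}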
It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "CanIpForward") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -12235,6 +13895,10 @@ type InstanceGroup struct { // generates this URL. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // Size: [Output Only] The total number of instances in the instance // group. Size int64 `json:"size,omitempty"` @@ -12662,9 +14326,12 @@ type InstanceGroupManager struct { // complementary to this Instance Group Manager. NamedPorts []*NamedPort `json:"namedPorts,omitempty"` - // PendingActions: [Output Only] The list of instance actions and the - // number of instances in this managed instance group that are pending - // for each of those actions. + // PendingActions: [Deprecated] This field is deprecated and will be + // removed. Prefer using the status field instead. Please contact + // cloud-updater-feedback@google.com to leave feedback if your workload + // relies on this field. [Output Only] The list of instance actions and + // the number of instances in this managed instance group that are + // pending for each of those actions. PendingActions *InstanceGroupManagerPendingActionsSummary `json:"pendingActions,omitempty"` // Region: [Output Only] The URL of the region where the managed @@ -12675,6 +14342,10 @@ type InstanceGroupManager struct { // server defines this URL. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // ServiceAccount: The service account to be used as credentials for all // operations performed by the managed instance group on instances. The // service accounts needs all permissions required to create and delete @@ -12705,12 +14376,12 @@ type InstanceGroupManager struct { // Versions: Specifies the instance templates used by this managed // instance group to create instances. // - // Each version is defined by an instanceTemplate. Every template can - // appear at most once per instance group. This field overrides the - // top-level instanceTemplate field. Read more about the relationships - // between these fields. Exactly one version must leave the targetSize - // field unset. That version will be applied to all remaining instances. - // For more information, read about canary updates. + // Each version is defined by an instanceTemplate and a name. Every + // version can appear at most once per instance group. This field + // overrides the top-level instanceTemplate field. Read more about the + // relationships between these fields. Exactly one version must leave + // the targetSize field unset. That version will be applied to all + // remaining instances. For more information, read about canary updates. 
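The canary-update rule described above (every version named, exactly one version without a targetSize) in a short sketch. TargetSize and the FixedOrPercent fields come from elsewhere in this package and are assumptions here (compute alias assumed).

func exampleCanaryVersions() *compute.InstanceGroupManager {
    return &compute.InstanceGroupManager{
        Versions: []*compute.InstanceGroupManagerVersion{
            {
                Name:             "canary",
                InstanceTemplate: "global/instanceTemplates/web-v2",
                // Caps how many instances run the new template.
                TargetSize: &compute.FixedOrPercent{Percent: 10},
            },
            {
                // Exactly one version leaves TargetSize unset; it
                // receives all remaining instances.
                Name:             "stable",
                InstanceTemplate: "global/instanceTemplates/web-v1",
            },
        },
    }
}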
Versions []*InstanceGroupManagerVersion `json:"versions,omitempty"` // Zone: [Output Only] The URL of the zone where the managed instance @@ -12995,10 +14666,19 @@ type InstanceGroupManagerAutoHealingPolicy struct { InitialDelaySec int64 `json:"initialDelaySec,omitempty"` // MaxUnavailable: Maximum number of instances that can be unavailable - // when autohealing. The instance is considered available if all of the - // following conditions are satisfied: 1. Instance's status is RUNNING. - // 2. Instance's liveness health check result was observed to be HEALTHY - // at least once. By default, a percent value of 100% is used. + // when autohealing. When 'percent' is used, the value is rounded UP. + // The instance is considered available if all of the following + // conditions are satisfied: 1. Instance's status is RUNNING. 2. + // Instance's currentAction is NONE (in particular its liveness health + // check result was observed to be HEALTHY at least once as it passed + // VERIFYING). 3. There is no outgoing action on an instance triggered + // by IGM. + // + // By default, number of concurrently autohealed instances is smaller + // than the managed instance group target size. However, if a zonal + // managed instance group has only one instance, or a regional managed + // instance group has only one instance per zone, autohealing will + // recreate these instances when they become unhealthy. MaxUnavailable *FixedOrPercent `json:"maxUnavailable,omitempty"` // ForceSendFields is a list of field names (e.g. "HealthCheck") to @@ -13183,20 +14863,32 @@ func (s *InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { } type InstanceGroupManagerPendingActionsSummary struct { - // Creating: [Output Only] The number of instances in the managed - // instance group that are pending to be created. + // Creating: [Deprecated] This field is deprecated and will be removed. + // Prefer using the status field instead. Please contact + // cloud-updater-feedback@google.com to leave feedback if your workload + // relies on this field. [Output Only] The number of instances in the + // managed instance group that are pending to be created. Creating int64 `json:"creating,omitempty"` - // Deleting: [Output Only] The number of instances in the managed - // instance group that are pending to be deleted. + // Deleting: [Deprecated] This field is deprecated and will be removed. + // Prefer using the status field instead. Please contact + // cloud-updater-feedback@google.com to leave feedback if your workload + // relies on this field. [Output Only] The number of instances in the + // managed instance group that are pending to be deleted. Deleting int64 `json:"deleting,omitempty"` - // Recreating: [Output Only] The number of instances in the managed - // instance group that are pending to be recreated. + // Recreating: [Deprecated] This field is deprecated and will be + // removed. Prefer using the status field instead. Please contact + // cloud-updater-feedback@google.com to leave feedback if your workload + // relies on this field. [Output Only] The number of instances in the + // managed instance group that are pending to be recreated. Recreating int64 `json:"recreating,omitempty"` - // Restarting: [Output Only] The number of instances in the managed - // instance group that are pending to be restarted. + // Restarting: [Deprecated] This field is deprecated and will be + // removed. Prefer using the status field instead. 
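A sketch of the autohealing policy described above, where a percent-based MaxUnavailable is rounded up; the FixedOrPercent fields are assumed from the wider package (compute alias assumed).

func exampleAutoHealing() *compute.InstanceGroupManagerAutoHealingPolicy {
    return &compute.InstanceGroupManagerAutoHealingPolicy{
        HealthCheck:     "global/healthChecks/http-basic-check",
        InitialDelaySec: 300, // let new instances boot before health checks count
        // Rounded up when expressed as a percent of the group size.
        MaxUnavailable: &compute.FixedOrPercent{Percent: 20},
    }
}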
Please contact + // cloud-updater-feedback@google.com to leave feedback if your workload + // relies on this field. [Output Only] The number of instances in the + // managed instance group that are pending to be restarted. Restarting int64 `json:"restarting,omitempty"` // ForceSendFields is a list of field names (e.g. "Creating") to @@ -13231,6 +14923,10 @@ type InstanceGroupManagerStatus struct { // group; and the managed instance group itself is not being modified. IsStable bool `json:"isStable,omitempty"` + // Stateful: [Output Only] Stateful status of the given Instance Group + // Manager. + Stateful *InstanceGroupManagerStatusStateful `json:"stateful,omitempty"` + // VersionTarget: [Output Only] A status of consistency of Instances' // versions with their target version specified by version field on // Instance Group Manager. @@ -13259,6 +14955,37 @@ func (s *InstanceGroupManagerStatus) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstanceGroupManagerStatusStateful struct { + // IsStateful: [Output Only] A bit indicating whether the managed + // instance group is stateful, i.e. has any disks in Stateful Policy or + // at least one per-instance config. This is determined based on the + // user intent, the group may be reported as not stateful even when + // there is still some preserved state on managed instances. + IsStateful bool `json:"isStateful,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IsStateful") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IsStateful") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerStatusStateful) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerStatusStateful + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupManagerStatusVersionTarget struct { // IsReached: [Output Only] A bit indicating whether version target has // been reached in this managed instance group, i.e. all instances are @@ -13345,6 +15072,14 @@ type InstanceGroupManagerUpdatePolicy struct { // "RESTART" MinimalAction string `json:"minimalAction,omitempty"` + // ReplacementMethod: What action should be used to replace instances. + // See minimal_action.REPLACE + // + // Possible values: + // "RECREATE" + // "SUBSTITUTE" + ReplacementMethod string `json:"replacementMethod,omitempty"` + // Possible values: // "OPPORTUNISTIC" // "PROACTIVE" @@ -13376,6 +15111,10 @@ func (s *InstanceGroupManagerUpdatePolicy) MarshalJSON() ([]byte, error) { } type InstanceGroupManagerVersion struct { + // InstanceTemplate: The URL of the instance template that is specified + // for this managed instance group. 
The group uses this template to + // create new instances in the managed instance group until the + // `targetSize` for this version is reached. InstanceTemplate string `json:"instanceTemplate,omitempty"` // Name: Name of the version. Unique among all versions in the scope of @@ -13461,8 +15200,9 @@ type InstanceGroupManagersApplyUpdatesRequest struct { // zones/[ZONE]/instances/[INSTANCE_NAME]. Instances []string `json:"instances,omitempty"` - // MaximalAction: The maximal action that should be perfomed on the - // instances. By default REPLACE. + // MaximalAction: The maximal action that should be performed on the + // instances. By default REPLACE. This field is deprecated, please use + // most_disruptive_allowed_action. // // Possible values: // "NONE" @@ -13481,6 +15221,16 @@ type InstanceGroupManagersApplyUpdatesRequest struct { // "RESTART" MinimalAction string `json:"minimalAction,omitempty"` + // MostDisruptiveAllowedAction: The most disruptive action that allowed + // to be performed on the instances. By default REPLACE. + // + // Possible values: + // "NONE" + // "REFRESH" + // "REPLACE" + // "RESTART" + MostDisruptiveAllowedAction string `json:"mostDisruptiveAllowedAction,omitempty"` + // ForceSendFields is a list of field names (e.g. "Instances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -13781,6 +15531,37 @@ func (s *InstanceGroupManagersListPerInstanceConfigsRespWarningData) MarshalJSON return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupManagersPatchPerInstanceConfigsReq: +// InstanceGroupManagers.patchPerInstanceConfigs +type InstanceGroupManagersPatchPerInstanceConfigsReq struct { + // PerInstanceConfigs: The list of per-instance configs to insert or + // patch on this managed instance group. + PerInstanceConfigs []*PerInstanceConfig `json:"perInstanceConfigs,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PerInstanceConfigs") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PerInstanceConfigs") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagersPatchPerInstanceConfigsReq) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagersPatchPerInstanceConfigsReq + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupManagersRecreateInstancesRequest struct { // Instances: The URLs of one or more instances to recreate. 
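The applyUpdates request above pairs a minimal action with a most-disruptive ceiling, with the older MaximalAction field now deprecated. A short sketch (compute alias assumed; the instance URL follows the zones/[ZONE]/instances/[INSTANCE_NAME] form given above).

func exampleApplyUpdates() *compute.InstanceGroupManagersApplyUpdatesRequest {
    return &compute.InstanceGroupManagersApplyUpdatesRequest{
        Instances: []string{"zones/us-central1-a/instances/web-abcd"},
        // Do at least a REFRESH, but never anything more disruptive
        // than a RESTART.
        MinimalAction:               "REFRESH",
        MostDisruptiveAllowedAction: "RESTART",
    }
}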
This can be // a full URL or a partial URL, such as @@ -14900,10 +16681,6 @@ func (s *InstanceMoveRequest) MarshalJSON() ([]byte, error) { } type InstanceProperties struct { - // AllocationAffinity: The configuration of desired allocations which - // this Instance could consume capacity from. - AllocationAffinity *AllocationAffinity `json:"allocationAffinity,omitempty"` - // CanIpForward: Enables instances created based on this template to // send packets with source IP addresses other than their own and // receive packets with destination IP addresses other than their own. @@ -14921,6 +16698,10 @@ type InstanceProperties struct { // are created from this template. Disks []*AttachedDisk `json:"disks,omitempty"` + // DisplayDevice: Display Device properties to enable support for remote + // display products like: Teradici, VNC and TeamViewer + DisplayDevice *DisplayDevice `json:"displayDevice,omitempty"` + // GuestAccelerators: A list of guest accelerator cards' type and count // to use for instances created from the instance template. GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` @@ -14951,6 +16732,10 @@ type InstanceProperties struct { // interface. NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + // ReservationAffinity: The configuration of desired reservations which + // this Instance could consume capacity from. + ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` + // Scheduling: Specifies the scheduling options for the instances that // are created from this template. Scheduling *Scheduling `json:"scheduling,omitempty"` @@ -14961,6 +16746,8 @@ type InstanceProperties struct { // to obtain the access tokens for these instances. ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` + ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` + // ShieldedVmConfig: Specifies the Shielded VM options for the instances // that are created from this template. ShieldedVmConfig *ShieldedVmConfig `json:"shieldedVmConfig,omitempty"` @@ -14971,21 +16758,20 @@ type InstanceProperties struct { // within the list must comply with RFC1035. Tags *Tags `json:"tags,omitempty"` - // ForceSendFields is a list of field names (e.g. "AllocationAffinity") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CanIpForward") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AllocationAffinity") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "CanIpForward") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -15057,6 +16843,10 @@ type InstanceTemplate struct { // server defines this URL. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // SourceInstance: The source instance used to create the template. You // can provide this as a partial or full URL to the resource. For // example, the following are valid values: @@ -15265,6 +17055,7 @@ type InstanceWithNamedPorts struct { // // Possible values: // "PROVISIONING" + // "REPAIRING" // "RUNNING" // "STAGING" // "STOPPED" @@ -15325,6 +17116,75 @@ func (s *InstancesAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstancesGetEffectiveFirewallsResponse struct { + // Firewalls: Effective firewalls on the instance. + Firewalls []*Firewall `json:"firewalls,omitempty"` + + // OrganizationFirewalls: Effective firewalls from organization + // policies. + OrganizationFirewalls []*InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy `json:"organizationFirewalls,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Firewalls") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Firewalls") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstancesGetEffectiveFirewallsResponse) MarshalJSON() ([]byte, error) { + type NoMethod InstancesGetEffectiveFirewallsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy: A +// pruned SecurityPolicy containing ID and any applicable firewall +// rules. +type InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy struct { + // Id: The unique identifier for the security policy. This identifier is + // defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Rules: The rules that apply to the network. + Rules []*SecurityPolicyRule `json:"rules,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
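The effective-firewalls response above merges instance-level firewalls with pruned organization security policies. A sketch of walking a response obtained elsewhere (compute alias and "fmt" assumed).

func summarizeEffectiveFirewalls(resp *compute.InstancesGetEffectiveFirewallsResponse) {
    fmt.Printf("%d instance-level firewalls\n", len(resp.Firewalls))
    for _, org := range resp.OrganizationFirewalls {
        // Each entry is a pruned security policy: its server-assigned ID
        // plus only the rules that apply to this instance's network.
        fmt.Printf("org policy %d carries %d rules\n", org.Id, len(org.Rules))
    }
}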
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy) MarshalJSON() ([]byte, error) { + type NoMethod InstancesGetEffectiveFirewallsResponseOrganizationFirewallPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstancesRemoveResourcePoliciesRequest struct { // ResourcePolicies: Resource policies to be removed from this instance. ResourcePolicies []string `json:"resourcePolicies,omitempty"` @@ -15720,6 +17580,40 @@ func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Int64RangeMatch: HttpRouteRuleMatch criteria for field values that +// must stay within the specified integer range. +type Int64RangeMatch struct { + // RangeEnd: The end of the range (exclusive) in signed long integer + // format. + RangeEnd int64 `json:"rangeEnd,omitempty,string"` + + // RangeStart: The start of the range (inclusive) in signed long integer + // format. + RangeStart int64 `json:"rangeStart,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "RangeEnd") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RangeEnd") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Int64RangeMatch) MarshalJSON() ([]byte, error) { + type NoMethod Int64RangeMatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Interconnect: Represents an Interconnects resource. The Interconnects // resource is a dedicated connection between Google's network and your // on-premises network. For more information, see the Dedicated @@ -15757,7 +17651,7 @@ type Interconnect struct { // side of the Interconnect link. This can be used only for ping tests. GoogleIpAddress string `json:"googleIpAddress,omitempty"` - // GoogleReferenceId: [Output Only] Google reference ID; to be used when + // GoogleReferenceId: [Output Only] Google reference ID to be used when // raising support tickets with Google or otherwise to debug backend // connectivity issues. 
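Int64RangeMatch above uses an inclusive start and an exclusive end, and both bounds are declared with the ",string" JSON tag. A small sketch (compute alias and "encoding/json" assumed).

func exampleRangeMatch() ([]byte, error) {
    // Matches values v with 100 <= v < 200.
    m := &compute.Int64RangeMatch{RangeStart: 100, RangeEnd: 200}
    // The ",string" tag encodes both bounds as JSON strings,
    // e.g. "rangeStart":"100".
    return json.Marshal(m)
}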
GoogleReferenceId string `json:"googleReferenceId,omitempty"` @@ -15770,8 +17664,13 @@ type Interconnect struct { // InterconnectAttachments configured to use this Interconnect. InterconnectAttachments []string `json:"interconnectAttachments,omitempty"` - // InterconnectType: Type of interconnect. Note that "IT_PRIVATE" has - // been deprecated in favor of "DEDICATED" + // InterconnectType: Type of interconnect, which can take one of the + // following values: + // - PARTNER: A partner-managed interconnection shared between customers + // though a partner. + // - DEDICATED: A dedicated physical interconnection with the customer. + // Note that a value IT_PRIVATE has been deprecated in favor of + // DEDICATED. // // Possible values: // "DEDICATED" @@ -15800,11 +17699,15 @@ type Interconnect struct { // comply with RFC1035. Label values may be empty. Labels map[string]string `json:"labels,omitempty"` - // LinkType: Type of link requested. This field indicates speed of each - // of the links in the bundle, not the entire bundle. Only 10G per link - // is allowed for a dedicated interconnect. Options: Ethernet_10G_LR + // LinkType: Type of link requested, which can take one of the following + // values: + // - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics + // - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note + // that this field indicates the speed of each of the links in the + // bundle, not the speed of the entire bundle. // // Possible values: + // "LINK_TYPE_ETHERNET_100G_LR" // "LINK_TYPE_ETHERNET_10G_LR" LinkType string `json:"linkType,omitempty"` @@ -15828,8 +17731,16 @@ type Interconnect struct { // Notifications. NocContactEmail string `json:"nocContactEmail,omitempty"` - // OperationalStatus: [Output Only] The current status of whether or not - // this Interconnect is functional. + // OperationalStatus: [Output Only] The current status of this + // Interconnect's functionality, which can take one of the following + // values: + // - OS_ACTIVE: A valid Interconnect, which is turned up and is ready to + // use. Attachments may be provisioned on this Interconnect. + // - OS_UNPROVISIONED: An Interconnect that has not completed turnup. No + // attachments may be provisioned on this Interconnect. + // - OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal + // maintenance. No attachments may be provisioned or updated on this + // Interconnect. // // Possible values: // "OS_ACTIVE" @@ -15853,8 +17764,19 @@ type Interconnect struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // State: [Output Only] The current state of whether or not this - // Interconnect is functional. + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + + // State: [Output Only] The current state of Interconnect functionality, + // which can take one of the following values: + // - ACTIVE: The Interconnect is valid, turned up and ready to use. + // Attachments may be provisioned on this Interconnect. + // - UNPROVISIONED: The Interconnect has not completed turnup. No + // attachments may be provisioned on this Interconnect. + // - UNDER_MAINTENANCE: The Interconnect is undergoing internal + // maintenance. No attachments may be provisioned or updated on this + // Interconnect. // // Possible values: // "ACTIVE" @@ -15897,20 +17819,34 @@ type InterconnectAttachment struct { // Not present for PARTNER_PROVIDER. 
AdminEnabled bool `json:"adminEnabled,omitempty"` - // Bandwidth: Provisioned bandwidth capacity for the - // interconnectAttachment. Can be set by the partner to update the - // customer's provisioned bandwidth. Output only for PARTNER type, - // mutable for PARTNER_PROVIDER and DEDICATED. + // Bandwidth: Provisioned bandwidth capacity for the interconnect + // attachment. For attachments of type DEDICATED, the user can set the + // bandwidth. For attachments of type PARTNER, the Google Partner that + // is operating the interconnect must set the bandwidth. Output only for + // PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, and can + // take one of the following values: + // - BPS_50M: 50 Mbit/s + // - BPS_100M: 100 Mbit/s + // - BPS_200M: 200 Mbit/s + // - BPS_300M: 300 Mbit/s + // - BPS_400M: 400 Mbit/s + // - BPS_500M: 500 Mbit/s + // - BPS_1G: 1 Gbit/s + // - BPS_2G: 2 Gbit/s + // - BPS_5G: 5 Gbit/s + // - BPS_10G: 10 Gbit/s // // Possible values: // "BPS_100M" // "BPS_10G" // "BPS_1G" // "BPS_200M" + // "BPS_20G" // "BPS_2G" // "BPS_300M" // "BPS_400M" // "BPS_500M" + // "BPS_50G" // "BPS_50M" // "BPS_5G" Bandwidth string `json:"bandwidth,omitempty"` @@ -15943,12 +17879,16 @@ type InterconnectAttachment struct { Description string `json:"description,omitempty"` // EdgeAvailabilityDomain: Desired availability domain for the - // attachment. Only available for type PARTNER, at creation time. For - // improved reliability, customers should configure a pair of - // attachments with one per availability domain. The selected - // availability domain will be provided to the Partner via the pairing - // key so that the provisioned circuit will lie in the specified domain. - // If not specified, the value will default to AVAILABILITY_DOMAIN_ANY. + // attachment. Only available for type PARTNER, at creation time, and + // can take one of the following values: + // - AVAILABILITY_DOMAIN_ANY + // - AVAILABILITY_DOMAIN_1 + // - AVAILABILITY_DOMAIN_2 For improved reliability, customers should + // configure a pair of attachments, one per availability domain. The + // selected availability domain will be provided to the Partner via the + // pairing key, so that the provisioned circuit will lie in the + // specified domain. If not specified, the value will default to + // AVAILABILITY_DOMAIN_ANY. // // Possible values: // "AVAILABILITY_DOMAIN_1" @@ -16000,7 +17940,12 @@ type InterconnectAttachment struct { Name string `json:"name,omitempty"` // OperationalStatus: [Output Only] The current status of whether or not - // this interconnect attachment is functional. + // this interconnect attachment is functional, which can take one of the + // following values: + // - OS_ACTIVE: The attachment has been turned up and is ready to use. + // + // - OS_UNPROVISIONED: The attachment is not ready to use yet, because + // turnup is not complete. // // Possible values: // "OS_ACTIVE" @@ -16013,10 +17958,10 @@ type InterconnectAttachment struct { // selected partner. Of the form "XXXXX/region/domain" PairingKey string `json:"pairingKey,omitempty"` - // PartnerAsn: Optional BGP ASN for the router that should be supplied - // by a layer 3 Partner if they configured BGP on behalf of the - // customer. Output only for PARTNER type, input only for - // PARTNER_PROVIDER, not available for DEDICATED. + // PartnerAsn: Optional BGP ASN for the router supplied by a Layer 3 + // Partner if they configured BGP on behalf of the customer. 
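For the PARTNER attachments described above, reliability comes from creating one attachment per edge availability domain; the partner, not the customer, later sets Bandwidth. A sketch of such a pair (compute alias assumed).

func examplePartnerAttachmentPair() []*compute.InterconnectAttachment {
    return []*compute.InterconnectAttachment{
        // One attachment per availability domain; the chosen domain is
        // passed to the partner via the pairing key.
        {Name: "partner-attach-ad1", EdgeAvailabilityDomain: "AVAILABILITY_DOMAIN_1"},
        {Name: "partner-attach-ad2", EdgeAvailabilityDomain: "AVAILABILITY_DOMAIN_2"},
    }
}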
Output only + // for PARTNER type, input only for PARTNER_PROVIDER, not available for + // DEDICATED. PartnerAsn int64 `json:"partnerAsn,omitempty,string"` // PartnerMetadata: Informational metadata about Partner attachments @@ -16044,8 +17989,31 @@ type InterconnectAttachment struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // State: [Output Only] The current state of this attachment's - // functionality. + // functionality. Enum values ACTIVE and UNPROVISIONED are shared by + // DEDICATED/PRIVATE, PARTNER, and PARTNER_PROVIDER interconnect + // attachments, while enum values PENDING_PARTNER, + // PARTNER_REQUEST_RECEIVED, and PENDING_CUSTOMER are used for only + // PARTNER and PARTNER_PROVIDER interconnect attachments. This state can + // take one of the following values: + // - ACTIVE: The attachment has been turned up and is ready to use. + // - UNPROVISIONED: The attachment is not ready to use yet, because + // turnup is not complete. + // - PENDING_PARTNER: A newly-created PARTNER attachment that has not + // yet been configured on the Partner side. + // - PARTNER_REQUEST_RECEIVED: A PARTNER attachment is in the process of + // provisioning after a PARTNER_PROVIDER attachment was created that + // references it. + // - PENDING_CUSTOMER: A PARTNER or PARTNER_PROVIDER attachment that is + // waiting for a customer to activate it. + // - DEFUNCT: The attachment was deleted externally and is no longer + // functional. This could be because the associated Interconnect was + // removed, or because the other side of a Partner attachment was + // deleted. // // Possible values: // "ACTIVE" @@ -16057,6 +18025,14 @@ type InterconnectAttachment struct { // "UNPROVISIONED" State string `json:"state,omitempty"` + // Type: The type of interconnect attachment this is, which can take one + // of the following values: + // - DEDICATED: an attachment to a Dedicated Interconnect. + // - PARTNER: an attachment to a Partner Interconnect, created by the + // customer. + // - PARTNER_PROVIDER: an attachment to a Partner Interconnect, created + // by the partner. + // // Possible values: // "DEDICATED" // "PARTNER" @@ -16425,7 +18401,7 @@ type InterconnectAttachmentPartnerMetadata struct { PartnerName string `json:"partnerName,omitempty"` // PortalUrl: URL of the Partner?s portal for this Attachment. Partners - // may customise this to be a deep-link to the specific resource on the + // may customise this to be a deep link to the specific resource on the // Partner portal. This value may be validated to match approved Partner // values. PortalUrl string `json:"portalUrl,omitempty"` @@ -16741,6 +18717,12 @@ type InterconnectDiagnosticsLinkLACPStatus struct { // LACP exchange. NeighborSystemId string `json:"neighborSystemId,omitempty"` + // State: The state of a LACP link, which can take one of the following + // values: + // - ACTIVE: The link is configured and active within the bundle. + // - DETACHED: The link is not configured within the bundle. This means + // that the rest of the object should be empty. 
+ // // Possible values: // "ACTIVE" // "DETACHED" @@ -16771,6 +18753,17 @@ func (s *InterconnectDiagnosticsLinkLACPStatus) MarshalJSON() ([]byte, error) { } type InterconnectDiagnosticsLinkOpticalPower struct { + // State: The status of the current value when compared to the warning + // and alarm levels for the receiving or transmitting transceiver. + // Possible states include: + // - OK: The value has not crossed a warning threshold. + // - LOW_WARNING: The value has crossed below the low warning threshold. + // + // - HIGH_WARNING: The value has crossed above the high warning + // threshold. + // - LOW_ALARM: The value has crossed below the low alarm threshold. + // - HIGH_ALARM: The value has crossed above the high alarm threshold. + // // Possible values: // "HIGH_ALARM" // "HIGH_WARNING" @@ -16779,10 +18772,11 @@ type InterconnectDiagnosticsLinkOpticalPower struct { // "OK" State string `json:"state,omitempty"` - // Value: Value of the current optical power, read in dBm. Take a known - // good optical value, give it a 10% margin and trigger warnings - // relative to that value. In general, a -7dBm warning and a -11dBm - // alarm are good optical value estimates for most links. + // Value: Value of the current receiving or transmitting optical power, + // read in dBm. Take a known good optical value, give it a 10% margin + // and trigger warnings relative to that value. In general, a -7dBm + // warning and a -11dBm alarm are good optical value estimates for most + // links. Value float64 `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "State") to @@ -16838,8 +18832,14 @@ type InterconnectDiagnosticsLinkStatus struct { LacpStatus *InterconnectDiagnosticsLinkLACPStatus `json:"lacpStatus,omitempty"` + // ReceivingOpticalPower: An InterconnectDiagnostics.LinkOpticalPower + // object, describing the current value and status of the received light + // level. ReceivingOpticalPower *InterconnectDiagnosticsLinkOpticalPower `json:"receivingOpticalPower,omitempty"` + // TransmittingOpticalPower: An InterconnectDiagnostics.LinkOpticalPower + // object, describing the current value and status of the transmitted + // light level. TransmittingOpticalPower *InterconnectDiagnosticsLinkOpticalPower `json:"transmittingOpticalPower,omitempty"` // ForceSendFields is a list of field names (e.g. "ArpCaches") to @@ -17041,7 +19041,13 @@ type InterconnectLocation struct { // "Amsterdam, Netherlands". City string `json:"city,omitempty"` - // Continent: [Output Only] Continent for this location. + // Continent: [Output Only] Continent for this location, which can take + // one of the following values: + // - AFRICA + // - ASIA_PAC + // - EUROPE + // - NORTH_AMERICA + // - SOUTH_AMERICA // // Possible values: // "AFRICA" @@ -17094,10 +19100,16 @@ type InterconnectLocation struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] The status of this InterconnectLocation. If the - // status is AVAILABLE, new Interconnects may be provisioned in this - // InterconnectLocation. Otherwise, no new Interconnects may be - // provisioned. + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. 
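The optical-power states documented above map directly onto a simple health check. A sketch (compute alias and "fmt" assumed).

func reportOpticalPower(p *compute.InterconnectDiagnosticsLinkOpticalPower) {
    // Anything other than OK means the light level crossed a warning or
    // alarm threshold (roughly -7dBm warning / -11dBm alarm on most links).
    if p.State != "OK" {
        fmt.Printf("optical power %s at %.1f dBm\n", p.State, p.Value)
    }
}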
+ SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + + // Status: [Output Only] The status of this InterconnectLocation, which + // can take one of the following values: + // - CLOSED: The InterconnectLocation is closed and is unavailable for + // provisioning new Interconnects. + // - AVAILABLE: The InterconnectLocation is available for provisioning + // new Interconnects. // // Possible values: // "AVAILABLE" @@ -17345,9 +19357,14 @@ type InterconnectOutageNotification struct { // epoch). EndTime int64 `json:"endTime,omitempty,string"` - // IssueType: Form this outage is expected to take. Note that the "IT_" - // versions of this enum have been deprecated in favor of the unprefixed - // values. + // IssueType: Form this outage is expected to take, which can take one + // of the following values: + // - OUTAGE: The Interconnect may be completely out of service for some + // or all of the specified window. + // - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a + // whole should remain up, but with reduced bandwidth. Note that the + // versions of this enum prefixed with "IT_" have been deprecated in + // favor of the unprefixed values. // // Possible values: // "IT_OUTAGE" @@ -17359,8 +19376,10 @@ type InterconnectOutageNotification struct { // Name: Unique identifier for this outage notification. Name string `json:"name,omitempty"` - // Source: The party that generated this notification. Note that - // "NSRC_GOOGLE" has been deprecated in favor of "GOOGLE" + // Source: The party that generated this notification, which can take + // the following value: + // - GOOGLE: this notification as generated by Google. Note that the + // value of NSRC_GOOGLE has been deprecated in favor of GOOGLE. // // Possible values: // "GOOGLE" @@ -17371,8 +19390,15 @@ type InterconnectOutageNotification struct { // Unix epoch). StartTime int64 `json:"startTime,omitempty,string"` - // State: State of this notification. Note that the "NS_" versions of - // this enum have been deprecated in favor of the unprefixed values. + // State: State of this notification, which can take one of the + // following values: + // - ACTIVE: This outage notification is active. The event could be in + // the past, present, or future. See start_time and end_time for + // scheduling. + // - CANCELLED: The outage associated with this notification was + // cancelled before the outage was due to start. Note that the versions + // of this enum prefixed with "NS_" have been deprecated in favor of the + // unprefixed values. // // Possible values: // "ACTIVE" @@ -17827,6 +19853,107 @@ func (s *IpOwnerListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Jwt: JWT configuration for origin authentication. +type Jwt struct { + // Audiences: A JWT containing any of these audiences will be accepted. + // The service name will be accepted if audiences is empty. Examples: + // bookstore_android.apps.googleusercontent.com, + // bookstore_web.apps.googleusercontent.com + Audiences []string `json:"audiences,omitempty"` + + // Issuer: Identifies the issuer that issued the JWT, which is usually a + // URL or an email address. Examples: https://securetoken.google.com, + // 1234567-compute@developer.gserviceaccount.com + Issuer string `json:"issuer,omitempty"` + + // JwksPublicKeys: The provider?s public key set to validate the + // signature of the JWT. 
+ JwksPublicKeys string `json:"jwksPublicKeys,omitempty"` + + // JwtHeaders: jwt_headers and jwt_params define where to extract the + // JWT from an HTTP request. If no explicit location is specified, the + // following default locations are tried in order: + // + // 1. The Authorization header using the Bearer schema. See `here `_. + // Example: + // + // Authorization: Bearer . + // + // 2. `access_token` query parameter. See `this `_ + // + // Multiple JWTs can be verified for a request. Each JWT has to be + // extracted from the locations its issuer specified or from the default + // locations. + // + // This field is set if JWT is sent in a request header. This field + // specifies the header name. For example, if + // `header=x-goog-iap-jwt-assertion`, the header format will be + // x-goog-iap-jwt-assertion: . + JwtHeaders []*JwtHeader `json:"jwtHeaders,omitempty"` + + // JwtParams: This field is set if JWT is sent in a query parameter. + // This field specifies the query parameter name. For example, if + // jwt_params[0] is jwt_token, the JWT format in the query parameter is + // /path?jwt_token=. + JwtParams []string `json:"jwtParams,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Audiences") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Audiences") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Jwt) MarshalJSON() ([]byte, error) { + type NoMethod Jwt + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// JwtHeader: This message specifies a header location to extract JWT +// token. +type JwtHeader struct { + // Name: The HTTP header name. + Name string `json:"name,omitempty"` + + // ValuePrefix: The value prefix. The value format is "value_prefix" For + // example, for "Authorization: Bearer ", value_prefix="Bearer " with a + // space at the end. + ValuePrefix string `json:"valuePrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
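The Jwt and JwtHeader messages above describe where a proxy should look for the token. A sketch of a configuration that accepts the token from either a header or a query parameter (compute alias assumed).

func exampleJwtConfig() *compute.Jwt {
    return &compute.Jwt{
        Issuer:    "https://securetoken.google.com",
        Audiences: []string{"bookstore_web.apps.googleusercontent.com"},
        JwtHeaders: []*compute.JwtHeader{
            // "Authorization: Bearer <token>"; note the trailing space
            // in the value prefix.
            {Name: "Authorization", ValuePrefix: "Bearer "},
            // "x-goog-iap-jwt-assertion: <token>" with no prefix.
            {Name: "x-goog-iap-jwt-assertion"},
        },
        // Or as a query parameter: /path?jwt_token=<token>.
        JwtParams: []string{"jwt_token"},
    }
}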
+ NullFields []string `json:"-"` +} + +func (s *JwtHeader) MarshalJSON() ([]byte, error) { + type NoMethod JwtHeader + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // License: A license resource. type License struct { // ChargesUseFee: [Output Only] Deprecated. This field no longer @@ -17862,6 +19989,10 @@ type License struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // Transferable: If false, licenses will not be copied from the source // resource when creating an image from a disk, disk from snapshot, or // snapshot from disk. @@ -18179,6 +20310,42 @@ func (s *LicensesListResponseWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type LocalDisk struct { + // DiskCount: Specifies the number of such disks. + DiskCount string `json:"diskCount,omitempty"` + + // DiskSizeGb: Specifies the size of the disk in base-2 GB. + DiskSizeGb string `json:"diskSizeGb,omitempty"` + + // DiskType: Specifies the desired disk type on the node. This disk type + // must be a local storage type (e.g.: local-ssd). Note that for + // nodeTemplates, this should be the name of the disk type and not its + // URL. + DiskType string `json:"diskType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskCount") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DiskCount") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LocalDisk) MarshalJSON() ([]byte, error) { + type NoMethod LocalDisk + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // LogConfig: Specifies what kind of log the caller must write type LogConfig struct { // CloudAudit: Cloud audit options. @@ -18314,12 +20481,6 @@ type LogConfigDataAccessOptions struct { // the caller. This is relevant only in the LocalIAM implementation, for // now. // - // NOTE: Logging to Gin in a fail-closed manner is currently unsupported - // while work is being done to satisfy the requirements of go/345. - // Currently, setting LOG_FAIL_CLOSED mode will have no effect, but - // still exists because there is active work being done to support it - // (b/115874152). - // // Possible values: // "LOG_FAIL_CLOSED" // "LOG_MODE_UNSPECIFIED" @@ -18366,6 +20527,24 @@ type MachineImage struct { // compute#machineImage for machine image. Kind string `json:"kind,omitempty"` + // MachineImageEncryptionKey: Encrypts the machine image using a + // customer-supplied encryption key. 
+ // + // After you encrypt a machine image using a customer-supplied key, you + // must provide the same key if you use the machine image later. For + // example, you must provide the encryption key when you create an + // instance from the encrypted machine image in a future + // request. + // + // Customer-supplied encryption keys do not protect access to metadata + // of the machine image. + // + // If you do not provide an encryption key when creating the machine + // image, then the machine image will be encrypted using an + // automatically generated key and you do not need to provide a key to + // use the machine image later. + MachineImageEncryptionKey *CustomerEncryptionKey `json:"machineImageEncryptionKey,omitempty"` + // Name: Name of the resource; provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -18379,6 +20558,10 @@ type MachineImage struct { // defines this URL. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // SourceInstance: The source instance used to create the machine image. // You can provide this as a partial or full URL to the resource. For // example, the following are valid values: @@ -18387,10 +20570,12 @@ type MachineImage struct { // - projects/project/zones/zone/instances/instance SourceInstance string `json:"sourceInstance,omitempty"` - // SourceInstanceProperties: Properties of source instance. + // SourceInstanceProperties: [Output Only] Properties of source + // instance. SourceInstanceProperties *SourceInstanceProperties `json:"sourceInstanceProperties,omitempty"` - // Status: [Output Only] The status of disk creation. + // Status: [Output Only] The status of the machine image. One of the + // following values: INVALID, CREATING, READY, DELETING, and UPLOADING. // // Possible values: // "CREATING" @@ -18400,7 +20585,7 @@ type MachineImage struct { // "UPLOADING" Status string `json:"status,omitempty"` - // StorageLocations: GCS bucket storage location of the snapshot + // StorageLocations: GCS bucket storage location of the machine image // (regional or multi-regional). StorageLocations []string `json:"storageLocations,omitempty"` @@ -18638,6 +20823,10 @@ type MachineType struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // Zone: [Output Only] The name of the zone where the machine type // resides, such as us-central1-a. Zone string `json:"zone,omitempty"` @@ -19173,6 +21362,7 @@ type ManagedInstance struct { // // Possible values: // "PROVISIONING" + // "REPAIRING" // "RUNNING" // "STAGING" // "STOPPED" @@ -19232,6 +21422,17 @@ func (s *ManagedInstance) MarshalJSON() ([]byte, error) { } type ManagedInstanceInstanceHealth struct { + // DetailedHealthState: [Output Only] The current detailed instance + // health state. + // + // Possible values: + // "DRAINING" + // "HEALTHY" + // "TIMEOUT" + // "UNHEALTHY" + // "UNKNOWN" + DetailedHealthState string `json:"detailedHealthState,omitempty"` + // HealthCheck: [Output Only] The URL for the health check that verifies // whether the instance is healthy. 
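The new DetailedHealthState above refines the older HealthState with DRAINING, TIMEOUT and UNKNOWN values. A sketch of counting instances that need attention (compute alias assumed).

func countUnhealthy(healths []*compute.ManagedInstanceInstanceHealth) int {
    n := 0
    for _, h := range healths {
        switch h.DetailedHealthState {
        case "UNHEALTHY", "TIMEOUT", "UNKNOWN":
            n++
        }
    }
    return n
}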
HealthCheck string `json:"healthCheck,omitempty"` @@ -19243,20 +21444,21 @@ type ManagedInstanceInstanceHealth struct { // "UNHEALTHY" HealthState string `json:"healthState,omitempty"` - // ForceSendFields is a list of field names (e.g. "HealthCheck") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "DetailedHealthState") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthCheck") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "DetailedHealthState") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -19592,6 +21794,173 @@ func (s *MetadataItems) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MetadataCredentialsFromPlugin: Custom authenticator credentials. +type MetadataCredentialsFromPlugin struct { + // Name: Plugin name. + Name string `json:"name,omitempty"` + + // StructConfig: A text proto that conforms to a Struct type definition + // interpreted by the plugin. + StructConfig string `json:"structConfig,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MetadataCredentialsFromPlugin) MarshalJSON() ([]byte, error) { + type NoMethod MetadataCredentialsFromPlugin + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MetadataFilter: Opaque filter criteria used by loadbalancers to +// restrict routing configuration to a limited set of loadbalancing +// proxies. 
Proxies and sidecars involved in loadbalancing would +// typically present metadata to the loadbalancers which need to match +// criteria specified here. If a match takes place, the relevant routing +// configuration is made available to those proxies. +// For each metadataFilter in this list, if its filterMatchCriteria is +// set to MATCH_ANY, at least one of the filterLabels must match the +// corresponding label provided in the metadata. If its +// filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels +// must match with corresponding labels in the provided metadata. +// An example for using metadataFilters would be: if loadbalancing +// involves Envoys, they will only receive routing configuration when +// values in metadataFilters match values supplied in , or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
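The requestId semantics described above make this mutating call safe to retry: the first attempt and any retries share one UUID, so the server can drop duplicates. A minimal sketch, assuming the generated package's usual constructor and the SignedUrlKey field names (the import path, project name, and key values are illustrative):

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path for this generated package
)

func main() {
	ctx := context.Background()

	// Application Default Credentials; the scope constant comes from the generated package.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client) // assumed constructor used by this vintage of the package
	if err != nil {
		log.Fatal(err)
	}

	key := &compute.SignedUrlKey{
		KeyName:  "my-key",                   // illustrative key name
		KeyValue: "pPsVemX8GM46QVeezid6Rw==", // illustrative 128-bit key, base64url-encoded
	}

	// Reuse one request ID for the first attempt and every retry so the server
	// recognises them as the same logical request and ignores duplicates.
	requestID := "123e4567-e89b-12d3-a456-426614174000" // generate a fresh UUID per logical request in real code

	op, err := svc.BackendBuckets.
		AddSignedUrlKey("my-project", "my-backend-bucket", key).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, op.Status)
}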
// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsAddSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { +func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsAddSignedUrlKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { +func (c *BackendBucketsAddSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsAddSignedUrlKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersAggregatedListCall) Header() http.Header { +func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "backendBucket": c.backendBucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.aggregatedList" call. -// Exactly one of *AutoscalerAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { +// Do executes the "compute.backendBuckets.addSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43316,7 +48357,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AutoscalerAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43328,34 +48369,18 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos } return ret, nil // { - // "description": "Retrieves an aggregated list of autoscalers.", - // "httpMethod": "GET", - // "id": "compute.autoscalers.aggregatedList", + // "description": "Adds a key for validating requests with signed URLs for this backend bucket.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.addSignedUrlKey", // "parameterOrder": [ - // "project" + // "project", + // "backendBucket" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -43364,60 +48389,44 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/autoscalers", + // "path": "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + // "request": { + // "$ref": "SignedUrlKey" + // }, // "response": { - // "$ref": "AutoscalerAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.autoscalers.delete": +// method id "compute.backendBuckets.delete": -type AutoscalersDeleteCall struct { - s *Service - project string - zone string - autoscaler string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendBucketsDeleteCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified autoscaler. -func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { - c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified BackendBucket resource. +func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { + c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler + c.backendBucket = backendBucket return c } @@ -43435,7 +48444,7 @@ func (r *AutoscalersService) Delete(project string, zone string, autoscaler stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { +func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -43443,7 +48452,7 @@ func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { +func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43451,21 +48460,21 @@ func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { +func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AutoscalersDeleteCall) Header() http.Header { +func (c *BackendBucketsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -43474,7 +48483,7 @@ func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -43482,21 +48491,20 @@ func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, + "project": c.project, + "backendBucket": c.backendBucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.delete" call. +// Do executes the "compute.backendBuckets.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43527,19 +48535,18 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified autoscaler.", + // "description": "Deletes the specified BackendBucket resource.", // "httpMethod": "DELETE", - // "id": "compute.autoscalers.delete", + // "id": "compute.backendBuckets.delete", // "parameterOrder": [ // "project", - // "zone", - // "autoscaler" + // "backendBucket" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", + // "backendBucket": { + // "description": "Name of the BackendBucket resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -43554,16 +48561,181 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.deleteSignedUrlKey": + +type BackendBucketsDeleteSignedUrlKeyCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteSignedUrlKey: Deletes a key for validating requests with signed +// URLs for this backend bucket. +func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { + c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.urlParams_.Set("keyName", keyName) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsDeleteSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsDeleteSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.deleteSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a key for validating requests with signed URLs for this backend bucket.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.deleteSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendBucket", + // "keyName" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "keyName": { + // "description": "The name of the Signed URL Key to delete.", + // "location": "query", + // "required": true, + // "type": "string" // }, - // "zone": { - // "description": "Name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", // "response": { // "$ref": "Operation" // }, @@ -43575,33 +48747,187 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.autoscalers.get": +// method id "compute.backendBuckets.get": -type AutoscalersGetCall struct { +type BackendBucketsGetCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified BackendBucket resource. Gets a list of +// available backend buckets by making a list() request. +func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { + c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.get" call. +// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx +// status code is an error. 
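A short sketch of the ETag machinery documented just above for this Get call: pass the ETag from an earlier response via IfNoneMatch and use googleapi.IsNotModified to recognise the not-modified case. The import path is assumed; the previous ETag would normally come from the earlier response's ServerResponse headers.

package computeexamples

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
	"google.golang.org/api/googleapi"
)

// getIfChanged refetches a backend bucket only if it has changed since the
// response that produced prevETag. It returns (nil, nil) when the server
// answers 304 Not Modified.
func getIfChanged(ctx context.Context, svc *compute.Service, project, bucket, prevETag string) (*compute.BackendBucket, error) {
	bb, err := svc.BackendBuckets.Get(project, bucket).
		IfNoneMatch(prevETag). // ETag taken from a previous response's headers
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// The cached copy is still current; nothing new to return.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return bb, nil
}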
Response headers are in either +// *BackendBucket.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendBucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.backendBuckets.get", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "response": { + // "$ref": "BackendBucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendBuckets.getIamPolicy": + +type BackendBucketsGetIamPolicyCall struct { s *Service project string - zone string - autoscaler string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified autoscaler resource. Gets a list of -// available autoscalers by making a list() request. -func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { - c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *BackendBucketsService) GetIamPolicy(project string, resource string) *BackendBucketsGetIamPolicyCall { + c := &BackendBucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { +func (c *BackendBucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BackendBucketsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43611,7 +48937,7 @@ func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { +func (c *BackendBucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BackendBucketsGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -43619,21 +48945,21 @@ func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { +func (c *BackendBucketsGetIamPolicyCall) Context(ctx context.Context) *BackendBucketsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersGetCall) Header() http.Header { +func (c *BackendBucketsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -43645,7 +48971,7 @@ func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -43653,21 +48979,20 @@ func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { +// Do executes the "compute.backendBuckets.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
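The getIamPolicy call shown here is normally paired with the setIamPolicy call defined later in this file in a read-modify-write cycle. A hedged sketch, assuming the Policy, Binding, and GlobalSetPolicyRequest shapes follow the usual generated compute types (role and member values are illustrative):

package computeexamples

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// grantRole reads the backend bucket's IAM policy, appends a binding, and
// writes the policy back. The etag carried inside the returned policy lets
// the server detect concurrent modifications when it is sent back.
func grantRole(ctx context.Context, svc *compute.Service, project, bucket, role, member string) error {
	policy, err := svc.BackendBuckets.GetIamPolicy(project, bucket).Context(ctx).Do()
	if err != nil {
		return err
	}

	policy.Bindings = append(policy.Bindings, &compute.Binding{
		Role:    role,             // e.g. "roles/compute.viewer" (illustrative)
		Members: []string{member}, // e.g. "user:alice@example.com" (illustrative)
	})

	_, err = svc.BackendBuckets.SetIamPolicy(project, bucket, &compute.GlobalSetPolicyRequest{
		Policy: policy, // assumed field name, matching other generated set-policy requests
	}).Context(ctx).Do()
	return err
}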
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BackendBucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43686,7 +49011,7 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Autoscaler{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43698,22 +49023,14 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } return ret, nil // { - // "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.autoscalers.get", + // "id": "compute.backendBuckets.getIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "autoscaler" + // "resource" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -43721,17 +49038,17 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro // "required": true, // "type": "string" // }, - // "zone": { - // "description": "Name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "{project}/global/backendBuckets/{resource}/getIamPolicy", // "response": { - // "$ref": "Autoscaler" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -43742,25 +49059,23 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } -// method id "compute.autoscalers.insert": +// method id "compute.backendBuckets.insert": -type AutoscalersInsertCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendBucketsInsertCall struct { + s *Service + project string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. -func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { - c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a BackendBucket resource in the specified project +// using the data included in the request. 
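As an illustration of the Insert call documented just above (its definition follows below), this sketch creates a BackendBucket that fronts an existing Cloud Storage bucket. The import path and the EnableCdn field are assumptions; the insert is asynchronous and returns an Operation.

package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// createBackendBucket inserts a BackendBucket that serves content from an
// existing Cloud Storage bucket and returns the resulting global Operation.
func createBackendBucket(ctx context.Context, svc *compute.Service, project, name, gcsBucket string) (*compute.Operation, error) {
	bb := &compute.BackendBucket{
		Name:        name,
		BucketName:  gcsBucket, // Cloud Storage bucket to serve from
		Description: "created by example sketch",
		EnableCdn:   true, // assumed field; enables Cloud CDN for this backend
	}

	op, err := svc.BackendBuckets.Insert(project, bb).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	// The insert is asynchronous; callers typically poll the returned
	// Operation (for example via the global operations service) until DONE.
	fmt.Println("insert operation:", op.Name, op.Status)
	return op, nil
}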
+func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { + c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler + c.backendbucket = backendbucket return c } @@ -43778,7 +49093,7 @@ func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Aut // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { +func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -43786,7 +49101,7 @@ func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { +func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43794,35 +49109,35 @@ func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { +func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersInsertCall) Header() http.Header { +func (c *BackendBucketsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -43831,19 +49146,18 @@ func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.insert" call. +// Do executes the "compute.backendBuckets.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43874,12 +49188,11 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.autoscalers.insert", + // "id": "compute.backendBuckets.insert", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "project": { @@ -43893,18 +49206,11 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/global/backendBuckets", // "request": { - // "$ref": "Autoscaler" + // "$ref": "BackendBucket" // }, // "response": { // "$ref": "Operation" @@ -43917,24 +49223,22 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.autoscalers.list": +// method id "compute.backendBuckets.list": -type AutoscalersListCall struct { +type BackendBucketsListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of autoscalers contained within the specified -// zone. -func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { - c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of BackendBucket resources available to the +// specified project. +func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { + c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -43960,7 +49264,7 @@ func (r *AutoscalersService) List(project string, zone string) *AutoscalersListC // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
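The filter grammar spelled out above, together with maxResults and orderBy, is easiest to see in use. The sketch below lists backend buckets matching a name filter and walks every page with the Pages helper defined a little further down in this file (import path assumed):

package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// listBuckets prints every backend bucket whose name is not "example-bucket",
// fetching up to 100 results per page; Pages follows nextPageToken for us.
func listBuckets(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.BackendBuckets.List(project).
		Filter(`name != example-bucket`). // same expression syntax as documented above
		MaxResults(100).
		OrderBy("creationTimestamp desc") // newest first, per the orderBy docs

	return call.Pages(ctx, func(page *compute.BackendBucketList) error {
		for _, bb := range page.Items {
			fmt.Println(bb.Name, bb.SelfLink)
		}
		return nil // a non-nil error here would stop the iteration
	})
}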
-func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { +func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { c.urlParams_.Set("filter", filter) return c } @@ -43971,7 +49275,7 @@ func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { +func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -43988,7 +49292,7 @@ func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { +func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -43996,7 +49300,7 @@ func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { +func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -44004,7 +49308,7 @@ func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { +func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44014,7 +49318,7 @@ func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { +func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { c.ifNoneMatch_ = entityTag return c } @@ -44022,21 +49326,21 @@ func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { +func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AutoscalersListCall) Header() http.Header { +func (c *BackendBucketsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -44048,7 +49352,7 @@ func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -44057,19 +49361,18 @@ func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.list" call. -// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AutoscalerList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.backendBuckets.list" call. +// Exactly one of *BackendBucketList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendBucketList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { +func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44088,7 +49391,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AutoscalerList{ + ret := &BackendBucketList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44100,12 +49403,11 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified zone.", + // "description": "Retrieves the list of BackendBucket resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.autoscalers.list", + // "id": "compute.backendBuckets.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -44137,18 +49439,11 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/global/backendBuckets", // "response": { - // "$ref": "AutoscalerList" + // "$ref": "BackendBucketList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -44162,7 +49457,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { +func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -44180,33 +49475,26 @@ func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) } } -// method id "compute.autoscalers.patch": +// method id "compute.backendBuckets.patch": -type AutoscalersPatchCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendBucketsPatchCall struct { + s *Service + project string + backendBucket string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates an autoscaler in the specified project using the data +// Patch: Updates the specified BackendBucket resource with the data // included in the request. This method supports PATCH semantics and // uses the JSON merge patch format and processing rules. 
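Because Patch sends a JSON merge patch and the generated structs omit zero-valued fields, turning a boolean off requires the ForceSendFields mechanism described in the struct comments earlier in this diff. A hedged sketch, assuming the EnableCdn field and import path:

package computeexamples

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// disableCDN patches a backend bucket so Cloud CDN is turned off. EnableCdn
// is false, Go's zero value, so it must be named in ForceSendFields or the
// JSON encoder would omit it from the merge-patch body.
func disableCDN(ctx context.Context, svc *compute.Service, project, bucket string) (*compute.Operation, error) {
	patch := &compute.BackendBucket{
		EnableCdn:       false,
		ForceSendFields: []string{"EnableCdn"},
	}
	return svc.BackendBuckets.Patch(project, bucket, patch).Context(ctx).Do()
}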
-func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { - c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { + c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.backendBucket = backendBucket + c.backendbucket = backendbucket return c } @@ -44224,7 +49512,7 @@ func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { +func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -44232,7 +49520,7 @@ func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { +func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44240,35 +49528,35 @@ func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { +func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersPatchCall) Header() http.Header { +func (c *BackendBucketsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -44276,20 +49564,20 @@ func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "backendBucket": c.backendBucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.patch" call. +// Do executes the "compute.backendBuckets.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44320,18 +49608,19 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", - // "id": "compute.autoscalers.patch", + // "id": "compute.backendBuckets.patch", // "parameterOrder": [ // "project", - // "zone" + // "backendBucket" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "backendBucket": { + // "description": "Name of the BackendBucket resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -44345,18 +49634,11 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/global/backendBuckets/{backendBucket}", // "request": { - // "$ref": "Autoscaler" + // "$ref": "BackendBucket" // }, // "response": { // "$ref": "Operation" @@ -44369,34 +49651,32 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.autoscalers.testIamPermissions": +// method id "compute.backendBuckets.setIamPolicy": -type AutoscalersTestIamPermissionsCall struct { +type BackendBucketsSetIamPolicyCall struct { s *Service project string - zone string resource string - testpermissionsrequest *TestPermissionsRequest + globalsetpolicyrequest *GlobalSetPolicyRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *AutoscalersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *AutoscalersTestIamPermissionsCall { - c := &AutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *BackendBucketsService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *BackendBucketsSetIamPolicyCall { + c := &BackendBucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *AutoscalersTestIamPermissionsCall { +func (c *BackendBucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BackendBucketsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44404,35 +49684,35 @@ func (c *AutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *Autosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersTestIamPermissionsCall) Context(ctx context.Context) *AutoscalersTestIamPermissionsCall { +func (c *BackendBucketsSetIamPolicyCall) Context(ctx context.Context) *BackendBucketsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
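For the compute.backendBuckets.patch call completed above, a short sketch of the intended usage: because the method uses JSON merge patch, only the fields populated on the request body are changed, and a caller-supplied requestId makes a retried call idempotent. Illustrative only, reusing the client setup from the earlier sketch; Description is just an example of a patchable field.

// patchBackendBucketDescription changes only the description of an existing
// backend bucket. requestID must be a valid, non-zero UUID; reusing the same
// value on a retry lets the server drop the duplicate mutation.
func patchBackendBucketDescription(ctx context.Context, svc *compute.Service, project, name, desc, requestID string) (*compute.Operation, error) {
	patch := &compute.BackendBucket{Description: desc} // unset fields are left untouched by merge patch
	return svc.BackendBuckets.Patch(project, name, patch).
		RequestId(requestID).
		Context(ctx).
		Do()
}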
-func (c *AutoscalersTestIamPermissionsCall) Header() http.Header { +func (c *BackendBucketsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -44441,20 +49721,19 @@ func (c *AutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Respons req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.backendBuckets.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BackendBucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44473,7 +49752,7 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44485,12 +49764,11 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.autoscalers.testIamPermissions", + // "id": "compute.backendBuckets.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", // "resource" // ], // "parameters": { @@ -44504,86 +49782,52 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", + // "path": "{project}/global/backendBuckets/{resource}/setIamPolicy", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.autoscalers.update": +// method id "compute.backendBuckets.testIamPermissions": -type AutoscalersUpdateCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendBucketsTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates an autoscaler in the specified project using the data -// included in the request. -func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { - c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *BackendBucketsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendBucketsTestIamPermissionsCall { + c := &BackendBucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. -func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. 
This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { +func (c *BackendBucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendBucketsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44591,56 +49835,56 @@ func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { +func (c *BackendBucketsTestIamPermissionsCall) Context(ctx context.Context) *BackendBucketsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersUpdateCall) Header() http.Header { +func (c *BackendBucketsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.backendBuckets.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. 
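The compute.backendBuckets.setIamPolicy call above replaces the whole policy on the resource, so the usual pattern is read-modify-write with the etag carried along. A hedged sketch, assuming GlobalSetPolicyRequest wraps a *Policy and that Policy/Binding expose Bindings, Role and Members as in the other generated IAM types (not shown in this hunk):

// grantViewerOnBackendBucket appends a binding to a previously fetched policy and
// writes it back. Keeping prior.Etag intact is what protects against clobbering a
// concurrent policy change.
func grantViewerOnBackendBucket(ctx context.Context, svc *compute.Service, project, resource string, prior *compute.Policy) (*compute.Policy, error) {
	prior.Bindings = append(prior.Bindings, &compute.Binding{
		Role:    "roles/compute.viewer",
		Members: []string{"serviceAccount:ci@example-project.iam.gserviceaccount.com"},
	})
	req := &compute.GlobalSetPolicyRequest{Policy: prior}
	return svc.BackendBuckets.SetIamPolicy(project, resource, req).Context(ctx).Do()
}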
Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendBucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44659,7 +49903,7 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44671,20 +49915,14 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.autoscalers.update", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.testIamPermissions", // "parameterOrder": [ // "project", - // "zone" + // "resource" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -44692,53 +49930,49 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/global/backendBuckets/{resource}/testIamPermissions", // "request": { - // "$ref": "Autoscaler" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendBuckets.addSignedUrlKey": +// method id "compute.backendBuckets.update": -type BackendBucketsAddSignedUrlKeyCall struct { +type BackendBucketsUpdateCall struct { s *Service project string backendBucket string - signedurlkey *SignedUrlKey + backendbucket *BackendBucket urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// AddSignedUrlKey: Adds a key for validating requests with signed URLs -// for this backend bucket. -func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { - c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified BackendBucket resource with the data +// included in the request. +func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { + c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.backendBucket = backendBucket - c.signedurlkey = signedurlkey + c.backendbucket = backendbucket return c } @@ -44756,7 +49990,7 @@ func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket st // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsAddSignedUrlKeyCall { +func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -44764,7 +49998,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *Backend // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsAddSignedUrlKeyCall { +func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44772,37 +50006,37 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *Backen // Context sets the context to be used in this call's Do method. 
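compute.backendBuckets.testIamPermissions, wrapped up above, answers which of a set of permissions the caller actually holds on the resource. A small sketch, assuming the Permissions field name on both the request and response structs and using a plausible permission string; neither is spelled out in this hunk:

// canPatchBackendBucket reports whether the caller holds compute.backendBuckets.update
// on the given resource; the response echoes back only the permissions that are held.
func canPatchBackendBucket(ctx context.Context, svc *compute.Service, project, resource string) (bool, error) {
	resp, err := svc.BackendBuckets.TestIamPermissions(project, resource, &compute.TestPermissionsRequest{
		Permissions: []string{"compute.backendBuckets.update"},
	}).Context(ctx).Do()
	if err != nil {
		return false, err
	}
	return len(resp.Permissions) == 1, nil
}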
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsAddSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsAddSignedUrlKeyCall { +func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { +func (c *BackendBucketsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } @@ -44814,14 +50048,14 @@ func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.addSignedUrlKey" call. +// Do executes the "compute.backendBuckets.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44852,17 +50086,18 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Adds a key for validating requests with signed URLs for this backend bucket.", - // "httpMethod": "POST", - // "id": "compute.backendBuckets.addSignedUrlKey", + // "description": "Updates the specified BackendBucket resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.backendBuckets.update", // "parameterOrder": [ // "project", // "backendBucket" // ], // "parameters": { // "backendBucket": { - // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", + // "description": "Name of the BackendBucket resource to update.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -44879,9 +50114,9 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + // "path": "{project}/global/backendBuckets/{backendBucket}", // "request": { - // "$ref": "SignedUrlKey" + // "$ref": "BackendBucket" // }, // "response": { // "$ref": "Operation" @@ -44894,22 +50129,25 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.backendBuckets.delete": +// method id "compute.backendServices.addSignedUrlKey": -type BackendBucketsDeleteCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesAddSignedUrlKeyCall struct { + s *Service + project string + backendService string + signedurlkey *SignedUrlKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified BackendBucket resource. -func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { - c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddSignedUrlKey: Adds a key for validating requests with signed URLs +// for this backend service. +func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { + c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket + c.backendService = backendService + c.signedurlkey = signedurlkey return c } @@ -44927,7 +50165,7 @@ func (r *BackendBucketsService) Delete(project string, backendBucket string) *Ba // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { +func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *BackendServicesAddSignedUrlKeyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -44935,7 +50173,7 @@ func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { +func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesAddSignedUrlKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44943,51 +50181,56 @@ func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
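Unlike Patch, the compute.backendBuckets.update call above issues a PUT that replaces the whole resource, so the safe pattern is to fetch the current object, edit it, and send it back. A sketch under the assumption that BackendBuckets.Get and the BucketName field exist as elsewhere in this generated file (they are defined outside this hunk):

// repointBackendBucket switches a backend bucket to a different Cloud Storage bucket
// by round-tripping the full resource through Get and Update.
func repointBackendBucket(ctx context.Context, svc *compute.Service, project, name, gcsBucket string) (*compute.Operation, error) {
	bb, err := svc.BackendBuckets.Get(project, name).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	bb.BucketName = gcsBucket // the PUT body replaces every field, so send the whole object back
	return svc.BackendBuckets.Update(project, name, bb).Context(ctx).Do()
}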
-func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { +func (c *BackendServicesAddSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesAddSignedUrlKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsDeleteCall) Header() http.Header { +func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.delete" call. +// Do executes the "compute.backendServices.addSignedUrlKey" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45018,18 +50261,17 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified BackendBucket resource.", - // "httpMethod": "DELETE", - // "id": "compute.backendBuckets.delete", + // "description": "Adds a key for validating requests with signed URLs for this backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.addSignedUrlKey", // "parameterOrder": [ // "project", - // "backendBucket" + // "backendService" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to delete.", + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -45046,7 +50288,10 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/backendServices/{backendService}/addSignedUrlKey", + // "request": { + // "$ref": "SignedUrlKey" + // }, // "response": { // "$ref": "Operation" // }, @@ -45058,81 +50303,498 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.deleteSignedUrlKey": +// method id "compute.backendServices.aggregatedList": -type BackendBucketsDeleteSignedUrlKeyCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DeleteSignedUrlKey: Deletes a key for validating requests with signed -// URLs for this backend bucket. -func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { - c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of all BackendService resources, +// regional and global, available to the specified project. +func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { + c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.urlParams_.Set("keyName", keyName) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsDeleteSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. 
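The compute.backendServices.addSignedUrlKey call introduced above (and its deleteSignedUrlKey counterpart later in this hunk) manage the Cloud CDN signed-URL keys on a backend service; adding the new key before removing the old one gives a rotation with no window where signed URLs fail. Illustrative only: SignedUrlKey's KeyName/KeyValue field names and the base64url-encoded key material are assumptions, not shown here.

// rotateSignedURLKey installs newKeyName/newKeyValue and then drops oldKeyName.
// Both calls return an Operation that should be polled before relying on the change.
func rotateSignedURLKey(ctx context.Context, svc *compute.Service, project, backendService, oldKeyName, newKeyName, newKeyValue string) error {
	if _, err := svc.BackendServices.AddSignedUrlKey(project, backendService, &compute.SignedUrlKey{
		KeyName:  newKeyName,
		KeyValue: newKeyValue, // 128-bit key, base64url encoded
	}).Context(ctx).Do(); err != nil {
		return err
	}
	_, err := svc.BackendServices.DeleteSignedUrlKey(project, backendService, oldKeyName).Context(ctx).Do()
	return err
}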
You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("filter", filter) return c } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteSignedUrlKeyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
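The setters documented above (Filter, MaxResults, OrderBy, PageToken, IfNoneMatch) all apply to the new compute.backendServices.aggregatedList call. A small sketch of combining them, mirroring the filter grammar described in those comments; the exact filter string and page size are illustrative:

// listRecentBackendServices asks for everything except an assumed "default-backend"
// service, newest first, capped at 100 results per page.
func listRecentBackendServices(ctx context.Context, svc *compute.Service, project string) (*compute.BackendServiceAggregatedList, error) {
	return svc.BackendServices.AggregatedList(project).
		Filter(`name != "default-backend"`). // field name, comparison operator, value
		OrderBy("creationTimestamp desc").   // newest result first
		MaxResults(100).
		Context(ctx).
		Do()
}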
+func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsDeleteSignedUrlKeyCall { +func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { +func (c *BackendServicesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.aggregatedList" call. +// Exactly one of *BackendServiceAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *BackendServiceAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendServiceAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.backendServices.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/backendServices", + // "response": { + // "$ref": "BackendServiceAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
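The aggregated response groups backend services by scope (global plus each region), and the Pages helper declared just below drains every page of it. A hedged sketch, assuming Items is a map from scope key to a scoped list with a BackendServices slice, which matches the other generated aggregated-list types but is not shown in this hunk:

// forEachBackendService invokes visit once per backend service across all scopes
// and all pages; a non-nil error from the Pages callback stops the walk early.
func forEachBackendService(ctx context.Context, svc *compute.Service, project string, visit func(scope string, bs *compute.BackendService)) error {
	return svc.BackendServices.AggregatedList(project).Pages(ctx, func(page *compute.BackendServiceAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, bs := range scoped.BackendServices {
				visit(scope, bs)
			}
		}
		return nil
	})
}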
+func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.backendServices.delete": + +type BackendServicesDeleteCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified BackendService resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete +func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { + c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified BackendService resource.", + // "httpMethod": "DELETE", + // "id": "compute.backendServices.delete", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.deleteSignedUrlKey": + +type BackendServicesDeleteSignedUrlKeyCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteSignedUrlKey: Deletes a key for validating requests with signed +// URLs for this backend service. +func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { + c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.urlParams_.Set("keyName", keyName) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendServicesDeleteSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesDeleteSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesDeleteSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
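compute.backendServices.delete, shown above, is another asynchronous mutation guarded by the optional requestId. A short sketch reusing the earlier client setup; passing the same requestID on a retry after a timeout keeps the server from treating it as a second delete:

// deleteBackendService starts deletion of a global backend service. requestID must be
// a valid, non-zero UUID and should be generated once per logical delete, not per attempt.
func deleteBackendService(ctx context.Context, svc *compute.Service, project, name, requestID string) (*compute.Operation, error) {
	return svc.BackendServices.Delete(project, name).
		RequestId(requestID).
		Context(ctx).
		Do()
}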
+func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -45140,20 +50802,20 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Resp } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.deleteSignedUrlKey" call. +// Do executes the "compute.backendServices.deleteSignedUrlKey" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45184,17 +50846,17 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a key for validating requests with signed URLs for this backend bucket.", + // "description": "Deletes a key for validating requests with signed URLs for this backend service.", // "httpMethod": "POST", - // "id": "compute.backendBuckets.deleteSignedUrlKey", + // "id": "compute.backendServices.deleteSignedUrlKey", // "parameterOrder": [ // "project", - // "backendBucket", + // "backendService", // "keyName" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -45218,7 +50880,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", + // "path": "{project}/global/backendServices/{backendService}/deleteSignedUrlKey", // "response": { // "$ref": "Operation" // }, @@ -45230,31 +50892,32 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } -// method id "compute.backendBuckets.get": +// method id "compute.backendServices.get": -type BackendBucketsGetCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendServicesGetCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified BackendBucket resource. Gets a list of -// available backend buckets by making a list() request. -func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { - c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified BackendService resource. Gets a list of +// available backend services. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get +func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { + c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket + c.backendService = backendService return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { +func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45264,7 +50927,7 @@ func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { +func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -45272,21 +50935,21 @@ func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { +func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
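The Get builder above supports conditional fetches through IfNoneMatch, with googleapi.IsNotModified distinguishing a 304 from a real failure, and Fields requesting a partial response. A minimal sketch under the same placeholder assumptions (svc is an initialized *compute.Service as in the earlier sketch, cachedETag comes from a previous response, and the field list is illustrative):

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed path
	"google.golang.org/api/googleapi"
)

// getIfChanged refetches a backend service only if it has changed since the
// response that produced cachedETag.
func getIfChanged(ctx context.Context, svc *compute.Service, cachedETag string) (*compute.BackendService, error) {
	bs, err := svc.BackendServices.Get("my-project", "my-backend-service").
		IfNoneMatch(cachedETag).
		Fields("name", "protocol"). // partial response, per the Fields doc above
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("backend service unchanged; keep using the cached copy")
		return nil, nil
	}
	return bs, err
}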
-func (c *BackendBucketsGetCall) Header() http.Header { +func (c *BackendServicesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -45298,7 +50961,7 @@ func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -45306,20 +50969,20 @@ func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.get" call. -// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx +// Do executes the "compute.backendServices.get" call. +// Exactly one of *BackendService or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *BackendBucket.ServerResponse.Header or (if a response was returned +// *BackendService.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { +func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45338,7 +51001,7 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendBucket{ + ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45350,18 +51013,18 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } return ret, nil // { - // "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request.", + // "description": "Returns the specified BackendService resource. 
Gets a list of available backend services.", // "httpMethod": "GET", - // "id": "compute.backendBuckets.get", + // "id": "compute.backendServices.get", // "parameterOrder": [ // "project", - // "backendBucket" + // "backendService" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to return.", + // "backendService": { + // "description": "Name of the BackendService resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -45373,9 +51036,9 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/backendServices/{backendService}", // "response": { - // "$ref": "BackendBucket" + // "$ref": "BackendService" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -45386,96 +51049,90 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } -// method id "compute.backendBuckets.getIamPolicy": +// method id "compute.backendServices.getHealth": -type BackendBucketsGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendServicesGetHealthCall struct { + s *Service + project string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *BackendBucketsService) GetIamPolicy(project string, resource string) *BackendBucketsGetIamPolicyCall { - c := &BackendBucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetHealth: Gets the most recent health check results for this +// BackendService. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth +func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { + c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BackendBucketsGetIamPolicyCall { +func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
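To illustrate the getHealth call introduced above (a sketch only): the request body is a ResourceGroupReference naming one backend instance group, and the response carries per-instance health. The project, service, and group URL below are placeholders.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed path
)

// printBackendHealth POSTs a ResourceGroupReference for one backend group and
// prints the health state reported for each instance in it.
func printBackendHealth(ctx context.Context, svc *compute.Service) error {
	ref := &compute.ResourceGroupReference{
		// Placeholder fully-qualified instance-group URL.
		Group: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instanceGroups/my-ig",
	}
	health, err := svc.BackendServices.
		GetHealth("my-project", "my-backend-service", ref).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, hs := range health.HealthStatus {
		fmt.Printf("%s: %s\n", hs.Instance, hs.HealthState)
	}
	return nil
}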
-func (c *BackendBucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BackendBucketsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsGetIamPolicyCall) Context(ctx context.Context) *BackendBucketsGetIamPolicyCall { +func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsGetIamPolicyCall) Header() http.Header { +func (c *BackendServicesGetHealthCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BackendBucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.backendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45494,7 +51151,7 @@ func (c *BackendBucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &BackendServiceGroupHealth{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45506,32 +51163,34 @@ func (c *BackendBucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.backendBuckets.getIamPolicy", + // "description": "Gets the most recent health check results for this BackendService.", + // "httpMethod": "POST", + // "id": "compute.backendServices.getHealth", // "parameterOrder": [ // "project", - // "resource" + // "backendService" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "backendService": { + // "description": "Name of the BackendService resource to which the queried instance belongs.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{resource}/getIamPolicy", + // "path": "{project}/global/backendServices/{backendService}/getHealth", + // "request": { + // "$ref": "ResourceGroupReference" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "BackendServiceGroupHealth" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -45542,23 +51201,26 @@ func (c *BackendBucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli } -// method id "compute.backendBuckets.insert": +// method id "compute.backendServices.insert": -type BackendBucketsInsertCall struct { - s *Service - project string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesInsertCall struct { + s *Service + project string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a BackendBucket resource in the specified project -// using the data included in the request. -func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { - c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a BackendService resource in the specified project +// using the data included in the request. There are several +// restrictions and guidelines to keep in mind when creating a backend +// service. Read Restrictions and Guidelines for more information. 
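As a hedged sketch of the Insert call described above: the request body is a BackendService and the result is a long-running Operation. The field values are illustrative only; a real service needs backends, health checks, and protocol/scheme settings appropriate to the load balancer.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed path
)

// createBackendService starts creation of a backend service and logs the
// resulting operation name; callers would normally poll the operation.
func createBackendService(ctx context.Context, svc *compute.Service, requestID string) error {
	bs := &compute.BackendService{
		Name:         "my-backend-service",
		Protocol:     "HTTP",
		TimeoutSec:   30,
		HealthChecks: []string{"global/healthChecks/my-health-check"}, // placeholder
	}
	op, err := svc.BackendServices.
		Insert("my-project", bs).
		RequestId(requestID). // optional idempotency token, as documented above
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("insert started: operation %s", op.Name)
	return nil
}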
+// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert +func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { + c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendbucket = backendbucket + c.backendservice = backendservice return c } @@ -45576,7 +51238,7 @@ func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBuc // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { +func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -45584,7 +51246,7 @@ func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsIn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { +func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45592,35 +51254,35 @@ func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { +func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsInsertCall) Header() http.Header { +func (c *BackendServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -45633,14 +51295,14 @@ func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.insert" call. +// Do executes the "compute.backendServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45671,9 +51333,9 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", + // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", // "httpMethod": "POST", - // "id": "compute.backendBuckets.insert", + // "id": "compute.backendServices.insert", // "parameterOrder": [ // "project" // ], @@ -45691,9 +51353,9 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets", + // "path": "{project}/global/backendServices", // "request": { - // "$ref": "BackendBucket" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -45706,9 +51368,9 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.list": +// method id "compute.backendServices.list": -type BackendBucketsListCall struct { +type BackendServicesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -45717,10 +51379,11 @@ type BackendBucketsListCall struct { header_ http.Header } -// List: Retrieves the list of BackendBucket resources available to the +// List: Retrieves the list of BackendService resources available to the // specified project. -func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { - c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list +func (r *BackendServicesService) List(project string) *BackendServicesListCall { + c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -45747,7 +51410,7 @@ func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { +func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { c.urlParams_.Set("filter", filter) return c } @@ -45758,7 +51421,7 @@ func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { +func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -45775,7 +51438,7 @@ func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsLis // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { +func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -45783,7 +51446,7 @@ func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { +func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -45791,7 +51454,7 @@ func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsList // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { +func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45801,7 +51464,7 @@ func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsLis // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { +func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { c.ifNoneMatch_ = entityTag return c } @@ -45809,21 +51472,21 @@ func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsLi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { +func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
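A hedged sketch of the List builder and its Filter, OrderBy, and MaxResults parameters documented above, fetching a single page. The filter expression and field names are illustrative; valid filter fields depend on the resource.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed path
)

// listNewestFirst fetches one page of backend services, newest first.
func listNewestFirst(ctx context.Context, svc *compute.Service) error {
	page, err := svc.BackendServices.List("my-project").
		Filter(`protocol = "HTTP"`).       // illustrative filter expression
		OrderBy("creationTimestamp desc"). // newest first, per the OrderBy doc
		MaxResults(50).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, bs := range page.Items {
		fmt.Println(bs.Name)
	}
	if page.NextPageToken != "" {
		fmt.Println("more results; pass NextPageToken to PageToken, or use Pages (next sketch)")
	}
	return nil
}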
-func (c *BackendBucketsListCall) Header() http.Header { +func (c *BackendServicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -45835,7 +51498,7 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -45848,14 +51511,14 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.list" call. -// Exactly one of *BackendBucketList or error will be non-nil. Any +// Do executes the "compute.backendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *BackendBucketList.ServerResponse.Header or (if a response was +// *BackendServiceList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { +func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45874,7 +51537,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendBucketList{ + ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45886,9 +51549,9 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke } return ret, nil // { - // "description": "Retrieves the list of BackendBucket resources available to the specified project.", + // "description": "Retrieves the list of BackendService resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.backendBuckets.list", + // "id": "compute.backendServices.list", // "parameterOrder": [ // "project" // ], @@ -45924,9 +51587,9 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets", + // "path": "{project}/global/backendServices", // "response": { - // "$ref": "BackendBucketList" + // "$ref": "BackendServiceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -45940,7 +51603,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
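The Pages helper documented above hides the pageToken / nextPageToken plumbing: it invokes the callback once per page and stops on the first error. A short sketch, under the same placeholder assumptions as the earlier examples:

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed path
)

// listAll walks every page of backend services in the project.
func listAll(ctx context.Context, svc *compute.Service) error {
	return svc.BackendServices.List("my-project").
		MaxResults(100).
		Pages(ctx, func(page *compute.BackendServiceList) error {
			for _, bs := range page.Items {
				fmt.Println(bs.Name)
			}
			return nil // returning an error here halts the iteration
		})
}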
-func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { +func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -45958,26 +51621,30 @@ func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucke } } -// method id "compute.backendBuckets.patch": +// method id "compute.backendServices.patch": -type BackendBucketsPatchCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesPatchCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified BackendBucket resource with the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. -func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { - c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified BackendService resource with the data +// included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch +func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { + c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket + c.backendService = backendService + c.backendservice = backendservice return c } @@ -45995,7 +51662,7 @@ func (r *BackendBucketsService) Patch(project string, backendBucket string, back // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { +func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -46003,7 +51670,7 @@ func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPat // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { +func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46011,35 +51678,35 @@ func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
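The Patch doc above notes JSON-merge-patch semantics: only the fields carried in the request body change. A hedged sketch of a partial update (field choice and values are placeholders):

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed path
)

// patchTimeout changes a single field; everything else on the backend
// service is left untouched by the merge patch.
func patchTimeout(ctx context.Context, svc *compute.Service, requestID string) error {
	op, err := svc.BackendServices.
		Patch("my-project", "my-backend-service", &compute.BackendService{
			TimeoutSec: 60, // the only field we intend to change
			// To send a zero value instead, the field would have to be
			// listed in the struct's ForceSendFields.
		}).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("patch operation: %s", op.Name)
	return nil
}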
-func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { +func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsPatchCall) Header() http.Header { +func (c *BackendServicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -46047,20 +51714,20 @@ func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.patch" call. +// Do executes the "compute.backendServices.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46091,18 +51758,18 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", - // "id": "compute.backendBuckets.patch", + // "id": "compute.backendServices.patch", // "parameterOrder": [ // "project", - // "backendBucket" + // "backendService" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to patch.", + // "backendService": { + // "description": "Name of the BackendService resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -46119,9 +51786,9 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/backendServices/{backendService}", // "request": { - // "$ref": "BackendBucket" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -46134,32 +51801,51 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.setIamPolicy": +// method id "compute.backendServices.setSecurityPolicy": -type BackendBucketsSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesSetSecurityPolicyCall struct { + s *Service + project string + backendService string + securitypolicyreference *SecurityPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *BackendBucketsService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *BackendBucketsSetIamPolicyCall { - c := &BackendBucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetSecurityPolicy: Sets the security policy for the specified backend +// service. +func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { + c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.backendService = backendService + c.securitypolicyreference = securitypolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
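To illustrate the setSecurityPolicy call described above (a sketch only): the request body is just a SecurityPolicyReference pointing at an existing Cloud Armor policy, and the result is an Operation. The policy URL and requestId are placeholders.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed path
)

// attachSecurityPolicy sets the security policy on a backend service.
func attachSecurityPolicy(ctx context.Context, svc *compute.Service, requestID string) error {
	ref := &compute.SecurityPolicyReference{
		// Placeholder relative URL (a full self-link also works).
		SecurityPolicy: "global/securityPolicies/my-policy",
	}
	op, err := svc.BackendServices.
		SetSecurityPolicy("my-project", "my-backend-service", ref).
		RequestId(requestID). // same optional idempotency token as above
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("setSecurityPolicy operation: %s", op.Name)
	return nil
}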
+func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BackendBucketsSetIamPolicyCall { +func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetSecurityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46167,35 +51853,35 @@ func (c *BackendBucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BackendBu // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsSetIamPolicyCall) Context(ctx context.Context) *BackendBucketsSetIamPolicyCall { +func (c *BackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetSecurityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsSetIamPolicyCall) Header() http.Header { +func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -46203,20 +51889,20 @@ func (c *BackendBucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BackendBucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.backendServices.setSecurityPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46235,7 +51921,7 @@ func (c *BackendBucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46247,14 +51933,20 @@ func (c *BackendBucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Sets the security policy for the specified backend service.", // "httpMethod": "POST", - // "id": "compute.backendBuckets.setIamPolicy", + // "id": "compute.backendServices.setSecurityPolicy", // "parameterOrder": [ // "project", - // "resource" + // "backendService" // ], // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -46262,20 +51954,18 @@ func (c *BackendBucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{resource}/setIamPolicy", + // "path": "{project}/global/backendServices/{backendService}/setSecurityPolicy", // "request": { - // "$ref": "GlobalSetPolicyRequest" + // "$ref": "SecurityPolicyReference" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -46285,9 +51975,9 @@ func (c *BackendBucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli } -// method id "compute.backendBuckets.testIamPermissions": +// method id "compute.backendServices.testIamPermissions": -type BackendBucketsTestIamPermissionsCall struct { +type BackendServicesTestIamPermissionsCall struct { s *Service project string resource string @@ -46299,8 +51989,8 @@ type BackendBucketsTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. -func (r *BackendBucketsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendBucketsTestIamPermissionsCall { - c := &BackendBucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *BackendServicesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendServicesTestIamPermissionsCall { + c := &BackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource c.testpermissionsrequest = testpermissionsrequest @@ -46310,7 +52000,7 @@ func (r *BackendBucketsService) TestIamPermissions(project string, resource stri // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendBucketsTestIamPermissionsCall { +func (c *BackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendServicesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46318,21 +52008,21 @@ func (c *BackendBucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Bac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsTestIamPermissionsCall) Context(ctx context.Context) *BackendBucketsTestIamPermissionsCall { +func (c *BackendServicesTestIamPermissionsCall) Context(ctx context.Context) *BackendServicesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
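A hedged sketch of the testIamPermissions pattern documented above: send the candidate permissions and receive back the subset the caller actually holds. The permission strings and resource name are illustrative.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed path
)

// checkPermissions reports which of the requested permissions the caller
// has on the named backend service.
func checkPermissions(ctx context.Context, svc *compute.Service) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.backendServices.get",
			"compute.backendServices.update",
		},
	}
	resp, err := svc.BackendServices.
		TestIamPermissions("my-project", "my-backend-service", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("granted:", resp.Permissions)
	return nil
}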
-func (c *BackendBucketsTestIamPermissionsCall) Header() http.Header { +func (c *BackendServicesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -46346,7 +52036,7 @@ func (c *BackendBucketsTestIamPermissionsCall) doRequest(alt string) (*http.Resp reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -46360,14 +52050,14 @@ func (c *BackendBucketsTestIamPermissionsCall) doRequest(alt string) (*http.Resp return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.testIamPermissions" call. +// Do executes the "compute.backendServices.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendBucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46400,7 +52090,7 @@ func (c *BackendBucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.backendBuckets.testIamPermissions", + // "id": "compute.backendServices.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -46421,7 +52111,7 @@ func (c *BackendBucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{resource}/testIamPermissions", + // "path": "{project}/global/backendServices/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -46437,25 +52127,28 @@ func (c *BackendBucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } -// method id "compute.backendBuckets.update": +// method id "compute.backendServices.update": -type BackendBucketsUpdateCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesUpdateCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified BackendBucket resource with the data -// included in the request. -func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { - c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified BackendService resource with the data +// included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update +func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { + c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket + c.backendService = backendService + c.backendservice = backendservice return c } @@ -46473,7 +52166,7 @@ func (r *BackendBucketsService) Update(project string, backendBucket string, bac // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { +func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -46481,7 +52174,7 @@ func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUp // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
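In contrast to Patch, the Update call described above is a full PUT: the usual flow is read-modify-write, sending the whole object back. A minimal sketch under the same placeholder assumptions:

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed path
)

// replaceDescription reads the current backend service, modifies it, and
// replaces it in full via Update.
func replaceDescription(ctx context.Context, svc *compute.Service, requestID string) error {
	bs, err := svc.BackendServices.Get("my-project", "my-backend-service").Context(ctx).Do()
	if err != nil {
		return err
	}
	bs.Description = "managed by example tooling" // illustrative change
	op, err := svc.BackendServices.
		Update("my-project", "my-backend-service", bs).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("update operation: %s", op.Name)
	return nil
}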
-func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { +func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46489,35 +52182,35 @@ func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsU // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { +func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsUpdateCall) Header() http.Header { +func (c *BackendServicesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PUT", urls, body) if err != nil { @@ -46525,20 +52218,20 @@ func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.update" call. +// Do executes the "compute.backendServices.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46569,18 +52262,18 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request.", + // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. 
Read Restrictions and Guidelines for more information.", // "httpMethod": "PUT", - // "id": "compute.backendBuckets.update", + // "id": "compute.backendServices.update", // "parameterOrder": [ // "project", - // "backendBucket" + // "backendService" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to update.", + // "backendService": { + // "description": "Name of the BackendService resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -46597,9 +52290,9 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/backendServices/{backendService}", // "request": { - // "$ref": "BackendBucket" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -46612,183 +52305,9 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendServices.addSignedUrlKey": - -type BackendServicesAddSignedUrlKeyCall struct { - s *Service - project string - backendService string - signedurlkey *SignedUrlKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +// method id "compute.diskTypes.aggregatedList": -// AddSignedUrlKey: Adds a key for validating requests with signed URLs -// for this backend service. -func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { - c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.signedurlkey = signedurlkey - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *BackendServicesAddSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesAddSignedUrlKeyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
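The RequestId option documented above is what makes a retried PUT safe: if the first attempt times out, resending with the same UUID lets the server recognize and drop the duplicate. A minimal sketch, assuming default application credentials, placeholder project and backend-service names, and an illustrative change to the standard TimeoutSec field.

package main

import (
	"context"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

// newUUID returns a random RFC 4122 version 4 UUID, the format the requestId
// parameter expects.
func newUUID() string {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		log.Fatal(err)
	}
	b[6] = (b[6] & 0x0f) | 0x40 // version 4
	b[8] = (b[8] & 0x3f) | 0x80 // RFC 4122 variant
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:16])
}

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder project and backend service names.
	bs, err := svc.BackendServices.Get("my-project", "web-backend").Do()
	if err != nil {
		log.Fatal(err)
	}
	bs.TimeoutSec = 60 // illustrative change

	requestID := newUUID()
	// Reusing requestID on a retry of this same update lets the server ignore
	// the duplicate instead of applying it twice.
	op, err := svc.BackendServices.Update("my-project", "web-backend", bs).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("update accepted: operation %s (%s)", op.Name, op.Status)
}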
-func (c *BackendServicesAddSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesAddSignedUrlKeyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.addSignedUrlKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Adds a key for validating requests with signed URLs for this backend service.", - // "httpMethod": "POST", - // "id": "compute.backendServices.addSignedUrlKey", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}/addSignedUrlKey", - // "request": { - // "$ref": "SignedUrlKey" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendServices.aggregatedList": - -type BackendServicesAggregatedListCall struct { +type DiskTypesAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -46797,10 +52316,10 @@ type BackendServicesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves the list of all BackendService resources, -// regional and global, available to the specified project. -func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { - c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of disk types. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList +func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { + c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -46827,7 +52346,7 @@ func (r *BackendServicesService) AggregatedList(project string) *BackendServices // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { +func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -46838,7 +52357,7 @@ func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServic // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { +func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -46855,7 +52374,7 @@ func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *Backen // // Currently, only sorting by name or creationTimestamp desc is // supported. 
-func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { +func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -46863,7 +52382,7 @@ func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServ // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { +func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -46871,7 +52390,7 @@ func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *Backend // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { +func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46881,7 +52400,7 @@ func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *Backen // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { +func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -46889,21 +52408,21 @@ func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *Backe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { +func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesAggregatedListCall) Header() http.Header { +func (c *DiskTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -46915,7 +52434,7 @@ func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Respons var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -46928,14 +52447,14 @@ func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.aggregatedList" call. -// Exactly one of *BackendServiceAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *BackendServiceAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.diskTypes.aggregatedList" call. +// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { +func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46954,7 +52473,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceAggregatedList{ + ret := &DiskTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46966,9 +52485,9 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B } return ret, nil // { - // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", + // "description": "Retrieves an aggregated list of disk types.", // "httpMethod": "GET", - // "id": "compute.backendServices.aggregatedList", + // "id": "compute.diskTypes.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -46997,16 +52516,16 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // "type": "string" // }, // "project": { - // "description": "Name of the project scoping this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/backendServices", + // "path": "{project}/aggregated/diskTypes", // "response": { - // "$ref": "BackendServiceAggregatedList" + // "$ref": "DiskTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -47020,7 +52539,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
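The Pages helper documented above drives the pageToken/nextPageToken loop for the caller. A minimal sketch against the new DiskTypes.AggregatedList call, assuming the standard compute/v1 aggregated-list shape (Items is a map keyed by scope, each entry holding a DiskTypes slice) and a placeholder project ID.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	call := svc.DiskTypes.AggregatedList("my-project").
		Filter(`name = "pd-ssd"`). // illustrative filter expression
		MaxResults(100)

	// Pages fetches page after page and invokes the callback once per
	// DiskTypeAggregatedList; returning a non-nil error stops the iteration.
	err = call.Pages(ctx, func(page *compute.DiskTypeAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, dt := range scoped.DiskTypes {
				fmt.Printf("%s: %s\n", scope, dt.Name)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}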
-func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { +func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -47038,101 +52557,100 @@ func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*B } } -// method id "compute.backendServices.delete": +// method id "compute.diskTypes.get": -type BackendServicesDeleteCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DiskTypesGetCall struct { + s *Service + project string + zone string + diskType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified BackendService resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete -func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { - c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified disk type. Gets a list of available disk +// types by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get +func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { + c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.diskType = diskType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { +func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
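The IfNoneMatch and googleapi.IsNotModified comments above describe conditional reads: send the ETag from an earlier response and an unchanged resource comes back as a 304 rather than a full body. A sketch of that flow against DiskTypes.Get, with placeholder project, zone, and disk-type names.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// First fetch: the generated call records the response headers on the
	// returned struct, so the ETag (if any) can be remembered.
	dt, err := svc.DiskTypes.Get("my-project", "us-central1-a", "pd-standard").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	etag := dt.ServerResponse.Header.Get("Etag") // may be empty if no ETag was sent
	if etag == "" {
		log.Println("no ETag returned; conditional re-fetch not possible")
		return
	}

	// Later fetch: a 304 surfaces as a googleapi.Error that IsNotModified
	// recognizes, meaning the cached copy is still current.
	_, err = svc.DiskTypes.Get("my-project", "us-central1-a", "pd-standard").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	switch {
	case googleapi.IsNotModified(err):
		log.Println("disk type unchanged; keep using the cached copy")
	case err != nil:
		log.Fatal(err)
	default:
		log.Println("disk type changed; refreshed")
	}
}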
+func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { +func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesDeleteCall) Header() http.Header { +func (c *DiskTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "diskType": c.diskType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.diskTypes.get" call. +// Exactly one of *DiskType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskType.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47151,7 +52669,7 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &DiskType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47163,18 +52681,19 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified BackendService resource.", - // "httpMethod": "DELETE", - // "id": "compute.backendServices.delete", + // "description": "Returns the specified disk type. 
Gets a list of available disk types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.get", // "parameterOrder": [ // "project", - // "backendService" + // "zone", + // "diskType" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to delete.", + // "diskType": { + // "description": "Name of the disk type to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -47185,222 +52704,116 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/zones/{zone}/diskTypes/{diskType}", // "response": { - // "$ref": "Operation" + // "$ref": "DiskType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendServices.deleteSignedUrlKey": +// method id "compute.diskTypes.list": -type BackendServicesDeleteSignedUrlKeyCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DiskTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DeleteSignedUrlKey: Deletes a key for validating requests with signed -// URLs for this backend service. -func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { - c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of disk types available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list +func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { + c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.urlParams_.Set("keyName", keyName) + c.zone = zone return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendServicesDeleteSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { + c.urlParams_.Set("filter", filter) return c } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesDeleteSignedUrlKeyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesDeleteSignedUrlKeyCall { - c.ctx_ = ctx +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. 
+// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { + c.urlParams_.Set("orderBy", orderBy) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.deleteSignedUrlKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes a key for validating requests with signed URLs for this backend service.", - // "httpMethod": "POST", - // "id": "compute.backendServices.deleteSignedUrlKey", - // "parameterOrder": [ - // "project", - // "backendService", - // "keyName" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "keyName": { - // "description": "The name of the Signed URL Key to delete.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}/deleteSignedUrlKey", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendServices.get": - -type BackendServicesGetCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified BackendService resource. Gets a list of -// available backend services. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get -func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { - c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { +func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47410,7 +52823,7 @@ func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
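A short sketch of the zonal DiskTypes.List call whose Filter, MaxResults, OrderBy, and PageToken options are documented above; the filter expression and resource names are purely illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	list, err := svc.DiskTypes.List("my-project", "us-central1-a").
		Filter(`name != "local-ssd"`).
		OrderBy("creationTimestamp desc").
		MaxResults(50).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, dt := range list.Items {
		fmt.Printf("%s\t%s\n", dt.Name, dt.Description)
	}
	if list.NextPageToken != "" {
		// More results exist: pass list.NextPageToken to PageToken on a
		// follow-up call, or use Pages to loop automatically.
		fmt.Println("more pages available")
	}
}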
-func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { +func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { c.ifNoneMatch_ = entityTag return c } @@ -47418,21 +52831,21 @@ func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { +func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesGetCall) Header() http.Header { +func (c *DiskTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -47444,7 +52857,7 @@ func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -47452,20 +52865,20 @@ func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.get" call. -// Exactly one of *BackendService or error will be non-nil. Any non-2xx +// Do executes the "compute.diskTypes.list" call. +// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { +// *DiskTypeList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47484,7 +52897,7 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendService{ + ret := &DiskTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47496,19 +52909,35 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi } return ret, nil // { - // "description": "Returns the specified BackendService resource. Gets a list of available backend services.", + // "description": "Retrieves a list of disk types available to the specified project.", // "httpMethod": "GET", - // "id": "compute.backendServices.get", + // "id": "compute.diskTypes.list", // "parameterOrder": [ // "project", - // "backendService" + // "zone" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -47517,11 +52946,18 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/zones/{zone}/diskTypes", // "response": { - // "$ref": "BackendService" + // "$ref": "DiskTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -47532,178 +52968,49 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi } -// method id "compute.backendServices.getHealth": - -type BackendServicesGetHealthCall struct { - s *Service - project string - backendService string - resourcegroupreference *ResourceGroupReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// GetHealth: Gets the most recent health check results for this -// BackendService. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth -func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { - c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
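The getHealth call above takes a ResourceGroupReference naming one backend instance group and returns per-instance health. A minimal sketch, assuming the group is already attached to the backend service and the usual Group and HealthStatus field names; every name and URL is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	ref := &compute.ResourceGroupReference{
		// Fully qualified URL of the instance group whose backends to check.
		Group: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instanceGroups/web-ig",
	}
	health, err := svc.BackendServices.GetHealth("my-project", "web-backend", ref).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, hs := range health.HealthStatus {
		fmt.Printf("%s -> %s\n", hs.Instance, hs.HealthState)
	}
}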
-func (c *BackendServicesGetHealthCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.getHealth" call. -// Exactly one of *BackendServiceGroupHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, + if err := f(x); err != nil { + return err } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendServiceGroupHealth{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the most recent health check results for this BackendService.", - // "httpMethod": "POST", - // "id": "compute.backendServices.getHealth", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the queried instance belongs.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}/getHealth", - // "request": { - // "$ref": "ResourceGroupReference" - // }, - // "response": { - // "$ref": 
"BackendServiceGroupHealth" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - } -// method id "compute.backendServices.insert": +// method id "compute.disks.addResourcePolicies": -type BackendServicesInsertCall struct { - s *Service - project string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksAddResourcePoliciesCall struct { + s *Service + project string + zone string + disk string + disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a BackendService resource in the specified project -// using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a backend -// service. Read Restrictions and Guidelines for more information. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert -func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { - c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddResourcePolicies: Adds existing resource policies to a disk. You +// can only add one policy which will be applied to this disk for +// scheduling snapshot creation. +func (r *DisksService) AddResourcePolicies(project string, zone string, disk string, disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest) *DisksAddResourcePoliciesCall { + c := &DisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendservice = backendservice + c.zone = zone + c.disk = disk + c.disksaddresourcepoliciesrequest = disksaddresourcepoliciesrequest return c } @@ -47721,7 +53028,7 @@ func (r *BackendServicesService) Insert(project string, backendservice *BackendS // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { +func (c *DisksAddResourcePoliciesCall) RequestId(requestId string) *DisksAddResourcePoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -47729,7 +53036,7 @@ func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { +func (c *DisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksAddResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47737,35 +53044,35 @@ func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { +func (c *DisksAddResourcePoliciesCall) Context(ctx context.Context) *DisksAddResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesInsertCall) Header() http.Header { +func (c *DisksAddResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksaddresourcepoliciesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/addResourcePolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -47774,18 +53081,20 @@ func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.insert" call. +// Do executes the "compute.disks.addResourcePolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47816,13 +53125,22 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", + // "description": "Adds existing resource policies to a disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", // "httpMethod": "POST", - // "id": "compute.backendServices.insert", + // "id": "compute.disks.addResourcePolicies", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "disk" // ], // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -47834,11 +53152,18 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices", + // "path": "{project}/zones/{zone}/disks/{disk}/addResourcePolicies", // "request": { - // "$ref": "BackendService" + // "$ref": "DisksAddResourcePoliciesRequest" // }, // "response": { // "$ref": "Operation" @@ -47851,9 +53176,9 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.backendServices.list": +// method id "compute.disks.aggregatedList": -type BackendServicesListCall struct { +type DisksAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -47862,11 +53187,10 @@ type BackendServicesListCall struct { header_ http.Header } -// List: Retrieves the list of BackendService resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list -func (r *BackendServicesService) List(project string) *BackendServicesListCall { - c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of persistent disks. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList +func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { + c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -47893,7 +53217,7 @@ func (r *BackendServicesService) List(project string) *BackendServicesListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { +func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -47904,7 +53228,7 @@ func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { +func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -47921,7 +53245,7 @@ func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesL // // Currently, only sorting by name or creationTimestamp desc is // supported. 
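A sketch of paging through the aggregated disk listing introduced here, under the same import and placeholder assumptions as the earlier sketch; the Items map on DiskAggregatedList and the Disks slice on its scoped entries are assumed from the compute API surface rather than shown in this hunk.

	// listAllDisks walks every zone's disks for a project using the generated
	// Disks.AggregatedList call and its Pages helper.
	func listAllDisks(ctx context.Context, svc *compute.Service) error {
		return svc.Disks.AggregatedList("my-project").
			Filter("name != example-disk"). // filter syntax as documented on the call
			MaxResults(100).
			Pages(ctx, func(page *compute.DiskAggregatedList) error {
				for scope, scoped := range page.Items {
					for _, d := range scoped.Disks {
						log.Printf("%s: %s (%d GB)", scope, d.Name, d.SizeGb)
					}
				}
				return nil
			})
	}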
-func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { +func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -47929,7 +53253,7 @@ func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { +func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -47937,7 +53261,7 @@ func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { +func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47947,7 +53271,7 @@ func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { +func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -47955,21 +53279,21 @@ func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServices // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { +func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesListCall) Header() http.Header { +func (c *DisksAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -47981,7 +53305,7 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -47994,14 +53318,14 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.list" call. -// Exactly one of *BackendServiceList or error will be non-nil. 
Any +// Do executes the "compute.disks.aggregatedList" call. +// Exactly one of *DiskAggregatedList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was +// *DiskAggregatedList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { +func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48020,7 +53344,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceList{ + ret := &DiskAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48032,9 +53356,9 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ } return ret, nil // { - // "description": "Retrieves the list of BackendService resources available to the specified project.", + // "description": "Retrieves an aggregated list of persistent disks.", // "httpMethod": "GET", - // "id": "compute.backendServices.list", + // "id": "compute.disks.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -48070,9 +53394,9 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // "type": "string" // } // }, - // "path": "{project}/global/backendServices", + // "path": "{project}/aggregated/disks", // "response": { - // "$ref": "BackendServiceList" + // "$ref": "DiskAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -48086,7 +53410,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { +func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -48104,30 +53428,36 @@ func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServ } } -// method id "compute.backendServices.patch": +// method id "compute.disks.createSnapshot": -type BackendServicesPatchCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksCreateSnapshotCall struct { + s *Service + project string + zone string + disk string + snapshot *Snapshot + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. 
This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch -func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { - c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateSnapshot: Creates a snapshot of a specified persistent disk. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot +func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { + c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.backendservice = backendservice + c.zone = zone + c.disk = disk + c.snapshot = snapshot + return c +} + +// GuestFlush sets the optional parameter "guestFlush": [Input Only] +// Specifies to create an application consistent snapshot by informing +// the OS to prepare for the snapshot process. Currently only supported +// on Windows instances using the Volume Shadow Copy Service (VSS). +func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { + c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) return c } @@ -48145,7 +53475,7 @@ func (r *BackendServicesService) Patch(project string, backendService string, ba // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { +func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { c.urlParams_.Set("requestId", requestId) return c } @@ -48153,7 +53483,7 @@ func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { +func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48161,56 +53491,57 @@ func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServices // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { +func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
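A sketch of the createSnapshot call added here, exercising the new GuestFlush option for application-consistent (VSS) snapshots on Windows disks; same import and placeholder assumptions as above, and the Name field on Snapshot is the only identifier not shown in this hunk.

	// snapshotDisk creates a VSS-consistent snapshot of a Windows persistent
	// disk via the generated Disks.CreateSnapshot call.
	func snapshotDisk(ctx context.Context, svc *compute.Service) error {
		snap := &compute.Snapshot{Name: "my-disk-snap-001"}
		op, err := svc.Disks.CreateSnapshot("my-project", "us-central1-a", "my-disk", snap).
			GuestFlush(true). // only supported on Windows instances using VSS, per the description above
			Context(ctx).
			Do()
		if err != nil {
			return err
		}
		log.Printf("createSnapshot: started operation %s", op.Name)
		return nil
	}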
-func (c *BackendServicesPatchCall) Header() http.Header { +func (c *DisksCreateSnapshotCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.patch" call. +// Do executes the "compute.disks.createSnapshot" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48241,21 +53572,27 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.backendServices.patch", + // "description": "Creates a snapshot of a specified persistent disk.", + // "httpMethod": "POST", + // "id": "compute.disks.createSnapshot", // "parameterOrder": [ // "project", - // "backendService" + // "zone", + // "disk" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", + // "disk": { + // "description": "Name of the persistent disk to snapshot.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "guestFlush": { + // "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. 
Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + // "location": "query", + // "type": "boolean" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -48267,11 +53604,18 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", // "request": { - // "$ref": "BackendService" + // "$ref": "Snapshot" // }, // "response": { // "$ref": "Operation" @@ -48284,25 +53628,28 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendServices.setSecurityPolicy": +// method id "compute.disks.delete": -type BackendServicesSetSecurityPolicyCall struct { - s *Service - project string - backendService string - securitypolicyreference *SecurityPolicyReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksDeleteCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetSecurityPolicy: Sets the security policy for the specified backend -// service. -func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { - c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified persistent disk. Deleting a disk +// removes its data permanently and is irreversible. However, deleting a +// disk does not delete any snapshots previously made from the disk. You +// must separately delete snapshots. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete +func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { + c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.securitypolicyreference = securitypolicyreference + c.zone = zone + c.disk = disk return c } @@ -48320,7 +53667,7 @@ func (r *BackendServicesService) SetSecurityPolicy(project string, backendServic // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
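A sketch of the delete call, passing a client-generated requestId so a retried request is treated as a duplicate rather than re-executed, as the parameter documentation above describes; same assumptions as the earlier sketches, and the UUID literal is a placeholder.

	// deleteDisk removes a persistent disk; snapshots previously made from it
	// are not deleted, per the method description above.
	func deleteDisk(ctx context.Context, svc *compute.Service) error {
		op, err := svc.Disks.Delete("my-project", "us-central1-a", "my-disk").
			RequestId("5f0b9aa2-5e9e-4b8e-9d0e-000000000001"). // placeholder UUID
			Context(ctx).
			Do()
		if err != nil {
			return err
		}
		log.Printf("delete: operation %s is %s", op.Name, op.Status)
		return nil
	}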
-func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetSecurityPolicyCall { +func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -48328,7 +53675,7 @@ func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *Back // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetSecurityPolicyCall { +func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48336,56 +53683,52 @@ func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *Bac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetSecurityPolicyCall { +func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { +func (c *DisksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.setSecurityPolicy" call. +// Do executes the "compute.disks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48416,16 +53759,17 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the security policy for the specified backend service.", - // "httpMethod": "POST", - // "id": "compute.backendServices.setSecurityPolicy", + // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + // "httpMethod": "DELETE", + // "id": "compute.disks.delete", // "parameterOrder": [ // "project", - // "backendService" + // "zone", + // "disk" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", + // "disk": { + // "description": "Name of the persistent disk to delete.", // "location": "path", // "required": true, // "type": "string" @@ -48441,12 +53785,16 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/setSecurityPolicy", - // "request": { - // "$ref": "SecurityPolicyReference" - // }, + // "path": "{project}/zones/{zone}/disks/{disk}", // "response": { // "$ref": "Operation" // }, @@ -48458,263 +53806,100 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) } -// method id "compute.backendServices.testIamPermissions": +// method id "compute.disks.get": -type BackendServicesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksGetCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *BackendServicesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendServicesTestIamPermissionsCall { - c := &BackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns a specified persistent disk. Gets a list of available +// persistent disks by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get +func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { + c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.disk = disk return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendServicesTestIamPermissionsCall { +func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesTestIamPermissionsCall) Context(ctx context.Context) *BackendServicesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.backendServices.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.backendServices.update": - -type BackendServicesUpdateCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update -func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { - c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.backendservice = backendservice - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
-// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { + c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { +func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesUpdateCall) Header() http.Header { +func (c *DisksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.disks.get" call. +// Exactly one of *Disk or error will be non-nil. 
Any non-2xx status +// code is an error. Response headers are in either +// *Disk.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48733,7 +53918,7 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Disk{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48745,18 +53930,19 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", - // "httpMethod": "PUT", - // "id": "compute.backendServices.update", + // "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.disks.get", // "parameterOrder": [ // "project", - // "backendService" + // "zone", + // "disk" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", + // "disk": { + // "description": "Name of the persistent disk to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -48767,113 +53953,54 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, + // "path": "{project}/zones/{zone}/disks/{disk}", // "response": { - // "$ref": "Operation" + // "$ref": "Disk" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.diskTypes.aggregatedList": +// method id "compute.disks.getIamPolicy": -type DiskTypesAggregatedListCall struct { +type DisksGetIamPolicyCall struct { s *Service project string + zone string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of disk types. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList -func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { - c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { + c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.zone = zone + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { +func (c *DisksGetIamPolicyCall) Fields(s ...googleapi.Field) *DisksGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48883,7 +54010,7 @@ func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { +func (c *DisksGetIamPolicyCall) IfNoneMatch(entityTag string) *DisksGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -48891,21 +54018,21 @@ func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { +func (c *DisksGetIamPolicyCall) Context(ctx context.Context) *DisksGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
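A sketch combining the get and getIamPolicy calls for a disk; same import and placeholder assumptions as above, with Policy.Bindings (and the Role and Members fields on each binding) assumed from the compute API surface rather than shown in this hunk.

	// describeDisk fetches a disk and its IAM policy using the generated
	// Disks.Get and Disks.GetIamPolicy calls.
	func describeDisk(ctx context.Context, svc *compute.Service) error {
		disk, err := svc.Disks.Get("my-project", "us-central1-a", "my-disk").Context(ctx).Do()
		if err != nil {
			return err
		}
		log.Printf("disk %s: status=%s sizeGb=%d", disk.Name, disk.Status, disk.SizeGb)

		policy, err := svc.Disks.GetIamPolicy("my-project", "us-central1-a", "my-disk").Context(ctx).Do()
		if err != nil {
			return err
		}
		for _, b := range policy.Bindings {
			log.Printf("iam binding: %s -> %v", b.Role, b.Members)
		}
		return nil
	}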
-func (c *DiskTypesAggregatedListCall) Header() http.Header { +func (c *DisksGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -48917,7 +54044,7 @@ func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -48925,19 +54052,21 @@ func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.aggregatedList" call. -// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { +// Do executes the "compute.disks.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48956,7 +54085,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskTypeAggregatedList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48968,47 +54097,40 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of disk types.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.diskTypes.aggregatedList", + // "id": "compute.disks.getIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/diskTypes", + // "path": "{project}/zones/{zone}/disks/{resource}/getIamPolicy", // "response": { - // "$ref": "DiskTypeAggregatedList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -49019,121 +54141,119 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.diskTypes.get": +// method id "compute.disks.insert": -type DiskTypesGetCall struct { - s *Service - project string - zone string - diskType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type DisksInsertCall struct { + s *Service + project string + zone string + disk *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified disk type. Gets a list of available disk -// types by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get -func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { - c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a persistent disk in the specified project using the +// data in the request. You can create a disk with a sourceImage, a +// sourceSnapshot, or create an empty 500 GB data disk by omitting all +// properties. You can also create a disk that is larger than the +// default size by specifying the sizeGb property. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert +func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { + c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.diskType = diskType + c.disk = disk + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// SourceImage sets the optional parameter "sourceImage": Source image +// to restore onto a disk. +func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { + c.urlParams_.Set("sourceImage", sourceImage) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { +func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { +func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesGetCall) Header() http.Header { +func (c *DisksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "diskType": c.diskType, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.get" call. -// Exactly one of *DiskType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.disks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { +func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49152,7 +54272,7 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49164,22 +54284,14 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { } return ret, nil // { - // "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.diskTypes.get", + // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", + // "httpMethod": "POST", + // "id": "compute.disks.insert", // "parameterOrder": [ // "project", - // "zone", - // "diskType" + // "zone" // ], // "parameters": { - // "diskType": { - // "description": "Name of the disk type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -49187,6 +54299,16 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sourceImage": { + // "description": "Optional. Source image to restore onto a disk.", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -49195,22 +54317,24 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/diskTypes/{diskType}", + // "path": "{project}/zones/{zone}/disks", + // "request": { + // "$ref": "Disk" + // }, // "response": { - // "$ref": "DiskType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.diskTypes.list": +// method id "compute.disks.list": -type DiskTypesListCall struct { +type DisksListCall struct { s *Service project string zone string @@ -49220,11 +54344,11 @@ type DiskTypesListCall struct { header_ http.Header } -// List: Retrieves a list of disk types available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list -func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { - c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of persistent disks contained within the +// specified zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list +func (r *DisksService) List(project string, zone string) *DisksListCall { + c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone return c @@ -49252,7 +54376,7 @@ func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { +func (c *DisksListCall) Filter(filter string) *DisksListCall { c.urlParams_.Set("filter", filter) return c } @@ -49263,7 +54387,7 @@ func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { +func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -49280,7 +54404,7 @@ func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { +func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -49288,7 +54412,7 @@ func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { +func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -49296,7 +54420,7 @@ func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { +func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49306,7 +54430,7 @@ func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { +func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { c.ifNoneMatch_ = entityTag return c } @@ -49314,21 +54438,21 @@ func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { +func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesListCall) Header() http.Header { +func (c *DisksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -49340,7 +54464,7 @@ func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -49354,14 +54478,14 @@ func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.list" call. -// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *DiskTypeList.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.disks.list" call. +// Exactly one of *DiskList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
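// Illustrative usage sketch for the DisksInsertCall builder generated above
// (a minimal sketch, not part of the generated file or of this vendor diff):
// the import path google.golang.org/api/compute/v1, the oauth2 DefaultClient
// helper, and the project/zone/disk names and request ID below are
// assumptions made for illustration only.
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Assumes Application Default Credentials are configured in the environment.
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Create an empty 100 GB disk. RequestId makes a retried request
	// idempotent, as described in the parameter documentation above.
	op, err := svc.Disks.Insert("my-project", "us-central1-a",
		&compute.Disk{Name: "example-disk", SizeGb: 100}).
		RequestId("11111111-2222-3333-4444-555555555555").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("insert operation: %s (%s)", op.Name, op.Status)
}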
-func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { +func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49380,7 +54504,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskTypeList{ + ret := &DiskList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49392,9 +54516,9 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err } return ret, nil // { - // "description": "Retrieves a list of disk types available to the specified project.", + // "description": "Retrieves a list of persistent disks contained within the specified zone.", // "httpMethod": "GET", - // "id": "compute.diskTypes.list", + // "id": "compute.disks.list", // "parameterOrder": [ // "project", // "zone" @@ -49438,9 +54562,9 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/diskTypes", + // "path": "{project}/zones/{zone}/disks", // "response": { - // "$ref": "DiskTypeList" + // "$ref": "DiskList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -49454,7 +54578,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { +func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -49472,28 +54596,26 @@ func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) err } } -// method id "compute.disks.addResourcePolicies": +// method id "compute.disks.removeResourcePolicies": -type DisksAddResourcePoliciesCall struct { - s *Service - project string - zone string - disk string - disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksRemoveResourcePoliciesCall struct { + s *Service + project string + zone string + disk string + disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddResourcePolicies: Adds existing resource policies to a disk. You -// can only add one policy which will be applied to this disk for -// scheduling snapshot creation. -func (r *DisksService) AddResourcePolicies(project string, zone string, disk string, disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest) *DisksAddResourcePoliciesCall { - c := &DisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveResourcePolicies: Removes resource policies from a disk. 
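// Illustrative usage sketch for the DisksListCall builder and its Pages helper
// shown above (a minimal sketch, not part of the generated file): Pages follows
// nextPageToken automatically, so the caller only supplies a per-page callback.
// The project and zone names are assumptions for illustration.
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printDisks lists every persistent disk in one zone, across all result pages.
func printDisks(ctx context.Context, svc *compute.Service) error {
	return svc.Disks.List("my-project", "us-central1-a").
		MaxResults(100). // optional; the server default is 500 per page
		Pages(ctx, func(page *compute.DiskList) error {
			for _, d := range page.Items {
				fmt.Printf("%s\t%d GB\n", d.Name, d.SizeGb)
			}
			return nil // returning a non-nil error stops the iteration
		})
}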
+func (r *DisksService) RemoveResourcePolicies(project string, zone string, disk string, disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest) *DisksRemoveResourcePoliciesCall { + c := &DisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.disk = disk - c.disksaddresourcepoliciesrequest = disksaddresourcepoliciesrequest + c.disksremoveresourcepoliciesrequest = disksremoveresourcepoliciesrequest return c } @@ -49511,7 +54633,7 @@ func (r *DisksService) AddResourcePolicies(project string, zone string, disk str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksAddResourcePoliciesCall) RequestId(requestId string) *DisksAddResourcePoliciesCall { +func (c *DisksRemoveResourcePoliciesCall) RequestId(requestId string) *DisksRemoveResourcePoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -49519,7 +54641,7 @@ func (c *DisksAddResourcePoliciesCall) RequestId(requestId string) *DisksAddReso // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksAddResourcePoliciesCall { +func (c *DisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksRemoveResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49527,35 +54649,35 @@ func (c *DisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksAddRes // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksAddResourcePoliciesCall) Context(ctx context.Context) *DisksAddResourcePoliciesCall { +func (c *DisksRemoveResourcePoliciesCall) Context(ctx context.Context) *DisksRemoveResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksAddResourcePoliciesCall) Header() http.Header { +func (c *DisksRemoveResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksaddresourcepoliciesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksremoveresourcepoliciesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/addResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -49570,14 +54692,14 @@ func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.addResourcePolicies" call. 
+// Do executes the "compute.disks.removeResourcePolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49608,9 +54730,9 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Adds existing resource policies to a disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", + // "description": "Removes resource policies from a disk.", // "httpMethod": "POST", - // "id": "compute.disks.addResourcePolicies", + // "id": "compute.disks.removeResourcePolicies", // "parameterOrder": [ // "project", // "zone", @@ -49644,9 +54766,9 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/addResourcePolicies", + // "path": "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies", // "request": { - // "$ref": "DisksAddResourcePoliciesRequest" + // "$ref": "DisksRemoveResourcePoliciesRequest" // }, // "response": { // "$ref": "Operation" @@ -49659,156 +54781,111 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.disks.aggregatedList": +// method id "compute.disks.resize": -type DisksAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type DisksResizeCall struct { + s *Service + project string + zone string + disk string + disksresizerequest *DisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of persistent disks. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList -func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { - c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the specified persistent disk. You can only increase +// the size of the disk. +func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { + c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.disk = disk + c.disksresizerequest = disksresizerequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. 
-// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
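// Illustrative usage sketch for the DisksRemoveResourcePoliciesCall builder
// defined above (a minimal sketch, not part of the generated file): the
// project, zone, disk, request ID, and the resource-policy URL below are
// assumptions for illustration only.
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// removeSnapshotSchedule detaches a (hypothetical) snapshot-schedule resource
// policy from a disk and returns the resulting Operation name.
func removeSnapshotSchedule(ctx context.Context, svc *compute.Service) (string, error) {
	req := &compute.DisksRemoveResourcePoliciesRequest{
		// Resource policies are referenced by URL; this one is made up.
		ResourcePolicies: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/resourcePolicies/daily-snapshots",
		},
	}
	op, err := svc.Disks.RemoveResourcePolicies("my-project", "us-central1-a", "example-disk", req).
		RequestId("11111111-2222-3333-4444-555555555555"). // idempotent retry token, per the doc above
		Context(ctx).
		Do()
	if err != nil {
		return "", fmt.Errorf("removeResourcePolicies: %v", err)
	}
	return op.Name, nil
}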
+func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { +func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { +func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksAggregatedListCall) Header() http.Header { +func (c *DisksResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.aggregatedList" call. -// Exactly one of *DiskAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { +// Do executes the "compute.disks.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49827,7 +54904,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49839,131 +54916,85 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega } return ret, nil // { - // "description": "Retrieves an aggregated list of persistent disks.", - // "httpMethod": "GET", - // "id": "compute.disks.aggregatedList", + // "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", + // "httpMethod": "POST", + // "id": "compute.disks.resize", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "disk" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "disk": { + // "description": "The name of the persistent disk.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/disks", - // "response": { - // "$ref": "DiskAggregatedList" - // }, + // "path": "{project}/zones/{zone}/disks/{disk}/resize", + // "request": { + // "$ref": "DisksResizeRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
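// Illustrative usage sketch for the DisksResizeCall builder defined above
// (a minimal sketch, not part of the generated file): per the method
// description, SizeGb can only grow. The project, zone, and disk names are
// assumptions for illustration only.
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// growDisk resizes a persistent disk to newSizeGb (which must be larger than
// the current size) and returns the Operation name.
func growDisk(ctx context.Context, svc *compute.Service, newSizeGb int64) (string, error) {
	op, err := svc.Disks.Resize("my-project", "us-central1-a", "example-disk",
		&compute.DisksResizeRequest{SizeGb: newSizeGb}).
		Context(ctx).
		Do()
	if err != nil {
		return "", fmt.Errorf("resize: %v", err)
	}
	return op.Name, nil
}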
-func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.createSnapshot": +// method id "compute.disks.setIamPolicy": -type DisksCreateSnapshotCall struct { - s *Service - project string - zone string - disk string - snapshot *Snapshot - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// CreateSnapshot: Creates a snapshot of a specified persistent disk. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot -func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { - c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *DisksService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *DisksSetIamPolicyCall { + c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.disk = disk - c.snapshot = snapshot - return c -} - -// GuestFlush sets the optional parameter "guestFlush": -func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { - c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { +func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49971,35 +55002,35 @@ func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnaps // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { +func (c *DisksSetIamPolicyCall) Context(ctx context.Context) *DisksSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksCreateSnapshotCall) Header() http.Header { +func (c *DisksSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -50007,21 +55038,21 @@ func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.createSnapshot" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.disks.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50040,7 +55071,7 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50052,26 +55083,15 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a snapshot of a specified persistent disk.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.disks.createSnapshot", + // "id": "compute.disks.setIamPolicy", // "parameterOrder": [ // "project", // "zone", - // "disk" + // "resource" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to snapshot.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "guestFlush": { - // "location": "query", - // "type": "boolean" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -50079,9 +55099,11 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { @@ -50092,12 +55114,12 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", + // "path": "{project}/zones/{zone}/disks/{resource}/setIamPolicy", // "request": { - // "$ref": "Snapshot" + // "$ref": "ZoneSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -50107,28 +55129,27 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.disks.delete": +// method id "compute.disks.setLabels": -type DisksDeleteCall struct { - s *Service - project string - zone string - disk string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksSetLabelsCall struct { + s *Service + project string + zone string + resource string + zonesetlabelsrequest *ZoneSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified persistent disk. Deleting a disk -// removes its data permanently and is irreversible. However, deleting a -// disk does not delete any snapshots previously made from the disk. You -// must separately delete snapshots. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete -func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { - c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on a disk. To learn more about labels, +// read the Labeling Resources documentation. +func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { + c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.disk = disk + c.resource = resource + c.zonesetlabelsrequest = zonesetlabelsrequest return c } @@ -50146,7 +55167,7 @@ func (r *DisksService) Delete(project string, zone string, disk string) *DisksDe // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { +func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -50154,7 +55175,7 @@ func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
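// Illustrative usage sketch for the DisksSetIamPolicyCall builder defined
// above (a minimal sketch, not part of the generated file): setIamPolicy
// replaces the existing policy, so real callers usually read-modify-write it.
// The role, member, project, zone, and disk names are assumptions for
// illustration only.
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// grantDiskAdmin replaces the IAM policy on a disk with a single binding and
// returns the policy the server now holds.
func grantDiskAdmin(ctx context.Context, svc *compute.Service) (*compute.Policy, error) {
	req := &compute.ZoneSetPolicyRequest{
		Policy: &compute.Policy{
			Bindings: []*compute.Binding{{
				Role:    "roles/compute.storageAdmin",
				Members: []string{"user:alice@example.com"},
			}},
		},
	}
	policy, err := svc.Disks.SetIamPolicy("my-project", "us-central1-a", "example-disk", req).
		Context(ctx).
		Do()
	if err != nil {
		return nil, fmt.Errorf("setIamPolicy: %v", err)
	}
	return policy, nil
}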
-func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { +func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50162,52 +55183,57 @@ func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { +func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksDeleteCall) Header() http.Header { +func (c *DisksSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.delete" call. +// Do executes the "compute.disks.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50238,21 +55264,15 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", - // "httpMethod": "DELETE", - // "id": "compute.disks.delete", + // "description": "Sets the labels on a disk. 
To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.disks.setLabels", // "parameterOrder": [ // "project", // "zone", - // "disk" + // "resource" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -50265,6 +55285,13 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "location": "query", // "type": "string" // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -50273,7 +55300,10 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}", + // "path": "{project}/zones/{zone}/disks/{resource}/setLabels", + // "request": { + // "$ref": "ZoneSetLabelsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -50285,100 +55315,92 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.get": +// method id "compute.disks.testIamPermissions": -type DisksGetCall struct { - s *Service - project string - zone string - disk string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type DisksTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns a specified persistent disk. Gets a list of available -// persistent disks by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get -func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { - c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { + c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.disk = disk + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { +func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
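// Illustrative usage sketch for the DisksSetLabelsCall builder defined above
// (a minimal sketch, not part of the generated file): setLabels requires the
// current label fingerprint, so the sketch reads the disk first via
// Disks.Get. The project, zone, and disk names are assumptions for
// illustration only.
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// labelDisk adds or overwrites a single label on a disk, using the label
// fingerprint returned by Get as the optimistic-locking token.
func labelDisk(ctx context.Context, svc *compute.Service, key, value string) error {
	disk, err := svc.Disks.Get("my-project", "us-central1-a", "example-disk").Context(ctx).Do()
	if err != nil {
		return fmt.Errorf("get: %v", err)
	}
	labels := disk.Labels
	if labels == nil {
		labels = map[string]string{}
	}
	labels[key] = value

	op, err := svc.Disks.SetLabels("my-project", "us-central1-a", "example-disk",
		&compute.ZoneSetLabelsRequest{
			LabelFingerprint: disk.LabelFingerprint, // required concurrency-control token
			Labels:           labels,
		}).Context(ctx).Do()
	if err != nil {
		return fmt.Errorf("setLabels: %v", err)
	}
	fmt.Println("setLabels operation:", op.Name)
	return nil
}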
-func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { +func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksGetCall) Header() http.Header { +func (c *DisksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.get" call. -// Exactly one of *Disk or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Disk.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { +// Do executes the "compute.disks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50397,7 +55419,7 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Disk{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50409,26 +55431,26 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.disks.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.disks.testIamPermissions", // "parameterOrder": [ // "project", // "zone", - // "disk" + // "resource" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -50440,9 +55462,12 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}", + // "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Disk" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -50453,33 +55478,195 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } -// method id "compute.disks.getIamPolicy": +// method id "compute.externalVpnGateways.delete": -type DisksGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ExternalVpnGatewaysDeleteCall struct { + s *Service + project string + externalVpnGateway string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { - c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified externalVpnGateway. 
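// Illustrative usage sketch for the DisksTestIamPermissionsCall builder
// defined above (a minimal sketch, not part of the generated file): the call
// returns the subset of the requested permissions that the caller actually
// holds on the disk. Permission strings and resource names are assumptions
// for illustration only.
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// callerDiskPermissions reports which of the requested permissions the caller
// has on the named disk.
func callerDiskPermissions(ctx context.Context, svc *compute.Service) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.disks.get", "compute.disks.delete"},
	}
	resp, err := svc.Disks.TestIamPermissions("my-project", "us-central1-a", "example-disk", req).
		Context(ctx).
		Do()
	if err != nil {
		return nil, fmt.Errorf("testIamPermissions: %v", err)
	}
	fmt.Println("granted:", resp.Permissions)
	return resp.Permissions, nil
}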
+func (r *ExternalVpnGatewaysService) Delete(project string, externalVpnGateway string) *ExternalVpnGatewaysDeleteCall { + c := &ExternalVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource + c.externalVpnGateway = externalVpnGateway + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ExternalVpnGatewaysDeleteCall) RequestId(requestId string) *ExternalVpnGatewaysDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksGetIamPolicyCall) Fields(s ...googleapi.Field) *DisksGetIamPolicyCall { +func (c *ExternalVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysDeleteCall) Context(ctx context.Context) *ExternalVpnGatewaysDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{externalVpnGateway}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "externalVpnGateway": c.externalVpnGateway, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified externalVpnGateway.", + // "httpMethod": "DELETE", + // "id": "compute.externalVpnGateways.delete", + // "parameterOrder": [ + // "project", + // "externalVpnGateway" + // ], + // "parameters": { + // "externalVpnGateway": { + // "description": "Name of the externalVpnGateways to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.externalVpnGateways.get": + +type ExternalVpnGatewaysGetCall struct { + s *Service + project string + externalVpnGateway string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified externalVpnGateway. Get a list of +// available externalVpnGateways by making a list() request. +func (r *ExternalVpnGatewaysService) Get(project string, externalVpnGateway string) *ExternalVpnGatewaysGetCall { + c := &ExternalVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.externalVpnGateway = externalVpnGateway + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ExternalVpnGatewaysGetCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50489,7 +55676,7 @@ func (c *DisksGetIamPolicyCall) Fields(s ...googleapi.Field) *DisksGetIamPolicyC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DisksGetIamPolicyCall) IfNoneMatch(entityTag string) *DisksGetIamPolicyCall { +func (c *ExternalVpnGatewaysGetCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysGetCall { c.ifNoneMatch_ = entityTag return c } @@ -50497,21 +55684,21 @@ func (c *DisksGetIamPolicyCall) IfNoneMatch(entityTag string) *DisksGetIamPolicy // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksGetIamPolicyCall) Context(ctx context.Context) *DisksGetIamPolicyCall { +func (c *ExternalVpnGatewaysGetCall) Context(ctx context.Context) *ExternalVpnGatewaysGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksGetIamPolicyCall) Header() http.Header { +func (c *ExternalVpnGatewaysGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -50523,7 +55710,7 @@ func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{externalVpnGateway}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -50531,21 +55718,20 @@ func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "externalVpnGateway": c.externalVpnGateway, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.externalVpnGateways.get" call. +// Exactly one of *ExternalVpnGateway or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ExternalVpnGateway.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGateway, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50564,7 +55750,7 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &ExternalVpnGateway{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50576,40 +55762,32 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Returns the specified externalVpnGateway. Get a list of available externalVpnGateways by making a list() request.", // "httpMethod": "GET", - // "id": "compute.disks.getIamPolicy", + // "id": "compute.externalVpnGateways.get", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "externalVpnGateway" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "externalVpnGateway": { + // "description": "Name of the externalVpnGateway to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/getIamPolicy", + // "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", // "response": { - // "$ref": "Policy" + // "$ref": "ExternalVpnGateway" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -50620,29 +55798,23 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } -// method id "compute.disks.insert": +// method id "compute.externalVpnGateways.insert": -type DisksInsertCall struct { - s *Service - project string - zone string - disk *Disk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ExternalVpnGatewaysInsertCall struct { + s *Service + project string + externalvpngateway *ExternalVpnGateway + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a persistent disk in the specified project using the -// data in the request. You can create a disk with a sourceImage, a -// sourceSnapshot, or create an empty 500 GB data disk by omitting all -// properties. You can also create a disk that is larger than the -// default size by specifying the sizeGb property. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert -func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { - c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a ExternalVpnGateway in the specified project using +// the data included in the request. +func (r *ExternalVpnGatewaysService) Insert(project string, externalvpngateway *ExternalVpnGateway) *ExternalVpnGatewaysInsertCall { + c := &ExternalVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + c.externalvpngateway = externalvpngateway return c } @@ -50660,22 +55832,15 @@ func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksIns // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { +func (c *ExternalVpnGatewaysInsertCall) RequestId(requestId string) *ExternalVpnGatewaysInsertCall { c.urlParams_.Set("requestId", requestId) return c } -// SourceImage sets the optional parameter "sourceImage": Source image -// to restore onto a disk. -func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { - c.urlParams_.Set("sourceImage", sourceImage) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { +func (c *ExternalVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50683,35 +55848,35 @@ func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { +func (c *ExternalVpnGatewaysInsertCall) Context(ctx context.Context) *ExternalVpnGatewaysInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksInsertCall) Header() http.Header { +func (c *ExternalVpnGatewaysInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.externalvpngateway) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -50720,19 +55885,18 @@ func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.insert" call. +// Do executes the "compute.externalVpnGateways.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ExternalVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50763,12 +55927,11 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", + // "description": "Creates a ExternalVpnGateway in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.disks.insert", + // "id": "compute.externalVpnGateways.insert", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "project": { @@ -50782,23 +55945,11 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "sourceImage": { - // "description": "Optional. 
Source image to restore onto a disk.", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks", + // "path": "{project}/global/externalVpnGateways", // "request": { - // "$ref": "Disk" + // "$ref": "ExternalVpnGateway" // }, // "response": { // "$ref": "Operation" @@ -50811,25 +55962,22 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.list": +// method id "compute.externalVpnGateways.list": -type DisksListCall struct { +type ExternalVpnGatewaysListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of persistent disks contained within the -// specified zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list -func (r *DisksService) List(project string, zone string) *DisksListCall { - c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of ExternalVpnGateway available to the +// specified project. +func (r *ExternalVpnGatewaysService) List(project string) *ExternalVpnGatewaysListCall { + c := &ExternalVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -50855,7 +56003,7 @@ func (r *DisksService) List(project string, zone string) *DisksListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *DisksListCall) Filter(filter string) *DisksListCall { +func (c *ExternalVpnGatewaysListCall) Filter(filter string) *ExternalVpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c } @@ -50866,7 +56014,7 @@ func (c *DisksListCall) Filter(filter string) *DisksListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { +func (c *ExternalVpnGatewaysListCall) MaxResults(maxResults int64) *ExternalVpnGatewaysListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -50883,7 +56031,7 @@ func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { +func (c *ExternalVpnGatewaysListCall) OrderBy(orderBy string) *ExternalVpnGatewaysListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -50891,7 +56039,7 @@ func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { +func (c *ExternalVpnGatewaysListCall) PageToken(pageToken string) *ExternalVpnGatewaysListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -50899,7 +56047,7 @@ func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { +func (c *ExternalVpnGatewaysListCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50909,7 +56057,7 @@ func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { +func (c *ExternalVpnGatewaysListCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysListCall { c.ifNoneMatch_ = entityTag return c } @@ -50917,21 +56065,21 @@ func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { +func (c *ExternalVpnGatewaysListCall) Context(ctx context.Context) *ExternalVpnGatewaysListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksListCall) Header() http.Header { +func (c *ExternalVpnGatewaysListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -50943,7 +56091,7 @@ func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -50952,19 +56100,18 @@ func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.list" call. -// Exactly one of *DiskList or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { +// Do executes the "compute.externalVpnGateways.list" call. +// Exactly one of *ExternalVpnGatewayList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ExternalVpnGatewayList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
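// Sketch of the paginated compute.externalVpnGateways.list call, combining the
// Filter and MaxResults options documented above with the Pages helper; it is
// not taken from the vendored sources. svc is assumed to be a *compute.Service
// built as in the earlier sketch, and the package and function names and the
// filter value are only for illustration.
package examples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listGateways(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.ExternalVpnGateways.List(project).
		Filter("name != example-gateway").
		MaxResults(100)
	// Pages follows nextPageToken automatically until the last page is reached.
	return call.Pages(ctx, func(page *compute.ExternalVpnGatewayList) error {
		for _, gw := range page.Items {
			fmt.Println(gw.Name)
		}
		return nil
	})
}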
+func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGatewayList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50983,7 +56130,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskList{ + ret := &ExternalVpnGatewayList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50995,12 +56142,11 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { } return ret, nil // { - // "description": "Retrieves a list of persistent disks contained within the specified zone.", + // "description": "Retrieves the list of ExternalVpnGateway available to the specified project.", // "httpMethod": "GET", - // "id": "compute.disks.list", + // "id": "compute.externalVpnGateways.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -51032,18 +56178,11 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks", + // "path": "{project}/global/externalVpnGateways", // "response": { - // "$ref": "DiskList" + // "$ref": "ExternalVpnGatewayList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -51057,7 +56196,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { +func (c *ExternalVpnGatewaysListCall) Pages(ctx context.Context, f func(*ExternalVpnGatewayList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -51075,52 +56214,32 @@ func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) erro } } -// method id "compute.disks.removeResourcePolicies": +// method id "compute.externalVpnGateways.setLabels": -type DisksRemoveResourcePoliciesCall struct { - s *Service - project string - zone string - disk string - disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ExternalVpnGatewaysSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveResourcePolicies: Removes resource policies from a disk. -func (r *DisksService) RemoveResourcePolicies(project string, zone string, disk string, disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest) *DisksRemoveResourcePoliciesCall { - c := &DisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on an ExternalVpnGateway. 
To learn more +// about labels, read the Labeling Resources documentation. +func (r *ExternalVpnGatewaysService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ExternalVpnGatewaysSetLabelsCall { + c := &ExternalVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksremoveresourcepoliciesrequest = disksremoveresourcepoliciesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksRemoveResourcePoliciesCall) RequestId(requestId string) *DisksRemoveResourcePoliciesCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksRemoveResourcePoliciesCall { +func (c *ExternalVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51128,35 +56247,35 @@ func (c *DisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksRem // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksRemoveResourcePoliciesCall) Context(ctx context.Context) *DisksRemoveResourcePoliciesCall { +func (c *ExternalVpnGatewaysSetLabelsCall) Context(ctx context.Context) *ExternalVpnGatewaysSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
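// Sketch of compute.externalVpnGateways.setLabels; not taken from the vendored
// sources. Labeling follows the usual Compute Engine read-modify-write pattern:
// fetch the gateway, reuse its current labelFingerprint, and send back the full
// label map. svc is assumed to be a *compute.Service built as in the earlier
// sketch; the package, function, and resource names are only for illustration.
package examples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func setGatewayEnvLabel(ctx context.Context, svc *compute.Service, project, gateway, env string) error {
	gw, err := svc.ExternalVpnGateways.Get(project, gateway).Context(ctx).Do()
	if err != nil {
		return err
	}
	labels := gw.Labels
	if labels == nil {
		labels = map[string]string{}
	}
	labels["environment"] = env
	req := &compute.GlobalSetLabelsRequest{
		LabelFingerprint: gw.LabelFingerprint, // guards against concurrent label edits
		Labels:           labels,
	}
	op, err := svc.ExternalVpnGateways.SetLabels(project, gateway, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("operation %s is %s\n", op.Name, op.Status)
	return nil
}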
-func (c *DisksRemoveResourcePoliciesCall) Header() http.Header { +func (c *ExternalVpnGatewaysSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksremoveresourcepoliciesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -51164,21 +56283,20 @@ func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.removeResourcePolicies" call. +// Do executes the "compute.externalVpnGateways.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ExternalVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51209,22 +56327,14 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Removes resource policies from a disk.", + // "description": "Sets the labels on an ExternalVpnGateway. To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.disks.removeResourcePolicies", + // "id": "compute.externalVpnGateways.setLabels", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "resource" // ], // "parameters": { - // "disk": { - // "description": "The disk name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -51232,22 +56342,17 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies", + // "path": "{project}/global/externalVpnGateways/{resource}/setLabels", // "request": { - // "$ref": "DisksRemoveResourcePoliciesRequest" + // "$ref": "GlobalSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -51260,53 +56365,32 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.disks.resize": +// method id "compute.externalVpnGateways.testIamPermissions": -type DisksResizeCall struct { - s *Service - project string - zone string - disk string - disksresizerequest *DisksResizeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ExternalVpnGatewaysTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the specified persistent disk. You can only increase -// the size of the disk. -func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { - c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ExternalVpnGatewaysService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ExternalVpnGatewaysTestIamPermissionsCall { + c := &ExternalVpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksresizerequest = disksresizerequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
-// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51314,35 +56398,35 @@ func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Context(ctx context.Context) *ExternalVpnGatewaysTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksResizeCall) Header() http.Header { +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -51350,21 +56434,20 @@ func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.externalVpnGateways.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51383,7 +56466,7 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -51395,22 +56478,14 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.disks.resize", + // "id": "compute.externalVpnGateways.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "resource" // ], // "parameters": { - // "disk": { - // "description": "The name of the persistent disk.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -51418,62 +56493,73 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/resize", + // "path": "{project}/global/externalVpnGateways/{resource}/testIamPermissions", // "request": { - // "$ref": "DisksResizeRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.setIamPolicy": +// method id "compute.firewalls.delete": -type DisksSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsDeleteCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *DisksService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *DisksSetIamPolicyCall { - c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified firewall. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete +func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { + c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + c.firewall = firewall + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyCall { +func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51481,57 +56567,51 @@ func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksSetIamPolicyCall) Context(ctx context.Context) *DisksSetIamPolicyCall { +func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksSetIamPolicyCall) Header() http.Header { +func (c *FirewallsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "resource": c.resource, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.firewalls.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51550,7 +56630,7 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -51562,43 +56642,37 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.disks.setIamPolicy", + // "description": "Deletes the specified firewall.", + // "httpMethod": "DELETE", + // "id": "compute.firewalls.delete", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "firewall" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "firewall": { + // "description": "Name of the firewall rule to delete.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/setIamPolicy", - // "request": { - // "$ref": "ZoneSetPolicyRequest" - // }, + // "path": "{project}/global/firewalls/{firewall}", // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -51608,538 +56682,24 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } -// method id "compute.disks.setLabels": +// method id "compute.firewalls.get": -type DisksSetLabelsCall struct { - s *Service - project string - zone string - resource string - zonesetlabelsrequest *ZoneSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsGetCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on a disk. To learn more about labels, -// read the Labeling Resources documentation. -func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { - c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified firewall. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get +func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { + c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetlabelsrequest = zonesetlabelsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
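// Sketch of compute.firewalls.delete using the requestId option documented
// above; not taken from the vendored sources. A client-generated UUID makes a
// retried DELETE idempotent: the server ignores a second request that carries
// the same ID once the first has completed. svc is assumed to be a
// *compute.Service built as in the earlier sketch; the package and function
// names and the fixed UUID are only for illustration.
package examples

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func deleteFirewall(ctx context.Context, svc *compute.Service, project, firewall string) error {
	// A real caller would generate a fresh UUID per logical request
	// (for example with github.com/google/uuid) and reuse it on retries.
	const requestID = "3d6f4b7e-9c1a-4f7e-8a2b-0c5d1e2f3a4b"
	op, err := svc.Firewalls.Delete(project, firewall).RequestId(requestID).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Poll the returned global Operation until it reaches DONE.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		if op, err = svc.GlobalOperations.Get(project, op.Name).Context(ctx).Do(); err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("deleting firewall %s failed: %+v", firewall, op.Error.Errors)
	}
	return nil
}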
-func (c *DisksSetLabelsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.disks.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.disks.setLabels", - // "parameterOrder": [ - // "project", - // "zone", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/disks/{resource}/setLabels", - // "request": { - // "$ref": "ZoneSetLabelsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.disks.testIamPermissions": - -type DisksTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { - c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *DisksTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.disks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.disks.testIamPermissions", - // "parameterOrder": [ - // "project", - // "zone", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.firewalls.delete": - -type FirewallsDeleteCall struct { - s *Service - project string - firewall string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified firewall. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete -func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { - c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.firewall = firewall - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *FirewallsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewalls.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified firewall.", - // "httpMethod": "DELETE", - // "id": "compute.firewalls.delete", - // "parameterOrder": [ - // "project", - // "firewall" - // ], - // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/firewalls/{firewall}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.firewalls.get": - -type FirewallsGetCall struct { - s *Service - project string - firewall string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified firewall. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get -func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { - c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.firewall = firewall + c.firewall = firewall return c } @@ -53595,7 +58155,7 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "forwardingRule": { // "description": "Name of the ForwardingRule resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -53766,7 +58326,7 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu // "forwardingRule": { // "description": "Name of the ForwardingRule resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -54387,382 +58947,382 @@ func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "forwardingRule": { // "description": "Name of the ForwardingRule resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", - // "request": { - // "$ref": "ForwardingRule" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.forwardingRules.setLabels": - -type ForwardingRulesSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetLabels: Sets the labels on the specified resource. To learn more -// about labels, read the Labeling Resources documentation. 
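The requestId parameter that these doc comments keep describing is the client-side idempotency key for the mutating calls in this file: retrying with the same UUID makes the server ignore the duplicate. The sketch below is not part of the diff; it is a rough usage example assuming the vendored package is google.golang.org/api/compute/v1, that compute.NewService and github.com/google/uuid are available, and that the ForwardingRule struct carries a LabelFingerprint field (the usual labels pattern); project, region, and resource names are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/uuid"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials; older revisions of
	// the package expose compute.New(*http.Client) instead.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// The label fingerprint must come from a prior Get so the server can
	// detect concurrent label modifications.
	fr, err := svc.ForwardingRules.Get("my-project", "us-central1", "my-rule").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	req := &compute.RegionSetLabelsRequest{
		LabelFingerprint: fr.LabelFingerprint,
		Labels:           map[string]string{"env": "prod"},
	}

	// A fixed UUID makes retries of this exact mutation idempotent: the server
	// ignores a second request carrying the same requestId.
	requestID := uuid.New().String()
	op, err := svc.ForwardingRules.SetLabels("my-project", "us-central1", "my-rule", req).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, op.Status)
}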
-func (r *ForwardingRulesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *ForwardingRulesSetLabelsCall { - c := &ForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesSetLabelsCall) RequestId(requestId string) *ForwardingRulesSetLabelsCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *ForwardingRulesSetLabelsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ForwardingRulesSetLabelsCall) Context(ctx context.Context) *ForwardingRulesSetLabelsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ForwardingRulesSetLabelsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/setLabels") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.forwardingRules.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.setLabels", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/forwardingRules/{resource}/setLabels", - // "request": { - // "$ref": "RegionSetLabelsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.forwardingRules.setTarget": - -type ForwardingRulesSetTargetCall struct { - s *Service - project string - region string - forwardingRule string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetTarget: Changes target URL for forwarding rule. The new target -// should be of the same type as the old target. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget -func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { - c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.forwardingRule = forwardingRule - c.targetreference = targetreference - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ForwardingRulesSetTargetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.forwardingRules.setTarget" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.setTarget", - // "parameterOrder": [ - // "project", - // "region", - // "forwardingRule" - // ], - // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "request": { + // "$ref": "ForwardingRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.forwardingRules.setLabels": + +type ForwardingRulesSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on the specified resource. 
To learn more +// about labels, read the Labeling Resources documentation. +func (r *ForwardingRulesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *ForwardingRulesSetLabelsCall { + c := &ForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesSetLabelsCall) RequestId(requestId string) *ForwardingRulesSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *ForwardingRulesSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ForwardingRulesSetLabelsCall) Context(ctx context.Context) *ForwardingRulesSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ForwardingRulesSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.forwardingRules.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.forwardingRules.setTarget": + +type ForwardingRulesSetTargetCall struct { + s *Service + project string + region string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetTarget: Changes target URL for forwarding rule. The new target +// should be of the same type as the old target. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget +func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { + c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingRule = forwardingRule + c.targetreference = targetreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ForwardingRulesSetTargetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.forwardingRules.setTarget" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.setTarget", + // "parameterOrder": [ + // "project", + // "region", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, // "project": { // "description": "Project ID for this request.", @@ -55098,7 +59658,7 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "address": { // "description": "Name of the address resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -55259,7 +59819,7 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err // "address": { // "description": "Name of the address resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -56140,7 +60700,7 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "forwardingRule": { // "description": "Name of the ForwardingRule resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -56301,7 +60861,7 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar // "forwardingRule": { // "description": "Name of the ForwardingRule resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -56889,7 +61449,7 @@ func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "forwardingRule": { // "description": "Name of the ForwardingRule resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // 
"required": true, // "type": "string" // }, @@ -57216,7 +61776,7 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* // "forwardingRule": { // "description": "Name of the ForwardingRule resource in which target is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -57400,156 +61960,108 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallO } -// method id "compute.globalOperations.aggregatedList": +// method id "compute.globalNetworkEndpointGroups.attachNetworkEndpoints": -type GlobalOperationsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall struct { + s *Service + project string + networkEndpointGroup string + globalnetworkendpointgroupsattachendpointsrequest *GlobalNetworkEndpointGroupsAttachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of all operations. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList -func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { - c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AttachNetworkEndpoints: Attach a network endpoint to the specified +// network endpoint group. +func (r *GlobalNetworkEndpointGroupsService) AttachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsattachendpointsrequest *GlobalNetworkEndpointGroupsAttachEndpointsRequest) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { + c := &GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.networkEndpointGroup = networkEndpointGroup + c.globalnetworkendpointgroupsattachendpointsrequest = globalnetworkendpointgroupsattachendpointsrequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
-func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsAggregatedListCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalnetworkendpointgroupsattachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.aggregatedList" call. -// Exactly one of *OperationAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *OperationAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { +// Do executes the "compute.globalNetworkEndpointGroups.attachNetworkEndpoints" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
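For the newly added globalNetworkEndpointGroups.attachNetworkEndpoints call, error handling follows the convention spelled out in the Do doc comment: any non-2xx response surfaces as a *googleapi.Error. The sketch below is an assumption-laden illustration, not part of the diff: the GlobalNetworkEndpointGroupsAttachEndpointsRequest type name comes from the hunk, but its NetworkEndpoints field and the Fqdn/Port fields on NetworkEndpoint (the internet-NEG case) are assumed generated shapes.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Field names on the request/endpoint structs are assumptions based on the
	// usual generated shapes; adjust to the vendored revision.
	req := &compute.GlobalNetworkEndpointGroupsAttachEndpointsRequest{
		NetworkEndpoints: []*compute.NetworkEndpoint{
			{Fqdn: "backend.example.com", Port: 443},
		},
	}

	op, err := svc.GlobalNetworkEndpointGroups.
		AttachNetworkEndpoints("my-project", "my-internet-neg", req).
		Context(ctx).
		Do()
	if err != nil {
		// Non-2xx responses come back as *googleapi.Error with the HTTP status.
		var gerr *googleapi.Error
		if errors.As(err, &gerr) {
			log.Fatalf("attach failed: HTTP %d: %s", gerr.Code, gerr.Message)
		}
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, op.Status)
}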
+func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57568,7 +62080,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57580,34 +62092,18 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of all operations.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.aggregatedList", + // "description": "Attach a network endpoint to the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.globalNetworkEndpointGroups.attachNetworkEndpoints", // "parameterOrder": [ - // "project" + // "project", + // "networkEndpointGroup" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -57616,66 +62112,71 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/operations", + // "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "request": { + // "$ref": "GlobalNetworkEndpointGroupsAttachEndpointsRequest" + // }, // "response": { - // "$ref": "OperationAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} +// method id "compute.globalNetworkEndpointGroups.delete": -// method id "compute.globalOperations.delete": - -type GlobalOperationsDeleteCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsDeleteCall struct { + s *Service + project string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Operations resource. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete -func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { - c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network endpoint group.Note that the +// NEG cannot be deleted if there are backend services referencing it. +func (r *GlobalNetworkEndpointGroupsService) Delete(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsDeleteCall { + c := &GlobalNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.networkEndpointGroup = networkEndpointGroup + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalNetworkEndpointGroupsDeleteCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { +func (c *GlobalNetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57683,21 +62184,21 @@ func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperati // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { +func (c *GlobalNetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
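Reviewer note, not part of the vendored file: unlike the old globalOperations.delete wrapper it replaces at this spot, the NEG delete call accepts a requestId and returns an Operation to poll. A hedged sketch of the call site, assuming the generated Service exposes a GlobalNetworkEndpointGroups field as is usual for this client.

// Sketch only. Assumed imports: "context", compute "google.golang.org/api/compute/v1".
func deleteGlobalNEG(ctx context.Context, svc *compute.Service, project, neg, requestID string) (*compute.Operation, error) {
	// Per the doc comment above, the NEG must no longer be referenced by any
	// backend service, and the returned Operation should be polled to completion.
	return svc.GlobalNetworkEndpointGroups.
		Delete(project, neg).
		RequestId(requestID).
		Context(ctx).
		Do()
}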
-func (c *GlobalOperationsDeleteCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -57706,7 +62207,7 @@ func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -57714,37 +62215,61 @@ func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.delete" call. -func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "compute.globalNetworkEndpointGroups.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return nil, err } - return nil + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Deletes the specified Operations resource.", + // "description": "Deletes the specified network endpoint group.Note that the NEG cannot be deleted if there are backend services referencing it.", // "httpMethod": "DELETE", - // "id": "compute.globalOperations.delete", + // "id": "compute.globalNetworkEndpointGroups.delete", // "parameterOrder": [ // "project", - // "operation" + // "networkEndpointGroup" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group to delete. 
It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -57754,9 +62279,17 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/operations/{operation}", + // "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}", + // "response": { + // "$ref": "Operation" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute" @@ -57765,97 +62298,108 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } -// method id "compute.globalOperations.get": +// method id "compute.globalNetworkEndpointGroups.detachNetworkEndpoints": -type GlobalOperationsGetCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall struct { + s *Service + project string + networkEndpointGroup string + globalnetworkendpointgroupsdetachendpointsrequest *GlobalNetworkEndpointGroupsDetachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Retrieves the specified Operations resource. Gets a list of -// operations by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get -func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { - c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DetachNetworkEndpoints: Detach the network endpoint from the +// specified network endpoint group. +func (r *GlobalNetworkEndpointGroupsService) DetachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsdetachendpointsrequest *GlobalNetworkEndpointGroupsDetachEndpointsRequest) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { + c := &GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.networkEndpointGroup = networkEndpointGroup + c.globalnetworkendpointgroupsdetachendpointsrequest = globalnetworkendpointgroupsdetachendpointsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsGetCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalnetworkendpointgroupsdetachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.get" call. +// Do executes the "compute.globalNetworkEndpointGroups.detachNetworkEndpoints" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57886,18 +62430,17 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified Operations resource. Gets a list of operations by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.get", + // "description": "Detach the network endpoint from the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.globalNetworkEndpointGroups.detachNetworkEndpoints", // "parameterOrder": [ // "project", - // "operation" + // "networkEndpointGroup" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -57907,108 +62450,53 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/operations/{operation}", + // "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "request": { + // "$ref": "GlobalNetworkEndpointGroupsDetachEndpointsRequest" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalOperations.list": +// method id "compute.globalNetworkEndpointGroups.get": -type GlobalOperationsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsGetCall struct { + s *Service + project string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of Operation resources contained within the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list -func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { - c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network endpoint group. Gets a list of +// available network endpoint groups by making a list() request. +func (r *GlobalNetworkEndpointGroupsService) Get(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsGetCall { + c := &GlobalNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.networkEndpointGroup = networkEndpointGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { +func (c *GlobalNetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58018,7 +62506,7 @@ func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperation // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { +func (c *GlobalNetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *GlobalNetworkEndpointGroupsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -58026,21 +62514,21 @@ func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperatio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { +func (c *GlobalNetworkEndpointGroupsGetCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
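Reviewer note, not part of the vendored file: the new Get call keeps the ifNoneMatch_ plumbing, so callers can do conditional fetches and detect "not modified" with googleapi.IsNotModified. A sketch under the assumption that the caller saved an ETag from a previous response header.

// Sketch only. Assumed imports: "context", compute "google.golang.org/api/compute/v1",
// "google.golang.org/api/googleapi".
func getNEGIfChanged(ctx context.Context, svc *compute.Service, project, neg, lastETag string) (*compute.NetworkEndpointGroup, error) {
	g, err := svc.GlobalNetworkEndpointGroups.
		Get(project, neg).
		IfNoneMatch(lastETag). // ETag captured from an earlier response
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// Unchanged since lastETag; keep using the cached copy.
		return nil, nil
	}
	return g, err
}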
-func (c *GlobalOperationsListCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -58052,7 +62540,7 @@ func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -58060,19 +62548,20 @@ func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalNetworkEndpointGroups.get" call. +// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroup.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +func (c *GlobalNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58091,7 +62580,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationList{ + ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58103,204 +62592,17 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified project.", + // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", // "httpMethod": "GET", - // "id": "compute.globalOperations.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/operations", - // "response": { - // "$ref": "OperationList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalOperations.wait": - -type GlobalOperationsWaitCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Wait: Waits for the specified Operations resource until it is done or -// timeout, and retrieves the specified Operations resource. 1. -// Immediately returns when the operation is already done. 2. Waits for -// no more than the default deadline (2 minutes, subject to change) and -// then returns the current state of the operation, which may be DONE or -// still in progress. 3. Is best-effort: a. The server can wait less -// than the default deadline or zero seconds, in overload situations. b. -// There is no guarantee that the operation is actually done when -// returns. 4. User should be prepared to retry if the operation is not -// DONE. -func (r *GlobalOperationsService) Wait(project string, operation string) *GlobalOperationsWaitCall { - c := &GlobalOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.operation = operation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalOperationsWaitCall) Fields(s ...googleapi.Field) *GlobalOperationsWaitCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalOperationsWaitCall) Context(ctx context.Context) *GlobalOperationsWaitCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalOperationsWaitCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalOperationsWaitCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}/wait") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalOperations.wait" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
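Reviewer note, not part of the vendored file: the wait doc quoted above stresses that compute.globalOperations.wait is best-effort (roughly two minutes per call, with no guarantee the operation is done), so callers loop until the operation reports DONE. A sketch of that retry loop, assuming GlobalOperations.Wait remains available elsewhere in this package and that Operation exposes Name, Status, and Error as in the rest of this file.

// Sketch only. Assumed imports: "context", "fmt", compute "google.golang.org/api/compute/v1".
func waitUntilDone(ctx context.Context, svc *compute.Service, project string, op *compute.Operation) error {
	for op.Status != "DONE" {
		var err error
		// Each Wait call may return early; re-check Status and call again.
		op, err = svc.GlobalOperations.Wait(project, op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil && len(op.Error.Errors) > 0 {
		return fmt.Errorf("operation %s failed: %s", op.Name, op.Error.Errors[0].Message)
	}
	return nil
}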
-func (c *GlobalOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Waits for the specified Operations resource until it is done or timeout, and retrieves the specified Operations resource. 1. Immediately returns when the operation is already done. 2. Waits for no more than the default deadline (2 minutes, subject to change) and then returns the current state of the operation, which may be DONE or still in progress. 3. Is best-effort: a. The server can wait less than the default deadline or zero seconds, in overload situations. b. There is no guarantee that the operation is actually done when returns. 4. User should be prepared to retry if the operation is not DONE.", - // "httpMethod": "POST", - // "id": "compute.globalOperations.wait", + // "id": "compute.globalNetworkEndpointGroups.get", // "parameterOrder": [ // "project", - // "operation" + // "networkEndpointGroup" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -58312,9 +62614,9 @@ func (c *GlobalOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/operations/{operation}/wait", + // "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkEndpointGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -58325,34 +62627,49 @@ func (c *GlobalOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.globalOrganizationOperations.delete": +// method id "compute.globalNetworkEndpointGroups.insert": -type GlobalOrganizationOperationsDeleteCall struct { - s *Service - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsInsertCall struct { + s *Service + project string + networkendpointgroup *NetworkEndpointGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Operations resource. -func (r *GlobalOrganizationOperationsService) Delete(operation string) *GlobalOrganizationOperationsDeleteCall { - c := &GlobalOrganizationOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.operation = operation +// Insert: Creates a network endpoint group in the specified project +// using the parameters that are included in the request. 
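Reviewer note, not part of the vendored file: a sketch of creating a global NEG through the new Insert call. The NetworkEndpointType value and the Name/DefaultPort fields are assumptions for an internet NEG, not taken from this diff.

// Sketch only. Assumed imports: "context", compute "google.golang.org/api/compute/v1".
func createGlobalNEG(ctx context.Context, svc *compute.Service, project, requestID string) (*compute.Operation, error) {
	neg := &compute.NetworkEndpointGroup{
		Name:                "external-backends",  // hypothetical name
		NetworkEndpointType: "INTERNET_FQDN_PORT", // assumed enum value for global NEGs
		DefaultPort:         443,
	}
	return svc.GlobalNetworkEndpointGroups.
		Insert(project, neg).
		RequestId(requestID). // same idempotency contract documented above
		Context(ctx).
		Do()
}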
+func (r *GlobalNetworkEndpointGroupsService) Insert(project string, networkendpointgroup *NetworkEndpointGroup) *GlobalNetworkEndpointGroupsInsertCall { + c := &GlobalNetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.networkendpointgroup = networkendpointgroup return c } -// ParentId sets the optional parameter "parentId": Parent ID for this -// request. -func (c *GlobalOrganizationOperationsDeleteCall) ParentId(parentId string) *GlobalOrganizationOperationsDeleteCall { - c.urlParams_.Set("parentId", parentId) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalNetworkEndpointGroupsInsertCall) RequestId(requestId string) *GlobalNetworkEndpointGroupsInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOrganizationOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsDeleteCall { +func (c *GlobalNetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58360,178 +62677,55 @@ func (c *GlobalOrganizationOperationsDeleteCall) Fields(s ...googleapi.Field) *G // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOrganizationOperationsDeleteCall) Context(ctx context.Context) *GlobalOrganizationOperationsDeleteCall { +func (c *GlobalNetworkEndpointGroupsInsertCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOrganizationOperationsDeleteCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOrganizationOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations/{operation}") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) if err != nil { return nil, err } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalOrganizationOperations.delete" call. -func (c *GlobalOrganizationOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Deletes the specified Operations resource.", - // "httpMethod": "DELETE", - // "id": "compute.globalOrganizationOperations.delete", - // "parameterOrder": [ - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "parentId": { - // "description": "Parent ID for this request.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "locations/global/operations/{operation}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.globalOrganizationOperations.get": - -type GlobalOrganizationOperationsGetCall struct { - s *Service - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Retrieves the specified Operations resource. Gets a list of -// operations by making a list() request. -func (r *GlobalOrganizationOperationsService) Get(operation string) *GlobalOrganizationOperationsGetCall { - c := &GlobalOrganizationOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.operation = operation - return c -} - -// ParentId sets the optional parameter "parentId": Parent ID for this -// request. -func (c *GlobalOrganizationOperationsGetCall) ParentId(parentId string) *GlobalOrganizationOperationsGetCall { - c.urlParams_.Set("parentId", parentId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalOrganizationOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOrganizationOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOrganizationOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *GlobalOrganizationOperationsGetCall) Context(ctx context.Context) *GlobalOrganizationOperationsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalOrganizationOperationsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalOrganizationOperationsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "operation": c.operation, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOrganizationOperations.get" call. +// Do executes the "compute.globalNetworkEndpointGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalOrganizationOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58562,53 +62756,57 @@ func (c *GlobalOrganizationOperationsGetCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves the specified Operations resource. Gets a list of operations by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.globalOrganizationOperations.get", + // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + // "httpMethod": "POST", + // "id": "compute.globalNetworkEndpointGroups.insert", // "parameterOrder": [ - // "operation" + // "project" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "parentId": { - // "description": "Parent ID for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "locations/global/operations/{operation}", + // "path": "{project}/global/networkEndpointGroups", + // "request": { + // "$ref": "NetworkEndpointGroup" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalOrganizationOperations.list": +// method id "compute.globalNetworkEndpointGroups.list": -type GlobalOrganizationOperationsListCall struct { +type GlobalNetworkEndpointGroupsListCall struct { s *Service + project string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of Operation resources contained within the -// specified organization. -func (r *GlobalOrganizationOperationsService) List() *GlobalOrganizationOperationsListCall { - c := &GlobalOrganizationOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of network endpoint groups that are located +// in the specified project. +func (r *GlobalNetworkEndpointGroupsService) List(project string) *GlobalNetworkEndpointGroupsListCall { + c := &GlobalNetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project return c } @@ -58634,7 +62832,7 @@ func (r *GlobalOrganizationOperationsService) List() *GlobalOrganizationOperatio // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *GlobalOrganizationOperationsListCall) Filter(filter string) *GlobalOrganizationOperationsListCall { +func (c *GlobalNetworkEndpointGroupsListCall) Filter(filter string) *GlobalNetworkEndpointGroupsListCall { c.urlParams_.Set("filter", filter) return c } @@ -58645,7 +62843,7 @@ func (c *GlobalOrganizationOperationsListCall) Filter(filter string) *GlobalOrga // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *GlobalOrganizationOperationsListCall) MaxResults(maxResults int64) *GlobalOrganizationOperationsListCall { +func (c *GlobalNetworkEndpointGroupsListCall) MaxResults(maxResults int64) *GlobalNetworkEndpointGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -58662,7 +62860,7 @@ func (c *GlobalOrganizationOperationsListCall) MaxResults(maxResults int64) *Glo // // Currently, only sorting by name or creationTimestamp desc is // supported. 
-func (c *GlobalOrganizationOperationsListCall) OrderBy(orderBy string) *GlobalOrganizationOperationsListCall { +func (c *GlobalNetworkEndpointGroupsListCall) OrderBy(orderBy string) *GlobalNetworkEndpointGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -58670,22 +62868,15 @@ func (c *GlobalOrganizationOperationsListCall) OrderBy(orderBy string) *GlobalOr // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *GlobalOrganizationOperationsListCall) PageToken(pageToken string) *GlobalOrganizationOperationsListCall { +func (c *GlobalNetworkEndpointGroupsListCall) PageToken(pageToken string) *GlobalNetworkEndpointGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// ParentId sets the optional parameter "parentId": Parent ID for this -// request. -func (c *GlobalOrganizationOperationsListCall) ParentId(parentId string) *GlobalOrganizationOperationsListCall { - c.urlParams_.Set("parentId", parentId) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOrganizationOperationsListCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsListCall { +func (c *GlobalNetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58695,7 +62886,7 @@ func (c *GlobalOrganizationOperationsListCall) Fields(s ...googleapi.Field) *Glo // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalOrganizationOperationsListCall) IfNoneMatch(entityTag string) *GlobalOrganizationOperationsListCall { +func (c *GlobalNetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *GlobalNetworkEndpointGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -58703,21 +62894,21 @@ func (c *GlobalOrganizationOperationsListCall) IfNoneMatch(entityTag string) *Gl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOrganizationOperationsListCall) Context(ctx context.Context) *GlobalOrganizationOperationsListCall { +func (c *GlobalNetworkEndpointGroupsListCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
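Reviewer note, not part of the vendored file: this hunk retargets the Filter, MaxResults, OrderBy, PageToken and Pages plumbing to the NEG list call, so pagination can be driven through Pages without handling nextPageToken by hand. A sketch; the filter expression is hypothetical.

// Sketch only. Assumed imports: "context", "log", compute "google.golang.org/api/compute/v1".
func listGlobalNEGs(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.GlobalNetworkEndpointGroups.List(project).
		Filter(`networkEndpointType = "INTERNET_FQDN_PORT"`). // hypothetical filter
		OrderBy("creationTimestamp desc").
		MaxResults(100)
	// Pages follows nextPageToken internally and resets paging when it returns.
	return call.Pages(ctx, func(page *compute.NetworkEndpointGroupList) error {
		for _, g := range page.Items {
			log.Printf("NEG %s (%s)", g.Name, g.NetworkEndpointType)
		}
		return nil
	})
}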
-func (c *GlobalOrganizationOperationsListCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOrganizationOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -58729,24 +62920,27 @@ func (c *GlobalOrganizationOperationsListCall) doRequest(alt string) (*http.Resp var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOrganizationOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalNetworkEndpointGroups.list" call. +// Exactly one of *NetworkEndpointGroupList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +func (c *GlobalNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58765,7 +62959,7 @@ func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationList{ + ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58777,9 +62971,12 @@ func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified organization.", + // "description": "Retrieves the list of network endpoint groups that are located in the specified project.", // "httpMethod": "GET", - // "id": "compute.globalOrganizationOperations.list", + // "id": "compute.globalNetworkEndpointGroups.list", + // "parameterOrder": [ + // "project" + // ], // "parameters": { // "filter": { // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", @@ -58804,15 +63001,17 @@ func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) // "location": "query", // "type": "string" // }, - // "parentId": { - // "description": "Parent ID for this request.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // } // }, - // "path": "locations/global/operations", + // "path": "{project}/global/networkEndpointGroups", // "response": { - // "$ref": "OperationList" + // "$ref": "NetworkEndpointGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -58826,7 +63025,7 @@ func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *GlobalOrganizationOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { +func (c *GlobalNetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -58844,43 +63043,93 @@ func (c *GlobalOrganizationOperationsListCall) Pages(ctx context.Context, f func } } -// method id "compute.globalOrganizationOperations.wait": +// method id "compute.globalNetworkEndpointGroups.listNetworkEndpoints": -type GlobalOrganizationOperationsWaitCall struct { - s *Service - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalNetworkEndpointGroupsListNetworkEndpointsCall struct { + s *Service + project string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Wait: Waits for the specified Operations resource until it is done or -// timeout, and retrieves the specified Operations resource. 1. -// Immediately returns when the operation is already done. 2. Waits for -// no more than the default deadline (2 minutes, subject to change) and -// then returns the current state of the operation, which may be DONE or -// still in progress. 3. Is best-effort: a. The server can wait less -// than the default deadline or zero seconds, in overload situations. b. -// There is no guarantee that the operation is actually done when -// returns. 4. 
User should be prepared to retry if the operation is not -// DONE. -func (r *GlobalOrganizationOperationsService) Wait(operation string) *GlobalOrganizationOperationsWaitCall { - c := &GlobalOrganizationOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.operation = operation +// ListNetworkEndpoints: Lists the network endpoints in the specified +// network endpoint group. +func (r *GlobalNetworkEndpointGroupsService) ListNetworkEndpoints(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c := &GlobalNetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.networkEndpointGroup = networkEndpointGroup return c } -// ParentId sets the optional parameter "parentId": Parent ID for this -// request. -func (c *GlobalOrganizationOperationsWaitCall) ParentId(parentId string) *GlobalOrganizationOperationsWaitCall { - c.urlParams_.Set("parentId", parentId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. 
+// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOrganizationOperationsWaitCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsWaitCall { +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58888,21 +63137,21 @@ func (c *GlobalOrganizationOperationsWaitCall) Fields(s ...googleapi.Field) *Glo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOrganizationOperationsWaitCall) Context(ctx context.Context) *GlobalOrganizationOperationsWaitCall { +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOrganizationOperationsWaitCall) Header() http.Header { +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOrganizationOperationsWaitCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -58911,7 +63160,7 @@ func (c *GlobalOrganizationOperationsWaitCall) doRequest(alt string) (*http.Resp var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations/{operation}/wait") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -58919,19 +63168,22 @@ func (c *GlobalOrganizationOperationsWaitCall) doRequest(alt string) (*http.Resp } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "operation": c.operation, + "project": c.project, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOrganizationOperations.wait" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalOrganizationOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.globalNetworkEndpointGroups.listNetworkEndpoints" call. +// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58950,7 +63202,7 @@ func (c *GlobalOrganizationOperationsWaitCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NetworkEndpointGroupsListNetworkEndpoints{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58962,29 +63214,54 @@ func (c *GlobalOrganizationOperationsWaitCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Waits for the specified Operations resource until it is done or timeout, and retrieves the specified Operations resource. 1. Immediately returns when the operation is already done. 2. Waits for no more than the default deadline (2 minutes, subject to change) and then returns the current state of the operation, which may be DONE or still in progress. 3. Is best-effort: a. The server can wait less than the default deadline or zero seconds, in overload situations. b. There is no guarantee that the operation is actually done when returns. 4. User should be prepared to retry if the operation is not DONE.", + // "description": "Lists the network endpoints in the specified network endpoint group.", // "httpMethod": "POST", - // "id": "compute.globalOrganizationOperations.wait", + // "id": "compute.globalNetworkEndpointGroups.listNetworkEndpoints", // "parameterOrder": [ - // "operation" + // "project", + // "networkEndpointGroup" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "parentId": { - // "description": "Parent ID for this request.", + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" // } // }, - // "path": "locations/global/operations/{operation}/wait", + // "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -58995,9 +63272,30 @@ func (c *GlobalOrganizationOperationsWaitCall) Do(opts ...googleapi.CallOption) } -// method id "compute.healthChecks.aggregatedList": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type HealthChecksAggregatedListCall struct { +// method id "compute.globalOperations.aggregatedList": + +type GlobalOperationsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -59006,10 +63304,10 @@ type HealthChecksAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves the list of all HealthCheck resources, -// regional and global, available to the specified project. -func (r *HealthChecksService) AggregatedList(project string) *HealthChecksAggregatedListCall { - c := &HealthChecksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of all operations. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList +func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { + c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -59036,7 +63334,7 @@ func (r *HealthChecksService) AggregatedList(project string) *HealthChecksAggreg // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *HealthChecksAggregatedListCall) Filter(filter string) *HealthChecksAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -59047,7 +63345,7 @@ func (c *HealthChecksAggregatedListCall) Filter(filter string) *HealthChecksAggr // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *HealthChecksAggregatedListCall) MaxResults(maxResults int64) *HealthChecksAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -59064,7 +63362,7 @@ func (c *HealthChecksAggregatedListCall) MaxResults(maxResults int64) *HealthChe // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *HealthChecksAggregatedListCall) OrderBy(orderBy string) *HealthChecksAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -59072,7 +63370,7 @@ func (c *HealthChecksAggregatedListCall) OrderBy(orderBy string) *HealthChecksAg // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
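With the ListNetworkEndpoints call and its Pages helper now in place above, a caller can walk every endpoint in a global NEG without handling nextPageToken by hand. A sketch under the same assumptions as before; the Items and NetworkEndpoint field names are taken from the published API surface, not from this hunk:

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// PrintEndpoints walks every page of endpoints in a global NEG using the
// generated Pages helper, which does the nextPageToken bookkeeping for us.
func PrintEndpoints(ctx context.Context, svc *compute.Service, project, neg string) error {
	call := svc.GlobalNetworkEndpointGroups.ListNetworkEndpoints(project, neg).MaxResults(500)
	return call.Pages(ctx, func(page *compute.NetworkEndpointGroupsListNetworkEndpoints) error {
		for _, item := range page.Items { // assumed field layout; check the vendored types
			if item.NetworkEndpoint != nil {
				fmt.Printf("%s:%d\n", item.NetworkEndpoint.IpAddress, item.NetworkEndpoint.Port)
			}
		}
		return nil
	})
}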
-func (c *HealthChecksAggregatedListCall) PageToken(pageToken string) *HealthChecksAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -59080,7 +63378,7 @@ func (c *HealthChecksAggregatedListCall) PageToken(pageToken string) *HealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksAggregatedListCall) Fields(s ...googleapi.Field) *HealthChecksAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59090,7 +63388,7 @@ func (c *HealthChecksAggregatedListCall) Fields(s ...googleapi.Field) *HealthChe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HealthChecksAggregatedListCall) IfNoneMatch(entityTag string) *HealthChecksAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -59098,21 +63396,21 @@ func (c *HealthChecksAggregatedListCall) IfNoneMatch(entityTag string) *HealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksAggregatedListCall) Context(ctx context.Context) *HealthChecksAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksAggregatedListCall) Header() http.Header { +func (c *GlobalOperationsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -59124,7 +63422,7 @@ func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -59137,14 +63435,14 @@ func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.aggregatedList" call. -// Exactly one of *HealthChecksAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *HealthChecksAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalOperations.aggregatedList" call. +// Exactly one of *OperationAggregatedList or error will be non-nil. 
Any +// non-2xx status code is an error. Response headers are in either +// *OperationAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*HealthChecksAggregatedList, error) { +func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59163,7 +63461,7 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthChecksAggregatedList{ + ret := &OperationAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59175,9 +63473,9 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal } return ret, nil // { - // "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project.", + // "description": "Retrieves an aggregated list of all operations.", // "httpMethod": "GET", - // "id": "compute.healthChecks.aggregatedList", + // "id": "compute.globalOperations.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -59206,16 +63504,16 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal // "type": "string" // }, // "project": { - // "description": "Name of the project scoping this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/healthChecks", + // "path": "{project}/aggregated/operations", // "response": { - // "$ref": "HealthChecksAggregatedList" + // "$ref": "OperationAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -59229,7 +63527,7 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HealthChecksAggregatedListCall) Pages(ctx context.Context, f func(*HealthChecksAggregatedList) error) error { +func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -59247,48 +63545,30 @@ func (c *HealthChecksAggregatedListCall) Pages(ctx context.Context, f func(*Heal } } -// method id "compute.healthChecks.delete": +// method id "compute.globalOperations.delete": -type HealthChecksDeleteCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOperationsDeleteCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HealthCheck resource. 
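The aggregatedList call defined above returns operations grouped per scope (global, each region, each zone). A sketch of paging through the aggregated result, assuming the usual Items map and Operations slice on OperationAggregatedList, neither of which is shown in this hunk:

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// ListAllOperations pages through the aggregated operations list and prints
// each operation with the scope it belongs to.
func ListAllOperations(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.GlobalOperations.AggregatedList(project).OrderBy("creationTimestamp desc")
	return call.Pages(ctx, func(page *compute.OperationAggregatedList) error {
		for scope, scoped := range page.Items { // assumed: map keyed by scope name
			for _, op := range scoped.Operations {
				fmt.Printf("%s\t%s\t%s\n", scope, op.Name, op.Status)
			}
		}
		return nil
	})
}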
-func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { - c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete +func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { + c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { +func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59296,21 +63576,21 @@ func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDelet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { +func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksDeleteCall) Header() http.Header { +func (c *GlobalOperationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -59319,7 +63599,7 @@ func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -59327,62 +63607,37 @@ func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.globalOperations.delete" call. +func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return err } - return ret, nil + return nil // { - // "description": "Deletes the specified HealthCheck resource.", + // "description": "Deletes the specified Operations resource.", // "httpMethod": "DELETE", - // "id": "compute.healthChecks.delete", + // "id": "compute.globalOperations.delete", // "parameterOrder": [ // "project", - // "healthCheck" + // "operation" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to delete.", + // "operation": { + // "description": "Name of the Operations resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -59392,17 +63647,9 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", - // "response": { - // "$ref": "Operation" - // }, + // "path": "{project}/global/operations/{operation}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute" @@ -59411,31 +63658,32 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.healthChecks.get": +// method id "compute.globalOperations.get": -type HealthChecksGetCall struct { +type GlobalOperationsGetCall struct { s *Service project string - healthCheck string + operation string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. -func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { - c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Retrieves the specified Operations resource. Gets a list of +// operations by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get +func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { + c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { +func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59445,7 +63693,7 @@ func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { +func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -59453,21 +63701,21 @@ func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { +func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
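The IfNoneMatch setter above enables conditional reads against the ETag the server returned on a previous fetch; combined with googleapi.IsNotModified (and the Do method that follows), it lets a caller skip re-decoding an unchanged operation. A sketch that assumes the server supplies an Etag header and that Operation carries the usual embedded ServerResponse, neither of which is spelled out in this hunk:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
	"google.golang.org/api/googleapi"
)

// RefreshOperation re-fetches an operation only if it changed since the last
// fetch; on http.StatusNotModified the cached copy is reused.
func RefreshOperation(ctx context.Context, svc *compute.Service, project string, cached *compute.Operation) (*compute.Operation, error) {
	call := svc.GlobalOperations.Get(project, cached.Name).Context(ctx)
	if etag := cached.ServerResponse.Header.Get("Etag"); etag != "" {
		call = call.IfNoneMatch(etag)
	}
	op, err := call.Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // unchanged since the previous fetch
	}
	if err != nil {
		return nil, err
	}
	return op, nil
}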
-func (c *HealthChecksGetCall) Header() http.Header { +func (c *GlobalOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -59479,7 +63727,7 @@ func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -59487,20 +63735,20 @@ func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.get" call. -// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx +// Do executes the "compute.globalOperations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *HealthCheck.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { +func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59519,7 +63767,7 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheck{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59531,18 +63779,18 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + // "description": "Retrieves the specified Operations resource. 
Gets a list of operations by making a list() request.", // "httpMethod": "GET", - // "id": "compute.healthChecks.get", + // "id": "compute.globalOperations.get", // "parameterOrder": [ // "project", - // "healthCheck" + // "operation" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to return.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -59554,9 +63802,9 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/operations/{operation}", // "response": { - // "$ref": "HealthCheck" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -59567,185 +63815,22 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } -// method id "compute.healthChecks.insert": +// method id "compute.globalOperations.list": -type HealthChecksInsertCall struct { - s *Service - project string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOperationsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { - c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.healthcheck = healthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *HealthChecksInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.healthChecks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.healthChecks.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/healthChecks", - // "request": { - // "$ref": "HealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.healthChecks.list": - -type HealthChecksListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of HealthCheck resources available to the -// specified project. -func (r *HealthChecksService) List(project string) *HealthChecksListCall { - c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Operation resources contained within the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list +func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { + c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -59772,7 +63857,7 @@ func (r *HealthChecksService) List(project string) *HealthChecksListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { +func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { c.urlParams_.Set("filter", filter) return c } @@ -59783,7 +63868,7 @@ func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { +func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -59800,7 +63885,7 @@ func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCal // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { +func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -59808,7 +63893,7 @@ func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { +func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -59816,7 +63901,7 @@ func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
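A sketch of driving the new GlobalOperations.List call with the filter grammar documented above and the Pages helper that follows further below; the "status != DONE" filter value and the Items field are illustrative assumptions rather than anything stated in this hunk:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// PendingOperations collects project-wide global operations that have not
// finished yet, letting Pages handle the page-token plumbing.
func PendingOperations(ctx context.Context, svc *compute.Service, project string) ([]*compute.Operation, error) {
	var out []*compute.Operation
	err := svc.GlobalOperations.List(project).
		Filter("status != DONE"). // illustrative filter on the Operation status field
		OrderBy("creationTimestamp desc").
		Pages(ctx, func(page *compute.OperationList) error {
			out = append(out, page.Items...)
			return nil
		})
	return out, err
}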
-func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { +func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59826,7 +63911,7 @@ func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { +func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -59834,21 +63919,21 @@ func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { +func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksListCall) Header() http.Header { +func (c *GlobalOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -59860,7 +63945,7 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -59873,14 +63958,14 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.list" call. -// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// Do executes the "compute.globalOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *HealthCheckList.ServerResponse.Header or (if a response was returned +// *OperationList.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { +func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59899,7 +63984,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheckList{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59911,9 +63996,9 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis } return ret, nil // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project.", + // "description": "Retrieves a list of Operation resources contained within the specified project.", // "httpMethod": "GET", - // "id": "compute.healthChecks.list", + // "id": "compute.globalOperations.list", // "parameterOrder": [ // "project" // ], @@ -59949,9 +64034,9 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // "type": "string" // } // }, - // "path": "{project}/global/healthChecks", + // "path": "{project}/global/operations", // "response": { - // "$ref": "HealthCheckList" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -59965,7 +64050,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { +func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -59983,52 +64068,38 @@ func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckLis } } -// method id "compute.healthChecks.patch": +// method id "compute.globalOperations.wait": -type HealthChecksPatchCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOperationsWaitCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HealthCheck resource in the specified project using -// the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { - c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Wait: Waits for the specified Operations resource until it is done or +// timeout, and retrieves the specified Operations resource. 1. +// Immediately returns when the operation is already done. 2. Waits for +// no more than the default deadline (2 minutes, subject to change) and +// then returns the current state of the operation, which may be DONE or +// still in progress. 3. Is best-effort: a. The server can wait less +// than the default deadline or zero seconds, in overload situations. b. +// There is no guarantee that the operation is actually done when +// returns. 4. User should be prepared to retry if the operation is not +// DONE. 
+func (r *GlobalOperationsService) Wait(project string, operation string) *GlobalOperationsWaitCall { + c := &GlobalOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { - c.urlParams_.Set("requestId", requestId) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { +func (c *GlobalOperationsWaitCall) Fields(s ...googleapi.Field) *GlobalOperationsWaitCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60036,56 +64107,51 @@ func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { +func (c *GlobalOperationsWaitCall) Context(ctx context.Context) *GlobalOperationsWaitCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksPatchCall) Header() http.Header { +func (c *GlobalOperationsWaitCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}/wait") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.patch" call. +// Do executes the "compute.globalOperations.wait" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60116,18 +64182,18 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.healthChecks.patch", + // "description": "Waits for the specified Operations resource until it is done or timeout, and retrieves the specified Operations resource. 1. Immediately returns when the operation is already done. 2. Waits for no more than the default deadline (2 minutes, subject to change) and then returns the current state of the operation, which may be DONE or still in progress. 3. Is best-effort: a. The server can wait less than the default deadline or zero seconds, in overload situations. b. There is no guarantee that the operation is actually done when returns. 4. User should be prepared to retry if the operation is not DONE.", + // "httpMethod": "POST", + // "id": "compute.globalOperations.wait", // "parameterOrder": [ // "project", - // "healthCheck" + // "operation" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to patch.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -60137,54 +64203,49 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", - // "request": { - // "$ref": "HealthCheck" - // }, + // "path": "{project}/global/operations/{operation}/wait", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.healthChecks.testIamPermissions": +// method id "compute.globalOrganizationOperations.delete": -type HealthChecksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOrganizationOperationsDeleteCall struct { + s *Service + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *HealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HealthChecksTestIamPermissionsCall { - c := &HealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest +// Delete: Deletes the specified Operations resource. +func (r *GlobalOrganizationOperationsService) Delete(operation string) *GlobalOrganizationOperationsDeleteCall { + c := &GlobalOrganizationOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.operation = operation + return c +} + +// ParentId sets the optional parameter "parentId": Parent ID for this +// request. +func (c *GlobalOrganizationOperationsDeleteCall) ParentId(parentId string) *GlobalOrganizationOperationsDeleteCall { + c.urlParams_.Set("parentId", parentId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HealthChecksTestIamPermissionsCall { +func (c *GlobalOrganizationOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60192,227 +64253,178 @@ func (c *HealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *Healt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HealthChecksTestIamPermissionsCall { +func (c *GlobalOrganizationOperationsDeleteCall) Context(ctx context.Context) *GlobalOrganizationOperationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HealthChecksTestIamPermissionsCall) Header() http.Header { +func (c *GlobalOrganizationOperationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOrganizationOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.globalOrganizationOperations.delete" call. +func (c *GlobalOrganizationOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return err } - return ret, nil + return nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.healthChecks.testIamPermissions", + // "description": "Deletes the specified Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalOrganizationOperations.delete", // "parameterOrder": [ - // "project", - // "resource" + // "operation" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "operation": { + // "description": "Name of the Operations resource to delete.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "parentId": { + // "description": "Parent ID for this request.", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, + // "path": "locations/global/operations/{operation}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.healthChecks.update": +// method id "compute.globalOrganizationOperations.get": -type HealthChecksUpdateCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOrganizationOperationsGetCall struct { + s *Service + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { - c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck +// Get: Retrieves the specified Operations resource. Gets a list of +// operations by making a list() request. 
+func (r *GlobalOrganizationOperationsService) Get(operation string) *GlobalOrganizationOperationsGetCall { + c := &GlobalOrganizationOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.operation = operation return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { - c.urlParams_.Set("requestId", requestId) +// ParentId sets the optional parameter "parentId": Parent ID for this +// request. +func (c *GlobalOrganizationOperationsGetCall) ParentId(parentId string) *GlobalOrganizationOperationsGetCall { + c.urlParams_.Set("parentId", parentId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { +func (c *GlobalOrganizationOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalOrganizationOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOrganizationOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { +func (c *GlobalOrganizationOperationsGetCall) Context(ctx context.Context) *GlobalOrganizationOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HealthChecksUpdateCall) Header() http.Header { +func (c *GlobalOrganizationOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOrganizationOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.update" call. +// Do executes the "compute.globalOrganizationOperations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalOrganizationOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60443,240 +64455,130 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.healthChecks.update", + // "description": "Retrieves the specified Operations resource. 
Gets a list of operations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.globalOrganizationOperations.get", // "parameterOrder": [ - // "project", - // "healthCheck" + // "operation" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "parentId": { + // "description": "Parent ID for this request.", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", - // "request": { - // "$ref": "HealthCheck" - // }, + // "path": "locations/global/operations/{operation}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.httpHealthChecks.delete": +// method id "compute.globalOrganizationOperations.list": -type HttpHealthChecksDeleteCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOrganizationOperationsListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpHealthCheck resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete -func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { - c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpHealthCheck = httpHealthCheck +// List: Retrieves a list of Operation resources contained within the +// specified organization. +func (r *GlobalOrganizationOperationsService) List() *GlobalOrganizationOperationsListCall { + c := &GlobalOrganizationOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *GlobalOrganizationOperationsListCall) Filter(filter string) *GlobalOrganizationOperationsListCall { + c.urlParams_.Set("filter", filter) return c } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *GlobalOrganizationOperationsListCall) MaxResults(maxResults int64) *GlobalOrganizationOperationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { - c.ctx_ = ctx +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. 
+// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *GlobalOrganizationOperationsListCall) OrderBy(orderBy string) *GlobalOrganizationOperationsListCall { + c.urlParams_.Set("orderBy", orderBy) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpHealthChecksDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified HttpHealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.httpHealthChecks.delete", - // "parameterOrder": [ - // "project", - // "httpHealthCheck" - // ], - // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.httpHealthChecks.get": - -type HttpHealthChecksGetCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *GlobalOrganizationOperationsListCall) PageToken(pageToken string) *GlobalOrganizationOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c } -// Get: Returns the specified HttpHealthCheck resource. Gets a list of -// available HTTP health checks by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get -func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { - c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpHealthCheck = httpHealthCheck +// ParentId sets the optional parameter "parentId": Parent ID for this +// request. +func (c *GlobalOrganizationOperationsListCall) ParentId(parentId string) *GlobalOrganizationOperationsListCall { + c.urlParams_.Set("parentId", parentId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { +func (c *GlobalOrganizationOperationsListCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60686,7 +64588,7 @@ func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecks // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { +func (c *GlobalOrganizationOperationsListCall) IfNoneMatch(entityTag string) *GlobalOrganizationOperationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -60694,21 +64596,21 @@ func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthCheck // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { +func (c *GlobalOrganizationOperationsListCall) Context(ctx context.Context) *GlobalOrganizationOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksGetCall) Header() http.Header { +func (c *GlobalOrganizationOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOrganizationOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -60720,28 +64622,24 @@ func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, - }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.get" call. -// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx +// Do executes the "compute.globalOrganizationOperations.list" call. 
+// Exactly one of *OperationList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *HttpHealthCheck.ServerResponse.Header or (if a response was returned +// *OperationList.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { +func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60760,7 +64658,7 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheck{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60772,32 +64670,42 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } return ret, nil // { - // "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.", + // "description": "Retrieves a list of Operation resources contained within the specified organization.", // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.get", - // "parameterOrder": [ - // "project", - // "httpHealthCheck" - // ], + // "id": "compute.globalOrganizationOperations.list", // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "parentId": { + // "description": "Parent ID for this request.", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "locations/global/operations", // "response": { - // "$ref": "HttpHealthCheck" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -60808,50 +64716,64 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } -// method id "compute.httpHealthChecks.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalOrganizationOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type HttpHealthChecksInsertCall struct { - s *Service - project string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.globalOrganizationOperations.wait": + +type GlobalOrganizationOperationsWaitCall struct { + s *Service + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpHealthCheck resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert -func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { - c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httphealthcheck = httphealthcheck +// Wait: Waits for the specified Operations resource until it is done or +// timeout, and retrieves the specified Operations resource. 1. +// Immediately returns when the operation is already done. 2. 
Waits for +// no more than the default deadline (2 minutes, subject to change) and +// then returns the current state of the operation, which may be DONE or +// still in progress. 3. Is best-effort: a. The server can wait less +// than the default deadline or zero seconds, in overload situations. b. +// There is no guarantee that the operation is actually done when +// returns. 4. User should be prepared to retry if the operation is not +// DONE. +func (r *GlobalOrganizationOperationsService) Wait(operation string) *GlobalOrganizationOperationsWaitCall { + c := &GlobalOrganizationOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.operation = operation return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { - c.urlParams_.Set("requestId", requestId) +// ParentId sets the optional parameter "parentId": Parent ID for this +// request. +func (c *GlobalOrganizationOperationsWaitCall) ParentId(parentId string) *GlobalOrganizationOperationsWaitCall { + c.urlParams_.Set("parentId", parentId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { +func (c *GlobalOrganizationOperationsWaitCall) Fields(s ...googleapi.Field) *GlobalOrganizationOperationsWaitCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60859,35 +64781,30 @@ func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { +func (c *GlobalOrganizationOperationsWaitCall) Context(ctx context.Context) *GlobalOrganizationOperationsWaitCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpHealthChecksInsertCall) Header() http.Header { +func (c *GlobalOrganizationOperationsWaitCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOrganizationOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/operations/{operation}/wait") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -60895,19 +64812,19 @@ func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.insert" call. +// Do executes the "compute.globalOrganizationOperations.wait" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalOrganizationOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60938,44 +64855,42 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "description": "Waits for the specified Operations resource until it is done or timeout, and retrieves the specified Operations resource. 1. Immediately returns when the operation is already done. 2. Waits for no more than the default deadline (2 minutes, subject to change) and then returns the current state of the operation, which may be DONE or still in progress. 3. Is best-effort: a. The server can wait less than the default deadline or zero seconds, in overload situations. b. There is no guarantee that the operation is actually done when returns. 4. 
User should be prepared to retry if the operation is not DONE.", // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.insert", + // "id": "compute.globalOrganizationOperations.wait", // "parameterOrder": [ - // "project" + // "operation" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "parentId": { + // "description": "Parent ID for this request.", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", - // "request": { - // "$ref": "HttpHealthCheck" - // }, + // "path": "locations/global/operations/{operation}/wait", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.httpHealthChecks.list": +// method id "compute.healthChecks.aggregatedList": -type HttpHealthChecksListCall struct { +type HealthChecksAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -60984,11 +64899,10 @@ type HttpHealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HttpHealthCheck resources available to -// the specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list -func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { - c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of all HealthCheck resources, +// regional and global, available to the specified project. +func (r *HealthChecksService) AggregatedList(project string) *HealthChecksAggregatedListCall { + c := &HealthChecksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -61015,7 +64929,7 @@ func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
-func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { +func (c *HealthChecksAggregatedListCall) Filter(filter string) *HealthChecksAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -61026,7 +64940,7 @@ func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { +func (c *HealthChecksAggregatedListCall) MaxResults(maxResults int64) *HealthChecksAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -61043,7 +64957,7 @@ func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthCheck // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { +func (c *HealthChecksAggregatedListCall) OrderBy(orderBy string) *HealthChecksAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -61051,7 +64965,7 @@ func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { +func (c *HealthChecksAggregatedListCall) PageToken(pageToken string) *HealthChecksAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -61059,7 +64973,7 @@ func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecks // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { +func (c *HealthChecksAggregatedListCall) Fields(s ...googleapi.Field) *HealthChecksAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61069,7 +64983,7 @@ func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthCheck // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { +func (c *HealthChecksAggregatedListCall) IfNoneMatch(entityTag string) *HealthChecksAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -61077,21 +64991,21 @@ func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { +func (c *HealthChecksAggregatedListCall) Context(ctx context.Context) *HealthChecksAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpHealthChecksListCall) Header() http.Header { +func (c *HealthChecksAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -61103,7 +65017,7 @@ func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/healthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -61116,14 +65030,14 @@ func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.list" call. -// Exactly one of *HttpHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpHealthCheckList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.healthChecks.aggregatedList" call. +// Exactly one of *HealthChecksAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *HealthChecksAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { +func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*HealthChecksAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61142,7 +65056,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheckList{ + ret := &HealthChecksAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61154,9 +65068,9 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth } return ret, nil // { - // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + // "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project.", // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.list", + // "id": "compute.healthChecks.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -61185,16 +65099,16 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "Name of the project scoping this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", + // "path": "{project}/aggregated/healthChecks", // "response": { - // "$ref": "HttpHealthCheckList" + // "$ref": "HealthChecksAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -61208,7 +65122,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { +func (c *HealthChecksAggregatedListCall) Pages(ctx context.Context, f func(*HealthChecksAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -61226,527 +65140,22 @@ func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealth } } -// method id "compute.httpHealthChecks.patch": - -type HttpHealthChecksPatchCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch -func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { - c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpHealthChecksPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.httpHealthChecks.patch", - // "parameterOrder": [ - // "project", - // "httpHealthCheck" - // ], - // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - // "request": { - // "$ref": "HttpHealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.httpHealthChecks.testIamPermissions": - -type HttpHealthChecksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *HttpHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpHealthChecksTestIamPermissionsCall { - c := &HttpHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpHealthChecksTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpHealthChecksTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpHealthChecksTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/httpHealthChecks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.httpHealthChecks.update": - -type HttpHealthChecksUpdateCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update -func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { - c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
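The requestId contract spelled out in the comment above suggests a simple retry pattern: generate one UUID, reuse it on every attempt, and let the server deduplicate the mutation. A minimal sketch against the HealthChecks.Patch call that appears further down in this diff, assuming the generated package is imported as google.golang.org/api/compute/v1 and that a *compute.Service has been constructed elsewhere; the package name, helper name, and retry budget are illustrative, not part of the generated client.

package hcexamples

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated package
)

// patchHealthCheckIdempotently retries a HealthChecks.Patch call while
// reusing a single caller-supplied UUID as the request ID, so an attempt
// that already completed server-side is ignored instead of re-applied.
func patchHealthCheckIdempotently(ctx context.Context, svc *compute.Service, project, name, requestID string, hc *compute.HealthCheck) (*compute.Operation, error) {
	var lastErr error
	for attempt := 0; attempt < 3; attempt++ { // retry budget is illustrative
		op, err := svc.HealthChecks.Patch(project, name, hc).
			RequestId(requestID). // same UUID on every attempt
			Context(ctx).
			Do()
		if err == nil {
			return op, nil
		}
		lastErr = err
	}
	return nil, lastErr
}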
-func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpHealthChecksUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.httpHealthChecks.update", - // "parameterOrder": [ - // "project", - // "httpHealthCheck" - // ], - // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - // "request": { - // "$ref": "HttpHealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.httpsHealthChecks.delete": +// method id "compute.healthChecks.delete": -type HttpsHealthChecksDeleteCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksDeleteCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpsHealthCheck resource. -func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { - c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HealthCheck resource. 
+func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { + c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck + c.healthCheck = healthCheck return c } @@ -61764,7 +65173,7 @@ func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { +func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -61772,7 +65181,7 @@ func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthCh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { +func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61780,21 +65189,21 @@ func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { +func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksDeleteCall) Header() http.Header { +func (c *HealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -61803,7 +65212,7 @@ func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -61811,20 +65220,20 @@ func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.delete" call. +// Do executes the "compute.healthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61855,18 +65264,18 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified HttpsHealthCheck resource.", + // "description": "Deletes the specified HealthCheck resource.", // "httpMethod": "DELETE", - // "id": "compute.httpsHealthChecks.delete", + // "id": "compute.healthChecks.delete", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "healthCheck" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to delete.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -61883,7 +65292,7 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/global/healthChecks/{healthCheck}", // "response": { // "$ref": "Operation" // }, @@ -61895,31 +65304,31 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.httpsHealthChecks.get": +// method id "compute.healthChecks.get": -type HttpsHealthChecksGetCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HealthChecksGetCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HttpsHealthCheck resource. Gets a list of -// available HTTPS health checks by making a list() request. -func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { - c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HealthCheck resource. Gets a list of +// available health checks by making a list() request. +func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { + c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck + c.healthCheck = healthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { +func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61929,7 +65338,7 @@ func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChec // getting updates only after the object has changed since the last // request. 
Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { +func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -61937,21 +65346,21 @@ func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { +func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksGetCall) Header() http.Header { +func (c *HealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -61963,7 +65372,7 @@ func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -61971,20 +65380,20 @@ func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.get" call. -// Exactly one of *HttpsHealthCheck or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheck.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { +// Do executes the "compute.healthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheck.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62003,7 +65412,7 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheck{ + ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62015,18 +65424,18 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } return ret, nil // { - // "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request.", + // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.get", + // "id": "compute.healthChecks.get", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "healthCheck" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to return.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -62038,9 +65447,9 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/global/healthChecks/{healthCheck}", // "response": { - // "$ref": "HttpsHealthCheck" + // "$ref": "HealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -62051,23 +65460,23 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } -// method id "compute.httpsHealthChecks.insert": +// method id "compute.healthChecks.insert": -type HttpsHealthChecksInsertCall struct { - s *Service - project string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksInsertCall struct { + s *Service + project string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpsHealthCheck resource in the specified project -// using the data included in the request. -func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { - c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { + c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpshealthcheck = httpshealthcheck + c.healthcheck = healthcheck return c } @@ -62085,7 +65494,7 @@ func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *Http // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
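To make the Insert call above concrete, a hedged sketch of creating a global HealthCheck. The HealthCheck schema fields used here (Type, HttpHealthCheck, Port, RequestPath) are assumptions drawn from the wider compute v1 surface rather than from this hunk, and the resource name and request ID are placeholders.

package hcexamples

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// insertHealthCheck creates a global HealthCheck and logs the name of the
// resulting long-running Operation.
func insertHealthCheck(ctx context.Context, svc *compute.Service, project, requestID string) error {
	hc := &compute.HealthCheck{
		Name: "example-hc", // hypothetical resource name
		Type: "HTTP",       // assumed enum value
		HttpHealthCheck: &compute.HTTPHealthCheck{ // assumed nested type name
			Port:        80,
			RequestPath: "/healthz",
		},
	}
	op, err := svc.HealthChecks.Insert(project, hc).RequestId(requestID).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("insert started: operation %s", op.Name)
	return nil
}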
-func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { +func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -62093,7 +65502,7 @@ func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthCh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { +func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62101,35 +65510,35 @@ func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { +func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksInsertCall) Header() http.Header { +func (c *HealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -62142,14 +65551,14 @@ func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.insert" call. +// Do executes the "compute.healthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62180,9 +65589,9 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.insert", + // "id": "compute.healthChecks.insert", // "parameterOrder": [ // "project" // ], @@ -62200,9 +65609,9 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks", + // "path": "{project}/global/healthChecks", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -62215,9 +65624,9 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.httpsHealthChecks.list": +// method id "compute.healthChecks.list": -type HttpsHealthChecksListCall struct { +type HealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -62226,10 +65635,10 @@ type HttpsHealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HttpsHealthCheck resources available to -// the specified project. -func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { - c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HealthCheck resources available to the +// specified project. +func (r *HealthChecksService) List(project string) *HealthChecksListCall { + c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -62256,7 +65665,7 @@ func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCa // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { +func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -62267,7 +65676,7 @@ func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksList // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { +func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -62284,7 +65693,7 @@ func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChe // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { +func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -62292,7 +65701,7 @@ func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksLi // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { +func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -62300,7 +65709,7 @@ func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { +func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62310,7 +65719,7 @@ func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { +func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -62318,21 +65727,21 @@ func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { +func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksListCall) Header() http.Header { +func (c *HealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -62344,7 +65753,7 @@ func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -62357,14 +65766,14 @@ func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.list" call. -// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheckList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.healthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { +func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62383,7 +65792,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheckList{ + ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62395,9 +65804,9 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal } return ret, nil // { - // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.list", + // "id": "compute.healthChecks.list", // "parameterOrder": [ // "project" // ], @@ -62433,9 +65842,9 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks", + // "path": "{project}/global/healthChecks", // "response": { - // "$ref": "HttpsHealthCheckList" + // "$ref": "HealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -62449,7 +65858,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { +func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -62467,26 +65876,26 @@ func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHeal } } -// method id "compute.httpsHealthChecks.patch": +// method id "compute.healthChecks.patch": -type HttpsHealthChecksPatchCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksPatchCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. This method supports PATCH +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. This method supports PATCH // semantics and uses the JSON merge patch format and processing rules. 
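Before the Patch plumbing below, a usage sketch for the List call and Pages helper defined above. Pages manages pageToken and nextPageToken internally, so the callback only ever sees complete pages; the filter expression, page size, and import path are illustrative.

package hcexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// listHealthCheckNames walks every page of HealthCheck resources in the
// project and prints each resource name.
func listHealthCheckNames(ctx context.Context, svc *compute.Service, project string) error {
	return svc.HealthChecks.List(project).
		MaxResults(100).                  // page size, 0 to 500 per the doc comment above
		Filter(`name != "legacy-check"`). // illustrative filter expression
		Pages(ctx, func(page *compute.HealthCheckList) error {
			for _, hc := range page.Items { // Items is the standard generated list field
				fmt.Println(hc.Name)
			}
			return nil // a non-nil error here halts the iteration
		})
}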
-func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { - c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { + c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -62504,7 +65913,7 @@ func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { +func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -62512,7 +65921,7 @@ func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { +func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62520,35 +65929,35 @@ func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { +func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksPatchCall) Header() http.Header { +func (c *HealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -62556,20 +65965,20 @@ func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.patch" call. +// Do executes the "compute.healthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62600,18 +66009,18 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", - // "id": "compute.httpsHealthChecks.patch", + // "id": "compute.healthChecks.patch", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "healthCheck" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to patch.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -62628,9 +66037,9 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/global/healthChecks/{healthCheck}", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -62643,9 +66052,9 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpsHealthChecks.testIamPermissions": +// method id "compute.healthChecks.testIamPermissions": -type HttpsHealthChecksTestIamPermissionsCall struct { +type HealthChecksTestIamPermissionsCall struct { s *Service project string resource string @@ -62657,8 +66066,8 @@ type HttpsHealthChecksTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. 
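The testIamPermissions call described above takes a TestPermissionsRequest listing the permissions to probe and returns the subset the caller actually holds. A sketch under the assumption that the request and response types expose a Permissions []string field, as they do elsewhere in this generated package; the permission strings are illustrative.

package hcexamples

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// healthCheckPermissions reports which of the probed permissions the caller
// holds on a single health check resource.
func healthCheckPermissions(ctx context.Context, svc *compute.Service, project, resource string) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		// Permissions is the assumed field name; the strings are illustrative.
		Permissions: []string{
			"compute.healthChecks.get",
			"compute.healthChecks.update",
		},
	}
	resp, err := svc.HealthChecks.TestIamPermissions(project, resource, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}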
-func (r *HttpsHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpsHealthChecksTestIamPermissionsCall { - c := &HttpsHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HealthChecksTestIamPermissionsCall { + c := &HealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource c.testpermissionsrequest = testpermissionsrequest @@ -62668,7 +66077,7 @@ func (r *HttpsHealthChecksService) TestIamPermissions(project string, resource s // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpsHealthChecksTestIamPermissionsCall { +func (c *HealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HealthChecksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62676,21 +66085,21 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpsHealthChecksTestIamPermissionsCall { +func (c *HealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HealthChecksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksTestIamPermissionsCall) Header() http.Header { +func (c *HealthChecksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -62704,7 +66113,7 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.R reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -62718,14 +66127,14 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.R return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.testIamPermissions" call. +// Do executes the "compute.healthChecks.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62758,7 +66167,7 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.testIamPermissions", + // "id": "compute.healthChecks.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -62779,7 +66188,7 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{resource}/testIamPermissions", + // "path": "{project}/global/healthChecks/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -62795,25 +66204,25 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } -// method id "compute.httpsHealthChecks.update": +// method id "compute.healthChecks.update": -type HttpsHealthChecksUpdateCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksUpdateCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. -func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { - c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { + c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -62831,7 +66240,7 @@ func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { +func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -62839,7 +66248,7 @@ func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthCh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { +func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62847,35 +66256,35 @@ func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { +func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksUpdateCall) Header() http.Header { +func (c *HealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PUT", urls, body) if err != nil { @@ -62883,20 +66292,20 @@ func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.update" call. +// Do executes the "compute.healthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62927,18 +66336,18 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "PUT", - // "id": "compute.httpsHealthChecks.update", + // "id": "compute.healthChecks.update", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "healthCheck" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to update.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -62955,9 +66364,9 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/global/healthChecks/{healthCheck}", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -62970,23 +66379,23 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.images.delete": +// method id "compute.httpHealthChecks.delete": -type ImagesDeleteCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksDeleteCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified image. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete -func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { - c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HttpHealthCheck resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete +func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { + c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + c.httpHealthCheck = httpHealthCheck return c } @@ -63004,7 +66413,7 @@ func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { +func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -63012,7 +66421,7 @@ func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { +func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63020,21 +66429,21 @@ func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { +func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesDeleteCall) Header() http.Header { +func (c *HttpHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -63043,7 +66452,7 @@ func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -63051,20 +66460,20 @@ func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.delete" call. +// Do executes the "compute.httpHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63095,16 +66504,16 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified image.", + // "description": "Deletes the specified HttpHealthCheck resource.", // "httpMethod": "DELETE", - // "id": "compute.images.delete", + // "id": "compute.httpHealthChecks.delete", // "parameterOrder": [ // "project", - // "image" + // "httpHealthCheck" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to delete.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -63123,7 +66532,7 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "type": "string" // } // }, - // "path": "{project}/global/images/{image}", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { // "$ref": "Operation" // }, @@ -63135,366 +66544,32 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.images.deprecate": +// method id "compute.httpHealthChecks.get": -type ImagesDeprecateCall struct { - s *Service - project string - image string - deprecationstatus *DeprecationStatus - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksGetCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Deprecate: Sets the deprecation status of an image. -// -// If an empty request body is given, clears the deprecation status -// instead. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate -func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { - c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HttpHealthCheck resource. Gets a list of +// available HTTP health checks by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get +func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { + c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - c.deprecationstatus = deprecationstatus - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ImagesDeprecateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.deprecate" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", - // "httpMethod": "POST", - // "id": "compute.images.deprecate", - // "parameterOrder": [ - // "project", - // "image" - // ], - // "parameters": { - // "image": { - // "description": "Image name.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/images/{image}/deprecate", - // "request": { - // "$ref": "DeprecationStatus" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.images.get": - -type ImagesGetCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified image. Gets a list of available images by -// making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get -func (r *ImagesService) Get(project string, image string) *ImagesGetCall { - c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.image = image - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ImagesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.get" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Image{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified image. 
Gets a list of available images by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.images.get", - // "parameterOrder": [ - // "project", - // "image" - // ], - // "parameters": { - // "image": { - // "description": "Name of the image resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/images/{image}", - // "response": { - // "$ref": "Image" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.images.getFromFamily": - -type ImagesGetFromFamilyCall struct { - s *Service - project string - family string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetFromFamily: Returns the latest image that is part of an image -// family and is not deprecated. -func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { - c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.family = family + c.httpHealthCheck = httpHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { +func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63504,7 +66579,7 @@ func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFam // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { +func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -63512,21 +66587,21 @@ func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { +func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesGetFromFamilyCall) Header() http.Header { +func (c *HttpHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -63538,7 +66613,7 @@ func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -63546,20 +66621,20 @@ func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "family": c.family, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.getFromFamily" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { +// Do executes the "compute.httpHealthChecks.get" call. +// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HttpHealthCheck.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63578,7 +66653,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Image{ + ret := &HttpHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63590,16 +66665,16 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro } return ret, nil // { - // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "description": "Returns the specified HttpHealthCheck resource. 
Gets a list of available HTTP health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.images.getFromFamily", + // "id": "compute.httpHealthChecks.get", // "parameterOrder": [ // "project", - // "family" + // "httpHealthCheck" // ], // "parameters": { - // "family": { - // "description": "Name of the image family to search for.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -63613,165 +66688,9 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro // "type": "string" // } // }, - // "path": "{project}/global/images/family/{family}", - // "response": { - // "$ref": "Image" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.images.getIamPolicy": - -type ImagesGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { - c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *ImagesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesGetIamPolicyCall) Context(ctx context.Context) *ImagesGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ImagesGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/getIamPolicy") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.images.getIamPolicy", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/images/{resource}/getIamPolicy", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { - // "$ref": "Policy" + // "$ref": "HttpHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -63782,31 +66701,24 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } -// method id "compute.images.insert": +// method id "compute.httpHealthChecks.insert": -type ImagesInsertCall struct { - s *Service - project string - image *Image - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksInsertCall struct { + s *Service + project string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an image in the specified project using the data -// included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert -func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { - c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HttpHealthCheck resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert +func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { + c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - return c -} - -// ForceCreate sets the optional parameter "forceCreate": Force image -// creation if true. -func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { - c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) + c.httphealthcheck = httphealthcheck return c } @@ -63824,7 +66736,7 @@ func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { +func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -63832,7 +66744,7 @@ func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { +func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63840,35 +66752,35 @@ func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { +func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesInsertCall) Header() http.Header { +func (c *HttpHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -63881,14 +66793,14 @@ func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.insert" call. +// Do executes the "compute.httpHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63919,18 +66831,13 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates an image in the specified project using the data included in the request.", + // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.images.insert", + // "id": "compute.httpHealthChecks.insert", // "parameterOrder": [ // "project" // ], // "parameters": { - // "forceCreate": { - // "description": "Force image creation if true.", - // "location": "query", - // "type": "boolean" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -63944,27 +66851,24 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "type": "string" // } // }, - // "path": "{project}/global/images", + // "path": "{project}/global/httpHealthChecks", // "request": { - // "$ref": "Image" + // "$ref": "HttpHealthCheck" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.list": +// method id "compute.httpHealthChecks.list": -type ImagesListCall struct { +type HttpHealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -63973,16 +66877,11 @@ type ImagesListCall struct { header_ http.Header } -// List: Retrieves the list of custom images available to the specified -// project. Custom images are images you create that belong to your -// project. This method does not get any images that belong to other -// projects, including publicly-available images, like Debian 8. If you -// want to get a list of publicly-available images, use this method to -// make a request to the respective image project, such as debian-cloud -// or windows-cloud. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list -func (r *ImagesService) List(project string) *ImagesListCall { - c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HttpHealthCheck resources available to +// the specified project. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list +func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { + c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -64009,7 +66908,7 @@ func (r *ImagesService) List(project string) *ImagesListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *ImagesListCall) Filter(filter string) *ImagesListCall { +func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -64020,7 +66919,7 @@ func (c *ImagesListCall) Filter(filter string) *ImagesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { +func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -64037,7 +66936,7 @@ func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { +func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -64045,7 +66944,7 @@ func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { +func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -64053,7 +66952,7 @@ func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { +func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64063,7 +66962,7 @@ func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { +func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -64071,21 +66970,21 @@ func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { +func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesListCall) Header() http.Header { +func (c *HttpHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -64097,7 +66996,7 @@ func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -64110,14 +67009,14 @@ func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.list" call. -// Exactly one of *ImageList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ImageList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { +// Do executes the "compute.httpHealthChecks.list" call. +// Exactly one of *HttpHealthCheckList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpHealthCheckList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64136,7 +67035,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ImageList{ + ret := &HttpHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64148,9 +67047,9 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { } return ret, nil // { - // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.images.list", + // "id": "compute.httpHealthChecks.list", // "parameterOrder": [ // "project" // ], @@ -64186,9 +67085,9 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // "type": "string" // } // }, - // "path": "{project}/global/images", + // "path": "{project}/global/httpHealthChecks", // "response": { - // "$ref": "ImageList" + // "$ref": "HttpHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -64202,7 +67101,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { +func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -64220,32 +67119,53 @@ func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) er } } -// method id "compute.images.setIamPolicy": +// method id "compute.httpHealthChecks.patch": -type ImagesSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksPatchCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *ImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *ImagesSetIamPolicyCall { - c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch +func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { + c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolicyCall { +func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64253,56 +67173,56 @@ func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolic // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesSetIamPolicyCall) Context(ctx context.Context) *ImagesSetIamPolicyCall { +func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesSetIamPolicyCall) Header() http.Header { +func (c *HttpHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.httpHealthChecks.patch" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64321,7 +67241,7 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64333,14 +67253,21 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.images.setIamPolicy", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.httpHealthChecks.patch", // "parameterOrder": [ // "project", - // "resource" + // "httpHealthCheck" // ], // "parameters": { + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -64348,20 +67275,18 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/setIamPolicy", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "request": { - // "$ref": "GlobalSetPolicyRequest" + // "$ref": "HttpHealthCheck" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -64371,32 +67296,32 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } -// method id "compute.images.setLabels": +// method id "compute.httpHealthChecks.testIamPermissions": -type ImagesSetLabelsCall struct { +type HttpHealthChecksTestIamPermissionsCall struct { s *Service project string resource string - globalsetlabelsrequest *GlobalSetLabelsRequest + testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetLabels: Sets the labels on an image. To learn more about labels, -// read the Labeling Resources documentation. -func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { - c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *HttpHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpHealthChecksTestIamPermissionsCall { + c := &HttpHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { +func (c *HttpHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpHealthChecksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64404,35 +67329,35 @@ func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { +func (c *HttpHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpHealthChecksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesSetLabelsCall) Header() http.Header { +func (c *HttpHealthChecksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -64446,14 +67371,14 @@ func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.httpHealthChecks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64472,7 +67397,7 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64484,9 +67409,9 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the labels on an image. 
To learn more about labels, read the Labeling Resources documentation.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.images.setLabels", + // "id": "compute.httpHealthChecks.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -64507,47 +67432,68 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/setLabels", + // "path": "{project}/global/httpHealthChecks/{resource}/testIamPermissions", // "request": { - // "$ref": "GlobalSetLabelsRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.images.testIamPermissions": +// method id "compute.httpHealthChecks.update": -type ImagesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksUpdateCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { - c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update +func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { + c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { +func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64555,56 +67501,56 @@ func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { +func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesTestIamPermissionsCall) Header() http.Header { +func (c *HttpHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.httpHealthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64623,7 +67569,7 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64635,14 +67581,21 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.images.testIamPermissions", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpHealthChecks.update", // "parameterOrder": [ // "project", - // "resource" + // "httpHealthCheck" // ], // "parameters": { + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -64650,66 +67603,43 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/testIamPermissions", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "HttpHealthCheck" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.abandonInstances": +// method id "compute.httpsHealthChecks.delete": -type InstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksDeleteCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Flags the specified instances to be removed from -// the managed instance group. Abandoning an instance does not delete -// the instance, but it does remove the instance from any target pools -// that are applied by the managed instance group. This method reduces -// the targetSize of the managed instance group by the number of -// instances that you abandon. This operation is marked as DONE when the -// action is scheduled even if the instances have not yet been removed -// from the group. You must separately verify the status of the -// abandoning action with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { - c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HttpsHealthCheck resource. +func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { + c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest + c.httpsHealthCheck = httpsHealthCheck return c } @@ -64727,7 +67657,7 @@ func (r *InstanceGroupManagersService) AbandonInstances(project string, zone str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
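Aside: the requestId parameter documented above is what makes mutating calls such as this delete retry-safe. A minimal usage sketch, assuming the compute/v1 import path (match it to whichever compute package is actually vendored) and placeholder project, check name, and UUID values; a real caller would generate a fresh UUID per logical operation and retry only transient errors:

package main

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// deleteHTTPSHealthCheck deletes one HttpsHealthCheck, reusing the same
// requestId on every retry so the server ignores duplicate attempts.
func deleteHTTPSHealthCheck(ctx context.Context, svc *compute.Service) error {
	const (
		project   = "my-project"                           // placeholder
		checkName = "example-https-check"                  // placeholder
		requestID = "3e7b9f5a-1c2d-4e6f-8a9b-0c1d2e3f4a5b" // placeholder; use a freshly generated UUID
	)
	var lastErr error
	for attempt := 0; attempt < 3; attempt++ {
		_, lastErr = svc.HttpsHealthChecks.Delete(project, checkName).
			RequestId(requestID). // same ID on every attempt
			Context(ctx).
			Do()
		if lastErr == nil {
			return nil
		}
	}
	return lastErr
}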
-func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { +func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -64735,7 +67665,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { +func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64743,57 +67673,51 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { +func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *HttpsHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.abandonInstances" call. +// Do executes the "compute.httpsHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64824,18 +67748,18 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.abandonInstances", + // "description": "Deletes the specified HttpsHealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.httpsHealthChecks.delete", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "httpsHealthCheck" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -64850,17 +67774,328 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpsHealthChecks.get": + +type HttpsHealthChecksGetCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified HttpsHealthCheck resource. Gets a list of +// available HTTPS health checks by making a list() request. 
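Aside: a minimal sketch of the Get call documented above, assuming the compute/v1 import path; project and name are caller-supplied, and the 404 handling via *googleapi.Error mirrors the error contract described in the Do doc comments:

package main

import (
	"context"
	"fmt"
	"net/http"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getHTTPSHealthCheck fetches a single HttpsHealthCheck and distinguishes
// "not found" from other failures via *googleapi.Error.
func getHTTPSHealthCheck(ctx context.Context, svc *compute.Service, project, name string) (*compute.HttpsHealthCheck, error) {
	hc, err := svc.HttpsHealthChecks.Get(project, name).Context(ctx).Do()
	if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound {
		return nil, fmt.Errorf("https health check %q not found in project %q", name, project)
	}
	if err != nil {
		return nil, err
	}
	return hc, nil
}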
+func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { + c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpsHealthCheck = httpsHealthCheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpsHealthChecksGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.httpsHealthChecks.get" call. +// Exactly one of *HttpsHealthCheck or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheck.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HttpsHealthCheck{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.httpsHealthChecks.get", + // "parameterOrder": [ + // "project", + // "httpsHealthCheck" + // ], + // "parameters": { + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "response": { + // "$ref": "HttpsHealthCheck" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.httpsHealthChecks.insert": + +type HttpsHealthChecksInsertCall struct { + s *Service + project string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a HttpsHealthCheck resource in the specified project +// using the data included in the request. +func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { + c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpshealthcheck = httpshealthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
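Aside: Insert returns a global Operation rather than the created resource. A hedged sketch of creating a check and polling GlobalOperations.Get until DONE, assuming the compute/v1 import path and example field values:

package main

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v1"
)

// insertAndWait creates an HttpsHealthCheck and polls the global operation
// until it reports DONE, then surfaces any operation-level error.
func insertAndWait(ctx context.Context, svc *compute.Service, project string) error {
	op, err := svc.HttpsHealthChecks.Insert(project, &compute.HttpsHealthCheck{
		Name:             "example-https-check", // placeholder
		Port:             443,
		RequestPath:      "/healthz",
		CheckIntervalSec: 10,
		TimeoutSec:       5,
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second) // a real caller would also honor ctx cancellation
		if op, err = svc.GlobalOperations.Get(project, op.Name).Context(ctx).Do(); err != nil {
			return err
		}
	}
	if op.Error != nil && len(op.Error.Errors) > 0 {
		return fmt.Errorf("insert failed: %s", op.Error.Errors[0].Message)
	}
	return nil
}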
+func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpsHealthChecksInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.httpsHealthChecks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.httpsHealthChecks.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks", // "request": { - // "$ref": "InstanceGroupManagersAbandonInstancesRequest" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -64873,9 +68108,9 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } -// method id "compute.instanceGroupManagers.aggregatedList": +// method id "compute.httpsHealthChecks.list": -type InstanceGroupManagersAggregatedListCall struct { +type HttpsHealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -64884,10 +68119,10 @@ type InstanceGroupManagersAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves the list of managed instance groups and -// groups them by zone. -func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { - c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HttpsHealthCheck resources available to +// the specified project. +func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { + c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -64914,7 +68149,7 @@ func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceG // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
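Aside: a minimal sketch of the List call using the filter syntax described above and letting Pages drive the pageToken bookkeeping; the import path, filter value, and page size are assumptions:

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listHTTPSHealthChecks prints every HttpsHealthCheck in the project,
// excluding one by name with a filter expression.
func listHTTPSHealthChecks(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.HttpsHealthChecks.List(project).
		Filter(`name != "default-check"`). // placeholder; same syntax as the doc comment above
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.HttpsHealthCheckList) error {
		for _, hc := range page.Items {
			fmt.Printf("%s port=%d path=%s\n", hc.Name, hc.Port, hc.RequestPath)
		}
		return nil // returning a non-nil error stops iteration
	})
}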
-func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { +func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -64925,7 +68160,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *Instanc // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { +func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -64942,7 +68177,7 @@ func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) * // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { +func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -64950,7 +68185,7 @@ func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *Insta // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { +func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -64958,7 +68193,7 @@ func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *I // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { +func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64968,7 +68203,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) * // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { +func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -64976,21 +68211,21 @@ func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { +func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { +func (c *HttpsHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -65002,7 +68237,7 @@ func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.R var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -65015,15 +68250,14 @@ func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.R return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.aggregatedList" call. -// Exactly one of *InstanceGroupManagerAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { +// Do executes the "compute.httpsHealthChecks.list" call. +// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65042,7 +68276,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerAggregatedList{ + ret := &HttpsHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65054,9 +68288,9 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of managed instance groups and groups them by zone.", + // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.aggregatedList", + // "id": "compute.httpsHealthChecks.list", // "parameterOrder": [ // "project" // ], @@ -65092,9 +68326,9 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // "type": "string" // } // }, - // "path": "{project}/aggregated/instanceGroupManagers", + // "path": "{project}/global/httpsHealthChecks", // "response": { - // "$ref": "InstanceGroupManagerAggregatedList" + // "$ref": "HttpsHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -65108,7 +68342,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { +func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -65126,35 +68360,52 @@ func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f f } } -// method id "compute.instanceGroupManagers.applyUpdatesToInstances": +// method id "compute.httpsHealthChecks.patch": -type InstanceGroupManagersApplyUpdatesToInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksPatchCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ApplyUpdatesToInstances: Apply changes to selected instances on the -// managed instance group. This method can be used to apply new -// overrides and/or new versions. -func (r *InstanceGroupManagersService) ApplyUpdatesToInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest) *InstanceGroupManagersApplyUpdatesToInstancesCall { - c := &InstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. 
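Aside: because Patch uses JSON merge-patch semantics, only fields actually serialized in the request body are changed. A sketch under that assumption (import path and field choice are placeholders); a zero-valued field would additionally need to be listed in ForceSendFields to be sent at all:

package main

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// patchCheckInterval changes only CheckIntervalSec on an existing
// HttpsHealthCheck; fields left zero-valued and not listed in
// ForceSendFields are omitted from the merge patch and stay untouched.
func patchCheckInterval(ctx context.Context, svc *compute.Service, project, name string, seconds int64) (*compute.Operation, error) {
	patch := &compute.HttpsHealthCheck{
		CheckIntervalSec: seconds,
		// ForceSendFields is only needed when the new value is the Go zero
		// value (e.g. setting an int field to 0):
		// ForceSendFields: []string{"CheckIntervalSec"},
	}
	return svc.HttpsHealthChecks.Patch(project, name, patch).Context(ctx).Do()
}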
+func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { + c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersapplyupdatesrequest = instancegroupmanagersapplyupdatesrequest + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65162,57 +68413,56 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *InstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { +func (c *HttpsHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersapplyupdatesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.applyUpdatesToInstances" call. +// Do executes the "compute.httpsHealthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65243,18 +68493,18 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Apply changes to selected instances on the managed instance group. This method can be used to apply new overrides and/or new versions.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.applyUpdatesToInstances", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.httpsHealthChecks.patch", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "httpsHealthCheck" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group, should conform to RFC1035.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to patch.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -65265,16 +68515,15 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi. // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located. Should conform to RFC1035.", - // "location": "path", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "request": { - // "$ref": "InstanceGroupManagersApplyUpdatesRequest" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -65287,56 +68536,32 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi. } -// method id "compute.instanceGroupManagers.createInstances": +// method id "compute.httpsHealthChecks.testIamPermissions": -type InstanceGroupManagersCreateInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerscreateinstancesrequest *InstanceGroupManagersCreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// CreateInstances: Creates instances with per-instance configs in this -// managed instance group. Instances are created using the current -// instance template. The create instances operation is marked DONE if -// the createInstances request is successful. The underlying actions -// take additional time. You must separately verify the status of the -// creating or actions with the listmanagedinstances method. 
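// Illustrative sketch (added for clarity, not part of the vendored file): one way a
// caller might invoke the generated HttpsHealthChecks.Patch method together with the
// optional requestId described above, so that a retried request is deduplicated by the
// server. The project and health-check names are placeholders, and the service is
// assumed to be built with Application Default Credentials via compute.New.
package example

import (
	"context"
	"fmt"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func patchCheckInterval(ctx context.Context, requestID string) error {
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		return err
	}
	svc, err := compute.New(client)
	if err != nil {
		return err
	}
	// Only the fields set here are merged into the existing resource
	// (JSON merge patch semantics, per the method description above).
	patch := &compute.HttpsHealthCheck{CheckIntervalSec: 10}
	op, err := svc.HttpsHealthChecks.Patch("my-project", "my-https-check", patch).
		RequestId(requestID). // safe to resend with the same ID on retry
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("patch operation:", op.Name, op.Status)
	return nil
}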
-func (r *InstanceGroupManagersService) CreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagerscreateinstancesrequest *InstanceGroupManagersCreateInstancesRequest) *InstanceGroupManagersCreateInstancesCall { - c := &InstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *HttpsHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpsHealthChecksTestIamPermissionsCall { + c := &HttpsHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerscreateinstancesrequest = instancegroupmanagerscreateinstancesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersCreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersCreateInstancesCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersCreateInstancesCall { +func (c *HttpsHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpsHealthChecksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65344,35 +68569,35 @@ func (c *InstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersCreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersCreateInstancesCall { +func (c *HttpsHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpsHealthChecksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersCreateInstancesCall) Header() http.Header { +func (c *HttpsHealthChecksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerscreateinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -65380,21 +68605,20 @@ func (c *InstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http. } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.createInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.httpsHealthChecks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65413,7 +68637,7 @@ func (c *InstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOpti if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65425,21 +68649,14 @@ func (c *InstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Creates instances with per-instance configs in this managed instance group. Instances are created using the current instance template. 
The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.createInstances", + // "id": "compute.httpsHealthChecks.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -65447,54 +68664,49 @@ func (c *InstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOpti // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances", + // "path": "{project}/global/httpsHealthChecks/{resource}/testIamPermissions", // "request": { - // "$ref": "InstanceGroupManagersCreateInstancesRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.delete": +// method id "compute.httpsHealthChecks.update": -type InstanceGroupManagersDeleteCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksUpdateCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. Note that the instance group must not belong -// to a backend service. Read Deleting an instance group for more -// information. 
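// Illustrative sketch (not part of the vendored file): calling the generated
// HttpsHealthChecks.TestIamPermissions method shown above to find out which of a set
// of permissions the caller holds on a health check. The project, resource name and
// permission strings are placeholders; svc is assumed to be an already-initialized
// *compute.Service.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func checkHealthCheckPermissions(ctx context.Context, svc *compute.Service) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.httpsHealthChecks.get",
			"compute.httpsHealthChecks.update",
		},
	}
	resp, err := svc.HttpsHealthChecks.TestIamPermissions("my-project", "my-https-check", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// resp.Permissions is the subset of the requested permissions that the caller has.
	fmt.Println("granted:", resp.Permissions)
	return nil
}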
-func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { - c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. +func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { + c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck return c } @@ -65512,7 +68724,7 @@ func (r *InstanceGroupManagersService) Delete(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { +func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -65520,7 +68732,7 @@ func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { +func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65528,52 +68740,56 @@ func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { +func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteCall) Header() http.Header { +func (c *HttpsHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.delete" call. +// Do executes the "compute.httpsHealthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65604,18 +68820,18 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroupManagers.delete", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpsHealthChecks.update", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "httpsHealthCheck" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group to delete.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to update.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -65630,15 +68846,12 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, // "response": { // "$ref": "Operation" // }, @@ -65650,41 +68863,23 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.deleteInstances": +// method id "compute.images.delete": -type InstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesDeleteCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteInstances: Flags the specified instances in the managed -// instance group for immediate deletion. The instances are also removed -// from any target pools of which they were a member. This method -// reduces the targetSize of the managed instance group by the number of -// instances that you delete. This operation is marked as DONE when the -// action is scheduled even if the instances are still being deleted. -// You must separately verify the status of the deleting action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { - c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified image. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete +func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { + c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest + c.image = image return c } @@ -65702,7 +68897,7 @@ func (r *InstanceGroupManagersService) DeleteInstances(project string, zone stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
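// Illustrative sketch (not part of the vendored file): unlike Patch, the generated
// HttpsHealthChecks.Update method issues a PUT and replaces the whole resource, so
// the request body should carry the complete desired state. All names and values
// below are placeholders; svc is assumed to be an initialized *compute.Service.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func replaceHealthCheck(ctx context.Context, svc *compute.Service) error {
	desired := &compute.HttpsHealthCheck{
		Name:             "my-https-check",
		Port:             443,
		RequestPath:      "/healthz",
		CheckIntervalSec: 10,
		TimeoutSec:       5,
	}
	op, err := svc.HttpsHealthChecks.Update("my-project", "my-https-check", desired).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("update operation:", op.Name, op.Status)
	return nil
}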
-func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { +func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -65710,7 +68905,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) * // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { +func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65718,57 +68913,51 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { +func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { +func (c *ImagesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.deleteInstances" call. +// Do executes the "compute.images.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65799,18 +68988,18 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.deleteInstances", + // "description": "Deletes the specified image.", + // "httpMethod": "DELETE", + // "id": "compute.images.delete", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "image" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "image": { + // "description": "Name of the image resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -65825,18 +69014,9 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - // "request": { - // "$ref": "InstanceGroupManagersDeleteInstancesRequest" - // }, + // "path": "{project}/global/images/{image}", // "response": { // "$ref": "Operation" // }, @@ -65848,34 +69028,54 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } -// method id "compute.instanceGroupManagers.deletePerInstanceConfigs": +// method id "compute.images.deprecate": -type InstanceGroupManagersDeletePerInstanceConfigsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesDeprecateCall struct { + s *Service + project string + image string + deprecationstatus *DeprecationStatus + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeletePerInstanceConfigs: Deletes selected per-instance configs for -// the managed instance group. -func (r *InstanceGroupManagersService) DeletePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq) *InstanceGroupManagersDeletePerInstanceConfigsCall { - c := &InstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Deprecate: Sets the deprecation status of an image. +// +// If an empty request body is given, clears the deprecation status +// instead. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate +func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { + c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersdeleteperinstanceconfigsreq = instancegroupmanagersdeleteperinstanceconfigsreq + c.image = image + c.deprecationstatus = deprecationstatus + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
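// Illustrative sketch (not part of the vendored file): deleting a custom image with
// the generated Images.Delete method, passing a requestId so that a client-side retry
// of the same delete is not applied twice. Project and image names are placeholders;
// svc is assumed to be an initialized *compute.Service.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func deleteImage(ctx context.Context, svc *compute.Service, requestID string) error {
	op, err := svc.Images.Delete("my-project", "my-old-image").
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// The returned Operation can be polled (for example via GlobalOperations.Get)
	// until op.Status == "DONE".
	fmt.Println("delete operation:", op.Name, op.Status)
	return nil
}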
+func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeletePerInstanceConfigsCall { +func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65883,35 +69083,35 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleap // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersDeletePerInstanceConfigsCall { +func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header { +func (c *ImagesDeprecateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteperinstanceconfigsreq) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -65919,21 +69119,20 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.deletePerInstanceConfigs" call. +// Do executes the "compute.images.deprecate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65964,18 +69163,18 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Deletes selected per-instance configs for the managed instance group.", + // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.deletePerInstanceConfigs", + // "id": "compute.images.deprecate", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "image" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "image": { + // "description": "Image name.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -65986,16 +69185,15 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", - // "location": "path", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + // "path": "{project}/global/images/{image}/deprecate", // "request": { - // "$ref": "InstanceGroupManagersDeletePerInstanceConfigsReq" + // "$ref": "DeprecationStatus" // }, // "response": { // "$ref": "Operation" @@ -66008,34 +69206,32 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi } -// method id "compute.instanceGroupManagers.get": +// method id "compute.images.get": -type InstanceGroupManagersGetCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ImagesGetCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. Gets a list of available managed instance groups by making a -// list() request. -func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { - c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified image. Gets a list of available images by +// making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get +func (r *ImagesService) Get(project string, image string) *ImagesGetCall { + c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager + c.image = image return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { +func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66045,7 +69241,7 @@ func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGro // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { +func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -66053,21 +69249,21 @@ func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { +func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersGetCall) Header() http.Header { +func (c *ImagesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -66079,7 +69275,7 @@ func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -66087,21 +69283,20 @@ func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
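// Illustrative sketch (not part of the vendored file): marking an image as deprecated
// with the generated Images.Deprecate method and pointing callers at a replacement.
// Per the method description, sending an empty DeprecationStatus clears the deprecation
// state instead. All names and URLs are placeholders; svc is assumed to be an
// initialized *compute.Service.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func deprecateImage(ctx context.Context, svc *compute.Service) error {
	status := &compute.DeprecationStatus{
		State:       "DEPRECATED",
		Replacement: "https://www.googleapis.com/compute/v1/projects/my-project/global/images/my-image-v2",
	}
	op, err := svc.Images.Deprecate("my-project", "my-image-v1", status).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("deprecate operation:", op.Name, op.Status)
	return nil
}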
-func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +// Do executes the "compute.images.get" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66120,7 +69315,7 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManager{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66132,18 +69327,18 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", + // "description": "Returns the specified image. Gets a list of available images by making a list() request.", // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.get", + // "id": "compute.images.get", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "image" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "image": { + // "description": "Name of the image resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -66153,17 +69348,11 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/global/images/{image}", // "response": { - // "$ref": "InstanceGroupManager" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -66174,116 +69363,96 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } -// method id "compute.instanceGroupManagers.insert": +// method id "compute.images.getFromFamily": -type InstanceGroupManagersInsertCall struct { - s *Service - project string - zone string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesGetFromFamilyCall struct { + s *Service + project string + family string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. 
After the group is created, instances in -// the group are created using the specified instance template. This -// operation is marked as DONE when the group is created even if the -// instances in the group have not yet been created. You must separately -// verify the status of the individual instances with the -// listmanagedinstances method. -// -// A managed instance group can have up to 1000 VM instances per group. -// Please contact Cloud Support if you need an increase in this limit. -func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { - c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instancegroupmanager = instancegroupmanager - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { - c.urlParams_.Set("requestId", requestId) +// GetFromFamily: Returns the latest image that is part of an image +// family and is not deprecated. +func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { + c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.family = family return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { +func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { +func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersInsertCall) Header() http.Header { +func (c *ImagesGetFromFamilyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "family": c.family, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.images.getFromFamily" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66302,7 +69471,7 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66314,136 +69483,67 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. 
Please contact Cloud Support if you need an increase in this limit.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.insert", + // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "httpMethod": "GET", + // "id": "compute.images.getFromFamily", // "parameterOrder": [ // "project", - // "zone" + // "family" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "family": { + // "description": "Name of the image family to search for.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "{project}/global/images/family/{family}", // "response": { - // "$ref": "Operation" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.list": +// method id "compute.images.getIamPolicy": -type InstanceGroupManagersListCall struct { +type ImagesGetIamPolicyCall struct { s *Service project string - zone string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of managed instance groups that are contained -// within the specified project and zone. -func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { - c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. 
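// Illustrative sketch (not part of the vendored file): resolving the newest
// non-deprecated image in a family with the generated Images.GetFromFamily method,
// then re-fetching a specific image conditionally. IfNoneMatch together with
// googleapi.IsNotModified implements the ETag-based "only if changed" pattern
// described in the comments above. Project, family and image names are placeholders;
// svc is assumed to be an initialized *compute.Service and etag a previously
// observed ETag value.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func latestAndConditionalGet(ctx context.Context, svc *compute.Service, etag string) error {
	latest, err := svc.Images.GetFromFamily("debian-cloud", "debian-9").Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("latest in family:", latest.Name)

	img, err := svc.Images.Get("my-project", "my-image").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// The copy identified by etag is still current; nothing to refresh.
		return nil
	}
	if err != nil {
		return err
	}
	fmt.Println("image changed:", img.Name, img.Header.Get("Etag"))
	return nil
}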
+func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { + c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { +func (c *ImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66453,7 +69553,7 @@ func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGr // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { +func (c *ImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *ImagesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -66461,21 +69561,21 @@ func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { +func (c *ImagesGetIamPolicyCall) Context(ctx context.Context) *ImagesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersListCall) Header() http.Header { +func (c *ImagesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -66487,7 +69587,7 @@ func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -66495,20 +69595,20 @@ func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, e } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.list" call. -// Exactly one of *InstanceGroupManagerList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupManagerList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { +// Do executes the "compute.images.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66527,7 +69627,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66539,37 +69639,14 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta } return ret, nil // { - // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.list", + // "id": "compute.images.getIamPolicy", // "parameterOrder": [ // "project", - // "zone" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -66577,16 +69654,17 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "path": "{project}/global/images/{resource}/getIamPolicy", // "response": { - // "$ref": "InstanceGroupManagerList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -66597,120 +69675,57 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.listManagedInstances": +// method id "compute.images.insert": -type InstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesInsertCall struct { + s *Service + project string + image *Image + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListManagedInstances: Lists all of the instances in the managed -// instance group. Each instance in the list has a currentAction, which -// indicates the action that the managed instance group is performing on -// the instance. For example, if the group is still creating an -// instance, the currentAction is CREATING. If a previous action failed, -// the list displays the errors for that failed action. -func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { - c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an image in the specified project using the data +// included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert +func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { + c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("filter", filter) + c.image = image return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// ForceCreate sets the optional parameter "forceCreate": Force image +// creation if true. +func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { + c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) return c } -// OrderBy sets the optional parameter "order_by": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { +func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66718,30 +69733,35 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { +func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { +func (c *ImagesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -66749,23 +69769,19 @@ func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (* } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. -// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head -// er or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { +// Do executes the "compute.images.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66784,7 +69800,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagersListManagedInstancesResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66796,43 +69812,17 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + // "description": "Creates an image in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.listManagedInstances", + // "id": "compute.images.insert", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" + // "project" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "forceCreate": { + // "description": "Force image creation if true.", // "location": "query", - // "type": "string" + // "type": "boolean" // }, // "project": { // "description": "Project ID for this request.", @@ -66841,66 +69831,52 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "path": "{project}/global/images", + // "request": { + // "$ref": "Image" + // }, // "response": { - // "$ref": "InstanceGroupManagersListManagedInstancesResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListManagedInstancesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.listPerInstanceConfigs": +// method id "compute.images.list": -type InstanceGroupManagersListPerInstanceConfigsCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListPerInstanceConfigs: Lists all of the per-instance configs defined -// for the managed instance group. -func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListPerInstanceConfigsCall { - c := &InstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of custom images available to the specified +// project. Custom images are images you create that belong to your +// project. This method does not get any images that belong to other +// projects, including publicly-available images, like Debian 8. If you +// want to get a list of publicly-available images, use this method to +// make a request to the respective image project, such as debian-cloud +// or windows-cloud. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list +func (r *ImagesService) List(project string) *ImagesListCall { + c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager return c } @@ -66926,7 +69902,7 @@ func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zo // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
-func (c *InstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *InstanceGroupManagersListPerInstanceConfigsCall { +func (c *ImagesListCall) Filter(filter string) *ImagesListCall { c.urlParams_.Set("filter", filter) return c } @@ -66937,7 +69913,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *InstanceGroupManagersListPerInstanceConfigsCall { +func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -66954,7 +69930,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *InstanceGroupManagersListPerInstanceConfigsCall { +func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -66962,7 +69938,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *InstanceGroupManagersListPerInstanceConfigsCall { +func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -66970,62 +69946,71 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken st // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListPerInstanceConfigsCall { +func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersListPerInstanceConfigsCall { +func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
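// Illustrative sketch, not part of the generated client: how a caller might use the
// ImagesService.List call added above, with its Filter/MaxResults options and the Pages
// helper that follows nextPageToken automatically. Only the call signatures appear in this
// hunk; the package alias/import path and the ImageList.Items / Image.Name fields are
// assumptions based on the compute v1 surface.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated client
)

// listCustomImages prints the name of every custom image in project, walking all pages.
func listCustomImages(ctx context.Context, images *compute.ImagesService, project string) error {
	call := images.List(project).
		Filter("name != example-image"). // same field/operator/value syntax as documented on Filter
		MaxResults(100)                  // page size; Pages still visits every page
	return call.Pages(ctx, func(page *compute.ImageList) error {
		for _, img := range page.Items { // Items assumed to carry the per-page []*Image
			fmt.Println(img.Name)
		}
		return nil // returning a non-nil error here halts the iteration
	})
}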
-func (c *InstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { +func (c *ImagesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.listPerInstanceConfigs" call. -// Exactly one of *InstanceGroupManagersListPerInstanceConfigsResp or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *InstanceGroupManagersListPerInstanceConfigsResp.ServerResponse.Header -// or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListPerInstanceConfigsResp, error) { +// Do executes the "compute.images.list" call. +// Exactly one of *ImageList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ImageList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67044,7 +70029,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagersListPerInstanceConfigsResp{ + ret := &ImageList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67056,13 +70041,11 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Lists all of the per-instance configs defined for the managed instance group.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.listPerInstanceConfigs", + // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. 
This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "httpMethod": "GET", + // "id": "compute.images.list", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" + // "project" // ], // "parameters": { // "filter": { @@ -67070,12 +70053,6 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C // "location": "query", // "type": "string" // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", @@ -67100,17 +70077,11 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + // "path": "{project}/global/images", // "response": { - // "$ref": "InstanceGroupManagersListPerInstanceConfigsResp" + // "$ref": "ImageList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -67124,7 +70095,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListPerInstanceConfigsResp) error) error { +func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -67142,58 +70113,32 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Cont } } -// method id "compute.instanceGroupManagers.patch": +// method id "compute.images.setIamPolicy": -type InstanceGroupManagersPatchCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a managed instance group using the information that -// you specify in the request. 
This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listManagedInstances method. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. -func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { - c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *ImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *ImagesSetIamPolicyCall { + c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { +func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67201,57 +70146,56 @@ func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { +func (c *ImagesSetIamPolicyCall) Context(ctx context.Context) *ImagesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
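// Illustrative sketch, not part of the generated client: the read-modify-write pattern for
// the image IAM calls in these hunks — fetch the current policy with GetIamPolicy, append a
// binding, and write it back with SetIamPolicy. The GlobalSetPolicyRequest/Policy/Binding
// field names are assumptions based on the compute v1 surface (only the call signatures are
// shown here), as are the package alias and import path.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// grantImageUser adds member as roles/compute.imageUser on a single image.
func grantImageUser(ctx context.Context, images *compute.ImagesService, project, image, member string) error {
	policy, err := images.GetIamPolicy(project, image).Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &compute.Binding{
		Role:    "roles/compute.imageUser",
		Members: []string{member},
	})
	_, err = images.SetIamPolicy(project, image, &compute.GlobalSetPolicyRequest{
		Policy: policy, // resending the policy read above keeps its etag, so concurrent edits can be rejected
	}).Context(ctx).Do()
	return err
}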
-func (c *InstanceGroupManagersPatchCall) Header() http.Header { +func (c *ImagesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.images.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67270,7 +70214,7 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67282,21 +70226,14 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.instanceGroupManagers.patch", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.images.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -67304,24 +70241,20 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/global/images/{resource}/setIamPolicy", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -67331,66 +70264,32 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instanceGroupManagers.recreateInstances": +// method id "compute.images.setLabels": -type InstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RecreateInstances: Flags the specified instances in the managed -// instance group to be immediately recreated. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the flag is set -// even if the instances have not yet been recreated. 
You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { - c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on an image. To learn more about labels, +// read the Labeling Resources documentation. +func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { + c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { +func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67398,35 +70297,35 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { +func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
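// Illustrative sketch, not part of the generated client: image labels are guarded by a
// fingerprint, so a typical SetLabels caller reads the image first and sends the current
// LabelFingerprint back with the new label map. ImagesService.Get and the Image /
// GlobalSetLabelsRequest field names are not part of this hunk and are assumptions based on
// the compute v1 surface, as are the package alias and import path.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// labelImage adds or overwrites one label on an image and returns the resulting Operation.
func labelImage(ctx context.Context, images *compute.ImagesService, project, image, key, value string) (*compute.Operation, error) {
	img, err := images.Get(project, image).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	labels := map[string]string{}
	for k, v := range img.Labels {
		labels[k] = v // copy the existing labels so none are dropped by the write
	}
	labels[key] = value
	return images.SetLabels(project, image, &compute.GlobalSetLabelsRequest{
		Labels:           labels,
		LabelFingerprint: img.LabelFingerprint, // fingerprint from the read guards against concurrent label edits
	}).Context(ctx).Do()
}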
-func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *ImagesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -67434,21 +70333,20 @@ func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*htt } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Do executes the "compute.images.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67479,21 +70377,14 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Sets the labels on an image. 
To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.recreateInstances", + // "id": "compute.images.setLabels", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -67501,21 +70392,17 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "path": "{project}/global/images/{resource}/setLabels", // "request": { - // "$ref": "InstanceGroupManagersRecreateInstancesRequest" + // "$ref": "GlobalSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -67528,71 +70415,32 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } -// method id "compute.instanceGroupManagers.resize": +// method id "compute.images.testIamPermissions": -type InstanceGroupManagersResizeCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the managed instance group. If you increase the size, -// the group creates new instances using the current instance template. -// If you decrease the size, the group deletes instances. The resize -// operation is marked DONE when the resize actions are scheduled even -// if the group has not yet added or deleted any instances. You must -// separately verify the status of the creating or deleting actions with -// the listmanagedinstances method. -// -// When resizing down, the instance group arbitrarily chooses the order -// in which VMs are deleted. The group takes into account some VM -// attributes when making the selection including: -// -// + The status of the VM instance. + The health of the VM instance. 
+ -// The instance template version the VM is based on. + For regional -// managed instance groups, the location of the VM instance. -// -// This list is subject to change. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { - c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { + c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { +func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67600,30 +70448,35 @@ func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { +func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
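// Illustrative sketch (not part of the generated bindings above): checks which
// of two image permissions the caller holds via the compute.images.testIamPermissions
// call introduced in this hunk. It assumes the package's Service struct exposes
// an Images field (*ImagesService), that TestPermissionsRequest and
// TestPermissionsResponse carry a Permissions []string field as in the Compute
// API, and that the surrounding package already imports "context"; the
// permission names are placeholders.
func callerImagePermissions(ctx context.Context, svc *Service, project, image string) ([]string, error) {
	req := &TestPermissionsRequest{
		Permissions: []string{"compute.images.get", "compute.images.useReadOnly"},
	}
	// Do returns a *TestPermissionsResponse listing the subset of the
	// requested permissions that the caller actually has.
	resp, err := svc.Images.TestIamPermissions(project, image, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}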
-func (c *InstanceGroupManagersResizeCall) Header() http.Header { +func (c *ImagesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -67631,21 +70484,20 @@ func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.images.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67664,7 +70516,7 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67676,22 +70528,14 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resize", + // "id": "compute.images.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager", - // "size" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -67699,71 +70543,66 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "size": { - // "description": "The number of running instances that the managed instance group should maintain at any given time. 
The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - // "format": "int32", - // "location": "query", - // "required": true, - // "type": "integer" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "{project}/global/images/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.resizeAdvanced": - -type InstanceGroupManagersResizeAdvancedCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +// method id "compute.instanceGroupManagers.abandonInstances": -// ResizeAdvanced: Resizes the managed instance group with advanced -// configuration options like disabling creation retries. This is an -// extended version of the resize method. -// -// If you increase the size of the instance group, the group creates new -// instances using the current instance template. If you decrease the -// size, the group deletes instances. The resize operation is marked -// DONE when the resize actions are scheduled even if the group has not -// yet added or deleted any instances. You must separately verify the -// status of the creating, creatingWithoutRetries, or deleting actions -// with the get or listmanagedinstances method. +type InstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AbandonInstances: Flags the specified instances to be removed from +// the managed instance group. Abandoning an instance does not delete +// the instance, but it does remove the instance from any target pools +// that are applied by the managed instance group. This method reduces +// the targetSize of the managed instance group by the number of +// instances that you abandon. This operation is marked as DONE when the +// action is scheduled even if the instances have not yet been removed +// from the group. You must separately verify the status of the +// abandoning action with the listmanagedinstances method. // // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. 
-func (r *InstanceGroupManagersService) ResizeAdvanced(project string, zone string, instanceGroupManager string, instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest) *InstanceGroupManagersResizeAdvancedCall { - c := &InstanceGroupManagersResizeAdvancedCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { + c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersresizeadvancedrequest = instancegroupmanagersresizeadvancedrequest + c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest return c } @@ -67781,7 +70620,7 @@ func (r *InstanceGroupManagersService) ResizeAdvanced(project string, zone strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *InstanceGroupManagersResizeAdvancedCall { +func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -67789,7 +70628,7 @@ func (c *InstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *I // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeAdvancedCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67797,35 +70636,35 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersResizeAdvancedCall) Context(ctx context.Context) *InstanceGroupManagersResizeAdvancedCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersResizeAdvancedCall) Header() http.Header { +func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersresizeadvancedrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -67840,14 +70679,14 @@ func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.R return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.resizeAdvanced" call. +// Do executes the "compute.instanceGroupManagers.abandonInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67878,9 +70717,9 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. 
This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resizeAdvanced", + // "id": "compute.instanceGroupManagers.abandonInstances", // "parameterOrder": [ // "project", // "zone", @@ -67912,9 +70751,9 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", // "request": { - // "$ref": "InstanceGroupManagersResizeAdvancedRequest" + // "$ref": "InstanceGroupManagersAbandonInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -67927,89 +70766,324 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio } -// method id "compute.instanceGroupManagers.setAutoHealingPolicies": +// method id "compute.instanceGroupManagers.aggregatedList": -type InstanceGroupManagersSetAutoHealingPoliciesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetAutoHealingPolicies: Modifies the autohealing policies. -// [Deprecated] This method is deprecated. Please use Patch instead. -func (r *InstanceGroupManagersService) SetAutoHealingPolicies(project string, zone string, instanceGroupManager string, instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest) *InstanceGroupManagersSetAutoHealingPoliciesCall { - c := &InstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of managed instance groups and +// groups them by zone. +func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { + c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetautohealingrequest = instancegroupmanagerssetautohealingrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *InstanceGroupManagersSetAutoHealingPoliciesCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
+func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *InstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { +func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetautohealingrequest) + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroupManagers.aggregatedList" call. +// Exactly one of *InstanceGroupManagerAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. 
+func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManagerAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of managed instance groups and groups them by zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/instanceGroupManagers", + // "response": { + // "$ref": "InstanceGroupManagerAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instanceGroupManagers.applyUpdatesToInstances": + +type InstanceGroupManagersApplyUpdatesToInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ApplyUpdatesToInstances: Apply changes to selected instances on the +// managed instance group. This method can be used to apply new +// overrides and/or new versions. +func (r *InstanceGroupManagersService) ApplyUpdatesToInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest) *InstanceGroupManagersApplyUpdatesToInstancesCall { + c := &InstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersapplyupdatesrequest = instancegroupmanagersapplyupdatesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersApplyUpdatesToInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *InstanceGroupManagersApplyUpdatesToInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
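// Illustrative sketch (not part of the generated bindings above): drives the
// compute.instanceGroupManagers.aggregatedList call, using the Filter and
// MaxResults setters described earlier in this hunk and the Pages helper to
// follow nextPageToken automatically. It assumes the Service struct exposes an
// InstanceGroupManagers field (*InstanceGroupManagersService) and that the
// aggregated list type carries the usual per-zone Items map; the filter string
// is a placeholder.
func visitAllManagedGroups(ctx context.Context, svc *Service, project string) error {
	return svc.InstanceGroupManagers.AggregatedList(project).
		Filter(`name != example-instance`).
		MaxResults(100).
		Pages(ctx, func(page *InstanceGroupManagerAggregatedList) error {
			for zone, scoped := range page.Items {
				// zone is a key such as "zones/us-central1-a"; scoped holds
				// the managed instance groups (if any) in that zone.
				_ = zone
				_ = scoped
			}
			return nil
		})
}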
+func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersapplyupdatesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -68024,14 +71098,14 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setAutoHealingPolicies" call. +// Do executes the "compute.instanceGroupManagers.applyUpdatesToInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68062,9 +71136,9 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Modifies the autohealing policies. [Deprecated] This method is deprecated. Please use Patch instead.", + // "description": "Apply changes to selected instances on the managed instance group. This method can be used to apply new overrides and/or new versions.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setAutoHealingPolicies", + // "id": "compute.instanceGroupManagers.applyUpdatesToInstances", // "parameterOrder": [ // "project", // "zone", @@ -68072,7 +71146,7 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C // ], // "parameters": { // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "description": "The name of the managed instance group, should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -68084,21 +71158,16 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the managed instance group is located. Should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", // "request": { - // "$ref": "InstanceGroupManagersSetAutoHealingRequest" + // "$ref": "InstanceGroupManagersApplyUpdatesRequest" // }, // "response": { // "$ref": "Operation" @@ -68111,28 +71180,31 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C } -// method id "compute.instanceGroupManagers.setInstanceTemplate": +// method id "compute.instanceGroupManagers.createInstances": -type InstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersCreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerscreateinstancesrequest *InstanceGroupManagersCreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Specifies the instance template to use when -// creating new instances in this group. The templates for existing -// instances in the group do not change unless you recreate them. -func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { - c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateInstances: Creates instances with per-instance configs in this +// managed instance group. Instances are created using the current +// instance template. The create instances operation is marked DONE if +// the createInstances request is successful. The underlying actions +// take additional time. You must separately verify the status of the +// creating or actions with the listmanagedinstances method. 
+func (r *InstanceGroupManagersService) CreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagerscreateinstancesrequest *InstanceGroupManagersCreateInstancesRequest) *InstanceGroupManagersCreateInstancesCall { + c := &InstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest + c.instancegroupmanagerscreateinstancesrequest = instancegroupmanagerscreateinstancesrequest return c } @@ -68145,12 +71217,11 @@ func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone // and the request times out. If you make the request again with the // same request ID, the server can check if original operation with the // same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// request. // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstanceGroupManagersCreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersCreateInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68158,7 +71229,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId strin // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersCreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68166,35 +71237,35 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Fie // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstanceGroupManagersCreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersCreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *InstanceGroupManagersCreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerscreateinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -68209,14 +71280,14 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*h return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. +// Do executes the "compute.instanceGroupManagers.createInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68247,9 +71318,9 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", + // "description": "Creates instances with per-instance configs in this managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. 
You must separately verify the status of the creating or actions with the listmanagedinstances method.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setInstanceTemplate", + // "id": "compute.instanceGroupManagers.createInstances", // "parameterOrder": [ // "project", // "zone", @@ -68257,7 +71328,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call // ], // "parameters": { // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -68270,20 +71341,20 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/createInstances", // "request": { - // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + // "$ref": "InstanceGroupManagersCreateInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -68296,32 +71367,27 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } -// method id "compute.instanceGroupManagers.setTargetPools": +// method id "compute.instanceGroupManagers.delete": -type InstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeleteCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all instances in -// this managed instance group are assigned. The target pools -// automatically apply to all of the instances in the managed instance -// group. This operation is marked DONE when you make the request even -// if the instances have not yet been added to their target pools. The -// change might take some time to apply to all of the instances in the -// group depending on the size of the group. -func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { - c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. Note that the instance group must not belong +// to a backend service. Read Deleting an instance group for more +// information. +func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { + c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest return c } @@ -68339,7 +71405,7 @@ func (r *InstanceGroupManagersService) SetTargetPools(project string, zone strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68347,7 +71413,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *I // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68355,37 +71421,32 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *InstanceGroupManagersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -68398,14 +71459,14 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.R return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Do executes the "compute.instanceGroupManagers.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68436,9 +71497,9 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. 
This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setTargetPools", + // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroupManagers.delete", // "parameterOrder": [ // "project", // "zone", @@ -68446,7 +71507,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "description": "The name of the managed instance group to delete.", // "location": "path", // "required": true, // "type": "string" @@ -68470,10 +71531,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - // "request": { - // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "response": { // "$ref": "Operation" // }, @@ -68485,34 +71543,67 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } -// method id "compute.instanceGroupManagers.testIamPermissions": +// method id "compute.instanceGroupManagers.deleteInstances": -type InstanceGroupManagersTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceGroupManagersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupManagersTestIamPermissionsCall { - c := &InstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteInstances: Flags the specified instances in the managed +// instance group for immediate deletion. The instances are also removed +// from any target pools of which they were a member. This method +// reduces the targetSize of the managed instance group by the number of +// instances that you delete. This operation is marked as DONE when the +// action is scheduled even if the instances are still being deleted. +// You must separately verify the status of the deleting action with the +// listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. 
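As the DeleteInstances documentation above stresses, the call only schedules the deletions and reduces targetSize; the returned Operation is DONE once the action is scheduled, not once the instances are gone. A short sketch of building the request body, reusing the placeholder *compute.Service and imports from the earlier example; the partial instance URLs are illustrative, and treating the Instances field as a list of full or partial instance URLs is an assumption about the request struct rather than something stated in this hunk:

// deleteTwoInstances is a sketch; svc is a *compute.Service built as in
// the earlier example, and every name below is a placeholder.
func deleteTwoInstances(ctx context.Context, svc *compute.Service) error {
	req := &compute.InstanceGroupManagersDeleteInstancesRequest{
		// Partial instance URLs; the group's targetSize drops by two.
		Instances: []string{
			"zones/us-central1-a/instances/my-mig-abcd",
			"zones/us-central1-a/instances/my-mig-efgh",
		},
	}
	op, err := svc.InstanceGroupManagers.
		DeleteInstances("my-project", "us-central1-a", "my-mig", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// DONE here only means the action was scheduled; the instances may
	// still be draining or deleting.
	log.Printf("deleteInstances operation %s: %s", op.Name, op.Status)
	return nil
}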
+func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { + c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersTestIamPermissionsCall { +func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68520,35 +71611,35 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Fiel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupManagersTestIamPermissionsCall { +func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -68556,21 +71647,21 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*ht } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instanceGroupManagers.deleteInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68589,7 +71680,7 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -68601,15 +71692,21 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.testIamPermissions", + // "id": "compute.instanceGroupManagers.deleteInstances", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -68617,87 +71714,61 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstanceGroupManagersDeleteInstancesRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.update": +// method id "compute.instanceGroupManagers.deletePerInstanceConfigs": -type InstanceGroupManagersUpdateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeletePerInstanceConfigsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is updated even if the instances in the group have not yet been -// updated. You must separately verify the status of the individual -// instances with the listManagedInstances method. -func (r *InstanceGroupManagersService) Update(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersUpdateCall { - c := &InstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeletePerInstanceConfigs: Deletes selected per-instance configs for +// the managed instance group. +func (r *InstanceGroupManagersService) DeletePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq) *InstanceGroupManagersDeletePerInstanceConfigsCall { + c := &InstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersUpdateCall) RequestId(requestId string) *InstanceGroupManagersUpdateCall { - c.urlParams_.Set("requestId", requestId) + c.instancegroupmanagersdeleteperinstanceconfigsreq = instancegroupmanagersdeleteperinstanceconfigsreq return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdateCall { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeletePerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68705,37 +71776,37 @@ func (c *InstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersUpdateCall) Context(ctx context.Context) *InstanceGroupManagersUpdateCall { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersDeletePerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersUpdateCall) Header() http.Header { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteperinstanceconfigsreq) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -68748,14 +71819,14 @@ func (c *InstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.update" call. +// Do executes the "compute.instanceGroupManagers.deletePerInstanceConfigs" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68786,9 +71857,9 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listManagedInstances method.", - // "httpMethod": "PUT", - // "id": "compute.instanceGroupManagers.update", + // "description": "Deletes selected per-instance configs for the managed instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.deletePerInstanceConfigs", // "parameterOrder": [ // "project", // "zone", @@ -68796,7 +71867,7 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -68808,21 +71879,16 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "InstanceGroupManagersDeletePerInstanceConfigsReq" // }, // "response": { // "$ref": "Operation" @@ -68835,93 +71901,80 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.updatePerInstanceConfigs": +// method id "compute.instanceGroupManagers.get": -type InstanceGroupManagersUpdatePerInstanceConfigsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersGetCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// UpdatePerInstanceConfigs: Insert or patch (for the ones that already -// exist) per-instance configs for the managed instance group. -// perInstanceConfig.instance serves as a key used to distinguish -// whether to perform insert or patch. -func (r *InstanceGroupManagersService) UpdatePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq) *InstanceGroupManagersUpdatePerInstanceConfigsCall { - c := &InstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns all of the details about the specified managed instance +// group. Gets a list of available managed instance groups by making a +// list() request. +func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { + c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersupdateperinstanceconfigsreq = instancegroupmanagersupdateperinstanceconfigsreq - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *InstanceGroupManagersUpdatePerInstanceConfigsCall { - c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdatePerInstanceConfigsCall { +func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersUpdatePerInstanceConfigsCall { +func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { +func (c *InstanceGroupManagersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersupdateperinstanceconfigsreq) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -68934,14 +71987,14 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.updatePerInstanceConfigs" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
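The Get call added here is a plain read and supports the Fields and IfNoneMatch helpers documented in the surrounding comments. A sketch of a partial read, again assuming the placeholder service from the earlier examples; the field-mask strings are illustrative and use the JSON names of the resource fields:

// getTargetSize asks the server for only the fields it needs, which
// keeps the response small; a full Get without Fields returns the
// entire InstanceGroupManager resource.
func getTargetSize(ctx context.Context, svc *compute.Service) (int64, error) {
	igm, err := svc.InstanceGroupManagers.
		Get("my-project", "us-central1-a", "my-mig").
		Fields("name", "targetSize", "status").
		Context(ctx).
		Do()
	if err != nil {
		return 0, err
	}
	return igm.TargetSize, nil
}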
-func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68960,7 +72013,7 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -68972,9 +72025,9 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.updatePerInstanceConfigs", + // "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.get", // "parameterOrder": [ // "project", // "zone", @@ -68982,7 +72035,7 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi // ], // "parameters": { // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "description": "The name of the managed instance group.", // "location": "path", // "required": true, // "type": "string" @@ -68994,55 +72047,53 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", - // "request": { - // "$ref": "InstanceGroupManagersUpdatePerInstanceConfigsReq" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupManager" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.addInstances": +// method id "compute.instanceGroupManagers.insert": -type InstanceGroupsAddInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersInsertCall struct { + s *Service + project string + zone string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddInstances: Adds a list of instances to the specified instance -// group. All of the instances in the instance group must be in the same -// network/subnetwork. Read Adding instances for more information. -func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { - c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, instances in +// the group are created using the specified instance template. This +// operation is marked as DONE when the group is created even if the +// instances in the group have not yet been created. You must separately +// verify the status of the individual instances with the +// listmanagedinstances method. +// +// A managed instance group can have up to 1000 VM instances per group. +// Please contact Cloud Support if you need an increase in this limit. +func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { + c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest + c.instancegroupmanager = instancegroupmanager return c } @@ -69060,7 +72111,7 @@ func (r *InstanceGroupsService) AddInstances(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { +func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -69068,7 +72119,7 @@ func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { +func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69076,35 +72127,35 @@ func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { +func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsAddInstancesCall) Header() http.Header { +func (c *InstanceGroupManagersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -69112,21 +72163,20 @@ func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.addInstances" call. +// Do executes the "compute.instanceGroupManagers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
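Every mutating call in this block (Insert, Delete, DeleteInstances, the per-instance-config methods) returns an *Operation that the descriptions say is marked DONE before the instances have necessarily converged. A common follow-up is to poll the zonal operation until it finishes; this is a sketch using ZoneOperations.Get with a fixed two-second interval, which is a choice of the example rather than anything the diff prescribes:

// waitForZoneOperation polls until the operation reports DONE or the
// context is cancelled. The interval and error handling are illustrative.
func waitForZoneOperation(ctx context.Context, svc *compute.Service, project, zone, opName string) error {
	for {
		op, err := svc.ZoneOperations.Get(project, zone, opName).Context(ctx).Do()
		if err != nil {
			return err
		}
		if op.Status == "DONE" {
			if op.Error != nil && len(op.Error.Errors) > 0 {
				return fmt.Errorf("operation %s failed: %s", opName, op.Error.Errors[0].Message)
			}
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second):
		}
	}
}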
-func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69157,21 +72207,14 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.addInstances", + // "id": "compute.instanceGroupManagers.insert", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroup" + // "zone" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where you are adding instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -69185,15 +72228,15 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone where you want to create the managed instance group.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + // "path": "{project}/zones/{zone}/instanceGroupManagers", // "request": { - // "$ref": "InstanceGroupsAddInstancesRequest" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -69206,22 +72249,24 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instanceGroups.aggregatedList": +// method id "compute.instanceGroupManagers.list": -type InstanceGroupsAggregatedListCall struct { +type InstanceGroupManagersListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves the list of instance groups and sorts them -// by zone. -func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { - c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of managed instance groups that are contained +// within the specified project and zone. 
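The List call added below is zonal, returns at most one page of results per Do, and supports the Filter, MaxResults, OrderBy, and PageToken parameters documented next in the hunk. A single-page sketch with the usual placeholder names; full pagination via the generated Pages helper is sketched after that method further down:

// listMIGs prints only the first page of managed instance groups; see
// the Pages sketch below for walking every page.
func listMIGs(ctx context.Context, svc *compute.Service) error {
	page, err := svc.InstanceGroupManagers.
		List("my-project", "us-central1-a").
		MaxResults(100).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, igm := range page.Items {
		fmt.Printf("%s: target size %d\n", igm.Name, igm.TargetSize)
	}
	if page.NextPageToken != "" {
		fmt.Println("more results available; pass NextPageToken to the next call")
	}
	return nil
}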
+func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { + c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -69247,7 +72292,7 @@ func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAg // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { +func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c } @@ -69258,7 +72303,7 @@ func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroups // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { +func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -69275,7 +72320,7 @@ func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *Instanc // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { +func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -69283,7 +72328,7 @@ func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGrou // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { +func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -69291,7 +72336,7 @@ func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *Instance // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { +func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69301,7 +72346,7 @@ func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *Instanc // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { +func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { c.ifNoneMatch_ = entityTag return c } @@ -69309,21 +72354,21 @@ func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *Instan // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { +func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsAggregatedListCall) Header() http.Header { +func (c *InstanceGroupManagersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -69335,7 +72380,7 @@ func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -69344,18 +72389,19 @@ func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.aggregatedList" call. -// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. +// Do executes the "compute.instanceGroupManagers.list" call. +// Exactly one of *InstanceGroupManagerList or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// *InstanceGroupManagerList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { +func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69374,7 +72420,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupAggregatedList{ + ret := &InstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -69386,11 +72432,12 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In } return ret, nil // { - // "description": "Retrieves the list of instance groups and sorts them by zone.", + // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", // "httpMethod": "GET", - // "id": "compute.instanceGroups.aggregatedList", + // "id": "compute.instanceGroupManagers.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -69422,11 +72469,17 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/instanceGroups", + // "path": "{project}/zones/{zone}/instanceGroupManagers", // "response": { - // "$ref": "InstanceGroupAggregatedList" + // "$ref": "InstanceGroupManagerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -69440,7 +72493,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { +func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -69458,539 +72511,29 @@ func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*In } } -// method id "compute.instanceGroups.delete": - -type InstanceGroupsDeleteCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified instance group. The instances in the -// group are not deleted. Note that instance group must not belong to a -// backend service. Read Deleting an instance group for more -// information. -func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { - c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
-// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified instance group. 
The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroups.delete", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroups.get": - -type InstanceGroupsGetCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified instance group. Gets a list of available -// instance groups by making a list() request. -func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { - c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroup{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified instance group. 
Gets a list of available instance groups by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.get", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", - // "response": { - // "$ref": "InstanceGroup" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.instanceGroups.insert": - -type InstanceGroupsInsertCall struct { - s *Service - project string - zone string - instancegroup *InstanceGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates an instance group in the specified project using the -// parameters that are included in the request. -func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { - c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instancegroup = instancegroup - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
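The IfNoneMatch helper documented above implements conditional reads: when the stored ETag still matches, Do returns an error that googleapi.IsNotModified recognizes instead of a fresh body. A minimal sketch of that pattern, assuming the vendored google.golang.org/api/compute/v1 package, an authenticated *compute.Service built elsewhere, and a lastETag string the caller recorded from an earlier response; none of this is part of the vendored change itself.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getIfChanged re-fetches an instance group only when it has changed since
// lastETag was recorded; a nil, nil return means "not modified".
func getIfChanged(ctx context.Context, svc *compute.Service, project, zone, name, lastETag string) (*compute.InstanceGroup, error) {
	ig, err := svc.InstanceGroups.Get(project, zone, name).
		IfNoneMatch(lastETag). // sends If-None-Match with the saved ETag
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil
	}
	return ig, err
}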
-func (c *InstanceGroupsInsertCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.insert", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups", - // "request": { - // "$ref": "InstanceGroup" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroups.list": +// method id "compute.instanceGroupManagers.listManagedInstances": -type InstanceGroupsListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of instance groups that are located in the -// specified project and zone. -func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { - c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListManagedInstances: Lists all of the instances in the managed +// instance group. Each instance in the list has a currentAction, which +// indicates the action that the managed instance group is performing on +// the instance. For example, if the group is still creating an +// instance, the currentAction is CREATING. If a previous action failed, +// the list displays the errors for that failed action. +func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { + c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } @@ -70016,7 +72559,7 @@ func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroup // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("filter", filter) return c } @@ -70027,12 +72570,12 @@ func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { +func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by +// OrderBy sets the optional parameter "order_by": Sorts list results by // a certain order. 
By default, results are returned in alphanumerical // order based on the resource name. // @@ -70044,15 +72587,15 @@ func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsLis // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { - c.urlParams_.Set("orderBy", orderBy) +func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("order_by", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { +func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -70060,72 +72603,62 @@ func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsList // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsListCall) Header() http.Header { +func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.list" call. -// Exactly one of *InstanceGroupList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { +// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. +// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head +// er or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70144,7 +72677,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupList{ + ret := &InstanceGroupManagersListManagedInstancesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70156,12 +72689,13 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou } return ret, nil // { - // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.list", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.listManagedInstances", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instanceGroupManager" // ], // "parameters": { // "filter": { @@ -70169,6 +72703,12 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "location": "query", // "type": "string" // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", @@ -70177,7 +72717,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "minimum": "0", // "type": "integer" // }, - // "orderBy": { + // "order_by": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" @@ -70195,15 +72735,15 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "response": { - // "$ref": "InstanceGroupList" + // "$ref": "InstanceGroupManagersListManagedInstancesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -70217,7 +72757,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { +func (c *InstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListManagedInstancesResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -70235,26 +72775,25 @@ func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGrou } } -// method id "compute.instanceGroups.listInstances": +// method id "compute.instanceGroupManagers.listPerInstanceConfigs": -type InstanceGroupsListInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersListPerInstanceConfigsCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group. 
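Putting the new listManagedInstances call together with its Pages helper, here is a sketch (not part of the vendored change) of how a caller might walk every managed instance and its currentAction. It assumes an authenticated *compute.Service from the vendored google.golang.org/api/compute/v1 package and placeholder project, zone, and group names.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printManagedInstances pages through compute.instanceGroupManagers.listManagedInstances
// and reports what the group is currently doing to each instance (e.g. CREATING).
func printManagedInstances(ctx context.Context, svc *compute.Service, project, zone, igm string) error {
	call := svc.InstanceGroupManagers.ListManagedInstances(project, zone, igm).MaxResults(500)
	return call.Pages(ctx, func(page *compute.InstanceGroupManagersListManagedInstancesResponse) error {
		for _, mi := range page.ManagedInstances {
			fmt.Printf("%s\t%s\n", mi.Instance, mi.CurrentAction)
		}
		return nil
	})
}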
-func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { - c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListPerInstanceConfigs: Lists all of the per-instance configs defined +// for the managed instance group. +func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListPerInstanceConfigsCall { + c := &InstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest + c.instanceGroupManager = instanceGroupManager return c } @@ -70280,7 +72819,7 @@ func (r *InstanceGroupsService) ListInstances(project string, zone string, insta // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("filter", filter) return c } @@ -70291,7 +72830,7 @@ func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsL // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -70308,7 +72847,7 @@ func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *Instance // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -70316,7 +72855,7 @@ func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroup // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -70324,7 +72863,7 @@ func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70332,35 +72871,30 @@ func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersListPerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsListInstancesCall) Header() http.Header { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -70368,21 +72902,23 @@ func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.listInstances" call. -// Exactly one of *InstanceGroupsListInstances or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupsListInstances.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { +// Do executes the "compute.instanceGroupManagers.listPerInstanceConfigs" call. +// Exactly one of *InstanceGroupManagersListPerInstanceConfigsResp or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListPerInstanceConfigsResp.ServerResponse.Header +// or (if a response was returned at all) in +// error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListPerInstanceConfigsResp, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70401,7 +72937,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupsListInstances{ + ret := &InstanceGroupManagersListPerInstanceConfigsResp{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70413,13 +72949,13 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins } return ret, nil // { - // "description": "Lists the instances in the specified instance group.", + // "description": "Lists all of the per-instance configs defined for the managed instance group.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.listInstances", + // "id": "compute.instanceGroupManagers.listPerInstanceConfigs", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instanceGroupManager" // ], // "parameters": { // "filter": { @@ -70427,8 +72963,8 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // "location": "query", // "type": "string" // }, - // "instanceGroup": { - // "description": "The name of the instance group from which you want to generate a list of included instances.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -70459,18 +72995,15 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", - // "request": { - // "$ref": "InstanceGroupsListInstancesRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", // "response": { - // "$ref": "InstanceGroupsListInstances" + // "$ref": "InstanceGroupManagersListPerInstanceConfigsResp" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -70484,7 +73017,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListPerInstanceConfigsResp) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -70502,31 +73035,32 @@ func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*Ins } } -// method id "compute.instanceGroups.removeInstances": +// method id "compute.instanceGroupManagers.patch": -type InstanceGroupsRemoveInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersPatchCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveInstances: Removes one or more instances from the specified -// instance group, but does not delete those instances. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration before the VM instance is removed or deleted. -func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { - c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is patched even if the instances in the group are still in the +// process of being patched. You must separately verify the status of +// the individual instances with the listManagedInstances method. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. +func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { + c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager return c } @@ -70544,7 +73078,7 @@ func (r *InstanceGroupsService) RemoveInstances(project string, zone string, ins // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { +func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -70552,7 +73086,7 @@ func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *Instanc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { +func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70560,57 +73094,57 @@ func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { +func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { +func (c *InstanceGroupManagersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.removeInstances" call. +// Do executes the "compute.instanceGroupManagers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70641,17 +73175,17 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.removeInstances", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instanceGroupManagers.patch", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instanceGroupManager" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the specified instances will be removed.", + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", // "location": "path", // "required": true, // "type": "string" @@ -70669,15 +73203,15 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone where you want to create the managed instance group.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "request": { - // "$ref": "InstanceGroupsRemoveInstancesRequest" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -70690,26 +73224,29 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.instanceGroups.setNamedPorts": +// method id "compute.instanceGroupManagers.patchPerInstanceConfigs": -type InstanceGroupsSetNamedPortsCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersPatchPerInstanceConfigsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerspatchperinstanceconfigsreq *InstanceGroupManagersPatchPerInstanceConfigsReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetNamedPorts: Sets the named ports for the specified instance group. 
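The Patch doc comment above spells out the requestId retry contract: reuse the same UUID on a retry and the server ignores the duplicate. A hedged sketch of an idempotent target-size patch under the same assumptions as the earlier examples; the target-size field and the caller-supplied UUID are illustrative choices, not anything mandated by this diff.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// patchTargetSize issues a JSON-merge-patch that only touches targetSize.
// requestID must be a valid, client-generated UUID; resend the identical ID
// when retrying so the server treats the retry as the same operation.
// Note: with the generated omitempty tags a zero size would be dropped from
// the request body unless listed in ForceSendFields.
func patchTargetSize(ctx context.Context, svc *compute.Service, project, zone, igm string, size int64, requestID string) (*compute.Operation, error) {
	patch := &compute.InstanceGroupManager{TargetSize: size}
	return svc.InstanceGroupManagers.
		Patch(project, zone, igm, patch).
		RequestId(requestID).
		Context(ctx).
		Do()
}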
-func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { - c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// PatchPerInstanceConfigs: Insert or patch (for the ones that already +// exist) per-instance configs for the managed instance group. +// perInstanceConfig.instance serves as a key used to distinguish +// whether to perform insert or patch. +func (r *InstanceGroupManagersService) PatchPerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagerspatchperinstanceconfigsreq *InstanceGroupManagersPatchPerInstanceConfigsReq) *InstanceGroupManagersPatchPerInstanceConfigsCall { + c := &InstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerspatchperinstanceconfigsreq = instancegroupmanagerspatchperinstanceconfigsreq return c } @@ -70727,7 +73264,7 @@ func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(requestId string) *InstanceGroupManagersPatchPerInstanceConfigsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -70735,7 +73272,7 @@ func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchPerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70743,35 +73280,35 @@ func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersPatchPerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerspatchperinstanceconfigsreq) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -70779,21 +73316,21 @@ func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.setNamedPorts" call. +// Do executes the "compute.instanceGroupManagers.patchPerInstanceConfigs" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70824,17 +73361,17 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Sets the named ports for the specified instance group.", + // "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.setNamedPorts", + // "id": "compute.instanceGroupManagers.patchPerInstanceConfigs", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instanceGroupManager" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the named ports are updated.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. 
It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -70852,15 +73389,15 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", // "request": { - // "$ref": "InstanceGroupsSetNamedPortsRequest" + // "$ref": "InstanceGroupManagersPatchPerInstanceConfigsReq" // }, // "response": { // "$ref": "Operation" @@ -70873,34 +73410,66 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroups.testIamPermissions": +// method id "compute.instanceGroupManagers.recreateInstances": -type InstanceGroupsTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupsTestIamPermissionsCall { - c := &InstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RecreateInstances: Flags the specified instances in the managed +// instance group to be immediately recreated. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the flag is set +// even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { + c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupsTestIamPermissionsCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70908,35 +73477,35 @@ func (c *InstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Ins // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupsTestIamPermissionsCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -70944,21 +73513,21 @@ func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Resp } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70977,7 +73546,7 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70989,15 +73558,21 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.testIamPermissions", + // "id": "compute.instanceGroupManagers.recreateInstances", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -71005,56 +73580,72 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstanceGroupManagersRecreateInstancesRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceTemplates.delete": +// method id "compute.instanceGroupManagers.resize": -type InstanceTemplatesDeleteCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersResizeCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance template. 
Deleting an instance -// template is permanent and cannot be undone. It's not possible to -// delete templates which are in use by an instance group. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete -func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { - c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the managed instance group. If you increase the size, +// the group creates new instances using the current instance template. +// If you decrease the size, the group deletes instances. The resize +// operation is marked DONE when the resize actions are scheduled even +// if the group has not yet added or deleted any instances. You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. +// +// When resizing down, the instance group arbitrarily chooses the order +// in which VMs are deleted. The group takes into account some VM +// attributes when making the selection including: +// +// + The status of the VM instance. + The health of the VM instance. + +// The instance template version the VM is based on. + For regional +// managed instance groups, the location of the VM instance. +// +// This list is subject to change. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. +func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { + c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) return c } @@ -71072,7 +73663,7 @@ func (r *InstanceTemplatesService) Delete(project string, instanceTemplate strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { +func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -71080,7 +73671,7 @@ func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTempl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { +func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71088,21 +73679,21 @@ func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
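Editor's note (illustrative, not part of the vendored diff): the hunks above replace the generated instanceGroups.testIamPermissions wrapper with compute.instanceGroupManagers.recreateInstances and carry the requestId idempotency parameter onto it. A minimal sketch of how a caller might drive the generated wrapper follows; the import path, the Application Default Credentials setup, the project/zone/group names, the instance URL, the request field name, and the literal UUID are all assumptions made for illustration.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // import path assumed for this vendored surface
)

func main() {
	ctx := context.Background()

	// Application Default Credentials; environment setup assumed.
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatalf("default client: %v", err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatalf("compute service: %v", err)
	}

	req := &compute.InstanceGroupManagersRecreateInstancesRequest{
		// Instances are referenced by partial URL (field name assumed).
		Instances: []string{"zones/us-central1-a/instances/example-instance"},
	}

	op, err := svc.InstanceGroupManagers.
		RecreateInstances("example-project", "us-central1-a", "example-igm", req).
		// A caller-chosen UUID makes a retried request idempotent: the server
		// ignores the duplicate if the original request already completed.
		RequestId("5f9c2c2e-8d6a-4a0f-9b1e-2f3d4c5b6a70").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("recreateInstances: %v", err)
	}

	// The returned Operation is flagged DONE once the recreate is scheduled,
	// not when it finishes; per the method description, verify progress
	// separately with listManagedInstances.
	log.Printf("operation %s: %s", op.Name, op.Status)
}

The same RequestId builder appears on most mutating calls in this file, so this retry pattern carries over unchanged to the methods that follow.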
-func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { +func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -71111,28 +73702,29 @@ func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.delete" call. +// Do executes the "compute.instanceGroupManagers.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71163,18 +73755,19 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It's not possible to delete templates which are in use by an instance group.", - // "httpMethod": "DELETE", - // "id": "compute.instanceTemplates.delete", + // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. 
The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.resize", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "zone", + // "instanceGroupManager", + // "size" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template to delete.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -71189,9 +73782,22 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "size": { + // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + // "format": "int32", + // "location": "query", + // "required": true, + // "type": "integer" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { // "$ref": "Operation" // }, @@ -71203,97 +73809,124 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.get": +// method id "compute.instanceGroupManagers.resizeAdvanced": -type InstanceTemplatesGetCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersResizeAdvancedCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance template. Gets a list of -// available instance templates by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get -func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { - c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ResizeAdvanced: Resizes the managed instance group with advanced +// configuration options like disabling creation retries. This is an +// extended version of the resize method. +// +// If you increase the size of the instance group, the group creates new +// instances using the current instance template. If you decrease the +// size, the group deletes instances. The resize operation is marked +// DONE when the resize actions are scheduled even if the group has not +// yet added or deleted any instances. You must separately verify the +// status of the creating, creatingWithoutRetries, or deleting actions +// with the get or listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. +func (r *InstanceGroupManagersService) ResizeAdvanced(project string, zone string, instanceGroupManager string, instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest) *InstanceGroupManagersResizeAdvancedCall { + c := &InstanceGroupManagersResizeAdvancedCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersresizeadvancedrequest = instancegroupmanagersresizeadvancedrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *InstanceGroupManagersResizeAdvancedCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { +func (c *InstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeAdvancedCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
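Editor's note (illustrative, not part of the vendored diff): the resize hunk above passes the target size as a query parameter, while resizeAdvanced (introduced next) takes a request body so options such as disabling creation retries can be expressed. Reusing the svc/ctx setup from the earlier sketch, with placeholder names and UUID, a plain resize might look like:

op, err := svc.InstanceGroupManagers.
	Resize("example-project", "us-central1-a", "example-igm", 3).
	RequestId("0c1d2e3f-4a5b-4c6d-8e9f-0a1b2c3d4e5f").
	Context(ctx).
	Do()
if err != nil {
	log.Fatalf("resize: %v", err)
}
// DONE here only means the resize actions are scheduled; check the creating
// or deleting actions with listManagedInstances as described above.
_ = op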
-func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { +func (c *InstanceGroupManagersResizeAdvancedCall) Context(ctx context.Context) *InstanceGroupManagersResizeAdvancedCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesGetCall) Header() http.Header { +func (c *InstanceGroupManagersResizeAdvancedCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersresizeadvancedrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.get" call. -// Exactly one of *InstanceTemplate or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplate.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { +// Do executes the "compute.instanceGroupManagers.resizeAdvanced" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71312,7 +73945,7 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceTemplate{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71324,18 +73957,18 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe } return ret, nil // { - // "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.get", + // "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.resizeAdvanced", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -71345,111 +73978,139 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", + // "request": { + // "$ref": "InstanceGroupManagersResizeAdvancedRequest" + // }, // "response": { - // "$ref": "InstanceTemplate" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceTemplates.getIamPolicy": +// method id "compute.instanceGroupManagers.setAutoHealingPolicies": -type InstanceTemplatesGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetAutoHealingPoliciesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *InstanceTemplatesService) GetIamPolicy(project string, resource string) *InstanceTemplatesGetIamPolicyCall { - c := &InstanceTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetAutoHealingPolicies: Modifies the autohealing policies. +// [Deprecated] This method is deprecated. Please use Patch instead. +func (r *InstanceGroupManagersService) SetAutoHealingPolicies(project string, zone string, instanceGroupManager string, instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest) *InstanceGroupManagersSetAutoHealingPoliciesCall { + c := &InstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssetautohealingrequest = instancegroupmanagerssetautohealingrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *InstanceGroupManagersSetAutoHealingPoliciesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetIamPolicyCall { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetAutoHealingPoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesGetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesGetIamPolicyCall { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *InstanceGroupManagersSetAutoHealingPoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetautohealingrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instanceGroupManagers.setAutoHealingPolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71468,7 +74129,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71480,14 +74141,21 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.getIamPolicy", + // "description": "Modifies the autohealing policies. [Deprecated] This method is deprecated. Please use Patch instead.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setAutoHealingPolicies", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -71495,48 +74163,55 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "request": { + // "$ref": "InstanceGroupManagersSetAutoHealingRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceTemplates.insert": +// method id "compute.instanceGroupManagers.setInstanceTemplate": -type InstanceTemplatesInsertCall struct { - s *Service - project string - instancetemplate *InstanceTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance template in the specified project using -// the data that is included in the request. If you are creating a new -// template to update an existing instance group, your new instance -// template must use the same network or, if applicable, the same -// subnetwork as the original template. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert -func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { - c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetInstanceTemplate: Specifies the instance template to use when +// creating new instances in this group. The templates for existing +// instances in the group do not change unless you recreate them. +func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { + c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instancetemplate = instancetemplate + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest return c } @@ -71554,7 +74229,7 @@ func (r *InstanceTemplatesService) Insert(project string, instancetemplate *Inst // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
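Editor's note (illustrative, not part of the vendored diff): setInstanceTemplate only changes the template used for instances created from now on; existing VMs keep their template until they are recreated, for example with recreateInstances above. Reusing the earlier svc/ctx setup; the request field name and the template URL are assumptions:

tmplReq := &compute.InstanceGroupManagersSetInstanceTemplateRequest{
	// Partial URL of the new template (field name assumed).
	InstanceTemplate: "global/instanceTemplates/example-template-v2",
}
op, err := svc.InstanceGroupManagers.
	SetInstanceTemplate("example-project", "us-central1-a", "example-igm", tmplReq).
	Context(ctx).
	Do()
if err != nil {
	log.Fatalf("setInstanceTemplate: %v", err)
}
_ = op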
-func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -71562,7 +74237,7 @@ func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTempl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71570,35 +74245,35 @@ func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesInsertCall) Header() http.Header { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -71606,19 +74281,21 @@ func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.insert" call. +// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71649,13 +74326,21 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", // "httpMethod": "POST", - // "id": "compute.instanceTemplates.insert", + // "id": "compute.instanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -71667,11 +74352,17 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", // "request": { - // "$ref": "InstanceTemplate" + // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" // }, // "response": { // "$ref": "Operation" @@ -71684,157 +74375,116 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.list": +// method id "compute.instanceGroupManagers.setTargetPools": -type InstanceTemplatesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of instance templates that are contained -// within the specified project and zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list -func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { - c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTargetPools: Modifies the target pools to which all instances in +// this managed instance group are assigned. The target pools +// automatically apply to all of the instances in the managed instance +// group. This operation is marked DONE when you make the request even +// if the instances have not yet been added to their target pools. The +// change might take some time to apply to all of the instances in the +// group depending on the size of the group. +func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { + c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. 
You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesListCall) Header() http.Header { +func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.list" call. -// Exactly one of *InstanceTemplateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { +// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71853,7 +74503,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceTemplateList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71865,104 +74515,83 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT } return ret, nil // { - // "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.list", + // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setTargetPools", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "request": { + // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + // }, // "response": { - // "$ref": "InstanceTemplateList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
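Editor's note (illustrative, not part of the vendored diff): setTargetPools reassigns every instance in the managed group; the operation is DONE as soon as the request is accepted, and the change then propagates across the group over time. Reusing the earlier svc/ctx setup; the request field name and the pool URL are assumptions:

tpReq := &compute.InstanceGroupManagersSetTargetPoolsRequest{
	// Partial URLs of the target pools to assign (field name assumed).
	TargetPools: []string{"regions/us-central1/targetPools/example-pool"},
}
op, err := svc.InstanceGroupManagers.
	SetTargetPools("example-project", "us-central1-a", "example-igm", tpReq).
	Context(ctx).
	Do()
if err != nil {
	log.Fatalf("setTargetPools: %v", err)
}
_ = op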
-func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceTemplates.setIamPolicy": +// method id "compute.instanceGroupManagers.testIamPermissions": -type InstanceTemplatesSetIamPolicyCall struct { +type InstanceGroupManagersTestIamPermissionsCall struct { s *Service project string + zone string resource string - globalsetpolicyrequest *GlobalSetPolicyRequest + testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *InstanceTemplatesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InstanceTemplatesSetIamPolicyCall { - c := &InstanceTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InstanceGroupManagersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupManagersTestIamPermissionsCall { + c := &InstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesSetIamPolicyCall { +func (c *InstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71970,35 +74599,35 @@ func (c *InstanceTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesSetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesSetIamPolicyCall { +func (c *InstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupManagersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { +func (c *InstanceGroupManagersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -72007,19 +74636,20 @@ func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Respons req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instanceGroupManagers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72038,7 +74668,7 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72050,11 +74680,12 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instanceTemplates.setIamPolicy", + // "id": "compute.instanceGroupManagers.testIamPermissions", // "parameterOrder": [ // "project", + // "zone", // "resource" // ], // "parameters": { @@ -72068,52 +74699,84 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/setIamPolicy", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions", // "request": { - // "$ref": "GlobalSetPolicyRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Policy" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceTemplates.testIamPermissions": +// method id "compute.instanceGroupManagers.update": -type InstanceTemplatesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersUpdateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { - c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is updated even if the instances in the group have not yet been +// updated. You must separately verify the status of the individual +// instances with the listManagedInstances method. +func (r *InstanceGroupManagersService) Update(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersUpdateCall { + c := &InstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersUpdateCall) RequestId(requestId string) *InstanceGroupManagersUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { +func (c *InstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72121,56 +74784,57 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { +func (c *InstanceGroupManagersUpdateCall) Context(ctx context.Context) *InstanceGroupManagersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.testIamPermissions" call. 
-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instanceGroupManagers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72189,7 +74853,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72201,14 +74865,21 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instanceTemplates.testIamPermissions", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listManagedInstances method.", + // "httpMethod": "PUT", + // "id": "compute.instanceGroupManagers.update", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -72216,53 +74887,56 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstanceGroupManager" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.addAccessConfig": +// method id "compute.instanceGroupManagers.updatePerInstanceConfigs": -type InstancesAddAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersUpdatePerInstanceConfigsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddAccessConfig: Adds an access config to an instance's network -// interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig -func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { - c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdatePerInstanceConfigs: Insert or update (for the ones that already +// exist) per-instance configs for the managed instance group. +// perInstanceConfig.instance serves as a key used to distinguish +// whether to perform insert or patch. +func (r *InstanceGroupManagersService) UpdatePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq) *InstanceGroupManagersUpdatePerInstanceConfigsCall { + c := &InstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersupdateperinstanceconfigsreq = instancegroupmanagersupdateperinstanceconfigsreq return c } @@ -72280,7 +74954,7 @@ func (r *InstancesService) AddAccessConfig(project string, zone string, instance // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *InstanceGroupManagersUpdatePerInstanceConfigsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -72288,7 +74962,7 @@ func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAdd // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdatePerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72296,35 +74970,35 @@ func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAd // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersUpdatePerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAddAccessConfigCall) Header() http.Header { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersupdateperinstanceconfigsreq) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -72332,21 +75006,21 @@ func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.addAccessConfig" call. +// Do executes the "compute.instanceGroupManagers.updatePerInstanceConfigs" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72377,26 +75051,18 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Adds an access config to an instance's network interface.", + // "description": "Insert or update (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", // "httpMethod": "POST", - // "id": "compute.instances.addAccessConfig", + // "id": "compute.instanceGroupManagers.updatePerInstanceConfigs", // "parameterOrder": [ // "project", // "zone", - // "instance", - // "networkInterface" + // "instanceGroupManager" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "networkInterface": { - // "description": "The name of the network interface to add to this instance.", - // "location": "query", // "required": true, // "type": "string" // }, @@ -72413,16 +75079,15 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", // "request": { - // "$ref": "AccessConfig" + // "$ref": "InstanceGroupManagersUpdatePerInstanceConfigsReq" // }, // "response": { // "$ref": "Operation" @@ -72435,28 +75100,28 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.instances.addResourcePolicies": +// method id "compute.instanceGroups.addInstances": -type InstancesAddResourcePoliciesCall struct { - s *Service - project string - zone string - instance string - instancesaddresourcepoliciesrequest *InstancesAddResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsAddInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddResourcePolicies: Adds existing resource policies to an instance. -// You can only add one policy right now which will be applied to this -// instance for scheduling live migrations. 
-func (r *InstancesService) AddResourcePolicies(project string, zone string, instance string, instancesaddresourcepoliciesrequest *InstancesAddResourcePoliciesRequest) *InstancesAddResourcePoliciesCall { - c := &InstancesAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddInstances: Adds a list of instances to the specified instance +// group. All of the instances in the instance group must be in the same +// network/subnetwork. Read Adding instances for more information. +func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { + c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.instancesaddresourcepoliciesrequest = instancesaddresourcepoliciesrequest + c.instanceGroup = instanceGroup + c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest return c } @@ -72474,7 +75139,7 @@ func (r *InstancesService) AddResourcePolicies(project string, zone string, inst // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesAddResourcePoliciesCall) RequestId(requestId string) *InstancesAddResourcePoliciesCall { +func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -72482,7 +75147,7 @@ func (c *InstancesAddResourcePoliciesCall) RequestId(requestId string) *Instance // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAddResourcePoliciesCall) Fields(s ...googleapi.Field) *InstancesAddResourcePoliciesCall { +func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72490,35 +75155,35 @@ func (c *InstancesAddResourcePoliciesCall) Fields(s ...googleapi.Field) *Instanc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAddResourcePoliciesCall) Context(ctx context.Context) *InstancesAddResourcePoliciesCall { +func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesAddResourcePoliciesCall) Header() http.Header { +func (c *InstanceGroupsAddInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesaddresourcepoliciesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -72526,21 +75191,21 @@ func (c *InstancesAddResourcePoliciesCall) doRequest(alt string) (*http.Response } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.addResourcePolicies" call. +// Do executes the "compute.instanceGroups.addInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72571,19 +75236,18 @@ func (c *InstancesAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Adds existing resource policies to an instance. You can only add one policy right now which will be applied to this instance for scheduling live migrations.", + // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. 
Read Adding instances for more information.", // "httpMethod": "POST", - // "id": "compute.instances.addResourcePolicies", + // "id": "compute.instanceGroups.addInstances", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instanceGroup" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "instanceGroup": { + // "description": "The name of the instance group where you are adding instances.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -72600,16 +75264,15 @@ func (c *InstancesAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Op // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/addResourcePolicies", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", // "request": { - // "$ref": "InstancesAddResourcePoliciesRequest" + // "$ref": "InstanceGroupsAddInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -72622,9 +75285,9 @@ func (c *InstancesAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.instances.aggregatedList": +// method id "compute.instanceGroups.aggregatedList": -type InstancesAggregatedListCall struct { +type InstanceGroupsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -72633,11 +75296,10 @@ type InstancesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves aggregated list of all of the instances in -// your project across all regions and zones. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList -func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { - c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of instance groups and sorts them +// by zone. +func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { + c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -72664,7 +75326,7 @@ func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedLi // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -72675,7 +75337,7 @@ func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregated // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -72692,7 +75354,7 @@ func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAgg // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -72700,7 +75362,7 @@ func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregat // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -72708,7 +75370,7 @@ func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72718,7 +75380,7 @@ func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -72726,21 +75388,21 @@ func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesAggregatedListCall) Header() http.Header { +func (c *InstanceGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -72752,7 +75414,7 @@ func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -72765,14 +75427,14 @@ func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.aggregatedList" call. -// Exactly one of *InstanceAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceGroups.aggregatedList" call. +// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { +func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72791,7 +75453,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceAggregatedList{ + ret := &InstanceGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72803,9 +75465,9 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones.", + // "description": "Retrieves the list of instance groups and sorts them by zone.", // "httpMethod": "GET", - // "id": "compute.instances.aggregatedList", + // "id": "compute.instanceGroups.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -72841,9 +75503,9 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // "type": "string" // } // }, - // "path": "{project}/aggregated/instances", + // "path": "{project}/aggregated/instanceGroups", // "response": { - // "$ref": "InstanceAggregatedList" + // "$ref": "InstanceGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -72857,7 +75519,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { +func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -72875,38 +75537,27 @@ func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*Instanc } } -// method id "compute.instances.attachDisk": +// method id "compute.instanceGroups.delete": -type InstancesAttachDiskCall struct { - s *Service - project string - zone string - instance string - attacheddisk *AttachedDisk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsDeleteCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AttachDisk: Attaches an existing Disk resource to an instance. You -// must first create the disk before you can attach it. It is not -// possible to create and attach a disk at the same time. For more -// information, read Adding a persistent disk to your instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk -func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { - c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified instance group. The instances in the +// group are not deleted. Note that instance group must not belong to a +// backend service. Read Deleting an instance group for more +// information. 
+func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { + c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.attacheddisk = attacheddisk - return c -} - -// ForceAttach sets the optional parameter "forceAttach": Whether to -// force attach the disk even if it's currently attached to another -// instance. This is only available for regional disks. -func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { - c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) + c.instanceGroup = instanceGroup return c } @@ -72924,7 +75575,7 @@ func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttach // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { +func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -72932,7 +75583,7 @@ func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { +func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72940,57 +75591,52 @@ func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { +func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAttachDiskCall) Header() http.Header { +func (c *InstanceGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.attachDisk" call. +// Do executes the "compute.instanceGroups.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73021,24 +75667,18 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", - // "httpMethod": "POST", - // "id": "compute.instances.attachDisk", + // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroups.delete", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instanceGroup" // ], // "parameters": { - // "forceAttach": { - // "description": "Whether to force attach the disk even if it's currently attached to another instance. 
This is only available for regional disks.", - // "location": "query", - // "type": "boolean" - // }, - // "instance": { - // "description": "The instance name for this request.", + // "instanceGroup": { + // "description": "The name of the instance group to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -73055,17 +75695,13 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", - // "request": { - // "$ref": "AttachedDisk" - // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", // "response": { // "$ref": "Operation" // }, @@ -73077,105 +75713,99 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instances.delete": +// method id "compute.instanceGroups.get": -type InstancesDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsGetCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Instance resource. For more -// information, see Stopping or Deleting an Instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete -func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { - c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified instance group. Gets a list of available +// instance groups by making a list() request. +func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { + c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.instanceGroup = instanceGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { +func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { +func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDeleteCall) Header() http.Header { +func (c *InstanceGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.instanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73194,7 +75824,7 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73206,19 +75836,18 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", - // "httpMethod": "DELETE", - // "id": "compute.instances.delete", + // "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.get", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instanceGroup" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to delete.", + // "instanceGroup": { + // "description": "The name of the instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -73229,53 +75858,45 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.deleteAccessConfig": +// method id "compute.instanceGroups.insert": -type InstancesDeleteAccessConfigCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsInsertCall struct { + s *Service + project string + zone string + instancegroup *InstanceGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteAccessConfig: Deletes an access config from an instance's -// network interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig -func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { - c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance group in the specified project using the +// parameters that are included in the request. +func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { + c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.urlParams_.Set("accessConfig", accessConfig) - c.urlParams_.Set("networkInterface", networkInterface) + c.instancegroup = instancegroup return c } @@ -73293,7 +75914,7 @@ func (r *InstancesService) DeleteAccessConfig(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { +func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -73301,7 +75922,7 @@ func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *Instances // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
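// Illustrative sketch, not part of the vendored diff above: how a caller might use the
// InstanceGroups Get call together with the IfNoneMatch option and googleapi.IsNotModified,
// as described in the doc comments earlier in this hunk. Assumes svc is an authenticated
// *compute.Service from the generated compute package this file belongs to (imported here
// as "compute") plus "google.golang.org/api/googleapi"; project/zone/group names are placeholders.
func getInstanceGroupIfChanged(svc *compute.Service, etag string) (*compute.InstanceGroup, error) {
	call := svc.InstanceGroups.Get("my-project", "us-central1-a", "my-group")
	if etag != "" {
		// Sends If-None-Match so the server can answer 304 Not Modified.
		call = call.IfNoneMatch(etag)
	}
	ig, err := call.Do()
	if googleapi.IsNotModified(err) {
		// The cached copy is still current; nothing to refresh.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return ig, nil
}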
-func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { +func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73309,30 +75930,35 @@ func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { +func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDeleteAccessConfigCall) Header() http.Header { +func (c *InstanceGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -73340,21 +75966,20 @@ func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.deleteAccessConfig" call. +// Do executes the "compute.instanceGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73385,36 +76010,14 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes an access config from an instance's network interface.", + // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.deleteAccessConfig", + // "id": "compute.instanceGroups.insert", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "accessConfig", - // "networkInterface" + // "zone" // ], // "parameters": { - // "accessConfig": { - // "description": "The name of the access config to delete.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "networkInterface": { - // "description": "The name of the network interface.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -73428,14 +76031,16 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where you want to create the instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + // "path": "{project}/zones/{zone}/instanceGroups", + // "request": { + // "$ref": "InstanceGroup" + // }, // "response": { // "$ref": "Operation" // }, @@ -73447,105 +76052,159 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instances.detachDisk": +// method id "compute.instanceGroups.list": -type InstancesDetachDiskCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DetachDisk: Detaches a disk from an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk -func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { - c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instance groups that are located in the +// specified project and zone. +func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { + c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.urlParams_.Set("deviceName", deviceName) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { +func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { +func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDetachDiskCall) Header() http.Header { +func (c *InstanceGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.detachDisk" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroups.list" call. +// Exactly one of *InstanceGroupList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *InstanceGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73564,7 +76223,7 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73576,27 +76235,35 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Detaches a disk from an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.detachDisk", + // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.list", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "deviceName" + // "zone" // ], // "parameters": { - // "deviceName": { - // "description": "The device name of the disk to detach. Make a get() request on the instance to view currently attached disks and device names.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", - // "required": true, // "type": "string" // }, - // "instance": { - // "description": "Instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -73606,125 +76273,195 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + // "path": "{project}/zones/{zone}/instanceGroups", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
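// Illustrative sketch, not part of the vendored diff: creating a group with Insert (which
// carries the InstanceGroup as a JSON request body) and then walking every page of the
// zonal list with the Pages helper defined just below, using the documented filter syntax.
// Assumes svc is an authenticated *compute.Service from this generated package and that
// "context" and "fmt" are imported; all resource names are placeholders.
func createAndListGroups(ctx context.Context, svc *compute.Service) error {
	// Insert returns a zonal Operation; a real caller would poll it until it is DONE.
	op, err := svc.InstanceGroups.Insert("my-project", "us-central1-a", &compute.InstanceGroup{
		Name: "my-group",
	}).Do()
	if err != nil {
		return err
	}
	_ = op

	// Pages invokes the callback once per InstanceGroupList page until NextPageToken is empty.
	return svc.InstanceGroups.List("my-project", "us-central1-a").
		Filter("name != example-instance"). // filter expression form as documented above
		MaxResults(100).
		Pages(ctx, func(page *compute.InstanceGroupList) error {
			for _, ig := range page.Items {
				fmt.Println(ig.Name, ig.Size)
			}
			return nil
		})
}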
+func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesGetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroups.listInstances": + +type InstanceGroupsListInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Instance resource. Gets a list of -// available instances by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get -func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { - c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListInstances: Lists the instances in the specified instance group. +func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { + c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance + c.instanceGroup = instanceGroup + c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. 
Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { +func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { +func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
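// Illustrative sketch, not part of the vendored diff: listing the members of one group via
// the listInstances POST call shown above. The InstanceGroupsListInstancesRequest body and
// the item field names follow the generated structs elsewhere in this package; Pages handles
// nextPageToken for the caller. Assumes an authenticated *compute.Service and "context"/"fmt".
func listGroupMembers(ctx context.Context, svc *compute.Service, group string) error {
	req := &compute.InstanceGroupsListInstancesRequest{InstanceState: "RUNNING"}
	return svc.InstanceGroups.ListInstances("my-project", "us-central1-a", group, req).
		Pages(ctx, func(page *compute.InstanceGroupsListInstances) error {
			for _, item := range page.Items {
				// item.Instance is the full URL of the member VM.
				fmt.Println(item.Instance, item.Status)
			}
			return nil
		})
}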
-func (c *InstancesGetCall) Header() http.Header { +func (c *InstanceGroupsListInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.get" call. -// Exactly one of *Instance or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Instance.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { +// Do executes the "compute.instanceGroups.listInstances" call. +// Exactly one of *InstanceGroupsListInstances or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupsListInstances.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73743,7 +76480,7 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Instance{ + ret := &InstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73755,22 +76492,44 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } return ret, nil // { - // "description": "Returns the specified Instance resource. 
Gets a list of available instances by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instances.get", + // "description": "Lists the instances in the specified instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.listInstances", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instanceGroup" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to return.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "instanceGroup": { + // "description": "The name of the instance group from which you want to generate a list of included instances.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -73779,16 +76538,18 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "InstanceGroupsListInstancesRequest" + // }, // "response": { - // "$ref": "Instance" + // "$ref": "InstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -73799,112 +76560,136 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } -// method id "compute.instances.getGuestAttributes": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesGetGuestAttributesCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroups.removeInstances": + +type InstanceGroupsRemoveInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetGuestAttributes: Returns the specified guest attributes entry. -func (r *InstancesService) GetGuestAttributes(project string, zone string, instance string) *InstancesGetGuestAttributesCall { - c := &InstancesGetGuestAttributesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveInstances: Removes one or more instances from the specified +// instance group, but does not delete those instances. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration before the VM instance is removed or deleted. +func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { + c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - return c -} - -// QueryPath sets the optional parameter "queryPath": Specifies the -// guest attributes path to be queried. 
-func (c *InstancesGetGuestAttributesCall) QueryPath(queryPath string) *InstancesGetGuestAttributesCall { - c.urlParams_.Set("queryPath", queryPath) + c.instanceGroup = instanceGroup + c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest return c } -// VariableKey sets the optional parameter "variableKey": Specifies the -// key for the guest attributes entry. -func (c *InstancesGetGuestAttributesCall) VariableKey(variableKey string) *InstancesGetGuestAttributesCall { - c.urlParams_.Set("variableKey", variableKey) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetGuestAttributesCall) Fields(s ...googleapi.Field) *InstancesGetGuestAttributesCall { +func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetGuestAttributesCall) IfNoneMatch(entityTag string) *InstancesGetGuestAttributesCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetGuestAttributesCall) Context(ctx context.Context) *InstancesGetGuestAttributesCall { +func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
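// Illustrative sketch, not part of the vendored diff: removing a VM from a group with an
// idempotent retry, using the requestId parameter described in the RequestId comment above.
// The request/reference struct field names follow the generated structs elsewhere in this
// package. Assumes an authenticated *compute.Service; the instance URL and UUID are supplied
// by the caller and are placeholders here.
func removeFromGroup(svc *compute.Service, group, instanceURL, requestID string) (*compute.Operation, error) {
	req := &compute.InstanceGroupsRemoveInstancesRequest{
		Instances: []*compute.InstanceReference{{Instance: instanceURL}},
	}
	return svc.InstanceGroups.
		RemoveInstances("my-project", "us-central1-a", group, req).
		RequestId(requestID). // safe to resend with the same ID if the first attempt times out
		Do()
}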
-func (c *InstancesGetGuestAttributesCall) Header() http.Header { +func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getGuestAttributes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getGuestAttributes" call. -// Exactly one of *GuestAttributes or error will be non-nil. Any non-2xx +// Do executes the "compute.instanceGroups.removeInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *GuestAttributes.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*GuestAttributes, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73923,7 +76708,7 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &GuestAttributes{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73935,19 +76720,18 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue } return ret, nil // { - // "description": "Returns the specified guest attributes entry.", - // "httpMethod": "GET", - // "id": "compute.instances.getGuestAttributes", + // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.removeInstances", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instanceGroup" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "instanceGroup": { + // "description": "The name of the instance group where the specified instances will be removed.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -73958,130 +76742,137 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue // "required": true, // "type": "string" // }, - // "queryPath": { - // "description": "Specifies the guest attributes path to be queried.", - // "location": "query", - // "type": "string" - // }, - // "variableKey": { - // "description": "Specifies the key for the guest attributes entry.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/getGuestAttributes", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "request": { + // "$ref": "InstanceGroupsRemoveInstancesRequest" + // }, // "response": { - // "$ref": "GuestAttributes" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getIamPolicy": +// method id "compute.instanceGroups.setNamedPorts": -type InstancesGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupsSetNamedPortsCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { - c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNamedPorts: Sets the named ports for the specified instance group. +func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { + c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource + c.instanceGroup = instanceGroup + c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesGetIamPolicyCall { +func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstancesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetIamPolicyCall) Context(ctx context.Context) *InstancesGetIamPolicyCall { +func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetIamPolicyCall) Header() http.Header { +func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instanceGroups.setNamedPorts" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74100,7 +76891,7 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74112,15 +76903,21 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.instances.getIamPolicy", + // "description": "Sets the named ports for the specified instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.setNamedPorts", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instanceGroup" // ], // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where the named ports are updated.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -74128,126 +76925,99 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "request": { + // "$ref": "InstanceGroupsSetNamedPortsRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getSerialPortOutput": +// method id "compute.instanceGroups.testIamPermissions": -type InstancesGetSerialPortOutputCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetSerialPortOutput: Returns the last 1 MB of serial port output from -// the specified instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput -func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { - c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InstanceGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupsTestIamPermissionsCall { + c := &InstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - return c -} - -// Port sets the optional parameter "port": Specifies which COM or -// serial port to retrieve data from. -func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("port", fmt.Sprint(port)) - return c -} - -// Start sets the optional parameter "start": Returns output starting -// from a specific byte position. Use this to page through output when -// the output is too large to return in a single request. For the -// initial request, leave this field unspecified. For subsequent calls, -// this field should be set to the next value returned in the previous -// call. -func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("start", fmt.Sprint(start)) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
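// Illustrative sketch, not part of the vendored diff: assigning named ports to a group via
// the setNamedPorts call documented above, so backend services can reference ports by name.
// The request and NamedPort field names follow the generated structs elsewhere in this
// package. Assumes an authenticated *compute.Service; the port mapping is a placeholder, and
// production code would typically also set the request Fingerprint to guard concurrent edits.
func setHTTPPort(svc *compute.Service, group string) (*compute.Operation, error) {
	req := &compute.InstanceGroupsSetNamedPortsRequest{
		NamedPorts: []*compute.NamedPort{{Name: "http", Port: 8080}},
	}
	return svc.InstanceGroups.SetNamedPorts("my-project", "us-central1-a", group, req).Do()
}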
-func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { +func (c *InstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { +func (c *InstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetSerialPortOutputCall) Header() http.Header { +func (c *InstanceGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -74255,19 +77025,19 @@ func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getSerialPortOutput" call. -// Exactly one of *SerialPortOutput or error will be non-nil. Any +// Do executes the "compute.instanceGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *SerialPortOutput.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { +func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74286,7 +77056,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SerialPortOutput{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74298,31 +77068,15 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se } return ret, nil // { - // "description": "Returns the last 1 MB of serial port output from the specified instance.", - // "httpMethod": "GET", - // "id": "compute.instances.getSerialPortOutput", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.testIamPermissions", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "port": { - // "default": "1", - // "description": "Specifies which COM or serial port to retrieve data from.", - // "format": "int32", - // "location": "query", - // "maximum": "4", - // "minimum": "1", - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -74330,10 +77084,11 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // "required": true, // "type": "string" // }, - // "start": { - // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. 
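The getSerialPortOutput call relocated by this hunk returns at most the last 1 MB of console output per request; the Port and Start parameters described above, together with the Next byte position reported in the response, let a caller tail the serial console incrementally. A hedged sketch under the same client assumptions as the previous example; the instance name is a placeholder, the loop is bounded only to keep the sketch finite, and the Next field is assumed from the SerialPortOutput schema.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	var start int64 // leave zero for the initial request, as documented above
	for i := 0; i < 5; i++ {
		out, err := svc.Instances.GetSerialPortOutput("my-project", "us-central1-a", "my-instance").
			Port(1).Start(start).Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Print(out.Contents)
		start = out.Next // resume from the byte position the server reports
		time.Sleep(2 * time.Second)
	}
}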
For subsequent calls, this field should be set to the next value returned in the previous call.", - // "format": "int64", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { @@ -74344,9 +77099,12 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + // "path": "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "SerialPortOutput" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -74357,99 +77115,103 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se } -// method id "compute.instances.getShieldedVmIdentity": +// method id "compute.instanceTemplates.delete": -type InstancesGetShieldedVmIdentityCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceTemplatesDeleteCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetShieldedVmIdentity: Returns the Shielded VM Identity of an -// instance -func (r *InstancesService) GetShieldedVmIdentity(project string, zone string, instance string) *InstancesGetShieldedVmIdentityCall { - c := &InstancesGetShieldedVmIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified instance template. Deleting an instance +// template is permanent and cannot be undone. It is not possible to +// delete templates that are already in use by a managed instance group. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete +func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { + c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.instanceTemplate = instanceTemplate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
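The instanceGroups.testIamPermissions method added above returns the subset of the supplied permissions that the caller actually holds on the resource, which is a cheap way to pre-check access before attempting a mutation. A sketch with the same client assumptions as earlier; the permission strings, project, zone and group names are illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.instanceGroups.get",
			"compute.instanceGroups.update",
		},
	}
	resp, err := svc.InstanceGroups.TestIamPermissions("my-project", "us-central1-a", "my-group", req).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	// resp.Permissions holds only the permissions the caller was actually granted.
	fmt.Println("granted:", resp.Permissions)
}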
-func (c *InstancesGetShieldedVmIdentityCall) Fields(s ...googleapi.Field) *InstancesGetShieldedVmIdentityCall { +func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetShieldedVmIdentityCall) IfNoneMatch(entityTag string) *InstancesGetShieldedVmIdentityCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetShieldedVmIdentityCall) Context(ctx context.Context) *InstancesGetShieldedVmIdentityCall { +func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetShieldedVmIdentityCall) Header() http.Header { +func (c *InstanceTemplatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetShieldedVmIdentityCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getShieldedVmIdentity") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "instanceTemplate": c.instanceTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getShieldedVmIdentity" call. -// Exactly one of *ShieldedVmIdentity or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ShieldedVmIdentity.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesGetShieldedVmIdentityCall) Do(opts ...googleapi.CallOption) (*ShieldedVmIdentity, error) { +// Do executes the "compute.instanceTemplates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74468,7 +77230,7 @@ func (c *InstancesGetShieldedVmIdentityCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ShieldedVmIdentity{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74480,17 +77242,16 @@ func (c *InstancesGetShieldedVmIdentityCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns the Shielded VM Identity of an instance", - // "httpMethod": "GET", - // "id": "compute.instances.getShieldedVmIdentity", + // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group.", + // "httpMethod": "DELETE", + // "id": "compute.instanceTemplates.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "instanceTemplate" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "instanceTemplate": { + // "description": "The name of the instance template to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -74503,159 +77264,115 @@ func (c *InstancesGetShieldedVmIdentityCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/getShieldedVmIdentity", + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", // "response": { - // "$ref": "ShieldedVmIdentity" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.insert": - -type InstancesInsertCall struct { - s *Service - project string - zone string - instance *Instance - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +// method id "compute.instanceTemplates.get": -// Insert: Creates an instance resource in the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert -func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { - c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// SourceInstanceTemplate sets the optional parameter -// "sourceInstanceTemplate": Specifies instance template to create the -// instance. -// -// This field is optional. It can be a full or partial URL. For example, -// the following are all valid URLs to an instance template: -// - -// https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate -// - projects/project/global/instanceTemplates/instanceTemplate -// - global/instanceTemplates/instanceTemplate -func (c *InstancesInsertCall) SourceInstanceTemplate(sourceInstanceTemplate string) *InstancesInsertCall { - c.urlParams_.Set("sourceInstanceTemplate", sourceInstanceTemplate) - return c +type InstanceTemplatesGetCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SourceMachineImage sets the optional parameter "sourceMachineImage": -// Specifies instance machine to create the instance. -// -// This field is optional. It can be a full or partial URL. 
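As the Do doc comments in these hunks state, exactly one of *Operation or error is non-nil, and when the server did send a response the HTTP status and headers are available on *googleapi.Error. A hedged sketch of calling instanceTemplates.delete and inspecting both outcomes; the project and template names are placeholders and polling of the returned Operation is omitted.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	op, err := svc.InstanceTemplates.Delete("my-project", "my-template").Context(ctx).Do()
	if err != nil {
		// If a response was returned at all, status code and headers live on *googleapi.Error.
		if gerr, ok := err.(*googleapi.Error); ok {
			log.Fatalf("delete failed: HTTP %d: %s", gerr.Code, gerr.Message)
		}
		log.Fatal(err)
	}
	// The global Operation describes the asynchronous deletion.
	fmt.Println("started operation:", op.Name, "status:", op.Status)
}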
For example, -// the following are all valid URLs to an instance template: -// - -// https://www.googleapis.com/compute/v1/projects/project/global/global/machineImages/machineImage -// - projects/project/global/global/machineImages/machineImage -// - global/machineImages/machineImage -func (c *InstancesInsertCall) SourceMachineImage(sourceMachineImage string) *InstancesInsertCall { - c.urlParams_.Set("sourceMachineImage", sourceMachineImage) +// Get: Returns the specified instance template. Gets a list of +// available instance templates by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get +func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { + c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instanceTemplate = instanceTemplate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { +func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { +func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesInsertCall) Header() http.Header { +func (c *InstanceTemplatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "instanceTemplate": c.instanceTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceTemplates.get" call. +// Exactly one of *InstanceTemplate or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplate.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74674,7 +77391,7 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceTemplate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74686,148 +77403,67 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an instance resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.insert", + // "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.get", // "parameterOrder": [ // "project", - // "zone" + // "instanceTemplate" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "instanceTemplate": { + // "description": "The name of the instance template.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
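The IfNoneMatch option added to the instanceTemplates.get call above turns a repeat read into a conditional request: if the cached ETag still matches, the server replies 304 and googleapi.IsNotModified reports it. A sketch assuming the ETag was saved from an earlier response's Etag header (whether that header is populated for this resource is an assumption); all names are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	cachedEtag := `"abc123"` // assumed to come from a previous response's Etag header

	tmpl, err := svc.InstanceTemplates.Get("my-project", "my-template").
		IfNoneMatch(cachedEtag).Context(ctx).Do()
	switch {
	case googleapi.IsNotModified(err):
		fmt.Println("template unchanged; keep using the cached copy")
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Println("template changed:", tmpl.Name, tmpl.ServerResponse.Header.Get("Etag"))
	}
}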
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "sourceInstanceTemplate": { - // "description": "Specifies instance template to create the instance.\n\nThis field is optional. It can be a full or partial URL. For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate \n- projects/project/global/instanceTemplates/instanceTemplate \n- global/instanceTemplates/instanceTemplate", - // "location": "query", - // "type": "string" - // }, - // "sourceMachineImage": { - // "description": "Specifies instance machine to create the instance.\n\nThis field is optional. It can be a full or partial URL. For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/global/machineImages/machineImage \n- projects/project/global/global/machineImages/machineImage \n- global/machineImages/machineImage", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", - // "request": { - // "$ref": "Instance" - // }, + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceTemplate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.list": +// method id "compute.instanceTemplates.getIamPolicy": -type InstancesListCall struct { +type InstanceTemplatesGetIamPolicyCall struct { s *Service project string - zone string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of instances contained within the specified -// zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list -func (r *InstancesService) List(project string, zone string) *InstancesListCall { - c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *InstanceTemplatesService) GetIamPolicy(project string, resource string) *InstanceTemplatesGetIamPolicyCall { + c := &InstanceTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. 
-// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstancesListCall) Filter(filter string) *InstancesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { +func (c *InstanceTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -74837,7 +77473,7 @@ func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
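The filter grammar spelled out in the doc comments above (a field name, a comparison operator, and a value, with parenthesized expressions AND-ed by default) applies to the list calls throughout this file. A sketch that filters an instance listing by reusing the expressions from those comments; only the project and zone are placeholders, and the client setup follows the earlier sketches.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Two expressions combined; by default they are AND-ed, as described above.
	filter := `(scheduling.automaticRestart = true) (name != "example-instance")`

	list, err := svc.Instances.List("my-project", "us-central1-a").
		Filter(filter).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, inst := range list.Items {
		fmt.Println(inst.Name, inst.Status)
	}
}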
-func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { +func (c *InstanceTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -74845,21 +77481,21 @@ func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { +func (c *InstanceTemplatesGetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesListCall) Header() http.Header { +func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -74871,7 +77507,7 @@ func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -74879,20 +77515,20 @@ func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.list" call. -// Exactly one of *InstanceList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { +// Do executes the "compute.instanceTemplates.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74911,7 +77547,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74923,37 +77559,14 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err } return ret, nil // { - // "description": "Retrieves the list of instances contained within the specified zone.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.instances.list", + // "id": "compute.instanceTemplates.getIamPolicy", // "parameterOrder": [ // "project", - // "zone" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -74961,17 +77574,17 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", + // "path": "{project}/global/instanceTemplates/{resource}/getIamPolicy", // "response": { - // "$ref": "InstanceList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -74982,48 +77595,191 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { +// method id "compute.instanceTemplates.insert": + +type InstanceTemplatesInsertCall struct { + s *Service + project string + instancetemplate *InstanceTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an instance template in the specified project using +// the data that is included in the request. If you are creating a new +// template to update an existing instance group, your new instance +// template must use the same network or, if applicable, the same +// subnetwork as the original template. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert +func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { + c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instancetemplate = instancetemplate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceTemplatesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceTemplates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() } - if x.NextPageToken == "" { - return nil + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, } - c.PageToken(x.NextPageToken) } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an instance template in the specified project using the data that is included in the request. 
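instanceTemplates.insert, added above, creates a template from the request body; per its description, a template meant to replace one already used by a managed instance group must keep the same network or subnetwork. A minimal, hedged sketch of a template body under the same client assumptions; the machine type, source image and network values are illustrative only.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	tmpl := &compute.InstanceTemplate{
		Name: "web-template-v2",
		Properties: &compute.InstanceProperties{
			MachineType: "n1-standard-1",
			Disks: []*compute.AttachedDisk{{
				Boot:       true,
				AutoDelete: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-10",
				},
			}},
			// Keep the same network as the template being replaced (see the description above).
			NetworkInterfaces: []*compute.NetworkInterface{{Network: "global/networks/default"}},
		},
	}

	op, err := svc.InstanceTemplates.Insert("my-project", tmpl).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("insert operation:", op.Name, op.Status)
}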
If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + // "httpMethod": "POST", + // "id": "compute.instanceTemplates.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates", + // "request": { + // "$ref": "InstanceTemplate" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + } -// method id "compute.instances.listReferrers": +// method id "compute.instanceTemplates.list": -type InstancesListReferrersCall struct { +type InstanceTemplatesListCall struct { s *Service project string - zone string - instance string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// ListReferrers: Retrieves the list of referrers to instances contained -// within the specified zone. For more information, read Viewing -// Referrers to VM Instances. -func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { - c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of instance templates that are contained +// within the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list +func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { + c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance return c } @@ -75049,7 +77805,7 @@ func (r *InstancesService) ListReferrers(project string, zone string, instance s // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { +func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { c.urlParams_.Set("filter", filter) return c } @@ -75060,7 +77816,7 @@ func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferre // nextPageToken that can be used to get the next page of results in // subsequent list requests. 
Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { +func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -75077,7 +77833,7 @@ func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesList // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { +func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -75085,7 +77841,7 @@ func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListRefer // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { +func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -75093,7 +77849,7 @@ func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListR // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { +func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -75103,7 +77859,7 @@ func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesList // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { +func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -75111,21 +77867,21 @@ func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesLis // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { +func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
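The maxResults, pageToken and Pages plumbing around these list calls is how the generated client pages through results: Pages keeps invoking Do and passing each page to the callback until nextPageToken is empty. A sketch that walks every instance template in a project, trimming the payload with a Fields partial response; project name and selected fields are placeholders, client setup as in the earlier sketches.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	call := svc.InstanceTemplates.List("my-project").
		MaxResults(100).
		Fields("nextPageToken", "items(name,creationTimestamp)") // partial response
	err = call.Pages(ctx, func(page *compute.InstanceTemplateList) error {
		for _, t := range page.Items {
			fmt.Println(t.Name, t.CreationTimestamp)
		}
		return nil // a non-nil error here halts the iteration
	})
	if err != nil {
		log.Fatal(err)
	}
}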
-func (c *InstancesListReferrersCall) Header() http.Header { +func (c *InstanceTemplatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -75137,7 +77893,7 @@ func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -75145,21 +77901,19 @@ func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.listReferrers" call. -// Exactly one of *InstanceListReferrers or error will be non-nil. Any +// Do executes the "compute.instanceTemplates.list" call. +// Exactly one of *InstanceTemplateList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InstanceListReferrers.ServerResponse.Header or (if a response was +// *InstanceTemplateList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { +func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75178,7 +77932,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceListReferrers{ + ret := &InstanceTemplateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75190,13 +77944,11 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance } return ret, nil // { - // "description": "Retrieves the list of referrers to instances contained within the specified zone. 
For more information, read Viewing Referrers to VM Instances.", + // "description": "Retrieves a list of instance templates that are contained within the specified project.", // "httpMethod": "GET", - // "id": "compute.instances.listReferrers", + // "id": "compute.instanceTemplates.list", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { // "filter": { @@ -75204,13 +77956,6 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "location": "query", // "type": "string" // }, - // "instance": { - // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", - // "location": "path", - // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", @@ -75235,18 +77980,11 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/referrers", + // "path": "{project}/global/instanceTemplates", // "response": { - // "$ref": "InstanceListReferrers" + // "$ref": "InstanceTemplateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -75260,7 +77998,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { +func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -75278,52 +78016,32 @@ func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*Instance } } -// method id "compute.instances.removeResourcePolicies": +// method id "compute.instanceTemplates.setIamPolicy": -type InstancesRemoveResourcePoliciesCall struct { - s *Service - project string - zone string - instance string - instancesremoveresourcepoliciesrequest *InstancesRemoveResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveResourcePolicies: Removes resource policies from an instance. 
-func (r *InstancesService) RemoveResourcePolicies(project string, zone string, instance string, instancesremoveresourcepoliciesrequest *InstancesRemoveResourcePoliciesRequest) *InstancesRemoveResourcePoliciesCall { - c := &InstancesRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *InstanceTemplatesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InstanceTemplatesSetIamPolicyCall { + c := &InstanceTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancesremoveresourcepoliciesrequest = instancesremoveresourcepoliciesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesRemoveResourcePoliciesCall) RequestId(requestId string) *InstancesRemoveResourcePoliciesCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *InstancesRemoveResourcePoliciesCall { +func (c *InstanceTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -75331,35 +78049,35 @@ func (c *InstancesRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *Inst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesRemoveResourcePoliciesCall) Context(ctx context.Context) *InstancesRemoveResourcePoliciesCall { +func (c *InstanceTemplatesSetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesRemoveResourcePoliciesCall) Header() http.Header { +func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesremoveresourcepoliciesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/removeResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -75368,20 +78086,19 @@ func (c *InstancesRemoveResourcePoliciesCall) doRequest(alt string) (*http.Respo req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.removeResourcePolicies" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceTemplates.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75400,7 +78117,7 @@ func (c *InstancesRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75412,22 +78129,14 @@ func (c *InstancesRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Removes resource policies from an instance.", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.instances.removeResourcePolicies", + // "id": "compute.instanceTemplates.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -75435,25 +78144,20 @@ func (c *InstancesRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/removeResourcePolicies", + // "path": "{project}/global/instanceTemplates/{resource}/setIamPolicy", // "request": { - // "$ref": "InstancesRemoveResourcePoliciesRequest" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -75463,52 +78167,32 @@ func (c *InstancesRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.instances.reset": +// method id "compute.instanceTemplates.testIamPermissions": -type InstancesResetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Reset: Performs a reset on the instance. For more information, see -// Resetting an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset -func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { - c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
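For the compute.instanceTemplates.setIamPolicy call introduced above, a hedged usage sketch: the GlobalSetPolicyRequest/Policy/Binding field names are assumptions about the generated types elsewhere in this package, and the project, template, role, and member values are placeholders.

// Sketch only. Assumed imports: "context",
// and compute "google.golang.org/api/compute/v1".
func grantTemplateAccess(ctx context.Context, svc *compute.Service) (*compute.Policy, error) {
	req := &compute.GlobalSetPolicyRequest{
		Policy: &compute.Policy{
			Bindings: []*compute.Binding{{
				Role:    "roles/compute.instanceAdmin.v1",
				Members: []string{"user:alice@example.com"},
			}},
		},
	}
	// Replaces any existing policy on the template, per the description above.
	return svc.InstanceTemplates.SetIamPolicy("my-project", "my-template", req).Context(ctx).Do()
}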
+func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { + c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { +func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -75516,30 +78200,35 @@ func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { +func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesResetCall) Header() http.Header { +func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -75548,20 +78237,19 @@ func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.reset" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceTemplates.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75580,7 +78268,7 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75592,22 +78280,14 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Performs a reset on the instance. For more information, see Resetting an instance.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instances.reset", + // "id": "compute.instanceTemplates.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -75615,52 +78295,53 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/reset", + // "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.resume": +// method id "compute.instances.addAccessConfig": -type InstancesResumeCall struct { - s *Service - project string - zone string - instance string - instancesresumerequest *InstancesResumeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAddAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resume: Resumes an instance that was suspended using the -// instances().suspend method. -func (r *InstancesService) Resume(project string, zone string, instance string, instancesresumerequest *InstancesResumeRequest) *InstancesResumeCall { - c := &InstancesResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddAccessConfig: Adds an access config to an instance's network +// interface. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig +func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { + c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancesresumerequest = instancesresumerequest + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig return c } @@ -75678,7 +78359,7 @@ func (r *InstancesService) Resume(project string, zone string, instance string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesResumeCall) RequestId(requestId string) *InstancesResumeCall { +func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -75686,7 +78367,7 @@ func (c *InstancesResumeCall) RequestId(requestId string) *InstancesResumeCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
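A similarly hedged sketch for compute.instanceTemplates.testIamPermissions: the permission strings and resource names are placeholders, and the TestPermissionsRequest/TestPermissionsResponse field names are assumed from the generated types rather than taken from this hunk.

// Sketch only. Assumed imports: "context", "fmt",
// and compute "google.golang.org/api/compute/v1".
func checkTemplatePermissions(ctx context.Context, svc *compute.Service) error {
	resp, err := svc.InstanceTemplates.TestIamPermissions("my-project", "my-template",
		&compute.TestPermissionsRequest{
			Permissions: []string{"compute.instanceTemplates.get", "compute.instanceTemplates.useReadOnly"},
		}).Context(ctx).Do()
	if err != nil {
		return err
	}
	// resp.Permissions holds only the subset of permissions the caller actually has.
	fmt.Println(resp.Permissions)
	return nil
}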
-func (c *InstancesResumeCall) Fields(s ...googleapi.Field) *InstancesResumeCall { +func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -75694,35 +78375,35 @@ func (c *InstancesResumeCall) Fields(s ...googleapi.Field) *InstancesResumeCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesResumeCall) Context(ctx context.Context) *InstancesResumeCall { +func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesResumeCall) Header() http.Header { +func (c *InstancesAddAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesResumeCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesresumerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/resume") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -75737,14 +78418,14 @@ func (c *InstancesResumeCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.resume" call. +// Do executes the "compute.instances.addAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75775,22 +78456,29 @@ func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Resumes an instance that was suspended using the instances().suspend method.", + // "description": "Adds an access config to an instance's network interface.", // "httpMethod": "POST", - // "id": "compute.instances.resume", + // "id": "compute.instances.addAccessConfig", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instance", + // "networkInterface" // ], // "parameters": { // "instance": { - // "description": "Name of the instance resource to resume.", + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "networkInterface": { + // "description": "The name of the network interface to add to this instance.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -75811,9 +78499,9 @@ func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/resume", + // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", // "request": { - // "$ref": "InstancesResumeRequest" + // "$ref": "AccessConfig" // }, // "response": { // "$ref": "Operation" @@ -75826,31 +78514,28 @@ func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.setDeletionProtection": +// method id "compute.instances.addResourcePolicies": -type InstancesSetDeletionProtectionCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAddResourcePoliciesCall struct { + s *Service + project string + zone string + instance string + instancesaddresourcepoliciesrequest *InstancesAddResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetDeletionProtection: Sets deletion protection on the instance. -func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { - c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddResourcePolicies: Adds existing resource policies to an instance. +// You can only add one policy right now which will be applied to this +// instance for scheduling live migrations. +func (r *InstancesService) AddResourcePolicies(project string, zone string, instance string, instancesaddresourcepoliciesrequest *InstancesAddResourcePoliciesRequest) *InstancesAddResourcePoliciesCall { + c := &InstancesAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - return c -} - -// DeletionProtection sets the optional parameter "deletionProtection": -// Whether the resource should be protected against deletion. 
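For compute.instances.addAccessConfig above, a sketch that adds an external NAT access config to nic0. The AccessConfig field names (Name, Type, NatIP) are assumptions about the generated type, and every identifier is a placeholder.

// Sketch only. Assumed imports: "context",
// and compute "google.golang.org/api/compute/v1".
func addExternalIP(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	cfg := &compute.AccessConfig{
		Name: "External NAT",
		Type: "ONE_TO_ONE_NAT", // leaving NatIP empty requests an ephemeral external IP
	}
	// The fourth argument is the networkInterface query parameter documented above.
	return svc.Instances.AddAccessConfig("my-project", "us-central1-a", "my-instance", "nic0", cfg).
		Context(ctx).Do()
}

The returned *compute.Operation is asynchronous; callers typically poll it (for example via the zoneOperations.get call elsewhere in this package) until its Status reads DONE.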
-func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { - c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) + c.instance = instance + c.instancesaddresourcepoliciesrequest = instancesaddresourcepoliciesrequest return c } @@ -75868,7 +78553,7 @@ func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtecti // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { +func (c *InstancesAddResourcePoliciesCall) RequestId(requestId string) *InstancesAddResourcePoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -75876,7 +78561,7 @@ func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *Instan // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { +func (c *InstancesAddResourcePoliciesCall) Fields(s ...googleapi.Field) *InstancesAddResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -75884,30 +78569,35 @@ func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *Insta // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { +func (c *InstancesAddResourcePoliciesCall) Context(ctx context.Context) *InstancesAddResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetDeletionProtectionCall) Header() http.Header { +func (c *InstancesAddResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesaddresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addResourcePolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -75917,19 +78607,19 @@ func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Respon googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "resource": c.resource, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setDeletionProtection" call. 
+// Do executes the "compute.instances.addResourcePolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75960,20 +78650,21 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets deletion protection on the instance.", + // "description": "Adds existing resource policies to an instance. You can only add one policy right now which will be applied to this instance for scheduling live migrations.", // "httpMethod": "POST", - // "id": "compute.instances.setDeletionProtection", + // "id": "compute.instances.addResourcePolicies", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instance" // ], // "parameters": { - // "deletionProtection": { - // "default": "true", - // "description": "Whether the resource should be protected against deletion.", - // "location": "query", - // "type": "boolean" + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, // "project": { // "description": "Project ID for this request.", @@ -75987,13 +78678,6 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -76002,7 +78686,10 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", + // "path": "{project}/zones/{zone}/instances/{instance}/addResourcePolicies", + // "request": { + // "$ref": "InstancesAddResourcePoliciesRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -76014,107 +78701,157 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.instances.setDiskAutoDelete": +// method id "compute.instances.aggregatedList": -type InstancesSetDiskAutoDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to -// an instance. 
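compute.instances.addResourcePolicies, added above, takes an InstancesAddResourcePoliciesRequest body. In this sketch the ResourcePolicies field name and the policy URL format are assumptions about the generated type; per the method description, only one policy can be added at a time.

// Sketch only. Assumed imports: "context",
// and compute "google.golang.org/api/compute/v1".
func attachGroupPolicy(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	req := &compute.InstancesAddResourcePoliciesRequest{
		ResourcePolicies: []string{
			"projects/my-project/regions/us-central1/resourcePolicies/my-policy",
		},
	}
	return svc.Instances.AddResourcePolicies("my-project", "us-central1-a", "my-instance", req).
		Context(ctx).Do()
}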
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete -func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { - c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves aggregated list of all of the instances in +// your project across all regions and zones. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList +func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { + c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) - c.urlParams_.Set("deviceName", deviceName) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
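The filter grammar documented just above is easiest to see in a concrete call. This sketch only builds the aggregated-list call with a filter (paging is shown further below); it reuses the doc's own example expressions, and svc and the project ID are placeholders.

// Sketch only. Assumed import: compute "google.golang.org/api/compute/v1".
func skylakeAutoRestartInstances(svc *compute.Service) *compute.InstancesAggregatedListCall {
	// Parenthesised expressions are ANDed by default, as described above.
	return svc.Instances.AggregatedList("my-project").
		Filter(`(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`)
}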
+func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { +func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { +func (c *InstancesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setDiskAutoDelete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.aggregatedList" call. +// Exactly one of *InstanceAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76133,7 +78870,7 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -76145,35 +78882,34 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the auto-delete flag for a disk attached to an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.setDiskAutoDelete", + // "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones.", + // "httpMethod": "GET", + // "id": "compute.instances.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "instance", - // "autoDelete", - // "deviceName" + // "project" // ], // "parameters": { - // "autoDelete": { - // "description": "Whether to auto-delete the disk when the instance is deleted.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", - // "required": true, - // "type": "boolean" + // "type": "string" // }, - // "deviceName": { - // "description": "The device name of the disk to modify. Make a get() request on the instance to view currently attached disks and device names.", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", - // "pattern": "\\w[\\w.-]{0,254}", - // "required": true, // "type": "string" // }, - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -76182,60 +78918,100 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + // "path": "{project}/aggregated/instances", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
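Combining the aggregated list with the Pages helper documented immediately above: InstanceAggregatedList.Items is assumed to be a map keyed by scope (for example "zones/us-central1-a") whose values carry an Instances slice; that shape comes from the generated types elsewhere in this file, not from this hunk, and all identifiers are placeholders.

// Sketch only. Assumed imports: "context", "fmt",
// and compute "google.golang.org/api/compute/v1".
func printAllInstances(ctx context.Context, svc *compute.Service) error {
	return svc.Instances.AggregatedList("my-project").Pages(ctx,
		func(page *compute.InstanceAggregatedList) error {
			for scope, scoped := range page.Items {
				for _, inst := range scoped.Instances {
					fmt.Println(scope, inst.Name, inst.Status)
				}
			}
			return nil // a non-nil error would halt the iteration, per the Pages doc
		})
}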
+func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instances.attachDisk": + +type InstancesAttachDiskCall struct { + s *Service + project string + zone string + instance string + attacheddisk *AttachedDisk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstancesSetIamPolicyCall { - c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AttachDisk: Attaches an existing Disk resource to an instance. You +// must first create the disk before you can attach it. It is not +// possible to create and attach a disk at the same time. For more +// information, read Adding a persistent disk to your instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk +func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { + c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + c.instance = instance + c.attacheddisk = attacheddisk + return c +} + +// ForceAttach sets the optional parameter "forceAttach": Whether to +// force attach the disk even if it's currently attached to another +// instance. +func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { + c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
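For compute.instances.attachDisk and its ForceAttach option, introduced above: a hedged sketch in which the AttachedDisk field names (Source, DeviceName, AutoDelete) are assumptions about the generated type and every identifier is a placeholder. As the method description notes, the disk must already exist before it can be attached.

// Sketch only. Assumed imports: "context",
// and compute "google.golang.org/api/compute/v1".
func attachDataDisk(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	disk := &compute.AttachedDisk{
		Source:     "projects/my-project/zones/us-central1-a/disks/my-data-disk",
		DeviceName: "my-data-disk",
		AutoDelete: false, // keep the disk if the instance is deleted
	}
	return svc.Instances.AttachDisk("my-project", "us-central1-a", "my-instance", disk).
		ForceAttach(true). // optional: attach even if the disk is held by another instance, per the setter above
		Context(ctx).Do()
}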
-func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIamPolicyCall { +func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76243,35 +79019,35 @@ func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetIamPolicyCall) Context(ctx context.Context) *InstancesSetIamPolicyCall { +func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetIamPolicyCall) Header() http.Header { +func (c *InstancesAttachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -76281,19 +79057,19 @@ func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "resource": c.resource, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.attachDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76312,7 +79088,7 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -76324,15 +79100,27 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", // "httpMethod": "POST", - // "id": "compute.instances.setIamPolicy", + // "id": "compute.instances.attachDisk", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instance" // ], // "parameters": { + // "forceAttach": { + // "description": "Whether to force attach the disk even if it's currently attached to another instance.", + // "location": "query", + // "type": "boolean" + // }, + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -76340,11 +79128,9 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, // "zone": { @@ -76355,12 +79141,12 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/setIamPolicy", + // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", // "request": { - // "$ref": "ZoneSetPolicyRequest" + // "$ref": "AttachedDisk" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -76370,27 +79156,26 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } -// method id "compute.instances.setLabels": +// method id "compute.instances.delete": -type InstancesSetLabelsCall struct { - s *Service - project string - zone string - instance string - instancessetlabelsrequest *InstancesSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets labels on an instance. To learn more about labels, -// read the Labeling Resources documentation. -func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { - c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Instance resource. For more +// information, see Stopping or Deleting an Instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete +func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { + c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetlabelsrequest = instancessetlabelsrequest return c } @@ -76408,7 +79193,7 @@ func (r *InstancesService) SetLabels(project string, zone string, instance strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { +func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -76416,7 +79201,7 @@ func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabels // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { +func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76424,37 +79209,32 @@ func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
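In the hunk above, the generated wrapper at this position changes from compute.instances.setIamPolicy to compute.instances.attachDisk: the request body becomes an *AttachedDisk and Do returns an *Operation. A minimal usage sketch of that surface; the client construction through golang.org/x/oauth2/google and every project, zone, instance and disk name below are illustrative assumptions, not part of this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Assumed setup: Application Default Credentials with the cloud-platform scope.
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Illustrative resource names; replace with real project/zone/instance/disk values.
	disk := &compute.AttachedDisk{
		Source:     "zones/us-central1-a/disks/example-data-disk",
		DeviceName: "example-data-disk",
		Mode:       "READ_WRITE",
		Type:       "PERSISTENT",
	}
	op, err := svc.Instances.
		AttachDisk("example-project", "us-central1-a", "example-instance", disk).
		Context(ctx).Do()
	if err != nil {
		log.Fatalf("attachDisk: %v", err)
	}
	// attachDisk is asynchronous; the returned Operation can be polled until DONE.
	fmt.Println("operation:", op.Name, "status:", op.Status)
}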
-func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { +func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetLabelsCall) Header() http.Header { +func (c *InstancesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -76467,14 +79247,14 @@ func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setLabels" call. +// Do executes the "compute.instances.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76505,9 +79285,9 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.instances.setLabels", + // "description": "Deletes the specified Instance resource. 
For more information, see Stopping or Deleting an Instance.", + // "httpMethod": "DELETE", + // "id": "compute.instances.delete", // "parameterOrder": [ // "project", // "zone", @@ -76515,7 +79295,7 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name of the instance resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -76541,10 +79321,7 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setLabels", - // "request": { - // "$ref": "InstancesSetLabelsRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}", // "response": { // "$ref": "Operation" // }, @@ -76556,27 +79333,28 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.instances.setMachineResources": +// method id "compute.instances.deleteAccessConfig": -type InstancesSetMachineResourcesCall struct { - s *Service - project string - zone string - instance string - instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesDeleteAccessConfigCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMachineResources: Changes the number and/or type of accelerator -// for a stopped instance to the values specified in the request. -func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { - c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteAccessConfig: Deletes an access config from an instance's +// network interface. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig +func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { + c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest + c.urlParams_.Set("accessConfig", accessConfig) + c.urlParams_.Set("networkInterface", networkInterface) return c } @@ -76594,7 +79372,7 @@ func (r *InstancesService) SetMachineResources(project string, zone string, inst // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { +func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -76602,7 +79380,7 @@ func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *Instance // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
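The compute.instances.delete wrapper restored above sends no JSON body and supports the optional requestId query parameter for idempotent retries. A hedged sketch that reuses an already constructed *compute.Service and polls the zone operation; ZoneOperations.Get belongs to the same generated package but is not shown in this hunk, and the fixed UUID is only a placeholder.

package computeexample

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v1"
)

// deleteInstance issues compute.instances.delete and waits for the zone
// operation to finish. svc is an already authenticated *compute.Service.
func deleteInstance(ctx context.Context, svc *compute.Service, project, zone, instance string) error {
	op, err := svc.Instances.Delete(project, zone, instance).
		// A unique request ID lets the server ignore an accidental retry; a
		// real caller would generate a fresh UUID per logical request.
		RequestId("11111111-2222-3333-4444-555555555555").
		Context(ctx).Do()
	if err != nil {
		return err
	}
	// Poll the zone operation until it reports DONE (assumed pattern, not
	// shown in this hunk).
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.ZoneOperations.Get(project, zone, op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("delete failed: %+v", op.Error.Errors)
	}
	return nil
}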
-func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { +func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76610,35 +79388,30 @@ func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *Instanc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { +func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMachineResourcesCall) Header() http.Header { +func (c *InstancesDeleteAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -76653,14 +79426,14 @@ func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineResources" call. +// Do executes the "compute.instances.deleteAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76691,22 +79464,36 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", + // "description": "Deletes an access config from an instance's network interface.", // "httpMethod": "POST", - // "id": "compute.instances.setMachineResources", + // "id": "compute.instances.deleteAccessConfig", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instance", + // "accessConfig", + // "networkInterface" // ], // "parameters": { + // "accessConfig": { + // "description": "The name of the access config to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "networkInterface": { + // "description": "The name of the network interface.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -76727,10 +79514,7 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", - // "request": { - // "$ref": "InstancesSetMachineResourcesRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", // "response": { // "$ref": "Operation" // }, @@ -76742,27 +79526,26 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.instances.setMachineType": +// method id "compute.instances.detachDisk": -type InstancesSetMachineTypeCall struct { - s *Service - project string - zone string - instance string - instancessetmachinetyperequest *InstancesSetMachineTypeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesDetachDiskCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMachineType: Changes the machine type for a stopped instance to -// the machine type specified in the request. -func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { - c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DetachDisk: Detaches a disk from an instance. 
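compute.instances.deleteAccessConfig, shown above, now takes the access config and network interface names as required query parameters rather than a request body. A short sketch under the same assumptions; "External NAT" and "nic0" are the conventional default names but are assumptions here, not values taken from this diff.

package computeexample

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// removeExternalIP drops the external access config from an instance's
// primary network interface. "External NAT" and "nic0" are illustrative.
func removeExternalIP(ctx context.Context, svc *compute.Service, project, zone, instance string) (*compute.Operation, error) {
	return svc.Instances.
		DeleteAccessConfig(project, zone, instance, "External NAT", "nic0").
		Context(ctx).Do()
}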
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk +func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { + c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetmachinetyperequest = instancessetmachinetyperequest + c.urlParams_.Set("deviceName", deviceName) return c } @@ -76780,7 +79563,7 @@ func (r *InstancesService) SetMachineType(project string, zone string, instance // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { +func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { c.urlParams_.Set("requestId", requestId) return c } @@ -76788,7 +79571,7 @@ func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetM // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { +func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76796,35 +79579,30 @@ func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { +func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMachineTypeCall) Header() http.Header { +func (c *InstancesDetachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -76839,14 +79617,14 @@ func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineType" call. +// Do executes the "compute.instances.detachDisk" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76877,17 +79655,24 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", + // "description": "Detaches a disk from an instance.", // "httpMethod": "POST", - // "id": "compute.instances.setMachineType", + // "id": "compute.instances.detachDisk", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instance", + // "deviceName" // ], // "parameters": { + // "deviceName": { + // "description": "The device name of the disk to detach. Make a get() request on the instance to view currently attached disks and device names.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -76913,10 +79698,7 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", - // "request": { - // "$ref": "InstancesSetMachineTypeRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", // "response": { // "$ref": "Operation" // }, @@ -76928,92 +79710,80 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instances.setMetadata": +// method id "compute.instances.get": -type InstancesSetMetadataCall struct { - s *Service - project string - zone string - instance string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetMetadata: Sets metadata for the specified instance to the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata -func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { - c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Instance resource. Gets a list of +// available instances by making a list() request. 
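compute.instances.detachDisk likewise identifies the disk by the deviceName query parameter, which per the description above is the device name reported by instances.get rather than the disk resource name. A minimal sketch under the same assumptions.

package computeexample

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// detachByDeviceName detaches a disk by the device name reported in the
// instance's disks[].deviceName field. All names are placeholders.
func detachByDeviceName(ctx context.Context, svc *compute.Service, project, zone, instance, deviceName string) (*compute.Operation, error) {
	return svc.Instances.DetachDisk(project, zone, instance, deviceName).Context(ctx).Do()
}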
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get +func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { + c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.metadata = metadata - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { - c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { +func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { +func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetMetadataCall) Header() http.Header { +func (c *InstancesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -77026,14 +79796,14 @@ func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMetadata" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.instances.get" call. +// Exactly one of *Instance or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Instance.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77052,7 +79822,7 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Instance{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77064,9 +79834,9 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets metadata for the specified instance to the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setMetadata", + // "description": "Returns the specified Instance resource. 
Gets a list of available instances by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instances.get", // "parameterOrder": [ // "project", // "zone", @@ -77074,7 +79844,7 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name of the instance resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -77087,11 +79857,6 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -77100,108 +79865,93 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", - // "request": { - // "$ref": "Metadata" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}", // "response": { - // "$ref": "Operation" + // "$ref": "Instance" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setMinCpuPlatform": +// method id "compute.instances.getEffectiveFirewalls": -type InstancesSetMinCpuPlatformCall struct { - s *Service - project string - zone string - instance string - instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetEffectiveFirewallsCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetMinCpuPlatform: Changes the minimum CPU platform that this -// instance should use. This method can only be called on a stopped -// instance. For more information, read Specifying a Minimum CPU -// Platform. -func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { - c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetEffectiveFirewalls: Returns effective firewalls applied to an +// interface of the instance. 
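The read-only compute.instances.get wrapper above gains IfNoneMatch support, and Fields can request a partial response. A sketch assuming an ETag kept from an earlier call; googleapi.IsNotModified distinguishes a 304 from a real error, as the generated comments describe.

package computeexample

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getInstanceStatus fetches only the name and status fields and treats a
// 304 Not Modified response as "unchanged". etag may be empty on the first call.
func getInstanceStatus(ctx context.Context, svc *compute.Service, project, zone, instance, etag string) (string, error) {
	call := svc.Instances.Get(project, zone, instance).
		Fields("name", "status"). // partial response
		Context(ctx)
	if etag != "" {
		call = call.IfNoneMatch(etag)
	}
	inst, err := call.Do()
	if googleapi.IsNotModified(err) {
		return "", nil // unchanged since etag
	}
	if err != nil {
		return "", err
	}
	return inst.Status, nil
}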
+func (r *InstancesService) GetEffectiveFirewalls(project string, zone string, instance string, networkInterface string) *InstancesGetEffectiveFirewallsCall { + c := &InstancesGetEffectiveFirewallsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { - c.urlParams_.Set("requestId", requestId) + c.urlParams_.Set("networkInterface", networkInterface) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { +func (c *InstancesGetEffectiveFirewallsCall) Fields(s ...googleapi.Field) *InstancesGetEffectiveFirewallsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetEffectiveFirewallsCall) IfNoneMatch(entityTag string) *InstancesGetEffectiveFirewallsCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { +func (c *InstancesGetEffectiveFirewallsCall) Context(ctx context.Context) *InstancesGetEffectiveFirewallsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { +func (c *InstancesGetEffectiveFirewallsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetEffectiveFirewallsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -77214,14 +79964,15 @@ func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMinCpuPlatform" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.getEffectiveFirewalls" call. +// Exactly one of *InstancesGetEffectiveFirewallsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *InstancesGetEffectiveFirewallsResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*InstancesGetEffectiveFirewallsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77240,7 +79991,7 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstancesGetEffectiveFirewallsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77252,19 +80003,26 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. 
For more information, read Specifying a Minimum CPU Platform.", - // "httpMethod": "POST", - // "id": "compute.instances.setMinCpuPlatform", + // "description": "Returns effective firewalls applied to an interface of the instance.", + // "httpMethod": "GET", + // "id": "compute.instances.getEffectiveFirewalls", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instance", + // "networkInterface" // ], // "parameters": { // "instance": { // "description": "Name of the instance scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to get the effective firewalls.", + // "location": "query", // "required": true, // "type": "string" // }, @@ -77275,11 +80033,6 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -77288,106 +80041,105 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", - // "request": { - // "$ref": "InstancesSetMinCpuPlatformRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls", // "response": { - // "$ref": "Operation" + // "$ref": "InstancesGetEffectiveFirewallsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setScheduling": +// method id "compute.instances.getGuestAttributes": -type InstancesSetSchedulingCall struct { - s *Service - project string - zone string - instance string - scheduling *Scheduling - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetGuestAttributesCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetScheduling: Sets an instance's scheduling options. 
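compute.instances.getEffectiveFirewalls, added above, is a GET scoped to one network interface and returns an InstancesGetEffectiveFirewallsResponse. A hedged sketch; the response's Firewalls slice is assumed from the generated types and is not visible in this hunk, and "nic0" is a placeholder interface name.

package computeexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listEffectiveFirewalls prints the firewalls that apply to one interface.
// The Firewalls field on the response is assumed from the generated types.
func listEffectiveFirewalls(ctx context.Context, svc *compute.Service, project, zone, instance string) error {
	resp, err := svc.Instances.
		GetEffectiveFirewalls(project, zone, instance, "nic0").
		Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, fw := range resp.Firewalls {
		fmt.Println(fw.Name)
	}
	return nil
}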
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling -func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { - c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetGuestAttributes: Returns the specified guest attributes entry. +func (r *InstancesService) GetGuestAttributes(project string, zone string, instance string) *InstancesGetGuestAttributesCall { + c := &InstancesGetGuestAttributesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.scheduling = scheduling return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { - c.urlParams_.Set("requestId", requestId) +// QueryPath sets the optional parameter "queryPath": Specifies the +// guest attributes path to be queried. +func (c *InstancesGetGuestAttributesCall) QueryPath(queryPath string) *InstancesGetGuestAttributesCall { + c.urlParams_.Set("queryPath", queryPath) + return c +} + +// VariableKey sets the optional parameter "variableKey": Specifies the +// key for the guest attributes entry. +func (c *InstancesGetGuestAttributesCall) VariableKey(variableKey string) *InstancesGetGuestAttributesCall { + c.urlParams_.Set("variableKey", variableKey) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { +func (c *InstancesGetGuestAttributesCall) Fields(s ...googleapi.Field) *InstancesGetGuestAttributesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetGuestAttributesCall) IfNoneMatch(entityTag string) *InstancesGetGuestAttributesCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { +func (c *InstancesGetGuestAttributesCall) Context(ctx context.Context) *InstancesGetGuestAttributesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetSchedulingCall) Header() http.Header { +func (c *InstancesGetGuestAttributesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getGuestAttributes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -77400,14 +80152,14 @@ func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setScheduling" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.getGuestAttributes" call. +// Exactly one of *GuestAttributes or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *GuestAttributes.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*GuestAttributes, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77426,7 +80178,7 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &GuestAttributes{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77438,9 +80190,9 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Sets an instance's scheduling options.", - // "httpMethod": "POST", - // "id": "compute.instances.setScheduling", + // "description": "Returns the specified guest attributes entry.", + // "httpMethod": "GET", + // "id": "compute.instances.getGuestAttributes", // "parameterOrder": [ // "project", // "zone", @@ -77448,7 +80200,7 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // ], // "parameters": { // "instance": { - // "description": "Instance name for this request.", + // "description": "Name of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -77461,8 +80213,13 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "queryPath": { + // "description": "Specifies the guest attributes path to be queried.", + // "location": "query", + // "type": "string" + // }, + // "variableKey": { + // "description": "Specifies the key for the guest attributes entry.", // "location": "query", // "type": "string" // }, @@ -77474,107 +80231,92 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", - // "request": { - // "$ref": "Scheduling" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/getGuestAttributes", // "response": { - // "$ref": "Operation" + // "$ref": "GuestAttributes" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setServiceAccount": +// method id "compute.instances.getIamPolicy": -type InstancesSetServiceAccountCall struct { - s *Service - project string - zone string - instance string - instancessetserviceaccountrequest *InstancesSetServiceAccountRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetServiceAccount: Sets the service account on the instance. For more -// information, read Changing the service account and access scopes for -// an instance. -func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { - c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { + c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.instancessetserviceaccountrequest = instancessetserviceaccountrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
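The guest-attributes getter above exposes queryPath and variableKey as optional query parameters. A hedged sketch; the "bootstrap/done" key and the VariableValue field read back are assumptions drawn from the generated GuestAttributes type, not from this hunk.

package computeexample

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// readGuestAttribute reads one guest attribute, e.g. a value a startup
// script wrote under the hypothetical namespace/key "bootstrap/done".
func readGuestAttribute(ctx context.Context, svc *compute.Service, project, zone, instance string) (string, error) {
	attrs, err := svc.Instances.GetGuestAttributes(project, zone, instance).
		VariableKey("bootstrap/done"). // namespace/key form; illustrative
		Context(ctx).Do()
	if err != nil {
		return "", err
	}
	// VariableValue is assumed from the generated GuestAttributes type.
	return attrs.VariableValue, nil
}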
-func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { +func (c *InstancesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstancesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { +func (c *InstancesGetIamPolicyCall) Context(ctx context.Context) *InstancesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetServiceAccountCall) Header() http.Header { +func (c *InstancesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -77582,19 +80324,19 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setServiceAccount" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77613,7 +80355,7 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77625,22 +80367,15 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.setServiceAccount", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.instances.getIamPolicy", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -77648,9 +80383,11 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { @@ -77661,108 +80398,111 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", - // "request": { - // "$ref": "InstancesSetServiceAccountRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setShieldedVmIntegrityPolicy": - -type InstancesSetShieldedVmIntegrityPolicyCall struct { - s *Service - project string - zone string - instance string - shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +// method id "compute.instances.getSerialPortOutput": -// SetShieldedVmIntegrityPolicy: Sets the Shielded VM integrity policy -// for a VM instance. You can only use this method on a running VM -// instance. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. -func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone string, instance string, shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy) *InstancesSetShieldedVmIntegrityPolicyCall { - c := &InstancesSetShieldedVmIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +type InstancesGetSerialPortOutputCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetSerialPortOutput: Returns the last 1 MB of serial port output from +// the specified instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput +func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { + c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.shieldedvmintegritypolicy = shieldedvmintegritypolicy return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
-// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedVmIntegrityPolicyCall { - c.urlParams_.Set("requestId", requestId) +// Port sets the optional parameter "port": Specifies which COM or +// serial port to retrieve data from. +func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("port", fmt.Sprint(port)) + return c +} + +// Start sets the optional parameter "start": Returns output starting +// from a specific byte position. Use this to page through output when +// the output is too large to return in a single request. For the +// initial request, leave this field unspecified. For subsequent calls, +// this field should be set to the next value returned in the previous +// call. +func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("start", fmt.Sprint(start)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedVmIntegrityPolicyCall { +func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetShieldedVmIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedVmIntegrityPolicyCall { +func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
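As a rough usage sketch (not part of this vendored diff; the package name, helper name, and compute import path are assumptions), the Port and Start parameters documented above can be used to page through serial console output one request at a time:

    package computeexamples

    import (
        "fmt"

        compute "google.golang.org/api/compute/v1" // import path is an assumption; use the version vendored in this tree
    )

    // dumpSerialPort pages through serial port 1 output using the Start byte
    // cursor described in the GetSerialPortOutput docs above.
    func dumpSerialPort(svc *compute.Service, project, zone, instance string) error {
        var start int64
        for {
            out, err := svc.Instances.GetSerialPortOutput(project, zone, instance).
                Port(1).
                Start(start).
                Do()
            if err != nil {
                return err
            }
            fmt.Print(out.Contents)
            // Next is the offset to pass as Start on the following request;
            // when it stops advancing there is no new output to read.
            if out.Next == start {
                return nil
            }
            start = out.Next
        }
    }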
-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Header() http.Header { +func (c *InstancesGetSerialPortOutputCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmintegritypolicy) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -77775,14 +80515,14 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setShieldedVmIntegrityPolicy" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.getSerialPortOutput" call. +// Exactly one of *SerialPortOutput or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SerialPortOutput.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77801,7 +80541,7 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &SerialPortOutput{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77813,9 +80553,9 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Sets the Shielded VM integrity policy for a VM instance. You can only use this method on a running VM instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.instances.setShieldedVmIntegrityPolicy", + // "description": "Returns the last 1 MB of serial port output from the specified instance.", + // "httpMethod": "GET", + // "id": "compute.instances.getSerialPortOutput", // "parameterOrder": [ // "project", // "zone", @@ -77829,6 +80569,15 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt // "required": true, // "type": "string" // }, + // "port": { + // "default": "1", + // "description": "Specifies which COM or serial port to retrieve data from.", + // "format": "int32", + // "location": "query", + // "maximum": "4", + // "minimum": "1", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -77836,8 +80585,9 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "start": { + // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", + // "format": "int64", // "location": "query", // "type": "string" // }, @@ -77849,107 +80599,92 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", - // "request": { - // "$ref": "ShieldedVmIntegrityPolicy" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", // "response": { - // "$ref": "Operation" + // "$ref": "SerialPortOutput" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setTags": +// method id "compute.instances.getShieldedInstanceIdentity": -type InstancesSetTagsCall struct { - s *Service - project string - zone string - instance string - tags *Tags - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetShieldedInstanceIdentityCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetTags: Sets network tags for the specified instance to the data -// included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags -func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { - c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetShieldedInstanceIdentity: Returns the Shielded Instance Identity +// of an instance +func (r *InstancesService) GetShieldedInstanceIdentity(project string, zone string, instance string) *InstancesGetShieldedInstanceIdentityCall { + c := &InstancesGetShieldedInstanceIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.tags = tags - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { - c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { +func (c *InstancesGetShieldedInstanceIdentityCall) Fields(s ...googleapi.Field) *InstancesGetShieldedInstanceIdentityCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetShieldedInstanceIdentityCall) IfNoneMatch(entityTag string) *InstancesGetShieldedInstanceIdentityCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { +func (c *InstancesGetShieldedInstanceIdentityCall) Context(ctx context.Context) *InstancesGetShieldedInstanceIdentityCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
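The new getShieldedInstanceIdentity method added in this hunk is a plain read-only call. A minimal sketch of calling it follows (the package name, helper name, and import path are assumptions; the identity struct's fields are defined elsewhere in this generated file, so the sketch just re-encodes the response as JSON):

    package computeexamples

    import (
        "encoding/json"
        "fmt"

        compute "google.golang.org/api/compute/v1" // import path is an assumption
    )

    // printShieldedIdentity fetches the Shielded Instance Identity for one
    // instance and prints the raw response.
    func printShieldedIdentity(svc *compute.Service, project, zone, instance string) error {
        id, err := svc.Instances.GetShieldedInstanceIdentity(project, zone, instance).Do()
        if err != nil {
            return err
        }
        b, err := json.MarshalIndent(id, "", "  ")
        if err != nil {
            return err
        }
        fmt.Println(string(b))
        return nil
    }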
-func (c *InstancesSetTagsCall) Header() http.Header { +func (c *InstancesGetShieldedInstanceIdentityCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetShieldedInstanceIdentityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -77962,14 +80697,14 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setTags" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.getShieldedInstanceIdentity" call. +// Exactly one of *ShieldedInstanceIdentity or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ShieldedInstanceIdentity.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOption) (*ShieldedInstanceIdentity, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77988,7 +80723,7 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ShieldedInstanceIdentity{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78000,9 +80735,9 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Sets network tags for the specified instance to the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setTags", + // "description": "Returns the Shielded Instance Identity of an instance", + // "httpMethod": "GET", + // "id": "compute.instances.getShieldedInstanceIdentity", // "parameterOrder": [ // "project", // "zone", @@ -78010,7 +80745,7 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name or id of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -78023,11 +80758,6 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -78036,37 +80766,36 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setTags", - // "request": { - // "$ref": "Tags" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity", // "response": { - // "$ref": "Operation" + // "$ref": "ShieldedInstanceIdentity" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.simulateMaintenanceEvent": +// method id "compute.instances.getShieldedVmIdentity": -type InstancesSimulateMaintenanceEventCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetShieldedVmIdentityCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SimulateMaintenanceEvent: Simulates a maintenance event on the -// instance. -func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { - c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetShieldedVmIdentity: Returns the Shielded VM Identity of an +// instance +func (r *InstancesService) GetShieldedVmIdentity(project string, zone string, instance string) *InstancesGetShieldedVmIdentityCall { + c := &InstancesGetShieldedVmIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance @@ -78076,40 +80805,53 @@ func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { +func (c *InstancesGetShieldedVmIdentityCall) Fields(s ...googleapi.Field) *InstancesGetShieldedVmIdentityCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetShieldedVmIdentityCall) IfNoneMatch(entityTag string) *InstancesGetShieldedVmIdentityCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
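Every GET call in this hunk gains the same IfNoneMatch setter. The intended pattern, sketched below with GetIamPolicy (helper and parameter names are assumptions; how the caller stores the previous ETag is its own concern), is to pass the ETag from an earlier response and treat http.StatusNotModified as "unchanged" via googleapi.IsNotModified:

    package computeexamples

    import (
        "fmt"

        compute "google.golang.org/api/compute/v1" // import path is an assumption
        "google.golang.org/api/googleapi"
    )

    // getPolicyIfChanged re-fetches the instance IAM policy only if it has
    // changed since the response that produced cachedEtag.
    func getPolicyIfChanged(svc *compute.Service, project, zone, instance, cachedEtag string) (*compute.Policy, error) {
        policy, err := svc.Instances.GetIamPolicy(project, zone, instance).
            IfNoneMatch(cachedEtag).
            Do()
        if googleapi.IsNotModified(err) {
            fmt.Println("policy unchanged since last fetch")
            return nil, nil
        }
        if err != nil {
            return nil, err
        }
        return policy, nil
    }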
-func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { +func (c *InstancesGetShieldedVmIdentityCall) Context(ctx context.Context) *InstancesGetShieldedVmIdentityCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { +func (c *InstancesGetShieldedVmIdentityCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetShieldedVmIdentityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getShieldedVmIdentity") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -78122,14 +80864,14 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.simulateMaintenanceEvent" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.getShieldedVmIdentity" call. +// Exactly one of *ShieldedVmIdentity or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ShieldedVmIdentity.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetShieldedVmIdentityCall) Do(opts ...googleapi.CallOption) (*ShieldedVmIdentity, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78148,7 +80890,7 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ShieldedVmIdentity{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78160,9 +80902,9 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Simulates a maintenance event on the instance.", - // "httpMethod": "POST", - // "id": "compute.instances.simulateMaintenanceEvent", + // "description": "Returns the Shielded VM Identity of an instance", + // "httpMethod": "GET", + // "id": "compute.instances.getShieldedVmIdentity", // "parameterOrder": [ // "project", // "zone", @@ -78191,35 +80933,36 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "path": "{project}/zones/{zone}/instances/{instance}/getShieldedVmIdentity", // "response": { - // "$ref": "Operation" + // "$ref": "ShieldedVmIdentity" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.start": +// method id "compute.instances.insert": -type InstancesStartCall struct { +type InstancesInsertCall struct { s *Service project string zone string - instance string + instance *Instance urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Start: Starts an instance that was stopped using the instances().stop -// method. For more information, see Restart an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start -func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { - c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert +func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { + c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance @@ -78240,15 +80983,44 @@ func (r *InstancesService) Start(project string, zone string, instance string) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { +func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// SourceInstanceTemplate sets the optional parameter +// "sourceInstanceTemplate": Specifies instance template to create the +// instance. +// +// This field is optional. It can be a full or partial URL. 
For example, +// the following are all valid URLs to an instance template: +// - +// https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate +// - projects/project/global/instanceTemplates/instanceTemplate +// - global/instanceTemplates/instanceTemplate +func (c *InstancesInsertCall) SourceInstanceTemplate(sourceInstanceTemplate string) *InstancesInsertCall { + c.urlParams_.Set("sourceInstanceTemplate", sourceInstanceTemplate) + return c +} + +// SourceMachineImage sets the optional parameter "sourceMachineImage": +// Specifies instance machine to create the instance. +// +// This field is optional. It can be a full or partial URL. For example, +// the following are all valid URLs to an instance template: +// - +// https://www.googleapis.com/compute/v1/projects/project/global/global/machineImages/machineImage +// - projects/project/global/global/machineImages/machineImage +// - global/machineImages/machineImage +func (c *InstancesInsertCall) SourceMachineImage(sourceMachineImage string) *InstancesInsertCall { + c.urlParams_.Set("sourceMachineImage", sourceMachineImage) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { +func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78256,30 +81028,35 @@ func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { +func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStartCall) Header() http.Header { +func (c *InstancesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -78287,21 +81064,20 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.start" call. +// Do executes the "compute.instances.insert" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78332,22 +81108,14 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", + // "description": "Creates an instance resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.start", + // "id": "compute.instances.insert", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "zone" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -78360,6 +81128,16 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error // "location": "query", // "type": "string" // }, + // "sourceInstanceTemplate": { + // "description": "Specifies instance template to create the instance.\n\nThis field is optional. It can be a full or partial URL. For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate \n- projects/project/global/instanceTemplates/instanceTemplate \n- global/instanceTemplates/instanceTemplate", + // "location": "query", + // "type": "string" + // }, + // "sourceMachineImage": { + // "description": "Specifies instance machine to create the instance.\n\nThis field is optional. It can be a full or partial URL. 
For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/global/machineImages/machineImage \n- projects/project/global/global/machineImages/machineImage \n- global/machineImages/machineImage", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -78368,7 +81146,10 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/start", + // "path": "{project}/zones/{zone}/instances", + // "request": { + // "$ref": "Instance" + // }, // "response": { // "$ref": "Operation" // }, @@ -78380,112 +81161,160 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.instances.startWithEncryptionKey": +// method id "compute.instances.list": -type InstancesStartWithEncryptionKeyCall struct { - s *Service - project string - zone string - instance string - instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// StartWithEncryptionKey: Starts an instance that was stopped using the -// instances().stop method. For more information, see Restart an -// instance. -func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { - c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instances contained within the specified +// zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list +func (r *InstancesService) List(project string, zone string) *InstancesListCall { + c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstancesListCall) Filter(filter string) *InstancesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { +func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { +func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { +func (c *InstancesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.startWithEncryptionKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.list" call. +// Exactly one of *InstanceList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *InstanceList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78504,7 +81333,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78516,20 +81345,35 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.startWithEncryptionKey", + // "description": "Retrieves the list of instances contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.instances.list", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "zone" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -78539,11 +81383,6 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -78552,112 +81391,177 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", - // "request": { - // "$ref": "InstancesStartWithEncryptionKeyRequest" - // }, + // "path": "{project}/zones/{zone}/instances", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.stop": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesStopCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instances.listReferrers": + +type InstancesListReferrersCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Stop: Stops a running instance, shutting it down cleanly, and allows -// you to restart the instance at a later time. Stopped instances do not -// incur VM usage charges while they are stopped. However, resources -// that the VM is using, such as persistent disks and static IP -// addresses, will continue to be charged until they are deleted. 
For -// more information, see Stopping an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop -func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { - c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListReferrers: Retrieves the list of referrers to instances contained +// within the specified zone. For more information, read Viewing +// Referrers to VM Instances. +func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { + c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false (== preserve localSSD data). -func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { - c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { + c.urlParams_.Set("filter", filter) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. 
// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { - c.urlParams_.Set("requestId", requestId) +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { +func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { +func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesStopCall) Header() http.Header { +func (c *InstancesListReferrersCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -78670,14 +81574,14 @@ func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.stop" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.listReferrers" call. +// Exactly one of *InstanceListReferrers or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceListReferrers.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78696,7 +81600,7 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceListReferrers{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78708,27 +81612,45 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.stop", + // "description": "Retrieves the list of referrers to instances contained within the specified zone. 
For more information, read Viewing Referrers to VM Instances.", + // "httpMethod": "GET", + // "id": "compute.instances.listReferrers", // "parameterOrder": [ // "project", // "zone", // "instance" // ], // "parameters": { - // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false (== preserve localSSD data).", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", - // "type": "boolean" + // "type": "string" // }, // "instance": { - // "description": "Name of the instance resource to stop.", + // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -78736,11 +81658,6 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -78749,49 +81666,60 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/stop", + // "path": "{project}/zones/{zone}/instances/{instance}/referrers", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceListReferrers" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.suspend": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesSuspendCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instances.removeResourcePolicies": + +type InstancesRemoveResourcePoliciesCall struct { + s *Service + project string + zone string + instance string + instancesremoveresourcepoliciesrequest *InstancesRemoveResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Suspend: This method suspends a running instance, saving its state to -// persistent storage, and allows you to resume the instance at a later -// time. Suspended instances incur reduced per-minute, virtual machine -// usage charges while they are suspended. Any resources the virtual -// machine is using, such as persistent disks and static IP addresses, -// will continue to be charged until they are deleted. 
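For orientation, a minimal usage sketch of the ListReferrers call and its Pages helper introduced above. The import path (google.golang.org/api/compute/v0.beta) and the Reference.Referrer field are assumptions; the call chain itself mirrors the generated surface shown in this hunk.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path for this vendored client
)

func main() {
	ctx := context.Background()

	// Authenticated HTTP client from Application Default Credentials.
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Page through all referrers of one instance; Pages follows nextPageToken internally.
	call := svc.Instances.ListReferrers("my-project", "us-central1-a", "my-instance").
		Filter(`name != example-instance`). // optional filter, syntax as documented above
		MaxResults(100)
	err = call.Pages(ctx, func(page *compute.InstanceListReferrers) error {
		for _, ref := range page.Items {
			fmt.Println(ref.Referrer) // Referrer field assumed on the Reference type
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}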
-func (r *InstancesService) Suspend(project string, zone string, instance string) *InstancesSuspendCall { - c := &InstancesSuspendCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveResourcePolicies: Removes resource policies from an instance. +func (r *InstancesService) RemoveResourcePolicies(project string, zone string, instance string, instancesremoveresourcepoliciesrequest *InstancesRemoveResourcePoliciesRequest) *InstancesRemoveResourcePoliciesCall { + c := &InstancesRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - return c -} - -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false (== preserve localSSD data). -func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { - c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) + c.instancesremoveresourcepoliciesrequest = instancesremoveresourcepoliciesrequest return c } @@ -78809,7 +81737,7 @@ func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesS // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall { +func (c *InstancesRemoveResourcePoliciesCall) RequestId(requestId string) *InstancesRemoveResourcePoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -78817,7 +81745,7 @@ func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCall { +func (c *InstancesRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *InstancesRemoveResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78825,30 +81753,35 @@ func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSuspendCall) Context(ctx context.Context) *InstancesSuspendCall { +func (c *InstancesRemoveResourcePoliciesCall) Context(ctx context.Context) *InstancesRemoveResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSuspendCall) Header() http.Header { +func (c *InstancesRemoveResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesremoveresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/suspend") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/removeResourcePolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -78863,14 +81796,14 @@ func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.suspend" call. +// Do executes the "compute.instances.removeResourcePolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78901,22 +81834,17 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. Suspended instances incur reduced per-minute, virtual machine usage charges while they are suspended. Any resources the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted.", + // "description": "Removes resource policies from an instance.", // "httpMethod": "POST", - // "id": "compute.instances.suspend", + // "id": "compute.instances.removeResourcePolicies", // "parameterOrder": [ // "project", // "zone", // "instance" // ], // "parameters": { - // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. 
Default value is false (== preserve localSSD data).", - // "location": "query", - // "type": "boolean" - // }, // "instance": { - // "description": "Name of the instance resource to suspend.", + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -78942,7 +81870,10 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/suspend", + // "path": "{project}/zones/{zone}/instances/{instance}/removeResourcePolicies", + // "request": { + // "$ref": "InstancesRemoveResourcePoliciesRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -78954,34 +81885,53 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.instances.testIamPermissions": +// method id "compute.instances.reset": -type InstancesTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesResetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { - c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Reset: Performs a reset on the instance. This is a hard reset the VM +// does not do a graceful shutdown. For more information, see Resetting +// an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset +func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { + c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instance = instance + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
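Most mutating calls in this hunk accept an optional requestId for safe retries. A hedged sketch of that pattern for the new RemoveResourcePolicies method, generating a version-4 UUID locally to avoid an extra dependency; the ResourcePolicies field name on the request type is an assumption, and svc is an authenticated *compute.Service built as in the earlier sketch.

package example

import (
	"crypto/rand"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// removeOnce detaches a resource policy from an instance, tagging the call
// with a client-generated request ID so a retried request is ignored by the
// server instead of being applied twice.
func removeOnce(svc *compute.Service, project, zone, instance, policy string) error {
	// Build a random version-4 UUID without pulling in an extra dependency.
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		return err
	}
	b[6] = (b[6] & 0x0f) | 0x40 // version 4
	b[8] = (b[8] & 0x3f) | 0x80 // RFC 4122 variant
	requestID := fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:16])

	req := &compute.InstancesRemoveResourcePoliciesRequest{
		ResourcePolicies: []string{policy}, // field name assumed for this vendored copy
	}
	_, err := svc.Instances.
		RemoveResourcePolicies(project, zone, instance, req).
		RequestId(requestID). // reuse the same ID when retrying this exact call
		Do()
	return err
}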
-func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { +func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78989,35 +81939,30 @@ func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { +func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesTestIamPermissionsCall) Header() http.Header { +func (c *InstancesResetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -79027,19 +81972,19 @@ func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "resource": c.resource, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instances.reset" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79058,7 +82003,7 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -79070,77 +82015,75 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance.", // "httpMethod": "POST", - // "id": "compute.instances.testIamPermissions", + // "id": "compute.instances.reset", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instance" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/reset", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.updateAccessConfig": +// method id "compute.instances.resume": -type InstancesUpdateAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesResumeCall struct { + s *Service + project string + zone string + instance string + instancesresumerequest *InstancesResumeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateAccessConfig: Updates the specified access config from an -// instance's network interface with the data included in the request. -// This method supports PATCH semantics and uses the JSON merge patch -// format and processing rules. -func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { - c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resume: Resumes an instance that was suspended using the +// instances().suspend method. +func (r *InstancesService) Resume(project string, zone string, instance string, instancesresumerequest *InstancesResumeRequest) *InstancesResumeCall { + c := &InstancesResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig + c.instancesresumerequest = instancesresumerequest return c } @@ -79158,7 +82101,7 @@ func (r *InstancesService) UpdateAccessConfig(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *InstancesUpdateAccessConfigCall { +func (c *InstancesResumeCall) RequestId(requestId string) *InstancesResumeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79166,7 +82109,7 @@ func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *Instances // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
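Reset and Resume both return a long-running Operation rather than waiting for the instance to change state. A sketch of the usual follow-up, polling ZoneOperations until the operation reports DONE; the ZoneOperations.Get call and the Operation status/error fields follow the generated surface of this client family and should be checked against the vendored version.

package example

import (
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// resumeAndWait resumes a suspended instance and blocks until the zone
// operation completes. svc is assumed to be an authenticated *compute.Service.
func resumeAndWait(svc *compute.Service, project, zone, instance string) error {
	op, err := svc.Instances.Resume(project, zone, instance, &compute.InstancesResumeRequest{}).Do()
	if err != nil {
		return err
	}
	for {
		cur, err := svc.ZoneOperations.Get(project, zone, op.Name).Do()
		if err != nil {
			return err
		}
		if cur.Status == "DONE" {
			if cur.Error != nil {
				return fmt.Errorf("resume failed: %+v", cur.Error.Errors)
			}
			return nil
		}
		time.Sleep(2 * time.Second) // simple fixed polling interval for illustration
	}
}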
-func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateAccessConfigCall { +func (c *InstancesResumeCall) Fields(s ...googleapi.Field) *InstancesResumeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79174,35 +82117,35 @@ func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateAccessConfigCall) Context(ctx context.Context) *InstancesUpdateAccessConfigCall { +func (c *InstancesResumeCall) Context(ctx context.Context) *InstancesResumeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateAccessConfigCall) Header() http.Header { +func (c *InstancesResumeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesResumeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesresumerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/resume") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -79217,14 +82160,14 @@ func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateAccessConfig" call. +// Do executes the "compute.instances.resume" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79255,29 +82198,22 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified access config from an instance's network interface with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Resumes an instance that was suspended using the instances().suspend method.", // "httpMethod": "POST", - // "id": "compute.instances.updateAccessConfig", + // "id": "compute.instances.resume", // "parameterOrder": [ // "project", // "zone", - // "instance", - // "networkInterface" + // "instance" // ], // "parameters": { // "instance": { - // "description": "The instance name for this request.", + // "description": "Name of the instance resource to resume.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface where the access config is attached.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -79298,9 +82234,9 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", + // "path": "{project}/zones/{zone}/instances/{instance}/resume", // "request": { - // "$ref": "AccessConfig" + // "$ref": "InstancesResumeRequest" // }, // "response": { // "$ref": "Operation" @@ -79313,29 +82249,31 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instances.updateDisplayDevice": +// method id "compute.instances.setDeletionProtection": -type InstancesUpdateDisplayDeviceCall struct { - s *Service - project string - zone string - instance string - displaydevice *DisplayDevice - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetDeletionProtectionCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateDisplayDevice: Updates the Display config for a VM instance. -// You can only use this method on a stopped VM instance. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. -func (r *InstancesService) UpdateDisplayDevice(project string, zone string, instance string, displaydevice *DisplayDevice) *InstancesUpdateDisplayDeviceCall { - c := &InstancesUpdateDisplayDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDeletionProtection: Sets deletion protection on the instance. +func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { + c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.displaydevice = displaydevice + c.resource = resource + return c +} + +// DeletionProtection sets the optional parameter "deletionProtection": +// Whether the resource should be protected against deletion. +func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) return c } @@ -79353,7 +82291,7 @@ func (r *InstancesService) UpdateDisplayDevice(project string, zone string, inst // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
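The new setDeletionProtection call carries the flag as a query parameter rather than a request body. A minimal sketch, assuming an authenticated *compute.Service; pass the flag explicitly rather than relying on the server-side default shown in the discovery metadata below.

package example

import (
	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// protectInstance toggles deletion protection on an instance. The flag is sent
// as the deletionProtection query parameter; omitting the setter falls back to
// the server-side default.
func protectInstance(svc *compute.Service, project, zone, instance string, protect bool) error {
	_, err := svc.Instances.
		SetDeletionProtection(project, zone, instance).
		DeletionProtection(protect).
		Do()
	return err
}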
-func (c *InstancesUpdateDisplayDeviceCall) RequestId(requestId string) *InstancesUpdateDisplayDeviceCall { +func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79361,7 +82299,7 @@ func (c *InstancesUpdateDisplayDeviceCall) RequestId(requestId string) *Instance // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateDisplayDeviceCall) Fields(s ...googleapi.Field) *InstancesUpdateDisplayDeviceCall { +func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79369,37 +82307,32 @@ func (c *InstancesUpdateDisplayDeviceCall) Fields(s ...googleapi.Field) *Instanc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateDisplayDeviceCall) Context(ctx context.Context) *InstancesUpdateDisplayDeviceCall { +func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateDisplayDeviceCall) Header() http.Header { +func (c *InstancesSetDeletionProtectionCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.displaydevice) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -79407,19 +82340,19 @@ func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateDisplayDevice" call. +// Do executes the "compute.instances.setDeletionProtection" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79450,21 +82383,20 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Updates the Display config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.instances.updateDisplayDevice", + // "description": "Sets deletion protection on the instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setDeletionProtection", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" + // "deletionProtection": { + // "default": "true", + // "description": "Whether the resource should be protected against deletion.", + // "location": "query", + // "type": "boolean" // }, // "project": { // "description": "Project ID for this request.", @@ -79478,6 +82410,13 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op // "location": "query", // "type": "string" // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -79486,10 +82425,7 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice", - // "request": { - // "$ref": "DisplayDevice" - // }, + // "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", // "response": { // "$ref": "Operation" // }, @@ -79501,28 +82437,28 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.instances.updateNetworkInterface": +// method id "compute.instances.setDiskAutoDelete": -type InstancesUpdateNetworkInterfaceCall struct { - s *Service - project string - zone string - instance string - networkinterface *NetworkInterface - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetDiskAutoDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateNetworkInterface: Updates an instance's network interface. This -// method follows PATCH semantics. -func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { - c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to +// an instance. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete +func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { + c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.networkinterface = networkinterface + c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) + c.urlParams_.Set("deviceName", deviceName) return c } @@ -79540,7 +82476,7 @@ func (r *InstancesService) UpdateNetworkInterface(project string, zone string, i // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *InstancesUpdateNetworkInterfaceCall { +func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79548,7 +82484,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *Insta // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesUpdateNetworkInterfaceCall { +func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79556,37 +82492,32 @@ func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *Inst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateNetworkInterfaceCall) Context(ctx context.Context) *InstancesUpdateNetworkInterfaceCall { +func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { +func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkinterface) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -79599,14 +82530,14 @@ func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateNetworkInterface" call. +// Do executes the "compute.instances.setDiskAutoDelete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79637,16 +82568,30 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Updates an instance's network interface. This method follows PATCH semantics.", - // "httpMethod": "PATCH", - // "id": "compute.instances.updateNetworkInterface", + // "description": "Sets the auto-delete flag for a disk attached to an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setDiskAutoDelete", // "parameterOrder": [ // "project", // "zone", // "instance", - // "networkInterface" + // "autoDelete", + // "deviceName" // ], // "parameters": { + // "autoDelete": { + // "description": "Whether to auto-delete the disk when the instance is deleted.", + // "location": "query", + // "required": true, + // "type": "boolean" + // }, + // "deviceName": { + // "description": "The device name of the disk to modify. 
Make a get() request on the instance to view currently attached disks and device names.", + // "location": "query", + // "pattern": "\\w[\\w.-]{0,254}", + // "required": true, + // "type": "string" + // }, // "instance": { // "description": "The instance name for this request.", // "location": "path", @@ -79654,12 +82599,6 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface to update.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -79680,10 +82619,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", - // "request": { - // "$ref": "NetworkInterface" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", // "response": { // "$ref": "Operation" // }, @@ -79695,55 +82631,34 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.instances.updateShieldedVmConfig": +// method id "compute.instances.setIamPolicy": -type InstancesUpdateShieldedVmConfigCall struct { - s *Service - project string - zone string - instance string - shieldedvmconfig *ShieldedVmConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateShieldedVmConfig: Updates the Shielded VM config for a VM -// instance. You can only use this method on a stopped VM instance. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. -func (r *InstancesService) UpdateShieldedVmConfig(project string, zone string, instance string, shieldedvmconfig *ShieldedVmConfig) *InstancesUpdateShieldedVmConfigCall { - c := &InstancesUpdateShieldedVmConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstancesSetIamPolicyCall { + c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.shieldedvmconfig = shieldedvmconfig - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
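setDiskAutoDelete identifies the disk by its device name rather than by the disk resource name, so callers usually look the device name up from the instance first, as the description above suggests. A hedged sketch; the Disks, AutoDelete, and DeviceName fields follow the usual generated naming and are assumptions if this vendored copy differs.

package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// disableAutoDelete clears the auto-delete flag on every disk attached to an
// instance, so the disks survive instance deletion.
func disableAutoDelete(svc *compute.Service, project, zone, instance string) error {
	inst, err := svc.Instances.Get(project, zone, instance).Do()
	if err != nil {
		return err
	}
	for _, d := range inst.Disks { // Disks []*AttachedDisk; DeviceName assumed present
		if !d.AutoDelete {
			continue
		}
		if _, err := svc.Instances.SetDiskAutoDelete(project, zone, instance, false, d.DeviceName).Do(); err != nil {
			return fmt.Errorf("disk %q: %v", d.DeviceName, err)
		}
	}
	return nil
}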
-func (c *InstancesUpdateShieldedVmConfigCall) RequestId(requestId string) *InstancesUpdateShieldedVmConfigCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateShieldedVmConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateShieldedVmConfigCall { +func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79751,37 +82666,37 @@ func (c *InstancesUpdateShieldedVmConfigCall) Fields(s ...googleapi.Field) *Inst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateShieldedVmConfigCall) Context(ctx context.Context) *InstancesUpdateShieldedVmConfigCall { +func (c *InstancesSetIamPolicyCall) Context(ctx context.Context) *InstancesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateShieldedVmConfigCall) Header() http.Header { +func (c *InstancesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateShieldedVmConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmconfig) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateShieldedVmConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -79789,19 +82704,19 @@ func (c *InstancesUpdateShieldedVmConfigCall) doRequest(alt string) (*http.Respo googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateShieldedVmConfig" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79820,7 +82735,7 @@ func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -79832,22 +82747,15 @@ func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Updates the Shielded VM config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.instances.updateShieldedVmConfig", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.instances.setIamPolicy", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -79855,9 +82763,11 @@ func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { @@ -79868,12 +82778,12 @@ func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedVmConfig", + // "path": "{project}/zones/{zone}/instances/{resource}/setIamPolicy", // "request": { - // "$ref": "ShieldedVmConfig" + // "$ref": "ZoneSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -79883,157 +82793,111 @@ func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.interconnectAttachments.aggregatedList": +// method id "compute.instances.setLabels": -type InterconnectAttachmentsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetLabelsCall struct { + s *Service + project string + zone string + instance string + instancessetlabelsrequest *InstancesSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of interconnect -// attachments. -func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { - c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets labels on an instance. To learn more about labels, +// read the Labeling Resources documentation. +func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { + c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instance = instance + c.instancessetlabelsrequest = instancessetlabelsrequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { +func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { +func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { +func (c *InstancesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.aggregatedList" call. -// Exactly one of *InterconnectAttachmentAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { +// Do executes the "compute.instances.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80052,7 +82916,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachmentAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80064,96 +82928,78 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Retrieves an aggregated list of interconnect attachments.", - // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.aggregatedList", + // "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.instances.setLabels", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/interconnectAttachments", + // "path": "{project}/zones/{zone}/instances/{instance}/setLabels", + // "request": { + // "$ref": "InstancesSetLabelsRequest" + // }, // "response": { - // "$ref": "InterconnectAttachmentAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.interconnectAttachments.delete": +// method id "compute.instances.setMachineResources": -type InterconnectAttachmentsDeleteCall struct { - s *Service - project string - region string - interconnectAttachment string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMachineResourcesCall struct { + s *Service + project string + zone string + instance string + instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified interconnect attachment. -func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { - c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMachineResources: Changes the number and/or type of accelerator +// for a stopped instance to the values specified in the request. +func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { + c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment + c.zone = zone + c.instance = instance + c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest return c } @@ -80171,7 +83017,7 @@ func (r *InterconnectAttachmentsService) Delete(project string, region string, i // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { +func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -80179,7 +83025,7 @@ func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *Interco // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { +func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80187,52 +83033,57 @@ func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *Interc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { +func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { +func (c *InstancesSetMachineResourcesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.delete" call. +// Do executes the "compute.instances.setMachineResources" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80263,19 +83114,19 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Deletes the specified interconnect attachment.", - // "httpMethod": "DELETE", - // "id": "compute.interconnectAttachments.delete", + // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMachineResources", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "zone", + // "instance" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to delete.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -80286,20 +83137,23 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", + // "request": { + // "$ref": "InstancesSetMachineResourcesRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -80311,98 +83165,111 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.interconnectAttachments.get": +// method id "compute.instances.setMachineType": -type InterconnectAttachmentsGetCall struct { - s *Service - project string - region string - interconnectAttachment string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetMachineTypeCall struct { + s *Service + project string + zone string + instance string + instancessetmachinetyperequest *InstancesSetMachineTypeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified interconnect attachment. 
-func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { - c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMachineType: Changes the machine type for a stopped instance to +// the machine type specified in the request. +func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { + c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment + c.zone = zone + c.instance = instance + c.instancessetmachinetyperequest = instancessetmachinetyperequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { +func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { +func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectAttachmentsGetCall) Header() http.Header { +func (c *InstancesSetMachineTypeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.get" call. -// Exactly one of *InterconnectAttachment or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InterconnectAttachment.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { +// Do executes the "compute.instances.setMachineType" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80421,7 +83288,7 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachment{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80433,19 +83300,19 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte } return ret, nil // { - // "description": "Returns the specified interconnect attachment.", - // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.get", + // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMachineType", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "zone", + // "instance" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to return.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -80456,120 +83323,140 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", + // "request": { + // "$ref": "InstancesSetMachineTypeRequest" + // }, // "response": { - // "$ref": "InterconnectAttachment" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.interconnectAttachments.getIamPolicy": +// method id "compute.instances.setMetadata": -type InterconnectAttachmentsGetIamPolicyCall struct { - s *Service - project string - region string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetMetadataCall struct { + s *Service + project string + zone string + instance string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *InterconnectAttachmentsService) GetIamPolicy(project string, region string, resource string) *InterconnectAttachmentsGetIamPolicyCall { - c := &InterconnectAttachmentsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMetadata: Sets metadata for the specified instance to the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata +func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { + c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource + c.zone = zone + c.instance = instance + c.metadata = metadata + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InterconnectAttachmentsGetIamPolicyCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetIamPolicyCall { +func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsGetIamPolicyCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsGetIamPolicyCall) Context(ctx context.Context) *InterconnectAttachmentsGetIamPolicyCall { +func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsGetIamPolicyCall) Header() http.Header { +func (c *InstancesSetMetadataCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InterconnectAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.setMetadata" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80588,7 +83475,7 @@ func (c *InterconnectAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80600,15 +83487,22 @@ func (c *InterconnectAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.getIamPolicy", + // "description": "Sets metadata for the specified instance to the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMetadata", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -80616,53 +83510,57 @@ func (c *InterconnectAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", + // "request": { + // "$ref": "Metadata" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.interconnectAttachments.insert": +// method id "compute.instances.setMinCpuPlatform": -type InterconnectAttachmentsInsertCall struct { - s *Service - project string - region string - interconnectattachment *InterconnectAttachment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMinCpuPlatformCall struct { + s *Service + project string + zone string + instance string + instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an InterconnectAttachment in the specified project -// using the data included in the request. -func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { - c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMinCpuPlatform: Changes the minimum CPU platform that this +// instance should use. This method can only be called on a stopped +// instance. For more information, read Specifying a Minimum CPU +// Platform. +func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { + c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectattachment = interconnectattachment + c.zone = zone + c.instance = instance + c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest return c } @@ -80680,7 +83578,7 @@ func (r *InterconnectAttachmentsService) Insert(project string, region string, i // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { +func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { c.urlParams_.Set("requestId", requestId) return c } @@ -80688,7 +83586,7 @@ func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *Interco // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { +func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80696,35 +83594,35 @@ func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *Interc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { +func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsInsertCall) Header() http.Header { +func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -80732,20 +83630,21 @@ func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.insert" call. +// Do executes the "compute.instances.setMinCpuPlatform" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80776,25 +83675,26 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", + // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.", // "httpMethod": "POST", - // "id": "compute.interconnectAttachments.insert", + // "id": "compute.instances.setMinCpuPlatform", // "parameterOrder": [ // "project", - // "region" + // "zone", + // "instance" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -80802,11 +83702,18 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments", + // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", // "request": { - // "$ref": "InterconnectAttachment" + // "$ref": "InstancesSetMinCpuPlatformRequest" // }, // "response": { // "$ref": "Operation" @@ -80819,159 +83726,111 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.interconnectAttachments.list": +// method id "compute.instances.setScheduling": -type InterconnectAttachmentsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetSchedulingCall struct { + s *Service + project string + zone string + instance string + scheduling *Scheduling + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of interconnect attachments contained within -// the specified region. -func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { - c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetScheduling: Sets an instance's scheduling options. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling +func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { + c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
-func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.zone = zone + c.instance = instance + c.scheduling = scheduling return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { +func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { +func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsListCall) Header() http.Header { +func (c *InstancesSetSchedulingCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.list" call. -// Exactly one of *InterconnectAttachmentList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InterconnectAttachmentList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { +// Do executes the "compute.instances.setScheduling" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80990,7 +83849,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachmentList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81002,35 +83861,20 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Retrieves the list of interconnect attachments contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.list", + // "description": "Sets an instance's scheduling options.", + // "httpMethod": "POST", + // "id": "compute.instances.setScheduling", // "parameterOrder": [ // "project", - // "region" + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instance": { + // "description": "Instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -81040,70 +83884,56 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments", + // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + // "request": { + // "$ref": "Scheduling" + // }, // "response": { - // "$ref": "InterconnectAttachmentList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.interconnectAttachments.patch": +// method id "compute.instances.setServiceAccount": -type InterconnectAttachmentsPatchCall struct { - s *Service - project string - region string - interconnectAttachment string - interconnectattachment *InterconnectAttachment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetServiceAccountCall struct { + s *Service + project string + zone string + instance string + instancessetserviceaccountrequest *InstancesSetServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified interconnect attachment with the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. 
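A rough usage sketch for the compute.instances.setScheduling call defined above. It assumes the generated package is imported as compute (the usual google.golang.org/api/compute/v1 path, which is not visible in this hunk), that svc is a *compute.Service already built with an authenticated client and exposing the usual Instances sub-service, and that Scheduling carries an OnHostMaintenance string field as defined elsewhere in this file; only the RequestId, Context and Do builders shown in the diff are relied on.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated client
)

// setSchedulingSketch applies a scheduling policy to an instance.
// requestID should be a caller-generated UUID; sending the same value on a
// retry lets the server recognize and drop the duplicate request.
func setSchedulingSketch(ctx context.Context, svc *compute.Service, project, zone, instance, requestID string) (*compute.Operation, error) {
	sched := &compute.Scheduling{
		// Field name assumed from the Scheduling schema defined elsewhere in this file.
		OnHostMaintenance: "MIGRATE",
	}
	return svc.Instances.SetScheduling(project, zone, instance, sched).
		RequestId(requestID).
		Context(ctx).
		Do()
}

Passing the same caller-generated UUID to RequestId on every retry is what makes the retry safe: the server recognizes the duplicate and ignores it, as the parameter documentation above explains.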
-func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { - c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetServiceAccount: Sets the service account on the instance. For more +// information, read Changing the service account and access scopes for +// an instance. +func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { + c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment - c.interconnectattachment = interconnectattachment + c.zone = zone + c.instance = instance + c.instancessetserviceaccountrequest = instancessetserviceaccountrequest return c } @@ -81121,7 +83951,7 @@ func (r *InterconnectAttachmentsService) Patch(project string, region string, in // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { +func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { c.urlParams_.Set("requestId", requestId) return c } @@ -81129,7 +83959,7 @@ func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *Intercon // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { +func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -81137,57 +83967,57 @@ func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *Interco // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { +func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectAttachmentsPatchCall) Header() http.Header { +func (c *InstancesSetServiceAccountCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.patch" call. +// Do executes the "compute.instances.setServiceAccount" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81218,19 +84048,19 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.interconnectAttachments.patch", + // "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setServiceAccount", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "zone", + // "instance" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to patch.", + // "instance": { + // "description": "Name of the instance resource to start.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -81241,22 +84071,22 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", // "request": { - // "$ref": "InterconnectAttachment" + // "$ref": "InstancesSetServiceAccountRequest" // }, // "response": { // "$ref": "Operation" @@ -81269,34 +84099,55 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.interconnectAttachments.setIamPolicy": +// method id "compute.instances.setShieldedInstanceIntegrityPolicy": -type InterconnectAttachmentsSetIamPolicyCall struct { - s *Service - project string - region string - resource string - regionsetpolicyrequest *RegionSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetShieldedInstanceIntegrityPolicyCall struct { + s *Service + project string + zone string + instance string + shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. 
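A similar hedged sketch for compute.instances.setServiceAccount, under the same assumptions about the compute import path and the svc value as the earlier sketch; Email and Scopes are assumed field names taken from the InstancesSetServiceAccountRequest schema defined elsewhere in this file.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// setServiceAccountSketch points an instance at a different service account
// with a single broad scope. Email and Scopes are assumed field names on
// InstancesSetServiceAccountRequest.
func setServiceAccountSketch(ctx context.Context, svc *compute.Service, project, zone, instance string) (*compute.Operation, error) {
	req := &compute.InstancesSetServiceAccountRequest{
		Email:  "example-sa@example-project.iam.gserviceaccount.com",
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	}
	return svc.Instances.SetServiceAccount(project, zone, instance, req).Context(ctx).Do()
}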
-func (r *InterconnectAttachmentsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *InterconnectAttachmentsSetIamPolicyCall { - c := &InterconnectAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance +// integrity policy for an instance. You can only use this method on a +// running instance. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. +func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.regionsetpolicyrequest = regionsetpolicyrequest + c.zone = zone + c.instance = instance + c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetIamPolicyCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -81304,57 +84155,57 @@ func (c *InterconnectAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsSetIamPolicyCall) Context(ctx context.Context) *InterconnectAttachmentsSetIamPolicyCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectAttachmentsSetIamPolicyCall) Header() http.Header { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InterconnectAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81373,7 +84224,7 @@ func (c *InterconnectAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81385,15 +84236,22 @@ func (c *InterconnectAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.interconnectAttachments.setIamPolicy", + // "description": "Sets the Shielded Instance integrity policy for an instance. 
You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name or id of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -81401,27 +84259,25 @@ func (c *InterconnectAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/setIamPolicy", + // "path": "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", // "request": { - // "$ref": "RegionSetPolicyRequest" + // "$ref": "ShieldedInstanceIntegrityPolicy" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -81431,27 +84287,29 @@ func (c *InterconnectAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOptio } -// method id "compute.interconnectAttachments.setLabels": +// method id "compute.instances.setShieldedVmIntegrityPolicy": -type InterconnectAttachmentsSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetShieldedVmIntegrityPolicyCall struct { + s *Service + project string + zone string + instance string + shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an InterconnectAttachment. To learn -// more about labels, read the Labeling Resources documentation. 
-func (r *InterconnectAttachmentsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *InterconnectAttachmentsSetLabelsCall { - c := &InterconnectAttachmentsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetShieldedVmIntegrityPolicy: Sets the Shielded VM integrity policy +// for a VM instance. You can only use this method on a running VM +// instance. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. +func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone string, instance string, shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy) *InstancesSetShieldedVmIntegrityPolicyCall { + c := &InstancesSetShieldedVmIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest + c.zone = zone + c.instance = instance + c.shieldedvmintegritypolicy = shieldedvmintegritypolicy return c } @@ -81469,7 +84327,7 @@ func (r *InterconnectAttachmentsService) SetLabels(project string, region string // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *InterconnectAttachmentsSetLabelsCall { +func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedVmIntegrityPolicyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -81477,7 +84335,7 @@ func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *Inte // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetLabelsCall { +func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedVmIntegrityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -81485,57 +84343,57 @@ func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *Int // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsSetLabelsCall) Context(ctx context.Context) *InterconnectAttachmentsSetLabelsCall { +func (c *InstancesSetShieldedVmIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedVmIntegrityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectAttachmentsSetLabelsCall) Header() http.Header { +func (c *InstancesSetShieldedVmIntegrityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmintegritypolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.setLabels" call. +// Do executes the "compute.instances.setShieldedVmIntegrityPolicy" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81566,26 +84424,26 @@ func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the labels on an InterconnectAttachment. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.interconnectAttachments.setLabels", + // "description": "Sets the Shielded VM integrity policy for a VM instance. You can only use this method on a running VM instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.setShieldedVmIntegrityPolicy", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "zone", + // "instance" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -81594,17 +84452,17 @@ func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + // "path": "{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", // "request": { - // "$ref": "RegionSetLabelsRequest" + // "$ref": "ShieldedVmIntegrityPolicy" // }, // "response": { // "$ref": "Operation" @@ -81617,34 +84475,54 @@ func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) } -// method id "compute.interconnectAttachments.testIamPermissions": +// method id "compute.instances.setTags": -type InterconnectAttachmentsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetTagsCall struct { + s *Service + project string + zone string + instance string + tags *Tags + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InterconnectAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectAttachmentsTestIamPermissionsCall { - c := &InterconnectAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTags: Sets network tags for the specified instance to the data +// included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags +func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { + c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instance = instance + c.tags = tags + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsTestIamPermissionsCall { +func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -81652,35 +84530,35 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectAttachmentsTestIamPermissionsCall { +func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsTestIamPermissionsCall) Header() http.Header { +func (c *InstancesSetTagsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -81689,20 +84567,20 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (* req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instances.setTags" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81721,7 +84599,7 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81733,15 +84611,22 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Sets network tags for the specified instance to the data included in the request.", // "httpMethod": "POST", - // "id": "compute.interconnectAttachments.testIamPermissions", + // "id": "compute.instances.setTags", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -81749,128 +84634,113 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instances/{instance}/setTags", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "Tags" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.interconnectLocations.get": +// method id "compute.instances.simulateMaintenanceEvent": -type InterconnectLocationsGetCall struct { - s *Service - project string - interconnectLocation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSimulateMaintenanceEventCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the details for the specified interconnect location. -// Gets a list of available interconnect locations by making a list() -// request. -func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { - c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SimulateMaintenanceEvent: Simulates a maintenance event on the +// instance. +func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { + c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnectLocation = interconnectLocation + c.zone = zone + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { +func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
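A hedged read-modify-write sketch for compute.instances.setTags, under the same compute/svc assumptions as the earlier sketches. Instances.Get, Instance.Tags, Tags.Items and Tags.Fingerprint are assumed from schemas defined elsewhere in this file; the fingerprint carried inside the fetched Tags value is what typically lets the server detect a concurrent tag update.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// addNetworkTagSketch appends one network tag via read-modify-write.
// Instances.Get, Instance.Tags, Tags.Items and Tags.Fingerprint are assumed
// from schemas defined elsewhere in this file.
func addNetworkTagSketch(ctx context.Context, svc *compute.Service, project, zone, instance, tag string) (*compute.Operation, error) {
	inst, err := svc.Instances.Get(project, zone, instance).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	tags := inst.Tags
	if tags == nil {
		tags = &compute.Tags{}
	}
	tags.Items = append(tags.Items, tag)
	// The fingerprint already present on the fetched Tags value travels with
	// the request, so a concurrent tag change should be rejected by the server.
	return svc.Instances.SetTags(project, zone, instance, tags).Context(ctx).Do()
}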
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { +func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectLocationsGetCall) Header() http.Header { +func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnectLocation": c.interconnectLocation, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectLocations.get" call. -// Exactly one of *InterconnectLocation or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InterconnectLocation.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { +// Do executes the "compute.instances.simulateMaintenanceEvent" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81889,7 +84759,7 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectLocation{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81901,18 +84771,19 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc } return ret, nil // { - // "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.interconnectLocations.get", + // "description": "Simulates a maintenance event on the instance.", + // "httpMethod": "POST", + // "id": "compute.instances.simulateMaintenanceEvent", // "parameterOrder": [ // "project", - // "interconnectLocation" + // "zone", + // "instance" // ], // "parameters": { - // "interconnectLocation": { - // "description": "Name of the interconnect location to return.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -81922,171 +84793,126 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnectLocations/{interconnectLocation}", + // "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", // "response": { - // "$ref": "InterconnectLocation" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.interconnectLocations.list": +// method id "compute.instances.start": -type InterconnectLocationsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesStartCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of interconnect locations available to the -// specified project. -func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { - c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Start: Starts an instance that was stopped using the instances().stop +// method. For more information, see Restart an instance. 
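A short sketch for compute.instances.simulateMaintenanceEvent, which takes no request body; same compute/svc assumptions as the sketches above. The timeout context illustrates the Context builder documented in this hunk.

package example

import (
	"context"
	"time"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// simulateMaintenanceSketch triggers a simulated maintenance event, for
// example to verify that a workload tolerates live migration. The call takes
// no request body; the deadline flows through the Context builder.
func simulateMaintenanceSketch(svc *compute.Service, project, zone, instance string) (*compute.Operation, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	return svc.Instances.SimulateMaintenanceEvent(project, zone, instance).Context(ctx).Do()
}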
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start +func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { + c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instance = instance return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. 
-func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { +func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { +func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectLocationsListCall) Header() http.Header { +func (c *InstancesStartCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectLocations.list" call. -// Exactly one of *InterconnectLocationList or error will be non-nil. -// Any non-2xx status code is an error. 
Response headers are in either -// *InterconnectLocationList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { +// Do executes the "compute.instances.start" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82105,7 +84931,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectLocationList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82117,104 +84943,102 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter } return ret, nil // { - // "description": "Retrieves the list of interconnect locations available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.interconnectLocations.list", + // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.start", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "instance": { + // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnectLocations", + // "path": "{project}/zones/{zone}/instances/{instance}/start", // "response": { - // "$ref": "InterconnectLocationList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.interconnectLocations.testIamPermissions": +// method id "compute.instances.startWithEncryptionKey": -type InterconnectLocationsTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesStartWithEncryptionKeyCall struct { + s *Service + project string + zone string + instance string + instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InterconnectLocationsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectLocationsTestIamPermissionsCall { - c := &InterconnectLocationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// StartWithEncryptionKey: Starts an instance that was stopped using the +// instances().stop method. For more information, see Restart an +// instance. +func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { + c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instance = instance + c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InterconnectLocationsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectLocationsTestIamPermissionsCall { +func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82222,35 +85046,35 @@ func (c *InterconnectLocationsTestIamPermissionsCall) Fields(s ...googleapi.Fiel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectLocationsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectLocationsTestIamPermissionsCall { +func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectLocationsTestIamPermissionsCall) Header() http.Header { +func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectLocationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -82259,19 +85083,20 @@ func (c *InterconnectLocationsTestIamPermissionsCall) doRequest(alt string) (*ht req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectLocations.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectLocationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instances.startWithEncryptionKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82290,7 +85115,7 @@ func (c *InterconnectLocationsTestIamPermissionsCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82302,14 +85127,22 @@ func (c *InterconnectLocationsTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", // "httpMethod": "POST", - // "id": "compute.interconnectLocations.testIamPermissions", + // "id": "compute.instances.startWithEncryptionKey", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -82317,46 +85150,66 @@ func (c *InterconnectLocationsTestIamPermissionsCall) Do(opts ...googleapi.CallO // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnectLocations/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstancesStartWithEncryptionKeyRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.interconnects.delete": +// method id "compute.instances.stop": -type InterconnectsDeleteCall struct { - s *Service - project string - interconnect string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesStopCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified interconnect. -func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { - c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Stop: Stops a running instance, shutting it down cleanly, and allows +// you to restart the instance at a later time. Stopped instances do not +// incur VM usage charges while they are stopped. However, resources +// that the VM is using, such as persistent disks and static IP +// addresses, will continue to be charged until they are deleted. For +// more information, see Stopping an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop +func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { + c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.zone = zone + c.instance = instance + return c +} + +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If +// true, discard the contents of any attached localSSD partitions. +// Default value is false (== preserve localSSD data). +func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { + c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c } @@ -82374,7 +85227,7 @@ func (r *InterconnectsService) Delete(project string, interconnect string) *Inte // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { +func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { c.urlParams_.Set("requestId", requestId) return c } @@ -82382,7 +85235,7 @@ func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDele // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { +func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82390,21 +85243,21 @@ func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { +func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsDeleteCall) Header() http.Header { +func (c *InstancesStopCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -82413,28 +85266,29 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.delete" call. +// Do executes the "compute.instances.stop" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82465,18 +85319,24 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified interconnect.", - // "httpMethod": "DELETE", - // "id": "compute.interconnects.delete", + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. 
For more information, see Stopping an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.stop", // "parameterOrder": [ // "project", - // "interconnect" + // "zone", + // "instance" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to delete.", + // "discardLocalSsd": { + // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false (== preserve localSSD data).", + // "location": "query", + // "type": "boolean" + // }, + // "instance": { + // "description": "Name of the instance resource to stop.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -82491,9 +85351,16 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/zones/{zone}/instances/{instance}/stop", // "response": { // "$ref": "Operation" // }, @@ -82505,96 +85372,116 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.interconnects.get": +// method id "compute.instances.suspend": -type InterconnectsGetCall struct { - s *Service - project string - interconnect string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSuspendCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified interconnect. Get a list of available -// interconnects by making a list() request. -func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { - c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Suspend: This method suspends a running instance, saving its state to +// persistent storage, and allows you to resume the instance at a later +// time. Suspended instances incur reduced per-minute, virtual machine +// usage charges while they are suspended. Any resources the virtual +// machine is using, such as persistent disks and static IP addresses, +// will continue to be charged until they are deleted. 
+func (r *InstancesService) Suspend(project string, zone string, instance string) *InstancesSuspendCall { + c := &InstancesSuspendCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.zone = zone + c.instance = instance + return c +} + +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If +// true, discard the contents of any attached localSSD partitions. +// Default value is false (== preserve localSSD data). +func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { + c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { +func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { +func (c *InstancesSuspendCall) Context(ctx context.Context) *InstancesSuspendCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectsGetCall) Header() http.Header { +func (c *InstancesSuspendCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/suspend") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.get" call. -// Exactly one of *Interconnect or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.suspend" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Interconnect.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { +func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82613,7 +85500,7 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Interconnect{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82625,18 +85512,24 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, } return ret, nil // { - // "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.interconnects.get", + // "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. Suspended instances incur reduced per-minute, virtual machine usage charges while they are suspended. 
Any resources the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted.", + // "httpMethod": "POST", + // "id": "compute.instances.suspend", // "parameterOrder": [ // "project", - // "interconnect" + // "zone", + // "instance" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to return.", + // "discardLocalSsd": { + // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false (== preserve localSSD data).", + // "location": "query", + // "type": "boolean" + // }, + // "instance": { + // "description": "Name of the instance resource to suspend.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -82646,112 +85539,118 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/zones/{zone}/instances/{instance}/suspend", // "response": { - // "$ref": "Interconnect" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.interconnects.getDiagnostics": +// method id "compute.instances.testIamPermissions": -type InterconnectsGetDiagnosticsCall struct { - s *Service - project string - interconnect string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetDiagnostics: Returns the interconnectDiagnostics for the specified -// interconnect. 
-func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { - c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { + c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { +func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { +func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { +func (c *InstancesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}/getDiagnostics") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.getDiagnostics" call. -// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { +// Do executes the "compute.instances.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82770,7 +85669,7 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectsGetDiagnosticsResponse{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82782,32 +85681,43 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Returns the interconnectDiagnostics for the specified interconnect.", - // "httpMethod": "GET", - // "id": "compute.interconnects.getDiagnostics", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.instances.testIamPermissions", // "parameterOrder": [ // "project", - // "interconnect" + // "zone", + // "resource" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect resource to query.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // 
"description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}/getDiagnostics", + // "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "InterconnectsGetDiagnosticsResponse" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -82818,96 +85728,114 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int } -// method id "compute.interconnects.getIamPolicy": +// method id "compute.instances.updateAccessConfig": -type InterconnectsGetIamPolicyCall struct { +type InstancesUpdateAccessConfigCall struct { s *Service project string - resource string + zone string + instance string + accessconfig *AccessConfig urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *InterconnectsService) GetIamPolicy(project string, resource string) *InterconnectsGetIamPolicyCall { - c := &InterconnectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateAccessConfig: Updates the specified access config from an +// instance's network interface with the data included in the request. +// This method supports PATCH semantics and uses the JSON merge patch +// format and processing rules. +func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { + c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *InstancesUpdateAccessConfigCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InterconnectsGetIamPolicyCall) Fields(s ...googleapi.Field) *InterconnectsGetIamPolicyCall { +func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectsGetIamPolicyCall) IfNoneMatch(entityTag string) *InterconnectsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsGetIamPolicyCall) Context(ctx context.Context) *InterconnectsGetIamPolicyCall { +func (c *InstancesUpdateAccessConfigCall) Context(ctx context.Context) *InstancesUpdateAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsGetIamPolicyCall) Header() http.Header { +func (c *InstancesUpdateAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InterconnectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.updateAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82926,7 +85854,7 @@ func (c *InterconnectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82938,14 +85866,29 @@ func (c *InterconnectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.interconnects.getIamPolicy", + // "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "POST", + // "id": "compute.instances.updateAccessConfig", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance", + // "networkInterface" // ], // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface where the access config is attached.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -82953,44 +85896,57 @@ func (c *InterconnectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", + // "request": { + // "$ref": "AccessConfig" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.interconnects.insert": +// method id "compute.instances.updateDisplayDevice": -type InterconnectsInsertCall struct { - s *Service - project string - interconnect *Interconnect - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateDisplayDeviceCall struct { + s *Service + project string + zone string + instance string + displaydevice *DisplayDevice + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a Interconnect in the specified project using the -// data included in the request. -func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { - c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateDisplayDevice: Updates the Display config for a VM instance. +// You can only use this method on a stopped VM instance. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. +func (r *InstancesService) UpdateDisplayDevice(project string, zone string, instance string, displaydevice *DisplayDevice) *InstancesUpdateDisplayDeviceCall { + c := &InstancesUpdateDisplayDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.zone = zone + c.instance = instance + c.displaydevice = displaydevice return c } @@ -83008,7 +85964,7 @@ func (r *InterconnectsService) Insert(project string, interconnect *Interconnect // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { +func (c *InstancesUpdateDisplayDeviceCall) RequestId(requestId string) *InstancesUpdateDisplayDeviceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -83016,7 +85972,7 @@ func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInse // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { +func (c *InstancesUpdateDisplayDeviceCall) Fields(s ...googleapi.Field) *InstancesUpdateDisplayDeviceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83024,55 +85980,57 @@ func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { +func (c *InstancesUpdateDisplayDeviceCall) Context(ctx context.Context) *InstancesUpdateDisplayDeviceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsInsertCall) Header() http.Header { +func (c *InstancesUpdateDisplayDeviceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.displaydevice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.insert" call. +// Do executes the "compute.instances.updateDisplayDevice" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83103,13 +86061,22 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a Interconnect in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.interconnects.insert", + // "description": "Updates the Display config for a VM instance. You can only use this method on a stopped VM instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateDisplayDevice", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -83121,11 +86088,18 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnects", + // "path": "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice", // "request": { - // "$ref": "Interconnect" + // "$ref": "DisplayDevice" // }, // "response": { // "$ref": "Operation" @@ -83138,156 +86112,112 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.interconnects.list": +// method id "compute.instances.updateNetworkInterface": -type InterconnectsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesUpdateNetworkInterfaceCall struct { + s *Service + project string + zone string + instance string + networkinterface *NetworkInterface + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of interconnect available to the specified -// project. -func (r *InterconnectsService) List(project string) *InterconnectsListCall { - c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateNetworkInterface: Updates an instance's network interface. This +// method follows PATCH semantics. +func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { + c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.networkinterface = networkinterface return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. 
The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *InstancesUpdateNetworkInterfaceCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { +func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesUpdateNetworkInterfaceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { +func (c *InstancesUpdateNetworkInterfaceCall) Context(ctx context.Context) *InstancesUpdateNetworkInterfaceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsListCall) Header() http.Header { +func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkinterface) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.list" call. -// Exactly one of *InterconnectList or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *InterconnectList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { +// Do executes the "compute.instances.updateNetworkInterface" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83306,7 +86236,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83318,98 +86248,87 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL } return ret, nil // { - // "description": "Retrieves the list of interconnect available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.interconnects.list", + // "description": "Updates an instance's network interface. This method follows PATCH semantics.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateNetworkInterface", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance", + // "networkInterface" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", + // "networkInterface": { + // "description": "The name of the network interface to update.", // "location": "query", - // "minimum": "0", - // "type": "integer" + // "required": true, + // "type": "string" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects", + // "path": "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", + // "request": { + // "$ref": "NetworkInterface" + // }, // "response": { - // "$ref": "InterconnectList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.interconnects.patch": +// method id "compute.instances.updateShieldedInstanceConfig": -type InterconnectsPatchCall struct { - s *Service - project string - interconnect string - interconnect2 *Interconnect - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateShieldedInstanceConfigCall struct { + s *Service + project string + zone string + instance string + shieldedinstanceconfig *ShieldedInstanceConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified interconnect with the data included in -// the request. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. -func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { - c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateShieldedInstanceConfig: Updates the Shielded Instance config +// for an instance. You can only use this method on a stopped instance. +// This method supports PATCH semantics and uses the JSON merge patch +// format and processing rules. +func (r *InstancesService) UpdateShieldedInstanceConfig(project string, zone string, instance string, shieldedinstanceconfig *ShieldedInstanceConfig) *InstancesUpdateShieldedInstanceConfigCall { + c := &InstancesUpdateShieldedInstanceConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect - c.interconnect2 = interconnect2 + c.zone = zone + c.instance = instance + c.shieldedinstanceconfig = shieldedinstanceconfig return c } @@ -83427,7 +86346,7 @@ func (r *InterconnectsService) Patch(project string, interconnect string, interc // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) RequestId(requestId string) *InstancesUpdateShieldedInstanceConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -83435,7 +86354,7 @@ func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatch // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateShieldedInstanceConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83443,35 +86362,35 @@ func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) Context(ctx context.Context) *InstancesUpdateShieldedInstanceConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsPatchCall) Header() http.Header { +func (c *InstancesUpdateShieldedInstanceConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceconfig) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -83479,20 +86398,21 @@ func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.patch" call. +// Do executes the "compute.instances.updateShieldedInstanceConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83523,18 +86443,19 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", - // "id": "compute.interconnects.patch", + // "id": "compute.instances.updateShieldedInstanceConfig", // "parameterOrder": [ // "project", - // "interconnect" + // "zone", + // "instance" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to update.", + // "instance": { + // "description": "Name or id of the instance scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -83549,11 +86470,18 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig", // "request": { - // "$ref": "Interconnect" + // "$ref": "ShieldedInstanceConfig" // }, // "response": { // "$ref": "Operation" @@ -83566,32 +86494,55 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.interconnects.setIamPolicy": +// method id "compute.instances.updateShieldedVmConfig": -type InterconnectsSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateShieldedVmConfigCall struct { + s *Service + project string + zone string + instance string + shieldedvmconfig *ShieldedVmConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *InterconnectsService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InterconnectsSetIamPolicyCall { - c := &InterconnectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateShieldedVmConfig: Updates the Shielded VM config for a VM +// instance. You can only use this method on a stopped VM instance. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. 
+func (r *InstancesService) UpdateShieldedVmConfig(project string, zone string, instance string, shieldedvmconfig *ShieldedVmConfig) *InstancesUpdateShieldedVmConfigCall { + c := &InstancesUpdateShieldedVmConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.zone = zone + c.instance = instance + c.shieldedvmconfig = shieldedvmconfig + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesUpdateShieldedVmConfigCall) RequestId(requestId string) *InstancesUpdateShieldedVmConfigCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsSetIamPolicyCall) Fields(s ...googleapi.Field) *InterconnectsSetIamPolicyCall { +func (c *InstancesUpdateShieldedVmConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateShieldedVmConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83599,56 +86550,57 @@ func (c *InterconnectsSetIamPolicyCall) Fields(s ...googleapi.Field) *Interconne // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsSetIamPolicyCall) Context(ctx context.Context) *InterconnectsSetIamPolicyCall { +func (c *InstancesUpdateShieldedVmConfigCall) Context(ctx context.Context) *InstancesUpdateShieldedVmConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsSetIamPolicyCall) Header() http.Header { +func (c *InstancesUpdateShieldedVmConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateShieldedVmConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmconfig) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateShieldedVmConfig") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InterconnectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.updateShieldedVmConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83667,7 +86619,7 @@ func (c *InterconnectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83679,14 +86631,22 @@ func (c *InterconnectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.interconnects.setIamPolicy", + // "description": "Updates the Shielded VM config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateShieldedVmConfig", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -83694,20 +86654,25 @@ func (c *InterconnectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{resource}/setIamPolicy", + // "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedVmConfig", // "request": { - // "$ref": "GlobalSetPolicyRequest" + // "$ref": "ShieldedVmConfig" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -83717,89 +86682,157 @@ func (c *InterconnectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } -// method id "compute.interconnects.setLabels": +// method id "compute.interconnectAttachments.aggregatedList": -type InterconnectsSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an Interconnect. To learn more about -// labels, read the Labeling Resources documentation. -func (r *InterconnectsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *InterconnectsSetLabelsCall { - c := &InterconnectsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of interconnect +// attachments. +func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { + c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. 
However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectsSetLabelsCall { +func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectsSetLabelsCall) Context(ctx context.Context) *InterconnectsSetLabelsCall { +func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsSetLabelsCall) Header() http.Header { +func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectAttachments.aggregatedList" call. +// Exactly one of *InterconnectAttachmentAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83818,7 +86851,7 @@ func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operatio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83830,70 +86863,122 @@ func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Sets the labels on an Interconnect. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.interconnects.setLabels", + // "description": "Retrieves an aggregated list of interconnect attachments.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.aggregatedList", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, + // "path": "{project}/aggregated/interconnectAttachments", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectAttachmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.interconnects.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InterconnectsTestIamPermissionsCall struct { +// method id "compute.interconnectAttachments.delete": + +type InterconnectAttachmentsDeleteCall struct { s *Service project string - resource string - testpermissionsrequest *TestPermissionsRequest + region string + interconnectAttachment string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InterconnectsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectsTestIamPermissionsCall { - c := &InterconnectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified interconnect attachment. +func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { + c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.region = region + c.interconnectAttachment = interconnectAttachment + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectsTestIamPermissionsCall { +func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83901,56 +86986,52 @@ func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Inte // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectsTestIamPermissionsCall { +func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsTestIamPermissionsCall) Header() http.Header { +func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.interconnectAttachments.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83969,7 +87050,7 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83981,14 +87062,22 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.interconnects.testIamPermissions", + // "description": "Deletes the specified interconnect attachment.", + // "httpMethod": "DELETE", + // "id": "compute.interconnectAttachments.delete", // "parameterOrder": [ // "project", - // "resource" + // "region", + // "interconnectAttachment" // ], // "parameters": { + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -83996,55 +87085,57 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.licenseCodes.get": +// method id "compute.interconnectAttachments.get": -type LicenseCodesGetCall struct { - s *Service - project string - licenseCode string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsGetCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Return a specified license code. License codes are mirrored -// across all projects that have permissions to read the License Code. -func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { - c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified interconnect attachment. +func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { + c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.licenseCode = licenseCode + c.region = region + c.interconnectAttachment = interconnectAttachment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { +func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -84054,7 +87145,7 @@ func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { +func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -84062,21 +87153,21 @@ func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { +func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
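// --- Usage sketch (not part of the generated diff) for the Delete and Get calls
// documented above. Only the method signatures come from this file; the import
// path, project/region/attachment names, requestId value, and selected fields
// are assumptions chosen for illustration.
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path for this generated package
)

func main() {
	ctx := context.Background()

	// One common way to build an authenticated client for this generation of
	// the library; adjust to however this vendored copy is normally constructed.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Get returns the attachment itself; Fields requests a partial response
	// containing only the listed top-level fields.
	att, err := svc.InterconnectAttachments.
		Get("my-project", "us-central1", "my-attachment").
		Fields("name", "selfLink").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got attachment %s", att.Name)

	// Delete returns an *Operation; a caller-generated UUID passed as requestId
	// makes retries idempotent, as the comments above describe.
	op, err := svc.InterconnectAttachments.
		Delete("my-project", "us-central1", "my-attachment").
		RequestId("f5b1c2d3-4e5f-4a6b-8c7d-9e0f1a2b3c4d"). // placeholder UUID
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete operation %s, status %s", op.Name, op.Status)
}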
-func (c *LicenseCodesGetCall) Header() http.Header { +func (c *InterconnectAttachmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -84088,7 +87179,7 @@ func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{licenseCode}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -84096,20 +87187,21 @@ func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "licenseCode": c.licenseCode, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenseCodes.get" call. -// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LicenseCode.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { +// Do executes the "compute.interconnectAttachments.get" call. +// Exactly one of *InterconnectAttachment or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectAttachment.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84128,7 +87220,7 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LicenseCode{ + ret := &InterconnectAttachment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -84140,18 +87232,19 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er } return ret, nil // { - // "description": "Return a specified license code. 
License codes are mirrored across all projects that have permissions to read the License Code.", + // "description": "Returns the specified interconnect attachment.", // "httpMethod": "GET", - // "id": "compute.licenseCodes.get", + // "id": "compute.interconnectAttachments.get", // "parameterOrder": [ // "project", - // "licenseCode" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "licenseCode": { - // "description": "Number corresponding to the License code resource to return.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to return.", // "location": "path", - // "pattern": "[0-9]{0,61}?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -84161,11 +87254,18 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/licenseCodes/{licenseCode}", + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "response": { - // "$ref": "LicenseCode" + // "$ref": "InterconnectAttachment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -84176,11 +87276,12 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er } -// method id "compute.licenseCodes.getIamPolicy": +// method id "compute.interconnectAttachments.getIamPolicy": -type LicenseCodesGetIamPolicyCall struct { +type InterconnectAttachmentsGetIamPolicyCall struct { s *Service project string + region string resource string urlParams_ gensupport.URLParams ifNoneMatch_ string @@ -84190,9 +87291,10 @@ type LicenseCodesGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. -func (r *LicenseCodesService) GetIamPolicy(project string, resource string) *LicenseCodesGetIamPolicyCall { - c := &LicenseCodesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InterconnectAttachmentsService) GetIamPolicy(project string, region string, resource string) *InterconnectAttachmentsGetIamPolicyCall { + c := &InterconnectAttachmentsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region c.resource = resource return c } @@ -84200,7 +87302,7 @@ func (r *LicenseCodesService) GetIamPolicy(project string, resource string) *Lic // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicenseCodesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicenseCodesGetIamPolicyCall { +func (c *InterconnectAttachmentsGetIamPolicyCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -84210,7 +87312,7 @@ func (c *LicenseCodesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicenseCode // getting updates only after the object has changed since the last // request. 
Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LicenseCodesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicenseCodesGetIamPolicyCall { +func (c *InterconnectAttachmentsGetIamPolicyCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -84218,21 +87320,21 @@ func (c *LicenseCodesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicenseCod // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicenseCodesGetIamPolicyCall) Context(ctx context.Context) *LicenseCodesGetIamPolicyCall { +func (c *InterconnectAttachmentsGetIamPolicyCall) Context(ctx context.Context) *InterconnectAttachmentsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicenseCodesGetIamPolicyCall) Header() http.Header { +func (c *InterconnectAttachmentsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicenseCodesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -84244,7 +87346,7 @@ func (c *LicenseCodesGetIamPolicyCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -84253,19 +87355,20 @@ func (c *LicenseCodesGetIamPolicyCall) doRequest(alt string) (*http.Response, er req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenseCodes.getIamPolicy" call. +// Do executes the "compute.interconnectAttachments.getIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *LicenseCodesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *InterconnectAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84298,9 +87401,10 @@ func (c *LicenseCodesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy // { // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.licenseCodes.getIamPolicy", + // "id": "compute.interconnectAttachments.getIamPolicy", // "parameterOrder": [ // "project", + // "region", // "resource" // ], // "parameters": { @@ -84311,15 +87415,22 @@ func (c *LicenseCodesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy // "required": true, // "type": "string" // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenseCodes/{resource}/getIamPolicy", + // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/getIamPolicy", // "response": { // "$ref": "Policy" // }, @@ -84332,32 +87443,58 @@ func (c *LicenseCodesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy } -// method id "compute.licenseCodes.setIamPolicy": +// method id "compute.interconnectAttachments.insert": -type LicenseCodesSetIamPolicyCall struct { +type InterconnectAttachmentsInsertCall struct { s *Service project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest + region string + interconnectattachment *InterconnectAttachment urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *LicenseCodesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicenseCodesSetIamPolicyCall { - c := &LicenseCodesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an InterconnectAttachment in the specified project +// using the data included in the request. +func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { + c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.region = region + c.interconnectattachment = interconnectattachment + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
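// --- Usage sketch of an idempotent Insert using the requestId parameter just
// described. Assumptions (not taken from this diff): svc is an authenticated
// *compute.Service as in the earlier sketch, and the attachment fields, router
// URL, and UUID are placeholders.
func exampleIdempotentInsert(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	att := &compute.InterconnectAttachment{
		Name:   "my-attachment",
		Router: "https://www.googleapis.com/compute/beta/projects/my-project/regions/us-central1/routers/my-router",
	}
	// Generate the UUID once and reuse it verbatim on every retry; the server
	// then ignores duplicate submissions of the same logical request.
	requestID := "7d3f1f0e-4b1a-4c9e-9f2a-0c5a6e7b8d90"
	return svc.InterconnectAttachments.
		Insert("my-project", "us-central1", att).
		RequestId(requestID).
		Context(ctx).
		Do()
}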
+func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *InterconnectAttachmentsInsertCall) ValidateOnly(validateOnly bool) *InterconnectAttachmentsInsertCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicenseCodesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicenseCodesSetIamPolicyCall { +func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -84365,35 +87502,35 @@ func (c *LicenseCodesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicenseCode // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicenseCodesSetIamPolicyCall) Context(ctx context.Context) *LicenseCodesSetIamPolicyCall { +func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicenseCodesSetIamPolicyCall) Header() http.Header { +func (c *InterconnectAttachmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicenseCodesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -84401,20 +87538,20 @@ func (c *LicenseCodesSetIamPolicyCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenseCodes.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
-func (c *LicenseCodesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.interconnectAttachments.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84433,7 +87570,7 @@ func (c *LicenseCodesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -84445,12 +87582,12 @@ func (c *LicenseCodesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.licenseCodes.setIamPolicy", + // "id": "compute.interconnectAttachments.insert", // "parameterOrder": [ // "project", - // "resource" + // "region" // ], // "parameters": { // "project": { @@ -84460,20 +87597,30 @@ func (c *LicenseCodesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "{project}/global/licenseCodes/{resource}/setIamPolicy", + // "path": "{project}/regions/{region}/interconnectAttachments", // "request": { - // "$ref": "GlobalSetPolicyRequest" + // "$ref": "InterconnectAttachment" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -84483,89 +87630,159 @@ func (c *LicenseCodesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy } -// method id "compute.licenseCodes.testIamPermissions": +// method id "compute.interconnectAttachments.list": -type LicenseCodesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { - c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect attachments contained within +// the specified region. +func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { + c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
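// --- Usage sketch of the List filter syntax documented above. Assumptions:
// svc is an authenticated *compute.Service as in the earlier sketches; the
// attachment name and the "type" field used in the filter are illustrative.
func exampleFilteredList(ctx context.Context, svc *compute.Service) (*compute.InterconnectAttachmentList, error) {
	// Two parenthesized expressions are ANDed by default, as the comment above
	// explains; single-token values may be written without quotes.
	return svc.InterconnectAttachments.
		List("my-project", "us-central1").
		Filter("(name != example-attachment) (type = DEDICATED)").
		Context(ctx).
		Do()
}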
+func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicenseCodesTestIamPermissionsCall { +func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicenseCodesTestIamPermissionsCall) Context(ctx context.Context) *LicenseCodesTestIamPermissionsCall { +func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
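// --- Usage sketch of paging through list results with MaxResults and the
// Pages helper (defined further down in this section). Assumptions: svc is an
// authenticated *compute.Service as in the earlier sketches.
func exampleListAllPages(ctx context.Context, svc *compute.Service) ([]string, error) {
	var names []string
	err := svc.InterconnectAttachments.
		List("my-project", "us-central1").
		MaxResults(50). // per-page size; Pages follows nextPageToken automatically
		Pages(ctx, func(page *compute.InterconnectAttachmentList) error {
			for _, att := range page.Items {
				names = append(names, att.Name)
			}
			return nil // returning a non-nil error halts the iteration
		})
	return names, err
}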
-func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { +func (c *InterconnectAttachmentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenseCodes.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.interconnectAttachments.list" call. +// Exactly one of *InterconnectAttachmentList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectAttachmentList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84584,7 +87801,7 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &InterconnectAttachmentList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -84596,14 +87813,37 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.licenseCodes.testIamPermissions", + // "description": "Retrieves the list of interconnect attachments contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.list", // "parameterOrder": [ // "project", - // "resource" + // "region" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -84611,20 +87851,17 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenseCodes/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/interconnectAttachments", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "InterconnectAttachmentList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -84635,22 +87872,49 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.licenses.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type LicensesDeleteCall struct { - s *Service - project string - license string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.interconnectAttachments.patch": + +type InterconnectAttachmentsPatchCall struct { + s *Service + project string + region string + interconnectAttachment string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified license. -func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { - c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified interconnect attachment with the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { + c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.region = region + c.interconnectAttachment = interconnectAttachment + c.interconnectattachment = interconnectattachment return c } @@ -84668,7 +87932,7 @@ func (r *LicensesService) Delete(project string, license string) *LicensesDelete // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
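// --- Usage sketch of Patch, which (as documented above) applies JSON
// merge-patch semantics: only the fields you set are sent and merged.
// Assumptions: svc is an authenticated *compute.Service as in the earlier
// sketches; Description is used as an example mutable field and the UUID is a
// placeholder.
func examplePatchDescription(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	patch := &compute.InterconnectAttachment{
		Description: "updated by patch", // the only field included in the merge patch
	}
	return svc.InterconnectAttachments.
		Patch("my-project", "us-central1", "my-attachment", patch).
		RequestId("b2c7d9e0-1234-4f56-8a9b-0c1d2e3f4a5b"). // caller-generated UUID for idempotent retries
		Context(ctx).
		Do()
}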
-func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { +func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -84676,7 +87940,7 @@ func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { +func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -84684,51 +87948,57 @@ func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { +func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesDeleteCall) Header() http.Header { +func (c *InterconnectAttachmentsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "license": c.license, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.delete" call. +// Do executes the "compute.interconnectAttachments.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84759,18 +88029,19 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified license.", - // "httpMethod": "DELETE", - // "id": "compute.licenses.delete", + // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.interconnectAttachments.patch", // "parameterOrder": [ // "project", - // "license" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "license": { - // "description": "Name of the license resource to delete.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -84781,13 +88052,23 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/licenses/{license}", + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "request": { + // "$ref": "InterconnectAttachment" + // }, // "response": { // "$ref": "Operation" // }, @@ -84799,96 +88080,92 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.licenses.get": +// method id "compute.interconnectAttachments.setIamPolicy": -type LicensesGetCall struct { - s *Service - project string - license string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified License resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get -func (r *LicensesService) Get(project string, license string) *LicensesGetCall { - c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. 
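// --- Usage sketch of a read-modify-write IAM update combining GetIamPolicy and
// SetIamPolicy (both documented in this section). Assumptions: svc is an
// authenticated *compute.Service as in the earlier sketches; the role and
// member strings are placeholders.
func exampleGrantViewer(ctx context.Context, svc *compute.Service) (*compute.Policy, error) {
	policy, err := svc.InterconnectAttachments.
		GetIamPolicy("my-project", "us-central1", "my-attachment").
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	policy.Bindings = append(policy.Bindings, &compute.Binding{
		Role:    "roles/compute.networkViewer",
		Members: []string{"user:alice@example.com"},
	})
	return svc.InterconnectAttachments.
		SetIamPolicy("my-project", "us-central1", "my-attachment", &compute.RegionSetPolicyRequest{
			Policy: policy, // carries the etag read above, guarding against concurrent edits
		}).
		Context(ctx).
		Do()
}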
+func (r *InterconnectAttachmentsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *InterconnectAttachmentsSetIamPolicyCall { + c := &InterconnectAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { +func (c *InterconnectAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { +func (c *InterconnectAttachmentsSetIamPolicyCall) Context(ctx context.Context) *InterconnectAttachmentsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesGetCall) Header() http.Header { +func (c *InterconnectAttachmentsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "license": c.license, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.get" call. -// Exactly one of *License or error will be non-nil. Any non-2xx status +// Do executes the "compute.interconnectAttachments.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. 
Any non-2xx status // code is an error. Response headers are in either -// *License.ServerResponse.Header or (if a response was returned at all) +// *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { +func (c *InterconnectAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84907,7 +88184,7 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &License{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -84919,132 +88196,157 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { } return ret, nil // { - // "description": "Returns the specified License resource.", - // "httpMethod": "GET", - // "id": "compute.licenses.get", - // "parameterOrder": [ - // "project", - // "license" + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.interconnectAttachments.setIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" // ], // "parameters": { - // "license": { - // "description": "Name of the License resource to return.", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenses/{license}", + // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, // "response": { - // "$ref": "License" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.licenses.getIamPolicy": +// method id "compute.interconnectAttachments.setLabels": -type LicensesGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsSetLabelsCall struct { + s *Service + project string + region string + resource string + 
regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { - c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on an InterconnectAttachment. To learn +// more about labels, read the Labeling Resources documentation. +func (r *InterconnectAttachmentsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *InterconnectAttachmentsSetLabelsCall { + c := &InterconnectAttachmentsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *InterconnectAttachmentsSetLabelsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { +func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { +func (c *InterconnectAttachmentsSetLabelsCall) Context(ctx context.Context) *InterconnectAttachmentsSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
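// --- Usage sketch of SetLabels, which replaces the labels on an
// InterconnectAttachment as described above. Assumptions: svc is an
// authenticated *compute.Service as in the earlier sketches; the label values
// are placeholders and the fingerprint would normally come from a prior Get of
// the resource.
func exampleSetLabels(ctx context.Context, svc *compute.Service, fingerprint string) (*compute.Operation, error) {
	req := &compute.RegionSetLabelsRequest{
		Labels:           map[string]string{"env": "prod", "team": "netops"},
		LabelFingerprint: fingerprint, // optimistic-locking fingerprint of the current label set
	}
	return svc.InterconnectAttachments.
		SetLabels("my-project", "us-central1", "my-attachment", req).
		Context(ctx).
		Do()
}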
-func (c *LicensesGetIamPolicyCall) Header() http.Header { +func (c *InterconnectAttachmentsSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.interconnectAttachments.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -85063,7 +88365,7 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -85075,11 +88377,12 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.licenses.getIamPolicy", + // "description": "Sets the labels on an InterconnectAttachment. 
To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.interconnectAttachments.setLabels", // "parameterOrder": [ // "project", + // "region", // "resource" // ], // "parameters": { @@ -85090,69 +88393,69 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // "required": true, // "type": "string" // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenses/{resource}/getIamPolicy", + // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.licenses.insert": +// method id "compute.interconnectAttachments.testIamPermissions": -type LicensesInsertCall struct { - s *Service - project string - license *License - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Create a License resource in the specified project. -func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { - c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InterconnectAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectAttachmentsTestIamPermissionsCall { + c := &InterconnectAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { +func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -85160,35 +88463,35 @@ func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { +func (c *InterconnectAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectAttachmentsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesInsertCall) Header() http.Header { +func (c *InterconnectAttachmentsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -85196,19 +88499,21 @@ func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.insert" call. -// Exactly one of *Operation or error will be non-nil. 
Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectAttachments.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -85227,7 +88532,7 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -85239,11 +88544,13 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Create a License resource in the specified project.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.licenses.insert", + // "id": "compute.interconnectAttachments.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { // "project": { @@ -85253,33 +88560,197 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenses", + // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", // "request": { - // "$ref": "License" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.licenses.list": +// method id "compute.interconnectLocations.get": -type LicensesListCall struct { +type InterconnectLocationsGetCall struct { + s *Service + project string + interconnectLocation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the details for the specified interconnect location. +// Gets a list of available interconnect locations by making a list() +// request. +func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { + c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnectLocation = interconnectLocation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
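Aside on the setLabels surface introduced above: the sketch below shows how a caller might drive compute.interconnectAttachments.setLabels together with the requestId idempotency token documented in this hunk. Only the generated method signatures (SetLabels, RequestId, Context, Do and the *Operation result) come from the code above; the import path and alias, the Labels field on RegionSetLabelsRequest, and how the service value is obtained are assumptions for illustration, not part of this change.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed; use the version vendored here
)

// setAttachmentLabels applies labels to an interconnect attachment, passing a
// client-chosen request ID so the call can be retried without creating a
// duplicate operation (per the requestId semantics documented above).
func setAttachmentLabels(ctx context.Context, attachments *compute.InterconnectAttachmentsService,
	project, region, attachment, requestID string) (*compute.Operation, error) {
	req := &compute.RegionSetLabelsRequest{
		// The Labels field is assumed from the API schema; it is not shown in this hunk.
		Labels: map[string]string{"env": "prod"},
	}
	call := attachments.SetLabels(project, region, attachment, req)
	call.RequestId(requestID) // reusing the same UUID on retry lets the server drop duplicates
	op, err := call.Context(ctx).Do()
	if err != nil {
		return nil, fmt.Errorf("setLabels: %v", err)
	}
	return op, nil
}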
+func (c *InterconnectLocationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnectLocation": c.interconnectLocation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectLocations.get" call. +// Exactly one of *InterconnectLocation or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectLocation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectLocation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the details for the specified interconnect location. 
Gets a list of available interconnect locations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.get", + // "parameterOrder": [ + // "project", + // "interconnectLocation" + // ], + // "parameters": { + // "interconnectLocation": { + // "description": "Name of the interconnect location to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnectLocations/{interconnectLocation}", + // "response": { + // "$ref": "InterconnectLocation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.interconnectLocations.list": + +type InterconnectLocationsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -85288,14 +88759,10 @@ type LicensesListCall struct { header_ http.Header } -// List: Retrieves the list of licenses available in the specified -// project. This method does not get any licenses that belong to other -// projects, including licenses attached to publicly-available images, -// like Debian 9. If you want to get a list of publicly-available -// licenses, use this method to make a request to the respective image -// project, such as debian-cloud or windows-cloud. -func (r *LicensesService) List(project string) *LicensesListCall { - c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect locations available to the +// specified project. +func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { + c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -85322,7 +88789,7 @@ func (r *LicensesService) List(project string) *LicensesListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *LicensesListCall) Filter(filter string) *LicensesListCall { +func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { c.urlParams_.Set("filter", filter) return c } @@ -85333,7 +88800,7 @@ func (c *LicensesListCall) Filter(filter string) *LicensesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { +func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -85350,7 +88817,7 @@ func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. 
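For the conditional-read pattern that the IfNoneMatch option above enables, here is a minimal sketch of re-fetching an interconnect location only when it has changed since a previously observed ETag. The call surface (Get, IfNoneMatch, Context, Do) and the googleapi.IsNotModified check are taken from the generated code and its comments in this hunk; the import paths, package alias, and the idea that lastETag was captured from an earlier response's headers are assumptions.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed
	"google.golang.org/api/googleapi"
)

// getLocationIfChanged returns (nil, false, nil) when the resource is
// unchanged relative to lastETag, and the fresh object otherwise.
func getLocationIfChanged(ctx context.Context, locs *compute.InterconnectLocationsService,
	project, location, lastETag string) (*compute.InterconnectLocation, bool, error) {
	call := locs.Get(project, location)
	if lastETag != "" {
		call.IfNoneMatch(lastETag) // lastETag saved from a previous response's HTTP headers
	}
	loc, err := call.Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return nil, false, nil // server replied 304; cached copy is still current
	}
	if err != nil {
		return nil, false, err
	}
	return loc, true, nil
}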
-func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { +func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -85358,7 +88825,7 @@ func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { +func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -85366,7 +88833,7 @@ func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { +func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -85376,7 +88843,7 @@ func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { +func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -85384,21 +88851,21 @@ func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { +func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesListCall) Header() http.Header { +func (c *InterconnectLocationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -85410,7 +88877,7 @@ func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -85423,14 +88890,14 @@ func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.list" call. -// Exactly one of *LicensesListResponse or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *LicensesListResponse.ServerResponse.Header or (if a response was +// Do executes the "compute.interconnectLocations.list" call. +// Exactly one of *InterconnectLocationList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectLocationList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { +func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -85449,7 +88916,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LicensesListResponse{ + ret := &InterconnectLocationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -85461,9 +88928,9 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon } return ret, nil // { - // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "description": "Retrieves the list of interconnect locations available to the specified project.", // "httpMethod": "GET", - // "id": "compute.licenses.list", + // "id": "compute.interconnectLocations.list", // "parameterOrder": [ // "project" // ], @@ -85499,9 +88966,9 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon // "type": "string" // } // }, - // "path": "{project}/global/licenses", + // "path": "{project}/global/interconnectLocations", // "response": { - // "$ref": "LicensesListResponse" + // "$ref": "InterconnectLocationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -85515,7 +88982,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { +func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -85533,32 +89000,32 @@ func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListRespon } } -// method id "compute.licenses.setIamPolicy": +// method id "compute.interconnectLocations.testIamPermissions": -type LicensesSetIamPolicyCall struct { +type InterconnectLocationsTestIamPermissionsCall struct { s *Service project string resource string - globalsetpolicyrequest *GlobalSetPolicyRequest + testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { - c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InterconnectLocationsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectLocationsTestIamPermissionsCall { + c := &InterconnectLocationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { +func (c *InterconnectLocationsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectLocationsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -85566,35 +89033,35 @@ func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamP // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { +func (c *InterconnectLocationsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectLocationsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
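The Pages helper defined just above drives the pageToken/nextPageToken loop for interconnectLocations.list. A minimal usage sketch follows; List, MaxResults, Pages, and the *InterconnectLocationList page type come from this hunk, while the import path and the Items/Name fields on the list schema are assumptions noted in the comments.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed
)

// listAllLocations walks every page of interconnectLocations.list; Pages
// resets the page token to its original value when it returns.
func listAllLocations(ctx context.Context, locs *compute.InterconnectLocationsService, project string) error {
	call := locs.List(project).MaxResults(100) // optional; the documented default is 500
	return call.Pages(ctx, func(page *compute.InterconnectLocationList) error {
		// Items and Name are assumed from the list schema; they are not shown in this hunk.
		for _, loc := range page.Items {
			fmt.Println(loc.Name)
		}
		return nil // returning a non-nil error halts the iteration
	})
}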
-func (c *LicensesSetIamPolicyCall) Header() http.Header { +func (c *InterconnectLocationsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -85608,14 +89075,14 @@ func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.interconnectLocations.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -85634,7 +89101,7 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -85646,9 +89113,9 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.licenses.setIamPolicy", + // "id": "compute.interconnectLocations.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -85669,47 +89136,64 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // "type": "string" // } // }, - // "path": "{project}/global/licenses/{resource}/setIamPolicy", + // "path": "{project}/global/interconnectLocations/{resource}/testIamPermissions", // "request": { - // "$ref": "GlobalSetPolicyRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Policy" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.licenses.testIamPermissions": +// method id "compute.interconnects.delete": -type LicensesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsDeleteCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { - c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified interconnect. +func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { + c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.interconnect = interconnect + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { +func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -85717,56 +89201,51 @@ func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { +func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesTestIamPermissionsCall) Header() http.Header { +func (c *InterconnectsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.interconnects.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -85785,7 +89264,7 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -85797,14 +89276,21 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.licenses.testIamPermissions", + // "description": "Deletes the specified interconnect.", + // "httpMethod": "DELETE", + // "id": "compute.interconnects.delete", // "parameterOrder": [ // "project", - // "resource" + // "interconnect" // ], // "parameters": { + // "interconnect": { + // "description": "Name of the interconnect to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -85812,125 +89298,114 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/licenses/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/interconnects/{interconnect}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.machineImages.delete": +// method id "compute.interconnects.get": -type MachineImagesDeleteCall struct { +type InterconnectsGetCall struct { s *Service project string - machineImage string + interconnect string urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified machine image. Deleting a machine image -// is permanent and cannot be undone. 
-func (r *MachineImagesService) Delete(project string, machineImage string) *MachineImagesDeleteCall { - c := &MachineImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified interconnect. Get a list of available +// interconnects by making a list() request. +func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { + c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.machineImage = machineImage - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *MachineImagesDeleteCall) RequestId(requestId string) *MachineImagesDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.interconnect = interconnect return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineImagesDeleteCall) Fields(s ...googleapi.Field) *MachineImagesDeleteCall { +func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineImagesDeleteCall) Context(ctx context.Context) *MachineImagesDeleteCall { +func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *MachineImagesDeleteCall) Header() http.Header { +func (c *InterconnectsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{machineImage}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "machineImage": c.machineImage, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.interconnects.get" call. +// Exactly one of *Interconnect or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *Interconnect.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -85949,7 +89424,7 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Interconnect{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -85961,18 +89436,18 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified machine image. Deleting a machine image is permanent and cannot be undone.", - // "httpMethod": "DELETE", - // "id": "compute.machineImages.delete", + // "description": "Returns the specified interconnect. 
Get a list of available interconnects by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnects.get", // "parameterOrder": [ // "project", - // "machineImage" + // "interconnect" // ], // "parameters": { - // "machineImage": { - // "description": "The name of the machine image to delete.", + // "interconnect": { + // "description": "Name of the interconnect to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -85982,50 +89457,46 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/machineImages/{machineImage}", + // "path": "{project}/global/interconnects/{interconnect}", // "response": { - // "$ref": "Operation" + // "$ref": "Interconnect" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.machineImages.get": +// method id "compute.interconnects.getDiagnostics": -type MachineImagesGetCall struct { +type InterconnectsGetDiagnosticsCall struct { s *Service project string - machineImage string + interconnect string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified machine image. Gets a list of available -// machine images by making a list() request. -func (r *MachineImagesService) Get(project string, machineImage string) *MachineImagesGetCall { - c := &MachineImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetDiagnostics: Returns the interconnectDiagnostics for the specified +// interconnect. +func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { + c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.machineImage = machineImage + c.interconnect = interconnect return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *MachineImagesGetCall) Fields(s ...googleapi.Field) *MachineImagesGetCall { +func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86035,7 +89506,7 @@ func (c *MachineImagesGetCall) Fields(s ...googleapi.Field) *MachineImagesGetCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *MachineImagesGetCall) IfNoneMatch(entityTag string) *MachineImagesGetCall { +func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { c.ifNoneMatch_ = entityTag return c } @@ -86043,21 +89514,21 @@ func (c *MachineImagesGetCall) IfNoneMatch(entityTag string) *MachineImagesGetCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineImagesGetCall) Context(ctx context.Context) *MachineImagesGetCall { +func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineImagesGetCall) Header() http.Header { +func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -86069,7 +89540,7 @@ func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{machineImage}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}/getDiagnostics") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -86078,19 +89549,20 @@ func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "machineImage": c.machineImage, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.get" call. -// Exactly one of *MachineImage or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *MachineImage.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, error) { +// Do executes the "compute.interconnects.getDiagnostics" call. +// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -86109,7 +89581,7 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineImage{ + ret := &InterconnectsGetDiagnosticsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -86121,18 +89593,18 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, } return ret, nil // { - // "description": "Returns the specified machine image. Gets a list of available machine images by making a list() request.", + // "description": "Returns the interconnectDiagnostics for the specified interconnect.", // "httpMethod": "GET", - // "id": "compute.machineImages.get", + // "id": "compute.interconnects.getDiagnostics", // "parameterOrder": [ // "project", - // "machineImage" + // "interconnect" // ], // "parameters": { - // "machineImage": { - // "description": "The name of the machine image.", + // "interconnect": { + // "description": "Name of the interconnect resource to query.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -86144,9 +89616,9 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, // "type": "string" // } // }, - // "path": "{project}/global/machineImages/{machineImage}", + // "path": "{project}/global/interconnects/{interconnect}/getDiagnostics", // "response": { - // "$ref": "MachineImage" + // "$ref": "InterconnectsGetDiagnosticsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -86157,9 +89629,9 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, } -// method id "compute.machineImages.getIamPolicy": +// method id "compute.interconnects.getIamPolicy": -type MachineImagesGetIamPolicyCall struct { +type InterconnectsGetIamPolicyCall struct { s *Service project string resource string @@ -86171,8 +89643,8 @@ type MachineImagesGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. -func (r *MachineImagesService) GetIamPolicy(project string, resource string) *MachineImagesGetIamPolicyCall { - c := &MachineImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InterconnectsService) GetIamPolicy(project string, resource string) *InterconnectsGetIamPolicyCall { + c := &InterconnectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource return c @@ -86181,7 +89653,7 @@ func (r *MachineImagesService) GetIamPolicy(project string, resource string) *Ma // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
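As a final aside, the getDiagnostics call added above returns a typed response, and failures surface as *googleapi.Error values carrying the HTTP status code (the Code and Header fields appear in the generated error handling in this hunk). The sketch below shows one way a caller might consume it; the import paths and package alias are assumptions, and the response body is returned unexamined since its schema is not part of this hunk.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed
	"google.golang.org/api/googleapi"
)

// interconnectDiagnostics fetches diagnostics for one interconnect and
// reports the HTTP status code when the API returns a structured error.
func interconnectDiagnostics(ctx context.Context, ics *compute.InterconnectsService,
	project, interconnect string) (*compute.InterconnectsGetDiagnosticsResponse, error) {
	resp, err := ics.GetDiagnostics(project, interconnect).Context(ctx).Do()
	if gerr, ok := err.(*googleapi.Error); ok {
		return nil, fmt.Errorf("getDiagnostics failed with HTTP %d", gerr.Code)
	}
	if err != nil {
		return nil, err
	}
	return resp, nil
}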
-func (c *MachineImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesGetIamPolicyCall { +func (c *InterconnectsGetIamPolicyCall) Fields(s ...googleapi.Field) *InterconnectsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86191,7 +89663,7 @@ func (c *MachineImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *MachineIma // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *MachineImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *MachineImagesGetIamPolicyCall { +func (c *InterconnectsGetIamPolicyCall) IfNoneMatch(entityTag string) *InterconnectsGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -86199,21 +89671,21 @@ func (c *MachineImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *MachineIm // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineImagesGetIamPolicyCall) Context(ctx context.Context) *MachineImagesGetIamPolicyCall { +func (c *InterconnectsGetIamPolicyCall) Context(ctx context.Context) *InterconnectsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineImagesGetIamPolicyCall) Header() http.Header { +func (c *InterconnectsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -86225,7 +89697,7 @@ func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -86239,14 +89711,14 @@ func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.getIamPolicy" call. +// Do executes the "compute.interconnects.getIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *InterconnectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -86279,7 +89751,7 @@ func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // { // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.machineImages.getIamPolicy", + // "id": "compute.interconnects.getIamPolicy", // "parameterOrder": [ // "project", // "resource" @@ -86300,7 +89772,7 @@ func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // "type": "string" // } // }, - // "path": "{project}/global/machineImages/{resource}/getIamPolicy", + // "path": "{project}/global/interconnects/{resource}/getIamPolicy", // "response": { // "$ref": "Policy" // }, @@ -86313,26 +89785,23 @@ func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } -// method id "compute.machineImages.insert": +// method id "compute.interconnects.insert": -type MachineImagesInsertCall struct { +type InterconnectsInsertCall struct { s *Service project string - machineimage *MachineImage + interconnect *Interconnect urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a machine image in the specified project using the -// data that is included in the request. If you are creating a new -// machine image to update an existing instance, your new machine image -// should use the same network or, if applicable, the same subnetwork as -// the original instance. -func (r *MachineImagesService) Insert(project string, machineimage *MachineImage) *MachineImagesInsertCall { - c := &MachineImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Interconnect in the specified project using the +// data included in the request. +func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { + c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.machineimage = machineimage + c.interconnect = interconnect return c } @@ -86350,23 +89819,15 @@ func (r *MachineImagesService) Insert(project string, machineimage *MachineImage // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *MachineImagesInsertCall) RequestId(requestId string) *MachineImagesInsertCall { +func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { c.urlParams_.Set("requestId", requestId) return c } -// SourceInstance sets the optional parameter "sourceInstance": -// Required. Source instance that is used to create the machine image -// from. -func (c *MachineImagesInsertCall) SourceInstance(sourceInstance string) *MachineImagesInsertCall { - c.urlParams_.Set("sourceInstance", sourceInstance) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineImagesInsertCall) Fields(s ...googleapi.Field) *MachineImagesInsertCall { +func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86374,35 +89835,35 @@ func (c *MachineImagesInsertCall) Fields(s ...googleapi.Field) *MachineImagesIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *MachineImagesInsertCall) Context(ctx context.Context) *MachineImagesInsertCall { +func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineImagesInsertCall) Header() http.Header { +func (c *InterconnectsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.machineimage) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -86415,14 +89876,14 @@ func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.insert" call. +// Do executes the "compute.interconnects.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -86453,9 +89914,9 @@ func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a machine image in the specified project using the data that is included in the request. If you are creating a new machine image to update an existing instance, your new machine image should use the same network or, if applicable, the same subnetwork as the original instance.", + // "description": "Creates a Interconnect in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.machineImages.insert", + // "id": "compute.interconnects.insert", // "parameterOrder": [ // "project" // ], @@ -86471,16 +89932,11 @@ func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "sourceInstance": { - // "description": "Required. Source instance that is used to create the machine image from.", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/machineImages", + // "path": "{project}/global/interconnects", // "request": { - // "$ref": "MachineImage" + // "$ref": "Interconnect" // }, // "response": { // "$ref": "Operation" @@ -86493,9 +89949,9 @@ func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.machineImages.list": +// method id "compute.interconnects.list": -type MachineImagesListCall struct { +type InterconnectsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -86504,10 +89960,10 @@ type MachineImagesListCall struct { header_ http.Header } -// List: Retrieves a list of machine images that are contained within -// the specified project. -func (r *MachineImagesService) List(project string) *MachineImagesListCall { - c := &MachineImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect available to the specified +// project. +func (r *InterconnectsService) List(project string) *InterconnectsListCall { + c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -86534,7 +89990,7 @@ func (r *MachineImagesService) List(project string) *MachineImagesListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *MachineImagesListCall) Filter(filter string) *MachineImagesListCall { +func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { c.urlParams_.Set("filter", filter) return c } @@ -86545,7 +90001,7 @@ func (c *MachineImagesListCall) Filter(filter string) *MachineImagesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *MachineImagesListCall) MaxResults(maxResults int64) *MachineImagesListCall { +func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -86562,7 +90018,7 @@ func (c *MachineImagesListCall) MaxResults(maxResults int64) *MachineImagesListC // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *MachineImagesListCall) OrderBy(orderBy string) *MachineImagesListCall { +func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -86570,7 +90026,7 @@ func (c *MachineImagesListCall) OrderBy(orderBy string) *MachineImagesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
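// Illustrative sketch of the idempotent-retry pattern described above for
// compute.interconnects.insert: generate one requestId and reuse it on every
// retry so the server de-duplicates the operation. Not taken from the vendored
// file; svc is a *compute.Service built as in the earlier sketch, the uuid
// package (github.com/google/uuid) is an assumed extra import, and the
// Interconnect field values are elided because the resource schema is outside
// this hunk.
func createInterconnect(ctx context.Context, svc *compute.Service) error {
	requestID := uuid.New().String() // generate once, reuse for every retry

	ic := &compute.Interconnect{
		// Populate the desired Interconnect fields here; omitted because the
		// schema is not part of this hunk.
	}

	op, err := svc.Interconnects.Insert("my-project", ic).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		// Safe to retry with the same requestID; a completed duplicate is ignored.
		return err
	}
	log.Printf("insert started: %+v", op) // *compute.Operation
	return nil
}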
-func (c *MachineImagesListCall) PageToken(pageToken string) *MachineImagesListCall { +func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -86578,7 +90034,7 @@ func (c *MachineImagesListCall) PageToken(pageToken string) *MachineImagesListCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineImagesListCall) Fields(s ...googleapi.Field) *MachineImagesListCall { +func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86588,7 +90044,7 @@ func (c *MachineImagesListCall) Fields(s ...googleapi.Field) *MachineImagesListC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *MachineImagesListCall) IfNoneMatch(entityTag string) *MachineImagesListCall { +func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { c.ifNoneMatch_ = entityTag return c } @@ -86596,21 +90052,21 @@ func (c *MachineImagesListCall) IfNoneMatch(entityTag string) *MachineImagesList // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineImagesListCall) Context(ctx context.Context) *MachineImagesListCall { +func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineImagesListCall) Header() http.Header { +func (c *InterconnectsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -86622,7 +90078,7 @@ func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -86635,14 +90091,14 @@ func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.list" call. -// Exactly one of *MachineImageList or error will be non-nil. Any +// Do executes the "compute.interconnects.list" call. +// Exactly one of *InterconnectList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *MachineImageList.ServerResponse.Header or (if a response was +// *InterconnectList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
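// Illustrative sketch of paging through compute.interconnects.list with the
// Filter, MaxResults, and Pages helpers documented above. Not taken from the
// vendored file; svc comes from the earlier sketch, the filter expression is
// only an example of the documented syntax, and the Items field on
// InterconnectList is assumed from the usual generated list shape.
func countInterconnects(ctx context.Context, svc *compute.Service) (int, error) {
	total := 0
	err := svc.Interconnects.List("my-project").
		Filter(`operationalStatus = "OS_ACTIVE"`). // field name is illustrative
		MaxResults(100).
		Pages(ctx, func(page *compute.InterconnectList) error {
			total += len(page.Items) // Items is an assumption; see lead-in
			return nil               // a non-nil error here halts the iteration
		})
	return total, err
}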
-func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageList, error) { +func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -86661,7 +90117,7 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineImageList{ + ret := &InterconnectList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -86673,9 +90129,9 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL } return ret, nil // { - // "description": "Retrieves a list of machine images that are contained within the specified project.", + // "description": "Retrieves the list of interconnect available to the specified project.", // "httpMethod": "GET", - // "id": "compute.machineImages.list", + // "id": "compute.interconnects.list", // "parameterOrder": [ // "project" // ], @@ -86711,9 +90167,9 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL // "type": "string" // } // }, - // "path": "{project}/global/machineImages", + // "path": "{project}/global/interconnects", // "response": { - // "$ref": "MachineImageList" + // "$ref": "InterconnectList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -86727,7 +90183,7 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *MachineImagesListCall) Pages(ctx context.Context, f func(*MachineImageList) error) error { +func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -86745,32 +90201,52 @@ func (c *MachineImagesListCall) Pages(ctx context.Context, f func(*MachineImageL } } -// method id "compute.machineImages.setIamPolicy": +// method id "compute.interconnects.patch": -type MachineImagesSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsPatchCall struct { + s *Service + project string + interconnect string + interconnect2 *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *MachineImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *MachineImagesSetIamPolicyCall { - c := &MachineImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified interconnect with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. 
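// Illustrative sketch of compute.interconnects.patch, which, per the doc
// comment above, applies JSON merge-patch semantics: only the fields set on
// the request body are updated. Not taken from the vendored file; svc comes
// from the earlier sketch and the specific Interconnect fields to change are
// elided because the resource schema is outside this hunk.
func patchInterconnect(ctx context.Context, svc *compute.Service) error {
	update := &compute.Interconnect{
		// Set only the fields you want to change; consult the Interconnect
		// schema elsewhere in this package for the available fields.
	}
	op, err := svc.Interconnects.Patch("my-project", "my-interconnect", update).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("patch started: %+v", op) // *compute.Operation
	return nil
}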
+func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { + c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.interconnect = interconnect + c.interconnect2 = interconnect2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesSetIamPolicyCall { +func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86778,56 +90254,56 @@ func (c *MachineImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *MachineIma // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineImagesSetIamPolicyCall) Context(ctx context.Context) *MachineImagesSetIamPolicyCall { +func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineImagesSetIamPolicyCall) Header() http.Header { +func (c *InterconnectsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.interconnects.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -86846,7 +90322,7 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -86858,14 +90334,21 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.machineImages.setIamPolicy", + // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.interconnects.patch", // "parameterOrder": [ // "project", - // "resource" + // "interconnect" // ], // "parameters": { + // "interconnect": { + // "description": "Name of the interconnect to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -86873,20 +90356,18 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/machineImages/{resource}/setIamPolicy", + // "path": "{project}/global/interconnects/{interconnect}", // "request": { - // "$ref": "GlobalSetPolicyRequest" + // "$ref": "Interconnect" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -86896,32 +90377,32 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } -// method id "compute.machineImages.testIamPermissions": +// method id "compute.interconnects.setIamPolicy": -type MachineImagesTestIamPermissionsCall struct { +type InterconnectsSetIamPolicyCall struct { s *Service project string resource string - testpermissionsrequest *TestPermissionsRequest + globalsetpolicyrequest *GlobalSetPolicyRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *MachineImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *MachineImagesTestIamPermissionsCall { - c := &MachineImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *InterconnectsService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InterconnectsSetIamPolicyCall { + c := &InterconnectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *MachineImagesTestIamPermissionsCall { +func (c *InterconnectsSetIamPolicyCall) Fields(s ...googleapi.Field) *InterconnectsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86929,35 +90410,35 @@ func (c *MachineImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Mach // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineImagesTestIamPermissionsCall) Context(ctx context.Context) *MachineImagesTestIamPermissionsCall { +func (c *InterconnectsSetIamPolicyCall) Context(ctx context.Context) *InterconnectsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *MachineImagesTestIamPermissionsCall) Header() http.Header { +func (c *InterconnectsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -86971,14 +90452,14 @@ func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineImages.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.interconnects.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InterconnectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -86997,7 +90478,7 @@ func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -87009,9 +90490,9 @@ func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.machineImages.testIamPermissions", + // "id": "compute.interconnects.setIamPolicy", // "parameterOrder": [ // "project", // "resource" @@ -87032,172 +90513,104 @@ func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // } // }, - // "path": "{project}/global/machineImages/{resource}/testIamPermissions", + // "path": "{project}/global/interconnects/{resource}/setIamPolicy", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.machineTypes.aggregatedList": +// method id "compute.interconnects.setLabels": -type MachineTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InterconnectsSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of machine types. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList -func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { - c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on an Interconnect. To learn more about +// labels, read the Labeling Resources documentation. +func (r *InterconnectsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *InterconnectsSetLabelsCall { + c := &InterconnectsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
-func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { +func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectsSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { +func (c *InterconnectsSetLabelsCall) Context(ctx context.Context) *InterconnectsSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
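// Illustrative sketch of compute.interconnects.setLabels, the new method
// introduced in this hunk. Not taken from the vendored file; svc comes from
// the earlier sketch, and the Labels/LabelFingerprint fields on
// GlobalSetLabelsRequest are assumed from the usual shape of that request
// type (the fingerprint is normally read from a prior Get of the resource to
// avoid conflicting concurrent updates).
func labelInterconnect(ctx context.Context, svc *compute.Service, fingerprint string) error {
	req := &compute.GlobalSetLabelsRequest{
		LabelFingerprint: fingerprint, // assumed field; taken from a prior read of the resource
		Labels:           map[string]string{"env": "prod", "team": "network"},
	}
	op, err := svc.Interconnects.SetLabels("my-project", "my-interconnect", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("setLabels started: %+v", op) // *compute.Operation
	return nil
}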
-func (c *MachineTypesAggregatedListCall) Header() http.Header { +func (c *InterconnectsSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.aggregatedList" call. -// Exactly one of *MachineTypeAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *MachineTypeAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { +// Do executes the "compute.interconnects.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -87216,7 +90629,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineTypeAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -87228,172 +90641,127 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach } return ret, nil // { - // "description": "Retrieves an aggregated list of machine types.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.aggregatedList", + // "description": "Sets the labels on an Interconnect. 
To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.interconnects.setLabels", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/machineTypes", + // "path": "{project}/global/interconnects/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, // "response": { - // "$ref": "MachineTypeAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.machineTypes.get": +// method id "compute.interconnects.testIamPermissions": -type MachineTypesGetCall struct { - s *Service - project string - zone string - machineType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InterconnectsTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified machine type. Gets a list of available -// machine types by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get -func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { - c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InterconnectsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectsTestIamPermissionsCall { + c := &InterconnectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.machineType = machineType + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
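// Illustrative sketch of compute.interconnects.testIamPermissions, which
// reports which of the supplied permissions the caller holds on the resource.
// Not taken from the vendored file; svc comes from the earlier sketch, the
// permission names are examples only, and the Permissions fields on
// TestPermissionsRequest/TestPermissionsResponse are assumed from the usual
// shape of those types.
func checkInterconnectAccess(ctx context.Context, svc *compute.Service) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.interconnects.get",
			"compute.interconnects.update",
		},
	}
	resp, err := svc.Interconnects.TestIamPermissions("my-project", "my-interconnect", req).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil // the subset of requested permissions the caller holds
}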
-func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { +func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { +func (c *InterconnectsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesGetCall) Header() http.Header { +func (c *InterconnectsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "machineType": c.machineType, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.get" call. -// Exactly one of *MachineType or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *MachineType.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { +// Do executes the "compute.interconnects.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -87412,7 +90780,7 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineType{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -87424,22 +90792,14 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er } return ret, nil // { - // "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.interconnects.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "machineType" + // "resource" // ], // "parameters": { - // "machineType": { - // "description": "Name of the machine type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -87447,17 +90807,20 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/machineTypes/{machineType}", + // "path": "{project}/global/interconnects/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "MachineType" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -87468,95 +90831,31 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er } -// method id "compute.machineTypes.list": +// method id "compute.licenseCodes.get": -type MachineTypesListCall struct { +type LicenseCodesGetCall struct { s *Service project string - zone string + licenseCode string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of machine types available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list -func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { - c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Return a specified license code. License codes are mirrored +// across all projects that have permissions to read the License Code. 
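// Illustrative sketch of compute.licenseCodes.get combined with the
// IfNoneMatch/ETag pattern the surrounding doc comments describe: pass a
// previously seen ETag and treat http.StatusNotModified as "no change" via
// googleapi.IsNotModified. Not taken from the vendored file; svc comes from
// the earlier sketch, the LicenseCodes accessor on the generated Service is
// assumed, "google.golang.org/api/googleapi" is an assumed extra import, and
// the license code value is a placeholder.
func fetchLicenseCodeIfChanged(ctx context.Context, svc *compute.Service, etag string) (*compute.LicenseCode, error) {
	lc, err := svc.LicenseCodes.Get("my-project", "1000201").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // cached copy is still current
	}
	if err != nil {
		return nil, err
	}
	return lc, nil
}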
+func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { + c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.licenseCode = licenseCode return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { +func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -87566,7 +90865,7 @@ func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { +func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -87574,21 +90873,21 @@ func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { +func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesListCall) Header() http.Header { +func (c *LicenseCodesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -87600,7 +90899,7 @@ func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{licenseCode}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -87608,20 +90907,20 @@ func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "licenseCode": c.licenseCode, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.list" call. -// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx +// Do executes the "compute.licenseCodes.get" call. +// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *MachineTypeList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { +// *LicenseCode.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -87640,7 +90939,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineTypeList{ + ret := &LicenseCode{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -87652,35 +90951,19 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis } return ret, nil // { - // "description": "Retrieves a list of machine types available to the specified project.", + // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code.", // "httpMethod": "GET", - // "id": "compute.machineTypes.list", + // "id": "compute.licenseCodes.get", // "parameterOrder": [ // "project", - // "zone" + // "licenseCode" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "licenseCode": { + // "description": "Number corresponding to the License code resource to return.", + // "location": "path", + // "pattern": "[0-9]{0,61}?", + // "required": true, // "type": "string" // }, // "project": { @@ -87689,18 +90972,11 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/machineTypes", + // "path": "{project}/global/licenseCodes/{licenseCode}", // "response": { - // "$ref": "MachineTypeList" + // "$ref": "LicenseCode" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -87711,113 +90987,31 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networkEndpointGroups.aggregatedList": +// method id "compute.licenseCodes.getIamPolicy": -type NetworkEndpointGroupsAggregatedListCall struct { +type LicenseCodesGetIamPolicyCall struct { s *Service project string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves the list of network endpoint groups and -// sorts them by zone. -func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { - c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *LicenseCodesService) GetIamPolicy(project string, resource string) *LicenseCodesGetIamPolicyCall { + c := &LicenseCodesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. 
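
The Filter, MaxResults, OrderBy, PageToken options and the Pages helper shown in this hunk follow the same pattern as every generated list call in this file. A sketch of that pattern, assuming the zonal MachineTypes.List call generated elsewhere in the package and placeholder project/zone values:

// Assumes svc is a *compute.Service built as in the earlier sketch.
func listSmallMachineTypes(ctx context.Context, svc *compute.Service, project, zone string) error {
	call := svc.MachineTypes.List(project, zone).
		Filter("guestCpus < 8"). // filter syntax as documented above: field, operator, value
		MaxResults(100)
	// Pages follows nextPageToken internally and resets paging when it returns.
	return call.Pages(ctx, func(page *compute.MachineTypeList) error {
		for _, mt := range page.Items {
			fmt.Println(mt.Name, mt.GuestCpus)
		}
		return nil
	})
}
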
You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { +func (c *LicenseCodesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicenseCodesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -87827,7 +91021,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) * // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
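
The "In-None-Match" wording in the generated comment refers to the If-None-Match request header that doRequest sets from ifNoneMatch_. A sketch of conditional re-fetching with an ETag captured from a previous response (for example from ServerResponse.Header.Get("Etag")), assuming svc is a *compute.Service and googleapi is google.golang.org/api/googleapi:

func refreshIfChanged(ctx context.Context, svc *compute.Service, project, code, etag string) (*compute.LicenseCode, error) {
	lc, err := svc.LicenseCodes.Get(project, code).
		IfNoneMatch(etag). // sent as the If-None-Match request header
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// The resource has not changed since etag was captured; keep the cached copy.
		return nil, nil
	}
	return lc, err
}
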
-func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { +func (c *LicenseCodesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicenseCodesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -87835,21 +91029,21 @@ func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { +func (c *LicenseCodesGetIamPolicyCall) Context(ctx context.Context) *LicenseCodesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { +func (c *LicenseCodesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *LicenseCodesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -87861,7 +91055,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.R var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -87869,20 +91063,20 @@ func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.R } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.aggregatedList" call. -// Exactly one of *NetworkEndpointGroupAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { +// Do executes the "compute.licenseCodes.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicenseCodesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -87901,7 +91095,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkEndpointGroupAggregatedList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -87913,47 +91107,32 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.networkEndpointGroups.aggregatedList", + // "id": "compute.licenseCodes.getIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/networkEndpointGroups", + // "path": "{project}/global/licenseCodes/{resource}/getIamPolicy", // "response": { - // "$ref": "NetworkEndpointGroupAggregatedList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -87964,74 +91143,32 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networkEndpointGroups.attachNetworkEndpoints": +// method id "compute.licenseCodes.setIamPolicy": -type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { - s *Service - project string - zone string - networkEndpointGroup string - networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicenseCodesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AttachNetworkEndpoints: Attach a list of network endpoints to the -// specified network endpoint group. -func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { - c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *LicenseCodesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicenseCodesSetIamPolicyCall { + c := &LicenseCodesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup - c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
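
A sketch of the new compute.licenseCodes.getIamPolicy call, which returns a *Policy. The resource argument may be the license code's name or numeric id, as the parameter pattern above indicates; svc is assumed to be a *compute.Service:

func dumpLicenseCodePolicy(ctx context.Context, svc *compute.Service, project, resource string) error {
	policy, err := svc.LicenseCodes.GetIamPolicy(project, resource).Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, b := range policy.Bindings {
		fmt.Println(b.Role, b.Members)
	}
	return nil
}
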
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +func (c *LicenseCodesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicenseCodesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88039,35 +91176,35 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +func (c *LicenseCodesSetIamPolicyCall) Context(ctx context.Context) *LicenseCodesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { +func (c *LicenseCodesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *LicenseCodesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/setIamPolicy") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -88075,21 +91212,20 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.licenseCodes.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicenseCodesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -88108,7 +91244,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -88120,21 +91256,14 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Attach a list of network endpoints to the specified network endpoint group.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", + // "id": "compute.licenseCodes.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "networkEndpointGroup" + // "resource" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -88142,24 +91271,20 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "path": "{project}/global/licenseCodes/{resource}/setIamPolicy", // "request": { - // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -88169,53 +91294,32 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C } -// method id "compute.networkEndpointGroups.delete": +// method id "compute.licenseCodes.testIamPermissions": -type NetworkEndpointGroupsDeleteCall struct { - s *Service - project string - zone string - networkEndpointGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicenseCodesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified network endpoint group. The network -// endpoints in the NEG and the VM instances they belong to are not -// terminated when the NEG is deleted. Note that the NEG cannot be -// deleted if there are backend services referencing it. -func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { - c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { + c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
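
A sketch of the read-modify-write flow for compute.licenseCodes.setIamPolicy. The GlobalSetPolicyRequest wrapper carrying the updated Policy, and the roles/compute.viewer role, are assumptions for illustration; svc is a *compute.Service:

func grantLicenseCodeViewer(ctx context.Context, svc *compute.Service, project, resource, member string) (*compute.Policy, error) {
	current, err := svc.LicenseCodes.GetIamPolicy(project, resource).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	current.Bindings = append(current.Bindings, &compute.Binding{
		Role:    "roles/compute.viewer",
		Members: []string{member},
	})
	// Reusing the fetched policy keeps its Etag for optimistic concurrency.
	req := &compute.GlobalSetPolicyRequest{Policy: current}
	return svc.LicenseCodes.SetIamPolicy(project, resource, req).Context(ctx).Do()
}
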
-// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { +func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicenseCodesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88223,52 +91327,56 @@ func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkE // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { +func (c *LicenseCodesTestIamPermissionsCall) Context(ctx context.Context) *LicenseCodesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { +func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.licenseCodes.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. 
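
A sketch of compute.licenseCodes.testIamPermissions: the response echoes back only the permissions the caller actually holds on the resource. The permission name is illustrative; svc is assumed to be a *compute.Service:

func canReadLicenseCode(ctx context.Context, svc *compute.Service, project, resource string) (bool, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.licenseCodes.get"},
	}
	resp, err := svc.LicenseCodes.TestIamPermissions(project, resource, req).Context(ctx).Do()
	if err != nil {
		return false, err
	}
	return len(resp.Permissions) == 1, nil
}
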
Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -88287,7 +91395,7 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -88299,21 +91407,14 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", - // "httpMethod": "DELETE", - // "id": "compute.networkEndpointGroups.delete", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.licenseCodes.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "networkEndpointGroup" + // "resource" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -88321,51 +91422,46 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "path": "{project}/global/licenseCodes/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networkEndpointGroups.detachNetworkEndpoints": +// method id "compute.licenses.delete": -type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { - s *Service - project string - zone string - networkEndpointGroup string - networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesDeleteCall struct { + s *Service + project string + license string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DetachNetworkEndpoints: Detach a list of network endpoints from the -// specified network endpoint group. -func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { - c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified license. +func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { + c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup - c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest + c.license = license return c } @@ -88383,7 +91479,7 @@ func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -88391,7 +91487,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId st // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88399,57 +91495,51 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. 
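
A sketch of the new compute.licenses.delete call with the optional RequestId parameter described above. The requestID must be a caller-generated, non-zero UUID; retrying a timed-out request with the same requestID returns the original operation instead of issuing a second delete. svc is assumed to be a *compute.Service:

func deleteLicense(ctx context.Context, svc *compute.Service, project, license, requestID string) (*compute.Operation, error) {
	return svc.Licenses.Delete(project, license).
		RequestId(requestID).
		Context(ctx).
		Do()
}
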
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { +func (c *LicensesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. +// Do executes the "compute.licenses.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -88480,18 +91570,18 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Detach a list of network endpoints from the specified network endpoint group.", - // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", + // "description": "Deletes the specified license.", + // "httpMethod": "DELETE", + // "id": "compute.licenses.delete", // "parameterOrder": [ // "project", - // "zone", - // "networkEndpointGroup" + // "license" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group where you are removing network endpoints. 
It should comply with RFC1035.", + // "license": { + // "description": "Name of the license resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -88506,18 +91596,9 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", - // "request": { - // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" - // }, + // "path": "{project}/global/licenses/{license}", // "response": { // "$ref": "Operation" // }, @@ -88529,33 +91610,31 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C } -// method id "compute.networkEndpointGroups.get": +// method id "compute.licenses.get": -type NetworkEndpointGroupsGetCall struct { - s *Service - project string - zone string - networkEndpointGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type LicensesGetCall struct { + s *Service + project string + license string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified network endpoint group. Gets a list of -// available network endpoint groups by making a list() request. -func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { - c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified License resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get +func (r *LicensesService) Get(project string, license string) *LicensesGetCall { + c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup + c.license = license return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { +func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88565,7 +91644,7 @@ func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndp // getting updates only after the object has changed since the last // request. 
Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { +func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -88573,21 +91652,21 @@ func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEnd // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { +func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsGetCall) Header() http.Header { +func (c *LicensesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -88599,7 +91678,7 @@ func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -88607,21 +91686,20 @@ func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.get" call. -// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NetworkEndpointGroup.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { +// Do executes the "compute.licenses.get" call. +// Exactly one of *License or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *License.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -88640,7 +91718,7 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkEndpointGroup{ + ret := &License{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -88652,18 +91730,18 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ } return ret, nil // { - // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + // "description": "Returns the specified License resource.", // "httpMethod": "GET", - // "id": "compute.networkEndpointGroups.get", + // "id": "compute.licenses.get", // "parameterOrder": [ // "project", - // "zone", - // "networkEndpointGroup" + // "license" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group. It should comply with RFC1035.", + // "license": { + // "description": "Name of the License resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -88673,17 +91751,167 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // } + // }, + // "path": "{project}/global/licenses/{license}", + // "response": { + // "$ref": "License" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.licenses.getIamPolicy": + +type LicensesGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { + c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
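
A sketch of compute.licenses.get, which returns a *License. The printed fields are assumptions based on the License schema; LicenseCode is the numeric identifier accepted by the licenseCodes.* methods above. svc is assumed to be a *compute.Service:

func describeLicense(ctx context.Context, svc *compute.Service, project, name string) error {
	lic, err := svc.Licenses.Get(project, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println(lic.Name, lic.LicenseCode, lic.SelfLink)
	return nil
}
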
+func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LicensesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.licenses.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.licenses.getIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "path": "{project}/global/licenses/{resource}/getIamPolicy", // "response": { - // "$ref": "NetworkEndpointGroup" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -88694,25 +91922,22 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ } -// method id "compute.networkEndpointGroups.insert": +// method id "compute.licenses.insert": -type NetworkEndpointGroupsInsertCall struct { - s *Service - project string - zone string - networkendpointgroup *NetworkEndpointGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesInsertCall struct { + s *Service + project string + license *License + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a network endpoint group in the specified project -// using the parameters that are included in the request. -func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { - c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Create a License resource in the specified project. +func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { + c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkendpointgroup = networkendpointgroup + c.license = license return c } @@ -88730,7 +91955,7 @@ func (r *NetworkEndpointGroupsService) Insert(project string, zone string, netwo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { +func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -88738,7 +91963,7 @@ func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { +func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88746,35 +91971,35 @@ func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkE // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { +func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
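// Illustrative usage sketch (not part of the generated client): creating a
// License with the idempotency token described above. The project name and
// the request ID are placeholders, the *compute.License body is built by the
// caller (its fields are defined elsewhere in this file), and svc.Licenses is
// assumed to be the usual generated accessor on *compute.Service.
func exampleLicenseInsert(ctx context.Context, svc *compute.Service, lic *compute.License) error {
    // RequestId lets the server drop a retried insert instead of creating a
    // duplicate; any non-zero UUID works, the one below is a placeholder.
    op, err := svc.Licenses.Insert("my-project", lic).
        RequestId("b1c2d3e4-0000-4000-8000-000000000001").
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    fmt.Printf("license insert accepted, operation HTTP %d\n", op.ServerResponse.HTTPStatusCode)
    return nil
}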
-func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { +func (c *LicensesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -88783,19 +92008,18 @@ func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.insert" call. +// Do executes the "compute.licenses.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -88826,12 +92050,11 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + // "description": "Create a License resource in the specified project.", // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.insert", + // "id": "compute.licenses.insert", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "project": { @@ -88845,47 +92068,46 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the network endpoint group. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups", + // "path": "{project}/global/licenses", // "request": { - // "$ref": "NetworkEndpointGroup" + // "$ref": "License" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.networkEndpointGroups.list": +// method id "compute.licenses.list": -type NetworkEndpointGroupsListCall struct { +type LicensesListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of network endpoint groups that are located -// in the specified project and zone. -func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { - c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of licenses available in the specified +// project. This method does not get any licenses that belong to other +// projects, including licenses attached to publicly-available images, +// like Debian 9. If you want to get a list of publicly-available +// licenses, use this method to make a request to the respective image +// project, such as debian-cloud or windows-cloud. +func (r *LicensesService) List(project string) *LicensesListCall { + c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -88911,7 +92133,7 @@ func (r *NetworkEndpointGroupsService) List(project string, zone string) *Networ // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { +func (c *LicensesListCall) Filter(filter string) *LicensesListCall { c.urlParams_.Set("filter", filter) return c } @@ -88922,7 +92144,7 @@ func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGr // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { +func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -88939,7 +92161,7 @@ func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEnd // // Currently, only sorting by name or creationTimestamp desc is // supported. 
-func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { +func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -88947,7 +92169,7 @@ func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpoint // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { +func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -88955,7 +92177,7 @@ func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndp // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { +func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88965,7 +92187,7 @@ func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEnd // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { +func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { c.ifNoneMatch_ = entityTag return c } @@ -88973,21 +92195,21 @@ func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEn // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { +func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsListCall) Header() http.Header { +func (c *LicensesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -88999,7 +92221,7 @@ func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -89008,19 +92230,18 @@ func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, e req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.list" call. -// Exactly one of *NetworkEndpointGroupList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +// Do executes the "compute.licenses.list" call. +// Exactly one of *LicensesListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *LicensesListResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { +func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -89039,7 +92260,7 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkEndpointGroupList{ + ret := &LicensesListResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -89051,12 +92272,11 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo } return ret, nil // { - // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", + // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", // "httpMethod": "GET", - // "id": "compute.networkEndpointGroups.list", + // "id": "compute.licenses.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -89088,17 +92308,11 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups", + // "path": "{project}/global/licenses", // "response": { - // "$ref": "NetworkEndpointGroupList" + // "$ref": "LicensesListResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -89112,7 +92326,7 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo // Pages invokes f for each page of results. 
// A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { +func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -89130,97 +92344,32 @@ func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*Netwo } } -// method id "compute.networkEndpointGroups.listNetworkEndpoints": +// method id "compute.licenses.setIamPolicy": -type NetworkEndpointGroupsListNetworkEndpointsCall struct { - s *Service - project string - zone string - networkEndpointGroup string - networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListNetworkEndpoints: Lists the network endpoints in the specified -// network endpoint group. -func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { - c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { + c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup - c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
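// Illustrative usage sketch (not part of the generated client): paging
// through compute.licenses.list with the Filter, MaxResults and Pages
// helpers converted in this hunk. The project name and filter expression are
// placeholders; svc.Licenses is assumed to be the usual generated accessor.
func exampleLicenseList(ctx context.Context, svc *compute.Service) error {
    call := svc.Licenses.List("my-project").
        Filter("name != example-license").
        MaxResults(100)
    // Pages drives the pageToken/nextPageToken loop and calls the callback
    // once per page until it returns a non-nil error or the results end.
    return call.Pages(ctx, func(page *compute.LicensesListResponse) error {
        // Inspect the page here; its fields are defined elsewhere in this file.
        fmt.Printf("received a licenses page, HTTP %d\n", page.ServerResponse.HTTPStatusCode)
        return nil
    })
}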
-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -89228,35 +92377,35 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { +func (c *LicensesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -89264,23 +92413,20 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (* } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. -// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { +// Do executes the "compute.licenses.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -89299,7 +92445,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkEndpointGroupsListNetworkEndpoints{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -89311,44 +92457,14 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists the network endpoints in the specified network endpoint group.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.listNetworkEndpoints", + // "id": "compute.licenses.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "networkEndpointGroup" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -89356,56 +92472,34 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + // "path": "{project}/global/licenses/{resource}/setIamPolicy", // "request": { - // "$ref": "NetworkEndpointGroupsListEndpointsRequest" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networkEndpointGroups.testIamPermissions": +// method id "compute.licenses.testIamPermissions": -type NetworkEndpointGroupsTestIamPermissionsCall struct { +type LicensesTestIamPermissionsCall struct { s *Service project string - zone string resource string testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams @@ -89415,10 +92509,9 @@ type NetworkEndpointGroupsTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. 
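// Illustrative usage sketch (not part of the generated client): replacing the
// IAM policy on a license and then probing the caller's permissions with the
// two calls introduced in the surrounding hunks. The project and license
// names are placeholders, and the GlobalSetPolicyRequest and
// TestPermissionsRequest bodies are built by the caller because their fields
// are defined elsewhere in this file.
func exampleLicenseIAM(ctx context.Context, svc *compute.Service,
    set *compute.GlobalSetPolicyRequest, probe *compute.TestPermissionsRequest) error {
    if _, err := svc.Licenses.SetIamPolicy("my-project", "my-license", set).Context(ctx).Do(); err != nil {
        return err
    }
    resp, err := svc.Licenses.TestIamPermissions("my-project", "my-license", probe).Context(ctx).Do()
    if err != nil {
        return err
    }
    fmt.Printf("testIamPermissions answered with HTTP %d\n", resp.ServerResponse.HTTPStatusCode)
    return nil
}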
-func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { - c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { + c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone c.resource = resource c.testpermissionsrequest = testpermissionsrequest return c @@ -89427,7 +92520,7 @@ func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone s // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { +func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -89435,21 +92528,21 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Fiel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { +func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { +func (c *LicensesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -89463,7 +92556,7 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*ht reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -89472,20 +92565,19 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*ht req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. +// Do executes the "compute.licenses.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. 
Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -89518,10 +92610,9 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.testIamPermissions", + // "id": "compute.licenses.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", // "resource" // ], // "parameters": { @@ -89535,19 +92626,12 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", + // "path": "{project}/global/licenses/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -89563,24 +92647,23 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO } -// method id "compute.networks.addPeering": +// method id "compute.machineImages.delete": -type NetworksAddPeeringCall struct { - s *Service - project string - network string - networksaddpeeringrequest *NetworksAddPeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineImagesDeleteCall struct { + s *Service + project string + machineImage string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddPeering: Adds a peering to the specified network. -func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { - c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified machine image. Deleting a machine image +// is permanent and cannot be undone. +func (r *MachineImagesService) Delete(project string, machineImage string) *MachineImagesDeleteCall { + c := &MachineImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksaddpeeringrequest = networksaddpeeringrequest + c.machineImage = machineImage return c } @@ -89598,7 +92681,7 @@ func (r *NetworksService) AddPeering(project string, network string, networksadd // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
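// Illustrative usage sketch (not part of the generated client): deleting a
// machine image with the MachineImagesDeleteCall builder added in this hunk.
// The project name, image name and request ID are placeholders, and
// svc.MachineImages is assumed to be the usual generated accessor on
// *compute.Service.
func exampleMachineImageDelete(ctx context.Context, svc *compute.Service) error {
    // Deletion is permanent, so the RequestId guard matters: a retried call
    // with the same ID is ignored by the server.
    op, err := svc.MachineImages.Delete("my-project", "my-machine-image").
        RequestId("c2d3e4f5-0000-4000-8000-000000000002").
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    fmt.Printf("machine image delete accepted, operation HTTP %d\n", op.ServerResponse.HTTPStatusCode)
    return nil
}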
-func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeeringCall { +func (c *MachineImagesDeleteCall) RequestId(requestId string) *MachineImagesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -89606,7 +92689,7 @@ func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeering // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeeringCall { +func (c *MachineImagesDeleteCall) Fields(s ...googleapi.Field) *MachineImagesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -89614,56 +92697,51 @@ func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeerin // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksAddPeeringCall) Context(ctx context.Context) *NetworksAddPeeringCall { +func (c *MachineImagesDeleteCall) Context(ctx context.Context) *MachineImagesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksAddPeeringCall) Header() http.Header { +func (c *MachineImagesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksaddpeeringrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{machineImage}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "machineImage": c.machineImage, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.addPeering" call. +// Do executes the "compute.machineImages.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -89694,16 +92772,16 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Adds a peering to the specified network.", - // "httpMethod": "POST", - // "id": "compute.networks.addPeering", + // "description": "Deletes the specified machine image. Deleting a machine image is permanent and cannot be undone.", + // "httpMethod": "DELETE", + // "id": "compute.machineImages.delete", // "parameterOrder": [ // "project", - // "network" + // "machineImage" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource to add peering to.", + // "machineImage": { + // "description": "The name of the machine image to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -89722,10 +92800,7 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/addPeering", - // "request": { - // "$ref": "NetworksAddPeeringRequest" - // }, + // "path": "{project}/global/machineImages/{machineImage}", // "response": { // "$ref": "Operation" // }, @@ -89737,101 +92812,96 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.networks.delete": +// method id "compute.machineImages.get": -type NetworksDeleteCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineImagesGetCall struct { + s *Service + project string + machineImage string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified network. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete -func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { - c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified machine image. Gets a list of available +// machine images by making a list() request. +func (r *MachineImagesService) Get(project string, machineImage string) *MachineImagesGetCall { + c := &MachineImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.machineImage = machineImage return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { +func (c *MachineImagesGetCall) Fields(s ...googleapi.Field) *MachineImagesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineImagesGetCall) IfNoneMatch(entityTag string) *MachineImagesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { +func (c *MachineImagesGetCall) Context(ctx context.Context) *MachineImagesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksDeleteCall) Header() http.Header { +func (c *MachineImagesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{machineImage}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "machineImage": c.machineImage, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.machineImages.get" call. +// Exactly one of *MachineImage or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *MachineImage.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -89850,7 +92920,7 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &MachineImage{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -89862,16 +92932,16 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified network.", - // "httpMethod": "DELETE", - // "id": "compute.networks.delete", + // "description": "Returns the specified machine image. Gets a list of available machine images by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.machineImages.get", // "parameterOrder": [ // "project", - // "network" + // "machineImage" // ], // "parameters": { - // "network": { - // "description": "Name of the network to delete.", + // "machineImage": { + // "description": "The name of the machine image.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -89883,51 +92953,46 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/global/machineImages/{machineImage}", // "response": { - // "$ref": "Operation" + // "$ref": "MachineImage" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networks.get": +// method id "compute.machineImages.getIamPolicy": -type NetworksGetCall struct { +type MachineImagesGetIamPolicyCall struct { s *Service project string - network string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified network. Gets a list of available networks -// by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get -func (r *NetworksService) Get(project string, network string) *NetworksGetCall { - c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. 
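// Illustrative usage sketch (not part of the generated client): a conditional
// read of a machine image using IfNoneMatch and googleapi.IsNotModified, as
// described in the comments above. The project and image names plus the
// stored ETag are placeholders; svc.MachineImages is assumed to be the usual
// generated accessor, and googleapi comes from google.golang.org/api/googleapi.
func exampleMachineImageGet(ctx context.Context, svc *compute.Service, lastETag string) error {
    img, err := svc.MachineImages.Get("my-project", "my-machine-image").
        IfNoneMatch(lastETag).
        Context(ctx).
        Do()
    if googleapi.IsNotModified(err) {
        // The server answered 304: the image has not changed since lastETag.
        fmt.Println("machine image unchanged")
        return nil
    }
    if err != nil {
        return err
    }
    fmt.Printf("fetched machine image, HTTP %d\n", img.ServerResponse.HTTPStatusCode)
    return nil
}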
+func (r *MachineImagesService) GetIamPolicy(project string, resource string) *MachineImagesGetIamPolicyCall { + c := &MachineImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { +func (c *MachineImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -89937,7 +93002,7 @@ func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { +func (c *MachineImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *MachineImagesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -89945,21 +93010,21 @@ func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { +func (c *MachineImagesGetIamPolicyCall) Context(ctx context.Context) *MachineImagesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksGetCall) Header() http.Header { +func (c *MachineImagesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -89971,7 +93036,7 @@ func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -89979,20 +93044,20 @@ func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.get" call. -// Exactly one of *Network or error will be non-nil. Any non-2xx status +// Do executes the "compute.machineImages.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Network.ServerResponse.Header or (if a response was returned at all) +// *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { +func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -90011,7 +93076,7 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Network{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -90023,32 +93088,32 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { } return ret, nil // { - // "description": "Returns the specified network. Gets a list of available networks by making a list() request.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.networks.get", + // "id": "compute.machineImages.getIamPolicy", // "parameterOrder": [ // "project", - // "network" + // "resource" // ], // "parameters": { - // "network": { - // "description": "Name of the network to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/global/machineImages/{resource}/getIamPolicy", // "response": { - // "$ref": "Network" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -90059,24 +93124,26 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { } -// method id "compute.networks.insert": +// method id "compute.machineImages.insert": -type NetworksInsertCall struct { - s *Service - project string - network *Network - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineImagesInsertCall struct { + s *Service + project string + machineimage *MachineImage + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a network in the specified project using the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert -func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { - c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a machine image in the specified project using the +// data that is included in the request. 
If you are creating a new +// machine image to update an existing instance, your new machine image +// should use the same network or, if applicable, the same subnetwork as +// the original instance. +func (r *MachineImagesService) Insert(project string, machineimage *MachineImage) *MachineImagesInsertCall { + c := &MachineImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.machineimage = machineimage return c } @@ -90094,15 +93161,23 @@ func (r *NetworksService) Insert(project string, network *Network) *NetworksInse // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { +func (c *MachineImagesInsertCall) RequestId(requestId string) *MachineImagesInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// SourceInstance sets the optional parameter "sourceInstance": +// Required. Source instance that is used to create the machine image +// from. +func (c *MachineImagesInsertCall) SourceInstance(sourceInstance string) *MachineImagesInsertCall { + c.urlParams_.Set("sourceInstance", sourceInstance) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { +func (c *MachineImagesInsertCall) Fields(s ...googleapi.Field) *MachineImagesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -90110,35 +93185,35 @@ func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { +func (c *MachineImagesInsertCall) Context(ctx context.Context) *MachineImagesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksInsertCall) Header() http.Header { +func (c *MachineImagesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.machineimage) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -90151,14 +93226,14 @@ func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.insert" call. +// Do executes the "compute.machineImages.insert" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -90189,9 +93264,9 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Creates a network in the specified project using the data included in the request.", + // "description": "Creates a machine image in the specified project using the data that is included in the request. If you are creating a new machine image to update an existing instance, your new machine image should use the same network or, if applicable, the same subnetwork as the original instance.", // "httpMethod": "POST", - // "id": "compute.networks.insert", + // "id": "compute.machineImages.insert", // "parameterOrder": [ // "project" // ], @@ -90207,11 +93282,16 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "sourceInstance": { + // "description": "Required. Source instance that is used to create the machine image from.", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/networks", + // "path": "{project}/global/machineImages", // "request": { - // "$ref": "Network" + // "$ref": "MachineImage" // }, // "response": { // "$ref": "Operation" @@ -90224,9 +93304,9 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.networks.list": +// method id "compute.machineImages.list": -type NetworksListCall struct { +type MachineImagesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -90235,11 +93315,10 @@ type NetworksListCall struct { header_ http.Header } -// List: Retrieves the list of networks available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list -func (r *NetworksService) List(project string) *NetworksListCall { - c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of machine images that are contained within +// the specified project. 
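// ---------------------------------------------------------------------------
// A minimal usage sketch (not part of the vendored patch) for the new
// compute.machineImages.insert and compute.machineImages.list calls in this
// hunk. The import path (v0.beta), the pre-built *compute.Service, and the
// MachineImage.Name / MachineImageList.Items field names are assumptions;
// only the call signatures shown above (Insert, SourceInstance, List, Pages)
// are relied on.

package machineimagesketch

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// createAndListMachineImages assumes svc was constructed elsewhere with an
// authenticated client; service construction is intentionally omitted.
func createAndListMachineImages(ctx context.Context, svc *compute.Service, project, sourceInstanceURL string) error {
    // sourceInstance is a required query parameter per the method metadata.
    op, err := svc.MachineImages.Insert(project, &compute.MachineImage{Name: "example-image"}).
        SourceInstance(sourceInstanceURL).
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    _ = op // the returned *Operation would normally be polled for completion

    // Page through every machine image in the project.
    return svc.MachineImages.List(project).Pages(ctx, func(page *compute.MachineImageList) error {
        for _, mi := range page.Items {
            fmt.Println(mi.Name)
        }
        return nil
    })
}
// ---------------------------------------------------------------------------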
+func (r *MachineImagesService) List(project string) *MachineImagesListCall { + c := &MachineImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -90266,7 +93345,7 @@ func (r *NetworksService) List(project string) *NetworksListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *NetworksListCall) Filter(filter string) *NetworksListCall { +func (c *MachineImagesListCall) Filter(filter string) *MachineImagesListCall { c.urlParams_.Set("filter", filter) return c } @@ -90277,7 +93356,7 @@ func (c *NetworksListCall) Filter(filter string) *NetworksListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { +func (c *MachineImagesListCall) MaxResults(maxResults int64) *MachineImagesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -90294,7 +93373,7 @@ func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { +func (c *MachineImagesListCall) OrderBy(orderBy string) *MachineImagesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -90302,7 +93381,7 @@ func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { +func (c *MachineImagesListCall) PageToken(pageToken string) *MachineImagesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -90310,7 +93389,7 @@ func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { +func (c *MachineImagesListCall) Fields(s ...googleapi.Field) *MachineImagesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -90320,7 +93399,7 @@ func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { +func (c *MachineImagesListCall) IfNoneMatch(entityTag string) *MachineImagesListCall { c.ifNoneMatch_ = entityTag return c } @@ -90328,21 +93407,21 @@ func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { +func (c *MachineImagesListCall) Context(ctx context.Context) *MachineImagesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksListCall) Header() http.Header { +func (c *MachineImagesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -90354,7 +93433,7 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -90367,14 +93446,14 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.list" call. -// Exactly one of *NetworkList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *NetworkList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { +// Do executes the "compute.machineImages.list" call. +// Exactly one of *MachineImageList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *MachineImageList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -90393,7 +93472,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkList{ + ret := &MachineImageList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -90405,9 +93484,9 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error } return ret, nil // { - // "description": "Retrieves the list of networks available to the specified project.", + // "description": "Retrieves a list of machine images that are contained within the specified project.", // "httpMethod": "GET", - // "id": "compute.networks.list", + // "id": "compute.machineImages.list", // "parameterOrder": [ // "project" // ], @@ -90443,9 +93522,9 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // "type": "string" // } // }, - // "path": "{project}/global/networks", + // "path": "{project}/global/machineImages", // "response": { - // "$ref": "NetworkList" + // "$ref": "MachineImageList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -90459,7 +93538,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // Pages invokes f for each page of results. 
// A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { +func (c *MachineImagesListCall) Pages(ctx context.Context, f func(*MachineImageList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -90477,24 +93556,325 @@ func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error } } -// method id "compute.networks.listIpAddresses": +// method id "compute.machineImages.setIamPolicy": -type NetworksListIpAddressesCall struct { +type MachineImagesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *MachineImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *MachineImagesSetIamPolicyCall { + c := &MachineImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MachineImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *MachineImagesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MachineImagesSetIamPolicyCall) Context(ctx context.Context) *MachineImagesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MachineImagesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MachineImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.machineImages.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.machineImages.setIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/machineImages/{resource}/setIamPolicy", + // "request": { + // "$ref": "GlobalSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.machineImages.testIamPermissions": + +type MachineImagesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *MachineImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *MachineImagesTestIamPermissionsCall { + c := &MachineImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MachineImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *MachineImagesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MachineImagesTestIamPermissionsCall) Context(ctx context.Context) *MachineImagesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
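// ---------------------------------------------------------------------------
// A hedged sketch of the IAM helpers added for machine images (getIamPolicy
// and testIamPermissions), reusing the package and imports from the sketch
// above. The TestPermissionsRequest/Response field name "Permissions" and the
// permission string are assumptions; the call signatures come from this diff.

func canGetMachineImage(ctx context.Context, svc *compute.Service, project, image string) (bool, error) {
    // Read the current policy first; getIamPolicy takes no request body.
    if _, err := svc.MachineImages.GetIamPolicy(project, image).Context(ctx).Do(); err != nil {
        return false, err
    }
    resp, err := svc.MachineImages.TestIamPermissions(project, image, &compute.TestPermissionsRequest{
        Permissions: []string{"compute.machineImages.get"}, // illustrative permission name
    }).Context(ctx).Do()
    if err != nil {
        return false, err
    }
    return len(resp.Permissions) > 0, nil
}
// ---------------------------------------------------------------------------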
+func (c *MachineImagesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MachineImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/machineImages/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.machineImages.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.machineImages.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/machineImages/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // 
} + +} + +// method id "compute.machineTypes.aggregatedList": + +type MachineTypesAggregatedListCall struct { s *Service project string - network string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// ListIpAddresses: Lists the internal IP addresses in the specified -// network. -func (r *NetworksService) ListIpAddresses(project string, network string) *NetworksListIpAddressesCall { - c := &NetworksListIpAddressesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of machine types. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList +func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { + c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network return c } @@ -90520,7 +93900,7 @@ func (r *NetworksService) ListIpAddresses(project string, network string) *Netwo // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *NetworksListIpAddressesCall) Filter(filter string) *NetworksListIpAddressesCall { +func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -90531,7 +93911,7 @@ func (c *NetworksListIpAddressesCall) Filter(filter string) *NetworksListIpAddre // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NetworksListIpAddressesCall) MaxResults(maxResults int64) *NetworksListIpAddressesCall { +func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -90548,7 +93928,7 @@ func (c *NetworksListIpAddressesCall) MaxResults(maxResults int64) *NetworksList // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *NetworksListIpAddressesCall) OrderBy(orderBy string) *NetworksListIpAddressesCall { +func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -90556,23 +93936,15 @@ func (c *NetworksListIpAddressesCall) OrderBy(orderBy string) *NetworksListIpAdd // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NetworksListIpAddressesCall) PageToken(pageToken string) *NetworksListIpAddressesCall { +func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// Types sets the optional parameter "types": (Optional) types filter -// separate by comma, valid values are: SUBNETWORK, RESERVED, PEER_USED, -// PEER_RESERVED, REMOTE_USED, REMOTE_RESERVED. -func (c *NetworksListIpAddressesCall) Types(types string) *NetworksListIpAddressesCall { - c.urlParams_.Set("types", types) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NetworksListIpAddressesCall) Fields(s ...googleapi.Field) *NetworksListIpAddressesCall { +func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -90582,7 +93954,7 @@ func (c *NetworksListIpAddressesCall) Fields(s ...googleapi.Field) *NetworksList // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworksListIpAddressesCall) IfNoneMatch(entityTag string) *NetworksListIpAddressesCall { +func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -90590,21 +93962,21 @@ func (c *NetworksListIpAddressesCall) IfNoneMatch(entityTag string) *NetworksLis // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksListIpAddressesCall) Context(ctx context.Context) *NetworksListIpAddressesCall { +func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksListIpAddressesCall) Header() http.Header { +func (c *MachineTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksListIpAddressesCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -90616,7 +93988,7 @@ func (c *NetworksListIpAddressesCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/listIpAddresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -90625,19 +93997,18 @@ func (c *NetworksListIpAddressesCall) doRequest(alt string) (*http.Response, err req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.listIpAddresses" call. -// Exactly one of *IpAddressesList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *IpAddressesList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.machineTypes.aggregatedList" call. +// Exactly one of *MachineTypeAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *MachineTypeAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *NetworksListIpAddressesCall) Do(opts ...googleapi.CallOption) (*IpAddressesList, error) { +func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -90656,7 +94027,7 @@ func (c *NetworksListIpAddressesCall) Do(opts ...googleapi.CallOption) (*IpAddre if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &IpAddressesList{ + ret := &MachineTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -90668,12 +94039,11 @@ func (c *NetworksListIpAddressesCall) Do(opts ...googleapi.CallOption) (*IpAddre } return ret, nil // { - // "description": "Lists the internal IP addresses in the specified network.", + // "description": "Retrieves an aggregated list of machine types.", // "httpMethod": "GET", - // "id": "compute.networks.listIpAddresses", + // "id": "compute.machineTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { // "filter": { @@ -90689,13 +94059,6 @@ func (c *NetworksListIpAddressesCall) Do(opts ...googleapi.CallOption) (*IpAddre // "minimum": "0", // "type": "integer" // }, - // "network": { - // "description": "Name of the network for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", @@ -90712,16 +94075,11 @@ func (c *NetworksListIpAddressesCall) Do(opts ...googleapi.CallOption) (*IpAddre // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "types": { - // "description": "(Optional) types filter separate by comma, valid values are: SUBNETWORK, RESERVED, PEER_USED, PEER_RESERVED, REMOTE_USED, REMOTE_RESERVED.", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/listIpAddresses", + // "path": "{project}/aggregated/machineTypes", // "response": { - // "$ref": "IpAddressesList" + // "$ref": "MachineTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -90735,7 +94093,7 @@ func (c *NetworksListIpAddressesCall) Do(opts ...googleapi.CallOption) (*IpAddre // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *NetworksListIpAddressesCall) Pages(ctx context.Context, f func(*IpAddressesList) error) error { +func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -90753,23 +94111,193 @@ func (c *NetworksListIpAddressesCall) Pages(ctx context.Context, f func(*IpAddre } } -// method id "compute.networks.listIpOwners": +// method id "compute.machineTypes.get": -type NetworksListIpOwnersCall struct { +type MachineTypesGetCall struct { s *Service project string - network string + zone string + machineType string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// ListIpOwners: Lists the internal IP owners in the specified network. -func (r *NetworksService) ListIpOwners(project string, network string) *NetworksListIpOwnersCall { - c := &NetworksListIpOwnersCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified machine type. Gets a list of available +// machine types by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get +func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { + c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.zone = zone + c.machineType = machineType + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *MachineTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "machineType": c.machineType, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.machineTypes.get" call. +// Exactly one of *MachineType or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *MachineType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &MachineType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "machineType" + // ], + // "parameters": { + // "machineType": { + // "description": "Name of the machine type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/machineTypes/{machineType}", + // "response": { + // "$ref": "MachineType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.machineTypes.list": + +type MachineTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of machine types available to the specified +// project. 
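// ---------------------------------------------------------------------------
// A short sketch of the read-only machineTypes.get and machineTypes.list
// calls in this hunk, reusing the package/imports from the first sketch.
// The service field names follow the generated convention; MachineType.Name,
// MachineTypeList.Items, and the zone/machine-type values are assumptions.

func describeMachineTypes(ctx context.Context, svc *compute.Service, project, zone string) error {
    mt, err := svc.MachineTypes.Get(project, zone, "n1-standard-1").Context(ctx).Do()
    if err != nil {
        return err
    }
    fmt.Println("found machine type:", mt.Name)

    // List every machine type available in the zone, one page at a time.
    return svc.MachineTypes.List(project, zone).Pages(ctx, func(page *compute.MachineTypeList) error {
        for _, t := range page.Items {
            fmt.Println(t.Name)
        }
        return nil
    })
}
// ---------------------------------------------------------------------------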
+// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list +func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { + c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone return c } @@ -90795,25 +94323,18 @@ func (r *NetworksService) ListIpOwners(project string, network string) *Networks // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *NetworksListIpOwnersCall) Filter(filter string) *NetworksListIpOwnersCall { +func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { c.urlParams_.Set("filter", filter) return c } -// IpCidrRange sets the optional parameter "ipCidrRange": (Optional) IP -// CIDR range filter, example: "10.128.10.0/30". -func (c *NetworksListIpOwnersCall) IpCidrRange(ipCidrRange string) *NetworksListIpOwnersCall { - c.urlParams_.Set("ipCidrRange", ipCidrRange) - return c -} - // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NetworksListIpOwnersCall) MaxResults(maxResults int64) *NetworksListIpOwnersCall { +func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -90830,51 +94351,23 @@ func (c *NetworksListIpOwnersCall) MaxResults(maxResults int64) *NetworksListIpO // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *NetworksListIpOwnersCall) OrderBy(orderBy string) *NetworksListIpOwnersCall { +func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { c.urlParams_.Set("orderBy", orderBy) return c } -// OwnerProjects sets the optional parameter "ownerProjects": (Optional) -// Project IDs filter, example: "project-1,project-2". -func (c *NetworksListIpOwnersCall) OwnerProjects(ownerProjects string) *NetworksListIpOwnersCall { - c.urlParams_.Set("ownerProjects", ownerProjects) - return c -} - -// OwnerTypes sets the optional parameter "ownerTypes": (Optional) Owner -// types filter, example: "instance,forwardingRule". -func (c *NetworksListIpOwnersCall) OwnerTypes(ownerTypes string) *NetworksListIpOwnersCall { - c.urlParams_.Set("ownerTypes", ownerTypes) - return c -} - // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NetworksListIpOwnersCall) PageToken(pageToken string) *NetworksListIpOwnersCall { +func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// SubnetName sets the optional parameter "subnetName": (Optional) -// Subnetwork name filter. -func (c *NetworksListIpOwnersCall) SubnetName(subnetName string) *NetworksListIpOwnersCall { - c.urlParams_.Set("subnetName", subnetName) - return c -} - -// SubnetRegion sets the optional parameter "subnetRegion": (Optional) -// Subnetwork region filter. 
-func (c *NetworksListIpOwnersCall) SubnetRegion(subnetRegion string) *NetworksListIpOwnersCall { - c.urlParams_.Set("subnetRegion", subnetRegion) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksListIpOwnersCall) Fields(s ...googleapi.Field) *NetworksListIpOwnersCall { +func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -90884,7 +94377,7 @@ func (c *NetworksListIpOwnersCall) Fields(s ...googleapi.Field) *NetworksListIpO // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworksListIpOwnersCall) IfNoneMatch(entityTag string) *NetworksListIpOwnersCall { +func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { c.ifNoneMatch_ = entityTag return c } @@ -90892,21 +94385,21 @@ func (c *NetworksListIpOwnersCall) IfNoneMatch(entityTag string) *NetworksListIp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksListIpOwnersCall) Context(ctx context.Context) *NetworksListIpOwnersCall { +func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksListIpOwnersCall) Header() http.Header { +func (c *MachineTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksListIpOwnersCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -90918,7 +94411,7 @@ func (c *NetworksListIpOwnersCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/listIpOwners") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -90927,19 +94420,19 @@ func (c *NetworksListIpOwnersCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.listIpOwners" call. -// Exactly one of *IpOwnerList or error will be non-nil. Any non-2xx +// Do executes the "compute.machineTypes.list" call. +// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *IpOwnerList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerList, error) { +// *MachineTypeList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -90958,7 +94451,7 @@ func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &IpOwnerList{ + ret := &MachineTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -90970,12 +94463,12 @@ func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerLis } return ret, nil // { - // "description": "Lists the internal IP owners in the specified network.", + // "description": "Retrieves a list of machine types available to the specified project.", // "httpMethod": "GET", - // "id": "compute.networks.listIpOwners", + // "id": "compute.machineTypes.list", // "parameterOrder": [ // "project", - // "network" + // "zone" // ], // "parameters": { // "filter": { @@ -90983,11 +94476,6 @@ func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerLis // "location": "query", // "type": "string" // }, - // "ipCidrRange": { - // "description": "(Optional) IP CIDR range filter, example: \"10.128.10.0/30\".", - // "location": "query", - // "type": "string" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", @@ -90996,28 +94484,11 @@ func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerLis // "minimum": "0", // "type": "integer" // }, - // "network": { - // "description": "Name of the network to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" // }, - // "ownerProjects": { - // "description": "(Optional) Project IDs filter, example: \"project-1,project-2\".", - // "location": "query", - // "type": "string" - // }, - // "ownerTypes": { - // "description": "(Optional) Owner types filter, example: \"instance,forwardingRule\".", - // "location": "query", - // "type": "string" - // }, // "pageToken": { // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -91030,22 +94501,17 @@ func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerLis // "required": true, // "type": "string" // }, - // "subnetName": { - // "description": "(Optional) Subnetwork name filter.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, - // "subnetRegion": { - // "description": "(Optional) Subnetwork region filter.", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/listIpOwners", + // "path": "{project}/zones/{zone}/machineTypes", // "response": { - // "$ref": "IpOwnerList" + // "$ref": "MachineTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -91059,7 +94525,7 @@ func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerLis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NetworksListIpOwnersCall) Pages(ctx context.Context, f func(*IpOwnerList) error) error { +func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -91077,35 +94543,22 @@ func (c *NetworksListIpOwnersCall) Pages(ctx context.Context, f func(*IpOwnerLis } } -// method id "compute.networks.listPeeringRoutes": +// method id "compute.networkEndpointGroups.aggregatedList": -type NetworksListPeeringRoutesCall struct { +type NetworkEndpointGroupsAggregatedListCall struct { s *Service project string - network string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// ListPeeringRoutes: Lists the peering routes exchanged over peering -// connection. -func (r *NetworksService) ListPeeringRoutes(project string, network string) *NetworksListPeeringRoutesCall { - c := &NetworksListPeeringRoutesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of network endpoint groups and +// sorts them by zone. +func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { + c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - return c -} - -// Direction sets the optional parameter "direction": The direction of -// the exchanged routes. -// -// Possible values: -// "INCOMING" -// "OUTGOING" -func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("direction", direction) return c } @@ -91131,7 +94584,7 @@ func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksLis // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
-func (c *NetworksListPeeringRoutesCall) Filter(filter string) *NetworksListPeeringRoutesCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -91142,7 +94595,7 @@ func (c *NetworksListPeeringRoutesCall) Filter(filter string) *NetworksListPeeri // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NetworksListPeeringRoutesCall) MaxResults(maxResults int64) *NetworksListPeeringRoutesCall { +func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -91159,7 +94612,7 @@ func (c *NetworksListPeeringRoutesCall) MaxResults(maxResults int64) *NetworksLi // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *NetworksListPeeringRoutesCall) OrderBy(orderBy string) *NetworksListPeeringRoutesCall { +func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -91167,30 +94620,15 @@ func (c *NetworksListPeeringRoutesCall) OrderBy(orderBy string) *NetworksListPee // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NetworksListPeeringRoutesCall) PageToken(pageToken string) *NetworksListPeeringRoutesCall { +func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// PeeringName sets the optional parameter "peeringName": The response -// will show routes exchanged over the given peering connection. -func (c *NetworksListPeeringRoutesCall) PeeringName(peeringName string) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("peeringName", peeringName) - return c -} - -// Region sets the optional parameter "region": The region of the -// request. The response will include all subnet routes, static routes -// and dynamic routes in the region. -func (c *NetworksListPeeringRoutesCall) Region(region string) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("region", region) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksListPeeringRoutesCall) Fields(s ...googleapi.Field) *NetworksListPeeringRoutesCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -91200,7 +94638,7 @@ func (c *NetworksListPeeringRoutesCall) Fields(s ...googleapi.Field) *NetworksLi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *NetworksListPeeringRoutesCall) IfNoneMatch(entityTag string) *NetworksListPeeringRoutesCall { +func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -91208,21 +94646,21 @@ func (c *NetworksListPeeringRoutesCall) IfNoneMatch(entityTag string) *NetworksL // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksListPeeringRoutesCall) Context(ctx context.Context) *NetworksListPeeringRoutesCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksListPeeringRoutesCall) Header() http.Header { +func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -91234,7 +94672,7 @@ func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/listPeeringRoutes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -91243,19 +94681,19 @@ func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, e req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.listPeeringRoutes" call. -// Exactly one of *ExchangedPeeringRoutesList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *ExchangedPeeringRoutesList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*ExchangedPeeringRoutesList, error) { +// Do executes the "compute.networkEndpointGroups.aggregatedList" call. +// Exactly one of *NetworkEndpointGroupAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -91274,7 +94712,7 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ExchangedPeeringRoutesList{ + ret := &NetworkEndpointGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -91286,27 +94724,13 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha } return ret, nil // { - // "description": "Lists the peering routes exchanged over peering connection.", + // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", // "httpMethod": "GET", - // "id": "compute.networks.listPeeringRoutes", + // "id": "compute.networkEndpointGroups.aggregatedList", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { - // "direction": { - // "description": "The direction of the exchanged routes.", - // "enum": [ - // "INCOMING", - // "OUTGOING" - // ], - // "enumDescriptions": [ - // "", - // "" - // ], - // "location": "query", - // "type": "string" - // }, // "filter": { // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", @@ -91320,13 +94744,6 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha // "minimum": "0", // "type": "integer" // }, - // "network": { - // "description": "Name of the network for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", @@ -91337,27 +94754,17 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha // "location": "query", // "type": "string" // }, - // "peeringName": { - // "description": "The response will show routes exchanged over the given peering connection.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "The region of the request. The response will include all subnet routes, static routes and dynamic routes in the region.", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/listPeeringRoutes", + // "path": "{project}/aggregated/networkEndpointGroups", // "response": { - // "$ref": "ExchangedPeeringRoutesList" + // "$ref": "NetworkEndpointGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -91371,7 +94778,7 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NetworksListPeeringRoutesCall) Pages(ctx context.Context, f func(*ExchangedPeeringRoutesList) error) error { +func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -91389,26 +94796,27 @@ func (c *NetworksListPeeringRoutesCall) Pages(ctx context.Context, f func(*Excha } } -// method id "compute.networks.patch": +// method id "compute.networkEndpointGroups.attachNetworkEndpoints": -type NetworksPatchCall struct { - s *Service - project string - network string - network2 *Network - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified network with the data included in the -// request. Only the following fields can be modified: -// routingConfig.routingMode. -func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { - c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AttachNetworkEndpoints: Attach a list of network endpoints to the +// specified network endpoint group. 
+func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.network2 = network2 + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest return c } @@ -91426,7 +94834,7 @@ func (r *NetworksService) Patch(project string, network string, network2 *Networ // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -91434,7 +94842,7 @@ func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -91442,56 +94850,57 @@ func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksPatchCall) Context(ctx context.Context) *NetworksPatchCall { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksPatchCall) Header() http.Header { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.patch" call. +// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -91522,18 +94931,18 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", - // "httpMethod": "PATCH", - // "id": "compute.networks.patch", + // "description": "Attach a list of network endpoints to the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", // "parameterOrder": [ // "project", - // "network" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { - // "network": { - // "description": "Name of the network to update.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -91548,11 +94957,17 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", // "request": { - // "$ref": "Network" + // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" // }, // "response": { // "$ref": "Operation" @@ -91565,24 +94980,27 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.networks.removePeering": +// method id "compute.networkEndpointGroups.delete": -type NetworksRemovePeeringCall struct { - s *Service - project string - network string - networksremovepeeringrequest *NetworksRemovePeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsDeleteCall struct { + s *Service + project string + zone string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemovePeering: Removes a peering from the specified network. -func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { - c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network endpoint group. The network +// endpoints in the NEG and the VM instances they belong to are not +// terminated when the NEG is deleted. Note that the NEG cannot be +// deleted if there are backend services referencing it. +func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { + c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksremovepeeringrequest = networksremovepeeringrequest + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup return c } @@ -91600,7 +95018,7 @@ func (r *NetworksService) RemovePeering(project string, network string, networks // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemovePeeringCall { +func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -91608,7 +95026,7 @@ func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemoveP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemovePeeringCall { +func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -91616,56 +95034,52 @@ func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemove // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
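The attachNetworkEndpoints call above POSTs a NetworkEndpointGroupsAttachEndpointsRequest to {project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints and returns an Operation. A hedged sketch of calling it is shown below; the package name, import path, Service field name, placeholder identifiers, the request-body fields, and the Operation.Name field are assumptions, while the method chain itself (AttachNetworkEndpoints, RequestId, Context, Do) is taken from this hunk.

package negsketch // hypothetical helper package

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func attachEndpoints(ctx context.Context, svc *compute.Service) error {
	// The concrete fields of NetworkEndpointGroupsAttachEndpointsRequest are defined
	// elsewhere in this generated file and are not shown in this hunk, so the zero
	// value is used here as a stand-in.
	req := &compute.NetworkEndpointGroupsAttachEndpointsRequest{}
	op, err := svc.NetworkEndpointGroups.
		AttachNetworkEndpoints("my-project", "us-central1-a", "example-neg", req).
		RequestId("8d5a1e2c-1111-4111-8111-000000000001"). // placeholder UUID; reuse it on retry for idempotency
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("attach operation: %s", op.Name) // Operation.Name is assumed from the usual generated type
	return nil
}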
-func (c *NetworksRemovePeeringCall) Context(ctx context.Context) *NetworksRemovePeeringCall { +func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksRemovePeeringCall) Header() http.Header { +func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksremovepeeringrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.removePeering" call. +// Do executes the "compute.networkEndpointGroups.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -91696,18 +95110,18 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Removes a peering from the specified network.", - // "httpMethod": "POST", - // "id": "compute.networks.removePeering", + // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", + // "httpMethod": "DELETE", + // "id": "compute.networkEndpointGroups.delete", // "parameterOrder": [ // "project", - // "network" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource to remove peering from.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group to delete. 
It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -91722,12 +95136,15 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/removePeering", - // "request": { - // "$ref": "NetworksRemovePeeringRequest" - // }, + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", // "response": { // "$ref": "Operation" // }, @@ -91739,23 +95156,27 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.networks.switchToCustomMode": +// method id "compute.networkEndpointGroups.detachNetworkEndpoints": -type NetworksSwitchToCustomModeCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SwitchToCustomMode: Switches the network mode from auto subnet mode -// to custom subnet mode. -func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { - c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DetachNetworkEndpoints: Detach a list of network endpoints from the +// specified network endpoint group. +func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest return c } @@ -91773,7 +95194,7 @@ func (r *NetworksService) SwitchToCustomMode(project string, network string) *Ne // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
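Since the delete descriptor above documents requestId as the mechanism for safe retries, here is a brief sketch of deleting a NEG with an explicit request ID, assuming the same hypothetical package and import path as the earlier sketches; the Operation.Status field is likewise an assumption.

package negsketch // hypothetical helper package

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func deleteNEG(ctx context.Context, svc *compute.Service, project, zone, neg, requestID string) error {
	op, err := svc.NetworkEndpointGroups.
		Delete(project, zone, neg).
		RequestId(requestID). // resend with the same ID if the first attempt times out
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("delete started, operation status: %s\n", op.Status) // Status field assumed
	return nil
}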
-func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSwitchToCustomModeCall { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -91781,7 +95202,7 @@ func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSw // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -91789,30 +95210,35 @@ func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksSwitchToCustomModeCall) Header() http.Header { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -91820,20 +95246,21 @@ func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.switchToCustomMode" call. +// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -91864,18 +95291,18 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", + // "description": "Detach a list of network endpoints from the specified network endpoint group.", // "httpMethod": "POST", - // "id": "compute.networks.switchToCustomMode", + // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", // "parameterOrder": [ // "project", - // "network" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { - // "network": { - // "description": "Name of the network to be updated.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -91890,9 +95317,18 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/switchToCustomMode", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "request": { + // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -91904,89 +95340,99 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.networks.testIamPermissions": +// method id "compute.networkEndpointGroups.get": -type NetworksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsGetCall struct { + s *Service + project string + zone string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *NetworksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworksTestIamPermissionsCall { - c := &NetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network endpoint group. Gets a list of +// available network endpoint groups by making a list() request. +func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { + c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworksTestIamPermissionsCall { +func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksTestIamPermissionsCall) Context(ctx context.Context) *NetworksTestIamPermissionsCall { +func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksTestIamPermissionsCall) Header() http.Header { +func (c *NetworkEndpointGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.networkEndpointGroups.get" call. +// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *NetworkEndpointGroup.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -92005,7 +95451,7 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -92017,14 +95463,21 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.networks.testIamPermissions", + // "description": "Returns the specified network endpoint group. 
Gets a list of available network endpoint groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.networkEndpointGroups.get", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -92032,20 +95485,16 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/networks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "NetworkEndpointGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -92056,27 +95505,25 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } -// method id "compute.networks.updatePeering": +// method id "compute.networkEndpointGroups.insert": -type NetworksUpdatePeeringCall struct { - s *Service - project string - network string - networksupdatepeeringrequest *NetworksUpdatePeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsInsertCall struct { + s *Service + project string + zone string + networkendpointgroup *NetworkEndpointGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdatePeering: Updates the specified network peering with the data -// included in the request Only the following fields can be modified: -// NetworkPeering.export_custom_routes, and -// NetworkPeering.import_custom_routes -func (r *NetworksService) UpdatePeering(project string, network string, networksupdatepeeringrequest *NetworksUpdatePeeringRequest) *NetworksUpdatePeeringCall { - c := &NetworksUpdatePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a network endpoint group in the specified project +// using the parameters that are included in the request. +func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { + c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksupdatepeeringrequest = networksupdatepeeringrequest + c.zone = zone + c.networkendpointgroup = networkendpointgroup return c } @@ -92094,7 +95541,7 @@ func (r *NetworksService) UpdatePeering(project string, network string, networks // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
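The Get call above wires IfNoneMatch through to an If-None-Match request header and surfaces http.StatusNotModified via googleapi.IsNotModified, so a cached read can be refreshed conditionally. A sketch follows; the ServerResponse.Header population is visible in the Do method above, but the "Etag" header name, the package name, and the import path are assumptions.

package negsketch // hypothetical helper package

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
	"google.golang.org/api/googleapi"
)

func refreshNEG(ctx context.Context, svc *compute.Service, project, zone, name string, prev *compute.NetworkEndpointGroup) (*compute.NetworkEndpointGroup, error) {
	call := svc.NetworkEndpointGroups.Get(project, zone, name).Context(ctx)
	if prev != nil {
		// Only answer if the resource changed since the previous read.
		call = call.IfNoneMatch(prev.ServerResponse.Header.Get("Etag"))
	}
	neg, err := call.Do()
	if googleapi.IsNotModified(err) {
		return prev, nil // unchanged; keep the cached copy
	}
	if err != nil {
		return nil, err
	}
	return neg, nil
}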
-func (c *NetworksUpdatePeeringCall) RequestId(requestId string) *NetworksUpdatePeeringCall { +func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -92102,7 +95549,7 @@ func (c *NetworksUpdatePeeringCall) RequestId(requestId string) *NetworksUpdateP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksUpdatePeeringCall) Fields(s ...googleapi.Field) *NetworksUpdatePeeringCall { +func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -92110,56 +95557,56 @@ func (c *NetworksUpdatePeeringCall) Fields(s ...googleapi.Field) *NetworksUpdate // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksUpdatePeeringCall) Context(ctx context.Context) *NetworksUpdatePeeringCall { +func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksUpdatePeeringCall) Header() http.Header { +func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksupdatepeeringrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/updatePeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.updatePeering" call. +// Do executes the "compute.networkEndpointGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -92190,21 +95637,14 @@ func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the specified network peering with the data included in the request Only the following fields can be modified: NetworkPeering.export_custom_routes, and NetworkPeering.import_custom_routes", - // "httpMethod": "PATCH", - // "id": "compute.networks.updatePeering", + // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.insert", // "parameterOrder": [ // "project", - // "network" + // "zone" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource which the updated peering is belonging to.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -92216,11 +95656,17 @@ func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the network endpoint group. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/updatePeering", + // "path": "{project}/zones/{zone}/networkEndpointGroups", // "request": { - // "$ref": "NetworksUpdatePeeringRequest" + // "$ref": "NetworkEndpointGroup" // }, // "response": { // "$ref": "Operation" @@ -92233,110 +95679,159 @@ func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.nodeGroups.addNodes": +// method id "compute.networkEndpointGroups.list": -type NodeGroupsAddNodesCall struct { - s *Service - project string - zone string - nodeGroup string - nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AddNodes: Adds specified number of nodes to the node group. 
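To round out the new methods, a sketch of the Insert call documented above: it creates a NEG in a given project and zone and returns an Operation. The NetworkEndpointGroup schema is defined elsewhere in this generated file, so the Name field set here, the package name, and the import path are assumptions; the Insert/RequestId/Context/Do chain comes from this hunk.

package negsketch // hypothetical helper package

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func createNEG(ctx context.Context, svc *compute.Service, project, zone string) error {
	neg := &compute.NetworkEndpointGroup{
		Name: "example-neg", // assumed field; the name must comply with RFC1035
		// Network, subnetwork, endpoint type, and other fields are omitted in this sketch.
	}
	op, err := svc.NetworkEndpointGroups.
		Insert(project, zone, neg).
		RequestId("3c9b7f10-2222-4222-8222-000000000002"). // placeholder UUID for idempotent retries
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("insert operation: %s\n", op.Name) // Name field assumed
	return nil
}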
-func (r *NodeGroupsService) AddNodes(project string, zone string, nodeGroup string, nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest) *NodeGroupsAddNodesCall { - c := &NodeGroupsAddNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of network endpoint groups that are located +// in the specified project and zone. +func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { + c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.nodeGroup = nodeGroup - c.nodegroupsaddnodesrequest = nodegroupsaddnodesrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsAddNodesCall) RequestId(requestId string) *NodeGroupsAddNodesCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsAddNodesCall) Fields(s ...googleapi.Field) *NodeGroupsAddNodesCall { +func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsAddNodesCall) Context(ctx context.Context) *NodeGroupsAddNodesCall { +func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeGroupsAddNodesCall) Header() http.Header { +func (c *NetworkEndpointGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsaddnodesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.addNodes" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkEndpointGroups.list" call. +// Exactly one of *NetworkEndpointGroupList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -92355,7 +95850,7 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -92367,20 +95862,35 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Adds specified number of nodes to the node group.", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.addNodes", + // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.networkEndpointGroups.list", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "zone" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -92390,50 +95900,68 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", - // "request": { - // "$ref": "NodeGroupsAddNodesRequest" - // }, + // "path": "{project}/zones/{zone}/networkEndpointGroups", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkEndpointGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeGroups.aggregatedList": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NodeGroupsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.networkEndpointGroups.listNetworkEndpoints": + +type NetworkEndpointGroupsListNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of node groups. Note: -// use nodeGroups.listNodes for more details about each group. -func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregatedListCall { - c := &NodeGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListNetworkEndpoints: Lists the network endpoints in the specified +// network endpoint group. 
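The list call above, its paging setters (maxResults, pageToken), and the new Pages helper combine into the usual iterate-every-page pattern; the listNetworkEndpoints method that this hunk begins continues below. A minimal sketch, assuming an authenticated *compute.Service from google.golang.org/api/compute/v1, the List(project, zone) constructor defined just before this hunk, and the generated Items/Name fields on NetworkEndpointGroupList:

package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printNEGs walks every network endpoint group in one zone using the Pages
// helper from this hunk. svc must already be authenticated; the List
// constructor and the Items/Name fields are assumed from the generated types.
func printNEGs(ctx context.Context, svc *compute.Service, project, zone string) error {
	call := svc.NetworkEndpointGroups.List(project, zone).MaxResults(500)
	return call.Pages(ctx, func(page *compute.NetworkEndpointGroupList) error {
		for _, neg := range page.Items {
			fmt.Println(neg.Name)
		}
		return nil // a non-nil error here halts the iteration early
	})
}

Returning a non-nil error from the callback stops the iteration, exactly as the Pages doc comment above states, and Pages resets the pageToken afterward so the call can be reused.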
+func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { + c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest return c } @@ -92459,7 +95987,7 @@ func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregated // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *NodeGroupsAggregatedListCall) Filter(filter string) *NodeGroupsAggregatedListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("filter", filter) return c } @@ -92470,7 +95998,7 @@ func (c *NodeGroupsAggregatedListCall) Filter(filter string) *NodeGroupsAggregat // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NodeGroupsAggregatedListCall) MaxResults(maxResults int64) *NodeGroupsAggregatedListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -92487,7 +96015,7 @@ func (c *NodeGroupsAggregatedListCall) MaxResults(maxResults int64) *NodeGroupsA // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *NodeGroupsAggregatedListCall) OrderBy(orderBy string) *NodeGroupsAggregatedListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -92495,7 +96023,7 @@ func (c *NodeGroupsAggregatedListCall) OrderBy(orderBy string) *NodeGroupsAggreg // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAggregatedListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -92503,71 +96031,67 @@ func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NodeGroupsAggregatedListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NodeGroupsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsAggregatedListCall) Context(ctx context.Context) *NodeGroupsAggregatedListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsAggregatedListCall) Header() http.Header { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.aggregatedList" call. -// Exactly one of *NodeGroupAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NodeGroupAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGroupAggregatedList, error) { +// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. +// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. 
+func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -92586,7 +96110,7 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeGroupAggregatedList{ + ret := &NetworkEndpointGroupsListNetworkEndpoints{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -92598,11 +96122,13 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr } return ret, nil // { - // "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group.", - // "httpMethod": "GET", - // "id": "compute.nodeGroups.aggregatedList", + // "description": "Lists the network endpoints in the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.listNetworkEndpoints", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "networkEndpointGroup" // ], // "parameters": { // "filter": { @@ -92618,6 +96144,12 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // "minimum": "0", // "type": "integer" // }, + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", @@ -92634,11 +96166,20 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/nodeGroups", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + // "request": { + // "$ref": "NetworkEndpointGroupsListEndpointsRequest" + // }, // "response": { - // "$ref": "NodeGroupAggregatedList" + // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -92652,7 +96193,7 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *NodeGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NodeGroupAggregatedList) error) error { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -92670,50 +96211,34 @@ func (c *NodeGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NodeGr } } -// method id "compute.nodeGroups.delete": +// method id "compute.networkEndpointGroups.testIamPermissions": -type NodeGroupsDeleteCall struct { - s *Service - project string - zone string - nodeGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified NodeGroup resource. -func (r *NodeGroupsService) Delete(project string, zone string, nodeGroup string) *NodeGroupsDeleteCall { - c := &NodeGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { + c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.nodeGroup = nodeGroup - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsDeleteCall) RequestId(requestId string) *NodeGroupsDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsDeleteCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteCall { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -92721,52 +96246,57 @@ func (c *NodeGroupsDeleteCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
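The listNetworkEndpoints call added above differs from a plain list in that it is a POST carrying a NetworkEndpointGroupsListEndpointsRequest body, yet it exposes the same Pages helper. A sketch under stated assumptions: the Items slice on the response type is assumed from the generated client's conventions, and the request body is left at its zero value rather than guessing at its fields.

package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// countEndpoints pages through compute.networkEndpointGroups.listNetworkEndpoints
// and reports how many endpoints each page returned. The empty request body is
// deliberate; real callers would populate it as needed.
func countEndpoints(ctx context.Context, svc *compute.Service, project, zone, neg string) error {
	req := &compute.NetworkEndpointGroupsListEndpointsRequest{}
	call := svc.NetworkEndpointGroups.ListNetworkEndpoints(project, zone, neg, req)
	page := 0
	return call.Pages(ctx, func(resp *compute.NetworkEndpointGroupsListNetworkEndpoints) error {
		page++
		fmt.Printf("page %d: %d endpoints\n", page, len(resp.Items)) // Items assumed
		return nil
	})
}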
-func (c *NodeGroupsDeleteCall) Context(ctx context.Context) *NodeGroupsDeleteCall { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsDeleteCall) Header() http.Header { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -92785,7 +96315,7 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -92797,22 +96327,15 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Deletes the specified NodeGroup resource.", - // "httpMethod": "DELETE", - // "id": "compute.nodeGroups.delete", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.testIamPermissions", // "parameterOrder": [ // "project", // "zone", - // "nodeGroup" + // "resource" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -92820,9 +96343,11 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { @@ -92833,38 +96358,40 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeGroups.deleteNodes": +// method id "compute.networks.addPeering": -type NodeGroupsDeleteNodesCall struct { - s *Service - project string - zone string - nodeGroup string - nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksAddPeeringCall struct { + s *Service + project string + network string + networksaddpeeringrequest *NetworksAddPeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteNodes: Deletes specified nodes from the node group. -func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { - c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddPeering: Adds a peering to the specified network. +func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { + c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup - c.nodegroupsdeletenodesrequest = nodegroupsdeletenodesrequest + c.network = network + c.networksaddpeeringrequest = networksaddpeeringrequest return c } @@ -92882,7 +96409,7 @@ func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup s // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsDeleteNodesCall) RequestId(requestId string) *NodeGroupsDeleteNodesCall { +func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeeringCall { c.urlParams_.Set("requestId", requestId) return c } @@ -92890,7 +96417,7 @@ func (c *NodeGroupsDeleteNodesCall) RequestId(requestId string) *NodeGroupsDelet // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
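The testIamPermissions call above returns the subset of the requested permissions that the caller actually holds on the named NEG. A hedged sketch: the Permissions fields on TestPermissionsRequest/TestPermissionsResponse and the permission string compute.networkEndpointGroups.get are assumed for illustration and do not appear in this hunk.

package computeexamples

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// canReadNEG asks compute.networkEndpointGroups.testIamPermissions whether the
// caller holds a single illustrative permission on one NEG.
func canReadNEG(ctx context.Context, svc *compute.Service, project, zone, neg string) (bool, error) {
	req := &compute.TestPermissionsRequest{
		// Permissions field and permission name are assumptions, not from this hunk.
		Permissions: []string{"compute.networkEndpointGroups.get"},
	}
	resp, err := svc.NetworkEndpointGroups.TestIamPermissions(project, zone, neg, req).
		Context(ctx).Do()
	if err != nil {
		return false, err
	}
	return len(resp.Permissions) > 0, nil
}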
-func (c *NodeGroupsDeleteNodesCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteNodesCall { +func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -92898,35 +96425,35 @@ func (c *NodeGroupsDeleteNodesCall) Fields(s ...googleapi.Field) *NodeGroupsDele // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsDeleteNodesCall) Context(ctx context.Context) *NodeGroupsDeleteNodesCall { +func (c *NetworksAddPeeringCall) Context(ctx context.Context) *NetworksAddPeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsDeleteNodesCall) Header() http.Header { +func (c *NetworksAddPeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsdeletenodesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksaddpeeringrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -92934,21 +96461,20 @@ func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.deleteNodes" call. +// Do executes the "compute.networks.addPeering" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -92979,19 +96505,18 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes specified nodes from the node group.", + // "description": "Adds a peering to the specified network.", // "httpMethod": "POST", - // "id": "compute.nodeGroups.deleteNodes", + // "id": "compute.networks.addPeering", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "network" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource to delete.", + // "network": { + // "description": "Name of the network resource to add peering to.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -93006,18 +96531,11 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", + // "path": "{project}/global/networks/{network}/addPeering", // "request": { - // "$ref": "NodeGroupsDeleteNodesRequest" + // "$ref": "NetworksAddPeeringRequest" // }, // "response": { // "$ref": "Operation" @@ -93030,100 +96548,101 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.nodeGroups.get": +// method id "compute.networks.delete": -type NodeGroupsGetCall struct { - s *Service - project string - zone string - nodeGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworksDeleteCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified NodeGroup. Get a list of available -// NodeGroups by making a list() request. Note: the "nodes" field should -// not be used. Use nodeGroups.listNodes instead. -func (r *NodeGroupsService) Get(project string, zone string, nodeGroup string) *NodeGroupsGetCall { - c := &NodeGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete +func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { + c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup + c.network = network + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsGetCall) Fields(s ...googleapi.Field) *NodeGroupsGetCall { +func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeGroupsGetCall) IfNoneMatch(entityTag string) *NodeGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsGetCall) Context(ctx context.Context) *NodeGroupsGetCall { +func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsGetCall) Header() http.Header { +func (c *NetworksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.get" call. -// Exactly one of *NodeGroup or error will be non-nil. Any non-2xx +// Do executes the "compute.networks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *NodeGroup.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) { +func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -93142,7 +96661,7 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeGroup{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -93154,19 +96673,18 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) } return ret, nil // { - // "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. Use nodeGroups.listNodes instead.", - // "httpMethod": "GET", - // "id": "compute.nodeGroups.get", + // "description": "Deletes the specified network.", + // "httpMethod": "DELETE", + // "id": "compute.networks.delete", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "network" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the node group to return.", + // "network": { + // "description": "Name of the network to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -93177,54 +96695,50 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "path": "{project}/global/networks/{network}", // "response": { - // "$ref": "NodeGroup" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeGroups.getIamPolicy": +// method id "compute.networks.get": -type NodeGroupsGetIamPolicyCall struct { +type NetworksGetCall struct { s *Service project string - zone string - resource string + network string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *NodeGroupsService) GetIamPolicy(project string, zone string, resource string) *NodeGroupsGetIamPolicyCall { - c := &NodeGroupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network. Gets a list of available networks +// by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get +func (r *NetworksService) Get(project string, network string) *NetworksGetCall { + c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource + c.network = network return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsGetIamPolicyCall { +func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -93234,7 +96748,7 @@ func (c *NodeGroupsGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsGet // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeGroupsGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeGroupsGetIamPolicyCall { +func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -93242,21 +96756,21 @@ func (c *NodeGroupsGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeGroupsGe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsGetIamPolicyCall) Context(ctx context.Context) *NodeGroupsGetIamPolicyCall { +func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
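The networks.delete hunk above wires the requestId query parameter through the RequestId setter, which is what makes a retried delete idempotent on the server side. A minimal sketch using only identifiers that appear in the hunk; the requestID argument must be a valid, non-zero UUID supplied by the caller.

package computeexamples

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// deleteNetwork issues compute.networks.delete with a caller-supplied request
// ID so a retried call with the same ID is ignored rather than re-executed.
func deleteNetwork(ctx context.Context, svc *compute.Service, project, network, requestID string) (*compute.Operation, error) {
	return svc.Networks.Delete(project, network).
		RequestId(requestID). // must be a valid UUID, not the zero UUID
		Context(ctx).
		Do()
}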
-func (c *NodeGroupsGetIamPolicyCall) Header() http.Header { +func (c *NetworksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -93268,7 +96782,7 @@ func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -93276,21 +96790,20 @@ func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// Do executes the "compute.networks.get" call. +// Exactly one of *Network or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) +// *Network.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -93309,7 +96822,7 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Network{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -93321,40 +96834,32 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Returns the specified network. 
Gets a list of available networks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.nodeGroups.getIamPolicy", + // "id": "compute.networks.get", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "network" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "network": { + // "description": "Name of the network to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", + // "path": "{project}/global/networks/{network}", // "response": { - // "$ref": "Policy" + // "$ref": "Network" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -93365,26 +96870,181 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } -// method id "compute.nodeGroups.insert": +// method id "compute.networks.getEffectiveFirewalls": -type NodeGroupsInsertCall struct { - s *Service - project string - zone string - nodegroup *NodeGroup - urlParams_ gensupport.URLParams +type NetworksGetEffectiveFirewallsCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetEffectiveFirewalls: Returns the effective firewalls on a given +// network. +func (r *NetworksService) GetEffectiveFirewalls(project string, network string) *NetworksGetEffectiveFirewallsCall { + c := &NetworksGetEffectiveFirewallsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworksGetEffectiveFirewallsCall) Fields(s ...googleapi.Field) *NetworksGetEffectiveFirewallsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworksGetEffectiveFirewallsCall) IfNoneMatch(entityTag string) *NetworksGetEffectiveFirewallsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
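The IfNoneMatch setter and googleapi.IsNotModified described in the surrounding doc comments allow a cheap conditional re-fetch of a network: send the last known ETag, and an unchanged resource comes back as a 304 error that IsNotModified recognizes. A sketch, assuming the ETag can be read back from the embedded ServerResponse headers that the generated Do method fills in.

package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getNetworkIfChanged refetches a network only when its ETag has changed.
// lastETag may be empty on the first call.
func getNetworkIfChanged(ctx context.Context, svc *compute.Service, project, network, lastETag string) (*compute.Network, string, error) {
	call := svc.Networks.Get(project, network).Context(ctx)
	if lastETag != "" {
		call = call.IfNoneMatch(lastETag)
	}
	n, err := call.Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("network unchanged since last fetch")
		return nil, lastETag, nil
	}
	if err != nil {
		return nil, "", err
	}
	// Do fills the embedded ServerResponse, so the ETag is assumed readable
	// from the promoted response headers for the next conditional request.
	return n, n.Header.Get("Etag"), nil
}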
+func (c *NetworksGetEffectiveFirewallsCall) Context(ctx context.Context) *NetworksGetEffectiveFirewallsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworksGetEffectiveFirewallsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworksGetEffectiveFirewallsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/getEffectiveFirewalls") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "network": c.network, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networks.getEffectiveFirewalls" call. +// Exactly one of *NetworksGetEffectiveFirewallsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *NetworksGetEffectiveFirewallsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworksGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*NetworksGetEffectiveFirewallsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NetworksGetEffectiveFirewallsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the effective firewalls on a given network.", + // "httpMethod": "GET", + // "id": "compute.networks.getEffectiveFirewalls", + // "parameterOrder": [ + // "project", + // "network" + // ], + // "parameters": { + // "network": { + // "description": "Name of the network for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/networks/{network}/getEffectiveFirewalls", + // "response": { + // "$ref": "NetworksGetEffectiveFirewallsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.networks.insert": + +type NetworksInsertCall struct { + s *Service + project string + network *Network + urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a NodeGroup resource in the specified project using -// the data included in the request. -func (r *NodeGroupsService) Insert(project string, zone string, initialNodeCount int64, nodegroup *NodeGroup) *NodeGroupsInsertCall { - c := &NodeGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a network in the specified project using the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert +func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { + c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.urlParams_.Set("initialNodeCount", fmt.Sprint(initialNodeCount)) - c.nodegroup = nodegroup + c.network = network return c } @@ -93402,7 +97062,7 @@ func (r *NodeGroupsService) Insert(project string, zone string, initialNodeCount // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsInsertCall) RequestId(requestId string) *NodeGroupsInsertCall { +func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -93410,7 +97070,7 @@ func (c *NodeGroupsInsertCall) RequestId(requestId string) *NodeGroupsInsertCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
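getEffectiveFirewalls, added above, answers which firewall rules actually apply to a network rather than which rules are merely defined in the project. A sketch, assuming the generated NetworksGetEffectiveFirewallsResponse exposes a Firewalls slice of *compute.Firewall with the usual Name field.

package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printEffectiveFirewalls lists the firewall rules in effect on a network via
// compute.networks.getEffectiveFirewalls.
func printEffectiveFirewalls(ctx context.Context, svc *compute.Service, project, network string) error {
	resp, err := svc.Networks.GetEffectiveFirewalls(project, network).Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, fw := range resp.Firewalls { // Firewalls/Name assumed from the generated types
		fmt.Println(fw.Name)
	}
	return nil
}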
-func (c *NodeGroupsInsertCall) Fields(s ...googleapi.Field) *NodeGroupsInsertCall { +func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -93418,35 +97078,35 @@ func (c *NodeGroupsInsertCall) Fields(s ...googleapi.Field) *NodeGroupsInsertCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsInsertCall) Context(ctx context.Context) *NodeGroupsInsertCall { +func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsInsertCall) Header() http.Header { +func (c *NetworksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroup) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -93455,19 +97115,18 @@ func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.insert" call. +// Do executes the "compute.networks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -93498,22 +97157,13 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Creates a NodeGroup resource in the specified project using the data included in the request.", + // "description": "Creates a network in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.nodeGroups.insert", + // "id": "compute.networks.insert", // "parameterOrder": [ - // "project", - // "zone", - // "initialNodeCount" + // "project" // ], // "parameters": { - // "initialNodeCount": { - // "description": "Initial count of nodes in the node group.", - // "format": "int32", - // "location": "query", - // "required": true, - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -93525,18 +97175,11 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups", + // "path": "{project}/global/networks", // "request": { - // "$ref": "NodeGroup" + // "$ref": "Network" // }, // "response": { // "$ref": "Operation" @@ -93549,25 +97192,23 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.nodeGroups.list": +// method id "compute.networks.list": -type NodeGroupsListCall struct { +type NetworksListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of node groups available to the specified -// project. Note: use nodeGroups.listNodes for more details about each -// group. -func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCall { - c := &NodeGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of networks available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list +func (r *NetworksService) List(project string) *NetworksListCall { + c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -93593,7 +97234,7 @@ func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCal // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
-func (c *NodeGroupsListCall) Filter(filter string) *NodeGroupsListCall { +func (c *NetworksListCall) Filter(filter string) *NetworksListCall { c.urlParams_.Set("filter", filter) return c } @@ -93604,7 +97245,7 @@ func (c *NodeGroupsListCall) Filter(filter string) *NodeGroupsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NodeGroupsListCall) MaxResults(maxResults int64) *NodeGroupsListCall { +func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -93621,7 +97262,7 @@ func (c *NodeGroupsListCall) MaxResults(maxResults int64) *NodeGroupsListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *NodeGroupsListCall) OrderBy(orderBy string) *NodeGroupsListCall { +func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -93629,7 +97270,7 @@ func (c *NodeGroupsListCall) OrderBy(orderBy string) *NodeGroupsListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { +func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -93637,7 +97278,7 @@ func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsListCall) Fields(s ...googleapi.Field) *NodeGroupsListCall { +func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -93647,7 +97288,7 @@ func (c *NodeGroupsListCall) Fields(s ...googleapi.Field) *NodeGroupsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeGroupsListCall) IfNoneMatch(entityTag string) *NodeGroupsListCall { +func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { c.ifNoneMatch_ = entityTag return c } @@ -93655,21 +97296,21 @@ func (c *NodeGroupsListCall) IfNoneMatch(entityTag string) *NodeGroupsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsListCall) Context(ctx context.Context) *NodeGroupsListCall { +func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeGroupsListCall) Header() http.Header { +func (c *NetworksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -93681,7 +97322,7 @@ func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -93690,19 +97331,18 @@ func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.list" call. -// Exactly one of *NodeGroupList or error will be non-nil. Any non-2xx +// Do executes the "compute.networks.list" call. +// Exactly one of *NetworkList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *NodeGroupList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, error) { +// *NetworkList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -93721,7 +97361,7 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeGroupList{ + ret := &NetworkList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -93733,12 +97373,11 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e } return ret, nil // { - // "description": "Retrieves a list of node groups available to the specified project. 
Note: use nodeGroups.listNodes for more details about each group.", + // "description": "Retrieves the list of networks available to the specified project.", // "httpMethod": "GET", - // "id": "compute.nodeGroups.list", + // "id": "compute.networks.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -93770,18 +97409,11 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups", + // "path": "{project}/global/networks", // "response": { - // "$ref": "NodeGroupList" + // "$ref": "NetworkList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -93795,7 +97427,7 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NodeGroupsListCall) Pages(ctx context.Context, f func(*NodeGroupList) error) error { +func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -93813,24 +97445,24 @@ func (c *NodeGroupsListCall) Pages(ctx context.Context, f func(*NodeGroupList) e } } -// method id "compute.nodeGroups.listNodes": +// method id "compute.networks.listIpAddresses": -type NodeGroupsListNodesCall struct { - s *Service - project string - zone string - nodeGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksListIpAddressesCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListNodes: Lists nodes in the node group. -func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup string) *NodeGroupsListNodesCall { - c := &NodeGroupsListNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListIpAddresses: Lists the internal IP addresses in the specified +// network. +func (r *NetworksService) ListIpAddresses(project string, network string) *NetworksListIpAddressesCall { + c := &NetworksListIpAddressesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup + c.network = network return c } @@ -93856,7 +97488,7 @@ func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup str // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *NodeGroupsListNodesCall) Filter(filter string) *NodeGroupsListNodesCall { +func (c *NetworksListIpAddressesCall) Filter(filter string) *NetworksListIpAddressesCall { c.urlParams_.Set("filter", filter) return c } @@ -93867,7 +97499,7 @@ func (c *NodeGroupsListNodesCall) Filter(filter string) *NodeGroupsListNodesCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. 
Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NodeGroupsListNodesCall) MaxResults(maxResults int64) *NodeGroupsListNodesCall { +func (c *NetworksListIpAddressesCall) MaxResults(maxResults int64) *NetworksListIpAddressesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -93884,7 +97516,7 @@ func (c *NodeGroupsListNodesCall) MaxResults(maxResults int64) *NodeGroupsListNo // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *NodeGroupsListNodesCall) OrderBy(orderBy string) *NodeGroupsListNodesCall { +func (c *NetworksListIpAddressesCall) OrderBy(orderBy string) *NetworksListIpAddressesCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -93892,68 +97524,88 @@ func (c *NodeGroupsListNodesCall) OrderBy(orderBy string) *NodeGroupsListNodesCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NodeGroupsListNodesCall) PageToken(pageToken string) *NodeGroupsListNodesCall { +func (c *NetworksListIpAddressesCall) PageToken(pageToken string) *NetworksListIpAddressesCall { c.urlParams_.Set("pageToken", pageToken) return c } +// Types sets the optional parameter "types": (Optional) types filter +// separate by comma, valid values are: SUBNETWORK, RESERVED, PEER_USED, +// PEER_RESERVED, REMOTE_USED, REMOTE_RESERVED. +func (c *NetworksListIpAddressesCall) Types(types string) *NetworksListIpAddressesCall { + c.urlParams_.Set("types", types) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsListNodesCall) Fields(s ...googleapi.Field) *NodeGroupsListNodesCall { +func (c *NetworksListIpAddressesCall) Fields(s ...googleapi.Field) *NetworksListIpAddressesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworksListIpAddressesCall) IfNoneMatch(entityTag string) *NetworksListIpAddressesCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsListNodesCall) Context(ctx context.Context) *NodeGroupsListNodesCall { +func (c *NetworksListIpAddressesCall) Context(ctx context.Context) *NetworksListIpAddressesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeGroupsListNodesCall) Header() http.Header { +func (c *NetworksListIpAddressesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksListIpAddressesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/listIpAddresses") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.listNodes" call. -// Exactly one of *NodeGroupsListNodes or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NodeGroupsListNodes.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.networks.listIpAddresses" call. +// Exactly one of *IpAddressesList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *IpAddressesList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsListNodes, error) { +func (c *NetworksListIpAddressesCall) Do(opts ...googleapi.CallOption) (*IpAddressesList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -93972,7 +97624,7 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeGroupsListNodes{ + ret := &IpAddressesList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -93984,13 +97636,12 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL } return ret, nil // { - // "description": "Lists nodes in the node group.", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.listNodes", + // "description": "Lists the internal IP addresses in the specified network.", + // "httpMethod": "GET", + // "id": "compute.networks.listIpAddresses", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "network" // ], // "parameters": { // "filter": { @@ -94006,10 +97657,10 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL // "minimum": "0", // "type": "integer" // }, - // "nodeGroup": { - // "description": "Name of the NodeGroup resource whose nodes you want to list.", + // "network": { + // "description": "Name of the network for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -94030,17 +97681,15 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "types": { + // "description": "(Optional) types filter separate by comma, valid values are: SUBNETWORK, RESERVED, PEER_USED, PEER_RESERVED, REMOTE_USED, REMOTE_RESERVED.", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", + // "path": "{project}/global/networks/{network}/listIpAddresses", // "response": { - // "$ref": "NodeGroupsListNodes" + // "$ref": "IpAddressesList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -94054,7 +97703,7 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *NodeGroupsListNodesCall) Pages(ctx context.Context, f func(*NodeGroupsListNodes) error) error { +func (c *NetworksListIpAddressesCall) Pages(ctx context.Context, f func(*IpAddressesList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -94072,272 +97721,193 @@ func (c *NodeGroupsListNodesCall) Pages(ctx context.Context, f func(*NodeGroupsL } } -// method id "compute.nodeGroups.setIamPolicy": +// method id "compute.networks.listIpOwners": -type NodeGroupsSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksListIpOwnersCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *NodeGroupsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *NodeGroupsSetIamPolicyCall { - c := &NodeGroupsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListIpOwners: Lists the internal IP owners in the specified network. +func (r *NetworksService) ListIpOwners(project string, network string) *NetworksListIpOwnersCall { + c := &NetworksListIpOwnersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + c.network = network return c } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NodeGroupsSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NetworksListIpOwnersCall) Filter(filter string) *NetworksListIpOwnersCall { + c.urlParams_.Set("filter", filter) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *NodeGroupsSetIamPolicyCall) Context(ctx context.Context) *NodeGroupsSetIamPolicyCall { - c.ctx_ = ctx +// IpCidrRange sets the optional parameter "ipCidrRange": (Optional) IP +// CIDR range filter, example: "10.128.10.0/30". +func (c *NetworksListIpOwnersCall) IpCidrRange(ipCidrRange string) *NetworksListIpOwnersCall { + c.urlParams_.Set("ipCidrRange", ipCidrRange) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NodeGroupsSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NetworksListIpOwnersCall) MaxResults(maxResults int64) *NetworksListIpOwnersCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c } -func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NetworksListIpOwnersCall) OrderBy(orderBy string) *NetworksListIpOwnersCall { + c.urlParams_.Set("orderBy", orderBy) + return c } -// Do executes the "compute.nodeGroups.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.setIamPolicy", - // "parameterOrder": [ - // "project", - // "zone", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", - // "request": { - // "$ref": "ZoneSetPolicyRequest" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - +// OwnerProjects sets the optional parameter "ownerProjects": (Optional) +// Project IDs filter, example: "project-1,project-2". +func (c *NetworksListIpOwnersCall) OwnerProjects(ownerProjects string) *NetworksListIpOwnersCall { + c.urlParams_.Set("ownerProjects", ownerProjects) + return c } -// method id "compute.nodeGroups.setNodeTemplate": +// OwnerTypes sets the optional parameter "ownerTypes": (Optional) Owner +// types filter, example: "instance,forwardingRule". +func (c *NetworksListIpOwnersCall) OwnerTypes(ownerTypes string) *NetworksListIpOwnersCall { + c.urlParams_.Set("ownerTypes", ownerTypes) + return c +} -type NodeGroupsSetNodeTemplateCall struct { - s *Service - project string - zone string - nodeGroup string - nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NetworksListIpOwnersCall) PageToken(pageToken string) *NetworksListIpOwnersCall { + c.urlParams_.Set("pageToken", pageToken) + return c } -// SetNodeTemplate: Updates the node template of the node group. 
-func (r *NodeGroupsService) SetNodeTemplate(project string, zone string, nodeGroup string, nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest) *NodeGroupsSetNodeTemplateCall { - c := &NodeGroupsSetNodeTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.nodeGroup = nodeGroup - c.nodegroupssetnodetemplaterequest = nodegroupssetnodetemplaterequest +// SubnetName sets the optional parameter "subnetName": (Optional) +// Subnetwork name filter. +func (c *NetworksListIpOwnersCall) SubnetName(subnetName string) *NetworksListIpOwnersCall { + c.urlParams_.Set("subnetName", subnetName) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsSetNodeTemplateCall) RequestId(requestId string) *NodeGroupsSetNodeTemplateCall { - c.urlParams_.Set("requestId", requestId) +// SubnetRegion sets the optional parameter "subnetRegion": (Optional) +// Subnetwork region filter. +func (c *NetworksListIpOwnersCall) SubnetRegion(subnetRegion string) *NetworksListIpOwnersCall { + c.urlParams_.Set("subnetRegion", subnetRegion) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsSetNodeTemplateCall) Fields(s ...googleapi.Field) *NodeGroupsSetNodeTemplateCall { +func (c *NetworksListIpOwnersCall) Fields(s ...googleapi.Field) *NetworksListIpOwnersCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworksListIpOwnersCall) IfNoneMatch(entityTag string) *NetworksListIpOwnersCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsSetNodeTemplateCall) Context(ctx context.Context) *NodeGroupsSetNodeTemplateCall { +func (c *NetworksListIpOwnersCall) Context(ctx context.Context) *NetworksListIpOwnersCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeGroupsSetNodeTemplateCall) Header() http.Header { +func (c *NetworksListIpOwnersCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksListIpOwnersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupssetnodetemplaterequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/listIpOwners") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.setNodeTemplate" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.networks.listIpOwners" call. +// Exactly one of *IpOwnerList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *IpOwnerList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -94356,7 +97926,7 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &IpOwnerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -94368,182 +97938,59 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Updates the node template of the node group.", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.setNodeTemplate", + // "description": "Lists the internal IP owners in the specified network.", + // "httpMethod": "GET", + // "id": "compute.networks.listIpOwners", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "network" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "ipCidrRange": { + // "description": "(Optional) IP CIDR range filter, example: \"10.128.10.0/30\".", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "network": { + // "description": "Name of the network to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "ownerProjects": { + // "description": "(Optional) Project IDs filter, example: \"project-1,project-2\".", + // "location": "query", // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", - // "request": { - // "$ref": "NodeGroupsSetNodeTemplateRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.nodeGroups.testIamPermissions": - -type NodeGroupsTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *NodeGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeGroupsTestIamPermissionsCall { - c := &NodeGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NodeGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeGroupsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NodeGroupsTestIamPermissionsCall) Context(ctx context.Context) *NodeGroupsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *NodeGroupsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.nodeGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.testIamPermissions", - // "parameterOrder": [ - // "project", - // "zone", - // "resource" - // ], - // "parameters": { + // }, + // "ownerTypes": { + // "description": "(Optional) Owner types filter, example: \"instance,forwardingRule\".", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -94551,27 +97998,22 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "subnetName": { + // "description": "(Optional) Subnetwork name filter.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", + // "subnetRegion": { + // "description": "(Optional) Subnetwork region filter.", + // "location": "query", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/networks/{network}/listIpOwners", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "IpOwnerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -94582,21 +98024,56 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te } -// method id "compute.nodeTemplates.aggregatedList": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworksListIpOwnersCall) Pages(ctx context.Context, f func(*IpOwnerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NodeTemplatesAggregatedListCall struct { +// method id "compute.networks.listPeeringRoutes": + +type NetworksListPeeringRoutesCall struct { s *Service project string + network string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of node templates. -func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggregatedListCall { - c := &NodeTemplatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListPeeringRoutes: Lists the peering routes exchanged over peering +// connection. +func (r *NetworksService) ListPeeringRoutes(project string, network string) *NetworksListPeeringRoutesCall { + c := &NetworksListPeeringRoutesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.network = network + return c +} + +// Direction sets the optional parameter "direction": The direction of +// the exchanged routes. +// +// Possible values: +// "INCOMING" +// "OUTGOING" +func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("direction", direction) return c } @@ -94622,7 +98099,7 @@ func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggr // explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *NodeTemplatesAggregatedListCall) Filter(filter string) *NodeTemplatesAggregatedListCall { +func (c *NetworksListPeeringRoutesCall) Filter(filter string) *NetworksListPeeringRoutesCall { c.urlParams_.Set("filter", filter) return c } @@ -94633,7 +98110,7 @@ func (c *NodeTemplatesAggregatedListCall) Filter(filter string) *NodeTemplatesAg // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NodeTemplatesAggregatedListCall) MaxResults(maxResults int64) *NodeTemplatesAggregatedListCall { +func (c *NetworksListPeeringRoutesCall) MaxResults(maxResults int64) *NetworksListPeeringRoutesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -94650,7 +98127,7 @@ func (c *NodeTemplatesAggregatedListCall) MaxResults(maxResults int64) *NodeTemp // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *NodeTemplatesAggregatedListCall) OrderBy(orderBy string) *NodeTemplatesAggregatedListCall { +func (c *NetworksListPeeringRoutesCall) OrderBy(orderBy string) *NetworksListPeeringRoutesCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -94658,15 +98135,30 @@ func (c *NodeTemplatesAggregatedListCall) OrderBy(orderBy string) *NodeTemplates // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NodeTemplatesAggregatedListCall) PageToken(pageToken string) *NodeTemplatesAggregatedListCall { +func (c *NetworksListPeeringRoutesCall) PageToken(pageToken string) *NetworksListPeeringRoutesCall { c.urlParams_.Set("pageToken", pageToken) return c } +// PeeringName sets the optional parameter "peeringName": The response +// will show routes exchanged over the given peering connection. +func (c *NetworksListPeeringRoutesCall) PeeringName(peeringName string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("peeringName", peeringName) + return c +} + +// Region sets the optional parameter "region": The region of the +// request. The response will include all subnet routes, static routes +// and dynamic routes in the region. +func (c *NetworksListPeeringRoutesCall) Region(region string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("region", region) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTemplatesAggregatedListCall { +func (c *NetworksListPeeringRoutesCall) Fields(s ...googleapi.Field) *NetworksListPeeringRoutesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -94676,7 +98168,7 @@ func (c *NodeTemplatesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTemp // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *NodeTemplatesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTemplatesAggregatedListCall { +func (c *NetworksListPeeringRoutesCall) IfNoneMatch(entityTag string) *NetworksListPeeringRoutesCall { c.ifNoneMatch_ = entityTag return c } @@ -94684,21 +98176,21 @@ func (c *NodeTemplatesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTem // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesAggregatedListCall) Context(ctx context.Context) *NodeTemplatesAggregatedListCall { +func (c *NetworksListPeeringRoutesCall) Context(ctx context.Context) *NetworksListPeeringRoutesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesAggregatedListCall) Header() http.Header { +func (c *NetworksListPeeringRoutesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -94710,7 +98202,7 @@ func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/listPeeringRoutes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -94719,18 +98211,19 @@ func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.aggregatedList" call. -// Exactly one of *NodeTemplateAggregatedList or error will be non-nil. +// Do executes the "compute.networks.listPeeringRoutes" call. +// Exactly one of *ExchangedPeeringRoutesList or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *NodeTemplateAggregatedList.ServerResponse.Header or (if a response +// *ExchangedPeeringRoutesList.ServerResponse.Header or (if a response // was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateAggregatedList, error) { +func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*ExchangedPeeringRoutesList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -94749,7 +98242,7 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTemplateAggregatedList{ + ret := &ExchangedPeeringRoutesList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -94761,15 +98254,29 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod } return ret, nil // { - // "description": "Retrieves an aggregated list of node templates.", + // "description": "Lists the peering routes exchanged over peering connection.", // "httpMethod": "GET", - // "id": "compute.nodeTemplates.aggregatedList", + // "id": "compute.networks.listPeeringRoutes", // "parameterOrder": [ - // "project" + // "project", + // "network" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "direction": { + // "description": "The direction of the exchanged routes.", + // "enum": [ + // "INCOMING", + // "OUTGOING" + // ], + // "enumDescriptions": [ + // "", + // "" + // ], + // "location": "query", + // "type": "string" + // }, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -94781,6 +98288,13 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod // "minimum": "0", // "type": "integer" // }, + // "network": { + // "description": "Name of the network for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", @@ -94791,17 +98305,27 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod // "location": "query", // "type": "string" // }, + // "peeringName": { + // "description": "The response will show routes exchanged over the given peering connection.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "The region of the request. The response will include all subnet routes, static routes and dynamic routes in the region.", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/nodeTemplates", + // "path": "{project}/global/networks/{network}/listPeeringRoutes", // "response": { - // "$ref": "NodeTemplateAggregatedList" + // "$ref": "ExchangedPeeringRoutesList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -94815,7 +98339,7 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NodeTemplatesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTemplateAggregatedList) error) error { +func (c *NetworksListPeeringRoutesCall) Pages(ctx context.Context, f func(*ExchangedPeeringRoutesList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -94833,24 +98357,26 @@ func (c *NodeTemplatesAggregatedListCall) Pages(ctx context.Context, f func(*Nod } } -// method id "compute.nodeTemplates.delete": +// method id "compute.networks.patch": -type NodeTemplatesDeleteCall struct { - s *Service - project string - region string - nodeTemplate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksPatchCall struct { + s *Service + project string + network string + network2 *Network + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified NodeTemplate resource. 
-func (r *NodeTemplatesService) Delete(project string, region string, nodeTemplate string) *NodeTemplatesDeleteCall { - c := &NodeTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified network with the data included in the +// request. Only the following fields can be modified: +// routingConfig.routingMode. +func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { + c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.nodeTemplate = nodeTemplate + c.network = network + c.network2 = network2 return c } @@ -94868,7 +98394,7 @@ func (r *NodeTemplatesService) Delete(project string, region string, nodeTemplat // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeTemplatesDeleteCall) RequestId(requestId string) *NodeTemplatesDeleteCall { +func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -94876,7 +98402,7 @@ func (c *NodeTemplatesDeleteCall) RequestId(requestId string) *NodeTemplatesDele // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesDeleteCall) Fields(s ...googleapi.Field) *NodeTemplatesDeleteCall { +func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -94884,52 +98410,56 @@ func (c *NodeTemplatesDeleteCall) Fields(s ...googleapi.Field) *NodeTemplatesDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesDeleteCall) Context(ctx context.Context) *NodeTemplatesDeleteCall { +func (c *NetworksPatchCall) Context(ctx context.Context) *NetworksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesDeleteCall) Header() http.Header { +func (c *NetworksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "nodeTemplate": c.nodeTemplate, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.delete" call. +// Do executes the "compute.networks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -94960,19 +98490,18 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified NodeTemplate resource.", - // "httpMethod": "DELETE", - // "id": "compute.nodeTemplates.delete", + // "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", + // "httpMethod": "PATCH", + // "id": "compute.networks.patch", // "parameterOrder": [ // "project", - // "region", - // "nodeTemplate" + // "network" // ], // "parameters": { - // "nodeTemplate": { - // "description": "Name of the NodeTemplate resource to delete.", + // "network": { + // "description": "Name of the network to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -94983,20 +98512,16 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + // "path": "{project}/global/networks/{network}", + // "request": { + // "$ref": "Network" + // }, // "response": { // "$ref": "Operation" // }, @@ -95008,99 +98533,107 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.nodeTemplates.get": +// method id "compute.networks.removePeering": -type NodeTemplatesGetCall struct { - s *Service - project string - region string - nodeTemplate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworksRemovePeeringCall struct { + s *Service + project string + network string + networksremovepeeringrequest *NetworksRemovePeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified node template. Gets a list of available -// node templates by making a list() request. -func (r *NodeTemplatesService) Get(project string, region string, nodeTemplate string) *NodeTemplatesGetCall { - c := &NodeTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemovePeering: Removes a peering from the specified network. +func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { + c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.nodeTemplate = nodeTemplate + c.network = network + c.networksremovepeeringrequest = networksremovepeeringrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemovePeeringCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesGetCall) Fields(s ...googleapi.Field) *NodeTemplatesGetCall { +func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemovePeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
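A hedged sketch of invoking the removePeering method defined above; svc is assumed to be an authenticated *compute.Service, the project, network, and peering names are placeholders, and the request ID is an arbitrary valid UUID used only to make retries idempotent.

package sketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// removePeering removes the named peering from a network and returns the
// long-running Operation tracking the change. All names are placeholders.
func removePeering(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	req := &compute.NetworksRemovePeeringRequest{Name: "my-peering"}
	return svc.Networks.RemovePeering("my-project", "my-network", req).
		// Any valid, non-zero UUID; the server ignores duplicate retries with the same ID.
		RequestId("123e4567-e89b-42d3-a456-426614174000").
		Context(ctx).
		Do()
}

The returned Operation is global, so callers typically poll it (for example via GlobalOperations.Get) until its status reports DONE.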
-func (c *NodeTemplatesGetCall) IfNoneMatch(entityTag string) *NodeTemplatesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesGetCall) Context(ctx context.Context) *NodeTemplatesGetCall { +func (c *NetworksRemovePeeringCall) Context(ctx context.Context) *NetworksRemovePeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesGetCall) Header() http.Header { +func (c *NetworksRemovePeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksremovepeeringrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "nodeTemplate": c.nodeTemplate, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.get" call. -// Exactly one of *NodeTemplate or error will be non-nil. Any non-2xx +// Do executes the "compute.networks.removePeering" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *NodeTemplate.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, error) { +func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -95119,7 +98652,7 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTemplate{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -95131,19 +98664,18 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, } return ret, nil // { - // "description": "Returns the specified node template. 
Gets a list of available node templates by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.nodeTemplates.get", + // "description": "Removes a peering from the specified network.", + // "httpMethod": "POST", + // "id": "compute.networks.removePeering", // "parameterOrder": [ // "project", - // "region", - // "nodeTemplate" + // "network" // ], // "parameters": { - // "nodeTemplate": { - // "description": "Name of the node template to return.", + // "network": { + // "description": "Name of the network resource to remove peering from.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -95154,213 +98686,44 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", - // "response": { - // "$ref": "NodeTemplate" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.nodeTemplates.getIamPolicy": - -type NodeTemplatesGetIamPolicyCall struct { - s *Service - project string - region string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *NodeTemplatesService) GetIamPolicy(project string, region string, resource string) *NodeTemplatesGetIamPolicyCall { - c := &NodeTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NodeTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *NodeTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeTemplatesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NodeTemplatesGetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NodeTemplatesGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.nodeTemplates.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.nodeTemplates.getIamPolicy", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } + // "path": "{project}/global/networks/{network}/removePeering", + // "request": { + // "$ref": "NetworksRemovePeeringRequest" // }, - // "path": "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeTemplates.insert": +// method id "compute.networks.switchToCustomMode": -type NodeTemplatesInsertCall struct { - s *Service - project string - region string - nodetemplate *NodeTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksSwitchToCustomModeCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a NodeTemplate resource in the specified project -// using the data included in the request. -func (r *NodeTemplatesService) Insert(project string, region string, nodetemplate *NodeTemplate) *NodeTemplatesInsertCall { - c := &NodeTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SwitchToCustomMode: Switches the network mode from auto subnet mode +// to custom subnet mode. +func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { + c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.nodetemplate = nodetemplate + c.network = network return c } @@ -95378,7 +98741,7 @@ func (r *NodeTemplatesService) Insert(project string, region string, nodetemplat // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeTemplatesInsertCall) RequestId(requestId string) *NodeTemplatesInsertCall { +func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSwitchToCustomModeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -95386,7 +98749,7 @@ func (c *NodeTemplatesInsertCall) RequestId(requestId string) *NodeTemplatesInse // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NodeTemplatesInsertCall) Fields(s ...googleapi.Field) *NodeTemplatesInsertCall { +func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -95394,35 +98757,30 @@ func (c *NodeTemplatesInsertCall) Fields(s ...googleapi.Field) *NodeTemplatesIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesInsertCall) Context(ctx context.Context) *NodeTemplatesInsertCall { +func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesInsertCall) Header() http.Header { +func (c *NetworksSwitchToCustomModeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodetemplate) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -95431,19 +98789,19 @@ func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.insert" call. +// Do executes the "compute.networks.switchToCustomMode" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -95474,25 +98832,25 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a NodeTemplate resource in the specified project using the data included in the request.", + // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", // "httpMethod": "POST", - // "id": "compute.nodeTemplates.insert", + // "id": "compute.networks.switchToCustomMode", // "parameterOrder": [ // "project", - // "region" + // "network" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "network": { + // "description": "Name of the network to be updated.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -95502,10 +98860,7 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates", - // "request": { - // "$ref": "NodeTemplate" - // }, + // "path": "{project}/global/networks/{network}/switchToCustomMode", // "response": { // "$ref": "Operation" // }, @@ -95517,159 +98872,89 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.nodeTemplates.list": +// method id "compute.networks.testIamPermissions": -type NodeTemplatesListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworksTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of node templates available to the specified -// project. -func (r *NodeTemplatesService) List(project string, region string) *NodeTemplatesListCall { - c := &NodeTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *NetworksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworksTestIamPermissionsCall { + c := &NetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. 
-// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NodeTemplatesListCall) Filter(filter string) *NodeTemplatesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NodeTemplatesListCall) MaxResults(maxResults int64) *NodeTemplatesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NodeTemplatesListCall) OrderBy(orderBy string) *NodeTemplatesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NodeTemplatesListCall) PageToken(pageToken string) *NodeTemplatesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesListCall) Fields(s ...googleapi.Field) *NodeTemplatesListCall { +func (c *NetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
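A hedged sketch of the networks.testIamPermissions call added in this hunk; svc is assumed to be an authenticated *compute.Service, and the project, network name, and permission strings are illustrative assumptions.

package sketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// grantedNetworkPermissions returns the subset of the requested permissions
// that the caller actually holds on the given network.
func grantedNetworkPermissions(ctx context.Context, svc *compute.Service) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		// Illustrative permission names; substitute the ones you need to check.
		Permissions: []string{"compute.networks.get", "compute.networks.updatePolicy"},
	}
	resp, err := svc.Networks.TestIamPermissions("my-project", "my-network", req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}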
-func (c *NodeTemplatesListCall) IfNoneMatch(entityTag string) *NodeTemplatesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesListCall) Context(ctx context.Context) *NodeTemplatesListCall { +func (c *NetworksTestIamPermissionsCall) Context(ctx context.Context) *NetworksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesListCall) Header() http.Header { +func (c *NetworksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.list" call. -// Exactly one of *NodeTemplateList or error will be non-nil. Any +// Do executes the "compute.networks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *NodeTemplateList.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateList, error) { +func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -95688,7 +98973,7 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTemplateList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -95700,37 +98985,14 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL } return ret, nil // { - // "description": "Retrieves a list of node templates available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.nodeTemplates.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.networks.testIamPermissions", // "parameterOrder": [ // "project", - // "region" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -95738,17 +99000,20 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates", + // "path": "{project}/global/networks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "NodeTemplateList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -95759,55 +99024,53 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NodeTemplatesListCall) Pages(ctx context.Context, f func(*NodeTemplateList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.nodeTemplates.setIamPolicy": +// method id "compute.networks.updatePeering": -type NodeTemplatesSetIamPolicyCall struct { - s *Service - project string - region string - resource string - regionsetpolicyrequest *RegionSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksUpdatePeeringCall struct { + s *Service + project string + network string + networksupdatepeeringrequest *NetworksUpdatePeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *NodeTemplatesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NodeTemplatesSetIamPolicyCall { - c := &NodeTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdatePeering: Updates the specified network peering with the data +// included in the request Only the following fields can be modified: +// NetworkPeering.export_custom_routes, and +// NetworkPeering.import_custom_routes +func (r *NetworksService) UpdatePeering(project string, network string, networksupdatepeeringrequest *NetworksUpdatePeeringRequest) *NetworksUpdatePeeringCall { + c := &NetworksUpdatePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.regionsetpolicyrequest = regionsetpolicyrequest + c.network = network + c.networksupdatepeeringrequest = networksupdatepeeringrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksUpdatePeeringCall) RequestId(requestId string) *NetworksUpdatePeeringCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesSetIamPolicyCall { +func (c *NetworksUpdatePeeringCall) Fields(s ...googleapi.Field) *NetworksUpdatePeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -95815,57 +99078,56 @@ func (c *NodeTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTempla // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesSetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesSetIamPolicyCall { +func (c *NetworksUpdatePeeringCall) Context(ctx context.Context) *NetworksUpdatePeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesSetIamPolicyCall) Header() http.Header { +func (c *NetworksUpdatePeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksupdatepeeringrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/updatePeering") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.networks.updatePeering" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -95884,7 +99146,7 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -95896,43 +99158,40 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.nodeTemplates.setIamPolicy", + // "description": "Updates the specified network peering with the data included in the request Only the following fields can be modified: NetworkPeering.export_custom_routes, and NetworkPeering.import_custom_routes", + // "httpMethod": "PATCH", + // "id": "compute.networks.updatePeering", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "network" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "network": { + // "description": "Name of the network resource which the updated peering is belonging to.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", + // "path": "{project}/global/networks/{network}/updatePeering", // "request": { - // "$ref": "RegionSetPolicyRequest" + // "$ref": "NetworksUpdatePeeringRequest" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -95942,34 +99201,52 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } -// method id "compute.nodeTemplates.testIamPermissions": +// method id "compute.nodeGroups.addNodes": -type NodeTemplatesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsAddNodesCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *NodeTemplatesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeTemplatesTestIamPermissionsCall { - c := &NodeTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddNodes: Adds specified number of nodes to the node group. +func (r *NodeGroupsService) AddNodes(project string, zone string, nodeGroup string, nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest) *NodeGroupsAddNodesCall { + c := &NodeGroupsAddNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.nodeGroup = nodeGroup + c.nodegroupsaddnodesrequest = nodegroupsaddnodesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsAddNodesCall) RequestId(requestId string) *NodeGroupsAddNodesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NodeTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeTemplatesTestIamPermissionsCall { +func (c *NodeGroupsAddNodesCall) Fields(s ...googleapi.Field) *NodeGroupsAddNodesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -95977,35 +99254,35 @@ func (c *NodeTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Node // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesTestIamPermissionsCall) Context(ctx context.Context) *NodeTemplatesTestIamPermissionsCall { +func (c *NodeGroupsAddNodesCall) Context(ctx context.Context) *NodeGroupsAddNodesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesTestIamPermissionsCall) Header() http.Header { +func (c *NodeGroupsAddNodesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsaddnodesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -96013,21 +99290,21 @@ func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Respo } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.nodeGroups.addNodes" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -96046,7 +99323,7 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -96058,15 +99335,22 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Adds specified number of nodes to the node group.", // "httpMethod": "POST", - // "id": "compute.nodeTemplates.testIamPermissions", + // "id": "compute.nodeGroups.addNodes", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "zone", + // "nodeGroup" // ], // "parameters": { + // "nodeGroup": { + // "description": "Name of the NodeGroup resource.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -96074,40 +99358,37 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "NodeGroupsAddNodesRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeTypes.aggregatedList": +// method id "compute.nodeGroups.aggregatedList": -type NodeTypesAggregatedListCall struct { +type NodeGroupsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -96116,9 +99397,10 @@ type NodeTypesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of node types. -func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedListCall { - c := &NodeTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of node groups. Note: +// use nodeGroups.listNodes for more details about each group. +func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregatedListCall { + c := &NodeGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -96145,7 +99427,7 @@ func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedLi // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *NodeTypesAggregatedListCall) Filter(filter string) *NodeTypesAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) Filter(filter string) *NodeGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -96156,7 +99438,7 @@ func (c *NodeTypesAggregatedListCall) Filter(filter string) *NodeTypesAggregated // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NodeTypesAggregatedListCall) MaxResults(maxResults int64) *NodeTypesAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) MaxResults(maxResults int64) *NodeGroupsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -96173,7 +99455,7 @@ func (c *NodeTypesAggregatedListCall) MaxResults(maxResults int64) *NodeTypesAgg // // Currently, only sorting by name or creationTimestamp desc is // supported. 
-func (c *NodeTypesAggregatedListCall) OrderBy(orderBy string) *NodeTypesAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) OrderBy(orderBy string) *NodeGroupsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -96181,7 +99463,7 @@ func (c *NodeTypesAggregatedListCall) OrderBy(orderBy string) *NodeTypesAggregat // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -96189,7 +99471,7 @@ func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTypesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTypesAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NodeGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -96199,7 +99481,7 @@ func (c *NodeTypesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTypesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeTypesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTypesAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NodeGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -96207,21 +99489,21 @@ func (c *NodeTypesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTypesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTypesAggregatedListCall) Context(ctx context.Context) *NodeTypesAggregatedListCall { +func (c *NodeGroupsAggregatedListCall) Context(ctx context.Context) *NodeGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTypesAggregatedListCall) Header() http.Header { +func (c *NodeGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -96233,7 +99515,7 @@ func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeGroups") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -96246,14 +99528,14 @@ func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTypes.aggregatedList" call. -// Exactly one of *NodeTypeAggregatedList or error will be non-nil. Any +// Do executes the "compute.nodeGroups.aggregatedList" call. +// Exactly one of *NodeGroupAggregatedList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *NodeTypeAggregatedList.ServerResponse.Header or (if a response was +// *NodeGroupAggregatedList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTypeAggregatedList, error) { +func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -96272,7 +99554,7 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTypeAggregatedList{ + ret := &NodeGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -96284,9 +99566,9 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of node types.", + // "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group.", // "httpMethod": "GET", - // "id": "compute.nodeTypes.aggregatedList", + // "id": "compute.nodeGroups.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -96322,9 +99604,9 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp // "type": "string" // } // }, - // "path": "{project}/aggregated/nodeTypes", + // "path": "{project}/aggregated/nodeGroups", // "response": { - // "$ref": "NodeTypeAggregatedList" + // "$ref": "NodeGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -96338,7 +99620,7 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
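// Example (sketch): walking every page of the aggregated node-group list with
// Pages, using the filter syntax described in the Filter documentation above.
// The import path and the Items map on NodeGroupAggregatedList (assumed to be
// keyed by scope, e.g. "zones/us-central1-a", with a NodeGroups slice on each
// scoped list) are assumptions about this generated package version.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta"
)

func printAllNodeGroups(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.NodeGroups.AggregatedList(project).
		Filter("name != example-group"). // same syntax as the Filter doc above
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.NodeGroupAggregatedList) error {
		for scope, scoped := range page.Items { // assumed: per-zone scoped lists
			fmt.Println(scope, len(scoped.NodeGroups)) // assumed field on the scoped list
		}
		return nil // a non-nil error here halts the iteration
	})
}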
-func (c *NodeTypesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTypeAggregatedList) error) error { +func (c *NodeGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NodeGroupAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -96356,33 +99638,394 @@ func (c *NodeTypesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTyp } } -// method id "compute.nodeTypes.get": +// method id "compute.nodeGroups.delete": -type NodeTypesGetCall struct { - s *Service - project string - zone string - nodeType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NodeGroupsDeleteCall struct { + s *Service + project string + zone string + nodeGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified node type. Gets a list of available node -// types by making a list() request. -func (r *NodeTypesService) Get(project string, zone string, nodeType string) *NodeTypesGetCall { - c := &NodeTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified NodeGroup resource. +func (r *NodeGroupsService) Delete(project string, zone string, nodeGroup string) *NodeGroupsDeleteCall { + c := &NodeGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.nodeType = nodeType + c.nodeGroup = nodeGroup return c } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NodeTypesGetCall) Fields(s ...googleapi.Field) *NodeTypesGetCall { +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsDeleteCall) RequestId(requestId string) *NodeGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeGroupsDeleteCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeGroupsDeleteCall) Context(ctx context.Context) *NodeGroupsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
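// Example (sketch): deleting a node group and polling the returned zonal
// Operation until it reports DONE. The ZoneOperations.Get call and the
// Operation.Status and Operation.Name fields are assumptions about this
// generated package version; a production caller would also check op.Error
// and use backoff instead of a fixed sleep.
package example

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v0.beta"
)

func deleteNodeGroup(ctx context.Context, svc *compute.Service, project, zone, nodeGroup string) error {
	op, err := svc.NodeGroups.Delete(project, zone, nodeGroup).Context(ctx).Do()
	if err != nil {
		return err
	}
	for op.Status != "DONE" { // assumed status values: PENDING, RUNNING, DONE
		time.Sleep(2 * time.Second)
		op, err = svc.ZoneOperations.Get(project, zone, op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	fmt.Println("node group deleted")
	return nil
}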
+func (c *NodeGroupsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeGroups.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified NodeGroup resource.", + // "httpMethod": "DELETE", + // "id": "compute.nodeGroups.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "nodeGroup" + // ], + // "parameters": { + // "nodeGroup": { + // "description": "Name of the NodeGroup resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.nodeGroups.deleteNodes": + +type NodeGroupsDeleteNodesCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteNodes: Deletes specified nodes from the node group. +func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { + c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.nodeGroup = nodeGroup + c.nodegroupsdeletenodesrequest = nodegroupsdeletenodesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsDeleteNodesCall) RequestId(requestId string) *NodeGroupsDeleteNodesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeGroupsDeleteNodesCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteNodesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeGroupsDeleteNodesCall) Context(ctx context.Context) *NodeGroupsDeleteNodesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
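// Example (sketch): removing specific nodes from a node group with
// compute.nodeGroups.deleteNodes. The Nodes field on
// NodeGroupsDeleteNodesRequest (a list of node names) and the import path are
// assumptions about this generated package version.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta"
)

func deleteNodes(ctx context.Context, svc *compute.Service, project, zone, nodeGroup string, nodeNames []string) error {
	req := &compute.NodeGroupsDeleteNodesRequest{
		Nodes: nodeNames, // assumed field: names of the nodes to delete
	}
	op, err := svc.NodeGroups.DeleteNodes(project, zone, nodeGroup, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("deleteNodes started operation %s\n", op.Name)
	return nil
}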
+func (c *NodeGroupsDeleteNodesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsdeletenodesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeGroups.deleteNodes" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes specified nodes from the node group.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.deleteNodes", + // "parameterOrder": [ + // "project", + // "zone", + // "nodeGroup" + // ], + // "parameters": { + // "nodeGroup": { + // "description": "Name of the NodeGroup resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", + // "request": { + // "$ref": "NodeGroupsDeleteNodesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.nodeGroups.get": + +type NodeGroupsGetCall struct { + s *Service + project string + zone string + nodeGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified NodeGroup. Get a list of available +// NodeGroups by making a list() request. Note: the "nodes" field should +// not be used. Use nodeGroups.listNodes instead. +func (r *NodeGroupsService) Get(project string, zone string, nodeGroup string) *NodeGroupsGetCall { + c := &NodeGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.nodeGroup = nodeGroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeGroupsGetCall) Fields(s ...googleapi.Field) *NodeGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -96392,7 +100035,7 @@ func (c *NodeTypesGetCall) Fields(s ...googleapi.Field) *NodeTypesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeTypesGetCall) IfNoneMatch(entityTag string) *NodeTypesGetCall { +func (c *NodeGroupsGetCall) IfNoneMatch(entityTag string) *NodeGroupsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -96400,21 +100043,21 @@ func (c *NodeTypesGetCall) IfNoneMatch(entityTag string) *NodeTypesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTypesGetCall) Context(ctx context.Context) *NodeTypesGetCall { +func (c *NodeGroupsGetCall) Context(ctx context.Context) *NodeGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeTypesGetCall) Header() http.Header { +func (c *NodeGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -96426,7 +100069,7 @@ func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes/{nodeType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -96434,21 +100077,21 @@ func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeType": c.nodeType, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTypes.get" call. -// Exactly one of *NodeType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *NodeType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.nodeGroups.get" call. +// Exactly one of *NodeGroup or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NodeGroup.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { +func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -96467,7 +100110,7 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeType{ + ret := &NodeGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -96479,19 +100122,19 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { } return ret, nil // { - // "description": "Returns the specified node type. Gets a list of available node types by making a list() request.", + // "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. 
Use nodeGroups.listNodes instead.", // "httpMethod": "GET", - // "id": "compute.nodeTypes.get", + // "id": "compute.nodeGroups.get", // "parameterOrder": [ // "project", // "zone", - // "nodeType" + // "nodeGroup" // ], // "parameters": { - // "nodeType": { - // "description": "Name of the node type to return.", + // "nodeGroup": { + // "description": "Name of the node group to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -96510,9 +100153,9 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeTypes/{nodeType}", + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", // "response": { - // "$ref": "NodeType" + // "$ref": "NodeGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -96523,22 +100166,374 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { } -// method id "compute.nodeTypes.list": +// method id "compute.nodeGroups.getIamPolicy": -type NodeTypesListCall struct { +type NodeGroupsGetIamPolicyCall struct { s *Service project string zone string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of node types available to the specified -// project. -func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall { - c := &NodeTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *NodeGroupsService) GetIamPolicy(project string, zone string, resource string) *NodeGroupsGetIamPolicyCall { + c := &NodeGroupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeGroupsGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeGroupsGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeGroupsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeGroupsGetIamPolicyCall) Context(ctx context.Context) *NodeGroupsGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
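// Example (sketch): a conditional re-fetch of a NodeGroup using IfNoneMatch
// and googleapi.IsNotModified, the pattern the Do documentation above refers
// to. The "Etag" header name and the promoted ServerResponse.Header field on
// NodeGroup are assumptions about this generated package version.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta"
	"google.golang.org/api/googleapi"
)

func refreshNodeGroup(ctx context.Context, svc *compute.Service, project, zone, name string, cached *compute.NodeGroup) (*compute.NodeGroup, error) {
	call := svc.NodeGroups.Get(project, zone, name).Context(ctx)
	if cached != nil {
		call = call.IfNoneMatch(cached.Header.Get("Etag")) // assumed header name
	}
	ng, err := call.Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("node group unchanged; keeping cached copy")
		return cached, nil
	}
	if err != nil {
		return nil, err
	}
	return ng, nil
}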
+func (c *NodeGroupsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeGroups.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.nodeGroups.getIamPolicy", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.nodeGroups.insert": + +type NodeGroupsInsertCall struct { + s *Service + project string + zone string + nodegroup *NodeGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a NodeGroup resource in the specified project using +// the data included in the request. +func (r *NodeGroupsService) Insert(project string, zone string, initialNodeCount int64, nodegroup *NodeGroup) *NodeGroupsInsertCall { + c := &NodeGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.urlParams_.Set("initialNodeCount", fmt.Sprint(initialNodeCount)) + c.nodegroup = nodegroup + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsInsertCall) RequestId(requestId string) *NodeGroupsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeGroupsInsertCall) Fields(s ...googleapi.Field) *NodeGroupsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
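// Example (sketch): creating a node group with compute.nodeGroups.insert. The
// initialNodeCount argument mirrors the required query parameter shown in the
// method metadata; the Name and NodeTemplate fields on NodeGroup are
// assumptions about this generated package version (NodeTemplate is expected
// to be the URL of an existing node template resource).
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta"
)

func createNodeGroup(ctx context.Context, svc *compute.Service, project, zone, templateURL string) error {
	ng := &compute.NodeGroup{
		Name:         "example-group", // assumed field
		NodeTemplate: templateURL,     // assumed field
	}
	op, err := svc.NodeGroups.Insert(project, zone, 3 /* initialNodeCount */, ng).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("insert started operation %s\n", op.Name)
	return nil
}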
+func (c *NodeGroupsInsertCall) Context(ctx context.Context) *NodeGroupsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeGroupsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroup) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeGroups.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a NodeGroup resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.insert", + // "parameterOrder": [ + // "project", + // "zone", + // "initialNodeCount" + // ], + // "parameters": { + // "initialNodeCount": { + // "description": "Initial count of nodes in the node group.", + // "format": "int32", + // "location": "query", + // "required": true, + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/nodeGroups", + // "request": { + // "$ref": "NodeGroup" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.nodeGroups.list": + +type NodeGroupsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of node groups available to the specified +// project. Note: use nodeGroups.listNodes for more details about each +// group. +func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCall { + c := &NodeGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone return c @@ -96566,7 +100561,7 @@ func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *NodeTypesListCall) Filter(filter string) *NodeTypesListCall { +func (c *NodeGroupsListCall) Filter(filter string) *NodeGroupsListCall { c.urlParams_.Set("filter", filter) return c } @@ -96577,7 +100572,7 @@ func (c *NodeTypesListCall) Filter(filter string) *NodeTypesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NodeTypesListCall) MaxResults(maxResults int64) *NodeTypesListCall { +func (c *NodeGroupsListCall) MaxResults(maxResults int64) *NodeGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -96594,7 +100589,7 @@ func (c *NodeTypesListCall) MaxResults(maxResults int64) *NodeTypesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *NodeTypesListCall) OrderBy(orderBy string) *NodeTypesListCall { +func (c *NodeGroupsListCall) OrderBy(orderBy string) *NodeGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -96602,7 +100597,7 @@ func (c *NodeTypesListCall) OrderBy(orderBy string) *NodeTypesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { +func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -96610,7 +100605,7 @@ func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTypesListCall) Fields(s ...googleapi.Field) *NodeTypesListCall { +func (c *NodeGroupsListCall) Fields(s ...googleapi.Field) *NodeGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -96620,7 +100615,7 @@ func (c *NodeTypesListCall) Fields(s ...googleapi.Field) *NodeTypesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeTypesListCall) IfNoneMatch(entityTag string) *NodeTypesListCall { +func (c *NodeGroupsListCall) IfNoneMatch(entityTag string) *NodeGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -96628,21 +100623,21 @@ func (c *NodeTypesListCall) IfNoneMatch(entityTag string) *NodeTypesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTypesListCall) Context(ctx context.Context) *NodeTypesListCall { +func (c *NodeGroupsListCall) Context(ctx context.Context) *NodeGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTypesListCall) Header() http.Header { +func (c *NodeGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -96654,7 +100649,7 @@ func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -96668,14 +100663,14 @@ func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTypes.list" call. -// Exactly one of *NodeTypeList or error will be non-nil. Any non-2xx +// Do executes the "compute.nodeGroups.list" call. +// Exactly one of *NodeGroupList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *NodeTypeList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, error) { +// *NodeGroupList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -96694,7 +100689,7 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTypeList{ + ret := &NodeGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -96706,9 +100701,9 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err } return ret, nil // { - // "description": "Retrieves a list of node types available to the specified project.", + // "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group.", // "httpMethod": "GET", - // "id": "compute.nodeTypes.list", + // "id": "compute.nodeGroups.list", // "parameterOrder": [ // "project", // "zone" @@ -96752,9 +100747,9 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeTypes", + // "path": "{project}/zones/{zone}/nodeGroups", // "response": { - // "$ref": "NodeTypeList" + // "$ref": "NodeGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -96768,7 +100763,7 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NodeTypesListCall) Pages(ctx context.Context, f func(*NodeTypeList) error) error { +func (c *NodeGroupsListCall) Pages(ctx context.Context, f func(*NodeGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -96786,49 +100781,94 @@ func (c *NodeTypesListCall) Pages(ctx context.Context, f func(*NodeTypeList) err } } -// method id "compute.organizationSecurityPolicies.addAssociation": +// method id "compute.nodeGroups.listNodes": -type OrganizationSecurityPoliciesAddAssociationCall struct { - s *Service - securityPolicy string - securitypolicyassociation *SecurityPolicyAssociation - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsListNodesCall struct { + s *Service + project string + zone string + nodeGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddAssociation: Inserts an association for the specified security -// policy. -func (r *OrganizationSecurityPoliciesService) AddAssociation(securityPolicy string, securitypolicyassociation *SecurityPolicyAssociation) *OrganizationSecurityPoliciesAddAssociationCall { - c := &OrganizationSecurityPoliciesAddAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.securityPolicy = securityPolicy - c.securitypolicyassociation = securitypolicyassociation +// ListNodes: Lists nodes in the node group. +func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup string) *NodeGroupsListNodesCall { + c := &NodeGroupsListNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.nodeGroup = nodeGroup return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *OrganizationSecurityPoliciesAddAssociationCall) RequestId(requestId string) *OrganizationSecurityPoliciesAddAssociationCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NodeGroupsListNodesCall) Filter(filter string) *NodeGroupsListNodesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NodeGroupsListNodesCall) MaxResults(maxResults int64) *NodeGroupsListNodesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *NodeGroupsListNodesCall) OrderBy(orderBy string) *NodeGroupsListNodesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NodeGroupsListNodesCall) PageToken(pageToken string) *NodeGroupsListNodesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationSecurityPoliciesAddAssociationCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesAddAssociationCall { +func (c *NodeGroupsListNodesCall) Fields(s ...googleapi.Field) *NodeGroupsListNodesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -96836,55 +100876,8260 @@ func (c *OrganizationSecurityPoliciesAddAssociationCall) Fields(s ...googleapi.F // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationSecurityPoliciesAddAssociationCall) Context(ctx context.Context) *OrganizationSecurityPoliciesAddAssociationCall { +func (c *NodeGroupsListNodesCall) Context(ctx context.Context) *NodeGroupsListNodesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationSecurityPoliciesAddAssociationCall) Header() http.Header { +func (c *NodeGroupsListNodesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationSecurityPoliciesAddAssociationCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyassociation) + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/addAssociation") + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeGroups.listNodes" call. +// Exactly one of *NodeGroupsListNodes or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NodeGroupsListNodes.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsListNodes, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
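// Editor's note (illustrative sketch, not part of the generated patch): the
// Filter, MaxResults, OrderBy and PageToken setters above compose onto the
// listNodes call before Do or Pages is invoked. A hedged example, assuming a
// *compute.Service named computeService, a context.Context named ctx, and
// hypothetical project, zone, and node-group names:
//
//	call := computeService.NodeGroups.
//		ListNodes("my-project", "us-central1-a", "my-node-group").
//		MaxResults(100)
//	err := call.Pages(ctx, func(page *compute.NodeGroupsListNodes) error {
//		for _, node := range page.Items {
//			fmt.Println(node.Name, node.NodeType)
//		}
//		return nil
//	})
//	if err != nil {
//		log.Fatal(err)
//	}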
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NodeGroupsListNodes{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists nodes in the node group.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.listNodes", + // "parameterOrder": [ + // "project", + // "zone", + // "nodeGroup" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "nodeGroup": { + // "description": "Name of the NodeGroup resource whose nodes you want to list.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", + // "response": { + // "$ref": "NodeGroupsListNodes" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeGroupsListNodesCall) Pages(ctx context.Context, f func(*NodeGroupsListNodes) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.nodeGroups.setAutoscalingPolicy": + +type NodeGroupsSetAutoscalingPolicyCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupssetautoscalingpolicyrequest *NodeGroupsSetAutoscalingPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetAutoscalingPolicy: Sets the autoscaling policy of the node group. +func (r *NodeGroupsService) SetAutoscalingPolicy(project string, zone string, nodeGroup string, nodegroupssetautoscalingpolicyrequest *NodeGroupsSetAutoscalingPolicyRequest) *NodeGroupsSetAutoscalingPolicyCall { + c := &NodeGroupsSetAutoscalingPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.nodeGroup = nodeGroup + c.nodegroupssetautoscalingpolicyrequest = nodegroupssetautoscalingpolicyrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsSetAutoscalingPolicyCall) RequestId(requestId string) *NodeGroupsSetAutoscalingPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeGroupsSetAutoscalingPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsSetAutoscalingPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeGroupsSetAutoscalingPolicyCall) Context(ctx context.Context) *NodeGroupsSetAutoscalingPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeGroupsSetAutoscalingPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeGroupsSetAutoscalingPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupssetautoscalingpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setAutoscalingPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeGroups.setAutoscalingPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeGroupsSetAutoscalingPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
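// Editor's note (illustrative sketch, not part of the generated patch): a
// hedged example of invoking setAutoscalingPolicy. It assumes a
// *compute.Service named computeService, a context.Context named ctx,
// hypothetical identifiers, and an autoscaling policy schema with
// Mode/MinNodes/MaxNodes fields; the returned value is a long-running
// Operation that callers typically poll until its Status is "DONE".
//
//	req := &compute.NodeGroupsSetAutoscalingPolicyRequest{
//		AutoscalingPolicy: &compute.NodeGroupAutoscalingPolicy{
//			Mode:     "ON",
//			MinNodes: 1,
//			MaxNodes: 10,
//		},
//	}
//	op, err := computeService.NodeGroups.
//		SetAutoscalingPolicy("my-project", "us-central1-a", "my-node-group", req).
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(op.Name, op.Status)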
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the autoscaling policy of the node group.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.setAutoscalingPolicy", + // "parameterOrder": [ + // "project", + // "zone", + // "nodeGroup" + // ], + // "parameters": { + // "nodeGroup": { + // "description": "Name of the NodeGroup resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setAutoscalingPolicy", + // "request": { + // "$ref": "NodeGroupsSetAutoscalingPolicyRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.nodeGroups.setIamPolicy": + +type NodeGroupsSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *NodeGroupsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *NodeGroupsSetIamPolicyCall { + c := &NodeGroupsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeGroupsSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeGroupsSetIamPolicyCall) Context(ctx context.Context) *NodeGroupsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeGroupsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeGroups.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.setIamPolicy", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", + // "request": { + // "$ref": "ZoneSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.nodeGroups.setNodeTemplate": + +type NodeGroupsSetNodeTemplateCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetNodeTemplate: Updates the node template of the node group. +func (r *NodeGroupsService) SetNodeTemplate(project string, zone string, nodeGroup string, nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest) *NodeGroupsSetNodeTemplateCall { + c := &NodeGroupsSetNodeTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.nodeGroup = nodeGroup + c.nodegroupssetnodetemplaterequest = nodegroupssetnodetemplaterequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsSetNodeTemplateCall) RequestId(requestId string) *NodeGroupsSetNodeTemplateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeGroupsSetNodeTemplateCall) Fields(s ...googleapi.Field) *NodeGroupsSetNodeTemplateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *NodeGroupsSetNodeTemplateCall) Context(ctx context.Context) *NodeGroupsSetNodeTemplateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeGroupsSetNodeTemplateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupssetnodetemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeGroups.setNodeTemplate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the node template of the node group.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.setNodeTemplate", + // "parameterOrder": [ + // "project", + // "zone", + // "nodeGroup" + // ], + // "parameters": { + // "nodeGroup": { + // "description": "Name of the NodeGroup resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", + // "request": { + // "$ref": "NodeGroupsSetNodeTemplateRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.nodeGroups.testIamPermissions": + +type NodeGroupsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *NodeGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeGroupsTestIamPermissionsCall { + c := &NodeGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeGroupsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeGroupsTestIamPermissionsCall) Context(ctx context.Context) *NodeGroupsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *NodeGroupsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.testIamPermissions", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // 
"$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.nodeTemplates.aggregatedList": + +type NodeTemplatesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of node templates. +func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggregatedListCall { + c := &NodeTemplatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NodeTemplatesAggregatedListCall) Filter(filter string) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NodeTemplatesAggregatedListCall) MaxResults(maxResults int64) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *NodeTemplatesAggregatedListCall) OrderBy(orderBy string) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NodeTemplatesAggregatedListCall) PageToken(pageToken string) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeTemplatesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTemplatesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTemplatesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeTemplatesAggregatedListCall) Context(ctx context.Context) *NodeTemplatesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeTemplatesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTemplates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeTemplates.aggregatedList" call. +// Exactly one of *NodeTemplateAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NodeTemplateAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
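// Editor's note (illustrative sketch, not part of the generated patch):
// aggregated list responses are keyed by scope (for example
// "regions/us-central1"), so iteration happens over a map of scoped lists
// rather than a flat slice. A hedged example, assuming a *compute.Service
// named computeService and a context.Context named ctx:
//
//	err := computeService.NodeTemplates.AggregatedList("my-project").
//		Pages(ctx, func(page *compute.NodeTemplateAggregatedList) error {
//			for scope, scoped := range page.Items {
//				for _, tmpl := range scoped.NodeTemplates {
//					fmt.Println(scope, tmpl.Name)
//				}
//			}
//			return nil
//		})
//	if err != nil {
//		log.Fatal(err)
//	}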
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NodeTemplateAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of node templates.", + // "httpMethod": "GET", + // "id": "compute.nodeTemplates.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/nodeTemplates", + // "response": { + // "$ref": "NodeTemplateAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeTemplatesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTemplateAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.nodeTemplates.delete": + +type NodeTemplatesDeleteCall struct { + s *Service + project string + region string + nodeTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified NodeTemplate resource. +func (r *NodeTemplatesService) Delete(project string, region string, nodeTemplate string) *NodeTemplatesDeleteCall { + c := &NodeTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.nodeTemplate = nodeTemplate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NodeTemplatesDeleteCall) RequestId(requestId string) *NodeTemplatesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeTemplatesDeleteCall) Fields(s ...googleapi.Field) *NodeTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *NodeTemplatesDeleteCall) Context(ctx context.Context) *NodeTemplatesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "nodeTemplate": c.nodeTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeTemplates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified NodeTemplate resource.", + // "httpMethod": "DELETE", + // "id": "compute.nodeTemplates.delete", + // "parameterOrder": [ + // "project", + // "region", + // "nodeTemplate" + // ], + // "parameters": { + // "nodeTemplate": { + // "description": "Name of the NodeTemplate resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.nodeTemplates.get": + +type NodeTemplatesGetCall struct { + s *Service + project string + region string + nodeTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified node template. Gets a list of available +// node templates by making a list() request. +func (r *NodeTemplatesService) Get(project string, region string, nodeTemplate string) *NodeTemplatesGetCall { + c := &NodeTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.nodeTemplate = nodeTemplate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeTemplatesGetCall) Fields(s ...googleapi.Field) *NodeTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTemplatesGetCall) IfNoneMatch(entityTag string) *NodeTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeTemplatesGetCall) Context(ctx context.Context) *NodeTemplatesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeTemplatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "nodeTemplate": c.nodeTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeTemplates.get" call. +// Exactly one of *NodeTemplate or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NodeTemplate.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NodeTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified node template. Gets a list of available node templates by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.nodeTemplates.get", + // "parameterOrder": [ + // "project", + // "region", + // "nodeTemplate" + // ], + // "parameters": { + // "nodeTemplate": { + // "description": "Name of the node template to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + // "response": { + // "$ref": "NodeTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.nodeTemplates.getIamPolicy": + +type NodeTemplatesGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. 
+func (r *NodeTemplatesService) GetIamPolicy(project string, region string, resource string) *NodeTemplatesGetIamPolicyCall { + c := &NodeTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeTemplatesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeTemplatesGetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeTemplatesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeTemplates.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.nodeTemplates.getIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.nodeTemplates.insert": + +type NodeTemplatesInsertCall struct { + s *Service + project string + region string + nodetemplate *NodeTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a NodeTemplate resource in the specified project +// using the data included in the request. +func (r *NodeTemplatesService) Insert(project string, region string, nodetemplate *NodeTemplate) *NodeTemplatesInsertCall { + c := &NodeTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.nodetemplate = nodetemplate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
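+//
+// Illustrative usage (editor's sketch): assuming an initialized *Service named
+// svc and a populated *NodeTemplate named tmpl, reusing a single UUID (any
+// UUID library will do; the value below is a placeholder) makes retries of the
+// same insert idempotent:
+//
+//    reqID := "0f6a7b12-3c4d-4e5f-8a9b-0c1d2e3f4a5b" // any valid, non-zero UUID
+//    op, err := svc.NodeTemplates.Insert("my-project", "us-central1", tmpl).
+//        RequestId(reqID).
+//        Do()
+//    if err != nil {
+//        // Retrying with the same reqID is safe; a completed operation is not repeated.
+//        log.Fatal(err)
+//    }
+//    fmt.Println(op.Name, op.Status)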
+func (c *NodeTemplatesInsertCall) RequestId(requestId string) *NodeTemplatesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeTemplatesInsertCall) Fields(s ...googleapi.Field) *NodeTemplatesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeTemplatesInsertCall) Context(ctx context.Context) *NodeTemplatesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeTemplatesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodetemplate) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeTemplates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a NodeTemplate resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.nodeTemplates.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/nodeTemplates", + // "request": { + // "$ref": "NodeTemplate" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.nodeTemplates.list": + +type NodeTemplatesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of node templates available to the specified +// project. +func (r *NodeTemplatesService) List(project string, region string) *NodeTemplatesListCall { + c := &NodeTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
+// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NodeTemplatesListCall) Filter(filter string) *NodeTemplatesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NodeTemplatesListCall) MaxResults(maxResults int64) *NodeTemplatesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NodeTemplatesListCall) OrderBy(orderBy string) *NodeTemplatesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NodeTemplatesListCall) PageToken(pageToken string) *NodeTemplatesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeTemplatesListCall) Fields(s ...googleapi.Field) *NodeTemplatesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTemplatesListCall) IfNoneMatch(entityTag string) *NodeTemplatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeTemplatesListCall) Context(ctx context.Context) *NodeTemplatesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeTemplatesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeTemplates.list" call. +// Exactly one of *NodeTemplateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NodeTemplateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NodeTemplateList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of node templates available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.nodeTemplates.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/nodeTemplates", + // "response": { + // "$ref": "NodeTemplateList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
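+//
+// Illustrative usage (editor's sketch): assuming an initialized *Service named
+// svc, every node template in a region can be visited page by page; the
+// project and region below are placeholders:
+//
+//    call := svc.NodeTemplates.List("my-project", "us-central1").MaxResults(100)
+//    err := call.Pages(context.Background(), func(page *NodeTemplateList) error {
+//        for _, t := range page.Items {
+//            fmt.Println(t.Name)
+//        }
+//        return nil // returning a non-nil error stops the iteration early
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }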
+func (c *NodeTemplatesListCall) Pages(ctx context.Context, f func(*NodeTemplateList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.nodeTemplates.setIamPolicy": + +type NodeTemplatesSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *NodeTemplatesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NodeTemplatesSetIamPolicyCall { + c := &NodeTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeTemplatesSetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeTemplatesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeTemplates.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.nodeTemplates.setIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.nodeTemplates.testIamPermissions": + +type NodeTemplatesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *NodeTemplatesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeTemplatesTestIamPermissionsCall { + c := &NodeTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeTemplatesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
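+//
+// Illustrative usage (editor's sketch): attaching a context with a deadline
+// abandons the call if the API does not answer in time; the permission string
+// and resource names below are placeholders:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    req := &TestPermissionsRequest{Permissions: []string{"compute.nodeTemplates.get"}}
+//    resp, err := svc.NodeTemplates.TestIamPermissions("my-project", "us-central1", "my-template", req).
+//        Context(ctx).
+//        Do()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(resp.Permissions) // the subset of permissions the caller actually holds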
+func (c *NodeTemplatesTestIamPermissionsCall) Context(ctx context.Context) *NodeTemplatesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeTemplatesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeTemplates.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.nodeTemplates.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.nodeTypes.aggregatedList": + +type NodeTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of node types. +func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedListCall { + c := &NodeTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. 
For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NodeTypesAggregatedListCall) Filter(filter string) *NodeTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NodeTypesAggregatedListCall) MaxResults(maxResults int64) *NodeTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NodeTypesAggregatedListCall) OrderBy(orderBy string) *NodeTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeTypesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTypesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTypesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeTypesAggregatedListCall) Context(ctx context.Context) *NodeTypesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
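+//
+// Illustrative usage (editor's sketch): headers set here are sent with every
+// request issued by this call; the header value below is a placeholder:
+//
+//    call := svc.NodeTypes.AggregatedList("my-project")
+//    call.Header().Set("X-Goog-Request-Reason", "capacity-audit")
+//    agg, err := call.Do()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    for zone, scoped := range agg.Items {
+//        for _, nt := range scoped.NodeTypes {
+//            fmt.Println(zone, nt.Name)
+//        }
+//    }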
+func (c *NodeTypesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeTypes.aggregatedList" call. +// Exactly one of *NodeTypeAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NodeTypeAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NodeTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of node types.", + // "httpMethod": "GET", + // "id": "compute.nodeTypes.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/nodeTypes", + // "response": { + // "$ref": "NodeTypeAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeTypesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.nodeTypes.get": + +type NodeTypesGetCall struct { + s *Service + project string + zone string + nodeType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified node type. Gets a list of available node +// types by making a list() request. +func (r *NodeTypesService) Get(project string, zone string, nodeType string) *NodeTypesGetCall { + c := &NodeTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.nodeType = nodeType + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
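+//
+// Illustrative usage (editor's sketch): restricting the response to a few
+// fields keeps the payload small; the node type name below is a placeholder
+// and the field names follow the NodeType JSON schema:
+//
+//    nt, err := svc.NodeTypes.Get("my-project", "us-central1-a", "n1-node-96-624").
+//        Fields("name", "guestCpus", "memoryMb").
+//        Do()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Printf("%s: %d vCPUs, %d MB\n", nt.Name, nt.GuestCpus, nt.MemoryMb)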
+func (c *NodeTypesGetCall) Fields(s ...googleapi.Field) *NodeTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTypesGetCall) IfNoneMatch(entityTag string) *NodeTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeTypesGetCall) Context(ctx context.Context) *NodeTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes/{nodeType}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "nodeType": c.nodeType, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeTypes.get" call. +// Exactly one of *NodeType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *NodeType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NodeType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified node type. 
Gets a list of available node types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.nodeTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "nodeType" + // ], + // "parameters": { + // "nodeType": { + // "description": "Name of the node type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/nodeTypes/{nodeType}", + // "response": { + // "$ref": "NodeType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.nodeTypes.list": + +type NodeTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of node types available to the specified +// project. +func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall { + c := &NodeTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NodeTypesListCall) Filter(filter string) *NodeTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *NodeTypesListCall) MaxResults(maxResults int64) *NodeTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NodeTypesListCall) OrderBy(orderBy string) *NodeTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeTypesListCall) Fields(s ...googleapi.Field) *NodeTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTypesListCall) IfNoneMatch(entityTag string) *NodeTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeTypesListCall) Context(ctx context.Context) *NodeTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeTypes.list" call. +// Exactly one of *NodeTypeList or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *NodeTypeList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NodeTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of node types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.nodeTypes.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/nodeTypes", + // "response": { + // "$ref": "NodeTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeTypesListCall) Pages(ctx context.Context, f func(*NodeTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.organizationSecurityPolicies.addAssociation": + +type OrganizationSecurityPoliciesAddAssociationCall struct { + s *Service + securityPolicy string + securitypolicyassociation *SecurityPolicyAssociation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddAssociation: Inserts an association for the specified security +// policy. +func (r *OrganizationSecurityPoliciesService) AddAssociation(securityPolicy string, securitypolicyassociation *SecurityPolicyAssociation) *OrganizationSecurityPoliciesAddAssociationCall { + c := &OrganizationSecurityPoliciesAddAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.securityPolicy = securityPolicy + c.securitypolicyassociation = securitypolicyassociation + return c +} + +// ReplaceExistingAssociation sets the optional parameter +// "replaceExistingAssociation": Indicates whether or not to replace it +// if an association of the attachment already exists. This is false by +// default, in which case an error will be returned if an assocation +// already exists. +func (c *OrganizationSecurityPoliciesAddAssociationCall) ReplaceExistingAssociation(replaceExistingAssociation bool) *OrganizationSecurityPoliciesAddAssociationCall { + c.urlParams_.Set("replaceExistingAssociation", fmt.Sprint(replaceExistingAssociation)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *OrganizationSecurityPoliciesAddAssociationCall) RequestId(requestId string) *OrganizationSecurityPoliciesAddAssociationCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesAddAssociationCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesAddAssociationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesAddAssociationCall) Context(ctx context.Context) *OrganizationSecurityPoliciesAddAssociationCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesAddAssociationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesAddAssociationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyassociation) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/addAssociation") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.addAssociation" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationSecurityPoliciesAddAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Inserts an association for the specified security policy.", + // "httpMethod": "POST", + // "id": "compute.organizationSecurityPolicies.addAssociation", + // "parameterOrder": [ + // "securityPolicy" + // ], + // "parameters": { + // "replaceExistingAssociation": { + // "description": "Indicates whether or not to replace it if an association of the attachment already exists. This is false by default, in which case an error will be returned if an assocation already exists.", + // "location": "query", + // "type": "boolean" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[0-9]{0,20}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies/{securityPolicy}/addAssociation", + // "request": { + // "$ref": "SecurityPolicyAssociation" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.organizationSecurityPolicies.addRule": + +type OrganizationSecurityPoliciesAddRuleCall struct { + s *Service + securityPolicy string + securitypolicyrule *SecurityPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddRule: Inserts a rule into a security policy. +func (r *OrganizationSecurityPoliciesService) AddRule(securityPolicy string, securitypolicyrule *SecurityPolicyRule) *OrganizationSecurityPoliciesAddRuleCall { + c := &OrganizationSecurityPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.securityPolicy = securityPolicy + c.securitypolicyrule = securitypolicyrule + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *OrganizationSecurityPoliciesAddRuleCall) RequestId(requestId string) *OrganizationSecurityPoliciesAddRuleCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesAddRuleCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesAddRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesAddRuleCall) Context(ctx context.Context) *OrganizationSecurityPoliciesAddRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesAddRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/addRule") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.addRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationSecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Inserts a rule into a security policy.", + // "httpMethod": "POST", + // "id": "compute.organizationSecurityPolicies.addRule", + // "parameterOrder": [ + // "securityPolicy" + // ], + // "parameters": { + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[0-9]{0,20}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies/{securityPolicy}/addRule", + // "request": { + // "$ref": "SecurityPolicyRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.organizationSecurityPolicies.copyRules": + +type OrganizationSecurityPoliciesCopyRulesCall struct { + s *Service + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// CopyRules: Copies rules to the specified security policy. +func (r *OrganizationSecurityPoliciesService) CopyRules(securityPolicy string) *OrganizationSecurityPoliciesCopyRulesCall { + c := &OrganizationSecurityPoliciesCopyRulesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.securityPolicy = securityPolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *OrganizationSecurityPoliciesCopyRulesCall) RequestId(requestId string) *OrganizationSecurityPoliciesCopyRulesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// SourceSecurityPolicy sets the optional parameter +// "sourceSecurityPolicy": The security policy from which to copy rules. +func (c *OrganizationSecurityPoliciesCopyRulesCall) SourceSecurityPolicy(sourceSecurityPolicy string) *OrganizationSecurityPoliciesCopyRulesCall { + c.urlParams_.Set("sourceSecurityPolicy", sourceSecurityPolicy) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesCopyRulesCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesCopyRulesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesCopyRulesCall) Context(ctx context.Context) *OrganizationSecurityPoliciesCopyRulesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesCopyRulesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesCopyRulesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/copyRules") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.copyRules" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationSecurityPoliciesCopyRulesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Copies rules to the specified security policy.", + // "httpMethod": "POST", + // "id": "compute.organizationSecurityPolicies.copyRules", + // "parameterOrder": [ + // "securityPolicy" + // ], + // "parameters": { + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[0-9]{0,20}", + // "required": true, + // "type": "string" + // }, + // "sourceSecurityPolicy": { + // "description": "The security policy from which to copy rules.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies/{securityPolicy}/copyRules", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.organizationSecurityPolicies.delete": + +type OrganizationSecurityPoliciesDeleteCall struct { + s *Service + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified policy. +func (r *OrganizationSecurityPoliciesService) Delete(securityPolicy string) *OrganizationSecurityPoliciesDeleteCall { + c := &OrganizationSecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.securityPolicy = securityPolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *OrganizationSecurityPoliciesDeleteCall) RequestId(requestId string) *OrganizationSecurityPoliciesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesDeleteCall) Context(ctx context.Context) *OrganizationSecurityPoliciesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified policy.", + // "httpMethod": "DELETE", + // "id": "compute.organizationSecurityPolicies.delete", + // "parameterOrder": [ + // "securityPolicy" + // ], + // "parameters": { + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to delete.", + // "location": "path", + // "pattern": "[0-9]{0,20}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies/{securityPolicy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.organizationSecurityPolicies.get": + +type OrganizationSecurityPoliciesGetCall struct { + s *Service + securityPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: List all of the ordered rules present in a single specified +// policy. +func (r *OrganizationSecurityPoliciesService) Get(securityPolicy string) *OrganizationSecurityPoliciesGetCall { + c := &OrganizationSecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.securityPolicy = securityPolicy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesGetCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationSecurityPoliciesGetCall) IfNoneMatch(entityTag string) *OrganizationSecurityPoliciesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesGetCall) Context(ctx context.Context) *OrganizationSecurityPoliciesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *OrganizationSecurityPoliciesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.get" call. +// Exactly one of *SecurityPolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SecurityPolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OrganizationSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPolicy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SecurityPolicy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List all of the ordered rules present in a single specified policy.", + // "httpMethod": "GET", + // "id": "compute.organizationSecurityPolicies.get", + // "parameterOrder": [ + // "securityPolicy" + // ], + // "parameters": { + // "securityPolicy": { + // "description": "Name of the security policy to get.", + // "location": "path", + // "pattern": "[0-9]{0,20}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies/{securityPolicy}", + // "response": { + // "$ref": "SecurityPolicy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.organizationSecurityPolicies.getAssociation": + +type OrganizationSecurityPoliciesGetAssociationCall struct { + s *Service + securityPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetAssociation: Gets an association with the specified name. 
+func (r *OrganizationSecurityPoliciesService) GetAssociation(securityPolicy string) *OrganizationSecurityPoliciesGetAssociationCall { + c := &OrganizationSecurityPoliciesGetAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.securityPolicy = securityPolicy + return c +} + +// Name sets the optional parameter "name": The name of the association +// to get from the security policy. +func (c *OrganizationSecurityPoliciesGetAssociationCall) Name(name string) *OrganizationSecurityPoliciesGetAssociationCall { + c.urlParams_.Set("name", name) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesGetAssociationCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesGetAssociationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationSecurityPoliciesGetAssociationCall) IfNoneMatch(entityTag string) *OrganizationSecurityPoliciesGetAssociationCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesGetAssociationCall) Context(ctx context.Context) *OrganizationSecurityPoliciesGetAssociationCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesGetAssociationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesGetAssociationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/getAssociation") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.getAssociation" call. +// Exactly one of *SecurityPolicyAssociation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *SecurityPolicyAssociation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OrganizationSecurityPoliciesGetAssociationCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyAssociation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SecurityPolicyAssociation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets an association with the specified name.", + // "httpMethod": "GET", + // "id": "compute.organizationSecurityPolicies.getAssociation", + // "parameterOrder": [ + // "securityPolicy" + // ], + // "parameters": { + // "name": { + // "description": "The name of the association to get from the security policy.", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to which the queried rule belongs.", + // "location": "path", + // "pattern": "[0-9]{0,20}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies/{securityPolicy}/getAssociation", + // "response": { + // "$ref": "SecurityPolicyAssociation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.organizationSecurityPolicies.getRule": + +type OrganizationSecurityPoliciesGetRuleCall struct { + s *Service + securityPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetRule: Gets a rule at the specified priority. +func (r *OrganizationSecurityPoliciesService) GetRule(securityPolicy string) *OrganizationSecurityPoliciesGetRuleCall { + c := &OrganizationSecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.securityPolicy = securityPolicy + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to get from the security policy. +func (c *OrganizationSecurityPoliciesGetRuleCall) Priority(priority int64) *OrganizationSecurityPoliciesGetRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesGetRuleCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesGetRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationSecurityPoliciesGetRuleCall) IfNoneMatch(entityTag string) *OrganizationSecurityPoliciesGetRuleCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *OrganizationSecurityPoliciesGetRuleCall) Context(ctx context.Context) *OrganizationSecurityPoliciesGetRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesGetRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/getRule") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.getRule" call. +// Exactly one of *SecurityPolicyRule or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SecurityPolicyRule.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OrganizationSecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyRule, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SecurityPolicyRule{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a rule at the specified priority.", + // "httpMethod": "GET", + // "id": "compute.organizationSecurityPolicies.getRule", + // "parameterOrder": [ + // "securityPolicy" + // ], + // "parameters": { + // "priority": { + // "description": "The priority of the rule to get from the security policy.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to which the queried rule belongs.", + // "location": "path", + // "pattern": "[0-9]{0,20}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies/{securityPolicy}/getRule", + // "response": { + // "$ref": "SecurityPolicyRule" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.organizationSecurityPolicies.insert": + +type OrganizationSecurityPoliciesInsertCall struct { + s *Service + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new policy in the specified project using the data +// included in the request. +func (r *OrganizationSecurityPoliciesService) Insert(securitypolicy *SecurityPolicy) *OrganizationSecurityPoliciesInsertCall { + c := &OrganizationSecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.securitypolicy = securitypolicy + return c +} + +// ParentId sets the optional parameter "parentId": Parent ID for this +// request. +func (c *OrganizationSecurityPoliciesInsertCall) ParentId(parentId string) *OrganizationSecurityPoliciesInsertCall { + c.urlParams_.Set("parentId", parentId) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *OrganizationSecurityPoliciesInsertCall) RequestId(requestId string) *OrganizationSecurityPoliciesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesInsertCall) Context(ctx context.Context) *OrganizationSecurityPoliciesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new policy in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.organizationSecurityPolicies.insert", + // "parameters": { + // "parentId": { + // "description": "Parent ID for this request.", + // "location": "query", + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies", + // "request": { + // "$ref": "SecurityPolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.organizationSecurityPolicies.list": + +type OrganizationSecurityPoliciesListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: List all the policies that have been configured for the +// specified project. +func (r *OrganizationSecurityPoliciesService) List() *OrganizationSecurityPoliciesListCall { + c := &OrganizationSecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *OrganizationSecurityPoliciesListCall) Filter(filter string) *OrganizationSecurityPoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *OrganizationSecurityPoliciesListCall) MaxResults(maxResults int64) *OrganizationSecurityPoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *OrganizationSecurityPoliciesListCall) OrderBy(orderBy string) *OrganizationSecurityPoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *OrganizationSecurityPoliciesListCall) PageToken(pageToken string) *OrganizationSecurityPoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ParentId sets the optional parameter "parentId": Parent ID for this +// request. +func (c *OrganizationSecurityPoliciesListCall) ParentId(parentId string) *OrganizationSecurityPoliciesListCall { + c.urlParams_.Set("parentId", parentId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesListCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationSecurityPoliciesListCall) IfNoneMatch(entityTag string) *OrganizationSecurityPoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesListCall) Context(ctx context.Context) *OrganizationSecurityPoliciesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.list" call. +// Exactly one of *SecurityPolicyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SecurityPolicyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OrganizationSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SecurityPolicyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List all the policies that have been configured for the specified project.", + // "httpMethod": "GET", + // "id": "compute.organizationSecurityPolicies.list", + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "parentId": { + // "description": "Parent ID for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies", + // "response": { + // "$ref": "SecurityPolicyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *OrganizationSecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPolicyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.organizationSecurityPolicies.listAssociations": + +type OrganizationSecurityPoliciesListAssociationsCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListAssociations: Lists associations of a specified target, i.e., +// organization or folder. +func (r *OrganizationSecurityPoliciesService) ListAssociations() *OrganizationSecurityPoliciesListAssociationsCall { + c := &OrganizationSecurityPoliciesListAssociationsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// TargetResource sets the optional parameter "targetResource": The +// target resource to list associations. It is an organization, or a +// folder. +func (c *OrganizationSecurityPoliciesListAssociationsCall) TargetResource(targetResource string) *OrganizationSecurityPoliciesListAssociationsCall { + c.urlParams_.Set("targetResource", targetResource) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesListAssociationsCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesListAssociationsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
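The List call above pairs the maxResults/pageToken parameters with the Pages helper shown just before it. A minimal pagination sketch, assuming an already-constructed *compute.Service for this package; the import path, parent ID, and the Items/Name fields of SecurityPolicyList are taken as given from the public Compute API surface rather than from this diff:

```go
package example

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v0.alpha" // assumed import path for this vendored package
)

// listOrgSecurityPolicies prints every policy under a parent, letting Pages
// drive the nextPageToken/pageToken exchange and stop on an empty token.
func listOrgSecurityPolicies(ctx context.Context, svc *compute.Service) {
	call := svc.OrganizationSecurityPolicies.List().
		ParentId("organizations/123456789012"). // placeholder parent ID
		MaxResults(100)                         // server default is 500
	err := call.Pages(ctx, func(page *compute.SecurityPolicyList) error {
		for _, sp := range page.Items {
			fmt.Println(sp.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatalf("listing organization security policies: %v", err)
	}
}
```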
+func (c *OrganizationSecurityPoliciesListAssociationsCall) IfNoneMatch(entityTag string) *OrganizationSecurityPoliciesListAssociationsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesListAssociationsCall) Context(ctx context.Context) *OrganizationSecurityPoliciesListAssociationsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesListAssociationsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesListAssociationsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/listAssociations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.listAssociations" call. +// Exactly one of *OrganizationSecurityPoliciesListAssociationsResponse +// or error will be non-nil. Any non-2xx status code is an error. +// Response headers are in either +// *OrganizationSecurityPoliciesListAssociationsResponse.ServerResponse.H +// eader or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *OrganizationSecurityPoliciesListAssociationsCall) Do(opts ...googleapi.CallOption) (*OrganizationSecurityPoliciesListAssociationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &OrganizationSecurityPoliciesListAssociationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists associations of a specified target, i.e., organization or folder.", + // "httpMethod": "GET", + // "id": "compute.organizationSecurityPolicies.listAssociations", + // "parameters": { + // "targetResource": { + // "description": "The target resource to list associations. 
It is an organization, or a folder.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies/listAssociations", + // "response": { + // "$ref": "OrganizationSecurityPoliciesListAssociationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.organizationSecurityPolicies.move": + +type OrganizationSecurityPoliciesMoveCall struct { + s *Service + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Move: Moves the specified security policy. +func (r *OrganizationSecurityPoliciesService) Move(securityPolicy string) *OrganizationSecurityPoliciesMoveCall { + c := &OrganizationSecurityPoliciesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.securityPolicy = securityPolicy + return c +} + +// ParentId sets the optional parameter "parentId": The new parent of +// the security policy. +func (c *OrganizationSecurityPoliciesMoveCall) ParentId(parentId string) *OrganizationSecurityPoliciesMoveCall { + c.urlParams_.Set("parentId", parentId) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *OrganizationSecurityPoliciesMoveCall) RequestId(requestId string) *OrganizationSecurityPoliciesMoveCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesMoveCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesMoveCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesMoveCall) Context(ctx context.Context) *OrganizationSecurityPoliciesMoveCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesMoveCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesMoveCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/move") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.move" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationSecurityPoliciesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Moves the specified security policy.", + // "httpMethod": "POST", + // "id": "compute.organizationSecurityPolicies.move", + // "parameterOrder": [ + // "securityPolicy" + // ], + // "parameters": { + // "parentId": { + // "description": "The new parent of the security policy.", + // "location": "query", + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[0-9]{0,20}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies/{securityPolicy}/move", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.organizationSecurityPolicies.patch": + +type OrganizationSecurityPoliciesPatchCall struct { + s *Service + securityPolicy string + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified policy with the data included in the +// request. 
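The Move call and its requestId parameter are documented above; the sketch below shows an idempotent move, assuming the same *compute.Service and, purely for UUID generation, the github.com/google/uuid module (an assumption — any caller-generated UUID works):

```go
package example

import (
	"context"

	"github.com/google/uuid" // assumed helper; any valid, non-zero UUID is acceptable
	compute "google.golang.org/api/compute/v0.alpha"
)

// moveOrgSecurityPolicy re-parents a policy. The policy ID and folder ID are
// placeholders. Reusing the same requestId on a retry lets the server ignore
// the duplicate instead of starting a second move.
func moveOrgSecurityPolicy(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	reqID := uuid.New().String()
	return svc.OrganizationSecurityPolicies.
		Move("123456789012345"). // numeric policy ID, per the [0-9]{0,20} pattern
		ParentId("folders/987654321098").
		RequestId(reqID).
		Context(ctx).
		Do()
}
```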
+func (r *OrganizationSecurityPoliciesService) Patch(securityPolicy string, securitypolicy *SecurityPolicy) *OrganizationSecurityPoliciesPatchCall { + c := &OrganizationSecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.securityPolicy = securityPolicy + c.securitypolicy = securitypolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *OrganizationSecurityPoliciesPatchCall) RequestId(requestId string) *OrganizationSecurityPoliciesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesPatchCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesPatchCall) Context(ctx context.Context) *OrganizationSecurityPoliciesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
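Patch sends a SecurityPolicy body with PATCH semantics, so only the supplied fields change. A sketch that updates just the description, assuming SecurityPolicy exposes a Description field as in the public Compute API schema:

```go
package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.alpha"
)

// patchPolicyDescription patches a single field of an existing policy. The
// policy ID and description text are placeholders; fields not set in the body
// keep their current server-side values.
func patchPolicyDescription(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	patch := &compute.SecurityPolicy{
		Description: "managed by the security team", // assumed field name
	}
	return svc.OrganizationSecurityPolicies.
		Patch("123456789012345", patch).
		Context(ctx).
		Do()
}
```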
+func (c *OrganizationSecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified policy with the data included in the request.", + // "httpMethod": "PATCH", + // "id": "compute.organizationSecurityPolicies.patch", + // "parameterOrder": [ + // "securityPolicy" + // ], + // "parameters": { + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[0-9]{0,20}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies/{securityPolicy}", + // "request": { + // "$ref": "SecurityPolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.organizationSecurityPolicies.patchRule": + +type OrganizationSecurityPoliciesPatchRuleCall struct { + s *Service + securityPolicy string + securitypolicyrule *SecurityPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// PatchRule: Patches a rule at the specified priority. +func (r *OrganizationSecurityPoliciesService) PatchRule(securityPolicy string, securitypolicyrule *SecurityPolicyRule) *OrganizationSecurityPoliciesPatchRuleCall { + c := &OrganizationSecurityPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.securityPolicy = securityPolicy + c.securitypolicyrule = securitypolicyrule + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to patch. +func (c *OrganizationSecurityPoliciesPatchRuleCall) Priority(priority int64) *OrganizationSecurityPoliciesPatchRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *OrganizationSecurityPoliciesPatchRuleCall) RequestId(requestId string) *OrganizationSecurityPoliciesPatchRuleCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesPatchRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesPatchRuleCall) Context(ctx context.Context) *OrganizationSecurityPoliciesPatchRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesPatchRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/patchRule") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.patchRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationSecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches a rule at the specified priority.", + // "httpMethod": "POST", + // "id": "compute.organizationSecurityPolicies.patchRule", + // "parameterOrder": [ + // "securityPolicy" + // ], + // "parameters": { + // "priority": { + // "description": "The priority of the rule to patch.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[0-9]{0,20}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies/{securityPolicy}/patchRule", + // "request": { + // "$ref": "SecurityPolicyRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.organizationSecurityPolicies.removeAssociation": + +type OrganizationSecurityPoliciesRemoveAssociationCall struct { + s *Service + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RemoveAssociation: Removes an association for the specified security +// policy. +func (r *OrganizationSecurityPoliciesService) RemoveAssociation(securityPolicy string) *OrganizationSecurityPoliciesRemoveAssociationCall { + c := &OrganizationSecurityPoliciesRemoveAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.securityPolicy = securityPolicy + return c +} + +// Name sets the optional parameter "name": Name for the attachment that +// will be removed. +func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Name(name string) *OrganizationSecurityPoliciesRemoveAssociationCall { + c.urlParams_.Set("name", name) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *OrganizationSecurityPoliciesRemoveAssociationCall) RequestId(requestId string) *OrganizationSecurityPoliciesRemoveAssociationCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesRemoveAssociationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Context(ctx context.Context) *OrganizationSecurityPoliciesRemoveAssociationCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesRemoveAssociationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/removeAssociation") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.removeAssociation" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes an association for the specified security policy.", + // "httpMethod": "POST", + // "id": "compute.organizationSecurityPolicies.removeAssociation", + // "parameterOrder": [ + // "securityPolicy" + // ], + // "parameters": { + // "name": { + // "description": "Name for the attachment that will be removed.", + // "location": "query", + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[0-9]{0,20}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies/{securityPolicy}/removeAssociation", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.organizationSecurityPolicies.removeRule": + +type OrganizationSecurityPoliciesRemoveRuleCall struct { + s *Service + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RemoveRule: Deletes a rule at the specified priority. +func (r *OrganizationSecurityPoliciesService) RemoveRule(securityPolicy string) *OrganizationSecurityPoliciesRemoveRuleCall { + c := &OrganizationSecurityPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.securityPolicy = securityPolicy + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to remove from the security policy. +func (c *OrganizationSecurityPoliciesRemoveRuleCall) Priority(priority int64) *OrganizationSecurityPoliciesRemoveRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *OrganizationSecurityPoliciesRemoveRuleCall) RequestId(requestId string) *OrganizationSecurityPoliciesRemoveRuleCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationSecurityPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesRemoveRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationSecurityPoliciesRemoveRuleCall) Context(ctx context.Context) *OrganizationSecurityPoliciesRemoveRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationSecurityPoliciesRemoveRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationSecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/removeRule") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.organizationSecurityPolicies.removeRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OrganizationSecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a rule at the specified priority.", + // "httpMethod": "POST", + // "id": "compute.organizationSecurityPolicies.removeRule", + // "parameterOrder": [ + // "securityPolicy" + // ], + // "parameters": { + // "priority": { + // "description": "The priority of the rule to remove from the security policy.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[0-9]{0,20}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "locations/global/securityPolicies/{securityPolicy}/removeRule", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.disableXpnHost": + +type ProjectsDisableXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DisableXpnHost: Disable this project as a shared VPC host project. +func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { + c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
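PatchRule and RemoveRule both address a rule through the priority query parameter rather than a path segment. A sketch of the pair, assuming SecurityPolicyRule carries Action and Description fields as in the published SecurityPolicyRule schema (the policy ID, priorities, and rule contents are placeholders):

```go
package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.alpha"
)

// updateAndPruneRules patches the rule at priority 1000 and removes the rule
// at priority 2000. Both calls return long-running *compute.Operation values
// that the caller still has to poll.
func updateAndPruneRules(ctx context.Context, svc *compute.Service) error {
	rule := &compute.SecurityPolicyRule{
		Action:      "deny", // assumed field names and value
		Description: "tightened by policy review",
	}
	if _, err := svc.OrganizationSecurityPolicies.
		PatchRule("123456789012345", rule).
		Priority(1000).
		Context(ctx).
		Do(); err != nil {
		return err
	}
	_, err := svc.OrganizationSecurityPolicies.
		RemoveRule("123456789012345").
		Priority(2000).
		Context(ctx).
		Do()
	return err
}
```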
+func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisableXpnHostCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnHostCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsDisableXpnHostCall) Context(ctx context.Context) *ProjectsDisableXpnHostCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsDisableXpnHostCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnHost") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.disableXpnHost" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Disable this project as a shared VPC host project.", + // "httpMethod": "POST", + // "id": "compute.projects.disableXpnHost", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/disableXpnHost", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.disableXpnResource": + +type ProjectsDisableXpnResourceCall struct { + s *Service + project string + projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DisableXpnResource: Disable a serivce resource (a.k.a service +// project) associated with this host project. +func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { + c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.projectsdisablexpnresourcerequest = projectsdisablexpnresourcerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDisableXpnResourceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnResourceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsDisableXpnResourceCall) Context(ctx context.Context) *ProjectsDisableXpnResourceCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsDisableXpnResourceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsdisablexpnresourcerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnResource") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.disableXpnResource" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Disable a serivce resource (a.k.a service project) associated with this host project.", + // "httpMethod": "POST", + // "id": "compute.projects.disableXpnResource", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/disableXpnResource", + // "request": { + // "$ref": "ProjectsDisableXpnResourceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.enableXpnHost": + +type ProjectsEnableXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// EnableXpnHost: Enable this project as a shared VPC host project. +func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { + c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableXpnHostCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnHostCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsEnableXpnHostCall) Context(ctx context.Context) *ProjectsEnableXpnHostCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsEnableXpnHostCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnHost") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.enableXpnHost" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enable this project as a shared VPC host project.", + // "httpMethod": "POST", + // "id": "compute.projects.enableXpnHost", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/enableXpnHost", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.enableXpnResource": + +type ProjectsEnableXpnResourceCall struct { + s *Service + project string + projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// EnableXpnResource: Enable service resource (a.k.a service project) +// for a host project, so that subnets in the host project can be used +// by instances in the service project. 
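EnableXpnHost and DisableXpnHost toggle a project's shared VPC (XPN) host status and each return an Operation. A minimal sketch with a placeholder project ID; the Operation's Name and Status fields are assumed from the public Operation schema, and only the kickoff is shown:

```go
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.alpha"
)

// toggleSharedVPCHost enables or disables a project as a shared VPC host.
// The returned Operation must still be polled to completion.
func toggleSharedVPCHost(ctx context.Context, svc *compute.Service, enable bool) error {
	const project = "my-host-project" // placeholder project ID
	var (
		op  *compute.Operation
		err error
	)
	if enable {
		op, err = svc.Projects.EnableXpnHost(project).Context(ctx).Do()
	} else {
		op, err = svc.Projects.DisableXpnHost(project).Context(ctx).Do()
	}
	if err != nil {
		return err
	}
	fmt.Printf("shared VPC host change: operation %s is %s\n", op.Name, op.Status)
	return nil
}
```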
+func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { + c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.projectsenablexpnresourcerequest = projectsenablexpnresourcerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEnableXpnResourceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnResourceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsEnableXpnResourceCall) Context(ctx context.Context) *ProjectsEnableXpnResourceCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsEnableXpnResourceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsenablexpnresourcerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnResource") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.enableXpnResource" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
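+//
+// A sketch of inspecting the error returned by Do (call stands for any of
+// the *Call builders in this file; googleapi and fmt are already imported
+// above):
+//
+//	op, err := call.Do()
+//	if gerr, ok := err.(*googleapi.Error); ok {
+//		// gerr.Code holds the HTTP status and gerr.Header the response headers.
+//		fmt.Printf("compute API error %d: %s\n", gerr.Code, gerr.Message)
+//	}
+//	_ = op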
+func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", + // "httpMethod": "POST", + // "id": "compute.projects.enableXpnResource", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/enableXpnResource", + // "request": { + // "$ref": "ProjectsEnableXpnResourceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.get": + +type ProjectsGetCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified Project resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get +func (r *ProjectsService) Get(project string) *ProjectsGetCall { + c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.get" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Project{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified Project resource.", + // "httpMethod": "GET", + // "id": "compute.projects.get", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}", + // "response": { + // "$ref": "Project" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.projects.getXpnHost": + +type ProjectsGetXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetXpnHost: Gets the shared VPC host project that this project links +// to. May be empty if no link exists. +func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { + c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHostCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHostCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsGetXpnHostCall) Context(ctx context.Context) *ProjectsGetXpnHostCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
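+//
+// For example (a sketch; the header name and value are hypothetical, and svc
+// is an initialized *Service):
+//
+//	call := svc.Projects.GetXpnHost("my-project")
+//	call.Header().Set("X-Example-Debug", "trace-request") // hypothetical header
+//	hostProject, err := call.Do()
+//	_, _ = hostProject, err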
+func (c *ProjectsGetXpnHostCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnHost") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.getXpnHost" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Project{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the shared VPC host project that this project links to. May be empty if no link exists.", + // "httpMethod": "GET", + // "id": "compute.projects.getXpnHost", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/getXpnHost", + // "response": { + // "$ref": "Project" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.getXpnResources": + +type ProjectsGetXpnResourcesCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetXpnResources: Gets service resources (a.k.a service project) +// associated with this host project. +func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { + c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "order_by": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("order_by", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGetXpnResourcesCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsGetXpnResourcesCall) Context(ctx context.Context) *ProjectsGetXpnResourcesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsGetXpnResourcesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnResources") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.getXpnResources" call. +// Exactly one of *ProjectsGetXpnResources or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ProjectsGetXpnResources.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*ProjectsGetXpnResources, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ProjectsGetXpnResources{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets service resources (a.k.a service project) associated with this host project.", + // "httpMethod": "GET", + // "id": "compute.projects.getXpnResources", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/getXpnResources", + // "response": { + // "$ref": "ProjectsGetXpnResources" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
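+//
+// A paging sketch (assumes svc and ctx as above and a hypothetical host
+// project; ProjectsGetXpnResources and its Resources field are defined
+// elsewhere in this file):
+//
+//	err := svc.Projects.GetXpnResources("my-host-project").MaxResults(100).
+//		Pages(ctx, func(page *ProjectsGetXpnResources) error {
+//			for _, res := range page.Resources {
+//				fmt.Println(res.Type, res.Id)
+//			}
+//			return nil // returning a non-nil error stops the iteration
+//		})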
+func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*ProjectsGetXpnResources) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.projects.listXpnHosts": + +type ProjectsListXpnHostsCall struct { + s *Service + project string + projectslistxpnhostsrequest *ProjectsListXpnHostsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ListXpnHosts: Lists all shared VPC host projects visible to the user +// in an organization. +func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { + c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.projectslistxpnhostsrequest = projectslistxpnhostsrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "order_by": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. 
+// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("order_by", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpnHostsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsListXpnHostsCall) Context(ctx context.Context) *ProjectsListXpnHostsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsListXpnHostsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectslistxpnhostsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/listXpnHosts") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.listXpnHosts" call. +// Exactly one of *XpnHostList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *XpnHostList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &XpnHostList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all shared VPC host projects visible to the user in an organization.", + // "httpMethod": "POST", + // "id": "compute.projects.listXpnHosts", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/listXpnHosts", + // "request": { + // "$ref": "ProjectsListXpnHostsRequest" + // }, + // "response": { + // "$ref": "XpnHostList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.projects.moveDisk": + +type ProjectsMoveDiskCall struct { + s *Service + project string + diskmoverequest *DiskMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// MoveDisk: Moves a persistent disk from one zone to another. +func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { + c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.diskmoverequest = diskmoverequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
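+//
+// A retry sketch for MoveDisk with a stable, client-generated request ID
+// (illustrative resource URLs and project ID; requestID is assumed to be a
+// fresh UUID string and svc an initialized *Service):
+//
+//	req := &DiskMoveRequest{
+//		DestinationZone: "zones/us-central1-b",
+//		TargetDisk:      "zones/us-central1-a/disks/my-disk",
+//	}
+//	op, err := svc.Projects.MoveDisk("my-project", req).RequestId(requestID).Context(ctx).Do()
+//	if err != nil {
+//		// Reusing the same requestID lets the server deduplicate the retry.
+//		op, err = svc.Projects.MoveDisk("my-project", req).RequestId(requestID).Context(ctx).Do()
+//	}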
+func (c *ProjectsMoveDiskCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.moveDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Moves a persistent disk from one zone to another.", + // "httpMethod": "POST", + // "id": "compute.projects.moveDisk", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/moveDisk", + // "request": { + // "$ref": "DiskMoveRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.moveInstance": + +type ProjectsMoveInstanceCall struct { + s *Service + project string + instancemoverequest *InstanceMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// MoveInstance: Moves an instance and its attached persistent disks +// from one zone to another. +func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { + c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instancemoverequest = instancemoverequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsMoveInstanceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.moveInstance" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Moves an instance and its attached persistent disks from one zone to another.", + // "httpMethod": "POST", + // "id": "compute.projects.moveInstance", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/moveInstance", + // "request": { + // "$ref": "InstanceMoveRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.setCommonInstanceMetadata": + +type ProjectsSetCommonInstanceMetadataCall struct { + s *Service + project string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetCommonInstanceMetadata: Sets metadata common to all instances +// within the specified project using the data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata +func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { + c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.metadata = metadata + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.setCommonInstanceMetadata" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.projects.setCommonInstanceMetadata", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/setCommonInstanceMetadata", + // "request": { + // "$ref": "Metadata" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.setDefaultNetworkTier": + +type ProjectsSetDefaultNetworkTierCall struct { + s *Service + project string + projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetDefaultNetworkTier: Sets the default network tier of the project. +// The default network tier is used when an +// address/forwardingRule/instance is created without specifying the +// network tier field. +func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall { + c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.projectssetdefaultnetworktierrequest = projectssetdefaultnetworktierrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *ProjectsSetDefaultNetworkTierCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultNetworkTierCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSetDefaultNetworkTierCall) Context(ctx context.Context) *ProjectsSetDefaultNetworkTierCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultnetworktierrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setDefaultNetworkTier") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.setDefaultNetworkTier" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", + // "httpMethod": "POST", + // "id": "compute.projects.setDefaultNetworkTier", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/setDefaultNetworkTier", + // "request": { + // "$ref": "ProjectsSetDefaultNetworkTierRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.setDefaultServiceAccount": + +type ProjectsSetDefaultServiceAccountCall struct { + s *Service + project string + projectssetdefaultserviceaccountrequest *ProjectsSetDefaultServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetDefaultServiceAccount: Sets the default service account of the +// project. The default service account is used when a VM instance is +// created with the service account email address set to "default". +func (r *ProjectsService) SetDefaultServiceAccount(project string, projectssetdefaultserviceaccountrequest *ProjectsSetDefaultServiceAccountRequest) *ProjectsSetDefaultServiceAccountCall { + c := &ProjectsSetDefaultServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.projectssetdefaultserviceaccountrequest = projectssetdefaultserviceaccountrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsSetDefaultServiceAccountCall) RequestId(requestId string) *ProjectsSetDefaultServiceAccountCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSetDefaultServiceAccountCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultServiceAccountCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSetDefaultServiceAccountCall) Context(ctx context.Context) *ProjectsSetDefaultServiceAccountCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsSetDefaultServiceAccountCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsSetDefaultServiceAccountCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultserviceaccountrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setDefaultServiceAccount") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.setDefaultServiceAccount" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSetDefaultServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the default service account of the project. The default service account is used when a VM instance is created with the service account email address set to \"default\".", + // "httpMethod": "POST", + // "id": "compute.projects.setDefaultServiceAccount", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/setDefaultServiceAccount", + // "request": { + // "$ref": "ProjectsSetDefaultServiceAccountRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.setUsageExportBucket": + +type ProjectsSetUsageExportBucketCall struct { + s *Service + project string + usageexportlocation *UsageExportLocation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetUsageExportBucket: Enables the usage export feature and sets the +// usage export bucket where reports are stored. If you provide an empty +// request body using this method, the usage export feature will be +// disabled. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket +func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { + c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.usageexportlocation = usageexportlocation + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.setUsageExportBucket" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + // "httpMethod": "POST", + // "id": "compute.projects.setUsageExportBucket", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/setUsageExportBucket", + // "request": { + // "$ref": "UsageExportLocation" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "compute.regionAutoscalers.delete": + +type RegionAutoscalersDeleteCall struct { + s *Service + project string + region string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified autoscaler. +func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { + c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionAutoscalersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "autoscaler": c.autoscaler, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionAutoscalers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified autoscaler.", + // "httpMethod": "DELETE", + // "id": "compute.regionAutoscalers.delete", + // "parameterOrder": [ + // "project", + // "region", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionAutoscalers.get": + +type RegionAutoscalersGetCall struct { + s *Service + project string + region string + autoscaler string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified autoscaler. +func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { + c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.autoscaler = autoscaler + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionAutoscalersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "securityPolicy": c.securityPolicy, + "project": c.project, + "region": c.region, + "autoscaler": c.autoscaler, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.organizationSecurityPolicies.addAssociation" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionAutoscalers.get" call. +// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *Autoscaler.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *OrganizationSecurityPoliciesAddAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -96903,7 +109148,7 @@ func (c *OrganizationSecurityPoliciesAddAssociationCall) Do(opts ...googleapi.Ca if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Autoscaler{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -96915,57 +109160,69 @@ func (c *OrganizationSecurityPoliciesAddAssociationCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Inserts an association for the specified security policy.", - // "httpMethod": "POST", - // "id": "compute.organizationSecurityPolicies.addAssociation", + // "description": "Returns the specified autoscaler.", + // "httpMethod": "GET", + // "id": "compute.regionAutoscalers.get", // "parameterOrder": [ - // "securityPolicy" + // "project", + // "region", + // "autoscaler" // ], // "parameters": { - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "autoscaler": { + // "description": "Name of the autoscaler to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to update.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[0-9]{0,20}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "locations/global/securityPolicies/{securityPolicy}/addAssociation", - // "request": { - // "$ref": "SecurityPolicyAssociation" - // }, + // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", // "response": { - // "$ref": "Operation" + // "$ref": "Autoscaler" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.organizationSecurityPolicies.addRule": +// method id "compute.regionAutoscalers.insert": -type OrganizationSecurityPoliciesAddRuleCall struct { - s *Service - securityPolicy string - securitypolicyrule *SecurityPolicyRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersInsertCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + 
header_ http.Header } -// AddRule: Inserts a rule into a security policy. -func (r *OrganizationSecurityPoliciesService) AddRule(securityPolicy string, securitypolicyrule *SecurityPolicyRule) *OrganizationSecurityPoliciesAddRuleCall { - c := &OrganizationSecurityPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.securityPolicy = securityPolicy - c.securitypolicyrule = securitypolicyrule +// Insert: Creates an autoscaler in the specified project using the data +// included in the request. +func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { + c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.autoscaler = autoscaler return c } @@ -96983,7 +109240,7 @@ func (r *OrganizationSecurityPoliciesService) AddRule(securityPolicy string, sec // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *OrganizationSecurityPoliciesAddRuleCall) RequestId(requestId string) *OrganizationSecurityPoliciesAddRuleCall { +func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -96991,7 +109248,7 @@ func (c *OrganizationSecurityPoliciesAddRuleCall) RequestId(requestId string) *O // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationSecurityPoliciesAddRuleCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesAddRuleCall { +func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -96999,35 +109256,35 @@ func (c *OrganizationSecurityPoliciesAddRuleCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationSecurityPoliciesAddRuleCall) Context(ctx context.Context) *OrganizationSecurityPoliciesAddRuleCall { +func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationSecurityPoliciesAddRuleCall) Header() http.Header { +func (c *RegionAutoscalersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationSecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/addRule") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -97035,19 +109292,20 @@ func (c *OrganizationSecurityPoliciesAddRuleCall) doRequest(alt string) (*http.R } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "securityPolicy": c.securityPolicy, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.organizationSecurityPolicies.addRule" call. +// Do executes the "compute.regionAutoscalers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *OrganizationSecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -97078,29 +109336,37 @@ func (c *OrganizationSecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Inserts a rule into a security policy.", + // "description": "Creates an autoscaler in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.organizationSecurityPolicies.addRule", + // "id": "compute.regionAutoscalers.insert", // "parameterOrder": [ - // "securityPolicy" + // "project", + // "region" // ], // "parameters": { - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to update.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[0-9]{0,20}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "locations/global/securityPolicies/{securityPolicy}/addRule", + // "path": "{project}/regions/{region}/autoscalers", // "request": { - // "$ref": "SecurityPolicyRule" + // "$ref": "Autoscaler" // }, // "response": { // "$ref": "Operation" @@ -97113,97 +109379,159 @@ func (c *OrganizationSecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOptio } -// method id "compute.organizationSecurityPolicies.delete": +// method id "compute.regionAutoscalers.list": -type OrganizationSecurityPoliciesDeleteCall struct { - s *Service - securityPolicy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified policy. -func (r *OrganizationSecurityPoliciesService) Delete(securityPolicy string) *OrganizationSecurityPoliciesDeleteCall { - c := &OrganizationSecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.securityPolicy = securityPolicy +// List: Retrieves a list of autoscalers contained within the specified +// region. +func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { + c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *OrganizationSecurityPoliciesDeleteCall) RequestId(requestId string) *OrganizationSecurityPoliciesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. 
You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationSecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesDeleteCall { +func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *OrganizationSecurityPoliciesDeleteCall) Context(ctx context.Context) *OrganizationSecurityPoliciesDeleteCall { +func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationSecurityPoliciesDeleteCall) Header() http.Header { +func (c *RegionAutoscalersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationSecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "securityPolicy": c.securityPolicy, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.organizationSecurityPolicies.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *OrganizationSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionAutoscalers.list" call. +// Exactly one of *RegionAutoscalerList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RegionAutoscalerList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -97222,7 +109550,7 @@ func (c *OrganizationSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionAutoscalerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -97234,125 +109562,196 @@ func (c *OrganizationSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Deletes the specified policy.", - // "httpMethod": "DELETE", - // "id": "compute.organizationSecurityPolicies.delete", + // "description": "Retrieves a list of autoscalers contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionAutoscalers.list", // "parameterOrder": [ - // "securityPolicy" + // "project", + // "region" // ], // "parameters": { - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to delete.", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[0-9]{0,20}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "locations/global/securityPolicies/{securityPolicy}", + // "path": "{project}/regions/{region}/autoscalers", // "response": { - // "$ref": "Operation" + // "$ref": "RegionAutoscalerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.organizationSecurityPolicies.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type OrganizationSecurityPoliciesGetCall struct { - s *Service - securityPolicy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.regionAutoscalers.patch": + +type RegionAutoscalersPatchCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: List all of the ordered rules present in a single specified -// policy. -func (r *OrganizationSecurityPoliciesService) Get(securityPolicy string) *OrganizationSecurityPoliciesGetCall { - c := &OrganizationSecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.securityPolicy = securityPolicy +// Patch: Updates an autoscaler in the specified project using the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. 
+func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { + c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to patch. +func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationSecurityPoliciesGetCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesGetCall { +func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *OrganizationSecurityPoliciesGetCall) IfNoneMatch(entityTag string) *OrganizationSecurityPoliciesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationSecurityPoliciesGetCall) Context(ctx context.Context) *OrganizationSecurityPoliciesGetCall { +func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *OrganizationSecurityPoliciesGetCall) Header() http.Header { +func (c *RegionAutoscalersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationSecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "securityPolicy": c.securityPolicy, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.organizationSecurityPolicies.get" call. -// Exactly one of *SecurityPolicy or error will be non-nil. Any non-2xx +// Do executes the "compute.regionAutoscalers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SecurityPolicy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *OrganizationSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPolicy, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -97371,7 +109770,7 @@ func (c *OrganizationSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SecurityPolicy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -97383,127 +109782,141 @@ func (c *OrganizationSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "List all of the ordered rules present in a single specified policy.", - // "httpMethod": "GET", - // "id": "compute.organizationSecurityPolicies.get", + // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionAutoscalers.patch", // "parameterOrder": [ - // "securityPolicy" + // "project", + // "region" // ], // "parameters": { - // "securityPolicy": { - // "description": "Name of the security policy to get.", + // "autoscaler": { + // "description": "Name of the autoscaler to patch.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[0-9]{0,20}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "locations/global/securityPolicies/{securityPolicy}", + // "path": "{project}/regions/{region}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, // "response": { - // "$ref": "SecurityPolicy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.organizationSecurityPolicies.getAssociation": - -type OrganizationSecurityPoliciesGetAssociationCall struct { - s *Service - securityPolicy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} +// method id "compute.regionAutoscalers.testIamPermissions": -// GetAssociation: Gets an association with the specified name. -func (r *OrganizationSecurityPoliciesService) GetAssociation(securityPolicy string) *OrganizationSecurityPoliciesGetAssociationCall { - c := &OrganizationSecurityPoliciesGetAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.securityPolicy = securityPolicy - return c +type RegionAutoscalersTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Name sets the optional parameter "name": The name of the association -// to get from the security policy. 
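A minimal usage sketch of the RegionAutoscalersPatchCall plumbing generated above. It assumes this file is the beta Compute client vendored from google.golang.org/api/compute/v0.beta, that the client is still constructed with compute.New plus an OAuth2 http.Client, and that the Autoscaler and Operation field names (Name, AutoscalingPolicy.MaxNumReplicas, Status) match the public API; the project, region and resource names are placeholders.

package main

import (
    "context"
    "log"

    "golang.org/x/oauth2/google"
    compute "google.golang.org/api/compute/v0.beta" // assumed import path for this generated client
)

func main() {
    ctx := context.Background()

    // Application Default Credentials; the scope constant comes from the generated package.
    httpClient, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
    if err != nil {
        log.Fatal(err)
    }
    svc, err := compute.New(httpClient)
    if err != nil {
        log.Fatal(err)
    }

    // JSON merge patch: only the fields set here are sent; the rest of the
    // autoscaler is left as-is (see the PATCH descriptor above).
    patch := &compute.Autoscaler{
        Name:              "web-autoscaler", // assumed field names from the public API
        AutoscalingPolicy: &compute.AutoscalingPolicy{MaxNumReplicas: 10},
    }

    op, err := svc.RegionAutoscalers.
        Patch("my-project", "us-central1", patch).
        Autoscaler("web-autoscaler").                      // optional query parameter naming the target
        RequestId("f7b2c0de-0000-4000-8000-000000000001"). // caller-generated UUID for idempotent retries
        Context(ctx).
        Do()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("patch operation %s: %s", op.Name, op.Status)
}

Because the call issues a PATCH with JSON merge-patch semantics (per the descriptor above), only the fields set on the request body are changed; the Update call further below takes the same builder shape but issues a PUT with a complete Autoscaler.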
-func (c *OrganizationSecurityPoliciesGetAssociationCall) Name(name string) *OrganizationSecurityPoliciesGetAssociationCall { - c.urlParams_.Set("name", name) +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionAutoscalersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionAutoscalersTestIamPermissionsCall { + c := &RegionAutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationSecurityPoliciesGetAssociationCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesGetAssociationCall { +func (c *RegionAutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionAutoscalersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *OrganizationSecurityPoliciesGetAssociationCall) IfNoneMatch(entityTag string) *OrganizationSecurityPoliciesGetAssociationCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationSecurityPoliciesGetAssociationCall) Context(ctx context.Context) *OrganizationSecurityPoliciesGetAssociationCall { +func (c *RegionAutoscalersTestIamPermissionsCall) Context(ctx context.Context) *RegionAutoscalersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationSecurityPoliciesGetAssociationCall) Header() http.Header { +func (c *RegionAutoscalersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationSecurityPoliciesGetAssociationCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/getAssociation") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "securityPolicy": c.securityPolicy, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.organizationSecurityPolicies.getAssociation" call. -// Exactly one of *SecurityPolicyAssociation or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *SecurityPolicyAssociation.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionAutoscalers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *OrganizationSecurityPoliciesGetAssociationCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyAssociation, error) { +func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -97522,7 +109935,7 @@ func (c *OrganizationSecurityPoliciesGetAssociationCall) Do(opts ...googleapi.Ca if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SecurityPolicyAssociation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -97534,29 +109947,43 @@ func (c *OrganizationSecurityPoliciesGetAssociationCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Gets an association with the specified name.", - // "httpMethod": "GET", - // "id": "compute.organizationSecurityPolicies.getAssociation", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.regionAutoscalers.testIamPermissions", // "parameterOrder": [ - // "securityPolicy" + // "project", + // "region", + // "resource" // ], // "parameters": { - // "name": { - // "description": "The name of the association to get from the security policy.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to which the queried rule belongs.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", - // "pattern": "[0-9]{0,20}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": 
"locations/global/securityPolicies/{securityPolicy}/getAssociation", + // "path": "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "SecurityPolicyAssociation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -97567,99 +109994,115 @@ func (c *OrganizationSecurityPoliciesGetAssociationCall) Do(opts ...googleapi.Ca } -// method id "compute.organizationSecurityPolicies.getRule": +// method id "compute.regionAutoscalers.update": -type OrganizationSecurityPoliciesGetRuleCall struct { - s *Service - securityPolicy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionAutoscalersUpdateCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetRule: Gets a rule at the specified priority. -func (r *OrganizationSecurityPoliciesService) GetRule(securityPolicy string) *OrganizationSecurityPoliciesGetRuleCall { - c := &OrganizationSecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.securityPolicy = securityPolicy +// Update: Updates an autoscaler in the specified project using the data +// included in the request. +func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { + c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.autoscaler = autoscaler return c } -// Priority sets the optional parameter "priority": The priority of the -// rule to get from the security policy. -func (c *OrganizationSecurityPoliciesGetRuleCall) Priority(priority int64) *OrganizationSecurityPoliciesGetRuleCall { - c.urlParams_.Set("priority", fmt.Sprint(priority)) +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to update. +func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *OrganizationSecurityPoliciesGetRuleCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesGetRuleCall { +func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *OrganizationSecurityPoliciesGetRuleCall) IfNoneMatch(entityTag string) *OrganizationSecurityPoliciesGetRuleCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationSecurityPoliciesGetRuleCall) Context(ctx context.Context) *OrganizationSecurityPoliciesGetRuleCall { +func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationSecurityPoliciesGetRuleCall) Header() http.Header { +func (c *RegionAutoscalersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationSecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/getRule") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "securityPolicy": c.securityPolicy, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.organizationSecurityPolicies.getRule" call. -// Exactly one of *SecurityPolicyRule or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SecurityPolicyRule.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *OrganizationSecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyRule, error) { +// Do executes the "compute.regionAutoscalers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -97678,7 +110121,7 @@ func (c *OrganizationSecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SecurityPolicyRule{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -97690,62 +110133,73 @@ func (c *OrganizationSecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Gets a rule at the specified priority.", - // "httpMethod": "GET", - // "id": "compute.organizationSecurityPolicies.getRule", + // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.regionAutoscalers.update", // "parameterOrder": [ - // "securityPolicy" + // "project", + // "region" // ], // "parameters": { - // "priority": { - // "description": "The priority of the rule to get from the security policy.", - // "format": "int32", + // "autoscaler": { + // "description": "Name of the autoscaler to update.", // "location": "query", - // "type": "integer" + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to which the queried rule belongs.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[0-9]{0,20}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "locations/global/securityPolicies/{securityPolicy}/getRule", + // "path": "{project}/regions/{region}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, // "response": { - // "$ref": "SecurityPolicyRule" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.organizationSecurityPolicies.insert": +// method id "compute.regionBackendServices.delete": -type OrganizationSecurityPoliciesInsertCall struct { +type RegionBackendServicesDeleteCall struct { s *Service - securitypolicy *SecurityPolicy + project string + region string + backendService string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a new policy in the specified project using the data -// included in the request. -func (r *OrganizationSecurityPoliciesService) Insert(securitypolicy *SecurityPolicy) *OrganizationSecurityPoliciesInsertCall { - c := &OrganizationSecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.securitypolicy = securitypolicy - return c -} - -// ParentId sets the optional parameter "parentId": Parent ID for this -// request. -func (c *OrganizationSecurityPoliciesInsertCall) ParentId(parentId string) *OrganizationSecurityPoliciesInsertCall { - c.urlParams_.Set("parentId", parentId) +// Delete: Deletes the specified regional BackendService resource. +func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { + c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.backendService = backendService return c } @@ -97763,7 +110217,7 @@ func (c *OrganizationSecurityPoliciesInsertCall) ParentId(parentId string) *Orga // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *OrganizationSecurityPoliciesInsertCall) RequestId(requestId string) *OrganizationSecurityPoliciesInsertCall { +func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBackendServicesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -97771,7 +110225,7 @@ func (c *OrganizationSecurityPoliciesInsertCall) RequestId(requestId string) *Or // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationSecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesInsertCall { +func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBackendServicesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -97779,52 +110233,52 @@ func (c *OrganizationSecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *O // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
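The requestId contract documented in the generated comments above can be sketched like this, again reusing ctx and svc from the first example. The retry-on-any-error loop is deliberately naive and the UUID is a hard-coded placeholder; real code would generate a fresh UUID per logical request and retry only transient failures.

func deleteBackendService(ctx context.Context, svc *compute.Service) error {
    const requestID = "3b4f2a10-0000-4000-8000-0000000000ab" // one UUID per logical delete (placeholder)

    call := func() (*compute.Operation, error) {
        return svc.RegionBackendServices.
            Delete("my-project", "us-central1", "web-backend").
            RequestId(requestID). // same ID on retry => server ignores the duplicate
            Context(ctx).
            Do()
    }

    op, err := call()
    if err != nil {
        // Naive retry: with the same requestID, a request that did reach the
        // server the first time is not executed a second time.
        if op, err = call(); err != nil {
            return err
        }
    }
    log.Printf("delete operation %s: %s", op.Name, op.Status)
    return nil
}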
-func (c *OrganizationSecurityPoliciesInsertCall) Context(ctx context.Context) *OrganizationSecurityPoliciesInsertCall { +func (c *RegionBackendServicesDeleteCall) Context(ctx context.Context) *RegionBackendServicesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationSecurityPoliciesInsertCall) Header() http.Header { +func (c *RegionBackendServicesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationSecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "backendService": c.backendService, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.organizationSecurityPolicies.insert" call. +// Do executes the "compute.regionBackendServices.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *OrganizationSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -97855,13 +110309,34 @@ func (c *OrganizationSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Creates a new policy in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.organizationSecurityPolicies.insert", + // "description": "Deletes the specified regional BackendService resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionBackendServices.delete", + // "parameterOrder": [ + // "project", + // "region", + // "backendService" + // ], // "parameters": { - // "parentId": { - // "description": "Parent ID for this request.", - // "location": "query", + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "requestId": { @@ -97870,10 +110345,7 @@ func (c *OrganizationSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption // "type": "string" // } // }, - // "path": "locations/global/securityPolicies", - // "request": { - // "$ref": "SecurityPolicy" - // }, + // "path": "{project}/regions/{region}/backendServices/{backendService}", // "response": { // "$ref": "Operation" // }, @@ -97885,97 +110357,32 @@ func (c *OrganizationSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption } -// method id "compute.organizationSecurityPolicies.list": - -type OrganizationSecurityPoliciesListCall struct { - s *Service - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: List all the policies that have been configured for the -// specified project. -func (r *OrganizationSecurityPoliciesService) List() *OrganizationSecurityPoliciesListCall { - c := &OrganizationSecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). 
By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *OrganizationSecurityPoliciesListCall) Filter(filter string) *OrganizationSecurityPoliciesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *OrganizationSecurityPoliciesListCall) MaxResults(maxResults int64) *OrganizationSecurityPoliciesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *OrganizationSecurityPoliciesListCall) OrderBy(orderBy string) *OrganizationSecurityPoliciesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} +// method id "compute.regionBackendServices.get": -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *OrganizationSecurityPoliciesListCall) PageToken(pageToken string) *OrganizationSecurityPoliciesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c +type RegionBackendServicesGetCall struct { + s *Service + project string + region string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ParentId sets the optional parameter "parentId": Parent ID for this -// request. -func (c *OrganizationSecurityPoliciesListCall) ParentId(parentId string) *OrganizationSecurityPoliciesListCall { - c.urlParams_.Set("parentId", parentId) +// Get: Returns the specified regional BackendService resource. +func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { + c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.backendService = backendService return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *OrganizationSecurityPoliciesListCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesListCall { +func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -97985,7 +110392,7 @@ func (c *OrganizationSecurityPoliciesListCall) Fields(s ...googleapi.Field) *Org // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *OrganizationSecurityPoliciesListCall) IfNoneMatch(entityTag string) *OrganizationSecurityPoliciesListCall { +func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -97993,21 +110400,21 @@ func (c *OrganizationSecurityPoliciesListCall) IfNoneMatch(entityTag string) *Or // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationSecurityPoliciesListCall) Context(ctx context.Context) *OrganizationSecurityPoliciesListCall { +func (c *RegionBackendServicesGetCall) Context(ctx context.Context) *RegionBackendServicesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationSecurityPoliciesListCall) Header() http.Header { +func (c *RegionBackendServicesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationSecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -98019,24 +110426,29 @@ func (c *OrganizationSecurityPoliciesListCall) doRequest(alt string) (*http.Resp var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "backendService": c.backendService, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.organizationSecurityPolicies.list" call. -// Exactly one of *SecurityPolicyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SecurityPolicyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionBackendServices.get" call. +// Exactly one of *BackendService or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *BackendService.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *OrganizationSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyList, error) { +func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -98055,7 +110467,7 @@ func (c *OrganizationSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SecurityPolicyList{ + ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -98067,42 +110479,40 @@ func (c *OrganizationSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "List all the policies that have been configured for the specified project.", + // "description": "Returns the specified regional BackendService resource.", // "httpMethod": "GET", - // "id": "compute.organizationSecurityPolicies.list", + // "id": "compute.regionBackendServices.get", + // "parameterOrder": [ + // "project", + // "region", + // "backendService" + // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "backendService": { + // "description": "Name of the BackendService resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "parentId": { - // "description": "Parent ID for this request.", - // "location": "query", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "locations/global/securityPolicies", + // "path": "{project}/regions/{region}/backendServices/{backendService}", // "response": { - // "$ref": "SecurityPolicyList" + // "$ref": "BackendService" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -98113,76 +110523,34 @@ func (c *OrganizationSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *OrganizationSecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPolicyList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.organizationSecurityPolicies.patchRule": - -type OrganizationSecurityPoliciesPatchRuleCall struct { - s *Service - securityPolicy string - securitypolicyrule *SecurityPolicyRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// PatchRule: Patches a rule at the specified priority. -func (r *OrganizationSecurityPoliciesService) PatchRule(securityPolicy string, securitypolicyrule *SecurityPolicyRule) *OrganizationSecurityPoliciesPatchRuleCall { - c := &OrganizationSecurityPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.securityPolicy = securityPolicy - c.securitypolicyrule = securitypolicyrule - return c -} +// method id "compute.regionBackendServices.getHealth": -// Priority sets the optional parameter "priority": The priority of the -// rule to patch. 
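A sketch of the partial-response (Fields) and ETag (IfNoneMatch) options wired into the RegionBackendServicesGetCall above. It additionally assumes the google.golang.org/api/googleapi package referenced throughout this file; the field paths and the cached-ETag flow are illustrative only.

// Also needs: "google.golang.org/api/googleapi"
func getBackendService(ctx context.Context, svc *compute.Service, cachedETag string) (*compute.BackendService, error) {
    call := svc.RegionBackendServices.
        Get("my-project", "us-central1", "web-backend").
        Fields("name", "backends", "healthChecks"). // partial response: only the fields we use
        Context(ctx)
    if cachedETag != "" {
        call = call.IfNoneMatch(cachedETag) // ask for 304 if unchanged since our cached copy
    }
    bs, err := call.Do()
    if googleapi.IsNotModified(err) {
        log.Printf("backend service unchanged; keep using the cached copy")
        return nil, nil
    }
    if err != nil {
        return nil, err
    }
    return bs, nil
}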
-func (c *OrganizationSecurityPoliciesPatchRuleCall) Priority(priority int64) *OrganizationSecurityPoliciesPatchRuleCall { - c.urlParams_.Set("priority", fmt.Sprint(priority)) - return c +type RegionBackendServicesGetHealthCall struct { + s *Service + project string + region string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *OrganizationSecurityPoliciesPatchRuleCall) RequestId(requestId string) *OrganizationSecurityPoliciesPatchRuleCall { - c.urlParams_.Set("requestId", requestId) +// GetHealth: Gets the most recent health check results for this +// regional BackendService. +func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { + c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationSecurityPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesPatchRuleCall { +func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetHealthCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -98190,35 +110558,35 @@ func (c *OrganizationSecurityPoliciesPatchRuleCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationSecurityPoliciesPatchRuleCall) Context(ctx context.Context) *OrganizationSecurityPoliciesPatchRuleCall { +func (c *RegionBackendServicesGetHealthCall) Context(ctx context.Context) *RegionBackendServicesGetHealthCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *OrganizationSecurityPoliciesPatchRuleCall) Header() http.Header { +func (c *RegionBackendServicesGetHealthCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationSecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/patchRule") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -98226,19 +110594,21 @@ func (c *OrganizationSecurityPoliciesPatchRuleCall) doRequest(alt string) (*http } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "securityPolicy": c.securityPolicy, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.organizationSecurityPolicies.patchRule" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *OrganizationSecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionBackendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -98257,7 +110627,7 @@ func (c *OrganizationSecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOpt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &BackendServiceGroupHealth{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -98269,69 +110639,74 @@ func (c *OrganizationSecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Patches a rule at the specified priority.", + // "description": "Gets the most recent health check results for this regional BackendService.", // "httpMethod": "POST", - // "id": "compute.organizationSecurityPolicies.patchRule", + // "id": "compute.regionBackendServices.getHealth", // "parameterOrder": [ - // "securityPolicy" + // "project", + // "region", + // "backendService" // ], // "parameters": { - // "priority": { - // "description": "The priority of the rule to patch.", - // "format": "int32", - // "location": "query", - // "type": "integer" + // "backendService": { + // "description": "Name of the BackendService resource for which to get health.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "project": { + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to update.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[0-9]{0,20}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "locations/global/securityPolicies/{securityPolicy}/patchRule", + // "path": "{project}/regions/{region}/backendServices/{backendService}/getHealth", // "request": { - // "$ref": "SecurityPolicyRule" + // "$ref": "ResourceGroupReference" // }, // "response": { - // "$ref": "Operation" + // "$ref": "BackendServiceGroupHealth" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.organizationSecurityPolicies.removeAssociation": +// method id "compute.regionBackendServices.insert": -type OrganizationSecurityPoliciesRemoveAssociationCall struct { +type RegionBackendServicesInsertCall struct { s *Service - securityPolicy string + project string + region string + backendservice *BackendService urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// RemoveAssociation: Removes an association for the specified security -// policy. -func (r *OrganizationSecurityPoliciesService) RemoveAssociation(securityPolicy string) *OrganizationSecurityPoliciesRemoveAssociationCall { - c := &OrganizationSecurityPoliciesRemoveAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.securityPolicy = securityPolicy - return c -} - -// Name sets the optional parameter "name": Name for the attachment that -// will be removed. -func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Name(name string) *OrganizationSecurityPoliciesRemoveAssociationCall { - c.urlParams_.Set("name", name) +// Insert: Creates a regional BackendService resource in the specified +// project using the data included in the request. There are several +// restrictions and guidelines to keep in mind when creating a regional +// backend service. Read Restrictions and Guidelines for more +// information. +func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { + c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.backendservice = backendservice return c } @@ -98349,7 +110724,7 @@ func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Name(name string) *O // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
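A sketch of the regionBackendServices.getHealth call generated above. The ResourceGroupReference.Group value is assumed to be the self-link of an instance group already attached to the backend service, and the HealthStatus field names are taken from the public Compute API rather than from this diff.

func backendHealth(ctx context.Context, svc *compute.Service, groupURL string) error {
    ref := &compute.ResourceGroupReference{Group: groupURL}
    health, err := svc.RegionBackendServices.
        GetHealth("my-project", "us-central1", "web-backend", ref).
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    for _, hs := range health.HealthStatus {
        log.Printf("%s -> %s", hs.IpAddress, hs.HealthState)
    }
    return nil
}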
-func (c *OrganizationSecurityPoliciesRemoveAssociationCall) RequestId(requestId string) *OrganizationSecurityPoliciesRemoveAssociationCall { +func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBackendServicesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -98357,7 +110732,7 @@ func (c *OrganizationSecurityPoliciesRemoveAssociationCall) RequestId(requestId // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesRemoveAssociationCall { +func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBackendServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -98365,30 +110740,35 @@ func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Fields(s ...googleap // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Context(ctx context.Context) *OrganizationSecurityPoliciesRemoveAssociationCall { +func (c *RegionBackendServicesInsertCall) Context(ctx context.Context) *RegionBackendServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Header() http.Header { +func (c *RegionBackendServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationSecurityPoliciesRemoveAssociationCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/removeAssociation") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -98396,19 +110776,20 @@ func (c *OrganizationSecurityPoliciesRemoveAssociationCall) doRequest(alt string } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "securityPolicy": c.securityPolicy, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.organizationSecurityPolicies.removeAssociation" call. +// Do executes the "compute.regionBackendServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -98439,32 +110820,38 @@ func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Removes an association for the specified security policy.", + // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information.", // "httpMethod": "POST", - // "id": "compute.organizationSecurityPolicies.removeAssociation", + // "id": "compute.regionBackendServices.insert", // "parameterOrder": [ - // "securityPolicy" + // "project", + // "region" // ], // "parameters": { - // "name": { - // "description": "Name for the attachment that will be removed.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "securityPolicy": { - // "description": "Name of the security policy to update.", - // "location": "path", - // "pattern": "[0-9]{0,20}", - // "required": true, - // "type": "string" // } // }, - // "path": "locations/global/securityPolicies/{securityPolicy}/removeAssociation", + // "path": "{project}/regions/{region}/backendServices", + // "request": { + // "$ref": "BackendService" + // }, // "response": { // "$ref": "Operation" // }, @@ -98476,104 +110863,159 @@ func (c *OrganizationSecurityPoliciesRemoveAssociationCall) Do(opts ...googleapi } -// method id "compute.organizationSecurityPolicies.removeRule": +// method id "compute.regionBackendServices.list": -type OrganizationSecurityPoliciesRemoveRuleCall struct { - s *Service - securityPolicy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RemoveRule: Deletes a rule at the specified priority. -func (r *OrganizationSecurityPoliciesService) RemoveRule(securityPolicy string) *OrganizationSecurityPoliciesRemoveRuleCall { - c := &OrganizationSecurityPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.securityPolicy = securityPolicy +// List: Retrieves the list of regional BackendService resources +// available to the specified project in the given region. +func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { + c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region return c } -// Priority sets the optional parameter "priority": The priority of the -// rule to remove from the security policy. -func (c *OrganizationSecurityPoliciesRemoveRuleCall) Priority(priority int64) *OrganizationSecurityPoliciesRemoveRuleCall { - c.urlParams_.Set("priority", fmt.Sprint(priority)) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
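The Filter comment above spells out the expression syntax the new regional list calls accept (field name, comparison operator, value, with parenthesised AND/OR combinations). A minimal usage sketch for compute.regionBackendServices.list with a filter; it assumes the generated package is imported as compute (adjust the import path to whichever compute version this vendored file belongs to, e.g. google.golang.org/api/compute/v0.beta), that Application Default Credentials are available, and that the project/region values are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path; match this vendored package
)

func main() {
	ctx := context.Background()

	// Authenticated HTTP client via Application Default Credentials.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder project and region; the filter mirrors the documented example.
	list, err := svc.RegionBackendServices.List("my-project", "us-central1").
		Filter("name != example-instance").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, bs := range list.Items {
		fmt.Println(bs.Name)
	}
}

The sketches further below reuse ctx and svc from this example.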
+func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { + c.urlParams_.Set("filter", filter) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBackendServicesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *OrganizationSecurityPoliciesRemoveRuleCall) RequestId(requestId string) *OrganizationSecurityPoliciesRemoveRuleCall { - c.urlParams_.Set("requestId", requestId) +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendServicesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBackendServicesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationSecurityPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *OrganizationSecurityPoliciesRemoveRuleCall { +func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBackendServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
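The IfNoneMatch hook added just above is the conditional-request side of these list calls; googleapi.IsNotModified (from google.golang.org/api/googleapi, which this file already imports) is how a caller detects the resulting 304. A short sketch, reusing svc from the earlier example and assuming the ETag was saved from a previous response's header:

var lastETag string // e.g. saved earlier from list.ServerResponse.Header.Get("Etag")

list, err := svc.RegionBackendServices.List("my-project", "us-central1").
	IfNoneMatch(lastETag).
	Do()
switch {
case googleapi.IsNotModified(err):
	// 304: nothing changed since lastETag; keep using the cached result.
case err != nil:
	log.Fatal(err)
default:
	lastETag = list.ServerResponse.Header.Get("Etag") // remember for the next conditional request
}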
+func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBackendServicesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationSecurityPoliciesRemoveRuleCall) Context(ctx context.Context) *OrganizationSecurityPoliciesRemoveRuleCall { +func (c *RegionBackendServicesListCall) Context(ctx context.Context) *RegionBackendServicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationSecurityPoliciesRemoveRuleCall) Header() http.Header { +func (c *RegionBackendServicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationSecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "locations/global/securityPolicies/{securityPolicy}/removeRule") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "securityPolicy": c.securityPolicy, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.organizationSecurityPolicies.removeRule" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *OrganizationSecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionBackendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendServiceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -98592,7 +111034,7 @@ func (c *OrganizationSecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -98604,58 +111046,111 @@ func (c *OrganizationSecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Deletes a rule at the specified priority.", - // "httpMethod": "POST", - // "id": "compute.organizationSecurityPolicies.removeRule", + // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", + // "httpMethod": "GET", + // "id": "compute.regionBackendServices.list", // "parameterOrder": [ - // "securityPolicy" + // "project", + // "region" // ], // "parameters": { - // "priority": { - // "description": "The priority of the rule to remove from the security policy.", - // "format": "int32", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", // "location": "query", + // "minimum": "0", // "type": "integer" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to update.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[0-9]{0,20}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "locations/global/securityPolicies/{securityPolicy}/removeRule", + // "path": "{project}/regions/{region}/backendServices", // "response": { - // "$ref": "Operation" + // "$ref": "BackendServiceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.disableXpnHost": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsDisableXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionBackendServices.patch": + +type RegionBackendServicesPatchCall struct { + s *Service + project string + region string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DisableXpnHost: Disable this project as a shared VPC host project. -func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { - c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified regional BackendService resource with +// the data included in the request. 
There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. +func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { + c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.backendService = backendService + c.backendservice = backendservice return c } @@ -98673,7 +111168,7 @@ func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHost // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisableXpnHostCall { +func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBackendServicesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -98681,7 +111176,7 @@ func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisabl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnHostCall { +func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBackendServicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -98689,50 +111184,57 @@ func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisab // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsDisableXpnHostCall) Context(ctx context.Context) *ProjectsDisableXpnHostCall { +func (c *RegionBackendServicesPatchCall) Context(ctx context.Context) *RegionBackendServicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsDisableXpnHostCall) Header() http.Header { +func (c *RegionBackendServicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.disableXpnHost" call. +// Do executes the "compute.regionBackendServices.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -98763,13 +111265,22 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Disable this project as a shared VPC host project.", - // "httpMethod": "POST", - // "id": "compute.projects.disableXpnHost", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionBackendServices.patch", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "backendService" // ], // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -98777,13 +111288,23 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/disableXpnHost", + // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, // "response": { // "$ref": "Operation" // }, @@ -98795,49 +111316,34 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.projects.disableXpnResource": +// method id "compute.regionBackendServices.testIamPermissions": -type ProjectsDisableXpnResourceCall struct { - s *Service - project string - projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DisableXpnResource: Disable a serivce resource (a.k.a service -// project) associated with this host project. -func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { - c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionBackendServicesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionBackendServicesTestIamPermissionsCall { + c := &RegionBackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectsdisablexpnresourcerequest = projectsdisablexpnresourcerequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDisableXpnResourceCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
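compute.regionBackendServices.testIamPermissions, introduced above, takes a TestPermissionsRequest naming the permissions to probe and returns the subset the caller actually holds. A sketch reusing svc; the Permissions field on the request/response types and the placeholder resource name are assumptions:

req := &compute.TestPermissionsRequest{
	Permissions: []string{
		"compute.backendServices.get",
		"compute.backendServices.update",
	},
}
resp, err := svc.RegionBackendServices.
	TestIamPermissions("my-project", "us-central1", "my-backend-service", req).
	Do()
if err != nil {
	log.Fatal(err)
}
// resp.Permissions lists only those requested permissions the caller was granted.
fmt.Println("granted:", resp.Permissions)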
-func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnResourceCall { +func (c *RegionBackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionBackendServicesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -98845,35 +111351,35 @@ func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsDisableXpnResourceCall) Context(ctx context.Context) *ProjectsDisableXpnResourceCall { +func (c *RegionBackendServicesTestIamPermissionsCall) Context(ctx context.Context) *RegionBackendServicesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsDisableXpnResourceCall) Header() http.Header { +func (c *RegionBackendServicesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsdisablexpnresourcerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnResource") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -98881,19 +111387,21 @@ func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.disableXpnResource" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionBackendServices.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -98912,7 +111420,7 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -98924,11 +111432,13 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Disable a serivce resource (a.k.a service project) associated with this host project.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.projects.disableXpnResource", + // "id": "compute.regionBackendServices.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { // "project": { @@ -98938,41 +111448,60 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/disableXpnResource", + // "path": "{project}/regions/{region}/backendServices/{resource}/testIamPermissions", // "request": { - // "$ref": "ProjectsDisableXpnResourceRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.enableXpnHost": +// method id "compute.regionBackendServices.update": -type ProjectsEnableXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesUpdateCall struct { + s *Service + project string + region string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// EnableXpnHost: Enable this project as a shared VPC host project. -func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { - c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified regional BackendService resource with +// the data included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. +func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { + c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.backendService = backendService + c.backendservice = backendservice return c } @@ -98990,7 +111519,7 @@ func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableXpnHostCall { +func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBackendServicesUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -98998,7 +111527,7 @@ func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableX // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
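The two mutation calls around this point differ in HTTP semantics: compute.regionBackendServices.patch sends a JSON merge patch, so only the fields present in the request body change, while compute.regionBackendServices.update, whose builder begins above, replaces the whole resource with a PUT. A narrow patch sketch, reusing svc; the chosen field (TimeoutSec) and the resource names are illustrative:

// Only the fields set on this BackendService are serialized, so under
// merge-patch semantics the server leaves every other field untouched.
op, err := svc.RegionBackendServices.
	Patch("my-project", "us-central1", "my-backend-service", &compute.BackendService{
		TimeoutSec: 30,
	}).
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println("operation:", op.Name, op.Status) // poll the regional operation if completion matters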
-func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnHostCall { +func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBackendServicesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -99006,50 +111535,57 @@ func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnable // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsEnableXpnHostCall) Context(ctx context.Context) *ProjectsEnableXpnHostCall { +func (c *RegionBackendServicesUpdateCall) Context(ctx context.Context) *RegionBackendServicesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsEnableXpnHostCall) Header() http.Header { +func (c *RegionBackendServicesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.enableXpnHost" call. +// Do executes the "compute.regionBackendServices.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -99080,13 +111616,22 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Enable this project as a shared VPC host project.", - // "httpMethod": "POST", - // "id": "compute.projects.enableXpnHost", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. 
Read Restrictions and Guidelines for more information.", + // "httpMethod": "PUT", + // "id": "compute.regionBackendServices.update", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "backendService" // ], // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -99094,13 +111639,23 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/enableXpnHost", + // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, // "response": { // "$ref": "Operation" // }, @@ -99112,88 +111667,137 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.projects.enableXpnResource": +// method id "compute.regionCommitments.aggregatedList": -type ProjectsEnableXpnResourceCall struct { - s *Service - project string - projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionCommitmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// EnableXpnResource: Enable service resource (a.k.a service project) -// for a host project, so that subnets in the host project can be used -// by instances in the service project. -func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { - c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of commitments. +func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { + c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectsenablexpnresourcerequest = projectsenablexpnresourcerequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEnableXpnResourceCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
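Like the other list calls added in this diff, compute.regionCommitments.aggregatedList pairs with a Pages helper that feeds nextPageToken back into pageToken until the listing is exhausted. A paging sketch reusing ctx and svc; the aggregated Items layout (scope name mapped to a scoped list with a Commitments slice) follows the usual aggregated-list shape and is assumed here:

err := svc.RegionCommitments.AggregatedList("my-project").
	MaxResults(100). // ask for smaller pages; Pages still walks all of them
	Pages(ctx, func(page *compute.CommitmentAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, c := range scoped.Commitments {
				fmt.Println(scope, c.Name, c.Status)
			}
		}
		return nil // a non-nil error here stops the iteration
	})
if err != nil {
	log.Fatal(err)
}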
+func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnResourceCall { +func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *RegionCommitmentsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsEnableXpnResourceCall) Context(ctx context.Context) *ProjectsEnableXpnResourceCall { +func (c *RegionCommitmentsAggregatedListCall) Context(ctx context.Context) *RegionCommitmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsEnableXpnResourceCall) Header() http.Header { +func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsenablexpnresourcerequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnResource") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/commitments") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -99204,14 +111808,14 @@ func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.enableXpnResource" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionCommitments.aggregatedList" call. +// Exactly one of *CommitmentAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *CommitmentAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*CommitmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -99230,7 +111834,7 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &CommitmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99242,64 +111846,105 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", - // "httpMethod": "POST", - // "id": "compute.projects.enableXpnResource", + // "description": "Retrieves an aggregated list of commitments.", + // "httpMethod": "GET", + // "id": "compute.regionCommitments.aggregatedList", // "parameterOrder": [ // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. 
Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/enableXpnResource", - // "request": { - // "$ref": "ProjectsEnableXpnResourceRequest" - // }, + // "path": "{project}/aggregated/commitments", // "response": { - // "$ref": "Operation" + // "$ref": "CommitmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func(*CommitmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsGetCall struct { +// method id "compute.regionCommitments.get": + +type RegionCommitmentsGetCall struct { s *Service project string + region string + commitment string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified Project resource. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get -func (r *ProjectsService) Get(project string) *ProjectsGetCall { - c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified commitment resource. Gets a list of +// available commitments by making a list() request. +func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { + c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.commitment = commitment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { +func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -99309,7 +111954,7 @@ func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { +func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -99317,21 +111962,21 @@ func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { +func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGetCall) Header() http.Header { +func (c *RegionCommitmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -99343,7 +111988,7 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -99351,19 +111996,21 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "commitment": c.commitment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.get" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "compute.regionCommitments.get" call. +// Exactly one of *Commitment or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Commitment.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -99382,7 +112029,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Project{ + ret := &Commitment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99394,24 +112041,40 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } return ret, nil // { - // "description": "Returns the specified Project resource.", + // "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", // "httpMethod": "GET", - // "id": "compute.projects.get", + // "id": "compute.regionCommitments.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "commitment" // ], // "parameters": { + // "commitment": { + // "description": "Name of the commitment to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}", + // "path": "{project}/regions/{region}/commitments/{commitment}", // "response": { - // "$ref": "Project" + // "$ref": "Commitment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -99422,93 +112085,108 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } -// method id "compute.projects.getXpnHost": +// method id "compute.regionCommitments.insert": -type ProjectsGetXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionCommitmentsInsertCall struct { + s *Service + project string + region string + commitment *Commitment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetXpnHost: Gets the shared VPC host project that this project links -// to. May be empty if no link exists. 
-func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { - c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a commitment in the specified project using the data +// included in the request. +func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { + c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.commitment = commitment + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHostCall { +func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHostCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetXpnHostCall) Context(ctx context.Context) *ProjectsGetXpnHostCall { +func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
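
Editorial note, not part of the vendored diff: the RequestId doc comment added above describes the idempotent-retry contract for compute.regionCommitments.insert. The sketch below is illustrative only; it assumes the vendored import path google.golang.org/api/compute/v1, an already constructed *compute.Service, and github.com/google/uuid as the UUID source. Only the Insert/RequestId/Do calls come from the generated code shown in this diff.

// --- illustrative sketch only, not part of the vendored file ---
package example

import (
	"github.com/google/uuid" // assumed helper: any valid, non-zero UUID source works

	compute "google.golang.org/api/compute/v1" // assumed vendored import path
)

// insertCommitment creates a commitment and attaches a request ID so that a
// retried call is recognized by the server as the same operation instead of
// creating a duplicate commitment.
func insertCommitment(svc *compute.Service, project, region string, c *compute.Commitment) (*compute.Operation, error) {
	return svc.RegionCommitments.Insert(project, region, c).
		RequestId(uuid.NewString()). // must be a valid UUID; the zero UUID is rejected
		Do()
}
// --- end illustrative sketch ---
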
-func (c *ProjectsGetXpnHostCall) Header() http.Header { +func (c *RegionCommitmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.getXpnHost" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "compute.regionCommitments.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -99527,7 +112205,7 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Project{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99539,11 +112217,12 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err } return ret, nil // { - // "description": "Gets the shared VPC host project that this project links to. 
May be empty if no link exists.", - // "httpMethod": "GET", - // "id": "compute.projects.getXpnHost", + // "description": "Creates a commitment in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionCommitments.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "project": { @@ -99552,11 +112231,26 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/getXpnHost", + // "path": "{project}/regions/{region}/commitments", + // "request": { + // "$ref": "Commitment" + // }, // "response": { - // "$ref": "Project" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -99566,22 +112260,24 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err } -// method id "compute.projects.getXpnResources": +// method id "compute.regionCommitments.list": -type ProjectsGetXpnResourcesCall struct { +type RegionCommitmentsListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetXpnResources: Gets service resources (a.k.a service project) -// associated with this host project. -func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { - c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of commitments contained within the specified +// region. +func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { + c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } @@ -99607,7 +112303,7 @@ func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourc // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
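
Editorial note, not part of the vendored diff: the filter grammar documented in the comment above (field name, comparison operator, value, optional parenthesized AND/OR expressions) is easiest to see in use. The sketch below reuses the documented "name != example-instance" shape against the region commitments list call; the import path and the example resource name are assumptions, while List/Filter/MaxResults/Do come from the generated code in this diff.

// --- illustrative sketch only, not part of the vendored file ---
package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed vendored import path
)

// listFilteredCommitments lists commitments in one region, excluding a
// resource by name, mirroring the doc comment's "name != example-instance"
// example.
func listFilteredCommitments(svc *compute.Service, project, region string) error {
	list, err := svc.RegionCommitments.List(project, region).
		Filter(`name != example-commitment`). // hypothetical name, documented operator shape
		MaxResults(50).
		Do()
	if err != nil {
		return err
	}
	for _, c := range list.Items {
		fmt.Println(c.Name)
	}
	return nil
}
// --- end illustrative sketch ---
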
-func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { +func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { c.urlParams_.Set("filter", filter) return c } @@ -99618,12 +112314,12 @@ func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResou // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { +func (c *RegionCommitmentsListCall) MaxResults(maxResults int64) *RegionCommitmentsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// OrderBy sets the optional parameter "order_by": Sorts list results by +// OrderBy sets the optional parameter "orderBy": Sorts list results by // a certain order. By default, results are returned in alphanumerical // order based on the resource name. // @@ -99635,15 +112331,15 @@ func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetX // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("order_by", orderBy) +func (c *RegionCommitmentsListCall) OrderBy(orderBy string) *RegionCommitmentsListCall { + c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { +func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmentsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -99651,7 +112347,7 @@ func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXp // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetXpnResourcesCall { +func (c *RegionCommitmentsListCall) Fields(s ...googleapi.Field) *RegionCommitmentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -99661,7 +112357,7 @@ func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetX // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGetXpnResourcesCall { +func (c *RegionCommitmentsListCall) IfNoneMatch(entityTag string) *RegionCommitmentsListCall { c.ifNoneMatch_ = entityTag return c } @@ -99669,21 +112365,21 @@ func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetXpnResourcesCall) Context(ctx context.Context) *ProjectsGetXpnResourcesCall { +func (c *RegionCommitmentsListCall) Context(ctx context.Context) *RegionCommitmentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsGetXpnResourcesCall) Header() http.Header { +func (c *RegionCommitmentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -99695,7 +112391,7 @@ func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnResources") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -99704,18 +112400,19 @@ func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, err req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.getXpnResources" call. -// Exactly one of *ProjectsGetXpnResources or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ProjectsGetXpnResources.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionCommitments.list" call. +// Exactly one of *CommitmentList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *CommitmentList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*ProjectsGetXpnResources, error) { +func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*CommitmentList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -99734,7 +112431,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ProjectsGetXpnResources{ + ret := &CommitmentList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99746,11 +112443,12 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project } return ret, nil // { - // "description": "Gets service resources (a.k.a service project) associated with this host project.", + // "description": "Retrieves a list of commitments contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.projects.getXpnResources", + // "id": "compute.regionCommitments.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { @@ -99766,7 +112464,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project // "minimum": "0", // "type": "integer" // }, - // "order_by": { + // "orderBy": { // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" @@ -99782,15 +112480,23 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/getXpnResources", + // "path": "{project}/regions/{region}/commitments", // "response": { - // "$ref": "ProjectsGetXpnResources" + // "$ref": "CommitmentList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } @@ -99799,7 +112505,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*ProjectsGetXpnResources) error) error { +func (c *RegionCommitmentsListCall) Pages(ctx context.Context, f func(*CommitmentList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -99817,93 +112523,34 @@ func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*Project } } -// method id "compute.projects.listXpnHosts": +// method id "compute.regionCommitments.testIamPermissions": -type ProjectsListXpnHostsCall struct { - s *Service - project string - projectslistxpnhostsrequest *ProjectsListXpnHostsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionCommitmentsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListXpnHosts: Lists all shared VPC host projects visible to the user -// in an organization. -func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { - c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
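
Editorial note, not part of the vendored diff: the Pages helpers added above (on both the aggregated and the region-scoped commitments list calls) fetch page after page until NextPageToken is empty, and a non-nil error from the callback halts the iteration. The sketch below shows that contract for the region-scoped list; the import path is an assumption, everything else is taken from the generated code in this diff.

// --- illustrative sketch only, not part of the vendored file ---
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed vendored import path
)

// visitAllCommitments drains every page of the region-scoped commitments
// list, stopping early if the visit callback returns an error.
func visitAllCommitments(ctx context.Context, svc *compute.Service, project, region string, visit func(*compute.Commitment) error) error {
	return svc.RegionCommitments.List(project, region).Pages(ctx, func(page *compute.CommitmentList) error {
		for _, c := range page.Items {
			if err := visit(c); err != nil {
				return err // a non-nil error halts the iteration, as documented
			}
		}
		return nil // Pages requests the next page until NextPageToken is empty
	})
}
// --- end illustrative sketch ---
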
+func (r *RegionCommitmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionCommitmentsTestIamPermissionsCall { + c := &RegionCommitmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectslistxpnhostsrequest = projectslistxpnhostsrequest - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "order_by": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpnHostsCall { +func (c *RegionCommitmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionCommitmentsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -99911,35 +112558,35 @@ func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpn // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsListXpnHostsCall) Context(ctx context.Context) *ProjectsListXpnHostsCall { +func (c *RegionCommitmentsTestIamPermissionsCall) Context(ctx context.Context) *RegionCommitmentsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsListXpnHostsCall) Header() http.Header { +func (c *RegionCommitmentsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectslistxpnhostsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/listXpnHosts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -99947,19 +112594,21 @@ func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.listXpnHosts" call. -// Exactly one of *XpnHostList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *XpnHostList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostList, error) { +// Do executes the "compute.regionCommitments.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
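
Editorial note, not part of the vendored diff: the testIamPermissions method added above echoes back the subset of requested permissions the caller actually holds. The sketch below assumes the vendored import path and uses an illustrative permission string; the TestIamPermissions call, request, and response types come from the generated code in this diff.

// --- illustrative sketch only, not part of the vendored file ---
package example

import (
	compute "google.golang.org/api/compute/v1" // assumed vendored import path
)

// canGetCommitment reports whether the caller holds every requested
// permission on the named commitment.
func canGetCommitment(svc *compute.Service, project, region, commitment string) (bool, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.commitments.get"}, // illustrative permission name
	}
	resp, err := svc.RegionCommitments.TestIamPermissions(project, region, commitment, req).Do()
	if err != nil {
		return false, err
	}
	// The response lists only the permissions the caller actually has.
	return len(resp.Permissions) == len(req.Permissions), nil
}
// --- end illustrative sketch ---
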
+func (c *RegionCommitmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -99978,7 +112627,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &XpnHostList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99990,96 +112639,74 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis } return ret, nil // { - // "description": "Lists all shared VPC host projects visible to the user in an organization.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.projects.listXpnHosts", + // "id": "compute.regionCommitments.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/listXpnHosts", + // "path": "{project}/regions/{region}/commitments/{resource}/testIamPermissions", // "request": { - // "$ref": "ProjectsListXpnHostsRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "XpnHostList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.projects.moveDisk": +// method id "compute.regionCommitments.updateReservations": -type ProjectsMoveDiskCall struct { - s *Service - project string - diskmoverequest *DiskMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionCommitmentsUpdateReservationsCall struct { + s *Service + project string + region string + commitment string + regioncommitmentsupdatereservationsrequest *RegionCommitmentsUpdateReservationsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MoveDisk: Moves a persistent disk from one zone to another. -func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { - c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateReservations: Update the shape of reservations for GPUS/Local +// SSDs of reservations within the commitments. 
+func (r *RegionCommitmentsService) UpdateReservations(project string, region string, commitment string, regioncommitmentsupdatereservationsrequest *RegionCommitmentsUpdateReservationsRequest) *RegionCommitmentsUpdateReservationsCall { + c := &RegionCommitmentsUpdateReservationsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.diskmoverequest = diskmoverequest + c.region = region + c.commitment = commitment + c.regioncommitmentsupdatereservationsrequest = regioncommitmentsupdatereservationsrequest return c } @@ -100097,7 +112724,7 @@ func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequ // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall { +func (c *RegionCommitmentsUpdateReservationsCall) RequestId(requestId string) *RegionCommitmentsUpdateReservationsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -100105,7 +112732,7 @@ func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { +func (c *RegionCommitmentsUpdateReservationsCall) Fields(s ...googleapi.Field) *RegionCommitmentsUpdateReservationsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -100113,35 +112740,35 @@ func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { +func (c *RegionCommitmentsUpdateReservationsCall) Context(ctx context.Context) *RegionCommitmentsUpdateReservationsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsMoveDiskCall) Header() http.Header { +func (c *RegionCommitmentsUpdateReservationsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsUpdateReservationsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioncommitmentsupdatereservationsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}/updateReservations") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -100149,19 +112776,21 @@ func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "commitment": c.commitment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.moveDisk" call. +// Do executes the "compute.regionCommitments.updateReservations" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionCommitmentsUpdateReservationsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -100192,13 +112821,22 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Moves a persistent disk from one zone to another.", + // "description": "Update the shape of reservations for GPUS/Local SSDs of reservations within the commitments.", // "httpMethod": "POST", - // "id": "compute.projects.moveDisk", + // "id": "compute.regionCommitments.updateReservations", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "commitment" // ], // "parameters": { + // "commitment": { + // "description": "Name of the commitment of which the reservation's capacities are being updated.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -100206,15 +112844,22 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/moveDisk", + // "path": "{project}/regions/{region}/commitments/{commitment}/updateReservations", // "request": { - // "$ref": "DiskMoveRequest" + // "$ref": "RegionCommitmentsUpdateReservationsRequest" // }, // "response": { // "$ref": "Operation" @@ -100227,105 +112872,99 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.projects.moveInstance": +// method id "compute.regionDiskTypes.get": -type ProjectsMoveInstanceCall struct { - s *Service - project string - instancemoverequest *InstanceMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDiskTypesGetCall struct { + s *Service + project string + region string + diskType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// MoveInstance: Moves an instance and its attached persistent disks -// from one zone to another. -func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { - c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified regional disk type. Gets a list of +// available disk types by making a list() request. +func (r *RegionDiskTypesService) Get(project string, region string, diskType string) *RegionDiskTypesGetCall { + c := &RegionDiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instancemoverequest = instancemoverequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.diskType = diskType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { +func (c *RegionDiskTypesGetCall) Fields(s ...googleapi.Field) *RegionDiskTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
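
Editorial note, not part of the vendored diff: the IfNoneMatch doc comment above describes conditional fetches checked with googleapi.IsNotModified. The sketch below shows that pattern against the regional disk type getter added in this diff; the import paths are assumptions, and the etag is taken from a previous response's header.

// --- illustrative sketch only, not part of the vendored file ---
package example

import (
	compute "google.golang.org/api/compute/v1" // assumed vendored import path
	"google.golang.org/api/googleapi"
)

// getDiskTypeIfChanged refetches a regional disk type only if it has changed
// since the supplied ETag; the bool result reports whether a new body was
// returned.
func getDiskTypeIfChanged(svc *compute.Service, project, region, diskType, etag string) (*compute.DiskType, bool, error) {
	dt, err := svc.RegionDiskTypes.Get(project, region, diskType).IfNoneMatch(etag).Do()
	if googleapi.IsNotModified(err) {
		return nil, false, nil // unchanged since the cached ETag; no new body
	}
	if err != nil {
		return nil, false, err
	}
	return dt, true, nil
}
// --- end illustrative sketch ---
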
+func (c *RegionDiskTypesGetCall) IfNoneMatch(entityTag string) *RegionDiskTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { +func (c *RegionDiskTypesGetCall) Context(ctx context.Context) *RegionDiskTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsMoveInstanceCall) Header() http.Header { +func (c *RegionDiskTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes/{diskType}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "diskType": c.diskType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.moveInstance" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionDiskTypes.get" call. +// Exactly one of *DiskType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskType.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -100344,7 +112983,7 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &DiskType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -100356,13 +112995,22 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Moves an instance and its attached persistent disks from one zone to another.", - // "httpMethod": "POST", - // "id": "compute.projects.moveInstance", + // "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionDiskTypes.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "diskType" // ], // "parameters": { + // "diskType": { + // "description": "Name of the disk type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -100370,127 +113018,180 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/moveInstance", - // "request": { - // "$ref": "InstanceMoveRequest" - // }, + // "path": "{project}/regions/{region}/diskTypes/{diskType}", // "response": { - // "$ref": "Operation" + // "$ref": "DiskType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.setCommonInstanceMetadata": +// method id "compute.regionDiskTypes.list": -type ProjectsSetCommonInstanceMetadataCall struct { - s *Service - project string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDiskTypesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetCommonInstanceMetadata: Sets metadata common to all instances -// within the specified project using the data included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata -func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { - c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of regional disk types available to the +// specified project. +func (r *RegionDiskTypesService) List(project string, region string) *RegionDiskTypesListCall { + c := &RegionDiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.metadata = metadata + c.region = region return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *RegionDiskTypesListCall) MaxResults(maxResults int64) *RegionDiskTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionDiskTypesListCall) OrderBy(orderBy string) *RegionDiskTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { +func (c *RegionDiskTypesListCall) Fields(s ...googleapi.Field) *RegionDiskTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionDiskTypesListCall) IfNoneMatch(entityTag string) *RegionDiskTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { +func (c *RegionDiskTypesListCall) Context(ctx context.Context) *RegionDiskTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { +func (c *RegionDiskTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setCommonInstanceMetadata" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionDiskTypes.list" call. +// Exactly one of *RegionDiskTypeList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RegionDiskTypeList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -100509,7 +113210,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionDiskTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -100521,13 +113222,37 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.projects.setCommonInstanceMetadata", + // "description": "Retrieves a list of regional disk types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.regionDiskTypes.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -100535,46 +113260,70 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/setCommonInstanceMetadata", - // "request": { - // "$ref": "Metadata" - // }, + // "path": "{project}/regions/{region}/diskTypes", // "response": { - // "$ref": "Operation" + // "$ref": "RegionDiskTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.setDefaultNetworkTier": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionDiskTypesListCall) Pages(ctx context.Context, f func(*RegionDiskTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsSetDefaultNetworkTierCall struct { - s *Service - project string - projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionDisks.addResourcePolicies": + +type RegionDisksAddResourcePoliciesCall struct { + s *Service + project string + region string + disk string + regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetDefaultNetworkTier: Sets the default network tier of the project. -// The default network tier is used when an -// address/forwardingRule/instance is created without specifying the -// network tier field. 
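A short usage sketch for the new RegionDiskTypes.List call and its Pages helper shown above. This is a hypothetical example, not part of the vendored diff: it assumes the package is imported from google.golang.org/api/compute/v1, that credentials come from golang.org/x/oauth2/google's DefaultClient, and that the Service is built with the package's New constructor; the project and region values are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Assumed setup: application-default credentials with the read-only compute scope.
	httpClient, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatalf("creating HTTP client: %v", err)
	}
	svc, err := compute.New(httpClient)
	if err != nil {
		log.Fatalf("creating compute service: %v", err)
	}

	const project, region = "my-project", "us-central1" // placeholders

	// Filter and page size follow the parameter documentation above; Pages
	// drives the pageToken/nextPageToken loop so we never touch it directly.
	call := svc.RegionDiskTypes.List(project, region).
		Filter("name != local-ssd").
		MaxResults(100)

	err = call.Pages(ctx, func(page *compute.RegionDiskTypeList) error {
		for _, dt := range page.Items {
			fmt.Printf("%s\t%s\n", dt.Name, dt.Description)
		}
		return nil // a non-nil error here stops the iteration
	})
	if err != nil {
		log.Fatalf("listing regional disk types: %v", err)
	}
}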
-func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall { - c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddResourcePolicies: Adds existing resource policies to a regional +// disk. You can only add one policy which will be applied to this disk +// for scheduling snapshot creation. +func (r *RegionDisksService) AddResourcePolicies(project string, region string, disk string, regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest) *RegionDisksAddResourcePoliciesCall { + c := &RegionDisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectssetdefaultnetworktierrequest = projectssetdefaultnetworktierrequest + c.region = region + c.disk = disk + c.regiondisksaddresourcepoliciesrequest = regiondisksaddresourcepoliciesrequest return c } @@ -100592,7 +113341,7 @@ func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefau // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *ProjectsSetDefaultNetworkTierCall { +func (c *RegionDisksAddResourcePoliciesCall) RequestId(requestId string) *RegionDisksAddResourcePoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -100600,7 +113349,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *Project // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultNetworkTierCall { +func (c *RegionDisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksAddResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -100608,35 +113357,35 @@ func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *Projec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetDefaultNetworkTierCall) Context(ctx context.Context) *ProjectsSetDefaultNetworkTierCall { +func (c *RegionDisksAddResourcePoliciesCall) Context(ctx context.Context) *RegionDisksAddResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { +func (c *RegionDisksAddResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultnetworktierrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksaddresourcepoliciesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setDefaultNetworkTier") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/addResourcePolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -100645,18 +113394,20 @@ func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Respons req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setDefaultNetworkTier" call. +// Do executes the "compute.regionDisks.addResourcePolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -100687,13 +113438,22 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", + // "description": "Adds existing resource policies to a regional disk. 
You can only add one policy which will be applied to this disk for scheduling snapshot creation.", // "httpMethod": "POST", - // "id": "compute.projects.setDefaultNetworkTier", + // "id": "compute.regionDisks.addResourcePolicies", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "disk" // ], // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -100701,15 +113461,22 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/setDefaultNetworkTier", + // "path": "{project}/regions/{region}/disks/{disk}/addResourcePolicies", // "request": { - // "$ref": "ProjectsSetDefaultNetworkTierRequest" + // "$ref": "RegionDisksAddResourcePoliciesRequest" // }, // "response": { // "$ref": "Operation" @@ -100722,24 +113489,35 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.projects.setDefaultServiceAccount": +// method id "compute.regionDisks.createSnapshot": -type ProjectsSetDefaultServiceAccountCall struct { - s *Service - project string - projectssetdefaultserviceaccountrequest *ProjectsSetDefaultServiceAccountRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksCreateSnapshotCall struct { + s *Service + project string + region string + disk string + snapshot *Snapshot + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetDefaultServiceAccount: Sets the default service account of the -// project. The default service account is used when a VM instance is -// created with the service account email address set to "default". -func (r *ProjectsService) SetDefaultServiceAccount(project string, projectssetdefaultserviceaccountrequest *ProjectsSetDefaultServiceAccountRequest) *ProjectsSetDefaultServiceAccountCall { - c := &ProjectsSetDefaultServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateSnapshot: Creates a snapshot of this regional disk. 
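The RegionDisks.AddResourcePolicies call above takes a RegionDisksAddResourcePoliciesRequest that is generated elsewhere in this file, not in this hunk. A hedged sketch of attaching a snapshot-schedule policy to a regional disk: it assumes the request struct exposes a ResourcePolicies []string field of policy self links, and it reuses a *compute.Service built as in the earlier sketch. The package name and all arguments are placeholders.

package disksketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// addSnapshotSchedule attaches a single resource policy to a regional disk.
// Per the documentation above, only one policy can be added per call.
func addSnapshotSchedule(ctx context.Context, svc *compute.Service, project, region, disk, policySelfLink string) (*compute.Operation, error) {
	req := &compute.RegionDisksAddResourcePoliciesRequest{
		// Assumed field name; the request struct is generated elsewhere in this file.
		ResourcePolicies: []string{policySelfLink},
	}
	return svc.RegionDisks.AddResourcePolicies(project, region, disk, req).
		Context(ctx).
		Do()
}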
+func (r *RegionDisksService) CreateSnapshot(project string, region string, disk string, snapshot *Snapshot) *RegionDisksCreateSnapshotCall { + c := &RegionDisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectssetdefaultserviceaccountrequest = projectssetdefaultserviceaccountrequest + c.region = region + c.disk = disk + c.snapshot = snapshot + return c +} + +// GuestFlush sets the optional parameter "guestFlush": [Input Only] +// Specifies to create an application consistent snapshot by informing +// the OS to prepare for the snapshot process. Currently only supported +// on Windows instances using the Volume Shadow Copy Service (VSS). +func (c *RegionDisksCreateSnapshotCall) GuestFlush(guestFlush bool) *RegionDisksCreateSnapshotCall { + c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) return c } @@ -100757,7 +113535,7 @@ func (r *ProjectsService) SetDefaultServiceAccount(project string, projectssetde // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetDefaultServiceAccountCall) RequestId(requestId string) *ProjectsSetDefaultServiceAccountCall { +func (c *RegionDisksCreateSnapshotCall) RequestId(requestId string) *RegionDisksCreateSnapshotCall { c.urlParams_.Set("requestId", requestId) return c } @@ -100765,7 +113543,7 @@ func (c *ProjectsSetDefaultServiceAccountCall) RequestId(requestId string) *Proj // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetDefaultServiceAccountCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultServiceAccountCall { +func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisksCreateSnapshotCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -100773,35 +113551,35 @@ func (c *ProjectsSetDefaultServiceAccountCall) Fields(s ...googleapi.Field) *Pro // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetDefaultServiceAccountCall) Context(ctx context.Context) *ProjectsSetDefaultServiceAccountCall { +func (c *RegionDisksCreateSnapshotCall) Context(ctx context.Context) *RegionDisksCreateSnapshotCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsSetDefaultServiceAccountCall) Header() http.Header { +func (c *RegionDisksCreateSnapshotCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetDefaultServiceAccountCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultserviceaccountrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setDefaultServiceAccount") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/createSnapshot") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -100810,18 +113588,20 @@ func (c *ProjectsSetDefaultServiceAccountCall) doRequest(alt string) (*http.Resp req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setDefaultServiceAccount" call. +// Do executes the "compute.regionDisks.createSnapshot" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetDefaultServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -100852,13 +113632,27 @@ func (c *ProjectsSetDefaultServiceAccountCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the default service account of the project. The default service account is used when a VM instance is created with the service account email address set to \"default\".", + // "description": "Creates a snapshot of this regional disk.", // "httpMethod": "POST", - // "id": "compute.projects.setDefaultServiceAccount", + // "id": "compute.regionDisks.createSnapshot", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "disk" // ], // "parameters": { + // "disk": { + // "description": "Name of the regional persistent disk to snapshot.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "guestFlush": { + // "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. 
Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + // "location": "query", + // "type": "boolean" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -100866,15 +113660,22 @@ func (c *ProjectsSetDefaultServiceAccountCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/setDefaultServiceAccount", + // "path": "{project}/regions/{region}/disks/{disk}/createSnapshot", // "request": { - // "$ref": "ProjectsSetDefaultServiceAccountRequest" + // "$ref": "Snapshot" // }, // "response": { // "$ref": "Operation" @@ -100887,26 +113688,27 @@ func (c *ProjectsSetDefaultServiceAccountCall) Do(opts ...googleapi.CallOption) } -// method id "compute.projects.setUsageExportBucket": +// method id "compute.regionDisks.delete": -type ProjectsSetUsageExportBucketCall struct { - s *Service - project string - usageexportlocation *UsageExportLocation - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksDeleteCall struct { + s *Service + project string + region string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetUsageExportBucket: Enables the usage export feature and sets the -// usage export bucket where reports are stored. If you provide an empty -// request body using this method, the usage export feature will be -// disabled. -// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket -func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { - c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified regional persistent disk. Deleting a +// regional disk removes all the replicas of its data permanently and is +// irreversible. However, deleting a disk does not delete any snapshots +// previously made from the disk. You must separately delete snapshots. 
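A sketch of the CreateSnapshot call using the GuestFlush and RequestId parameters documented above. It assumes the Snapshot struct (generated elsewhere in this file) has a Name field, that requestID is a caller-generated UUID, and that svc is a *compute.Service built as in the first sketch.

package disksketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// createDiskSnapshot snapshots a regional disk. GuestFlush asks the guest OS
// to quiesce first, which the documentation above notes is Windows/VSS only.
func createDiskSnapshot(ctx context.Context, svc *compute.Service, project, region, disk, snapshotName, requestID string) (*compute.Operation, error) {
	snap := &compute.Snapshot{Name: snapshotName} // assumed field; struct defined elsewhere in this file
	return svc.RegionDisks.CreateSnapshot(project, region, disk, snap).
		GuestFlush(true).
		// requestID must be a valid, non-zero UUID; retries that reuse it are
		// deduplicated by the server, as described above.
		RequestId(requestID).
		Context(ctx).
		Do()
}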
+func (r *RegionDisksService) Delete(project string, region string, disk string) *RegionDisksDeleteCall { + c := &RegionDisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.usageexportlocation = usageexportlocation + c.region = region + c.disk = disk return c } @@ -100924,7 +113726,7 @@ func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocati // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall { +func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -100932,7 +113734,7 @@ func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *Projects // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { +func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -100940,55 +113742,52 @@ func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *Project // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { +func (c *RegionDisksDeleteCall) Context(ctx context.Context) *RegionDisksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { +func (c *RegionDisksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setUsageExportBucket" call. +// Do executes the "compute.regionDisks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101019,13 +113818,21 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", - // "httpMethod": "POST", - // "id": "compute.projects.setUsageExportBucket", + // "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + // "httpMethod": "DELETE", + // "id": "compute.regionDisks.delete", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "disk" // ], // "parameters": { + // "disk": { + // "description": "Name of the regional persistent disk to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -101033,127 +113840,123 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/setUsageExportBucket", - // "request": { - // "$ref": "UsageExportLocation" - // }, + // "path": "{project}/regions/{region}/disks/{disk}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionAutoscalers.delete": +// method id "compute.regionDisks.get": -type RegionAutoscalersDeleteCall struct { - s *Service - project string - region string - autoscaler string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksGetCall struct { + s *Service + project string + region string + disk string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified autoscaler. -func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { - c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns a specified regional persistent disk. +func (r *RegionDisksService) Get(project string, region string, disk string) *RegionDisksGetCall { + c := &RegionDisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.disk = disk return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall { +func (c *RegionDisksGetCall) Fields(s ...googleapi.Field) *RegionDisksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
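The Delete call above is irreversible, so the requestId parameter matters most here. A sketch showing how a caller-supplied UUID makes a retried delete safe; it assumes a *compute.Service built as in the first sketch, and all arguments are placeholders.

package disksketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// deleteRegionalDisk deletes a regional persistent disk. requestID should be a
// freshly generated UUID; if this call times out and is retried with the same
// ID, the server ignores the duplicate, per the requestId documentation above.
func deleteRegionalDisk(ctx context.Context, svc *compute.Service, project, region, disk, requestID string) (*compute.Operation, error) {
	return svc.RegionDisks.Delete(project, region, disk).
		RequestId(requestID).
		Context(ctx).
		Do()
}

The returned *compute.Operation can then be polled until it reports completion; note that, as the description above states, snapshots previously made from the disk are not removed.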
+func (c *RegionDisksGetCall) IfNoneMatch(entityTag string) *RegionDisksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall { +func (c *RegionDisksGetCall) Context(ctx context.Context) *RegionDisksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersDeleteCall) Header() http.Header { +func (c *RegionDisksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "autoscaler": c.autoscaler, + "project": c.project, + "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionDisks.get" call. +// Exactly one of *Disk or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Disk.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101172,7 +113975,7 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Disk{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101184,19 +113987,19 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified autoscaler.", - // "httpMethod": "DELETE", - // "id": "compute.regionAutoscalers.delete", + // "description": "Returns a specified regional persistent disk.", + // "httpMethod": "GET", + // "id": "compute.regionDisks.get", // "parameterOrder": [ // "project", // "region", - // "autoscaler" + // "disk" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", + // "disk": { + // "description": "Name of the regional persistent disk to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -101208,56 +114011,53 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", + // "path": "{project}/regions/{region}/disks/{disk}", // "response": { - // "$ref": "Operation" + // "$ref": "Disk" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionAutoscalers.get": +// method id "compute.regionDisks.getIamPolicy": -type RegionAutoscalersGetCall struct { +type RegionDisksGetIamPolicyCall struct { s *Service project string region string - autoscaler string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified autoscaler. -func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { - c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. 
May be +// empty if no such policy or resource exists. +func (r *RegionDisksService) GetIamPolicy(project string, region string, resource string) *RegionDisksGetIamPolicyCall { + c := &RegionDisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall { +func (c *RegionDisksGetIamPolicyCall) Fields(s ...googleapi.Field) *RegionDisksGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101267,7 +114067,7 @@ func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscale // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall { +func (c *RegionDisksGetIamPolicyCall) IfNoneMatch(entityTag string) *RegionDisksGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -101275,21 +114075,21 @@ func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall { +func (c *RegionDisksGetIamPolicyCall) Context(ctx context.Context) *RegionDisksGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersGetCall) Header() http.Header { +func (c *RegionDisksGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -101301,7 +114101,7 @@ func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -101309,21 +114109,21 @@ func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "autoscaler": c.autoscaler, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { +// Do executes the "compute.regionDisks.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionDisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101342,7 +114142,7 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Autoscaler{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101354,22 +114154,15 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler } return ret, nil // { - // "description": "Returns the specified autoscaler.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.regionAutoscalers.get", + // "id": "compute.regionDisks.getIamPolicy", // "parameterOrder": [ // "project", // "region", - // "autoscaler" + // "resource" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -101378,16 +114171,23 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", + // "path": "{project}/regions/{region}/disks/{resource}/getIamPolicy", // "response": { - // "$ref": "Autoscaler" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -101398,25 +114198,25 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler } -// method id "compute.regionAutoscalers.insert": +// method id "compute.regionDisks.insert": -type RegionAutoscalersInsertCall struct { +type RegionDisksInsertCall struct { s *Service project string region string - autoscaler *Autoscaler + disk *Disk urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. 
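The RegionDisksGetCall above supports IfNoneMatch together with googleapi.IsNotModified, which allows a simple ETag-based cache. A sketch of that pattern, assuming the server actually returns an Etag header for this resource (not guaranteed by this hunk) and a *compute.Service built as in the first sketch.

package disksketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getDiskIfChanged refetches a regional disk only if it changed since lastETag.
// It returns the (possibly nil) disk and the ETag to remember for the next call.
func getDiskIfChanged(ctx context.Context, svc *compute.Service, project, region, disk, lastETag string) (*compute.Disk, string, error) {
	call := svc.RegionDisks.Get(project, region, disk).Context(ctx)
	if lastETag != "" {
		call.IfNoneMatch(lastETag) // server answers 304 if the ETag still matches
	}
	d, err := call.Do()
	if googleapi.IsNotModified(err) {
		// Unchanged: keep using the previously cached copy.
		return nil, lastETag, nil
	}
	if err != nil {
		return nil, "", err
	}
	// If the server sent an ETag, it is available via the embedded ServerResponse.
	return d, d.ServerResponse.Header.Get("Etag"), nil
}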
-func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { - c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a persistent regional disk in the specified project +// using the data included in the request. +func (r *RegionDisksService) Insert(project string, region string, disk *Disk) *RegionDisksInsertCall { + c := &RegionDisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler + c.disk = disk return c } @@ -101434,15 +114234,22 @@ func (r *RegionAutoscalersService) Insert(project string, region string, autosca // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall { +func (c *RegionDisksInsertCall) RequestId(requestId string) *RegionDisksInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// SourceImage sets the optional parameter "sourceImage": Source image +// to restore onto a disk. +func (c *RegionDisksInsertCall) SourceImage(sourceImage string) *RegionDisksInsertCall { + c.urlParams_.Set("sourceImage", sourceImage) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall { +func (c *RegionDisksInsertCall) Fields(s ...googleapi.Field) *RegionDisksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101450,35 +114257,35 @@ func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall { +func (c *RegionDisksInsertCall) Context(ctx context.Context) *RegionDisksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersInsertCall) Header() http.Header { +func (c *RegionDisksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -101492,14 +114299,14 @@ func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.insert" call. +// Do executes the "compute.regionDisks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101530,9 +114337,9 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "description": "Creates a persistent regional disk in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.regionAutoscalers.insert", + // "id": "compute.regionDisks.insert", // "parameterOrder": [ // "project", // "region" @@ -101546,7 +114353,7 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -101556,11 +114363,16 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "sourceImage": { + // "description": "Optional. 
Source image to restore onto a disk.", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers", + // "path": "{project}/regions/{region}/disks", // "request": { - // "$ref": "Autoscaler" + // "$ref": "Disk" // }, // "response": { // "$ref": "Operation" @@ -101573,9 +114385,9 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionAutoscalers.list": +// method id "compute.regionDisks.list": -type RegionAutoscalersListCall struct { +type RegionDisksListCall struct { s *Service project string region string @@ -101585,10 +114397,10 @@ type RegionAutoscalersListCall struct { header_ http.Header } -// List: Retrieves a list of autoscalers contained within the specified -// region. -func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { - c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of persistent disks contained within the +// specified region. +func (r *RegionDisksService) List(project string, region string) *RegionDisksListCall { + c := &RegionDisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -101616,7 +114428,7 @@ func (r *RegionAutoscalersService) List(project string, region string) *RegionAu // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { +func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { c.urlParams_.Set("filter", filter) return c } @@ -101627,7 +114439,7 @@ func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersList // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall { +func (c *RegionDisksListCall) MaxResults(maxResults int64) *RegionDisksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -101644,7 +114456,7 @@ func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscal // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall { +func (c *RegionDisksListCall) OrderBy(orderBy string) *RegionDisksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -101652,7 +114464,7 @@ func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersLi // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall { +func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -101660,7 +114472,7 @@ func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscale // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
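A hedged sketch of the Insert call with the new sourceImage parameter. The compute.Disk fields used here (Name, SizeGb, ReplicaZones) are generated elsewhere in this file rather than in this hunk, the image family is a placeholder, and the zone suffixes stand in for real zones; regional disks replicate across exactly two zones in the target region.

package disksketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// createRegionalDisk creates a 100 GB regional disk restored from an image family.
// Field names on compute.Disk are assumed from the rest of this generated file.
func createRegionalDisk(ctx context.Context, svc *compute.Service, project, region, name string) (*compute.Operation, error) {
	disk := &compute.Disk{
		Name:   name,
		SizeGb: 100,
		// Regional disks need two replica zones; "-a"/"-b" are placeholders
		// for real zones in the chosen region.
		ReplicaZones: []string{
			"projects/" + project + "/zones/" + region + "-a",
			"projects/" + project + "/zones/" + region + "-b",
		},
	}
	return svc.RegionDisks.Insert(project, region, disk).
		// Optional: restore an image onto the new disk via the sourceImage
		// parameter documented above (placeholder image family shown).
		SourceImage("projects/debian-cloud/global/images/family/debian-9").
		Context(ctx).
		Do()
}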
-func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall { +func (c *RegionDisksListCall) Fields(s ...googleapi.Field) *RegionDisksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101670,7 +114482,7 @@ func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall { +func (c *RegionDisksListCall) IfNoneMatch(entityTag string) *RegionDisksListCall { c.ifNoneMatch_ = entityTag return c } @@ -101678,21 +114490,21 @@ func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutosca // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall { +func (c *RegionDisksListCall) Context(ctx context.Context) *RegionDisksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersListCall) Header() http.Header { +func (c *RegionDisksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -101704,7 +114516,7 @@ func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -101718,14 +114530,14 @@ func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.list" call. -// Exactly one of *RegionAutoscalerList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RegionAutoscalerList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) { +// Do executes the "compute.regionDisks.list" call. +// Exactly one of *DiskList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101744,7 +114556,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionAutoscalerList{ + ret := &DiskList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101756,9 +114568,9 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified region.", + // "description": "Retrieves the list of persistent disks contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.regionAutoscalers.list", + // "id": "compute.regionDisks.list", // "parameterOrder": [ // "project", // "region" @@ -101795,16 +114607,16 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers", + // "path": "{project}/regions/{region}/disks", // "response": { - // "$ref": "RegionAutoscalerList" + // "$ref": "DiskList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -101818,7 +114630,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error { +func (c *RegionDisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -101836,33 +114648,27 @@ func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAut } } -// method id "compute.regionAutoscalers.patch": +// method id "compute.regionDisks.removeResourcePolicies": -type RegionAutoscalersPatchCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksRemoveResourcePoliciesCall struct { + s *Service + project string + region string + disk string + regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates an autoscaler in the specified project using the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. -func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { - c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveResourcePolicies: Removes resource policies from a regional +// disk. 
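Continuing the hypothetical sketches above, this one exercises compute.regionDisks.list through the Pages helper; the printed fields and helper name are illustrative and reuse the shared preamble.

// printRegionDisks is a minimal sketch of compute.regionDisks.list. Pages
// fetches successive pages and restores the original page token when it
// returns; a non-nil error from the callback halts the iteration.
func printRegionDisks(ctx context.Context, svc *compute.Service, project, region string) error {
	call := svc.RegionDisks.List(project, region).MaxResults(100)
	return call.Pages(ctx, func(page *compute.DiskList) error {
		for _, d := range page.Items {
			fmt.Printf("%s\t%d GB\n", d.Name, d.SizeGb)
		}
		return nil
	})
}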
+func (r *RegionDisksService) RemoveResourcePolicies(project string, region string, disk string, regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest) *RegionDisksRemoveResourcePoliciesCall { + c := &RegionDisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.disk = disk + c.regiondisksremoveresourcepoliciesrequest = regiondisksremoveresourcepoliciesrequest return c } @@ -101880,7 +114686,7 @@ func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutosc // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall { +func (c *RegionDisksRemoveResourcePoliciesCall) RequestId(requestId string) *RegionDisksRemoveResourcePoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -101888,7 +114694,7 @@ func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscal // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall { +func (c *RegionDisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksRemoveResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101896,37 +114702,37 @@ func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutosca // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall { +func (c *RegionDisksRemoveResourcePoliciesCall) Context(ctx context.Context) *RegionDisksRemoveResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersPatchCall) Header() http.Header { +func (c *RegionDisksRemoveResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksremoveresourcepoliciesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/removeResourcePolicies") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -101934,18 +114740,19 @@ func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, erro googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.patch" call. +// Do executes the "compute.regionDisks.removeResourcePolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101976,18 +114783,20 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.regionAutoscalers.patch", + // "description": "Removes resource policies from a regional disk.", + // "httpMethod": "POST", + // "id": "compute.regionDisks.removeResourcePolicies", // "parameterOrder": [ // "project", - // "region" + // "region", + // "disk" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -101998,7 +114807,7 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -102010,9 +114819,9 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers", + // "path": "{project}/regions/{region}/disks/{disk}/removeResourcePolicies", // "request": { - // "$ref": "Autoscaler" + // "$ref": "RegionDisksRemoveResourcePoliciesRequest" // }, // "response": { // "$ref": "Operation" @@ -102025,195 +114834,26 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.regionAutoscalers.testIamPermissions": - -type RegionAutoscalersTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: 
Returns permissions that a caller has on the -// specified resource. -func (r *RegionAutoscalersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionAutoscalersTestIamPermissionsCall { - c := &RegionAutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionAutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionAutoscalersTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionAutoscalersTestIamPermissionsCall) Context(ctx context.Context) *RegionAutoscalersTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionAutoscalersTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionAutoscalers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionAutoscalers.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionAutoscalers.update": +// method id "compute.regionDisks.resize": -type RegionAutoscalersUpdateCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksResizeCall struct { + s *Service + project string + region string + disk string + regiondisksresizerequest *RegionDisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates an autoscaler in the specified project using the data -// included in the request. -func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { - c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the specified regional persistent disk. +func (r *RegionDisksService) Resize(project string, region string, disk string, regiondisksresizerequest *RegionDisksResizeRequest) *RegionDisksResizeCall { + c := &RegionDisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. 
-func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.disk = disk + c.regiondisksresizerequest = regiondisksresizerequest return c } @@ -102231,7 +114871,7 @@ func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutos // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall { +func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -102239,7 +114879,7 @@ func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutosca // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall { +func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102247,37 +114887,37 @@ func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall { +func (c *RegionDisksResizeCall) Context(ctx context.Context) *RegionDisksResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersUpdateCall) Header() http.Header { +func (c *RegionDisksResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksresizerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/resize") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -102285,194 +114925,19 @@ func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, err googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.regionAutoscalers.update", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionBackendServices.delete": - -type RegionBackendServicesDeleteCall struct { - s *Service - project string - region string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified regional BackendService resource. 
-func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { - c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.backendService = backendService - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBackendServicesDeleteCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBackendServicesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionBackendServicesDeleteCall) Context(ctx context.Context) *RegionBackendServicesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionBackendServicesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionBackendServices.delete" call. +// Do executes the "compute.regionDisks.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102503,248 +114968,85 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified regional BackendService resource.", - // "httpMethod": "DELETE", - // "id": "compute.regionBackendServices.delete", - // "parameterOrder": [ - // "project", - // "region", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionBackendServices.get": - -type RegionBackendServicesGetCall struct { - s *Service - project string - region string - backendService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified regional BackendService resource. -func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { - c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.backendService = backendService - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionBackendServicesGetCall) Context(ctx context.Context) *RegionBackendServicesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionBackendServicesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionBackendServices.get" call. -// Exactly one of *BackendService or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendService{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified regional BackendService resource.", - // "httpMethod": "GET", - // "id": "compute.regionBackendServices.get", + // "description": "Resizes the specified regional persistent disk.", + // "httpMethod": "POST", + // "id": "compute.regionDisks.resize", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "disk" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", + // "disk": { + // "description": "Name of the regional persistent disk.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "The project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "path": "{project}/regions/{region}/disks/{disk}/resize", + // "request": { + // "$ref": "RegionDisksResizeRequest" + // }, // "response": { - // "$ref": "BackendService" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionBackendServices.getHealth": +// method id "compute.regionDisks.setIamPolicy": -type RegionBackendServicesGetHealthCall struct { +type RegionDisksSetIamPolicyCall struct { s *Service project string region string - backendService string - resourcegroupreference *ResourceGroupReference + resource string + regionsetpolicyrequest *RegionSetPolicyRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// GetHealth: Gets the most recent health check results for this -// regional BackendService. -func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { - c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *RegionDisksService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionDisksSetIamPolicyCall { + c := &RegionDisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetHealthCall { +func (c *RegionDisksSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionDisksSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102752,35 +115054,35 @@ func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *Regio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesGetHealthCall) Context(ctx context.Context) *RegionBackendServicesGetHealthCall { +func (c *RegionDisksSetIamPolicyCall) Context(ctx context.Context) *RegionDisksSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
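A further hedged sketch, this time for compute.regionDisks.resize; the helper and the caller-supplied requestID are assumptions and reuse the preamble above.

// resizeRegionDisk is a minimal sketch of compute.regionDisks.resize. The
// requestID must be a caller-generated UUID; resending the same value on a
// retry lets the server ignore the duplicate, per the requestId parameter
// documented above.
func resizeRegionDisk(ctx context.Context, svc *compute.Service, project, region, disk string, newSizeGb int64, requestID string) (*compute.Operation, error) {
	req := &compute.RegionDisksResizeRequest{SizeGb: newSizeGb}
	return svc.RegionDisks.Resize(project, region, disk, req).
		RequestId(requestID).
		Context(ctx).
		Do()
}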
-func (c *RegionBackendServicesGetHealthCall) Header() http.Header { +func (c *RegionDisksSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}/getHealth") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102788,21 +115090,21 @@ func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.getHealth" call. -// Exactly one of *BackendServiceGroupHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { +// Do executes the "compute.regionDisks.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionDisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102821,7 +115123,7 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceGroupHealth{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -102833,74 +115135,72 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets the most recent health check results for this regional BackendService.", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.regionBackendServices.getHealth", + // "id": "compute.regionDisks.setIamPolicy", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "resource" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource for which to get health.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}/getHealth", + // "path": "{project}/regions/{region}/disks/{resource}/setIamPolicy", // "request": { - // "$ref": "ResourceGroupReference" + // "$ref": "RegionSetPolicyRequest" // }, // "response": { - // "$ref": "BackendServiceGroupHealth" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionBackendServices.insert": +// method id "compute.regionDisks.setLabels": -type RegionBackendServicesInsertCall struct { - s *Service - project string - region string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a regional BackendService resource in the specified -// project using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a regional -// backend service. Read Restrictions and Guidelines for more -// information. -func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { - c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on the target regional disk. 
+func (r *RegionDisksService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionDisksSetLabelsCall { + c := &RegionDisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendservice = backendservice + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest return c } @@ -102918,7 +115218,7 @@ func (r *RegionBackendServicesService) Insert(project string, region string, bac // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBackendServicesInsertCall { +func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -102926,7 +115226,7 @@ func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBackendServicesInsertCall { +func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102934,35 +115234,35 @@ func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesInsertCall) Context(ctx context.Context) *RegionBackendServicesInsertCall { +func (c *RegionDisksSetLabelsCall) Context(ctx context.Context) *RegionDisksSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesInsertCall) Header() http.Header { +func (c *RegionDisksSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/setLabels") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102970,20 +115270,21 @@ func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.insert" call. +// Do executes the "compute.regionDisks.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103014,12 +115315,13 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information.", + // "description": "Sets the labels on the target regional disk.", // "httpMethod": "POST", - // "id": "compute.regionBackendServices.insert", + // "id": "compute.regionDisks.setLabels", // "parameterOrder": [ // "project", - // "region" + // "region", + // "resource" // ], // "parameters": { // "project": { @@ -103030,7 +115332,7 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -103040,11 +115342,18 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices", + // "path": "{project}/regions/{region}/disks/{resource}/setLabels", // "request": { - // "$ref": "BackendService" + // "$ref": "RegionSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -103057,159 +115366,92 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionBackendServices.list": +// method id "compute.regionDisks.testIamPermissions": -type RegionBackendServicesListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionDisksTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of regional BackendService resources -// available to the specified project in the given region. -func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { - c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionDisksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionDisksTestIamPermissionsCall { + c := &RegionDisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
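The setLabels call documented above takes the disk's current label fingerprint; the sketch below (again hypothetical, reusing the shared preamble, with an illustrative label) reads it via RegionDisks.Get before writing.

// labelRegionDisk is a minimal sketch of compute.regionDisks.setLabels as a
// read-modify-write: the LabelFingerprint from the Get response guards
// against concurrent label updates.
func labelRegionDisk(ctx context.Context, svc *compute.Service, project, region, disk string) (*compute.Operation, error) {
	d, err := svc.RegionDisks.Get(project, region, disk).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	req := &compute.RegionSetLabelsRequest{
		LabelFingerprint: d.LabelFingerprint,
		Labels:           map[string]string{"env": "test"},
	}
	return svc.RegionDisks.SetLabels(project, region, disk, req).Context(ctx).Do()
}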
-func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBackendServicesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendServicesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBackendServicesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBackendServicesListCall { +func (c *RegionDisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionDisksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBackendServicesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesListCall) Context(ctx context.Context) *RegionBackendServicesListCall { +func (c *RegionDisksTestIamPermissionsCall) Context(ctx context.Context) *RegionDisksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionBackendServicesListCall) Header() http.Header { +func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.list" call. -// Exactly one of *BackendServiceList or error will be non-nil. Any +// Do executes the "compute.regionDisks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { +func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103228,7 +115470,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103240,37 +115482,15 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe } return ret, nil // { - // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", - // "httpMethod": "GET", - // "id": "compute.regionBackendServices.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.regionDisks.testIamPermissions", // "parameterOrder": [ // "project", - // "region" + // "region", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -103279,16 +115499,26 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices", + // "path": "{project}/regions/{region}/disks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "BackendServiceList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -103299,52 +115529,24 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionBackendServices.patch": +// method id "compute.regionHealthCheckServices.delete": -type RegionBackendServicesPatchCall struct { - s *Service - project string - region string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthCheckServicesDeleteCall struct { + s *Service + project string + region string + healthCheckService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified regional BackendService resource with -// the data included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. -func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { - c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified regional HealthCheckService. 
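(Editorial aside, not part of the patch: a minimal sketch of driving the regionDisks.testIamPermissions call added in the hunks above. It assumes the generated package is imported as compute (the exact import path, for example google.golang.org/api/compute/v0.beta, depends on which generated client this file belongs to) and that a *compute.Service was constructed elsewhere with suitable credentials; the permission names are illustrative only.)

package example

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v0.beta" // assumed import path; match the vendored version
)

// checkRegionDiskPermissions asks the API which of the given permissions the
// caller holds on a regional disk, using the TestIamPermissions call shown in
// the diff above. svc is assumed to be built elsewhere.
func checkRegionDiskPermissions(ctx context.Context, svc *compute.Service, project, region, disk string) error {
    req := &compute.TestPermissionsRequest{
        // Illustrative permission names; substitute the ones you care about.
        Permissions: []string{"compute.disks.get", "compute.disks.setLabels"},
    }
    resp, err := svc.RegionDisks.TestIamPermissions(project, region, disk, req).Context(ctx).Do()
    if err != nil {
        return fmt.Errorf("testIamPermissions failed: %v", err)
    }
    fmt.Printf("caller holds %d of the requested permissions: %v\n", len(resp.Permissions), resp.Permissions)
    return nil
}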
+func (r *RegionHealthCheckServicesService) Delete(project string, region string, healthCheckService string) *RegionHealthCheckServicesDeleteCall { + c := &RegionHealthCheckServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.backendservice = backendservice + c.healthCheckService = healthCheckService return c } @@ -103362,7 +115564,7 @@ func (r *RegionBackendServicesService) Patch(project string, region string, back // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBackendServicesPatchCall { +func (c *RegionHealthCheckServicesDeleteCall) RequestId(requestId string) *RegionHealthCheckServicesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -103370,7 +115572,7 @@ func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBack // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBackendServicesPatchCall { +func (c *RegionHealthCheckServicesDeleteCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103378,57 +115580,52 @@ func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesPatchCall) Context(ctx context.Context) *RegionBackendServicesPatchCall { +func (c *RegionHealthCheckServicesDeleteCall) Context(ctx context.Context) *RegionHealthCheckServicesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesPatchCall) Header() http.Header { +func (c *RegionHealthCheckServicesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthCheckServices/{healthCheckService}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "healthCheckService": c.healthCheckService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.patch" call. +// Do executes the "compute.regionHealthCheckServices.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103459,19 +115656,18 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.regionBackendServices.patch", + // "description": "Deletes the specified regional HealthCheckService.", + // "httpMethod": "DELETE", + // "id": "compute.regionHealthCheckServices.delete", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "healthCheckService" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", + // "healthCheckService": { + // "description": "Name of the HealthCheckService to delete. 
The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -103495,10 +115691,7 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, + // "path": "{project}/regions/{region}/healthCheckServices/{healthCheckService}", // "response": { // "$ref": "Operation" // }, @@ -103510,92 +115703,98 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.regionBackendServices.testIamPermissions": +// method id "compute.regionHealthCheckServices.get": -type RegionBackendServicesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthCheckServicesGetCall struct { + s *Service + project string + region string + healthCheckService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionBackendServicesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionBackendServicesTestIamPermissionsCall { - c := &RegionBackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified regional HealthCheckService resource. +func (r *RegionHealthCheckServicesService) Get(project string, region string, healthCheckService string) *RegionHealthCheckServicesGetCall { + c := &RegionHealthCheckServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.healthCheckService = healthCheckService return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionBackendServicesTestIamPermissionsCall { +func (c *RegionHealthCheckServicesGetCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionHealthCheckServicesGetCall) IfNoneMatch(entityTag string) *RegionHealthCheckServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
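(Aside: a sketch of the regionHealthCheckServices.delete call added above, under the same assumptions as the previous sketch about the import path and a pre-built *compute.Service; the caller is expected to supply a freshly generated UUID for requestID.)

package example

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v0.beta" // assumed import path; match the vendored version
)

// deleteHealthCheckService issues the regionHealthCheckServices.delete call.
// Passing the same requestID on a retry lets the server ignore the duplicate,
// as described in the generated RequestId comments; UUID generation is left
// to the caller here.
func deleteHealthCheckService(ctx context.Context, svc *compute.Service, project, region, name, requestID string) (*compute.Operation, error) {
    op, err := svc.RegionHealthCheckServices.Delete(project, region, name).
        RequestId(requestID).
        Context(ctx).
        Do()
    if err != nil {
        return nil, fmt.Errorf("delete %s: %v", name, err)
    }
    fmt.Printf("started operation %s (status %s)\n", op.Name, op.Status)
    return op, nil
}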
-func (c *RegionBackendServicesTestIamPermissionsCall) Context(ctx context.Context) *RegionBackendServicesTestIamPermissionsCall { +func (c *RegionHealthCheckServicesGetCall) Context(ctx context.Context) *RegionHealthCheckServicesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesTestIamPermissionsCall) Header() http.Header { +func (c *RegionHealthCheckServicesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthCheckServices/{healthCheckService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "healthCheckService": c.healthCheckService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.regionHealthCheckServices.get" call. +// Exactly one of *HealthCheckService or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *HealthCheckService.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *RegionHealthCheckServicesGetCall) Do(opts ...googleapi.CallOption) (*HealthCheckService, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103614,7 +115813,7 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &HealthCheckService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103626,15 +115825,21 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionBackendServices.testIamPermissions", + // "description": "Returns the specified regional HealthCheckService resource.", + // "httpMethod": "GET", + // "id": "compute.regionHealthCheckServices.get", // "parameterOrder": [ // "project", // "region", - // "resource" + // "healthCheckService" // ], // "parameters": { + // "healthCheckService": { + // "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -103643,26 +115848,16 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/healthCheckServices/{healthCheckService}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "HealthCheckService" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -103673,29 +115868,25 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO } -// method id "compute.regionBackendServices.update": +// method id "compute.regionHealthCheckServices.insert": -type RegionBackendServicesUpdateCall struct { - s *Service - project string - region string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthCheckServicesInsertCall struct { + s *Service + project string + region string + healthcheckservice *HealthCheckService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified regional BackendService resource with -// the data included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. 
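(Aside: a sketch of conditional fetching with the regionHealthCheckServices.get call and its IfNoneMatch option, same assumptions as the earlier sketches; lastETag would normally be taken from a previous response's Etag header.)

package example

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v0.beta" // assumed import path
    "google.golang.org/api/googleapi"
)

// getIfChanged re-fetches a regional HealthCheckService only if it has changed
// since the ETag recorded from an earlier response, using IfNoneMatch and
// googleapi.IsNotModified as the generated comments describe.
func getIfChanged(ctx context.Context, svc *compute.Service, project, region, name, lastETag string) (*compute.HealthCheckService, error) {
    hcs, err := svc.RegionHealthCheckServices.Get(project, region, name).
        IfNoneMatch(lastETag).
        Context(ctx).
        Do()
    if googleapi.IsNotModified(err) {
        // Server returned 304; the cached copy is still current.
        fmt.Println("resource unchanged; keep the cached copy")
        return nil, nil
    }
    if err != nil {
        return nil, err
    }
    return hcs, nil
}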
-func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { - c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a regional HealthCheckService resource in the +// specified project and region using the data included in the request. +func (r *RegionHealthCheckServicesService) Insert(project string, region string, healthcheckservice *HealthCheckService) *RegionHealthCheckServicesInsertCall { + c := &RegionHealthCheckServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.backendservice = backendservice + c.healthcheckservice = healthcheckservice return c } @@ -103713,7 +115904,7 @@ func (r *RegionBackendServicesService) Update(project string, region string, bac // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBackendServicesUpdateCall { +func (c *RegionHealthCheckServicesInsertCall) RequestId(requestId string) *RegionHealthCheckServicesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -103721,7 +115912,7 @@ func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBackendServicesUpdateCall { +func (c *RegionHealthCheckServicesInsertCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103729,57 +115920,56 @@ func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesUpdateCall) Context(ctx context.Context) *RegionBackendServicesUpdateCall { +func (c *RegionHealthCheckServicesInsertCall) Context(ctx context.Context) *RegionHealthCheckServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionBackendServicesUpdateCall) Header() http.Header { +func (c *RegionHealthCheckServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheckservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthCheckServices") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.update" call. +// Do executes the "compute.regionHealthCheckServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthCheckServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103810,22 +116000,14 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. 
Read Restrictions and Guidelines for more information.", - // "httpMethod": "PUT", - // "id": "compute.regionBackendServices.update", + // "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionHealthCheckServices.insert", // "parameterOrder": [ // "project", - // "region", - // "backendService" + // "region" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -103846,9 +116028,9 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "path": "{project}/regions/{region}/healthCheckServices", // "request": { - // "$ref": "BackendService" + // "$ref": "HealthCheckService" // }, // "response": { // "$ref": "Operation" @@ -103861,21 +116043,24 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionCommitments.aggregatedList": +// method id "compute.regionHealthCheckServices.list": -type RegionCommitmentsAggregatedListCall struct { +type RegionHealthCheckServicesListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of commitments. -func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { - c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists all the HealthCheckService resources that have been +// configured for the specified project in the given region. +func (r *RegionHealthCheckServicesService) List(project string, region string) *RegionHealthCheckServicesListCall { + c := &RegionHealthCheckServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } @@ -103901,7 +116086,7 @@ func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitm // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthCheckServicesListCall) Filter(filter string) *RegionHealthCheckServicesListCall { c.urlParams_.Set("filter", filter) return c } @@ -103912,7 +116097,7 @@ func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommi // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
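(Aside: a sketch of the regionHealthCheckServices.insert call under the same assumptions; only Name is populated, and a usable HealthCheckService would need further fields that this diff does not show.)

package example

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// createHealthCheckService starts an insert of a regional HealthCheckService
// and returns the resulting Operation. requestID should be a client-generated
// UUID so the call can be retried safely.
func createHealthCheckService(ctx context.Context, svc *compute.Service, project, region, requestID string) (*compute.Operation, error) {
    hcs := &compute.HealthCheckService{
        Name: "example-health-check-service", // illustrative name only
    }
    op, err := svc.RegionHealthCheckServices.Insert(project, region, hcs).
        RequestId(requestID).
        Context(ctx).
        Do()
    if err != nil {
        return nil, fmt.Errorf("insert: %v", err)
    }
    fmt.Printf("insert started: operation %s\n", op.Name)
    return op, nil
}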
// (Default: 500) -func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthCheckServicesListCall) MaxResults(maxResults int64) *RegionHealthCheckServicesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -103929,7 +116114,7 @@ func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *Regi // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthCheckServicesListCall) OrderBy(orderBy string) *RegionHealthCheckServicesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -103937,7 +116122,7 @@ func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCom // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthCheckServicesListCall) PageToken(pageToken string) *RegionHealthCheckServicesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -103945,7 +116130,7 @@ func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *Regio // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthCheckServicesListCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103955,7 +116140,7 @@ func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *Regi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthCheckServicesListCall) IfNoneMatch(entityTag string) *RegionHealthCheckServicesListCall { c.ifNoneMatch_ = entityTag return c } @@ -103963,21 +116148,21 @@ func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsAggregatedListCall) Context(ctx context.Context) *RegionCommitmentsAggregatedListCall { +func (c *RegionHealthCheckServicesListCall) Context(ctx context.Context) *RegionHealthCheckServicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { +func (c *RegionHealthCheckServicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -103989,7 +116174,7 @@ func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Respo var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthCheckServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -103998,18 +116183,19 @@ func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Respo req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.aggregatedList" call. -// Exactly one of *CommitmentAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *CommitmentAggregatedList.ServerResponse.Header or (if a response was +// Do executes the "compute.regionHealthCheckServices.list" call. +// Exactly one of *HealthCheckServicesList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HealthCheckServicesList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*CommitmentAggregatedList, error) { +func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*HealthCheckServicesList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104028,7 +116214,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &CommitmentAggregatedList{ + ret := &HealthCheckServicesList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104040,11 +116226,12 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves an aggregated list of commitments.", + // "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", // "httpMethod": "GET", - // "id": "compute.regionCommitments.aggregatedList", + // "id": "compute.regionHealthCheckServices.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { @@ -104076,11 +116263,18 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/commitments", + // "path": "{project}/regions/{region}/healthCheckServices", // "response": { - // "$ref": "CommitmentAggregatedList" + // "$ref": "HealthCheckServicesList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -104094,7 +116288,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func(*CommitmentAggregatedList) error) error { +func (c *RegionHealthCheckServicesListCall) Pages(ctx context.Context, f func(*HealthCheckServicesList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -104112,99 +116306,92 @@ func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func( } } -// method id "compute.regionCommitments.get": +// method id "compute.regionHealthCheckServices.testIamPermissions": -type RegionCommitmentsGetCall struct { - s *Service - project string - region string - commitment string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionHealthCheckServicesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified commitment resource. Gets a list of -// available commitments by making a list() request. -func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { - c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
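(Aside: a sketch of paging through regionHealthCheckServices.list with the Filter, MaxResults and Pages helpers shown above, same assumptions as before; the Items field on HealthCheckServicesList is assumed by analogy with the other generated list types.)

package example

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// listHealthCheckServices walks every page of regionHealthCheckServices.list.
// Pages follows NextPageToken automatically and stops when the callback
// returns an error.
func listHealthCheckServices(ctx context.Context, svc *compute.Service, project, region string) error {
    call := svc.RegionHealthCheckServices.List(project, region).
        Filter("name != example-service"). // same filter syntax as in the generated comments
        MaxResults(100)
    return call.Pages(ctx, func(page *compute.HealthCheckServicesList) error {
        for _, hcs := range page.Items { // Items assumed; conventional for generated list types
            fmt.Println(hcs.Name)
        }
        return nil
    })
}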
+func (r *RegionHealthCheckServicesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionHealthCheckServicesTestIamPermissionsCall { + c := &RegionHealthCheckServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.commitment = commitment + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { +func (c *RegionHealthCheckServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { +func (c *RegionHealthCheckServicesTestIamPermissionsCall) Context(ctx context.Context) *RegionHealthCheckServicesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsGetCall) Header() http.Header { +func (c *RegionHealthCheckServicesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthCheckServices/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "commitment": c.commitment, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.get" call. 
-// Exactly one of *Commitment or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Commitment.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { +// Do executes the "compute.regionHealthCheckServices.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionHealthCheckServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104223,7 +116410,7 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Commitment{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104235,22 +116422,15 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment } return ret, nil // { - // "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regionCommitments.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.regionHealthCheckServices.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "commitment" + // "resource" // ], // "parameters": { - // "commitment": { - // "description": "Name of the commitment to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -104259,16 +116439,26 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/commitments/{commitment}", + // "path": "{project}/regions/{region}/healthCheckServices/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Commitment" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -104279,25 +116469,24 @@ func (c *RegionCommitmentsGetCall) Do(opts 
...googleapi.CallOption) (*Commitment } -// method id "compute.regionCommitments.insert": +// method id "compute.regionHealthChecks.delete": -type RegionCommitmentsInsertCall struct { - s *Service - project string - region string - commitment *Commitment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthChecksDeleteCall struct { + s *Service + project string + region string + healthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a commitment in the specified project using the data -// included in the request. -func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { - c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HealthCheck resource. +func (r *RegionHealthChecksService) Delete(project string, region string, healthCheck string) *RegionHealthChecksDeleteCall { + c := &RegionHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.commitment = commitment + c.healthCheck = healthCheck return c } @@ -104315,7 +116504,7 @@ func (r *RegionCommitmentsService) Insert(project string, region string, commitm // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { +func (c *RegionHealthChecksDeleteCall) RequestId(requestId string) *RegionHealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -104323,7 +116512,7 @@ func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitm // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { +func (c *RegionHealthChecksDeleteCall) Fields(s ...googleapi.Field) *RegionHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104331,56 +116520,52 @@ func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommit // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { +func (c *RegionHealthChecksDeleteCall) Context(ctx context.Context) *RegionHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
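(Aside: a sketch of the regionHealthChecks.delete call together with the Fields partial-response option and the Header hook documented above, same assumptions as the earlier sketches; the custom header name is illustrative.)

package example

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// deleteRegionHealthCheck deletes a regional health check, trimming the
// Operation response to a couple of fields via Fields and attaching a custom
// header before Do sends the request.
func deleteRegionHealthCheck(ctx context.Context, svc *compute.Service, project, region, healthCheck string) error {
    call := svc.RegionHealthChecks.Delete(project, region, healthCheck).Context(ctx)
    call.Fields("name", "status")                // only these Operation fields are returned
    call.Header().Set("X-Example-Trace", "demo") // illustrative header only
    op, err := call.Do()
    if err != nil {
        return err
    }
    fmt.Printf("delete operation %s is %s\n", op.Name, op.Status)
    return nil
}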
-func (c *RegionCommitmentsInsertCall) Header() http.Header { +func (c *RegionHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.insert" call. +// Do executes the "compute.regionHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104411,261 +116596,20 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a commitment in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.regionCommitments.insert", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/commitments", - // "request": { - // "$ref": "Commitment" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionCommitments.list": - -type RegionCommitmentsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of commitments contained within the specified -// region. -func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { - c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionCommitmentsListCall) MaxResults(maxResults int64) *RegionCommitmentsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". 
This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionCommitmentsListCall) OrderBy(orderBy string) *RegionCommitmentsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmentsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionCommitmentsListCall) Fields(s ...googleapi.Field) *RegionCommitmentsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionCommitmentsListCall) IfNoneMatch(entityTag string) *RegionCommitmentsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionCommitmentsListCall) Context(ctx context.Context) *RegionCommitmentsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionCommitmentsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionCommitments.list" call. -// Exactly one of *CommitmentList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *CommitmentList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*CommitmentList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CommitmentList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of commitments contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionCommitments.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "description": "Deletes the specified HealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionHealthChecks.delete", + // "parameterOrder": [ + // "project", + // "region", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -104676,133 +116620,123 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/commitments", + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", // "response": { - // "$ref": "CommitmentList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionCommitmentsListCall) Pages(ctx context.Context, f func(*CommitmentList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionCommitments.testIamPermissions": +// method id "compute.regionHealthChecks.get": -type RegionCommitmentsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthChecksGetCall struct { + s *Service + project string + region string + healthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *RegionCommitmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionCommitmentsTestIamPermissionsCall { - c := &RegionCommitmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HealthCheck resource. Gets a list of +// available health checks by making a list() request. +func (r *RegionHealthChecksService) Get(project string, region string, healthCheck string) *RegionHealthChecksGetCall { + c := &RegionHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.healthCheck = healthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionCommitmentsTestIamPermissionsCall { +func (c *RegionHealthChecksGetCall) Fields(s ...googleapi.Field) *RegionHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionHealthChecksGetCall) IfNoneMatch(entityTag string) *RegionHealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsTestIamPermissionsCall) Context(ctx context.Context) *RegionCommitmentsTestIamPermissionsCall { +func (c *RegionHealthChecksGetCall) Context(ctx context.Context) *RegionHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsTestIamPermissionsCall) Header() http.Header { +func (c *RegionHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionCommitmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.regionHealthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheck.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104821,7 +116755,7 @@ func (c *RegionCommitmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104833,15 +116767,22 @@ func (c *RegionCommitmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionCommitments.testIamPermissions", + // "description": "Returns the specified HealthCheck resource. 
Gets a list of available health checks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionHealthChecks.get", // "parameterOrder": [ // "project", // "region", - // "resource" + // "healthCheck" // ], // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -104850,26 +116791,16 @@ func (c *RegionCommitmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/commitments/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "HealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -104880,99 +116811,108 @@ func (c *RegionCommitmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } -// method id "compute.regionDiskTypes.get": +// method id "compute.regionHealthChecks.insert": -type RegionDiskTypesGetCall struct { - s *Service - project string - region string - diskType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionHealthChecksInsertCall struct { + s *Service + project string + region string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified regional disk type. Gets a list of -// available disk types by making a list() request. -func (r *RegionDiskTypesService) Get(project string, region string, diskType string) *RegionDiskTypesGetCall { - c := &RegionDiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *RegionHealthChecksService) Insert(project string, region string, healthcheck *HealthCheck) *RegionHealthChecksInsertCall { + c := &RegionHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.diskType = diskType + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionHealthChecksInsertCall) RequestId(requestId string) *RegionHealthChecksInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDiskTypesGetCall) Fields(s ...googleapi.Field) *RegionDiskTypesGetCall { +func (c *RegionHealthChecksInsertCall) Fields(s ...googleapi.Field) *RegionHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionDiskTypesGetCall) IfNoneMatch(entityTag string) *RegionDiskTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDiskTypesGetCall) Context(ctx context.Context) *RegionDiskTypesGetCall { +func (c *RegionHealthChecksInsertCall) Context(ctx context.Context) *RegionHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDiskTypesGetCall) Header() http.Header { +func (c *RegionHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes/{diskType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "diskType": c.diskType, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDiskTypes.get" call. -// Exactly one of *DiskType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionHealthChecks.insert" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { +func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104991,7 +116931,7 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105003,22 +116943,14 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er } return ret, nil // { - // "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regionDiskTypes.get", + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionHealthChecks.insert", // "parameterOrder": [ // "project", - // "region", - // "diskType" + // "region" // ], // "parameters": { - // "diskType": { - // "description": "Name of the disk type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -105027,29 +116959,36 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/diskTypes/{diskType}", + // "path": "{project}/regions/{region}/healthChecks", + // "request": { + // "$ref": "HealthCheck" + // }, // "response": { - // "$ref": "DiskType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionDiskTypes.list": +// method id "compute.regionHealthChecks.list": -type RegionDiskTypesListCall struct { +type RegionHealthChecksListCall struct { s *Service project string region string @@ -105059,10 +116998,10 @@ type RegionDiskTypesListCall struct { header_ http.Header } -// List: Retrieves a list of regional disk types available to the +// List: Retrieves the list of HealthCheck resources available to the // specified project. -func (r *RegionDiskTypesService) List(project string, region string) *RegionDiskTypesListCall { - c := &RegionDiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionHealthChecksService) List(project string, region string) *RegionHealthChecksListCall { + c := &RegionHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -105090,7 +117029,7 @@ func (r *RegionDiskTypesService) List(project string, region string) *RegionDisk // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall { +func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -105101,7 +117040,7 @@ func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionDiskTypesListCall) MaxResults(maxResults int64) *RegionDiskTypesListCall { +func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -105118,7 +117057,7 @@ func (c *RegionDiskTypesListCall) MaxResults(maxResults int64) *RegionDiskTypesL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionDiskTypesListCall) OrderBy(orderBy string) *RegionDiskTypesListCall { +func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -105126,7 +117065,7 @@ func (c *RegionDiskTypesListCall) OrderBy(orderBy string) *RegionDiskTypesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
-func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesListCall { +func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -105134,7 +117073,7 @@ func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDiskTypesListCall) Fields(s ...googleapi.Field) *RegionDiskTypesListCall { +func (c *RegionHealthChecksListCall) Fields(s ...googleapi.Field) *RegionHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -105144,7 +117083,7 @@ func (c *RegionDiskTypesListCall) Fields(s ...googleapi.Field) *RegionDiskTypesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionDiskTypesListCall) IfNoneMatch(entityTag string) *RegionDiskTypesListCall { +func (c *RegionHealthChecksListCall) IfNoneMatch(entityTag string) *RegionHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -105152,21 +117091,21 @@ func (c *RegionDiskTypesListCall) IfNoneMatch(entityTag string) *RegionDiskTypes // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDiskTypesListCall) Context(ctx context.Context) *RegionDiskTypesListCall { +func (c *RegionHealthChecksListCall) Context(ctx context.Context) *RegionHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDiskTypesListCall) Header() http.Header { +func (c *RegionHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -105178,7 +117117,7 @@ func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -105192,14 +117131,14 @@ func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDiskTypes.list" call. -// Exactly one of *RegionDiskTypeList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RegionDiskTypeList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionHealthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskTypeList, error) { +func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105218,7 +117157,7 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionDiskTypeList{ + ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105230,9 +117169,9 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT } return ret, nil // { - // "description": "Retrieves a list of regional disk types available to the specified project.", + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.regionDiskTypes.list", + // "id": "compute.regionHealthChecks.list", // "parameterOrder": [ // "project", // "region" @@ -105269,16 +117208,16 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/diskTypes", + // "path": "{project}/regions/{region}/healthChecks", // "response": { - // "$ref": "RegionDiskTypeList" + // "$ref": "HealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -105292,7 +117231,7 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionDiskTypesListCall) Pages(ctx context.Context, f func(*RegionDiskTypeList) error) error { +func (c *RegionHealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -105310,28 +117249,28 @@ func (c *RegionDiskTypesListCall) Pages(ctx context.Context, f func(*RegionDiskT } } -// method id "compute.regionDisks.addResourcePolicies": +// method id "compute.regionHealthChecks.patch": -type RegionDisksAddResourcePoliciesCall struct { - s *Service - project string - region string - disk string - regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthChecksPatchCall struct { + s *Service + project string + region string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddResourcePolicies: Adds existing resource policies to a regional -// disk. 
You can only add one policy which will be applied to this disk -// for scheduling snapshot creation. -func (r *RegionDisksService) AddResourcePolicies(project string, region string, disk string, regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest) *RegionDisksAddResourcePoliciesCall { - c := &RegionDisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +func (r *RegionHealthChecksService) Patch(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksPatchCall { + c := &RegionHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.regiondisksaddresourcepoliciesrequest = regiondisksaddresourcepoliciesrequest + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -105349,7 +117288,7 @@ func (r *RegionDisksService) AddResourcePolicies(project string, region string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksAddResourcePoliciesCall) RequestId(requestId string) *RegionDisksAddResourcePoliciesCall { +func (c *RegionHealthChecksPatchCall) RequestId(requestId string) *RegionHealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -105357,7 +117296,7 @@ func (c *RegionDisksAddResourcePoliciesCall) RequestId(requestId string) *Region // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksAddResourcePoliciesCall { +func (c *RegionHealthChecksPatchCall) Fields(s ...googleapi.Field) *RegionHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -105365,57 +117304,57 @@ func (c *RegionDisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *Regio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksAddResourcePoliciesCall) Context(ctx context.Context) *RegionDisksAddResourcePoliciesCall { +func (c *RegionHealthChecksPatchCall) Context(ctx context.Context) *RegionHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksAddResourcePoliciesCall) Header() http.Header { +func (c *RegionHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksaddresourcepoliciesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/addResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.addResourcePolicies" call. +// Do executes the "compute.regionHealthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105446,17 +117385,17 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", - // "httpMethod": "POST", - // "id": "compute.regionDisks.addResourcePolicies", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionHealthChecks.patch", // "parameterOrder": [ // "project", // "region", - // "disk" + // "healthCheck" // ], // "parameters": { - // "disk": { - // "description": "The disk name for this request.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -105470,7 +117409,7 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -105482,9 +117421,9 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}/addResourcePolicies", + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", // "request": { - // "$ref": "RegionDisksAddResourcePoliciesRequest" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -105497,58 +117436,34 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionDisks.createSnapshot": +// method id "compute.regionHealthChecks.testIamPermissions": -type RegionDisksCreateSnapshotCall struct { - s *Service - project string - region string - disk string - snapshot *Snapshot - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthChecksTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// CreateSnapshot: Creates a snapshot of this regional disk. -func (r *RegionDisksService) CreateSnapshot(project string, region string, disk string, snapshot *Snapshot) *RegionDisksCreateSnapshotCall { - c := &RegionDisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionHealthChecksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionHealthChecksTestIamPermissionsCall { + c := &RegionHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.snapshot = snapshot - return c -} - -// GuestFlush sets the optional parameter "guestFlush": -func (c *RegionDisksCreateSnapshotCall) GuestFlush(guestFlush bool) *RegionDisksCreateSnapshotCall { - c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksCreateSnapshotCall) RequestId(requestId string) *RegionDisksCreateSnapshotCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisksCreateSnapshotCall { +func (c *RegionHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionHealthChecksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -105556,35 +117471,35 @@ func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisk // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksCreateSnapshotCall) Context(ctx context.Context) *RegionDisksCreateSnapshotCall { +func (c *RegionHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *RegionHealthChecksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksCreateSnapshotCall) Header() http.Header { +func (c *RegionHealthChecksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/createSnapshot") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -105592,21 +117507,21 @@ func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, e } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.createSnapshot" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionHealthChecks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105625,7 +117540,7 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105637,26 +117552,15 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Creates a snapshot of this regional disk.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.regionDisks.createSnapshot", + // "id": "compute.regionHealthChecks.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "disk" + // "resource" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk to snapshot.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "guestFlush": { - // "location": "query", - // "type": "boolean" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -105665,54 +117569,57 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}/createSnapshot", + // "path": "{project}/regions/{region}/healthChecks/{resource}/testIamPermissions", // "request": { - // "$ref": "Snapshot" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionDisks.delete": +// method id "compute.regionHealthChecks.update": -type RegionDisksDeleteCall struct { - s *Service - project string - region string - disk string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthChecksUpdateCall struct { + s *Service + project string + region string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified regional persistent disk. Deleting a -// regional disk removes all the replicas of its data permanently and is -// irreversible. However, deleting a disk does not delete any snapshots -// previously made from the disk. You must separately delete snapshots. -func (r *RegionDisksService) Delete(project string, region string, disk string) *RegionDisksDeleteCall { - c := &RegionDisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *RegionHealthChecksService) Update(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksUpdateCall { + c := &RegionHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -105730,7 +117637,7 @@ func (r *RegionDisksService) Delete(project string, region string, disk string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCall { +func (c *RegionHealthChecksUpdateCall) RequestId(requestId string) *RegionHealthChecksUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -105738,7 +117645,7 @@ func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteCall { +func (c *RegionHealthChecksUpdateCall) Fields(s ...googleapi.Field) *RegionHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -105746,52 +117653,57 @@ func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksDeleteCall) Context(ctx context.Context) *RegionDisksDeleteCall { +func (c *RegionHealthChecksUpdateCall) Context(ctx context.Context) *RegionHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksDeleteCall) Header() http.Header { +func (c *RegionHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.delete" call. +// Do executes the "compute.regionHealthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105822,18 +117734,19 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. 
You must separately delete snapshots.", - // "httpMethod": "DELETE", - // "id": "compute.regionDisks.delete", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.regionHealthChecks.update", // "parameterOrder": [ // "project", // "region", - // "disk" + // "healthCheck" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk to delete.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -105845,7 +117758,7 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -105857,7 +117770,10 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}", + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "request": { + // "$ref": "HealthCheck" + // }, // "response": { // "$ref": "Operation" // }, @@ -105869,98 +117785,126 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.regionDisks.get": +// method id "compute.regionInstanceGroupManagers.abandonInstances": -type RegionDisksGetCall struct { - s *Service - project string - region string - disk string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns a specified regional persistent disk. -func (r *RegionDisksService) Get(project string, region string, disk string) *RegionDisksGetCall { - c := &RegionDisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AbandonInstances: Flags the specified instances to be immediately +// removed from the managed instance group. Abandoning an instance does +// not delete the instance, but it does remove the instance from any +// target pools that are applied by the managed instance group. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you abandon. This operation is marked as +// DONE when the action is scheduled even if the instances have not yet +// been removed from the group. You must separately verify the status of +// the abandoning action with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. 
+func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { + c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksGetCall) Fields(s ...googleapi.Field) *RegionDisksGetCall { +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionDisksGetCall) IfNoneMatch(entityTag string) *RegionDisksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksGetCall) Context(ctx context.Context) *RegionDisksGetCall { +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
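To make the abandonInstances flow concrete, here is a hedged sketch under the same assumptions as above (compute alias, caller-built svc, assumed RegionInstanceGroupManagers field). The request struct body is outside this hunk, so the Instances []string field holding instance URLs is an assumption based on the usual compute API shape. As the method description says, a DONE operation only means the abandon action was scheduled; completion still has to be verified separately, for example with the listManagedInstances call mentioned in the doc.

// Illustrative sketch only, not part of the generated file.
func abandonInstances(ctx context.Context, svc *compute.Service, project, region, igm string, instanceURLs []string) (*compute.Operation, error) {
	req := &compute.RegionInstanceGroupManagersAbandonInstancesRequest{
		// Instances (assumed field name) carries instance URLs; the description
		// above caps this at 1000 instances per request.
		Instances: instanceURLs,
	}
	op, err := svc.RegionInstanceGroupManagers.AbandonInstances(project, region, igm, req).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	// A DONE status here only means the action was scheduled; verify the
	// per-instance outcome separately (e.g. via listManagedInstances).
	return op, nil
}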
-func (c *RegionDisksGetCall) Header() http.Header { +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.get" call. -// Exactly one of *Disk or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Disk.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { +// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105979,7 +117923,7 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Disk{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105991,19 +117935,18 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified regional persistent disk.", - // "httpMethod": "GET", - // "id": "compute.regionDisks.get", + // "description": "Flags the specified instances to be immediately removed from the managed instance group. 
Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.abandonInstances", // "parameterOrder": [ // "project", // "region", - // "disk" + // "instanceGroupManager" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk to return.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -106015,78 +117958,60 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + // }, // "response": { - // "$ref": "Disk" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionDisks.insert": +// method id "compute.regionInstanceGroupManagers.applyUpdatesToInstances": -type RegionDisksInsertCall struct { - s *Service - project string - region string - disk *Disk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersApplyUpdatesToInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a persistent regional disk in the specified project -// using the data included in the request. -func (r *RegionDisksService) Insert(project string, region string, disk *Disk) *RegionDisksInsertCall { - c := &RegionDisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ApplyUpdatesToInstances: Apply updates to selected instances the +// managed instance group. +func (r *RegionInstanceGroupManagersService) ApplyUpdatesToInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { + c := &RegionInstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksInsertCall) RequestId(requestId string) *RegionDisksInsertCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// SourceImage sets the optional parameter "sourceImage": Source image -// to restore onto a disk. 
-func (c *RegionDisksInsertCall) SourceImage(sourceImage string) *RegionDisksInsertCall { - c.urlParams_.Set("sourceImage", sourceImage) + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersapplyupdatesrequest = regioninstancegroupmanagersapplyupdatesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksInsertCall) Fields(s ...googleapi.Field) *RegionDisksInsertCall { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106094,35 +118019,35 @@ func (c *RegionDisksInsertCall) Fields(s ...googleapi.Field) *RegionDisksInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksInsertCall) Context(ctx context.Context) *RegionDisksInsertCall { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksInsertCall) Header() http.Header { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersapplyupdatesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -106130,20 +118055,21 @@ func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.insert" call. +// Do executes the "compute.regionInstanceGroupManagers.applyUpdatesToInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
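Every Do method in this file restates the same error contract: a non-2xx status surfaces as a *googleapi.Error carrying the response headers, and googleapi.IsNotModified distinguishes the 304 case for calls that sent If-None-Match. A short sketch of consuming that contract with the applyUpdatesToInstances call added here, assuming the compute alias, a caller-built svc, and imports of context, fmt, and google.golang.org/api/googleapi:

// Illustrative sketch only.
func applyUpdates(ctx context.Context, svc *compute.Service, project, region, igm string, req *compute.RegionInstanceGroupManagersApplyUpdatesRequest) (*compute.Operation, error) {
	op, err := svc.RegionInstanceGroupManagers.ApplyUpdatesToInstances(project, region, igm, req).Context(ctx).Do()
	if err != nil {
		if googleapi.IsNotModified(err) {
			// 304: only possible for calls that sent If-None-Match; nothing changed.
			return nil, nil
		}
		if apiErr, ok := err.(*googleapi.Error); ok {
			// Non-2xx statuses arrive as *googleapi.Error with the response
			// headers attached, exactly as the Do documentation describes.
			return nil, fmt.Errorf("applyUpdatesToInstances: HTTP %d (headers: %v): %w", apiErr.Code, apiErr.Header, err)
		}
		return nil, err
	}
	return op, nil
}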
-func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -106174,14 +118100,21 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a persistent regional disk in the specified project using the data included in the request.", + // "description": "Apply updates to selected instances the managed instance group.", // "httpMethod": "POST", - // "id": "compute.regionDisks.insert", + // "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group, should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -106190,26 +118123,15 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "sourceImage": { - // "description": "Optional. 
Source image to restore onto a disk.", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", // "request": { - // "$ref": "Disk" + // "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" // }, // "response": { // "$ref": "Operation" @@ -106222,159 +118144,114 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.regionDisks.list": +// method id "compute.regionInstanceGroupManagers.createInstances": -type RegionDisksListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersCreateInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of persistent disks contained within the -// specified region. -func (r *RegionDisksService) List(project string, region string) *RegionDisksListCall { - c := &RegionDisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateInstances: Creates instances with per-instance configs in this +// regional managed instance group. Instances are created using the +// current instance template. The create instances operation is marked +// DONE if the createInstances request is successful. The underlying +// actions take additional time. You must separately verify the status +// of the creating or actions with the listmanagedinstances method. +func (r *RegionInstanceGroupManagersService) CreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest) *RegionInstanceGroupManagersCreateInstancesCall { + c := &RegionInstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerscreateinstancesrequest = regioninstancegroupmanagerscreateinstancesrequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionDisksListCall) MaxResults(maxResults int64) *RegionDisksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionDisksListCall) OrderBy(orderBy string) *RegionDisksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersCreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersCreateInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksListCall) Fields(s ...googleapi.Field) *RegionDisksListCall { +func (c *RegionInstanceGroupManagersCreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersCreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
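The RequestId contract is repeated on every mutating call in this hunk: send the same UUID on each retry and the server ignores duplicates instead of re-executing the mutation. A sketch of that retry shape around the new createInstances call, with the same assumptions as the earlier sketches and a caller-supplied requestID so no UUID dependency is implied; real code would back off and retry only transient failures.

// Illustrative sketch only.
func createInstancesWithRetry(ctx context.Context, svc *compute.Service, project, region, igm, requestID string, req *compute.RegionInstanceGroupManagersCreateInstancesRequest) (*compute.Operation, error) {
	var lastErr error
	for attempt := 0; attempt < 3; attempt++ {
		// The same requestID goes out on every attempt, so a retry after a
		// timeout cannot create the instances twice (per the RequestId doc).
		op, err := svc.RegionInstanceGroupManagers.CreateInstances(project, region, igm, req).
			RequestId(requestID).
			Context(ctx).
			Do()
		if err == nil {
			return op, nil
		}
		lastErr = err
	}
	return nil, lastErr
}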
-func (c *RegionDisksListCall) IfNoneMatch(entityTag string) *RegionDisksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksListCall) Context(ctx context.Context) *RegionDisksListCall { +func (c *RegionInstanceGroupManagersCreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersCreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksListCall) Header() http.Header { +func (c *RegionInstanceGroupManagersCreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerscreateinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.list" call. -// Exactly one of *DiskList or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskList.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionInstanceGroupManagers.createInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { +func (c *RegionInstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -106393,7 +118270,7 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -106403,37 +118280,21 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } - return ret, nil - // { - // "description": "Retrieves the list of persistent disks contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionDisks.list", + return ret, nil + // { + // "description": "Creates instances with per-instance configs in this regional managed instance group. Instances are created using the current instance template. The create instances operation is marked DONE if the createInstances request is successful. The underlying actions take additional time. You must separately verify the status of the creating or actions with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.createInstances", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -106444,68 +118305,51 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "The name of the region where the managed instance group is located. It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/createInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersCreateInstancesRequest" + // }, // "response": { - // "$ref": "DiskList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RegionDisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionDisks.removeResourcePolicies": +// method id "compute.regionInstanceGroupManagers.delete": -type RegionDisksRemoveResourcePoliciesCall struct { - s *Service - project string - region string - disk string - regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersDeleteCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveResourcePolicies: Removes resource policies from a regional -// disk. -func (r *RegionDisksService) RemoveResourcePolicies(project string, region string, disk string, regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest) *RegionDisksRemoveResourcePoliciesCall { - c := &RegionDisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. +func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { + c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.regiondisksremoveresourcepoliciesrequest = regiondisksremoveresourcepoliciesrequest + c.instanceGroupManager = instanceGroupManager return c } @@ -106523,7 +118367,7 @@ func (r *RegionDisksService) RemoveResourcePolicies(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksRemoveResourcePoliciesCall) RequestId(requestId string) *RegionDisksRemoveResourcePoliciesCall { +func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -106531,7 +118375,7 @@ func (c *RegionDisksRemoveResourcePoliciesCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksRemoveResourcePoliciesCall { +func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106539,57 +118383,52 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
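Context, as documented just above, aborts the pending HTTP request when the supplied context is canceled, which is the natural way to bound the new regionInstanceGroupManagers.delete call. A minimal sketch under the same assumptions (compute alias, caller-built svc, plus the time package):

// Illustrative sketch only.
func deleteRegionMIG(parent context.Context, svc *compute.Service, project, region, igm string) (*compute.Operation, error) {
	// Cancellation of ctx aborts the in-flight HTTP request, per the Context doc.
	ctx, cancel := context.WithTimeout(parent, 2*time.Minute)
	defer cancel()
	return svc.RegionInstanceGroupManagers.Delete(project, region, igm).
		Context(ctx).
		Do()
}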
-func (c *RegionDisksRemoveResourcePoliciesCall) Context(ctx context.Context) *RegionDisksRemoveResourcePoliciesCall { +func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksRemoveResourcePoliciesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksremoveresourcepoliciesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/removeResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.removeResourcePolicies" call. +// Do executes the "compute.regionInstanceGroupManagers.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -106620,19 +118459,18 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Removes resource policies from a regional disk.", - // "httpMethod": "POST", - // "id": "compute.regionDisks.removeResourcePolicies", + // "description": "Deletes the specified managed instance group and all of the instances in that group.", + // "httpMethod": "DELETE", + // "id": "compute.regionInstanceGroupManagers.delete", // "parameterOrder": [ // "project", // "region", - // "disk" + // "instanceGroupManager" // ], // "parameters": { - // "disk": { - // "description": "The disk name for this request.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -106644,9 +118482,8 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -106656,10 +118493,7 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}/removeResourcePolicies", - // "request": { - // "$ref": "RegionDisksRemoveResourcePoliciesRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "response": { // "$ref": "Operation" // }, @@ -106671,26 +118505,42 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionDisks.resize": +// method id "compute.regionInstanceGroupManagers.deleteInstances": -type RegionDisksResizeCall struct { - s *Service - project string - region string - disk string - regiondisksresizerequest *RegionDisksResizeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the specified regional persistent disk. -func (r *RegionDisksService) Resize(project string, region string, disk string, regiondisksresizerequest *RegionDisksResizeRequest) *RegionDisksResizeCall { - c := &RegionDisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteInstances: Flags the specified instances in the managed +// instance group to be immediately deleted. The instances are also +// removed from any target pools of which they were a member. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you delete. The deleteInstances operation is +// marked DONE if the deleteInstances request is successful. The +// underlying actions take additional time. You must separately verify +// the status of the deleting action with the listmanagedinstances +// method. 
+// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { + c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.regiondisksresizerequest = regiondisksresizerequest + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest return c } @@ -106708,7 +118558,7 @@ func (r *RegionDisksService) Resize(project string, region string, disk string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -106716,7 +118566,7 @@ func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106724,35 +118574,35 @@ func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksResizeCall) Context(ctx context.Context) *RegionDisksResizeCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
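deleteInstances mirrors abandonInstances: the request body lists instance URLs, at most 1000 per request, and a DONE operation only means the deletion was scheduled. The struct definition sits outside this hunk, so the Instances []string field below is an assumption; everything else follows the signatures shown above.

// Illustrative sketch only.
func deleteManagedInstances(ctx context.Context, svc *compute.Service, project, region, igm string, instanceURLs []string) (*compute.Operation, error) {
	req := &compute.RegionInstanceGroupManagersDeleteInstancesRequest{
		Instances: instanceURLs, // assumed field name; instance URLs, max 1000 per call
	}
	return svc.RegionInstanceGroupManagers.DeleteInstances(project, region, igm, req).
		Context(ctx).
		Do()
}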
-func (c *RegionDisksResizeCall) Header() http.Header { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksresizerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -106760,21 +118610,21 @@ func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.resize" call. +// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -106805,33 +118655,31 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Resizes the specified regional persistent disk.", + // "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. 
You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.regionDisks.resize", + // "id": "compute.regionInstanceGroupManagers.deleteInstances", // "parameterOrder": [ // "project", // "region", - // "disk" + // "instanceGroupManager" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, // "project": { - // "description": "The project ID for this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -106841,9 +118689,9 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}/resize", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", // "request": { - // "$ref": "RegionDisksResizeRequest" + // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -106856,52 +118704,34 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.regionDisks.setLabels": +// method id "compute.regionInstanceGroupManagers.deletePerInstanceConfigs": -type RegionDisksSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersDeletePerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on the target regional disk. -func (r *RegionDisksService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionDisksSetLabelsCall { - c := &RegionDisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeletePerInstanceConfigs: Deletes selected per-instance configs for +// the managed instance group. 
+func (r *RegionInstanceGroupManagersService) DeletePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { + c := &RegionInstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLabelsCall { - c.urlParams_.Set("requestId", requestId) + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerdeleteinstanceconfigreq = regioninstancegroupmanagerdeleteinstanceconfigreq return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetLabelsCall { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106909,35 +118739,35 @@ func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetL // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksSetLabelsCall) Context(ctx context.Context) *RegionDisksSetLabelsCall { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksSetLabelsCall) Header() http.Header { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerdeleteinstanceconfigreq) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -106945,21 +118775,21 @@ func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.setLabels" call. +// Do executes the "compute.regionInstanceGroupManagers.deletePerInstanceConfigs" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -106990,45 +118820,38 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets the labels on the target regional disk.", + // "description": "Deletes selected per-instance configs for the managed instance group.", // "httpMethod": "POST", - // "id": "compute.regionDisks.setLabels", + // "id": "compute.regionInstanceGroupManagers.deletePerInstanceConfigs", // "parameterOrder": [ // "project", // "region", - // "resource" + // "instanceGroupManager" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. 
It should conform to RFC1035.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "region": { + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{resource}/setLabels", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", // "request": { - // "$ref": "RegionSetLabelsRequest" + // "$ref": "RegionInstanceGroupManagerDeleteInstanceConfigReq" // }, // "response": { // "$ref": "Operation" @@ -107041,92 +118864,99 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.regionDisks.testIamPermissions": +// method id "compute.regionInstanceGroupManagers.get": -type RegionDisksTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersGetCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionDisksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionDisksTestIamPermissionsCall { - c := &RegionDisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns all of the details about the specified managed instance +// group. 
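// --- Illustrative sketch (editor's addition): reading a managed instance group
// back with the Get call defined below. Same package/imports as the
// DeleteInstances sketch earlier in this section; the identifiers are placeholders.
func describeMIG(ctx context.Context, svc *compute.Service) error {
	igm, err := svc.RegionInstanceGroupManagers.
		Get("my-project", "us-central1", "my-mig").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("group %s: targetSize=%d template=%s", igm.Name, igm.TargetSize, igm.InstanceTemplate)
	return nil
}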
+func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { + c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instanceGroupManager = instanceGroupManager return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionDisksTestIamPermissionsCall { +func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksTestIamPermissionsCall) Context(ctx context.Context) *RegionDisksTestIamPermissionsCall { +func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { +func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.testIamPermissions" call. 
-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.regionInstanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *InstanceGroupManager.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -107145,7 +118975,7 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -107157,43 +118987,38 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionDisks.testIamPermissions", + // "description": "Returns all of the details about the specified managed instance group.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroupManagers.get", // "parameterOrder": [ // "project", // "region", - // "resource" + // "instanceGroupManager" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "InstanceGroupManager" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -107204,24 +119029,32 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } -// method id 
"compute.regionHealthChecks.delete": +// method id "compute.regionInstanceGroupManagers.insert": -type RegionHealthChecksDeleteCall struct { - s *Service - project string - region string - healthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersInsertCall struct { + s *Service + project string + region string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HealthCheck resource. -func (r *RegionHealthChecksService) Delete(project string, region string, healthCheck string) *RegionHealthChecksDeleteCall { - c := &RegionHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, instances in +// the group are created using the specified instance template. This +// operation is marked as DONE when the group is created even if the +// instances in the group have not yet been created. You must separately +// verify the status of the individual instances with the +// listmanagedinstances method. +// +// A regional managed instance group can contain up to 2000 instances. +func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { + c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheck = healthCheck + c.instancegroupmanager = instancegroupmanager return c } @@ -107239,7 +119072,7 @@ func (r *RegionHealthChecksService) Delete(project string, region string, health // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionHealthChecksDeleteCall) RequestId(requestId string) *RegionHealthChecksDeleteCall { +func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *RegionInstanceGroupManagersInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -107247,7 +119080,7 @@ func (c *RegionHealthChecksDeleteCall) RequestId(requestId string) *RegionHealth // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksDeleteCall) Fields(s ...googleapi.Field) *RegionHealthChecksDeleteCall { +func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107255,52 +119088,56 @@ func (c *RegionHealthChecksDeleteCall) Fields(s ...googleapi.Field) *RegionHealt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksDeleteCall) Context(ctx context.Context) *RegionHealthChecksDeleteCall { +func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionHealthChecksDeleteCall) Header() http.Header { +func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheck": c.healthCheck, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.delete" call. +// Do executes the "compute.regionInstanceGroupManagers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -107331,22 +119168,14 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Deletes the specified HealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.regionHealthChecks.delete", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. 
You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.insert", // "parameterOrder": [ // "project", - // "region", - // "healthCheck" + // "region" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -107357,7 +119186,6 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -107367,7 +119195,10 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "path": "{project}/regions/{region}/instanceGroupManagers", + // "request": { + // "$ref": "InstanceGroupManager" + // }, // "response": { // "$ref": "Operation" // }, @@ -107379,33 +119210,94 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.regionHealthChecks.get": +// method id "compute.regionInstanceGroupManagers.list": -type RegionHealthChecksGetCall struct { +type RegionInstanceGroupManagersListCall struct { s *Service project string region string - healthCheck string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. -func (r *RegionHealthChecksService) Get(project string, region string, healthCheck string) *RegionHealthChecksGetCall { - c := &RegionHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of managed instance groups that are +// contained within the specified region. +func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { + c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheck = healthCheck + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). 
By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksGetCall) Fields(s ...googleapi.Field) *RegionHealthChecksGetCall { +func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107415,7 +119307,7 @@ func (c *RegionHealthChecksGetCall) Fields(s ...googleapi.Field) *RegionHealthCh // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionHealthChecksGetCall) IfNoneMatch(entityTag string) *RegionHealthChecksGetCall { +func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { c.ifNoneMatch_ = entityTag return c } @@ -107423,21 +119315,21 @@ func (c *RegionHealthChecksGetCall) IfNoneMatch(entityTag string) *RegionHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
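// --- Illustrative sketch (editor's addition): listing the managed instance
// groups in a region with a filter, and letting the Pages helper (added further
// down for this call type) follow nextPageToken. Same package/imports as the
// DeleteInstances sketch; the filter value mirrors the example in the parameter
// documentation above.
func listMIGs(ctx context.Context, svc *compute.Service) error {
	return svc.RegionInstanceGroupManagers.
		List("my-project", "us-central1").
		Filter("name != example-instance").
		MaxResults(100).
		Pages(ctx, func(page *compute.RegionInstanceGroupManagerList) error {
			for _, igm := range page.Items {
				log.Printf("%s (targetSize=%d)", igm.Name, igm.TargetSize)
			}
			return nil // a non-nil error here would stop the iteration
		})
}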
-func (c *RegionHealthChecksGetCall) Context(ctx context.Context) *RegionHealthChecksGetCall { +func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksGetCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -107449,7 +119341,7 @@ func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -107457,21 +119349,20 @@ func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheck": c.healthCheck, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.get" call. -// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheck.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { +// Do executes the "compute.regionInstanceGroupManagers.list" call. +// Exactly one of *RegionInstanceGroupManagerList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -107490,7 +119381,7 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheck{ + ret := &RegionInstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -107502,20 +119393,35 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. 
Gets a list of available health checks by making a list() request.", + // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.regionHealthChecks.get", + // "id": "compute.regionInstanceGroupManagers.list", // "parameterOrder": [ // "project", - // "region", - // "healthCheck" + // "region" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -107528,14 +119434,13 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "path": "{project}/regions/{region}/instanceGroupManagers", // "response": { - // "$ref": "HealthCheck" + // "$ref": "RegionInstanceGroupManagerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -107546,51 +119451,118 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe } -// method id "compute.regionHealthChecks.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionHealthChecksInsertCall struct { - s *Service - project string - region string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionInstanceGroupManagers.listManagedInstances": + +type RegionInstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *RegionHealthChecksService) Insert(project string, region string, healthcheck *HealthCheck) *RegionHealthChecksInsertCall { - c := &RegionHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListManagedInstances: Lists the instances in the managed instance +// group and instances that are scheduled to be created. The list +// includes any current actions that the group has scheduled for its +// instances. +func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { + c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthcheck = healthcheck + c.instanceGroupManager = instanceGroupManager return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. 
The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionHealthChecksInsertCall) RequestId(requestId string) *RegionHealthChecksInsertCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "order_by": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("order_by", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
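// --- Illustrative sketch (editor's addition): a single listManagedInstances
// request, inspecting each instance's status and any scheduled action. Note that
// this call's OrderBy setter maps to the wire parameter "order_by" rather than
// "orderBy", as the generated metadata below shows. Same package/imports as the
// DeleteInstances sketch.
func showManagedInstances(ctx context.Context, svc *compute.Service) error {
	resp, err := svc.RegionInstanceGroupManagers.
		ListManagedInstances("my-project", "us-central1", "my-mig").
		MaxResults(50).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, mi := range resp.ManagedInstances {
		log.Printf("%s status=%s currentAction=%s", mi.Instance, mi.InstanceStatus, mi.CurrentAction)
	}
	return nil
}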
+func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksInsertCall) Fields(s ...googleapi.Field) *RegionHealthChecksInsertCall { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107598,35 +119570,30 @@ func (c *RegionHealthChecksInsertCall) Fields(s ...googleapi.Field) *RegionHealt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksInsertCall) Context(ctx context.Context) *RegionHealthChecksInsertCall { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksInsertCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -107634,20 +119601,23 @@ func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. +// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or +// error will be non-nil. 
Any non-2xx status code is an error. Response +// headers are in either +// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade +// r or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -107666,7 +119636,7 @@ func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operat if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionInstanceGroupManagersListInstancesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -107678,14 +119648,44 @@ func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", // "httpMethod": "POST", - // "id": "compute.regionHealthChecks.insert", + // "id": "compute.regionInstanceGroupManagers.listManagedInstances", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -107696,49 +119696,63 @@ func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operat // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/healthChecks", - // "request": { - // "$ref": "HealthCheck" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupManagersListInstancesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionHealthChecks.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionHealthChecksListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.regionInstanceGroupManagers.listPerInstanceConfigs": + +type RegionInstanceGroupManagersListPerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of HealthCheck resources available to the -// specified project. -func (r *RegionHealthChecksService) List(project string, region string) *RegionHealthChecksListCall { - c := &RegionHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListPerInstanceConfigs: Lists all of the per-instance configs defined +// for the managed instance group. +func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c := &RegionInstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.instanceGroupManager = instanceGroupManager return c } @@ -107764,7 +119778,7 @@ func (r *RegionHealthChecksService) List(project string, region string) *RegionH // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksListCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("filter", filter) return c } @@ -107775,7 +119789,7 @@ func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksLi // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthChecksListCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -107792,7 +119806,7 @@ func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthC // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecksListCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -107800,7 +119814,7 @@ func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecks // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthChecksListCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -107808,72 +119822,62 @@ func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthCh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksListCall) Fields(s ...googleapi.Field) *RegionHealthChecksListCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionHealthChecksListCall) IfNoneMatch(entityTag string) *RegionHealthChecksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksListCall) Context(ctx context.Context) *RegionHealthChecksListCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksListCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.list" call. -// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *HealthCheckList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { +// Do executes the "compute.regionInstanceGroupManagers.listPerInstanceConfigs" call. +// Exactly one of *RegionInstanceGroupManagersListInstanceConfigsResp or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *RegionInstanceGroupManagersListInstanceConfigsResp.ServerResponse.Hea +// der or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstanceConfigsResp, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -107892,7 +119896,7 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheckList{ + ret := &RegionInstanceGroupManagersListInstanceConfigsResp{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -107904,12 +119908,13 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh } return ret, nil // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.regionHealthChecks.list", + // "description": "Lists all of the per-instance configs defined for the managed instance group.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { // "filter": { @@ -107917,6 +119922,12 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh // "location": "query", // "type": "string" // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", @@ -107943,16 +119954,15 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/healthChecks", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", // "response": { - // "$ref": "HealthCheckList" + // "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -107966,7 +119976,7 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionHealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstanceConfigsResp) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -107984,28 +119994,32 @@ func (c *RegionHealthChecksListCall) Pages(ctx context.Context, f func(*HealthCh } } -// method id "compute.regionHealthChecks.patch": +// method id "compute.regionInstanceGroupManagers.patch": -type RegionHealthChecksPatchCall struct { - s *Service - project string - region string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersPatchCall struct { + s *Service + project string + region string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HealthCheck resource in the specified project using -// the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -func (r *RegionHealthChecksService) Patch(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksPatchCall { - c := &RegionHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is patched even if the instances in the group are still in the +// process of being patched. You must separately verify the status of +// the individual instances with the listmanagedinstances method. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. 
+func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { + c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheck = healthCheck - c.healthcheck = healthcheck + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager return c } @@ -108023,7 +120037,7 @@ func (r *RegionHealthChecksService) Patch(project string, region string, healthC // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionHealthChecksPatchCall) RequestId(requestId string) *RegionHealthChecksPatchCall { +func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -108031,7 +120045,7 @@ func (c *RegionHealthChecksPatchCall) RequestId(requestId string) *RegionHealthC // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksPatchCall) Fields(s ...googleapi.Field) *RegionHealthChecksPatchCall { +func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108039,35 +120053,35 @@ func (c *RegionHealthChecksPatchCall) Fields(s ...googleapi.Field) *RegionHealth // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksPatchCall) Context(ctx context.Context) *RegionHealthChecksPatchCall { +func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksPatchCall) Header() http.Header { +func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -108075,21 +120089,21 @@ func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheck": c.healthCheck, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.patch" call. +// Do executes the "compute.regionInstanceGroupManagers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -108120,19 +120134,18 @@ func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", - // "id": "compute.regionHealthChecks.patch", + // "id": "compute.regionInstanceGroupManagers.patch", // "parameterOrder": [ // "project", // "region", - // "healthCheck" + // "instanceGroupManager" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to patch.", + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -108146,7 +120159,6 @@ func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operati // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -108156,9 +120168,9 @@ func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "request": { - // "$ref": "HealthCheck" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -108171,34 +120183,55 @@ func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionHealthChecks.testIamPermissions": +// method id "compute.regionInstanceGroupManagers.patchPerInstanceConfigs": -type RegionHealthChecksTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersPatchPerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionHealthChecksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionHealthChecksTestIamPermissionsCall { - c := &RegionHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// PatchPerInstanceConfigs: Insert or patch (for the ones that already +// exist) per-instance configs for the managed instance group. +// perInstanceConfig.instance serves as a key used to distinguish +// whether to perform insert or patch. 
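Before the per-instance-config variant that follows, a minimal sketch of issuing the regionInstanceGroupManagers.patch call defined above. The import path and the TargetSize field are assumptions about the vendored package; the call chain itself follows the methods in this hunk.

package migexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored import path
)

// patchTargetSize merge-patches only the targetSize of a regional MIG. The
// returned Operation is marked DONE once the group resource is patched, even
// while individual instances may still be updating; check those separately
// with listManagedInstances.
func patchTargetSize(ctx context.Context, svc *compute.Service, project, region, igm string, size int64) error {
	body := &compute.InstanceGroupManager{TargetSize: size} // only the fields being changed (field name assumed)
	op, err := svc.RegionInstanceGroupManagers.Patch(project, region, igm, body).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("patch operation started:", op.Name)
	return nil
}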
+func (r *RegionInstanceGroupManagersService) PatchPerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { + c := &RegionInstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerpatchinstanceconfigreq = regioninstancegroupmanagerpatchinstanceconfigreq + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionHealthChecksTestIamPermissionsCall { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108206,35 +120239,35 @@ func (c *RegionHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *RegionHealthChecksTestIamPermissionsCall { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionHealthChecksTestIamPermissionsCall) Header() http.Header { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerpatchinstanceconfigreq) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -108242,21 +120275,21 @@ func (c *RegionHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http. } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.regionInstanceGroupManagers.patchPerInstanceConfigs" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -108275,7 +120308,7 @@ func (c *RegionHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOpti if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -108287,15 +120320,21 @@ func (c *RegionHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", // "httpMethod": "POST", - // "id": "compute.regionHealthChecks.testIamPermissions", + // "id": "compute.regionInstanceGroupManagers.patchPerInstanceConfigs", // "parameterOrder": [ // "project", // "region", - // "resource" + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -108304,57 +120343,66 @@ func (c *RegionHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOpti // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/healthChecks/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/patchPerInstanceConfigs", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "RegionInstanceGroupManagerPatchInstanceConfigReq" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionHealthChecks.update": +// method id "compute.regionInstanceGroupManagers.recreateInstances": -type RegionHealthChecksUpdateCall struct { - s *Service - project string - region string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *RegionHealthChecksService) Update(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksUpdateCall { - c := &RegionHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RecreateInstances: Flags the specified instances in the managed +// instance group to be immediately recreated. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the flag is set +// even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { + c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheck = healthCheck - c.healthcheck = healthcheck + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest return c } @@ -108372,7 +120420,7 @@ func (r *RegionHealthChecksService) Update(project string, region string, health // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
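A minimal sketch of flagging specific instances for recreation through the call declared just above. The Instances field on the request struct and the import path are assumptions; the constructor, RequestId, Context, and Do signatures follow this hunk.

package migexample

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored import path
)

// recreateInstances asks the regional MIG to delete and recreate the given
// instances (by URL). The Operation is DONE once the action is scheduled, not
// when recreation has finished.
func recreateInstances(ctx context.Context, svc *compute.Service, project, region, igm string, instanceURLs []string, requestID string) (*compute.Operation, error) {
	req := &compute.RegionInstanceGroupManagersRecreateRequest{
		Instances: instanceURLs, // field name assumed for the request type above
	}
	call := svc.RegionInstanceGroupManagers.RecreateInstances(project, region, igm, req)
	if requestID != "" {
		call = call.RequestId(requestID) // optional; makes retries idempotent
	}
	return call.Context(ctx).Do()
}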
-func (c *RegionHealthChecksUpdateCall) RequestId(requestId string) *RegionHealthChecksUpdateCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -108380,7 +120428,7 @@ func (c *RegionHealthChecksUpdateCall) RequestId(requestId string) *RegionHealth // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthChecksUpdateCall) Fields(s ...googleapi.Field) *RegionHealthChecksUpdateCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108388,57 +120436,57 @@ func (c *RegionHealthChecksUpdateCall) Fields(s ...googleapi.Field) *RegionHealt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthChecksUpdateCall) Context(ctx context.Context) *RegionHealthChecksUpdateCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthChecksUpdateCall) Header() http.Header { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheck": c.healthCheck, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthChecks.update" call. +// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -108469,19 +120517,18 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.regionHealthChecks.update", + // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.recreateInstances", // "parameterOrder": [ // "project", // "region", - // "healthCheck" + // "instanceGroupManager" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to update.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -108495,7 +120542,6 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -108505,9 +120551,9 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", // "request": { - // "$ref": "HealthCheck" + // "$ref": "RegionInstanceGroupManagersRecreateRequest" // }, // "response": { // "$ref": "Operation" @@ -108520,42 +120566,37 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.regionInstanceGroupManagers.abandonInstances": +// method id "compute.regionInstanceGroupManagers.resize": -type RegionInstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersResizeCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ 
http.Header } -// AbandonInstances: Flags the specified instances to be immediately -// removed from the managed instance group. Abandoning an instance does -// not delete the instance, but it does remove the instance from any -// target pools that are applied by the managed instance group. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you abandon. This operation is marked as -// DONE when the action is scheduled even if the instances have not yet -// been removed from the group. You must separately verify the status of -// the abandoning action with the listmanagedinstances method. +// Resize: Changes the intended size of the managed instance group. If +// you increase the size, the group creates new instances using the +// current instance template. If you decrease the size, the group +// deletes one or more instances. +// +// The resize operation is marked DONE if the resize request is +// successful. The underlying actions take additional time. You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. // // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { - c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// duration has elapsed before the VM instance is removed or deleted. +func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { + c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest + c.urlParams_.Set("size", fmt.Sprint(size)) return c } @@ -108573,7 +120614,7 @@ func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, re // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -108581,7 +120622,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId st // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108589,35 +120630,30 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -108632,14 +120668,14 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. +// Do executes the "compute.regionInstanceGroupManagers.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -108670,13 +120706,14 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Flags the specified instances to be immediately removed from the managed instance group. 
Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.abandonInstances", + // "id": "compute.regionInstanceGroupManagers.resize", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "instanceGroupManager", + // "size" // ], // "parameters": { // "instanceGroupManager": { @@ -108702,12 +120739,17 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "size": { + // "description": "Number of instances that should exist in this instance group manager.", + // "format": "int32", + // "location": "query", + // "minimum": "0", + // "required": true, + // "type": "integer" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { // "$ref": "Operation" // }, @@ -108719,34 +120761,54 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C } -// method id "compute.regionInstanceGroupManagers.applyUpdatesToInstances": +// method id "compute.regionInstanceGroupManagers.setAutoHealingPolicies": -type RegionInstanceGroupManagersApplyUpdatesToInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetAutoHealingPoliciesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ApplyUpdatesToInstances: Apply updates to selected instances the -// managed instance group. -func (r *RegionInstanceGroupManagersService) ApplyUpdatesToInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { - c := &RegionInstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetAutoHealingPolicies: Modifies the autohealing policy for the +// instances in this managed instance group. [Deprecated] This method is +// deprecated. Please use Patch instead. +func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { + c := &RegionInstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersapplyupdatesrequest = regioninstancegroupmanagersapplyupdatesrequest + c.regioninstancegroupmanagerssetautohealingrequest = regioninstancegroupmanagerssetautohealingrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108754,35 +120816,35 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...goo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersapplyupdatesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssetautohealingrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -108797,14 +120859,14 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt s return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.applyUpdatesToInstances" call. 
+// Do executes the "compute.regionInstanceGroupManagers.setAutoHealingPolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -108835,9 +120897,9 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog } return ret, nil // { - // "description": "Apply updates to selected instances the managed instance group.", + // "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. Please use Patch instead.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", + // "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", // "parameterOrder": [ // "project", // "region", @@ -108845,7 +120907,7 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog // ], // "parameters": { // "instanceGroupManager": { - // "description": "The name of the managed instance group, should conform to RFC1035.", + // "description": "Name of the managed instance group.", // "location": "path", // "required": true, // "type": "string" @@ -108858,15 +120920,20 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "description": "Name of the region scoping this request.", // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", // "request": { - // "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" + // "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" // }, // "response": { // "$ref": "Operation" @@ -108879,25 +120946,28 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog } -// method id "compute.regionInstanceGroupManagers.delete": +// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": -type RegionInstanceGroupManagersDeleteCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. -func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { - c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetInstanceTemplate: Sets the instance template to use when creating +// new instances or recreating instances in this group. Existing +// instances are not affected. +func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest return c } @@ -108915,7 +120985,7 @@ func (r *RegionInstanceGroupManagersService) Delete(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -108923,7 +120993,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108931,32 +121001,37 @@ func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -108969,14 +121044,14 @@ func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.delete" call. +// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -109007,9 +121082,9 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group.", - // "httpMethod": "DELETE", - // "id": "compute.regionInstanceGroupManagers.delete", + // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ // "project", // "region", @@ -109017,7 +121092,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "instanceGroupManager": { - // "description": "Name of the managed instance group to delete.", + // "description": "The name of the managed instance group.", // "location": "path", // "required": true, // "type": "string" @@ -109041,7 +121116,10 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { + // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -109053,42 +121131,28 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.deleteInstances": +// method id "compute.regionInstanceGroupManagers.setTargetPools": -type RegionInstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteInstances: Flags the specified instances in the managed -// instance group to be immediately deleted. The instances are also -// removed from any target pools of which they were a member. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you delete. The deleteInstances operation is -// marked DONE if the deleteInstances request is successful. The -// underlying actions take additional time. You must separately verify -// the status of the deleting action with the listmanagedinstances -// method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. 
-func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { - c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTargetPools: Modifies the target pools to which all new instances +// in this group are assigned. Existing instances in the group are not +// affected. +func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { + c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest + c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest return c } @@ -109106,7 +121170,7 @@ func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, reg // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -109114,7 +121178,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId str // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -109122,35 +121186,35 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.F // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -109165,14 +121229,14 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) ( return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. +// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -109203,9 +121267,9 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Modifies the target pools to which all new instances in this group are assigned. 
Existing instances in the group are not affected.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.deleteInstances", + // "id": "compute.regionInstanceGroupManagers.setTargetPools", // "parameterOrder": [ // "project", // "region", @@ -109237,9 +121301,9 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", // "request": { - // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" + // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" // }, // "response": { // "$ref": "Operation" @@ -109252,34 +121316,34 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca } -// method id "compute.regionInstanceGroupManagers.deletePerInstanceConfigs": +// method id "compute.regionInstanceGroupManagers.testIamPermissions": -type RegionInstanceGroupManagersDeletePerInstanceConfigsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeletePerInstanceConfigs: Deletes selected per-instance configs for -// the managed instance group. -func (r *RegionInstanceGroupManagersService) DeletePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { - c := &RegionInstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupManagersTestIamPermissionsCall { + c := &RegionInstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerdeleteinstanceconfigreq = regioninstancegroupmanagerdeleteinstanceconfigreq + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -109287,35 +121351,35 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...go // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupManagersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerdeleteinstanceconfigreq) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -109323,21 +121387,21 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.deletePerInstanceConfigs" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroupManagers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -109356,7 +121420,7 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...goo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -109368,21 +121432,15 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...goo } return ret, nil // { - // "description": "Deletes selected per-instance configs for the managed instance group.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.deletePerInstanceConfigs", + // "id": "compute.regionInstanceGroupManagers.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -109391,100 +121449,124 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...goo // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "description": "The name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + // "path": "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", // "request": { - // "$ref": "RegionInstanceGroupManagerDeleteInstanceConfigReq" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.get": +// method id "compute.regionInstanceGroupManagers.update": -type RegionInstanceGroupManagersGetCall struct { +type RegionInstanceGroupManagersUpdateCall struct { s *Service project string region string instanceGroupManager string + instancegroupmanager *InstanceGroupManager urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. -func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { - c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a managed instance group using the information that +// you specify in the request. 
This operation is marked as DONE when the +// group is updated even if the instances in the group have not yet been +// updated. You must separately verify the status of the individual +// instances with the listmanagedinstances method. +func (r *RegionInstanceGroupManagersService) Update(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersUpdateCall { + c := &RegionInstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { +func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { +func (c *RegionInstanceGroupManagersUpdateCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { +func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } @@ -109497,14 +121579,14 @@ func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +// Do executes the "compute.regionInstanceGroupManagers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -109523,7 +121605,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManager{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -109535,9 +121617,9 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.get", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. 
You must separately verify the status of the individual instances with the listmanagedinstances method.", + // "httpMethod": "PUT", + // "id": "compute.regionInstanceGroupManagers.update", // "parameterOrder": [ // "project", // "region", @@ -109545,7 +121627,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "instanceGroupManager": { - // "description": "Name of the managed instance group to return.", + // "description": "The name of the instance group manager.", // "location": "path", // "required": true, // "type": "string" @@ -109562,47 +121644,51 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "response": { + // "request": { // "$ref": "InstanceGroupManager" // }, + // "response": { + // "$ref": "Operation" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionInstanceGroupManagers.insert": +// method id "compute.regionInstanceGroupManagers.updatePerInstanceConfigs": -type RegionInstanceGroupManagersInsertCall struct { - s *Service - project string - region string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, instances in -// the group are created using the specified instance template. This -// operation is marked as DONE when the group is created even if the -// instances in the group have not yet been created. You must separately -// verify the status of the individual instances with the -// listmanagedinstances method. -// -// A regional managed instance group can contain up to 2000 instances. 
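Similarly, a hedged sketch of driving the new regionInstanceGroupManagers.update method. The PUT semantics, the RequestId setter, and the Operation return type come from the hunk above; the compute package alias and the svc.RegionInstanceGroupManagers accessor are assumed as before.

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// updateRegionalMIG PUTs the full InstanceGroupManager resource. The returned
// operation is DONE once the group resource is updated, even if the instances
// have not been rolled yet (see the doc comment above); instance state must be
// checked separately with listManagedInstances.
func updateRegionalMIG(ctx context.Context, svc *compute.Service, project, region, name string, igm *compute.InstanceGroupManager, requestID string) (*compute.Operation, error) {
	call := svc.RegionInstanceGroupManagers.Update(project, region, name, igm).Context(ctx)
	if requestID != "" {
		// Optional idempotency key: the server ignores a retried request that
		// reuses the same ID (per the RequestId setter above).
		call = call.RequestId(requestID)
	}
	return call.Do()
}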
-func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { - c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdatePerInstanceConfigs: Insert or update (for the ones that already +// exist) per-instance configs for the managed instance group. +// perInstanceConfig.instance serves as a key used to distinguish +// whether to perform insert or patch. +func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { + c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instancegroupmanager = instancegroupmanager + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerupdateinstanceconfigreq = regioninstancegroupmanagerupdateinstanceconfigreq return c } @@ -109620,7 +121706,7 @@ func (r *RegionInstanceGroupManagersService) Insert(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *RegionInstanceGroupManagersInsertCall { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -109628,7 +121714,7 @@ func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -109636,35 +121722,35 @@ func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerupdateinstanceconfigreq) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -109672,20 +121758,21 @@ func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.insert" call. +// Do executes the "compute.regionInstanceGroupManagers.updatePerInstanceConfigs" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -109716,14 +121803,21 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", + // "description": "Insert or update (for the ones that already exist) per-instance configs for the managed instance group. 
perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.insert", + // "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -109732,7 +121826,7 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -109743,24 +121837,188 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "request": { + // "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroups.get": + +type RegionInstanceGroupsGetCall struct { + s *Service + project string + region string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified instance group resource. +func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { + c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroup = instanceGroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionInstanceGroupsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroup{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance group resource.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroups.get", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "Name of the instance group resource to return.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.list": +// 
method id "compute.regionInstanceGroups.list": -type RegionInstanceGroupManagersListCall struct { +type RegionInstanceGroupsListCall struct { s *Service project string region string @@ -109770,10 +122028,10 @@ type RegionInstanceGroupManagersListCall struct { header_ http.Header } -// List: Retrieves the list of managed instance groups that are -// contained within the specified region. -func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { - c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instance group resources contained within +// the specified region. +func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { + c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -109801,7 +122059,7 @@ func (r *RegionInstanceGroupManagersService) List(project string, region string) // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { c.urlParams_.Set("filter", filter) return c } @@ -109812,7 +122070,7 @@ func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInsta // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -109829,7 +122087,7 @@ func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *Regi // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -109837,7 +122095,7 @@ func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionIns // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -109845,7 +122103,7 @@ func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *Regio // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -109855,7 +122113,7 @@ func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *Regi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -109863,21 +122121,21 @@ func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListCall) Header() http.Header { +func (c *RegionInstanceGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -109889,7 +122147,7 @@ func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Respo var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -109903,14 +122161,14 @@ func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.list" call. -// Exactly one of *RegionInstanceGroupManagerList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionInstanceGroups.list" call. +// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RegionInstanceGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { +func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -109929,7 +122187,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupManagerList{ + ret := &RegionInstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -109941,9 +122199,9 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", + // "description": "Retrieves the list of instance group resources contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.list", + // "id": "compute.regionInstanceGroups.list", // "parameterOrder": [ // "project", // "region" @@ -109986,9 +122244,9 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers", + // "path": "{project}/regions/{region}/instanceGroups", // "response": { - // "$ref": "RegionInstanceGroupManagerList" + // "$ref": "RegionInstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -110002,7 +122260,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { +func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -110020,27 +122278,29 @@ func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func( } } -// method id "compute.regionInstanceGroupManagers.listManagedInstances": +// method id "compute.regionInstanceGroups.listInstances": -type RegionInstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsListInstancesCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListManagedInstances: Lists the instances in the managed instance -// group and instances that are scheduled to be created. The list -// includes any current actions that the group has scheduled for its -// instances. 
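A short sketch of the renamed regionInstanceGroups.list method, using the generated Pages helper shown above to walk all pages. The svc.RegionInstanceGroups accessor and the Items/Name fields on RegionInstanceGroupList are assumptions from the rest of the generated client, not shown in this hunk.

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// listRegionalInstanceGroups returns the names of all instance groups in a
// region. Pages drives the pageToken loop and invokes the callback per page.
func listRegionalInstanceGroups(ctx context.Context, svc *compute.Service, project, region string) ([]string, error) {
	var names []string
	err := svc.RegionInstanceGroups.List(project, region).
		MaxResults(100).
		OrderBy("creationTimestamp desc"). // one of the two supported orderings per the docs above
		Pages(ctx, func(page *compute.RegionInstanceGroupList) error {
			for _, ig := range page.Items { // Items/Name assumed from the list schema
				names = append(names, ig.Name)
			}
			return nil
		})
	return names, err
}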
-func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { - c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListInstances: Lists the instances in the specified instance group +// and displays information about the named ports. Depending on the +// specified options, this method can list all instances or only the +// instances that are running. +func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { + c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager + c.instanceGroup = instanceGroup + c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest return c } @@ -110066,7 +122326,7 @@ func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c } @@ -110077,12 +122337,12 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter stri // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// OrderBy sets the optional parameter "order_by": Sorts list results by +// OrderBy sets the optional parameter "orderBy": Sorts list results by // a certain order. By default, results are returned in alphanumerical // order based on the resource name. // @@ -110094,15 +122354,15 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResu // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("order_by", orderBy) +func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
-func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -110110,7 +122370,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToke // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -110118,30 +122378,35 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...google // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -110149,23 +122414,22 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt stri } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. -// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. 
Response -// headers are in either -// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade -// r or (if a response was returned at all) in +// Do executes the "compute.regionInstanceGroups.listInstances" call. +// Exactly one of *RegionInstanceGroupsListInstances or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupsListInstances.ServerResponse.Header or +// (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was // returned. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { +func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -110184,7 +122448,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupManagersListInstancesResponse{ + ret := &RegionInstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -110196,13 +122460,13 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", + // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.listManagedInstances", + // "id": "compute.regionInstanceGroups.listInstances", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { // "filter": { @@ -110210,8 +122474,8 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "location": "query", // "type": "string" // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceGroup": { + // "description": "Name of the regional instance group for which we want to list the instances.", // "location": "path", // "required": true, // "type": "string" @@ -110224,7 +122488,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "minimum": "0", // "type": "integer" // }, - // "order_by": { + // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" @@ -110248,9 +122512,12 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "RegionInstanceGroupsListInstancesRequest" + // }, // "response": { - // "$ref": "RegionInstanceGroupManagersListInstancesResponse" + // "$ref": "RegionInstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -110264,7 +122531,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { +func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -110282,95 +122549,53 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context. } } -// method id "compute.regionInstanceGroupManagers.listPerInstanceConfigs": +// method id "compute.regionInstanceGroups.setNamedPorts": -type RegionInstanceGroupManagersListPerInstanceConfigsCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsSetNamedPortsCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListPerInstanceConfigs: Lists all of the per-instance configs defined -// for the managed instance group. -func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c := &RegionInstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNamedPorts: Sets the named ports for the specified regional +// instance group. +func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { + c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. 
The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.instanceGroup = instanceGroup + c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListPerInstanceConfigsCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -110378,30 +122603,35 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...goog // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListPerInstanceConfigsCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { +func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -110409,23 +122639,21 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt st } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.listPerInstanceConfigs" call. -// Exactly one of *RegionInstanceGroupManagersListInstanceConfigsResp or -// error will be non-nil. Any non-2xx status code is an error. 
Response -// headers are in either -// *RegionInstanceGroupManagersListInstanceConfigsResp.ServerResponse.Hea -// der or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstanceConfigsResp, error) { +// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -110444,7 +122672,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupManagersListInstanceConfigsResp{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -110456,44 +122684,180 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl } return ret, nil // { - // "description": "Lists all of the per-instance configs defined for the managed instance group.", + // "description": "Sets the named ports for the specified regional instance group.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", + // "id": "compute.regionInstanceGroups.setNamedPorts", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "instanceGroup": { + // "description": "The name of the regional instance group where the named ports are updated.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, + // } + // }, + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + // "request": { + // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroups.testIamPermissions": + +type RegionInstanceGroupsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionInstanceGroupsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupsTestIamPermissionsCall { + c := &RegionInstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. 
Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -110502,15 +122866,26 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "description": "The name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + // "path": "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -110521,53 +122896,25 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstanceConfigsResp) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.patch": +// method id "compute.regionNotificationEndpoints.delete": -type RegionInstanceGroupManagersPatchCall struct { +type RegionNotificationEndpointsDeleteCall struct { s *Service project string region string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager + notificationEndpoint string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listmanagedinstances method. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. -func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { - c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified NotificationEndpoint in the given +// region +func (r *RegionNotificationEndpointsService) Delete(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsDeleteCall { + c := &RegionNotificationEndpointsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.notificationEndpoint = notificationEndpoint return c } @@ -110585,7 +122932,7 @@ func (r *RegionInstanceGroupManagersService) Patch(project string, region string // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { +func (c *RegionNotificationEndpointsDeleteCall) RequestId(requestId string) *RegionNotificationEndpointsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -110593,7 +122940,7 @@ func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *Regi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { +func (c *RegionNotificationEndpointsDeleteCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -110601,37 +122948,32 @@ func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { +func (c *RegionNotificationEndpointsDeleteCall) Context(ctx context.Context) *RegionNotificationEndpointsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { +func (c *RegionNotificationEndpointsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNotificationEndpointsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -110639,19 +122981,19 @@ func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Resp googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "notificationEndpoint": c.notificationEndpoint, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.patch" call. +// Do executes the "compute.regionNotificationEndpoints.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -110682,18 +123024,19 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.regionInstanceGroupManagers.patch", + // "description": "Deletes the specified NotificationEndpoint in the given region", + // "httpMethod": "DELETE", + // "id": "compute.regionNotificationEndpoints.delete", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "notificationEndpoint" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "notificationEndpoint": { + // "description": "Name of the NotificationEndpoint resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -110707,6 +123050,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -110716,10 +123060,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", // "response": { // "$ref": "Operation" // }, @@ -110731,40 +123072,192 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.recreateInstances": +// method id "compute.regionNotificationEndpoints.get": -type RegionInstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNotificationEndpointsGetCall struct { + s *Service + project string + region string + notificationEndpoint string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RecreateInstances: Flags the specified instances in the managed -// instance group to be immediately recreated. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the flag is set -// even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { - c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified NotificationEndpoint resource in the given +// region. 
+func (r *RegionNotificationEndpointsService) Get(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsGetCall { + c := &RegionNotificationEndpointsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest + c.notificationEndpoint = notificationEndpoint + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNotificationEndpointsGetCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionNotificationEndpointsGetCall) IfNoneMatch(entityTag string) *RegionNotificationEndpointsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNotificationEndpointsGetCall) Context(ctx context.Context) *RegionNotificationEndpointsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNotificationEndpointsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNotificationEndpointsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "notificationEndpoint": c.notificationEndpoint, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNotificationEndpoints.get" call. +// Exactly one of *NotificationEndpoint or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NotificationEndpoint.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (*NotificationEndpoint, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NotificationEndpoint{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified NotificationEndpoint resource in the given region.", + // "httpMethod": "GET", + // "id": "compute.regionNotificationEndpoints.get", + // "parameterOrder": [ + // "project", + // "region", + // "notificationEndpoint" + // ], + // "parameters": { + // "notificationEndpoint": { + // "description": "Name of the NotificationEndpoint resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + // "response": { + // "$ref": "NotificationEndpoint" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionNotificationEndpoints.insert": + +type RegionNotificationEndpointsInsertCall struct { + s *Service + project string + region string + notificationendpoint *NotificationEndpoint + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Create a NotificationEndpoint in the specified project in the +// given region using the parameters that are included in the request. +func (r *RegionNotificationEndpointsService) Insert(project string, region string, notificationendpoint *NotificationEndpoint) *RegionNotificationEndpointsInsertCall { + c := &RegionNotificationEndpointsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.notificationendpoint = notificationendpoint return c } @@ -110782,7 +123275,7 @@ func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, r // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *RegionNotificationEndpointsInsertCall) RequestId(requestId string) *RegionNotificationEndpointsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -110790,7 +123283,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId s // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *RegionNotificationEndpointsInsertCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -110798,35 +123291,35 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *RegionNotificationEndpointsInsertCall) Context(ctx context.Context) *RegionNotificationEndpointsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *RegionNotificationEndpointsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNotificationEndpointsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.notificationendpoint) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/notificationEndpoints") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -110834,21 +123327,20 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. +// Do executes the "compute.regionNotificationEndpoints.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionNotificationEndpointsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -110879,21 +123371,14 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Create a NotificationEndpoint in the specified project in the given region using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.recreateInstances", + // "id": "compute.regionNotificationEndpoints.insert", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -110904,6 +123389,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -110913,9 +123399,9 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "path": "{project}/regions/{region}/notificationEndpoints", // "request": { - // "$ref": "RegionInstanceGroupManagersRecreateRequest" + // "$ref": "NotificationEndpoint" // }, // "response": { // "$ref": "Operation" @@ -110928,63 +123414,297 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. 
} -// method id "compute.regionInstanceGroupManagers.resize": +// method id "compute.regionNotificationEndpoints.list": -type RegionInstanceGroupManagersResizeCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionNotificationEndpointsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the NotificationEndpoints for a project in the given +// region. +func (r *RegionNotificationEndpointsService) List(project string, region string) *RegionNotificationEndpointsListCall { + c := &RegionNotificationEndpointsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionNotificationEndpointsListCall) Filter(filter string) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionNotificationEndpointsListCall) MaxResults(maxResults int64) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *RegionNotificationEndpointsListCall) OrderBy(orderBy string) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionNotificationEndpointsListCall) PageToken(pageToken string) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNotificationEndpointsListCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionNotificationEndpointsListCall) IfNoneMatch(entityTag string) *RegionNotificationEndpointsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNotificationEndpointsListCall) Context(ctx context.Context) *RegionNotificationEndpointsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNotificationEndpointsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNotificationEndpointsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/notificationEndpoints") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNotificationEndpoints.list" call. +// Exactly one of *NotificationEndpointList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NotificationEndpointList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) (*NotificationEndpointList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NotificationEndpointList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the NotificationEndpoints for a project in the given region.", + // "httpMethod": "GET", + // "id": "compute.regionNotificationEndpoints.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/notificationEndpoints", + // "response": { + // "$ref": "NotificationEndpointList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionNotificationEndpointsListCall) Pages(ctx context.Context, f func(*NotificationEndpointList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Resize: Changes the intended size of the managed instance group. If -// you increase the size, the group creates new instances using the -// current instance template. If you decrease the size, the group -// deletes one or more instances. -// -// The resize operation is marked DONE if the resize request is -// successful. The underlying actions take additional time. You must -// separately verify the status of the creating or deleting actions with -// the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { - c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) - return c +// method id "compute.regionNotificationEndpoints.testIamPermissions": + +type RegionNotificationEndpointsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { - c.urlParams_.Set("requestId", requestId) +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionNotificationEndpointsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionNotificationEndpointsTestIamPermissionsCall { + c := &RegionNotificationEndpointsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { +func (c *RegionNotificationEndpointsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -110992,30 +123712,35 @@ func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { +func (c *RegionNotificationEndpointsTestIamPermissionsCall) Context(ctx context.Context) *RegionNotificationEndpointsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { +func (c *RegionNotificationEndpointsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionNotificationEndpointsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/notificationEndpoints/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -111023,21 +123748,21 @@ func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionNotificationEndpoints.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -111056,7 +123781,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -111068,22 +123793,15 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.resize", + // "id": "compute.regionNotificationEndpoints.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager", - // "size" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -111092,85 +123810,62 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "size": { - // "description": "Number of instances that should exist in this instance group manager.", - // "format": "int32", - // "location": "query", - // "minimum": "0", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, - // "type": "integer" + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "{project}/regions/{region}/notificationEndpoints/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.setAutoHealingPolicies": +// method id "compute.regionOperations.delete": -type RegionInstanceGroupManagersSetAutoHealingPoliciesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionOperationsDeleteCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetAutoHealingPolicies: Modifies the autohealing policy for the -// instances in this managed instance group. [Deprecated] This method is -// deprecated. Please use Patch instead. -func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { - c := &RegionInstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified region-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete +func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { + c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssetautohealingrequest = regioninstancegroupmanagerssetautohealingrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { - c.urlParams_.Set("requestId", requestId) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -111178,99 +123873,70 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...goog // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { +func (c *RegionOperationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssetautohealingrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setAutoHealingPolicies" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionOperations.delete" call. +func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return err } - return ret, nil + return nil // { - // "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. Please use Patch instead.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", + // "description": "Deletes the specified region-specific Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionOperations.delete", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "operation" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "operation": { + // "description": "Name of the Operations resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -111282,24 +123948,14 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", - // "request": { - // "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, + // "path": "{project}/regions/{region}/operations/{operation}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute" @@ -111308,112 +123964,99 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl } -// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": +// method id "compute.regionOperations.get": -type RegionInstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionOperationsGetCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Sets the instance template to use when creating -// new instances or recreating instances in this group. Existing -// instances are not affected. -func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Retrieves the specified region-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get +func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { + c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
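The requestId parameter described above is what makes retried mutations safe: reusing the same UUID on a retry lets the server recognize and ignore the duplicate instead of acting twice. A minimal sketch of a caller using it against the regenerated client is below; the helper name, the choice of github.com/google/uuid for minting IDs, and the compute/v0.beta import path are illustrative assumptions rather than part of this change, and svc is assumed to be an already-authenticated *compute.Service.

package example

import (
	"context"
	"fmt"

	"github.com/google/uuid"
	compute "google.golang.org/api/compute/v0.beta" // adjust to the compute package this file generates
)

// deleteRegionSslCertificate issues the delete with an explicit requestId so
// that a retried call is treated as the same logical request by the server.
func deleteRegionSslCertificate(ctx context.Context, svc *compute.Service, project, region, name string) (*compute.Operation, error) {
	reqID := uuid.New().String() // one UUID per logical request; reuse it on retries

	op, err := svc.RegionSslCertificates.Delete(project, region, name).
		RequestId(reqID).
		Context(ctx).
		Do()
	if err != nil {
		return nil, fmt.Errorf("deleting %s in %s/%s: %v", name, project, region, err)
	}
	return op, nil
}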
-func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c.urlParams_.Set("requestId", requestId) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { +func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { +func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *RegionOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. +// Do executes the "compute.regionOperations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -111444,18 +124087,19 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap } return ret, nil // { - // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", + // "description": "Retrieves the specified region-specific Operations resource.", + // "httpMethod": "GET", + // "id": "compute.regionOperations.get", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "operation" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -111467,138 +124111,180 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - // "request": { - // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" - // }, + // "path": "{project}/regions/{region}/operations/{operation}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.setTargetPools": +// method id "compute.regionOperations.list": -type RegionInstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionOperationsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all new instances -// in this group are assigned. Existing instances in the group are not -// affected. -func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { - c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Operation resources contained within the +// specified region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list +func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { + c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
+// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *RegionOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -111617,7 +124303,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -111629,19 +124315,35 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setTargetPools", + // "description": "Retrieves a list of Operation resources contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionOperations.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -111652,60 +124354,81 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - // "request": { - // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" - // }, + // "path": "{project}/regions/{region}/operations", // "response": { - // "$ref": "Operation" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionInstanceGroupManagersTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionOperations.wait": + +type RegionOperationsWaitCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
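The Pages helper above drives the pageToken/nextPageToken loop itself, invoking the callback once per page and stopping on the first non-nil error. A hedged sketch of listing a region's operations with it follows; svc is assumed to be an existing *compute.Service, the orderBy and maxResults values are only examples, and the import path should match whichever compute package this generated file belongs to.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed path; use the matching compute package
)

// printRecentOperations walks every page of operations in the region,
// newest first, letting Pages follow nextPageToken until it is empty.
func printRecentOperations(ctx context.Context, svc *compute.Service, project, region string) error {
	call := svc.RegionOperations.List(project, region).
		OrderBy("creationTimestamp desc"). // newest operations first, per the orderBy docs
		MaxResults(100)                    // page size; Pages still visits every page

	return call.Pages(ctx, func(page *compute.OperationList) error {
		for _, op := range page.Items {
			fmt.Printf("%s\t%s\n", op.Name, op.Status)
		}
		return nil // returning a non-nil error here would halt the iteration
	})
}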
-func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupManagersTestIamPermissionsCall { - c := &RegionInstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Wait: Waits for the specified region-specific Operations resource +// until it is done or timeout, and retrieves the specified Operations +// resource. 1. Immediately returns when the operation is already done. +// 2. Waits for no more than the default deadline (2 minutes, subject to +// change) and then returns the current state of the operation, which +// may be DONE or still in progress. 3. Is best-effort: a. The server +// can wait less than the default deadline or zero seconds, in overload +// situations. b. There is no guarantee that the operation is actually +// done when returns. 4. User should be prepared to retry if the +// operation is not DONE. +func (r *RegionOperationsService) Wait(project string, region string, operation string) *RegionOperationsWaitCall { + c := &RegionOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersTestIamPermissionsCall { +func (c *RegionOperationsWaitCall) Fields(s ...googleapi.Field) *RegionOperationsWaitCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -111713,35 +124436,30 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleap // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupManagersTestIamPermissionsCall { +func (c *RegionOperationsWaitCall) Context(ctx context.Context) *RegionOperationsWaitCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header { +func (c *RegionOperationsWaitCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}/wait") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -111749,21 +124467,21 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.regionOperations.wait" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -111782,7 +124500,7 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -111794,15 +124512,22 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Waits for the specified region-specific Operations resource until it is done or timeout, and retrieves the specified Operations resource. 1. Immediately returns when the operation is already done. 2. Waits for no more than the default deadline (2 minutes, subject to change) and then returns the current state of the operation, which may be DONE or still in progress. 3. Is best-effort: a. The server can wait less than the default deadline or zero seconds, in overload situations. b. There is no guarantee that the operation is actually done when returns. 4. 
User should be prepared to retry if the operation is not DONE.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.testIamPermissions", + // "id": "compute.regionOperations.wait", // "parameterOrder": [ // "project", // "region", - // "resource" + // "operation" // ], // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -111811,26 +124536,16 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/operations/{operation}/wait", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -111841,30 +124556,24 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi } -// method id "compute.regionInstanceGroupManagers.update": +// method id "compute.regionSslCertificates.delete": -type RegionInstanceGroupManagersUpdateCall struct { - s *Service - project string - region string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSslCertificatesDeleteCall struct { + s *Service + project string + region string + sslCertificate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is updated even if the instances in the group have not yet been -// updated. You must separately verify the status of the individual -// instances with the listmanagedinstances method. -func (r *RegionInstanceGroupManagersService) Update(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersUpdateCall { - c := &RegionInstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified SslCertificate resource in the region. 
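Because wait is best-effort and may return before the operation is DONE, the documentation above tells callers to be prepared to retry. A sketch of such a retry loop is below, assuming an existing *compute.Service and the generated Operation type's usual Status and Error fields; the function name and import path are illustrative.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed path; use the matching compute package
)

// waitForRegionOperation repeatedly calls the wait method until the
// operation reports DONE or the context is cancelled.
func waitForRegionOperation(ctx context.Context, svc *compute.Service, project, region, operation string) error {
	for {
		// Each Wait call blocks server-side for at most the default deadline
		// (about two minutes, subject to change) and then returns the
		// operation's current state, which may still be in progress.
		op, err := svc.RegionOperations.Wait(project, region, operation).Context(ctx).Do()
		if err != nil {
			return err
		}
		if op.Status == "DONE" {
			if op.Error != nil {
				return fmt.Errorf("operation %s failed: %+v", operation, op.Error)
			}
			return nil
		}
		// Not DONE yet: loop and wait again, honouring context cancellation.
		if ctx.Err() != nil {
			return ctx.Err()
		}
	}
}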
+func (r *RegionSslCertificatesService) Delete(project string, region string, sslCertificate string) *RegionSslCertificatesDeleteCall { + c := &RegionSslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.sslCertificate = sslCertificate return c } @@ -111882,7 +124591,7 @@ func (r *RegionInstanceGroupManagersService) Update(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdateCall { +func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSslCertificatesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -111890,7 +124599,7 @@ func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdateCall { +func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSslCertificatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -111898,57 +124607,52 @@ func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersUpdateCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdateCall { +func (c *RegionSslCertificatesDeleteCall) Context(ctx context.Context) *RegionSslCertificatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { +func (c *RegionSslCertificatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates/{sslCertificate}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.update" call. +// Do executes the "compute.regionSslCertificates.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -111979,21 +124683,15 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method.", - // "httpMethod": "PUT", - // "id": "compute.regionInstanceGroupManagers.update", + // "description": "Deletes the specified SslCertificate resource in the region.", + // "httpMethod": "DELETE", + // "id": "compute.regionSslCertificates.delete", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "sslCertificate" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -112004,6 +124702,7 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -112011,12 +124710,16 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", // "response": { // "$ref": "Operation" // }, @@ -112028,113 +124731,100 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.updatePerInstanceConfigs": +// method id "compute.regionSslCertificates.get": -type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSslCertificatesGetCall struct { + s *Service + project string + region string + sslCertificate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// UpdatePerInstanceConfigs: Insert or patch (for the ones that already -// exist) per-instance configs for the managed instance group. -// perInstanceConfig.instance serves as a key used to distinguish -// whether to perform insert or patch. -func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { - c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified SslCertificate resource in the specified +// region. Get a list of available SSL certificates by making a list() +// request. +func (r *RegionSslCertificatesService) Get(project string, region string, sslCertificate string) *RegionSslCertificatesGetCall { + c := &RegionSslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerupdateinstanceconfigreq = regioninstancegroupmanagerupdateinstanceconfigreq - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
-// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { - c.urlParams_.Set("requestId", requestId) + c.sslCertificate = sslCertificate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { +func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCertificatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslCertificatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { +func (c *RegionSslCertificatesGetCall) Context(ctx context.Context) *RegionSslCertificatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { +func (c *RegionSslCertificatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerupdateinstanceconfigreq) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates/{sslCertificate}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.updatePerInstanceConfigs" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionSslCertificates.get" call. +// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *SslCertificate.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -112153,7 +124843,7 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &SslCertificate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -112165,21 +124855,15 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo } return ret, nil // { - // "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", + // "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionSslCertificates.get", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "sslCertificate" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. 
It should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -112188,124 +124872,135 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", - // "request": { - // "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" - // }, + // "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", // "response": { - // "$ref": "Operation" + // "$ref": "SslCertificate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroups.get": +// method id "compute.regionSslCertificates.insert": -type RegionInstanceGroupsGetCall struct { - s *Service - project string - region string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionSslCertificatesInsertCall struct { + s *Service + project string + region string + sslcertificate *SslCertificate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance group resource. 
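The Do doc comments in this hunk repeatedly note that, for non-2xx responses, the response headers live in error.(*googleapi.Error).Header, alongside the status code and server message. A hedged sketch of handling an error from one of these Do calls, assuming a *compute.Service (svc); the Get call mirrors the regionSslCertificates.get method above, and the error fields used are the ones those doc comments name.

package example

import (
	"context"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
	"google.golang.org/api/googleapi"
)

// describeCertificate shows how the generated Do methods surface HTTP errors.
func describeCertificate(ctx context.Context, svc *compute.Service, project, region, name string) *compute.SslCertificate {
	cert, err := svc.RegionSslCertificates.Get(project, region, name).Context(ctx).Do()
	if err != nil {
		// Non-2xx statuses come back as *googleapi.Error with the status code,
		// server message, and response headers attached.
		if gerr, ok := err.(*googleapi.Error); ok {
			if gerr.Code == http.StatusNotFound {
				log.Printf("certificate %q not found", name)
				return nil
			}
			log.Printf("compute API error %d: %s (headers: %v)", gerr.Code, gerr.Message, gerr.Header)
			return nil
		}
		log.Printf("transport-level error: %v", err)
		return nil
	}
	return cert
}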
-func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { - c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a SslCertificate resource in the specified project +// and region using the data included in the request +func (r *RegionSslCertificatesService) Insert(project string, region string, sslcertificate *SslCertificate) *RegionSslCertificatesInsertCall { + c := &RegionSslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup + c.sslcertificate = sslcertificate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSslCertificatesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { +func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSslCertificatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { +func (c *RegionSslCertificatesInsertCall) Context(ctx context.Context) *RegionSslCertificatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
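The RequestId parameter documented above is the idempotency hook for this family of mutating calls: retrying with the same UUID lets the server recognize and drop the duplicate request. A hedged sketch of using it with the new regionSslCertificates.insert call, assuming a *compute.Service (svc) and a caller-supplied UUID string (how the UUID is generated is outside this hunk); the SslCertificate field names follow the generated struct in this package.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// insertRegionCertificate creates a regional SslCertificate, passing a request ID
// so that a retried call after a timeout is treated as the same request.
func insertRegionCertificate(ctx context.Context, svc *compute.Service, project, region, requestID, certPEM, keyPEM string) (*compute.Operation, error) {
	cert := &compute.SslCertificate{
		Name:        "example-cert", // illustrative name
		Certificate: certPEM,
		PrivateKey:  keyPEM,
	}
	return svc.RegionSslCertificates.Insert(project, region, cert).
		RequestId(requestID). // must be a valid, non-zero UUID
		Context(ctx).
		Do()
}

The returned Operation can then be handed to the polling helper sketched after the regionOperations.wait hunk earlier in this diff.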
-func (c *RegionInstanceGroupsGetCall) Header() http.Header { +func (c *RegionSslCertificatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +// Do executes the "compute.regionSslCertificates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -112324,7 +125019,7 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroup{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -112336,21 +125031,14 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Returns the specified instance group resource.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.get", + // "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", + // "httpMethod": "POST", + // "id": "compute.regionSslCertificates.insert", // "parameterOrder": [ // "project", - // "region", - // "instanceGroup" + // "region" // ], // "parameters": { - // "instanceGroup": { - // "description": "Name of the instance group resource to return.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -112361,26 +125049,34 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", + // "path": "{project}/regions/{region}/sslCertificates", + // "request": { + // "$ref": "SslCertificate" + // }, // "response": { - // "$ref": "InstanceGroup" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionInstanceGroups.list": +// method id "compute.regionSslCertificates.list": -type RegionInstanceGroupsListCall struct { +type RegionSslCertificatesListCall struct { s *Service project string region string @@ -112390,10 +125086,10 @@ type RegionInstanceGroupsListCall struct { header_ http.Header } -// List: Retrieves the list of instance group resources contained within -// the specified region. 
-func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { - c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of SslCertificate resources available to the +// specified project in the specified region. +func (r *RegionSslCertificatesService) List(project string, region string) *RegionSslCertificatesListCall { + c := &RegionSslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -112421,7 +125117,7 @@ func (r *RegionInstanceGroupsService) List(project string, region string) *Regio // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { +func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertificatesListCall { c.urlParams_.Set("filter", filter) return c } @@ -112432,7 +125128,7 @@ func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGrou // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { +func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslCertificatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -112449,7 +125145,7 @@ func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInsta // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { +func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertificatesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -112457,7 +125153,7 @@ func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGr // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { +func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCertificatesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -112465,7 +125161,7 @@ func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstan // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { +func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslCertificatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -112475,7 +125171,7 @@ func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInsta // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
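IfNoneMatch, as documented above, turns a Get into a conditional request: when the ETag still matches, the server answers 304 and the generated Do surfaces that as an error recognized by googleapi.IsNotModified. A hedged sketch against the regionSslCertificates.get call in this hunk, assuming a *compute.Service (svc); the ETag is read from the embedded ServerResponse header, mirroring how Do populates it.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
	"google.golang.org/api/googleapi"
)

// refreshCertificate re-fetches a certificate only if it changed since lastEtag.
// It returns the (possibly unchanged) certificate and the ETag to use next time.
func refreshCertificate(ctx context.Context, svc *compute.Service, project, region, name, lastEtag string, cached *compute.SslCertificate) (*compute.SslCertificate, string, error) {
	call := svc.RegionSslCertificates.Get(project, region, name).Context(ctx)
	if lastEtag != "" {
		call = call.IfNoneMatch(lastEtag)
	}
	cert, err := call.Do()
	if googleapi.IsNotModified(err) {
		// 304: the resource has not changed since lastEtag; keep the cached copy.
		return cached, lastEtag, nil
	}
	if err != nil {
		return nil, "", err
	}
	// The generated Do stores the response headers on the embedded ServerResponse.
	return cert, cert.Header.Get("Etag"), nil
}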
-func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { +func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSslCertificatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -112483,21 +125179,21 @@ func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { +func (c *RegionSslCertificatesListCall) Context(ctx context.Context) *RegionSslCertificatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsListCall) Header() http.Header { +func (c *RegionSslCertificatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -112509,7 +125205,7 @@ func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -112523,14 +125219,14 @@ func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.list" call. -// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any +// Do executes the "compute.regionSslCertificates.list" call. +// Exactly one of *SslCertificateList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *RegionInstanceGroupList.ServerResponse.Header or (if a response was +// *SslCertificateList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { +func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -112549,7 +125245,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupList{ + ret := &SslCertificateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -112561,9 +125257,9 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region } return ret, nil // { - // "description": "Retrieves the list of instance group resources contained within the specified region.", + // "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.list", + // "id": "compute.regionSslCertificates.list", // "parameterOrder": [ // "project", // "region" @@ -112602,13 +125298,14 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups", + // "path": "{project}/regions/{region}/sslCertificates", // "response": { - // "$ref": "RegionInstanceGroupList" + // "$ref": "SslCertificateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -112622,7 +125319,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { +func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -112640,99 +125337,34 @@ func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*Region } } -// method id "compute.regionInstanceGroups.listInstances": +// method id "compute.regionSslCertificates.testIamPermissions": -type RegionInstanceGroupsListInstancesCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSslCertificatesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group -// and displays information about the named ports. Depending on the -// specified options, this method can list all instances or only the -// instances that are running. 
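The Pages helper carried over to RegionSslCertificatesListCall above wraps the maxResults/pageToken machinery: it calls Do repeatedly, feeding each SslCertificateList page to the callback until nextPageToken is empty or the callback returns an error. A minimal sketch, assuming a *compute.Service (svc).

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// listAllRegionCertificates collects every SslCertificate in the region,
// letting Pages drive the pageToken loop.
func listAllRegionCertificates(ctx context.Context, svc *compute.Service, project, region string) ([]*compute.SslCertificate, error) {
	var certs []*compute.SslCertificate
	err := svc.RegionSslCertificates.List(project, region).Pages(ctx, func(page *compute.SslCertificateList) error {
		certs = append(certs, page.Items...)
		return nil // returning an error here stops the iteration early
	})
	return certs, err
}

Note that Pages resets the pageToken to its original value when it finishes, so the same call value can be reused.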
-func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { - c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource and region. +func (r *RegionSslCertificatesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionSslCertificatesTestIamPermissionsCall { + c := &RegionSslCertificatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. 
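The filter, orderBy, and maxResults parameters described above are plain URL parameters set through the generated fluent setters; the filter grammar (field name, comparison operator, value, with optional AND/OR grouping) is the one spelled out in the comments being moved in this hunk. A hedged sketch of a filtered, ordered single-page list against the new regionSslCertificates.list call, assuming a *compute.Service (svc); the filter string itself only illustrates the documented syntax.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// listRecentCertificates fetches at most 50 certificates, newest first,
// excluding one name, using the documented filter/orderBy syntax.
func listRecentCertificates(ctx context.Context, svc *compute.Service, project, region string) (*compute.SslCertificateList, error) {
	return svc.RegionSslCertificates.List(project, region).
		Filter("name != example-certificate"). // documented comparison syntax
		OrderBy("creationTimestamp desc").     // newest results first
		MaxResults(50).                        // 0..500, default 500
		Context(ctx).
		Do()
}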
-func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { +func (c *RegionSslCertificatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionSslCertificatesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -112740,35 +125372,35 @@ func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { +func (c *RegionSslCertificatesTestIamPermissionsCall) Context(ctx context.Context) *RegionSslCertificatesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { +func (c *RegionSslCertificatesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -112776,22 +125408,21 @@ func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.listInstances" call. -// Exactly one of *RegionInstanceGroupsListInstances or error will be -// non-nil. 
Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupsListInstances.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { +// Do executes the "compute.regionSslCertificates.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionSslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -112810,7 +125441,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupsListInstances{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -112822,44 +125453,15 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", + // "description": "Returns permissions that a caller has on the specified resource and region.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.listInstances", + // "id": "compute.regionSslCertificates.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "instanceGroup" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "instanceGroup": { - // "description": "Name of the regional instance group for which we want to list the instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -112868,18 +125470,26 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "path": "{project}/regions/{region}/sslCertificates/{resource}/testIamPermissions", // "request": { - // "$ref": "RegionInstanceGroupsListInstancesRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "RegionInstanceGroupsListInstances" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -112890,48 +125500,24 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
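The new compute.regionSslCertificates.testIamPermissions method above takes a TestPermissionsRequest listing candidate permissions and echoes back the subset the caller actually holds on the resource. A hedged sketch, assuming a *compute.Service (svc); the permission strings are illustrative IAM permission names, not something this hunk defines.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// heldCertificatePermissions asks the API which of the candidate permissions
// the caller has on a regional SslCertificate resource.
func heldCertificatePermissions(ctx context.Context, svc *compute.Service, project, region, resource string) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.sslCertificates.get", // illustrative permission names
			"compute.sslCertificates.delete",
		},
	}
	resp, err := svc.RegionSslCertificates.TestIamPermissions(project, region, resource, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}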
-func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroups.setNamedPorts": +// method id "compute.regionTargetHttpProxies.delete": -type RegionInstanceGroupsSetNamedPortsCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpProxiesDeleteCall struct { + s *Service + project string + region string + targetHttpProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetNamedPorts: Sets the named ports for the specified regional -// instance group. -func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { - c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified TargetHttpProxy resource. +func (r *RegionTargetHttpProxiesService) Delete(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesDeleteCall { + c := &RegionTargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest + c.targetHttpProxy = targetHttpProxy return c } @@ -112949,7 +125535,7 @@ func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { +func (c *RegionTargetHttpProxiesDeleteCall) RequestId(requestId string) *RegionTargetHttpProxiesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -112957,7 +125543,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { +func (c *RegionTargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -112965,57 +125551,52 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { +func (c *RegionTargetHttpProxiesDeleteCall) Context(ctx context.Context) *RegionTargetHttpProxiesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { +func (c *RegionTargetHttpProxiesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, + "targetHttpProxy": c.targetHttpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. +// Do executes the "compute.regionTargetHttpProxies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -113046,21 +125627,15 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the named ports for the specified regional instance group.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.setNamedPorts", + // "description": "Deletes the specified TargetHttpProxy resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionTargetHttpProxies.delete", // "parameterOrder": [ // "project", // "region", - // "instanceGroup" + // "targetHttpProxy" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the regional instance group where the named ports are updated.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -113071,6 +125646,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -113078,12 +125654,16 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - // "request": { - // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" - // }, + // "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", // "response": { // "$ref": "Operation" // }, @@ -113095,92 +125675,100 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroups.testIamPermissions": +// method id "compute.regionTargetHttpProxies.get": -type RegionInstanceGroupsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpProxiesGetCall struct { + s *Service + project string + region string + targetHttpProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
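The regionTargetHttpProxies.delete call above is another mutating method that returns a regional Operation and accepts the same requestId parameter, so it composes naturally with regionOperations.wait. A hedged sketch, assuming a *compute.Service (svc) and the waitForRegionOperation helper from the earlier sketch in this diff.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// deleteRegionTargetHttpProxy deletes a regional target HTTP proxy and blocks
// until the resulting operation reports DONE.
func deleteRegionTargetHttpProxy(ctx context.Context, svc *compute.Service, project, region, proxy, requestID string) (*compute.Operation, error) {
	op, err := svc.RegionTargetHttpProxies.Delete(project, region, proxy).
		RequestId(requestID). // idempotency key for safe retries
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	// waitForRegionOperation is the polling helper sketched after the
	// regionOperations.wait hunk earlier in this diff.
	return waitForRegionOperation(ctx, svc, project, region, op.Name)
}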
-func (r *RegionInstanceGroupsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupsTestIamPermissionsCall { - c := &RegionInstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified TargetHttpProxy resource in the specified +// region. Gets a list of available target HTTP proxies by making a +// list() request. +func (r *RegionTargetHttpProxiesService) Get(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesGetCall { + c := &RegionTargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.targetHttpProxy = targetHttpProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsTestIamPermissionsCall { +func (c *RegionTargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionTargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetHttpProxiesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupsTestIamPermissionsCall { +func (c *RegionTargetHttpProxiesGetCall) Context(ctx context.Context) *RegionTargetHttpProxiesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { +func (c *RegionTargetHttpProxiesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "targetHttpProxy": c.targetHttpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionTargetHttpProxies.get" call. +// Exactly one of *TargetHttpProxy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetHttpProxy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -113199,7 +125787,7 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &TargetHttpProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -113211,13 +125799,13 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.testIamPermissions", + // "description": "Returns the specified TargetHttpProxy resource in the specified region. 
Gets a list of available target HTTP proxies by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionTargetHttpProxies.get", // "parameterOrder": [ // "project", // "region", - // "resource" + // "targetHttpProxy" // ], // "parameters": { // "project": { @@ -113228,26 +125816,23 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "TargetHttpProxy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -113258,223 +125843,108 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp } -// method id "compute.regionOperations.delete": +// method id "compute.regionTargetHttpProxies.insert": -type RegionOperationsDeleteCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpProxiesInsertCall struct { + s *Service + project string + region string + targethttpproxy *TargetHttpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified region-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete -func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { - c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a TargetHttpProxy resource in the specified project +// and region using the data included in the request. +func (r *RegionTargetHttpProxiesService) Insert(project string, region string, targethttpproxy *TargetHttpProxy) *RegionTargetHttpProxiesInsertCall { + c := &RegionTargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { - c.ctx_ = ctx + c.targethttpproxy = targethttpproxy return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionOperationsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionOperations.delete" call. -func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Deletes the specified region-specific Operations resource.", - // "httpMethod": "DELETE", - // "id": "compute.regionOperations.delete", - // "parameterOrder": [ - // "project", - // "region", - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/operations/{operation}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionOperations.get": - -type RegionOperationsGetCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Retrieves the specified region-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get -func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { - c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.operation = operation +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpProxiesInsertCall) RequestId(requestId string) *RegionTargetHttpProxiesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { +func (c *RegionTargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { +func (c *RegionTargetHttpProxiesInsertCall) Context(ctx context.Context) *RegionTargetHttpProxiesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionOperationsGetCall) Header() http.Header { +func (c *RegionTargetHttpProxiesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.get" call. +// Do executes the "compute.regionTargetHttpProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -113505,22 +125975,14 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified region-specific Operations resource.", - // "httpMethod": "GET", - // "id": "compute.regionOperations.get", + // "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpProxies.insert", // "parameterOrder": [ // "project", - // "region", - // "operation" + // "region" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -113529,29 +125991,36 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations/{operation}", + // "path": "{project}/regions/{region}/targetHttpProxies", + // "request": { + // "$ref": "TargetHttpProxy" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionOperations.list": +// method id "compute.regionTargetHttpProxies.list": -type RegionOperationsListCall struct { +type RegionTargetHttpProxiesListCall struct { s *Service project string region string @@ -113561,11 +126030,10 @@ type RegionOperationsListCall struct { header_ http.Header } -// List: Retrieves a list of Operation resources contained within the -// specified region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list -func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { - c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of TargetHttpProxy resources available to +// the specified project in the specified region. +func (r *RegionTargetHttpProxiesService) List(project string, region string) *RegionTargetHttpProxiesListCall { + c := &RegionTargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -113593,7 +126061,7 @@ func (r *RegionOperationsService) List(project string, region string) *RegionOpe // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { +func (c *RegionTargetHttpProxiesListCall) Filter(filter string) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("filter", filter) return c } @@ -113604,7 +126072,7 @@ func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { +func (c *RegionTargetHttpProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -113621,7 +126089,7 @@ func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperation // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { +func (c *RegionTargetHttpProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -113629,7 +126097,7 @@ func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
-func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { +func (c *RegionTargetHttpProxiesListCall) PageToken(pageToken string) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -113637,7 +126105,7 @@ func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperations // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { +func (c *RegionTargetHttpProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -113647,7 +126115,7 @@ func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperation // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { +func (c *RegionTargetHttpProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetHttpProxiesListCall { c.ifNoneMatch_ = entityTag return c } @@ -113655,21 +126123,21 @@ func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperatio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { +func (c *RegionTargetHttpProxiesListCall) Context(ctx context.Context) *RegionTargetHttpProxiesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionOperationsListCall) Header() http.Header { +func (c *RegionTargetHttpProxiesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -113681,7 +126149,7 @@ func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -113695,14 +126163,14 @@ func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionTargetHttpProxies.list" call. +// Exactly one of *TargetHttpProxyList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TargetHttpProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -113721,7 +126189,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationList{ + ret := &TargetHttpProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -113733,9 +126201,9 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified region.", + // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", // "httpMethod": "GET", - // "id": "compute.regionOperations.list", + // "id": "compute.regionTargetHttpProxies.list", // "parameterOrder": [ // "project", // "region" @@ -113772,16 +126240,16 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations", + // "path": "{project}/regions/{region}/targetHttpProxies", // "response": { - // "$ref": "OperationList" + // "$ref": "TargetHttpProxyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -113795,7 +126263,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { +func (c *RegionTargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpProxyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -113813,40 +126281,52 @@ func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationL } } -// method id "compute.regionOperations.wait": +// method id "compute.regionTargetHttpProxies.setUrlMap": -type RegionOperationsWaitCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpProxiesSetUrlMapCall struct { + s *Service + project string + region string + targetHttpProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Wait: Waits for the specified region-specific Operations resource -// until it is done or timeout, and retrieves the specified Operations -// resource. 1. 
Immediately returns when the operation is already done. -// 2. Waits for no more than the default deadline (2 minutes, subject to -// change) and then returns the current state of the operation, which -// may be DONE or still in progress. 3. Is best-effort: a. The server -// can wait less than the default deadline or zero seconds, in overload -// situations. b. There is no guarantee that the operation is actually -// done when returns. 4. User should be prepared to retry if the -// operation is not DONE. -func (r *RegionOperationsService) Wait(project string, region string, operation string) *RegionOperationsWaitCall { - c := &RegionOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetUrlMap: Changes the URL map for TargetHttpProxy. +func (r *RegionTargetHttpProxiesService) SetUrlMap(project string, region string, targetHttpProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpProxiesSetUrlMapCall { + c := &RegionTargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation + c.targetHttpProxy = targetHttpProxy + c.urlmapreference = urlmapreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpProxiesSetUrlMapCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsWaitCall) Fields(s ...googleapi.Field) *RegionOperationsWaitCall { +func (c *RegionTargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesSetUrlMapCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -113854,30 +126334,202 @@ func (c *RegionOperationsWaitCall) Fields(s ...googleapi.Field) *RegionOperation // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsWaitCall) Context(ctx context.Context) *RegionOperationsWaitCall { +func (c *RegionTargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpProxiesSetUrlMapCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionOperationsWaitCall) Header() http.Header { +func (c *RegionTargetHttpProxiesSetUrlMapCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpProxy": c.targetHttpProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpProxies.setUrlMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the URL map for TargetHttpProxy.", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpProxies.setUrlMap", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy to set a URL map for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpProxies.testIamPermissions": + +type RegionTargetHttpProxiesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionTargetHttpProxiesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionTargetHttpProxiesTestIamPermissionsCall { + c := &RegionTargetHttpProxiesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpProxiesTestIamPermissionsCall) Context(ctx context.Context) *RegionTargetHttpProxiesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionTargetHttpProxiesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}/wait") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -113885,21 +126537,21 @@ func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.wait" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionTargetHttpProxies.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionTargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -113918,7 +126570,7 @@ func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -113930,22 +126582,15 @@ func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Waits for the specified region-specific Operations resource until it is done or timeout, and retrieves the specified Operations resource. 1. Immediately returns when the operation is already done. 2. 
Waits for no more than the default deadline (2 minutes, subject to change) and then returns the current state of the operation, which may be DONE or still in progress. 3. Is best-effort: a. The server can wait less than the default deadline or zero seconds, in overload situations. b. There is no guarantee that the operation is actually done when returns. 4. User should be prepared to retry if the operation is not DONE.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.regionOperations.wait", + // "id": "compute.regionTargetHttpProxies.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "operation" + // "resource" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -113954,16 +126599,26 @@ func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations/{operation}/wait", + // "path": "{project}/regions/{region}/targetHttpProxies/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -113974,24 +126629,24 @@ func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.regionSslCertificates.delete": +// method id "compute.regionTargetHttpsProxies.delete": -type RegionSslCertificatesDeleteCall struct { - s *Service - project string - region string - sslCertificate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpsProxiesDeleteCall struct { + s *Service + project string + region string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified SslCertificate resource in the region. -func (r *RegionSslCertificatesService) Delete(project string, region string, sslCertificate string) *RegionSslCertificatesDeleteCall { - c := &RegionSslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified TargetHttpsProxy resource. 
+func (r *RegionTargetHttpsProxiesService) Delete(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesDeleteCall { + c := &RegionTargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.sslCertificate = sslCertificate + c.targetHttpsProxy = targetHttpsProxy return c } @@ -114009,7 +126664,7 @@ func (r *RegionSslCertificatesService) Delete(project string, region string, ssl // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSslCertificatesDeleteCall { +func (c *RegionTargetHttpsProxiesDeleteCall) RequestId(requestId string) *RegionTargetHttpsProxiesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -114017,7 +126672,7 @@ func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSsl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSslCertificatesDeleteCall { +func (c *RegionTargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -114025,21 +126680,21 @@ func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSs // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSslCertificatesDeleteCall) Context(ctx context.Context) *RegionSslCertificatesDeleteCall { +func (c *RegionTargetHttpsProxiesDeleteCall) Context(ctx context.Context) *RegionTargetHttpsProxiesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesDeleteCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -114048,7 +126703,7 @@ func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates/{sslCertificate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -114056,21 +126711,21 @@ func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "sslCertificate": c.sslCertificate, + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.delete" call. 
+// Do executes the "compute.regionTargetHttpsProxies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -114101,13 +126756,13 @@ func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified SslCertificate resource in the region.", + // "description": "Deletes the specified TargetHttpsProxy resource.", // "httpMethod": "DELETE", - // "id": "compute.regionSslCertificates.delete", + // "id": "compute.regionTargetHttpsProxies.delete", // "parameterOrder": [ // "project", // "region", - // "sslCertificate" + // "targetHttpsProxy" // ], // "parameters": { // "project": { @@ -114129,15 +126784,15 @@ func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "location": "query", // "type": "string" // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to delete.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", // "response": { // "$ref": "Operation" // }, @@ -114149,34 +126804,34 @@ func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionSslCertificates.get": +// method id "compute.regionTargetHttpsProxies.get": -type RegionSslCertificatesGetCall struct { - s *Service - project string - region string - sslCertificate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionTargetHttpsProxiesGetCall struct { + s *Service + project string + region string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified SslCertificate resource in the specified -// region. Get a list of available SSL certificates by making a list() -// request. -func (r *RegionSslCertificatesService) Get(project string, region string, sslCertificate string) *RegionSslCertificatesGetCall { - c := &RegionSslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified TargetHttpsProxy resource in the specified +// region. Gets a list of available target HTTP proxies by making a +// list() request. 
+func (r *RegionTargetHttpsProxiesService) Get(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesGetCall { + c := &RegionTargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.sslCertificate = sslCertificate + c.targetHttpsProxy = targetHttpsProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCertificatesGetCall { +func (c *RegionTargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -114186,7 +126841,7 @@ func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslCertificatesGetCall { +func (c *RegionTargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetHttpsProxiesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -114194,21 +126849,21 @@ func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSslCertificatesGetCall) Context(ctx context.Context) *RegionSslCertificatesGetCall { +func (c *RegionTargetHttpsProxiesGetCall) Context(ctx context.Context) *RegionTargetHttpsProxiesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesGetCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -114220,7 +126875,7 @@ func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates/{sslCertificate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -114228,21 +126883,21 @@ func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "sslCertificate": c.sslCertificate, + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.get" call. -// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *SslCertificate.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionTargetHttpsProxies.get" call. +// Exactly one of *TargetHttpsProxy or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetHttpsProxy.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { +func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -114261,7 +126916,7 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SslCertificate{ + ret := &TargetHttpsProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -114273,13 +126928,13 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer } return ret, nil // { - // "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", + // "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", // "httpMethod": "GET", - // "id": "compute.regionSslCertificates.get", + // "id": "compute.regionTargetHttpsProxies.get", // "parameterOrder": [ // "project", // "region", - // "sslCertificate" + // "targetHttpsProxy" // ], // "parameters": { // "project": { @@ -114296,17 +126951,17 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer // "required": true, // "type": "string" // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to return.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", // "response": { - // "$ref": "SslCertificate" + // "$ref": "TargetHttpsProxy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -114317,25 +126972,25 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer } -// method id "compute.regionSslCertificates.insert": +// method id "compute.regionTargetHttpsProxies.insert": -type RegionSslCertificatesInsertCall struct { - s *Service - project string - region string - sslcertificate *SslCertificate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpsProxiesInsertCall struct { + s *Service + project string + region string + targethttpsproxy *TargetHttpsProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a SslCertificate resource in the specified 
project -// and region using the data included in the request -func (r *RegionSslCertificatesService) Insert(project string, region string, sslcertificate *SslCertificate) *RegionSslCertificatesInsertCall { - c := &RegionSslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a TargetHttpsProxy resource in the specified project +// and region using the data included in the request. +func (r *RegionTargetHttpsProxiesService) Insert(project string, region string, targethttpsproxy *TargetHttpsProxy) *RegionTargetHttpsProxiesInsertCall { + c := &RegionTargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.sslcertificate = sslcertificate + c.targethttpsproxy = targethttpsproxy return c } @@ -114353,7 +127008,7 @@ func (r *RegionSslCertificatesService) Insert(project string, region string, ssl // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSslCertificatesInsertCall { +func (c *RegionTargetHttpsProxiesInsertCall) RequestId(requestId string) *RegionTargetHttpsProxiesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -114361,7 +127016,7 @@ func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSsl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSslCertificatesInsertCall { +func (c *RegionTargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -114369,35 +127024,35 @@ func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSs // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSslCertificatesInsertCall) Context(ctx context.Context) *RegionSslCertificatesInsertCall { +func (c *RegionTargetHttpsProxiesInsertCall) Context(ctx context.Context) *RegionTargetHttpsProxiesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesInsertCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -114411,14 +127066,14 @@ func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.insert" call. +// Do executes the "compute.regionTargetHttpsProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -114449,9 +127104,9 @@ func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", + // "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.regionSslCertificates.insert", + // "id": "compute.regionTargetHttpsProxies.insert", // "parameterOrder": [ // "project", // "region" @@ -114477,9 +127132,9 @@ func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/regions/{region}/sslCertificates", + // "path": "{project}/regions/{region}/targetHttpsProxies", // "request": { - // "$ref": "SslCertificate" + // "$ref": "TargetHttpsProxy" // }, // "response": { // "$ref": "Operation" @@ -114492,9 +127147,9 @@ func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionSslCertificates.list": +// method id "compute.regionTargetHttpsProxies.list": -type RegionSslCertificatesListCall struct { +type RegionTargetHttpsProxiesListCall struct { s *Service project string region string @@ -114504,10 +127159,10 @@ type RegionSslCertificatesListCall struct { header_ http.Header } -// List: Retrieves the list of SslCertificate resources available to the -// specified project in the specified region. -func (r *RegionSslCertificatesService) List(project string, region string) *RegionSslCertificatesListCall { - c := &RegionSslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of TargetHttpsProxy resources available to +// the specified project in the specified region. +func (r *RegionTargetHttpsProxiesService) List(project string, region string) *RegionTargetHttpsProxiesListCall { + c := &RegionTargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -114535,7 +127190,7 @@ func (r *RegionSslCertificatesService) List(project string, region string) *Regi // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
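// A hedged sketch of the Insert call documented above, reusing the svc client and
// imports from the earlier setup sketch; the resource URLs and the request ID value
// are illustrative, not taken from the diff.
func insertRegionTargetHttpsProxy(ctx context.Context, svc *compute.Service) error {
	proxy := &compute.TargetHttpsProxy{
		Name:            "my-https-proxy",
		UrlMap:          "projects/my-project/regions/us-central1/urlMaps/my-url-map",
		SslCertificates: []string{"projects/my-project/regions/us-central1/sslCertificates/my-cert"},
	}
	// RequestId makes the insert safe to retry, per the requestId parameter above.
	op, err := svc.RegionTargetHttpsProxies.
		Insert("my-project", "us-central1", proxy).
		RequestId("3c6f9d5e-1b2a-4c8d-9e0f-123456789abc").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// Insert returns a long-running Operation; callers typically poll it until DONE.
	log.Printf("operation %s status %s", op.Name, op.Status)
	return nil
}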
-func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpsProxiesListCall) Filter(filter string) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("filter", filter) return c } @@ -114546,7 +127201,7 @@ func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertific // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpsProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -114563,7 +127218,7 @@ func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslC // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpsProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -114571,7 +127226,7 @@ func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertif // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpsProxiesListCall) PageToken(pageToken string) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -114579,7 +127234,7 @@ func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -114589,7 +127244,7 @@ func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetHttpsProxiesListCall { c.ifNoneMatch_ = entityTag return c } @@ -114597,21 +127252,21 @@ func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSsl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSslCertificatesListCall) Context(ctx context.Context) *RegionSslCertificatesListCall { +func (c *RegionTargetHttpsProxiesListCall) Context(ctx context.Context) *RegionTargetHttpsProxiesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
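// A sketch of a single-page List call using the Filter and MaxResults options
// described above; the filter expression and names are illustrative, and svc is
// the client from the earlier setup sketch.
func listRegionTargetHttpsProxies(ctx context.Context, svc *compute.Service) error {
	resp, err := svc.RegionTargetHttpsProxies.
		List("my-project", "us-central1").
		Filter("name != example-proxy").
		MaxResults(50).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, p := range resp.Items {
		log.Println(p.Name)
	}
	if resp.NextPageToken != "" {
		log.Println("more results available; pass this token to PageToken for the next page")
	}
	return nil
}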
-func (c *RegionSslCertificatesListCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -114623,7 +127278,7 @@ func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -114637,14 +127292,14 @@ func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.list" call. -// Exactly one of *SslCertificateList or error will be non-nil. Any +// Do executes the "compute.regionTargetHttpsProxies.list" call. +// Exactly one of *TargetHttpsProxyList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *SslCertificateList.ServerResponse.Header or (if a response was +// *TargetHttpsProxyList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { +func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -114663,7 +127318,7 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SslCertificateList{ + ret := &TargetHttpsProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -114675,9 +127330,9 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe } return ret, nil // { - // "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", + // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", // "httpMethod": "GET", - // "id": "compute.regionSslCertificates.list", + // "id": "compute.regionTargetHttpsProxies.list", // "parameterOrder": [ // "project", // "region" @@ -114721,9 +127376,9 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // "type": "string" // } // }, - // "path": "{project}/regions/{region}/sslCertificates", + // "path": "{project}/regions/{region}/targetHttpsProxies", // "response": { - // "$ref": "SslCertificateList" + // "$ref": "TargetHttpsProxyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -114737,7 +127392,7 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { +func (c *RegionTargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -114755,9 +127410,379 @@ func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCe } } -// method id "compute.regionSslCertificates.testIamPermissions": +// method id "compute.regionTargetHttpsProxies.setSslCertificates": -type RegionSslCertificatesTestIamPermissionsCall struct { +type RegionTargetHttpsProxiesSetSslCertificatesCall struct { + s *Service + project string + region string + targetHttpsProxy string + regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. +func (r *RegionTargetHttpsProxiesService) SetSslCertificates(project string, region string, targetHttpsProxy string, regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest) *RegionTargetHttpsProxiesSetSslCertificatesCall { + c := &RegionTargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpsProxy = targetHttpsProxy + c.regiontargethttpsproxiessetsslcertificatesrequest = regiontargethttpsproxiessetsslcertificatesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
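// A sketch of the Pages helper documented above, which walks every page of List
// results and halts on the first non-nil error returned by the callback; names
// are illustrative and svc comes from the earlier setup sketch.
func walkRegionTargetHttpsProxies(ctx context.Context, svc *compute.Service) error {
	return svc.RegionTargetHttpsProxies.
		List("my-project", "us-central1").
		Pages(ctx, func(page *compute.TargetHttpsProxyList) error {
			for _, p := range page.Items {
				log.Println(p.Name)
			}
			return nil // returning an error here would stop the iteration
		})
}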
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetSslCertificatesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetSslCertificatesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetSslCertificatesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiontargethttpsproxiessetsslcertificatesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpsProxies.setSslCertificates" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Replaces SslCertificates for TargetHttpsProxy.", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpsProxies.setSslCertificates", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + // "request": { + // "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpsProxies.setUrlMap": + +type RegionTargetHttpsProxiesSetUrlMapCall struct { + s *Service + project string + region string + targetHttpsProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetUrlMap: Changes the URL map for TargetHttpsProxy. 
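// A hedged sketch of the SetSslCertificates call documented above; the Go field
// name SslCertificates mirrors the API's sslCertificates property, and the
// certificate URL is illustrative.
func replaceProxyCertificates(ctx context.Context, svc *compute.Service) error {
	req := &compute.RegionTargetHttpsProxiesSetSslCertificatesRequest{
		SslCertificates: []string{
			"projects/my-project/regions/us-central1/sslCertificates/renewed-cert",
		},
	}
	op, err := svc.RegionTargetHttpsProxies.
		SetSslCertificates("my-project", "us-central1", "my-https-proxy", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("operation %s status %s", op.Name, op.Status)
	return nil
}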
+func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region string, targetHttpsProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpsProxiesSetUrlMapCall { + c := &RegionTargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpsProxy = targetHttpsProxy + c.urlmapreference = urlmapreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetUrlMapCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetUrlMapCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetUrlMapCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpsProxies.setUrlMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the URL map for TargetHttpsProxy.", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpsProxies.setUrlMap", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy to set a URL map for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpsProxies.testIamPermissions": + +type RegionTargetHttpsProxiesTestIamPermissionsCall struct { s *Service project string region string @@ -114769,9 +127794,9 @@ type RegionSslCertificatesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource and region. 
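// A sketch of the SetUrlMap call documented above; UrlMapReference carries only
// the URL of the map to switch the proxy to, and the names here are illustrative.
func switchProxyUrlMap(ctx context.Context, svc *compute.Service) error {
	ref := &compute.UrlMapReference{
		UrlMap: "projects/my-project/regions/us-central1/urlMaps/canary-map",
	}
	op, err := svc.RegionTargetHttpsProxies.
		SetUrlMap("my-project", "us-central1", "my-https-proxy", ref).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("operation %s status %s", op.Name, op.Status)
	return nil
}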
-func (r *RegionSslCertificatesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionSslCertificatesTestIamPermissionsCall { - c := &RegionSslCertificatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// specified resource. +func (r *RegionTargetHttpsProxiesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionTargetHttpsProxiesTestIamPermissionsCall { + c := &RegionTargetHttpsProxiesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.resource = resource @@ -114782,7 +127807,7 @@ func (r *RegionSslCertificatesService) TestIamPermissions(project string, region // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionSslCertificatesTestIamPermissionsCall { +func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -114790,21 +127815,21 @@ func (c *RegionSslCertificatesTestIamPermissionsCall) Fields(s ...googleapi.Fiel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSslCertificatesTestIamPermissionsCall) Context(ctx context.Context) *RegionSslCertificatesTestIamPermissionsCall { +func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) Context(ctx context.Context) *RegionTargetHttpsProxiesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesTestIamPermissionsCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -114818,7 +127843,7 @@ func (c *RegionSslCertificatesTestIamPermissionsCall) doRequest(alt string) (*ht reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -114833,14 +127858,14 @@ func (c *RegionSslCertificatesTestIamPermissionsCall) doRequest(alt string) (*ht return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.testIamPermissions" call. +// Do executes the "compute.regionTargetHttpsProxies.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. 
Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionSslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -114871,9 +127896,9 @@ func (c *RegionSslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource and region.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.regionSslCertificates.testIamPermissions", + // "id": "compute.regionTargetHttpsProxies.testIamPermissions", // "parameterOrder": [ // "project", // "region", @@ -114902,7 +127927,7 @@ func (c *RegionSslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallO // "type": "string" // } // }, - // "path": "{project}/regions/{region}/sslCertificates/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/targetHttpsProxies/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -114918,42 +127943,30 @@ func (c *RegionSslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallO } -// method id "compute.regionTargetHttpProxies.delete": +// method id "compute.regionUrlMaps.delete": -type RegionTargetHttpProxiesDeleteCall struct { - s *Service - project string - region string - targetHttpProxy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionUrlMapsDeleteCall struct { + s *Service + project string + region string + urlMap string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified TargetHttpProxy resource. -func (r *RegionTargetHttpProxiesService) Delete(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesDeleteCall { - c := &RegionTargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified UrlMap resource. +func (r *RegionUrlMapsService) Delete(project string, region string, urlMap string) *RegionUrlMapsDeleteCall { + c := &RegionUrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targetHttpProxy = targetHttpProxy + c.urlMap = urlMap return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
-// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionTargetHttpProxiesDeleteCall) RequestId(requestId string) *RegionTargetHttpProxiesDeleteCall { +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. +func (c *RegionUrlMapsDeleteCall) RequestId(requestId string) *RegionUrlMapsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -114961,7 +127974,7 @@ func (c *RegionTargetHttpProxiesDeleteCall) RequestId(requestId string) *RegionT // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesDeleteCall { +func (c *RegionUrlMapsDeleteCall) Fields(s ...googleapi.Field) *RegionUrlMapsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -114969,21 +127982,21 @@ func (c *RegionTargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *Region // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpProxiesDeleteCall) Context(ctx context.Context) *RegionTargetHttpProxiesDeleteCall { +func (c *RegionUrlMapsDeleteCall) Context(ctx context.Context) *RegionUrlMapsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpProxiesDeleteCall) Header() http.Header { +func (c *RegionUrlMapsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -114992,7 +128005,7 @@ func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Respons var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -115000,21 +128013,21 @@ func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpProxy": c.targetHttpProxy, + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpProxies.delete" call. +// Do executes the "compute.regionUrlMaps.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
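// A sketch of the TestIamPermissions call covered above; the permission strings
// are illustrative, and the response echoes back the subset of permissions the
// caller actually holds on the named proxy.
func checkProxyPermissions(ctx context.Context, svc *compute.Service) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.regionTargetHttpsProxies.get",
			"compute.regionTargetHttpsProxies.use",
		},
	}
	resp, err := svc.RegionTargetHttpsProxies.
		TestIamPermissions("my-project", "us-central1", "my-https-proxy", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("granted permissions: %v", resp.Permissions)
	return nil
}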
-func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionUrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -115045,13 +128058,13 @@ func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Deletes the specified TargetHttpProxy resource.", + // "description": "Deletes the specified UrlMap resource.", // "httpMethod": "DELETE", - // "id": "compute.regionTargetHttpProxies.delete", + // "id": "compute.regionUrlMaps.delete", // "parameterOrder": [ // "project", // "region", - // "targetHttpProxy" + // "urlMap" // ], // "parameters": { // "project": { @@ -115069,19 +128082,19 @@ func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*O // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", // "location": "query", // "type": "string" // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy resource to delete.", + // "urlMap": { + // "description": "Name of the UrlMap resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + // "path": "{project}/regions/{region}/urlMaps/{urlMap}", // "response": { // "$ref": "Operation" // }, @@ -115093,34 +128106,33 @@ func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.regionTargetHttpProxies.get": +// method id "compute.regionUrlMaps.get": -type RegionTargetHttpProxiesGetCall struct { - s *Service - project string - region string - targetHttpProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionUrlMapsGetCall struct { + s *Service + project string + region string + urlMap string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified TargetHttpProxy resource in the specified -// region. Gets a list of available target HTTP proxies by making a -// list() request. -func (r *RegionTargetHttpProxiesService) Get(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesGetCall { - c := &RegionTargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified UrlMap resource. Gets a list of available +// URL maps by making a list() request. 
+func (r *RegionUrlMapsService) Get(project string, region string, urlMap string) *RegionUrlMapsGetCall { + c := &RegionUrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targetHttpProxy = targetHttpProxy + c.urlMap = urlMap return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesGetCall { +func (c *RegionUrlMapsGetCall) Fields(s ...googleapi.Field) *RegionUrlMapsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -115130,7 +128142,7 @@ func (c *RegionTargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *RegionTar // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionTargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetHttpProxiesGetCall { +func (c *RegionUrlMapsGetCall) IfNoneMatch(entityTag string) *RegionUrlMapsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -115138,21 +128150,21 @@ func (c *RegionTargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *RegionTa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpProxiesGetCall) Context(ctx context.Context) *RegionTargetHttpProxiesGetCall { +func (c *RegionUrlMapsGetCall) Context(ctx context.Context) *RegionUrlMapsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpProxiesGetCall) Header() http.Header { +func (c *RegionUrlMapsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -115164,7 +128176,7 @@ func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -115172,21 +128184,21 @@ func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpProxy": c.targetHttpProxy, + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpProxies.get" call. -// Exactly one of *TargetHttpProxy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *TargetHttpProxy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxy, error) { +// Do executes the "compute.regionUrlMaps.get" call. +// Exactly one of *UrlMap or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *UrlMap.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -115205,7 +128217,7 @@ func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*Targ if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetHttpProxy{ + ret := &UrlMap{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -115217,13 +128229,13 @@ func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*Targ } return ret, nil // { - // "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", + // "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", // "httpMethod": "GET", - // "id": "compute.regionTargetHttpProxies.get", + // "id": "compute.regionUrlMaps.get", // "parameterOrder": [ // "project", // "region", - // "targetHttpProxy" + // "urlMap" // ], // "parameters": { // "project": { @@ -115240,64 +128252,217 @@ func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*Targ // "required": true, // "type": "string" // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy resource to return.", + // "urlMap": { + // "description": "Name of the UrlMap resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/urlMaps/{urlMap}", + // "response": { + // "$ref": "UrlMap" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionUrlMaps.insert": + +type RegionUrlMapsInsertCall struct { + s *Service + project string + region string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a UrlMap resource in the specified project using the +// data included in the request. +func (r *RegionUrlMapsService) Insert(project string, region string, urlmap *UrlMap) *RegionUrlMapsInsertCall { + c := &RegionUrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.urlmap = urlmap + return c +} + +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. 
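// A sketch of the RegionUrlMaps Get and Delete calls documented above, reusing
// the svc client from the earlier setup sketch; the Service field name
// (RegionUrlMaps) and the resource names are assumptions. Get returns the UrlMap
// itself, while Delete returns a long-running Operation.
func getThenDeleteRegionUrlMap(ctx context.Context, svc *compute.Service) error {
	m, err := svc.RegionUrlMaps.Get("my-project", "us-central1", "old-map").Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("deleting url map %s (default service %s)", m.Name, m.DefaultService)
	op, err := svc.RegionUrlMaps.Delete("my-project", "us-central1", m.Name).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("operation %s status %s", op.Name, op.Status)
	return nil
}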
+func (c *RegionUrlMapsInsertCall) RequestId(requestId string) *RegionUrlMapsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionUrlMapsInsertCall) Fields(s ...googleapi.Field) *RegionUrlMapsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionUrlMapsInsertCall) Context(ctx context.Context) *RegionUrlMapsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionUrlMapsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionUrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionUrlMaps.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionUrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionUrlMaps.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + // "path": "{project}/regions/{region}/urlMaps", + // "request": { + // "$ref": "UrlMap" + // }, // "response": { - // "$ref": "TargetHttpProxy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionTargetHttpProxies.insert": +// method id "compute.regionUrlMaps.invalidateCache": -type RegionTargetHttpProxiesInsertCall struct { - s *Service - project string - region string - targethttpproxy *TargetHttpProxy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionUrlMapsInvalidateCacheCall struct { + s *Service + project string + region string + urlMap string + cacheinvalidationrule *CacheInvalidationRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a TargetHttpProxy resource in the specified project -// and region using the data included in the request. -func (r *RegionTargetHttpProxiesService) Insert(project string, region string, targethttpproxy *TargetHttpProxy) *RegionTargetHttpProxiesInsertCall { - c := &RegionTargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// InvalidateCache: Initiates a cache invalidation operation, +// invalidating the specified path, scoped to the specified UrlMap. 
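// A hedged sketch of the RegionUrlMaps Insert call documented above; a UrlMap
// needs at least a name and a default backend service, and both values below are
// illustrative.
func insertRegionUrlMap(ctx context.Context, svc *compute.Service) error {
	m := &compute.UrlMap{
		Name:           "my-url-map",
		DefaultService: "projects/my-project/regions/us-central1/backendServices/my-backend",
	}
	op, err := svc.RegionUrlMaps.
		Insert("my-project", "us-central1", m).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("operation %s status %s", op.Name, op.Status)
	return nil
}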
+func (r *RegionUrlMapsService) InvalidateCache(project string, region string, urlMap string, cacheinvalidationrule *CacheInvalidationRule) *RegionUrlMapsInvalidateCacheCall { + c := &RegionUrlMapsInvalidateCacheCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targethttpproxy = targethttpproxy + c.urlMap = urlMap + c.cacheinvalidationrule = cacheinvalidationrule return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionTargetHttpProxiesInsertCall) RequestId(requestId string) *RegionTargetHttpProxiesInsertCall { +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. +func (c *RegionUrlMapsInvalidateCacheCall) RequestId(requestId string) *RegionUrlMapsInvalidateCacheCall { c.urlParams_.Set("requestId", requestId) return c } @@ -115305,7 +128470,7 @@ func (c *RegionTargetHttpProxiesInsertCall) RequestId(requestId string) *RegionT // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesInsertCall { +func (c *RegionUrlMapsInvalidateCacheCall) Fields(s ...googleapi.Field) *RegionUrlMapsInvalidateCacheCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -115313,35 +128478,35 @@ func (c *RegionTargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *Region // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpProxiesInsertCall) Context(ctx context.Context) *RegionTargetHttpProxiesInsertCall { +func (c *RegionUrlMapsInvalidateCacheCall) Context(ctx context.Context) *RegionUrlMapsInvalidateCacheCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionTargetHttpProxiesInsertCall) Header() http.Header { +func (c *RegionUrlMapsInvalidateCacheCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.cacheinvalidationrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}/invalidateCache") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -115351,18 +128516,19 @@ func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Respons googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpProxies.insert" call. +// Do executes the "compute.regionUrlMaps.invalidateCache" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionUrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -115393,12 +128559,13 @@ func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", + // "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", // "httpMethod": "POST", - // "id": "compute.regionTargetHttpProxies.insert", + // "id": "compute.regionUrlMaps.invalidateCache", // "parameterOrder": [ // "project", - // "region" + // "region", + // "urlMap" // ], // "parameters": { // "project": { @@ -115416,14 +128583,21 @@ func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*O // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", // "location": "query", // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetHttpProxies", + // "path": "{project}/regions/{region}/urlMaps/{urlMap}/invalidateCache", // "request": { - // "$ref": "TargetHttpProxy" + // "$ref": "CacheInvalidationRule" // }, // "response": { // "$ref": "Operation" @@ -115436,9 +128610,9 @@ func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.regionTargetHttpProxies.list": +// method id "compute.regionUrlMaps.list": -type RegionTargetHttpProxiesListCall struct { +type RegionUrlMapsListCall struct { s *Service project string region string @@ -115448,10 +128622,10 @@ type RegionTargetHttpProxiesListCall struct { header_ http.Header } -// List: Retrieves the list of TargetHttpProxy resources available to -// the specified project in the specified region. -func (r *RegionTargetHttpProxiesService) List(project string, region string) *RegionTargetHttpProxiesListCall { - c := &RegionTargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of UrlMap resources available to the +// specified project in the specified region. +func (r *RegionUrlMapsService) List(project string, region string) *RegionUrlMapsListCall { + c := &RegionUrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -115479,7 +128653,7 @@ func (r *RegionTargetHttpProxiesService) List(project string, region string) *Re // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionTargetHttpProxiesListCall) Filter(filter string) *RegionTargetHttpProxiesListCall { +func (c *RegionUrlMapsListCall) Filter(filter string) *RegionUrlMapsListCall { c.urlParams_.Set("filter", filter) return c } @@ -115490,7 +128664,7 @@ func (c *RegionTargetHttpProxiesListCall) Filter(filter string) *RegionTargetHtt // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionTargetHttpProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpProxiesListCall { +func (c *RegionUrlMapsListCall) MaxResults(maxResults int64) *RegionUrlMapsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -115507,7 +128681,7 @@ func (c *RegionTargetHttpProxiesListCall) MaxResults(maxResults int64) *RegionTa // // Currently, only sorting by name or creationTimestamp desc is // supported. 
-func (c *RegionTargetHttpProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpProxiesListCall { +func (c *RegionUrlMapsListCall) OrderBy(orderBy string) *RegionUrlMapsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -115515,7 +128689,7 @@ func (c *RegionTargetHttpProxiesListCall) OrderBy(orderBy string) *RegionTargetH // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionTargetHttpProxiesListCall) PageToken(pageToken string) *RegionTargetHttpProxiesListCall { +func (c *RegionUrlMapsListCall) PageToken(pageToken string) *RegionUrlMapsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -115523,7 +128697,7 @@ func (c *RegionTargetHttpProxiesListCall) PageToken(pageToken string) *RegionTar // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesListCall { +func (c *RegionUrlMapsListCall) Fields(s ...googleapi.Field) *RegionUrlMapsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -115533,7 +128707,7 @@ func (c *RegionTargetHttpProxiesListCall) Fields(s ...googleapi.Field) *RegionTa // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionTargetHttpProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetHttpProxiesListCall { +func (c *RegionUrlMapsListCall) IfNoneMatch(entityTag string) *RegionUrlMapsListCall { c.ifNoneMatch_ = entityTag return c } @@ -115541,21 +128715,21 @@ func (c *RegionTargetHttpProxiesListCall) IfNoneMatch(entityTag string) *RegionT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpProxiesListCall) Context(ctx context.Context) *RegionTargetHttpProxiesListCall { +func (c *RegionUrlMapsListCall) Context(ctx context.Context) *RegionUrlMapsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpProxiesListCall) Header() http.Header { +func (c *RegionUrlMapsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -115567,7 +128741,7 @@ func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -115581,14 +128755,14 @@ func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpProxies.list" call. -// Exactly one of *TargetHttpProxyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetHttpProxyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyList, error) { +// Do executes the "compute.regionUrlMaps.list" call. +// Exactly one of *UrlMapList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *UrlMapList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -115607,7 +128781,7 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetHttpProxyList{ + ret := &UrlMapList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -115619,9 +128793,9 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar } return ret, nil // { - // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", + // "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region.", // "httpMethod": "GET", - // "id": "compute.regionTargetHttpProxies.list", + // "id": "compute.regionUrlMaps.list", // "parameterOrder": [ // "project", // "region" @@ -115665,9 +128839,9 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetHttpProxies", + // "path": "{project}/regions/{region}/urlMaps", // "response": { - // "$ref": "TargetHttpProxyList" + // "$ref": "UrlMapList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -115681,7 +128855,7 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
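// --- Illustrative usage sketch (not part of the vendored generated code): paging through
// regional URL maps with the List call and the Pages helper described above. Assumes the
// same package, a RegionUrlMaps field on Service, and that UrlMapList carries an Items
// slice of *UrlMap; the filter expression and identifiers are placeholders.
func exampleRegionUrlMapsList(ctx context.Context, s *Service) error {
	call := s.RegionUrlMaps.List("my-project", "us-central1").
		Filter("name != legacy-map").
		MaxResults(100).
		OrderBy("creationTimestamp desc")
	return call.Pages(ctx, func(page *UrlMapList) error {
		for _, um := range page.Items {
			fmt.Println(um.Name) // UrlMap.Name is assumed here
		}
		return nil // a non-nil error here would halt the iteration
	})
}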
-func (c *RegionTargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpProxyList) error) error { +func (c *RegionUrlMapsListCall) Pages(ctx context.Context, f func(*UrlMapList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -115699,44 +128873,34 @@ func (c *RegionTargetHttpProxiesListCall) Pages(ctx context.Context, f func(*Tar } } -// method id "compute.regionTargetHttpProxies.setUrlMap": +// method id "compute.regionUrlMaps.patch": -type RegionTargetHttpProxiesSetUrlMapCall struct { - s *Service - project string - region string - targetHttpProxy string - urlmapreference *UrlMapReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionUrlMapsPatchCall struct { + s *Service + project string + region string + urlMap string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpProxy. -func (r *RegionTargetHttpProxiesService) SetUrlMap(project string, region string, targetHttpProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpProxiesSetUrlMapCall { - c := &RegionTargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified UrlMap resource with the data included +// in the request. This method supports PATCH semantics and uses JSON +// merge patch format and processing rules. +func (r *RegionUrlMapsService) Patch(project string, region string, urlMap string, urlmap *UrlMap) *RegionUrlMapsPatchCall { + c := &RegionUrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targetHttpProxy = targetHttpProxy - c.urlmapreference = urlmapreference + c.urlMap = urlMap + c.urlmap = urlmap return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionTargetHttpProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpProxiesSetUrlMapCall { +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. +func (c *RegionUrlMapsPatchCall) RequestId(requestId string) *RegionUrlMapsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -115744,7 +128908,7 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) RequestId(requestId string) *Regi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionTargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesSetUrlMapCall { +func (c *RegionUrlMapsPatchCall) Fields(s ...googleapi.Field) *RegionUrlMapsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -115752,57 +128916,57 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpProxiesSetUrlMapCall { +func (c *RegionUrlMapsPatchCall) Context(ctx context.Context) *RegionUrlMapsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpProxiesSetUrlMapCall) Header() http.Header { +func (c *RegionUrlMapsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpProxy": c.targetHttpProxy, + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpProxies.setUrlMap" call. +// Do executes the "compute.regionUrlMaps.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionUrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -115833,13 +128997,13 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the URL map for TargetHttpProxy.", - // "httpMethod": "POST", - // "id": "compute.regionTargetHttpProxies.setUrlMap", + // "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionUrlMaps.patch", // "parameterOrder": [ // "project", // "region", - // "targetHttpProxy" + // "urlMap" // ], // "parameters": { // "project": { @@ -115857,21 +129021,21 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", // "location": "query", // "type": "string" // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy to set a URL map for.", + // "urlMap": { + // "description": "Name of the UrlMap resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + // "path": "{project}/regions/{region}/urlMaps/{urlMap}", // "request": { - // "$ref": "UrlMapReference" + // "$ref": "UrlMap" // }, // "response": { // "$ref": "Operation" @@ -115884,9 +129048,9 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionTargetHttpProxies.testIamPermissions": +// method id "compute.regionUrlMaps.testIamPermissions": -type RegionTargetHttpProxiesTestIamPermissionsCall struct { +type RegionUrlMapsTestIamPermissionsCall struct { s *Service project string region string @@ -115899,8 +129063,8 @@ type RegionTargetHttpProxiesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. 
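// --- Illustrative usage sketch (not part of the vendored generated code): checking which
// permissions the caller holds on a regional URL map via the testIamPermissions method
// described above. Assumes the same package and that TestPermissionsRequest and
// TestPermissionsResponse both expose a Permissions []string field; the resource and
// permission names are placeholders.
func exampleRegionUrlMapPermissions(ctx context.Context, s *Service) ([]string, error) {
	req := &TestPermissionsRequest{
		Permissions: []string{"compute.urlMaps.get", "compute.urlMaps.update"},
	}
	resp, err := s.RegionUrlMaps.TestIamPermissions("my-project", "us-central1", "my-url-map", req).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil // the subset of requested permissions the caller actually has
}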
-func (r *RegionTargetHttpProxiesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionTargetHttpProxiesTestIamPermissionsCall { - c := &RegionTargetHttpProxiesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionUrlMapsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionUrlMapsTestIamPermissionsCall { + c := &RegionUrlMapsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.resource = resource @@ -115911,7 +129075,7 @@ func (r *RegionTargetHttpProxiesService) TestIamPermissions(project string, regi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesTestIamPermissionsCall { +func (c *RegionUrlMapsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionUrlMapsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -115919,21 +129083,21 @@ func (c *RegionTargetHttpProxiesTestIamPermissionsCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpProxiesTestIamPermissionsCall) Context(ctx context.Context) *RegionTargetHttpProxiesTestIamPermissionsCall { +func (c *RegionUrlMapsTestIamPermissionsCall) Context(ctx context.Context) *RegionUrlMapsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpProxiesTestIamPermissionsCall) Header() http.Header { +func (c *RegionUrlMapsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -115947,7 +129111,7 @@ func (c *RegionTargetHttpProxiesTestIamPermissionsCall) doRequest(alt string) (* reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -115962,14 +129126,14 @@ func (c *RegionTargetHttpProxiesTestIamPermissionsCall) doRequest(alt string) (* return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpProxies.testIamPermissions" call. +// Do executes the "compute.regionUrlMaps.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. 
Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionTargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *RegionUrlMapsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -116002,7 +129166,7 @@ func (c *RegionTargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.Cal // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.regionTargetHttpProxies.testIamPermissions", + // "id": "compute.regionUrlMaps.testIamPermissions", // "parameterOrder": [ // "project", // "region", @@ -116031,7 +129195,7 @@ func (c *RegionTargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.Cal // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetHttpProxies/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/urlMaps/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -116047,42 +129211,33 @@ func (c *RegionTargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.Cal } -// method id "compute.regionTargetHttpsProxies.delete": +// method id "compute.regionUrlMaps.update": -type RegionTargetHttpsProxiesDeleteCall struct { - s *Service - project string - region string - targetHttpsProxy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionUrlMapsUpdateCall struct { + s *Service + project string + region string + urlMap string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified TargetHttpsProxy resource. -func (r *RegionTargetHttpsProxiesService) Delete(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesDeleteCall { - c := &RegionTargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified UrlMap resource with the data included +// in the request. +func (r *RegionUrlMapsService) Update(project string, region string, urlMap string, urlmap *UrlMap) *RegionUrlMapsUpdateCall { + c := &RegionUrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targetHttpsProxy = targetHttpsProxy + c.urlMap = urlMap + c.urlmap = urlmap return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RegionTargetHttpsProxiesDeleteCall) RequestId(requestId string) *RegionTargetHttpsProxiesDeleteCall { +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. +func (c *RegionUrlMapsUpdateCall) RequestId(requestId string) *RegionUrlMapsUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -116090,7 +129245,7 @@ func (c *RegionTargetHttpsProxiesDeleteCall) RequestId(requestId string) *Region // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesDeleteCall { +func (c *RegionUrlMapsUpdateCall) Fields(s ...googleapi.Field) *RegionUrlMapsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -116098,52 +129253,57 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *Regio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpsProxiesDeleteCall) Context(ctx context.Context) *RegionTargetHttpsProxiesDeleteCall { +func (c *RegionUrlMapsUpdateCall) Context(ctx context.Context) *RegionUrlMapsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpsProxiesDeleteCall) Header() http.Header { +func (c *RegionUrlMapsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpsProxies.delete" call. +// Do executes the "compute.regionUrlMaps.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
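// --- Illustrative usage sketch (not part of the vendored generated code): the difference
// between the patch (JSON merge patch of selected fields) and update (full replacement)
// methods added in this hunk, plus the requestId parameter for idempotent retries.
// Assumes the same package, that UrlMap has Name and DefaultService fields, and
// placeholder identifiers throughout.
func exampleRegionUrlMapPatchAndUpdate(ctx context.Context, s *Service) error {
	// Patch: send only the fields to change; fields omitted from the request are left as-is.
	patch := &UrlMap{DefaultService: "regions/us-central1/backendServices/new-default"}
	if _, err := s.RegionUrlMaps.Patch("my-project", "us-central1", "my-url-map", patch).
		RequestId("example-request-id-1"). // client-chosen ID, reused on retries so the server can de-duplicate
		Context(ctx).
		Do(); err != nil {
		return err
	}
	// Update: send the complete desired UrlMap (full replacement semantics).
	full := &UrlMap{Name: "my-url-map", DefaultService: "regions/us-central1/backendServices/new-default"}
	_, err := s.RegionUrlMaps.Update("my-project", "us-central1", "my-url-map", full).
		Context(ctx).
		Do()
	return err
}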
-func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionUrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -116174,13 +129334,13 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Deletes the specified TargetHttpsProxy resource.", - // "httpMethod": "DELETE", - // "id": "compute.regionTargetHttpsProxies.delete", + // "description": "Updates the specified UrlMap resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.regionUrlMaps.update", // "parameterOrder": [ // "project", // "region", - // "targetHttpsProxy" + // "urlMap" // ], // "parameters": { // "project": { @@ -116198,19 +129358,22 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", // "location": "query", // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to delete.", + // "urlMap": { + // "description": "Name of the UrlMap resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + // "path": "{project}/regions/{region}/urlMaps/{urlMap}", + // "request": { + // "$ref": "UrlMap" + // }, // "response": { // "$ref": "Operation" // }, @@ -116222,100 +129385,93 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionTargetHttpsProxies.get": +// method id "compute.regionUrlMaps.validate": -type RegionTargetHttpsProxiesGetCall struct { - s *Service - project string - region string - targetHttpsProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionUrlMapsValidateCall struct { + s *Service + project string + region string + urlMap string + regionurlmapsvalidaterequest *RegionUrlMapsValidateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified TargetHttpsProxy resource in the specified -// region. Gets a list of available target HTTP proxies by making a -// list() request. 
-func (r *RegionTargetHttpsProxiesService) Get(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesGetCall { - c := &RegionTargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Validate: Runs static validation for the UrlMap. In particular, the +// tests of the provided UrlMap will be run. Calling this method does +// NOT create the UrlMap. +func (r *RegionUrlMapsService) Validate(project string, region string, urlMap string, regionurlmapsvalidaterequest *RegionUrlMapsValidateRequest) *RegionUrlMapsValidateCall { + c := &RegionUrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targetHttpsProxy = targetHttpsProxy + c.urlMap = urlMap + c.regionurlmapsvalidaterequest = regionurlmapsvalidaterequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesGetCall { +func (c *RegionUrlMapsValidateCall) Fields(s ...googleapi.Field) *RegionUrlMapsValidateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionTargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetHttpsProxiesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpsProxiesGetCall) Context(ctx context.Context) *RegionTargetHttpsProxiesGetCall { +func (c *RegionUrlMapsValidateCall) Context(ctx context.Context) *RegionUrlMapsValidateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpsProxiesGetCall) Header() http.Header { +func (c *RegionUrlMapsValidateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionurlmapsvalidaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}/validate") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpsProxies.get" call. -// Exactly one of *TargetHttpsProxy or error will be non-nil. Any +// Do executes the "compute.regionUrlMaps.validate" call. +// Exactly one of *UrlMapsValidateResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TargetHttpsProxy.ServerResponse.Header or (if a response was +// *UrlMapsValidateResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxy, error) { +func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidateResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -116334,7 +129490,7 @@ func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*Tar if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetHttpsProxy{ + ret := &UrlMapsValidateResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -116346,13 +129502,13 @@ func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*Tar } return ret, nil // { - // "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regionTargetHttpsProxies.get", + // "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. 
Calling this method does NOT create the UrlMap.", + // "httpMethod": "POST", + // "id": "compute.regionUrlMaps.validate", // "parameterOrder": [ // "project", // "region", - // "targetHttpsProxy" + // "urlMap" // ], // "parameters": { // "project": { @@ -116369,110 +129525,101 @@ func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*Tar // "required": true, // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to return.", + // "urlMap": { + // "description": "Name of the UrlMap resource to be validated as.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + // "path": "{project}/regions/{region}/urlMaps/{urlMap}/validate", + // "request": { + // "$ref": "RegionUrlMapsValidateRequest" + // }, // "response": { - // "$ref": "TargetHttpsProxy" + // "$ref": "UrlMapsValidateResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionTargetHttpsProxies.insert": +// method id "compute.regions.get": -type RegionTargetHttpsProxiesInsertCall struct { - s *Service - project string - region string - targethttpsproxy *TargetHttpsProxy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionsGetCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a TargetHttpsProxy resource in the specified project -// and region using the data included in the request. -func (r *RegionTargetHttpsProxiesService) Insert(project string, region string, targethttpsproxy *TargetHttpsProxy) *RegionTargetHttpsProxiesInsertCall { - c := &RegionTargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Region resource. Gets a list of available +// regions by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get +func (r *RegionsService) Get(project string, region string) *RegionsGetCall { + c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targethttpsproxy = targethttpsproxy - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
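// --- Illustrative usage sketch (not part of the vendored generated code): running the
// compute.regionUrlMaps.validate method (see the validate call a little earlier in this
// hunk) before creating or updating a URL map. Assumes the same package and that
// RegionUrlMapsValidateRequest carries the candidate map in a Resource field; identifiers
// are placeholders.
func exampleRegionUrlMapValidate(ctx context.Context, s *Service) (*UrlMapsValidateResponse, error) {
	candidate := &UrlMap{Name: "my-url-map", DefaultService: "regions/us-central1/backendServices/default"}
	req := &RegionUrlMapsValidateRequest{Resource: candidate}
	// Validation only runs the UrlMap's tests server-side; it does not create the resource.
	return s.RegionUrlMaps.Validate("my-project", "us-central1", "my-url-map", req).
		Context(ctx).
		Do()
}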
-func (c *RegionTargetHttpsProxiesInsertCall) RequestId(requestId string) *RegionTargetHttpsProxiesInsertCall { - c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesInsertCall { +func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpsProxiesInsertCall) Context(ctx context.Context) *RegionTargetHttpsProxiesInsertCall { +func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpsProxiesInsertCall) Header() http.Header { +func (c *RegionsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -116484,14 +129631,14 @@ func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpsProxies.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regions.get" call. +// Exactly one of *Region or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Region.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -116510,7 +129657,7 @@ func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Region{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -116522,9 +129669,9 @@ func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.regionTargetHttpsProxies.insert", + // "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regions.get", // "parameterOrder": [ // "project", // "region" @@ -116538,51 +129685,43 @@ func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetHttpsProxies", - // "request": { - // "$ref": "TargetHttpsProxy" - // }, + // "path": "{project}/regions/{region}", // "response": { - // "$ref": "Operation" + // "$ref": "Region" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionTargetHttpsProxies.list": +// method id "compute.regions.list": -type RegionTargetHttpsProxiesListCall struct { +type RegionsListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of TargetHttpsProxy resources available to -// the specified project in the specified region. 
-func (r *RegionTargetHttpsProxiesService) List(project string, region string) *RegionTargetHttpsProxiesListCall { - c := &RegionTargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of region resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list +func (r *RegionsService) List(project string) *RegionsListCall { + c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -116608,7 +129747,7 @@ func (r *RegionTargetHttpsProxiesService) List(project string, region string) *R // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionTargetHttpsProxiesListCall) Filter(filter string) *RegionTargetHttpsProxiesListCall { +func (c *RegionsListCall) Filter(filter string) *RegionsListCall { c.urlParams_.Set("filter", filter) return c } @@ -116619,7 +129758,7 @@ func (c *RegionTargetHttpsProxiesListCall) Filter(filter string) *RegionTargetHt // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionTargetHttpsProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpsProxiesListCall { +func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -116636,7 +129775,7 @@ func (c *RegionTargetHttpsProxiesListCall) MaxResults(maxResults int64) *RegionT // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionTargetHttpsProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpsProxiesListCall { +func (c *RegionsListCall) OrderBy(orderBy string) *RegionsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -116644,7 +129783,7 @@ func (c *RegionTargetHttpsProxiesListCall) OrderBy(orderBy string) *RegionTarget // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionTargetHttpsProxiesListCall) PageToken(pageToken string) *RegionTargetHttpsProxiesListCall { +func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -116652,7 +129791,7 @@ func (c *RegionTargetHttpsProxiesListCall) PageToken(pageToken string) *RegionTa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesListCall { +func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -116662,7 +129801,7 @@ func (c *RegionTargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *RegionT // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
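// --- Illustrative usage sketch (not part of the vendored generated code): an
// ETag-conditional read with IfNoneMatch and googleapi.IsNotModified, as the comment
// above describes. Assumes the same package, that Service exposes a Regions field, and
// that the current Etag is available from the response headers; project and region names
// are placeholders.
func exampleRegionGetIfChanged(ctx context.Context, s *Service, etag string) (*Region, error) {
	region, err := s.Regions.Get("my-project", "us-central1").
		IfNoneMatch(etag). // Etag saved from a previous response
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged since the cached copy; keep using it
	}
	if err != nil {
		return nil, err
	}
	// region.Header.Get("Etag") would hold the new Etag to cache for the next call.
	return region, nil
}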
-func (c *RegionTargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetHttpsProxiesListCall { +func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall { c.ifNoneMatch_ = entityTag return c } @@ -116670,21 +129809,21 @@ func (c *RegionTargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *Region // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpsProxiesListCall) Context(ctx context.Context) *RegionTargetHttpsProxiesListCall { +func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpsProxiesListCall) Header() http.Header { +func (c *RegionsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -116693,430 +129832,30 @@ func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionTargetHttpsProxies.list" call. -// Exactly one of *TargetHttpsProxyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetHttpsProxyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyList, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TargetHttpsProxyList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionTargetHttpsProxies.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetHttpsProxies", - // "response": { - // "$ref": "TargetHttpsProxyList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionTargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionTargetHttpsProxies.setSslCertificates": - -type RegionTargetHttpsProxiesSetSslCertificatesCall struct { - s *Service - project string - region string - targetHttpsProxy string - regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. -func (r *RegionTargetHttpsProxiesService) SetSslCertificates(project string, region string, targetHttpsProxy string, regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest) *RegionTargetHttpsProxiesSetSslCertificatesCall { - c := &RegionTargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetHttpsProxy = targetHttpsProxy - c.regiontargethttpsproxiessetsslcertificatesrequest = regiontargethttpsproxiessetsslcertificatesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetSslCertificatesCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetSslCertificatesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetSslCertificatesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiontargethttpsproxiessetsslcertificatesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpsProxy": c.targetHttpsProxy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionTargetHttpsProxies.setSslCertificates" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Replaces SslCertificates for TargetHttpsProxy.", - // "httpMethod": "POST", - // "id": "compute.regionTargetHttpsProxies.setSslCertificates", - // "parameterOrder": [ - // "project", - // "region", - // "targetHttpsProxy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", - // "request": { - // "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionTargetHttpsProxies.setUrlMap": - -type RegionTargetHttpsProxiesSetUrlMapCall struct { - s *Service - project string - region string - targetHttpsProxy string - urlmapreference *UrlMapReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetUrlMap: Changes the URL map for TargetHttpsProxy. 
-func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region string, targetHttpsProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpsProxiesSetUrlMapCall { - c := &RegionTargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetHttpsProxy = targetHttpsProxy - c.urlmapreference = urlmapreference - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionTargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetUrlMapCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetUrlMapCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetUrlMapCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpsProxies.setUrlMap" call. -// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// Do executes the "compute.regions.list" call. +// Exactly one of *RegionList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *RegionList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -117135,7 +129874,7 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -117147,143 +129886,227 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the URL map for TargetHttpsProxy.", - // "httpMethod": "POST", - // "id": "compute.regionTargetHttpsProxies.setUrlMap", + // "description": "Retrieves the list of region resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.regions.list", // "parameterOrder": [ - // "project", - // "region", - // "targetHttpsProxy" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy to set a URL map for.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", - // "request": { - // "$ref": "UrlMapReference" - // }, + // "path": "{project}/regions", // "response": { - // "$ref": "Operation" + // "$ref": "RegionList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionTargetHttpsProxies.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
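The Do method and Pages helper generated above implement compute.regions.list, including the pageToken reset that the generated code performs after iteration. A small sketch of letting Pages walk every page instead of threading NextPageToken by hand; svc is assumed to be a *compute.Service built as in the previous sketch, with the same imports (context, log, and the vendored compute package).

```go
// Sketch: auto-pagination over compute.regions.list via the generated Pages helper.
func printRegions(ctx context.Context, svc *compute.Service, project string) error {
	return svc.Regions.List(project).MaxResults(50).
		Pages(ctx, func(page *compute.RegionList) error {
			for _, region := range page.Items {
				log.Println(region.Name)
			}
			return nil // a non-nil error here stops the iteration
		})
}
```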
+func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionTargetHttpsProxiesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.reservations.aggregatedList": + +type ReservationsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionTargetHttpsProxiesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionTargetHttpsProxiesTestIamPermissionsCall { - c := &RegionTargetHttpsProxiesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of reservations. +func (r *ReservationsService) AggregatedList(project string) *ReservationsAggregatedListCall { + c := &ReservationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ReservationsAggregatedListCall) Filter(filter string) *ReservationsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *ReservationsAggregatedListCall) MaxResults(maxResults int64) *ReservationsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ReservationsAggregatedListCall) OrderBy(orderBy string) *ReservationsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ReservationsAggregatedListCall) PageToken(pageToken string) *ReservationsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesTestIamPermissionsCall { +func (c *ReservationsAggregatedListCall) Fields(s ...googleapi.Field) *ReservationsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ReservationsAggregatedListCall) IfNoneMatch(entityTag string) *ReservationsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) Context(ctx context.Context) *RegionTargetHttpsProxiesTestIamPermissionsCall { +func (c *ReservationsAggregatedListCall) Context(ctx context.Context) *ReservationsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) Header() http.Header { +func (c *ReservationsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/reservations") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpsProxies.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.reservations.aggregatedList" call. +// Exactly one of *ReservationAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ReservationAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*ReservationAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -117302,7 +130125,7 @@ func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.Ca if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &ReservationAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -117314,43 +130137,47 @@ func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionTargetHttpsProxies.testIamPermissions", + // "description": "Retrieves an aggregated list of reservations.", + // "httpMethod": "GET", + // "id": "compute.reservations.aggregatedList", // "parameterOrder": [ - // "project", - // "region", - // "resource" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetHttpsProxies/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/aggregated/reservations", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "ReservationAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -117361,30 +130188,63 @@ func (c *RegionTargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.Ca } -// method id "compute.regionUrlMaps.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ReservationsAggregatedListCall) Pages(ctx context.Context, f func(*ReservationAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionUrlMapsDeleteCall struct { - s *Service - project string - region string - urlMap string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.reservations.delete": + +type ReservationsDeleteCall struct { + s *Service + project string + zone string + reservation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified UrlMap resource. -func (r *RegionUrlMapsService) Delete(project string, region string, urlMap string) *RegionUrlMapsDeleteCall { - c := &RegionUrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified reservation. +func (r *ReservationsService) Delete(project string, zone string, reservation string) *ReservationsDeleteCall { + c := &ReservationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.urlMap = urlMap + c.zone = zone + c.reservation = reservation return c } -// RequestId sets the optional parameter "requestId": begin_interface: -// MixerMutationRequestBuilder Request ID to support idempotency. -func (c *RegionUrlMapsDeleteCall) RequestId(requestId string) *RegionUrlMapsDeleteCall { +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ReservationsDeleteCall) RequestId(requestId string) *ReservationsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -117392,7 +130252,7 @@ func (c *RegionUrlMapsDeleteCall) RequestId(requestId string) *RegionUrlMapsDele // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionUrlMapsDeleteCall) Fields(s ...googleapi.Field) *RegionUrlMapsDeleteCall { +func (c *ReservationsDeleteCall) Fields(s ...googleapi.Field) *ReservationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -117400,21 +130260,21 @@ func (c *RegionUrlMapsDeleteCall) Fields(s ...googleapi.Field) *RegionUrlMapsDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionUrlMapsDeleteCall) Context(ctx context.Context) *RegionUrlMapsDeleteCall { +func (c *ReservationsDeleteCall) Context(ctx context.Context) *ReservationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionUrlMapsDeleteCall) Header() http.Header { +func (c *ReservationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionUrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -117423,7 +130283,7 @@ func (c *RegionUrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{reservation}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -117431,21 +130291,21 @@ func (c *RegionUrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "urlMap": c.urlMap, + "project": c.project, + "zone": c.zone, + "reservation": c.reservation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionUrlMaps.delete" call. +// Do executes the "compute.reservations.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
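The ReservationsAggregatedListCall generated above returns reservations grouped per zone and carries the same Pages helper as the other list calls. A hedged sketch of walking that aggregated result; the per-scope field names (Items as a map, a Reservations slice inside each scoped list) follow the usual aggregated-list shape and are assumptions, since they do not appear in this hunk.

```go
// Sketch: paging through compute.reservations.aggregatedList, which groups
// reservations per zone. Field names on the scoped list are assumed.
func listAllReservations(ctx context.Context, svc *compute.Service, project string) error {
	return svc.Reservations.AggregatedList(project).
		Pages(ctx, func(page *compute.ReservationAggregatedList) error {
			for scope, scoped := range page.Items { // scope keys look like "zones/us-central1-a"
				for _, r := range scoped.Reservations {
					log.Printf("%s: reservation %s", scope, r.Name)
				}
			}
			return nil
		})
}
```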
-func (c *RegionUrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ReservationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -117476,13 +130336,13 @@ func (c *RegionUrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified UrlMap resource.", + // "description": "Deletes the specified reservation.", // "httpMethod": "DELETE", - // "id": "compute.regionUrlMaps.delete", + // "id": "compute.reservations.delete", // "parameterOrder": [ // "project", - // "region", - // "urlMap" + // "zone", + // "reservation" // ], // "parameters": { // "project": { @@ -117492,27 +130352,27 @@ func (c *RegionUrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { - // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to delete.", + // "reservation": { + // "description": "Name of the reservation to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/urlMaps/{urlMap}", + // "path": "{project}/zones/{zone}/reservations/{reservation}", // "response": { // "$ref": "Operation" // }, @@ -117524,33 +130384,32 @@ func (c *RegionUrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.regionUrlMaps.get": +// method id "compute.reservations.get": -type RegionUrlMapsGetCall struct { +type ReservationsGetCall struct { s *Service project string - region string - urlMap string + zone string + reservation string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified UrlMap resource. Gets a list of available -// URL maps by making a list() request. -func (r *RegionUrlMapsService) Get(project string, region string, urlMap string) *RegionUrlMapsGetCall { - c := &RegionUrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Retrieves all information of the specified reservation. 
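The reservations.delete call generated above accepts the optional requestId whose retry semantics its comment spells out. A minimal sketch under those assumptions; the UUID literal is a placeholder, and the point is that a retry of the same logical request resends the same value. Like the other mutating calls, Do returns an asynchronous *compute.Operation that callers normally poll before relying on the result.

```go
// Sketch: idempotent reservation deletion using the requestId parameter.
func deleteReservation(ctx context.Context, svc *compute.Service, project, zone, name string) (*compute.Operation, error) {
	const requestID = "d2f9f9c4-3c61-4a4e-9b1a-5f4c2f0e8a11" // placeholder UUID; reuse on retry
	return svc.Reservations.Delete(project, zone, name).
		RequestId(requestID).
		Context(ctx).
		Do()
}
```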
+func (r *ReservationsService) Get(project string, zone string, reservation string) *ReservationsGetCall { + c := &ReservationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.urlMap = urlMap + c.zone = zone + c.reservation = reservation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionUrlMapsGetCall) Fields(s ...googleapi.Field) *RegionUrlMapsGetCall { +func (c *ReservationsGetCall) Fields(s ...googleapi.Field) *ReservationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -117560,7 +130419,7 @@ func (c *RegionUrlMapsGetCall) Fields(s ...googleapi.Field) *RegionUrlMapsGetCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionUrlMapsGetCall) IfNoneMatch(entityTag string) *RegionUrlMapsGetCall { +func (c *ReservationsGetCall) IfNoneMatch(entityTag string) *ReservationsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -117568,21 +130427,21 @@ func (c *RegionUrlMapsGetCall) IfNoneMatch(entityTag string) *RegionUrlMapsGetCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionUrlMapsGetCall) Context(ctx context.Context) *RegionUrlMapsGetCall { +func (c *ReservationsGetCall) Context(ctx context.Context) *ReservationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionUrlMapsGetCall) Header() http.Header { +func (c *ReservationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionUrlMapsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -117594,7 +130453,7 @@ func (c *RegionUrlMapsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{reservation}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -117602,21 +130461,21 @@ func (c *RegionUrlMapsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "urlMap": c.urlMap, + "project": c.project, + "zone": c.zone, + "reservation": c.reservation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionUrlMaps.get" call. -// Exactly one of *UrlMap or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *UrlMap.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
-func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { +// Do executes the "compute.reservations.get" call. +// Exactly one of *Reservation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Reservation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ReservationsGetCall) Do(opts ...googleapi.CallOption) (*Reservation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -117635,7 +130494,7 @@ func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &UrlMap{ + ret := &Reservation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -117647,13 +130506,13 @@ func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) } return ret, nil // { - // "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", + // "description": "Retrieves all information of the specified reservation.", // "httpMethod": "GET", - // "id": "compute.regionUrlMaps.get", + // "id": "compute.reservations.get", // "parameterOrder": [ // "project", - // "region", - // "urlMap" + // "zone", + // "reservation" // ], // "parameters": { // "project": { @@ -117663,24 +130522,24 @@ func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "reservation": { + // "description": "Name of the reservation to retrieve.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to return.", + // "zone": { + // "description": "Name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/urlMaps/{urlMap}", + // "path": "{project}/zones/{zone}/reservations/{reservation}", // "response": { - // "$ref": "UrlMap" + // "$ref": "Reservation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -117691,96 +130550,99 @@ func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) } -// method id "compute.regionUrlMaps.insert": +// method id "compute.reservations.getIamPolicy": -type RegionUrlMapsInsertCall struct { - s *Service - project string - region string - urlmap *UrlMap - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ReservationsGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a UrlMap resource in the specified project using the -// data included in the request. 
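The reservations.get call generated above is a read: its Do returns the resource body (*compute.Reservation) directly rather than an Operation. A short sketch, assuming the same service value as in the earlier sketches.

```go
// Sketch: fetching a single reservation and logging a couple of its fields.
func getReservation(ctx context.Context, svc *compute.Service, project, zone, name string) (*compute.Reservation, error) {
	r, err := svc.Reservations.Get(project, zone, name).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	log.Printf("reservation %s (status %s)", r.Name, r.Status)
	return r, nil
}
```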
-func (r *RegionUrlMapsService) Insert(project string, region string, urlmap *UrlMap) *RegionUrlMapsInsertCall { - c := &RegionUrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *ReservationsService) GetIamPolicy(project string, zone string, resource string) *ReservationsGetIamPolicyCall { + c := &ReservationsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.urlmap = urlmap - return c -} - -// RequestId sets the optional parameter "requestId": begin_interface: -// MixerMutationRequestBuilder Request ID to support idempotency. -func (c *RegionUrlMapsInsertCall) RequestId(requestId string) *RegionUrlMapsInsertCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionUrlMapsInsertCall) Fields(s ...googleapi.Field) *RegionUrlMapsInsertCall { +func (c *ReservationsGetIamPolicyCall) Fields(s ...googleapi.Field) *ReservationsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ReservationsGetIamPolicyCall) IfNoneMatch(entityTag string) *ReservationsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionUrlMapsInsertCall) Context(ctx context.Context) *RegionUrlMapsInsertCall { +func (c *ReservationsGetIamPolicyCall) Context(ctx context.Context) *ReservationsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionUrlMapsInsertCall) Header() http.Header { +func (c *ReservationsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionUrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{resource}/getIamPolicy") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionUrlMaps.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionUrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.reservations.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ReservationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -117799,7 +130661,7 @@ func (c *RegionUrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -117811,12 +130673,13 @@ func (c *RegionUrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a UrlMap resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.regionUrlMaps.insert", + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.reservations.getIamPolicy", // "parameterOrder": [ // "project", - // "region" + // "zone", + // "resource" // ], // "parameters": { // "project": { @@ -117826,61 +130689,70 @@ func (c *RegionUrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/urlMaps", - // "request": { - // "$ref": "UrlMap" - // }, + // "path": "{project}/zones/{zone}/reservations/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionUrlMaps.invalidateCache": +// method id "compute.reservations.insert": -type RegionUrlMapsInvalidateCacheCall struct { - s *Service - project string - region string - urlMap string - cacheinvalidationrule *CacheInvalidationRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ReservationsInsertCall struct { + s *Service + project string + zone string + reservation *Reservation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// InvalidateCache: Initiates a cache invalidation operation, -// invalidating the specified path, scoped to the specified UrlMap. -func (r *RegionUrlMapsService) InvalidateCache(project string, region string, urlMap string, cacheinvalidationrule *CacheInvalidationRule) *RegionUrlMapsInvalidateCacheCall { - c := &RegionUrlMapsInvalidateCacheCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a new reservation. +func (r *ReservationsService) Insert(project string, zone string, reservation *Reservation) *ReservationsInsertCall { + c := &ReservationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.urlMap = urlMap - c.cacheinvalidationrule = cacheinvalidationrule + c.zone = zone + c.reservation = reservation return c } -// RequestId sets the optional parameter "requestId": begin_interface: -// MixerMutationRequestBuilder Request ID to support idempotency. -func (c *RegionUrlMapsInvalidateCacheCall) RequestId(requestId string) *RegionUrlMapsInvalidateCacheCall { +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ReservationsInsertCall) RequestId(requestId string) *ReservationsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -117888,7 +130760,7 @@ func (c *RegionUrlMapsInvalidateCacheCall) RequestId(requestId string) *RegionUr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionUrlMapsInvalidateCacheCall) Fields(s ...googleapi.Field) *RegionUrlMapsInvalidateCacheCall { +func (c *ReservationsInsertCall) Fields(s ...googleapi.Field) *ReservationsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -117896,35 +130768,35 @@ func (c *RegionUrlMapsInvalidateCacheCall) Fields(s ...googleapi.Field) *RegionU // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionUrlMapsInvalidateCacheCall) Context(ctx context.Context) *RegionUrlMapsInvalidateCacheCall { +func (c *ReservationsInsertCall) Context(ctx context.Context) *ReservationsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionUrlMapsInvalidateCacheCall) Header() http.Header { +func (c *ReservationsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionUrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.cacheinvalidationrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.reservation) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}/invalidateCache") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -117933,20 +130805,19 @@ func (c *RegionUrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "urlMap": c.urlMap, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionUrlMaps.invalidateCache" call. +// Do executes the "compute.reservations.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionUrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ReservationsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -117977,13 +130848,12 @@ func (c *RegionUrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", + // "description": "Creates a new reservation.", // "httpMethod": "POST", - // "id": "compute.regionUrlMaps.invalidateCache", + // "id": "compute.reservations.insert", // "parameterOrder": [ // "project", - // "region", - // "urlMap" + // "zone" // ], // "parameters": { // "project": { @@ -117993,29 +130863,22 @@ func (c *RegionUrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { - // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "urlMap": { - // "description": "Name of the UrlMap scoping this request.", + // "zone": { + // "description": "Name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/urlMaps/{urlMap}/invalidateCache", + // "path": "{project}/zones/{zone}/reservations", // "request": { - // "$ref": "CacheInvalidationRule" + // "$ref": "Reservation" // }, // "response": { // "$ref": "Operation" @@ -118028,24 +130891,24 @@ func (c *RegionUrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.regionUrlMaps.list": +// method id "compute.reservations.list": -type RegionUrlMapsListCall struct { +type ReservationsListCall struct { s *Service project string - region string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of UrlMap resources available to the -// specified project in the specified region. 
-func (r *RegionUrlMapsService) List(project string, region string) *RegionUrlMapsListCall { - c := &RegionUrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: A list all the reservations that have been configured for the +// specified project in specified zone. +func (r *ReservationsService) List(project string, zone string) *ReservationsListCall { + c := &ReservationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region + c.zone = zone return c } @@ -118071,7 +130934,7 @@ func (r *RegionUrlMapsService) List(project string, region string) *RegionUrlMap // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionUrlMapsListCall) Filter(filter string) *RegionUrlMapsListCall { +func (c *ReservationsListCall) Filter(filter string) *ReservationsListCall { c.urlParams_.Set("filter", filter) return c } @@ -118082,7 +130945,7 @@ func (c *RegionUrlMapsListCall) Filter(filter string) *RegionUrlMapsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionUrlMapsListCall) MaxResults(maxResults int64) *RegionUrlMapsListCall { +func (c *ReservationsListCall) MaxResults(maxResults int64) *ReservationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -118099,7 +130962,7 @@ func (c *RegionUrlMapsListCall) MaxResults(maxResults int64) *RegionUrlMapsListC // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionUrlMapsListCall) OrderBy(orderBy string) *RegionUrlMapsListCall { +func (c *ReservationsListCall) OrderBy(orderBy string) *ReservationsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -118107,7 +130970,7 @@ func (c *RegionUrlMapsListCall) OrderBy(orderBy string) *RegionUrlMapsListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionUrlMapsListCall) PageToken(pageToken string) *RegionUrlMapsListCall { +func (c *ReservationsListCall) PageToken(pageToken string) *ReservationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -118115,7 +130978,7 @@ func (c *RegionUrlMapsListCall) PageToken(pageToken string) *RegionUrlMapsListCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionUrlMapsListCall) Fields(s ...googleapi.Field) *RegionUrlMapsListCall { +func (c *ReservationsListCall) Fields(s ...googleapi.Field) *ReservationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -118125,7 +130988,7 @@ func (c *RegionUrlMapsListCall) Fields(s ...googleapi.Field) *RegionUrlMapsListC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *RegionUrlMapsListCall) IfNoneMatch(entityTag string) *RegionUrlMapsListCall { +func (c *ReservationsListCall) IfNoneMatch(entityTag string) *ReservationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -118133,21 +130996,21 @@ func (c *RegionUrlMapsListCall) IfNoneMatch(entityTag string) *RegionUrlMapsList // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionUrlMapsListCall) Context(ctx context.Context) *RegionUrlMapsListCall { +func (c *ReservationsListCall) Context(ctx context.Context) *ReservationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionUrlMapsListCall) Header() http.Header { +func (c *ReservationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionUrlMapsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -118159,7 +131022,7 @@ func (c *RegionUrlMapsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -118168,19 +131031,19 @@ func (c *RegionUrlMapsListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionUrlMaps.list" call. -// Exactly one of *UrlMapList or error will be non-nil. Any non-2xx +// Do executes the "compute.reservations.list" call. +// Exactly one of *ReservationList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *UrlMapList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) { +// *ReservationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -118199,7 +131062,7 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &UrlMapList{ + ret := &ReservationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -118211,12 +131074,12 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e } return ret, nil // { - // "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region.", + // "description": "A list all the reservations that have been configured for the specified project in specified zone.", // "httpMethod": "GET", - // "id": "compute.regionUrlMaps.list", + // "id": "compute.reservations.list", // "parameterOrder": [ // "project", - // "region" + // "zone" // ], // "parameters": { // "filter": { @@ -118249,17 +131112,17 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "zone": { + // "description": "Name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/urlMaps", + // "path": "{project}/zones/{zone}/reservations", // "response": { - // "$ref": "UrlMapList" + // "$ref": "ReservationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -118273,7 +131136,7 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionUrlMapsListCall) Pages(ctx context.Context, f func(*UrlMapList) error) error { +func (c *ReservationsListCall) Pages(ctx context.Context, f func(*ReservationList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -118291,34 +131154,45 @@ func (c *RegionUrlMapsListCall) Pages(ctx context.Context, f func(*UrlMapList) e } } -// method id "compute.regionUrlMaps.patch": +// method id "compute.reservations.resize": -type RegionUrlMapsPatchCall struct { - s *Service - project string - region string - urlMap string - urlmap *UrlMap - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ReservationsResizeCall struct { + s *Service + project string + zone string + reservation string + reservationsresizerequest *ReservationsResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified UrlMap resource with the data included -// in the request. This method supports PATCH semantics and uses JSON -// merge patch format and processing rules. 
-func (r *RegionUrlMapsService) Patch(project string, region string, urlMap string, urlmap *UrlMap) *RegionUrlMapsPatchCall { - c := &RegionUrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the reservation (applicable to standalone +// reservations only) +func (r *ReservationsService) Resize(project string, zone string, reservation string, reservationsresizerequest *ReservationsResizeRequest) *ReservationsResizeCall { + c := &ReservationsResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.urlMap = urlMap - c.urlmap = urlmap + c.zone = zone + c.reservation = reservation + c.reservationsresizerequest = reservationsresizerequest return c } -// RequestId sets the optional parameter "requestId": begin_interface: -// MixerMutationRequestBuilder Request ID to support idempotency. -func (c *RegionUrlMapsPatchCall) RequestId(requestId string) *RegionUrlMapsPatchCall { +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ReservationsResizeCall) RequestId(requestId string) *ReservationsResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -118326,174 +131200,7 @@ func (c *RegionUrlMapsPatchCall) RequestId(requestId string) *RegionUrlMapsPatch // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionUrlMapsPatchCall) Fields(s ...googleapi.Field) *RegionUrlMapsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionUrlMapsPatchCall) Context(ctx context.Context) *RegionUrlMapsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionUrlMapsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionUrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "urlMap": c.urlMap, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionUrlMaps.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionUrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.regionUrlMaps.patch", - // "parameterOrder": [ - // "project", - // "region", - // "urlMap" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", - // "location": "query", - // "type": "string" - // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/urlMaps/{urlMap}", - // "request": { - // "$ref": "UrlMap" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionUrlMaps.testIamPermissions": - -type RegionUrlMapsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *RegionUrlMapsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionUrlMapsTestIamPermissionsCall { - c := &RegionUrlMapsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionUrlMapsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionUrlMapsTestIamPermissionsCall { +func (c *ReservationsResizeCall) Fields(s ...googleapi.Field) *ReservationsResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -118501,35 +131208,35 @@ func (c *RegionUrlMapsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Regi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionUrlMapsTestIamPermissionsCall) Context(ctx context.Context) *RegionUrlMapsTestIamPermissionsCall { +func (c *ReservationsResizeCall) Context(ctx context.Context) *ReservationsResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionUrlMapsTestIamPermissionsCall) Header() http.Header { +func (c *ReservationsResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionUrlMapsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.reservationsresizerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{reservation}/resize") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -118537,191 +131244,21 @@ func (c *RegionUrlMapsTestIamPermissionsCall) doRequest(alt string) (*http.Respo } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionUrlMaps.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionUrlMapsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionUrlMaps.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/urlMaps/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionUrlMaps.update": - -type RegionUrlMapsUpdateCall struct { - s *Service - project string - region string - urlMap string - urlmap *UrlMap - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates the specified UrlMap resource with the data included -// in the request. -func (r *RegionUrlMapsService) Update(project string, region string, urlMap string, urlmap *UrlMap) *RegionUrlMapsUpdateCall { - c := &RegionUrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.urlMap = urlMap - c.urlmap = urlmap - return c -} - -// RequestId sets the optional parameter "requestId": begin_interface: -// MixerMutationRequestBuilder Request ID to support idempotency. -func (c *RegionUrlMapsUpdateCall) RequestId(requestId string) *RegionUrlMapsUpdateCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionUrlMapsUpdateCall) Fields(s ...googleapi.Field) *RegionUrlMapsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *RegionUrlMapsUpdateCall) Context(ctx context.Context) *RegionUrlMapsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionUrlMapsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionUrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "urlMap": c.urlMap, + "project": c.project, + "zone": c.zone, + "reservation": c.reservation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionUrlMaps.update" call. +// Do executes the "compute.reservations.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionUrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ReservationsResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -118752,13 +131289,13 @@ func (c *RegionUrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified UrlMap resource with the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.regionUrlMaps.update", + // "description": "Resizes the reservation (applicable to standalone reservations only)", + // "httpMethod": "POST", + // "id": "compute.reservations.resize", // "parameterOrder": [ // "project", - // "region", - // "urlMap" + // "zone", + // "reservation" // ], // "parameters": { // "project": { @@ -118768,29 +131305,29 @@ func (c *RegionUrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "reservation": { + // "description": "Name of the reservation to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", - // "location": "query", - // "type": "string" - // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to update.", + // "zone": { + // "description": "Name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/urlMaps/{urlMap}", + // "path": "{project}/zones/{zone}/reservations/{reservation}/resize", // "request": { - // "$ref": "UrlMap" + // "$ref": "ReservationsResizeRequest" // }, // "response": { // "$ref": "Operation" @@ -118803,35 +131340,34 @@ func (c *RegionUrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.regionUrlMaps.validate": +// method id "compute.reservations.setIamPolicy": -type RegionUrlMapsValidateCall struct { - s *Service - project string - region string - urlMap string - regionurlmapsvalidaterequest *RegionUrlMapsValidateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ReservationsSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Validate: Runs static validation for the UrlMap. In particular, the -// tests of the provided UrlMap will be run. Calling this method does -// NOT create the UrlMap. -func (r *RegionUrlMapsService) Validate(project string, region string, urlMap string, regionurlmapsvalidaterequest *RegionUrlMapsValidateRequest) *RegionUrlMapsValidateCall { - c := &RegionUrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *ReservationsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *ReservationsSetIamPolicyCall { + c := &ReservationsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.urlMap = urlMap - c.regionurlmapsvalidaterequest = regionurlmapsvalidaterequest + c.zone = zone + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionUrlMapsValidateCall) Fields(s ...googleapi.Field) *RegionUrlMapsValidateCall { +func (c *ReservationsSetIamPolicyCall) Fields(s ...googleapi.Field) *ReservationsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -118839,35 +131375,35 @@ func (c *RegionUrlMapsValidateCall) Fields(s ...googleapi.Field) *RegionUrlMapsV // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionUrlMapsValidateCall) Context(ctx context.Context) *RegionUrlMapsValidateCall { +func (c *ReservationsSetIamPolicyCall) Context(ctx context.Context) *ReservationsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionUrlMapsValidateCall) Header() http.Header { +func (c *ReservationsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionUrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionurlmapsvalidaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}/validate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -118875,21 +131411,21 @@ func (c *RegionUrlMapsValidateCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "urlMap": c.urlMap, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionUrlMaps.validate" call. -// Exactly one of *UrlMapsValidateResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *UrlMapsValidateResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidateResponse, error) { +// Do executes the "compute.reservations.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ReservationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -118908,7 +131444,7 @@ func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsVa if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &UrlMapsValidateResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -118920,13 +131456,13 @@ func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsVa } return ret, nil // { - // "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.regionUrlMaps.validate", + // "id": "compute.reservations.setIamPolicy", // "parameterOrder": [ // "project", - // "region", - // "urlMap" + // "zone", + // "resource" // ], // "parameters": { // "project": { @@ -118936,27 +131472,27 @@ func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsVa // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to be validated as.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/urlMaps/{urlMap}/validate", + // "path": "{project}/zones/{zone}/reservations/{resource}/setIamPolicy", // "request": { - // "$ref": "RegionUrlMapsValidateRequest" + // "$ref": "ZoneSetPolicyRequest" // }, // "response": { - // "$ref": "UrlMapsValidateResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -118966,97 +131502,92 @@ func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsVa } -// method id "compute.regions.get": +// method id "compute.reservations.testIamPermissions": -type RegionsGetCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ReservationsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Region resource. Gets a list of available -// regions by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get -func (r *RegionsService) Get(project string, region string) *RegionsGetCall { - c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
+func (r *ReservationsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *ReservationsTestIamPermissionsCall { + c := &ReservationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { +func (c *ReservationsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ReservationsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { +func (c *ReservationsTestIamPermissionsCall) Context(ctx context.Context) *ReservationsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionsGetCall) Header() http.Header { +func (c *ReservationsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regions.get" call. -// Exactly one of *Region or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Region.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { +// Do executes the "compute.reservations.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ReservationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -119075,7 +131606,7 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Region{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -119087,12 +131618,13 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { } return ret, nil // { - // "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regions.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.reservations.testIamPermissions", // "parameterOrder": [ // "project", - // "region" + // "zone", + // "resource" // ], // "parameters": { // "project": { @@ -119102,249 +131634,27 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region resource to return.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}", - // "response": { - // "$ref": "Region" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regions.list": - -type RegionsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of region resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list -func (r *RegionsService) List(project string) *RegionsListCall { - c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. 
The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *RegionsListCall) Filter(filter string) *RegionsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionsListCall) OrderBy(orderBy string) *RegionsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regions.list" call. -// Exactly one of *RegionList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RegionList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RegionList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of region resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.regions.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions", + // "path": "{project}/zones/{zone}/reservations/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "RegionList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -119355,27 +131665,6 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - // method id "compute.resourcePolicies.aggregatedList": type ResourcePoliciesAggregatedListCall struct { @@ -119785,7 +132074,7 @@ func (c *ResourcePoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "resourcePolicy": { // "description": "Name of the resource policy to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -119950,7 +132239,7 @@ func (c *ResourcePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResourcePol // "resourcePolicy": { // "description": "Name of the resource policy to retrieve.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -121306,7 +133595,7 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "router": { // "description": "Name of the Router resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -121472,7 +133761,7 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { // "router": { // "description": "Name of the Router resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -121551,6 +133840,15 @@ func (c *RoutersGetNatMappingInfoCall) MaxResults(maxResults int64) *RoutersGetN return c } +// NatName sets the optional parameter "natName": Name of the nat +// service to filter the Nat Mapping information. If it is omitted, all +// nats for this router will be returned. Name should conform to +// RFC1035. +func (c *RoutersGetNatMappingInfoCall) NatName(natName string) *RoutersGetNatMappingInfoCall { + c.urlParams_.Set("natName", natName) + return c +} + // OrderBy sets the optional parameter "orderBy": Sorts list results by // a certain order. By default, results are returned in alphanumerical // order based on the resource name. @@ -121698,6 +133996,11 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp // "minimum": "0", // "type": "integer" // }, + // "natName": { + // "description": "Name of the nat service to filter the Nat Mapping information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", + // "location": "query", + // "type": "string" + // }, // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", @@ -121725,7 +134028,7 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp // "router": { // "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -121913,7 +134216,7 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt // "router": { // "description": "Name of the Router resource to query.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -122536,7 +134839,7 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "router": { // "description": "Name of the Router resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -122699,7 +135002,7 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe // "router": { // "description": "Name of the Router resource to query.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -123049,7 +135352,7 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "router": { // "description": "Name of the Router resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -124098,7 +136401,7 @@ func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati // "securityPolicy": { // "description": "Name of the security policy to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -124270,7 +136573,7 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "securityPolicy": { // "description": "Name of the security policy to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -124425,7 +136728,7 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol // "securityPolicy": { // "description": "Name of the security policy to get.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -124593,7 +136896,7 @@ func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Securit // "securityPolicy": { // "description": "Name of the security policy to which the queried rule belongs.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -125427,7 +137730,7 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "securityPolicy": { // "description": "Name of the security policy to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -125597,7 +137900,7 @@ func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera // "securityPolicy": { // "description": "Name of the security policy to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -125758,7 +138061,7 @@ func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper // "securityPolicy": { // "description": "Name of the security policy to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -127668,7 +139971,7 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "sslCertificate": { // "description": "Name of the SslCertificate resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -127823,7 +140126,7 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica // "sslCertificate": { // "description": "Name of the SslCertificate resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -130113,7 +142416,7 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "subnetwork": { // "description": "Name of the Subnetwork resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -130296,7 +142599,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op // "subnetwork": { // "description": "Name of the Subnetwork resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -130465,7 +142768,7 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro // "subnetwork": { // "description": "Name of the Subnetwork resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -131528,7 +143831,7 @@ func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err // "subnetwork": { // "description": "Name of the Subnetwork resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } 
@@ -131877,7 +144180,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio // "subnetwork": { // "description": "Name of the Subnetwork resource.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -132460,7 +144763,7 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "targetHttpProxy": { // "description": "Name of the TargetHttpProxy resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -132616,7 +144919,7 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp // "targetHttpProxy": { // "description": "Name of the TargetHttpProxy resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -133207,7 +145510,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper // "targetHttpProxy": { // "description": "Name of the TargetHttpProxy to set a URL map for.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -133778,7 +146081,7 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat // "targetHttpsProxy": { // "description": "Name of the TargetHttpsProxy resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -133933,7 +146236,7 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt // "targetHttpsProxy": { // "description": "Name of the TargetHttpsProxy resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -134694,7 +146997,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti // "targetHttpsProxy": { // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -135045,7 +147348,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope // "targetHttpsProxy": { // "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -135621,7 +147924,7 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "targetInstance": { // "description": "Name of the TargetInstance resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": 
"string" // }, @@ -135788,7 +148091,7 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan // "targetInstance": { // "description": "Name of the TargetInstance resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -136582,7 +148885,7 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera // "targetPool": { // "description": "Name of the target pool to add a health check to.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -136768,7 +149071,7 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio // "targetPool": { // "description": "Name of the TargetPool resource to add instances to.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -137199,7 +149502,7 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "targetPool": { // "description": "Name of the TargetPool resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -137366,7 +149669,7 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro // "targetPool": { // "description": "Name of the TargetPool resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -137527,7 +149830,7 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool // "targetPool": { // "description": "Name of the TargetPool resource to which the queried instance belongs.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -138154,7 +150457,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op // "targetPool": { // "description": "Name of the target pool to remove health checks from.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -138340,7 +150643,7 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera // "targetPool": { // "description": "Name of the TargetPool resource to remove instances from.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -138539,7 +150842,7 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, // "targetPool": { // "description": "Name of the TargetPool resource to set a backup pool for.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ 
-138869,7 +151172,7 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "targetSslProxy": { // "description": "Name of the TargetSslProxy resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -139024,7 +151327,7 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr // "targetSslProxy": { // "description": "Name of the TargetSslProxy resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -139612,7 +151915,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) // "targetSslProxy": { // "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -139786,7 +152089,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* // "targetSslProxy": { // "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -139960,7 +152263,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption // "targetSslProxy": { // "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -140455,7 +152758,7 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "targetTcpProxy": { // "description": "Name of the TargetTcpProxy resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -140610,7 +152913,7 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr // "targetTcpProxy": { // "description": "Name of the TargetTcpProxy resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -141198,7 +153501,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) // "targetTcpProxy": { // "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -141372,7 +153675,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* // "targetTcpProxy": { // "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -141953,7 +154256,7 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "targetVpnGateway": { // "description": "Name of the target VPN gateway to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -142119,7 +154422,7 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG // "targetVpnGateway": { // "description": "Name of the target VPN gateway to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -143324,7 +155627,7 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "urlMap": { // "description": "Name of the UrlMap resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -143480,7 +155783,7 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { // "urlMap": { // "description": "Name of the UrlMap resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -143818,7 +156121,7 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio // "urlMap": { // "description": "Name of the UrlMap scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -144248,7 +156551,7 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "urlMap": { // "description": "Name of the UrlMap resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -144576,7 +156879,7 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "urlMap": { // "description": "Name of the UrlMap resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -144729,7 +157032,7 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate // "urlMap": { // "description": "Name of the UrlMap resource to be validated as.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -145158,7 +157461,7 @@ func (c *VpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "vpnGateway": { // "description": "Name of the VPN gateway to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -145324,7 +157627,7 @@ func (c *VpnGatewaysGetCall) 
Do(opts ...googleapi.CallOption) (*VpnGateway, erro // "vpnGateway": { // "description": "Name of the VPN gateway to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -145342,6 +157645,172 @@ func (c *VpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*VpnGateway, erro } +// method id "compute.vpnGateways.getStatus": + +type VpnGatewaysGetStatusCall struct { + s *Service + project string + region string + vpnGateway string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetStatus: Returns the status for the specified VPN gateway. +func (r *VpnGatewaysService) GetStatus(project string, region string, vpnGateway string) *VpnGatewaysGetStatusCall { + c := &VpnGatewaysGetStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.vpnGateway = vpnGateway + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnGatewaysGetStatusCall) Fields(s ...googleapi.Field) *VpnGatewaysGetStatusCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *VpnGatewaysGetStatusCall) IfNoneMatch(entityTag string) *VpnGatewaysGetStatusCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnGatewaysGetStatusCall) Context(ctx context.Context) *VpnGatewaysGetStatusCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnGatewaysGetStatusCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnGatewaysGetStatusCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{vpnGateway}/getStatus") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "vpnGateway": c.vpnGateway, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnGateways.getStatus" call. +// Exactly one of *VpnGatewaysGetStatusResponse or error will be +// non-nil. Any non-2xx status code is an error. 
Response headers are in +// either *VpnGatewaysGetStatusResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *VpnGatewaysGetStatusCall) Do(opts ...googleapi.CallOption) (*VpnGatewaysGetStatusResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &VpnGatewaysGetStatusResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the status for the specified VPN gateway.", + // "httpMethod": "GET", + // "id": "compute.vpnGateways.getStatus", + // "parameterOrder": [ + // "project", + // "region", + // "vpnGateway" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "vpnGateway": { + // "description": "Name of the VPN gateway to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}/getStatus", + // "response": { + // "$ref": "VpnGatewaysGetStatusResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.vpnGateways.insert": type VpnGatewaysInsertCall struct { @@ -146538,7 +159007,7 @@ func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err // "vpnTunnel": { // "description": "Name of the VpnTunnel resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -146704,7 +159173,7 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) // "vpnTunnel": { // "description": "Name of the VpnTunnel resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -147605,7 +160074,7 @@ func (c *ZoneOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "operation": { // "description": "Name of the Operations resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -147768,7 +160237,7 @@ func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, er // "operation": { // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -148193,7 +160662,7 @@ func (c *ZoneOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, e // "operation": { // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -148364,7 +160833,7 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { // "zone": { // "description": "Name of the zone resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } diff --git a/vendor/google.golang.org/api/compute/v0.beta/BUILD.bazel b/vendor/google.golang.org/api/compute/v0.beta/BUILD.bazel index 6eedd5f7cefdd..a4e60e7e0d7d6 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/BUILD.bazel +++ b/vendor/google.golang.org/api/compute/v0.beta/BUILD.bazel @@ -9,5 +9,7 @@ go_library( deps = [ "//vendor/google.golang.org/api/gensupport:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/google.golang.org/api/option:go_default_library", + "//vendor/google.golang.org/api/transport/http:go_default_library", ], ) diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json index 37e0d739e4fb8..5d69c1962ddb2 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json @@ -29,7 +29,7 @@ "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/hpAivS3Oe8CKVoCbwnyJCYDdS-0\"", + "etag": "\"VPK3KBfpaEgZ16pozGOoMYfKc0U/_AHzCpq0AAQFZQWJBiL19RSPIkQ\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -150,7 +150,7 @@ "acceleratorType": { "description": "Name of the accelerator type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -302,7 +302,7 @@ "address": { "description": "Name of the address resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -348,7 +348,7 @@ "address": { "description": "Name of the address resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -571,12 +571,12 @@ } } }, - "allocations": { + "autoscalers": { "methods": { 
"aggregatedList": { - "description": "Retrieves an aggregated list of allocations.", + "description": "Retrieves an aggregated list of autoscalers.", "httpMethod": "GET", - "id": "compute.allocations.aggregatedList", + "id": "compute.autoscalers.aggregatedList", "parameterOrder": [ "project" ], @@ -612,9 +612,9 @@ "type": "string" } }, - "path": "{project}/aggregated/allocations", + "path": "{project}/aggregated/autoscalers", "response": { - "$ref": "AllocationAggregatedList" + "$ref": "AutoscalerAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -623,19 +623,19 @@ ] }, "delete": { - "description": "Deletes the specified allocation.", + "description": "Deletes the specified autoscaler.", "httpMethod": "DELETE", - "id": "compute.allocations.delete", + "id": "compute.autoscalers.delete", "parameterOrder": [ "project", "zone", - "allocation" + "autoscaler" ], "parameters": { - "allocation": { - "description": "Name of the allocation to delete.", + "autoscaler": { + "description": "Name of the autoscaler to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -659,7 +659,7 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/allocations/{allocation}", + "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", "response": { "$ref": "Operation" }, @@ -669,19 +669,19 @@ ] }, "get": { - "description": "Retrieves all information of the specified allocation.", + "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", "httpMethod": "GET", - "id": "compute.allocations.get", + "id": "compute.autoscalers.get", "parameterOrder": [ "project", "zone", - "allocation" + "autoscaler" ], "parameters": { - "allocation": { - "description": "Name of the allocation to retrieve.", + "autoscaler": { + "description": "Name of the autoscaler to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -700,51 +700,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/allocations/{allocation}", - "response": { - "$ref": "Allocation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - "httpMethod": "GET", - "id": "compute.allocations.getIamPolicy", - "parameterOrder": [ - "project", - "zone", - "resource" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/allocations/{resource}/getIamPolicy", + "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", "response": { - "$ref": "Policy" + "$ref": "Autoscaler" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -753,9 +711,9 @@ ] }, "insert": { - "description": "Creates a new allocation.", + "description": "Creates an autoscaler in the specified project using the data included in the request.", "httpMethod": "POST", - "id": "compute.allocations.insert", + "id": "compute.autoscalers.insert", "parameterOrder": [ "project", "zone" @@ -781,9 +739,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/allocations", + "path": "{project}/zones/{zone}/autoscalers", "request": { - "$ref": "Allocation" + "$ref": "Autoscaler" }, "response": { "$ref": "Operation" @@ -794,9 +752,9 @@ ] }, "list": { - "description": "A list all the allocations that have been configured for the specified project in specified zone.", + "description": "Retrieves a list of autoscalers contained within the specified zone.", "httpMethod": "GET", - "id": "compute.allocations.list", + "id": "compute.autoscalers.list", "parameterOrder": [ "project", "zone" @@ -840,9 +798,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/allocations", + "path": "{project}/zones/{zone}/autoscalers", "response": { - "$ref": "AllocationList" + "$ref": "AutoscalerList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -850,16 +808,21 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - "httpMethod": "POST", - "id": "compute.allocations.setIamPolicy", + "patch": { + "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.autoscalers.patch", "parameterOrder": [ "project", - "zone", - "resource" + "zone" ], "parameters": { + "autoscaler": { + "description": "Name of the autoscaler to patch.", + "location": "query", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -867,27 +830,25 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" }, "zone": { - "description": "The name of the zone for this request.", + "description": "Name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/zones/{zone}/allocations/{resource}/setIamPolicy", + "path": "{project}/zones/{zone}/autoscalers", "request": { - "$ref": "ZoneSetPolicyRequest" + "$ref": "Autoscaler" }, "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -897,7 +858,7 @@ "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.allocations.testIamPermissions", + "id": "compute.autoscalers.testIamPermissions", "parameterOrder": [ "project", "zone", @@ -926,7 +887,7 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/allocations/{resource}/testIamPermissions", + "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", "request": { "$ref": "TestPermissionsRequest" }, @@ -938,351 +899,20 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "autoscalers": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of autoscalers.", - "httpMethod": "GET", - "id": "compute.autoscalers.aggregatedList", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - } - }, - "path": "{project}/aggregated/autoscalers", - "response": { - "$ref": "AutoscalerAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] }, - "delete": { - "description": "Deletes the specified autoscaler.", - "httpMethod": "DELETE", - "id": "compute.autoscalers.delete", + "update": { + "description": "Updates an autoscaler in the specified project using the data included in the request.", + "httpMethod": "PUT", + "id": "compute.autoscalers.update", "parameterOrder": [ "project", - "zone", - "autoscaler" + "zone" ], "parameters": { "autoscaler": { - "description": "Name of the autoscaler to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "description": "Name of the autoscaler to update.", "location": "query", - "type": "string" - }, - "zone": { - "description": "Name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", - "httpMethod": "GET", - "id": "compute.autoscalers.get", - "parameterOrder": [ - "project", - "zone", - "autoscaler" - ], - "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "zone": { - "description": "Name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", - "response": { - "$ref": "Autoscaler" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates an autoscaler in the specified project using the data included in the request.", - "httpMethod": "POST", - "id": "compute.autoscalers.insert", - "parameterOrder": [ - "project", - "zone" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "zone": { - "description": "Name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/autoscalers", - "request": { - "$ref": "Autoscaler" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "description": "Retrieves a list of autoscalers contained within the specified zone.", - "httpMethod": "GET", - "id": "compute.autoscalers.list", - "parameterOrder": [ - "project", - "zone" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "zone": { - "description": "Name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/autoscalers", - "response": { - "$ref": "AutoscalerList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "httpMethod": "PATCH", - "id": "compute.autoscalers.patch", - "parameterOrder": [ - "project", - "zone" - ], - "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to patch.", - "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "zone": { - "description": "Name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/autoscalers", - "request": { - "$ref": "Autoscaler" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.autoscalers.testIamPermissions", - "parameterOrder": [ - "project", - "zone", - "resource" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "update": { - "description": "Updates an autoscaler in the specified project using the data included in the request.", - "httpMethod": "PUT", - "id": "compute.autoscalers.update", - "parameterOrder": [ - "project", - "zone" - ], - "parameters": { - "autoscaler": { - "description": "Name of the autoscaler to update.", - "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "type": "string" }, "project": { @@ -1373,7 +1003,7 @@ "backendBucket": { "description": "Name of the BackendBucket resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1455,7 +1085,7 @@ "backendBucket": { "description": "Name of the BackendBucket resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1571,7 +1201,7 @@ "backendBucket": { "description": "Name of the BackendBucket resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1612,7 +1242,7 @@ "backendBucket": { "description": "Name of the BackendBucket resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1746,7 +1376,7 @@ "backendService": { "description": "Name of the BackendService resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1828,7 +1458,7 @@ "backendService": { "description": "Name of the BackendService resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1862,7 +1492,7 @@ "backendService": { "description": "Name of the BackendService resource to which the queried instance belongs.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -2098,7 +1728,7 @@ "backendService": { "description": "Name of the BackendService resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -2193,7 +1823,7 @@ "diskType": { "description": "Name of the disk type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -2399,6 +2029,7 @@ "type": "string" }, "guestFlush": { + "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", "location": "query", "type": "boolean" }, @@ -2904,21 +2535,21 @@ } } }, - "firewalls": { + "externalVpnGateways": { "methods": { "delete": { - "description": "Deletes the specified firewall.", + "description": "Deletes the specified externalVpnGateway.", "httpMethod": "DELETE", - "id": "compute.firewalls.delete", + "id": "compute.externalVpnGateways.delete", "parameterOrder": [ "project", - "firewall" + "externalVpnGateway" ], "parameters": { - "firewall": { - "description": "Name of the firewall rule to delete.", + "externalVpnGateway": { + "description": "Name of the externalVpnGateways to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -2935,7 +2566,7 @@ "type": "string" } }, - "path": "{project}/global/firewalls/{firewall}", + "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", "response": { "$ref": "Operation" }, @@ -2945,18 +2576,18 @@ ] }, "get": { - "description": "Returns the specified firewall.", + "description": "Returns the specified externalVpnGateway. 
Get a list of available externalVpnGateways by making a list() request.", "httpMethod": "GET", - "id": "compute.firewalls.get", + "id": "compute.externalVpnGateways.get", "parameterOrder": [ "project", - "firewall" + "externalVpnGateway" ], "parameters": { - "firewall": { - "description": "Name of the firewall rule to return.", + "externalVpnGateway": { + "description": "Name of the externalVpnGateway to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -2968,9 +2599,9 @@ "type": "string" } }, - "path": "{project}/global/firewalls/{firewall}", + "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", "response": { - "$ref": "Firewall" + "$ref": "ExternalVpnGateway" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -2979,9 +2610,9 @@ ] }, "insert": { - "description": "Creates a firewall rule in the specified project using the data included in the request.", + "description": "Creates a ExternalVpnGateway in the specified project using the data included in the request.", "httpMethod": "POST", - "id": "compute.firewalls.insert", + "id": "compute.externalVpnGateways.insert", "parameterOrder": [ "project" ], @@ -2999,9 +2630,9 @@ "type": "string" } }, - "path": "{project}/global/firewalls", + "path": "{project}/global/externalVpnGateways", "request": { - "$ref": "Firewall" + "$ref": "ExternalVpnGateway" }, "response": { "$ref": "Operation" @@ -3012,9 +2643,9 @@ ] }, "list": { - "description": "Retrieves the list of firewall rules available to the specified project.", + "description": "Retrieves the list of ExternalVpnGateway available to the specified project.", "httpMethod": "GET", - "id": "compute.firewalls.list", + "id": "compute.externalVpnGateways.list", "parameterOrder": [ "project" ], @@ -3050,9 +2681,9 @@ "type": "string" } }, - "path": "{project}/global/firewalls", + "path": "{project}/global/externalVpnGateways", "response": { - "$ref": "FirewallList" + "$ref": "ExternalVpnGatewayList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -3060,22 +2691,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "httpMethod": "PATCH", - "id": "compute.firewalls.patch", + "setLabels": { + "description": "Sets the labels on an ExternalVpnGateway. To learn more about labels, read the Labeling Resources documentation.", + "httpMethod": "POST", + "id": "compute.externalVpnGateways.setLabels", "parameterOrder": [ "project", - "firewall" + "resource" ], "parameters": { - "firewall": { - "description": "Name of the firewall rule to patch.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -3083,15 +2707,17 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" } }, - "path": "{project}/global/firewalls/{firewall}", + "path": "{project}/global/externalVpnGateways/{resource}/setLabels", "request": { - "$ref": "Firewall" + "$ref": "GlobalSetLabelsRequest" }, "response": { "$ref": "Operation" @@ -3104,7 +2730,7 @@ "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.firewalls.testIamPermissions", + "id": "compute.externalVpnGateways.testIamPermissions", "parameterOrder": [ "project", "resource" @@ -3125,7 +2751,7 @@ "type": "string" } }, - "path": "{project}/global/firewalls/{resource}/testIamPermissions", + "path": "{project}/global/externalVpnGateways/{resource}/testIamPermissions", "request": { "$ref": "TestPermissionsRequest" }, @@ -3137,18 +2763,22 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "update": { - "description": "Updates the specified firewall rule with the data included in the request. The PUT method can only update the following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", - "httpMethod": "PUT", - "id": "compute.firewalls.update", + } + } + }, + "firewalls": { + "methods": { + "delete": { + "description": "Deletes the specified firewall.", + "httpMethod": "DELETE", + "id": "compute.firewalls.delete", "parameterOrder": [ "project", "firewall" ], "parameters": { "firewall": { - "description": "Name of the firewall rule to update.", + "description": "Name of the firewall rule to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -3168,6 +2798,70 @@ } }, "path": "{project}/global/firewalls/{firewall}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified firewall.", + "httpMethod": "GET", + "id": "compute.firewalls.get", + "parameterOrder": [ + "project", + "firewall" + ], + "parameters": { + "firewall": { + "description": "Name of the firewall rule to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/firewalls/{firewall}", + "response": { + "$ref": "Firewall" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a firewall rule in the specified project using the data 
included in the request.", + "httpMethod": "POST", + "id": "compute.firewalls.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/firewalls", "request": { "$ref": "Firewall" }, @@ -3178,15 +2872,183 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "forwardingRules": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of forwarding rules.", + }, + "list": { + "description": "Retrieves the list of firewall rules available to the specified project.", "httpMethod": "GET", - "id": "compute.forwardingRules.aggregatedList", + "id": "compute.firewalls.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/firewalls", + "response": { + "$ref": "FirewallList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.firewalls.patch", + "parameterOrder": [ + "project", + "firewall" + ], + "parameters": { + "firewall": { + "description": "Name of the firewall rule to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/firewalls/{firewall}", + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.firewalls.testIamPermissions", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/firewalls/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "update": { + "description": "Updates the specified firewall rule with the data included in the request. The PUT method can only update the following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", + "httpMethod": "PUT", + "id": "compute.firewalls.update", + "parameterOrder": [ + "project", + "firewall" + ], + "parameters": { + "firewall": { + "description": "Name of the firewall rule to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/firewalls/{firewall}", + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "forwardingRules": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of forwarding rules.", + "httpMethod": "GET", + "id": "compute.forwardingRules.aggregatedList", "parameterOrder": [ "project" ], @@ -3245,7 +3107,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3291,7 +3153,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3418,6 +3280,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", + "httpMethod": "PATCH", + "id": "compute.forwardingRules.patch", + "parameterOrder": [ + "project", + "region", + "forwardingRule" + ], + "parameters": { + "forwardingRule": { + "description": "Name of the ForwardingRule resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + "request": { + "$ref": "ForwardingRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setLabels": { "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", "httpMethod": "POST", @@ -3480,7 +3391,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource in which target is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3577,7 +3488,7 @@ "address": { "description": "Name of the address resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3615,7 +3526,7 @@ "address": { "description": "Name of the address resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3808,7 +3719,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3846,7 +3757,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3950,6 +3861,47 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", + "httpMethod": "PATCH", + "id": "compute.globalForwardingRules.patch", + "parameterOrder": [ + "project", + "forwardingRule" + ], + "parameters": { + "forwardingRule": { + "description": "Name of the ForwardingRule resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/forwardingRules/{forwardingRule}", + "request": { + "$ref": "ForwardingRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setLabels": { "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", "httpMethod": "POST", @@ -3998,7 +3950,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource in which target is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4129,7 +4081,7 @@ "operation": { "description": "Name of the Operations resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4159,7 +4111,7 @@ "operation": { "description": "Name of the Operations resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4234,6 +4186,55 @@ }, "healthChecks": { "methods": { + "aggregatedList": { + "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project.", + "httpMethod": "GET", + "id": "compute.healthChecks.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/healthChecks", + "response": { + "$ref": "HealthChecksAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { "description": "Deletes the specified HealthCheck resource.", "httpMethod": "DELETE", @@ -4246,7 +4247,7 @@ "healthCheck": { "description": "Name of the HealthCheck resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4284,7 +4285,7 @@ "healthCheck": { "description": "Name of the HealthCheck resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4400,7 +4401,7 @@ "healthCheck": { "description": "Name of the HealthCheck resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4478,7 +4479,7 @@ "healthCheck": { "description": "Name of the HealthCheck resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4523,7 +4524,7 @@ "httpHealthCheck": { "description": "Name of the HttpHealthCheck resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4561,7 +4562,7 @@ "httpHealthCheck": { "description": "Name of the HttpHealthCheck resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4677,7 +4678,7 @@ "httpHealthCheck": { "description": "Name of 
the HttpHealthCheck resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4755,7 +4756,7 @@ "httpHealthCheck": { "description": "Name of the HttpHealthCheck resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4800,7 +4801,7 @@ "httpsHealthCheck": { "description": "Name of the HttpsHealthCheck resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4838,7 +4839,7 @@ "httpsHealthCheck": { "description": "Name of the HttpsHealthCheck resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4954,7 +4955,7 @@ "httpsHealthCheck": { "description": "Name of the HttpsHealthCheck resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -5032,7 +5033,7 @@ "httpsHealthCheck": { "description": "Name of the HttpsHealthCheck resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -5545,6 +5546,48 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "applyUpdatesToInstances": { + "description": "Apply changes to selected instances on the managed instance group. This method can be used to apply new overrides and/or new versions.", + "httpMethod": "POST", + "id": "compute.instanceGroupManagers.applyUpdatesToInstances", + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group, should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone where the managed instance group is located. Should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + "request": { + "$ref": "InstanceGroupManagersApplyUpdatesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "delete": { "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", "httpMethod": "DELETE", @@ -6751,7 +6794,7 @@ "instanceTemplates": { "methods": { "delete": { - "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. 
It's not possible to delete templates which are in use by an instance group.", + "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group.", "httpMethod": "DELETE", "id": "compute.instanceTemplates.delete", "parameterOrder": [ @@ -6762,7 +6805,7 @@ "instanceTemplate": { "description": "The name of the instance template to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -6800,7 +6843,7 @@ "instanceTemplate": { "description": "The name of the instance template.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -6890,7 +6933,7 @@ ] }, "list": { - "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", + "description": "Retrieves a list of instance templates that are contained within the specified project.", "httpMethod": "GET", "id": "compute.instanceTemplates.list", "parameterOrder": [ @@ -7131,7 +7174,7 @@ ], "parameters": { "forceAttach": { - "description": "Whether to force attach the disk even if it's currently attached to another instance. This is only available for regional disks.", + "description": "Whether to force attach the disk even if it's currently attached to another instance.", "location": "query", "type": "boolean" }, @@ -7526,6 +7569,48 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getShieldedInstanceIdentity": { + "description": "Returns the Shielded Instance Identity of an instance", + "httpMethod": "GET", + "id": "compute.instances.getShieldedInstanceIdentity", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name or id of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity", + "response": { + "$ref": "ShieldedInstanceIdentity" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "getShieldedVmIdentity": { "description": "Returns the Shielded VM Identity of an instance", "httpMethod": "GET", @@ -7737,7 +7822,7 @@ ] }, "reset": { - "description": "Performs a reset on the instance. For more information, see Resetting an instance.", + "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. 
For more information, see Resetting an instance.", "httpMethod": "POST", "id": "compute.instances.reset", "parameterOrder": [ @@ -8331,10 +8416,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "setShieldedVmIntegrityPolicy": { - "description": "Sets the Shielded VM integrity policy for a VM instance. You can only use this method on a running VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "setShieldedInstanceIntegrityPolicy": { + "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", - "id": "compute.instances.setShieldedVmIntegrityPolicy", + "id": "compute.instances.setShieldedInstanceIntegrityPolicy", "parameterOrder": [ "project", "zone", @@ -8342,7 +8427,7 @@ ], "parameters": { "instance": { - "description": "Name of the instance scoping this request.", + "description": "Name or id of the instance scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -8368,9 +8453,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", + "path": "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", "request": { - "$ref": "ShieldedVmIntegrityPolicy" + "$ref": "ShieldedInstanceIntegrityPolicy" }, "response": { "$ref": "Operation" @@ -8380,10 +8465,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "setTags": { - "description": "Sets network tags for the specified instance to the data included in the request.", - "httpMethod": "POST", - "id": "compute.instances.setTags", + "setShieldedVmIntegrityPolicy": { + "description": "Sets the Shielded VM integrity policy for a VM instance. You can only use this method on a running VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.instances.setShieldedVmIntegrityPolicy", "parameterOrder": [ "project", "zone", @@ -8417,9 +8502,9 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/setTags", + "path": "{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", "request": { - "$ref": "Tags" + "$ref": "ShieldedVmIntegrityPolicy" }, "response": { "$ref": "Operation" @@ -8429,10 +8514,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "simulateMaintenanceEvent": { - "description": "Simulates a maintenance event on the instance.", + "setTags": { + "description": "Sets network tags for the specified instance to the data included in the request.", "httpMethod": "POST", - "id": "compute.instances.simulateMaintenanceEvent", + "id": "compute.instances.setTags", "parameterOrder": [ "project", "zone", @@ -8453,47 +8538,6 @@ "required": true, "type": "string" }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "start": { - "description": "Starts an instance that was stopped using the instances().stop method. 
For more information, see Restart an instance.", - "httpMethod": "POST", - "id": "compute.instances.start", - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "parameters": { - "instance": { - "description": "Name of the instance resource to start.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", @@ -8507,7 +8551,10 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/start", + "path": "{project}/zones/{zone}/instances/{instance}/setTags", + "request": { + "$ref": "Tags" + }, "response": { "$ref": "Operation" }, @@ -8516,10 +8563,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "startWithEncryptionKey": { - "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", + "simulateMaintenanceEvent": { + "description": "Simulates a maintenance event on the instance.", "httpMethod": "POST", - "id": "compute.instances.startWithEncryptionKey", + "id": "compute.instances.simulateMaintenanceEvent", "parameterOrder": [ "project", "zone", @@ -8527,7 +8574,7 @@ ], "parameters": { "instance": { - "description": "Name of the instance resource to start.", + "description": "Name of the instance scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -8540,11 +8587,6 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -8553,10 +8595,7 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", - "request": { - "$ref": "InstancesStartWithEncryptionKeyRequest" - }, + "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", "response": { "$ref": "Operation" }, @@ -8565,10 +8604,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "stop": { - "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + "start": { + "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", "httpMethod": "POST", - "id": "compute.instances.stop", + "id": "compute.instances.start", "parameterOrder": [ "project", "zone", @@ -8576,7 +8615,7 @@ ], "parameters": { "instance": { - "description": "Name of the instance resource to stop.", + "description": "Name of the instance resource to start.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -8602,7 +8641,7 @@ "type": "string" } }, - "path": "{project}/zones/{zone}/instances/{instance}/stop", + "path": "{project}/zones/{zone}/instances/{instance}/start", "response": { "$ref": "Operation" }, @@ -8611,23 +8650,118 @@ "https://www.googleapis.com/auth/compute" ] }, - "suspend": { - "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. Suspended instances incur reduced per-minute, virtual machine usage charges while they are suspended. Any resources the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted.", + "startWithEncryptionKey": { + "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", "httpMethod": "POST", - "id": "compute.instances.suspend", + "id": "compute.instances.startWithEncryptionKey", "parameterOrder": [ "project", "zone", "instance" ], "parameters": { - "discardLocalSsd": { - "description": "If true, discard the contents of any attached localSSD partitions. 
Default value is false (== preserve localSSD data).", - "location": "query", - "type": "boolean" - }, "instance": { - "description": "Name of the instance resource to suspend.", + "description": "Name of the instance resource to start.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + "request": { + "$ref": "InstancesStartWithEncryptionKeyRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "stop": { + "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + "httpMethod": "POST", + "id": "compute.instances.stop", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name of the instance resource to stop.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/stop", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "suspend": { + "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. Suspended instances incur reduced per-minute, virtual machine usage charges while they are suspended. Any resources the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted.", + "httpMethod": "POST", + "id": "compute.instances.suspend", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "discardLocalSsd": { + "description": "If true, discard the contents of any attached localSSD partitions. Default value is false (== preserve localSSD data).", + "location": "query", + "type": "boolean" + }, + "instance": { + "description": "Name of the instance resource to suspend.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -8868,6 +9002,55 @@ "https://www.googleapis.com/auth/compute" ] }, + "updateShieldedInstanceConfig": { + "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.instances.updateShieldedInstanceConfig", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name or id of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig", + "request": { + "$ref": "ShieldedInstanceConfig" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "updateShieldedVmConfig": { "description": "Updates the Shielded VM config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", @@ -8983,7 +9166,7 @@ "interconnectAttachment": { "description": "Name of the interconnect attachment to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9029,7 +9212,7 @@ "interconnectAttachment": { "description": "Name of the interconnect attachment to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9169,7 +9352,7 @@ "interconnectAttachment": { "description": "Name of the interconnect attachment to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9315,7 +9498,7 @@ "interconnectLocation": { "description": "Name of the interconnect location to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9402,7 +9585,7 @@ "interconnect": { "description": "Name of the interconnect to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9440,7 +9623,7 @@ "interconnect": { "description": "Name of the interconnect to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9474,7 +9657,7 @@ "interconnect": { "description": "Name of the interconnect resource to query.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9590,7 +9773,7 @@ "interconnect": { "description": "Name of the interconnect to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9746,7 +9929,7 @@ "license": { "description": "Name of the license resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9784,7 +9967,7 @@ "license": 
{ "description": "Name of the License resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9806,12 +9989,13 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Create a License resource in the specified project.", - "httpMethod": "POST", - "id": "compute.licenses.insert", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "httpMethod": "GET", + "id": "compute.licenses.getIamPolicy", "parameterOrder": [ - "project" + "project", + "resource" ], "parameters": { "project": { @@ -9821,69 +10005,17 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "{project}/global/licenses", - "request": { - "$ref": "License" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "list": { - "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", - "httpMethod": "GET", - "id": "compute.licenses.list", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/licenses", + "path": "{project}/global/licenses/{resource}/getIamPolicy", "response": { - "$ref": "LicensesListResponse" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -9891,13 +10023,12 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "insert": { + "description": "Create a License resource in the specified project.", "httpMethod": "POST", - "id": "compute.licenses.setIamPolicy", + "id": "compute.licenses.insert", "parameterOrder": [ - "project", - "resource" + "project" ], "parameters": { "project": { @@ -9907,34 +10038,120 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "{project}/global/licenses/{resource}/setIamPolicy", + "path": "{project}/global/licenses", "request": { - "$ref": "GlobalSetPolicyRequest" + "$ref": "License" }, "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" ] - } - } - }, - "machineTypes": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of machine types.", + }, + "list": { + "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", "httpMethod": "GET", - "id": "compute.machineTypes.aggregatedList", + "id": "compute.licenses.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/licenses", + "response": { + "$ref": "LicensesListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "httpMethod": "POST", + "id": "compute.licenses.setIamPolicy", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/licenses/{resource}/setIamPolicy", + "request": { + "$ref": "GlobalSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "machineTypes": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of machine types.", + "httpMethod": "GET", + "id": "compute.machineTypes.aggregatedList", "parameterOrder": [ "project" ], @@ -9993,7 +10210,7 @@ "machineType": { "description": "Name of the machine type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10754,7 +10971,7 @@ "network": { "description": "Name of the network for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -11011,7 +11228,7 @@ "nodeGroup": { "description": "Name of the NodeGroup resource.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -11109,7 +11326,7 @@ "nodeGroup": { "description": "Name of the NodeGroup resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -11155,7 +11372,7 @@ "nodeGroup": { "description": "Name of the NodeGroup resource to delete.", "location": "path", - "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -11204,7 +11421,7 @@ "nodeGroup": { "description": "Name of the node group to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -11407,7 +11624,7 @@ "nodeGroup": { "description": "Name of the NodeGroup resource whose nodes you want to list.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -11501,9 +11718,9 @@ ], "parameters": { "nodeGroup": { - "description": "Name of the NodeGroup resource to delete.", + "description": "Name of the NodeGroup resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -11650,7 +11867,7 @@ "nodeTemplate": { "description": "Name of the NodeTemplate resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -11696,7 +11913,7 @@ "nodeTemplate": { "description": "Name of the node template to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12020,7 +12237,7 @@ "nodeType": { "description": "Name of the node type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12571,7 +12788,7 @@ "autoscaler": { "description": "Name of the autoscaler to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12617,7 +12834,7 @@ "autoscaler": { "description": "Name of the autoscaler to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12756,7 +12973,7 @@ "autoscaler": { "description": "Name of the autoscaler to patch.", "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "type": "string" }, "project": { @@ -12848,7 +13065,7 @@ "autoscaler": { "description": "Name of the autoscaler to update.", "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "type": "string" }, "project": { @@ -12900,7 +13117,7 @@ "backendService": { "description": "Name of the BackendService resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12946,7 +13163,7 @@ "backendService": { "description": "Name of the BackendService resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12988,7 +13205,7 @@ "backendService": { 
"description": "Name of the BackendService resource for which to get health.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13224,7 +13441,7 @@ "backendService": { "description": "Name of the BackendService resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13326,7 +13543,7 @@ "commitment": { "description": "Name of the commitment to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13452,6 +13669,55 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "updateReservations": { + "description": "Update the shape of reservations for GPUS/Local SSDs of reservations within the commitments.", + "httpMethod": "POST", + "id": "compute.regionCommitments.updateReservations", + "parameterOrder": [ + "project", + "region", + "commitment" + ], + "parameters": { + "commitment": { + "description": "Name of the commitment of which the reservation's capacities are being updated.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/commitments/{commitment}/updateReservations", + "request": { + "$ref": "RegionCommitmentsUpdateReservationsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -13470,7 +13736,7 @@ "diskType": { "description": "Name of the disk type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -13745,6 +14011,48 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + "httpMethod": "GET", + "id": "compute.regionDisks.getIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/disks/{resource}/getIamPolicy", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "description": "Creates a persistent regional disk in the specified project using the data included in the request.", "httpMethod": "POST", @@ -13946,6 +14254,50 @@ "https://www.googleapis.com/auth/compute" ] }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "httpMethod": "POST", + "id": "compute.regionDisks.setIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/disks/{resource}/setIamPolicy", + "request": { + "$ref": "RegionSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setLabels": { "description": "Sets the labels on the target regional disk.", "httpMethod": "POST", @@ -14042,6 +14394,294 @@ } } }, + "regionHealthChecks": { + "methods": { + "delete": { + "description": "Deletes the specified HealthCheck resource.", + "httpMethod": "DELETE", + "id": "compute.regionHealthChecks.delete", + "parameterOrder": [ + "project", + "region", + "healthCheck" + ], + "parameters": { + "healthCheck": { + "description": "Name of the HealthCheck resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": 
"Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + "httpMethod": "GET", + "id": "compute.regionHealthChecks.get", + "parameterOrder": [ + "project", + "region", + "healthCheck" + ], + "parameters": { + "healthCheck": { + "description": "Name of the HealthCheck resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + "response": { + "$ref": "HealthCheck" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.regionHealthChecks.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks", + "request": { + "$ref": "HealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of HealthCheck resources available to the specified project.", + "httpMethod": "GET", + "id": "compute.regionHealthChecks.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks", + "response": { + "$ref": "HealthCheckList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.regionHealthChecks.patch", + "parameterOrder": [ + "project", + "region", + "healthCheck" + ], + "parameters": { + "healthCheck": { + "description": "Name of the HealthCheck resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + "request": { + "$ref": "HealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + "httpMethod": "PUT", + "id": "compute.regionHealthChecks.update", + "parameterOrder": [ + "project", + "region", + "healthCheck" + ], + "parameters": { + "healthCheck": { + "description": "Name of the HealthCheck resource to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + "request": { + "$ref": "HealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "regionInstanceGroupManagers": { "methods": { "abandonInstances": { @@ -14091,6 +14731,48 @@ "https://www.googleapis.com/auth/compute" ] }, + "applyUpdatesToInstances": { + "description": "Apply updates to selected instances the managed instance group.", + "httpMethod": "POST", + "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", + "parameterOrder": [ + "project", + "region", + "instanceGroupManager" + ], + "parameters": { + "instanceGroupManager": { + "description": "The name of the managed instance group, should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request, should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + "request": { + "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "delete": { "description": "Deletes the specified managed instance group and all of the instances in that group.", "httpMethod": "DELETE", @@ -15036,7 +15718,7 @@ "operation": { "description": "Name of the Operations resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -15074,7 +15756,7 @@ "operation": { "description": "Name of the Operations resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -15162,15 +15844,16 @@ } } }, - "regions": { + "regionSslCertificates": { "methods": { - "get": { - "description": "Returns the specified Region resource. 
Gets a list of available regions by making a list() request.", - "httpMethod": "GET", - "id": "compute.regions.get", + "delete": { + "description": "Deletes the specified SslCertificate resource in the region.", + "httpMethod": "DELETE", + "id": "compute.regionSslCertificates.delete", "parameterOrder": [ "project", - "region" + "region", + "sslCertificate" ], "parameters": { "project": { @@ -15181,82 +15864,124 @@ "type": "string" }, "region": { - "description": "Name of the region resource to return.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "sslCertificate": { + "description": "Name of the SslCertificate resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "{project}/regions/{region}", + "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", "response": { - "$ref": "Region" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves the list of region resources available to the specified project.", + "get": { + "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", "httpMethod": "GET", - "id": "compute.regions.list", + "id": "compute.regionSslCertificates.get", "parameterOrder": [ - "project" + "project", + "region", + "sslCertificate" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", + "sslCertificate": { + "description": "Name of the SslCertificate resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" - }, + } + }, + "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", + "response": { + "$ref": "SslCertificate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", + "httpMethod": "POST", + "id": "compute.regionSslCertificates.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "{project}/regions", + "path": "{project}/regions/{region}/sslCertificates", + "request": { + "$ref": "SslCertificate" + }, "response": { - "$ref": "RegionList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - } - } - }, - "resourcePolicies": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of resource policies.", + }, + "list": { + "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", "httpMethod": "GET", - "id": "compute.resourcePolicies.aggregatedList", + "id": "compute.regionSslCertificates.list", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { @@ -15288,26 +16013,37 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "{project}/aggregated/resourcePolicies", + "path": "{project}/regions/{region}/sslCertificates", "response": { - "$ref": "ResourcePolicyAggregatedList" + "$ref": "SslCertificateList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, + } + } + }, + "regionTargetHttpProxies": { + "methods": { "delete": { - "description": "Deletes the specified resource policy.", + "description": "Deletes the specified TargetHttpProxy resource.", "httpMethod": "DELETE", - "id": "compute.resourcePolicies.delete", + "id": "compute.regionTargetHttpProxies.delete", "parameterOrder": [ "project", "region", - "resourcePolicy" + "targetHttpProxy" ], "parameters": { "project": { @@ -15318,7 +16054,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -15329,15 +16065,15 @@ "location": "query", "type": "string" }, - "resourcePolicy": { - "description": "Name of the resource policy to delete.", + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", + "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "response": { "$ref": "Operation" }, @@ -15347,13 +16083,13 @@ ] }, "get": { - "description": "Retrieves all information of the specified resource policy.", + "description": "Returns the specified TargetHttpProxy 
resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", "httpMethod": "GET", - "id": "compute.resourcePolicies.get", + "id": "compute.regionTargetHttpProxies.get", "parameterOrder": [ "project", "region", - "resourcePolicy" + "targetHttpProxy" ], "parameters": { "project": { @@ -15364,23 +16100,23 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "resourcePolicy": { - "description": "Name of the resource policy to retrieve.", + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", + "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "response": { - "$ref": "ResourcePolicy" + "$ref": "TargetHttpProxy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15389,9 +16125,9 @@ ] }, "insert": { - "description": "Creates a new resource policy.", + "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", "httpMethod": "POST", - "id": "compute.resourcePolicies.insert", + "id": "compute.regionTargetHttpProxies.insert", "parameterOrder": [ "project", "region" @@ -15405,7 +16141,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -15417,9 +16153,9 @@ "type": "string" } }, - "path": "{project}/regions/{region}/resourcePolicies", + "path": "{project}/regions/{region}/targetHttpProxies", "request": { - "$ref": "ResourcePolicy" + "$ref": "TargetHttpProxy" }, "response": { "$ref": "Operation" @@ -15430,9 +16166,9 @@ ] }, "list": { - "description": "A list all the resource policies that have been configured for the specified project in specified region.", + "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", "httpMethod": "GET", - "id": "compute.resourcePolicies.list", + "id": "compute.regionTargetHttpProxies.list", "parameterOrder": [ "project", "region" @@ -15469,16 +16205,16 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/resourcePolicies", + "path": "{project}/regions/{region}/targetHttpProxies", "response": { - "$ref": "ResourcePolicyList" + "$ref": "TargetHttpProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15486,14 +16222,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "setUrlMap": { + "description": "Changes the URL map for TargetHttpProxy.", "httpMethod": "POST", - "id": "compute.resourcePolicies.testIamPermissions", + "id": "compute.regionTargetHttpProxies.setUrlMap", "parameterOrder": [ 
"project", "region", - "resource" + "targetHttpProxy" ], "parameters": { "project": { @@ -15504,94 +16240,95 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy to set a URL map for.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions", + "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", "request": { - "$ref": "TestPermissionsRequest" + "$ref": "UrlMapReference" }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } }, - "routers": { + "regionTargetHttpsProxies": { "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of routers.", - "httpMethod": "GET", - "id": "compute.routers.aggregatedList", + "delete": { + "description": "Deletes the specified TargetHttpsProxy resource.", + "httpMethod": "DELETE", + "id": "compute.regionTargetHttpsProxies.delete", "parameterOrder": [ - "project" + "project", + "region", + "targetHttpsProxy" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "project": { - "description": "Project ID for this request.", + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to delete.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/aggregated/routers", + "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", "response": { - "$ref": "RouterAggregatedList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "delete": { - "description": "Deletes the specified Router resource.", - "httpMethod": "DELETE", - "id": "compute.routers.delete", + "get": { + "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", + "httpMethod": "GET", + "id": "compute.regionTargetHttpsProxies.get", "parameterOrder": [ "project", "region", - "router" + "targetHttpsProxy" ], "parameters": { "project": { @@ -15602,42 +16339,37 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "router": { - "description": "Name of the Router resource to delete.", + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/routers/{router}", + "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", "response": { - "$ref": "Operation" + "$ref": "TargetHttpsProxy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "get": { - "description": "Returns the specified Router resource. 
Gets a list of available routers by making a list() request.", - "httpMethod": "GET", - "id": "compute.routers.get", + "insert": { + "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.regionTargetHttpsProxies.insert", "parameterOrder": [ "project", - "region", - "router" + "region" ], "parameters": { "project": { @@ -15648,38 +16380,37 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "router": { - "description": "Name of the Router resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" } }, - "path": "{project}/regions/{region}/routers/{router}", + "path": "{project}/regions/{region}/targetHttpsProxies", + "request": { + "$ref": "TargetHttpsProxy" + }, "response": { - "$ref": "Router" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "getNatMappingInfo": { - "description": "Retrieves runtime Nat mapping information of VM endpoints.", + "list": { + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", "httpMethod": "GET", - "id": "compute.routers.getNatMappingInfo", + "id": "compute.regionTargetHttpsProxies.list", "parameterOrder": [ "project", - "region", - "router" + "region" ], "parameters": { "filter": { @@ -15713,23 +16444,16 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "router": { - "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/routers/{router}/getNatMappingInfo", + "path": "{project}/regions/{region}/targetHttpsProxies", "response": { - "$ref": "VmEndpointNatMappingsList" + "$ref": "TargetHttpsProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15737,14 +16461,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getRouterStatus": { - "description": "Retrieves runtime information of the specified router.", - "httpMethod": "GET", - "id": 
"compute.routers.getRouterStatus", + "setSslCertificates": { + "description": "Replaces SslCertificates for TargetHttpsProxy.", + "httpMethod": "POST", + "id": "compute.regionTargetHttpsProxies.setSslCertificates", "parameterOrder": [ "project", "region", - "router" + "targetHttpsProxy" ], "parameters": { "project": { @@ -15755,37 +16479,45 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "router": { - "description": "Name of the Router resource to query.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", + "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "request": { + "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" + }, "response": { - "$ref": "RouterStatusResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a Router resource in the specified project and region using the data included in the request.", + "setUrlMap": { + "description": "Changes the URL map for TargetHttpsProxy.", "httpMethod": "POST", - "id": "compute.routers.insert", + "id": "compute.regionTargetHttpsProxies.setUrlMap", "parameterOrder": [ "project", - "region" + "region", + "targetHttpsProxy" ], "parameters": { "project": { @@ -15796,7 +16528,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -15806,11 +16538,18 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy to set a URL map for.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "{project}/regions/{region}/routers", + "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", "request": { - "$ref": "Router" + "$ref": "UrlMapReference" }, "response": { "$ref": "Operation" @@ -15819,39 +16558,67 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "list": { - "description": "Retrieves a list of Router resources available to the specified project.", - "httpMethod": "GET", - "id": "compute.routers.list", + } + } + }, + "regionUrlMaps": { + "methods": { + "delete": { + "description": "Deletes the specified UrlMap resource.", + "httpMethod": "DELETE", + "id": "compute.regionUrlMaps.delete", "parameterOrder": [ "project", - "region" + "region", + "urlMap" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, "type": "string" }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "requestId": { + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", "location": "query", "type": "string" }, + "urlMap": { + "description": "Name of the UrlMap resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/urlMaps/{urlMap}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", + "httpMethod": "GET", + "id": "compute.regionUrlMaps.get", + "parameterOrder": [ + "project", + "region", + "urlMap" + ], + "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -15860,16 +16627,23 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "{project}/regions/{region}/routers", + "path": "{project}/regions/{region}/urlMaps/{urlMap}", "response": { - "$ref": "RouterList" + "$ref": "UrlMap" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15877,14 +16651,13 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - "httpMethod": "PATCH", - "id": "compute.routers.patch", + "insert": { + "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.regionUrlMaps.insert", "parameterOrder": [ "project", - "region", - "router" + "region" ], "parameters": { "project": { @@ -15895,28 +16668,21 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", "location": "query", "type": "string" - }, - "router": { - "description": "Name of the Router resource to patch.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "{project}/regions/{region}/routers/{router}", + "path": "{project}/regions/{region}/urlMaps", "request": { - "$ref": "Router" + "$ref": "UrlMap" }, "response": { "$ref": "Operation" @@ -15926,14 +16692,14 @@ "https://www.googleapis.com/auth/compute" ] }, - "preview": { - "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", + "invalidateCache": { + "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", "httpMethod": "POST", - "id": "compute.routers.preview", + "id": "compute.regionUrlMaps.invalidateCache", "parameterOrder": [ "project", "region", - "router" + "urlMap" ], "parameters": { "project": { @@ -15944,43 +16710,69 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "router": { - "description": "Name of the Router resource to query.", + "requestId": { + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/routers/{router}/preview", + "path": "{project}/regions/{region}/urlMaps/{urlMap}/invalidateCache", "request": { - "$ref": "Router" + "$ref": "CacheInvalidationRule" }, "response": { - "$ref": "RoutersPreviewResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.routers.testIamPermissions", + "list": { + "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region.", + "httpMethod": "GET", + "id": "compute.regionUrlMaps.list", "parameterOrder": [ "project", - "region", - "resource" + "region" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -15989,26 +16781,16 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "{project}/regions/{region}/routers/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "{project}/regions/{region}/urlMaps", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "UrlMapList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16016,14 +16798,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "update": { - "description": "Updates the specified Router resource with the data included in the request.", - "httpMethod": "PUT", - "id": "compute.routers.update", + "patch": { + "description": "Patches the specified UrlMap resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.regionUrlMaps.patch", "parameterOrder": [ "project", "region", - "router" + "urlMap" ], "parameters": { "project": { @@ -16034,28 +16816,28 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", "location": "query", "type": "string" }, - "router": { - "description": "Name of the Router resource to update.", + "urlMap": { + "description": "Name of the UrlMap resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/routers/{router}", + "path": "{project}/regions/{region}/urlMaps/{urlMap}", "request": { - "$ref": "Router" + "$ref": "UrlMap" }, "response": { "$ref": "Operation" @@ -16064,18 +16846,15 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "routes": { - "methods": { - "delete": { - "description": "Deletes the specified Route resource.", - "httpMethod": "DELETE", - "id": "compute.routes.delete", + }, + "update": { + "description": "Updates the specified UrlMap resource with the data included in the request.", + "httpMethod": "PUT", + "id": "compute.regionUrlMaps.update", "parameterOrder": [ "project", - "route" + "region", + "urlMap" ], "parameters": { "project": { @@ -16085,20 +16864,30 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", "location": "query", "type": "string" }, - "route": { - "description": "Name of the Route resource to delete.", + "urlMap": { + "description": "Name of the UrlMap resource to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/routes/{route}", + "path": "{project}/regions/{region}/urlMaps/{urlMap}", + "request": { + "$ref": "UrlMap" + }, "response": { "$ref": "Operation" }, @@ -16107,13 +16896,14 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request.", - "httpMethod": "GET", - "id": "compute.routes.get", + "validate": { + "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", + "httpMethod": "POST", + "id": "compute.regionUrlMaps.validate", "parameterOrder": [ "project", - "route" + "region", + "urlMap" ], "parameters": { "project": { @@ -16123,30 +16913,44 @@ "required": true, "type": "string" }, - "route": { - "description": "Name of the Route resource to return.", + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to be validated as.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/routes/{route}", + "path": "{project}/regions/{region}/urlMaps/{urlMap}/validate", + "request": { + "$ref": "RegionUrlMapsValidateRequest" + }, "response": { - "$ref": "Route" + "$ref": "UrlMapsValidateResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - }, - "insert": { - "description": "Creates a Route resource in the specified project using the data included in the request.", - "httpMethod": "POST", - "id": "compute.routes.insert", + } + } + }, + "regions": { + "methods": { + "get": { + "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request.", + "httpMethod": "GET", + "id": "compute.regions.get", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "project": { @@ -16156,28 +16960,28 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", + "region": { + "description": "Name of the region resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" } }, - "path": "{project}/global/routes", - "request": { - "$ref": "Route" - }, + "path": "{project}/regions/{region}", "response": { - "$ref": "Operation" + "$ref": "Region" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "list": { - "description": "Retrieves the list of Route resources available to the specified project.", + "description": "Retrieves the list of region resources available to the specified project.", "httpMethod": "GET", - "id": "compute.routes.list", + "id": "compute.regions.list", "parameterOrder": [ "project" ], @@ -16213,46 +17017,9 @@ "type": "string" } }, - "path": "{project}/global/routes", - "response": { - "$ref": "RouteList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.routes.testIamPermissions", - "parameterOrder": [ - "project", - "resource" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "{project}/global/routes/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "{project}/regions", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "RegionList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16262,56 +17029,65 @@ } } }, - "securityPolicies": { + "reservations": { "methods": { - "addRule": { - "description": "Inserts a rule into a security policy.", - "httpMethod": "POST", - "id": "compute.securityPolicies.addRule", + "aggregatedList": { + "description": "Retrieves an aggregated list of reservations.", + "httpMethod": "GET", + "id": "compute.reservations.aggregatedList", "parameterOrder": [ - "project", - "securityPolicy" + "project" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "securityPolicy": { - "description": "Name of the security policy to update.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "validateOnly": { - "description": "If true, the request will not be committed.", - "location": "query", - "type": "boolean" } }, - "path": "{project}/global/securityPolicies/{securityPolicy}/addRule", - "request": { - "$ref": "SecurityPolicyRule" - }, + "path": "{project}/aggregated/reservations", "response": { - "$ref": "Operation" + "$ref": "ReservationAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "delete": { - "description": "Deletes the specified policy.", + "description": "Deletes the specified reservation.", "httpMethod": "DELETE", - "id": "compute.securityPolicies.delete", + "id": "compute.reservations.delete", "parameterOrder": [ "project", - "securityPolicy" + "zone", + "reservation" ], "parameters": { "project": { @@ -16326,15 +17102,22 @@ "location": "query", "type": "string" }, - "securityPolicy": { - "description": "Name of the security policy to delete.", + "reservation": { + "description": "Name of the reservation to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/global/securityPolicies/{securityPolicy}", + "path": "{project}/zones/{zone}/reservations/{reservation}", "response": { "$ref": "Operation" }, @@ -16344,12 +17127,13 @@ ] }, "get": { - "description": "List all of the ordered rules present in a single specified policy.", + "description": "Retrieves all information of the specified reservation.", "httpMethod": "GET", - "id": "compute.securityPolicies.get", + "id": "compute.reservations.get", "parameterOrder": [ "project", - "securityPolicy" + "zone", + "reservation" ], "parameters": { "project": { @@ -16359,17 +17143,24 @@ "required": true, "type": "string" }, - "securityPolicy": { - "description": "Name of the security policy to get.", + "reservation": { + "description": "Name of the reservation to retrieve.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/global/securityPolicies/{securityPolicy}", + "path": "{project}/zones/{zone}/reservations/{reservation}", "response": { - "$ref": "SecurityPolicy" + "$ref": "Reservation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16377,21 +17168,16 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getRule": { - "description": "Gets a rule at the 
specified priority.", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", "httpMethod": "GET", - "id": "compute.securityPolicies.getRule", + "id": "compute.reservations.getIamPolicy", "parameterOrder": [ "project", - "securityPolicy" + "zone", + "resource" ], "parameters": { - "priority": { - "description": "The priority of the rule to get from the security policy.", - "format": "int32", - "location": "query", - "type": "integer" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16399,17 +17185,24 @@ "required": true, "type": "string" }, - "securityPolicy": { - "description": "Name of the security policy to which the queried rule belongs.", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/global/securityPolicies/{securityPolicy}/getRule", + "path": "{project}/zones/{zone}/reservations/{resource}/getIamPolicy", "response": { - "$ref": "SecurityPolicyRule" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16418,12 +17211,13 @@ ] }, "insert": { - "description": "Creates a new policy in the specified project using the data included in the request.", + "description": "Creates a new reservation.", "httpMethod": "POST", - "id": "compute.securityPolicies.insert", + "id": "compute.reservations.insert", "parameterOrder": [ - "project" - ], + "project", + "zone" + ], "parameters": { "project": { "description": "Project ID for this request.", @@ -16437,15 +17231,17 @@ "location": "query", "type": "string" }, - "validateOnly": { - "description": "If true, the request will not be committed.", - "location": "query", - "type": "boolean" + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "{project}/global/securityPolicies", + "path": "{project}/zones/{zone}/reservations", "request": { - "$ref": "SecurityPolicy" + "$ref": "Reservation" }, "response": { "$ref": "Operation" @@ -16456,11 +17252,12 @@ ] }, "list": { - "description": "List all the policies that have been configured for the specified project.", + "description": "A list all the reservations that have been configured for the specified project in specified zone.", "httpMethod": "GET", - "id": "compute.securityPolicies.list", + "id": "compute.reservations.list", "parameterOrder": [ - "project" + "project", + "zone" ], "parameters": { "filter": { @@ -16492,73 +17289,33 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - } - }, - "path": "{project}/global/securityPolicies", - "response": { - "$ref": "SecurityPolicyList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "listPreconfiguredExpressionSets": { - "description": "Gets the current list of preconfigured Web Application Firewall (WAF) expressions.", - "httpMethod": "GET", - "id": 
"compute.securityPolicies.listPreconfiguredExpressionSets", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" }, - "project": { - "description": "Project ID for this request.", + "zone": { + "description": "Name of the zone for this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/global/securityPolicies/listPreconfiguredExpressionSets", + "path": "{project}/zones/{zone}/reservations", "response": { - "$ref": "SecurityPoliciesListPreconfiguredExpressionSetsResponse" + "$ref": "ReservationList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified policy with the data included in the request.", - "httpMethod": "PATCH", - "id": "compute.securityPolicies.patch", + "resize": { + "description": "Resizes the reservation (applicable to standalone reservations only)", + "httpMethod": "POST", + "id": "compute.reservations.resize", "parameterOrder": [ "project", - "securityPolicy" + "zone", + "reservation" ], "parameters": { "project": { @@ -16573,64 +17330,24 @@ "location": "query", "type": "string" }, - "securityPolicy": { - "description": "Name of the security policy to update.", + "reservation": { + "description": "Name of the reservation to update.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - } - }, - "path": "{project}/global/securityPolicies/{securityPolicy}", - "request": { - "$ref": "SecurityPolicy" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "patchRule": { - "description": "Patches a rule at the specified priority.", - "httpMethod": "POST", - "id": "compute.securityPolicies.patchRule", - "parameterOrder": [ - "project", - "securityPolicy" - ], - "parameters": { - "priority": { - "description": "The priority of the rule to patch.", - "format": "int32", - "location": "query", - "type": "integer" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" }, - "securityPolicy": { - "description": "Name of the security policy to update.", + "zone": { + "description": "Name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "validateOnly": { - "description": "If true, the request will not be committed.", - "location": "query", - "type": "boolean" } }, - "path": "{project}/global/securityPolicies/{securityPolicy}/patchRule", + "path": "{project}/zones/{zone}/reservations/{reservation}/resize", "request": { - "$ref": "SecurityPolicyRule" + "$ref": "ReservationsResizeRequest" }, "response": { "$ref": "Operation" @@ -16640,21 +17357,16 @@ "https://www.googleapis.com/auth/compute" ] }, - "removeRule": { - "description": "Deletes a rule at the specified priority.", + "setIamPolicy": { + "description": "Sets the access control policy on the specified 
resource. Replaces any existing policy.", "httpMethod": "POST", - "id": "compute.securityPolicies.removeRule", + "id": "compute.reservations.setIamPolicy", "parameterOrder": [ "project", - "securityPolicy" + "zone", + "resource" ], "parameters": { - "priority": { - "description": "The priority of the rule to remove from the security policy.", - "format": "int32", - "location": "query", - "type": "integer" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -16662,29 +17374,40 @@ "required": true, "type": "string" }, - "securityPolicy": { - "description": "Name of the security policy to update.", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/global/securityPolicies/{securityPolicy}/removeRule", + "path": "{project}/zones/{zone}/reservations/{resource}/setIamPolicy", + "request": { + "$ref": "ZoneSetPolicyRequest" + }, "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "setLabels": { - "description": "Sets the labels on a security policy. To learn more about labels, read the Labeling Resources documentation.", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.securityPolicies.setLabels", + "id": "compute.reservations.testIamPermissions", "parameterOrder": [ "project", + "zone", "resource" ], "parameters": { @@ -16698,71 +17421,92 @@ "resource": { "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/global/securityPolicies/{resource}/setLabels", + "path": "{project}/zones/{zone}/reservations/{resource}/testIamPermissions", "request": { - "$ref": "GlobalSetLabelsRequest" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.securityPolicies.testIamPermissions", + } + } + }, + "resourcePolicies": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of resource policies.", + "httpMethod": "GET", + "id": "compute.resourcePolicies.aggregatedList", "parameterOrder": [ - "project", - "resource" + "project" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "{project}/global/securityPolicies/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "{project}/aggregated/resourcePolicies", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "ResourcePolicyAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "snapshots": { - "methods": { + }, "delete": { - "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. 
If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots.", + "description": "Deletes the specified resource policy.", "httpMethod": "DELETE", - "id": "compute.snapshots.delete", + "id": "compute.resourcePolicies.delete", "parameterOrder": [ "project", - "snapshot" + "region", + "resourcePolicy" ], "parameters": { "project": { @@ -16772,20 +17516,27 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "snapshot": { - "description": "Name of the Snapshot resource to delete.", + "resourcePolicy": { + "description": "Name of the resource policy to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/snapshots/{snapshot}", + "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", "response": { "$ref": "Operation" }, @@ -16795,12 +17546,13 @@ ] }, "get": { - "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request.", + "description": "Retrieves all information of the specified resource policy.", "httpMethod": "GET", - "id": "compute.snapshots.get", + "id": "compute.resourcePolicies.get", "parameterOrder": [ "project", - "snapshot" + "region", + "resourcePolicy" ], "parameters": { "project": { @@ -16810,17 +17562,24 @@ "required": true, "type": "string" }, - "snapshot": { - "description": "Name of the Snapshot resource to return.", + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resourcePolicy": { + "description": "Name of the resource policy to retrieve.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/snapshots/{snapshot}", + "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", "response": { - "$ref": "Snapshot" + "$ref": "ResourcePolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16828,13 +17587,13 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - "httpMethod": "GET", - "id": "compute.snapshots.getIamPolicy", + "insert": { + "description": "Creates a new resource policy.", + "httpMethod": "POST", + "id": "compute.resourcePolicies.insert", "parameterOrder": [ "project", - "resource" + "region" ], "parameters": { "project": { @@ -16844,30 +17603,38 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "region": { + "description": "Name of the region for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "{project}/global/snapshots/{resource}/getIamPolicy", + "path": "{project}/regions/{region}/resourcePolicies", + "request": { + "$ref": "ResourcePolicy" + }, "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "list": { - "description": "Retrieves the list of Snapshot resources contained within the specified project.", + "description": "A list all the resource policies that have been configured for the specified project in specified region.", "httpMethod": "GET", - "id": "compute.snapshots.list", + "id": "compute.resourcePolicies.list", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { @@ -16899,11 +17666,18 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "{project}/global/snapshots", + "path": "{project}/regions/{region}/resourcePolicies", "response": { - "$ref": "SnapshotList" + "$ref": "ResourcePolicyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16911,12 +17685,13 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.snapshots.setIamPolicy", + "id": "compute.resourcePolicies.testIamPermissions", "parameterOrder": [ "project", + "region", "resource" ], "parameters": { @@ -16927,110 +17702,95 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "{project}/global/snapshots/{resource}/setIamPolicy", - "request": { - "$ref": "GlobalSetPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setLabels": { - "description": "Sets the labels on a snapshot. To learn more about labels, read the Labeling Resources documentation.", - "httpMethod": "POST", - "id": "compute.snapshots.setLabels", - "parameterOrder": [ - "project", - "resource" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", + "region": { + "description": "The name of the region for this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "resource": { "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/snapshots/{resource}/setLabels", + "path": "{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions", "request": { - "$ref": "GlobalSetLabelsRequest" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.snapshots.testIamPermissions", + } + } + }, + "routers": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of routers.", + "httpMethod": "GET", + "id": "compute.routers.aggregatedList", "parameterOrder": [ - "project", - "resource" + "project" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" } }, - "path": "{project}/global/snapshots/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "{project}/aggregated/routers", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "RouterAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "sslCertificates": { - "methods": { + }, "delete": { - "description": "Deletes the specified SslCertificate resource.", + "description": "Deletes the specified Router resource.", "httpMethod": "DELETE", - "id": "compute.sslCertificates.delete", + "id": "compute.routers.delete", "parameterOrder": [ "project", - "sslCertificate" + "region", + "router" ], "parameters": { "project": { @@ -17040,20 +17800,27 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "sslCertificate": { - "description": "Name of the SslCertificate resource to delete.", + "router": { + "description": "Name of the Router resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/sslCertificates/{sslCertificate}", + "path": "{project}/regions/{region}/routers/{router}", "response": { "$ref": "Operation" }, @@ -17063,12 +17830,13 @@ ] }, "get": { - "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request.", + "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request.", "httpMethod": "GET", - "id": "compute.sslCertificates.get", + "id": "compute.routers.get", "parameterOrder": [ "project", - "sslCertificate" + "region", + "router" ], "parameters": { "project": { @@ -17078,17 +17846,24 @@ "required": true, "type": "string" }, - "sslCertificate": { - "description": "Name of the SslCertificate resource to return.", + "region": { + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "router": { + "description": "Name of the Router resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "{project}/global/sslCertificates/{sslCertificate}", + "path": "{project}/regions/{region}/routers/{router}", "response": { - "$ref": "SslCertificate" + "$ref": "Router" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17096,50 +17871,19 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", - "httpMethod": "POST", - "id": "compute.sslCertificates.insert", + "getNatMappingInfo": { + "description": "Retrieves runtime Nat mapping information of VM endpoints.", + "httpMethod": "GET", + "id": "compute.routers.getNatMappingInfo", "parameterOrder": [ - "project" + "project", + "region", + "router" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "{project}/global/sslCertificates", - "request": { - "$ref": "SslCertificate" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "description": "Retrieves the list of SslCertificate resources available to the specified project.", - "httpMethod": "GET", - "id": "compute.sslCertificates.list", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", "type": "string" }, "maxResults": { @@ -17166,66 +17910,40 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - } - }, - "path": "{project}/global/sslCertificates", - "response": { - "$ref": "SslCertificateList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.sslCertificates.testIamPermissions", - "parameterOrder": [ - "project", - "resource" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", + }, + "region": { + "description": "Name of the region for this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "router": { + "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/sslCertificates/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "{project}/regions/{region}/routers/{router}/getNatMappingInfo", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "VmEndpointNatMappingsList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "sslPolicies": { - "methods": { - "delete": { - "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", - "httpMethod": "DELETE", - "id": "compute.sslPolicies.delete", + }, + "getRouterStatus": { + "description": "Retrieves runtime information of the specified router.", + "httpMethod": "GET", + "id": "compute.routers.getRouterStatus", "parameterOrder": [ "project", - "sslPolicy" + "region", + "router" ], "parameters": { "project": { @@ -17235,53 +17953,24 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "sslPolicy": { - "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", - "location": "path", - "required": true, - "type": "string" - } - }, - "path": "{project}/global/sslPolicies/{sslPolicy}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Lists all of the ordered rules present in a single specified policy.", - "httpMethod": "GET", - "id": "compute.sslPolicies.get", - "parameterOrder": [ - "project", - "sslPolicy" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", + "region": { + "description": "Name of the region for this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "sslPolicy": { - "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + "router": { + "description": "Name of the Router resource to query.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/sslPolicies/{sslPolicy}", + "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", "response": { - "$ref": "SslPolicy" + "$ref": "RouterStatusResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17290,11 +17979,12 @@ ] }, "insert": { - "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request.", + "description": "Creates a Router resource in the specified project and region using the data included in the request.", "httpMethod": "POST", - "id": "compute.sslPolicies.insert", + "id": "compute.routers.insert", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "project": { @@ -17304,15 +17994,22 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "{project}/global/sslPolicies", + "path": "{project}/regions/{region}/routers", "request": { - "$ref": "SslPolicy" + "$ref": "Router" }, "response": { "$ref": "Operation" @@ -17323,11 +18020,12 @@ ] }, "list": { - "description": "Lists all the SSL policies that have been configured for the specified project.", + "description": "Retrieves a list of Router resources available to the specified project.", "httpMethod": "GET", - "id": "compute.sslPolicies.list", + "id": "compute.routers.list", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { @@ -17359,60 +18057,18 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - } - }, - "path": "{project}/global/sslPolicies", - "response": { - "$ref": "SslPoliciesList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "listAvailableFeatures": { - "description": "Lists all features that can be specified in the SSL policy when using custom profile.", - "httpMethod": "GET", - "id": "compute.sslPolicies.listAvailableFeatures", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" }, - "project": { - "description": "Project ID for this request.", + "region": { + "description": "Name of the region for this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/global/sslPolicies/listAvailableFeatures", + "path": "{project}/regions/{region}/routers", "response": { - "$ref": "SslPoliciesListAvailableFeaturesResponse" + "$ref": "RouterList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17421,12 +18077,13 @@ ] }, "patch": { - "description": "Patches the specified SSL policy with the data included in the request.", + "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", "httpMethod": "PATCH", - "id": "compute.sslPolicies.patch", + "id": "compute.routers.patch", "parameterOrder": [ "project", - "sslPolicy" + "region", + "router" ], "parameters": { "project": { @@ -17436,21 +18093,29 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "sslPolicy": { - "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + "router": { + "description": "Name of the Router resource to patch.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/sslPolicies/{sslPolicy}", + "path": "{project}/regions/{region}/routers/{router}", "request": { - "$ref": "SslPolicy" + "$ref": "Router" }, "response": { "$ref": "Operation" @@ -17460,13 +18125,14 @@ "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "preview": { + "description": "Preview fields auto-generated during router create and update operations. 
Calling this method does NOT create or update the router.", "httpMethod": "POST", - "id": "compute.sslPolicies.testIamPermissions", + "id": "compute.routers.preview", "parameterOrder": [ "project", - "resource" + "region", + "router" ], "parameters": { "project": { @@ -17476,73 +18142,27 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "region": { + "description": "Name of the region for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - } - }, - "path": "{project}/global/sslPolicies/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "subnetworks": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of subnetworks.", - "httpMethod": "GET", - "id": "compute.subnetworks.aggregatedList", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" }, - "project": { - "description": "Project ID for this request.", + "router": { + "description": "Name of the Router resource to query.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/aggregated/subnetworks", + "path": "{project}/regions/{region}/routers/{router}/preview", + "request": { + "$ref": "Router" + }, "response": { - "$ref": "SubnetworkAggregatedList" + "$ref": "RoutersPreviewResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17550,14 +18170,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified subnetwork.", - "httpMethod": "DELETE", - "id": "compute.subnetworks.delete", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.routers.testIamPermissions", "parameterOrder": [ "project", "region", - "subnetwork" + "resource" ], "parameters": { "project": { @@ -17568,42 +18188,41 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "subnetwork": { - "description": "Name of the Subnetwork resource to delete.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/subnetworks/{subnetwork}", + "path": "{project}/regions/{region}/routers/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "expandIpCidrRange": { - "description": "Expands the IP CIDR range of the subnetwork to a specified value.", - "httpMethod": "POST", - "id": "compute.subnetworks.expandIpCidrRange", + "update": { + "description": "Updates the specified Router resource with the data included in the request.", + "httpMethod": "PUT", + "id": "compute.routers.update", "parameterOrder": [ "project", "region", - "subnetwork" + "router" ], "parameters": { "project": { @@ -17614,7 +18233,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -17625,17 +18244,17 @@ "location": "query", "type": "string" }, - "subnetwork": { - "description": "Name of the Subnetwork resource to update.", + "router": { + "description": "Name of the Router resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", + "path": "{project}/regions/{region}/routers/{router}", "request": { - "$ref": "SubnetworksExpandIpCidrRangeRequest" + "$ref": "Router" }, "response": { "$ref": "Operation" @@ -17644,15 +18263,18 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "get": { - "description": "Returns the specified subnetwork. Gets a list of available subnetworks list() request.", - "httpMethod": "GET", - "id": "compute.subnetworks.get", + } + } + }, + "routes": { + "methods": { + "delete": { + "description": "Deletes the specified Route resource.", + "httpMethod": "DELETE", + "id": "compute.routes.delete", "parameterOrder": [ "project", - "region", - "subnetwork" + "route" ], "parameters": { "project": { @@ -17662,39 +18284,35 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" }, - "subnetwork": { - "description": "Name of the Subnetwork resource to return.", + "route": { + "description": "Name of the Route resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/subnetworks/{subnetwork}", + "path": "{project}/global/routes/{route}", "response": { - "$ref": "Subnetwork" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "get": { + "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request.", "httpMethod": "GET", - "id": "compute.subnetworks.getIamPolicy", + "id": "compute.routes.get", "parameterOrder": [ "project", - "region", - "resource" + "route" ], "parameters": { "project": { @@ -17704,24 +18322,17 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", + "route": { + "description": "Name of the Route resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", + "path": "{project}/global/routes/{route}", "response": { - "$ref": "Policy" + "$ref": "Route" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17730,12 +18341,11 @@ ] }, "insert": { - "description": "Creates a subnetwork in the specified project using the data included in the request.", + "description": "Creates a Route resource in the specified project using the data included in the request.", "httpMethod": "POST", - "id": "compute.subnetworks.insert", + "id": "compute.routes.insert", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "project": { @@ -17745,22 +18355,15 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "{project}/regions/{region}/subnetworks", + "path": "{project}/global/routes", "request": { - "$ref": "Subnetwork" + "$ref": "Route" }, "response": { "$ref": "Operation" @@ -17771,12 +18374,11 @@ ] }, "list": { - "description": "Retrieves a list of subnetworks available to the specified project.", + "description": "Retrieves the list of Route resources available to the specified project.", "httpMethod": "GET", - "id": "compute.subnetworks.list", + "id": "compute.routes.list", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "filter": { @@ -17808,18 +18410,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "{project}/regions/{region}/subnetworks", + "path": "{project}/global/routes", "response": { - "$ref": "SubnetworkList" + "$ref": "RouteList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -17827,63 +18422,54 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "listUsable": { - "description": "Retrieves an aggregated list of usable subnetworks.", - "httpMethod": "GET", - "id": "compute.subnetworks.listUsable", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.routes.testIamPermissions", "parameterOrder": [ - "project" + "project", + "resource" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "{project}/aggregated/subnetworks/listUsable", + "path": "{project}/global/routes/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, "response": { - "$ref": "UsableSubnetworksAggregatedList" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "patch": { - "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can up updated with a patch request as indicated in the field descriptions. You must specify the current fingeprint of the subnetwork resource being patched.", - "httpMethod": "PATCH", - "id": "compute.subnetworks.patch", + } + } + }, + "securityPolicies": { + "methods": { + "addRule": { + "description": "Inserts a rule into a security policy.", + "httpMethod": "POST", + "id": "compute.securityPolicies.addRule", "parameterOrder": [ "project", - "region", - "subnetwork" + "securityPolicy" ], "parameters": { "project": { @@ -17893,29 +18479,22 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", + "securityPolicy": { + "description": "Name of the security policy to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "validateOnly": { + "description": "If true, the request will not be committed.", "location": "query", - "type": "string" - }, - "subnetwork": { - "description": "Name of the Subnetwork resource to patch.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" + "type": "boolean" } }, - "path": "{project}/regions/{region}/subnetworks/{subnetwork}", + "path": "{project}/global/securityPolicies/{securityPolicy}/addRule", "request": { - "$ref": "Subnetwork" + "$ref": "SecurityPolicyRule" }, "response": { "$ref": "Operation" @@ -17925,14 +18504,13 @@ "https://www.googleapis.com/auth/compute" ] }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - "httpMethod": "POST", - "id": "compute.subnetworks.setIamPolicy", + "delete": { + "description": "Deletes the specified policy.", + "httpMethod": "DELETE", + "id": "compute.securityPolicies.delete", "parameterOrder": [ "project", - "region", - "resource" + "securityPolicy" ], "parameters": { "project": { @@ -17942,41 +18520,35 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "securityPolicy": { + "description": "Name of the security policy to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/subnetworks/{resource}/setIamPolicy", - "request": { - "$ref": "RegionSetPolicyRequest" - }, + "path": "{project}/global/securityPolicies/{securityPolicy}", "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "setPrivateIpGoogleAccess": { - "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", - "httpMethod": "POST", - "id": "compute.subnetworks.setPrivateIpGoogleAccess", + "get": { + "description": "List all of the ordered rules present in a single specified policy.", + "httpMethod": "GET", + "id": "compute.securityPolicies.get", "parameterOrder": [ "project", - "region", - "subnetwork" + "securityPolicy" ], "parameters": { "project": { @@ -17986,48 +18558,39 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "subnetwork": { - "description": "Name of the Subnetwork resource.", + "securityPolicy": { + "description": "Name of the security policy to get.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", - "request": { - "$ref": "SubnetworksSetPrivateIpGoogleAccessRequest" - }, + "path": "{project}/global/securityPolicies/{securityPolicy}", "response": { - "$ref": "Operation" + "$ref": "SecurityPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.subnetworks.testIamPermissions", + "getRule": { + "description": "Gets a rule at the specified priority.", + "httpMethod": "GET", + "id": "compute.securityPolicies.getRule", "parameterOrder": [ "project", - "region", - "resource" + "securityPolicy" ], "parameters": { + "priority": { + "description": "The priority of the rule to get from the security policy.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -18035,45 +18598,30 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "resource": { - "description": "Name or id of the resource for this request.", + "securityPolicy": { + "description": "Name of the security policy to which the queried rule belongs.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "{project}/global/securityPolicies/{securityPolicy}/getRule", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "SecurityPolicyRule" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "targetHttpProxies": { - "methods": { - "delete": { - "description": "Deletes the specified TargetHttpProxy resource.", - "httpMethod": "DELETE", - "id": "compute.targetHttpProxies.delete", + }, + "insert": { + "description": "Creates a new policy in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.securityPolicies.insert", "parameterOrder": [ - "project", - "targetHttpProxy" + "project" ], "parameters": { "project": { @@ -18088,15 +18636,16 @@ "location": "query", "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" + "validateOnly": { + 
"description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" } }, - "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + "path": "{project}/global/securityPolicies", + "request": { + "$ref": "SecurityPolicy" + }, "response": { "$ref": "Operation" }, @@ -18105,46 +18654,110 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request.", + "list": { + "description": "List all the policies that have been configured for the specified project.", "httpMethod": "GET", - "id": "compute.targetHttpProxies.get", + "id": "compute.securityPolicies.list", "parameterOrder": [ - "project", - "targetHttpProxy" + "project" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + } + }, + "path": "{project}/global/securityPolicies", + "response": { + "$ref": "SecurityPolicyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "listPreconfiguredExpressionSets": { + "description": "Gets the current list of preconfigured Web Application Firewall (WAF) expressions.", + "httpMethod": "GET", + "id": "compute.securityPolicies.listPreconfiguredExpressionSets", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to return.", + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" } }, - "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + "path": "{project}/global/securityPolicies/listPreconfiguredExpressionSets", "response": { - "$ref": "TargetHttpProxy" + "$ref": "SecurityPoliciesListPreconfiguredExpressionSetsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", - "httpMethod": "POST", - "id": "compute.targetHttpProxies.insert", + "patch": { + "description": "Patches the specified policy with the data included in the request.", + "httpMethod": "PATCH", + "id": "compute.securityPolicies.patch", "parameterOrder": [ - "project" + "project", + "securityPolicy" ], "parameters": { "project": { @@ -18158,11 +18771,18 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "securityPolicy": { + "description": "Name of the security policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "{project}/global/targetHttpProxies", + "path": "{project}/global/securityPolicies/{securityPolicy}", "request": { - "$ref": "TargetHttpProxy" + "$ref": "SecurityPolicy" }, "response": { "$ref": "Operation" @@ -18172,62 +18792,99 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", - "httpMethod": "GET", - "id": "compute.targetHttpProxies.list", + "patchRule": { + "description": "Patches a rule at the specified priority.", + "httpMethod": "POST", + "id": "compute.securityPolicies.patchRule", "parameterOrder": [ - "project" + "project", + "securityPolicy" ], "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", + "priority": { + "description": "The priority of the rule to patch.", + "format": "int32", "location": "query", - "minimum": "0", "type": "integer" }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, "type": "string" }, - "pageToken": { - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", + "securityPolicy": { + "description": "Name of the security policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" }, + "validateOnly": { + "description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" + } + }, + "path": "{project}/global/securityPolicies/{securityPolicy}/patchRule", + "request": { + "$ref": "SecurityPolicyRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "removeRule": { + "description": "Deletes a rule at the specified priority.", + "httpMethod": "POST", + "id": "compute.securityPolicies.removeRule", + "parameterOrder": [ + "project", + "securityPolicy" + ], + "parameters": { + "priority": { + "description": "The priority of the rule to remove from the security policy.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "securityPolicy": { + "description": "Name of the security policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "{project}/global/targetHttpProxies", + "path": "{project}/global/securityPolicies/{securityPolicy}/removeRule", "response": { - "$ref": "TargetHttpProxyList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "setUrlMap": { - "description": "Changes the URL map for TargetHttpProxy.", + "setLabels": { + "description": "Sets the labels on a security policy. To learn more about labels, read the Labeling Resources documentation.", "httpMethod": "POST", - "id": "compute.targetHttpProxies.setUrlMap", + "id": "compute.securityPolicies.setLabels", "parameterOrder": [ "project", - "targetHttpProxy" + "resource" ], "parameters": { "project": { @@ -18237,22 +18894,17 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy to set a URL map for.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "path": "{project}/global/securityPolicies/{resource}/setLabels", "request": { - "$ref": "UrlMapReference" + "$ref": "GlobalSetLabelsRequest" }, "response": { "$ref": "Operation" @@ -18265,7 +18917,7 @@ "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.targetHttpProxies.testIamPermissions", + "id": "compute.securityPolicies.testIamPermissions", "parameterOrder": [ "project", "resource" @@ -18286,7 +18938,7 @@ "type": "string" } }, - "path": "{project}/global/targetHttpProxies/{resource}/testIamPermissions", + "path": "{project}/global/securityPolicies/{resource}/testIamPermissions", "request": { "$ref": "TestPermissionsRequest" }, @@ -18301,15 +18953,15 @@ } } }, - "targetHttpsProxies": { + "snapshots": { "methods": { "delete": { - "description": "Deletes the specified TargetHttpsProxy resource.", + "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots.", "httpMethod": "DELETE", - "id": "compute.targetHttpsProxies.delete", + "id": "compute.snapshots.delete", "parameterOrder": [ "project", - "targetHttpsProxy" + "snapshot" ], "parameters": { "project": { @@ -18324,15 +18976,15 @@ "location": "query", "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to delete.", + "snapshot": { + "description": "Name of the Snapshot resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "path": "{project}/global/snapshots/{snapshot}", "response": { "$ref": "Operation" }, @@ -18342,12 +18994,12 @@ ] }, "get": { - "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request.", + "description": "Returns the specified Snapshot resource. 
Gets a list of available snapshots by making a list() request.", "httpMethod": "GET", - "id": "compute.targetHttpsProxies.get", + "id": "compute.snapshots.get", "parameterOrder": [ "project", - "targetHttpsProxy" + "snapshot" ], "parameters": { "project": { @@ -18357,17 +19009,17 @@ "required": true, "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to return.", + "snapshot": { + "description": "Name of the Snapshot resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "path": "{project}/global/snapshots/{snapshot}", "response": { - "$ref": "TargetHttpsProxy" + "$ref": "Snapshot" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -18375,12 +19027,13 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", - "httpMethod": "POST", - "id": "compute.targetHttpsProxies.insert", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "httpMethod": "GET", + "id": "compute.snapshots.getIamPolicy", "parameterOrder": [ - "project" + "project", + "resource" ], "parameters": { "project": { @@ -18390,28 +19043,28 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" } }, - "path": "{project}/global/targetHttpsProxies", - "request": { - "$ref": "TargetHttpsProxy" - }, + "path": "{project}/global/snapshots/{resource}/getIamPolicy", "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "list": { - "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", + "description": "Retrieves the list of Snapshot resources contained within the specified project.", "httpMethod": "GET", - "id": "compute.targetHttpsProxies.list", + "id": "compute.snapshots.list", "parameterOrder": [ "project" ], @@ -18447,9 +19100,9 @@ "type": "string" } }, - "path": "{project}/global/targetHttpsProxies", + "path": "{project}/global/snapshots", "response": { - "$ref": "TargetHttpsProxyList" + "$ref": "SnapshotList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -18457,13 +19110,13 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setQuicOverride": { - "description": "Sets the QUIC override policy for TargetHttpsProxy.", + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", - "id": "compute.targetHttpsProxies.setQuicOverride", + "id": "compute.snapshots.setIamPolicy", "parameterOrder": [ "project", - "targetHttpsProxy" + "resource" ], "parameters": { "project": { @@ -18473,37 +19126,33 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to set the QUIC override policy for. 
The name should conform to RFC1035.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride", + "path": "{project}/global/snapshots/{resource}/setIamPolicy", "request": { - "$ref": "TargetHttpsProxiesSetQuicOverrideRequest" + "$ref": "GlobalSetPolicyRequest" }, "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "setSslCertificates": { - "description": "Replaces SslCertificates for TargetHttpsProxy.", + "setLabels": { + "description": "Sets the labels on a snapshot. To learn more about labels, read the Labeling Resources documentation.", "httpMethod": "POST", - "id": "compute.targetHttpsProxies.setSslCertificates", + "id": "compute.snapshots.setLabels", "parameterOrder": [ "project", - "targetHttpsProxy" + "resource" ], "parameters": { "project": { @@ -18513,103 +19162,17 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "path": "{project}/global/snapshots/{resource}/setLabels", "request": { - "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setSslPolicy": { - "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends.", - "httpMethod": "POST", - "id": "compute.targetHttpsProxies.setSslPolicy", - "parameterOrder": [ - "project", - "targetHttpsProxy" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", - "location": "path", - "required": true, - "type": "string" - } - }, - "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy", - "request": { - "$ref": "SslPolicyReference" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setUrlMap": { - "description": "Changes the URL map for TargetHttpsProxy.", - "httpMethod": "POST", - "id": "compute.targetHttpsProxies.setUrlMap", - "parameterOrder": [ - "project", - "targetHttpsProxy" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", - "request": { - "$ref": "UrlMapReference" + "$ref": "GlobalSetLabelsRequest" }, "response": { "$ref": "Operation" @@ -18622,7 +19185,7 @@ "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.targetHttpsProxies.testIamPermissions", + "id": "compute.snapshots.testIamPermissions", "parameterOrder": [ "project", "resource" @@ -18643,7 +19206,7 @@ "type": "string" } }, - "path": "{project}/global/targetHttpsProxies/{resource}/testIamPermissions", + "path": "{project}/global/snapshots/{resource}/testIamPermissions", "request": { "$ref": "TestPermissionsRequest" }, @@ -18658,12 +19221,12 @@ } } }, - "targetInstances": { + "sslCertificates": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of target instances.", + "description": "Retrieves the list of all SslCertificate resources, regional and global, available to the specified project.", "httpMethod": "GET", - "id": "compute.targetInstances.aggregatedList", + "id": "compute.sslCertificates.aggregatedList", "parameterOrder": [ "project" ], @@ -18692,16 +19255,16 @@ "type": "string" }, "project": { - "description": "Project ID for this request.", + "description": "Name of the project scoping this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" } }, - "path": "{project}/aggregated/targetInstances", + "path": "{project}/aggregated/sslCertificates", "response": { - "$ref": "TargetInstanceAggregatedList" + "$ref": "SslCertificateAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -18710,13 +19273,12 @@ ] }, "delete": { - "description": "Deletes the specified TargetInstance resource.", + "description": "Deletes the specified SslCertificate resource.", "httpMethod": "DELETE", - "id": "compute.targetInstances.delete", + "id": "compute.sslCertificates.delete", "parameterOrder": [ "project", - "zone", - "targetInstance" + "sslCertificate" ], "parameters": { "project": { @@ -18731,22 +19293,15 @@ "location": "query", "type": "string" }, - "targetInstance": { - "description": "Name of the TargetInstance resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "zone": { - "description": "Name of the zone scoping this request.", + "sslCertificate": { + "description": "Name of the SslCertificate resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", + "path": "{project}/global/sslCertificates/{sslCertificate}", "response": { "$ref": "Operation" }, @@ -18756,13 +19311,12 @@ ] }, "get": { - "description": "Returns the 
specified TargetInstance resource. Gets a list of available target instances by making a list() request.", + "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request.", "httpMethod": "GET", - "id": "compute.targetInstances.get", + "id": "compute.sslCertificates.get", "parameterOrder": [ "project", - "zone", - "targetInstance" + "sslCertificate" ], "parameters": { "project": { @@ -18772,24 +19326,17 @@ "required": true, "type": "string" }, - "targetInstance": { - "description": "Name of the TargetInstance resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "zone": { - "description": "Name of the zone scoping this request.", + "sslCertificate": { + "description": "Name of the SslCertificate resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", + "path": "{project}/global/sslCertificates/{sslCertificate}", "response": { - "$ref": "TargetInstance" + "$ref": "SslCertificate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -18798,12 +19345,11 @@ ] }, "insert": { - "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", + "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", "httpMethod": "POST", - "id": "compute.targetInstances.insert", + "id": "compute.sslCertificates.insert", "parameterOrder": [ - "project", - "zone" + "project" ], "parameters": { "project": { @@ -18817,18 +19363,11 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "zone": { - "description": "Name of the zone scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "{project}/zones/{zone}/targetInstances", + "path": "{project}/global/sslCertificates", "request": { - "$ref": "TargetInstance" + "$ref": "SslCertificate" }, "response": { "$ref": "Operation" @@ -18839,12 +19378,11 @@ ] }, "list": { - "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", + "description": "Retrieves the list of SslCertificate resources available to the specified project.", "httpMethod": "GET", - "id": "compute.targetInstances.list", + "id": "compute.sslCertificates.list", "parameterOrder": [ - "project", - "zone" + "project" ], "parameters": { "filter": { @@ -18876,18 +19414,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "zone": { - "description": "Name of the zone scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "{project}/zones/{zone}/targetInstances", + "path": "{project}/global/sslCertificates", "response": { - "$ref": "TargetInstanceList" + "$ref": "SslCertificateList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -18898,10 +19429,9 @@ "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.targetInstances.testIamPermissions", + "id": "compute.sslCertificates.testIamPermissions", "parameterOrder": [ "project", - "zone", "resource" ], "parameters": { @@ -18915,19 +19445,12 @@ "resource": { "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", + "path": "{project}/global/sslCertificates/{resource}/testIamPermissions", "request": { "$ref": "TestPermissionsRequest" }, @@ -18942,16 +19465,15 @@ } } }, - "targetPools": { + "sslPolicies": { "methods": { - "addHealthCheck": { - "description": "Adds health check URLs to a target pool.", - "httpMethod": "POST", - "id": "compute.targetPools.addHealthCheck", + "delete": { + "description": "Deletes the specified SSL policy. 
The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + "httpMethod": "DELETE", + "id": "compute.sslPolicies.delete", "parameterOrder": [ "project", - "region", - "targetPool" + "sslPolicy" ], "parameters": { "project": { @@ -18961,30 +19483,19 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "targetPool": { - "description": "Name of the target pool to add a health check to.", + "sslPolicy": { + "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", - "request": { - "$ref": "TargetPoolsAddHealthCheckRequest" - }, + "path": "{project}/global/sslPolicies/{sslPolicy}", "response": { "$ref": "Operation" }, @@ -18993,14 +19504,13 @@ "https://www.googleapis.com/auth/compute" ] }, - "addInstance": { - "description": "Adds an instance to a target pool.", - "httpMethod": "POST", - "id": "compute.targetPools.addInstance", + "get": { + "description": "Lists all of the ordered rules present in a single specified policy.", + "httpMethod": "GET", + "id": "compute.sslPolicies.get", "parameterOrder": [ "project", - "region", - "targetPool" + "sslPolicy" ], "parameters": { "project": { @@ -19010,10 +19520,35 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", + "sslPolicy": { + "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/sslPolicies/{sslPolicy}", + "response": { + "$ref": "SslPolicy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Returns the specified SSL policy resource. 
Gets a list of available SSL policies by making a list() request.", + "httpMethod": "POST", + "id": "compute.sslPolicies.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" }, @@ -19021,18 +19556,11 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - }, - "targetPool": { - "description": "Name of the TargetPool resource to add instances to.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance", + "path": "{project}/global/sslPolicies", "request": { - "$ref": "TargetPoolsAddInstanceRequest" + "$ref": "SslPolicy" }, "response": { "$ref": "Operation" @@ -19042,10 +19570,10 @@ "https://www.googleapis.com/auth/compute" ] }, - "aggregatedList": { - "description": "Retrieves an aggregated list of target pools.", + "list": { + "description": "Lists all the SSL policies that have been configured for the specified project.", "httpMethod": "GET", - "id": "compute.targetPools.aggregatedList", + "id": "compute.sslPolicies.list", "parameterOrder": [ "project" ], @@ -19081,9 +19609,9 @@ "type": "string" } }, - "path": "{project}/aggregated/targetPools", + "path": "{project}/global/sslPolicies", "response": { - "$ref": "TargetPoolAggregatedList" + "$ref": "SslPoliciesList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19091,87 +19619,48 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified target pool.", - "httpMethod": "DELETE", - "id": "compute.targetPools.delete", + "listAvailableFeatures": { + "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + "httpMethod": "GET", + "id": "compute.sslPolicies.listAvailableFeatures", "parameterOrder": [ - "project", - "region", - "targetPool" + "project" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", "location": "query", "type": "string" }, - "targetPool": { - "description": "Name of the TargetPool resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", "type": "string" - } - }, - "path": "{project}/regions/{region}/targetPools/{targetPool}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Returns the specified target pool. 
Gets a list of available target pools by making a list() request.", - "httpMethod": "GET", - "id": "compute.targetPools.get", - "parameterOrder": [ - "project", - "region", - "targetPool" - ], - "parameters": { + }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "targetPool": { - "description": "Name of the TargetPool resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "{project}/regions/{region}/targetPools/{targetPool}", + "path": "{project}/global/sslPolicies/listAvailableFeatures", "response": { - "$ref": "TargetPool" + "$ref": "SslPoliciesListAvailableFeaturesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19179,14 +19668,13 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getHealth": { - "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", - "httpMethod": "POST", - "id": "compute.targetPools.getHealth", + "patch": { + "description": "Patches the specified SSL policy with the data included in the request.", + "httpMethod": "PATCH", + "id": "compute.sslPolicies.patch", "parameterOrder": [ "project", - "region", - "targetPool" + "sslPolicy" ], "parameters": { "project": { @@ -19196,41 +19684,37 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" }, - "targetPool": { - "description": "Name of the TargetPool resource to which the queried instance belongs.", + "sslPolicy": { + "description": "Name of the SSL policy to update. 
The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth", + "path": "{project}/global/sslPolicies/{sslPolicy}", "request": { - "$ref": "InstanceReference" + "$ref": "SslPolicy" }, "response": { - "$ref": "TargetPoolInstanceHealth" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a target pool in the specified project and region using the data included in the request.", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.targetPools.insert", + "id": "compute.sslPolicies.testIamPermissions", "parameterOrder": [ "project", - "region" + "resource" ], "parameters": { "project": { @@ -19240,38 +19724,37 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "{project}/regions/{region}/targetPools", + "path": "{project}/global/sslPolicies/{resource}/testIamPermissions", "request": { - "$ref": "TargetPool" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "list": { - "description": "Retrieves a list of target pools available to the specified project and region.", + } + } + }, + "subnetworks": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of subnetworks.", "httpMethod": "GET", - "id": "compute.targetPools.list", + "id": "compute.subnetworks.aggregatedList", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "filter": { @@ -19303,18 +19786,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "{project}/regions/{region}/targetPools", + "path": "{project}/aggregated/subnetworks", "response": { - "$ref": "TargetPoolList" + "$ref": "SubnetworkAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19322,14 +19798,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "removeHealthCheck": { - "description": "Removes health check URL from a target pool.", - "httpMethod": "POST", - "id": "compute.targetPools.removeHealthCheck", + "delete": { + "description": "Deletes the specified subnetwork.", + "httpMethod": "DELETE", + "id": "compute.subnetworks.delete", "parameterOrder": [ "project", "region", - "targetPool" + "subnetwork" ], "parameters": { "project": { @@ -19340,7 +19816,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -19351,18 +19827,15 @@ "location": "query", "type": "string" }, - "targetPool": { - "description": "Name of the target pool to remove health checks from.", + "subnetwork": { + "description": "Name of the Subnetwork resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", - "request": { - "$ref": "TargetPoolsRemoveHealthCheckRequest" - }, + "path": "{project}/regions/{region}/subnetworks/{subnetwork}", "response": { "$ref": "Operation" }, @@ -19371,14 +19844,14 @@ "https://www.googleapis.com/auth/compute" ] }, - "removeInstance": { - "description": "Removes instance URL from a target pool.", + "expandIpCidrRange": { + "description": "Expands the IP CIDR range of the subnetwork to a specified value.", "httpMethod": "POST", - "id": "compute.targetPools.removeInstance", + 
"id": "compute.subnetworks.expandIpCidrRange", "parameterOrder": [ "project", "region", - "targetPool" + "subnetwork" ], "parameters": { "project": { @@ -19400,17 +19873,17 @@ "location": "query", "type": "string" }, - "targetPool": { - "description": "Name of the TargetPool resource to remove instances from.", + "subnetwork": { + "description": "Name of the Subnetwork resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + "path": "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", "request": { - "$ref": "TargetPoolsRemoveInstanceRequest" + "$ref": "SubnetworksExpandIpCidrRangeRequest" }, "response": { "$ref": "Operation" @@ -19420,22 +19893,16 @@ "https://www.googleapis.com/auth/compute" ] }, - "setBackup": { - "description": "Changes a backup target pool's configurations.", - "httpMethod": "POST", - "id": "compute.targetPools.setBackup", + "get": { + "description": "Returns the specified subnetwork. Gets a list of available subnetworks list() request.", + "httpMethod": "GET", + "id": "compute.subnetworks.get", "parameterOrder": [ "project", "region", - "targetPool" + "subnetwork" ], "parameters": { - "failoverRatio": { - "description": "New failoverRatio value for the target pool.", - "format": "float", - "location": "query", - "type": "number" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -19450,35 +19917,28 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetPool": { - "description": "Name of the TargetPool resource to set a backup pool for.", + "subnetwork": { + "description": "Name of the Subnetwork resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup", - "request": { - "$ref": "TargetReference" - }, + "path": "{project}/regions/{region}/subnetworks/{subnetwork}", "response": { - "$ref": "Operation" + "$ref": "Subnetwork" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.targetPools.testIamPermissions", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + "httpMethod": "GET", + "id": "compute.subnetworks.getIamPolicy", "parameterOrder": [ "project", "region", @@ -19507,30 +19967,23 @@ "type": "string" } }, - "path": "{project}/regions/{region}/targetPools/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "targetSslProxies": { - "methods": { - "delete": { - "description": "Deletes the specified TargetSslProxy resource.", - "httpMethod": "DELETE", - "id": "compute.targetSslProxies.delete", + }, + "insert": { + "description": "Creates a subnetwork in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.subnetworks.insert", "parameterOrder": [ "project", - "targetSslProxy" + "region" ], "parameters": { "project": { @@ -19540,20 +19993,23 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetSslProxy": { - "description": "Name of the TargetSslProxy resource to delete.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "{project}/global/targetSslProxies/{targetSslProxy}", + "path": "{project}/regions/{region}/subnetworks", + "request": { + "$ref": "Subnetwork" + }, "response": { "$ref": "Operation" }, @@ -19562,15 +20018,38 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified TargetSslProxy resource. 
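Illustrative note (not part of the vendored file): the requestId parameter that recurs throughout these methods gives mutating calls at-most-once semantics, since a retry that reuses the same UUID is ignored rather than re-executed. A sketch of wiring it into subnetworks.insert through the generated Go client; the UUID constant is a placeholder and should be freshly generated per logical request.

package example

import compute "google.golang.org/api/compute/v1"

// Placeholder request ID; generate a fresh, valid (non-zero) UUID for every
// logical request and reuse it only when retrying that same request.
const reqID = "3f1c9c2e-6b5a-4d6f-9a6e-0d2c4b8e7a10"

// createSubnetwork inserts a subnetwork idempotently: if the call is retried
// with the same requestId, the server ignores the duplicate instead of
// creating a second resource.
func createSubnetwork(svc *compute.Service) (*compute.Operation, error) {
	subnet := &compute.Subnetwork{
		Name:        "my-subnet", // placeholder
		Network:     "global/networks/default",
		IpCidrRange: "10.10.0.0/24",
	}
	return svc.Subnetworks.Insert("my-project", "us-central1", subnet).
		RequestId(reqID).
		Do()
}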
Gets a list of available target SSL proxies by making a list() request.", + "list": { + "description": "Retrieves a list of subnetworks available to the specified project.", "httpMethod": "GET", - "id": "compute.targetSslProxies.get", + "id": "compute.subnetworks.list", "parameterOrder": [ "project", - "targetSslProxy" + "region" ], "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -19578,17 +20057,17 @@ "required": true, "type": "string" }, - "targetSslProxy": { - "description": "Name of the TargetSslProxy resource to return.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/global/targetSslProxies/{targetSslProxy}", + "path": "{project}/regions/{region}/subnetworks", "response": { - "$ref": "TargetSslProxy" + "$ref": "SubnetworkList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19596,43 +20075,10 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", - "httpMethod": "POST", - "id": "compute.targetSslProxies.insert", - "parameterOrder": [ - "project" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "{project}/global/targetSslProxies", - "request": { - "$ref": "TargetSslProxy" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", + "listUsable": { + "description": "Retrieves an aggregated list of usable subnetworks.", "httpMethod": "GET", - "id": "compute.targetSslProxies.list", + "id": "compute.subnetworks.listUsable", "parameterOrder": [ "project" ], @@ -19668,9 +20114,9 @@ "type": "string" } }, - "path": "{project}/global/targetSslProxies", + "path": "{project}/aggregated/subnetworks/listUsable", "response": { - "$ref": "TargetSslProxyList" + "$ref": "UsableSubnetworksAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19678,15 +20124,22 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setBackendService": { - "description": "Changes the BackendService for TargetSslProxy.", - "httpMethod": "POST", - "id": "compute.targetSslProxies.setBackendService", + "patch": { + "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can be updated with a patch request as indicated in the field descriptions. 
You must specify the current fingerprint of the subnetwork resource being patched.", + "httpMethod": "PATCH", + "id": "compute.subnetworks.patch", "parameterOrder": [ "project", - "targetSslProxy" + "region", + "subnetwork" ], "parameters": { + "drainTimeoutSeconds": { + "description": "The drain timeout specifies the upper bound in seconds on the amount of time allowed to drain connections from the current ACTIVE subnetwork to the current BACKUP subnetwork. The drain timeout is only applicable when the following conditions are true: - the subnetwork being patched has purpose = INTERNAL_HTTPS_LOAD_BALANCER - the subnetwork being patched has role = BACKUP - the patch request is setting the role to ACTIVE. Note that after this patch operation the roles of the ACTIVE and BACKUP subnetworks will be swapped.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -19694,22 +20147,29 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "targetSslProxy": { - "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", + "subnetwork": { + "description": "Name of the Subnetwork resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService", + "path": "{project}/regions/{region}/subnetworks/{subnetwork}", "request": { - "$ref": "TargetSslProxiesSetBackendServiceRequest" + "$ref": "Subnetwork" }, "response": { "$ref": "Operation" @@ -19719,13 +20179,14 @@ "https://www.googleapis.com/auth/compute" ] }, - "setProxyHeader": { - "description": "Changes the ProxyHeaderType for TargetSslProxy.", + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", - "id": "compute.targetSslProxies.setProxyHeader", + "id": "compute.subnetworks.setIamPolicy", "parameterOrder": [ "project", - "targetSslProxy" + "region", + "resource" ], "parameters": { "project": { @@ -19735,38 +20196,41 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
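Illustrative note (not part of the vendored file): because subnetworks.patch only updates selected fields and requires the current fingerprint, the usual pattern is read-modify-write. A sketch against the generated Go client, assuming enableFlowLogs is among the patchable fields; all names are placeholders.

package example

import compute "google.golang.org/api/compute/v1"

// enableFlowLogs patches a single field of a subnetwork. The read precedes the
// patch so the current fingerprint can be echoed back, which the API requires
// for optimistic concurrency; ForceSendFields keeps the boolean in the request
// body even when its zero value would otherwise be omitted.
func enableFlowLogs(svc *compute.Service) (*compute.Operation, error) {
	project, region, name := "my-project", "us-central1", "my-subnet" // placeholders

	current, err := svc.Subnetworks.Get(project, region, name).Do()
	if err != nil {
		return nil, err
	}
	patch := &compute.Subnetwork{
		EnableFlowLogs:  true,
		Fingerprint:     current.Fingerprint,
		ForceSendFields: []string{"EnableFlowLogs"},
	}
	return svc.Subnetworks.Patch(project, region, name, patch).Do()
}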
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" }, - "targetSslProxy": { - "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader", + "path": "{project}/regions/{region}/subnetworks/{resource}/setIamPolicy", "request": { - "$ref": "TargetSslProxiesSetProxyHeaderRequest" + "$ref": "RegionSetPolicyRequest" }, "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "setSslCertificates": { - "description": "Changes SslCertificates for TargetSslProxy.", + "setPrivateIpGoogleAccess": { + "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", "httpMethod": "POST", - "id": "compute.targetSslProxies.setSslCertificates", + "id": "compute.subnetworks.setPrivateIpGoogleAccess", "parameterOrder": [ "project", - "targetSslProxy" + "region", + "subnetwork" ], "parameters": { "project": { @@ -19776,22 +20240,29 @@ "required": true, "type": "string" }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "targetSslProxy": { - "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", + "subnetwork": { + "description": "Name of the Subnetwork resource.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates", + "path": "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", "request": { - "$ref": "TargetSslProxiesSetSslCertificatesRequest" + "$ref": "SubnetworksSetPrivateIpGoogleAccessRequest" }, "response": { "$ref": "Operation" @@ -19801,13 +20272,14 @@ "https://www.googleapis.com/auth/compute" ] }, - "setSslPolicy": { - "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.targetSslProxies.setSslPolicy", + "id": "compute.subnetworks.testIamPermissions", "parameterOrder": [ "project", - "targetSslProxy" + "region", + "resource" ], "parameters": { "project": { @@ -19817,78 +20289,94 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, "type": "string" }, - "targetSslProxy": { - "description": "Name of the TargetSslProxy resource whose SSL policy is to be set. 
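Illustrative note (not part of the vendored file): a sketch of the setPrivateIpGoogleAccess method described above, which toggles whether VMs with no external IP in the subnetwork can reach Google APIs and services; names are placeholders.

package example

import compute "google.golang.org/api/compute/v1"

// allowPrivateGoogleAccess enables Private Google Access on a subnetwork so
// instances without external IP addresses can still reach Google services.
func allowPrivateGoogleAccess(svc *compute.Service) (*compute.Operation, error) {
	req := &compute.SubnetworksSetPrivateIpGoogleAccessRequest{
		PrivateIpGoogleAccess: true,
	}
	return svc.Subnetworks.SetPrivateIpGoogleAccess("my-project", "us-central1", "my-subnet", req).Do()
}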
The name must be 1-63 characters long, and comply with RFC1035.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", + "path": "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", "request": { - "$ref": "SslPolicyReference" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.targetSslProxies.testIamPermissions", + } + } + }, + "targetHttpProxies": { + "methods": { + "aggregatedList": { + "description": "Retrieves the list of all TargetHttpProxy resources, regional and global, available to the specified project.", + "httpMethod": "GET", + "id": "compute.targetHttpProxies.aggregatedList", "parameterOrder": [ - "project", - "resource" + "project" ], "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. 
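Illustrative note (not part of the vendored file): the getIamPolicy/setIamPolicy/testIamPermissions methods on subnetworks follow the standard IAM surface. A sketch of a pre-flight check with testIamPermissions; the permission string and resource names are illustrative placeholders.

package example

import compute "google.golang.org/api/compute/v1"

// canUseSubnet asks the API which of the requested permissions the caller
// actually holds on the subnetwork and reports whether all were granted.
func canUseSubnet(svc *compute.Service) (bool, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.subnetworks.use"},
	}
	resp, err := svc.Subnetworks.TestIamPermissions("my-project", "us-central1", "my-subnet", req).Do()
	if err != nil {
		return false, err
	}
	return len(resp.Permissions) == len(req.Permissions), nil
}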
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" } }, - "path": "{project}/global/targetSslProxies/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "{project}/aggregated/targetHttpProxies", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "TargetHttpProxyAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "targetTcpProxies": { - "methods": { + }, "delete": { - "description": "Deletes the specified TargetTcpProxy resource.", + "description": "Deletes the specified TargetHttpProxy resource.", "httpMethod": "DELETE", - "id": "compute.targetTcpProxies.delete", + "id": "compute.targetHttpProxies.delete", "parameterOrder": [ "project", - "targetTcpProxy" + "targetHttpProxy" ], "parameters": { "project": { @@ -19903,15 +20391,15 @@ "location": "query", "type": "string" }, - "targetTcpProxy": { - "description": "Name of the TargetTcpProxy resource to delete.", + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/targetTcpProxies/{targetTcpProxy}", + "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", "response": { "$ref": "Operation" }, @@ -19921,12 +20409,12 @@ ] }, "get": { - "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request.", + "description": "Returns the specified TargetHttpProxy resource. 
Gets a list of available target HTTP proxies by making a list() request.", "httpMethod": "GET", - "id": "compute.targetTcpProxies.get", + "id": "compute.targetHttpProxies.get", "parameterOrder": [ "project", - "targetTcpProxy" + "targetHttpProxy" ], "parameters": { "project": { @@ -19936,17 +20424,17 @@ "required": true, "type": "string" }, - "targetTcpProxy": { - "description": "Name of the TargetTcpProxy resource to return.", + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/targetTcpProxies/{targetTcpProxy}", + "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", "response": { - "$ref": "TargetTcpProxy" + "$ref": "TargetHttpProxy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -19955,9 +20443,9 @@ ] }, "insert": { - "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request.", + "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", "httpMethod": "POST", - "id": "compute.targetTcpProxies.insert", + "id": "compute.targetHttpProxies.insert", "parameterOrder": [ "project" ], @@ -19975,9 +20463,9 @@ "type": "string" } }, - "path": "{project}/global/targetTcpProxies", + "path": "{project}/global/targetHttpProxies", "request": { - "$ref": "TargetTcpProxy" + "$ref": "TargetHttpProxy" }, "response": { "$ref": "Operation" @@ -19988,9 +20476,9 @@ ] }, "list": { - "description": "Retrieves the list of TargetTcpProxy resources available to the specified project.", + "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", "httpMethod": "GET", - "id": "compute.targetTcpProxies.list", + "id": "compute.targetHttpProxies.list", "parameterOrder": [ "project" ], @@ -20026,9 +20514,9 @@ "type": "string" } }, - "path": "{project}/global/targetTcpProxies", + "path": "{project}/global/targetHttpProxies", "response": { - "$ref": "TargetTcpProxyList" + "$ref": "TargetHttpProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -20036,13 +20524,13 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setBackendService": { - "description": "Changes the BackendService for TargetTcpProxy.", + "setUrlMap": { + "description": "Changes the URL map for TargetHttpProxy.", "httpMethod": "POST", - "id": "compute.targetTcpProxies.setBackendService", + "id": "compute.targetHttpProxies.setUrlMap", "parameterOrder": [ "project", - "targetTcpProxy" + "targetHttpProxy" ], "parameters": { "project": { @@ -20057,17 +20545,17 @@ "location": "query", "type": "string" }, - "targetTcpProxy": { - "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy to set a URL map for.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService", + "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", "request": { - "$ref": "TargetTcpProxiesSetBackendServiceRequest" + "$ref": "UrlMapReference" }, "response": { "$ref": "Operation" @@ -20077,13 +20565,13 @@ 
"https://www.googleapis.com/auth/compute" ] }, - "setProxyHeader": { - "description": "Changes the ProxyHeaderType for TargetTcpProxy.", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.targetTcpProxies.setProxyHeader", + "id": "compute.targetHttpProxies.testIamPermissions", "parameterOrder": [ "project", - "targetTcpProxy" + "resource" ], "parameters": { "project": { @@ -20093,39 +20581,35 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetTcpProxy": { - "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader", + "path": "{project}/global/targetHttpProxies/{resource}/testIamPermissions", "request": { - "$ref": "TargetTcpProxiesSetProxyHeaderRequest" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] } } }, - "targetVpnGateways": { + "targetHttpsProxies": { "methods": { "aggregatedList": { - "description": "Retrieves an aggregated list of target VPN gateways.", + "description": "Retrieves the list of all TargetHttpsProxy resources, regional and global, available to the specified project.", "httpMethod": "GET", - "id": "compute.targetVpnGateways.aggregatedList", + "id": "compute.targetHttpsProxies.aggregatedList", "parameterOrder": [ "project" ], @@ -20154,16 +20638,16 @@ "type": "string" }, "project": { - "description": "Project ID for this request.", + "description": "Name of the project scoping this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" } }, - "path": "{project}/aggregated/targetVpnGateways", + "path": "{project}/aggregated/targetHttpsProxies", "response": { - "$ref": "TargetVpnGatewayAggregatedList" + "$ref": "TargetHttpsProxyAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -20172,13 +20656,12 @@ ] }, "delete": { - "description": "Deletes the specified target VPN gateway.", + "description": "Deletes the specified TargetHttpsProxy resource.", "httpMethod": "DELETE", - "id": "compute.targetVpnGateways.delete", + "id": 
"compute.targetHttpsProxies.delete", "parameterOrder": [ "project", - "region", - "targetVpnGateway" + "targetHttpsProxy" ], "parameters": { "project": { @@ -20188,27 +20671,20 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "targetVpnGateway": { - "description": "Name of the target VPN gateway to delete.", + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", "response": { "$ref": "Operation" }, @@ -20218,13 +20694,12 @@ ] }, "get": { - "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request.", + "description": "Returns the specified TargetHttpsProxy resource. 
Gets a list of available target HTTPS proxies by making a list() request.", "httpMethod": "GET", - "id": "compute.targetVpnGateways.get", + "id": "compute.targetHttpsProxies.get", "parameterOrder": [ "project", - "region", - "targetVpnGateway" + "targetHttpsProxy" ], "parameters": { "project": { @@ -20234,24 +20709,17 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "targetVpnGateway": { - "description": "Name of the target VPN gateway to return.", + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", "response": { - "$ref": "TargetVpnGateway" + "$ref": "TargetHttpsProxy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -20260,12 +20728,11 @@ ] }, "insert": { - "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", + "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", "httpMethod": "POST", - "id": "compute.targetVpnGateways.insert", + "id": "compute.targetHttpsProxies.insert", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "project": { @@ -20275,22 +20742,15 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "{project}/regions/{region}/targetVpnGateways", + "path": "{project}/global/targetHttpsProxies", "request": { - "$ref": "TargetVpnGateway" + "$ref": "TargetHttpsProxy" }, "response": { "$ref": "Operation" @@ -20301,12 +20761,11 @@ ] }, "list": { - "description": "Retrieves a list of target VPN gateways available to the specified project and region.", + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", "httpMethod": "GET", - "id": "compute.targetVpnGateways.list", + "id": "compute.targetHttpsProxies.list", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "filter": { @@ -20338,18 +20797,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "{project}/regions/{region}/targetVpnGateways", + "path": "{project}/global/targetHttpsProxies", "response": { - "$ref": "TargetVpnGatewayList" + "$ref": "TargetHttpsProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -20357,14 +20809,13 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setLabels": { - "description": "Sets the labels on a TargetVpnGateway. To learn more about labels, read the Labeling Resources documentation.", + "setQuicOverride": { + "description": "Sets the QUIC override policy for TargetHttpsProxy.", "httpMethod": "POST", - "id": "compute.targetVpnGateways.setLabels", + "id": "compute.targetHttpsProxies.setQuicOverride", "parameterOrder": [ "project", - "region", - "resource" + "targetHttpsProxy" ], "parameters": { "project": { @@ -20374,29 +20825,21 @@ "required": true, "type": "string" }, - "region": { - "description": "The region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to set the QUIC override policy for. 
The name should conform to RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride", "request": { - "$ref": "RegionSetLabelsRequest" + "$ref": "TargetHttpsProxiesSetQuicOverrideRequest" }, "response": { "$ref": "Operation" @@ -20406,14 +20849,13 @@ "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", + "setSslCertificates": { + "description": "Replaces SslCertificates for TargetHttpsProxy.", "httpMethod": "POST", - "id": "compute.targetVpnGateways.testIamPermissions", + "id": "compute.targetHttpsProxies.setSslCertificates", "parameterOrder": [ "project", - "region", - "resource" + "targetHttpsProxy" ], "parameters": { "project": { @@ -20423,45 +20865,38 @@ "required": true, "type": "string" }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/targetVpnGateways/{resource}/testIamPermissions", + "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", "request": { - "$ref": "TestPermissionsRequest" + "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - } - } - }, - "urlMaps": { - "methods": { - "delete": { - "description": "Deletes the specified UrlMap resource.", - "httpMethod": "DELETE", - "id": "compute.urlMaps.delete", + }, + "setSslPolicy": { + "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. 
They do not affect the connection between the load balancer and the backends.", + "httpMethod": "POST", + "id": "compute.targetHttpsProxies.setSslPolicy", "parameterOrder": [ "project", - "urlMap" + "targetHttpsProxy" ], "parameters": { "project": { @@ -20476,15 +20911,17 @@ "location": "query", "type": "string" }, - "urlMap": { - "description": "Name of the UrlMap resource to delete.", + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/global/urlMaps/{urlMap}", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy", + "request": { + "$ref": "SslPolicyReference" + }, "response": { "$ref": "Operation" }, @@ -20493,46 +20930,13 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", - "httpMethod": "GET", - "id": "compute.urlMaps.get", - "parameterOrder": [ - "project", - "urlMap" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "urlMap": { - "description": "Name of the UrlMap resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/global/urlMaps/{urlMap}", - "response": { - "$ref": "UrlMap" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + "setUrlMap": { + "description": "Changes the URL map for TargetHttpsProxy.", "httpMethod": "POST", - "id": "compute.urlMaps.insert", + "id": "compute.targetHttpsProxies.setUrlMap", "parameterOrder": [ - "project" + "project", + "targetHttpsProxy" ], "parameters": { "project": { @@ -20546,11 +20950,18 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
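Illustrative note (not part of the vendored file): setSslPolicy and setQuicOverride on TargetHttpsProxy only shape the client-facing side of the load balancer. A sketch of both calls against the generated Go client; names are placeholders and the QUIC override value is one of the strings the API accepts.

package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// attachSslPolicy points an HTTPS proxy at a named SSL policy; the policy only
// affects client-facing TLS, not the connection to the backends.
func attachSslPolicy(svc *compute.Service, project, proxy, policy string) (*compute.Operation, error) {
	ref := &compute.SslPolicyReference{
		SslPolicy: fmt.Sprintf("projects/%s/global/sslPolicies/%s", project, policy),
	}
	return svc.TargetHttpsProxies.SetSslPolicy(project, proxy, ref).Do()
}

// setQuicOverride adjusts QUIC negotiation for the same proxy; mode is one of
// "NONE", "ENABLE" or "DISABLE".
func setQuicOverride(svc *compute.Service, project, proxy, mode string) (*compute.Operation, error) {
	req := &compute.TargetHttpsProxiesSetQuicOverrideRequest{QuicOverride: mode}
	return svc.TargetHttpsProxies.SetQuicOverride(project, proxy, req).Do()
}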
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "{project}/global/urlMaps", + "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", "request": { - "$ref": "UrlMap" + "$ref": "UrlMapReference" }, "response": { "$ref": "Operation" @@ -20560,13 +20971,13 @@ "https://www.googleapis.com/auth/compute" ] }, - "invalidateCache": { - "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", "httpMethod": "POST", - "id": "compute.urlMaps.invalidateCache", + "id": "compute.targetHttpsProxies.testIamPermissions", "parameterOrder": [ "project", - "urlMap" + "resource" ], "parameters": { "project": { @@ -20576,35 +20987,35 @@ "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "urlMap": { - "description": "Name of the UrlMap scoping this request.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/global/urlMaps/{urlMap}/invalidateCache", + "path": "{project}/global/targetHttpsProxies/{resource}/testIamPermissions", "request": { - "$ref": "CacheInvalidationRule" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "list": { - "description": "Retrieves the list of UrlMap resources available to the specified project.", + } + } + }, + "targetInstances": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of target instances.", "httpMethod": "GET", - "id": "compute.urlMaps.list", + "id": "compute.targetInstances.aggregatedList", "parameterOrder": [ "project" ], @@ -20640,9 +21051,9 @@ "type": "string" } }, - "path": "{project}/global/urlMaps", + "path": "{project}/aggregated/targetInstances", "response": { - "$ref": "UrlMapList" + "$ref": "TargetInstanceAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -20650,13 +21061,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "httpMethod": "PATCH", - "id": "compute.urlMaps.patch", + "delete": { + "description": "Deletes the specified TargetInstance resource.", + "httpMethod": "DELETE", + "id": "compute.targetInstances.delete", "parameterOrder": [ "project", - "urlMap" + "zone", + "targetInstance" ], "parameters": { "project": { @@ -20671,18 +21083,22 @@ "location": "query", "type": "string" }, - "urlMap": { - "description": "Name of the UrlMap resource to patch.", + "targetInstance": { + "description": "Name of the TargetInstance resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/global/urlMaps/{urlMap}", - "request": { - "$ref": "UrlMap" - }, + "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", "response": { "$ref": "Operation" }, @@ -20691,13 +21107,14 @@ "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.urlMaps.testIamPermissions", + "get": { + "description": "Returns the specified TargetInstance resource. 
Gets a list of available target instances by making a list() request.", + "httpMethod": "GET", + "id": "compute.targetInstances.get", "parameterOrder": [ "project", - "resource" + "zone", + "targetInstance" ], "parameters": { "project": { @@ -20707,20 +21124,24 @@ "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "targetInstance": { + "description": "Name of the TargetInstance resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/global/urlMaps/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "TargetInstance" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -20728,13 +21149,13 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "update": { - "description": "Updates the specified UrlMap resource with the data included in the request.", - "httpMethod": "PUT", - "id": "compute.urlMaps.update", + "insert": { + "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", + "httpMethod": "POST", + "id": "compute.targetInstances.insert", "parameterOrder": [ "project", - "urlMap" + "zone" ], "parameters": { "project": { @@ -20749,17 +21170,17 @@ "location": "query", "type": "string" }, - "urlMap": { - "description": "Name of the UrlMap resource to update.", + "zone": { + "description": "Name of the zone scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/global/urlMaps/{urlMap}", + "path": "{project}/zones/{zone}/targetInstances", "request": { - "$ref": "UrlMap" + "$ref": "TargetInstance" }, "response": { "$ref": "Operation" @@ -20769,52 +21190,13 @@ "https://www.googleapis.com/auth/compute" ] }, - "validate": { - "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. 
Calling this method does NOT create the UrlMap.", - "httpMethod": "POST", - "id": "compute.urlMaps.validate", - "parameterOrder": [ - "project", - "urlMap" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "urlMap": { - "description": "Name of the UrlMap resource to be validated as.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/global/urlMaps/{urlMap}/validate", - "request": { - "$ref": "UrlMapsValidateRequest" - }, - "response": { - "$ref": "UrlMapsValidateResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "vpnTunnels": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of VPN tunnels.", + "list": { + "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", "httpMethod": "GET", - "id": "compute.vpnTunnels.aggregatedList", + "id": "compute.targetInstances.list", "parameterOrder": [ - "project" + "project", + "zone" ], "parameters": { "filter": { @@ -20846,11 +21228,18 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "zone": { + "description": "Name of the zone scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" } }, - "path": "{project}/aggregated/vpnTunnels", + "path": "{project}/zones/{zone}/targetInstances", "response": { - "$ref": "VpnTunnelAggregatedList" + "$ref": "TargetInstanceList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -20858,14 +21247,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified VpnTunnel resource.", - "httpMethod": "DELETE", - "id": "compute.vpnTunnels.delete", + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.targetInstances.testIamPermissions", "parameterOrder": [ "project", - "region", - "vpnTunnel" + "zone", + "resource" ], "parameters": { "project": { @@ -20875,43 +21264,46 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "vpnTunnel": { - "description": "Name of the VpnTunnel resource to delete.", + "zone": { + "description": "The name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "path": "{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "get": { - "description": "Returns the specified VpnTunnel resource. Gets a list of available VPN tunnels by making a list() request.", - "httpMethod": "GET", - "id": "compute.vpnTunnels.get", + } + } + }, + "targetPools": { + "methods": { + "addHealthCheck": { + "description": "Adds health check URLs to a target pool.", + "httpMethod": "POST", + "id": "compute.targetPools.addHealthCheck", "parameterOrder": [ "project", "region", - "vpnTunnel" + "targetPool" ], "parameters": { "project": { @@ -20922,37 +21314,45 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "vpnTunnel": { - "description": "Name of the VpnTunnel resource to return.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
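For illustration only, a minimal sketch of how the compute.targetInstances.list and compute.targetInstances.get methods described above are typically invoked through the generated Go client; the google.golang.org/api/compute/v1 package, its NewService constructor, and the project/zone/resource names are assumptions for this sketch, not anything defined in the discovery document itself.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// Assumed constructor of the generated client; uses Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// compute.targetInstances.list: page through target instances in one zone.
	err = svc.TargetInstances.List("my-project", "us-central1-a").
		Pages(ctx, func(page *compute.TargetInstanceList) error {
			for _, ti := range page.Items {
				log.Printf("target instance %s -> %s", ti.Name, ti.Instance)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}

	// compute.targetInstances.get: fetch a single TargetInstance by name.
	ti, err := svc.TargetInstances.Get("my-project", "us-central1-a", "my-target-instance").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %s in zone %s", ti.Name, ti.Zone)
}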
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetPool": { + "description": "Name of the target pool to add a health check to.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", + "request": { + "$ref": "TargetPoolsAddHealthCheckRequest" + }, "response": { - "$ref": "VpnTunnel" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", + "addInstance": { + "description": "Adds an instance to a target pool.", "httpMethod": "POST", - "id": "compute.vpnTunnels.insert", + "id": "compute.targetPools.addInstance", "parameterOrder": [ "project", - "region" + "region", + "targetPool" ], "parameters": { "project": { @@ -20963,7 +21363,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -20973,11 +21373,18 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
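As a hedged sketch of the compute.targetPools.addHealthCheck method just described, the call below supplies a client-chosen requestId so that a retried POST is treated as a duplicate rather than a second mutation; the health-check URL, pool name, and fixed UUID are hypothetical placeholders, and the generated Go client is assumed as above.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	req := &compute.TargetPoolsAddHealthCheckRequest{
		HealthChecks: []*compute.HealthCheckReference{
			{HealthCheck: "https://www.googleapis.com/compute/v1/projects/my-project/global/httpHealthChecks/my-check"},
		},
	}

	// Any non-zero UUID; retrying with the same ID lets the server recognize
	// and ignore the duplicate, per the requestId semantics described above.
	op, err := svc.TargetPools.AddHealthCheck("my-project", "us-central1", "my-pool", req).
		RequestId("7f1a2b3c-4d5e-4f60-8a9b-0c1d2e3f4a5b").
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation %s status=%s", op.Name, op.Status)
}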
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "targetPool": { + "description": "Name of the TargetPool resource to add instances to.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" } }, - "path": "{project}/regions/{region}/vpnTunnels", + "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance", "request": { - "$ref": "VpnTunnel" + "$ref": "TargetPoolsAddInstanceRequest" }, "response": { "$ref": "Operation" @@ -20987,13 +21394,12 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", + "aggregatedList": { + "description": "Retrieves an aggregated list of target pools.", "httpMethod": "GET", - "id": "compute.vpnTunnels.list", + "id": "compute.targetPools.aggregatedList", "parameterOrder": [ - "project", - "region" + "project" ], "parameters": { "filter": { @@ -21025,18 +21431,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" } }, - "path": "{project}/regions/{region}/vpnTunnels", + "path": "{project}/aggregated/targetPools", "response": { - "$ref": "VpnTunnelList" + "$ref": "TargetPoolAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -21044,14 +21443,14 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setLabels": { - "description": "Sets the labels on a VpnTunnel. To learn more about labels, read the Labeling Resources documentation.", - "httpMethod": "POST", - "id": "compute.vpnTunnels.setLabels", + "delete": { + "description": "Deletes the specified target pool.", + "httpMethod": "DELETE", + "id": "compute.targetPools.delete", "parameterOrder": [ "project", "region", - "resource" + "targetPool" ], "parameters": { "project": { @@ -21062,7 +21461,7 @@ "type": "string" }, "region": { - "description": "The region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -21073,18 +21472,15 @@ "location": "query", "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "targetPool": { + "description": "Name of the TargetPool resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/vpnTunnels/{resource}/setLabels", - "request": { - "$ref": "RegionSetLabelsRequest" - }, + "path": "{project}/regions/{region}/targetPools/{targetPool}", "response": { "$ref": "Operation" }, @@ -21093,14 +21489,14 @@ "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.", - "httpMethod": "POST", - "id": "compute.vpnTunnels.testIamPermissions", + "get": { + "description": "Returns the specified target pool. 
Gets a list of available target pools by making a list() request.", + "httpMethod": "GET", + "id": "compute.targetPools.get", "parameterOrder": [ "project", "region", - "resource" + "targetPool" ], "parameters": { "project": { @@ -21111,54 +21507,40 @@ "type": "string" }, "region": { - "description": "The name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "resource": { - "description": "Name or id of the resource for this request.", + "targetPool": { + "description": "Name of the TargetPool resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/vpnTunnels/{resource}/testIamPermissions", - "request": { - "$ref": "TestPermissionsRequest" - }, + "path": "{project}/regions/{region}/targetPools/{targetPool}", "response": { - "$ref": "TestPermissionsResponse" + "$ref": "TargetPool" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "zoneOperations": { - "methods": { - "delete": { - "description": "Deletes the specified zone-specific Operations resource.", - "httpMethod": "DELETE", - "id": "compute.zoneOperations.delete", + }, + "getHealth": { + "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", + "httpMethod": "POST", + "id": "compute.targetPools.getHealth", "parameterOrder": [ "project", - "zone", - "operation" + "region", + "targetPool" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -21166,37 +21548,43 @@ "required": true, "type": "string" }, - "zone": { - "description": "Name of the zone for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - } - }, - "path": "{project}/zones/{zone}/operations/{operation}", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + }, + "targetPool": { + "description": "Name of the TargetPool resource to which the queried instance belongs.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth", + "request": { + "$ref": "InstanceReference" + }, + "response": { + "$ref": "TargetPoolInstanceHealth" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "get": { - "description": "Retrieves the specified zone-specific Operations resource.", - "httpMethod": "GET", - "id": "compute.zoneOperations.get", + "insert": { + "description": "Creates a target pool in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.targetPools.insert", "parameterOrder": [ "project", - "zone", - "operation" + "region" ], 
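A possible sketch of compute.targetPools.getHealth as described above: the call posts an InstanceReference and returns the most recent health-check results as a TargetPoolInstanceHealth. The instance URL, project, region, and pool names are hypothetical, and the generated Go client is assumed.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	ref := &compute.InstanceReference{
		Instance: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instances/my-instance",
	}

	health, err := svc.TargetPools.GetHealth("my-project", "us-central1", "my-pool", ref).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, hs := range health.HealthStatus {
		log.Printf("%s %s -> %s", hs.Instance, hs.IpAddress, hs.HealthState)
	}
}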
"parameters": { - "operation": { - "description": "Name of the Operations resource to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -21204,31 +21592,38 @@ "required": true, "type": "string" }, - "zone": { - "description": "Name of the zone for this request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "{project}/zones/{zone}/operations/{operation}", + "path": "{project}/regions/{region}/targetPools", + "request": { + "$ref": "TargetPool" + }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "list": { - "description": "Retrieves a list of Operation resources contained within the specified zone.", + "description": "Retrieves a list of target pools available to the specified project and region.", "httpMethod": "GET", - "id": "compute.zoneOperations.list", + "id": "compute.targetPools.list", "parameterOrder": [ "project", - "zone" + "region" ], "parameters": { "filter": { @@ -21261,17 +21656,215 @@ "required": true, "type": "string" }, - "zone": { - "description": "Name of the zone for request.", + "region": { + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/zones/{zone}/operations", + "path": "{project}/regions/{region}/targetPools", "response": { - "$ref": "OperationList" + "$ref": "TargetPoolList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "removeHealthCheck": { + "description": "Removes health check URL from a target pool.", + "httpMethod": "POST", + "id": "compute.targetPools.removeHealthCheck", + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional 
request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetPool": { + "description": "Name of the target pool to remove health checks from.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + "request": { + "$ref": "TargetPoolsRemoveHealthCheckRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "removeInstance": { + "description": "Removes instance URL from a target pool.", + "httpMethod": "POST", + "id": "compute.targetPools.removeInstance", + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetPool": { + "description": "Name of the TargetPool resource to remove instances from.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + "request": { + "$ref": "TargetPoolsRemoveInstanceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setBackup": { + "description": "Changes a backup target pool's configurations.", + "httpMethod": "POST", + "id": "compute.targetPools.setBackup", + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "parameters": { + "failoverRatio": { + "description": "New failoverRatio value for the target pool.", + "format": "float", + "location": "query", + "type": "number" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetPool": { + "description": "Name of the TargetPool resource to set a backup pool for.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup", + "request": { + "$ref": "TargetReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.targetPools.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetPools/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -21281,15 +21874,53 @@ } } }, - "zones": { + "targetSslProxies": { "methods": { + "delete": { + "description": "Deletes the specified TargetSslProxy resource.", + "httpMethod": "DELETE", + "id": "compute.targetSslProxies.delete", + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
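A hedged sketch of compute.targetPools.setBackup: the request body is a TargetReference pointing at the backup pool, and the failoverRatio query parameter described above is set through the generated call's option setter. All resource names are hypothetical placeholders.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	backup := &compute.TargetReference{
		Target: "https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/targetPools/my-backup-pool",
	}

	op, err := svc.TargetPools.SetBackup("my-project", "us-central1", "my-primary-pool", backup).
		FailoverRatio(0.1). // fail over when healthy instances drop below 10%
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("setBackup operation: %s (%s)", op.Name, op.Status)
}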
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetSslProxy": { + "description": "Name of the TargetSslProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetSslProxies/{targetSslProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "get": { - "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request.", + "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request.", "httpMethod": "GET", - "id": "compute.zones.get", + "id": "compute.targetSslProxies.get", "parameterOrder": [ "project", - "zone" + "targetSslProxy" ], "parameters": { "project": { @@ -21299,17 +21930,17 @@ "required": true, "type": "string" }, - "zone": { - "description": "Name of the zone resource to return.", + "targetSslProxy": { + "description": "Name of the TargetSslProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "{project}/zones/{zone}", + "path": "{project}/global/targetSslProxies/{targetSslProxy}", "response": { - "$ref": "Zone" + "$ref": "TargetSslProxy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -21317,10 +21948,43 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "insert": { + "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.targetSslProxies.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/targetSslProxies", + "request": { + "$ref": "TargetSslProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "list": { - "description": "Retrieves the list of Zone resources available to the specified project.", + "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", "httpMethod": "GET", - "id": "compute.zones.list", + "id": "compute.targetSslProxies.list", "parameterOrder": [ "project" ], @@ -21356,129 +22020,5176 @@ "type": "string" } }, - "path": "{project}/zones", + "path": "{project}/global/targetSslProxies", "response": { - "$ref": "ZoneList" + "$ref": "TargetSslProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - } - }, - "revision": "20181130", - "rootUrl": "https://www.googleapis.com/", - "schemas": { - "AcceleratorConfig": { - "description": "A specification of the type and number of accelerator cards attached to the instance.", - "id": "AcceleratorConfig", - "properties": { - "acceleratorCount": { - "description": "The number of the guest accelerator cards exposed to this instance.", - "format": "int32", - "type": "integer" - }, - "acceleratorType": { - "description": "Full or partial URL of the accelerator type resource to attach to this instance. If you are creating an instance template, specify only the accelerator name.", - "type": "string" - } - }, - "type": "object" - }, - "AcceleratorType": { - "description": "An Accelerator Type resource. (== resource_for beta.acceleratorTypes ==) (== resource_for v1.acceleratorTypes ==)", - "id": "AcceleratorType", - "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this accelerator type." - }, - "description": { - "description": "[Output Only] An optional textual description of the resource.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#acceleratorType", - "description": "[Output Only] The type of the resource. Always compute#acceleratorType for accelerator types.", - "type": "string" - }, - "maximumCardsPerInstance": { - "description": "[Output Only] Maximum accelerator cards allowed per instance.", - "format": "int32", - "type": "integer" - }, - "name": { - "description": "[Output Only] Name of the resource.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined fully-qualified URL for this resource.", - "type": "string" - }, - "zone": { - "description": "[Output Only] The name of the zone where the accelerator type resides, such as us-central1-a. You must specify this field as part of the HTTP request URL. 
It is not settable as a field in the request body.", - "type": "string" - } - }, - "type": "object" - }, - "AcceleratorTypeAggregatedList": { - "id": "AcceleratorTypeAggregatedList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" }, - "items": { - "additionalProperties": { - "$ref": "AcceleratorTypesScopedList", - "description": "[Output Only] Name of the scope containing this set of accelerator types." + "setBackendService": { + "description": "Changes the BackendService for TargetSslProxy.", + "httpMethod": "POST", + "id": "compute.targetSslProxies.setBackendService", + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetSslProxy": { + "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } }, - "description": "A list of AcceleratorTypesScopedList resources.", - "type": "object" - }, - "kind": { - "default": "compute#acceleratorTypeAggregatedList", - "description": "[Output Only] Type of resource. Always compute#acceleratorTypeAggregatedList for aggregated lists of accelerator types.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" + "path": "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService", + "request": { + "$ref": "TargetSslProxiesSetBackendServiceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "setProxyHeader": { + "description": "Changes the ProxyHeaderType for TargetSslProxy.", + "httpMethod": "POST", + "id": "compute.targetSslProxies.setProxyHeader", + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetSslProxy": { + "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader", + "request": { + "$ref": "TargetSslProxiesSetProxyHeaderRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setSslCertificates": { + "description": "Changes SslCertificates for TargetSslProxy.", + "httpMethod": "POST", + "id": "compute.targetSslProxies.setSslCertificates", + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetSslProxy": { + "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates", + "request": { + "$ref": "TargetSslProxiesSetSslCertificatesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setSslPolicy": { + "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "httpMethod": "POST", + "id": "compute.targetSslProxies.setSslPolicy", + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetSslProxy": { + "description": "Name of the TargetSslProxy resource whose SSL policy is to be set. 
The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", + "request": { + "$ref": "SslPolicyReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.targetSslProxies.testIamPermissions", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetSslProxies/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "targetTcpProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetTcpProxy resource.", + "httpMethod": "DELETE", + "id": "compute.targetTcpProxies.delete", + "parameterOrder": [ + "project", + "targetTcpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetTcpProxies/{targetTcpProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetTcpProxy resource. 
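A possible sketch of compute.targetSslProxies.setSslPolicy as described above: an SslPolicyReference selects the SSL policy that governs the load balancer's server-side SSL features for client connections. The project, proxy, and policy names are hypothetical, and the generated Go client is assumed.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	ref := &compute.SslPolicyReference{
		SslPolicy: "https://www.googleapis.com/compute/v1/projects/my-project/global/sslPolicies/my-ssl-policy",
	}

	op, err := svc.TargetSslProxies.SetSslPolicy("my-project", "my-ssl-proxy", ref).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("setSslPolicy operation: %s (%s)", op.Name, op.Status)
}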
Gets a list of available target TCP proxies by making a list() request.", + "httpMethod": "GET", + "id": "compute.targetTcpProxies.get", + "parameterOrder": [ + "project", + "targetTcpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetTcpProxies/{targetTcpProxy}", + "response": { + "$ref": "TargetTcpProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.targetTcpProxies.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/targetTcpProxies", + "request": { + "$ref": "TargetTcpProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of TargetTcpProxy resources available to the specified project.", + "httpMethod": "GET", + "id": "compute.targetTcpProxies.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetTcpProxies", + "response": { + "$ref": "TargetTcpProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setBackendService": { + "description": "Changes the BackendService for TargetTcpProxy.", + "httpMethod": "POST", + "id": "compute.targetTcpProxies.setBackendService", + "parameterOrder": [ + "project", + "targetTcpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
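To make the filter, maxResults, and pageToken parameters documented above concrete, here is a minimal sketch of a paginated compute.targetTcpProxies.list call through the generated Go client; the Pages helper follows nextPageToken automatically, so maxResults only bounds the page size. The project and proxy names are hypothetical.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	err = svc.TargetTcpProxies.List("my-project").
		Filter("name != example-proxy"). // comparison operators: =, !=, >, <
		MaxResults(100).
		Pages(ctx, func(page *compute.TargetTcpProxyList) error {
			for _, p := range page.Items {
				log.Printf("%s -> %s (proxyHeader=%s)", p.Name, p.Service, p.ProxyHeader)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}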
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService", + "request": { + "$ref": "TargetTcpProxiesSetBackendServiceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setProxyHeader": { + "description": "Changes the ProxyHeaderType for TargetTcpProxy.", + "httpMethod": "POST", + "id": "compute.targetTcpProxies.setProxyHeader", + "parameterOrder": [ + "project", + "targetTcpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader", + "request": { + "$ref": "TargetTcpProxiesSetProxyHeaderRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "targetVpnGateways": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of target VPN gateways.", + "httpMethod": "GET", + "id": "compute.targetVpnGateways.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/targetVpnGateways", + "response": { + "$ref": "TargetVpnGatewayAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified target VPN gateway.", + "httpMethod": "DELETE", + "id": "compute.targetVpnGateways.delete", + "parameterOrder": [ + "project", + "region", + "targetVpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetVpnGateway": { + "description": "Name of the target VPN gateway to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request.", + "httpMethod": "GET", + "id": "compute.targetVpnGateways.get", + "parameterOrder": [ + "project", + "region", + "targetVpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetVpnGateway": { + "description": "Name of the target VPN gateway to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "response": { + "$ref": "TargetVpnGateway" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.targetVpnGateways.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetVpnGateways", + "request": { + "$ref": "TargetVpnGateway" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of target VPN gateways available to the specified project and region.", + "httpMethod": "GET", + "id": "compute.targetVpnGateways.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetVpnGateways", + "response": { + "$ref": "TargetVpnGatewayList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setLabels": { + "description": "Sets the labels on a TargetVpnGateway. To learn more about labels, read the Labeling Resources documentation.", + "httpMethod": "POST", + "id": "compute.targetVpnGateways.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.targetVpnGateways.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/targetVpnGateways/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "urlMaps": { + "methods": { + "aggregatedList": { + "description": "Retrieves the list of all UrlMap resources, regional and global, available to the specified project.", + "httpMethod": "GET", + "id": "compute.urlMaps.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/urlMaps", + "response": { + "$ref": "UrlMapsAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified UrlMap resource.", + "httpMethod": "DELETE", + "id": "compute.urlMaps.delete", + "parameterOrder": [ + "project", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/urlMaps/{urlMap}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", + "httpMethod": "GET", + "id": "compute.urlMaps.get", + "parameterOrder": [ + "project", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/urlMaps/{urlMap}", + "response": { + "$ref": "UrlMap" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.urlMaps.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/urlMaps", + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "invalidateCache": { + "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", + "httpMethod": "POST", + "id": "compute.urlMaps.invalidateCache", + "parameterOrder": [ + "project", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/urlMaps/{urlMap}/invalidateCache", + "request": { + "$ref": "CacheInvalidationRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of UrlMap resources available to the specified project.", + "httpMethod": "GET", + "id": "compute.urlMaps.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/urlMaps", + "response": { + "$ref": "UrlMapList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.urlMaps.patch", + "parameterOrder": [ + "project", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/urlMaps/{urlMap}", + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.urlMaps.testIamPermissions", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/urlMaps/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "update": { + "description": "Updates the specified UrlMap resource with the data included in the request.", + "httpMethod": "PUT", + "id": "compute.urlMaps.update", + "parameterOrder": [ + "project", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/urlMaps/{urlMap}", + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "validate": { + "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", + "httpMethod": "POST", + "id": "compute.urlMaps.validate", + "parameterOrder": [ + "project", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to be validated as.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/urlMaps/{urlMap}/validate", + "request": { + "$ref": "UrlMapsValidateRequest" + }, + "response": { + "$ref": "UrlMapsValidateResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "vpnGateways": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of VPN gateways.", + "httpMethod": "GET", + "id": "compute.vpnGateways.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/vpnGateways", + "response": { + "$ref": "VpnGatewayAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified VPN gateway.", + "httpMethod": "DELETE", + "id": "compute.vpnGateways.delete", + "parameterOrder": [ + "project", + "region", + "vpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "vpnGateway": { + "description": "Name of the VPN gateway to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified VPN gateway. Gets a list of available VPN gateways by making a list() request.", + "httpMethod": "GET", + "id": "compute.vpnGateways.get", + "parameterOrder": [ + "project", + "region", + "vpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "vpnGateway": { + "description": "Name of the VPN gateway to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}", + "response": { + "$ref": "VpnGateway" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getStatus": { + "description": "Returns the status for the specified VPN gateway.", + "httpMethod": "GET", + "id": "compute.vpnGateways.getStatus", + "parameterOrder": [ + "project", + "region", + "vpnGateway" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "vpnGateway": { + "description": "Name of the VPN gateway to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}/getStatus", + "response": { + "$ref": "VpnGatewaysGetStatusResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a VPN gateway in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.vpnGateways.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways", + "request": { + "$ref": "VpnGateway" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of VPN gateways available to the specified project and region.", + "httpMethod": "GET", + "id": "compute.vpnGateways.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways", + "response": { + "$ref": "VpnGatewayList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setLabels": { + "description": "Sets the labels on a VpnGateway. To learn more about labels, read the Labeling Resources documentation.", + "httpMethod": "POST", + "id": "compute.vpnGateways.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnGateways/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "vpnTunnels": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of VPN tunnels.", + "httpMethod": "GET", + "id": "compute.vpnTunnels.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/vpnTunnels", + "response": { + "$ref": "VpnTunnelAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified VpnTunnel resource.", + "httpMethod": "DELETE", + "id": "compute.vpnTunnels.delete", + "parameterOrder": [ + "project", + "region", + "vpnTunnel" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "vpnTunnel": { + "description": "Name of the VpnTunnel resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified VpnTunnel resource. 
Gets a list of available VPN tunnels by making a list() request.", + "httpMethod": "GET", + "id": "compute.vpnTunnels.get", + "parameterOrder": [ + "project", + "region", + "vpnTunnel" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "vpnTunnel": { + "description": "Name of the VpnTunnel resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "response": { + "$ref": "VpnTunnel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.vpnTunnels.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnTunnels", + "request": { + "$ref": "VpnTunnel" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", + "httpMethod": "GET", + "id": "compute.vpnTunnels.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnTunnels", + "response": { + "$ref": "VpnTunnelList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setLabels": { + "description": "Sets the labels on a VpnTunnel. 
To learn more about labels, read the Labeling Resources documentation.", + "httpMethod": "POST", + "id": "compute.vpnTunnels.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.vpnTunnels.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/vpnTunnels/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "zoneOperations": { + "methods": { + "delete": { + "description": "Deletes the specified zone-specific Operations resource.", + "httpMethod": "DELETE", + "id": "compute.zoneOperations.delete", + "parameterOrder": [ + "project", + "zone", + "operation" + ], + "parameters": { + "operation": { + 
"description": "Name of the Operations resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/operations/{operation}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Retrieves the specified zone-specific Operations resource.", + "httpMethod": "GET", + "id": "compute.zoneOperations.get", + "parameterOrder": [ + "project", + "zone", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/operations/{operation}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "description": "Retrieves a list of Operation resources contained within the specified zone.", + "httpMethod": "GET", + "id": "compute.zoneOperations.list", + "parameterOrder": [ + "project", + "zone" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone for request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/operations", + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "zones": { + "methods": { + "get": { + "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request.", + "httpMethod": "GET", + "id": "compute.zones.get", + "parameterOrder": [ + "project", + "zone" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}", + "response": { + "$ref": "Zone" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "description": "Retrieves the list of Zone resources available to the specified project.", + "httpMethod": "GET", + "id": "compute.zones.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones", + "response": { + "$ref": "ZoneList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + } + }, + "revision": "20190528", + "rootUrl": "https://www.googleapis.com/", + "schemas": { + "AcceleratorConfig": { + "description": "A specification of the type and number of accelerator cards attached to the instance.", + "id": "AcceleratorConfig", + "properties": { + "acceleratorCount": { + "description": "The number of the guest accelerator cards exposed to this instance.", + "format": "int32", + "type": "integer" + }, + "acceleratorType": { + "description": "Full or partial URL of the accelerator type resource to attach to this instance. 
For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types.", + "type": "string" + } + }, + "type": "object" + }, + "AcceleratorType": { + "description": "An Accelerator Type resource. (== resource_for beta.acceleratorTypes ==) (== resource_for v1.acceleratorTypes ==)", + "id": "AcceleratorType", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this accelerator type." + }, + "description": { + "description": "[Output Only] An optional textual description of the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#acceleratorType", + "description": "[Output Only] The type of the resource. Always compute#acceleratorType for accelerator types.", + "type": "string" + }, + "maximumCardsPerInstance": { + "description": "[Output Only] Maximum accelerator cards allowed per instance.", + "format": "int32", + "type": "integer" + }, + "name": { + "description": "[Output Only] Name of the resource.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined fully-qualified URL for this resource.", + "type": "string" + }, + "zone": { + "description": "[Output Only] The name of the zone where the accelerator type resides, such as us-central1-a. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + } + }, + "type": "object" + }, + "AcceleratorTypeAggregatedList": { + "id": "AcceleratorTypeAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "AcceleratorTypesScopedList", + "description": "[Output Only] Name of the scope containing this set of accelerator types." + }, + "description": "A list of AcceleratorTypesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#acceleratorTypeAggregatedList", + "description": "[Output Only] Type of resource. Always compute#acceleratorTypeAggregatedList for aggregated lists of accelerator types.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "AcceleratorTypeList": { + "description": "Contains a list of accelerator types.", + "id": "AcceleratorTypeList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of AcceleratorType resources.", + "items": { + "$ref": "AcceleratorType" + }, + "type": "array" + }, + "kind": { + "default": "compute#acceleratorTypeList", + "description": "[Output Only] Type of resource. Always compute#acceleratorTypeList for lists of accelerator types.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "AcceleratorTypesScopedList": { + "id": "AcceleratorTypesScopedList", + "properties": { + "acceleratorTypes": { + "description": "[Output Only] A list of accelerator types contained in this scope.", + "items": { + "$ref": "AcceleratorType" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] An informational warning that appears when the accelerator types list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "AccessConfig": { + "description": "An access configuration attached to an instance's network interface. Only one access config per instance is supported.", + "id": "AccessConfig", + "properties": { + "kind": { + "default": "compute#accessConfig", + "description": "[Output Only] Type of the resource. Always compute#accessConfig for access configs.", + "type": "string" + }, + "name": { + "description": "The name of this access configuration. The default and recommended name is External NAT but you can use any arbitrary string you would like. For example, My external IP or Network Access.", + "type": "string" + }, + "natIP": { + "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. 
If you specify a static external IP address, it must live in the same region as the zone of the instance.", + "type": "string" + }, + "networkTier": { + "description": "This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD.\n\nIf an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier.\n\nIf an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP.", + "enum": [ + "PREMIUM", + "STANDARD" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "publicPtrDomainName": { + "description": "The DNS domain name for the public PTR record. This field can only be set when the set_public_ptr field is enabled.", + "type": "string" + }, + "setPublicPtr": { + "description": "Specifies whether a public DNS ?PTR? record should be created to map the external IP address of the instance to a DNS domain name.", + "type": "boolean" + }, + "type": { + "default": "ONE_TO_ONE_NAT", + "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", + "enum": [ + "ONE_TO_ONE_NAT" + ], + "enumDescriptions": [ + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "Address": { + "description": "A reserved address resource. (== resource_for beta.addresses ==) (== resource_for v1.addresses ==) (== resource_for beta.globalAddresses ==) (== resource_for v1.globalAddresses ==)", + "id": "Address", + "properties": { + "address": { + "description": "The static IP address represented by this resource.", + "type": "string" + }, + "addressType": { + "description": "The type of address to reserve, either INTERNAL or EXTERNAL. If unspecified, defaults to EXTERNAL.", + "enum": [ + "EXTERNAL", + "INTERNAL", + "UNSPECIFIED_TYPE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "ipVersion": { + "description": "The IP Version that will be used by this address. Valid options are IPV4 or IPV6. This can only be specified for a global address.", + "enum": [ + "IPV4", + "IPV6", + "UNSPECIFIED_VERSION" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "kind": { + "default": "compute#address", + "description": "[Output Only] Type of the resource. Always compute#address for addresses.", + "type": "string" + }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this Address, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. 
You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an Address.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this Address resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", + "type": "object" + }, + "name": { + "annotations": { + "required": [ + "compute.addresses.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "network": { + "description": "The URL of the network in which to reserve the address. This field can only be used with INTERNAL type with VPC_PEERING purpose.", + "type": "string" + }, + "networkTier": { + "description": "This signifies the networking tier used for configuring this Address and can only take the following values: PREMIUM, STANDARD. Global forwarding rules can only be Premium Tier. Regional forwarding rules can be either Premium or Standard Tier. Standard Tier addresses applied to regional forwarding rules can be used with any external load balancer. Regional forwarding rules in Premium Tier can only be used with a Network load balancer.\n\nIf this field is not specified, it is assumed to be PREMIUM.", + "enum": [ + "PREMIUM", + "STANDARD" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "prefixLength": { + "description": "The prefix length if the resource reprensents an IP range.", + "format": "int32", + "type": "integer" + }, + "purpose": { + "description": "The purpose of resource, only used with INTERNAL type.", + "enum": [ + "DNS_RESOLVER", + "GCE_ENDPOINT", + "NAT_AUTO", + "VPC_PEERING" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "region": { + "description": "[Output Only] URL of the region where the regional address resides. This field is not applicable to global addresses. You must specify this field as part of the HTTP request URL. You cannot set this field in the request body.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "status": { + "description": "[Output Only] The status of the address, which can be one of RESERVING, RESERVED, or IN_USE. An address that is RESERVING is currently in the process of being reserved. A RESERVED address is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", + "enum": [ + "IN_USE", + "RESERVED", + "RESERVING" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "subnetwork": { + "description": "The URL of the subnetwork in which to reserve the address. If an IP address is specified, it must be within the subnetwork's IP range. 
This field can only be used with INTERNAL type with GCE_ENDPOINT/DNS_RESOLVER purposes.", + "type": "string" + }, + "users": { + "description": "[Output Only] The URLs of the resources that are using this address.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "AddressAggregatedList": { + "id": "AddressAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "AddressesScopedList", + "description": "[Output Only] Name of the scope containing this set of addresses." + }, + "description": "A list of AddressesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#addressAggregatedList", + "description": "[Output Only] Type of resource. Always compute#addressAggregatedList for aggregated lists of addresses.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "AddressList": { + "description": "Contains a list of addresses.", + "id": "AddressList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of Address resources.", + "items": { + "$ref": "Address" + }, + "type": "array" + }, + "kind": { + "default": "compute#addressList", + "description": "[Output Only] Type of resource. Always compute#addressList for lists of addresses.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "AddressesScopedList": { + "id": "AddressesScopedList", + "properties": { + "addresses": { + "description": "[Output Only] A list of addresses contained in this scope.", + "items": { + "$ref": "Address" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "AliasIpRange": { + "description": "An alias IP range attached to an instance's network interface.", + "id": "AliasIpRange", + "properties": { + "ipCidrRange": { + "description": "The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. 
This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24).", + "type": "string" + }, + "subnetworkRangeName": { + "description": "Optional subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. If left unspecified, the primary range of the subnetwork will be used.", + "type": "string" + } + }, + "type": "object" + }, + "AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk": { + "id": "AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk", + "properties": { + "diskSizeGb": { + "description": "Specifies the size of the disk in base-2 GB.", + "format": "int64", + "type": "string" + }, + "interface": { + "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.", + "enum": [ + "NVME", + "SCSI" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "AllocationSpecificSKUAllocationReservedInstanceProperties": { + "description": "Properties of the SKU instances being reserved.", + "id": "AllocationSpecificSKUAllocationReservedInstanceProperties", + "properties": { + "guestAccelerators": { + "description": "Specifies accelerator type and count.", + "items": { + "$ref": "AcceleratorConfig" + }, + "type": "array" + }, + "localSsds": { + "description": "Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd.", + "items": { + "$ref": "AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk" + }, + "type": "array" + }, + "machineType": { + "description": "Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern.", + "type": "string" + }, + "minCpuPlatform": { + "description": "Minimum cpu platform the reservation.", + "type": "string" + } + }, + "type": "object" + }, + "AllocationSpecificSKUReservation": { + "description": "This reservation type allows to pre allocate specific instance configuration.", + "id": "AllocationSpecificSKUReservation", + "properties": { + "count": { + "description": "Specifies number of resources that are allocated.", + "format": "int64", + "type": "string" + }, + "inUseCount": { + "description": "[OutputOnly] Indicates how many resource are in use.", + "format": "int64", + "type": "string" + }, + "instanceProperties": { + "$ref": "AllocationSpecificSKUAllocationReservedInstanceProperties", + "description": "The instance properties for this specific sku reservation." + } + }, + "type": "object" + }, + "AttachedDisk": { + "description": "An instance-attached disk resource.", + "id": "AttachedDisk", + "properties": { + "autoDelete": { + "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance).", + "type": "boolean" + }, + "boot": { + "description": "Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem.", + "type": "boolean" + }, + "deviceName": { + "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. 
This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks.", + "type": "string" + }, + "diskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts or decrypts a disk using a customer-supplied encryption key.\n\nIf you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key.\n\nIf you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance.\n\nIf you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group." + }, + "guestOsFeatures": { + "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", + "items": { + "$ref": "GuestOsFeature" + }, + "type": "array" + }, + "index": { + "description": "[Output Only] A zero-based index to this disk, where 0 is reserved for the boot disk. If you have many disks attached to an instance, each disk would have a unique index number.", + "format": "int32", + "type": "integer" + }, + "initializeParams": { + "$ref": "AttachedDiskInitializeParams", + "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance.\n\nThis property is mutually exclusive with the source property; you can only define one or the other, but not both." + }, + "interface": { + "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.", + "enum": [ + "NVME", + "SCSI" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "kind": { + "default": "compute#attachedDisk", + "description": "[Output Only] Type of the resource. Always compute#attachedDisk for attached disks.", + "type": "string" + }, + "licenses": { + "description": "[Output Only] Any valid publicly visible licenses.", + "items": { + "type": "string" + }, + "type": "array" + }, + "mode": { + "description": "The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode.", + "enum": [ + "READ_ONLY", + "READ_WRITE" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "source": { + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. 
When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk.", + "type": "string" + }, + "type": { + "description": "Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT.", + "enum": [ + "PERSISTENT", + "SCRATCH" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "AttachedDiskInitializeParams": { + "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance.\n\nThis property is mutually exclusive with the source property; you can only define one or the other, but not both.", + "id": "AttachedDiskInitializeParams", + "properties": { + "description": { + "description": "An optional description. Provide this property when creating the disk.", + "type": "string" + }, + "diskName": { + "description": "Specifies the disk name. If not specified, the default is to use the name of the instance. If the disk with the instance name exists already in the given zone/region, a new name will be automatically generated.", + "type": "string" + }, + "diskSizeGb": { + "description": "Specifies the size of the disk in base-2 GB.", + "format": "int64", + "type": "string" + }, + "diskType": { + "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example:\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/pd-standard\n\n\nOther values include pd-ssd and local-ssd. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType \n- projects/project/zones/zone/diskTypes/diskType \n- zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL.", + "type": "string" + }, + "guestOsFeatures": { + "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.\n\nGuest OS features are applied by merging initializeParams.guestOsFeatures and disks.guestOsFeatures", + "items": { + "$ref": "GuestOsFeature" + }, + "type": "array" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this disk. These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks.", + "type": "object" + }, + "resourcePolicies": { + "description": "Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. For instance template, specify only the resource policy name.", + "items": { + "type": "string" + }, + "type": "array" + }, + "sourceImage": { + "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. 
For example, specify family/debian-9 to use the latest Debian 9 image:\nprojects/debian-cloud/global/images/family/debian-9\n\n\nAlternatively, use a specific version of a public operating system image:\nprojects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD\n\n\nTo create a disk with a custom image that you created, specify the image name in the following format:\nglobal/images/my-custom-image\n\n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\nglobal/images/family/my-image-family\n\n\nIf the source image is deleted later, this field will not be set.", + "type": "string" + }, + "sourceImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys." + }, + "sourceSnapshot": { + "description": "The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or disks.source is required except for local SSD.\n\nTo create a disk with a snapshot that you created, specify the snapshot name in the following format:\nglobal/snapshots/my-backup\n\n\nIf the source snapshot is deleted later, this field will not be set.", + "type": "string" + }, + "sourceSnapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source snapshot." + } + }, + "type": "object" + }, + "AuditConfig": { + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n{ \"audit_configs\": [ { \"service\": \"allServices\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:foo@gmail.com\" ] }, { \"log_type\": \"DATA_WRITE\", }, { \"log_type\": \"ADMIN_READ\", } ] }, { \"service\": \"fooservice.googleapis.com\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:bar@gmail.com\" ] } ] } ] }\n\nFor fooservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts foo@gmail.com from DATA_READ logging, and bar@gmail.com from DATA_WRITE logging.", + "id": "AuditConfig", + "properties": { + "auditLogConfigs": { + "description": "The configuration for logging of each type of permission.", + "items": { + "$ref": "AuditLogConfig" + }, + "type": "array" + }, + "exemptedMembers": { + "description": "", + "items": { + "type": "string" + }, + "type": "array" + }, + "service": { + "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. 
`allServices` is a special value that covers all services.", + "type": "string" + } + }, + "type": "object" + }, + "AuditLogConfig": { + "description": "Provides the configuration for logging a type of permissions. Example:\n\n{ \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:foo@gmail.com\" ] }, { \"log_type\": \"DATA_WRITE\", } ] }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting foo@gmail.com from DATA_READ logging.", + "id": "AuditLogConfig", + "properties": { + "exemptedMembers": { + "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of [Binding.members][].", + "items": { + "type": "string" + }, + "type": "array" + }, + "logType": { + "description": "The log type that this config enables.", + "enum": [ + "ADMIN_READ", + "DATA_READ", + "DATA_WRITE", + "LOG_TYPE_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "AuthorizationLoggingOptions": { + "description": "Authorization-related information used by Cloud Audit Logging.", + "id": "AuthorizationLoggingOptions", + "properties": { + "permissionType": { + "description": "The type of the permission that was checked.", + "enum": [ + "ADMIN_READ", + "ADMIN_WRITE", + "DATA_READ", + "DATA_WRITE", + "PERMISSION_TYPE_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "Autoscaler": { + "description": "Represents an Autoscaler resource. Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define. For more information, read Autoscaling Groups of Instances. (== resource_for beta.autoscalers ==) (== resource_for v1.autoscalers ==) (== resource_for beta.regionAutoscalers ==) (== resource_for v1.regionAutoscalers ==)", + "id": "Autoscaler", + "properties": { + "autoscalingPolicy": { + "$ref": "AutoscalingPolicy", + "description": "The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization.\n\nIf none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%." + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#autoscaler", + "description": "[Output Only] Type of the resource. Always compute#autoscaler for autoscalers.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.instanceGroups.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "region": { + "description": "[Output Only] URL of the region where the instance group resides (for autoscalers living in regional scope).", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "status": { + "description": "[Output Only] The status of the autoscaler configuration.", + "enum": [ + "ACTIVE", + "DELETING", + "ERROR", + "PENDING" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "statusDetails": { + "description": "[Output Only] Human-readable details about the current state of the autoscaler. Read the documentation for Commonly returned status messages for examples of status messages you might encounter.", + "items": { + "$ref": "AutoscalerStatusDetails" + }, + "type": "array" + }, + "target": { + "description": "URL of the managed instance group that this autoscaler will scale.", + "type": "string" + }, + "zone": { + "description": "[Output Only] URL of the zone where the instance group resides (for autoscalers living in zonal scope).", + "type": "string" + } + }, + "type": "object" + }, + "AutoscalerAggregatedList": { + "id": "AutoscalerAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "AutoscalersScopedList", + "description": "[Output Only] Name of the scope containing this set of autoscalers." + }, + "description": "A list of AutoscalersScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#autoscalerAggregatedList", + "description": "[Output Only] Type of resource. Always compute#autoscalerAggregatedList for aggregated lists of autoscalers.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "AutoscalerList": { + "description": "Contains a list of Autoscaler resources.", + "id": "AutoscalerList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of Autoscaler resources.", + "items": { + "$ref": "Autoscaler" + }, + "type": "array" + }, + "kind": { + "default": "compute#autoscalerList", + "description": "[Output Only] Type of resource. Always compute#autoscalerList for lists of autoscalers.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "AutoscalerStatusDetails": { + "id": "AutoscalerStatusDetails", + "properties": { + "message": { + "description": "The status message.", + "type": "string" + }, + "type": { + "description": "The type of error returned.", + "enum": [ + "ALL_INSTANCES_UNHEALTHY", + "BACKEND_SERVICE_DOES_NOT_EXIST", + "CAPPED_AT_MAX_NUM_REPLICAS", + "CUSTOM_METRIC_DATA_POINTS_TOO_SPARSE", + "CUSTOM_METRIC_INVALID", + "MIN_EQUALS_MAX", + "MISSING_CUSTOM_METRIC_DATA_POINTS", + "MISSING_LOAD_BALANCING_DATA_POINTS", + "MORE_THAN_ONE_BACKEND_SERVICE", + "NOT_ENOUGH_QUOTA_AVAILABLE", + "REGION_RESOURCE_STOCKOUT", + "SCALING_TARGET_DOES_NOT_EXIST", + "UNKNOWN", + "UNSUPPORTED_MAX_RATE_LOAD_BALANCING_CONFIGURATION", + "ZONE_RESOURCE_STOCKOUT" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "AutoscalersScopedList": { + "id": "AutoscalersScopedList", + "properties": { + "autoscalers": { + "description": "[Output Only] A list of autoscalers contained in this scope.", + "items": { + "$ref": "Autoscaler" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning which replaces the list of autoscalers when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "AutoscalingPolicy": { + "description": "Cloud Autoscaler policy.", + "id": "AutoscalingPolicy", + "properties": { + "coolDownPeriodSec": { + "description": "The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds.\n\nVirtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.", + "format": "int32", + "type": "integer" + }, + "cpuUtilization": { + "$ref": "AutoscalingPolicyCpuUtilization", + "description": "Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group." + }, + "customMetricUtilizations": { + "description": "Configuration parameters of autoscaling based on a custom metric.", + "items": { + "$ref": "AutoscalingPolicyCustomMetricUtilization" + }, + "type": "array" + }, + "loadBalancingUtilization": { + "$ref": "AutoscalingPolicyLoadBalancingUtilization", + "description": "Configuration parameters of autoscaling based on load balancer." + }, + "maxNumReplicas": { + "description": "The maximum number of instances that the autoscaler can scale up to. 
This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than minimal number of replicas.", + "format": "int32", + "type": "integer" + }, + "minNumReplicas": { + "description": "The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, autoscaler will choose a default value depending on maximum number of instances allowed.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "AutoscalingPolicyCpuUtilization": { + "description": "CPU utilization policy.", + "id": "AutoscalingPolicyCpuUtilization", + "properties": { + "utilizationTarget": { + "description": "The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6.\n\nIf the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization.\n\nIf the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, + "AutoscalingPolicyCustomMetricUtilization": { + "description": "Custom utilization metric policy.", + "id": "AutoscalingPolicyCustomMetricUtilization", + "properties": { + "filter": { + "description": "A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data.\n\nFor the filter to be valid for autoscaling purposes, the following rules apply: \n- You can only use the AND operator for joining selectors. \n- You can only use direct equality comparison operator (=) without any functions for each selector. \n- You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. \n- The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels.\nIf the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling.\n\nIf not specified, the type defaults to gce_instance. \n\nYou should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.", + "type": "string" + }, + "metric": { + "description": "The identifier (type) of the Stackdriver Monitoring metric. 
The metric cannot have negative values.\n\nThe metric must have a value type of INT64 or DOUBLE.", + "type": "string" + }, + "singleInstanceAssignment": { + "description": "If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. Autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing.\n\nA good metric to use with the target is for example pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances.\n\nA bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.", + "format": "double", + "type": "number" + }, + "utilizationTarget": { + "description": "The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric.\n\nFor example, a good metric to use as a utilization_target is compute.googleapis.com/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", + "format": "double", + "type": "number" + }, + "utilizationTargetType": { + "description": "Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Either GAUGE, DELTA_PER_SECOND, or DELTA_PER_MINUTE.", + "enum": [ + "DELTA_PER_MINUTE", + "DELTA_PER_SECOND", + "GAUGE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "AutoscalingPolicyLoadBalancingUtilization": { + "description": "Configuration parameters of autoscaling based on load balancing.", + "id": "AutoscalingPolicyLoadBalancingUtilization", + "properties": { + "utilizationTarget": { + "description": "Fraction of backend capacity utilization (set in HTTP(S) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, + "Backend": { + "description": "Message containing information of one individual backend.", + "id": "Backend", + "properties": { + "balancingMode": { + "description": "Specifies the balancing mode for this backend. For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL).\n\nFor Internal Load Balancing, the default and only supported mode is CONNECTION.", + "enum": [ + "CONNECTION", + "RATE", + "UTILIZATION" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "capacityScaler": { + "description": "A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION, RATE or CONNECTION). Default value is 1, which means the group will serve up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available Capacity. Valid range is [0.0,1.0].\n\nThis cannot be used for internal load balancing.", + "format": "float", + "type": "number" + }, + "description": { + "description": "An optional description of this resource. 
Provide this property when you create the resource.", + "type": "string" + }, + "failover": { + "description": "This field designates whether this is a failover backend. More than one failover backend can be configured for a given BackendService.", + "type": "boolean" + }, + "group": { + "description": "The fully-qualified URL of an Instance Group or Network Endpoint Group resource. In case of instance group this defines the list of instances that serve traffic. Member virtual machine instances from each instance group must live in the same zone as the instance group itself. No two backends in a backend service are allowed to use same Instance Group resource.\n\nFor Network Endpoint Groups this defines list of endpoints. All endpoints of Network Endpoint Group must be hosted on instances located in the same zone as the Network Endpoint Group.\n\nBackend service can not contain mix of Instance Group and Network Endpoint Group backends.\n\nNote that you must specify an Instance Group or Network Endpoint Group resource using the fully-qualified URL, rather than a partial URL.\n\nWhen the BackendService has load balancing scheme INTERNAL, the instance group must be within the same region as the BackendService. Network Endpoint Groups are not supported for INTERNAL load balancing scheme.", + "type": "string" + }, + "maxConnections": { + "description": "The max number of simultaneous connections for the group. Can be used with either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be set.\n\nThis cannot be used for internal load balancing.", + "format": "int32", + "type": "integer" + }, + "maxConnectionsPerEndpoint": { + "description": "The max number of simultaneous connections that a single backend network endpoint can handle. This is used to calculate the capacity of the group. Can be used in either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint must be set.\n\nThis cannot be used for internal load balancing.", + "format": "int32", + "type": "integer" + }, + "maxConnectionsPerInstance": { + "description": "The max number of simultaneous connections that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be set.\n\nThis cannot be used for internal load balancing.", + "format": "int32", + "type": "integer" + }, + "maxRate": { + "description": "The max requests per second (RPS) of the group. Can be used with either RATE or UTILIZATION balancing modes, but required if RATE mode. For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", + "format": "int32", + "type": "integer" + }, + "maxRatePerEndpoint": { + "description": "The max requests per second (RPS) that a single backend network endpoint can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint must be set.\n\nThis cannot be used for internal load balancing.", + "format": "float", + "type": "number" + }, + "maxRatePerInstance": { + "description": "The max requests per second (RPS) that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. 
For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", + "format": "float", + "type": "number" + }, + "maxUtilization": { + "description": "Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization target for the group. The default is 0.8. Valid range is [0.0, 1.0].\n\nThis cannot be used for internal load balancing.", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, + "BackendBucket": { + "description": "A BackendBucket resource. This resource defines a Cloud Storage bucket.", + "id": "BackendBucket", + "properties": { + "bucketName": { + "description": "Cloud Storage bucket name.", + "type": "string" + }, + "cdnPolicy": { + "$ref": "BackendBucketCdnPolicy", + "description": "Cloud CDN configuration for this BackendBucket." + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional textual description of the resource; provided by the client when the resource is created.", + "type": "string" + }, + "enableCdn": { + "description": "If true, enable Cloud CDN for this BackendBucket.", + "type": "boolean" + }, + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#backendBucket", + "description": "Type of the resource.", + "type": "string" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + } + }, + "type": "object" + }, + "BackendBucketCdnPolicy": { + "description": "Message containing Cloud CDN configuration for a backend bucket.", + "id": "BackendBucketCdnPolicy", + "properties": { + "signedUrlCacheMaxAgeSec": { + "description": "Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a \"Cache-Control: public, max-age=[TTL]\" header, regardless of any existing Cache-Control header. 
The actual headers served in responses will not be altered.", + "format": "int64", + "type": "string" + }, + "signedUrlKeyNames": { + "description": "[Output Only] Names of the keys for signing request URLs.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "BackendBucketList": { + "description": "Contains a list of BackendBucket resources.", + "id": "BackendBucketList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of BackendBucket resources.", + "items": { + "$ref": "BackendBucket" + }, + "type": "array" + }, + "kind": { + "default": "compute#backendBucketList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "BackendService": { + "description": "A BackendService resource. This resource defines a group of backend virtual machines and their serving capacity. (== resource_for v1.backendService ==) (== resource_for beta.backendService ==)", + "id": "BackendService", + "properties": { + "affinityCookieTtlSec": { + "description": "Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value for TTL is one day.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", + "format": "int32", + "type": "integer" + }, + "backends": { + "description": "The list of backends that serve this BackendService.", + "items": { + "$ref": "Backend" + }, + "type": "array" + }, + "cdnPolicy": { + "$ref": "BackendServiceCdnPolicy", + "description": "Cloud CDN configuration for this BackendService." + }, + "circuitBreakers": { + "$ref": "CircuitBreakers", + "description": "Settings controlling the volume of connections to a backend service.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + }, + "connectionDraining": { + "$ref": "ConnectionDraining" + }, + "consistentHash": { + "$ref": "ConsistentHashLoadBalancerSettings", + "description": "Consistent Hash-based load balancing can be used to provide soft session affinity based on HTTP headers, cookies or other properties. This load balancing policy is applicable only for HTTP connections. The affinity to a particular destination host will be lost when one or more hosts are added/removed from the destination service. This field specifies parameters that control consistent hashing. This field is only applicable when localityLbPolicy is set to MAGLEV or RING_HASH.\n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "customRequestHeaders": { + "description": "Headers that the HTTP/S load balancer should add to proxied requests.", + "items": { + "type": "string" + }, + "type": "array" + }, + "description": { + "description": "An optional description of this resource. 
Provide this property when you create the resource.", + "type": "string" + }, + "enableCDN": { + "description": "If true, enable Cloud CDN for this BackendService.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", + "type": "boolean" + }, + "failoverPolicy": { + "$ref": "BackendServiceFailoverPolicy" + }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a BackendService.", + "format": "byte", + "type": "string" + }, + "healthChecks": { + "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required for Compute Engine backend services. A health check must not be specified for App Engine backend and Cloud Function backend.\n\nFor internal load balancing, a URL to a HealthCheck resource must be specified instead.", + "items": { + "type": "string" + }, + "type": "array" + }, + "iap": { + "$ref": "BackendServiceIAP" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#backendService", + "description": "[Output Only] Type of resource. Always compute#backendService for backend services.", + "type": "string" + }, + "loadBalancingScheme": { + "description": "Indicates whether the backend service will be used with internal or external load balancing. A backend service created for one type of load balancing cannot be used with the other. Possible values are INTERNAL and EXTERNAL.", + "enum": [ + "EXTERNAL", + "INTERNAL", + "INTERNAL_MANAGED", + "INTERNAL_SELF_MANAGED", + "INVALID_LOAD_BALANCING_SCHEME" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "localityLbPolicy": { + "description": "The load balancing algorithm used within the scope of the locality. The possible values are: \n- ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. \n- LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. \n- RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. \n- RANDOM: The load balancer selects a random healthy host. \n- ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. \n- MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. 
For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 \n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED.", + "enum": [ + "INVALID_LB_POLICY", + "LEAST_REQUEST", + "MAGLEV", + "ORIGINAL_DESTINATION", + "RANDOM", + "RING_HASH", + "ROUND_ROBIN" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "logConfig": { + "$ref": "BackendServiceLogConfig", + "description": "This field denotes the logging options for the load balancer traffic served by this backend service. If logging is enabled, logs will be exported to Stackdriver." + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "outlierDetection": { + "$ref": "OutlierDetection", + "description": "Settings controlling eviction of unhealthy hosts from the load balancing pool. This field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + }, + "port": { + "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.\n\nThis cannot be used for internal load balancing.", + "format": "int32", + "type": "integer" + }, + "portName": { + "description": "Name of backend port. The same name should appear in the instance groups referenced by this service. Required when the load balancing scheme is EXTERNAL.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", + "type": "string" + }, + "protocol": { + "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, TCP, and SSL. The default is HTTP.\n\nFor internal load balancing, the possible values are TCP and UDP, and the default is TCP.", + "enum": [ + "HTTP", + "HTTP2", + "HTTPS", + "SSL", + "TCP", + "UDP" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "region": { + "description": "[Output Only] URL of the region where the regional backend service resides. This field is not applicable to global backend services. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, + "securityPolicy": { + "description": "[Output Only] The resource URL for the security policy associated with this backend service.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "sessionAffinity": { + "description": "Type of session affinity to use. 
The default is NONE.\n\nWhen the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, or GENERATED_COOKIE.\n\nWhen the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the protocol is UDP, this field is not used.", + "enum": [ + "CLIENT_IP", + "CLIENT_IP_PORT_PROTO", + "CLIENT_IP_PROTO", + "GENERATED_COOKIE", + "HEADER_FIELD", + "HTTP_COOKIE", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "timeoutSec": { + "description": "How many seconds to wait for the backend before considering it a failed request. Default is 30 seconds.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "BackendServiceAggregatedList": { + "description": "Contains a list of BackendServicesScopedList.", + "id": "BackendServiceAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "BackendServicesScopedList", + "description": "Name of the scope containing this set of BackendServices." + }, + "description": "A list of BackendServicesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#backendServiceAggregatedList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "BackendServiceCdnPolicy": { + "description": "Message containing Cloud CDN configuration for a backend service.", + "id": "BackendServiceCdnPolicy", + "properties": { + "cacheKeyPolicy": { + "$ref": "CacheKeyPolicy", + "description": "The CacheKeyPolicy for this CdnPolicy." + }, + "signedUrlCacheMaxAgeSec": { + "description": "Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a \"Cache-Control: public, max-age=[TTL]\" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.", + "format": "int64", + "type": "string" + }, + "signedUrlKeyNames": { + "description": "[Output Only] Names of the keys for signing request URLs.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "BackendServiceFailoverPolicy": { + "id": "BackendServiceFailoverPolicy", + "properties": { + "disableConnectionDrainOnFailover": { + "description": "On failover or failback, this field indicates whether connection drain will be honored. Setting this to true has the following effect: connections to the old active pool are not drained. Connections to the new active pool use the timeout of 10 min (currently fixed). Setting to false has the following effect: both old and new connections will have a drain timeout of 10 min.\n\nThis can be set to true only if the protocol is TCP.\n\nThe default is false.", + "type": "boolean" + }, + "dropTrafficIfUnhealthy": { + "description": "This option is used only when no healthy VMs are detected in the primary and backup instance groups. When set to true, traffic is dropped. When set to false, new connections are sent across all VMs in the primary group.\n\nThe default is false.", + "type": "boolean" + }, + "failoverRatio": { + "description": "The value of the field must be in [0, 1]. 
If the ratio of the healthy VMs in the primary backend is at or below this number, traffic arriving at the load-balanced IP will be directed to the failover backend.\n\nIn case where 'failoverRatio' is not set or all the VMs in the backup backend are unhealthy, the traffic will be directed back to the primary backend in the \"force\" mode, where traffic will be spread to the healthy VMs with the best effort, or to all VMs when no VM is healthy.\n\nThis field is only used with l4 load balancing.", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, + "BackendServiceGroupHealth": { + "id": "BackendServiceGroupHealth", + "properties": { + "healthStatus": { + "description": "Health state of the backend instances or endpoints in requested instance or network endpoint group, determined based on configured health checks.", + "items": { + "$ref": "HealthStatus" + }, + "type": "array" + }, + "kind": { + "default": "compute#backendServiceGroupHealth", + "description": "[Output Only] Type of resource. Always compute#backendServiceGroupHealth for the health of backend services.", + "type": "string" + } + }, + "type": "object" + }, + "BackendServiceIAP": { + "description": "Identity-Aware Proxy", + "id": "BackendServiceIAP", + "properties": { + "enabled": { + "type": "boolean" + }, + "oauth2ClientId": { + "type": "string" + }, + "oauth2ClientSecret": { + "type": "string" + }, + "oauth2ClientSecretSha256": { + "description": "[Output Only] SHA256 hash value for the field oauth2_client_secret above.", + "type": "string" + } + }, + "type": "object" + }, + "BackendServiceList": { + "description": "Contains a list of BackendService resources.", + "id": "BackendServiceList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of BackendService resources.", + "items": { + "$ref": "BackendService" + }, + "type": "array" + }, + "kind": { + "default": "compute#backendServiceList", + "description": "[Output Only] Type of resource. Always compute#backendServiceList for lists of backend services.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "BackendServiceLogConfig": { + "description": "The available logging options for the load balancer traffic served by this backend service.", + "id": "BackendServiceLogConfig", + "properties": { + "enable": { + "description": "This field denotes whether to enable logging for the load balancer traffic served by this backend service.", + "type": "boolean" + }, + "sampleRate": { + "description": "This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0.", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, + "BackendServiceReference": { + "id": "BackendServiceReference", + "properties": { + "backendService": { + "type": "string" + } + }, + "type": "object" + }, + "BackendServicesScopedList": { + "id": "BackendServicesScopedList", + "properties": { + "backendServices": { + "description": "A list of BackendServices contained in this scope.", + "items": { + "$ref": "BackendService" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of backend services when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "Binding": { + "description": "Associates `members` with a `role`.", + "id": "Binding", + "properties": { + "condition": { + "$ref": "Expr", + "description": "The condition that is associated with this binding. NOTE: An unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." + }, + "members": { + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@gmail.com` .\n\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`.\n\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "role": { + "description": "Role that is assigned to `members`. 
For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "type": "string" + } + }, + "type": "object" + }, + "CacheInvalidationRule": { + "id": "CacheInvalidationRule", + "properties": { + "host": { + "description": "If set, this invalidation rule will only apply to requests with a Host header matching host.", + "type": "string" + }, + "path": { + "type": "string" + } + }, + "type": "object" + }, + "CacheKeyPolicy": { + "description": "Message containing what to include in the cache key for a request for Cloud CDN.", + "id": "CacheKeyPolicy", + "properties": { + "includeHost": { + "description": "If true, requests to different hosts will be cached separately.", + "type": "boolean" + }, + "includeProtocol": { + "description": "If true, http and https requests will be cached separately.", + "type": "boolean" + }, + "includeQueryString": { + "description": "If true, include query string parameters in the cache key according to query_string_whitelist and query_string_blacklist. If neither is set, the entire query string will be included. If false, the query string will be excluded from the cache key entirely.", + "type": "boolean" + }, + "queryStringBlacklist": { + "description": "Names of query string parameters to exclude in cache keys. All other parameters will be included. Either specify query_string_whitelist or query_string_blacklist, not both. '\u0026' and '=' will be percent encoded and not treated as delimiters.", + "items": { + "type": "string" + }, + "type": "array" + }, + "queryStringWhitelist": { + "description": "Names of query string parameters to include in cache keys. All other parameters will be excluded. Either specify query_string_whitelist or query_string_blacklist, not both. '\u0026' and '=' will be percent encoded and not treated as delimiters.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "CircuitBreakers": { + "description": "Settings controlling the volume of connections to a backend service.", + "id": "CircuitBreakers", + "properties": { + "connectTimeout": { + "$ref": "Duration", + "description": "The timeout for new network connections to hosts." + }, + "maxConnections": { + "description": "The maximum number of connections to the backend cluster. If not specified, the default is 1024.", + "format": "int32", + "type": "integer" + }, + "maxPendingRequests": { + "description": "The maximum number of pending requests allowed to the backend cluster. If not specified, the default is 1024.", + "format": "int32", + "type": "integer" + }, + "maxRequests": { + "description": "The maximum number of parallel requests that allowed to the backend cluster. If not specified, the default is 1024.", + "format": "int32", + "type": "integer" + }, + "maxRequestsPerConnection": { + "description": "Maximum requests for a single backend connection. This parameter is respected by both the HTTP/1.1 and HTTP/2 implementations. If not specified, there is no limit. Setting this parameter to 1 will effectively disable keep alive.", + "format": "int32", + "type": "integer" + }, + "maxRetries": { + "description": "The maximum number of parallel retries allowed to the backend cluster. If not specified, the default is 3.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "Commitment": { + "description": "Represents a Commitment resource. Creating a Commitment resource means that you are purchasing a committed use contract with an explicit start and end time. 
You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts.\n\nCommitted use discounts are subject to Google Cloud Platform's Service Specific Terms. By purchasing a committed use discount, you agree to these terms. Committed use discounts will not renew, so you must purchase a new commitment to continue receiving discounts. (== resource_for beta.commitments ==) (== resource_for v1.commitments ==)", + "id": "Commitment", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "endTimestamp": { + "description": "[Output Only] Commitment end time in RFC3339 text format.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#commitment", + "description": "[Output Only] Type of the resource. Always compute#commitment for commitments.", + "type": "string" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "plan": { + "description": "The plan for this commitment, which determines duration and discount rate. The currently supported plans are TWELVE_MONTH (1 year), and THIRTY_SIX_MONTH (3 years).", + "enum": [ + "INVALID", + "THIRTY_SIX_MONTH", + "TWELVE_MONTH" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "region": { + "description": "[Output Only] URL of the region where this commitment may be used.", + "type": "string" + }, + "reservations": { + "description": "List of reservations for this commitment.", + "items": { + "$ref": "Reservation" + }, + "type": "array" + }, + "resources": { + "description": "A list of commitment amounts for particular resources. Note that VCPU and MEMORY resource commitments must occur together.", + "items": { + "$ref": "ResourceCommitment" + }, + "type": "array" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "startTimestamp": { + "description": "[Output Only] Commitment start time in RFC3339 text format.", + "type": "string" + }, + "status": { + "description": "[Output Only] Status of the commitment with regards to eventual expiration (each commitment has an end date defined). 
One of the following values: NOT_YET_ACTIVE, ACTIVE, EXPIRED.", + "enum": [ + "ACTIVE", + "CREATING", + "EXPIRED", + "NOT_YET_ACTIVE" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "statusMessage": { + "description": "[Output Only] An optional, human-readable explanation of the status.", + "type": "string" + } + }, + "type": "object" + }, + "CommitmentAggregatedList": { + "id": "CommitmentAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "CommitmentsScopedList", + "description": "[Output Only] Name of the scope containing this set of commitments." + }, + "description": "A list of CommitmentsScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#commitmentAggregatedList", + "description": "[Output Only] Type of resource. Always compute#commitmentAggregatedList for aggregated lists of commitments.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "CommitmentList": { + "description": "Contains a list of Commitment resources.", + "id": "CommitmentList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of Commitment resources.", + "items": { + "$ref": "Commitment" + }, + "type": "array" + }, + "kind": { + "default": "compute#commitmentList", + "description": "[Output Only] Type of resource. Always compute#commitmentList for lists of commitments.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "CommitmentsScopedList": { + "id": "CommitmentsScopedList", + "properties": { + "commitments": { + "description": "[Output Only] A list of commitments contained in this scope.", + "items": { + "$ref": "Commitment" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning which replaces the list of commitments when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", @@ -21542,29 +27253,475 @@ "type": "string" } }, - "type": "object" + "type": "object" + } + }, + "type": "object" + }, + "Condition": { + "description": "A condition to be met.", + "id": "Condition", + "properties": { + "iam": { + "description": "Trusted attributes supplied by the IAM system.", + "enum": [ + "APPROVER", + "ATTRIBUTION", + "AUTHORITY", + "CREDENTIALS_TYPE", + "JUSTIFICATION_TYPE", + "NO_ATTR", + "SECURITY_REALM" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "op": { + "description": "An operator to apply the subject with.", + "enum": [ + "DISCHARGED", + "EQUALS", + "IN", + "NOT_EQUALS", + "NOT_IN", + "NO_OP" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "svc": { + "description": "Trusted attributes discharged by the service.", + "type": "string" + }, + "sys": { + "description": "Trusted attributes supplied by any service that owns resources and uses the IAM system for access control.", + "enum": [ + "IP", + "NAME", + "NO_ATTR", + "REGION", + "SERVICE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "values": { + "description": "The objects of the condition.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "ConnectionDraining": { + "description": "Message containing connection draining configuration.", + "id": "ConnectionDraining", + "properties": { + "drainingTimeoutSec": { + "description": "Time for which instance will be drained (not accept new connections, but still work to finish started).", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "ConsistentHashLoadBalancerSettings": { + "description": "This message defines settings for a consistent hash style load balancer.", + "id": "ConsistentHashLoadBalancerSettings", + "properties": { + "httpCookie": { + "$ref": "ConsistentHashLoadBalancerSettingsHttpCookie", 
+ "description": "Hash is based on HTTP Cookie. This field describes a HTTP cookie that will be used as the hash key for the consistent hash load balancer. If the cookie is not present, it will be generated. This field is applicable if the sessionAffinity is set to HTTP_COOKIE." + }, + "httpHeaderName": { + "description": "The hash based on the value of the specified header field. This field is applicable if the sessionAffinity is set to HEADER_FIELD.", + "type": "string" + }, + "minimumRingSize": { + "description": "The minimum number of virtual nodes to use for the hash ring. Defaults to 1024. Larger ring sizes result in more granular load distributions. If the number of hosts in the load balancing pool is larger than the ring size, each host will be assigned a single virtual node.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "ConsistentHashLoadBalancerSettingsHttpCookie": { + "description": "The information about the HTTP Cookie on which the hash function is based for load balancing policies that use a consistent hash.", + "id": "ConsistentHashLoadBalancerSettingsHttpCookie", + "properties": { + "name": { + "description": "Name of the cookie.", + "type": "string" + }, + "path": { + "description": "Path to set for the cookie.", + "type": "string" + }, + "ttl": { + "$ref": "Duration", + "description": "Lifetime of the cookie." + } + }, + "type": "object" + }, + "CorsPolicy": { + "description": "The specification for allowing client side cross-origin requests. Please see W3C Recommendation for Cross Origin Resource Sharing", + "id": "CorsPolicy", + "properties": { + "allowCredentials": { + "description": "In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This translates to the Access-Control-Allow-Credentials header.\nDefault is false.", + "type": "boolean" + }, + "allowHeaders": { + "description": "Specifies the content for the Access-Control-Allow-Headers header.", + "items": { + "type": "string" + }, + "type": "array" + }, + "allowMethods": { + "description": "Specifies the content for the Access-Control-Allow-Methods header.", + "items": { + "type": "string" + }, + "type": "array" + }, + "allowOriginRegexes": { + "description": "Specifies the regualar expression patterns that match allowed origins. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript \nAn origin is allowed if it matches either allow_origins or allow_origin_regex.", + "items": { + "type": "string" + }, + "type": "array" + }, + "allowOrigins": { + "description": "Specifies the list of origins that will be allowed to do CORS requests.\nAn origin is allowed if it matches either allow_origins or allow_origin_regex.", + "items": { + "type": "string" + }, + "type": "array" + }, + "disabled": { + "description": "If true, specifies the CORS policy is disabled. The default value of false, which indicates that the CORS policy is in effect.", + "type": "boolean" + }, + "exposeHeaders": { + "description": "Specifies the content for the Access-Control-Expose-Headers header.", + "items": { + "type": "string" + }, + "type": "array" + }, + "maxAge": { + "description": "Specifies how long the results of a preflight request can be cached. 
This translates to the content for the Access-Control-Max-Age header.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "CustomerEncryptionKey": { + "description": "Represents a customer-supplied encryption key", + "id": "CustomerEncryptionKey", + "properties": { + "kmsKeyName": { + "description": "The name of the encryption key that is stored in Google Cloud KMS.", + "type": "string" + }, + "rawKey": { + "description": "Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource.", + "type": "string" + }, + "rsaEncryptedKey": { + "description": "Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource.\n\nThe key must meet the following requirements before you can provide it to Compute Engine: \n- The key is wrapped using a RSA public key certificate provided by Google. \n- After being wrapped, the key must be encoded in RFC 4648 base64 encoding. Gets the RSA public key certificate provided by Google at:\nhttps://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem", + "type": "string" + }, + "sha256": { + "description": "[Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource.", + "type": "string" + } + }, + "type": "object" + }, + "CustomerEncryptionKeyProtectedDisk": { + "id": "CustomerEncryptionKeyProtectedDisk", + "properties": { + "diskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Decrypts data associated with the disk with a customer-supplied encryption key." + }, + "source": { + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. This field is only applicable for persistent disks.", + "type": "string" + } + }, + "type": "object" + }, + "DeprecationStatus": { + "description": "Deprecation status for a public resource.", + "id": "DeprecationStatus", + "properties": { + "deleted": { + "description": "An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it.", + "type": "string" + }, + "deprecated": { + "description": "An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it.", + "type": "string" + }, + "obsolete": { + "description": "An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it.", + "type": "string" + }, + "replacement": { + "description": "The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource.", + "type": "string" + }, + "state": { + "description": "The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end of life date for an image, can use ACTIVE. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. 
Operations which use OBSOLETE or DELETED resources will be rejected and result in an error.", + "enum": [ + "ACTIVE", + "DELETED", + "DEPRECATED", + "OBSOLETE" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "Disk": { + "description": "A Disk resource. (== resource_for beta.disks ==) (== resource_for v1.disks ==)", + "id": "Disk", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "diskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the disk using a customer-supplied encryption key.\n\nAfter you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later (e.g. to create a disk snapshot, to create a disk image, to create a machine image, or to attach the disk to a virtual machine).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later." + }, + "guestOsFeatures": { + "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", + "items": { + "$ref": "GuestOsFeature" + }, + "type": "array" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#disk", + "description": "[Output Only] Type of the resource. Always compute#disk for disks.", + "type": "string" + }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this disk, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a disk.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this disk. These can be later modified by the setLabels method.", + "type": "object" + }, + "lastAttachTimestamp": { + "description": "[Output Only] Last attach timestamp in RFC3339 text format.", + "type": "string" + }, + "lastDetachTimestamp": { + "description": "[Output Only] Last detach timestamp in RFC3339 text format.", + "type": "string" + }, + "licenseCodes": { + "description": "Integer license codes indicating which licenses are attached to this disk.", + "items": { + "format": "int64", + "type": "string" + }, + "type": "array" + }, + "licenses": { + "description": "A list of publicly visible licenses. Reserved for Google's use.", + "items": { + "type": "string" + }, + "type": "array" + }, + "name": { + "annotations": { + "required": [ + "compute.disks.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "options": { + "description": "Internal use only.", + "type": "string" + }, + "physicalBlockSizeBytes": { + "description": "Physical block size of the persistent disk, in bytes. If not present in a request, a default value is used. Currently supported sizes are 4096 and 16384, other sizes may be added in the future. If an unsupported value is requested, the error message will list the supported values for the caller's project.", + "format": "int64", + "type": "string" + }, + "region": { + "description": "[Output Only] URL of the region where the disk resides. Only applicable for regional resources. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, + "replicaZones": { + "description": "URLs of the zones where the disk should be replicated to. Only applicable for regional resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "resourcePolicies": { + "description": "Resource policies applied to this disk for automatic snapshot creations.", + "items": { + "type": "string" + }, + "type": "array" + }, + "selfLink": { + "description": "[Output Only] Server-defined fully-qualified URL for this resource.", + "type": "string" + }, + "sizeGb": { + "description": "Size of the persistent disk, specified in GB. You can specify this field when creating a persistent disk using the sourceImage or sourceSnapshot parameter, or specify it alone to create an empty persistent disk.\n\nIf you specify this field along with sourceImage or sourceSnapshot, the value of sizeGb must not be less than the size of the sourceImage or the size of the snapshot. Acceptable values are 1 to 65536, inclusive.", + "format": "int64", + "type": "string" + }, + "sourceImage": { + "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image:\nprojects/debian-cloud/global/images/family/debian-9\n\n\nAlternatively, use a specific version of a public operating system image:\nprojects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD\n\n\nTo create a disk with a custom image that you created, specify the image name in the following format:\nglobal/images/my-custom-image\n\n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\nglobal/images/family/my-image-family", + "type": "string" + }, + "sourceImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key." + }, + "sourceImageId": { + "description": "[Output Only] The ID value of the image used to create this disk. This value identifies the exact image that was used to create this persistent disk. 
For example, if you created the persistent disk from an image that was later deleted and recreated under the same name, the source image ID would identify the exact version of the image that was used.", + "type": "string" + }, + "sourceSnapshot": { + "description": "The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot \n- projects/project/global/snapshots/snapshot \n- global/snapshots/snapshot", + "type": "string" + }, + "sourceSnapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key." + }, + "sourceSnapshotId": { + "description": "[Output Only] The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used.", + "type": "string" + }, + "status": { + "description": "[Output Only] The status of disk creation.", + "enum": [ + "CREATING", + "DELETING", + "FAILED", + "READY", + "RESTORING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "storageType": { + "description": "[Deprecated] Storage type of the persistent disk.", + "enum": [ + "HDD", + "SSD" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "type": { + "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. For example: projects/project/zones/zone/diskTypes/pd-standard or pd-ssd", + "type": "string" + }, + "users": { + "description": "[Output Only] Links to the users of the disk (attached instances) in form: projects/project/zones/zone/instances/instance", + "items": { + "type": "string" + }, + "type": "array" + }, + "zone": { + "description": "[Output Only] URL of the zone where the disk resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" } }, "type": "object" }, - "AcceleratorTypeList": { - "description": "Contains a list of accelerator types.", - "id": "AcceleratorTypeList", + "DiskAggregatedList": { + "id": "DiskAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of AcceleratorType resources.", - "items": { - "$ref": "AcceleratorType" + "additionalProperties": { + "$ref": "DisksScopedList", + "description": "[Output Only] Name of the scope containing this set of disks." }, - "type": "array" + "description": "A list of DisksScopedList resources.", + "type": "object" }, "kind": { - "default": "compute#acceleratorTypeList", - "description": "[Output Only] Type of resource. Always compute#acceleratorTypeList for lists of accelerator types.", + "default": "compute#diskAggregatedList", + "description": "[Output Only] Type of resource. 
Always compute#diskAggregatedList for aggregated lists of persistent disks.", "type": "string" }, "nextPageToken": { @@ -21659,18 +27816,77 @@ }, "type": "object" }, - "AcceleratorTypesScopedList": { - "id": "AcceleratorTypesScopedList", + "DiskInstantiationConfig": { + "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", + "id": "DiskInstantiationConfig", "properties": { - "acceleratorTypes": { - "description": "[Output Only] A list of accelerator types contained in this scope.", + "autoDelete": { + "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance).", + "type": "boolean" + }, + "customImage": { + "description": "The custom source image to be used to restore this disk when instantiating this instance template.", + "type": "string" + }, + "deviceName": { + "description": "Specifies the device name of the disk to which the configurations apply to.", + "type": "string" + }, + "instantiateFrom": { + "description": "Specifies whether to include the disk and what image to use. Possible values are: \n- source-image: to use the same image that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. \n- source-image-family: to use the same image family that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. \n- custom-image: to use a user-provided image url for disk creation. Applicable to the boot disk and additional read-write disks. \n- attach-read-only: to attach a read-only disk. Applicable to read-only disks. \n- do-not-include: to exclude a disk from the template. Applicable to additional read-write disks, local SSDs, and read-only disks.", + "enum": [ + "ATTACH_READ_ONLY", + "BLANK", + "CUSTOM_IMAGE", + "DEFAULT", + "DO_NOT_INCLUDE", + "SOURCE_IMAGE", + "SOURCE_IMAGE_FAMILY" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "DiskList": { + "description": "A list of Disk resources.", + "id": "DiskList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of Disk resources.", "items": { - "$ref": "AcceleratorType" + "$ref": "Disk" }, "type": "array" }, + "kind": { + "default": "compute#diskList", + "description": "[Output Only] Type of resource. Always compute#diskList for lists of disks.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "[Output Only] An informational warning that appears when the accelerator types list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -21753,85 +27969,39 @@ }, "type": "object" }, - "AccessConfig": { - "description": "An access configuration attached to an instance's network interface. Only one access config per instance is supported.", - "id": "AccessConfig", + "DiskMoveRequest": { + "id": "DiskMoveRequest", "properties": { - "kind": { - "default": "compute#accessConfig", - "description": "[Output Only] Type of the resource. Always compute#accessConfig for access configs.", - "type": "string" - }, - "name": { - "description": "The name of this access configuration. The default and recommended name is External NAT but you can use any arbitrary string you would like. For example, My external IP or Network Access.", - "type": "string" - }, - "natIP": { - "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", - "type": "string" - }, - "networkTier": { - "description": "This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD.\n\nIf an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier.\n\nIf an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP.", - "enum": [ - "PREMIUM", - "STANDARD" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "publicPtrDomainName": { - "description": "The DNS domain name for the public PTR record. This field can only be set when the set_public_ptr field is enabled.", + "destinationZone": { + "description": "The URL of the destination zone to move the disk. This can be a full or partial URL. For example, the following are all valid URLs to a zone: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone \n- projects/project/zones/zone \n- zones/zone", "type": "string" }, - "setPublicPtr": { - "description": "Specifies whether a public DNS ?PTR? record should be created to map the external IP address of the instance to a DNS domain name.", - "type": "boolean" - }, - "type": { - "default": "ONE_TO_ONE_NAT", - "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", - "enum": [ - "ONE_TO_ONE_NAT" - ], - "enumDescriptions": [ - "" - ], + "targetDisk": { + "description": "The URL of the target disk to move. This can be a full or partial URL. For example, the following are all valid URLs to a disk: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk", "type": "string" } }, "type": "object" }, - "Address": { - "description": "A reserved address resource. (== resource_for beta.addresses ==) (== resource_for v1.addresses ==) (== resource_for beta.globalAddresses ==) (== resource_for v1.globalAddresses ==)", - "id": "Address", + "DiskType": { + "description": "A DiskType resource. 
(== resource_for beta.diskTypes ==) (== resource_for v1.diskTypes ==)", + "id": "DiskType", "properties": { - "address": { - "description": "The static IP address represented by this resource.", + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "addressType": { - "description": "The type of address to reserve, either INTERNAL or EXTERNAL. If unspecified, defaults to EXTERNAL.", - "enum": [ - "EXTERNAL", - "INTERNAL", - "UNSPECIFIED_TYPE" - ], - "enumDescriptions": [ - "", - "", - "" - ], + "defaultDiskSizeGb": { + "description": "[Output Only] Server-defined default disk size in GB.", + "format": "int64", "type": "string" }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this disk type." }, "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "description": "[Output Only] An optional description of this resource.", "type": "string" }, "id": { @@ -21839,120 +28009,37 @@ "format": "uint64", "type": "string" }, - "ipVersion": { - "description": "The IP Version that will be used by this address. Valid options are IPV4 or IPV6. This can only be specified for a global address.", - "enum": [ - "IPV4", - "IPV6", - "UNSPECIFIED_VERSION" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, "kind": { - "default": "compute#address", - "description": "[Output Only] Type of the resource. Always compute#address for addresses.", - "type": "string" - }, - "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this Address, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an Address.", - "format": "byte", + "default": "compute#diskType", + "description": "[Output Only] Type of the resource. Always compute#diskType for disk types.", "type": "string" }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to apply to this Address resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", - "type": "object" - }, "name": { - "annotations": { - "required": [ - "compute.addresses.insert" - ] - }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "[Output Only] Name of the resource.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "network": { - "description": "The URL of the network in which to reserve the address. 
This field can only be used with INTERNAL type with VPC_PEERING purpose.", - "type": "string" - }, - "networkTier": { - "description": "This signifies the networking tier used for configuring this Address and can only take the following values: PREMIUM, STANDARD. Global forwarding rules can only be Premium Tier. Regional forwarding rules can be either Premium or Standard Tier. Standard Tier addresses applied to regional forwarding rules can be used with any external load balancer. Regional forwarding rules in Premium Tier can only be used with a Network load balancer.\n\nIf this field is not specified, it is assumed to be PREMIUM.", - "enum": [ - "PREMIUM", - "STANDARD" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "prefixLength": { - "description": "The prefix length if the resource reprensents an IP range.", - "format": "int32", - "type": "integer" - }, - "purpose": { - "description": "The purpose of resource, only used with INTERNAL type.", - "enum": [ - "DNS_RESOLVER", - "GCE_ENDPOINT", - "VPC_PEERING" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, "region": { - "description": "[Output Only] URL of the region where the regional address resides. This field is not applicable to global addresses. You must specify this field as part of the HTTP request URL. You cannot set this field in the request body.", + "description": "[Output Only] URL of the region where the disk type resides. Only applicable for regional resources. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "status": { - "description": "[Output Only] The status of the address, which can be one of RESERVING, RESERVED, or IN_USE. An address that is RESERVING is currently in the process of being reserved. A RESERVED address is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", - "enum": [ - "IN_USE", - "RESERVED", - "RESERVING" - ], - "enumDescriptions": [ - "", - "", - "" - ], + "validDiskSize": { + "description": "[Output Only] An optional textual description of the valid disk size, such as \"10GB-10TB\".", "type": "string" }, - "subnetwork": { - "description": "The URL of the subnetwork in which to reserve the address. If an IP address is specified, it must be within the subnetwork's IP range. This field can only be used with INTERNAL type with GCE_ENDPOINT/DNS_RESOLVER purposes.", + "zone": { + "description": "[Output Only] URL of the zone where the disk type resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" - }, - "users": { - "description": "[Output Only] The URLs of the resources that are using this address.", - "items": { - "type": "string" - }, - "type": "array" } }, "type": "object" }, - "AddressAggregatedList": { - "id": "AddressAggregatedList", + "DiskTypeAggregatedList": { + "id": "DiskTypeAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", @@ -21960,15 +28047,15 @@ }, "items": { "additionalProperties": { - "$ref": "AddressesScopedList", - "description": "[Output Only] Name of the scope containing this set of addresses." + "$ref": "DiskTypesScopedList", + "description": "[Output Only] Name of the scope containing this set of disk types." 
}, - "description": "A list of AddressesScopedList resources.", + "description": "A list of DiskTypesScopedList resources.", "type": "object" }, "kind": { - "default": "compute#addressAggregatedList", - "description": "[Output Only] Type of resource. Always compute#addressAggregatedList for aggregated lists of addresses.", + "default": "compute#diskTypeAggregatedList", + "description": "[Output Only] Type of resource. Always compute#diskTypeAggregatedList.", "type": "string" }, "nextPageToken": { @@ -22063,24 +28150,24 @@ }, "type": "object" }, - "AddressList": { - "description": "Contains a list of addresses.", - "id": "AddressList", + "DiskTypeList": { + "description": "Contains a list of disk types.", + "id": "DiskTypeList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Address resources.", + "description": "A list of DiskType resources.", "items": { - "$ref": "Address" + "$ref": "DiskType" }, "type": "array" }, "kind": { - "default": "compute#addressList", - "description": "[Output Only] Type of resource. Always compute#addressList for lists of addresses.", + "default": "compute#diskTypeList", + "description": "[Output Only] Type of resource. Always compute#diskTypeList for disk types.", "type": "string" }, "nextPageToken": { @@ -22175,18 +28262,18 @@ }, "type": "object" }, - "AddressesScopedList": { - "id": "AddressesScopedList", + "DiskTypesScopedList": { + "id": "DiskTypesScopedList", "properties": { - "addresses": { - "description": "[Output Only] A list of addresses contained in this scope.", + "diskTypes": { + "description": "[Output Only] A list of disk types contained in this scope.", "items": { - "$ref": "Address" + "$ref": "DiskType" }, "type": "array" }, "warning": { - "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", + "description": "[Output Only] Informational warning which replaces the list of disk types when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -22269,133 +28356,55 @@ }, "type": "object" }, - "AliasIpRange": { - "description": "An alias IP range attached to an instance's network interface.", - "id": "AliasIpRange", + "DisksAddResourcePoliciesRequest": { + "id": "DisksAddResourcePoliciesRequest", "properties": { - "ipCidrRange": { - "description": "The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24).", - "type": "string" - }, - "subnetworkRangeName": { - "description": "Optional subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. 
If left unspecified, the primary range of the subnetwork will be used.", - "type": "string" + "resourcePolicies": { + "description": "Resource policies to be added to this disk.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "Allocation": { - "description": "Allocation resource", - "id": "Allocation", + "DisksRemoveResourcePoliciesRequest": { + "id": "DisksRemoveResourcePoliciesRequest", "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#allocation", - "description": "[Output Only] Type of the resource. Always compute#allocations for allocations.", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "compute.instances.insert" - ] + "resourcePolicies": { + "description": "Resource policies to be removed from this disk.", + "items": { + "type": "string" }, - "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined fully-qualified URL for this resource.", - "type": "string" - }, - "specificAllocation": { - "$ref": "AllocationSpecificSKUAllocation" - }, - "specificAllocationRequired": { - "description": "Indicates whether the allocation can be consumed by VMs with \"any allocation\" defined. 
If the field is set, then only VMs that target the allocation by name using --allocation-affinity can consume this allocation.", - "type": "boolean" - }, - "zone": { - "type": "string" + "type": "array" } }, "type": "object" }, - "AllocationAffinity": { - "description": "AllocationAffinity is the configuration of desired allocation which this instance could take capacity from.", - "id": "AllocationAffinity", + "DisksResizeRequest": { + "id": "DisksResizeRequest", "properties": { - "consumeAllocationType": { - "enum": [ - "ANY_ALLOCATION", - "NO_ALLOCATION", - "SPECIFIC_ALLOCATION", - "UNSPECIFIED" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - }, - "key": { - "description": "Corresponds to the label key of allocation resource.", + "sizeGb": { + "description": "The new size of the persistent disk, which is specified in GB.", + "format": "int64", "type": "string" - }, - "values": { - "description": "Corresponds to the label values of allocation resource.", - "items": { - "type": "string" - }, - "type": "array" } }, "type": "object" }, - "AllocationAggregatedList": { - "description": "Contains a list of allocations.", - "id": "AllocationAggregatedList", + "DisksScopedList": { + "id": "DisksScopedList", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "additionalProperties": { - "$ref": "AllocationsScopedList", - "description": "Name of the scope containing this set of allocations." + "disks": { + "description": "[Output Only] A list of disks contained in this scope.", + "items": { + "$ref": "Disk" }, - "description": "A list of Allocation resources.", - "type": "object" - }, - "kind": { - "default": "compute#allocationAggregatedList", - "description": "Type of resource.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" + "type": "array" }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "[Output Only] Informational warning which replaces the list of disks when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -22473,28 +28482,122 @@ "type": "string" } }, - "type": "object" + "type": "object" + } + }, + "type": "object" + }, + "DisplayDevice": { + "description": "A set of Display Device options", + "id": "DisplayDevice", + "properties": { + "enableDisplay": { + "description": "Defines whether the instance has Display enabled.", + "type": "boolean" + } + }, + "type": "object" + }, + "DistributionPolicy": { + "id": "DistributionPolicy", + "properties": { + "zones": { + "description": "Zones where the regional managed instance group will create and manage instances.", + "items": { + "$ref": "DistributionPolicyZoneConfiguration" + }, + "type": "array" + } + }, + "type": "object" + }, + "DistributionPolicyZoneConfiguration": { + "id": "DistributionPolicyZoneConfiguration", + "properties": { + "zone": { + "annotations": { + "required": [ + "compute.regionInstanceGroupManagers.insert", + "compute.regionInstanceGroupManagers.update" + ] + }, + "description": "The URL of the zone. The zone must exist in the region where the managed instance group is located.", + "type": "string" + } + }, + "type": "object" + }, + "Duration": { + "description": "A Duration represents a fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like \"day\" or \"month\". Range is approximately 10,000 years.", + "id": "Duration", + "properties": { + "nanos": { + "description": "Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented with a 0 `seconds` field and a positive `nanos` field. Must be from 0 to 999,999,999 inclusive.", + "format": "int32", + "type": "integer" + }, + "seconds": { + "description": "Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "ExchangedPeeringRoute": { + "id": "ExchangedPeeringRoute", + "properties": { + "destRange": { + "description": "The destination range of the route.", + "type": "string" + }, + "imported": { + "description": "If the peering route is imported if there is no confliction.", + "type": "boolean" + }, + "nextHopRegion": { + "description": "The region of peering route next hop, only applies to dynamic routes.", + "type": "string" + }, + "priority": { + "description": "The priority of the peering route.", + "format": "uint32", + "type": "integer" + }, + "type": { + "description": "The type of the peering route.", + "enum": [ + "DYNAMIC_PEERING_ROUTE", + "STATIC_PEERING_ROUTE", + "SUBNET_PEERING_ROUTE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" } }, "type": "object" }, - "AllocationList": { - "id": "AllocationList", + "ExchangedPeeringRoutesList": { + "id": "ExchangedPeeringRoutesList", "properties": { "id": { - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "[Output Only] A list of Allocation resources.", + "description": "A list of ExchangedPeeringRoute resources.", "items": { - "$ref": "Allocation" + "$ref": "ExchangedPeeringRoute" }, "type": "array" }, "kind": { - "default": "compute#allocationList", - "description": "[Output Only] Type of resource.Always compute#allocationsList for listsof allocations", + "default": "compute#exchangedPeeringRoutesList", + "description": "[Output Only] Type of resource. Always compute#exchangedPeeringRoutesList for exchanged peering routes lists.", "type": "string" }, "nextPageToken": { @@ -22589,90 +28692,150 @@ }, "type": "object" }, - "AllocationSpecificSKUAllocation": { - "description": "This allocation type allows to pre allocate specific instance configuration.", - "id": "AllocationSpecificSKUAllocation", + "Expr": { + "description": "Represents an expression text. Example:\n\ntitle: \"User account presence\" description: \"Determines whether the request has a user account\" expression: \"size(request.user) \u003e 0\"", + "id": "Expr", "properties": { - "count": { - "description": "Specifies number of resources that are allocated.", - "format": "int64", + "description": { + "description": "An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", "type": "string" }, - "inUseCount": { - "description": "[OutputOnly] Indicates how many resource are in use.", - "format": "int64", + "expression": { + "description": "Textual representation of an expression in Common Expression Language syntax.\n\nThe application context of the containing message determines which well-known feature set of CEL is supported.", "type": "string" }, - "instanceProperties": { - "$ref": "AllocationSpecificSKUAllocationAllocatedInstanceProperties" + "location": { + "description": "An optional string indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + "type": "string" + }, + "title": { + "description": "An optional title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "type": "string" } }, "type": "object" }, - "AllocationSpecificSKUAllocationAllocatedInstanceProperties": { - "description": "Properties of the SKU instances being reserved.", - "id": "AllocationSpecificSKUAllocationAllocatedInstanceProperties", + "ExternalVpnGateway": { + "description": "External VPN gateway is the on-premises VPN gateway(s) or another cloud provider?s VPN gateway that connects to your Google Cloud VPN gateway. To create a highly available VPN from Google Cloud to your on-premises side or another Cloud provider's VPN gateway, you must create a external VPN gateway resource in GCP, which provides the information to GCP about your external VPN gateway.", + "id": "ExternalVpnGateway", "properties": { - "guestAccelerators": { - "description": "Specifies accelerator type and count.", - "items": { - "$ref": "AcceleratorConfig" - }, - "type": "array" + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" }, - "localSsds": { - "description": "Specifies amount of local ssd to reserve with each instance. 
The type of disk is local-ssd.", + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "interfaces": { + "description": "List of interfaces for this external VPN gateway.", "items": { - "$ref": "AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk" + "$ref": "ExternalVpnGatewayInterface" }, "type": "array" }, - "machineType": { - "description": "Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern.", + "kind": { + "default": "compute#externalVpnGateway", + "description": "[Output Only] Type of the resource. Always compute#externalVpnGateway for externalVpnGateways.", "type": "string" }, - "minCpuPlatform": { - "description": "Minimum cpu platform the allocation.", + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this ExternalVpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an ExternalVpnGateway.", + "format": "byte", "type": "string" - } - }, - "type": "object" - }, - "AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk": { - "id": "AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk", - "properties": { - "diskSizeGb": { - "description": "Specifies the size of the disk in base-2 GB.", - "format": "int64", + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this ExternalVpnGateway resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", + "type": "object" + }, + "name": { + "annotations": { + "required": [ + "compute.externalVpnGateways.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "interface": { - "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. 
For performance characteristics of SCSI over NVMe, see Local SSD performance.", + "redundancyType": { + "description": "Indicates the user-supplied redundancy type of this external VPN gateway.", "enum": [ - "NVME", - "SCSI" + "FOUR_IPS_REDUNDANCY", + "SINGLE_IP_INTERNALLY_REDUNDANT", + "TWO_IPS_REDUNDANCY" ], "enumDescriptions": [ + "", "", "" ], "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" } }, "type": "object" }, - "AllocationsScopedList": { - "id": "AllocationsScopedList", + "ExternalVpnGatewayInterface": { + "description": "The interface for the external VPN gateway.", + "id": "ExternalVpnGatewayInterface", "properties": { - "allocations": { - "description": "A list of allocations contained in this scope.", + "id": { + "description": "The numeric ID of this interface. The allowed input values for this id for different redundancy types of external VPN gateway: SINGLE_IP_INTERNALLY_REDUNDANT - 0 TWO_IPS_REDUNDANCY - 0, 1 FOUR_IPS_REDUNDANCY - 0, 1, 2, 3", + "format": "uint32", + "type": "integer" + }, + "ipAddress": { + "description": "IP address of the interface in the external VPN gateway. Only IPv4 is supported. This IP address can be either from your on-premise gateway or another Cloud provider?s VPN gateway, it cannot be an IP address from Google Compute Engine.", + "type": "string" + } + }, + "type": "object" + }, + "ExternalVpnGatewayList": { + "description": "Response to the list request, and contains a list of externalVpnGateways.", + "id": "ExternalVpnGatewayList", + "properties": { + "etag": { + "type": "string" + }, + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of ExternalVpnGateway resources.", "items": { - "$ref": "Allocation" + "$ref": "ExternalVpnGateway" }, "type": "array" }, + "kind": { + "default": "compute#externalVpnGatewayList", + "description": "[Output Only] Type of resource. Always compute#externalVpnGatewayList for lists of externalVpnGateways.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "Informational warning which replaces the list of allocations when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -22755,231 +28918,84 @@ }, "type": "object" }, - "AttachedDisk": { - "description": "An instance-attached disk resource.", - "id": "AttachedDisk", + "Firewall": { + "description": "Represents a Firewall resource.", + "id": "Firewall", "properties": { - "autoDelete": { - "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance).", - "type": "boolean" - }, - "boot": { - "description": "Indicates that this is a boot disk. 
The virtual machine will use the first partition of the disk for its root filesystem.", - "type": "boolean" - }, - "deviceName": { - "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks.", - "type": "string" - }, - "diskEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Encrypts or decrypts a disk using a customer-supplied encryption key.\n\nIf you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key.\n\nIf you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance.\n\nIf you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group." - }, - "guestOsFeatures": { - "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", + "allowed": { + "description": "The list of ALLOW rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", "items": { - "$ref": "GuestOsFeature" + "properties": { + "IPProtocol": { + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", + "type": "string" + }, + "ports": { + "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" }, "type": "array" }, - "index": { - "description": "[Output Only] A zero-based index to this disk, where 0 is reserved for the boot disk. If you have many disks attached to an instance, each disk would have a unique index number.", - "format": "int32", - "type": "integer" - }, - "initializeParams": { - "$ref": "AttachedDiskInitializeParams", - "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance.\n\nThis property is mutually exclusive with the source property; you can only define one or the other, but not both." 
- }, - "interface": { - "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.", - "enum": [ - "NVME", - "SCSI" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "kind": { - "default": "compute#attachedDisk", - "description": "[Output Only] Type of the resource. Always compute#attachedDisk for attached disks.", + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "licenses": { - "description": "[Output Only] Any valid publicly visible licenses.", + "denied": { + "description": "The list of DENY rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a denied connection.", "items": { - "type": "string" + "properties": { + "IPProtocol": { + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", + "type": "string" + }, + "ports": { + "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" }, "type": "array" }, - "mode": { - "description": "The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode.", - "enum": [ - "READ_ONLY", - "READ_WRITE" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "source": { - "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk.", - "type": "string" - }, - "type": { - "description": "Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT.", - "enum": [ - "PERSISTENT", - "SCRATCH" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "AttachedDiskInitializeParams": { - "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance.\n\nThis property is mutually exclusive with the source property; you can only define one or the other, but not both.", - "id": "AttachedDiskInitializeParams", - "properties": { "description": { - "description": "An optional description. Provide this property when creating the disk.", - "type": "string" - }, - "diskName": { - "description": "Specifies the disk name. 
If not specified, the default is to use the name of the instance. If the disk with the instance name exists already in the given zone/region, a new name will be automatically generated.", - "type": "string" - }, - "diskSizeGb": { - "description": "Specifies the size of the disk in base-2 GB.", - "format": "int64", - "type": "string" - }, - "diskType": { - "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example:\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/pd-standard\n\n\nOther values include pd-ssd and local-ssd. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType \n- projects/project/zones/zone/diskTypes/diskType \n- zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL.", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to apply to this disk. These can be later modified by the disks.setLabels method. This field is only applicable for persistent disks.", - "type": "object" - }, - "sourceImage": { - "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image:\nprojects/debian-cloud/global/images/family/debian-9\n\n\nAlternatively, use a specific version of a public operating system image:\nprojects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD\n\n\nTo create a disk with a custom image that you created, specify the image name in the following format:\nglobal/images/my-custom-image\n\n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\nglobal/images/family/my-image-family\n\n\nIf the source image is deleted later, this field will not be set.", + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "sourceImageEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys." - } - }, - "type": "object" - }, - "AuditConfig": { - "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. 
An AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n{ \"audit_configs\": [ { \"service\": \"allServices\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:foo@gmail.com\" ] }, { \"log_type\": \"DATA_WRITE\", }, { \"log_type\": \"ADMIN_READ\", } ] }, { \"service\": \"fooservice.googleapis.com\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:bar@gmail.com\" ] } ] } ] }\n\nFor fooservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts foo@gmail.com from DATA_READ logging, and bar@gmail.com from DATA_WRITE logging.", - "id": "AuditConfig", - "properties": { - "auditLogConfigs": { - "description": "The configuration for logging of each type of permission.", - "items": { - "$ref": "AuditLogConfig" - }, - "type": "array" - }, - "exemptedMembers": { - "description": "", - "items": { - "type": "string" - }, - "type": "array" - }, - "service": { - "description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.", - "type": "string" - } - }, - "type": "object" - }, - "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions. Example:\n\n{ \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:foo@gmail.com\" ] }, { \"log_type\": \"DATA_WRITE\", } ] }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting foo@gmail.com from DATA_READ logging.", - "id": "AuditLogConfig", - "properties": { - "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of permission. Follows the same format of [Binding.members][].", + "destinationRanges": { + "description": "If destination ranges are specified, the firewall will apply only to traffic that has destination IP address in these ranges. These ranges must be expressed in CIDR format. Only IPv4 is supported.", "items": { "type": "string" }, "type": "array" }, - "logType": { - "description": "The log type that this config enables.", - "enum": [ - "ADMIN_READ", - "DATA_READ", - "DATA_WRITE", - "LOG_TYPE_UNSPECIFIED" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "AuthorizationLoggingOptions": { - "description": "Authorization-related information used by Cloud Audit Logging.", - "id": "AuthorizationLoggingOptions", - "properties": { - "permissionType": { - "description": "The type of the permission that was checked.", + "direction": { + "description": "Direction of traffic to which this firewall applies; default is INGRESS. Note: For INGRESS traffic, it is NOT supported to specify destinationRanges; For EGRESS traffic, it is NOT supported to specify sourceRanges OR sourceTags.", "enum": [ - "ADMIN_READ", - "ADMIN_WRITE", - "DATA_READ", - "DATA_WRITE", - "PERMISSION_TYPE_UNSPECIFIED" + "EGRESS", + "INGRESS" ], "enumDescriptions": [ - "", - "", - "", "", "" ], "type": "string" - } - }, - "type": "object" - }, - "Autoscaler": { - "description": "Represents an Autoscaler resource. 
Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define. For more information, read Autoscaling Groups of Instances. (== resource_for beta.autoscalers ==) (== resource_for v1.autoscalers ==) (== resource_for beta.regionAutoscalers ==) (== resource_for v1.regionAutoscalers ==)", - "id": "Autoscaler", - "properties": { - "autoscalingPolicy": { - "$ref": "AutoscalingPolicy", - "description": "The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization.\n\nIf none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%." }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" + "disabled": { + "description": "Denotes whether the firewall rule is disabled, i.e not applied to the network it is associated with. When set to true, the firewall rule is not enforced and the network behaves as if it did not exist. If this is unspecified, the firewall rule will be enabled.", + "type": "boolean" }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" + "enableLogging": { + "description": "Deprecated in favor of enable in LogConfig. This field denotes whether to enable logging for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver.", + "type": "boolean" }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -22987,80 +29003,94 @@ "type": "string" }, "kind": { - "default": "compute#autoscaler", - "description": "[Output Only] Type of the resource. Always compute#autoscaler for autoscalers.", + "default": "compute#firewall", + "description": "[Output Only] Type of the resource. Always compute#firewall for firewall rules.", "type": "string" }, + "logConfig": { + "$ref": "FirewallLogConfig", + "description": "This field denotes the logging options for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver." + }, "name": { "annotations": { "required": [ - "compute.instanceGroups.insert" + "compute.firewalls.insert", + "compute.firewalls.patch" ] }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "region": { - "description": "[Output Only] URL of the region where the instance group resides (for autoscalers living in regional scope).", + "network": { + "description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used:\nglobal/networks/default\nIf you choose to specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network \n- projects/myproject/global/networks/my-network \n- global/networks/default", "type": "string" }, + "priority": { + "description": "Priority for this rule. This is an integer between 0 and 65535, both inclusive. When not specified, the value assumed is 1000. Relative priorities determine precedence of conflicting rules. Lower value of priority implies higher precedence (eg, a rule with priority 0 has higher precedence than a rule with priority 1). DENY rules take precedence over ALLOW rules having equal priority.", + "format": "int32", + "type": "integer" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "status": { - "description": "[Output Only] The status of the autoscaler configuration.", - "enum": [ - "ACTIVE", - "DELETING", - "ERROR", - "PENDING" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" + "sourceRanges": { + "description": "If source ranges are specified, the firewall will apply only to traffic that has source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply. Only IPv4 is supported.", + "items": { + "type": "string" + }, + "type": "array" }, - "statusDetails": { - "description": "[Output Only] Human-readable details about the current state of the autoscaler. Read the documentation for Commonly returned status messages for examples of status messages you might encounter.", + "sourceServiceAccounts": { + "description": "If source service accounts are specified, the firewall will apply only to traffic originating from an instance with a service account in this list. Source service accounts cannot be used to control traffic to an instance's external IP address because service accounts are associated with an instance, not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. If both are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP belongs to an instance with service account listed in sourceServiceAccount. The connection does not need to match both properties for the firewall to apply. 
sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags.", "items": { - "$ref": "AutoscalerStatusDetails" + "type": "string" }, "type": "array" }, - "target": { - "description": "URL of the managed instance group that this autoscaler will scale.", - "type": "string" + "sourceTags": { + "description": "If source tags are specified, the firewall rule applies only to traffic with source IPs that match the primary network interfaces of VM instances that have the tag and are in the same VPC network. Source tags cannot be used to control traffic to an instance's external IP address, it only applies to traffic between instances in the same virtual network. Because tags are associated with instances, not IP addresses. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.", + "items": { + "type": "string" + }, + "type": "array" }, - "zone": { - "description": "[Output Only] URL of the zone where the instance group resides (for autoscalers living in zonal scope).", - "type": "string" + "targetServiceAccounts": { + "description": "A list of service accounts indicating sets of instances located in the network that may make network connections as specified in allowed[]. targetServiceAccounts cannot be used at the same time as targetTags or sourceTags. If neither targetServiceAccounts nor targetTags are specified, the firewall rule applies to all instances on the specified network.", + "items": { + "type": "string" + }, + "type": "array" + }, + "targetTags": { + "description": "A list of tags that controls which instances the firewall rule applies to. If targetTags are specified, then the firewall rule applies only to instances in the VPC network that have one of those tags. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "AutoscalerAggregatedList": { - "id": "AutoscalerAggregatedList", + "FirewallList": { + "description": "Contains a list of firewalls.", + "id": "FirewallList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "AutoscalersScopedList", - "description": "[Output Only] Name of the scope containing this set of autoscalers." + "description": "A list of Firewall resources.", + "items": { + "$ref": "Firewall" }, - "description": "A list of AutoscalersScopedList resources.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#autoscalerAggregatedList", - "description": "[Output Only] Type of resource. Always compute#autoscalerAggregatedList for aggregated lists of autoscalers.", + "default": "compute#firewallList", + "description": "[Output Only] Type of resource. 
Always compute#firewallList for lists of firewalls.", "type": "string" }, "nextPageToken": { @@ -23155,24 +29185,231 @@ }, "type": "object" }, - "AutoscalerList": { - "description": "Contains a list of Autoscaler resources.", - "id": "AutoscalerList", + "FirewallLogConfig": { + "description": "The available logging options for a firewall rule.", + "id": "FirewallLogConfig", + "properties": { + "enable": { + "description": "This field denotes whether to enable logging for a particular firewall rule.", + "type": "boolean" + } + }, + "type": "object" + }, + "FixedOrPercent": { + "description": "Encapsulates numeric value that can be either absolute or relative.", + "id": "FixedOrPercent", + "properties": { + "calculated": { + "description": "[Output Only] Absolute value of VM instances calculated based on the specific mode.\n\n \n- If the value is fixed, then the calculated value is equal to the fixed value. \n- If the value is a percent, then the calculated value is percent/100 * targetSize. For example, the calculated value of a 80% of a managed instance group with 150 instances would be (80/100 * 150) = 120 VM instances. If there is a remainder, the number is rounded up.", + "format": "int32", + "type": "integer" + }, + "fixed": { + "description": "Specifies a fixed number of VM instances. This must be a positive integer.", + "format": "int32", + "type": "integer" + }, + "percent": { + "description": "Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "ForwardingRule": { + "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, ports] tuple. (== resource_for beta.forwardingRules ==) (== resource_for v1.forwardingRules ==) (== resource_for beta.globalForwardingRules ==) (== resource_for v1.globalForwardingRules ==) (== resource_for beta.regionForwardingRules ==) (== resource_for v1.regionForwardingRules ==)", + "id": "ForwardingRule", "properties": { + "IPAddress": { + "description": "The IP address that this forwarding rule is serving on behalf of.\n\nAddresses are restricted based on the forwarding rule's load balancing scheme (EXTERNAL or INTERNAL) and scope (global or regional).\n\nWhen the load balancing scheme is EXTERNAL, for global forwarding rules, the address must be a global IP, and for regional forwarding rules, the address must live in the same region as the forwarding rule. If this field is empty, an ephemeral IPv4 address from the same scope (global or regional) will be assigned. A regional forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4 or IPv6.\n\nWhen the load balancing scheme is INTERNAL_SELF_MANAGED, this must be a URL reference to an existing Address resource ( internal regional static IP address), with a purpose of GCE_END_POINT and address_type of INTERNAL.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnet configured for the forwarding rule. By default, if this field is empty, an ephemeral internal IP address will be automatically allocated from the IP range of the subnet or network configured for this forwarding rule.\n\nAn address can be specified either by a literal IP address or a URL reference to an existing Address resource. 
The following examples are all valid: \n- 100.1.2.3 \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address \n- projects/project/regions/region/addresses/address \n- regions/region/addresses/address \n- global/addresses/address \n- address", + "type": "string" + }, + "IPProtocol": { + "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.\n\nWhen the load balancing scheme is INTERNAL, only TCP and UDP are valid. When the load balancing scheme is INTERNAL_SELF_MANAGED, only TCPis valid.", + "enum": [ + "AH", + "ESP", + "ICMP", + "SCTP", + "TCP", + "UDP" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "allPorts": { + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. This field cannot be used with port or portRange fields.\n\nWhen the load balancing scheme is INTERNAL and protocol is TCP/UDP, specify this field to allow packets addressed to any ports will be forwarded to the backends configured with this forwarding rule.", + "type": "boolean" + }, + "allowGlobalAccess": { + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If the field is set to TRUE, clients can access ILB from all regions. Otherwise only allows access from clients in the same region as the internal load balancer.", + "type": "boolean" + }, + "backendService": { + "description": "This field is only used for INTERNAL load balancing.\n\nFor internal load balancing, this field identifies the BackendService resource to receive the matched traffic.", + "type": "string" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a ForwardingRule. Include the fingerprint in patch request to ensure that you do not overwrite changes that were applied from another concurrent request.\n\nTo see the latest fingerprint, make a get() request to retrieve a ForwardingRule.", + "format": "byte", + "type": "string" + }, "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", "type": "string" }, - "items": { - "description": "A list of Autoscaler resources.", + "ipVersion": { + "description": "The IP Version that will be used by this forwarding rule. Valid options are IPV4 or IPV6. This can only be specified for an external global forwarding rule.", + "enum": [ + "IPV4", + "IPV6", + "UNSPECIFIED_VERSION" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "kind": { + "default": "compute#forwardingRule", + "description": "[Output Only] Type of the resource. 
Always compute#forwardingRule for Forwarding Rule resources.", + "type": "string" + }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this resource, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a ForwardingRule.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this resource. These can be later modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "type": "object" + }, + "loadBalancingScheme": { + "description": "This signifies what the ForwardingRule will be used for and can only take the following values: INTERNAL, INTERNAL_SELF_MANAGED, EXTERNAL. The value of INTERNAL means that this will be used for Internal Network Load Balancing (TCP, UDP). The value of INTERNAL_SELF_MANAGED means that this will be used for Internal Global HTTP(S) LB. The value of EXTERNAL means that this will be used for External Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy)", + "enum": [ + "EXTERNAL", + "INTERNAL", + "INTERNAL_MANAGED", + "INTERNAL_SELF_MANAGED", + "INVALID" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "metadataFilters": { + "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels in the provided metadata.\nmetadataFilters specified here can be overridden by those specified in the UrlMap that this ForwardingRule references.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", "items": { - "$ref": "Autoscaler" + "$ref": "MetadataFilter" + }, + "type": "array" + }, + "name": { + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "network": { + "description": "This field is not used for external load balancing.\n\nFor INTERNAL and INTERNAL_SELF_MANAGED load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. 
If this field is not specified, the default network will be used.", + "type": "string" + }, + "networkTier": { + "description": "This signifies the networking tier used for configuring this load balancer and can only take the following values: PREMIUM , STANDARD.\n\nFor regional ForwardingRule, the valid values are PREMIUM and STANDARD. For GlobalForwardingRule, the valid value is PREMIUM.\n\nIf this field is not specified, it is assumed to be PREMIUM. If IPAddress is specified, this value must be equal to the networkTier of the Address.", + "enum": [ + "PREMIUM", + "STANDARD" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "portRange": { + "description": "This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.\n\nApplicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.\n\nSome types of forwarding target have constraints on the acceptable ports: \n- TargetHttpProxy: 80, 8080 \n- TargetHttpsProxy: 443 \n- TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetVpnGateway: 500, 4500", + "type": "string" + }, + "ports": { + "description": "This field is used along with the backend_service field for internal load balancing.\n\nWhen the load balancing scheme is INTERNAL, a list of ports can be configured, for example, ['80'], ['8000','9000'] etc. Only packets addressed to these ports will be forwarded to the backends configured with this forwarding rule.\n\nYou may specify a maximum of up to 5 ports.", + "items": { + "type": "string" }, "type": "array" }, + "region": { + "description": "[Output Only] URL of the region where the regional forwarding rule resides. This field is not applicable to global forwarding rules. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "serviceLabel": { + "description": "An optional prefix to the service name for this Forwarding Rule. If specified, will be the first label of the fully qualified service name.\n\nThe label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.\n\nThis field is only used for internal load balancing.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "serviceName": { + "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule.\n\nThis field is only used for internal load balancing.", + "type": "string" + }, + "subnetwork": { + "description": "This field is only used for INTERNAL load balancing.\n\nFor internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule.\n\nIf the network specified is in auto subnet mode, this field is optional. 
However, if the network is in custom subnet mode, a subnetwork must be specified.", + "type": "string" + }, + "target": { + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are valid.", + "type": "string" + } + }, + "type": "object" + }, + "ForwardingRuleAggregatedList": { + "id": "ForwardingRuleAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "ForwardingRulesScopedList", + "description": "Name of the scope containing this set of addresses." + }, + "description": "A list of ForwardingRulesScopedList resources.", + "type": "object" + }, "kind": { - "default": "compute#autoscalerList", - "description": "[Output Only] Type of resource. Always compute#autoscalerList for lists of autoscalers.", + "default": "compute#forwardingRuleAggregatedList", + "description": "[Output Only] Type of resource. Always compute#forwardingRuleAggregatedList for lists of forwarding rules.", "type": "string" }, "nextPageToken": { @@ -23267,66 +29504,36 @@ }, "type": "object" }, - "AutoscalerStatusDetails": { - "id": "AutoscalerStatusDetails", + "ForwardingRuleList": { + "description": "Contains a list of ForwardingRule resources.", + "id": "ForwardingRuleList", "properties": { - "message": { - "description": "The status message.", + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, - "type": { - "description": "The type of error returned.", - "enum": [ - "ALL_INSTANCES_UNHEALTHY", - "BACKEND_SERVICE_DOES_NOT_EXIST", - "CAPPED_AT_MAX_NUM_REPLICAS", - "CUSTOM_METRIC_DATA_POINTS_TOO_SPARSE", - "CUSTOM_METRIC_INVALID", - "MIN_EQUALS_MAX", - "MISSING_CUSTOM_METRIC_DATA_POINTS", - "MISSING_LOAD_BALANCING_DATA_POINTS", - "MORE_THAN_ONE_BACKEND_SERVICE", - "NOT_ENOUGH_QUOTA_AVAILABLE", - "REGION_RESOURCE_STOCKOUT", - "SCALING_TARGET_DOES_NOT_EXIST", - "UNKNOWN", - "UNSUPPORTED_MAX_RATE_LOAD_BALANCING_CONFIGURATION", - "ZONE_RESOURCE_STOCKOUT" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "AutoscalersScopedList": { - "id": "AutoscalersScopedList", - "properties": { - "autoscalers": { - "description": "[Output Only] A list of autoscalers contained in this scope.", + "items": { + "description": "A list of ForwardingRule resources.", "items": { - "$ref": "Autoscaler" + "$ref": "ForwardingRule" }, "type": "array" }, + "kind": { + "default": "compute#forwardingRuleList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "[Output Only] Informational warning which replaces the list of autoscalers when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -23397,282 +29604,39 @@ }, "type": "object" }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "AutoscalingPolicy": { - "description": "Cloud Autoscaler policy.", - "id": "AutoscalingPolicy", - "properties": { - "coolDownPeriodSec": { - "description": "The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds.\n\nVirtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.", - "format": "int32", - "type": "integer" - }, - "cpuUtilization": { - "$ref": "AutoscalingPolicyCpuUtilization", - "description": "Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group." - }, - "customMetricUtilizations": { - "description": "Configuration parameters of autoscaling based on a custom metric.", - "items": { - "$ref": "AutoscalingPolicyCustomMetricUtilization" - }, - "type": "array" - }, - "loadBalancingUtilization": { - "$ref": "AutoscalingPolicyLoadBalancingUtilization", - "description": "Configuration parameters of autoscaling based on load balancer." - }, - "maxNumReplicas": { - "description": "The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than minimal number of replicas.", - "format": "int32", - "type": "integer" - }, - "minNumReplicas": { - "description": "The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, autoscaler will choose a default value depending on maximum number of instances allowed.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "AutoscalingPolicyCpuUtilization": { - "description": "CPU utilization policy.", - "id": "AutoscalingPolicyCpuUtilization", - "properties": { - "utilizationTarget": { - "description": "The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. 
If not specified, the default is 0.6.\n\nIf the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization.\n\nIf the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.", - "format": "double", - "type": "number" - } - }, - "type": "object" - }, - "AutoscalingPolicyCustomMetricUtilization": { - "description": "Custom utilization metric policy.", - "id": "AutoscalingPolicyCustomMetricUtilization", - "properties": { - "filter": { - "description": "A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data.\n\nFor the filter to be valid for autoscaling purposes, the following rules apply: \n- You can only use the AND operator for joining selectors. \n- You can only use direct equality comparison operator (=) without any functions for each selector. \n- You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. \n- The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels.\nIf the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling.\n\nIf not specified, the type defaults to gce_instance. \n\nYou should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.", - "type": "string" - }, - "metric": { - "description": "The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values.\n\nThe metric must have a value type of INT64 or DOUBLE.", - "type": "string" - }, - "singleInstanceAssignment": { - "description": "If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. 
Autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing.\n\nA good metric to use with the target is for example pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances.\n\nA bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.", - "format": "double", - "type": "number" - }, - "utilizationTarget": { - "description": "The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric.\n\nFor example, a good metric to use as a utilization_target is compute.googleapis.com/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", - "format": "double", - "type": "number" - }, - "utilizationTargetType": { - "description": "Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Either GAUGE, DELTA_PER_SECOND, or DELTA_PER_MINUTE.", - "enum": [ - "DELTA_PER_MINUTE", - "DELTA_PER_SECOND", - "GAUGE" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "AutoscalingPolicyLoadBalancingUtilization": { - "description": "Configuration parameters of autoscaling based on load balancing.", - "id": "AutoscalingPolicyLoadBalancingUtilization", - "properties": { - "utilizationTarget": { - "description": "Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.", - "format": "double", - "type": "number" - } - }, - "type": "object" - }, - "Backend": { - "description": "Message containing information of one individual backend.", - "id": "Backend", - "properties": { - "balancingMode": { - "description": "Specifies the balancing mode for this backend. For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL).\n\nFor Internal Load Balancing, the default and only supported mode is CONNECTION.", - "enum": [ - "CONNECTION", - "RATE", - "UTILIZATION" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, - "capacityScaler": { - "description": "A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION, RATE or CONNECTION). Default value is 1, which means the group will serve up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available Capacity. Valid range is [0.0,1.0].\n\nThis cannot be used for internal load balancing.", - "format": "float", - "type": "number" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "group": { - "description": "The fully-qualified URL of an Instance Group or Network Endpoint Group resource. In case of instance group this defines the list of instances that serve traffic. Member virtual machine instances from each instance group must live in the same zone as the instance group itself. 
No two backends in a backend service are allowed to use same Instance Group resource.\n\nFor Network Endpoint Groups this defines list of endpoints. All endpoints of Network Endpoint Group must be hosted on instances located in the same zone as the Network Endpoint Group.\n\nBackend service can not contain mix of Instance Group and Network Endpoint Group backends.\n\nNote that you must specify an Instance Group or Network Endpoint Group resource using the fully-qualified URL, rather than a partial URL.\n\nWhen the BackendService has load balancing scheme INTERNAL, the instance group must be within the same region as the BackendService. Network Endpoint Groups are not supported for INTERNAL load balancing scheme.", - "type": "string" - }, - "maxConnections": { - "description": "The max number of simultaneous connections for the group. Can be used with either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be set.\n\nThis cannot be used for internal load balancing.", - "format": "int32", - "type": "integer" - }, - "maxConnectionsPerEndpoint": { - "description": "The max number of simultaneous connections that a single backend network endpoint can handle. This is used to calculate the capacity of the group. Can be used in either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint must be set.\n\nThis cannot be used for internal load balancing.", - "format": "int32", - "type": "integer" - }, - "maxConnectionsPerInstance": { - "description": "The max number of simultaneous connections that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be set.\n\nThis cannot be used for internal load balancing.", - "format": "int32", - "type": "integer" - }, - "maxRate": { - "description": "The max requests per second (RPS) of the group. Can be used with either RATE or UTILIZATION balancing modes, but required if RATE mode. For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", - "format": "int32", - "type": "integer" - }, - "maxRatePerEndpoint": { - "description": "The max requests per second (RPS) that a single backend network endpoint can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint must be set.\n\nThis cannot be used for internal load balancing.", - "format": "float", - "type": "number" - }, - "maxRatePerInstance": { - "description": "The max requests per second (RPS) that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", - "format": "float", - "type": "number" - }, - "maxUtilization": { - "description": "Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization target for the group. The default is 0.8. Valid range is [0.0, 1.0].\n\nThis cannot be used for internal load balancing.", - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "BackendBucket": { - "description": "A BackendBucket resource. 
This resource defines a Cloud Storage bucket.", - "id": "BackendBucket", - "properties": { - "bucketName": { - "description": "Cloud Storage bucket name.", - "type": "string" - }, - "cdnPolicy": { - "$ref": "BackendBucketCdnPolicy", - "description": "Cloud CDN configuration for this BackendBucket." - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional textual description of the resource; provided by the client when the resource is created.", - "type": "string" - }, - "enableCdn": { - "description": "If true, enable Cloud CDN for this BackendBucket.", - "type": "boolean" - }, - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#backendBucket", - "description": "Type of the resource.", - "type": "string" - }, - "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "BackendBucketCdnPolicy": { - "description": "Message containing Cloud CDN configuration for a backend bucket.", - "id": "BackendBucketCdnPolicy", + "ForwardingRuleReference": { + "id": "ForwardingRuleReference", "properties": { - "signedUrlCacheMaxAgeSec": { - "description": "Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a \"Cache-Control: public, max-age=[TTL]\" header, regardless of any existing Cache-Control header. 
The actual headers served in responses will not be altered.", - "format": "int64", + "forwardingRule": { "type": "string" - }, - "signedUrlKeyNames": { - "description": "[Output Only] Names of the keys for signing request URLs.", - "items": { - "type": "string" - }, - "type": "array" } }, "type": "object" }, - "BackendBucketList": { - "description": "Contains a list of BackendBucket resources.", - "id": "BackendBucketList", + "ForwardingRulesScopedList": { + "id": "ForwardingRulesScopedList", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of BackendBucket resources.", + "forwardingRules": { + "description": "A list of forwarding rules contained in this scope.", "items": { - "$ref": "BackendBucket" + "$ref": "ForwardingRule" }, "type": "array" }, - "kind": { - "default": "compute#backendBucketList", - "description": "Type of resource.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "Informational warning which replaces the list of forwarding rules when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -23755,177 +29719,406 @@ }, "type": "object" }, - "BackendService": { - "description": "A BackendService resource. This resource defines a group of backend virtual machines and their serving capacity. (== resource_for v1.backendService ==) (== resource_for beta.backendService ==)", - "id": "BackendService", + "GlobalSetLabelsRequest": { + "id": "GlobalSetLabelsRequest", "properties": { - "affinityCookieTtlSec": { - "description": "Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value for TTL is one day.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", - "format": "int32", - "type": "integer" + "labelFingerprint": { + "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash when updating or changing labels, otherwise the request will fail with error 412 conditionNotMet. Make a get() request to the resource to get the latest fingerprint.", + "format": "byte", + "type": "string" }, - "backends": { - "description": "The list of backends that serve this BackendService.", + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "A list of labels to apply for this resource. Each label key \u0026 value must comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. For example, \"webserver-frontend\": \"images\". A label value can also be empty (e.g. \"my-label\": \"\").", + "type": "object" + } + }, + "type": "object" + }, + "GlobalSetPolicyRequest": { + "id": "GlobalSetPolicyRequest", + "properties": { + "bindings": { + "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify bindings.", "items": { - "$ref": "Backend" + "$ref": "Binding" }, "type": "array" }, - "cdnPolicy": { - "$ref": "BackendServiceCdnPolicy", - "description": "Cloud CDN configuration for this BackendService." + "etag": { + "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify the etag.", + "format": "byte", + "type": "string" }, - "connectionDraining": { - "$ref": "ConnectionDraining" + "policy": { + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the 'resource'. The size of the policy is limited to a few 10s of KB. An empty policy is in general a valid policy but certain services (like Projects) might reject them." + } + }, + "type": "object" + }, + "GuestAttributes": { + "description": "A guest attributes entry.", + "id": "GuestAttributes", + "properties": { + "kind": { + "default": "compute#guestAttributes", + "description": "[Output Only] Type of the resource. Always compute#guestAttributes for guest attributes entry.", + "type": "string" }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "queryPath": { + "description": "The path to be queried. This can be the default namespace ('/') or a nested namespace ('//') or a specified key ('//')", "type": "string" }, - "customRequestHeaders": { - "description": "Headers that the HTTP/S load balancer should add to proxied requests.", - "items": { - "type": "string" - }, - "type": "array" + "queryValue": { + "$ref": "GuestAttributesValue", + "description": "[Output Only] The value of the requested queried path." }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "enableCDN": { - "description": "If true, enable Cloud CDN for this BackendService.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", - "type": "boolean" + "variableKey": { + "description": "The key to search for.", + "type": "string" }, - "fingerprint": { - "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. 
An up-to-date fingerprint must be provided in order to update the BackendService, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a BackendService.", - "format": "byte", + "variableValue": { + "description": "[Output Only] The value found for the requested key.", + "type": "string" + } + }, + "type": "object" + }, + "GuestAttributesEntry": { + "description": "A guest attributes namespace/key/value entry.", + "id": "GuestAttributesEntry", + "properties": { + "key": { + "description": "Key for the guest attribute entry.", "type": "string" }, - "healthChecks": { - "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required for Compute Engine backend services. A health check must not be specified for App Engine backend and Cloud Function backend.\n\nFor internal load balancing, a URL to a HealthCheck resource must be specified instead.", + "namespace": { + "description": "Namespace for the guest attribute entry.", + "type": "string" + }, + "value": { + "description": "Value for the guest attribute entry.", + "type": "string" + } + }, + "type": "object" + }, + "GuestAttributesValue": { + "description": "Array of guest attribute namespace/key/value tuples.", + "id": "GuestAttributesValue", + "properties": { + "items": { "items": { - "type": "string" + "$ref": "GuestAttributesEntry" }, "type": "array" + } + }, + "type": "object" + }, + "GuestOsFeature": { + "description": "Guest OS features.", + "id": "GuestOsFeature", + "properties": { + "type": { + "description": "The ID of a supported feature. Read Enabling guest operating system features to see a list of available options.", + "enum": [ + "FEATURE_TYPE_UNSPECIFIED", + "MULTI_IP_SUBNET", + "SECURE_BOOT", + "UEFI_COMPATIBLE", + "VIRTIO_SCSI_MULTIQUEUE", + "WINDOWS" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "HTTP2HealthCheck": { + "id": "HTTP2HealthCheck", + "properties": { + "host": { + "description": "The value of the host header in the HTTP/2 health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", + "type": "string" }, - "iap": { - "$ref": "BackendServiceIAP" + "port": { + "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", + "format": "int32", + "type": "integer" }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", + "portName": { + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", "type": "string" }, - "kind": { - "default": "compute#backendService", - "description": "[Output Only] Type of resource. Always compute#backendService for backend services.", + "portSpecification": { + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTP2 health check follows behavior specified in\nport\nand\nportName\nfields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ], "type": "string" }, - "loadBalancingScheme": { - "description": "Indicates whether the backend service will be used with internal or external load balancing. A backend service created for one type of load balancing cannot be used with the other. Possible values are INTERNAL and EXTERNAL.", + "proxyHeader": { + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", "enum": [ - "EXTERNAL", - "INTERNAL", - "INVALID_LOAD_BALANCING_SCHEME" + "NONE", + "PROXY_V1" ], "enumDescriptions": [ - "", "", "" ], "type": "string" }, - "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "requestPath": { + "description": "The request path of the HTTP/2 health check request. The default value is /.", + "type": "string" + }, + "response": { + "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", + "type": "string" + } + }, + "type": "object" + }, + "HTTPHealthCheck": { + "id": "HTTPHealthCheck", + "properties": { + "host": { + "description": "The value of the host header in the HTTP health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", "type": "string" }, "port": { - "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.\n\nThis cannot be used for internal load balancing.", + "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Name of backend port. The same name should appear in the instance groups referenced by this service. Required when the load balancing scheme is EXTERNAL.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", "type": "string" }, - "protocol": { - "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, TCP, and SSL. 
The default is HTTP.\n\nFor internal load balancing, the possible values are TCP and UDP, and the default is TCP.", + "portSpecification": { + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTP health check follows behavior specified in\nport\nand\nportName\nfields.", "enum": [ - "HTTP", - "HTTP2", - "HTTPS", - "SSL", - "TCP", - "UDP" + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" ], "enumDescriptions": [ "", "", + "" + ], + "type": "string" + }, + "proxyHeader": { + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "requestPath": { + "description": "The request path of the HTTP health check request. The default value is /.", + "type": "string" + }, + "response": { + "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", + "type": "string" + } + }, + "type": "object" + }, + "HTTPSHealthCheck": { + "id": "HTTPSHealthCheck", + "properties": { + "host": { + "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", + "type": "string" + }, + "port": { + "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", + "format": "int32", + "type": "integer" + }, + "portName": { + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "type": "string" + }, + "portSpecification": { + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTPS health check follows behavior specified in\nport\nand\nportName\nfields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ "", "", + "" + ], + "type": "string" + }, + "proxyHeader": { + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ "", "" ], "type": "string" }, - "region": { - "description": "[Output Only] URL of the region where the regional backend service resides. This field is not applicable to global backend services. You must specify this field as part of the HTTP request URL. 
It is not settable as a field in the request body.", + "requestPath": { + "description": "The request path of the HTTPS health check request. The default value is /.", "type": "string" }, - "securityPolicy": { - "description": "[Output Only] The resource URL for the security policy associated with this backend service.", + "response": { + "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", + "type": "string" + } + }, + "type": "object" + }, + "HealthCheck": { + "description": "An HealthCheck resource. This resource defines a template for how individual virtual machines should be checked for health, via one of the supported protocols.", + "id": "HealthCheck", + "properties": { + "checkIntervalSec": { + "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", + "format": "int32", + "type": "integer" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in 3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "healthyThreshold": { + "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", + "format": "int32", + "type": "integer" + }, + "http2HealthCheck": { + "$ref": "HTTP2HealthCheck" + }, + "httpHealthCheck": { + "$ref": "HTTPHealthCheck" + }, + "httpsHealthCheck": { + "$ref": "HTTPSHealthCheck" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#healthCheck", + "description": "Type of the resource.", + "type": "string" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "region": { + "description": "[Output Only] Region where the health check resides. Not applicable to global health checks.", "type": "string" }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "sessionAffinity": { - "description": "Type of session affinity to use. The default is NONE.\n\nWhen the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, or GENERATED_COOKIE.\n\nWhen the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the protocol is UDP, this field is not used.", + "sslHealthCheck": { + "$ref": "SSLHealthCheck" + }, + "tcpHealthCheck": { + "$ref": "TCPHealthCheck" + }, + "timeoutSec": { + "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec.", + "format": "int32", + "type": "integer" + }, + "type": { + "description": "Specifies the type of the healthCheck, either TCP, SSL, HTTP, HTTPS or HTTP2. 
If not specified, the default is TCP. Exactly one of the protocol-specific health check field must be specified, which must match type field.", "enum": [ - "CLIENT_IP", - "CLIENT_IP_PORT_PROTO", - "CLIENT_IP_PROTO", - "GENERATED_COOKIE", - "NONE" + "HTTP", + "HTTP2", + "HTTPS", + "INVALID", + "SSL", + "TCP" ], "enumDescriptions": [ "", "", "", "", + "", "" ], "type": "string" }, - "timeoutSec": { - "description": "How many seconds to wait for the backend before considering it a failed request. Default is 30 seconds.", + "unhealthyThreshold": { + "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", "format": "int32", "type": "integer" } }, "type": "object" }, - "BackendServiceAggregatedList": { - "description": "Contains a list of BackendServicesScopedList.", - "id": "BackendServiceAggregatedList", + "HealthCheckList": { + "description": "Contains a list of HealthCheck resources.", + "id": "HealthCheckList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "BackendServicesScopedList", - "description": "Name of the scope containing this set of BackendServices." + "description": "A list of HealthCheck resources.", + "items": { + "$ref": "HealthCheck" }, - "description": "A list of BackendServicesScopedList resources.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#backendServiceAggregatedList", + "default": "compute#healthCheckList", "description": "Type of resource.", "type": "string" }, @@ -24021,85 +30214,34 @@ }, "type": "object" }, - "BackendServiceCdnPolicy": { - "description": "Message containing Cloud CDN configuration for a backend service.", - "id": "BackendServiceCdnPolicy", - "properties": { - "cacheKeyPolicy": { - "$ref": "CacheKeyPolicy", - "description": "The CacheKeyPolicy for this CdnPolicy." - }, - "signedUrlCacheMaxAgeSec": { - "description": "Maximum number of seconds the response to a signed URL request will be considered fresh. After this time period, the response will be revalidated before being served. Defaults to 1hr (3600s). When serving responses to signed URL requests, Cloud CDN will internally behave as though all responses from this backend had a \"Cache-Control: public, max-age=[TTL]\" header, regardless of any existing Cache-Control header. The actual headers served in responses will not be altered.", - "format": "int64", - "type": "string" - }, - "signedUrlKeyNames": { - "description": "[Output Only] Names of the keys for signing request URLs.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "BackendServiceGroupHealth": { - "id": "BackendServiceGroupHealth", - "properties": { - "healthStatus": { - "description": "Health state of the backend instances or endpoints in requested instance or network endpoint group, determined based on configured health checks.", - "items": { - "$ref": "HealthStatus" - }, - "type": "array" - }, - "kind": { - "default": "compute#backendServiceGroupHealth", - "description": "[Output Only] Type of resource. Always compute#backendServiceGroupHealth for the health of backend services.", - "type": "string" - } - }, - "type": "object" - }, - "BackendServiceIAP": { - "description": "Identity-Aware Proxy", - "id": "BackendServiceIAP", + "HealthCheckReference": { + "description": "A full or valid partial URL to a health check. 
For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check \n- projects/project-id/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", + "id": "HealthCheckReference", "properties": { - "enabled": { - "type": "boolean" - }, - "oauth2ClientId": { - "type": "string" - }, - "oauth2ClientSecret": { - "type": "string" - }, - "oauth2ClientSecretSha256": { - "description": "[Output Only] SHA256 hash value for the field oauth2_client_secret above.", + "healthCheck": { "type": "string" } }, "type": "object" }, - "BackendServiceList": { - "description": "Contains a list of BackendService resources.", - "id": "BackendServiceList", + "HealthChecksAggregatedList": { + "id": "HealthChecksAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of BackendService resources.", - "items": { - "$ref": "BackendService" + "additionalProperties": { + "$ref": "HealthChecksScopedList", + "description": "Name of the scope containing this set of HealthChecks." }, - "type": "array" + "description": "A list of HealthChecksScopedList resources.", + "type": "object" }, "kind": { - "default": "compute#backendServiceList", - "description": "[Output Only] Type of resource. Always compute#backendServiceList for lists of backend services.", + "default": "compute#healthChecksAggregatedList", + "description": "Type of resource.", "type": "string" }, "nextPageToken": { @@ -24194,22 +30336,13 @@ }, "type": "object" }, - "BackendServiceReference": { - "id": "BackendServiceReference", - "properties": { - "backendService": { - "type": "string" - } - }, - "type": "object" - }, - "BackendServicesScopedList": { - "id": "BackendServicesScopedList", + "HealthChecksScopedList": { + "id": "HealthChecksScopedList", "properties": { - "backendServices": { - "description": "A list of BackendServices contained in this scope.", + "healthChecks": { + "description": "A list of HealthChecks contained in this scope.", "items": { - "$ref": "BackendService" + "$ref": "HealthCheck" }, "type": "array" }, @@ -24297,66 +30430,168 @@ }, "type": "object" }, - "Binding": { - "description": "Associates `members` with a `role`.", - "id": "Binding", + "HealthStatus": { + "id": "HealthStatus", "properties": { - "condition": { - "$ref": "Expr", - "description": "Unimplemented. The condition that is associated with this binding. NOTE: an unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." + "healthState": { + "description": "Health state of the instance.", + "enum": [ + "HEALTHY", + "UNHEALTHY" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" }, - "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@gmail.com` .\n\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service account. 
For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`.\n\n\n\n* `domain:{domain}`: A Google Apps domain name that represents all the users of that domain. For example, `google.com` or `example.com`.", + "instance": { + "description": "URL of the instance resource.", + "type": "string" + }, + "ipAddress": { + "description": "The IP address represented by this resource.", + "type": "string" + }, + "port": { + "description": "The port on the instance.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "HealthStatusForNetworkEndpoint": { + "id": "HealthStatusForNetworkEndpoint", + "properties": { + "backendService": { + "$ref": "BackendServiceReference", + "description": "URL of the backend service associated with the health state of the network endpoint." + }, + "forwardingRule": { + "$ref": "ForwardingRuleReference", + "description": "URL of the forwarding rule associated with the health state of the network endpoint." + }, + "healthCheck": { + "$ref": "HealthCheckReference", + "description": "URL of the health check associated with the health state of the network endpoint." + }, + "healthState": { + "description": "Health state of the network endpoint determined based on the health checks configured.", + "enum": [ + "DRAINING", + "HEALTHY", + "UNHEALTHY", + "UNKNOWN" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "HostRule": { + "description": "UrlMaps A host-matching rule for a URL. If matched, will use the named PathMatcher to select the BackendService.", + "id": "HostRule", + "properties": { + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "hosts": { + "description": "The list of host patterns to match. They must be valid hostnames, except * will match any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or ..", "items": { "type": "string" }, "type": "array" }, - "role": { - "description": "Role that is assigned to `members`. 
For example, `roles/viewer`, `roles/editor`, or `roles/owner`.", + "pathMatcher": { + "description": "The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion.", "type": "string" } }, "type": "object" }, - "CacheInvalidationRule": { - "id": "CacheInvalidationRule", + "HttpFaultAbort": { + "description": "Specification for how requests are aborted as part of fault injection.", + "id": "HttpFaultAbort", "properties": { - "host": { - "description": "If set, this invalidation rule will only apply to requests with a Host header matching host.", - "type": "string" + "httpStatus": { + "description": "The HTTP status code used to abort the request.\nThe value must be between 200 and 599 inclusive.", + "format": "uint32", + "type": "integer" }, - "path": { - "type": "string" + "percentage": { + "description": "The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection.\nThe value must be between 0.0 and 100.0 inclusive.", + "format": "double", + "type": "number" } }, "type": "object" }, - "CacheKeyPolicy": { - "description": "Message containing what to include in the cache key for a request for Cloud CDN.", - "id": "CacheKeyPolicy", + "HttpFaultDelay": { + "description": "Specifies the delay introduced by Loadbalancer before forwarding the request to the backend service as part of fault injection.", + "id": "HttpFaultDelay", "properties": { - "includeHost": { - "description": "If true, requests to different hosts will be cached separately.", - "type": "boolean" + "fixedDelay": { + "$ref": "Duration", + "description": "Specifies the value of the fixed delay interval." }, - "includeProtocol": { - "description": "If true, http and https requests will be cached separately.", - "type": "boolean" + "percentage": { + "description": "The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection.\nThe value must be between 0.0 and 100.0 inclusive.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, + "HttpFaultInjection": { + "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the Loadbalancer for a percentage of requests.", + "id": "HttpFaultInjection", + "properties": { + "abort": { + "$ref": "HttpFaultAbort", + "description": "The specification for how client requests are aborted as part of fault injection." }, - "includeQueryString": { - "description": "If true, include query string parameters in the cache key according to query_string_whitelist and query_string_blacklist. If neither is set, the entire query string will be included. If false, the query string will be excluded from the cache key entirely.", - "type": "boolean" + "delay": { + "$ref": "HttpFaultDelay", + "description": "The specification for how client requests are delayed as part of fault injection, before being sent to a backend service." 
+ } + }, + "type": "object" + }, + "HttpHeaderAction": { + "description": "The request and response header transformations that take effect before the request is passed along to the selected backendService.", + "id": "HttpHeaderAction", + "properties": { + "requestHeadersToAdd": { + "description": "Headers to add to a matching request prior to forwarding the request to the backendService.", + "items": { + "$ref": "HttpHeaderOption" + }, + "type": "array" }, - "queryStringBlacklist": { - "description": "Names of query string parameters to exclude in cache keys. All other parameters will be included. Either specify query_string_whitelist or query_string_blacklist, not both. '\u0026' and '=' will be percent encoded and not treated as delimiters.", + "requestHeadersToRemove": { + "description": "A list of header names for headers that need to be removed from the request prior to forwarding the request to the backendService.", "items": { "type": "string" }, "type": "array" }, - "queryStringWhitelist": { - "description": "Names of query string parameters to include in cache keys. All other parameters will be excluded. Either specify query_string_whitelist or query_string_blacklist, not both. '\u0026' and '=' will be percent encoded and not treated as delimiters.", + "responseHeadersToAdd": { + "description": "Headers to add the response prior to sending the response back to the client.", + "items": { + "$ref": "HttpHeaderOption" + }, + "type": "array" + }, + "responseHeadersToRemove": { + "description": "A list of header names for headers that need to be removed from the response prior to sending the response back to the client.", "items": { "type": "string" }, @@ -24365,16 +30600,72 @@ }, "type": "object" }, - "Commitment": { - "description": "Represents a Commitment resource. Creating a Commitment resource means that you are purchasing a committed use contract with an explicit start and end time. You can create commitments based on vCPUs and memory usage and receive discounted rates. For full details, read Signing Up for Committed Use Discounts.\n\nCommitted use discounts are subject to Google Cloud Platform's Service Specific Terms. By purchasing a committed use discount, you agree to these terms. Committed use discounts will not renew, so you must purchase a new commitment to continue receiving discounts. (== resource_for beta.commitments ==) (== resource_for v1.commitments ==)", - "id": "Commitment", + "HttpHeaderMatch": { + "description": "matchRule criteria for request header matches.", + "id": "HttpHeaderMatch", "properties": { - "allocations": { - "description": "List of allocations for this commitment.", - "items": { - "$ref": "Allocation" - }, - "type": "array" + "exactMatch": { + "description": "The value should exactly match contents of exactMatch.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "string" + }, + "headerName": { + "description": "The name of the HTTP header to match.\nFor matching against the HTTP request's authority, use a headerMatch with the header name \":authority\".\nFor matching a request's method, use the headerName \":method\".", + "type": "string" + }, + "invertMatch": { + "description": "If set to false, the headerMatch is considered a match if the match criteria above are met. 
If set to true, the headerMatch is considered a match if the match criteria above are NOT met.\nThe default setting is false.", + "type": "boolean" + }, + "prefixMatch": { + "description": "The value of the header must start with the contents of prefixMatch.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "string" + }, + "presentMatch": { + "description": "A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value or not.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "boolean" + }, + "rangeMatch": { + "$ref": "Int64RangeMatch", + "description": "The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer, number or is empty, the match fails.\nFor example for a range [-5, 0] \n- -3 will match. \n- 0 will not match. \n- 0.25 will not match. \n- -3someString will not match. \nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set." + }, + "regexMatch": { + "description": "The value of the header must match the regualar expression specified in regexMatch. For regular expression grammar, please see: en.cppreference.com/w/cpp/regex/ecmascript \nFor matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "string" + }, + "suffixMatch": { + "description": "The value of the header must end with the contents of suffixMatch.\nOnly one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.", + "type": "string" + } + }, + "type": "object" + }, + "HttpHeaderOption": { + "description": "Specification determining how headers are added to requests or responses.", + "id": "HttpHeaderOption", + "properties": { + "headerName": { + "description": "The name of the header.", + "type": "string" + }, + "headerValue": { + "description": "The value of the header to add.", + "type": "string" + }, + "replace": { + "description": "If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header.\nThe default value is false.", + "type": "boolean" + } + }, + "type": "object" + }, + "HttpHealthCheck": { + "description": "An HttpHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTP.", + "id": "HttpHealthCheck", + "properties": { + "checkIntervalSec": { + "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", + "format": "int32", + "type": "integer" }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", @@ -24384,8 +30675,13 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "endTimestamp": { - "description": "[Output Only] Commitment end time in RFC3339 text format.", + "healthyThreshold": { + "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. 
The default value is 2.", + "format": "int32", + "type": "integer" + }, + "host": { + "description": "The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.", "type": "string" }, "id": { @@ -24394,8 +30690,8 @@ "type": "string" }, "kind": { - "default": "compute#commitment", - "description": "[Output Only] Type of the resource. Always compute#commitment for commitments.", + "default": "compute#httpHealthCheck", + "description": "[Output Only] Type of the resource. Always compute#httpHealthCheck for HTTP health checks.", "type": "string" }, "name": { @@ -24403,80 +30699,50 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "plan": { - "description": "The plan for this commitment, which determines duration and discount rate. The currently supported plans are TWELVE_MONTH (1 year), and THIRTY_SIX_MONTH (3 years).", - "enum": [ - "INVALID", - "THIRTY_SIX_MONTH", - "TWELVE_MONTH" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" + "port": { + "description": "The TCP port number for the HTTP health check request. The default value is 80.", + "format": "int32", + "type": "integer" }, - "region": { - "description": "[Output Only] URL of the region where this commitment may be used.", + "requestPath": { + "description": "The request path of the HTTP health check request. The default value is /.", "type": "string" }, - "resources": { - "description": "A list of commitment amounts for particular resources. Note that VCPU and MEMORY resource commitments must occur together.", - "items": { - "$ref": "ResourceCommitment" - }, - "type": "array" - }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "startTimestamp": { - "description": "[Output Only] Commitment start time in RFC3339 text format.", - "type": "string" - }, - "status": { - "description": "[Output Only] Status of the commitment with regards to eventual expiration (each commitment has an end date defined). One of the following values: NOT_YET_ACTIVE, ACTIVE, EXPIRED.", - "enum": [ - "ACTIVE", - "CREATING", - "EXPIRED", - "NOT_YET_ACTIVE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" + "timeoutSec": { + "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec.", + "format": "int32", + "type": "integer" }, - "statusMessage": { - "description": "[Output Only] An optional, human-readable explanation of the status.", - "type": "string" + "unhealthyThreshold": { + "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "CommitmentAggregatedList": { - "id": "CommitmentAggregatedList", + "HttpHealthCheckList": { + "description": "Contains a list of HttpHealthCheck resources.", + "id": "HttpHealthCheckList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "CommitmentsScopedList", - "description": "[Output Only] Name of the scope containing this set of commitments." 
+ "description": "A list of HttpHealthCheck resources.", + "items": { + "$ref": "HttpHealthCheck" }, - "description": "A list of CommitmentsScopedList resources.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#commitmentAggregatedList", - "description": "[Output Only] Type of resource. Always compute#commitmentAggregatedList for aggregated lists of commitments.", + "default": "compute#httpHealthCheckList", + "description": "Type of resource.", "type": "string" }, "nextPageToken": { @@ -24571,130 +30837,304 @@ }, "type": "object" }, - "CommitmentList": { - "description": "Contains a list of Commitment resources.", - "id": "CommitmentList", + "HttpQueryParameterMatch": { + "description": "HttpRouteRuleMatch criteria for a request's query parameter.", + "id": "HttpQueryParameterMatch", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "exactMatch": { + "description": "The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch.\nOnly one of presentMatch, exactMatch and regexMatch must be set.", "type": "string" }, - "items": { - "description": "A list of Commitment resources.", + "name": { + "description": "The name of the query parameter to match. The query parameter must exist in the request, in the absence of which the request match fails.", + "type": "string" + }, + "presentMatch": { + "description": "Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not.\nOnly one of presentMatch, exactMatch and regexMatch must be set.", + "type": "boolean" + }, + "regexMatch": { + "description": "The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For the regular expression grammar, please see en.cppreference.com/w/cpp/regex/ecmascript \nOnly one of presentMatch, exactMatch and regexMatch must be set.", + "type": "string" + } + }, + "type": "object" + }, + "HttpRedirectAction": { + "description": "Specifies settings for an HTTP redirect.", + "id": "HttpRedirectAction", + "properties": { + "hostRedirect": { + "description": "The host that will be used in the redirect response instead of the one that was supplied in the request.\nThe value must be between 1 and 255 characters.", + "type": "string" + }, + "httpsRedirect": { + "description": "If set to true, the URL scheme in the redirected request is set to https. If set to false, the URL scheme of the redirected request will remain the same as that of the request.\nThis must only be set for UrlMaps used in TargetHttpProxys. Setting this true for TargetHttpsProxy is not permitted.\nThe default is set to false.", + "type": "boolean" + }, + "pathRedirect": { + "description": "The path that will be used in the redirect response instead of the one that was supplied in the request.\nOnly one of pathRedirect or prefixRedirect must be specified.\nThe value must be between 1 and 1024 characters.", + "type": "string" + }, + "prefixRedirect": { + "description": "The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, retaining the remaining portion of the URL before redirecting the request.", + "type": "string" + }, + "redirectResponseCode": { + "description": "The HTTP Status code to use for this RedirectAction.\nSupported values are: \n- MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. 
\n- FOUND, which corresponds to 302. \n- SEE_OTHER which corresponds to 303. \n- TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method will be retained. \n- PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method will be retained.", + "enum": [ + "FOUND", + "MOVED_PERMANENTLY_DEFAULT", + "PERMANENT_REDIRECT", + "SEE_OTHER", + "TEMPORARY_REDIRECT" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "stripQuery": { + "description": "If set to true, any accompanying query portion of the original URL is removed prior to redirecting the request. If set to false, the query portion of the original URL is retained.\nThe default is set to false.", + "type": "boolean" + } + }, + "type": "object" + }, + "HttpRetryPolicy": { + "description": "The retry policy associates with HttpRouteRule", + "id": "HttpRetryPolicy", + "properties": { + "numRetries": { + "description": "Specifies the allowed number retries. This number must be \u003e 0.", + "format": "uint32", + "type": "integer" + }, + "perTryTimeout": { + "$ref": "Duration", + "description": "Specifies a non-zero timeout per retry attempt." + }, + "retryConditions": { + "description": "Specfies one or more conditions when this retry rule applies. Valid values are: \n- 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, or if the backend service does not respond at all, example: disconnects, reset, read timeout, connection failure, and refused streams. \n- gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504.\n- \n- connect-failure: Loadbalancer will retry on failures connecting to backend services, for example due to connection timeouts. \n- retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. Currently the only retriable error supported is 409. \n- refused-stream:Loadbalancer will retry if the backend service resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. \n- cancelledLoadbalancer will retry if the gRPC status code in the response header is set to cancelled \n- deadline-exceeded: Loadbalancer will retry if the gRPC status code in the response header is set to deadline-exceeded \n- resource-exhausted: Loadbalancer will retry if the gRPC status code in the response header is set to resource-exhausted \n- unavailable: Loadbalancer will retry if the gRPC status code in the response header is set to unavailable", "items": { - "$ref": "Commitment" + "type": "string" }, "type": "array" + } + }, + "type": "object" + }, + "HttpRouteAction": { + "id": "HttpRouteAction", + "properties": { + "corsPolicy": { + "$ref": "CorsPolicy", + "description": "The specification for allowing client side cross-origin requests. Please see W3C Recommendation for Cross Origin Resource Sharing" }, - "kind": { - "default": "compute#commitmentList", - "description": "[Output Only] Type of resource. Always compute#commitmentList for lists of commitments.", + "faultInjectionPolicy": { + "$ref": "HttpFaultInjection", + "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a percentage of requests before sending those request to the backend service. 
Similarly requests from clients can be aborted by the Loadbalancer for a percentage of requests.\ntimeout and retry_policy will be ignored by clients that are configured with a fault_injection_policy." + }, + "requestMirrorPolicy": { + "$ref": "RequestMirrorPolicy", + "description": "Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, the host / authority header is suffixed with -shadow." + }, + "retryPolicy": { + "$ref": "HttpRetryPolicy", + "description": "Specifies the retry policy associated with this route." + }, + "timeout": { + "$ref": "Duration", + "description": "Specifies the timeout for the selected route. Timeout is computed from the time the request is has been fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries.\nIf not specified, the default value is 15 seconds." + }, + "urlRewrite": { + "$ref": "UrlRewrite", + "description": "The spec to modify the URL of the request, prior to forwarding the request to the matched service" + }, + "weightedBackendServices": { + "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non 0 number.\nOnce a backendService is identified and before forwarding the request to the backend service, advanced routing actions like Url rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", + "items": { + "$ref": "WeightedBackendService" + }, + "type": "array" + } + }, + "type": "object" + }, + "HttpRouteRule": { + "description": "An HttpRouteRule specifies how to match an HTTP request and the corresponding routing action that load balancing proxies will perform.", + "id": "HttpRouteRule", + "properties": { + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here are applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction" + }, + "matchRules": { + "items": { + "$ref": "HttpRouteRuleMatch" + }, + "type": "array" + }, + "routeAction": { + "$ref": "HttpRouteAction", + "description": "In response to a matching matchRule, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set." + }, + "service": { + "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendService s. 
Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", "type": "string" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "urlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When this rule is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set." + } + }, + "type": "object" + }, + "HttpRouteRuleMatch": { + "description": "HttpRouteRuleMatch specifies a set of criteria for matching requests to an HttpRouteRule. All specified criteria must be satisfied for a match to occur.", + "id": "HttpRouteRuleMatch", + "properties": { + "fullPathMatch": { + "description": "For satifying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL.\nFullPathMatch must be between 1 and 1024 characters.\nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", + "headerMatches": { + "description": "Specifies a list of header match criteria, all of which must match corresponding headers in the request.", + "items": { + "$ref": "HttpHeaderMatch" + }, + "type": "array" + }, + "ignoreCase": { + "description": "Specifies that prefixMatch and fullPathMatch matches are case sensitive.\nThe default value is false.\ncaseSensitive must not be used with regexMatch.", + "type": "boolean" + }, + "metadataFilters": { + "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels in the provided metadata.\nmetadataFilters specified here can be overrides those specified in ForwardingRule that refers to this UrlMap.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "items": { + "$ref": "MetadataFilter" + }, + "type": "array" + }, + "prefixMatch": { + "description": "For satifying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /.\nThe value must be between 1 and 1024 characters.\nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", "type": "string" }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } + "queryParameterMatches": { + "description": "Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request.", + "items": { + "$ref": "HttpQueryParameterMatch" }, - "type": "object" + "type": "array" + }, + "regexMatch": { + "description": "For satifying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript \nOnly one of prefixMatch, fullPathMatch or regexMatch must be specified.", + "type": "string" + } + }, + "type": "object" + }, + "HttpsHealthCheck": { + "description": "An HttpsHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTPS.", + "id": "HttpsHealthCheck", + "properties": { + "checkIntervalSec": { + "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", + "format": "int32", + "type": "integer" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "healthyThreshold": { + "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. 
The default value is 2.", + "format": "int32", + "type": "integer" + }, + "host": { + "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#httpsHealthCheck", + "description": "Type of the resource.", + "type": "string" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "port": { + "description": "The TCP port number for the HTTPS health check request. The default value is 443.", + "format": "int32", + "type": "integer" + }, + "requestPath": { + "description": "The request path of the HTTPS health check request. The default value is \"/\".", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "timeoutSec": { + "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have a greater value than checkIntervalSec.", + "format": "int32", + "type": "integer" + }, + "unhealthyThreshold": { + "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "CommitmentsScopedList": { - "id": "CommitmentsScopedList", + "HttpsHealthCheckList": { + "description": "Contains a list of HttpsHealthCheck resources.", + "id": "HttpsHealthCheckList", "properties": { - "commitments": { - "description": "[Output Only] A list of commitments contained in this scope.", + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of HttpsHealthCheck resources.", "items": { - "$ref": "Commitment" + "$ref": "HttpsHealthCheck" }, "type": "array" }, + "kind": { + "default": "compute#httpsHealthCheckList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "[Output Only] Informational warning which replaces the list of commitments when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -24777,190 +31217,36 @@ }, "type": "object" }, - "Condition": { - "description": "A condition to be met.", - "id": "Condition", - "properties": { - "iam": { - "description": "Trusted attributes supplied by the IAM system.", - "enum": [ - "APPROVER", - "ATTRIBUTION", - "AUTHORITY", - "CREDENTIALS_TYPE", - "JUSTIFICATION_TYPE", - "NO_ATTR", - "SECURITY_REALM" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "op": { - "description": "An operator to apply the subject with.", - "enum": [ - "DISCHARGED", - "EQUALS", - "IN", - "NOT_EQUALS", - "NOT_IN", - "NO_OP" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "svc": { - "description": "Trusted attributes discharged by the service.", - "type": "string" - }, - "sys": { - "description": "Trusted attributes supplied by any service that owns resources and uses the IAM system for access control.", - "enum": [ - "IP", - "NAME", - "NO_ATTR", - "REGION", - "SERVICE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "value": { - "description": "DEPRECATED. Use 'values' instead.", - "type": "string" - }, - "values": { - "description": "The objects of the condition. This is mutually exclusive with 'value'.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "ConnectionDraining": { - "description": "Message containing connection draining configuration.", - "id": "ConnectionDraining", - "properties": { - "drainingTimeoutSec": { - "description": "Time for which instance will be drained (not accept new connections, but still work to finish started).", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "CustomerEncryptionKey": { - "description": "Represents a customer-supplied encryption key", - "id": "CustomerEncryptionKey", + "Image": { + "description": "An Image resource. (== resource_for beta.images ==) (== resource_for v1.images ==)", + "id": "Image", "properties": { - "kmsKeyName": { - "description": "The name of the encryption key that is stored in Google Cloud KMS.", - "type": "string" - }, - "rawKey": { - "description": "Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource.", - "type": "string" - }, - "rsaEncryptedKey": { - "description": "Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit customer-supplied encryption key to either encrypt or decrypt this resource.\n\nThe key must meet the following requirements before you can provide it to Compute Engine: \n- The key is wrapped using a RSA public key certificate provided by Google. \n- After being wrapped, the key must be encoded in RFC 4648 base64 encoding. 
Gets the RSA public key certificate provided by Google at:\nhttps://cloud-certs.storage.googleapis.com/google-cloud-csek-ingress.pem", - "type": "string" - }, - "sha256": { - "description": "[Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource.", + "archiveSizeBytes": { + "description": "Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).", + "format": "int64", "type": "string" - } - }, - "type": "object" - }, - "CustomerEncryptionKeyProtectedDisk": { - "id": "CustomerEncryptionKeyProtectedDisk", - "properties": { - "diskEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Decrypts data associated with the disk with a customer-supplied encryption key." }, - "source": { - "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. This field is only applicable for persistent disks.", - "type": "string" - } - }, - "type": "object" - }, - "DeprecationStatus": { - "description": "Deprecation status for a public resource.", - "id": "DeprecationStatus", - "properties": { - "deleted": { - "description": "An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DELETED. This is only informational and the status will not change unless the client explicitly changes it.", + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, "deprecated": { - "description": "An optional RFC3339 timestamp on or after which the state of this resource is intended to change to DEPRECATED. This is only informational and the status will not change unless the client explicitly changes it.", - "type": "string" - }, - "obsolete": { - "description": "An optional RFC3339 timestamp on or after which the state of this resource is intended to change to OBSOLETE. This is only informational and the status will not change unless the client explicitly changes it.", - "type": "string" + "$ref": "DeprecationStatus", + "description": "The deprecation status associated with this image." }, - "replacement": { - "description": "The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource.", + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "state": { - "description": "The deprecation state of this resource. This can be DEPRECATED, OBSOLETE, or DELETED. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error.", - "enum": [ - "DELETED", - "DEPRECATED", - "OBSOLETE" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "Disk": { - "description": "A Disk resource. (== resource_for beta.disks ==) (== resource_for v1.disks ==)", - "id": "Disk", - "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "diskSizeGb": { + "description": "Size of the image when restored onto a persistent disk (in GB).", + "format": "int64", "type": "string" }, - "description": { - "description": "An optional description of this resource. 
Provide this property when you create the resource.", + "family": { + "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035.", "type": "string" }, - "diskEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Encrypts the disk using a customer-supplied encryption key.\n\nAfter you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later (e.g. to create a disk snapshot or an image, or to attach the disk to a virtual machine).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later." - }, "guestOsFeatures": { "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", "items": { @@ -24973,13 +31259,17 @@ "format": "uint64", "type": "string" }, + "imageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the image using a customer-supplied encryption key.\n\nAfter you encrypt an image with a customer-supplied key, you must provide the same key if you use the image later (e.g. to create a disk from the image).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the image, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the image later." + }, "kind": { - "default": "compute#disk", - "description": "[Output Only] Type of the resource. Always compute#disk for disks.", + "default": "compute#image", + "description": "[Output Only] Type of the resource. Always compute#image for images.", "type": "string" }, "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this disk, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a disk.", + "description": "A fingerprint for the labels being applied to this image, which is essentially a hash of the labels used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an image.", "format": "byte", "type": "string" }, @@ -24987,19 +31277,11 @@ "additionalProperties": { "type": "string" }, - "description": "Labels to apply to this disk. These can be later modified by the setLabels method.", + "description": "Labels to apply to this image. 
These can be later modified by the setLabels method.", "type": "object" }, - "lastAttachTimestamp": { - "description": "[Output Only] Last attach timestamp in RFC3339 text format.", - "type": "string" - }, - "lastDetachTimestamp": { - "description": "[Output Only] Last detach timestamp in RFC3339 text format.", - "type": "string" - }, "licenseCodes": { - "description": "Integer license codes indicating which licenses are attached to this disk.", + "description": "Integer license codes indicating which licenses are attached to this image.", "items": { "format": "int64", "type": "string" @@ -25007,7 +31289,7 @@ "type": "array" }, "licenses": { - "description": "A list of publicly visible licenses. Reserved for Google's use.", + "description": "Any applicable license URI.", "items": { "type": "string" }, @@ -25016,51 +31298,61 @@ "name": { "annotations": { "required": [ - "compute.disks.insert" + "compute.images.insert" ] }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "options": { - "description": "Internal use only.", - "type": "string" + "rawDisk": { + "description": "The parameters of the raw disk image.", + "properties": { + "containerType": { + "description": "The format used to encode and transmit the block device, which should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created.", + "enum": [ + "TAR" + ], + "enumDescriptions": [ + "" + ], + "type": "string" + }, + "sha1Checksum": { + "description": "[Deprecated] This field is deprecated. An optional SHA1 checksum of the disk image before unpackaging provided by the client when the disk image is created.", + "pattern": "[a-f0-9]{40}", + "type": "string" + }, + "source": { + "annotations": { + "required": [ + "compute.images.insert" + ] + }, + "description": "The full Google Cloud Storage URL where the disk image is stored. You must provide either this property or the sourceDisk property but not both.", + "type": "string" + } + }, + "type": "object" }, - "physicalBlockSizeBytes": { - "description": "Physical block size of the persistent disk, in bytes. If not present in a request, a default value is used. Currently supported sizes are 4096 and 16384, other sizes may be added in the future. If an unsupported value is requested, the error message will list the supported values for the caller's project.", - "format": "int64", + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "region": { - "description": "[Output Only] URL of the region where the disk resides. 
Only applicable for regional resources. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "sourceDisk": { + "description": "URL of the source disk used to create this image. This can be a full or valid partial URL. You must provide either this property or the rawDisk.source property but not both to create an image. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk", "type": "string" }, - "replicaZones": { - "description": "URLs of the zones where the disk should be replicated to. Only applicable for regional resources.", - "items": { - "type": "string" - }, - "type": "array" - }, - "resourcePolicies": { - "description": "Resource policies applied to this disk for automatic snapshot creations.", - "items": { - "type": "string" - }, - "type": "array" - }, - "selfLink": { - "description": "[Output Only] Server-defined fully-qualified URL for this resource.", - "type": "string" + "sourceDiskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." }, - "sizeGb": { - "description": "Size of the persistent disk, specified in GB. You can specify this field when creating a persistent disk using the sourceImage or sourceSnapshot parameter, or specify it alone to create an empty persistent disk.\n\nIf you specify this field along with sourceImage or sourceSnapshot, the value of sizeGb must not be less than the size of the sourceImage or the size of the snapshot. Acceptable values are 1 to 65536, inclusive.", - "format": "int64", + "sourceDiskId": { + "description": "[Output Only] The ID value of the disk used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given disk name.", "type": "string" }, "sourceImage": { - "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image:\nprojects/debian-cloud/global/images/family/debian-9\n\n\nAlternatively, use a specific version of a public operating system image:\nprojects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD\n\n\nTo create a disk with a custom image that you created, specify the image name in the following format:\nglobal/images/my-custom-image\n\n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\nglobal/images/family/my-image-family", + "description": "URL of the source image used to create this image. This can be a full or valid partial URL. You must provide exactly one of: \n- this property, or \n- the rawDisk.source property, or \n- the sourceDisk property in order to create an image.", "type": "string" }, "sourceImageEncryptionKey": { @@ -25068,11 +31360,11 @@ "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key." }, "sourceImageId": { - "description": "[Output Only] The ID value of the image used to create this disk. 
This value identifies the exact image that was used to create this persistent disk. For example, if you created the persistent disk from an image that was later deleted and recreated under the same name, the source image ID would identify the exact version of the image that was used.", + "description": "[Output Only] The ID value of the image used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given image name.", "type": "string" }, "sourceSnapshot": { - "description": "The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot \n- projects/project/global/snapshots/snapshot \n- global/snapshots/snapshot", + "description": "URL of the source snapshot used to create this image. This can be a full or valid partial URL. You must provide exactly one of: \n- this property, or \n- the sourceImage property, or \n- the rawDisk.source property, or \n- the sourceDisk property in order to create an image.", "type": "string" }, "sourceSnapshotEncryptionKey": { @@ -25080,73 +31372,64 @@ "description": "The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key." }, "sourceSnapshotId": { - "description": "[Output Only] The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used.", + "description": "[Output Only] The ID value of the snapshot used to create this image. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given snapshot name.", "type": "string" }, - "status": { - "description": "[Output Only] The status of disk creation.", + "sourceType": { + "default": "RAW", + "description": "The type of the image used to create this disk. The default and only value is RAW", "enum": [ - "CREATING", - "FAILED", - "READY", - "RESTORING" + "RAW" ], "enumDescriptions": [ - "", - "", - "", "" ], "type": "string" }, - "storageType": { - "description": "[Deprecated] Storage type of the persistent disk.", + "status": { + "description": "[Output Only] The status of the image. An image can be used to create other resources, such as instances, only after the image has been successfully created and the status is set to READY. Possible values are FAILED, PENDING, or READY.", "enum": [ - "HDD", - "SSD" + "DELETING", + "FAILED", + "PENDING", + "READY" ], "enumDescriptions": [ + "", + "", "", "" ], "type": "string" }, - "type": { - "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. 
For example: project/zones/zone/diskTypes/pd-standard or pd-ssd", - "type": "string" - }, - "users": { - "description": "[Output Only] Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance", + "storageLocations": { + "description": "GCS bucket storage location of the image (regional or multi-regional).", "items": { "type": "string" }, "type": "array" - }, - "zone": { - "description": "[Output Only] URL of the zone where the disk resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", - "type": "string" } }, "type": "object" }, - "DiskAggregatedList": { - "id": "DiskAggregatedList", + "ImageList": { + "description": "Contains a list of images.", + "id": "ImageList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "DisksScopedList", - "description": "[Output Only] Name of the scope containing this set of disks." + "description": "A list of Image resources.", + "items": { + "$ref": "Image" }, - "description": "A list of DisksScopedList resources.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#diskAggregatedList", - "description": "[Output Only] Type of resource. Always compute#diskAggregatedList for aggregated lists of persistent disks.", + "default": "compute#imageList", + "description": "Type of resource.", "type": "string" }, "nextPageToken": { @@ -25241,32 +31524,154 @@ }, "type": "object" }, - "DiskInstantiationConfig": { - "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", - "id": "DiskInstantiationConfig", + "Instance": { + "description": "An Instance resource. (== resource_for beta.instances ==) (== resource_for v1.instances ==)", + "id": "Instance", "properties": { - "autoDelete": { - "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance).", + "canIpForward": { + "description": "Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. For more information, see Enabling IP Forwarding.", "type": "boolean" }, - "customImage": { - "description": "The custom source image to be used to restore this disk when instantiating this instance template.", + "cpuPlatform": { + "description": "[Output Only] The CPU platform used by this instance.", "type": "string" }, - "deviceName": { - "description": "Specifies the device name of the disk to which the configurations apply to.", + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "deletionProtection": { + "description": "Whether the resource should be protected against deletion.", + "type": "boolean" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "disks": { + "description": "Array of disks associated with this instance. Persistent disks must be created before you can assign them.", + "items": { + "$ref": "AttachedDisk" + }, + "type": "array" + }, + "displayDevice": { + "$ref": "DisplayDevice", + "description": "Enables display device for the instance." 
+ }, + "guestAccelerators": { + "description": "A list of the type and count of accelerator cards attached to the instance.", + "items": { + "$ref": "AcceleratorConfig" + }, + "type": "array" + }, + "hostname": { + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#instance", + "description": "[Output Only] Type of the resource. Always compute#instance for instances.", + "type": "string" + }, + "labelFingerprint": { + "description": "A fingerprint for this request, which is essentially a hash of the label's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make get() request to the instance.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this instance. These can be later modified by the setLabels method.", + "type": "object" + }, + "machineType": { + "annotations": { + "required": [ + "compute.instances.insert" + ] + }, + "description": "Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/machine-type. This is provided by the client when the instance is created. For example, the following is a valid partial url to a predefined machine type:\nzones/us-central1-f/machineTypes/n1-standard-1\n\n\nTo create a custom machine type, provide a URL to a machine type in the following format, where CPUS is 1 or an even number up to 32 (2, 4, 6, ... 24, etc), and MEMORY is the total memory for this instance. Memory must be a multiple of 256 MB and must be supplied in MB (e.g. 5 GB of memory is 5120 MB):\nzones/zone/machineTypes/custom-CPUS-MEMORY\n\n\nFor example: zones/us-central1-f/machineTypes/custom-4-5120 \n\nFor a full list of restrictions, read the Specifications for custom machine types.", + "type": "string" + }, + "metadata": { + "$ref": "Metadata", + "description": "The metadata key/value pairs assigned to this instance. This includes custom metadata and predefined keys." + }, + "minCpuPlatform": { + "description": "Specifies a minimum CPU platform for the VM instance. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\".", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.instances.insert" + ] + }, + "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "networkInterfaces": { + "description": "An array of network configurations for this instance. These specify how interfaces are configured to interact with other network services, such as connecting to the internet. 
Multiple interfaces are supported per instance.", + "items": { + "$ref": "NetworkInterface" + }, + "type": "array" + }, + "reservationAffinity": { + "$ref": "ReservationAffinity", + "description": "The configuration of desired reservations from which this Instance can consume capacity from." + }, + "scheduling": { + "$ref": "Scheduling", + "description": "Sets the scheduling options for this instance." + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "instantiateFrom": { - "description": "Specifies whether to include the disk and what image to use. Possible values are: \n- source-image: to use the same image that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. \n- source-image-family: to use the same image family that was used to create the source instance's corresponding disk. Applicable to the boot disk and additional read-write disks. \n- custom-image: to use a user-provided image url for disk creation. Applicable to the boot disk and additional read-write disks. \n- attach-read-only: to attach a read-only disk. Applicable to read-only disks. \n- do-not-include: to exclude a disk from the template. Applicable to additional read-write disks, local SSDs, and read-only disks.", + "serviceAccounts": { + "description": "A list of service accounts, with their specified scopes, authorized for this instance. Only one service account per VM instance is supported.\n\nService accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. See Service Accounts for more information.", + "items": { + "$ref": "ServiceAccount" + }, + "type": "array" + }, + "shieldedInstanceConfig": { + "$ref": "ShieldedInstanceConfig" + }, + "shieldedInstanceIntegrityPolicy": { + "$ref": "ShieldedInstanceIntegrityPolicy" + }, + "shieldedVmConfig": { + "$ref": "ShieldedVmConfig" + }, + "shieldedVmIntegrityPolicy": { + "$ref": "ShieldedVmIntegrityPolicy" + }, + "startRestricted": { + "description": "[Output Only] Whether a VM has been restricted for start because Compute Engine has detected suspicious activity.", + "type": "boolean" + }, + "status": { + "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, STOPPED, SUSPENDING, SUSPENDED, and TERMINATED.", "enum": [ - "ATTACH_READ_ONLY", - "BLANK", - "CUSTOM_IMAGE", - "DEFAULT", - "DO_NOT_INCLUDE", - "SOURCE_IMAGE", - "SOURCE_IMAGE_FAMILY" + "PROVISIONING", + "REPAIRING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" ], "enumDescriptions": [ "", @@ -25275,31 +31680,45 @@ "", "", "", + "", + "", "" ], "type": "string" + }, + "statusMessage": { + "description": "[Output Only] An optional, human-readable explanation of the status.", + "type": "string" + }, + "tags": { + "$ref": "Tags", + "description": "Tags to apply to this instance. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during instance creation. The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035. Multiple tags can be specified via the 'tags.items' field." + }, + "zone": { + "description": "[Output Only] URL of the zone where the instance resides. You must specify this field as part of the HTTP request URL. 
It is not settable as a field in the request body.", + "type": "string" } }, "type": "object" }, - "DiskList": { - "description": "A list of Disk resources.", - "id": "DiskList", + "InstanceAggregatedList": { + "id": "InstanceAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Disk resources.", - "items": { - "$ref": "Disk" + "additionalProperties": { + "$ref": "InstancesScopedList", + "description": "[Output Only] Name of the scope containing this set of instances." }, - "type": "array" + "description": "A list of InstancesScopedList resources.", + "type": "object" }, "kind": { - "default": "compute#diskList", - "description": "[Output Only] Type of resource. Always compute#diskList for lists of disks.", + "default": "compute#instanceAggregatedList", + "description": "[Output Only] Type of resource. Always compute#instanceAggregatedList for aggregated lists of Instance resources.", "type": "string" }, "nextPageToken": { @@ -25394,77 +31813,80 @@ }, "type": "object" }, - "DiskMoveRequest": { - "id": "DiskMoveRequest", - "properties": { - "destinationZone": { - "description": "The URL of the destination zone to move the disk. This can be a full or partial URL. For example, the following are all valid URLs to a zone: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone \n- projects/project/zones/zone \n- zones/zone", - "type": "string" - }, - "targetDisk": { - "description": "The URL of the target disk to move. This can be a full or partial URL. For example, the following are all valid URLs to a disk: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk", - "type": "string" - } - }, - "type": "object" - }, - "DiskType": { - "description": "A DiskType resource. (== resource_for beta.diskTypes ==) (== resource_for v1.diskTypes ==)", - "id": "DiskType", + "InstanceGroup": { + "description": "InstanceGroups (== resource_for beta.instanceGroups ==) (== resource_for v1.instanceGroups ==) (== resource_for beta.regionInstanceGroups ==) (== resource_for v1.regionInstanceGroups ==)", + "id": "InstanceGroup", "properties": { "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "description": "[Output Only] The creation timestamp for this instance group in RFC3339 text format.", "type": "string" }, - "defaultDiskSizeGb": { - "description": "[Output Only] Server-defined default disk size in GB.", - "format": "int64", + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this disk type." - }, - "description": { - "description": "[Output Only] An optional description of this resource.", + "fingerprint": { + "description": "[Output Only] The fingerprint of the named ports. The system uses this fingerprint to detect conflicts when multiple users change the named ports concurrently.", + "format": "byte", "type": "string" }, "id": { - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", + "description": "[Output Only] A unique identifier for this instance group, generated by the server.", "format": "uint64", "type": "string" }, "kind": { - "default": "compute#diskType", - "description": "[Output Only] Type of the resource. Always compute#diskType for disk types.", + "default": "compute#instanceGroup", + "description": "[Output Only] The resource type, which is always compute#instanceGroup for instance groups.", "type": "string" }, "name": { - "description": "[Output Only] Name of the resource.", + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert" + ] + }, + "description": "The name of the instance group. The name must be 1-63 characters long, and comply with RFC1035.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "namedPorts": { + "description": "Assigns a name to a port number. For example: {name: \"http\", port: 80}\n\nThis allows the system to reference ports by the assigned name instead of a port number. Named ports can also contain multiple ports. For example: [{name: \"http\", port: 80},{name: \"http\", port: 8080}] \n\nNamed ports apply to all instances in this instance group.", + "items": { + "$ref": "NamedPort" + }, + "type": "array" + }, + "network": { + "description": "The URL of the network to which all instances in the instance group belong.", + "type": "string" + }, "region": { - "description": "[Output Only] URL of the region where the disk type resides. Only applicable for regional resources. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "description": "[Output Only] The URL of the region where the instance group is located (for regional resources).", "type": "string" }, "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "description": "[Output Only] The URL for this instance group. The server generates this URL.", "type": "string" }, - "validDiskSize": { - "description": "[Output Only] An optional textual description of the valid disk size, such as \"10GB-10TB\".", + "size": { + "description": "[Output Only] The total number of instances in the instance group.", + "format": "int32", + "type": "integer" + }, + "subnetwork": { + "description": "[Output Only] The URL of the subnetwork to which all instances in the instance group belong.", "type": "string" }, "zone": { - "description": "[Output Only] URL of the zone where the disk type resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "description": "[Output Only] The URL of the zone where the instance group is located (for zonal resources).", "type": "string" } }, "type": "object" }, - "DiskTypeAggregatedList": { - "id": "DiskTypeAggregatedList", + "InstanceGroupAggregatedList": { + "id": "InstanceGroupAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", @@ -25472,15 +31894,15 @@ }, "items": { "additionalProperties": { - "$ref": "DiskTypesScopedList", - "description": "[Output Only] Name of the scope containing this set of disk types." + "$ref": "InstanceGroupsScopedList", + "description": "The name of the scope that contains this set of instance groups." 
}, - "description": "A list of DiskTypesScopedList resources.", + "description": "A list of InstanceGroupsScopedList resources.", "type": "object" }, "kind": { - "default": "compute#diskTypeAggregatedList", - "description": "[Output Only] Type of resource. Always compute#diskTypeAggregatedList.", + "default": "compute#instanceGroupAggregatedList", + "description": "[Output Only] The resource type, which is always compute#instanceGroupAggregatedList for aggregated lists of instance groups.", "type": "string" }, "nextPageToken": { @@ -25575,24 +31997,24 @@ }, "type": "object" }, - "DiskTypeList": { - "description": "Contains a list of disk types.", - "id": "DiskTypeList", + "InstanceGroupList": { + "description": "A list of InstanceGroup resources.", + "id": "InstanceGroupList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of DiskType resources.", + "description": "A list of InstanceGroup resources.", "items": { - "$ref": "DiskType" + "$ref": "InstanceGroup" }, "type": "array" }, "kind": { - "default": "compute#diskTypeList", - "description": "[Output Only] Type of resource. Always compute#diskTypeList for disk types.", + "default": "compute#instanceGroupList", + "description": "[Output Only] The resource type, which is always compute#instanceGroupList for instance group lists.", "type": "string" }, "nextPageToken": { @@ -25687,149 +32109,233 @@ }, "type": "object" }, - "DiskTypesScopedList": { - "id": "DiskTypesScopedList", + "InstanceGroupManager": { + "description": "An Instance Group Manager resource. (== resource_for beta.instanceGroupManagers ==) (== resource_for v1.instanceGroupManagers ==) (== resource_for beta.regionInstanceGroupManagers ==) (== resource_for v1.regionInstanceGroupManagers ==)", + "id": "InstanceGroupManager", "properties": { - "diskTypes": { - "description": "[Output Only] A list of disk types contained in this scope.", + "autoHealingPolicies": { + "description": "The autohealing policy for this managed instance group. You can specify only one value.", "items": { - "$ref": "DiskType" + "$ref": "InstanceGroupManagerAutoHealingPolicy" }, "type": "array" }, - "warning": { - "description": "[Output Only] Informational warning which replaces the list of disk types when the list is empty.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } + "baseInstanceName": { + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert" + ] }, - "type": "object" - } - }, - "type": "object" - }, - "DisksAddResourcePoliciesRequest": { - "id": "DisksAddResourcePoliciesRequest", - "properties": { - "resourcePolicies": { - "description": "Resource policies to be added to this disk.", + "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. The base instance name must comply with RFC1035.", + "pattern": "[a-z][-a-z0-9]{0,57}", + "type": "string" + }, + "creationTimestamp": { + "description": "[Output Only] The creation timestamp for this managed instance group in RFC3339 text format.", + "type": "string" + }, + "currentActions": { + "$ref": "InstanceGroupManagerActionsSummary", + "description": "[Output Only] The list of instance actions and the number of instances in this managed instance group that are scheduled for each of those actions." + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "distributionPolicy": { + "$ref": "DistributionPolicy", + "description": "Policy specifying intended distribution of instances in regional managed instance group." + }, + "failoverAction": { + "description": "The action to perform in case of zone failure. Only one value is supported, NO_FAILOVER. The default is NO_FAILOVER.", + "enum": [ + "NO_FAILOVER", + "UNKNOWN" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "fingerprint": { + "description": "Fingerprint of this resource. This field may be used in optimistic locking. It will be ignored when inserting an InstanceGroupManager. An up-to-date fingerprint must be provided in order to update the InstanceGroupManager, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an InstanceGroupManager.", + "format": "byte", + "type": "string" + }, + "id": { + "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier.", + "format": "uint64", + "type": "string" + }, + "instanceGroup": { + "description": "[Output Only] The URL of the Instance Group resource.", + "type": "string" + }, + "instanceTemplate": { + "description": "The URL of the instance template that is specified for this managed instance group. 
The group uses this template to create all new instances in the managed instance group.", + "type": "string" + }, + "kind": { + "default": "compute#instanceGroupManager", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManager for managed instance groups.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert", + "compute.regionInstanceGroupManagers.insert" + ] + }, + "description": "The name of the managed instance group. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "namedPorts": { + "description": "Named ports configured for the Instance Groups complementary to this Instance Group Manager.", "items": { - "type": "string" + "$ref": "NamedPort" }, "type": "array" - } - }, - "type": "object" - }, - "DisksRemoveResourcePoliciesRequest": { - "id": "DisksRemoveResourcePoliciesRequest", - "properties": { - "resourcePolicies": { - "description": "Resource policies to be removed from this disk.", + }, + "pendingActions": { + "$ref": "InstanceGroupManagerPendingActionsSummary", + "description": "[Deprecated] This field is deprecated and will be removed. Prefer using the status field instead. Please contact cloud-updater-feedback@google.com to leave feedback if your workload relies on this field. [Output Only] The list of instance actions and the number of instances in this managed instance group that are pending for each of those actions." + }, + "region": { + "description": "[Output Only] The URL of the region where the managed instance group resides (for regional resources).", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] The URL for this managed instance group. The server defines this URL.", + "type": "string" + }, + "serviceAccount": { + "description": "The service account to be used as credentials for all operations performed by the managed instance group on instances. The service accounts needs all permissions required to create and delete instances. By default, the service account {projectNumber}@cloudservices.gserviceaccount.com is used.", + "type": "string" + }, + "status": { + "$ref": "InstanceGroupManagerStatus", + "description": "[Output Only] The status of this managed instance group." + }, + "targetPools": { + "description": "The URLs for all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.", "items": { "type": "string" }, "type": "array" + }, + "targetSize": { + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert", + "compute.regionInstanceGroupManagers.insert" + ] + }, + "description": "The target number of running instances for this managed instance group. Deleting or abandoning instances reduces this number. Resizing the group changes this number.", + "format": "int32", + "type": "integer" + }, + "updatePolicy": { + "$ref": "InstanceGroupManagerUpdatePolicy", + "description": "The update policy for this managed instance group." + }, + "versions": { + "description": "Specifies the instance templates used by this managed instance group to create instances.\n\nEach version is defined by an instanceTemplate and a name. Every version can appear at most once per instance group. This field overrides the top-level instanceTemplate field. Read more about the relationships between these fields. 
Exactly one version must leave the targetSize field unset. That version will be applied to all remaining instances. For more information, read about canary updates.", + "items": { + "$ref": "InstanceGroupManagerVersion" + }, + "type": "array" + }, + "zone": { + "description": "[Output Only] The URL of the zone where the managed instance group is located (for zonal resources).", + "type": "string" } }, "type": "object" }, - "DisksResizeRequest": { - "id": "DisksResizeRequest", + "InstanceGroupManagerActionsSummary": { + "id": "InstanceGroupManagerActionsSummary", "properties": { - "sizeGb": { - "description": "The new size of the persistent disk, which is specified in GB.", - "format": "int64", - "type": "string" + "abandoning": { + "description": "[Output Only] The total number of instances in the managed instance group that are scheduled to be abandoned. Abandoning an instance removes it from the managed instance group without deleting it.", + "format": "int32", + "type": "integer" + }, + "creating": { + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be created or are currently being created. If the group fails to create any of these instances, it tries again until it creates the instance successfully.\n\nIf you have disabled creation retries, this field will not be populated; instead, the creatingWithoutRetries field will be populated.", + "format": "int32", + "type": "integer" + }, + "creatingWithoutRetries": { + "description": "[Output Only] The number of instances that the managed instance group will attempt to create. The group attempts to create each instance only once. If the group fails to create any of these instances, it decreases the group's targetSize value accordingly.", + "format": "int32", + "type": "integer" + }, + "deleting": { + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be deleted or are currently being deleted.", + "format": "int32", + "type": "integer" + }, + "none": { + "description": "[Output Only] The number of instances in the managed instance group that are running and have no scheduled actions.", + "format": "int32", + "type": "integer" + }, + "recreating": { + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be recreated or are currently being being recreated. Recreating an instance deletes the existing root persistent disk and creates a new disk from the image that is defined in the instance template.", + "format": "int32", + "type": "integer" + }, + "refreshing": { + "description": "[Output Only] The number of instances in the managed instance group that are being reconfigured with properties that do not require a restart or a recreate action. For example, setting or removing target pools for the instance.", + "format": "int32", + "type": "integer" + }, + "restarting": { + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be restarted or are currently being restarted.", + "format": "int32", + "type": "integer" + }, + "verifying": { + "description": "[Output Only] The number of instances in the managed instance group that are being verified. 
See the managedInstances[].currentAction property in the listManagedInstances method documentation.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "DisksScopedList": { - "id": "DisksScopedList", + "InstanceGroupManagerAggregatedList": { + "id": "InstanceGroupManagerAggregatedList", "properties": { - "disks": { - "description": "[Output Only] A list of disks contained in this scope.", - "items": { - "$ref": "Disk" + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "InstanceGroupManagersScopedList", + "description": "[Output Only] The name of the scope that contains this set of managed instance groups." }, - "type": "array" + "description": "A list of InstanceGroupManagersScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#instanceGroupManagerAggregatedList", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerAggregatedList for an aggregated list of managed instance groups.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" }, "warning": { - "description": "[Output Only] Informational warning which replaces the list of disks when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -25905,107 +32411,47 @@ "message": { "description": "[Output Only] A human-readable description of the warning code.", "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "DisplayDevice": { - "description": "A set of Display Device options", - "id": "DisplayDevice", - "properties": { - "enableDisplay": { - "description": "Defines whether the instance has Display enabled.", - "type": "boolean" - } - }, - "type": "object" - }, - "DistributionPolicy": { - "id": "DistributionPolicy", - "properties": { - "zones": { - "description": "Zones where the regional managed instance group will create and manage instances.", - "items": { - "$ref": "DistributionPolicyZoneConfiguration" - }, - "type": "array" - } - }, - "type": "object" - }, - "DistributionPolicyZoneConfiguration": { - "id": "DistributionPolicyZoneConfiguration", - "properties": { - "zone": { - "annotations": { - "required": [ - "compute.regionInstanceGroupManagers.insert", - "compute.regionInstanceGroupManagers.update" - ] + } }, - "description": "The URL of the zone. 
The zone must exist in the region where the managed instance group is located.", - "type": "string" + "type": "object" } }, "type": "object" }, - "ExchangedPeeringRoute": { - "id": "ExchangedPeeringRoute", + "InstanceGroupManagerAutoHealingPolicy": { + "description": "", + "id": "InstanceGroupManagerAutoHealingPolicy", "properties": { - "destRange": { - "description": "The destination range of the route.", - "type": "string" - }, - "imported": { - "description": "If the peering route is imported if there is no confliction.", - "type": "boolean" - }, - "nextHopRegion": { - "description": "The region of peering route next hop, only applies to dynamic routes.", + "healthCheck": { + "description": "The URL for the health check that signals autohealing.", "type": "string" }, - "priority": { - "description": "The priority of the peering route.", - "format": "uint32", + "initialDelaySec": { + "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600].", + "format": "int32", "type": "integer" - }, - "type": { - "description": "The type of the peering route.", - "enum": [ - "DYNAMIC_PEERING_ROUTE", - "STATIC_PEERING_ROUTE", - "SUBNET_PEERING_ROUTE" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" } }, "type": "object" }, - "ExchangedPeeringRoutesList": { - "id": "ExchangedPeeringRoutesList", + "InstanceGroupManagerList": { + "description": "[Output Only] A list of managed instance groups.", + "id": "InstanceGroupManagerList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of ExchangedPeeringRoute resources.", + "description": "A list of InstanceGroupManager resources.", "items": { - "$ref": "ExchangedPeeringRoute" + "$ref": "InstanceGroupManager" }, "type": "array" }, "kind": { - "default": "compute#exchangedPeeringRoutesList", - "description": "[Output Only] Type of resource. Always compute#exchangedPeeringRoutesList for exchanged peering routes lists.", + "default": "compute#instanceGroupManagerList", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups.", "type": "string" }, "nextPageToken": { @@ -26100,93 +32546,88 @@ }, "type": "object" }, - "Expr": { - "description": "Represents an expression text. Example:\n\ntitle: \"User account presence\" description: \"Determines whether the request has a user account\" expression: \"size(request.user) \u003e 0\"", - "id": "Expr", + "InstanceGroupManagerPendingActionsSummary": { + "id": "InstanceGroupManagerPendingActionsSummary", "properties": { - "description": { - "description": "An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", - "type": "string" + "creating": { + "description": "[Deprecated] This field is deprecated and will be removed. Prefer using the status field instead. Please contact cloud-updater-feedback@google.com to leave feedback if your workload relies on this field. 
[Output Only] The number of instances in the managed instance group that are pending to be created.", + "format": "int32", + "type": "integer" }, - "expression": { - "description": "Textual representation of an expression in Common Expression Language syntax.\n\nThe application context of the containing message determines which well-known feature set of CEL is supported.", - "type": "string" + "deleting": { + "description": "[Deprecated] This field is deprecated and will be removed. Prefer using the status field instead. Please contact cloud-updater-feedback@google.com to leave feedback if your workload relies on this field. [Output Only] The number of instances in the managed instance group that are pending to be deleted.", + "format": "int32", + "type": "integer" }, - "location": { - "description": "An optional string indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", - "type": "string" + "recreating": { + "description": "[Deprecated] This field is deprecated and will be removed. Prefer using the status field instead. Please contact cloud-updater-feedback@google.com to leave feedback if your workload relies on this field. [Output Only] The number of instances in the managed instance group that are pending to be recreated.", + "format": "int32", + "type": "integer" }, - "title": { - "description": "An optional title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", - "type": "string" + "restarting": { + "description": "[Deprecated] This field is deprecated and will be removed. Prefer using the status field instead. Please contact cloud-updater-feedback@google.com to leave feedback if your workload relies on this field. [Output Only] The number of instances in the managed instance group that are pending to be restarted.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "Firewall": { - "description": "Represents a Firewall resource.", - "id": "Firewall", + "InstanceGroupManagerStatus": { + "id": "InstanceGroupManagerStatus", "properties": { - "allowed": { - "description": "The list of ALLOW rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", - "items": { - "properties": { - "IPProtocol": { - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", - "type": "string" - }, - "ports": { - "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "type": "array" + "isStable": { + "description": "[Output Only] A bit indicating whether the managed instance group is in a stable state. 
A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.", + "type": "boolean" }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "versionTarget": { + "$ref": "InstanceGroupManagerStatusVersionTarget", + "description": "[Output Only] A status of consistency of Instances' versions with their target version specified by version field on Instance Group Manager." + } + }, + "type": "object" + }, + "InstanceGroupManagerStatusVersionTarget": { + "id": "InstanceGroupManagerStatusVersionTarget", + "properties": { + "isReached": { + "description": "[Output Only] A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances' target version are specified by version field on Instance Group Manager.", + "type": "boolean" + } + }, + "type": "object" + }, + "InstanceGroupManagerUpdatePolicy": { + "id": "InstanceGroupManagerUpdatePolicy", + "properties": { + "instanceRedistributionType": { + "enum": [ + "NONE", + "PROACTIVE" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" }, - "denied": { - "description": "The list of DENY rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a denied connection.", - "items": { - "properties": { - "IPProtocol": { - "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number.", - "type": "string" - }, - "ports": { - "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "type": "array" + "maxSurge": { + "$ref": "FixedOrPercent", + "description": "The maximum number of instances that can be created above the specified targetSize during the update process. By default, a fixed value of 1 is used. This value can be either a fixed number or a percentage if the instance group has 10 or more instances. If you set a percentage, the number of instances will be rounded up if necessary.\n\nAt least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge." }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" + "maxUnavailable": { + "$ref": "FixedOrPercent", + "description": "The maximum number of instances that can be unavailable during the update process. An instance is considered available if all of the following conditions are satisfied:\n\n \n- The instance's status is RUNNING. \n- If there is a health check on the instance group, the instance's liveness health check result must be HEALTHY at least once. If there is no health check on the group, then the instance only needs to have a status of RUNNING to be considered available. 
By default, a fixed value of 1 is used. This value can be either a fixed number or a percentage if the instance group has 10 or more instances. If you set a percentage, the number of instances will be rounded up if necessary.\n\nAt least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxUnavailable." }, - "destinationRanges": { - "description": "If destination ranges are specified, the firewall will apply only to traffic that has destination IP address in these ranges. These ranges must be expressed in CIDR format. Only IPv4 is supported.", - "items": { - "type": "string" - }, - "type": "array" + "minReadySec": { + "description": "Minimum number of seconds to wait for after a newly created instance becomes available. This value must be from range [0, 3600].", + "format": "int32", + "type": "integer" }, - "direction": { - "description": "Direction of traffic to which this firewall applies; default is INGRESS. Note: For INGRESS traffic, it is NOT supported to specify destinationRanges; For EGRESS traffic, it is NOT supported to specify sourceRanges OR sourceTags.", + "minimalAction": { + "description": "Minimal action to be taken on an instance. You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a RESTART, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action.", "enum": [ - "EGRESS", - "INGRESS" + "REPLACE", + "RESTART" ], "enumDescriptions": [ "", @@ -26194,82 +32635,124 @@ ], "type": "string" }, - "disabled": { - "description": "Denotes whether the firewall rule is disabled, i.e not applied to the network it is associated with. When set to true, the firewall rule is not enforced and the network behaves as if it did not exist. If this is unspecified, the firewall rule will be enabled.", - "type": "boolean" - }, - "enableLogging": { - "description": "Deprecated in favor of enable in LogConfig. This field denotes whether to enable logging for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver.", - "type": "boolean" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", + "type": { + "enum": [ + "OPPORTUNISTIC", + "PROACTIVE" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" - }, - "kind": { - "default": "compute#firewall", - "description": "[Output Only] Type of the resource. Always compute#firewall for firewall rules.", + } + }, + "type": "object" + }, + "InstanceGroupManagerVersion": { + "id": "InstanceGroupManagerVersion", + "properties": { + "instanceTemplate": { + "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create new instances in the managed instance group until the `targetSize` for this version is reached.", "type": "string" }, - "logConfig": { - "$ref": "FirewallLogConfig", - "description": "This field denotes the logging options for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver." - }, "name": { - "annotations": { - "required": [ - "compute.firewalls.insert", - "compute.firewalls.patch" - ] - }, - "description": "Name of the resource; provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "network": { - "description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used:\nglobal/networks/default\nIf you choose to specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network \n- projects/myproject/global/networks/my-network \n- global/networks/default", - "type": "string" - }, - "priority": { - "description": "Priority for this rule. This is an integer between 0 and 65535, both inclusive. When not specified, the value assumed is 1000. Relative priorities determine precedence of conflicting rules. Lower value of priority implies higher precedence (eg, a rule with priority 0 has higher precedence than a rule with priority 1). DENY rules take precedence over ALLOW rules having equal priority.", - "format": "int32", - "type": "integer" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "description": "Name of the version. Unique among all versions in the scope of this managed instance group.", "type": "string" }, - "sourceRanges": { - "description": "If source ranges are specified, the firewall will apply only to traffic that has source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply. Only IPv4 is supported.", + "targetSize": { + "$ref": "FixedOrPercent", + "description": "Specifies the intended number of instances to be created from the instanceTemplate. The final number of instances created from the template will be equal to: \n- If expressed as a fixed number, the minimum of either targetSize.fixed or instanceGroupManager.targetSize is used. \n- if expressed as a percent, the targetSize would be (targetSize.percent/100 * InstanceGroupManager.targetSize) If there is a remainder, the number is rounded up. If unset, this version will update any remaining instances not updated by another version. Read Starting a canary update for more information." + } + }, + "type": "object" + }, + "InstanceGroupManagersAbandonInstancesRequest": { + "id": "InstanceGroupManagersAbandonInstancesRequest", + "properties": { + "instances": { + "description": "The URLs of one or more instances to abandon. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", "items": { "type": "string" }, "type": "array" - }, - "sourceServiceAccounts": { - "description": "If source service accounts are specified, the firewall will apply only to traffic originating from an instance with a service account in this list. 
Source service accounts cannot be used to control traffic to an instance's external IP address because service accounts are associated with an instance, not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. If both are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP belongs to an instance with service account listed in sourceServiceAccount. The connection does not need to match both properties for the firewall to apply. sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags.", + } + }, + "type": "object" + }, + "InstanceGroupManagersApplyUpdatesRequest": { + "description": "InstanceGroupManagers.applyUpdatesToInstances", + "id": "InstanceGroupManagersApplyUpdatesRequest", + "properties": { + "instances": { + "description": "The list of URLs of one or more instances for which we want to apply updates on this managed instance group. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", "items": { "type": "string" }, "type": "array" }, - "sourceTags": { - "description": "If source tags are specified, the firewall rule applies only to traffic with source IPs that match the primary network interfaces of VM instances that have the tag and are in the same VPC network. Source tags cannot be used to control traffic to an instance's external IP address, it only applies to traffic between instances in the same virtual network. Because tags are associated with instances, not IP addresses. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.", + "minimalAction": { + "description": "The minimal action that should be perfomed on the instances. By default NONE.", + "enum": [ + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "mostDisruptiveAllowedAction": { + "description": "The most disruptive action that allowed to be performed on the instances. By default REPLACE.", + "enum": [ + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "InstanceGroupManagersDeleteInstancesRequest": { + "id": "InstanceGroupManagersDeleteInstancesRequest", + "properties": { + "instances": { + "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", "items": { "type": "string" }, "type": "array" - }, - "targetServiceAccounts": { - "description": "A list of service accounts indicating sets of instances located in the network that may make network connections as specified in allowed[]. targetServiceAccounts cannot be used at the same time as targetTags or sourceTags. 
If neither targetServiceAccounts nor targetTags are specified, the firewall rule applies to all instances on the specified network.", + } + }, + "type": "object" + }, + "InstanceGroupManagersListManagedInstancesResponse": { + "id": "InstanceGroupManagersListManagedInstancesResponse", + "properties": { + "managedInstances": { + "description": "[Output Only] The list of instances in the managed instance group.", "items": { - "type": "string" + "$ref": "ManagedInstance" }, "type": "array" }, - "targetTags": { - "description": "A list of tags that controls which instances the firewall rule applies to. If targetTags are specified, then the firewall rule applies only to instances in the VPC network that have one of those tags. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + } + }, + "type": "object" + }, + "InstanceGroupManagersRecreateInstancesRequest": { + "id": "InstanceGroupManagersRecreateInstancesRequest", + "properties": { + "instances": { + "description": "The URLs of one or more instances to recreate. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", "items": { "type": "string" }, @@ -26278,36 +32761,33 @@ }, "type": "object" }, - "FirewallList": { - "description": "Contains a list of firewalls.", - "id": "FirewallList", + "InstanceGroupManagersResizeAdvancedRequest": { + "id": "InstanceGroupManagersResizeAdvancedRequest", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" + "noCreationRetries": { + "description": "If this flag is true, the managed instance group attempts to create all instances initiated by this resize request only once. If there is an error during creation, the managed instance group does not retry create this instance, and we will decrease the targetSize of the request instead. If the flag is false, the group attempts to recreate each instance continuously until it succeeds.\n\nThis flag matters only in the first attempt of creation of an instance. After an instance is successfully created while this flag is enabled, the instance behaves the same way as all the other instances created with a regular resize request. In particular, if a running instance dies unexpectedly at a later time and needs to be recreated, this mode does not affect the recreation behavior in that scenario.\n\nThis flag is applicable only to the current resize request. It does not influence other resize requests in any way.\n\nYou can see which instances is being creating in which mode by calling the get or listManagedInstances API.", + "type": "boolean" }, - "items": { - "description": "A list of Firewall resources.", + "targetSize": { + "description": "The number of running instances that the managed instance group should maintain at any given time. 
The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "InstanceGroupManagersScopedList": { + "id": "InstanceGroupManagersScopedList", + "properties": { + "instanceGroupManagers": { + "description": "[Output Only] The list of managed instance groups that are contained in the specified project and zone.", "items": { - "$ref": "Firewall" + "$ref": "InstanceGroupManager" }, "type": "array" }, - "kind": { - "default": "compute#firewallList", - "description": "[Output Only] Type of resource. Always compute#firewallList for lists of firewalls.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "[Output Only] The warning that replaces the list of managed instance groups when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -26385,216 +32865,81 @@ "type": "string" } }, - "type": "object" - } - }, - "type": "object" - }, - "FirewallLogConfig": { - "description": "The available logging options for a firewall rule.", - "id": "FirewallLogConfig", - "properties": { - "enable": { - "description": "This field denotes whether to enable logging for a particular firewall rule.", - "type": "boolean" - } - }, - "type": "object" - }, - "FixedOrPercent": { - "description": "Encapsulates numeric value that can be either absolute or relative.", - "id": "FixedOrPercent", - "properties": { - "calculated": { - "description": "[Output Only] Absolute value of VM instances calculated based on the specific mode.\n\n \n- If the value is fixed, then the caculated value is equal to the fixed value. \n- If the value is a percent, then the calculated value is percent/100 * targetSize. For example, the calculated value of a 80% of a managed instance group with 150 instances would be (80/100 * 150) = 120 VM instances. If there is a remainder, the number is rounded up.", - "format": "int32", - "type": "integer" - }, - "fixed": { - "description": "Specifies a fixed number of VM instances. This must be a positive integer.", - "format": "int32", - "type": "integer" - }, - "percent": { - "description": "Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "ForwardingRule": { - "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, ports] tuple. 
(== resource_for beta.forwardingRules ==) (== resource_for v1.forwardingRules ==) (== resource_for beta.globalForwardingRules ==) (== resource_for v1.globalForwardingRules ==) (== resource_for beta.regionForwardingRules ==) (== resource_for v1.regionForwardingRules ==)", - "id": "ForwardingRule", - "properties": { - "IPAddress": { - "description": "The IP address that this forwarding rule is serving on behalf of.\n\nAddresses are restricted based on the forwarding rule's load balancing scheme (EXTERNAL or INTERNAL) and scope (global or regional).\n\nWhen the load balancing scheme is EXTERNAL, for global forwarding rules, the address must be a global IP, and for regional forwarding rules, the address must live in the same region as the forwarding rule. If this field is empty, an ephemeral IPv4 address from the same scope (global or regional) will be assigned. A regional forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4 or IPv6.\n\nWhen the load balancing scheme is INTERNAL_SELF_MANAGED, this must be a URL reference to an existing Address resource ( internal regional static IP address), with a purpose of GCE_END_POINT and address_type of INTERNAL.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnet configured for the forwarding rule. By default, if this field is empty, an ephemeral internal IP address will be automatically allocated from the IP range of the subnet or network configured for this forwarding rule.\n\nAn address can be specified either by a literal IP address or a URL reference to an existing Address resource. The following examples are all valid: \n- 100.1.2.3 \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address \n- projects/project/regions/region/addresses/address \n- regions/region/addresses/address \n- global/addresses/address \n- address", - "type": "string" - }, - "IPProtocol": { - "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.\n\nWhen the load balancing scheme is INTERNAL, only TCP and UDP are valid. When the load balancing scheme is INTERNAL_SELF_MANAGED, only TCPis valid.", - "enum": [ - "AH", - "ESP", - "ICMP", - "SCTP", - "TCP", - "UDP" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "allPorts": { - "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. This field cannot be used with port or portRange fields.\n\nWhen the load balancing scheme is INTERNAL and protocol is TCP/UDP, specify this field to allow packets addressed to any ports will be forwarded to the backends configured with this forwarding rule.", - "type": "boolean" - }, - "backendService": { - "description": "This field is only used for INTERNAL load balancing.\n\nFor internal load balancing, this field identifies the BackendService resource to receive the matched traffic.", - "type": "string" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "ipVersion": { - "description": "The IP Version that will be used by this forwarding rule. Valid options are IPV4 or IPV6. This can only be specified for an external global forwarding rule.", - "enum": [ - "IPV4", - "IPV6", - "UNSPECIFIED_VERSION" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, - "kind": { - "default": "compute#forwardingRule", - "description": "[Output Only] Type of the resource. Always compute#forwardingRule for Forwarding Rule resources.", - "type": "string" - }, - "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this resource, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a ForwardingRule.", - "format": "byte", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to apply to this resource. These can be later modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", - "type": "object" - }, - "loadBalancingScheme": { - "description": "This signifies what the ForwardingRule will be used for and can only take the following values: INTERNAL, INTERNAL_SELF_MANAGED, EXTERNAL. The value of INTERNAL means that this will be used for Internal Network Load Balancing (TCP, UDP). The value of INTERNAL_SELF_MANAGED means that this will be used for Internal Global HTTP(S) LB. The value of EXTERNAL means that this will be used for External Load Balancing (HTTP(S) LB, External TCP/UDP LB, SSL Proxy)", - "enum": [ - "EXTERNAL", - "INTERNAL", - "INVALID" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, - "name": { - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "network": { - "description": "This field is not used for external load balancing.\n\nFor INTERNAL and INTERNAL_SELF_MANAGED load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used.", - "type": "string" - }, - "networkTier": { - "description": "This signifies the networking tier used for configuring this load balancer and can only take the following values: PREMIUM , STANDARD.\n\nFor regional ForwardingRule, the valid values are PREMIUM and STANDARD. For GlobalForwardingRule, the valid value is PREMIUM.\n\nIf this field is not specified, it is assumed to be PREMIUM. 
If IPAddress is specified, this value must be equal to the networkTier of the Address.", - "enum": [ - "PREMIUM", - "STANDARD" - ], - "enumDescriptions": [ - "", - "" - ], + "type": "object" + } + }, + "type": "object" + }, + "InstanceGroupManagersSetAutoHealingRequest": { + "id": "InstanceGroupManagersSetAutoHealingRequest", + "properties": { + "autoHealingPolicies": { + "items": { + "$ref": "InstanceGroupManagerAutoHealingPolicy" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstanceGroupManagersSetInstanceTemplateRequest": { + "id": "InstanceGroupManagersSetInstanceTemplateRequest", + "properties": { + "instanceTemplate": { + "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group.", "type": "string" - }, - "portRange": { - "description": "This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.\n\nApplicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.\n\nSome types of forwarding target have constraints on the acceptable ports: \n- TargetHttpProxy: 80, 8080 \n- TargetHttpsProxy: 443 \n- TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222 \n- TargetVpnGateway: 500, 4500", + } + }, + "type": "object" + }, + "InstanceGroupManagersSetTargetPoolsRequest": { + "id": "InstanceGroupManagersSetTargetPoolsRequest", + "properties": { + "fingerprint": { + "description": "The fingerprint of the target pools information. Use this optional property to prevent conflicts when multiple users change the target pools settings concurrently. Obtain the fingerprint with the instanceGroupManagers.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", + "format": "byte", "type": "string" }, - "ports": { - "description": "This field is used along with the backend_service field for internal load balancing.\n\nWhen the load balancing scheme is INTERNAL, a list of ports can be configured, for example, ['80'], ['8000','9000'] etc. Only packets addressed to these ports will be forwarded to the backends configured with this forwarding rule.\n\nYou may specify a maximum of up to 5 ports.", + "targetPools": { + "description": "The list of target pool URLs that instances in this managed instance group belong to. The managed instance group applies these target pools to all of the instances in the group. Existing instances and new instances in the group all receive these target pool settings.", "items": { "type": "string" }, "type": "array" - }, - "region": { - "description": "[Output Only] URL of the region where the regional forwarding rule resides. This field is not applicable to global forwarding rules. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "serviceLabel": { - "description": "An optional prefix to the service name for this Forwarding Rule. 
If specified, will be the first label of the fully qualified service name.\n\nThe label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.\n\nThis field is only used for internal load balancing.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "serviceName": { - "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule.\n\nThis field is only used for internal load balancing.", - "type": "string" - }, - "subnetwork": { - "description": "This field is only used for INTERNAL load balancing.\n\nFor internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule.\n\nIf the network specified is in auto subnet mode, this field is optional. However, if the network is in custom subnet mode, a subnetwork must be specified.", - "type": "string" - }, - "target": { - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For INTERNAL_SELF_MANAGED\" load balancing, only HTTP and HTTPS targets are valid.", - "type": "string" } }, "type": "object" }, - "ForwardingRuleAggregatedList": { - "id": "ForwardingRuleAggregatedList", + "InstanceGroupsAddInstancesRequest": { + "id": "InstanceGroupsAddInstancesRequest", + "properties": { + "instances": { + "description": "The list of instances to add to the instance group.", + "items": { + "$ref": "InstanceReference" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstanceGroupsListInstances": { + "id": "InstanceGroupsListInstances", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "ForwardingRulesScopedList", - "description": "Name of the scope containing this set of addresses." + "description": "A list of InstanceWithNamedPorts resources.", + "items": { + "$ref": "InstanceWithNamedPorts" }, - "description": "A list of ForwardingRulesScopedList resources.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#forwardingRuleAggregatedList", - "description": "[Output Only] Type of resource. Always compute#forwardingRuleAggregatedList for lists of forwarding rules.", + "default": "compute#instanceGroupsListInstances", + "description": "[Output Only] The resource type, which is always compute#instanceGroupsListInstances for the list of instances in the specified instance group.", "type": "string" }, "nextPageToken": { @@ -26689,36 +33034,49 @@ }, "type": "object" }, - "ForwardingRuleList": { - "description": "Contains a list of ForwardingRule resources.", - "id": "ForwardingRuleList", + "InstanceGroupsListInstancesRequest": { + "id": "InstanceGroupsListInstancesRequest", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "instanceState": { + "description": "A filter for the state of the instances in the instance group. 
Valid options are ALL or RUNNING. If you do not specify this parameter the list includes all instances regardless of their state.", + "enum": [ + "ALL", + "RUNNING" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" - }, - "items": { - "description": "A list of ForwardingRule resources.", + } + }, + "type": "object" + }, + "InstanceGroupsRemoveInstancesRequest": { + "id": "InstanceGroupsRemoveInstancesRequest", + "properties": { + "instances": { + "description": "The list of instances to remove from the instance group.", "items": { - "$ref": "ForwardingRule" + "$ref": "InstanceReference" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstanceGroupsScopedList": { + "id": "InstanceGroupsScopedList", + "properties": { + "instanceGroups": { + "description": "[Output Only] The list of instance groups that are contained in this scope.", + "items": { + "$ref": "InstanceGroup" }, "type": "array" - }, - "kind": { - "default": "compute#forwardingRuleList", - "description": "Type of resource.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "[Output Only] An informational warning that replaces the list of instance groups when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -26801,27 +33159,54 @@ }, "type": "object" }, - "ForwardingRuleReference": { - "id": "ForwardingRuleReference", + "InstanceGroupsSetNamedPortsRequest": { + "id": "InstanceGroupsSetNamedPortsRequest", "properties": { - "forwardingRule": { + "fingerprint": { + "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request. A request with an incorrect fingerprint will fail with error 412 conditionNotMet.", + "format": "byte", "type": "string" + }, + "namedPorts": { + "description": "The list of named ports to set for this instance group.", + "items": { + "$ref": "NamedPort" + }, + "type": "array" } }, "type": "object" }, - "ForwardingRulesScopedList": { - "id": "ForwardingRulesScopedList", + "InstanceList": { + "description": "Contains a list of instances.", + "id": "InstanceList", "properties": { - "forwardingRules": { - "description": "A list of forwarding rules contained in this scope.", + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of Instance resources.", "items": { - "$ref": "ForwardingRule" + "$ref": "Instance" }, "type": "array" }, + "kind": { + "default": "compute#instanceList", + "description": "[Output Only] Type of resource. 
Always compute#instanceList for lists of Instance resources.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "Informational warning which replaces the list of forwarding rules when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -26904,403 +33289,301 @@ }, "type": "object" }, - "GlobalSetLabelsRequest": { - "id": "GlobalSetLabelsRequest", + "InstanceListReferrers": { + "description": "Contains a list of instance referrers.", + "id": "InstanceListReferrers", "properties": { - "labelFingerprint": { - "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash when updating or changing labels, otherwise the request will fail with error 412 conditionNotMet. Make a get() request to the resource to get the latest fingerprint.", - "format": "byte", + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "A list of labels to apply for this resource. Each label key \u0026 value must comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. For example, \"webserver-frontend\": \"images\". A label value can also be empty (e.g. \"my-label\": \"\").", - "type": "object" - } - }, - "type": "object" - }, - "GlobalSetPolicyRequest": { - "id": "GlobalSetPolicyRequest", - "properties": { - "bindings": { - "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify bindings.", + "items": { + "description": "A list of Reference resources.", "items": { - "$ref": "Binding" + "$ref": "Reference" }, "type": "array" }, - "etag": { - "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify the etag.", - "format": "byte", - "type": "string" - }, - "policy": { - "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the 'resource'. The size of the policy is limited to a few 10s of KB. An empty policy is in general a valid policy but certain services (like Projects) might reject them." - } - }, - "type": "object" - }, - "GuestAttributes": { - "description": "A guest attributes entry.", - "id": "GuestAttributes", - "properties": { "kind": { - "default": "compute#guestAttributes", - "description": "[Output Only] Type of the resource. 
Always compute#guestAttributes for guest attributes entry.", + "default": "compute#instanceListReferrers", + "description": "[Output Only] Type of resource. Always compute#instanceListReferrers for lists of Instance referrers.", "type": "string" }, - "queryPath": { - "description": "The path to be queried. This can be the default namespace ('/') or a nested namespace ('//') or a specified key ('//')", + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, - "queryValue": { - "$ref": "GuestAttributesValue", - "description": "[Output Only] The value of the requested queried path." - }, "selfLink": { "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "variableKey": { - "description": "The key to search for.", - "type": "string" - }, - "variableValue": { - "description": "[Output Only] The value found for the requested key.", - "type": "string" + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "GuestAttributesEntry": { - "description": "A guest attributes namespace/key/value entry.", - "id": "GuestAttributesEntry", + "InstanceMoveRequest": { + "id": "InstanceMoveRequest", "properties": { - "key": { - "description": "Key for the guest attribute entry.", - "type": "string" - }, - "namespace": { - "description": "Namespace for the guest attribute entry.", + "destinationZone": { + "description": "The URL of the destination zone to move the instance. This can be a full or partial URL. For example, the following are all valid URLs to a zone: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone \n- projects/project/zones/zone \n- zones/zone", "type": "string" }, - "value": { - "description": "Value for the guest attribute entry.", - "type": "string" - } - }, - "type": "object" - }, - "GuestAttributesValue": { - "description": "Array of guest attribute namespace/key/value tuples.", - "id": "GuestAttributesValue", - "properties": { - "items": { - "items": { - "$ref": "GuestAttributesEntry" - }, - "type": "array" - } - }, - "type": "object" - }, - "GuestOsFeature": { - "description": "Guest OS features.", - "id": "GuestOsFeature", - "properties": { - "type": { - "description": "The ID of a supported feature. Read Enabling guest operating system features to see a list of available options.", - "enum": [ - "FEATURE_TYPE_UNSPECIFIED", - "MULTI_IP_SUBNET", - "SECURE_BOOT", - "UEFI_COMPATIBLE", - "VIRTIO_SCSI_MULTIQUEUE", - "WINDOWS" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ], + "targetInstance": { + "description": "The URL of the target instance to move. This can be a full or partial URL. For example, the following are all valid URLs to an instance: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance \n- zones/zone/instances/instance", "type": "string" } }, "type": "object" }, - "HTTP2HealthCheck": { - "id": "HTTP2HealthCheck", + "InstanceProperties": { + "description": "", + "id": "InstanceProperties", "properties": { - "host": { - "description": "The value of the host header in the HTTP/2 health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", - "type": "string" - }, - "port": { - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", - "format": "int32", - "type": "integer" - }, - "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. 
If both port and port_name are defined, port takes precedence.", - "type": "string" - }, - "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTP2 health check follows behavior specified in\nport\nand\nportName\nfields.", - "enum": [ - "USE_FIXED_PORT", - "USE_NAMED_PORT", - "USE_SERVING_PORT" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, - "proxyHeader": { - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" + "canIpForward": { + "description": "Enables instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information.", + "type": "boolean" }, - "requestPath": { - "description": "The request path of the HTTP/2 health check request. The default value is /.", + "description": { + "description": "An optional text description for the instances that are created from this instance template.", "type": "string" }, - "response": { - "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", - "type": "string" - } - }, - "type": "object" - }, - "HTTPHealthCheck": { - "id": "HTTPHealthCheck", - "properties": { - "host": { - "description": "The value of the host header in the HTTP health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", - "type": "string" + "disks": { + "description": "An array of disks that are associated with the instances that are created from this template.", + "items": { + "$ref": "AttachedDisk" + }, + "type": "array" }, - "port": { - "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", - "format": "int32", - "type": "integer" + "displayDevice": { + "$ref": "DisplayDevice", + "description": "Display Device properties to enable support for remote display products like: Teradici, VNC and TeamViewer" }, - "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. 
If both port and port_name are defined, port takes precedence.", - "type": "string" + "guestAccelerators": { + "description": "A list of guest accelerator cards' type and count to use for instances created from the instance template.", + "items": { + "$ref": "AcceleratorConfig" + }, + "type": "array" }, - "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTP health check follows behavior specified in\nport\nand\nportName\nfields.", - "enum": [ - "USE_FIXED_PORT", - "USE_NAMED_PORT", - "USE_SERVING_PORT" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to instances that are created from this template.", + "type": "object" }, - "proxyHeader": { - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ], + "machineType": { + "annotations": { + "required": [ + "compute.instanceTemplates.insert" + ] + }, + "description": "The machine type to use for instances that are created from this template.", "type": "string" }, - "requestPath": { - "description": "The request path of the HTTP health check request. The default value is /.", - "type": "string" + "metadata": { + "$ref": "Metadata", + "description": "The metadata key/value pairs to assign to instances that are created from this template. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." }, - "response": { - "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", - "type": "string" - } - }, - "type": "object" - }, - "HTTPSHealthCheck": { - "id": "HTTPSHealthCheck", - "properties": { - "host": { - "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", + "minCpuPlatform": { + "description": "Minimum cpu/platform to be used by this instance. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". For more information, read Specifying a Minimum CPU Platform.", "type": "string" }, - "port": { - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", - "format": "int32", - "type": "integer" + "networkInterfaces": { + "description": "An array of network access configurations for this interface.", + "items": { + "$ref": "NetworkInterface" + }, + "type": "array" }, - "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. 
If both port and port_name are defined, port takes precedence.", - "type": "string" + "reservationAffinity": { + "$ref": "ReservationAffinity", + "description": "The configuration of desired reservations which this Instance could consume capacity from." }, - "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTPS health check follows behavior specified in\nport\nand\nportName\nfields.", - "enum": [ - "USE_FIXED_PORT", - "USE_NAMED_PORT", - "USE_SERVING_PORT" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" + "scheduling": { + "$ref": "Scheduling", + "description": "Specifies the scheduling options for the instances that are created from this template." }, - "proxyHeader": { - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" + "serviceAccounts": { + "description": "A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from this template. Use metadata queries to obtain the access tokens for these instances.", + "items": { + "$ref": "ServiceAccount" + }, + "type": "array" }, - "requestPath": { - "description": "The request path of the HTTPS health check request. The default value is /.", - "type": "string" + "shieldedInstanceConfig": { + "$ref": "ShieldedInstanceConfig" }, - "response": { - "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", + "shieldedVmConfig": { + "$ref": "ShieldedVmConfig", + "description": "Specifies the Shielded VM options for the instances that are created from this template." + }, + "tags": { + "$ref": "Tags", + "description": "A list of tags to apply to the instances that are created from this template. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." + } + }, + "type": "object" + }, + "InstanceReference": { + "id": "InstanceReference", + "properties": { + "instance": { + "description": "The URL for a specific instance.", "type": "string" } }, "type": "object" }, - "HealthCheck": { - "description": "An HealthCheck resource. This resource defines a template for how individual virtual machines should be checked for health, via one of the supported protocols.", - "id": "HealthCheck", + "InstanceTemplate": { + "description": "An Instance Template resource. (== resource_for beta.instanceTemplates ==) (== resource_for v1.instanceTemplates ==)", + "id": "InstanceTemplate", "properties": { - "checkIntervalSec": { - "description": "How often (in seconds) to send a health check. 
The default value is 5 seconds.", - "format": "int32", - "type": "integer" - }, "creationTimestamp": { - "description": "[Output Only] Creation timestamp in 3339 text format.", + "description": "[Output Only] The creation timestamp for this instance template in RFC3339 text format.", "type": "string" }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "healthyThreshold": { - "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", - "format": "int32", - "type": "integer" - }, - "http2HealthCheck": { - "$ref": "HTTP2HealthCheck" - }, - "httpHealthCheck": { - "$ref": "HTTPHealthCheck" - }, - "httpsHealthCheck": { - "$ref": "HTTPSHealthCheck" - }, "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "description": "[Output Only] A unique identifier for this instance template. The server defines this identifier.", "format": "uint64", "type": "string" }, "kind": { - "default": "compute#healthCheck", - "description": "Type of the resource.", + "default": "compute#instanceTemplate", + "description": "[Output Only] The resource type, which is always compute#instanceTemplate for instance templates.", "type": "string" }, "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "annotations": { + "required": [ + "compute.instanceTemplates.insert" + ] + }, + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "properties": { + "$ref": "InstanceProperties", + "description": "The instance properties for this instance template." + }, "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "description": "[Output Only] The URL for this instance template. The server defines this URL.", "type": "string" }, - "sslHealthCheck": { - "$ref": "SSLHealthCheck" - }, - "tcpHealthCheck": { - "$ref": "TCPHealthCheck" - }, - "timeoutSec": { - "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec.", - "format": "int32", - "type": "integer" - }, - "type": { - "description": "Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If not specified, the default is TCP. 
Exactly one of the protocol-specific health check field must be specified, which must match type field.", - "enum": [ - "HTTP", - "HTTP2", - "HTTPS", - "INVALID", - "SSL", - "TCP" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ], + "sourceInstance": { + "description": "The source instance used to create the template. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance", "type": "string" }, - "unhealthyThreshold": { - "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", - "format": "int32", - "type": "integer" + "sourceInstanceParams": { + "$ref": "SourceInstanceParams", + "description": "The source instance params to use to create this instance template." } }, "type": "object" }, - "HealthCheckList": { - "description": "Contains a list of HealthCheck resources.", - "id": "HealthCheckList", + "InstanceTemplateList": { + "description": "A list of instance templates.", + "id": "InstanceTemplateList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of HealthCheck resources.", + "description": "A list of InstanceTemplate resources.", "items": { - "$ref": "HealthCheck" + "$ref": "InstanceTemplate" }, "type": "array" }, "kind": { - "default": "compute#healthCheckList", - "description": "Type of resource.", + "default": "compute#instanceTemplateList", + "description": "[Output Only] The resource type, which is always compute#instanceTemplatesListResponse for instance template lists.", "type": "string" }, "nextPageToken": { @@ -27395,71 +33678,39 @@ }, "type": "object" }, - "HealthCheckReference": { - "description": "A full or valid partial URL to a health check. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check \n- projects/project-id/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", - "id": "HealthCheckReference", - "properties": { - "healthCheck": { - "type": "string" - } - }, - "type": "object" - }, - "HealthStatus": { - "id": "HealthStatus", + "InstanceWithNamedPorts": { + "id": "InstanceWithNamedPorts", "properties": { - "healthState": { - "description": "Health state of the instance.", - "enum": [ - "HEALTHY", - "UNHEALTHY" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, "instance": { - "description": "URL of the instance resource.", - "type": "string" - }, - "ipAddress": { - "description": "The IP address represented by this resource.", + "description": "[Output Only] The URL of the instance.", "type": "string" }, - "port": { - "description": "The port on the instance.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "HealthStatusForNetworkEndpoint": { - "id": "HealthStatusForNetworkEndpoint", - "properties": { - "backendService": { - "$ref": "BackendServiceReference", - "description": "URL of the backend service associated with the health state of the network endpoint." - }, - "forwardingRule": { - "$ref": "ForwardingRuleReference", - "description": "URL of the forwarding rule associated with the health state of the network endpoint." 
- }, - "healthCheck": { - "$ref": "HealthCheckReference", - "description": "URL of the health check associated with the health state of the network endpoint." + "namedPorts": { + "description": "[Output Only] The named ports that belong to this instance group.", + "items": { + "$ref": "NamedPort" + }, + "type": "array" }, - "healthState": { - "description": "Health state of the network endpoint determined based on the health checks configured.", + "status": { + "description": "[Output Only] The status of the instance.", "enum": [ - "DRAINING", - "HEALTHY", - "UNHEALTHY", - "UNKNOWN" + "PROVISIONING", + "REPAIRING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" ], "enumDescriptions": [ + "", + "", + "", + "", + "", "", "", "", @@ -27470,125 +33721,35 @@ }, "type": "object" }, - "HostRule": { - "description": "UrlMaps A host-matching rule for a URL. If matched, will use the named PathMatcher to select the BackendService.", - "id": "HostRule", + "InstancesResumeRequest": { + "id": "InstancesResumeRequest", "properties": { - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "hosts": { - "description": "The list of host patterns to match. They must be valid hostnames, except * will match any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or ..", + "disks": { + "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key.\n\nIn order to resume the instance, the disk url and its corresponding key must be provided.\n\nIf the disk is not protected with a customer-supplied encryption key it should not be specified.", "items": { - "type": "string" + "$ref": "CustomerEncryptionKeyProtectedDisk" }, "type": "array" }, - "pathMatcher": { - "description": "The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion.", - "type": "string" - } - }, - "type": "object" - }, - "HttpHealthCheck": { - "description": "An HttpHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTP.", - "id": "HttpHealthCheck", - "properties": { - "checkIntervalSec": { - "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", - "format": "int32", - "type": "integer" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "healthyThreshold": { - "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", - "format": "int32", - "type": "integer" - }, - "host": { - "description": "The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#httpHealthCheck", - "description": "[Output Only] Type of the resource. 
Always compute#httpHealthCheck for HTTP health checks.", - "type": "string" - }, - "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "port": { - "description": "The TCP port number for the HTTP health check request. The default value is 80.", - "format": "int32", - "type": "integer" - }, - "requestPath": { - "description": "The request path of the HTTP health check request. The default value is /.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "timeoutSec": { - "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec.", - "format": "int32", - "type": "integer" - }, - "unhealthyThreshold": { - "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", - "format": "int32", - "type": "integer" + "instanceEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Decrypts data associated with an instance that is protected with a customer-supplied encryption key.\n\nIf the instance you are starting is protected with a customer-supplied encryption key, the correct key must be provided otherwise the instance resume will not succeed." } }, "type": "object" }, - "HttpHealthCheckList": { - "description": "Contains a list of HttpHealthCheck resources.", - "id": "HttpHealthCheckList", + "InstancesScopedList": { + "id": "InstancesScopedList", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of HttpHealthCheck resources.", + "instances": { + "description": "[Output Only] A list of instances contained in this scope.", "items": { - "$ref": "HttpHealthCheck" + "$ref": "Instance" }, "type": "array" }, - "kind": { - "default": "compute#httpHealthCheckList", - "description": "Type of resource.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "[Output Only] Informational warning which replaces the list of instances when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -27671,30 +33832,143 @@ }, "type": "object" }, - "HttpsHealthCheck": { - "description": "An HttpsHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTPS.", - "id": "HttpsHealthCheck", + "InstancesSetLabelsRequest": { + "id": "InstancesSetLabelsRequest", "properties": { - "checkIntervalSec": { - "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", - "format": "int32", - "type": "integer" + "labelFingerprint": { + "description": "Fingerprint of the previous set of labels for this resource, used to prevent conflicts. Provide the latest fingerprint value when making a request to add or change labels.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "InstancesSetMachineResourcesRequest": { + "id": "InstancesSetMachineResourcesRequest", + "properties": { + "guestAccelerators": { + "description": "A list of the type and count of accelerator cards attached to the instance.", + "items": { + "$ref": "AcceleratorConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstancesSetMachineTypeRequest": { + "id": "InstancesSetMachineTypeRequest", + "properties": { + "machineType": { + "description": "Full or partial URL of the machine type resource. See Machine Types for a full list of machine types. For example: zones/us-central1-f/machineTypes/n1-standard-1", + "type": "string" + } + }, + "type": "object" + }, + "InstancesSetMinCpuPlatformRequest": { + "id": "InstancesSetMinCpuPlatformRequest", + "properties": { + "minCpuPlatform": { + "description": "Minimum cpu/platform this instance should be started at.", + "type": "string" + } + }, + "type": "object" + }, + "InstancesSetServiceAccountRequest": { + "id": "InstancesSetServiceAccountRequest", + "properties": { + "email": { + "description": "Email address of the service account.", + "type": "string" + }, + "scopes": { + "description": "The list of scopes to be made available for this service account.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "InstancesStartWithEncryptionKeyRequest": { + "id": "InstancesStartWithEncryptionKeyRequest", + "properties": { + "disks": { + "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key.\n\nIn order to start the instance, the disk url and its corresponding key must be provided.\n\nIf the disk is not protected with a customer-supplied encryption key it should not be specified.", + "items": { + "$ref": "CustomerEncryptionKeyProtectedDisk" + }, + "type": "array" + } + }, + "type": "object" + }, + "Int64RangeMatch": { + "description": "HttpRouteRuleMatch criteria for field values that must stay within the specified integer range.", + "id": "Int64RangeMatch", + "properties": { + "rangeEnd": { + "description": "The end of the range (exclusive) in signed long integer format.", + "format": "int64", + "type": "string" + }, + "rangeStart": { + "description": "The start of the range (inclusive) in signed long integer format.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "Interconnect": { + "description": "Represents an Interconnects resource. The Interconnects resource is a dedicated connection between Google's network and your on-premises network. 
For more information, see the Dedicated overview page. (== resource_for v1.interconnects ==) (== resource_for beta.interconnects ==)", + "id": "Interconnect", + "properties": { + "adminEnabled": { + "description": "Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true.", + "type": "boolean" + }, + "circuitInfos": { + "description": "[Output Only] A list of CircuitInfo objects, that describe the individual circuits in this LAG.", + "items": { + "$ref": "InterconnectCircuitInfo" + }, + "type": "array" }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "customerName": { + "description": "Customer name, to put in the Letter of Authorization as the party authorized to request a crossconnect.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "expectedOutages": { + "description": "[Output Only] A list of outages expected for this Interconnect.", + "items": { + "$ref": "InterconnectOutageNotification" + }, + "type": "array" + }, + "googleIpAddress": { + "description": "[Output Only] IP address configured on the Google side of the Interconnect link. This can be used only for ping tests.", "type": "string" }, - "healthyThreshold": { - "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", - "format": "int32", - "type": "integer" - }, - "host": { - "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.", + "googleReferenceId": { + "description": "[Output Only] Google reference ID to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.", "type": "string" }, "id": { @@ -27702,207 +33976,212 @@ "format": "uint64", "type": "string" }, + "interconnectAttachments": { + "description": "[Output Only] A list of the URLs of all InterconnectAttachments configured to use this Interconnect.", + "items": { + "type": "string" + }, + "type": "array" + }, + "interconnectType": { + "description": "Type of interconnect, which can take one of the following values: \n- PARTNER: A partner-managed interconnection shared between customers though a partner. \n- DEDICATED: A dedicated physical interconnection with the customer. Note that a value IT_PRIVATE has been deprecated in favor of DEDICATED.", + "enum": [ + "DEDICATED", + "IT_PRIVATE", + "PARTNER" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "kind": { - "default": "compute#httpsHealthCheck", - "description": "Type of the resource.", + "default": "compute#interconnect", + "description": "[Output Only] Type of the resource. Always compute#interconnect for interconnects.", + "type": "string" + }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this Interconnect, which is essentially a hash of the labels set used for optimistic locking. 
The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an Interconnect.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this Interconnect resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", + "type": "object" + }, + "linkType": { + "description": "Type of link requested, which can take one of the following values: \n- LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics \n- LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.", + "enum": [ + "LINK_TYPE_ETHERNET_100G_LR", + "LINK_TYPE_ETHERNET_10G_LR" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "location": { + "description": "URL of the InterconnectLocation object that represents where this connection is to be provisioned.", "type": "string" }, "name": { + "annotations": { + "required": [ + "compute.interconnects.insert" + ] + }, "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "port": { - "description": "The TCP port number for the HTTPS health check request. The default value is 443.", - "format": "int32", - "type": "integer" + "nocContactEmail": { + "description": "Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Stackdriver logs alerting and Cloud Notifications.", + "type": "string" }, - "requestPath": { - "description": "The request path of the HTTPS health check request. The default value is \"/\".", + "operationalStatus": { + "description": "[Output Only] The current status of this Interconnect's functionality, which can take one of the following values: \n- OS_ACTIVE: A valid Interconnect, which is turned up and is ready to use. Attachments may be provisioned on this Interconnect. \n- OS_UNPROVISIONED: An Interconnect that has not completed turnup. No attachments may be provisioned on this Interconnect. \n- OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect.", + "enum": [ + "OS_ACTIVE", + "OS_UNPROVISIONED" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "peerIpAddress": { + "description": "[Output Only] IP address configured on the customer side of the Interconnect link. The customer should configure this IP address during turnup when prompted by Google NOC. 
This can be used only for ping tests.", "type": "string" }, - "timeoutSec": { - "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have a greater value than checkIntervalSec.", + "provisionedLinkCount": { + "description": "[Output Only] Number of links actually provisioned in this interconnect.", "format": "int32", "type": "integer" }, - "unhealthyThreshold": { - "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", + "requestedLinkCount": { + "description": "Target number of physical links in the link bundle, as requested by the customer.", "format": "int32", "type": "integer" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "state": { + "description": "[Output Only] The current state of Interconnect functionality, which can take one of the following values: \n- ACTIVE: The Interconnect is valid, turned up and ready to use. Attachments may be provisioned on this Interconnect. \n- UNPROVISIONED: The Interconnect has not completed turnup. No attachments may be provisioned on this Interconnect. \n- UNDER_MAINTENANCE: The Interconnect is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect.", + "enum": [ + "ACTIVE", + "UNPROVISIONED" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" } }, "type": "object" }, - "HttpsHealthCheckList": { - "description": "Contains a list of HttpsHealthCheck resources.", - "id": "HttpsHealthCheckList", + "InterconnectAttachment": { + "description": "Represents an InterconnectAttachment (VLAN attachment) resource. For more information, see Creating VLAN Attachments. (== resource_for beta.interconnectAttachments ==) (== resource_for v1.interconnectAttachments ==)", + "id": "InterconnectAttachment", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "adminEnabled": { + "description": "Determines whether this Attachment will carry packets. Not present for PARTNER_PROVIDER.", + "type": "boolean" + }, + "bandwidth": { + "description": "Provisioned bandwidth capacity for the interconnect attachment. For attachments of type DEDICATED, the user can set the bandwidth. For attachments of type PARTNER, the Google Partner that is operating the interconnect must set the bandwidth. Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, and can take one of the following values: \n- BPS_50M: 50 Mbit/s \n- BPS_100M: 100 Mbit/s \n- BPS_200M: 200 Mbit/s \n- BPS_300M: 300 Mbit/s \n- BPS_400M: 400 Mbit/s \n- BPS_500M: 500 Mbit/s \n- BPS_1G: 1 Gbit/s \n- BPS_2G: 2 Gbit/s \n- BPS_5G: 5 Gbit/s \n- BPS_10G: 10 Gbit/s", + "enum": [ + "BPS_100M", + "BPS_10G", + "BPS_1G", + "BPS_200M", + "BPS_2G", + "BPS_300M", + "BPS_400M", + "BPS_500M", + "BPS_50M", + "BPS_5G" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], "type": "string" }, - "items": { - "description": "A list of HttpsHealthCheck resources.", + "candidateSubnets": { + "description": "Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress and customerRouterIpAddress for this attachment. All prefixes must be within link-local address space (169.254.0.0/16) and must be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29 from the supplied candidate prefix(es). 
The request will fail if all possible /29s are in use on Google?s edge. If not supplied, Google will randomly select an unused /29 from all of link-local space.", "items": { - "$ref": "HttpsHealthCheck" + "type": "string" }, "type": "array" }, - "kind": { - "default": "compute#httpsHealthCheckList", - "description": "Type of resource.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "Image": { - "description": "An Image resource. 
(== resource_for beta.images ==) (== resource_for v1.images ==)", - "id": "Image", - "properties": { - "archiveSizeBytes": { - "description": "Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).", - "format": "int64", + "cloudRouterIpAddress": { + "description": "[Output Only] IPv4 address + prefix length to be configured on Cloud Router Interface for this interconnect attachment.", "type": "string" }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "The deprecation status associated with this image." + "customerRouterIpAddress": { + "description": "[Output Only] IPv4 address + prefix length to be configured on the customer router subinterface for this interconnect attachment.", + "type": "string" }, "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "description": "An optional description of this resource.", "type": "string" }, - "diskSizeGb": { - "description": "Size of the image when restored onto a persistent disk (in GB).", - "format": "int64", + "edgeAvailabilityDomain": { + "description": "Desired availability domain for the attachment. Only available for type PARTNER, at creation time, and can take one of the following values: \n- AVAILABILITY_DOMAIN_ANY \n- AVAILABILITY_DOMAIN_1 \n- AVAILABILITY_DOMAIN_2 For improved reliability, customers should configure a pair of attachments, one per availability domain. The selected availability domain will be provided to the Partner via the pairing key, so that the provisioned circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.", + "enum": [ + "AVAILABILITY_DOMAIN_1", + "AVAILABILITY_DOMAIN_2", + "AVAILABILITY_DOMAIN_ANY" + ], + "enumDescriptions": [ + "", + "", + "" + ], "type": "string" }, - "family": { - "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035.", + "googleReferenceId": { + "description": "[Output Only] Google reference ID, to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.", "type": "string" }, - "guestOsFeatures": { - "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", - "items": { - "$ref": "GuestOsFeature" - }, - "type": "array" - }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", "type": "string" }, - "imageEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Encrypts the image using a customer-supplied encryption key.\n\nAfter you encrypt an image with a customer-supplied key, you must provide the same key if you use the image later (e.g. to create a disk from the image).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the image, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the image later." 
+ "interconnect": { + "description": "URL of the underlying Interconnect object that this attachment's traffic will traverse through.", + "type": "string" }, "kind": { - "default": "compute#image", - "description": "[Output Only] Type of the resource. Always compute#image for images.", + "default": "compute#interconnectAttachment", + "description": "[Output Only] Type of the resource. Always compute#interconnectAttachment for interconnect attachments.", "type": "string" }, "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this image, which is essentially a hash of the labels used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an image.", + "description": "A fingerprint for the labels being applied to this InterconnectAttachment, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an InterconnectAttachment.", "format": "byte", "type": "string" }, @@ -27910,121 +34189,83 @@ "additionalProperties": { "type": "string" }, - "description": "Labels to apply to this image. These can be later modified by the setLabels method.", + "description": "Labels to apply to this InterconnectAttachment resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", "type": "object" }, - "licenseCodes": { - "description": "Integer license codes indicating which licenses are attached to this image.", - "items": { - "format": "int64", - "type": "string" - }, - "type": "array" - }, - "licenses": { - "description": "Any applicable license URI.", - "items": { - "type": "string" - }, - "type": "array" - }, "name": { - "annotations": { - "required": [ - "compute.images.insert" - ] - }, - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "rawDisk": { - "description": "The parameters of the raw disk image.", - "properties": { - "containerType": { - "description": "The format used to encode and transmit the block device, which should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created.", - "enum": [ - "TAR" - ], - "enumDescriptions": [ - "" - ], - "type": "string" - }, - "sha1Checksum": { - "description": "An optional SHA1 checksum of the disk image before unpackaging; provided by the client when the disk image is created.", - "pattern": "[a-f0-9]{40}", - "type": "string" - }, - "source": { - "annotations": { - "required": [ - "compute.images.insert" - ] - }, - "description": "The full Google Cloud Storage URL where the disk image is stored. You must provide either this property or the sourceDisk property but not both.", - "type": "string" - } - }, - "type": "object" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "operationalStatus": { + "description": "[Output Only] The current status of whether or not this interconnect attachment is functional, which can take one of the following values: \n- OS_ACTIVE: The attachment has been turned up and is ready to use. \n- OS_UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete.", + "enum": [ + "OS_ACTIVE", + "OS_UNPROVISIONED" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" }, - "sourceDisk": { - "description": "URL of the source disk used to create this image. This can be a full or valid partial URL. You must provide either this property or the rawDisk.source property but not both to create an image. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk", + "pairingKey": { + "description": "[Output only for type PARTNER. Input only for PARTNER_PROVIDER. Not present for DEDICATED]. The opaque identifier of an PARTNER attachment used to initiate provisioning with a selected partner. Of the form \"XXXXX/region/domain\"", "type": "string" }, - "sourceDiskEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." - }, - "sourceDiskId": { - "description": "[Output Only] The ID value of the disk used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given disk name.", + "partnerAsn": { + "description": "Optional BGP ASN for the router supplied by a Layer 3 Partner if they configured BGP on behalf of the customer. Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED.", + "format": "int64", "type": "string" }, - "sourceImage": { - "description": "URL of the source image used to create this image. This can be a full or valid partial URL. 
You must provide exactly one of: \n- this property, or \n- the rawDisk.source property, or \n- the sourceDisk property in order to create an image.", - "type": "string" + "partnerMetadata": { + "$ref": "InterconnectAttachmentPartnerMetadata", + "description": "Informational metadata about Partner attachments from Partners to display to customers. Output only for for PARTNER type, mutable for PARTNER_PROVIDER, not available for DEDICATED." }, - "sourceImageEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key." + "privateInterconnectInfo": { + "$ref": "InterconnectAttachmentPrivateInfo", + "description": "[Output Only] Information specific to an InterconnectAttachment. This property is populated if the interconnect that this is attached to is of type DEDICATED." }, - "sourceImageId": { - "description": "[Output Only] The ID value of the image used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given image name.", + "region": { + "description": "[Output Only] URL of the region where the regional interconnect attachment resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" }, - "sourceSnapshot": { - "description": "URL of the source snapshot used to create this image. This can be a full or valid partial URL. You must provide exactly one of: \n- this property, or \n- the sourceImage property, or \n- the rawDisk.source property, or \n- the sourceDisk property in order to create an image.", + "router": { + "description": "URL of the Cloud Router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the network \u0026 region within which the Cloud Router is configured.", "type": "string" }, - "sourceSnapshotEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key." - }, - "sourceSnapshotId": { - "description": "[Output Only] The ID value of the snapshot used to create this image. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given snapshot name.", + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "sourceType": { - "default": "RAW", - "description": "The type of the image used to create this disk. The default and only value is RAW", + "state": { + "description": "[Output Only] The current state of this attachment's functionality. Enum values ACTIVE and UNPROVISIONED are shared by DEDICATED/PRIVATE, PARTNER, and PARTNER_PROVIDER interconnect attachments, while enum values PENDING_PARTNER, PARTNER_REQUEST_RECEIVED, and PENDING_CUSTOMER are used for only PARTNER and PARTNER_PROVIDER interconnect attachments. This state can take one of the following values: \n- ACTIVE: The attachment has been turned up and is ready to use. \n- UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete. \n- PENDING_PARTNER: A newly-created PARTNER attachment that has not yet been configured on the Partner side. 
\n- PARTNER_REQUEST_RECEIVED: A PARTNER attachment is in the process of provisioning after a PARTNER_PROVIDER attachment was created that references it. \n- PENDING_CUSTOMER: A PARTNER or PARTNER_PROVIDER attachment that is waiting for a customer to activate it. \n- DEFUNCT: The attachment was deleted externally and is no longer functional. This could be because the associated Interconnect was removed, or because the other side of a Partner attachment was deleted.", "enum": [ - "RAW" + "ACTIVE", + "DEFUNCT", + "PARTNER_REQUEST_RECEIVED", + "PENDING_CUSTOMER", + "PENDING_PARTNER", + "STATE_UNSPECIFIED", + "UNPROVISIONED" ], "enumDescriptions": [ + "", + "", + "", + "", + "", + "", "" ], "type": "string" }, - "status": { - "description": "[Output Only] The status of the image. An image can be used to create other resources, such as instances, only after the image has been successfully created and the status is set to READY. Possible values are FAILED, PENDING, or READY.", + "type": { + "description": "The type of interconnect attachment this is, which can take one of the following values: \n- DEDICATED: an attachment to a Dedicated Interconnect. \n- PARTNER: an attachment to a Partner Interconnect, created by the customer. \n- PARTNER_PROVIDER: an attachment to a Partner Interconnect, created by the partner.", "enum": [ - "FAILED", - "PENDING", - "READY" + "DEDICATED", + "PARTNER", + "PARTNER_PROVIDER" ], "enumDescriptions": [ "", @@ -28032,28 +34273,145 @@ "" ], "type": "string" + }, + "vlanTag8021q": { + "description": "The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. Only specified at creation time.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "InterconnectAttachmentAggregatedList": { + "id": "InterconnectAttachmentAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "InterconnectAttachmentsScopedList", + "description": "Name of the scope containing this set of interconnect attachments." + }, + "description": "A list of InterconnectAttachmentsScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#interconnectAttachmentAggregatedList", + "description": "[Output Only] Type of resource. Always compute#interconnectAttachmentAggregatedList for aggregated lists of interconnect attachments.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "ImageList": { - "description": "Contains a list of images.", - "id": "ImageList", + "InterconnectAttachmentList": { + "description": "Response to the list request, and contains a list of interconnect attachments.", + "id": "InterconnectAttachmentList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Image resources.", + "description": "A list of InterconnectAttachment resources.", "items": { - "$ref": "Image" + "$ref": "InterconnectAttachment" }, "type": "array" }, "kind": { - "default": "compute#imageList", - "description": "Type of resource.", + "default": "compute#interconnectAttachmentList", + "description": "[Output Only] Type of resource. Always compute#interconnectAttachmentList for lists of interconnect attachments.", "type": "string" }, "nextPageToken": { @@ -28148,152 +34506,229 @@ }, "type": "object" }, - "Instance": { - "description": "An Instance resource. (== resource_for beta.instances ==) (== resource_for v1.instances ==)", - "id": "Instance", + "InterconnectAttachmentPartnerMetadata": { + "description": "Informational metadata about Partner attachments from Partners to display to customers. 
These fields are propagated from PARTNER_PROVIDER attachments to their corresponding PARTNER attachments.", + "id": "InterconnectAttachmentPartnerMetadata", "properties": { - "allocationAffinity": { - "$ref": "AllocationAffinity", - "description": "The configuration of desired allocations which this Instance could consume capacity from." - }, - "canIpForward": { - "description": "Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. For more information, see Enabling IP Forwarding.", - "type": "boolean" - }, - "cpuPlatform": { - "description": "[Output Only] The CPU platform used by this instance.", + "interconnectName": { + "description": "Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner?s portal. For instance \"Chicago 1\". This value may be validated to match approved Partner values.", "type": "string" }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "partnerName": { + "description": "Plain text name of the Partner providing this attachment. This value may be validated to match approved Partner values.", "type": "string" }, - "deletionProtection": { - "description": "Whether the resource should be protected against deletion.", - "type": "boolean" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "portalUrl": { + "description": "URL of the Partner?s portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values.", "type": "string" - }, - "disks": { - "description": "Array of disks associated with this instance. Persistent disks must be created before you can assign them.", - "items": { - "$ref": "AttachedDisk" - }, - "type": "array" - }, - "displayDevice": { - "$ref": "DisplayDevice", - "description": "Enables display device for the instance." - }, - "guestAccelerators": { - "description": "A list of the type and count of accelerator cards attached to the instance.", + } + }, + "type": "object" + }, + "InterconnectAttachmentPrivateInfo": { + "description": "Information for an interconnect attachment when this belongs to an interconnect of type DEDICATED.", + "id": "InterconnectAttachmentPrivateInfo", + "properties": { + "tag8021q": { + "description": "[Output Only] 802.1q encapsulation tag to be used for traffic between Google and the customer, going to and from this network and region.", + "format": "uint32", + "type": "integer" + } + }, + "type": "object" + }, + "InterconnectAttachmentsScopedList": { + "id": "InterconnectAttachmentsScopedList", + "properties": { + "interconnectAttachments": { + "description": "A list of interconnect attachments contained in this scope.", "items": { - "$ref": "AcceleratorConfig" + "$ref": "InterconnectAttachment" }, "type": "array" }, - "hostname": { - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#instance", - "description": "[Output Only] Type of the resource. 
Always compute#instance for instances.", - "type": "string" - }, - "labelFingerprint": { - "description": "A fingerprint for this request, which is essentially a hash of the label's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make get() request to the instance.", - "format": "byte", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" + "warning": { + "description": "Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } }, - "description": "Labels to apply to this instance. These can be later modified by the setLabels method.", "type": "object" - }, - "machineType": { - "annotations": { - "required": [ - "compute.instances.insert" - ] - }, - "description": "Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/machine-type. This is provided by the client when the instance is created. For example, the following is a valid partial url to a predefined machine type:\nzones/us-central1-f/machineTypes/n1-standard-1\n\n\nTo create a custom machine type, provide a URL to a machine type in the following format, where CPUS is 1 or an even number up to 32 (2, 4, 6, ... 
24, etc), and MEMORY is the total memory for this instance. Memory must be a multiple of 256 MB and must be supplied in MB (e.g. 5 GB of memory is 5120 MB):\nzones/zone/machineTypes/custom-CPUS-MEMORY\n\n\nFor example: zones/us-central1-f/machineTypes/custom-4-5120 \n\nFor a full list of restrictions, read the Specifications for custom machine types.", + } + }, + "type": "object" + }, + "InterconnectCircuitInfo": { + "description": "Describes a single physical circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only. Next id: 4", + "id": "InterconnectCircuitInfo", + "properties": { + "customerDemarcId": { + "description": "Customer-side demarc ID for this circuit.", "type": "string" }, - "metadata": { - "$ref": "Metadata", - "description": "The metadata key/value pairs assigned to this instance. This includes custom metadata and predefined keys." - }, - "minCpuPlatform": { - "description": "Specifies a minimum CPU platform for the VM instance. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\".", + "googleCircuitId": { + "description": "Google-assigned unique ID for this circuit. Assigned at circuit turn-up.", "type": "string" }, - "name": { - "annotations": { - "required": [ - "compute.instances.insert" - ] - }, - "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "googleDemarcId": { + "description": "Google-side demarc ID for this circuit. Assigned at circuit turn-up and provided by Google to the customer in the LOA.", "type": "string" - }, - "networkInterfaces": { - "description": "An array of network configurations for this instance. These specify how interfaces are configured to interact with other network services, such as connecting to the internet. Multiple interfaces are supported per instance.", + } + }, + "type": "object" + }, + "InterconnectDiagnostics": { + "description": "Diagnostics information about interconnect, contains detailed and current technical information about Google?s side of the connection.", + "id": "InterconnectDiagnostics", + "properties": { + "arpCaches": { + "description": "A list of InterconnectDiagnostics.ARPEntry objects, describing individual neighbors currently seen by the Google router in the ARP cache for the Interconnect. This will be empty when the Interconnect is not bundled.", "items": { - "$ref": "NetworkInterface" + "$ref": "InterconnectDiagnosticsARPEntry" }, "type": "array" }, - "scheduling": { - "$ref": "Scheduling", - "description": "Sets the scheduling options for this instance." - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "serviceAccounts": { - "description": "A list of service accounts, with their specified scopes, authorized for this instance. Only one service account per VM instance is supported.\n\nService accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. 
See Service Accounts for more information.", + "links": { + "description": "A list of InterconnectDiagnostics.LinkStatus objects, describing the status for each link on the Interconnect.", "items": { - "$ref": "ServiceAccount" + "$ref": "InterconnectDiagnosticsLinkStatus" }, "type": "array" }, - "shieldedVmConfig": { - "$ref": "ShieldedVmConfig" + "macAddress": { + "description": "The MAC address of the Interconnect's bundle interface.", + "type": "string" + } + }, + "type": "object" + }, + "InterconnectDiagnosticsARPEntry": { + "description": "Describing the ARP neighbor entries seen on this link", + "id": "InterconnectDiagnosticsARPEntry", + "properties": { + "ipAddress": { + "description": "The IP address of this ARP neighbor.", + "type": "string" }, - "shieldedVmIntegrityPolicy": { - "$ref": "ShieldedVmIntegrityPolicy" + "macAddress": { + "description": "The MAC address of this ARP neighbor.", + "type": "string" + } + }, + "type": "object" + }, + "InterconnectDiagnosticsLinkLACPStatus": { + "id": "InterconnectDiagnosticsLinkLACPStatus", + "properties": { + "googleSystemId": { + "description": "System ID of the port on Google?s side of the LACP exchange.", + "type": "string" }, - "startRestricted": { - "description": "[Output Only] Whether a VM has been restricted for start because Compute Engine has detected suspicious activity.", - "type": "boolean" + "neighborSystemId": { + "description": "System ID of the port on the neighbor?s side of the LACP exchange.", + "type": "string" }, - "status": { - "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, STOPPED, SUSPENDING, SUSPENDED, and TERMINATED.", + "state": { + "description": "The state of a LACP link, which can take one of the following values: \n- ACTIVE: The link is configured and active within the bundle. \n- DETACHED: The link is not configured within the bundle. This means that the rest of the object should be empty.", "enum": [ - "PROVISIONING", - "RUNNING", - "STAGING", - "STOPPED", - "STOPPING", - "SUSPENDED", - "SUSPENDING", - "TERMINATED" + "ACTIVE", + "DETACHED" ], "enumDescriptions": [ "", - "", - "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "InterconnectDiagnosticsLinkOpticalPower": { + "id": "InterconnectDiagnosticsLinkOpticalPower", + "properties": { + "state": { + "description": "The status of the current value when compared to the warning and alarm levels for the receiving or transmitting transceiver. Possible states include: \n- OK: The value has not crossed a warning threshold. \n- LOW_WARNING: The value has crossed below the low warning threshold. \n- HIGH_WARNING: The value has crossed above the high warning threshold. \n- LOW_ALARM: The value has crossed below the low alarm threshold. \n- HIGH_ALARM: The value has crossed above the high alarm threshold.", + "enum": [ + "HIGH_ALARM", + "HIGH_WARNING", + "LOW_ALARM", + "LOW_WARNING", + "OK" + ], + "enumDescriptions": [ "", "", "", @@ -28302,39 +34737,64 @@ ], "type": "string" }, - "statusMessage": { - "description": "[Output Only] An optional, human-readable explanation of the status.", - "type": "string" + "value": { + "description": "Value of the current receiving or transmitting optical power, read in dBm. Take a known good optical value, give it a 10% margin and trigger warnings relative to that value. 
In general, a -7dBm warning and a -11dBm alarm are good optical value estimates for most links.", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, + "InterconnectDiagnosticsLinkStatus": { + "id": "InterconnectDiagnosticsLinkStatus", + "properties": { + "arpCaches": { + "description": "A list of InterconnectDiagnostics.ARPEntry objects, describing the ARP neighbor entries seen on this link. This will be empty if the link is bundled", + "items": { + "$ref": "InterconnectDiagnosticsARPEntry" + }, + "type": "array" }, - "tags": { - "$ref": "Tags", - "description": "Tags to apply to this instance. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during instance creation. The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035. Multiple tags can be specified via the 'tags.items' field." + "circuitId": { + "description": "The unique ID for this link assigned during turn up by Google.", + "type": "string" }, - "zone": { - "description": "[Output Only] URL of the zone where the instance resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "googleDemarc": { + "description": "The Demarc address assigned by Google and provided in the LoA.", "type": "string" + }, + "lacpStatus": { + "$ref": "InterconnectDiagnosticsLinkLACPStatus" + }, + "receivingOpticalPower": { + "$ref": "InterconnectDiagnosticsLinkOpticalPower", + "description": "An InterconnectDiagnostics.LinkOpticalPower object, describing the current value and status of the received light level." + }, + "transmittingOpticalPower": { + "$ref": "InterconnectDiagnosticsLinkOpticalPower", + "description": "An InterconnectDiagnostics.LinkOpticalPower object, describing the current value and status of the transmitted light level." } }, "type": "object" }, - "InstanceAggregatedList": { - "id": "InstanceAggregatedList", + "InterconnectList": { + "description": "Response to the list request, and contains a list of interconnects.", + "id": "InterconnectList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "InstancesScopedList", - "description": "[Output Only] Name of the scope containing this set of instances." + "description": "A list of Interconnect resources.", + "items": { + "$ref": "Interconnect" }, - "description": "A list of InstancesScopedList resources.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#instanceAggregatedList", - "description": "[Output Only] Type of resource. Always compute#instanceAggregatedList for aggregated lists of Instance resources.", + "default": "compute#interconnectList", + "description": "[Output Only] Type of resource. Always compute#interconnectList for lists of interconnects.", "type": "string" }, "nextPageToken": { @@ -28429,96 +34889,128 @@ }, "type": "object" }, - "InstanceGroup": { - "description": "InstanceGroups (== resource_for beta.instanceGroups ==) (== resource_for v1.instanceGroups ==) (== resource_for beta.regionInstanceGroups ==) (== resource_for v1.regionInstanceGroups ==)", - "id": "InstanceGroup", + "InterconnectLocation": { + "description": "Represents an InterconnectLocations resource. The InterconnectLocations resource describes the locations where you can connect to Google's networks. 
For more information, see Colocation Facilities.", + "id": "InterconnectLocation", "properties": { + "address": { + "description": "[Output Only] The postal address of the Point of Presence, each line in the address is separated by a newline character.", + "type": "string" + }, + "availabilityZone": { + "description": "[Output Only] Availability zone for this InterconnectLocation. Within a metropolitan area (metro), maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\".", + "type": "string" + }, + "city": { + "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\".", + "type": "string" + }, + "continent": { + "description": "[Output Only] Continent for this location, which can take one of the following values: \n- AFRICA \n- ASIA_PAC \n- EUROPE \n- NORTH_AMERICA \n- SOUTH_AMERICA", + "enum": [ + "AFRICA", + "ASIA_PAC", + "C_AFRICA", + "C_ASIA_PAC", + "C_EUROPE", + "C_NORTH_AMERICA", + "C_SOUTH_AMERICA", + "EUROPE", + "NORTH_AMERICA", + "SOUTH_AMERICA" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, "creationTimestamp": { - "description": "[Output Only] The creation timestamp for this instance group in RFC3339 text format.", + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "description": "[Output Only] An optional description of the resource.", "type": "string" }, - "fingerprint": { - "description": "[Output Only] The fingerprint of the named ports. The system uses this fingerprint to detect conflicts when multiple users change the named ports concurrently.", - "format": "byte", + "facilityProvider": { + "description": "[Output Only] The name of the provider for this facility (e.g., EQUINIX).", + "type": "string" + }, + "facilityProviderFacilityId": { + "description": "[Output Only] A provider-assigned Identifier for this facility (e.g., Ashburn-DC1).", "type": "string" }, "id": { - "description": "[Output Only] A unique identifier for this instance group, generated by the server.", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", "type": "string" }, "kind": { - "default": "compute#instanceGroup", - "description": "[Output Only] The resource type, which is always compute#instanceGroup for instance groups.", + "default": "compute#interconnectLocation", + "description": "[Output Only] Type of the resource. Always compute#interconnectLocation for interconnect locations.", "type": "string" }, "name": { - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - }, - "description": "The name of the instance group. The name must be 1-63 characters long, and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "description": "[Output Only] Name of the resource.", "type": "string" }, - "namedPorts": { - "description": "Assigns a name to a port number. For example: {name: \"http\", port: 80}\n\nThis allows the system to reference ports by the assigned name instead of a port number. Named ports can also contain multiple ports. 
For example: [{name: \"http\", port: 80},{name: \"http\", port: 8080}] \n\nNamed ports apply to all instances in this instance group.", + "peeringdbFacilityId": { + "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb).", + "type": "string" + }, + "regionInfos": { + "description": "[Output Only] A list of InterconnectLocation.RegionInfo objects, that describe parameters pertaining to the relation between this InterconnectLocation and various Google Cloud regions.", "items": { - "$ref": "NamedPort" + "$ref": "InterconnectLocationRegionInfo" }, "type": "array" }, - "network": { - "description": "The URL of the network to which all instances in the instance group belong.", - "type": "string" - }, - "region": { - "description": "[Output Only] The URL of the region where the instance group is located (for regional resources).", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] The URL for this instance group. The server generates this URL.", - "type": "string" - }, - "size": { - "description": "[Output Only] The total number of instances in the instance group.", - "format": "int32", - "type": "integer" - }, - "subnetwork": { - "description": "[Output Only] The URL of the subnetwork to which all instances in the instance group belong.", + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "zone": { - "description": "[Output Only] The URL of the zone where the instance group is located (for zonal resources).", + "status": { + "description": "[Output Only] The status of this InterconnectLocation, which can take one of the following values: \n- CLOSED: The InterconnectLocation is closed and is unavailable for provisioning new Interconnects. \n- AVAILABLE: The InterconnectLocation is available for provisioning new Interconnects.", + "enum": [ + "AVAILABLE", + "CLOSED" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" } }, "type": "object" }, - "InstanceGroupAggregatedList": { - "id": "InstanceGroupAggregatedList", + "InterconnectLocationList": { + "description": "Response to the list request, and contains a list of interconnect locations.", + "id": "InterconnectLocationList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "InstanceGroupsScopedList", - "description": "The name of the scope that contains this set of instance groups." + "description": "A list of InterconnectLocation resources.", + "items": { + "$ref": "InterconnectLocation" }, - "description": "A list of InstanceGroupsScopedList resources.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#instanceGroupAggregatedList", - "description": "[Output Only] The resource type, which is always compute#instanceGroupAggregatedList for aggregated lists of instance groups.", + "default": "compute#interconnectLocationList", + "description": "[Output Only] Type of resource. 
Always compute#interconnectLocationList for lists of interconnect locations.", "type": "string" }, "nextPageToken": { @@ -28613,26 +35105,290 @@ }, "type": "object" }, - "InstanceGroupList": { - "description": "A list of InstanceGroup resources.", - "id": "InstanceGroupList", + "InterconnectLocationRegionInfo": { + "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", + "id": "InterconnectLocationRegionInfo", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "expectedRttMs": { + "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", + "format": "int64", "type": "string" }, - "items": { - "description": "A list of InstanceGroup resources.", + "locationPresence": { + "description": "Identifies the network presence of this location.", + "enum": [ + "GLOBAL", + "LOCAL_REGION", + "LP_GLOBAL", + "LP_LOCAL_REGION" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "region": { + "description": "URL for the region of this location.", + "type": "string" + } + }, + "type": "object" + }, + "InterconnectOutageNotification": { + "description": "Description of a planned outage on this Interconnect. Next id: 9", + "id": "InterconnectOutageNotification", + "properties": { + "affectedCircuits": { + "description": "If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", "items": { - "$ref": "InstanceGroup" + "type": "string" }, "type": "array" }, + "description": { + "description": "A description about the purpose of the outage.", + "type": "string" + }, + "endTime": { + "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", + "format": "int64", + "type": "string" + }, + "issueType": { + "description": "Form this outage is expected to take, which can take one of the following values: \n- OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. \n- PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", + "enum": [ + "IT_OUTAGE", + "IT_PARTIAL_OUTAGE", + "OUTAGE", + "PARTIAL_OUTAGE" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "name": { + "description": "Unique identifier for this outage notification.", + "type": "string" + }, + "source": { + "description": "The party that generated this notification, which can take the following value: \n- GOOGLE: this notification as generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", + "enum": [ + "GOOGLE", + "NSRC_GOOGLE" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "startTime": { + "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", + "format": "int64", + "type": "string" + }, + "state": { + "description": "State of this notification, which can take one of the following values: \n- ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. \n- CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. 
Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", + "enum": [ + "ACTIVE", + "CANCELLED", + "NS_ACTIVE", + "NS_CANCELED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "InterconnectsGetDiagnosticsResponse": { + "description": "Response for the InterconnectsGetDiagnosticsRequest.", + "id": "InterconnectsGetDiagnosticsResponse", + "properties": { + "result": { + "$ref": "InterconnectDiagnostics" + } + }, + "type": "object" + }, + "License": { + "description": "A license resource.", + "id": "License", + "properties": { + "chargesUseFee": { + "description": "[Output Only] Deprecated. This field no longer reflects whether a license charges a usage fee.", + "type": "boolean" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional textual description of the resource; provided by the client when the resource is created.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, "kind": { - "default": "compute#instanceGroupList", - "description": "[Output Only] The resource type, which is always compute#instanceGroupList for instance group lists.", + "default": "compute#license", + "description": "[Output Only] Type of resource. Always compute#license for licenses.", + "type": "string" + }, + "licenseCode": { + "description": "[Output Only] The unique code used to attach this license to images, snapshots, and disks.", + "format": "uint64", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.images.insert" + ] + }, + "description": "Name of the resource. The name must be 1-63 characters long and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "resourceRequirements": { + "$ref": "LicenseResourceRequirements" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "transferable": { + "description": "If false, licenses will not be copied from the source resource when creating an image from a disk, disk from snapshot, or snapshot from disk.", + "type": "boolean" + } + }, + "type": "object" + }, + "LicenseCode": { + "id": "LicenseCode", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "[Output Only] Description of this License Code.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#licenseCode", + "description": "[Output Only] Type of resource. Always compute#licenseCode for licenses.", + "type": "string" + }, + "licenseAlias": { + "description": "[Output Only] URL and description aliases of Licenses with the same License Code.", + "items": { + "$ref": "LicenseCodeLicenseAlias" + }, + "type": "array" + }, + "name": { + "annotations": { + "required": [ + "compute.licenses.insert" + ] + }, + "description": "[Output Only] Name of the resource. 
The name is 1-20 characters long and must be a valid 64 bit integer.", + "pattern": "[0-9]{0,20}?", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "state": { + "description": "[Output Only] Current state of this License Code.", + "enum": [ + "DISABLED", + "ENABLED", + "RESTRICTED", + "STATE_UNSPECIFIED", + "TERMINATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "transferable": { + "description": "[Output Only] If true, the license will remain attached when creating images or snapshots from disks. Otherwise, the license is not transferred.", + "type": "boolean" + } + }, + "type": "object" + }, + "LicenseCodeLicenseAlias": { + "id": "LicenseCodeLicenseAlias", + "properties": { + "description": { + "description": "[Output Only] Description of this License Code.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] URL of license corresponding to this License Code.", + "type": "string" + } + }, + "type": "object" + }, + "LicenseResourceRequirements": { + "id": "LicenseResourceRequirements", + "properties": { + "minGuestCpuCount": { + "description": "Minimum number of guest cpus required to use the Instance. Enforced at Instance creation and Instance start.", + "format": "int32", + "type": "integer" + }, + "minMemoryMb": { + "description": "Minimum memory required to use the Instance. Enforced at Instance creation and Instance start.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "LicensesListResponse": { + "id": "LicensesListResponse", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, + "items": { + "description": "A list of License resources.", + "items": { + "$ref": "License" + }, + "type": "array" + }, "nextPageToken": { "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" @@ -28725,205 +35481,152 @@ }, "type": "object" }, - "InstanceGroupManager": { - "description": "An Instance Group Manager resource. (== resource_for beta.instanceGroupManagers ==) (== resource_for v1.instanceGroupManagers ==) (== resource_for beta.regionInstanceGroupManagers ==) (== resource_for v1.regionInstanceGroupManagers ==)", - "id": "InstanceGroupManager", + "LogConfig": { + "description": "Specifies what kind of log the caller must write", + "id": "LogConfig", "properties": { - "autoHealingPolicies": { - "description": "The autohealing policy for this managed instance group. You can specify only one value.", - "items": { - "$ref": "InstanceGroupManagerAutoHealingPolicy" - }, - "type": "array" - }, - "baseInstanceName": { - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - }, - "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. 
The base instance name must comply with RFC1035.", - "pattern": "[a-z][-a-z0-9]{0,57}", - "type": "string" - }, - "creationTimestamp": { - "description": "[Output Only] The creation timestamp for this managed instance group in RFC3339 text format.", - "type": "string" - }, - "currentActions": { - "$ref": "InstanceGroupManagerActionsSummary", - "description": "[Output Only] The list of instance actions and the number of instances in this managed instance group that are scheduled for each of those actions." + "cloudAudit": { + "$ref": "LogConfigCloudAuditOptions", + "description": "Cloud audit options." }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" + "counter": { + "$ref": "LogConfigCounterOptions", + "description": "Counter options." }, - "distributionPolicy": { - "$ref": "DistributionPolicy", - "description": "Policy specifying intended distribution of instances in regional managed instance group." + "dataAccess": { + "$ref": "LogConfigDataAccessOptions", + "description": "Data access options." + } + }, + "type": "object" + }, + "LogConfigCloudAuditOptions": { + "description": "Write a Cloud Audit log", + "id": "LogConfigCloudAuditOptions", + "properties": { + "authorizationLoggingOptions": { + "$ref": "AuthorizationLoggingOptions", + "description": "Information used by the Cloud Audit Logging pipeline." }, - "failoverAction": { - "description": "The action to perform in case of zone failure. Only one value is supported, NO_FAILOVER. The default is NO_FAILOVER.", + "logName": { + "description": "The log_name to populate in the Cloud Audit Record.", "enum": [ - "NO_FAILOVER", - "UNKNOWN" + "ADMIN_ACTIVITY", + "DATA_ACCESS", + "UNSPECIFIED_LOG_NAME" ], "enumDescriptions": [ + "", "", "" ], "type": "string" - }, - "fingerprint": { - "description": "Fingerprint of this resource. This field may be used in optimistic locking. It will be ignored when inserting an InstanceGroupManager. An up-to-date fingerprint must be provided in order to update the InstanceGroupManager, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an InstanceGroupManager.", - "format": "byte", - "type": "string" - }, - "id": { - "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier.", - "format": "uint64", - "type": "string" - }, - "instanceGroup": { - "description": "[Output Only] The URL of the Instance Group resource.", - "type": "string" - }, - "instanceTemplate": { - "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group.", - "type": "string" - }, - "kind": { - "default": "compute#instanceGroupManager", - "description": "[Output Only] The resource type, which is always compute#instanceGroupManager for managed instance groups.", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert", - "compute.regionInstanceGroupManagers.insert" - ] - }, - "description": "The name of the managed instance group. 
The name must be 1-63 characters long, and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "namedPorts": { - "description": "Named ports configured for the Instance Groups complementary to this Instance Group Manager.", - "items": { - "$ref": "NamedPort" - }, - "type": "array" - }, - "pendingActions": { - "$ref": "InstanceGroupManagerPendingActionsSummary", - "description": "[Output Only] The list of instance actions and the number of instances in this managed instance group that are pending for each of those actions." - }, - "region": { - "description": "[Output Only] The URL of the region where the managed instance group resides (for regional resources).", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] The URL for this managed instance group. The server defines this URL.", + } + }, + "type": "object" + }, + "LogConfigCounterOptions": { + "description": "Increment a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only, and end in \"_count\". Field names should not contain an initial slash. The actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are their respective values.\n\nSupported field names: - \"authority\", which is \"[token]\" if IAMContext.token is present, otherwise the value of IAMContext.authority_selector if present, and otherwise a representation of IAMContext.principal; or - \"iam_principal\", a representation of IAMContext.principal even if a token or authority selector is present; or - \"\" (empty string), resulting in a counter with no fields.\n\nExamples: counter { metric: \"/debug_access_count\" field: \"iam_principal\" } ==\u003e increment counter /iam/policy/backend_debug_access_count {iam_principal=[value of IAMContext.principal]}\n\nAt this time we do not support multiple field names (though this may be supported in the future).", + "id": "LogConfigCounterOptions", + "properties": { + "field": { + "description": "The field value to attribute.", "type": "string" }, - "serviceAccount": { - "description": "The service account to be used as credentials for all operations performed by the managed instance group on instances. The service accounts needs all permissions required to create and delete instances. By default, the service account {projectNumber}@cloudservices.gserviceaccount.com is used.", + "metric": { + "description": "The metric to update.", "type": "string" - }, - "status": { - "$ref": "InstanceGroupManagerStatus", - "description": "[Output Only] The status of this managed instance group." - }, - "targetPools": { - "description": "The URLs for all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.", - "items": { - "type": "string" - }, - "type": "array" - }, - "targetSize": { - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert", - "compute.regionInstanceGroupManagers.insert" - ] - }, - "description": "The target number of running instances for this managed instance group. Deleting or abandoning instances reduces this number. Resizing the group changes this number.", - "format": "int32", - "type": "integer" - }, - "updatePolicy": { - "$ref": "InstanceGroupManagerUpdatePolicy", - "description": "The update policy for this managed instance group." 
- }, - "versions": { - "description": "Specifies the instance templates used by this managed instance group to create instances.\n\nEach version is defined by an instanceTemplate. Every template can appear at most once per instance group. This field overrides the top-level instanceTemplate field. Read more about the relationships between these fields. Exactly one version must leave the targetSize field unset. That version will be applied to all remaining instances. For more information, read about canary updates.", - "items": { - "$ref": "InstanceGroupManagerVersion" - }, - "type": "array" - }, - "zone": { - "description": "[Output Only] The URL of the zone where the managed instance group is located (for zonal resources).", + } + }, + "type": "object" + }, + "LogConfigDataAccessOptions": { + "description": "Write a Data Access (Gin) log", + "id": "LogConfigDataAccessOptions", + "properties": { + "logMode": { + "description": "Whether Gin logging should happen in a fail-closed manner at the caller. This is relevant only in the LocalIAM implementation, for now.", + "enum": [ + "LOG_FAIL_CLOSED", + "LOG_MODE_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" } }, "type": "object" }, - "InstanceGroupManagerActionsSummary": { - "id": "InstanceGroupManagerActionsSummary", + "MachineType": { + "description": "A Machine Type resource. (== resource_for v1.machineTypes ==) (== resource_for beta.machineTypes ==)", + "id": "MachineType", "properties": { - "abandoning": { - "description": "[Output Only] The total number of instances in the managed instance group that are scheduled to be abandoned. Abandoning an instance removes it from the managed instance group without deleting it.", - "format": "int32", - "type": "integer" + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this machine type." + }, + "description": { + "description": "[Output Only] An optional textual description of the resource.", + "type": "string" }, - "creating": { - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be created or are currently being created. If the group fails to create any of these instances, it tries again until it creates the instance successfully.\n\nIf you have disabled creation retries, this field will not be populated; instead, the creatingWithoutRetries field will be populated.", + "guestCpus": { + "description": "[Output Only] The number of virtual CPUs that are available to the instance.", "format": "int32", "type": "integer" }, - "creatingWithoutRetries": { - "description": "[Output Only] The number of instances that the managed instance group will attempt to create. The group attempts to create each instance only once. If the group fails to create any of these instances, it decreases the group's targetSize value accordingly.", - "format": "int32", - "type": "integer" + "id": { + "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", + "format": "uint64", + "type": "string" }, - "deleting": { - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be deleted or are currently being deleted.", - "format": "int32", - "type": "integer" + "isSharedCpu": { + "description": "[Output Only] Whether this machine type has a shared CPU. See Shared-core machine types for more information.", + "type": "boolean" }, - "none": { - "description": "[Output Only] The number of instances in the managed instance group that are running and have no scheduled actions.", - "format": "int32", - "type": "integer" + "kind": { + "default": "compute#machineType", + "description": "[Output Only] The type of the resource. Always compute#machineType for machine types.", + "type": "string" }, - "recreating": { - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be recreated or are currently being being recreated. Recreating an instance deletes the existing root persistent disk and creates a new disk from the image that is defined in the instance template.", + "maximumPersistentDisks": { + "description": "[Output Only] Maximum persistent disks allowed.", "format": "int32", "type": "integer" }, - "refreshing": { - "description": "[Output Only] The number of instances in the managed instance group that are being reconfigured with properties that do not require a restart or a recreate action. For example, setting or removing target pools for the instance.", - "format": "int32", - "type": "integer" + "maximumPersistentDisksSizeGb": { + "description": "[Output Only] Maximum total persistent disks size (GB) allowed.", + "format": "int64", + "type": "string" }, - "restarting": { - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be restarted or are currently being restarted.", + "memoryMb": { + "description": "[Output Only] The amount of physical memory available to the instance, defined in MB.", "format": "int32", "type": "integer" }, - "verifying": { - "description": "[Output Only] The number of instances in the managed instance group that are being verified. See the managedInstances[].currentAction property in the listManagedInstances method documentation.", - "format": "int32", - "type": "integer" + "name": { + "description": "[Output Only] Name of the resource.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "zone": { + "description": "[Output Only] The name of the zone where the machine type resides, such as us-central1-a.", + "type": "string" } }, "type": "object" }, - "InstanceGroupManagerAggregatedList": { - "id": "InstanceGroupManagerAggregatedList", + "MachineTypeAggregatedList": { + "id": "MachineTypeAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", @@ -28931,15 +35634,15 @@ }, "items": { "additionalProperties": { - "$ref": "InstanceGroupManagersScopedList", - "description": "[Output Only] The name of the scope that contains this set of managed instance groups." + "$ref": "MachineTypesScopedList", + "description": "[Output Only] Name of the scope containing this set of machine types." 
}, - "description": "A list of InstanceGroupManagersScopedList resources.", + "description": "A list of MachineTypesScopedList resources.", "type": "object" }, "kind": { - "default": "compute#instanceGroupManagerAggregatedList", - "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerAggregatedList for an aggregated list of managed instance groups.", + "default": "compute#machineTypeAggregatedList", + "description": "[Output Only] Type of resource. Always compute#machineTypeAggregatedList for aggregated lists of machine types.", "type": "string" }, "nextPageToken": { @@ -29034,40 +35737,24 @@ }, "type": "object" }, - "InstanceGroupManagerAutoHealingPolicy": { - "description": "", - "id": "InstanceGroupManagerAutoHealingPolicy", - "properties": { - "healthCheck": { - "description": "The URL for the health check that signals autohealing.", - "type": "string" - }, - "initialDelaySec": { - "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600].", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "InstanceGroupManagerList": { - "description": "[Output Only] A list of managed instance groups.", - "id": "InstanceGroupManagerList", + "MachineTypeList": { + "description": "Contains a list of machine types.", + "id": "MachineTypeList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of InstanceGroupManager resources.", + "description": "A list of MachineType resources.", "items": { - "$ref": "InstanceGroupManager" + "$ref": "MachineType" }, "type": "array" }, "kind": { - "default": "compute#instanceGroupManagerList", - "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups.", + "default": "compute#machineTypeList", + "description": "[Output Only] Type of resource. 
Always compute#machineTypeList for lists of machine types.", "type": "string" }, "nextPageToken": { @@ -29162,184 +35849,18 @@ }, "type": "object" }, - "InstanceGroupManagerPendingActionsSummary": { - "id": "InstanceGroupManagerPendingActionsSummary", - "properties": { - "creating": { - "description": "[Output Only] The number of instances in the managed instance group that are pending to be created.", - "format": "int32", - "type": "integer" - }, - "deleting": { - "description": "[Output Only] The number of instances in the managed instance group that are pending to be deleted.", - "format": "int32", - "type": "integer" - }, - "recreating": { - "description": "[Output Only] The number of instances in the managed instance group that are pending to be recreated.", - "format": "int32", - "type": "integer" - }, - "restarting": { - "description": "[Output Only] The number of instances in the managed instance group that are pending to be restarted.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "InstanceGroupManagerStatus": { - "id": "InstanceGroupManagerStatus", - "properties": { - "isStable": { - "description": "[Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.", - "type": "boolean" - } - }, - "type": "object" - }, - "InstanceGroupManagerUpdatePolicy": { - "id": "InstanceGroupManagerUpdatePolicy", - "properties": { - "maxSurge": { - "$ref": "FixedOrPercent", - "description": "The maximum number of instances that can be created above the specified targetSize during the update process. By default, a fixed value of 1 is used. This value can be either a fixed number or a percentage if the instance group has 10 or more instances. If you set a percentage, the number of instances will be rounded up if necessary.\n\nAt least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge." - }, - "maxUnavailable": { - "$ref": "FixedOrPercent", - "description": "The maximum number of instances that can be unavailable during the update process. An instance is considered available if all of the following conditions are satisfied:\n\n \n- The instance's status is RUNNING. \n- If there is a health check on the instance group, the instance's liveness health check result must be HEALTHY at least once. If there is no health check on the group, then the instance only needs to have a status of RUNNING to be considered available. By default, a fixed value of 1 is used. This value can be either a fixed number or a percentage if the instance group has 10 or more instances. If you set a percentage, the number of instances will be rounded up if necessary.\n\nAt least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxUnavailable." - }, - "minReadySec": { - "description": "Minimum number of seconds to wait for after a newly created instance becomes available. This value must be from range [0, 3600].", - "format": "int32", - "type": "integer" - }, - "minimalAction": { - "description": "Minimal action to be taken on an instance. You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. 
If you specify a RESTART, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action.", - "enum": [ - "REPLACE", - "RESTART" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "type": { - "enum": [ - "OPPORTUNISTIC", - "PROACTIVE" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "InstanceGroupManagerVersion": { - "id": "InstanceGroupManagerVersion", - "properties": { - "instanceTemplate": { - "type": "string" - }, - "name": { - "description": "Name of the version. Unique among all versions in the scope of this managed instance group.", - "type": "string" - }, - "targetSize": { - "$ref": "FixedOrPercent", - "description": "Specifies the intended number of instances to be created from the instanceTemplate. The final number of instances created from the template will be equal to: \n- If expressed as a fixed number, the minimum of either targetSize.fixed or instanceGroupManager.targetSize is used. \n- if expressed as a percent, the targetSize would be (targetSize.percent/100 * InstanceGroupManager.targetSize) If there is a remainder, the number is rounded up. If unset, this version will update any remaining instances not updated by another version. Read Starting a canary update for more information." - } - }, - "type": "object" - }, - "InstanceGroupManagersAbandonInstancesRequest": { - "id": "InstanceGroupManagersAbandonInstancesRequest", - "properties": { - "instances": { - "description": "The URLs of one or more instances to abandon. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "InstanceGroupManagersDeleteInstancesRequest": { - "id": "InstanceGroupManagersDeleteInstancesRequest", - "properties": { - "instances": { - "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "InstanceGroupManagersListManagedInstancesResponse": { - "id": "InstanceGroupManagersListManagedInstancesResponse", - "properties": { - "managedInstances": { - "description": "[Output Only] The list of instances in the managed instance group.", - "items": { - "$ref": "ManagedInstance" - }, - "type": "array" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - } - }, - "type": "object" - }, - "InstanceGroupManagersRecreateInstancesRequest": { - "id": "InstanceGroupManagersRecreateInstancesRequest", - "properties": { - "instances": { - "description": "The URLs of one or more instances to recreate. 
This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "InstanceGroupManagersResizeAdvancedRequest": { - "id": "InstanceGroupManagersResizeAdvancedRequest", - "properties": { - "noCreationRetries": { - "description": "If this flag is true, the managed instance group attempts to create all instances initiated by this resize request only once. If there is an error during creation, the managed instance group does not retry create this instance, and we will decrease the targetSize of the request instead. If the flag is false, the group attempts to recreate each instance continuously until it succeeds.\n\nThis flag matters only in the first attempt of creation of an instance. After an instance is successfully created while this flag is enabled, the instance behaves the same way as all the other instances created with a regular resize request. In particular, if a running instance dies unexpectedly at a later time and needs to be recreated, this mode does not affect the recreation behavior in that scenario.\n\nThis flag is applicable only to the current resize request. It does not influence other resize requests in any way.\n\nYou can see which instances is being creating in which mode by calling the get or listManagedInstances API.", - "type": "boolean" - }, - "targetSize": { - "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "InstanceGroupManagersScopedList": { - "id": "InstanceGroupManagersScopedList", + "MachineTypesScopedList": { + "id": "MachineTypesScopedList", "properties": { - "instanceGroupManagers": { - "description": "[Output Only] The list of managed instance groups that are contained in the specified project and zone.", + "machineTypes": { + "description": "[Output Only] A list of machine types contained in this scope.", "items": { - "$ref": "InstanceGroupManager" + "$ref": "MachineType" }, "type": "array" }, "warning": { - "description": "[Output Only] The warning that replaces the list of managed instance groups when the list is empty.", + "description": "[Output Only] An informational warning that appears when the machine types list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -29422,180 +35943,196 @@ }, "type": "object" }, - "InstanceGroupManagersSetAutoHealingRequest": { - "id": "InstanceGroupManagersSetAutoHealingRequest", - "properties": { - "autoHealingPolicies": { - "items": { - "$ref": "InstanceGroupManagerAutoHealingPolicy" - }, - "type": "array" - } - }, - "type": "object" - }, - "InstanceGroupManagersSetInstanceTemplateRequest": { - "id": "InstanceGroupManagersSetInstanceTemplateRequest", - "properties": { - "instanceTemplate": { - "description": "The URL of the instance template that is specified for this managed instance group. 
The group uses this template to create all new instances in the managed instance group.", - "type": "string" - } - }, - "type": "object" - }, - "InstanceGroupManagersSetTargetPoolsRequest": { - "id": "InstanceGroupManagersSetTargetPoolsRequest", + "ManagedInstance": { + "description": "A Managed Instance resource.", + "id": "ManagedInstance", "properties": { - "fingerprint": { - "description": "The fingerprint of the target pools information. Use this optional property to prevent conflicts when multiple users change the target pools settings concurrently. Obtain the fingerprint with the instanceGroupManagers.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", - "format": "byte", + "currentAction": { + "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: \n- NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. \n- CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. \n- CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's targetSize value is decreased instead. \n- RECREATING The managed instance group is recreating this instance. \n- DELETING The managed instance group is permanently deleting this instance. \n- ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. \n- RESTARTING The managed instance group is restarting the instance. \n- REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance. \n- VERIFYING The managed instance group has created the instance and it is in the process of being verified.", + "enum": [ + "ABANDONING", + "CREATING", + "CREATING_WITHOUT_RETRIES", + "DELETING", + "NONE", + "RECREATING", + "REFRESHING", + "RESTARTING", + "VERIFYING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], "type": "string" }, - "targetPools": { - "description": "The list of target pool URLs that instances in this managed instance group belong to. The managed instance group applies these target pools to all of the instances in the group. Existing instances and new instances in the group all receive these target pool settings.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "InstanceGroupsAddInstancesRequest": { - "id": "InstanceGroupsAddInstancesRequest", - "properties": { - "instances": { - "description": "The list of instances to add to the instance group.", - "items": { - "$ref": "InstanceReference" - }, - "type": "array" - } - }, - "type": "object" - }, - "InstanceGroupsListInstances": { - "id": "InstanceGroupsListInstances", - "properties": { "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "description": "[Output only] The unique identifier for this resource. 
This field is empty when instance does not exist.", + "format": "uint64", "type": "string" }, - "items": { - "description": "A list of InstanceWithNamedPorts resources.", - "items": { - "$ref": "InstanceWithNamedPorts" - }, - "type": "array" - }, - "kind": { - "default": "compute#instanceGroupsListInstances", - "description": "[Output Only] The resource type, which is always compute#instanceGroupsListInstances for the list of instances in the specified instance group.", + "instance": { + "description": "[Output Only] The URL of the instance. The URL can exist even if the instance has not yet been created.", "type": "string" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "instanceStatus": { + "description": "[Output Only] The status of the instance. This field is empty when the instance does not exist.", + "enum": [ + "PROVISIONING", + "REPAIRING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" + "lastAttempt": { + "$ref": "ManagedInstanceLastAttempt", + "description": "[Output Only] Information about the last attempt to create or delete the instance." }, - "warning": { - "description": "[Output Only] Informational warning message.", + "version": { + "$ref": "ManagedInstanceVersion", + "description": "[Output Only] Intended version of this instance." + } + }, + "type": "object" + }, + "ManagedInstanceLastAttempt": { + "id": "ManagedInstanceLastAttempt", + "properties": { + "errors": { + "description": "[Output Only] Encountered errors during the last attempt to create or delete the instance.", "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "errors": { + "description": "[Output Only] The array of errors encountered while processing this operation.", "items": { "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "code": { + "description": "[Output Only] The error type identifier for this error.", "type": "string" }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", + "location": { + "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional.", + "type": "string" + }, + "message": { + "description": "[Output Only] An optional, human-readable error message.", "type": "string" } }, - "type": "object" + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ManagedInstanceVersion": { + "id": "ManagedInstanceVersion", + "properties": { + "instanceTemplate": { + "description": "[Output Only] The intended template of the instance. This field is empty when current_action is one of { DELETING, ABANDONING }.", + "type": "string" + }, + "name": { + "description": "[Output Only] Name of the version.", + "type": "string" + } + }, + "type": "object" + }, + "Metadata": { + "description": "A metadata key/value entry.", + "id": "Metadata", + "properties": { + "fingerprint": { + "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve the resource.", + "format": "byte", + "type": "string" + }, + "items": { + "description": "Array of key/value pairs. The total size of all keys and values must be less than 512 KB.", + "items": { + "properties": { + "key": { + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + }, + "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.", + "pattern": "[a-zA-Z0-9-_]{1,128}", + "type": "string" }, - "type": "array" + "value": { + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + }, + "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. 
The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB).", + "type": "string" + } }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } + "type": "object" }, - "type": "object" + "type": "array" + }, + "kind": { + "default": "compute#metadata", + "description": "[Output Only] Type of the resource. Always compute#metadata for metadata.", + "type": "string" } }, "type": "object" }, - "InstanceGroupsListInstancesRequest": { - "id": "InstanceGroupsListInstancesRequest", + "MetadataFilter": { + "description": "Opaque filter criteria used by loadbalancers to restrict routing configuration to a limited set of loadbalancing proxies. Proxies and sidecars involved in loadbalancing would typically present metadata to the loadbalancers which need to match criteria specified here. If a match takes place, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels in the provided metadata.\nAn example for using metadataFilters would be: if loadbalancing involves Envoys, they will only receive routing configuration when values in metadataFilters match values supplied in \u003ca href=\"https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/base.proto#envoy-api-msg-core-node\" Node metadata of their XDS requests to loadbalancers.", + "id": "MetadataFilter", "properties": { - "instanceState": { - "description": "A filter for the state of the instances in the instance group. Valid options are ALL or RUNNING. If you do not specify this parameter the list includes all instances regardless of their state.", + "filterLabels": { + "description": "The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria \nThis list must not be empty and can have at the most 64 entries.", + "items": { + "$ref": "MetadataFilterLabelMatch" + }, + "type": "array" + }, + "filterMatchCriteria": { + "description": "Specifies how individual filterLabel matches within the list of filterLabels contribute towards the overall metadataFilter match.\nSupported values are: \n- MATCH_ANY: At least one of the filterLabels must have a matching label in the provided metadata. 
\n- MATCH_ALL: All filterLabels must have matching labels in the provided metadata.", "enum": [ - "ALL", - "RUNNING" + "MATCH_ALL", + "MATCH_ANY", + "NOT_SET" ], "enumDescriptions": [ + "", "", "" ], @@ -29604,149 +36141,215 @@ }, "type": "object" }, - "InstanceGroupsRemoveInstancesRequest": { - "id": "InstanceGroupsRemoveInstancesRequest", + "MetadataFilterLabelMatch": { + "description": "MetadataFilter label name value pairs that are expected to match corresponding labels presented as metadata to the loadbalancer.", + "id": "MetadataFilterLabelMatch", "properties": { - "instances": { - "description": "The list of instances to remove from the instance group.", - "items": { - "$ref": "InstanceReference" - }, - "type": "array" + "name": { + "description": "Name of metadata label.\nThe name can have a maximum length of 1024 characters and must be at least 1 character long.", + "type": "string" + }, + "value": { + "description": "The value of the label must match the specified value.\nvalue can have a maximum length of 1024 characters.", + "type": "string" } }, "type": "object" }, - "InstanceGroupsScopedList": { - "id": "InstanceGroupsScopedList", + "NamedPort": { + "description": "The named port. For example: .", + "id": "NamedPort", "properties": { - "instanceGroups": { - "description": "[Output Only] The list of instance groups that are contained in this scope.", + "name": { + "description": "The name for this named port. The name must be 1-63 characters long, and comply with RFC1035.", + "type": "string" + }, + "port": { + "description": "The port number, which can be a value between 1 and 65535.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "Network": { + "description": "Represents a Network resource. Read Virtual Private Cloud (VPC) Network Overview for more information. (== resource_for v1.networks ==) (== resource_for beta.networks ==)", + "id": "Network", + "properties": { + "IPv4Range": { + "description": "Deprecated in favor of subnet mode networks. The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}", + "type": "string" + }, + "autoCreateSubnetworks": { + "description": "When set to true, the VPC network is created in \"auto\" mode. When set to false, the VPC network is created in \"custom\" mode.\n\nAn auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges.", + "type": "boolean" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "gatewayIPv4": { + "description": "[Output Only] The gateway address for default routing out of the network. This value is read only and is selected by GCP.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#network", + "description": "[Output Only] Type of the resource. 
Always compute#network for networks.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.networks.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "peerings": { + "description": "[Output Only] A list of network peerings for the resource.", "items": { - "$ref": "InstanceGroup" + "$ref": "NetworkPeering" }, "type": "array" }, - "warning": { - "description": "[Output Only] An informational warning that replaces the list of instance groups when the list is empty.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } + "routingConfig": { + "$ref": "NetworkRoutingConfig", + "description": "The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce." 
+ }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "subnetworks": { + "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this VPC network.", + "items": { + "type": "string" }, - "type": "object" + "type": "array" } }, "type": "object" }, - "InstanceGroupsSetNamedPortsRequest": { - "id": "InstanceGroupsSetNamedPortsRequest", + "NetworkEndpoint": { + "description": "The network endpoint.", + "id": "NetworkEndpoint", "properties": { - "fingerprint": { - "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request. A request with an incorrect fingerprint will fail with error 412 conditionNotMet.", - "format": "byte", + "instance": { + "description": "The name for a specific VM instance that the IP address belongs to. This is required for network endpoints of type GCE_VM_IP_PORT. The instance must be in the same zone of network endpoint group.\n\nThe name must be 1-63 characters long, and comply with RFC1035.", "type": "string" }, - "namedPorts": { - "description": "The list of named ports to set for this instance group.", - "items": { - "$ref": "NamedPort" - }, - "type": "array" + "ipAddress": { + "description": "Optional IPv4 address of network endpoint. The IP address must belong to a VM in GCE (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used.", + "type": "string" + }, + "port": { + "description": "Optional port number of network endpoint. If not specified and the NetworkEndpointGroup.network_endpoint_type is GCE_IP_PORT, the defaultPort for the network endpoint group will be used.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "InstanceList": { - "description": "Contains a list of instances.", - "id": "InstanceList", + "NetworkEndpointGroup": { + "description": "Represents a collection of network endpoints.", + "id": "NetworkEndpointGroup", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "defaultPort": { + "description": "The default port used if the port number is not specified in the network endpoint.", + "format": "int32", + "type": "integer" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#networkEndpointGroup", + "description": "[Output Only] Type of the resource. Always compute#networkEndpointGroup for network endpoint group.", + "type": "string" + }, + "loadBalancer": { + "$ref": "NetworkEndpointGroupLbNetworkEndpointGroup", + "description": "This field is only valid when the network endpoint group is used for load balancing. [Deprecated] This field is deprecated." 
+ }, + "name": { + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "type": "string" + }, + "network": { + "description": "The URL of the network to which all network endpoints in the NEG belong. Uses \"default\" project network if unspecified.", + "type": "string" + }, + "networkEndpointType": { + "description": "Type of network endpoints in this network endpoint group. Currently the only supported value is GCE_VM_IP_PORT.", + "enum": [ + "GCE_VM_IP_PORT" + ], + "enumDescriptions": [ + "" + ], + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "size": { + "description": "[Output only] Number of network endpoints in the network endpoint group.", + "format": "int32", + "type": "integer" + }, + "subnetwork": { + "description": "Optional URL of the subnetwork to which all network endpoints in the NEG belong.", + "type": "string" + }, + "zone": { + "description": "[Output Only] The URL of the zone where the network endpoint group is located.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupAggregatedList": { + "id": "NetworkEndpointGroupAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Instance resources.", - "items": { - "$ref": "Instance" + "additionalProperties": { + "$ref": "NetworkEndpointGroupsScopedList", + "description": "The name of the scope that contains this set of network endpoint groups." }, - "type": "array" + "description": "A list of NetworkEndpointGroupsScopedList resources.", + "type": "object" }, "kind": { - "default": "compute#instanceList", - "description": "[Output Only] Type of resource. Always compute#instanceList for lists of Instance resources.", + "default": "compute#networkEndpointGroupAggregatedList", + "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupAggregatedList for aggregated lists of network endpoint groups.", "type": "string" }, "nextPageToken": { @@ -29841,24 +36444,47 @@ }, "type": "object" }, - "InstanceListReferrers": { - "description": "Contains a list of instance referrers.", - "id": "InstanceListReferrers", + "NetworkEndpointGroupLbNetworkEndpointGroup": { + "description": "Load balancing specific fields for network endpoint group.", + "id": "NetworkEndpointGroupLbNetworkEndpointGroup", + "properties": { + "defaultPort": { + "description": "The default port used if the port number is not specified in the network endpoint. [Deprecated] This field is deprecated.", + "format": "int32", + "type": "integer" + }, + "network": { + "description": "The URL of the network to which all network endpoints in the NEG belong. Uses \"default\" project network if unspecified. [Deprecated] This field is deprecated.", + "type": "string" + }, + "subnetwork": { + "description": "Optional URL of the subnetwork to which all network endpoints in the NEG belong. 
[Deprecated] This field is deprecated.", + "type": "string" + }, + "zone": { + "description": "[Output Only] The URL of the zone where the network endpoint group is located. [Deprecated] This field is deprecated.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupList": { + "id": "NetworkEndpointGroupList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Reference resources.", + "description": "A list of NetworkEndpointGroup resources.", "items": { - "$ref": "Reference" + "$ref": "NetworkEndpointGroup" }, "type": "array" }, "kind": { - "default": "compute#instanceListReferrers", - "description": "[Output Only] Type of resource. Always compute#instanceListReferrers for lists of Instance referrers.", + "default": "compute#networkEndpointGroupList", + "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupList for network endpoint group lists.", "type": "string" }, "nextPageToken": { @@ -29953,192 +36579,73 @@ }, "type": "object" }, - "InstanceMoveRequest": { - "id": "InstanceMoveRequest", - "properties": { - "destinationZone": { - "description": "The URL of the destination zone to move the instance. This can be a full or partial URL. For example, the following are all valid URLs to a zone: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone \n- projects/project/zones/zone \n- zones/zone", - "type": "string" - }, - "targetInstance": { - "description": "The URL of the target instance to move. This can be a full or partial URL. For example, the following are all valid URLs to an instance: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance \n- zones/zone/instances/instance", - "type": "string" - } - }, - "type": "object" - }, - "InstanceProperties": { - "description": "", - "id": "InstanceProperties", - "properties": { - "allocationAffinity": { - "$ref": "AllocationAffinity", - "description": "The configuration of desired allocations which this Instance could consume capacity from." - }, - "canIpForward": { - "description": "Enables instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. 
See the Enable IP forwarding documentation for more information.", - "type": "boolean" - }, - "description": { - "description": "An optional text description for the instances that are created from this instance template.", - "type": "string" - }, - "disks": { - "description": "An array of disks that are associated with the instances that are created from this template.", - "items": { - "$ref": "AttachedDisk" - }, - "type": "array" - }, - "guestAccelerators": { - "description": "A list of guest accelerator cards' type and count to use for instances created from the instance template.", - "items": { - "$ref": "AcceleratorConfig" - }, - "type": "array" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to apply to instances that are created from this template.", - "type": "object" - }, - "machineType": { - "annotations": { - "required": [ - "compute.instanceTemplates.insert" - ] - }, - "description": "The machine type to use for instances that are created from this template.", - "type": "string" - }, - "metadata": { - "$ref": "Metadata", - "description": "The metadata key/value pairs to assign to instances that are created from this template. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." - }, - "minCpuPlatform": { - "description": "Minimum cpu/platform to be used by this instance. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". For more information, read Specifying a Minimum CPU Platform.", - "type": "string" - }, - "networkInterfaces": { - "description": "An array of network access configurations for this interface.", - "items": { - "$ref": "NetworkInterface" - }, - "type": "array" - }, - "scheduling": { - "$ref": "Scheduling", - "description": "Specifies the scheduling options for the instances that are created from this template." - }, - "serviceAccounts": { - "description": "A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from this template. Use metadata queries to obtain the access tokens for these instances.", - "items": { - "$ref": "ServiceAccount" - }, - "type": "array" - }, - "shieldedVmConfig": { - "$ref": "ShieldedVmConfig", - "description": "Specifies the Shielded VM options for the instances that are created from this template." - }, - "tags": { - "$ref": "Tags", - "description": "A list of tags to apply to the instances that are created from this template. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." - } - }, - "type": "object" - }, - "InstanceReference": { - "id": "InstanceReference", - "properties": { - "instance": { - "description": "The URL for a specific instance.", - "type": "string" - } - }, - "type": "object" - }, - "InstanceTemplate": { - "description": "An Instance Template resource. (== resource_for beta.instanceTemplates ==) (== resource_for v1.instanceTemplates ==)", - "id": "InstanceTemplate", - "properties": { - "creationTimestamp": { - "description": "[Output Only] The creation timestamp for this instance template in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. 
Provide this property when you create the resource.", - "type": "string" - }, - "id": { - "description": "[Output Only] A unique identifier for this instance template. The server defines this identifier.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#instanceTemplate", - "description": "[Output Only] The resource type, which is always compute#instanceTemplate for instance templates.", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "compute.instanceTemplates.insert" - ] + "NetworkEndpointGroupsAttachEndpointsRequest": { + "id": "NetworkEndpointGroupsAttachEndpointsRequest", + "properties": { + "networkEndpoints": { + "description": "The list of network endpoints to be attached.", + "items": { + "$ref": "NetworkEndpoint" }, - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "properties": { - "$ref": "InstanceProperties", - "description": "The instance properties for this instance template." - }, - "selfLink": { - "description": "[Output Only] The URL for this instance template. The server defines this URL.", - "type": "string" - }, - "sourceInstance": { - "description": "The source instance used to create the template. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance", + "type": "array" + } + }, + "type": "object" + }, + "NetworkEndpointGroupsDetachEndpointsRequest": { + "id": "NetworkEndpointGroupsDetachEndpointsRequest", + "properties": { + "networkEndpoints": { + "description": "The list of network endpoints to be detached.", + "items": { + "$ref": "NetworkEndpoint" + }, + "type": "array" + } + }, + "type": "object" + }, + "NetworkEndpointGroupsListEndpointsRequest": { + "id": "NetworkEndpointGroupsListEndpointsRequest", + "properties": { + "healthStatus": { + "description": "Optional query parameter for showing the health status of each network endpoint. Valid options are SKIP or SHOW. If you don't specifiy this parameter, the health status of network endpoints will not be provided.", + "enum": [ + "SHOW", + "SKIP" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" - }, - "sourceInstanceParams": { - "$ref": "SourceInstanceParams", - "description": "The source instance params to use to create this instance template." 
} }, "type": "object" }, - "InstanceTemplateList": { - "description": "A list of instance templates.", - "id": "InstanceTemplateList", + "NetworkEndpointGroupsListNetworkEndpoints": { + "id": "NetworkEndpointGroupsListNetworkEndpoints", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of InstanceTemplate resources.", + "description": "A list of NetworkEndpointWithHealthStatus resources.", "items": { - "$ref": "InstanceTemplate" + "$ref": "NetworkEndpointWithHealthStatus" }, "type": "array" }, "kind": { - "default": "compute#instanceTemplateList", - "description": "[Output Only] The resource type, which is always compute#instanceTemplatesListResponse for instance template lists.", + "default": "compute#networkEndpointGroupsListNetworkEndpoints", + "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupsListNetworkEndpoints for the list of network endpoints in the specified network endpoint group.", "type": "string" }, "nextPageToken": { "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -30223,76 +36730,18 @@ }, "type": "object" }, - "InstanceWithNamedPorts": { - "id": "InstanceWithNamedPorts", - "properties": { - "instance": { - "description": "[Output Only] The URL of the instance.", - "type": "string" - }, - "namedPorts": { - "description": "[Output Only] The named ports that belong to this instance group.", - "items": { - "$ref": "NamedPort" - }, - "type": "array" - }, - "status": { - "description": "[Output Only] The status of the instance.", - "enum": [ - "PROVISIONING", - "RUNNING", - "STAGING", - "STOPPED", - "STOPPING", - "SUSPENDED", - "SUSPENDING", - "TERMINATED" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "InstancesResumeRequest": { - "id": "InstancesResumeRequest", - "properties": { - "disks": { - "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key.\n\nIn order to resume the instance, the disk url and its corresponding key must be provided.\n\nIf the disk is not protected with a customer-supplied encryption key it should not be specified.", - "items": { - "$ref": "CustomerEncryptionKeyProtectedDisk" - }, - "type": "array" - }, - "instanceEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Decrypts data associated with an instance that is protected with a customer-supplied encryption key.\n\nIf the instance you are starting is protected with a customer-supplied encryption key, the correct key must be provided otherwise the instance resume will not succeed." 
- } - }, - "type": "object" - }, - "InstancesScopedList": { - "id": "InstancesScopedList", + "NetworkEndpointGroupsScopedList": { + "id": "NetworkEndpointGroupsScopedList", "properties": { - "instances": { - "description": "[Output Only] A list of instances contained in this scope.", + "networkEndpointGroups": { + "description": "[Output Only] The list of network endpoint groups that are contained in this scope.", "items": { - "$ref": "Instance" + "$ref": "NetworkEndpointGroup" }, "type": "array" }, "warning": { - "description": "[Output Only] Informational warning which replaces the list of instances when the list is empty.", + "description": "[Output Only] An informational warning that replaces the list of network endpoint groups when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -30375,408 +36824,341 @@ }, "type": "object" }, - "InstancesSetLabelsRequest": { - "id": "InstancesSetLabelsRequest", - "properties": { - "labelFingerprint": { - "description": "Fingerprint of the previous set of labels for this resource, used to prevent conflicts. Provide the latest fingerprint value when making a request to add or change labels.", - "format": "byte", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "type": "object" - } - }, - "type": "object" - }, - "InstancesSetMachineResourcesRequest": { - "id": "InstancesSetMachineResourcesRequest", + "NetworkEndpointWithHealthStatus": { + "id": "NetworkEndpointWithHealthStatus", "properties": { - "guestAccelerators": { - "description": "A list of the type and count of accelerator cards attached to the instance.", + "healths": { + "description": "[Output only] The health status of network endpoint;", "items": { - "$ref": "AcceleratorConfig" + "$ref": "HealthStatusForNetworkEndpoint" }, "type": "array" - } - }, - "type": "object" - }, - "InstancesSetMachineTypeRequest": { - "id": "InstancesSetMachineTypeRequest", - "properties": { - "machineType": { - "description": "Full or partial URL of the machine type resource. See Machine Types for a full list of machine types. 
For example: zones/us-central1-f/machineTypes/n1-standard-1", - "type": "string" - } - }, - "type": "object" - }, - "InstancesSetMinCpuPlatformRequest": { - "id": "InstancesSetMinCpuPlatformRequest", - "properties": { - "minCpuPlatform": { - "description": "Minimum cpu/platform this instance should be started at.", - "type": "string" - } - }, - "type": "object" - }, - "InstancesSetServiceAccountRequest": { - "id": "InstancesSetServiceAccountRequest", - "properties": { - "email": { - "description": "Email address of the service account.", - "type": "string" }, - "scopes": { - "description": "The list of scopes to be made available for this service account.", - "items": { - "type": "string" - }, - "type": "array" + "networkEndpoint": { + "$ref": "NetworkEndpoint", + "description": "[Output only] The network endpoint;" } }, "type": "object" }, - "InstancesStartWithEncryptionKeyRequest": { - "id": "InstancesStartWithEncryptionKeyRequest", + "NetworkInterface": { + "description": "A network interface resource attached to an instance.", + "id": "NetworkInterface", "properties": { - "disks": { - "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key.\n\nIn order to start the instance, the disk url and its corresponding key must be provided.\n\nIf the disk is not protected with a customer-supplied encryption key it should not be specified.", + "accessConfigs": { + "description": "An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access.", "items": { - "$ref": "CustomerEncryptionKeyProtectedDisk" + "$ref": "AccessConfig" }, "type": "array" - } - }, - "type": "object" - }, - "Interconnect": { - "description": "Represents an Interconnects resource. The Interconnects resource is a dedicated connection between Google's network and your on-premises network. For more information, see the Dedicated overview page. (== resource_for v1.interconnects ==) (== resource_for beta.interconnects ==)", - "id": "Interconnect", - "properties": { - "adminEnabled": { - "description": "Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true.", - "type": "boolean" }, - "circuitInfos": { - "description": "[Output Only] A list of CircuitInfo objects, that describe the individual circuits in this LAG.", + "aliasIpRanges": { + "description": "An array of alias IP ranges for this network interface. Can only be specified for network interfaces on subnet-mode networks.", "items": { - "$ref": "InterconnectCircuitInfo" + "$ref": "AliasIpRange" }, "type": "array" }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "fingerprint": { + "description": "Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. 
An up-to-date fingerprint must be provided in order to update the NetworkInterface, otherwise the request will fail with error 412 conditionNotMet.", + "format": "byte", "type": "string" }, - "customerName": { - "description": "Customer name, to put in the Letter of Authorization as the party authorized to request a crossconnect.", + "kind": { + "default": "compute#networkInterface", + "description": "[Output Only] Type of the resource. Always compute#networkInterface for network interfaces.", "type": "string" }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "name": { + "description": "[Output Only] The name of the network interface, generated by the server. For network devices, these are eth0, eth1, etc.", "type": "string" }, - "expectedOutages": { - "description": "[Output Only] A list of outages expected for this Interconnect.", - "items": { - "$ref": "InterconnectOutageNotification" - }, - "type": "array" - }, - "googleIpAddress": { - "description": "[Output Only] IP address configured on the Google side of the Interconnect link. This can be used only for ping tests.", + "network": { + "description": "URL of the network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used; if the network is not specified but the subnetwork is specified, the network is inferred.\n\nThis field is optional when creating a firewall rule. If not specified when creating a firewall rule, the default network global/networks/default is used.\n\nIf you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default", "type": "string" }, - "googleReferenceId": { - "description": "[Output Only] Google reference ID; to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.", + "networkIP": { + "description": "An IPv4 internal network address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system.", "type": "string" }, + "subnetwork": { + "description": "The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. If you specify this property, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork \n- regions/region/subnetworks/subnetwork", + "type": "string" + } + }, + "type": "object" + }, + "NetworkList": { + "description": "Contains a list of networks.", + "id": "NetworkList", + "properties": { "id": { - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", - "format": "uint64", + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, - "interconnectAttachments": { - "description": "[Output Only] A list of the URLs of all InterconnectAttachments configured to use this Interconnect.", + "items": { + "description": "A list of Network resources.", "items": { - "type": "string" + "$ref": "Network" }, "type": "array" }, - "interconnectType": { - "description": "Type of interconnect. Note that \"IT_PRIVATE\" has been deprecated in favor of \"DEDICATED\"", - "enum": [ - "DEDICATED", - "IT_PRIVATE", - "PARTNER" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, "kind": { - "default": "compute#interconnect", - "description": "[Output Only] Type of the resource. Always compute#interconnect for interconnects.", - "type": "string" - }, - "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this Interconnect, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an Interconnect.", - "format": "byte", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to apply to this Interconnect resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", - "type": "object" - }, - "linkType": { - "description": "Type of link requested. This field indicates speed of each of the links in the bundle, not the entire bundle. Only 10G per link is allowed for a dedicated interconnect. Options: Ethernet_10G_LR", - "enum": [ - "LINK_TYPE_ETHERNET_10G_LR" - ], - "enumDescriptions": [ - "" - ], - "type": "string" - }, - "location": { - "description": "URL of the InterconnectLocation object that represents where this connection is to be provisioned.", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "compute.interconnects.insert" - ] - }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "nocContactEmail": { - "description": "Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Stackdriver logs alerting and Cloud Notifications.", - "type": "string" - }, - "operationalStatus": { - "description": "[Output Only] The current status of whether or not this Interconnect is functional.", - "enum": [ - "OS_ACTIVE", - "OS_UNPROVISIONED" - ], - "enumDescriptions": [ - "", - "" - ], + "default": "compute#networkList", + "description": "[Output Only] Type of resource. 
Always compute#networkList for lists of networks.", "type": "string" }, - "peerIpAddress": { - "description": "[Output Only] IP address configured on the customer side of the Interconnect link. The customer should configure this IP address during turnup when prompted by Google NOC. This can be used only for ping tests.", + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, - "provisionedLinkCount": { - "description": "[Output Only] Number of links actually provisioned in this interconnect.", - "format": "int32", - "type": "integer" - }, - "requestedLinkCount": { - "description": "Target number of physical links in the link bundle, as requested by the customer.", - "format": "int32", - "type": "integer" - }, "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "state": { - "description": "[Output Only] The current state of whether or not this Interconnect is functional.", - "enum": [ - "ACTIVE", - "UNPROVISIONED" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "InterconnectAttachment": { - "description": "Represents an InterconnectAttachment (VLAN attachment) resource. For more information, see Creating VLAN Attachments. (== resource_for beta.interconnectAttachments ==) (== resource_for v1.interconnectAttachments ==)", - "id": "InterconnectAttachment", + "NetworkPeering": { + "description": "A network peering attached to a network resource. The message includes the peering name, peer network, peering state, and a flag indicating whether Google Compute Engine should automatically create routes for the peering.", + "id": "NetworkPeering", "properties": { - "adminEnabled": { - "description": "Determines whether this Attachment will carry packets. Not present for PARTNER_PROVIDER.", + "autoCreateRoutes": { + "description": "This field will be deprecated soon. Prefer using exchange_subnet_routes instead. Indicates whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network.", "type": "boolean" }, - "bandwidth": { - "description": "Provisioned bandwidth capacity for the interconnectAttachment. Can be set by the partner to update the customer's provisioned bandwidth. Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED.", - "enum": [ - "BPS_100M", - "BPS_10G", - "BPS_1G", - "BPS_200M", - "BPS_2G", - "BPS_300M", - "BPS_400M", - "BPS_500M", - "BPS_50M", - "BPS_5G" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "candidateSubnets": { - "description": "Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress and customerRouterIpAddress for this attachment. All prefixes must be within link-local address space (169.254.0.0/16) and must be /29 or shorter (/28, /27, etc). Google will attempt to select an unused /29 from the supplied candidate prefix(es). The request will fail if all possible /29s are in use on Google?s edge. If not supplied, Google will randomly select an unused /29 from all of link-local space.", - "items": { - "type": "string" - }, - "type": "array" - }, - "cloudRouterIpAddress": { - "description": "[Output Only] IPv4 address + prefix length to be configured on Cloud Router Interface for this interconnect attachment.", - "type": "string" + "exchangeSubnetRoutes": { + "description": "Whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the peering state is ACTIVE. 
Otherwise, user needs to create routes manually to route packets to peer network.", + "type": "boolean" }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" + "exportCustomRoutes": { + "description": "Whether to export the custom routes to peer network.", + "type": "boolean" }, - "customerRouterIpAddress": { - "description": "[Output Only] IPv4 address + prefix length to be configured on the customer router subinterface for this interconnect attachment.", + "importCustomRoutes": { + "description": "Whether to import the custom routes from peer network.", + "type": "boolean" + }, + "name": { + "description": "Name of this peering. Provided by the client when the peering is created. The name must comply with RFC1035. Specifically, the name must be 1-63 characters long and match regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all the following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "type": "string" }, - "description": { - "description": "An optional description of this resource.", + "network": { + "description": "The URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network.", "type": "string" }, - "edgeAvailabilityDomain": { - "description": "Desired availability domain for the attachment. Only available for type PARTNER, at creation time. For improved reliability, customers should configure a pair of attachments with one per availability domain. The selected availability domain will be provided to the Partner via the pairing key so that the provisioned circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.", + "state": { + "description": "[Output Only] State for the peering.", "enum": [ - "AVAILABILITY_DOMAIN_1", - "AVAILABILITY_DOMAIN_2", - "AVAILABILITY_DOMAIN_ANY" + "ACTIVE", + "INACTIVE" ], "enumDescriptions": [ - "", "", "" ], "type": "string" }, - "googleReferenceId": { - "description": "[Output Only] Google reference ID, to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.", + "stateDetails": { + "description": "[Output Only] Details about the current state of the peering.", "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", + } + }, + "type": "object" + }, + "NetworkRoutingConfig": { + "description": "A routing configuration attached to a network resource. The message includes the list of routers associated with the network, and a flag indicating the type of routing behavior to enforce network-wide.", + "id": "NetworkRoutingConfig", + "properties": { + "routingMode": { + "description": "The network-wide routing mode to use. If set to REGIONAL, this network's cloud routers will only advertise routes with subnets of this network in the same region as the router. 
If set to GLOBAL, this network's cloud routers will advertise routes with all subnets of this network, across regions.", + "enum": [ + "GLOBAL", + "REGIONAL" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" + } + }, + "type": "object" + }, + "NetworksAddPeeringRequest": { + "id": "NetworksAddPeeringRequest", + "properties": { + "autoCreateRoutes": { + "description": "This field will be deprecated soon. Prefer using exchange_subnet_routes in network_peering instead. Whether Google Compute Engine manages the routes automatically.", + "type": "boolean" }, - "interconnect": { - "description": "URL of the underlying Interconnect object that this attachment's traffic will traverse through.", + "name": { + "annotations": { + "required": [ + "compute.networks.addPeering" + ] + }, + "description": "Name of the peering, which should conform to RFC1035.", "type": "string" }, - "kind": { - "default": "compute#interconnectAttachment", - "description": "[Output Only] Type of the resource. Always compute#interconnectAttachment for interconnect attachments.", - "type": "string" + "networkPeering": { + "$ref": "NetworkPeering", + "description": "Network peering parameters. In order to specify route policies for peering using import/export custom routes, you will have to fill all peering related parameters (name, peer network, exchange_subnet_routes) in network_peeringfield. Corresponding fields in NetworksAddPeeringRequest will be deprecated soon." }, - "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this InterconnectAttachment, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an InterconnectAttachment.", - "format": "byte", + "peerNetwork": { + "description": "URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network.", "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to apply to this InterconnectAttachment resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", - "type": "object" - }, + } + }, + "type": "object" + }, + "NetworksRemovePeeringRequest": { + "id": "NetworksRemovePeeringRequest", + "properties": { "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "description": "Name of the peering, which should conform to RFC1035.", "type": "string" - }, - "operationalStatus": { - "description": "[Output Only] The current status of whether or not this interconnect attachment is functional.", - "enum": [ - "OS_ACTIVE", - "OS_UNPROVISIONED" - ], - "enumDescriptions": [ - "", - "" - ], + } + }, + "type": "object" + }, + "NetworksUpdatePeeringRequest": { + "id": "NetworksUpdatePeeringRequest", + "properties": { + "networkPeering": { + "$ref": "NetworkPeering" + } + }, + "type": "object" + }, + "NodeGroup": { + "description": "A NodeGroup resource. To create a node group, you must first create a node templates. To learn more about node groups and sole-tenant nodes, read the Sole-tenant nodes documentation. (== resource_for beta.nodeGroups ==) (== resource_for v1.nodeGroups ==)", + "id": "NodeGroup", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "pairingKey": { - "description": "[Output only for type PARTNER. Input only for PARTNER_PROVIDER. Not present for DEDICATED]. The opaque identifier of an PARTNER attachment used to initiate provisioning with a selected partner. Of the form \"XXXXX/region/domain\"", + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "partnerAsn": { - "description": "Optional BGP ASN for the router that should be supplied by a layer 3 Partner if they configured BGP on behalf of the customer. Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED.", - "format": "int64", + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", "type": "string" }, - "partnerMetadata": { - "$ref": "InterconnectAttachmentPartnerMetadata", - "description": "Informational metadata about Partner attachments from Partners to display to customers. Output only for for PARTNER type, mutable for PARTNER_PROVIDER, not available for DEDICATED." - }, - "privateInterconnectInfo": { - "$ref": "InterconnectAttachmentPrivateInfo", - "description": "[Output Only] Information specific to an InterconnectAttachment. This property is populated if the interconnect that this is attached to is of type DEDICATED." + "kind": { + "default": "compute#nodeGroup", + "description": "[Output Only] The type of the resource. Always compute#nodeGroup for node group.", + "type": "string" }, - "region": { - "description": "[Output Only] URL of the region where the regional interconnect attachment resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "name": { + "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "type": "string" }, - "router": { - "description": "URL of the Cloud Router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the network \u0026 region within which the Cloud Router is configured.", + "nodeTemplate": { + "description": "The URL of the node template to which this node group belongs.", "type": "string" }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "state": { - "description": "[Output Only] The current state of this attachment's functionality.", + "size": { + "description": "[Output Only] The total number of nodes in the node group.", + "format": "int32", + "type": "integer" + }, + "status": { "enum": [ - "ACTIVE", - "DEFUNCT", - "PARTNER_REQUEST_RECEIVED", - "PENDING_CUSTOMER", - "PENDING_PARTNER", - "STATE_UNSPECIFIED", - "UNPROVISIONED" + "CREATING", + "DELETING", + "INVALID", + "READY" ], "enumDescriptions": [ - "", - "", - "", "", "", "", @@ -30784,29 +37166,15 @@ ], "type": "string" }, - "type": { - "enum": [ - "DEDICATED", - "PARTNER", - "PARTNER_PROVIDER" - ], - "enumDescriptions": [ - "", - "", - "" - ], + "zone": { + "description": "[Output Only] The name of the zone where the node group resides, such as us-central1-a.", "type": "string" - }, - "vlanTag8021q": { - "description": "The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. Only specified at creation time.", - "format": "int32", - "type": "integer" } }, "type": "object" }, - "InterconnectAttachmentAggregatedList": { - "id": "InterconnectAttachmentAggregatedList", + "NodeGroupAggregatedList": { + "id": "NodeGroupAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", @@ -30814,15 +37182,15 @@ }, "items": { "additionalProperties": { - "$ref": "InterconnectAttachmentsScopedList", - "description": "Name of the scope containing this set of interconnect attachments." + "$ref": "NodeGroupsScopedList", + "description": "[Output Only] Name of the scope containing this set of node groups." }, - "description": "A list of InterconnectAttachmentsScopedList resources.", + "description": "A list of NodeGroupsScopedList resources.", "type": "object" }, "kind": { - "default": "compute#interconnectAttachmentAggregatedList", - "description": "[Output Only] Type of resource. 
Always compute#interconnectAttachmentAggregatedList for aggregated lists of interconnect attachments.", + "default": "compute#nodeGroupAggregatedList", + "description": "[Output Only] Type of resource.Always compute#nodeGroupAggregatedList for aggregated lists of node groups.", "type": "string" }, "nextPageToken": { @@ -30917,24 +37285,24 @@ }, "type": "object" }, - "InterconnectAttachmentList": { - "description": "Response to the list request, and contains a list of interconnect attachments.", - "id": "InterconnectAttachmentList", + "NodeGroupList": { + "description": "Contains a list of nodeGroups.", + "id": "NodeGroupList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of InterconnectAttachment resources.", + "description": "A list of NodeGroup resources.", "items": { - "$ref": "InterconnectAttachment" + "$ref": "NodeGroup" }, "type": "array" }, "kind": { - "default": "compute#interconnectAttachmentList", - "description": "[Output Only] Type of resource. Always compute#interconnectAttachmentList for lists of interconnect attachments.", + "default": "compute#nodeGroupList", + "description": "[Output Only] Type of resource.Always compute#nodeGroupList for lists of node groups.", "type": "string" }, "nextPageToken": { @@ -31029,49 +37397,100 @@ }, "type": "object" }, - "InterconnectAttachmentPartnerMetadata": { - "description": "Informational metadata about Partner attachments from Partners to display to customers. These fields are propagated from PARTNER_PROVIDER attachments to their corresponding PARTNER attachments.", - "id": "InterconnectAttachmentPartnerMetadata", + "NodeGroupNode": { + "id": "NodeGroupNode", "properties": { - "interconnectName": { - "description": "Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner?s portal. For instance \"Chicago 1\". This value may be validated to match approved Partner values.", + "instances": { + "description": "Instances scheduled on this node.", + "items": { + "type": "string" + }, + "type": "array" + }, + "name": { + "description": "The name of the node.", "type": "string" }, - "partnerName": { - "description": "Plain text name of the Partner providing this attachment. This value may be validated to match approved Partner values.", + "nodeType": { + "description": "The type of this node.", "type": "string" }, - "portalUrl": { - "description": "URL of the Partner?s portal for this Attachment. Partners may customise this to be a deep-link to the specific resource on the Partner portal. This value may be validated to match approved Partner values.", + "serverBinding": { + "$ref": "ServerBinding", + "description": "Binding properties for the physical server." 
+ }, + "status": { + "enum": [ + "CREATING", + "DELETING", + "INVALID", + "READY", + "REPAIRING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], "type": "string" } }, "type": "object" }, - "InterconnectAttachmentPrivateInfo": { - "description": "Information for an interconnect attachment when this belongs to an interconnect of type DEDICATED.", - "id": "InterconnectAttachmentPrivateInfo", + "NodeGroupsAddNodesRequest": { + "id": "NodeGroupsAddNodesRequest", "properties": { - "tag8021q": { - "description": "[Output Only] 802.1q encapsulation tag to be used for traffic between Google and the customer, going to and from this network and region.", - "format": "uint32", + "additionalNodeCount": { + "description": "Count of additional nodes to be added to the node group.", + "format": "int32", "type": "integer" } }, "type": "object" }, - "InterconnectAttachmentsScopedList": { - "id": "InterconnectAttachmentsScopedList", + "NodeGroupsDeleteNodesRequest": { + "id": "NodeGroupsDeleteNodesRequest", "properties": { - "interconnectAttachments": { - "description": "A list of interconnect attachments contained in this scope.", + "nodes": { "items": { - "$ref": "InterconnectAttachment" + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "NodeGroupsListNodes": { + "id": "NodeGroupsListNodes", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of Node resources.", + "items": { + "$ref": "NodeGroupNode" }, "type": "array" }, + "kind": { + "default": "compute#nodeGroupsListNodes", + "description": "[Output Only] The resource type, which is always compute.nodeGroupsListNodes for the list of nodes in the specified node group.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "Informational warning which replaces the list of addresses when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -31154,166 +37573,204 @@ }, "type": "object" }, - "InterconnectCircuitInfo": { - "description": "Describes a single physical circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only. Next id: 4", - "id": "InterconnectCircuitInfo", - "properties": { - "customerDemarcId": { - "description": "Customer-side demarc ID for this circuit.", - "type": "string" - }, - "googleCircuitId": { - "description": "Google-assigned unique ID for this circuit. Assigned at circuit turn-up.", - "type": "string" - }, - "googleDemarcId": { - "description": "Google-side demarc ID for this circuit. 
Assigned at circuit turn-up and provided by Google to the customer in the LOA.", - "type": "string" - } - }, - "type": "object" - }, - "InterconnectDiagnostics": { - "description": "Diagnostics information about interconnect, contains detailed and current technical information about Google?s side of the connection.", - "id": "InterconnectDiagnostics", + "NodeGroupsScopedList": { + "id": "NodeGroupsScopedList", "properties": { - "arpCaches": { - "description": "A list of InterconnectDiagnostics.ARPEntry objects, describing individual neighbors currently seen by the Google router in the ARP cache for the Interconnect. This will be empty when the Interconnect is not bundled.", + "nodeGroups": { + "description": "[Output Only] A list of node groups contained in this scope.", "items": { - "$ref": "InterconnectDiagnosticsARPEntry" + "$ref": "NodeGroup" }, "type": "array" }, - "links": { - "description": "A list of InterconnectDiagnostics.LinkStatus objects, describing the status for each link on the Interconnect.", - "items": { - "$ref": "InterconnectDiagnosticsLinkStatus" + "warning": { + "description": "[Output Only] An informational warning that appears when the nodeGroup list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } }, - "type": "array" - }, - "macAddress": { - "description": "The MAC address of the Interconnect's bundle interface.", - "type": "string" + "type": "object" } }, "type": "object" }, - "InterconnectDiagnosticsARPEntry": { - "description": "Describing the ARP neighbor entries seen on this link", - "id": "InterconnectDiagnosticsARPEntry", + "NodeGroupsSetNodeTemplateRequest": { + "id": "NodeGroupsSetNodeTemplateRequest", "properties": { - "ipAddress": { - "description": "The IP address of this ARP neighbor.", - "type": "string" - }, - "macAddress": { - "description": "The MAC address of this ARP neighbor.", + "nodeTemplate": { + "description": "Full or partial URL of the node template resource to be updated for this node group.", "type": "string" } }, "type": "object" }, - "InterconnectDiagnosticsLinkLACPStatus": { - "id": "InterconnectDiagnosticsLinkLACPStatus", + "NodeTemplate": { + "description": "A Node Template resource. To learn more about node templates and sole-tenant nodes, read the Sole-tenant nodes documentation. (== resource_for beta.nodeTemplates ==) (== resource_for v1.nodeTemplates ==)", + "id": "NodeTemplate", "properties": { - "googleSystemId": { - "description": "System ID of the port on Google?s side of the LACP exchange.", + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "neighborSystemId": { - "description": "System ID of the port on the neighbor?s side of the LACP exchange.", + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "state": { - "enum": [ - "ACTIVE", - "DETACHED" - ], - "enumDescriptions": [ - "", - "" - ], + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", "type": "string" - } - }, - "type": "object" - }, - "InterconnectDiagnosticsLinkOpticalPower": { - "id": "InterconnectDiagnosticsLinkOpticalPower", - "properties": { - "state": { - "enum": [ - "HIGH_ALARM", - "HIGH_WARNING", - "LOW_ALARM", - "LOW_WARNING", - "OK" + }, + "kind": { + "default": "compute#nodeTemplate", + "description": "[Output Only] The type of the resource. Always compute#nodeTemplate for node templates.", + "type": "string" + }, + "name": { + "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "type": "string" + }, + "nodeAffinityLabels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to use for node affinity, which will be used in instance scheduling.", + "type": "object" + }, + "nodeType": { + "description": "The node type to use for nodes group that are created from this template.", + "type": "string" + }, + "nodeTypeFlexibility": { + "$ref": "NodeTemplateNodeTypeFlexibility", + "description": "The flexible properties of the desired node type. Node groups that use this node template will create nodes of a type that matches these properties.\n\nThis field is mutually exclusive with the node_type property; you can only define one or the other, but not both." + }, + "region": { + "description": "[Output Only] The name of the region where the node template resides, such as us-central1.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "serverBinding": { + "$ref": "ServerBinding", + "description": "Binding properties for the physical server." + }, + "status": { + "description": "[Output Only] The status of the node template. One of the following values: CREATING, READY, and DELETING.", + "enum": [ + "CREATING", + "DELETING", + "INVALID", + "READY" ], "enumDescriptions": [ "", "", "", - "", "" ], "type": "string" }, - "value": { - "description": "Value of the current optical power, read in dBm. Take a known good optical value, give it a 10% margin and trigger warnings relative to that value. In general, a -7dBm warning and a -11dBm alarm are good optical value estimates for most links.", - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "InterconnectDiagnosticsLinkStatus": { - "id": "InterconnectDiagnosticsLinkStatus", - "properties": { - "arpCaches": { - "description": "A list of InterconnectDiagnostics.ARPEntry objects, describing the ARP neighbor entries seen on this link. 
This will be empty if the link is bundled", - "items": { - "$ref": "InterconnectDiagnosticsARPEntry" - }, - "type": "array" - }, - "circuitId": { - "description": "The unique ID for this link assigned during turn up by Google.", - "type": "string" - }, - "googleDemarc": { - "description": "The Demarc address assigned by Google and provided in the LoA.", + "statusMessage": { + "description": "[Output Only] An optional, human-readable explanation of the status.", "type": "string" - }, - "lacpStatus": { - "$ref": "InterconnectDiagnosticsLinkLACPStatus" - }, - "receivingOpticalPower": { - "$ref": "InterconnectDiagnosticsLinkOpticalPower" - }, - "transmittingOpticalPower": { - "$ref": "InterconnectDiagnosticsLinkOpticalPower" } }, "type": "object" }, - "InterconnectList": { - "description": "Response to the list request, and contains a list of interconnects.", - "id": "InterconnectList", + "NodeTemplateAggregatedList": { + "id": "NodeTemplateAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Interconnect resources.", - "items": { - "$ref": "Interconnect" + "additionalProperties": { + "$ref": "NodeTemplatesScopedList", + "description": "[Output Only] Name of the scope containing this set of node templates." }, - "type": "array" + "description": "A list of NodeTemplatesScopedList resources.", + "type": "object" }, "kind": { - "default": "compute#interconnectList", - "description": "[Output Only] Type of resource. Always compute#interconnectList for lists of interconnects.", + "default": "compute#nodeTemplateAggregatedList", + "description": "[Output Only] Type of resource.Always compute#nodeTemplateAggregatedList for aggregated lists of node templates.", "type": "string" }, "nextPageToken": { @@ -31408,128 +37865,24 @@ }, "type": "object" }, - "InterconnectLocation": { - "description": "Represents an InterconnectLocations resource. The InterconnectLocations resource describes the locations where you can connect to Google's networks. For more information, see Colocation Facilities.", - "id": "InterconnectLocation", - "properties": { - "address": { - "description": "[Output Only] The postal address of the Point of Presence, each line in the address is separated by a newline character.", - "type": "string" - }, - "availabilityZone": { - "description": "[Output Only] Availability zone for this InterconnectLocation. Within a metropolitan area (metro), maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\".", - "type": "string" - }, - "city": { - "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. 
For example: \"Chicago, IL\", \"Amsterdam, Netherlands\".", - "type": "string" - }, - "continent": { - "description": "[Output Only] Continent for this location.", - "enum": [ - "AFRICA", - "ASIA_PAC", - "C_AFRICA", - "C_ASIA_PAC", - "C_EUROPE", - "C_NORTH_AMERICA", - "C_SOUTH_AMERICA", - "EUROPE", - "NORTH_AMERICA", - "SOUTH_AMERICA" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "[Output Only] An optional description of the resource.", - "type": "string" - }, - "facilityProvider": { - "description": "[Output Only] The name of the provider for this facility (e.g., EQUINIX).", - "type": "string" - }, - "facilityProviderFacilityId": { - "description": "[Output Only] A provider-assigned Identifier for this facility (e.g., Ashburn-DC1).", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#interconnectLocation", - "description": "[Output Only] Type of the resource. Always compute#interconnectLocation for interconnect locations.", - "type": "string" - }, - "name": { - "description": "[Output Only] Name of the resource.", - "type": "string" - }, - "peeringdbFacilityId": { - "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb).", - "type": "string" - }, - "regionInfos": { - "description": "[Output Only] A list of InterconnectLocation.RegionInfo objects, that describe parameters pertaining to the relation between this InterconnectLocation and various Google Cloud regions.", - "items": { - "$ref": "InterconnectLocationRegionInfo" - }, - "type": "array" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "status": { - "description": "[Output Only] The status of this InterconnectLocation. If the status is AVAILABLE, new Interconnects may be provisioned in this InterconnectLocation. Otherwise, no new Interconnects may be provisioned.", - "enum": [ - "AVAILABLE", - "CLOSED" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "InterconnectLocationList": { - "description": "Response to the list request, and contains a list of interconnect locations.", - "id": "InterconnectLocationList", + "NodeTemplateList": { + "description": "Contains a list of node templates.", + "id": "NodeTemplateList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of InterconnectLocation resources.", + "description": "A list of NodeTemplate resources.", "items": { - "$ref": "InterconnectLocation" + "$ref": "NodeTemplate" }, "type": "array" }, "kind": { - "default": "compute#interconnectLocationList", - "description": "[Output Only] Type of resource. 
Always compute#interconnectLocationList for lists of interconnect locations.", + "default": "compute#nodeTemplateList", + "description": "[Output Only] Type of resource.Always compute#nodeTemplateList for lists of node templates.", "type": "string" }, "nextPageToken": { @@ -31624,300 +37977,33 @@ }, "type": "object" }, - "InterconnectLocationRegionInfo": { - "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", - "id": "InterconnectLocationRegionInfo", - "properties": { - "expectedRttMs": { - "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", - "format": "int64", - "type": "string" - }, - "locationPresence": { - "description": "Identifies the network presence of this location.", - "enum": [ - "GLOBAL", - "LOCAL_REGION", - "LP_GLOBAL", - "LP_LOCAL_REGION" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - }, - "region": { - "description": "URL for the region of this location.", - "type": "string" - } - }, - "type": "object" - }, - "InterconnectOutageNotification": { - "description": "Description of a planned outage on this Interconnect. Next id: 9", - "id": "InterconnectOutageNotification", - "properties": { - "affectedCircuits": { - "description": "If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", - "items": { - "type": "string" - }, - "type": "array" - }, - "description": { - "description": "A description about the purpose of the outage.", - "type": "string" - }, - "endTime": { - "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", - "format": "int64", - "type": "string" - }, - "issueType": { - "description": "Form this outage is expected to take. Note that the \"IT_\" versions of this enum have been deprecated in favor of the unprefixed values.", - "enum": [ - "IT_OUTAGE", - "IT_PARTIAL_OUTAGE", - "OUTAGE", - "PARTIAL_OUTAGE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - }, - "name": { - "description": "Unique identifier for this outage notification.", - "type": "string" - }, - "source": { - "description": "The party that generated this notification. Note that \"NSRC_GOOGLE\" has been deprecated in favor of \"GOOGLE\"", - "enum": [ - "GOOGLE", - "NSRC_GOOGLE" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "startTime": { - "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", - "format": "int64", - "type": "string" - }, - "state": { - "description": "State of this notification. Note that the \"NS_\" versions of this enum have been deprecated in favor of the unprefixed values.", - "enum": [ - "ACTIVE", - "CANCELLED", - "NS_ACTIVE", - "NS_CANCELED" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "InterconnectsGetDiagnosticsResponse": { - "description": "Response for the InterconnectsGetDiagnosticsRequest.", - "id": "InterconnectsGetDiagnosticsResponse", - "properties": { - "result": { - "$ref": "InterconnectDiagnostics" - } - }, - "type": "object" - }, - "License": { - "description": "A license resource.", - "id": "License", - "properties": { - "chargesUseFee": { - "description": "[Output Only] Deprecated. 
This field no longer reflects whether a license charges a usage fee.", - "type": "boolean" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional textual description of the resource; provided by the client when the resource is created.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#license", - "description": "[Output Only] Type of resource. Always compute#license for licenses.", - "type": "string" - }, - "licenseCode": { - "description": "[Output Only] The unique code used to attach this license to images, snapshots, and disks.", - "format": "uint64", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "compute.images.insert" - ] - }, - "description": "Name of the resource. The name must be 1-63 characters long and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "resourceRequirements": { - "$ref": "LicenseResourceRequirements" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "transferable": { - "description": "If false, licenses will not be copied from the source resource when creating an image from a disk, disk from snapshot, or snapshot from disk.", - "type": "boolean" - } - }, - "type": "object" - }, - "LicenseCode": { - "id": "LicenseCode", + "NodeTemplateNodeTypeFlexibility": { + "id": "NodeTemplateNodeTypeFlexibility", "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "[Output Only] Description of this License Code.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#licenseCode", - "description": "[Output Only] Type of resource. Always compute#licenseCode for licenses.", - "type": "string" - }, - "licenseAlias": { - "description": "[Output Only] URL and description aliases of Licenses with the same License Code.", - "items": { - "$ref": "LicenseCodeLicenseAlias" - }, - "type": "array" - }, - "name": { - "annotations": { - "required": [ - "compute.licenses.insert" - ] - }, - "description": "[Output Only] Name of the resource. The name is 1-20 characters long and must be a valid 64 bit integer.", - "pattern": "[0-9]{0,20}?", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "state": { - "description": "[Output Only] Current state of this License Code.", - "enum": [ - "DISABLED", - "ENABLED", - "RESTRICTED", - "STATE_UNSPECIFIED", - "TERMINATED" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], + "cpus": { "type": "string" }, - "transferable": { - "description": "[Output Only] If true, the license will remain attached when creating images or snapshots from disks. 
Otherwise, the license is not transferred.", - "type": "boolean" - } - }, - "type": "object" - }, - "LicenseCodeLicenseAlias": { - "id": "LicenseCodeLicenseAlias", - "properties": { - "description": { - "description": "[Output Only] Description of this License Code.", + "localSsd": { "type": "string" }, - "selfLink": { - "description": "[Output Only] URL of license corresponding to this License Code.", + "memory": { "type": "string" } }, "type": "object" }, - "LicenseResourceRequirements": { - "id": "LicenseResourceRequirements", - "properties": { - "minGuestCpuCount": { - "description": "Minimum number of guest cpus required to use the Instance. Enforced at Instance creation and Instance start.", - "format": "int32", - "type": "integer" - }, - "minMemoryMb": { - "description": "Minimum memory required to use the Instance. Enforced at Instance creation and Instance start.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "LicensesListResponse": { - "id": "LicensesListResponse", + "NodeTemplatesScopedList": { + "id": "NodeTemplatesScopedList", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of License resources.", + "nodeTemplates": { + "description": "[Output Only] A list of node templates contained in this scope.", "items": { - "$ref": "License" + "$ref": "NodeTemplate" }, "type": "array" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "[Output Only] An informational warning that appears when the node templates list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -31987,115 +38073,41 @@ } }, "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "LogConfig": { - "description": "Specifies what kind of log the caller must write", - "id": "LogConfig", - "properties": { - "cloudAudit": { - "$ref": "LogConfigCloudAuditOptions", - "description": "Cloud audit options." - }, - "counter": { - "$ref": "LogConfigCounterOptions", - "description": "Counter options." - }, - "dataAccess": { - "$ref": "LogConfigDataAccessOptions", - "description": "Data access options." - } - }, - "type": "object" - }, - "LogConfigCloudAuditOptions": { - "description": "Write a Cloud Audit log", - "id": "LogConfigCloudAuditOptions", - "properties": { - "authorizationLoggingOptions": { - "$ref": "AuthorizationLoggingOptions", - "description": "Information used by the Cloud Audit Logging pipeline." 
- }, - "logName": { - "description": "The log_name to populate in the Cloud Audit Record.", - "enum": [ - "ADMIN_ACTIVITY", - "DATA_ACCESS", - "UNSPECIFIED_LOG_NAME" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "LogConfigCounterOptions": { - "description": "Increment a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only, and end in \"_count\". Field names should not contain an initial slash. The actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are their respective values.\n\nSupported field names: - \"authority\", which is \"[token]\" if IAMContext.token is present, otherwise the value of IAMContext.authority_selector if present, and otherwise a representation of IAMContext.principal; or - \"iam_principal\", a representation of IAMContext.principal even if a token or authority selector is present; or - \"\" (empty string), resulting in a counter with no fields.\n\nExamples: counter { metric: \"/debug_access_count\" field: \"iam_principal\" } ==\u003e increment counter /iam/policy/backend_debug_access_count {iam_principal=[value of IAMContext.principal]}\n\nAt this time we do not support multiple field names (though this may be supported in the future).", - "id": "LogConfigCounterOptions", + "NodeType": { + "description": "A Node Type resource.", + "id": "NodeType", "properties": { - "field": { - "description": "The field value to attribute.", + "cpuPlatform": { + "description": "[Output Only] The CPU platform used by this node type.", "type": "string" }, - "metric": { - "description": "The metric to update.", - "type": "string" - } - }, - "type": "object" - }, - "LogConfigDataAccessOptions": { - "description": "Write a Data Access (Gin) log", - "id": "LogConfigDataAccessOptions", - "properties": { - "logMode": { - "description": "Whether Gin logging should happen in a fail-closed manner at the caller. This is relevant only in the LocalIAM implementation, for now.\n\nNOTE: Logging to Gin in a fail-closed manner is currently unsupported while work is being done to satisfy the requirements of go/345. Currently, setting LOG_FAIL_CLOSED mode will have no effect, but still exists because there is active work being done to support it (b/115874152).", - "enum": [ - "LOG_FAIL_CLOSED", - "LOG_MODE_UNSPECIFIED" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "MachineType": { - "description": "A Machine Type resource. (== resource_for v1.machineTypes ==) (== resource_for beta.machineTypes ==)", - "id": "MachineType", - "properties": { "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, "deprecated": { "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this machine type." + "description": "[Output Only] The deprecation status associated with this node type." 
}, "description": { "description": "[Output Only] An optional textual description of the resource.", "type": "string" }, "guestCpus": { - "description": "[Output Only] The number of virtual CPUs that are available to the instance.", + "description": "[Output Only] The number of virtual CPUs that are available to the node type.", "format": "int32", "type": "integer" }, @@ -32104,27 +38116,18 @@ "format": "uint64", "type": "string" }, - "isSharedCpu": { - "description": "[Output Only] Whether this machine type has a shared CPU. See Shared-core machine types for more information.", - "type": "boolean" - }, "kind": { - "default": "compute#machineType", - "description": "[Output Only] The type of the resource. Always compute#machineType for machine types.", + "default": "compute#nodeType", + "description": "[Output Only] The type of the resource. Always compute#nodeType for node types.", "type": "string" }, - "maximumPersistentDisks": { - "description": "[Output Only] Maximum persistent disks allowed.", + "localSsdGb": { + "description": "[Output Only] Local SSD available to the node type, defined in GB.", "format": "int32", "type": "integer" }, - "maximumPersistentDisksSizeGb": { - "description": "[Output Only] Maximum total persistent disks size (GB) allowed.", - "format": "int64", - "type": "string" - }, "memoryMb": { - "description": "[Output Only] The amount of physical memory available to the instance, defined in MB.", + "description": "[Output Only] The amount of physical memory available to the node type, defined in MB.", "format": "int32", "type": "integer" }, @@ -32138,14 +38141,14 @@ "type": "string" }, "zone": { - "description": "[Output Only] The name of the zone where the machine type resides, such as us-central1-a.", + "description": "[Output Only] The name of the zone where the node type resides, such as us-central1-a.", "type": "string" } }, "type": "object" }, - "MachineTypeAggregatedList": { - "id": "MachineTypeAggregatedList", + "NodeTypeAggregatedList": { + "id": "NodeTypeAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", @@ -32153,15 +38156,15 @@ }, "items": { "additionalProperties": { - "$ref": "MachineTypesScopedList", - "description": "[Output Only] Name of the scope containing this set of machine types." + "$ref": "NodeTypesScopedList", + "description": "[Output Only] Name of the scope containing this set of node types." }, - "description": "A list of MachineTypesScopedList resources.", + "description": "A list of NodeTypesScopedList resources.", "type": "object" }, "kind": { - "default": "compute#machineTypeAggregatedList", - "description": "[Output Only] Type of resource. 
Always compute#machineTypeAggregatedList for aggregated lists of machine types.", + "default": "compute#nodeTypeAggregatedList", + "description": "[Output Only] Type of resource.Always compute#nodeTypeAggregatedList for aggregated lists of node types.", "type": "string" }, "nextPageToken": { @@ -32256,24 +38259,24 @@ }, "type": "object" }, - "MachineTypeList": { - "description": "Contains a list of machine types.", - "id": "MachineTypeList", + "NodeTypeList": { + "description": "Contains a list of node types.", + "id": "NodeTypeList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of MachineType resources.", + "description": "A list of NodeType resources.", "items": { - "$ref": "MachineType" + "$ref": "NodeType" }, "type": "array" }, "kind": { - "default": "compute#machineTypeList", - "description": "[Output Only] Type of resource. Always compute#machineTypeList for lists of machine types.", + "default": "compute#nodeTypeList", + "description": "[Output Only] Type of resource.Always compute#nodeTypeList for lists of node types.", "type": "string" }, "nextPageToken": { @@ -32368,18 +38371,18 @@ }, "type": "object" }, - "MachineTypesScopedList": { - "id": "MachineTypesScopedList", + "NodeTypesScopedList": { + "id": "NodeTypesScopedList", "properties": { - "machineTypes": { - "description": "[Output Only] A list of machine types contained in this scope.", + "nodeTypes": { + "description": "[Output Only] A list of node types contained in this scope.", "items": { - "$ref": "MachineType" + "$ref": "NodeType" }, "type": "array" }, "warning": { - "description": "[Output Only] An informational warning that appears when the machine types list is empty.", + "description": "[Output Only] An informational warning that appears when the node types list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -32462,85 +38465,28 @@ }, "type": "object" }, - "ManagedInstance": { - "description": "A Managed Instance resource.", - "id": "ManagedInstance", + "Operation": { + "description": "An Operation resource, used to manage asynchronous API requests. (== resource_for v1.globalOperations ==) (== resource_for beta.globalOperations ==) (== resource_for v1.regionOperations ==) (== resource_for beta.regionOperations ==) (== resource_for v1.zoneOperations ==) (== resource_for beta.zoneOperations ==)", + "id": "Operation", "properties": { - "currentAction": { - "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: \n- NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. \n- CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. \n- CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's targetSize value is decreased instead. \n- RECREATING The managed instance group is recreating this instance. \n- DELETING The managed instance group is permanently deleting this instance. \n- ABANDONING The managed instance group is abandoning this instance. 
The instance will be removed from the instance group and from any target pools that are associated with this group. \n- RESTARTING The managed instance group is restarting the instance. \n- REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance. \n- VERIFYING The managed instance group has created the instance and it is in the process of being verified.", - "enum": [ - "ABANDONING", - "CREATING", - "CREATING_WITHOUT_RETRIES", - "DELETING", - "NONE", - "RECREATING", - "REFRESHING", - "RESTARTING", - "VERIFYING" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], + "clientOperationId": { + "description": "[Output Only] The value of `requestId` if you provided it in the request. Not present otherwise.", "type": "string" }, - "id": { - "description": "[Output only] The unique identifier for this resource. This field is empty when instance does not exist.", - "format": "uint64", + "creationTimestamp": { + "description": "[Deprecated] This field is deprecated.", "type": "string" }, - "instance": { - "description": "[Output Only] The URL of the instance. The URL can exist even if the instance has not yet been created.", + "description": { + "description": "[Output Only] A textual description of the operation, which is set when the operation is created.", "type": "string" }, - "instanceStatus": { - "description": "[Output Only] The status of the instance. This field is empty when the instance does not exist.", - "enum": [ - "PROVISIONING", - "RUNNING", - "STAGING", - "STOPPED", - "STOPPING", - "SUSPENDED", - "SUSPENDING", - "TERMINATED" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "" - ], + "endTime": { + "description": "[Output Only] The time that this operation was completed. This value is in RFC3339 text format.", "type": "string" }, - "lastAttempt": { - "$ref": "ManagedInstanceLastAttempt", - "description": "[Output Only] Information about the last attempt to create or delete the instance." - }, - "version": { - "$ref": "ManagedInstanceVersion", - "description": "[Output Only] Intended version of this instance." - } - }, - "type": "object" - }, - "ManagedInstanceLastAttempt": { - "id": "ManagedInstanceLastAttempt", - "properties": { - "errors": { - "description": "[Output Only] Encountered errors during the last attempt to create or delete the instance.", + "error": { + "description": "[Output Only] If errors are generated during processing of the operation, this field will be populated.", "properties": { "errors": { "description": "[Output Only] The array of errors encountered while processing this operation.", @@ -32565,265 +38511,195 @@ } }, "type": "object" - } - }, - "type": "object" - }, - "ManagedInstanceVersion": { - "id": "ManagedInstanceVersion", - "properties": { - "instanceTemplate": { - "description": "[Output Only] The intended template of the instance. This field is empty when current_action is one of { DELETING, ABANDONING }.", - "type": "string" - }, - "name": { - "description": "[Output Only] Name of the version.", - "type": "string" - } - }, - "type": "object" - }, - "Metadata": { - "description": "A metadata key/value entry.", - "id": "Metadata", - "properties": { - "fingerprint": { - "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. 
The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve the resource.", - "format": "byte", - "type": "string" - }, - "items": { - "description": "Array of key/value pairs. The total size of all keys and values must be less than 512 KB.", - "items": { - "properties": { - "key": { - "annotations": { - "required": [ - "compute.instances.insert", - "compute.projects.setCommonInstanceMetadata" - ] - }, - "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.", - "pattern": "[a-zA-Z0-9-_]{1,128}", - "type": "string" - }, - "value": { - "annotations": { - "required": [ - "compute.instances.insert", - "compute.projects.setCommonInstanceMetadata" - ] - }, - "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB).", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" }, - "kind": { - "default": "compute#metadata", - "description": "[Output Only] Type of the resource. Always compute#metadata for metadata.", - "type": "string" - } - }, - "type": "object" - }, - "NamedPort": { - "description": "The named port. For example: .", - "id": "NamedPort", - "properties": { - "name": { - "description": "The name for this named port. The name must be 1-63 characters long, and comply with RFC1035.", + "httpErrorMessage": { + "description": "[Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as NOT FOUND.", "type": "string" }, - "port": { - "description": "The port number, which can be a value between 1 and 65535.", + "httpErrorStatusCode": { + "description": "[Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a 404 means the resource was not found.", "format": "int32", "type": "integer" - } - }, - "type": "object" - }, - "Network": { - "description": "Represents a Network resource. Read Virtual Private Cloud (VPC) Network Overview for more information. (== resource_for v1.networks ==) (== resource_for beta.networks ==)", - "id": "Network", - "properties": { - "IPv4Range": { - "description": "The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", - "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}", - "type": "string" - }, - "autoCreateSubnetworks": { - "description": "When set to true, the VPC network is created in \"auto\" mode. When set to false, the VPC network is created in \"custom\" mode.\n\nAn auto mode VPC network starts with one subnet per region. 
Each subnet has a predetermined range as described in Auto mode VPC network IP ranges.", - "type": "boolean" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "gatewayIPv4": { - "description": "[Output Only] The gateway address for default routing out of the network. This value is read only and is selected by GCP.", - "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", - "type": "string" }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", "type": "string" }, - "kind": { - "default": "compute#network", - "description": "[Output Only] Type of the resource. Always compute#network for networks.", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "compute.networks.insert" - ] - }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "peerings": { - "description": "[Output Only] A list of network peerings for the resource.", - "items": { - "$ref": "NetworkPeering" - }, - "type": "array" - }, - "routingConfig": { - "$ref": "NetworkRoutingConfig", - "description": "The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce." - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "insertTime": { + "description": "[Output Only] The time that this operation was requested. This value is in RFC3339 text format.", "type": "string" }, - "subnetworks": { - "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this VPC network.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "NetworkEndpoint": { - "description": "The network endpoint.", - "id": "NetworkEndpoint", - "properties": { - "instance": { - "description": "The name for a specific VM instance that the IP address belongs to. This is required for network endpoints of type GCE_VM_IP_PORT. The instance must be in the same zone of network endpoint group.\n\nThe name must be 1-63 characters long, and comply with RFC1035.", + "kind": { + "default": "compute#operation", + "description": "[Output Only] Type of the resource. Always compute#operation for Operation resources.", "type": "string" }, - "ipAddress": { - "description": "Optional IPv4 address of network endpoint. The IP address must belong to a VM in GCE (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used.", + "name": { + "description": "[Output Only] Name of the resource.", "type": "string" }, - "port": { - "description": "Optional port number of network endpoint. 
If not specified and the NetworkEndpointGroup.network_endpoint_type is GCE_IP_PORT, the defaultPort for the network endpoint group will be used.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "NetworkEndpointGroup": { - "description": "Represents a collection of network endpoints.", - "id": "NetworkEndpointGroup", - "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "operationType": { + "description": "[Output Only] The type of operation, such as insert, update, or delete, and so on.", "type": "string" }, - "defaultPort": { - "description": "The default port used if the port number is not specified in the network endpoint.", + "progress": { + "description": "[Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. This number should monotonically increase as the operation progresses.", "format": "int32", "type": "integer" }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#networkEndpointGroup", - "description": "[Output Only] Type of the resource. Always compute#networkEndpointGroup for network endpoint group.", + "region": { + "description": "[Output Only] The URL of the region where the operation resides. Only available when performing regional operations. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" }, - "loadBalancer": { - "$ref": "NetworkEndpointGroupLbNetworkEndpointGroup", - "description": "This field is only valid when the network endpoint group is used for load balancing. [Deprecated] This field is deprecated." - }, - "name": { - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "network": { - "description": "The URL of the network to which all network endpoints in the NEG belong. Uses \"default\" project network if unspecified.", + "startTime": { + "description": "[Output Only] The time that this operation was started by the server. This value is in RFC3339 text format.", "type": "string" }, - "networkEndpointType": { - "description": "Type of network endpoints in this network endpoint group. 
Currently the only supported value is GCE_VM_IP_PORT.", + "status": { + "description": "[Output Only] The status of the operation, which can be one of the following: PENDING, RUNNING, or DONE.", "enum": [ - "GCE_VM_IP_PORT" + "DONE", + "PENDING", + "RUNNING" ], "enumDescriptions": [ + "", + "", "" ], "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "statusMessage": { + "description": "[Output Only] An optional textual description of the current status of the operation.", "type": "string" }, - "size": { - "description": "[Output only] Number of network endpoints in the network endpoint group.", - "format": "int32", - "type": "integer" + "targetId": { + "description": "[Output Only] The unique target ID, which identifies a specific incarnation of the target resource.", + "format": "uint64", + "type": "string" }, - "subnetwork": { - "description": "Optional URL of the subnetwork to which all network endpoints in the NEG belong.", + "targetLink": { + "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from.", + "type": "string" + }, + "user": { + "description": "[Output Only] User who requested the operation, for example: user@example.com.", "type": "string" }, + "warnings": { + "description": "[Output Only] If warning messages are generated during processing of the operation, this field will be populated.", + "items": { + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, "zone": { - "description": "[Output Only] The URL of the zone where the network endpoint group is located.", + "description": "[Output Only] The URL of the zone where the operation resides. Only available when performing per-zone operations. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" } }, "type": "object" }, - "NetworkEndpointGroupAggregatedList": { - "id": "NetworkEndpointGroupAggregatedList", + "OperationAggregatedList": { + "id": "OperationAggregatedList", "properties": { "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "type": "string" }, "items": { "additionalProperties": { - "$ref": "NetworkEndpointGroupsScopedList", - "description": "The name of the scope that contains this set of network endpoint groups." + "$ref": "OperationsScopedList", + "description": "[Output Only] Name of the scope containing this set of operations." }, - "description": "A list of NetworkEndpointGroupsScopedList resources.", + "description": "[Output Only] A map of scoped operation lists.", "type": "object" }, "kind": { - "default": "compute#networkEndpointGroupAggregatedList", - "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupAggregatedList for aggregated lists of network endpoint groups.", + "default": "compute#operationAggregatedList", + "description": "[Output Only] Type of resource. Always compute#operationAggregatedList for aggregated lists of operations.", "type": "string" }, "nextPageToken": { @@ -32918,47 +38794,24 @@ }, "type": "object" }, - "NetworkEndpointGroupLbNetworkEndpointGroup": { - "description": "Load balancing specific fields for network endpoint group.", - "id": "NetworkEndpointGroupLbNetworkEndpointGroup", - "properties": { - "defaultPort": { - "description": "The default port used if the port number is not specified in the network endpoint. [Deprecated] This field is deprecated.", - "format": "int32", - "type": "integer" - }, - "network": { - "description": "The URL of the network to which all network endpoints in the NEG belong. Uses \"default\" project network if unspecified. [Deprecated] This field is deprecated.", - "type": "string" - }, - "subnetwork": { - "description": "Optional URL of the subnetwork to which all network endpoints in the NEG belong. [Deprecated] This field is deprecated.", - "type": "string" - }, - "zone": { - "description": "[Output Only] The URL of the zone where the network endpoint group is located. 
[Deprecated] This field is deprecated.", - "type": "string" - } - }, - "type": "object" - }, - "NetworkEndpointGroupList": { - "id": "NetworkEndpointGroupList", + "OperationList": { + "description": "Contains a list of Operation resources.", + "id": "OperationList", "properties": { "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "type": "string" }, "items": { - "description": "A list of NetworkEndpointGroup resources.", + "description": "[Output Only] A list of Operation resources.", "items": { - "$ref": "NetworkEndpointGroup" + "$ref": "Operation" }, "type": "array" }, "kind": { - "default": "compute#networkEndpointGroupList", - "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupList for network endpoint group lists.", + "default": "compute#operationList", + "description": "[Output Only] Type of resource. Always compute#operations for Operations resource.", "type": "string" }, "nextPageToken": { @@ -33053,75 +38906,18 @@ }, "type": "object" }, - "NetworkEndpointGroupsAttachEndpointsRequest": { - "id": "NetworkEndpointGroupsAttachEndpointsRequest", - "properties": { - "networkEndpoints": { - "description": "The list of network endpoints to be attached.", - "items": { - "$ref": "NetworkEndpoint" - }, - "type": "array" - } - }, - "type": "object" - }, - "NetworkEndpointGroupsDetachEndpointsRequest": { - "id": "NetworkEndpointGroupsDetachEndpointsRequest", - "properties": { - "networkEndpoints": { - "description": "The list of network endpoints to be detached.", - "items": { - "$ref": "NetworkEndpoint" - }, - "type": "array" - } - }, - "type": "object" - }, - "NetworkEndpointGroupsListEndpointsRequest": { - "id": "NetworkEndpointGroupsListEndpointsRequest", - "properties": { - "healthStatus": { - "description": "Optional query parameter for showing the health status of each network endpoint. Valid options are SKIP or SHOW. If you don't specifiy this parameter, the health status of network endpoints will not be provided.", - "enum": [ - "SHOW", - "SKIP" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "NetworkEndpointGroupsListNetworkEndpoints": { - "id": "NetworkEndpointGroupsListNetworkEndpoints", + "OperationsScopedList": { + "id": "OperationsScopedList", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of NetworkEndpointWithHealthStatus resources.", + "operations": { + "description": "[Output Only] A list of operations contained in this scope.", "items": { - "$ref": "NetworkEndpointWithHealthStatus" + "$ref": "Operation" }, "type": "array" }, - "kind": { - "default": "compute#networkEndpointGroupsListNetworkEndpoints", - "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupsListNetworkEndpoints for the list of network endpoints in the specified network endpoint group.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "[Output Only] Informational warning which replaces the list of operations when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -33204,332 +39000,335 @@ }, "type": "object" }, - "NetworkEndpointGroupsScopedList": { - "id": "NetworkEndpointGroupsScopedList", + "OutlierDetection": { + "description": "Settings controlling eviction of unhealthy hosts from the load balancing pool.", + "id": "OutlierDetection", "properties": { - "networkEndpointGroups": { - "description": "[Output Only] The list of network endpoint groups that are contained in this scope.", + "baseEjectionTime": { + "$ref": "Duration", + "description": "The base time that a host is ejected for. The real time is equal to the base time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s." + }, + "consecutiveErrors": { + "description": "Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5.", + "format": "int32", + "type": "integer" + }, + "consecutiveGatewayFailure": { + "description": "The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 5.", + "format": "int32", + "type": "integer" + }, + "enforcingConsecutiveErrors": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "enforcingConsecutiveGatewayFailure": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0.", + "format": "int32", + "type": "integer" + }, + "enforcingSuccessRate": { + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "interval": { + "$ref": "Duration", + "description": "Time interval between ejection sweep analysis. This can result in both new ejections as well as hosts being returned to service. Defaults to 10 seconds." + }, + "maxEjectionPercent": { + "description": "Maximum percentage of hosts in the load balancing pool for the backend service that can be ejected. Defaults to 10%.", + "format": "int32", + "type": "integer" + }, + "successRateMinimumHosts": { + "description": "The number of hosts in a cluster that must have enough request volume to detect success rate outliers. If the number of hosts is less than this setting, outlier detection via success rate statistics is not performed for any host in the cluster. 
Defaults to 5.", + "format": "int32", + "type": "integer" + }, + "successRateRequestVolume": { + "description": "The minimum number of total requests that must be collected in one interval (as defined by the interval duration above) to include this host in success rate based outlier detection. If the volume is lower than this setting, outlier detection via success rate statistics is not performed for that host. Defaults to 100.", + "format": "int32", + "type": "integer" + }, + "successRateStdevFactor": { + "description": "This factor is used to determine the ejection threshold for success rate outlier ejection. The ejection threshold is the difference between the mean success rate, and the product of this factor and the standard deviation of the mean success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided by a thousand to get a double. That is, if the desired factor is 1.9, the runtime value should be 1900. Defaults to 1900.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "PathMatcher": { + "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service will be used.", + "id": "PathMatcher", + "properties": { + "defaultRouteAction": { + "$ref": "HttpRouteAction", + "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set." + }, + "defaultService": { + "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use", + "type": "string" + }, + "defaultUrlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When when none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set." + }, + "description": { + "description": "An optional description of this resource. 
Provide this property when you create the resource.", + "type": "string" + }, + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nHeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap" + }, + "name": { + "description": "The name to which this PathMatcher is referred by the HostRule.", + "type": "string" + }, + "pathRules": { + "description": "The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. Matches are always done on the longest-path-first basis.\nFor example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list.\nOnly one of pathRules or routeRules must be set.", "items": { - "$ref": "NetworkEndpointGroup" + "$ref": "PathRule" }, "type": "array" }, - "warning": { - "description": "[Output Only] An informational warning that replaces the list of network endpoint groups when the list is empty.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } + "routeRules": { + "description": "The list of ordered HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. 
The order of specifying routeRules matters: the first rule that matches will cause its specified routing action to take effect.\nOnly one of pathRules or routeRules must be set.", + "items": { + "$ref": "HttpRouteRule" + }, + "type": "array" + } + }, + "type": "object" + }, + "PathRule": { + "description": "A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL.", + "id": "PathRule", + "properties": { + "paths": { + "description": "The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here.", + "items": { + "type": "string" + }, + "type": "array" + }, + "routeAction": { + "$ref": "HttpRouteAction", + "description": "In response to a matching path, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set." + }, + "service": { + "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", + "type": "string" + }, + "urlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When a path pattern is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set." + } + }, + "type": "object" + }, + "Policy": { + "description": "Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources.\n\n\n\nA `Policy` consists of a list of `bindings`. A `binding` binds a list of `members` to a `role`, where the members can be user accounts, Google groups, Google domains, and service accounts. 
A `role` is a named list of permissions defined by IAM.\n\n**JSON Example**\n\n{ \"bindings\": [ { \"role\": \"roles/owner\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-other-app@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/viewer\", \"members\": [\"user:sean@example.com\"] } ] }\n\n**YAML Example**\n\nbindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-other-app@appspot.gserviceaccount.com role: roles/owner - members: - user:sean@example.com role: roles/viewer\n\n\n\nFor a description of IAM and its features, see the [IAM developer's guide](https://cloud.google.com/iam/docs).", + "id": "Policy", "properties": { - "accessConfigs": { - "description": "An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access.", + "auditConfigs": { + "description": "Specifies cloud audit logging configuration for this policy.", "items": { - "$ref": "AccessConfig" + "$ref": "AuditConfig" }, "type": "array" }, - "aliasIpRanges": { - "description": "An array of alias IP ranges for this network interface. Can only be specified for network interfaces on subnet-mode networks.", + "bindings": { + "description": "Associates a list of `members` to a `role`. `bindings` with no members will result in an error.", "items": { - "$ref": "AliasIpRange" + "$ref": "Binding" }, "type": "array" }, - "fingerprint": { - "description": "Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface, otherwise the request will fail with error 412 conditionNotMet.", + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing policy is overwritten blindly.", "format": "byte", "type": "string" }, - "kind": { - "default": "compute#networkInterface", - "description": "[Output Only] Type of the resource. Always compute#networkInterface for network interfaces.", - "type": "string" - }, - "name": { - "description": "[Output Only] The name of the network interface, generated by the server. For network devices, these are eth0, eth1, etc.", - "type": "string" - }, - "network": { - "description": "URL of the network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used; if the network is not specified but the subnetwork is specified, the network is inferred.\n\nThis field is optional when creating a firewall rule. If not specified when creating a firewall rule, the default network global/networks/default is used.\n\nIf you specify this property, you can specify the network as a full or partial URL. 
For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default", - "type": "string" + "iamOwned": { + "description": "", + "type": "boolean" }, - "networkIP": { - "description": "An IPv4 internal network address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system.", - "type": "string" + "rules": { + "description": "If more than one rule is specified, the rules are applied in the following manner: - All matching LOG rules are always applied. - If any DENY/DENY_WITH_LOG rule matches, permission is denied. Logging will be applied if one or more matching rule requires logging. - Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is granted. Logging will be applied if one or more matching rule requires logging. - Otherwise, if no rule applies, permission is denied.", + "items": { + "$ref": "Rule" + }, + "type": "array" }, - "subnetwork": { - "description": "The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. If you specify this property, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork \n- regions/region/subnetworks/subnetwork", - "type": "string" + "version": { + "description": "Deprecated.", + "format": "int32", + "type": "integer" } }, "type": "object" }, - "NetworkList": { - "description": "Contains a list of networks.", - "id": "NetworkList", + "PreconfiguredWafSet": { + "id": "PreconfiguredWafSet", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of Network resources.", + "expressionSets": { + "description": "List of entities that are currently supported for WAF rules.", "items": { - "$ref": "Network" + "$ref": "WafExpressionSet" }, "type": "array" + } + }, + "type": "object" + }, + "Project": { + "description": "A Project resource. For an overview of projects, see Cloud Platform Resource Hierarchy. (== resource_for v1.projects ==) (== resource_for beta.projects ==)", + "id": "Project", + "properties": { + "commonInstanceMetadata": { + "$ref": "Metadata", + "description": "Metadata key/value pairs available to all instances contained in this project. See Custom metadata for more information." }, - "kind": { - "default": "compute#networkList", - "description": "[Output Only] Type of resource. Always compute#networkList for lists of networks.", + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "defaultNetworkTier": { + "description": "This signifies the default network tier used for configuring resources of the project and can only take the following values: PREMIUM, STANDARD. Initially the default network tier is PREMIUM.", + "enum": [ + "PREMIUM", + "STANDARD" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", + "defaultServiceAccount": { + "description": "[Output Only] Default service account used by VMs running in this project.", "type": "string" }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "NetworkPeering": { - "description": "A network peering attached to a network resource. The message includes the peering name, peer network, peering state, and a flag indicating whether Google Compute Engine should automatically create routes for the peering.", - "id": "NetworkPeering", - "properties": { - "autoCreateRoutes": { - "description": "Indicates whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the state is ACTIVE. 
Otherwise, user needs to create routes manually to route packets to peer network.", - "type": "boolean" + "description": { + "description": "An optional textual description of the resource.", + "type": "string" }, - "exchangeSubnetRoutes": { - "description": "Whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the peering state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network.", - "type": "boolean" + "enabledFeatures": { + "description": "Restricted features enabled for use on this project.", + "items": { + "type": "string" + }, + "type": "array" }, - "exportCustomRoutes": { - "description": "Whether to export the custom routes to peer network.", - "type": "boolean" + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server. This is not the project ID, and is just a unique ID used by Compute Engine to identify resources.", + "format": "uint64", + "type": "string" }, - "importCustomRoutes": { - "description": "Whether to import the custom routes from peer network.", - "type": "boolean" + "kind": { + "default": "compute#project", + "description": "[Output Only] Type of the resource. Always compute#project for projects.", + "type": "string" }, "name": { - "description": "Name of this peering. Provided by the client when the peering is created. The name must comply with RFC1035. Specifically, the name must be 1-63 characters long and match regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all the following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "The project ID. For example: my-example-project. Use the project ID to make requests to Compute Engine.", + "type": "string" + }, + "quotas": { + "description": "[Output Only] Quotas assigned to this project.", + "items": { + "$ref": "Quota" + }, + "type": "array" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "network": { - "description": "The URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network.", - "type": "string" + "usageExportLocation": { + "$ref": "UsageExportLocation", + "description": "The naming prefix for daily usage reports and the Google Cloud Storage bucket where they are stored." }, - "state": { - "description": "[Output Only] State for the peering.", + "xpnProjectStatus": { + "description": "[Output Only] The role this project has in a shared VPC configuration. Currently only HOST projects are differentiated.", "enum": [ - "ACTIVE", - "INACTIVE" + "HOST", + "UNSPECIFIED_XPN_PROJECT_STATUS" ], "enumDescriptions": [ "", "" ], "type": "string" + } + }, + "type": "object" + }, + "ProjectsDisableXpnResourceRequest": { + "id": "ProjectsDisableXpnResourceRequest", + "properties": { + "xpnResource": { + "$ref": "XpnResourceId", + "description": "Service resource (a.k.a service project) ID." 
+ } + }, + "type": "object" + }, + "ProjectsEnableXpnResourceRequest": { + "id": "ProjectsEnableXpnResourceRequest", + "properties": { + "xpnResource": { + "$ref": "XpnResourceId", + "description": "Service resource (a.k.a service project) ID." + } + }, + "type": "object" + }, + "ProjectsGetXpnResources": { + "id": "ProjectsGetXpnResources", + "properties": { + "kind": { + "default": "compute#projectsGetXpnResources", + "description": "[Output Only] Type of resource. Always compute#projectsGetXpnResources for lists of service resources (a.k.a service projects)", + "type": "string" }, - "stateDetails": { - "description": "[Output Only] Details about the current state of the peering.", + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" + }, + "resources": { + "description": "Service resources (a.k.a service projects) attached to this project as their shared VPC host.", + "items": { + "$ref": "XpnResourceId" + }, + "type": "array" } }, "type": "object" }, - "NetworkRoutingConfig": { - "description": "A routing configuration attached to a network resource. The message includes the list of routers associated with the network, and a flag indicating the type of routing behavior to enforce network-wide.", - "id": "NetworkRoutingConfig", + "ProjectsListXpnHostsRequest": { + "id": "ProjectsListXpnHostsRequest", "properties": { - "routingMode": { - "description": "The network-wide routing mode to use. If set to REGIONAL, this network's cloud routers will only advertise routes with subnets of this network in the same region as the router. If set to GLOBAL, this network's cloud routers will advertise routes with all subnets of this network, across regions.", + "organization": { + "description": "Optional organization ID managed by Cloud Resource Manager, for which to list shared VPC host projects. 
If not specified, the organization will be inferred from the project.", + "type": "string" + } + }, + "type": "object" + }, + "ProjectsSetDefaultNetworkTierRequest": { + "id": "ProjectsSetDefaultNetworkTierRequest", + "properties": { + "networkTier": { + "description": "Default network tier to be set.", "enum": [ - "GLOBAL", - "REGIONAL" + "PREMIUM", + "STANDARD" ], "enumDescriptions": [ "", @@ -33540,62 +39339,209 @@ }, "type": "object" }, - "NetworksAddPeeringRequest": { - "id": "NetworksAddPeeringRequest", + "Quota": { + "description": "A quotas entry.", + "id": "Quota", "properties": { - "autoCreateRoutes": { - "description": "Whether Google Compute Engine manages the routes automatically.", - "type": "boolean" + "limit": { + "description": "[Output Only] Quota limit for this metric.", + "format": "double", + "type": "number" }, - "name": { - "annotations": { - "required": [ - "compute.networks.addPeering" - ] - }, - "description": "Name of the peering, which should conform to RFC1035.", + "metric": { + "description": "[Output Only] Name of the quota metric.", + "enum": [ + "AUTOSCALERS", + "BACKEND_BUCKETS", + "BACKEND_SERVICES", + "COMMITMENTS", + "CPUS", + "CPUS_ALL_REGIONS", + "DISKS_TOTAL_GB", + "EXTERNAL_VPN_GATEWAYS", + "FIREWALLS", + "FORWARDING_RULES", + "GLOBAL_INTERNAL_ADDRESSES", + "GPUS_ALL_REGIONS", + "HEALTH_CHECKS", + "IMAGES", + "INSTANCES", + "INSTANCE_GROUPS", + "INSTANCE_GROUP_MANAGERS", + "INSTANCE_TEMPLATES", + "INTERCONNECTS", + "INTERCONNECT_ATTACHMENTS_PER_REGION", + "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS", + "INTERNAL_ADDRESSES", + "IN_USE_ADDRESSES", + "IN_USE_BACKUP_SCHEDULES", + "IN_USE_SNAPSHOT_SCHEDULES", + "LOCAL_SSD_TOTAL_GB", + "NETWORKS", + "NETWORK_ENDPOINT_GROUPS", + "NVIDIA_K80_GPUS", + "NVIDIA_P100_GPUS", + "NVIDIA_P100_VWS_GPUS", + "NVIDIA_P4_GPUS", + "NVIDIA_P4_VWS_GPUS", + "NVIDIA_T4_GPUS", + "NVIDIA_T4_VWS_GPUS", + "NVIDIA_V100_GPUS", + "PREEMPTIBLE_CPUS", + "PREEMPTIBLE_LOCAL_SSD_GB", + "PREEMPTIBLE_NVIDIA_K80_GPUS", + "PREEMPTIBLE_NVIDIA_P100_GPUS", + "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS", + "PREEMPTIBLE_NVIDIA_P4_GPUS", + "PREEMPTIBLE_NVIDIA_P4_VWS_GPUS", + "PREEMPTIBLE_NVIDIA_T4_GPUS", + "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS", + "PREEMPTIBLE_NVIDIA_V100_GPUS", + "REGIONAL_AUTOSCALERS", + "REGIONAL_INSTANCE_GROUP_MANAGERS", + "RESOURCE_POLICIES", + "ROUTERS", + "ROUTES", + "SECURITY_POLICIES", + "SECURITY_POLICY_RULES", + "SNAPSHOTS", + "SSD_TOTAL_GB", + "SSL_CERTIFICATES", + "STATIC_ADDRESSES", + "SUBNETWORKS", + "TARGET_HTTPS_PROXIES", + "TARGET_HTTP_PROXIES", + "TARGET_INSTANCES", + "TARGET_POOLS", + "TARGET_SSL_PROXIES", + "TARGET_TCP_PROXIES", + "TARGET_VPN_GATEWAYS", + "URL_MAPS", + "VPN_GATEWAYS", + "VPN_TUNNELS" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], "type": "string" }, - "networkPeering": { - "$ref": "NetworkPeering", - "description": "Network peering parameters. In order to specify route policies for peering using import/export custom routes, you will have to fill all peering related parameters (name, peer network, exchange_subnet_routes) in network_peeringfield. Corresponding fields in NetworksAddPeeringRequest will be deprecated soon." 
- }, - "peerNetwork": { - "description": "URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network.", + "owner": { + "description": "[Output Only] Owning resource. This is the resource on which this quota is applied.", "type": "string" + }, + "usage": { + "description": "[Output Only] Current usage of this metric.", + "format": "double", + "type": "number" } }, "type": "object" }, - "NetworksRemovePeeringRequest": { - "id": "NetworksRemovePeeringRequest", + "Reference": { + "description": "Represents a reference to a resource.", + "id": "Reference", "properties": { - "name": { - "description": "Name of the peering, which should conform to RFC1035.", + "kind": { + "default": "compute#reference", + "description": "[Output Only] Type of the resource. Always compute#reference for references.", + "type": "string" + }, + "referenceType": { + "description": "A description of the reference type with no implied semantics. Possible values include: \n- MEMBER_OF", + "type": "string" + }, + "referrer": { + "description": "URL of the resource which refers to the target.", + "type": "string" + }, + "target": { + "description": "URL of the resource to which this reference points.", "type": "string" } }, "type": "object" }, - "NetworksUpdatePeeringRequest": { - "id": "NetworksUpdatePeeringRequest", - "properties": { - "networkPeering": { - "$ref": "NetworkPeering" - } - }, - "type": "object" - }, - "NodeGroup": { - "description": "A NodeGroup resource.", - "id": "NodeGroup", + "Region": { + "description": "Region resource. (== resource_for beta.regions ==) (== resource_for v1.regions ==)", + "id": "Region", "properties": { "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this region." + }, "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "description": "[Output Only] Textual description of the resource.", "type": "string" }, "id": { @@ -33604,179 +39550,65 @@ "type": "string" }, "kind": { - "default": "compute#nodeGroup", - "description": "[Output Only] The type of the resource. Always compute#nodeGroup for node group.", + "default": "compute#region", + "description": "[Output Only] Type of the resource. Always compute#region for regions.", "type": "string" }, "name": { - "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "[Output Only] Name of the resource.", "type": "string" }, - "nodeTemplate": { - "description": "The URL of the node template to which this node group belongs.", - "type": "string" + "quotas": { + "description": "[Output Only] Quotas assigned to this region.", + "items": { + "$ref": "Quota" + }, + "type": "array" }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "size": { - "description": "[Output Only] The total number of nodes in the node group.", - "format": "int32", - "type": "integer" - }, "status": { + "description": "[Output Only] Status of the region, either UP or DOWN.", "enum": [ - "CREATING", - "DELETING", - "INVALID", - "READY" + "DOWN", + "UP" ], "enumDescriptions": [ - "", - "", "", "" ], "type": "string" }, - "zone": { - "description": "[Output Only] The name of the zone where the node group resides, such as us-central1-a.", - "type": "string" - } - }, - "type": "object" - }, - "NodeGroupAggregatedList": { - "id": "NodeGroupAggregatedList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "additionalProperties": { - "$ref": "NodeGroupsScopedList", - "description": "[Output Only] Name of the scope containing this set of node groups." - }, - "description": "A list of NodeGroupsScopedList resources.", - "type": "object" - }, - "kind": { - "default": "compute#nodeGroupAggregatedList", - "description": "[Output Only] Type of resource.Always compute#nodeGroupAggregatedList for aggregated lists of node groups.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } + "zones": { + "description": "[Output Only] A list of zones available in this region, in the form of resource URLs.", + "items": { + "type": "string" }, - "type": "object" + "type": "array" } }, "type": "object" }, - "NodeGroupList": { - "description": "Contains a list of nodeGroups.", - "id": "NodeGroupList", + "RegionAutoscalerList": { + "description": "Contains a list of autoscalers.", + "id": "RegionAutoscalerList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of NodeGroup resources.", + "description": "A list of Autoscaler resources.", "items": { - "$ref": "NodeGroup" + "$ref": "Autoscaler" }, "type": "array" }, "kind": { - "default": "compute#nodeGroupList", - "description": "[Output Only] Type of resource.Always compute#nodeGroupList for lists of node groups.", + "default": "compute#regionAutoscalerList", + "description": "Type of resource.", "type": "string" }, "nextPageToken": { @@ -33871,84 +39703,36 @@ }, "type": "object" }, - "NodeGroupNode": { - "id": "NodeGroupNode", - "properties": { - "instances": { - "description": "Instances scheduled on this node.", - "items": { - "type": "string" - }, - "type": "array" - }, - "name": { - "description": "The name of the node.", - "type": "string" - }, - "nodeType": { - "description": "The type of this node.", - "type": "string" - }, - "status": { - "enum": [ - "CREATING", - "DELETING", - "INVALID", - "READY", - "REPAIRING" - ], - 
"enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "NodeGroupsAddNodesRequest": { - "id": "NodeGroupsAddNodesRequest", - "properties": { - "additionalNodeCount": { - "description": "Count of additional nodes to be added to the node group.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "NodeGroupsDeleteNodesRequest": { - "id": "NodeGroupsDeleteNodesRequest", + "RegionCommitmentsUpdateReservationsRequest": { + "id": "RegionCommitmentsUpdateReservationsRequest", "properties": { - "nodes": { + "reservations": { + "description": "List of reservations for the capacity move of VMs with accelerators and local ssds.", "items": { - "type": "string" + "$ref": "Reservation" }, "type": "array" } }, "type": "object" }, - "NodeGroupsListNodes": { - "id": "NodeGroupsListNodes", + "RegionDiskTypeList": { + "id": "RegionDiskTypeList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Node resources.", + "description": "A list of DiskType resources.", "items": { - "$ref": "NodeGroupNode" + "$ref": "DiskType" }, "type": "array" }, "kind": { - "default": "compute#nodeGroupsListNodes", - "description": "[Output Only] The resource type, which is always compute.nodeGroupsListNodes for the list of nodes in the specified node group.", + "default": "compute#regionDiskTypeList", + "description": "[Output Only] Type of resource. Always compute#regionDiskTypeList for region disk types.", "type": "string" }, "nextPageToken": { @@ -34043,18 +39827,73 @@ }, "type": "object" }, - "NodeGroupsScopedList": { - "id": "NodeGroupsScopedList", + "RegionDisksAddResourcePoliciesRequest": { + "id": "RegionDisksAddResourcePoliciesRequest", "properties": { - "nodeGroups": { - "description": "[Output Only] A list of node groups contained in this scope.", + "resourcePolicies": { + "description": "Resource policies to be added to this disk.", "items": { - "$ref": "NodeGroup" + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionDisksRemoveResourcePoliciesRequest": { + "id": "RegionDisksRemoveResourcePoliciesRequest", + "properties": { + "resourcePolicies": { + "description": "Resource policies to be removed from this disk.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionDisksResizeRequest": { + "id": "RegionDisksResizeRequest", + "properties": { + "sizeGb": { + "description": "The new size of the regional persistent disk, which is specified in GB.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "RegionInstanceGroupList": { + "description": "Contains a list of InstanceGroup resources.", + "id": "RegionInstanceGroupList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of InstanceGroup resources.", + "items": { + "$ref": "InstanceGroup" }, "type": "array" }, + "kind": { + "default": "compute#regionInstanceGroupList", + "description": "The resource type.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "[Output Only] An informational warning that appears when the nodeGroup list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -34137,106 +39976,24 @@ }, "type": "object" }, - "NodeGroupsSetNodeTemplateRequest": { - "id": "NodeGroupsSetNodeTemplateRequest", - "properties": { - "nodeTemplate": { - "description": "Full or partial URL of the node template resource to be updated for this node group.", - "type": "string" - } - }, - "type": "object" - }, - "NodeTemplate": { - "description": "A Node Template resource.", - "id": "NodeTemplate", - "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#nodeTemplate", - "description": "[Output Only] The type of the resource. Always compute#nodeTemplate for node templates.", - "type": "string" - }, - "name": { - "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "type": "string" - }, - "nodeAffinityLabels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to use for node affinity, which will be used in instance scheduling.", - "type": "object" - }, - "nodeType": { - "description": "The node type to use for nodes group that are created from this template.", - "type": "string" - }, - "nodeTypeFlexibility": { - "$ref": "NodeTemplateNodeTypeFlexibility", - "description": "The flexible properties of the desired node type. Node groups that use this node template will create nodes of a type that matches these properties.\n\nThis field is mutually exclusive with the node_type property; you can only define one or the other, but not both." - }, - "region": { - "description": "[Output Only] The name of the region where the node template resides, such as us-central1.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "status": { - "description": "[Output Only] The status of the node template. 
One of the following values: CREATING, READY, and DELETING.", - "enum": [ - "CREATING", - "DELETING", - "INVALID", - "READY" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - }, - "statusMessage": { - "description": "[Output Only] An optional, human-readable explanation of the status.", - "type": "string" - } - }, - "type": "object" - }, - "NodeTemplateAggregatedList": { - "id": "NodeTemplateAggregatedList", + "RegionInstanceGroupManagerList": { + "description": "Contains a list of managed instance groups.", + "id": "RegionInstanceGroupManagerList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "NodeTemplatesScopedList", - "description": "[Output Only] Name of the scope containing this set of node templates." + "description": "A list of InstanceGroupManager resources.", + "items": { + "$ref": "InstanceGroupManager" }, - "description": "A list of NodeTemplatesScopedList resources.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#nodeTemplateAggregatedList", - "description": "[Output Only] Type of resource.Always compute#nodeTemplateAggregatedList for aggregated lists of node templates.", + "default": "compute#regionInstanceGroupManagerList", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups that exist in th regional scope.", "type": "string" }, "nextPageToken": { @@ -34331,24 +40088,157 @@ }, "type": "object" }, - "NodeTemplateList": { - "description": "Contains a list of node templates.", - "id": "NodeTemplateList", + "RegionInstanceGroupManagersAbandonInstancesRequest": { + "id": "RegionInstanceGroupManagersAbandonInstancesRequest", + "properties": { + "instances": { + "description": "The URLs of one or more instances to abandon. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionInstanceGroupManagersApplyUpdatesRequest": { + "description": "InstanceGroupManagers.applyUpdatesToInstances", + "id": "RegionInstanceGroupManagersApplyUpdatesRequest", + "properties": { + "instances": { + "description": "The list of instances for which we want to apply changes on this managed instance group.", + "items": { + "type": "string" + }, + "type": "array" + }, + "minimalAction": { + "description": "The minimal action that should be perfomed on the instances. By default NONE.", + "enum": [ + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "mostDisruptiveAllowedAction": { + "description": "The most disruptive action that allowed to be performed on the instances. By default REPLACE.", + "enum": [ + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "RegionInstanceGroupManagersDeleteInstancesRequest": { + "id": "RegionInstanceGroupManagersDeleteInstancesRequest", + "properties": { + "instances": { + "description": "The URLs of one or more instances to delete. 
This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionInstanceGroupManagersListInstancesResponse": { + "id": "RegionInstanceGroupManagersListInstancesResponse", + "properties": { + "managedInstances": { + "description": "A list of managed instances.", + "items": { + "$ref": "ManagedInstance" + }, + "type": "array" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + } + }, + "type": "object" + }, + "RegionInstanceGroupManagersRecreateRequest": { + "id": "RegionInstanceGroupManagersRecreateRequest", + "properties": { + "instances": { + "description": "The URLs of one or more instances to recreate. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionInstanceGroupManagersSetAutoHealingRequest": { + "id": "RegionInstanceGroupManagersSetAutoHealingRequest", + "properties": { + "autoHealingPolicies": { + "items": { + "$ref": "InstanceGroupManagerAutoHealingPolicy" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionInstanceGroupManagersSetTargetPoolsRequest": { + "id": "RegionInstanceGroupManagersSetTargetPoolsRequest", + "properties": { + "fingerprint": { + "description": "Fingerprint of the target pools information, which is a hash of the contents. This field is used for optimistic locking when you update the target pool entries. This field is optional.", + "format": "byte", + "type": "string" + }, + "targetPools": { + "description": "The URL of all TargetPool resources to which instances in the instanceGroup field are added. 
The target pools automatically apply to all of the instances in the managed instance group.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionInstanceGroupManagersSetTemplateRequest": { + "id": "RegionInstanceGroupManagersSetTemplateRequest", + "properties": { + "instanceTemplate": { + "description": "URL of the InstanceTemplate resource from which all new instances will be created.", + "type": "string" + } + }, + "type": "object" + }, + "RegionInstanceGroupsListInstances": { + "id": "RegionInstanceGroupsListInstances", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of NodeTemplate resources.", + "description": "A list of InstanceWithNamedPorts resources.", "items": { - "$ref": "NodeTemplate" + "$ref": "InstanceWithNamedPorts" }, "type": "array" }, "kind": { - "default": "compute#nodeTemplateList", - "description": "[Output Only] Type of resource.Always compute#nodeTemplateList for lists of node templates.", + "default": "compute#regionInstanceGroupsListInstances", + "description": "The resource type.", "type": "string" }, "nextPageToken": { @@ -34443,33 +40333,77 @@ }, "type": "object" }, - "NodeTemplateNodeTypeFlexibility": { - "id": "NodeTemplateNodeTypeFlexibility", + "RegionInstanceGroupsListInstancesRequest": { + "id": "RegionInstanceGroupsListInstancesRequest", "properties": { - "cpus": { + "instanceState": { + "description": "Instances in which state should be returned. Valid options are: 'ALL', 'RUNNING'. By default, it lists all instances.", + "enum": [ + "ALL", + "RUNNING" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" }, - "localSsd": { + "portName": { + "description": "Name of port user is interested in. It is optional. If it is set, only information about this ports will be returned. If it is not set, all the named ports will be returned. Always lists all instances.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" - }, - "memory": { + } + }, + "type": "object" + }, + "RegionInstanceGroupsSetNamedPortsRequest": { + "id": "RegionInstanceGroupsSetNamedPortsRequest", + "properties": { + "fingerprint": { + "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", + "format": "byte", "type": "string" + }, + "namedPorts": { + "description": "The list of named ports to set for this instance group.", + "items": { + "$ref": "NamedPort" + }, + "type": "array" } }, "type": "object" }, - "NodeTemplatesScopedList": { - "id": "NodeTemplatesScopedList", + "RegionList": { + "description": "Contains a list of region resources.", + "id": "RegionList", "properties": { - "nodeTemplates": { - "description": "[Output Only] A list of node templates contained in this scope.", + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of Region resources.", "items": { - "$ref": "NodeTemplate" + "$ref": "Region" }, "type": "array" }, + "kind": { + "default": "compute#regionList", + "description": "[Output Only] Type of resource. 
Always compute#regionList for lists of regions.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "[Output Only] An informational warning that appears when the node templates list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -34552,69 +40486,172 @@ }, "type": "object" }, - "NodeType": { - "description": "A Node Type resource.", - "id": "NodeType", + "RegionSetLabelsRequest": { + "id": "RegionSetLabelsRequest", "properties": { - "cpuPlatform": { - "description": "[Output Only] The CPU platform used by this node type.", + "labelFingerprint": { + "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. Make a get() request to the resource to get the latest fingerprint.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "The labels to set for this resource.", + "type": "object" + } + }, + "type": "object" + }, + "RegionSetPolicyRequest": { + "id": "RegionSetPolicyRequest", + "properties": { + "bindings": { + "description": "Flatten Policy to create a backwacd compatible wire-format. Deprecated. Use 'policy' to specify bindings.", + "items": { + "$ref": "Binding" + }, + "type": "array" + }, + "etag": { + "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. Use 'policy' to specify the etag.", + "format": "byte", + "type": "string" + }, + "policy": { + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the 'resource'. The size of the policy is limited to a few 10s of KB. An empty policy is in general a valid policy but certain services (like Projects) might reject them." + } + }, + "type": "object" + }, + "RegionTargetHttpsProxiesSetSslCertificatesRequest": { + "id": "RegionTargetHttpsProxiesSetSslCertificatesRequest", + "properties": { + "sslCertificates": { + "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionUrlMapsValidateRequest": { + "id": "RegionUrlMapsValidateRequest", + "properties": { + "resource": { + "$ref": "UrlMap", + "description": "Content of the UrlMap to be validated." + } + }, + "type": "object" + }, + "RequestMirrorPolicy": { + "description": "A policy that specifies how requests intended for the route's backends are shadowed to a separate mirrored backend service. Loadbalancer does not wait for responses from the shadow service. 
Prior to sending traffic to the shadow service, the host / authority header is suffixed with -shadow.", + "id": "RequestMirrorPolicy", + "properties": { + "backendService": { + "description": "The full or partial URL to the BackendService resource being mirrored to.", + "type": "string" + } + }, + "type": "object" + }, + "Reservation": { + "description": "Reservation resource", + "id": "Reservation", + "properties": { + "commitment": { + "description": "[OutputOnly] Full or partial url for parent commitment for reservations which are tied to a commitment.", "type": "string" }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this node type." - }, "description": { - "description": "[Output Only] An optional textual description of the resource.", + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "guestCpus": { - "description": "[Output Only] The number of virtual CPUs that are available to the node type.", - "format": "int32", - "type": "integer" - }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", "type": "string" }, "kind": { - "default": "compute#nodeType", - "description": "[Output Only] The type of the resource. Always compute#nodeType for node types.", + "default": "compute#reservation", + "description": "[Output Only] Type of the resource. Always compute#reservations for reservations.", "type": "string" }, - "localSsdGb": { - "description": "[Output Only] Local SSD available to the node type, defined in GB.", - "format": "int32", - "type": "integer" - }, - "memoryMb": { - "description": "[Output Only] The amount of physical memory available to the node type, defined in MB.", - "format": "int32", - "type": "integer" - }, "name": { - "description": "[Output Only] Name of the resource.", + "annotations": { + "required": [ + "compute.instances.insert" + ] + }, + "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "description": "[Output Only] Server-defined fully-qualified URL for this resource.", "type": "string" }, + "specificReservation": { + "$ref": "AllocationSpecificSKUReservation", + "description": "Reservation for instances with specific machine shapes." + }, + "specificReservationRequired": { + "description": "Indicates whether the reservation can be consumed by VMs with \"any reservation\" defined. 
If the field is set, then only VMs that target the reservation by name using --reservation-affinity can consume this reservation.", + "type": "boolean" + }, "zone": { - "description": "[Output Only] The name of the zone where the node type resides, such as us-central1-a.", + "description": "Zone in which the reservation resides, must be provided if reservation is created with commitment creation.", + "type": "string" + } + }, + "type": "object" + }, + "ReservationAffinity": { + "description": "AllocationAffinity is the configuration of desired allocation which this instance could take capacity from.", + "id": "ReservationAffinity", + "properties": { + "consumeReservationType": { + "description": "Specifies the type of reservation from which this instance can consume resources: ANY_RESERVATION (default), SPECIFIC_RESERVATION, or NO_RESERVATION. See Consuming reserved instances for examples.", + "enum": [ + "ANY_RESERVATION", + "NO_RESERVATION", + "SPECIFIC_RESERVATION", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "key": { + "description": "Corresponds to the label key of reservation resource.", "type": "string" + }, + "values": { + "description": "Corresponds to the label values of reservation resource.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "NodeTypeAggregatedList": { - "id": "NodeTypeAggregatedList", + "ReservationAggregatedList": { + "description": "Contains a list of reservations.", + "id": "ReservationAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", @@ -34622,15 +40659,15 @@ }, "items": { "additionalProperties": { - "$ref": "NodeTypesScopedList", - "description": "[Output Only] Name of the scope containing this set of node types." + "$ref": "ReservationsScopedList", + "description": "Name of the scope containing this set of reservations." }, - "description": "A list of NodeTypesScopedList resources.", + "description": "A list of Allocation resources.", "type": "object" }, "kind": { - "default": "compute#nodeTypeAggregatedList", - "description": "[Output Only] Type of resource.Always compute#nodeTypeAggregatedList for aggregated lists of node types.", + "default": "compute#reservationAggregatedList", + "description": "Type of resource.", "type": "string" }, "nextPageToken": { @@ -34725,24 +40762,23 @@ }, "type": "object" }, - "NodeTypeList": { - "description": "Contains a list of node types.", - "id": "NodeTypeList", + "ReservationList": { + "id": "ReservationList", "properties": { "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", "type": "string" }, "items": { - "description": "A list of NodeType resources.", + "description": "[Output Only] A list of Allocation resources.", "items": { - "$ref": "NodeType" + "$ref": "Reservation" }, "type": "array" }, "kind": { - "default": "compute#nodeTypeList", - "description": "[Output Only] Type of resource.Always compute#nodeTypeList for lists of node types.", + "default": "compute#reservationList", + "description": "[Output Only] Type of resource.Always compute#reservationsList for listsof reservations", "type": "string" }, "nextPageToken": { @@ -34837,18 +40873,29 @@ }, "type": "object" }, - "NodeTypesScopedList": { - "id": "NodeTypesScopedList", + "ReservationsResizeRequest": { + "id": "ReservationsResizeRequest", "properties": { - "nodeTypes": { - "description": "[Output Only] A list of node types contained in this scope.", + "specificSkuCount": { + "description": "Number of allocated resources can be resized with minimum = 1 and maximum = 1000.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "ReservationsScopedList": { + "id": "ReservationsScopedList", + "properties": { + "reservations": { + "description": "A list of reservations contained in this scope.", "items": { - "$ref": "NodeType" + "$ref": "Reservation" }, "type": "array" }, "warning": { - "description": "[Output Only] An informational warning that appears when the node types list is empty.", + "description": "Informational warning which replaces the list of reservations when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -34931,253 +40978,62 @@ }, "type": "object" }, - "Operation": { - "description": "An Operation resource, used to manage asynchronous API requests. (== resource_for v1.globalOperations ==) (== resource_for beta.globalOperations ==) (== resource_for v1.regionOperations ==) (== resource_for beta.regionOperations ==) (== resource_for v1.zoneOperations ==) (== resource_for beta.zoneOperations ==)", - "id": "Operation", + "ResourceCommitment": { + "description": "Commitment for a particular resource (a Commitment is composed of one or more of these).", + "id": "ResourceCommitment", "properties": { - "clientOperationId": { - "description": "[Output Only] The value of `requestId` if you provided it in the request. Not present otherwise.", - "type": "string" - }, - "creationTimestamp": { - "description": "[Deprecated] This field is deprecated.", - "type": "string" - }, - "description": { - "description": "[Output Only] A textual description of the operation, which is set when the operation is created.", - "type": "string" - }, - "endTime": { - "description": "[Output Only] The time that this operation was completed. This value is in RFC3339 text format.", - "type": "string" - }, - "error": { - "description": "[Output Only] If errors are generated during processing of the operation, this field will be populated.", - "properties": { - "errors": { - "description": "[Output Only] The array of errors encountered while processing this operation.", - "items": { - "properties": { - "code": { - "description": "[Output Only] The error type identifier for this error.", - "type": "string" - }, - "location": { - "description": "[Output Only] Indicates the field in the request that caused the error. 
This property is optional.", - "type": "string" - }, - "message": { - "description": "[Output Only] An optional, human-readable error message.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "httpErrorMessage": { - "description": "[Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as NOT FOUND.", - "type": "string" - }, - "httpErrorStatusCode": { - "description": "[Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a 404 means the resource was not found.", - "format": "int32", - "type": "integer" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "insertTime": { - "description": "[Output Only] The time that this operation was requested. This value is in RFC3339 text format.", - "type": "string" - }, - "kind": { - "default": "compute#operation", - "description": "[Output Only] Type of the resource. Always compute#operation for Operation resources.", - "type": "string" - }, - "name": { - "description": "[Output Only] Name of the resource.", - "type": "string" - }, - "operationType": { - "description": "[Output Only] The type of operation, such as insert, update, or delete, and so on.", - "type": "string" - }, - "progress": { - "description": "[Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. This number should monotonically increase as the operation progresses.", - "format": "int32", - "type": "integer" - }, - "region": { - "description": "[Output Only] The URL of the region where the operation resides. Only available when performing regional operations. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "acceleratorType": { + "description": "Name of the accelerator type resource. Applicable only when the type is ACCELERATOR.", "type": "string" }, - "startTime": { - "description": "[Output Only] The time that this operation was started by the server. This value is in RFC3339 text format.", + "amount": { + "description": "The amount of the resource purchased (in a type-dependent unit, such as bytes). For vCPUs, this can just be an integer. For memory, this must be provided in MB. Memory must be a multiple of 256 MB, with up to 6.5GB of memory per every vCPU.", + "format": "int64", "type": "string" }, - "status": { - "description": "[Output Only] The status of the operation, which can be one of the following: PENDING, RUNNING, or DONE.", + "type": { + "description": "Type of resource for which this commitment applies. 
Possible values are VCPU and MEMORY", "enum": [ - "DONE", - "PENDING", - "RUNNING" + "ACCELERATOR", + "LOCAL_SSD", + "MEMORY", + "UNSPECIFIED", + "VCPU" ], "enumDescriptions": [ + "", + "", "", "", "" ], "type": "string" - }, - "statusMessage": { - "description": "[Output Only] An optional textual description of the current status of the operation.", - "type": "string" - }, - "targetId": { - "description": "[Output Only] The unique target ID, which identifies a specific incarnation of the target resource.", - "format": "uint64", - "type": "string" - }, - "targetLink": { - "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from.", - "type": "string" - }, - "user": { - "description": "[Output Only] User who requested the operation, for example: user@example.com.", - "type": "string" - }, - "warnings": { - "description": "[Output Only] If warning messages are generated during processing of the operation, this field will be populated.", - "items": { - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "zone": { - "description": "[Output Only] The URL of the zone where the operation resides. Only available when performing per-zone operations. You must specify this field as part of the HTTP request URL. 
It is not settable as a field in the request body.", - "type": "string" } }, "type": "object" }, - "OperationAggregatedList": { - "id": "OperationAggregatedList", + "ResourceGroupReference": { + "id": "ResourceGroupReference", "properties": { - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "group": { + "description": "A URI referencing one of the instance groups or network endpoint groups listed in the backend service.", "type": "string" - }, - "items": { - "additionalProperties": { - "$ref": "OperationsScopedList", - "description": "[Output Only] Name of the scope containing this set of operations." + } + }, + "type": "object" + }, + "ResourcePoliciesScopedList": { + "id": "ResourcePoliciesScopedList", + "properties": { + "resourcePolicies": { + "description": "A list of resourcePolicies contained in this scope.", + "items": { + "$ref": "ResourcePolicy" }, - "description": "[Output Only] A map of scoped operation lists.", - "type": "object" - }, - "kind": { - "default": "compute#operationAggregatedList", - "description": "[Output Only] Type of resource. Always compute#operationAggregatedList for aggregated lists of operations.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" + "type": "array" }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "Informational warning which replaces the list of resourcePolicies when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -35260,24 +41116,88 @@ }, "type": "object" }, - "OperationList": { - "description": "Contains a list of Operation resources.", - "id": "OperationList", + "ResourcePolicy": { + "id": "ResourcePolicy", "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "type": "string" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#resourcePolicy", + "description": "[Output Only] Type of the resource. Always compute#resource_policies for resource policies.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.instances.insert" + ] + }, + "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "region": { + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined fully-qualified URL for this resource.", + "type": "string" + }, + "snapshotSchedulePolicy": { + "$ref": "ResourcePolicySnapshotSchedulePolicy", + "description": "Resource policy for persistent disks for creating snapshots." + }, + "status": { + "description": "[Output Only] The status of resource policy creation.", + "enum": [ + "CREATING", + "DELETING", + "INVALID", + "READY" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "ResourcePolicyAggregatedList": { + "description": "Contains a list of resourcePolicies.", + "id": "ResourcePolicyAggregatedList", + "properties": { + "etag": { + "type": "string" + }, + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "[Output Only] A list of Operation resources.", - "items": { - "$ref": "Operation" + "additionalProperties": { + "$ref": "ResourcePoliciesScopedList", + "description": "Name of the scope containing this set of resourcePolicies." }, - "type": "array" + "description": "A list of ResourcePolicy resources.", + "type": "object" }, "kind": { - "default": "compute#operationList", - "description": "[Output Only] Type of resource. Always compute#operations for Operations resource.", + "default": "compute#resourcePolicyAggregatedList", + "description": "Type of resource.", "type": "string" }, "nextPageToken": { @@ -35372,18 +41292,78 @@ }, "type": "object" }, - "OperationsScopedList": { - "id": "OperationsScopedList", + "ResourcePolicyDailyCycle": { + "description": "Time window specified for daily operations.", + "id": "ResourcePolicyDailyCycle", "properties": { - "operations": { - "description": "[Output Only] A list of operations contained in this scope.", + "daysInCycle": { + "description": "Defines a schedule that runs every nth day of the month.", + "format": "int32", + "type": "integer" + }, + "duration": { + "description": "[Output only] A predetermined duration for the window, automatically chosen to be the smallest possible in the given scenario.", + "type": "string" + }, + "startTime": { + "description": "Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.", + "type": "string" + } + }, + "type": "object" + }, + "ResourcePolicyHourlyCycle": { + "description": "Time window specified for hourly operations.", + "id": "ResourcePolicyHourlyCycle", + "properties": { + "duration": { + "description": "[Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario.", + "type": "string" + }, + "hoursInCycle": { + "description": "Allows to define schedule that runs every nth hour.", + "format": "int32", + "type": "integer" + }, + "startTime": { + "description": "Time within the window to start the operations. 
It must be in format \"HH:MM\", where HH : [00-23] and MM : [00-00] GMT.", + "type": "string" + } + }, + "type": "object" + }, + "ResourcePolicyList": { + "id": "ResourcePolicyList", + "properties": { + "etag": { + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "type": "string" + }, + "items": { + "description": "[Output Only] A list of ResourcePolicy resources.", "items": { - "$ref": "Operation" + "$ref": "ResourcePolicy" }, "type": "array" }, + "kind": { + "default": "compute#resourcePolicyList", + "description": "[Output Only] Type of resource.Always compute#resourcePoliciesList for listsof resourcePolicies", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "[Output Only] Informational warning which replaces the list of operations when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -35466,453 +41446,472 @@ }, "type": "object" }, - "PathMatcher": { - "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service will be used.", - "id": "PathMatcher", + "ResourcePolicySnapshotSchedulePolicy": { + "description": "A snapshot schedule policy specifies when and how frequently snapshots are to be created for the target disk. Also specifies how many and how long these scheduled snapshots should be retained.", + "id": "ResourcePolicySnapshotSchedulePolicy", "properties": { - "defaultService": { - "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService \nUse defaultService instead of defaultRouteAction when simple routing to a backend service is desired and other advanced capabilities like traffic splitting and URL rewrites are not required.\nOnly one of defaultService, defaultRouteAction or defaultUrlRedirect must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. 
Provide this property when you create the resource.", - "type": "string" - }, - "name": { - "description": "The name to which this PathMatcher is referred by the HostRule.", - "type": "string" + "retentionPolicy": { + "$ref": "ResourcePolicySnapshotSchedulePolicyRetentionPolicy", + "description": "Retention policy applied to snapshots created by this resource policy." }, - "pathRules": { - "description": "The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. Matches are always done on the longest-path-first basis.\nFor example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list.\nOnly one of pathRules or routeRules must be set.", - "items": { - "$ref": "PathRule" - }, - "type": "array" - } - }, - "type": "object" - }, - "PathRule": { - "description": "A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL.", - "id": "PathRule", - "properties": { - "paths": { - "description": "The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here.", - "items": { - "type": "string" - }, - "type": "array" + "schedule": { + "$ref": "ResourcePolicySnapshotSchedulePolicySchedule", + "description": "A Vm Maintenance Policy specifies what kind of infrastructure maintenance we are allowed to perform on this VM and when. Schedule that is applied to disks covered by this policy." }, - "service": { - "description": "The URL of the backend service resource if this rule is matched.\nUse service instead of routeAction when simple routing to a backend service is desired and other advanced capabilities like traffic splitting and rewrites are not required.\nOnly one of service, routeAction or urlRedirect should must be set.", - "type": "string" + "snapshotProperties": { + "$ref": "ResourcePolicySnapshotSchedulePolicySnapshotProperties", + "description": "Properties with which snapshots are created such as labels, encryption keys." } }, "type": "object" }, - "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources.\n\n\n\nA `Policy` consists of a list of `bindings`. A `binding` binds a list of `members` to a `role`, where the members can be user accounts, Google groups, Google domains, and service accounts. 
A `role` is a named list of permissions defined by IAM.\n\n**JSON Example**\n\n{ \"bindings\": [ { \"role\": \"roles/owner\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-other-app@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/viewer\", \"members\": [\"user:sean@example.com\"] } ] }\n\n**YAML Example**\n\nbindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-other-app@appspot.gserviceaccount.com role: roles/owner - members: - user:sean@example.com role: roles/viewer\n\n\n\nFor a description of IAM and its features, see the [IAM developer's guide](https://cloud.google.com/iam/docs).", - "id": "Policy", + "ResourcePolicySnapshotSchedulePolicyRetentionPolicy": { + "description": "Policy for retention of scheduled snapshots.", + "id": "ResourcePolicySnapshotSchedulePolicyRetentionPolicy", "properties": { - "auditConfigs": { - "description": "Specifies cloud audit logging configuration for this policy.", - "items": { - "$ref": "AuditConfig" - }, - "type": "array" - }, - "bindings": { - "description": "Associates a list of `members` to a `role`. `bindings` with no members will result in an error.", - "items": { - "$ref": "Binding" - }, - "type": "array" + "maxRetentionDays": { + "description": "Maximum age of the snapshot that is allowed to be kept.", + "format": "int32", + "type": "integer" }, - "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing policy is overwritten blindly.", - "format": "byte", + "onSourceDiskDelete": { + "description": "Specifies the behavior to apply to scheduled snapshots when the source disk is deleted.", + "enum": [ + "APPLY_RETENTION_POLICY", + "KEEP_AUTO_SNAPSHOTS", + "UNSPECIFIED_ON_SOURCE_DISK_DELETE" + ], + "enumDescriptions": [ + "", + "", + "" + ], "type": "string" + } + }, + "type": "object" + }, + "ResourcePolicySnapshotSchedulePolicySchedule": { + "description": "A schedule for disks where the schedueled operations are performed.", + "id": "ResourcePolicySnapshotSchedulePolicySchedule", + "properties": { + "dailySchedule": { + "$ref": "ResourcePolicyDailyCycle" }, - "iamOwned": { - "description": "", + "hourlySchedule": { + "$ref": "ResourcePolicyHourlyCycle" + }, + "weeklySchedule": { + "$ref": "ResourcePolicyWeeklyCycle" + } + }, + "type": "object" + }, + "ResourcePolicySnapshotSchedulePolicySnapshotProperties": { + "description": "Specified snapshot properties for scheduled snapshots created by this policy.", + "id": "ResourcePolicySnapshotSchedulePolicySnapshotProperties", + "properties": { + "guestFlush": { + "description": "Indication to perform a ?guest aware? snapshot.", "type": "boolean" }, - "rules": { - "description": "If more than one rule is specified, the rules are applied in the following manner: - All matching LOG rules are always applied. - If any DENY/DENY_WITH_LOG rule matches, permission is denied. 
Logging will be applied if one or more matching rule requires logging. - Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is granted. Logging will be applied if one or more matching rule requires logging. - Otherwise, if no rule applies, permission is denied.", + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to scheduled snapshots. These can be later modified by the setLabels method. Label values may be empty.", + "type": "object" + }, + "storageLocations": { + "description": "GCS bucket storage location of the auto snapshot (regional or multi-regional).", "items": { - "$ref": "Rule" + "type": "string" }, "type": "array" - }, - "version": { - "description": "Deprecated.", - "format": "int32", - "type": "integer" } }, "type": "object" }, - "PreconfiguredWafSet": { - "id": "PreconfiguredWafSet", + "ResourcePolicyWeeklyCycle": { + "description": "Time window specified for weekly operations.", + "id": "ResourcePolicyWeeklyCycle", "properties": { - "expressionSets": { - "description": "List of entities that are currently supported for WAF rules.", + "dayOfWeeks": { + "description": "Up to 7 intervals/windows, one for each day of the week.", "items": { - "$ref": "WafExpressionSet" + "$ref": "ResourcePolicyWeeklyCycleDayOfWeek" }, "type": "array" } }, "type": "object" }, - "Project": { - "description": "A Project resource. For an overview of projects, see Cloud Platform Resource Hierarchy. (== resource_for v1.projects ==) (== resource_for beta.projects ==)", - "id": "Project", + "ResourcePolicyWeeklyCycleDayOfWeek": { + "id": "ResourcePolicyWeeklyCycleDayOfWeek", "properties": { - "commonInstanceMetadata": { - "$ref": "Metadata", - "description": "Metadata key/value pairs available to all instances contained in this project. See Custom metadata for more information." - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "defaultNetworkTier": { - "description": "This signifies the default network tier used for configuring resources of the project and can only take the following values: PREMIUM, STANDARD. Initially the default network tier is PREMIUM.", + "day": { + "description": "Allows to define schedule that runs specified day of the week.", "enum": [ - "PREMIUM", - "STANDARD" + "FRIDAY", + "INVALID", + "MONDAY", + "SATURDAY", + "SUNDAY", + "THURSDAY", + "TUESDAY", + "WEDNESDAY" ], "enumDescriptions": [ + "", + "", + "", + "", + "", + "", "", "" ], "type": "string" }, - "defaultServiceAccount": { - "description": "[Output Only] Default service account used by VMs running in this project.", + "duration": { + "description": "[Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario.", + "type": "string" + }, + "startTime": { + "description": "Time within the window to start the operations. It must be in format \"HH:MM\", where HH : [00-23] and MM : [00-00] GMT.", + "type": "string" + } + }, + "type": "object" + }, + "Route": { + "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving an instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. 
If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped. (== resource_for beta.routes ==) (== resource_for v1.routes ==)", + "id": "Route", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, "description": { - "description": "An optional textual description of the resource.", + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "enabledFeatures": { - "description": "Restricted features enabled for use on this project.", - "items": { - "type": "string" + "destRange": { + "annotations": { + "required": [ + "compute.routes.insert" + ] }, - "type": "array" + "description": "The destination range of outgoing packets that this route applies to. Only IPv4 is supported.", + "type": "string" }, "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server. This is not the project ID, and is just a unique ID used by Compute Engine to identify resources.", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", "type": "string" }, "kind": { - "default": "compute#project", - "description": "[Output Only] Type of the resource. Always compute#project for projects.", + "default": "compute#route", + "description": "[Output Only] Type of this resource. Always compute#routes for Route resources.", "type": "string" }, "name": { - "description": "The project ID. For example: my-example-project. Use the project ID to make requests to Compute Engine.", + "annotations": { + "required": [ + "compute.routes.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "quotas": { - "description": "[Output Only] Quotas assigned to this project.", - "items": { - "$ref": "Quota" + "network": { + "annotations": { + "required": [ + "compute.routes.insert" + ] }, - "type": "array" + "description": "Fully-qualified URL of the network that this route applies to.", + "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "nextHopGateway": { + "description": "The URL to a gateway that should handle matching packets. You can only specify the internet gateway using a full or partial valid URL: projects/\u003cproject-id\u003e/global/gateways/default-internet-gateway", "type": "string" }, - "usageExportLocation": { - "$ref": "UsageExportLocation", - "description": "The naming prefix for daily usage reports and the Google Cloud Storage bucket where they are stored." 
+ "nextHopIlb": { + "description": "The URL to a forwarding rule of type loadBalancingScheme=INTERNAL that should handle matching packets. You can only specify the forwarding rule as a partial or full URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule \n- regions/region/forwardingRules/forwardingRule Note that this can only be used when the destination_range is a public (non-RFC 1918) IP CIDR range.", + "type": "string" }, - "xpnProjectStatus": { - "description": "[Output Only] The role this project has in a shared VPC configuration. Currently only HOST projects are differentiated.", - "enum": [ - "HOST", - "UNSPECIFIED_XPN_PROJECT_STATUS" - ], - "enumDescriptions": [ - "", - "" - ], + "nextHopInstance": { + "description": "The URL to an instance that should handle matching packets. You can specify this as a full or partial URL. For example:\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/", "type": "string" - } - }, - "type": "object" - }, - "ProjectsDisableXpnResourceRequest": { - "id": "ProjectsDisableXpnResourceRequest", - "properties": { - "xpnResource": { - "$ref": "XpnResourceId", - "description": "Service resource (a.k.a service project) ID." - } - }, - "type": "object" - }, - "ProjectsEnableXpnResourceRequest": { - "id": "ProjectsEnableXpnResourceRequest", - "properties": { - "xpnResource": { - "$ref": "XpnResourceId", - "description": "Service resource (a.k.a service project) ID." - } - }, - "type": "object" - }, - "ProjectsGetXpnResources": { - "id": "ProjectsGetXpnResources", - "properties": { - "kind": { - "default": "compute#projectsGetXpnResources", - "description": "[Output Only] Type of resource. Always compute#projectsGetXpnResources for lists of service resources (a.k.a service projects)", + }, + "nextHopInterconnectAttachment": { + "description": "[Output Only] The URL to an InterconnectAttachment which is the next hop for the route. This field will only be populated for the dynamic routes generated by Cloud Router with a linked interconnectAttachment.", "type": "string" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "nextHopIp": { + "description": "The network IP address of an instance that should handle matching packets. Only IPv4 is supported.", "type": "string" }, - "resources": { - "description": "Service resources (a.k.a service projects) attached to this project as their shared VPC host.", - "items": { - "$ref": "XpnResourceId" - }, - "type": "array" - } - }, - "type": "object" - }, - "ProjectsListXpnHostsRequest": { - "id": "ProjectsListXpnHostsRequest", - "properties": { - "organization": { - "description": "Optional organization ID managed by Cloud Resource Manager, for which to list shared VPC host projects. 
If not specified, the organization will be inferred from the project.", + "nextHopNetwork": { + "description": "The URL of the local network if it should handle matching packets.", "type": "string" - } - }, - "type": "object" - }, - "ProjectsSetDefaultNetworkTierRequest": { - "id": "ProjectsSetDefaultNetworkTierRequest", - "properties": { - "networkTier": { - "description": "Default network tier to be set.", - "enum": [ - "PREMIUM", - "STANDARD" - ], - "enumDescriptions": [ - "", - "" - ], + }, + "nextHopPeering": { + "description": "[Output Only] The network peering name that should handle matching packets, which should conform to RFC1035.", "type": "string" - } - }, - "type": "object" - }, - "Quota": { - "description": "A quotas entry.", - "id": "Quota", - "properties": { - "limit": { - "description": "[Output Only] Quota limit for this metric.", - "format": "double", - "type": "number" }, - "metric": { - "description": "[Output Only] Name of the quota metric.", - "enum": [ - "AUTOSCALERS", - "BACKEND_BUCKETS", - "BACKEND_SERVICES", - "COMMITMENTS", - "CPUS", - "CPUS_ALL_REGIONS", - "DISKS_TOTAL_GB", - "FIREWALLS", - "FORWARDING_RULES", - "GLOBAL_INTERNAL_ADDRESSES", - "GPUS_ALL_REGIONS", - "HEALTH_CHECKS", - "IMAGES", - "INSTANCES", - "INSTANCE_GROUPS", - "INSTANCE_GROUP_MANAGERS", - "INSTANCE_TEMPLATES", - "INTERCONNECTS", - "INTERCONNECT_ATTACHMENTS_PER_REGION", - "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS", - "INTERNAL_ADDRESSES", - "IN_USE_ADDRESSES", - "IN_USE_BACKUP_SCHEDULES", - "LOCAL_SSD_TOTAL_GB", - "NETWORKS", - "NETWORK_ENDPOINT_GROUPS", - "NVIDIA_K80_GPUS", - "NVIDIA_P100_GPUS", - "NVIDIA_P100_VWS_GPUS", - "NVIDIA_P4_GPUS", - "NVIDIA_P4_VWS_GPUS", - "NVIDIA_T4_GPUS", - "NVIDIA_T4_VWS_GPUS", - "NVIDIA_V100_GPUS", - "PREEMPTIBLE_CPUS", - "PREEMPTIBLE_LOCAL_SSD_GB", - "PREEMPTIBLE_NVIDIA_K80_GPUS", - "PREEMPTIBLE_NVIDIA_P100_GPUS", - "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS", - "PREEMPTIBLE_NVIDIA_P4_GPUS", - "PREEMPTIBLE_NVIDIA_P4_VWS_GPUS", - "PREEMPTIBLE_NVIDIA_T4_GPUS", - "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS", - "PREEMPTIBLE_NVIDIA_V100_GPUS", - "REGIONAL_AUTOSCALERS", - "REGIONAL_INSTANCE_GROUP_MANAGERS", - "RESOURCE_POLICIES", - "ROUTERS", - "ROUTES", - "SECURITY_POLICIES", - "SECURITY_POLICY_RULES", - "SNAPSHOTS", - "SSD_TOTAL_GB", - "SSL_CERTIFICATES", - "STATIC_ADDRESSES", - "SUBNETWORKS", - "TARGET_HTTPS_PROXIES", - "TARGET_HTTP_PROXIES", - "TARGET_INSTANCES", - "TARGET_POOLS", - "TARGET_SSL_PROXIES", - "TARGET_TCP_PROXIES", - "TARGET_VPN_GATEWAYS", - "URL_MAPS", - "VPN_GATEWAYS", - "VPN_TUNNELS" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], + "nextHopVpnTunnel": { + "description": "The URL to a VpnTunnel that should handle matching packets.", "type": "string" }, - "usage": { - "description": "[Output Only] Current usage of this metric.", - "format": "double", - "type": "number" + "priority": { + "annotations": { + "required": [ + "compute.routes.insert" + ] + }, + "description": "The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. 
In the case of two routes with equal prefix length, the one with the lowest-numbered priority value wins. Default value is 1000. Valid range is 0 through 65535.", + "format": "uint32", + "type": "integer" + }, + "selfLink": { + "description": "[Output Only] Server-defined fully-qualified URL for this resource.", + "type": "string" + }, + "tags": { + "annotations": { + "required": [ + "compute.routes.insert" + ] + }, + "description": "A list of instance tags to which this route applies.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warnings": { + "description": "[Output Only] If potential misconfigurations are detected for this route, this field will be populated with warning messages.", + "items": { + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" } }, "type": "object" }, - "Reference": { - "description": "Represents a reference to a resource.", - "id": "Reference", + "RouteList": { + "description": "Contains a list of Route resources.", + "id": "RouteList", "properties": { - "kind": { - "default": "compute#reference", - "description": "[Output Only] Type of the resource. Always compute#reference for references.", + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, - "referenceType": { - "description": "A description of the reference type with no implied semantics. 
Possible values include: \n- MEMBER_OF", + "items": { + "description": "A list of Route resources.", + "items": { + "$ref": "Route" + }, + "type": "array" + }, + "kind": { + "default": "compute#routeList", + "description": "Type of resource.", "type": "string" }, - "referrer": { - "description": "URL of the resource which refers to the target.", + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, - "target": { - "description": "URL of the resource to which this reference points.", + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "Region": { - "description": "Region resource. (== resource_for beta.regions ==) (== resource_for v1.regions ==)", - "id": "Region", + "Router": { + "description": "Router resource.", + "id": "Router", "properties": { + "bgp": { + "$ref": "RouterBgp", + "description": "BGP information specific to this router." 
+ }, + "bgpPeers": { + "description": "BGP information that must be configured into the routing stack to establish BGP peering. This information must specify the peer ASN and either the interface name, IP address, or peer IP address. Please refer to RFC4273.", + "items": { + "$ref": "RouterBgpPeer" + }, + "type": "array" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this region." - }, "description": { - "description": "[Output Only] Textual description of the resource.", + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, "id": { @@ -35920,65 +41919,88 @@ "format": "uint64", "type": "string" }, + "interfaces": { + "description": "Router interfaces. Each interface requires either one linked resource, (for example, linkedVpnTunnel), or IP address and IP address range (for example, ipRange), or both.", + "items": { + "$ref": "RouterInterface" + }, + "type": "array" + }, "kind": { - "default": "compute#region", - "description": "[Output Only] Type of the resource. Always compute#region for regions.", + "default": "compute#router", + "description": "[Output Only] Type of resource. Always compute#router for routers.", "type": "string" }, "name": { - "description": "[Output Only] Name of the resource.", + "annotations": { + "required": [ + "compute.routers.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "quotas": { - "description": "[Output Only] Quotas assigned to this region.", + "nats": { + "description": "A list of NAT services created in this router.", "items": { - "$ref": "Quota" + "$ref": "RouterNat" }, "type": "array" }, + "network": { + "annotations": { + "required": [ + "compute.routers.insert" + ] + }, + "description": "URI of the network to which this router belongs.", + "type": "string" + }, + "region": { + "description": "[Output Only] URI of the region where the router resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" - }, - "status": { - "description": "[Output Only] Status of the region, either UP or DOWN.", - "enum": [ - "DOWN", - "UP" - ], - "enumDescriptions": [ - "", - "" - ], + } + }, + "type": "object" + }, + "RouterAdvertisedIpRange": { + "description": "Description-tagged IP ranges for the router to advertise.", + "id": "RouterAdvertisedIpRange", + "properties": { + "description": { + "description": "User-specified description for the IP range.", "type": "string" }, - "zones": { - "description": "[Output Only] A list of zones available in this region, in the form of resource URLs.", - "items": { - "type": "string" - }, - "type": "array" + "range": { + "description": "The IP range to advertise. 
The value must be a CIDR-formatted string.", + "type": "string" } }, "type": "object" }, - "RegionAutoscalerList": { - "description": "Contains a list of autoscalers.", - "id": "RegionAutoscalerList", + "RouterAggregatedList": { + "description": "Contains a list of routers.", + "id": "RouterAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Autoscaler resources.", - "items": { - "$ref": "Autoscaler" + "additionalProperties": { + "$ref": "RoutersScopedList", + "description": "Name of the scope containing this set of routers." }, - "type": "array" + "description": "A list of Router resources.", + "type": "object" }, "kind": { - "default": "compute#regionAutoscalerList", + "default": "compute#routerAggregatedList", "description": "Type of resource.", "type": "string" }, @@ -36074,23 +42096,179 @@ }, "type": "object" }, - "RegionDiskTypeList": { - "id": "RegionDiskTypeList", + "RouterBgp": { + "id": "RouterBgp", + "properties": { + "advertiseMode": { + "description": "User-specified flag to indicate which mode to use for advertisement. The options are DEFAULT or CUSTOM.", + "enum": [ + "CUSTOM", + "DEFAULT" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "advertisedGroups": { + "description": "User-specified list of prefix groups to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These groups will be advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", + "items": { + "enum": [ + "ALL_SUBNETS" + ], + "enumDescriptions": [ + "" + ], + "type": "string" + }, + "type": "array" + }, + "advertisedIpRanges": { + "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges.", + "items": { + "$ref": "RouterAdvertisedIpRange" + }, + "type": "array" + }, + "asn": { + "description": "Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. All VPN tunnels that link to this router will have the same local ASN.", + "format": "uint32", + "type": "integer" + } + }, + "type": "object" + }, + "RouterBgpPeer": { + "id": "RouterBgpPeer", + "properties": { + "advertiseMode": { + "description": "User-specified flag to indicate which mode to use for advertisement.", + "enum": [ + "CUSTOM", + "DEFAULT" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "advertisedGroups": { + "description": "User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: \n- ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. \n- ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. \n- ALL_PEER_VPC_SUBNETS: Advertises peer subnets of the router's VPC network. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). These groups are advertised in addition to any specified prefixes. 
Leave this field blank to advertise no custom groups.", + "items": { + "enum": [ + "ALL_SUBNETS" + ], + "enumDescriptions": [ + "" + ], + "type": "string" + }, + "type": "array" + }, + "advertisedIpRanges": { + "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). These IP ranges are advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges.", + "items": { + "$ref": "RouterAdvertisedIpRange" + }, + "type": "array" + }, + "advertisedRoutePriority": { + "description": "The priority of routes advertised to this BGP peer. Where there is more than one matching route of maximum length, the routes with the lowest priority value win.", + "format": "uint32", + "type": "integer" + }, + "interfaceName": { + "description": "Name of the interface the BGP peer is associated with.", + "type": "string" + }, + "ipAddress": { + "description": "IP address of the interface inside Google Cloud Platform. Only IPv4 is supported.", + "type": "string" + }, + "managementType": { + "description": "[Output Only] The resource that configures and manages this BGP peer. \n- MANAGED_BY_USER is the default value and can be managed by you or other users \n- MANAGED_BY_ATTACHMENT is a BGP peer that is configured and managed by Cloud Interconnect, specifically by an InterconnectAttachment of type PARTNER. Google automatically creates, updates, and deletes this type of BGP peer when the PARTNER InterconnectAttachment is created, updated, or deleted.", + "enum": [ + "MANAGED_BY_ATTACHMENT", + "MANAGED_BY_USER" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "name": { + "description": "Name of this BGP peer. The name must be 1-63 characters long and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "peerAsn": { + "description": "Peer BGP Autonomous System Number (ASN). Each BGP interface may use a different value.", + "format": "uint32", + "type": "integer" + }, + "peerIpAddress": { + "description": "IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported.", + "type": "string" + } + }, + "type": "object" + }, + "RouterInterface": { + "id": "RouterInterface", + "properties": { + "ipRange": { + "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP address space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface.", + "type": "string" + }, + "linkedInterconnectAttachment": { + "description": "URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be either be a VPN tunnel or an Interconnect attachment.", + "type": "string" + }, + "linkedVpnTunnel": { + "description": "URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have one linked resource, which can be either a VPN tunnel or an Interconnect attachment.", + "type": "string" + }, + "managementType": { + "description": "[Output Only] The resource that configures and manages this interface. \n- MANAGED_BY_USER is the default value and can be managed directly by users. 
\n- MANAGED_BY_ATTACHMENT is an interface that is configured and managed by Cloud Interconnect, specifically, by an InterconnectAttachment of type PARTNER. Google automatically creates, updates, and deletes this type of interface when the PARTNER InterconnectAttachment is created, updated, or deleted.", + "enum": [ + "MANAGED_BY_ATTACHMENT", + "MANAGED_BY_USER" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "name": { + "description": "Name of this interface entry. The name must be 1-63 characters long and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + } + }, + "type": "object" + }, + "RouterList": { + "description": "Contains a list of Router resources.", + "id": "RouterList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of DiskType resources.", + "description": "A list of Router resources.", "items": { - "$ref": "DiskType" + "$ref": "Router" }, "type": "array" }, "kind": { - "default": "compute#regionDiskTypeList", - "description": "[Output Only] Type of resource. Always compute#regionDiskTypeList for region disk types.", + "default": "compute#routerList", + "description": "[Output Only] Type of resource. Always compute#router for routers.", "type": "string" }, "nextPageToken": { @@ -36180,29 +42358,282 @@ "type": "string" } }, - "type": "object" + "type": "object" + } + }, + "type": "object" + }, + "RouterNat": { + "description": "Represents a Nat resource. It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided.", + "id": "RouterNat", + "properties": { + "icmpIdleTimeoutSec": { + "description": "Timeout (in seconds) for ICMP connections. Defaults to 30s if not set.", + "format": "int32", + "type": "integer" + }, + "logConfig": { + "$ref": "RouterNatLogConfig", + "description": "Configure logging on this NAT." + }, + "minPortsPerVm": { + "description": "Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. This is rounded up to the nearest power of 2. For example, if the value of this field is 50, at least 64 ports are allocated to a VM.", + "format": "int32", + "type": "integer" + }, + "name": { + "description": "Unique name of this Nat service. The name must be 1-63 characters long and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "natIpAllocateOption": { + "description": "Specify the NatIpAllocateOption, which can take one of the following values: \n- MANUAL_ONLY: Uses only Nat IP addresses provided by customers. When there are not enough specified Nat IPs, the Nat service fails for new VMs. \n- AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; customers can't specify any Nat IPs. When choosing AUTO_ONLY, then nat_ip should be empty.", + "enum": [ + "AUTO_ONLY", + "MANUAL_ONLY" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "natIps": { + "description": "A list of URLs of the IP resources used for this Nat service. 
These IP addresses must be valid static external IP addresses assigned to the project.", + "items": { + "type": "string" + }, + "type": "array" + }, + "sourceSubnetworkIpRangesToNat": { + "description": "Specify the Nat option, which can take one of the following values: \n- ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. \n- ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. \n- LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", + "enum": [ + "ALL_SUBNETWORKS_ALL_IP_RANGES", + "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", + "LIST_OF_SUBNETWORKS" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "subnetworks": { + "description": "A list of Subnetwork resources whose traffic should be translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS is selected for the SubnetworkIpRangeToNatOption above.", + "items": { + "$ref": "RouterNatSubnetworkToNat" + }, + "type": "array" + }, + "tcpEstablishedIdleTimeoutSec": { + "description": "Timeout (in seconds) for TCP established connections. Defaults to 1200s if not set.", + "format": "int32", + "type": "integer" + }, + "tcpTransitoryIdleTimeoutSec": { + "description": "Timeout (in seconds) for TCP transitory connections. Defaults to 30s if not set.", + "format": "int32", + "type": "integer" + }, + "udpIdleTimeoutSec": { + "description": "Timeout (in seconds) for UDP connections. Defaults to 30s if not set.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "RouterNatLogConfig": { + "description": "Configuration of logging on a NAT.", + "id": "RouterNatLogConfig", + "properties": { + "enable": { + "description": "Indicates whether or not to export logs. This is false by default.", + "type": "boolean" + }, + "filter": { + "description": "Specifies the desired filtering of logs on this NAT. If unspecified, logs are exported for all connections handled by this NAT.", + "enum": [ + "ALL", + "ERRORS_ONLY", + "TRANSLATIONS_ONLY" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "RouterNatSubnetworkToNat": { + "description": "Defines the IP ranges that want to use NAT for a subnetwork.", + "id": "RouterNatSubnetworkToNat", + "properties": { + "name": { + "description": "URL for the subnetwork resource that will use NAT.", + "type": "string" + }, + "secondaryIpRangeNames": { + "description": "A list of the secondary ranges of the Subnetwork that are allowed to use NAT. This can be populated only if \"LIST_OF_SECONDARY_IP_RANGES\" is one of the values in source_ip_ranges_to_nat.", + "items": { + "type": "string" + }, + "type": "array" + }, + "sourceIpRangesToNat": { + "description": "Specify the options for NAT ranges in the Subnetwork. All options of a single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. 
The only valid option with multiple values is: [\"PRIMARY_IP_RANGE\", \"LIST_OF_SECONDARY_IP_RANGES\"] Default: [ALL_IP_RANGES]", + "items": { + "enum": [ + "ALL_IP_RANGES", + "LIST_OF_SECONDARY_IP_RANGES", + "PRIMARY_IP_RANGE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "RouterStatus": { + "id": "RouterStatus", + "properties": { + "bestRoutes": { + "description": "Best routes for this router's network.", + "items": { + "$ref": "Route" + }, + "type": "array" + }, + "bestRoutesForRouter": { + "description": "Best routes learned by this router.", + "items": { + "$ref": "Route" + }, + "type": "array" + }, + "bgpPeerStatus": { + "items": { + "$ref": "RouterStatusBgpPeerStatus" + }, + "type": "array" + }, + "natStatus": { + "items": { + "$ref": "RouterStatusNatStatus" + }, + "type": "array" + }, + "network": { + "description": "URI of the network to which this router belongs.", + "type": "string" } }, "type": "object" }, - "RegionDisksAddResourcePoliciesRequest": { - "id": "RegionDisksAddResourcePoliciesRequest", + "RouterStatusBgpPeerStatus": { + "id": "RouterStatusBgpPeerStatus", "properties": { - "resourcePolicies": { - "description": "Resource policies to be added to this disk.", + "advertisedRoutes": { + "description": "Routes that were advertised to the remote BGP peer", "items": { - "type": "string" + "$ref": "Route" }, "type": "array" + }, + "ipAddress": { + "description": "IP address of the local BGP interface.", + "type": "string" + }, + "linkedVpnTunnel": { + "description": "URL of the VPN tunnel that this BGP peer controls.", + "type": "string" + }, + "name": { + "description": "Name of this BGP peer. Unique within the Routers resource.", + "type": "string" + }, + "numLearnedRoutes": { + "description": "Number of routes learned from the remote BGP Peer.", + "format": "uint32", + "type": "integer" + }, + "peerIpAddress": { + "description": "IP address of the remote BGP interface.", + "type": "string" + }, + "state": { + "description": "BGP state as specified in RFC1771.", + "type": "string" + }, + "status": { + "description": "Status of the BGP peer: {UP, DOWN}", + "enum": [ + "DOWN", + "UNKNOWN", + "UP" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "uptime": { + "description": "Time this session has been up. Format: 14 years, 51 weeks, 6 days, 23 hours, 59 minutes, 59 seconds", + "type": "string" + }, + "uptimeSeconds": { + "description": "Time this session has been up, in seconds. Format: 145", + "type": "string" } }, "type": "object" }, - "RegionDisksRemoveResourcePoliciesRequest": { - "id": "RegionDisksRemoveResourcePoliciesRequest", + "RouterStatusNatStatus": { + "description": "Status of a NAT contained in this router. Next tag: 9", + "id": "RouterStatusNatStatus", "properties": { - "resourcePolicies": { - "description": "Resource policies to be removed from this disk.", + "autoAllocatedNatIps": { + "description": "A list of IPs auto-allocated for NAT. Example: [\"1.1.1.1\", \"129.2.16.89\"]", + "items": { + "type": "string" + }, + "type": "array" + }, + "minExtraNatIpsNeeded": { + "description": "The number of extra IPs to allocate. This will be greater than 0 only if user-specified IPs are NOT enough to allow all configured VMs to use NAT. 
This value is meaningful only when auto-allocation of NAT IPs is *not* used.", + "format": "int32", + "type": "integer" + }, + "name": { + "description": "Unique name of this NAT.", + "type": "string" + }, + "numVmEndpointsWithNatMappings": { + "description": "Number of VM endpoints (i.e., Nics) that can use NAT.", + "format": "int32", + "type": "integer" + }, + "userAllocatedNatIpResources": { + "description": "A list of fully qualified URLs of reserved IP address resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "userAllocatedNatIps": { + "description": "A list of IPs user-allocated for NAT. They will be raw IP strings like \"179.12.26.133\".", "items": { "type": "string" }, @@ -36211,47 +42642,42 @@ }, "type": "object" }, - "RegionDisksResizeRequest": { - "id": "RegionDisksResizeRequest", + "RouterStatusResponse": { + "id": "RouterStatusResponse", "properties": { - "sizeGb": { - "description": "The new size of the regional persistent disk, which is specified in GB.", - "format": "int64", + "kind": { + "default": "compute#routerStatusResponse", + "description": "Type of resource.", "type": "string" + }, + "result": { + "$ref": "RouterStatus" } }, "type": "object" }, - "RegionInstanceGroupList": { - "description": "Contains a list of InstanceGroup resources.", - "id": "RegionInstanceGroupList", + "RoutersPreviewResponse": { + "id": "RoutersPreviewResponse", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of InstanceGroup resources.", + "resource": { + "$ref": "Router", + "description": "Preview of given router." + } + }, + "type": "object" + }, + "RoutersScopedList": { + "id": "RoutersScopedList", + "properties": { + "routers": { + "description": "A list of routers contained in this scope.", "items": { - "$ref": "InstanceGroup" + "$ref": "Router" }, "type": "array" }, - "kind": { - "default": "compute#regionInstanceGroupList", - "description": "The resource type.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "Informational warning which replaces the list of routers when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -36334,123 +42760,64 @@ }, "type": "object" }, - "RegionInstanceGroupManagerList": { - "description": "Contains a list of managed instance groups.", - "id": "RegionInstanceGroupManagerList", + "Rule": { + "description": "A rule to be applied in a Policy.", + "id": "Rule", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "action": { + "description": "Required", + "enum": [ + "ALLOW", + "ALLOW_WITH_LOG", + "DENY", + "DENY_WITH_LOG", + "LOG", + "NO_ACTION" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], "type": "string" }, - "items": { - "description": "A list of InstanceGroupManager resources.", + "conditions": { + "description": "Additional restrictions that must be met. All conditions must pass for the rule to match.", "items": { - "$ref": "InstanceGroupManager" + "$ref": "Condition" }, "type": "array" }, - "kind": { - "default": "compute#regionInstanceGroupManagerList", - "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups that exist in th regional scope.", + "description": { + "description": "Human-readable description of the rule.", "type": "string" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" + "ins": { + "description": "If one or more 'in' clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.", + "items": { + "type": "string" + }, + "type": "array" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" + "logConfigs": { + "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries that match the LOG action.", + "items": { + "$ref": "LogConfig" + }, + "type": "array" }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } + "notIns": { + "description": "If one or more 'not_in' clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.", + "items": { + "type": "string" }, - "type": "object" - } - }, - "type": "object" - }, - "RegionInstanceGroupManagersAbandonInstancesRequest": { - "id": "RegionInstanceGroupManagersAbandonInstancesRequest", - "properties": { - "instances": { - "description": "The URLs of one or more instances to abandon. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "type": "array" + }, + "permissions": { + "description": "A permission is a string of form '..' (e.g., 'storage.buckets.list'). A value of '*' matches all permissions, and a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.", "items": { "type": "string" }, @@ -36459,41 +42826,113 @@ }, "type": "object" }, - "RegionInstanceGroupManagersDeleteInstancesRequest": { - "id": "RegionInstanceGroupManagersDeleteInstancesRequest", + "SSLHealthCheck": { + "id": "SSLHealthCheck", "properties": { - "instances": { - "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { - "type": "string" - }, - "type": "array" + "port": { + "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", + "format": "int32", + "type": "integer" + }, + "portName": { + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "type": "string" + }, + "portSpecification": { + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, SSL health check follows behavior specified in\nport\nand\nportName\nfields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "proxyHeader": { + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. 
The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "request": { + "description": "The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.", + "type": "string" + }, + "response": { + "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.", + "type": "string" } }, "type": "object" }, - "RegionInstanceGroupManagersListInstancesResponse": { - "id": "RegionInstanceGroupManagersListInstancesResponse", + "Scheduling": { + "description": "Sets the scheduling options for an Instance.", + "id": "Scheduling", "properties": { - "managedInstances": { - "description": "A list of managed instances.", + "automaticRestart": { + "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted.\n\nBy default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine.", + "type": "boolean" + }, + "nodeAffinities": { + "description": "A set of node affinity and anti-affinity.", "items": { - "$ref": "ManagedInstance" + "$ref": "SchedulingNodeAffinity" }, "type": "array" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "onHostMaintenance": { + "description": "Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options.", + "enum": [ + "MIGRATE", + "TERMINATE" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" + }, + "preemptible": { + "description": "Defines whether the instance is preemptible. This can only be set during instance creation, it cannot be set or changed after the instance has been created.", + "type": "boolean" } }, "type": "object" }, - "RegionInstanceGroupManagersRecreateRequest": { - "id": "RegionInstanceGroupManagersRecreateRequest", + "SchedulingNodeAffinity": { + "description": "Node Affinity: the configuration of desired nodes onto which this Instance could be scheduled.", + "id": "SchedulingNodeAffinity", "properties": { - "instances": { - "description": "The URLs of one or more instances to recreate. 
This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "key": { + "description": "Corresponds to the label key of Node resource.", + "type": "string" + }, + "operator": { + "description": "Defines the operation of node selection.", + "enum": [ + "IN", + "NOT_IN", + "OPERATOR_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "values": { + "description": "Corresponds to the label values of Node resource.", "items": { "type": "string" }, @@ -36502,73 +42941,105 @@ }, "type": "object" }, - "RegionInstanceGroupManagersSetAutoHealingRequest": { - "id": "RegionInstanceGroupManagersSetAutoHealingRequest", + "SecurityPoliciesListPreconfiguredExpressionSetsResponse": { + "id": "SecurityPoliciesListPreconfiguredExpressionSetsResponse", "properties": { - "autoHealingPolicies": { - "items": { - "$ref": "InstanceGroupManagerAutoHealingPolicy" - }, - "type": "array" + "preconfiguredExpressionSets": { + "$ref": "SecurityPoliciesWafConfig" } }, "type": "object" }, - "RegionInstanceGroupManagersSetTargetPoolsRequest": { - "id": "RegionInstanceGroupManagersSetTargetPoolsRequest", + "SecurityPoliciesWafConfig": { + "id": "SecurityPoliciesWafConfig", + "properties": { + "wafRules": { + "$ref": "PreconfiguredWafSet" + } + }, + "type": "object" + }, + "SecurityPolicy": { + "description": "A security policy is comprised of one or more rules. It can also be associated with one or more 'targets'. (== resource_for v1.securityPolicies ==) (== resource_for beta.securityPolicies ==)", + "id": "SecurityPolicy", "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, "fingerprint": { - "description": "Fingerprint of the target pools information, which is a hash of the contents. This field is used for optimistic locking when you update the target pool entries. This field is optional.", + "description": "Specifies a fingerprint for this resource, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make get() request to the security policy.", "format": "byte", "type": "string" }, - "targetPools": { - "description": "The URL of all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.", - "items": { + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#securityPolicy", + "description": "[Output only] Type of the resource. Always compute#securityPolicyfor security policies", + "type": "string" + }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this security policy, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. 
You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make get() request to the security policy.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { "type": "string" }, + "description": "Labels to apply to this security policy resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", + "type": "object" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "rules": { + "description": "A list of rules that belong to this policy. There must always be a default rule (rule with priority 2147483647 and match \"*\"). If no rules are provided when creating a security policy, a default rule with action \"allow\" will be added.", + "items": { + "$ref": "SecurityPolicyRule" + }, "type": "array" - } - }, - "type": "object" - }, - "RegionInstanceGroupManagersSetTemplateRequest": { - "id": "RegionInstanceGroupManagersSetTemplateRequest", - "properties": { - "instanceTemplate": { - "description": "URL of the InstanceTemplate resource from which all new instances will be created.", + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" } }, "type": "object" }, - "RegionInstanceGroupsListInstances": { - "id": "RegionInstanceGroupsListInstances", + "SecurityPolicyList": { + "id": "SecurityPolicyList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of InstanceWithNamedPorts resources.", + "description": "A list of SecurityPolicy resources.", "items": { - "$ref": "InstanceWithNamedPorts" + "$ref": "SecurityPolicy" }, "type": "array" }, "kind": { - "default": "compute#regionInstanceGroupsListInstances", - "description": "The resource type.", + "default": "compute#securityPolicyList", + "description": "[Output Only] Type of resource. Always compute#securityPolicyList for listsof securityPolicies", "type": "string" }, "nextPageToken": { "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -36653,247 +43124,457 @@ }, "type": "object" }, - "RegionInstanceGroupsListInstancesRequest": { - "id": "RegionInstanceGroupsListInstancesRequest", + "SecurityPolicyReference": { + "id": "SecurityPolicyReference", "properties": { - "instanceState": { - "description": "Instances in which state should be returned. Valid options are: 'ALL', 'RUNNING'. 
By default, it lists all instances.", + "securityPolicy": { + "type": "string" + } + }, + "type": "object" + }, + "SecurityPolicyRule": { + "description": "Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny).", + "id": "SecurityPolicyRule", + "properties": { + "action": { + "description": "The Action to preform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "kind": { + "default": "compute#securityPolicyRule", + "description": "[Output only] Type of the resource. Always compute#securityPolicyRule for security policy rules", + "type": "string" + }, + "match": { + "$ref": "SecurityPolicyRuleMatcher", + "description": "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding ?action? is enforced." + }, + "preview": { + "description": "If set to true, the specified action is not enforced.", + "type": "boolean" + }, + "priority": { + "description": "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "SecurityPolicyRuleMatcher": { + "description": "Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified.", + "id": "SecurityPolicyRuleMatcher", + "properties": { + "config": { + "$ref": "SecurityPolicyRuleMatcherConfig", + "description": "The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified." + }, + "expr": { + "$ref": "Expr", + "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header." + }, + "versionedExpr": { + "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", "enum": [ - "ALL", - "RUNNING" + "SRC_IPS_V1" ], "enumDescriptions": [ - "", "" ], "type": "string" - }, - "portName": { - "description": "Name of port user is interested in. It is optional. If it is set, only information about this ports will be returned. If it is not set, all the named ports will be returned. Always lists all instances.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" } }, "type": "object" }, - "RegionInstanceGroupsSetNamedPortsRequest": { - "id": "RegionInstanceGroupsSetNamedPortsRequest", + "SecurityPolicyRuleMatcherConfig": { + "id": "SecurityPolicyRuleMatcherConfig", "properties": { - "fingerprint": { - "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. 
Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", - "format": "byte", - "type": "string" - }, - "namedPorts": { - "description": "The list of named ports to set for this instance group.", + "srcIpRanges": { + "description": "CIDR IP address range.", "items": { - "$ref": "NamedPort" + "type": "string" }, "type": "array" } }, "type": "object" }, - "RegionList": { - "description": "Contains a list of region resources.", - "id": "RegionList", + "SerialPortOutput": { + "description": "An instance's serial console output.", + "id": "SerialPortOutput", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "contents": { + "description": "[Output Only] The contents of the console output.", "type": "string" }, - "items": { - "description": "A list of Region resources.", - "items": { - "$ref": "Region" - }, - "type": "array" - }, "kind": { - "default": "compute#regionList", - "description": "[Output Only] Type of resource. Always compute#regionList for lists of regions.", + "default": "compute#serialPortOutput", + "description": "[Output Only] Type of the resource. Always compute#serialPortOutput for serial port output.", "type": "string" }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "next": { + "description": "[Output Only] The position of the next byte of content from the serial console output. Use this value in the next request as the start parameter.", + "format": "int64", "type": "string" }, "selfLink": { "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" + "start": { + "description": "The starting byte position of the output that was returned. This should match the start parameter sent with the request. If the serial console output exceeds the size of the buffer, older output will be overwritten by newer content and the start values will be mismatched.", + "format": "int64", + "type": "string" } }, "type": "object" }, - "RegionSetLabelsRequest": { - "id": "RegionSetLabelsRequest", + "ServerBinding": { + "id": "ServerBinding", "properties": { - "labelFingerprint": { - "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. Make a get() request to the resource to get the latest fingerprint.", - "format": "byte", + "type": { + "enum": [ + "RESTART_NODE_ON_ANY_SERVER", + "RESTART_NODE_ON_MINIMAL_SERVERS", + "SERVER_BINDING_TYPE_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "" + ], "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "The labels to set for this resource.", - "type": "object" } }, "type": "object" }, - "RegionSetPolicyRequest": { - "id": "RegionSetPolicyRequest", + "ServiceAccount": { + "description": "A service account.", + "id": "ServiceAccount", "properties": { - "bindings": { - "description": "Flatten Policy to create a backwacd compatible wire-format. Deprecated. Use 'policy' to specify bindings.", + "email": { + "description": "Email address of the service account.", + "type": "string" + }, + "scopes": { + "description": "The list of scopes to be made available for this service account.", "items": { - "$ref": "Binding" + "type": "string" }, "type": "array" + } + }, + "type": "object" + }, + "ShieldedInstanceConfig": { + "description": "A set of Shielded Instance options.", + "id": "ShieldedInstanceConfig", + "properties": { + "enableIntegrityMonitoring": { + "description": "Defines whether the instance has integrity monitoring enabled.", + "type": "boolean" }, - "etag": { - "description": "Flatten Policy to create a backward compatible wire-format. Deprecated. 
Use 'policy' to specify the etag.", - "format": "byte", + "enableSecureBoot": { + "description": "Defines whether the instance has Secure Boot enabled.", + "type": "boolean" + }, + "enableVtpm": { + "description": "Defines whether the instance has the vTPM enabled.", + "type": "boolean" + } + }, + "type": "object" + }, + "ShieldedInstanceIdentity": { + "description": "A shielded Instance identity entry.", + "id": "ShieldedInstanceIdentity", + "properties": { + "encryptionKey": { + "$ref": "ShieldedInstanceIdentityEntry", + "description": "An Endorsement Key (EK) issued to the Shielded Instance's vTPM." + }, + "kind": { + "default": "compute#shieldedInstanceIdentity", + "description": "[Output Only] Type of the resource. Always compute#shieldedInstanceIdentity for shielded Instance identity entry.", "type": "string" }, - "policy": { - "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the 'resource'. The size of the policy is limited to a few 10s of KB. An empty policy is in general a valid policy but certain services (like Projects) might reject them." + "signingKey": { + "$ref": "ShieldedInstanceIdentityEntry", + "description": "An Attestation Key (AK) issued to the Shielded Instance's vTPM." } }, "type": "object" }, - "ResourceCommitment": { - "description": "Commitment for a particular resource (a Commitment is composed of one or more of these).", - "id": "ResourceCommitment", + "ShieldedInstanceIdentityEntry": { + "description": "A Shielded Instance Identity Entry.", + "id": "ShieldedInstanceIdentityEntry", "properties": { - "amount": { - "description": "The amount of the resource purchased (in a type-dependent unit, such as bytes). For vCPUs, this can just be an integer. For memory, this must be provided in MB. Memory must be a multiple of 256 MB, with up to 6.5GB of memory per every vCPU.", + "ekCert": { + "description": "A PEM-encoded X.509 certificate. This field can be empty.", + "type": "string" + }, + "ekPub": { + "description": "A PEM-encoded public key.", + "type": "string" + } + }, + "type": "object" + }, + "ShieldedInstanceIntegrityPolicy": { + "description": "The policy describes the baseline against which Instance boot integrity is measured.", + "id": "ShieldedInstanceIntegrityPolicy", + "properties": { + "updateAutoLearnPolicy": { + "description": "Updates the integrity policy baseline using the measurements from the VM instance's most recent boot.", + "type": "boolean" + } + }, + "type": "object" + }, + "ShieldedVmConfig": { + "description": "A set of Shielded VM options.", + "id": "ShieldedVmConfig", + "properties": { + "enableIntegrityMonitoring": { + "description": "Defines whether the instance has integrity monitoring enabled.", + "type": "boolean" + }, + "enableSecureBoot": { + "description": "Defines whether the instance has Secure Boot enabled.", + "type": "boolean" + }, + "enableVtpm": { + "description": "Defines whether the instance has the vTPM enabled.", + "type": "boolean" + } + }, + "type": "object" + }, + "ShieldedVmIdentity": { + "description": "A shielded VM identity entry.", + "id": "ShieldedVmIdentity", + "properties": { + "encryptionKey": { + "$ref": "ShieldedVmIdentityEntry", + "description": "An Endorsement Key (EK) issued to the Shielded VM's vTPM." + }, + "kind": { + "default": "compute#shieldedVmIdentity", + "description": "[Output Only] Type of the resource. 
Always compute#shieldedVmIdentity for shielded VM identity entry.", + "type": "string" + }, + "signingKey": { + "$ref": "ShieldedVmIdentityEntry", + "description": "An Attestation Key (AK) issued to the Shielded VM's vTPM." + } + }, + "type": "object" + }, + "ShieldedVmIdentityEntry": { + "description": "A Shielded Instance Identity Entry.", + "id": "ShieldedVmIdentityEntry", + "properties": { + "ekCert": { + "description": "A PEM-encoded X.509 certificate. This field can be empty.", + "type": "string" + }, + "ekPub": { + "description": "A PEM-encoded public key.", + "type": "string" + } + }, + "type": "object" + }, + "ShieldedVmIntegrityPolicy": { + "description": "The policy describes the baseline against which VM instance boot integrity is measured.", + "id": "ShieldedVmIntegrityPolicy", + "properties": { + "updateAutoLearnPolicy": { + "description": "Updates the integrity policy baseline using the measurements from the VM instance's most recent boot.", + "type": "boolean" + } + }, + "type": "object" + }, + "SignedUrlKey": { + "description": "Represents a customer-supplied Signing Key used by Cloud CDN Signed URLs", + "id": "SignedUrlKey", + "properties": { + "keyName": { + "description": "Name of the key. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "keyValue": { + "description": "128-bit key value used for signing the URL. The key value must be a valid RFC 4648 Section 5 base64url encoded string.", + "type": "string" + } + }, + "type": "object" + }, + "Snapshot": { + "description": "A persistent disk snapshot resource. (== resource_for beta.snapshots ==) (== resource_for v1.snapshots ==)", + "id": "Snapshot", + "properties": { + "autoCreated": { + "description": "[Output Only] Set to true if snapshots are automatically by applying resource policy on the target disk.", + "type": "boolean" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "diskSizeGb": { + "description": "[Output Only] Size of the snapshot, specified in GB.", "format": "int64", "type": "string" }, - "type": { - "description": "Type of resource for which this commitment applies. Possible values are VCPU and MEMORY", + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#snapshot", + "description": "[Output Only] Type of the resource. Always compute#snapshot for Snapshot resources.", + "type": "string" + }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this snapshot, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. 
You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a snapshot.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this snapshot. These can be later modified by the setLabels method. Label values may be empty.", + "type": "object" + }, + "licenseCodes": { + "description": "[Output Only] Integer license codes indicating which licenses are attached to this snapshot.", + "items": { + "format": "int64", + "type": "string" + }, + "type": "array" + }, + "licenses": { + "description": "[Output Only] A list of public visible licenses that apply to this snapshot. This can be because the original image had licenses attached (such as a Windows image).", + "items": { + "type": "string" + }, + "type": "array" + }, + "name": { + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "snapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the snapshot later. For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the snapshot.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." + }, + "sourceDisk": { + "description": "[Output Only] The source disk used to create this snapshot.", + "type": "string" + }, + "sourceDiskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." + }, + "sourceDiskId": { + "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name.", + "type": "string" + }, + "status": { + "description": "[Output Only] The status of the snapshot. 
This can be CREATING, DELETING, FAILED, READY, or UPLOADING.", "enum": [ - "MEMORY", - "UNSPECIFIED", - "VCPU" + "CREATING", + "DELETING", + "FAILED", + "READY", + "UPLOADING" ], "enumDescriptions": [ + "", + "", "", "", "" ], "type": "string" - } - }, - "type": "object" - }, - "ResourceGroupReference": { - "id": "ResourceGroupReference", - "properties": { - "group": { - "description": "A URI referencing one of the instance groups or network endpoint groups listed in the backend service.", + }, + "storageBytes": { + "description": "[Output Only] A size of the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", + "format": "int64", + "type": "string" + }, + "storageBytesStatus": { + "description": "[Output Only] An indicator whether storageBytes is in a stable state or it is being adjusted as a result of shared storage reallocation. This status can either be UPDATING, meaning the size of the snapshot is being updated, or UP_TO_DATE, meaning the size of the snapshot is up-to-date.", + "enum": [ + "UPDATING", + "UP_TO_DATE" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" + }, + "storageLocations": { + "description": "GCS bucket storage location of the snapshot (regional or multi-regional).", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "ResourcePoliciesScopedList": { - "id": "ResourcePoliciesScopedList", + "SnapshotList": { + "description": "Contains a list of Snapshot resources.", + "id": "SnapshotList", "properties": { - "resourcePolicies": { - "description": "A list of resourcePolicies contained in this scope.", + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of Snapshot resources.", "items": { - "$ref": "ResourcePolicy" + "$ref": "Snapshot" }, "type": "array" }, + "kind": { + "default": "compute#snapshotList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "Informational warning which replaces the list of resourcePolicies when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -36976,18 +43657,38 @@ }, "type": "object" }, - "ResourcePolicy": { - "id": "ResourcePolicy", + "SourceInstanceParams": { + "description": "A specification of the parameters to use when creating the instance template from a source instance.", + "id": "SourceInstanceParams", "properties": { - "backupSchedulePolicy": { - "$ref": "ResourcePolicyBackupSchedulePolicy", - "description": "Resource policy for persistent disks for creating snapshots." + "diskConfigs": { + "description": "Attached disks configuration. 
If not provided, defaults are applied: For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes.", + "items": { + "$ref": "DiskInstantiationConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "SslCertificate": { + "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user. (== resource_for beta.sslCertificates ==) (== resource_for v1.sslCertificates ==)", + "id": "SslCertificate", + "properties": { + "certificate": { + "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert.", + "type": "string" }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "expireTime": { + "description": "[Output Only] Expire time of the certificate. RFC3339", "type": "string" }, "id": { @@ -36996,52 +43697,77 @@ "type": "string" }, "kind": { - "default": "compute#resourcePolicy", - "description": "[Output Only] Type of the resource. Always compute#resource_policies for resource policies.", + "default": "compute#sslCertificate", + "description": "[Output Only] Type of the resource. Always compute#sslCertificate for SSL certificates.", "type": "string" }, + "managed": { + "$ref": "SslCertificateManagedSslCertificate", + "description": "Configuration and status of a managed SSL certificate." + }, "name": { - "annotations": { - "required": [ - "compute.instances.insert" - ] - }, - "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "privateKey": { + "description": "A write-only private key in PEM format. Only insert requests will include this field.", + "type": "string" + }, "region": { + "description": "[Output Only] URL of the region where the regional SSL Certificate resides. 
This field is not applicable to global SSL Certificate.", "type": "string" }, "selfLink": { - "description": "[Output Only] Server-defined fully-qualified URL for this resource.", + "description": "[Output only] Server-defined URL for the resource.", + "type": "string" + }, + "selfManaged": { + "$ref": "SslCertificateSelfManagedSslCertificate", + "description": "Configuration and status of a self-managed SSL certificate." + }, + "subjectAlternativeNames": { + "description": "[Output Only] Domains associated with the certificate via Subject Alternative Name.", + "items": { + "type": "string" + }, + "type": "array" + }, + "type": { + "description": "(Optional) Specifies the type of SSL certificate, either \"SELF_MANAGED\" or \"MANAGED\". If not specified, the certificate is self-managed and the fields certificate and private_key are used.", + "enum": [ + "MANAGED", + "SELF_MANAGED", + "TYPE_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "" + ], "type": "string" } }, "type": "object" }, - "ResourcePolicyAggregatedList": { - "description": "Contains a list of resourcePolicies.", - "id": "ResourcePolicyAggregatedList", + "SslCertificateAggregatedList": { + "id": "SslCertificateAggregatedList", "properties": { - "etag": { - "type": "string" - }, "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { "additionalProperties": { - "$ref": "ResourcePoliciesScopedList", - "description": "Name of the scope containing this set of resourcePolicies." + "$ref": "SslCertificatesScopedList", + "description": "Name of the scope containing this set of SslCertificates." }, - "description": "A list of ResourcePolicy resources.", + "description": "A list of SslCertificatesScopedList resources.", "type": "object" }, "kind": { - "default": "compute#resourcePolicyAggregatedList", - "description": "Type of resource.", + "default": "compute#sslCertificateAggregatedList", + "description": "[Output Only] Type of resource. Always compute#sslCertificateAggregatedList for lists of SSL Certificates.", "type": "string" }, "nextPageToken": { @@ -37136,145 +43862,24 @@ }, "type": "object" }, - "ResourcePolicyBackupSchedulePolicy": { - "description": "A backup schedule policy specifies when and how frequently snapshots are to be created for the target disk. Also specifies how many and how long these scheduled snapshots should be retained.", - "id": "ResourcePolicyBackupSchedulePolicy", - "properties": { - "retentionPolicy": { - "$ref": "ResourcePolicyBackupSchedulePolicyRetentionPolicy", - "description": "Retention policy applied to snapshots created by this resource policy." - }, - "schedule": { - "$ref": "ResourcePolicyBackupSchedulePolicySchedule", - "description": "A Vm Maintenance Policy specifies what kind of infrastructure maintenance we are allowed to perform on this VM and when. Schedule that is applied to disks covered by this policy." - }, - "snapshotProperties": { - "$ref": "ResourcePolicyBackupSchedulePolicySnapshotProperties", - "description": "Properties with which snapshots are created such as labels, encryption keys." 
- } - }, - "type": "object" - }, - "ResourcePolicyBackupSchedulePolicyRetentionPolicy": { - "description": "Policy for retention of scheduled snapshots.", - "id": "ResourcePolicyBackupSchedulePolicyRetentionPolicy", - "properties": { - "maxRetentionDays": { - "description": "Maximum age of the snapshot that is allowed to be kept.", - "format": "int32", - "type": "integer" - }, - "onSourceDiskDelete": { - "description": "Specifies the behavior to apply to scheduled snapshots when the source disk is deleted.", - "enum": [ - "APPLY_RETENTION_POLICY", - "KEEP_AUTO_SNAPSHOTS", - "UNSPECIFIED_ON_SOURCE_DISK_DELETE" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResourcePolicyBackupSchedulePolicySchedule": { - "description": "A schedule for disks where the schedueled operations are performed.", - "id": "ResourcePolicyBackupSchedulePolicySchedule", - "properties": { - "dailySchedule": { - "$ref": "ResourcePolicyDailyCycle" - }, - "hourlySchedule": { - "$ref": "ResourcePolicyHourlyCycle" - }, - "weeklySchedule": { - "$ref": "ResourcePolicyWeeklyCycle" - } - }, - "type": "object" - }, - "ResourcePolicyBackupSchedulePolicySnapshotProperties": { - "description": "Specified snapshot properties for scheduled snapshots created by this policy.", - "id": "ResourcePolicyBackupSchedulePolicySnapshotProperties", - "properties": { - "guestFlush": { - "description": "Indication to perform a ?guest aware? snapshot.", - "type": "boolean" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to apply to scheduled snapshots. These can be later modified by the setLabels method. Label values may be empty.", - "type": "object" - } - }, - "type": "object" - }, - "ResourcePolicyDailyCycle": { - "description": "Time window specified for daily operations.", - "id": "ResourcePolicyDailyCycle", - "properties": { - "daysInCycle": { - "description": "Allows to define schedule that runs every nth day of the month.", - "format": "int32", - "type": "integer" - }, - "duration": { - "description": "[Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario.", - "type": "string" - }, - "startTime": { - "description": "Time within the window to start the operations. It must be in format \"HH:MM\", where HH : [00-23] and MM : [00-00] GMT.", - "type": "string" - } - }, - "type": "object" - }, - "ResourcePolicyHourlyCycle": { - "description": "Time window specified for hourly operations.", - "id": "ResourcePolicyHourlyCycle", - "properties": { - "duration": { - "description": "[Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario.", - "type": "string" - }, - "hoursInCycle": { - "description": "Allows to define schedule that runs every nth hour.", - "format": "int32", - "type": "integer" - }, - "startTime": { - "description": "Time within the window to start the operations. It must be in format \"HH:MM\", where HH : [00-23] and MM : [00-00] GMT.", - "type": "string" - } - }, - "type": "object" - }, - "ResourcePolicyList": { - "id": "ResourcePolicyList", + "SslCertificateList": { + "description": "Contains a list of SslCertificate resources.", + "id": "SslCertificateList", "properties": { - "etag": { - "type": "string" - }, "id": { - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "[Output Only] A list of ResourcePolicy resources.", + "description": "A list of SslCertificate resources.", "items": { - "$ref": "ResourcePolicy" + "$ref": "SslCertificate" }, "type": "array" }, "kind": { - "default": "compute#resourcePolicyList", - "description": "[Output Only] Type of resource.Always compute#resourcePoliciesList for listsof resourcePolicies", + "default": "compute#sslCertificateList", + "description": "Type of resource.", "type": "string" }, "nextPageToken": { @@ -37369,34 +43974,51 @@ }, "type": "object" }, - "ResourcePolicyWeeklyCycle": { - "description": "Time window specified for weekly operations.", - "id": "ResourcePolicyWeeklyCycle", + "SslCertificateManagedSslCertificate": { + "description": "Configuration and status of a managed SSL certificate.", + "id": "SslCertificateManagedSslCertificate", "properties": { - "dayOfWeeks": { - "description": "Up to 7 intervals/windows, one for each day of the week.", + "domainStatus": { + "additionalProperties": { + "enum": [ + "ACTIVE", + "DOMAIN_STATUS_UNSPECIFIED", + "FAILED_CAA_CHECKING", + "FAILED_CAA_FORBIDDEN", + "FAILED_NOT_VISIBLE", + "FAILED_RATE_LIMITED", + "PROVISIONING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "description": "[Output only] Detailed statuses of the domains specified for managed certificate resource.", + "type": "object" + }, + "domains": { + "description": "The domains for which a managed SSL certificate will be generated. Currently only single-domain certs are supported.", "items": { - "$ref": "ResourcePolicyWeeklyCycleDayOfWeek" + "type": "string" }, "type": "array" - } - }, - "type": "object" - }, - "ResourcePolicyWeeklyCycleDayOfWeek": { - "id": "ResourcePolicyWeeklyCycleDayOfWeek", - "properties": { - "day": { - "description": "Allows to define schedule that runs specified day of the week.", + }, + "status": { + "description": "[Output only] Status of the managed certificate resource.", "enum": [ - "FRIDAY", - "INVALID", - "MONDAY", - "SATURDAY", - "SUNDAY", - "THURSDAY", - "TUESDAY", - "WEDNESDAY" + "ACTIVE", + "MANAGED_CERTIFICATE_STATUS_UNSPECIFIED", + "PROVISIONING", + "PROVISIONING_FAILED", + "PROVISIONING_FAILED_PERMANENTLY", + "RENEWAL_FAILED" ], "enumDescriptions": [ "", @@ -37404,244 +44026,40 @@ "", "", "", - "", - "", "" ], "type": "string" - }, - "duration": { - "description": "[Output only] Duration of the time window, automatically chosen to be smallest possible in the given scenario.", - "type": "string" - }, - "startTime": { - "description": "Time within the window to start the operations. It must be in format \"HH:MM\", where HH : [00-23] and MM : [00-00] GMT.", - "type": "string" } }, "type": "object" }, - "Route": { - "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving an instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. 
If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped. (== resource_for beta.routes ==) (== resource_for v1.routes ==)", - "id": "Route", + "SslCertificateSelfManagedSslCertificate": { + "description": "Configuration and status of a self-managed SSL certificate.", + "id": "SslCertificateSelfManagedSslCertificate", "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "destRange": { - "annotations": { - "required": [ - "compute.routes.insert" - ] - }, - "description": "The destination range of outgoing packets that this route applies to. Only IPv4 is supported.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#route", - "description": "[Output Only] Type of this resource. Always compute#routes for Route resources.", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "compute.routes.insert" - ] - }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "network": { - "annotations": { - "required": [ - "compute.routes.insert" - ] - }, - "description": "Fully-qualified URL of the network that this route applies to.", - "type": "string" - }, - "nextHopGateway": { - "description": "The URL to a gateway that should handle matching packets. You can only specify the internet gateway using a full or partial valid URL: projects/\u003cproject-id\u003e/global/gateways/default-internet-gateway", - "type": "string" - }, - "nextHopIlb": { - "description": "The URL to a forwarding rule of type loadBalancingScheme=INTERNAL that should handle matching packets. You can only specify the forwarding rule as a partial or full URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule \n- regions/region/forwardingRules/forwardingRule Note that this can only be used when the destination_range is a public (non-RFC 1918) IP CIDR range.", - "type": "string" - }, - "nextHopInstance": { - "description": "The URL to an instance that should handle matching packets. You can specify this as a full or partial URL. For example:\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/", - "type": "string" - }, - "nextHopIp": { - "description": "The network IP address of an instance that should handle matching packets. 
Only IPv4 is supported.", - "type": "string" - }, - "nextHopNetwork": { - "description": "The URL of the local network if it should handle matching packets.", - "type": "string" - }, - "nextHopPeering": { - "description": "[Output Only] The network peering name that should handle matching packets, which should conform to RFC1035.", - "type": "string" - }, - "nextHopVpnTunnel": { - "description": "The URL to a VpnTunnel that should handle matching packets.", + "certificate": { + "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert.", "type": "string" }, - "priority": { - "annotations": { - "required": [ - "compute.routes.insert" - ] - }, - "description": "The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In the case of two routes with equal prefix length, the one with the lowest-numbered priority value wins. Default value is 1000. Valid range is 0 through 65535.", - "format": "uint32", - "type": "integer" - }, - "selfLink": { - "description": "[Output Only] Server-defined fully-qualified URL for this resource.", + "privateKey": { + "description": "A write-only private key in PEM format. Only insert requests will include this field.", "type": "string" - }, - "tags": { - "annotations": { - "required": [ - "compute.routes.insert" - ] - }, - "description": "A list of instance tags to which this route applies.", - "items": { - "type": "string" - }, - "type": "array" - }, - "warnings": { - "description": "[Output Only] If potential misconfigurations are detected for this route, this field will be populated with warning messages.", - "items": { - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" } }, "type": "object" }, - "RouteList": { - "description": "Contains a list of Route resources.", - "id": "RouteList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of Route resources.", - "items": { - "$ref": "Route" - }, - "type": "array" - }, - "kind": { - "default": "compute#routeList", - "description": "Type of resource.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" + "SslCertificatesScopedList": { + "id": "SslCertificatesScopedList", + "properties": { + "sslCertificates": { + "description": "List of SslCertificates contained in this scope.", + "items": { + "$ref": "SslCertificate" + }, + "type": "array" }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "Informational warning which replaces the list of backend services when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -37724,117 +44142,23 @@ }, "type": "object" }, - "Router": { - "description": "Router resource.", - "id": "Router", - "properties": { - "bgp": { - "$ref": "RouterBgp", - "description": "BGP information specific to this router." - }, - "bgpPeers": { - "description": "BGP information that needs to be configured into the routing stack to establish the BGP peering. It must specify peer ASN and either interface name, IP, or peer IP. Please refer to RFC4273.", - "items": { - "$ref": "RouterBgpPeer" - }, - "type": "array" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "interfaces": { - "description": "Router interfaces. Each interface requires either one linked resource (e.g. linkedVpnTunnel), or IP address and IP address range (e.g. ipRange), or both.", - "items": { - "$ref": "RouterInterface" - }, - "type": "array" - }, - "kind": { - "default": "compute#router", - "description": "[Output Only] Type of resource. 
Always compute#router for routers.", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "compute.routers.insert" - ] - }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "nats": { - "description": "A list of Nat services created in this router.", - "items": { - "$ref": "RouterNat" - }, - "type": "array" - }, - "network": { - "annotations": { - "required": [ - "compute.routers.insert" - ] - }, - "description": "URI of the network to which this router belongs.", - "type": "string" - }, - "region": { - "description": "[Output Only] URI of the region where the router resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - } - }, - "type": "object" - }, - "RouterAdvertisedIpRange": { - "description": "Description-tagged IP ranges for the router to advertise.", - "id": "RouterAdvertisedIpRange", - "properties": { - "description": { - "description": "User-specified description for the IP range.", - "type": "string" - }, - "range": { - "description": "The IP range to advertise. The value must be a CIDR-formatted string.", - "type": "string" - } - }, - "type": "object" - }, - "RouterAggregatedList": { - "description": "Contains a list of routers.", - "id": "RouterAggregatedList", + "SslPoliciesList": { + "id": "SslPoliciesList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "additionalProperties": { - "$ref": "RoutersScopedList", - "description": "Name of the scope containing this set of routers." + "description": "A list of SslPolicy resources.", + "items": { + "$ref": "SslPolicy" }, - "description": "A list of Router resources.", - "type": "object" + "type": "array" }, "kind": { - "default": "compute#routerAggregatedList", - "description": "Type of resource.", + "default": "compute#sslPoliciesList", + "description": "[Output Only] Type of the resource. Always compute#sslPoliciesList for lists of sslPolicies.", "type": "string" }, "nextPageToken": { @@ -37929,179 +44253,441 @@ }, "type": "object" }, - "RouterBgp": { - "id": "RouterBgp", + "SslPoliciesListAvailableFeaturesResponse": { + "id": "SslPoliciesListAvailableFeaturesResponse", "properties": { - "advertiseMode": { - "description": "User-specified flag to indicate which mode to use for advertisement.", - "enum": [ - "CUSTOM", - "DEFAULT" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "advertisedGroups": { - "description": "User-specified list of prefix groups to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These groups will be advertised in addition to any specified prefixes. 
Leave this field blank to advertise no custom groups.", + "features": { "items": { - "enum": [ - "ALL_SUBNETS" - ], - "enumDescriptions": [ - "" - ], "type": "string" }, "type": "array" - }, - "advertisedIpRanges": { - "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and is advertised to all peers of the router. These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges.", - "items": { - "$ref": "RouterAdvertisedIpRange" - }, - "type": "array" - }, - "asn": { - "description": "Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. All VPN tunnels that link to this router will have the same local ASN.", - "format": "uint32", - "type": "integer" } }, "type": "object" }, - "RouterBgpPeer": { - "id": "RouterBgpPeer", + "SslPolicy": { + "description": "A SSL policy specifies the server-side support for SSL features. This can be attached to a TargetHttpsProxy or a TargetSslProxy. This affects connections between clients and the HTTPS or SSL proxy load balancer. They do not affect the connection between the load balancers and the backends.", + "id": "SslPolicy", "properties": { - "advertiseMode": { - "description": "User-specified flag to indicate which mode to use for advertisement.", - "enum": [ - "CUSTOM", - "DEFAULT" - ], - "enumDescriptions": [ - "", - "" - ], + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "advertisedGroups": { - "description": "User-specified list of prefix groups to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in Bgp message). These groups will be advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", + "customFeatures": { + "description": "A list of features enabled when the selected profile is CUSTOM. The\n- method returns the set of features that can be specified in this list. This field must be empty if the profile is not CUSTOM.", "items": { - "enum": [ - "ALL_SUBNETS" - ], - "enumDescriptions": [ - "" - ], "type": "string" }, "type": "array" }, - "advertisedIpRanges": { - "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in Bgp message). These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges.", + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "enabledFeatures": { + "description": "[Output Only] The list of features enabled in the SSL policy.", "items": { - "$ref": "RouterAdvertisedIpRange" + "type": "string" }, "type": "array" }, - "advertisedRoutePriority": { - "description": "The priority of routes advertised to this BGP peer. In the case where there is more than one matching route of maximum length, the routes with lowest priority value win.", - "format": "uint32", - "type": "integer" + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. 
This field will be ignored when inserting a SslPolicy. An up-to-date fingerprint must be provided in order to update the SslPolicy, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an SslPolicy.", + "format": "byte", + "type": "string" }, - "interfaceName": { - "description": "Name of the interface the BGP peer is associated with.", + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", "type": "string" }, - "ipAddress": { - "description": "IP address of the interface inside Google Cloud Platform. Only IPv4 is supported.", + "kind": { + "default": "compute#sslPolicy", + "description": "[Output only] Type of the resource. Always compute#sslPolicyfor SSL policies.", "type": "string" }, - "managementType": { - "description": "[Output Only] The resource that configures and manages this BGP peer. MANAGED_BY_USER is the default value and can be managed by you or other users; MANAGED_BY_ATTACHMENT is a BGP peer that is configured and managed by Cloud Interconnect, specifically by an InterconnectAttachment of type PARTNER. Google will automatically create, update, and delete this type of BGP peer when the PARTNER InterconnectAttachment is created, updated, or deleted.", + "minTlsVersion": { + "description": "The minimum version of SSL protocol that can be used by the clients to establish a connection with the load balancer. This can be one of TLS_1_0, TLS_1_1, TLS_1_2.", "enum": [ - "MANAGED_BY_ATTACHMENT", - "MANAGED_BY_USER" + "TLS_1_0", + "TLS_1_1", + "TLS_1_2" ], "enumDescriptions": [ + "", "", "" ], "type": "string" }, "name": { - "description": "Name of this BGP peer. The name must be 1-63 characters long and comply with RFC1035.", + "description": "Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "peerAsn": { - "description": "Peer BGP Autonomous System Number (ASN). For VPN use case, this value can be different for every tunnel.", - "format": "uint32", - "type": "integer" + "profile": { + "description": "Profile specifies the set of SSL features that can be used by the load balancer when negotiating SSL with clients. This can be one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, the set of SSL features to enable must be specified in the customFeatures field.", + "enum": [ + "COMPATIBLE", + "CUSTOM", + "MODERN", + "RESTRICTED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" }, - "peerIpAddress": { - "description": "IP address of the BGP interface outside Google cloud. Only IPv4 is supported.", + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "warnings": { + "description": "[Output Only] If potential misconfigurations are detected for this SSL policy, this field will be populated with warning messages.", + "items": { + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "SslPolicyReference": { + "id": "SslPolicyReference", + "properties": { + "sslPolicy": { + "description": "URL of the SSL policy resource. Set this to empty string to clear any existing SSL policy associated with the target proxy resource.", "type": "string" } }, "type": "object" }, - "RouterInterface": { - "id": "RouterInterface", + "Subnetwork": { + "description": "A Subnetwork resource. (== resource_for beta.subnetworks ==) (== resource_for v1.subnetworks ==)", + "id": "Subnetwork", "properties": { - "ipRange": { - "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface.", + "allowSubnetCidrRoutesOverlap": { + "description": "Whether this subnetwork can conflict with static routes. Setting this to true allows this subnetwork's primary and secondary ranges to conflict with routes that have already been configured on the corresponding network. Static routes will take precedence over the subnetwork route if the route prefix length is at least as large as the subnetwork prefix length.\n\nAlso, packets destined to IPs within subnetwork may contain private/sensitive data and are prevented from leaving the virtual network. 
Setting this field to true will disable this feature.\n\nThe default value is false and applies to all existing subnetworks and automatically created subnetworks.\n\nThis field cannot be set to true at resource creation time.", + "type": "boolean" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "linkedInterconnectAttachment": { - "description": "URI of the linked interconnect attachment. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment.", + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource. This field can be set only at resource creation time.", "type": "string" }, - "linkedVpnTunnel": { - "description": "URI of the linked VPN tunnel. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment.", + "enableFlowLogs": { + "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is to disable flow logging.", + "type": "boolean" + }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a Subnetwork. An up-to-date fingerprint must be provided in order to update the Subnetwork, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a Subnetwork.", + "format": "byte", "type": "string" }, - "managementType": { - "description": "[Output Only] The resource that configures and manages this interface. MANAGED_BY_USER is the default value and can be managed by you or other users; MANAGED_BY_ATTACHMENT is an interface that is configured and managed by Cloud Interconnect, specifically by an InterconnectAttachment of type PARTNER. Google will automatically create, update, and delete this type of interface when the PARTNER InterconnectAttachment is created, updated, or deleted.", + "gatewayAddress": { + "description": "[Output Only] The gateway address for default routes to reach destination addresses outside this subnetwork.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "ipCidrRange": { + "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. This field can be set only at resource creation time.", + "type": "string" + }, + "kind": { + "default": "compute#subnetwork", + "description": "[Output Only] Type of the resource. Always compute#subnetwork for Subnetwork resources.", + "type": "string" + }, + "logConfig": { + "$ref": "SubnetworkLogConfig", + "description": "This field denotes the VPC flow logging options for this subnetwork. If logging is enabled, logs are exported to Stackdriver." + }, + "name": { + "description": "The name of the resource, provided by the client when initially creating the resource. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "network": { + "description": "The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. Only networks that are in the distributed mode can have subnetworks. This field can be set only at resource creation time.", + "type": "string" + }, + "privateIpGoogleAccess": { + "description": "Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess.", + "type": "boolean" + }, + "purpose": { + "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918.", "enum": [ - "MANAGED_BY_ATTACHMENT", - "MANAGED_BY_USER" + "INTERNAL_HTTPS_LOAD_BALANCER", + "PRIVATE", + "PRIVATE_RFC_1918" ], "enumDescriptions": [ + "", "", "" ], "type": "string" }, - "name": { - "description": "Name of this interface entry. The name must be 1-63 characters long and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "region": { + "description": "URL of the region where the Subnetwork resides. This field can be set only at resource creation time.", + "type": "string" + }, + "role": { + "description": "The role of subnetwork. Currenly, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "enum": [ + "ACTIVE", + "BACKUP" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "secondaryIpRanges": { + "description": "An array of configurations for secondary IP ranges for VM instances contained in this subnetwork. The primary IP of such VM must belong to the primary ipCidrRange of the subnetwork. The alias IPs may belong to either primary or secondary ranges. This field can be updated with a patch request.", + "items": { + "$ref": "SubnetworkSecondaryRange" + }, + "type": "array" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "state": { + "description": "[Output Only] The state of the subnetwork, which can be one of READY or DRAINING. A subnetwork that is READY is ready to be used. The state of DRAINING is only applicable to subnetworks that have the purpose set to INTERNAL_HTTPS_LOAD_BALANCER and indicates that connections to the load balancer are being drained. 
A subnetwork that is draining cannot be used or modified until it reaches a status of READY.", + "enum": [ + "DRAINING", + "READY" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "SubnetworkAggregatedList": { + "id": "SubnetworkAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "SubnetworksScopedList", + "description": "Name of the scope containing this set of Subnetworks." + }, + "description": "A list of SubnetworksScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#subnetworkAggregatedList", + "description": "[Output Only] Type of resource. Always compute#subnetworkAggregatedList for aggregated lists of subnetworks.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
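The Subnetwork schema above describes the fields a client supplies at creation time (ipCidrRange, network, secondaryIpRanges, privateIpGoogleAccess, enableFlowLogs, and so on). Below is a small sketch of what an insert body could look like; the struct definitions are hand-rolled to mirror the schema rather than taken from the generated client, and the project, network URL and CIDR ranges are placeholders chosen only for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// secondaryRange and subnetwork mirror the SubnetworkSecondaryRange and
// Subnetwork schemas above; they are illustrative sketches only.
type secondaryRange struct {
	RangeName   string `json:"rangeName"`
	IpCidrRange string `json:"ipCidrRange"`
}

type subnetwork struct {
	Name                  string           `json:"name"`
	Network               string           `json:"network"`
	IpCidrRange           string           `json:"ipCidrRange"`
	PrivateIpGoogleAccess bool             `json:"privateIpGoogleAccess,omitempty"`
	EnableFlowLogs        bool             `json:"enableFlowLogs,omitempty"`
	SecondaryIpRanges     []secondaryRange `json:"secondaryIpRanges,omitempty"`
}

func main() {
	// A primary range plus one secondary range that could back alias IPs.
	// Ranges must be unique and non-overlapping within the network.
	s := subnetwork{
		Name:                  "example-subnet",
		Network:               "projects/example-project/global/networks/example-network",
		IpCidrRange:           "10.0.0.0/20",
		PrivateIpGoogleAccess: true,
		EnableFlowLogs:        true,
		SecondaryIpRanges: []secondaryRange{
			{RangeName: "pods", IpCidrRange: "10.4.0.0/14"},
		},
	}
	body, _ := json.MarshalIndent(s, "", "  ")
	fmt.Println(string(body))
}
```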
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "RouterList": { - "description": "Contains a list of Router resources.", - "id": "RouterList", + "SubnetworkList": { + "description": "Contains a list of Subnetwork resources.", + "id": "SubnetworkList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Router resources.", + "description": "A list of Subnetwork resources.", "items": { - "$ref": "Router" + "$ref": "Subnetwork" }, "type": "array" }, "kind": { - "default": "compute#routerList", - "description": "[Output Only] Type of resource. Always compute#router for routers.", + "default": "compute#subnetworkList", + "description": "[Output Only] Type of resource. Always compute#subnetworkList for lists of subnetworks.", "type": "string" }, "nextPageToken": { @@ -38196,292 +44782,91 @@ }, "type": "object" }, - "RouterNat": { - "description": "Represents a Nat resource. It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided.", - "id": "RouterNat", + "SubnetworkLogConfig": { + "description": "The available logging options for this subnetwork.", + "id": "SubnetworkLogConfig", "properties": { - "icmpIdleTimeoutSec": { - "description": "Timeout (in seconds) for ICMP connections. Defaults to 30s if not set.", - "format": "int32", - "type": "integer" - }, - "minPortsPerVm": { - "description": "Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. This gets rounded up to the nearest power of 2. Eg. if the value of this field is 50, at least 64 ports will be allocated to a VM.", - "format": "int32", - "type": "integer" - }, - "name": { - "description": "Unique name of this Nat service. The name must be 1-63 characters long and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "natIpAllocateOption": { - "description": "Specify the NatIpAllocateOption. If it is AUTO_ONLY, then nat_ip should be empty.", + "aggregationInterval": { + "description": "Can only be specified if VPC flow logging for this subnetwork is enabled. Toggles the aggregation interval for collecting flow logs. Increasing the interval time will reduce the amount of generated flow logs for long lasting connections. Default is an interval of 5 seconds per connection.", "enum": [ - "AUTO_ONLY", - "MANUAL_ONLY" + "INTERVAL_10_MIN", + "INTERVAL_15_MIN", + "INTERVAL_1_MIN", + "INTERVAL_30_SEC", + "INTERVAL_5_MIN", + "INTERVAL_5_SEC" ], "enumDescriptions": [ "", - "" - ], - "type": "string" - }, - "natIps": { - "description": "A list of URLs of the IP resources used for this Nat service. 
These IPs must be valid static external IP addresses assigned to the project. max_length is subject to change post alpha.", - "items": { - "type": "string" - }, - "type": "array" - }, - "sourceSubnetworkIpRangesToNat": { - "description": "Specify the Nat option. If this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", - "enum": [ - "ALL_SUBNETWORKS_ALL_IP_RANGES", - "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", - "LIST_OF_SUBNETWORKS" - ], - "enumDescriptions": [ + "", + "", "", "", "" ], "type": "string" }, - "subnetworks": { - "description": "A list of Subnetwork resources whose traffic should be translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS is selected for the SubnetworkIpRangeToNatOption above.", - "items": { - "$ref": "RouterNatSubnetworkToNat" - }, - "type": "array" - }, - "tcpEstablishedIdleTimeoutSec": { - "description": "Timeout (in seconds) for TCP established connections. Defaults to 1200s if not set.", - "format": "int32", - "type": "integer" - }, - "tcpTransitoryIdleTimeoutSec": { - "description": "Timeout (in seconds) for TCP transitory connections. Defaults to 30s if not set.", - "format": "int32", - "type": "integer" - }, - "udpIdleTimeoutSec": { - "description": "Timeout (in seconds) for UDP connections. Defaults to 30s if not set.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "RouterNatSubnetworkToNat": { - "description": "Defines the IP ranges that want to use NAT for a subnetwork.", - "id": "RouterNatSubnetworkToNat", - "properties": { - "name": { - "description": "URL for the subnetwork resource to use NAT.", - "type": "string" - }, - "secondaryIpRangeNames": { - "description": "A list of the secondary ranges of the Subnetwork that are allowed to use NAT. This can be populated only if \"LIST_OF_SECONDARY_IP_RANGES\" is one of the values in source_ip_ranges_to_nat.", - "items": { - "type": "string" - }, - "type": "array" - }, - "sourceIpRangesToNat": { - "description": "Specify the options for NAT ranges in the Subnetwork. All usages of single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. 
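The SubnetworkLogConfig schema above exposes the VPC flow-log knobs (aggregationInterval, flowSampling, metadata, enable). This is a short sketch of that object as it would appear under a subnetwork's logConfig key; the struct is a local stand-in for illustration, not the generated client type, and the chosen interval and sampling rate are arbitrary example values.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// logConfig mirrors the SubnetworkLogConfig schema above (sketch only).
type logConfig struct {
	Enable              bool    `json:"enable"`
	AggregationInterval string  `json:"aggregationInterval,omitempty"`
	FlowSampling        float64 `json:"flowSampling,omitempty"`
	Metadata            string  `json:"metadata,omitempty"`
}

func main() {
	// Coarser aggregation and a lower sampling rate reduce log volume for
	// long-lived connections, per the field descriptions above.
	lc := logConfig{
		Enable:              true,
		AggregationInterval: "INTERVAL_5_MIN",
		FlowSampling:        0.25,
		Metadata:            "INCLUDE_ALL_METADATA",
	}
	b, _ := json.MarshalIndent(lc, "", "  ")
	fmt.Println(string(b)) // embed this under the subnetwork's "logConfig" key
}
```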
The only valid option with multiple values is: [\"PRIMARY_IP_RANGE\", \"LIST_OF_SECONDARY_IP_RANGES\"] Default: [ALL_IP_RANGES]", - "items": { - "enum": [ - "ALL_IP_RANGES", - "LIST_OF_SECONDARY_IP_RANGES", - "PRIMARY_IP_RANGE" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "RouterStatus": { - "id": "RouterStatus", - "properties": { - "bestRoutes": { - "description": "Best routes for this router's network.", - "items": { - "$ref": "Route" - }, - "type": "array" - }, - "bestRoutesForRouter": { - "description": "Best routes learned by this router.", - "items": { - "$ref": "Route" - }, - "type": "array" - }, - "bgpPeerStatus": { - "items": { - "$ref": "RouterStatusBgpPeerStatus" - }, - "type": "array" - }, - "natStatus": { - "items": { - "$ref": "RouterStatusNatStatus" - }, - "type": "array" - }, - "network": { - "description": "URI of the network to which this router belongs.", - "type": "string" - } - }, - "type": "object" - }, - "RouterStatusBgpPeerStatus": { - "id": "RouterStatusBgpPeerStatus", - "properties": { - "advertisedRoutes": { - "description": "Routes that were advertised to the remote BGP peer", - "items": { - "$ref": "Route" - }, - "type": "array" - }, - "ipAddress": { - "description": "IP address of the local BGP interface.", - "type": "string" - }, - "linkedVpnTunnel": { - "description": "URL of the VPN tunnel that this BGP peer controls.", - "type": "string" - }, - "name": { - "description": "Name of this BGP peer. Unique within the Routers resource.", - "type": "string" - }, - "numLearnedRoutes": { - "description": "Number of routes learned from the remote BGP Peer.", - "format": "uint32", - "type": "integer" - }, - "peerIpAddress": { - "description": "IP address of the remote BGP interface.", - "type": "string" + "enable": { + "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is to disable flow logging.", + "type": "boolean" }, - "state": { - "description": "BGP state as specified in RFC1771.", - "type": "string" + "flowSampling": { + "description": "Can only be specified if VPC flow logging for this subnetwork is enabled. The value of the field must be in [0, 1]. Set the sampling rate of VPC flow logs within the subnetwork where 1.0 means all collected logs are reported and 0.0 means no logs are reported. Default is 0.5, which means half of all collected logs are reported.", + "format": "float", + "type": "number" }, - "status": { - "description": "Status of the BGP peer: {UP, DOWN}", + "metadata": { + "description": "Can only be specified if VPC flow logging for this subnetwork is enabled. Configures whether metadata fields should be added to the reported VPC flow logs. Default is INCLUDE_ALL_METADATA.", "enum": [ - "DOWN", - "UNKNOWN", - "UP" + "EXCLUDE_ALL_METADATA", + "INCLUDE_ALL_METADATA" ], "enumDescriptions": [ - "", "", "" ], "type": "string" - }, - "uptime": { - "description": "Time this session has been up. Format: 14 years, 51 weeks, 6 days, 23 hours, 59 minutes, 59 seconds", - "type": "string" - }, - "uptimeSeconds": { - "description": "Time this session has been up, in seconds. 
Format: 145", - "type": "string" } }, "type": "object" }, - "RouterStatusNatStatus": { - "description": "Status of a NAT contained in this router.", - "id": "RouterStatusNatStatus", + "SubnetworkSecondaryRange": { + "description": "Represents a secondary IP range of a subnetwork.", + "id": "SubnetworkSecondaryRange", "properties": { - "autoAllocatedNatIps": { - "description": "A list of IPs auto-allocated for NAT. Example: [\"1.1.1.1\", \"129.2.16.89\"]", - "items": { - "type": "string" - }, - "type": "array" - }, - "minExtraNatIpsNeeded": { - "description": "The number of extra IPs to allocate. This will be greater than 0 only if user-specified IPs are NOT enough to allow all configured VMs to use NAT. This value is meaningful only when auto-allocation of NAT IPs is *not* used.", - "format": "int32", - "type": "integer" - }, - "name": { - "description": "Unique name of this NAT.", + "ipCidrRange": { + "description": "The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported.", "type": "string" }, - "numVmEndpointsWithNatMappings": { - "description": "Number of VM endpoints (i.e., Nics) that can use NAT.", - "format": "int32", - "type": "integer" - }, - "userAllocatedNatIpResources": { - "description": "A list of fully qualified URLs of reserved IP address resources.", - "items": { - "type": "string" - }, - "type": "array" - }, - "userAllocatedNatIps": { - "description": "A list of IPs user-allocated for NAT. They will be raw IP strings like \"179.12.26.133\".", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "RouterStatusResponse": { - "id": "RouterStatusResponse", - "properties": { - "kind": { - "default": "compute#routerStatusResponse", - "description": "Type of resource.", + "rangeName": { + "description": "The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork.", "type": "string" - }, - "result": { - "$ref": "RouterStatus" } }, "type": "object" }, - "RoutersPreviewResponse": { - "id": "RoutersPreviewResponse", + "SubnetworksExpandIpCidrRangeRequest": { + "id": "SubnetworksExpandIpCidrRangeRequest", "properties": { - "resource": { - "$ref": "Router", - "description": "Preview of given router." + "ipCidrRange": { + "description": "The IP (in CIDR format or netmask) of internal addresses that are legal on this Subnetwork. This range should be disjoint from other subnetworks within this network. This range can only be larger than (i.e. a superset of) the range previously defined before the update.", + "type": "string" } }, "type": "object" }, - "RoutersScopedList": { - "id": "RoutersScopedList", + "SubnetworksScopedList": { + "id": "SubnetworksScopedList", "properties": { - "routers": { - "description": "A list of routers contained in this scope.", + "subnetworks": { + "description": "A list of subnetworks contained in this scope.", "items": { - "$ref": "Router" + "$ref": "Subnetwork" }, "type": "array" }, "warning": { - "description": "Informational warning which replaces the list of routers when the list is empty.", + "description": "An informational warning that appears when the list of addresses is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. 
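SubnetworksExpandIpCidrRangeRequest above carries only ipCidrRange, and the schema requires the new range to be a superset of the one previously defined. The sketch below builds that request body and adds a purely local sanity check for the superset condition; the CIDR values are placeholders and the helper function is an illustrative assumption, not part of any API.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net"
)

// expandRequest mirrors SubnetworksExpandIpCidrRangeRequest above.
type expandRequest struct {
	IpCidrRange string `json:"ipCidrRange"`
}

// isSuperset reports whether the proposed range fully contains the current
// one, which is what the schema requires ("can only be larger than (i.e. a
// superset of) the range previously defined"). Local check, not an API call.
func isSuperset(proposed, current string) bool {
	_, newNet, err1 := net.ParseCIDR(proposed)
	curIP, curNet, err2 := net.ParseCIDR(current)
	if err1 != nil || err2 != nil {
		return false
	}
	newOnes, _ := newNet.Mask.Size()
	curOnes, _ := curNet.Mask.Size()
	return newOnes <= curOnes && newNet.Contains(curIP)
}

func main() {
	current, proposed := "10.0.0.0/24", "10.0.0.0/20" // placeholder ranges
	fmt.Println("superset:", isSuperset(proposed, current))

	body, _ := json.Marshal(expandRequest{IpCidrRange: proposed})
	fmt.Println(string(body)) // request body for expandIpCidrRange
}
```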
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -38564,77 +44949,20 @@ }, "type": "object" }, - "Rule": { - "description": "A rule to be applied in a Policy.", - "id": "Rule", + "SubnetworksSetPrivateIpGoogleAccessRequest": { + "id": "SubnetworksSetPrivateIpGoogleAccessRequest", "properties": { - "action": { - "description": "Required", - "enum": [ - "ALLOW", - "ALLOW_WITH_LOG", - "DENY", - "DENY_WITH_LOG", - "LOG", - "NO_ACTION" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "conditions": { - "description": "Additional restrictions that must be met. All conditions must pass for the rule to match.", - "items": { - "$ref": "Condition" - }, - "type": "array" - }, - "description": { - "description": "Human-readable description of the rule.", - "type": "string" - }, - "ins": { - "description": "If one or more 'in' clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.", - "items": { - "type": "string" - }, - "type": "array" - }, - "logConfigs": { - "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries that match the LOG action.", - "items": { - "$ref": "LogConfig" - }, - "type": "array" - }, - "notIns": { - "description": "If one or more 'not_in' clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.", - "items": { - "type": "string" - }, - "type": "array" - }, - "permissions": { - "description": "A permission is a string of form '..' (e.g., 'storage.buckets.list'). A value of '*' matches all permissions, and a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.", - "items": { - "type": "string" - }, - "type": "array" + "privateIpGoogleAccess": { + "type": "boolean" } }, "type": "object" }, - "SSLHealthCheck": { - "id": "SSLHealthCheck", + "TCPHealthCheck": { + "id": "TCPHealthCheck", "properties": { "port": { - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", + "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, @@ -38643,7 +44971,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, SSL health check follows behavior specified in\nport\nand\nportName\nfields.", + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, TCP health check follows behavior specified in\nport\nand\nportName\nfields.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -38669,7 +44997,7 @@ "type": "string" }, "request": { - "description": "The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.", + "description": "The application data to send once the TCP connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.", "type": "string" }, "response": { @@ -38679,93 +45007,122 @@ }, "type": "object" }, - "Scheduling": { - "description": "Sets the scheduling options for an Instance.", - "id": "Scheduling", + "Tags": { + "description": "A set of instance tags.", + "id": "Tags", "properties": { - "automaticRestart": { - "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted.\n\nBy default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine.", - "type": "boolean" + "fingerprint": { + "description": "Specifies a fingerprint for this request, which is essentially a hash of the tags' contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update tags. You must always provide an up-to-date fingerprint hash in order to update or change tags.\n\nTo see the latest fingerprint, make get() request to the instance.", + "format": "byte", + "type": "string" }, - "nodeAffinities": { - "description": "A set of node affinity and anti-affinity.", + "items": { + "description": "An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.", "items": { - "$ref": "SchedulingNodeAffinity" + "type": "string" }, "type": "array" - }, - "onHostMaintenance": { - "description": "Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options.", - "enum": [ - "MIGRATE", - "TERMINATE" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "preemptible": { - "description": "Defines whether the instance is preemptible. 
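The TCPHealthCheck schema above (port, portName, portSpecification, request, response) is straightforward: with empty request/response strings, establishing the TCP connection alone indicates health. Here is a minimal sketch of that object using a hand-written struct rather than the generated client; it would typically be embedded inside a HealthCheck resource body.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// tcpHealthCheck mirrors the TCPHealthCheck schema above (sketch only).
type tcpHealthCheck struct {
	Port              int    `json:"port,omitempty"`
	PortName          string `json:"portName,omitempty"`
	PortSpecification string `json:"portSpecification,omitempty"`
	Request           string `json:"request,omitempty"`
	Response          string `json:"response,omitempty"`
}

func main() {
	// A fixed-port TCP check: no request/response strings are set, so the
	// connection handshake alone determines health (ASCII only if set).
	hc := tcpHealthCheck{
		Port:              80,
		PortSpecification: "USE_FIXED_PORT",
	}
	b, _ := json.MarshalIndent(hc, "", "  ")
	fmt.Println(string(b))
}
```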
This can only be set during instance creation, it cannot be set or changed after the instance has been created.", - "type": "boolean" } }, "type": "object" }, - "SchedulingNodeAffinity": { - "description": "Node Affinity: the configuration of desired nodes onto which this Instance could be scheduled.", - "id": "SchedulingNodeAffinity", + "TargetHttpProxiesScopedList": { + "id": "TargetHttpProxiesScopedList", "properties": { - "key": { - "description": "Corresponds to the label key of Node resource.", - "type": "string" - }, - "operator": { - "description": "Defines the operation of node selection.", - "enum": [ - "IN", - "NOT_IN", - "OPERATOR_UNSPECIFIED" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, - "values": { - "description": "Corresponds to the label values of Node resource.", + "targetHttpProxies": { + "description": "A list of TargetHttpProxies contained in this scope.", "items": { - "type": "string" + "$ref": "TargetHttpProxy" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of backend services when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } }, - "type": "array" - } - }, - "type": "object" - }, - "SecurityPoliciesListPreconfiguredExpressionSetsResponse": { - "id": "SecurityPoliciesListPreconfiguredExpressionSetsResponse", - "properties": { - "preconfiguredExpressionSets": { - "$ref": "SecurityPoliciesWafConfig" - } - }, - "type": "object" - }, - "SecurityPoliciesWafConfig": { - "id": "SecurityPoliciesWafConfig", - "properties": { - "wafRules": { - "$ref": "PreconfiguredWafSet" + "type": "object" } }, "type": "object" }, - "SecurityPolicy": { - "description": "A security policy is comprised of one or more rules. It can also be associated with one or more 'targets'. (== resource_for v1.securityPolicies ==) (== resource_for beta.securityPolicies ==)", - "id": "SecurityPolicy", + "TargetHttpProxy": { + "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== resource_for v1.targetHttpProxies ==)", + "id": "TargetHttpProxy", "properties": { "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", @@ -38775,75 +45132,64 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "fingerprint": { - "description": "Specifies a fingerprint for this resource, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make get() request to the security policy.", - "format": "byte", - "type": "string" - }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", "type": "string" }, "kind": { - "default": "compute#securityPolicy", - "description": "[Output only] Type of the resource. Always compute#securityPolicyfor security policies", - "type": "string" - }, - "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this security policy, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make get() request to the security policy.", - "format": "byte", + "default": "compute#targetHttpProxy", + "description": "[Output Only] Type of resource. Always compute#targetHttpProxy for target HTTP proxies.", "type": "string" }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to apply to this security policy resource. These can be later modified by the setLabels method. 
Each label key/value must comply with RFC1035. Label values may be empty.", - "type": "object" - }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "rules": { - "description": "A list of rules that belong to this policy. There must always be a default rule (rule with priority 2147483647 and match \"*\"). If no rules are provided when creating a security policy, a default rule with action \"allow\" will be added.", - "items": { - "$ref": "SecurityPolicyRule" - }, - "type": "array" + "region": { + "description": "[Output Only] URL of the region where the regional Target HTTP Proxy resides. This field is not applicable to global Target HTTP Proxies.", + "type": "string" }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" + }, + "urlMap": { + "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService.", + "type": "string" } }, "type": "object" }, - "SecurityPolicyList": { - "id": "SecurityPolicyList", + "TargetHttpProxyAggregatedList": { + "id": "TargetHttpProxyAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of SecurityPolicy resources.", - "items": { - "$ref": "SecurityPolicy" + "additionalProperties": { + "$ref": "TargetHttpProxiesScopedList", + "description": "Name of the scope containing this set of TargetHttpProxies." }, - "type": "array" + "description": "A list of TargetHttpProxiesScopedList resources.", + "type": "object" }, "kind": { - "default": "compute#securityPolicyList", - "description": "[Output Only] Type of resource. Always compute#securityPolicyList for listsof securityPolicies", + "default": "compute#targetHttpProxyAggregatedList", + "description": "[Output Only] Type of resource. Always compute#targetHttpProxyAggregatedList for lists of Target HTTP Proxies.", "type": "string" }, "nextPageToken": { "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
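Per the TargetHttpProxy schema above, the resource is essentially a name bound to a urlMap URL. The following sketch shows a minimal insert body; the structs are illustrative stand-ins and the project and URL map references are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// targetHttpProxy mirrors the writable TargetHttpProxy fields above.
type targetHttpProxy struct {
	Name        string `json:"name"`
	Description string `json:"description,omitempty"`
	UrlMap      string `json:"urlMap"`
}

func main() {
	// Minimal insert body: a name plus the UrlMap that maps URLs to
	// backend services. Both values here are made-up placeholders.
	p := targetHttpProxy{
		Name:   "example-http-proxy",
		UrlMap: "projects/example-project/global/urlMaps/example-url-map",
	}
	b, _ := json.MarshalIndent(p, "", "  ")
	fmt.Println(string(b))
}
```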
Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -38928,361 +45274,24 @@ }, "type": "object" }, - "SecurityPolicyReference": { - "id": "SecurityPolicyReference", - "properties": { - "securityPolicy": { - "type": "string" - } - }, - "type": "object" - }, - "SecurityPolicyRule": { - "description": "Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny).", - "id": "SecurityPolicyRule", - "properties": { - "action": { - "description": "The Action to preform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "kind": { - "default": "compute#securityPolicyRule", - "description": "[Output only] Type of the resource. Always compute#securityPolicyRule for security policy rules", - "type": "string" - }, - "match": { - "$ref": "SecurityPolicyRuleMatcher", - "description": "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding ?action? is enforced." - }, - "preview": { - "description": "If set to true, the specified action is not enforced.", - "type": "boolean" - }, - "priority": { - "description": "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "SecurityPolicyRuleMatcher": { - "description": "Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified.", - "id": "SecurityPolicyRuleMatcher", - "properties": { - "config": { - "$ref": "SecurityPolicyRuleMatcherConfig", - "description": "The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified." - }, - "expr": { - "$ref": "Expr", - "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header." - }, - "versionedExpr": { - "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. 
Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", - "enum": [ - "SRC_IPS_V1" - ], - "enumDescriptions": [ - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "SecurityPolicyRuleMatcherConfig": { - "id": "SecurityPolicyRuleMatcherConfig", - "properties": { - "srcIpRanges": { - "description": "CIDR IP address range.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "SerialPortOutput": { - "description": "An instance's serial console output.", - "id": "SerialPortOutput", - "properties": { - "contents": { - "description": "[Output Only] The contents of the console output.", - "type": "string" - }, - "kind": { - "default": "compute#serialPortOutput", - "description": "[Output Only] Type of the resource. Always compute#serialPortOutput for serial port output.", - "type": "string" - }, - "next": { - "description": "[Output Only] The position of the next byte of content from the serial console output. Use this value in the next request as the start parameter.", - "format": "int64", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "start": { - "description": "The starting byte position of the output that was returned. This should match the start parameter sent with the request. If the serial console output exceeds the size of the buffer, older output will be overwritten by newer content and the start values will be mismatched.", - "format": "int64", - "type": "string" - } - }, - "type": "object" - }, - "ServiceAccount": { - "description": "A service account.", - "id": "ServiceAccount", - "properties": { - "email": { - "description": "Email address of the service account.", - "type": "string" - }, - "scopes": { - "description": "The list of scopes to be made available for this service account.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "ShieldedVmConfig": { - "description": "A set of Shielded VM options.", - "id": "ShieldedVmConfig", - "properties": { - "enableIntegrityMonitoring": { - "description": "Defines whether the instance has integrity monitoring enabled.", - "type": "boolean" - }, - "enableSecureBoot": { - "description": "Defines whether the instance has Secure Boot enabled.", - "type": "boolean" - }, - "enableVtpm": { - "description": "Defines whether the instance has the vTPM enabled.", - "type": "boolean" - } - }, - "type": "object" - }, - "ShieldedVmIdentity": { - "description": "A shielded VM identity entry.", - "id": "ShieldedVmIdentity", - "properties": { - "encryptionKey": { - "$ref": "ShieldedVmIdentityEntry", - "description": "An Endorsement Key (EK) issued to the Shielded VM's vTPM." - }, - "kind": { - "default": "compute#shieldedVmIdentity", - "description": "[Output Only] Type of the resource. Always compute#shieldedVmIdentity for shielded VM identity entry.", - "type": "string" - }, - "signingKey": { - "$ref": "ShieldedVmIdentityEntry", - "description": "An Attestation Key (AK) issued to the Shielded VM's vTPM." - } - }, - "type": "object" - }, - "ShieldedVmIdentityEntry": { - "description": "A Shielded VM Identity Entry.", - "id": "ShieldedVmIdentityEntry", - "properties": { - "ekCert": { - "description": "A PEM-encoded X.509 certificate. 
This field can be empty.", - "type": "string" - }, - "ekPub": { - "description": "A PEM-encoded public key.", - "type": "string" - } - }, - "type": "object" - }, - "ShieldedVmIntegrityPolicy": { - "description": "The policy describes the baseline against which VM instance boot integrity is measured.", - "id": "ShieldedVmIntegrityPolicy", - "properties": { - "updateAutoLearnPolicy": { - "description": "Updates the integrity policy baseline using the measurements from the VM instance's most recent boot.", - "type": "boolean" - } - }, - "type": "object" - }, - "SignedUrlKey": { - "description": "Represents a customer-supplied Signing Key used by Cloud CDN Signed URLs", - "id": "SignedUrlKey", - "properties": { - "keyName": { - "description": "Name of the key. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "keyValue": { - "description": "128-bit key value used for signing the URL. The key value must be a valid RFC 4648 Section 5 base64url encoded string.", - "type": "string" - } - }, - "type": "object" - }, - "Snapshot": { - "description": "A persistent disk snapshot resource. (== resource_for beta.snapshots ==) (== resource_for v1.snapshots ==)", - "id": "Snapshot", - "properties": { - "autoCreated": { - "description": "[Output Only] Set to true if snapshots are automatically by applying resource policy on the target disk.", - "type": "boolean" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "diskSizeGb": { - "description": "[Output Only] Size of the snapshot, specified in GB.", - "format": "int64", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#snapshot", - "description": "[Output Only] Type of the resource. Always compute#snapshot for Snapshot resources.", - "type": "string" - }, - "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this snapshot, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a snapshot.", - "format": "byte", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to apply to this snapshot. These can be later modified by the setLabels method. 
Label values may be empty.", - "type": "object" - }, - "licenseCodes": { - "description": "[Output Only] Integer license codes indicating which licenses are attached to this snapshot.", - "items": { - "format": "int64", - "type": "string" - }, - "type": "array" - }, - "licenses": { - "description": "[Output Only] A list of public visible licenses that apply to this snapshot. This can be because the original image had licenses attached (such as a Windows image).", - "items": { - "type": "string" - }, - "type": "array" - }, - "name": { - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "snapshotEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the image later For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." - }, - "sourceDisk": { - "description": "[Output Only] The source disk used to create this snapshot.", - "type": "string" - }, - "sourceDiskEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." - }, - "sourceDiskId": { - "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name.", - "type": "string" - }, - "status": { - "description": "[Output Only] The status of the snapshot. This can be CREATING, DELETING, FAILED, READY, or UPLOADING.", - "enum": [ - "CREATING", - "DELETING", - "FAILED", - "READY", - "UPLOADING" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "storageBytes": { - "description": "[Output Only] A size of the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", - "format": "int64", - "type": "string" - }, - "storageBytesStatus": { - "description": "[Output Only] An indicator whether storageBytes is in a stable state or it is being adjusted as a result of shared storage reallocation. 
This status can either be UPDATING, meaning the size of the snapshot is being updated, or UP_TO_DATE, meaning the size of the snapshot is up-to-date.", - "enum": [ - "UPDATING", - "UP_TO_DATE" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "storageLocations": { - "description": "GCS bucket storage location of the snapshot (regional or multi-regional).", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "SnapshotList": { - "description": "Contains a list of Snapshot resources.", - "id": "SnapshotList", + "TargetHttpProxyList": { + "description": "A list of TargetHttpProxy resources.", + "id": "TargetHttpProxyList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Snapshot resources.", + "description": "A list of TargetHttpProxy resources.", "items": { - "$ref": "Snapshot" + "$ref": "TargetHttpProxy" }, "type": "array" }, "kind": { - "default": "compute#snapshotList", - "description": "Type of resource.", + "default": "compute#targetHttpProxyList", + "description": "Type of resource. Always compute#targetHttpProxyList for lists of target HTTP proxies.", "type": "string" }, "nextPageToken": { @@ -39294,7 +45303,101 @@ "type": "string" }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "TargetHttpsProxiesScopedList": { + "id": "TargetHttpsProxiesScopedList", + "properties": { + "targetHttpsProxies": { + "description": "A list of TargetHttpsProxies contained in this scope.", + "items": { + "$ref": "TargetHttpsProxy" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of backend services when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -39377,28 +45480,43 @@ }, "type": "object" }, - "SourceInstanceParams": { - "description": "A specification of the parameters to use when creating the instance template from a source instance.", - "id": "SourceInstanceParams", + "TargetHttpsProxiesSetQuicOverrideRequest": { + "id": "TargetHttpsProxiesSetQuicOverrideRequest", "properties": { - "diskConfigs": { - "description": "Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes.", + "quicOverride": { + "description": "QUIC policy for the TargetHttpsProxy resource.", + "enum": [ + "DISABLE", + "ENABLE", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "TargetHttpsProxiesSetSslCertificatesRequest": { + "id": "TargetHttpsProxiesSetSslCertificatesRequest", + "properties": { + "sslCertificates": { + "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", "items": { - "$ref": "DiskInstantiationConfig" + "type": "string" }, "type": "array" } }, "type": "object" }, - "SslCertificate": { - "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user. (== resource_for beta.sslCertificates ==) (== resource_for v1.sslCertificates ==)", - "id": "SslCertificate", + "TargetHttpsProxy": { + "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy. (== resource_for beta.targetHttpsProxies ==) (== resource_for v1.targetHttpsProxies ==)", + "id": "TargetHttpsProxy", "properties": { - "certificate": { - "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert.", - "type": "string" - }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -39407,83 +45525,79 @@ "description": "An optional description of this resource. 
Provide this property when you create the resource.", "type": "string" }, - "expireTime": { - "description": "[Output Only] Expire time of the certificate. RFC3339", - "type": "string" - }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", "type": "string" }, "kind": { - "default": "compute#sslCertificate", - "description": "[Output Only] Type of the resource. Always compute#sslCertificate for SSL certificates.", + "default": "compute#targetHttpsProxy", + "description": "[Output Only] Type of resource. Always compute#targetHttpsProxy for target HTTPS proxies.", "type": "string" }, - "managed": { - "$ref": "SslCertificateManagedSslCertificate", - "description": "Configuration and status of a managed SSL certificate." - }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "privateKey": { - "description": "A write-only private key in PEM format. Only insert requests will include this field.", + "quicOverride": { + "description": "Specifies the QUIC override policy for this TargetHttpsProxy resource. This determines whether the load balancer will attempt to negotiate QUIC with clients or not. Can specify one of NONE, ENABLE, or DISABLE. Specify ENABLE to always enable QUIC, Enables QUIC when set to ENABLE, and disables QUIC when set to DISABLE. If NONE is specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. Not specifying this field is equivalent to specifying NONE.", + "enum": [ + "DISABLE", + "ENABLE", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "" + ], "type": "string" }, - "selfLink": { - "description": "[Output only] Server-defined URL for the resource.", + "region": { + "description": "[Output Only] URL of the region where the regional TargetHttpsProxy resides. This field is not applicable to global TargetHttpsProxies.", "type": "string" }, - "selfManaged": { - "$ref": "SslCertificateSelfManagedSslCertificate", - "description": "Configuration and status of a self-managed SSL certificate." + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" }, - "subjectAlternativeNames": { - "description": "[Output Only] Domains associated with the certificate via Subject Alternative Name.", + "sslCertificates": { + "description": "URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates.", "items": { "type": "string" }, "type": "array" }, - "type": { - "description": "(Optional) Specifies the type of SSL certificate, either \"SELF_MANAGED\" or \"MANAGED\". 
If not specified, the certificate is self-managed and the fields certificate and private_key are used.", - "enum": [ - "MANAGED", - "SELF_MANAGED", - "TYPE_UNSPECIFIED" - ], - "enumDescriptions": [ - "", - "", - "" - ], + "sslPolicy": { + "description": "URL of SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured.", + "type": "string" + }, + "urlMap": { + "description": "A fully-qualified or valid partial URL to the UrlMap resource that defines the mapping from URL to the BackendService. For example, the following are all valid URLs for specifying a URL map: \n- https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map \n- projects/project/global/urlMaps/url-map \n- global/urlMaps/url-map", "type": "string" } }, "type": "object" }, - "SslCertificateList": { - "description": "Contains a list of SslCertificate resources.", - "id": "SslCertificateList", + "TargetHttpsProxyAggregatedList": { + "id": "TargetHttpsProxyAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of SslCertificate resources.", - "items": { - "$ref": "SslCertificate" + "additionalProperties": { + "$ref": "TargetHttpsProxiesScopedList", + "description": "Name of the scope containing this set of TargetHttpsProxies." }, - "type": "array" + "description": "A list of TargetHttpsProxiesScopedList resources.", + "type": "object" }, "kind": { - "default": "compute#sslCertificateList", - "description": "Type of resource.", + "default": "compute#targetHttpsProxyAggregatedList", + "description": "[Output Only] Type of resource. Always compute#targetHttpsProxyAggregatedList for lists of Target HTTP Proxies.", "type": "string" }, "nextPageToken": { @@ -39578,97 +45692,24 @@ }, "type": "object" }, - "SslCertificateManagedSslCertificate": { - "description": "Configuration and status of a managed SSL certificate.", - "id": "SslCertificateManagedSslCertificate", - "properties": { - "domainStatus": { - "additionalProperties": { - "enum": [ - "ACTIVE", - "DOMAIN_STATUS_UNSPECIFIED", - "FAILED_CAA_CHECKING", - "FAILED_CAA_FORBIDDEN", - "FAILED_NOT_VISIBLE", - "FAILED_RATE_LIMITED", - "PROVISIONING" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "description": "[Output only] Detailed statuses of the domains specified for managed certificate resource.", - "type": "object" - }, - "domains": { - "description": "The domains for which a managed SSL certificate will be generated. Currently only single-domain certs are supported.", - "items": { - "type": "string" - }, - "type": "array" - }, - "status": { - "description": "[Output only] Status of the managed certificate resource.", - "enum": [ - "ACTIVE", - "MANAGED_CERTIFICATE_STATUS_UNSPECIFIED", - "PROVISIONING", - "PROVISIONING_FAILED", - "PROVISIONING_FAILED_PERMANENTLY", - "RENEWAL_FAILED" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "SslCertificateSelfManagedSslCertificate": { - "description": "Configuration and status of a self-managed SSL certificate.", - "id": "SslCertificateSelfManagedSslCertificate", - "properties": { - "certificate": { - "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. 
The chain must include at least one intermediate cert.", - "type": "string" - }, - "privateKey": { - "description": "A write-only private key in PEM format. Only insert requests will include this field.", - "type": "string" - } - }, - "type": "object" - }, - "SslPoliciesList": { - "id": "SslPoliciesList", + "TargetHttpsProxyList": { + "description": "Contains a list of TargetHttpsProxy resources.", + "id": "TargetHttpsProxyList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of SslPolicy resources.", + "description": "A list of TargetHttpsProxy resources.", "items": { - "$ref": "SslPolicy" + "$ref": "TargetHttpsProxy" }, "type": "array" }, "kind": { - "default": "compute#sslPoliciesList", - "description": "[Output Only] Type of the resource. Always compute#sslPoliciesList for lists of sslPolicies.", + "default": "compute#targetHttpsProxyList", + "description": "Type of resource. Always compute#targetHttpsProxyList for lists of target HTTPS proxies.", "type": "string" }, "nextPageToken": { @@ -39763,90 +45804,43 @@ }, "type": "object" }, - "SslPoliciesListAvailableFeaturesResponse": { - "id": "SslPoliciesListAvailableFeaturesResponse", - "properties": { - "features": { - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "SslPolicy": { - "description": "A SSL policy specifies the server-side support for SSL features. This can be attached to a TargetHttpsProxy or a TargetSslProxy. This affects connections between clients and the HTTPS or SSL proxy load balancer. They do not affect the connection between the load balancers and the backends.", - "id": "SslPolicy", + "TargetInstance": { + "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols. (== resource_for beta.targetInstances ==) (== resource_for v1.targetInstances ==)", + "id": "TargetInstance", "properties": { "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "customFeatures": { - "description": "A list of features enabled when the selected profile is CUSTOM. The\n- method returns the set of features that can be specified in this list. This field must be empty if the profile is not CUSTOM.", - "items": { - "type": "string" - }, - "type": "array" - }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "enabledFeatures": { - "description": "[Output Only] The list of features enabled in the SSL policy.", - "items": { - "type": "string" - }, - "type": "array" - }, - "fingerprint": { - "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a SslPolicy. An up-to-date fingerprint must be provided in order to update the SslPolicy, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an SslPolicy.", - "format": "byte", - "type": "string" - }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", "type": "string" }, - "kind": { - "default": "compute#sslPolicy", - "description": "[Output only] Type of the resource. 
Always compute#sslPolicyfor SSL policies.", + "instance": { + "description": "A URL to the virtual machine instance that handles traffic for this target instance. When creating a target instance, you can provide the fully-qualified URL or a valid partial URL to the desired virtual machine. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance \n- zones/zone/instances/instance", "type": "string" }, - "minTlsVersion": { - "description": "The minimum version of SSL protocol that can be used by the clients to establish a connection with the load balancer. This can be one of TLS_1_0, TLS_1_1, TLS_1_2.", - "enum": [ - "TLS_1_0", - "TLS_1_1", - "TLS_1_2" - ], - "enumDescriptions": [ - "", - "", - "" - ], + "kind": { + "default": "compute#targetInstance", + "description": "[Output Only] The type of the resource. Always compute#targetInstance for target instances.", "type": "string" }, "name": { - "description": "Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "profile": { - "description": "Profile specifies the set of SSL features that can be used by the load balancer when negotiating SSL with clients. This can be one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, the set of SSL features to enable must be specified in the customFeatures field.", + "natPolicy": { + "description": "NAT option controlling how IPs are NAT'ed to the instance. Currently only NO_NAT (default value) is supported.", "enum": [ - "COMPATIBLE", - "CUSTOM", - "MODERN", - "RESTRICTED" + "NO_NAT" ], "enumDescriptions": [ - "", - "", - "", "" ], "type": "string" @@ -39855,179 +45849,15 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "warnings": { - "description": "[Output Only] If potential misconfigurations are detected for this SSL policy, this field will be populated with warning messages.", - "items": { - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "SslPolicyReference": { - "id": "SslPolicyReference", - "properties": { - "sslPolicy": { - "description": "URL of the SSL policy resource. Set this to empty string to clear any existing SSL policy associated with the target proxy resource.", - "type": "string" - } - }, - "type": "object" - }, - "Subnetwork": { - "description": "A Subnetwork resource. (== resource_for beta.subnetworks ==) (== resource_for v1.subnetworks ==)", - "id": "Subnetwork", - "properties": { - "allowSubnetCidrRoutesOverlap": { - "description": "Whether this subnetwork can conflict with static routes. Setting this to true allows this subnetwork's primary and secondary ranges to conflict with routes that have already been configured on the corresponding network. Static routes will take precedence over the subnetwork route if the route prefix length is at least as large as the subnetwork prefix length.\n\nAlso, packets destined to IPs within subnetwork may contain private/sensitive data and are prevented from leaving the virtual network. 
Setting this field to true will disable this feature.\n\nThe default value is false and applies to all existing subnetworks and automatically created subnetworks.\n\nThis field cannot be set to true at resource creation time.", - "type": "boolean" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource. This field can be set only at resource creation time.", - "type": "string" - }, - "enableFlowLogs": { - "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is to disable flow logging.", - "type": "boolean" - }, - "fingerprint": { - "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a Subnetwork. An up-to-date fingerprint must be provided in order to update the Subnetwork, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a Subnetwork.", - "format": "byte", - "type": "string" - }, - "gatewayAddress": { - "description": "[Output Only] The gateway address for default routes to reach destination addresses outside this subnetwork.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "ipCidrRange": { - "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. This field can be set only at resource creation time.", - "type": "string" - }, - "kind": { - "default": "compute#subnetwork", - "description": "[Output Only] Type of the resource. Always compute#subnetwork for Subnetwork resources.", - "type": "string" - }, - "name": { - "description": "The name of the resource, provided by the client when initially creating the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "network": { - "description": "The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. Only networks that are in the distributed mode can have subnetworks. This field can be set only at resource creation time.", - "type": "string" - }, - "privateIpGoogleAccess": { - "description": "Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess.", - "type": "boolean" - }, - "region": { - "description": "URL of the region where the Subnetwork resides. 
This field can be set only at resource creation time.", - "type": "string" - }, - "secondaryIpRanges": { - "description": "An array of configurations for secondary IP ranges for VM instances contained in this subnetwork. The primary IP of such VM must belong to the primary ipCidrRange of the subnetwork. The alias IPs may belong to either primary or secondary ranges. This field can be updated with a patch request.", - "items": { - "$ref": "SubnetworkSecondaryRange" - }, - "type": "array" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "zone": { + "description": "[Output Only] URL of the zone where the target instance resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" } }, "type": "object" }, - "SubnetworkAggregatedList": { - "id": "SubnetworkAggregatedList", + "TargetInstanceAggregatedList": { + "id": "TargetInstanceAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", @@ -40035,15 +45865,15 @@ }, "items": { "additionalProperties": { - "$ref": "SubnetworksScopedList", - "description": "Name of the scope containing this set of Subnetworks." + "$ref": "TargetInstancesScopedList", + "description": "Name of the scope containing this set of target instances." }, - "description": "A list of SubnetworksScopedList resources.", + "description": "A list of TargetInstance resources.", "type": "object" }, "kind": { - "default": "compute#subnetworkAggregatedList", - "description": "[Output Only] Type of resource. Always compute#subnetworkAggregatedList for aggregated lists of subnetworks.", + "default": "compute#targetInstanceAggregatedList", + "description": "Type of resource.", "type": "string" }, "nextPageToken": { @@ -40138,24 +45968,24 @@ }, "type": "object" }, - "SubnetworkList": { - "description": "Contains a list of Subnetwork resources.", - "id": "SubnetworkList", + "TargetInstanceList": { + "description": "Contains a list of TargetInstance resources.", + "id": "TargetInstanceList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of Subnetwork resources.", + "description": "A list of TargetInstance resources.", "items": { - "$ref": "Subnetwork" + "$ref": "TargetInstance" }, "type": "array" }, "kind": { - "default": "compute#subnetworkList", - "description": "[Output Only] Type of resource. Always compute#subnetworkList for lists of subnetworks.", + "default": "compute#targetInstanceList", + "description": "Type of resource.", "type": "string" }, "nextPageToken": { @@ -40232,61 +46062,36 @@ "type": "string" }, "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "SubnetworkSecondaryRange": { - "description": "Represents a secondary IP range of a subnetwork.", - "id": "SubnetworkSecondaryRange", - "properties": { - "ipCidrRange": { - "description": "The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. 
Only IPv4 is supported.", - "type": "string" - }, - "rangeName": { - "description": "The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork.", - "type": "string" - } - }, - "type": "object" - }, - "SubnetworksExpandIpCidrRangeRequest": { - "id": "SubnetworksExpandIpCidrRangeRequest", - "properties": { - "ipCidrRange": { - "description": "The IP (in CIDR format or netmask) of internal addresses that are legal on this Subnetwork. This range should be disjoint from other subnetworks within this network. This range can only be larger than (i.e. a superset of) the range previously defined before the update.", - "type": "string" + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "SubnetworksScopedList": { - "id": "SubnetworksScopedList", + "TargetInstancesScopedList": { + "id": "TargetInstancesScopedList", "properties": { - "subnetworks": { - "description": "A list of subnetworks contained in this scope.", + "targetInstances": { + "description": "A list of target instances contained in this scope.", "items": { - "$ref": "Subnetwork" + "$ref": "TargetInstance" }, "type": "array" }, "warning": { - "description": "An informational warning that appears when the list of addresses is empty.", + "description": "Informational warning which replaces the list of addresses when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -40369,103 +46174,49 @@ }, "type": "object" }, - "SubnetworksSetPrivateIpGoogleAccessRequest": { - "id": "SubnetworksSetPrivateIpGoogleAccessRequest", - "properties": { - "privateIpGoogleAccess": { - "type": "boolean" - } - }, - "type": "object" - }, - "TCPHealthCheck": { - "id": "TCPHealthCheck", + "TargetPool": { + "description": "A TargetPool resource. This resource defines a pool of instances, an associated HttpHealthCheck resource, and the fallback target pool. (== resource_for beta.targetPools ==) (== resource_for v1.targetPools ==)", + "id": "TargetPool", "properties": { - "port": { - "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", - "format": "int32", - "type": "integer" - }, - "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", - "type": "string" - }, - "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, TCP health check follows behavior specified in\nport\nand\nportName\nfields.", - "enum": [ - "USE_FIXED_PORT", - "USE_NAMED_PORT", - "USE_SERVING_PORT" - ], - "enumDescriptions": [ - "", - "", - "" - ], + "backupPool": { + "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its failoverRatio field is properly set to a value between [0, 1].\n\nbackupPool and failoverRatio together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below failoverRatio, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio and backupPool are not set, or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy.", "type": "string" }, - "proxyHeader": { - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ], + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "request": { - "description": "The application data to send once the TCP connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.", + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "response": { - "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.", - "type": "string" - } - }, - "type": "object" - }, - "Tags": { - "description": "A set of instance tags.", - "id": "Tags", - "properties": { - "fingerprint": { - "description": "Specifies a fingerprint for this request, which is essentially a hash of the tags' contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update tags. You must always provide an up-to-date fingerprint hash in order to update or change tags.\n\nTo see the latest fingerprint, make get() request to the instance.", - "format": "byte", - "type": "string" + "failoverRatio": { + "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool (i.e., not as a backup pool to some other target pool). The value of the field must be in [0, 1].\n\nIf set, backupPool must also be set. 
They together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below this number, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio is not set or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy.", + "format": "float", + "type": "number" }, - "items": { - "description": "An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.", + "healthChecks": { + "description": "The URL of the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if the health checks pass. An empty list means all member instances will be considered healthy at all times. Only HttpHealthChecks are supported. Only one health check may be specified.", "items": { "type": "string" }, "type": "array" - } - }, - "type": "object" - }, - "TargetHttpProxy": { - "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== resource_for v1.targetHttpProxies ==)", - "id": "TargetHttpProxy", - "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", "type": "string" }, + "instances": { + "description": "A list of resource URLs to the virtual machine instances serving this pool. They must live in zones contained in the same region as this pool.", + "items": { + "type": "string" + }, + "type": "array" + }, "kind": { - "default": "compute#targetHttpProxy", - "description": "[Output Only] Type of resource. Always compute#targetHttpProxy for target HTTP proxies.", + "default": "compute#targetPool", + "description": "[Output Only] Type of the resource. 
Always compute#targetPool for target pools.", "type": "string" }, "name": { @@ -40473,35 +46224,57 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "region": { + "description": "[Output Only] URL of the region where the target pool resides.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "urlMap": { - "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService.", + "sessionAffinity": { + "description": "Session affinity option, must be one of the following values:\nNONE: Connections from the same client IP may go to any instance in the pool.\nCLIENT_IP: Connections from the same client IP will go to the same instance in the pool while that instance remains healthy.\nCLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol will go to the same instance in the pool while that instance remains healthy.", + "enum": [ + "CLIENT_IP", + "CLIENT_IP_PORT_PROTO", + "CLIENT_IP_PROTO", + "GENERATED_COOKIE", + "HEADER_FIELD", + "HTTP_COOKIE", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ], "type": "string" } }, "type": "object" }, - "TargetHttpProxyList": { - "description": "A list of TargetHttpProxy resources.", - "id": "TargetHttpProxyList", + "TargetPoolAggregatedList": { + "id": "TargetPoolAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of TargetHttpProxy resources.", - "items": { - "$ref": "TargetHttpProxy" + "additionalProperties": { + "$ref": "TargetPoolsScopedList", + "description": "Name of the scope containing this set of target pools." }, - "type": "array" + "description": "A list of TargetPool resources.", + "type": "object" }, "kind": { - "default": "compute#targetHttpProxyList", - "description": "Type of resource. Always compute#targetHttpProxyList for lists of target HTTP proxies.", + "default": "compute#targetPoolAggregatedList", + "description": "[Output Only] Type of resource. Always compute#targetPoolAggregatedList for aggregated lists of target pools.", "type": "string" }, "nextPageToken": { @@ -40596,120 +46369,41 @@ }, "type": "object" }, - "TargetHttpsProxiesSetQuicOverrideRequest": { - "id": "TargetHttpsProxiesSetQuicOverrideRequest", - "properties": { - "quicOverride": { - "description": "QUIC policy for the TargetHttpsProxy resource.", - "enum": [ - "DISABLE", - "ENABLE", - "NONE" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "TargetHttpsProxiesSetSslCertificatesRequest": { - "id": "TargetHttpsProxiesSetSslCertificatesRequest", + "TargetPoolInstanceHealth": { + "id": "TargetPoolInstanceHealth", "properties": { - "sslCertificates": { - "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", + "healthStatus": { "items": { - "type": "string" + "$ref": "HealthStatus" }, "type": "array" - } - }, - "type": "object" - }, - "TargetHttpsProxy": { - "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy. 
(== resource_for beta.targetHttpsProxies ==) (== resource_for v1.targetHttpsProxies ==)", - "id": "TargetHttpsProxy", - "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" }, "kind": { - "default": "compute#targetHttpsProxy", - "description": "[Output Only] Type of resource. Always compute#targetHttpsProxy for target HTTPS proxies.", - "type": "string" - }, - "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "quicOverride": { - "description": "Specifies the QUIC override policy for this TargetHttpsProxy resource. This determines whether the load balancer will attempt to negotiate QUIC with clients or not. Can specify one of NONE, ENABLE, or DISABLE. Specify ENABLE to always enable QUIC, Enables QUIC when set to ENABLE, and disables QUIC when set to DISABLE. If NONE is specified, uses the QUIC policy with no user overrides, which is equivalent to DISABLE. Not specifying this field is equivalent to specifying NONE.", - "enum": [ - "DISABLE", - "ENABLE", - "NONE" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "sslCertificates": { - "description": "URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates.", - "items": { - "type": "string" - }, - "type": "array" - }, - "sslPolicy": { - "description": "URL of SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured.", - "type": "string" - }, - "urlMap": { - "description": "A fully-qualified or valid partial URL to the UrlMap resource that defines the mapping from URL to the BackendService. For example, the following are all valid URLs for specifying a URL map: \n- https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map \n- projects/project/global/urlMaps/url-map \n- global/urlMaps/url-map", + "default": "compute#targetPoolInstanceHealth", + "description": "[Output Only] Type of resource. 
Always compute#targetPoolInstanceHealth when checking the health of an instance.", "type": "string" } }, "type": "object" }, - "TargetHttpsProxyList": { - "description": "Contains a list of TargetHttpsProxy resources.", - "id": "TargetHttpsProxyList", + "TargetPoolList": { + "description": "Contains a list of TargetPool resources.", + "id": "TargetPoolList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of TargetHttpsProxy resources.", + "description": "A list of TargetPool resources.", "items": { - "$ref": "TargetHttpsProxy" + "$ref": "TargetPool" }, "type": "array" }, "kind": { - "default": "compute#targetHttpsProxyList", - "description": "Type of resource. Always compute#targetHttpsProxyList for lists of target HTTPS proxies.", + "default": "compute#targetPoolList", + "description": "[Output Only] Type of resource. Always compute#targetPoolList for lists of target pools.", "type": "string" }, "nextPageToken": { @@ -40804,88 +46498,70 @@ }, "type": "object" }, - "TargetInstance": { - "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols. (== resource_for beta.targetInstances ==) (== resource_for v1.targetInstances ==)", - "id": "TargetInstance", + "TargetPoolsAddHealthCheckRequest": { + "id": "TargetPoolsAddHealthCheckRequest", "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "instance": { - "description": "A URL to the virtual machine instance that handles traffic for this target instance. When creating a target instance, you can provide the fully-qualified URL or a valid partial URL to the desired virtual machine. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance \n- zones/zone/instances/instance", - "type": "string" - }, - "kind": { - "default": "compute#targetInstance", - "description": "[Output Only] The type of the resource. Always compute#targetInstance for target instances.", - "type": "string" - }, - "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "natPolicy": { - "description": "NAT option controlling how IPs are NAT'ed to the instance. 
Currently only NO_NAT (default value) is supported.", - "enum": [ - "NO_NAT" - ], - "enumDescriptions": [ - "" - ], - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "zone": { - "description": "[Output Only] URL of the zone where the target instance resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", - "type": "string" + "healthChecks": { + "description": "The HttpHealthCheck to add to the target pool.", + "items": { + "$ref": "HealthCheckReference" + }, + "type": "array" } }, "type": "object" }, - "TargetInstanceAggregatedList": { - "id": "TargetInstanceAggregatedList", + "TargetPoolsAddInstanceRequest": { + "id": "TargetPoolsAddInstanceRequest", "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "additionalProperties": { - "$ref": "TargetInstancesScopedList", - "description": "Name of the scope containing this set of target instances." + "instances": { + "description": "A full or partial URL to an instance to add to this target pool. This can be a full or partial URL. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name \n- projects/project-id/zones/zone/instances/instance-name \n- zones/zone/instances/instance-name", + "items": { + "$ref": "InstanceReference" }, - "description": "A list of TargetInstance resources.", - "type": "object" - }, - "kind": { - "default": "compute#targetInstanceAggregatedList", - "description": "Type of resource.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" + "type": "array" + } + }, + "type": "object" + }, + "TargetPoolsRemoveHealthCheckRequest": { + "id": "TargetPoolsRemoveHealthCheckRequest", + "properties": { + "healthChecks": { + "description": "Health check URL to be removed. This can be a full or valid partial URL. 
For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check \n- projects/project/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", + "items": { + "$ref": "HealthCheckReference" + }, + "type": "array" + } + }, + "type": "object" + }, + "TargetPoolsRemoveInstanceRequest": { + "id": "TargetPoolsRemoveInstanceRequest", + "properties": { + "instances": { + "description": "URLs of the instances to be removed from target pool.", + "items": { + "$ref": "InstanceReference" + }, + "type": "array" + } + }, + "type": "object" + }, + "TargetPoolsScopedList": { + "id": "TargetPoolsScopedList", + "properties": { + "targetPools": { + "description": "A list of target pools contained in this scope.", + "items": { + "$ref": "TargetPool" + }, + "type": "array" }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "Informational warning which replaces the list of addresses when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -40968,23 +46644,134 @@ }, "type": "object" }, - "TargetInstanceList": { - "description": "Contains a list of TargetInstance resources.", - "id": "TargetInstanceList", + "TargetReference": { + "id": "TargetReference", + "properties": { + "target": { + "type": "string" + } + }, + "type": "object" + }, + "TargetSslProxiesSetBackendServiceRequest": { + "id": "TargetSslProxiesSetBackendServiceRequest", + "properties": { + "service": { + "description": "The URL of the new BackendService resource for the targetSslProxy.", + "type": "string" + } + }, + "type": "object" + }, + "TargetSslProxiesSetProxyHeaderRequest": { + "id": "TargetSslProxiesSetProxyHeaderRequest", + "properties": { + "proxyHeader": { + "description": "The new type of proxy header to append before sending data to the backend. NONE or PROXY_V1 are allowed.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "TargetSslProxiesSetSslCertificatesRequest": { + "id": "TargetSslProxiesSetSslCertificatesRequest", + "properties": { + "sslCertificates": { + "description": "New set of URLs to SslCertificate resources to associate with this TargetSslProxy. Currently exactly one ssl certificate must be specified.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "TargetSslProxy": { + "description": "A TargetSslProxy resource. This resource defines an SSL proxy. (== resource_for beta.targetSslProxies ==) (== resource_for v1.targetSslProxies ==)", + "id": "TargetSslProxy", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#targetSslProxy", + "description": "[Output Only] Type of the resource. Always compute#targetSslProxy for target SSL proxies.", + "type": "string" + }, + "name": { + "description": "Name of the resource. 
Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "proxyHeader": { + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "service": { + "description": "URL to the BackendService resource.", + "type": "string" + }, + "sslCertificates": { + "description": "URLs to SslCertificate resources that are used to authenticate connections to Backends. At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates.", + "items": { + "type": "string" + }, + "type": "array" + }, + "sslPolicy": { + "description": "URL of SslPolicy resource that will be associated with the TargetSslProxy resource. If not set, the TargetSslProxy resource will not have any SSL policy configured.", + "type": "string" + } + }, + "type": "object" + }, + "TargetSslProxyList": { + "description": "Contains a list of TargetSslProxy resources.", + "id": "TargetSslProxyList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of TargetInstance resources.", + "description": "A list of TargetSslProxy resources.", "items": { - "$ref": "TargetInstance" + "$ref": "TargetSslProxy" }, "type": "array" }, "kind": { - "default": "compute#targetInstanceList", + "default": "compute#targetSslProxyList", "description": "Type of resource.", "type": "string" }, @@ -41080,18 +46867,114 @@ }, "type": "object" }, - "TargetInstancesScopedList": { - "id": "TargetInstancesScopedList", + "TargetTcpProxiesSetBackendServiceRequest": { + "id": "TargetTcpProxiesSetBackendServiceRequest", "properties": { - "targetInstances": { - "description": "A list of target instances contained in this scope.", + "service": { + "description": "The URL of the new BackendService resource for the targetTcpProxy.", + "type": "string" + } + }, + "type": "object" + }, + "TargetTcpProxiesSetProxyHeaderRequest": { + "id": "TargetTcpProxiesSetProxyHeaderRequest", + "properties": { + "proxyHeader": { + "description": "The new type of proxy header to append before sending data to the backend. NONE or PROXY_V1 are allowed.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "TargetTcpProxy": { + "description": "A TargetTcpProxy resource. This resource defines a TCP proxy. (== resource_for beta.targetTcpProxies ==) (== resource_for v1.targetTcpProxies ==)", + "id": "TargetTcpProxy", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. 
Provide this property when you create the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#targetTcpProxy", + "description": "[Output Only] Type of the resource. Always compute#targetTcpProxy for target TCP proxies.", + "type": "string" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "proxyHeader": { + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "service": { + "description": "URL to the BackendService resource.", + "type": "string" + } + }, + "type": "object" + }, + "TargetTcpProxyList": { + "description": "Contains a list of TargetTcpProxy resources.", + "id": "TargetTcpProxyList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of TargetTcpProxy resources.", "items": { - "$ref": "TargetInstance" + "$ref": "TargetTcpProxy" }, "type": "array" }, + "kind": { + "default": "compute#targetTcpProxyList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "Informational warning which replaces the list of addresses when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -41174,14 +47057,10 @@ }, "type": "object" }, - "TargetPool": { - "description": "A TargetPool resource. This resource defines a pool of instances, an associated HttpHealthCheck resource, and the fallback target pool. (== resource_for beta.targetPools ==) (== resource_for v1.targetPools ==)", - "id": "TargetPool", + "TargetVpnGateway": { + "description": "Represents a Target VPN gateway resource. 
(== resource_for beta.targetVpnGateways ==) (== resource_for v1.targetVpnGateways ==)", + "id": "TargetVpnGateway", "properties": { - "backupPool": { - "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its failoverRatio field is properly set to a value between [0, 1].\n\nbackupPool and failoverRatio together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below failoverRatio, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio and backupPool are not set, or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy.", - "type": "string" - }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -41190,13 +47069,8 @@ "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "failoverRatio": { - "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool (i.e., not as a backup pool to some other target pool). The value of the field must be in [0, 1].\n\nIf set, backupPool must also be set. They together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below this number, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio is not set or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy.", - "format": "float", - "type": "number" - }, - "healthChecks": { - "description": "The URL of the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if the health checks pass. An empty list means all member instances will be considered healthy at all times. Only HttpHealthChecks are supported. Only one health check may be specified.", + "forwardingRules": { + "description": "[Output Only] A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated with a VPN gateway.", "items": { "type": "string" }, @@ -41207,54 +47081,78 @@ "format": "uint64", "type": "string" }, - "instances": { - "description": "A list of resource URLs to the virtual machine instances serving this pool. They must live in zones contained in the same region as this pool.", - "items": { - "type": "string" - }, - "type": "array" - }, "kind": { - "default": "compute#targetPool", - "description": "[Output Only] Type of the resource. Always compute#targetPool for target pools.", + "default": "compute#targetVpnGateway", + "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", + "type": "string" + }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this TargetVpnGateway, which is essentially a hash of the labels set used for optimistic locking. 
The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a TargetVpnGateway.", + "format": "byte", "type": "string" }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels to apply to this TargetVpnGateway resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", + "type": "object" + }, "name": { + "annotations": { + "required": [ + "compute.targetVpnGateways.insert" + ] + }, "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "network": { + "annotations": { + "required": [ + "compute.targetVpnGateways.insert" + ] + }, + "description": "URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.", + "type": "string" + }, "region": { - "description": "[Output Only] URL of the region where the target pool resides.", + "description": "[Output Only] URL of the region where the target VPN gateway resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "sessionAffinity": { - "description": "Session affinity option, must be one of the following values:\nNONE: Connections from the same client IP may go to any instance in the pool.\nCLIENT_IP: Connections from the same client IP will go to the same instance in the pool while that instance remains healthy.\nCLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol will go to the same instance in the pool while that instance remains healthy.", + "status": { + "description": "[Output Only] The status of the VPN gateway, which can be one of the following: CREATING, READY, FAILED, or DELETING.", "enum": [ - "CLIENT_IP", - "CLIENT_IP_PORT_PROTO", - "CLIENT_IP_PROTO", - "GENERATED_COOKIE", - "NONE" + "CREATING", + "DELETING", + "FAILED", + "READY" ], "enumDescriptions": [ "", "", "", - "", "" ], "type": "string" + }, + "tunnels": { + "description": "[Output Only] A list of URLs to VpnTunnel resources. VpnTunnels are created using the compute.vpntunnels.insert method and associated with a VPN gateway.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "TargetPoolAggregatedList": { - "id": "TargetPoolAggregatedList", + "TargetVpnGatewayAggregatedList": { + "id": "TargetVpnGatewayAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", @@ -41262,15 +47160,15 @@ }, "items": { "additionalProperties": { - "$ref": "TargetPoolsScopedList", - "description": "Name of the scope containing this set of target pools." 
+ "$ref": "TargetVpnGatewaysScopedList", + "description": "[Output Only] Name of the scope containing this set of target VPN gateways." }, - "description": "A list of TargetPool resources.", + "description": "A list of TargetVpnGateway resources.", "type": "object" }, "kind": { - "default": "compute#targetPoolAggregatedList", - "description": "[Output Only] Type of resource. Always compute#targetPoolAggregatedList for aggregated lists of target pools.", + "default": "compute#targetVpnGatewayAggregatedList", + "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", "type": "string" }, "nextPageToken": { @@ -41365,41 +47263,24 @@ }, "type": "object" }, - "TargetPoolInstanceHealth": { - "id": "TargetPoolInstanceHealth", - "properties": { - "healthStatus": { - "items": { - "$ref": "HealthStatus" - }, - "type": "array" - }, - "kind": { - "default": "compute#targetPoolInstanceHealth", - "description": "[Output Only] Type of resource. Always compute#targetPoolInstanceHealth when checking the health of an instance.", - "type": "string" - } - }, - "type": "object" - }, - "TargetPoolList": { - "description": "Contains a list of TargetPool resources.", - "id": "TargetPoolList", + "TargetVpnGatewayList": { + "description": "Contains a list of TargetVpnGateway resources.", + "id": "TargetVpnGatewayList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of TargetPool resources.", + "description": "A list of TargetVpnGateway resources.", "items": { - "$ref": "TargetPool" + "$ref": "TargetVpnGateway" }, "type": "array" }, "kind": { - "default": "compute#targetPoolList", - "description": "[Output Only] Type of resource. Always compute#targetPoolList for lists of target pools.", + "default": "compute#targetVpnGatewayList", + "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", "type": "string" }, "nextPageToken": { @@ -41494,70 +47375,18 @@ }, "type": "object" }, - "TargetPoolsAddHealthCheckRequest": { - "id": "TargetPoolsAddHealthCheckRequest", - "properties": { - "healthChecks": { - "description": "The HttpHealthCheck to add to the target pool.", - "items": { - "$ref": "HealthCheckReference" - }, - "type": "array" - } - }, - "type": "object" - }, - "TargetPoolsAddInstanceRequest": { - "id": "TargetPoolsAddInstanceRequest", - "properties": { - "instances": { - "description": "A full or partial URL to an instance to add to this target pool. This can be a full or partial URL. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name \n- projects/project-id/zones/zone/instances/instance-name \n- zones/zone/instances/instance-name", - "items": { - "$ref": "InstanceReference" - }, - "type": "array" - } - }, - "type": "object" - }, - "TargetPoolsRemoveHealthCheckRequest": { - "id": "TargetPoolsRemoveHealthCheckRequest", - "properties": { - "healthChecks": { - "description": "Health check URL to be removed. This can be a full or valid partial URL. 
For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check \n- projects/project/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", - "items": { - "$ref": "HealthCheckReference" - }, - "type": "array" - } - }, - "type": "object" - }, - "TargetPoolsRemoveInstanceRequest": { - "id": "TargetPoolsRemoveInstanceRequest", - "properties": { - "instances": { - "description": "URLs of the instances to be removed from target pool.", - "items": { - "$ref": "InstanceReference" - }, - "type": "array" - } - }, - "type": "object" - }, - "TargetPoolsScopedList": { - "id": "TargetPoolsScopedList", + "TargetVpnGatewaysScopedList": { + "id": "TargetVpnGatewaysScopedList", "properties": { - "targetPools": { - "description": "A list of target pools contained in this scope.", + "targetVpnGateways": { + "description": "[Output Only] A list of target VPN gateways contained in this scope.", "items": { - "$ref": "TargetPool" + "$ref": "TargetVpnGateway" }, "type": "array" }, "warning": { - "description": "Informational warning which replaces the list of addresses when the list is empty.", + "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -41640,48 +47469,42 @@ }, "type": "object" }, - "TargetReference": { - "id": "TargetReference", + "TestFailure": { + "id": "TestFailure", "properties": { - "target": { + "actualService": { "type": "string" - } - }, - "type": "object" - }, - "TargetSslProxiesSetBackendServiceRequest": { - "id": "TargetSslProxiesSetBackendServiceRequest", - "properties": { - "service": { - "description": "The URL of the new BackendService resource for the targetSslProxy.", + }, + "expectedService": { + "type": "string" + }, + "host": { + "type": "string" + }, + "path": { "type": "string" } }, "type": "object" }, - "TargetSslProxiesSetProxyHeaderRequest": { - "id": "TargetSslProxiesSetProxyHeaderRequest", + "TestPermissionsRequest": { + "id": "TestPermissionsRequest", "properties": { - "proxyHeader": { - "description": "The new type of proxy header to append before sending data to the backend. NONE or PROXY_V1 are allowed.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" + "permissions": { + "description": "The set of permissions to check for the 'resource'. Permissions with wildcards (such as '*' or 'storage.*') are not allowed.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" }, - "TargetSslProxiesSetSslCertificatesRequest": { - "id": "TargetSslProxiesSetSslCertificatesRequest", + "TestPermissionsResponse": { + "id": "TestPermissionsResponse", "properties": { - "sslCertificates": { - "description": "New set of URLs to SslCertificate resources to associate with this TargetSslProxy. Currently exactly one ssl certificate must be specified.", + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", "items": { "type": "string" }, @@ -41690,26 +47513,54 @@ }, "type": "object" }, - "TargetSslProxy": { - "description": "A TargetSslProxy resource. This resource defines an SSL proxy. 
(== resource_for beta.targetSslProxies ==) (== resource_for v1.targetSslProxies ==)", - "id": "TargetSslProxy", + "UrlMap": { + "description": "A UrlMap resource. This resource defines the mapping from URL to the BackendService resource, based on the \"longest-match\" of the URL's host and path.", + "id": "UrlMap", "properties": { "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, + "defaultRouteAction": { + "$ref": "HttpRouteAction", + "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set." + }, + "defaultService": { + "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.", + "type": "string" + }, + "defaultUrlRedirect": { + "$ref": "HttpRedirectAction", + "description": "When none of the specified hostRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set." + }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a UrlMap.", + "format": "byte", + "type": "string" + }, + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here take effect after headerAction specified under pathMatcher." + }, + "hostRules": { + "description": "The list of HostRules to use against the URL.", + "items": { + "$ref": "HostRule" + }, + "type": "array" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", "type": "string" }, "kind": { - "default": "compute#targetSslProxy", - "description": "[Output Only] Type of the resource. Always compute#targetSslProxy for target SSL proxies.", + "default": "compute#urlMap", + "description": "[Output Only] Type of the resource. 
Always compute#urlMaps for url maps.", "type": "string" }, "name": { @@ -41717,57 +47568,48 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "proxyHeader": { - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ], + "pathMatchers": { + "description": "The list of named PathMatchers to use against the URL.", + "items": { + "$ref": "PathMatcher" + }, + "type": "array" + }, + "region": { + "description": "[Output Only] URL of the region where the regional URL map resides. This field is not applicable to global URL maps. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "service": { - "description": "URL to the BackendService resource.", - "type": "string" - }, - "sslCertificates": { - "description": "URLs to SslCertificate resources that are used to authenticate connections to Backends. At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates.", + "tests": { + "description": "The list of expected URL mapping tests. Request to update this UrlMap will succeed only if all of the test cases pass. You can specify a maximum of 100 tests per UrlMap.", "items": { - "type": "string" + "$ref": "UrlMapTest" }, "type": "array" - }, - "sslPolicy": { - "description": "URL of SslPolicy resource that will be associated with the TargetSslProxy resource. If not set, the TargetSslProxy resource will not have any SSL policy configured.", - "type": "string" } }, "type": "object" }, - "TargetSslProxyList": { - "description": "Contains a list of TargetSslProxy resources.", - "id": "TargetSslProxyList", + "UrlMapList": { + "description": "Contains a list of UrlMap resources.", + "id": "UrlMapList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of TargetSslProxy resources.", + "description": "A list of UrlMap resources.", "items": { - "$ref": "TargetSslProxy" + "$ref": "UrlMap" }, "type": "array" }, "kind": { - "default": "compute#targetSslProxyList", + "default": "compute#urlMapList", "description": "Type of resource.", "type": "string" }, @@ -41863,101 +47705,82 @@ }, "type": "object" }, - "TargetTcpProxiesSetBackendServiceRequest": { - "id": "TargetTcpProxiesSetBackendServiceRequest", - "properties": { - "service": { - "description": "The URL of the new BackendService resource for the targetTcpProxy.", - "type": "string" - } - }, - "type": "object" - }, - "TargetTcpProxiesSetProxyHeaderRequest": { - "id": "TargetTcpProxiesSetProxyHeaderRequest", + "UrlMapReference": { + "id": "UrlMapReference", "properties": { - "proxyHeader": { - "description": "The new type of proxy header to append before sending data to the backend. NONE or PROXY_V1 are allowed.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ], + "urlMap": { "type": "string" } }, "type": "object" }, - "TargetTcpProxy": { - "description": "A TargetTcpProxy resource. This resource defines a TCP proxy. 
(== resource_for beta.targetTcpProxies ==) (== resource_for v1.targetTcpProxies ==)", - "id": "TargetTcpProxy", + "UrlMapTest": { + "description": "Message for the expected URL mappings.", + "id": "UrlMapTest", "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", + "description": "Description of this test case.", "type": "string" }, - "kind": { - "default": "compute#targetTcpProxy", - "description": "[Output Only] Type of the resource. Always compute#targetTcpProxy for target TCP proxies.", + "host": { + "description": "Host portion of the URL.", "type": "string" }, - "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "path": { + "description": "Path portion of the URL.", "type": "string" }, - "proxyHeader": { - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ], + "service": { + "description": "Expected BackendService resource the given URL should be mapped to.", "type": "string" + } + }, + "type": "object" + }, + "UrlMapValidationResult": { + "description": "Message representing the validation result for a UrlMap.", + "id": "UrlMapValidationResult", + "properties": { + "loadErrors": { + "items": { + "type": "string" + }, + "type": "array" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" + "loadSucceeded": { + "description": "Whether the given UrlMap can be successfully loaded. If false, 'loadErrors' indicates the reasons.", + "type": "boolean" }, - "service": { - "description": "URL to the BackendService resource.", - "type": "string" + "testFailures": { + "items": { + "$ref": "TestFailure" + }, + "type": "array" + }, + "testPassed": { + "description": "If successfully loaded, this field indicates whether the test passed. If false, 'testFailures's indicate the reason of failure.", + "type": "boolean" } }, "type": "object" }, - "TargetTcpProxyList": { - "description": "Contains a list of TargetTcpProxy resources.", - "id": "TargetTcpProxyList", + "UrlMapsAggregatedList": { + "id": "UrlMapsAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of TargetTcpProxy resources.", - "items": { - "$ref": "TargetTcpProxy" + "additionalProperties": { + "$ref": "UrlMapsScopedList", + "description": "Name of the scope containing this set of UrlMaps." 
}, - "type": "array" + "description": "A list of UrlMapsScopedList resources.", + "type": "object" }, "kind": { - "default": "compute#targetTcpProxyList", + "default": "compute#urlMapsAggregatedList", "description": "Type of resource.", "type": "string" }, @@ -42053,130 +47876,18 @@ }, "type": "object" }, - "TargetVpnGateway": { - "description": "Represents a Target VPN gateway resource. (== resource_for beta.targetVpnGateways ==) (== resource_for v1.targetVpnGateways ==)", - "id": "TargetVpnGateway", + "UrlMapsScopedList": { + "id": "UrlMapsScopedList", "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "forwardingRules": { - "description": "[Output Only] A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated to a VPN gateway.", - "items": { - "type": "string" - }, - "type": "array" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#targetVpnGateway", - "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", - "type": "string" - }, - "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this TargetVpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a TargetVpnGateway.", - "format": "byte", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels to apply to this TargetVpnGateway resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", - "type": "object" - }, - "name": { - "annotations": { - "required": [ - "compute.targetVpnGateways.insert" - ] - }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "network": { - "annotations": { - "required": [ - "compute.targetVpnGateways.insert" - ] - }, - "description": "URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.", - "type": "string" - }, - "region": { - "description": "[Output Only] URL of the region where the target VPN gateway resides. You must specify this field as part of the HTTP request URL. 
It is not settable as a field in the request body.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "status": { - "description": "[Output Only] The status of the VPN gateway.", - "enum": [ - "CREATING", - "DELETING", - "FAILED", - "READY" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - }, - "tunnels": { - "description": "[Output Only] A list of URLs to VpnTunnel resources. VpnTunnels are created using compute.vpntunnels.insert method and associated to a VPN gateway.", + "urlMaps": { + "description": "A list of UrlMaps contained in this scope.", "items": { - "type": "string" + "$ref": "UrlMap" }, "type": "array" - } - }, - "type": "object" - }, - "TargetVpnGatewayAggregatedList": { - "id": "TargetVpnGatewayAggregatedList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "additionalProperties": { - "$ref": "TargetVpnGatewaysScopedList", - "description": "[Output Only] Name of the scope containing this set of target VPN gateways." - }, - "description": "A list of TargetVpnGateway resources.", - "type": "object" - }, - "kind": { - "default": "compute#targetVpnGatewayAggregatedList", - "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "Informational warning which replaces the list of backend services when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -42259,24 +47970,98 @@ }, "type": "object" }, - "TargetVpnGatewayList": { - "description": "Contains a list of TargetVpnGateway resources.", - "id": "TargetVpnGatewayList", + "UrlMapsValidateRequest": { + "id": "UrlMapsValidateRequest", + "properties": { + "resource": { + "$ref": "UrlMap", + "description": "Content of the UrlMap to be validated." 
+ } + }, + "type": "object" + }, + "UrlMapsValidateResponse": { + "id": "UrlMapsValidateResponse", + "properties": { + "result": { + "$ref": "UrlMapValidationResult" + } + }, + "type": "object" + }, + "UrlRewrite": { + "description": "The spec for modifying the path before sending the request to the matched backend service.", + "id": "UrlRewrite", + "properties": { + "hostRewrite": { + "description": "Prior to forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite.\nThe value must be between 1 and 255 characters.", + "type": "string" + }, + "pathPrefixRewrite": { + "description": "Prior to forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite.\nThe value must be between 1 and 1024 characters.", + "type": "string" + } + }, + "type": "object" + }, + "UsableSubnetwork": { + "description": "Subnetwork which the current user has compute.subnetworks.use permission on.", + "id": "UsableSubnetwork", + "properties": { + "ipCidrRange": { + "description": "The range of internal addresses that are owned by this subnetwork.", + "type": "string" + }, + "network": { + "description": "Network URL.", + "type": "string" + }, + "secondaryIpRanges": { + "description": "Secondary IP ranges.", + "items": { + "$ref": "UsableSubnetworkSecondaryRange" + }, + "type": "array" + }, + "subnetwork": { + "description": "Subnetwork URL.", + "type": "string" + } + }, + "type": "object" + }, + "UsableSubnetworkSecondaryRange": { + "description": "Secondary IP range of a usable subnetwork.", + "id": "UsableSubnetworkSecondaryRange", + "properties": { + "ipCidrRange": { + "description": "The range of IP addresses belonging to this subnetwork secondary range.", + "type": "string" + }, + "rangeName": { + "description": "The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork.", + "type": "string" + } + }, + "type": "object" + }, + "UsableSubnetworksAggregatedList": { + "id": "UsableSubnetworksAggregatedList", "properties": { "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "type": "string" }, "items": { - "description": "A list of TargetVpnGateway resources.", + "description": "[Output] A list of usable subnetwork URLs.", "items": { - "$ref": "TargetVpnGateway" + "$ref": "UsableSubnetwork" }, "type": "array" }, "kind": { - "default": "compute#targetVpnGatewayList", - "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", + "default": "compute#usableSubnetworksAggregatedList", + "description": "[Output Only] Type of resource. Always compute#usableSubnetworksAggregatedList for aggregated lists of usable subnetworks.", "type": "string" }, "nextPageToken": { @@ -42371,18 +48156,95 @@ }, "type": "object" }, - "TargetVpnGatewaysScopedList": { - "id": "TargetVpnGatewaysScopedList", + "UsageExportLocation": { + "description": "The location in Cloud Storage and naming method of the daily usage report. 
Contains bucket_name and report_name prefix.", + "id": "UsageExportLocation", "properties": { - "targetVpnGateways": { - "description": "[Output Only] A list of target vpn gateways contained in this scope.", + "bucketName": { + "description": "The name of an existing bucket in Cloud Storage where the usage report object is stored. The Google Service Account is granted write access to this bucket. This can either be the bucket name by itself, such as example-bucket, or the bucket name with gs:// or https://storage.googleapis.com/ in front of it, such as gs://example-bucket.", + "type": "string" + }, + "reportNamePrefix": { + "description": "An optional prefix for the name of the usage report object stored in bucketName. If not supplied, defaults to usage. The report is stored as a CSV file named report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the usage according to Pacific Time. If you supply a prefix, it should conform to Cloud Storage object naming conventions.", + "type": "string" + } + }, + "type": "object" + }, + "VmEndpointNatMappings": { + "description": "Contain information of Nat mapping for a VM endpoint (i.e., NIC).", + "id": "VmEndpointNatMappings", + "properties": { + "instanceName": { + "description": "Name of the VM instance which the endpoint belongs to", + "type": "string" + }, + "interfaceNatMappings": { "items": { - "$ref": "TargetVpnGateway" + "$ref": "VmEndpointNatMappingsInterfaceNatMappings" + }, + "type": "array" + } + }, + "type": "object" + }, + "VmEndpointNatMappingsInterfaceNatMappings": { + "description": "Contain information of Nat mapping for an interface of this endpoint.", + "id": "VmEndpointNatMappingsInterfaceNatMappings", + "properties": { + "natIpPortRanges": { + "description": "A list of all IP:port-range mappings assigned to this interface. These ranges are inclusive, that is, both the first and the last ports can be used for NAT. Example: [\"2.2.2.2:12345-12355\", \"1.1.1.1:2234-2234\"].", + "items": { + "type": "string" + }, + "type": "array" + }, + "numTotalNatPorts": { + "description": "Total number of ports across all NAT IPs allocated to this interface. It equals to the aggregated port number in the field nat_ip_port_ranges.", + "format": "int32", + "type": "integer" + }, + "sourceAliasIpRange": { + "description": "Alias IP range for this interface endpoint. It will be a private (RFC 1918) IP range. Examples: \"10.33.4.55/32\", or \"192.168.5.0/24\".", + "type": "string" + }, + "sourceVirtualIp": { + "description": "Primary IP of the VM for this NIC.", + "type": "string" + } + }, + "type": "object" + }, + "VmEndpointNatMappingsList": { + "description": "Contains a list of VmEndpointNatMappings.", + "id": "VmEndpointNatMappingsList", + "properties": { + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "type": "string" + }, + "kind": { + "default": "compute#vmEndpointNatMappingsList", + "description": "[Output Only] Type of resource. Always compute#vmEndpointNatMappingsList for lists of Nat mappings of VM endpoints.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "result": { + "description": "[Output Only] A list of Nat mapping information of VM endpoints.", + "items": { + "$ref": "VmEndpointNatMappings" }, "type": "array" }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, "warning": { - "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -42465,132 +48327,207 @@ }, "type": "object" }, - "TestFailure": { - "id": "TestFailure", + "VpnGateway": { + "description": "Represents a VPN gateway resource.", + "id": "VpnGateway", "properties": { - "actualService": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, - "expectedService": { + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" }, - "host": { + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", "type": "string" }, - "path": { + "kind": { + "default": "compute#vpnGateway", + "description": "[Output Only] Type of resource. Always compute#vpnGateway for VPN gateways.", "type": "string" - } - }, - "type": "object" - }, - "TestPermissionsRequest": { - "id": "TestPermissionsRequest", - "properties": { - "permissions": { - "description": "The set of permissions to check for the 'resource'. Permissions with wildcards (such as '*' or 'storage.*') are not allowed.", - "items": { + }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this VpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve an VpnGateway.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { "type": "string" }, - "type": "array" - } - }, - "type": "object" - }, - "TestPermissionsResponse": { - "id": "TestPermissionsResponse", - "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", - "items": { - "type": "string" + "description": "Labels to apply to this VpnGateway resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", + "type": "object" + }, + "name": { + "annotations": { + "required": [ + "compute.vpnGateways.insert" + ] }, - "type": "array" - } - }, - "type": "object" - }, - "UrlMap": { - "description": "A UrlMap resource. 
This resource defines the mapping from URL to the BackendService resource, based on the \"longest-match\" of the URL's host and path.", - "id": "UrlMap", - "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, - "defaultService": { - "description": "The URL of the backendService resource if none of the hostRules match.\nUse defaultService instead of defaultRouteAction when simple routing to a backendService is desired and other advanced capabilities like traffic splitting and rewrites are not required.\nOnly one of defaultService, defaultRouteAction or defaultUrlRedirect should must be set.", + "network": { + "annotations": { + "required": [ + "compute.vpnGateways.insert" + ] + }, + "description": "URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.", "type": "string" }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", + "region": { + "description": "[Output Only] URL of the region where the VPN gateway resides.", "type": "string" }, - "fingerprint": { - "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve a UrlMap.", - "format": "byte", + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "hostRules": { - "description": "The list of HostRules to use against the URL.", + "vpnInterfaces": { + "description": "[Output Only] A list of interfaces on this VPN gateway.", "items": { - "$ref": "HostRule" + "$ref": "VpnGatewayVpnGatewayInterface" }, "type": "array" - }, + } + }, + "type": "object" + }, + "VpnGatewayAggregatedList": { + "id": "VpnGatewayAggregatedList", + "properties": { "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, + "items": { + "additionalProperties": { + "$ref": "VpnGatewaysScopedList", + "description": "[Output Only] Name of the scope containing this set of VPN gateways." + }, + "description": "A list of VpnGateway resources.", + "type": "object" + }, "kind": { - "default": "compute#urlMap", - "description": "[Output Only] Type of the resource. Always compute#urlMaps for url maps.", + "default": "compute#vpnGatewayAggregatedList", + "description": "[Output Only] Type of resource. Always compute#vpnGateway for VPN gateways.", "type": "string" }, - "name": { - "description": "Name of the resource. Provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, - "pathMatchers": { - "description": "The list of named PathMatchers to use against the URL.", - "items": { - "$ref": "PathMatcher" - }, - "type": "array" - }, "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", + "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "tests": { - "description": "The list of expected URL mapping tests. Request to update this UrlMap will succeed only if all of the test cases pass. You can specify a maximum of 100 tests per UrlMap.", - "items": { - "$ref": "UrlMapTest" + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } }, - "type": "array" + "type": "object" } }, "type": "object" }, - "UrlMapList": { - "description": "Contains a list of UrlMap resources.", - "id": "UrlMapList", + "VpnGatewayList": { + "description": "Contains a list of VpnGateway resources.", + "id": "VpnGatewayList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of UrlMap resources.", + "description": "A list of VpnGateway resources.", "items": { - "$ref": "UrlMap" + "$ref": "VpnGateway" }, "type": "array" }, "kind": { - "default": "compute#urlMapList", - "description": "Type of resource.", + "default": "compute#vpnGatewayList", + "description": "[Output Only] Type of resource. Always compute#vpnGateway for VPN gateways.", "type": "string" }, "nextPageToken": { @@ -42685,325 +48622,132 @@ }, "type": "object" }, - "UrlMapReference": { - "id": "UrlMapReference", - "properties": { - "urlMap": { - "type": "string" - } - }, - "type": "object" - }, - "UrlMapTest": { - "description": "Message for the expected URL mappings.", - "id": "UrlMapTest", - "properties": { - "description": { - "description": "Description of this test case.", - "type": "string" - }, - "host": { - "description": "Host portion of the URL.", - "type": "string" - }, - "path": { - "description": "Path portion of the URL.", - "type": "string" - }, - "service": { - "description": "Expected BackendService resource the given URL should be mapped to.", - "type": "string" - } - }, - "type": "object" - }, - "UrlMapValidationResult": { - "description": "Message representing the validation result for a UrlMap.", - "id": "UrlMapValidationResult", + "VpnGatewayStatus": { + "id": "VpnGatewayStatus", "properties": { - "loadErrors": { - "items": { - "type": "string" - }, - "type": "array" - }, - "loadSucceeded": { - "description": "Whether the given UrlMap can be successfully loaded. If false, 'loadErrors' indicates the reasons.", - "type": "boolean" - }, - "testFailures": { + "vpnConnections": { + "description": "List of VPN connection for this VpnGateway.", "items": { - "$ref": "TestFailure" + "$ref": "VpnGatewayStatusVpnConnection" }, "type": "array" - }, - "testPassed": { - "description": "If successfully loaded, this field indicates whether the test passed. If false, 'testFailures's indicate the reason of failure.", - "type": "boolean" - } - }, - "type": "object" - }, - "UrlMapsValidateRequest": { - "id": "UrlMapsValidateRequest", - "properties": { - "resource": { - "$ref": "UrlMap", - "description": "Content of the UrlMap to be validated." 
- } - }, - "type": "object" - }, - "UrlMapsValidateResponse": { - "id": "UrlMapsValidateResponse", - "properties": { - "result": { - "$ref": "UrlMapValidationResult" } }, "type": "object" }, - "UsableSubnetwork": { - "description": "Subnetwork which the current user has compute.subnetworks.use permission on.", - "id": "UsableSubnetwork", + "VpnGatewayStatusHighAvailabilityRequirementState": { + "description": "Describes the high availability requirement state for the VPN connection between this Cloud VPN gateway and a peer gateway.", + "id": "VpnGatewayStatusHighAvailabilityRequirementState", "properties": { - "ipCidrRange": { - "description": "The range of internal addresses that are owned by this subnetwork.", - "type": "string" - }, - "network": { - "description": "Network URL.", + "state": { + "description": "Indicates the high availability requirement state for the VPN connection. Valid values are CONNECTION_REDUNDANCY_MET, CONNECTION_REDUNDANCY_NOT_MET.", + "enum": [ + "CONNECTION_REDUNDANCY_MET", + "CONNECTION_REDUNDANCY_NOT_MET" + ], + "enumDescriptions": [ + "", + "" + ], "type": "string" }, - "secondaryIpRanges": { - "description": "Secondary IP ranges.", - "items": { - "$ref": "UsableSubnetworkSecondaryRange" - }, - "type": "array" - }, - "subnetwork": { - "description": "Subnetwork URL.", + "unsatisfiedReason": { + "description": "Indicates the reason why the VPN connection does not meet the high availability redundancy criteria/requirement. Valid values is INCOMPLETE_TUNNELS_COVERAGE.", + "enum": [ + "INCOMPLETE_TUNNELS_COVERAGE" + ], + "enumDescriptions": [ + "" + ], "type": "string" } }, "type": "object" }, - "UsableSubnetworkSecondaryRange": { - "description": "Secondary IP range of a usable subnetwork.", - "id": "UsableSubnetworkSecondaryRange", + "VpnGatewayStatusTunnel": { + "description": "Contains some information about a VPN tunnel.", + "id": "VpnGatewayStatusTunnel", "properties": { - "ipCidrRange": { - "description": "The range of IP addresses belonging to this subnetwork secondary range.", - "type": "string" + "localGatewayInterface": { + "description": "The VPN gateway interface this VPN tunnel is associated with.", + "format": "uint32", + "type": "integer" }, - "rangeName": { - "description": "The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork.", + "peerGatewayInterface": { + "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or GCP VPN gateway.", + "format": "uint32", + "type": "integer" + }, + "tunnelUrl": { + "description": "URL reference to the VPN tunnel.", "type": "string" } }, "type": "object" }, - "UsableSubnetworksAggregatedList": { - "id": "UsableSubnetworksAggregatedList", + "VpnGatewayStatusVpnConnection": { + "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be a external VPN gateway or GCP VPN gateway.", + "id": "VpnGatewayStatusVpnConnection", "properties": { - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "peerExternalGateway": { + "description": "URL reference to the peer external VPN gateways to which the VPN tunnels in this VPN connection are connected. 
This field is mutually exclusive with peer_gcp_gateway.", "type": "string" }, - "items": { - "description": "[Output] A list of usable subnetwork URLs.", - "items": { - "$ref": "UsableSubnetwork" - }, - "type": "array" - }, - "kind": { - "default": "compute#usableSubnetworksAggregatedList", - "description": "[Output Only] Type of resource. Always compute#usableSubnetworksAggregatedList for aggregated lists of usable subnetworks.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "peerGcpGateway": { + "description": "URL reference to the peer side VPN gateways to which the VPN tunnels in this VPN connection are connected. This field is mutually exclusive with peer_gcp_gateway.", "type": "string" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" + "state": { + "$ref": "VpnGatewayStatusHighAvailabilityRequirementState", + "description": "HighAvailabilityRequirementState for the VPN connection." }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } + "tunnels": { + "description": "List of VPN tunnels that are in this VPN connection.", + "items": { + "$ref": "VpnGatewayStatusTunnel" }, - "type": "object" + "type": "array" } }, "type": "object" }, - "UsageExportLocation": { - "description": "The location in Cloud Storage and naming method of the daily usage report. Contains bucket_name and report_name prefix.", - "id": "UsageExportLocation", + "VpnGatewayVpnGatewayInterface": { + "description": "A VPN gateway interface.", + "id": "VpnGatewayVpnGatewayInterface", "properties": { - "bucketName": { - "description": "The name of an existing bucket in Cloud Storage where the usage report object is stored. The Google Service Account is granted write access to this bucket. This can either be the bucket name by itself, such as example-bucket, or the bucket name with gs:// or https://storage.googleapis.com/ in front of it, such as gs://example-bucket.", - "type": "string" + "id": { + "description": "The numeric ID of this VPN gateway interface.", + "format": "uint32", + "type": "integer" }, - "reportNamePrefix": { - "description": "An optional prefix for the name of the usage report object stored in bucketName. If not supplied, defaults to usage. The report is stored as a CSV file named report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the usage according to Pacific Time. If you supply a prefix, it should conform to Cloud Storage object naming conventions.", - "type": "string" - } - }, - "type": "object" - }, - "VmEndpointNatMappings": { - "description": "Contain information of Nat mapping for a VM endpoint (i.e., NIC).", - "id": "VmEndpointNatMappings", - "properties": { - "instanceName": { - "description": "Name of the VM instance which the endpoint belongs to", + "ipAddress": { + "description": "The external IP address for this VPN gateway interface.", "type": "string" - }, - "interfaceNatMappings": { - "items": { - "$ref": "VmEndpointNatMappingsInterfaceNatMappings" - }, - "type": "array" } }, "type": "object" }, - "VmEndpointNatMappingsInterfaceNatMappings": { - "description": "Contain information of Nat mapping for an interface of this endpoint.", - "id": "VmEndpointNatMappingsInterfaceNatMappings", + "VpnGatewaysGetStatusResponse": { + "id": "VpnGatewaysGetStatusResponse", "properties": { - "natIpPortRanges": { - "description": "A list of all IP:port-range mappings assigned to this interface. These ranges are inclusive, that is, both the first and the last ports can be used for NAT. Example: [\"2.2.2.2:12345-12355\", \"1.1.1.1:2234-2234\"].", - "items": { - "type": "string" - }, - "type": "array" - }, - "numTotalNatPorts": { - "description": "Total number of ports across all NAT IPs allocated to this interface. It equals to the aggregated port number in the field nat_ip_port_ranges.", - "format": "int32", - "type": "integer" - }, - "sourceAliasIpRange": { - "description": "Alias IP range for this interface endpoint. It will be a private (RFC 1918) IP range. 
Examples: \"10.33.4.55/32\", or \"192.168.5.0/24\".", - "type": "string" - }, - "sourceVirtualIp": { - "description": "Primary IP of the VM for this NIC.", - "type": "string" + "result": { + "$ref": "VpnGatewayStatus" } }, "type": "object" }, - "VmEndpointNatMappingsList": { - "description": "Contains a list of VmEndpointNatMappings.", - "id": "VmEndpointNatMappingsList", + "VpnGatewaysScopedList": { + "id": "VpnGatewaysScopedList", "properties": { - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "type": "string" - }, - "kind": { - "default": "compute#vmEndpointNatMappingsList", - "description": "[Output Only] Type of resource. Always compute#vmEndpointNatMappingsList for lists of Nat mappings of VM endpoints.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "result": { - "description": "[Output Only] A list of Nat mapping information of VM endpoints.", + "vpnGateways": { + "description": "[Output Only] A list of VPN gateways contained in this scope.", "items": { - "$ref": "VmEndpointNatMappings" + "$ref": "VpnGateway" }, "type": "array" }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, "warning": { - "description": "[Output Only] Informational warning message.", + "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", "properties": { "code": { "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", @@ -43108,7 +48852,7 @@ "type": "string" }, "ikeVersion": { - "description": "IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. Default version is 2.", + "description": "IKE protocol version to use when establishing the VPN tunnel with the peer VPN gateway. Acceptable IKE versions are 1 or 2. The default version is 2.", "format": "int32", "type": "integer" }, @@ -43130,7 +48874,7 @@ "type": "object" }, "localTrafficSelector": { - "description": "Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", + "description": "Local traffic selector to use when establishing the VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges must be disjoint. Only IPv4 is supported.", "items": { "type": "string" }, @@ -43146,6 +48890,19 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "peerExternalGateway": { + "description": "URL of the peer side external VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created. This field is exclusive with the field peerGcpGateway.", + "type": "string" + }, + "peerExternalGatewayInterface": { + "description": "The interface ID of the external VPN gateway to which this VPN tunnel is connected. 
Provided by the client when the VPN tunnel is created.", + "format": "int32", + "type": "integer" + }, + "peerGcpGateway": { + "description": "URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created. This field can be used when creating highly available VPN from VPC network to VPC network, the field is exclusive with the field peerExternalGateway. If provided, the VPN tunnel will automatically use the same vpnGatewayInterface ID in the peer GCP VPN gateway.", + "type": "string" + }, "peerIp": { "description": "IP address of the peer VPN gateway. Only IPv4 is supported.", "type": "string" @@ -43155,14 +48912,14 @@ "type": "string" }, "remoteTrafficSelector": { - "description": "Remote traffic selectors to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", + "description": "Remote traffic selectors to use when establishing the VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", "items": { "type": "string" }, "type": "array" }, "router": { - "description": "URL of router resource to be used for dynamic routing.", + "description": "URL of the router resource to be used for dynamic routing.", "type": "string" }, "selfLink": { @@ -43178,7 +48935,7 @@ "type": "string" }, "status": { - "description": "[Output Only] The status of the VPN tunnel.", + "description": "[Output Only] The status of the VPN tunnel, which can be one of the following: \n- PROVISIONING: Resource is being allocated for the VPN tunnel. \n- WAITING_FOR_FULL_CONFIG: Waiting to receive all VPN-related configs from the user. Network, TargetVpnGateway, VpnTunnel, ForwardingRule, and Route resources are needed to setup the VPN tunnel. \n- FIRST_HANDSHAKE: Successful first handshake with the peer VPN. \n- ESTABLISHED: Secure session is successfully established with the peer VPN. \n- NETWORK_ERROR: Deprecated, replaced by NO_INCOMING_PACKETS \n- AUTHORIZATION_ERROR: Auth error (for example, bad shared secret). \n- NEGOTIATION_FAILURE: Handshake failed. \n- DEPROVISIONING: Resources are being deallocated for the VPN tunnel. \n- FAILED: Tunnel creation has failed and the tunnel is not ready to be used.", "enum": [ "ALLOCATING_RESOURCES", "AUTHORIZATION_ERROR", @@ -43212,6 +48969,15 @@ "targetVpnGateway": { "description": "URL of the Target VPN gateway with which this VPN tunnel is associated. Provided by the client when the VPN tunnel is created.", "type": "string" + }, + "vpnGateway": { + "description": "URL of the VPN gateway with which this VPN tunnel is associated. Provided by the client when the VPN tunnel is created. This must be used (instead of target_vpn_gateway) if a High Availability VPN gateway resource is created.", + "type": "string" + }, + "vpnGatewayInterface": { + "description": "The interface ID of the VPN gateway with which this VPN tunnel is associated.", + "format": "int32", + "type": "integer" } }, "type": "object" @@ -43226,7 +48992,7 @@ "items": { "additionalProperties": { "$ref": "VpnTunnelsScopedList", - "description": "Name of the scope containing this set of vpn tunnels." + "description": "Name of the scope containing this set of VPN tunnels." 
}, "description": "A list of VpnTunnelsScopedList resources.", "type": "object" @@ -43444,7 +49210,7 @@ "id": "VpnTunnelsScopedList", "properties": { "vpnTunnels": { - "description": "A list of vpn tunnels contained in this scope.", + "description": "A list of VPN tunnels contained in this scope.", "items": { "$ref": "VpnTunnel" }, @@ -43568,6 +49334,26 @@ }, "type": "object" }, + "WeightedBackendService": { + "description": "In contrast to a single BackendService in HttpRouteAction to which all matching traffic is directed to, WeightedBackendService allows traffic to be split across multiple BackendServices. The volume of traffic for each BackendService is proportional to the weight specified in each WeightedBackendService", + "id": "WeightedBackendService", + "properties": { + "backendService": { + "description": "The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the loadbalancer applies any relevant headerActions specified as part of this backendServiceWeight.", + "type": "string" + }, + "headerAction": { + "$ref": "HttpHeaderAction", + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nheaderAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap." + }, + "weight": { + "description": "Specifies the fraction of traffic sent to backendService, computed as weight / (sum of all weightedBackendService weights in routeAction) .\nThe selection of a backend service is determined only for new traffic. Once a user's request has been directed to a backendService, subsequent requests will be sent to the same backendService as determined by the BackendService's session affinity policy.\nThe value must be between 0 and 1000", + "format": "uint32", + "type": "integer" + } + }, + "type": "object" + }, "XpnHostList": { "id": "XpnHostList", "properties": { @@ -43703,7 +49489,7 @@ "type": "object" }, "Zone": { - "description": "A Zone resource. (== resource_for beta.zones ==) (== resource_for v1.zones ==)", + "description": "A Zone resource. (== resource_for beta.zones ==) (== resource_for v1.zones ==) Next ID: 17", "id": "Zone", "properties": { "availableCpuPlatforms": { diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go index 6f11e2e89f553..1d47d2b15c18d 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All rights reserved. +// Copyright 2019 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,13 +6,39 @@ // Package compute provides access to the Compute Engine API. // -// See https://developers.google.com/compute/docs/reference/latest/ +// For product documentation, see: https://developers.google.com/compute/docs/reference/latest/ +// +// Creating a client // // Usage example: // // import "google.golang.org/api/compute/v0.beta" // ... -// computeService, err := compute.New(oauthHttpClient) +// ctx := context.Background() +// computeService, err := compute.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. 
+// +// Other authentication options +// +// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// +// computeService, err := compute.NewService(ctx, option.WithScopes(compute.DevstorageReadWriteScope)) +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// computeService, err := compute.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// computeService, err := compute.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. package compute // import "google.golang.org/api/compute/v0.beta" import ( @@ -29,6 +55,8 @@ import ( gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + option "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code @@ -71,6 +99,37 @@ const ( DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" ) +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") @@ -78,12 +137,12 @@ func New(client *http.Client) (*Service, error) { s := &Service{client: client, BasePath: basePath} s.AcceleratorTypes = NewAcceleratorTypesService(s) s.Addresses = NewAddressesService(s) - s.Allocations = NewAllocationsService(s) s.Autoscalers = NewAutoscalersService(s) s.BackendBuckets = NewBackendBucketsService(s) s.BackendServices = NewBackendServicesService(s) s.DiskTypes = NewDiskTypesService(s) s.Disks = NewDisksService(s) + s.ExternalVpnGateways = NewExternalVpnGatewaysService(s) s.Firewalls = NewFirewallsService(s) s.ForwardingRules = NewForwardingRulesService(s) s.GlobalAddresses = NewGlobalAddressesService(s) @@ -114,10 +173,16 @@ func New(client *http.Client) (*Service, error) { s.RegionCommitments = NewRegionCommitmentsService(s) s.RegionDiskTypes = NewRegionDiskTypesService(s) s.RegionDisks = NewRegionDisksService(s) + s.RegionHealthChecks = NewRegionHealthChecksService(s) s.RegionInstanceGroupManagers = NewRegionInstanceGroupManagersService(s) s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) s.RegionOperations = NewRegionOperationsService(s) + s.RegionSslCertificates = NewRegionSslCertificatesService(s) + s.RegionTargetHttpProxies = NewRegionTargetHttpProxiesService(s) + s.RegionTargetHttpsProxies = NewRegionTargetHttpsProxiesService(s) + s.RegionUrlMaps = NewRegionUrlMapsService(s) s.Regions = NewRegionsService(s) + s.Reservations = NewReservationsService(s) s.ResourcePolicies = NewResourcePoliciesService(s) s.Routers = NewRoutersService(s) s.Routes = NewRoutesService(s) @@ -134,6 +199,7 @@ func New(client *http.Client) (*Service, error) { s.TargetTcpProxies = NewTargetTcpProxiesService(s) s.TargetVpnGateways = NewTargetVpnGatewaysService(s) s.UrlMaps = NewUrlMapsService(s) + s.VpnGateways = NewVpnGatewaysService(s) s.VpnTunnels = NewVpnTunnelsService(s) s.ZoneOperations = NewZoneOperationsService(s) s.Zones = NewZonesService(s) @@ -149,8 +215,6 @@ type Service struct { Addresses *AddressesService - Allocations *AllocationsService - Autoscalers *AutoscalersService BackendBuckets *BackendBucketsService @@ -161,6 +225,8 @@ type Service struct { Disks *DisksService + ExternalVpnGateways *ExternalVpnGatewaysService + Firewalls *FirewallsService ForwardingRules *ForwardingRulesService @@ -221,14 +287,26 @@ type Service struct { RegionDisks *RegionDisksService + RegionHealthChecks *RegionHealthChecksService + RegionInstanceGroupManagers *RegionInstanceGroupManagersService RegionInstanceGroups *RegionInstanceGroupsService RegionOperations *RegionOperationsService + RegionSslCertificates *RegionSslCertificatesService + + RegionTargetHttpProxies *RegionTargetHttpProxiesService + + RegionTargetHttpsProxies *RegionTargetHttpsProxiesService + + RegionUrlMaps *RegionUrlMapsService + Regions *RegionsService + Reservations *ReservationsService + ResourcePolicies *ResourcePoliciesService Routers *RoutersService @@ -261,6 +339,8 @@ type Service struct { UrlMaps *UrlMapsService + VpnGateways *VpnGatewaysService + VpnTunnels *VpnTunnelsService ZoneOperations *ZoneOperationsService @@ -293,15 +373,6 @@ type AddressesService struct { s *Service } -func NewAllocationsService(s *Service) *AllocationsService { - rs := &AllocationsService{s: s} - return rs -} - -type AllocationsService struct { - s *Service -} - func NewAutoscalersService(s *Service) *AutoscalersService { rs := &AutoscalersService{s: s} return rs @@ -347,6 +418,15 @@ type DisksService struct { 
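The rewritten package comment and the new NewService constructor above replace the old New(oauthHttpClient) pattern. A minimal sketch of the default path described there, using Application Default Credentials with a restricted scope via option.WithScopes; the project ID is hypothetical, and Zones.List is just a convenient read-only call to exercise the client.

    package main

    import (
        "context"
        "log"

        compute "google.golang.org/api/compute/v0.beta"
        "google.golang.org/api/option"
    )

    func main() {
        ctx := context.Background()

        // Application Default Credentials; WithScopes narrows the default
        // scope list that NewService prepends.
        svc, err := compute.NewService(ctx, option.WithScopes(compute.ComputeReadonlyScope))
        if err != nil {
            log.Fatal(err)
        }

        zones, err := svc.Zones.List("my-project").Do() // hypothetical project ID
        if err != nil {
            log.Fatal(err)
        }
        for _, z := range zones.Items {
            log.Println(z.Name, z.Status)
        }
    }

The deprecated New remains only for callers that already hold an *http.Client; as the comment above notes, such callers can instead pass that client to NewService with option.WithHTTPClient.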
s *Service } +func NewExternalVpnGatewaysService(s *Service) *ExternalVpnGatewaysService { + rs := &ExternalVpnGatewaysService{s: s} + return rs +} + +type ExternalVpnGatewaysService struct { + s *Service +} + func NewFirewallsService(s *Service) *FirewallsService { rs := &FirewallsService{s: s} return rs @@ -617,6 +697,15 @@ type RegionDisksService struct { s *Service } +func NewRegionHealthChecksService(s *Service) *RegionHealthChecksService { + rs := &RegionHealthChecksService{s: s} + return rs +} + +type RegionHealthChecksService struct { + s *Service +} + func NewRegionInstanceGroupManagersService(s *Service) *RegionInstanceGroupManagersService { rs := &RegionInstanceGroupManagersService{s: s} return rs @@ -644,6 +733,42 @@ type RegionOperationsService struct { s *Service } +func NewRegionSslCertificatesService(s *Service) *RegionSslCertificatesService { + rs := &RegionSslCertificatesService{s: s} + return rs +} + +type RegionSslCertificatesService struct { + s *Service +} + +func NewRegionTargetHttpProxiesService(s *Service) *RegionTargetHttpProxiesService { + rs := &RegionTargetHttpProxiesService{s: s} + return rs +} + +type RegionTargetHttpProxiesService struct { + s *Service +} + +func NewRegionTargetHttpsProxiesService(s *Service) *RegionTargetHttpsProxiesService { + rs := &RegionTargetHttpsProxiesService{s: s} + return rs +} + +type RegionTargetHttpsProxiesService struct { + s *Service +} + +func NewRegionUrlMapsService(s *Service) *RegionUrlMapsService { + rs := &RegionUrlMapsService{s: s} + return rs +} + +type RegionUrlMapsService struct { + s *Service +} + func NewRegionsService(s *Service) *RegionsService { rs := &RegionsService{s: s} return rs @@ -653,6 +778,15 @@ type RegionsService struct { s *Service } +func NewReservationsService(s *Service) *ReservationsService { + rs := &ReservationsService{s: s} + return rs +} + +type ReservationsService struct { + s *Service +} + func NewResourcePoliciesService(s *Service) *ResourcePoliciesService { rs := &ResourcePoliciesService{s: s} return rs @@ -797,6 +931,15 @@ type UrlMapsService struct { s *Service } +func NewVpnGatewaysService(s *Service) *VpnGatewaysService { + rs := &VpnGatewaysService{s: s} + return rs +} + +type VpnGatewaysService struct { + s *Service +} + func NewVpnTunnelsService(s *Service) *VpnTunnelsService { rs := &VpnTunnelsService{s: s} return rs @@ -832,8 +975,11 @@ type AcceleratorConfig struct { AcceleratorCount int64 `json:"acceleratorCount,omitempty"` // AcceleratorType: Full or partial URL of the accelerator type resource - // to attach to this instance. If you are creating an instance template, - // specify only the accelerator name. + // to attach to this instance. For example: + // projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla- + // p100 If you are creating an instance template, specify only the + // accelerator name. See GPUs on Compute Engine for a full list of + // accelerator types. AcceleratorType string `json:"acceleratorType,omitempty"` // ForceSendFields is a list of field names (e.g. "AcceleratorCount") to @@ -1545,6 +1691,7 @@ type Address struct { // Possible values: // "DNS_RESOLVER" // "GCE_ENDPOINT" + // "NAT_AUTO" // "VPC_PEERING" Purpose string `json:"purpose,omitempty"` @@ -2088,301 +2235,21 @@ func (s *AliasIpRange) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Allocation: Allocation resource -type Allocation struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. 
- CreationTimestamp string `json:"creationTimestamp,omitempty"` - - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#allocations - // for allocations. - Kind string `json:"kind,omitempty"` - - // Name: The name of the resource, provided by the client when initially - // creating the resource. The resource name must be 1-63 characters - // long, and comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined fully-qualified URL for this - // resource. - SelfLink string `json:"selfLink,omitempty"` - - SpecificAllocation *AllocationSpecificSKUAllocation `json:"specificAllocation,omitempty"` - - // SpecificAllocationRequired: Indicates whether the allocation can be - // consumed by VMs with "any allocation" defined. If the field is set, - // then only VMs that target the allocation by name using - // --allocation-affinity can consume this allocation. - SpecificAllocationRequired bool `json:"specificAllocationRequired,omitempty"` - - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Allocation) MarshalJSON() ([]byte, error) { - type NoMethod Allocation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AllocationAffinity: AllocationAffinity is the configuration of -// desired allocation which this instance could take capacity from. -type AllocationAffinity struct { - // Possible values: - // "ANY_ALLOCATION" - // "NO_ALLOCATION" - // "SPECIFIC_ALLOCATION" - // "UNSPECIFIED" - ConsumeAllocationType string `json:"consumeAllocationType,omitempty"` - - // Key: Corresponds to the label key of allocation resource. - Key string `json:"key,omitempty"` - - // Values: Corresponds to the label values of allocation resource. - Values []string `json:"values,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "ConsumeAllocationType") to unconditionally include in API requests. 
- // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ConsumeAllocationType") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AllocationAffinity) MarshalJSON() ([]byte, error) { - type NoMethod AllocationAffinity - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AllocationAggregatedList: Contains a list of allocations. -type AllocationAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Allocation resources. - Items map[string]AllocationsScopedList `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *AllocationAggregatedListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod AllocationAggregatedList - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +type AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk struct { + // DiskSizeGb: Specifies the size of the disk in base-2 GB. 
+ DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` -// AllocationAggregatedListWarning: [Output Only] Informational warning -// message. -type AllocationAggregatedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. + // Interface: Specifies the disk interface to use for attaching this + // disk, which is either SCSI or NVME. The default is SCSI. For + // performance characteristics of SCSI over NVMe, see Local SSD + // performance. // // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*AllocationAggregatedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod AllocationAggregatedListWarning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AllocationAggregatedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. 
- Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod AllocationAggregatedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AllocationList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of Allocation resources. - Items []*Allocation `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource.Always compute#allocationsList - // for listsof allocations - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *AllocationListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // "NVME" + // "SCSI" + Interface string `json:"interface,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "DiskSizeGb") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2390,8 +2257,8 @@ type AllocationList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "DiskSizeGb") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
@@ -2399,123 +2266,67 @@ type AllocationList struct { NullFields []string `json:"-"` } -func (s *AllocationList) MarshalJSON() ([]byte, error) { - type NoMethod AllocationList +func (s *AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk) MarshalJSON() ([]byte, error) { + type NoMethod AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AllocationListWarning: [Output Only] Informational warning message. -type AllocationListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*AllocationListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} +// AllocationSpecificSKUAllocationReservedInstanceProperties: Properties +// of the SKU instances being reserved. +type AllocationSpecificSKUAllocationReservedInstanceProperties struct { + // GuestAccelerators: Specifies accelerator type and count. + GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` -func (s *AllocationListWarning) MarshalJSON() ([]byte, error) { - type NoMethod AllocationListWarning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // LocalSsds: Specifies amount of local ssd to reserve with each + // instance. The type of disk is local-ssd. 
+ LocalSsds []*AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk `json:"localSsds,omitempty"` -type AllocationListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` + // MachineType: Specifies type of machine (name only) which has fixed + // number of vCPUs and fixed amount of memory. This also includes + // specifying custom machine type following + // custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. + MachineType string `json:"machineType,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // MinCpuPlatform: Minimum cpu platform the reservation. + MinCpuPlatform string `json:"minCpuPlatform,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "GuestAccelerators") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "GuestAccelerators") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *AllocationListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod AllocationListWarningData +func (s *AllocationSpecificSKUAllocationReservedInstanceProperties) MarshalJSON() ([]byte, error) { + type NoMethod AllocationSpecificSKUAllocationReservedInstanceProperties raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AllocationSpecificSKUAllocation: This allocation type allows to pre +// AllocationSpecificSKUReservation: This reservation type allows to pre // allocate specific instance configuration. -type AllocationSpecificSKUAllocation struct { +type AllocationSpecificSKUReservation struct { // Count: Specifies number of resources that are allocated. Count int64 `json:"count,omitempty,string"` // InUseCount: [OutputOnly] Indicates how many resource are in use. 
InUseCount int64 `json:"inUseCount,omitempty,string"` - InstanceProperties *AllocationSpecificSKUAllocationAllocatedInstanceProperties `json:"instanceProperties,omitempty"` + // InstanceProperties: The instance properties for this specific sku + // reservation. + InstanceProperties *AllocationSpecificSKUAllocationReservedInstanceProperties `json:"instanceProperties,omitempty"` // ForceSendFields is a list of field names (e.g. "Count") to // unconditionally include in API requests. By default, fields with @@ -2534,221 +2345,8 @@ type AllocationSpecificSKUAllocation struct { NullFields []string `json:"-"` } -func (s *AllocationSpecificSKUAllocation) MarshalJSON() ([]byte, error) { - type NoMethod AllocationSpecificSKUAllocation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AllocationSpecificSKUAllocationAllocatedInstanceProperties: -// Properties of the SKU instances being reserved. -type AllocationSpecificSKUAllocationAllocatedInstanceProperties struct { - // GuestAccelerators: Specifies accelerator type and count. - GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` - - // LocalSsds: Specifies amount of local ssd to reserve with each - // instance. The type of disk is local-ssd. - LocalSsds []*AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk `json:"localSsds,omitempty"` - - // MachineType: Specifies type of machine (name only) which has fixed - // number of vCPUs and fixed amount of memory. This also includes - // specifying custom machine type following - // custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. - MachineType string `json:"machineType,omitempty"` - - // MinCpuPlatform: Minimum cpu platform the allocation. - MinCpuPlatform string `json:"minCpuPlatform,omitempty"` - - // ForceSendFields is a list of field names (e.g. "GuestAccelerators") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "GuestAccelerators") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AllocationSpecificSKUAllocationAllocatedInstanceProperties) MarshalJSON() ([]byte, error) { - type NoMethod AllocationSpecificSKUAllocationAllocatedInstanceProperties - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk struct { - // DiskSizeGb: Specifies the size of the disk in base-2 GB. - DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - - // Interface: Specifies the disk interface to use for attaching this - // disk, which is either SCSI or NVME. The default is SCSI. For - // performance characteristics of SCSI over NVMe, see Local SSD - // performance. 
- // - // Possible values: - // "NVME" - // "SCSI" - Interface string `json:"interface,omitempty"` - - // ForceSendFields is a list of field names (e.g. "DiskSizeGb") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DiskSizeGb") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk) MarshalJSON() ([]byte, error) { - type NoMethod AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AllocationsScopedList struct { - // Allocations: A list of allocations contained in this scope. - Allocations []*Allocation `json:"allocations,omitempty"` - - // Warning: Informational warning which replaces the list of allocations - // when the list is empty. - Warning *AllocationsScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Allocations") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Allocations") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod AllocationsScopedList - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AllocationsScopedListWarning: Informational warning which replaces -// the list of allocations when the list is empty. -type AllocationsScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. 
- // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*AllocationsScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod AllocationsScopedListWarning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AllocationsScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AllocationsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod AllocationsScopedListWarningData +func (s *AllocationSpecificSKUReservation) MarshalJSON() ([]byte, error) { + type NoMethod AllocationSpecificSKUReservation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -2771,7 +2369,7 @@ type AttachedDisk struct { // the instance. // // If not specified, the server chooses a default device name to apply - // to this disk, in the form persistent-disks-x, where x is a number + // to this disk, in the form persistent-disk-x, where x is a number // assigned by Google Compute Engine. This field is only applicable for // persistent disks. DeviceName string `json:"deviceName,omitempty"` @@ -2929,11 +2527,24 @@ type AttachedDiskInitializeParams struct { // is the name of the disk type, not URL. DiskType string `json:"diskType,omitempty"` + // GuestOsFeatures: A list of features to enable on the guest operating + // system. Applicable only for bootable images. Read Enabling guest + // operating system features to see a list of available options. + // + // Guest OS features are applied by merging + // initializeParams.guestOsFeatures and disks.guestOsFeatures + GuestOsFeatures []*GuestOsFeature `json:"guestOsFeatures,omitempty"` + // Labels: Labels to apply to this disk. These can be later modified by // the disks.setLabels method. This field is only applicable for // persistent disks. Labels map[string]string `json:"labels,omitempty"` + // ResourcePolicies: Resource policies applied to this disk for + // automatic snapshot creations. Specified using the full or partial + // URL. For instance template, specify only the resource policy name. + ResourcePolicies []string `json:"resourcePolicies,omitempty"` + // SourceImage: The source image to create this disk. When creating a // new instance, one of initializeParams.sourceImage or disks.source is // required except for local SSD. @@ -2977,6 +2588,23 @@ type AttachedDiskInitializeParams struct { // the source images are encrypted with your own keys. SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` + // SourceSnapshot: The source snapshot to create this disk. When + // creating a new instance, one of initializeParams.sourceSnapshot or + // disks.source is required except for local SSD. + // + // To create a disk with a snapshot that you created, specify the + // snapshot name in the following + // format: + // global/snapshots/my-backup + // + // + // If the source snapshot is deleted later, this field will not be set. + SourceSnapshot string `json:"sourceSnapshot,omitempty"` + + // SourceSnapshotEncryptionKey: The customer-supplied encryption key of + // the source snapshot. + SourceSnapshotEncryptionKey *CustomerEncryptionKey `json:"sourceSnapshotEncryptionKey,omitempty"` + // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, @@ -3977,7 +3605,7 @@ func (s *AutoscalingPolicyCustomMetricUtilization) UnmarshalJSON(data []byte) er // of autoscaling based on load balancing. type AutoscalingPolicyLoadBalancingUtilization struct { // UtilizationTarget: Fraction of backend capacity utilization (set in - // HTTP(s) load balancing configuration) that autoscaler should + // HTTP(S) load balancing configuration) that autoscaler should // maintain. Must be a positive float value. If not defined, the default // is 0.8. UtilizationTarget float64 `json:"utilizationTarget,omitempty"` @@ -4050,6 +3678,11 @@ type Backend struct { // property when you create the resource. Description string `json:"description,omitempty"` + // Failover: This field designates whether this is a failover backend. + // More than one failover backend can be configured for a given + // BackendService. + Failover bool `json:"failover,omitempty"` + // Group: The fully-qualified URL of an Instance Group or Network // Endpoint Group resource. In case of instance group this defines the // list of instances that serve traffic. Member virtual machine @@ -4456,8 +4089,36 @@ type BackendService struct { // CdnPolicy: Cloud CDN configuration for this BackendService. CdnPolicy *BackendServiceCdnPolicy `json:"cdnPolicy,omitempty"` + // CircuitBreakers: Settings controlling the volume of connections to a + // backend service. + // + // This field is applicable to either: + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. + CircuitBreakers *CircuitBreakers `json:"circuitBreakers,omitempty"` + ConnectionDraining *ConnectionDraining `json:"connectionDraining,omitempty"` + // ConsistentHash: Consistent Hash-based load balancing can be used to + // provide soft session affinity based on HTTP headers, cookies or other + // properties. This load balancing policy is applicable only for HTTP + // connections. The affinity to a particular destination host will be + // lost when one or more hosts are added/removed from the destination + // service. This field specifies parameters that control consistent + // hashing. This field is only applicable when localityLbPolicy is set + // to MAGLEV or RING_HASH. + // + // This field is applicable to either: + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. + ConsistentHash *ConsistentHashLoadBalancerSettings `json:"consistentHash,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -4475,6 +4136,8 @@ type BackendService struct { // When the load balancing scheme is INTERNAL, this field is not used. EnableCDN bool `json:"enableCDN,omitempty"` + FailoverPolicy *BackendServiceFailoverPolicy `json:"failoverPolicy,omitempty"` + // Fingerprint: Fingerprint of this resource. A hash of the contents // stored in this object. This field is used in optimistic locking. This // field will be ignored when inserting a BackendService. 
An up-to-date @@ -4514,9 +4177,54 @@ type BackendService struct { // Possible values: // "EXTERNAL" // "INTERNAL" + // "INTERNAL_MANAGED" + // "INTERNAL_SELF_MANAGED" // "INVALID_LOAD_BALANCING_SCHEME" LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` + // LocalityLbPolicy: The load balancing algorithm used within the scope + // of the locality. The possible values are: + // - ROUND_ROBIN: This is a simple policy in which each healthy backend + // is selected in round robin order. This is the default. + // - LEAST_REQUEST: An O(1) algorithm which selects two random healthy + // hosts and picks the host which has fewer active requests. + // - RING_HASH: The ring/modulo hash load balancer implements consistent + // hashing to backends. The algorithm has the property that the + // addition/removal of a host from a set of N hosts only affects 1/N of + // the requests. + // - RANDOM: The load balancer selects a random healthy host. + // - ORIGINAL_DESTINATION: Backend host is selected based on the client + // connection metadata, i.e., connections are opened to the same address + // as the destination address of the incoming connection before the + // connection was redirected to the load balancer. + // - MAGLEV: used as a drop in replacement for the ring hash load + // balancer. Maglev is not as stable as ring hash but has faster table + // lookup build times and host selection times. For more information + // about Maglev, refer to https://ai.google/research/pubs/pub44824 + // + // + // This field is applicable to either: + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. + // + // Possible values: + // "INVALID_LB_POLICY" + // "LEAST_REQUEST" + // "MAGLEV" + // "ORIGINAL_DESTINATION" + // "RANDOM" + // "RING_HASH" + // "ROUND_ROBIN" + LocalityLbPolicy string `json:"localityLbPolicy,omitempty"` + + // LogConfig: This field denotes the logging options for the load + // balancer traffic served by this backend service. If logging is + // enabled, logs will be exported to Stackdriver. + LogConfig *BackendServiceLogConfig `json:"logConfig,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -4526,6 +4234,15 @@ type BackendService struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // OutlierDetection: Settings controlling eviction of unhealthy hosts + // from the load balancing pool. This field is applicable to either: + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. + OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` + // Port: Deprecated in favor of portName. The TCP port to connect on the // backend. The default value is 80. 
// @@ -4586,6 +4303,8 @@ type BackendService struct { // "CLIENT_IP_PORT_PROTO" // "CLIENT_IP_PROTO" // "GENERATED_COOKIE" + // "HEADER_FIELD" + // "HTTP_COOKIE" // "NONE" SessionAffinity string `json:"sessionAffinity,omitempty"` @@ -4822,6 +4541,81 @@ func (s *BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type BackendServiceFailoverPolicy struct { + // DisableConnectionDrainOnFailover: On failover or failback, this field + // indicates whether connection drain will be honored. Setting this to + // true has the following effect: connections to the old active pool are + // not drained. Connections to the new active pool use the timeout of 10 + // min (currently fixed). Setting to false has the following effect: + // both old and new connections will have a drain timeout of 10 + // min. + // + // This can be set to true only if the protocol is TCP. + // + // The default is false. + DisableConnectionDrainOnFailover bool `json:"disableConnectionDrainOnFailover,omitempty"` + + // DropTrafficIfUnhealthy: This option is used only when no healthy VMs + // are detected in the primary and backup instance groups. When set to + // true, traffic is dropped. When set to false, new connections are sent + // across all VMs in the primary group. + // + // The default is false. + DropTrafficIfUnhealthy bool `json:"dropTrafficIfUnhealthy,omitempty"` + + // FailoverRatio: The value of the field must be in [0, 1]. If the ratio + // of the healthy VMs in the primary backend is at or below this number, + // traffic arriving at the load-balanced IP will be directed to the + // failover backend. + // + // In case where 'failoverRatio' is not set or all the VMs in the backup + // backend are unhealthy, the traffic will be directed back to the + // primary backend in the "force" mode, where traffic will be spread to + // the healthy VMs with the best effort, or to all VMs when no VM is + // healthy. + // + // This field is only used with l4 load balancing. + FailoverRatio float64 `json:"failoverRatio,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "DisableConnectionDrainOnFailover") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "DisableConnectionDrainOnFailover") to include in API requests with + // the JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *BackendServiceFailoverPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceFailoverPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *BackendServiceFailoverPolicy) UnmarshalJSON(data []byte) error { + type NoMethod BackendServiceFailoverPolicy + var s1 struct { + FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FailoverRatio = float64(s1.FailoverRatio) + return nil +} + type BackendServiceGroupHealth struct { // HealthStatus: Health state of the backend instances or endpoints in // requested instance or network endpoint group, determined based on @@ -5050,6 +4844,57 @@ func (s *BackendServiceListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendServiceLogConfig: The available logging options for the load +// balancer traffic served by this backend service. +type BackendServiceLogConfig struct { + // Enable: This field denotes whether to enable logging for the load + // balancer traffic served by this backend service. + Enable bool `json:"enable,omitempty"` + + // SampleRate: This field can only be specified if logging is enabled + // for this backend service. The value of the field must be in [0, 1]. + // This configures the sampling rate of requests to the load balancer + // where 1.0 means all logged requests are reported and 0.0 means no + // logged requests are reported. The default value is 1.0. + SampleRate float64 `json:"sampleRate,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enable") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enable") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceLogConfig) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceLogConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *BackendServiceLogConfig) UnmarshalJSON(data []byte) error { + type NoMethod BackendServiceLogConfig + var s1 struct { + SampleRate gensupport.JSONFloat64 `json:"sampleRate"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.SampleRate = float64(s1.SampleRate) + return nil +} + type BackendServiceReference struct { BackendService string `json:"backendService,omitempty"` @@ -5213,10 +5058,10 @@ func (s *BackendServicesScopedListWarningData) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: Unimplemented. The condition that is associated with this - // binding. 
NOTE: an unsatisfied condition will not allow user access - // via current binding. Different bindings, including their conditions, - // are examined independently. + // Condition: The condition that is associated with this binding. NOTE: + // An unsatisfied condition will not allow user access via current + // binding. Different bindings, including their conditions, are examined + // independently. Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud @@ -5243,7 +5088,7 @@ type Binding struct { // // // - // * `domain:{domain}`: A Google Apps domain name that represents all + // * `domain:{domain}`: The G Suite domain (primary) that represents all // the users of that domain. For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` @@ -5357,6 +5202,58 @@ func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CircuitBreakers: Settings controlling the volume of connections to a +// backend service. +type CircuitBreakers struct { + // ConnectTimeout: The timeout for new network connections to hosts. + ConnectTimeout *Duration `json:"connectTimeout,omitempty"` + + // MaxConnections: The maximum number of connections to the backend + // cluster. If not specified, the default is 1024. + MaxConnections int64 `json:"maxConnections,omitempty"` + + // MaxPendingRequests: The maximum number of pending requests allowed to + // the backend cluster. If not specified, the default is 1024. + MaxPendingRequests int64 `json:"maxPendingRequests,omitempty"` + + // MaxRequests: The maximum number of parallel requests that allowed to + // the backend cluster. If not specified, the default is 1024. + MaxRequests int64 `json:"maxRequests,omitempty"` + + // MaxRequestsPerConnection: Maximum requests for a single backend + // connection. This parameter is respected by both the HTTP/1.1 and + // HTTP/2 implementations. If not specified, there is no limit. Setting + // this parameter to 1 will effectively disable keep alive. + MaxRequestsPerConnection int64 `json:"maxRequestsPerConnection,omitempty"` + + // MaxRetries: The maximum number of parallel retries allowed to the + // backend cluster. If not specified, the default is 3. + MaxRetries int64 `json:"maxRetries,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConnectTimeout") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConnectTimeout") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *CircuitBreakers) MarshalJSON() ([]byte, error) { + type NoMethod CircuitBreakers + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Commitment: Represents a Commitment resource. 
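// Illustrative sketch: filling in the CircuitBreakers type added above. The
// hunks shown here do not indicate where CircuitBreakers is referenced from,
// so the value is constructed standalone; package import assumed to be
// compute "google.golang.org/api/compute/v1", limits below are arbitrary.
func exampleCircuitBreakers() *compute.CircuitBreakers {
	return &compute.CircuitBreakers{
		ConnectTimeout:           &compute.Duration{Seconds: 5}, // Duration is added later in this file
		MaxConnections:           512,
		MaxRequestsPerConnection: 1, // effectively disables keep-alive, per the doc above
		MaxRetries:               3,
	}
}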
Creating a Commitment // resource means that you are purchasing a committed use contract with // an explicit start and end time. You can create commitments based on @@ -5369,9 +5266,6 @@ func (s *CacheKeyPolicy) MarshalJSON() ([]byte, error) { // must purchase a new commitment to continue receiving discounts. (== // resource_for beta.commitments ==) (== resource_for v1.commitments ==) type Commitment struct { - // Allocations: List of allocations for this commitment. - Allocations []*Allocation `json:"allocations,omitempty"` - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -5415,6 +5309,9 @@ type Commitment struct { // used. Region string `json:"region,omitempty"` + // Reservations: List of reservations for this commitment. + Reservations []*Reservation `json:"reservations,omitempty"` + // Resources: A list of commitment amounts for particular resources. // Note that VCPU and MEMORY resource commitments must occur together. Resources []*ResourceCommitment `json:"resources,omitempty"` @@ -5445,20 +5342,21 @@ type Commitment struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Allocations") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Allocations") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -5951,11 +5849,7 @@ type Condition struct { // "SERVICE" Sys string `json:"sys,omitempty"` - // Value: DEPRECATED. Use 'values' instead. - Value string `json:"value,omitempty"` - - // Values: The objects of the condition. This is mutually exclusive with - // 'value'. + // Values: The objects of the condition. Values []string `json:"values,omitempty"` // ForceSendFields is a list of field names (e.g. "Iam") to @@ -6012,6 +5906,155 @@ func (s *ConnectionDraining) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ConsistentHashLoadBalancerSettings: This message defines settings for +// a consistent hash style load balancer. +type ConsistentHashLoadBalancerSettings struct { + // HttpCookie: Hash is based on HTTP Cookie. 
This field describes a HTTP + // cookie that will be used as the hash key for the consistent hash load + // balancer. If the cookie is not present, it will be generated. This + // field is applicable if the sessionAffinity is set to HTTP_COOKIE. + HttpCookie *ConsistentHashLoadBalancerSettingsHttpCookie `json:"httpCookie,omitempty"` + + // HttpHeaderName: The hash based on the value of the specified header + // field. This field is applicable if the sessionAffinity is set to + // HEADER_FIELD. + HttpHeaderName string `json:"httpHeaderName,omitempty"` + + // MinimumRingSize: The minimum number of virtual nodes to use for the + // hash ring. Defaults to 1024. Larger ring sizes result in more + // granular load distributions. If the number of hosts in the load + // balancing pool is larger than the ring size, each host will be + // assigned a single virtual node. + MinimumRingSize int64 `json:"minimumRingSize,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "HttpCookie") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HttpCookie") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ConsistentHashLoadBalancerSettings) MarshalJSON() ([]byte, error) { + type NoMethod ConsistentHashLoadBalancerSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ConsistentHashLoadBalancerSettingsHttpCookie: The information about +// the HTTP Cookie on which the hash function is based for load +// balancing policies that use a consistent hash. +type ConsistentHashLoadBalancerSettingsHttpCookie struct { + // Name: Name of the cookie. + Name string `json:"name,omitempty"` + + // Path: Path to set for the cookie. + Path string `json:"path,omitempty"` + + // Ttl: Lifetime of the cookie. + Ttl *Duration `json:"ttl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
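// Illustrative sketch: the consistent-hash settings defined above, using an
// HTTP cookie as the hash key (applicable when sessionAffinity is HTTP_COOKIE,
// per the documentation in this hunk). Assumes import as
// compute "google.golang.org/api/compute/v1"; the cookie name is hypothetical.
func exampleConsistentHash() *compute.ConsistentHashLoadBalancerSettings {
	return &compute.ConsistentHashLoadBalancerSettings{
		HttpCookie: &compute.ConsistentHashLoadBalancerSettingsHttpCookie{
			Name: "session-id",                     // hypothetical cookie name
			Path: "/",
			Ttl:  &compute.Duration{Seconds: 3600}, // one-hour cookie lifetime
		},
		MinimumRingSize: 1024, // default ring size; larger rings give finer distribution
	}
}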
+ NullFields []string `json:"-"` +} + +func (s *ConsistentHashLoadBalancerSettingsHttpCookie) MarshalJSON() ([]byte, error) { + type NoMethod ConsistentHashLoadBalancerSettingsHttpCookie + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CorsPolicy: The specification for allowing client side cross-origin +// requests. Please see W3C Recommendation for Cross Origin Resource +// Sharing +type CorsPolicy struct { + // AllowCredentials: In response to a preflight request, setting this to + // true indicates that the actual request can include user credentials. + // This translates to the Access-Control-Allow-Credentials + // header. + // Default is false. + AllowCredentials bool `json:"allowCredentials,omitempty"` + + // AllowHeaders: Specifies the content for the + // Access-Control-Allow-Headers header. + AllowHeaders []string `json:"allowHeaders,omitempty"` + + // AllowMethods: Specifies the content for the + // Access-Control-Allow-Methods header. + AllowMethods []string `json:"allowMethods,omitempty"` + + // AllowOriginRegexes: Specifies the regualar expression patterns that + // match allowed origins. For regular expression grammar please see + // en.cppreference.com/w/cpp/regex/ecmascript + // An origin is allowed if it matches either allow_origins or + // allow_origin_regex. + AllowOriginRegexes []string `json:"allowOriginRegexes,omitempty"` + + // AllowOrigins: Specifies the list of origins that will be allowed to + // do CORS requests. + // An origin is allowed if it matches either allow_origins or + // allow_origin_regex. + AllowOrigins []string `json:"allowOrigins,omitempty"` + + // Disabled: If true, specifies the CORS policy is disabled. The default + // value of false, which indicates that the CORS policy is in effect. + Disabled bool `json:"disabled,omitempty"` + + // ExposeHeaders: Specifies the content for the + // Access-Control-Expose-Headers header. + ExposeHeaders []string `json:"exposeHeaders,omitempty"` + + // MaxAge: Specifies how long the results of a preflight request can be + // cached. This translates to the content for the Access-Control-Max-Age + // header. + MaxAge int64 `json:"maxAge,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AllowCredentials") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowCredentials") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
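// Illustrative sketch: a CorsPolicy as defined above, allowing credentialed
// requests from a single origin. Assumes import as
// compute "google.golang.org/api/compute/v1"; the origin and headers are
// hypothetical examples.
func exampleCorsPolicy() *compute.CorsPolicy {
	return &compute.CorsPolicy{
		AllowOrigins:     []string{"https://app.example.com"}, // hypothetical origin
		AllowMethods:     []string{"GET", "POST"},
		AllowHeaders:     []string{"Content-Type", "Authorization"},
		AllowCredentials: true, // sets Access-Control-Allow-Credentials
		MaxAge:           3600, // cache preflight results for one hour
	}
}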
+ NullFields []string `json:"-"` +} + +func (s *CorsPolicy) MarshalJSON() ([]byte, error) { + type NoMethod CorsPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // CustomerEncryptionKey: Represents a customer-supplied encryption key type CustomerEncryptionKey struct { // KmsKeyName: The name of the encryption key that is stored in Google @@ -6123,14 +6166,16 @@ type DeprecationStatus struct { // resource as the deprecated resource. Replacement string `json:"replacement,omitempty"` - // State: The deprecation state of this resource. This can be - // DEPRECATED, OBSOLETE, or DELETED. Operations which create a new - // resource using a DEPRECATED resource will return successfully, but - // with a warning indicating the deprecated resource and recommending - // its replacement. Operations which use OBSOLETE or DELETED resources - // will be rejected and result in an error. + // State: The deprecation state of this resource. This can be ACTIVE, + // DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the + // end of life date for an image, can use ACTIVE. Operations which + // create a new resource using a DEPRECATED resource will return + // successfully, but with a warning indicating the deprecated resource + // and recommending its replacement. Operations which use OBSOLETE or + // DELETED resources will be rejected and result in an error. // // Possible values: + // "ACTIVE" // "DELETED" // "DEPRECATED" // "OBSOLETE" @@ -6175,8 +6220,8 @@ type Disk struct { // // After you encrypt a disk with a customer-supplied key, you must // provide the same key if you use the disk later (e.g. to create a disk - // snapshot or an image, or to attach the disk to a virtual - // machine). + // snapshot, to create a disk image, to create a machine image, or to + // attach the disk to a virtual machine). // // Customer-supplied encryption keys do not protect access to metadata // of the disk. @@ -6348,6 +6393,7 @@ type Disk struct { // // Possible values: // "CREATING" + // "DELETING" // "FAILED" // "READY" // "RESTORING" @@ -6362,11 +6408,11 @@ type Disk struct { // Type: URL of the disk type resource describing which disk type to use // to create the disk. Provide this when creating the disk. For example: - // project/zones/zone/diskTypes/pd-standard or pd-ssd + // projects/project/zones/zone/diskTypes/pd-standard or pd-ssd Type string `json:"type,omitempty"` // Users: [Output Only] Links to the users of the disk (attached - // instances) in form: project/zones/zone/instances/instance + // instances) in form: projects/project/zones/zone/instances/instance Users []string `json:"users,omitempty"` // Zone: [Output Only] URL of the zone where the disk resides. You must @@ -7640,6 +7686,45 @@ func (s *DistributionPolicyZoneConfiguration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Duration: A Duration represents a fixed-length span of time +// represented as a count of seconds and fractions of seconds at +// nanosecond resolution. It is independent of any calendar and concepts +// like "day" or "month". Range is approximately 10,000 years. +type Duration struct { + // Nanos: Span of time that's a fraction of a second at nanosecond + // resolution. Durations less than one second are represented with a 0 + // `seconds` field and a positive `nanos` field. Must be from 0 to + // 999,999,999 inclusive. 
+ Nanos int64 `json:"nanos,omitempty"` + + // Seconds: Span of time at a resolution of a second. Must be from 0 to + // 315,576,000,000 inclusive. Note: these bounds are computed from: 60 + // sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `json:"seconds,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "Nanos") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Nanos") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Duration) MarshalJSON() ([]byte, error) { + type NoMethod Duration + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ExchangedPeeringRoute struct { // DestRange: The destination range of the route. DestRange string `json:"destRange,omitempty"` @@ -7892,67 +7977,51 @@ func (s *Expr) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Firewall: Represents a Firewall resource. -type Firewall struct { - // Allowed: The list of ALLOW rules specified by this firewall. Each - // rule specifies a protocol and port-range tuple that describes a - // permitted connection. - Allowed []*FirewallAllowed `json:"allowed,omitempty"` - +// ExternalVpnGateway: External VPN gateway is the on-premises VPN +// gateway(s) or another cloud provider?s VPN gateway that connects to +// your Google Cloud VPN gateway. To create a highly available VPN from +// Google Cloud to your on-premises side or another Cloud provider's VPN +// gateway, you must create a external VPN gateway resource in GCP, +// which provides the information to GCP about your external VPN +// gateway. +type ExternalVpnGateway struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Denied: The list of DENY rules specified by this firewall. Each rule - // specifies a protocol and port-range tuple that describes a denied - // connection. - Denied []*FirewallDenied `json:"denied,omitempty"` - // Description: An optional description of this resource. Provide this // property when you create the resource. Description string `json:"description,omitempty"` - // DestinationRanges: If destination ranges are specified, the firewall - // will apply only to traffic that has destination IP address in these - // ranges. These ranges must be expressed in CIDR format. Only IPv4 is - // supported. - DestinationRanges []string `json:"destinationRanges,omitempty"` - - // Direction: Direction of traffic to which this firewall applies; - // default is INGRESS. Note: For INGRESS traffic, it is NOT supported to - // specify destinationRanges; For EGRESS traffic, it is NOT supported to - // specify sourceRanges OR sourceTags. 
- // - // Possible values: - // "EGRESS" - // "INGRESS" - Direction string `json:"direction,omitempty"` - - // Disabled: Denotes whether the firewall rule is disabled, i.e not - // applied to the network it is associated with. When set to true, the - // firewall rule is not enforced and the network behaves as if it did - // not exist. If this is unspecified, the firewall rule will be enabled. - Disabled bool `json:"disabled,omitempty"` - - // EnableLogging: Deprecated in favor of enable in LogConfig. This field - // denotes whether to enable logging for a particular firewall rule. If - // logging is enabled, logs will be exported to Stackdriver. - EnableLogging bool `json:"enableLogging,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always compute#firewall for - // firewall rules. + // Interfaces: List of interfaces for this external VPN gateway. + Interfaces []*ExternalVpnGatewayInterface `json:"interfaces,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#externalVpnGateway for externalVpnGateways. Kind string `json:"kind,omitempty"` - // LogConfig: This field denotes the logging options for a particular - // firewall rule. If logging is enabled, logs will be exported to - // Stackdriver. - LogConfig *FirewallLogConfig `json:"logConfig,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // ExternalVpnGateway, which is essentially a hash of the labels set + // used for optimistic locking. The fingerprint is initially generated + // by Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels, otherwise the request will fail + // with error 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve an + // ExternalVpnGateway. + LabelFingerprint string `json:"labelFingerprint,omitempty"` - // Name: Name of the resource; provided by the client when the resource + // Labels: Labels to apply to this ExternalVpnGateway resource. These + // can be later modified by the setLabels method. Each label key/value + // must comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means @@ -7961,126 +8030,62 @@ type Firewall struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Network: URL of the network resource for this firewall rule. If not - // specified when creating a firewall rule, the default network is - // used: - // global/networks/default - // If you choose to specify this property, you can specify the network - // as a full or partial URL. For example, the following are all valid - // URLs: - // - - // https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network - // - projects/myproject/global/networks/my-network - // - global/networks/default - Network string `json:"network,omitempty"` - - // Priority: Priority for this rule. This is an integer between 0 and - // 65535, both inclusive. When not specified, the value assumed is 1000. 
- // Relative priorities determine precedence of conflicting rules. Lower - // value of priority implies higher precedence (eg, a rule with priority - // 0 has higher precedence than a rule with priority 1). DENY rules take - // precedence over ALLOW rules having equal priority. - Priority int64 `json:"priority,omitempty"` + // RedundancyType: Indicates the user-supplied redundancy type of this + // external VPN gateway. + // + // Possible values: + // "FOUR_IPS_REDUNDANCY" + // "SINGLE_IP_INTERNALLY_REDUNDANT" + // "TWO_IPS_REDUNDANCY" + RedundancyType string `json:"redundancyType,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SourceRanges: If source ranges are specified, the firewall will apply - // only to traffic that has source IP address in these ranges. These - // ranges must be expressed in CIDR format. One or both of sourceRanges - // and sourceTags may be set. If both properties are set, the firewall - // will apply to traffic that has source IP address within sourceRanges - // OR the source IP that belongs to a tag listed in the sourceTags - // property. The connection does not need to match both properties for - // the firewall to apply. Only IPv4 is supported. - SourceRanges []string `json:"sourceRanges,omitempty"` - - // SourceServiceAccounts: If source service accounts are specified, the - // firewall will apply only to traffic originating from an instance with - // a service account in this list. Source service accounts cannot be - // used to control traffic to an instance's external IP address because - // service accounts are associated with an instance, not an IP address. - // sourceRanges can be set at the same time as sourceServiceAccounts. If - // both are set, the firewall will apply to traffic that has source IP - // address within sourceRanges OR the source IP belongs to an instance - // with service account listed in sourceServiceAccount. The connection - // does not need to match both properties for the firewall to apply. - // sourceServiceAccounts cannot be used at the same time as sourceTags - // or targetTags. - SourceServiceAccounts []string `json:"sourceServiceAccounts,omitempty"` - - // SourceTags: If source tags are specified, the firewall rule applies - // only to traffic with source IPs that match the primary network - // interfaces of VM instances that have the tag and are in the same VPC - // network. Source tags cannot be used to control traffic to an - // instance's external IP address, it only applies to traffic between - // instances in the same virtual network. Because tags are associated - // with instances, not IP addresses. One or both of sourceRanges and - // sourceTags may be set. If both properties are set, the firewall will - // apply to traffic that has source IP address within sourceRanges OR - // the source IP that belongs to a tag listed in the sourceTags - // property. The connection does not need to match both properties for - // the firewall to apply. - SourceTags []string `json:"sourceTags,omitempty"` - - // TargetServiceAccounts: A list of service accounts indicating sets of - // instances located in the network that may make network connections as - // specified in allowed[]. targetServiceAccounts cannot be used at the - // same time as targetTags or sourceTags. If neither - // targetServiceAccounts nor targetTags are specified, the firewall rule - // applies to all instances on the specified network. 
- TargetServiceAccounts []string `json:"targetServiceAccounts,omitempty"` - - // TargetTags: A list of tags that controls which instances the firewall - // rule applies to. If targetTags are specified, then the firewall rule - // applies only to instances in the VPC network that have one of those - // tags. If no targetTags are specified, the firewall rule applies to - // all instances on the specified network. - TargetTags []string `json:"targetTags,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Allowed") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Allowed") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *Firewall) MarshalJSON() ([]byte, error) { - type NoMethod Firewall +func (s *ExternalVpnGateway) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGateway raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type FirewallAllowed struct { - // IPProtocol: The IP protocol to which this rule applies. The protocol - // type is required when creating a firewall rule. This value can either - // be one of the following well known protocol strings (tcp, udp, icmp, - // esp, ah, ipip, sctp), or the IP protocol number. - IPProtocol string `json:"IPProtocol,omitempty"` +// ExternalVpnGatewayInterface: The interface for the external VPN +// gateway. +type ExternalVpnGatewayInterface struct { + // Id: The numeric ID of this interface. The allowed input values for + // this id for different redundancy types of external VPN gateway: + // SINGLE_IP_INTERNALLY_REDUNDANT - 0 TWO_IPS_REDUNDANCY - 0, 1 + // FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 + Id int64 `json:"id,omitempty"` - // Ports: An optional list of ports to which this rule applies. This - // field is only applicable for UDP or TCP protocol. Each entry must be - // either an integer or a range. If not specified, this rule applies to - // connections through any port. - // - // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. - Ports []string `json:"ports,omitempty"` + // IpAddress: IP address of the interface in the external VPN gateway. 
+ // Only IPv4 is supported. This IP address can be either from your + // on-premise gateway or another Cloud provider?s VPN gateway, it cannot + // be an IP address from Google Compute Engine. + IpAddress string `json:"ipAddress,omitempty"` - // ForceSendFields is a list of field names (e.g. "IPProtocol") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -8088,8 +8093,8 @@ type FirewallAllowed struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IPProtocol") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -8097,61 +8102,26 @@ type FirewallAllowed struct { NullFields []string `json:"-"` } -func (s *FirewallAllowed) MarshalJSON() ([]byte, error) { - type NoMethod FirewallAllowed +func (s *ExternalVpnGatewayInterface) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayInterface raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type FirewallDenied struct { - // IPProtocol: The IP protocol to which this rule applies. The protocol - // type is required when creating a firewall rule. This value can either - // be one of the following well known protocol strings (tcp, udp, icmp, - // esp, ah, ipip, sctp), or the IP protocol number. - IPProtocol string `json:"IPProtocol,omitempty"` - - // Ports: An optional list of ports to which this rule applies. This - // field is only applicable for UDP or TCP protocol. Each entry must be - // either an integer or a range. If not specified, this rule applies to - // connections through any port. - // - // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. - Ports []string `json:"ports,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IPProtocol") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IPProtocol") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *FirewallDenied) MarshalJSON() ([]byte, error) { - type NoMethod FirewallDenied - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +// ExternalVpnGatewayList: Response to the list request, and contains a +// list of externalVpnGateways. 
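// Illustrative sketch: an ExternalVpnGateway with two interfaces, matching the
// TWO_IPS_REDUNDANCY interface IDs documented above. Assumes import as
// compute "google.golang.org/api/compute/v1"; the name is hypothetical and the
// addresses are RFC 5737 documentation IPs.
func exampleExternalVpnGateway() *compute.ExternalVpnGateway {
	return &compute.ExternalVpnGateway{
		Name:           "on-prem-gw", // hypothetical name
		RedundancyType: "TWO_IPS_REDUNDANCY",
		Interfaces: []*compute.ExternalVpnGatewayInterface{
			{Id: 0, IpAddress: "203.0.113.10"},
			{Id: 1, IpAddress: "203.0.113.11"},
		},
	}
}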
+type ExternalVpnGatewayList struct { + Etag string `json:"etag,omitempty"` -// FirewallList: Contains a list of firewalls. -type FirewallList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Firewall resources. - Items []*Firewall `json:"items,omitempty"` + // Items: A list of ExternalVpnGateway resources. + Items []*ExternalVpnGateway `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#firewallList for - // lists of firewalls. + // Kind: [Output Only] Type of resource. Always + // compute#externalVpnGatewayList for lists of externalVpnGateways. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -8166,13 +8136,13 @@ type FirewallList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *FirewallListWarning `json:"warning,omitempty"` + Warning *ExternalVpnGatewayListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -8180,7 +8150,7 @@ type FirewallList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Etag") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -8189,14 +8159,419 @@ type FirewallList struct { NullFields []string `json:"-"` } -func (s *FirewallList) MarshalJSON() ([]byte, error) { - type NoMethod FirewallList +func (s *ExternalVpnGatewayList) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// FirewallListWarning: [Output Only] Informational warning message. -type FirewallListWarning struct { +// ExternalVpnGatewayListWarning: [Output Only] Informational warning +// message. +type ExternalVpnGatewayListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ExternalVpnGatewayListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGatewayListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ExternalVpnGatewayListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExternalVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ExternalVpnGatewayListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Firewall: Represents a Firewall resource. +type Firewall struct { + // Allowed: The list of ALLOW rules specified by this firewall. Each + // rule specifies a protocol and port-range tuple that describes a + // permitted connection. + Allowed []*FirewallAllowed `json:"allowed,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Denied: The list of DENY rules specified by this firewall. Each rule + // specifies a protocol and port-range tuple that describes a denied + // connection. + Denied []*FirewallDenied `json:"denied,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // DestinationRanges: If destination ranges are specified, the firewall + // will apply only to traffic that has destination IP address in these + // ranges. These ranges must be expressed in CIDR format. Only IPv4 is + // supported. + DestinationRanges []string `json:"destinationRanges,omitempty"` + + // Direction: Direction of traffic to which this firewall applies; + // default is INGRESS. Note: For INGRESS traffic, it is NOT supported to + // specify destinationRanges; For EGRESS traffic, it is NOT supported to + // specify sourceRanges OR sourceTags. + // + // Possible values: + // "EGRESS" + // "INGRESS" + Direction string `json:"direction,omitempty"` + + // Disabled: Denotes whether the firewall rule is disabled, i.e not + // applied to the network it is associated with. When set to true, the + // firewall rule is not enforced and the network behaves as if it did + // not exist. If this is unspecified, the firewall rule will be enabled. + Disabled bool `json:"disabled,omitempty"` + + // EnableLogging: Deprecated in favor of enable in LogConfig. This field + // denotes whether to enable logging for a particular firewall rule. If + // logging is enabled, logs will be exported to Stackdriver. + EnableLogging bool `json:"enableLogging,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#firewall for + // firewall rules. + Kind string `json:"kind,omitempty"` + + // LogConfig: This field denotes the logging options for a particular + // firewall rule. If logging is enabled, logs will be exported to + // Stackdriver. + LogConfig *FirewallLogConfig `json:"logConfig,omitempty"` + + // Name: Name of the resource; provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. 
Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: URL of the network resource for this firewall rule. If not + // specified when creating a firewall rule, the default network is + // used: + // global/networks/default + // If you choose to specify this property, you can specify the network + // as a full or partial URL. For example, the following are all valid + // URLs: + // - + // https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network + // - projects/myproject/global/networks/my-network + // - global/networks/default + Network string `json:"network,omitempty"` + + // Priority: Priority for this rule. This is an integer between 0 and + // 65535, both inclusive. When not specified, the value assumed is 1000. + // Relative priorities determine precedence of conflicting rules. Lower + // value of priority implies higher precedence (eg, a rule with priority + // 0 has higher precedence than a rule with priority 1). DENY rules take + // precedence over ALLOW rules having equal priority. + Priority int64 `json:"priority,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SourceRanges: If source ranges are specified, the firewall will apply + // only to traffic that has source IP address in these ranges. These + // ranges must be expressed in CIDR format. One or both of sourceRanges + // and sourceTags may be set. If both properties are set, the firewall + // will apply to traffic that has source IP address within sourceRanges + // OR the source IP that belongs to a tag listed in the sourceTags + // property. The connection does not need to match both properties for + // the firewall to apply. Only IPv4 is supported. + SourceRanges []string `json:"sourceRanges,omitempty"` + + // SourceServiceAccounts: If source service accounts are specified, the + // firewall will apply only to traffic originating from an instance with + // a service account in this list. Source service accounts cannot be + // used to control traffic to an instance's external IP address because + // service accounts are associated with an instance, not an IP address. + // sourceRanges can be set at the same time as sourceServiceAccounts. If + // both are set, the firewall will apply to traffic that has source IP + // address within sourceRanges OR the source IP belongs to an instance + // with service account listed in sourceServiceAccount. The connection + // does not need to match both properties for the firewall to apply. + // sourceServiceAccounts cannot be used at the same time as sourceTags + // or targetTags. + SourceServiceAccounts []string `json:"sourceServiceAccounts,omitempty"` + + // SourceTags: If source tags are specified, the firewall rule applies + // only to traffic with source IPs that match the primary network + // interfaces of VM instances that have the tag and are in the same VPC + // network. Source tags cannot be used to control traffic to an + // instance's external IP address, it only applies to traffic between + // instances in the same virtual network. Because tags are associated + // with instances, not IP addresses. One or both of sourceRanges and + // sourceTags may be set. 
If both properties are set, the firewall will + // apply to traffic that has source IP address within sourceRanges OR + // the source IP that belongs to a tag listed in the sourceTags + // property. The connection does not need to match both properties for + // the firewall to apply. + SourceTags []string `json:"sourceTags,omitempty"` + + // TargetServiceAccounts: A list of service accounts indicating sets of + // instances located in the network that may make network connections as + // specified in allowed[]. targetServiceAccounts cannot be used at the + // same time as targetTags or sourceTags. If neither + // targetServiceAccounts nor targetTags are specified, the firewall rule + // applies to all instances on the specified network. + TargetServiceAccounts []string `json:"targetServiceAccounts,omitempty"` + + // TargetTags: A list of tags that controls which instances the firewall + // rule applies to. If targetTags are specified, then the firewall rule + // applies only to instances in the VPC network that have one of those + // tags. If no targetTags are specified, the firewall rule applies to + // all instances on the specified network. + TargetTags []string `json:"targetTags,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Allowed") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Allowed") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Firewall) MarshalJSON() ([]byte, error) { + type NoMethod Firewall + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type FirewallAllowed struct { + // IPProtocol: The IP protocol to which this rule applies. The protocol + // type is required when creating a firewall rule. This value can either + // be one of the following well known protocol strings (tcp, udp, icmp, + // esp, ah, ipip, sctp), or the IP protocol number. + IPProtocol string `json:"IPProtocol,omitempty"` + + // Ports: An optional list of ports to which this rule applies. This + // field is only applicable for UDP or TCP protocol. Each entry must be + // either an integer or a range. If not specified, this rule applies to + // connections through any port. + // + // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. + Ports []string `json:"ports,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IPProtocol") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IPProtocol") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FirewallAllowed) MarshalJSON() ([]byte, error) { + type NoMethod FirewallAllowed + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type FirewallDenied struct { + // IPProtocol: The IP protocol to which this rule applies. The protocol + // type is required when creating a firewall rule. This value can either + // be one of the following well known protocol strings (tcp, udp, icmp, + // esp, ah, ipip, sctp), or the IP protocol number. + IPProtocol string `json:"IPProtocol,omitempty"` + + // Ports: An optional list of ports to which this rule applies. This + // field is only applicable for UDP or TCP protocol. Each entry must be + // either an integer or a range. If not specified, this rule applies to + // connections through any port. + // + // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. + Ports []string `json:"ports,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IPProtocol") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IPProtocol") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FirewallDenied) MarshalJSON() ([]byte, error) { + type NoMethod FirewallDenied + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// FirewallList: Contains a list of firewalls. +type FirewallList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Firewall resources. + Items []*Firewall `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#firewallList for + // lists of firewalls. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. 
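// Illustrative sketch: the Firewall type reinstated above is relocated rather
// than new; this shows a typical ingress rule using only fields documented in
// this hunk. Assumes import as compute "google.golang.org/api/compute/v1";
// the rule name, tag, and ranges are hypothetical.
func exampleIngressFirewall() *compute.Firewall {
	return &compute.Firewall{
		Name:      "allow-web", // hypothetical rule name
		Network:   "global/networks/default",
		Direction: "INGRESS",
		Priority:  1000, // default priority
		Allowed: []*compute.FirewallAllowed{
			{IPProtocol: "tcp", Ports: []string{"80", "443"}},
		},
		SourceRanges: []string{"0.0.0.0/0"},
		TargetTags:   []string{"web"},
	}
}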
+ SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *FirewallListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FirewallList) MarshalJSON() ([]byte, error) { + type NoMethod FirewallList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// FirewallListWarning: [Output Only] Informational warning message. +type FirewallListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -8332,7 +8707,7 @@ type FixedOrPercent struct { // based on the specific mode. // // - // - If the value is fixed, then the caculated value is equal to the + // - If the value is fixed, then the calculated value is equal to the // fixed value. // - If the value is a percent, then the calculated value is percent/100 // * targetSize. For example, the calculated value of a 80% of a managed @@ -8444,6 +8819,13 @@ type ForwardingRule struct { // forwarded to the backends configured with this forwarding rule. AllPorts bool `json:"allPorts,omitempty"` + // AllowGlobalAccess: This field is used along with the backend_service + // field for internal load balancing or with the target field for + // internal TargetInstance. If the field is set to TRUE, clients can + // access ILB from all regions. Otherwise only allows access from + // clients in the same region as the internal load balancer. + AllowGlobalAccess bool `json:"allowGlobalAccess,omitempty"` + // BackendService: This field is only used for INTERNAL load // balancing. // @@ -8459,6 +8841,16 @@ type ForwardingRule struct { // property when you create the resource. Description string `json:"description,omitempty"` + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a ForwardingRule. Include the + // fingerprint in patch request to ensure that you do not overwrite + // changes that were applied from another concurrent request. + // + // To see the latest fingerprint, make a get() request to retrieve a + // ForwardingRule. + Fingerprint string `json:"fingerprint,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. 
Id uint64 `json:"id,omitempty,string"` @@ -8506,9 +8898,28 @@ type ForwardingRule struct { // Possible values: // "EXTERNAL" // "INTERNAL" + // "INTERNAL_MANAGED" + // "INTERNAL_SELF_MANAGED" // "INVALID" LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` + // MetadataFilters: Opaque filter criteria used by Loadbalancer to + // restrict routing configuration to a limited set xDS compliant + // clients. In their xDS requests to Loadbalancer, xDS clients present + // node metadata. If a match takes place, the relevant routing + // configuration is made available to those proxies. + // For each metadataFilter in this list, if its filterMatchCriteria is + // set to MATCH_ANY, at least one of the filterLabels must match the + // corresponding label provided in the metadata. If its + // filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels + // must match with corresponding labels in the provided + // metadata. + // metadataFilters specified here can be overridden by those specified + // in the UrlMap that this ForwardingRule references. + // metadataFilters only applies to Loadbalancers that have their + // loadBalancingScheme set to INTERNAL_SELF_MANAGED. + MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` + // Name: Name of the resource; provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -8618,8 +9029,8 @@ type ForwardingRule struct { // same region as the forwarding rule. For global forwarding rules, this // target must be a global load balancing resource. The forwarded // traffic must be of a type appropriate to the target object. For - // INTERNAL_SELF_MANAGED" load balancing, only HTTP and HTTPS targets - // are valid. + // INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are + // valid. Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -9638,6 +10049,10 @@ type HealthCheck struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // Region: [Output Only] Region where the health check resides. Not + // applicable to global health checks. + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -9650,10 +10065,10 @@ type HealthCheck struct { // greater value than checkIntervalSec. TimeoutSec int64 `json:"timeoutSec,omitempty"` - // Type: Specifies the type of the healthCheck, either TCP, SSL, HTTP or - // HTTPS. If not specified, the default is TCP. Exactly one of the - // protocol-specific health check field must be specified, which must - // match type field. + // Type: Specifies the type of the healthCheck, either TCP, SSL, HTTP, + // HTTPS or HTTP2. If not specified, the default is TCP. Exactly one of + // the protocol-specific health check field must be specified, which + // must match type field. // // Possible values: // "HTTP" @@ -9883,6 +10298,293 @@ func (s *HealthCheckReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type HealthChecksAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of HealthChecksScopedList resources. + Items map[string]HealthChecksScopedList `json:"items,omitempty"` + + // Kind: Type of resource. 
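// Illustrative sketch, not generated code: a minimal patch body for the
// Fingerprint and AllowGlobalAccess fields documented above. The compute
// package name is an assumption, and existing would come from a prior get()
// of the forwarding rule.
//
//   // buildGlobalAccessPatch returns a ForwardingRule patch body that enables
//   // AllowGlobalAccess while carrying the current fingerprint, so the server
//   // can reject the patch if the rule was changed concurrently (optimistic
//   // locking, as described in the Fingerprint documentation).
//   func buildGlobalAccessPatch(existing *compute.ForwardingRule) *compute.ForwardingRule {
//       return &compute.ForwardingRule{
//           Fingerprint:       existing.Fingerprint,
//           AllowGlobalAccess: true,
//           // To explicitly send AllowGlobalAccess=false instead, the field
//           // would have to be listed in ForceSendFields, since empty values
//           // are otherwise omitted from the request.
//       }
//   }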
+ Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *HealthChecksAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthChecksAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod HealthChecksAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HealthChecksAggregatedListWarning: [Output Only] Informational +// warning message. +type HealthChecksAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HealthChecksAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthChecksAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod HealthChecksAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HealthChecksAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthChecksAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod HealthChecksAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HealthChecksScopedList struct { + // HealthChecks: A list of HealthChecks contained in this scope. + HealthChecks []*HealthCheck `json:"healthChecks,omitempty"` + + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *HealthChecksScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthChecksScopedList) MarshalJSON() ([]byte, error) { + type NoMethod HealthChecksScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HealthChecksScopedListWarning: Informational warning which replaces +// the list of backend services when the list is empty. +type HealthChecksScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HealthChecksScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *HealthChecksScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod HealthChecksScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HealthChecksScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthChecksScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod HealthChecksScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type HealthStatus struct { // HealthState: Health state of the instance. // @@ -10010,6 +10712,313 @@ func (s *HostRule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HttpFaultAbort: Specification for how requests are aborted as part of +// fault injection. +type HttpFaultAbort struct { + // HttpStatus: The HTTP status code used to abort the request. + // The value must be between 200 and 599 inclusive. + HttpStatus int64 `json:"httpStatus,omitempty"` + + // Percentage: The percentage of traffic + // (connections/operations/requests) which will be aborted as part of + // fault injection. + // The value must be between 0.0 and 100.0 inclusive. + Percentage float64 `json:"percentage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HttpStatus") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HttpStatus") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpFaultAbort) MarshalJSON() ([]byte, error) { + type NoMethod HttpFaultAbort + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *HttpFaultAbort) UnmarshalJSON(data []byte) error { + type NoMethod HttpFaultAbort + var s1 struct { + Percentage gensupport.JSONFloat64 `json:"percentage"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Percentage = float64(s1.Percentage) + return nil +} + +// HttpFaultDelay: Specifies the delay introduced by Loadbalancer before +// forwarding the request to the backend service as part of fault +// injection. +type HttpFaultDelay struct { + // FixedDelay: Specifies the value of the fixed delay interval. + FixedDelay *Duration `json:"fixedDelay,omitempty"` + + // Percentage: The percentage of traffic + // (connections/operations/requests) on which delay will be introduced + // as part of fault injection. + // The value must be between 0.0 and 100.0 inclusive. + Percentage float64 `json:"percentage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FixedDelay") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FixedDelay") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpFaultDelay) MarshalJSON() ([]byte, error) { + type NoMethod HttpFaultDelay + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *HttpFaultDelay) UnmarshalJSON(data []byte) error { + type NoMethod HttpFaultDelay + var s1 struct { + Percentage gensupport.JSONFloat64 `json:"percentage"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Percentage = float64(s1.Percentage) + return nil +} + +// HttpFaultInjection: The specification for fault injection introduced +// into traffic to test the resiliency of clients to backend service +// failure. As part of fault injection, when clients send requests to a +// backend service, delays can be introduced by Loadbalancer on a +// percentage of requests before sending those request to the backend +// service. Similarly requests from clients can be aborted by the +// Loadbalancer for a percentage of requests. +type HttpFaultInjection struct { + // Abort: The specification for how client requests are aborted as part + // of fault injection. 
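// Illustrative sketch, not generated code: decoding an HttpFaultAbort with the
// UnmarshalJSON defined above, which reads percentage through
// gensupport.JSONFloat64. The compute package name and the JSON literal are
// assumptions.
//
//   var abort compute.HttpFaultAbort
//   data := []byte(`{"httpStatus": 503, "percentage": 12.5}`)
//   if err := json.Unmarshal(data, &abort); err != nil {
//       panic(err)
//   }
//   fmt.Println(abort.HttpStatus, abort.Percentage) // 503 12.5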
+ Abort *HttpFaultAbort `json:"abort,omitempty"` + + // Delay: The specification for how client requests are delayed as part + // of fault injection, before being sent to a backend service. + Delay *HttpFaultDelay `json:"delay,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Abort") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Abort") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpFaultInjection) MarshalJSON() ([]byte, error) { + type NoMethod HttpFaultInjection + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpHeaderAction: The request and response header transformations +// that take effect before the request is passed along to the selected +// backendService. +type HttpHeaderAction struct { + // RequestHeadersToAdd: Headers to add to a matching request prior to + // forwarding the request to the backendService. + RequestHeadersToAdd []*HttpHeaderOption `json:"requestHeadersToAdd,omitempty"` + + // RequestHeadersToRemove: A list of header names for headers that need + // to be removed from the request prior to forwarding the request to the + // backendService. + RequestHeadersToRemove []string `json:"requestHeadersToRemove,omitempty"` + + // ResponseHeadersToAdd: Headers to add the response prior to sending + // the response back to the client. + ResponseHeadersToAdd []*HttpHeaderOption `json:"responseHeadersToAdd,omitempty"` + + // ResponseHeadersToRemove: A list of header names for headers that need + // to be removed from the response prior to sending the response back to + // the client. + ResponseHeadersToRemove []string `json:"responseHeadersToRemove,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RequestHeadersToAdd") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RequestHeadersToAdd") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *HttpHeaderAction) MarshalJSON() ([]byte, error) { + type NoMethod HttpHeaderAction + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpHeaderMatch: matchRule criteria for request header matches. +type HttpHeaderMatch struct { + // ExactMatch: The value should exactly match the contents of + // exactMatch. + // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + // presentMatch or rangeMatch must be set. + ExactMatch string `json:"exactMatch,omitempty"` + + // HeaderName: The name of the HTTP header to match. + // For matching against the HTTP request's authority, use a headerMatch + // with the header name ":authority". + // For matching a request's method, use the headerName ":method". + HeaderName string `json:"headerName,omitempty"` + + // InvertMatch: If set to false, the headerMatch is considered a match + // if the match criteria above are met. If set to true, the headerMatch + // is considered a match if the match criteria above are NOT met. + // The default setting is false. + InvertMatch bool `json:"invertMatch,omitempty"` + + // PrefixMatch: The value of the header must start with the contents of + // prefixMatch. + // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + // presentMatch or rangeMatch must be set. + PrefixMatch string `json:"prefixMatch,omitempty"` + + // PresentMatch: A header with the contents of headerName must exist. + // The match takes place whether or not the request's header has a value + // or not. + // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + // presentMatch or rangeMatch must be set. + PresentMatch bool `json:"presentMatch,omitempty"` + + // RangeMatch: The header value must be an integer and its value must be + // in the range specified in rangeMatch. If the header does not contain + // an integer or number, or is empty, the match fails. + // For example, for a range [-5, 0]: + // - -3 will match. + // - 0 will not match. + // - 0.25 will not match. + // - -3someString will not match. + // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + // presentMatch or rangeMatch must be set. + RangeMatch *Int64RangeMatch `json:"rangeMatch,omitempty"` + + // RegexMatch: The value of the header must match the regular + // expression specified in regexMatch. For regular expression grammar, + // please see: en.cppreference.com/w/cpp/regex/ecmascript + // For matching against a port specified in the HTTP request, use a + // headerMatch with headerName set to PORT and a regular expression that + // satisfies the RFC2616 Host header's port specifier. + // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + // presentMatch or rangeMatch must be set. + RegexMatch string `json:"regexMatch,omitempty"` + + // SuffixMatch: The value of the header must end with the contents of + // suffixMatch. + // Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, + // presentMatch or rangeMatch must be set. + SuffixMatch string `json:"suffixMatch,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExactMatch") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExactMatch") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpHeaderMatch) MarshalJSON() ([]byte, error) { + type NoMethod HttpHeaderMatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpHeaderOption: Specification determining how headers are added to +// requests or responses. +type HttpHeaderOption struct { + // HeaderName: The name of the header. + HeaderName string `json:"headerName,omitempty"` + + // HeaderValue: The value of the header to add. + HeaderValue string `json:"headerValue,omitempty"` + + // Replace: If false, headerValue is appended to any values that already + // exist for the header. If true, headerValue is set for the header, + // discarding any values that were set for that header. + // The default value is false. + Replace bool `json:"replace,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HeaderName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HeaderName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpHeaderOption) MarshalJSON() ([]byte, error) { + type NoMethod HttpHeaderOption + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HttpHealthCheck: An HttpHealthCheck resource. This resource defines a // template for how individual instances should be checked for health, // via HTTP. @@ -10256,73 +11265,108 @@ func (s *HttpHealthCheckListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// HttpsHealthCheck: An HttpsHealthCheck resource. This resource defines -// a template for how individual instances should be checked for health, -// via HTTPS. -type HttpsHealthCheck struct { - // CheckIntervalSec: How often (in seconds) to send a health check. The - // default value is 5 seconds. - CheckIntervalSec int64 `json:"checkIntervalSec,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` +// HttpQueryParameterMatch: HttpRouteRuleMatch criteria for a request's +// query parameter. +type HttpQueryParameterMatch struct { + // ExactMatch: The queryParameterMatch matches if the value of the + // parameter exactly matches the contents of exactMatch. 
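// Illustrative sketch, not generated code: combining the header match and
// header transformation types above. The compute package name, header names
// and values are assumptions.
//
//   onlyGets := &compute.HttpHeaderMatch{
//       // ":method" is the documented way to match the request method.
//       HeaderName: ":method",
//       ExactMatch: "GET",
//   }
//   headers := &compute.HttpHeaderAction{
//       RequestHeadersToRemove: []string{"X-Debug-Token"}, // hypothetical header
//       ResponseHeadersToAdd: []*compute.HttpHeaderOption{
//           // Replace=true overwrites any existing value instead of appending.
//           {HeaderName: "X-Environment", HeaderValue: "staging", Replace: true},
//       },
//   }
//   _, _ = onlyGets, headers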
+ // Only one of presentMatch, exactMatch and regexMatch must be set. + ExactMatch string `json:"exactMatch,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` + // Name: The name of the query parameter to match. The query parameter + // must exist in the request, in the absence of which the request match + // fails. + Name string `json:"name,omitempty"` - // HealthyThreshold: A so-far unhealthy instance will be marked healthy - // after this many consecutive successes. The default value is 2. - HealthyThreshold int64 `json:"healthyThreshold,omitempty"` + // PresentMatch: Specifies that the queryParameterMatch matches if the + // request contains the query parameter, irrespective of whether the + // parameter has a value or not. + // Only one of presentMatch, exactMatch and regexMatch must be set. + PresentMatch bool `json:"presentMatch,omitempty"` - // Host: The value of the host header in the HTTPS health check request. - // If left empty (default value), the public IP on behalf of which this - // health check is performed will be used. - Host string `json:"host,omitempty"` + // RegexMatch: The queryParameterMatch matches if the value of the + // parameter matches the regular expression specified by regexMatch. For + // the regular expression grammar, please see + // en.cppreference.com/w/cpp/regex/ecmascript + // Only one of presentMatch, exactMatch and regexMatch must be set. + RegexMatch string `json:"regexMatch,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "ExactMatch") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Kind: Type of the resource. - Kind string `json:"kind,omitempty"` + // NullFields is a list of field names (e.g. "ExactMatch") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` +func (s *HttpQueryParameterMatch) MarshalJSON() ([]byte, error) { + type NoMethod HttpQueryParameterMatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // Port: The TCP port number for the HTTPS health check request. The - // default value is 443. 
- Port int64 `json:"port,omitempty"` +// HttpRedirectAction: Specifies settings for an HTTP redirect. +type HttpRedirectAction struct { + // HostRedirect: The host that will be used in the redirect response + // instead of the one that was supplied in the request. + // The value must be between 1 and 255 characters. + HostRedirect string `json:"hostRedirect,omitempty"` - // RequestPath: The request path of the HTTPS health check request. The - // default value is "/". - RequestPath string `json:"requestPath,omitempty"` + // HttpsRedirect: If set to true, the URL scheme in the redirected + // request is set to https. If set to false, the URL scheme of the + // redirected request will remain the same as that of the request. + // This must only be set for UrlMaps used in TargetHttpProxys. Setting + // this true for TargetHttpsProxy is not permitted. + // The default is set to false. + HttpsRedirect bool `json:"httpsRedirect,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // PathRedirect: The path that will be used in the redirect response + // instead of the one that was supplied in the request. + // Only one of pathRedirect or prefixRedirect must be specified. + // The value must be between 1 and 1024 characters. + PathRedirect string `json:"pathRedirect,omitempty"` - // TimeoutSec: How long (in seconds) to wait before claiming failure. - // The default value is 5 seconds. It is invalid for timeoutSec to have - // a greater value than checkIntervalSec. - TimeoutSec int64 `json:"timeoutSec,omitempty"` + // PrefixRedirect: The prefix that replaces the prefixMatch specified in + // the HttpRouteRuleMatch, retaining the remaining portion of the URL + // before redirecting the request. + PrefixRedirect string `json:"prefixRedirect,omitempty"` - // UnhealthyThreshold: A so-far healthy instance will be marked - // unhealthy after this many consecutive failures. The default value is - // 2. - UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` + // RedirectResponseCode: The HTTP Status code to use for this + // RedirectAction. + // Supported values are: + // - MOVED_PERMANENTLY_DEFAULT, which is the default value and + // corresponds to 301. + // - FOUND, which corresponds to 302. + // - SEE_OTHER which corresponds to 303. + // - TEMPORARY_REDIRECT, which corresponds to 307. In this case, the + // request method will be retained. + // - PERMANENT_REDIRECT, which corresponds to 308. In this case, the + // request method will be retained. + // + // Possible values: + // "FOUND" + // "MOVED_PERMANENTLY_DEFAULT" + // "PERMANENT_REDIRECT" + // "SEE_OTHER" + // "TEMPORARY_REDIRECT" + RedirectResponseCode string `json:"redirectResponseCode,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // StripQuery: If set to true, any accompanying query portion of the + // original URL is removed prior to redirecting the request. If set to + // false, the query portion of the original URL is retained. + // The default is set to false. + StripQuery bool `json:"stripQuery,omitempty"` - // ForceSendFields is a list of field names (e.g. "CheckIntervalSec") to + // ForceSendFields is a list of field names (e.g. "HostRedirect") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
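// Illustrative sketch, not generated code: an HttpRedirectAction that upgrades
// matching requests to HTTPS and keeps the original path and query string.
// The compute package name is an assumption.
//
//   redirect := &compute.HttpRedirectAction{
//       HttpsRedirect:        true,
//       RedirectResponseCode: "MOVED_PERMANENTLY_DEFAULT", // 301, per the list above
//       StripQuery:           false,                       // keep the query string
//   }
//   _ = redirect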
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10330,33 +11374,400 @@ type HttpsHealthCheck struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CheckIntervalSec") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "HostRedirect") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *HttpsHealthCheck) MarshalJSON() ([]byte, error) { - type NoMethod HttpsHealthCheck +func (s *HttpRedirectAction) MarshalJSON() ([]byte, error) { + type NoMethod HttpRedirectAction raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// HttpsHealthCheckList: Contains a list of HttpsHealthCheck resources. -type HttpsHealthCheckList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +// HttpRetryPolicy: The retry policy associated with an HttpRouteRule. +type HttpRetryPolicy struct { + // NumRetries: Specifies the allowed number of retries. This number must + // be > 0. + NumRetries int64 `json:"numRetries,omitempty"` - // Items: A list of HttpsHealthCheck resources. - Items []*HttpsHealthCheck `json:"items,omitempty"` + // PerTryTimeout: Specifies a non-zero timeout per retry attempt. + PerTryTimeout *Duration `json:"perTryTimeout,omitempty"` - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` + // RetryConditions: Specifies one or more conditions when this retry rule + // applies. Valid values are: + // - 5xx: Loadbalancer will attempt a retry if the backend service + // responds with any 5xx response code, or if the backend service does + // not respond at all, for example: disconnects, reset, read timeout, + // connection failure, and refused streams. + // - gateway-error: Similar to 5xx, but only applies to response codes + // 502, 503 or 504. + // - connect-failure: Loadbalancer will retry on failures connecting to + // backend services, for example due to connection timeouts. + // - retriable-4xx: Loadbalancer will retry for retriable 4xx response + // codes. Currently the only retriable error supported is 409. + // - refused-stream: Loadbalancer will retry if the backend service + // resets the stream with a REFUSED_STREAM error code. This reset type + // indicates that it is safe to retry. 
+ // - cancelled: Loadbalancer will retry if the gRPC status code in the + // response header is set to cancelled + // - deadline-exceeded: Loadbalancer will retry if the gRPC status code + // in the response header is set to deadline-exceeded + // - resource-exhausted: Loadbalancer will retry if the gRPC status code + // in the response header is set to resource-exhausted + // - unavailable: Loadbalancer will retry if the gRPC status code in the + // response header is set to unavailable + RetryConditions []string `json:"retryConditions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NumRetries") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NumRetries") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpRetryPolicy) MarshalJSON() ([]byte, error) { + type NoMethod HttpRetryPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HttpRouteAction struct { + // CorsPolicy: The specification for allowing client side cross-origin + // requests. Please see the W3C Recommendation for Cross Origin Resource + // Sharing. + CorsPolicy *CorsPolicy `json:"corsPolicy,omitempty"` + + // FaultInjectionPolicy: The specification for fault injection + // introduced into traffic to test the resiliency of clients to backend + // service failure. As part of fault injection, when clients send + // requests to a backend service, delays can be introduced by + // Loadbalancer on a percentage of requests before sending those requests + // to the backend service. Similarly, requests from clients can be + // aborted by the Loadbalancer for a percentage of requests. + // timeout and retry_policy will be ignored by clients that are + // configured with a fault_injection_policy. + FaultInjectionPolicy *HttpFaultInjection `json:"faultInjectionPolicy,omitempty"` + + // RequestMirrorPolicy: Specifies the policy on how requests intended + // for the route's backends are shadowed to a separate mirrored backend + // service. Loadbalancer does not wait for responses from the shadow + // service. Prior to sending traffic to the shadow service, the host / + // authority header is suffixed with -shadow. + RequestMirrorPolicy *RequestMirrorPolicy `json:"requestMirrorPolicy,omitempty"` + + // RetryPolicy: Specifies the retry policy associated with this route. + RetryPolicy *HttpRetryPolicy `json:"retryPolicy,omitempty"` + + // Timeout: Specifies the timeout for the selected route. Timeout is + // computed from the time the request has been fully processed (i.e. + // end-of-stream) up until the response has been completely processed. + // Timeout includes all retries. + // If not specified, the default value is 15 seconds. 
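// Illustrative sketch, not generated code: a retry policy and route timeout
// using the types above. Duration is the Seconds/Nanos message defined
// elsewhere in this file; the compute package name and all values are
// assumptions.
//
//   action := &compute.HttpRouteAction{
//       RetryPolicy: &compute.HttpRetryPolicy{
//           NumRetries:      3,
//           PerTryTimeout:   &compute.Duration{Seconds: 2},
//           RetryConditions: []string{"5xx", "connect-failure"},
//       },
//       Timeout: &compute.Duration{Seconds: 30}, // overrides the 15 second default
//   }
//   _ = action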
+ Timeout *Duration `json:"timeout,omitempty"` + + // UrlRewrite: The spec to modify the URL of the request, prior to + // forwarding the request to the matched service + UrlRewrite *UrlRewrite `json:"urlRewrite,omitempty"` + + // WeightedBackendServices: A list of weighted backend services to send + // traffic to when a route match occurs. The weights determine the + // fraction of traffic that flows to their corresponding backend + // service. If all traffic needs to go to a single backend service, + // there must be one weightedBackendService with weight set to a non 0 + // number. + // Once a backendService is identified and before forwarding the request + // to the backend service, advanced routing actions like Url rewrites + // and header transformations are applied depending on additional + // settings specified in this HttpRouteAction. + WeightedBackendServices []*WeightedBackendService `json:"weightedBackendServices,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CorsPolicy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CorsPolicy") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpRouteAction) MarshalJSON() ([]byte, error) { + type NoMethod HttpRouteAction + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpRouteRule: An HttpRouteRule specifies how to match an HTTP +// request and the corresponding routing action that load balancing +// proxies will perform. +type HttpRouteRule struct { + // HeaderAction: Specifies changes to request and response headers that + // need to take effect for the selected backendService. + // The headerAction specified here are applied before the matching + // pathMatchers[].headerAction and after + // pathMatchers[].routeRules[].routeAction.weightedBackendService.backend + // ServiceWeightAction[].headerAction + HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` + + MatchRules []*HttpRouteRuleMatch `json:"matchRules,omitempty"` + + // RouteAction: In response to a matching matchRule, the load balancer + // performs advanced routing actions like URL rewrites, header + // transformations, etc. prior to forwarding the request to the selected + // backend. If routeAction specifies any weightedBackendServices, + // service must not be set. Conversely if service is set, routeAction + // cannot contain any weightedBackendServices. + // Only one of routeAction or urlRedirect must be set. + RouteAction *HttpRouteAction `json:"routeAction,omitempty"` + + // Service: The full or partial URL of the backend service resource to + // which traffic is directed if this rule is matched. If routeAction is + // additionally specified, advanced routing actions like URL Rewrites, + // etc. take effect prior to sending the request to the backend. 
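// Illustrative sketch, not generated code: a route rule that matches requests
// under a path prefix carrying a particular header and sends them to a single
// backend service. The compute package name, path, header and service URL are
// assumptions.
//
//   rule := &compute.HttpRouteRule{
//       MatchRules: []*compute.HttpRouteRuleMatch{{
//           PrefixMatch: "/api/", // must begin with "/"
//           HeaderMatches: []*compute.HttpHeaderMatch{{
//               HeaderName:   "x-canary", // hypothetical header
//               PresentMatch: true,
//           }},
//       }},
//       // Exactly one of service, urlRedirect or a routeAction with
//       // weightedBackendServices is used, per the field documentation.
//       Service: "global/backendServices/api-canary", // hypothetical URL
//   }
//   _ = rule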
+ // However, if service is specified, routeAction cannot contain any + // weightedBackendServices. Conversely, if routeAction specifies any + // weightedBackendServices, service must not be specified. + // Only one of urlRedirect, service or + // routeAction.weightedBackendService must be set. + Service string `json:"service,omitempty"` + + // UrlRedirect: When this rule is matched, the request is redirected to + // a URL specified by urlRedirect. + // If urlRedirect is specified, service or routeAction must not be set. + UrlRedirect *HttpRedirectAction `json:"urlRedirect,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HeaderAction") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HeaderAction") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpRouteRule) MarshalJSON() ([]byte, error) { + type NoMethod HttpRouteRule + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpRouteRuleMatch: HttpRouteRuleMatch specifies a set of criteria +// for matching requests to an HttpRouteRule. All specified criteria +// must be satisfied for a match to occur. +type HttpRouteRuleMatch struct { + // FullPathMatch: For satisfying the matchRule condition, the path of the + // request must exactly match the value specified in fullPathMatch after + // removing any query parameters and anchor that may be part of the + // original URL. + // FullPathMatch must be between 1 and 1024 characters. + // Only one of prefixMatch, fullPathMatch or regexMatch must be + // specified. + FullPathMatch string `json:"fullPathMatch,omitempty"` + + // HeaderMatches: Specifies a list of header match criteria, all of + // which must match corresponding headers in the request. + HeaderMatches []*HttpHeaderMatch `json:"headerMatches,omitempty"` + + // IgnoreCase: Specifies that prefixMatch and fullPathMatch matches are + // case sensitive. + // The default value is false. + // caseSensitive must not be used with regexMatch. + IgnoreCase bool `json:"ignoreCase,omitempty"` + + // MetadataFilters: Opaque filter criteria used by Loadbalancer to + // restrict routing configuration to a limited set of xDS compliant + // clients. In their xDS requests to Loadbalancer, xDS clients present + // node metadata. If a match takes place, the relevant routing + // configuration is made available to those proxies. + // For each metadataFilter in this list, if its filterMatchCriteria is + // set to MATCH_ANY, at least one of the filterLabels must match the + // corresponding label provided in the metadata. If its + // filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels + // must match with corresponding labels in the provided + // metadata. 
+ // metadataFilters specified here can be overridden by those specified in + // the ForwardingRule that refers to this UrlMap. + // metadataFilters only applies to Loadbalancers that have their + // loadBalancingScheme set to INTERNAL_SELF_MANAGED. + MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` + + // PrefixMatch: For satisfying the matchRule condition, the request's + // path must begin with the specified prefixMatch. prefixMatch must + // begin with a /. + // The value must be between 1 and 1024 characters. + // Only one of prefixMatch, fullPathMatch or regexMatch must be + // specified. + PrefixMatch string `json:"prefixMatch,omitempty"` + + // QueryParameterMatches: Specifies a list of query parameter match + // criteria, all of which must match corresponding query parameters in + // the request. + QueryParameterMatches []*HttpQueryParameterMatch `json:"queryParameterMatches,omitempty"` + + // RegexMatch: For satisfying the matchRule condition, the path of the + // request must satisfy the regular expression specified in regexMatch + // after removing any query parameters and anchor supplied with the + // original URL. For regular expression grammar, please see + // en.cppreference.com/w/cpp/regex/ecmascript + // Only one of prefixMatch, fullPathMatch or regexMatch must be + // specified. + RegexMatch string `json:"regexMatch,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FullPathMatch") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FullPathMatch") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpRouteRuleMatch) MarshalJSON() ([]byte, error) { + type NoMethod HttpRouteRuleMatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpsHealthCheck: An HttpsHealthCheck resource. This resource defines +// a template for how individual instances should be checked for health, +// via HTTPS. +type HttpsHealthCheck struct { + // CheckIntervalSec: How often (in seconds) to send a health check. The + // default value is 5 seconds. + CheckIntervalSec int64 `json:"checkIntervalSec,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // HealthyThreshold: A so-far unhealthy instance will be marked healthy + // after this many consecutive successes. The default value is 2. + HealthyThreshold int64 `json:"healthyThreshold,omitempty"` + + // Host: The value of the host header in the HTTPS health check request. 
+ // If left empty (default value), the public IP on behalf of which this + // health check is performed will be used. + Host string `json:"host,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: Type of the resource. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Port: The TCP port number for the HTTPS health check request. The + // default value is 443. + Port int64 `json:"port,omitempty"` + + // RequestPath: The request path of the HTTPS health check request. The + // default value is "/". + RequestPath string `json:"requestPath,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // TimeoutSec: How long (in seconds) to wait before claiming failure. + // The default value is 5 seconds. It is invalid for timeoutSec to have + // a greater value than checkIntervalSec. + TimeoutSec int64 `json:"timeoutSec,omitempty"` + + // UnhealthyThreshold: A so-far healthy instance will be marked + // unhealthy after this many consecutive failures. The default value is + // 2. + UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CheckIntervalSec") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CheckIntervalSec") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *HttpsHealthCheck) MarshalJSON() ([]byte, error) { + type NoMethod HttpsHealthCheck + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HttpsHealthCheckList: Contains a list of HttpsHealthCheck resources. +type HttpsHealthCheckList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of HttpsHealthCheck resources. + Items []*HttpsHealthCheck `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next // page of results for list requests. 
If the number of results is larger @@ -10667,11 +12078,16 @@ type Image struct { // Possible values are FAILED, PENDING, or READY. // // Possible values: + // "DELETING" // "FAILED" // "PENDING" // "READY" Status string `json:"status,omitempty"` + // StorageLocations: GCS bucket storage location of the image (regional + // or multi-regional). + StorageLocations []string `json:"storageLocations,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10711,8 +12127,9 @@ type ImageRawDisk struct { // "TAR" ContainerType string `json:"containerType,omitempty"` - // Sha1Checksum: An optional SHA1 checksum of the disk image before - // unpackaging; provided by the client when the disk image is created. + // Sha1Checksum: [Deprecated] This field is deprecated. An optional SHA1 + // checksum of the disk image before unpackaging provided by the client + // when the disk image is created. Sha1Checksum string `json:"sha1Checksum,omitempty"` // Source: The full Google Cloud Storage URL where the disk image is @@ -10900,10 +12317,6 @@ func (s *ImageListWarningData) MarshalJSON() ([]byte, error) { // Instance: An Instance resource. (== resource_for beta.instances ==) // (== resource_for v1.instances ==) type Instance struct { - // AllocationAffinity: The configuration of desired allocations which - // this Instance could consume capacity from. - AllocationAffinity *AllocationAffinity `json:"allocationAffinity,omitempty"` - // CanIpForward: Allows this instance to send and receive packets with // non-matching destination or source IPs. This is required if you plan // to use this instance to forward routes. For more information, see @@ -11010,6 +12423,10 @@ type Instance struct { // Multiple interfaces are supported per instance. NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + // ReservationAffinity: The configuration of desired reservations from + // which this Instance can consume capacity from. + ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` + // Scheduling: Sets the scheduling options for this instance. Scheduling *Scheduling `json:"scheduling,omitempty"` @@ -11025,6 +12442,10 @@ type Instance struct { // instance. See Service Accounts for more information. ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` + ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` + + ShieldedInstanceIntegrityPolicy *ShieldedInstanceIntegrityPolicy `json:"shieldedInstanceIntegrityPolicy,omitempty"` + ShieldedVmConfig *ShieldedVmConfig `json:"shieldedVmConfig,omitempty"` ShieldedVmIntegrityPolicy *ShieldedVmIntegrityPolicy `json:"shieldedVmIntegrityPolicy,omitempty"` @@ -11039,6 +12460,7 @@ type Instance struct { // // Possible values: // "PROVISIONING" + // "REPAIRING" // "RUNNING" // "STAGING" // "STOPPED" @@ -11068,21 +12490,20 @@ type Instance struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "AllocationAffinity") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CanIpForward") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AllocationAffinity") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "CanIpForward") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -11728,9 +13149,12 @@ type InstanceGroupManager struct { // complementary to this Instance Group Manager. NamedPorts []*NamedPort `json:"namedPorts,omitempty"` - // PendingActions: [Output Only] The list of instance actions and the - // number of instances in this managed instance group that are pending - // for each of those actions. + // PendingActions: [Deprecated] This field is deprecated and will be + // removed. Prefer using the status field instead. Please contact + // cloud-updater-feedback@google.com to leave feedback if your workload + // relies on this field. [Output Only] The list of instance actions and + // the number of instances in this managed instance group that are + // pending for each of those actions. PendingActions *InstanceGroupManagerPendingActionsSummary `json:"pendingActions,omitempty"` // Region: [Output Only] The URL of the region where the managed @@ -11767,12 +13191,12 @@ type InstanceGroupManager struct { // Versions: Specifies the instance templates used by this managed // instance group to create instances. // - // Each version is defined by an instanceTemplate. Every template can - // appear at most once per instance group. This field overrides the - // top-level instanceTemplate field. Read more about the relationships - // between these fields. Exactly one version must leave the targetSize - // field unset. That version will be applied to all remaining instances. - // For more information, read about canary updates. + // Each version is defined by an instanceTemplate and a name. Every + // version can appear at most once per instance group. This field + // overrides the top-level instanceTemplate field. Read more about the + // relationships between these fields. Exactly one version must leave + // the targetSize field unset. That version will be applied to all + // remaining instances. For more information, read about canary updates. Versions []*InstanceGroupManagerVersion `json:"versions,omitempty"` // Zone: [Output Only] The URL of the zone where the managed instance @@ -12238,20 +13662,32 @@ func (s *InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { } type InstanceGroupManagerPendingActionsSummary struct { - // Creating: [Output Only] The number of instances in the managed - // instance group that are pending to be created. + // Creating: [Deprecated] This field is deprecated and will be removed. + // Prefer using the status field instead. 
Please contact + // cloud-updater-feedback@google.com to leave feedback if your workload + // relies on this field. [Output Only] The number of instances in the + // managed instance group that are pending to be created. Creating int64 `json:"creating,omitempty"` - // Deleting: [Output Only] The number of instances in the managed - // instance group that are pending to be deleted. + // Deleting: [Deprecated] This field is deprecated and will be removed. + // Prefer using the status field instead. Please contact + // cloud-updater-feedback@google.com to leave feedback if your workload + // relies on this field. [Output Only] The number of instances in the + // managed instance group that are pending to be deleted. Deleting int64 `json:"deleting,omitempty"` - // Recreating: [Output Only] The number of instances in the managed - // instance group that are pending to be recreated. + // Recreating: [Deprecated] This field is deprecated and will be + // removed. Prefer using the status field instead. Please contact + // cloud-updater-feedback@google.com to leave feedback if your workload + // relies on this field. [Output Only] The number of instances in the + // managed instance group that are pending to be recreated. Recreating int64 `json:"recreating,omitempty"` - // Restarting: [Output Only] The number of instances in the managed - // instance group that are pending to be restarted. + // Restarting: [Deprecated] This field is deprecated and will be + // removed. Prefer using the status field instead. Please contact + // cloud-updater-feedback@google.com to leave feedback if your workload + // relies on this field. [Output Only] The number of instances in the + // managed instance group that are pending to be restarted. Restarting int64 `json:"restarting,omitempty"` // ForceSendFields is a list of field names (e.g. "Creating") to @@ -12286,6 +13722,11 @@ type InstanceGroupManagerStatus struct { // group; and the managed instance group itself is not being modified. IsStable bool `json:"isStable,omitempty"` + // VersionTarget: [Output Only] A status of consistency of Instances' + // versions with their target version specified by version field on + // Instance Group Manager. + VersionTarget *InstanceGroupManagerStatusVersionTarget `json:"versionTarget,omitempty"` + // ForceSendFields is a list of field names (e.g. "IsStable") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -12309,7 +13750,42 @@ func (s *InstanceGroupManagerStatus) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstanceGroupManagerStatusVersionTarget struct { + // IsReached: [Output Only] A bit indicating whether version target has + // been reached in this managed instance group, i.e. all instances are + // in their target version. Instances' target version are specified by + // version field on Instance Group Manager. + IsReached bool `json:"isReached,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IsReached") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"IsReached") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerStatusVersionTarget) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerStatusVersionTarget + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupManagerUpdatePolicy struct { + // Possible values: + // "NONE" + // "PROACTIVE" + InstanceRedistributionType string `json:"instanceRedistributionType,omitempty"` + // MaxSurge: The maximum number of instances that can be created above // the specified targetSize during the update process. By default, a // fixed value of 1 is used. This value can be either a fixed number or @@ -12363,20 +13839,22 @@ type InstanceGroupManagerUpdatePolicy struct { // "PROACTIVE" Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "MaxSurge") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "InstanceRedistributionType") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MaxSurge") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. + // "InstanceRedistributionType") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. NullFields []string `json:"-"` } @@ -12387,6 +13865,10 @@ func (s *InstanceGroupManagerUpdatePolicy) MarshalJSON() ([]byte, error) { } type InstanceGroupManagerVersion struct { + // InstanceTemplate: The URL of the instance template that is specified + // for this managed instance group. The group uses this template to + // create new instances in the managed instance group until the + // `targetSize` for this version is reached. InstanceTemplate string `json:"instanceTemplate,omitempty"` // Name: Name of the version. 
Unique among all versions in the scope of @@ -12458,6 +13940,54 @@ func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, er return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupManagersApplyUpdatesRequest: +// InstanceGroupManagers.applyUpdatesToInstances +type InstanceGroupManagersApplyUpdatesRequest struct { + // Instances: The list of URLs of one or more instances for which we + // want to apply updates on this managed instance group. This can be a + // full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` + + // MinimalAction: The minimal action that should be perfomed on the + // instances. By default NONE. + // + // Possible values: + // "REPLACE" + // "RESTART" + MinimalAction string `json:"minimalAction,omitempty"` + + // MostDisruptiveAllowedAction: The most disruptive action that allowed + // to be performed on the instances. By default REPLACE. + // + // Possible values: + // "REPLACE" + // "RESTART" + MostDisruptiveAllowedAction string `json:"mostDisruptiveAllowedAction,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagersApplyUpdatesRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagersApplyUpdatesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupManagersDeleteInstancesRequest struct { // Instances: The URLs of one or more instances to delete. This can be a // full URL or a partial URL, such as @@ -13616,10 +15146,6 @@ func (s *InstanceMoveRequest) MarshalJSON() ([]byte, error) { } type InstanceProperties struct { - // AllocationAffinity: The configuration of desired allocations which - // this Instance could consume capacity from. - AllocationAffinity *AllocationAffinity `json:"allocationAffinity,omitempty"` - // CanIpForward: Enables instances created based on this template to // send packets with source IP addresses other than their own and // receive packets with destination IP addresses other than their own. @@ -13637,6 +15163,10 @@ type InstanceProperties struct { // are created from this template. Disks []*AttachedDisk `json:"disks,omitempty"` + // DisplayDevice: Display Device properties to enable support for remote + // display products like: Teradici, VNC and TeamViewer + DisplayDevice *DisplayDevice `json:"displayDevice,omitempty"` + // GuestAccelerators: A list of guest accelerator cards' type and count // to use for instances created from the instance template. 
GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` @@ -13667,6 +15197,10 @@ type InstanceProperties struct { // interface. NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + // ReservationAffinity: The configuration of desired reservations which + // this Instance could consume capacity from. + ReservationAffinity *ReservationAffinity `json:"reservationAffinity,omitempty"` + // Scheduling: Specifies the scheduling options for the instances that // are created from this template. Scheduling *Scheduling `json:"scheduling,omitempty"` @@ -13677,6 +15211,8 @@ type InstanceProperties struct { // to obtain the access tokens for these instances. ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` + ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` + // ShieldedVmConfig: Specifies the Shielded VM options for the instances // that are created from this template. ShieldedVmConfig *ShieldedVmConfig `json:"shieldedVmConfig,omitempty"` @@ -13687,21 +15223,20 @@ type InstanceProperties struct { // within the list must comply with RFC1035. Tags *Tags `json:"tags,omitempty"` - // ForceSendFields is a list of field names (e.g. "AllocationAffinity") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CanIpForward") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AllocationAffinity") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "CanIpForward") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -13981,6 +15516,7 @@ type InstanceWithNamedPorts struct { // // Possible values: // "PROVISIONING" + // "REPAIRING" // "RUNNING" // "STAGING" // "STOPPED" @@ -14372,6 +15908,40 @@ func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Int64RangeMatch: HttpRouteRuleMatch criteria for field values that +// must stay within the specified integer range. +type Int64RangeMatch struct { + // RangeEnd: The end of the range (exclusive) in signed long integer + // format. + RangeEnd int64 `json:"rangeEnd,omitempty,string"` + + // RangeStart: The start of the range (inclusive) in signed long integer + // format. + RangeStart int64 `json:"rangeStart,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. 
"RangeEnd") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RangeEnd") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Int64RangeMatch) MarshalJSON() ([]byte, error) { + type NoMethod Int64RangeMatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Interconnect: Represents an Interconnects resource. The Interconnects // resource is a dedicated connection between Google's network and your // on-premises network. For more information, see the Dedicated @@ -14409,7 +15979,7 @@ type Interconnect struct { // side of the Interconnect link. This can be used only for ping tests. GoogleIpAddress string `json:"googleIpAddress,omitempty"` - // GoogleReferenceId: [Output Only] Google reference ID; to be used when + // GoogleReferenceId: [Output Only] Google reference ID to be used when // raising support tickets with Google or otherwise to debug backend // connectivity issues. GoogleReferenceId string `json:"googleReferenceId,omitempty"` @@ -14422,8 +15992,13 @@ type Interconnect struct { // InterconnectAttachments configured to use this Interconnect. InterconnectAttachments []string `json:"interconnectAttachments,omitempty"` - // InterconnectType: Type of interconnect. Note that "IT_PRIVATE" has - // been deprecated in favor of "DEDICATED" + // InterconnectType: Type of interconnect, which can take one of the + // following values: + // - PARTNER: A partner-managed interconnection shared between customers + // though a partner. + // - DEDICATED: A dedicated physical interconnection with the customer. + // Note that a value IT_PRIVATE has been deprecated in favor of + // DEDICATED. // // Possible values: // "DEDICATED" @@ -14452,11 +16027,15 @@ type Interconnect struct { // comply with RFC1035. Label values may be empty. Labels map[string]string `json:"labels,omitempty"` - // LinkType: Type of link requested. This field indicates speed of each - // of the links in the bundle, not the entire bundle. Only 10G per link - // is allowed for a dedicated interconnect. Options: Ethernet_10G_LR + // LinkType: Type of link requested, which can take one of the following + // values: + // - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics + // - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note + // that this field indicates the speed of each of the links in the + // bundle, not the speed of the entire bundle. // // Possible values: + // "LINK_TYPE_ETHERNET_100G_LR" // "LINK_TYPE_ETHERNET_10G_LR" LinkType string `json:"linkType,omitempty"` @@ -14480,8 +16059,16 @@ type Interconnect struct { // Notifications. NocContactEmail string `json:"nocContactEmail,omitempty"` - // OperationalStatus: [Output Only] The current status of whether or not - // this Interconnect is functional. 
+ // OperationalStatus: [Output Only] The current status of this + // Interconnect's functionality, which can take one of the following + // values: + // - OS_ACTIVE: A valid Interconnect, which is turned up and is ready to + // use. Attachments may be provisioned on this Interconnect. + // - OS_UNPROVISIONED: An Interconnect that has not completed turnup. No + // attachments may be provisioned on this Interconnect. + // - OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal + // maintenance. No attachments may be provisioned or updated on this + // Interconnect. // // Possible values: // "OS_ACTIVE" @@ -14505,8 +16092,15 @@ type Interconnect struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // State: [Output Only] The current state of whether or not this - // Interconnect is functional. + // State: [Output Only] The current state of Interconnect functionality, + // which can take one of the following values: + // - ACTIVE: The Interconnect is valid, turned up and ready to use. + // Attachments may be provisioned on this Interconnect. + // - UNPROVISIONED: The Interconnect has not completed turnup. No + // attachments may be provisioned on this Interconnect. + // - UNDER_MAINTENANCE: The Interconnect is undergoing internal + // maintenance. No attachments may be provisioned or updated on this + // Interconnect. // // Possible values: // "ACTIVE" @@ -14549,10 +16143,22 @@ type InterconnectAttachment struct { // Not present for PARTNER_PROVIDER. AdminEnabled bool `json:"adminEnabled,omitempty"` - // Bandwidth: Provisioned bandwidth capacity for the - // interconnectAttachment. Can be set by the partner to update the - // customer's provisioned bandwidth. Output only for PARTNER type, - // mutable for PARTNER_PROVIDER and DEDICATED. + // Bandwidth: Provisioned bandwidth capacity for the interconnect + // attachment. For attachments of type DEDICATED, the user can set the + // bandwidth. For attachments of type PARTNER, the Google Partner that + // is operating the interconnect must set the bandwidth. Output only for + // PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, and can + // take one of the following values: + // - BPS_50M: 50 Mbit/s + // - BPS_100M: 100 Mbit/s + // - BPS_200M: 200 Mbit/s + // - BPS_300M: 300 Mbit/s + // - BPS_400M: 400 Mbit/s + // - BPS_500M: 500 Mbit/s + // - BPS_1G: 1 Gbit/s + // - BPS_2G: 2 Gbit/s + // - BPS_5G: 5 Gbit/s + // - BPS_10G: 10 Gbit/s // // Possible values: // "BPS_100M" @@ -14595,12 +16201,16 @@ type InterconnectAttachment struct { Description string `json:"description,omitempty"` // EdgeAvailabilityDomain: Desired availability domain for the - // attachment. Only available for type PARTNER, at creation time. For - // improved reliability, customers should configure a pair of - // attachments with one per availability domain. The selected - // availability domain will be provided to the Partner via the pairing - // key so that the provisioned circuit will lie in the specified domain. - // If not specified, the value will default to AVAILABILITY_DOMAIN_ANY. + // attachment. Only available for type PARTNER, at creation time, and + // can take one of the following values: + // - AVAILABILITY_DOMAIN_ANY + // - AVAILABILITY_DOMAIN_1 + // - AVAILABILITY_DOMAIN_2 For improved reliability, customers should + // configure a pair of attachments, one per availability domain. 
The + // selected availability domain will be provided to the Partner via the + // pairing key, so that the provisioned circuit will lie in the + // specified domain. If not specified, the value will default to + // AVAILABILITY_DOMAIN_ANY. // // Possible values: // "AVAILABILITY_DOMAIN_1" @@ -14652,7 +16262,12 @@ type InterconnectAttachment struct { Name string `json:"name,omitempty"` // OperationalStatus: [Output Only] The current status of whether or not - // this interconnect attachment is functional. + // this interconnect attachment is functional, which can take one of the + // following values: + // - OS_ACTIVE: The attachment has been turned up and is ready to use. + // + // - OS_UNPROVISIONED: The attachment is not ready to use yet, because + // turnup is not complete. // // Possible values: // "OS_ACTIVE" @@ -14665,10 +16280,10 @@ type InterconnectAttachment struct { // selected partner. Of the form "XXXXX/region/domain" PairingKey string `json:"pairingKey,omitempty"` - // PartnerAsn: Optional BGP ASN for the router that should be supplied - // by a layer 3 Partner if they configured BGP on behalf of the - // customer. Output only for PARTNER type, input only for - // PARTNER_PROVIDER, not available for DEDICATED. + // PartnerAsn: Optional BGP ASN for the router supplied by a Layer 3 + // Partner if they configured BGP on behalf of the customer. Output only + // for PARTNER type, input only for PARTNER_PROVIDER, not available for + // DEDICATED. PartnerAsn int64 `json:"partnerAsn,omitempty,string"` // PartnerMetadata: Informational metadata about Partner attachments @@ -14697,7 +16312,26 @@ type InterconnectAttachment struct { SelfLink string `json:"selfLink,omitempty"` // State: [Output Only] The current state of this attachment's - // functionality. + // functionality. Enum values ACTIVE and UNPROVISIONED are shared by + // DEDICATED/PRIVATE, PARTNER, and PARTNER_PROVIDER interconnect + // attachments, while enum values PENDING_PARTNER, + // PARTNER_REQUEST_RECEIVED, and PENDING_CUSTOMER are used for only + // PARTNER and PARTNER_PROVIDER interconnect attachments. This state can + // take one of the following values: + // - ACTIVE: The attachment has been turned up and is ready to use. + // - UNPROVISIONED: The attachment is not ready to use yet, because + // turnup is not complete. + // - PENDING_PARTNER: A newly-created PARTNER attachment that has not + // yet been configured on the Partner side. + // - PARTNER_REQUEST_RECEIVED: A PARTNER attachment is in the process of + // provisioning after a PARTNER_PROVIDER attachment was created that + // references it. + // - PENDING_CUSTOMER: A PARTNER or PARTNER_PROVIDER attachment that is + // waiting for a customer to activate it. + // - DEFUNCT: The attachment was deleted externally and is no longer + // functional. This could be because the associated Interconnect was + // removed, or because the other side of a Partner attachment was + // deleted. // // Possible values: // "ACTIVE" @@ -14709,6 +16343,14 @@ type InterconnectAttachment struct { // "UNPROVISIONED" State string `json:"state,omitempty"` + // Type: The type of interconnect attachment this is, which can take one + // of the following values: + // - DEDICATED: an attachment to a Dedicated Interconnect. + // - PARTNER: an attachment to a Partner Interconnect, created by the + // customer. + // - PARTNER_PROVIDER: an attachment to a Partner Interconnect, created + // by the partner. 
+ // // Possible values: // "DEDICATED" // "PARTNER" @@ -15077,7 +16719,7 @@ type InterconnectAttachmentPartnerMetadata struct { PartnerName string `json:"partnerName,omitempty"` // PortalUrl: URL of the Partner?s portal for this Attachment. Partners - // may customise this to be a deep-link to the specific resource on the + // may customise this to be a deep link to the specific resource on the // Partner portal. This value may be validated to match approved Partner // values. PortalUrl string `json:"portalUrl,omitempty"` @@ -15393,6 +17035,12 @@ type InterconnectDiagnosticsLinkLACPStatus struct { // LACP exchange. NeighborSystemId string `json:"neighborSystemId,omitempty"` + // State: The state of a LACP link, which can take one of the following + // values: + // - ACTIVE: The link is configured and active within the bundle. + // - DETACHED: The link is not configured within the bundle. This means + // that the rest of the object should be empty. + // // Possible values: // "ACTIVE" // "DETACHED" @@ -15423,6 +17071,17 @@ func (s *InterconnectDiagnosticsLinkLACPStatus) MarshalJSON() ([]byte, error) { } type InterconnectDiagnosticsLinkOpticalPower struct { + // State: The status of the current value when compared to the warning + // and alarm levels for the receiving or transmitting transceiver. + // Possible states include: + // - OK: The value has not crossed a warning threshold. + // - LOW_WARNING: The value has crossed below the low warning threshold. + // + // - HIGH_WARNING: The value has crossed above the high warning + // threshold. + // - LOW_ALARM: The value has crossed below the low alarm threshold. + // - HIGH_ALARM: The value has crossed above the high alarm threshold. + // // Possible values: // "HIGH_ALARM" // "HIGH_WARNING" @@ -15431,10 +17090,11 @@ type InterconnectDiagnosticsLinkOpticalPower struct { // "OK" State string `json:"state,omitempty"` - // Value: Value of the current optical power, read in dBm. Take a known - // good optical value, give it a 10% margin and trigger warnings - // relative to that value. In general, a -7dBm warning and a -11dBm - // alarm are good optical value estimates for most links. + // Value: Value of the current receiving or transmitting optical power, + // read in dBm. Take a known good optical value, give it a 10% margin + // and trigger warnings relative to that value. In general, a -7dBm + // warning and a -11dBm alarm are good optical value estimates for most + // links. Value float64 `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "State") to @@ -15490,8 +17150,14 @@ type InterconnectDiagnosticsLinkStatus struct { LacpStatus *InterconnectDiagnosticsLinkLACPStatus `json:"lacpStatus,omitempty"` + // ReceivingOpticalPower: An InterconnectDiagnostics.LinkOpticalPower + // object, describing the current value and status of the received light + // level. ReceivingOpticalPower *InterconnectDiagnosticsLinkOpticalPower `json:"receivingOpticalPower,omitempty"` + // TransmittingOpticalPower: An InterconnectDiagnostics.LinkOpticalPower + // object, describing the current value and status of the transmitted + // light level. TransmittingOpticalPower *InterconnectDiagnosticsLinkOpticalPower `json:"transmittingOpticalPower,omitempty"` // ForceSendFields is a list of field names (e.g. "ArpCaches") to @@ -15693,7 +17359,13 @@ type InterconnectLocation struct { // "Amsterdam, Netherlands". City string `json:"city,omitempty"` - // Continent: [Output Only] Continent for this location. 
+ // Continent: [Output Only] Continent for this location, which can take + // one of the following values: + // - AFRICA + // - ASIA_PAC + // - EUROPE + // - NORTH_AMERICA + // - SOUTH_AMERICA // // Possible values: // "AFRICA" @@ -15746,10 +17418,12 @@ type InterconnectLocation struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] The status of this InterconnectLocation. If the - // status is AVAILABLE, new Interconnects may be provisioned in this - // InterconnectLocation. Otherwise, no new Interconnects may be - // provisioned. + // Status: [Output Only] The status of this InterconnectLocation, which + // can take one of the following values: + // - CLOSED: The InterconnectLocation is closed and is unavailable for + // provisioning new Interconnects. + // - AVAILABLE: The InterconnectLocation is available for provisioning + // new Interconnects. // // Possible values: // "AVAILABLE" @@ -15997,9 +17671,14 @@ type InterconnectOutageNotification struct { // epoch). EndTime int64 `json:"endTime,omitempty,string"` - // IssueType: Form this outage is expected to take. Note that the "IT_" - // versions of this enum have been deprecated in favor of the unprefixed - // values. + // IssueType: Form this outage is expected to take, which can take one + // of the following values: + // - OUTAGE: The Interconnect may be completely out of service for some + // or all of the specified window. + // - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a + // whole should remain up, but with reduced bandwidth. Note that the + // versions of this enum prefixed with "IT_" have been deprecated in + // favor of the unprefixed values. // // Possible values: // "IT_OUTAGE" @@ -16011,8 +17690,10 @@ type InterconnectOutageNotification struct { // Name: Unique identifier for this outage notification. Name string `json:"name,omitempty"` - // Source: The party that generated this notification. Note that - // "NSRC_GOOGLE" has been deprecated in favor of "GOOGLE" + // Source: The party that generated this notification, which can take + // the following value: + // - GOOGLE: this notification as generated by Google. Note that the + // value of NSRC_GOOGLE has been deprecated in favor of GOOGLE. // // Possible values: // "GOOGLE" @@ -16023,8 +17704,15 @@ type InterconnectOutageNotification struct { // Unix epoch). StartTime int64 `json:"startTime,omitempty,string"` - // State: State of this notification. Note that the "NS_" versions of - // this enum have been deprecated in favor of the unprefixed values. + // State: State of this notification, which can take one of the + // following values: + // - ACTIVE: This outage notification is active. The event could be in + // the past, present, or future. See start_time and end_time for + // scheduling. + // - CANCELLED: The outage associated with this notification was + // cancelled before the outage was due to start. Note that the versions + // of this enum prefixed with "NS_" have been deprecated in favor of the + // unprefixed values. // // Possible values: // "ACTIVE" @@ -16576,12 +18264,6 @@ type LogConfigDataAccessOptions struct { // the caller. This is relevant only in the LocalIAM implementation, for // now. // - // NOTE: Logging to Gin in a fail-closed manner is currently unsupported - // while work is being done to satisfy the requirements of go/345. 
- // Currently, setting LOG_FAIL_CLOSED mode will have no effect, but - // still exists because there is active work being done to support it - // (b/115874152). - // // Possible values: // "LOG_FAIL_CLOSED" // "LOG_MODE_UNSPECIFIED" @@ -17190,6 +18872,7 @@ type ManagedInstance struct { // // Possible values: // "PROVISIONING" + // "REPAIRING" // "RUNNING" // "STAGING" // "STOPPED" @@ -17436,6 +19119,103 @@ func (s *MetadataItems) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MetadataFilter: Opaque filter criteria used by loadbalancers to +// restrict routing configuration to a limited set of loadbalancing +// proxies. Proxies and sidecars involved in loadbalancing would +// typically present metadata to the loadbalancers which need to match +// criteria specified here. If a match takes place, the relevant routing +// configuration is made available to those proxies. +// For each metadataFilter in this list, if its filterMatchCriteria is +// set to MATCH_ANY, at least one of the filterLabels must match the +// corresponding label provided in the metadata. If its +// filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels +// must match with corresponding labels in the provided metadata. +// An example for using metadataFilters would be: if loadbalancing +// involves Envoys, they will only receive routing configuration when +// values in metadataFilters match values supplied in /global/gateways/default-internet-gateway - NextHopGateway string `json:"nextHopGateway,omitempty"` - - // NextHopIlb: The URL to a forwarding rule of type - // loadBalancingScheme=INTERNAL that should handle matching packets. You - // can only specify the forwarding rule as a partial or full URL. For - // example, the following are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule - // - regions/region/forwardingRules/forwardingRule Note that this can - // only be used when the destination_range is a public (non-RFC 1918) IP - // CIDR range. - NextHopIlb string `json:"nextHopIlb,omitempty"` - - // NextHopInstance: The URL to an instance that should handle matching - // packets. You can specify this as a full or partial URL. For - // example: - // https://www.googleapis.com/compute/v1/projects/project/zones/ - // zone/instances/ - NextHopInstance string `json:"nextHopInstance,omitempty"` - - // NextHopIp: The network IP address of an instance that should handle - // matching packets. Only IPv4 is supported. - NextHopIp string `json:"nextHopIp,omitempty"` - - // NextHopNetwork: The URL of the local network if it should handle - // matching packets. - NextHopNetwork string `json:"nextHopNetwork,omitempty"` - - // NextHopPeering: [Output Only] The network peering name that should - // handle matching packets, which should conform to RFC1035. - NextHopPeering string `json:"nextHopPeering,omitempty"` - - // NextHopVpnTunnel: The URL to a VpnTunnel that should handle matching - // packets. - NextHopVpnTunnel string `json:"nextHopVpnTunnel,omitempty"` - - // Priority: The priority of this route. Priority is used to break ties - // in cases where there is more than one matching route of equal prefix - // length. In the case of two routes with equal prefix length, the one - // with the lowest-numbered priority value wins. Default value is 1000. - // Valid range is 0 through 65535. 
- Priority int64 `json:"priority,omitempty"` - - // SelfLink: [Output Only] Server-defined fully-qualified URL for this - // resource. - SelfLink string `json:"selfLink,omitempty"` - - // Tags: A list of instance tags to which this route applies. - Tags []string `json:"tags,omitempty"` - - // Warnings: [Output Only] If potential misconfigurations are detected - // for this route, this field will be populated with warning messages. - Warnings []*RouteWarnings `json:"warnings,omitempty"` +type ResourcePoliciesScopedList struct { + // ResourcePolicies: A list of resourcePolicies contained in this scope. + ResourcePolicies []*ResourcePolicy `json:"resourcePolicies,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: Informational warning which replaces the list of + // resourcePolicies when the list is empty. + Warning *ResourcePoliciesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "ResourcePolicies") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "ResourcePolicies") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -24645,13 +26443,15 @@ type Route struct { NullFields []string `json:"-"` } -func (s *Route) MarshalJSON() ([]byte, error) { - type NoMethod Route +func (s *ResourcePoliciesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePoliciesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouteWarnings struct { +// ResourcePoliciesScopedListWarning: Informational warning which +// replaces the list of resourcePolicies when the list is empty. +type ResourcePoliciesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -24685,7 +26485,7 @@ type RouteWarnings struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RouteWarningsData `json:"data,omitempty"` + Data []*ResourcePoliciesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
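Editor's aside: the ForceSendFields / NullFields convention documented on nearly every struct in this generated file controls whether zero-valued fields are serialized at all, sent as empty values, or sent as explicit JSON null (important for Patch requests). A minimal sketch of that behavior follows, using the HttpsHealthCheck type defined earlier in this file; the import path google.golang.org/api/compute/v1 is an assumption about which API version this vendored file corresponds to, so adjust it to the package actually vendored here.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path for the vendored package
)

func main() {
	hc := &compute.HttpsHealthCheck{Name: "example-check"}

	// By default the generated MarshalJSON drops zero-valued fields,
	// so the empty RequestPath does not appear in the payload.
	b, err := json.Marshal(hc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	// Naming the field in ForceSendFields keeps it in the payload even
	// though it is empty, which is how a Patch request clears a value.
	hc.ForceSendFields = []string{"RequestPath"}
	b, _ = json.Marshal(hc)
	fmt.Println(string(b))

	// NullFields goes one step further and sends an explicit JSON null
	// for the named field.
	hc.ForceSendFields = nil
	hc.NullFields = []string{"RequestPath"}
	b, _ = json.Marshal(hc)
	fmt.Println(string(b))
}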
@@ -24708,13 +26508,13 @@ type RouteWarnings struct { NullFields []string `json:"-"` } -func (s *RouteWarnings) MarshalJSON() ([]byte, error) { - type NoMethod RouteWarnings +func (s *ResourcePoliciesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePoliciesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouteWarningsData struct { +type ResourcePoliciesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -24745,20 +26545,94 @@ type RouteWarningsData struct { NullFields []string `json:"-"` } -func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { - type NoMethod RouteWarningsData +func (s *ResourcePoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePoliciesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouteList: Contains a list of Route resources. -type RouteList struct { +type ResourcePolicy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#resource_policies for resource policies. + Kind string `json:"kind,omitempty"` + + // Name: The name of the resource, provided by the client when initially + // creating the resource. The resource name must be 1-63 characters + // long, and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. + SelfLink string `json:"selfLink,omitempty"` + + // SnapshotSchedulePolicy: Resource policy for persistent disks for + // creating snapshots. + SnapshotSchedulePolicy *ResourcePolicySnapshotSchedulePolicy `json:"snapshotSchedulePolicy,omitempty"` + + // Status: [Output Only] The status of resource policy creation. + // + // Possible values: + // "CREATING" + // "DELETING" + // "INVALID" + // "READY" + Status string `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. 
By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ResourcePolicy) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ResourcePolicyAggregatedList: Contains a list of resourcePolicies. +type ResourcePolicyAggregatedList struct { + Etag string `json:"etag,omitempty"` + // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Route resources. - Items []*Route `json:"items,omitempty"` + // Items: A list of ResourcePolicy resources. + Items map[string]ResourcePoliciesScopedList `json:"items,omitempty"` // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -24775,13 +26649,13 @@ type RouteList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *RouteListWarning `json:"warning,omitempty"` + Warning *ResourcePolicyAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -24789,7 +26663,7 @@ type RouteList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Etag") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -24798,14 +26672,15 @@ type RouteList struct { NullFields []string `json:"-"` } -func (s *RouteList) MarshalJSON() ([]byte, error) { - type NoMethod RouteList +func (s *ResourcePolicyAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouteListWarning: [Output Only] Informational warning message. -type RouteListWarning struct { +// ResourcePolicyAggregatedListWarning: [Output Only] Informational +// warning message. +type ResourcePolicyAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -24839,7 +26714,7 @@ type RouteListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RouteListWarningData `json:"data,omitempty"` + Data []*ResourcePolicyAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
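Editor's aside: the aggregated-list types introduced around this hunk key their Items map by scope (for example "regions/us-central1") and substitute a scoped Warning when a scope has no resources. The sketch below walks that shape using only the fields shown in this diff; the import path and the sample values are illustrative assumptions, not part of the patch.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path for the vendored package
)

// printScopedPolicies reports either the policies found in each scope or
// the informational warning that replaces an empty per-scope list.
func printScopedPolicies(list *compute.ResourcePolicyAggregatedList) {
	for scope, scoped := range list.Items {
		if w := scoped.Warning; w != nil {
			fmt.Printf("%s: warning %s: %s\n", scope, w.Code, w.Message)
			continue
		}
		for _, rp := range scoped.ResourcePolicies {
			fmt.Printf("%s: policy %s (status %s)\n", scope, rp.Name, rp.Status)
		}
	}
}

func main() {
	// A locally constructed value standing in for an API response.
	list := &compute.ResourcePolicyAggregatedList{
		Items: map[string]compute.ResourcePoliciesScopedList{
			"regions/us-central1": {
				ResourcePolicies: []*compute.ResourcePolicy{
					{Name: "daily-snap", Status: "READY"},
				},
			},
		},
	}
	printScopedPolicies(list)
}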
@@ -24862,13 +26737,13 @@ type RouteListWarning struct { NullFields []string `json:"-"` } -func (s *RouteListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RouteListWarning +func (s *ResourcePolicyAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouteListWarningData struct { +type ResourcePolicyAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -24899,71 +26774,28 @@ type RouteListWarningData struct { NullFields []string `json:"-"` } -func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RouteListWarningData +func (s *ResourcePolicyAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Router: Router resource. -type Router struct { - // Bgp: BGP information specific to this router. - Bgp *RouterBgp `json:"bgp,omitempty"` - - // BgpPeers: BGP information that needs to be configured into the - // routing stack to establish the BGP peering. It must specify peer ASN - // and either interface name, IP, or peer IP. Please refer to RFC4273. - BgpPeers []*RouterBgpPeer `json:"bgpPeers,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Interfaces: Router interfaces. Each interface requires either one - // linked resource (e.g. linkedVpnTunnel), or IP address and IP address - // range (e.g. ipRange), or both. - Interfaces []*RouterInterface `json:"interfaces,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#router for - // routers. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Nats: A list of Nat services created in this router. - Nats []*RouterNat `json:"nats,omitempty"` - - // Network: URI of the network to which this router belongs. - Network string `json:"network,omitempty"` - - // Region: [Output Only] URI of the region where the router resides. You - // must specify this field as part of the HTTP request URL. It is not - // settable as a field in the request body. - Region string `json:"region,omitempty"` +// ResourcePolicyDailyCycle: Time window specified for daily operations. +type ResourcePolicyDailyCycle struct { + // DaysInCycle: Defines a schedule that runs every nth day of the month. 
+ DaysInCycle int64 `json:"daysInCycle,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // Duration: [Output only] A predetermined duration for the window, + // automatically chosen to be the smallest possible in the given + // scenario. + Duration string `json:"duration,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // StartTime: Start time of the window. This must be in UTC format that + // resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For + // example, both 13:00-5 and 08:00 are valid. + StartTime string `json:"startTime,omitempty"` - // ForceSendFields is a list of field names (e.g. "Bgp") to + // ForceSendFields is a list of field names (e.g. "DaysInCycle") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -24971,32 +26803,36 @@ type Router struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bgp") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "DaysInCycle") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Router) MarshalJSON() ([]byte, error) { - type NoMethod Router +func (s *ResourcePolicyDailyCycle) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyDailyCycle raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterAdvertisedIpRange: Description-tagged IP ranges for the router -// to advertise. -type RouterAdvertisedIpRange struct { - // Description: User-specified description for the IP range. - Description string `json:"description,omitempty"` +// ResourcePolicyHourlyCycle: Time window specified for hourly +// operations. +type ResourcePolicyHourlyCycle struct { + // Duration: [Output only] Duration of the time window, automatically + // chosen to be smallest possible in the given scenario. + Duration string `json:"duration,omitempty"` - // Range: The IP range to advertise. The value must be a CIDR-formatted - // string. - Range string `json:"range,omitempty"` + // HoursInCycle: Allows to define schedule that runs every nth hour. + HoursInCycle int64 `json:"hoursInCycle,omitempty"` - // ForceSendFields is a list of field names (e.g. "Description") to + // StartTime: Time within the window to start the operations. It must be + // in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. + StartTime string `json:"startTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Duration") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -25004,31 +26840,33 @@ type RouterAdvertisedIpRange struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Duration") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterAdvertisedIpRange) MarshalJSON() ([]byte, error) { - type NoMethod RouterAdvertisedIpRange +func (s *ResourcePolicyHourlyCycle) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyHourlyCycle raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterAggregatedList: Contains a list of routers. -type RouterAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. +type ResourcePolicyList struct { + Etag string `json:"etag,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. Id string `json:"id,omitempty"` - // Items: A list of Router resources. - Items map[string]RoutersScopedList `json:"items,omitempty"` + // Items: [Output Only] A list of ResourcePolicy resources. + Items []*ResourcePolicy `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource.Always + // compute#resourcePoliciesList for listsof resourcePolicies Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -25043,13 +26881,13 @@ type RouterAggregatedList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *RouterAggregatedListWarning `json:"warning,omitempty"` + Warning *ResourcePolicyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -25057,7 +26895,7 @@ type RouterAggregatedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Etag") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -25066,15 +26904,15 @@ type RouterAggregatedList struct { NullFields []string `json:"-"` } -func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod RouterAggregatedList +func (s *ResourcePolicyList) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterAggregatedListWarning: [Output Only] Informational warning +// ResourcePolicyListWarning: [Output Only] Informational warning // message. -type RouterAggregatedListWarning struct { +type ResourcePolicyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -25108,7 +26946,7 @@ type RouterAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RouterAggregatedListWarningData `json:"data,omitempty"` + Data []*ResourcePolicyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -25131,13 +26969,13 @@ type RouterAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RouterAggregatedListWarning +func (s *ResourcePolicyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterAggregatedListWarningData struct { +type ResourcePolicyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -25168,46 +27006,31 @@ type RouterAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RouterAggregatedListWarningData +func (s *ResourcePolicyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterBgp struct { - // AdvertiseMode: User-specified flag to indicate which mode to use for - // advertisement. - // - // Possible values: - // "CUSTOM" - // "DEFAULT" - AdvertiseMode string `json:"advertiseMode,omitempty"` - - // AdvertisedGroups: User-specified list of prefix groups to advertise - // in custom mode. This field can only be populated if advertise_mode is - // CUSTOM and is advertised to all peers of the router. These groups - // will be advertised in addition to any specified prefixes. Leave this - // field blank to advertise no custom groups. - // - // Possible values: - // "ALL_SUBNETS" - AdvertisedGroups []string `json:"advertisedGroups,omitempty"` +// ResourcePolicySnapshotSchedulePolicy: A snapshot schedule policy +// specifies when and how frequently snapshots are to be created for the +// target disk. Also specifies how many and how long these scheduled +// snapshots should be retained. +type ResourcePolicySnapshotSchedulePolicy struct { + // RetentionPolicy: Retention policy applied to snapshots created by + // this resource policy. 
+ RetentionPolicy *ResourcePolicySnapshotSchedulePolicyRetentionPolicy `json:"retentionPolicy,omitempty"` - // AdvertisedIpRanges: User-specified list of individual IP ranges to - // advertise in custom mode. This field can only be populated if - // advertise_mode is CUSTOM and is advertised to all peers of the - // router. These IP ranges will be advertised in addition to any - // specified groups. Leave this field blank to advertise no custom IP - // ranges. - AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` + // Schedule: A Vm Maintenance Policy specifies what kind of + // infrastructure maintenance we are allowed to perform on this VM and + // when. Schedule that is applied to disks covered by this policy. + Schedule *ResourcePolicySnapshotSchedulePolicySchedule `json:"schedule,omitempty"` - // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 - // private ASN, either 16-bit or 32-bit. The value will be fixed for - // this router resource. All VPN tunnels that link to this router will - // have the same local ASN. - Asn int64 `json:"asn,omitempty"` + // SnapshotProperties: Properties with which snapshots are created such + // as labels, encryption keys. + SnapshotProperties *ResourcePolicySnapshotSchedulePolicySnapshotProperties `json:"snapshotProperties,omitempty"` - // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to + // ForceSendFields is a list of field names (e.g. "RetentionPolicy") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -25215,87 +27038,72 @@ type RouterBgp struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AdvertiseMode") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "RetentionPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RouterBgp) MarshalJSON() ([]byte, error) { - type NoMethod RouterBgp +func (s *ResourcePolicySnapshotSchedulePolicy) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicySnapshotSchedulePolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterBgpPeer struct { - // AdvertiseMode: User-specified flag to indicate which mode to use for - // advertisement. - // - // Possible values: - // "CUSTOM" - // "DEFAULT" - AdvertiseMode string `json:"advertiseMode,omitempty"` +// ResourcePolicySnapshotSchedulePolicyRetentionPolicy: Policy for +// retention of scheduled snapshots. +type ResourcePolicySnapshotSchedulePolicyRetentionPolicy struct { + // MaxRetentionDays: Maximum age of the snapshot that is allowed to be + // kept. 
+ MaxRetentionDays int64 `json:"maxRetentionDays,omitempty"` - // AdvertisedGroups: User-specified list of prefix groups to advertise - // in custom mode. This field can only be populated if advertise_mode is - // CUSTOM and overrides the list defined for the router (in Bgp - // message). These groups will be advertised in addition to any - // specified prefixes. Leave this field blank to advertise no custom - // groups. + // OnSourceDiskDelete: Specifies the behavior to apply to scheduled + // snapshots when the source disk is deleted. // // Possible values: - // "ALL_SUBNETS" - AdvertisedGroups []string `json:"advertisedGroups,omitempty"` - - // AdvertisedIpRanges: User-specified list of individual IP ranges to - // advertise in custom mode. This field can only be populated if - // advertise_mode is CUSTOM and overrides the list defined for the - // router (in Bgp message). These IP ranges will be advertised in - // addition to any specified groups. Leave this field blank to advertise - // no custom IP ranges. - AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` - - // AdvertisedRoutePriority: The priority of routes advertised to this - // BGP peer. In the case where there is more than one matching route of - // maximum length, the routes with lowest priority value win. - AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` + // "APPLY_RETENTION_POLICY" + // "KEEP_AUTO_SNAPSHOTS" + // "UNSPECIFIED_ON_SOURCE_DISK_DELETE" + OnSourceDiskDelete string `json:"onSourceDiskDelete,omitempty"` - // InterfaceName: Name of the interface the BGP peer is associated with. - InterfaceName string `json:"interfaceName,omitempty"` + // ForceSendFields is a list of field names (e.g. "MaxRetentionDays") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // IpAddress: IP address of the interface inside Google Cloud Platform. - // Only IPv4 is supported. - IpAddress string `json:"ipAddress,omitempty"` + // NullFields is a list of field names (e.g. "MaxRetentionDays") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - // ManagementType: [Output Only] The resource that configures and - // manages this BGP peer. MANAGED_BY_USER is the default value and can - // be managed by you or other users; MANAGED_BY_ATTACHMENT is a BGP peer - // that is configured and managed by Cloud Interconnect, specifically by - // an InterconnectAttachment of type PARTNER. Google will automatically - // create, update, and delete this type of BGP peer when the PARTNER - // InterconnectAttachment is created, updated, or deleted. 
- // - // Possible values: - // "MANAGED_BY_ATTACHMENT" - // "MANAGED_BY_USER" - ManagementType string `json:"managementType,omitempty"` +func (s *ResourcePolicySnapshotSchedulePolicyRetentionPolicy) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicySnapshotSchedulePolicyRetentionPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // Name: Name of this BGP peer. The name must be 1-63 characters long - // and comply with RFC1035. - Name string `json:"name,omitempty"` +// ResourcePolicySnapshotSchedulePolicySchedule: A schedule for disks +// where the schedueled operations are performed. +type ResourcePolicySnapshotSchedulePolicySchedule struct { + DailySchedule *ResourcePolicyDailyCycle `json:"dailySchedule,omitempty"` - // PeerAsn: Peer BGP Autonomous System Number (ASN). For VPN use case, - // this value can be different for every tunnel. - PeerAsn int64 `json:"peerAsn,omitempty"` + HourlySchedule *ResourcePolicyHourlyCycle `json:"hourlySchedule,omitempty"` - // PeerIpAddress: IP address of the BGP interface outside Google cloud. - // Only IPv4 is supported. - PeerIpAddress string `json:"peerIpAddress,omitempty"` + WeeklySchedule *ResourcePolicyWeeklyCycle `json:"weeklySchedule,omitempty"` - // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to + // ForceSendFields is a list of field names (e.g. "DailySchedule") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -25303,7 +27111,7 @@ type RouterBgpPeer struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AdvertiseMode") to include + // NullFields is a list of field names (e.g. "DailySchedule") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -25312,51 +27120,101 @@ type RouterBgpPeer struct { NullFields []string `json:"-"` } -func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { - type NoMethod RouterBgpPeer +func (s *ResourcePolicySnapshotSchedulePolicySchedule) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicySnapshotSchedulePolicySchedule raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterInterface struct { - // IpRange: IP address and range of the interface. The IP range must be - // in the RFC3927 link-local IP space. The value must be a - // CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not - // truncate the address as it represents the IP address of the - // interface. - IpRange string `json:"ipRange,omitempty"` +// ResourcePolicySnapshotSchedulePolicySnapshotProperties: Specified +// snapshot properties for scheduled snapshots created by this policy. +type ResourcePolicySnapshotSchedulePolicySnapshotProperties struct { + // GuestFlush: Indication to perform a ?guest aware? snapshot. + GuestFlush bool `json:"guestFlush,omitempty"` - // LinkedInterconnectAttachment: URI of the linked interconnect - // attachment. It must be in the same region as the router. Each - // interface can have at most one linked resource and it could either be - // a VPN Tunnel or an interconnect attachment. 
- LinkedInterconnectAttachment string `json:"linkedInterconnectAttachment,omitempty"` + // Labels: Labels to apply to scheduled snapshots. These can be later + // modified by the setLabels method. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` - // LinkedVpnTunnel: URI of the linked VPN tunnel. It must be in the same - // region as the router. Each interface can have at most one linked - // resource and it could either be a VPN Tunnel or an interconnect - // attachment. - LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` + // StorageLocations: GCS bucket storage location of the auto snapshot + // (regional or multi-regional). + StorageLocations []string `json:"storageLocations,omitempty"` - // ManagementType: [Output Only] The resource that configures and - // manages this interface. MANAGED_BY_USER is the default value and can - // be managed by you or other users; MANAGED_BY_ATTACHMENT is an - // interface that is configured and managed by Cloud Interconnect, - // specifically by an InterconnectAttachment of type PARTNER. Google - // will automatically create, update, and delete this type of interface - // when the PARTNER InterconnectAttachment is created, updated, or - // deleted. + // ForceSendFields is a list of field names (e.g. "GuestFlush") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "GuestFlush") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResourcePolicySnapshotSchedulePolicySnapshotProperties) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicySnapshotSchedulePolicySnapshotProperties + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ResourcePolicyWeeklyCycle: Time window specified for weekly +// operations. +type ResourcePolicyWeeklyCycle struct { + // DayOfWeeks: Up to 7 intervals/windows, one for each day of the week. + DayOfWeeks []*ResourcePolicyWeeklyCycleDayOfWeek `json:"dayOfWeeks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DayOfWeeks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DayOfWeeks") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResourcePolicyWeeklyCycle) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyWeeklyCycle + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ResourcePolicyWeeklyCycleDayOfWeek struct { + // Day: Allows to define schedule that runs specified day of the week. // // Possible values: - // "MANAGED_BY_ATTACHMENT" - // "MANAGED_BY_USER" - ManagementType string `json:"managementType,omitempty"` + // "FRIDAY" + // "INVALID" + // "MONDAY" + // "SATURDAY" + // "SUNDAY" + // "THURSDAY" + // "TUESDAY" + // "WEDNESDAY" + Day string `json:"day,omitempty"` - // Name: Name of this interface entry. The name must be 1-63 characters - // long and comply with RFC1035. - Name string `json:"name,omitempty"` + // Duration: [Output only] Duration of the time window, automatically + // chosen to be smallest possible in the given scenario. + Duration string `json:"duration,omitempty"` - // ForceSendFields is a list of field names (e.g. "IpRange") to + // StartTime: Time within the window to start the operations. It must be + // in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. + StartTime string `json:"startTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Day") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -25364,8 +27222,8 @@ type RouterInterface struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpRange") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Day") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -25373,68 +27231,158 @@ type RouterInterface struct { NullFields []string `json:"-"` } -func (s *RouterInterface) MarshalJSON() ([]byte, error) { - type NoMethod RouterInterface +func (s *ResourcePolicyWeeklyCycleDayOfWeek) MarshalJSON() ([]byte, error) { + type NoMethod ResourcePolicyWeeklyCycleDayOfWeek raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterList: Contains a list of Router resources. -type RouterList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +// Route: Represents a Route resource. A route specifies how certain +// packets should be handled by the network. Routes are associated with +// instances by tags and the set of routes for a particular instance is +// called its routing table. +// +// For each packet leaving an instance, the system searches that +// instance's routing table for a single best matching route. Routes +// match packets by destination IP address, preferring smaller or more +// specific ranges over larger ones. If there is a tie, the system +// selects the route with the smallest priority value. If there is still +// a tie, it uses the layer three and four packet headers to select just +// one of the remaining matching routes. 
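// Illustrative sketch (editorial, not part of the upstream generated file or of this
// diff): how the snapshot-schedule types added above fit together. A hedged example,
// assuming it sits in the same package as the generated types; all field names and
// enum strings are taken from the declarations and doc comments in this excerpt,
// while the concrete values (day count, start time, labels, storage location) are
// arbitrary placeholders.
func exampleSnapshotSchedulePolicy() *ResourcePolicySnapshotSchedulePolicy {
	return &ResourcePolicySnapshotSchedulePolicy{
		// Take a snapshot every other day, starting at 04:00 UTC.
		Schedule: &ResourcePolicySnapshotSchedulePolicySchedule{
			DailySchedule: &ResourcePolicyDailyCycle{
				DaysInCycle: 2,
				StartTime:   "04:00",
			},
		},
		// Keep snapshots for 14 days; keep auto snapshots if the source disk is deleted.
		RetentionPolicy: &ResourcePolicySnapshotSchedulePolicyRetentionPolicy{
			MaxRetentionDays:   14,
			OnSourceDiskDelete: "KEEP_AUTO_SNAPSHOTS",
		},
		// Label the scheduled snapshots and pin their storage location.
		SnapshotProperties: &ResourcePolicySnapshotSchedulePolicySnapshotProperties{
			GuestFlush:       false,
			Labels:           map[string]string{"schedule": "daily"},
			StorageLocations: []string{"us-central1"},
		},
	}
}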
The packet is then forwarded as +// specified by the nextHop field of the winning route - either to +// another instance destination, an instance gateway, or a Google +// Compute Engine-operated gateway. +// +// Packets that do not match any route in the sending instance's routing +// table are dropped. (== resource_for beta.routes ==) (== resource_for +// v1.routes ==) +type Route struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Items: A list of Router resources. - Items []*Router `json:"items,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#router for - // routers. + // DestRange: The destination range of outgoing packets that this route + // applies to. Only IPv4 is supported. + DestRange string `json:"destRange,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of this resource. Always compute#routes for + // Route resources. Kind string `json:"kind,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. + // Network: Fully-qualified URL of the network that this route applies + // to. + Network string `json:"network,omitempty"` + + // NextHopGateway: The URL to a gateway that should handle matching + // packets. You can only specify the internet gateway using a full or + // partial valid URL: + // projects//global/gateways/default-internet-gateway + NextHopGateway string `json:"nextHopGateway,omitempty"` + + // NextHopIlb: The URL to a forwarding rule of type + // loadBalancingScheme=INTERNAL that should handle matching packets. You + // can only specify the forwarding rule as a partial or full URL. For + // example, the following are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule + // - regions/region/forwardingRules/forwardingRule Note that this can + // only be used when the destination_range is a public (non-RFC 1918) IP + // CIDR range. + NextHopIlb string `json:"nextHopIlb,omitempty"` + + // NextHopInstance: The URL to an instance that should handle matching + // packets. You can specify this as a full or partial URL. 
For + // example: + // https://www.googleapis.com/compute/v1/projects/project/zones/ + // zone/instances/ + NextHopInstance string `json:"nextHopInstance,omitempty"` + + // NextHopInterconnectAttachment: [Output Only] The URL to an + // InterconnectAttachment which is the next hop for the route. This + // field will only be populated for the dynamic routes generated by + // Cloud Router with a linked interconnectAttachment. + NextHopInterconnectAttachment string `json:"nextHopInterconnectAttachment,omitempty"` + + // NextHopIp: The network IP address of an instance that should handle + // matching packets. Only IPv4 is supported. + NextHopIp string `json:"nextHopIp,omitempty"` + + // NextHopNetwork: The URL of the local network if it should handle + // matching packets. + NextHopNetwork string `json:"nextHopNetwork,omitempty"` + + // NextHopPeering: [Output Only] The network peering name that should + // handle matching packets, which should conform to RFC1035. + NextHopPeering string `json:"nextHopPeering,omitempty"` + + // NextHopVpnTunnel: The URL to a VpnTunnel that should handle matching + // packets. + NextHopVpnTunnel string `json:"nextHopVpnTunnel,omitempty"` + + // Priority: The priority of this route. Priority is used to break ties + // in cases where there is more than one matching route of equal prefix + // length. In the case of two routes with equal prefix length, the one + // with the lowest-numbered priority value wins. Default value is 1000. + // Valid range is 0 through 65535. + Priority int64 `json:"priority,omitempty"` + + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. SelfLink string `json:"selfLink,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *RouterListWarning `json:"warning,omitempty"` + // Tags: A list of instance tags to which this route applies. + Tags []string `json:"tags,omitempty"` + + // Warnings: [Output Only] If potential misconfigurations are detected + // for this route, this field will be populated with warning messages. + Warnings []*RouteWarnings `json:"warnings,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. 
It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RouterList) MarshalJSON() ([]byte, error) { - type NoMethod RouterList +func (s *Route) MarshalJSON() ([]byte, error) { + type NoMethod Route raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterListWarning: [Output Only] Informational warning message. -type RouterListWarning struct { +type RouteWarnings struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -25468,7 +27416,7 @@ type RouterListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RouterListWarningData `json:"data,omitempty"` + Data []*RouteWarningsData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -25491,13 +27439,13 @@ type RouterListWarning struct { NullFields []string `json:"-"` } -func (s *RouterListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RouterListWarning +func (s *RouteWarnings) MarshalJSON() ([]byte, error) { + type NoMethod RouteWarnings raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterListWarningData struct { +type RouteWarningsData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -25528,124 +27476,107 @@ type RouterListWarningData struct { NullFields []string `json:"-"` } -func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RouterListWarningData +func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { + type NoMethod RouteWarningsData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterNat: Represents a Nat resource. It enables the VMs within the -// specified subnetworks to access Internet without external IP -// addresses. It specifies a list of subnetworks (and the ranges within) -// that want to use NAT. Customers can also provide the external IPs -// that would be used for NAT. GCP would auto-allocate ephemeral IPs if -// no external IPs are provided. -type RouterNat struct { - // IcmpIdleTimeoutSec: Timeout (in seconds) for ICMP connections. - // Defaults to 30s if not set. - IcmpIdleTimeoutSec int64 `json:"icmpIdleTimeoutSec,omitempty"` - - // MinPortsPerVm: Minimum number of ports allocated to a VM from this - // NAT config. If not set, a default number of ports is allocated to a - // VM. This gets rounded up to the nearest power of 2. Eg. if the value - // of this field is 50, at least 64 ports will be allocated to a VM. - MinPortsPerVm int64 `json:"minPortsPerVm,omitempty"` - - // Name: Unique name of this Nat service. The name must be 1-63 - // characters long and comply with RFC1035. - Name string `json:"name,omitempty"` - - // NatIpAllocateOption: Specify the NatIpAllocateOption. If it is - // AUTO_ONLY, then nat_ip should be empty. - // - // Possible values: - // "AUTO_ONLY" - // "MANUAL_ONLY" - NatIpAllocateOption string `json:"natIpAllocateOption,omitempty"` +// RouteList: Contains a list of Route resources. 
+type RouteList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // NatIps: A list of URLs of the IP resources used for this Nat service. - // These IPs must be valid static external IP addresses assigned to the - // project. max_length is subject to change post alpha. - NatIps []string `json:"natIps,omitempty"` + // Items: A list of Route resources. + Items []*Route `json:"items,omitempty"` - // SourceSubnetworkIpRangesToNat: Specify the Nat option. If this field - // contains ALL_SUBNETWORKS_ALL_IP_RANGES or - // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any - // other Router.Nat section in any Router for this network in this - // region. - // - // Possible values: - // "ALL_SUBNETWORKS_ALL_IP_RANGES" - // "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES" - // "LIST_OF_SUBNETWORKS" - SourceSubnetworkIpRangesToNat string `json:"sourceSubnetworkIpRangesToNat,omitempty"` + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` - // Subnetworks: A list of Subnetwork resources whose traffic should be - // translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS - // is selected for the SubnetworkIpRangeToNatOption above. - Subnetworks []*RouterNatSubnetworkToNat `json:"subnetworks,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // TcpEstablishedIdleTimeoutSec: Timeout (in seconds) for TCP - // established connections. Defaults to 1200s if not set. - TcpEstablishedIdleTimeoutSec int64 `json:"tcpEstablishedIdleTimeoutSec,omitempty"` + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` - // TcpTransitoryIdleTimeoutSec: Timeout (in seconds) for TCP transitory - // connections. Defaults to 30s if not set. - TcpTransitoryIdleTimeoutSec int64 `json:"tcpTransitoryIdleTimeoutSec,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RouteListWarning `json:"warning,omitempty"` - // UdpIdleTimeoutSec: Timeout (in seconds) for UDP connections. Defaults - // to 30s if not set. - UdpIdleTimeoutSec int64 `json:"udpIdleTimeoutSec,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "IcmpIdleTimeoutSec") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IcmpIdleTimeoutSec") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. 
However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterNat) MarshalJSON() ([]byte, error) { - type NoMethod RouterNat +func (s *RouteList) MarshalJSON() ([]byte, error) { + type NoMethod RouteList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterNatSubnetworkToNat: Defines the IP ranges that want to use NAT -// for a subnetwork. -type RouterNatSubnetworkToNat struct { - // Name: URL for the subnetwork resource to use NAT. - Name string `json:"name,omitempty"` - - // SecondaryIpRangeNames: A list of the secondary ranges of the - // Subnetwork that are allowed to use NAT. This can be populated only if - // "LIST_OF_SECONDARY_IP_RANGES" is one of the values in - // source_ip_ranges_to_nat. - SecondaryIpRangeNames []string `json:"secondaryIpRangeNames,omitempty"` - - // SourceIpRangesToNat: Specify the options for NAT ranges in the - // Subnetwork. All usages of single value are valid except - // NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple - // values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] - // Default: [ALL_IP_RANGES] +// RouteListWarning: [Output Only] Informational warning message. +type RouteListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. // // Possible values: - // "ALL_IP_RANGES" - // "LIST_OF_SECONDARY_IP_RANGES" - // "PRIMARY_IP_RANGE" - SourceIpRangesToNat []string `json:"sourceIpRangesToNat,omitempty"` + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "Name") to + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouteListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. 
By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -25653,7 +27584,7 @@ type RouterNatSubnetworkToNat struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Name") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -25662,27 +27593,27 @@ type RouterNatSubnetworkToNat struct { NullFields []string `json:"-"` } -func (s *RouterNatSubnetworkToNat) MarshalJSON() ([]byte, error) { - type NoMethod RouterNatSubnetworkToNat +func (s *RouteListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RouteListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatus struct { - // BestRoutes: Best routes for this router's network. - BestRoutes []*Route `json:"bestRoutes,omitempty"` - - // BestRoutesForRouter: Best routes learned by this router. - BestRoutesForRouter []*Route `json:"bestRoutesForRouter,omitempty"` - - BgpPeerStatus []*RouterStatusBgpPeerStatus `json:"bgpPeerStatus,omitempty"` - - NatStatus []*RouterStatusNatStatus `json:"natStatus,omitempty"` +type RouteListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Network: URI of the network to which this router belongs. - Network string `json:"network,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "BestRoutes") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -25690,8 +27621,8 @@ type RouterStatus struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BestRoutes") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
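// Illustrative sketch (editorial, not part of the upstream generated file or of this
// diff): assembling the Route type added above. A hedged example, assuming it sits
// in the same package as the generated types; only fields declared in this excerpt
// are used, and the name, network URL, and project segment are made-up placeholders.
func exampleStaticRoute() *Route {
	return &Route{
		Name:        "example-default-route",
		Description: "Send remaining IPv4 traffic through the internet gateway.",
		// DestRange is IPv4 only, per the field's doc comment.
		DestRange: "0.0.0.0/0",
		Network:   "global/networks/example-network",
		// Per the NextHopGateway doc comment above, the internet gateway is
		// referenced with a URL of this form ("example-project" is a placeholder).
		NextHopGateway: "projects/example-project/global/gateways/default-internet-gateway",
		// Lower priority values win ties; the documented default is 1000.
		Priority: 1000,
		// Only instances carrying one of these tags receive the route.
		Tags: []string{"example-tag"},
	}
}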
@@ -25699,135 +27630,72 @@ type RouterStatus struct { NullFields []string `json:"-"` } -func (s *RouterStatus) MarshalJSON() ([]byte, error) { - type NoMethod RouterStatus +func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RouteListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatusBgpPeerStatus struct { - // AdvertisedRoutes: Routes that were advertised to the remote BGP peer - AdvertisedRoutes []*Route `json:"advertisedRoutes,omitempty"` - - // IpAddress: IP address of the local BGP interface. - IpAddress string `json:"ipAddress,omitempty"` - - // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. - LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` - - // Name: Name of this BGP peer. Unique within the Routers resource. - Name string `json:"name,omitempty"` - - // NumLearnedRoutes: Number of routes learned from the remote BGP Peer. - NumLearnedRoutes int64 `json:"numLearnedRoutes,omitempty"` - - // PeerIpAddress: IP address of the remote BGP interface. - PeerIpAddress string `json:"peerIpAddress,omitempty"` - - // State: BGP state as specified in RFC1771. - State string `json:"state,omitempty"` - - // Status: Status of the BGP peer: {UP, DOWN} - // - // Possible values: - // "DOWN" - // "UNKNOWN" - // "UP" - Status string `json:"status,omitempty"` - - // Uptime: Time this session has been up. Format: 14 years, 51 weeks, 6 - // days, 23 hours, 59 minutes, 59 seconds - Uptime string `json:"uptime,omitempty"` +// Router: Router resource. +type Router struct { + // Bgp: BGP information specific to this router. + Bgp *RouterBgp `json:"bgp,omitempty"` - // UptimeSeconds: Time this session has been up, in seconds. Format: 145 - UptimeSeconds string `json:"uptimeSeconds,omitempty"` + // BgpPeers: BGP information that must be configured into the routing + // stack to establish BGP peering. This information must specify the + // peer ASN and either the interface name, IP address, or peer IP + // address. Please refer to RFC4273. + BgpPeers []*RouterBgpPeer `json:"bgpPeers,omitempty"` - // ForceSendFields is a list of field names (e.g. "AdvertisedRoutes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // NullFields is a list of field names (e.g. "AdvertisedRoutes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} + // Description: An optional description of this resource. Provide this + // property when you create the resource. 
+ Description string `json:"description,omitempty"` -func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { - type NoMethod RouterStatusBgpPeerStatus - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` -// RouterStatusNatStatus: Status of a NAT contained in this router. -type RouterStatusNatStatus struct { - // AutoAllocatedNatIps: A list of IPs auto-allocated for NAT. Example: - // ["1.1.1.1", "129.2.16.89"] - AutoAllocatedNatIps []string `json:"autoAllocatedNatIps,omitempty"` + // Interfaces: Router interfaces. Each interface requires either one + // linked resource, (for example, linkedVpnTunnel), or IP address and IP + // address range (for example, ipRange), or both. + Interfaces []*RouterInterface `json:"interfaces,omitempty"` - // MinExtraNatIpsNeeded: The number of extra IPs to allocate. This will - // be greater than 0 only if user-specified IPs are NOT enough to allow - // all configured VMs to use NAT. This value is meaningful only when - // auto-allocation of NAT IPs is *not* used. - MinExtraNatIpsNeeded int64 `json:"minExtraNatIpsNeeded,omitempty"` + // Kind: [Output Only] Type of resource. Always compute#router for + // routers. + Kind string `json:"kind,omitempty"` - // Name: Unique name of this NAT. + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // NumVmEndpointsWithNatMappings: Number of VM endpoints (i.e., Nics) - // that can use NAT. - NumVmEndpointsWithNatMappings int64 `json:"numVmEndpointsWithNatMappings,omitempty"` - - // UserAllocatedNatIpResources: A list of fully qualified URLs of - // reserved IP address resources. - UserAllocatedNatIpResources []string `json:"userAllocatedNatIpResources,omitempty"` - - // UserAllocatedNatIps: A list of IPs user-allocated for NAT. They will - // be raw IP strings like "179.12.26.133". - UserAllocatedNatIps []string `json:"userAllocatedNatIps,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AutoAllocatedNatIps") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AutoAllocatedNatIps") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} + // Nats: A list of NAT services created in this router. 
+ Nats []*RouterNat `json:"nats,omitempty"` -func (s *RouterStatusNatStatus) MarshalJSON() ([]byte, error) { - type NoMethod RouterStatusNatStatus - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Network: URI of the network to which this router belongs. + Network string `json:"network,omitempty"` -type RouterStatusResponse struct { - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` + // Region: [Output Only] URI of the region where the router resides. You + // must specify this field as part of the HTTP request URL. It is not + // settable as a field in the request body. + Region string `json:"region,omitempty"` - Result *RouterStatus `json:"result,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // ForceSendFields is a list of field names (e.g. "Bgp") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -25835,7 +27703,7 @@ type RouterStatusResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API + // NullFields is a list of field names (e.g. "Bgp") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -25844,21 +27712,23 @@ type RouterStatusResponse struct { NullFields []string `json:"-"` } -func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { - type NoMethod RouterStatusResponse +func (s *Router) MarshalJSON() ([]byte, error) { + type NoMethod Router raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersPreviewResponse struct { - // Resource: Preview of given router. - Resource *Router `json:"resource,omitempty"` +// RouterAdvertisedIpRange: Description-tagged IP ranges for the router +// to advertise. +type RouterAdvertisedIpRange struct { + // Description: User-specified description for the IP range. + Description string `json:"description,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Range: The IP range to advertise. The value must be a CIDR-formatted + // string. + Range string `json:"range,omitempty"` - // ForceSendFields is a list of field names (e.g. "Resource") to + // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -25866,30 +27736,52 @@ type RoutersPreviewResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Resource") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { - type NoMethod RoutersPreviewResponse +func (s *RouterAdvertisedIpRange) MarshalJSON() ([]byte, error) { + type NoMethod RouterAdvertisedIpRange raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersScopedList struct { - // Routers: A list of routers contained in this scope. - Routers []*Router `json:"routers,omitempty"` +// RouterAggregatedList: Contains a list of routers. +type RouterAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Warning: Informational warning which replaces the list of routers - // when the list is empty. - Warning *RoutersScopedListWarning `json:"warning,omitempty"` + // Items: A list of Router resources. + Items map[string]RoutersScopedList `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "Routers") to + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *RouterAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -25897,8 +27789,8 @@ type RoutersScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Routers") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
@@ -25906,15 +27798,15 @@ type RoutersScopedList struct { NullFields []string `json:"-"` } -func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { - type NoMethod RoutersScopedList +func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod RouterAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RoutersScopedListWarning: Informational warning which replaces the -// list of routers when the list is empty. -type RoutersScopedListWarning struct { +// RouterAggregatedListWarning: [Output Only] Informational warning +// message. +type RouterAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -25948,7 +27840,7 @@ type RoutersScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RoutersScopedListWarningData `json:"data,omitempty"` + Data []*RouterAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -25971,13 +27863,13 @@ type RoutersScopedListWarning struct { NullFields []string `json:"-"` } -func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RoutersScopedListWarning +func (s *RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RouterAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersScopedListWarningData struct { +type RouterAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -26008,51 +27900,46 @@ type RoutersScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RoutersScopedListWarningData +func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RouterAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Rule: A rule to be applied in a Policy. -type Rule struct { - // Action: Required +type RouterBgp struct { + // AdvertiseMode: User-specified flag to indicate which mode to use for + // advertisement. The options are DEFAULT or CUSTOM. // // Possible values: - // "ALLOW" - // "ALLOW_WITH_LOG" - // "DENY" - // "DENY_WITH_LOG" - // "LOG" - // "NO_ACTION" - Action string `json:"action,omitempty"` - - // Conditions: Additional restrictions that must be met. All conditions - // must pass for the rule to match. - Conditions []*Condition `json:"conditions,omitempty"` - - // Description: Human-readable description of the rule. - Description string `json:"description,omitempty"` - - // Ins: If one or more 'in' clauses are specified, the rule matches if - // the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. - Ins []string `json:"ins,omitempty"` + // "CUSTOM" + // "DEFAULT" + AdvertiseMode string `json:"advertiseMode,omitempty"` - // LogConfigs: The config returned to callers of - // tech.iam.IAM.CheckPolicy for any entries that match the LOG action. 
- LogConfigs []*LogConfig `json:"logConfigs,omitempty"` + // AdvertisedGroups: User-specified list of prefix groups to advertise + // in custom mode. This field can only be populated if advertise_mode is + // CUSTOM and is advertised to all peers of the router. These groups + // will be advertised in addition to any specified prefixes. Leave this + // field blank to advertise no custom groups. + // + // Possible values: + // "ALL_SUBNETS" + AdvertisedGroups []string `json:"advertisedGroups,omitempty"` - // NotIns: If one or more 'not_in' clauses are specified, the rule - // matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the - // entries. - NotIns []string `json:"notIns,omitempty"` + // AdvertisedIpRanges: User-specified list of individual IP ranges to + // advertise in custom mode. This field can only be populated if + // advertise_mode is CUSTOM and is advertised to all peers of the + // router. These IP ranges will be advertised in addition to any + // specified groups. Leave this field blank to advertise no custom IP + // ranges. + AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` - // Permissions: A permission is a string of form '..' (e.g., - // 'storage.buckets.list'). A value of '*' matches all permissions, and - // a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs. - Permissions []string `json:"permissions,omitempty"` + // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 + // private ASN, either 16-bit or 32-bit. The value will be fixed for + // this router resource. All VPN tunnels that link to this router will + // have the same local ASN. + Asn int64 `json:"asn,omitempty"` - // ForceSendFields is a list of field names (e.g. "Action") to + // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -26060,78 +27947,93 @@ type Rule struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Action") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AdvertiseMode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Rule) MarshalJSON() ([]byte, error) { - type NoMethod Rule +func (s *RouterBgp) MarshalJSON() ([]byte, error) { + type NoMethod RouterBgp raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SSLHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. - Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. 
- PortName string `json:"portName,omitempty"` - - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: - // USE_FIXED_PORT: The port number in - // port - // is used for health checking. - // USE_NAMED_PORT: The - // portName - // is used for health checking. - // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for - // each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. - // +type RouterBgpPeer struct { + // AdvertiseMode: User-specified flag to indicate which mode to use for + // advertisement. // - // If not specified, SSL health check follows behavior specified - // in - // port - // and - // portName - // fields. + // Possible values: + // "CUSTOM" + // "DEFAULT" + AdvertiseMode string `json:"advertiseMode,omitempty"` + + // AdvertisedGroups: User-specified list of prefix groups to advertise + // in custom mode, which can take one of the following options: + // - ALL_SUBNETS: Advertises all available subnets, including peer VPC + // subnets. + // - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. + // - ALL_PEER_VPC_SUBNETS: Advertises peer subnets of the router's VPC + // network. Note that this field can only be populated if advertise_mode + // is CUSTOM and overrides the list defined for the router (in the "bgp" + // message). These groups are advertised in addition to any specified + // prefixes. Leave this field blank to advertise no custom groups. // // Possible values: - // "USE_FIXED_PORT" - // "USE_NAMED_PORT" - // "USE_SERVING_PORT" - PortSpecification string `json:"portSpecification,omitempty"` + // "ALL_SUBNETS" + AdvertisedGroups []string `json:"advertisedGroups,omitempty"` - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. + // AdvertisedIpRanges: User-specified list of individual IP ranges to + // advertise in custom mode. This field can only be populated if + // advertise_mode is CUSTOM and overrides the list defined for the + // router (in the "bgp" message). These IP ranges are advertised in + // addition to any specified groups. Leave this field blank to advertise + // no custom IP ranges. + AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` + + // AdvertisedRoutePriority: The priority of routes advertised to this + // BGP peer. Where there is more than one matching route of maximum + // length, the routes with the lowest priority value win. + AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` + + // InterfaceName: Name of the interface the BGP peer is associated with. + InterfaceName string `json:"interfaceName,omitempty"` + + // IpAddress: IP address of the interface inside Google Cloud Platform. + // Only IPv4 is supported. + IpAddress string `json:"ipAddress,omitempty"` + + // ManagementType: [Output Only] The resource that configures and + // manages this BGP peer. + // - MANAGED_BY_USER is the default value and can be managed by you or + // other users + // - MANAGED_BY_ATTACHMENT is a BGP peer that is configured and managed + // by Cloud Interconnect, specifically by an InterconnectAttachment of + // type PARTNER. Google automatically creates, updates, and deletes this + // type of BGP peer when the PARTNER InterconnectAttachment is created, + // updated, or deleted. 
// // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` + // "MANAGED_BY_ATTACHMENT" + // "MANAGED_BY_USER" + ManagementType string `json:"managementType,omitempty"` - // Request: The application data to send once the SSL connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. - Request string `json:"request,omitempty"` + // Name: Name of this BGP peer. The name must be 1-63 characters long + // and comply with RFC1035. + Name string `json:"name,omitempty"` - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. - Response string `json:"response,omitempty"` + // PeerAsn: Peer BGP Autonomous System Number (ASN). Each BGP interface + // may use a different value. + PeerAsn int64 `json:"peerAsn,omitempty"` - // ForceSendFields is a list of field names (e.g. "Port") to + // PeerIpAddress: IP address of the BGP interface outside Google Cloud + // Platform. Only IPv4 is supported. + PeerIpAddress string `json:"peerIpAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -26139,53 +28041,61 @@ type SSLHealthCheck struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Port") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AdvertiseMode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { - type NoMethod SSLHealthCheck +func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { + type NoMethod RouterBgpPeer raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Scheduling: Sets the scheduling options for an Instance. -type Scheduling struct { - // AutomaticRestart: Specifies whether the instance should be - // automatically restarted if it is terminated by Compute Engine (not - // terminated by a user). You can only set the automatic restart option - // for standard instances. Preemptible instances cannot be automatically - // restarted. - // - // By default, this is set to true so an instance is automatically - // restarted if it is terminated by Compute Engine. - AutomaticRestart *bool `json:"automaticRestart,omitempty"` +type RouterInterface struct { + // IpRange: IP address and range of the interface. The IP range must be + // in the RFC3927 link-local IP address space. The value must be a + // CIDR-formatted string, for example: 169.254.0.1/30. 
NOTE: Do not + // truncate the address as it represents the IP address of the + // interface. + IpRange string `json:"ipRange,omitempty"` - // NodeAffinities: A set of node affinity and anti-affinity. - NodeAffinities []*SchedulingNodeAffinity `json:"nodeAffinities,omitempty"` + // LinkedInterconnectAttachment: URI of the linked Interconnect + // attachment. It must be in the same region as the router. Each + // interface can have one linked resource, which can be either be a VPN + // tunnel or an Interconnect attachment. + LinkedInterconnectAttachment string `json:"linkedInterconnectAttachment,omitempty"` - // OnHostMaintenance: Defines the maintenance behavior for this - // instance. For standard instances, the default behavior is MIGRATE. - // For preemptible instances, the default and only possible behavior is - // TERMINATE. For more information, see Setting Instance Scheduling - // Options. + // LinkedVpnTunnel: URI of the linked VPN tunnel, which must be in the + // same region as the router. Each interface can have one linked + // resource, which can be either a VPN tunnel or an Interconnect + // attachment. + LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` + + // ManagementType: [Output Only] The resource that configures and + // manages this interface. + // - MANAGED_BY_USER is the default value and can be managed directly by + // users. + // - MANAGED_BY_ATTACHMENT is an interface that is configured and + // managed by Cloud Interconnect, specifically, by an + // InterconnectAttachment of type PARTNER. Google automatically creates, + // updates, and deletes this type of interface when the PARTNER + // InterconnectAttachment is created, updated, or deleted. // // Possible values: - // "MIGRATE" - // "TERMINATE" - OnHostMaintenance string `json:"onHostMaintenance,omitempty"` + // "MANAGED_BY_ATTACHMENT" + // "MANAGED_BY_USER" + ManagementType string `json:"managementType,omitempty"` - // Preemptible: Defines whether the instance is preemptible. This can - // only be set during instance creation, it cannot be set or changed - // after the instance has been created. - Preemptible bool `json:"preemptible,omitempty"` + // Name: Name of this interface entry. The name must be 1-63 characters + // long and comply with RFC1035. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to + // ForceSendFields is a list of field names (e.g. "IpRange") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -26193,225 +28103,32 @@ type Scheduling struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutomaticRestart") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "IpRange") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Scheduling) MarshalJSON() ([]byte, error) { - type NoMethod Scheduling +func (s *RouterInterface) MarshalJSON() ([]byte, error) { + type NoMethod RouterInterface raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SchedulingNodeAffinity: Node Affinity: the configuration of desired -// nodes onto which this Instance could be scheduled. -type SchedulingNodeAffinity struct { - // Key: Corresponds to the label key of Node resource. - Key string `json:"key,omitempty"` - - // Operator: Defines the operation of node selection. - // - // Possible values: - // "IN" - // "NOT_IN" - // "OPERATOR_UNSPECIFIED" - Operator string `json:"operator,omitempty"` - - // Values: Corresponds to the label values of Node resource. - Values []string `json:"values,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SchedulingNodeAffinity) MarshalJSON() ([]byte, error) { - type NoMethod SchedulingNodeAffinity - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SecurityPoliciesListPreconfiguredExpressionSetsResponse struct { - PreconfiguredExpressionSets *SecurityPoliciesWafConfig `json:"preconfiguredExpressionSets,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. - // "PreconfiguredExpressionSets") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. - // "PreconfiguredExpressionSets") to include in API requests with the - // JSON null value. By default, fields with empty values are omitted - // from API requests. However, any field with an empty value appearing - // in NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *SecurityPoliciesListPreconfiguredExpressionSetsResponse) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPoliciesListPreconfiguredExpressionSetsResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SecurityPoliciesWafConfig struct { - WafRules *PreconfiguredWafSet `json:"wafRules,omitempty"` - - // ForceSendFields is a list of field names (e.g. "WafRules") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "WafRules") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SecurityPoliciesWafConfig) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPoliciesWafConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SecurityPolicy: A security policy is comprised of one or more rules. -// It can also be associated with one or more 'targets'. (== -// resource_for v1.securityPolicies ==) (== resource_for -// beta.securityPolicies ==) -type SecurityPolicy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Fingerprint: Specifies a fingerprint for this resource, which is - // essentially a hash of the metadata's contents and used for optimistic - // locking. The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update metadata. You must - // always provide an up-to-date fingerprint hash in order to update or - // change metadata, otherwise the request will fail with error 412 - // conditionNotMet. - // - // To see the latest fingerprint, make get() request to the security - // policy. - Fingerprint string `json:"fingerprint,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output only] Type of the resource. Always - // compute#securityPolicyfor security policies - Kind string `json:"kind,omitempty"` - - // LabelFingerprint: A fingerprint for the labels being applied to this - // security policy, which is essentially a hash of the labels set used - // for optimistic locking. The fingerprint is initially generated by - // Compute Engine and changes after every request to modify or update - // labels. You must always provide an up-to-date fingerprint hash in - // order to update or change labels. - // - // To see the latest fingerprint, make get() request to the security - // policy. 
- LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels to apply to this security policy resource. These can - // be later modified by the setLabels method. Each label key/value must - // comply with RFC1035. Label values may be empty. - Labels map[string]string `json:"labels,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Rules: A list of rules that belong to this policy. There must always - // be a default rule (rule with priority 2147483647 and match "*"). If - // no rules are provided when creating a security policy, a default rule - // with action "allow" will be added. - Rules []*SecurityPolicyRule `json:"rules,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *SecurityPolicy) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SecurityPolicyList struct { +// RouterList: Contains a list of Router resources. +type RouterList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of SecurityPolicy resources. - Items []*SecurityPolicy `json:"items,omitempty"` + // Items: A list of Router resources. + Items []*Router `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#securityPolicyList for listsof securityPolicies + // Kind: [Output Only] Type of resource. Always compute#router for + // routers. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -26422,8 +28139,11 @@ type SecurityPolicyList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. 
- Warning *SecurityPolicyListWarning `json:"warning,omitempty"` + Warning *RouterListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -26446,15 +28166,14 @@ type SecurityPolicyList struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyList) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyList +func (s *RouterList) MarshalJSON() ([]byte, error) { + type NoMethod RouterList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SecurityPolicyListWarning: [Output Only] Informational warning -// message. -type SecurityPolicyListWarning struct { +// RouterListWarning: [Output Only] Informational warning message. +type RouterListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -26488,7 +28207,7 @@ type SecurityPolicyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SecurityPolicyListWarningData `json:"data,omitempty"` + Data []*RouterListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -26511,13 +28230,13 @@ type SecurityPolicyListWarning struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyListWarning +func (s *RouterListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RouterListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPolicyListWarningData struct { +type RouterListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -26548,24 +28267,101 @@ type SecurityPolicyListWarningData struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyListWarningData +func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RouterListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPolicyReference struct { - SecurityPolicy string `json:"securityPolicy,omitempty"` +// RouterNat: Represents a Nat resource. It enables the VMs within the +// specified subnetworks to access Internet without external IP +// addresses. It specifies a list of subnetworks (and the ranges within) +// that want to use NAT. Customers can also provide the external IPs +// that would be used for NAT. GCP would auto-allocate ephemeral IPs if +// no external IPs are provided. +type RouterNat struct { + // IcmpIdleTimeoutSec: Timeout (in seconds) for ICMP connections. + // Defaults to 30s if not set. + IcmpIdleTimeoutSec int64 `json:"icmpIdleTimeoutSec,omitempty"` - // ForceSendFields is a list of field names (e.g. "SecurityPolicy") to - // unconditionally include in API requests. By default, fields with + // LogConfig: Configure logging on this NAT. + LogConfig *RouterNatLogConfig `json:"logConfig,omitempty"` + + // MinPortsPerVm: Minimum number of ports allocated to a VM from this + // NAT config. If not set, a default number of ports is allocated to a + // VM. 
This is rounded up to the nearest power of 2. For example, if the + // value of this field is 50, at least 64 ports are allocated to a VM. + MinPortsPerVm int64 `json:"minPortsPerVm,omitempty"` + + // Name: Unique name of this Nat service. The name must be 1-63 + // characters long and comply with RFC1035. + Name string `json:"name,omitempty"` + + // NatIpAllocateOption: Specify the NatIpAllocateOption, which can take + // one of the following values: + // - MANUAL_ONLY: Uses only Nat IP addresses provided by customers. When + // there are not enough specified Nat IPs, the Nat service fails for new + // VMs. + // - AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; + // customers can't specify any Nat IPs. When choosing AUTO_ONLY, then + // nat_ip should be empty. + // + // Possible values: + // "AUTO_ONLY" + // "MANUAL_ONLY" + NatIpAllocateOption string `json:"natIpAllocateOption,omitempty"` + + // NatIps: A list of URLs of the IP resources used for this Nat service. + // These IP addresses must be valid static external IP addresses + // assigned to the project. + NatIps []string `json:"natIps,omitempty"` + + // SourceSubnetworkIpRangesToNat: Specify the Nat option, which can take + // one of the following values: + // - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every + // Subnetwork are allowed to Nat. + // - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges + // in every Subnetwork are allowed to Nat. + // - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat + // (specified in the field subnetwork below) The default is + // SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this + // field contains ALL_SUBNETWORKS_ALL_IP_RANGES or + // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any + // other Router.Nat section in any Router for this network in this + // region. + // + // Possible values: + // "ALL_SUBNETWORKS_ALL_IP_RANGES" + // "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES" + // "LIST_OF_SUBNETWORKS" + SourceSubnetworkIpRangesToNat string `json:"sourceSubnetworkIpRangesToNat,omitempty"` + + // Subnetworks: A list of Subnetwork resources whose traffic should be + // translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS + // is selected for the SubnetworkIpRangeToNatOption above. + Subnetworks []*RouterNatSubnetworkToNat `json:"subnetworks,omitempty"` + + // TcpEstablishedIdleTimeoutSec: Timeout (in seconds) for TCP + // established connections. Defaults to 1200s if not set. + TcpEstablishedIdleTimeoutSec int64 `json:"tcpEstablishedIdleTimeoutSec,omitempty"` + + // TcpTransitoryIdleTimeoutSec: Timeout (in seconds) for TCP transitory + // connections. Defaults to 30s if not set. + TcpTransitoryIdleTimeoutSec int64 `json:"tcpTransitoryIdleTimeoutSec,omitempty"` + + // UdpIdleTimeoutSec: Timeout (in seconds) for UDP connections. Defaults + // to 30s if not set. + UdpIdleTimeoutSec int64 `json:"udpIdleTimeoutSec,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IcmpIdleTimeoutSec") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SecurityPolicy") to + // NullFields is a list of field names (e.g. 
"IcmpIdleTimeoutSec") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -26575,47 +28371,29 @@ type SecurityPolicyReference struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyReference +func (s *RouterNat) MarshalJSON() ([]byte, error) { + type NoMethod RouterNat raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SecurityPolicyRule: Represents a rule that describes one or more -// match conditions along with the action to be taken when traffic -// matches this condition (allow or deny). -type SecurityPolicyRule struct { - // Action: The Action to preform when the client connection triggers the - // rule. Can currently be either "allow" or "deny()" where valid values - // for status are 403, 404, and 502. - Action string `json:"action,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Kind: [Output only] Type of the resource. Always - // compute#securityPolicyRule for security policy rules - Kind string `json:"kind,omitempty"` - - // Match: A match condition that incoming traffic is evaluated against. - // If it evaluates to true, the corresponding ?action? is enforced. - Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` - - // Preview: If set to true, the specified action is not enforced. - Preview bool `json:"preview,omitempty"` - - // Priority: An integer indicating the priority of a rule in the list. - // The priority must be a positive value between 0 and 2147483647. Rules - // are evaluated from highest to lowest priority where 0 is the highest - // priority and 2147483647 is the lowest prority. - Priority int64 `json:"priority,omitempty"` +// RouterNatLogConfig: Configuration of logging on a NAT. +type RouterNatLogConfig struct { + // Enable: Indicates whether or not to export logs. This is false by + // default. + Enable bool `json:"enable,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Filter: Specifies the desired filtering of logs on this NAT. If + // unspecified, logs are exported for all connections handled by this + // NAT. + // + // Possible values: + // "ALL" + // "ERRORS_ONLY" + // "TRANSLATIONS_ONLY" + Filter string `json:"filter,omitempty"` - // ForceSendFields is a list of field names (e.g. "Action") to + // ForceSendFields is a list of field names (e.g. "Enable") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -26623,7 +28401,7 @@ type SecurityPolicyRule struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Action") to include in API + // NullFields is a list of field names (e.g. "Enable") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -26632,35 +28410,37 @@ type SecurityPolicyRule struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRule) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRule +func (s *RouterNatLogConfig) MarshalJSON() ([]byte, error) { + type NoMethod RouterNatLogConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SecurityPolicyRuleMatcher: Represents a match condition that incoming -// traffic is evaluated against. Exactly one field must be specified. -type SecurityPolicyRuleMatcher struct { - // Config: The configuration options available when specifying - // versioned_expr. This field must be specified if versioned_expr is - // specified and cannot be specified if versioned_expr is not specified. - Config *SecurityPolicyRuleMatcherConfig `json:"config,omitempty"` +// RouterNatSubnetworkToNat: Defines the IP ranges that want to use NAT +// for a subnetwork. +type RouterNatSubnetworkToNat struct { + // Name: URL for the subnetwork resource that will use NAT. + Name string `json:"name,omitempty"` - // Expr: User defined CEVAL expression. A CEVAL expression is used to - // specify match criteria such as origin.ip, source.region_code and - // contents in the request header. - Expr *Expr `json:"expr,omitempty"` + // SecondaryIpRangeNames: A list of the secondary ranges of the + // Subnetwork that are allowed to use NAT. This can be populated only if + // "LIST_OF_SECONDARY_IP_RANGES" is one of the values in + // source_ip_ranges_to_nat. + SecondaryIpRangeNames []string `json:"secondaryIpRangeNames,omitempty"` - // VersionedExpr: Preconfigured versioned expression. If this field is - // specified, config must also be specified. Available preconfigured - // expressions along with their requirements are: SRC_IPS_V1 - must - // specify the corresponding src_ip_range field in config. + // SourceIpRangesToNat: Specify the options for NAT ranges in the + // Subnetwork. All options of a single value are valid except + // NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple + // values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] + // Default: [ALL_IP_RANGES] // // Possible values: - // "SRC_IPS_V1" - VersionedExpr string `json:"versionedExpr,omitempty"` + // "ALL_IP_RANGES" + // "LIST_OF_SECONDARY_IP_RANGES" + // "PRIMARY_IP_RANGE" + SourceIpRangesToNat []string `json:"sourceIpRangesToNat,omitempty"` - // ForceSendFields is a list of field names (e.g. "Config") to + // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -26668,7 +28448,7 @@ type SecurityPolicyRuleMatcher struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Config") to include in API + // NullFields is a list of field names (e.g. "Name") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -26677,17 +28457,27 @@ type SecurityPolicyRuleMatcher struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRuleMatcher +func (s *RouterNatSubnetworkToNat) MarshalJSON() ([]byte, error) { + type NoMethod RouterNatSubnetworkToNat raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPolicyRuleMatcherConfig struct { - // SrcIpRanges: CIDR IP address range. - SrcIpRanges []string `json:"srcIpRanges,omitempty"` +type RouterStatus struct { + // BestRoutes: Best routes for this router's network. + BestRoutes []*Route `json:"bestRoutes,omitempty"` - // ForceSendFields is a list of field names (e.g. "SrcIpRanges") to + // BestRoutesForRouter: Best routes learned by this router. + BestRoutesForRouter []*Route `json:"bestRoutesForRouter,omitempty"` + + BgpPeerStatus []*RouterStatusBgpPeerStatus `json:"bgpPeerStatus,omitempty"` + + NatStatus []*RouterStatusNatStatus `json:"natStatus,omitempty"` + + // Network: URI of the network to which this router belongs. + Network string `json:"network,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BestRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -26695,50 +28485,59 @@ type SecurityPolicyRuleMatcherConfig struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SrcIpRanges") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "BestRoutes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRuleMatcherConfig +func (s *RouterStatus) MarshalJSON() ([]byte, error) { + type NoMethod RouterStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SerialPortOutput: An instance's serial console output. -type SerialPortOutput struct { - // Contents: [Output Only] The contents of the console output. - Contents string `json:"contents,omitempty"` +type RouterStatusBgpPeerStatus struct { + // AdvertisedRoutes: Routes that were advertised to the remote BGP peer + AdvertisedRoutes []*Route `json:"advertisedRoutes,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#serialPortOutput for serial port output. - Kind string `json:"kind,omitempty"` + // IpAddress: IP address of the local BGP interface. + IpAddress string `json:"ipAddress,omitempty"` - // Next: [Output Only] The position of the next byte of content from the - // serial console output. Use this value in the next request as the - // start parameter. 
- Next int64 `json:"next,omitempty,string"` + // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. + LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Name: Name of this BGP peer. Unique within the Routers resource. + Name string `json:"name,omitempty"` - // Start: The starting byte position of the output that was returned. - // This should match the start parameter sent with the request. If the - // serial console output exceeds the size of the buffer, older output - // will be overwritten by newer content and the start values will be - // mismatched. - Start int64 `json:"start,omitempty,string"` + // NumLearnedRoutes: Number of routes learned from the remote BGP Peer. + NumLearnedRoutes int64 `json:"numLearnedRoutes,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // PeerIpAddress: IP address of the remote BGP interface. + PeerIpAddress string `json:"peerIpAddress,omitempty"` - // ForceSendFields is a list of field names (e.g. "Contents") to + // State: BGP state as specified in RFC1771. + State string `json:"state,omitempty"` + + // Status: Status of the BGP peer: {UP, DOWN} + // + // Possible values: + // "DOWN" + // "UNKNOWN" + // "UP" + Status string `json:"status,omitempty"` + + // Uptime: Time this session has been up. Format: 14 years, 51 weeks, 6 + // days, 23 hours, 59 minutes, 59 seconds + Uptime string `json:"uptime,omitempty"` + + // UptimeSeconds: Time this session has been up, in seconds. Format: 145 + UptimeSeconds string `json:"uptimeSeconds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AdvertisedRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -26746,179 +28545,59 @@ type SerialPortOutput struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Contents") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AdvertisedRoutes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { - type NoMethod SerialPortOutput +func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { + type NoMethod RouterStatusBgpPeerStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ServiceAccount: A service account. -type ServiceAccount struct { - // Email: Email address of the service account. 
- Email string `json:"email,omitempty"` +// RouterStatusNatStatus: Status of a NAT contained in this router. Next +// tag: 9 +type RouterStatusNatStatus struct { + // AutoAllocatedNatIps: A list of IPs auto-allocated for NAT. Example: + // ["1.1.1.1", "129.2.16.89"] + AutoAllocatedNatIps []string `json:"autoAllocatedNatIps,omitempty"` - // Scopes: The list of scopes to be made available for this service - // account. - Scopes []string `json:"scopes,omitempty"` + // MinExtraNatIpsNeeded: The number of extra IPs to allocate. This will + // be greater than 0 only if user-specified IPs are NOT enough to allow + // all configured VMs to use NAT. This value is meaningful only when + // auto-allocation of NAT IPs is *not* used. + MinExtraNatIpsNeeded int64 `json:"minExtraNatIpsNeeded,omitempty"` - // ForceSendFields is a list of field names (e.g. "Email") to - // unconditionally include in API requests. By default, fields with + // Name: Unique name of this NAT. + Name string `json:"name,omitempty"` + + // NumVmEndpointsWithNatMappings: Number of VM endpoints (i.e., Nics) + // that can use NAT. + NumVmEndpointsWithNatMappings int64 `json:"numVmEndpointsWithNatMappings,omitempty"` + + // UserAllocatedNatIpResources: A list of fully qualified URLs of + // reserved IP address resources. + UserAllocatedNatIpResources []string `json:"userAllocatedNatIpResources,omitempty"` + + // UserAllocatedNatIps: A list of IPs user-allocated for NAT. They will + // be raw IP strings like "179.12.26.133". + UserAllocatedNatIps []string `json:"userAllocatedNatIps,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoAllocatedNatIps") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Email") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAccount - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ShieldedVmConfig: A set of Shielded VM options. -type ShieldedVmConfig struct { - // EnableIntegrityMonitoring: Defines whether the instance has integrity - // monitoring enabled. - EnableIntegrityMonitoring bool `json:"enableIntegrityMonitoring,omitempty"` - - // EnableSecureBoot: Defines whether the instance has Secure Boot - // enabled. - EnableSecureBoot bool `json:"enableSecureBoot,omitempty"` - - // EnableVtpm: Defines whether the instance has the vTPM enabled. - EnableVtpm bool `json:"enableVtpm,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "EnableIntegrityMonitoring") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. 
However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. - // "EnableIntegrityMonitoring") to include in API requests with the JSON - // null value. By default, fields with empty values are omitted from API - // requests. However, any field with an empty value appearing in - // NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ShieldedVmConfig) MarshalJSON() ([]byte, error) { - type NoMethod ShieldedVmConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ShieldedVmIdentity: A shielded VM identity entry. -type ShieldedVmIdentity struct { - // EncryptionKey: An Endorsement Key (EK) issued to the Shielded VM's - // vTPM. - EncryptionKey *ShieldedVmIdentityEntry `json:"encryptionKey,omitempty"` - - // Kind: [Output Only] Type of the resource. Always - // compute#shieldedVmIdentity for shielded VM identity entry. - Kind string `json:"kind,omitempty"` - - // SigningKey: An Attestation Key (AK) issued to the Shielded VM's vTPM. - SigningKey *ShieldedVmIdentityEntry `json:"signingKey,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "EncryptionKey") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "EncryptionKey") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ShieldedVmIdentity) MarshalJSON() ([]byte, error) { - type NoMethod ShieldedVmIdentity - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ShieldedVmIdentityEntry: A Shielded VM Identity Entry. -type ShieldedVmIdentityEntry struct { - // EkCert: A PEM-encoded X.509 certificate. This field can be empty. - EkCert string `json:"ekCert,omitempty"` - - // EkPub: A PEM-encoded public key. - EkPub string `json:"ekPub,omitempty"` - - // ForceSendFields is a list of field names (e.g. "EkCert") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"EkCert") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ShieldedVmIdentityEntry) MarshalJSON() ([]byte, error) { - type NoMethod ShieldedVmIdentityEntry - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ShieldedVmIntegrityPolicy: The policy describes the baseline against -// which VM instance boot integrity is measured. -type ShieldedVmIntegrityPolicy struct { - // UpdateAutoLearnPolicy: Updates the integrity policy baseline using - // the measurements from the VM instance's most recent boot. - UpdateAutoLearnPolicy bool `json:"updateAutoLearnPolicy,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "UpdateAutoLearnPolicy") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "UpdateAutoLearnPolicy") to + // NullFields is a list of field names (e.g. "AutoAllocatedNatIps") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -26928,28 +28607,23 @@ type ShieldedVmIntegrityPolicy struct { NullFields []string `json:"-"` } -func (s *ShieldedVmIntegrityPolicy) MarshalJSON() ([]byte, error) { - type NoMethod ShieldedVmIntegrityPolicy +func (s *RouterStatusNatStatus) MarshalJSON() ([]byte, error) { + type NoMethod RouterStatusNatStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SignedUrlKey: Represents a customer-supplied Signing Key used by -// Cloud CDN Signed URLs -type SignedUrlKey struct { - // KeyName: Name of the key. The name must be 1-63 characters long, and - // comply with RFC1035. Specifically, the name must be 1-63 characters - // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` - // which means the first character must be a lowercase letter, and all - // following characters must be a dash, lowercase letter, or digit, - // except the last character, which cannot be a dash. - KeyName string `json:"keyName,omitempty"` +type RouterStatusResponse struct { + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` - // KeyValue: 128-bit key value used for signing the URL. The key value - // must be a valid RFC 4648 Section 5 base64url encoded string. - KeyValue string `json:"keyValue,omitempty"` + Result *RouterStatus `json:"result,omitempty"` - // ForceSendFields is a list of field names (e.g. "KeyName") to + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -26957,8 +28631,8 @@ type SignedUrlKey struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "KeyName") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -26966,142 +28640,21 @@ type SignedUrlKey struct { NullFields []string `json:"-"` } -func (s *SignedUrlKey) MarshalJSON() ([]byte, error) { - type NoMethod SignedUrlKey +func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { + type NoMethod RouterStatusResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Snapshot: A persistent disk snapshot resource. (== resource_for -// beta.snapshots ==) (== resource_for v1.snapshots ==) -type Snapshot struct { - // AutoCreated: [Output Only] Set to true if snapshots are automatically - // by applying resource policy on the target disk. - AutoCreated bool `json:"autoCreated,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // DiskSizeGb: [Output Only] Size of the snapshot, specified in GB. - DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#snapshot for - // Snapshot resources. - Kind string `json:"kind,omitempty"` - - // LabelFingerprint: A fingerprint for the labels being applied to this - // snapshot, which is essentially a hash of the labels set used for - // optimistic locking. The fingerprint is initially generated by Compute - // Engine and changes after every request to modify or update labels. - // You must always provide an up-to-date fingerprint hash in order to - // update or change labels, otherwise the request will fail with error - // 412 conditionNotMet. - // - // To see the latest fingerprint, make a get() request to retrieve a - // snapshot. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels to apply to this snapshot. These can be later modified - // by the setLabels method. Label values may be empty. - Labels map[string]string `json:"labels,omitempty"` - - // LicenseCodes: [Output Only] Integer license codes indicating which - // licenses are attached to this snapshot. - LicenseCodes googleapi.Int64s `json:"licenseCodes,omitempty"` - - // Licenses: [Output Only] A list of public visible licenses that apply - // to this snapshot. This can be because the original image had licenses - // attached (such as a Windows image). - Licenses []string `json:"licenses,omitempty"` - - // Name: Name of the resource; provided by the client when the resource - // is created. 
The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SnapshotEncryptionKey: Encrypts the snapshot using a - // customer-supplied encryption key. - // - // After you encrypt a snapshot using a customer-supplied key, you must - // provide the same key if you use the image later For example, you must - // provide the encryption key when you create a disk from the encrypted - // snapshot in a future request. - // - // Customer-supplied encryption keys do not protect access to metadata - // of the disk. - // - // If you do not provide an encryption key when creating the snapshot, - // then the snapshot will be encrypted using an automatically generated - // key and you do not need to provide a key to use the snapshot later. - SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` - - // SourceDisk: [Output Only] The source disk used to create this - // snapshot. - SourceDisk string `json:"sourceDisk,omitempty"` - - // SourceDiskEncryptionKey: The customer-supplied encryption key of the - // source disk. Required if the source disk is protected by a - // customer-supplied encryption key. - SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` - - // SourceDiskId: [Output Only] The ID value of the disk used to create - // this snapshot. This value may be used to determine whether the - // snapshot was taken from the current or a previous instance of a given - // disk name. - SourceDiskId string `json:"sourceDiskId,omitempty"` - - // Status: [Output Only] The status of the snapshot. This can be - // CREATING, DELETING, FAILED, READY, or UPLOADING. - // - // Possible values: - // "CREATING" - // "DELETING" - // "FAILED" - // "READY" - // "UPLOADING" - Status string `json:"status,omitempty"` - - // StorageBytes: [Output Only] A size of the storage used by the - // snapshot. As snapshots share storage, this number is expected to - // change with snapshot creation/deletion. - StorageBytes int64 `json:"storageBytes,omitempty,string"` - - // StorageBytesStatus: [Output Only] An indicator whether storageBytes - // is in a stable state or it is being adjusted as a result of shared - // storage reallocation. This status can either be UPDATING, meaning the - // size of the snapshot is being updated, or UP_TO_DATE, meaning the - // size of the snapshot is up-to-date. - // - // Possible values: - // "UPDATING" - // "UP_TO_DATE" - StorageBytesStatus string `json:"storageBytesStatus,omitempty"` - - // StorageLocations: GCS bucket storage location of the snapshot - // (regional or multi-regional). - StorageLocations []string `json:"storageLocations,omitempty"` +type RoutersPreviewResponse struct { + // Resource: Preview of given router. + Resource *Router `json:"resource,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "AutoCreated") to + // ForceSendFields is a list of field names (e.g. 
"Resource") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27109,52 +28662,30 @@ type Snapshot struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoCreated") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Resource") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Snapshot) MarshalJSON() ([]byte, error) { - type NoMethod Snapshot +func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { + type NoMethod RoutersPreviewResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SnapshotList: Contains a list of Snapshot resources. -type SnapshotList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Snapshot resources. - Items []*Snapshot `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *SnapshotListWarning `json:"warning,omitempty"` +type RoutersScopedList struct { + // Routers: A list of routers contained in this scope. + Routers []*Router `json:"routers,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: Informational warning which replaces the list of routers + // when the list is empty. + Warning *RoutersScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Routers") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27162,8 +28693,8 @@ type SnapshotList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. 
"Routers") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -27171,14 +28702,15 @@ type SnapshotList struct { NullFields []string `json:"-"` } -func (s *SnapshotList) MarshalJSON() ([]byte, error) { - type NoMethod SnapshotList +func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { + type NoMethod RoutersScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SnapshotListWarning: [Output Only] Informational warning message. -type SnapshotListWarning struct { +// RoutersScopedListWarning: Informational warning which replaces the +// list of routers when the list is empty. +type RoutersScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -27212,7 +28744,7 @@ type SnapshotListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SnapshotListWarningData `json:"data,omitempty"` + Data []*RoutersScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -27235,13 +28767,13 @@ type SnapshotListWarning struct { NullFields []string `json:"-"` } -func (s *SnapshotListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SnapshotListWarning +func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RoutersScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SnapshotListWarningData struct { +type RoutersScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -27272,23 +28804,51 @@ type SnapshotListWarningData struct { NullFields []string `json:"-"` } -func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SnapshotListWarningData +func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RoutersScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SourceInstanceParams: A specification of the parameters to use when -// creating the instance template from a source instance. -type SourceInstanceParams struct { - // DiskConfigs: Attached disks configuration. If not provided, defaults - // are applied: For boot disk and any other R/W disks, new custom images - // will be created from each disk. For read-only disks, they will be - // attached in read-only mode. Local SSD disks will be created as blank - // volumes. - DiskConfigs []*DiskInstantiationConfig `json:"diskConfigs,omitempty"` +// Rule: A rule to be applied in a Policy. +type Rule struct { + // Action: Required + // + // Possible values: + // "ALLOW" + // "ALLOW_WITH_LOG" + // "DENY" + // "DENY_WITH_LOG" + // "LOG" + // "NO_ACTION" + Action string `json:"action,omitempty"` - // ForceSendFields is a list of field names (e.g. "DiskConfigs") to + // Conditions: Additional restrictions that must be met. 
All conditions + // must pass for the rule to match. + Conditions []*Condition `json:"conditions,omitempty"` + + // Description: Human-readable description of the rule. + Description string `json:"description,omitempty"` + + // Ins: If one or more 'in' clauses are specified, the rule matches if + // the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. + Ins []string `json:"ins,omitempty"` + + // LogConfigs: The config returned to callers of + // tech.iam.IAM.CheckPolicy for any entries that match the LOG action. + LogConfigs []*LogConfig `json:"logConfigs,omitempty"` + + // NotIns: If one or more 'not_in' clauses are specified, the rule + // matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the + // entries. + NotIns []string `json:"notIns,omitempty"` + + // Permissions: A permission is a string of form '..' (e.g., + // 'storage.buckets.list'). A value of '*' matches all permissions, and + // a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs. + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27296,92 +28856,78 @@ type SourceInstanceParams struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DiskConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SourceInstanceParams) MarshalJSON() ([]byte, error) { - type NoMethod SourceInstanceParams +func (s *Rule) MarshalJSON() ([]byte, error) { + type NoMethod Rule raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificate: An SslCertificate resource. This resource provides a -// mechanism to upload an SSL key and certificate to the load balancer -// to serve secure connections from the user. (== resource_for -// beta.sslCertificates ==) (== resource_for v1.sslCertificates ==) -type SslCertificate struct { - // Certificate: A local certificate file. The certificate must be in PEM - // format. The certificate chain must be no greater than 5 certs long. - // The chain must include at least one intermediate cert. - Certificate string `json:"certificate,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` +type SSLHealthCheck struct { + // Port: The TCP port number for the health check request. The default + // value is 443. Valid values are 1 through 65535. + Port int64 `json:"port,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. 
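// Editorial aside, not part of the vendored diff: a minimal sketch of
// populating the IAM-style Rule type added above. The action and permission
// values come from the enum list and the example in the field comments; the
// description is an illustrative placeholder, and the import path is an
// assumption about where this vendored package lives.
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored import path
)

func main() {
	rule := &compute.Rule{
		Action:      "ALLOW",                          // one of the Action values listed above
		Description: "example read-only rule",         // hypothetical description
		Permissions: []string{"storage.buckets.list"}, // example permission from the comment above
	}

	b, _ := rule.MarshalJSON()
	fmt.Println(string(b))
}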
- Description string `json:"description,omitempty"` + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` - // ExpireTime: [Output Only] Expire time of the certificate. RFC3339 - ExpireTime string `json:"expireTime,omitempty"` + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in + // port + // is used for health checking. + // USE_NAMED_PORT: The + // portName + // is used for health checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. + // + // + // If not specified, SSL health check follows behavior specified + // in + // port + // and + // portName + // fields. + // + // Possible values: + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#sslCertificate for SSL certificates. - Kind string `json:"kind,omitempty"` + // Request: The application data to send once the SSL connection has + // been established (default value is empty). If both request and + // response are empty, the connection establishment alone will indicate + // health. The request data can only be ASCII. + Request string `json:"request,omitempty"` - // Managed: Configuration and status of a managed SSL certificate. - Managed *SslCertificateManagedSslCertificate `json:"managed,omitempty"` + // Response: The bytes to match against the beginning of the response + // data. If left empty (the default value), any response will indicate + // health. The response data can only be ASCII. + Response string `json:"response,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // PrivateKey: A write-only private key in PEM format. Only insert - // requests will include this field. - PrivateKey string `json:"privateKey,omitempty"` - - // SelfLink: [Output only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SelfManaged: Configuration and status of a self-managed SSL - // certificate. - SelfManaged *SslCertificateSelfManagedSslCertificate `json:"selfManaged,omitempty"` - - // SubjectAlternativeNames: [Output Only] Domains associated with the - // certificate via Subject Alternative Name. 
- SubjectAlternativeNames []string `json:"subjectAlternativeNames,omitempty"` - - // Type: (Optional) Specifies the type of SSL certificate, either - // "SELF_MANAGED" or "MANAGED". If not specified, the certificate is - // self-managed and the fields certificate and private_key are used. - // - // Possible values: - // "MANAGED" - // "SELF_MANAGED" - // "TYPE_UNSPECIFIED" - Type string `json:"type,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Certificate") to + // ForceSendFields is a list of field names (e.g. "Port") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27389,52 +28935,53 @@ type SslCertificate struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Certificate") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Port") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SslCertificate) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificate +func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { + type NoMethod SSLHealthCheck raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificateList: Contains a list of SslCertificate resources. -type SslCertificateList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of SslCertificate resources. - Items []*SslCertificate `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` +// Scheduling: Sets the scheduling options for an Instance. +type Scheduling struct { + // AutomaticRestart: Specifies whether the instance should be + // automatically restarted if it is terminated by Compute Engine (not + // terminated by a user). You can only set the automatic restart option + // for standard instances. Preemptible instances cannot be automatically + // restarted. + // + // By default, this is set to true so an instance is automatically + // restarted if it is terminated by Compute Engine. + AutomaticRestart *bool `json:"automaticRestart,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. 
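// Editorial aside, not part of the vendored diff: a sketch of an
// SSLHealthCheck built only from fields and enum values shown above. It pins
// the probe to a fixed port and asks for a PROXY protocol v1 header;
// Request/Response are left empty, so, per the comments above, a successful
// SSL connection alone indicates health. The import path is an assumption
// about where this vendored package lives.
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored import path
)

func main() {
	hc := &compute.SSLHealthCheck{
		Port:              443,              // the documented default, spelled out for clarity
		PortSpecification: "USE_FIXED_PORT", // health-check the fixed port above
		ProxyHeader:       "PROXY_V1",       // prepend a proxy-protocol header; default is NONE
	}

	b, _ := hc.MarshalJSON()
	fmt.Println(string(b))
}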
- SelfLink string `json:"selfLink,omitempty"` + // NodeAffinities: A set of node affinity and anti-affinity. + NodeAffinities []*SchedulingNodeAffinity `json:"nodeAffinities,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *SslCertificateListWarning `json:"warning,omitempty"` + // OnHostMaintenance: Defines the maintenance behavior for this + // instance. For standard instances, the default behavior is MIGRATE. + // For preemptible instances, the default and only possible behavior is + // TERMINATE. For more information, see Setting Instance Scheduling + // Options. + // + // Possible values: + // "MIGRATE" + // "TERMINATE" + OnHostMaintenance string `json:"onHostMaintenance,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Preemptible: Defines whether the instance is preemptible. This can + // only be set during instance creation, it cannot be set or changed + // after the instance has been created. + Preemptible bool `json:"preemptible,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27442,64 +28989,40 @@ type SslCertificateList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AutomaticRestart") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *SslCertificateList) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateList +func (s *Scheduling) MarshalJSON() ([]byte, error) { + type NoMethod Scheduling raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificateListWarning: [Output Only] Informational warning -// message. -type SslCertificateListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. +// SchedulingNodeAffinity: Node Affinity: the configuration of desired +// nodes onto which this Instance could be scheduled. +type SchedulingNodeAffinity struct { + // Key: Corresponds to the label key of Node resource. + Key string `json:"key,omitempty"` + + // Operator: Defines the operation of node selection. 
// // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SslCertificateListWarningData `json:"data,omitempty"` + // "IN" + // "NOT_IN" + // "OPERATOR_UNSPECIFIED" + Operator string `json:"operator,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // Values: Corresponds to the label values of Node resource. + Values []string `json:"values,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27507,7 +29030,7 @@ type SslCertificateListWarning struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -27516,72 +29039,48 @@ type SslCertificateListWarning struct { NullFields []string `json:"-"` } -func (s *SslCertificateListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateListWarning +func (s *SchedulingNodeAffinity) MarshalJSON() ([]byte, error) { + type NoMethod SchedulingNodeAffinity raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslCertificateListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +type SecurityPoliciesListPreconfiguredExpressionSetsResponse struct { + PreconfiguredExpressionSets *SecurityPoliciesWafConfig `json:"preconfiguredExpressionSets,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. 
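// Editorial aside, not part of the vendored diff: a sketch of a Scheduling
// block for a preemptible instance, combined with the SchedulingNodeAffinity
// type defined above. AutomaticRestart is a *bool, so it needs an explicit
// pointer; the googleapi.Bool helper is assumed to be available for that, as
// in other releases of this client. The node label key/value are
// illustrative, and the compute import path is an assumption about where this
// vendored package lives.
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored import path
	"google.golang.org/api/googleapi"
)

func main() {
	sched := &compute.Scheduling{
		Preemptible:       true,
		AutomaticRestart:  googleapi.Bool(false), // preemptible instances cannot be auto-restarted
		OnHostMaintenance: "TERMINATE",           // the only behavior allowed for preemptible instances
		NodeAffinities: []*compute.SchedulingNodeAffinity{{
			Key:      "workload",        // hypothetical node label key
			Operator: "IN",              // one of the Operator values listed above
			Values:   []string{"batch"}, // hypothetical node label value
		}},
	}

	b, _ := sched.MarshalJSON()
	fmt.Println(string(b))
}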
- Value string `json:"value,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "PreconfiguredExpressionSets") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. + // "PreconfiguredExpressionSets") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateListWarningData +func (s *SecurityPoliciesListPreconfiguredExpressionSetsResponse) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPoliciesListPreconfiguredExpressionSetsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificateManagedSslCertificate: Configuration and status of a -// managed SSL certificate. -type SslCertificateManagedSslCertificate struct { - // DomainStatus: [Output only] Detailed statuses of the domains - // specified for managed certificate resource. - DomainStatus map[string]string `json:"domainStatus,omitempty"` - - // Domains: The domains for which a managed SSL certificate will be - // generated. Currently only single-domain certs are supported. - Domains []string `json:"domains,omitempty"` - - // Status: [Output only] Status of the managed certificate resource. - // - // Possible values: - // "ACTIVE" - // "MANAGED_CERTIFICATE_STATUS_UNSPECIFIED" - // "PROVISIONING" - // "PROVISIONING_FAILED" - // "PROVISIONING_FAILED_PERMANENTLY" - // "RENEWAL_FAILED" - Status string `json:"status,omitempty"` +type SecurityPoliciesWafConfig struct { + WafRules *PreconfiguredWafSet `json:"wafRules,omitempty"` - // ForceSendFields is a list of field names (e.g. "DomainStatus") to + // ForceSendFields is a list of field names (e.g. "WafRules") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27589,66 +29088,126 @@ type SslCertificateManagedSslCertificate struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DomainStatus") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "WafRules") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SslCertificateManagedSslCertificate) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateManagedSslCertificate +func (s *SecurityPoliciesWafConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPoliciesWafConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificateSelfManagedSslCertificate: Configuration and status of -// a self-managed SSL certificate. -type SslCertificateSelfManagedSslCertificate struct { - // Certificate: A local certificate file. The certificate must be in PEM - // format. The certificate chain must be no greater than 5 certs long. - // The chain must include at least one intermediate cert. - Certificate string `json:"certificate,omitempty"` +// SecurityPolicy: A security policy is comprised of one or more rules. +// It can also be associated with one or more 'targets'. (== +// resource_for v1.securityPolicies ==) (== resource_for +// beta.securityPolicies ==) +type SecurityPolicy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // PrivateKey: A write-only private key in PEM format. Only insert - // requests will include this field. - PrivateKey string `json:"privateKey,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // ForceSendFields is a list of field names (e.g. "Certificate") to - // unconditionally include in API requests. By default, fields with + // Fingerprint: Specifies a fingerprint for this resource, which is + // essentially a hash of the metadata's contents and used for optimistic + // locking. The fingerprint is initially generated by Compute Engine and + // changes after every request to modify or update metadata. You must + // always provide an up-to-date fingerprint hash in order to update or + // change metadata, otherwise the request will fail with error 412 + // conditionNotMet. + // + // To see the latest fingerprint, make get() request to the security + // policy. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output only] Type of the resource. 
Always + // compute#securityPolicyfor security policies + Kind string `json:"kind,omitempty"` + + // LabelFingerprint: A fingerprint for the labels being applied to this + // security policy, which is essentially a hash of the labels set used + // for optimistic locking. The fingerprint is initially generated by + // Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels. + // + // To see the latest fingerprint, make get() request to the security + // policy. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this security policy resource. These can + // be later modified by the setLabels method. Each label key/value must + // comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Rules: A list of rules that belong to this policy. There must always + // be a default rule (rule with priority 2147483647 and match "*"). If + // no rules are provided when creating a security policy, a default rule + // with action "allow" will be added. + Rules []*SecurityPolicyRule `json:"rules,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Certificate") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
NullFields []string `json:"-"` } -func (s *SslCertificateSelfManagedSslCertificate) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateSelfManagedSslCertificate +func (s *SecurityPolicy) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesList struct { +type SecurityPolicyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of SslPolicy resources. - Items []*SslPolicy `json:"items,omitempty"` + // Items: A list of SecurityPolicy resources. + Items []*SecurityPolicy `json:"items,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#sslPoliciesList for lists of sslPolicies. + // Kind: [Output Only] Type of resource. Always + // compute#securityPolicyList for listsof securityPolicies Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -27659,11 +29218,8 @@ type SslPoliciesList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *SslPoliciesListWarning `json:"warning,omitempty"` + Warning *SecurityPolicyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -27686,14 +29242,15 @@ type SslPoliciesList struct { NullFields []string `json:"-"` } -func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesList +func (s *SecurityPolicyList) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslPoliciesListWarning: [Output Only] Informational warning message. -type SslPoliciesListWarning struct { +// SecurityPolicyListWarning: [Output Only] Informational warning +// message. +type SecurityPolicyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -27727,7 +29284,7 @@ type SslPoliciesListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SslPoliciesListWarningData `json:"data,omitempty"` + Data []*SecurityPolicyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -27750,13 +29307,13 @@ type SslPoliciesListWarning struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListWarning +func (s *SecurityPolicyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesListWarningData struct { +type SecurityPolicyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -27787,20 +29344,16 @@ type SslPoliciesListWarningData struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListWarningData +func (s *SecurityPolicyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesListAvailableFeaturesResponse struct { - Features []string `json:"features,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type SecurityPolicyReference struct { + SecurityPolicy string `json:"securityPolicy,omitempty"` - // ForceSendFields is a list of field names (e.g. "Features") to + // ForceSendFields is a list of field names (e.g. "SecurityPolicy") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27808,172 +29361,180 @@ type SslPoliciesListAvailableFeaturesResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Features") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "SecurityPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListAvailableFeaturesResponse +func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyReference raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslPolicy: A SSL policy specifies the server-side support for SSL -// features. This can be attached to a TargetHttpsProxy or a -// TargetSslProxy. This affects connections between clients and the -// HTTPS or SSL proxy load balancer. They do not affect the connection -// between the load balancers and the backends. -type SslPolicy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // CustomFeatures: A list of features enabled when the selected profile - // is CUSTOM. The - // - method returns the set of features that can be specified in this - // list. This field must be empty if the profile is not CUSTOM. 
- CustomFeatures []string `json:"customFeatures,omitempty"` +// SecurityPolicyRule: Represents a rule that describes one or more +// match conditions along with the action to be taken when traffic +// matches this condition (allow or deny). +type SecurityPolicyRule struct { + // Action: The Action to perform when the client connection triggers the + // rule. Can currently be either "allow" or "deny()" where valid values + // for status are 403, 404, and 502. + Action string `json:"action,omitempty"` // Description: An optional description of this resource. Provide this // property when you create the resource. Description string `json:"description,omitempty"` - // EnabledFeatures: [Output Only] The list of features enabled in the - // SSL policy. - EnabledFeatures []string `json:"enabledFeatures,omitempty"` + // Kind: [Output only] Type of the resource. Always + // compute#securityPolicyRule for security policy rules + Kind string `json:"kind,omitempty"` - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a SslPolicy. An up-to-date - // fingerprint must be provided in order to update the SslPolicy, - // otherwise the request will fail with error 412 conditionNotMet. - // - // To see the latest fingerprint, make a get() request to retrieve an - // SslPolicy. - Fingerprint string `json:"fingerprint,omitempty"` + // Match: A match condition that incoming traffic is evaluated against. + // If it evaluates to true, the corresponding 'action' is enforced. + Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // Preview: If set to true, the specified action is not enforced. + Preview bool `json:"preview,omitempty"` - // Kind: [Output only] Type of the resource. Always compute#sslPolicyfor - // SSL policies. - Kind string `json:"kind,omitempty"` + // Priority: An integer indicating the priority of a rule in the list. + // The priority must be a positive value between 0 and 2147483647. Rules + // are evaluated from highest to lowest priority where 0 is the highest + // priority and 2147483647 is the lowest priority. + Priority int64 `json:"priority,omitempty"` - // MinTlsVersion: The minimum version of SSL protocol that can be used - // by the clients to establish a connection with the load balancer. This - // can be one of TLS_1_0, TLS_1_1, TLS_1_2. - // - // Possible values: - // "TLS_1_0" - // "TLS_1_1" - // "TLS_1_2" - MinTlsVersion string `json:"minTlsVersion,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // Name: Name of the resource. The name must be 1-63 characters long, - // and comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests.
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Profile: Profile specifies the set of SSL features that can be used - // by the load balancer when negotiating SSL with clients. This can be - // one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, - // the set of SSL features to enable must be specified in the - // customFeatures field. + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRule) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRule + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SecurityPolicyRuleMatcher: Represents a match condition that incoming +// traffic is evaluated against. Exactly one field must be specified. +type SecurityPolicyRuleMatcher struct { + // Config: The configuration options available when specifying + // versioned_expr. This field must be specified if versioned_expr is + // specified and cannot be specified if versioned_expr is not specified. + Config *SecurityPolicyRuleMatcherConfig `json:"config,omitempty"` + + // Expr: User defined CEVAL expression. A CEVAL expression is used to + // specify match criteria such as origin.ip, source.region_code and + // contents in the request header. + Expr *Expr `json:"expr,omitempty"` + + // VersionedExpr: Preconfigured versioned expression. If this field is + // specified, config must also be specified. Available preconfigured + // expressions along with their requirements are: SRC_IPS_V1 - must + // specify the corresponding src_ip_range field in config. // // Possible values: - // "COMPATIBLE" - // "CUSTOM" - // "MODERN" - // "RESTRICTED" - Profile string `json:"profile,omitempty"` + // "SRC_IPS_V1" + VersionedExpr string `json:"versionedExpr,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // ForceSendFields is a list of field names (e.g. "Config") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Warnings: [Output Only] If potential misconfigurations are detected - // for this SSL policy, this field will be populated with warning - // messages. - Warnings []*SslPolicyWarnings `json:"warnings,omitempty"` + // NullFields is a list of field names (e.g. "Config") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +func (s *SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleMatcher + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with +type SecurityPolicyRuleMatcherConfig struct { + // SrcIpRanges: CIDR IP address range. + SrcIpRanges []string `json:"srcIpRanges,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SrcIpRanges") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "SrcIpRanges") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SslPolicy) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicy +func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleMatcherConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPolicyWarnings struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` +// SerialPortOutput: An instance's serial console output. +type SerialPortOutput struct { + // Contents: [Output Only] The contents of the console output. 
+ Contents string `json:"contents,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SslPolicyWarningsData `json:"data,omitempty"` + // Kind: [Output Only] Type of the resource. Always + // compute#serialPortOutput for serial port output. + Kind string `json:"kind,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // Next: [Output Only] The position of the next byte of content from the + // serial console output. Use this value in the next request as the + // start parameter. + Next int64 `json:"next,omitempty,string"` - // ForceSendFields is a list of field names (e.g. "Code") to + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Start: The starting byte position of the output that was returned. + // This should match the start parameter sent with the request. If the + // serial console output exceeds the size of the buffer, older output + // will be overwritten by newer content and the start values will be + // mismatched. + Start int64 `json:"start,omitempty,string"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Contents") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27981,8 +29542,8 @@ type SslPolicyWarnings struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Contents") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -27990,27 +29551,20 @@ type SslPolicyWarnings struct { NullFields []string `json:"-"` } -func (s *SslPolicyWarnings) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicyWarnings +func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { + type NoMethod SerialPortOutput raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPolicyWarningsData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. 
- Value string `json:"value,omitempty"` +type ServerBinding struct { + // Possible values: + // "RESTART_NODE_ON_ANY_SERVER" + // "RESTART_NODE_ON_MINIMAL_SERVERS" + // "SERVER_BINDING_TYPE_UNSPECIFIED" + Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "Type") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28018,7 +29572,7 @@ type SslPolicyWarningsData struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API + // NullFields is a list of field names (e.g. "Type") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -28027,19 +29581,22 @@ type SslPolicyWarningsData struct { NullFields []string `json:"-"` } -func (s *SslPolicyWarningsData) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicyWarningsData +func (s *ServerBinding) MarshalJSON() ([]byte, error) { + type NoMethod ServerBinding raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPolicyReference struct { - // SslPolicy: URL of the SSL policy resource. Set this to empty string - // to clear any existing SSL policy associated with the target proxy - // resource. - SslPolicy string `json:"sslPolicy,omitempty"` +// ServiceAccount: A service account. +type ServiceAccount struct { + // Email: Email address of the service account. + Email string `json:"email,omitempty"` - // ForceSendFields is a list of field names (e.g. "SslPolicy") to + // Scopes: The list of scopes to be made available for this service + // account. + Scopes []string `json:"scopes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Email") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28047,8 +29604,8 @@ type SslPolicyReference struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SslPolicy") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Email") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -28056,116 +29613,27 @@ type SslPolicyReference struct { NullFields []string `json:"-"` } -func (s *SslPolicyReference) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicyReference +func (s *ServiceAccount) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAccount raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Subnetwork: A Subnetwork resource. 
(== resource_for beta.subnetworks -// ==) (== resource_for v1.subnetworks ==) -type Subnetwork struct { - // AllowSubnetCidrRoutesOverlap: Whether this subnetwork can conflict - // with static routes. Setting this to true allows this subnetwork's - // primary and secondary ranges to conflict with routes that have - // already been configured on the corresponding network. Static routes - // will take precedence over the subnetwork route if the route prefix - // length is at least as large as the subnetwork prefix length. - // - // Also, packets destined to IPs within subnetwork may contain - // private/sensitive data and are prevented from leaving the virtual - // network. Setting this field to true will disable this feature. - // - // The default value is false and applies to all existing subnetworks - // and automatically created subnetworks. - // - // This field cannot be set to true at resource creation time. - AllowSubnetCidrRoutesOverlap bool `json:"allowSubnetCidrRoutesOverlap,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. This field can be set only at - // resource creation time. - Description string `json:"description,omitempty"` - - // EnableFlowLogs: Whether to enable flow logging for this subnetwork. - // If this field is not explicitly set, it will not appear in get - // listings. If not set the default behavior is to disable flow logging. - EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` - - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a Subnetwork. An up-to-date - // fingerprint must be provided in order to update the Subnetwork, - // otherwise the request will fail with error 412 conditionNotMet. - // - // To see the latest fingerprint, make a get() request to retrieve a - // Subnetwork. - Fingerprint string `json:"fingerprint,omitempty"` - - // GatewayAddress: [Output Only] The gateway address for default routes - // to reach destination addresses outside this subnetwork. - GatewayAddress string `json:"gatewayAddress,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // IpCidrRange: The range of internal addresses that are owned by this - // subnetwork. Provide this property when you create the subnetwork. For - // example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and - // non-overlapping within a network. Only IPv4 is supported. This field - // can be set only at resource creation time. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#subnetwork - // for Subnetwork resources. - Kind string `json:"kind,omitempty"` - - // Name: The name of the resource, provided by the client when initially - // creating the resource. The name must be 1-63 characters long, and - // comply with RFC1035. Specifically, the name must be 1-63 characters - // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` - // which means the first character must be a lowercase letter, and all - // following characters must be a dash, lowercase letter, or digit, - // except the last character, which cannot be a dash. 
- Name string `json:"name,omitempty"` - - // Network: The URL of the network to which this subnetwork belongs, - // provided by the client when initially creating the subnetwork. Only - // networks that are in the distributed mode can have subnetworks. This - // field can be set only at resource creation time. - Network string `json:"network,omitempty"` - - // PrivateIpGoogleAccess: Whether the VMs in this subnet can access - // Google services without assigned external IP addresses. This field - // can be both set at resource creation time and updated using - // setPrivateIpGoogleAccess. - PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` - - // Region: URL of the region where the Subnetwork resides. This field - // can be set only at resource creation time. - Region string `json:"region,omitempty"` - - // SecondaryIpRanges: An array of configurations for secondary IP ranges - // for VM instances contained in this subnetwork. The primary IP of such - // VM must belong to the primary ipCidrRange of the subnetwork. The - // alias IPs may belong to either primary or secondary ranges. This - // field can be updated with a patch request. - SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` +// ShieldedInstanceConfig: A set of Shielded Instance options. +type ShieldedInstanceConfig struct { + // EnableIntegrityMonitoring: Defines whether the instance has integrity + // monitoring enabled. + EnableIntegrityMonitoring bool `json:"enableIntegrityMonitoring,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // EnableSecureBoot: Defines whether the instance has Secure Boot + // enabled. + EnableSecureBoot bool `json:"enableSecureBoot,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // EnableVtpm: Defines whether the instance has the vTPM enabled. + EnableVtpm bool `json:"enableVtpm,omitempty"` // ForceSendFields is a list of field names (e.g. - // "AllowSubnetCidrRoutesOverlap") to unconditionally include in API + // "EnableIntegrityMonitoring") to unconditionally include in API // requests. By default, fields with empty values are omitted from API // requests. However, any non-pointer, non-interface field appearing in // ForceSendFields will be sent to the server regardless of whether the @@ -28174,52 +29642,41 @@ type Subnetwork struct { ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. - // "AllowSubnetCidrRoutesOverlap") to include in API requests with the - // JSON null value. By default, fields with empty values are omitted - // from API requests. However, any field with an empty value appearing - // in NullFields will be sent to the server as null. It is an error if a + // "EnableIntegrityMonitoring") to include in API requests with the JSON + // null value. By default, fields with empty values are omitted from API + // requests. However, any field with an empty value appearing in + // NullFields will be sent to the server as null. It is an error if a // field in this list has a non-empty value. This may be used to include // null fields in Patch requests. 
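// NOTE (editorial, illustrative sketch only; not part of the vendored
// generated file): the ForceSendFields mechanism documented above is how a
// caller sends a zero value that the "omitempty" JSON tags would otherwise
// drop. Assuming this package is imported as "compute", explicitly
// disabling Secure Boot in a patch body could be expressed roughly as
// follows; the surrounding request and call site are assumed:
//
//	cfg := &compute.ShieldedInstanceConfig{
//		EnableSecureBoot: false,
//		// Without ForceSendFields the false value would be omitted
//		// from the request body entirely.
//		ForceSendFields: []string{"EnableSecureBoot"},
//	}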
NullFields []string `json:"-"` } -func (s *Subnetwork) MarshalJSON() ([]byte, error) { - type NoMethod Subnetwork +func (s *ShieldedInstanceConfig) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedInstanceConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworkAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of SubnetworksScopedList resources. - Items map[string]SubnetworksScopedList `json:"items,omitempty"` +// ShieldedInstanceIdentity: A shielded Instance identity entry. +type ShieldedInstanceIdentity struct { + // EncryptionKey: An Endorsement Key (EK) issued to the Shielded + // Instance's vTPM. + EncryptionKey *ShieldedInstanceIdentityEntry `json:"encryptionKey,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#subnetworkAggregatedList for aggregated lists of subnetworks. + // Kind: [Output Only] Type of the resource. Always + // compute#shieldedInstanceIdentity for shielded Instance identity + // entry. Kind string `json:"kind,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` + // SigningKey: An Attestation Key (AK) issued to the Shielded Instance's + // vTPM. + SigningKey *ShieldedInstanceIdentityEntry `json:"signingKey,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "EncryptionKey") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28227,64 +29684,30 @@ type SubnetworkAggregatedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "EncryptionKey") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkAggregatedList +func (s *ShieldedInstanceIdentity) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedInstanceIdentity raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkAggregatedListWarning: [Output Only] Informational warning -// message. -type SubnetworkAggregatedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SubnetworkAggregatedListWarningData `json:"data,omitempty"` +// ShieldedInstanceIdentityEntry: A Shielded Instance Identity Entry. +type ShieldedInstanceIdentityEntry struct { + // EkCert: A PEM-encoded X.509 certificate. This field can be empty. + EkCert string `json:"ekCert,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // EkPub: A PEM-encoded public key. + EkPub string `json:"ekPub,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "EkCert") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28292,7 +29715,7 @@ type SubnetworkAggregatedListWarning struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API + // NullFields is a list of field names (e.g. "EkCert") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -28301,145 +29724,100 @@ type SubnetworkAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkAggregatedListWarning +func (s *ShieldedInstanceIdentityEntry) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedInstanceIdentityEntry raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworkAggregatedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. 
For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` +// ShieldedInstanceIntegrityPolicy: The policy describes the baseline +// against which Instance boot integrity is measured. +type ShieldedInstanceIntegrityPolicy struct { + // UpdateAutoLearnPolicy: Updates the integrity policy baseline using + // the measurements from the VM instance's most recent boot. + UpdateAutoLearnPolicy bool `json:"updateAutoLearnPolicy,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "UpdateAutoLearnPolicy") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "UpdateAutoLearnPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkAggregatedListWarningData +func (s *ShieldedInstanceIntegrityPolicy) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedInstanceIntegrityPolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkList: Contains a list of Subnetwork resources. -type SubnetworkList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Subnetwork resources. - Items []*Subnetwork `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#subnetworkList - // for lists of subnetworks. 
- Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +// ShieldedVmConfig: A set of Shielded VM options. +type ShieldedVmConfig struct { + // EnableIntegrityMonitoring: Defines whether the instance has integrity + // monitoring enabled. + EnableIntegrityMonitoring bool `json:"enableIntegrityMonitoring,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *SubnetworkListWarning `json:"warning,omitempty"` + // EnableSecureBoot: Defines whether the instance has Secure Boot + // enabled. + EnableSecureBoot bool `json:"enableSecureBoot,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // EnableVtpm: Defines whether the instance has the vTPM enabled. + EnableVtpm bool `json:"enableVtpm,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "EnableIntegrityMonitoring") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. + // "EnableIntegrityMonitoring") to include in API requests with the JSON + // null value. By default, fields with empty values are omitted from API + // requests. However, any field with an empty value appearing in + // NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SubnetworkList) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkList +func (s *ShieldedVmConfig) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedVmConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkListWarning: [Output Only] Informational warning message. 
-type SubnetworkListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` +// ShieldedVmIdentity: A shielded VM identity entry. +type ShieldedVmIdentity struct { + // EncryptionKey: An Endorsement Key (EK) issued to the Shielded VM's + // vTPM. + EncryptionKey *ShieldedVmIdentityEntry `json:"encryptionKey,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SubnetworkListWarningData `json:"data,omitempty"` + // Kind: [Output Only] Type of the resource. Always + // compute#shieldedVmIdentity for shielded VM identity entry. + Kind string `json:"kind,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // SigningKey: An Attestation Key (AK) issued to the Shielded VM's vTPM. + SigningKey *ShieldedVmIdentityEntry `json:"signingKey,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "EncryptionKey") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28447,36 +29825,30 @@ type SubnetworkListWarning struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "EncryptionKey") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
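// NOTE (editorial, illustrative sketch only; not part of the vendored
// generated file): the EkCert and EkPub fields of the identity entries
// defined below are plain PEM strings, so they can be parsed with the
// standard library (encoding/pem, crypto/x509). Assuming an identity value
// has already been fetched, decoding its signing key could look roughly
// like this (assumes a PKIX "PUBLIC KEY" block):
//
//	block, _ := pem.Decode([]byte(identity.SigningKey.EkPub))
//	if block == nil {
//		// not valid PEM
//	}
//	pub, err := x509.ParsePKIXPublicKey(block.Bytes)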
NullFields []string `json:"-"` } -func (s *SubnetworkListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkListWarning +func (s *ShieldedVmIdentity) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedVmIdentity raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworkListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +// ShieldedVmIdentityEntry: A Shielded Instance Identity Entry. +type ShieldedVmIdentityEntry struct { + // EkCert: A PEM-encoded X.509 certificate. This field can be empty. + EkCert string `json:"ekCert,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // EkPub: A PEM-encoded public key. + EkPub string `json:"ekPub,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "EkCert") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28484,7 +29856,7 @@ type SubnetworkListWarningData struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API + // NullFields is a list of field names (e.g. "EkCert") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -28493,29 +29865,60 @@ type SubnetworkListWarningData struct { NullFields []string `json:"-"` } -func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkListWarningData +func (s *ShieldedVmIdentityEntry) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedVmIdentityEntry raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkSecondaryRange: Represents a secondary IP range of a -// subnetwork. -type SubnetworkSecondaryRange struct { - // IpCidrRange: The range of IP addresses belonging to this subnetwork - // secondary range. Provide this property when you create the - // subnetwork. Ranges must be unique and non-overlapping with all - // primary and secondary IP ranges within a network. Only IPv4 is - // supported. - IpCidrRange string `json:"ipCidrRange,omitempty"` +// ShieldedVmIntegrityPolicy: The policy describes the baseline against +// which VM instance boot integrity is measured. +type ShieldedVmIntegrityPolicy struct { + // UpdateAutoLearnPolicy: Updates the integrity policy baseline using + // the measurements from the VM instance's most recent boot. + UpdateAutoLearnPolicy bool `json:"updateAutoLearnPolicy,omitempty"` - // RangeName: The name associated with this subnetwork secondary range, - // used when adding an alias IP range to a VM instance. 
The name must be - // 1-63 characters long, and comply with RFC1035. The name must be - // unique within the subnetwork. - RangeName string `json:"rangeName,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "UpdateAutoLearnPolicy") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // NullFields is a list of field names (e.g. "UpdateAutoLearnPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ShieldedVmIntegrityPolicy) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedVmIntegrityPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SignedUrlKey: Represents a customer-supplied Signing Key used by +// Cloud CDN Signed URLs +type SignedUrlKey struct { + // KeyName: Name of the key. The name must be 1-63 characters long, and + // comply with RFC1035. Specifically, the name must be 1-63 characters + // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + // which means the first character must be a lowercase letter, and all + // following characters must be a dash, lowercase letter, or digit, + // except the last character, which cannot be a dash. + KeyName string `json:"keyName,omitempty"` + + // KeyValue: 128-bit key value used for signing the URL. The key value + // must be a valid RFC 4648 Section 5 base64url encoded string. + KeyValue string `json:"keyValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "KeyName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28523,30 +29926,151 @@ type SubnetworkSecondaryRange struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "KeyName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkSecondaryRange +func (s *SignedUrlKey) MarshalJSON() ([]byte, error) { + type NoMethod SignedUrlKey raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksExpandIpCidrRangeRequest struct { - // IpCidrRange: The IP (in CIDR format or netmask) of internal addresses - // that are legal on this Subnetwork. This range should be disjoint from - // other subnetworks within this network. This range can only be larger - // than (i.e. a superset of) the range previously defined before the - // update. - IpCidrRange string `json:"ipCidrRange,omitempty"` +// Snapshot: A persistent disk snapshot resource. (== resource_for +// beta.snapshots ==) (== resource_for v1.snapshots ==) +type Snapshot struct { + // AutoCreated: [Output Only] Set to true if snapshots are automatically + // created by applying resource policy on the target disk. + AutoCreated bool `json:"autoCreated,omitempty"` - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // DiskSizeGb: [Output Only] Size of the snapshot, specified in GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#snapshot for + // Snapshot resources. + Kind string `json:"kind,omitempty"` + + // LabelFingerprint: A fingerprint for the labels being applied to this + // snapshot, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels, otherwise the request will fail with error + // 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve a + // snapshot. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this snapshot. These can be later modified + // by the setLabels method. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // LicenseCodes: [Output Only] Integer license codes indicating which + // licenses are attached to this snapshot. + LicenseCodes googleapi.Int64s `json:"licenseCodes,omitempty"` + + // Licenses: [Output Only] A list of public visible licenses that apply + // to this snapshot. This can be because the original image had licenses + // attached (such as a Windows image). + Licenses []string `json:"licenses,omitempty"` + + // Name: Name of the resource; provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035.
Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SnapshotEncryptionKey: Encrypts the snapshot using a + // customer-supplied encryption key. + // + // After you encrypt a snapshot using a customer-supplied key, you must + // provide the same key if you use the snapshot later. For example, you + // must provide the encryption key when you create a disk from the + // encrypted snapshot in a future request. + // + // Customer-supplied encryption keys do not protect access to metadata + // of the snapshot. + // + // If you do not provide an encryption key when creating the snapshot, + // then the snapshot will be encrypted using an automatically generated + // key and you do not need to provide a key to use the snapshot later. + SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` + + // SourceDisk: [Output Only] The source disk used to create this + // snapshot. + SourceDisk string `json:"sourceDisk,omitempty"` + + // SourceDiskEncryptionKey: The customer-supplied encryption key of the + // source disk. Required if the source disk is protected by a + // customer-supplied encryption key. + SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` + + // SourceDiskId: [Output Only] The ID value of the disk used to create + // this snapshot. This value may be used to determine whether the + // snapshot was taken from the current or a previous instance of a given + // disk name. + SourceDiskId string `json:"sourceDiskId,omitempty"` + + // Status: [Output Only] The status of the snapshot. This can be + // CREATING, DELETING, FAILED, READY, or UPLOADING. + // + // Possible values: + // "CREATING" + // "DELETING" + // "FAILED" + // "READY" + // "UPLOADING" + Status string `json:"status,omitempty"` + + // StorageBytes: [Output Only] A size of the storage used by the + // snapshot. As snapshots share storage, this number is expected to + // change with snapshot creation/deletion. + StorageBytes int64 `json:"storageBytes,omitempty,string"` + + // StorageBytesStatus: [Output Only] An indicator whether storageBytes + // is in a stable state or it is being adjusted as a result of shared + // storage reallocation. This status can either be UPDATING, meaning the + // size of the snapshot is being updated, or UP_TO_DATE, meaning the + // size of the snapshot is up-to-date. + // + // Possible values: + // "UPDATING" + // "UP_TO_DATE" + StorageBytesStatus string `json:"storageBytesStatus,omitempty"` + + // StorageLocations: GCS bucket storage location of the snapshot + // (regional or multi-regional). + StorageLocations []string `json:"storageLocations,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AutoCreated") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28554,7 +30078,7 @@ type SubnetworksExpandIpCidrRangeRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpCidrRange") to include + // NullFields is a list of field names (e.g. "AutoCreated") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -28563,21 +30087,43 @@ type SubnetworksExpandIpCidrRangeRequest struct { NullFields []string `json:"-"` } -func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksExpandIpCidrRangeRequest +func (s *Snapshot) MarshalJSON() ([]byte, error) { + type NoMethod Snapshot raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksScopedList struct { - // Subnetworks: A list of subnetworks contained in this scope. - Subnetworks []*Subnetwork `json:"subnetworks,omitempty"` +// SnapshotList: Contains a list of Snapshot resources. +type SnapshotList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Warning: An informational warning that appears when the list of - // addresses is empty. - Warning *SubnetworksScopedListWarning `json:"warning,omitempty"` + // Items: A list of Snapshot resources. + Items []*Snapshot `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "Subnetworks") to + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *SnapshotListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28585,24 +30131,23 @@ type SubnetworksScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Subnetworks") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksScopedList +func (s *SnapshotList) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworksScopedListWarning: An informational warning that appears -// when the list of addresses is empty. -type SubnetworksScopedListWarning struct { +// SnapshotListWarning: [Output Only] Informational warning message. +type SnapshotListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -28636,7 +30181,7 @@ type SubnetworksScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SubnetworksScopedListWarningData `json:"data,omitempty"` + Data []*SnapshotListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -28659,13 +30204,13 @@ type SubnetworksScopedListWarning struct { NullFields []string `json:"-"` } -func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksScopedListWarning +func (s *SnapshotListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksScopedListWarningData struct { +type SnapshotListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -28696,97 +30241,23 @@ type SubnetworksScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksScopedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SubnetworksSetPrivateIpGoogleAccessRequest struct { - PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "PrivateIpGoogleAccess") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "PrivateIpGoogleAccess") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksSetPrivateIpGoogleAccessRequest +func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TCPHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 80. Valid values are 1 through 65535. - Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` - - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: - // USE_FIXED_PORT: The port number in - // port - // is used for health checking. - // USE_NAMED_PORT: The - // portName - // is used for health checking. - // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for - // each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. - // - // - // If not specified, TCP health check follows behavior specified - // in - // port - // and - // portName - // fields. - // - // Possible values: - // "USE_FIXED_PORT" - // "USE_NAMED_PORT" - // "USE_SERVING_PORT" - PortSpecification string `json:"portSpecification,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // Request: The application data to send once the TCP connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. - Request string `json:"request,omitempty"` - - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. - Response string `json:"response,omitempty"` +// SourceInstanceParams: A specification of the parameters to use when +// creating the instance template from a source instance. +type SourceInstanceParams struct { + // DiskConfigs: Attached disks configuration. If not provided, defaults + // are applied: For boot disk and any other R/W disks, new custom images + // will be created from each disk. For read-only disks, they will be + // attached in read-only mode. Local SSD disks will be created as blank + // volumes. + DiskConfigs []*DiskInstantiationConfig `json:"diskConfigs,omitempty"` - // ForceSendFields is a list of field names (e.g. "Port") to + // ForceSendFields is a list of field names (e.g. "DiskConfigs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28794,64 +30265,31 @@ type TCPHealthCheck struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Port") to include in API - // requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "DiskConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { - type NoMethod TCPHealthCheck +func (s *SourceInstanceParams) MarshalJSON() ([]byte, error) { + type NoMethod SourceInstanceParams raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Tags: A set of instance tags. -type Tags struct { - // Fingerprint: Specifies a fingerprint for this request, which is - // essentially a hash of the tags' contents and used for optimistic - // locking. The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update tags. You must always - // provide an up-to-date fingerprint hash in order to update or change - // tags. - // - // To see the latest fingerprint, make get() request to the instance. - Fingerprint string `json:"fingerprint,omitempty"` +// SslCertificate: An SslCertificate resource. This resource provides a +// mechanism to upload an SSL key and certificate to the load balancer +// to serve secure connections from the user. (== resource_for +// beta.sslCertificates ==) (== resource_for v1.sslCertificates ==) +type SslCertificate struct { + // Certificate: A local certificate file. The certificate must be in PEM + // format. The certificate chain must be no greater than 5 certs long. + // The chain must include at least one intermediate cert. + Certificate string `json:"certificate,omitempty"` - // Items: An array of tags. Each tag must be 1-63 characters long, and - // comply with RFC1035. - Items []string `json:"items,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Fingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Tags) MarshalJSON() ([]byte, error) { - type NoMethod Tags - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetHttpProxy: A TargetHttpProxy resource. This resource defines an -// HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== -// resource_for v1.targetHttpProxies ==) -type TargetHttpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -28860,14 +30298,20 @@ type TargetHttpProxy struct { // property when you create the resource. Description string `json:"description,omitempty"` + // ExpireTime: [Output Only] Expire time of the certificate. RFC3339 + ExpireTime string `json:"expireTime,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy - // for target HTTP proxies. + // Kind: [Output Only] Type of the resource. Always + // compute#sslCertificate for SSL certificates. Kind string `json:"kind,omitempty"` + // Managed: Configuration and status of a managed SSL certificate. + Managed *SslCertificateManagedSslCertificate `json:"managed,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -28877,52 +30321,73 @@ type TargetHttpProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // PrivateKey: A write-only private key in PEM format. Only insert + // requests will include this field. + PrivateKey string `json:"privateKey,omitempty"` + + // Region: [Output Only] URL of the region where the regional SSL + // Certificate resides. This field is not applicable to global SSL + // Certificate. + Region string `json:"region,omitempty"` + + // SelfLink: [Output only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // UrlMap: URL to the UrlMap resource that defines the mapping from URL - // to the BackendService. - UrlMap string `json:"urlMap,omitempty"` + // SelfManaged: Configuration and status of a self-managed SSL + // certificate. + SelfManaged *SslCertificateSelfManagedSslCertificate `json:"selfManaged,omitempty"` + + // SubjectAlternativeNames: [Output Only] Domains associated with the + // certificate via Subject Alternative Name. + SubjectAlternativeNames []string `json:"subjectAlternativeNames,omitempty"` + + // Type: (Optional) Specifies the type of SSL certificate, either + // "SELF_MANAGED" or "MANAGED". If not specified, the certificate is + // self-managed and the fields certificate and private_key are used. + // + // Possible values: + // "MANAGED" + // "SELF_MANAGED" + // "TYPE_UNSPECIFIED" + Type string `json:"type,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Certificate") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. 
However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Certificate") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxy +func (s *SslCertificate) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificate raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxyList: A list of TargetHttpProxy resources. -type TargetHttpProxyList struct { +type SslCertificateAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetHttpProxy resources. - Items []*TargetHttpProxy `json:"items,omitempty"` + // Items: A list of SslCertificatesScopedList resources. + Items map[string]SslCertificatesScopedList `json:"items,omitempty"` - // Kind: Type of resource. Always compute#targetHttpProxyList for lists - // of target HTTP proxies. + // Kind: [Output Only] Type of resource. Always + // compute#sslCertificateAggregatedList for lists of SSL Certificates. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -28937,7 +30402,7 @@ type TargetHttpProxyList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetHttpProxyListWarning `json:"warning,omitempty"` + Warning *SslCertificateAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -28960,15 +30425,15 @@ type TargetHttpProxyList struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxyList +func (s *SslCertificateAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxyListWarning: [Output Only] Informational warning -// message. -type TargetHttpProxyListWarning struct { +// SslCertificateAggregatedListWarning: [Output Only] Informational +// warning message. +type SslCertificateAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -29002,7 +30467,7 @@ type TargetHttpProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetHttpProxyListWarningData `json:"data,omitempty"` + Data []*SslCertificateAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
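The SslCertificate type and its *List/*Warning wrappers introduced above all follow the same ForceSendFields/NullFields convention spelled out in their field comments, with the generated MarshalJSON delegating to gensupport.MarshalJSON. Below is a minimal sketch (not part of the vendored diff) of how that convention behaves when a caller of this generated package serializes a certificate; the import path is the upstream package being vendored here, and the resource name and PEM strings are placeholders.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	cert := &compute.SslCertificate{
		Name:        "example-cert", // placeholder name
		Type:        "SELF_MANAGED",
		Certificate: "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----", // placeholder PEM
		PrivateKey:  "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----", // placeholder PEM
		// Description is empty, so it would normally be omitted from the request
		// body; listing it in ForceSendFields sends the empty value anyway, while
		// naming a field in NullFields would send an explicit JSON null instead.
		ForceSendFields: []string{"Description"},
	}

	body, err := json.Marshal(cert) // delegates to the generated MarshalJSON above
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}

This is what makes sparse Patch requests possible: zero-valued fields are dropped unless explicitly forced or nulled, exactly as the generated comments describe.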
@@ -29025,13 +30490,13 @@ type TargetHttpProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxyListWarning +func (s *SslCertificateAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpProxyListWarningData struct { +type SslCertificateAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -29062,180 +30527,22 @@ type TargetHttpProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxyListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetHttpsProxiesSetQuicOverrideRequest struct { - // QuicOverride: QUIC policy for the TargetHttpsProxy resource. - // - // Possible values: - // "DISABLE" - // "ENABLE" - // "NONE" - QuicOverride string `json:"quicOverride,omitempty"` - - // ForceSendFields is a list of field names (e.g. "QuicOverride") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "QuicOverride") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpsProxiesSetQuicOverrideRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxiesSetQuicOverrideRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetHttpsProxiesSetSslCertificatesRequest struct { - // SslCertificates: New set of SslCertificate resources to associate - // with this TargetHttpsProxy resource. Currently exactly one - // SslCertificate resource must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` - - // ForceSendFields is a list of field names (e.g. "SslCertificates") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "SslCertificates") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. 
It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxiesSetSslCertificatesRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines -// an HTTPS proxy. (== resource_for beta.targetHttpsProxies ==) (== -// resource_for v1.targetHttpsProxies ==) -type TargetHttpsProxy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. Always compute#targetHttpsProxy - // for target HTTPS proxies. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // QuicOverride: Specifies the QUIC override policy for this - // TargetHttpsProxy resource. This determines whether the load balancer - // will attempt to negotiate QUIC with clients or not. Can specify one - // of NONE, ENABLE, or DISABLE. Specify ENABLE to always enable QUIC, - // Enables QUIC when set to ENABLE, and disables QUIC when set to - // DISABLE. If NONE is specified, uses the QUIC policy with no user - // overrides, which is equivalent to DISABLE. Not specifying this field - // is equivalent to specifying NONE. - // - // Possible values: - // "DISABLE" - // "ENABLE" - // "NONE" - QuicOverride string `json:"quicOverride,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SslCertificates: URLs to SslCertificate resources that are used to - // authenticate connections between users and the load balancer. At - // least one SSL certificate must be specified. Currently, you may - // specify up to 15 SSL certificates. - SslCertificates []string `json:"sslCertificates,omitempty"` - - // SslPolicy: URL of SslPolicy resource that will be associated with the - // TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource - // will not have any SSL policy configured. - SslPolicy string `json:"sslPolicy,omitempty"` - - // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource - // that defines the mapping from URL to the BackendService. 
For example, - // the following are all valid URLs for specifying a URL map: - // - - // https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map - // - projects/project/global/urlMaps/url-map - // - global/urlMaps/url-map - UrlMap string `json:"urlMap,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxy +func (s *SslCertificateAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. -type TargetHttpsProxyList struct { +// SslCertificateList: Contains a list of SslCertificate resources. +type SslCertificateList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetHttpsProxy resources. - Items []*TargetHttpsProxy `json:"items,omitempty"` + // Items: A list of SslCertificate resources. + Items []*SslCertificate `json:"items,omitempty"` - // Kind: Type of resource. Always compute#targetHttpsProxyList for lists - // of target HTTPS proxies. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -29250,7 +30557,7 @@ type TargetHttpsProxyList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetHttpsProxyListWarning `json:"warning,omitempty"` + Warning *SslCertificateListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -29273,15 +30580,15 @@ type TargetHttpsProxyList struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxyList +func (s *SslCertificateList) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxyListWarning: [Output Only] Informational warning +// SslCertificateListWarning: [Output Only] Informational warning // message. -type TargetHttpsProxyListWarning struct { +type SslCertificateListWarning struct { // Code: [Output Only] A warning code, if applicable. 
For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -29315,7 +30622,7 @@ type TargetHttpsProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetHttpsProxyListWarningData `json:"data,omitempty"` + Data []*SslCertificateListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -29338,13 +30645,13 @@ type TargetHttpsProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxyListWarning +func (s *SslCertificateListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpsProxyListWarningData struct { +type SslCertificateListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -29375,125 +30682,101 @@ type TargetHttpsProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxyListWarningData +func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstance: A TargetInstance resource. This resource defines an -// endpoint instance that terminates traffic of certain protocols. (== -// resource_for beta.targetInstances ==) (== resource_for -// v1.targetInstances ==) -type TargetInstance struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Instance: A URL to the virtual machine instance that handles traffic - // for this target instance. When creating a target instance, you can - // provide the fully-qualified URL or a valid partial URL to the desired - // virtual machine. For example, the following are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance - // - projects/project/zones/zone/instances/instance - // - zones/zone/instances/instance - Instance string `json:"instance,omitempty"` - - // Kind: [Output Only] The type of the resource. Always - // compute#targetInstance for target instances. - Kind string `json:"kind,omitempty"` +// SslCertificateManagedSslCertificate: Configuration and status of a +// managed SSL certificate. +type SslCertificateManagedSslCertificate struct { + // DomainStatus: [Output only] Detailed statuses of the domains + // specified for managed certificate resource. + DomainStatus map[string]string `json:"domainStatus,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. 
The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` + // Domains: The domains for which a managed SSL certificate will be + // generated. Currently only single-domain certs are supported. + Domains []string `json:"domains,omitempty"` - // NatPolicy: NAT option controlling how IPs are NAT'ed to the instance. - // Currently only NO_NAT (default value) is supported. + // Status: [Output only] Status of the managed certificate resource. // // Possible values: - // "NO_NAT" - NatPolicy string `json:"natPolicy,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Zone: [Output Only] URL of the zone where the target instance - // resides. You must specify this field as part of the HTTP request URL. - // It is not settable as a field in the request body. - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // "ACTIVE" + // "MANAGED_CERTIFICATE_STATUS_UNSPECIFIED" + // "PROVISIONING" + // "PROVISIONING_FAILED" + // "PROVISIONING_FAILED_PERMANENTLY" + // "RENEWAL_FAILED" + Status string `json:"status,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "DomainStatus") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "DomainStatus") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetInstance) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstance +func (s *SslCertificateManagedSslCertificate) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateManagedSslCertificate raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstanceAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. 
- Id string `json:"id,omitempty"` +// SslCertificateSelfManagedSslCertificate: Configuration and status of +// a self-managed SSL certificate. +type SslCertificateSelfManagedSslCertificate struct { + // Certificate: A local certificate file. The certificate must be in PEM + // format. The certificate chain must be no greater than 5 certs long. + // The chain must include at least one intermediate cert. + Certificate string `json:"certificate,omitempty"` - // Items: A list of TargetInstance resources. - Items map[string]TargetInstancesScopedList `json:"items,omitempty"` + // PrivateKey: A write-only private key in PEM format. Only insert + // requests will include this field. + PrivateKey string `json:"privateKey,omitempty"` - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` + // ForceSendFields is a list of field names (e.g. "Certificate") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // NullFields is a list of field names (e.g. "Certificate") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +func (s *SslCertificateSelfManagedSslCertificate) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateSelfManagedSslCertificate + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // Warning: [Output Only] Informational warning message. - Warning *TargetInstanceAggregatedListWarning `json:"warning,omitempty"` +type SslCertificatesScopedList struct { + // SslCertificates: List of SslCertificates contained in this scope. + SslCertificates []*SslCertificate `json:"sslCertificates,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *SslCertificatesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "SslCertificates") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -29501,24 +30784,25 @@ type TargetInstanceAggregatedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "SslCertificates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceAggregatedList +func (s *SslCertificatesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificatesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstanceAggregatedListWarning: [Output Only] Informational -// warning message. -type TargetInstanceAggregatedListWarning struct { +// SslCertificatesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type SslCertificatesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -29552,7 +30836,7 @@ type TargetInstanceAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetInstanceAggregatedListWarningData `json:"data,omitempty"` + Data []*SslCertificatesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -29575,13 +30859,13 @@ type TargetInstanceAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceAggregatedListWarning +func (s *SslCertificatesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificatesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstanceAggregatedListWarningData struct { +type SslCertificatesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -29612,22 +30896,22 @@ type TargetInstanceAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceAggregatedListWarningData +func (s *SslCertificatesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificatesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstanceList: Contains a list of TargetInstance resources. -type TargetInstanceList struct { +type SslPoliciesList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetInstance resources. - Items []*TargetInstance `json:"items,omitempty"` + // Items: A list of SslPolicy resources. + Items []*SslPolicy `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of the resource. Always + // compute#sslPoliciesList for lists of sslPolicies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -29642,7 +30926,7 @@ type TargetInstanceList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetInstanceListWarning `json:"warning,omitempty"` + Warning *SslPoliciesListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -29665,15 +30949,14 @@ type TargetInstanceList struct { NullFields []string `json:"-"` } -func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceList +func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstanceListWarning: [Output Only] Informational warning -// message. -type TargetInstanceListWarning struct { +// SslPoliciesListWarning: [Output Only] Informational warning message. +type SslPoliciesListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -29707,7 +30990,7 @@ type TargetInstanceListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetInstanceListWarningData `json:"data,omitempty"` + Data []*SslPoliciesListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -29730,13 +31013,13 @@ type TargetInstanceListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstanceListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceListWarning +func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstanceListWarningData struct { +type SslPoliciesListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -29767,21 +31050,20 @@ type TargetInstanceListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceListWarningData +func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstancesScopedList struct { - // TargetInstances: A list of target instances contained in this scope. - TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` +type SslPoliciesListAvailableFeaturesResponse struct { + Features []string `json:"features,omitempty"` - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "TargetInstances") to + // ForceSendFields is a list of field names (e.g. "Features") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -29789,7 +31071,116 @@ type TargetInstancesScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "TargetInstances") to + // NullFields is a list of field names (e.g. "Features") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListAvailableFeaturesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SslPolicy: A SSL policy specifies the server-side support for SSL +// features. This can be attached to a TargetHttpsProxy or a +// TargetSslProxy. This affects connections between clients and the +// HTTPS or SSL proxy load balancer. They do not affect the connection +// between the load balancers and the backends. +type SslPolicy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // CustomFeatures: A list of features enabled when the selected profile + // is CUSTOM. The + // - method returns the set of features that can be specified in this + // list. This field must be empty if the profile is not CUSTOM. + CustomFeatures []string `json:"customFeatures,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // EnabledFeatures: [Output Only] The list of features enabled in the + // SSL policy. 
+ EnabledFeatures []string `json:"enabledFeatures,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a SslPolicy. An up-to-date + // fingerprint must be provided in order to update the SslPolicy, + // otherwise the request will fail with error 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve an + // SslPolicy. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output only] Type of the resource. Always compute#sslPolicyfor + // SSL policies. + Kind string `json:"kind,omitempty"` + + // MinTlsVersion: The minimum version of SSL protocol that can be used + // by the clients to establish a connection with the load balancer. This + // can be one of TLS_1_0, TLS_1_1, TLS_1_2. + // + // Possible values: + // "TLS_1_0" + // "TLS_1_1" + // "TLS_1_2" + MinTlsVersion string `json:"minTlsVersion,omitempty"` + + // Name: Name of the resource. The name must be 1-63 characters long, + // and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + // Profile: Profile specifies the set of SSL features that can be used + // by the load balancer when negotiating SSL with clients. This can be + // one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, + // the set of SSL features to enable must be specified in the + // customFeatures field. + // + // Possible values: + // "COMPATIBLE" + // "CUSTOM" + // "MODERN" + // "RESTRICTED" + Profile string `json:"profile,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warnings: [Output Only] If potential misconfigurations are detected + // for this SSL policy, this field will be populated with warning + // messages. + Warnings []*SslPolicyWarnings `json:"warnings,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -29799,15 +31190,13 @@ type TargetInstancesScopedList struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstancesScopedList +func (s *SslPolicy) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstancesScopedListWarning: Informational warning which -// replaces the list of addresses when the list is empty. -type TargetInstancesScopedListWarning struct { +type SslPolicyWarnings struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -29841,7 +31230,7 @@ type TargetInstancesScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"` + Data []*SslPolicyWarningsData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -29864,13 +31253,13 @@ type TargetInstancesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstancesScopedListWarning +func (s *SslPolicyWarnings) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicyWarnings raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstancesScopedListWarningData struct { +type SslPolicyWarningsData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -29901,167 +31290,219 @@ type TargetInstancesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstancesScopedListWarningData +func (s *SslPolicyWarningsData) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicyWarningsData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPool: A TargetPool resource. This resource defines a pool of -// instances, an associated HttpHealthCheck resource, and the fallback -// target pool. (== resource_for beta.targetPools ==) (== resource_for -// v1.targetPools ==) -type TargetPool struct { - // BackupPool: This field is applicable only when the containing target - // pool is serving a forwarding rule as the primary pool, and its - // failoverRatio field is properly set to a value between [0, - // 1]. +type SslPolicyReference struct { + // SslPolicy: URL of the SSL policy resource. Set this to empty string + // to clear any existing SSL policy associated with the target proxy + // resource. + SslPolicy string `json:"sslPolicy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslPolicy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
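The SslPolicy resource added above is configured mainly through Profile (COMPATIBLE, MODERN, RESTRICTED, or CUSTOM) and MinTlsVersion, with CustomFeatures reserved for the CUSTOM profile. A minimal sketch of building such a policy with the generated types, assuming the upstream import path; the policy name is a placeholder, and the service call mentioned in the trailing comment refers to the generated SslPolicies service.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// A policy using a predefined profile only needs Profile and MinTlsVersion;
	// CustomFeatures must stay empty unless Profile is "CUSTOM".
	policy := &compute.SslPolicy{
		Name:          "example-ssl-policy", // placeholder name
		Profile:       "MODERN",
		MinTlsVersion: "TLS_1_2",
	}

	b, _ := json.Marshal(policy) // uses the generated MarshalJSON above
	fmt.Println(string(b))
	// A real request would pass this through the generated service, e.g.
	// svc.SslPolicies.Insert(project, policy).Do(), once a *compute.Service
	// has been constructed with credentials.
}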
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslPolicy") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPolicyReference) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicyReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Subnetwork: A Subnetwork resource. (== resource_for beta.subnetworks +// ==) (== resource_for v1.subnetworks ==) +type Subnetwork struct { + // AllowSubnetCidrRoutesOverlap: Whether this subnetwork can conflict + // with static routes. Setting this to true allows this subnetwork's + // primary and secondary ranges to conflict with routes that have + // already been configured on the corresponding network. Static routes + // will take precedence over the subnetwork route if the route prefix + // length is at least as large as the subnetwork prefix length. // - // backupPool and failoverRatio together define the fallback behavior of - // the primary target pool: if the ratio of the healthy instances in the - // primary pool is at or below failoverRatio, traffic arriving at the - // load-balanced IP will be directed to the backup pool. + // Also, packets destined to IPs within subnetwork may contain + // private/sensitive data and are prevented from leaving the virtual + // network. Setting this field to true will disable this feature. // - // In case where failoverRatio and backupPool are not set, or all the - // instances in the backup pool are unhealthy, the traffic will be - // directed back to the primary pool in the "force" mode, where traffic - // will be spread to the healthy instances with the best effort, or to - // all instances when no instance is healthy. - BackupPool string `json:"backupPool,omitempty"` + // The default value is false and applies to all existing subnetworks + // and automatically created subnetworks. + // + // This field cannot be set to true at resource creation time. + AllowSubnetCidrRoutesOverlap bool `json:"allowSubnetCidrRoutesOverlap,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` // Description: An optional description of this resource. Provide this - // property when you create the resource. + // property when you create the resource. This field can be set only at + // resource creation time. Description string `json:"description,omitempty"` - // FailoverRatio: This field is applicable only when the containing - // target pool is serving a forwarding rule as the primary pool (i.e., - // not as a backup pool to some other target pool). The value of the - // field must be in [0, 1]. - // - // If set, backupPool must also be set. They together define the - // fallback behavior of the primary target pool: if the ratio of the - // healthy instances in the primary pool is at or below this number, - // traffic arriving at the load-balanced IP will be directed to the - // backup pool. + // EnableFlowLogs: Whether to enable flow logging for this subnetwork. + // If this field is not explicitly set, it will not appear in get + // listings. 
If not set the default behavior is to disable flow logging. + EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a Subnetwork. An up-to-date + // fingerprint must be provided in order to update the Subnetwork, + // otherwise the request will fail with error 412 conditionNotMet. // - // In case where failoverRatio is not set or all the instances in the - // backup pool are unhealthy, the traffic will be directed back to the - // primary pool in the "force" mode, where traffic will be spread to the - // healthy instances with the best effort, or to all instances when no - // instance is healthy. - FailoverRatio float64 `json:"failoverRatio,omitempty"` + // To see the latest fingerprint, make a get() request to retrieve a + // Subnetwork. + Fingerprint string `json:"fingerprint,omitempty"` - // HealthChecks: The URL of the HttpHealthCheck resource. A member - // instance in this pool is considered healthy if and only if the health - // checks pass. An empty list means all member instances will be - // considered healthy at all times. Only HttpHealthChecks are supported. - // Only one health check may be specified. - HealthChecks []string `json:"healthChecks,omitempty"` + // GatewayAddress: [Output Only] The gateway address for default routes + // to reach destination addresses outside this subnetwork. + GatewayAddress string `json:"gatewayAddress,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Instances: A list of resource URLs to the virtual machine instances - // serving this pool. They must live in zones contained in the same - // region as this pool. - Instances []string `json:"instances,omitempty"` + // IpCidrRange: The range of internal addresses that are owned by this + // subnetwork. Provide this property when you create the subnetwork. For + // example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and + // non-overlapping within a network. Only IPv4 is supported. This field + // can be set only at resource creation time. + IpCidrRange string `json:"ipCidrRange,omitempty"` - // Kind: [Output Only] Type of the resource. Always compute#targetPool - // for target pools. + // Kind: [Output Only] Type of the resource. Always compute#subnetwork + // for Subnetwork resources. Kind string `json:"kind,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // LogConfig: This field denotes the VPC flow logging options for this + // subnetwork. If logging is enabled, logs are exported to Stackdriver. + LogConfig *SubnetworkLogConfig `json:"logConfig,omitempty"` + + // Name: The name of the resource, provided by the client when initially + // creating the resource. The name must be 1-63 characters long, and + // comply with RFC1035. 
Specifically, the name must be 1-63 characters + // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + // which means the first character must be a lowercase letter, and all + // following characters must be a dash, lowercase letter, or digit, + // except the last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Region: [Output Only] URL of the region where the target pool - // resides. + // Network: The URL of the network to which this subnetwork belongs, + // provided by the client when initially creating the subnetwork. Only + // networks that are in the distributed mode can have subnetworks. This + // field can be set only at resource creation time. + Network string `json:"network,omitempty"` + + // PrivateIpGoogleAccess: Whether the VMs in this subnet can access + // Google services without assigned external IP addresses. This field + // can be both set at resource creation time and updated using + // setPrivateIpGoogleAccess. + PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` + + // Purpose: The purpose of the resource. This field can be either + // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with + // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created + // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If + // unspecified, the purpose defaults to PRIVATE_RFC_1918. + // + // Possible values: + // "INTERNAL_HTTPS_LOAD_BALANCER" + // "PRIVATE" + // "PRIVATE_RFC_1918" + Purpose string `json:"purpose,omitempty"` + + // Region: URL of the region where the Subnetwork resides. This field + // can be set only at resource creation time. Region string `json:"region,omitempty"` + // Role: The role of subnetwork. Currenly, this field is only used when + // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to + // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being + // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one + // that is ready to be promoted to ACTIVE or is currently draining. This + // field can be updated with a patch request. + // + // Possible values: + // "ACTIVE" + // "BACKUP" + Role string `json:"role,omitempty"` + + // SecondaryIpRanges: An array of configurations for secondary IP ranges + // for VM instances contained in this subnetwork. The primary IP of such + // VM must belong to the primary ipCidrRange of the subnetwork. The + // alias IPs may belong to either primary or secondary ranges. This + // field can be updated with a patch request. + SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SessionAffinity: Session affinity option, must be one of the - // following values: - // NONE: Connections from the same client IP may go to any instance in - // the pool. - // CLIENT_IP: Connections from the same client IP will go to the same - // instance in the pool while that instance remains - // healthy. - // CLIENT_IP_PROTO: Connections from the same client IP with the same IP - // protocol will go to the same instance in the pool while that instance - // remains healthy. + // State: [Output Only] The state of the subnetwork, which can be one of + // READY or DRAINING. A subnetwork that is READY is ready to be used. 
+ // The state of DRAINING is only applicable to subnetworks that have the + // purpose set to INTERNAL_HTTPS_LOAD_BALANCER and indicates that + // connections to the load balancer are being drained. A subnetwork that + // is draining cannot be used or modified until it reaches a status of + // READY. // // Possible values: - // "CLIENT_IP" - // "CLIENT_IP_PORT_PROTO" - // "CLIENT_IP_PROTO" - // "GENERATED_COOKIE" - // "NONE" - SessionAffinity string `json:"sessionAffinity,omitempty"` + // "DRAINING" + // "READY" + State string `json:"state,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "BackupPool") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AllowSubnetCidrRoutesOverlap") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BackupPool") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. + // "AllowSubnetCidrRoutesOverlap") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetPool) MarshalJSON() ([]byte, error) { - type NoMethod TargetPool +func (s *Subnetwork) MarshalJSON() ([]byte, error) { + type NoMethod Subnetwork raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -func (s *TargetPool) UnmarshalJSON(data []byte) error { - type NoMethod TargetPool - var s1 struct { - FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.FailoverRatio = float64(s1.FailoverRatio) - return nil -} - -type TargetPoolAggregatedList struct { +type SubnetworkAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetPool resources. - Items map[string]TargetPoolsScopedList `json:"items,omitempty"` + // Items: A list of SubnetworksScopedList resources. + Items map[string]SubnetworksScopedList `json:"items,omitempty"` // Kind: [Output Only] Type of resource. 
Always - // compute#targetPoolAggregatedList for aggregated lists of target - // pools. + // compute#subnetworkAggregatedList for aggregated lists of subnetworks. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -30076,7 +31517,7 @@ type TargetPoolAggregatedList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetPoolAggregatedListWarning `json:"warning,omitempty"` + Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -30099,15 +31540,15 @@ type TargetPoolAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolAggregatedList +func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolAggregatedListWarning: [Output Only] Informational warning +// SubnetworkAggregatedListWarning: [Output Only] Informational warning // message. -type TargetPoolAggregatedListWarning struct { +type SubnetworkAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -30141,7 +31582,7 @@ type TargetPoolAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetPoolAggregatedListWarningData `json:"data,omitempty"` + Data []*SubnetworkAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -30164,13 +31605,13 @@ type TargetPoolAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolAggregatedListWarning +func (s *SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolAggregatedListWarningData struct { +type SubnetworkAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -30201,58 +31642,23 @@ type TargetPoolAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolAggregatedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetPoolInstanceHealth struct { - HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#targetPoolInstanceHealth when checking the health of an - // instance. - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "HealthStatus") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "HealthStatus") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolInstanceHealth +func (s *SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolList: Contains a list of TargetPool resources. -type TargetPoolList struct { +// SubnetworkList: Contains a list of Subnetwork resources. +type SubnetworkList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetPool resources. - Items []*TargetPool `json:"items,omitempty"` + // Items: A list of Subnetwork resources. + Items []*Subnetwork `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#targetPoolList - // for lists of target pools. + // Kind: [Output Only] Type of resource. Always compute#subnetworkList + // for lists of subnetworks. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -30267,7 +31673,7 @@ type TargetPoolList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetPoolListWarning `json:"warning,omitempty"` + Warning *SubnetworkListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -30290,14 +31696,14 @@ type TargetPoolList struct { NullFields []string `json:"-"` } -func (s *TargetPoolList) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolList +func (s *SubnetworkList) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolListWarning: [Output Only] Informational warning message. -type TargetPoolListWarning struct { +// SubnetworkListWarning: [Output Only] Informational warning message. +type SubnetworkListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -30331,7 +31737,7 @@ type TargetPoolListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetPoolListWarningData `json:"data,omitempty"` + Data []*SubnetworkListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
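The ForceSendFields/NullFields comments in the hunks above describe how empty values are handled by the generated MarshalJSON methods. A minimal sketch of that behavior, assuming this vendored file is the generated google.golang.org/api/compute/v1 package and using a hypothetical subnetwork name and CIDR:

	package main

	import (
		"fmt"
		"log"

		compute "google.golang.org/api/compute/v1" // assumed canonical import for this vendored file
	)

	func main() {
		// privateIpGoogleAccess is tagged omitempty, so a false value would normally
		// be dropped from the request body; naming it in ForceSendFields makes the
		// generated MarshalJSON (via gensupport.MarshalJSON) emit it anyway, which
		// is the documented way to include empty fields in Patch requests.
		subnet := &compute.Subnetwork{
			Name:                  "example-subnet", // hypothetical
			IpCidrRange:           "10.0.0.0/16",    // hypothetical
			PrivateIpGoogleAccess: false,
			ForceSendFields:       []string{"PrivateIpGoogleAccess"},
		}
		body, err := subnet.MarshalJSON()
		if err != nil {
			log.Fatal(err)
		}
		// Expected shape (keys sorted by the map marshal), e.g.:
		// {"ipCidrRange":"10.0.0.0/16","name":"example-subnet","privateIpGoogleAccess":false}
		fmt.Println(string(body))
	}
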
@@ -30354,13 +31760,13 @@ type TargetPoolListWarning struct { NullFields []string `json:"-"` } -func (s *TargetPoolListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolListWarning +func (s *SubnetworkListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolListWarningData struct { +type SubnetworkListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -30391,82 +31797,107 @@ type TargetPoolListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolListWarningData +func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsAddHealthCheckRequest struct { - // HealthChecks: The HttpHealthCheck to add to the target pool. - HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` +// SubnetworkLogConfig: The available logging options for this +// subnetwork. +type SubnetworkLogConfig struct { + // AggregationInterval: Can only be specified if VPC flow logging for + // this subnetwork is enabled. Toggles the aggregation interval for + // collecting flow logs. Increasing the interval time will reduce the + // amount of generated flow logs for long lasting connections. Default + // is an interval of 5 seconds per connection. + // + // Possible values: + // "INTERVAL_10_MIN" + // "INTERVAL_15_MIN" + // "INTERVAL_1_MIN" + // "INTERVAL_30_SEC" + // "INTERVAL_5_MIN" + // "INTERVAL_5_SEC" + AggregationInterval string `json:"aggregationInterval,omitempty"` + + // Enable: Whether to enable flow logging for this subnetwork. If this + // field is not explicitly set, it will not appear in get listings. If + // not set the default behavior is to disable flow logging. + Enable bool `json:"enable,omitempty"` - // ForceSendFields is a list of field names (e.g. "HealthChecks") to - // unconditionally include in API requests. By default, fields with + // FlowSampling: Can only be specified if VPC flow logging for this + // subnetwork is enabled. The value of the field must be in [0, 1]. Set + // the sampling rate of VPC flow logs within the subnetwork where 1.0 + // means all collected logs are reported and 0.0 means no logs are + // reported. Default is 0.5, which means half of all collected logs are + // reported. + FlowSampling float64 `json:"flowSampling,omitempty"` + + // Metadata: Can only be specified if VPC flow logging for this + // subnetwork is enabled. Configures whether metadata fields should be + // added to the reported VPC flow logs. Default is INCLUDE_ALL_METADATA. + // + // Possible values: + // "EXCLUDE_ALL_METADATA" + // "INCLUDE_ALL_METADATA" + Metadata string `json:"metadata,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AggregationInterval") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthChecks") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AggregationInterval") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsAddHealthCheckRequest +func (s *SubnetworkLogConfig) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkLogConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsAddInstanceRequest struct { - // Instances: A full or partial URL to an instance to add to this target - // pool. This can be a full or partial URL. For example, the following - // are valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name - // - projects/project-id/zones/zone/instances/instance-name - // - zones/zone/instances/instance-name - Instances []*InstanceReference `json:"instances,omitempty"` +func (s *SubnetworkLogConfig) UnmarshalJSON(data []byte) error { + type NoMethod SubnetworkLogConfig + var s1 struct { + FlowSampling gensupport.JSONFloat64 `json:"flowSampling"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FlowSampling = float64(s1.FlowSampling) + return nil +} - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsAddInstanceRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +// SubnetworkSecondaryRange: Represents a secondary IP range of a +// subnetwork. +type SubnetworkSecondaryRange struct { + // IpCidrRange: The range of IP addresses belonging to this subnetwork + // secondary range. Provide this property when you create the + // subnetwork. 
Ranges must be unique and non-overlapping with all + // primary and secondary IP ranges within a network. Only IPv4 is + // supported. + IpCidrRange string `json:"ipCidrRange,omitempty"` -type TargetPoolsRemoveHealthCheckRequest struct { - // HealthChecks: Health check URL to be removed. This can be a full or - // valid partial URL. For example, the following are valid URLs: - // - - // https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check - // - projects/project/global/httpHealthChecks/health-check - // - global/httpHealthChecks/health-check - HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` + // RangeName: The name associated with this subnetwork secondary range, + // used when adding an alias IP range to a VM instance. The name must be + // 1-63 characters long, and comply with RFC1035. The name must be + // unique within the subnetwork. + RangeName string `json:"rangeName,omitempty"` - // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -30474,7 +31905,7 @@ type TargetPoolsRemoveHealthCheckRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthChecks") to include + // NullFields is a list of field names (e.g. "IpCidrRange") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -30483,17 +31914,21 @@ type TargetPoolsRemoveHealthCheckRequest struct { NullFields []string `json:"-"` } -func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsRemoveHealthCheckRequest +func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkSecondaryRange raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsRemoveInstanceRequest struct { - // Instances: URLs of the instances to be removed from target pool. - Instances []*InstanceReference `json:"instances,omitempty"` +type SubnetworksExpandIpCidrRangeRequest struct { + // IpCidrRange: The IP (in CIDR format or netmask) of internal addresses + // that are legal on this Subnetwork. This range should be disjoint from + // other subnetworks within this network. This range can only be larger + // than (i.e. a superset of) the range previously defined before the + // update. + IpCidrRange string `json:"ipCidrRange,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -30501,30 +31936,30 @@ type TargetPoolsRemoveInstanceRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsRemoveInstanceRequest +func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksExpandIpCidrRangeRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsScopedList struct { - // TargetPools: A list of target pools contained in this scope. - TargetPools []*TargetPool `json:"targetPools,omitempty"` +type SubnetworksScopedList struct { + // Subnetworks: A list of subnetworks contained in this scope. + Subnetworks []*Subnetwork `json:"subnetworks,omitempty"` - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"` + // Warning: An informational warning that appears when the list of + // addresses is empty. + Warning *SubnetworksScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "TargetPools") to + // ForceSendFields is a list of field names (e.g. "Subnetworks") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -30532,7 +31967,7 @@ type TargetPoolsScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "TargetPools") to include + // NullFields is a list of field names (e.g. "Subnetworks") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -30541,15 +31976,15 @@ type TargetPoolsScopedList struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsScopedList +func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolsScopedListWarning: Informational warning which replaces -// the list of addresses when the list is empty. -type TargetPoolsScopedListWarning struct { +// SubnetworksScopedListWarning: An informational warning that appears +// when the list of addresses is empty. +type SubnetworksScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -30583,7 +32018,7 @@ type TargetPoolsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. 
// For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"` + Data []*SubnetworksScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -30606,13 +32041,13 @@ type TargetPoolsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsScopedListWarning +func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsScopedListWarningData struct { +type SubnetworksScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -30643,76 +32078,97 @@ type TargetPoolsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsScopedListWarningData +func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetReference struct { - Target string `json:"target,omitempty"` +type SubnetworksSetPrivateIpGoogleAccessRequest struct { + PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` - // ForceSendFields is a list of field names (e.g. "Target") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "PrivateIpGoogleAccess") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Target") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "PrivateIpGoogleAccess") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
NullFields []string `json:"-"` } -func (s *TargetReference) MarshalJSON() ([]byte, error) { - type NoMethod TargetReference +func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksSetPrivateIpGoogleAccessRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxiesSetBackendServiceRequest struct { - // Service: The URL of the new BackendService resource for the - // targetSslProxy. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Service") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` +type TCPHealthCheck struct { + // Port: The TCP port number for the health check request. The default + // value is 80. Valid values are 1 through 65535. + Port int64 `json:"port,omitempty"` - // NullFields is a list of field names (e.g. "Service") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` -func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxiesSetBackendServiceRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in + // port + // is used for health checking. + // USE_NAMED_PORT: The + // portName + // is used for health checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. + // + // + // If not specified, TCP health check follows behavior specified + // in + // port + // and + // portName + // fields. + // + // Possible values: + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` -type TargetSslProxiesSetProxyHeaderRequest struct { - // ProxyHeader: The new type of proxy header to append before sending - // data to the backend. NONE or PROXY_V1 are allowed. + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. // // Possible values: // "NONE" // "PROXY_V1" ProxyHeader string `json:"proxyHeader,omitempty"` - // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // Request: The application data to send once the TCP connection has + // been established (default value is empty). 
If both request and + // response are empty, the connection establishment alone will indicate + // health. The request data can only be ASCII. + Request string `json:"request,omitempty"` + + // Response: The bytes to match against the beginning of the response + // data. If left empty (the default value), any response will indicate + // health. The response data can only be ASCII. + Response string `json:"response,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Port") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -30720,28 +32176,38 @@ type TargetSslProxiesSetProxyHeaderRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ProxyHeader") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Port") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxiesSetProxyHeaderRequest +func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { + type NoMethod TCPHealthCheck raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxiesSetSslCertificatesRequest struct { - // SslCertificates: New set of URLs to SslCertificate resources to - // associate with this TargetSslProxy. Currently exactly one ssl - // certificate must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` +// Tags: A set of instance tags. +type Tags struct { + // Fingerprint: Specifies a fingerprint for this request, which is + // essentially a hash of the tags' contents and used for optimistic + // locking. The fingerprint is initially generated by Compute Engine and + // changes after every request to modify or update tags. You must always + // provide an up-to-date fingerprint hash in order to update or change + // tags. + // + // To see the latest fingerprint, make get() request to the instance. + Fingerprint string `json:"fingerprint,omitempty"` - // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // Items: An array of tags. Each tag must be 1-63 characters long, and + // comply with RFC1035. + Items []string `json:"items,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -30749,82 +32215,31 @@ type TargetSslProxiesSetSslCertificatesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"SslCertificates") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxiesSetSslCertificatesRequest +func (s *Tags) MarshalJSON() ([]byte, error) { + type NoMethod Tags raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetSslProxy: A TargetSslProxy resource. This resource defines an -// SSL proxy. (== resource_for beta.targetSslProxies ==) (== -// resource_for v1.targetSslProxies ==) -type TargetSslProxy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always - // compute#targetSslProxy for target SSL proxies. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Service: URL to the BackendService resource. - Service string `json:"service,omitempty"` - - // SslCertificates: URLs to SslCertificate resources that are used to - // authenticate connections to Backends. At least one SSL certificate - // must be specified. Currently, you may specify up to 15 SSL - // certificates. - SslCertificates []string `json:"sslCertificates,omitempty"` - - // SslPolicy: URL of SslPolicy resource that will be associated with the - // TargetSslProxy resource. If not set, the TargetSslProxy resource will - // not have any SSL policy configured. 
- SslPolicy string `json:"sslPolicy,omitempty"` +type TargetHttpProxiesScopedList struct { + // TargetHttpProxies: A list of TargetHttpProxies contained in this + // scope. + TargetHttpProxies []*TargetHttpProxy `json:"targetHttpProxies,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *TargetHttpProxiesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // ForceSendFields is a list of field names (e.g. "TargetHttpProxies") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -30832,7 +32247,7 @@ type TargetSslProxy struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "TargetHttpProxies") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -30842,68 +32257,15 @@ type TargetSslProxy struct { NullFields []string `json:"-"` } -func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetSslProxyList: Contains a list of TargetSslProxy resources. -type TargetSslProxyList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetSslProxy resources. - Items []*TargetSslProxy `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *TargetSslProxyListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxyList +func (s *TargetHttpProxiesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxiesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetSslProxyListWarning: [Output Only] Informational warning -// message. -type TargetSslProxyListWarning struct { +// TargetHttpProxiesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type TargetHttpProxiesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -30937,7 +32299,7 @@ type TargetSslProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetSslProxyListWarningData `json:"data,omitempty"` + Data []*TargetHttpProxiesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -30960,13 +32322,13 @@ type TargetSslProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxyListWarning +func (s *TargetHttpProxiesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxiesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxyListWarningData struct { +type TargetHttpProxiesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -30997,76 +32359,16 @@ type TargetSslProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxyListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetTcpProxiesSetBackendServiceRequest struct { - // Service: The URL of the new BackendService resource for the - // targetTcpProxy. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Service") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Service") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. 
- // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxiesSetBackendServiceRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetTcpProxiesSetProxyHeaderRequest struct { - // ProxyHeader: The new type of proxy header to append before sending - // data to the backend. NONE or PROXY_V1 are allowed. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ProxyHeader") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ProxyHeader") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxiesSetProxyHeaderRequest +func (s *TargetHttpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxiesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxy: A TargetTcpProxy resource. This resource defines a -// TCP proxy. (== resource_for beta.targetTcpProxies ==) (== -// resource_for v1.targetTcpProxies ==) -type TargetTcpProxy struct { +// TargetHttpProxy: A TargetHttpProxy resource. This resource defines an +// HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== +// resource_for v1.targetHttpProxies ==) +type TargetHttpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -31079,8 +32381,8 @@ type TargetTcpProxy struct { // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always - // compute#targetTcpProxy for target TCP proxies. + // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy + // for target HTTP proxies. Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -31092,20 +32394,17 @@ type TargetTcpProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` + // Region: [Output Only] URL of the region where the regional Target + // HTTP Proxy resides. This field is not applicable to global Target + // HTTP Proxies. 
+ Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Service: URL to the BackendService resource. - Service string `json:"service,omitempty"` + // UrlMap: URL to the UrlMap resource that defines the mapping from URL + // to the BackendService. + UrlMap string `json:"urlMap,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -31129,22 +32428,23 @@ type TargetTcpProxy struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxy +func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. -type TargetTcpProxyList struct { +type TargetHttpProxyAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetTcpProxy resources. - Items []*TargetTcpProxy `json:"items,omitempty"` + // Items: A list of TargetHttpProxiesScopedList resources. + Items map[string]TargetHttpProxiesScopedList `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource. Always + // compute#targetHttpProxyAggregatedList for lists of Target HTTP + // Proxies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -31159,7 +32459,7 @@ type TargetTcpProxyList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` + Warning *TargetHttpProxyAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -31182,15 +32482,15 @@ type TargetTcpProxyList struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyList +func (s *TargetHttpProxyAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxyListWarning: [Output Only] Informational warning -// message. -type TargetTcpProxyListWarning struct { +// TargetHttpProxyAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetHttpProxyAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -31224,7 +32524,7 @@ type TargetTcpProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` + Data []*TargetHttpProxyAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
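The TCPHealthCheck doc comments in the hunk above spell out how the probe port is selected (USE_FIXED_PORT, USE_NAMED_PORT, USE_SERVING_PORT) and how the optional ASCII Request/Response strings affect the check. A minimal sketch under the same package assumption, with made-up port and payload values:

	package main

	import (
		"fmt"
		"log"

		compute "google.golang.org/api/compute/v1" // assumed canonical import for this vendored file
	)

	func main() {
		// USE_FIXED_PORT pins the probe to Port; Request/Response are optional
		// ASCII strings, and an empty Response means any reply indicates health.
		hc := &compute.TCPHealthCheck{
			Port:              443, // hypothetical backend port
			PortSpecification: "USE_FIXED_PORT",
			ProxyHeader:       "NONE",
			Request:           "PING", // hypothetical payload
			Response:          "PONG", // matched against the start of the reply
		}
		body, err := hc.MarshalJSON()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(body))
	}
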
@@ -31247,13 +32547,13 @@ type TargetTcpProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyListWarning +func (s *TargetHttpProxyAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetTcpProxyListWarningData struct { +type TargetHttpProxyAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -31284,283 +32584,23 @@ type TargetTcpProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyListWarningData +func (s *TargetHttpProxyAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGateway: Represents a Target VPN gateway resource. (== -// resource_for beta.targetVpnGateways ==) (== resource_for -// v1.targetVpnGateways ==) -type TargetVpnGateway struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule - // resources. ForwardingRules are created using - // compute.forwardingRules.insert and associated to a VPN gateway. - ForwardingRules []string `json:"forwardingRules,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. - Kind string `json:"kind,omitempty"` - - // LabelFingerprint: A fingerprint for the labels being applied to this - // TargetVpnGateway, which is essentially a hash of the labels set used - // for optimistic locking. The fingerprint is initially generated by - // Compute Engine and changes after every request to modify or update - // labels. You must always provide an up-to-date fingerprint hash in - // order to update or change labels, otherwise the request will fail - // with error 412 conditionNotMet. - // - // To see the latest fingerprint, make a get() request to retrieve a - // TargetVpnGateway. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels to apply to this TargetVpnGateway resource. These can - // be later modified by the setLabels method. Each label key/value must - // comply with RFC1035. Label values may be empty. - Labels map[string]string `json:"labels,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. 
Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: URL of the network to which this VPN gateway is attached. - // Provided by the client when the VPN gateway is created. - Network string `json:"network,omitempty"` - - // Region: [Output Only] URL of the region where the target VPN gateway - // resides. You must specify this field as part of the HTTP request URL. - // It is not settable as a field in the request body. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Status: [Output Only] The status of the VPN gateway. - // - // Possible values: - // "CREATING" - // "DELETING" - // "FAILED" - // "READY" - Status string `json:"status,omitempty"` - - // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. - // VpnTunnels are created using compute.vpntunnels.insert method and - // associated to a VPN gateway. - Tunnels []string `json:"tunnels,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGateway - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetVpnGatewayAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetVpnGateway resources. - Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. 
- SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayAggregatedList - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetVpnGatewayAggregatedListWarning: [Output Only] Informational -// warning message. -type TargetVpnGatewayAggregatedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetVpnGatewayAggregatedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayAggregatedListWarning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetVpnGatewayAggregatedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayAggregatedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. -type TargetVpnGatewayList struct { +// TargetHttpProxyList: A list of TargetHttpProxy resources. +type TargetHttpProxyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetVpnGateway resources. - Items []*TargetVpnGateway `json:"items,omitempty"` + // Items: A list of TargetHttpProxy resources. + Items []*TargetHttpProxy `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. + // Kind: Type of resource. Always compute#targetHttpProxyList for lists + // of target HTTP proxies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -31575,7 +32615,7 @@ type TargetVpnGatewayList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. 
- Warning *TargetVpnGatewayListWarning `json:"warning,omitempty"` + Warning *TargetHttpProxyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -31598,15 +32638,15 @@ type TargetVpnGatewayList struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayList +func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGatewayListWarning: [Output Only] Informational warning +// TargetHttpProxyListWarning: [Output Only] Informational warning // message. -type TargetVpnGatewayListWarning struct { +type TargetHttpProxyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -31640,7 +32680,7 @@ type TargetVpnGatewayListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetVpnGatewayListWarningData `json:"data,omitempty"` + Data []*TargetHttpProxyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -31663,13 +32703,13 @@ type TargetVpnGatewayListWarning struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayListWarning +func (s *TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewayListWarningData struct { +type TargetHttpProxyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -31700,22 +32740,22 @@ type TargetVpnGatewayListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayListWarningData +func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewaysScopedList struct { - // TargetVpnGateways: [Output Only] A list of target vpn gateways - // contained in this scope. - TargetVpnGateways []*TargetVpnGateway `json:"targetVpnGateways,omitempty"` +type TargetHttpsProxiesScopedList struct { + // TargetHttpsProxies: A list of TargetHttpsProxies contained in this + // scope. + TargetHttpsProxies []*TargetHttpsProxy `json:"targetHttpsProxies,omitempty"` - // Warning: [Output Only] Informational warning which replaces the list - // of addresses when the list is empty. - Warning *TargetVpnGatewaysScopedListWarning `json:"warning,omitempty"` + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *TargetHttpsProxiesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "TargetVpnGateways") + // ForceSendFields is a list of field names (e.g. 
"TargetHttpsProxies") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -31723,7 +32763,7 @@ type TargetVpnGatewaysScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "TargetVpnGateways") to + // NullFields is a list of field names (e.g. "TargetHttpsProxies") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -31733,15 +32773,15 @@ type TargetVpnGatewaysScopedList struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewaysScopedList +func (s *TargetHttpsProxiesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGatewaysScopedListWarning: [Output Only] Informational -// warning which replaces the list of addresses when the list is empty. -type TargetVpnGatewaysScopedListWarning struct { +// TargetHttpsProxiesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type TargetHttpsProxiesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -31775,7 +32815,7 @@ type TargetVpnGatewaysScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetVpnGatewaysScopedListWarningData `json:"data,omitempty"` + Data []*TargetHttpsProxiesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -31798,13 +32838,13 @@ type TargetVpnGatewaysScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewaysScopedListWarning +func (s *TargetHttpsProxiesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewaysScopedListWarningData struct { +type TargetHttpsProxiesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -31835,51 +32875,22 @@ type TargetVpnGatewaysScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewaysScopedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TestFailure struct { - ActualService string `json:"actualService,omitempty"` - - ExpectedService string `json:"expectedService,omitempty"` - - Host string `json:"host,omitempty"` - - Path string `json:"path,omitempty"` - - // ForceSendFields is a list of field names (e.g. 
"ActualService") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ActualService") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TestFailure) MarshalJSON() ([]byte, error) { - type NoMethod TestFailure +func (s *TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TestPermissionsRequest struct { - // Permissions: The set of permissions to check for the 'resource'. - // Permissions with wildcards (such as '*' or 'storage.*') are not - // allowed. - Permissions []string `json:"permissions,omitempty"` +type TargetHttpsProxiesSetQuicOverrideRequest struct { + // QuicOverride: QUIC policy for the TargetHttpsProxy resource. + // + // Possible values: + // "DISABLE" + // "ENABLE" + // "NONE" + QuicOverride string `json:"quicOverride,omitempty"` - // ForceSendFields is a list of field names (e.g. "Permissions") to + // ForceSendFields is a list of field names (e.g. "QuicOverride") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -31887,7 +32898,7 @@ type TestPermissionsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Permissions") to include + // NullFields is a list of field names (e.g. "QuicOverride") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -31896,22 +32907,19 @@ type TestPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestPermissionsRequest) MarshalJSON() ([]byte, error) { - type NoMethod TestPermissionsRequest +func (s *TargetHttpsProxiesSetQuicOverrideRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesSetQuicOverrideRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TestPermissionsResponse struct { - // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is allowed. - Permissions []string `json:"permissions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type TargetHttpsProxiesSetSslCertificatesRequest struct { + // SslCertificates: New set of SslCertificate resources to associate + // with this TargetHttpsProxy resource. Currently exactly one + // SslCertificate resource must be specified. 
+ SslCertificates []string `json:"sslCertificates,omitempty"` - // ForceSendFields is a list of field names (e.g. "Permissions") to + // ForceSendFields is a list of field names (e.g. "SslCertificates") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -31919,61 +32927,40 @@ type TestPermissionsResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "SslCertificates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { - type NoMethod TestPermissionsResponse +func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesSetSslCertificatesRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMap: A UrlMap resource. This resource defines the mapping from URL -// to the BackendService resource, based on the "longest-match" of the -// URL's host and path. -type UrlMap struct { +// TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines +// an HTTPS proxy. (== resource_for beta.targetHttpsProxies ==) (== +// resource_for v1.targetHttpsProxies ==) +type TargetHttpsProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // DefaultService: The URL of the backendService resource if none of the - // hostRules match. - // Use defaultService instead of defaultRouteAction when simple routing - // to a backendService is desired and other advanced capabilities like - // traffic splitting and rewrites are not required. - // Only one of defaultService, defaultRouteAction or defaultUrlRedirect - // should must be set. - DefaultService string `json:"defaultService,omitempty"` - // Description: An optional description of this resource. Provide this // property when you create the resource. Description string `json:"description,omitempty"` - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a UrlMap. An up-to-date - // fingerprint must be provided in order to update the UrlMap, otherwise - // the request will fail with error 412 conditionNotMet. - // - // To see the latest fingerprint, make a get() request to retrieve a - // UrlMap. - Fingerprint string `json:"fingerprint,omitempty"` - - // HostRules: The list of HostRules to use against the URL. 
- HostRules []*HostRule `json:"hostRules,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always compute#urlMaps for - // url maps. + // Kind: [Output Only] Type of resource. Always compute#targetHttpsProxy + // for target HTTPS proxies. Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -31985,16 +32972,48 @@ type UrlMap struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // PathMatchers: The list of named PathMatchers to use against the URL. - PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` + // QuicOverride: Specifies the QUIC override policy for this + // TargetHttpsProxy resource. This determines whether the load balancer + // will attempt to negotiate QUIC with clients or not. Can specify one + // of NONE, ENABLE, or DISABLE. Specify ENABLE to always enable QUIC, + // Enables QUIC when set to ENABLE, and disables QUIC when set to + // DISABLE. If NONE is specified, uses the QUIC policy with no user + // overrides, which is equivalent to DISABLE. Not specifying this field + // is equivalent to specifying NONE. + // + // Possible values: + // "DISABLE" + // "ENABLE" + // "NONE" + QuicOverride string `json:"quicOverride,omitempty"` + + // Region: [Output Only] URL of the region where the regional + // TargetHttpsProxy resides. This field is not applicable to global + // TargetHttpsProxies. + Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Tests: The list of expected URL mapping tests. Request to update this - // UrlMap will succeed only if all of the test cases pass. You can - // specify a maximum of 100 tests per UrlMap. - Tests []*UrlMapTest `json:"tests,omitempty"` + // SslCertificates: URLs to SslCertificate resources that are used to + // authenticate connections between users and the load balancer. At + // least one SSL certificate must be specified. Currently, you may + // specify up to 15 SSL certificates. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // SslPolicy: URL of SslPolicy resource that will be associated with the + // TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource + // will not have any SSL policy configured. + SslPolicy string `json:"sslPolicy,omitempty"` + + // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource + // that defines the mapping from URL to the BackendService. For example, + // the following are all valid URLs for specifying a URL map: + // - + // https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map + // - projects/project/global/urlMaps/url-map + // - global/urlMaps/url-map + UrlMap string `json:"urlMap,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -32018,22 +33037,23 @@ type UrlMap struct { NullFields []string `json:"-"` } -func (s *UrlMap) MarshalJSON() ([]byte, error) { - type NoMethod UrlMap +func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapList: Contains a list of UrlMap resources. 
-type UrlMapList struct { +type TargetHttpsProxyAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of UrlMap resources. - Items []*UrlMap `json:"items,omitempty"` + // Items: A list of TargetHttpsProxiesScopedList resources. + Items map[string]TargetHttpsProxiesScopedList `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource. Always + // compute#targetHttpsProxyAggregatedList for lists of Target HTTP + // Proxies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -32048,7 +33068,7 @@ type UrlMapList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *UrlMapListWarning `json:"warning,omitempty"` + Warning *TargetHttpsProxyAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -32071,14 +33091,15 @@ type UrlMapList struct { NullFields []string `json:"-"` } -func (s *UrlMapList) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapList +func (s *TargetHttpsProxyAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapListWarning: [Output Only] Informational warning message. -type UrlMapListWarning struct { +// TargetHttpsProxyAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetHttpsProxyAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -32112,7 +33133,7 @@ type UrlMapListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*UrlMapListWarningData `json:"data,omitempty"` + Data []*TargetHttpsProxyAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -32135,13 +33156,13 @@ type UrlMapListWarning struct { NullFields []string `json:"-"` } -func (s *UrlMapListWarning) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapListWarning +func (s *TargetHttpsProxyAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapListWarningData struct { +type TargetHttpsProxyAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -32172,93 +33193,44 @@ type UrlMapListWarningData struct { NullFields []string `json:"-"` } -func (s *UrlMapListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapListWarningData +func (s *TargetHttpsProxyAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapReference struct { - UrlMap string `json:"urlMap,omitempty"` +// TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. 
+type TargetHttpsProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // ForceSendFields is a list of field names (e.g. "UrlMap") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Items: A list of TargetHttpsProxy resources. + Items []*TargetHttpsProxy `json:"items,omitempty"` - // NullFields is a list of field names (e.g. "UrlMap") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UrlMapReference) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapReference - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// UrlMapTest: Message for the expected URL mappings. -type UrlMapTest struct { - // Description: Description of this test case. - Description string `json:"description,omitempty"` - - // Host: Host portion of the URL. - Host string `json:"host,omitempty"` - - // Path: Path portion of the URL. - Path string `json:"path,omitempty"` - - // Service: Expected BackendService resource the given URL should be - // mapped to. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UrlMapTest) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapTest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Kind: Type of resource. Always compute#targetHttpsProxyList for lists + // of target HTTPS proxies. + Kind string `json:"kind,omitempty"` -// UrlMapValidationResult: Message representing the validation result -// for a UrlMap. -type UrlMapValidationResult struct { - LoadErrors []string `json:"loadErrors,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. 
Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // LoadSucceeded: Whether the given UrlMap can be successfully loaded. - // If false, 'loadErrors' indicates the reasons. - LoadSucceeded bool `json:"loadSucceeded,omitempty"` + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` - TestFailures []*TestFailure `json:"testFailures,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetHttpsProxyListWarning `json:"warning,omitempty"` - // TestPassed: If successfully loaded, this field indicates whether the - // test passed. If false, 'testFailures's indicate the reason of - // failure. - TestPassed bool `json:"testPassed,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "LoadErrors") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -32266,8 +33238,8 @@ type UrlMapValidationResult struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LoadErrors") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -32275,17 +33247,55 @@ type UrlMapValidationResult struct { NullFields []string `json:"-"` } -func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapValidationResult +func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapsValidateRequest struct { - // Resource: Content of the UrlMap to be validated. - Resource *UrlMap `json:"resource,omitempty"` +// TargetHttpsProxyListWarning: [Output Only] Informational warning +// message. +type TargetHttpsProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "Resource") to + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetHttpsProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -32293,8 +33303,8 @@ type UrlMapsValidateRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Resource") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -32302,20 +33312,27 @@ type UrlMapsValidateRequest struct { NullFields []string `json:"-"` } -func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapsValidateRequest +func (s *TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapsValidateResponse struct { - Result *UrlMapValidationResult `json:"result,omitempty"` +type TargetHttpsProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"Result") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -32323,7 +33340,7 @@ type UrlMapsValidateResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Result") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -32332,98 +33349,104 @@ type UrlMapsValidateResponse struct { NullFields []string `json:"-"` } -func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapsValidateResponse +func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsableSubnetwork: Subnetwork which the current user has -// compute.subnetworks.use permission on. -type UsableSubnetwork struct { - // IpCidrRange: The range of internal addresses that are owned by this - // subnetwork. - IpCidrRange string `json:"ipCidrRange,omitempty"` +// TargetInstance: A TargetInstance resource. This resource defines an +// endpoint instance that terminates traffic of certain protocols. (== +// resource_for beta.targetInstances ==) (== resource_for +// v1.targetInstances ==) +type TargetInstance struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Network: Network URL. - Network string `json:"network,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // SecondaryIpRanges: Secondary IP ranges. - SecondaryIpRanges []*UsableSubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` - // Subnetwork: Subnetwork URL. - Subnetwork string `json:"subnetwork,omitempty"` + // Instance: A URL to the virtual machine instance that handles traffic + // for this target instance. When creating a target instance, you can + // provide the fully-qualified URL or a valid partial URL to the desired + // virtual machine. For example, the following are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance + // - projects/project/zones/zone/instances/instance + // - zones/zone/instances/instance + Instance string `json:"instance,omitempty"` - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Kind: [Output Only] The type of the resource. 
Always + // compute#targetInstance for target instances. + Kind string `json:"kind,omitempty"` - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` -func (s *UsableSubnetwork) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetwork - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // NatPolicy: NAT option controlling how IPs are NAT'ed to the instance. + // Currently only NO_NAT (default value) is supported. + // + // Possible values: + // "NO_NAT" + NatPolicy string `json:"natPolicy,omitempty"` -// UsableSubnetworkSecondaryRange: Secondary IP range of a usable -// subnetwork. -type UsableSubnetworkSecondaryRange struct { - // IpCidrRange: The range of IP addresses belonging to this subnetwork - // secondary range. - IpCidrRange string `json:"ipCidrRange,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` - // RangeName: The name associated with this subnetwork secondary range, - // used when adding an alias IP range to a VM instance. The name must be - // 1-63 characters long, and comply with RFC1035. The name must be - // unique within the subnetwork. - RangeName string `json:"rangeName,omitempty"` + // Zone: [Output Only] URL of the zone where the target instance + // resides. You must specify this field as part of the HTTP request URL. + // It is not settable as a field in the request body. + Zone string `json:"zone,omitempty"` - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to - // unconditionally include in API requests. By default, fields with + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
+ // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *UsableSubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetworkSecondaryRange +func (s *TargetInstance) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstance raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UsableSubnetworksAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. +type TargetInstanceAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` - // Items: [Output] A list of usable subnetwork URLs. - Items []*UsableSubnetwork `json:"items,omitempty"` + // Items: A list of TargetInstance resources. + Items map[string]TargetInstancesScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#usableSubnetworksAggregatedList for aggregated lists of - // usable subnetworks. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -32438,7 +33461,7 @@ type UsableSubnetworksAggregatedList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *UsableSubnetworksAggregatedListWarning `json:"warning,omitempty"` + Warning *TargetInstanceAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -32461,15 +33484,15 @@ type UsableSubnetworksAggregatedList struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworksAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetworksAggregatedList +func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsableSubnetworksAggregatedListWarning: [Output Only] Informational +// TargetInstanceAggregatedListWarning: [Output Only] Informational // warning message. -type UsableSubnetworksAggregatedListWarning struct { +type TargetInstanceAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -32503,7 +33526,7 @@ type UsableSubnetworksAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*UsableSubnetworksAggregatedListWarningData `json:"data,omitempty"` + Data []*TargetInstanceAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
@@ -32526,13 +33549,13 @@ type UsableSubnetworksAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworksAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetworksAggregatedListWarning +func (s *TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UsableSubnetworksAggregatedListWarningData struct { +type TargetInstanceAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -32563,141 +33586,22 @@ type UsableSubnetworksAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworksAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetworksAggregatedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// UsageExportLocation: The location in Cloud Storage and naming method -// of the daily usage report. Contains bucket_name and report_name -// prefix. -type UsageExportLocation struct { - // BucketName: The name of an existing bucket in Cloud Storage where the - // usage report object is stored. The Google Service Account is granted - // write access to this bucket. This can either be the bucket name by - // itself, such as example-bucket, or the bucket name with gs:// or - // https://storage.googleapis.com/ in front of it, such as - // gs://example-bucket. - BucketName string `json:"bucketName,omitempty"` - - // ReportNamePrefix: An optional prefix for the name of the usage report - // object stored in bucketName. If not supplied, defaults to usage. The - // report is stored as a CSV file named - // report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the - // usage according to Pacific Time. If you supply a prefix, it should - // conform to Cloud Storage object naming conventions. - ReportNamePrefix string `json:"reportNamePrefix,omitempty"` - - // ForceSendFields is a list of field names (e.g. "BucketName") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BucketName") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { - type NoMethod UsageExportLocation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// VmEndpointNatMappings: Contain information of Nat mapping for a VM -// endpoint (i.e., NIC). 
-type VmEndpointNatMappings struct { - // InstanceName: Name of the VM instance which the endpoint belongs to - InstanceName string `json:"instanceName,omitempty"` - - InterfaceNatMappings []*VmEndpointNatMappingsInterfaceNatMappings `json:"interfaceNatMappings,omitempty"` - - // ForceSendFields is a list of field names (e.g. "InstanceName") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InstanceName") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *VmEndpointNatMappings) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappings - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// VmEndpointNatMappingsInterfaceNatMappings: Contain information of Nat -// mapping for an interface of this endpoint. -type VmEndpointNatMappingsInterfaceNatMappings struct { - // NatIpPortRanges: A list of all IP:port-range mappings assigned to - // this interface. These ranges are inclusive, that is, both the first - // and the last ports can be used for NAT. Example: - // ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. - NatIpPortRanges []string `json:"natIpPortRanges,omitempty"` - - // NumTotalNatPorts: Total number of ports across all NAT IPs allocated - // to this interface. It equals to the aggregated port number in the - // field nat_ip_port_ranges. - NumTotalNatPorts int64 `json:"numTotalNatPorts,omitempty"` - - // SourceAliasIpRange: Alias IP range for this interface endpoint. It - // will be a private (RFC 1918) IP range. Examples: "10.33.4.55/32", or - // "192.168.5.0/24". - SourceAliasIpRange string `json:"sourceAliasIpRange,omitempty"` - - // SourceVirtualIp: Primary IP of the VM for this NIC. - SourceVirtualIp string `json:"sourceVirtualIp,omitempty"` - - // ForceSendFields is a list of field names (e.g. "NatIpPortRanges") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "NatIpPortRanges") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *VmEndpointNatMappingsInterfaceNatMappings) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappingsInterfaceNatMappings +func (s *TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VmEndpointNatMappingsList: Contains a list of VmEndpointNatMappings. -type VmEndpointNatMappingsList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. +// TargetInstanceList: Contains a list of TargetInstance resources. +type TargetInstanceList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#vmEndpointNatMappingsList for lists of Nat mappings of VM - // endpoints. + // Items: A list of TargetInstance resources. + Items []*TargetInstance `json:"items,omitempty"` + + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -32708,15 +33612,11 @@ type VmEndpointNatMappingsList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // Result: [Output Only] A list of Nat mapping information of VM - // endpoints. - Result []*VmEndpointNatMappings `json:"result,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *VmEndpointNatMappingsListWarning `json:"warning,omitempty"` + Warning *TargetInstanceListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -32739,15 +33639,15 @@ type VmEndpointNatMappingsList struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsList) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappingsList +func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VmEndpointNatMappingsListWarning: [Output Only] Informational warning +// TargetInstanceListWarning: [Output Only] Informational warning // message. -type VmEndpointNatMappingsListWarning struct { +type TargetInstanceListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -32781,7 +33681,7 @@ type VmEndpointNatMappingsListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*VmEndpointNatMappingsListWarningData `json:"data,omitempty"` + Data []*TargetInstanceListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
@@ -32804,13 +33704,13 @@ type VmEndpointNatMappingsListWarning struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappingsListWarning +func (s *TargetInstanceListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VmEndpointNatMappingsListWarningData struct { +type TargetInstanceListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -32841,205 +33741,47 @@ type VmEndpointNatMappingsListWarningData struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappingsListWarningData +func (s *TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnel: VPN tunnel resource. (== resource_for beta.vpnTunnels ==) -// (== resource_for v1.vpnTunnels ==) -type VpnTunnel struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` +type TargetInstancesScopedList struct { + // TargetInstances: A list of target instances contained in this scope. + TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"` - // DetailedStatus: [Output Only] Detailed status message for the VPN - // tunnel. - DetailedStatus string `json:"detailedStatus,omitempty"` + // ForceSendFields is a list of field names (e.g. "TargetInstances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // NullFields is a list of field names (e.g. "TargetInstances") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - // IkeVersion: IKE protocol version to use when establishing the VPN - // tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. - // Default version is 2. - IkeVersion int64 `json:"ikeVersion,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. 
- Kind string `json:"kind,omitempty"` - - // LabelFingerprint: A fingerprint for the labels being applied to this - // VpnTunnel, which is essentially a hash of the labels set used for - // optimistic locking. The fingerprint is initially generated by Compute - // Engine and changes after every request to modify or update labels. - // You must always provide an up-to-date fingerprint hash in order to - // update or change labels, otherwise the request will fail with error - // 412 conditionNotMet. - // - // To see the latest fingerprint, make a get() request to retrieve a - // VpnTunnel. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels to apply to this VpnTunnel. These can be later - // modified by the setLabels method. Each label key/value pair must - // comply with RFC1035. Label values may be empty. - Labels map[string]string `json:"labels,omitempty"` - - // LocalTrafficSelector: Local traffic selector to use when establishing - // the VPN tunnel with peer VPN gateway. The value should be a CIDR - // formatted string, for example: 192.168.0.0/16. The ranges should be - // disjoint. Only IPv4 is supported. - LocalTrafficSelector []string `json:"localTrafficSelector,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. - PeerIp string `json:"peerIp,omitempty"` - - // Region: [Output Only] URL of the region where the VPN tunnel resides. - // You must specify this field as part of the HTTP request URL. It is - // not settable as a field in the request body. - Region string `json:"region,omitempty"` - - // RemoteTrafficSelector: Remote traffic selectors to use when - // establishing the VPN tunnel with peer VPN gateway. The value should - // be a CIDR formatted string, for example: 192.168.0.0/16. The ranges - // should be disjoint. Only IPv4 is supported. - RemoteTrafficSelector []string `json:"remoteTrafficSelector,omitempty"` - - // Router: URL of router resource to be used for dynamic routing. - Router string `json:"router,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SharedSecret: Shared secret used to set the secure session between - // the Cloud VPN gateway and the peer VPN gateway. - SharedSecret string `json:"sharedSecret,omitempty"` - - // SharedSecretHash: Hash of the shared secret. - SharedSecretHash string `json:"sharedSecretHash,omitempty"` - - // Status: [Output Only] The status of the VPN tunnel. - // - // Possible values: - // "ALLOCATING_RESOURCES" - // "AUTHORIZATION_ERROR" - // "DEPROVISIONING" - // "ESTABLISHED" - // "FAILED" - // "FIRST_HANDSHAKE" - // "NEGOTIATION_FAILURE" - // "NETWORK_ERROR" - // "NO_INCOMING_PACKETS" - // "PROVISIONING" - // "REJECTED" - // "WAITING_FOR_FULL_CONFIG" - Status string `json:"status,omitempty"` - - // TargetVpnGateway: URL of the Target VPN gateway with which this VPN - // tunnel is associated. Provided by the client when the VPN tunnel is - // created. 
- TargetVpnGateway string `json:"targetVpnGateway,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *VpnTunnel) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnel - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type VpnTunnelAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of VpnTunnelsScopedList resources. - Items map[string]VpnTunnelsScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *VpnTunnelAggregatedListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelAggregatedList +func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstancesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelAggregatedListWarning: [Output Only] Informational warning -// message. -type VpnTunnelAggregatedListWarning struct { +// TargetInstancesScopedListWarning: Informational warning which +// replaces the list of addresses when the list is empty. +type TargetInstancesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -33073,7 +33815,7 @@ type VpnTunnelAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*VpnTunnelAggregatedListWarningData `json:"data,omitempty"` + Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -33096,13 +33838,13 @@ type VpnTunnelAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelAggregatedListWarning +func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstancesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelAggregatedListWarningData struct { +type TargetInstancesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -33133,23 +33875,169 @@ type VpnTunnelAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelAggregatedListWarningData +func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstancesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelList: Contains a list of VpnTunnel resources. -type VpnTunnelList struct { +// TargetPool: A TargetPool resource. This resource defines a pool of +// instances, an associated HttpHealthCheck resource, and the fallback +// target pool. (== resource_for beta.targetPools ==) (== resource_for +// v1.targetPools ==) +type TargetPool struct { + // BackupPool: This field is applicable only when the containing target + // pool is serving a forwarding rule as the primary pool, and its + // failoverRatio field is properly set to a value between [0, + // 1]. + // + // backupPool and failoverRatio together define the fallback behavior of + // the primary target pool: if the ratio of the healthy instances in the + // primary pool is at or below failoverRatio, traffic arriving at the + // load-balanced IP will be directed to the backup pool. 
+ // + // In case where failoverRatio and backupPool are not set, or all the + // instances in the backup pool are unhealthy, the traffic will be + // directed back to the primary pool in the "force" mode, where traffic + // will be spread to the healthy instances with the best effort, or to + // all instances when no instance is healthy. + BackupPool string `json:"backupPool,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // FailoverRatio: This field is applicable only when the containing + // target pool is serving a forwarding rule as the primary pool (i.e., + // not as a backup pool to some other target pool). The value of the + // field must be in [0, 1]. + // + // If set, backupPool must also be set. They together define the + // fallback behavior of the primary target pool: if the ratio of the + // healthy instances in the primary pool is at or below this number, + // traffic arriving at the load-balanced IP will be directed to the + // backup pool. + // + // In case where failoverRatio is not set or all the instances in the + // backup pool are unhealthy, the traffic will be directed back to the + // primary pool in the "force" mode, where traffic will be spread to the + // healthy instances with the best effort, or to all instances when no + // instance is healthy. + FailoverRatio float64 `json:"failoverRatio,omitempty"` + + // HealthChecks: The URL of the HttpHealthCheck resource. A member + // instance in this pool is considered healthy if and only if the health + // checks pass. An empty list means all member instances will be + // considered healthy at all times. Only HttpHealthChecks are supported. + // Only one health check may be specified. + HealthChecks []string `json:"healthChecks,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Instances: A list of resource URLs to the virtual machine instances + // serving this pool. They must live in zones contained in the same + // region as this pool. + Instances []string `json:"instances,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#targetPool + // for target pools. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Region: [Output Only] URL of the region where the target pool + // resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SessionAffinity: Session affinity option, must be one of the + // following values: + // NONE: Connections from the same client IP may go to any instance in + // the pool. 
+ // CLIENT_IP: Connections from the same client IP will go to the same + // instance in the pool while that instance remains + // healthy. + // CLIENT_IP_PROTO: Connections from the same client IP with the same IP + // protocol will go to the same instance in the pool while that instance + // remains healthy. + // + // Possible values: + // "CLIENT_IP" + // "CLIENT_IP_PORT_PROTO" + // "CLIENT_IP_PROTO" + // "GENERATED_COOKIE" + // "HEADER_FIELD" + // "HTTP_COOKIE" + // "NONE" + SessionAffinity string `json:"sessionAffinity,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "BackupPool") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackupPool") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPool) MarshalJSON() ([]byte, error) { + type NoMethod TargetPool + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *TargetPool) UnmarshalJSON(data []byte) error { + type NoMethod TargetPool + var s1 struct { + FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FailoverRatio = float64(s1.FailoverRatio) + return nil +} + +type TargetPoolAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of VpnTunnel resources. - Items []*VpnTunnel `json:"items,omitempty"` + // Items: A list of TargetPool resources. + Items map[string]TargetPoolsScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. + // Kind: [Output Only] Type of resource. Always + // compute#targetPoolAggregatedList for aggregated lists of target + // pools. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -33164,7 +34052,7 @@ type VpnTunnelList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *VpnTunnelListWarning `json:"warning,omitempty"` + Warning *TargetPoolAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. 
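The BackupPool and FailoverRatio comments above spell out when traffic shifts to the backup pool, and the generated UnmarshalJSON routes failoverRatio through gensupport.JSONFloat64. A small sketch of that documented rule as I read it (illustrative only, not Compute Engine's actual implementation); the pool name and partial URL are made up and the compute/v1 import path is an assumption:

package main

import (
    "encoding/json"
    "fmt"
    "log"

    compute "google.golang.org/api/compute/v1" // assumed import path; may be the beta package in this vendor tree
)

// usesBackupPool mirrors the documented failover rule: with backupPool and
// failoverRatio set, traffic is directed to the backup pool once the fraction
// of healthy primary instances is at or below failoverRatio.
func usesBackupPool(tp *compute.TargetPool, healthy, total int) bool {
    if tp.BackupPool == "" || total == 0 {
        return false
    }
    return float64(healthy)/float64(total) <= tp.FailoverRatio
}

func main() {
    // Decoding goes through the generated UnmarshalJSON shown above, which
    // reads failoverRatio via gensupport.JSONFloat64 before storing a float64.
    raw := []byte(`{"name":"primary-pool","backupPool":"regions/us-central1/targetPools/backup","failoverRatio":0.5}`)
    var tp compute.TargetPool
    if err := json.Unmarshal(raw, &tp); err != nil {
        log.Fatal(err)
    }
    fmt.Println(usesBackupPool(&tp, 1, 4)) // 1/4 = 0.25 <= 0.5, so true
}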
@@ -33187,14 +34075,15 @@ type VpnTunnelList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelList +func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelListWarning: [Output Only] Informational warning message. -type VpnTunnelListWarning struct { +// TargetPoolAggregatedListWarning: [Output Only] Informational warning +// message. +type TargetPoolAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -33228,7 +34117,7 @@ type VpnTunnelListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*VpnTunnelListWarningData `json:"data,omitempty"` + Data []*TargetPoolAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -33251,13 +34140,13 @@ type VpnTunnelListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelListWarning +func (s *TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelListWarningData struct { +type TargetPoolAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -33288,21 +34177,25 @@ type VpnTunnelListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelListWarningData +func (s *TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelsScopedList struct { - // VpnTunnels: A list of vpn tunnels contained in this scope. - VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` +type TargetPoolInstanceHealth struct { + HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *VpnTunnelsScopedListWarning `json:"warning,omitempty"` + // Kind: [Output Only] Type of resource. Always + // compute#targetPoolInstanceHealth when checking the health of an + // instance. + Kind string `json:"kind,omitempty"` - // ForceSendFields is a list of field names (e.g. "VpnTunnels") to + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "HealthStatus") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -33310,8 +34203,62 @@ type VpnTunnelsScopedList struct { // used to include empty fields in Patch requests. 
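Every struct in this file repeats the same ForceSendFields/NullFields contract described in the comment just above. A short sketch of what that contract does once json.Marshal dispatches to the generated MarshalJSON; the field values are invented for illustration and the compute/v1 import path is an assumption:

package main

import (
    "encoding/json"
    "fmt"
    "log"

    compute "google.golang.org/api/compute/v1" // assumed import path; may be the beta package in this vendor tree
)

func main() {
    tp := &compute.TargetPool{
        Name:          "primary-pool",
        FailoverRatio: 0, // zero value, normally dropped from the request body
        // ForceSendFields keeps the empty FailoverRatio in the JSON as 0,
        // which is how a Patch request can explicitly reset it.
        ForceSendFields: []string{"FailoverRatio"},
        // NullFields sends backupPool as JSON null instead of omitting it;
        // the Go field itself must stay empty, per the comment above.
        NullFields: []string{"BackupPool"},
    }
    b, err := json.Marshal(tp) // dispatches to the generated MarshalJSON
    if err != nil {
        log.Fatal(err)
    }
    // Prints something like {"backupPool":null,"failoverRatio":0,"name":"primary-pool"}.
    fmt.Println(string(b))
}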
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "VpnTunnels") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "HealthStatus") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolInstanceHealth + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolList: Contains a list of TargetPool resources. +type TargetPoolList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetPool resources. + Items []*TargetPool `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetPoolList + // for lists of target pools. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetPoolListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -33319,15 +34266,14 @@ type VpnTunnelsScopedList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelsScopedList +func (s *TargetPoolList) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelsScopedListWarning: Informational warning which replaces the -// list of addresses when the list is empty. -type VpnTunnelsScopedListWarning struct { +// TargetPoolListWarning: [Output Only] Informational warning message. 
+type TargetPoolListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -33361,7 +34307,7 @@ type VpnTunnelsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*VpnTunnelsScopedListWarningData `json:"data,omitempty"` + Data []*TargetPoolListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -33384,13 +34330,13 @@ type VpnTunnelsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelsScopedListWarning +func (s *TargetPoolListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelsScopedListWarningData struct { +type TargetPoolListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -33421,28 +34367,17 @@ type VpnTunnelsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelsScopedListWarningData +func (s *TargetPoolListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type WafExpressionSet struct { - // Aliases: A list of alternate IDs. The format should be: - E.g. - // XSS-stable Generic suffix like "stable" is particularly useful if a - // policy likes to avail newer set of expressions without having to - // change the policy. A given alias name can't be used for more than one - // entity set. - Aliases []string `json:"aliases,omitempty"` - - // Expressions: List of available expressions. - Expressions []*WafExpressionSetExpression `json:"expressions,omitempty"` - - // Id: Google specified expression set ID. The format should be: - E.g. - // XSS-20170329 - Id string `json:"id,omitempty"` +type TargetPoolsAddHealthCheckRequest struct { + // HealthChecks: The HttpHealthCheck to add to the target pool. + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` - // ForceSendFields is a list of field names (e.g. "Aliases") to + // ForceSendFields is a list of field names (e.g. "HealthChecks") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -33450,31 +34385,32 @@ type WafExpressionSet struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Aliases") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *WafExpressionSet) MarshalJSON() ([]byte, error) { - type NoMethod WafExpressionSet +func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsAddHealthCheckRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type WafExpressionSetExpression struct { - // Id: Expression ID should uniquely identify the origin of the - // expression. E.g. owasp-crs-v020901-id973337 identifies Owasp core - // rule set version 2.9.1 rule id 973337. The ID could be used to - // determine the individual attack definition that has been detected. It - // could also be used to exclude it from the policy in case of false - // positive. - Id string `json:"id,omitempty"` +type TargetPoolsAddInstanceRequest struct { + // Instances: A full or partial URL to an instance to add to this target + // pool. This can be a full or partial URL. For example, the following + // are valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name + // - projects/project-id/zones/zone/instances/instance-name + // - zones/zone/instances/instance-name + Instances []*InstanceReference `json:"instances,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Instances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -33482,8 +34418,8 @@ type WafExpressionSetExpression struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -33491,43 +34427,49 @@ type WafExpressionSetExpression struct { NullFields []string `json:"-"` } -func (s *WafExpressionSetExpression) MarshalJSON() ([]byte, error) { - type NoMethod WafExpressionSetExpression +func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsAddInstanceRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type XpnHostList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of shared VPC host project URLs. - Items []*Project `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#xpnHostList for - // lists of shared VPC hosts. - Kind string `json:"kind,omitempty"` +type TargetPoolsRemoveHealthCheckRequest struct { + // HealthChecks: Health check URL to be removed. This can be a full or + // valid partial URL. 
For example, the following are valid URLs: + // - + // https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check + // - projects/project/global/httpHealthChecks/health-check + // - global/httpHealthChecks/health-check + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // Warning: [Output Only] Informational warning message. - Warning *XpnHostListWarning `json:"warning,omitempty"` +func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsRemoveHealthCheckRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type TargetPoolsRemoveInstanceRequest struct { + // Instances: URLs of the instances to be removed from target pool. + Instances []*InstanceReference `json:"instances,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Instances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -33535,8 +34477,8 @@ type XpnHostList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
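TargetPoolsAddInstanceRequest and its Remove* counterparts above are thin request bodies built from InstanceReference and HealthCheckReference values, and their comments list the URL forms the server accepts. A sketch of assembling one such body; the zone and instance names are placeholders and the compute/v1 import path is an assumption:

package main

import (
    "encoding/json"
    "fmt"
    "log"

    compute "google.golang.org/api/compute/v1" // assumed import path; may be the beta package in this vendor tree
)

func main() {
    // Any of the URL forms listed in the Instances comment is accepted;
    // the short zones/... form is used here with made-up names.
    req := &compute.TargetPoolsAddInstanceRequest{
        Instances: []*compute.InstanceReference{
            {Instance: "zones/us-central1-a/instances/vm-1"},
            {Instance: "zones/us-central1-a/instances/vm-2"},
        },
    }
    body, err := json.Marshal(req)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(body)) // {"instances":[{"instance":"zones/..."},...]}
}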
@@ -33544,14 +34486,46 @@ type XpnHostList struct { NullFields []string `json:"-"` } -func (s *XpnHostList) MarshalJSON() ([]byte, error) { - type NoMethod XpnHostList +func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsRemoveInstanceRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// XpnHostListWarning: [Output Only] Informational warning message. -type XpnHostListWarning struct { +type TargetPoolsScopedList struct { + // TargetPools: A list of target pools contained in this scope. + TargetPools []*TargetPool `json:"targetPools,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetPools") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetPools") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolsScopedListWarning: Informational warning which replaces +// the list of addresses when the list is empty. +type TargetPoolsScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -33585,7 +34559,7 @@ type XpnHostListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*XpnHostListWarningData `json:"data,omitempty"` + Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -33608,13 +34582,13 @@ type XpnHostListWarning struct { NullFields []string `json:"-"` } -func (s *XpnHostListWarning) MarshalJSON() ([]byte, error) { - type NoMethod XpnHostListWarning +func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type XpnHostListWarningData struct { +type TargetPoolsScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -33645,27 +34619,16 @@ type XpnHostListWarningData struct { NullFields []string `json:"-"` } -func (s *XpnHostListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod XpnHostListWarningData +func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// XpnResourceId: Service resource (a.k.a service project) ID. -type XpnResourceId struct { - // Id: The ID of the service resource. In the case of projects, this - // field supports project id (e.g., my-project-123) and project number - // (e.g. 12345678). - Id string `json:"id,omitempty"` - - // Type: The type of the service resource. - // - // Possible values: - // "PROJECT" - // "XPN_RESOURCE_TYPE_UNSPECIFIED" - Type string `json:"type,omitempty"` +type TargetReference struct { + Target string `json:"target,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Target") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -33673,7 +34636,7 @@ type XpnResourceId struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Target") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -33682,69 +34645,170 @@ type XpnResourceId struct { NullFields []string `json:"-"` } -func (s *XpnResourceId) MarshalJSON() ([]byte, error) { - type NoMethod XpnResourceId +func (s *TargetReference) MarshalJSON() ([]byte, error) { + type NoMethod TargetReference raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Zone: A Zone resource. (== resource_for beta.zones ==) (== -// resource_for v1.zones ==) -type Zone struct { - // AvailableCpuPlatforms: [Output Only] Available cpu/platform - // selections for the zone. - AvailableCpuPlatforms []string `json:"availableCpuPlatforms,omitempty"` +type TargetSslProxiesSetBackendServiceRequest struct { + // Service: The URL of the new BackendService resource for the + // targetSslProxy. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Service") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxiesSetBackendServiceRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetProxyHeaderRequest struct { + // ProxyHeader: The new type of proxy header to append before sending + // data to the backend. NONE or PROXY_V1 are allowed. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProxyHeader") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxiesSetProxyHeaderRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetSslCertificatesRequest struct { + // SslCertificates: New set of URLs to SslCertificate resources to + // associate with this TargetSslProxy. Currently exactly one ssl + // certificate must be specified. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslCertificates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxiesSetSslCertificatesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} +// TargetSslProxy: A TargetSslProxy resource. This resource defines an +// SSL proxy. (== resource_for beta.targetSslProxies ==) (== +// resource_for v1.targetSslProxies ==) +type TargetSslProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
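The TargetSslProxiesSet*Request types above are similarly small request bodies; the SslCertificates comment notes that exactly one certificate must be given today. A sketch of building that payload; the certificate URL is a made-up placeholder and the compute/v1 import path is an assumption:

package main

import (
    "encoding/json"
    "fmt"
    "log"

    compute "google.golang.org/api/compute/v1" // assumed import path; may be the beta package in this vendor tree
)

func main() {
    req := &compute.TargetSslProxiesSetSslCertificatesRequest{
        // Exactly one certificate URL, per the field comment above.
        SslCertificates: []string{
            "https://www.googleapis.com/compute/v1/projects/my-project/global/sslCertificates/my-cert", // placeholder
        },
    }
    body, err := json.Marshal(req)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(body)) // {"sslCertificates":["https://.../my-cert"]}
}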
CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Deprecated: [Output Only] The deprecation status associated with this - // zone. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] Textual description of the resource. + // Description: An optional description of this resource. Provide this + // property when you create the resource. Description string `json:"description,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always compute#zone for - // zones. + // Kind: [Output Only] Type of the resource. Always + // compute#targetSslProxy for target SSL proxies. Kind string `json:"kind,omitempty"` - // Name: [Output Only] Name of the resource. + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Region: [Output Only] Full URL reference to the region which hosts - // the zone. - Region string `json:"region,omitempty"` + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] Status of the zone, either UP or DOWN. - // - // Possible values: - // "DOWN" - // "UP" - Status string `json:"status,omitempty"` + // Service: URL to the BackendService resource. + Service string `json:"service,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to + // authenticate connections to Backends. At least one SSL certificate + // must be specified. Currently, you may specify up to 15 SSL + // certificates. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // SslPolicy: URL of SslPolicy resource that will be associated with the + // TargetSslProxy resource. If not set, the TargetSslProxy resource will + // not have any SSL policy configured. + SslPolicy string `json:"sslPolicy,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. - // "AvailableCpuPlatforms") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AvailableCpuPlatforms") to + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -33754,20 +34818,20 @@ type Zone struct { NullFields []string `json:"-"` } -func (s *Zone) MarshalJSON() ([]byte, error) { - type NoMethod Zone +func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ZoneList: Contains a list of zone resources. -type ZoneList struct { +// TargetSslProxyList: Contains a list of TargetSslProxy resources. +type TargetSslProxyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Zone resources. - Items []*Zone `json:"items,omitempty"` + // Items: A list of TargetSslProxy resources. + Items []*TargetSslProxy `json:"items,omitempty"` // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -33784,7 +34848,7 @@ type ZoneList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *ZoneListWarning `json:"warning,omitempty"` + Warning *TargetSslProxyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -33807,14 +34871,15 @@ type ZoneList struct { NullFields []string `json:"-"` } -func (s *ZoneList) MarshalJSON() ([]byte, error) { - type NoMethod ZoneList +func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ZoneListWarning: [Output Only] Informational warning message. -type ZoneListWarning struct { +// TargetSslProxyListWarning: [Output Only] Informational warning +// message. +type TargetSslProxyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -33848,7 +34913,7 @@ type ZoneListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*ZoneListWarningData `json:"data,omitempty"` + Data []*TargetSslProxyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -33871,13 +34936,13 @@ type ZoneListWarning struct { NullFields []string `json:"-"` } -func (s *ZoneListWarning) MarshalJSON() ([]byte, error) { - type NoMethod ZoneListWarning +func (s *TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ZoneListWarningData struct { +type TargetSslProxyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -33908,25 +34973,50 @@ type ZoneListWarningData struct { NullFields []string `json:"-"` } -func (s *ZoneListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod ZoneListWarningData +func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ZoneSetLabelsRequest struct { - // LabelFingerprint: The fingerprint of the previous set of labels for - // this resource, used to detect conflicts. The fingerprint is initially - // generated by Compute Engine and changes after every request to modify - // or update labels. You must always provide an up-to-date fingerprint - // hash in order to update or change labels. Make a get() request to the - // resource to get the latest fingerprint. - LabelFingerprint string `json:"labelFingerprint,omitempty"` +type TargetTcpProxiesSetBackendServiceRequest struct { + // Service: The URL of the new BackendService resource for the + // targetTcpProxy. + Service string `json:"service,omitempty"` - // Labels: The labels to set for this resource. - Labels map[string]string `json:"labels,omitempty"` + // ForceSendFields is a list of field names (e.g. "Service") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesSetBackendServiceRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxiesSetProxyHeaderRequest struct { + // ProxyHeader: The new type of proxy header to append before sending + // data to the backend. NONE or PROXY_V1 are allowed. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProxyHeader") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -33934,7 +35024,78 @@ type ZoneSetLabelsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LabelFingerprint") to + // NullFields is a list of field names (e.g. "ProxyHeader") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesSetProxyHeaderRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxy: A TargetTcpProxy resource. This resource defines a +// TCP proxy. (== resource_for beta.targetTcpProxies ==) (== +// resource_for v1.targetTcpProxies ==) +type TargetTcpProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#targetTcpProxy for target TCP proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Service: URL to the BackendService resource. + Service string `json:"service,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
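The ForceSendFields / NullFields contract repeated in the doc comments above boils down to three possible wire formats; a minimal sketch using only the TargetTcpProxiesSetBackendServiceRequest type from this hunk (the import path is an assumption — substitute whichever compute API version this generated file belongs to):

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumption: use the version this file is generated for
)

func main() {
	// Empty fields are omitted by default, so this marshals to {}.
	b, _ := json.Marshal(&compute.TargetTcpProxiesSetBackendServiceRequest{})
	fmt.Println(string(b)) // {}

	// ForceSendFields sends the empty value explicitly: {"service":""}.
	b, _ = json.Marshal(&compute.TargetTcpProxiesSetBackendServiceRequest{
		ForceSendFields: []string{"Service"},
	})
	fmt.Println(string(b))

	// NullFields sends an explicit JSON null: {"service":null}.
	b, _ = json.Marshal(&compute.TargetTcpProxiesSetBackendServiceRequest{
		NullFields: []string{"Service"},
	})
	fmt.Println(string(b))
}

json.Marshal dispatches to the generated MarshalJSON shown in this hunk, which is what feeds ForceSendFields and NullFields into gensupport.MarshalJSON.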
However, any field // with an empty value appearing in NullFields will be sent to the @@ -33944,28 +35105,43 @@ type ZoneSetLabelsRequest struct { NullFields []string `json:"-"` } -func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { - type NoMethod ZoneSetLabelsRequest +func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ZoneSetPolicyRequest struct { - // Bindings: Flatten Policy to create a backwacd compatible wire-format. - // Deprecated. Use 'policy' to specify bindings. - Bindings []*Binding `json:"bindings,omitempty"` +// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. +type TargetTcpProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Etag: Flatten Policy to create a backward compatible wire-format. - // Deprecated. Use 'policy' to specify the etag. - Etag string `json:"etag,omitempty"` + // Items: A list of TargetTcpProxy resources. + Items []*TargetTcpProxy `json:"items,omitempty"` - // Policy: REQUIRED: The complete policy to be applied to the - // 'resource'. The size of the policy is limited to a few 10s of KB. An - // empty policy is in general a valid policy but certain services (like - // Projects) might reject them. - Policy *Policy `json:"policy,omitempty"` + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` - // ForceSendFields is a list of field names (e.g. "Bindings") to + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -33973,8 +35149,8 @@ type ZoneSetPolicyRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bindings") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
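A hedged sketch of driving the TargetTcpProxy type above through the generated client; the TargetTcpProxies collection, the NewService constructor, and every project and resource name are assumptions that do not appear in this hunk:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumption: use the version this file is generated for
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials; older releases of
	// this package expose compute.New(*http.Client) instead.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	const project = "my-project" // assumption: placeholder project ID

	proxy := &compute.TargetTcpProxy{
		Name:        "example-tcp-proxy",
		ProxyHeader: "PROXY_V1",
		// Assumption: an existing backend service, referenced by partial URL.
		Service: "global/backendServices/example-backend",
	}
	op, err := svc.TargetTcpProxies.Insert(project, proxy).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("insert operation: %s", op.Name)

	// The proxy header type can later be flipped with the
	// TargetTcpProxiesSetProxyHeaderRequest defined above.
	if _, err := svc.TargetTcpProxies.SetProxyHeader(project, proxy.Name,
		&compute.TargetTcpProxiesSetProxyHeaderRequest{ProxyHeader: "NONE"}).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}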
@@ -33982,15 +35158,14919 @@ type ZoneSetPolicyRequest struct { NullFields []string `json:"-"` } -func (s *ZoneSetPolicyRequest) MarshalJSON() ([]byte, error) { - type NoMethod ZoneSetPolicyRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxyListWarning: [Output Only] Informational warning +// message. +type TargetTcpProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGateway: Represents a Target VPN gateway resource. (== +// resource_for beta.targetVpnGateways ==) (== resource_for +// v1.targetVpnGateways ==) +type TargetVpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule + // resources. ForwardingRules are created using + // compute.forwardingRules.insert and associated with a VPN gateway. + ForwardingRules []string `json:"forwardingRules,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // LabelFingerprint: A fingerprint for the labels being applied to this + // TargetVpnGateway, which is essentially a hash of the labels set used + // for optimistic locking. The fingerprint is initially generated by + // Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels, otherwise the request will fail + // with error 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve a + // TargetVpnGateway. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this TargetVpnGateway resource. These can + // be later modified by the setLabels method. Each label key/value must + // comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // Name: Name of the resource. 
Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: URL of the network to which this VPN gateway is attached. + // Provided by the client when the VPN gateway is created. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URL of the region where the target VPN gateway + // resides. You must specify this field as part of the HTTP request URL. + // It is not settable as a field in the request body. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] The status of the VPN gateway, which can be one + // of the following: CREATING, READY, FAILED, or DELETING. + // + // Possible values: + // "CREATING" + // "DELETING" + // "FAILED" + // "READY" + Status string `json:"status,omitempty"` + + // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. + // VpnTunnels are created using the compute.vpntunnels.insert method and + // associated with a VPN gateway. + Tunnels []string `json:"tunnels,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGateway + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetVpnGateway resources. + Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. 
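A brief sketch of the read side of the LabelFingerprint optimistic-locking scheme described above; the Get call, the compute.Service wiring, and all identifiers are assumptions, and the Labels/LabelFingerprint fields exist in the API version this file is generated from, not necessarily in every version:

package vpnexample

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // assumption: use the version this file is generated for
)

// showGatewayLabels reads one TargetVpnGateway and reports the fingerprint
// that any later label update would have to echo back, per the
// LabelFingerprint comment above (otherwise the API answers 412
// conditionNotMet). project, region and name are caller-supplied placeholders.
func showGatewayLabels(ctx context.Context, svc *compute.Service, project, region, name string) error {
	gw, err := svc.TargetVpnGateways.Get(project, region, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	if gw.Status != "READY" {
		log.Printf("gateway %s not ready: status=%s", gw.Name, gw.Status)
	}
	log.Printf("labels=%v fingerprint=%s", gw.Labels, gw.LabelFingerprint)
	return nil
}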
Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewayAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetVpnGatewayAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewayAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. +type TargetVpnGatewayList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetVpnGateway resources. + Items []*TargetVpnGateway `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. 
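Every List response type in this hunk carries the same NextPageToken contract; a minimal paging sketch for TargetVpnGatewayList, assuming the generated List/PageToken call surface (the generated package also offers a Pages helper that wraps this loop):

package vpnpaging

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumption: use the version this file is generated for
)

// listAllGateways walks every page of a TargetVpnGateways list by feeding each
// response's NextPageToken back as the pageToken query parameter of the next
// request, exactly as the NextPageToken comment above describes.
func listAllGateways(ctx context.Context, svc *compute.Service, project, region string) ([]*compute.TargetVpnGateway, error) {
	var all []*compute.TargetVpnGateway
	token := ""
	for {
		call := svc.TargetVpnGateways.List(project, region).Context(ctx)
		if token != "" {
			call = call.PageToken(token)
		}
		page, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Items...)
		if page.NextPageToken == "" {
			return all, nil
		}
		token = page.NextPageToken
	}
}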
+ NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewayListWarning: [Output Only] Informational warning +// message. +type TargetVpnGatewayListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewayListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewaysScopedList struct { + // TargetVpnGateways: [Output Only] A list of target VPN gateways + // contained in this scope. + TargetVpnGateways []*TargetVpnGateway `json:"targetVpnGateways,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *TargetVpnGatewaysScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetVpnGateways") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetVpnGateways") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewaysScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewaysScopedListWarning: [Output Only] Informational +// warning which replaces the list of addresses when the list is empty. +type TargetVpnGatewaysScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewaysScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewaysScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewaysScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewaysScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TestFailure struct { + ActualService string `json:"actualService,omitempty"` + + ExpectedService string `json:"expectedService,omitempty"` + + Host string `json:"host,omitempty"` + + Path string `json:"path,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ActualService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ActualService") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestFailure) MarshalJSON() ([]byte, error) { + type NoMethod TestFailure + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TestPermissionsRequest struct { + // Permissions: The set of permissions to check for the 'resource'. + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestPermissionsRequest) MarshalJSON() ([]byte, error) { + type NoMethod TestPermissionsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TestPermissionsResponse struct { + // Permissions: A subset of `TestPermissionsRequest.permissions` that + // the caller is allowed. + Permissions []string `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { + type NoMethod TestPermissionsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMap: A UrlMap resource. This resource defines the mapping from URL +// to the BackendService resource, based on the "longest-match" of the +// URL's host and path. +type UrlMap struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // DefaultRouteAction: defaultRouteAction takes effect when none of the + // hostRules match. The load balancer performs advanced routing actions + // like URL rewrites, header transformations, etc. prior to forwarding + // the request to the selected backend. If defaultRouteAction specifies + // any weightedBackendServices, defaultService must not be set. + // Conversely if defaultService is set, defaultRouteAction cannot + // contain any weightedBackendServices. + // Only one of defaultRouteAction or defaultUrlRedirect must be set. + DefaultRouteAction *HttpRouteAction `json:"defaultRouteAction,omitempty"` + + // DefaultService: The full or partial URL of the defaultService + // resource to which traffic is directed if none of the hostRules match. + // If defaultRouteAction is additionally specified, advanced routing + // actions like URL Rewrites, etc. take effect prior to sending the + // request to the backend. However, if defaultService is specified, + // defaultRouteAction cannot contain any weightedBackendServices. 
+ // Conversely, if routeAction specifies any weightedBackendServices, + // service must not be specified. + // Only one of defaultService, defaultUrlRedirect or + // defaultRouteAction.weightedBackendService must be set. + DefaultService string `json:"defaultService,omitempty"` + + // DefaultUrlRedirect: When none of the specified hostRules match, the + // request is redirected to a URL specified by defaultUrlRedirect. + // If defaultUrlRedirect is specified, defaultService or + // defaultRouteAction must not be set. + DefaultUrlRedirect *HttpRedirectAction `json:"defaultUrlRedirect,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a UrlMap. An up-to-date + // fingerprint must be provided in order to update the UrlMap, otherwise + // the request will fail with error 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve a + // UrlMap. + Fingerprint string `json:"fingerprint,omitempty"` + + // HeaderAction: Specifies changes to request and response headers that + // need to take effect for the selected backendService. + // The headerAction specified here take effect after headerAction + // specified under pathMatcher. + HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` + + // HostRules: The list of HostRules to use against the URL. + HostRules []*HostRule `json:"hostRules,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#urlMaps for + // url maps. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PathMatchers: The list of named PathMatchers to use against the URL. + PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` + + // Region: [Output Only] URL of the region where the regional URL map + // resides. This field is not applicable to global URL maps. You must + // specify this field as part of the HTTP request URL. It is not + // settable as a field in the request body. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Tests: The list of expected URL mapping tests. Request to update this + // UrlMap will succeed only if all of the test cases pass. You can + // specify a maximum of 100 tests per UrlMap. + Tests []*UrlMapTest `json:"tests,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UrlMap) MarshalJSON() ([]byte, error) { + type NoMethod UrlMap + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapList: Contains a list of UrlMap resources. +type UrlMapList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of UrlMap resources. + Items []*UrlMap `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *UrlMapListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapList) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapListWarning: [Output Only] Informational warning message. +type UrlMapListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*UrlMapListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapListWarning) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapReference struct { + UrlMap string `json:"urlMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "UrlMap") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "UrlMap") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapReference) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapTest: Message for the expected URL mappings. +type UrlMapTest struct { + // Description: Description of this test case. + Description string `json:"description,omitempty"` + + // Host: Host portion of the URL. + Host string `json:"host,omitempty"` + + // Path: Path portion of the URL. + Path string `json:"path,omitempty"` + + // Service: Expected BackendService resource the given URL should be + // mapped to. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapTest) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapTest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapValidationResult: Message representing the validation result +// for a UrlMap. +type UrlMapValidationResult struct { + LoadErrors []string `json:"loadErrors,omitempty"` + + // LoadSucceeded: Whether the given UrlMap can be successfully loaded. 
+ // If false, 'loadErrors' indicates the reasons. + LoadSucceeded bool `json:"loadSucceeded,omitempty"` + + TestFailures []*TestFailure `json:"testFailures,omitempty"` + + // TestPassed: If successfully loaded, this field indicates whether the + // test passed. If false, 'testFailures's indicate the reason of + // failure. + TestPassed bool `json:"testPassed,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LoadErrors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LoadErrors") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapValidationResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of UrlMapsScopedList resources. + Items map[string]UrlMapsScopedList `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *UrlMapsAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
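A hedged sketch of how UrlMap, UrlMapTest, TestFailure and UrlMapValidationResult fit together; the UrlMaps.Validate call and the UrlMapsValidateRequest/Response wrapper types belong to the same generated package but are not part of this hunk, and every host, path and backend name is invented:

package urlmapexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumption: use the version this file is generated for
)

// validateExampleMap builds a UrlMap whose embedded tests assert the expected
// host/path -> backend mapping, asks the API to validate it, and reports any
// failures through UrlMapValidationResult.
func validateExampleMap(ctx context.Context, svc *compute.Service, project string) error {
	urlMap := &compute.UrlMap{
		Name:           "example-map",
		DefaultService: "global/backendServices/web-default", // assumption: existing backend
		HostRules: []*compute.HostRule{{
			Hosts:       []string{"example.com"},
			PathMatcher: "api",
		}},
		PathMatchers: []*compute.PathMatcher{{
			Name:           "api",
			DefaultService: "global/backendServices/web-default",
			PathRules: []*compute.PathRule{{
				Paths:   []string{"/api/*"},
				Service: "global/backendServices/api-backend",
			}},
		}},
		// Tests are evaluated on update: the UrlMap is rejected if any fail.
		Tests: []*compute.UrlMapTest{{
			Host:    "example.com",
			Path:    "/api/v1/users",
			Service: "global/backendServices/api-backend",
		}},
	}

	resp, err := svc.UrlMaps.Validate(project, urlMap.Name,
		&compute.UrlMapsValidateRequest{Resource: urlMap}).Context(ctx).Do()
	if err != nil {
		return err
	}
	result := resp.Result // *compute.UrlMapValidationResult
	if !result.LoadSucceeded {
		return fmt.Errorf("url map failed to load: %v", result.LoadErrors)
	}
	for _, f := range result.TestFailures {
		fmt.Printf("test failed: %s%s routed to %s, expected %s\n",
			f.Host, f.Path, f.ActualService, f.ExpectedService)
	}
	return nil
}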
+ NullFields []string `json:"-"` +} + +func (s *UrlMapsAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapsAggregatedListWarning: [Output Only] Informational warning +// message. +type UrlMapsAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*UrlMapsAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. 
+ Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsScopedList struct { + // UrlMaps: A list of UrlMaps contained in this scope. + UrlMaps []*UrlMap `json:"urlMaps,omitempty"` + + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *UrlMapsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "UrlMaps") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "UrlMaps") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapsScopedListWarning: Informational warning which replaces the +// list of backend services when the list is empty. +type UrlMapsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*UrlMapsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsValidateRequest struct { + // Resource: Content of the UrlMap to be validated. + Resource *UrlMap `json:"resource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Resource") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Resource") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsValidateRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsValidateResponse struct { + Result *UrlMapValidationResult `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsValidateResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlRewrite: The spec for modifying the path before sending the +// request to the matched backend service. +type UrlRewrite struct { + // HostRewrite: Prior to forwarding the request to the selected service, + // the request's host header is replaced with contents of + // hostRewrite. + // The value must be between 1 and 255 characters. 
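Editor's note: UrlMapsValidateRequest/Response above wrap urlMaps.validate, which loads a UrlMap server side and runs its attached UrlMapTest expectations without touching the live configuration. A sketch reusing the svc client and imports from the pagination example earlier:

// validateUrlMap asks the API to load the given UrlMap and run its Tests.
// It returns an error if the map fails to load or any expectation fails.
func validateUrlMap(svc *compute.Service, project string, um *compute.UrlMap) error {
	resp, err := svc.UrlMaps.Validate(project, um.Name, &compute.UrlMapsValidateRequest{
		Resource: um,
	}).Do()
	if err != nil {
		return err
	}
	if !resp.Result.LoadSucceeded {
		return fmt.Errorf("url map did not load: %v", resp.Result.LoadErrors)
	}
	if !resp.Result.TestPassed {
		return fmt.Errorf("%d url map test(s) failed", len(resp.Result.TestFailures))
	}
	return nil
}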
+ HostRewrite string `json:"hostRewrite,omitempty"` + + // PathPrefixRewrite: Prior to forwarding the request to the selected + // backend service, the matching portion of the request's path is + // replaced by pathPrefixRewrite. + // The value must be between 1 and 1024 characters. + PathPrefixRewrite string `json:"pathPrefixRewrite,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HostRewrite") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HostRewrite") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlRewrite) MarshalJSON() ([]byte, error) { + type NoMethod UrlRewrite + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsableSubnetwork: Subnetwork which the current user has +// compute.subnetworks.use permission on. +type UsableSubnetwork struct { + // IpCidrRange: The range of internal addresses that are owned by this + // subnetwork. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // Network: Network URL. + Network string `json:"network,omitempty"` + + // SecondaryIpRanges: Secondary IP ranges. + SecondaryIpRanges []*UsableSubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` + + // Subnetwork: Subnetwork URL. + Subnetwork string `json:"subnetwork,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetwork) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetwork + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsableSubnetworkSecondaryRange: Secondary IP range of a usable +// subnetwork. +type UsableSubnetworkSecondaryRange struct { + // IpCidrRange: The range of IP addresses belonging to this subnetwork + // secondary range. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // RangeName: The name associated with this subnetwork secondary range, + // used when adding an alias IP range to a VM instance. 
The name must be + // 1-63 characters long, and comply with RFC1035. The name must be + // unique within the subnetwork. + RangeName string `json:"rangeName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetworkSecondaryRange + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UsableSubnetworksAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output] A list of usable subnetwork URLs. + Items []*UsableSubnetwork `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#usableSubnetworksAggregatedList for aggregated lists of + // usable subnetworks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *UsableSubnetworksAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
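Editor's note: UsableSubnetwork, its secondary ranges, and UsableSubnetworksAggregatedList above are the payload of subnetworks.listUsable, which returns only the subnetworks the caller holds compute.subnetworks.use on. A sketch under the same client assumptions (first page only, for brevity; follow NextPageToken as in the pagination sketch above for the rest):

// printUsableSubnetworks lists subnetworks the caller can actually place
// resources in, together with their secondary IP ranges.
func printUsableSubnetworks(svc *compute.Service, project string) error {
	resp, err := svc.Subnetworks.ListUsable(project).Do()
	if err != nil {
		return err
	}
	for _, sn := range resp.Items {
		fmt.Printf("%s (%s)\n", sn.Subnetwork, sn.IpCidrRange)
		for _, r := range sn.SecondaryIpRanges {
			fmt.Printf("  secondary %s: %s\n", r.RangeName, r.IpCidrRange)
		}
	}
	return nil
}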
+ NullFields []string `json:"-"` +} + +func (s *UsableSubnetworksAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetworksAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsableSubnetworksAggregatedListWarning: [Output Only] Informational +// warning message. +type UsableSubnetworksAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*UsableSubnetworksAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetworksAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetworksAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UsableSubnetworksAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetworksAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetworksAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsageExportLocation: The location in Cloud Storage and naming method +// of the daily usage report. Contains bucket_name and report_name +// prefix. +type UsageExportLocation struct { + // BucketName: The name of an existing bucket in Cloud Storage where the + // usage report object is stored. The Google Service Account is granted + // write access to this bucket. This can either be the bucket name by + // itself, such as example-bucket, or the bucket name with gs:// or + // https://storage.googleapis.com/ in front of it, such as + // gs://example-bucket. + BucketName string `json:"bucketName,omitempty"` + + // ReportNamePrefix: An optional prefix for the name of the usage report + // object stored in bucketName. If not supplied, defaults to usage. The + // report is stored as a CSV file named + // report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the + // usage according to Pacific Time. If you supply a prefix, it should + // conform to Cloud Storage object naming conventions. + ReportNamePrefix string `json:"reportNamePrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BucketName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
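Editor's note: UsageExportLocation above is the request body of projects.setUsageExportBucket. A sketch that enables daily usage reports using the gs:// bucket form and report prefix described in the field comments (the bucket name is the documentation's own placeholder, not a real bucket):

// enableUsageExport writes daily usage reports to the given bucket as
// <prefix>_gce_YYYYMMDD.csv, per the UsageExportLocation documentation.
func enableUsageExport(svc *compute.Service, project string) error {
	op, err := svc.Projects.SetUsageExportBucket(project, &compute.UsageExportLocation{
		BucketName:       "gs://example-bucket", // placeholder bucket
		ReportNamePrefix: "usage",               // optional; "usage" is the default
	}).Do()
	if err != nil {
		return err
	}
	fmt.Printf("started operation %s\n", op.Name) // poll the Operation if you need to wait
	return nil
}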
+ NullFields []string `json:"-"` +} + +func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { + type NoMethod UsageExportLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VmEndpointNatMappings: Contains Nat mapping information for a VM + // endpoint (i.e., NIC). +type VmEndpointNatMappings struct { + // InstanceName: Name of the VM instance which the endpoint belongs to. + InstanceName string `json:"instanceName,omitempty"` + + InterfaceNatMappings []*VmEndpointNatMappingsInterfaceNatMappings `json:"interfaceNatMappings,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappings) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VmEndpointNatMappingsInterfaceNatMappings: Contains Nat mapping + // information for an interface of this endpoint. +type VmEndpointNatMappingsInterfaceNatMappings struct { + // NatIpPortRanges: A list of all IP:port-range mappings assigned to + // this interface. These ranges are inclusive, that is, both the first + // and the last ports can be used for NAT. Example: + // ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. + NatIpPortRanges []string `json:"natIpPortRanges,omitempty"` + + // NumTotalNatPorts: Total number of ports across all NAT IPs allocated + // to this interface. It equals the aggregated number of ports in the + // field nat_ip_port_ranges. + NumTotalNatPorts int64 `json:"numTotalNatPorts,omitempty"` + + // SourceAliasIpRange: Alias IP range for this interface endpoint. It + // will be a private (RFC 1918) IP range. Examples: "10.33.4.55/32", or + // "192.168.5.0/24". + SourceAliasIpRange string `json:"sourceAliasIpRange,omitempty"` + + // SourceVirtualIp: Primary IP of the VM for this NIC. + SourceVirtualIp string `json:"sourceVirtualIp,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NatIpPortRanges") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NatIpPortRanges") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests.
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsInterfaceNatMappings) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsInterfaceNatMappings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VmEndpointNatMappingsList: Contains a list of VmEndpointNatMappings. +type VmEndpointNatMappingsList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#vmEndpointNatMappingsList for lists of Nat mappings of VM + // endpoints. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Result: [Output Only] A list of Nat mapping information of VM + // endpoints. + Result []*VmEndpointNatMappings `json:"result,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VmEndpointNatMappingsListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsList) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VmEndpointNatMappingsListWarning: [Output Only] Informational warning +// message. +type VmEndpointNatMappingsListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
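Editor's note: the VmEndpointNatMappings* types above are returned by routers.getNatMappingInfo, which reports the NAT IP:port ranges allocated to each VM NIC behind a Cloud NAT. A sketch under the same client assumptions:

// printNatMappings dumps the NAT port allocations for every VM endpoint
// behind the given Cloud Router / Cloud NAT.
func printNatMappings(svc *compute.Service, project, region, router string) error {
	resp, err := svc.Routers.GetNatMappingInfo(project, region, router).Do()
	if err != nil {
		return err
	}
	for _, vm := range resp.Result {
		for _, nic := range vm.InterfaceNatMappings {
			fmt.Printf("%s: %d ports across %v\n",
				vm.InstanceName, nic.NumTotalNatPorts, nic.NatIpPortRanges)
		}
	}
	return nil
}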
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VmEndpointNatMappingsListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VmEndpointNatMappingsListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGateway: Represents a VPN gateway resource. +type VpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#vpnGateway for + // VPN gateways. + Kind string `json:"kind,omitempty"` + + // LabelFingerprint: A fingerprint for the labels being applied to this + // VpnGateway, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels, otherwise the request will fail with error + // 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve an + // VpnGateway. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this VpnGateway resource. These can be + // later modified by the setLabels method. Each label key/value must + // comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: URL of the network to which this VPN gateway is attached. + // Provided by the client when the VPN gateway is created. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URL of the region where the VPN gateway + // resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // VpnInterfaces: [Output Only] A list of interfaces on this VPN + // gateway. + VpnInterfaces []*VpnGatewayVpnGatewayInterface `json:"vpnInterfaces,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnGateway) MarshalJSON() ([]byte, error) { + type NoMethod VpnGateway + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewayAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnGateway resources. + Items map[string]VpnGatewaysScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnGateway for + // VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnGatewayAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayAggregatedListWarning: [Output Only] Informational warning +// message. +type VpnGatewayAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. 
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnGatewayAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewayAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayList: Contains a list of VpnGateway resources. +type VpnGatewayList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnGateway resources. + Items []*VpnGateway `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnGateway for + // VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnGatewayListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayList) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayListWarning: [Output Only] Informational warning message. +type VpnGatewayListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
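Editor's note: VpnGateway and VpnGatewayList above come back from vpnGateways.list (HA VPN gateways are regional resources). A sketch under the same client assumptions, and assuming this revision exposes the vpnGateways service (which these types imply), printing each gateway's per-interface external IPs:

// printVpnGateways lists the HA VPN gateways in one region and their
// per-interface external IP addresses.
func printVpnGateways(svc *compute.Service, project, region string) error {
	resp, err := svc.VpnGateways.List(project, region).Do()
	if err != nil {
		return err
	}
	for _, gw := range resp.Items {
		for _, ifc := range gw.VpnInterfaces {
			fmt.Printf("%s interface %d: %s\n", gw.Name, ifc.Id, ifc.IpAddress)
		}
	}
	return nil
}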
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnGatewayListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewayListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewayStatus struct { + // VpnConnections: List of VPN connection for this VpnGateway. + VpnConnections []*VpnGatewayStatusVpnConnection `json:"vpnConnections,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VpnConnections") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VpnConnections") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayStatus) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayStatusHighAvailabilityRequirementState: Describes the high +// availability requirement state for the VPN connection between this +// Cloud VPN gateway and a peer gateway. +type VpnGatewayStatusHighAvailabilityRequirementState struct { + // State: Indicates the high availability requirement state for the VPN + // connection. Valid values are CONNECTION_REDUNDANCY_MET, + // CONNECTION_REDUNDANCY_NOT_MET. + // + // Possible values: + // "CONNECTION_REDUNDANCY_MET" + // "CONNECTION_REDUNDANCY_NOT_MET" + State string `json:"state,omitempty"` + + // UnsatisfiedReason: Indicates the reason why the VPN connection does + // not meet the high availability redundancy criteria/requirement. Valid + // values is INCOMPLETE_TUNNELS_COVERAGE. + // + // Possible values: + // "INCOMPLETE_TUNNELS_COVERAGE" + UnsatisfiedReason string `json:"unsatisfiedReason,omitempty"` + + // ForceSendFields is a list of field names (e.g. "State") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "State") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayStatusHighAvailabilityRequirementState) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayStatusHighAvailabilityRequirementState + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayStatusTunnel: Contains some information about a VPN tunnel. +type VpnGatewayStatusTunnel struct { + // LocalGatewayInterface: The VPN gateway interface this VPN tunnel is + // associated with. + LocalGatewayInterface int64 `json:"localGatewayInterface,omitempty"` + + // PeerGatewayInterface: The peer gateway interface this VPN tunnel is + // connected to, the peer gateway could either be an external VPN + // gateway or GCP VPN gateway. + PeerGatewayInterface int64 `json:"peerGatewayInterface,omitempty"` + + // TunnelUrl: URL reference to the VPN tunnel. + TunnelUrl string `json:"tunnelUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "LocalGatewayInterface") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LocalGatewayInterface") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayStatusTunnel + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayStatusVpnConnection: A VPN connection contains all VPN +// tunnels connected from this VpnGateway to the same peer gateway. The +// peer gateway could either be a external VPN gateway or GCP VPN +// gateway. +type VpnGatewayStatusVpnConnection struct { + // PeerExternalGateway: URL reference to the peer external VPN gateways + // to which the VPN tunnels in this VPN connection are connected. This + // field is mutually exclusive with peer_gcp_gateway. + PeerExternalGateway string `json:"peerExternalGateway,omitempty"` + + // PeerGcpGateway: URL reference to the peer side VPN gateways to which + // the VPN tunnels in this VPN connection are connected. This field is + // mutually exclusive with peer_gcp_gateway. + PeerGcpGateway string `json:"peerGcpGateway,omitempty"` + + // State: HighAvailabilityRequirementState for the VPN connection. + State *VpnGatewayStatusHighAvailabilityRequirementState `json:"state,omitempty"` + + // Tunnels: List of VPN tunnels that are in this VPN connection. + Tunnels []*VpnGatewayStatusTunnel `json:"tunnels,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PeerExternalGateway") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PeerExternalGateway") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayStatusVpnConnection) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayStatusVpnConnection + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewayVpnGatewayInterface: A VPN gateway interface. +type VpnGatewayVpnGatewayInterface struct { + // Id: The numeric ID of this VPN gateway interface. + Id int64 `json:"id,omitempty"` + + // IpAddress: The external IP address for this VPN gateway interface. + IpAddress string `json:"ipAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewayVpnGatewayInterface) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewayVpnGatewayInterface + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewaysGetStatusResponse struct { + Result *VpnGatewayStatus `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
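VpnGatewaysGetStatusResponse wraps the VpnGatewayStatus defined above, whose VpnConnections group tunnels per peer gateway and carry the high-availability requirement state. A hedged sketch of walking that structure; obtaining the response (the getStatus call) is assumed to happen elsewhere in this package.

package compute

import "fmt"

// sketchInspectGatewayStatus reports, for each per-peer VPN connection, how
// many tunnels it has and whether it meets the HA redundancy requirement.
func sketchInspectGatewayStatus(resp *VpnGatewaysGetStatusResponse) {
	if resp == nil || resp.Result == nil {
		return
	}
	for _, conn := range resp.Result.VpnConnections {
		peer := conn.PeerGcpGateway
		if peer == "" {
			peer = conn.PeerExternalGateway // the two peer fields are mutually exclusive
		}
		met := conn.State != nil && conn.State.State == "CONNECTION_REDUNDANCY_MET"
		fmt.Printf("peer %s: %d tunnel(s), redundancy met: %v\n", peer, len(conn.Tunnels), met)
		if conn.State != nil && conn.State.UnsatisfiedReason != "" {
			fmt.Println("  reason:", conn.State.UnsatisfiedReason) // e.g. INCOMPLETE_TUNNELS_COVERAGE
		}
	}
}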
+ NullFields []string `json:"-"` +} + +func (s *VpnGatewaysGetStatusResponse) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewaysGetStatusResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewaysScopedList struct { + // VpnGateways: [Output Only] A list of VPN gateways contained in this + // scope. + VpnGateways []*VpnGateway `json:"vpnGateways,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *VpnGatewaysScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VpnGateways") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VpnGateways") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewaysScopedList) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewaysScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnGatewaysScopedListWarning: [Output Only] Informational warning +// which replaces the list of addresses when the list is empty. +type VpnGatewaysScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnGatewaysScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
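As the comments above note, a scoped list whose scope is empty carries an informational Warning in place of the gateways. A small sketch of honouring that convention, assumed to sit in this package; Name is the usual name field on the VpnGateway resource defined earlier in this file.

package compute

import "fmt"

// sketchScopedGateways prints the gateways in one scope, falling back to the
// warning that replaces the list when the scope is empty.
func sketchScopedGateways(scope string, l *VpnGatewaysScopedList) {
	if l == nil {
		return
	}
	if len(l.VpnGateways) == 0 {
		if l.Warning != nil {
			// NO_RESULTS_ON_PAGE is the typical code for an empty scope.
			fmt.Printf("%s: %s (%s)\n", scope, l.Warning.Message, l.Warning.Code)
		}
		return
	}
	for _, gw := range l.VpnGateways {
		fmt.Println(scope, gw.Name)
	}
}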
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewaysScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnGatewaysScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnGatewaysScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnel: VPN tunnel resource. (== resource_for beta.vpnTunnels ==) +// (== resource_for v1.vpnTunnels ==) +type VpnTunnel struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // DetailedStatus: [Output Only] Detailed status message for the VPN + // tunnel. + DetailedStatus string `json:"detailedStatus,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. 
+ Id uint64 `json:"id,omitempty,string"` + + // IkeVersion: IKE protocol version to use when establishing the VPN + // tunnel with the peer VPN gateway. Acceptable IKE versions are 1 or 2. + // The default version is 2. + IkeVersion int64 `json:"ikeVersion,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // LabelFingerprint: A fingerprint for the labels being applied to this + // VpnTunnel, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels, otherwise the request will fail with error + // 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve a + // VpnTunnel. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this VpnTunnel. These can be later + // modified by the setLabels method. Each label key/value pair must + // comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // LocalTrafficSelector: Local traffic selector to use when establishing + // the VPN tunnel with the peer VPN gateway. The value should be a CIDR + // formatted string, for example: 192.168.0.0/16. The ranges must be + // disjoint. Only IPv4 is supported. + LocalTrafficSelector []string `json:"localTrafficSelector,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PeerExternalGateway: URL of the peer side external VPN gateway to + // which this VPN tunnel is connected. Provided by the client when the + // VPN tunnel is created. This field is exclusive with the field + // peerGcpGateway. + PeerExternalGateway string `json:"peerExternalGateway,omitempty"` + + // PeerExternalGatewayInterface: The interface ID of the external VPN + // gateway to which this VPN tunnel is connected. Provided by the client + // when the VPN tunnel is created. + PeerExternalGatewayInterface int64 `json:"peerExternalGatewayInterface,omitempty"` + + // PeerGcpGateway: URL of the peer side HA GCP VPN gateway to which this + // VPN tunnel is connected. Provided by the client when the VPN tunnel + // is created. This field can be used when creating highly available VPN + // from VPC network to VPC network, the field is exclusive with the + // field peerExternalGateway. If provided, the VPN tunnel will + // automatically use the same vpnGatewayInterface ID in the peer GCP VPN + // gateway. + PeerGcpGateway string `json:"peerGcpGateway,omitempty"` + + // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. + PeerIp string `json:"peerIp,omitempty"` + + // Region: [Output Only] URL of the region where the VPN tunnel resides. + // You must specify this field as part of the HTTP request URL. It is + // not settable as a field in the request body. 
+ Region string `json:"region,omitempty"` + + // RemoteTrafficSelector: Remote traffic selectors to use when + // establishing the VPN tunnel with the peer VPN gateway. The value + // should be a CIDR formatted string, for example: 192.168.0.0/16. The + // ranges should be disjoint. Only IPv4 is supported. + RemoteTrafficSelector []string `json:"remoteTrafficSelector,omitempty"` + + // Router: URL of the router resource to be used for dynamic routing. + Router string `json:"router,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SharedSecret: Shared secret used to set the secure session between + // the Cloud VPN gateway and the peer VPN gateway. + SharedSecret string `json:"sharedSecret,omitempty"` + + // SharedSecretHash: Hash of the shared secret. + SharedSecretHash string `json:"sharedSecretHash,omitempty"` + + // Status: [Output Only] The status of the VPN tunnel, which can be one + // of the following: + // - PROVISIONING: Resource is being allocated for the VPN tunnel. + // - WAITING_FOR_FULL_CONFIG: Waiting to receive all VPN-related configs + // from the user. Network, TargetVpnGateway, VpnTunnel, ForwardingRule, + // and Route resources are needed to setup the VPN tunnel. + // - FIRST_HANDSHAKE: Successful first handshake with the peer VPN. + // - ESTABLISHED: Secure session is successfully established with the + // peer VPN. + // - NETWORK_ERROR: Deprecated, replaced by NO_INCOMING_PACKETS + // - AUTHORIZATION_ERROR: Auth error (for example, bad shared secret). + // + // - NEGOTIATION_FAILURE: Handshake failed. + // - DEPROVISIONING: Resources are being deallocated for the VPN tunnel. + // + // - FAILED: Tunnel creation has failed and the tunnel is not ready to + // be used. + // + // Possible values: + // "ALLOCATING_RESOURCES" + // "AUTHORIZATION_ERROR" + // "DEPROVISIONING" + // "ESTABLISHED" + // "FAILED" + // "FIRST_HANDSHAKE" + // "NEGOTIATION_FAILURE" + // "NETWORK_ERROR" + // "NO_INCOMING_PACKETS" + // "PROVISIONING" + // "REJECTED" + // "WAITING_FOR_FULL_CONFIG" + Status string `json:"status,omitempty"` + + // TargetVpnGateway: URL of the Target VPN gateway with which this VPN + // tunnel is associated. Provided by the client when the VPN tunnel is + // created. + TargetVpnGateway string `json:"targetVpnGateway,omitempty"` + + // VpnGateway: URL of the VPN gateway with which this VPN tunnel is + // associated. Provided by the client when the VPN tunnel is created. + // This must be used (instead of target_vpn_gateway) if a High + // Availability VPN gateway resource is created. + VpnGateway string `json:"vpnGateway,omitempty"` + + // VpnGatewayInterface: The interface ID of the VPN gateway with which + // this VPN tunnel is associated. + VpnGatewayInterface int64 `json:"vpnGatewayInterface,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnel) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnel + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnTunnelsScopedList resources. + Items map[string]VpnTunnelsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnTunnelAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelAggregatedListWarning: [Output Only] Informational warning +// message. +type VpnTunnelAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelList: Contains a list of VpnTunnel resources. +type VpnTunnelList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnTunnel resources. + Items []*VpnTunnel `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnTunnelListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelListWarning: [Output Only] Informational warning message. +type VpnTunnelListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelsScopedList struct { + // VpnTunnels: A list of VPN tunnels contained in this scope. + VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *VpnTunnelsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VpnTunnels") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VpnTunnels") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelsScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelsScopedListWarning: Informational warning which replaces the +// list of addresses when the list is empty. +type VpnTunnelsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelsScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelsScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type WafExpressionSet struct { + // Aliases: A list of alternate IDs. The format should be: - E.g. + // XSS-stable Generic suffix like "stable" is particularly useful if a + // policy likes to avail newer set of expressions without having to + // change the policy. A given alias name can't be used for more than one + // entity set. + Aliases []string `json:"aliases,omitempty"` + + // Expressions: List of available expressions. + Expressions []*WafExpressionSetExpression `json:"expressions,omitempty"` + + // Id: Google specified expression set ID. The format should be: - E.g. 
+ // XSS-20170329 + Id string `json:"id,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Aliases") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Aliases") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *WafExpressionSet) MarshalJSON() ([]byte, error) { + type NoMethod WafExpressionSet + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type WafExpressionSetExpression struct { + // Id: Expression ID should uniquely identify the origin of the + // expression. E.g. owasp-crs-v020901-id973337 identifies Owasp core + // rule set version 2.9.1 rule id 973337. The ID could be used to + // determine the individual attack definition that has been detected. It + // could also be used to exclude it from the policy in case of false + // positive. + Id string `json:"id,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *WafExpressionSetExpression) MarshalJSON() ([]byte, error) { + type NoMethod WafExpressionSetExpression + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// WeightedBackendService: In contrast to a single BackendService in +// HttpRouteAction to which all matching traffic is directed to, +// WeightedBackendService allows traffic to be split across multiple +// BackendServices. The volume of traffic for each BackendService is +// proportional to the weight specified in each WeightedBackendService +type WeightedBackendService struct { + // BackendService: The full or partial URL to the default BackendService + // resource. Before forwarding the request to backendService, the + // loadbalancer applies any relevant headerActions specified as part of + // this backendServiceWeight. + BackendService string `json:"backendService,omitempty"` + + // HeaderAction: Specifies changes to request and response headers that + // need to take effect for the selected backendService. 
+ // headerAction specified here take effect before headerAction in the + // enclosing HttpRouteRule, PathMatcher and UrlMap. + HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` + + // Weight: Specifies the fraction of traffic sent to backendService, + // computed as weight / (sum of all weightedBackendService weights in + // routeAction) . + // The selection of a backend service is determined only for new + // traffic. Once a user's request has been directed to a backendService, + // subsequent requests will be sent to the same backendService as + // determined by the BackendService's session affinity policy. + // The value must be between 0 and 1000 + Weight int64 `json:"weight,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackendService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackendService") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *WeightedBackendService) MarshalJSON() ([]byte, error) { + type NoMethod WeightedBackendService + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type XpnHostList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of shared VPC host project URLs. + Items []*Project `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#xpnHostList for + // lists of shared VPC hosts. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *XpnHostListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostList) MarshalJSON() ([]byte, error) { + type NoMethod XpnHostList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// XpnHostListWarning: [Output Only] Informational warning message. +type XpnHostListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*XpnHostListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostListWarning) MarshalJSON() ([]byte, error) { + type NoMethod XpnHostListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type XpnHostListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod XpnHostListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// XpnResourceId: Service resource (a.k.a service project) ID. +type XpnResourceId struct { + // Id: The ID of the service resource. In the case of projects, this + // field supports project id (e.g., my-project-123) and project number + // (e.g. 12345678). + Id string `json:"id,omitempty"` + + // Type: The type of the service resource. + // + // Possible values: + // "PROJECT" + // "XPN_RESOURCE_TYPE_UNSPECIFIED" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnResourceId) MarshalJSON() ([]byte, error) { + type NoMethod XpnResourceId + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Zone: A Zone resource. (== resource_for beta.zones ==) (== +// resource_for v1.zones ==) Next ID: 17 +type Zone struct { + // AvailableCpuPlatforms: [Output Only] Available cpu/platform + // selections for the zone. + AvailableCpuPlatforms []string `json:"availableCpuPlatforms,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. 
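XpnResourceId above identifies a service project for shared VPC (XPN) operations and accepts either a project ID or a project number. A tiny sketch; the enable/disable calls that consume these values live elsewhere in this package and are not shown.

package compute

// sketchServiceProjects shows both accepted Id forms for a service project.
func sketchServiceProjects() []*XpnResourceId {
	return []*XpnResourceId{
		{Id: "my-project-123", Type: "PROJECT"}, // project ID
		{Id: "12345678", Type: "PROJECT"},       // project number
	}
}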
+ CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: [Output Only] The deprecation status associated with this + // zone. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] Textual description of the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#zone for + // zones. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // Region: [Output Only] Full URL reference to the region which hosts + // the zone. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] Status of the zone, either UP or DOWN. + // + // Possible values: + // "DOWN" + // "UP" + Status string `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "AvailableCpuPlatforms") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AvailableCpuPlatforms") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Zone) MarshalJSON() ([]byte, error) { + type NoMethod Zone + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ZoneList: Contains a list of zone resources. +type ZoneList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Zone resources. + Items []*Zone `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *ZoneListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneList) MarshalJSON() ([]byte, error) { + type NoMethod ZoneList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ZoneListWarning: [Output Only] Informational warning message. +type ZoneListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ZoneListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ZoneListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ZoneListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ZoneListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneSetLabelsRequest struct { + // LabelFingerprint: The fingerprint of the previous set of labels for + // this resource, used to detect conflicts. The fingerprint is initially + // generated by Compute Engine and changes after every request to modify + // or update labels. You must always provide an up-to-date fingerprint + // hash in order to update or change labels. Make a get() request to the + // resource to get the latest fingerprint. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: The labels to set for this resource. + Labels map[string]string `json:"labels,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LabelFingerprint") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"`
+}
+
+func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) {
+ type NoMethod ZoneSetLabelsRequest
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+type ZoneSetPolicyRequest struct {
+ // Bindings: Flatten Policy to create a backward compatible wire-format.
+ // Deprecated. Use 'policy' to specify bindings.
+ Bindings []*Binding `json:"bindings,omitempty"`
+
+ // Etag: Flatten Policy to create a backward compatible wire-format.
+ // Deprecated. Use 'policy' to specify the etag.
+ Etag string `json:"etag,omitempty"`
+
+ // Policy: REQUIRED: The complete policy to be applied to the
+ // 'resource'. The size of the policy is limited to a few 10s of KB. An
+ // empty policy is in general a valid policy but certain services (like
+ // Projects) might reject them.
+ Policy *Policy `json:"policy,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Bindings") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+
+ // NullFields is a list of field names (e.g. "Bindings") to include in
+ // API requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"`
+}
+
+func (s *ZoneSetPolicyRequest) MarshalJSON() ([]byte, error) {
+ type NoMethod ZoneSetPolicyRequest
+ raw := NoMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// method id "compute.acceleratorTypes.aggregatedList":
+
+type AcceleratorTypesAggregatedListCall struct {
+ s *Service
+ project string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// AggregatedList: Retrieves an aggregated list of accelerator types.
+func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall {
+ c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": A filter expression that
+// filters resources listed in the response. The expression must specify
+// the field name, a comparison operator, and the value that you want to
+// use for filtering. The value must be a string, a number, or a
+// boolean. The comparison operator must be either =, !=, >, or <.
+//
+// For example, if you are filtering Compute Engine instances, you can
+// exclude instances named example-instance by specifying name !=
+// example-instance.
+//
+// You can also filter nested fields. For example, you could specify
+// scheduling.automaticRestart = false to include instances only if they
+// are not scheduled for automatic restarts. You can use filtering on
+// nested fields to filter based on resource labels.
+//
+// To filter on multiple expressions, provide each separate expression
+// within parentheses. For example, (scheduling.automaticRestart = true)
+// (cpuPlatform = "Intel Skylake").
By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *AcceleratorTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesAggregatedListCall) Context(ctx context.Context) *AcceleratorTypesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
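+
+// Taken together, the setters above are normally chained before calling
+// Do. A minimal usage sketch, assuming an authenticated *Service value
+// named svc that exposes this service as svc.AcceleratorTypes (svc, ctx,
+// the project ID and the filter string are illustrative placeholders):
+//
+//	call := svc.AcceleratorTypes.AggregatedList("my-project").
+//		Filter("name != example-accelerator").
+//		MaxResults(100).
+//		Context(ctx)
+//	list, err := call.Do()
+//	if err != nil {
+//		// handle the error (see the Do documentation below)
+//	}
+//	_ = list
+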
+func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.aggregatedList" call. +// Exactly one of *AcceleratorTypeAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *AcceleratorTypeAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of accelerator types.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/acceleratorTypes", + // "response": { + // "$ref": "AcceleratorTypeAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(*AcceleratorTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.acceleratorTypes.get": + +type AcceleratorTypesGetCall struct { + s *Service + project string + zone string + acceleratorType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified accelerator type. +func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { + c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.acceleratorType = acceleratorType + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
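+
+// A minimal partial-response sketch for the Fields setter described above,
+// assuming an authenticated *Service value named svc exposing this service
+// as svc.AcceleratorTypes (svc and the literal arguments are illustrative):
+//
+//	res, err := svc.AcceleratorTypes.
+//		Get("my-project", "us-central1-a", "example-accelerator-type").
+//		Fields("name", "selfLink").
+//		Do()
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = res // only the requested fields are populated
+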
+func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesGetCall) Context(ctx context.Context) *AcceleratorTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AcceleratorTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "acceleratorType": c.acceleratorType, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.get" call. +// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AcceleratorType.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified accelerator type.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "acceleratorType" + // ], + // "parameters": { + // "acceleratorType": { + // "description": "Name of the accelerator type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + // "response": { + // "$ref": "AcceleratorType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.acceleratorTypes.list": + +type AcceleratorTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of accelerator types available to the +// specified project. +func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { + c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. 
However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesListCall) Context(ctx context.Context) *AcceleratorTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
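+
+// The IfNoneMatch setter above enables conditional fetches: pass an ETag
+// taken from an earlier response and test the returned error with
+// googleapi.IsNotModified. A minimal sketch, assuming an authenticated
+// *Service value named svc and a previously saved previousEtag (both are
+// illustrative placeholders):
+//
+//	list, err := svc.AcceleratorTypes.
+//		List("my-project", "us-central1-a").
+//		IfNoneMatch(previousEtag).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// nothing changed since previousEtag; keep the cached copy
+//	} else if err != nil {
+//		// handle other errors
+//	}
+//	_ = list
+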
+func (c *AcceleratorTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.list" call. +// Exactly one of *AcceleratorTypeList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *AcceleratorTypeList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of accelerator types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/acceleratorTypes", + // "response": { + // "$ref": "AcceleratorTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*AcceleratorTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.aggregatedList": + +type AddressesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of addresses. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList +func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { + c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
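+
+// The MaxResults and PageToken setters above allow paging through large
+// result sets by hand; the Pages helper defined further below wraps the
+// same loop. A minimal sketch, assuming an authenticated *Service value
+// named svc exposing this service as svc.Addresses (illustrative names):
+//
+//	call := svc.Addresses.AggregatedList("my-project").
+//		OrderBy("creationTimestamp desc").
+//		MaxResults(250)
+//	for {
+//		page, err := call.Do()
+//		if err != nil {
+//			break // or propagate the error
+//		}
+//		// ... consume this page of results ...
+//		if page.NextPageToken == "" {
+//			break
+//		}
+//		call.PageToken(page.NextPageToken)
+//	}
+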
+func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.aggregatedList" call. +// Exactly one of *AddressAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *AddressAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of addresses.", + // "httpMethod": "GET", + // "id": "compute.addresses.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/addresses", + // "response": { + // "$ref": "AddressAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
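+
+// A minimal sketch of the Pages helper documented above, assuming an
+// authenticated *Service value named svc exposing this service as
+// svc.Addresses (svc and ctx are illustrative placeholders):
+//
+//	err := svc.Addresses.AggregatedList("my-project").
+//		Pages(ctx, func(page *AddressAggregatedList) error {
+//			// inspect page; returning a non-nil error stops the iteration
+//			return nil
+//		})
+//	if err != nil {
+//		// handle the error
+//	}
+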
+func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.delete": + +type AddressesDeleteCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete +func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { + c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified address resource.", + // "httpMethod": "DELETE", + // "id": "compute.addresses.delete", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.get": + +type AddressesGetCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get +func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { + c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Address{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified address resource.", + // "httpMethod": "GET", + // "id": "compute.addresses.get", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Address" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.addresses.insert": + +type AddressesInsertCall struct { + s *Service + project string + region string + address *Address + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an address resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert +func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { + c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an address resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.addresses.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "request": { + // "$ref": "Address" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.list": + +type AddressesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of addresses contained within the specified +// region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list +func (r *AddressesService) List(project string, region string) *AddressesListCall { + c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
+// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *AddressesListCall) Filter(filter string) *AddressesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
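+//
+// A minimal usage sketch, assuming this package is imported as "compute" and
+// that svc is a *compute.Service constructed elsewhere; the project, region
+// and filter values below are placeholders:
+//
+//   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//   defer cancel() // cancelling ctx aborts any request still in flight
+//   err := svc.Addresses.List("my-project", "us-central1").
+//           Filter("name != example-address").
+//           Pages(ctx, func(page *compute.AddressList) error {
+//                   for _, addr := range page.Items {
+//                           fmt.Println(addr.Name, addr.Address)
+//                   }
+//                   return nil // a non-nil error here stops the iteration
+//           })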
+func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.list" call. +// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AddressList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of addresses contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.addresses.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). 
By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "response": { + // "$ref": "AddressList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.setLabels": + +type AddressesSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an Address. To learn more about labels, +// read the Labeling Resources documentation. 
+func (r *AddressesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *AddressesSetLabelsCall { + c := &AddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesSetLabelsCall) RequestId(requestId string) *AddressesSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesSetLabelsCall) Fields(s ...googleapi.Field) *AddressesSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesSetLabelsCall) Context(ctx context.Context) *AddressesSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
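+//
+// A minimal sketch of inspecting the error returned by Do, assuming the
+// package is imported as "compute"; svc, project, region, fingerprint and the
+// label values are placeholders supplied by the caller:
+//
+//   op, err := svc.Addresses.SetLabels(project, region, "my-address",
+//           &compute.RegionSetLabelsRequest{
+//                   Labels:           map[string]string{"env": "dev"},
+//                   LabelFingerprint: fingerprint, // taken from a prior Get of the address
+//           }).Do()
+//   if gerr, ok := err.(*googleapi.Error); ok {
+//           log.Printf("compute API error %d: %v", gerr.Code, gerr.Message)
+//   } else if err != nil {
+//           log.Fatal(err)
+//   }
+//   _ = op // the returned Operation can be polled until it reports DONE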
+func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.addresses.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.testIamPermissions": + +type AddressesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
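+//
+// A minimal usage sketch, assuming the package is imported as "compute" and
+// that svc, project and region are placeholders; the permission names are
+// examples of IAM permissions that apply to address resources:
+//
+//   resp, err := svc.Addresses.TestIamPermissions(project, region, "my-address",
+//           &compute.TestPermissionsRequest{
+//                   Permissions: []string{"compute.addresses.get", "compute.addresses.use"},
+//           }).Do()
+//   if err == nil {
+//           fmt.Println(resp.Permissions) // only the permissions the caller actually holds
+//   }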
+func (r *AddressesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *AddressesTestIamPermissionsCall { + c := &AddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *AddressesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesTestIamPermissionsCall) Context(ctx context.Context) *AddressesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.addresses.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.aggregatedList": + +type AutoscalersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of autoscalers. +func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { + c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. 
For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
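+//
+// A minimal sketch, assuming the package is imported as "compute" and that
+// aggregated results come back grouped per scope (zone); svc, the project
+// name and the extra header are placeholders:
+//
+//   call := svc.Autoscalers.AggregatedList("my-project")
+//   call.Header().Set("X-Example-Audit", "autoscaler-inventory")
+//   err := call.Pages(ctx, func(page *compute.AutoscalerAggregatedList) error {
+//           for scope, scoped := range page.Items {
+//                   fmt.Println(scope, len(scoped.Autoscalers))
+//           }
+//           return nil
+//   })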
+func (c *AutoscalersAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.aggregatedList" call. +// Exactly one of *AutoscalerAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AutoscalerAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of autoscalers.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/autoscalers", + // "response": { + // "$ref": "AutoscalerAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.delete": + +type AutoscalersDeleteCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified autoscaler. +func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { + c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified autoscaler.", + // "httpMethod": "DELETE", + // "id": "compute.autoscalers.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.get": + +type AutoscalersGetCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified autoscaler resource. Gets a list of +// available autoscalers by making a list() request. +func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { + c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
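+//
+// A minimal sketch of requesting a partial response, assuming the package is
+// imported as "compute"; svc, project and zone are placeholders and the field
+// names refer to fields of the Autoscaler resource:
+//
+//   as, err := svc.Autoscalers.Get(project, zone, "my-autoscaler").
+//           Fields("name", "status", "autoscalingPolicy").
+//           Do()
+//   if err == nil {
+//           fmt.Println(as.Name, as.Status)
+//   }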
+func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.get" call. +// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Autoscaler.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Autoscaler{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified autoscaler resource. 
Gets a list of available autoscalers by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.get", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Autoscaler" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.insert": + +type AutoscalersInsertCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an autoscaler in the specified project using the data +// included in the request. +func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { + c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
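+//
+// A minimal sketch of an idempotent insert, assuming the package is imported
+// as "compute"; svc, project, zone, the target URL and the UUID are
+// placeholders, and the same requestId would be sent again if this exact
+// request had to be retried:
+//
+//   op, err := svc.Autoscalers.Insert(project, zone, &compute.Autoscaler{
+//           Name:   "web-autoscaler",
+//           Target: instanceGroupManagerURL,
+//           AutoscalingPolicy: &compute.AutoscalingPolicy{
+//                   MinNumReplicas: 1,
+//                   MaxNumReplicas: 5,
+//           },
+//   }).RequestId("c2f902b5-6f0f-43cb-a1f5-9d4730de6ad0").Do()
+//   // op and err are handled like any other Operation-returning call.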
+func (c *AutoscalersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.autoscalers.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.list": + +type AutoscalersListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of autoscalers contained within the specified +// zone. +func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { + c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". 
This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.list" call. +// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AutoscalerList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
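+	// The generated body below follows a fixed pattern: doRequest issues the
+	// HTTP call; a 304 Not Modified response is surfaced as a *googleapi.Error
+	// carrying only the status code and headers, so googleapi.IsNotModified can
+	// detect it; any other non-2xx status is rejected by googleapi.CheckResponse;
+	// and the JSON body is decoded into an AutoscalerList whose ServerResponse
+	// records the response headers and status code.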
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AutoscalerList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of autoscalers contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "response": { + // "$ref": "AutoscalerList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.patch": + +type AutoscalersPatchCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an autoscaler in the specified project using the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { + c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to patch. +func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.autoscalers.patch", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to patch.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.testIamPermissions": + +type AutoscalersTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *AutoscalersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *AutoscalersTestIamPermissionsCall { + c := &AutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *AutoscalersTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersTestIamPermissionsCall) Context(ctx context.Context) *AutoscalersTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
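+//
+// Illustrative sketch only (it assumes a *Service value svc that exposes an
+// Autoscalers field, plus caller-supplied project, zone, resource, req and ctx
+// values; the header name is made up for the example):
+//
+//	call := svc.Autoscalers.TestIamPermissions(project, zone, resource, req)
+//	call.Header().Set("X-Example-Debug", "1")
+//	resp, err := call.Context(ctx).Do()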
+func (c *AutoscalersTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.autoscalers.testIamPermissions", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { 
+ // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.update": + +type AutoscalersUpdateCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an autoscaler in the specified project using the data +// included in the request. +func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { + c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to update. +func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.autoscalers.update", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.addSignedUrlKey": + +type BackendBucketsAddSignedUrlKeyCall struct { + s *Service + project string + backendBucket string + signedurlkey *SignedUrlKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddSignedUrlKey: Adds a key for validating requests with signed URLs +// for this backend bucket. +func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { + c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.signedurlkey = signedurlkey + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsAddSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsAddSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsAddSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsAddSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
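+//
+// Illustrative sketch of an idempotent retry using the optional requestId
+// parameter described above (it assumes a *Service value svc exposing a
+// BackendBuckets field, plus caller-supplied project, bucketName and key
+// values and a caller-generated UUID string id):
+//
+//	op, err := svc.BackendBuckets.AddSignedUrlKey(project, bucketName, key).
+//		RequestId(id).
+//		Do()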
+func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.addSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a key for validating requests with signed URLs for this backend bucket.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.addSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + // "request": { + // "$ref": "SignedUrlKey" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.delete": + +type BackendBucketsDeleteCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified BackendBucket resource. +func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { + c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified BackendBucket resource.", + // "httpMethod": "DELETE", + // "id": "compute.backendBuckets.delete", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.deleteSignedUrlKey": + +type BackendBucketsDeleteSignedUrlKeyCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteSignedUrlKey: Deletes a key for validating requests with signed +// URLs for this backend bucket. +func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { + c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.urlParams_.Set("keyName", keyName) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsDeleteSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsDeleteSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
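+//
+// Unlike project and backendBucket, which are expanded into the request path,
+// the required keyName value set by DeleteSignedUrlKey travels in the encoded
+// query string built by doRequest below.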
+func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.deleteSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a key for validating requests with signed URLs for this backend bucket.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.deleteSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendBucket", + // "keyName" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "keyName": { + // "description": "The name of the Signed URL Key to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.get": + +type BackendBucketsGetCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified BackendBucket resource. Gets a list of +// available backend buckets by making a list() request. +func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { + c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.get" call. 
+// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *BackendBucket.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendBucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.backendBuckets.get", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "response": { + // "$ref": "BackendBucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendBuckets.insert": + +type BackendBucketsInsertCall struct { + s *Service + project string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a BackendBucket resource in the specified project +// using the data included in the request. +func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { + c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendbucket = backendbucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
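+	// As with the other mutating calls in this file, a successful insert yields
+	// an Operation describing the server-side work rather than the created
+	// BackendBucket itself; the response handling below mirrors the pattern
+	// used throughout the generated code.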
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets", + // "request": { + // "$ref": "BackendBucket" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.list": + +type BackendBucketsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of BackendBucket resources available to the +// specified project. +func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { + c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. 
You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BackendBucketsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.list" call. +// Exactly one of *BackendBucketList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendBucketList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendBucketList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of BackendBucket resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.backendBuckets.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets", + // "response": { + // "$ref": "BackendBucketList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.backendBuckets.patch": + +type BackendBucketsPatchCall struct { + s *Service + project string + backendBucket string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified BackendBucket resource with the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. 
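The list call above combines server-side filtering (filter), page sizing (maxResults), and the Pages helper, which drives the pageToken loop and resets it when iteration ends. The following is a minimal sketch of how a caller might walk every BackendBucket in a project; the import path version, project ID, and filter string are illustrative assumptions, not taken from this file.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1" // assumed version; match the path actually vendored here
)

func main() {
	ctx := context.Background()

	// Assumes Application Default Credentials are configured in the environment.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Filtered, paginated list; Pages invokes the callback once per page.
	call := svc.BackendBuckets.List("my-project").
		Filter("name != example-bucket"). // placeholder filter expression
		MaxResults(100)
	err = call.Pages(ctx, func(page *compute.BackendBucketList) error {
		for _, b := range page.Items {
			fmt.Println(b.Name, b.BucketName)
		}
		return nil // returning a non-nil error here halts the iteration
	})
	if err != nil {
		log.Fatal(err)
	}
}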
+func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { + c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.backendbucket = backendbucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
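Patch follows JSON merge-patch rules, so only the fields set on the request body are changed, and the optional requestId makes a retried call safe to repeat. A hedged sketch under the same assumptions as the previous example (the project, bucket name, and UUID are placeholders):

// Hypothetical helper; assumes the imports and *compute.Service setup from the previous sketch.
func patchBucketDescription(ctx context.Context, svc *compute.Service) error {
	patch := &compute.BackendBucket{
		Description: "serves static assets", // merge-patch: only this field is updated
	}
	op, err := svc.BackendBuckets.
		Patch("my-project", "my-backend-bucket", patch).
		// Any non-zero UUID; a retry carrying the same ID is ignored by the server.
		RequestId("7f9f0e1c-2a7b-4d39-9d61-0c2f8a4b5e11").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("patch operation %s is %s", op.Name, op.Status)
	return nil
}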
+func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.backendBuckets.patch", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "request": { + // "$ref": "BackendBucket" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.update": + +type BackendBucketsUpdateCall struct { + s *Service + project string + backendBucket string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the specified BackendBucket resource with the data +// included in the request. +func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { + c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.backendbucket = backendbucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified BackendBucket resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.backendBuckets.update", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "request": { + // "$ref": "BackendBucket" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.addSignedUrlKey": + +type BackendServicesAddSignedUrlKeyCall struct { + s *Service + project string + backendService string + signedurlkey *SignedUrlKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddSignedUrlKey: Adds a key for validating requests with signed URLs +// for this backend service. +func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { + c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.signedurlkey = signedurlkey + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *BackendServicesAddSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesAddSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesAddSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesAddSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.addSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a key for validating requests with signed URLs for this backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.addSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}/addSignedUrlKey", + // "request": { + // "$ref": "SignedUrlKey" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.aggregatedList": + +type BackendServicesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all BackendService resources, +// regional and global, available to the specified project. +func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { + c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
+// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
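IfNoneMatch turns the call into a conditional request: when the server still holds the same ETag it answers 304 Not Modified, which Do surfaces as a *googleapi.Error that googleapi.IsNotModified recognizes. A sketch of a cached aggregated-list refresh, assuming the imports above plus "google.golang.org/api/googleapi"; the cache variables and project ID are illustrative.

// refreshBackendServices is a hypothetical caching helper, not part of this package.
func refreshBackendServices(ctx context.Context, svc *compute.Service,
	cachedList *compute.BackendServiceAggregatedList, cachedETag string) (*compute.BackendServiceAggregatedList, string, error) {

	list, err := svc.BackendServices.AggregatedList("my-project").
		IfNoneMatch(cachedETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// Nothing changed since cachedETag was issued; keep the cached copy.
		return cachedList, cachedETag, nil
	}
	if err != nil {
		return nil, "", err
	}
	return list, list.ServerResponse.Header.Get("Etag"), nil
}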
+func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.aggregatedList" call. +// Exactly one of *BackendServiceAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *BackendServiceAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendServiceAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.backendServices.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/backendServices", + // "response": { + // "$ref": "BackendServiceAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.backendServices.delete": + +type BackendServicesDeleteCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified BackendService resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete +func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { + c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified BackendService resource.", + // "httpMethod": "DELETE", + // "id": "compute.backendServices.delete", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.deleteSignedUrlKey": + +type BackendServicesDeleteSignedUrlKeyCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteSignedUrlKey: Deletes a key for validating requests with signed +// URLs for this backend service. +func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { + c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.urlParams_.Set("keyName", keyName) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendServicesDeleteSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesDeleteSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesDeleteSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
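Signed URL keys for a backend service are managed with the addSignedUrlKey/deleteSignedUrlKey pair; both return an Operation and accept the same requestId idempotency parameter. A minimal sketch of removing a key by name, under the same assumptions as the earlier examples (project, service, and key names are placeholders):

// dropSignedURLKey is a hypothetical helper; assumes the imports and svc/ctx setup from the first sketch.
func dropSignedURLKey(ctx context.Context, svc *compute.Service) error {
	op, err := svc.BackendServices.
		DeleteSignedUrlKey("my-project", "my-backend-service", "my-old-key").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("deleteSignedUrlKey operation %s is %s", op.Name, op.Status)
	return nil
}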
+func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.deleteSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a key for validating requests with signed URLs for this backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.deleteSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendService", + // "keyName" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "keyName": { + // "description": "The name of the Signed URL Key to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}/deleteSignedUrlKey", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.get": + +type BackendServicesGetCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified BackendService resource. Gets a list of +// available backend services. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get +func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { + c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.get" call. +// Exactly one of *BackendService or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *BackendService.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendService{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified BackendService resource. Gets a list of available backend services.", + // "httpMethod": "GET", + // "id": "compute.backendServices.get", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "response": { + // "$ref": "BackendService" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendServices.getHealth": + +type BackendServicesGetHealthCall struct { + s *Service + project string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetHealth: Gets the most recent health check results for this +// BackendService. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth +func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { + c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesGetHealthCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendServiceGroupHealth{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the most recent health check results for this BackendService.", + // "httpMethod": "POST", + // "id": "compute.backendServices.getHealth", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the queried instance belongs.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}/getHealth", + // "request": { + // "$ref": "ResourceGroupReference" + // }, + // "response": { + // "$ref": "BackendServiceGroupHealth" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendServices.insert": + +type BackendServicesInsertCall struct { + s *Service + project string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a BackendService resource in the specified project +// using the data included in the request. There are several +// restrictions and guidelines to keep in mind when creating a backend +// service. Read Restrictions and Guidelines for more information. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert +func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { + c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendservice = backendservice + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
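The request-ID contract described above can be exercised as in the following hedged sketch, where svc is a *compute.Service built as in the earlier Get sketch, insertBackendService is a hypothetical helper, and the project, resource names, and UUID are placeholders.

    // imports assumed: "context", compute "google.golang.org/api/compute/v1"
    func insertBackendService(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
        bs := &compute.BackendService{
            Name:       "example-bs",
            Protocol:   "HTTP",
            TimeoutSec: 30,
            HealthChecks: []string{
                "https://www.googleapis.com/compute/v1/projects/my-project/global/httpHealthChecks/example-hc",
            },
        }
        // Reuse the same client-generated UUID on every retry of this insert so the
        // server can recognize and drop duplicates, per the requestId semantics above.
        const requestID = "7f8e2c54-0000-4000-8000-000000000000" // placeholder; generate one per logical request
        return svc.BackendServices.Insert("my-project", bs).
            RequestId(requestID).
            Context(ctx).
            Do()
    }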
+func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. 
Read Restrictions and Guidelines for more information.", + // "httpMethod": "POST", + // "id": "compute.backendServices.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.list": + +type BackendServicesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of BackendService resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list +func (r *BackendServicesService) List(project string) *BackendServicesListCall { + c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
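As a hedged illustration of the filter syntax documented above, combined with the Pages helper generated further down for this call type; svc, the project ID, the filter expression, and listHTTPBackendServices are placeholder assumptions, not part of the vendored patch.

    // imports assumed: "context", "fmt", compute "google.golang.org/api/compute/v1"
    func listHTTPBackendServices(ctx context.Context, svc *compute.Service) error {
        call := svc.BackendServices.List("my-project").
            Filter(`protocol = "HTTP"`). // example filter expression
            MaxResults(100)
        // Pages walks every page, following nextPageToken automatically.
        return call.Pages(ctx, func(page *compute.BackendServiceList) error {
            for _, bs := range page.Items {
                fmt.Println(bs.Name)
            }
            return nil
        })
    }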
+func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BackendServicesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendServiceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendServiceList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of BackendService resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.backendServices.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices", + // "response": { + // "$ref": "BackendServiceList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.backendServices.patch": + +type BackendServicesPatchCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified BackendService resource with the data +// included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. 
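A small, hypothetical sketch of the merge-patch behavior just described: only the fields set on the request body are changed. svc and the resource names are assumed as in the earlier sketches.

    // imports assumed: "context", compute "google.golang.org/api/compute/v1"
    func patchBackendTimeout(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
        patch := &compute.BackendService{
            TimeoutSec: 60, // only this field is sent; zero-valued fields are omitted
            // To force-send a field at its zero value, list it in ForceSendFields.
        }
        return svc.BackendServices.Patch("my-project", "example-bs", patch).
            Context(ctx).
            Do()
    }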
+// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch +func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { + c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.backendservice = backendservice + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
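To make the error contract described above concrete, here is a brief, illustrative sketch of handling the possible outcomes of Do; checkPatch is a hypothetical helper and the project and resource names are placeholders.

    // imports assumed: "context", "log", compute "google.golang.org/api/compute/v1",
    // "google.golang.org/api/googleapi"
    func checkPatch(ctx context.Context, svc *compute.Service, patch *compute.BackendService) {
        op, err := svc.BackendServices.Patch("my-project", "example-bs", patch).Context(ctx).Do()
        if err != nil {
            if googleapi.IsNotModified(err) {
                log.Println("resource not modified")
                return
            }
            if gerr, ok := err.(*googleapi.Error); ok {
                // Any non-2xx status surfaces as *googleapi.Error carrying the HTTP
                // code, message, and response headers.
                log.Printf("compute API returned HTTP %d: %s", gerr.Code, gerr.Message)
                return
            }
            log.Fatal(err) // transport or decoding error
        }
        log.Println("patch operation started:", op.Name)
    }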
+func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.backendServices.patch", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.setSecurityPolicy": + +type BackendServicesSetSecurityPolicyCall struct { + s *Service + project string + backendService string + securitypolicyreference *SecurityPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetSecurityPolicy: Sets the security policy for the specified backend +// service. 
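A hedged sketch of the SetSecurityPolicy call introduced here, attaching an existing Cloud Armor security policy by URL; svc, the project, the policy URL, and attachSecurityPolicy are placeholders.

    // imports assumed: "context", compute "google.golang.org/api/compute/v1"
    func attachSecurityPolicy(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
        ref := &compute.SecurityPolicyReference{
            SecurityPolicy: "https://www.googleapis.com/compute/v1/projects/my-project/global/securityPolicies/example-policy",
        }
        return svc.BackendServices.SetSecurityPolicy("my-project", "example-bs", ref).
            Context(ctx).
            Do()
    }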
+func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { + c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.securitypolicyreference = securitypolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetSecurityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetSecurityPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.setSecurityPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the security policy for the specified backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.setSecurityPolicy", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}/setSecurityPolicy", + // "request": { + // "$ref": "SecurityPolicyReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.testIamPermissions": + +type BackendServicesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
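A short, illustrative sketch of the TestIamPermissions call introduced here; the permission names shown are examples, and the response lists the subset the caller actually holds. svc, the resource names, and heldPermissions are assumptions.

    // imports assumed: "context", compute "google.golang.org/api/compute/v1"
    func heldPermissions(ctx context.Context, svc *compute.Service) ([]string, error) {
        req := &compute.TestPermissionsRequest{
            Permissions: []string{
                "compute.backendServices.get",
                "compute.backendServices.update",
            },
        }
        resp, err := svc.BackendServices.TestIamPermissions("my-project", "example-bs", req).
            Context(ctx).
            Do()
        if err != nil {
            return nil, err
        }
        // resp.Permissions contains only the permissions the caller has.
        return resp.Permissions, nil
    }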
+func (r *BackendServicesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendServicesTestIamPermissionsCall { + c := &BackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendServicesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesTestIamPermissionsCall) Context(ctx context.Context) *BackendServicesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.backendServices.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendServices.update": + +type BackendServicesUpdateCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the specified BackendService resource with the data +// included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update +func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { + c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.backendservice = backendservice + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
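By way of contrast with Patch, a rough read-modify-write sketch for Update, which replaces the whole resource; svc, the names, and updateBackendTimeout are assumptions, not part of the vendored patch.

    // imports assumed: "context", compute "google.golang.org/api/compute/v1"
    func updateBackendTimeout(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
        // Read the current resource, change one field, and write the whole thing back.
        bs, err := svc.BackendServices.Get("my-project", "example-bs").Context(ctx).Do()
        if err != nil {
            return nil, err
        }
        bs.TimeoutSec = 60
        // bs.Fingerprint from the Get response is carried along so the server can
        // detect a concurrent modification.
        return svc.BackendServices.Update("my-project", "example-bs", bs).
            Context(ctx).
            Do()
    }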
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. 
Read Restrictions and Guidelines for more information.", + // "httpMethod": "PUT", + // "id": "compute.backendServices.update", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.diskTypes.aggregatedList": + +type DiskTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of disk types. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList +func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { + c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
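An illustrative sketch of consuming the aggregated disk-type list, whose items are grouped per zone scope, using the Pages helper generated after this call; the project ID and printDiskTypes are placeholders.

    // imports assumed: "context", "fmt", compute "google.golang.org/api/compute/v1"
    func printDiskTypes(ctx context.Context, svc *compute.Service) error {
        call := svc.DiskTypes.AggregatedList("my-project")
        return call.Pages(ctx, func(page *compute.DiskTypeAggregatedList) error {
            // Items maps a scope key such as "zones/us-central1-a" to the disk
            // types available in that scope.
            for scope, scoped := range page.Items {
                for _, dt := range scoped.DiskTypes {
                    fmt.Printf("%s: %s (default %d GB)\n", scope, dt.Name, dt.DefaultDiskSizeGb)
                }
            }
            return nil
        })
    }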
+func (c *DiskTypesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.diskTypes.aggregatedList" call. +// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of disk types.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/diskTypes", + // "response": { + // "$ref": "DiskTypeAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.diskTypes.get": + +type DiskTypesGetCall struct { + s *Service + project string + zone string + diskType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified disk type. Gets a list of available disk +// types by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get +func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { + c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.diskType = diskType + return c +} + +// Fields allows partial responses to be retrieved. 
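A small, hypothetical sketch of the Get call defined above together with the Fields option whose documentation continues below, requesting a partial response; svc, the project, zone, disk-type name, and describeDiskType are placeholders.

    // imports assumed: "context", "fmt", compute "google.golang.org/api/compute/v1"
    func describeDiskType(ctx context.Context, svc *compute.Service) error {
        dt, err := svc.DiskTypes.Get("my-project", "us-central1-a", "pd-ssd").
            Fields("name", "description", "defaultDiskSizeGb"). // partial response
            Context(ctx).
            Do()
        if err != nil {
            return err
        }
        fmt.Printf("%s: %s (default %d GB)\n", dt.Name, dt.Description, dt.DefaultDiskSizeGb)
        return nil
    }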
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DiskTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "diskType": c.diskType, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.diskTypes.get" call. +// Exactly one of *DiskType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified disk type. 
Gets a list of available disk types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "diskType" + // ], + // "parameters": { + // "diskType": { + // "description": "Name of the disk type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/diskTypes/{diskType}", + // "response": { + // "$ref": "DiskType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.diskTypes.list": + +type DiskTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of disk types available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list +func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { + c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. 
Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DiskTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.diskTypes.list" call. +// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *DiskTypeList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of disk types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/diskTypes", + // "response": { + // "$ref": "DiskTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.disks.addResourcePolicies": + +type DisksAddResourcePoliciesCall struct { + s *Service + project string + zone string + disk string + disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddResourcePolicies: Adds existing resource policies to a disk. You +// can only add one policy which will be applied to this disk for +// scheduling snapshot creation. +func (r *DisksService) AddResourcePolicies(project string, zone string, disk string, disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest) *DisksAddResourcePoliciesCall { + c := &DisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.disksaddresourcepoliciesrequest = disksaddresourcepoliciesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *DisksAddResourcePoliciesCall) RequestId(requestId string) *DisksAddResourcePoliciesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksAddResourcePoliciesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksAddResourcePoliciesCall) Context(ctx context.Context) *DisksAddResourcePoliciesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksAddResourcePoliciesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksaddresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/addResourcePolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.addResourcePolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds existing resource policies to a disk. 
You can only add one policy which will be applied to this disk for scheduling snapshot creation.", + // "httpMethod": "POST", + // "id": "compute.disks.addResourcePolicies", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}/addResourcePolicies", + // "request": { + // "$ref": "DisksAddResourcePoliciesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.aggregatedList": + +type DisksAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of persistent disks. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList +func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { + c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. 
+// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *DisksAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.aggregatedList" call. +// Exactly one of *DiskAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of persistent disks.", + // "httpMethod": "GET", + // "id": "compute.disks.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/disks", + // "response": { + // "$ref": "DiskAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.disks.createSnapshot": + +type DisksCreateSnapshotCall struct { + s *Service + project string + zone string + disk string + snapshot *Snapshot + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// CreateSnapshot: Creates a snapshot of a specified persistent disk. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot +func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { + c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.snapshot = snapshot + return c +} + +// GuestFlush sets the optional parameter "guestFlush": [Input Only] +// Specifies to create an application consistent snapshot by informing +// the OS to prepare for the snapshot process. Currently only supported +// on Windows instances using the Volume Shadow Copy Service (VSS). +func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { + c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksCreateSnapshotCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.createSnapshot" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a snapshot of a specified persistent disk.", + // "httpMethod": "POST", + // "id": "compute.disks.createSnapshot", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to snapshot.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "guestFlush": { + // "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + // "location": "query", + // "type": "boolean" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", + // "request": { + // "$ref": "Snapshot" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.delete": + +type DisksDeleteCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified persistent disk. Deleting a disk +// removes its data permanently and is irreversible. However, deleting a +// disk does not delete any snapshots previously made from the disk. You +// must separately delete snapshots. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete +func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { + c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *DisksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + // "httpMethod": "DELETE", + // "id": "compute.disks.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.get": + +type DisksGetCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns a specified persistent disk. Gets a list of available +// persistent disks by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get +func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { + c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.get" call. +// Exactly one of *Disk or error will be non-nil. 
Any non-2xx status +// code is an error. Response headers are in either +// *Disk.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Disk{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.disks.get", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}", + // "response": { + // "$ref": "Disk" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.disks.getIamPolicy": + +type DisksGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { + c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksGetIamPolicyCall) Fields(s ...googleapi.Field) *DisksGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksGetIamPolicyCall) IfNoneMatch(entityTag string) *DisksGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksGetIamPolicyCall) Context(ctx context.Context) *DisksGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.disks.getIamPolicy", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.disks.insert": + +type DisksInsertCall struct { + s *Service + project string + zone string + disk *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a persistent disk in the specified project using the +// data in the request. You can create a disk with a sourceImage, a +// sourceSnapshot, or create an empty 500 GB data disk by omitting all +// properties. You can also create a disk that is larger than the +// default size by specifying the sizeGb property. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert +func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { + c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// SourceImage sets the optional parameter "sourceImage": Source image +// to restore onto a disk. +func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { + c.urlParams_.Set("sourceImage", sourceImage) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", + // "httpMethod": "POST", + // "id": "compute.disks.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sourceImage": { + // "description": "Optional. Source image to restore onto a disk.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks", + // "request": { + // "$ref": "Disk" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.list": + +type DisksListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of persistent disks contained within the +// specified zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list +func (r *DisksService) List(project string, zone string) *DisksListCall { + c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *DisksListCall) Filter(filter string) *DisksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.list" call. 
+// Exactly one of *DiskList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of persistent disks contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.disks.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks", + // "response": { + // "$ref": "DiskList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.disks.removeResourcePolicies": + +type DisksRemoveResourcePoliciesCall struct { + s *Service + project string + zone string + disk string + disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RemoveResourcePolicies: Removes resource policies from a disk. +func (r *DisksService) RemoveResourcePolicies(project string, zone string, disk string, disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest) *DisksRemoveResourcePoliciesCall { + c := &DisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.disksremoveresourcepoliciesrequest = disksremoveresourcepoliciesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
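// A hedged usage sketch for the List call and its Pages helper above (editor's
// illustration, not generated code). svc and the project/zone values are
// placeholders, and the filter mirrors the form shown in the Filter documentation.
func exampleDisksList(ctx context.Context, svc *Service) error {
	return svc.Disks.List("my-project", "us-central1-a").
		Filter("name != example-instance"). // same syntax as the documented example
		MaxResults(100).                    // per-page size; Pages walks every page
		Pages(ctx, func(page *DiskList) error {
			for _, d := range page.Items {
				fmt.Printf("%s\t%d GB\n", d.Name, d.SizeGb)
			}
			return nil // returning a non-nil error here halts the iteration
		})
}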
+func (c *DisksRemoveResourcePoliciesCall) RequestId(requestId string) *DisksRemoveResourcePoliciesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksRemoveResourcePoliciesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksRemoveResourcePoliciesCall) Context(ctx context.Context) *DisksRemoveResourcePoliciesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksRemoveResourcePoliciesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksremoveresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.removeResourcePolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes resource policies from a disk.", + // "httpMethod": "POST", + // "id": "compute.disks.removeResourcePolicies", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies", + // "request": { + // "$ref": "DisksRemoveResourcePoliciesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.resize": + +type DisksResizeCall struct { + s *Service + project string + zone string + disk string + disksresizerequest *DisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Resize: Resizes the specified persistent disk. You can only increase +// the size of the disk. +func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { + c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.disksresizerequest = disksresizerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksResizeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", + // "httpMethod": "POST", + // "id": "compute.disks.resize", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The name of the persistent disk.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}/resize", + // "request": { + // "$ref": "DisksResizeRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.setIamPolicy": + +type DisksSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *DisksService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *DisksSetIamPolicyCall { + c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksSetIamPolicyCall) Context(ctx context.Context) *DisksSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.disks.setIamPolicy", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{resource}/setIamPolicy", + // "request": { + // "$ref": "ZoneSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.setLabels": + +type DisksSetLabelsCall struct { + s *Service + project string + zone string + resource string + zonesetlabelsrequest *ZoneSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a disk. To learn more about labels, +// read the Labeling Resources documentation. +func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { + c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.zonesetlabelsrequest = zonesetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
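// A sketch of the usual read-modify-write cycle for the setIamPolicy call above
// (editor's illustration, not generated code): fetch the current policy, append a
// binding, and write the whole policy back. svc, the role, the member, and the
// resource names are assumptions for the example.
func exampleDisksAddIamBinding(ctx context.Context, svc *Service) (*Policy, error) {
	policy, err := svc.Disks.GetIamPolicy("my-project", "us-central1-a", "my-disk").
		Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	policy.Bindings = append(policy.Bindings, &Binding{
		Role:    "roles/compute.storageAdmin",
		Members: []string{"user:alice@example.com"},
	})
	// Sending back the fetched policy (including its etag) lets the server detect
	// and reject a write that races with another modification.
	return svc.Disks.SetIamPolicy("my-project", "us-central1-a", "my-disk",
		&ZoneSetPolicyRequest{Policy: policy}).Context(ctx).Do()
}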
+func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.disks.setLabels", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{resource}/setLabels", + // "request": { + // "$ref": "ZoneSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.testIamPermissions": + +type DisksTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { + c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.disks.testIamPermissions", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.externalVpnGateways.delete": + +type ExternalVpnGatewaysDeleteCall struct { + s *Service + project string + externalVpnGateway string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified externalVpnGateway. 
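// An illustrative sketch for the testIamPermissions call above (editor's
// addition, not generated code). svc, the resource names, and the permission
// strings are assumptions for the example.
func exampleDisksTestIamPermissions(ctx context.Context, svc *Service) (map[string]bool, error) {
	resp, err := svc.Disks.TestIamPermissions("my-project", "us-central1-a", "my-disk",
		&TestPermissionsRequest{
			Permissions: []string{"compute.disks.get", "compute.disks.use"},
		}).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	// The response echoes back only the permissions the caller actually holds.
	granted := make(map[string]bool, len(resp.Permissions))
	for _, p := range resp.Permissions {
		granted[p] = true
	}
	return granted, nil
}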
+func (r *ExternalVpnGatewaysService) Delete(project string, externalVpnGateway string) *ExternalVpnGatewaysDeleteCall { + c := &ExternalVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.externalVpnGateway = externalVpnGateway + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ExternalVpnGatewaysDeleteCall) RequestId(requestId string) *ExternalVpnGatewaysDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ExternalVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysDeleteCall) Context(ctx context.Context) *ExternalVpnGatewaysDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{externalVpnGateway}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "externalVpnGateway": c.externalVpnGateway, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified externalVpnGateway.", + // "httpMethod": "DELETE", + // "id": "compute.externalVpnGateways.delete", + // "parameterOrder": [ + // "project", + // "externalVpnGateway" + // ], + // "parameters": { + // "externalVpnGateway": { + // "description": "Name of the externalVpnGateways to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.externalVpnGateways.get": + +type ExternalVpnGatewaysGetCall struct { + s *Service + project string + externalVpnGateway string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified externalVpnGateway. Get a list of +// available externalVpnGateways by making a list() request. +func (r *ExternalVpnGatewaysService) Get(project string, externalVpnGateway string) *ExternalVpnGatewaysGetCall { + c := &ExternalVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.externalVpnGateway = externalVpnGateway + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ExternalVpnGatewaysGetCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ExternalVpnGatewaysGetCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysGetCall) Context(ctx context.Context) *ExternalVpnGatewaysGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{externalVpnGateway}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "externalVpnGateway": c.externalVpnGateway, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.get" call. +// Exactly one of *ExternalVpnGateway or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ExternalVpnGateway.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGateway, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ExternalVpnGateway{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified externalVpnGateway. 
Get a list of available externalVpnGateways by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.externalVpnGateways.get", + // "parameterOrder": [ + // "project", + // "externalVpnGateway" + // ], + // "parameters": { + // "externalVpnGateway": { + // "description": "Name of the externalVpnGateway to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/externalVpnGateways/{externalVpnGateway}", + // "response": { + // "$ref": "ExternalVpnGateway" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.externalVpnGateways.insert": + +type ExternalVpnGatewaysInsertCall struct { + s *Service + project string + externalvpngateway *ExternalVpnGateway + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a ExternalVpnGateway in the specified project using +// the data included in the request. +func (r *ExternalVpnGatewaysService) Insert(project string, externalvpngateway *ExternalVpnGateway) *ExternalVpnGatewaysInsertCall { + c := &ExternalVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.externalvpngateway = externalvpngateway + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ExternalVpnGatewaysInsertCall) RequestId(requestId string) *ExternalVpnGatewaysInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ExternalVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysInsertCall) Context(ctx context.Context) *ExternalVpnGatewaysInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
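// A hedged sketch of a conditional read using the Get call and its IfNoneMatch
// option shown above (editor's illustration). svc, the names, and the cached
// etag are placeholders supplied by the caller.
func exampleExternalVpnGatewayGet(ctx context.Context, svc *Service, cachedETag string) (*ExternalVpnGateway, error) {
	call := svc.ExternalVpnGateways.Get("my-project", "my-peer-gateway").Context(ctx)
	if cachedETag != "" {
		call.IfNoneMatch(cachedETag) // ask the server to answer 304 if unchanged
	}
	gw, err := call.Do()
	if googleapi.IsNotModified(err) {
		// The cached copy is still current; callers can keep using it.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	// Cache gw together with gw.ServerResponse.Header.Get("Etag") for the next call.
	return gw, nil
}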
+func (c *ExternalVpnGatewaysInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.externalvpngateway) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a ExternalVpnGateway in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.externalVpnGateways.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/externalVpnGateways", + // "request": { + // "$ref": "ExternalVpnGateway" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + } -// method id "compute.acceleratorTypes.aggregatedList": +// method id "compute.externalVpnGateways.list": -type AcceleratorTypesAggregatedListCall struct { +type ExternalVpnGatewaysListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -33999,9 +50079,10 @@ type AcceleratorTypesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of accelerator types. -func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall { - c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of ExternalVpnGateway available to the +// specified project. +func (r *ExternalVpnGatewaysService) List(project string) *ExternalVpnGatewaysListCall { + c := &ExternalVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -34028,7 +50109,7 @@ func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTyp // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) Filter(filter string) *ExternalVpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c } @@ -34039,7 +50120,7 @@ func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorT // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) MaxResults(maxResults int64) *ExternalVpnGatewaysListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -34056,7 +50137,7 @@ func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *Accel // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) OrderBy(orderBy string) *ExternalVpnGatewaysListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -34064,7 +50145,7 @@ func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *Accelerato // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
-func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) PageToken(pageToken string) *ExternalVpnGatewaysListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -34072,7 +50153,7 @@ func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *Accele // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34082,7 +50163,7 @@ func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *Accel // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) IfNoneMatch(entityTag string) *ExternalVpnGatewaysListCall { c.ifNoneMatch_ = entityTag return c } @@ -34090,21 +50171,21 @@ func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *Acce // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesAggregatedListCall) Context(ctx context.Context) *AcceleratorTypesAggregatedListCall { +func (c *ExternalVpnGatewaysListCall) Context(ctx context.Context) *ExternalVpnGatewaysListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { +func (c *ExternalVpnGatewaysListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *ExternalVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -34116,7 +50197,7 @@ func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -34129,14 +50210,14 @@ func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.aggregatedList" call. -// Exactly one of *AcceleratorTypeAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *AcceleratorTypeAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.externalVpnGateways.list" call. +// Exactly one of *ExternalVpnGatewayList or error will be non-nil. 
Any +// non-2xx status code is an error. Response headers are in either +// *ExternalVpnGatewayList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeAggregatedList, error) { +func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*ExternalVpnGatewayList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34155,7 +50236,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorTypeAggregatedList{ + ret := &ExternalVpnGatewayList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34167,9 +50248,9 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of accelerator types.", + // "description": "Retrieves the list of ExternalVpnGateway available to the specified project.", // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.aggregatedList", + // "id": "compute.externalVpnGateways.list", // "parameterOrder": [ // "project" // ], @@ -34205,66 +50286,533 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/aggregated/acceleratorTypes", + // "path": "{project}/global/externalVpnGateways", + // "response": { + // "$ref": "ExternalVpnGatewayList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ExternalVpnGatewaysListCall) Pages(ctx context.Context, f func(*ExternalVpnGatewayList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.externalVpnGateways.setLabels": + +type ExternalVpnGatewaysSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an ExternalVpnGateway. To learn more +// about labels, read the Labeling Resources documentation. +func (r *ExternalVpnGatewaysService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ExternalVpnGatewaysSetLabelsCall { + c := &ExternalVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
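// Illustrative usage sketch (not part of the generated client): driving the Pages
// helper shown above for compute.externalVpnGateways.list. Assumes a *compute.Service
// built as in the earlier sketch and imports "context", "fmt", and
// compute "google.golang.org/api/compute/v1"; the function name is illustrative.
func listAllExternalVpnGateways(ctx context.Context, svc *compute.Service, project string) error {
	// Pages invokes the callback once per page and follows NextPageToken until it is empty.
	return svc.ExternalVpnGateways.List(project).
		MaxResults(100). // smaller pages just to exercise pagination
		Pages(ctx, func(page *compute.ExternalVpnGatewayList) error {
			for _, gw := range page.Items {
				fmt.Println(gw.Name)
			}
			return nil // returning a non-nil error halts the iteration
		})
}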
+func (c *ExternalVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysSetLabelsCall) Context(ctx context.Context) *ExternalVpnGatewaysSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an ExternalVpnGateway. 
To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.externalVpnGateways.setLabels", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/externalVpnGateways/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.externalVpnGateways.testIamPermissions": + +type ExternalVpnGatewaysTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ExternalVpnGatewaysService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ExternalVpnGatewaysTestIamPermissionsCall { + c := &ExternalVpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Fields(s ...googleapi.Field) *ExternalVpnGatewaysTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Context(ctx context.Context) *ExternalVpnGatewaysTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ExternalVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/externalVpnGateways/{resource}/testIamPermissions") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.externalVpnGateways.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ExternalVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.externalVpnGateways.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/externalVpnGateways/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.firewalls.delete": + +type FirewallsDeleteCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified firewall. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete +func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { + c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FirewallsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "firewall": c.firewall, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.firewalls.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified firewall.", + // "httpMethod": "DELETE", + // "id": "compute.firewalls.delete", + // "parameterOrder": [ + // "project", + // "firewall" + // ], + // "parameters": { + // "firewall": { + // "description": "Name of the firewall rule to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls/{firewall}", // "response": { - // "$ref": "AcceleratorTypeAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
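// Illustrative usage sketch (not part of the generated client): acting on the
// Operation returned by compute.firewalls.delete above. Mutating calls in this API
// return a long-running Operation; one hedged way to wait for it is to poll
// compute.globalOperations.get until Status is "DONE". Assumes a *compute.Service
// (svc) and imports "context", "fmt", "time", and the compute package.
func deleteFirewallAndWait(ctx context.Context, svc *compute.Service, project, name string) error {
	op, err := svc.Firewalls.Delete(project, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second) // simple fixed delay, enough for a sketch
		op, err = svc.GlobalOperations.Get(project, op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil && len(op.Error.Errors) > 0 {
		return fmt.Errorf("delete failed: %s", op.Error.Errors[0].Message)
	}
	return nil
}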
-func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(*AcceleratorTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.acceleratorTypes.get": +// method id "compute.firewalls.get": -type AcceleratorTypesGetCall struct { - s *Service - project string - zone string - acceleratorType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FirewallsGetCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified accelerator type. -func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { - c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified firewall. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get +func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { + c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.acceleratorType = acceleratorType + c.firewall = firewall return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypesGetCall { +func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34274,7 +50822,7 @@ func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypes // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorTypesGetCall { +func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -34282,21 +50830,21 @@ func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorType // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesGetCall) Context(ctx context.Context) *AcceleratorTypesGetCall { +func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
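// Illustrative usage sketch (not part of the generated client): the error contract
// repeated in each Do method's documentation, namely that any non-2xx status surfaces
// as a *googleapi.Error, lets callers branch on the HTTP code. Assumes a
// *compute.Service (svc) and imports "context", "net/http", and
// "google.golang.org/api/googleapi"; the helper name is illustrative.
func firewallExists(ctx context.Context, svc *compute.Service, project, name string) (bool, error) {
	_, err := svc.Firewalls.Get(project, name).Context(ctx).Do()
	if err == nil {
		return true, nil
	}
	if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound {
		return false, nil // 404: the rule does not exist
	}
	return false, err // transport failure or some other HTTP error
}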
-func (c *AcceleratorTypesGetCall) Header() http.Header { +func (c *FirewallsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -34308,7 +50856,7 @@ func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -34316,21 +50864,20 @@ func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "acceleratorType": c.acceleratorType, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.get" call. -// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AcceleratorType.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { +// Do executes the "compute.firewalls.get" call. +// Exactly one of *Firewall or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Firewall.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34349,7 +50896,7 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorType{ + ret := &Firewall{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34361,19 +50908,18 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator } return ret, nil // { - // "description": "Returns the specified accelerator type.", + // "description": "Returns the specified firewall.", // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.get", + // "id": "compute.firewalls.get", // "parameterOrder": [ // "project", - // "zone", - // "acceleratorType" + // "firewall" // ], // "parameters": { - // "acceleratorType": { - // "description": "Name of the accelerator type to return.", + // "firewall": { + // "description": "Name of the firewall rule to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -34383,18 +50929,11 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + // "path": "{project}/global/firewalls/{firewall}", // "response": { - // "$ref": "AcceleratorType" + // "$ref": "Firewall" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -34405,159 +50944,106 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator } -// method id "compute.acceleratorTypes.list": +// method id "compute.firewalls.insert": -type AcceleratorTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FirewallsInsertCall struct { + s *Service + project string + firewall *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of accelerator types available to the -// specified project. -func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { - c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a firewall rule in the specified project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert +func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { + c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. 
The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.firewall = firewall return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
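// Illustrative usage sketch (not part of the generated client): the idempotent-retry
// pattern described above, where reusing the same requestId lets the server recognise
// a retried compute.firewalls.insert and skip the duplicate. Assumes a *compute.Service
// (svc), placeholder project/network/rule names, and imports "context" and the compute
// package; in real code generate a fresh UUID per logical insert.
func insertAllowHTTPS(ctx context.Context, svc *compute.Service, project string) (*compute.Operation, error) {
	rule := &compute.Firewall{
		Name:         "allow-https",             // placeholder rule name
		Network:      "global/networks/default", // placeholder network
		Direction:    "INGRESS",
		SourceRanges: []string{"0.0.0.0/0"},
		Allowed: []*compute.FirewallAllowed{
			{IPProtocol: "tcp", Ports: []string{"443"}},
		},
	}
	// Example UUID; reuse it only when retrying this exact insert.
	const requestID = "3d7f3f52-6e2a-4a3b-9f0e-2b8c1d4e5a6b"
	return svc.Firewalls.Insert(project, rule).RequestId(requestID).Context(ctx).Do()
}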
+func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorTypesListCall { +func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesListCall) Context(ctx context.Context) *AcceleratorTypesListCall { +func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AcceleratorTypesListCall) Header() http.Header { +func (c *FirewallsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.list" call. -// Exactly one of *AcceleratorTypeList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *AcceleratorTypeList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeList, error) { +// Do executes the "compute.firewalls.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34576,7 +51062,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorTypeList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34588,37 +51074,13 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato } return ret, nil // { - // "description": "Retrieves a list of accelerator types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.list", + // "description": "Creates a firewall rule in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.firewalls.insert", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -34626,51 +51088,30 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/acceleratorTypes", + // "path": "{project}/global/firewalls", + // "request": { + // "$ref": "Firewall" + // }, // "response": { - // "$ref": "AcceleratorTypeList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*AcceleratorTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.addresses.aggregatedList": +// method id "compute.firewalls.list": -type AddressesAggregatedListCall struct { +type FirewallsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -34679,10 +51120,11 @@ type AddressesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of addresses. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList -func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { - c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of firewall rules available to the specified +// project. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list +func (r *FirewallsService) List(project string) *FirewallsListCall { + c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -34709,7 +51151,7 @@ func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedLi // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { +func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { c.urlParams_.Set("filter", filter) return c } @@ -34720,7 +51162,7 @@ func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregated // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { +func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -34737,7 +51179,7 @@ func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAgg // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { +func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -34745,7 +51187,7 @@ func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregat // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { +func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -34753,7 +51195,7 @@ func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { +func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34763,7 +51205,7 @@ func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { +func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { c.ifNoneMatch_ = entityTag return c } @@ -34771,21 +51213,21 @@ func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
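// Illustrative usage sketch (not part of the generated client): combining the list
// options documented above, where Filter narrows the server-side result set and
// Fields requests a partial response. The filter string and field mask are examples
// only. Assumes a *compute.Service (svc) and imports "context", "fmt", and the
// compute package.
func listIngressFirewalls(ctx context.Context, svc *compute.Service, project string) error {
	return svc.Firewalls.List(project).
		Filter(`direction = "INGRESS"`).
		Fields("items(name,network,sourceRanges)", "nextPageToken"). // keep nextPageToken so paging still works
		Pages(ctx, func(page *compute.FirewallList) error {
			for _, fw := range page.Items {
				fmt.Printf("%s %s %v\n", fw.Name, fw.Network, fw.SourceRanges)
			}
			return nil
		})
}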
-func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { +func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesAggregatedListCall) Header() http.Header { +func (c *FirewallsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -34797,7 +51239,7 @@ func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -34810,14 +51252,14 @@ func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.aggregatedList" call. -// Exactly one of *AddressAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *AddressAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { +// Do executes the "compute.firewalls.list" call. +// Exactly one of *FirewallList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *FirewallList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34836,7 +51278,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressAggregatedList{ + ret := &FirewallList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34848,9 +51290,9 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address } return ret, nil // { - // "description": "Retrieves an aggregated list of addresses.", + // "description": "Retrieves the list of firewall rules available to the specified project.", // "httpMethod": "GET", - // "id": "compute.addresses.aggregatedList", + // "id": "compute.firewalls.list", // "parameterOrder": [ // "project" // ], @@ -34886,9 +51328,9 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // "type": "string" // } // }, - // "path": "{project}/aggregated/addresses", + // "path": "{project}/global/firewalls", // "response": { - // "$ref": "AddressAggregatedList" + // "$ref": "FirewallList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -34902,7 +51344,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { +func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -34920,25 +51362,27 @@ func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*Address } } -// method id "compute.addresses.delete": +// method id "compute.firewalls.patch": -type AddressesDeleteCall struct { +type FirewallsPatchCall struct { s *Service project string - region string - address string + firewall string + firewall2 *Firewall urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete -func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { - c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified firewall rule with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch +func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { + c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address + c.firewall = firewall + c.firewall2 = firewall2 return c } @@ -34956,7 +51400,7 @@ func (r *AddressesService) Delete(project string, region string, address string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
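Since the generated Pages method above resets the page token and loops over Do internally, callers can iterate every firewall rule without tracking nextPageToken by hand. A hedged sketch of that pattern; svc, the import path, and the FirewallList.Items/Firewall.Name field names are assumptions as before.

// Sketch: letting FirewallsListCall.Pages drive pagination.
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func printAllFirewallNames(ctx context.Context, svc *compute.Service, project string) error {
	return svc.Firewalls.List(project).Pages(ctx, func(page *compute.FirewallList) error {
		for _, fw := range page.Items {
			fmt.Println(fw.Name)
		}
		return nil // returning a non-nil error here halts the iteration
	})
}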
-func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { +func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -34964,7 +51408,7 @@ func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { +func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34972,52 +51416,56 @@ func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { +func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesDeleteCall) Header() http.Header { +func (c *FirewallsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.delete" call. +// Do executes the "compute.firewalls.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35048,19 +51496,18 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified address resource.", - // "httpMethod": "DELETE", - // "id": "compute.addresses.delete", + // "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.firewalls.patch", // "parameterOrder": [ // "project", - // "region", - // "address" + // "firewall" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", + // "firewall": { + // "description": "Name of the firewall rule to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -35071,20 +51518,16 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{address}", + // "path": "{project}/global/firewalls/{firewall}", + // "request": { + // "$ref": "Firewall" + // }, // "response": { // "$ref": "Operation" // }, @@ -35096,99 +51539,89 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.addresses.get": +// method id "compute.firewalls.testIamPermissions": -type AddressesGetCall struct { - s *Service - project string - region string - address string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FirewallsTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get -func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { - c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
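The Patch call above applies JSON merge-patch semantics, so only the fields set on the *Firewall body are changed. A hedged sketch follows; the rule name, CIDRs, requestId UUID, and the SourceRanges/Operation field names are illustrative assumptions, not values taken from this hunk.

// Sketch: PATCH only touches the fields set on the request body (JSON merge patch).
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func widenSourceRanges(ctx context.Context, svc *compute.Service, project string) error {
	patch := &compute.Firewall{
		// Only this field is sent; clearing a field entirely would additionally
		// need the generated ForceSendFields/NullFields markers.
		SourceRanges: []string{"10.0.0.0/8", "192.168.0.0/16"},
	}
	op, err := svc.Firewalls.Patch(project, "allow-internal", patch).
		RequestId("3d7a1f5e-1111-4a2b-8c3d-000000000001"). // idempotency token; reuse on retry
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("patch operation:", op.Name, op.Status)
	return nil
}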
+func (r *FirewallsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *FirewallsTestIamPermissionsCall { + c := &FirewallsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { +func (c *FirewallsTestIamPermissionsCall) Fields(s ...googleapi.Field) *FirewallsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { +func (c *FirewallsTestIamPermissionsCall) Context(ctx context.Context) *FirewallsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesGetCall) Header() http.Header { +func (c *FirewallsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { +// Do executes the "compute.firewalls.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35207,7 +51640,7 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Address{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35219,22 +51652,14 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { } return ret, nil // { - // "description": "Returns the specified address resource.", - // "httpMethod": "GET", - // "id": "compute.addresses.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.firewalls.testIamPermissions", // "parameterOrder": [ // "project", - // "region", - // "address" + // "resource" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -35242,17 +51667,20 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{address}", + // "path": "{project}/global/firewalls/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Address" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -35263,26 +51691,28 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { } -// method id "compute.addresses.insert": +// method id "compute.firewalls.update": -type AddressesInsertCall struct { +type FirewallsUpdateCall struct { s *Service project string - region string - address *Address + firewall string + firewall2 *Firewall urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates an address resource in the specified project using -// the data included in the request. 
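TestIamPermissions, shown above, echoes back the subset of the supplied permissions that the caller actually holds on the named firewall. A hedged sketch; the permission strings, svc, the import path, and the Permissions field names are assumptions based on the usual compute IAM naming and generated struct shape.

// Sketch: asking which of a set of permissions the caller holds on one firewall.
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func checkFirewallPermissions(ctx context.Context, svc *compute.Service, project, firewall string) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.firewalls.get", "compute.firewalls.update"},
	}
	resp, err := svc.Firewalls.TestIamPermissions(project, firewall, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("granted:", resp.Permissions) // the subset the caller actually holds
	return nil
}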
-// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert -func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { - c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified firewall rule with the data included in +// the request. The PUT method can only update the following fields of +// firewall rule: allowed, description, sourceRanges, sourceTags, +// targetTags. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update +func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { + c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address + c.firewall = firewall + c.firewall2 = firewall2 return c } @@ -35300,7 +51730,7 @@ func (r *AddressesService) Insert(project string, region string, address *Addres // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { +func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -35308,7 +51738,7 @@ func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { +func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35316,56 +51746,56 @@ func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { +func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesInsertCall) Header() http.Header { +func (c *FirewallsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.insert" call. +// Do executes the "compute.firewalls.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35396,25 +51826,25 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an address resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.addresses.insert", + // "description": "Updates the specified firewall rule with the data included in the request. The PUT method can only update the following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", + // "httpMethod": "PUT", + // "id": "compute.firewalls.update", // "parameterOrder": [ // "project", - // "region" + // "firewall" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "firewall": { + // "description": "Name of the firewall rule to update.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -35424,9 +51854,9 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses", + // "path": "{project}/global/firewalls/{firewall}", // "request": { - // "$ref": "Address" + // "$ref": "Firewall" // }, // "response": { // "$ref": "Operation" @@ -35439,25 +51869,22 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.addresses.list": +// method id "compute.forwardingRules.aggregatedList": -type AddressesListCall struct { +type ForwardingRulesAggregatedListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of addresses contained within the specified -// region. 
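Unlike Patch, the Update call above is a full PUT, and per the generated description it can only replace allowed, description, sourceRanges, sourceTags, and targetTags. A hedged sketch; the rule name, values, and Firewall/FirewallAllowed field names are assumptions.

// Sketch: PUT semantics — the supplied body replaces the updatable fields wholesale.
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func replaceFirewallRule(ctx context.Context, svc *compute.Service, project string) error {
	rule := &compute.Firewall{
		Description:  "allow ssh from the corp range (example values)",
		SourceRanges: []string{"203.0.113.0/24"},
		Allowed: []*compute.FirewallAllowed{
			{IPProtocol: "tcp", Ports: []string{"22"}},
		},
	}
	op, err := svc.Firewalls.Update(project, "allow-ssh", rule).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("update operation:", op.Name)
	return nil
}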
-// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list -func (r *AddressesService) List(project string, region string) *AddressesListCall { - c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of forwarding rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList +func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { + c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -35483,7 +51910,7 @@ func (r *AddressesService) List(project string, region string) *AddressesListCal // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *AddressesListCall) Filter(filter string) *AddressesListCall { +func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -35494,7 +51921,7 @@ func (c *AddressesListCall) Filter(filter string) *AddressesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { +func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -35511,7 +51938,7 @@ func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { +func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -35519,7 +51946,7 @@ func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { +func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -35527,7 +51954,7 @@ func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { +func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35537,7 +51964,7 @@ func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { +func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -35545,21 +51972,21 @@ func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { +func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesListCall) Header() http.Header { +func (c *ForwardingRulesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -35571,7 +51998,7 @@ func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -35580,19 +52007,18 @@ func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { +// Do executes the "compute.forwardingRules.aggregatedList" call. +// Exactly one of *ForwardingRuleAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35611,7 +52037,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressList{ + ret := &ForwardingRuleAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35623,12 +52049,11 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro } return ret, nil // { - // "description": "Retrieves a list of addresses contained within the specified region.", + // "description": "Retrieves an aggregated list of forwarding rules.", // "httpMethod": "GET", - // "id": "compute.addresses.list", + // "id": "compute.forwardingRules.aggregatedList", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -35660,18 +52085,11 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses", + // "path": "{project}/aggregated/forwardingRules", // "response": { - // "$ref": "AddressList" + // "$ref": "ForwardingRuleAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -35685,7 +52103,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { +func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -35703,27 +52121,25 @@ func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) erro } } -// method id "compute.addresses.setLabels": +// method id "compute.forwardingRules.delete": -type AddressesSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesDeleteCall struct { + s *Service + project string + region string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an Address. To learn more about labels, -// read the Labeling Resources documentation. -func (r *AddressesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *AddressesSetLabelsCall { - c := &AddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified ForwardingRule resource. 
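The aggregated list above groups forwarding rules per scope, so iterating the result means walking a map keyed by region. A hedged sketch using Pages; the Items/ForwardingRules/Name/Target field names follow the usual generated scoped-list shape and are not shown in this hunk, so treat them as assumptions along with svc and the import path.

// Sketch: walking an aggregated forwarding-rule listing, page by page.
// Items is assumed to be a map keyed by scope ("regions/us-central1", ...).
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func printForwardingRulesByRegion(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.ForwardingRules.AggregatedList(project).MaxResults(250)
	return call.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, fr := range scoped.ForwardingRules {
				fmt.Printf("%s: %s -> %s\n", scope, fr.Name, fr.Target)
			}
		}
		return nil
	})
}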
+// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete +func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { + c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest + c.forwardingRule = forwardingRule return c } @@ -35741,7 +52157,7 @@ func (r *AddressesService) SetLabels(project string, region string, resource str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AddressesSetLabelsCall) RequestId(requestId string) *AddressesSetLabelsCall { +func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRulesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -35749,7 +52165,7 @@ func (c *AddressesSetLabelsCall) RequestId(requestId string) *AddressesSetLabels // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesSetLabelsCall) Fields(s ...googleapi.Field) *AddressesSetLabelsCall { +func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35757,57 +52173,52 @@ func (c *AddressesSetLabelsCall) Fields(s ...googleapi.Field) *AddressesSetLabel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesSetLabelsCall) Context(ctx context.Context) *AddressesSetLabelsCall { +func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesSetLabelsCall) Header() http.Header { +func (c *ForwardingRulesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.setLabels" call. 
+// Do executes the "compute.forwardingRules.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35838,182 +52249,22 @@ func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.addresses.setLabels", + // "description": "Deletes the specified ForwardingRule resource.", + // "httpMethod": "DELETE", + // "id": "compute.forwardingRules.delete", // "parameterOrder": [ // "project", // "region", - // "resource" + // "forwardingRule" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/addresses/{resource}/setLabels", - // "request": { - // "$ref": "RegionSetLabelsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.addresses.testIamPermissions": - -type AddressesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *AddressesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *AddressesTestIamPermissionsCall { - c := &AddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *AddressesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AddressesTestIamPermissionsCall) Context(ctx context.Context) *AddressesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AddressesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.addresses.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.addresses.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -36022,121 +52273,57 @@ func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.allocations.aggregatedList": - -type AllocationsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of allocations. -func (r *AllocationsService) AggregatedList(project string) *AllocationsAggregatedListCall { - c := &AllocationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *AllocationsAggregatedListCall) Filter(filter string) *AllocationsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AllocationsAggregatedListCall) MaxResults(maxResults int64) *AllocationsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} +// method id "compute.forwardingRules.get": -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. 
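Because the Delete call above returns an Operation and accepts the requestId parameter just described, a retried call with the same ID is de-duplicated server-side. A hedged sketch; the region, rule name, UUID, and Operation field names are illustrative assumptions.

// Sketch: deleting a forwarding rule with an idempotency requestId so a
// retried call is ignored if the first attempt already completed.
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func deleteForwardingRule(ctx context.Context, svc *compute.Service, project string) error {
	op, err := svc.ForwardingRules.
		Delete(project, "us-central1", "example-rule").
		RequestId("7f2c9b34-2222-4d5e-9abc-0000000000ab"). // any non-zero UUID; reuse it on retry
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("delete operation:", op.Name, op.Status)
	return nil
}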
By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AllocationsAggregatedListCall) OrderBy(orderBy string) *AllocationsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c +type ForwardingRulesGetCall struct { + s *Service + project string + region string + forwardingRule string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AllocationsAggregatedListCall) PageToken(pageToken string) *AllocationsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// Get: Returns the specified ForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get +func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { + c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingRule = forwardingRule return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AllocationsAggregatedListCall) Fields(s ...googleapi.Field) *AllocationsAggregatedListCall { +func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36146,7 +52333,7 @@ func (c *AllocationsAggregatedListCall) Fields(s ...googleapi.Field) *Allocation // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AllocationsAggregatedListCall) IfNoneMatch(entityTag string) *AllocationsAggregatedListCall { +func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -36154,21 +52341,21 @@ func (c *AllocationsAggregatedListCall) IfNoneMatch(entityTag string) *Allocatio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AllocationsAggregatedListCall) Context(ctx context.Context) *AllocationsAggregatedListCall { +func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
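Every call type in this file carries the same Context and Header plumbing shown above, so deadlines and extra HTTP headers are set per call rather than on the service. A minimal sketch of that pattern under the same svc/import-path assumptions; the header name is purely illustrative and not something this API requires.

// Sketch: per-call deadline via Context plus an extra request header via Header().
package computeexamples

import (
	"context"
	"time"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func getRuleWithDeadline(svc *compute.Service, project, region, rule string) (*compute.ForwardingRule, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	call := svc.ForwardingRules.Get(project, region, rule).Context(ctx)
	call.Header().Set("X-Example-Trace", "demo") // illustrative custom header
	return call.Do()
}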
-func (c *AllocationsAggregatedListCall) Header() http.Header { +func (c *ForwardingRulesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AllocationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -36180,7 +52367,7 @@ func (c *AllocationsAggregatedListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/allocations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -36188,19 +52375,21 @@ func (c *AllocationsAggregatedListCall) doRequest(alt string) (*http.Response, e } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.allocations.aggregatedList" call. -// Exactly one of *AllocationAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *AllocationAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.forwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AllocationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*AllocationAggregatedList, error) { +func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36219,7 +52408,7 @@ func (c *AllocationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Alloc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AllocationAggregatedList{ + ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36231,34 +52420,20 @@ func (c *AllocationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Alloc } return ret, nil // { - // "description": "Retrieves an aggregated list of allocations.", + // "description": "Returns the specified ForwardingRule resource.", // "httpMethod": "GET", - // "id": "compute.allocations.aggregatedList", + // "id": "compute.forwardingRules.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "forwardingRule" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -36267,11 +52442,18 @@ func (c *AllocationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Alloc // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/allocations", + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "AllocationAggregatedList" + // "$ref": "ForwardingRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -36282,45 +52464,26 @@ func (c *AllocationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Alloc } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. 
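IfNoneMatch pairs with googleapi.IsNotModified: when the supplied ETag still matches, Do returns an error that IsNotModified recognizes and no new body is decoded. A hedged sketch of that pattern around the Get call above; where the cached ETag comes from is left schematic, and svc plus the import path remain assumptions.

// Sketch: conditional GET of a forwarding rule using a previously saved ETag.
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
	"google.golang.org/api/googleapi"
)

func refreshForwardingRule(ctx context.Context, svc *compute.Service, project, region, rule, cachedETag string) (*compute.ForwardingRule, error) {
	fr, err := svc.ForwardingRules.Get(project, region, rule).
		IfNoneMatch(cachedETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("cached copy is still current")
		return nil, nil // caller keeps using its cached rule
	}
	if err != nil {
		return nil, err
	}
	return fr, nil
}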
-// The provided context supersedes any context provided to the Context method. -func (c *AllocationsAggregatedListCall) Pages(ctx context.Context, f func(*AllocationAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.allocations.delete": +// method id "compute.forwardingRules.insert": -type AllocationsDeleteCall struct { - s *Service - project string - zone string - allocation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesInsertCall struct { + s *Service + project string + region string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified allocation. -func (r *AllocationsService) Delete(project string, zone string, allocation string) *AllocationsDeleteCall { - c := &AllocationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a ForwardingRule resource in the specified project +// and region using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert +func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { + c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.allocation = allocation + c.region = region + c.forwardingrule = forwardingrule return c } @@ -36338,7 +52501,7 @@ func (r *AllocationsService) Delete(project string, zone string, allocation stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AllocationsDeleteCall) RequestId(requestId string) *AllocationsDeleteCall { +func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRulesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -36346,7 +52509,7 @@ func (c *AllocationsDeleteCall) RequestId(requestId string) *AllocationsDeleteCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AllocationsDeleteCall) Fields(s ...googleapi.Field) *AllocationsDeleteCall { +func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36354,52 +52517,56 @@ func (c *AllocationsDeleteCall) Fields(s ...googleapi.Field) *AllocationsDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AllocationsDeleteCall) Context(ctx context.Context) *AllocationsDeleteCall { +func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AllocationsDeleteCall) Header() http.Header { +func (c *ForwardingRulesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AllocationsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations/{allocation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "allocation": c.allocation, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.allocations.delete" call. +// Do executes the "compute.forwardingRules.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AllocationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36430,26 +52597,25 @@ func (c *AllocationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified allocation.", - // "httpMethod": "DELETE", - // "id": "compute.allocations.delete", + // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.insert", // "parameterOrder": [ // "project", - // "zone", - // "allocation" + // "region" // ], // "parameters": { - // "allocation": { - // "description": "Name of the allocation to delete.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -36457,16 +52623,12 @@ func (c *AllocationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/allocations/{allocation}", + // "path": "{project}/regions/{region}/forwardingRules", + // "request": { + // "$ref": "ForwardingRule" + // }, // "response": { // "$ref": "Operation" // }, @@ -36478,32 +52640,95 @@ func (c *AllocationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.allocations.get": +// method id "compute.forwardingRules.list": -type AllocationsGetCall struct { +type ForwardingRulesListCall struct { s *Service project string - zone string - allocation string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Retrieves all information of the specified allocation. 
-func (r *AllocationsService) Get(project string, zone string, allocation string) *AllocationsGetCall { - c := &AllocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of ForwardingRule resources available to the +// specified project and region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list +func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { + c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.allocation = allocation + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
+func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AllocationsGetCall) Fields(s ...googleapi.Field) *AllocationsGetCall { +func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36513,7 +52738,7 @@ func (c *AllocationsGetCall) Fields(s ...googleapi.Field) *AllocationsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AllocationsGetCall) IfNoneMatch(entityTag string) *AllocationsGetCall { +func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { c.ifNoneMatch_ = entityTag return c } @@ -36521,21 +52746,21 @@ func (c *AllocationsGetCall) IfNoneMatch(entityTag string) *AllocationsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AllocationsGetCall) Context(ctx context.Context) *AllocationsGetCall { +func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AllocationsGetCall) Header() http.Header { +func (c *ForwardingRulesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AllocationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -36547,7 +52772,7 @@ func (c *AllocationsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations/{allocation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -36555,21 +52780,20 @@ func (c *AllocationsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "allocation": c.allocation, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.allocations.get" call. -// Exactly one of *Allocation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Allocation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AllocationsGetCall) Do(opts ...googleapi.CallOption) (*Allocation, error) { +// Do executes the "compute.forwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *ForwardingRuleList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36588,7 +52812,7 @@ func (c *AllocationsGetCall) Do(opts ...googleapi.CallOption) (*Allocation, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Allocation{ + ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36600,20 +52824,35 @@ func (c *AllocationsGetCall) Do(opts ...googleapi.CallOption) (*Allocation, erro } return ret, nil // { - // "description": "Retrieves all information of the specified allocation.", + // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", // "httpMethod": "GET", - // "id": "compute.allocations.get", + // "id": "compute.forwardingRules.list", // "parameterOrder": [ // "project", - // "zone", - // "allocation" + // "region" // ], // "parameters": { - // "allocation": { - // "description": "Name of the allocation to retrieve.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -36623,17 +52862,17 @@ func (c *AllocationsGetCall) Do(opts ...googleapi.CallOption) (*Allocation, erro // "required": true, // "type": "string" // }, - // "zone": { - // "description": "Name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/allocations/{allocation}", + // "path": "{project}/regions/{region}/forwardingRules", // "response": { - // "$ref": "Allocation" + // "$ref": "ForwardingRuleList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -36644,99 +52883,134 @@ func (c *AllocationsGetCall) Do(opts ...googleapi.CallOption) (*Allocation, erro } -// method id "compute.allocations.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.forwardingRules.patch": -type AllocationsGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ForwardingRulesPatchCall struct { + s *Service + project string + region string + forwardingRule string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *AllocationsService) GetIamPolicy(project string, zone string, resource string) *AllocationsGetIamPolicyCall { - c := &AllocationsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified forwarding rule with the data included +// in the request. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. Currently, you can only +// patch the network_tier field. 
+func (r *ForwardingRulesService) Patch(project string, region string, forwardingRule string, forwardingrule *ForwardingRule) *ForwardingRulesPatchCall { + c := &ForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource + c.region = region + c.forwardingRule = forwardingRule + c.forwardingrule = forwardingrule + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesPatchCall) RequestId(requestId string) *ForwardingRulesPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AllocationsGetIamPolicyCall) Fields(s ...googleapi.Field) *AllocationsGetIamPolicyCall { +func (c *ForwardingRulesPatchCall) Fields(s ...googleapi.Field) *ForwardingRulesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AllocationsGetIamPolicyCall) IfNoneMatch(entityTag string) *AllocationsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AllocationsGetIamPolicyCall) Context(ctx context.Context) *AllocationsGetIamPolicyCall { +func (c *ForwardingRulesPatchCall) Context(ctx context.Context) *ForwardingRulesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AllocationsGetIamPolicyCall) Header() http.Header { +func (c *ForwardingRulesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AllocationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.allocations.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *AllocationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.forwardingRules.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36755,7 +53029,7 @@ func (c *AllocationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36767,68 +53041,78 @@ func (c *AllocationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.allocations.getIamPolicy", + // "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. 
Currently, you can only patch the network_tier field.", + // "httpMethod": "PATCH", + // "id": "compute.forwardingRules.patch", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "region", + // "forwardingRule" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to patch.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/allocations/{resource}/getIamPolicy", + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "request": { + // "$ref": "ForwardingRule" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.allocations.insert": +// method id "compute.forwardingRules.setLabels": -type AllocationsInsertCall struct { - s *Service - project string - zone string - allocation *Allocation - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a new allocation. -func (r *AllocationsService) Insert(project string, zone string, allocation *Allocation) *AllocationsInsertCall { - c := &AllocationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on the specified resource. 
To learn more +// about labels, read the Labeling Resources documentation. +func (r *ForwardingRulesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *ForwardingRulesSetLabelsCall { + c := &ForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.allocation = allocation + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest return c } @@ -36846,7 +53130,7 @@ func (r *AllocationsService) Insert(project string, zone string, allocation *All // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AllocationsInsertCall) RequestId(requestId string) *AllocationsInsertCall { +func (c *ForwardingRulesSetLabelsCall) RequestId(requestId string) *ForwardingRulesSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -36854,7 +53138,7 @@ func (c *AllocationsInsertCall) RequestId(requestId string) *AllocationsInsertCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AllocationsInsertCall) Fields(s ...googleapi.Field) *AllocationsInsertCall { +func (c *ForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *ForwardingRulesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36862,35 +53146,35 @@ func (c *AllocationsInsertCall) Fields(s ...googleapi.Field) *AllocationsInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AllocationsInsertCall) Context(ctx context.Context) *AllocationsInsertCall { +func (c *ForwardingRulesSetLabelsCall) Context(ctx context.Context) *ForwardingRulesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AllocationsInsertCall) Header() http.Header { +func (c *ForwardingRulesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AllocationsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.allocation) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/setLabels") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -36898,20 +53182,21 @@ func (c *AllocationsInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.allocations.insert" call. +// Do executes the "compute.forwardingRules.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AllocationsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36942,12 +53227,13 @@ func (c *AllocationsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a new allocation.", + // "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.allocations.insert", + // "id": "compute.forwardingRules.setLabels", // "parameterOrder": [ // "project", - // "zone" + // "region", + // "resource" // ], // "parameters": { // "project": { @@ -36957,22 +53243,29 @@ func (c *AllocationsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "Name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/allocations", + // "path": "{project}/regions/{region}/forwardingRules/{resource}/setLabels", // "request": { - // "$ref": "Allocation" + // "$ref": "RegionSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -36985,159 +53278,112 @@ func (c *AllocationsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.allocations.list": +// method id "compute.forwardingRules.setTarget": -type AllocationsListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ForwardingRulesSetTargetCall struct { + s *Service + project string + region string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: A list all the allocations that have been configured for the -// specified project in specified zone. -func (r *AllocationsService) List(project string, zone string) *AllocationsListCall { - c := &AllocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTarget: Changes target URL for forwarding rule. The new target +// should be of the same type as the old target. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget +func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { + c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
-func (c *AllocationsListCall) Filter(filter string) *AllocationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AllocationsListCall) MaxResults(maxResults int64) *AllocationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.region = region + c.forwardingRule = forwardingRule + c.targetreference = targetreference return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AllocationsListCall) OrderBy(orderBy string) *AllocationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AllocationsListCall) PageToken(pageToken string) *AllocationsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AllocationsListCall) Fields(s ...googleapi.Field) *AllocationsListCall { +func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *AllocationsListCall) IfNoneMatch(entityTag string) *AllocationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AllocationsListCall) Context(ctx context.Context) *AllocationsListCall { +func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AllocationsListCall) Header() http.Header { +func (c *ForwardingRulesSetTargetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AllocationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.allocations.list" call. -// Exactly one of *AllocationList or error will be non-nil. Any non-2xx +// Do executes the "compute.forwardingRules.setTarget" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *AllocationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AllocationsListCall) Do(opts ...googleapi.CallOption) (*AllocationList, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37156,7 +53402,7 @@ func (c *AllocationsListCall) Do(opts ...googleapi.CallOption) (*AllocationList, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AllocationList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -37168,35 +53414,20 @@ func (c *AllocationsListCall) Do(opts ...googleapi.CallOption) (*AllocationList, } return ret, nil // { - // "description": "A list all the allocations that have been configured for the specified project in specified zone.", - // "httpMethod": "GET", - // "id": "compute.allocations.list", + // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.setTarget", // "parameterOrder": [ // "project", - // "zone" + // "region", + // "forwardingRule" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -37206,76 +53437,62 @@ func (c *AllocationsListCall) Do(opts ...googleapi.CallOption) (*AllocationList, // "required": true, // "type": "string" // }, - // "zone": { - // "description": "Name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/allocations", + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", + // "request": { + // "$ref": "TargetReference" + // }, // "response": { - // "$ref": "AllocationList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AllocationsListCall) Pages(ctx context.Context, f func(*AllocationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.allocations.setIamPolicy": +// method id "compute.forwardingRules.testIamPermissions": -type AllocationsSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. 
-func (r *AllocationsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *AllocationsSetIamPolicyCall { - c := &AllocationsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ForwardingRulesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *ForwardingRulesTestIamPermissionsCall { + c := &ForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone + c.region = region c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AllocationsSetIamPolicyCall) Fields(s ...googleapi.Field) *AllocationsSetIamPolicyCall { +func (c *ForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ForwardingRulesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37283,35 +53500,35 @@ func (c *AllocationsSetIamPolicyCall) Fields(s ...googleapi.Field) *AllocationsS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AllocationsSetIamPolicyCall) Context(ctx context.Context) *AllocationsSetIamPolicyCall { +func (c *ForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *ForwardingRulesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AllocationsSetIamPolicyCall) Header() http.Header { +func (c *ForwardingRulesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AllocationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -37320,20 +53537,20 @@ func (c *AllocationsSetIamPolicyCall) doRequest(alt string) (*http.Response, err req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.allocations.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *AllocationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.forwardingRules.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37352,7 +53569,7 @@ func (c *AllocationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -37364,12 +53581,12 @@ func (c *AllocationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.allocations.setIamPolicy", + // "id": "compute.forwardingRules.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", + // "region", // "resource" // ], // "parameters": { @@ -37380,64 +53597,80 @@ func (c *AllocationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/allocations/{resource}/setIamPolicy", + // "path": "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions", // "request": { - // "$ref": "ZoneSetPolicyRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Policy" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.allocations.testIamPermissions": +// method id "compute.globalAddresses.delete": -type 
AllocationsTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalAddressesDeleteCall struct { + s *Service + project string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *AllocationsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *AllocationsTestIamPermissionsCall { - c := &AllocationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete +func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { + c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AllocationsTestIamPermissionsCall) Fields(s ...googleapi.Field) *AllocationsTestIamPermissionsCall { +func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37445,57 +53678,51 @@ func (c *AllocationsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Alloca // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AllocationsTestIamPermissionsCall) Context(ctx context.Context) *AllocationsTestIamPermissionsCall { +func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AllocationsTestIamPermissionsCall) Header() http.Header { +func (c *GlobalAddressesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AllocationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/allocations/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.allocations.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AllocationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.globalAddresses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37514,7 +53741,7 @@ func (c *AllocationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -37526,138 +53753,72 @@ func (c *AllocationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.allocations.testIamPermissions", + // "description": "Deletes the specified address resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalAddresses.delete", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "address" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "address": { + // "description": "Name of the address resource to delete.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/allocations/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/addresses/{address}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.autoscalers.aggregatedList": +// method id "compute.globalAddresses.get": -type AutoscalersAggregatedListCall struct { +type GlobalAddressesGetCall struct { s *Service project string + address string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of autoscalers. -func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { - c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. 
-// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// Get: Returns the specified address resource. Gets a list of available +// addresses by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get +func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { + c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.address = address return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { +func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37667,7 +53828,7 @@ func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *Autoscaler // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { +func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -37675,21 +53836,21 @@ func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *Autoscale // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { +func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AutoscalersAggregatedListCall) Header() http.Header { +func (c *GlobalAddressesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -37701,7 +53862,7 @@ func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -37710,18 +53871,19 @@ func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, e req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.aggregatedList" call. -// Exactly one of *AutoscalerAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { +// Do executes the "compute.globalAddresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37740,7 +53902,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AutoscalerAggregatedList{ + ret := &Address{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -37752,34 +53914,19 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos } return ret, nil // { - // "description": "Retrieves an aggregated list of autoscalers.", + // "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request.", // "httpMethod": "GET", - // "id": "compute.autoscalers.aggregatedList", + // "id": "compute.globalAddresses.get", // "parameterOrder": [ - // "project" + // "project", + // "address" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "address": { + // "description": "Name of the address resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -37790,9 +53937,9 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // "type": "string" // } // }, - // "path": "{project}/aggregated/autoscalers", + // "path": "{project}/global/addresses/{address}", // "response": { - // "$ref": "AutoscalerAggregatedList" + // "$ref": "Address" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -37803,45 +53950,24 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.autoscalers.delete": +// method id "compute.globalAddresses.insert": -type AutoscalersDeleteCall struct { +type GlobalAddressesInsertCall struct { s *Service project string - zone string - autoscaler string + address *Address urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified autoscaler. -func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { - c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an address resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert +func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { + c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler + c.address = address return c } @@ -37859,7 +53985,7 @@ func (r *AutoscalersService) Delete(project string, zone string, autoscaler stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { +func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -37867,7 +53993,7 @@ func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { +func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37875,52 +54001,55 @@ func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { +func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AutoscalersDeleteCall) Header() http.Header { +func (c *GlobalAddressesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.delete" call. +// Do executes the "compute.globalAddresses.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37951,22 +54080,13 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified autoscaler.", - // "httpMethod": "DELETE", - // "id": "compute.autoscalers.delete", + // "description": "Creates an address resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.globalAddresses.insert", // "parameterOrder": [ - // "project", - // "zone", - // "autoscaler" + // "project" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -37978,16 +54098,12 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "{project}/global/addresses", + // "request": { + // "$ref": "Address" + // }, // "response": { // "$ref": "Operation" // }, @@ -37999,33 +54115,92 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.autoscalers.get": +// method id "compute.globalAddresses.list": -type AutoscalersGetCall struct { +type GlobalAddressesListCall struct { s *Service project string - zone string - autoscaler string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified autoscaler resource. Gets a list of -// available autoscalers by making a list() request. -func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { - c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of global addresses. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list +func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { + c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { +func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38035,7 +54210,7 @@ func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { +func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall { c.ifNoneMatch_ = entityTag return c } @@ -38043,21 +54218,21 @@ func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { +func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersGetCall) Header() http.Header { +func (c *GlobalAddressesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -38069,7 +54244,7 @@ func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -38077,21 +54252,19 @@ func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// Do executes the "compute.globalAddresses.list" call. +// Exactly one of *AddressList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at +// *AddressList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { +func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38110,7 +54283,7 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Autoscaler{ + ret := &AddressList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38122,20 +54295,34 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } return ret, nil // { - // "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", + // "description": "Retrieves a list of global addresses.", // "httpMethod": "GET", - // "id": "compute.autoscalers.get", + // "id": "compute.globalAddresses.list", // "parameterOrder": [ - // "project", - // "zone", - // "autoscaler" + // "project" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -38144,18 +54331,11 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "{project}/global/addresses", // "response": { - // "$ref": "Autoscaler" + // "$ref": "AddressList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -38166,51 +54346,53 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } -// method id "compute.autoscalers.insert": - -type AutoscalersInsertCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. 
-func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { - c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c +// method id "compute.globalAddresses.setLabels": + +type GlobalAddressesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { - c.urlParams_.Set("requestId", requestId) +// SetLabels: Sets the labels on a GlobalAddress. To learn more about +// labels, read the Labeling Resources documentation. +func (r *GlobalAddressesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalAddressesSetLabelsCall { + c := &GlobalAddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { +func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddressesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38218,35 +54400,35 @@ func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { +func (c *GlobalAddressesSetLabelsCall) Context(ctx context.Context) *GlobalAddressesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AutoscalersInsertCall) Header() http.Header { +func (c *GlobalAddressesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -38254,20 +54436,20 @@ func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.insert" call. +// Do executes the "compute.globalAddresses.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38298,12 +54480,12 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.autoscalers.insert", + // "id": "compute.globalAddresses.setLabels", // "parameterOrder": [ // "project", - // "zone" + // "resource" // ], // "parameters": { // "project": { @@ -38313,187 +54495,112 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/global/addresses/{resource}/setLabels", // "request": { - // "$ref": "Autoscaler" + // "$ref": "GlobalSetLabelsRequest" // }, // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.autoscalers.list": - -type AutoscalersListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of autoscalers contained within the specified -// zone. -func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { - c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. 
By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { - c.urlParams_.Set("pageToken", pageToken) +// method id "compute.globalAddresses.testIamPermissions": + +type GlobalAddressesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *GlobalAddressesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalAddressesTestIamPermissionsCall { + c := &GlobalAddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { +func (c *GlobalAddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalAddressesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { +func (c *GlobalAddressesTestIamPermissionsCall) Context(ctx context.Context) *GlobalAddressesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AutoscalersListCall) Header() http.Header { +func (c *GlobalAddressesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.list" call. -// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AutoscalerList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalAddresses.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { +func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38512,7 +54619,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AutoscalerList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38524,37 +54631,14 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified zone.", - // "httpMethod": "GET", - // "id": "compute.autoscalers.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.globalAddresses.testIamPermissions", // "parameterOrder": [ // "project", - // "zone" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -38562,17 +54646,20 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // "required": true, // "type": "string" // }, - // "zone": { - // "description": "Name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/global/addresses/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "AutoscalerList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -38583,54 +54670,23 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, } -// Pages invokes f for each page of results. 
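The hunk above replaces the removed autoscalers.list plumbing with the new compute.globalAddresses.testIamPermissions call. As a rough usage sketch that is not part of the diff, and assuming the vendored package is the compute beta surface with a *compute.Service already constructed, a caller might exercise the new method as below; the project name and permission strings are placeholders.

package example // illustrative sketch, not part of the vendored diff

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path; match the vendored package
)

// checkGlobalAddressPermissions asks the API which of the supplied IAM
// permissions the caller actually holds on a global address resource.
func checkGlobalAddressPermissions(ctx context.Context, svc *compute.Service, project, address string) error {
	req := &compute.TestPermissionsRequest{
		// Placeholder permission names; substitute the permissions you care about.
		Permissions: []string{"compute.globalAddresses.get", "compute.globalAddresses.delete"},
	}
	resp, err := svc.GlobalAddresses.TestIamPermissions(project, address, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	// resp.Permissions echoes back only the subset of permissions that are granted.
	fmt.Println("granted:", resp.Permissions)
	return nil
}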
-// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.autoscalers.patch": +// method id "compute.globalForwardingRules.delete": -type AutoscalersPatchCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesDeleteCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates an autoscaler in the specified project using the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. -func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { - c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified GlobalForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete +func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { + c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.forwardingRule = forwardingRule return c } @@ -38648,7 +54704,7 @@ func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { +func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -38656,7 +54712,7 @@ func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { +func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38664,56 +54720,51 @@ func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { +func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersPatchCall) Header() http.Header { +func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.patch" call. +// Do executes the "compute.globalForwardingRules.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38744,18 +54795,19 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.autoscalers.patch", + // "description": "Deletes the specified GlobalForwardingRule resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalForwardingRules.delete", // "parameterOrder": [ // "project", - // "zone" + // "forwardingRule" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -38769,19 +54821,9 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, + // "path": "{project}/global/forwardingRules/{forwardingRule}", // "response": { // "$ref": "Operation" // }, @@ -38793,92 +54835,97 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.autoscalers.testIamPermissions": +// method id "compute.globalForwardingRules.get": -type AutoscalersTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesGetCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *AutoscalersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *AutoscalersTestIamPermissionsCall { - c := &AutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified GlobalForwardingRule resource. Gets a list +// of available forwarding rules by making a list() request. 
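With the compute.globalForwardingRules.delete wiring complete at this point in the diff, a minimal caller sketch may help; it assumes the same vendored beta package and an already-authorized *compute.Service, and the UUID literal is only a placeholder for a client-generated request ID.

package example // illustrative sketch, not part of the vendored diff

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path; match the vendored package
)

// deleteGlobalForwardingRule issues the delete and returns the resulting
// Operation; the caller is expected to poll the global operations API until
// the operation reports DONE.
func deleteGlobalForwardingRule(ctx context.Context, svc *compute.Service, project, rule string) (*compute.Operation, error) {
	return svc.GlobalForwardingRules.Delete(project, rule).
		// Optional: a unique, client-generated UUID lets the server deduplicate
		// retries of the same logical delete. Placeholder value shown here.
		RequestId("f3b7d9a2-4c1e-4b6f-9a8d-2e5c7f1a0b34").
		Context(ctx).
		Do()
}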
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get +func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { + c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.forwardingRule = forwardingRule return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *AutoscalersTestIamPermissionsCall { +func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersTestIamPermissionsCall) Context(ctx context.Context) *AutoscalersTestIamPermissionsCall { +func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersTestIamPermissionsCall) Header() http.Header { +func (c *GlobalForwardingRulesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. 
Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalForwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38897,7 +54944,7 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38909,43 +54956,32 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.autoscalers.testIamPermissions", + // "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.globalForwardingRules.get", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "forwardingRule" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "ForwardingRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -38956,32 +54992,24 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } -// method id "compute.autoscalers.update": +// method id 
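The Get call added above supports conditional fetches through IfNoneMatch, and the generated comments point at googleapi.IsNotModified for detecting the 304 case. A sketch of that pattern, under the same package assumptions, with the ETag assumed to have been recorded from an earlier response:

package example // illustrative sketch, not part of the vendored diff

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path; match the vendored package
	"google.golang.org/api/googleapi"
)

// getForwardingRuleIfChanged refetches a rule only when it has changed since
// the ETag recorded earlier. On http.StatusNotModified the generated client
// returns an error that googleapi.IsNotModified recognizes.
func getForwardingRuleIfChanged(ctx context.Context, svc *compute.Service, project, rule, lastETag string) (*compute.ForwardingRule, bool, error) {
	fr, err := svc.GlobalForwardingRules.Get(project, rule).
		IfNoneMatch(lastETag). // lastETag is assumed to come from a previous response
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, false, nil // unchanged since lastETag
	}
	if err != nil {
		return nil, false, err
	}
	return fr, true, nil
}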
"compute.globalForwardingRules.insert": -type AutoscalersUpdateCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesInsertCall struct { + s *Service + project string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates an autoscaler in the specified project using the data -// included in the request. -func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { - c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a GlobalForwardingRule resource in the specified +// project using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert +func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { + c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. -func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.forwardingrule = forwardingrule return c } @@ -38999,7 +55027,7 @@ func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdate // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { +func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalForwardingRulesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -39007,7 +55035,7 @@ func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { +func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39015,56 +55043,55 @@ func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { +func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AutoscalersUpdateCall) Header() http.Header { +func (c *GlobalForwardingRulesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.update" call. +// Do executes the "compute.globalForwardingRules.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39095,20 +55122,13 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.autoscalers.update", + // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.insert", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -39120,18 +55140,11 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/global/forwardingRules", // "request": { - // "$ref": "Autoscaler" + // "$ref": "ForwardingRule" // }, // "response": { // "$ref": "Operation" @@ -39144,108 +55157,157 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.backendBuckets.addSignedUrlKey": +// method id "compute.globalForwardingRules.list": -type BackendBucketsAddSignedUrlKeyCall struct { - s *Service - project string - backendBucket string - signedurlkey *SignedUrlKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AddSignedUrlKey: Adds a key for validating requests with signed URLs -// for this backend bucket. -func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { - c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of GlobalForwardingRule resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list +func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { + c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.signedurlkey = signedurlkey return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsAddSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. 
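The insert method added above takes a *compute.ForwardingRule body and returns an Operation. A minimal creation sketch follows; the rule name and target proxy URL are placeholders, and the field names are taken from the generated ForwardingRule struct in this package.

package example // illustrative sketch, not part of the vendored diff

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path; match the vendored package
)

// insertGlobalForwardingRule creates a rule that sends port 80 TCP traffic to
// an existing global target proxy.
func insertGlobalForwardingRule(ctx context.Context, svc *compute.Service, project string) (*compute.Operation, error) {
	rule := &compute.ForwardingRule{
		Name:       "example-http-rule", // placeholder name
		IPProtocol: "TCP",
		PortRange:  "80",
		// Placeholder target; a global rule typically points at a target proxy.
		Target: "https://www.googleapis.com/compute/beta/projects/" + project + "/global/targetHttpProxies/example-proxy",
	}
	return svc.GlobalForwardingRules.Insert(project, rule).Context(ctx).Do()
}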
The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsAddSignedUrlKeyCall { +func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsAddSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsAddSignedUrlKeyCall { +func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { +func (c *GlobalForwardingRulesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.addSignedUrlKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.globalForwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ForwardingRuleList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39264,7 +55326,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39276,18 +55338,34 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Adds a key for validating requests with signed URLs for this backend bucket.", - // "httpMethod": "POST", - // "id": "compute.backendBuckets.addSignedUrlKey", + // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.globalForwardingRules.list", // "parameterOrder": [ - // "project", - // "backendBucket" + // "project" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -39296,44 +55374,63 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", - // "request": { - // "$ref": "SignedUrlKey" - // }, + // "path": "{project}/global/forwardingRules", // "response": { - // "$ref": "Operation" + // "$ref": "ForwardingRuleList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendBuckets.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type BackendBucketsDeleteCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.globalForwardingRules.patch": + +type GlobalForwardingRulesPatchCall struct { + s *Service + project string + forwardingRule string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified BackendBucket resource. -func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { - c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified forwarding rule with the data included +// in the request. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. 
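The list method and its Pages helper, both shown above, handle the nextPageToken bookkeeping on the caller's behalf. A sketch of paging through all global forwarding rules, with a placeholder filter expression and the same assumed package:

package example // illustrative sketch, not part of the vendored diff

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path; match the vendored package
)

// listGlobalForwardingRules walks every page of results. Returning a non-nil
// error from the callback halts the iteration, mirroring the Pages contract
// in the generated code above.
func listGlobalForwardingRules(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.GlobalForwardingRules.List(project).
		Filter("name != example-rule"). // placeholder filter expression
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.ForwardingRuleList) error {
		for _, fr := range page.Items {
			fmt.Println(fr.Name, fr.IPAddress)
		}
		return nil
	})
}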
Currently, you can only +// patch the network_tier field. +func (r *GlobalForwardingRulesService) Patch(project string, forwardingRule string, forwardingrule *ForwardingRule) *GlobalForwardingRulesPatchCall { + c := &GlobalForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket + c.forwardingRule = forwardingRule + c.forwardingrule = forwardingrule return c } @@ -39351,7 +55448,7 @@ func (r *BackendBucketsService) Delete(project string, backendBucket string) *Ba // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { +func (c *GlobalForwardingRulesPatchCall) RequestId(requestId string) *GlobalForwardingRulesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -39359,7 +55456,7 @@ func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { +func (c *GlobalForwardingRulesPatchCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39367,51 +55464,56 @@ func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { +func (c *GlobalForwardingRulesPatchCall) Context(ctx context.Context) *GlobalForwardingRulesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsDeleteCall) Header() http.Header { +func (c *GlobalForwardingRulesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.delete" call. +// Do executes the "compute.globalForwardingRules.patch" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39442,18 +55544,18 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified BackendBucket resource.", - // "httpMethod": "DELETE", - // "id": "compute.backendBuckets.delete", + // "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", + // "httpMethod": "PATCH", + // "id": "compute.globalForwardingRules.patch", // "parameterOrder": [ // "project", - // "backendBucket" + // "forwardingRule" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to delete.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -39470,7 +55572,10 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "request": { + // "$ref": "ForwardingRule" + // }, // "response": { // "$ref": "Operation" // }, @@ -39482,50 +55587,32 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.deleteSignedUrlKey": +// method id "compute.globalForwardingRules.setLabels": -type BackendBucketsDeleteSignedUrlKeyCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteSignedUrlKey: Deletes a key for validating requests with signed -// URLs for this backend bucket. -func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { - c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on the specified resource. To learn more +// about labels, read the Labeling Resources documentation. 
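Per the description above, the patch method currently accepts only the network_tier field. A sketch of sending that single-field merge patch, assuming the beta ForwardingRule struct exposes the field as NetworkTier; verify the field name against the vendored code before relying on it.

package example // illustrative sketch, not part of the vendored diff

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path; match the vendored package
)

// patchForwardingRuleNetworkTier sends a JSON merge patch containing only the
// field the API currently allows to be patched.
func patchForwardingRuleNetworkTier(ctx context.Context, svc *compute.Service, project, rule, tier string) (*compute.Operation, error) {
	patch := &compute.ForwardingRule{NetworkTier: tier} // e.g. "PREMIUM" or "STANDARD"
	return svc.GlobalForwardingRules.Patch(project, rule, patch).Context(ctx).Do()
}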
+func (r *GlobalForwardingRulesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalForwardingRulesSetLabelsCall { + c := &GlobalForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.urlParams_.Set("keyName", keyName) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsDeleteSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteSignedUrlKeyCall { +func (c *GlobalForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39533,30 +55620,35 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *Bac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsDeleteSignedUrlKeyCall { +func (c *GlobalForwardingRulesSetLabelsCall) Context(ctx context.Context) *GlobalForwardingRulesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { +func (c *GlobalForwardingRulesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/setLabels") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -39564,20 +55656,20 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Resp } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.deleteSignedUrlKey" call. +// Do executes the "compute.globalForwardingRules.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39608,27 +55700,14 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a key for validating requests with signed URLs for this backend bucket.", + // "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.backendBuckets.deleteSignedUrlKey", + // "id": "compute.globalForwardingRules.setLabels", // "parameterOrder": [ // "project", - // "backendBucket", - // "keyName" + // "resource" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "keyName": { - // "description": "The name of the Signed URL Key to delete.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -39636,13 +55715,18 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", + // "path": "{project}/global/forwardingRules/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -39654,96 +55738,109 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } -// method id "compute.backendBuckets.get": +// method id "compute.globalForwardingRules.setTarget": -type BackendBucketsGetCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesSetTargetCall struct { + s *Service + project string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified BackendBucket resource. Gets a list of -// available backend buckets by making a list() request. -func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { - c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTarget: Changes target URL for the GlobalForwardingRule resource. +// The new target should be of the same type as the old target. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget +func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { + c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket + c.forwardingRule = forwardingRule + c.targetreference = targetreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *GlobalForwardingRulesSetTargetCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { +func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { +func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsGetCall) Header() http.Header { +func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.get" call. -// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx +// Do executes the "compute.globalForwardingRules.setTarget" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *BackendBucket.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39762,7 +55859,7 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendBucket{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39774,18 +55871,18 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } return ret, nil // { - // "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.backendBuckets.get", + // "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.setTarget", // "parameterOrder": [ // "project", - // "backendBucket" + // "forwardingRule" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to return.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -39795,64 +55892,54 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", + // "request": { + // "$ref": "TargetReference" + // }, // "response": { - // "$ref": "BackendBucket" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.backendBuckets.insert": +// method id "compute.globalForwardingRules.testIamPermissions": -type BackendBucketsInsertCall struct { - s *Service - project string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a BackendBucket resource in the specified project -// using the data included in the request. -func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { - c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *GlobalForwardingRulesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalForwardingRulesTestIamPermissionsCall { + c := &GlobalForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendbucket = backendbucket - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { +func (c *GlobalForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39860,35 +55947,35 @@ func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { +func (c *GlobalForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *GlobalForwardingRulesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsInsertCall) Header() http.Header { +func (c *GlobalForwardingRulesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -39896,19 +55983,20 @@ func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.globalForwardingRules.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39927,7 +56015,7 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39939,11 +56027,12 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.backendBuckets.insert", + // "id": "compute.globalForwardingRules.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { // "project": { @@ -39953,30 +56042,33 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets", + // "path": "{project}/global/forwardingRules/{resource}/testIamPermissions", // "request": { - // "$ref": "BackendBucket" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendBuckets.list": +// method id "compute.globalOperations.aggregatedList": -type BackendBucketsListCall struct { +type GlobalOperationsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -39985,10 +56077,10 @@ type BackendBucketsListCall struct { header_ http.Header } -// List: Retrieves the list of BackendBucket resources available to the -// specified project. -func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { - c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of all operations. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList +func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { + c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -40015,7 +56107,7 @@ func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { +func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -40026,7 +56118,7 @@ func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { +func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -40043,7 +56135,7 @@ func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsLis // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { +func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -40051,7 +56143,7 @@ func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { +func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -40059,7 +56151,7 @@ func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsList // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { +func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40069,7 +56161,7 @@ func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsLis // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { +func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -40077,21 +56169,21 @@ func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsLi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { +func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsListCall) Header() http.Header { +func (c *GlobalOperationsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -40103,7 +56195,7 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -40116,14 +56208,14 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.list" call. -// Exactly one of *BackendBucketList or error will be non-nil. Any +// Do executes the "compute.globalOperations.aggregatedList" call. +// Exactly one of *OperationAggregatedList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *BackendBucketList.ServerResponse.Header or (if a response was +// *OperationAggregatedList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { +func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40142,7 +56234,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendBucketList{ + ret := &OperationAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -40154,9 +56246,9 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke } return ret, nil // { - // "description": "Retrieves the list of BackendBucket resources available to the specified project.", + // "description": "Retrieves an aggregated list of all operations.", // "httpMethod": "GET", - // "id": "compute.backendBuckets.list", + // "id": "compute.globalOperations.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -40192,9 +56284,9 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets", + // "path": "{project}/aggregated/operations", // "response": { - // "$ref": "BackendBucketList" + // "$ref": "OperationAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -40208,7 +56300,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { +func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -40226,52 +56318,30 @@ func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucke } } -// method id "compute.backendBuckets.patch": +// method id "compute.globalOperations.delete": -type BackendBucketsPatchCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOperationsDeleteCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified BackendBucket resource with the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. -func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { - c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete +func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { + c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { - c.urlParams_.Set("requestId", requestId) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { +func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40279,98 +56349,68 @@ func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { +func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsPatchCall) Header() http.Header { +func (c *GlobalOperationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.globalOperations.delete" call. +func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return err } - return ret, nil + return nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.backendBuckets.patch", + // "description": "Deletes the specified Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalOperations.delete", // "parameterOrder": [ // "project", - // "backendBucket" + // "operation" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to patch.", + // "operation": { + // "description": "Name of the Operations resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -40380,20 +56420,9 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", - // "request": { - // "$ref": "BackendBucket" - // }, - // "response": { - // "$ref": "Operation" - // }, + // "path": "{project}/global/operations/{operation}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute" @@ -40402,283 +56431,97 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.update": +// method id "compute.globalOperations.get": -type BackendBucketsUpdateCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOperationsGetCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified BackendBucket resource with the data -// included in the request. -func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { - c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Retrieves the specified Operations resource. Gets a list of +// operations by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get +func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { + c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { - c.urlParams_.Set("requestId", requestId) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { +func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendBucketsUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendBuckets.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the specified BackendBucket resource with the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.backendBuckets.update", - // "parameterOrder": [ - // "project", - // "backendBucket" - // ], - // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", - // "request": { - // "$ref": "BackendBucket" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendServices.addSignedUrlKey": - -type BackendServicesAddSignedUrlKeyCall struct { - s *Service - project string - backendService string - signedurlkey *SignedUrlKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AddSignedUrlKey: Adds a key for validating requests with signed URLs -// for this backend service. -func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { - c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.signedurlkey = signedurlkey - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *BackendServicesAddSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesAddSignedUrlKeyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { + c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesAddSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesAddSignedUrlKeyCall { +func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { +func (c *GlobalOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.addSignedUrlKey" call. +// Do executes the "compute.globalOperations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40709,17 +56552,18 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Adds a key for validating requests with signed URLs for this backend service.", - // "httpMethod": "POST", - // "id": "compute.backendServices.addSignedUrlKey", + // "description": "Retrieves the specified Operations resource. Gets a list of operations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.globalOperations.get", // "parameterOrder": [ // "project", - // "backendService" + // "operation" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -40729,31 +56573,24 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/addSignedUrlKey", - // "request": { - // "$ref": "SignedUrlKey" - // }, + // "path": "{project}/global/operations/{operation}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendServices.aggregatedList": +// method id "compute.globalOperations.list": -type BackendServicesAggregatedListCall struct { +type GlobalOperationsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -40762,10 +56599,11 @@ type BackendServicesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves the list of all BackendService resources, -// regional and global, available to the specified project. -func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { - c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Operation resources contained within the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list +func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { + c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -40792,7 +56630,7 @@ func (r *BackendServicesService) AggregatedList(project string) *BackendServices // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { +func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { c.urlParams_.Set("filter", filter) return c } @@ -40803,7 +56641,7 @@ func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServic // nextPageToken that can be used to get the next page of results in // subsequent list requests. 
Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { +func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -40820,7 +56658,7 @@ func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *Backen // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { +func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -40828,7 +56666,7 @@ func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServ // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { +func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -40836,7 +56674,7 @@ func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *Backend // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { +func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40846,7 +56684,7 @@ func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *Backen // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { +func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -40854,21 +56692,21 @@ func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *Backe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { +func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesAggregatedListCall) Header() http.Header { +func (c *GlobalOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -40880,7 +56718,7 @@ func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Respons var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -40893,14 +56731,14 @@ func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.aggregatedList" call. -// Exactly one of *BackendServiceAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *BackendServiceAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { +func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40919,7 +56757,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceAggregatedList{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -40931,9 +56769,9 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B } return ret, nil // { - // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", + // "description": "Retrieves a list of Operation resources contained within the specified project.", // "httpMethod": "GET", - // "id": "compute.backendServices.aggregatedList", + // "id": "compute.globalOperations.list", // "parameterOrder": [ // "project" // ], @@ -40962,16 +56800,16 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // "type": "string" // }, // "project": { - // "description": "Name of the project scoping this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/backendServices", + // "path": "{project}/global/operations", // "response": { - // "$ref": "BackendServiceAggregatedList" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -40985,7 +56823,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { +func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -41003,101 +56841,156 @@ func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*B } } -// method id "compute.backendServices.delete": +// method id "compute.healthChecks.aggregatedList": -type BackendServicesDeleteCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified BackendService resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete -func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { - c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of all HealthCheck resources, +// regional and global, available to the specified project. 
+func (r *HealthChecksService) AggregatedList(project string) *HealthChecksAggregatedListCall { + c := &HealthChecksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *HealthChecksAggregatedListCall) Filter(filter string) *HealthChecksAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *HealthChecksAggregatedListCall) MaxResults(maxResults int64) *HealthChecksAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). 
Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *HealthChecksAggregatedListCall) OrderBy(orderBy string) *HealthChecksAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *HealthChecksAggregatedListCall) PageToken(pageToken string) *HealthChecksAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { +func (c *HealthChecksAggregatedListCall) Fields(s ...googleapi.Field) *HealthChecksAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HealthChecksAggregatedListCall) IfNoneMatch(entityTag string) *HealthChecksAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { +func (c *HealthChecksAggregatedListCall) Context(ctx context.Context) *HealthChecksAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesDeleteCall) Header() http.Header { +func (c *HealthChecksAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/healthChecks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.healthChecks.aggregatedList" call. +// Exactly one of *HealthChecksAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *HealthChecksAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*HealthChecksAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41116,7 +57009,7 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &HealthChecksAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41128,64 +57021,94 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified BackendService resource.", - // "httpMethod": "DELETE", - // "id": "compute.backendServices.delete", + // "description": "Retrieves the list of all HealthCheck resources, regional and global, available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.healthChecks.aggregatedList", // "parameterOrder": [ - // "project", - // "backendService" + // "project" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "Name of the project scoping this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/aggregated/healthChecks", // "response": { - // "$ref": "Operation" + // "$ref": "HealthChecksAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendServices.deleteSignedUrlKey": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
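A minimal caller-side sketch of the paging pattern that the Pages helper documented above implements for the aggregated health-check list. The import path (google.golang.org/api/compute/v0.beta, inferred from the beta-only aggregatedList/testIamPermissions methods in this file), the project ID, and the HealthChecksScopedList field name are assumptions, not taken from this patch.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()
	// Application Default Credentials; read-only scope is enough for listing.
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	project := "my-project" // hypothetical project ID
	// Pages drives maxResults/pageToken for us and stops once
	// NextPageToken comes back empty.
	err = svc.HealthChecks.AggregatedList(project).
		MaxResults(100).
		Pages(ctx, func(page *compute.HealthChecksAggregatedList) error {
			for scope, scoped := range page.Items {
				// Field name HealthChecks assumed from the generator's
				// usual XxxScopedList naming.
				fmt.Printf("%s: %d health checks\n", scope, len(scoped.HealthChecks))
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}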
+func (c *HealthChecksAggregatedListCall) Pages(ctx context.Context, f func(*HealthChecksAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type BackendServicesDeleteSignedUrlKeyCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.healthChecks.delete": + +type HealthChecksDeleteCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteSignedUrlKey: Deletes a key for validating requests with signed -// URLs for this backend service. -func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { - c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HealthCheck resource. +func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { + c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.urlParams_.Set("keyName", keyName) + c.healthCheck = healthCheck return c } @@ -41203,7 +57126,7 @@ func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendServi // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendServicesDeleteSignedUrlKeyCall { +func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -41211,7 +57134,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *Bac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesDeleteSignedUrlKeyCall { +func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41219,21 +57142,21 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *Ba // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesDeleteSignedUrlKeyCall { +func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { +func (c *HealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -41242,28 +57165,28 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Res var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.deleteSignedUrlKey" call. +// Do executes the "compute.healthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41294,24 +57217,18 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a key for validating requests with signed URLs for this backend service.", - // "httpMethod": "POST", - // "id": "compute.backendServices.deleteSignedUrlKey", + // "description": "Deletes the specified HealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.healthChecks.delete", // "parameterOrder": [ // "project", - // "backendService", - // "keyName" + // "healthCheck" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to delete.", // "location": "path", - // "required": true, - // "type": "string" - // }, - // "keyName": { - // "description": "The name of the Signed URL Key to delete.", - // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -41328,7 +57245,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/deleteSignedUrlKey", + // "path": "{project}/global/healthChecks/{healthCheck}", // "response": { // "$ref": "Operation" // }, @@ -41340,32 +57257,31 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } -// method id "compute.backendServices.get": +// method id "compute.healthChecks.get": -type BackendServicesGetCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HealthChecksGetCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified BackendService resource. Gets a list of -// available backend services. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get -func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { - c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HealthCheck resource. Gets a list of +// available health checks by making a list() request. +func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { + c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService + c.healthCheck = healthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { +func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41375,7 +57291,7 @@ func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { +func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -41383,21 +57299,21 @@ func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
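The IfNoneMatch / googleapi.IsNotModified flow described in the comments above, sketched from the caller's side. It assumes the same imports and *compute.Service setup as the earlier sketch, plus google.golang.org/api/googleapi, and a hypothetical ETag captured from a previous response header.

// getIfChanged refetches a health check only if it changed since lastETag.
func getIfChanged(ctx context.Context, svc *compute.Service, project, name, lastETag string) (*compute.HealthCheck, error) {
	hc, err := svc.HealthChecks.Get(project, name).
		IfNoneMatch(lastETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// Server answered 304: the copy identified by lastETag is still current.
		return nil, nil
	}
	return hc, err
}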
-func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { +func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesGetCall) Header() http.Header { +func (c *HealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -41409,7 +57325,7 @@ func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -41417,20 +57333,20 @@ func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.get" call. -// Exactly one of *BackendService or error will be non-nil. Any non-2xx +// Do executes the "compute.healthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { +// *HealthCheck.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41449,7 +57365,7 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendService{ + ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41461,184 +57377,32 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi } return ret, nil // { - // "description": "Returns the specified BackendService resource. Gets a list of available backend services.", + // "description": "Returns the specified HealthCheck resource. 
Gets a list of available health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.backendServices.get", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}", - // "response": { - // "$ref": "BackendService" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.backendServices.getHealth": - -type BackendServicesGetHealthCall struct { - s *Service - project string - backendService string - resourcegroupreference *ResourceGroupReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// GetHealth: Gets the most recent health check results for this -// BackendService. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth -func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { - c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesGetHealthCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.getHealth" call. -// Exactly one of *BackendServiceGroupHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendServiceGroupHealth{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the most recent health check results for this BackendService.", - // "httpMethod": "POST", - // "id": "compute.backendServices.getHealth", + // "id": "compute.healthChecks.get", // "parameterOrder": [ // "project", - // "backendService" + // "healthCheck" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the queried instance belongs.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, // "project": { + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/getHealth", - // "request": { - // "$ref": "ResourceGroupReference" - // }, + // "path": "{project}/global/healthChecks/{healthCheck}", // "response": { - // "$ref": "BackendServiceGroupHealth" + // "$ref": "HealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -41649,26 +57413,23 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen } -// method id "compute.backendServices.insert": +// method id "compute.healthChecks.insert": -type BackendServicesInsertCall struct { - s *Service - project string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksInsertCall struct { + s *Service + project string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a BackendService resource in the 
specified project -// using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a backend -// service. Read Restrictions and Guidelines for more information. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert -func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { - c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { + c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendservice = backendservice + c.healthcheck = healthcheck return c } @@ -41686,7 +57447,7 @@ func (r *BackendServicesService) Insert(project string, backendservice *BackendS // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { +func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -41694,7 +57455,7 @@ func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { +func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41702,35 +57463,35 @@ func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { +func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesInsertCall) Header() http.Header { +func (c *HealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -41743,14 +57504,14 @@ func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.insert" call. +// Do executes the "compute.healthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41781,9 +57542,9 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.backendServices.insert", + // "id": "compute.healthChecks.insert", // "parameterOrder": [ // "project" // ], @@ -41801,9 +57562,9 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/backendServices", + // "path": "{project}/global/healthChecks", // "request": { - // "$ref": "BackendService" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -41816,9 +57577,9 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.backendServices.list": +// method id "compute.healthChecks.list": -type BackendServicesListCall struct { +type HealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -41827,11 +57588,10 @@ type BackendServicesListCall struct { header_ http.Header } -// List: Retrieves the list of BackendService resources available to the +// List: Retrieves the list of HealthCheck resources available to the // specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list -func (r *BackendServicesService) List(project string) *BackendServicesListCall { - c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HealthChecksService) List(project string) *HealthChecksListCall { + c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -41858,7 +57618,7 @@ func (r *BackendServicesService) List(project string) *BackendServicesListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
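A caller-side sketch of the filter and ordering parameters described above, reusing the client setup from the earlier sketch (including the fmt import); the filter value is only an example of the documented syntax and the resource name in it is hypothetical.

// listRecentChecks prints health checks not named example-check, newest first.
func listRecentChecks(ctx context.Context, svc *compute.Service, project string) error {
	return svc.HealthChecks.List(project).
		Filter("name != example-check").
		OrderBy("creationTimestamp desc").
		MaxResults(50).
		Pages(ctx, func(page *compute.HealthCheckList) error {
			for _, hc := range page.Items {
				fmt.Println(hc.Name, hc.Type)
			}
			return nil
		})
}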
-func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { +func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -41869,7 +57629,7 @@ func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { +func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -41886,7 +57646,7 @@ func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { +func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -41894,7 +57654,7 @@ func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { +func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -41902,7 +57662,7 @@ func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { +func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41912,7 +57672,7 @@ func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { +func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -41920,21 +57680,21 @@ func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServices // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { +func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesListCall) Header() http.Header { +func (c *HealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -41946,7 +57706,7 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -41959,14 +57719,14 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.list" call. -// Exactly one of *BackendServiceList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.healthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { +func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41985,7 +57745,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceList{ + ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41997,9 +57757,9 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ } return ret, nil // { - // "description": "Retrieves the list of BackendService resources available to the specified project.", + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.backendServices.list", + // "id": "compute.healthChecks.list", // "parameterOrder": [ // "project" // ], @@ -42035,9 +57795,9 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // "type": "string" // } // }, - // "path": "{project}/global/backendServices", + // "path": "{project}/global/healthChecks", // "response": { - // "$ref": "BackendServiceList" + // "$ref": "HealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -42051,7 +57811,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. 
// The provided context supersedes any context provided to the Context method. -func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { +func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -42069,30 +57829,26 @@ func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServ } } -// method id "compute.backendServices.patch": +// method id "compute.healthChecks.patch": -type BackendServicesPatchCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksPatchCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch -func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { - c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { + c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.backendservice = backendservice + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -42110,7 +57866,7 @@ func (r *BackendServicesService) Patch(project string, backendService string, ba // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { +func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -42118,7 +57874,7 @@ func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { +func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42126,35 +57882,35 @@ func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServices // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { +func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesPatchCall) Header() http.Header { +func (c *HealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -42162,20 +57918,347 @@ func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.patch" call. +// Do executes the "compute.healthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.healthChecks.patch", + // "parameterOrder": [ + // "project", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/healthChecks/{healthCheck}", + // "request": { + // "$ref": "HealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.healthChecks.testIamPermissions": + +type HealthChecksTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *HealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HealthChecksTestIamPermissionsCall { + c := &HealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HealthChecksTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HealthChecksTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *HealthChecksTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.healthChecks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.healthChecks.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/healthChecks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} 
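The generated calls above all follow the same builder pattern: construct a call from the service, chain optional parameters (RequestId, Fields, Context), then invoke Do or Pages. A minimal usage sketch follows; it is not part of the vendored diff and assumes the package is imported as google.golang.org/api/compute/v0.beta (the exact version path in this tree may differ), that Application Default Credentials are available via golang.org/x/oauth2/google, and that the project ID, health check name, and request ID below are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()

	// Authenticated HTTP client plus the generated Service wrapper.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatalf("default client: %v", err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatalf("compute service: %v", err)
	}

	const project = "my-project" // placeholder project ID

	// Pages walks every page of results and stops early if the callback
	// returns a non-nil error.
	err = svc.HealthChecks.List(project).MaxResults(50).Pages(ctx, func(page *compute.HealthCheckList) error {
		for _, hc := range page.Items {
			fmt.Println(hc.Name, hc.Type)
		}
		return nil
	})
	if err != nil {
		log.Fatalf("list health checks: %v", err)
	}

	// TestIamPermissions reports which of the requested permissions the
	// caller holds on the named resource.
	perms, err := svc.HealthChecks.TestIamPermissions(project, "my-health-check", &compute.TestPermissionsRequest{
		Permissions: []string{"compute.healthChecks.update"},
	}).Context(ctx).Do()
	if err != nil {
		log.Fatalf("test permissions: %v", err)
	}
	fmt.Println("granted:", perms.Permissions)

	// Patch sends a JSON merge patch; RequestId makes a retried call
	// idempotent and should be a freshly generated UUID in real code.
	op, err := svc.HealthChecks.Patch(project, "my-health-check", &compute.HealthCheck{
		CheckIntervalSec: 10,
	}).RequestId("00000000-0000-4000-8000-000000000001").Context(ctx).Do()
	if err != nil {
		log.Fatalf("patch health check: %v", err)
	}
	fmt.Println("started operation:", op.Name)
}

Note that Patch, Update, Insert, and Delete all return an Operation rather than the modified resource; callers typically poll the corresponding operations service until the operation reports status DONE before reading the resource back.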
+ +// method id "compute.healthChecks.update": + +type HealthChecksUpdateCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { + c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.healthCheck = healthCheck + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HealthChecksUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "healthCheck": c.healthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.healthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42206,16 +58289,16 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.backendServices.patch", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.healthChecks.update", // "parameterOrder": [ // "project", - // "backendService" + // "healthCheck" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -42234,9 +58317,9 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/global/healthChecks/{healthCheck}", // "request": { - // "$ref": "BackendService" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -42249,25 +58332,23 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendServices.setSecurityPolicy": +// method id "compute.httpHealthChecks.delete": -type BackendServicesSetSecurityPolicyCall struct { - s *Service - project string - backendService string - securitypolicyreference *SecurityPolicyReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksDeleteCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetSecurityPolicy: Sets the security policy for the specified backend -// service. -func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { - c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HttpHealthCheck resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete +func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { + c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.securitypolicyreference = securitypolicyreference + c.httpHealthCheck = httpHealthCheck return c } @@ -42285,7 +58366,7 @@ func (r *BackendServicesService) SetSecurityPolicy(project string, backendServic // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetSecurityPolicyCall { +func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -42293,7 +58374,7 @@ func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *Back // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetSecurityPolicyCall { +func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42301,56 +58382,51 @@ func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *Bac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetSecurityPolicyCall { +func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { +func (c *HttpHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.setSecurityPolicy" call. +// Do executes the "compute.httpHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42381,17 +58457,18 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the security policy for the specified backend service.", - // "httpMethod": "POST", - // "id": "compute.backendServices.setSecurityPolicy", + // "description": "Deletes the specified HttpHealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.httpHealthChecks.delete", // "parameterOrder": [ // "project", - // "backendService" + // "httpHealthCheck" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -42408,10 +58485,7 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/setSecurityPolicy", - // "request": { - // "$ref": "SecurityPolicyReference" - // }, + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { // "$ref": "Operation" // }, @@ -42423,89 +58497,97 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) } -// method id "compute.backendServices.testIamPermissions": +// method id "compute.httpHealthChecks.get": -type BackendServicesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksGetCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *BackendServicesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendServicesTestIamPermissionsCall { - c := &BackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HttpHealthCheck resource. Gets a list of +// available HTTP health checks by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get +func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { + c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.httpHealthCheck = httpHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendServicesTestIamPermissionsCall { +func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesTestIamPermissionsCall) Context(ctx context.Context) *BackendServicesTestIamPermissionsCall { +func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesTestIamPermissionsCall) Header() http.Header { +func (c *HttpHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.httpHealthChecks.get" call. +// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HttpHealthCheck.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42524,7 +58606,7 @@ func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &HttpHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -42536,35 +58618,32 @@ func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.backendServices.testIamPermissions", + // "description": "Returns the specified HttpHealthCheck resource. 
Gets a list of available HTTP health checks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.httpHealthChecks.get", // "parameterOrder": [ // "project", - // "resource" + // "httpHealthCheck" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "HttpHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -42575,28 +58654,24 @@ func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } -// method id "compute.backendServices.update": +// method id "compute.httpHealthChecks.insert": -type BackendServicesUpdateCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksInsertCall struct { + s *Service + project string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update -func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { - c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HttpHealthCheck resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert +func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { + c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.backendservice = backendservice + c.httphealthcheck = httphealthcheck return c } @@ -42614,7 +58689,7 @@ func (r *BackendServicesService) Update(project string, backendService string, b // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { +func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -42622,7 +58697,7 @@ func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { +func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42630,56 +58705,55 @@ func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { +func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesUpdateCall) Header() http.Header { +func (c *HttpHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.update" call. +// Do executes the "compute.httpHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42710,21 +58784,13 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", - // "httpMethod": "PUT", - // "id": "compute.backendServices.update", + // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.httpHealthChecks.insert", // "parameterOrder": [ - // "project", - // "backendService" + // "project" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -42738,9 +58804,9 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/global/httpHealthChecks", // "request": { - // "$ref": "BackendService" + // "$ref": "HttpHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -42753,9 +58819,9 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.diskTypes.aggregatedList": +// method id "compute.httpHealthChecks.list": -type DiskTypesAggregatedListCall struct { +type HttpHealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -42764,10 +58830,11 @@ type DiskTypesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of disk types. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList -func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { - c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HttpHealthCheck resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list +func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { + c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -42794,7 +58861,7 @@ func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedLi // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { +func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -42805,7 +58872,7 @@ func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregated // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { +func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -42822,7 +58889,7 @@ func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAgg // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { +func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -42830,7 +58897,7 @@ func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregat // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { +func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -42838,7 +58905,7 @@ func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { +func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42848,7 +58915,7 @@ func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { +func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -42856,21 +58923,21 @@ func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { +func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DiskTypesAggregatedListCall) Header() http.Header { +func (c *HttpHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -42882,7 +58949,7 @@ func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -42895,14 +58962,14 @@ func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.aggregatedList" call. -// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any +// Do executes the "compute.httpHealthChecks.list" call. +// Exactly one of *HttpHealthCheckList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was +// *HttpHealthCheckList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { +func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42921,7 +58988,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskTypeAggregatedList{ + ret := &HttpHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -42933,9 +59000,9 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of disk types.", + // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.diskTypes.aggregatedList", + // "id": "compute.httpHealthChecks.list", // "parameterOrder": [ // "project" // ], @@ -42971,9 +59038,9 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // "type": "string" // } // }, - // "path": "{project}/aggregated/diskTypes", + // "path": "{project}/global/httpHealthChecks", // "response": { - // "$ref": "DiskTypeAggregatedList" + // "$ref": "HttpHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -42987,7 +59054,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { +func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -43005,100 +59072,110 @@ func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTyp } } -// method id "compute.diskTypes.get": +// method id "compute.httpHealthChecks.patch": -type DiskTypesGetCall struct { - s *Service - project string - zone string - diskType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpHealthChecksPatchCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified disk type. Gets a list of available disk -// types by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get -func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { - c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch +func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { + c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.diskType = diskType + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { +func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { +func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesGetCall) Header() http.Header { +func (c *HttpHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "diskType": c.diskType, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.get" call. -// Exactly one of *DiskType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.httpHealthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { +func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43117,7 +59194,7 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43129,19 +59206,18 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { } return ret, nil // { - // "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.diskTypes.get", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.httpHealthChecks.patch", // "parameterOrder": [ // "project", - // "zone", - // "diskType" + // "httpHealthCheck" // ], // "parameters": { - // "diskType": { - // "description": "Name of the disk type to return.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -43152,181 +59228,110 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/diskTypes/{diskType}", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "request": { + // "$ref": "HttpHealthCheck" + // }, // "response": { - // "$ref": "DiskType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.diskTypes.list": +// method id "compute.httpHealthChecks.testIamPermissions": -type DiskTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpHealthChecksTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of disk types available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list -func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { - c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *HttpHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpHealthChecksTestIamPermissionsCall { + c := &HttpHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { +func (c *HttpHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpHealthChecksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { +func (c *HttpHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpHealthChecksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DiskTypesListCall) Header() http.Header { +func (c *HttpHealthChecksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.list" call. -// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *DiskTypeList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { +// Do executes the "compute.httpHealthChecks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43345,7 +59350,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskTypeList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43357,37 +59362,14 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err } return ret, nil // { - // "description": "Retrieves a list of disk types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.diskTypes.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.httpHealthChecks.testIamPermissions", // "parameterOrder": [ // "project", - // "zone" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -43395,17 +59377,20 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/diskTypes", + // "path": "{project}/global/httpHealthChecks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "DiskTypeList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -43416,49 +59401,26 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.addResourcePolicies": +// method id "compute.httpHealthChecks.update": -type DisksAddResourcePoliciesCall struct { - s *Service - project string - zone string - disk string - disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksUpdateCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddResourcePolicies: Adds existing resource policies to a disk. You -// can only add one policy which will be applied to this disk for -// scheduling snapshot creation. -func (r *DisksService) AddResourcePolicies(project string, zone string, disk string, disksaddresourcepoliciesrequest *DisksAddResourcePoliciesRequest) *DisksAddResourcePoliciesCall { - c := &DisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. 
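The testIamPermissions method added here follows the standard request-body pattern of the generated client. A short sketch, reusing the service value and imports from the earlier sketch and assuming the conventional Permissions fields on TestPermissionsRequest and TestPermissionsResponse, which are not shown in this hunk:

// checkHTTPHealthCheckPerms reports which of the requested permissions the
// caller holds on a global HttpHealthCheck resource.
func checkHTTPHealthCheckPerms(ctx context.Context, svc *compute.Service, project, resource string) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		// Permission names are illustrative.
		Permissions: []string{"compute.httpHealthChecks.get", "compute.httpHealthChecks.update"},
	}
	resp, err := svc.HttpHealthChecks.TestIamPermissions(project, resource, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil // the granted subset of the requested permissions
}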
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update +func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { + c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksaddresourcepoliciesrequest = disksaddresourcepoliciesrequest + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck return c } @@ -43476,7 +59438,7 @@ func (r *DisksService) AddResourcePolicies(project string, zone string, disk str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksAddResourcePoliciesCall) RequestId(requestId string) *DisksAddResourcePoliciesCall { +func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -43484,7 +59446,7 @@ func (c *DisksAddResourcePoliciesCall) RequestId(requestId string) *DisksAddReso // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksAddResourcePoliciesCall { +func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43492,57 +59454,56 @@ func (c *DisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksAddRes // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksAddResourcePoliciesCall) Context(ctx context.Context) *DisksAddResourcePoliciesCall { +func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksAddResourcePoliciesCall) Header() http.Header { +func (c *HttpHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksaddresourcepoliciesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/addResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.addResourcePolicies" call. +// Do executes the "compute.httpHealthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43573,17 +59534,16 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Adds existing resource policies to a disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", - // "httpMethod": "POST", - // "id": "compute.disks.addResourcePolicies", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpHealthChecks.update", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "httpHealthCheck" // ], // "parameters": { - // "disk": { - // "description": "The disk name for this request.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -43600,18 +59560,11 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/addResourcePolicies", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "request": { - // "$ref": "DisksAddResourcePoliciesRequest" + // "$ref": "HttpHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -43624,156 +59577,100 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.disks.aggregatedList": - -type DisksAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of persistent disks. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList -func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { - c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c +// method id "compute.httpsHealthChecks.delete": + +type HttpsHealthChecksDeleteCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Delete: Deletes the specified HttpsHealthCheck resource. +func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { + c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpsHealthCheck = httpsHealthCheck return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { +func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
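The httpHealthChecks.update method added above is a full-resource PUT, and the requestId parameter it documents makes retried submissions idempotent. A hedged sketch under the same assumptions as before; the HttpHealthCheck field names used below are assumptions, since the struct is not shown in this hunk:

// updateHTTPHealthCheck replaces an existing HttpHealthCheck definition.
// A client-generated UUID passed as requestId lets the server discard
// accidental duplicate submissions of the same update.
func updateHTTPHealthCheck(ctx context.Context, svc *compute.Service, project, name, requestID string) (*compute.Operation, error) {
	check := &compute.HttpHealthCheck{
		Name:        name,
		Port:        8080,       // assumed field
		RequestPath: "/healthz", // assumed field
	}
	return svc.HttpHealthChecks.Update(project, name, check).
		RequestId(requestID).
		Context(ctx).
		Do()
}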
-func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { +func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksAggregatedListCall) Header() http.Header { +func (c *HttpsHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.aggregatedList" call. -// Exactly one of *DiskAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { +// Do executes the "compute.httpsHealthChecks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43792,7 +59689,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43804,34 +59701,19 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega } return ret, nil // { - // "description": "Retrieves an aggregated list of persistent disks.", - // "httpMethod": "GET", - // "id": "compute.disks.aggregatedList", + // "description": "Deletes the specified HttpsHealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.httpsHealthChecks.delete", // "parameterOrder": [ - // "project" + // "project", + // "httpsHealthCheck" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -43840,153 +59722,115 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/disks", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "response": { - // "$ref": "DiskAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.createSnapshot": +// method id "compute.httpsHealthChecks.get": -type DisksCreateSnapshotCall struct { - s *Service - project string - zone string - disk string - snapshot *Snapshot - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksGetCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// CreateSnapshot: Creates a snapshot of a specified persistent disk. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot -func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { - c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HttpsHealthCheck resource. Gets a list of +// available HTTPS health checks by making a list() request. 
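The httpsHealthChecks.delete method just added returns an Operation and accepts the same idempotency requestId. A minimal sketch under the same assumptions as the earlier ones:

// deleteHTTPSHealthCheck removes a global HttpsHealthCheck; the returned
// Operation describes the asynchronous deletion.
func deleteHTTPSHealthCheck(ctx context.Context, svc *compute.Service, project, name, requestID string) (*compute.Operation, error) {
	return svc.HttpsHealthChecks.Delete(project, name).RequestId(requestID).Context(ctx).Do()
}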
+func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { + c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.snapshot = snapshot - return c -} - -// GuestFlush sets the optional parameter "guestFlush": -func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { - c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { - c.urlParams_.Set("requestId", requestId) + c.httpsHealthCheck = httpsHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { +func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { +func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DisksCreateSnapshotCall) Header() http.Header { +func (c *HttpsHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.createSnapshot" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.httpsHealthChecks.get" call. +// Exactly one of *HttpsHealthCheck or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheck.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44005,7 +59849,7 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &HttpsHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44017,83 +59861,59 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a snapshot of a specified persistent disk.", - // "httpMethod": "POST", - // "id": "compute.disks.createSnapshot", + // "description": "Returns the specified HttpsHealthCheck resource. 
Gets a list of available HTTPS health checks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.httpsHealthChecks.get", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "httpsHealthCheck" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to snapshot.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "guestFlush": { - // "location": "query", - // "type": "boolean" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", - // "request": { - // "$ref": "Snapshot" - // }, + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "response": { - // "$ref": "Operation" + // "$ref": "HttpsHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.delete": +// method id "compute.httpsHealthChecks.insert": -type DisksDeleteCall struct { - s *Service - project string - zone string - disk string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksInsertCall struct { + s *Service + project string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified persistent disk. Deleting a disk -// removes its data permanently and is irreversible. However, deleting a -// disk does not delete any snapshots previously made from the disk. You -// must separately delete snapshots. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete -func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { - c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HttpsHealthCheck resource in the specified project +// using the data included in the request. 
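The httpsHealthChecks.get method supports conditional fetches via IfNoneMatch paired with googleapi.IsNotModified, as the surrounding comments describe. A sketch assuming the googleapi package is imported from google.golang.org/api/googleapi alongside the earlier imports:

// getHTTPSHealthCheckIfChanged refetches a health check only when its ETag no
// longer matches lastETag; it returns (nil, nil) while the cached copy is fresh.
func getHTTPSHealthCheckIfChanged(ctx context.Context, svc *compute.Service, project, name, lastETag string) (*compute.HttpsHealthCheck, error) {
	hc, err := svc.HttpsHealthChecks.Get(project, name).IfNoneMatch(lastETag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // server answered 304 Not Modified
	}
	if err != nil {
		return nil, err
	}
	return hc, nil
}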
+func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { + c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + c.httpshealthcheck = httpshealthcheck return c } @@ -44111,7 +59931,7 @@ func (r *DisksService) Delete(project string, zone string, disk string) *DisksDe // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { +func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -44119,7 +59939,7 @@ func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { +func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44127,52 +59947,55 @@ func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { +func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksDeleteCall) Header() http.Header { +func (c *HttpsHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.delete" call. +// Do executes the "compute.httpsHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44203,21 +60026,13 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", - // "httpMethod": "DELETE", - // "id": "compute.disks.delete", + // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.httpsHealthChecks.insert", // "parameterOrder": [ - // "project", - // "zone", - // "disk" + // "project" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -44229,16 +60044,12 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}", + // "path": "{project}/global/httpsHealthChecks", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, // "response": { // "$ref": "Operation" // }, @@ -44250,34 +60061,92 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.get": +// method id "compute.httpsHealthChecks.list": -type DisksGetCall struct { +type HttpsHealthChecksListCall struct { s *Service project string - zone string - disk string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns a specified persistent disk. Gets a list of available -// persistent disks by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get -func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { - c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HttpsHealthCheck resources available to +// the specified project. +func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { + c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
+func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { +func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44287,7 +60156,7 @@ func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { +func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -44295,21 +60164,21 @@ func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { +func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksGetCall) Header() http.Header { +func (c *HttpsHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -44321,7 +60190,7 @@ func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -44330,20 +60199,18 @@ func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.get" call. -// Exactly one of *Disk or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Disk.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { +// Do executes the "compute.httpsHealthChecks.list" call. +// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44362,7 +60229,7 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Disk{ + ret := &HttpsHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44374,20 +60241,34 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", + // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.disks.get", + // "id": "compute.httpsHealthChecks.list", // "parameterOrder": [ - // "project", - // "zone", - // "disk" + // "project" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -44396,18 +60277,11 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}", + // "path": "{project}/global/httpsHealthChecks", // "response": { - // "$ref": "Disk" + // "$ref": "HttpsHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -44418,99 +60292,130 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } -// method id "compute.disks.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type DisksGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.httpsHealthChecks.patch": + +type HttpsHealthChecksPatchCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { - c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { + c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksGetIamPolicyCall) Fields(s ...googleapi.Field) *DisksGetIamPolicyCall { +func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksGetIamPolicyCall) IfNoneMatch(entityTag string) *DisksGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksGetIamPolicyCall) Context(ctx context.Context) *DisksGetIamPolicyCall { +func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksGetIamPolicyCall) Header() http.Header { +func (c *HttpsHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.httpsHealthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44529,7 +60434,7 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44541,106 +60446,75 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.disks.getIamPolicy", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.httpsHealthChecks.patch", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "httpsHealthCheck" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to patch.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/getIamPolicy", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.disks.insert": +// method id "compute.httpsHealthChecks.testIamPermissions": -type DisksInsertCall struct { - s *Service - project string - zone string - disk *Disk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a persistent disk in the specified project using the -// data in the request. You can create a disk with a sourceImage, a -// sourceSnapshot, or create an empty 500 GB data disk by omitting all -// properties. You can also create a disk that is larger than the -// default size by specifying the sizeGb property. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert -func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { - c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *HttpsHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpsHealthChecksTestIamPermissionsCall { + c := &HttpsHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// SourceImage sets the optional parameter "sourceImage": Source image -// to restore onto a disk. -func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { - c.urlParams_.Set("sourceImage", sourceImage) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { +func (c *HttpsHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpsHealthChecksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44648,35 +60522,35 @@ func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { +func (c *HttpsHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpsHealthChecksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DisksInsertCall) Header() http.Header { +func (c *HttpsHealthChecksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -44684,20 +60558,20 @@ func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.httpsHealthChecks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44716,7 +60590,7 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44728,12 +60602,12 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. 
You can also create a disk that is larger than the default size by specifying the sizeGb property.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.disks.insert", + // "id": "compute.httpsHealthChecks.testIamPermissions", // "parameterOrder": [ // "project", - // "zone" + // "resource" // ], // "parameters": { // "project": { @@ -44743,193 +60617,132 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "sourceImage": { - // "description": "Optional. Source image to restore onto a disk.", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks", + // "path": "{project}/global/httpsHealthChecks/{resource}/testIamPermissions", // "request": { - // "$ref": "Disk" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.list": +// method id "compute.httpsHealthChecks.update": -type DisksListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksUpdateCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of persistent disks contained within the -// specified zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list -func (r *DisksService) List(project string, zone string) *DisksListCall { - c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. 
+func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { + c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *DisksListCall) Filter(filter string) *DisksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. 
-func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { +func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { +func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksListCall) Header() http.Header { +func (c *HttpsHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.list" call. -// Exactly one of *DiskList or error will be non-nil. 
Any non-2xx status -// code is an error. Response headers are in either -// *DiskList.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.httpsHealthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { +func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44948,7 +60761,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44960,35 +60773,19 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { } return ret, nil // { - // "description": "Retrieves a list of persistent disks contained within the specified zone.", - // "httpMethod": "GET", - // "id": "compute.disks.list", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpsHealthChecks.update", // "parameterOrder": [ // "project", - // "zone" + // "httpsHealthCheck" // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "parameters": { + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -44998,68 +60795,44 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, // "response": { - // "$ref": "DiskList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.removeResourcePolicies": +// method id "compute.images.delete": -type DisksRemoveResourcePoliciesCall struct { - s *Service - project string - zone string - disk string - disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesDeleteCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveResourcePolicies: Removes resource policies from a disk. -func (r *DisksService) RemoveResourcePolicies(project string, zone string, disk string, disksremoveresourcepoliciesrequest *DisksRemoveResourcePoliciesRequest) *DisksRemoveResourcePoliciesCall { - c := &DisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified image. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete +func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { + c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksremoveresourcepoliciesrequest = disksremoveresourcepoliciesrequest + c.image = image return c } @@ -45077,7 +60850,7 @@ func (r *DisksService) RemoveResourcePolicies(project string, zone string, disk // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksRemoveResourcePoliciesCall) RequestId(requestId string) *DisksRemoveResourcePoliciesCall { +func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -45085,7 +60858,7 @@ func (c *DisksRemoveResourcePoliciesCall) RequestId(requestId string) *DisksRemo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksRemoveResourcePoliciesCall { +func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45093,57 +60866,51 @@ func (c *DisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *DisksRem // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksRemoveResourcePoliciesCall) Context(ctx context.Context) *DisksRemoveResourcePoliciesCall { +func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DisksRemoveResourcePoliciesCall) Header() http.Header { +func (c *ImagesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksremoveresourcepoliciesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.removeResourcePolicies" call. +// Do executes the "compute.images.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45174,17 +60941,16 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Removes resource policies from a disk.", - // "httpMethod": "POST", - // "id": "compute.disks.removeResourcePolicies", + // "description": "Deletes the specified image.", + // "httpMethod": "DELETE", + // "id": "compute.images.delete", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "image" // ], // "parameters": { - // "disk": { - // "description": "The disk name for this request.", + // "image": { + // "description": "Name of the image resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -45201,19 +60967,9 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/removeResourcePolicies", - // "request": { - // "$ref": "DisksRemoveResourcePoliciesRequest" - // }, + // "path": "{project}/global/images/{image}", // "response": { // "$ref": "Operation" // }, @@ -45225,27 +60981,28 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.disks.resize": +// method id "compute.images.deprecate": -type DisksResizeCall struct { - s *Service - project string - zone string - disk string - disksresizerequest *DisksResizeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesDeprecateCall struct { + s *Service + project string + image string + deprecationstatus *DeprecationStatus + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the specified persistent disk. You can only increase -// the size of the disk. -func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { - c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Deprecate: Sets the deprecation status of an image. +// +// If an empty request body is given, clears the deprecation status +// instead. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate +func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { + c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksresizerequest = disksresizerequest + c.image = image + c.deprecationstatus = deprecationstatus return c } @@ -45263,7 +61020,7 @@ func (r *DisksService) Resize(project string, zone string, disk string, disksres // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { +func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -45271,7 +61028,7 @@ func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { +func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45279,35 +61036,35 @@ func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { +func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksResizeCall) Header() http.Header { +func (c *ImagesDeprecateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -45316,20 +61073,19 @@ func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.resize" call. +// Do executes the "compute.images.deprecate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45360,17 +61116,16 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", + // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", // "httpMethod": "POST", - // "id": "compute.disks.resize", + // "id": "compute.images.deprecate", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "image" // ], // "parameters": { - // "disk": { - // "description": "The name of the persistent disk.", + // "image": { + // "description": "Image name.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -45387,18 +61142,11 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/resize", + // "path": "{project}/global/images/{image}/deprecate", // "request": { - // "$ref": "DisksResizeRequest" + // "$ref": "DeprecationStatus" // }, // "response": { // "$ref": "Operation" @@ -45411,92 +61159,97 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.setIamPolicy": +// method id "compute.images.get": -type DisksSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesGetCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *DisksService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *DisksSetIamPolicyCall { - c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified image. Gets a list of available images by +// making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get +func (r *ImagesService) Get(project string, image string) *ImagesGetCall { + c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + c.image = image return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyCall { +func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *DisksSetIamPolicyCall) Context(ctx context.Context) *DisksSetIamPolicyCall { +func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksSetIamPolicyCall) Header() http.Header { +func (c *ImagesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// Do executes the "compute.images.get" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) +// *Image.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45515,7 +61268,7 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45527,157 +61280,132 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.disks.setIamPolicy", + // "description": "Returns the specified image. 
Gets a list of available images by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.images.get", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "image" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "image": { + // "description": "Name of the image resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/setIamPolicy", - // "request": { - // "$ref": "ZoneSetPolicyRequest" - // }, + // "path": "{project}/global/images/{image}", // "response": { - // "$ref": "Policy" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.setLabels": +// method id "compute.images.getFromFamily": -type DisksSetLabelsCall struct { - s *Service - project string - zone string - resource string - zonesetlabelsrequest *ZoneSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesGetFromFamilyCall struct { + s *Service + project string + family string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on a disk. To learn more about labels, -// read the Labeling Resources documentation. -func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { - c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetFromFamily: Returns the latest image that is part of an image +// family and is not deprecated. +func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { + c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetlabelsrequest = zonesetlabelsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. 
This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { - c.urlParams_.Set("requestId", requestId) + c.family = family return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { +func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { +func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksSetLabelsCall) Header() http.Header { +func (c *ImagesGetFromFamilyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "family": c.family, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
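Editor's note: the ImagesService.GetFromFamily call introduced above resolves the newest non-deprecated image in an image family. A minimal usage sketch, not part of the diff, assuming Application Default Credentials are available and using the public debian-cloud project; the family name is illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Build an authenticated HTTP client from Application Default Credentials.
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Resolve the latest non-deprecated image in the family.
	img, err := svc.Images.GetFromFamily("debian-cloud", "debian-9").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(img.SelfLink)
}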
-func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.images.getFromFamily" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45696,7 +61424,7 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45708,143 +61436,132 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.disks.setLabels", + // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "httpMethod": "GET", + // "id": "compute.images.getFromFamily", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "family" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "family": { + // "description": "Name of the image family to search for.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/setLabels", - // "request": { - // "$ref": "ZoneSetLabelsRequest" - // }, + // "path": "{project}/global/images/family/{family}", // "response": { - // "$ref": "Operation" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.testIamPermissions": +// method id "compute.images.getIamPolicy": -type DisksTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { - c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { + c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone c.resource = resource - c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { +func (c *ImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
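Editor's note: the IfNoneMatch setter documented above turns a read into a conditional request: when the supplied ETag still matches, the server answers 304 and Do returns an error that googleapi.IsNotModified reports as true. A hedged sketch of that pattern, not part of the diff; svc is assumed to be an initialized *compute.Service and cachedETag to have been captured from a prior response's Etag header:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getImageIfChanged refetches an image only when its ETag differs from the
// value cached by an earlier call. It returns (nil, nil) when unchanged.
func getImageIfChanged(ctx context.Context, svc *compute.Service, project, image, cachedETag string) (*compute.Image, error) {
	img, err := svc.Images.Get(project, image).IfNoneMatch(cachedETag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		// The resource has not changed since cachedETag was recorded.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return img, nil
}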
+func (c *ImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *ImagesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { +func (c *ImagesGetIamPolicyCall) Context(ctx context.Context) *ImagesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksTestIamPermissionsCall) Header() http.Header { +func (c *ImagesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.images.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45863,7 +61580,7 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45875,12 +61592,11 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.disks.testIamPermissions", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.images.getIamPolicy", // "parameterOrder": [ // "project", - // "zone", // "resource" // ], // "parameters": { @@ -45894,24 +61610,14 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/images/{resource}/getIamPolicy", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -45922,23 +61628,31 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } -// method id "compute.firewalls.delete": +// method id "compute.images.insert": -type FirewallsDeleteCall struct { +type ImagesInsertCall struct { s *Service project string - firewall string + image *Image urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified firewall. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete -func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { - c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an image in the specified project using the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert +func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { + c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.image = image + return c +} + +// ForceCreate sets the optional parameter "forceCreate": Force image +// creation if true. +func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { + c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) return c } @@ -45956,7 +61670,7 @@ func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDel // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
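Editor's note: the RequestId parameter documented above makes retried inserts idempotent when the client supplies its own UUID. A small sketch, not part of the diff; it assumes github.com/google/uuid as an external helper for generating the ID, and svc is an initialized *compute.Service:

package example

import (
	"context"

	"github.com/google/uuid" // assumed dependency, used only to mint the request ID
	compute "google.golang.org/api/compute/v1"
)

// insertImageIdempotently creates an image and tags the request with a
// client-generated UUID so a retry after a timeout cannot create a duplicate.
func insertImageIdempotently(ctx context.Context, svc *compute.Service, project string, img *compute.Image) (*compute.Operation, error) {
	requestID := uuid.New().String() // any non-zero UUID, per the documentation above
	return svc.Images.Insert(project, img).RequestId(requestID).Context(ctx).Do()
}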
-func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { +func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -45964,7 +61678,7 @@ func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { +func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45972,51 +61686,55 @@ func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { +func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsDeleteCall) Header() http.Header { +func (c *ImagesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.delete" call. +// Do executes the "compute.images.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46047,20 +61765,17 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified firewall.", - // "httpMethod": "DELETE", - // "id": "compute.firewalls.delete", + // "description": "Creates an image in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.images.insert", // "parameterOrder": [ - // "project", - // "firewall" + // "project" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" + // "forceCreate": { + // "description": "Force image creation if true.", + // "location": "query", + // "type": "boolean" // }, // "project": { // "description": "Project ID for this request.", @@ -46075,43 +61790,116 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/global/images", + // "request": { + // "$ref": "Image" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.firewalls.get": +// method id "compute.images.list": -type FirewallsGetCall struct { +type ImagesListCall struct { s *Service project string - firewall string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified firewall. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get -func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { - c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of custom images available to the specified +// project. Custom images are images you create that belong to your +// project. This method does not get any images that belong to other +// projects, including publicly-available images, like Debian 8. If you +// want to get a list of publicly-available images, use this method to +// make a request to the respective image project, such as debian-cloud +// or windows-cloud. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list +func (r *ImagesService) List(project string) *ImagesListCall { + c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
+// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ImagesListCall) Filter(filter string) *ImagesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { +func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46121,7 +61909,7 @@ func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { +func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { c.ifNoneMatch_ = entityTag return c } @@ -46129,21 +61917,21 @@ func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
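Editor's note: the list parameters described above (maxResults, orderBy, pageToken) combine with the Pages helper that this diff also adds for ImagesListCall to walk every page of results. A usage sketch, not part of the diff, assuming Application Default Credentials; the project ID is illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// List the project's custom images, newest first, walking every page.
	call := svc.Images.List("my-project").
		OrderBy("creationTimestamp desc").
		MaxResults(100)
	err = call.Pages(ctx, func(page *compute.ImageList) error {
		for _, img := range page.Items {
			fmt.Println(img.Name, img.Family)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}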
-func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { +func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsGetCall) Header() http.Header { +func (c *ImagesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -46155,7 +61943,7 @@ func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -46163,20 +61951,19 @@ func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.get" call. -// Exactly one of *Firewall or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Firewall.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.images.list" call. +// Exactly one of *ImageList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ImageList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { +func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46195,7 +61982,7 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Firewall{ + ret := &ImageList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46207,19 +61994,34 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { } return ret, nil // { - // "description": "Returns the specified firewall.", + // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", // "httpMethod": "GET", - // "id": "compute.firewalls.get", + // "id": "compute.images.list", // "parameterOrder": [ - // "project", - // "firewall" + // "project" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -46230,9 +62032,9 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/global/images", // "response": { - // "$ref": "Firewall" + // "$ref": "ImageList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -46243,50 +62045,204 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { } -// method id "compute.firewalls.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type FirewallsInsertCall struct { - s *Service - project string - firewall *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.images.setIamPolicy": + +type ImagesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a firewall rule in the specified project using the -// data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert -func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { - c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *ImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *ImagesSetIamPolicyCall { + c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { - c.urlParams_.Set("requestId", requestId) +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesSetIamPolicyCall) Context(ctx context.Context) *ImagesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ImagesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.images.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.images.setIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/{resource}/setIamPolicy", + // "request": { + // "$ref": "GlobalSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.images.setLabels": + +type ImagesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an image. To learn more about labels, +// read the Labeling Resources documentation. +func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { + c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { +func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46294,35 +62250,35 @@ func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { +func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
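Editor's note: the ImagesService.SetLabels method defined above takes a GlobalSetLabelsRequest; label updates are guarded by the current label fingerprint, so a typical caller reads the image first. A hedged sketch, not part of the diff; svc is assumed to be an initialized *compute.Service:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// setImageLabels replaces the labels on an image. The label fingerprint from a
// prior Images.Get is passed along so concurrent label updates are detected.
func setImageLabels(ctx context.Context, svc *compute.Service, project, image string, labels map[string]string) (*compute.Operation, error) {
	img, err := svc.Images.Get(project, image).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	req := &compute.GlobalSetLabelsRequest{
		LabelFingerprint: img.LabelFingerprint,
		Labels:           labels,
	}
	return svc.Images.SetLabels(project, image, req).Context(ctx).Do()
}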
-func (c *FirewallsInsertCall) Header() http.Header { +func (c *ImagesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -46330,19 +62286,20 @@ func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.insert" call. +// Do executes the "compute.images.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46373,11 +62330,12 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates a firewall rule in the specified project using the data included in the request.", + // "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.firewalls.insert", + // "id": "compute.images.setLabels", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { // "project": { @@ -46387,15 +62345,17 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/firewalls", + // "path": "{project}/global/images/{resource}/setLabels", // "request": { - // "$ref": "Firewall" + // "$ref": "GlobalSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -46408,157 +62368,89 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.firewalls.list": +// method id "compute.images.testIamPermissions": -type FirewallsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ImagesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of firewall rules available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list -func (r *FirewallsService) List(project string) *FirewallsListCall { - c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { + c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { +func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { +func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
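Editor's note: the ImagesService.TestIamPermissions method introduced above reports which of the requested permissions the caller actually holds on an image. A short sketch, not part of the diff; the permission strings are illustrative and svc is an initialized *compute.Service:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// imagePermissions returns the subset of the requested image permissions that
// the caller is granted on the given image resource.
func imagePermissions(ctx context.Context, svc *compute.Service, project, image string) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.images.get", "compute.images.useReadOnly"},
	}
	resp, err := svc.Images.TestIamPermissions(project, image, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}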
-func (c *FirewallsListCall) Header() http.Header { +func (c *ImagesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.list" call. -// Exactly one of *FirewallList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *FirewallList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { +// Do executes the "compute.images.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46577,7 +62469,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &FirewallList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46589,47 +62481,35 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err } return ret, nil // { - // "description": "Retrieves the list of firewall rules available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.firewalls.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.images.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/firewalls", + // "path": "{project}/global/images/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "FirewallList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -46640,48 +62520,42 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.firewalls.patch": +// method id "compute.instanceGroupManagers.abandonInstances": -type FirewallsPatchCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified firewall rule with the data included in -// the request. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch -func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { - c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AbandonInstances: Flags the specified instances to be removed from +// the managed instance group. Abandoning an instance does not delete +// the instance, but it does remove the instance from any target pools +// that are applied by the managed instance group. This method reduces +// the targetSize of the managed instance group by the number of +// instances that you abandon. This operation is marked as DONE when the +// action is scheduled even if the instances have not yet been removed +// from the group. You must separately verify the status of the +// abandoning action with the listmanagedinstances method. 
+// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { + c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall - c.firewall2 = firewall2 + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest return c } @@ -46699,7 +62573,7 @@ func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Fir // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { +func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -46707,7 +62581,7 @@ func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46715,56 +62589,57 @@ func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
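A minimal usage sketch of the AbandonInstances call added above, assuming Application Default Credentials and placeholder project/zone/group/instance names (the chained call signature itself is taken from the generated code in this diff):

	package main

	import (
		"context"
		"log"

		"golang.org/x/oauth2/google"
		compute "google.golang.org/api/compute/v1"
	)

	func main() {
		ctx := context.Background()
		// Build an authenticated client from Application Default Credentials.
		client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
		if err != nil {
			log.Fatal(err)
		}
		svc, err := compute.New(client)
		if err != nil {
			log.Fatal(err)
		}
		// Flag one instance (referenced by its zonal URL) for abandonment.
		req := &compute.InstanceGroupManagersAbandonInstancesRequest{
			Instances: []string{"zones/us-central1-a/instances/example-instance"},
		}
		op, err := svc.InstanceGroupManagers.
			AbandonInstances("example-project", "us-central1-a", "example-igm", req).
			Context(ctx).
			Do()
		if err != nil {
			log.Fatal(err)
		}
		// The returned Operation is only scheduled; poll it before assuming
		// the instances have actually been removed from the group.
		log.Printf("abandonInstances operation: %s (status %s)", op.Name, op.Status)
	}
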
-func (c *FirewallsPatchCall) Header() http.Header { +func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.patch" call. +// Do executes the "compute.instanceGroupManagers.abandonInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46795,18 +62670,18 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.firewalls.patch", + // "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.abandonInstances", // "parameterOrder": [ // "project", - // "firewall" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to patch.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -46821,11 +62696,17 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", // "request": { - // "$ref": "Firewall" + // "$ref": "InstanceGroupManagersAbandonInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -46838,89 +62719,157 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.firewalls.testIamPermissions": +// method id "compute.instanceGroupManagers.aggregatedList": -type FirewallsTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *FirewallsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *FirewallsTestIamPermissionsCall { - c := &FirewallsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of managed instance groups and +// groups them by zone. 
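A sketch of consuming the aggregated list through the generated Pages helper, assuming the same imports and an authenticated *compute.Service named svc as in the earlier snippet; Items on each page is keyed by scope (zone):

	// listManagedGroups prints, per zone scope, how many managed instance
	// groups the project reports, walking every page of the aggregated list.
	func listManagedGroups(ctx context.Context, svc *compute.Service, project string) error {
		return svc.InstanceGroupManagers.AggregatedList(project).Pages(ctx,
			func(page *compute.InstanceGroupManagerAggregatedList) error {
				for scope, scoped := range page.Items {
					log.Printf("%s: %d manager(s)", scope, len(scoped.InstanceGroupManagers))
				}
				return nil
			})
	}
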
+func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { + c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsTestIamPermissionsCall) Fields(s ...googleapi.Field) *FirewallsTestIamPermissionsCall { +func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsTestIamPermissionsCall) Context(ctx context.Context) *FirewallsTestIamPermissionsCall { +func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instanceGroupManagers.aggregatedList" call. 
+// Exactly one of *InstanceGroupManagerAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46939,7 +62888,7 @@ func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &InstanceGroupManagerAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46951,35 +62900,47 @@ func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.firewalls.testIamPermissions", + // "description": "Retrieves the list of managed instance groups and groups them by zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.aggregatedList", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/aggregated/instanceGroupManagers", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "InstanceGroupManagerAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -46990,54 +62951,56 @@ func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } -// method id "compute.firewalls.update": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instanceGroupManagers.applyUpdatesToInstances": -type FirewallsUpdateCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersApplyUpdatesToInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified firewall rule with the data included in -// the request. The PUT method can only update the following fields of -// firewall rule: allowed, description, sourceRanges, sourceTags, -// targetTags. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update -func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { - c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ApplyUpdatesToInstances: Apply changes to selected instances on the +// managed instance group. This method can be used to apply new +// overrides and/or new versions. +func (r *InstanceGroupManagersService) ApplyUpdatesToInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest) *InstanceGroupManagersApplyUpdatesToInstancesCall { + c := &InstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall - c.firewall2 = firewall2 - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersapplyupdatesrequest = instancegroupmanagersapplyupdatesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersApplyUpdatesToInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47045,56 +63008,57 @@ func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *InstanceGroupManagersApplyUpdatesToInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
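A minimal sketch of asking the group manager to re-apply its current configuration to specific instances, assuming svc and placeholder names as before and that the request's Instances field carries instance URLs:

	// applyUpdates asks the managed instance group to refresh the listed
	// instances so they pick up the group's current template/overrides.
	func applyUpdates(ctx context.Context, svc *compute.Service, project, zone, igm string) (*compute.Operation, error) {
		req := &compute.InstanceGroupManagersApplyUpdatesRequest{
			Instances: []string{"zones/" + zone + "/instances/example-instance"},
		}
		return svc.InstanceGroupManagers.ApplyUpdatesToInstances(project, zone, igm, req).Context(ctx).Do()
	}
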
-func (c *FirewallsUpdateCall) Header() http.Header { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersapplyupdatesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.update" call. +// Do executes the "compute.instanceGroupManagers.applyUpdatesToInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47125,18 +63089,18 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. The PUT method can only update the following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", - // "httpMethod": "PUT", - // "id": "compute.firewalls.update", + // "description": "Apply changes to selected instances on the managed instance group. 
This method can be used to apply new overrides and/or new versions.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.applyUpdatesToInstances", // "parameterOrder": [ // "project", - // "firewall" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to update.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -47147,15 +63111,16 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone where the managed instance group is located. Should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", // "request": { - // "$ref": "Firewall" + // "$ref": "InstanceGroupManagersApplyUpdatesRequest" // }, // "response": { // "$ref": "Operation" @@ -47168,156 +63133,106 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.forwardingRules.aggregatedList": +// method id "compute.instanceGroupManagers.delete": -type ForwardingRulesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeleteCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of forwarding rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList -func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { - c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. Note that the instance group must not belong +// to a backend service. Read Deleting an instance group for more +// information. 
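A sketch of deleting a whole managed instance group (which, per the description above, must not be referenced by a backend service), with placeholder names and svc assumed as before:

	// deleteGroup removes the managed instance group and all of its instances.
	func deleteGroup(ctx context.Context, svc *compute.Service, project, zone, igm string) error {
		op, err := svc.InstanceGroupManagers.Delete(project, zone, igm).Context(ctx).Do()
		if err != nil {
			return err
		}
		// Deletion is asynchronous; op identifies the zonal operation to poll.
		log.Printf("delete started: %s", op.Name)
		return nil
	}
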
+func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { + c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. 
-func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { +func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { +func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesAggregatedListCall) Header() http.Header { +func (c *InstanceGroupManagersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.aggregatedList" call. -// Exactly one of *ForwardingRuleAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { +// Do executes the "compute.instanceGroupManagers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47336,7 +63251,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47348,97 +63263,87 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F } return ret, nil // { - // "description": "Retrieves an aggregated list of forwarding rules.", - // "httpMethod": "GET", - // "id": "compute.forwardingRules.aggregatedList", + // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroupManagers.delete", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the managed instance group to delete.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/forwardingRules", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "ForwardingRuleAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.forwardingRules.delete": +// method id "compute.instanceGroupManagers.deleteInstances": -type ForwardingRulesDeleteCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified ForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete -func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { - c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteInstances: Flags the specified instances in the managed +// instance group for immediate deletion. The instances are also removed +// from any target pools of which they were a member. This method +// reduces the targetSize of the managed instance group by the number of +// instances that you delete. This operation is marked as DONE when the +// action is scheduled even if the instances are still being deleted. +// You must separately verify the status of the deleting action with the +// listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. 
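And a sketch of deleting selected instances from the group rather than the group itself, mirroring the description above (placeholder names, svc assumed as before; the returned Operation is DONE once the action is scheduled, not once the instances are gone):

	// deleteInstances removes the named instances and shrinks the group's
	// targetSize by the number of instances deleted.
	func deleteInstances(ctx context.Context, svc *compute.Service, project, zone, igm string, instanceURLs []string) (*compute.Operation, error) {
		req := &compute.InstanceGroupManagersDeleteInstancesRequest{
			Instances: instanceURLs, // e.g. "zones/us-central1-a/instances/example-instance"
		}
		return svc.InstanceGroupManagers.DeleteInstances(project, zone, igm, req).Context(ctx).Do()
	}
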
+func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { + c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest return c } @@ -47456,7 +63361,7 @@ func (r *ForwardingRulesService) Delete(project string, region string, forwardin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRulesDeleteCall { +func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -47464,7 +63369,7 @@ func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRules // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { +func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47472,52 +63377,57 @@ func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRule // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { +func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.delete" call. +// Do executes the "compute.instanceGroupManagers.deleteInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47548,19 +63458,18 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified ForwardingRule resource.", - // "httpMethod": "DELETE", - // "id": "compute.forwardingRules.delete", + // "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.deleteInstances", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -47571,20 +63480,22 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "request": { + // "$ref": "InstanceGroupManagersDeleteInstancesRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -47596,33 +63507,34 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.forwardingRules.get": +// method id "compute.instanceGroupManagers.get": -type ForwardingRulesGetCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersGetCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified ForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get -func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { - c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns all of the details about the specified managed instance +// group. Gets a list of available managed instance groups by making a +// list() request. +func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { + c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { +func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47632,7 +63544,7 @@ func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { +func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { c.ifNoneMatch_ = entityTag return c } @@ -47640,21 +63552,21 @@ func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { +func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesGetCall) Header() http.Header { +func (c *InstanceGroupManagersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -47666,7 +63578,7 @@ func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -47674,21 +63586,21 @@ func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.get" call. -// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { +func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47707,7 +63619,7 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRule{ + ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47719,19 +63631,18 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu } return ret, nil // { - // "description": "Returns the specified ForwardingRule resource.", + // "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", // "httpMethod": "GET", - // "id": "compute.forwardingRules.get", + // "id": "compute.instanceGroupManagers.get", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -47742,17 +63653,16 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "ForwardingRule" + // "$ref": "InstanceGroupManager" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -47763,26 +63673,33 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu } -// method id "compute.forwardingRules.insert": +// method id "compute.instanceGroupManagers.insert": -type ForwardingRulesInsertCall struct { - s *Service - project string - region string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersInsertCall struct { + s *Service + project string + zone string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a ForwardingRule resource in the specified project -// and region using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert -func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { - c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, instances in +// the group are created using the specified instance template. This +// operation is marked as DONE when the group is created even if the +// instances in the group have not yet been created. 
You must separately +// verify the status of the individual instances with the +// listmanagedinstances method. +// +// A managed instance group can have up to 1000 VM instances per group. +// Please contact Cloud Support if you need an increase in this limit. +func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { + c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingrule = forwardingrule + c.zone = zone + c.instancegroupmanager = instancegroupmanager return c } @@ -47800,7 +63717,7 @@ func (r *ForwardingRulesService) Insert(project string, region string, forwardin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRulesInsertCall { +func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -47808,7 +63725,7 @@ func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRules // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { +func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47816,35 +63733,35 @@ func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRule // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { +func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesInsertCall) Header() http.Header { +func (c *InstanceGroupManagersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -47853,19 +63770,19 @@ func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.insert" call. +// Do executes the "compute.instanceGroupManagers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47896,12 +63813,12 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", // "httpMethod": "POST", - // "id": "compute.forwardingRules.insert", + // "id": "compute.instanceGroupManagers.insert", // "parameterOrder": [ // "project", - // "region" + // "zone" // ], // "parameters": { // "project": { @@ -47911,22 +63828,21 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules", + // "path": "{project}/zones/{zone}/instanceGroupManagers", // "request": { - // "$ref": "ForwardingRule" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -47939,25 +63855,24 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.forwardingRules.list": +// method id "compute.instanceGroupManagers.list": -type ForwardingRulesListCall struct { +type InstanceGroupManagersListCall struct { s *Service project string - region string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of ForwardingRule resources available to the -// specified project and region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list -func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { - c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of managed instance groups that are contained +// within the specified project and zone. +func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { + c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region + c.zone = zone return c } @@ -47983,7 +63898,7 @@ func (r *ForwardingRulesService) List(project string, region string) *Forwarding // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { +func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c } @@ -47994,7 +63909,7 @@ func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall { +func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -48011,7 +63926,7 @@ func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { +func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -48019,7 +63934,7 @@ func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall { +func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -48027,7 +63942,7 @@ func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall { +func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48037,7 +63952,7 @@ func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { +func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { c.ifNoneMatch_ = entityTag return c } @@ -48045,21 +63960,21 @@ func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRules // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { +func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesListCall) Header() http.Header { +func (c *InstanceGroupManagersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -48071,7 +63986,7 @@ func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -48080,19 +63995,19 @@ func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was +// Do executes the "compute.instanceGroupManagers.list" call. 
+// Exactly one of *InstanceGroupManagerList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupManagerList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { +func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48111,7 +64026,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleList{ + ret := &InstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48123,12 +64038,12 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR } return ret, nil // { - // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", + // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", // "httpMethod": "GET", - // "id": "compute.forwardingRules.list", + // "id": "compute.instanceGroupManagers.list", // "parameterOrder": [ // "project", - // "region" + // "zone" // ], // "parameters": { // "filter": { @@ -48161,17 +64076,16 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules", + // "path": "{project}/zones/{zone}/instanceGroupManagers", // "response": { - // "$ref": "ForwardingRuleList" + // "$ref": "InstanceGroupManagerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -48185,7 +64099,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { +func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -48203,53 +64117,99 @@ func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingR } } -// method id "compute.forwardingRules.setLabels": +// method id "compute.instanceGroupManagers.listManagedInstances": -type ForwardingRulesSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on the specified resource. To learn more -// about labels, read the Labeling Resources documentation. -func (r *ForwardingRulesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *ForwardingRulesSetLabelsCall { - c := &ForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListManagedInstances: Lists all of the instances in the managed +// instance group. Each instance in the list has a currentAction, which +// indicates the action that the managed instance group is performing on +// the instance. For example, if the group is still creating an +// instance, the currentAction is CREATING. If a previous action failed, +// the list displays the errors for that failed action. +func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { + c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *ForwardingRulesSetLabelsCall) RequestId(requestId string) *ForwardingRulesSetLabelsCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "order_by": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("order_by", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *ForwardingRulesSetLabelsCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48257,35 +64217,30 @@ func (c *ForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *ForwardingR // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ForwardingRulesSetLabelsCall) Context(ctx context.Context) *ForwardingRulesSetLabelsCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesSetLabelsCall) Header() http.Header { +func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -48293,21 +64248,23 @@ func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. +// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head +// er or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48326,7 +64283,7 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupManagersListManagedInstancesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48338,79 +64295,118 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", // "httpMethod": "POST", - // "id": "compute.forwardingRules.setLabels", + // "id": "compute.instanceGroupManagers.listManagedInstances", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "region": { - // "description": "The region for this request.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{resource}/setLabels", - // "request": { - // "$ref": "RegionSetLabelsRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupManagersListManagedInstancesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.forwardingRules.setTarget": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *InstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListManagedInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ForwardingRulesSetTargetCall struct { - s *Service - project string - region string - forwardingRule string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroupManagers.patch": + +type InstanceGroupManagersPatchCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTarget: Changes target URL for forwarding rule. The new target -// should be of the same type as the old target. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget -func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { - c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is patched even if the instances in the group are still in the +// process of being patched. You must separately verify the status of +// the individual instances with the listManagedInstances method. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. +func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { + c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule - c.targetreference = targetreference + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager return c } @@ -48428,7 +64424,7 @@ func (r *ForwardingRulesService) SetTarget(project string, region string, forwar // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { +func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -48436,7 +64432,7 @@ func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRu // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { +func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48444,57 +64440,57 @@ func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingR // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { +func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesSetTargetCall) Header() http.Header { +func (c *InstanceGroupManagersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.setTarget" call. +// Do executes the "compute.instanceGroupManagers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48525,19 +64521,18 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Changes target URL for forwarding rule. 
The new target should be of the same type as the old target.", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.setTarget", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instanceGroupManagers.patch", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -48548,22 +64543,21 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "request": { - // "$ref": "TargetReference" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -48576,34 +64570,66 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.forwardingRules.testIamPermissions": +// method id "compute.instanceGroupManagers.recreateInstances": -type ForwardingRulesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RecreateInstances: Flags the specified instances in the managed +// instance group to be immediately recreated. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the flag is set +// even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { + c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest + return c } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *ForwardingRulesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *ForwardingRulesTestIamPermissionsCall { - c := &ForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ForwardingRulesTestIamPermissionsCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48611,35 +64637,35 @@ func (c *ForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Fo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *ForwardingRulesTestIamPermissionsCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -48647,21 +64673,21 @@ func (c *ForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48680,7 +64706,7 @@ func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48692,15 +64718,21 @@ func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.forwardingRules.testIamPermissions", + // "id": "compute.instanceGroupManagers.recreateInstances", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -48708,54 +64740,72 @@ func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstanceGroupManagersRecreateInstancesRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalAddresses.delete": +// method id "compute.instanceGroupManagers.resize": -type GlobalAddressesDeleteCall struct { - s *Service - project string - address string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersResizeCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified address resource. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete -func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { - c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the managed instance group. If you increase the size, +// the group creates new instances using the current instance template. +// If you decrease the size, the group deletes instances. The resize +// operation is marked DONE when the resize actions are scheduled even +// if the group has not yet added or deleted any instances. You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. +// +// When resizing down, the instance group arbitrarily chooses the order +// in which VMs are deleted. The group takes into account some VM +// attributes when making the selection including: +// +// + The status of the VM instance. + The health of the VM instance. + +// The instance template version the VM is based on. + For regional +// managed instance groups, the location of the VM instance. +// +// This list is subject to change. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. +func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { + c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) return c } @@ -48773,7 +64823,7 @@ func (r *GlobalAddressesService) Delete(project string, address string) *GlobalA // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall { +func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -48781,7 +64831,7 @@ func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddresses // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { +func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48789,21 +64839,21 @@ func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddresse // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { +func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
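// Illustrative usage sketch, not part of the vendored diff: recreating instances in a
// zonal managed instance group with the generated client shown above, attaching a
// requestId so a retried call is treated as the same commitment. The project, zone,
// group and instance names, the import path (v0.beta is assumed here), and the choice
// of github.com/google/uuid for generating the ID are assumptions for the example.
package main

import (
	"context"
	"log"

	"github.com/google/uuid" // hypothetical choice for producing a unique request ID
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}
	req := &compute.InstanceGroupManagersRecreateInstancesRequest{
		Instances: []string{
			"zones/us-central1-a/instances/example-instance-1", // hypothetical instance URLs
			"zones/us-central1-a/instances/example-instance-2",
		},
	}
	op, err := svc.InstanceGroupManagers.
		RecreateInstances("example-project", "us-central1-a", "example-igm", req).
		RequestId(uuid.New().String()). // reuse the same ID on retry so the server ignores the duplicate
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	// The operation reports DONE once the flag is set; the actual recreation must be
	// verified separately (see the listmanagedinstances sketch further below).
	log.Printf("operation %s: %s", op.Name, op.Status)
}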
-func (c *GlobalAddressesDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -48812,28 +64862,29 @@ func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "address": c.address, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.delete" call. +// Do executes the "compute.instanceGroupManagers.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48864,18 +64915,19 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified address resource.", - // "httpMethod": "DELETE", - // "id": "compute.globalAddresses.delete", + // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. 
+ For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.resize", // "parameterOrder": [ // "project", - // "address" + // "zone", + // "instanceGroupManager", + // "size" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -48890,9 +64942,22 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "size": { + // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + // "format": "int32", + // "location": "query", + // "required": true, + // "type": "integer" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/addresses/{address}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { // "$ref": "Operation" // }, @@ -48904,97 +64969,124 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.globalAddresses.get": +// method id "compute.instanceGroupManagers.resizeAdvanced": -type GlobalAddressesGetCall struct { - s *Service - project string - address string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersResizeAdvancedCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified address resource. Gets a list of available -// addresses by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get -func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { - c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ResizeAdvanced: Resizes the managed instance group with advanced +// configuration options like disabling creation retries. This is an +// extended version of the resize method. +// +// If you increase the size of the instance group, the group creates new +// instances using the current instance template. If you decrease the +// size, the group deletes instances. The resize operation is marked +// DONE when the resize actions are scheduled even if the group has not +// yet added or deleted any instances. You must separately verify the +// status of the creating, creatingWithoutRetries, or deleting actions +// with the get or listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. +func (r *InstanceGroupManagersService) ResizeAdvanced(project string, zone string, instanceGroupManager string, instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest) *InstanceGroupManagersResizeAdvancedCall { + c := &InstanceGroupManagersResizeAdvancedCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersresizeadvancedrequest = instancegroupmanagersresizeadvancedrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *InstanceGroupManagersResizeAdvancedCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall { +func (c *InstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeAdvancedCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
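// Illustrative sketch under the same assumptions as above (v0.beta import, assumed
// names): resizing a managed instance group and then polling the group, since the
// resize operation is marked DONE when the actions are scheduled, not when the
// instances exist. The doc text only names the listmanagedinstances API method; the
// Go method name ListManagedInstances and the response field names are assumptions
// about the generated client.
package main

import (
	"context"
	"log"
	"time"

	compute "google.golang.org/api/compute/v0.beta"
)

func resizeAndWait(ctx context.Context, svc *compute.Service, project, zone, igm string, size int64) error {
	if _, err := svc.InstanceGroupManagers.Resize(project, zone, igm, size).Context(ctx).Do(); err != nil {
		return err
	}
	for {
		resp, err := svc.InstanceGroupManagers.ListManagedInstances(project, zone, igm).Context(ctx).Do()
		if err != nil {
			return err
		}
		pending := 0
		for _, mi := range resp.ManagedInstances {
			if mi.CurrentAction != "NONE" { // instance still being created, deleted, or recreated
				pending++
			}
		}
		if pending == 0 {
			return nil
		}
		log.Printf("%d instances still changing", pending)
		time.Sleep(10 * time.Second) // simple polling; a real caller would also honor ctx cancellation
	}
}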
-func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { +func (c *InstanceGroupManagersResizeAdvancedCall) Context(ctx context.Context) *InstanceGroupManagersResizeAdvancedCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesGetCall) Header() http.Header { +func (c *InstanceGroupManagersResizeAdvancedCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersresizeadvancedrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "address": c.address, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { +// Do executes the "compute.instanceGroupManagers.resizeAdvanced" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49013,7 +65105,7 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Address{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49025,18 +65117,18 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err } return ret, nil // { - // "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.globalAddresses.get", + // "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.resizeAdvanced", // "parameterOrder": [ // "project", - // "address" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -49046,39 +65138,55 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/addresses/{address}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", + // "request": { + // "$ref": "InstanceGroupManagersResizeAdvancedRequest" + // }, // "response": { - // "$ref": "Address" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalAddresses.insert": +// method id "compute.instanceGroupManagers.setAutoHealingPolicies": -type GlobalAddressesInsertCall struct { - s *Service - project string - address *Address - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetAutoHealingPoliciesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an address resource in the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert -func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { - c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetAutoHealingPolicies: Modifies the autohealing policies. +// [Deprecated] This method is deprecated. Please use Patch instead. +func (r *InstanceGroupManagersService) SetAutoHealingPolicies(project string, zone string, instanceGroupManager string, instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest) *InstanceGroupManagersSetAutoHealingPoliciesCall { + c := &InstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssetautohealingrequest = instancegroupmanagerssetautohealingrequest return c } @@ -49096,7 +65204,7 @@ func (r *GlobalAddressesService) Insert(project string, address *Address) *Globa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *InstanceGroupManagersSetAutoHealingPoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -49104,7 +65212,7 @@ func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddresses // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
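// Illustrative sketch for the resizeAdvanced variant documented above: an extended
// resize that can disable creation retries. The TargetSize and NoCreationRetries
// field names are assumptions about the beta request struct, which is not shown in
// this hunk; the numeric value is an example only.
package main

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta"
)

func resizeAdvanced(ctx context.Context, svc *compute.Service, project, zone, igm string) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersResizeAdvancedRequest{
		TargetSize:        5,    // desired number of instances (example value)
		NoCreationRetries: true, // fail fast instead of retrying failed instance creations
	}
	return svc.InstanceGroupManagers.
		ResizeAdvanced(project, zone, igm, req).
		Context(ctx).
		Do()
}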
-func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetAutoHealingPoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49112,35 +65220,35 @@ func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddresse // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *InstanceGroupManagersSetAutoHealingPoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesInsertCall) Header() http.Header { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetautohealingrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -49148,19 +65256,21 @@ func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.insert" call. +// Do executes the "compute.instanceGroupManagers.setAutoHealingPolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49191,13 +65301,21 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates an address resource in the specified project using the data included in the request.", + // "description": "Modifies the autohealing policies. [Deprecated] This method is deprecated. Please use Patch instead.", // "httpMethod": "POST", - // "id": "compute.globalAddresses.insert", + // "id": "compute.instanceGroupManagers.setAutoHealingPolicies", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -49209,11 +65327,17 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/addresses", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", // "request": { - // "$ref": "Address" + // "$ref": "InstanceGroupManagersSetAutoHealingRequest" // }, // "response": { // "$ref": "Operation" @@ -49226,156 +65350,112 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.globalAddresses.list": - -type GlobalAddressesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of global addresses. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list -func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { - c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. 
For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { - c.urlParams_.Set("filter", filter) - return c -} +// method id "compute.instanceGroupManagers.setInstanceTemplate": -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c +type InstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { - c.urlParams_.Set("orderBy", orderBy) +// SetInstanceTemplate: Specifies the instance template to use when +// creating new instances in this group. The templates for existing +// instances in the group do not change unless you recreate them. +func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { + c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall { - c.urlParams_.Set("pageToken", pageToken) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
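// Illustrative sketch (assumed template URL and names): pointing a managed instance
// group at a new instance template, as documented for setInstanceTemplate above.
// Existing VMs keep their old template until they are recreated, for example with the
// RecreateInstances sketch earlier.
package main

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta"
)

func setTemplate(ctx context.Context, svc *compute.Service, project, zone, igm, templateURL string) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersSetInstanceTemplateRequest{
		// e.g. "global/instanceTemplates/example-template" (hypothetical partial URL)
		InstanceTemplate: templateURL,
	}
	return svc.InstanceGroupManagers.
		SetInstanceTemplate(project, zone, igm, req).
		Context(ctx).
		Do()
}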
-func (c *GlobalAddressesListCall) Header() http.Header { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49394,7 +65474,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49406,104 +65486,107 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList } return ret, nil // { - // "description": "Retrieves a list of global addresses.", - // "httpMethod": "GET", - // "id": "compute.globalAddresses.list", + // "description": "Specifies the instance template to use when creating new instances in this group. 
The templates for existing instances in the group do not change unless you recreate them.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/addresses", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { + // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + // }, // "response": { - // "$ref": "AddressList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalAddresses.setLabels": +// method id "compute.instanceGroupManagers.setTargetPools": -type GlobalAddressesSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on a GlobalAddress. To learn more about -// labels, read the Labeling Resources documentation. -func (r *GlobalAddressesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalAddressesSetLabelsCall { - c := &GlobalAddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTargetPools: Modifies the target pools to which all instances in +// this managed instance group are assigned. The target pools +// automatically apply to all of the instances in the managed instance +// group. This operation is marked DONE when you make the request even +// if the instances have not yet been added to their target pools. The +// change might take some time to apply to all of the instances in the +// group depending on the size of the group. 
+func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { + c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddressesSetLabelsCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49511,35 +65594,35 @@ func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddre // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesSetLabelsCall) Context(ctx context.Context) *GlobalAddressesSetLabelsCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
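// Illustrative sketch (assumed pool URL and names): reassigning the target pools for
// all instances in a managed instance group via setTargetPools. As the doc comment
// above notes, the operation is DONE as soon as the request is made, and propagation
// to existing instances can lag behind.
package main

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta"
)

func setTargetPools(ctx context.Context, svc *compute.Service, project, zone, igm string) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersSetTargetPoolsRequest{
		TargetPools: []string{
			"regions/us-central1/targetPools/example-pool", // hypothetical partial URL
		},
	}
	return svc.InstanceGroupManagers.
		SetTargetPools(project, zone, igm, req).
		Context(ctx).
		Do()
}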
-func (c *GlobalAddressesSetLabelsCall) Header() http.Header { +func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -49547,20 +65630,21 @@ func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.setLabels" call. +// Do executes the "compute.instanceGroupManagers.setTargetPools" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49591,14 +65675,21 @@ func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", + // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. 
The change might take some time to apply to all of the instances in the group depending on the size of the group.", // "httpMethod": "POST", - // "id": "compute.globalAddresses.setLabels", + // "id": "compute.instanceGroupManagers.setTargetPools", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -49606,17 +65697,21 @@ func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/addresses/{resource}/setLabels", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", // "request": { - // "$ref": "GlobalSetLabelsRequest" + // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" // }, // "response": { // "$ref": "Operation" @@ -49629,11 +65724,12 @@ func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.globalAddresses.testIamPermissions": +// method id "compute.instanceGroupManagers.testIamPermissions": -type GlobalAddressesTestIamPermissionsCall struct { +type InstanceGroupManagersTestIamPermissionsCall struct { s *Service project string + zone string resource string testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams @@ -49643,9 +65739,10 @@ type GlobalAddressesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. 
-func (r *GlobalAddressesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalAddressesTestIamPermissionsCall { - c := &GlobalAddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstanceGroupManagersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupManagersTestIamPermissionsCall { + c := &InstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone c.resource = resource c.testpermissionsrequest = testpermissionsrequest return c @@ -49654,7 +65751,7 @@ func (r *GlobalAddressesService) TestIamPermissions(project string, resource str // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalAddressesTestIamPermissionsCall { +func (c *InstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49662,21 +65759,21 @@ func (c *GlobalAddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Gl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesTestIamPermissionsCall) Context(ctx context.Context) *GlobalAddressesTestIamPermissionsCall { +func (c *InstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupManagersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -49690,7 +65787,7 @@ func (c *GlobalAddressesTestIamPermissionsCall) doRequest(alt string) (*http.Res reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -49699,19 +65796,20 @@ func (c *GlobalAddressesTestIamPermissionsCall) doRequest(alt string) (*http.Res req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.testIamPermissions" call. +// Do executes the "compute.instanceGroupManagers.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. 
Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49744,9 +65842,10 @@ func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.globalAddresses.testIamPermissions", + // "id": "compute.instanceGroupManagers.testIamPermissions", // "parameterOrder": [ // "project", + // "zone", // "resource" // ], // "parameters": { @@ -49760,12 +65859,19 @@ func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/addresses/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -49781,23 +65887,30 @@ func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } -// method id "compute.globalForwardingRules.delete": +// method id "compute.instanceGroupManagers.update": -type GlobalForwardingRulesDeleteCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersUpdateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified GlobalForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete -func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { - c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is updated even if the instances in the group have not yet been +// updated. You must separately verify the status of the individual +// instances with the listManagedInstances method. 
+func (r *InstanceGroupManagersService) Update(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersUpdateCall { + c := &InstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager return c } @@ -49815,7 +65928,7 @@ func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { +func (c *InstanceGroupManagersUpdateCall) RequestId(requestId string) *InstanceGroupManagersUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -49823,7 +65936,7 @@ func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalFor // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { +func (c *InstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49831,212 +65944,57 @@ func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalFo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { +func (c *InstanceGroupManagersUpdateCall) Context(ctx context.Context) *InstanceGroupManagersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.delete" call. +// Do executes the "compute.instanceGroupManagers.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified GlobalForwardingRule resource.", - // "httpMethod": "DELETE", - // "id": "compute.globalForwardingRules.delete", - // "parameterOrder": [ - // "project", - // "forwardingRule" - // ], - // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.globalForwardingRules.get": - -type GlobalForwardingRulesGetCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified GlobalForwardingRule resource. Gets a list -// of available forwarding rules by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get -func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { - c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.forwardingRule = forwardingRule - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalForwardingRulesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalForwardingRules.get" call. 
-// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50055,7 +66013,7 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRule{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50067,18 +66025,18 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar } return ret, nil // { - // "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.get", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listManagedInstances method.", + // "httpMethod": "PUT", + // "id": "compute.instanceGroupManagers.update", // "parameterOrder": [ // "project", - // "forwardingRule" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -50088,39 +66046,56 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "request": { + // "$ref": "InstanceGroupManager" + // }, // "response": { - // "$ref": "ForwardingRule" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalForwardingRules.insert": +// method id "compute.instanceGroups.addInstances": -type GlobalForwardingRulesInsertCall struct { - s *Service - project string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsAddInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a GlobalForwardingRule resource in the specified -// project using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert -func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { - c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddInstances: Adds a list of instances to the specified instance +// group. All of the instances in the instance group must be in the same +// network/subnetwork. Read Adding instances for more information. +func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { + c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingrule = forwardingrule + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest return c } @@ -50138,7 +66113,7 @@ func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *Fo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalForwardingRulesInsertCall { +func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -50146,7 +66121,7 @@ func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalFor // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { +func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50154,35 +66129,35 @@ func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalFo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { +func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesInsertCall) Header() http.Header { +func (c *InstanceGroupsAddInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -50190,19 +66165,21 @@ func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.insert" call. +// Do executes the "compute.instanceGroups.addInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50233,13 +66210,21 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", + // "description": "Adds a list of instances to the specified instance group. 
All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.insert", + // "id": "compute.instanceGroups.addInstances", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroup" // ], // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where you are adding instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -50251,11 +66236,17 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", // "request": { - // "$ref": "ForwardingRule" + // "$ref": "InstanceGroupsAddInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -50268,9 +66259,9 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.globalForwardingRules.list": +// method id "compute.instanceGroups.aggregatedList": -type GlobalForwardingRulesListCall struct { +type InstanceGroupsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -50279,11 +66270,10 @@ type GlobalForwardingRulesListCall struct { header_ http.Header } -// List: Retrieves a list of GlobalForwardingRule resources available to -// the specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list -func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { - c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of instance groups and sorts them +// by zone. +func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { + c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -50310,7 +66300,7 @@ func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRul // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
-func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { +func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -50321,7 +66311,7 @@ func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingR // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall { +func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -50338,7 +66328,7 @@ func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForw // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { +func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -50346,7 +66336,7 @@ func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardin // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall { +func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -50354,7 +66344,7 @@ func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall { +func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50364,7 +66354,7 @@ func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForw // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall { +func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -50372,21 +66362,21 @@ func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalFor // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall { +func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *GlobalForwardingRulesListCall) Header() http.Header { +func (c *InstanceGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -50398,7 +66388,7 @@ func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -50411,14 +66401,14 @@ func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceGroups.aggregatedList" call. +// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { +func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50437,7 +66427,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleList{ + ret := &InstanceGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50449,9 +66439,9 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa } return ret, nil // { - // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", + // "description": "Retrieves the list of instance groups and sorts them by zone.", // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.list", + // "id": "compute.instanceGroups.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -50487,9 +66477,9 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules", + // "path": "{project}/aggregated/instanceGroups", // "response": { - // "$ref": "ForwardingRuleList" + // "$ref": "InstanceGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -50503,7 +66493,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { +func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -50521,32 +66511,53 @@ func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*Forwa } } -// method id "compute.globalForwardingRules.setLabels": +// method id "compute.instanceGroups.delete": -type GlobalForwardingRulesSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsDeleteCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on the specified resource. To learn more -// about labels, read the Labeling Resources documentation. -func (r *GlobalForwardingRulesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalForwardingRulesSetLabelsCall { - c := &GlobalForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified instance group. The instances in the +// group are not deleted. Note that instance group must not belong to a +// backend service. Read Deleting an instance group for more +// information. 
+func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { + c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.zone = zone + c.instanceGroup = instanceGroup + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetLabelsCall { +func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50554,56 +66565,52 @@ func (c *GlobalForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *Globa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesSetLabelsCall) Context(ctx context.Context) *GlobalForwardingRulesSetLabelsCall { +func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesSetLabelsCall) Header() http.Header { +func (c *InstanceGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.setLabels" call. +// Do executes the "compute.instanceGroups.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50634,14 +66641,21 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.setLabels", + // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroups.delete", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroup" // ], // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -50649,18 +66663,19 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", // "response": { // "$ref": "Operation" // }, @@ -50672,26 +66687,190 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.globalForwardingRules.setTarget": +// method id "compute.instanceGroups.get": -type GlobalForwardingRulesSetTargetCall struct { - s *Service - project string - forwardingRule string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsGetCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetTarget: Changes target URL for the GlobalForwardingRule resource. -// The new target should be of the same type as the old target. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget -func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { - c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified instance group. Gets a list of available +// instance groups by making a list() request. +func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { + c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule - c.targetreference = targetreference + c.zone = zone + c.instanceGroup = instanceGroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InstanceGroupsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroup{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance group. 
Gets a list of available instance groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.get", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "response": { + // "$ref": "InstanceGroup" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroups.insert": + +type InstanceGroupsInsertCall struct { + s *Service + project string + zone string + instancegroup *InstanceGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an instance group in the specified project using the +// parameters that are included in the request. +func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { + c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instancegroup = instancegroup return c } @@ -50709,7 +66888,7 @@ func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *GlobalForwardingRulesSetTargetCall { +func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -50717,7 +66896,7 @@ func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *Global // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { +func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50725,35 +66904,35 @@ func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *Globa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { +func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { +func (c *InstanceGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -50761,20 +66940,20 @@ func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.setTarget" call. +// Do executes the "compute.instanceGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50805,21 +66984,14 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", + // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.setTarget", + // "id": "compute.instanceGroups.insert", // "parameterOrder": [ // "project", - // "forwardingRule" + // "zone" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -50831,11 +67003,17 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", + // "path": "{project}/zones/{zone}/instanceGroups", // "request": { - // "$ref": "TargetReference" + // "$ref": "InstanceGroup" // }, // "response": { // "$ref": "Operation" @@ -50848,89 +67026,159 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.globalForwardingRules.testIamPermissions": +// method id "compute.instanceGroups.list": -type GlobalForwardingRulesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *GlobalForwardingRulesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalForwardingRulesTestIamPermissionsCall { - c := &GlobalForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instance groups that are located in the +// specified project and zone. +func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { + c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). 
By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesTestIamPermissionsCall { +func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *GlobalForwardingRulesTestIamPermissionsCall { +func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
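Reviewer note (not part of the vendored file): below is a minimal usage sketch for the InstanceGroups Get and Insert call builders added in this hunk. It assumes an already-authenticated *compute.Service constructed elsewhere, an import path of google.golang.org/api/compute/v1 (adjust to whichever API version this generated file belongs to), and placeholder project, zone, group, ETag, and request-ID values; none of those names come from the diff itself.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // adjust to the vendored version of this file
	"google.golang.org/api/googleapi"
)

// getAndCreateInstanceGroup exercises the Get and Insert call builders from
// this hunk against an already-constructed, authenticated service.
func getAndCreateInstanceGroup(ctx context.Context, svc *compute.Service) error {
	const project, zone = "my-project", "us-central1-a" // placeholder values

	// Conditional read: IfNoneMatch sends a previously seen ETag, and a 304
	// response surfaces as an error that googleapi.IsNotModified recognizes.
	lastETag := `"abc123"` // placeholder ETag from an earlier response
	ig, err := svc.InstanceGroups.Get(project, zone, "my-group").
		IfNoneMatch(lastETag).
		Context(ctx).
		Do()
	switch {
	case googleapi.IsNotModified(err):
		fmt.Println("instance group unchanged since the last read")
	case err != nil:
		return err
	default:
		fmt.Printf("fetched %s (%s)\n", ig.Name, ig.SelfLink)
	}

	// Insert with a client-chosen request ID so a retried request is ignored
	// by the server if the original already completed.
	op, err := svc.InstanceGroups.Insert(project, zone, &compute.InstanceGroup{
		Name:       "my-new-group",
		NamedPorts: []*compute.NamedPort{{Name: "http", Port: 80}},
	}).
		RequestId("11111111-2222-3333-4444-555555555555"). // placeholder UUID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("insert running as operation %s (%s)\n", op.Name, op.Status)
	return nil
}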
-func (c *GlobalForwardingRulesTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.instanceGroups.list" call. +// Exactly one of *InstanceGroupList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *InstanceGroupList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50949,7 +67197,7 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &InstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50961,14 +67209,37 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.testIamPermissions", + // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.list", // "parameterOrder": [ // "project", - // "resource" + // "zone" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -50976,20 +67247,16 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallO // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone where the instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroups", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "InstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -51000,22 +67267,47 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallO } -// method id "compute.globalOperations.aggregatedList": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type GlobalOperationsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroups.listInstances": + +type InstanceGroupsListInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of all operations. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList -func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { - c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListInstances: Lists the instances in the specified instance group. +func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { + c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest return c } @@ -51041,7 +67333,7 @@ func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperatio // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
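Reviewer note (not part of the vendored file): a sketch of the List builder and its Pages helper from this hunk, under the same assumptions as the earlier sketch (pre-built authenticated *compute.Service, compute/v1 import path, placeholder project and zone). Pages simply re-issues the request with each NextPageToken until the token is empty or the callback returns an error.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // adjust to the vendored version of this file
)

// listInstanceGroups shows the List builder plus the Pages helper from this
// hunk, walking every page of results in the given project and zone.
func listInstanceGroups(ctx context.Context, svc *compute.Service) error {
	const project, zone = "my-project", "us-central1-a" // placeholder values

	call := svc.InstanceGroups.List(project, zone).
		// Filter/OrderBy strings follow the grammar in the doc comments above.
		Filter("name != example-instance-group").
		OrderBy("creationTimestamp desc").
		MaxResults(100)

	return call.Pages(ctx, func(page *compute.InstanceGroupList) error {
		for _, ig := range page.Items {
			fmt.Println(ig.Name)
		}
		return nil // returning an error here stops the iteration early
	})
}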
-func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { +func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c } @@ -51052,7 +67344,7 @@ func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperat // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { +func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -51069,7 +67361,7 @@ func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *Globa // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { +func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -51077,7 +67369,7 @@ func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOper // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { +func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -51085,71 +67377,65 @@ func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *Global // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { +func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { +func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *GlobalOperationsAggregatedListCall) Header() http.Header { +func (c *InstanceGroupsListInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.aggregatedList" call. -// Exactly one of *OperationAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *OperationAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceGroups.listInstances" call. +// Exactly one of *InstanceGroupsListInstances or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupsListInstances.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { +func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51168,7 +67454,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationAggregatedList{ + ret := &InstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -51180,11 +67466,13 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of all operations.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.aggregatedList", + // "description": "Lists the instances in the specified instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.listInstances", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroup" // ], // "parameters": { // "filter": { @@ -51192,6 +67480,12 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "location": "query", // "type": "string" // }, + // "instanceGroup": { + // "description": "The name of the instance group from which you want to generate a list of included instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", @@ -51216,11 +67510,20 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/operations", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "InstanceGroupsListInstancesRequest" + // }, // "response": { - // "$ref": "OperationAggregatedList" + // "$ref": "InstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -51234,7 +67537,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
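Reviewer note (not part of the vendored file): a sketch of the ListInstances call from this hunk, with the same assumptions as above. Unlike List, it is a POST whose body selects which instance states to return; the InstanceState value and the Instance/Status fields on the returned items follow the compute/v1 surface and are not spelled out in this hunk.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // adjust to the vendored version of this file
)

// listGroupMembers uses the ListInstances builder from this hunk; paging
// still goes through the generated Pages helper.
func listGroupMembers(ctx context.Context, svc *compute.Service) error {
	const project, zone, group = "my-project", "us-central1-a", "my-group" // placeholders

	req := &compute.InstanceGroupsListInstancesRequest{InstanceState: "RUNNING"}
	call := svc.InstanceGroups.ListInstances(project, zone, group, req).MaxResults(50)

	return call.Pages(ctx, func(page *compute.InstanceGroupsListInstances) error {
		for _, item := range page.Items {
			fmt.Printf("%s (%s)\n", item.Instance, item.Status)
		}
		return nil
	})
}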
-func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { +func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -51252,30 +67555,57 @@ func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(* } } -// method id "compute.globalOperations.delete": +// method id "compute.instanceGroups.removeInstances": -type GlobalOperationsDeleteCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsRemoveInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete -func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { - c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveInstances: Removes one or more instances from the specified +// instance group, but does not delete those instances. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration before the VM instance is removed or deleted. +func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { + c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
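Reviewer note (not part of the vendored file): a sketch of RemoveInstances with a client-supplied request ID, same assumptions as the earlier sketches; instanceURL is a caller-provided full instance URL, and the UUID is a placeholder.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // adjust to the vendored version of this file
)

// removeInstanceFromGroup uses the RemoveInstances builder from this hunk.
// The instance is only detached from the group, not deleted; per the doc
// comment above, removal may lag by up to 60s after any connection draining.
func removeInstanceFromGroup(ctx context.Context, svc *compute.Service, instanceURL string) error {
	const project, zone, group = "my-project", "us-central1-a", "my-group" // placeholders

	op, err := svc.InstanceGroups.RemoveInstances(project, zone, group,
		&compute.InstanceGroupsRemoveInstancesRequest{
			Instances: []*compute.InstanceReference{{Instance: instanceURL}},
		}).
		RequestId("11111111-2222-3333-4444-555555555555"). // placeholder UUID for idempotent retries
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("removeInstances running as operation %s (%s)\n", op.Name, op.Status)
	return nil
}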
-func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { +func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51283,68 +67613,99 @@ func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperati // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { +func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsDeleteCall) Header() http.Header { +func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.delete" call. -func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "compute.instanceGroups.removeInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return nil, err } - return nil + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Deletes the specified Operations resource.", - // "httpMethod": "DELETE", - // "id": "compute.globalOperations.delete", + // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.removeInstances", // "parameterOrder": [ // "project", - // "operation" + // "zone", + // "instanceGroup" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", + // "instanceGroup": { + // "description": "The name of the instance group where the specified instances will be removed.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -51354,9 +67715,26 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/operations/{operation}", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "request": { + // "$ref": "InstanceGroupsRemoveInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute" @@ -51365,97 +67743,110 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } -// method id "compute.globalOperations.get": +// method id "compute.instanceGroups.setNamedPorts": -type GlobalOperationsGetCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupsSetNamedPortsCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Retrieves the specified Operations resource. Gets a list of -// operations by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get -func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { - c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNamedPorts: Sets the named ports for the specified instance group. +func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { + c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
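Reviewer note (not part of the vendored file): a sketch of SetNamedPorts, same assumptions as above. The Fingerprint round-trip is my suggestion for safe concurrent updates; it relies on the Fingerprint fields of InstanceGroup and InstanceGroupsSetNamedPortsRequest, which are defined elsewhere in this generated file rather than in this hunk.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // adjust to the vendored version of this file
)

// setGroupNamedPorts uses the SetNamedPorts builder from this hunk in a
// read-modify-write pattern: the group's current fingerprint is echoed back
// so the server can detect concurrent modifications.
func setGroupNamedPorts(ctx context.Context, svc *compute.Service) error {
	const project, zone, group = "my-project", "us-central1-a", "my-group" // placeholders

	ig, err := svc.InstanceGroups.Get(project, zone, group).Context(ctx).Do()
	if err != nil {
		return err
	}

	op, err := svc.InstanceGroups.SetNamedPorts(project, zone, group,
		&compute.InstanceGroupsSetNamedPortsRequest{
			Fingerprint: ig.Fingerprint, // optimistic-concurrency token from the Get above
			NamedPorts: []*compute.NamedPort{
				{Name: "http", Port: 8080},
				{Name: "https", Port: 8443},
			},
		}).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("setNamedPorts running as operation %s (%s)\n", op.Name, op.Status)
	return nil
}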
-func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { +func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { +func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsGetCall) Header() http.Header { +func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.get" call. +// Do executes the "compute.instanceGroups.setNamedPorts" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51486,18 +67877,18 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified Operations resource. Gets a list of operations by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.get", + // "description": "Sets the named ports for the specified instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.setNamedPorts", // "parameterOrder": [ // "project", - // "operation" + // "zone", + // "instanceGroup" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", + // "instanceGroup": { + // "description": "The name of the instance group where the named ports are updated.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -51507,172 +67898,120 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/operations/{operation}", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "request": { + // "$ref": "InstanceGroupsSetNamedPortsRequest" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalOperations.list": +// method id "compute.instanceGroups.testIamPermissions": -type GlobalOperationsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of Operation resources contained within the -// specified project. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list -func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { - c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InstanceGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupsTestIamPermissionsCall { + c := &InstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
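Reviewer note (not part of the vendored file): a sketch of the TestIamPermissions call introduced in this hunk, same assumptions as above; the permission strings are illustrative examples only, not an authoritative list.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // adjust to the vendored version of this file
)

// checkInstanceGroupPermissions uses the TestIamPermissions builder from this
// hunk: the response echoes back whichever of the requested permissions the
// caller actually holds on the named resource.
func checkInstanceGroupPermissions(ctx context.Context, svc *compute.Service) error {
	const project, zone, group = "my-project", "us-central1-a", "my-group" // placeholders

	resp, err := svc.InstanceGroups.TestIamPermissions(project, zone, group,
		&compute.TestPermissionsRequest{
			Permissions: []string{
				"compute.instanceGroups.get", // example permission names
				"compute.instanceGroups.update",
			},
		}).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("granted: %v\n", resp.Permissions)
	return nil
}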
-func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { +func (c *InstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { +func (c *InstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsListCall) Header() http.Header { +func (c *InstanceGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. 
Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51691,7 +68030,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -51703,47 +68042,43 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified project.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/operations", + // "path": "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "OperationList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -51754,43 +68089,25 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.healthChecks.delete": +// method id "compute.instanceTemplates.delete": -type HealthChecksDeleteCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesDeleteCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HealthCheck resource. -func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { - c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified instance template. Deleting an instance +// template is permanent and cannot be undone. 
It is not possible to +// delete templates that are already in use by a managed instance group. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete +func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { + c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck + c.instanceTemplate = instanceTemplate return c } @@ -51808,7 +68125,7 @@ func (r *HealthChecksService) Delete(project string, healthCheck string) *Health // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { +func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -51816,7 +68133,7 @@ func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDelete // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { +func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51824,21 +68141,21 @@ func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDelet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { +func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksDeleteCall) Header() http.Header { +func (c *InstanceTemplatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -51847,7 +68164,7 @@ func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -51855,20 +68172,20 @@ func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "instanceTemplate": c.instanceTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.delete" call. +// Do executes the "compute.instanceTemplates.delete" call. // Exactly one of *Operation or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51899,18 +68216,18 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Deletes the specified HealthCheck resource.", + // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group.", // "httpMethod": "DELETE", - // "id": "compute.healthChecks.delete", + // "id": "compute.instanceTemplates.delete", // "parameterOrder": [ // "project", - // "healthCheck" + // "instanceTemplate" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to delete.", + // "instanceTemplate": { + // "description": "The name of the instance template to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -51927,7 +68244,7 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", // "response": { // "$ref": "Operation" // }, @@ -51939,31 +68256,188 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.healthChecks.get": +// method id "compute.instanceTemplates.get": -type HealthChecksGetCall struct { +type InstanceTemplatesGetCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified instance template. Gets a list of +// available instance templates by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get +func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { + c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instanceTemplate = instanceTemplate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
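// A minimal, self-contained sketch of the conditional-read flow the IfNoneMatch
// comment above describes: re-fetch an instance template only if it has changed.
// Assumptions (not part of this diff): the generated package is imported as
// "compute" from whichever API version is vendored here, Application Default
// Credentials are configured, the project/template names are placeholders, and
// the ETag is read from the embedded googleapi.ServerResponse of a prior response.
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed version; match the vendored path
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// First read: remember the ETag the server returned.
	tmpl, err := svc.InstanceTemplates.Get("my-project", "my-template").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	etag := tmpl.Header.Get("Etag") // ServerResponse (with Header) is embedded in generated response types

	// Later read: send If-None-Match so an unchanged resource costs only a 304.
	_, err = svc.InstanceTemplates.Get("my-project", "my-template").
		IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		log.Print("template unchanged since last fetch")
	} else if err != nil {
		log.Fatal(err)
	}
}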
+func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceTemplatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instanceTemplate": c.instanceTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceTemplates.get" call. +// Exactly one of *InstanceTemplate or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplate.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance template. 
Gets a list of available instance templates by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.get", + // "parameterOrder": [ + // "project", + // "instanceTemplate" + // ], + // "parameters": { + // "instanceTemplate": { + // "description": "The name of the instance template.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "response": { + // "$ref": "InstanceTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceTemplates.getIamPolicy": + +type InstanceTemplatesGetIamPolicyCall struct { s *Service project string - healthCheck string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. -func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { - c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *InstanceTemplatesService) GetIamPolicy(project string, resource string) *InstanceTemplatesGetIamPolicyCall { + c := &InstanceTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { +func (c *InstanceTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51973,7 +68447,7 @@ func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { +func (c *InstanceTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -51981,21 +68455,21 @@ func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
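// A short sketch of the getIamPolicy call introduced above: read the policy on an
// instance template and print its bindings. Assumes a *compute.Service built as in
// the earlier sketch; the project and template names are placeholders, and the
// Policy/Binding field names follow the generated types (not shown in this diff).
package iampolicyexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed version
)

func showTemplatePolicy(ctx context.Context, svc *compute.Service) error {
	policy, err := svc.InstanceTemplates.GetIamPolicy("my-project", "my-template").Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, b := range policy.Bindings {
		// Each binding pairs one role with the members holding it.
		fmt.Printf("%s -> %v\n", b.Role, b.Members)
	}
	return nil
}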
-func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { +func (c *InstanceTemplatesGetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksGetCall) Header() http.Header { +func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -52007,7 +68481,7 @@ func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -52015,20 +68489,20 @@ func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.get" call. -// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheck.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { +// Do executes the "compute.instanceTemplates.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52047,7 +68521,7 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheck{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52059,32 +68533,32 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.healthChecks.get", + // "id": "compute.instanceTemplates.getIamPolicy", // "parameterOrder": [ // "project", - // "healthCheck" + // "resource" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/instanceTemplates/{resource}/getIamPolicy", // "response": { - // "$ref": "HealthCheck" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -52095,23 +68569,27 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } -// method id "compute.healthChecks.insert": +// method id "compute.instanceTemplates.insert": -type HealthChecksInsertCall struct { - s *Service - project string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesInsertCall struct { + s *Service + project string + instancetemplate *InstanceTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { - c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance template in the specified project using +// the data that is included in the request. If you are creating a new +// template to update an existing instance group, your new instance +// template must use the same network or, if applicable, the same +// subnetwork as the original template. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert +func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { + c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthcheck = healthcheck + c.instancetemplate = instancetemplate return c } @@ -52129,7 +68607,7 @@ func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
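// A sketch of the idempotent-insert pattern the RequestId comment above describes:
// retrying the same logical request with the same request ID lets the server
// deduplicate it. Assumes a *compute.Service as in the earlier sketches; the
// project name, template name, and UUID value are placeholders (generate a fresh,
// valid UUID per logical request in real code), and the template's Properties are
// elided for brevity.
package insertexample

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed version
)

func createTemplate(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	tmpl := &compute.InstanceTemplate{
		Name: "my-template",
		// Properties (machine type, disks, network interfaces, ...) omitted here.
	}
	const requestID = "3f9b7a52-1c2d-4e6f-9a8b-123456789abc" // placeholder; must be a valid, non-zero UUID
	return svc.InstanceTemplates.Insert("my-project", tmpl).
		RequestId(requestID).
		Context(ctx).
		Do()
}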
-func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { +func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -52137,7 +68615,7 @@ func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsert // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { +func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52145,35 +68623,35 @@ func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInser // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { +func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksInsertCall) Header() http.Header { +func (c *InstanceTemplatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -52186,14 +68664,14 @@ func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.insert" call. +// Do executes the "compute.instanceTemplates.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52224,9 +68702,9 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", // "httpMethod": "POST", - // "id": "compute.healthChecks.insert", + // "id": "compute.instanceTemplates.insert", // "parameterOrder": [ // "project" // ], @@ -52244,9 +68722,9 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/healthChecks", + // "path": "{project}/global/instanceTemplates", // "request": { - // "$ref": "HealthCheck" + // "$ref": "InstanceTemplate" // }, // "response": { // "$ref": "Operation" @@ -52259,9 +68737,9 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.healthChecks.list": +// method id "compute.instanceTemplates.list": -type HealthChecksListCall struct { +type InstanceTemplatesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -52270,10 +68748,11 @@ type HealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HealthCheck resources available to the -// specified project. -func (r *HealthChecksService) List(project string) *HealthChecksListCall { - c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of instance templates that are contained +// within the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list +func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { + c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -52300,7 +68779,7 @@ func (r *HealthChecksService) List(project string) *HealthChecksListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { +func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { c.urlParams_.Set("filter", filter) return c } @@ -52311,7 +68790,7 @@ func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { +func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -52328,7 +68807,7 @@ func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCal // // Currently, only sorting by name or creationTimestamp desc is // supported. 
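// A sketch tying together the list-call options above (Filter, MaxResults, OrderBy)
// with the Pages helper that appears a little further below for this call type.
// Assumes a *compute.Service as in the earlier sketches; the project name and the
// filter expression are placeholders, and InstanceTemplateList.Items follows the
// generated type (not shown in this diff).
package listexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed version
)

func listTemplates(ctx context.Context, svc *compute.Service) error {
	call := svc.InstanceTemplates.List("my-project").
		Filter("name != example-template").
		MaxResults(100).
		OrderBy("creationTimestamp desc")
	// Pages handles the nextPageToken bookkeeping and invokes f once per page.
	return call.Pages(ctx, func(page *compute.InstanceTemplateList) error {
		for _, t := range page.Items {
			fmt.Println(t.Name)
		}
		return nil
	})
}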
-func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { +func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -52336,7 +68815,7 @@ func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { +func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -52344,7 +68823,7 @@ func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { +func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52354,7 +68833,7 @@ func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { +func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -52362,21 +68841,21 @@ func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { +func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksListCall) Header() http.Header { +func (c *InstanceTemplatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -52388,7 +68867,7 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -52401,14 +68880,14 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.list" call. -// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *HealthCheckList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceTemplates.list" call. +// Exactly one of *InstanceTemplateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { +func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52427,7 +68906,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheckList{ + ret := &InstanceTemplateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52439,9 +68918,9 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis } return ret, nil // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project.", + // "description": "Retrieves a list of instance templates that are contained within the specified project.", // "httpMethod": "GET", - // "id": "compute.healthChecks.list", + // "id": "compute.instanceTemplates.list", // "parameterOrder": [ // "project" // ], @@ -52477,9 +68956,9 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // "type": "string" // } // }, - // "path": "{project}/global/healthChecks", + // "path": "{project}/global/instanceTemplates", // "response": { - // "$ref": "HealthCheckList" + // "$ref": "InstanceTemplateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -52493,7 +68972,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { +func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -52511,52 +68990,32 @@ func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckLis } } -// method id "compute.healthChecks.patch": +// method id "compute.instanceTemplates.setIamPolicy": -type HealthChecksPatchCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HealthCheck resource in the specified project using -// the data included in the request. 
This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { - c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *InstanceTemplatesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InstanceTemplatesSetIamPolicyCall { + c := &InstanceTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { +func (c *InstanceTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52564,56 +69023,56 @@ func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { +func (c *InstanceTemplatesSetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HealthChecksPatchCall) Header() http.Header { +func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceTemplates.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52632,7 +69091,7 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52644,21 +69103,14 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.healthChecks.patch", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.instanceTemplates.setIamPolicy", // "parameterOrder": [ // "project", - // "healthCheck" + // "resource" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -52666,18 +69118,20 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/instanceTemplates/{resource}/setIamPolicy", // "request": { - // "$ref": "HealthCheck" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -52687,9 +69141,9 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.healthChecks.testIamPermissions": +// method id "compute.instanceTemplates.testIamPermissions": -type HealthChecksTestIamPermissionsCall struct { +type InstanceTemplatesTestIamPermissionsCall struct { s *Service project string resource string @@ -52701,8 +69155,8 @@ type HealthChecksTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. -func (r *HealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HealthChecksTestIamPermissionsCall { - c := &HealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { + c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource c.testpermissionsrequest = testpermissionsrequest @@ -52712,7 +69166,7 @@ func (r *HealthChecksService) TestIamPermissions(project string, resource string // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
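// A sketch of the testIamPermissions call introduced above: ask which of a set of
// permissions the caller holds on an instance template. Assumes a *compute.Service
// as in the earlier sketches; the project, resource, and permission names are
// placeholders, and the request/response Permissions fields follow the generated
// types (not shown in this diff).
package testpermsexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed version
)

func checkTemplatePermissions(ctx context.Context, svc *compute.Service) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.instanceTemplates.get",
			"compute.instanceTemplates.delete",
		},
	}
	resp, err := svc.InstanceTemplates.TestIamPermissions("my-project", "my-template", req).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	// The response echoes back only the permissions the caller actually has.
	fmt.Println("granted:", resp.Permissions)
	return nil
}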
-func (c *HealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HealthChecksTestIamPermissionsCall { +func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52720,21 +69174,21 @@ func (c *HealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *Healt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HealthChecksTestIamPermissionsCall { +func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksTestIamPermissionsCall) Header() http.Header { +func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -52748,7 +69202,7 @@ func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Respon reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -52762,14 +69216,14 @@ func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.testIamPermissions" call. +// Do executes the "compute.instanceTemplates.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52802,7 +69256,7 @@ func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.healthChecks.testIamPermissions", + // "id": "compute.instanceTemplates.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -52823,7 +69277,7 @@ func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{resource}/testIamPermissions", + // "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -52839,198 +69293,29 @@ func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.healthChecks.update": - -type HealthChecksUpdateCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { - c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *HealthChecksUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.healthChecks.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.healthChecks.update", - // "parameterOrder": [ - // "project", - // "healthCheck" - // ], - // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/healthChecks/{healthCheck}", - // "request": { - // "$ref": "HealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.httpHealthChecks.delete": +// method id "compute.instances.addAccessConfig": -type HttpHealthChecksDeleteCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAddAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpHealthCheck resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete -func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { - c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddAccessConfig: Adds an access config to an instance's network +// interface. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig +func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { + c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck + c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig return c } @@ -53048,7 +69333,7 @@ func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { +func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -53056,7 +69341,7 @@ func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { +func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53064,51 +69349,57 @@ func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
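// A sketch of the addAccessConfig call introduced above: attach an external NAT
// access config to an instance's primary network interface. Assumes a
// *compute.Service as in the earlier sketches; the project, zone, instance, and
// interface names are placeholders ("nic0" is the conventional first interface),
// and the AccessConfig field names follow the generated type (not shown in this diff).
package accessconfigexample

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed version
)

func addExternalIP(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	ac := &compute.AccessConfig{
		Name: "External NAT",
		Type: "ONE_TO_ONE_NAT", // the one-to-one NAT access-config type
	}
	return svc.Instances.AddAccessConfig("my-project", "us-central1-a", "my-instance", "nic0", ac).
		Context(ctx).
		Do()
}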
-func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { +func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksDeleteCall) Header() http.Header { +func (c *InstancesAddAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.delete" call. +// Do executes the "compute.instances.addAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53139,18 +69430,26 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified HttpHealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.httpHealthChecks.delete", + // "description": "Adds an access config to an instance's network interface.", + // "httpMethod": "POST", + // "id": "compute.instances.addAccessConfig", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "zone", + // "instance", + // "networkInterface" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to delete.", + // "instance": { + // "description": "The instance name for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to add to this instance.", + // "location": "query", // "required": true, // "type": "string" // }, @@ -53165,9 +69464,19 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", + // "request": { + // "$ref": "AccessConfig" + // }, // "response": { // "$ref": "Operation" // }, @@ -53179,32 +69488,93 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpHealthChecks.get": +// method id "compute.instances.aggregatedList": -type HttpHealthChecksGetCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HttpHealthCheck resource. Gets a list of -// available HTTP health checks by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get -func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { - c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves aggregated list of all of the instances in +// your project across all regions and zones. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList +func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { + c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { +func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53214,7 +69584,7 @@ func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecks // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { +func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -53222,21 +69592,21 @@ func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthCheck // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { +func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksGetCall) Header() http.Header { +func (c *InstancesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -53248,7 +69618,7 @@ func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -53256,20 +69626,19 @@ func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.get" call. -// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HttpHealthCheck.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instances.aggregatedList" call. +// Exactly one of *InstanceAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *InstanceAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { +func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53288,7 +69657,7 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheck{ + ret := &InstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53300,19 +69669,34 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } return ret, nil // { - // "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.", + // "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones.", // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.get", + // "id": "compute.instances.aggregatedList", // "parameterOrder": [ - // "project", - // "httpHealthCheck" + // "project" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -53323,9 +69707,9 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/aggregated/instances", // "response": { - // "$ref": "HttpHealthCheck" + // "$ref": "InstanceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -53336,24 +69720,59 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } -// method id "compute.httpHealthChecks.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type HttpHealthChecksInsertCall struct { - s *Service - project string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instances.attachDisk": + +type InstancesAttachDiskCall struct { + s *Service + project string + zone string + instance string + attacheddisk *AttachedDisk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpHealthCheck resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert -func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { - c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AttachDisk: Attaches an existing Disk resource to an instance. You +// must first create the disk before you can attach it. It is not +// possible to create and attach a disk at the same time. For more +// information, read Adding a persistent disk to your instance. 
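// A sketch of the compute.instances.aggregatedList call and its Pages helper added
// above, assuming svc is an authenticated *compute.Service built as in the earlier
// sketch. The InstanceAggregatedList.Items layout (scope key to InstancesScopedList)
// comes from elsewhere in this package, and the status filter value is illustrative.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listRunningInstances walks every page of the aggregated listing across all zones.
func listRunningInstances(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.Instances.AggregatedList(project).
		Filter(`status = "RUNNING"`). // filter expression syntax as documented above
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.InstanceAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, inst := range scoped.Instances {
				fmt.Printf("%s: %s\n", scope, inst.Name)
			}
		}
		return nil // a non-nil error here halts the iteration
	})
}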
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk +func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { + c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httphealthcheck = httphealthcheck + c.zone = zone + c.instance = instance + c.attacheddisk = attacheddisk + return c +} + +// ForceAttach sets the optional parameter "forceAttach": Whether to +// force attach the disk even if it's currently attached to another +// instance. +func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { + c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) return c } @@ -53371,7 +69790,7 @@ func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHe // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { +func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { c.urlParams_.Set("requestId", requestId) return c } @@ -53379,7 +69798,7 @@ func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { +func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53387,35 +69806,35 @@ func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { +func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksInsertCall) Header() http.Header { +func (c *InstancesAttachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -53423,19 +69842,21 @@ func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.insert" call. +// Do executes the "compute.instances.attachDisk" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53466,13 +69887,27 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.insert", + // "id": "compute.instances.attachDisk", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { + // "forceAttach": { + // "description": "Whether to force attach the disk even if it's currently attached to another instance.", + // "location": "query", + // "type": "boolean" + // }, + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -53484,11 +69919,18 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", + // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "AttachedDisk" // }, // "response": { // "$ref": "Operation" @@ -53501,157 +69943,105 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpHealthChecks.list": +// method id "compute.instances.delete": -type HttpHealthChecksListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of HttpHealthCheck resources available to -// the specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list -func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { - c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Instance resource. For more +// information, see Stopping or Deleting an Instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete +func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { + c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instance = instance return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
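// A sketch of the compute.instances.attachDisk call added above, assuming svc is an
// authenticated *compute.Service as in the earlier sketch. The AttachedDisk fields
// (Source, DeviceName, AutoDelete) are defined outside this hunk, and the disk URL
// and device name are placeholder values.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// attachExistingDisk attaches an already-created disk; the disk must exist first,
// as the method documentation above notes.
func attachExistingDisk(ctx context.Context, svc *compute.Service, project, zone, instance, diskURL string) error {
	op, err := svc.Instances.AttachDisk(project, zone, instance, &compute.AttachedDisk{
		Source:     diskURL, // selfLink of a disk in the same zone
		DeviceName: "data-disk",
		AutoDelete: false,
	}).ForceAttach(false). // optional parameter documented above
		Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("attachDisk operation:", op.Name, op.Status)
	return nil
}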
-func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { +func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { +func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksListCall) Header() http.Header { +func (c *InstancesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.list" call. -// Exactly one of *HttpHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpHealthCheckList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { +// Do executes the "compute.instances.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53670,7 +70060,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheckList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53682,99 +70072,76 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth } return ret, nil // { - // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.list", + // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", + // "httpMethod": "DELETE", + // "id": "compute.instances.delete", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "instance": { + // "description": "Name of the instance resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", + // "path": "{project}/zones/{zone}/instances/{instance}", // "response": { - // "$ref": "HttpHealthCheckList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.httpHealthChecks.patch": +// method id "compute.instances.deleteAccessConfig": -type HttpHealthChecksPatchCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesDeleteAccessConfigCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. 
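// A sketch of the compute.instances.delete call added above, using the optional
// requestId parameter described in the documentation for idempotent retries.
// Assumes svc is an authenticated *compute.Service; the caller supplies a UUID
// generated once per logical request.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// deleteInstance starts deletion of an instance and returns once the Operation
// has been created (it does not wait for the Operation to finish).
func deleteInstance(ctx context.Context, svc *compute.Service, project, zone, instance, requestID string) error {
	op, err := svc.Instances.Delete(project, zone, instance).
		RequestId(requestID). // a retry with the same ID will not start a second delete
		Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("delete operation:", op.Name)
	return nil
}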
This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch -func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { - c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteAccessConfig: Deletes an access config from an instance's +// network interface. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig +func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { + c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck + c.zone = zone + c.instance = instance + c.urlParams_.Set("accessConfig", accessConfig) + c.urlParams_.Set("networkInterface", networkInterface) return c } @@ -53792,7 +70159,7 @@ func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { +func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -53800,7 +70167,7 @@ func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthCheck // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { +func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53808,56 +70175,52 @@ func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { +func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpHealthChecksPatchCall) Header() http.Header { +func (c *InstancesDeleteAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.patch" call. +// Do executes the "compute.instances.deleteAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53888,18 +70251,33 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.httpHealthChecks.patch", + // "description": "Deletes an access config from an instance's network interface.", + // "httpMethod": "POST", + // "id": "compute.instances.deleteAccessConfig", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "zone", + // "instance", + // "accessConfig", + // "networkInterface" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to patch.", + // "accessConfig": { + // "description": "The name of the access config to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface.", + // "location": "query", // "required": true, // "type": "string" // }, @@ -53914,12 +70292,16 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - // "request": { - // "$ref": "HttpHealthCheck" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", // "response": { // "$ref": "Operation" // }, @@ -53931,32 +70313,52 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.httpHealthChecks.testIamPermissions": +// method id "compute.instances.detachDisk": -type HttpHealthChecksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesDetachDiskCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *HttpHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpHealthChecksTestIamPermissionsCall { - c := &HttpHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DetachDisk: Detaches a disk from an instance. 
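// A sketch of the compute.instances.deleteAccessConfig call added above, assuming
// svc is an authenticated *compute.Service. "External NAT" and "nic0" are the
// conventional default access-config and interface names, used here as placeholders;
// query the instance with get() to confirm the real names.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// removeExternalIP drops the external access config from the first network interface.
func removeExternalIP(ctx context.Context, svc *compute.Service, project, zone, instance string) (*compute.Operation, error) {
	return svc.Instances.DeleteAccessConfig(project, zone, instance, "External NAT", "nic0").
		Context(ctx).Do()
}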
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk +func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { + c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instance = instance + c.urlParams_.Set("deviceName", deviceName) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpHealthChecksTestIamPermissionsCall { +func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53964,35 +70366,30 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *H // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpHealthChecksTestIamPermissionsCall { +func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksTestIamPermissionsCall) Header() http.Header { +func (c *InstancesDetachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -54001,19 +70398,20 @@ func (c *HttpHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Re req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instances.detachDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54032,7 +70430,7 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54044,14 +70442,29 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Detaches a disk from an instance.", // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.testIamPermissions", + // "id": "compute.instances.detachDisk", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance", + // "deviceName" // ], // "parameters": { + // "deviceName": { + // "description": "The device name of the disk to detach. Make a get() request on the instance to view currently attached disks and device names.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "Instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -54059,133 +70472,125 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.httpHealthChecks.update": +// method id "compute.instances.get": -type HttpHealthChecksUpdateCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update -func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { - c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Instance resource. Gets a list of +// available instances by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get +func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { + c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. 
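Editor's note: the hunks below add generated methods on InstancesService. The short sketches interleaved from here on are illustrative usage only and are not part of the vendored diff. This first one exercises the compute.instances.detachDisk builder and the requestId parameter documented above; the package name, the helper name, and the pre-built *compute.Service are assumptions, and the import path is the conventional one for this generated client.

package computeexamples // hypothetical package, for illustration only

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// detachDataDisk issues compute.instances.detachDisk. Reusing the same
// requestID on a retry lets the server discard the duplicate if the
// first, timed-out attempt actually completed.
func detachDataDisk(ctx context.Context, svc *compute.Service, project, zone, instance, deviceName, requestID string) (*compute.Operation, error) {
	return svc.Instances.DetachDisk(project, zone, instance, deviceName).
		RequestId(requestID). // any non-zero UUID, per the parameter description above
		Context(ctx).
		Do()
}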
This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { +func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { +func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksUpdateCall) Header() http.Header { +func (c *InstancesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.instances.get" call. +// Exactly one of *Instance or error will be non-nil. 
Any non-2xx status +// code is an error. Response headers are in either +// *Instance.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54204,7 +70609,7 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Instance{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54216,18 +70621,19 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.httpHealthChecks.update", + // "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instances.get", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "zone", + // "instance" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to update.", + // "instance": { + // "description": "Name of the instance resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -54238,121 +70644,133 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - // "request": { - // "$ref": "HttpHealthCheck" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}", // "response": { - // "$ref": "Operation" + // "$ref": "Instance" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.httpsHealthChecks.delete": +// method id "compute.instances.getGuestAttributes": -type HttpsHealthChecksDeleteCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetGuestAttributesCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpsHealthCheck resource. -func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { - c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetGuestAttributes: Returns the specified guest attributes entry. +func (r *InstancesService) GetGuestAttributes(project string, zone string, instance string) *InstancesGetGuestAttributesCall { + c := &InstancesGetGuestAttributesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck + c.zone = zone + c.instance = instance return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { - c.urlParams_.Set("requestId", requestId) +// QueryPath sets the optional parameter "queryPath": Specifies the +// guest attributes path to be queried. +func (c *InstancesGetGuestAttributesCall) QueryPath(queryPath string) *InstancesGetGuestAttributesCall { + c.urlParams_.Set("queryPath", queryPath) + return c +} + +// VariableKey sets the optional parameter "variableKey": Specifies the +// key for the guest attributes entry. 
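Editor's note (illustrative sketch, same hypothetical package and imports as the detachDataDisk example, plus google.golang.org/api/googleapi): a conditional compute.instances.get using IfNoneMatch, treating http.StatusNotModified as "unchanged" rather than as a failure, as the comments above suggest.

// getIfChanged fetches an instance only if it has changed since etag.
func getIfChanged(ctx context.Context, svc *compute.Service, project, zone, name, etag string) (*compute.Instance, bool, error) {
	inst, err := svc.Instances.Get(project, zone, name).
		IfNoneMatch(etag). // ETag observed on a previous response
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, false, nil // not modified since etag
	}
	if err != nil {
		return nil, false, err
	}
	return inst, true, nil
}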
+func (c *InstancesGetGuestAttributesCall) VariableKey(variableKey string) *InstancesGetGuestAttributesCall { + c.urlParams_.Set("variableKey", variableKey) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { +func (c *InstancesGetGuestAttributesCall) Fields(s ...googleapi.Field) *InstancesGetGuestAttributesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetGuestAttributesCall) IfNoneMatch(entityTag string) *InstancesGetGuestAttributesCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { +func (c *InstancesGetGuestAttributesCall) Context(ctx context.Context) *InstancesGetGuestAttributesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksDeleteCall) Header() http.Header { +func (c *InstancesGetGuestAttributesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getGuestAttributes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.getGuestAttributes" call. +// Exactly one of *GuestAttributes or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *GuestAttributes.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*GuestAttributes, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54371,7 +70789,7 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &GuestAttributes{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54383,18 +70801,19 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified HttpsHealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.httpsHealthChecks.delete", + // "description": "Returns the specified guest attributes entry.", + // "httpMethod": "GET", + // "id": "compute.instances.getGuestAttributes", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone", + // "instance" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to delete.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -54405,49 +70824,64 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "queryPath": { + // "description": "Specifies the guest attributes path to be queried.", + // "location": "query", + // "type": "string" + // }, + // "variableKey": { + // "description": "Specifies the key for the guest attributes entry.", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/zones/{zone}/instances/{instance}/getGuestAttributes", // "response": { - // "$ref": "Operation" + // "$ref": "GuestAttributes" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.httpsHealthChecks.get": +// method id "compute.instances.getIamPolicy": -type HttpsHealthChecksGetCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HttpsHealthCheck resource. Gets a list of -// available HTTPS health checks by making a list() request. -func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { - c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { + c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck + c.zone = zone + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { +func (c *InstancesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54457,7 +70891,7 @@ func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChec // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { +func (c *InstancesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstancesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -54465,21 +70899,21 @@ func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChe // Context sets the context to be used in this call's Do method. 
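Editor's note (illustrative sketch, same hypothetical package/imports): compute.instances.getGuestAttributes with the queryPath and variableKey query parameters documented above; the example path and key are made up.

// readGuestAttribute queries one guest attributes entry on an instance.
func readGuestAttribute(ctx context.Context, svc *compute.Service, project, zone, instance, path, key string) (*compute.GuestAttributes, error) {
	return svc.Instances.GetGuestAttributes(project, zone, instance).
		QueryPath(path).  // e.g. a namespace prefix such as "hostkeys/" (hypothetical)
		VariableKey(key). // a single key within that namespace (hypothetical)
		Context(ctx).
		Do()
}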
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { +func (c *InstancesGetIamPolicyCall) Context(ctx context.Context) *InstancesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksGetCall) Header() http.Header { +func (c *InstancesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -54491,7 +70925,7 @@ func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -54499,20 +70933,21 @@ func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.get" call. -// Exactly one of *HttpsHealthCheck or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheck.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { +// Do executes the "compute.instances.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54531,7 +70966,7 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheck{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54543,32 +70978,40 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } return ret, nil // { - // "description": "Returns the specified HttpsHealthCheck resource. 
Gets a list of available HTTPS health checks by making a list() request.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.get", + // "id": "compute.instances.getIamPolicy", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone", + // "resource" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/zones/{zone}/instances/{resource}/getIamPolicy", // "response": { - // "$ref": "HttpsHealthCheck" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -54579,105 +71022,118 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } -// method id "compute.httpsHealthChecks.insert": +// method id "compute.instances.getSerialPortOutput": -type HttpsHealthChecksInsertCall struct { - s *Service - project string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetSerialPortOutputCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpsHealthCheck resource in the specified project -// using the data included in the request. -func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { - c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetSerialPortOutput: Returns the last 1 MB of serial port output from +// the specified instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput +func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { + c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpshealthcheck = httpshealthcheck + c.zone = zone + c.instance = instance return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
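Editor's note (illustrative sketch, same hypothetical package/imports): compute.instances.getIamPolicy; the Fields selector names are taken from the compute v1 Policy schema and are not shown in this hunk.

// instancePolicy reads the IAM policy attached to an instance. The
// returned *compute.Policy may be empty if no such policy or resource exists.
func instancePolicy(ctx context.Context, svc *compute.Service, project, zone, instanceName string) (*compute.Policy, error) {
	return svc.Instances.GetIamPolicy(project, zone, instanceName).
		Fields("bindings", "etag"). // partial response; field names assumed
		Context(ctx).
		Do()
}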
-// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { - c.urlParams_.Set("requestId", requestId) +// Port sets the optional parameter "port": Specifies which COM or +// serial port to retrieve data from. +func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("port", fmt.Sprint(port)) + return c +} + +// Start sets the optional parameter "start": Returns output starting +// from a specific byte position. Use this to page through output when +// the output is too large to return in a single request. For the +// initial request, leave this field unspecified. For subsequent calls, +// this field should be set to the next value returned in the previous +// call. +func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("start", fmt.Sprint(start)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { +func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { +func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpsHealthChecksInsertCall) Header() http.Header { +func (c *InstancesGetSerialPortOutputCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.getSerialPortOutput" call. +// Exactly one of *SerialPortOutput or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SerialPortOutput.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54696,7 +71152,7 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &SerialPortOutput{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54708,13 +71164,31 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.insert", + // "description": "Returns the last 1 MB of serial port output from the specified instance.", + // "httpMethod": "GET", + // "id": "compute.instances.getSerialPortOutput", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "port": { + // "default": "1", + // "description": "Specifies which COM or serial port to retrieve data from.", + // "format": "int32", + // "location": "query", + // "maximum": "4", + // "minimum": "1", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -54722,113 +71196,60 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "start": { + // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. 
For subsequent calls, this field should be set to the next value returned in the previous call.", + // "format": "int64", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks", - // "request": { - // "$ref": "HttpsHealthCheck" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", // "response": { - // "$ref": "Operation" + // "$ref": "SerialPortOutput" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.httpsHealthChecks.list": +// method id "compute.instances.getShieldedInstanceIdentity": -type HttpsHealthChecksListCall struct { +type InstancesGetShieldedInstanceIdentityCall struct { s *Service project string + zone string + instance string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of HttpsHealthCheck resources available to -// the specified project. -func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { - c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetShieldedInstanceIdentity: Returns the Shielded Instance Identity +// of an instance +func (r *InstancesService) GetShieldedInstanceIdentity(project string, zone string, instance string) *InstancesGetShieldedInstanceIdentityCall { + c := &InstancesGetShieldedInstanceIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
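Editor's note (illustrative sketch, same hypothetical package/imports, plus the standard library strings package): paging through compute.instances.getSerialPortOutput with the start parameter documented above. The Contents and Next response fields come from the compute v1 SerialPortOutput schema and are not shown in this hunk.

// tailSerialConsole reads serial port 1 output until the server reports
// no byte position beyond the one already requested.
func tailSerialConsole(ctx context.Context, svc *compute.Service, project, zone, instance string) (string, error) {
	var buf strings.Builder
	var start int64 // leave zero for the initial request
	for {
		out, err := svc.Instances.GetSerialPortOutput(project, zone, instance).
			Port(1). // COM1; the metadata above allows ports 1-4
			Start(start).
			Context(ctx).
			Do()
		if err != nil {
			return "", err
		}
		buf.WriteString(out.Contents)
		if out.Next <= start { // caught up: no new output was returned
			return buf.String(), nil
		}
		start = out.Next // resume from the position reported by the server
	}
}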
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { - c.urlParams_.Set("pageToken", pageToken) + c.zone = zone + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { +func (c *InstancesGetShieldedInstanceIdentityCall) Fields(s ...googleapi.Field) *InstancesGetShieldedInstanceIdentityCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54838,7 +71259,7 @@ func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { +func (c *InstancesGetShieldedInstanceIdentityCall) IfNoneMatch(entityTag string) *InstancesGetShieldedInstanceIdentityCall { c.ifNoneMatch_ = entityTag return c } @@ -54846,21 +71267,21 @@ func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { +func (c *InstancesGetShieldedInstanceIdentityCall) Context(ctx context.Context) *InstancesGetShieldedInstanceIdentityCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpsHealthChecksListCall) Header() http.Header { +func (c *InstancesGetShieldedInstanceIdentityCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetShieldedInstanceIdentityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -54872,7 +71293,7 @@ func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -54880,19 +71301,21 @@ func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.list" call. -// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// Do executes the "compute.instances.getShieldedInstanceIdentity" call. +// Exactly one of *ShieldedInstanceIdentity or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ShieldedInstanceIdentity.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { +func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOption) (*ShieldedInstanceIdentity, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54911,7 +71334,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheckList{ + ret := &ShieldedInstanceIdentity{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54923,34 +71346,20 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal } return ret, nil // { - // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + // "description": "Returns the Shielded Instance Identity of an instance", // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.list", + // "id": "compute.instances.getShieldedInstanceIdentity", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instance": { + // "description": "Name or id of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -54959,11 +71368,18 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks", + // "path": "{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity", // "response": { - // "$ref": "HttpsHealthCheckList" + // "$ref": "ShieldedInstanceIdentity" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -54974,130 +71390,99 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal } -// Pages invokes f for each page of results. 
-// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.httpsHealthChecks.patch": +// method id "compute.instances.getShieldedVmIdentity": -type HttpsHealthChecksPatchCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetShieldedVmIdentityCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { - c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetShieldedVmIdentity: Returns the Shielded VM Identity of an +// instance +func (r *InstancesService) GetShieldedVmIdentity(project string, zone string, instance string) *InstancesGetShieldedVmIdentityCall { + c := &InstancesGetShieldedVmIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { +func (c *InstancesGetShieldedVmIdentityCall) Fields(s ...googleapi.Field) *InstancesGetShieldedVmIdentityCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
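Editor's note (illustrative sketch, same hypothetical package/imports): the Pages helper shown just above drives list pagination, and the same pattern applies to every generated *ListCall in this file. Filter and MaxResults follow the option descriptions above; the Items and Name fields come from the compute v1 schema and are not shown in this hunk.

// listHealthCheckNames collects HTTPS health check names page by page.
func listHealthCheckNames(ctx context.Context, svc *compute.Service, project string) ([]string, error) {
	var names []string
	call := svc.HttpsHealthChecks.List(project).
		Filter("name != example-check"). // exclude one resource, per the filter syntax above
		MaxResults(100)
	err := call.Pages(ctx, func(page *compute.HttpsHealthCheckList) error {
		for _, hc := range page.Items {
			names = append(names, hc.Name)
		}
		return nil // returning an error here would halt the iteration
	})
	return names, err
}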
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetShieldedVmIdentityCall) IfNoneMatch(entityTag string) *InstancesGetShieldedVmIdentityCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { +func (c *InstancesGetShieldedVmIdentityCall) Context(ctx context.Context) *InstancesGetShieldedVmIdentityCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksPatchCall) Header() http.Header { +func (c *InstancesGetShieldedVmIdentityCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetShieldedVmIdentityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getShieldedVmIdentity") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.getShieldedVmIdentity" call. +// Exactly one of *ShieldedVmIdentity or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ShieldedVmIdentity.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetShieldedVmIdentityCall) Do(opts ...googleapi.CallOption) (*ShieldedVmIdentity, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55116,7 +71501,7 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ShieldedVmIdentity{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55128,18 +71513,19 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.httpsHealthChecks.patch", + // "description": "Returns the Shielded VM Identity of an instance", + // "httpMethod": "GET", + // "id": "compute.instances.getShieldedVmIdentity", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone", + // "instance" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to patch.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -55150,53 +71536,88 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - // "request": { - // "$ref": "HttpsHealthCheck" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/getShieldedVmIdentity", // "response": { - // "$ref": "Operation" + // "$ref": "ShieldedVmIdentity" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.httpsHealthChecks.testIamPermissions": +// method id "compute.instances.insert": -type HttpsHealthChecksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesInsertCall struct { + s *Service + project string + zone string + instance *Instance + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *HttpsHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpsHealthChecksTestIamPermissionsCall { - c := &HttpsHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert +func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { + c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instance = instance + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// SourceInstanceTemplate sets the optional parameter +// "sourceInstanceTemplate": Specifies instance template to create the +// instance. +// +// This field is optional. It can be a full or partial URL. 
For example, +// the following are all valid URLs to an instance template: +// - +// https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate +// - projects/project/global/instanceTemplates/instanceTemplate +// - global/instanceTemplates/instanceTemplate +func (c *InstancesInsertCall) SourceInstanceTemplate(sourceInstanceTemplate string) *InstancesInsertCall { + c.urlParams_.Set("sourceInstanceTemplate", sourceInstanceTemplate) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpsHealthChecksTestIamPermissionsCall { +func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55204,35 +71625,35 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpsHealthChecksTestIamPermissionsCall { +func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksTestIamPermissionsCall) Header() http.Header { +func (c *InstancesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -55240,20 +71661,20 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.R } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instances.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55272,7 +71693,7 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55284,12 +71705,12 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Creates an instance resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.testIamPermissions", + // "id": "compute.instances.insert", // "parameterOrder": [ // "project", - // "resource" + // "zone" // ], // "parameters": { // "project": { @@ -55299,132 +71720,193 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sourceInstanceTemplate": { + // "description": "Specifies instance template to create the instance.\n\nThis field is optional. It can be a full or partial URL. 
For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate \n- projects/project/global/instanceTemplates/instanceTemplate \n- global/instanceTemplates/instanceTemplate", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instances", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "Instance" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.httpsHealthChecks.update": +// method id "compute.instances.list": -type HttpsHealthChecksUpdateCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. -func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { - c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instances contained within the specified +// zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list +func (r *InstancesService) List(project string, zone string) *InstancesListCall { + c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck + c.zone = zone return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
+// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstancesListCall) Filter(filter string) *InstancesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { +func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { +func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksUpdateCall) Header() http.Header { +func (c *InstancesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.list" call. +// Exactly one of *InstanceList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *InstanceList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55443,7 +71925,7 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55455,19 +71937,35 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.httpsHealthChecks.update", + // "description": "Retrieves the list of instances contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.instances.list", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -55477,122 +71975,205 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - // "request": { - // "$ref": "HttpsHealthCheck" - // }, + // "path": "{project}/zones/{zone}/instances", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.images.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ImagesDeleteCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instances.listReferrers": + +type InstancesListReferrersCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified image. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete -func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { - c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListReferrers: Retrieves the list of referrers to instances contained +// within the specified zone. For more information, read Viewing +// Referrers to VM Instances. 
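The list/paging surface added in this hunk (compute.instances.list with its Filter, MaxResults, OrderBy, PageToken setters and the Pages helper) is easiest to see in use. The following is a minimal sketch, not part of the vendored file: it assumes the generated package is imported under its usual google.golang.org/api/compute/v1 path, that the Service value exposes an Instances sub-service, and that InstanceList/Instance carry Items and Name fields as in the rest of the generated schema; none of those details appear in this excerpt, and a real program needs an authenticated HTTP client.

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// An authenticated *http.Client is required for real calls; the default
	// client is used here only so the sketch compiles and runs.
	svc, err := compute.New(http.DefaultClient) // constructor assumed from the generated package
	if err != nil {
		log.Fatal(err)
	}
	call := svc.Instances.List("my-project", "us-central1-a").
		Filter(`name != example-instance`). // filter syntax per the Filter doc comment above
		MaxResults(100)                     // acceptable values are 0 to 500; 500 is the default
	// Pages stops early if the callback returns a non-nil error and restores
	// the original page token when it returns, as noted in the Pages helper above.
	err = call.Pages(ctx, func(page *compute.InstanceList) error {
		for _, inst := range page.Items { // Items/Name assumed from the generated schema
			fmt.Println(inst.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}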
+func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { + c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + c.zone = zone + c.instance = instance + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { + c.urlParams_.Set("orderBy", orderBy) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. 
This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { +func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { +func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesDeleteCall) Header() http.Header { +func (c *InstancesListReferrersCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.listReferrers" call. +// Exactly one of *InstanceListReferrers or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceListReferrers.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55611,7 +72192,7 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceListReferrers{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55623,21 +72204,45 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified image.", - // "httpMethod": "DELETE", - // "id": "compute.images.delete", + // "description": "Retrieves the list of referrers to instances contained within the specified zone. For more information, read Viewing Referrers to VM Instances.", + // "httpMethod": "GET", + // "id": "compute.instances.listReferrers", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instance" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to delete.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "instance": { + // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -55645,46 +72250,69 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{image}", + // "path": "{project}/zones/{zone}/instances/{instance}/referrers", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceListReferrers" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.images.deprecate": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ImagesDeprecateCall struct { - s *Service - project string - image string - deprecationstatus *DeprecationStatus - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instances.reset": + +type InstancesResetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Deprecate: Sets the deprecation status of an image. -// -// If an empty request body is given, clears the deprecation status -// instead. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate -func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { - c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Reset: Performs a reset on the instance. This is a hard reset the VM +// does not do a graceful shutdown. For more information, see Resetting +// an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset +func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { + c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - c.deprecationstatus = deprecationstatus + c.zone = zone + c.instance = instance return c } @@ -55702,7 +72330,7 @@ func (r *ImagesService) Deprecate(project string, image string, deprecationstatu // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { +func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { c.urlParams_.Set("requestId", requestId) return c } @@ -55710,7 +72338,7 @@ func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { +func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55718,35 +72346,30 @@ func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { +func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesDeprecateCall) Header() http.Header { +func (c *InstancesResetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -55754,20 +72377,21 @@ func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.deprecate" call. +// Do executes the "compute.instances.reset" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55798,16 +72422,17 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + // "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. 
For more information, see Resetting an instance.", // "httpMethod": "POST", - // "id": "compute.images.deprecate", + // "id": "compute.instances.reset", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instance" // ], // "parameters": { - // "image": { - // "description": "Image name.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -55824,12 +72449,16 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images/{image}/deprecate", - // "request": { - // "$ref": "DeprecationStatus" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/reset", // "response": { // "$ref": "Operation" // }, @@ -55841,97 +72470,111 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.images.get": +// method id "compute.instances.resume": -type ImagesGetCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesResumeCall struct { + s *Service + project string + zone string + instance string + instancesresumerequest *InstancesResumeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified image. Gets a list of available images by -// making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get -func (r *ImagesService) Get(project string, image string) *ImagesGetCall { - c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resume: Resumes an instance that was suspended using the +// instances().suspend method. +func (r *InstancesService) Resume(project string, zone string, instance string, instancesresumerequest *InstancesResumeRequest) *InstancesResumeCall { + c := &InstancesResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + c.zone = zone + c.instance = instance + c.instancesresumerequest = instancesresumerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesResumeCall) RequestId(requestId string) *InstancesResumeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { +func (c *InstancesResumeCall) Fields(s ...googleapi.Field) *InstancesResumeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { +func (c *InstancesResumeCall) Context(ctx context.Context) *InstancesResumeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesGetCall) Header() http.Header { +func (c *InstancesResumeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesResumeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesresumerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/resume") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.get" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { +// Do executes the "compute.instances.resume" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55950,7 +72593,7 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Image{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55962,16 +72605,17 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { } return ret, nil // { - // "description": "Returns the specified image. Gets a list of available images by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.images.get", + // "description": "Resumes an instance that was suspended using the instances().suspend method.", + // "httpMethod": "POST", + // "id": "compute.instances.resume", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instance" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to return.", + // "instance": { + // "description": "Name of the instance resource to resume.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -55983,267 +72627,139 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // } - // }, - // "path": "{project}/global/images/{image}", - // "response": { - // "$ref": "Image" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.images.getFromFamily": - -type ImagesGetFromFamilyCall struct { - s *Service - project string - family string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetFromFamily: Returns the latest image that is part of an image -// family and is not deprecated. -func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { - c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.family = family - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ImagesGetFromFamilyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "family": c.family, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.getFromFamily" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Image{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the latest image that is part of an image family and is not deprecated.", - // "httpMethod": "GET", - // "id": "compute.images.getFromFamily", - // "parameterOrder": [ - // "project", - // "family" - // ], - // "parameters": { - // "family": { - // "description": "Name of the image family to search for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/family/{family}", + // "path": "{project}/zones/{zone}/instances/{instance}/resume", + // "request": { + // "$ref": "InstancesResumeRequest" + // }, // "response": { - // "$ref": "Image" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.getIamPolicy": +// method id "compute.instances.setDeletionProtection": -type ImagesGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetDeletionProtectionCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. 
-func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { - c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDeletionProtection: Sets deletion protection on the instance. +func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { + c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone c.resource = resource return c } +// DeletionProtection sets the optional parameter "deletionProtection": +// Whether the resource should be protected against deletion. +func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesGetIamPolicyCall { +func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *ImagesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetIamPolicyCall) Context(ctx context.Context) *ImagesGetIamPolicyCall { +func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesGetIamPolicyCall) Header() http.Header { +func (c *InstancesSetDeletionProtectionCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.setDeletionProtection" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56262,7 +72778,7 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56274,14 +72790,21 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.images.getIamPolicy", + // "description": "Sets deletion protection on the instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setDeletionProtection", // "parameterOrder": [ // "project", + // "zone", // "resource" // ], // "parameters": { + // "deletionProtection": { + // "default": "true", + // "description": "Whether the resource should be protected against deletion.", + // "location": "query", + // "type": "boolean" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -56289,52 +72812,60 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.insert": +// method id "compute.instances.setDiskAutoDelete": -type ImagesInsertCall struct { +type InstancesSetDiskAutoDeleteCall struct { s *Service project string - image *Image + zone string + instance string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates an image in the specified project using the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert -func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { - c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to +// an instance. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete +func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { + c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - return c -} - -// ForceCreate sets the optional parameter "forceCreate": Force image -// creation if true. -func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { - c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) + c.zone = zone + c.instance = instance + c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) + c.urlParams_.Set("deviceName", deviceName) return c } @@ -56352,7 +72883,7 @@ func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { +func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -56360,7 +72891,7 @@ func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { +func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56368,35 +72899,30 @@ func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { +func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesInsertCall) Header() http.Header { +func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -56404,19 +72930,21 @@ func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.insert" call. +// Do executes the "compute.instances.setDiskAutoDelete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56447,18 +72975,37 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates an image in the specified project using the data included in the request.", + // "description": "Sets the auto-delete flag for a disk attached to an instance.", // "httpMethod": "POST", - // "id": "compute.images.insert", + // "id": "compute.instances.setDiskAutoDelete", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance", + // "autoDelete", + // "deviceName" // ], // "parameters": { - // "forceCreate": { - // "description": "Force image creation if true.", + // "autoDelete": { + // "description": "Whether to auto-delete the disk when the instance is deleted.", // "location": "query", + // "required": true, // "type": "boolean" // }, + // "deviceName": { + // "description": "The device name of the disk to modify. Make a get() request on the instance to view currently attached disks and device names.", + // "location": "query", + // "pattern": "\\w[\\w.-]{0,254}", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -56470,182 +73017,113 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images", - // "request": { - // "$ref": "Image" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.list": +// method id "compute.instances.setIamPolicy": -type ImagesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of custom images available to the specified -// project. Custom images are images you create that belong to your -// project. This method does not get any images that belong to other -// projects, including publicly-available images, like Debian 8. If you -// want to get a list of publicly-available images, use this method to -// make a request to the respective image project, such as debian-cloud -// or windows-cloud. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list -func (r *ImagesService) List(project string) *ImagesListCall { - c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstancesSetIamPolicyCall { + c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. 
For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *ImagesListCall) Filter(filter string) *ImagesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.zone = zone + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { +func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { +func (c *InstancesSetIamPolicyCall) Context(ctx context.Context) *InstancesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesListCall) Header() http.Header { +func (c *InstancesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.list" call. -// Exactly one of *ImageList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ImageList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { +// Do executes the "compute.instances.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56664,7 +73142,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ImageList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56676,104 +73154,99 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { } return ret, nil // { - // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", - // "httpMethod": "GET", - // "id": "compute.images.list", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.instances.setIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images", + // "path": "{project}/zones/{zone}/instances/{resource}/setIamPolicy", + // "request": { + // "$ref": "ZoneSetPolicyRequest" + // }, // "response": { - // "$ref": "ImageList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.images.setIamPolicy": +// method id "compute.instances.setLabels": -type ImagesSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetLabelsCall struct { + s *Service + project string + zone string + instance string + instancessetlabelsrequest *InstancesSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *ImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *ImagesSetIamPolicyCall { - c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets labels on an instance. To learn more about labels, +// read the Labeling Resources documentation. +func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { + c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.zone = zone + c.instance = instance + c.instancessetlabelsrequest = instancessetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolicyCall { +func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56781,35 +73254,35 @@ func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolic // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesSetIamPolicyCall) Context(ctx context.Context) *ImagesSetIamPolicyCall { +func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesSetIamPolicyCall) Header() http.Header { +func (c *InstancesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -56818,19 +73291,20 @@ func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
-func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56849,7 +73323,7 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56861,14 +73335,22 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.images.setIamPolicy", + // "id": "compute.instances.setLabels", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -56876,20 +73358,25 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/setIamPolicy", + // "path": "{project}/zones/{zone}/instances/{instance}/setLabels", // "request": { - // "$ref": "GlobalSetPolicyRequest" + // "$ref": "InstancesSetLabelsRequest" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -56899,32 +73386,53 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } -// method id "compute.images.setLabels": +// method id "compute.instances.setMachineResources": -type ImagesSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMachineResourcesCall struct { + s *Service + project string + zone string + instance string + instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an image. To learn more about labels, -// read the Labeling Resources documentation. -func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { - c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMachineResources: Changes the number and/or type of accelerator +// for a stopped instance to the values specified in the request. +func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { + c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.zone = zone + c.instance = instance + c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { +func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56932,35 +73440,35 @@ func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { +func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesSetLabelsCall) Header() http.Header { +func (c *InstancesSetMachineResourcesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -56969,19 +73477,20 @@ func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.setLabels" call. +// Do executes the "compute.instances.setMachineResources" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57012,14 +73521,22 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation.", + // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", // "httpMethod": "POST", - // "id": "compute.images.setLabels", + // "id": "compute.instances.setMachineResources", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -57027,17 +73544,22 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/setLabels", + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", // "request": { - // "$ref": "GlobalSetLabelsRequest" + // "$ref": "InstancesSetMachineResourcesRequest" // }, // "response": { // "$ref": "Operation" @@ -57050,32 +73572,53 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.images.testIamPermissions": +// method id "compute.instances.setMachineType": -type ImagesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMachineTypeCall struct { + s *Service + project string + zone string + instance string + instancessetmachinetyperequest *InstancesSetMachineTypeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { - c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMachineType: Changes the machine type for a stopped instance to +// the machine type specified in the request. +func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { + c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instance = instance + c.instancessetmachinetyperequest = instancessetmachinetyperequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { +func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57083,35 +73626,35 @@ func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { +func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesTestIamPermissionsCall) Header() http.Header { +func (c *InstancesSetMachineTypeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -57120,19 +73663,20 @@ func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, er req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instances.setMachineType" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57151,7 +73695,7 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57163,14 +73707,22 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", // "httpMethod": "POST", - // "id": "compute.images.testIamPermissions", + // "id": "compute.instances.setMachineType", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -57178,66 +73730,56 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstancesSetMachineTypeRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.abandonInstances": +// method id "compute.instances.setMetadata": -type InstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMetadataCall struct { + s *Service + project string + zone string + instance string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Flags the specified instances to be removed from -// the managed instance group. Abandoning an instance does not delete -// the instance, but it does remove the instance from any target pools -// that are applied by the managed instance group. This method reduces -// the targetSize of the managed instance group by the number of -// instances that you abandon. This operation is marked as DONE when the -// action is scheduled even if the instances have not yet been removed -// from the group. You must separately verify the status of the -// abandoning action with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { - c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMetadata: Sets metadata for the specified instance to the data +// included in the request. 
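Illustrative aside (not part of the vendored diff): a minimal sketch of how the newly generated compute.instances.setMachineType call might be used. It assumes this file is imported as the compute package from google.golang.org/api/compute/v1, that a *compute.Service has already been constructed, that the service exposes the usual Instances field of this generated client, and that InstancesSetMachineTypeRequest carries a MachineType field; only the SetMachineType signature and its Operation result are shown in the diff itself.

package computeexample

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// setMachineType is a hypothetical helper wrapping the generated call shown
// above. The instance must already be stopped, per the method description.
func setMachineType(ctx context.Context, svc *compute.Service, project, zone, instance, machineType string) (*compute.Operation, error) {
	req := &compute.InstancesSetMachineTypeRequest{
		// MachineType is assumed to accept a machine type URL such as
		// "zones/us-central1-a/machineTypes/n1-standard-4".
		MachineType: machineType,
	}
	// Context wires ctx into Do; Do performs the POST and returns the
	// asynchronous Operation described in the generated docs.
	return svc.Instances.SetMachineType(project, zone, instance, req).Context(ctx).Do()
}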
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata +func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { + c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest + c.instance = instance + c.metadata = metadata return c } @@ -57255,7 +73797,7 @@ func (r *InstanceGroupManagersService) AbandonInstances(project string, zone str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { c.urlParams_.Set("requestId", requestId) return c } @@ -57263,7 +73805,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57271,35 +73813,35 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *InstancesSetMetadataCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -57307,21 +73849,21 @@ func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.abandonInstances" call. +// Do executes the "compute.instances.setMetadata" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57352,18 +73894,19 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Sets metadata for the specified instance to the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.abandonInstances", + // "id": "compute.instances.setMetadata", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -57380,15 +73923,16 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", // "request": { - // "$ref": "InstanceGroupManagersAbandonInstancesRequest" + // "$ref": "Metadata" // }, // "response": { // "$ref": "Operation" @@ -57401,157 +73945,113 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } -// method id "compute.instanceGroupManagers.aggregatedList": +// method id "compute.instances.setMinCpuPlatform": -type InstanceGroupManagersAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetMinCpuPlatformCall struct { + s *Service + project string + zone string + instance string + instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves the list of managed instance groups and -// groups them by zone. -func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { - c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMinCpuPlatform: Changes the minimum CPU platform that this +// instance should use. This method can only be called on a stopped +// instance. For more information, read Specifying a Minimum CPU +// Platform. +func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { + c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instance = instance + c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. 
The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
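Another illustrative aside: the setMetadata call added above, combined with the new optional requestId parameter. The SetMetadata signature, the RequestId setter, and the Operation return type all appear in the diff; the internal fields of compute.Metadata do not, so the value is taken as given here (in practice it would be read back from the instance first so its fingerprint is current).

package computeexample

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// setInstanceMetadata wraps the generated setMetadata call. Supplying the
// same requestID on a retry lets the server ignore the duplicate request,
// as the requestId description above explains.
func setInstanceMetadata(ctx context.Context, svc *compute.Service, project, zone, instance, requestID string, md *compute.Metadata) (*compute.Operation, error) {
	call := svc.Instances.SetMetadata(project, zone, instance, md)
	if requestID != "" {
		call = call.RequestId(requestID)
	}
	return call.Context(ctx).Do()
}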
-func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { +func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { +func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { +func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } - -// Do executes the "compute.instanceGroupManagers.aggregatedList" call. 
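One more sketch, this time for the new setMinCpuPlatform call whose plumbing appears above. The InstancesSetMinCpuPlatformRequest type is referenced in the diff, but its field names are not, so the MinCpuPlatform field below is an assumption based on the method's purpose.

package computeexample

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// setMinCPUPlatform asks Compute Engine to require at least the named CPU
// platform; per the generated description the instance must be stopped.
func setMinCPUPlatform(ctx context.Context, svc *compute.Service, project, zone, instance, platform string) (*compute.Operation, error) {
	req := &compute.InstancesSetMinCpuPlatformRequest{
		MinCpuPlatform: platform, // assumed field; e.g. "Intel Skylake"
	}
	return svc.Instances.SetMinCpuPlatform(project, zone, instance, req).Context(ctx).Do()
}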
-// Exactly one of *InstanceGroupManagerAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { + +// Do executes the "compute.instances.setMinCpuPlatform" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57570,7 +74070,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57582,99 +74082,78 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of managed instance groups and groups them by zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.aggregatedList", + // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.", + // "httpMethod": "POST", + // "id": "compute.instances.setMinCpuPlatform", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/instanceGroupManagers", + // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", + // "request": { + // "$ref": "InstancesSetMinCpuPlatformRequest" + // }, // "response": { - // "$ref": "InstanceGroupManagerAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.delete": +// method id "compute.instances.setScheduling": -type InstanceGroupManagersDeleteCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetSchedulingCall struct { + s *Service + project string + zone string + instance string + scheduling *Scheduling + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. Note that the instance group must not belong -// to a backend service. Read Deleting an instance group for more -// information. -func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { - c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetScheduling: Sets an instance's scheduling options. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling +func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { + c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager + c.instance = instance + c.scheduling = scheduling return c } @@ -57692,7 +74171,7 @@ func (r *InstanceGroupManagersService) Delete(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { +func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { c.urlParams_.Set("requestId", requestId) return c } @@ -57700,7 +74179,7 @@ func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { +func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57708,52 +74187,57 @@ func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { +func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteCall) Header() http.Header { +func (c *InstancesSetSchedulingCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.delete" call. +// Do executes the "compute.instances.setScheduling" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57784,18 +74268,19 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroupManagers.delete", + // "description": "Sets an instance's scheduling options.", + // "httpMethod": "POST", + // "id": "compute.instances.setScheduling", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group to delete.", + // "instance": { + // "description": "Instance name for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -57812,13 +74297,17 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + // "request": { + // "$ref": "Scheduling" + // }, // "response": { // "$ref": "Operation" // }, @@ -57830,41 +74319,28 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.deleteInstances": +// method id "compute.instances.setServiceAccount": -type InstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetServiceAccountCall struct { + s *Service + project string + 
zone string + instance string + instancessetserviceaccountrequest *InstancesSetServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteInstances: Flags the specified instances in the managed -// instance group for immediate deletion. The instances are also removed -// from any target pools of which they were a member. This method -// reduces the targetSize of the managed instance group by the number of -// instances that you delete. This operation is marked as DONE when the -// action is scheduled even if the instances are still being deleted. -// You must separately verify the status of the deleting action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { - c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetServiceAccount: Sets the service account on the instance. For more +// information, read Changing the service account and access scopes for +// an instance. +func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { + c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest + c.instance = instance + c.instancessetserviceaccountrequest = instancessetserviceaccountrequest return c } @@ -57882,7 +74358,7 @@ func (r *InstanceGroupManagersService) DeleteInstances(project string, zone stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { c.urlParams_.Set("requestId", requestId) return c } @@ -57890,7 +74366,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) * // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57898,35 +74374,35 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
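A sketch for the setServiceAccount call introduced above. The diff shows the SetServiceAccount signature and the InstancesSetServiceAccountRequest type but not its fields, so Email and Scopes below are assumptions about that request struct.

package computeexample

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// setServiceAccount swaps the service account and access scopes attached to
// an instance, addressed by project, zone and name as in the generated call.
func setServiceAccount(ctx context.Context, svc *compute.Service, project, zone, instance, email string, scopes []string) (*compute.Operation, error) {
	req := &compute.InstancesSetServiceAccountRequest{
		Email:  email,  // assumed field: service account email address
		Scopes: scopes, // assumed field: OAuth scopes to grant the instance
	}
	return svc.Instances.SetServiceAccount(project, zone, instance, req).Context(ctx).Do()
}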
-func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { +func (c *InstancesSetServiceAccountCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -57934,21 +74410,21 @@ func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http. } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.deleteInstances" call. +// Do executes the "compute.instances.setServiceAccount" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57979,18 +74455,19 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. 
You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.deleteInstances", + // "id": "compute.instances.setServiceAccount", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance resource to start.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -58007,15 +74484,16 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", // "request": { - // "$ref": "InstanceGroupManagersDeleteInstancesRequest" + // "$ref": "InstancesSetServiceAccountRequest" // }, // "response": { // "$ref": "Operation" @@ -58028,100 +74506,113 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } -// method id "compute.instanceGroupManagers.get": +// method id "compute.instances.setShieldedInstanceIntegrityPolicy": -type InstanceGroupManagersGetCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetShieldedInstanceIntegrityPolicyCall struct { + s *Service + project string + zone string + instance string + shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. Gets a list of available managed instance groups by making a -// list() request. -func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { - c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance +// integrity policy for an instance. You can only use this method on a +// running instance. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. 
+func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager + c.instance = instance + c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
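A sketch for setShieldedInstanceIntegrityPolicy, which the method description above says uses PATCH semantics with JSON merge patch rules. Only the ShieldedInstanceIntegrityPolicy type is visible in the diff; the UpdateAutoLearnPolicy field is an assumption about that struct.

package computeexample

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// relearnIntegrityBaseline patches the Shielded Instance integrity policy of
// a running instance via the generated call shown above.
func relearnIntegrityBaseline(ctx context.Context, svc *compute.Service, project, zone, instance string) (*compute.Operation, error) {
	policy := &compute.ShieldedInstanceIntegrityPolicy{
		UpdateAutoLearnPolicy: true, // assumed field: re-learn the baseline on next boot
	}
	return svc.Instances.SetShieldedInstanceIntegrityPolicy(project, zone, instance, policy).Context(ctx).Do()
}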
-func (c *InstanceGroupManagersGetCall) Header() http.Header { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58140,7 +74631,7 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManager{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58152,18 +74643,19 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group. 
Gets a list of available managed instance groups by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.get", + // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name or id of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -58174,53 +74666,57 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + // "request": { + // "$ref": "ShieldedInstanceIntegrityPolicy" + // }, // "response": { - // "$ref": "InstanceGroupManager" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.insert": +// method id "compute.instances.setShieldedVmIntegrityPolicy": -type InstanceGroupManagersInsertCall struct { - s *Service - project string - zone string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetShieldedVmIntegrityPolicyCall struct { + s *Service + project string + zone string + instance string + shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, instances in -// the group are created using the specified instance template. 
This -// operation is marked as DONE when the group is created even if the -// instances in the group have not yet been created. You must separately -// verify the status of the individual instances with the -// listmanagedinstances method. -// -// A managed instance group can have up to 1000 VM instances per group. -// Please contact Cloud Support if you need an increase in this limit. -func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { - c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetShieldedVmIntegrityPolicy: Sets the Shielded VM integrity policy +// for a VM instance. You can only use this method on a running VM +// instance. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. +func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone string, instance string, shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy) *InstancesSetShieldedVmIntegrityPolicyCall { + c := &InstancesSetShieldedVmIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instancegroupmanager = instancegroupmanager + c.instance = instance + c.shieldedvmintegritypolicy = shieldedvmintegritypolicy return c } @@ -58238,7 +74734,7 @@ func (r *InstanceGroupManagersService) Insert(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { +func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedVmIntegrityPolicyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -58246,7 +74742,7 @@ func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { +func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedVmIntegrityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58254,56 +74750,57 @@ func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { +func (c *InstancesSetShieldedVmIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedVmIntegrityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersInsertCall) Header() http.Header { +func (c *InstancesSetShieldedVmIntegrityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmintegritypolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.insert" call. +// Do executes the "compute.instances.setShieldedVmIntegrityPolicy" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58334,14 +74831,22 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.insert", + // "description": "Sets the Shielded VM integrity policy for a VM instance. You can only use this method on a running VM instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.setShieldedVmIntegrityPolicy", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -58355,15 +74860,16 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "path": "{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "ShieldedVmIntegrityPolicy" // }, // "response": { // "$ref": "Operation" @@ -58376,159 +74882,112 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.list": - -type InstanceGroupManagersListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of managed instance groups that are contained -// within the specified project and zone. -func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { - c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} +// method id "compute.instances.setTags": -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
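// Illustrative sketch (editorial aside, not part of this vendored diff or of the
// generated client): one way a caller might drive the
// InstancesService.SetShieldedVmIntegrityPolicy wrapper added above. It assumes
// the snippet compiles against the generated package, so *InstancesService,
// ShieldedVmIntegrityPolicy and Operation refer to the types defined there; the
// ShieldedVmIntegrityPolicy fields are not reproduced in this hunk, so the
// literal is left for the caller to populate, and the function name is a
// placeholder.
func exampleSetShieldedVmIntegrityPolicy(ctx context.Context, r *InstancesService, project, zone, instance string) (*Operation, error) {
	policy := &ShieldedVmIntegrityPolicy{} // populate per the generated schema (fields not shown in this hunk)
	return r.SetShieldedVmIntegrityPolicy(project, zone, instance, policy).
		Context(ctx). // the pending HTTP PATCH is aborted if ctx is canceled
		Do()          // returns the Operation described in the method metadata above
}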
-func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { - c.urlParams_.Set("filter", filter) - return c +type InstancesSetTagsCall struct { + s *Service + project string + zone string + instance string + tags *Tags + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// SetTags: Sets network tags for the specified instance to the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags +func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { + c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.tags = tags return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { +func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { +func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersListCall) Header() http.Header { +func (c *InstancesSetTagsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.list" call. -// Exactly one of *InstanceGroupManagerList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupManagerList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { +// Do executes the "compute.instances.setTags" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58547,7 +75006,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58559,35 +75018,20 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta } return ret, nil // { - // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.list", + // "description": "Sets network tags for the specified instance to the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setTags", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -58597,140 +75041,60 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "path": "{project}/zones/{zone}/instances/{instance}/setTags", + // "request": { + // "$ref": "Tags" + // }, // "response": { - // "$ref": "InstanceGroupManagerList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
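// Illustrative sketch (editorial aside, not part of this vendored diff or of the
// generated client): calling the new compute.instances.setTags method through
// the generated wrappers shown above. The exampleSetTags name, the empty Tags
// literal and the requestId value are placeholders; the Tags schema is not
// reproduced in this hunk, and the request ID may be any caller-generated
// non-zero UUID.
func exampleSetTags(ctx context.Context, r *InstancesService, project, zone, instance string) (*Operation, error) {
	tags := &Tags{} // fill in per the generated Tags schema (not shown in this hunk)
	return r.SetTags(project, zone, instance, tags).
		RequestId("11111111-1111-1111-1111-111111111111"). // optional idempotency key; a retry with the same ID is ignored server-side
		Context(ctx).
		Do()
}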
-func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.listManagedInstances": +// method id "compute.instances.simulateMaintenanceEvent": -type InstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSimulateMaintenanceEventCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListManagedInstances: Lists all of the instances in the managed -// instance group. Each instance in the list has a currentAction, which -// indicates the action that the managed instance group is performing on -// the instance. For example, if the group is still creating an -// instance, the currentAction is CREATING. If a previous action failed, -// the list displays the errors for that failed action. -func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { - c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SimulateMaintenanceEvent: Simulates a maintenance event on the +// instance. +func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { + c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
-func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "order_by": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("pageToken", pageToken) + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { +func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58738,21 +75102,21 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { +func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { +func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -58761,7 +75125,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (* var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -58769,23 +75133,21 @@ func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (* } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. -// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head -// er or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { +// Do executes the "compute.instances.simulateMaintenanceEvent" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58804,7 +75166,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagersListManagedInstancesResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58816,44 +75178,22 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. 
For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + // "description": "Simulates a maintenance event on the instance.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.listManagedInstances", + // "id": "compute.instances.simulateMaintenanceEvent", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -58862,72 +75202,45 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", // "response": { - // "$ref": "InstanceGroupManagersListManagedInstancesResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListManagedInstancesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.patch": +// method id "compute.instances.start": -type InstanceGroupManagersPatchCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesStartCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listManagedInstances method. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. -func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { - c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Start: Starts an instance that was stopped using the instances().stop +// method. For more information, see Restart an instance. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start +func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { + c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.instance = instance return c } @@ -58945,7 +75258,7 @@ func (r *InstanceGroupManagersService) Patch(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { +func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { c.urlParams_.Set("requestId", requestId) return c } @@ -58953,7 +75266,7 @@ func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { +func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58961,57 +75274,52 @@ func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { +func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersPatchCall) Header() http.Header { +func (c *InstancesStartCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.patch" call. +// Do executes the "compute.instances.start" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59042,18 +75350,19 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.instanceGroupManagers.patch", + // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.start", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "instance": { + // "description": "Name of the instance resource to start.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -59070,16 +75379,14 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/start", // "response": { // "$ref": "Operation" // }, @@ -59091,40 +75398,28 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instanceGroupManagers.recreateInstances": +// method id "compute.instances.startWithEncryptionKey": -type InstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesStartWithEncryptionKeyCall struct { + s *Service + project string + zone string + instance string + instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RecreateInstances: Flags the specified instances in the managed -// 
instance group to be immediately recreated. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the flag is set -// even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { - c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// StartWithEncryptionKey: Starts an instance that was stopped using the +// instances().stop method. For more information, see Restart an +// instance. +func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { + c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest + c.instance = instance + c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest return c } @@ -59142,7 +75437,7 @@ func (r *InstanceGroupManagersService) RecreateInstances(project string, zone st // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59150,7 +75445,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59158,35 +75453,35 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -59194,21 +75489,21 @@ func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*htt } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Do executes the "compute.instances.startWithEncryptionKey" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59239,18 +75534,19 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Starts an instance that was stopped using the instances().stop method. 
For more information, see Restart an instance.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.recreateInstances", + // "id": "compute.instances.startWithEncryptionKey", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance resource to start.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -59267,15 +75563,16 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", // "request": { - // "$ref": "InstanceGroupManagersRecreateInstancesRequest" + // "$ref": "InstancesStartWithEncryptionKeyRequest" // }, // "response": { // "$ref": "Operation" @@ -59288,45 +75585,30 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } -// method id "compute.instanceGroupManagers.resize": +// method id "compute.instances.stop": -type InstanceGroupManagersResizeCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesStopCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the managed instance group. If you increase the size, -// the group creates new instances using the current instance template. -// If you decrease the size, the group deletes instances. The resize -// operation is marked DONE when the resize actions are scheduled even -// if the group has not yet added or deleted any instances. You must -// separately verify the status of the creating or deleting actions with -// the listmanagedinstances method. -// -// When resizing down, the instance group arbitrarily chooses the order -// in which VMs are deleted. The group takes into account some VM -// attributes when making the selection including: -// -// + The status of the VM instance. + The health of the VM instance. + -// The instance template version the VM is based on. + For regional -// managed instance groups, the location of the VM instance. -// -// This list is subject to change. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { - c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Stop: Stops a running instance, shutting it down cleanly, and allows +// you to restart the instance at a later time. 
Stopped instances do not +// incur VM usage charges while they are stopped. However, resources +// that the VM is using, such as persistent disks and static IP +// addresses, will continue to be charged until they are deleted. For +// more information, see Stopping an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop +func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { + c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) + c.instance = instance return c } @@ -59344,7 +75626,7 @@ func (r *InstanceGroupManagersService) Resize(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { +func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59352,7 +75634,7 @@ func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { +func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59360,21 +75642,21 @@ func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { +func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersResizeCall) Header() http.Header { +func (c *InstancesStopCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -59383,7 +75665,7 @@ func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -59391,21 +75673,21 @@ func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.resize" call. +// Do executes the "compute.instances.stop" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59436,19 +75718,19 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. 
For more information, see Stopping an instance.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resize", + // "id": "compute.instances.stop", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager", - // "size" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance resource to stop.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -59464,21 +75746,15 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope // "location": "query", // "type": "string" // }, - // "size": { - // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - // "format": "int32", - // "location": "query", - // "required": true, - // "type": "integer" - // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "{project}/zones/{zone}/instances/{instance}/stop", // "response": { // "$ref": "Operation" // }, @@ -59490,40 +75766,37 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.resizeAdvanced": +// method id "compute.instances.suspend": -type InstanceGroupManagersResizeAdvancedCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSuspendCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ResizeAdvanced: Resizes the managed instance group with advanced -// configuration options like disabling creation retries. This is an -// extended version of the resize method. -// -// If you increase the size of the instance group, the group creates new -// instances using the current instance template. If you decrease the -// size, the group deletes instances. The resize operation is marked -// DONE when the resize actions are scheduled even if the group has not -// yet added or deleted any instances. You must separately verify the -// status of the creating, creatingWithoutRetries, or deleting actions -// with the get or listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. 
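// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored diff): calling the
// generated compute.instances.stop method shown in this hunk. The import
// path, the svc value and the project/zone/instance inputs are assumptions
// of this example; only the InstancesService.Stop call chain comes from the
// generated code above.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path for the generated package
)

func stopInstance(ctx context.Context, svc *compute.Service, project, zone, instance string) error {
	// Stop shuts the VM down cleanly; per the method description, attached
	// persistent disks and static IPs continue to be billed until deleted.
	op, err := svc.Instances.Stop(project, zone, instance).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("stop requested, operation %q\n", op.Name)
	return nil
}
// ---------------------------------------------------------------------------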
-func (r *InstanceGroupManagersService) ResizeAdvanced(project string, zone string, instanceGroupManager string, instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest) *InstanceGroupManagersResizeAdvancedCall { - c := &InstanceGroupManagersResizeAdvancedCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Suspend: This method suspends a running instance, saving its state to +// persistent storage, and allows you to resume the instance at a later +// time. Suspended instances incur reduced per-minute, virtual machine +// usage charges while they are suspended. Any resources the virtual +// machine is using, such as persistent disks and static IP addresses, +// will continue to be charged until they are deleted. +func (r *InstancesService) Suspend(project string, zone string, instance string) *InstancesSuspendCall { + c := &InstancesSuspendCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersresizeadvancedrequest = instancegroupmanagersresizeadvancedrequest + c.instance = instance + return c +} + +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If +// true, discard the contents of any attached localSSD partitions. +// Default value is false (== preserve localSSD data). +func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { + c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c } @@ -59541,7 +75814,7 @@ func (r *InstanceGroupManagersService) ResizeAdvanced(project string, zone strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *InstanceGroupManagersResizeAdvancedCall { +func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59549,7 +75822,7 @@ func (c *InstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *I // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeAdvancedCall { +func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59557,35 +75830,30 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersResizeAdvancedCall) Context(ctx context.Context) *InstanceGroupManagersResizeAdvancedCall { +func (c *InstancesSuspendCall) Context(ctx context.Context) *InstancesSuspendCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersResizeAdvancedCall) Header() http.Header { +func (c *InstancesSuspendCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersresizeadvancedrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/suspend") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -59593,21 +75861,21 @@ func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.R } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.resizeAdvanced" call. +// Do executes the "compute.instances.suspend" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59638,18 +75906,24 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. 
Suspended instances incur reduced per-minute, virtual machine usage charges while they are suspended. Any resources the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resizeAdvanced", + // "id": "compute.instances.suspend", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "discardLocalSsd": { + // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false (== preserve localSSD data).", + // "location": "query", + // "type": "boolean" + // }, + // "instance": { + // "description": "Name of the instance resource to suspend.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -59666,16 +75940,14 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", - // "request": { - // "$ref": "InstanceGroupManagersResizeAdvancedRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/suspend", // "response": { // "$ref": "Operation" // }, @@ -59687,53 +75959,34 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio } -// method id "compute.instanceGroupManagers.setAutoHealingPolicies": +// method id "compute.instances.testIamPermissions": -type InstanceGroupManagersSetAutoHealingPoliciesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetAutoHealingPolicies: Modifies the autohealing policies. -// [Deprecated] This method is deprecated. Please use Patch instead. -func (r *InstanceGroupManagersService) SetAutoHealingPolicies(project string, zone string, instanceGroupManager string, instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest) *InstanceGroupManagersSetAutoHealingPoliciesCall { - c := &InstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
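// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored diff): the generated
// compute.instances.suspend method with its optional discardLocalSsd query
// parameter. The import path and the ctx/svc/project/zone/instance values
// are assumptions of this example.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func suspendInstance(ctx context.Context, svc *compute.Service, project, zone, instance string) error {
	// DiscardLocalSsd(false) keeps local SSD data, matching the documented
	// default; pass true to discard attached local SSD partitions instead.
	_, err := svc.Instances.Suspend(project, zone, instance).
		DiscardLocalSsd(false).
		Context(ctx).
		Do()
	return err
}
// ---------------------------------------------------------------------------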
+func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { + c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetautohealingrequest = instancegroupmanagerssetautohealingrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *InstanceGroupManagersSetAutoHealingPoliciesCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59741,35 +75994,35 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *InstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { +func (c *InstancesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetautohealingrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -59777,21 +76030,21 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setAutoHealingPolicies" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59810,7 +76063,7 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59822,21 +76075,15 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Modifies the autohealing policies. [Deprecated] This method is deprecated. 
Please use Patch instead.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setAutoHealingPolicies", + // "id": "compute.instances.testIamPermissions", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -59844,55 +76091,61 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", // "request": { - // "$ref": "InstanceGroupManagersSetAutoHealingRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.setInstanceTemplate": +// method id "compute.instances.updateAccessConfig": -type InstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Specifies the instance template to use when -// creating new instances in this group. The templates for existing -// instances in the group do not change unless you recreate them. 
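// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored diff): the generated
// compute.instances.testIamPermissions method. The permission names and the
// svc/project/zone/instance values are example assumptions; the request and
// response types are the ones referenced by the generated code above.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func checkInstancePermissions(ctx context.Context, svc *compute.Service, project, zone, instance string) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.instances.stop", "compute.instances.suspend"},
	}
	resp, err := svc.Instances.TestIamPermissions(project, zone, instance, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	// resp.Permissions is the subset of the requested permissions the caller holds.
	fmt.Println("granted:", resp.Permissions)
	return nil
}
// ---------------------------------------------------------------------------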
-func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { - c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateAccessConfig: Updates the specified access config from an +// instance's network interface with the data included in the request. +// This method supports PATCH semantics and uses the JSON merge patch +// format and processing rules. +func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { + c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig return c } @@ -59910,7 +76163,7 @@ func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *InstancesUpdateAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59918,7 +76171,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId strin // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59926,35 +76179,35 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Fie // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstancesUpdateAccessConfigCall) Context(ctx context.Context) *InstancesUpdateAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *InstancesUpdateAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -59962,21 +76215,21 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*h } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. +// Do executes the "compute.instances.updateAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60007,18 +76260,26 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", + // "description": "Updates the specified access config from an instance's network interface with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setInstanceTemplate", + // "id": "compute.instances.updateAccessConfig", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance", + // "networkInterface" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "The instance name for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface where the access config is attached.", + // "location": "query", // "required": true, // "type": "string" // }, @@ -60035,15 +76296,16 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", // "request": { - // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + // "$ref": "AccessConfig" // }, // "response": { // "$ref": "Operation" @@ -60056,32 +76318,29 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } -// method id "compute.instanceGroupManagers.setTargetPools": +// method id "compute.instances.updateDisplayDevice": -type InstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateDisplayDeviceCall struct { + s *Service + project string + zone string + instance string + displaydevice *DisplayDevice + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all instances in -// this managed instance group are assigned. The target pools -// automatically apply to all of the instances in the managed instance -// group. This operation is marked DONE when you make the request even -// if the instances have not yet been added to their target pools. The -// change might take some time to apply to all of the instances in the -// group depending on the size of the group. -func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { - c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateDisplayDevice: Updates the Display config for a VM instance. +// You can only use this method on a stopped VM instance. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. 
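// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored diff): the generated
// compute.instances.updateAccessConfig method. The interface name "nic0",
// the access-config name and the NAT IP are placeholder assumptions.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func updateExternalIP(ctx context.Context, svc *compute.Service, project, zone, instance, natIP string) error {
	ac := &compute.AccessConfig{
		Name:  "External NAT", // name of the existing access config (assumed)
		Type:  "ONE_TO_ONE_NAT",
		NatIP: natIP,
	}
	// The network interface name is sent as a query parameter by the generated call.
	_, err := svc.Instances.UpdateAccessConfig(project, zone, instance, "nic0", ac).Context(ctx).Do()
	return err
}
// ---------------------------------------------------------------------------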
+func (r *InstancesService) UpdateDisplayDevice(project string, zone string, instance string, displaydevice *DisplayDevice) *InstancesUpdateDisplayDeviceCall { + c := &InstancesUpdateDisplayDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + c.instance = instance + c.displaydevice = displaydevice return c } @@ -60099,7 +76358,7 @@ func (r *InstanceGroupManagersService) SetTargetPools(project string, zone strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstancesUpdateDisplayDeviceCall) RequestId(requestId string) *InstancesUpdateDisplayDeviceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -60107,7 +76366,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *I // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstancesUpdateDisplayDeviceCall) Fields(s ...googleapi.Field) *InstancesUpdateDisplayDeviceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60115,57 +76374,57 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstancesUpdateDisplayDeviceCall) Context(ctx context.Context) *InstancesUpdateDisplayDeviceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *InstancesUpdateDisplayDeviceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.displaydevice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Do executes the "compute.instances.updateDisplayDevice" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60196,18 +76455,19 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setTargetPools", + // "description": "Updates the Display config for a VM instance. You can only use this method on a stopped VM instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateDisplayDevice", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -60224,15 +76484,16 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "path": "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice", // "request": { - // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + // "$ref": "DisplayDevice" // }, // "response": { // "$ref": "Operation" @@ -60245,34 +76506,54 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } -// method id "compute.instanceGroupManagers.testIamPermissions": +// method id "compute.instances.updateNetworkInterface": -type InstanceGroupManagersTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateNetworkInterfaceCall struct { + s *Service + project string + zone string + instance string + networkinterface *NetworkInterface + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceGroupManagersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupManagersTestIamPermissionsCall { - c := &InstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateNetworkInterface: Updates an instance's network interface. This +// method follows PATCH semantics. +func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { + c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.networkinterface = networkinterface + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
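// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored diff): the generated
// compute.instances.updateDisplayDevice method, which issues a PATCH and,
// per its description, only applies to a stopped VM. The import path and
// the input values are assumptions of this example.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func enableVirtualDisplay(ctx context.Context, svc *compute.Service, project, zone, instance string) error {
	_, err := svc.Instances.
		UpdateDisplayDevice(project, zone, instance, &compute.DisplayDevice{EnableDisplay: true}).
		Context(ctx).
		Do()
	return err
}
// ---------------------------------------------------------------------------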
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *InstancesUpdateNetworkInterfaceCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersTestIamPermissionsCall { +func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesUpdateNetworkInterfaceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60280,37 +76561,37 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Fiel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupManagersTestIamPermissionsCall { +func (c *InstancesUpdateNetworkInterfaceCall) Context(ctx context.Context) *InstancesUpdateNetworkInterfaceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersTestIamPermissionsCall) Header() http.Header { +func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkinterface) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -60318,19 +76599,19 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*ht googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "resource": c.resource, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instances.updateNetworkInterface" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60349,7 +76630,7 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60361,15 +76642,29 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.testIamPermissions", + // "description": "Updates an instance's network interface. This method follows PATCH semantics.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateNetworkInterface", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instance", + // "networkInterface" // ], // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to update.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -60377,11 +76672,9 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, // "zone": { @@ -60392,46 +76685,44 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "NetworkInterface" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.update": +// method id "compute.instances.updateShieldedInstanceConfig": -type InstanceGroupManagersUpdateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateShieldedInstanceConfigCall struct { + s *Service + project string + zone string + instance string + shieldedinstanceconfig *ShieldedInstanceConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is updated even if the instances in the group have not yet been -// updated. You must separately verify the status of the individual -// instances with the listManagedInstances method. -func (r *InstanceGroupManagersService) Update(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersUpdateCall { - c := &InstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateShieldedInstanceConfig: Updates the Shielded Instance config +// for an instance. You can only use this method on a stopped instance. +// This method supports PATCH semantics and uses the JSON merge patch +// format and processing rules. +func (r *InstancesService) UpdateShieldedInstanceConfig(project string, zone string, instance string, shieldedinstanceconfig *ShieldedInstanceConfig) *InstancesUpdateShieldedInstanceConfigCall { + c := &InstancesUpdateShieldedInstanceConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.instance = instance + c.shieldedinstanceconfig = shieldedinstanceconfig return c } @@ -60449,7 +76740,7 @@ func (r *InstanceGroupManagersService) Update(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
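// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored diff): the generated
// compute.instances.updateNetworkInterface method. The interface name, alias
// range and the idea of reusing a fingerprint from a prior get are example
// assumptions; the call chain itself mirrors the generated code above.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func addAliasRange(ctx context.Context, svc *compute.Service, project, zone, instance, fingerprint string) error {
	ni := &compute.NetworkInterface{
		Fingerprint: fingerprint, // typically taken from a preceding instances.get response
		AliasIpRanges: []*compute.AliasIpRange{
			{IpCidrRange: "10.128.1.0/24"}, // placeholder range
		},
	}
	_, err := svc.Instances.UpdateNetworkInterface(project, zone, instance, "nic0", ni).Context(ctx).Do()
	return err
}
// ---------------------------------------------------------------------------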
-func (c *InstanceGroupManagersUpdateCall) RequestId(requestId string) *InstanceGroupManagersUpdateCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) RequestId(requestId string) *InstancesUpdateShieldedInstanceConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -60457,7 +76748,7 @@ func (c *InstanceGroupManagersUpdateCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdateCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateShieldedInstanceConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60465,57 +76756,57 @@ func (c *InstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersUpdateCall) Context(ctx context.Context) *InstanceGroupManagersUpdateCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) Context(ctx context.Context) *InstancesUpdateShieldedInstanceConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersUpdateCall) Header() http.Header { +func (c *InstancesUpdateShieldedInstanceConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceconfig) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.update" call. +// Do executes the "compute.instances.updateShieldedInstanceConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60546,18 +76837,19 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listManagedInstances method.", - // "httpMethod": "PUT", - // "id": "compute.instanceGroupManagers.update", + // "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateShieldedInstanceConfig", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "instance": { + // "description": "Name or id of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -60574,15 +76866,16 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "ShieldedInstanceConfig" // }, // "response": { // "$ref": "Operation" @@ -60595,28 +76888,29 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroups.addInstances": +// method id "compute.instances.updateShieldedVmConfig": -type InstanceGroupsAddInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateShieldedVmConfigCall struct { + s *Service + project string + zone string + instance string + shieldedvmconfig *ShieldedVmConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddInstances: Adds a list of instances to the specified instance -// group. All of the instances in the instance group must be in the same -// network/subnetwork. Read Adding instances for more information. 
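Aside (not part of the diff): a minimal, hedged sketch of calling the new compute.instances.updateShieldedInstanceConfig method that this hunk adds. The call chain (UpdateShieldedInstanceConfig, RequestId, Context, Do returning *Operation) comes from the generated code above; the import path, the client construction via compute.New, the ShieldedInstanceConfig field names, and all project/zone/instance values are assumptions for illustration and should be adjusted to the vendored package actually being updated here.

// Hedged usage sketch for compute.instances.updateShieldedInstanceConfig.
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumption: adjust to the vendored package this diff touches
)

func main() {
	ctx := context.Background()

	// Application Default Credentials with the cloud-platform scope listed in the method metadata above.
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc) // newer client versions use compute.NewService instead
	if err != nil {
		log.Fatal(err)
	}

	// Field names are assumed from the public ShieldedInstanceConfig schema;
	// per the method description, the target instance must be stopped.
	cfg := &compute.ShieldedInstanceConfig{
		EnableVtpm:                true,
		EnableIntegrityMonitoring: true,
	}

	// Issues PATCH {project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig
	// and returns an Operation, as the generated Do method documents.
	op, err := svc.Instances.
		UpdateShieldedInstanceConfig("my-project", "us-central1-a", "my-instance", cfg).
		RequestId("00000000-0000-4000-8000-000000000001"). // optional idempotency token; must be a non-zero UUID
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation %s started", op.Name)
}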
-func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { - c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateShieldedVmConfig: Updates the Shielded VM config for a VM +// instance. You can only use this method on a stopped VM instance. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. +func (r *InstancesService) UpdateShieldedVmConfig(project string, zone string, instance string, shieldedvmconfig *ShieldedVmConfig) *InstancesUpdateShieldedVmConfigCall { + c := &InstancesUpdateShieldedVmConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest + c.instance = instance + c.shieldedvmconfig = shieldedvmconfig return c } @@ -60634,7 +76928,7 @@ func (r *InstanceGroupsService) AddInstances(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { +func (c *InstancesUpdateShieldedVmConfigCall) RequestId(requestId string) *InstancesUpdateShieldedVmConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -60642,7 +76936,7 @@ func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { +func (c *InstancesUpdateShieldedVmConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateShieldedVmConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60650,57 +76944,57 @@ func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { +func (c *InstancesUpdateShieldedVmConfigCall) Context(ctx context.Context) *InstancesUpdateShieldedVmConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsAddInstancesCall) Header() http.Header { +func (c *InstancesUpdateShieldedVmConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateShieldedVmConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmconfig) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateShieldedVmConfig") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.addInstances" call. +// Do executes the "compute.instances.updateShieldedVmConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60731,18 +77025,19 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.addInstances", + // "description": "Updates the Shielded VM config for a VM instance. You can only use this method on a stopped VM instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateShieldedVmConfig", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where you are adding instances.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -60759,15 +77054,16 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + // "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedVmConfig", // "request": { - // "$ref": "InstanceGroupsAddInstancesRequest" + // "$ref": "ShieldedVmConfig" // }, // "response": { // "$ref": "Operation" @@ -60780,9 +77076,9 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instanceGroups.aggregatedList": +// method id "compute.interconnectAttachments.aggregatedList": -type InstanceGroupsAggregatedListCall struct { +type InterconnectAttachmentsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -60791,10 +77087,10 @@ type InstanceGroupsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves the list of instance groups and sorts them -// by zone. -func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { - c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of interconnect +// attachments. +func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { + c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -60821,7 +77117,7 @@ func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAg // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { +func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -60832,7 +77128,7 @@ func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroups // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { +func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -60849,7 +77145,7 @@ func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *Instanc // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { +func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -60857,7 +77153,7 @@ func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGrou // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { +func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -60865,7 +77161,7 @@ func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *Instance // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { +func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60875,7 +77171,7 @@ func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *Instanc // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { +func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -60883,21 +77179,21 @@ func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { +func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsAggregatedListCall) Header() http.Header { +func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -60909,7 +77205,7 @@ func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -60922,14 +77218,15 @@ func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.aggregatedList" call. -// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { +// Do executes the "compute.interconnectAttachments.aggregatedList" call. +// Exactly one of *InterconnectAttachmentAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60948,7 +77245,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupAggregatedList{ + ret := &InterconnectAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60960,9 +77257,9 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In } return ret, nil // { - // "description": "Retrieves the list of instance groups and sorts them by zone.", + // "description": "Retrieves an aggregated list of interconnect attachments.", // "httpMethod": "GET", - // "id": "compute.instanceGroups.aggregatedList", + // "id": "compute.interconnectAttachments.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -60998,9 +77295,9 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // "type": "string" // } // }, - // "path": "{project}/aggregated/instanceGroups", + // "path": "{project}/aggregated/interconnectAttachments", // "response": { - // "$ref": "InstanceGroupAggregatedList" + // "$ref": "InterconnectAttachmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -61014,7 +77311,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { +func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -61032,27 +77329,24 @@ func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*In } } -// method id "compute.instanceGroups.delete": +// method id "compute.interconnectAttachments.delete": -type InstanceGroupsDeleteCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsDeleteCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance group. The instances in the -// group are not deleted. Note that instance group must not belong to a -// backend service. Read Deleting an instance group for more -// information. -func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { - c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified interconnect attachment. 
+func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { + c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup + c.region = region + c.interconnectAttachment = interconnectAttachment return c } @@ -61070,7 +77364,7 @@ func (r *InstanceGroupsService) Delete(project string, zone string, instanceGrou // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { +func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -61078,7 +77372,7 @@ func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { +func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61086,21 +77380,21 @@ func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { +func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsDeleteCall) Header() http.Header { +func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -61109,29 +77403,29 @@ func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.delete" call. +// Do executes the "compute.interconnectAttachments.delete" call. // Exactly one of *Operation or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61162,18 +77456,19 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "description": "Deletes the specified interconnect attachment.", // "httpMethod": "DELETE", - // "id": "compute.instanceGroups.delete", + // "id": "compute.interconnectAttachments.delete", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroup" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group to delete.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -61184,19 +77479,20 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "response": { // "$ref": "Operation" // }, @@ -61208,33 +77504,32 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instanceGroups.get": +// method id "compute.interconnectAttachments.get": -type InstanceGroupsGetCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsGetCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance group. Gets a list of available -// instance groups by making a list() request. -func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { - c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified interconnect attachment. +func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { + c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup + c.region = region + c.interconnectAttachment = interconnectAttachment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { +func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61244,7 +77539,7 @@ func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { +func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -61252,21 +77547,21 @@ func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { +func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsGetCall) Header() http.Header { +func (c *InterconnectAttachmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -61278,7 +77573,7 @@ func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -61286,21 +77581,21 @@ func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.interconnectAttachments.get" call. +// Exactly one of *InterconnectAttachment or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectAttachment.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { +func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61319,7 +77614,7 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroup{ + ret := &InterconnectAttachment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61331,18 +77626,19 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup } return ret, nil // { - // "description": "Returns the specified instance group. 
Gets a list of available instance groups by making a list() request.", + // "description": "Returns the specified interconnect attachment.", // "httpMethod": "GET", - // "id": "compute.instanceGroups.get", + // "id": "compute.interconnectAttachments.get", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroup" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -61353,16 +77649,17 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "response": { - // "$ref": "InstanceGroup" + // "$ref": "InterconnectAttachment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -61373,25 +77670,25 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup } -// method id "compute.instanceGroups.insert": +// method id "compute.interconnectAttachments.insert": -type InstanceGroupsInsertCall struct { - s *Service - project string - zone string - instancegroup *InstanceGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsInsertCall struct { + s *Service + project string + region string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance group in the specified project using the -// parameters that are included in the request. -func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { - c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an InterconnectAttachment in the specified project +// using the data included in the request. +func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { + c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instancegroup = instancegroup + c.region = region + c.interconnectattachment = interconnectattachment return c } @@ -61409,7 +77706,7 @@ func (r *InstanceGroupsService) Insert(project string, zone string, instancegrou // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
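Aside (not part of the diff): a hedged sketch of the regional InterconnectAttachments Get and Delete calls introduced above. It assumes a *compute.Service built as in the earlier sketch; the import path and the project/region/attachment names are placeholders, not values taken from this diff.

// Hedged sketch: read then delete one interconnect attachment.
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumption: adjust to the vendored package
)

func inspectAndDelete(ctx context.Context, svc *compute.Service) error {
	// GET {project}/regions/{region}/interconnectAttachments/{interconnectAttachment}
	att, err := svc.InterconnectAttachments.
		Get("my-project", "us-central1", "my-attachment").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("found attachment %s", att.Name)

	// DELETE on the same resource returns an Operation, as the generated Do method documents.
	op, err := svc.InterconnectAttachments.
		Delete("my-project", "us-central1", "my-attachment").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("delete operation %s started", op.Name)
	return nil
}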
-func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { +func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -61417,7 +77714,7 @@ func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsIn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { +func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61425,35 +77722,35 @@ func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { +func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsInsertCall) Header() http.Header { +func (c *InterconnectAttachmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -61462,19 +77759,19 @@ func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.insert" call. +// Do executes the "compute.interconnectAttachments.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61505,12 +77802,12 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", + // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.insert", + // "id": "compute.interconnectAttachments.insert", // "parameterOrder": [ // "project", - // "zone" + // "region" // ], // "parameters": { // "project": { @@ -61520,21 +77817,22 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the instance group.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups", + // "path": "{project}/regions/{region}/interconnectAttachments", // "request": { - // "$ref": "InstanceGroup" + // "$ref": "InterconnectAttachment" // }, // "response": { // "$ref": "Operation" @@ -61547,24 +77845,24 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instanceGroups.list": +// method id "compute.interconnectAttachments.list": -type InstanceGroupsListCall struct { +type InterconnectAttachmentsListCall struct { s *Service project string - zone string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of instance groups that are located in the -// specified project and zone. -func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { - c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect attachments contained within +// the specified region. +func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { + c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone + c.region = region return c } @@ -61590,7 +77888,7 @@ func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroup // explicitly. 
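Aside (not part of the diff): a hedged sketch of compute.interconnectAttachments.insert as added above. The Insert signature and the Operation return are taken from the generated code; the InterconnectAttachment field names (Name, Interconnect, Router) are assumed from the public schema, and every URL and identifier below is a placeholder.

// Hedged sketch: create a dedicated interconnect attachment in a region.
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumption: adjust to the vendored package
)

func createAttachment(ctx context.Context, svc *compute.Service) error {
	// Field names are assumptions based on the published InterconnectAttachment resource.
	att := &compute.InterconnectAttachment{
		Name:         "my-attachment",
		Interconnect: "https://www.googleapis.com/compute/v1/projects/my-project/global/interconnects/my-interconnect",
		Router:       "https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/routers/my-router",
	}

	// POST {project}/regions/{region}/interconnectAttachments, returning an Operation.
	op, err := svc.InterconnectAttachments.
		Insert("my-project", "us-central1", att).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("insert operation %s started", op.Name)
	return nil
}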
For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { +func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { c.urlParams_.Set("filter", filter) return c } @@ -61601,7 +77899,7 @@ func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { +func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -61618,7 +77916,7 @@ func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsLis // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { +func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -61626,7 +77924,7 @@ func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { +func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -61634,7 +77932,7 @@ func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsList // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { +func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61644,7 +77942,7 @@ func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsLis // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { +func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { c.ifNoneMatch_ = entityTag return c } @@ -61652,21 +77950,21 @@ func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsLi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { +func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsListCall) Header() http.Header { +func (c *InterconnectAttachmentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -61678,7 +77976,7 @@ func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -61687,19 +77985,19 @@ func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.list" call. -// Exactly one of *InstanceGroupList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.interconnectAttachments.list" call. +// Exactly one of *InterconnectAttachmentList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectAttachmentList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { +func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61718,7 +78016,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupList{ + ret := &InterconnectAttachmentList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61730,12 +78028,12 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou } return ret, nil // { - // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + // "description": "Retrieves the list of interconnect attachments contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.instanceGroups.list", + // "id": "compute.interconnectAttachments.list", // "parameterOrder": [ // "project", - // "zone" + // "region" // ], // "parameters": { // "filter": { @@ -61768,16 +78066,17 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups", + // "path": "{project}/regions/{region}/interconnectAttachments", // "response": { - // "$ref": "InstanceGroupList" + // "$ref": "InterconnectAttachmentList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -61791,7 +78090,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { +func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -61809,96 +78108,54 @@ func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGrou } } -// method id "compute.instanceGroups.listInstances": +// method id "compute.interconnectAttachments.patch": -type InstanceGroupsListInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsPatchCall struct { + s *Service + project string + region string + interconnectAttachment string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group. 
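Aside (not part of the diff): a hedged sketch of paging through the regional list call with the generated Pages helper shown above. List, Filter, MaxResults, and Pages all appear in this hunk; the filter expression follows the parameter documentation, and the project and region values are placeholders.

// Hedged sketch: iterate over every interconnect attachment in a region.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumption: adjust to the vendored package
)

func listAttachments(ctx context.Context, svc *compute.Service) error {
	call := svc.InterconnectAttachments.
		List("my-project", "us-central1").
		Filter("name != example-attachment"). // same filter grammar documented on the call
		MaxResults(100)

	// Pages drives the pageToken/nextPageToken loop and stops when f returns an error.
	return call.Pages(ctx, func(page *compute.InterconnectAttachmentList) error {
		for _, att := range page.Items {
			fmt.Println(att.Name)
		}
		return nil
	})
}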
-func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { - c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified interconnect attachment with the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { + c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.region = region + c.interconnectAttachment = interconnectAttachment + c.interconnectattachment = interconnectattachment return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { +func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61906,57 +78163,57 @@ func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { +func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsListInstancesCall) Header() http.Header { +func (c *InterconnectAttachmentsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.listInstances" call. -// Exactly one of *InstanceGroupsListInstances or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupsListInstances.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { +// Do executes the "compute.interconnectAttachments.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61975,7 +78232,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupsListInstances{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61987,44 +78244,22 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins } return ret, nil // { - // "description": "Lists the instances in the specified instance group.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.listInstances", + // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.interconnectAttachments.patch", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroup" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "instanceGroup": { - // "description": "The name of the instance group from which you want to generate a list of included instances.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to patch.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -62032,75 +78267,55 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "request": { - // "$ref": "InstanceGroupsListInstancesRequest" + // "$ref": "InterconnectAttachment" // }, // "response": { - // "$ref": "InstanceGroupsListInstances" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.removeInstances": - -type InstanceGroupsRemoveInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +// method id "compute.interconnectAttachments.setLabels": -// RemoveInstances: Removes one or more instances from the specified -// instance group, but does not delete those instances. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration before the VM instance is removed or deleted. -func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { - c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +type InterconnectAttachmentsSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an InterconnectAttachment. To learn +// more about labels, read the Labeling Resources documentation. +func (r *InterconnectAttachmentsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *InterconnectAttachmentsSetLabelsCall { + c := &InterconnectAttachmentsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest return c } @@ -62118,7 +78333,7 @@ func (r *InstanceGroupsService) RemoveInstances(project string, zone string, ins // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { +func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *InterconnectAttachmentsSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -62126,7 +78341,7 @@ func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *Instanc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { +func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62134,35 +78349,35 @@ func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { +func (c *InterconnectAttachmentsSetLabelsCall) Context(ctx context.Context) *InterconnectAttachmentsSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { +func (c *InterconnectAttachmentsSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -62170,21 +78385,21 @@ func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.removeInstances" call. +// Do executes the "compute.interconnectAttachments.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62215,25 +78430,26 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", + // "description": "Sets the labels on an InterconnectAttachment. To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.removeInstances", + // "id": "compute.interconnectAttachments.setLabels", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroup" + // "region", + // "resource" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the specified instances will be removed.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "The region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -62242,16 +78458,17 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", // "request": { - // "$ref": "InstanceGroupsRemoveInstancesRequest" + // "$ref": "RegionSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -62264,52 +78481,34 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.instanceGroups.setNamedPorts": +// method id "compute.interconnectAttachments.testIamPermissions": -type InstanceGroupsSetNamedPortsCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetNamedPorts: Sets the named ports for the specified instance group. 
-func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { - c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InterconnectAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectAttachmentsTestIamPermissionsCall { + c := &InterconnectAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { +func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62317,35 +78516,35 @@ func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { +func (c *InterconnectAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectAttachmentsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { +func (c *InterconnectAttachmentsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -62353,21 +78552,21 @@ func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.setNamedPorts" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectAttachments.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62386,7 +78585,7 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62398,21 +78597,15 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Sets the named ports for the specified instance group.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.setNamedPorts", + // "id": "compute.interconnectAttachments.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroup" + // "region", + // "resource" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the named ports are updated.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -62420,119 +78613,128 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", // "request": { - // "$ref": "InstanceGroupsSetNamedPortsRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.testIamPermissions": +// method id "compute.interconnectLocations.get": -type InstanceGroupsTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectLocationsGetCall struct { + s *Service + project string + interconnectLocation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupsTestIamPermissionsCall { - c := &InstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the details for the specified interconnect location. +// Gets a list of available interconnect locations by making a list() +// request. +func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { + c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.interconnectLocation = interconnectLocation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupsTestIamPermissionsCall { +func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupsTestIamPermissionsCall { +func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsTestIamPermissionsCall) Header() http.Header { +func (c *InterconnectLocationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "interconnectLocation": c.interconnectLocation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.interconnectLocations.get" call. +// Exactly one of *InterconnectLocation or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *InterconnectLocation.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62551,7 +78753,7 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &InterconnectLocation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62563,43 +78765,263 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.testIamPermissions", + // "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.get", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "interconnectLocation" // ], // "parameters": { + // "interconnectLocation": { + // "description": "Name of the interconnect location to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // } + // }, + // "path": "{project}/global/interconnectLocations/{interconnectLocation}", + // "response": { + // "$ref": "InterconnectLocation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.interconnectLocations.list": + +type InterconnectLocationsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of interconnect locations available to the +// specified project. +func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { + c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. 
However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InterconnectLocationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectLocations.list" call. +// Exactly one of *InterconnectLocationList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectLocationList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectLocationList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of interconnect locations available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/interconnectLocations", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "InterconnectLocationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -62610,25 +79032,43 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } -// method id "compute.instanceTemplates.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceTemplatesDeleteCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.interconnects.delete": + +type InterconnectsDeleteCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance template. Deleting an instance -// template is permanent and cannot be undone. It's not possible to -// delete templates which are in use by an instance group. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete -func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { - c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified interconnect. +func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { + c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.interconnect = interconnect return c } @@ -62646,7 +79086,7 @@ func (r *InstanceTemplatesService) Delete(project string, instanceTemplate strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { +func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -62654,7 +79094,7 @@ func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTempl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { +func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62662,21 +79102,21 @@ func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { +func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceTemplatesDeleteCall) Header() http.Header { +func (c *InterconnectsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -62685,7 +79125,7 @@ func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -62693,20 +79133,20 @@ func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.delete" call. +// Do executes the "compute.interconnects.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62737,18 +79177,18 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. 
It's not possible to delete templates which are in use by an instance group.", + // "description": "Deletes the specified interconnect.", // "httpMethod": "DELETE", - // "id": "compute.instanceTemplates.delete", + // "id": "compute.interconnects.delete", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "interconnect" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template to delete.", + // "interconnect": { + // "description": "Name of the interconnect to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -62765,7 +79205,7 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/global/interconnects/{interconnect}", // "response": { // "$ref": "Operation" // }, @@ -62777,32 +79217,31 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.get": +// method id "compute.interconnects.get": -type InstanceTemplatesGetCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InterconnectsGetCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance template. Gets a list of -// available instance templates by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get -func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { - c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified interconnect. Get a list of available +// interconnects by making a list() request. +func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { + c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.interconnect = interconnect return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { +func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62812,7 +79251,7 @@ func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplat // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { +func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -62820,21 +79259,21 @@ func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTempla // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { +func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesGetCall) Header() http.Header { +func (c *InterconnectsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -62846,7 +79285,7 @@ func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -62854,20 +79293,20 @@ func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.get" call. -// Exactly one of *InstanceTemplate or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplate.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { +// Do executes the "compute.interconnects.get" call. +// Exactly one of *Interconnect or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Interconnect.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62886,7 +79325,7 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceTemplate{ + ret := &Interconnect{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62898,18 +79337,18 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe } return ret, nil // { - // "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request.", + // "description": "Returns the specified interconnect. 
Get a list of available interconnects by making a list() request.", // "httpMethod": "GET", - // "id": "compute.instanceTemplates.get", + // "id": "compute.interconnects.get", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "interconnect" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template.", + // "interconnect": { + // "description": "Name of the interconnect to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -62921,9 +79360,9 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/global/interconnects/{interconnect}", // "response": { - // "$ref": "InstanceTemplate" + // "$ref": "Interconnect" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -62934,31 +79373,31 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe } -// method id "compute.instanceTemplates.getIamPolicy": +// method id "compute.interconnects.getDiagnostics": -type InstanceTemplatesGetIamPolicyCall struct { +type InterconnectsGetDiagnosticsCall struct { s *Service project string - resource string + interconnect string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *InstanceTemplatesService) GetIamPolicy(project string, resource string) *InstanceTemplatesGetIamPolicyCall { - c := &InstanceTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetDiagnostics: Returns the interconnectDiagnostics for the specified +// interconnect. +func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { + c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.interconnect = interconnect return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetIamPolicyCall { +func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62968,7 +79407,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *Instan // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetIamPolicyCall { +func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { c.ifNoneMatch_ = entityTag return c } @@ -62976,21 +79415,21 @@ func (c *InstanceTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *Insta // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
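The InterconnectsGetCall generated above follows the builder pattern used throughout this client: obtain the call from the service, optionally chain Fields/IfNoneMatch/Context, then Do. A minimal usage sketch, not part of the vendored code; it assumes Application Default Credentials, placeholder project and interconnect names, and the beta import path for the generated package (the version actually vendored here may differ).

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path; match the vendored version
)

func main() {
	ctx := context.Background()

	// Application Default Credentials with the compute scope.
	httpClient, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" and "my-interconnect" are placeholders.
	ic, err := svc.Interconnects.Get("my-project", "my-interconnect").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("interconnect %s: %s\n", ic.Name, ic.SelfLink)
}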
-func (c *InstanceTemplatesGetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesGetIamPolicyCall { +func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { +func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -63002,7 +79441,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Respons var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}/getDiagnostics") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -63010,20 +79449,21 @@ func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.interconnects.getDiagnostics" call. +// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63042,7 +79482,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &InterconnectsGetDiagnosticsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63054,32 +79494,32 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + // "description": "Returns the interconnectDiagnostics for the specified interconnect.", // "httpMethod": "GET", - // "id": "compute.instanceTemplates.getIamPolicy", + // "id": "compute.interconnects.getDiagnostics", // "parameterOrder": [ // "project", - // "resource" + // "interconnect" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "interconnect": { + // "description": "Name of the interconnect resource to query.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/getIamPolicy", + // "path": "{project}/global/interconnects/{interconnect}/getDiagnostics", // "response": { - // "$ref": "Policy" + // "$ref": "InterconnectsGetDiagnosticsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -63090,27 +79530,23 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } -// method id "compute.instanceTemplates.insert": +// method id "compute.interconnects.insert": -type InstanceTemplatesInsertCall struct { - s *Service - project string - instancetemplate *InstanceTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsInsertCall struct { + s *Service + project string + interconnect *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance template in the specified project using -// the data that is included in the request. If you are creating a new -// template to update an existing instance group, your new instance -// template must use the same network or, if applicable, the same -// subnetwork as the original template. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert -func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { - c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Interconnect in the specified project using the +// data included in the request. +func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { + c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instancetemplate = instancetemplate + c.interconnect = interconnect return c } @@ -63128,7 +79564,7 @@ func (r *InstanceTemplatesService) Insert(project string, instancetemplate *Inst // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
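A small sketch of the getDiagnostics call added above, reusing the svc and imports from the previous sketch; the Result field name on InterconnectsGetDiagnosticsResponse is an assumption, not something this diff confirms.

// interconnectDiagnostics fetches the low-level link diagnostics for one
// interconnect. svc is the *compute.Service built in the earlier sketch;
// project and name are caller-supplied placeholders.
func interconnectDiagnostics(ctx context.Context, svc *compute.Service, project, name string) error {
	resp, err := svc.Interconnects.GetDiagnostics(project, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Result is assumed to carry the diagnostics payload; inspect the vendored
	// InterconnectsGetDiagnosticsResponse type if the field differs.
	fmt.Printf("diagnostics for %s: %+v\n", name, resp.Result)
	return nil
}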
-func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { +func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -63136,7 +79572,7 @@ func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTempl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { +func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63144,35 +79580,35 @@ func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { +func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesInsertCall) Header() http.Header { +func (c *InterconnectsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -63185,14 +79621,14 @@ func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.insert" call. +// Do executes the "compute.interconnects.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63223,9 +79659,9 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + // "description": "Creates a Interconnect in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instanceTemplates.insert", + // "id": "compute.interconnects.insert", // "parameterOrder": [ // "project" // ], @@ -63243,9 +79679,9 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates", + // "path": "{project}/global/interconnects", // "request": { - // "$ref": "InstanceTemplate" + // "$ref": "Interconnect" // }, // "response": { // "$ref": "Operation" @@ -63258,9 +79694,9 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.list": +// method id "compute.interconnects.list": -type InstanceTemplatesListCall struct { +type InterconnectsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -63269,11 +79705,10 @@ type InstanceTemplatesListCall struct { header_ http.Header } -// List: Retrieves a list of instance templates that are contained -// within the specified project and zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list -func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { - c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect available to the specified +// project. +func (r *InterconnectsService) List(project string) *InterconnectsListCall { + c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -63300,7 +79735,7 @@ func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCa // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { +func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { c.urlParams_.Set("filter", filter) return c } @@ -63311,7 +79746,7 @@ func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesList // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { +func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -63328,7 +79763,7 @@ func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTempla // // Currently, only sorting by name or creationTimestamp desc is // supported. 
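Hedged sketch of compute.interconnects.insert as generated above. The Interconnect body is a placeholder (a real interconnect needs several more required fields), github.com/google/uuid is assumed only to produce the request ID described in the RequestId comment, and the import path again assumes the beta surface.

import (
	"context"

	"github.com/google/uuid" // assumed helper for generating request IDs
	compute "google.golang.org/api/compute/v0.beta"
)

// createInterconnect issues the insert and returns the long-running Operation.
func createInterconnect(ctx context.Context, svc *compute.Service, project string) (*compute.Operation, error) {
	// Placeholder body: a real Interconnect also needs location, link type,
	// requested link count, and so on.
	ic := &compute.Interconnect{
		Name:        "my-interconnect",
		Description: "created via compute.interconnects.insert",
	}
	// RequestId makes retried inserts idempotent, as the comment above explains.
	return svc.Interconnects.Insert(project, ic).
		RequestId(uuid.New().String()).
		Context(ctx).
		Do()
}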
-func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { +func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -63336,7 +79771,7 @@ func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesLi // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { +func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -63344,256 +79779,71 @@ func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplat // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceTemplatesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceTemplates.list" call. -// Exactly one of *InstanceTemplateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceTemplateList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/instanceTemplates", - // "response": { - // "$ref": "InstanceTemplateList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceTemplates.setIamPolicy": - -type InstanceTemplatesSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *InstanceTemplatesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InstanceTemplatesSetIamPolicyCall { - c := &InstanceTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesSetIamPolicyCall { +func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesSetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesSetIamPolicyCall { +func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { +func (c *InterconnectsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.interconnects.list" call. +// Exactly one of *InterconnectList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63612,7 +79862,7 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &InterconnectList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63624,70 +79874,124 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.instanceTemplates.setIamPolicy", + // "description": "Retrieves the list of interconnect available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.interconnects.list", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/setIamPolicy", - // "request": { - // "$ref": "GlobalSetPolicyRequest" - // }, + // "path": "{project}/global/interconnects", // "response": { - // "$ref": "Policy" + // "$ref": "InterconnectList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceTemplates.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceTemplatesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.interconnects.patch": + +type InterconnectsPatchCall struct { + s *Service + project string + interconnect string + interconnect2 *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { - c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified interconnect with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. +func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { + c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.interconnect = interconnect + c.interconnect2 = interconnect2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
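A sketch of paging through compute.interconnects.list with the generated Pages helper shown above; svc and imports as in the first sketch, and the filter expression is illustrative only.

// listActiveInterconnects walks every page of results; Pages re-issues the
// call with each nextPageToken until the last page is reached.
func listActiveInterconnects(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.Interconnects.List(project).
		Filter(`operationalStatus = "OS_ACTIVE"`). // illustrative filter expression
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.InterconnectList) error {
		for _, ic := range page.Items {
			fmt.Println(ic.Name)
		}
		return nil
	})
}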
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { +func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63695,56 +79999,56 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { +func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { +func (c *InterconnectsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.interconnects.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63763,7 +80067,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63775,14 +80079,21 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instanceTemplates.testIamPermissions", + // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.interconnects.patch", // "parameterOrder": [ // "project", - // "resource" + // "interconnect" // ], // "parameters": { + // "interconnect": { + // "description": "Name of the interconnect to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -63790,79 +80101,53 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", + // "path": "{project}/global/interconnects/{interconnect}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "Interconnect" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.addAccessConfig": +// method id "compute.interconnects.setLabels": -type InstancesAddAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddAccessConfig: Adds an access config to an instance's network -// interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig -func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { - c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on an Interconnect. To learn more about +// labels, read the Labeling Resources documentation. +func (r *InterconnectsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *InterconnectsSetLabelsCall { + c := &InterconnectsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
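Sketch of the patch call above, under the same assumptions as the insert sketch (placeholder names, github.com/google/uuid for the request ID); only the fields set on the patch body are sent, per the merge-patch semantics in the method description.

// patchInterconnectDescription updates a single mutable field. With JSON
// merge-patch semantics, fields left unset on the patch body stay unchanged.
func patchInterconnectDescription(ctx context.Context, svc *compute.Service, project, name string) (*compute.Operation, error) {
	patch := &compute.Interconnect{
		Description: "updated via compute.interconnects.patch",
	}
	return svc.Interconnects.Patch(project, name, patch).
		RequestId(uuid.New().String()). // same idempotency token pattern as insert
		Context(ctx).
		Do()
}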
-func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { +func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectsSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63870,35 +80155,35 @@ func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAd // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { +func (c *InterconnectsSetLabelsCall) Context(ctx context.Context) *InterconnectsSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAddAccessConfigCall) Header() http.Header { +func (c *InterconnectsSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -63907,20 +80192,19 @@ func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, er req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.addAccessConfig" call. +// Do executes the "compute.interconnects.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63951,29 +80235,14 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Adds an access config to an instance's network interface.", + // "description": "Sets the labels on an Interconnect. 
To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.instances.addAccessConfig", + // "id": "compute.interconnects.setLabels", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "networkInterface" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "networkInterface": { - // "description": "The name of the network interface to add to this instance.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -63981,22 +80250,17 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", + // "path": "{project}/global/interconnects/{resource}/setLabels", // "request": { - // "$ref": "AccessConfig" + // "$ref": "GlobalSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -64009,157 +80273,89 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.instances.aggregatedList": +// method id "compute.interconnects.testIamPermissions": -type InstancesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InterconnectsTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves aggregated list of all of the instances in -// your project across all regions and zones. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList -func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { - c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
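Sketch of setLabels as generated above. The read-before-write to obtain a label fingerprint is a common pattern rather than something this diff prescribes, and the LabelFingerprint field on Interconnect is an assumption about the labels-enabled surface.

// setInterconnectLabels replaces the labels on an interconnect.
func setInterconnectLabels(ctx context.Context, svc *compute.Service, project, name string, labels map[string]string) (*compute.Operation, error) {
	// Read the current resource first: SetLabels expects the latest label
	// fingerprint so concurrent label edits are not silently overwritten.
	ic, err := svc.Interconnects.Get(project, name).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	req := &compute.GlobalSetLabelsRequest{
		Labels:           labels,
		LabelFingerprint: ic.LabelFingerprint, // assumed field on the labels-enabled surface
	}
	return svc.Interconnects.SetLabels(project, name, req).Context(ctx).Do()
}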
+func (r *InterconnectsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectsTestIamPermissionsCall { + c := &InterconnectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { +func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { +func (c *InterconnectsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAggregatedListCall) Header() http.Header { +func (c *InterconnectsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.aggregatedList" call. -// Exactly one of *InstanceAggregatedList or error will be non-nil. Any +// Do executes the "compute.interconnects.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InstanceAggregatedList.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
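A minimal usage sketch for the InterconnectsTestIamPermissionsCall introduced in this hunk, assuming an already-authenticated *compute.Service passed in by the caller (constructor differences between client versions are sidestepped that way), a placeholder import path, and hypothetical project/interconnect names; the Permissions fields on TestPermissionsRequest and TestPermissionsResponse come from the wider Compute API surface rather than this hunk.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // placeholder: match the versioned package this file belongs to
)

// checkInterconnectPermissions asks which of the listed permissions the caller
// holds on an interconnect, via the call type added above.
func checkInterconnectPermissions(ctx context.Context, svc *compute.Service) {
	req := &compute.TestPermissionsRequest{
		// Permissions is assumed from the standard TestPermissionsRequest schema.
		Permissions: []string{"compute.interconnects.get", "compute.interconnects.delete"},
	}
	resp, err := svc.Interconnects.
		TestIamPermissions("my-project", "my-interconnect", req). // hypothetical project/resource
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("interconnects.testIamPermissions: %v", err)
	}
	log.Printf("caller holds: %v", resp.Permissions) // assumed response field
}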
-func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { +func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64178,7 +80374,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceAggregatedList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64190,47 +80386,35 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones.", - // "httpMethod": "GET", - // "id": "compute.instances.aggregatedList", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.interconnects.testIamPermissions", // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, + // "project", + // "resource" + // ], + // "parameters": { // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/instances", + // "path": "{project}/global/interconnects/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "InstanceAggregatedList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -64241,143 +80425,96 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.attachDisk": +// method id "compute.licenseCodes.get": -type InstancesAttachDiskCall struct { +type LicenseCodesGetCall struct { s *Service project string - zone string - instance string - attacheddisk *AttachedDisk + licenseCode string urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AttachDisk: Attaches an existing Disk resource to an instance. You -// must first create the disk before you can attach it. It is not -// possible to create and attach a disk at the same time. For more -// information, read Adding a persistent disk to your instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk -func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { - c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Return a specified license code. License codes are mirrored +// across all projects that have permissions to read the License Code. 
+func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { + c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.attacheddisk = attacheddisk - return c -} - -// ForceAttach sets the optional parameter "forceAttach": Whether to -// force attach the disk even if it's currently attached to another -// instance. This is only available for regional disks. -func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { - c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { - c.urlParams_.Set("requestId", requestId) + c.licenseCode = licenseCode return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { +func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { +func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesAttachDiskCall) Header() http.Header { +func (c *LicenseCodesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{licenseCode}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "licenseCode": c.licenseCode, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.attachDisk" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.licenseCodes.get" call. +// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *LicenseCode.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64396,7 +80533,7 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &LicenseCode{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64408,24 +80545,18 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", - // "httpMethod": "POST", - // "id": "compute.instances.attachDisk", + // "description": "Return a specified license code. 
License codes are mirrored across all projects that have permissions to read the License Code.", + // "httpMethod": "GET", + // "id": "compute.licenseCodes.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "licenseCode" // ], // "parameters": { - // "forceAttach": { - // "description": "Whether to force attach the disk even if it's currently attached to another instance. This is only available for regional disks.", - // "location": "query", - // "type": "boolean" - // }, - // "instance": { - // "description": "The instance name for this request.", + // "licenseCode": { + // "description": "Number corresponding to the License code resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[0-9]{0,61}?", // "required": true, // "type": "string" // }, @@ -64435,55 +80566,37 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", - // "request": { - // "$ref": "AttachedDisk" - // }, + // "path": "{project}/global/licenseCodes/{licenseCode}", // "response": { - // "$ref": "Operation" + // "$ref": "LicenseCode" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.delete": +// method id "compute.licenses.delete": -type InstancesDeleteCall struct { +type LicensesDeleteCall struct { s *Service project string - zone string - instance string + license string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified Instance resource. For more -// information, see Stopping or Deleting an Instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete -func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { - c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified license. 
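A read-only sketch for the LicenseCodesGetCall added above, under the same assumptions as the earlier sketch (placeholder import path, hypothetical project and license-code id); since the LicenseCode schema is not shown in this hunk, the result is logged as a whole value.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // placeholder versioned path
)

// getLicenseCode fetches a license code by its numeric id using the call added above.
func getLicenseCode(ctx context.Context, svc *compute.Service) {
	lc, err := svc.LicenseCodes.
		Get("my-project", "1000201"). // hypothetical project and license code id
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("licenseCodes.get: %v", err)
	}
	// LicenseCode's concrete fields are not shown in this hunk, so log the whole value.
	log.Printf("license code: %+v", lc)
}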
+func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { + c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.license = license return c } @@ -64501,7 +80614,7 @@ func (r *InstancesService) Delete(project string, zone string, instance string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { +func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -64509,7 +80622,7 @@ func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { +func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64517,21 +80630,21 @@ func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { +func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDeleteCall) Header() http.Header { +func (c *LicensesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -64540,7 +80653,7 @@ func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -64548,21 +80661,20 @@ func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.delete" call. +// Do executes the "compute.licenses.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64593,17 +80705,16 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", + // "description": "Deletes the specified license.", // "httpMethod": "DELETE", - // "id": "compute.instances.delete", + // "id": "compute.licenses.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "license" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to delete.", + // "license": { + // "description": "Name of the license resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -64620,16 +80731,9 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/global/licenses/{license}", // "response": { // "$ref": "Operation" // }, @@ -64641,107 +80745,96 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.deleteAccessConfig": +// method id "compute.licenses.get": -type InstancesDeleteAccessConfigCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesGetCall struct { + s *Service + project string + license string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DeleteAccessConfig: Deletes an access config from an instance's -// network interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig -func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { - c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified License resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get +func (r *LicensesService) Get(project string, license string) *LicensesGetCall { + c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("accessConfig", accessConfig) - c.urlParams_.Set("networkInterface", networkInterface) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { - c.urlParams_.Set("requestId", requestId) + c.license = license return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { +func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { +func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesDeleteAccessConfigCall) Header() http.Header { +func (c *LicensesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.deleteAccessConfig" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.licenses.get" call. +// Exactly one of *License or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *License.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64760,7 +80853,7 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &License{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64772,167 +80865,132 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes an access config from an instance's network interface.", - // "httpMethod": "POST", - // "id": "compute.instances.deleteAccessConfig", + // "description": "Returns the specified License resource.", + // "httpMethod": "GET", + // "id": "compute.licenses.get", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "accessConfig", - // "networkInterface" + // "license" // ], // "parameters": { - // "accessConfig": { - // "description": "The name of the access config to delete.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "The instance name for this request.", + // "license": { + // "description": "Name of the License resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + // "path": "{project}/global/licenses/{license}", // "response": { - // "$ref": "Operation" + // "$ref": "License" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.detachDisk": +// method id "compute.licenses.getIamPolicy": -type InstancesDetachDiskCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DetachDisk: Detaches a disk from an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk -func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { - c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { + c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("deviceName", deviceName) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
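For the LicensesGetCall above, a sketch that also exercises the Fields partial-response option documented in this hunk; the project and license names are hypothetical, and the JSON field name passed to Fields is assumed from the License schema rather than shown here.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // placeholder versioned path
)

// getLicense returns a single License resource, asking only for its name field.
func getLicense(ctx context.Context, svc *compute.Service) {
	lic, err := svc.Licenses.
		Get("my-project", "my-custom-license"). // hypothetical names
		Fields("name").                         // partial response; assumed JSON field name on License
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("licenses.get: %v", err)
	}
	log.Printf("license: %s", lic.Name)
}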
-func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { +func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { +func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDetachDiskCall) Header() http.Header { +func (c *LicensesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.detachDisk" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.licenses.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64951,7 +81009,7 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64963,29 +81021,14 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Detaches a disk from an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.detachDisk", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.licenses.getIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "deviceName" + // "resource" // ], // "parameters": { - // "deviceName": { - // "description": "The device name of the disk to detach. Make a get() request on the instance to view currently attached disks and device names.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "Instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -64993,125 +81036,125 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + // "path": "{project}/global/licenses/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.get": +// method id "compute.licenses.insert": -type InstancesGetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type LicensesInsertCall struct { + s *Service + project string + license *License + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Instance resource. Gets a list of -// available instances by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get -func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { - c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Create a License resource in the specified project. +func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { + c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.license = license + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
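A companion sketch for the LicensesGetIamPolicyCall above, under the same assumptions; the Bindings, Role, and Members fields are taken from the shared compute Policy schema, which is defined elsewhere in this file rather than in this hunk.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // placeholder versioned path
)

// logLicensePolicy prints the IAM bindings attached to a license resource.
func logLicensePolicy(ctx context.Context, svc *compute.Service) {
	policy, err := svc.Licenses.
		GetIamPolicy("my-project", "my-custom-license"). // hypothetical names
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("licenses.getIamPolicy: %v", err)
	}
	for _, b := range policy.Bindings { // assumed Policy fields
		log.Printf("role %s -> %v", b.Role, b.Members)
	}
}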
-func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { +func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { +func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetCall) Header() http.Header { +func (c *LicensesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.get" call. -// Exactly one of *Instance or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Instance.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.licenses.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { +func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65130,7 +81173,7 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Instance{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65142,22 +81185,13 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } return ret, nil // { - // "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instances.get", + // "description": "Create a License resource in the specified project.", + // "httpMethod": "POST", + // "id": "compute.licenses.insert", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -65165,67 +81199,120 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/global/licenses", + // "request": { + // "$ref": "License" + // }, // "response": { - // "$ref": "Instance" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.instances.getGuestAttributes": +// method id "compute.licenses.list": -type InstancesGetGuestAttributesCall struct { +type LicensesListCall struct { s *Service project string - zone string - instance string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetGuestAttributes: Returns the specified guest attributes entry. 
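A sketch of the LicensesInsertCall added above, including the optional RequestId parameter it documents; the License field used and the Operation.Name field come from schemas defined elsewhere in this file, and all names are hypothetical.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // placeholder versioned path
)

// createLicense inserts a new License and logs the returned Operation name. The
// RequestId makes the insert safe to retry, per the parameter documented above.
func createLicense(ctx context.Context, svc *compute.Service) {
	lic := &compute.License{
		Name: "my-custom-license", // hypothetical; other License fields are not shown in this hunk
	}
	op, err := svc.Licenses.
		Insert("my-project", lic).
		RequestId("5f9f4f7a-1111-4111-8111-000000000001"). // any valid, non-zero UUID
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("licenses.insert: %v", err)
	}
	log.Printf("started operation %s", op.Name) // assumed from the shared Operation schema
}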
-func (r *InstancesService) GetGuestAttributes(project string, zone string, instance string) *InstancesGetGuestAttributesCall { - c := &InstancesGetGuestAttributesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of licenses available in the specified +// project. This method does not get any licenses that belong to other +// projects, including licenses attached to publicly-available images, +// like Debian 9. If you want to get a list of publicly-available +// licenses, use this method to make a request to the respective image +// project, such as debian-cloud or windows-cloud. +func (r *LicensesService) List(project string) *LicensesListCall { + c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance return c } -// QueryPath sets the optional parameter "queryPath": Specifies the -// guest attributes path to be queried. -func (c *InstancesGetGuestAttributesCall) QueryPath(queryPath string) *InstancesGetGuestAttributesCall { - c.urlParams_.Set("queryPath", queryPath) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *LicensesListCall) Filter(filter string) *LicensesListCall { + c.urlParams_.Set("filter", filter) return c } -// VariableKey sets the optional parameter "variableKey": Specifies the -// key for the guest attributes entry. -func (c *InstancesGetGuestAttributesCall) VariableKey(variableKey string) *InstancesGetGuestAttributesCall { - c.urlParams_.Set("variableKey", variableKey) +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". 
This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetGuestAttributesCall) Fields(s ...googleapi.Field) *InstancesGetGuestAttributesCall { +func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65235,7 +81322,7 @@ func (c *InstancesGetGuestAttributesCall) Fields(s ...googleapi.Field) *Instance // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesGetGuestAttributesCall) IfNoneMatch(entityTag string) *InstancesGetGuestAttributesCall { +func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { c.ifNoneMatch_ = entityTag return c } @@ -65243,21 +81330,21 @@ func (c *InstancesGetGuestAttributesCall) IfNoneMatch(entityTag string) *Instanc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetGuestAttributesCall) Context(ctx context.Context) *InstancesGetGuestAttributesCall { +func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetGuestAttributesCall) Header() http.Header { +func (c *LicensesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -65269,7 +81356,7 @@ func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getGuestAttributes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -65277,21 +81364,19 @@ func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getGuestAttributes" call. -// Exactly one of *GuestAttributes or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *GuestAttributes.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.licenses.list" call. +// Exactly one of *LicensesListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *LicensesListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*GuestAttributes, error) { +func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65310,7 +81395,7 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &GuestAttributes{ + ret := &LicensesListResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65322,50 +81407,47 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue } return ret, nil // { - // "description": "Returns the specified guest attributes entry.", + // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", // "httpMethod": "GET", - // "id": "compute.instances.getGuestAttributes", + // "id": "compute.licenses.list", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" // }, - // "queryPath": { - // "description": "Specifies the guest attributes path to be queried.", + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" // }, - // "variableKey": { - // "description": "Specifies the key for the guest attributes entry.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/getGuestAttributes", + // "path": "{project}/global/licenses", // "response": { - // "$ref": "GuestAttributes" + // "$ref": "LicensesListResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -65376,99 +81458,110 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue } -// method id "compute.instances.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.licenses.setIamPolicy": + +type LicensesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { - c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { + c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesGetIamPolicyCall { +func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstancesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetIamPolicyCall) Context(ctx context.Context) *InstancesGetIamPolicyCall { +func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
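Before moving on to compute.licenses.setIamPolicy, here is a minimal sketch of how the compute.licenses.list call and its Pages helper added above are typically driven. It assumes the generated package is imported as compute (the exact version path is not visible in this hunk; match whatever the vendor tree carries), that an authenticated *compute.Service has already been constructed, and that LicensesListResponse exposes the usual Items []*License slice with a Name field; those field names do not appear in this hunk and are assumptions.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path; use the version in this vendor tree
)

// listLicenseNames pages through compute.licenses.list, letting the Pages
// helper handle the nextPageToken bookkeeping. Items and Name are assumed
// field names not shown in the diff above.
func listLicenseNames(ctx context.Context, svc *compute.Service, project string) error {
	return svc.Licenses.List(project).
		MaxResults(100). // optional; the API default is 500
		Pages(ctx, func(page *compute.LicensesListResponse) error {
			for _, lic := range page.Items {
				fmt.Println(lic.Name)
			}
			return nil // a non-nil error here halts the iteration
		})
}

Note that, per the generated implementation above, Pages restores the original pageToken when it returns, so the same call value can be reused for a later listing.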
-func (c *InstancesGetIamPolicyCall) Header() http.Header { +func (c *LicensesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getIamPolicy" call. +// Do executes the "compute.licenses.setIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65499,12 +81592,11 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.instances.getIamPolicy", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.licenses.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", // "resource" // ], // "parameters": { @@ -65518,77 +81610,112 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/getIamPolicy", + // "path": "{project}/global/licenses/{resource}/setIamPolicy", + // "request": { + // "$ref": "GlobalSetPolicyRequest" + // }, // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getSerialPortOutput": +// method id "compute.machineTypes.aggregatedList": -type InstancesGetSerialPortOutputCall struct { +type MachineTypesAggregatedListCall struct { s *Service project string - zone string - instance string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetSerialPortOutput: Returns the last 1 MB of serial port output from -// the specified instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput -func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { - c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of machine types. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList +func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { + c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance return c } -// Port sets the optional parameter "port": Specifies which COM or -// serial port to retrieve data from. -func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("port", fmt.Sprint(port)) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. 
+// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) return c } -// Start sets the optional parameter "start": Returns output starting -// from a specific byte position. Use this to page through output when -// the output is too large to return in a single request. For the -// initial request, leave this field unspecified. For subsequent calls, -// this field should be set to the next value returned in the previous -// call. -func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("start", fmt.Sprint(start)) +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { +func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65598,7 +81725,7 @@ func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *Instanc // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
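The IfNoneMatch helper documented just above turns the request into a conditional fetch. A minimal sketch of that pattern for the aggregated machine-type list, assuming the caller already holds an ETag observed on an earlier response (how it was obtained is left out) and the standard google.golang.org/api/googleapi package:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
	"google.golang.org/api/googleapi"
)

// aggregatedListIfChanged re-fetches the aggregated machine-type list only if
// it has changed since prevETag was observed. A nil, nil return means the
// server answered 304 Not Modified and the cached copy is still current.
func aggregatedListIfChanged(ctx context.Context, svc *compute.Service, project, prevETag string) (*compute.MachineTypeAggregatedList, error) {
	list, err := svc.MachineTypes.AggregatedList(project).
		IfNoneMatch(prevETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil
	}
	return list, err
}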
-func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { +func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -65606,21 +81733,21 @@ func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { +func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetSerialPortOutputCall) Header() http.Header { +func (c *MachineTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -65632,7 +81759,7 @@ func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -65640,21 +81767,19 @@ func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getSerialPortOutput" call. -// Exactly one of *SerialPortOutput or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SerialPortOutput.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.machineTypes.aggregatedList" call. +// Exactly one of *MachineTypeAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *MachineTypeAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { +func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65673,7 +81798,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SerialPortOutput{ + ret := &MachineTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65685,55 +81810,47 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se } return ret, nil // { - // "description": "Returns the last 1 MB of serial port output from the specified instance.", + // "description": "Retrieves an aggregated list of machine types.", // "httpMethod": "GET", - // "id": "compute.instances.getSerialPortOutput", + // "id": "compute.machineTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "port": { - // "default": "1", - // "description": "Specifies which COM or serial port to retrieve data from.", - // "format": "int32", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", // "location": "query", - // "maximum": "4", - // "minimum": "1", + // "minimum": "0", // "type": "integer" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "start": { - // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", - // "format": "int64", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + // "path": "{project}/aggregated/machineTypes", // "response": { - // "$ref": "SerialPortOutput" + // "$ref": "MachineTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -65744,33 +81861,55 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se } -// method id "compute.instances.getShieldedVmIdentity": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesGetShieldedVmIdentityCall struct { +// method id "compute.machineTypes.get": + +type MachineTypesGetCall struct { s *Service project string zone string - instance string + machineType string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetShieldedVmIdentity: Returns the Shielded VM Identity of an -// instance -func (r *InstancesService) GetShieldedVmIdentity(project string, zone string, instance string) *InstancesGetShieldedVmIdentityCall { - c := &InstancesGetShieldedVmIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified machine type. Gets a list of available +// machine types by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get +func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { + c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance + c.machineType = machineType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetShieldedVmIdentityCall) Fields(s ...googleapi.Field) *InstancesGetShieldedVmIdentityCall { +func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65780,7 +81919,7 @@ func (c *InstancesGetShieldedVmIdentityCall) Fields(s ...googleapi.Field) *Insta // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesGetShieldedVmIdentityCall) IfNoneMatch(entityTag string) *InstancesGetShieldedVmIdentityCall { +func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -65788,21 +81927,21 @@ func (c *InstancesGetShieldedVmIdentityCall) IfNoneMatch(entityTag string) *Inst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetShieldedVmIdentityCall) Context(ctx context.Context) *InstancesGetShieldedVmIdentityCall { +func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetShieldedVmIdentityCall) Header() http.Header { +func (c *MachineTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetShieldedVmIdentityCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -65814,7 +81953,7 @@ func (c *InstancesGetShieldedVmIdentityCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getShieldedVmIdentity") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -65822,21 +81961,21 @@ func (c *InstancesGetShieldedVmIdentityCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "machineType": c.machineType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getShieldedVmIdentity" call. -// Exactly one of *ShieldedVmIdentity or error will be non-nil. Any -// non-2xx status code is an error. 
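A short sketch of calling the compute.machineTypes.get method wired up above, showing the *googleapi.Error shape that the surrounding Do documentation points callers to for non-2xx responses. The GuestCpus and MemoryMb fields on MachineType are assumptions taken from the published API surface; they are not part of this hunk.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
	"google.golang.org/api/googleapi"
)

// describeMachineType fetches a single machine type and prints a one-line
// summary. GuestCpus and MemoryMb are assumed field names on MachineType.
func describeMachineType(ctx context.Context, svc *compute.Service, project, zone, name string) error {
	mt, err := svc.MachineTypes.Get(project, zone, name).Context(ctx).Do()
	if err != nil {
		// Non-2xx responses surface as *googleapi.Error, per the Do doc comment.
		if gerr, ok := err.(*googleapi.Error); ok {
			return fmt.Errorf("machineTypes.get failed with HTTP %d: %s", gerr.Code, gerr.Message)
		}
		return err
	}
	fmt.Printf("%s: %d vCPUs, %d MB RAM\n", mt.Name, mt.GuestCpus, mt.MemoryMb)
	return nil
}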
Response headers are in either -// *ShieldedVmIdentity.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesGetShieldedVmIdentityCall) Do(opts ...googleapi.CallOption) (*ShieldedVmIdentity, error) { +// Do executes the "compute.machineTypes.get" call. +// Exactly one of *MachineType or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *MachineType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65855,7 +81994,7 @@ func (c *InstancesGetShieldedVmIdentityCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ShieldedVmIdentity{ + ret := &MachineType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65867,17 +82006,17 @@ func (c *InstancesGetShieldedVmIdentityCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns the Shielded VM Identity of an instance", + // "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request.", // "httpMethod": "GET", - // "id": "compute.instances.getShieldedVmIdentity", + // "id": "compute.machineTypes.get", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "machineType" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "machineType": { + // "description": "Name of the machine type to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -65898,9 +82037,9 @@ func (c *InstancesGetShieldedVmIdentityCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/getShieldedVmIdentity", + // "path": "{project}/zones/{zone}/machineTypes/{machineType}", // "response": { - // "$ref": "ShieldedVmIdentity" + // "$ref": "MachineType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -65911,105 +82050,141 @@ func (c *InstancesGetShieldedVmIdentityCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.instances.insert": +// method id "compute.machineTypes.list": -type InstancesInsertCall struct { - s *Service - project string - zone string - instance *Instance - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance resource in the specified project using -// the data included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert -func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { - c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of machine types available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list +func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { + c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { + c.urlParams_.Set("filter", filter) return c } -// SourceInstanceTemplate sets the optional parameter -// "sourceInstanceTemplate": Specifies instance template to create the -// instance. +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. // -// This field is optional. It can be a full or partial URL. For example, -// the following are all valid URLs to an instance template: -// - -// https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate -// - projects/project/global/instanceTemplates/instanceTemplate -// - global/instanceTemplates/instanceTemplate -func (c *InstancesInsertCall) SourceInstanceTemplate(sourceInstanceTemplate string) *InstancesInsertCall { - c.urlParams_.Set("sourceInstanceTemplate", sourceInstanceTemplate) +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { +func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { +func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
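Tying the optional list parameters above together, here is a hedged sketch of listing machine types in one zone with a filter and a page size, again leaning on the Pages helper. The comparison syntax follows the filter documentation in this hunk, but the guestCpus filter field and the Items field on MachineTypeList are assumptions not shown here.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// listBigMachineTypes lists machine types in a zone whose guestCpus value is
// greater than 16, 250 results per page. guestCpus and Items are assumed names.
func listBigMachineTypes(ctx context.Context, svc *compute.Service, project, zone string) error {
	return svc.MachineTypes.List(project, zone).
		Filter("guestCpus > 16"). // operators =, !=, >, < per the docs above
		MaxResults(250).
		Pages(ctx, func(page *compute.MachineTypeList) error {
			for _, mt := range page.Items {
				fmt.Println(mt.Name)
			}
			return nil
		})
}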
-func (c *InstancesInsertCall) Header() http.Header { +func (c *MachineTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -66021,14 +82196,14 @@ func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.machineTypes.list" call. +// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *MachineTypeList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66047,7 +82222,7 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &MachineTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66059,31 +82234,44 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an instance resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.insert", + // "description": "Retrieves a list of machine types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.list", // "parameterOrder": [ // "project", // "zone" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" // }, - // "sourceInstanceTemplate": { - // "description": "Specifies instance template to create the instance.\n\nThis field is optional. It can be a full or partial URL. For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate \n- projects/project/global/instanceTemplates/instanceTemplate \n- global/instanceTemplates/instanceTemplate", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -66092,40 +82280,56 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", - // "request": { - // "$ref": "Instance" - // }, + // "path": "{project}/zones/{zone}/machineTypes", // "response": { - // "$ref": "Operation" + // "$ref": "MachineTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesListCall struct { +// method id "compute.networkEndpointGroups.aggregatedList": + +type NetworkEndpointGroupsAggregatedListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of instances contained within the specified -// zone. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list -func (r *InstancesService) List(project string, zone string) *InstancesListCall { - c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of network endpoint groups and +// sorts them by zone. +func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { + c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -66151,7 +82355,7 @@ func (r *InstancesService) List(project string, zone string) *InstancesListCall // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InstancesListCall) Filter(filter string) *InstancesListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -66162,7 +82366,7 @@ func (c *InstancesListCall) Filter(filter string) *InstancesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -66179,7 +82383,7 @@ func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -66187,7 +82391,7 @@ func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -66195,7 +82399,7 @@ func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66205,7 +82409,7 @@ func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
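Like the other aggregated-list calls in this hunk, compute.networkEndpointGroups.aggregatedList returns results keyed by scope, and a Pages helper for it is added further down. A sketch under the assumption that NetworkEndpointGroupAggregatedList.Items is the usual map from zone key to a scoped list with a NetworkEndpointGroups slice; neither field name is visible in this hunk.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// countNEGsPerZone walks compute.networkEndpointGroups.aggregatedList and
// prints how many network endpoint groups exist under each scope key.
// Items and NetworkEndpointGroups are assumed field names based on the usual
// aggregated-list shape.
func countNEGsPerZone(ctx context.Context, svc *compute.Service, project string) error {
	return svc.NetworkEndpointGroups.AggregatedList(project).
		Pages(ctx, func(page *compute.NetworkEndpointGroupAggregatedList) error {
			for scope, scoped := range page.Items {
				fmt.Printf("%s: %d network endpoint groups\n", scope, len(scoped.NetworkEndpointGroups))
			}
			return nil
		})
}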
-func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -66213,21 +82417,21 @@ func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesListCall) Header() http.Header { +func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -66239,7 +82443,7 @@ func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -66248,19 +82452,19 @@ func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.list" call. -// Exactly one of *InstanceList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { +// Do executes the "compute.networkEndpointGroups.aggregatedList" call. +// Exactly one of *NetworkEndpointGroupAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66279,7 +82483,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceList{ + ret := &NetworkEndpointGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66291,12 +82495,11 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err } return ret, nil // { - // "description": "Retrieves the list of instances contained within the specified zone.", + // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", // "httpMethod": "GET", - // "id": "compute.instances.list", + // "id": "compute.networkEndpointGroups.aggregatedList", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -66328,18 +82531,11 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", + // "path": "{project}/aggregated/networkEndpointGroups", // "response": { - // "$ref": "InstanceList" + // "$ref": "NetworkEndpointGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -66353,7 +82549,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { +func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -66371,163 +82567,111 @@ func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) err } } -// method id "compute.instances.listReferrers": - -type InstancesListReferrersCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// ListReferrers: Retrieves the list of referrers to instances contained -// within the specified zone. For more information, read Viewing -// Referrers to VM Instances. -func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { - c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. 
The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { - c.urlParams_.Set("filter", filter) - return c -} +// method id "compute.networkEndpointGroups.attachNetworkEndpoints": -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c +type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { - c.urlParams_.Set("orderBy", orderBy) + +// AttachNetworkEndpoints: Attach a list of network endpoints to the +// specified network endpoint group. +func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. 
Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { - c.urlParams_.Set("pageToken", pageToken) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesListReferrersCall) Header() http.Header { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.listReferrers" call. -// Exactly one of *InstanceListReferrers or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceListReferrers.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { +// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66546,7 +82690,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceListReferrers{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66558,45 +82702,21 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance } return ret, nil // { - // "description": "Retrieves the list of referrers to instances contained within the specified zone. 
For more information, read Viewing Referrers to VM Instances.", - // "httpMethod": "GET", - // "id": "compute.instances.listReferrers", + // "description": "Attach a list of network endpoints to the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "networkEndpointGroup" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "instance": { - // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", // "location": "path", - // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -66604,68 +82724,54 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/referrers", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "request": { + // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" + // }, // "response": { - // "$ref": "InstanceListReferrers" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.reset": +// method id "compute.networkEndpointGroups.delete": -type InstancesResetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsDeleteCall struct { + s *Service + project string + zone string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Reset: Performs a reset on the instance. For more information, see -// Resetting an instance. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset -func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { - c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network endpoint group. The network +// endpoints in the NEG and the VM instances they belong to are not +// terminated when the NEG is deleted. Note that the NEG cannot be +// deleted if there are backend services referencing it. +func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { + c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance + c.networkEndpointGroup = networkEndpointGroup return c } @@ -66683,7 +82789,7 @@ func (r *InstancesService) Reset(project string, zone string, instance string) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { +func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -66691,7 +82797,7 @@ func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { +func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66699,21 +82805,21 @@ func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { +func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesResetCall) Header() http.Header { +func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -66722,29 +82828,29 @@ func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.reset" call. +// Do executes the "compute.networkEndpointGroups.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66775,19 +82881,18 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Performs a reset on the instance. For more information, see Resetting an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.reset", + // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", + // "httpMethod": "DELETE", + // "id": "compute.networkEndpointGroups.delete", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "networkEndpointGroup" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -66804,14 +82909,13 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/reset", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", // "response": { // "$ref": "Operation" // }, @@ -66823,27 +82927,27 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.instances.resume": +// method id "compute.networkEndpointGroups.detachNetworkEndpoints": -type InstancesResumeCall struct { - s *Service - project string - zone string - instance string - instancesresumerequest *InstancesResumeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resume: Resumes an instance that was suspended using the -// instances().suspend method. -func (r *InstancesService) Resume(project string, zone string, instance string, instancesresumerequest *InstancesResumeRequest) *InstancesResumeCall { - c := &InstancesResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DetachNetworkEndpoints: Detach a list of network endpoints from the +// specified network endpoint group. +func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.instancesresumerequest = instancesresumerequest + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest return c } @@ -66861,7 +82965,7 @@ func (r *InstancesService) Resume(project string, zone string, instance string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesResumeCall) RequestId(requestId string) *InstancesResumeCall { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -66869,7 +82973,7 @@ func (c *InstancesResumeCall) RequestId(requestId string) *InstancesResumeCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesResumeCall) Fields(s ...googleapi.Field) *InstancesResumeCall { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66877,35 +82981,35 @@ func (c *InstancesResumeCall) Fields(s ...googleapi.Field) *InstancesResumeCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesResumeCall) Context(ctx context.Context) *InstancesResumeCall { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesResumeCall) Header() http.Header { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesResumeCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesresumerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/resume") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -66913,21 +83017,21 @@ func (c *InstancesResumeCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.resume" call. +// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66958,19 +83062,18 @@ func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Resumes an instance that was suspended using the instances().suspend method.", + // "description": "Detach a list of network endpoints from the specified network endpoint group.", // "httpMethod": "POST", - // "id": "compute.instances.resume", + // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "networkEndpointGroup" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to resume.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -66987,16 +83090,15 @@ func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/resume", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", // "request": { - // "$ref": "InstancesResumeRequest" + // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" // }, // "response": { // "$ref": "Operation" @@ -67009,31 +83111,190 @@ func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.setDeletionProtection": +// method id "compute.networkEndpointGroups.get": -type InstancesSetDeletionProtectionCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsGetCall struct { + s *Service + project string + zone string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetDeletionProtection: Sets deletion protection on the instance. -func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { - c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network endpoint group. Gets a list of +// available network endpoint groups by making a list() request. +func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { + c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource + c.networkEndpointGroup = networkEndpointGroup return c } -// DeletionProtection sets the optional parameter "deletionProtection": -// Whether the resource should be protected against deletion. 
-func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { - c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkEndpointGroupsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEndpointGroups.get" call. +// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroup.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NetworkEndpointGroup{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.networkEndpointGroups.get", + // "parameterOrder": [ + // "project", + // "zone", + // "networkEndpointGroup" + // ], + // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "response": { + // "$ref": "NetworkEndpointGroup" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.networkEndpointGroups.insert": + +type NetworkEndpointGroupsInsertCall struct { + s *Service + project string + zone string + networkendpointgroup *NetworkEndpointGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a network endpoint group in the specified project +// using the parameters that are included in the request. +func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { + c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.networkendpointgroup = networkendpointgroup return c } @@ -67051,7 +83312,7 @@ func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtecti // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { +func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -67059,7 +83320,7 @@ func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *Instan // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { +func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67067,30 +83328,35 @@ func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *Insta // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { +func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetDeletionProtectionCall) Header() http.Header { +func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -67098,21 +83364,20 @@ func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setDeletionProtection" call. +// Do executes the "compute.networkEndpointGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67143,21 +83408,14 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets deletion protection on the instance.", + // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.setDeletionProtection", + // "id": "compute.networkEndpointGroups.insert", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "zone" // ], // "parameters": { - // "deletionProtection": { - // "default": "true", - // "description": "Whether the resource should be protected against deletion.", - // "location": "query", - // "type": "boolean" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -67170,22 +83428,17 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where you want to create the network endpoint group. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", + // "path": "{project}/zones/{zone}/networkEndpointGroups", + // "request": { + // "$ref": "NetworkEndpointGroup" + // }, // "response": { // "$ref": "Operation" // }, @@ -67197,107 +83450,159 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.instances.setDiskAutoDelete": +// method id "compute.networkEndpointGroups.list": -type InstancesSetDiskAutoDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to -// an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete -func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { - c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of network endpoint groups that are located +// in the specified project and zone. +func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { + c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) - c.urlParams_.Set("deviceName", deviceName) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { +func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { +func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { +func (c *NetworkEndpointGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setDiskAutoDelete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkEndpointGroups.list" call. +// Exactly one of *NetworkEndpointGroupList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67316,7 +83621,7 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67328,35 +83633,35 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the auto-delete flag for a disk attached to an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.setDiskAutoDelete", + // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.networkEndpointGroups.list", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "autoDelete", - // "deviceName" + // "zone" // ], // "parameters": { - // "autoDelete": { - // "description": "Whether to auto-delete the disk when the instance is deleted.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", - // "required": true, - // "type": "boolean" + // "type": "string" // }, - // "deviceName": { - // "description": "The device name of the disk to modify. Make a get() request on the instance to view currently attached disks and device names.", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", - // "pattern": "\\w[\\w.-]{0,254}", - // "required": true, // "type": "string" // }, - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -67366,59 +83671,138 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + // "path": "{project}/zones/{zone}/networkEndpointGroups", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkEndpointGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.networkEndpointGroups.listNetworkEndpoints": + +type NetworkEndpointGroupsListNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstancesSetIamPolicyCall { - c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListNetworkEndpoints: Lists the network endpoints in the specified +// network endpoint group. +func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { + c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
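As a usage sketch for the networkEndpointGroups.list call and its Pages helper above — assuming this file is the generated google.golang.org/api/compute/v1 client, that the Service is built with compute.New over an oauth2 client from google.DefaultClient, and that NetworkEndpointGroups.List(project, zone) is the call constructor defined alongside these methods (only the option setters, doRequest, Do, and Pages appear in this hunk); the project, zone, and filter values are hypothetical placeholders, and the Items field on NetworkEndpointGroupList is assumed rather than shown here:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Authenticated HTTP client via Application Default Credentials (assumed setup).
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" and "us-central1-a" are placeholders. Filter and MaxResults
	// map onto the query parameters documented above; Pages drives Do in a loop,
	// following NextPageToken until it is empty.
	call := svc.NetworkEndpointGroups.List("my-project", "us-central1-a").
		Filter("name != example-neg").
		MaxResults(100)

	err = call.Pages(ctx, func(page *compute.NetworkEndpointGroupList) error {
		// Items is the conventional field on generated *List types; it is not
		// part of this hunk, so treat the name as an assumption.
		fmt.Printf("page with %d groups, next token %q\n", len(page.Items), page.NextPageToken)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}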
+func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIamPolicyCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67426,35 +83810,35 @@ func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetIamPolicyCall) Context(ctx context.Context) *InstancesSetIamPolicyCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
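Building on the same hypothetical svc and imports as the previous sketch, the listNetworkEndpoints call defined above is a POST whose body is a NetworkEndpointGroupsListEndpointsRequest (left empty below, since its fields are declared elsewhere in this file); it pages exactly like the plain list call:

func listEndpoints(ctx context.Context, svc *compute.Service) error {
	// Request body taken from the ListNetworkEndpoints constructor above; its
	// fields are not part of this hunk, so it is left at its zero value.
	req := &compute.NetworkEndpointGroupsListEndpointsRequest{}

	// Resource names are placeholders.
	call := svc.NetworkEndpointGroups.ListNetworkEndpoints("my-project", "us-central1-a", "my-neg", req).
		MaxResults(100)

	// Pages follows NextPageToken and invokes the callback once per page.
	return call.Pages(ctx, func(page *compute.NetworkEndpointGroupsListNetworkEndpoints) error {
		fmt.Printf("endpoints page, next token %q\n", page.NextPageToken)
		return nil
	})
}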
-func (c *InstancesSetIamPolicyCall) Header() http.Header { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -67462,21 +83846,23 @@ func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. +// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67495,7 +83881,7 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &NetworkEndpointGroupsListNetworkEndpoints{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67507,99 +83893,123 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "description": "Lists the network endpoints in the specified network endpoint group.", // "httpMethod": "POST", - // "id": "compute.instances.setIamPolicy", + // "id": "compute.networkEndpointGroups.listNetworkEndpoints", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "networkEndpointGroup" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/setIamPolicy", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", // "request": { - // "$ref": "ZoneSetPolicyRequest" + // "$ref": "NetworkEndpointGroupsListEndpointsRequest" // }, // "response": { - // "$ref": "Policy" + // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setLabels": - -type InstancesSetLabelsCall struct { - s *Service - project string - zone string - instance string - instancessetlabelsrequest *InstancesSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// SetLabels: Sets labels on an instance. To learn more about labels, -// read the Labeling Resources documentation. -func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { - c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.instancessetlabelsrequest = instancessetlabelsrequest - return c -} +// method id "compute.networkEndpointGroups.testIamPermissions": -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. 
This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { - c.urlParams_.Set("requestId", requestId) +type NetworkEndpointGroupsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { + c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67607,35 +84017,35 @@ func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetLabelsCall) Header() http.Header { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -67645,19 +84055,19 @@ func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67676,7 +84086,7 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67688,22 +84098,15 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instances.setLabels", + // "id": "compute.networkEndpointGroups.testIamPermissions", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -67711,9 +84114,11 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { @@ -67724,42 +84129,40 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setLabels", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", // "request": { - // "$ref": "InstancesSetLabelsRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setMachineResources": +// method id "compute.networks.addPeering": -type InstancesSetMachineResourcesCall struct { - s *Service - project string - zone string - instance string - instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksAddPeeringCall struct { + s *Service + project string + network string + networksaddpeeringrequest *NetworksAddPeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMachineResources: Changes the number and/or type of accelerator -// for a stopped instance to the values specified in the request. -func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { - c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddPeering: Adds a peering to the specified network. +func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { + c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest + c.network = network + c.networksaddpeeringrequest = networksaddpeeringrequest return c } @@ -67777,7 +84180,7 @@ func (r *InstancesService) SetMachineResources(project string, zone string, inst // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { +func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeeringCall { c.urlParams_.Set("requestId", requestId) return c } @@ -67785,7 +84188,7 @@ func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *Instance // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
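A sketch of how the RequestId option described above is typically used with the addPeering call, assuming the same svc as in the earlier sketches; the UUID literal and resource names are placeholders, and the fields of NetworksAddPeeringRequest are omitted because they are declared elsewhere in this file:

func addPeering(ctx context.Context, svc *compute.Service) error {
	// A real call would fill in the peering details; those fields are outside
	// this hunk, so the body stays empty here.
	req := &compute.NetworksAddPeeringRequest{}

	// Per the RequestId documentation above, retrying with the same UUID lets
	// the server ignore the duplicate instead of creating a second peering.
	// The literal below is a placeholder; generate one per logical request.
	op, err := svc.Networks.AddPeering("my-project", "my-network", req).
		RequestId("7a6b4c1e-1d2f-4e3a-9b8c-0d1e2f3a4b5c").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// Do returns an Operation describing the asynchronous change; its schema
	// is defined elsewhere in this file, so it is not inspected here.
	_ = op
	return nil
}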
-func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { +func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67793,35 +84196,35 @@ func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *Instanc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { +func (c *NetworksAddPeeringCall) Context(ctx context.Context) *NetworksAddPeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMachineResourcesCall) Header() http.Header { +func (c *NetworksAddPeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksaddpeeringrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -67829,21 +84232,20 @@ func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineResources" call. +// Do executes the "compute.networks.addPeering" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67874,17 +84276,16 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", + // "description": "Adds a peering to the specified network.", // "httpMethod": "POST", - // "id": "compute.instances.setMachineResources", + // "id": "compute.networks.addPeering", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "network" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "network": { + // "description": "Name of the network resource to add peering to.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -67901,18 +84302,11 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", + // "path": "{project}/global/networks/{network}/addPeering", // "request": { - // "$ref": "InstancesSetMachineResourcesRequest" + // "$ref": "NetworksAddPeeringRequest" // }, // "response": { // "$ref": "Operation" @@ -67925,27 +84319,23 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.instances.setMachineType": +// method id "compute.networks.delete": -type InstancesSetMachineTypeCall struct { - s *Service - project string - zone string - instance string - instancessetmachinetyperequest *InstancesSetMachineTypeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksDeleteCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMachineType: Changes the machine type for a stopped instance to -// the machine type specified in the request. -func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { - c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete +func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { + c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetmachinetyperequest = instancessetmachinetyperequest + c.network = network return c } @@ -67963,7 +84353,7 @@ func (r *InstancesService) SetMachineType(project string, zone string, instance // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { +func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -67971,7 +84361,7 @@ func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetM // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { +func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67979,57 +84369,51 @@ func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { +func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMachineTypeCall) Header() http.Header { +func (c *NetworksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineType" call. +// Do executes the "compute.networks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68060,17 +84444,16 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setMachineType", + // "description": "Deletes the specified network.", + // "httpMethod": "DELETE", + // "id": "compute.networks.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "network" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "network": { + // "description": "Name of the network to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -68087,19 +84470,9 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", - // "request": { - // "$ref": "InstancesSetMachineTypeRequest" - // }, + // "path": "{project}/global/networks/{network}", // "response": { // "$ref": "Operation" // }, @@ -68111,112 +84484,97 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instances.setMetadata": +// method id "compute.networks.get": -type InstancesSetMetadataCall struct { - s *Service - project string - zone string - instance string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksGetCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetMetadata: Sets metadata for the specified instance to the data -// included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata -func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { - c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network. Gets a list of available networks +// by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get +func (r *NetworksService) Get(project string, network string) *NetworksGetCall { + c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.metadata = metadata - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { - c.urlParams_.Set("requestId", requestId) + c.network = network return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { +func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { +func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
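A sketch of a conditional read using the IfNoneMatch option on the networks.get call above, again assuming the same svc; it relies on googleapi.IsNotModified (from google.golang.org/api/googleapi, already imported by this file) and on the ServerResponse headers that Do populates, with placeholder resource names and no guarantee that the server returns an ETag for every resource:

func getNetworkIfChanged(ctx context.Context, svc *compute.Service, lastEtag string) (*compute.Network, string, error) {
	call := svc.Networks.Get("my-project", "my-network").Context(ctx)
	if lastEtag != "" {
		// Ask the server to answer with http.StatusNotModified if the
		// resource still matches lastEtag.
		call = call.IfNoneMatch(lastEtag)
	}
	n, err := call.Do()
	if googleapi.IsNotModified(err) {
		// Unchanged since lastEtag; the caller keeps its cached copy and tag.
		return nil, lastEtag, nil
	}
	if err != nil {
		return nil, "", err
	}
	// The new tag, when the server sends one, comes from the response headers
	// that Do copies into the embedded ServerResponse.
	return n, n.ServerResponse.Header.Get("Etag"), nil
}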
-func (c *InstancesSetMetadataCall) Header() http.Header { +func (c *NetworksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMetadata" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networks.get" call. +// Exactly one of *Network or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Network.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68235,7 +84593,7 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Network{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -68247,17 +84605,16 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets metadata for the specified instance to the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setMetadata", + // "description": "Returns the specified network. 
Gets a list of available networks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.networks.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "network" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "network": { + // "description": "Name of the network to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -68269,58 +84626,39 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", - // "request": { - // "$ref": "Metadata" - // }, + // "path": "{project}/global/networks/{network}", // "response": { - // "$ref": "Operation" + // "$ref": "Network" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setMinCpuPlatform": +// method id "compute.networks.insert": -type InstancesSetMinCpuPlatformCall struct { - s *Service - project string - zone string - instance string - instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksInsertCall struct { + s *Service + project string + network *Network + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMinCpuPlatform: Changes the minimum CPU platform that this -// instance should use. This method can only be called on a stopped -// instance. For more information, read Specifying a Minimum CPU -// Platform. -func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { - c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a network in the specified project using the data +// included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert +func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { + c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest + c.network = network return c } @@ -68338,7 +84676,7 @@ func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { +func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68346,7 +84684,7 @@ func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { +func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68354,35 +84692,35 @@ func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *Instances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { +func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { +func (c *NetworksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -68390,21 +84728,19 @@ func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMinCpuPlatform" call. +// Do executes the "compute.networks.insert" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68435,22 +84771,13 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.", + // "description": "Creates a network in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.setMinCpuPlatform", + // "id": "compute.networks.insert", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -68462,18 +84789,11 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", + // "path": "{project}/global/networks", // "request": { - // "$ref": "InstancesSetMinCpuPlatformRequest" + // "$ref": "Network" // }, // "response": { // "$ref": "Operation" @@ -68486,111 +84806,157 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instances.setScheduling": +// method id "compute.networks.list": -type InstancesSetSchedulingCall struct { - s *Service - project string - zone string - instance string - scheduling *Scheduling - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of networks available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list +func (r *NetworksService) List(project string) *NetworksListCall { + c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NetworksListCall) Filter(filter string) *NetworksListCall { + c.urlParams_.Set("filter", filter) + return c } -// SetScheduling: Sets an instance's scheduling options. 
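// Illustrative sketch only, not part of the generated client: a caller normally
// reaches NetworksInsertCall through the Networks field of Service, as in the
// helper below. It assumes an authenticated *Service named svc, placeholder
// project and network names, and that the Network schema (defined elsewhere in
// this file) includes Name and AutoCreateSubnetworks fields; it uses only the
// context and fmt imports this file already has.
func exampleInsertNetwork(ctx context.Context, svc *Service) error {
	// A fixed request ID (placeholder UUID here) makes a retried call
	// idempotent, per the RequestId documentation above.
	op, err := svc.Networks.Insert("my-project", &Network{
		Name:                  "example-net",
		AutoCreateSubnetworks: true,
	}).
		RequestId("00000000-0000-0000-0000-000000000001").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// The returned Operation is asynchronous; callers typically poll it
	// (for example via GlobalOperations.Get) before using the network.
	fmt.Printf("insert started: %s\n", op.Name)
	return nil
}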
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling -func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { - c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.scheduling = scheduling +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { - c.urlParams_.Set("requestId", requestId) +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { +func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { +func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetSchedulingCall) Header() http.Header { +func (c *NetworksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setScheduling" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.networks.list" call. +// Exactly one of *NetworkList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *NetworkList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68609,7 +84975,7 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NetworkList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -68621,163 +84987,257 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Sets an instance's scheduling options.", - // "httpMethod": "POST", - // "id": "compute.instances.setScheduling", + // "description": "Retrieves the list of networks available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.networks.list", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", - // "request": { - // "$ref": "Scheduling" - // }, + // "path": "{project}/global/networks", // "response": { - // "$ref": "Operation" + // "$ref": "NetworkList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setServiceAccount": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesSetServiceAccountCall struct { - s *Service - project string - zone string - instance string - instancessetserviceaccountrequest *InstancesSetServiceAccountRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.networks.listPeeringRoutes": + +type NetworksListPeeringRoutesCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetServiceAccount: Sets the service account on the instance. For more -// information, read Changing the service account and access scopes for -// an instance. 
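// Illustrative sketch only: the filter, ordering and paging options above
// compose fluently, and Pages drives the pageToken loop shown in its
// implementation. Assumes an authenticated *Service named svc, a placeholder
// project ID, and that NetworkList carries an Items slice of *Network, which
// is defined outside this hunk.
func exampleListNetworks(ctx context.Context, svc *Service) error {
	call := svc.Networks.List("my-project").
		Filter("name != default").
		MaxResults(100).
		OrderBy("creationTimestamp desc")
	return call.Pages(ctx, func(page *NetworkList) error {
		for _, n := range page.Items {
			fmt.Println(n.Name)
		}
		return nil
	})
}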
-func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { - c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListPeeringRoutes: Lists the peering routes exchanged over peering +// connection. +func (r *NetworksService) ListPeeringRoutes(project string, network string) *NetworksListPeeringRoutesCall { + c := &NetworksListPeeringRoutesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetserviceaccountrequest = instancessetserviceaccountrequest + c.network = network return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Direction sets the optional parameter "direction": The direction of +// the exchanged routes. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// Possible values: +// "INCOMING" +// "OUTGOING" +func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("direction", direction) + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { - c.urlParams_.Set("requestId", requestId) +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NetworksListPeeringRoutesCall) Filter(filter string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NetworksListPeeringRoutesCall) MaxResults(maxResults int64) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NetworksListPeeringRoutesCall) OrderBy(orderBy string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NetworksListPeeringRoutesCall) PageToken(pageToken string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// PeeringName sets the optional parameter "peeringName": The response +// will show routes exchanged over the given peering connection. +func (c *NetworksListPeeringRoutesCall) PeeringName(peeringName string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("peeringName", peeringName) + return c +} + +// Region sets the optional parameter "region": The region of the +// request. The response will include all subnet routes, static routes +// and dynamic routes in the region. +func (c *NetworksListPeeringRoutesCall) Region(region string) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("region", region) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { +func (c *NetworksListPeeringRoutesCall) Fields(s ...googleapi.Field) *NetworksListPeeringRoutesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworksListPeeringRoutesCall) IfNoneMatch(entityTag string) *NetworksListPeeringRoutesCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { +func (c *NetworksListPeeringRoutesCall) Context(ctx context.Context) *NetworksListPeeringRoutesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetServiceAccountCall) Header() http.Header { +func (c *NetworksListPeeringRoutesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/listPeeringRoutes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setServiceAccount" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networks.listPeeringRoutes" call. +// Exactly one of *ExchangedPeeringRoutesList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ExchangedPeeringRoutesList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*ExchangedPeeringRoutesList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68796,7 +85256,7 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ExchangedPeeringRoutesList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -68808,22 +85268,62 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.setServiceAccount", + // "description": "Lists the peering routes exchanged over peering connection.", + // "httpMethod": "GET", + // "id": "compute.networks.listPeeringRoutes", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "network" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", + // "direction": { + // "description": "The direction of the exchanged routes.", + // "enum": [ + // "INCOMING", + // "OUTGOING" + // ], + // "enumDescriptions": [ + // "", + // "" + // ], + // "location": "query", + // "type": "string" + // }, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "network": { + // "description": "Name of the network for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "peeringName": { + // "description": "The response will show routes exchanged over the given peering connection.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -68831,57 +85331,66 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "region": { + // "description": "The region of the request. The response will include all subnet routes, static routes and dynamic routes in the region.", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", - // "request": { - // "$ref": "InstancesSetServiceAccountRequest" - // }, + // "path": "{project}/global/networks/{network}/listPeeringRoutes", // "response": { - // "$ref": "Operation" + // "$ref": "ExchangedPeeringRoutesList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setShieldedVmIntegrityPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *NetworksListPeeringRoutesCall) Pages(ctx context.Context, f func(*ExchangedPeeringRoutesList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesSetShieldedVmIntegrityPolicyCall struct { - s *Service - project string - zone string - instance string - shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.networks.patch": + +type NetworksPatchCall struct { + s *Service + project string + network string + network2 *Network + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetShieldedVmIntegrityPolicy: Sets the Shielded VM integrity policy -// for a VM instance. You can only use this method on a running VM -// instance. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. -func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone string, instance string, shieldedvmintegritypolicy *ShieldedVmIntegrityPolicy) *InstancesSetShieldedVmIntegrityPolicyCall { - c := &InstancesSetShieldedVmIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified network with the data included in the +// request. Only the following fields can be modified: +// routingConfig.routingMode. +func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { + c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.shieldedvmintegritypolicy = shieldedvmintegritypolicy + c.network = network + c.network2 = network2 return c } @@ -68899,7 +85408,7 @@ func (r *InstancesService) SetShieldedVmIntegrityPolicy(project string, zone str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedVmIntegrityPolicyCall { +func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68907,7 +85416,7 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) RequestId(requestId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedVmIntegrityPolicyCall { +func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68915,35 +85424,35 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
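// Illustrative sketch only: listPeeringRoutes takes the same paging options as
// list plus the peering-specific parameters documented above, and its Pages
// helper walks nextPageToken in the same way. Assumes an authenticated
// *Service named svc and placeholder project, network, peering and region
// names; only NextPageToken is read from each page here.
func exampleListPeeringRoutes(ctx context.Context, svc *Service) error {
	call := svc.Networks.ListPeeringRoutes("my-project", "example-net").
		Direction("INCOMING").
		PeeringName("example-peering").
		Region("us-central1")
	return call.Pages(ctx, func(page *ExchangedPeeringRoutesList) error {
		fmt.Printf("received a page of peering routes (next token %q)\n", page.NextPageToken)
		return nil
	})
}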
-func (c *InstancesSetShieldedVmIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedVmIntegrityPolicyCall { +func (c *NetworksPatchCall) Context(ctx context.Context) *NetworksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetShieldedVmIntegrityPolicyCall) Header() http.Header { +func (c *NetworksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmintegritypolicy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -68951,21 +85460,20 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) doRequest(alt string) (*http } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setShieldedVmIntegrityPolicy" call. +// Do executes the "compute.networks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68996,17 +85504,16 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Sets the Shielded VM integrity policy for a VM instance. You can only use this method on a running VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Patches the specified network with the data included in the request. 
Only the following fields can be modified: routingConfig.routingMode.", // "httpMethod": "PATCH", - // "id": "compute.instances.setShieldedVmIntegrityPolicy", + // "id": "compute.networks.patch", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "network" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "network": { + // "description": "Name of the network to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -69023,18 +85530,11 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setShieldedVmIntegrityPolicy", + // "path": "{project}/global/networks/{network}", // "request": { - // "$ref": "ShieldedVmIntegrityPolicy" + // "$ref": "Network" // }, // "response": { // "$ref": "Operation" @@ -69047,28 +85547,24 @@ func (c *InstancesSetShieldedVmIntegrityPolicyCall) Do(opts ...googleapi.CallOpt } -// method id "compute.instances.setTags": +// method id "compute.networks.removePeering": -type InstancesSetTagsCall struct { - s *Service - project string - zone string - instance string - tags *Tags - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksRemovePeeringCall struct { + s *Service + project string + network string + networksremovepeeringrequest *NetworksRemovePeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTags: Sets network tags for the specified instance to the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags -func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { - c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemovePeering: Removes a peering from the specified network. 
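// Illustrative sketch only: per the patch description above, only
// routingConfig.routingMode can be modified. Assumes an authenticated
// *Service named svc and that the Network schema includes a RoutingConfig
// field of type *NetworkRoutingConfig with a RoutingMode string, neither of
// which is shown in this hunk.
func examplePatchRoutingMode(ctx context.Context, svc *Service) error {
	op, err := svc.Networks.Patch("my-project", "example-net", &Network{
		RoutingConfig: &NetworkRoutingConfig{RoutingMode: "GLOBAL"},
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("patch started: %s\n", op.Name)
	return nil
}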
+func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { + c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.tags = tags + c.network = network + c.networksremovepeeringrequest = networksremovepeeringrequest return c } @@ -69086,7 +85582,7 @@ func (r *InstancesService) SetTags(project string, zone string, instance string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { +func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemovePeeringCall { c.urlParams_.Set("requestId", requestId) return c } @@ -69094,7 +85590,7 @@ func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { +func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemovePeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69102,35 +85598,35 @@ func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { +func (c *NetworksRemovePeeringCall) Context(ctx context.Context) *NetworksRemovePeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetTagsCall) Header() http.Header { +func (c *NetworksRemovePeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksremovepeeringrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -69138,21 +85634,20 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setTags" call. +// Do executes the "compute.networks.removePeering" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69183,17 +85678,16 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Sets network tags for the specified instance to the data included in the request.", + // "description": "Removes a peering from the specified network.", // "httpMethod": "POST", - // "id": "compute.instances.setTags", + // "id": "compute.networks.removePeering", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "network" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "network": { + // "description": "Name of the network resource to remove peering from.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -69210,18 +85704,11 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setTags", + // "path": "{project}/global/networks/{network}/removePeering", // "request": { - // "$ref": "Tags" + // "$ref": "NetworksRemovePeeringRequest" // }, // "response": { // "$ref": "Operation" @@ -69234,32 +85721,49 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.instances.simulateMaintenanceEvent": +// method id "compute.networks.switchToCustomMode": -type InstancesSimulateMaintenanceEventCall struct { +type NetworksSwitchToCustomModeCall struct { s *Service project string - zone string - instance string + network string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SimulateMaintenanceEvent: Simulates a maintenance event on the -// instance. 
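// Illustrative sketch only: removePeering posts a small request body naming
// the peering to delete. Assumes an authenticated *Service named svc and that
// NetworksRemovePeeringRequest (defined outside this hunk) has a Name field
// for the peering.
func exampleRemovePeering(ctx context.Context, svc *Service) error {
	op, err := svc.Networks.RemovePeering("my-project", "example-net",
		&NetworksRemovePeeringRequest{Name: "example-peering"},
	).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("removePeering started: %s\n", op.Name)
	return nil
}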
-func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { - c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SwitchToCustomMode: Switches the network mode from auto subnet mode +// to custom subnet mode. +func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { + c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.network = network + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSwitchToCustomModeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { +func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69267,21 +85771,21 @@ func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *In // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { +func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { +func (c *NetworksSwitchToCustomModeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -69290,7 +85794,7 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -69298,21 +85802,20 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.simulateMaintenanceEvent" call. +// Do executes the "compute.networks.switchToCustomMode" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69343,17 +85846,16 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Simulates a maintenance event on the instance.", + // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", // "httpMethod": "POST", - // "id": "compute.instances.simulateMaintenanceEvent", + // "id": "compute.networks.switchToCustomMode", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "network" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "network": { + // "description": "Name of the network to be updated.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -69366,15 +85868,13 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "path": "{project}/global/networks/{network}/switchToCustomMode", // "response": { // "$ref": "Operation" // }, @@ -69386,52 +85886,32 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) } -// method id "compute.instances.start": +// method id "compute.networks.testIamPermissions": -type InstancesStartCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Start: Starts an instance that was stopped using the instances().stop -// method. For more information, see Restart an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start -func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { - c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *NetworksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworksTestIamPermissionsCall { + c := &NetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { +func (c *NetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69439,30 +85919,35 @@ func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
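// Illustrative sketch only: switchToCustomMode takes no request body; the
// network name and an optional requestId are the only inputs. Assumes an
// authenticated *Service named svc and uses a placeholder request ID.
func exampleSwitchToCustomMode(ctx context.Context, svc *Service) error {
	op, err := svc.Networks.SwitchToCustomMode("my-project", "example-net").
		RequestId("00000000-0000-0000-0000-000000000002").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("switchToCustomMode started: %s\n", op.Name)
	return nil
}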
-func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { +func (c *NetworksTestIamPermissionsCall) Context(ctx context.Context) *NetworksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStartCall) Header() http.Header { +func (c *NetworksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -69471,20 +85956,19 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.start" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69503,7 +85987,7 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -69515,22 +85999,14 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Starts an instance that was stopped using the instances().stop method. 
For more information, see Restart an instance.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instances.start", + // "id": "compute.networks.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -69538,53 +86014,51 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/start", + // "path": "{project}/global/networks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.startWithEncryptionKey": +// method id "compute.networks.updatePeering": -type InstancesStartWithEncryptionKeyCall struct { - s *Service - project string - zone string - instance string - instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksUpdatePeeringCall struct { + s *Service + project string + network string + networksupdatepeeringrequest *NetworksUpdatePeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// StartWithEncryptionKey: Starts an instance that was stopped using the -// instances().stop method. For more information, see Restart an -// instance. 
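A small sketch of the compute.networks.testIamPermissions call whose metadata closes above. It assumes a *compute.Service built as in the previous sketch, and that TestPermissionsRequest and TestPermissionsResponse each carry a Permissions []string field; those structs are part of the generated package but are not shown in this hunk.

package computeexamples

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v1"
)

// hasNetworkPermissions returns the subset of perms that the caller actually
// holds on the named network.
func hasNetworkPermissions(ctx context.Context, svc *compute.Service, project, network string, perms []string) ([]string, error) {
    req := &compute.TestPermissionsRequest{Permissions: perms}
    resp, err := svc.Networks.TestIamPermissions(project, network, req).Context(ctx).Do()
    if err != nil {
        return nil, fmt.Errorf("testIamPermissions: %v", err)
    }
    return resp.Permissions, nil
}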
-func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { - c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdatePeering: Updates the specified network peering with the data +// included in the request Only the following fields can be modified: +// NetworkPeering.export_custom_routes, and +// NetworkPeering.import_custom_routes +func (r *NetworksService) UpdatePeering(project string, network string, networksupdatepeeringrequest *NetworksUpdatePeeringRequest) *NetworksUpdatePeeringCall { + c := &NetworksUpdatePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest + c.network = network + c.networksupdatepeeringrequest = networksupdatepeeringrequest return c } @@ -69602,7 +86076,7 @@ func (r *InstancesService) StartWithEncryptionKey(project string, zone string, i // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { +func (c *NetworksUpdatePeeringCall) RequestId(requestId string) *NetworksUpdatePeeringCall { c.urlParams_.Set("requestId", requestId) return c } @@ -69610,7 +86084,7 @@ func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *Insta // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { +func (c *NetworksUpdatePeeringCall) Fields(s ...googleapi.Field) *NetworksUpdatePeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69618,57 +86092,56 @@ func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *Inst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { +func (c *NetworksUpdatePeeringCall) Context(ctx context.Context) *NetworksUpdatePeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { +func (c *NetworksUpdatePeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksupdatepeeringrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/updatePeering") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.startWithEncryptionKey" call. +// Do executes the "compute.networks.updatePeering" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69699,17 +86172,16 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Starts an instance that was stopped using the instances().stop method. 
For more information, see Restart an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.startWithEncryptionKey", + // "description": "Updates the specified network peering with the data included in the request Only the following fields can be modified: NetworkPeering.export_custom_routes, and NetworkPeering.import_custom_routes", + // "httpMethod": "PATCH", + // "id": "compute.networks.updatePeering", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "network" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", + // "network": { + // "description": "Name of the network resource which the updated peering is belonging to.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -69726,18 +86198,11 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + // "path": "{project}/global/networks/{network}/updatePeering", // "request": { - // "$ref": "InstancesStartWithEncryptionKeyRequest" + // "$ref": "NetworksUpdatePeeringRequest" // }, // "response": { // "$ref": "Operation" @@ -69750,30 +86215,26 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.instances.stop": +// method id "compute.nodeGroups.addNodes": -type InstancesStopCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsAddNodesCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Stop: Stops a running instance, shutting it down cleanly, and allows -// you to restart the instance at a later time. Stopped instances do not -// incur VM usage charges while they are stopped. However, resources -// that the VM is using, such as persistent disks and static IP -// addresses, will continue to be charged until they are deleted. For -// more information, see Stopping an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop -func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { - c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddNodes: Adds specified number of nodes to the node group. 
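The updatePeering method above only allows export_custom_routes and import_custom_routes to change; below is a hedged sketch of turning both on. The NetworksUpdatePeeringRequest.NetworkPeering field and the NetworkPeering.ExportCustomRoutes / ImportCustomRoutes booleans are assumed from the generated structs, which sit outside this hunk.

package computeexamples

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v1"
)

// enableCustomRouteExchange patches an existing peering so custom routes are
// exported and imported; only these two fields are mutable via updatePeering.
func enableCustomRouteExchange(ctx context.Context, svc *compute.Service, project, network, peering string) (*compute.Operation, error) {
    req := &compute.NetworksUpdatePeeringRequest{
        NetworkPeering: &compute.NetworkPeering{
            Name:               peering,
            ExportCustomRoutes: true,
            ImportCustomRoutes: true,
            // To turn one of these back off, also list the field name in
            // ForceSendFields so the false value is not dropped from the JSON.
        },
    }
    op, err := svc.Networks.UpdatePeering(project, network, req).Context(ctx).Do()
    if err != nil {
        return nil, fmt.Errorf("updatePeering: %v", err)
    }
    return op, nil
}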
+func (r *NodeGroupsService) AddNodes(project string, zone string, nodeGroup string, nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest) *NodeGroupsAddNodesCall { + c := &NodeGroupsAddNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance + c.nodeGroup = nodeGroup + c.nodegroupsaddnodesrequest = nodegroupsaddnodesrequest return c } @@ -69791,7 +86252,7 @@ func (r *InstancesService) Stop(project string, zone string, instance string) *I // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { +func (c *NodeGroupsAddNodesCall) RequestId(requestId string) *NodeGroupsAddNodesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -69799,7 +86260,7 @@ func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { +func (c *NodeGroupsAddNodesCall) Fields(s ...googleapi.Field) *NodeGroupsAddNodesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69807,30 +86268,35 @@ func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { +func (c *NodeGroupsAddNodesCall) Context(ctx context.Context) *NodeGroupsAddNodesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStopCall) Header() http.Header { +func (c *NodeGroupsAddNodesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsaddnodesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -69838,21 +86304,21 @@ func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.stop" call. +// Do executes the "compute.nodeGroups.addNodes" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69883,17 +86349,17 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + // "description": "Adds specified number of nodes to the node group.", // "httpMethod": "POST", - // "id": "compute.instances.stop", + // "id": "compute.nodeGroups.addNodes", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "nodeGroup" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to stop.", + // "nodeGroup": { + // "description": "Name of the NodeGroup resource.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -69919,7 +86385,10 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/stop", + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", + // "request": { + // "$ref": "NodeGroupsAddNodesRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -69931,116 +86400,156 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.instances.suspend": +// method id "compute.nodeGroups.aggregatedList": -type InstancesSuspendCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Suspend: This method suspends a running instance, saving its state to -// persistent storage, and allows you to resume the instance at a later -// time. Suspended instances incur reduced per-minute, virtual machine -// usage charges while they are suspended. Any resources the virtual -// machine is using, such as persistent disks and static IP addresses, -// will continue to be charged until they are deleted. -func (r *InstancesService) Suspend(project string, zone string, instance string) *InstancesSuspendCall { - c := &InstancesSuspendCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of node groups. Note: +// use nodeGroups.listNodes for more details about each group. 
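A sketch of the compute.nodeGroups.addNodes call documented above. It assumes a *compute.Service built as in the first sketch and that NodeGroupsAddNodesRequest exposes an AdditionalNodeCount field, which is part of the generated package but not of this hunk.

package computeexamples

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v1"
)

// growNodeGroup asks the node group to add n more sole-tenant nodes and
// returns the zonal operation tracking the resize.
func growNodeGroup(ctx context.Context, svc *compute.Service, project, zone, nodeGroup string, n int64) (*compute.Operation, error) {
    req := &compute.NodeGroupsAddNodesRequest{AdditionalNodeCount: n}
    op, err := svc.NodeGroups.AddNodes(project, zone, nodeGroup, req).Context(ctx).Do()
    if err != nil {
        return nil, fmt.Errorf("addNodes: %v", err)
    }
    return op, nil
}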
+func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregatedListCall { + c := &NodeGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false (== preserve localSSD data). -func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { - c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NodeGroupsAggregatedListCall) Filter(filter string) *NodeGroupsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NodeGroupsAggregatedListCall) MaxResults(maxResults int64) *NodeGroupsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". 
This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall { - c.urlParams_.Set("requestId", requestId) +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NodeGroupsAggregatedListCall) OrderBy(orderBy string) *NodeGroupsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCall { +func (c *NodeGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NodeGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NodeGroupsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSuspendCall) Context(ctx context.Context) *InstancesSuspendCall { +func (c *NodeGroupsAggregatedListCall) Context(ctx context.Context) *NodeGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSuspendCall) Header() http.Header { +func (c *NodeGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/suspend") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeGroups") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.suspend" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeGroups.aggregatedList" call. +// Exactly one of *NodeGroupAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NodeGroupAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70059,7 +86568,7 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NodeGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70071,87 +86580,122 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. Suspended instances incur reduced per-minute, virtual machine usage charges while they are suspended. Any resources the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted.", - // "httpMethod": "POST", - // "id": "compute.instances.suspend", + // "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group.", + // "httpMethod": "GET", + // "id": "compute.nodeGroups.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false (== preserve localSSD data).", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. 
For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", - // "type": "boolean" - // }, - // "instance": { - // "description": "Name of the instance resource to suspend.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/suspend", + // "path": "{project}/aggregated/nodeGroups", // "response": { - // "$ref": "Operation" + // "$ref": "NodeGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NodeGroupAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.nodeGroups.delete": + +type NodeGroupsDeleteCall struct { + s *Service + project string + zone string + nodeGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { - c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified NodeGroup resource. +func (r *NodeGroupsService) Delete(project string, zone string, nodeGroup string) *NodeGroupsDeleteCall { + c := &NodeGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.nodeGroup = nodeGroup + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
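The aggregatedList call and its Pages helper shown above take care of pageToken plumbing; here is a sketch of walking every zone's node groups with them. The NodeGroupAggregatedList.Items map, the NodeGroupsScopedList.NodeGroups slice, and the NodeGroup.Name / Size fields are assumptions based on the usual generated aggregated-list shape and are not part of this hunk.

package computeexamples

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v1"
)

// listAllNodeGroups walks every zone's node groups; Pages drives the
// pageToken handling, so the callback only ever sees one page at a time.
func listAllNodeGroups(ctx context.Context, svc *compute.Service, project string) error {
    call := svc.NodeGroups.AggregatedList(project).MaxResults(100)
    return call.Pages(ctx, func(page *compute.NodeGroupAggregatedList) error {
        for scope, scoped := range page.Items {
            for _, ng := range scoped.NodeGroups {
                fmt.Printf("%s: %s (%d nodes)\n", scope, ng.Name, ng.Size)
            }
        }
        return nil
    })
}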
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsDeleteCall) RequestId(requestId string) *NodeGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { +func (c *NodeGroupsDeleteCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70159,57 +86703,52 @@ func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { +func (c *NodeGroupsDeleteCall) Context(ctx context.Context) *NodeGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesTestIamPermissionsCall) Header() http.Header { +func (c *NodeGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.nodeGroups.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70228,7 +86767,7 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70240,15 +86779,22 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instances.testIamPermissions", + // "description": "Deletes the specified NodeGroup resource.", + // "httpMethod": "DELETE", + // "id": "compute.nodeGroups.delete", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "nodeGroup" // ], // "parameters": { + // "nodeGroup": { + // "description": "Name of the NodeGroup resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -70256,11 +86802,9 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, // "zone": { @@ -70271,46 +86815,38 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.updateAccessConfig": +// method id "compute.nodeGroups.deleteNodes": -type InstancesUpdateAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsDeleteNodesCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateAccessConfig: Updates the specified access config from an -// instance's network interface with the data included in the request. -// This method supports PATCH semantics and uses the JSON merge patch -// format and processing rules. -func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { - c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteNodes: Deletes specified nodes from the node group. +func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { + c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig + c.nodeGroup = nodeGroup + c.nodegroupsdeletenodesrequest = nodegroupsdeletenodesrequest return c } @@ -70328,7 +86864,7 @@ func (r *InstancesService) UpdateAccessConfig(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *InstancesUpdateAccessConfigCall { +func (c *NodeGroupsDeleteNodesCall) RequestId(requestId string) *NodeGroupsDeleteNodesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -70336,7 +86872,7 @@ func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *Instances // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
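Deleting a node group returns a zonal Operation rather than completing synchronously. The sketch below issues the compute.nodeGroups.delete call generated above and polls ZoneOperations.Get until the operation reports DONE; the fixed polling interval, the Operation.Status / Error fields, and the ZoneOperations service are assumptions about the surrounding generated client rather than content of this hunk. The deleteNodes variant introduced here is driven the same way, with a request body naming the nodes to remove.

package computeexamples

import (
    "context"
    "fmt"
    "time"

    compute "google.golang.org/api/compute/v1"
)

// deleteNodeGroupAndWait issues the delete and then polls the zonal operation
// until the API reports it DONE, returning any operation-level error.
func deleteNodeGroupAndWait(ctx context.Context, svc *compute.Service, project, zone, nodeGroup string) error {
    op, err := svc.NodeGroups.Delete(project, zone, nodeGroup).Context(ctx).Do()
    if err != nil {
        return fmt.Errorf("nodeGroups.delete: %v", err)
    }
    opName := op.Name
    for op.Status != "DONE" {
        time.Sleep(2 * time.Second) // simple fixed backoff for the sketch
        op, err = svc.ZoneOperations.Get(project, zone, opName).Context(ctx).Do()
        if err != nil {
            return fmt.Errorf("polling operation %s: %v", opName, err)
        }
    }
    if op.Error != nil && len(op.Error.Errors) > 0 {
        return fmt.Errorf("delete failed: %s", op.Error.Errors[0].Message)
    }
    return nil
}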
-func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateAccessConfigCall { +func (c *NodeGroupsDeleteNodesCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteNodesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70344,35 +86880,35 @@ func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateAccessConfigCall) Context(ctx context.Context) *InstancesUpdateAccessConfigCall { +func (c *NodeGroupsDeleteNodesCall) Context(ctx context.Context) *NodeGroupsDeleteNodesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateAccessConfigCall) Header() http.Header { +func (c *NodeGroupsDeleteNodesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsdeletenodesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -70380,21 +86916,21 @@ func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateAccessConfig" call. +// Do executes the "compute.nodeGroups.deleteNodes" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70425,29 +86961,22 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified access config from an instance's network interface with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Deletes specified nodes from the node group.", // "httpMethod": "POST", - // "id": "compute.instances.updateAccessConfig", + // "id": "compute.nodeGroups.deleteNodes", // "parameterOrder": [ // "project", // "zone", - // "instance", - // "networkInterface" + // "nodeGroup" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "nodeGroup": { + // "description": "Name of the NodeGroup resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface where the access config is attached.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -70468,9 +86997,9 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", // "request": { - // "$ref": "AccessConfig" + // "$ref": "NodeGroupsDeleteNodesRequest" // }, // "response": { // "$ref": "Operation" @@ -70483,113 +87012,100 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instances.updateDisplayDevice": +// method id "compute.nodeGroups.get": -type InstancesUpdateDisplayDeviceCall struct { - s *Service - project string - zone string - instance string - displaydevice *DisplayDevice - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsGetCall struct { + s *Service + project string + zone string + nodeGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// UpdateDisplayDevice: Updates the Display config for a VM instance. -// You can only use this method on a stopped VM instance. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. -func (r *InstancesService) UpdateDisplayDevice(project string, zone string, instance string, displaydevice *DisplayDevice) *InstancesUpdateDisplayDeviceCall { - c := &InstancesUpdateDisplayDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified NodeGroup. Get a list of available +// NodeGroups by making a list() request. Note: the "nodes" field should +// not be used. Use nodeGroups.listNodes instead. +func (r *NodeGroupsService) Get(project string, zone string, nodeGroup string) *NodeGroupsGetCall { + c := &NodeGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.displaydevice = displaydevice - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateDisplayDeviceCall) RequestId(requestId string) *InstancesUpdateDisplayDeviceCall { - c.urlParams_.Set("requestId", requestId) + c.nodeGroup = nodeGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateDisplayDeviceCall) Fields(s ...googleapi.Field) *InstancesUpdateDisplayDeviceCall { +func (c *NodeGroupsGetCall) Fields(s ...googleapi.Field) *NodeGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeGroupsGetCall) IfNoneMatch(entityTag string) *NodeGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateDisplayDeviceCall) Context(ctx context.Context) *InstancesUpdateDisplayDeviceCall { +func (c *NodeGroupsGetCall) Context(ctx context.Context) *NodeGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateDisplayDeviceCall) Header() http.Header { +func (c *NodeGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.displaydevice) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateDisplayDevice" call. 
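The nodeGroups.get call above supports conditional reads through IfNoneMatch; the sketch below skips work when the resource has not changed. Whether the Compute API actually returns an ETag for node groups is not established by this diff, so the snippet only shows the client-side IfNoneMatch / googleapi.IsNotModified mechanics, with lastETag supplied by the caller.

package computeexamples

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v1"
    "google.golang.org/api/googleapi"
)

// getNodeGroupIfChanged refetches the node group only if it no longer matches
// lastETag; a server-side 304 surfaces through googleapi.IsNotModified(err).
func getNodeGroupIfChanged(ctx context.Context, svc *compute.Service, project, zone, name, lastETag string) (*compute.NodeGroup, error) {
    ng, err := svc.NodeGroups.Get(project, zone, name).
        IfNoneMatch(lastETag).
        Context(ctx).
        Do()
    if googleapi.IsNotModified(err) {
        return nil, nil // unchanged since lastETag
    }
    if err != nil {
        return nil, fmt.Errorf("nodeGroups.get: %v", err)
    }
    return ng, nil
}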
-// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.nodeGroups.get" call. +// Exactly one of *NodeGroup or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *NodeGroup.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70608,7 +87124,7 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NodeGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70620,19 +87136,19 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Updates the Display config for a VM instance. You can only use this method on a stopped VM instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.instances.updateDisplayDevice", + // "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. Use nodeGroups.listNodes instead.", + // "httpMethod": "GET", + // "id": "compute.nodeGroups.get", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "nodeGroup" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "nodeGroup": { + // "description": "Name of the node group to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -70643,11 +87159,6 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -70656,107 +87167,92 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateDisplayDevice", - // "request": { - // "$ref": "DisplayDevice" - // }, + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "NodeGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } - -} - -// method id "compute.instances.updateNetworkInterface": - -type InstancesUpdateNetworkInterfaceCall struct { - s *Service - project string - zone string - instance string - networkinterface *NetworkInterface - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + } -// UpdateNetworkInterface: Updates an instance's network interface. This -// method follows PATCH semantics. -func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { - c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.networkinterface = networkinterface - return c +// method id "compute.nodeGroups.getIamPolicy": + +type NodeGroupsGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *InstancesUpdateNetworkInterfaceCall { - c.urlParams_.Set("requestId", requestId) +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *NodeGroupsService) GetIamPolicy(project string, zone string, resource string) *NodeGroupsGetIamPolicyCall { + c := &NodeGroupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesUpdateNetworkInterfaceCall { +func (c *NodeGroupsGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeGroupsGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeGroupsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateNetworkInterfaceCall) Context(ctx context.Context) *InstancesUpdateNetworkInterfaceCall { +func (c *NodeGroupsGetIamPolicyCall) Context(ctx context.Context) *NodeGroupsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { +func (c *NodeGroupsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkinterface) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -70764,19 +87260,19 @@ func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Respo googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateNetworkInterface" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeGroups.getIamPolicy" call. 
+// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70795,7 +87291,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70807,29 +87303,15 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Updates an instance's network interface. This method follows PATCH semantics.", - // "httpMethod": "PATCH", - // "id": "compute.instances.updateNetworkInterface", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.nodeGroups.getIamPolicy", // "parameterOrder": [ // "project", // "zone", - // "instance", - // "networkInterface" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "networkInterface": { - // "description": "The name of the network interface to update.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -70837,9 +87319,11 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { @@ -70850,44 +87334,39 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", - // "request": { - // "$ref": "NetworkInterface" - // }, + // "path": "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.updateShieldedVmConfig": +// method id "compute.nodeGroups.insert": -type InstancesUpdateShieldedVmConfigCall struct { - s *Service - project string - zone string - instance string - shieldedvmconfig *ShieldedVmConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsInsertCall struct { + s *Service + project string + zone string + nodegroup *NodeGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateShieldedVmConfig: Updates the Shielded VM config for a VM -// instance. You can only use this method on a stopped VM instance. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. -func (r *InstancesService) UpdateShieldedVmConfig(project string, zone string, instance string, shieldedvmconfig *ShieldedVmConfig) *InstancesUpdateShieldedVmConfigCall { - c := &InstancesUpdateShieldedVmConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a NodeGroup resource in the specified project using +// the data included in the request. +func (r *NodeGroupsService) Insert(project string, zone string, initialNodeCount int64, nodegroup *NodeGroup) *NodeGroupsInsertCall { + c := &NodeGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.shieldedvmconfig = shieldedvmconfig + c.urlParams_.Set("initialNodeCount", fmt.Sprint(initialNodeCount)) + c.nodegroup = nodegroup return c } @@ -70905,7 +87384,7 @@ func (r *InstancesService) UpdateShieldedVmConfig(project string, zone string, i // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateShieldedVmConfigCall) RequestId(requestId string) *InstancesUpdateShieldedVmConfigCall { +func (c *NodeGroupsInsertCall) RequestId(requestId string) *NodeGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -70913,7 +87392,7 @@ func (c *InstancesUpdateShieldedVmConfigCall) RequestId(requestId string) *Insta // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesUpdateShieldedVmConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateShieldedVmConfigCall { +func (c *NodeGroupsInsertCall) Fields(s ...googleapi.Field) *NodeGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70921,57 +87400,56 @@ func (c *InstancesUpdateShieldedVmConfigCall) Fields(s ...googleapi.Field) *Inst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateShieldedVmConfigCall) Context(ctx context.Context) *InstancesUpdateShieldedVmConfigCall { +func (c *NodeGroupsInsertCall) Context(ctx context.Context) *NodeGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateShieldedVmConfigCall) Header() http.Header { +func (c *NodeGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateShieldedVmConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedvmconfig) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateShieldedVmConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateShieldedVmConfig" call. +// Do executes the "compute.nodeGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71002,21 +87480,21 @@ func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Updates the Shielded VM config for a VM instance. You can only use this method on a stopped VM instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.instances.updateShieldedVmConfig", + // "description": "Creates a NodeGroup resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.insert", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "initialNodeCount" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "initialNodeCount": { + // "description": "Initial count of nodes in the node group.", + // "format": "int32", + // "location": "query", // "required": true, - // "type": "string" + // "type": "integer" // }, // "project": { // "description": "Project ID for this request.", @@ -71038,9 +87516,9 @@ func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedVmConfig", + // "path": "{project}/zones/{zone}/nodeGroups", // "request": { - // "$ref": "ShieldedVmConfig" + // "$ref": "NodeGroup" // }, // "response": { // "$ref": "Operation" @@ -71053,22 +87531,25 @@ func (c *InstancesUpdateShieldedVmConfigCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.interconnectAttachments.aggregatedList": +// method id "compute.nodeGroups.list": -type InterconnectAttachmentsAggregatedListCall struct { +type NodeGroupsListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of interconnect -// attachments. -func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { - c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of node groups available to the specified +// project. Note: use nodeGroups.listNodes for more details about each +// group. +func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCall { + c := &NodeGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -71094,7 +87575,7 @@ func (r *InterconnectAttachmentsService) AggregatedList(project string) *Interco // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { +func (c *NodeGroupsListCall) Filter(filter string) *NodeGroupsListCall { c.urlParams_.Set("filter", filter) return c } @@ -71105,7 +87586,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *Inter // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { +func (c *NodeGroupsListCall) MaxResults(maxResults int64) *NodeGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -71122,7 +87603,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { +func (c *NodeGroupsListCall) OrderBy(orderBy string) *NodeGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -71130,7 +87611,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *Int // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { +func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -71138,7 +87619,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { +func (c *NodeGroupsListCall) Fields(s ...googleapi.Field) *NodeGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71148,7 +87629,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { +func (c *NodeGroupsListCall) IfNoneMatch(entityTag string) *NodeGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -71156,21 +87637,21 @@ func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { +func (c *NodeGroupsListCall) Context(ctx context.Context) *NodeGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { +func (c *NodeGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -71182,7 +87663,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -71191,19 +87672,19 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.aggregatedList" call. -// Exactly one of *InterconnectAttachmentAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { +// Do executes the "compute.nodeGroups.list" call. +// Exactly one of *NodeGroupList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NodeGroupList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71222,7 +87703,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachmentAggregatedList{ + ret := &NodeGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71234,11 +87715,12 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Retrieves an aggregated list of interconnect attachments.", + // "description": "Retrieves a list of node groups available to the specified project. 
Note: use nodeGroups.listNodes for more details about each group.", // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.aggregatedList", + // "id": "compute.nodeGroups.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -71270,11 +87752,18 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/interconnectAttachments", + // "path": "{project}/zones/{zone}/nodeGroups", // "response": { - // "$ref": "InterconnectAttachmentAggregatedList" + // "$ref": "NodeGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -71288,7 +87777,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { +func (c *NodeGroupsListCall) Pages(ctx context.Context, f func(*NodeGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -71306,273 +87795,147 @@ func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f } } -// method id "compute.interconnectAttachments.delete": +// method id "compute.nodeGroups.listNodes": -type InterconnectAttachmentsDeleteCall struct { - s *Service - project string - region string - interconnectAttachment string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeGroupsListNodesCall struct { + s *Service + project string + zone string + nodeGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified interconnect attachment. -func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { - c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListNodes: Lists nodes in the node group. +func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup string) *NodeGroupsListNodesCall { + c := &NodeGroupsListNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment + c.zone = zone + c.nodeGroup = nodeGroup return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. 
The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NodeGroupsListNodesCall) Filter(filter string) *NodeGroupsListNodesCall { + c.urlParams_.Set("filter", filter) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { - c.ctx_ = ctx +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NodeGroupsListNodesCall) MaxResults(maxResults int64) *NodeGroupsListNodesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.interconnectAttachments.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified interconnect attachment.", - // "httpMethod": "DELETE", - // "id": "compute.interconnectAttachments.delete", - // "parameterOrder": [ - // "project", - // "region", - // "interconnectAttachment" - // ], - // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.interconnectAttachments.get": - -type InterconnectAttachmentsGetCall struct { - s *Service - project string - region string - interconnectAttachment string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NodeGroupsListNodesCall) OrderBy(orderBy string) *NodeGroupsListNodesCall { + c.urlParams_.Set("orderBy", orderBy) + return c } -// Get: Returns the specified interconnect attachment. -func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { - c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NodeGroupsListNodesCall) PageToken(pageToken string) *NodeGroupsListNodesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { +func (c *NodeGroupsListNodesCall) Fields(s ...googleapi.Field) *NodeGroupsListNodesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { +func (c *NodeGroupsListNodesCall) Context(ctx context.Context) *NodeGroupsListNodesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsGetCall) Header() http.Header { +func (c *NodeGroupsListNodesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.get" call. -// Exactly one of *InterconnectAttachment or error will be non-nil. Any +// Do executes the "compute.nodeGroups.listNodes" call. +// Exactly one of *NodeGroupsListNodes or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InterconnectAttachment.ServerResponse.Header or (if a response was +// *NodeGroupsListNodes.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { +func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsListNodes, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71591,7 +87954,7 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachment{ + ret := &NodeGroupsListNodes{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71603,22 +87966,45 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte } return ret, nil // { - // "description": "Returns the specified interconnect attachment.", - // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.get", + // "description": "Lists nodes in the node group.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.listNodes", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "zone", + // "nodeGroup" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to return.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "nodeGroup": { + // "description": "Name of the NodeGroup resource whose nodes you want to list.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -71626,17 +88012,17 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", // "response": { - // "$ref": "InterconnectAttachment" + // "$ref": "NodeGroupsListNodes" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -71647,51 +88033,55 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte } -// method id "compute.interconnectAttachments.insert": - -type InterconnectAttachmentsInsertCall struct { - s *Service - project string - region string - interconnectattachment *InterconnectAttachment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeGroupsListNodesCall) Pages(ctx context.Context, f func(*NodeGroupsListNodes) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Insert: Creates an InterconnectAttachment in the specified project -// using the data included in the request. -func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { - c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.interconnectattachment = interconnectattachment - return c +// method id "compute.nodeGroups.setIamPolicy": + +type NodeGroupsSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { - c.urlParams_.Set("requestId", requestId) +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *NodeGroupsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *NodeGroupsSetIamPolicyCall { + c := &NodeGroupsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { +func (c *NodeGroupsSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71699,35 +88089,35 @@ func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *Interc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { +func (c *NodeGroupsSetIamPolicyCall) Context(ctx context.Context) *NodeGroupsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsInsertCall) Header() http.Header { +func (c *NodeGroupsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -71735,20 +88125,21 @@ func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeGroups.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71767,7 +88158,7 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71779,12 +88170,13 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.interconnectAttachments.insert", + // "id": "compute.nodeGroups.setIamPolicy", // "parameterOrder": [ // "project", - // "region" + // "zone", + // "resource" // ], // "parameters": { // "project": { @@ -71794,25 +88186,27 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments", + // "path": "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", // "request": { - // "$ref": "InterconnectAttachment" + // "$ref": "ZoneSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -71822,159 +88216,110 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.interconnectAttachments.list": +// method id "compute.nodeGroups.setNodeTemplate": -type InterconnectAttachmentsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NodeGroupsSetNodeTemplateCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of interconnect attachments contained within -// the specified region. -func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { - c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNodeTemplate: Updates the node template of the node group. +func (r *NodeGroupsService) SetNodeTemplate(project string, zone string, nodeGroup string, nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest) *NodeGroupsSetNodeTemplateCall { + c := &NodeGroupsSetNodeTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
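For orientation, the NodeGroupsService.SetIamPolicy call introduced in the hunk above is used roughly as in the sketch below. This is illustrative only and not part of the vendored change; the project, zone, node-group name and binding are placeholders, and the exact ZoneSetPolicyRequest/Policy fields should be read off the generated structs in this file.

// Sketch: granting a role on a node group via compute.nodeGroups.setIamPolicy.
// All identifiers below are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials; any authenticated *http.Client works.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	req := &compute.ZoneSetPolicyRequest{
		Policy: &compute.Policy{
			Bindings: []*compute.Binding{{
				Role:    "roles/compute.viewer",             // placeholder role
				Members: []string{"user:alice@example.com"}, // placeholder member
			}},
		},
	}

	// "my-project", "us-central1-a" and "my-node-group" are placeholders.
	policy, err := svc.NodeGroups.
		SetIamPolicy("my-project", "us-central1-a", "my-node-group", req).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("policy now has %d binding(s), etag %q\n", len(policy.Bindings), policy.Etag)
}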
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.zone = zone + c.nodeGroup = nodeGroup + c.nodegroupssetnodetemplaterequest = nodegroupssetnodetemplaterequest return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsSetNodeTemplateCall) RequestId(requestId string) *NodeGroupsSetNodeTemplateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { +func (c *NodeGroupsSetNodeTemplateCall) Fields(s ...googleapi.Field) *NodeGroupsSetNodeTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { +func (c *NodeGroupsSetNodeTemplateCall) Context(ctx context.Context) *NodeGroupsSetNodeTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsListCall) Header() http.Header { +func (c *NodeGroupsSetNodeTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupssetnodetemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.list" call. -// Exactly one of *InterconnectAttachmentList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InterconnectAttachmentList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { +// Do executes the "compute.nodeGroups.setNodeTemplate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71993,7 +88338,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachmentList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72005,35 +88350,20 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Retrieves the list of interconnect attachments contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.list", + // "description": "Updates the node template of the node group.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.setNodeTemplate", // "parameterOrder": [ // "project", - // "region" + // "zone", + // "nodeGroup" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "nodeGroup": { + // "description": "Name of the NodeGroup resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -72043,96 +88373,62 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments", + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", + // "request": { + // "$ref": "NodeGroupsSetNodeTemplateRequest" + // }, // "response": { - // "$ref": "InterconnectAttachmentList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.interconnectAttachments.patch": +// method id "compute.nodeGroups.testIamPermissions": -type InterconnectAttachmentsPatchCall struct { +type NodeGroupsTestIamPermissionsCall struct { s *Service project string - region string - interconnectAttachment string - interconnectattachment *InterconnectAttachment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates the specified interconnect attachment with the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. 
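The requestId plumbing carried through this hunk (urlParams_.Set("requestId", ...)) exists so a retried POST is deduplicated server-side rather than creating a second operation. A hedged sketch of the new SetNodeTemplate call with such an idempotency key follows; the UUID dependency and every resource name are assumptions of the example, not part of this change.

// Sketch: switching a node group to a different node template, passing a
// request ID so a client-side retry of the same logical request is ignored
// by the server instead of starting a duplicate operation.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/uuid" // assumption: any UUID source would do
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	req := &compute.NodeGroupsSetNodeTemplateRequest{
		// Placeholder template URL (full or partial self-link).
		NodeTemplate: "projects/my-project/regions/us-central1/nodeTemplates/my-template",
	}

	// One UUID per logical request; reuse it verbatim on retries.
	requestID := uuid.New().String()

	op, err := svc.NodeGroups.
		SetNodeTemplate("my-project", "us-central1-a", "my-node-group", req).
		RequestId(requestID).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("started zonal operation %s (status %s)\n", op.Name, op.Status)
}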
-func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { - c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment - c.interconnectattachment = interconnectattachment - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { - c.urlParams_.Set("requestId", requestId) + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *NodeGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeGroupsTestIamPermissionsCall { + c := &NodeGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { +func (c *NodeGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72140,57 +88436,57 @@ func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *Interco // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { +func (c *NodeGroupsTestIamPermissionsCall) Context(ctx context.Context) *NodeGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectAttachmentsPatchCall) Header() http.Header { +func (c *NodeGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72209,7 +88505,7 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72221,22 +88517,15 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Updates the specified interconnect attachment with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.interconnectAttachments.patch", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.testIamPermissions", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "zone", + // "resource" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -72244,139 +88533,186 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", // "request": { - // "$ref": "InterconnectAttachment" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.interconnectAttachments.setLabels": +// method id "compute.nodeTemplates.aggregatedList": -type InterconnectAttachmentsSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTemplatesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an InterconnectAttachment. To learn -// more about labels, read the Labeling Resources documentation. 
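The testIamPermissions call added above is the usual dry-run check: the caller sends a list of permission names and gets back the subset it actually holds. A minimal sketch, with placeholder identifiers and illustrative permission names:

// Sketch: probing which node-group permissions the caller has.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	req := &compute.TestPermissionsRequest{
		// Permission names are illustrative; any compute.nodeGroups.* permission can be probed.
		Permissions: []string{"compute.nodeGroups.get", "compute.nodeGroups.delete"},
	}
	resp, err := svc.NodeGroups.
		TestIamPermissions("my-project", "us-central1-a", "my-node-group", req).
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("granted:", resp.Permissions) // subset of the requested permissions
}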
-func (r *InterconnectAttachmentsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *InterconnectAttachmentsSetLabelsCall { - c := &InterconnectAttachmentsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of node templates. +func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggregatedListCall { + c := &NodeTemplatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *InterconnectAttachmentsSetLabelsCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NodeTemplatesAggregatedListCall) Filter(filter string) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *NodeTemplatesAggregatedListCall) MaxResults(maxResults int64) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NodeTemplatesAggregatedListCall) OrderBy(orderBy string) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NodeTemplatesAggregatedListCall) PageToken(pageToken string) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetLabelsCall { +func (c *NodeTemplatesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTemplatesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTemplatesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTemplatesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsSetLabelsCall) Context(ctx context.Context) *InterconnectAttachmentsSetLabelsCall { +func (c *NodeTemplatesAggregatedListCall) Context(ctx context.Context) *NodeTemplatesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectAttachmentsSetLabelsCall) Header() http.Header { +func (c *NodeTemplatesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTemplates") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeTemplates.aggregatedList" call. +// Exactly one of *NodeTemplateAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NodeTemplateAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72395,7 +88731,7 @@ func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NodeTemplateAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72407,85 +88743,122 @@ func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the labels on an InterconnectAttachment. 
To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.interconnectAttachments.setLabels", + // "description": "Retrieves an aggregated list of node templates.", + // "httpMethod": "GET", + // "id": "compute.nodeTemplates.aggregatedList", // "parameterOrder": [ - // "project", - // "region", - // "resource" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "region": { - // "description": "The region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", - // "request": { - // "$ref": "RegionSetLabelsRequest" - // }, + // "path": "{project}/aggregated/nodeTemplates", // "response": { - // "$ref": "Operation" + // "$ref": "NodeTemplateAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.interconnectAttachments.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeTemplatesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTemplateAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InterconnectAttachmentsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.nodeTemplates.delete": + +type NodeTemplatesDeleteCall struct { + s *Service + project string + region string + nodeTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InterconnectAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectAttachmentsTestIamPermissionsCall { - c := &InterconnectAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified NodeTemplate resource. 
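The Pages helper retained above drives pageToken/nextPageToken for the caller, and filter/maxResults are plain query parameters on the same call. A sketch of iterating the new nodeTemplates.aggregatedList across all regions follows; the filter string and names are placeholders, and the Items map / NodeTemplates field follow the generator's usual scoped-list shape, so they are worth confirming against the structs in this file.

// Sketch: listing node templates across all regions with a filter,
// letting Pages handle nextPageToken internally.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	call := svc.NodeTemplates.AggregatedList("my-project").
		Filter(`name != "scratch-template"`). // illustrative filter expression
		MaxResults(100)

	err = call.Pages(ctx, func(page *compute.NodeTemplateAggregatedList) error {
		// Items is keyed by scope (e.g. "regions/us-central1").
		for scope, scoped := range page.Items {
			for _, tpl := range scoped.NodeTemplates {
				fmt.Printf("%s: %s\n", scope, tpl.Name)
			}
		}
		return nil // returning an error here stops the iteration
	})
	if err != nil {
		log.Fatal(err)
	}
}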
+func (r *NodeTemplatesService) Delete(project string, region string, nodeTemplate string) *NodeTemplatesDeleteCall { + c := &NodeTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.nodeTemplate = nodeTemplate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NodeTemplatesDeleteCall) RequestId(requestId string) *NodeTemplatesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsTestIamPermissionsCall { +func (c *NodeTemplatesDeleteCall) Fields(s ...googleapi.Field) *NodeTemplatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72493,57 +88866,52 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectAttachmentsTestIamPermissionsCall { +func (c *NodeTemplatesDeleteCall) Context(ctx context.Context) *NodeTemplatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsTestIamPermissionsCall) Header() http.Header { +func (c *NodeTemplatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "nodeTemplate": c.nodeTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.nodeTemplates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72562,7 +88930,7 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72574,15 +88942,22 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.interconnectAttachments.testIamPermissions", + // "description": "Deletes the specified NodeTemplate resource.", + // "httpMethod": "DELETE", + // "id": "compute.nodeTemplates.delete", // "parameterOrder": [ // "project", // "region", - // "resource" + // "nodeTemplate" // ], // "parameters": { + // "nodeTemplate": { + // "description": "Name of the NodeTemplate resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -72597,56 +88972,51 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.interconnectLocations.get": +// method id "compute.nodeTemplates.get": -type InterconnectLocationsGetCall struct { - s *Service - project string - interconnectLocation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NodeTemplatesGetCall struct { + s *Service + project string + region string + nodeTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the details for the specified interconnect location. -// Gets a list of available interconnect locations by making a list() -// request. -func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { - c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified node template. Gets a list of available +// node templates by making a list() request. +func (r *NodeTemplatesService) Get(project string, region string, nodeTemplate string) *NodeTemplatesGetCall { + c := &NodeTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnectLocation = interconnectLocation + c.region = region + c.nodeTemplate = nodeTemplate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { +func (c *NodeTemplatesGetCall) Fields(s ...googleapi.Field) *NodeTemplatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72656,7 +89026,7 @@ func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *Interconnec // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
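Like the other mutating calls in this hunk, nodeTemplates.delete returns an Operation and accepts the same requestId idempotency key. A minimal sketch with placeholder names; polling the regional operation to completion is left out, and the UUID dependency is the example's own choice.

// Sketch: deleting a node template; the call returns a regional Operation
// that a caller would normally poll until Status == "DONE".
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/uuid" // assumption: any UUID source would do
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	op, err := svc.NodeTemplates.
		Delete("my-project", "us-central1", "my-template").
		RequestId(uuid.New().String()). // safe to retry with the same ID
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("delete started: operation %s, status %s\n", op.Name, op.Status)
}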
-func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { +func (c *NodeTemplatesGetCall) IfNoneMatch(entityTag string) *NodeTemplatesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -72664,21 +89034,21 @@ func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *Interconne // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { +func (c *NodeTemplatesGetCall) Context(ctx context.Context) *NodeTemplatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectLocationsGetCall) Header() http.Header { +func (c *NodeTemplatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -72690,7 +89060,7 @@ func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -72698,20 +89068,21 @@ func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnectLocation": c.interconnectLocation, + "project": c.project, + "region": c.region, + "nodeTemplate": c.nodeTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectLocations.get" call. -// Exactly one of *InterconnectLocation or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InterconnectLocation.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { +// Do executes the "compute.nodeTemplates.get" call. +// Exactly one of *NodeTemplate or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NodeTemplate.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72730,7 +89101,7 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectLocation{ + ret := &NodeTemplate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72742,18 +89113,19 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc } return ret, nil // { - // "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", + // "description": "Returns the specified node template. Gets a list of available node templates by making a list() request.", // "httpMethod": "GET", - // "id": "compute.interconnectLocations.get", + // "id": "compute.nodeTemplates.get", // "parameterOrder": [ // "project", - // "interconnectLocation" + // "region", + // "nodeTemplate" // ], // "parameters": { - // "interconnectLocation": { - // "description": "Name of the interconnect location to return.", + // "nodeTemplate": { + // "description": "Name of the node template to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -72763,11 +89135,18 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnectLocations/{interconnectLocation}", + // "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", // "response": { - // "$ref": "InterconnectLocation" + // "$ref": "NodeTemplate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -72778,92 +89157,33 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc } -// method id "compute.interconnectLocations.list": +// method id "compute.nodeTemplates.getIamPolicy": -type InterconnectLocationsListCall struct { +type NodeTemplatesGetIamPolicyCall struct { s *Service project string + region string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of interconnect locations available to the -// specified project. -func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { - c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *NodeTemplatesService) GetIamPolicy(project string, region string, resource string) *NodeTemplatesGetIamPolicyCall { + c := &NodeTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. 
The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { +func (c *NodeTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72873,7 +89193,7 @@ func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *Interconne // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { +func (c *NodeTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeTemplatesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -72881,21 +89201,21 @@ func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *Interconn // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { +func (c *NodeTemplatesGetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectLocationsListCall) Header() http.Header { +func (c *NodeTemplatesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -72907,7 +89227,7 @@ func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -72915,19 +89235,21 @@ func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, e } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectLocations.list" call. -// Exactly one of *InterconnectLocationList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InterconnectLocationList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { +// Do executes the "compute.nodeTemplates.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72946,7 +89268,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectLocationList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72958,47 +89280,40 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter } return ret, nil // { - // "description": "Retrieves the list of interconnect locations available to the specified project.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.interconnectLocations.list", + // "id": "compute.nodeTemplates.getIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnectLocations", + // "path": "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", // "response": { - // "$ref": "InterconnectLocationList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -73009,43 +89324,25 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.interconnects.delete": +// method id "compute.nodeTemplates.insert": -type InterconnectsDeleteCall struct { +type NodeTemplatesInsertCall struct { s *Service project string - interconnect string + region string + nodetemplate *NodeTemplate urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified interconnect. -func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { - c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a NodeTemplate resource in the specified project +// using the data included in the request. 
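A minimal usage sketch for the NodeTemplates read calls added above (compute.nodeTemplates.get and compute.nodeTemplates.getIamPolicy). It assumes an authenticated *compute.Service built from Application Default Credentials via compute.New; the project, region, and template names are placeholders, not values from this change.

// Sketch only: illustrates the newly vendored NodeTemplates read calls.
// Project, region, and template names below are hypothetical.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Build an authenticated HTTP client from Application Default Credentials.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// compute.nodeTemplates.get: fetch a single node template by name.
	tmpl, err := svc.NodeTemplates.Get("my-project", "us-central1", "my-node-template").
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tmpl.Name, tmpl.Status)

	// compute.nodeTemplates.getIamPolicy: read the template's IAM policy.
	policy, err := svc.NodeTemplates.GetIamPolicy("my-project", "us-central1", "my-node-template").
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bindings:", len(policy.Bindings))
}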
+func (r *NodeTemplatesService) Insert(project string, region string, nodetemplate *NodeTemplate) *NodeTemplatesInsertCall { + c := &NodeTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.region = region + c.nodetemplate = nodetemplate return c } @@ -73063,7 +89360,7 @@ func (r *InterconnectsService) Delete(project string, interconnect string) *Inte // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { +func (c *NodeTemplatesInsertCall) RequestId(requestId string) *NodeTemplatesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -73071,7 +89368,7 @@ func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDele // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { +func (c *NodeTemplatesInsertCall) Fields(s ...googleapi.Field) *NodeTemplatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73079,51 +89376,56 @@ func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { +func (c *NodeTemplatesInsertCall) Context(ctx context.Context) *NodeTemplatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsDeleteCall) Header() http.Header { +func (c *NodeTemplatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodetemplate) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.delete" call. +// Do executes the "compute.nodeTemplates.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73154,25 +89456,25 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified interconnect.", - // "httpMethod": "DELETE", - // "id": "compute.interconnects.delete", + // "description": "Creates a NodeTemplate resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.nodeTemplates.insert", // "parameterOrder": [ // "project", - // "interconnect" + // "region" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to delete.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -73182,7 +89484,10 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/regions/{region}/nodeTemplates", + // "request": { + // "$ref": "NodeTemplate" + // }, // "response": { // "$ref": "Operation" // }, @@ -73194,31 +89499,94 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.interconnects.get": +// method id "compute.nodeTemplates.list": -type InterconnectsGetCall struct { +type NodeTemplatesListCall struct { s *Service project string - interconnect string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified interconnect. Get a list of available -// interconnects by making a list() request. -func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { - c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of node templates available to the specified +// project. +func (r *NodeTemplatesService) List(project string, region string) *NodeTemplatesListCall { + c := &NodeTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NodeTemplatesListCall) Filter(filter string) *NodeTemplatesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NodeTemplatesListCall) MaxResults(maxResults int64) *NodeTemplatesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NodeTemplatesListCall) OrderBy(orderBy string) *NodeTemplatesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NodeTemplatesListCall) PageToken(pageToken string) *NodeTemplatesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { +func (c *NodeTemplatesListCall) Fields(s ...googleapi.Field) *NodeTemplatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73228,7 +89596,7 @@ func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
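A sketch of compute.nodeTemplates.insert using the RequestId idempotency parameter documented above. It reuses the imports and the *compute.Service handle from the earlier sketch; the project, region, node-type, and UUID values are hypothetical, and operation polling is only indicated in a comment.

// Sketch only: creating a node template with an idempotency request ID.
// Assumes an authenticated *compute.Service (svc); all literals are hypothetical.
func createNodeTemplate(ctx context.Context, svc *compute.Service) error {
	tmpl := &compute.NodeTemplate{
		Name:     "my-node-template",
		NodeType: "n1-node-96-624", // a node type that exists in the region
	}

	op, err := svc.NodeTemplates.Insert("my-project", "us-central1", tmpl).
		RequestId("3c7f1f84-2d5b-4a8f-9a39-1d2f8c9e4b10"). // safe to retry with the same ID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}

	// Insert is asynchronous: the returned Operation must be polled (for
	// example via svc.RegionOperations.Get) until op.Status == "DONE".
	log.Printf("started operation %s (status %s)", op.Name, op.Status)
	return nil
}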
-func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { +func (c *NodeTemplatesListCall) IfNoneMatch(entityTag string) *NodeTemplatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -73236,21 +89604,21 @@ func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { +func (c *NodeTemplatesListCall) Context(ctx context.Context) *NodeTemplatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsGetCall) Header() http.Header { +func (c *NodeTemplatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -73262,7 +89630,7 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -73270,20 +89638,20 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.get" call. -// Exactly one of *Interconnect or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Interconnect.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { +// Do executes the "compute.nodeTemplates.list" call. +// Exactly one of *NodeTemplateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NodeTemplateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73302,7 +89670,7 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Interconnect{ + ret := &NodeTemplateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73314,19 +89682,35 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, } return ret, nil // { - // "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", + // "description": "Retrieves a list of node templates available to the specified project.", // "httpMethod": "GET", - // "id": "compute.interconnects.get", + // "id": "compute.nodeTemplates.list", // "parameterOrder": [ // "project", - // "interconnect" + // "region" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -73335,11 +89719,18 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/regions/{region}/nodeTemplates", // "response": { - // "$ref": "Interconnect" + // "$ref": "NodeTemplateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -73350,97 +89741,113 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, } -// method id "compute.interconnects.getDiagnostics": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeTemplatesListCall) Pages(ctx context.Context, f func(*NodeTemplateList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InterconnectsGetDiagnosticsCall struct { - s *Service - project string - interconnect string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.nodeTemplates.setIamPolicy": + +type NodeTemplatesSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetDiagnostics: Returns the interconnectDiagnostics for the specified -// interconnect. -func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { - c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *NodeTemplatesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NodeTemplatesSetIamPolicyCall { + c := &NodeTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
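A sketch of compute.nodeTemplates.list that combines a filter expression with the Pages helper shown above, which drives the maxResults/pageToken loop internally. It reuses the service handle from the first sketch; the project, region, and filter string are placeholders.

// Sketch only: listing node templates with a filter, letting Pages handle
// the pagination loop. Assumes svc from the earlier sketch.
func listNodeTemplates(ctx context.Context, svc *compute.Service) error {
	call := svc.NodeTemplates.List("my-project", "us-central1").
		Filter(`status = "READY"`). // hypothetical filter expression
		MaxResults(100)

	return call.Pages(ctx, func(page *compute.NodeTemplateList) error {
		for _, t := range page.Items {
			log.Printf("node template %s uses node type %s", t.Name, t.NodeType)
		}
		return nil // returning a non-nil error stops the iteration
	})
}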
-func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { +func (c *NodeTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { +func (c *NodeTemplatesSetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { +func (c *NodeTemplatesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}/getDiagnostics") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.getDiagnostics" call. -// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { +// Do executes the "compute.nodeTemplates.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73459,7 +89866,7 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectsGetDiagnosticsResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73471,85 +89878,80 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Returns the interconnectDiagnostics for the specified interconnect.", - // "httpMethod": "GET", - // "id": "compute.interconnects.getDiagnostics", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.nodeTemplates.setIamPolicy", // "parameterOrder": [ // "project", - // "interconnect" + // "region", + // "resource" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect resource to query.", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}/getDiagnostics", + // "path": "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, // "response": { - // "$ref": "InterconnectsGetDiagnosticsResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.interconnects.insert": +// method id "compute.nodeTemplates.testIamPermissions": -type InterconnectsInsertCall struct { - s *Service - project string - interconnect *Interconnect - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTemplatesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a 
Interconnect in the specified project using the -// data included in the request. -func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { - c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *NodeTemplatesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeTemplatesTestIamPermissionsCall { + c := &NodeTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { +func (c *NodeTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeTemplatesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73557,35 +89959,35 @@ func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { +func (c *NodeTemplatesTestIamPermissionsCall) Context(ctx context.Context) *NodeTemplatesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
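A sketch of a read-modify-write cycle with compute.nodeTemplates.setIamPolicy. The RegionSetPolicyRequest and Binding field names are assumed from the generated compute types rather than taken from this diff, and the role and member values are hypothetical.

// Sketch only: read-modify-write of a node template's IAM policy.
// Field names assumed from the generated types; role/member are placeholders.
func grantNodeTemplateViewer(ctx context.Context, svc *compute.Service) error {
	policy, err := svc.NodeTemplates.GetIamPolicy("my-project", "us-central1", "my-node-template").
		Context(ctx).Do()
	if err != nil {
		return err
	}

	policy.Bindings = append(policy.Bindings, &compute.Binding{
		Role:    "roles/compute.viewer",
		Members: []string{"user:alice@example.com"},
	})

	// Sending the policy back with its Etag lets the server reject
	// concurrent modifications.
	_, err = svc.NodeTemplates.SetIamPolicy("my-project", "us-central1", "my-node-template",
		&compute.RegionSetPolicyRequest{Policy: policy}).
		Context(ctx).Do()
	return err
}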
-func (c *InterconnectsInsertCall) Header() http.Header { +func (c *NodeTemplatesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -73593,19 +89995,21 @@ func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.nodeTemplates.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73624,7 +90028,7 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73636,11 +90040,13 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a Interconnect in the specified project using the data included in the request.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.interconnects.insert", + // "id": "compute.nodeTemplates.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { // "project": { @@ -73650,30 +90056,40 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects", + // "path": "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", // "request": { - // "$ref": "Interconnect" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.interconnects.list": +// method id "compute.nodeTypes.aggregatedList": -type InterconnectsListCall struct { +type NodeTypesAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -73682,10 +90098,9 @@ type InterconnectsListCall struct { header_ http.Header } -// List: Retrieves the list of interconnect available to the specified -// project. -func (r *InterconnectsService) List(project string) *InterconnectsListCall { - c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of node types. 
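A sketch of compute.nodeTemplates.testIamPermissions, whose response echoes back only the permissions the caller actually holds. The permission strings are illustrative; the service handle is reused from the first sketch.

// Sketch only: checking which of a set of permissions the caller holds on a
// node template.
func checkNodeTemplatePermissions(ctx context.Context, svc *compute.Service) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.nodeTemplates.get",
			"compute.nodeTemplates.delete",
		},
	}

	resp, err := svc.NodeTemplates.TestIamPermissions("my-project", "us-central1", "my-node-template", req).
		Context(ctx).Do()
	if err != nil {
		return err
	}

	// resp.Permissions contains only the permissions that are granted.
	log.Printf("granted: %v", resp.Permissions)
	return nil
}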
+func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedListCall { + c := &NodeTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -73712,7 +90127,7 @@ func (r *InterconnectsService) List(project string) *InterconnectsListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { +func (c *NodeTypesAggregatedListCall) Filter(filter string) *NodeTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -73723,7 +90138,7 @@ func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { +func (c *NodeTypesAggregatedListCall) MaxResults(maxResults int64) *NodeTypesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -73740,7 +90155,7 @@ func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListC // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { +func (c *NodeTypesAggregatedListCall) OrderBy(orderBy string) *NodeTypesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -73748,7 +90163,7 @@ func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { +func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -73756,7 +90171,7 @@ func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { +func (c *NodeTypesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73766,7 +90181,7 @@ func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { +func (c *NodeTypesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTypesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -73774,21 +90189,21 @@ func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsList // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { +func (c *NodeTypesAggregatedListCall) Context(ctx context.Context) *NodeTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsListCall) Header() http.Header { +func (c *NodeTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -73800,7 +90215,7 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTypes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -73813,14 +90228,14 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.list" call. -// Exactly one of *InterconnectList or error will be non-nil. Any +// Do executes the "compute.nodeTypes.aggregatedList" call. +// Exactly one of *NodeTypeAggregatedList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InterconnectList.ServerResponse.Header or (if a response was +// *NodeTypeAggregatedList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { +func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73839,7 +90254,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectList{ + ret := &NodeTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73851,9 +90266,9 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL } return ret, nil // { - // "description": "Retrieves the list of interconnect available to the specified project.", + // "description": "Retrieves an aggregated list of node types.", // "httpMethod": "GET", - // "id": "compute.interconnects.list", + // "id": "compute.nodeTypes.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -73889,9 +90304,9 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // "type": "string" // } // }, - // "path": "{project}/global/interconnects", + // "path": "{project}/aggregated/nodeTypes", // "response": { - // "$ref": "InterconnectList" + // "$ref": "NodeTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -73905,7 +90320,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { +func (c *NodeTypesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTypeAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -73923,109 +90338,99 @@ func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectL } } -// method id "compute.interconnects.patch": +// method id "compute.nodeTypes.get": -type InterconnectsPatchCall struct { - s *Service - project string - interconnect string - interconnect2 *Interconnect - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTypesGetCall struct { + s *Service + project string + zone string + nodeType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified interconnect with the data included in -// the request. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. -func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { - c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified node type. Gets a list of available node +// types by making a list() request. +func (r *NodeTypesService) Get(project string, zone string, nodeType string) *NodeTypesGetCall { + c := &NodeTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect - c.interconnect2 = interconnect2 - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
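A sketch of compute.nodeTypes.aggregatedList with its Pages helper. It assumes NodeTypeAggregatedList.Items is the usual per-scope map produced by the generator (keyed by "zones/<zone>"); the project name is a placeholder and the service handle comes from the first sketch.

// Sketch only: walking the aggregated node-type listing across zones.
func listAllNodeTypes(ctx context.Context, svc *compute.Service) error {
	call := svc.NodeTypes.AggregatedList("my-project")

	return call.Pages(ctx, func(page *compute.NodeTypeAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, nt := range scoped.NodeTypes {
				log.Printf("%s: %s (%d vCPUs, %d MB)", scope, nt.Name, nt.GuestCpus, nt.MemoryMb)
			}
		}
		return nil
	})
}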
-// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.nodeType = nodeType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { +func (c *NodeTypesGetCall) Fields(s ...googleapi.Field) *NodeTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTypesGetCall) IfNoneMatch(entityTag string) *NodeTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { +func (c *NodeTypesGetCall) Context(ctx context.Context) *NodeTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsPatchCall) Header() http.Header { +func (c *NodeTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes/{nodeType}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "zone": c.zone, + "nodeType": c.nodeType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.patch" call. -// Exactly one of *Operation or error will be non-nil. 
Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.nodeTypes.get" call. +// Exactly one of *NodeType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *NodeType.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74044,7 +90449,7 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NodeType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74056,18 +90461,19 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.interconnects.patch", + // "description": "Returns the specified node type. Gets a list of available node types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.nodeTypes.get", // "parameterOrder": [ // "project", - // "interconnect" + // "zone", + // "nodeType" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to update.", + // "nodeType": { + // "description": "Name of the node type to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -74078,261 +90484,180 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", - // "request": { - // "$ref": "Interconnect" - // }, + // "path": "{project}/zones/{zone}/nodeTypes/{nodeType}", // "response": { - // "$ref": "Operation" + // "$ref": "NodeType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.interconnects.setLabels": +// method id "compute.nodeTypes.list": -type InterconnectsSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NodeTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an Interconnect. To learn more about -// labels, read the Labeling Resources documentation. -func (r *InterconnectsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *InterconnectsSetLabelsCall { - c := &InterconnectsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of node types available to the specified +// project. +func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall { + c := &NodeTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectsSetLabelsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InterconnectsSetLabelsCall) Context(ctx context.Context) *InterconnectsSetLabelsCall { - c.ctx_ = ctx + c.zone = zone return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
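// Usage sketch (not part of the vendored diff): a minimal illustration of the
// generated compute.nodeTypes.aggregatedList and compute.nodeTypes.get calls
// shown above. It assumes the conventional import path
// google.golang.org/api/compute/v1 for this generated package, that a
// *compute.Service has been constructed elsewhere, and that the Service's
// NodeTypes field and the NodeTypesService.AggregatedList(project)
// constructor are defined earlier in the generated file (only the call
// struct's methods appear in this hunk). Project, zone and node-type names
// are placeholders.
package computeusage // hypothetical package name for this sketch

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated client
)

func nodeTypeSketch(ctx context.Context, svc *compute.Service, project, zone, nodeType string) error {
	// Walk every page of the aggregated node-type listing; Pages stops once
	// the returned NextPageToken is empty, as in the generated Pages helper.
	err := svc.NodeTypes.AggregatedList(project).Pages(ctx, func(page *compute.NodeTypeAggregatedList) error {
		fmt.Printf("aggregated node-type page received (HTTP %d)\n", page.HTTPStatusCode)
		return nil
	})
	if err != nil {
		return err
	}

	// Fetch a single node type by name within one zone.
	nt, err := svc.NodeTypes.Get(project, zone, nodeType).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("node type fetched (HTTP %d)\n", nt.HTTPStatusCode)
	return nil
}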
-func (c *InterconnectsSetLabelsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/setLabels") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.interconnects.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the labels on an Interconnect. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.interconnects.setLabels", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/interconnects/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NodeTypesListCall) Filter(filter string) *NodeTypesListCall { + c.urlParams_.Set("filter", filter) + return c } -// method id "compute.interconnects.testIamPermissions": +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NodeTypesListCall) MaxResults(maxResults int64) *NodeTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} -type InterconnectsTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NodeTypesListCall) OrderBy(orderBy string) *NodeTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InterconnectsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectsTestIamPermissionsCall { - c := &InterconnectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
+func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectsTestIamPermissionsCall { +func (c *NodeTypesListCall) Fields(s ...googleapi.Field) *NodeTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NodeTypesListCall) IfNoneMatch(entityTag string) *NodeTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectsTestIamPermissionsCall { +func (c *NodeTypesListCall) Context(ctx context.Context) *NodeTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsTestIamPermissionsCall) Header() http.Header { +func (c *NodeTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.nodeTypes.list" call. +// Exactly one of *NodeTypeList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NodeTypeList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74351,7 +90676,7 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &NodeTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74363,14 +90688,37 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.interconnects.testIamPermissions", + // "description": "Retrieves a list of node types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.nodeTypes.list", // "parameterOrder": [ // "project", - // "resource" + // "zone" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -74378,20 +90726,17 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/nodeTypes", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "NodeTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -74402,96 +90747,118 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.licenseCodes.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NodeTypesListCall) Pages(ctx context.Context, f func(*NodeTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type LicenseCodesGetCall struct { - s *Service - project string - licenseCode string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.projects.disableXpnHost": + +type ProjectsDisableXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Return a specified license code. License codes are mirrored -// across all projects that have permissions to read the License Code. -func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { - c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DisableXpnHost: Disable this project as a shared VPC host project. 
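// Usage sketch (not part of the vendored diff): the per-zone
// compute.nodeTypes.list call documented above, combining the optional
// filter, maxResults and orderBy parameters with page iteration. Import path
// and service construction are assumed as in the previous sketch; the filter
// string is purely illustrative of the documented "field operator value" form.
package computeusage

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func listZoneNodeTypes(ctx context.Context, svc *compute.Service, project, zone string) error {
	call := svc.NodeTypes.List(project, zone).
		Filter("name != example-nodetype"). // illustrative filter; syntax per the parameter docs
		MaxResults(100).                    // acceptable values are 0 to 500, inclusive (default 500)
		OrderBy("creationTimestamp desc")   // one of the two documented sort orders
	// Pages resets the page token when it returns and invokes f once per page.
	return call.Pages(ctx, func(page *compute.NodeTypeList) error {
		fmt.Printf("node-type page received, nextPageToken=%q\n", page.NextPageToken)
		return nil
	})
}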
+func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { + c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.licenseCode = licenseCode + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisableXpnHostCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { +func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnHostCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { +func (c *ProjectsDisableXpnHostCall) Context(ctx context.Context) *ProjectsDisableXpnHostCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicenseCodesGetCall) Header() http.Header { +func (c *ProjectsDisableXpnHostCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{licenseCode}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnHost") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "licenseCode": c.licenseCode, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenseCodes.get" call. -// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx +// Do executes the "compute.projects.disableXpnHost" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *LicenseCode.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { +func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74510,7 +90877,7 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LicenseCode{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74522,58 +90889,55 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er } return ret, nil // { - // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code.", - // "httpMethod": "GET", - // "id": "compute.licenseCodes.get", + // "description": "Disable this project as a shared VPC host project.", + // "httpMethod": "POST", + // "id": "compute.projects.disableXpnHost", // "parameterOrder": [ - // "project", - // "licenseCode" + // "project" // ], // "parameters": { - // "licenseCode": { - // "description": "Number corresponding to the License code resource to return.", - // "location": "path", - // "pattern": "[0-9]{0,61}?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/licenseCodes/{licenseCode}", + // "path": "{project}/disableXpnHost", // "response": { - // "$ref": "LicenseCode" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.licenses.delete": +// method id "compute.projects.disableXpnResource": -type LicensesDeleteCall struct { - s *Service - project string - license string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsDisableXpnResourceCall struct { + s *Service + project string + projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified license. -func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { - c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DisableXpnResource: Disable a serivce resource (a.k.a service +// project) associated with this host project. +func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { + c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.projectsdisablexpnresourcerequest = projectsdisablexpnresourcerequest return c } @@ -74591,7 +90955,7 @@ func (r *LicensesService) Delete(project string, license string) *LicensesDelete // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { +func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDisableXpnResourceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -74599,7 +90963,7 @@ func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { +func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnResourceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -74607,51 +90971,55 @@ func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { +func (c *ProjectsDisableXpnResourceCall) Context(ctx context.Context) *ProjectsDisableXpnResourceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
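// Usage sketch (not part of the vendored diff): the
// compute.projects.disableXpnHost call documented above, with the optional
// requestId parameter used for idempotent retries. The Service's Projects
// field, the import path and service construction are assumed as in the
// earlier sketches; requestID is a caller-supplied placeholder.
package computeusage

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func disableSharedVPCHost(ctx context.Context, svc *compute.Service, project, requestID string) error {
	op, err := svc.Projects.DisableXpnHost(project).
		RequestId(requestID). // caller-generated, non-zero UUID, per the parameter docs
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// The call returns a long-running Operation; waiting on it is outside this excerpt.
	fmt.Printf("disableXpnHost accepted (HTTP %d)\n", op.HTTPStatusCode)
	return nil
}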
-func (c *LicensesDeleteCall) Header() http.Header { +func (c *ProjectsDisableXpnResourceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsdisablexpnresourcerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnResource") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.delete" call. +// Do executes the "compute.projects.disableXpnResource" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74682,21 +91050,13 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified license.", - // "httpMethod": "DELETE", - // "id": "compute.licenses.delete", + // "description": "Disable a serivce resource (a.k.a service project) associated with this host project.", + // "httpMethod": "POST", + // "id": "compute.projects.disableXpnResource", // "parameterOrder": [ - // "project", - // "license" + // "project" // ], // "parameters": { - // "license": { - // "description": "Name of the license resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -74710,7 +91070,10 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "{project}/global/licenses/{license}", + // "path": "{project}/disableXpnResource", + // "request": { + // "$ref": "ProjectsDisableXpnResourceRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -74722,96 +91085,97 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.licenses.get": +// method id "compute.projects.enableXpnHost": -type LicensesGetCall struct { - s *Service - project string - license string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsEnableXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified License resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get -func (r *LicensesService) Get(project string, license string) *LicensesGetCall { - c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// EnableXpnHost: Enable this project as a shared VPC host project. +func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { + c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableXpnHostCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
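// Usage sketch (not part of the vendored diff): the
// compute.projects.disableXpnResource call shown above. The request body type
// ProjectsDisableXpnResourceRequest appears in this hunk, but its fields do
// not, so the sketch leaves the body for the caller to populate. Import path
// and service construction are assumed as in the earlier sketches.
package computeusage

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func disableServiceProject(ctx context.Context, svc *compute.Service, hostProject string, req *compute.ProjectsDisableXpnResourceRequest) (*compute.Operation, error) {
	// POSTs req to {project}/disableXpnResource and returns the resulting Operation.
	return svc.Projects.DisableXpnResource(hostProject, req).Context(ctx).Do()
}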
-func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { +func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnHostCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { +func (c *ProjectsEnableXpnHostCall) Context(ctx context.Context) *ProjectsEnableXpnHostCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesGetCall) Header() http.Header { +func (c *ProjectsEnableXpnHostCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnHost") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.get" call. -// Exactly one of *License or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *License.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { +// Do executes the "compute.projects.enableXpnHost" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74830,7 +91194,7 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &License{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74842,58 +91206,56 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { } return ret, nil // { - // "description": "Returns the specified License resource.", - // "httpMethod": "GET", - // "id": "compute.licenses.get", + // "description": "Enable this project as a shared VPC host project.", + // "httpMethod": "POST", + // "id": "compute.projects.enableXpnHost", // "parameterOrder": [ - // "project", - // "license" + // "project" // ], // "parameters": { - // "license": { - // "description": "Name of the License resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/licenses/{license}", + // "path": "{project}/enableXpnHost", // "response": { - // "$ref": "License" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.licenses.insert": +// method id "compute.projects.enableXpnResource": -type LicensesInsertCall struct { - s *Service - project string - license *License - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsEnableXpnResourceCall struct { + s *Service + project string + projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Create a License resource in the specified project. -func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { - c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// EnableXpnResource: Enable service resource (a.k.a service project) +// for a host project, so that subnets in the host project can be used +// by instances in the service project. 
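// Usage sketch (not part of the vendored diff): pairing the
// compute.projects.enableXpnHost call above with its enableXpnResource
// counterpart introduced in the same hunk. The host project is enabled for
// shared VPC first, then a service project is attached via a caller-supplied
// request body (its fields are not shown in this excerpt). Import path and
// service construction are assumed as in the earlier sketches.
package computeusage

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func enableSharedVPC(ctx context.Context, svc *compute.Service, hostProject string, req *compute.ProjectsEnableXpnResourceRequest) error {
	hostOp, err := svc.Projects.EnableXpnHost(hostProject).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("enableXpnHost accepted (HTTP %d)\n", hostOp.HTTPStatusCode)

	resOp, err := svc.Projects.EnableXpnResource(hostProject, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("enableXpnResource accepted (HTTP %d)\n", resOp.HTTPStatusCode)
	// Both calls return Operations; polling them to completion is outside this excerpt.
	return nil
}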
+func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { + c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.projectsenablexpnresourcerequest = projectsenablexpnresourcerequest return c } @@ -74911,7 +91273,7 @@ func (r *LicensesService) Insert(project string, license *License) *LicensesInse // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { +func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEnableXpnResourceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -74919,7 +91281,7 @@ func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { +func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnResourceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -74927,35 +91289,35 @@ func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { +func (c *ProjectsEnableXpnResourceCall) Context(ctx context.Context) *ProjectsEnableXpnResourceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesInsertCall) Header() http.Header { +func (c *ProjectsEnableXpnResourceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsenablexpnresourcerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnResource") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -74968,14 +91330,14 @@ func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.insert" call. +// Do executes the "compute.projects.enableXpnResource" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75006,9 +91368,9 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Create a License resource in the specified project.", + // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", // "httpMethod": "POST", - // "id": "compute.licenses.insert", + // "id": "compute.projects.enableXpnResource", // "parameterOrder": [ // "project" // ], @@ -75026,27 +91388,24 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "{project}/global/licenses", + // "path": "{project}/enableXpnResource", // "request": { - // "$ref": "License" + // "$ref": "ProjectsEnableXpnResourceRequest" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.licenses.list": +// method id "compute.projects.get": -type LicensesListCall struct { +type ProjectsGetCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -75055,85 +91414,18 @@ type LicensesListCall struct { header_ http.Header } -// List: Retrieves the list of licenses available in the specified -// project. This method does not get any licenses that belong to other -// projects, including licenses attached to publicly-available images, -// like Debian 9. If you want to get a list of publicly-available -// licenses, use this method to make a request to the respective image -// project, such as debian-cloud or windows-cloud. -func (r *LicensesService) List(project string) *LicensesListCall { - c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Project resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get +func (r *ProjectsService) Get(project string) *ProjectsGetCall { + c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. 
You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *LicensesListCall) Filter(filter string) *LicensesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { +func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -75143,7 +91435,7 @@ func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { +func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -75151,21 +91443,21 @@ func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { +func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesListCall) Header() http.Header { +func (c *ProjectsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -75177,7 +91469,7 @@ func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -75190,14 +91482,14 @@ func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.list" call. -// Exactly one of *LicensesListResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *LicensesListResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { +// Do executes the "compute.projects.get" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75216,7 +91508,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LicensesListResponse{ + ret := &Project{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75228,36 +91520,13 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon } return ret, nil // { - // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. 
If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "description": "Returns the specified Project resource.", // "httpMethod": "GET", - // "id": "compute.licenses.list", + // "id": "compute.projects.get", // "parameterOrder": [ // "project" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -75266,9 +91535,9 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon // "type": "string" // } // }, - // "path": "{project}/global/licenses", + // "path": "{project}", // "response": { - // "$ref": "LicensesListResponse" + // "$ref": "Project" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -75279,110 +91548,93 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon } -// Pages invokes f for each page of results. 
-// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.licenses.setIamPolicy": +// method id "compute.projects.getXpnHost": -type LicensesSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsGetXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { - c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetXpnHost: Gets the shared VPC host project that this project links +// to. May be empty if no link exists. +func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { + c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { +func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHostCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHostCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { +func (c *ProjectsGetXpnHostCall) Context(ctx context.Context) *ProjectsGetXpnHostCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
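A similarly hedged sketch for the GetXpnHost call generated here (imports as in the earlier sketch). Whether an unlinked project comes back as an empty Project or as an error is not shown in this hunk, so the empty-Name check is an assumption:

func printXpnHost(ctx context.Context, svc *compute.Service, projectID string) error {
	host, err := svc.Projects.GetXpnHost(projectID).Context(ctx).Do()
	if err != nil {
		return err
	}
	if host.Name == "" {
		// Assumption: an empty result means no shared VPC host is linked.
		fmt.Println("no shared VPC host project linked")
		return nil
	}
	fmt.Println("shared VPC host:", host.Name)
	return nil
}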
-func (c *LicensesSetIamPolicyCall) Header() http.Header { +func (c *ProjectsGetXpnHostCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnHost") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// Do executes the "compute.projects.getXpnHost" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) +// *Project.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75401,7 +91653,7 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Project{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75413,12 +91665,11 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.licenses.setIamPolicy", + // "description": "Gets the shared VPC host project that this project links to. 
May be empty if no link exists.", + // "httpMethod": "GET", + // "id": "compute.projects.getXpnHost", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { // "project": { @@ -75427,21 +91678,11 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/licenses/{resource}/setIamPolicy", - // "request": { - // "$ref": "GlobalSetPolicyRequest" - // }, + // "path": "{project}/getXpnHost", // "response": { - // "$ref": "Policy" + // "$ref": "Project" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -75451,9 +91692,9 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } -// method id "compute.machineTypes.aggregatedList": +// method id "compute.projects.getXpnResources": -type MachineTypesAggregatedListCall struct { +type ProjectsGetXpnResourcesCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -75462,10 +91703,10 @@ type MachineTypesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of machine types. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList -func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { - c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetXpnResources: Gets service resources (a.k.a service project) +// associated with this host project. +func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { + c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -75492,7 +91733,7 @@ func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggreg // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { +func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("filter", filter) return c } @@ -75503,12 +91744,12 @@ func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggr // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { +func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by +// OrderBy sets the optional parameter "order_by": Sorts list results by // a certain order. By default, results are returned in alphanumerical // order based on the resource name. 
// @@ -75520,15 +91761,15 @@ func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTy // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) +func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("order_by", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { +func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -75536,7 +91777,7 @@ func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTyp // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { +func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -75546,7 +91787,7 @@ func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTy // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { +func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGetXpnResourcesCall { c.ifNoneMatch_ = entityTag return c } @@ -75554,21 +91795,21 @@ func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { +func (c *ProjectsGetXpnResourcesCall) Context(ctx context.Context) *ProjectsGetXpnResourcesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesAggregatedListCall) Header() http.Header { +func (c *ProjectsGetXpnResourcesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -75580,7 +91821,7 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnResources") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -75593,14 +91834,14 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.aggregatedList" call. -// Exactly one of *MachineTypeAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *MachineTypeAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.projects.getXpnResources" call. +// Exactly one of *ProjectsGetXpnResources or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ProjectsGetXpnResources.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { +func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*ProjectsGetXpnResources, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75619,7 +91860,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineTypeAggregatedList{ + ret := &ProjectsGetXpnResources{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75631,9 +91872,9 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach } return ret, nil // { - // "description": "Retrieves an aggregated list of machine types.", + // "description": "Gets service resources (a.k.a service project) associated with this host project.", // "httpMethod": "GET", - // "id": "compute.machineTypes.aggregatedList", + // "id": "compute.projects.getXpnResources", // "parameterOrder": [ // "project" // ], @@ -75651,7 +91892,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "minimum": "0", // "type": "integer" // }, - // "orderBy": { + // "order_by": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" @@ -75669,14 +91910,13 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "type": "string" // } // }, - // "path": "{project}/aggregated/machineTypes", + // "path": "{project}/getXpnResources", // "response": { - // "$ref": "MachineTypeAggregatedList" + // "$ref": "ProjectsGetXpnResources" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -75685,7 +91925,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { +func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*ProjectsGetXpnResources) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -75703,193 +91943,23 @@ func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*Mach } } -// method id "compute.machineTypes.get": - -type MachineTypesGetCall struct { - s *Service - project string - zone string - machineType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified machine type. Gets a list of available -// machine types by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get -func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { - c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.machineType = machineType - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
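A sketch of walking every page of GetXpnResources through its generated Pages helper (signature shown in this hunk). The Resources and Id field names on ProjectsGetXpnResources/XpnResourceId are assumptions about this package version and do not appear in the diff:

func listServiceProjects(ctx context.Context, svc *compute.Service, hostProject string) error {
	return svc.Projects.GetXpnResources(hostProject).Pages(ctx, func(page *compute.ProjectsGetXpnResources) error {
		for _, r := range page.Resources { // assumed field name
			fmt.Println("service project:", r.Id) // assumed field name
		}
		return nil // a non-nil error here halts the iteration, as the Pages docs say
	})
}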
-func (c *MachineTypesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "machineType": c.machineType, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.machineTypes.get" call. -// Exactly one of *MachineType or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *MachineType.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &MachineType{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified machine type. 
Gets a list of available machine types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.get", - // "parameterOrder": [ - // "project", - // "zone", - // "machineType" - // ], - // "parameters": { - // "machineType": { - // "description": "Name of the machine type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/machineTypes/{machineType}", - // "response": { - // "$ref": "MachineType" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.machineTypes.list": +// method id "compute.projects.listXpnHosts": -type MachineTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsListXpnHostsCall struct { + s *Service + project string + projectslistxpnhostsrequest *ProjectsListXpnHostsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of machine types available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list -func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { - c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListXpnHosts: Lists all shared VPC host projects visible to the user +// in an organization. +func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { + c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone + c.projectslistxpnhostsrequest = projectslistxpnhostsrequest return c } @@ -75915,7 +91985,7 @@ func (r *MachineTypesService) List(project string, zone string) *MachineTypesLis // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { +func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { c.urlParams_.Set("filter", filter) return c } @@ -75926,12 +91996,12 @@ func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { +func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by +// OrderBy sets the optional parameter "order_by": Sorts list results by // a certain order. By default, results are returned in alphanumerical // order based on the resource name. // @@ -75943,15 +92013,15 @@ func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCal // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { - c.urlParams_.Set("orderBy", orderBy) +func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("order_by", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { +func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -75959,72 +92029,63 @@ func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { +func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpnHostsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { +func (c *ProjectsListXpnHostsCall) Context(ctx context.Context) *ProjectsListXpnHostsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *MachineTypesListCall) Header() http.Header { +func (c *ProjectsListXpnHostsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectslistxpnhostsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/listXpnHosts") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.list" call. -// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx +// Do executes the "compute.projects.listXpnHosts" call. +// Exactly one of *XpnHostList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *MachineTypeList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { +// *XpnHostList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76043,7 +92104,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineTypeList{ + ret := &XpnHostList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -76055,12 +92116,11 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis } return ret, nil // { - // "description": "Retrieves a list of machine types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.list", + // "description": "Lists all shared VPC host projects visible to the user in an organization.", + // "httpMethod": "POST", + // "id": "compute.projects.listXpnHosts", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -76076,7 +92136,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "minimum": "0", // "type": "integer" // }, - // "orderBy": { + // "order_by": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" @@ -76092,23 +92152,18 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/machineTypes", + // "path": "{project}/listXpnHosts", + // "request": { + // "$ref": "ProjectsListXpnHostsRequest" + // }, // "response": { - // "$ref": "MachineTypeList" + // "$ref": "XpnHostList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -76117,7 +92172,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
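ListXpnHosts is a POST that still pages like a list call; a sketch using the Pages helper whose rename appears just below, under the assumptions that XpnHostList exposes its results as Items ([]*Project) and that an empty ProjectsListXpnHostsRequest is an acceptable placeholder body (its Organization field, if present in this version, can be set as needed):

func listXpnHosts(ctx context.Context, svc *compute.Service, projectID string) error {
	req := &compute.ProjectsListXpnHostsRequest{} // placeholder request body
	return svc.Projects.ListXpnHosts(projectID, req).Pages(ctx, func(page *compute.XpnHostList) error {
		for _, host := range page.Items { // assumed field name
			fmt.Println("host project:", host.Name)
		}
		return nil
	})
}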
-func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { +func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -76135,138 +92190,250 @@ func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeLis } } -// method id "compute.networkEndpointGroups.aggregatedList": +// method id "compute.projects.moveDisk": -type NetworkEndpointGroupsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsMoveDiskCall struct { + s *Service + project string + diskmoverequest *DiskMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves the list of network endpoint groups and -// sorts them by zone. -func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { - c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// MoveDisk: Moves a persistent disk from one zone to another. +func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { + c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.diskmoverequest = diskmoverequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
-func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { - c.urlParams_.Set("filter", filter) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall { + c.urlParams_.Set("requestId", requestId) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { + c.ctx_ = ctx return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsMoveDiskCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.projects.moveDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Moves a persistent disk from one zone to another.", + // "httpMethod": "POST", + // "id": "compute.projects.moveDisk", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/moveDisk", + // "request": { + // "$ref": "DiskMoveRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.moveInstance": + +type ProjectsMoveInstanceCall struct { + s *Service + project string + instancemoverequest *InstanceMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// MoveInstance: Moves an instance and its attached persistent disks +// from one zone to another. 
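A sketch of MoveDisk using the RequestId idempotency token documented above. The DiskMoveRequest field names and the resource URLs are assumptions/placeholders, and the returned Operation still has to be polled to completion:

func moveDisk(ctx context.Context, svc *compute.Service, projectID, requestID string) (*compute.Operation, error) {
	req := &compute.DiskMoveRequest{
		// Assumed field names; values are placeholders.
		TargetDisk:      "zones/us-central1-a/disks/example-disk",
		DestinationZone: "zones/us-central1-b",
	}
	// requestID should be a client-generated UUID (not the zero UUID) so a
	// retried call is recognized by the server and not applied twice.
	return svc.Projects.MoveDisk(projectID, req).RequestId(requestID).Context(ctx).Do()
}

MoveInstance, whose generated code continues just below, follows the same shape with an InstanceMoveRequest body.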
+func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { + c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instancemoverequest = instancemoverequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { +func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { +func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { +func (c *ProjectsMoveInstanceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -76277,15 +92444,14 @@ func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.R return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.aggregatedList" call. -// Exactly one of *NetworkEndpointGroupAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { +// Do executes the "compute.projects.moveInstance" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76304,7 +92470,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkEndpointGroupAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -76316,99 +92482,59 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", - // "httpMethod": "GET", - // "id": "compute.networkEndpointGroups.aggregatedList", + // "description": "Moves an instance and its attached persistent disks from one zone to another.", + // "httpMethod": "POST", + // "id": "compute.projects.moveInstance", // "parameterOrder": [ // "project" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/networkEndpointGroups", + // "path": "{project}/moveInstance", + // "request": { + // "$ref": "InstanceMoveRequest" + // }, // "response": { - // "$ref": "NetworkEndpointGroupAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networkEndpointGroups.attachNetworkEndpoints": +// method id "compute.projects.setCommonInstanceMetadata": -type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { - s *Service - project string - zone string - networkEndpointGroup string - networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsSetCommonInstanceMetadataCall struct { + s *Service + project string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AttachNetworkEndpoints: Attach a list of network endpoints to the -// specified network endpoint group. 
-func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { - c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetCommonInstanceMetadata: Sets metadata common to all instances +// within the specified project using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata +func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { + c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup - c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest + c.metadata = metadata return c } @@ -76426,7 +92552,7 @@ func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall { c.urlParams_.Set("requestId", requestId) return c } @@ -76434,7 +92560,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId st // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76442,35 +92568,35 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { +func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
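For orientation, a minimal usage sketch of the generated ProjectsSetCommonInstanceMetadataCall introduced above: build the call from the service, attach a context, then Do. This is not part of the vendored file; it assumes Application Default Credentials and a placeholder project ID ("my-project"), and it uses Projects.Get from the same generated package because setCommonInstanceMetadata replaces the project-wide metadata, so the current fingerprint should be carried over.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Authenticate with Application Default Credentials.
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// setCommonInstanceMetadata replaces the project-wide metadata, so fetch
	// the current value and keep its fingerprint for optimistic locking.
	proj, err := svc.Projects.Get("my-project").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	md := proj.CommonInstanceMetadata
	if md == nil {
		md = &compute.Metadata{}
	}
	env := "staging" // placeholder metadata value
	md.Items = append(md.Items, &compute.MetadataItems{Key: "environment", Value: &env})

	op, err := svc.Projects.SetCommonInstanceMetadata("my-project", md).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation %s is %s", op.Name, op.Status)
}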
-func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { +func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -76478,21 +92604,19 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. +// Do executes the "compute.projects.setCommonInstanceMetadata" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76523,21 +92647,13 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Attach a list of network endpoints to the specified network endpoint group.", + // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", + // "id": "compute.projects.setCommonInstanceMetadata", // "parameterOrder": [ - // "project", - // "zone", - // "networkEndpointGroup" + // "project" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group where you are attaching network endpoints to. 
It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -76549,17 +92665,11 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "path": "{project}/setCommonInstanceMetadata", // "request": { - // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" + // "$ref": "Metadata" // }, // "response": { // "$ref": "Operation" @@ -76572,27 +92682,25 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C } -// method id "compute.networkEndpointGroups.delete": +// method id "compute.projects.setDefaultNetworkTier": -type NetworkEndpointGroupsDeleteCall struct { - s *Service - project string - zone string - networkEndpointGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsSetDefaultNetworkTierCall struct { + s *Service + project string + projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified network endpoint group. The network -// endpoints in the NEG and the VM instances they belong to are not -// terminated when the NEG is deleted. Note that the NEG cannot be -// deleted if there are backend services referencing it. -func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { - c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDefaultNetworkTier: Sets the default network tier of the project. +// The default network tier is used when an +// address/forwardingRule/instance is created without specifying the +// network tier field. 
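A corresponding sketch for the new setDefaultNetworkTier method; svc is assumed to be a *compute.Service constructed as in the previous sketch, and "STANDARD" stands in for whichever tier the project should default to.

package computeexamples

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// setDefaultTier switches the project-wide default network tier, which new
// addresses, forwarding rules and instances inherit when they omit the field.
func setDefaultTier(ctx context.Context, svc *compute.Service, project string) error {
	req := &compute.ProjectsSetDefaultNetworkTierRequest{NetworkTier: "STANDARD"}
	op, err := svc.Projects.SetDefaultNetworkTier(project, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("setDefaultNetworkTier: operation %s is %s", op.Name, op.Status)
	return nil
}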
+func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall { + c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup + c.projectssetdefaultnetworktierrequest = projectssetdefaultnetworktierrequest return c } @@ -76610,7 +92718,7 @@ func (r *NetworkEndpointGroupsService) Delete(project string, zone string, netwo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { +func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *ProjectsSetDefaultNetworkTierCall { c.urlParams_.Set("requestId", requestId) return c } @@ -76618,7 +92726,7 @@ func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { +func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultNetworkTierCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76626,52 +92734,55 @@ func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkE // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { +func (c *ProjectsSetDefaultNetworkTierCall) Context(ctx context.Context) *ProjectsSetDefaultNetworkTierCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { +func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultnetworktierrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setDefaultNetworkTier") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.delete" call. +// Do executes the "compute.projects.setDefaultNetworkTier" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76702,21 +92813,13 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", - // "httpMethod": "DELETE", - // "id": "compute.networkEndpointGroups.delete", + // "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", + // "httpMethod": "POST", + // "id": "compute.projects.setDefaultNetworkTier", // "parameterOrder": [ - // "project", - // "zone", - // "networkEndpointGroup" + // "project" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -76728,15 +92831,12 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "path": "{project}/setDefaultNetworkTier", + // "request": { + // "$ref": "ProjectsSetDefaultNetworkTierRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -76748,27 +92848,26 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.networkEndpointGroups.detachNetworkEndpoints": +// method id "compute.projects.setUsageExportBucket": -type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { - s *Service - project string - zone string - networkEndpointGroup string - networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsSetUsageExportBucketCall struct { + s *Service + project string + usageexportlocation *UsageExportLocation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DetachNetworkEndpoints: Detach a list of network endpoints from the -// specified network endpoint group. -func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { - c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetUsageExportBucket: Enables the usage export feature and sets the +// usage export bucket where reports are stored. If you provide an empty +// request body using this method, the usage export feature will be +// disabled. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket +func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { + c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup - c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest + c.usageexportlocation = usageexportlocation return c } @@ -76786,7 +92885,7 @@ func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall { c.urlParams_.Set("requestId", requestId) return c } @@ -76794,7 +92893,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId st // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
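A hedged sketch of setUsageExportBucket combined with the requestId parameter described above; the bucket name and the hard-coded UUID are placeholders, and in practice a fresh UUID would be generated per logical request.

package computeexamples

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// enableUsageExport turns on usage export, writing reports into the given
// Cloud Storage bucket. The requestId lets the server de-duplicate a retried
// call; it must be a valid UUID and should be freshly generated per logical
// request (hard-coded here only to keep the sketch short).
func enableUsageExport(ctx context.Context, svc *compute.Service, project, bucket string) (*compute.Operation, error) {
	loc := &compute.UsageExportLocation{
		BucketName:       bucket,
		ReportNamePrefix: "usage_gce",
	}
	return svc.Projects.SetUsageExportBucket(project, loc).
		RequestId("b1946ac9-2ea7-4b2e-9d2c-000000000001").
		Context(ctx).
		Do()
}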
-func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76802,35 +92901,35 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { +func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { +func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -76838,21 +92937,19 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. +// Do executes the "compute.projects.setUsageExportBucket" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76883,21 +92980,13 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Detach a list of network endpoints from the specified network endpoint group.", + // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", + // "id": "compute.projects.setUsageExportBucket", // "parameterOrder": [ - // "project", - // "zone", - // "networkEndpointGroup" + // "project" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -76909,122 +92998,123 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "path": "{project}/setUsageExportBucket", // "request": { - // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" + // "$ref": "UsageExportLocation" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.networkEndpointGroups.get": +// method id "compute.regionAutoscalers.delete": -type NetworkEndpointGroupsGetCall struct { - s *Service - project string - zone string - networkEndpointGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionAutoscalersDeleteCall struct { + s *Service + project string + region string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified network endpoint group. Gets a list of -// available network endpoint groups by making a list() request. 
-func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { - c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified autoscaler. +func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { + c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup + c.region = region + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { +func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { +func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworkEndpointGroupsGetCall) Header() http.Header { +func (c *RegionAutoscalersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "region": c.region, + "autoscaler": c.autoscaler, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.get" call. -// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NetworkEndpointGroup.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { +// Do executes the "compute.regionAutoscalers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77043,7 +93133,7 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkEndpointGroup{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77055,18 +93145,19 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ } return ret, nil // { - // "description": "Returns the specified network endpoint group. 
Gets a list of available network endpoint groups by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.networkEndpointGroups.get", + // "description": "Deletes the specified autoscaler.", + // "httpMethod": "DELETE", + // "id": "compute.regionAutoscalers.delete", // "parameterOrder": [ // "project", - // "zone", - // "networkEndpointGroup" + // "region", + // "autoscaler" // ], // "parameters": { - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group. It should comply with RFC1035.", + // "autoscaler": { + // "description": "Name of the autoscaler to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -77077,128 +93168,123 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", // "response": { - // "$ref": "NetworkEndpointGroup" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.networkEndpointGroups.insert": +// method id "compute.regionAutoscalers.get": -type NetworkEndpointGroupsInsertCall struct { - s *Service - project string - zone string - networkendpointgroup *NetworkEndpointGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersGetCall struct { + s *Service + project string + region string + autoscaler string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a network endpoint group in the specified project -// using the parameters that are included in the request. -func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { - c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified autoscaler. 
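A short sketch of the conditional-GET pattern that the IfNoneMatch option on this call (shown just below) enables, assuming an ETag captured from an earlier response; the project, region and autoscaler names are placeholders.

package computeexamples

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getAutoscalerIfChanged re-fetches a regional autoscaler only if it has
// changed since the given ETag; a 304 response surfaces as an error that
// googleapi.IsNotModified recognises.
func getAutoscalerIfChanged(ctx context.Context, svc *compute.Service, project, region, name, etag string) (*compute.Autoscaler, error) {
	as, err := svc.RegionAutoscalers.Get(project, region, name).
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		log.Printf("autoscaler %q unchanged since etag %s", name, etag)
		return nil, nil
	}
	return as, err
}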
+func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { + c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkendpointgroup = networkendpointgroup - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.autoscaler = autoscaler return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { +func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { +func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { +func (c *RegionAutoscalersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "region": c.region, + "autoscaler": c.autoscaler, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionAutoscalers.get" call. +// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *Autoscaler.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77217,7 +93303,7 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Autoscaler{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77229,14 +93315,22 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", - // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.insert", + // "description": "Returns the specified autoscaler.", + // "httpMethod": "GET", + // "id": "compute.regionAutoscalers.get", // "parameterOrder": [ // "project", - // "zone" + // "region", + // "autoscaler" // ], // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -77244,186 +93338,129 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the network endpoint group. It should comply with RFC1035.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups", - // "request": { - // "$ref": "NetworkEndpointGroup" - // }, + // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", // "response": { - // "$ref": "Operation" + // "$ref": "Autoscaler" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networkEndpointGroups.list": - -type NetworkEndpointGroupsListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of network endpoint groups that are located -// in the specified project and zone. 
-func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { - c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} +// method id "compute.regionAutoscalers.insert": -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { - c.urlParams_.Set("filter", filter) - return c +type RegionAutoscalersInsertCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Insert: Creates an autoscaler in the specified project using the data +// included in the request. +func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { + c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.autoscaler = autoscaler return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). 
Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { +func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { +func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
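A sketch of inserting a regional autoscaler with a CPU-utilization policy via the call defined here; the autoscaler name, target self-link and policy numbers are placeholder values, and svc is a *compute.Service built as in the first sketch.

package computeexamples

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// insertRegionAutoscaler creates a CPU-based autoscaler for a regional managed
// instance group; targetGroup is the self-link of the instance group manager
// and the policy numbers are placeholder values.
func insertRegionAutoscaler(ctx context.Context, svc *compute.Service, project, region, targetGroup string) (*compute.Operation, error) {
	as := &compute.Autoscaler{
		Name:   "web-autoscaler",
		Target: targetGroup,
		AutoscalingPolicy: &compute.AutoscalingPolicy{
			MinNumReplicas:    2,
			MaxNumReplicas:    10,
			CoolDownPeriodSec: 60,
			CpuUtilization:    &compute.AutoscalingPolicyCpuUtilization{UtilizationTarget: 0.6},
		},
	}
	return svc.RegionAutoscalers.Insert(project, region, as).Context(ctx).Do()
}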
-func (c *NetworkEndpointGroupsListCall) Header() http.Header { +func (c *RegionAutoscalersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.list" call. -// Exactly one of *NetworkEndpointGroupList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { +// Do executes the "compute.regionAutoscalers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77442,7 +93479,7 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkEndpointGroupList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77454,37 +93491,14 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo } return ret, nil // { - // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.networkEndpointGroups.list", + // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionAutoscalers.insert", // "parameterOrder": [ // "project", - // "zone" + // "region" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -77492,68 +93506,52 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups", + // "path": "{project}/regions/{region}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, // "response": { - // "$ref": "NetworkEndpointGroupList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networkEndpointGroups.listNetworkEndpoints": +// method id "compute.regionAutoscalers.list": -type NetworkEndpointGroupsListNetworkEndpointsCall struct { - s *Service - project string - zone string - networkEndpointGroup string - networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListNetworkEndpoints: Lists the network endpoints in the specified -// network endpoint group. 
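A minimal usage sketch for the compute.regionAutoscalers.insert call generated above. The package alias and import path are assumptions (use whatever path this package is vendored under), and the exact Insert constructor signature is inferred from the call struct's project/region/autoscaler fields; only the Do chain returning an *Operation is taken directly from this hunk.

package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed
)

// insertRegionAutoscaler starts creation of a regional autoscaler and returns
// the long-running *Operation described by compute.regionAutoscalers.insert.
// The caller is expected to have built the *compute.Autoscaler already.
func insertRegionAutoscaler(svc *compute.RegionAutoscalersService, project, region string, a *compute.Autoscaler) (*compute.Operation, error) {
	op, err := svc.Insert(project, region, a).Do()
	if err != nil {
		return nil, fmt.Errorf("regionAutoscalers.insert: %v", err)
	}
	return op, nil
}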
-func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { - c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of autoscalers contained within the specified +// region. +func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { + c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.networkEndpointGroup = networkEndpointGroup - c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest + c.region = region return c } @@ -77579,7 +93577,7 @@ func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { c.urlParams_.Set("filter", filter) return c } @@ -77590,7 +93588,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *N // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -77607,7 +93605,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults in // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -77615,7 +93613,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -77623,67 +93621,72 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken stri // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { +func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { +func (c *RegionAutoscalersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "networkEndpointGroup": c.networkEndpointGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. -// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. 
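A sketch of the conditional-fetch pattern enabled by the IfNoneMatch option added above, assuming the same package alias as before and that a previously fetched RegionAutoscalerList carried an ETag header; googleapi.IsNotModified is the check named in the generated comment.

package example

import (
	compute "google.golang.org/api/compute/v0.beta" // import path assumed
	"google.golang.org/api/googleapi"               // standard path assumed
)

// listIfChanged re-lists regional autoscalers only if the collection changed
// since prev was fetched; on 304 Not Modified it returns prev unchanged.
func listIfChanged(svc *compute.RegionAutoscalersService, project, region string, prev *compute.RegionAutoscalerList) (*compute.RegionAutoscalerList, error) {
	etag := prev.ServerResponse.Header.Get("Etag") // assumes the server sent an ETag
	cur, err := svc.List(project, region).IfNoneMatch(etag).Do()
	if googleapi.IsNotModified(err) {
		return prev, nil
	}
	if err != nil {
		return nil, err
	}
	return cur, nil
}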
-func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { +// Do executes the "compute.regionAutoscalers.list" call. +// Exactly one of *RegionAutoscalerList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RegionAutoscalerList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77702,7 +93705,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkEndpointGroupsListNetworkEndpoints{ + ret := &RegionAutoscalerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77714,13 +93717,12 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists the network endpoints in the specified network endpoint group.", - // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.listNetworkEndpoints", + // "description": "Retrieves a list of autoscalers contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionAutoscalers.list", // "parameterOrder": [ // "project", - // "zone", - // "networkEndpointGroup" + // "region" // ], // "parameters": { // "filter": { @@ -77736,12 +93738,6 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal // "minimum": "0", // "type": "integer" // }, - // "networkEndpointGroup": { - // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", @@ -77759,19 +93755,17 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", - // "request": { - // "$ref": "NetworkEndpointGroupsListEndpointsRequest" - // }, + // "path": "{project}/regions/{region}/autoscalers", // "response": { - // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" + // "$ref": "RegionAutoscalerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -77785,7 +93779,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { +func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -77803,187 +93797,33 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Contex } } -// method id "compute.networkEndpointGroups.testIamPermissions": +// method id "compute.regionAutoscalers.patch": -type NetworkEndpointGroupsTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersPatchCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { - c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates an autoscaler in the specified project using the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { + c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
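The Pages helper above drives the pageToken/nextPageToken loop for compute.regionAutoscalers.list automatically. A sketch of walking every page, under the same import-path assumption; the Items and Name fields are not shown in this hunk and are assumed from the wider generated surface.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed
)

// printRegionAutoscalers visits every page of list results; returning a
// non-nil error from the callback halts the iteration, mirroring Pages' doc.
func printRegionAutoscalers(ctx context.Context, svc *compute.RegionAutoscalersService, project, region string) error {
	return svc.List(project, region).
		MaxResults(100). // per-page size; the API default and maximum is 500
		Pages(ctx, func(page *compute.RegionAutoscalerList) error {
			for _, a := range page.Items { // Items field assumed
				fmt.Println(a.Name) // Name field assumed
			}
			return nil
		})
}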
-func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { - c.ctx_ = ctx + c.region = region + c.autoscaler = autoscaler return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.networkEndpointGroups.testIamPermissions", - // "parameterOrder": [ - // "project", - // "zone", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.networks.addPeering": - -type NetworksAddPeeringCall struct { - s *Service - project string - network string - networksaddpeeringrequest *NetworksAddPeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AddPeering: Adds a peering to the specified network. -func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { - c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.network = network - c.networksaddpeeringrequest = networksaddpeeringrequest +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to patch. +func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall { + c.urlParams_.Set("autoscaler", autoscaler) return c } @@ -78001,7 +93841,7 @@ func (r *NetworksService) AddPeering(project string, network string, networksadd // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
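A sketch of an idempotent partial update through the Patch call and RequestId parameter documented here. requestID is a caller-supplied UUID (the zero UUID is not allowed), and patch carries only the fields to merge, per the JSON-merge-patch description above; import path assumed as before.

package example

import (
	compute "google.golang.org/api/compute/v0.beta" // import path assumed
)

// patchRegionAutoscaler merges the fields set in patch into the named
// autoscaler. Retrying with the same requestID is safe: the server ignores a
// duplicate of an already-completed request.
func patchRegionAutoscaler(svc *compute.RegionAutoscalersService, project, region, name, requestID string, patch *compute.Autoscaler) (*compute.Operation, error) {
	return svc.Patch(project, region, patch).
		Autoscaler(name).     // optional "autoscaler" query parameter: which autoscaler to patch
		RequestId(requestID). // must be a valid, non-zero UUID
		Do()
}

The PUT-based Update call further below has the same shape but replaces the whole resource instead of merging fields.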
-func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeeringCall { +func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -78009,7 +93849,7 @@ func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeering // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeeringCall { +func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78017,56 +93857,56 @@ func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeerin // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksAddPeeringCall) Context(ctx context.Context) *NetworksAddPeeringCall { +func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksAddPeeringCall) Header() http.Header { +func (c *RegionAutoscalersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksaddpeeringrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.addPeering" call. +// Do executes the "compute.regionAutoscalers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78097,19 +93937,18 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Adds a peering to the specified network.", - // "httpMethod": "POST", - // "id": "compute.networks.addPeering", + // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionAutoscalers.patch", // "parameterOrder": [ // "project", - // "network" + // "region" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource to add peering to.", - // "location": "path", + // "autoscaler": { + // "description": "Name of the autoscaler to patch.", + // "location": "query", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, // "type": "string" // }, // "project": { @@ -78119,15 +93958,22 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/addPeering", + // "path": "{project}/regions/{region}/autoscalers", // "request": { - // "$ref": "NetworksAddPeeringRequest" + // "$ref": "Autoscaler" // }, // "response": { // "$ref": "Operation" @@ -78140,49 +93986,34 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.networks.delete": +// method id "compute.regionAutoscalers.testIamPermissions": -type NetworksDeleteCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified network. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete -func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { - c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
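A sketch for the testIamPermissions call introduced here, which reports which of the requested permissions the caller actually holds on a regional autoscaler. The Permissions field name on TestPermissionsRequest and TestPermissionsResponse is not shown in this hunk and is assumed; import path as before.

package example

import (
	compute "google.golang.org/api/compute/v0.beta" // import path assumed
)

// checkAutoscalerPermissions asks the API which of perms the caller has on
// the named autoscaler resource and returns the granted subset.
func checkAutoscalerPermissions(svc *compute.RegionAutoscalersService, project, region, resource string, perms []string) ([]string, error) {
	req := &compute.TestPermissionsRequest{Permissions: perms} // field name assumed
	resp, err := svc.TestIamPermissions(project, region, resource, req).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil // field name assumed
}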
+func (r *RegionAutoscalersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionAutoscalersTestIamPermissionsCall { + c := &RegionAutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { +func (c *RegionAutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionAutoscalersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78190,51 +94021,57 @@ func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { +func (c *RegionAutoscalersTestIamPermissionsCall) Context(ctx context.Context) *RegionAutoscalersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksDeleteCall) Header() http.Header { +func (c *RegionAutoscalersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionAutoscalers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78253,7 +94090,7 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78265,137 +94102,162 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified network.", - // "httpMethod": "DELETE", - // "id": "compute.networks.delete", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.regionAutoscalers.testIamPermissions", // "parameterOrder": [ // "project", - // "network" + // "region", + // "resource" // ], // "parameters": { - // "network": { - // "description": "Name of the network to delete.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networks.get": +// method id "compute.regionAutoscalers.update": -type NetworksGetCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionAutoscalersUpdateCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified network. Gets a list of available networks -// by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get -func (r *NetworksService) Get(project string, network string) *NetworksGetCall { - c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates an autoscaler in the specified project using the data +// included in the request. +func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { + c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.region = region + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to update. +func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { +func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { +func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksGetCall) Header() http.Header { +func (c *RegionAutoscalersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.get" call. -// Exactly one of *Network or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Network.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { +// Do executes the "compute.regionAutoscalers.update" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78414,7 +94276,7 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Network{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78426,19 +94288,18 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { } return ret, nil // { - // "description": "Returns the specified network. Gets a list of available networks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.networks.get", + // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.regionAutoscalers.update", // "parameterOrder": [ // "project", - // "network" + // "region" // ], // "parameters": { - // "network": { - // "description": "Name of the network to return.", - // "location": "path", + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, // "type": "string" // }, // "project": { @@ -78447,39 +94308,53 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/regions/{region}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, // "response": { - // "$ref": "Network" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.networks.insert": +// method id "compute.regionBackendServices.delete": -type NetworksInsertCall struct { - s *Service - project string - network *Network - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesDeleteCall struct { + s *Service + project string + region string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a network in the specified project using the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert -func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { - c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified regional BackendService resource. +func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { + c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.region = region + c.backendService = backendService return c } @@ -78497,7 +94372,7 @@ func (r *NetworksService) Insert(project string, network *Network) *NetworksInse // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { +func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBackendServicesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -78505,7 +94380,7 @@ func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { +func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBackendServicesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78513,55 +94388,52 @@ func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { +func (c *RegionBackendServicesDeleteCall) Context(ctx context.Context) *RegionBackendServicesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
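The update (PUT) variant just above replaces the full autoscaler definition rather than merging fields; a sketch mirroring the patch example, under the same import-path assumption.

package example

import (
	compute "google.golang.org/api/compute/v0.beta" // import path assumed
)

// updateRegionAutoscaler replaces the named autoscaler with the complete
// definition in a and returns the resulting long-running Operation.
func updateRegionAutoscaler(svc *compute.RegionAutoscalersService, project, region, name string, a *compute.Autoscaler) (*compute.Operation, error) {
	return svc.Update(project, region, a).
		Autoscaler(name). // optional "autoscaler" query parameter: which autoscaler to update
		Do()
}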
-func (c *NetworksInsertCall) Header() http.Header { +func (c *RegionBackendServicesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.insert" call. +// Do executes the "compute.regionBackendServices.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78592,13 +94464,22 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Creates a network in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.networks.insert", + // "description": "Deletes the specified regional BackendService resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionBackendServices.delete", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "backendService" // ], // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -78606,16 +94487,20 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/networks", - // "request": { - // "$ref": "Network" - // }, + // "path": "{project}/regions/{region}/backendServices/{backendService}", // "response": { // "$ref": "Operation" // }, @@ -78627,93 +94512,32 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.networks.list": +// method id "compute.regionBackendServices.get": -type NetworksListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionBackendServicesGetCall struct { + s *Service + project string + region string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of networks available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list -func (r *NetworksService) List(project string) *NetworksListCall { - c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified regional BackendService resource. +func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { + c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
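A sketch for the regional BackendService delete call generated above; like the autoscaler mutations it returns an *Operation that has to be waited on separately. Import path assumed as before.

package example

import (
	compute "google.golang.org/api/compute/v0.beta" // import path assumed
)

// deleteRegionBackendService starts deletion of a regional BackendService.
// requestID is optional; when set it must be a valid, non-zero UUID and makes
// retries of this request idempotent.
func deleteRegionBackendService(svc *compute.RegionBackendServicesService, project, region, name, requestID string) (*compute.Operation, error) {
	call := svc.Delete(project, region, name)
	if requestID != "" {
		call = call.RequestId(requestID)
	}
	return call.Do()
}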
-func (c *NetworksListCall) Filter(filter string) *NetworksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.backendService = backendService return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { +func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78723,7 +94547,7 @@ func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { +func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -78731,21 +94555,21 @@ func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { +func (c *RegionBackendServicesGetCall) Context(ctx context.Context) *RegionBackendServicesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksListCall) Header() http.Header { +func (c *RegionBackendServicesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -78757,7 +94581,7 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -78765,19 +94589,21 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.list" call. -// Exactly one of *NetworkList or error will be non-nil. Any non-2xx +// Do executes the "compute.regionBackendServices.get" call. +// Exactly one of *BackendService or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *NetworkList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { +// *BackendService.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78796,7 +94622,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkList{ + ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78808,34 +94634,20 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error } return ret, nil // { - // "description": "Retrieves the list of networks available to the specified project.", + // "description": "Returns the specified regional BackendService resource.", // "httpMethod": "GET", - // "id": "compute.networks.list", + // "id": "compute.regionBackendServices.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "backendService" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "backendService": { + // "description": "Name of the BackendService resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -78844,11 +94656,18 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/networks", + // "path": "{project}/regions/{region}/backendServices/{backendService}", // "response": { - // "$ref": "NetworkList" + // "$ref": "BackendService" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -78859,206 +94678,92 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. 
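The hunk above swaps the deleted networks.list plumbing for the compute.regionBackendServices.get wrapper: Get builds the call, Fields/IfNoneMatch/Context layer optional behaviour on top, and Do expands {project}/regions/{region}/backendServices/{backendService} and decodes the BackendService response. A minimal usage sketch in Go, assuming a *compute.Service has already been constructed elsewhere; the project, region and service names are placeholders.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getBackendService fetches a regional BackendService, asking only for a few
// fields and skipping the transfer entirely if the cached ETag still matches.
func getBackendService(ctx context.Context, svc *compute.Service, etag string) (*compute.BackendService, error) {
	call := svc.RegionBackendServices.
		Get("my-project", "us-central1", "my-backend-service"). // placeholder names
		Fields(googleapi.Field("name"), googleapi.Field("backends"), googleapi.Field("healthChecks")).
		Context(ctx)
	if etag != "" {
		call = call.IfNoneMatch(etag) // Do fails with StatusNotModified if unchanged
	}
	bs, err := call.Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("backend service unchanged since last fetch")
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return bs, nil
}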
-// The provided context supersedes any context provided to the Context method. -func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networks.listPeeringRoutes": +// method id "compute.regionBackendServices.getHealth": -type NetworksListPeeringRoutesCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionBackendServicesGetHealthCall struct { + s *Service + project string + region string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListPeeringRoutes: Lists the peering routes exchanged over peering -// connection. -func (r *NetworksService) ListPeeringRoutes(project string, network string) *NetworksListPeeringRoutesCall { - c := &NetworksListPeeringRoutesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetHealth: Gets the most recent health check results for this +// regional BackendService. +func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { + c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - return c -} - -// Direction sets the optional parameter "direction": The direction of -// the exchanged routes. -// -// Possible values: -// "INCOMING" -// "OUTGOING" -func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("direction", direction) - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NetworksListPeeringRoutesCall) Filter(filter string) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworksListPeeringRoutesCall) MaxResults(maxResults int64) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NetworksListPeeringRoutesCall) OrderBy(orderBy string) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NetworksListPeeringRoutesCall) PageToken(pageToken string) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// PeeringName sets the optional parameter "peeringName": The response -// will show routes exchanged over the given peering connection. -func (c *NetworksListPeeringRoutesCall) PeeringName(peeringName string) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("peeringName", peeringName) - return c -} - -// Region sets the optional parameter "region": The region of the -// request. The response will include all subnet routes, static routes -// and dynamic routes in the region. -func (c *NetworksListPeeringRoutesCall) Region(region string) *NetworksListPeeringRoutesCall { - c.urlParams_.Set("region", region) + c.region = region + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksListPeeringRoutesCall) Fields(s ...googleapi.Field) *NetworksListPeeringRoutesCall { +func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetHealthCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworksListPeeringRoutesCall) IfNoneMatch(entityTag string) *NetworksListPeeringRoutesCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *NetworksListPeeringRoutesCall) Context(ctx context.Context) *NetworksListPeeringRoutesCall { +func (c *RegionBackendServicesGetHealthCall) Context(ctx context.Context) *RegionBackendServicesGetHealthCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksListPeeringRoutesCall) Header() http.Header { +func (c *RegionBackendServicesGetHealthCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/listPeeringRoutes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.listPeeringRoutes" call. -// Exactly one of *ExchangedPeeringRoutesList or error will be non-nil. +// Do executes the "compute.regionBackendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *ExchangedPeeringRoutesList.ServerResponse.Header or (if a response +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response // was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*ExchangedPeeringRoutesList, error) { +func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79077,7 +94782,7 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ExchangedPeeringRoutesList{ + ret := &BackendServiceGroupHealth{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -79089,78 +94794,42 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha } return ret, nil // { - // "description": "Lists the peering routes exchanged over peering connection.", - // "httpMethod": "GET", - // "id": "compute.networks.listPeeringRoutes", + // "description": "Gets the most recent health check results for this regional BackendService.", + // "httpMethod": "POST", + // "id": "compute.regionBackendServices.getHealth", // "parameterOrder": [ // "project", - // "network" + // "region", + // "backendService" // ], // "parameters": { - // "direction": { - // "description": "The direction of the exchanged routes.", - // "enum": [ - // "INCOMING", - // "OUTGOING" - // ], - // "enumDescriptions": [ - // "", - // "" - // ], - // "location": "query", - // "type": "string" - // }, - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "network": { - // "description": "Name of the network for this request.", + // "backendService": { + // "description": "Name of the BackendService resource for which to get health.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "peeringName": { - // "description": "The response will show routes exchanged over the given peering connection.", - // "location": "query", - // "type": "string" - // }, // "project": { - // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "The region of the request. The response will include all subnet routes, static routes and dynamic routes in the region.", - // "location": "query", + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/listPeeringRoutes", + // "path": "{project}/regions/{region}/backendServices/{backendService}/getHealth", + // "request": { + // "$ref": "ResourceGroupReference" + // }, // "response": { - // "$ref": "ExchangedPeeringRoutesList" + // "$ref": "BackendServiceGroupHealth" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -79171,47 +94840,28 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NetworksListPeeringRoutesCall) Pages(ctx context.Context, f func(*ExchangedPeeringRoutesList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networks.patch": +// method id "compute.regionBackendServices.insert": -type NetworksPatchCall struct { - s *Service - project string - network string - network2 *Network - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesInsertCall struct { + s *Service + project string + region string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified network with the data included in the -// request. Only the following fields can be modified: -// routingConfig.routingMode. 
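Unlike the surrounding GET wrappers, the getHealth call added above is a POST whose body is a ResourceGroupReference naming the backend group to report on. A hedged sketch; the group URL is a placeholder, and the HealthStatus field names come from the same generated package rather than from this hunk.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printBackendHealth asks for the most recent health check results of one
// backend group behind a regional backend service. The group URL must
// reference an instance group already attached as a backend.
func printBackendHealth(ctx context.Context, svc *compute.Service) error {
	ref := &compute.ResourceGroupReference{
		Group: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instanceGroups/my-ig",
	}
	health, err := svc.RegionBackendServices.
		GetHealth("my-project", "us-central1", "my-backend-service", ref).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, hs := range health.HealthStatus {
		fmt.Printf("%s -> %s\n", hs.Instance, hs.HealthState)
	}
	return nil
}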
-func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { - c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a regional BackendService resource in the specified +// project using the data included in the request. There are several +// restrictions and guidelines to keep in mind when creating a regional +// backend service. Read Restrictions and Guidelines for more +// information. +func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { + c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.network2 = network2 + c.region = region + c.backendservice = backendservice return c } @@ -79229,7 +94879,7 @@ func (r *NetworksService) Patch(project string, network string, network2 *Networ // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { +func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBackendServicesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79237,7 +94887,7 @@ func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { +func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBackendServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79245,56 +94895,56 @@ func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksPatchCall) Context(ctx context.Context) *NetworksPatchCall { +func (c *RegionBackendServicesInsertCall) Context(ctx context.Context) *RegionBackendServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksPatchCall) Header() http.Header { +func (c *RegionBackendServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.patch" call. +// Do executes the "compute.regionBackendServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79325,25 +94975,25 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Patches the specified network with the data included in the request. Only the following fields can be modified: routingConfig.routingMode.", - // "httpMethod": "PATCH", - // "id": "compute.networks.patch", + // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information.", + // "httpMethod": "POST", + // "id": "compute.regionBackendServices.insert", // "parameterOrder": [ // "project", - // "network" + // "region" // ], // "parameters": { - // "network": { - // "description": "Name of the network to update.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -79353,9 +95003,9 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/regions/{region}/backendServices", // "request": { - // "$ref": "Network" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -79368,107 +95018,159 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.networks.removePeering": +// method id "compute.regionBackendServices.list": -type NetworksRemovePeeringCall struct { - s *Service - project string - network string - networksremovepeeringrequest *NetworksRemovePeeringRequest - urlParams_ 
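The insert wrapper above POSTs a BackendService body and accepts the optional requestId documented earlier, which makes retries idempotent: resending the same UUID lets the server ignore the duplicate instead of creating a second resource. A sketch under those assumptions; the resource names, URLs and field values are illustrative only.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// createRegionalBackendService creates a regional (INTERNAL) backend service.
// requestID should be a client-generated UUID; reusing it on retry prevents
// duplicate creation, per the requestId parameter documented above.
func createRegionalBackendService(ctx context.Context, svc *compute.Service, requestID string) (*compute.Operation, error) {
	bs := &compute.BackendService{
		Name:                "my-backend-service",
		Protocol:            "TCP",
		LoadBalancingScheme: "INTERNAL",
		HealthChecks: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/global/healthChecks/my-health-check",
		},
		Backends: []*compute.Backend{
			{Group: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instanceGroups/my-ig"},
		},
	}
	return svc.RegionBackendServices.
		Insert("my-project", "us-central1", bs).
		RequestId(requestID).
		Context(ctx).
		Do()
}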
gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RemovePeering: Removes a peering from the specified network. -func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { - c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of regional BackendService resources +// available to the specified project in the given region. +func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { + c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksremovepeeringrequest = networksremovepeeringrequest + c.region = region return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemovePeeringCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBackendServicesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendServicesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBackendServicesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemovePeeringCall { +func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBackendServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBackendServicesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksRemovePeeringCall) Context(ctx context.Context) *NetworksRemovePeeringCall { +func (c *RegionBackendServicesListCall) Context(ctx context.Context) *RegionBackendServicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksRemovePeeringCall) Header() http.Header { +func (c *RegionBackendServicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksremovepeeringrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.removePeering" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionBackendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendServiceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79487,7 +95189,7 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -79499,19 +95201,35 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Removes a peering from the specified network.", - // "httpMethod": "POST", - // "id": "compute.networks.removePeering", + // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", + // "httpMethod": "GET", + // "id": "compute.regionBackendServices.list", // "parameterOrder": [ // "project", - // "network" + // "region" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource to remove peering from.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -79521,44 +95239,73 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/removePeering", - // "request": { - // "$ref": "NetworksRemovePeeringRequest" - // }, + // "path": "{project}/regions/{region}/backendServices", // "response": { - // "$ref": "Operation" + // "$ref": "BackendServiceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networks.switchToCustomMode": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NetworksSwitchToCustomModeCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionBackendServices.patch": + +type RegionBackendServicesPatchCall struct { + s *Service + project string + region string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SwitchToCustomMode: Switches the network mode from auto subnet mode -// to custom subnet mode. 
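The list wrapper above keeps the familiar filter/maxResults/orderBy/pageToken query parameters, and its Pages helper drives the nextPageToken loop (resetting the token when it returns) so callers rarely need to page by hand. A sketch with an illustrative filter expression and placeholder names.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listBackendServices walks every page of regional backend services whose
// name is not "default", newest first.
func listBackendServices(ctx context.Context, svc *compute.Service) error {
	call := svc.RegionBackendServices.
		List("my-project", "us-central1").
		Filter(`name != "default"`).
		OrderBy("creationTimestamp desc").
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.BackendServiceList) error {
		for _, bs := range page.Items {
			fmt.Println(bs.Name, bs.Protocol)
		}
		return nil // a non-nil error here halts the iteration
	})
}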
-func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { - c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified regional BackendService resource with +// the data included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. +func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { + c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.region = region + c.backendService = backendService + c.backendservice = backendservice return c } @@ -79576,7 +95323,7 @@ func (r *NetworksService) SwitchToCustomMode(project string, network string) *Ne // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSwitchToCustomModeCall { +func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBackendServicesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79584,7 +95331,7 @@ func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSw // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { +func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBackendServicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79592,51 +95339,57 @@ func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { +func (c *RegionBackendServicesPatchCall) Context(ctx context.Context) *RegionBackendServicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksSwitchToCustomModeCall) Header() http.Header { +func (c *RegionBackendServicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.switchToCustomMode" call. +// Do executes the "compute.regionBackendServices.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79667,16 +95420,17 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", - // "httpMethod": "POST", - // "id": "compute.networks.switchToCustomMode", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionBackendServices.patch", // "parameterOrder": [ // "project", - // "network" + // "region", + // "backendService" // ], // "parameters": { - // "network": { - // "description": "Name of the network to be updated.", + // "backendService": { + // "description": "Name of the BackendService resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -79689,13 +95443,23 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/switchToCustomMode", + // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, // "response": { // "$ref": "Operation" // }, @@ -79707,11 +95471,12 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.networks.testIamPermissions": +// method id "compute.regionBackendServices.testIamPermissions": -type NetworksTestIamPermissionsCall struct { +type RegionBackendServicesTestIamPermissionsCall struct { s *Service project string + region string resource string testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams @@ -79721,9 +95486,10 @@ type NetworksTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. -func (r *NetworksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworksTestIamPermissionsCall { - c := &NetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionBackendServicesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionBackendServicesTestIamPermissionsCall { + c := &RegionBackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region c.resource = resource c.testpermissionsrequest = testpermissionsrequest return c @@ -79732,7 +95498,7 @@ func (r *NetworksService) TestIamPermissions(project string, resource string, te // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
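As the patch metadata above notes, regionBackendServices.patch uses JSON merge-patch semantics, so the request body only needs the fields being changed; in practice the current fingerprint is usually included as well for optimistic concurrency. A sketch; the TimeoutSec and Fingerprint field names come from the generated BackendService type, not from this hunk, and the names are placeholders.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// patchBackendServiceTimeout changes only the timeout of an existing regional
// backend service, carrying the Fingerprint from a fresh Get to guard against
// concurrent modification.
func patchBackendServiceTimeout(ctx context.Context, svc *compute.Service, requestID string) (*compute.Operation, error) {
	current, err := svc.RegionBackendServices.
		Get("my-project", "us-central1", "my-backend-service").
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	patch := &compute.BackendService{
		TimeoutSec:  60,
		Fingerprint: current.Fingerprint,
	}
	return svc.RegionBackendServices.
		Patch("my-project", "us-central1", "my-backend-service", patch).
		RequestId(requestID).
		Context(ctx).
		Do()
}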
-func (c *NetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworksTestIamPermissionsCall { +func (c *RegionBackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionBackendServicesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79740,21 +95506,21 @@ func (c *NetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworksT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksTestIamPermissionsCall) Context(ctx context.Context) *NetworksTestIamPermissionsCall { +func (c *RegionBackendServicesTestIamPermissionsCall) Context(ctx context.Context) *RegionBackendServicesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksTestIamPermissionsCall) Header() http.Header { +func (c *RegionBackendServicesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -79768,7 +95534,7 @@ func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -79777,19 +95543,20 @@ func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.testIamPermissions" call. +// Do executes the "compute.regionBackendServices.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79822,9 +95589,10 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.networks.testIamPermissions", + // "id": "compute.regionBackendServices.testIamPermissions", // "parameterOrder": [ // "project", + // "region", // "resource" // ], // "parameters": { @@ -79835,15 +95603,22 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test // "required": true, // "type": "string" // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/networks/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/backendServices/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -79859,27 +95634,29 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } -// method id "compute.networks.updatePeering": +// method id "compute.regionBackendServices.update": -type NetworksUpdatePeeringCall struct { - s *Service - project string - network string - networksupdatepeeringrequest *NetworksUpdatePeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesUpdateCall struct { + s *Service + project string + region string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdatePeering: Updates the specified network peering with the data -// included in the request Only the following fields can be modified: -// NetworkPeering.export_custom_routes, and -// NetworkPeering.import_custom_routes -func (r *NetworksService) UpdatePeering(project string, network string, networksupdatepeeringrequest *NetworksUpdatePeeringRequest) *NetworksUpdatePeeringCall { - c := &NetworksUpdatePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified regional BackendService resource with +// the data included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. +func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { + c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksupdatepeeringrequest = networksupdatepeeringrequest + c.region = region + c.backendService = backendService + c.backendservice = backendservice return c } @@ -79897,7 +95674,7 @@ func (r *NetworksService) UpdatePeering(project string, network string, networks // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *NetworksUpdatePeeringCall) RequestId(requestId string) *NetworksUpdatePeeringCall { +func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBackendServicesUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79905,7 +95682,7 @@ func (c *NetworksUpdatePeeringCall) RequestId(requestId string) *NetworksUpdateP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksUpdatePeeringCall) Fields(s ...googleapi.Field) *NetworksUpdatePeeringCall { +func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBackendServicesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79913,56 +95690,57 @@ func (c *NetworksUpdatePeeringCall) Fields(s ...googleapi.Field) *NetworksUpdate // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksUpdatePeeringCall) Context(ctx context.Context) *NetworksUpdatePeeringCall { +func (c *RegionBackendServicesUpdateCall) Context(ctx context.Context) *RegionBackendServicesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksUpdatePeeringCall) Header() http.Header { +func (c *RegionBackendServicesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksupdatepeeringrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/updatePeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.updatePeering" call. +// Do executes the "compute.regionBackendServices.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79993,16 +95771,17 @@ func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the specified network peering with the data included in the request Only the following fields can be modified: NetworkPeering.export_custom_routes, and NetworkPeering.import_custom_routes", - // "httpMethod": "PATCH", - // "id": "compute.networks.updatePeering", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + // "httpMethod": "PUT", + // "id": "compute.regionBackendServices.update", // "parameterOrder": [ // "project", - // "network" + // "region", + // "backendService" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource which the updated peering is belonging to.", + // "backendService": { + // "description": "Name of the BackendService resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -80015,200 +95794,22 @@ func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/networks/{network}/updatePeering", - // "request": { - // "$ref": "NetworksUpdatePeeringRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.nodeGroups.addNodes": - -type NodeGroupsAddNodesCall struct { - s *Service - project string - zone string - nodeGroup string - nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AddNodes: Adds specified number of nodes to the node group. 
-func (r *NodeGroupsService) AddNodes(project string, zone string, nodeGroup string, nodegroupsaddnodesrequest *NodeGroupsAddNodesRequest) *NodeGroupsAddNodesCall { - c := &NodeGroupsAddNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.nodeGroup = nodeGroup - c.nodegroupsaddnodesrequest = nodegroupsaddnodesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsAddNodesCall) RequestId(requestId string) *NodeGroupsAddNodesCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NodeGroupsAddNodesCall) Fields(s ...googleapi.Field) *NodeGroupsAddNodesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NodeGroupsAddNodesCall) Context(ctx context.Context) *NodeGroupsAddNodesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NodeGroupsAddNodesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsaddnodesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.nodeGroups.addNodes" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Adds specified number of nodes to the node group.", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.addNodes", - // "parameterOrder": [ - // "project", - // "zone", - // "nodeGroup" - // ], - // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/addNodes", + // "path": "{project}/regions/{region}/backendServices/{backendService}", // "request": { - // "$ref": "NodeGroupsAddNodesRequest" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -80221,9 +95822,9 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.nodeGroups.aggregatedList": +// method id "compute.regionCommitments.aggregatedList": -type NodeGroupsAggregatedListCall struct { +type RegionCommitmentsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -80232,10 +95833,9 @@ type NodeGroupsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of node groups. Note: -// use nodeGroups.listNodes for more details about each group. 
-func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregatedListCall { - c := &NodeGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of commitments. +func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { + c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -80262,7 +95862,7 @@ func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregated // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *NodeGroupsAggregatedListCall) Filter(filter string) *NodeGroupsAggregatedListCall { +func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -80273,7 +95873,7 @@ func (c *NodeGroupsAggregatedListCall) Filter(filter string) *NodeGroupsAggregat // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NodeGroupsAggregatedListCall) MaxResults(maxResults int64) *NodeGroupsAggregatedListCall { +func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -80290,7 +95890,7 @@ func (c *NodeGroupsAggregatedListCall) MaxResults(maxResults int64) *NodeGroupsA // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *NodeGroupsAggregatedListCall) OrderBy(orderBy string) *NodeGroupsAggregatedListCall { +func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -80298,7 +95898,7 @@ func (c *NodeGroupsAggregatedListCall) OrderBy(orderBy string) *NodeGroupsAggreg // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAggregatedListCall { +func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -80306,7 +95906,7 @@ func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NodeGroupsAggregatedListCall { +func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80316,7 +95916,7 @@ func (c *NodeGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NodeGroupsA // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *NodeGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NodeGroupsAggregatedListCall { +func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *RegionCommitmentsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -80324,21 +95924,21 @@ func (c *NodeGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NodeGroups // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsAggregatedListCall) Context(ctx context.Context) *NodeGroupsAggregatedListCall { +func (c *RegionCommitmentsAggregatedListCall) Context(ctx context.Context) *RegionCommitmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsAggregatedListCall) Header() http.Header { +func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -80350,7 +95950,7 @@ func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/commitments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -80363,14 +95963,14 @@ func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.aggregatedList" call. -// Exactly one of *NodeGroupAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NodeGroupAggregatedList.ServerResponse.Header or (if a response was +// Do executes the "compute.regionCommitments.aggregatedList" call. +// Exactly one of *CommitmentAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *CommitmentAggregatedList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGroupAggregatedList, error) { +func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*CommitmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80389,7 +95989,7 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeGroupAggregatedList{ + ret := &CommitmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80401,9 +96001,9 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr } return ret, nil // { - // "description": "Retrieves an aggregated list of node groups. Note: use nodeGroups.listNodes for more details about each group.", + // "description": "Retrieves an aggregated list of commitments.", // "httpMethod": "GET", - // "id": "compute.nodeGroups.aggregatedList", + // "id": "compute.regionCommitments.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -80439,9 +96039,9 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // "type": "string" // } // }, - // "path": "{project}/aggregated/nodeGroups", + // "path": "{project}/aggregated/commitments", // "response": { - // "$ref": "NodeGroupAggregatedList" + // "$ref": "CommitmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -80455,7 +96055,7 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NodeGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NodeGroupAggregatedList) error) error { +func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func(*CommitmentAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -80473,103 +96073,99 @@ func (c *NodeGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NodeGr } } -// method id "compute.nodeGroups.delete": +// method id "compute.regionCommitments.get": -type NodeGroupsDeleteCall struct { - s *Service - project string - zone string - nodeGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionCommitmentsGetCall struct { + s *Service + project string + region string + commitment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified NodeGroup resource. -func (r *NodeGroupsService) Delete(project string, zone string, nodeGroup string) *NodeGroupsDeleteCall { - c := &NodeGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified commitment resource. Gets a list of +// available commitments by making a list() request. +func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { + c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsDeleteCall) RequestId(requestId string) *NodeGroupsDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.commitment = commitment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsDeleteCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteCall { +func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsDeleteCall) Context(ctx context.Context) *NodeGroupsDeleteCall { +func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsDeleteCall) Header() http.Header { +func (c *RegionCommitmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "region": c.region, + "commitment": c.commitment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionCommitments.get" call. +// Exactly one of *Commitment or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *Commitment.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80588,7 +96184,7 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Commitment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80600,19 +96196,19 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Deletes the specified NodeGroup resource.", - // "httpMethod": "DELETE", - // "id": "compute.nodeGroups.delete", + // "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionCommitments.get", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "region", + // "commitment" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource to delete.", + // "commitment": { + // "description": "Name of the commitment to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -80623,51 +96219,46 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "path": "{project}/regions/{region}/commitments/{commitment}", // "response": { - // "$ref": "Operation" + // "$ref": "Commitment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeGroups.deleteNodes": +// method id "compute.regionCommitments.insert": -type NodeGroupsDeleteNodesCall struct { - s *Service - project string - zone string - nodeGroup string - nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionCommitmentsInsertCall struct { + s *Service + project string + region string + commitment *Commitment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteNodes: Deletes specified nodes from the node group. -func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { - c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a commitment in the specified project using the data +// included in the request. +func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { + c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup - c.nodegroupsdeletenodesrequest = nodegroupsdeletenodesrequest + c.region = region + c.commitment = commitment return c } @@ -80685,7 +96276,7 @@ func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup s // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsDeleteNodesCall) RequestId(requestId string) *NodeGroupsDeleteNodesCall { +func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -80693,7 +96284,7 @@ func (c *NodeGroupsDeleteNodesCall) RequestId(requestId string) *NodeGroupsDelet // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsDeleteNodesCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteNodesCall { +func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80701,35 +96292,35 @@ func (c *NodeGroupsDeleteNodesCall) Fields(s ...googleapi.Field) *NodeGroupsDele // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsDeleteNodesCall) Context(ctx context.Context) *NodeGroupsDeleteNodesCall { +func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsDeleteNodesCall) Header() http.Header { +func (c *RegionCommitmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsdeletenodesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -80737,21 +96328,20 @@ func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.deleteNodes" call. +// Do executes the "compute.regionCommitments.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80782,26 +96372,25 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes specified nodes from the node group.", + // "description": "Creates a commitment in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.nodeGroups.deleteNodes", + // "id": "compute.regionCommitments.insert", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "region" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource to delete.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -80809,18 +96398,11 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes", + // "path": "{project}/regions/{region}/commitments", // "request": { - // "$ref": "NodeGroupsDeleteNodesRequest" + // "$ref": "Commitment" // }, // "response": { // "$ref": "Operation" @@ -80833,34 +96415,94 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.nodeGroups.get": +// method id "compute.regionCommitments.list": -type NodeGroupsGetCall struct { +type RegionCommitmentsListCall struct { s *Service project string - zone string - nodeGroup string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified NodeGroup. Get a list of available -// NodeGroups by making a list() request. Note: the "nodes" field should -// not be used. Use nodeGroups.listNodes instead. 
-func (r *NodeGroupsService) Get(project string, zone string, nodeGroup string) *NodeGroupsGetCall { - c := &NodeGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of commitments contained within the specified +// region. +func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { + c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionCommitmentsListCall) MaxResults(maxResults int64) *RegionCommitmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionCommitmentsListCall) OrderBy(orderBy string) *RegionCommitmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
+func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmentsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsGetCall) Fields(s ...googleapi.Field) *NodeGroupsGetCall { +func (c *RegionCommitmentsListCall) Fields(s ...googleapi.Field) *RegionCommitmentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80870,7 +96512,7 @@ func (c *NodeGroupsGetCall) Fields(s ...googleapi.Field) *NodeGroupsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeGroupsGetCall) IfNoneMatch(entityTag string) *NodeGroupsGetCall { +func (c *RegionCommitmentsListCall) IfNoneMatch(entityTag string) *RegionCommitmentsListCall { c.ifNoneMatch_ = entityTag return c } @@ -80878,21 +96520,21 @@ func (c *NodeGroupsGetCall) IfNoneMatch(entityTag string) *NodeGroupsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsGetCall) Context(ctx context.Context) *NodeGroupsGetCall { +func (c *RegionCommitmentsListCall) Context(ctx context.Context) *RegionCommitmentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsGetCall) Header() http.Header { +func (c *RegionCommitmentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -80904,7 +96546,7 @@ func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -80912,21 +96554,20 @@ func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.get" call. -// Exactly one of *NodeGroup or error will be non-nil. Any non-2xx +// Do executes the "compute.regionCommitments.list" call. +// Exactly one of *CommitmentList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *NodeGroup.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) { +// *CommitmentList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*CommitmentList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80945,7 +96586,7 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeGroup{ + ret := &CommitmentList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80957,20 +96598,35 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) } return ret, nil // { - // "description": "Returns the specified NodeGroup. Get a list of available NodeGroups by making a list() request. Note: the \"nodes\" field should not be used. Use nodeGroups.listNodes instead.", + // "description": "Retrieves a list of commitments contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.nodeGroups.get", + // "id": "compute.regionCommitments.list", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "region" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the node group to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -80980,17 +96636,17 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "path": "{project}/regions/{region}/commitments", // "response": { - // "$ref": "NodeGroup" + // "$ref": "CommitmentList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -81001,99 +96657,132 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) } -// method id "compute.nodeGroups.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionCommitmentsListCall) Pages(ctx context.Context, f func(*CommitmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NodeGroupsGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.regionCommitments.updateReservations": + +type RegionCommitmentsUpdateReservationsCall struct { + s *Service + project string + region string + commitment string + regioncommitmentsupdatereservationsrequest *RegionCommitmentsUpdateReservationsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *NodeGroupsService) GetIamPolicy(project string, zone string, resource string) *NodeGroupsGetIamPolicyCall { - c := &NodeGroupsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateReservations: Update the shape of reservations for GPUS/Local +// SSDs of reservations within the commitments. 
+func (r *RegionCommitmentsService) UpdateReservations(project string, region string, commitment string, regioncommitmentsupdatereservationsrequest *RegionCommitmentsUpdateReservationsRequest) *RegionCommitmentsUpdateReservationsCall { + c := &RegionCommitmentsUpdateReservationsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource + c.region = region + c.commitment = commitment + c.regioncommitmentsupdatereservationsrequest = regioncommitmentsupdatereservationsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionCommitmentsUpdateReservationsCall) RequestId(requestId string) *RegionCommitmentsUpdateReservationsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsGetIamPolicyCall { +func (c *RegionCommitmentsUpdateReservationsCall) Fields(s ...googleapi.Field) *RegionCommitmentsUpdateReservationsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeGroupsGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeGroupsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsGetIamPolicyCall) Context(ctx context.Context) *NodeGroupsGetIamPolicyCall { +func (c *RegionCommitmentsUpdateReservationsCall) Context(ctx context.Context) *RegionCommitmentsUpdateReservationsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeGroupsGetIamPolicyCall) Header() http.Header { +func (c *RegionCommitmentsUpdateReservationsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsUpdateReservationsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioncommitmentsupdatereservationsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}/updateReservations") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "region": c.region, + "commitment": c.commitment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.regionCommitments.updateReservations" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionCommitmentsUpdateReservationsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81112,7 +96801,7 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81122,155 +96811,152 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } - return ret, nil - // { - // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.nodeGroups.getIamPolicy", + return ret, nil + // { + // "description": "Update the shape of reservations for GPUS/Local SSDs of reservations within the commitments.", + // "httpMethod": "POST", + // "id": "compute.regionCommitments.updateReservations", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "region", + // "commitment" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "commitment": { + // "description": "Name of the commitment of which the reservation's capacities are being updated.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy", + // "path": "{project}/regions/{region}/commitments/{commitment}/updateReservations", + // "request": { + // "$ref": "RegionCommitmentsUpdateReservationsRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeGroups.insert": +// method id "compute.regionDiskTypes.get": -type NodeGroupsInsertCall struct { - s *Service - project string - zone string - nodegroup *NodeGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDiskTypesGetCall struct { + s *Service + project string + region string + diskType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a NodeGroup resource in the specified project using -// the data included in the request. -func (r *NodeGroupsService) Insert(project string, zone string, initialNodeCount int64, nodegroup *NodeGroup) *NodeGroupsInsertCall { - c := &NodeGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified regional disk type. Gets a list of +// available disk types by making a list() request. +func (r *RegionDiskTypesService) Get(project string, region string, diskType string) *RegionDiskTypesGetCall { + c := &RegionDiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.urlParams_.Set("initialNodeCount", fmt.Sprint(initialNodeCount)) - c.nodegroup = nodegroup - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsInsertCall) RequestId(requestId string) *NodeGroupsInsertCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.diskType = diskType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsInsertCall) Fields(s ...googleapi.Field) *NodeGroupsInsertCall { +func (c *RegionDiskTypesGetCall) Fields(s ...googleapi.Field) *RegionDiskTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionDiskTypesGetCall) IfNoneMatch(entityTag string) *RegionDiskTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsInsertCall) Context(ctx context.Context) *NodeGroupsInsertCall { +func (c *RegionDiskTypesGetCall) Context(ctx context.Context) *RegionDiskTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsInsertCall) Header() http.Header { +func (c *RegionDiskTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroup) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes/{diskType}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "region": c.region, + "diskType": c.diskType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionDiskTypes.get" call. +// Exactly one of *DiskType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskType.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81289,7 +96975,7 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &DiskType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81301,21 +96987,21 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Creates a NodeGroup resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.insert", + // "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionDiskTypes.get", // "parameterOrder": [ // "project", - // "zone", - // "initialNodeCount" + // "region", + // "diskType" // ], // "parameters": { - // "initialNodeCount": { - // "description": "Initial count of nodes in the node group.", - // "format": "int32", - // "location": "query", + // "diskType": { + // "description": "Name of the disk type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, - // "type": "integer" + // "type": "string" // }, // "project": { // "description": "Project ID for this request.", @@ -81324,53 +97010,45 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups", - // "request": { - // "$ref": "NodeGroup" - // }, + // "path": "{project}/regions/{region}/diskTypes/{diskType}", // "response": { - // "$ref": "Operation" + // "$ref": "DiskType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.nodeGroups.list": +// method id "compute.regionDiskTypes.list": -type NodeGroupsListCall struct { +type RegionDiskTypesListCall struct { s *Service project string - zone string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of node groups available to the specified -// project. Note: use nodeGroups.listNodes for more details about each -// group. -func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCall { - c := &NodeGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of regional disk types available to the +// specified project. +func (r *RegionDiskTypesService) List(project string, region string) *RegionDiskTypesListCall { + c := &RegionDiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone + c.region = region return c } @@ -81396,7 +97074,7 @@ func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCal // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *NodeGroupsListCall) Filter(filter string) *NodeGroupsListCall { +func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall { c.urlParams_.Set("filter", filter) return c } @@ -81407,7 +97085,7 @@ func (c *NodeGroupsListCall) Filter(filter string) *NodeGroupsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NodeGroupsListCall) MaxResults(maxResults int64) *NodeGroupsListCall { +func (c *RegionDiskTypesListCall) MaxResults(maxResults int64) *RegionDiskTypesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -81424,7 +97102,7 @@ func (c *NodeGroupsListCall) MaxResults(maxResults int64) *NodeGroupsListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *NodeGroupsListCall) OrderBy(orderBy string) *NodeGroupsListCall { +func (c *RegionDiskTypesListCall) OrderBy(orderBy string) *RegionDiskTypesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -81432,7 +97110,7 @@ func (c *NodeGroupsListCall) OrderBy(orderBy string) *NodeGroupsListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { +func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -81440,7 +97118,7 @@ func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsListCall) Fields(s ...googleapi.Field) *NodeGroupsListCall { +func (c *RegionDiskTypesListCall) Fields(s ...googleapi.Field) *RegionDiskTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -81450,7 +97128,7 @@ func (c *NodeGroupsListCall) Fields(s ...googleapi.Field) *NodeGroupsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeGroupsListCall) IfNoneMatch(entityTag string) *NodeGroupsListCall { +func (c *RegionDiskTypesListCall) IfNoneMatch(entityTag string) *RegionDiskTypesListCall { c.ifNoneMatch_ = entityTag return c } @@ -81458,21 +97136,21 @@ func (c *NodeGroupsListCall) IfNoneMatch(entityTag string) *NodeGroupsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsListCall) Context(ctx context.Context) *NodeGroupsListCall { +func (c *RegionDiskTypesListCall) Context(ctx context.Context) *RegionDiskTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsListCall) Header() http.Header { +func (c *RegionDiskTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -81484,7 +97162,7 @@ func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -81493,19 +97171,19 @@ func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.list" call. -// Exactly one of *NodeGroupList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *NodeGroupList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionDiskTypes.list" call. +// Exactly one of *RegionDiskTypeList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *RegionDiskTypeList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, error) { +func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81524,7 +97202,7 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeGroupList{ + ret := &RegionDiskTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81536,12 +97214,12 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e } return ret, nil // { - // "description": "Retrieves a list of node groups available to the specified project. Note: use nodeGroups.listNodes for more details about each group.", + // "description": "Retrieves a list of regional disk types available to the specified project.", // "httpMethod": "GET", - // "id": "compute.nodeGroups.list", + // "id": "compute.regionDiskTypes.list", // "parameterOrder": [ // "project", - // "zone" + // "region" // ], // "parameters": { // "filter": { @@ -81574,17 +97252,17 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups", + // "path": "{project}/regions/{region}/diskTypes", // "response": { - // "$ref": "NodeGroupList" + // "$ref": "RegionDiskTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -81598,7 +97276,7 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NodeGroupsListCall) Pages(ctx context.Context, f func(*NodeGroupList) error) error { +func (c *RegionDiskTypesListCall) Pages(ctx context.Context, f func(*RegionDiskTypeList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -81616,94 +97294,54 @@ func (c *NodeGroupsListCall) Pages(ctx context.Context, f func(*NodeGroupList) e } } -// method id "compute.nodeGroups.listNodes": +// method id "compute.regionDisks.addResourcePolicies": -type NodeGroupsListNodesCall struct { - s *Service - project string - zone string - nodeGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksAddResourcePoliciesCall struct { + s *Service + project string + region string + disk string + regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListNodes: Lists nodes in the node group. 
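The filter, maxResults, orderBy and pageToken parameters documented for this list call combine naturally with its Pages helper above. A short sketch under the same assumptions as the earlier sketches (assumed import path, Items as the list payload slice, placeholder project and region names):

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed path; match this package's importpath
)

// listRegionDiskTypes prints every disk type in a region whose name is not
// pd-standard, letting Pages walk the nextPageToken chain.
func listRegionDiskTypes(ctx context.Context, svc *compute.Service) error {
	return svc.RegionDiskTypes.List("my-project", "us-central1").
		Filter("name != pd-standard"). // filter grammar per the Filter doc comment above
		MaxResults(100).
		OrderBy("name"). // only name or "creationTimestamp desc" are accepted
		Pages(ctx, func(page *compute.RegionDiskTypeList) error {
			for _, dt := range page.Items {
				fmt.Println(dt.Name)
			}
			return nil
		})
}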
-func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup string) *NodeGroupsListNodesCall { - c := &NodeGroupsListNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddResourcePolicies: Adds existing resource policies to a regional +// disk. You can only add one policy which will be applied to this disk +// for scheduling snapshot creation. +func (r *RegionDisksService) AddResourcePolicies(project string, region string, disk string, regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest) *RegionDisksAddResourcePoliciesCall { + c := &RegionDisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NodeGroupsListNodesCall) Filter(filter string) *NodeGroupsListNodesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NodeGroupsListNodesCall) MaxResults(maxResults int64) *NodeGroupsListNodesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.region = region + c.disk = disk + c.regiondisksaddresourcepoliciesrequest = regiondisksaddresourcepoliciesrequest return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). 
Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NodeGroupsListNodesCall) OrderBy(orderBy string) *NodeGroupsListNodesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NodeGroupsListNodesCall) PageToken(pageToken string) *NodeGroupsListNodesCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionDisksAddResourcePoliciesCall) RequestId(requestId string) *RegionDisksAddResourcePoliciesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsListNodesCall) Fields(s ...googleapi.Field) *NodeGroupsListNodesCall { +func (c *RegionDisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksAddResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -81711,30 +97349,35 @@ func (c *NodeGroupsListNodesCall) Fields(s ...googleapi.Field) *NodeGroupsListNo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsListNodesCall) Context(ctx context.Context) *NodeGroupsListNodesCall { +func (c *RegionDisksAddResourcePoliciesCall) Context(ctx context.Context) *RegionDisksAddResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsListNodesCall) Header() http.Header { +func (c *RegionDisksAddResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksaddresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/addResourcePolicies") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -81742,21 +97385,21 @@ func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.listNodes" call. -// Exactly one of *NodeGroupsListNodes or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NodeGroupsListNodes.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsListNodes, error) { +// Do executes the "compute.regionDisks.addResourcePolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81775,7 +97418,7 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeGroupsListNodes{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81787,45 +97430,22 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL } return ret, nil // { - // "description": "Lists nodes in the node group.", + // "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", // "httpMethod": "POST", - // "id": "compute.nodeGroups.listNodes", + // "id": "compute.regionDisks.addResourcePolicies", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "region", + // "disk" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). 
By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "nodeGroup": { - // "description": "Name of the NodeGroup resource whose nodes you want to list.", + // "disk": { + // "description": "The disk name for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -81833,76 +97453,80 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/listNodes", + // "path": "{project}/regions/{region}/disks/{disk}/addResourcePolicies", + // "request": { + // "$ref": "RegionDisksAddResourcePoliciesRequest" + // }, // "response": { - // "$ref": "NodeGroupsListNodes" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NodeGroupsListNodesCall) Pages(ctx context.Context, f func(*NodeGroupsListNodes) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.nodeGroups.setIamPolicy": +// method id "compute.regionDisks.createSnapshot": -type NodeGroupsSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksCreateSnapshotCall struct { + s *Service + project string + region string + disk string + snapshot *Snapshot + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *NodeGroupsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *NodeGroupsSetIamPolicyCall { - c := &NodeGroupsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateSnapshot: Creates a snapshot of this regional disk. +func (r *RegionDisksService) CreateSnapshot(project string, region string, disk string, snapshot *Snapshot) *RegionDisksCreateSnapshotCall { + c := &RegionDisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + c.region = region + c.disk = disk + c.snapshot = snapshot + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
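A sketch of the addResourcePolicies call whose definition ends above, which attaches a single snapshot-schedule policy to a regional disk. The ResourcePolicies field name is taken as an assumption from the API reference, and the policy URL and resource names are placeholders.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed path; match this package's importpath
)

// addSnapshotSchedule attaches one resource policy (the only kind the call
// accepts, per its description) to a regional disk.
func addSnapshotSchedule(ctx context.Context, svc *compute.Service, policyURL string) error {
	req := &compute.RegionDisksAddResourcePoliciesRequest{
		ResourcePolicies: []string{policyURL}, // assumed field name; check the generated struct
	}
	op, err := svc.RegionDisks.
		AddResourcePolicies("my-project", "us-central1", "my-regional-disk", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("operation:", op.Name, "status:", op.Status)
	return nil
}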
+func (c *RegionDisksCreateSnapshotCall) RequestId(requestId string) *RegionDisksCreateSnapshotCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsSetIamPolicyCall { +func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisksCreateSnapshotCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -81910,35 +97534,35 @@ func (c *NodeGroupsSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeGroupsSet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsSetIamPolicyCall) Context(ctx context.Context) *NodeGroupsSetIamPolicyCall { +func (c *RegionDisksCreateSnapshotCall) Context(ctx context.Context) *RegionDisksCreateSnapshotCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsSetIamPolicyCall) Header() http.Header { +func (c *RegionDisksCreateSnapshotCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/createSnapshot") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -81946,21 +97570,21 @@ func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.regionDisks.createSnapshot" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81979,7 +97603,7 @@ func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81991,43 +97615,48 @@ func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Creates a snapshot of this regional disk.", // "httpMethod": "POST", - // "id": "compute.nodeGroups.setIamPolicy", + // "id": "compute.regionDisks.createSnapshot", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "region", + // "disk" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "disk": { + // "description": "Name of the regional persistent disk to snapshot.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy", + // "path": "{project}/regions/{region}/disks/{disk}/createSnapshot", // "request": { - // "$ref": "ZoneSetPolicyRequest" + // "$ref": "Snapshot" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -82037,26 +97666,27 @@ func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } -// method id "compute.nodeGroups.setNodeTemplate": +// method id "compute.regionDisks.delete": -type NodeGroupsSetNodeTemplateCall struct { - s *Service - project string - zone string - nodeGroup string - nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksDeleteCall struct { + s *Service + project string + region string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetNodeTemplate: Updates the node template of the node group. -func (r *NodeGroupsService) SetNodeTemplate(project string, zone string, nodeGroup string, nodegroupssetnodetemplaterequest *NodeGroupsSetNodeTemplateRequest) *NodeGroupsSetNodeTemplateCall { - c := &NodeGroupsSetNodeTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified regional persistent disk. Deleting a +// regional disk removes all the replicas of its data permanently and is +// irreversible. However, deleting a disk does not delete any snapshots +// previously made from the disk. You must separately delete snapshots. +func (r *RegionDisksService) Delete(project string, region string, disk string) *RegionDisksDeleteCall { + c := &RegionDisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeGroup = nodeGroup - c.nodegroupssetnodetemplaterequest = nodegroupssetnodetemplaterequest + c.region = region + c.disk = disk return c } @@ -82074,7 +97704,7 @@ func (r *NodeGroupsService) SetNodeTemplate(project string, zone string, nodeGro // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsSetNodeTemplateCall) RequestId(requestId string) *NodeGroupsSetNodeTemplateCall { +func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -82082,7 +97712,7 @@ func (c *NodeGroupsSetNodeTemplateCall) RequestId(requestId string) *NodeGroupsS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsSetNodeTemplateCall) Fields(s ...googleapi.Field) *NodeGroupsSetNodeTemplateCall { +func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82090,57 +97720,52 @@ func (c *NodeGroupsSetNodeTemplateCall) Fields(s ...googleapi.Field) *NodeGroups // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
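A sketch of the createSnapshot call defined above, paired with a generated request ID so the mutation is safe to retry. github.com/google/uuid is simply one convenient UUID source; the Snapshot.Name field and the resource names are assumptions for the sketch.

package example

import (
	"context"

	"github.com/google/uuid"
	compute "google.golang.org/api/compute/v0.beta" // assumed path; match this package's importpath
)

// snapshotRegionalDisk starts a snapshot of a regional persistent disk and
// returns the long-running Operation that tracks it.
func snapshotRegionalDisk(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	snap := &compute.Snapshot{Name: "my-disk-snapshot-001"}
	return svc.RegionDisks.
		CreateSnapshot("my-project", "us-central1", "my-regional-disk", snap).
		RequestId(uuid.New().String()). // non-zero UUID, as the parameter requires
		Context(ctx).
		Do()
}

Polling the returned Operation (for example through the region operations API) until it reports DONE is left to the caller.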
-func (c *NodeGroupsSetNodeTemplateCall) Context(ctx context.Context) *NodeGroupsSetNodeTemplateCall { +func (c *RegionDisksDeleteCall) Context(ctx context.Context) *RegionDisksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsSetNodeTemplateCall) Header() http.Header { +func (c *RegionDisksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupssetnodetemplaterequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, + "project": c.project, + "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.setNodeTemplate" call. +// Do executes the "compute.regionDisks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82171,19 +97796,18 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Updates the node template of the node group.", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.setNodeTemplate", + // "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. 
You must separately delete snapshots.", + // "httpMethod": "DELETE", + // "id": "compute.regionDisks.delete", // "parameterOrder": [ // "project", - // "zone", - // "nodeGroup" + // "region", + // "disk" // ], // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource to delete.", + // "disk": { + // "description": "Name of the regional persistent disk to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -82194,23 +97818,20 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}/setNodeTemplate", - // "request": { - // "$ref": "NodeGroupsSetNodeTemplateRequest" - // }, + // "path": "{project}/regions/{region}/disks/{disk}", // "response": { // "$ref": "Operation" // }, @@ -82222,92 +97843,98 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.nodeGroups.testIamPermissions": +// method id "compute.regionDisks.get": -type NodeGroupsTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksGetCall struct { + s *Service + project string + region string + disk string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
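Finally, a sketch of the regional disk delete call documented above; as its description notes, deletion removes all replicas of the disk's data but leaves previously created snapshots intact. Setup and names are assumed as in the earlier sketches.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed path; match this package's importpath
)

// deleteRegionalDisk deletes a regional persistent disk and reports the
// operation that performs the deletion. Snapshots of the disk are unaffected.
func deleteRegionalDisk(ctx context.Context, svc *compute.Service) error {
	op, err := svc.RegionDisks.
		Delete("my-project", "us-central1", "my-regional-disk").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("delete started: operation %s (%s)\n", op.Name, op.Status)
	return nil
}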
-func (r *NodeGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeGroupsTestIamPermissionsCall { - c := &NodeGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns a specified regional persistent disk. +func (r *RegionDisksService) Get(project string, region string, disk string) *RegionDisksGetCall { + c := &RegionDisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.region = region + c.disk = disk return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeGroupsTestIamPermissionsCall { +func (c *RegionDisksGetCall) Fields(s ...googleapi.Field) *RegionDisksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionDisksGetCall) IfNoneMatch(entityTag string) *RegionDisksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeGroupsTestIamPermissionsCall) Context(ctx context.Context) *NodeGroupsTestIamPermissionsCall { +func (c *RegionDisksGetCall) Context(ctx context.Context) *RegionDisksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeGroupsTestIamPermissionsCall) Header() http.Header { +func (c *RegionDisksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.regionDisks.get" call. +// Exactly one of *Disk or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Disk.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82326,7 +97953,7 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Disk{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82338,43 +97965,40 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.testIamPermissions", + // "description": "Returns a specified regional persistent disk.", + // "httpMethod": "GET", + // "id": "compute.regionDisks.get", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "region", + // "disk" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "disk": { + // "description": "Name of the regional persistent disk to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/disks/{disk}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Disk" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -82385,91 +98009,33 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te } -// method id "compute.nodeTemplates.aggregatedList": +// method id "compute.regionDisks.getIamPolicy": -type NodeTemplatesAggregatedListCall struct { +type RegionDisksGetIamPolicyCall struct { s *Service project string + region string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of node templates. -func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggregatedListCall { - c := &NodeTemplatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *RegionDisksService) GetIamPolicy(project string, region string, resource string) *RegionDisksGetIamPolicyCall { + c := &RegionDisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NodeTemplatesAggregatedListCall) Filter(filter string) *NodeTemplatesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NodeTemplatesAggregatedListCall) MaxResults(maxResults int64) *NodeTemplatesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. 
By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NodeTemplatesAggregatedListCall) OrderBy(orderBy string) *NodeTemplatesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NodeTemplatesAggregatedListCall) PageToken(pageToken string) *NodeTemplatesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTemplatesAggregatedListCall { +func (c *RegionDisksGetIamPolicyCall) Fields(s ...googleapi.Field) *RegionDisksGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82479,7 +98045,7 @@ func (c *NodeTemplatesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTemp // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeTemplatesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTemplatesAggregatedListCall { +func (c *RegionDisksGetIamPolicyCall) IfNoneMatch(entityTag string) *RegionDisksGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -82487,21 +98053,21 @@ func (c *NodeTemplatesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTem // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesAggregatedListCall) Context(ctx context.Context) *NodeTemplatesAggregatedListCall { +func (c *RegionDisksGetIamPolicyCall) Context(ctx context.Context) *RegionDisksGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesAggregatedListCall) Header() http.Header { +func (c *RegionDisksGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -82513,7 +98079,7 @@ func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/getIamPolicy") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -82521,19 +98087,21 @@ func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.aggregatedList" call. -// Exactly one of *NodeTemplateAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *NodeTemplateAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateAggregatedList, error) { +// Do executes the "compute.regionDisks.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionDisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82552,7 +98120,7 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTemplateAggregatedList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82564,47 +98132,40 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod } return ret, nil // { - // "description": "Retrieves an aggregated list of node templates.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.nodeTemplates.aggregatedList", + // "id": "compute.regionDisks.getIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/nodeTemplates", + // "path": "{project}/regions/{region}/disks/{resource}/getIamPolicy", // "response": { - // "$ref": "NodeTemplateAggregatedList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -82615,45 +98176,25 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *NodeTemplatesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTemplateAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.nodeTemplates.delete": +// method id "compute.regionDisks.insert": -type NodeTemplatesDeleteCall struct { - s *Service - project string - region string - nodeTemplate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksInsertCall struct { + s *Service + project string + region string + disk *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified NodeTemplate resource. -func (r *NodeTemplatesService) Delete(project string, region string, nodeTemplate string) *NodeTemplatesDeleteCall { - c := &NodeTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a persistent regional disk in the specified project +// using the data included in the request. +func (r *RegionDisksService) Insert(project string, region string, disk *Disk) *RegionDisksInsertCall { + c := &RegionDisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.nodeTemplate = nodeTemplate + c.disk = disk return c } @@ -82671,15 +98212,22 @@ func (r *NodeTemplatesService) Delete(project string, region string, nodeTemplat // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeTemplatesDeleteCall) RequestId(requestId string) *NodeTemplatesDeleteCall { +func (c *RegionDisksInsertCall) RequestId(requestId string) *RegionDisksInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// SourceImage sets the optional parameter "sourceImage": Source image +// to restore onto a disk. +func (c *RegionDisksInsertCall) SourceImage(sourceImage string) *RegionDisksInsertCall { + c.urlParams_.Set("sourceImage", sourceImage) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesDeleteCall) Fields(s ...googleapi.Field) *NodeTemplatesDeleteCall { +func (c *RegionDisksInsertCall) Fields(s ...googleapi.Field) *RegionDisksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82687,52 +98235,56 @@ func (c *NodeTemplatesDeleteCall) Fields(s ...googleapi.Field) *NodeTemplatesDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesDeleteCall) Context(ctx context.Context) *NodeTemplatesDeleteCall { +func (c *RegionDisksInsertCall) Context(ctx context.Context) *RegionDisksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeTemplatesDeleteCall) Header() http.Header { +func (c *RegionDisksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "nodeTemplate": c.nodeTemplate, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.delete" call. +// Do executes the "compute.regionDisks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82763,22 +98315,14 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified NodeTemplate resource.", - // "httpMethod": "DELETE", - // "id": "compute.nodeTemplates.delete", + // "description": "Creates a persistent regional disk in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionDisks.insert", // "parameterOrder": [ // "project", - // "region", - // "nodeTemplate" + // "region" // ], // "parameters": { - // "nodeTemplate": { - // "description": "Name of the NodeTemplate resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -82787,7 +98331,7 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -82797,9 +98341,17 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "sourceImage": { + // "description": "Optional. Source image to restore onto a disk.", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + // "path": "{project}/regions/{region}/disks", + // "request": { + // "$ref": "Disk" + // }, // "response": { // "$ref": "Operation" // }, @@ -82811,33 +98363,94 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.nodeTemplates.get": +// method id "compute.regionDisks.list": -type NodeTemplatesGetCall struct { +type RegionDisksListCall struct { s *Service project string region string - nodeTemplate string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified node template. Gets a list of available -// node templates by making a list() request. -func (r *NodeTemplatesService) Get(project string, region string, nodeTemplate string) *NodeTemplatesGetCall { - c := &NodeTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of persistent disks contained within the +// specified region. +func (r *RegionDisksService) List(project string, region string) *RegionDisksListCall { + c := &RegionDisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.nodeTemplate = nodeTemplate + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
+func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionDisksListCall) MaxResults(maxResults int64) *RegionDisksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionDisksListCall) OrderBy(orderBy string) *RegionDisksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesGetCall) Fields(s ...googleapi.Field) *NodeTemplatesGetCall { +func (c *RegionDisksListCall) Fields(s ...googleapi.Field) *RegionDisksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82847,7 +98460,7 @@ func (c *NodeTemplatesGetCall) Fields(s ...googleapi.Field) *NodeTemplatesGetCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeTemplatesGetCall) IfNoneMatch(entityTag string) *NodeTemplatesGetCall { +func (c *RegionDisksListCall) IfNoneMatch(entityTag string) *RegionDisksListCall { c.ifNoneMatch_ = entityTag return c } @@ -82855,21 +98468,21 @@ func (c *NodeTemplatesGetCall) IfNoneMatch(entityTag string) *NodeTemplatesGetCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesGetCall) Context(ctx context.Context) *NodeTemplatesGetCall { +func (c *RegionDisksListCall) Context(ctx context.Context) *RegionDisksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeTemplatesGetCall) Header() http.Header { +func (c *RegionDisksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -82881,7 +98494,7 @@ func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{nodeTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -82889,21 +98502,20 @@ func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "nodeTemplate": c.nodeTemplate, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.get" call. -// Exactly one of *NodeTemplate or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *NodeTemplate.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionDisks.list" call. +// Exactly one of *DiskList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, error) { +func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82922,7 +98534,7 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTemplate{ + ret := &DiskList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82934,20 +98546,35 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, } return ret, nil // { - // "description": "Returns the specified node template. Gets a list of available node templates by making a list() request.", + // "description": "Retrieves the list of persistent disks contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.nodeTemplates.get", + // "id": "compute.regionDisks.list", // "parameterOrder": [ // "project", - // "region", - // "nodeTemplate" + // "region" // ], // "parameters": { - // "nodeTemplate": { - // "description": "Name of the node template to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -82958,16 +98585,16 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{nodeTemplate}", + // "path": "{project}/regions/{region}/disks", // "response": { - // "$ref": "NodeTemplate" + // "$ref": "DiskList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -82978,99 +98605,132 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, } -// method id "compute.nodeTemplates.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *RegionDisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NodeTemplatesGetIamPolicyCall struct { - s *Service - project string - region string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.regionDisks.removeResourcePolicies": + +type RegionDisksRemoveResourcePoliciesCall struct { + s *Service + project string + region string + disk string + regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *NodeTemplatesService) GetIamPolicy(project string, region string, resource string) *NodeTemplatesGetIamPolicyCall { - c := &NodeTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveResourcePolicies: Removes resource policies from a regional +// disk. +func (r *RegionDisksService) RemoveResourcePolicies(project string, region string, disk string, regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest) *RegionDisksRemoveResourcePoliciesCall { + c := &RegionDisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource + c.disk = disk + c.regiondisksremoveresourcepoliciesrequest = regiondisksremoveresourcepoliciesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionDisksRemoveResourcePoliciesCall) RequestId(requestId string) *RegionDisksRemoveResourcePoliciesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesGetIamPolicyCall { +func (c *RegionDisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksRemoveResourcePoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *NodeTemplatesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesGetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesGetIamPolicyCall { +func (c *RegionDisksRemoveResourcePoliciesCall) Context(ctx context.Context) *RegionDisksRemoveResourcePoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesGetIamPolicyCall) Header() http.Header { +func (c *RegionDisksRemoveResourcePoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksremoveresourcepoliciesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/removeResourcePolicies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.regionDisks.removeResourcePolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83089,7 +98749,7 @@ func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83101,15 +98761,22 @@ func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.nodeTemplates.getIamPolicy", + // "description": "Removes resource policies from a regional disk.", + // "httpMethod": "POST", + // "id": "compute.regionDisks.removeResourcePolicies", // "parameterOrder": [ // "project", // "region", - // "resource" + // "disk" // ], // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -83124,46 +98791,47 @@ func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy", + // "path": "{project}/regions/{region}/disks/{disk}/removeResourcePolicies", + // "request": { + // "$ref": "RegionDisksRemoveResourcePoliciesRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeTemplates.insert": +// method id "compute.regionDisks.resize": -type NodeTemplatesInsertCall struct { - s *Service - project string - region string - nodetemplate *NodeTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksResizeCall struct { + s *Service + project string + region string + disk string + regiondisksresizerequest *RegionDisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a NodeTemplate resource in the specified project -// using the data included in the request. -func (r *NodeTemplatesService) Insert(project string, region string, nodetemplate *NodeTemplate) *NodeTemplatesInsertCall { - c := &NodeTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the specified regional persistent disk. +func (r *RegionDisksService) Resize(project string, region string, disk string, regiondisksresizerequest *RegionDisksResizeRequest) *RegionDisksResizeCall { + c := &RegionDisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.nodetemplate = nodetemplate + c.disk = disk + c.regiondisksresizerequest = regiondisksresizerequest return c } @@ -83181,7 +98849,7 @@ func (r *NodeTemplatesService) Insert(project string, region string, nodetemplat // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeTemplatesInsertCall) RequestId(requestId string) *NodeTemplatesInsertCall { +func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -83189,7 +98857,7 @@ func (c *NodeTemplatesInsertCall) RequestId(requestId string) *NodeTemplatesInse // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesInsertCall) Fields(s ...googleapi.Field) *NodeTemplatesInsertCall { +func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83197,35 +98865,35 @@ func (c *NodeTemplatesInsertCall) Fields(s ...googleapi.Field) *NodeTemplatesIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *NodeTemplatesInsertCall) Context(ctx context.Context) *NodeTemplatesInsertCall { +func (c *RegionDisksResizeCall) Context(ctx context.Context) *RegionDisksResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesInsertCall) Header() http.Header { +func (c *RegionDisksResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodetemplate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksresizerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/resize") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -83235,18 +98903,19 @@ func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.insert" call. +// Do executes the "compute.regionDisks.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83277,23 +98946,31 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a NodeTemplate resource in the specified project using the data included in the request.", + // "description": "Resizes the specified regional persistent disk.", // "httpMethod": "POST", - // "id": "compute.nodeTemplates.insert", + // "id": "compute.regionDisks.resize", // "parameterOrder": [ // "project", - // "region" + // "region", + // "disk" // ], // "parameters": { + // "disk": { + // "description": "Name of the regional persistent disk.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { - // "description": "Project ID for this request.", + // "description": "The project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -83305,9 +98982,9 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates", + // "path": "{project}/regions/{region}/disks/{disk}/resize", // "request": { - // "$ref": "NodeTemplate" + // "$ref": "RegionDisksResizeRequest" // }, // "response": { // "$ref": "Operation" @@ -83320,159 +98997,92 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.nodeTemplates.list": +// method id "compute.regionDisks.setIamPolicy": -type NodeTemplatesListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionDisksSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of node templates available to the specified -// project. -func (r *NodeTemplatesService) List(project string, region string) *NodeTemplatesListCall { - c := &NodeTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *RegionDisksService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionDisksSetIamPolicyCall { + c := &RegionDisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. 
-// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NodeTemplatesListCall) Filter(filter string) *NodeTemplatesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NodeTemplatesListCall) MaxResults(maxResults int64) *NodeTemplatesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NodeTemplatesListCall) OrderBy(orderBy string) *NodeTemplatesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NodeTemplatesListCall) PageToken(pageToken string) *NodeTemplatesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesListCall) Fields(s ...googleapi.Field) *NodeTemplatesListCall { +func (c *RegionDisksSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionDisksSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *NodeTemplatesListCall) IfNoneMatch(entityTag string) *NodeTemplatesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesListCall) Context(ctx context.Context) *NodeTemplatesListCall { +func (c *RegionDisksSetIamPolicyCall) Context(ctx context.Context) *RegionDisksSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesListCall) Header() http.Header { +func (c *RegionDisksSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err } - var body io.Reader = nil + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.list" call. -// Exactly one of *NodeTemplateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NodeTemplateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateList, error) { +// Do executes the "compute.regionDisks.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionDisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83491,7 +99101,7 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTemplateList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83503,37 +99113,15 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL } return ret, nil // { - // "description": "Retrieves a list of node templates available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.nodeTemplates.list", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.regionDisks.setIamPolicy", // "parameterOrder": [ // "project", - // "region" + // "region", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -83547,70 +99135,76 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates", + // "path": "{project}/regions/{region}/disks/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, // "response": { - // "$ref": "NodeTemplateList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NodeTemplatesListCall) Pages(ctx context.Context, f func(*NodeTemplateList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.nodeTemplates.setIamPolicy": +// method id "compute.regionDisks.setLabels": -type NodeTemplatesSetIamPolicyCall struct { +type RegionDisksSetLabelsCall struct { s *Service project string region string resource string - regionsetpolicyrequest *RegionSetPolicyRequest + regionsetlabelsrequest *RegionSetLabelsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *NodeTemplatesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NodeTemplatesSetIamPolicyCall { - c := &NodeTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on the target regional disk. +func (r *RegionDisksService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionDisksSetLabelsCall { + c := &RegionDisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.resource = resource - c.regionsetpolicyrequest = regionsetpolicyrequest + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLabelsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTemplatesSetIamPolicyCall { +func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83618,35 +99212,35 @@ func (c *NodeTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *NodeTempla // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesSetIamPolicyCall) Context(ctx context.Context) *NodeTemplatesSetIamPolicyCall { +func (c *RegionDisksSetLabelsCall) Context(ctx context.Context) *RegionDisksSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTemplatesSetIamPolicyCall) Header() http.Header { +func (c *RegionDisksSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -83661,14 +99255,14 @@ func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.regionDisks.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83687,7 +99281,7 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -83699,9 +99293,9 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Sets the labels on the target regional disk.", // "httpMethod": "POST", - // "id": "compute.nodeTemplates.setIamPolicy", + // "id": "compute.regionDisks.setLabels", // "parameterOrder": [ // "project", // "region", @@ -83716,12 +99310,17 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "The region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", @@ -83730,12 +99329,12 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy", + // "path": "{project}/regions/{region}/disks/{resource}/setLabels", // "request": { - // "$ref": "RegionSetPolicyRequest" + // "$ref": "RegionSetLabelsRequest" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -83745,9 +99344,9 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic } -// method id "compute.nodeTemplates.testIamPermissions": +// method id "compute.regionDisks.testIamPermissions": -type NodeTemplatesTestIamPermissionsCall struct { +type RegionDisksTestIamPermissionsCall struct { s *Service project string region string @@ -83760,8 +99359,8 @@ type NodeTemplatesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. -func (r *NodeTemplatesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NodeTemplatesTestIamPermissionsCall { - c := &NodeTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionDisksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionDisksTestIamPermissionsCall { + c := &RegionDisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.resource = resource @@ -83772,7 +99371,7 @@ func (r *NodeTemplatesService) TestIamPermissions(project string, region string, // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *NodeTemplatesTestIamPermissionsCall { +func (c *RegionDisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionDisksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -83780,21 +99379,21 @@ func (c *NodeTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Node // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTemplatesTestIamPermissionsCall) Context(ctx context.Context) *NodeTemplatesTestIamPermissionsCall { +func (c *RegionDisksTestIamPermissionsCall) Context(ctx context.Context) *RegionDisksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
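The RequestId parameter documented above makes retries of compute.regionDisks.setLabels idempotent. A hedged sketch of how a caller might lean on that is below; the Labels and LabelFingerprint fields on RegionSetLabelsRequest and the github.com/google/uuid helper are assumptions not shown in this diff.

// Illustrative only: apply labels to a regional disk with an idempotent request ID.
// Assumed imports: "context", "github.com/google/uuid",
// compute "google.golang.org/api/compute/v1".
func setRegionDiskLabels(ctx context.Context, svc *compute.Service, project, region, disk, fingerprint string, labels map[string]string) (*compute.Operation, error) {
	req := &compute.RegionSetLabelsRequest{
		Labels:           labels,      // field names assumed, per the compute/v1 surface
		LabelFingerprint: fingerprint, // current fingerprint read from the disk beforehand
	}
	// Reusing the same request ID on a retry lets the server drop the duplicate,
	// as the RequestId documentation above explains.
	requestID := uuid.New().String()
	return svc.RegionDisks.SetLabels(project, region, disk, req).
		RequestId(requestID).
		Context(ctx).
		Do()
}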
-func (c *NodeTemplatesTestIamPermissionsCall) Header() http.Header { +func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -83808,7 +99407,7 @@ func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Respo reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -83823,14 +99422,14 @@ func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTemplates.testIamPermissions" call. +// Do executes the "compute.regionDisks.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -83863,7 +99462,7 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.nodeTemplates.testIamPermissions", + // "id": "compute.regionDisks.testIamPermissions", // "parameterOrder": [ // "project", // "region", @@ -83892,7 +99491,7 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // } // }, - // "path": "{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/disks/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -83908,91 +99507,208 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.nodeTypes.aggregatedList": +// method id "compute.regionHealthChecks.delete": -type NodeTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionHealthChecksDeleteCall struct { + s *Service + project string + region string + healthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of node types. 
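As a quick illustration of the compute.regionDisks.testIamPermissions plumbing above, the sketch below checks whether the caller holds one permission on a regional disk. The Permissions fields on TestPermissionsRequest/TestPermissionsResponse and the permission string are assumptions, not taken from this diff.

// Illustrative only: check whether the caller may delete a regional disk.
// Assumed imports: "context", compute "google.golang.org/api/compute/v1".
func canDeleteRegionDisk(ctx context.Context, svc *compute.Service, project, region, disk string) (bool, error) {
	req := &compute.TestPermissionsRequest{
		// Permission name chosen for illustration.
		Permissions: []string{"compute.disks.delete"},
	}
	resp, err := svc.RegionDisks.TestIamPermissions(project, region, disk, req).Context(ctx).Do()
	if err != nil {
		return false, err
	}
	// The response echoes back the subset of requested permissions the caller holds.
	for _, p := range resp.Permissions {
		if p == "compute.disks.delete" {
			return true, nil
		}
	}
	return false, nil
}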
-func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedListCall { - c := &NodeTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HealthCheck resource. +func (r *RegionHealthChecksService) Delete(project string, region string, healthCheck string) *RegionHealthChecksDeleteCall { + c := &RegionHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.healthCheck = healthCheck return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *NodeTypesAggregatedListCall) Filter(filter string) *NodeTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionHealthChecksDeleteCall) RequestId(requestId string) *RegionHealthChecksDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NodeTypesAggregatedListCall) MaxResults(maxResults int64) *NodeTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *RegionHealthChecksDeleteCall) Fields(s ...googleapi.Field) *RegionHealthChecksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NodeTypesAggregatedListCall) OrderBy(orderBy string) *NodeTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthChecksDeleteCall) Context(ctx context.Context) *RegionHealthChecksDeleteCall { + c.ctx_ = ctx return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionHealthChecksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthChecks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified HealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionHealthChecks.delete", + // "parameterOrder": [ + // "project", + // "region", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionHealthChecks.get": + +type RegionHealthChecksGetCall struct { + s *Service + project string + region string + healthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified HealthCheck resource. Gets a list of +// available health checks by making a list() request. +func (r *RegionHealthChecksService) Get(project string, region string, healthCheck string) *RegionHealthChecksGetCall { + c := &RegionHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.healthCheck = healthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
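To show how the RegionHealthChecksGetCall introduced here might be consumed, including the conditional fetch enabled by the IfNoneMatch setter defined just below, a hedged sketch follows; the cached-ETag plumbing is purely illustrative and not part of the vendored file.

// Illustrative only: fetch a regional health check, honouring a previously cached ETag.
// Assumed imports: "context", compute "google.golang.org/api/compute/v1",
// "google.golang.org/api/googleapi".
func getRegionHealthCheck(ctx context.Context, svc *compute.Service, project, region, name, cachedETag string) (*compute.HealthCheck, bool, error) {
	call := svc.RegionHealthChecks.Get(project, region, name).Context(ctx)
	if cachedETag != "" {
		// Ask the server to skip the body if nothing has changed since cachedETag.
		call = call.IfNoneMatch(cachedETag)
	}
	hc, err := call.Do()
	if googleapi.IsNotModified(err) {
		// 304: the cached copy is still current.
		return nil, true, nil
	}
	if err != nil {
		return nil, false, err
	}
	return hc, false, nil
}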
-func (c *NodeTypesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTypesAggregatedListCall { +func (c *RegionHealthChecksGetCall) Fields(s ...googleapi.Field) *RegionHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -84002,7 +99718,7 @@ func (c *NodeTypesAggregatedListCall) Fields(s ...googleapi.Field) *NodeTypesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeTypesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTypesAggregatedListCall { +func (c *RegionHealthChecksGetCall) IfNoneMatch(entityTag string) *RegionHealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -84010,21 +99726,21 @@ func (c *NodeTypesAggregatedListCall) IfNoneMatch(entityTag string) *NodeTypesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTypesAggregatedListCall) Context(ctx context.Context) *NodeTypesAggregatedListCall { +func (c *RegionHealthChecksGetCall) Context(ctx context.Context) *RegionHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NodeTypesAggregatedListCall) Header() http.Header { +func (c *RegionHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -84036,7 +99752,7 @@ func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/nodeTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -84044,19 +99760,21 @@ func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTypes.aggregatedList" call. -// Exactly one of *NodeTypeAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NodeTypeAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTypeAggregatedList, error) { +// Do executes the "compute.regionHealthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheck.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84075,7 +99793,7 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTypeAggregatedList{ + ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -84087,34 +99805,20 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of node types.", + // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.nodeTypes.aggregatedList", + // "id": "compute.regionHealthChecks.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "healthCheck" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -84123,135 +99827,130 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/nodeTypes", + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", // "response": { - // "$ref": "NodeTypeAggregatedList" + // "$ref": "HealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NodeTypesAggregatedListCall) Pages(ctx context.Context, f func(*NodeTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + } -// method id "compute.nodeTypes.get": +// method id "compute.regionHealthChecks.insert": -type NodeTypesGetCall struct { - s *Service - project string - zone string - nodeType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionHealthChecksInsertCall struct { + s *Service + project string + region string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified node type. Gets a list of available node -// types by making a list() request. -func (r *NodeTypesService) Get(project string, zone string, nodeType string) *NodeTypesGetCall { - c := &NodeTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. 
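Before the Insert plumbing that follows, here is a hedged caller-side sketch of creating a regional health check. The HealthCheck field names used (Type, TcpHealthCheck, Port, CheckIntervalSec, TimeoutSec) are assumed from the public compute/v1 surface and do not appear in this diff.

// Illustrative only: create a simple TCP health check in a region.
// Assumed imports: "context", compute "google.golang.org/api/compute/v1".
func insertRegionTCPHealthCheck(ctx context.Context, svc *compute.Service, project, region, name string, port int64) (*compute.Operation, error) {
	hc := &compute.HealthCheck{
		Name:             name,
		Type:             "TCP",                               // assumed enum value
		TcpHealthCheck:   &compute.TCPHealthCheck{Port: port}, // assumed nested type and field
		CheckIntervalSec: 10,
		TimeoutSec:       5,
	}
	// The returned Operation still has to be polled for completion; that is out of
	// scope for this sketch.
	return svc.RegionHealthChecks.Insert(project, region, hc).Context(ctx).Do()
}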
+func (r *RegionHealthChecksService) Insert(project string, region string, healthcheck *HealthCheck) *RegionHealthChecksInsertCall { + c := &RegionHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.nodeType = nodeType + c.region = region + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionHealthChecksInsertCall) RequestId(requestId string) *RegionHealthChecksInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTypesGetCall) Fields(s ...googleapi.Field) *NodeTypesGetCall { +func (c *RegionHealthChecksInsertCall) Fields(s ...googleapi.Field) *RegionHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NodeTypesGetCall) IfNoneMatch(entityTag string) *NodeTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTypesGetCall) Context(ctx context.Context) *NodeTypesGetCall { +func (c *RegionHealthChecksInsertCall) Context(ctx context.Context) *RegionHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeTypesGetCall) Header() http.Header { +func (c *RegionHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes/{nodeType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeType": c.nodeType, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTypes.get" call. -// Exactly one of *NodeType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *NodeType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionHealthChecks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { +func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84270,7 +99969,7 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -84282,22 +99981,14 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { } return ret, nil // { - // "description": "Returns the specified node type. 
Gets a list of available node types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.nodeTypes.get", + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionHealthChecks.insert", // "parameterOrder": [ // "project", - // "zone", - // "nodeType" + // "region" // ], // "parameters": { - // "nodeType": { - // "description": "Name of the node type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -84305,45 +99996,52 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeTypes/{nodeType}", + // "path": "{project}/regions/{region}/healthChecks", + // "request": { + // "$ref": "HealthCheck" + // }, // "response": { - // "$ref": "NodeType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.nodeTypes.list": +// method id "compute.regionHealthChecks.list": -type NodeTypesListCall struct { +type RegionHealthChecksListCall struct { s *Service project string - zone string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of node types available to the specified -// project. -func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall { - c := &NodeTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HealthCheck resources available to the +// specified project. +func (r *RegionHealthChecksService) List(project string, region string) *RegionHealthChecksListCall { + c := &RegionHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone + c.region = region return c } @@ -84369,7 +100067,7 @@ func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
-func (c *NodeTypesListCall) Filter(filter string) *NodeTypesListCall { +func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -84380,7 +100078,7 @@ func (c *NodeTypesListCall) Filter(filter string) *NodeTypesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *NodeTypesListCall) MaxResults(maxResults int64) *NodeTypesListCall { +func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -84397,7 +100095,7 @@ func (c *NodeTypesListCall) MaxResults(maxResults int64) *NodeTypesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *NodeTypesListCall) OrderBy(orderBy string) *NodeTypesListCall { +func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -84405,7 +100103,7 @@ func (c *NodeTypesListCall) OrderBy(orderBy string) *NodeTypesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { +func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -84413,7 +100111,7 @@ func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NodeTypesListCall) Fields(s ...googleapi.Field) *NodeTypesListCall { +func (c *RegionHealthChecksListCall) Fields(s ...googleapi.Field) *RegionHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -84423,7 +100121,7 @@ func (c *NodeTypesListCall) Fields(s ...googleapi.Field) *NodeTypesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NodeTypesListCall) IfNoneMatch(entityTag string) *NodeTypesListCall { +func (c *RegionHealthChecksListCall) IfNoneMatch(entityTag string) *RegionHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -84431,21 +100129,21 @@ func (c *NodeTypesListCall) IfNoneMatch(entityTag string) *NodeTypesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NodeTypesListCall) Context(ctx context.Context) *NodeTypesListCall { +func (c *RegionHealthChecksListCall) Context(ctx context.Context) *RegionHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NodeTypesListCall) Header() http.Header { +func (c *RegionHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -84457,7 +100155,7 @@ func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -84466,19 +100164,19 @@ func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.nodeTypes.list" call. -// Exactly one of *NodeTypeList or error will be non-nil. Any non-2xx +// Do executes the "compute.regionHealthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *NodeTypeList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, error) { +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84497,7 +100195,7 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NodeTypeList{ + ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -84509,12 +100207,12 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err } return ret, nil // { - // "description": "Retrieves a list of node types available to the specified project.", + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.nodeTypes.list", + // "id": "compute.regionHealthChecks.list", // "parameterOrder": [ // "project", - // "zone" + // "region" // ], // "parameters": { // "filter": { @@ -84547,17 +100245,17 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/nodeTypes", + // "path": "{project}/regions/{region}/healthChecks", // "response": { - // "$ref": "NodeTypeList" + // "$ref": "HealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -84571,7 +100269,7 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *NodeTypesListCall) Pages(ctx context.Context, f func(*NodeTypeList) error) error { +func (c *RegionHealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -84589,20 +100287,28 @@ func (c *NodeTypesListCall) Pages(ctx context.Context, f func(*NodeTypeList) err } } -// method id "compute.projects.disableXpnHost": +// method id "compute.regionHealthChecks.patch": -type ProjectsDisableXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthChecksPatchCall struct { + s *Service + project string + region string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DisableXpnHost: Disable this project as a shared VPC host project. -func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { - c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. 
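For orientation while reading this generated surface, here is a minimal sketch of paging through regional health checks with the List call and the Filter, MaxResults and Pages helpers shown above. The import path, the compute.New constructor, the svc.RegionHealthChecks field name, the Items/Name fields and the project/region values are assumptions for illustration, not something this hunk itself establishes.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path; use the version this vendor tree actually tracks
)

func main() {
	ctx := context.Background()

	// Any authenticated *http.Client works; application-default credentials
	// are used here only for brevity.
	httpClient, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(httpClient) // svc.RegionHealthChecks is the generated service field (name assumed)
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" and "us-central1" are placeholders.
	call := svc.RegionHealthChecks.List("my-project", "us-central1").
		Filter("name != legacy-health-check"). // same filter grammar as documented above
		MaxResults(100)

	// Pages follows nextPageToken and invokes the callback once per page.
	err = call.Pages(ctx, func(page *compute.HealthCheckList) error {
		for _, hc := range page.Items { // Items and Name come from the wider generated package
			fmt.Println(hc.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}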
+func (r *RegionHealthChecksService) Patch(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksPatchCall { + c := &RegionHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -84620,7 +100326,7 @@ func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHost // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisableXpnHostCall { +func (c *RegionHealthChecksPatchCall) RequestId(requestId string) *RegionHealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -84628,7 +100334,7 @@ func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisabl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnHostCall { +func (c *RegionHealthChecksPatchCall) Fields(s ...googleapi.Field) *RegionHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -84636,50 +100342,57 @@ func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisab // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsDisableXpnHostCall) Context(ctx context.Context) *ProjectsDisableXpnHostCall { +func (c *RegionHealthChecksPatchCall) Context(ctx context.Context) *RegionHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsDisableXpnHostCall) Header() http.Header { +func (c *RegionHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.disableXpnHost" call. +// Do executes the "compute.regionHealthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84710,13 +100423,22 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Disable this project as a shared VPC host project.", - // "httpMethod": "POST", - // "id": "compute.projects.disableXpnHost", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionHealthChecks.patch", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "healthCheck" // ], // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -84724,13 +100446,23 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/disableXpnHost", + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", + // "request": { + // "$ref": "HealthCheck" + // }, // "response": { // "$ref": "Operation" // }, @@ -84742,23 +100474,27 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.projects.disableXpnResource": +// method id "compute.regionHealthChecks.update": -type ProjectsDisableXpnResourceCall struct { - s *Service - project string - projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionHealthChecksUpdateCall struct { + s *Service + project string + region string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DisableXpnResource: Disable a serivce resource (a.k.a service -// project) associated with this host project. -func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { - c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *RegionHealthChecksService) Update(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksUpdateCall { + c := &RegionHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectsdisablexpnresourcerequest = projectsdisablexpnresourcerequest + c.region = region + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -84776,7 +100512,7 @@ func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnr // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDisableXpnResourceCall { +func (c *RegionHealthChecksUpdateCall) RequestId(requestId string) *RegionHealthChecksUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -84784,7 +100520,7 @@ func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnResourceCall { +func (c *RegionHealthChecksUpdateCall) Fields(s ...googleapi.Field) *RegionHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -84792,55 +100528,57 @@ func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
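A rough sketch of the two mutation verbs added in this hunk: patch sends a JSON merge patch (only populated fields change) while update is a full PUT replacement. It assumes a *compute.Service built elsewhere, the usual generated field names, and placeholder project, region and resource names; the HealthCheck body is left empty because its fields are not part of this hunk.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// patchThenUpdate sketches the difference between the two mutation verbs:
// Patch changes only the fields you populate, Update replaces the whole
// resource. Both return an Operation that completes asynchronously.
func patchThenUpdate(ctx context.Context, svc *compute.Service) error {
	const project, region, name = "my-project", "us-central1", "my-health-check" // placeholders

	// Partial change via PATCH; the HealthCheck fields are omitted in this sketch.
	patch := &compute.HealthCheck{ /* only the fields to change */ }
	if _, err := svc.RegionHealthChecks.Patch(project, region, name, patch).
		// An optional RequestId (a unique, non-zero UUID) could be set here for idempotent retries.
		Context(ctx).
		Do(); err != nil {
		return err
	}

	// Full replacement via PUT; the body should describe the entire desired resource.
	full := &compute.HealthCheck{ /* complete desired resource */ }
	_, err := svc.RegionHealthChecks.Update(project, region, name, full).Context(ctx).Do()
	return err
}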
-func (c *ProjectsDisableXpnResourceCall) Context(ctx context.Context) *ProjectsDisableXpnResourceCall { +func (c *RegionHealthChecksUpdateCall) Context(ctx context.Context) *RegionHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsDisableXpnResourceCall) Header() http.Header { +func (c *RegionHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsdisablexpnresourcerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnResource") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.disableXpnResource" call. +// Do executes the "compute.regionHealthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -84871,13 +100609,22 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Disable a serivce resource (a.k.a service project) associated with this host project.", - // "httpMethod": "POST", - // "id": "compute.projects.disableXpnResource", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.regionHealthChecks.update", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "healthCheck" // ], // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -84885,15 +100632,22 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/disableXpnResource", + // "path": "{project}/regions/{region}/healthChecks/{healthCheck}", // "request": { - // "$ref": "ProjectsDisableXpnResourceRequest" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -84906,46 +100660,233 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.projects.enableXpnHost": +// method id "compute.regionInstanceGroupManagers.abandonInstances": + +type RegionInstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AbandonInstances: Flags the specified instances to be immediately +// removed from the managed instance group. Abandoning an instance does +// not delete the instance, but it does remove the instance from any +// target pools that are applied by the managed instance group. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you abandon. This operation is marked as +// DONE when the action is scheduled even if the instances have not yet +// been removed from the group. 
You must separately verify the status of +// the abandoning action with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { + c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
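A hedged usage sketch for the abandonInstances call defined here. The Instances field of the request struct, the svc.RegionInstanceGroupManagers field name, the Operation.Name field and the resource names are assumptions not shown in this hunk.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// abandonOne removes a single VM from a regional managed instance group
// without deleting it. The call is asynchronous: the Operation is DONE once
// the action is scheduled, and listmanagedinstances reports the outcome.
func abandonOne(ctx context.Context, svc *compute.Service, instanceURL string) error {
	req := &compute.RegionInstanceGroupManagersAbandonInstancesRequest{
		Instances: []string{instanceURL}, // field name assumed; up to 1000 instances per request
	}
	op, err := svc.RegionInstanceGroupManagers.
		AbandonInstances("my-project", "us-central1", "my-mig", req). // placeholders
		RequestId("generate-a-fresh-uuid-per-logical-request").       // real code must pass a valid, non-zero UUID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("abandon scheduled: operation %s", op.Name) // Operation.Name comes from the wider package
	return nil
}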
+func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.abandonInstances", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } -type ProjectsEnableXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header } -// EnableXpnHost: Enable this project as a shared VPC host project. -func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { - c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c +// method id "compute.regionInstanceGroupManagers.applyUpdatesToInstances": + +type RegionInstanceGroupManagersApplyUpdatesToInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
-// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableXpnHostCall { - c.urlParams_.Set("requestId", requestId) +// ApplyUpdatesToInstances: Apply updates to selected instances the +// managed instance group. +func (r *RegionInstanceGroupManagersService) ApplyUpdatesToInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { + c := &RegionInstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersapplyupdatesrequest = regioninstancegroupmanagersapplyupdatesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnHostCall { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -84953,30 +100894,35 @@ func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnable // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsEnableXpnHostCall) Context(ctx context.Context) *ProjectsEnableXpnHostCall { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsEnableXpnHostCall) Header() http.Header { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersapplyupdatesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -84984,19 +100930,21 @@ func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.enableXpnHost" call. +// Do executes the "compute.regionInstanceGroupManagers.applyUpdatesToInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -85027,13 +100975,21 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Enable this project as a shared VPC host project.", + // "description": "Apply updates to selected instances the managed instance group.", // "httpMethod": "POST", - // "id": "compute.projects.enableXpnHost", + // "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group, should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -85041,13 +100997,17 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/enableXpnHost", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -85059,24 +101019,25 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.projects.enableXpnResource": +// method id "compute.regionInstanceGroupManagers.delete": -type ProjectsEnableXpnResourceCall struct { - s *Service - project string - projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersDeleteCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// EnableXpnResource: Enable service resource (a.k.a service project) -// for a host project, so that subnets in the host project can be used -// by instances in the service project. -func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { - c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. +func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { + c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectsenablexpnresourcerequest = projectsenablexpnresourcerequest + c.region = region + c.instanceGroupManager = instanceGroupManager return c } @@ -85094,7 +101055,7 @@ func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnres // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEnableXpnResourceCall { +func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -85102,7 +101063,7 @@ func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEna // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnResourceCall { +func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -85110,55 +101071,52 @@ func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEn // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsEnableXpnResourceCall) Context(ctx context.Context) *ProjectsEnableXpnResourceCall { +func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsEnableXpnResourceCall) Header() http.Header { +func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsenablexpnresourcerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnResource") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.enableXpnResource" call. +// Do executes the "compute.regionInstanceGroupManagers.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -85189,13 +101147,21 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", - // "httpMethod": "POST", - // "id": "compute.projects.enableXpnResource", + // "description": "Deletes the specified managed instance group and all of the instances in that group.", + // "httpMethod": "DELETE", + // "id": "compute.regionInstanceGroupManagers.delete", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -85203,16 +101169,19 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/enableXpnResource", - // "request": { - // "$ref": "ProjectsEnableXpnResourceRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "response": { // "$ref": "Operation" // }, @@ -85224,93 +101193,126 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.projects.get": +// method id "compute.regionInstanceGroupManagers.deleteInstances": -type ProjectsGetCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Project resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get -func (r *ProjectsService) Get(project string) *ProjectsGetCall { - c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteInstances: Flags the specified instances in the managed +// instance group to be immediately deleted. 
The instances are also +// removed from any target pools of which they were a member. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you delete. The deleteInstances operation is +// marked DONE if the deleteInstances request is successful. The +// underlying actions take additional time. You must separately verify +// the status of the deleting action with the listmanagedinstances +// method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { + c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
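The Do contract described above (exactly one of the typed result or the error is non-nil, with non-2xx responses surfaced as *googleapi.Error) can be handled as in the sketch below, here against the deleteInstances call from this hunk. The request field name, service field name and resource names are again assumptions.

package example

import (
	"context"
	"errors"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
	"google.golang.org/api/googleapi"
)

// deleteOne flags one managed instance for deletion and shows how non-2xx
// responses come back from Do as *googleapi.Error.
func deleteOne(ctx context.Context, svc *compute.Service, instanceURL string) {
	req := &compute.RegionInstanceGroupManagersDeleteInstancesRequest{
		Instances: []string{instanceURL}, // field name assumed; not shown in this hunk
	}
	op, err := svc.RegionInstanceGroupManagers.
		DeleteInstances("my-project", "us-central1", "my-mig", req). // placeholders
		Context(ctx).
		Do()
	if err != nil {
		var gerr *googleapi.Error
		if errors.As(err, &gerr) {
			// Code and Header mirror the HTTP response, as noted in the Do docs.
			log.Printf("compute API error %d: %s", gerr.Code, gerr.Message)
			return
		}
		log.Printf("transport error: %v", err)
		return
	}
	// The delete still completes asynchronously even though op is returned now.
	log.Printf("deleteInstances scheduled: operation %s", op.Name)
}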
-func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGetCall) Header() http.Header { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.get" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -85329,7 +101331,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Project{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -85341,57 +101343,82 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } return ret, nil // { - // "description": "Returns the specified Project resource.", - // "httpMethod": "GET", - // "id": "compute.projects.get", + // "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.deleteInstances", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" + // }, // "response": { - // "$ref": "Project" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.projects.getXpnHost": +// method id "compute.regionInstanceGroupManagers.get": -type ProjectsGetXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersGetCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// GetXpnHost: Gets the shared VPC host project that this project links -// to. May be empty if no link exists. -func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { - c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns all of the details about the specified managed instance +// group. +func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { + c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHostCall { +func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -85401,7 +101428,7 @@ func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHos // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHostCall { +func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { c.ifNoneMatch_ = entityTag return c } @@ -85409,21 +101436,21 @@ func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetXpnHostCall) Context(ctx context.Context) *ProjectsGetXpnHostCall { +func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsGetXpnHostCall) Header() http.Header { +func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -85435,7 +101462,7 @@ func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -85443,19 +101470,21 @@ func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.getXpnHost" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "compute.regionInstanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -85474,7 +101503,7 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Project{ + ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -85486,183 +101515,157 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err } return ret, nil // { - // "description": "Gets the shared VPC host project that this project links to. 
May be empty if no link exists.", + // "description": "Returns all of the details about the specified managed instance group.", // "httpMethod": "GET", - // "id": "compute.projects.getXpnHost", + // "id": "compute.regionInstanceGroupManagers.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group to return.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/getXpnHost", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "Project" + // "$ref": "InstanceGroupManager" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.getXpnResources": - -type ProjectsGetXpnResourcesCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} +// method id "compute.regionInstanceGroupManagers.insert": -// GetXpnResources: Gets service resources (a.k.a service project) -// associated with this host project. -func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { - c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c +type RegionInstanceGroupManagersInsertCall struct { + s *Service + project string + region string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, instances in +// the group are created using the specified instance template. This +// operation is marked as DONE when the group is created even if the +// instances in the group have not yet been created. You must separately +// verify the status of the individual instances with the +// listmanagedinstances method. 
// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// A regional managed instance group can contain up to 2000 instances. +func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { + c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instancegroupmanager = instancegroupmanager return c } -// OrderBy sets the optional parameter "order_by": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
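Editor's note: the requestId parameter documented above exists so a retried Insert is a server-side no-op. A hedged sketch of that pattern follows, reusing svc from the earlier sketch; the InstanceGroupManager field names (Name, BaseInstanceName, InstanceTemplate, TargetSize) are assumed from the wider generated package and are not shown in this hunk.

// Sketch, assuming svc is a *compute.Service built as in the earlier example.
func createRegionalMIG(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	igm := &compute.InstanceGroupManager{
		// Field names assumed from the wider generated package.
		Name:             "my-igm",
		BaseInstanceName: "my-igm",
		InstanceTemplate: "global/instanceTemplates/my-template",
		TargetSize:       3,
	}

	// A caller-chosen UUID lets the server ignore a duplicate retry,
	// per the requestId documentation above.
	const requestID = "00000000-0000-0000-0000-000000000001" // hypothetical UUID

	return svc.RegionInstanceGroupManagers.
		Insert("my-project", "us-central1", igm).
		RequestId(requestID).
		Context(ctx).
		Do()
}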
+func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *RegionInstanceGroupManagersInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetXpnResourcesCall { +func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGetXpnResourcesCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetXpnResourcesCall) Context(ctx context.Context) *ProjectsGetXpnResourcesCall { +func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGetXpnResourcesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnResources") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.getXpnResources" call. -// Exactly one of *ProjectsGetXpnResources or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ProjectsGetXpnResources.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
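Editor's note: the Fields setter shown above requests a partial response so the server returns only the named fields. A short fragment, reusing svc from the first sketch; the field paths are illustrative and not verified against the API's discovery document.

// Sketch fragment: ask for only a few top-level fields of the group.
igm, err := svc.RegionInstanceGroupManagers.
	Get("my-project", "us-central1", "my-igm").
	Fields("name", "targetSize", "instanceTemplate"). // googleapi.Field values; paths are illustrative
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println(igm.Name, igm.TargetSize)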
-func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*ProjectsGetXpnResources, error) { +// Do executes the "compute.regionInstanceGroupManagers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -85681,7 +101684,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ProjectsGetXpnResources{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -85693,47 +101696,39 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project } return ret, nil // { - // "description": "Gets service resources (a.k.a service project) associated with this host project.", - // "httpMethod": "GET", - // "id": "compute.projects.getXpnResources", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/getXpnResources", + // "path": "{project}/regions/{region}/instanceGroupManagers", + // "request": { + // "$ref": "InstanceGroupManager" + // }, // "response": { - // "$ref": "ProjectsGetXpnResources" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -85743,44 +101738,24 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
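Editor's note: the descriptions above stress that insert and deleteInstances return an Operation that can be marked DONE before the underlying work finishes. A rough sketch of polling that Operation is below; svc.RegionOperations.Get and the Operation fields used here are assumed from elsewhere in this generated package, not from this hunk.

// Rough sketch: wait for a regional Operation to reach DONE (needs "time").
func waitForRegionOp(ctx context.Context, svc *compute.Service, project, region string, op *compute.Operation) error {
	for op.Status != "DONE" {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second):
		}
		var err error
		op, err = svc.RegionOperations.Get(project, region, op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("operation %s finished with %d error(s)", op.Name, len(op.Error.Errors))
	}
	return nil
}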
-func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*ProjectsGetXpnResources) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.projects.listXpnHosts": +// method id "compute.regionInstanceGroupManagers.list": -type ProjectsListXpnHostsCall struct { - s *Service - project string - projectslistxpnhostsrequest *ProjectsListXpnHostsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListXpnHosts: Lists all shared VPC host projects visible to the user -// in an organization. -func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { - c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of managed instance groups that are +// contained within the specified region. +func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { + c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectslistxpnhostsrequest = projectslistxpnhostsrequest + c.region = region return c } @@ -85806,7 +101781,7 @@ func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsreque // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { +func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c } @@ -85817,12 +101792,12 @@ func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { +func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// OrderBy sets the optional parameter "order_by": Sorts list results by +// OrderBy sets the optional parameter "orderBy": Sorts list results by // a certain order. By default, results are returned in alphanumerical // order based on the resource name. // @@ -85834,15 +101809,15 @@ func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpn // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("order_by", orderBy) +func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { +func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -85850,63 +101825,72 @@ func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnH // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpnHostsCall { +func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsListXpnHostsCall) Context(ctx context.Context) *ProjectsListXpnHostsCall { +func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsListXpnHostsCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectslistxpnhostsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/listXpnHosts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.listXpnHosts" call. -// Exactly one of *XpnHostList or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *XpnHostList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostList, error) { +// Do executes the "compute.regionInstanceGroupManagers.list" call. +// Exactly one of *RegionInstanceGroupManagerList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -85925,7 +101909,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &XpnHostList{ + ret := &RegionInstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -85937,11 +101921,12 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis } return ret, nil // { - // "description": "Lists all shared VPC host projects visible to the user in an organization.", - // "httpMethod": "POST", - // "id": "compute.projects.listXpnHosts", + // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroupManagers.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { @@ -85957,7 +101942,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // "minimum": "0", // "type": "integer" // }, - // "order_by": { + // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" @@ -85973,18 +101958,22 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/listXpnHosts", - // "request": { - // "$ref": "ProjectsListXpnHostsRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers", // "response": { - // "$ref": "XpnHostList" + // "$ref": "RegionInstanceGroupManagerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } @@ -85993,7 +101982,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostList) error) error { +func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -86011,212 +102000,97 @@ func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostLis } } -// method id "compute.projects.moveDisk": +// method id "compute.regionInstanceGroupManagers.listManagedInstances": -type ProjectsMoveDiskCall struct { - s *Service - project string - diskmoverequest *DiskMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MoveDisk: Moves a persistent disk from one zone to another. -func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { - c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListManagedInstances: Lists the instances in the managed instance +// group and instances that are scheduled to be created. The list +// includes any current actions that the group has scheduled for its +// instances. +func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { + c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.diskmoverequest = diskmoverequest + c.region = region + c.instanceGroupManager = instanceGroupManager return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("filter", filter) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { - c.ctx_ = ctx +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
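Editor's note: the Filter and MaxResults setters above take the filter grammar quoted in the generated documentation. A small fragment using the regional List call, reusing svc from the first sketch; the Items field on the list response is assumed from the wider generated package.

// Sketch fragment: list regional MIGs whose name is not "example-instance",
// at most 100 per page (filter syntax copied from the documentation above).
list, err := svc.RegionInstanceGroupManagers.
	List("my-project", "us-central1").
	Filter(`name != example-instance`).
	MaxResults(100).
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Printf("HTTP %d, %d item(s) on this page\n", list.HTTPStatusCode, len(list.Items))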
-func (c *ProjectsMoveDiskCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.projects.moveDisk" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Moves a persistent disk from one zone to another.", - // "httpMethod": "POST", - // "id": "compute.projects.moveDisk", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/moveDisk", - // "request": { - // "$ref": "DiskMoveRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.projects.moveInstance": - -type ProjectsMoveInstanceCall struct { - s *Service - project string - instancemoverequest *InstanceMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// MoveInstance: Moves an instance and its attached persistent disks -// from one zone to another. -func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { - c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.instancemoverequest = instancemoverequest +// OrderBy sets the optional parameter "order_by": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("order_by", orderBy) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall { - c.urlParams_.Set("requestId", requestId) +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86224,35 +102098,30 @@ func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsMoveInstanceCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -86260,19 +102129,23 @@ func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.moveInstance" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. +// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade +// r or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. 
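Editor's note: the Context setter defined above aborts the pending HTTP request when the supplied context is canceled. A minimal fragment with a deadline, reusing svc from the first sketch (needs "time").

// Sketch fragment: give the listManagedInstances call five seconds to complete.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

resp, err := svc.RegionInstanceGroupManagers.
	ListManagedInstances("my-project", "us-central1", "my-igm").
	Context(ctx).
	Do()
if err != nil {
	log.Fatal(err) // includes context.DeadlineExceeded if the deadline fired
}
fmt.Println("listManagedInstances returned HTTP", resp.HTTPStatusCode)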
+func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -86291,7 +102164,7 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionInstanceGroupManagersListInstancesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -86303,13 +102176,44 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Moves an instance and its attached persistent disks from one zone to another.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", // "httpMethod": "POST", - // "id": "compute.projects.moveInstance", + // "id": "compute.regionInstanceGroupManagers.listManagedInstances", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -86317,45 +102221,73 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/moveInstance", - // "request": { - // "$ref": "InstanceMoveRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupManagersListInstancesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.setCommonInstanceMetadata": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsSetCommonInstanceMetadataCall struct { - s *Service - project string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionInstanceGroupManagers.patch": + +type RegionInstanceGroupManagersPatchCall struct { + s *Service + project string + region string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetCommonInstanceMetadata: Sets metadata common to all instances -// within the specified project using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata -func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { - c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is patched even if the instances in the group are still in the +// process of being patched. You must separately verify the status of +// the individual instances with the listmanagedinstances method. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. +func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { + c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.metadata = metadata + c.region = region + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager return c } @@ -86373,7 +102305,7 @@ func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Me // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall { +func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -86381,7 +102313,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *Pro // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
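Editor's note: the Pages helper added above drives the nextPageToken loop for the caller. A sketch of walking every managed instance follows; the ManagedInstances, Instance and CurrentAction fields on the response type are assumed from the wider generated package, not shown in this hunk.

// Sketch fragment: iterate all pages of listManagedInstances via Pages.
err := svc.RegionInstanceGroupManagers.
	ListManagedInstances("my-project", "us-central1", "my-igm").
	Pages(ctx, func(page *compute.RegionInstanceGroupManagersListInstancesResponse) error {
		for _, mi := range page.ManagedInstances { // field name assumed
			fmt.Println(mi.Instance, mi.CurrentAction)
		}
		return nil // a non-nil error here stops the iteration
	})
if err != nil {
	log.Fatal(err)
}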
-func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { +func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86389,55 +102321,57 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *Pr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { +func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { +func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setCommonInstanceMetadata" call. +// Do executes the "compute.regionInstanceGroupManagers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -86468,13 +102402,21 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.projects.setCommonInstanceMetadata", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionInstanceGroupManagers.patch", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -86482,15 +102424,21 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/setCommonInstanceMetadata", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "request": { - // "$ref": "Metadata" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -86503,25 +102451,40 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) } -// method id "compute.projects.setDefaultNetworkTier": +// method id "compute.regionInstanceGroupManagers.recreateInstances": -type ProjectsSetDefaultNetworkTierCall struct { - s *Service - project string - projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetDefaultNetworkTier: Sets the default network tier of the project. 
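As the Patch doc comment above says, the returned Operation is DONE once the group resource is patched, not once individual instances have been rolled; listmanagedinstances has to be polled separately. A minimal sketch of calling the new regional Patch method, assuming svc is a *compute.Service built as in the earlier sketch; the InstanceTemplate field name and the template URL are illustrative assumptions, not taken from this hunk.

// Sketch: point an existing regional MIG at a new instance template via the
// PATCH (JSON merge patch) method added above. Assumes the same imports as the
// earlier sketch (context, google.golang.org/api/compute/v1); identifiers are
// placeholders.
func patchRegionalMIG(ctx context.Context, svc *compute.Service) error {
    patch := &compute.InstanceGroupManager{
        // Field name assumed from the compute v1 schema; only fields set here
        // are merged into the existing group.
        InstanceTemplate: "global/instanceTemplates/my-template-v2",
    }
    op, err := svc.RegionInstanceGroupManagers.
        Patch("my-project", "us-central1", "my-regional-igm", patch).
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    // op is DONE when the group resource is patched, not when every instance
    // has been updated; poll listmanagedinstances for per-instance status.
    _ = op
    return nil
}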
-// The default network tier is used when an -// address/forwardingRule/instance is created without specifying the -// network tier field. -func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall { - c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RecreateInstances: Flags the specified instances in the managed +// instance group to be immediately recreated. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the flag is set +// even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { + c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectssetdefaultnetworktierrequest = projectssetdefaultnetworktierrequest + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest return c } @@ -86539,7 +102502,7 @@ func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefau // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *ProjectsSetDefaultNetworkTierCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -86547,7 +102510,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *Project // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultNetworkTierCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86555,35 +102518,35 @@ func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *Projec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ProjectsSetDefaultNetworkTierCall) Context(ctx context.Context) *ProjectsSetDefaultNetworkTierCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultnetworktierrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setDefaultNetworkTier") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -86591,19 +102554,21 @@ func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setDefaultNetworkTier" call. +// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -86634,13 +102599,21 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", + // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.projects.setDefaultNetworkTier", + // "id": "compute.regionInstanceGroupManagers.recreateInstances", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -86648,15 +102621,21 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/setDefaultNetworkTier", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", // "request": { - // "$ref": "ProjectsSetDefaultNetworkTierRequest" + // "$ref": "RegionInstanceGroupManagersRecreateRequest" // }, // "response": { // "$ref": "Operation" @@ -86669,26 +102648,37 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.projects.setUsageExportBucket": +// method id "compute.regionInstanceGroupManagers.resize": -type ProjectsSetUsageExportBucketCall struct { - s *Service - project string - usageexportlocation *UsageExportLocation - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersResizeCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetUsageExportBucket: Enables the usage export feature and sets the -// usage export bucket where reports are stored. If you provide an empty -// request body using this method, the usage export feature will be -// disabled. 
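The recreateInstances method described above deletes the named instances and rebuilds them from the group's current template, returning DONE as soon as the flag is set. A minimal sketch, assuming svc is an initialized *compute.Service as in the earlier sketch; the instance URLs and the Instances field name are assumptions from the compute v1 schema.

// Sketch: recreate two specific instances in a regional MIG using the
// recreateInstances method added above. Assumes the same imports as the
// earlier sketch; all identifiers are placeholders.
func recreateTwoInstances(ctx context.Context, svc *compute.Service) error {
    req := &compute.RegionInstanceGroupManagersRecreateRequest{
        Instances: []string{
            "zones/us-central1-a/instances/my-regional-igm-abcd",
            "zones/us-central1-b/instances/my-regional-igm-efgh",
        },
    }
    op, err := svc.RegionInstanceGroupManagers.
        RecreateInstances("my-project", "us-central1", "my-regional-igm", req).
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    _ = op // DONE only means the flag is set; verify with listmanagedinstances.
    return nil
}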
-// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket -func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { - c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Changes the intended size of the managed instance group. If +// you increase the size, the group creates new instances using the +// current instance template. If you decrease the size, the group +// deletes one or more instances. +// +// The resize operation is marked DONE if the resize request is +// successful. The underlying actions take additional time. You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. +func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { + c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.usageexportlocation = usageexportlocation + c.region = region + c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) return c } @@ -86706,7 +102696,7 @@ func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocati // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall { +func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -86714,7 +102704,7 @@ func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *Projects // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { +func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86722,35 +102712,30 @@ func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *Project // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { +func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { +func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -86758,19 +102743,21 @@ func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setUsageExportBucket" call. +// Do executes the "compute.regionInstanceGroupManagers.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -86801,13 +102788,22 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", // "httpMethod": "POST", - // "id": "compute.projects.setUsageExportBucket", + // "id": "compute.regionInstanceGroupManagers.resize", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "instanceGroupManager", + // "size" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -86815,48 +102811,60 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "size": { + // "description": "Number of instances that should exist in this instance group manager.", + // "format": "int32", + // "location": "query", + // "minimum": "0", + // "required": true, + // "type": "integer" // } // }, - // "path": "{project}/setUsageExportBucket", - // "request": { - // "$ref": "UsageExportLocation" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionAutoscalers.delete": +// method id "compute.regionInstanceGroupManagers.setAutoHealingPolicies": -type RegionAutoscalersDeleteCall struct { - s *Service - project string - region string - autoscaler string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetAutoHealingPoliciesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified autoscaler. 
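The resize method described above changes the group's intended size via a query parameter rather than a request body. A minimal sketch, assuming svc is an initialized *compute.Service as in the earlier sketch; the size, the request ID and all identifiers are placeholders.

// Sketch: resize a regional MIG to five instances with the resize method added
// above. The size travels as a query parameter, so no request body is sent.
// Assumes the same imports as the earlier sketch.
func resizeRegionalMIG(ctx context.Context, svc *compute.Service) error {
    op, err := svc.RegionInstanceGroupManagers.
        Resize("my-project", "us-central1", "my-regional-igm", 5).
        // A client-generated, non-zero UUID makes retries of this request idempotent.
        RequestId("b8f7b1a2-3c4d-4e5f-8a6b-7c8d9e0f1a2b").
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    _ = op // the underlying create/delete actions continue after DONE is reported
    return nil
}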
-func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { - c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetAutoHealingPolicies: Modifies the autohealing policy for the +// instances in this managed instance group. [Deprecated] This method is +// deprecated. Please use Patch instead. +func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { + c := &RegionInstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssetautohealingrequest = regioninstancegroupmanagerssetautohealingrequest return c } @@ -86874,7 +102882,7 @@ func (r *RegionAutoscalersService) Delete(project string, region string, autosca // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -86882,7 +102890,7 @@ func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutosca // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -86890,52 +102898,57 @@ func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionAutoscalersDeleteCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssetautohealingrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "autoscaler": c.autoscaler, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.delete" call. +// Do executes the "compute.regionInstanceGroupManagers.setAutoHealingPolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -86966,19 +102979,18 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified autoscaler.", - // "httpMethod": "DELETE", - // "id": "compute.regionAutoscalers.delete", + // "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. 
Please use Patch instead.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", // "parameterOrder": [ // "project", // "region", - // "autoscaler" + // "instanceGroupManager" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -86992,7 +103004,6 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -87002,7 +103013,10 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "request": { + // "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -87014,98 +103028,112 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionAutoscalers.get": +// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": -type RegionAutoscalersGetCall struct { - s *Service - project string - region string - autoscaler string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified autoscaler. -func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { - c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetInstanceTemplate: Sets the instance template to use when creating +// new instances or recreating instances in this group. Existing +// instances are not affected. +func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersGetCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "autoscaler": c.autoscaler, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -87124,7 +103152,7 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Autoscaler{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -87136,19 +103164,18 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler } return ret, nil // { - // "description": "Returns the specified autoscaler.", - // "httpMethod": "GET", - // "id": "compute.regionAutoscalers.get", + // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ // "project", // "region", - // "autoscaler" + // "instanceGroupManager" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -87162,43 +103189,52 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { + // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + // }, // "response": { - // "$ref": "Autoscaler" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionAutoscalers.insert": +// method id "compute.regionInstanceGroupManagers.setTargetPools": -type RegionAutoscalersInsertCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. -func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { - c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTargetPools: Modifies the target pools to which all new instances +// in this group are assigned. Existing instances in the group are not +// affected. +func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { + c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest return c } @@ -87216,7 +103252,7 @@ func (r *RegionAutoscalersService) Insert(project string, region string, autosca // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -87224,7 +103260,7 @@ func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutosca // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
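The setInstanceTemplate method described above only affects instances created or recreated after the call; running instances keep their old template. A minimal sketch, assuming svc is an initialized *compute.Service as in the earlier sketch; the InstanceTemplate field name and the template URL are assumptions from the compute v1 schema.

// Sketch: switch the template used for future creations and recreations via
// the setInstanceTemplate method added above. Assumes the same imports as the
// earlier sketch; identifiers are placeholders.
func setTemplate(ctx context.Context, svc *compute.Service) error {
    req := &compute.RegionInstanceGroupManagersSetTemplateRequest{
        InstanceTemplate: "global/instanceTemplates/my-template-v3",
    }
    op, err := svc.RegionInstanceGroupManagers.
        SetInstanceTemplate("my-project", "us-central1", "my-regional-igm", req).
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    _ = op
    return nil
}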
-func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -87232,35 +103268,35 @@ func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersInsertCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -87268,20 +103304,21 @@ func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.insert" call. +// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -87312,14 +103349,21 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", // "httpMethod": "POST", - // "id": "compute.regionAutoscalers.insert", + // "id": "compute.regionInstanceGroupManagers.setTargetPools", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -87330,7 +103374,6 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -87340,9 +103383,9 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", // "request": { - // "$ref": "Autoscaler" + // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" // }, // "response": { // "$ref": "Operation" @@ -87355,159 +103398,92 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionAutoscalers.list": +// method id "compute.regionInstanceGroupManagers.testIamPermissions": -type RegionAutoscalersListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of autoscalers contained within the specified -// region. -func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { - c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupManagersTestIamPermissionsCall { + c := &RegionInstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. 
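The setTargetPools method described above re-points only future instances at the new pools; instances already in the group are left alone. A minimal sketch, assuming svc is an initialized *compute.Service as in the earlier sketch; the TargetPools field name and the pool URL are assumptions from the compute v1 schema.

// Sketch: change the target pools that new instances in the group join, using
// the setTargetPools method added above. Assumes the same imports as the
// earlier sketch; identifiers are placeholders.
func setTargetPools(ctx context.Context, svc *compute.Service) error {
    req := &compute.RegionInstanceGroupManagersSetTargetPoolsRequest{
        TargetPools: []string{
            "regions/us-central1/targetPools/my-pool",
        },
    }
    op, err := svc.RegionInstanceGroupManagers.
        SetTargetPools("my-project", "us-central1", "my-regional-igm", req).
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    _ = op // existing instances are not re-pointed; only new instances are affected
    return nil
}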
-// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupManagersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersListCall) Header() http.Header { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.list" call. -// Exactly one of *RegionAutoscalerList or error will be non-nil. Any +// Do executes the "compute.regionInstanceGroupManagers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *RegionAutoscalerList.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -87526,7 +103502,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionAutoscalerList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -87538,37 +103514,15 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionAutoscalers.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.testIamPermissions", // "parameterOrder": [ // "project", - // "region" + // "region", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -87577,16 +103531,26 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers", + // "path": "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "RegionAutoscalerList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -87597,54 +103561,30 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionAutoscalers.patch": +// method id "compute.regionInstanceGroupManagers.update": -type RegionAutoscalersPatchCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersUpdateCall struct { + s *Service + project string + region string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates an autoscaler in the specified project using the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. -func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { - c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is updated even if the instances in the group have not yet been +// updated. You must separately verify the status of the individual +// instances with the listmanagedinstances method. 
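A hypothetical usage sketch of the RegionInstanceGroupManagers.Update call documented just above — not taken from this vendored code. It assumes the beta compute package vendored here (exact import path/version is an assumption), application-default credentials, and placeholder project/region/group/template names.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

func main() {
	ctx := context.Background()
	// Assumes application-default credentials are available in the environment.
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical group and template names.
	igm := &compute.InstanceGroupManager{
		Name:             "my-igm",
		InstanceTemplate: "global/instanceTemplates/my-template",
		TargetSize:       3,
	}
	op, err := svc.RegionInstanceGroupManagers.
		Update("my-project", "us-central1", "my-igm", igm).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("update failed: %v", err)
	}
	// Update is asynchronous: the Operation can be DONE before the
	// individual instances are updated (see listmanagedinstances).
	log.Printf("operation %s: %s", op.Name, op.Status)
}

The RequestId option added in this diff can also be set on the call (c.RequestId(uuid)) to make retried requests idempotent.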
+func (r *RegionInstanceGroupManagersService) Update(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersUpdateCall { + c := &RegionInstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager return c } @@ -87662,7 +103602,7 @@ func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutosc // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall { +func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -87670,64 +103610,236 @@ func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscal // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall { +func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersUpdateCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method.", + // "httpMethod": "PUT", + // "id": "compute.regionInstanceGroupManagers.update", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "request": { + // "$ref": "InstanceGroupManager" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroups.get": + +type RegionInstanceGroupsGetCall struct { + s *Service + project string + region string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified instance group resource. +func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { + c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroup = instanceGroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall { +func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionAutoscalersPatchCall) Header() http.Header { +func (c *RegionInstanceGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionInstanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -87746,7 +103858,7 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -87758,18 +103870,19 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.regionAutoscalers.patch", + // "description": "Returns the specified instance group resource.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroups.get", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroup" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "instanceGroup": { + // "description": "Name of the instance group resource to return.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -87782,117 +103895,176 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionAutoscalers.testIamPermissions": +// method id "compute.regionInstanceGroups.list": -type RegionAutoscalersTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionAutoscalersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionAutoscalersTestIamPermissionsCall { - c := &RegionAutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instance group resources contained within +// the specified region. 
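A minimal, hypothetical sketch of the RegionInstanceGroups.List call whose doc comment ends above, using the Pages helper added later in this diff. svc is assumed to be an already-constructed *compute.Service (as in the earlier sketch); project and region names are placeholders.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// listRegionInstanceGroups pages through all instance groups in a region.
func listRegionInstanceGroups(ctx context.Context, svc *compute.Service) error {
	return svc.RegionInstanceGroups.
		List("my-project", "us-central1").
		MaxResults(100).
		Pages(ctx, func(page *compute.RegionInstanceGroupList) error {
			for _, ig := range page.Items {
				log.Printf("instance group: %s (size %d)", ig.Name, ig.Size)
			}
			return nil // returning a non-nil error stops pagination
		})
}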
+func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { + c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionAutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionAutoscalersTestIamPermissionsCall { +func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersTestIamPermissionsCall) Context(ctx context.Context) *RegionAutoscalersTestIamPermissionsCall { +func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersTestIamPermissionsCall) Header() http.Header { +func (c *RegionInstanceGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.regionInstanceGroups.list" call. +// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *RegionInstanceGroupList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -87911,7 +104083,7 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &RegionInstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -87923,15 +104095,37 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionAutoscalers.testIamPermissions", + // "description": "Retrieves the list of instance group resources contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroups.list", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "region" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -87940,26 +104134,15 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroups", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "RegionInstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -87970,58 +104153,120 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } -// method id "compute.regionAutoscalers.update": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionAutoscalersUpdateCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionInstanceGroups.listInstances": + +type RegionInstanceGroupsListInstancesCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates an autoscaler in the specified project using the data -// included in the request. -func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { - c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListInstances: Lists the instances in the specified instance group +// and displays information about the named ports. Depending on the +// specified options, this method can list all instances or only the +// instances that are running. 
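A hypothetical sketch of the ListInstances call documented above, restricting the listing to running instances and paging through the results. svc is assumed to be an already-constructed *compute.Service; project, region, and group names are placeholders.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// listRunningInstances lists only RUNNING instances in a regional instance group.
func listRunningInstances(ctx context.Context, svc *compute.Service) error {
	req := &compute.RegionInstanceGroupsListInstancesRequest{
		InstanceState: "RUNNING", // use "ALL" to include stopped instances
	}
	call := svc.RegionInstanceGroups.ListInstances("my-project", "us-central1", "my-group", req)
	return call.Pages(ctx, func(page *compute.RegionInstanceGroupsListInstances) error {
		for _, inst := range page.Items {
			log.Printf("%s: %s", inst.Instance, inst.Status)
		}
		return nil
	})
}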
+func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { + c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler + c.instanceGroup = instanceGroup + c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest return c } -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. -func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("filter", filter) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
+// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall { - c.urlParams_.Set("requestId", requestId) +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall { +func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88029,56 +104274,58 @@ func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall { +func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersUpdateCall) Header() http.Header { +func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroups.listInstances" call. +// Exactly one of *RegionInstanceGroupsListInstances or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupsListInstances.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -88097,7 +104344,7 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionInstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -88109,18 +104356,42 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.regionAutoscalers.update", + // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.listInstances", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroup" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "instanceGroup": { + // "description": "Name of the regional instance group for which we want to list the instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "type": "string" // }, // "project": { @@ -88133,49 +104404,68 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers", + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", // "request": { - // "$ref": "Autoscaler" + // "$ref": "RegionInstanceGroupsListInstancesRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionBackendServices.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionInstanceGroups.setNamedPorts": -type RegionBackendServicesDeleteCall struct { - s *Service - project string - region string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsSetNamedPortsCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified regional BackendService resource. -func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { - c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNamedPorts: Sets the named ports for the specified regional +// instance group. +func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { + c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService + c.instanceGroup = instanceGroup + c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest return c } @@ -88193,7 +104483,7 @@ func (r *RegionBackendServicesService) Delete(project string, region string, bac // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBackendServicesDeleteCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -88201,7 +104491,7 @@ func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBackendServicesDeleteCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88209,52 +104499,57 @@ func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesDeleteCall) Context(ctx context.Context) *RegionBackendServicesDeleteCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesDeleteCall) Header() http.Header { +func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.delete" call. +// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
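A hypothetical sketch of the SetNamedPorts call introduced in this hunk, assigning a named port to a regional instance group. svc is assumed to be an already-constructed *compute.Service; all names and the port number are placeholders.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// setNamedPorts sets a single named port on a regional instance group.
func setNamedPorts(ctx context.Context, svc *compute.Service) error {
	req := &compute.RegionInstanceGroupsSetNamedPortsRequest{
		NamedPorts: []*compute.NamedPort{{Name: "http", Port: 8080}},
	}
	op, err := svc.RegionInstanceGroups.
		SetNamedPorts("my-project", "us-central1", "my-group", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("setNamedPorts operation: %s (%s)", op.Name, op.Status)
	return nil
}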
-func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -88285,19 +104580,18 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified regional BackendService resource.", - // "httpMethod": "DELETE", - // "id": "compute.regionBackendServices.delete", + // "description": "Sets the named ports for the specified regional instance group.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.setNamedPorts", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "instanceGroup" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to delete.", + // "instanceGroup": { + // "description": "The name of the regional instance group where the named ports are updated.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -88311,7 +104605,6 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -88321,7 +104614,10 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + // "request": { + // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -88333,98 +104629,92 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionBackendServices.get": +// method id "compute.regionInstanceGroups.testIamPermissions": -type RegionBackendServicesGetCall struct { - s *Service - project string - region string - backendService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified regional BackendService resource. -func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { - c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
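A hypothetical sketch of the RegionInstanceGroups.TestIamPermissions call documented above, checking which of a set of permissions the caller holds on a regional instance group. svc is assumed to be an already-constructed *compute.Service; the project, region, group, and permission strings are placeholders.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// checkPermissions reports which of the requested permissions are granted.
func checkPermissions(ctx context.Context, svc *compute.Service) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.instanceGroups.get", "compute.instanceGroups.update"},
	}
	resp, err := svc.RegionInstanceGroups.
		TestIamPermissions("my-project", "us-central1", "my-group", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// The response echoes back only the permissions the caller actually has.
	log.Printf("granted: %v", resp.Permissions)
	return nil
}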
+func (r *RegionInstanceGroupsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupsTestIamPermissionsCall { + c := &RegionInstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetCall { +func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesGetCall) Context(ctx context.Context) *RegionBackendServicesGetCall { +func (c *RegionInstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesGetCall) Header() http.Header { +func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.get" call. 
-// Exactly one of *BackendService or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionInstanceGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { +func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -88443,7 +104733,7 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendService{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -88455,22 +104745,15 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen } return ret, nil // { - // "description": "Returns the specified regional BackendService resource.", - // "httpMethod": "GET", - // "id": "compute.regionBackendServices.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "resource" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -88479,16 +104762,26 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "path": "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "BackendService" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -88499,34 +104792,32 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen } -// method id "compute.regionBackendServices.getHealth": +// method id "compute.regionOperations.delete": -type 
RegionBackendServicesGetHealthCall struct { - s *Service - project string - region string - backendService string - resourcegroupreference *ResourceGroupReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionOperationsDeleteCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetHealth: Gets the most recent health check results for this -// regional BackendService. -func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { - c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified region-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete +func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { + c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetHealthCall { +func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88534,238 +104825,190 @@ func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *Regio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesGetHealthCall) Context(ctx context.Context) *RegionBackendServicesGetHealthCall { +func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesGetHealthCall) Header() http.Header { +func (c *RegionOperationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}/getHealth") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.getHealth" call. -// Exactly one of *BackendServiceGroupHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { +// Do executes the "compute.regionOperations.delete" call. +func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendServiceGroupHealth{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return err } - return ret, nil + return nil // { - // "description": "Gets the most recent health check results for this regional BackendService.", - // "httpMethod": "POST", - // "id": "compute.regionBackendServices.getHealth", + // "description": "Deletes the specified region-specific Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionOperations.delete", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "operation" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource for which to get health.", + // "operation": { + // "description": "Name of the Operations resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, // "project": { + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}/getHealth", - // "request": { - // "$ref": "ResourceGroupReference" - // }, - // "response": { - // "$ref": "BackendServiceGroupHealth" - // }, + // "path": 
"{project}/regions/{region}/operations/{operation}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionBackendServices.insert": +// method id "compute.regionOperations.get": -type RegionBackendServicesInsertCall struct { - s *Service - project string - region string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionOperationsGetCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a regional BackendService resource in the specified -// project using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a regional -// backend service. Read Restrictions and Guidelines for more -// information. -func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { - c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Retrieves the specified region-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get +func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { + c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendservice = backendservice - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBackendServicesInsertCall { - c.urlParams_.Set("requestId", requestId) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBackendServicesInsertCall { +func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesInsertCall) Context(ctx context.Context) *RegionBackendServicesInsertCall { +func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesInsertCall) Header() http.Header { +func (c *RegionOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.insert" call. +// Do executes the "compute.regionOperations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -88796,14 +105039,22 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. 
Read Restrictions and Guidelines for more information.", - // "httpMethod": "POST", - // "id": "compute.regionBackendServices.insert", + // "description": "Retrieves the specified region-specific Operations resource.", + // "httpMethod": "GET", + // "id": "compute.regionOperations.get", // "parameterOrder": [ // "project", - // "region" + // "region", + // "operation" // ], // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -88812,36 +105063,29 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices", - // "request": { - // "$ref": "BackendService" - // }, + // "path": "{project}/regions/{region}/operations/{operation}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionBackendServices.list": +// method id "compute.regionOperations.list": -type RegionBackendServicesListCall struct { +type RegionOperationsListCall struct { s *Service project string region string @@ -88851,10 +105095,11 @@ type RegionBackendServicesListCall struct { header_ http.Header } -// List: Retrieves the list of regional BackendService resources -// available to the specified project in the given region. -func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { - c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Operation resources contained within the +// specified region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list +func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { + c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -88882,7 +105127,7 @@ func (r *RegionBackendServicesService) List(project string, region string) *Regi // explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { +func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { c.urlParams_.Set("filter", filter) return c } @@ -88893,7 +105138,7 @@ func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServ // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBackendServicesListCall { +func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -88910,7 +105155,7 @@ func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBack // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendServicesListCall { +func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -88918,7 +105163,7 @@ func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendSe // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBackendServicesListCall { +func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -88926,7 +105171,7 @@ func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBacke // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBackendServicesListCall { +func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -88936,7 +105181,7 @@ func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBack // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBackendServicesListCall { +func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -88944,21 +105189,21 @@ func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesListCall) Context(ctx context.Context) *RegionBackendServicesListCall { +func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
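The list builder being renamed here keeps the generated Filter/MaxResults/OrderBy/PageToken setters, and its Pages helper (converted further below) drives the pageToken loop for the caller. A sketch of how that might be used, assuming the same import path and constructor as above; the filter expression and the "status" field name come from the Compute API documentation, not from this file.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path for this generated surface
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor, as above
	if err != nil {
		log.Fatal(err)
	}
	call := svc.RegionOperations.List("my-project", "us-central1").
		Filter(`status != "DONE"`).
		MaxResults(100)
	err = call.Pages(ctx, func(page *compute.OperationList) error {
		for _, op := range page.Items {
			fmt.Println(op.Name, op.OperationType, op.Status)
		}
		return nil // a non-nil error here stops the iteration
	})
	if err != nil {
		log.Fatal(err)
	}
}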
-func (c *RegionBackendServicesListCall) Header() http.Header { +func (c *RegionOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -88970,7 +105215,7 @@ func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -88984,14 +105229,14 @@ func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.list" call. -// Exactly one of *BackendServiceList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { +func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -89010,7 +105255,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceList{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -89022,9 +105267,9 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe } return ret, nil // { - // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", + // "description": "Retrieves a list of Operation resources contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.regionBackendServices.list", + // "id": "compute.regionOperations.list", // "parameterOrder": [ // "project", // "region" @@ -89061,16 +105306,16 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices", + // "path": "{project}/regions/{region}/operations", // "response": { - // "$ref": "BackendServiceList" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -89084,7 +105329,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { +func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -89102,31 +105347,24 @@ func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*Backe } } -// method id "compute.regionBackendServices.patch": +// method id "compute.regionSslCertificates.delete": -type RegionBackendServicesPatchCall struct { +type RegionSslCertificatesDeleteCall struct { s *Service project string region string - backendService string - backendservice *BackendService + sslCertificate string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Updates the specified regional BackendService resource with -// the data included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. -func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { - c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified SslCertificate resource in the region. 
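The regionSslCertificates.delete builder defined just below also accepts the optional requestId documented on it, which lets a retried delete be deduplicated by the server. A sketch under stated assumptions: the import path, constructor, github.com/google/uuid dependency, and all resource names are placeholders, not taken from this diff.

package main

import (
	"context"
	"log"

	"github.com/google/uuid" // assumed available; any UUID source works
	compute "google.golang.org/api/compute/v0.beta" // assumed import path for this generated surface
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor, as above
	if err != nil {
		log.Fatal(err)
	}
	// Reusing the same requestId on a retry lets the server ignore the duplicate.
	reqID := uuid.New().String()
	op, err := svc.RegionSslCertificates.Delete("my-project", "us-central1", "my-cert").
		RequestId(reqID).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete started: operation %s (%s)", op.Name, op.Status)
}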
+func (r *RegionSslCertificatesService) Delete(project string, region string, sslCertificate string) *RegionSslCertificatesDeleteCall { + c := &RegionSslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.backendservice = backendservice + c.sslCertificate = sslCertificate return c } @@ -89144,7 +105382,7 @@ func (r *RegionBackendServicesService) Patch(project string, region string, back // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBackendServicesPatchCall { +func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSslCertificatesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -89152,7 +105390,7 @@ func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBack // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBackendServicesPatchCall { +func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSslCertificatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -89160,37 +105398,32 @@ func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesPatchCall) Context(ctx context.Context) *RegionBackendServicesPatchCall { +func (c *RegionSslCertificatesDeleteCall) Context(ctx context.Context) *RegionSslCertificatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesPatchCall) Header() http.Header { +func (c *RegionSslCertificatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates/{sslCertificate}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -89198,19 +105431,19 @@ func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "backendService": c.backendService, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.patch" call. +// Do executes the "compute.regionSslCertificates.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -89241,22 +105474,15 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.regionBackendServices.patch", + // "description": "Deletes the specified SslCertificate resource in the region.", + // "httpMethod": "DELETE", + // "id": "compute.regionSslCertificates.delete", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "sslCertificate" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -89275,12 +105501,16 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, + // "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", // "response": { // "$ref": "Operation" // }, @@ -89292,92 +105522,100 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.regionBackendServices.testIamPermissions": +// method id "compute.regionSslCertificates.get": -type RegionBackendServicesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSslCertificatesGetCall struct { + s *Service + project string + region string + sslCertificate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionBackendServicesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionBackendServicesTestIamPermissionsCall { - c := &RegionBackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified SslCertificate resource in the specified +// region. Get a list of available SSL certificates by making a list() +// request. +func (r *RegionSslCertificatesService) Get(project string, region string, sslCertificate string) *RegionSslCertificatesGetCall { + c := &RegionSslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.sslCertificate = sslCertificate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionBackendServicesTestIamPermissionsCall { +func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCertificatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslCertificatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionBackendServicesTestIamPermissionsCall) Context(ctx context.Context) *RegionBackendServicesTestIamPermissionsCall { +func (c *RegionSslCertificatesGetCall) Context(ctx context.Context) *RegionSslCertificatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesTestIamPermissionsCall) Header() http.Header { +func (c *RegionSslCertificatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionSslCertificates.get" call. +// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SslCertificate.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -89396,7 +105634,7 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &SslCertificate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -89408,13 +105646,13 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionBackendServices.testIamPermissions", + // "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionSslCertificates.get", // "parameterOrder": [ // "project", // "region", - // "resource" + // "sslCertificate" // ], // "parameters": { // "project": { @@ -89425,26 +105663,23 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/sslCertificates/{sslCertificate}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "SslCertificate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -89455,29 +105690,25 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO } -// method id "compute.regionBackendServices.update": +// method id "compute.regionSslCertificates.insert": -type RegionBackendServicesUpdateCall struct { +type RegionSslCertificatesInsertCall struct { s *Service project string region string - backendService string - backendservice *BackendService + sslcertificate *SslCertificate urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Update: Updates the specified regional BackendService resource with -// the data included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. 
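The regionSslCertificates.get call converted above supports conditional fetches through IfNoneMatch, with googleapi.IsNotModified distinguishing "unchanged" from a real error. A sketch under the same assumptions as the earlier examples; the ETag handling mirrors the generated ServerResponse embedding rather than anything specific to this hunk.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path for this generated surface
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor, as above
	if err != nil {
		log.Fatal(err)
	}
	etag := "" // an ETag remembered from an earlier response, if any
	call := svc.RegionSslCertificates.Get("my-project", "us-central1", "my-cert").Context(ctx)
	if etag != "" {
		call = call.IfNoneMatch(etag)
	}
	cert, err := call.Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("certificate unchanged since the last fetch")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	// ServerResponse is the embedded googleapi.ServerResponse carrying the headers.
	fmt.Println(cert.Name, "etag:", cert.ServerResponse.Header.Get("Etag"))
}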
-func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { - c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a SslCertificate resource in the specified project +// and region using the data included in the request +func (r *RegionSslCertificatesService) Insert(project string, region string, sslcertificate *SslCertificate) *RegionSslCertificatesInsertCall { + c := &RegionSslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.backendservice = backendservice + c.sslcertificate = sslcertificate return c } @@ -89495,7 +105726,7 @@ func (r *RegionBackendServicesService) Update(project string, region string, bac // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBackendServicesUpdateCall { +func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSslCertificatesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -89503,7 +105734,7 @@ func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBackendServicesUpdateCall { +func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSslCertificatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -89511,57 +105742,56 @@ func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesUpdateCall) Context(ctx context.Context) *RegionBackendServicesUpdateCall { +func (c *RegionSslCertificatesInsertCall) Context(ctx context.Context) *RegionSslCertificatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesUpdateCall) Header() http.Header { +func (c *RegionSslCertificatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.update" call. +// Do executes the "compute.regionSslCertificates.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -89592,22 +105822,14 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", - // "httpMethod": "PUT", - // "id": "compute.regionBackendServices.update", + // "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", + // "httpMethod": "POST", + // "id": "compute.regionSslCertificates.insert", // "parameterOrder": [ // "project", - // "region", - // "backendService" + // "region" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -89628,9 +105850,9 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "path": "{project}/regions/{region}/sslCertificates", // "request": { - // "$ref": "BackendService" + // "$ref": "SslCertificate" // }, // "response": { // "$ref": "Operation" @@ -89643,21 +105865,24 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionCommitments.aggregatedList": +// method id "compute.regionSslCertificates.list": -type RegionCommitmentsAggregatedListCall struct { +type RegionSslCertificatesListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of commitments. 
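The regionSslCertificates.insert call introduced above posts an SslCertificate body and returns an Operation. A sketch under stated assumptions: the import path, constructor, file paths, and the Name/Certificate/PrivateKey field names follow the public SslCertificate resource and should be checked against the vendored struct before use.

package main

import (
	"context"
	"io/ioutil"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path for this generated surface
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor, as above
	if err != nil {
		log.Fatal(err)
	}
	crt, err := ioutil.ReadFile("tls.crt") // PEM certificate chain (placeholder path)
	if err != nil {
		log.Fatal(err)
	}
	key, err := ioutil.ReadFile("tls.key") // PEM private key (placeholder path)
	if err != nil {
		log.Fatal(err)
	}
	cert := &compute.SslCertificate{
		Name:        "my-regional-cert",
		Certificate: string(crt),
		PrivateKey:  string(key),
	}
	op, err := svc.RegionSslCertificates.Insert("my-project", "us-central1", cert).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("insert accepted: operation %s targets %s", op.Name, op.TargetLink)
}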
-func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { - c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of SslCertificate resources available to the +// specified project in the specified region. +func (r *RegionSslCertificatesService) List(project string, region string) *RegionSslCertificatesListCall { + c := &RegionSslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } @@ -89683,7 +105908,7 @@ func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitm // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { +func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertificatesListCall { c.urlParams_.Set("filter", filter) return c } @@ -89694,7 +105919,7 @@ func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommi // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *RegionCommitmentsAggregatedListCall { +func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslCertificatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -89711,7 +105936,7 @@ func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *Regi // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCommitmentsAggregatedListCall { +func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertificatesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -89719,7 +105944,7 @@ func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCom // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *RegionCommitmentsAggregatedListCall { +func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCertificatesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -89727,7 +105952,7 @@ func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *Regio // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *RegionCommitmentsAggregatedListCall { +func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslCertificatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -89737,7 +105962,7 @@ func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *Regi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *RegionCommitmentsAggregatedListCall { +func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSslCertificatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -89745,21 +105970,21 @@ func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsAggregatedListCall) Context(ctx context.Context) *RegionCommitmentsAggregatedListCall { +func (c *RegionSslCertificatesListCall) Context(ctx context.Context) *RegionSslCertificatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { +func (c *RegionSslCertificatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -89771,7 +105996,7 @@ func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Respo var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/sslCertificates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -89780,18 +106005,19 @@ func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Respo req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.aggregatedList" call. -// Exactly one of *CommitmentAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *CommitmentAggregatedList.ServerResponse.Header or (if a response was +// Do executes the "compute.regionSslCertificates.list" call. +// Exactly one of *SslCertificateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SslCertificateList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*CommitmentAggregatedList, error) { +func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -89810,7 +106036,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &CommitmentAggregatedList{ + ret := &SslCertificateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -89822,11 +106048,12 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves an aggregated list of commitments.", + // "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", // "httpMethod": "GET", - // "id": "compute.regionCommitments.aggregatedList", + // "id": "compute.regionSslCertificates.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { @@ -89858,11 +106085,18 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/commitments", + // "path": "{project}/regions/{region}/sslCertificates", // "response": { - // "$ref": "CommitmentAggregatedList" + // "$ref": "SslCertificateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -89876,7 +106110,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func(*CommitmentAggregatedList) error) error { +func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -89894,192 +106128,24 @@ func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func( } } -// method id "compute.regionCommitments.get": - -type RegionCommitmentsGetCall struct { - s *Service - project string - region string - commitment string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified commitment resource. Gets a list of -// available commitments by making a list() request. -func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { - c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.commitment = commitment - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionCommitmentsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "commitment": c.commitment, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionCommitments.get" call. -// Exactly one of *Commitment or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Commitment.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Commitment{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified commitment resource. 
Gets a list of available commitments by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regionCommitments.get", - // "parameterOrder": [ - // "project", - // "region", - // "commitment" - // ], - // "parameters": { - // "commitment": { - // "description": "Name of the commitment to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/commitments/{commitment}", - // "response": { - // "$ref": "Commitment" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionCommitments.insert": +// method id "compute.regionTargetHttpProxies.delete": -type RegionCommitmentsInsertCall struct { - s *Service - project string - region string - commitment *Commitment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpProxiesDeleteCall struct { + s *Service + project string + region string + targetHttpProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a commitment in the specified project using the data -// included in the request. -func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { - c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified TargetHttpProxy resource. +func (r *RegionTargetHttpProxiesService) Delete(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesDeleteCall { + c := &RegionTargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.commitment = commitment + c.targetHttpProxy = targetHttpProxy return c } @@ -90097,7 +106163,7 @@ func (r *RegionCommitmentsService) Insert(project string, region string, commitm // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { +func (c *RegionTargetHttpProxiesDeleteCall) RequestId(requestId string) *RegionTargetHttpProxiesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -90105,7 +106171,7 @@ func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitm // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { +func (c *RegionTargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -90113,56 +106179,52 @@ func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommit // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { +func (c *RegionTargetHttpProxiesDeleteCall) Context(ctx context.Context) *RegionTargetHttpProxiesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsInsertCall) Header() http.Header { +func (c *RegionTargetHttpProxiesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "targetHttpProxy": c.targetHttpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.insert" call. +// Do executes the "compute.regionTargetHttpProxies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -90193,12 +106255,13 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a commitment in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.regionCommitments.insert", + // "description": "Deletes the specified TargetHttpProxy resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionTargetHttpProxies.delete", // "parameterOrder": [ // "project", - // "region" + // "region", + // "targetHttpProxy" // ], // "parameters": { // "project": { @@ -90209,7 +106272,7 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -90219,111 +106282,55 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/commitments", - // "request": { - // "$ref": "Commitment" - // }, + // "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", // "response": { // "$ref": "Operation" // }, // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionCommitments.list": - -type RegionCommitmentsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of commitments contained within the specified -// region. -func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { - c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. 
-// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionCommitmentsListCall) MaxResults(maxResults int64) *RegionCommitmentsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionCommitmentsListCall) OrderBy(orderBy string) *RegionCommitmentsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c +// method id "compute.regionTargetHttpProxies.get": + +type RegionTargetHttpProxiesGetCall struct { + s *Service + project string + region string + targetHttpProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmentsListCall { - c.urlParams_.Set("pageToken", pageToken) +// Get: Returns the specified TargetHttpProxy resource in the specified +// region. Gets a list of available target HTTP proxies by making a +// list() request. 
+func (r *RegionTargetHttpProxiesService) Get(project string, region string, targetHttpProxy string) *RegionTargetHttpProxiesGetCall { + c := &RegionTargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpProxy = targetHttpProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsListCall) Fields(s ...googleapi.Field) *RegionCommitmentsListCall { +func (c *RegionTargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -90333,7 +106340,7 @@ func (c *RegionCommitmentsListCall) Fields(s ...googleapi.Field) *RegionCommitme // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionCommitmentsListCall) IfNoneMatch(entityTag string) *RegionCommitmentsListCall { +func (c *RegionTargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetHttpProxiesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -90341,21 +106348,21 @@ func (c *RegionCommitmentsListCall) IfNoneMatch(entityTag string) *RegionCommitm // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsListCall) Context(ctx context.Context) *RegionCommitmentsListCall { +func (c *RegionTargetHttpProxiesGetCall) Context(ctx context.Context) *RegionTargetHttpProxiesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsListCall) Header() http.Header { +func (c *RegionTargetHttpProxiesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -90367,7 +106374,7 @@ func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -90375,20 +106382,21 @@ func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "targetHttpProxy": c.targetHttpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.list" call. -// Exactly one of *CommitmentList or error will be non-nil. Any non-2xx +// Do executes the "compute.regionTargetHttpProxies.get" call. +// Exactly one of *TargetHttpProxy or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either -// *CommitmentList.ServerResponse.Header or (if a response was returned +// *TargetHttpProxy.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*CommitmentList, error) { +func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -90407,7 +106415,7 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &CommitmentList{ + ret := &TargetHttpProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -90419,37 +106427,15 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen } return ret, nil // { - // "description": "Retrieves a list of commitments contained within the specified region.", + // "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", // "httpMethod": "GET", - // "id": "compute.regionCommitments.list", + // "id": "compute.regionTargetHttpProxies.get", // "parameterOrder": [ // "project", - // "region" + // "region", + // "targetHttpProxy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -90458,16 +106444,23 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/commitments", + // "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", // "response": { - // "$ref": "CommitmentList" + // "$ref": "TargetHttpProxy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -90478,120 +106471,108 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionCommitmentsListCall) Pages(ctx context.Context, f func(*CommitmentList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} +// method id "compute.regionTargetHttpProxies.insert": -// method id "compute.regionDiskTypes.get": - -type RegionDiskTypesGetCall struct { - s *Service - project string - region string - diskType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionTargetHttpProxiesInsertCall struct { + s *Service + project string + region string + targethttpproxy *TargetHttpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified regional disk type. Gets a list of -// available disk types by making a list() request. -func (r *RegionDiskTypesService) Get(project string, region string, diskType string) *RegionDiskTypesGetCall { - c := &RegionDiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a TargetHttpProxy resource in the specified project +// and region using the data included in the request. 
+func (r *RegionTargetHttpProxiesService) Insert(project string, region string, targethttpproxy *TargetHttpProxy) *RegionTargetHttpProxiesInsertCall { + c := &RegionTargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.diskType = diskType + c.targethttpproxy = targethttpproxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpProxiesInsertCall) RequestId(requestId string) *RegionTargetHttpProxiesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDiskTypesGetCall) Fields(s ...googleapi.Field) *RegionDiskTypesGetCall { +func (c *RegionTargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionDiskTypesGetCall) IfNoneMatch(entityTag string) *RegionDiskTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDiskTypesGetCall) Context(ctx context.Context) *RegionDiskTypesGetCall { +func (c *RegionTargetHttpProxiesInsertCall) Context(ctx context.Context) *RegionTargetHttpProxiesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDiskTypesGetCall) Header() http.Header { +func (c *RegionTargetHttpProxiesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes/{diskType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "diskType": c.diskType, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDiskTypes.get" call. -// Exactly one of *DiskType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionTargetHttpProxies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { +func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -90610,7 +106591,7 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -90622,22 +106603,14 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er } return ret, nil // { - // "description": "Returns the specified regional disk type. 
Gets a list of available disk types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regionDiskTypes.get", + // "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpProxies.insert", // "parameterOrder": [ // "project", - // "region", - // "diskType" + // "region" // ], // "parameters": { - // "diskType": { - // "description": "Name of the disk type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -90646,29 +106619,36 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/diskTypes/{diskType}", + // "path": "{project}/regions/{region}/targetHttpProxies", + // "request": { + // "$ref": "TargetHttpProxy" + // }, // "response": { - // "$ref": "DiskType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionDiskTypes.list": +// method id "compute.regionTargetHttpProxies.list": -type RegionDiskTypesListCall struct { +type RegionTargetHttpProxiesListCall struct { s *Service project string region string @@ -90678,10 +106658,10 @@ type RegionDiskTypesListCall struct { header_ http.Header } -// List: Retrieves a list of regional disk types available to the -// specified project. -func (r *RegionDiskTypesService) List(project string, region string) *RegionDiskTypesListCall { - c := &RegionDiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of TargetHttpProxy resources available to +// the specified project in the specified region. +func (r *RegionTargetHttpProxiesService) List(project string, region string) *RegionTargetHttpProxiesListCall { + c := &RegionTargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -90709,7 +106689,7 @@ func (r *RegionDiskTypesService) List(project string, region string) *RegionDisk // explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall { +func (c *RegionTargetHttpProxiesListCall) Filter(filter string) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("filter", filter) return c } @@ -90720,7 +106700,7 @@ func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionDiskTypesListCall) MaxResults(maxResults int64) *RegionDiskTypesListCall { +func (c *RegionTargetHttpProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -90737,7 +106717,7 @@ func (c *RegionDiskTypesListCall) MaxResults(maxResults int64) *RegionDiskTypesL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionDiskTypesListCall) OrderBy(orderBy string) *RegionDiskTypesListCall { +func (c *RegionTargetHttpProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -90745,7 +106725,7 @@ func (c *RegionDiskTypesListCall) OrderBy(orderBy string) *RegionDiskTypesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesListCall { +func (c *RegionTargetHttpProxiesListCall) PageToken(pageToken string) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -90753,7 +106733,7 @@ func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDiskTypesListCall) Fields(s ...googleapi.Field) *RegionDiskTypesListCall { +func (c *RegionTargetHttpProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -90763,7 +106743,7 @@ func (c *RegionDiskTypesListCall) Fields(s ...googleapi.Field) *RegionDiskTypesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionDiskTypesListCall) IfNoneMatch(entityTag string) *RegionDiskTypesListCall { +func (c *RegionTargetHttpProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetHttpProxiesListCall { c.ifNoneMatch_ = entityTag return c } @@ -90771,21 +106751,21 @@ func (c *RegionDiskTypesListCall) IfNoneMatch(entityTag string) *RegionDiskTypes // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDiskTypesListCall) Context(ctx context.Context) *RegionDiskTypesListCall { +func (c *RegionTargetHttpProxiesListCall) Context(ctx context.Context) *RegionTargetHttpProxiesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDiskTypesListCall) Header() http.Header { +func (c *RegionTargetHttpProxiesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -90797,7 +106777,7 @@ func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -90811,14 +106791,14 @@ func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDiskTypes.list" call. -// Exactly one of *RegionDiskTypeList or error will be non-nil. Any +// Do executes the "compute.regionTargetHttpProxies.list" call. +// Exactly one of *TargetHttpProxyList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *RegionDiskTypeList.ServerResponse.Header or (if a response was +// *TargetHttpProxyList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskTypeList, error) { +func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -90837,7 +106817,7 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionDiskTypeList{ + ret := &TargetHttpProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -90849,9 +106829,9 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT } return ret, nil // { - // "description": "Retrieves a list of regional disk types available to the specified project.", + // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", // "httpMethod": "GET", - // "id": "compute.regionDiskTypes.list", + // "id": "compute.regionTargetHttpProxies.list", // "parameterOrder": [ // "project", // "region" @@ -90888,16 +106868,16 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/diskTypes", + // "path": "{project}/regions/{region}/targetHttpProxies", // "response": { - // "$ref": "RegionDiskTypeList" + // "$ref": "TargetHttpProxyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -90911,7 +106891,7 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionDiskTypesListCall) Pages(ctx context.Context, f func(*RegionDiskTypeList) error) error { +func (c *RegionTargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpProxyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -90929,28 +106909,26 @@ func (c *RegionDiskTypesListCall) Pages(ctx context.Context, f func(*RegionDiskT } } -// method id "compute.regionDisks.addResourcePolicies": +// method id "compute.regionTargetHttpProxies.setUrlMap": -type RegionDisksAddResourcePoliciesCall struct { - s *Service - project string - region string - disk string - regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpProxiesSetUrlMapCall struct { + s *Service + project string + region string + targetHttpProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddResourcePolicies: Adds existing resource policies to a regional -// disk. You can only add one policy which will be applied to this disk -// for scheduling snapshot creation. -func (r *RegionDisksService) AddResourcePolicies(project string, region string, disk string, regiondisksaddresourcepoliciesrequest *RegionDisksAddResourcePoliciesRequest) *RegionDisksAddResourcePoliciesCall { - c := &RegionDisksAddResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetUrlMap: Changes the URL map for TargetHttpProxy. 
+func (r *RegionTargetHttpProxiesService) SetUrlMap(project string, region string, targetHttpProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpProxiesSetUrlMapCall { + c := &RegionTargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.regiondisksaddresourcepoliciesrequest = regiondisksaddresourcepoliciesrequest + c.targetHttpProxy = targetHttpProxy + c.urlmapreference = urlmapreference return c } @@ -90968,7 +106946,7 @@ func (r *RegionDisksService) AddResourcePolicies(project string, region string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksAddResourcePoliciesCall) RequestId(requestId string) *RegionDisksAddResourcePoliciesCall { +func (c *RegionTargetHttpProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpProxiesSetUrlMapCall { c.urlParams_.Set("requestId", requestId) return c } @@ -90976,7 +106954,7 @@ func (c *RegionDisksAddResourcePoliciesCall) RequestId(requestId string) *Region // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksAddResourcePoliciesCall { +func (c *RegionTargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpProxiesSetUrlMapCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -90984,35 +106962,35 @@ func (c *RegionDisksAddResourcePoliciesCall) Fields(s ...googleapi.Field) *Regio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksAddResourcePoliciesCall) Context(ctx context.Context) *RegionDisksAddResourcePoliciesCall { +func (c *RegionTargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpProxiesSetUrlMapCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksAddResourcePoliciesCall) Header() http.Header { +func (c *RegionTargetHttpProxiesSetUrlMapCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksaddresourcepoliciesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/addResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -91020,21 +106998,21 @@ func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "targetHttpProxy": c.targetHttpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.addResourcePolicies" call. +// Do executes the "compute.regionTargetHttpProxies.setUrlMap" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -91065,22 +107043,15 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Adds existing resource policies to a regional disk. You can only add one policy which will be applied to this disk for scheduling snapshot creation.", + // "description": "Changes the URL map for TargetHttpProxy.", // "httpMethod": "POST", - // "id": "compute.regionDisks.addResourcePolicies", + // "id": "compute.regionTargetHttpProxies.setUrlMap", // "parameterOrder": [ // "project", // "region", - // "disk" + // "targetHttpProxy" // ], // "parameters": { - // "disk": { - // "description": "The disk name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -91089,7 +107060,7 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -91099,11 +107070,18 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy to set a URL map for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}/addResourcePolicies", + // "path": "{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", // "request": { - // "$ref": "RegionDisksAddResourcePoliciesRequest" + // "$ref": "UrlMapReference" // }, // "response": { // "$ref": "Operation" @@ -91116,26 +107094,24 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionDisks.createSnapshot": +// method id "compute.regionTargetHttpsProxies.delete": -type RegionDisksCreateSnapshotCall struct { - s *Service - project string - region string - disk string - snapshot *Snapshot - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpsProxiesDeleteCall struct { + s *Service + project string + region string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// CreateSnapshot: Creates a snapshot of this regional disk. -func (r *RegionDisksService) CreateSnapshot(project string, region string, disk string, snapshot *Snapshot) *RegionDisksCreateSnapshotCall { - c := &RegionDisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified TargetHttpsProxy resource. +func (r *RegionTargetHttpsProxiesService) Delete(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesDeleteCall { + c := &RegionTargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.snapshot = snapshot + c.targetHttpsProxy = targetHttpsProxy return c } @@ -91153,7 +107129,7 @@ func (r *RegionDisksService) CreateSnapshot(project string, region string, disk // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksCreateSnapshotCall) RequestId(requestId string) *RegionDisksCreateSnapshotCall { +func (c *RegionTargetHttpsProxiesDeleteCall) RequestId(requestId string) *RegionTargetHttpsProxiesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -91161,7 +107137,7 @@ func (c *RegionDisksCreateSnapshotCall) RequestId(requestId string) *RegionDisks // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisksCreateSnapshotCall { +func (c *RegionTargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -91169,57 +107145,224 @@ func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisk // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionDisksCreateSnapshotCall) Context(ctx context.Context) *RegionDisksCreateSnapshotCall { +func (c *RegionTargetHttpsProxiesDeleteCall) Context(ctx context.Context) *RegionTargetHttpsProxiesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksCreateSnapshotCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpsProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetHttpsProxy resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionTargetHttpsProxies.delete", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpsProxies.get": + +type RegionTargetHttpsProxiesGetCall struct { + s *Service + project string + region string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified TargetHttpsProxy resource in the specified +// region. Gets a list of available target HTTP proxies by making a +// list() request. +func (r *RegionTargetHttpsProxiesService) Get(project string, region string, targetHttpsProxy string) *RegionTargetHttpsProxiesGetCall { + c := &RegionTargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpsProxy = targetHttpsProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionTargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetHttpsProxiesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpsProxiesGetCall) Context(ctx context.Context) *RegionTargetHttpsProxiesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionTargetHttpsProxiesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/createSnapshot") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.createSnapshot" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionTargetHttpsProxies.get" call. +// Exactly one of *TargetHttpsProxy or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetHttpsProxy.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -91238,7 +107381,7 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TargetHttpsProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -91250,22 +107393,15 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Creates a snapshot of this regional disk.", - // "httpMethod": "POST", - // "id": "compute.regionDisks.createSnapshot", + // "description": "Returns the specified TargetHttpsProxy resource in the specified region. 
Gets a list of available target HTTP proxies by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionTargetHttpsProxies.get", // "parameterOrder": [ // "project", // "region", - // "disk" + // "targetHttpsProxy" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk to snapshot.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -91274,54 +107410,52 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}/createSnapshot", - // "request": { - // "$ref": "Snapshot" - // }, + // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", // "response": { - // "$ref": "Operation" + // "$ref": "TargetHttpsProxy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionDisks.delete": +// method id "compute.regionTargetHttpsProxies.insert": -type RegionDisksDeleteCall struct { - s *Service - project string - region string - disk string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetHttpsProxiesInsertCall struct { + s *Service + project string + region string + targethttpsproxy *TargetHttpsProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified regional persistent disk. Deleting a -// regional disk removes all the replicas of its data permanently and is -// irreversible. However, deleting a disk does not delete any snapshots -// previously made from the disk. You must separately delete snapshots. 
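For orientation, the new RegionTargetHttpsProxies calls above follow the usual generated-client builder pattern: build the call from the service, optionally set Fields, IfNoneMatch, or RequestId, then invoke Do. Below is a minimal usage sketch for the Get call; it is not part of the vendored diff, and it assumes an import path for the generated package and the conventional RegionTargetHttpsProxies field on Service, neither of which is visible in this hunk.

// Illustrative sketch only, not part of the vendored diff. The import path
// below and the RegionTargetHttpsProxies field on Service are assumptions;
// only the call builder itself appears in this hunk.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed path; this vendor tree may pin a different API version
	"google.golang.org/api/googleapi"
)

// getRegionTargetHttpsProxy fetches a single proxy. If the caller supplies a
// previously observed ETag, IfNoneMatch lets the server answer 304, which we
// report as (nil, nil) via googleapi.IsNotModified.
func getRegionTargetHttpsProxy(ctx context.Context, svc *compute.Service, project, region, name, etag string) (*compute.TargetHttpsProxy, error) {
	call := svc.RegionTargetHttpsProxies.Get(project, region, name).Context(ctx)
	if etag != "" {
		call = call.IfNoneMatch(etag)
	}
	proxy, err := call.Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged since the supplied ETag; no body returned
	}
	if err != nil {
		return nil, err
	}
	return proxy, nil
}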
-func (r *RegionDisksService) Delete(project string, region string, disk string) *RegionDisksDeleteCall { - c := &RegionDisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a TargetHttpsProxy resource in the specified project +// and region using the data included in the request. +func (r *RegionTargetHttpsProxiesService) Insert(project string, region string, targethttpsproxy *TargetHttpsProxy) *RegionTargetHttpsProxiesInsertCall { + c := &RegionTargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk + c.targethttpsproxy = targethttpsproxy return c } @@ -91339,7 +107473,7 @@ func (r *RegionDisksService) Delete(project string, region string, disk string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCall { +func (c *RegionTargetHttpsProxiesInsertCall) RequestId(requestId string) *RegionTargetHttpsProxiesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -91347,7 +107481,7 @@ func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteCall { +func (c *RegionTargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -91355,32 +107489,37 @@ func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksDeleteCall) Context(ctx context.Context) *RegionDisksDeleteCall { +func (c *RegionTargetHttpsProxiesInsertCall) Context(ctx context.Context) *RegionTargetHttpsProxiesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksDeleteCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -91388,19 +107527,18 @@ func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.delete" call. +// Do executes the "compute.regionTargetHttpsProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -91431,21 +107569,14 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", - // "httpMethod": "DELETE", - // "id": "compute.regionDisks.delete", + // "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpsProxies.insert", // "parameterOrder": [ // "project", - // "region", - // "disk" + // "region" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -91454,7 +107585,7 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -91466,7 +107597,10 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}", + // "path": "{project}/regions/{region}/targetHttpsProxies", + // "request": { + // "$ref": "TargetHttpsProxy" + // }, // "response": { // "$ref": "Operation" // }, @@ -91478,32 +107612,94 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.regionDisks.get": +// method id "compute.regionTargetHttpsProxies.list": -type RegionDisksGetCall struct { +type RegionTargetHttpsProxiesListCall struct { s *Service project string region string - disk string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns a specified regional persistent disk. 
-func (r *RegionDisksService) Get(project string, region string, disk string) *RegionDisksGetCall { - c := &RegionDisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of TargetHttpsProxy resources available to +// the specified project in the specified region. +func (r *RegionTargetHttpsProxiesService) List(project string, region string) *RegionTargetHttpsProxiesListCall { + c := &RegionTargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionTargetHttpsProxiesListCall) Filter(filter string) *RegionTargetHttpsProxiesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionTargetHttpsProxiesListCall) MaxResults(maxResults int64) *RegionTargetHttpsProxiesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionTargetHttpsProxiesListCall) OrderBy(orderBy string) *RegionTargetHttpsProxiesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
+func (c *RegionTargetHttpsProxiesListCall) PageToken(pageToken string) *RegionTargetHttpsProxiesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksGetCall) Fields(s ...googleapi.Field) *RegionDisksGetCall { +func (c *RegionTargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -91513,7 +107709,7 @@ func (c *RegionDisksGetCall) Fields(s ...googleapi.Field) *RegionDisksGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionDisksGetCall) IfNoneMatch(entityTag string) *RegionDisksGetCall { +func (c *RegionTargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetHttpsProxiesListCall { c.ifNoneMatch_ = entityTag return c } @@ -91521,21 +107717,21 @@ func (c *RegionDisksGetCall) IfNoneMatch(entityTag string) *RegionDisksGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksGetCall) Context(ctx context.Context) *RegionDisksGetCall { +func (c *RegionTargetHttpsProxiesListCall) Context(ctx context.Context) *RegionTargetHttpsProxiesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksGetCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -91547,7 +107743,7 @@ func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -91557,19 +107753,18 @@ func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.get" call. -// Exactly one of *Disk or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Disk.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { +// Do executes the "compute.regionTargetHttpsProxies.list" call. +// Exactly one of *TargetHttpsProxyList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TargetHttpsProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -91588,7 +107783,7 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Disk{ + ret := &TargetHttpsProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -91600,20 +107795,35 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified regional persistent disk.", + // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", // "httpMethod": "GET", - // "id": "compute.regionDisks.get", + // "id": "compute.regionTargetHttpsProxies.list", // "parameterOrder": [ // "project", - // "region", - // "disk" + // "region" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -91624,16 +107834,16 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}", + // "path": "{project}/regions/{region}/targetHttpsProxies", // "response": { - // "$ref": "Disk" + // "$ref": "TargetHttpsProxyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -91644,25 +107854,47 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } -// method id "compute.regionDisks.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionTargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionDisksInsertCall struct { - s *Service - project string - region string - disk *Disk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionTargetHttpsProxies.setSslCertificates": + +type RegionTargetHttpsProxiesSetSslCertificatesCall struct { + s *Service + project string + region string + targetHttpsProxy string + regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a persistent regional disk in the specified project -// using the data included in the request. -func (r *RegionDisksService) Insert(project string, region string, disk *Disk) *RegionDisksInsertCall { - c := &RegionDisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. 
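The List call pairs with the Pages helper just added above, which drives the pageToken/NextPageToken loop on the caller's behalf. The sketch below collects proxy names across all pages; it shares the assumed package alias and Service field of the earlier sketch, and additionally assumes the conventional Items and Name fields, which are defined outside this hunk.

// Collects the names of all TargetHttpsProxy resources in one region.
// Same assumed imports and Service field as the previous sketch; Items and
// Name are the conventional generated fields, defined outside this hunk.
func listRegionTargetHttpsProxyNames(ctx context.Context, svc *compute.Service, project, region string) ([]string, error) {
	var names []string
	call := svc.RegionTargetHttpsProxies.List(project, region).MaxResults(100)
	// Pages invokes Do once per page and feeds NextPageToken back into
	// pageToken until it comes back empty (see the helper above).
	err := call.Pages(ctx, func(page *compute.TargetHttpsProxyList) error {
		for _, p := range page.Items {
			names = append(names, p.Name)
		}
		return nil
	})
	return names, err
}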
+func (r *RegionTargetHttpsProxiesService) SetSslCertificates(project string, region string, targetHttpsProxy string, regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest) *RegionTargetHttpsProxiesSetSslCertificatesCall { + c := &RegionTargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk + c.targetHttpsProxy = targetHttpsProxy + c.regiontargethttpsproxiessetsslcertificatesrequest = regiontargethttpsproxiessetsslcertificatesrequest return c } @@ -91680,22 +107912,15 @@ func (r *RegionDisksService) Insert(project string, region string, disk *Disk) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksInsertCall) RequestId(requestId string) *RegionDisksInsertCall { +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetSslCertificatesCall { c.urlParams_.Set("requestId", requestId) return c } -// SourceImage sets the optional parameter "sourceImage": Source image -// to restore onto a disk. -func (c *RegionDisksInsertCall) SourceImage(sourceImage string) *RegionDisksInsertCall { - c.urlParams_.Set("sourceImage", sourceImage) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksInsertCall) Fields(s ...googleapi.Field) *RegionDisksInsertCall { +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetSslCertificatesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -91703,35 +107928,35 @@ func (c *RegionDisksInsertCall) Fields(s ...googleapi.Field) *RegionDisksInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksInsertCall) Context(ctx context.Context) *RegionDisksInsertCall { +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetSslCertificatesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksInsertCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiontargethttpsproxiessetsslcertificatesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -91739,20 +107964,21 @@ func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.insert" call. +// Do executes the "compute.regionTargetHttpsProxies.setSslCertificates" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -91783,12 +108009,13 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a persistent regional disk in the specified project using the data included in the request.", + // "description": "Replaces SslCertificates for TargetHttpsProxy.", // "httpMethod": "POST", - // "id": "compute.regionDisks.insert", + // "id": "compute.regionTargetHttpsProxies.setSslCertificates", // "parameterOrder": [ // "project", - // "region" + // "region", + // "targetHttpsProxy" // ], // "parameters": { // "project": { @@ -91799,7 +108026,7 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -91810,15 +108037,17 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "location": "query", // "type": "string" // }, - // "sourceImage": { - // "description": "Optional. 
Source image to restore onto a disk.", - // "location": "query", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks", + // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", // "request": { - // "$ref": "Disk" + // "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" // }, // "response": { // "$ref": "Operation" @@ -91831,159 +108060,110 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.regionDisks.list": +// method id "compute.regionTargetHttpsProxies.setUrlMap": -type RegionDisksListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionTargetHttpsProxiesSetUrlMapCall struct { + s *Service + project string + region string + targetHttpsProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of persistent disks contained within the -// specified region. -func (r *RegionDisksService) List(project string, region string) *RegionDisksListCall { - c := &RegionDisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetUrlMap: Changes the URL map for TargetHttpsProxy. +func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region string, targetHttpsProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpsProxiesSetUrlMapCall { + c := &RegionTargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.targetHttpsProxy = targetHttpsProxy + c.urlmapreference = urlmapreference return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionDisksListCall) MaxResults(maxResults int64) *RegionDisksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionDisksListCall) OrderBy(orderBy string) *RegionDisksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetUrlMapCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksListCall) Fields(s ...googleapi.Field) *RegionDisksListCall { +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetUrlMapCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionDisksListCall) IfNoneMatch(entityTag string) *RegionDisksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionDisksListCall) Context(ctx context.Context) *RegionDisksListCall { +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetUrlMapCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksListCall) Header() http.Header { +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.list" call. -// Exactly one of *DiskList or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskList.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionTargetHttpsProxies.setUrlMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -92002,7 +108182,7 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -92014,37 +108194,15 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error } return ret, nil // { - // "description": "Retrieves the list of persistent disks contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionDisks.list", + // "description": "Changes the URL map for TargetHttpsProxy.", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpsProxies.setUrlMap", // "parameterOrder": [ // "project", - // "region" + // "region", + // "targetHttpsProxy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -92053,86 +108211,64 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy to set a URL map for.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks", + // "path": "{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, // "response": { - // "$ref": "DiskList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionDisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionDisks.removeResourcePolicies": +// method id "compute.regionUrlMaps.delete": -type RegionDisksRemoveResourcePoliciesCall struct { - s *Service - project string - region string - disk string - regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionUrlMapsDeleteCall struct { + s *Service + project string + region string + urlMap string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveResourcePolicies: Removes resource policies from a regional -// disk. 
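The setUrlMap call above, like the other mutating calls in this hunk, posts a small reference body and returns an Operation rather than the updated resource. A sketch under the same assumptions as the earlier ones, additionally assuming UrlMapReference carries a UrlMap URL field (its definition is outside this hunk):

// Points an existing regional TargetHttpsProxy at a different UrlMap.
// Same assumed imports as above; the UrlMap field on UrlMapReference is an
// assumption, since that type is defined outside this hunk.
func setRegionTargetHttpsProxyUrlMap(ctx context.Context, svc *compute.Service, project, region, proxy, urlMapLink string) (*compute.Operation, error) {
	ref := &compute.UrlMapReference{UrlMap: urlMapLink}
	op, err := svc.RegionTargetHttpsProxies.SetUrlMap(project, region, proxy, ref).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	// The returned Operation is asynchronous; callers normally poll it
	// before depending on the new routing.
	return op, nil
}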
-func (r *RegionDisksService) RemoveResourcePolicies(project string, region string, disk string, regiondisksremoveresourcepoliciesrequest *RegionDisksRemoveResourcePoliciesRequest) *RegionDisksRemoveResourcePoliciesCall { - c := &RegionDisksRemoveResourcePoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified UrlMap resource. +func (r *RegionUrlMapsService) Delete(project string, region string, urlMap string) *RegionUrlMapsDeleteCall { + c := &RegionUrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.regiondisksremoveresourcepoliciesrequest = regiondisksremoveresourcepoliciesrequest + c.urlMap = urlMap return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksRemoveResourcePoliciesCall) RequestId(requestId string) *RegionDisksRemoveResourcePoliciesCall { +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. +func (c *RegionUrlMapsDeleteCall) RequestId(requestId string) *RegionUrlMapsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -92140,7 +108276,7 @@ func (c *RegionDisksRemoveResourcePoliciesCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *RegionDisksRemoveResourcePoliciesCall { +func (c *RegionUrlMapsDeleteCall) Fields(s ...googleapi.Field) *RegionUrlMapsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -92148,37 +108284,32 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksRemoveResourcePoliciesCall) Context(ctx context.Context) *RegionDisksRemoveResourcePoliciesCall { +func (c *RegionUrlMapsDeleteCall) Context(ctx context.Context) *RegionUrlMapsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksRemoveResourcePoliciesCall) Header() http.Header { +func (c *RegionUrlMapsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksremoveresourcepoliciesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/removeResourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -92186,19 +108317,19 @@ func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Res googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "disk": c.disk, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.removeResourcePolicies" call. +// Do executes the "compute.regionUrlMaps.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionUrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -92229,22 +108360,15 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Removes resource policies from a regional disk.", - // "httpMethod": "POST", - // "id": "compute.regionDisks.removeResourcePolicies", + // "description": "Deletes the specified UrlMap resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionUrlMaps.delete", // "parameterOrder": [ // "project", // "region", - // "disk" + // "urlMap" // ], // "parameters": { - // "disk": { - // "description": "The disk name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -92253,22 +108377,26 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", // "location": "query", // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}/removeResourcePolicies", - // "request": { - // "$ref": "RegionDisksRemoveResourcePoliciesRequest" - // }, + // "path": "{project}/regions/{region}/urlMaps/{urlMap}", // "response": { // "$ref": "Operation" // }, @@ -92280,90 +108408,79 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionDisks.resize": +// method id "compute.regionUrlMaps.get": -type RegionDisksResizeCall struct { - s *Service - project string - region string - disk string - regiondisksresizerequest *RegionDisksResizeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionUrlMapsGetCall struct { + s *Service + project string + region string + urlMap string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the specified regional persistent disk. 
-func (r *RegionDisksService) Resize(project string, region string, disk string, regiondisksresizerequest *RegionDisksResizeRequest) *RegionDisksResizeCall { - c := &RegionDisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified UrlMap resource. Gets a list of available +// URL maps by making a list() request. +func (r *RegionUrlMapsService) Get(project string, region string, urlMap string) *RegionUrlMapsGetCall { + c := &RegionUrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.regiondisksresizerequest = regiondisksresizerequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCall { - c.urlParams_.Set("requestId", requestId) + c.urlMap = urlMap return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeCall { +func (c *RegionUrlMapsGetCall) Fields(s ...googleapi.Field) *RegionUrlMapsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionUrlMapsGetCall) IfNoneMatch(entityTag string) *RegionUrlMapsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksResizeCall) Context(ctx context.Context) *RegionDisksResizeCall { +func (c *RegionUrlMapsGetCall) Context(ctx context.Context) *RegionUrlMapsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksResizeCall) Header() http.Header { +func (c *RegionUrlMapsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksresizerequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -92371,19 +108488,19 @@ func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "disk": c.disk, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionUrlMaps.get" call. +// Exactly one of *UrlMap or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *UrlMap.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -92402,7 +108519,7 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &UrlMap{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -92414,95 +108531,75 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Resizes the specified regional persistent disk.", - // "httpMethod": "POST", - // "id": "compute.regionDisks.resize", + // "description": "Returns the specified UrlMap resource. 
Gets a list of available URL maps by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionUrlMaps.get", // "parameterOrder": [ // "project", // "region", - // "disk" + // "urlMap" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { - // "description": "The project ID for this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "urlMap": { + // "description": "Name of the UrlMap resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}/resize", - // "request": { - // "$ref": "RegionDisksResizeRequest" - // }, + // "path": "{project}/regions/{region}/urlMaps/{urlMap}", // "response": { - // "$ref": "Operation" + // "$ref": "UrlMap" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionDisks.setLabels": +// method id "compute.regionUrlMaps.insert": -type RegionDisksSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionUrlMapsInsertCall struct { + s *Service + project string + region string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on the target regional disk. -func (r *RegionDisksService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionDisksSetLabelsCall { - c := &RegionDisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a UrlMap resource in the specified project using the +// data included in the request. 
+func (r *RegionUrlMapsService) Insert(project string, region string, urlmap *UrlMap) *RegionUrlMapsInsertCall { + c := &RegionUrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest + c.urlmap = urlmap return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLabelsCall { +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. +func (c *RegionUrlMapsInsertCall) RequestId(requestId string) *RegionUrlMapsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -92510,7 +108607,7 @@ func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetLabelsCall { +func (c *RegionUrlMapsInsertCall) Fields(s ...googleapi.Field) *RegionUrlMapsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -92518,35 +108615,35 @@ func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetL // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksSetLabelsCall) Context(ctx context.Context) *RegionDisksSetLabelsCall { +func (c *RegionUrlMapsInsertCall) Context(ctx context.Context) *RegionUrlMapsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksSetLabelsCall) Header() http.Header { +func (c *RegionUrlMapsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -92554,21 +108651,20 @@ func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.setLabels" call. +// Do executes the "compute.regionUrlMaps.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionUrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -92599,13 +108695,12 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets the labels on the target regional disk.", + // "description": "Creates a UrlMap resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.regionDisks.setLabels", + // "id": "compute.regionUrlMaps.insert", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "region" // ], // "parameters": { // "project": { @@ -92616,28 +108711,21 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "region": { - // "description": "The region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", // "location": "query", // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{resource}/setLabels", + // "path": "{project}/regions/{region}/urlMaps", // "request": { - // "$ref": "RegionSetLabelsRequest" + // "$ref": "UrlMap" // }, // "response": { // "$ref": "Operation" @@ -92650,34 +108738,41 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.regionDisks.testIamPermissions": +// method id "compute.regionUrlMaps.invalidateCache": -type RegionDisksTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionUrlMapsInvalidateCacheCall struct { + s *Service + project string + region string + urlMap string + cacheinvalidationrule *CacheInvalidationRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionDisksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionDisksTestIamPermissionsCall { - c := &RegionDisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// InvalidateCache: Initiates a cache invalidation operation, +// invalidating the specified path, scoped to the specified UrlMap. +func (r *RegionUrlMapsService) InvalidateCache(project string, region string, urlMap string, cacheinvalidationrule *CacheInvalidationRule) *RegionUrlMapsInvalidateCacheCall { + c := &RegionUrlMapsInvalidateCacheCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.urlMap = urlMap + c.cacheinvalidationrule = cacheinvalidationrule + return c +} + +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. +func (c *RegionUrlMapsInvalidateCacheCall) RequestId(requestId string) *RegionUrlMapsInvalidateCacheCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionDisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionDisksTestIamPermissionsCall { +func (c *RegionUrlMapsInvalidateCacheCall) Fields(s ...googleapi.Field) *RegionUrlMapsInvalidateCacheCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -92685,35 +108780,35 @@ func (c *RegionDisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *Region // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksTestIamPermissionsCall) Context(ctx context.Context) *RegionDisksTestIamPermissionsCall { +func (c *RegionUrlMapsInvalidateCacheCall) Context(ctx context.Context) *RegionUrlMapsInvalidateCacheCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { +func (c *RegionUrlMapsInvalidateCacheCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.cacheinvalidationrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}/invalidateCache") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -92721,21 +108816,21 @@ func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.regionUrlMaps.invalidateCache" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *RegionUrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -92754,7 +108849,7 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -92766,13 +108861,13 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", // "httpMethod": "POST", - // "id": "compute.regionDisks.testIamPermissions", + // "id": "compute.regionUrlMaps.invalidateCache", // "parameterOrder": [ // "project", // "region", - // "resource" + // "urlMap" // ], // "parameters": { // "project": { @@ -92783,156 +108878,193 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + // "location": "query", + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/urlMaps/{urlMap}/invalidateCache", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "CacheInvalidationRule" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionInstanceGroupManagers.abandonInstances": +// method id "compute.regionUrlMaps.list": -type RegionInstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionUrlMapsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Flags the specified instances to be immediately -// removed from the managed instance group. Abandoning an instance does -// not delete the instance, but it does remove the instance from any -// target pools that are applied by the managed instance group. 
This -// method reduces the targetSize of the managed instance group by the -// number of instances that you abandon. This operation is marked as -// DONE when the action is scheduled even if the instances have not yet -// been removed from the group. You must separately verify the status of -// the abandoning action with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { - c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of UrlMap resources available to the +// specified project in the specified region. +func (r *RegionUrlMapsService) List(project string, region string) *RegionUrlMapsListCall { + c := &RegionUrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. 
However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *RegionUrlMapsListCall) Filter(filter string) *RegionUrlMapsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionUrlMapsListCall) MaxResults(maxResults int64) *RegionUrlMapsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionUrlMapsListCall) OrderBy(orderBy string) *RegionUrlMapsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionUrlMapsListCall) PageToken(pageToken string) *RegionUrlMapsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionUrlMapsListCall) Fields(s ...googleapi.Field) *RegionUrlMapsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionUrlMapsListCall) IfNoneMatch(entityTag string) *RegionUrlMapsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionUrlMapsListCall) Context(ctx context.Context) *RegionUrlMapsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *RegionUrlMapsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionUrlMaps.list" call. +// Exactly one of *UrlMapList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *UrlMapList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -92951,7 +109083,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &UrlMapList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -92963,19 +109095,35 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Flags the specified instances to be immediately removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.abandonInstances", + // "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionUrlMaps.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -92988,67 +109136,73 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" - // }, + // "path": "{project}/regions/{region}/urlMaps", // "response": { - // "$ref": "Operation" + // "$ref": "UrlMapList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionUrlMapsListCall) Pages(ctx context.Context, f func(*UrlMapList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionInstanceGroupManagersDeleteCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionUrlMaps.patch": + +type RegionUrlMapsPatchCall struct { + s *Service + project string + region string + urlMap string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. -func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { - c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified UrlMap resource with the data included +// in the request. This method supports PATCH semantics and uses JSON +// merge patch format and processing rules. 
+func (r *RegionUrlMapsService) Patch(project string, region string, urlMap string, urlmap *UrlMap) *RegionUrlMapsPatchCall { + c := &RegionUrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager + c.urlMap = urlMap + c.urlmap = urlmap return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteCall { +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. +func (c *RegionUrlMapsPatchCall) RequestId(requestId string) *RegionUrlMapsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -93056,7 +109210,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionUrlMapsPatchCall) Fields(s ...googleapi.Field) *RegionUrlMapsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -93064,52 +109218,57 @@ func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionUrlMapsPatchCall) Context(ctx context.Context) *RegionUrlMapsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { +func (c *RegionUrlMapsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.delete" call. +// Do executes the "compute.regionUrlMaps.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionUrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -93140,21 +109299,15 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group.", - // "httpMethod": "DELETE", - // "id": "compute.regionInstanceGroupManagers.delete", + // "description": "Patches the specified UrlMap resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionUrlMaps.patch", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "urlMap" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -93165,16 +109318,27 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", // "location": "query", // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/regions/{region}/urlMaps/{urlMap}", + // "request": { + // "$ref": "UrlMap" + // }, // "response": { // "$ref": "Operation" // }, @@ -93186,60 +109350,33 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.deleteInstances": +// method id "compute.regionUrlMaps.update": -type RegionInstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionUrlMapsUpdateCall struct { + s *Service + project string + region string + urlMap string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteInstances: Flags the specified instances in the managed -// instance group to be immediately deleted. The instances are also -// removed from any target pools of which they were a member. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you delete. The deleteInstances operation is -// marked DONE if the deleteInstances request is successful. The -// underlying actions take additional time. You must separately verify -// the status of the deleting action with the listmanagedinstances -// method. 
-// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { - c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified UrlMap resource with the data included +// in the request. +func (r *RegionUrlMapsService) Update(project string, region string, urlMap string, urlmap *UrlMap) *RegionUrlMapsUpdateCall { + c := &RegionUrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest + c.urlMap = urlMap + c.urlmap = urlmap return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteInstancesCall { +// RequestId sets the optional parameter "requestId": begin_interface: +// MixerMutationRequestBuilder Request ID to support idempotency. +func (c *RegionUrlMapsUpdateCall) RequestId(requestId string) *RegionUrlMapsUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -93247,7 +109384,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId str // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionUrlMapsUpdateCall) Fields(s ...googleapi.Field) *RegionUrlMapsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -93255,57 +109392,57 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.F // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionUrlMapsUpdateCall) Context(ctx context.Context) *RegionUrlMapsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { +func (c *RegionUrlMapsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. +// Do executes the "compute.regionUrlMaps.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionUrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -93336,21 +109473,15 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately deleted. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. The deleteInstances operation is marked DONE if the deleteInstances request is successful. The underlying actions take additional time. 
You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.deleteInstances", + // "description": "Updates the specified UrlMap resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.regionUrlMaps.update", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "urlMap" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -93361,18 +109492,26 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", // "location": "query", // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "path": "{project}/regions/{region}/urlMaps/{urlMap}", // "request": { - // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" + // "$ref": "UrlMap" // }, // "response": { // "$ref": "Operation" @@ -93385,99 +109524,93 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca } -// method id "compute.regionInstanceGroupManagers.get": +// method id "compute.regionUrlMaps.validate": -type RegionInstanceGroupManagersGetCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionUrlMapsValidateCall struct { + s *Service + project string + region string + urlMap string + regionurlmapsvalidaterequest *RegionUrlMapsValidateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. 
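Editor's note: the update variant added in the same hunk issues a PUT with a full UrlMap, so the resource should be sent in its entirety rather than as a sparse patch. A small sketch reusing the svc client from the Patch example; project and region remain placeholders.

// updateRegionURLMap replaces the whole regional URL map (compute.regionUrlMaps.update, HTTP PUT).
// Unlike Patch, this is a full replacement, so pass a complete *compute.UrlMap.
func updateRegionURLMap(ctx context.Context, svc *compute.Service, full *compute.UrlMap) (*compute.Operation, error) {
	return svc.RegionUrlMaps.Update("my-project", "us-central1", full.Name, full).
		Context(ctx).
		Do()
}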
-func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { - c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Validate: Runs static validation for the UrlMap. In particular, the +// tests of the provided UrlMap will be run. Calling this method does +// NOT create the UrlMap. +func (r *RegionUrlMapsService) Validate(project string, region string, urlMap string, regionurlmapsvalidaterequest *RegionUrlMapsValidateRequest) *RegionUrlMapsValidateCall { + c := &RegionUrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager + c.urlMap = urlMap + c.regionurlmapsvalidaterequest = regionurlmapsvalidaterequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { +func (c *RegionUrlMapsValidateCall) Fields(s ...googleapi.Field) *RegionUrlMapsValidateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { +func (c *RegionUrlMapsValidateCall) Context(ctx context.Context) *RegionUrlMapsValidateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { +func (c *RegionUrlMapsValidateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionUrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionurlmapsvalidaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/urlMaps/{urlMap}/validate") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// Do executes the "compute.regionUrlMaps.validate" call. +// Exactly one of *UrlMapsValidateResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was +// *UrlMapsValidateResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidateResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -93496,7 +109629,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManager{ + ret := &UrlMapsValidateResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -93508,21 +109641,15 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.get", + // "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. 
Calling this method does NOT create the UrlMap.", + // "httpMethod": "POST", + // "id": "compute.regionUrlMaps.validate", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "urlMap" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group to return.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -93533,113 +109660,105 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* // "region": { // "description": "Name of the region scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to be validated as.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/regions/{region}/urlMaps/{urlMap}/validate", + // "request": { + // "$ref": "RegionUrlMapsValidateRequest" + // }, // "response": { - // "$ref": "InstanceGroupManager" + // "$ref": "UrlMapsValidateResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionInstanceGroupManagers.insert": +// method id "compute.regions.get": -type RegionInstanceGroupManagersInsertCall struct { - s *Service - project string - region string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionsGetCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, instances in -// the group are created using the specified instance template. This -// operation is marked as DONE when the group is created even if the -// instances in the group have not yet been created. You must separately -// verify the status of the individual instances with the -// listmanagedinstances method. -// -// A regional managed instance group can contain up to 2000 instances. -func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { - c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Region resource. Gets a list of available +// regions by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get +func (r *RegionsService) Get(project string, region string) *RegionsGetCall { + c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instancegroupmanager = instancegroupmanager - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
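Editor's note: the validate call above runs server-side static validation of a UrlMap without creating or changing anything. A sketch of how it might be driven; the Resource field on RegionUrlMapsValidateRequest and the Result field on the response are assumed from the client's type definitions, which sit outside this hunk.

// validateRegionURLMap asks the API to statically validate um (compute.regionUrlMaps.validate).
// Nothing is created or modified on the server by this call.
func validateRegionURLMap(ctx context.Context, svc *compute.Service, um *compute.UrlMap) (*compute.UrlMapsValidateResponse, error) {
	req := &compute.RegionUrlMapsValidateRequest{Resource: um} // field name assumed
	resp, err := svc.RegionUrlMaps.Validate("my-project", "us-central1", um.Name, req).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	// resp.Result is expected to carry the load/test outcome for the validated map.
	return resp, nil
}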
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *RegionInstanceGroupManagersInsertCall { - c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { +func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { +func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { +func (c *RegionsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -93651,14 +109770,14 @@ func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regions.get" call. +// Exactly one of *Region or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Region.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -93677,7 +109796,7 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Region{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -93689,9 +109808,9 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.insert", + // "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regions.get", // "parameterOrder": [ // "project", // "region" @@ -93705,50 +109824,43 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "{project}/regions/{region}", // "response": { - // "$ref": "Operation" + // "$ref": "Region" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.list": +// method id "compute.regions.list": -type RegionInstanceGroupManagersListCall struct { +type RegionsListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of managed instance groups that are -// contained within the specified region. -func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { - c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of region resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list +func (r *RegionsService) List(project string) *RegionsListCall { + c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -93774,7 +109886,7 @@ func (r *RegionInstanceGroupManagersService) List(project string, region string) // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { +func (c *RegionsListCall) Filter(filter string) *RegionsListCall { c.urlParams_.Set("filter", filter) return c } @@ -93785,7 +109897,7 @@ func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInsta // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { +func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -93802,7 +109914,7 @@ func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *Regi // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { +func (c *RegionsListCall) OrderBy(orderBy string) *RegionsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -93810,7 +109922,7 @@ func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionIns // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
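Editor's note: compute.regions.get returns a single Region and, like the other read calls here, supports conditional requests through IfNoneMatch. A sketch using googleapi.IsNotModified (imported from google.golang.org/api/googleapi), as the generated comments themselves suggest; arguments are placeholders.

// getRegion fetches one Region; when a cached ETag is supplied and still current,
// the server answers 304 and the cached copy can be kept.
func getRegion(ctx context.Context, svc *compute.Service, project, region, cachedETag string) (*compute.Region, error) {
	call := svc.Regions.Get(project, region).Context(ctx)
	if cachedETag != "" {
		call = call.IfNoneMatch(cachedETag)
	}
	r, err := call.Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // not modified: reuse the cached Region
	}
	return r, err
}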
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { +func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -93818,7 +109930,7 @@ func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *Regio // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { +func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -93828,7 +109940,7 @@ func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *Regi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { +func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall { c.ifNoneMatch_ = entityTag return c } @@ -93836,21 +109948,21 @@ func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { +func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListCall) Header() http.Header { +func (c *RegionsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -93862,7 +109974,7 @@ func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Respo var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -93871,19 +109983,18 @@ func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Respo req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.list" call. -// Exactly one of *RegionInstanceGroupManagerList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { +// Do executes the "compute.regions.list" call. +// Exactly one of *RegionList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RegionList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -93902,7 +110013,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupManagerList{ + ret := &RegionList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -93914,12 +110025,11 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", + // "description": "Retrieves the list of region resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.list", + // "id": "compute.regions.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -93951,17 +110061,11 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers", + // "path": "{project}/regions", // "response": { - // "$ref": "RegionInstanceGroupManagerList" + // "$ref": "RegionList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -93975,7 +110079,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { +func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -93993,27 +110097,21 @@ func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func( } } -// method id "compute.regionInstanceGroupManagers.listManagedInstances": +// method id "compute.reservations.aggregatedList": -type RegionInstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ReservationsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListManagedInstances: Lists the instances in the managed instance -// group and instances that are scheduled to be created. The list -// includes any current actions that the group has scheduled for its -// instances. -func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { - c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of reservations. +func (r *ReservationsService) AggregatedList(project string) *ReservationsAggregatedListCall { + c := &ReservationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager return c } @@ -94039,7 +110137,7 @@ func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *ReservationsAggregatedListCall) Filter(filter string) *ReservationsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -94050,12 +110148,12 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter stri // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *ReservationsAggregatedListCall) MaxResults(maxResults int64) *ReservationsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// OrderBy sets the optional parameter "order_by": Sorts list results by +// OrderBy sets the optional parameter "orderBy": Sorts list results by // a certain order. By default, results are returned in alphanumerical // order based on the resource name. // @@ -94067,15 +110165,15 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResu // // Currently, only sorting by name or creationTimestamp desc is // supported. 
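Editor's note: the regions list call above supports the usual filter/maxResults/pageToken knobs, and the generated Pages helper drives the pageToken loop. A sketch that walks every page; the Items and Name fields come from the standard RegionList and Region types, which are not part of this hunk.

// listRegionNames collects the names of all regions visible to project,
// letting Pages handle nextPageToken internally.
func listRegionNames(ctx context.Context, svc *compute.Service, project string) ([]string, error) {
	var names []string
	err := svc.Regions.List(project).Pages(ctx, func(page *compute.RegionList) error {
		for _, r := range page.Items {
			names = append(names, r.Name)
		}
		return nil // a non-nil error here stops the iteration
	})
	return names, err
}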
-func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("order_by", orderBy) +func (c *ReservationsAggregatedListCall) OrderBy(orderBy string) *ReservationsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *ReservationsAggregatedListCall) PageToken(pageToken string) *ReservationsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -94083,62 +110181,71 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToke // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *ReservationsAggregatedListCall) Fields(s ...googleapi.Field) *ReservationsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ReservationsAggregatedListCall) IfNoneMatch(entityTag string) *ReservationsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *ReservationsAggregatedListCall) Context(ctx context.Context) *ReservationsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { +func (c *ReservationsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/reservations") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. -// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade -// r or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { +// Do executes the "compute.reservations.aggregatedList" call. +// Exactly one of *ReservationAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ReservationAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*ReservationAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -94157,7 +110264,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupManagersListInstancesResponse{ + ret := &ReservationAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -94169,13 +110276,11 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.listManagedInstances", + // "description": "Retrieves an aggregated list of reservations.", + // "httpMethod": "GET", + // "id": "compute.reservations.aggregatedList", // "parameterOrder": [ - // "project", - // "region", - // "instanceGroupManager" + // "project" // ], // "parameters": { // "filter": { @@ -94183,12 +110288,6 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "location": "query", // "type": "string" // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. 
Acceptable values are 0 to 500, inclusive. (Default: 500)", @@ -94197,7 +110296,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "minimum": "0", // "type": "integer" // }, - // "order_by": { + // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" @@ -94213,17 +110312,11 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "path": "{project}/aggregated/reservations", // "response": { - // "$ref": "RegionInstanceGroupManagersListInstancesResponse" + // "$ref": "ReservationAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -94237,7 +110330,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { +func (c *ReservationsAggregatedListCall) Pages(ctx context.Context, f func(*ReservationAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -94255,32 +110348,24 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context. } } -// method id "compute.regionInstanceGroupManagers.patch": +// method id "compute.reservations.delete": -type RegionInstanceGroupManagersPatchCall struct { - s *Service - project string - region string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ReservationsDeleteCall struct { + s *Service + project string + zone string + reservation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listmanagedinstances method. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. 
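Editor's note: compute.reservations.aggregatedList groups reservations by scope (one entry per zone) rather than returning a flat list. A sketch under the assumption that, as in the client's other aggregated lists, Items is a map from scope name to a ReservationsScopedList whose Reservations field holds the entries; the filter expression is only illustrative.

// listReservationsByZone gathers reservations across all zones of a project.
func listReservationsByZone(ctx context.Context, svc *compute.Service, project string) (map[string][]*compute.Reservation, error) {
	out := make(map[string][]*compute.Reservation)
	err := svc.Reservations.AggregatedList(project).
		Filter(`status = "READY"`). // optional server-side filter, shown for illustration
		Pages(ctx, func(page *compute.ReservationAggregatedList) error {
			for scope, scoped := range page.Items {
				out[scope] = append(out[scope], scoped.Reservations...)
			}
			return nil
		})
	return out, err
}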
-func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { - c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified reservation. +func (r *ReservationsService) Delete(project string, zone string, reservation string) *ReservationsDeleteCall { + c := &ReservationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.zone = zone + c.reservation = reservation return c } @@ -94298,7 +110383,7 @@ func (r *RegionInstanceGroupManagersService) Patch(project string, region string // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { +func (c *ReservationsDeleteCall) RequestId(requestId string) *ReservationsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -94306,7 +110391,7 @@ func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *Regi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { +func (c *ReservationsDeleteCall) Fields(s ...googleapi.Field) *ReservationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -94314,57 +110399,52 @@ func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { +func (c *ReservationsDeleteCall) Context(ctx context.Context) *ReservationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { +func (c *ReservationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{reservation}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "reservation": c.reservation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.patch" call. +// Do executes the "compute.reservations.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ReservationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -94395,21 +110475,15 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.regionInstanceGroupManagers.patch", + // "description": "Deletes the specified reservation.", + // "httpMethod": "DELETE", + // "id": "compute.reservations.delete", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "zone", + // "reservation" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -94417,22 +110491,27 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "reservation": { + // "description": "Name of the reservation to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "{project}/zones/{zone}/reservations/{reservation}", // "response": { // "$ref": "Operation" // }, @@ -94444,124 +110523,98 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.recreateInstances": - -type RegionInstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +// method id "compute.reservations.get": -// RecreateInstances: Flags the specified instances in the managed -// instance group to be immediately recreated. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the flag is set -// even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. 
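Editor's note: the reservations delete call above is a plain zonal delete returning an Operation, and the requestId parameter it documents makes retries safe. A sketch with placeholder arguments, reusing the earlier client setup.

// deleteReservation removes one reservation (compute.reservations.delete).
// Reusing the same requestID on a retry lets the server drop the duplicate.
func deleteReservation(ctx context.Context, svc *compute.Service, project, zone, name, requestID string) (*compute.Operation, error) {
	return svc.Reservations.Delete(project, zone, name).
		RequestId(requestID). // must be a valid, non-zero UUID
		Context(ctx).
		Do()
}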
-func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { - c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest - return c +type ReservationsGetCall struct { + s *Service + project string + zone string + reservation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { - c.urlParams_.Set("requestId", requestId) + +// Get: Retrieves all information of the specified reservation. +func (r *ReservationsService) Get(project string, zone string, reservation string) *ReservationsGetCall { + c := &ReservationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.reservation = reservation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *ReservationsGetCall) Fields(s ...googleapi.Field) *ReservationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ReservationsGetCall) IfNoneMatch(entityTag string) *ReservationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *ReservationsGetCall) Context(ctx context.Context) *ReservationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
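The IfNoneMatch option above enables conditional reads keyed on the ETag returned by a previous Get. A minimal sketch, assuming the same package and imports as the delete sketch plus "google.golang.org/api/googleapi"; identifiers are placeholders:

// getIfChanged refetches the reservation only if it changed since lastETag.
// On a 304 response googleapi.IsNotModified(err) reports true and the cached
// copy can be kept.
func getIfChanged(ctx context.Context, svc *compute.Service, lastETag string) (*compute.Reservation, error) {
	r, err := svc.Reservations.
		Get("my-project", "us-central1-a", "my-reservation").
		IfNoneMatch(lastETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged; keep the previously fetched value
	}
	return r, err
}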
-func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *ReservationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{reservation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "reservation": c.reservation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.reservations.get" call. +// Exactly one of *Reservation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *Reservation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ReservationsGetCall) Do(opts ...googleapi.CallOption) (*Reservation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -94580,7 +110633,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Reservation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -94592,21 +110645,15 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.recreateInstances", + // "description": "Retrieves all information of the specified reservation.", + // "httpMethod": "GET", + // "id": "compute.reservations.get", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "zone", + // "reservation" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -94614,143 +110661,127 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "reservation": { + // "description": "Name of the reservation to retrieve.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersRecreateRequest" - // }, + // "path": "{project}/zones/{zone}/reservations/{reservation}", // "response": { - // "$ref": "Operation" + // "$ref": "Reservation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.resize": +// method id "compute.reservations.getIamPolicy": -type RegionInstanceGroupManagersResizeCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ReservationsGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Resize: Changes the intended size of the managed instance group. If -// you increase the size, the group creates new instances using the -// current instance template. If you decrease the size, the group -// deletes one or more instances. -// -// The resize operation is marked DONE if the resize request is -// successful. The underlying actions take additional time. You must -// separately verify the status of the creating or deleting actions with -// the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { - c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *ReservationsService) GetIamPolicy(project string, zone string, resource string) *ReservationsGetIamPolicyCall { + c := &ReservationsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. 
This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { +func (c *ReservationsGetIamPolicyCall) Fields(s ...googleapi.Field) *ReservationsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ReservationsGetIamPolicyCall) IfNoneMatch(entityTag string) *ReservationsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { +func (c *ReservationsGetIamPolicyCall) Context(ctx context.Context) *ReservationsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { +func (c *ReservationsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.reservations.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ReservationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -94769,7 +110800,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -94781,22 +110812,15 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the intended size of the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes one or more instances.\n\nThe resize operation is marked DONE if the resize request is successful. The underlying actions take additional time. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.resize", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.reservations.getIamPolicy", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager", - // "size" + // "zone", + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -94804,60 +110828,52 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "size": { - // "description": "Number of instances that should exist in this instance group manager.", - // "format": "int32", - // "location": "query", - // "minimum": "0", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, - // "type": "integer" + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "{project}/zones/{zone}/reservations/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.setAutoHealingPolicies": +// method id "compute.reservations.insert": -type RegionInstanceGroupManagersSetAutoHealingPoliciesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ReservationsInsertCall struct { + s *Service + project string + zone string + reservation *Reservation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetAutoHealingPolicies: Modifies the autohealing policy for the -// instances in this managed instance group. [Deprecated] This method is -// deprecated. Please use Patch instead. -func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { - c := &RegionInstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a new reservation. +func (r *ReservationsService) Insert(project string, zone string, reservation *Reservation) *ReservationsInsertCall { + c := &ReservationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssetautohealingrequest = regioninstancegroupmanagerssetautohealingrequest + c.zone = zone + c.reservation = reservation return c } @@ -94875,7 +110891,7 @@ func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
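The requestId parameter documented above gives inserts at-most-once semantics across retries. A sketch of that pattern, with placeholders throughout; a real Reservation needs the SKU and instance-count configuration, which is deliberately omitted here:

// insertReservation creates a reservation idempotently: the same requestID
// must be passed again verbatim if the call is retried after a timeout, so
// the server can discard the duplicate.
func insertReservation(ctx context.Context, svc *compute.Service, requestID string) (*compute.Operation, error) {
	res := &compute.Reservation{Name: "my-reservation"} // incomplete; illustration only
	return svc.Reservations.
		Insert("my-project", "us-central1-a", res).
		RequestId(requestID).
		Context(ctx).
		Do()
}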
-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *ReservationsInsertCall) RequestId(requestId string) *ReservationsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -94883,7 +110899,7 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(reques // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *ReservationsInsertCall) Fields(s ...googleapi.Field) *ReservationsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -94891,35 +110907,35 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...goog // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *ReservationsInsertCall) Context(ctx context.Context) *ReservationsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { +func (c *ReservationsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssetautohealingrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.reservation) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -94927,21 +110943,20 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt st } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setAutoHealingPolicies" call. +// Do executes the "compute.reservations.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ReservationsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -94972,21 +110987,14 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl } return ret, nil // { - // "description": "Modifies the autohealing policy for the instances in this managed instance group. [Deprecated] This method is deprecated. Please use Patch instead.", + // "description": "Creates a new reservation.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", + // "id": "compute.reservations.insert", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "zone" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -94994,21 +111002,22 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "path": "{project}/zones/{zone}/reservations", // "request": { - // "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" + // "$ref": "Reservation" // }, // "response": { // "$ref": "Operation" @@ -95021,112 +111030,159 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl } -// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": +// method id "compute.reservations.list": -type RegionInstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ReservationsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Sets the instance template to use when creating -// new instances or recreating instances in this group. Existing -// instances are not affected. -func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: A list all the reservations that have been configured for the +// specified project in specified zone. +func (r *ReservationsService) List(project string, zone string) *ReservationsListCall { + c := &ReservationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest + c.zone = zone return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
+// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ReservationsListCall) Filter(filter string) *ReservationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *ReservationsListCall) MaxResults(maxResults int64) *ReservationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ReservationsListCall) OrderBy(orderBy string) *ReservationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ReservationsListCall) PageToken(pageToken string) *ReservationsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { +func (c *ReservationsListCall) Fields(s ...googleapi.Field) *ReservationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ReservationsListCall) IfNoneMatch(entityTag string) *ReservationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { +func (c *ReservationsListCall) Context(ctx context.Context) *ReservationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *ReservationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.reservations.list" call. +// Exactly one of *ReservationList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *ReservationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -95145,7 +111201,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ReservationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -95157,19 +111213,35 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap } return ret, nil // { - // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", + // "description": "A list all the reservations that have been configured for the specified project in specified zone.", + // "httpMethod": "GET", + // "id": "compute.reservations.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "zone" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -95179,55 +111251,69 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "zone": { + // "description": "Name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - // "request": { - // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" - // }, + // "path": "{project}/zones/{zone}/reservations", // "response": { - // "$ref": "Operation" + // "$ref": "ReservationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.setTargetPools": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ReservationsListCall) Pages(ctx context.Context, f func(*ReservationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionInstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.reservations.resize": + +type ReservationsResizeCall struct { + s *Service + project string + zone string + reservation string + reservationsresizerequest *ReservationsResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all new instances -// in this group are assigned. Existing instances in the group are not -// affected. -func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { - c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the reservation (applicable to standalone +// reservations only) +func (r *ReservationsService) Resize(project string, zone string, reservation string, reservationsresizerequest *ReservationsResizeRequest) *ReservationsResizeCall { + c := &ReservationsResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest + c.zone = zone + c.reservation = reservation + c.reservationsresizerequest = reservationsresizerequest return c } @@ -95245,7 +111331,7 @@ func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, regi // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *ReservationsResizeCall) RequestId(requestId string) *ReservationsResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -95253,7 +111339,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId stri // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *ReservationsResizeCall) Fields(s ...googleapi.Field) *ReservationsResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -95261,35 +111347,35 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
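The Pages helper added above drives the pageToken loop automatically. A sketch combining it with the filter and maxResults options documented earlier (same package and imports as the delete sketch; the status filter field and value are illustrative):

// listReservations walks every page of results, stopping early if the
// callback returns an error.
func listReservations(ctx context.Context, svc *compute.Service) error {
	return svc.Reservations.
		List("my-project", "us-central1-a").
		Filter(`status = "READY"`).
		MaxResults(100).
		Pages(ctx, func(page *compute.ReservationList) error {
			for _, r := range page.Items {
				log.Printf("reservation %s", r.Name)
			}
			return nil // a non-nil error here halts the iteration
		})
}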
-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *ReservationsResizeCall) Context(ctx context.Context) *ReservationsResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *ReservationsResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.reservationsresizerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{reservation}/resize") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -95297,21 +111383,21 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (* } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "reservation": c.reservation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. +// Do executes the "compute.reservations.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ReservationsResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -95342,21 +111428,15 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Modifies the target pools to which all new instances in this group are assigned. 
Existing instances in the group are not affected.", + // "description": "Resizes the reservation (applicable to standalone reservations only)", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setTargetPools", + // "id": "compute.reservations.resize", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "zone", + // "reservation" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -95364,21 +111444,29 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "reservation": { + // "description": "Name of the reservation to update.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "path": "{project}/zones/{zone}/reservations/{reservation}/resize", // "request": { - // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" + // "$ref": "ReservationsResizeRequest" // }, // "response": { // "$ref": "Operation" @@ -95391,34 +111479,34 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal } -// method id "compute.regionInstanceGroupManagers.testIamPermissions": +// method id "compute.reservations.setIamPolicy": -type RegionInstanceGroupManagersTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ReservationsSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupManagersTestIamPermissionsCall { - c := &RegionInstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *ReservationsService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *ReservationsSetIamPolicyCall { + c := &ReservationsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region + c.zone = zone c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zonesetpolicyrequest = zonesetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersTestIamPermissionsCall { +func (c *ReservationsSetIamPolicyCall) Fields(s ...googleapi.Field) *ReservationsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -95426,35 +111514,35 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleap // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
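For the resize call above, a sketch of a standalone-reservation resize; the SpecificSkuCount field name is an assumption about the ReservationsResizeRequest schema referenced in the hunk, and the identifiers are placeholders:

// resizeReservation asks for newCount instances in the reservation and
// returns the zone Operation tracking the change.
func resizeReservation(ctx context.Context, svc *compute.Service, newCount int64) (*compute.Operation, error) {
	req := &compute.ReservationsResizeRequest{SpecificSkuCount: newCount} // field name assumed
	return svc.Reservations.
		Resize("my-project", "us-central1-a", "my-reservation", req).
		Context(ctx).
		Do()
}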
-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupManagersTestIamPermissionsCall { +func (c *ReservationsSetIamPolicyCall) Context(ctx context.Context) *ReservationsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header { +func (c *ReservationsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -95463,20 +111551,20 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, + "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.reservations.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ReservationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -95495,7 +111583,7 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -95507,12 +111595,12 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.testIamPermissions", + // "id": "compute.reservations.setIamPolicy", // "parameterOrder": [ // "project", - // "region", + // "zone", // "resource" // ], // "parameters": { @@ -95523,87 +111611,64 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/reservations/{resource}/setIamPolicy", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "ZoneSetPolicyRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionInstanceGroupManagers.update": +// method id "compute.reservations.testIamPermissions": -type RegionInstanceGroupManagersUpdateCall struct { - s *Service - project string - region string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ReservationsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is updated even if the instances in the group have not yet been -// updated. You must separately verify the status of the individual -// instances with the listmanagedinstances method. 
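// Editor's sketch (not part of the vendored diff): the compute.reservations.setIamPolicy
// call generated above could be used roughly as follows. The project/zone/resource
// names and the role binding are hypothetical, and svc is assumed to be a ready
// *compute.Service. Note that, per the method description, SetIamPolicy replaces any
// existing policy, so bindings that should be kept must be included in the request.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // adjust to the vendored API version
)

func grantReservationViewer(ctx context.Context, svc *compute.Service) (*compute.Policy, error) {
	req := &compute.ZoneSetPolicyRequest{
		Policy: &compute.Policy{
			Bindings: []*compute.Binding{{
				Role:    "roles/compute.viewer",
				Members: []string{"user:alice@example.com"},
			}},
		},
	}
	// Do returns the Policy that is now in effect on the reservation.
	return svc.Reservations.SetIamPolicy("my-project", "us-central1-a", "my-reservation", req).
		Context(ctx).
		Do()
}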
-func (r *RegionInstanceGroupManagersService) Update(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersUpdateCall { - c := &RegionInstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ReservationsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *ReservationsTestIamPermissionsCall { + c := &ReservationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdateCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdateCall { +func (c *ReservationsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ReservationsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -95611,57 +111676,57 @@ func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersUpdateCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdateCall { +func (c *ReservationsTestIamPermissionsCall) Context(ctx context.Context) *ReservationsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { +func (c *ReservationsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *ReservationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/reservations/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.reservations.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ReservationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -95680,7 +111745,7 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -95692,21 +111757,15 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. 
You must separately verify the status of the individual instances with the listmanagedinstances method.", - // "httpMethod": "PUT", - // "id": "compute.regionInstanceGroupManagers.update", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.reservations.testIamPermissions", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "zone", + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -95714,59 +111773,122 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/reservations/{resource}/testIamPermissions", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroups.get": +// method id "compute.resourcePolicies.aggregatedList": -type RegionInstanceGroupsGetCall struct { - s *Service - project string - region string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ResourcePoliciesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance group resource. 
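// Editor's sketch (not part of the vendored diff): checking caller permissions with
// the compute.reservations.testIamPermissions call generated above. The permission
// string and resource names are hypothetical; svc is assumed to be a ready
// *compute.Service.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // adjust to the vendored API version
)

func canDeleteReservation(ctx context.Context, svc *compute.Service) (bool, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.reservations.delete"},
	}
	resp, err := svc.Reservations.TestIamPermissions("my-project", "us-central1-a", "my-reservation", req).
		Context(ctx).
		Do()
	if err != nil {
		return false, err
	}
	// The response echoes back only the permissions the caller actually holds.
	for _, p := range resp.Permissions {
		if p == "compute.reservations.delete" {
			return true, nil
		}
	}
	return false, nil
}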
-func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { - c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of resource policies. +func (r *ResourcePoliciesService) AggregatedList(project string) *ResourcePoliciesAggregatedListCall { + c := &ResourcePoliciesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.instanceGroup = instanceGroup + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ResourcePoliciesAggregatedListCall) Filter(filter string) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *ResourcePoliciesAggregatedListCall) MaxResults(maxResults int64) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ResourcePoliciesAggregatedListCall) OrderBy(orderBy string) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
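// Editor's sketch (not part of the vendored diff): using the aggregated resource
// policy listing generated above together with the filter/maxResults/pageToken
// options it documents. The filter expression and project name are hypothetical;
// svc is assumed to be a ready *compute.Service. Pages handles the pageToken
// plumbing described above automatically.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // adjust to the vendored API version
)

func listPolicyScopes(ctx context.Context, svc *compute.Service) error {
	call := svc.ResourcePolicies.AggregatedList("my-project").
		Filter("name != example-policy"). // same syntax as the filter description above
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.ResourcePolicyAggregatedList) error {
		// Items is keyed by scope (e.g. "regions/us-central1"); the values hold
		// the resource policies found in that scope.
		for scope := range page.Items {
			fmt.Println("scope:", scope)
		}
		return nil
	})
}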
+func (c *ResourcePoliciesAggregatedListCall) PageToken(pageToken string) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { +func (c *ResourcePoliciesAggregatedListCall) Fields(s ...googleapi.Field) *ResourcePoliciesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -95776,7 +111898,7 @@ func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstan // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { +func (c *ResourcePoliciesAggregatedListCall) IfNoneMatch(entityTag string) *ResourcePoliciesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -95784,21 +111906,21 @@ func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInsta // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { +func (c *ResourcePoliciesAggregatedListCall) Context(ctx context.Context) *ResourcePoliciesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsGetCall) Header() http.Header { +func (c *ResourcePoliciesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ResourcePoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -95810,7 +111932,7 @@ func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/resourcePolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -95818,21 +111940,19 @@ func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.resourcePolicies.aggregatedList" call. +// Exactly one of *ResourcePolicyAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. 
Response headers are in +// either *ResourcePolicyAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { +func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ResourcePolicyAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -95851,7 +111971,7 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroup{ + ret := &ResourcePolicyAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -95863,19 +111983,34 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Returns the specified instance group resource.", + // "description": "Retrieves an aggregated list of resource policies.", // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.get", + // "id": "compute.resourcePolicies.aggregatedList", // "parameterOrder": [ - // "project", - // "region", - // "instanceGroup" + // "project" // ], // "parameters": { - // "instanceGroup": { - // "description": "Name of the instance group resource to return.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -95884,17 +112019,11 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", + // "path": "{project}/aggregated/resourcePolicies", // "response": { - // "$ref": "InstanceGroup" + // "$ref": "ResourcePolicyAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -95905,159 +112034,124 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc } -// method id "compute.regionInstanceGroups.list": - -type RegionInstanceGroupsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ResourcePoliciesAggregatedListCall) Pages(ctx context.Context, f func(*ResourcePolicyAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// List: Retrieves the list of instance group resources contained within -// the specified region. -func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { - c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} +// method id "compute.resourcePolicies.delete": -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. 
For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("filter", filter) - return c +type ResourcePoliciesDeleteCall struct { + s *Service + project string + region string + resourcePolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Delete: Deletes the specified resource policy. +func (r *ResourcePoliciesService) Delete(project string, region string, resourcePolicy string) *ResourcePoliciesDeleteCall { + c := &ResourcePoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resourcePolicy = resourcePolicy return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ResourcePoliciesDeleteCall) RequestId(requestId string) *ResourcePoliciesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { +func (c *ResourcePoliciesDeleteCall) Fields(s ...googleapi.Field) *ResourcePoliciesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { +func (c *ResourcePoliciesDeleteCall) Context(ctx context.Context) *ResourcePoliciesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsListCall) Header() http.Header { +func (c *ResourcePoliciesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ResourcePoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resourcePolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "resourcePolicy": c.resourcePolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.list" call. -// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RegionInstanceGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { +// Do executes the "compute.resourcePolicies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ResourcePoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -96076,7 +112170,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -96088,37 +112182,15 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region } return ret, nil // { - // "description": "Retrieves the list of instance group resources contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.list", + // "description": "Deletes the specified resource policy.", + // "httpMethod": "DELETE", + // "id": "compute.resourcePolicies.delete", // "parameterOrder": [ // "project", - // "region" + // "region", + // "resourcePolicy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -96127,198 +112199,129 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resourcePolicy": { + // "description": "Name of the resource policy to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups", + // "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", // "response": { - // "$ref": "RegionInstanceGroupList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
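// Editor's sketch (not part of the vendored diff): an idempotent delete using the
// requestId parameter documented above. The project/region/policy names are
// hypothetical; requestID must be a client-generated UUID (the zero UUID is not
// accepted) and must be reused unchanged on retries so the server can discard
// duplicates. svc is assumed to be a ready *compute.Service.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // adjust to the vendored API version
)

func deletePolicyIdempotently(ctx context.Context, svc *compute.Service, requestID string) (*compute.Operation, error) {
	return svc.ResourcePolicies.Delete("my-project", "us-central1", "my-policy").
		RequestId(requestID). // same UUID on retry => server ignores the duplicate request
		Context(ctx).
		Do()
}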
-func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroups.listInstances": +// method id "compute.resourcePolicies.get": -type RegionInstanceGroupsListInstancesCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ResourcePoliciesGetCall struct { + s *Service + project string + region string + resourcePolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group -// and displays information about the named ports. Depending on the -// specified options, this method can list all instances or only the -// instances that are running. -func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { - c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Retrieves all information of the specified resource policy. +func (r *ResourcePoliciesService) Get(project string, region string, resourcePolicy string) *ResourcePoliciesGetCall { + c := &ResourcePoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { - c.urlParams_.Set("pageToken", pageToken) + c.resourcePolicy = resourcePolicy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { +func (c *ResourcePoliciesGetCall) Fields(s ...googleapi.Field) *ResourcePoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ResourcePoliciesGetCall) IfNoneMatch(entityTag string) *ResourcePoliciesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { +func (c *ResourcePoliciesGetCall) Context(ctx context.Context) *ResourcePoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
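// Editor's sketch (not part of the vendored diff): a conditional read using the
// IfNoneMatch option documented above. The project/region/policy names are
// hypothetical; svc is assumed to be a ready *compute.Service. googleapi.IsNotModified
// distinguishes "resource unchanged" (HTTP 304) from real errors.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // adjust to the vendored API version
	"google.golang.org/api/googleapi"
)

// refreshPolicy re-fetches a resource policy only if it changed since the ETag
// captured from a previous response; otherwise it returns the cached copy.
func refreshPolicy(ctx context.Context, svc *compute.Service, cached *compute.ResourcePolicy) (*compute.ResourcePolicy, error) {
	call := svc.ResourcePolicies.Get("my-project", "us-central1", "my-policy").Context(ctx)
	if cached != nil {
		call = call.IfNoneMatch(cached.Header.Get("Etag")) // ETag from the embedded ServerResponse
	}
	fresh, err := call.Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // server answered 304; keep the cached copy
	}
	if err != nil {
		return nil, err
	}
	return fresh, nil
}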
-func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { +func (c *ResourcePoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *ResourcePoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resourcePolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, + "resourcePolicy": c.resourcePolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.listInstances" call. -// Exactly one of *RegionInstanceGroupsListInstances or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupsListInstances.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { +// Do executes the "compute.resourcePolicies.get" call. +// Exactly one of *ResourcePolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ResourcePolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourcePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResourcePolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -96337,7 +112340,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupsListInstances{ + ret := &ResourcePolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -96349,44 +112352,15 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the instances in the specified instance group and displays information about the named ports. 
Depending on the specified options, this method can list all instances or only the instances that are running.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.listInstances", + // "description": "Retrieves all information of the specified resource policy.", + // "httpMethod": "GET", + // "id": "compute.resourcePolicies.get", // "parameterOrder": [ // "project", // "region", - // "instanceGroup" + // "resourcePolicy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "instanceGroup": { - // "description": "Name of the regional instance group for which we want to list the instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -96395,18 +112369,23 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resourcePolicy": { + // "description": "Name of the resource policy to retrieve.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - // "request": { - // "$ref": "RegionInstanceGroupsListInstancesRequest" - // }, + // "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", // "response": { - // "$ref": "RegionInstanceGroupsListInstances" + // "$ref": "ResourcePolicy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -96417,48 +112396,24 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroups.setNamedPorts": +// method id "compute.resourcePolicies.insert": -type RegionInstanceGroupsSetNamedPortsCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ResourcePoliciesInsertCall struct { + s *Service + project string + region string + resourcepolicy *ResourcePolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetNamedPorts: Sets the named ports for the specified regional -// instance group. -func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { - c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a new resource policy. 
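// Editor's sketch (not part of the vendored diff): creating a resource policy with
// the Insert call whose doc comment ends above. Only Name is set here; real policies
// also carry a schedule (for example a snapshot schedule), which is omitted to keep
// the sketch small. The names and the optional requestID are hypothetical; svc is
// assumed to be a ready *compute.Service.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1" // adjust to the vendored API version
)

func createPolicy(ctx context.Context, svc *compute.Service, requestID string) (*compute.Operation, error) {
	policy := &compute.ResourcePolicy{
		Name: "my-policy",
	}
	return svc.ResourcePolicies.Insert("my-project", "us-central1", policy).
		RequestId(requestID). // optional; enables safe retries as described by the requestId doc below
		Context(ctx).
		Do()
}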
+func (r *ResourcePoliciesService) Insert(project string, region string, resourcepolicy *ResourcePolicy) *ResourcePoliciesInsertCall { + c := &ResourcePoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest + c.resourcepolicy = resourcepolicy return c } @@ -96476,7 +112431,7 @@ func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { +func (c *ResourcePoliciesInsertCall) RequestId(requestId string) *ResourcePoliciesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -96484,7 +112439,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { +func (c *ResourcePoliciesInsertCall) Fields(s ...googleapi.Field) *ResourcePoliciesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -96492,35 +112447,35 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { +func (c *ResourcePoliciesInsertCall) Context(ctx context.Context) *ResourcePoliciesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { +func (c *ResourcePoliciesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { +func (c *ResourcePoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcepolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -96528,21 +112483,20 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. +// Do executes the "compute.resourcePolicies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ResourcePoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -96573,21 +112527,14 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the named ports for the specified regional instance group.", + // "description": "Creates a new resource policy.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.setNamedPorts", + // "id": "compute.resourcePolicies.insert", // "parameterOrder": [ // "project", - // "region", - // "instanceGroup" + // "region" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the regional instance group where the named ports are updated.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -96596,8 +112543,9 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -96607,9 +112555,9 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + // "path": "{project}/regions/{region}/resourcePolicies", // "request": { - // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" + // "$ref": "ResourcePolicy" // }, // "response": { // "$ref": "Operation" @@ -96622,92 +112570,159 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroups.testIamPermissions": +// method id "compute.resourcePolicies.list": -type RegionInstanceGroupsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ResourcePoliciesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ 
http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionInstanceGroupsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupsTestIamPermissionsCall { - c := &RegionInstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: A list all the resource policies that have been configured for +// the specified project in specified region. +func (r *ResourcePoliciesService) List(project string, region string) *ResourcePoliciesListCall { + c := &ResourcePoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ResourcePoliciesListCall) Filter(filter string) *ResourcePoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *ResourcePoliciesListCall) MaxResults(maxResults int64) *ResourcePoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *ResourcePoliciesListCall) OrderBy(orderBy string) *ResourcePoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ResourcePoliciesListCall) PageToken(pageToken string) *ResourcePoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsTestIamPermissionsCall { +func (c *ResourcePoliciesListCall) Fields(s ...googleapi.Field) *ResourcePoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ResourcePoliciesListCall) IfNoneMatch(entityTag string) *ResourcePoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupsTestIamPermissionsCall { +func (c *ResourcePoliciesListCall) Context(ctx context.Context) *ResourcePoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { +func (c *ResourcePoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ResourcePoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.testIamPermissions" call. 
-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.resourcePolicies.list" call. +// Exactly one of *ResourcePolicyList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *ResourcePolicyList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePolicyList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -96726,7 +112741,7 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &ResourcePolicyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -96738,15 +112753,37 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.testIamPermissions", + // "description": "A list all the resource policies that have been configured for the specified project in specified region.", + // "httpMethod": "GET", + // "id": "compute.resourcePolicies.list", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "region" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -96755,26 +112792,16 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/resourcePolicies", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "ResourcePolicyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -96785,223 +112812,113 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp } -// method id "compute.regionOperations.delete": - -type RegionOperationsDeleteCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified region-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete -func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { - c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.operation = operation - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { +// Pages invokes f for each page of results. 
+// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ResourcePoliciesListCall) Pages(ctx context.Context, f func(*ResourcePolicyList) error) error { c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionOperationsDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionOperations.delete" call. -func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) } - return nil - // { - // "description": "Deletes the specified region-specific Operations resource.", - // "httpMethod": "DELETE", - // "id": "compute.regionOperations.delete", - // "parameterOrder": [ - // "project", - // "region", - // "operation" - // ], - // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/operations/{operation}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - } -// method id "compute.regionOperations.get": +// method id "compute.resourcePolicies.testIamPermissions": -type RegionOperationsGetCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ResourcePoliciesTestIamPermissionsCall struct { + s *Service + project string + region string + 
resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Retrieves the specified region-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get -func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { - c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ResourcePoliciesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *ResourcePoliciesTestIamPermissionsCall { + c := &ResourcePoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { +func (c *ResourcePoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ResourcePoliciesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { +func (c *ResourcePoliciesTestIamPermissionsCall) Context(ctx context.Context) *ResourcePoliciesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionOperationsGetCall) Header() http.Header { +func (c *ResourcePoliciesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ResourcePoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.get" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.resourcePolicies.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ResourcePoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -97020,7 +112937,7 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -97032,22 +112949,15 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified region-specific Operations resource.", - // "httpMethod": "GET", - // "id": "compute.regionOperations.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.resourcePolicies.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "operation" + // "resource" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -97056,16 +112966,26 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": 
"{project}/regions/{region}/operations/{operation}", + // "path": "{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -97076,25 +112996,21 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.regionOperations.list": +// method id "compute.routers.aggregatedList": -type RegionOperationsListCall struct { +type RoutersAggregatedListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of Operation resources contained within the -// specified region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list -func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { - c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of routers. +func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCall { + c := &RoutersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -97120,7 +113036,7 @@ func (r *RegionOperationsService) List(project string, region string) *RegionOpe // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { +func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -97131,7 +113047,7 @@ func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { +func (c *RoutersAggregatedListCall) MaxResults(maxResults int64) *RoutersAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -97148,7 +113064,7 @@ func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperation // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { +func (c *RoutersAggregatedListCall) OrderBy(orderBy string) *RoutersAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -97156,7 +113072,7 @@ func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { +func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -97164,7 +113080,7 @@ func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperations // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { +func (c *RoutersAggregatedListCall) Fields(s ...googleapi.Field) *RoutersAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -97174,7 +113090,7 @@ func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperation // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { +func (c *RoutersAggregatedListCall) IfNoneMatch(entityTag string) *RoutersAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -97182,21 +113098,21 @@ func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperatio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { +func (c *RoutersAggregatedListCall) Context(ctx context.Context) *RoutersAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionOperationsListCall) Header() http.Header { +func (c *RoutersAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -97208,7 +113124,7 @@ func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/routers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -97217,19 +113133,18 @@ func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.routers.aggregatedList" call. +// Exactly one of *RouterAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RouterAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -97248,7 +113163,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationList{ + ret := &RouterAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -97260,12 +113175,11 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified region.", + // "description": "Retrieves an aggregated list of routers.", // "httpMethod": "GET", - // "id": "compute.regionOperations.list", + // "id": "compute.routers.aggregatedList", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -97297,18 +113211,11 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations", + // "path": "{project}/aggregated/routers", // "response": { - // "$ref": "OperationList" + // "$ref": "RouterAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -97322,7 +113229,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { +func (c *RoutersAggregatedListCall) Pages(ctx context.Context, f func(*RouterAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -97340,78 +113247,83 @@ func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationL } } -// method id "compute.regions.get": +// method id "compute.routers.delete": -type RegionsGetCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RoutersDeleteCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Region resource. Gets a list of available -// regions by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get -func (r *RegionsService) Get(project string, region string) *RegionsGetCall { - c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Router resource. 
+func (r *RoutersService) Delete(project string, region string, router string) *RoutersDeleteCall { + c := &RoutersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.router = router + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutersDeleteCall) RequestId(requestId string) *RoutersDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { +func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { +func (c *RoutersDeleteCall) Context(ctx context.Context) *RoutersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionsGetCall) Header() http.Header { +func (c *RoutersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -97419,18 +113331,19 @@ func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regions.get" call. -// Exactly one of *Region or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Region.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { +// Do executes the "compute.routers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -97449,7 +113362,7 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Region{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -97461,12 +113374,13 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { } return ret, nil // { - // "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regions.get", + // "description": "Deletes the specified Router resource.", + // "httpMethod": "DELETE", + // "id": "compute.routers.delete", // "parameterOrder": [ // "project", - // "region" + // "region", + // "router" // ], // "parameters": { // "project": { @@ -97477,113 +113391,64 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { // "type": "string" // }, // "region": { - // "description": "Name of the region resource to return.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}", + // "path": "{project}/regions/{region}/routers/{router}", // "response": { - // "$ref": "Region" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regions.list": +// method id "compute.routers.get": -type RegionsListCall struct { +type RoutersGetCall struct { s *Service project string + region string + router string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of region resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list -func (r *RegionsService) List(project string) *RegionsListCall { - c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Router resource. Gets a list of available +// routers by making a list() request. +func (r *RoutersService) Get(project string, region string, router string) *RoutersGetCall { + c := &RoutersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *RegionsListCall) Filter(filter string) *RegionsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionsListCall) OrderBy(orderBy string) *RegionsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.router = router return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall { +func (c *RoutersGetCall) Fields(s ...googleapi.Field) *RoutersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -97593,7 +113458,7 @@ func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall { +func (c *RoutersGetCall) IfNoneMatch(entityTag string) *RoutersGetCall { c.ifNoneMatch_ = entityTag return c } @@ -97601,21 +113466,21 @@ func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall { +func (c *RoutersGetCall) Context(ctx context.Context) *RoutersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionsListCall) Header() http.Header { +func (c *RoutersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -97627,7 +113492,7 @@ func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -97636,18 +113501,20 @@ func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regions.list" call. -// Exactly one of *RegionList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RegionList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) { +// Do executes the "compute.routers.get" call. +// Exactly one of *Router or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Router.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -97666,7 +113533,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionList{ + ret := &Router{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -97678,47 +113545,40 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) } return ret, nil // { - // "description": "Retrieves the list of region resources available to the specified project.", + // "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request.", // "httpMethod": "GET", - // "id": "compute.regions.list", + // "id": "compute.routers.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "router" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "router": { + // "description": "Name of the Router resource to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions", + // "path": "{project}/regions/{region}/routers/{router}", // "response": { - // "$ref": "RegionList" + // "$ref": "Router" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -97729,42 +113589,26 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.resourcePolicies.aggregatedList": +// method id "compute.routers.getNatMappingInfo": -type ResourcePoliciesAggregatedListCall struct { +type RoutersGetNatMappingInfoCall struct { s *Service project string + region string + router string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of resource policies. -func (r *ResourcePoliciesService) AggregatedList(project string) *ResourcePoliciesAggregatedListCall { - c := &ResourcePoliciesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetNatMappingInfo: Retrieves runtime Nat mapping information of VM +// endpoints. +func (r *RoutersService) GetNatMappingInfo(project string, region string, router string) *RoutersGetNatMappingInfoCall { + c := &RoutersGetNatMappingInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.router = router return c } @@ -97790,7 +113634,7 @@ func (r *ResourcePoliciesService) AggregatedList(project string) *ResourcePolici // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *ResourcePoliciesAggregatedListCall) Filter(filter string) *ResourcePoliciesAggregatedListCall { +func (c *RoutersGetNatMappingInfoCall) Filter(filter string) *RoutersGetNatMappingInfoCall { c.urlParams_.Set("filter", filter) return c } @@ -97801,7 +113645,7 @@ func (c *ResourcePoliciesAggregatedListCall) Filter(filter string) *ResourcePoli // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *ResourcePoliciesAggregatedListCall) MaxResults(maxResults int64) *ResourcePoliciesAggregatedListCall { +func (c *RoutersGetNatMappingInfoCall) MaxResults(maxResults int64) *RoutersGetNatMappingInfoCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -97818,7 +113662,7 @@ func (c *ResourcePoliciesAggregatedListCall) MaxResults(maxResults int64) *Resou // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *ResourcePoliciesAggregatedListCall) OrderBy(orderBy string) *ResourcePoliciesAggregatedListCall { +func (c *RoutersGetNatMappingInfoCall) OrderBy(orderBy string) *RoutersGetNatMappingInfoCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -97826,7 +113670,7 @@ func (c *ResourcePoliciesAggregatedListCall) OrderBy(orderBy string) *ResourcePo // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
-func (c *ResourcePoliciesAggregatedListCall) PageToken(pageToken string) *ResourcePoliciesAggregatedListCall { +func (c *RoutersGetNatMappingInfoCall) PageToken(pageToken string) *RoutersGetNatMappingInfoCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -97834,7 +113678,7 @@ func (c *ResourcePoliciesAggregatedListCall) PageToken(pageToken string) *Resour // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ResourcePoliciesAggregatedListCall) Fields(s ...googleapi.Field) *ResourcePoliciesAggregatedListCall { +func (c *RoutersGetNatMappingInfoCall) Fields(s ...googleapi.Field) *RoutersGetNatMappingInfoCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -97844,7 +113688,7 @@ func (c *ResourcePoliciesAggregatedListCall) Fields(s ...googleapi.Field) *Resou // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ResourcePoliciesAggregatedListCall) IfNoneMatch(entityTag string) *ResourcePoliciesAggregatedListCall { +func (c *RoutersGetNatMappingInfoCall) IfNoneMatch(entityTag string) *RoutersGetNatMappingInfoCall { c.ifNoneMatch_ = entityTag return c } @@ -97852,21 +113696,21 @@ func (c *ResourcePoliciesAggregatedListCall) IfNoneMatch(entityTag string) *Reso // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ResourcePoliciesAggregatedListCall) Context(ctx context.Context) *ResourcePoliciesAggregatedListCall { +func (c *RoutersGetNatMappingInfoCall) Context(ctx context.Context) *RoutersGetNatMappingInfoCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ResourcePoliciesAggregatedListCall) Header() http.Header { +func (c *RoutersGetNatMappingInfoCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ResourcePoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersGetNatMappingInfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -97878,7 +113722,7 @@ func (c *ResourcePoliciesAggregatedListCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/resourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getNatMappingInfo") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -97887,18 +113731,20 @@ func (c *ResourcePoliciesAggregatedListCall) doRequest(alt string) (*http.Respon req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.resourcePolicies.aggregatedList" call. -// Exactly one of *ResourcePolicyAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. 
Response headers are in -// either *ResourcePolicyAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.routers.getNatMappingInfo" call. +// Exactly one of *VmEndpointNatMappingsList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *VmEndpointNatMappingsList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ResourcePolicyAggregatedList, error) { +func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndpointNatMappingsList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -97917,7 +113763,7 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ResourcePolicyAggregatedList{ + ret := &VmEndpointNatMappingsList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -97929,11 +113775,13 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of resource policies.", + // "description": "Retrieves runtime Nat mapping information of VM endpoints.", // "httpMethod": "GET", - // "id": "compute.resourcePolicies.aggregatedList", + // "id": "compute.routers.getNatMappingInfo", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "router" // ], // "parameters": { // "filter": { @@ -97965,11 +113813,25 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/resourcePolicies", + // "path": "{project}/regions/{region}/routers/{router}/getNatMappingInfo", // "response": { - // "$ref": "ResourcePolicyAggregatedList" + // "$ref": "VmEndpointNatMappingsList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -97983,7 +113845,7 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
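The generated paging helper described just above (Pages resets the page token and invokes the callback once per page until NextPageToken is empty) is easiest to see in use. The following sketch is illustrative only and is not part of the vendored diff: it assumes the upstream import path google.golang.org/api/compute/v1 for this vendored package, the long-standing google.DefaultClient/compute.New constructors, Application Default Credentials, and placeholder project/region/router names; the Result and InstanceName fields on the response types are taken from the upstream API surface rather than from this hunk.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Assumption: Application Default Credentials are configured in the environment.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Pages drives compute.routers.getNatMappingInfo once per page and stops
	// when the returned NextPageToken is empty, as the generated helper above does.
	call := svc.Routers.GetNatMappingInfo("my-project", "us-central1", "my-router") // placeholders
	err = call.Pages(ctx, func(page *compute.VmEndpointNatMappingsList) error {
		for _, m := range page.Result {
			fmt.Println(m.InstanceName)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}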
-func (c *ResourcePoliciesAggregatedListCall) Pages(ctx context.Context, f func(*ResourcePolicyAggregatedList) error) error { +func (c *RoutersGetNatMappingInfoCall) Pages(ctx context.Context, f func(*VmEndpointNatMappingsList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -98001,207 +113863,33 @@ func (c *ResourcePoliciesAggregatedListCall) Pages(ctx context.Context, f func(* } } -// method id "compute.resourcePolicies.delete": - -type ResourcePoliciesDeleteCall struct { - s *Service - project string - region string - resourcePolicy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified resource policy. -func (r *ResourcePoliciesService) Delete(project string, region string, resourcePolicy string) *ResourcePoliciesDeleteCall { - c := &ResourcePoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resourcePolicy = resourcePolicy - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ResourcePoliciesDeleteCall) RequestId(requestId string) *ResourcePoliciesDeleteCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ResourcePoliciesDeleteCall) Fields(s ...googleapi.Field) *ResourcePoliciesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ResourcePoliciesDeleteCall) Context(ctx context.Context) *ResourcePoliciesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ResourcePoliciesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ResourcePoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resourcePolicy}") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resourcePolicy": c.resourcePolicy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.resourcePolicies.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ResourcePoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified resource policy.", - // "httpMethod": "DELETE", - // "id": "compute.resourcePolicies.delete", - // "parameterOrder": [ - // "project", - // "region", - // "resourcePolicy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "resourcePolicy": { - // "description": "Name of the resource policy to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.resourcePolicies.get": +// method id "compute.routers.getRouterStatus": -type ResourcePoliciesGetCall struct { - s *Service - project string - region string - resourcePolicy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RoutersGetRouterStatusCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Retrieves all information of the specified resource policy. -func (r *ResourcePoliciesService) Get(project string, region string, resourcePolicy string) *ResourcePoliciesGetCall { - c := &ResourcePoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetRouterStatus: Retrieves runtime information of the specified +// router. +func (r *RoutersService) GetRouterStatus(project string, region string, router string) *RoutersGetRouterStatusCall { + c := &RoutersGetRouterStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resourcePolicy = resourcePolicy + c.router = router return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ResourcePoliciesGetCall) Fields(s ...googleapi.Field) *ResourcePoliciesGetCall { +func (c *RoutersGetRouterStatusCall) Fields(s ...googleapi.Field) *RoutersGetRouterStatusCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -98211,7 +113899,7 @@ func (c *ResourcePoliciesGetCall) Fields(s ...googleapi.Field) *ResourcePolicies // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ResourcePoliciesGetCall) IfNoneMatch(entityTag string) *ResourcePoliciesGetCall { +func (c *RoutersGetRouterStatusCall) IfNoneMatch(entityTag string) *RoutersGetRouterStatusCall { c.ifNoneMatch_ = entityTag return c } @@ -98219,21 +113907,21 @@ func (c *ResourcePoliciesGetCall) IfNoneMatch(entityTag string) *ResourcePolicie // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ResourcePoliciesGetCall) Context(ctx context.Context) *ResourcePoliciesGetCall { +func (c *RoutersGetRouterStatusCall) Context(ctx context.Context) *RoutersGetRouterStatusCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ResourcePoliciesGetCall) Header() http.Header { +func (c *RoutersGetRouterStatusCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ResourcePoliciesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -98245,7 +113933,7 @@ func (c *ResourcePoliciesGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resourcePolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getRouterStatus") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -98253,21 +113941,21 @@ func (c *ResourcePoliciesGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resourcePolicy": c.resourcePolicy, + "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } - -// Do executes the "compute.resourcePolicies.get" call. -// Exactly one of *ResourcePolicy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ResourcePolicy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use + +// Do executes the "compute.routers.getRouterStatus" call. +// Exactly one of *RouterStatusResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RouterStatusResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ResourcePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResourcePolicy, error) { +func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterStatusResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -98286,7 +113974,7 @@ func (c *ResourcePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResourcePol if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ResourcePolicy{ + ret := &RouterStatusResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -98298,13 +113986,13 @@ func (c *ResourcePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResourcePol } return ret, nil // { - // "description": "Retrieves all information of the specified resource policy.", + // "description": "Retrieves runtime information of the specified router.", // "httpMethod": "GET", - // "id": "compute.resourcePolicies.get", + // "id": "compute.routers.getRouterStatus", // "parameterOrder": [ // "project", // "region", - // "resourcePolicy" + // "router" // ], // "parameters": { // "project": { @@ -98321,17 +114009,17 @@ func (c *ResourcePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResourcePol // "required": true, // "type": "string" // }, - // "resourcePolicy": { - // "description": "Name of the resource policy to retrieve.", + // "router": { + // "description": "Name of the Router resource to query.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/resourcePolicies/{resourcePolicy}", + // "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", // "response": { - // "$ref": "ResourcePolicy" + // "$ref": "RouterStatusResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -98342,24 +114030,25 @@ func (c *ResourcePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResourcePol } -// method id "compute.resourcePolicies.insert": +// method id "compute.routers.insert": -type ResourcePoliciesInsertCall struct { - s *Service - project string - region string - resourcepolicy *ResourcePolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RoutersInsertCall struct { + s *Service + project string + region string + router *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a new resource policy. -func (r *ResourcePoliciesService) Insert(project string, region string, resourcepolicy *ResourcePolicy) *ResourcePoliciesInsertCall { - c := &ResourcePoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Router resource in the specified project and region +// using the data included in the request. +func (r *RoutersService) Insert(project string, region string, router *Router) *RoutersInsertCall { + c := &RoutersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resourcepolicy = resourcepolicy + c.router = router return c } @@ -98377,7 +114066,7 @@ func (r *ResourcePoliciesService) Insert(project string, region string, resource // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *ResourcePoliciesInsertCall) RequestId(requestId string) *ResourcePoliciesInsertCall { +func (c *RoutersInsertCall) RequestId(requestId string) *RoutersInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -98385,7 +114074,7 @@ func (c *ResourcePoliciesInsertCall) RequestId(requestId string) *ResourcePolici // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ResourcePoliciesInsertCall) Fields(s ...googleapi.Field) *ResourcePoliciesInsertCall { +func (c *RoutersInsertCall) Fields(s ...googleapi.Field) *RoutersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -98393,35 +114082,35 @@ func (c *ResourcePoliciesInsertCall) Fields(s ...googleapi.Field) *ResourcePolic // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ResourcePoliciesInsertCall) Context(ctx context.Context) *ResourcePoliciesInsertCall { +func (c *RoutersInsertCall) Context(ctx context.Context) *RoutersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ResourcePoliciesInsertCall) Header() http.Header { +func (c *RoutersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ResourcePoliciesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcepolicy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -98435,14 +114124,14 @@ func (c *ResourcePoliciesInsertCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.resourcePolicies.insert" call. +// Do executes the "compute.routers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ResourcePoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -98473,9 +114162,9 @@ func (c *ResourcePoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a new resource policy.", + // "description": "Creates a Router resource in the specified project and region using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.resourcePolicies.insert", + // "id": "compute.routers.insert", // "parameterOrder": [ // "project", // "region" @@ -98501,9 +114190,9 @@ func (c *ResourcePoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/regions/{region}/resourcePolicies", + // "path": "{project}/regions/{region}/routers", // "request": { - // "$ref": "ResourcePolicy" + // "$ref": "Router" // }, // "response": { // "$ref": "Operation" @@ -98516,9 +114205,9 @@ func (c *ResourcePoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.resourcePolicies.list": +// method id "compute.routers.list": -type ResourcePoliciesListCall struct { +type RoutersListCall struct { s *Service project string region string @@ -98528,10 +114217,10 @@ type ResourcePoliciesListCall struct { header_ http.Header } -// List: A list all the resource policies that have been configured for -// the specified project in specified region. -func (r *ResourcePoliciesService) List(project string, region string) *ResourcePoliciesListCall { - c := &ResourcePoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Router resources available to the specified +// project. +func (r *RoutersService) List(project string, region string) *RoutersListCall { + c := &RoutersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -98559,7 +114248,7 @@ func (r *ResourcePoliciesService) List(project string, region string) *ResourceP // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *ResourcePoliciesListCall) Filter(filter string) *ResourcePoliciesListCall { +func (c *RoutersListCall) Filter(filter string) *RoutersListCall { c.urlParams_.Set("filter", filter) return c } @@ -98570,7 +114259,7 @@ func (c *ResourcePoliciesListCall) Filter(filter string) *ResourcePoliciesListCa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *ResourcePoliciesListCall) MaxResults(maxResults int64) *ResourcePoliciesListCall { +func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -98587,7 +114276,7 @@ func (c *ResourcePoliciesListCall) MaxResults(maxResults int64) *ResourcePolicie // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *ResourcePoliciesListCall) OrderBy(orderBy string) *ResourcePoliciesListCall { +func (c *RoutersListCall) OrderBy(orderBy string) *RoutersListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -98595,7 +114284,7 @@ func (c *ResourcePoliciesListCall) OrderBy(orderBy string) *ResourcePoliciesList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
-func (c *ResourcePoliciesListCall) PageToken(pageToken string) *ResourcePoliciesListCall { +func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -98603,7 +114292,7 @@ func (c *ResourcePoliciesListCall) PageToken(pageToken string) *ResourcePolicies // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ResourcePoliciesListCall) Fields(s ...googleapi.Field) *ResourcePoliciesListCall { +func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -98613,7 +114302,7 @@ func (c *ResourcePoliciesListCall) Fields(s ...googleapi.Field) *ResourcePolicie // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ResourcePoliciesListCall) IfNoneMatch(entityTag string) *ResourcePoliciesListCall { +func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { c.ifNoneMatch_ = entityTag return c } @@ -98621,21 +114310,21 @@ func (c *ResourcePoliciesListCall) IfNoneMatch(entityTag string) *ResourcePolici // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ResourcePoliciesListCall) Context(ctx context.Context) *ResourcePoliciesListCall { +func (c *RoutersListCall) Context(ctx context.Context) *RoutersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ResourcePoliciesListCall) Header() http.Header { +func (c *RoutersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ResourcePoliciesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -98647,7 +114336,7 @@ func (c *ResourcePoliciesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -98661,14 +114350,14 @@ func (c *ResourcePoliciesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.resourcePolicies.list" call. -// Exactly one of *ResourcePolicyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ResourcePolicyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePolicyList, error) { +// Do executes the "compute.routers.list" call. +// Exactly one of *RouterList or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *RouterList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -98687,7 +114376,7 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ResourcePolicyList{ + ret := &RouterList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -98699,9 +114388,9 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo } return ret, nil // { - // "description": "A list all the resource policies that have been configured for the specified project in specified region.", + // "description": "Retrieves a list of Router resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.resourcePolicies.list", + // "id": "compute.routers.list", // "parameterOrder": [ // "project", // "region" @@ -98745,9 +114434,9 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo // "type": "string" // } // }, - // "path": "{project}/regions/{region}/resourcePolicies", + // "path": "{project}/regions/{region}/routers", // "response": { - // "$ref": "ResourcePolicyList" + // "$ref": "RouterList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -98761,7 +114450,7 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ResourcePoliciesListCall) Pages(ctx context.Context, f func(*ResourcePolicyList) error) error { +func (c *RoutersListCall) Pages(ctx context.Context, f func(*RouterList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -98779,34 +114468,54 @@ func (c *ResourcePoliciesListCall) Pages(ctx context.Context, f func(*ResourcePo } } -// method id "compute.resourcePolicies.testIamPermissions": +// method id "compute.routers.patch": -type ResourcePoliciesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RoutersPatchCall struct { + s *Service + project string + region string + router string + router2 *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *ResourcePoliciesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *ResourcePoliciesTestIamPermissionsCall { - c := &ResourcePoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified Router resource with the data included +// in the request. This method supports PATCH semantics and uses JSON +// merge patch format and processing rules. 
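A hedged illustration of the merge-patch semantics described above, not part of the vendored file: only the fields set on the request body are applied to the existing router, and the call returns an asynchronous Operation rather than the updated resource. It reuses the authenticated *compute.Service from the earlier sketch; the identifiers and the Description value are placeholders.

// patchRouterDescription updates only the Description of an existing router
// via compute.routers.patch; zero-valued fields on the Router body are left
// untouched (JSON merge patch semantics).
func patchRouterDescription(ctx context.Context, svc *compute.Service) error {
	op, err := svc.Routers.Patch("my-project", "us-central1", "my-router", &compute.Router{
		Description: "updated by example", // placeholder change
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	// The Operation is asynchronous; callers typically poll it (for example via
	// RegionOperations.Get) before relying on the change.
	fmt.Printf("patch operation %s: %s\n", op.Name, op.Status)
	return nil
}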
+func (r *RoutersService) Patch(project string, region string, router string, router2 *Router) *RoutersPatchCall { + c := &RoutersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.router = router + c.router2 = router2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutersPatchCall) RequestId(requestId string) *RoutersPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ResourcePoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ResourcePoliciesTestIamPermissionsCall { +func (c *RoutersPatchCall) Fields(s ...googleapi.Field) *RoutersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -98814,57 +114523,57 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *R // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ResourcePoliciesTestIamPermissionsCall) Context(ctx context.Context) *ResourcePoliciesTestIamPermissionsCall { +func (c *RoutersPatchCall) Context(ctx context.Context) *RoutersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ResourcePoliciesTestIamPermissionsCall) Header() http.Header { +func (c *RoutersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ResourcePoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.resourcePolicies.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ResourcePoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.routers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -98883,7 +114592,7 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -98895,13 +114604,13 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.resourcePolicies.testIamPermissions", + // "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.routers.patch", // "parameterOrder": [ // "project", // "region", - // "resource" + // "router" // ], // "parameters": { // "project": { @@ -98912,185 +114621,127 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/routers/{router}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "Router" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.routers.aggregatedList": +// method id "compute.routers.preview": -type RoutersAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RoutersPreviewCall struct { + s *Service + project string + region string + router string + router2 *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of routers. -func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCall { - c := &RoutersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Preview: Preview fields auto-generated during router create and +// update operations. Calling this method does NOT create or update the +// router. +func (r *RoutersService) Preview(project string, region string, router string, router2 *Router) *RoutersPreviewCall { + c := &RoutersPreviewCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
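The compute.routers.preview method added above explicitly does not create or update anything. A hedged sketch of how a caller might use it, again reusing the service value from the first sketch; the Resource field on RoutersPreviewResponse is an assumption taken from the upstream API surface, and all identifiers are placeholders.

// previewRouterChange shows the auto-completed Router that would result from
// a change, without persisting anything.
func previewRouterChange(ctx context.Context, svc *compute.Service) error {
	resp, err := svc.Routers.Preview("my-project", "us-central1", "my-router", &compute.Router{
		Description: "candidate change", // placeholder; nothing is written
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println(resp.Resource.Name)
	return nil
}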
-func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RoutersAggregatedListCall) MaxResults(maxResults int64) *RoutersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RoutersAggregatedListCall) OrderBy(orderBy string) *RoutersAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.router = router + c.router2 = router2 return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersAggregatedListCall) Fields(s ...googleapi.Field) *RoutersAggregatedListCall { +func (c *RoutersPreviewCall) Fields(s ...googleapi.Field) *RoutersPreviewCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutersAggregatedListCall) IfNoneMatch(entityTag string) *RoutersAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersAggregatedListCall) Context(ctx context.Context) *RoutersAggregatedListCall { +func (c *RoutersPreviewCall) Context(ctx context.Context) *RoutersPreviewCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RoutersAggregatedListCall) Header() http.Header { +func (c *RoutersPreviewCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/routers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/preview") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.aggregatedList" call. -// Exactly one of *RouterAggregatedList or error will be non-nil. Any +// Do executes the "compute.routers.preview" call. +// Exactly one of *RoutersPreviewResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *RouterAggregatedList.ServerResponse.Header or (if a response was +// *RoutersPreviewResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAggregatedList, error) { +func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -99109,7 +114760,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouterAggregatedList{ + ret := &RoutersPreviewResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99121,47 +114772,43 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg } return ret, nil // { - // "description": "Retrieves an aggregated list of routers.", - // "httpMethod": "GET", - // "id": "compute.routers.aggregatedList", + // "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", + // "httpMethod": "POST", + // "id": "compute.routers.preview", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "router" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "router": { + // "description": "Name of the Router resource to query.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/routers", + // "path": "{project}/regions/{region}/routers/{router}/preview", + // "request": { + // "$ref": "Router" + // }, // "response": { - // "$ref": "RouterAggregatedList" + // "$ref": "RoutersPreviewResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -99172,71 +114819,34 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RoutersAggregatedListCall) Pages(ctx context.Context, f func(*RouterAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routers.delete": - -type RoutersDeleteCall struct { - s *Service - project string - region string - router string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +// method id "compute.routers.testIamPermissions": -// Delete: Deletes the specified Router resource. -func (r *RoutersService) Delete(project string, region string, router string) *RoutersDeleteCall { - c := &RoutersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router - return c +type RoutersTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RoutersDeleteCall) RequestId(requestId string) *RoutersDeleteCall { - c.urlParams_.Set("requestId", requestId) +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RoutersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutersTestIamPermissionsCall { + c := &RoutersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { +func (c *RoutersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -99244,52 +114854,57 @@ func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersDeleteCall) Context(ctx context.Context) *RoutersDeleteCall { +func (c *RoutersTestIamPermissionsCall) Context(ctx context.Context) *RoutersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersDeleteCall) Header() http.Header { +func (c *RoutersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.routers.testIamPermissions" call. 
+// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -99308,7 +114923,7 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99320,13 +114935,13 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified Router resource.", - // "httpMethod": "DELETE", - // "id": "compute.routers.delete", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.routers.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "router" + // "resource" // ], // "parameters": { // "project": { @@ -99337,110 +114952,121 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to delete.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}", + // "path": "{project}/regions/{region}/routers/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.routers.get": +// method id "compute.routers.update": -type RoutersGetCall struct { - s *Service - project string - region string - router string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RoutersUpdateCall struct { + s *Service + project string + region string + router string + router2 *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Router resource. Gets a list of available -// routers by making a list() request. -func (r *RoutersService) Get(project string, region string, router string) *RoutersGetCall { - c := &RoutersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified Router resource with the data included +// in the request. +func (r *RoutersService) Update(project string, region string, router string, router2 *Router) *RoutersUpdateCall { + c := &RoutersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.router = router + c.router2 = router2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutersUpdateCall) RequestId(requestId string) *RoutersUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
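A minimal usage sketch for the Routers.Update call introduced in this hunk (not part of the diff itself). The import path and the compute.New constructor are assumptions about how this vendored, generated package is normally consumed, and the UUID is only illustrative.

package main

import (
	"context"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path for this generated surface
)

// updateRouter replaces a Router definition and relies on RequestId to make
// retries of the same update idempotent, as the doc comment above describes.
func updateRouter(ctx context.Context, client *http.Client, project, region, name string, router *compute.Router) error {
	svc, err := compute.New(client) // assumed constructor; client must already be authenticated
	if err != nil {
		return err
	}
	op, err := svc.Routers.Update(project, region, name, router).
		RequestId("123e4567-e89b-12d3-a456-426614174000"). // illustrative UUID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("routers.update operation %s is %s", op.Name, op.Status)
	return nil
}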
-func (c *RoutersGetCall) Fields(s ...googleapi.Field) *RoutersGetCall { +func (c *RoutersUpdateCall) Fields(s ...googleapi.Field) *RoutersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutersGetCall) IfNoneMatch(entityTag string) *RoutersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersGetCall) Context(ctx context.Context) *RoutersGetCall { +func (c *RoutersUpdateCall) Context(ctx context.Context) *RoutersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersGetCall) Header() http.Header { +func (c *RoutersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } @@ -99453,14 +115079,14 @@ func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.get" call. -// Exactly one of *Router or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Router.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { +// Do executes the "compute.routers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -99479,7 +115105,7 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Router{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99491,9 +115117,9 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } return ret, nil // { - // "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.routers.get", + // "description": "Updates the specified Router resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.routers.update", // "parameterOrder": [ // "project", // "region", @@ -99514,183 +115140,129 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "router": { - // "description": "Name of the Router resource to return.", + // "description": "Name of the Router resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, // "path": "{project}/regions/{region}/routers/{router}", - // "response": { + // "request": { // "$ref": "Router" // }, + // "response": { + // "$ref": "Operation" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.routers.getNatMappingInfo": +// method id "compute.routes.delete": -type RoutersGetNatMappingInfoCall struct { - s *Service - project string - region string - router string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RoutesDeleteCall struct { + s *Service + project string + route string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetNatMappingInfo: Retrieves runtime Nat mapping information of VM -// endpoints. -func (r *RoutersService) GetNatMappingInfo(project string, region string, router string) *RoutersGetNatMappingInfoCall { - c := &RoutersGetNatMappingInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Route resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete +func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { + c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *RoutersGetNatMappingInfoCall) Filter(filter string) *RoutersGetNatMappingInfoCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RoutersGetNatMappingInfoCall) MaxResults(maxResults int64) *RoutersGetNatMappingInfoCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.route = route return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. 
-func (c *RoutersGetNatMappingInfoCall) OrderBy(orderBy string) *RoutersGetNatMappingInfoCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RoutersGetNatMappingInfoCall) PageToken(pageToken string) *RoutersGetNatMappingInfoCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutesDeleteCall) RequestId(requestId string) *RoutesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersGetNatMappingInfoCall) Fields(s ...googleapi.Field) *RoutersGetNatMappingInfoCall { +func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutersGetNatMappingInfoCall) IfNoneMatch(entityTag string) *RoutersGetNatMappingInfoCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersGetNatMappingInfoCall) Context(ctx context.Context) *RoutersGetNatMappingInfoCall { +func (c *RoutesDeleteCall) Context(ctx context.Context) *RoutesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersGetNatMappingInfoCall) Header() http.Header { +func (c *RoutesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersGetNatMappingInfoCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getNatMappingInfo") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "router": c.router, + "route": c.route, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.getNatMappingInfo" call. -// Exactly one of *VmEndpointNatMappingsList or error will be non-nil. -// Any non-2xx status code is an error. 
Response headers are in either -// *VmEndpointNatMappingsList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndpointNatMappingsList, error) { +// Do executes the "compute.routes.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -99709,7 +115281,7 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &VmEndpointNatMappingsList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99721,38 +115293,14 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp } return ret, nil // { - // "description": "Retrieves runtime Nat mapping information of VM endpoints.", - // "httpMethod": "GET", - // "id": "compute.routers.getNatMappingInfo", + // "description": "Deletes the specified Route resource.", + // "httpMethod": "DELETE", + // "id": "compute.routes.delete", // "parameterOrder": [ // "project", - // "region", - // "router" + // "route" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -99760,82 +115308,57 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, - // "router": { - // "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", + // "route": { + // "description": "Name of the Route resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}/getNatMappingInfo", + // "path": "{project}/global/routes/{route}", // "response": { - // "$ref": "VmEndpointNatMappingsList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RoutersGetNatMappingInfoCall) Pages(ctx context.Context, f func(*VmEndpointNatMappingsList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routers.getRouterStatus": +// method id "compute.routes.get": -type RoutersGetRouterStatusCall struct { +type RoutesGetCall struct { s *Service project string - region string - router string + route string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetRouterStatus: Retrieves runtime information of the specified -// router. -func (r *RoutersService) GetRouterStatus(project string, region string, router string) *RoutersGetRouterStatusCall { - c := &RoutersGetRouterStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Route resource. Gets a list of available +// routes by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get +func (r *RoutesService) Get(project string, route string) *RoutesGetCall { + c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router + c.route = route return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersGetRouterStatusCall) Fields(s ...googleapi.Field) *RoutersGetRouterStatusCall { +func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -99845,7 +115368,7 @@ func (c *RoutersGetRouterStatusCall) Fields(s ...googleapi.Field) *RoutersGetRou // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutersGetRouterStatusCall) IfNoneMatch(entityTag string) *RoutersGetRouterStatusCall { +func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -99853,21 +115376,21 @@ func (c *RoutersGetRouterStatusCall) IfNoneMatch(entityTag string) *RoutersGetRo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersGetRouterStatusCall) Context(ctx context.Context) *RoutersGetRouterStatusCall { +func (c *RoutesGetCall) Context(ctx context.Context) *RoutesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
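A short sketch of how the IfNoneMatch/IsNotModified behaviour documented above is typically used with the new Routes.Get call. It assumes the same compute import and *compute.Service handle as the earlier sketch, plus the companion package google.golang.org/api/googleapi imported as googleapi.

// getRouteIfChanged re-fetches a Route only when it has changed since the
// supplied ETag; a (nil, nil) return means the resource was not modified.
func getRouteIfChanged(ctx context.Context, svc *compute.Service, project, route, etag string) (*compute.Route, error) {
	r, err := svc.Routes.Get(project, route).
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return r, nil
}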
-func (c *RoutersGetRouterStatusCall) Header() http.Header { +func (c *RoutesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -99879,7 +115402,7 @@ func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getRouterStatus") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -99888,20 +115411,19 @@ func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, erro req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "router": c.router, + "route": c.route, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.getRouterStatus" call. -// Exactly one of *RouterStatusResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RouterStatusResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterStatusResponse, error) { +// Do executes the "compute.routes.get" call. +// Exactly one of *Route or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Route.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -99920,7 +115442,7 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouterStatusResponse{ + ret := &Route{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -99932,13 +115454,12 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt } return ret, nil // { - // "description": "Retrieves runtime information of the specified router.", + // "description": "Returns the specified Route resource. 
Gets a list of available routes by making a list() request.", // "httpMethod": "GET", - // "id": "compute.routers.getRouterStatus", + // "id": "compute.routes.get", // "parameterOrder": [ // "project", - // "region", - // "router" + // "route" // ], // "parameters": { // "project": { @@ -99948,24 +115469,17 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to query.", + // "route": { + // "description": "Name of the Route resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", + // "path": "{project}/global/routes/{route}", // "response": { - // "$ref": "RouterStatusResponse" + // "$ref": "Route" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -99976,25 +115490,24 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt } -// method id "compute.routers.insert": +// method id "compute.routes.insert": -type RoutersInsertCall struct { +type RoutesInsertCall struct { s *Service project string - region string - router *Router + route *Route urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a Router resource in the specified project and region -// using the data included in the request. -func (r *RoutersService) Insert(project string, region string, router *Router) *RoutersInsertCall { - c := &RoutersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Route resource in the specified project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert +func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { + c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router + c.route = route return c } @@ -100012,7 +115525,7 @@ func (r *RoutersService) Insert(project string, region string, router *Router) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutersInsertCall) RequestId(requestId string) *RoutersInsertCall { +func (c *RoutesInsertCall) RequestId(requestId string) *RoutesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -100020,7 +115533,7 @@ func (c *RoutersInsertCall) RequestId(requestId string) *RoutersInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersInsertCall) Fields(s ...googleapi.Field) *RoutersInsertCall { +func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -100028,35 +115541,35 @@ func (c *RoutersInsertCall) Fields(s ...googleapi.Field) *RoutersInsertCall { // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersInsertCall) Context(ctx context.Context) *RoutersInsertCall { +func (c *RoutesInsertCall) Context(ctx context.Context) *RoutesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersInsertCall) Header() http.Header { +func (c *RoutesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.route) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -100065,19 +115578,18 @@ func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.insert" call. +// Do executes the "compute.routes.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -100108,12 +115620,11 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a Router resource in the specified project and region using the data included in the request.", + // "description": "Creates a Route resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.routers.insert", + // "id": "compute.routes.insert", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "project": { @@ -100123,22 +115634,15 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers", + // "path": "{project}/global/routes", // "request": { - // "$ref": "Router" + // "$ref": "Route" // }, // "response": { // "$ref": "Operation" @@ -100151,24 +115655,23 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.routers.list": +// method id "compute.routes.list": -type RoutersListCall struct { +type RoutesListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of Router resources available to the specified -// project. -func (r *RoutersService) List(project string, region string) *RoutersListCall { - c := &RoutersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of Route resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list +func (r *RoutesService) List(project string) *RoutesListCall { + c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -100194,7 +115697,7 @@ func (r *RoutersService) List(project string, region string) *RoutersListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *RoutersListCall) Filter(filter string) *RoutersListCall { +func (c *RoutesListCall) Filter(filter string) *RoutesListCall { c.urlParams_.Set("filter", filter) return c } @@ -100205,7 +115708,7 @@ func (c *RoutersListCall) Filter(filter string) *RoutersListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { +func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -100222,7 +115725,7 @@ func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RoutersListCall) OrderBy(orderBy string) *RoutersListCall { +func (c *RoutesListCall) OrderBy(orderBy string) *RoutesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -100230,7 +115733,7 @@ func (c *RoutersListCall) OrderBy(orderBy string) *RoutersListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
-func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { +func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -100238,7 +115741,7 @@ func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { +func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -100248,7 +115751,7 @@ func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { +func (c *RoutesListCall) IfNoneMatch(entityTag string) *RoutesListCall { c.ifNoneMatch_ = entityTag return c } @@ -100256,21 +115759,21 @@ func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersListCall) Context(ctx context.Context) *RoutersListCall { +func (c *RoutesListCall) Context(ctx context.Context) *RoutesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersListCall) Header() http.Header { +func (c *RoutesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -100282,7 +115785,7 @@ func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -100291,19 +115794,18 @@ func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.list" call. -// Exactly one of *RouterList or error will be non-nil. Any non-2xx +// Do executes the "compute.routes.list" call. +// Exactly one of *RouteList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *RouterList.ServerResponse.Header or (if a response was returned at +// *RouteList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
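A sketch of consuming the maxResults/pageToken parameters described above through the generated Pages helper on the list call, rather than handling page tokens by hand (same assumptions as the earlier sketches).

// listRouteNames walks every page of Routes in a project and collects the names.
func listRouteNames(ctx context.Context, svc *compute.Service, project string) ([]string, error) {
	var names []string
	err := svc.Routes.List(project).
		MaxResults(500). // per-page size; the parameter accepts 0 to 500, inclusive
		Pages(ctx, func(page *compute.RouteList) error {
			for _, r := range page.Items {
				names = append(names, r.Name)
			}
			return nil // a non-nil error here stops the iteration
		})
	return names, err
}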
-func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) { +func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -100322,7 +115824,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouterList{ + ret := &RouteList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -100334,12 +115836,11 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) } return ret, nil // { - // "description": "Retrieves a list of Router resources available to the specified project.", + // "description": "Retrieves the list of Route resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.routers.list", + // "id": "compute.routes.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -100371,18 +115872,11 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers", + // "path": "{project}/global/routes", // "response": { - // "$ref": "RouterList" + // "$ref": "RouteList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -100396,7 +115890,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RoutersListCall) Pages(ctx context.Context, f func(*RouterList) error) error { +func (c *RoutesListCall) Pages(ctx context.Context, f func(*RouteList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -100414,222 +115908,32 @@ func (c *RoutersListCall) Pages(ctx context.Context, f func(*RouterList) error) } } -// method id "compute.routers.patch": - -type RoutersPatchCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches the specified Router resource with the data included -// in the request. This method supports PATCH semantics and uses JSON -// merge patch format and processing rules. -func (r *RoutersService) Patch(project string, region string, router string, router2 *Router) *RoutersPatchCall { - c := &RoutersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router - c.router2 = router2 - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
-// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutersPatchCall) RequestId(requestId string) *RoutersPatchCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutersPatchCall) Fields(s ...googleapi.Field) *RoutersPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutersPatchCall) Context(ctx context.Context) *RoutersPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RoutersPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.routers.patch", - // "parameterOrder": [ - // "project", - // "region", - // "router" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/routers/{router}", - // "request": { - // "$ref": "Router" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.routers.preview": +// method id "compute.routes.testIamPermissions": -type RoutersPreviewCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RoutesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Preview: Preview fields auto-generated during router create and -// update operations. Calling this method does NOT create or update the -// router. 
-func (r *RoutersService) Preview(project string, region string, router string, router2 *Router) *RoutersPreviewCall { - c := &RoutersPreviewCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RoutesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutesTestIamPermissionsCall { + c := &RoutesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router - c.router2 = router2 + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersPreviewCall) Fields(s ...googleapi.Field) *RoutersPreviewCall { +func (c *RoutesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -100637,35 +115941,35 @@ func (c *RoutersPreviewCall) Fields(s ...googleapi.Field) *RoutersPreviewCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersPreviewCall) Context(ctx context.Context) *RoutersPreviewCall { +func (c *RoutesTestIamPermissionsCall) Context(ctx context.Context) *RoutesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersPreviewCall) Header() http.Header { +func (c *RoutesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/preview") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -100673,21 +115977,20 @@ func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.preview" call. -// Exactly one of *RoutersPreviewResponse or error will be non-nil. Any +// Do executes the "compute.routes.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. 
Response headers are in either -// *RoutersPreviewResponse.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewResponse, error) { +func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -100706,7 +116009,7 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RoutersPreviewResponse{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -100718,13 +116021,12 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe } return ret, nil // { - // "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.routers.preview", + // "id": "compute.routes.testIamPermissions", // "parameterOrder": [ // "project", - // "region", - // "router" + // "resource" // ], // "parameters": { // "project": { @@ -100734,27 +116036,20 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to query.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}/preview", + // "path": "{project}/global/routes/{resource}/testIamPermissions", // "request": { - // "$ref": "Router" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "RoutersPreviewResponse" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -100765,34 +116060,38 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe } -// method id "compute.routers.testIamPermissions": +// method id "compute.securityPolicies.addRule": -type RoutersTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SecurityPoliciesAddRuleCall struct { + s *Service + project string + securityPolicy string + securitypolicyrule *SecurityPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
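A hedged sketch of the new compute.routes.testIamPermissions call, assuming svc as above and that TestPermissionsRequest and TestPermissionsResponse each expose a Permissions []string field (their definitions are outside this hunk); the permission name is purely illustrative.

// Sketch only: the Permissions fields and the permission string are assumptions.
func canDeleteRoute(ctx context.Context, svc *Service, project, route string) (bool, error) {
	req := &TestPermissionsRequest{Permissions: []string{"compute.routes.delete"}}
	resp, err := svc.Routes.TestIamPermissions(project, route, req).Context(ctx).Do()
	if err != nil {
		return false, err
	}
	// The response lists the subset of requested permissions the caller holds.
	return len(resp.Permissions) > 0, nil
}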
-func (r *RoutersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutersTestIamPermissionsCall { - c := &RoutersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddRule: Inserts a rule into a security policy. +func (r *SecurityPoliciesService) AddRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesAddRuleCall { + c := &SecurityPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.securityPolicy = securityPolicy + c.securitypolicyrule = securitypolicyrule + return c +} + +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *SecurityPoliciesAddRuleCall) ValidateOnly(validateOnly bool) *SecurityPoliciesAddRuleCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutersTestIamPermissionsCall { +func (c *SecurityPoliciesAddRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesAddRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -100800,35 +116099,35 @@ func (c *RoutersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutersTes // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersTestIamPermissionsCall) Context(ctx context.Context) *RoutersTestIamPermissionsCall { +func (c *SecurityPoliciesAddRuleCall) Context(ctx context.Context) *SecurityPoliciesAddRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersTestIamPermissionsCall) Header() http.Header { +func (c *SecurityPoliciesAddRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/addRule") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -100836,21 +116135,20 @@ func (c *RoutersTestIamPermissionsCall) doRequest(alt string) (*http.Response, e } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.securityPolicies.addRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -100869,7 +116167,7 @@ func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -100881,13 +116179,12 @@ func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Inserts a rule into a security policy.", // "httpMethod": "POST", - // "id": "compute.routers.testIamPermissions", + // "id": "compute.securityPolicies.addRule", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "securityPolicy" // ], // "parameters": { // "project": { @@ -100897,58 +116194,50 @@ func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "securityPolicy": { + // "description": "Name of the security policy to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "{project}/regions/{region}/routers/{resource}/testIamPermissions", + // "path": "{project}/global/securityPolicies/{securityPolicy}/addRule", // "request": { - 
// "$ref": "TestPermissionsRequest" + // "$ref": "SecurityPolicyRule" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.routers.update": +// method id "compute.securityPolicies.delete": -type RoutersUpdateCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SecurityPoliciesDeleteCall struct { + s *Service + project string + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified Router resource with the data included -// in the request. -func (r *RoutersService) Update(project string, region string, router string, router2 *Router) *RoutersUpdateCall { - c := &RoutersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified policy. +func (r *SecurityPoliciesService) Delete(project string, securityPolicy string) *SecurityPoliciesDeleteCall { + c := &SecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router - c.router2 = router2 + c.securityPolicy = securityPolicy return c } @@ -100966,7 +116255,7 @@ func (r *RoutersService) Update(project string, region string, router string, ro // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutersUpdateCall) RequestId(requestId string) *RoutersUpdateCall { +func (c *SecurityPoliciesDeleteCall) RequestId(requestId string) *SecurityPoliciesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -100974,7 +116263,7 @@ func (c *RoutersUpdateCall) RequestId(requestId string) *RoutersUpdateCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersUpdateCall) Fields(s ...googleapi.Field) *RoutersUpdateCall { +func (c *SecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *SecurityPoliciesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -100982,57 +116271,51 @@ func (c *RoutersUpdateCall) Fields(s ...googleapi.Field) *RoutersUpdateCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersUpdateCall) Context(ctx context.Context) *RoutersUpdateCall { +func (c *SecurityPoliciesDeleteCall) Context(ctx context.Context) *SecurityPoliciesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
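The new compute.securityPolicies.addRule call above takes the rule in the request body and supports a validateOnly dry run. A minimal sketch, assuming svc exposes a SecurityPolicies *SecurityPoliciesService field (not shown in this hunk):

// Sketch only; svc and its SecurityPolicies field are assumptions.
func addRuleDryRun(ctx context.Context, svc *Service, project, policy string, rule *SecurityPolicyRule) (*Operation, error) {
	// ValidateOnly(true) asks the server to validate the rule without committing it.
	return svc.SecurityPolicies.AddRule(project, policy, rule).ValidateOnly(true).Context(ctx).Do()
}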
-func (c *RoutersUpdateCall) Header() http.Header { +func (c *SecurityPoliciesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, + "project": c.project, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.update" call. +// Do executes the "compute.securityPolicies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101063,13 +116346,12 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Updates the specified Router resource with the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.routers.update", + // "description": "Deletes the specified policy.", + // "httpMethod": "DELETE", + // "id": "compute.securityPolicies.delete", // "parameterOrder": [ // "project", - // "region", - // "router" + // "securityPolicy" // ], // "parameters": { // "project": { @@ -101079,30 +116361,20 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "router": { - // "description": "Name of the Router resource to update.", + // "securityPolicy": { + // "description": "Name of the security policy to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}", - // "request": { - // "$ref": "Router" - // }, + // "path": "{project}/global/securityPolicies/{securityPolicy}", // "response": { // "$ref": "Operation" // }, @@ -101114,101 +116386,96 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.routes.delete": +// method id "compute.securityPolicies.get": -type RoutesDeleteCall struct { - s *Service - project string - route string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SecurityPoliciesGetCall struct { + s *Service + project string + securityPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Route resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete -func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { - c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: List all of the ordered rules present in a single specified +// policy. +func (r *SecurityPoliciesService) Get(project string, securityPolicy string) *SecurityPoliciesGetCall { + c := &SecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.route = route - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutesDeleteCall) RequestId(requestId string) *RoutesDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.securityPolicy = securityPolicy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
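The delete call's requestId parameter, documented above, makes retries idempotent: reusing the same UUID lets the server recognize and ignore a duplicate request. A sketch, with UUID generation left to a hypothetical helper:

// Sketch only; newRequestUUID is a hypothetical helper, not part of this package.
func deletePolicy(ctx context.Context, svc *Service, project, policy string) (*Operation, error) {
	requestID := newRequestUUID()
	// Retrying with the same requestID is safe: the server ignores the duplicate.
	return svc.SecurityPolicies.Delete(project, policy).RequestId(requestID).Context(ctx).Do()
}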
-func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { +func (c *SecurityPoliciesGetCall) Fields(s ...googleapi.Field) *SecurityPoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SecurityPoliciesGetCall) IfNoneMatch(entityTag string) *SecurityPoliciesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesDeleteCall) Context(ctx context.Context) *RoutesDeleteCall { +func (c *SecurityPoliciesGetCall) Context(ctx context.Context) *SecurityPoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutesDeleteCall) Header() http.Header { +func (c *SecurityPoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "route": c.route, + "project": c.project, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.securityPolicies.get" call. +// Exactly one of *SecurityPolicy or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *SecurityPolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101227,7 +116494,7 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &SecurityPolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101239,12 +116506,12 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified Route resource.", - // "httpMethod": "DELETE", - // "id": "compute.routes.delete", + // "description": "List all of the ordered rules present in a single specified policy.", + // "httpMethod": "GET", + // "id": "compute.securityPolicies.get", // "parameterOrder": [ // "project", - // "route" + // "securityPolicy" // ], // "parameters": { // "project": { @@ -101254,57 +116521,58 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "route": { - // "description": "Name of the Route resource to delete.", + // "securityPolicy": { + // "description": "Name of the security policy to get.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/routes/{route}", + // "path": "{project}/global/securityPolicies/{securityPolicy}", // "response": { - // "$ref": "Operation" + // "$ref": "SecurityPolicy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.routes.get": +// method id "compute.securityPolicies.getRule": -type RoutesGetCall struct { - s *Service - project string - route string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type SecurityPoliciesGetRuleCall struct { + s *Service + project string + securityPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Route resource. Gets a list of available -// routes by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get -func (r *RoutesService) Get(project string, route string) *RoutesGetCall { - c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetRule: Gets a rule at the specified priority. 
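The get call supports conditional requests via IfNoneMatch; per the comments above, googleapi.IsNotModified (from the googleapi package this file already imports) distinguishes a 304 from a real error. A sketch, assuming the caller has cached the policy's ETag from an earlier response:

// Sketch only; obtaining and storing the ETag is left to the caller.
func refreshPolicy(ctx context.Context, svc *Service, project, policy, etag string) (*SecurityPolicy, error) {
	sp, err := svc.SecurityPolicies.Get(project, policy).IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // the cached copy is still current
	}
	return sp, err
}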
+func (r *SecurityPoliciesService) GetRule(project string, securityPolicy string) *SecurityPoliciesGetRuleCall { + c := &SecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.route = route + c.securityPolicy = securityPolicy + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to get from the security policy. +func (c *SecurityPoliciesGetRuleCall) Priority(priority int64) *SecurityPoliciesGetRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { +func (c *SecurityPoliciesGetRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesGetRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101314,7 +116582,7 @@ func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { +func (c *SecurityPoliciesGetRuleCall) IfNoneMatch(entityTag string) *SecurityPoliciesGetRuleCall { c.ifNoneMatch_ = entityTag return c } @@ -101322,21 +116590,21 @@ func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesGetCall) Context(ctx context.Context) *RoutesGetCall { +func (c *SecurityPoliciesGetRuleCall) Context(ctx context.Context) *SecurityPoliciesGetRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutesGetCall) Header() http.Header { +func (c *SecurityPoliciesGetRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -101348,7 +116616,7 @@ func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/getRule") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -101356,20 +116624,20 @@ func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "route": c.route, + "project": c.project, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.get" call. -// Exactly one of *Route or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Route.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { +// Do executes the "compute.securityPolicies.getRule" call. +// Exactly one of *SecurityPolicyRule or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SecurityPolicyRule.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyRule, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101388,7 +116656,7 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Route{ + ret := &SecurityPolicyRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101400,14 +116668,20 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { } return ret, nil // { - // "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request.", + // "description": "Gets a rule at the specified priority.", // "httpMethod": "GET", - // "id": "compute.routes.get", + // "id": "compute.securityPolicies.getRule", // "parameterOrder": [ // "project", - // "route" + // "securityPolicy" // ], // "parameters": { + // "priority": { + // "description": "The priority of the rule to get from the security policy.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -101415,17 +116689,17 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { // "required": true, // "type": "string" // }, - // "route": { - // "description": "Name of the Route resource to return.", + // "securityPolicy": { + // "description": "Name of the security policy to which the queried rule belongs.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/routes/{route}", + // "path": "{project}/global/securityPolicies/{securityPolicy}/getRule", // "response": { - // "$ref": "Route" + // "$ref": "SecurityPolicyRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -101436,24 +116710,23 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { } -// method id "compute.routes.insert": +// method id "compute.securityPolicies.insert": -type RoutesInsertCall struct { - s *Service - project string - route *Route - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SecurityPoliciesInsertCall struct { + s *Service + project string + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a Route resource in the specified project using the -// data included in the request. 
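compute.securityPolicies.getRule addresses a rule by its priority, passed as a query parameter rather than a path segment. A minimal sketch, with svc assumed as in the earlier examples:

// Sketch only; svc and its SecurityPolicies field are assumptions.
func getRuleAtPriority(ctx context.Context, svc *Service, project, policy string, priority int64) (*SecurityPolicyRule, error) {
	return svc.SecurityPolicies.GetRule(project, policy).Priority(priority).Context(ctx).Do()
}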
-// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert -func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { - c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a new policy in the specified project using the data +// included in the request. +func (r *SecurityPoliciesService) Insert(project string, securitypolicy *SecurityPolicy) *SecurityPoliciesInsertCall { + c := &SecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.route = route + c.securitypolicy = securitypolicy return c } @@ -101471,53 +116744,280 @@ func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutesInsertCall) RequestId(requestId string) *RoutesInsertCall { +func (c *SecurityPoliciesInsertCall) RequestId(requestId string) *SecurityPoliciesInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *SecurityPoliciesInsertCall) ValidateOnly(validateOnly bool) *SecurityPoliciesInsertCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall { +func (c *SecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *SecurityPoliciesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesInsertCall) Context(ctx context.Context) *SecurityPoliciesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SecurityPoliciesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new policy in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "{project}/global/securityPolicies", + // "request": { + // "$ref": "SecurityPolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.securityPolicies.list": + +type SecurityPoliciesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: List all the policies that have been configured for the +// specified project. +func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall { + c := &SecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
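compute.securityPolicies.insert accepts the same validateOnly dry-run flag as addRule, which allows a policy to be checked before it is committed. A sketch, with svc assumed as above:

// Sketch only. A fresh call is built for the real insert so that the
// validateOnly parameter from the dry run is not carried over.
func createPolicy(ctx context.Context, svc *Service, project string, policy *SecurityPolicy) (*Operation, error) {
	if _, err := svc.SecurityPolicies.Insert(project, policy).ValidateOnly(true).Context(ctx).Do(); err != nil {
		return nil, err
	}
	return svc.SecurityPolicies.Insert(project, policy).Context(ctx).Do()
}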
+// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *SecurityPoliciesListCall) Filter(filter string) *SecurityPoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *SecurityPoliciesListCall) MaxResults(maxResults int64) *SecurityPoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *SecurityPoliciesListCall) OrderBy(orderBy string) *SecurityPoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SecurityPoliciesListCall) PageToken(pageToken string) *SecurityPoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesListCall) Fields(s ...googleapi.Field) *SecurityPoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SecurityPoliciesListCall) IfNoneMatch(entityTag string) *SecurityPoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesInsertCall) Context(ctx context.Context) *RoutesInsertCall { +func (c *SecurityPoliciesListCall) Context(ctx context.Context) *SecurityPoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutesInsertCall) Header() http.Header { +func (c *SecurityPoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.route) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -101528,14 +117028,14 @@ func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.securityPolicies.list" call. +// Exactly one of *SecurityPolicyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SecurityPolicyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101554,7 +117054,7 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &SecurityPolicyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101566,44 +117066,81 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a Route resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.routes.insert", + // "description": "List all the policies that have been configured for the specified project.", + // "httpMethod": "GET", + // "id": "compute.securityPolicies.list", // "parameterOrder": [ // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/routes", - // "request": { - // "$ref": "Route" - // }, + // "path": "{project}/global/securityPolicies", // "response": { - // "$ref": "Operation" + // "$ref": "SecurityPolicyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.routes.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPolicyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RoutesListCall struct { +// method id "compute.securityPolicies.listPreconfiguredExpressionSets": + +type SecurityPoliciesListPreconfiguredExpressionSetsCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -101612,11 +117149,10 @@ type RoutesListCall struct { header_ http.Header } -// List: Retrieves the list of Route resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list -func (r *RoutesService) List(project string) *RoutesListCall { - c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListPreconfiguredExpressionSets: Gets the current list of +// preconfigured Web Application Firewall (WAF) expressions. +func (r *SecurityPoliciesService) ListPreconfiguredExpressionSets(project string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c := &SecurityPoliciesListPreconfiguredExpressionSetsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -101643,7 +117179,7 @@ func (r *RoutesService) List(project string) *RoutesListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
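A minimal caller-side sketch of the pagination that the new SecurityPoliciesListCall.Pages helper drives (Do, PageToken, NextPageToken). The import path/version, project ID, and the default-credentials setup below are illustrative assumptions, not something this generated code pins down.

// Sketch only: lists all security policies in a project via Pages.
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path/version
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}
	// Pages re-issues the list call, feeding PageToken with each
	// NextPageToken, until the server returns an empty token.
	err = svc.SecurityPolicies.List("my-project").Pages(ctx, func(page *compute.SecurityPolicyList) error {
		for _, sp := range page.Items {
			log.Printf("security policy: %s", sp.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}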
-func (c *RoutesListCall) Filter(filter string) *RoutesListCall { +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Filter(filter string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { c.urlParams_.Set("filter", filter) return c } @@ -101654,7 +117190,7 @@ func (c *RoutesListCall) Filter(filter string) *RoutesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) MaxResults(maxResults int64) *SecurityPoliciesListPreconfiguredExpressionSetsCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -101671,7 +117207,7 @@ func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RoutesListCall) OrderBy(orderBy string) *RoutesListCall { +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) OrderBy(orderBy string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -101679,7 +117215,7 @@ func (c *RoutesListCall) OrderBy(orderBy string) *RoutesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) PageToken(pageToken string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -101687,7 +117223,7 @@ func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall { +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Fields(s ...googleapi.Field) *SecurityPoliciesListPreconfiguredExpressionSetsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101697,7 +117233,7 @@ func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutesListCall) IfNoneMatch(entityTag string) *RoutesListCall { +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) IfNoneMatch(entityTag string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { c.ifNoneMatch_ = entityTag return c } @@ -101705,21 +117241,21 @@ func (c *RoutesListCall) IfNoneMatch(entityTag string) *RoutesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesListCall) Context(ctx context.Context) *RoutesListCall { +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Context(ctx context.Context) *SecurityPoliciesListPreconfiguredExpressionSetsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RoutesListCall) Header() http.Header { +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -101731,7 +117267,7 @@ func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/listPreconfiguredExpressionSets") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -101744,14 +117280,17 @@ func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.list" call. -// Exactly one of *RouteList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RouteList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { +// Do executes the "compute.securityPolicies.listPreconfiguredExpressionSets" call. +// Exactly one of +// *SecurityPoliciesListPreconfiguredExpressionSetsResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *SecurityPoliciesListPreconfiguredExpressionSetsResponse.ServerRespons +// e.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googleapi.CallOption) (*SecurityPoliciesListPreconfiguredExpressionSetsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101770,7 +117309,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouteList{ + ret := &SecurityPoliciesListPreconfiguredExpressionSetsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101782,9 +117321,9 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { } return ret, nil // { - // "description": "Retrieves the list of Route resources available to the specified project.", + // "description": "Gets the current list of preconfigured Web Application Firewall (WAF) expressions.", // "httpMethod": "GET", - // "id": "compute.routes.list", + // "id": "compute.securityPolicies.listPreconfiguredExpressionSets", // "parameterOrder": [ // "project" // ], @@ -101820,66 +117359,63 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // "type": "string" // } // }, - // "path": "{project}/global/routes", + // "path": "{project}/global/securityPolicies/listPreconfiguredExpressionSets", // "response": { - // "$ref": "RouteList" + // "$ref": "SecurityPoliciesListPreconfiguredExpressionSetsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RoutesListCall) Pages(ctx context.Context, f func(*RouteList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routes.testIamPermissions": +// method id "compute.securityPolicies.patch": -type RoutesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SecurityPoliciesPatchCall struct { + s *Service + project string + securityPolicy string + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RoutesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutesTestIamPermissionsCall { - c := &RoutesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified policy with the data included in the +// request. 
+func (r *SecurityPoliciesService) Patch(project string, securityPolicy string, securitypolicy *SecurityPolicy) *SecurityPoliciesPatchCall { + c := &SecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.securityPolicy = securityPolicy + c.securitypolicy = securitypolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SecurityPoliciesPatchCall) RequestId(requestId string) *SecurityPoliciesPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutesTestIamPermissionsCall { +func (c *SecurityPoliciesPatchCall) Fields(s ...googleapi.Field) *SecurityPoliciesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101887,56 +117423,56 @@ func (c *RoutesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutesTestI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesTestIamPermissionsCall) Context(ctx context.Context) *RoutesTestIamPermissionsCall { +func (c *SecurityPoliciesPatchCall) Context(ctx context.Context) *SecurityPoliciesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutesTestIamPermissionsCall) Header() http.Header { +func (c *SecurityPoliciesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.securityPolicies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -101955,7 +117491,7 @@ func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -101967,12 +117503,12 @@ func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.routes.testIamPermissions", + // "description": "Patches the specified policy with the data included in the request.", + // "httpMethod": "PATCH", + // "id": "compute.securityPolicies.patch", // "parameterOrder": [ // "project", - // "resource" + // "securityPolicy" // ], // "parameters": { // "project": { @@ -101982,33 +117518,37 @@ func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/routes/{resource}/testIamPermissions", + // "path": "{project}/global/securityPolicies/{securityPolicy}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "SecurityPolicy" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.securityPolicies.addRule": +// method id "compute.securityPolicies.patchRule": -type SecurityPoliciesAddRuleCall struct { +type SecurityPoliciesPatchRuleCall struct { s *Service project string securityPolicy string @@ -102018,18 +117558,25 @@ type SecurityPoliciesAddRuleCall struct { header_ http.Header } -// AddRule: Inserts a rule into a security policy. -func (r *SecurityPoliciesService) AddRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesAddRuleCall { - c := &SecurityPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// PatchRule: Patches a rule at the specified priority. +func (r *SecurityPoliciesService) PatchRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesPatchRuleCall { + c := &SecurityPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.securityPolicy = securityPolicy c.securitypolicyrule = securitypolicyrule return c } +// Priority sets the optional parameter "priority": The priority of the +// rule to patch. +func (c *SecurityPoliciesPatchRuleCall) Priority(priority int64) *SecurityPoliciesPatchRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + // ValidateOnly sets the optional parameter "validateOnly": If true, the // request will not be committed. -func (c *SecurityPoliciesAddRuleCall) ValidateOnly(validateOnly bool) *SecurityPoliciesAddRuleCall { +func (c *SecurityPoliciesPatchRuleCall) ValidateOnly(validateOnly bool) *SecurityPoliciesPatchRuleCall { c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) return c } @@ -102037,7 +117584,7 @@ func (c *SecurityPoliciesAddRuleCall) ValidateOnly(validateOnly bool) *SecurityP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesAddRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesAddRuleCall { +func (c *SecurityPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesPatchRuleCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102045,21 +117592,21 @@ func (c *SecurityPoliciesAddRuleCall) Fields(s ...googleapi.Field) *SecurityPoli // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesAddRuleCall) Context(ctx context.Context) *SecurityPoliciesAddRuleCall { +func (c *SecurityPoliciesPatchRuleCall) Context(ctx context.Context) *SecurityPoliciesPatchRuleCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SecurityPoliciesAddRuleCall) Header() http.Header { +func (c *SecurityPoliciesPatchRuleCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -102073,7 +117620,7 @@ func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, err reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/addRule") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/patchRule") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102087,14 +117634,14 @@ func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.addRule" call. +// Do executes the "compute.securityPolicies.patchRule" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102125,14 +117672,20 @@ func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Inserts a rule into a security policy.", + // "description": "Patches a rule at the specified priority.", // "httpMethod": "POST", - // "id": "compute.securityPolicies.addRule", + // "id": "compute.securityPolicies.patchRule", // "parameterOrder": [ // "project", // "securityPolicy" // ], // "parameters": { + // "priority": { + // "description": "The priority of the rule to patch.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -102143,7 +117696,7 @@ func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati // "securityPolicy": { // "description": "Name of the security policy to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -102153,7 +117706,7 @@ func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "boolean" // } // }, - // "path": "{project}/global/securityPolicies/{securityPolicy}/addRule", + // "path": "{project}/global/securityPolicies/{securityPolicy}/patchRule", // "request": { // "$ref": "SecurityPolicyRule" // }, @@ -102168,9 +117721,9 @@ func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.securityPolicies.delete": +// method id "compute.securityPolicies.removeRule": -type SecurityPoliciesDeleteCall struct { +type SecurityPoliciesRemoveRuleCall struct { s *Service project string securityPolicy string @@ -102179,37 +117732,174 @@ type SecurityPoliciesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified policy. -func (r *SecurityPoliciesService) Delete(project string, securityPolicy string) *SecurityPoliciesDeleteCall { - c := &SecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveRule: Deletes a rule at the specified priority. +func (r *SecurityPoliciesService) RemoveRule(project string, securityPolicy string) *SecurityPoliciesRemoveRuleCall { + c := &SecurityPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.securityPolicy = securityPolicy return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *SecurityPoliciesDeleteCall) RequestId(requestId string) *SecurityPoliciesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// Priority sets the optional parameter "priority": The priority of the +// rule to remove from the security policy. +func (c *SecurityPoliciesRemoveRuleCall) Priority(priority int64) *SecurityPoliciesRemoveRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesRemoveRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesRemoveRuleCall) Context(ctx context.Context) *SecurityPoliciesRemoveRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SecurityPoliciesRemoveRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/removeRule") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.removeRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a rule at the specified priority.", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.removeRule", + // "parameterOrder": [ + // "project", + // "securityPolicy" + // ], + // "parameters": { + // "priority": { + // "description": "The priority of the rule to remove from the security policy.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/securityPolicies/{securityPolicy}/removeRule", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.securityPolicies.setLabels": + +type SecurityPoliciesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a security policy. To learn more about +// labels, read the Labeling Resources documentation. +func (r *SecurityPoliciesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *SecurityPoliciesSetLabelsCall { + c := &SecurityPoliciesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *SecurityPoliciesDeleteCall { +func (c *SecurityPoliciesSetLabelsCall) Fields(s ...googleapi.Field) *SecurityPoliciesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102217,51 +117907,56 @@ func (c *SecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *SecurityPolic // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
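Both patchRule and removeRule address a rule by its priority through the Priority query parameter and return an Operation. A hedged sketch of caller usage, assuming an already-constructed *compute.Service and placeholder project/policy names (import path as in the earlier sketch):

// Sketch only: patch and remove security-policy rules by priority.
package securitypolicyrules

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path/version
)

// patchRuleAction changes only the action of the rule at the given priority.
func patchRuleAction(ctx context.Context, svc *compute.Service, priority int64, action string) (*compute.Operation, error) {
	rule := &compute.SecurityPolicyRule{Action: action} // e.g. "allow" or "deny(403)"
	return svc.SecurityPolicies.PatchRule("my-project", "my-policy", rule).
		Priority(priority).
		Context(ctx).
		Do()
}

// removeRule deletes the rule at the given priority.
func removeRule(ctx context.Context, svc *compute.Service, priority int64) (*compute.Operation, error) {
	return svc.SecurityPolicies.RemoveRule("my-project", "my-policy").
		Priority(priority).
		Context(ctx).
		Do()
}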
-func (c *SecurityPoliciesDeleteCall) Context(ctx context.Context) *SecurityPoliciesDeleteCall { +func (c *SecurityPoliciesSetLabelsCall) Context(ctx context.Context) *SecurityPoliciesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SecurityPoliciesDeleteCall) Header() http.Header { +func (c *SecurityPoliciesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "securityPolicy": c.securityPolicy, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.delete" call. +// Do executes the "compute.securityPolicies.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SecurityPoliciesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102292,12 +117987,12 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified policy.", - // "httpMethod": "DELETE", - // "id": "compute.securityPolicies.delete", + // "description": "Sets the labels on a security policy. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.setLabels", // "parameterOrder": [ // "project", - // "securityPolicy" + // "resource" // ], // "parameters": { // "project": { @@ -102307,20 +118002,18 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "securityPolicy": { - // "description": "Name of the security policy to delete.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies/{securityPolicy}", + // "path": "{project}/global/securityPolicies/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -102332,96 +118025,89 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.securityPolicies.get": +// method id "compute.securityPolicies.testIamPermissions": -type SecurityPoliciesGetCall struct { - s *Service - project string - securityPolicy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type SecurityPoliciesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: List all of the ordered rules present in a single specified -// policy. -func (r *SecurityPoliciesService) Get(project string, securityPolicy string) *SecurityPoliciesGetCall { - c := &SecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *SecurityPoliciesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SecurityPoliciesTestIamPermissionsCall { + c := &SecurityPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.securityPolicy = securityPolicy + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesGetCall) Fields(s ...googleapi.Field) *SecurityPoliciesGetCall { +func (c *SecurityPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SecurityPoliciesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
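The new setLabels call wraps a GlobalSetLabelsRequest; its LabelFingerprint is used for optimistic concurrency, so it should come from a prior read of the policy. A sketch under the same assumptions as the earlier examples (fingerprint, project, and policy names are placeholders):

// Sketch only: replace the labels on a security policy.
package securitypolicylabels

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path/version
)

func setPolicyLabels(ctx context.Context, svc *compute.Service, fingerprint string) (*compute.Operation, error) {
	req := &compute.GlobalSetLabelsRequest{
		LabelFingerprint: fingerprint, // captured from the policy's last server response
		Labels:           map[string]string{"team": "infra", "env": "prod"},
	}
	return svc.SecurityPolicies.SetLabels("my-project", "my-policy", req).
		Context(ctx).
		Do()
}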
-func (c *SecurityPoliciesGetCall) IfNoneMatch(entityTag string) *SecurityPoliciesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesGetCall) Context(ctx context.Context) *SecurityPoliciesGetCall { +func (c *SecurityPoliciesTestIamPermissionsCall) Context(ctx context.Context) *SecurityPoliciesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SecurityPoliciesGetCall) Header() http.Header { +func (c *SecurityPoliciesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "securityPolicy": c.securityPolicy, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.get" call. -// Exactly one of *SecurityPolicy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *SecurityPolicy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.securityPolicies.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPolicy, error) { +func (c *SecurityPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102440,7 +118126,7 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SecurityPolicy{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -102452,12 +118138,12 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol } return ret, nil // { - // "description": "List all of the ordered rules present in a single specified policy.", - // "httpMethod": "GET", - // "id": "compute.securityPolicies.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.testIamPermissions", // "parameterOrder": [ // "project", - // "securityPolicy" + // "resource" // ], // "parameters": { // "project": { @@ -102467,17 +118153,20 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol // "required": true, // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to get.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies/{securityPolicy}", + // "path": "{project}/global/securityPolicies/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "SecurityPolicy" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -102488,102 +118177,107 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol } -// method id "compute.securityPolicies.getRule": +// method id "compute.snapshots.delete": -type SecurityPoliciesGetRuleCall struct { - s *Service - project string - securityPolicy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type SnapshotsDeleteCall struct { + s *Service + project string + snapshot string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetRule: Gets a rule at the specified priority. -func (r *SecurityPoliciesService) GetRule(project string, securityPolicy string) *SecurityPoliciesGetRuleCall { - c := &SecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Snapshot resource. Keep in mind that +// deleting a single snapshot might not necessarily delete all the data +// on that snapshot. If any data on the snapshot that is marked for +// deletion is needed for subsequent snapshots, the data will be moved +// to the next corresponding snapshot. +// +// For more information, see Deleting snapshots. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/delete +func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { + c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.securityPolicy = securityPolicy + c.snapshot = snapshot return c } -// Priority sets the optional parameter "priority": The priority of the -// rule to get from the security policy. -func (c *SecurityPoliciesGetRuleCall) Priority(priority int64) *SecurityPoliciesGetRuleCall { - c.urlParams_.Set("priority", fmt.Sprint(priority)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SnapshotsDeleteCall) RequestId(requestId string) *SnapshotsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesGetRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesGetRuleCall { +func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SecurityPoliciesGetRuleCall) IfNoneMatch(entityTag string) *SecurityPoliciesGetRuleCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesGetRuleCall) Context(ctx context.Context) *SecurityPoliciesGetRuleCall { +func (c *SnapshotsDeleteCall) Context(ctx context.Context) *SnapshotsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *SecurityPoliciesGetRuleCall) Header() http.Header { +func (c *SnapshotsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/getRule") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "securityPolicy": c.securityPolicy, + "project": c.project, + "snapshot": c.snapshot, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.getRule" call. -// Exactly one of *SecurityPolicyRule or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SecurityPolicyRule.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyRule, error) { +// Do executes the "compute.snapshots.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102602,7 +118296,7 @@ func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Securit if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SecurityPolicyRule{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -102614,20 +118308,14 @@ func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Securit } return ret, nil // { - // "description": "Gets a rule at the specified priority.", - // "httpMethod": "GET", - // "id": "compute.securityPolicies.getRule", + // "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. 
If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots.", + // "httpMethod": "DELETE", + // "id": "compute.snapshots.delete", // "parameterOrder": [ // "project", - // "securityPolicy" + // "snapshot" // ], // "parameters": { - // "priority": { - // "description": "The priority of the rule to get from the security policy.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -102635,133 +118323,122 @@ func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Securit // "required": true, // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to which the queried rule belongs.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "snapshot": { + // "description": "Name of the Snapshot resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies/{securityPolicy}/getRule", + // "path": "{project}/global/snapshots/{snapshot}", // "response": { - // "$ref": "SecurityPolicyRule" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.securityPolicies.insert": +// method id "compute.snapshots.get": -type SecurityPoliciesInsertCall struct { - s *Service - project string - securitypolicy *SecurityPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SnapshotsGetCall struct { + s *Service + project string + snapshot string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a new policy in the specified project using the data -// included in the request. -func (r *SecurityPoliciesService) Insert(project string, securitypolicy *SecurityPolicy) *SecurityPoliciesInsertCall { - c := &SecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Snapshot resource. Gets a list of +// available snapshots by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/get +func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall { + c := &SnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.securitypolicy = securitypolicy - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *SecurityPoliciesInsertCall) RequestId(requestId string) *SecurityPoliciesInsertCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// ValidateOnly sets the optional parameter "validateOnly": If true, the -// request will not be committed. -func (c *SecurityPoliciesInsertCall) ValidateOnly(validateOnly bool) *SecurityPoliciesInsertCall { - c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + c.snapshot = snapshot return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *SecurityPoliciesInsertCall { +func (c *SnapshotsGetCall) Fields(s ...googleapi.Field) *SnapshotsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SnapshotsGetCall) IfNoneMatch(entityTag string) *SnapshotsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesInsertCall) Context(ctx context.Context) *SecurityPoliciesInsertCall { +func (c *SnapshotsGetCall) Context(ctx context.Context) *SnapshotsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *SecurityPoliciesInsertCall) Header() http.Header { +func (c *SnapshotsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "snapshot": c.snapshot, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.snapshots.get" call. +// Exactly one of *Snapshot or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Snapshot.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102780,7 +118457,7 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Snapshot{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -102792,11 +118469,12 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a new policy in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.securityPolicies.insert", + // "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.snapshots.get", // "parameterOrder": [ - // "project" + // "project", + // "snapshot" // ], // "parameters": { // "project": { @@ -102806,118 +118484,52 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "snapshot": { + // "description": "Name of the Snapshot resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" - // }, - // "validateOnly": { - // "description": "If true, the request will not be committed.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "{project}/global/securityPolicies", - // "request": { - // "$ref": "SecurityPolicy" - // }, + // "path": "{project}/global/snapshots/{snapshot}", // "response": { - // "$ref": "Operation" + // "$ref": "Snapshot" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.securityPolicies.list": +// method id "compute.snapshots.getIamPolicy": -type SecurityPoliciesListCall struct { +type SnapshotsGetIamPolicyCall struct { s *Service project string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: List all the policies that have been configured for the -// specified project. -func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall { - c := &SecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *SnapshotsService) GetIamPolicy(project string, resource string) *SnapshotsGetIamPolicyCall { + c := &SnapshotsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *SecurityPoliciesListCall) Filter(filter string) *SecurityPoliciesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *SecurityPoliciesListCall) MaxResults(maxResults int64) *SecurityPoliciesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *SecurityPoliciesListCall) OrderBy(orderBy string) *SecurityPoliciesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *SecurityPoliciesListCall) PageToken(pageToken string) *SecurityPoliciesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesListCall) Fields(s ...googleapi.Field) *SecurityPoliciesListCall { +func (c *SnapshotsGetIamPolicyCall) Fields(s ...googleapi.Field) *SnapshotsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102927,7 +118539,7 @@ func (c *SecurityPoliciesListCall) Fields(s ...googleapi.Field) *SecurityPolicie // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SecurityPoliciesListCall) IfNoneMatch(entityTag string) *SecurityPoliciesListCall { +func (c *SnapshotsGetIamPolicyCall) IfNoneMatch(entityTag string) *SnapshotsGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -102935,21 +118547,21 @@ func (c *SecurityPoliciesListCall) IfNoneMatch(entityTag string) *SecurityPolici // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesListCall) Context(ctx context.Context) *SecurityPoliciesListCall { +func (c *SnapshotsGetIamPolicyCall) Context(ctx context.Context) *SnapshotsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *SecurityPoliciesListCall) Header() http.Header { +func (c *SnapshotsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -102961,7 +118573,7 @@ func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -102969,19 +118581,20 @@ func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.list" call. -// Exactly one of *SecurityPolicyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SecurityPolicyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyList, error) { +// Do executes the "compute.snapshots.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103000,7 +118613,7 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SecurityPolicyList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103012,47 +118625,32 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo } return ret, nil // { - // "description": "List all the policies that have been configured for the specified project.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.securityPolicies.list", + // "id": "compute.snapshots.getIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies", + // "path": "{project}/global/snapshots/{resource}/getIamPolicy", // "response": { - // "$ref": "SecurityPolicyList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -103063,30 +118661,9 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *SecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPolicyList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.securityPolicies.listPreconfiguredExpressionSets": +// method id "compute.snapshots.list": -type SecurityPoliciesListPreconfiguredExpressionSetsCall struct { +type SnapshotsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -103095,10 +118672,11 @@ type SecurityPoliciesListPreconfiguredExpressionSetsCall struct { header_ http.Header } -// ListPreconfiguredExpressionSets: Gets the current list of -// preconfigured Web Application Firewall (WAF) expressions. -func (r *SecurityPoliciesService) ListPreconfiguredExpressionSets(project string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { - c := &SecurityPoliciesListPreconfiguredExpressionSetsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of Snapshot resources contained within the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/list +func (r *SnapshotsService) List(project string) *SnapshotsListCall { + c := &SnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -103125,7 +118703,7 @@ func (r *SecurityPoliciesService) ListPreconfiguredExpressionSets(project string // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Filter(filter string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { +func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { c.urlParams_.Set("filter", filter) return c } @@ -103136,7 +118714,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Filter(filter stri // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) MaxResults(maxResults int64) *SecurityPoliciesListPreconfiguredExpressionSetsCall { +func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -103153,7 +118731,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) MaxResults(maxResu // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) OrderBy(orderBy string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { +func (c *SnapshotsListCall) OrderBy(orderBy string) *SnapshotsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -103161,7 +118739,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) OrderBy(orderBy st // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
-func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) PageToken(pageToken string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { +func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -103169,7 +118747,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) PageToken(pageToke // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Fields(s ...googleapi.Field) *SecurityPoliciesListPreconfiguredExpressionSetsCall { +func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103179,7 +118757,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Fields(s ...google // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) IfNoneMatch(entityTag string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { +func (c *SnapshotsListCall) IfNoneMatch(entityTag string) *SnapshotsListCall { c.ifNoneMatch_ = entityTag return c } @@ -103187,21 +118765,21 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) IfNoneMatch(entity // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Context(ctx context.Context) *SecurityPoliciesListPreconfiguredExpressionSetsCall { +func (c *SnapshotsListCall) Context(ctx context.Context) *SnapshotsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Header() http.Header { +func (c *SnapshotsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -103213,7 +118791,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) doRequest(alt stri var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/listPreconfiguredExpressionSets") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -103226,17 +118804,14 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) doRequest(alt stri return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.listPreconfiguredExpressionSets" call. -// Exactly one of -// *SecurityPoliciesListPreconfiguredExpressionSetsResponse or error -// will be non-nil. Any non-2xx status code is an error. 
Response -// headers are in either -// *SecurityPoliciesListPreconfiguredExpressionSetsResponse.ServerRespons -// e.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googleapi.CallOption) (*SecurityPoliciesListPreconfiguredExpressionSetsResponse, error) { +// Do executes the "compute.snapshots.list" call. +// Exactly one of *SnapshotList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SnapshotList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103255,7 +118830,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googlea if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SecurityPoliciesListPreconfiguredExpressionSetsResponse{ + ret := &SnapshotList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103267,9 +118842,9 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googlea } return ret, nil // { - // "description": "Gets the current list of preconfigured Web Application Firewall (WAF) expressions.", + // "description": "Retrieves the list of Snapshot resources contained within the specified project.", // "httpMethod": "GET", - // "id": "compute.securityPolicies.listPreconfiguredExpressionSets", + // "id": "compute.snapshots.list", // "parameterOrder": [ // "project" // ], @@ -103305,232 +118880,66 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googlea // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies/listPreconfiguredExpressionSets", + // "path": "{project}/global/snapshots", // "response": { - // "$ref": "SecurityPoliciesListPreconfiguredExpressionSetsResponse" + // "$ref": "SnapshotList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.securityPolicies.patch": - -type SecurityPoliciesPatchCall struct { - s *Service - project string - securityPolicy string - securitypolicy *SecurityPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches the specified policy with the data included in the -// request. -func (r *SecurityPoliciesService) Patch(project string, securityPolicy string, securitypolicy *SecurityPolicy) *SecurityPoliciesPatchCall { - c := &SecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.securityPolicy = securityPolicy - c.securitypolicy = securitypolicy - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *SecurityPoliciesPatchCall) RequestId(requestId string) *SecurityPoliciesPatchCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SecurityPoliciesPatchCall) Fields(s ...googleapi.Field) *SecurityPoliciesPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SecurityPoliciesPatchCall) Context(ctx context.Context) *SecurityPoliciesPatchCall { +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SnapshotsListCall) Pages(ctx context.Context, f func(*SnapshotList) error) error { c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SecurityPoliciesPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "securityPolicy": c.securityPolicy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.securityPolicies.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, + if err := f(x); err != nil { + return err } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches the specified policy with the data included in the request.", - // "httpMethod": "PATCH", - // "id": "compute.securityPolicies.patch", - // "parameterOrder": [ - // "project", - // "securityPolicy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "securityPolicy": { - // "description": "Name of the security policy to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/securityPolicies/{securityPolicy}", - // "request": { - // "$ref": "SecurityPolicy" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - } -// method id "compute.securityPolicies.patchRule": +// method id "compute.snapshots.setIamPolicy": -type SecurityPoliciesPatchRuleCall struct { - s *Service - project string - securityPolicy string - securitypolicyrule *SecurityPolicyRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SnapshotsSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PatchRule: Patches a rule at the specified priority. 
-func (r *SecurityPoliciesService) PatchRule(project string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *SecurityPoliciesPatchRuleCall { - c := &SecurityPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *SnapshotsService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *SnapshotsSetIamPolicyCall { + c := &SnapshotsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.securityPolicy = securityPolicy - c.securitypolicyrule = securitypolicyrule - return c -} - -// Priority sets the optional parameter "priority": The priority of the -// rule to patch. -func (c *SecurityPoliciesPatchRuleCall) Priority(priority int64) *SecurityPoliciesPatchRuleCall { - c.urlParams_.Set("priority", fmt.Sprint(priority)) - return c -} - -// ValidateOnly sets the optional parameter "validateOnly": If true, the -// request will not be committed. -func (c *SecurityPoliciesPatchRuleCall) ValidateOnly(validateOnly bool) *SecurityPoliciesPatchRuleCall { - c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesPatchRuleCall { +func (c *SnapshotsSetIamPolicyCall) Fields(s ...googleapi.Field) *SnapshotsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103538,35 +118947,35 @@ func (c *SecurityPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *SecurityPo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesPatchRuleCall) Context(ctx context.Context) *SecurityPoliciesPatchRuleCall { +func (c *SnapshotsSetIamPolicyCall) Context(ctx context.Context) *SnapshotsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SecurityPoliciesPatchRuleCall) Header() http.Header { +func (c *SnapshotsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/patchRule") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setIamPolicy") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -103574,20 +118983,20 @@ func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, e } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "securityPolicy": c.securityPolicy, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.patchRule" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.snapshots.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103606,7 +119015,7 @@ func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103618,20 +119027,14 @@ func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Patches a rule at the specified priority.", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.securityPolicies.patchRule", + // "id": "compute.snapshots.setIamPolicy", // "parameterOrder": [ // "project", - // "securityPolicy" + // "resource" // ], // "parameters": { - // "priority": { - // "description": "The priority of the rule to patch.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -103639,25 +119042,20 @@ func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera // "required": true, // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to update.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" - // }, - // "validateOnly": { - // "description": "If true, the request will not be committed.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "{project}/global/securityPolicies/{securityPolicy}/patchRule", + // "path": "{project}/global/snapshots/{resource}/setIamPolicy", // "request": { - // "$ref": "SecurityPolicyRule" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -103667,36 +119065,32 @@ func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.securityPolicies.removeRule": +// method id "compute.snapshots.setLabels": -type SecurityPoliciesRemoveRuleCall struct { - s *Service - project string - securityPolicy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SnapshotsSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveRule: Deletes a rule at the specified priority. -func (r *SecurityPoliciesService) RemoveRule(project string, securityPolicy string) *SecurityPoliciesRemoveRuleCall { - c := &SecurityPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on a snapshot. To learn more about labels, +// read the Labeling Resources documentation. +func (r *SnapshotsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *SnapshotsSetLabelsCall { + c := &SnapshotsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.securityPolicy = securityPolicy - return c -} - -// Priority sets the optional parameter "priority": The priority of the -// rule to remove from the security policy. -func (c *SecurityPoliciesRemoveRuleCall) Priority(priority int64) *SecurityPoliciesRemoveRuleCall { - c.urlParams_.Set("priority", fmt.Sprint(priority)) + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *SecurityPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *SecurityPoliciesRemoveRuleCall { +func (c *SnapshotsSetLabelsCall) Fields(s ...googleapi.Field) *SnapshotsSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103704,30 +119098,35 @@ func (c *SecurityPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *SecurityP // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesRemoveRuleCall) Context(ctx context.Context) *SecurityPoliciesRemoveRuleCall { +func (c *SnapshotsSetLabelsCall) Context(ctx context.Context) *SnapshotsSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SecurityPoliciesRemoveRuleCall) Header() http.Header { +func (c *SnapshotsSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}/removeRule") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -103735,20 +119134,20 @@ func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "securityPolicy": c.securityPolicy, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.removeRule" call. +// Do executes the "compute.snapshots.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103779,20 +119178,14 @@ func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Deletes a rule at the specified priority.", + // "description": "Sets the labels on a snapshot. 
To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.securityPolicies.removeRule", + // "id": "compute.snapshots.setLabels", // "parameterOrder": [ // "project", - // "securityPolicy" + // "resource" // ], // "parameters": { - // "priority": { - // "description": "The priority of the rule to remove from the security policy.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -103800,15 +119193,18 @@ func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to update.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies/{securityPolicy}/removeRule", + // "path": "{project}/global/snapshots/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -103820,32 +119216,32 @@ func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.securityPolicies.setLabels": +// method id "compute.snapshots.testIamPermissions": -type SecurityPoliciesSetLabelsCall struct { +type SnapshotsTestIamPermissionsCall struct { s *Service project string resource string - globalsetlabelsrequest *GlobalSetLabelsRequest + testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetLabels: Sets the labels on a security policy. To learn more about -// labels, read the Labeling Resources documentation. -func (r *SecurityPoliciesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *SecurityPoliciesSetLabelsCall { - c := &SecurityPoliciesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *SnapshotsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SnapshotsTestIamPermissionsCall { + c := &SnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesSetLabelsCall) Fields(s ...googleapi.Field) *SecurityPoliciesSetLabelsCall { +func (c *SnapshotsTestIamPermissionsCall) Fields(s ...googleapi.Field) *SnapshotsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -103853,35 +119249,35 @@ func (c *SecurityPoliciesSetLabelsCall) Fields(s ...googleapi.Field) *SecurityPo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *SecurityPoliciesSetLabelsCall) Context(ctx context.Context) *SecurityPoliciesSetLabelsCall { +func (c *SnapshotsTestIamPermissionsCall) Context(ctx context.Context) *SnapshotsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SecurityPoliciesSetLabelsCall) Header() http.Header { +func (c *SnapshotsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -103895,14 +119291,14 @@ func (c *SecurityPoliciesSetLabelsCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SecurityPoliciesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.snapshots.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -103921,7 +119317,7 @@ func (c *SecurityPoliciesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Opera if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -103933,9 +119329,9 @@ func (c *SecurityPoliciesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Sets the labels on a security policy. 
To learn more about labels, read the Labeling Resources documentation.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.securityPolicies.setLabels", + // "id": "compute.snapshots.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -103956,104 +119352,172 @@ func (c *SecurityPoliciesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies/{resource}/setLabels", + // "path": "{project}/global/snapshots/{resource}/testIamPermissions", // "request": { - // "$ref": "GlobalSetLabelsRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.securityPolicies.testIamPermissions": +// method id "compute.sslCertificates.aggregatedList": -type SecurityPoliciesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SslCertificatesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *SecurityPoliciesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SecurityPoliciesTestIamPermissionsCall { - c := &SecurityPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of all SslCertificate resources, +// regional and global, available to the specified project. +func (r *SslCertificatesService) AggregatedList(project string) *SslCertificatesAggregatedListCall { + c := &SslCertificatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *SslCertificatesAggregatedListCall) Filter(filter string) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *SslCertificatesAggregatedListCall) MaxResults(maxResults int64) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *SslCertificatesAggregatedListCall) OrderBy(orderBy string) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SslCertificatesAggregatedListCall) PageToken(pageToken string) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SecurityPoliciesTestIamPermissionsCall { +func (c *SslCertificatesAggregatedListCall) Fields(s ...googleapi.Field) *SslCertificatesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SslCertificatesAggregatedListCall) IfNoneMatch(entityTag string) *SslCertificatesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesTestIamPermissionsCall) Context(ctx context.Context) *SecurityPoliciesTestIamPermissionsCall { +func (c *SslCertificatesAggregatedListCall) Context(ctx context.Context) *SslCertificatesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
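// The filter grammar described in the comments above ("field operator value",
// optional parentheses, implicit AND, explicit AND/OR) is easiest to see with a
// couple of concrete strings. This is a minimal sketch, not part of the vendored
// code: the import path assumes the v0.beta surface vendored here, and the
// project ID and certificate names are made up for illustration.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()

	// Assumed setup: Application Default Credentials with the cloud-platform scope.
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Single comparison: exclude one certificate by name.
	simple := `name != example-cert`

	// Parenthesized expressions are ANDed by default; AND/OR can be made explicit.
	compound := `(name = example-cert) OR (name = example-cert-2)`

	for _, f := range []string{simple, compound} {
		list, err := svc.SslCertificates.AggregatedList("my-project").
			Filter(f).
			MaxResults(50).
			Context(ctx).
			Do()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("filter %q matched %d scopes\n", f, len(list.Items))
	}
}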
-func (c *SecurityPoliciesTestIamPermissionsCall) Header() http.Header { +func (c *SslCertificatesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *SslCertificatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/sslCertificates") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.sslCertificates.aggregatedList" call. +// Exactly one of *SslCertificateAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *SslCertificateAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *SecurityPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*SslCertificateAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104072,7 +119536,7 @@ func (c *SecurityPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &SslCertificateAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104084,35 +119548,47 @@ func (c *SecurityPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.securityPolicies.testIamPermissions", + // "description": "Retrieves the list of all SslCertificate resources, regional and global, available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.sslCertificates.aggregatedList", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/aggregated/sslCertificates", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "SslCertificateAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -104123,29 +119599,43 @@ func (c *SecurityPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption } -// method id "compute.snapshots.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SslCertificatesAggregatedListCall) Pages(ctx context.Context, f func(*SslCertificateAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type SnapshotsDeleteCall struct { - s *Service - project string - snapshot string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.sslCertificates.delete": + +type SslCertificatesDeleteCall struct { + s *Service + project string + sslCertificate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Snapshot resource. Keep in mind that -// deleting a single snapshot might not necessarily delete all the data -// on that snapshot. If any data on the snapshot that is marked for -// deletion is needed for subsequent snapshots, the data will be moved -// to the next corresponding snapshot. -// -// For more information, see Deleting snapshots. -// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/delete -func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { - c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified SslCertificate resource. 
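// The Pages helper added above drives the pageToken/nextPageToken loop on the
// caller's behalf. A minimal sketch of walking every scope in the aggregated
// list, assuming a *compute.Service built as in the earlier sketch; the scoped
// list field name follows the usual aggregated-list shape in this package, and
// the project ID is illustrative.
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta"
)

func listAllSslCertificates(ctx context.Context, svc *compute.Service, project string) error {
	return svc.SslCertificates.AggregatedList(project).
		MaxResults(250).
		Pages(ctx, func(page *compute.SslCertificateAggregatedList) error {
			for scope, scoped := range page.Items {
				// SslCertificatesScopedList.SslCertificates is assumed here,
				// mirroring the other *ScopedList types in this package.
				for _, cert := range scoped.SslCertificates {
					fmt.Printf("%s\t%s\n", scope, cert.Name)
				}
			}
			return nil // returning a non-nil error stops the iteration early
		})
}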
+func (r *SslCertificatesService) Delete(project string, sslCertificate string) *SslCertificatesDeleteCall { + c := &SslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.snapshot = snapshot + c.sslCertificate = sslCertificate return c } @@ -104163,7 +119653,7 @@ func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDel // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *SnapshotsDeleteCall) RequestId(requestId string) *SnapshotsDeleteCall { +func (c *SslCertificatesDeleteCall) RequestId(requestId string) *SslCertificatesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -104171,7 +119661,7 @@ func (c *SnapshotsDeleteCall) RequestId(requestId string) *SnapshotsDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall { +func (c *SslCertificatesDeleteCall) Fields(s ...googleapi.Field) *SslCertificatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104179,21 +119669,21 @@ func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsDeleteCall) Context(ctx context.Context) *SnapshotsDeleteCall { +func (c *SslCertificatesDeleteCall) Context(ctx context.Context) *SslCertificatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsDeleteCall) Header() http.Header { +func (c *SslCertificatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -104202,7 +119692,7 @@ func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -104210,20 +119700,20 @@ func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "snapshot": c.snapshot, + "project": c.project, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.delete" call. +// Do executes the "compute.sslCertificates.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104254,12 +119744,12 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots.", + // "description": "Deletes the specified SslCertificate resource.", // "httpMethod": "DELETE", - // "id": "compute.snapshots.delete", + // "id": "compute.sslCertificates.delete", // "parameterOrder": [ // "project", - // "snapshot" + // "sslCertificate" // ], // "parameters": { // "project": { @@ -104274,15 +119764,15 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "location": "query", // "type": "string" // }, - // "snapshot": { - // "description": "Name of the Snapshot resource to delete.", + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/snapshots/{snapshot}", + // "path": "{project}/global/sslCertificates/{sslCertificate}", // "response": { // "$ref": "Operation" // }, @@ -104294,32 +119784,31 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.snapshots.get": +// method id "compute.sslCertificates.get": -type SnapshotsGetCall struct { - s *Service - project string - snapshot string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type SslCertificatesGetCall struct { + s *Service + project string + sslCertificate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Snapshot resource. Gets a list of -// available snapshots by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/get -func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall { - c := &SnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified SslCertificate resource. Gets a list of +// available SSL certificates by making a list() request. +func (r *SslCertificatesService) Get(project string, sslCertificate string) *SslCertificatesGetCall { + c := &SslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.snapshot = snapshot + c.sslCertificate = sslCertificate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
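// The requestId parameter documented above makes mutating calls safe to retry:
// resending with the same UUID lets the server recognize and drop the duplicate.
// A minimal sketch, assuming a *compute.Service built as in the earlier sketch;
// the project and certificate name are illustrative, and the UUID is generated
// locally so no extra dependency is assumed.
package computeexamples

import (
	"context"
	"crypto/rand"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta"
)

// newRequestID returns a random RFC 4122 version-4 UUID string.
func newRequestID() (string, error) {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		return "", err
	}
	b[6] = (b[6] & 0x0f) | 0x40 // version 4
	b[8] = (b[8] & 0x3f) | 0x80 // RFC 4122 variant
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:16]), nil
}

func deleteCertificateIdempotently(ctx context.Context, svc *compute.Service, project, name string) error {
	reqID, err := newRequestID()
	if err != nil {
		return err
	}
	// Reusing reqID on a retry is what makes the retry safe; the returned
	// Operation can be polled (for example via GlobalOperations.Get) if the
	// caller needs to wait for completion.
	op, err := svc.SslCertificates.Delete(project, name).
		RequestId(reqID).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("delete started: operation %s (status %s)\n", op.Name, op.Status)
	return nil
}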
-func (c *SnapshotsGetCall) Fields(s ...googleapi.Field) *SnapshotsGetCall { +func (c *SslCertificatesGetCall) Fields(s ...googleapi.Field) *SslCertificatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104329,7 +119818,7 @@ func (c *SnapshotsGetCall) Fields(s ...googleapi.Field) *SnapshotsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SnapshotsGetCall) IfNoneMatch(entityTag string) *SnapshotsGetCall { +func (c *SslCertificatesGetCall) IfNoneMatch(entityTag string) *SslCertificatesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -104337,21 +119826,21 @@ func (c *SnapshotsGetCall) IfNoneMatch(entityTag string) *SnapshotsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsGetCall) Context(ctx context.Context) *SnapshotsGetCall { +func (c *SslCertificatesGetCall) Context(ctx context.Context) *SslCertificatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsGetCall) Header() http.Header { +func (c *SslCertificatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -104363,7 +119852,7 @@ func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -104371,20 +119860,20 @@ func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "snapshot": c.snapshot, + "project": c.project, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.get" call. -// Exactly one of *Snapshot or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Snapshot.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { +// Do executes the "compute.sslCertificates.get" call. +// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SslCertificate.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104403,7 +119892,7 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Snapshot{ + ret := &SslCertificate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104415,12 +119904,12 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { } return ret, nil // { - // "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request.", + // "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request.", // "httpMethod": "GET", - // "id": "compute.snapshots.get", + // "id": "compute.sslCertificates.get", // "parameterOrder": [ // "project", - // "snapshot" + // "sslCertificate" // ], // "parameters": { // "project": { @@ -104430,17 +119919,17 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { // "required": true, // "type": "string" // }, - // "snapshot": { - // "description": "Name of the Snapshot resource to return.", + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/snapshots/{snapshot}", + // "path": "{project}/global/sslCertificates/{sslCertificate}", // "response": { - // "$ref": "Snapshot" + // "$ref": "SslCertificate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -104451,96 +119940,105 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { } -// method id "compute.snapshots.getIamPolicy": +// method id "compute.sslCertificates.insert": -type SnapshotsGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type SslCertificatesInsertCall struct { + s *Service + project string + sslcertificate *SslCertificate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *SnapshotsService) GetIamPolicy(project string, resource string) *SnapshotsGetIamPolicyCall { - c := &SnapshotsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a SslCertificate resource in the specified project +// using the data included in the request. +func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCertificate) *SslCertificatesInsertCall { + c := &SslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.sslcertificate = sslcertificate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SslCertificatesInsertCall) RequestId(requestId string) *SslCertificatesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsGetIamPolicyCall) Fields(s ...googleapi.Field) *SnapshotsGetIamPolicyCall { +func (c *SslCertificatesInsertCall) Fields(s ...googleapi.Field) *SslCertificatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SnapshotsGetIamPolicyCall) IfNoneMatch(entityTag string) *SnapshotsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsGetIamPolicyCall) Context(ctx context.Context) *SnapshotsGetIamPolicyCall { +func (c *SslCertificatesInsertCall) Context(ctx context.Context) *SslCertificatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsGetIamPolicyCall) Header() http.Header { +func (c *SslCertificatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. 
Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.sslCertificates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104559,7 +120057,7 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104571,12 +120069,11 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.snapshots.getIamPolicy", + // "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.sslCertificates.insert", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { // "project": { @@ -104586,30 +120083,30 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/snapshots/{resource}/getIamPolicy", + // "path": "{project}/global/sslCertificates", + // "request": { + // "$ref": "SslCertificate" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.snapshots.list": +// method id "compute.sslCertificates.list": -type SnapshotsListCall struct { +type SslCertificatesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -104618,11 +120115,10 @@ type SnapshotsListCall struct { header_ http.Header } -// List: Retrieves the list of Snapshot resources contained within the +// List: Retrieves the list of SslCertificate resources available to the // specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/list -func (r *SnapshotsService) List(project string) *SnapshotsListCall { - c := &SnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { + c := &SslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -104649,7 +120145,7 @@ func (r *SnapshotsService) List(project string) *SnapshotsListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { +func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { c.urlParams_.Set("filter", filter) return c } @@ -104660,7 +120156,7 @@ func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall { +func (c *SslCertificatesListCall) MaxResults(maxResults int64) *SslCertificatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -104677,7 +120173,7 @@ func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *SnapshotsListCall) OrderBy(orderBy string) *SnapshotsListCall { +func (c *SslCertificatesListCall) OrderBy(orderBy string) *SslCertificatesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -104685,7 +120181,7 @@ func (c *SnapshotsListCall) OrderBy(orderBy string) *SnapshotsListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
-func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { +func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -104693,7 +120189,7 @@ func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall { +func (c *SslCertificatesListCall) Fields(s ...googleapi.Field) *SslCertificatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104703,7 +120199,7 @@ func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SnapshotsListCall) IfNoneMatch(entityTag string) *SnapshotsListCall { +func (c *SslCertificatesListCall) IfNoneMatch(entityTag string) *SslCertificatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -104711,21 +120207,21 @@ func (c *SnapshotsListCall) IfNoneMatch(entityTag string) *SnapshotsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsListCall) Context(ctx context.Context) *SnapshotsListCall { +func (c *SslCertificatesListCall) Context(ctx context.Context) *SslCertificatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsListCall) Header() http.Header { +func (c *SslCertificatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { +func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -104737,7 +120233,7 @@ func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -104750,14 +120246,14 @@ func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.list" call. -// Exactly one of *SnapshotList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *SnapshotList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, error) { +// Do executes the "compute.sslCertificates.list" call. +// Exactly one of *SslCertificateList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *SslCertificateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104776,7 +120272,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SnapshotList{ + ret := &SslCertificateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104788,9 +120284,9 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err } return ret, nil // { - // "description": "Retrieves the list of Snapshot resources contained within the specified project.", + // "description": "Retrieves the list of SslCertificate resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.snapshots.list", + // "id": "compute.sslCertificates.list", // "parameterOrder": [ // "project" // ], @@ -104826,9 +120322,9 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // "type": "string" // } // }, - // "path": "{project}/global/snapshots", + // "path": "{project}/global/sslCertificates", // "response": { - // "$ref": "SnapshotList" + // "$ref": "SslCertificateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -104842,7 +120338,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *SnapshotsListCall) Pages(ctx context.Context, f func(*SnapshotList) error) error { +func (c *SslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -104860,32 +120356,32 @@ func (c *SnapshotsListCall) Pages(ctx context.Context, f func(*SnapshotList) err } } -// method id "compute.snapshots.setIamPolicy": +// method id "compute.sslCertificates.testIamPermissions": -type SnapshotsSetIamPolicyCall struct { +type SslCertificatesTestIamPermissionsCall struct { s *Service project string resource string - globalsetpolicyrequest *GlobalSetPolicyRequest + testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *SnapshotsService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *SnapshotsSetIamPolicyCall { - c := &SnapshotsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
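// testIamPermissions answers "which of these permissions do my credentials
// actually hold on this resource" without attempting the operation itself.
// A minimal sketch, assuming a *compute.Service built as in the earlier sketch;
// the permission strings and resource name are illustrative.
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta"
)

func checkCertificatePermissions(ctx context.Context, svc *compute.Service, project, cert string) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.sslCertificates.get",
			"compute.sslCertificates.delete",
		},
	}
	resp, err := svc.SslCertificates.TestIamPermissions(project, cert, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	// The response echoes back only the subset of the requested permissions
	// that the caller actually holds.
	for _, p := range resp.Permissions {
		fmt.Println("granted:", p)
	}
	return nil
}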
+func (r *SslCertificatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SslCertificatesTestIamPermissionsCall { + c := &SslCertificatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsSetIamPolicyCall) Fields(s ...googleapi.Field) *SnapshotsSetIamPolicyCall { +func (c *SslCertificatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SslCertificatesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104893,35 +120389,35 @@ func (c *SnapshotsSetIamPolicyCall) Fields(s ...googleapi.Field) *SnapshotsSetIa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsSetIamPolicyCall) Context(ctx context.Context) *SnapshotsSetIamPolicyCall { +func (c *SslCertificatesTestIamPermissionsCall) Context(ctx context.Context) *SslCertificatesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsSetIamPolicyCall) Header() http.Header { +func (c *SslCertificatesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *SslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -104935,14 +120431,14 @@ func (c *SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.sslCertificates.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -104961,7 +120457,7 @@ func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -104973,9 +120469,9 @@ func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.snapshots.setIamPolicy", + // "id": "compute.sslCertificates.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -104996,47 +120492,66 @@ func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "type": "string" // } // }, - // "path": "{project}/global/snapshots/{resource}/setIamPolicy", + // "path": "{project}/global/sslCertificates/{resource}/testIamPermissions", // "request": { - // "$ref": "GlobalSetPolicyRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Policy" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.snapshots.setLabels": +// method id "compute.sslPolicies.delete": -type SnapshotsSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SslPoliciesDeleteCall struct { + s *Service + project string + sslPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on a snapshot. To learn more about labels, -// read the Labeling Resources documentation. -func (r *SnapshotsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *SnapshotsSetLabelsCall { - c := &SnapshotsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified SSL policy. The SSL policy resource can +// be deleted only if it is not in use by any TargetHttpsProxy or +// TargetSslProxy resources. +func (r *SslPoliciesService) Delete(project string, sslPolicy string) *SslPoliciesDeleteCall { + c := &SslPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.sslPolicy = sslPolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SslPoliciesDeleteCall) RequestId(requestId string) *SslPoliciesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsSetLabelsCall) Fields(s ...googleapi.Field) *SnapshotsSetLabelsCall { +func (c *SslPoliciesDeleteCall) Fields(s ...googleapi.Field) *SslPoliciesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -105044,56 +120559,51 @@ func (c *SnapshotsSetLabelsCall) Fields(s ...googleapi.Field) *SnapshotsSetLabel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsSetLabelsCall) Context(ctx context.Context) *SnapshotsSetLabelsCall { +func (c *SslPoliciesDeleteCall) Context(ctx context.Context) *SslPoliciesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsSetLabelsCall) Header() http.Header { +func (c *SslPoliciesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "sslPolicy": c.sslPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.setLabels" call. +// Do executes the "compute.sslPolicies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105124,12 +120634,12 @@ func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets the labels on a snapshot. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.snapshots.setLabels", + // "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + // "httpMethod": "DELETE", + // "id": "compute.sslPolicies.delete", // "parameterOrder": [ // "project", - // "resource" + // "sslPolicy" // ], // "parameters": { // "project": { @@ -105139,18 +120649,19 @@ func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sslPolicy": { + // "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/snapshots/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, + // "path": "{project}/global/sslPolicies/{sslPolicy}", // "response": { // "$ref": "Operation" // }, @@ -105162,89 +120673,96 @@ func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.snapshots.testIamPermissions": +// method id "compute.sslPolicies.get": -type SnapshotsTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SslPoliciesGetCall struct { + s *Service + project string + sslPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
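// The IfNoneMatch setter together with googleapi.IsNotModified gives a simple
// ETag-based cache: pass the ETag from the last successful read and treat a
// 304 response as "still current". A minimal sketch, assuming a
// *compute.Service built as in the earlier sketch; reading the ETag back from
// the embedded ServerResponse header is an assumption, and the policy name is
// illustrative.
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta"
	"google.golang.org/api/googleapi"
)

func refreshSslPolicy(ctx context.Context, svc *compute.Service, project, name, lastETag string) (*compute.SslPolicy, string, error) {
	policy, err := svc.SslPolicies.Get(project, name).
		IfNoneMatch(lastETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// Nothing has changed since lastETag; keep using the cached copy.
		return nil, lastETag, nil
	}
	if err != nil {
		return nil, "", err
	}
	etag := policy.ServerResponse.Header.Get("Etag")
	fmt.Printf("policy %s refreshed\n", policy.Name)
	return policy, etag, nil
}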
-func (r *SnapshotsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SnapshotsTestIamPermissionsCall { - c := &SnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Lists all of the ordered rules present in a single specified +// policy. +func (r *SslPoliciesService) Get(project string, sslPolicy string) *SslPoliciesGetCall { + c := &SslPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.sslPolicy = sslPolicy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsTestIamPermissionsCall) Fields(s ...googleapi.Field) *SnapshotsTestIamPermissionsCall { +func (c *SslPoliciesGetCall) Fields(s ...googleapi.Field) *SslPoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SslPoliciesGetCall) IfNoneMatch(entityTag string) *SslPoliciesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsTestIamPermissionsCall) Context(ctx context.Context) *SnapshotsTestIamPermissionsCall { +func (c *SslPoliciesGetCall) Context(ctx context.Context) *SslPoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsTestIamPermissionsCall) Header() http.Header { +func (c *SslPoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "sslPolicy": c.sslPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.testIamPermissions" call. 
-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.sslPolicies.get" call. +// Exactly one of *SslPolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SslPolicy.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105263,7 +120781,7 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &SslPolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105275,12 +120793,12 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.snapshots.testIamPermissions", + // "description": "Lists all of the ordered rules present in a single specified policy.", + // "httpMethod": "GET", + // "id": "compute.sslPolicies.get", // "parameterOrder": [ // "project", - // "resource" + // "sslPolicy" // ], // "parameters": { // "project": { @@ -105290,20 +120808,16 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "sslPolicy": { + // "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/snapshots/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/sslPolicies/{sslPolicy}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "SslPolicy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -105314,22 +120828,23 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } -// method id "compute.sslCertificates.delete": +// method id "compute.sslPolicies.insert": -type SslCertificatesDeleteCall struct { - s *Service - project string - sslCertificate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SslPoliciesInsertCall struct { + s *Service + project string + sslpolicy *SslPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified SslCertificate resource. 
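// A sketch of the compute.sslPolicies.get call described above, showing the
// If-None-Match flow: pass the ETag from a previous read and use
// googleapi.IsNotModified to detect a 304. Same assumptions as the earlier
// sketch, plus the google.golang.org/api/googleapi import; the cached
// parameter and function name are illustrative only.
func getSslPolicyIfChanged(ctx context.Context, svc *compute.Service, project, name string, cached *compute.SslPolicy) (*compute.SslPolicy, error) {
	call := svc.SslPolicies.Get(project, name).Context(ctx)
	if cached != nil {
		// ServerResponse.Header carries the headers of the previous read,
		// including its ETag.
		call = call.IfNoneMatch(cached.ServerResponse.Header.Get("Etag"))
	}
	p, err := call.Do()
	if googleapi.IsNotModified(err) {
		// 304: the policy has not changed since the cached read.
		return cached, nil
	}
	if err != nil {
		return nil, err
	}
	return p, nil
}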
-func (r *SslCertificatesService) Delete(project string, sslCertificate string) *SslCertificatesDeleteCall { - c := &SslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Returns the specified SSL policy resource. Gets a list of +// available SSL policies by making a list() request. +func (r *SslPoliciesService) Insert(project string, sslpolicy *SslPolicy) *SslPoliciesInsertCall { + c := &SslPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.sslCertificate = sslCertificate + c.sslpolicy = sslpolicy return c } @@ -105347,7 +120862,7 @@ func (r *SslCertificatesService) Delete(project string, sslCertificate string) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *SslCertificatesDeleteCall) RequestId(requestId string) *SslCertificatesDeleteCall { +func (c *SslPoliciesInsertCall) RequestId(requestId string) *SslPoliciesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -105355,219 +120870,63 @@ func (c *SslCertificatesDeleteCall) RequestId(requestId string) *SslCertificates // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslCertificatesDeleteCall) Fields(s ...googleapi.Field) *SslCertificatesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SslCertificatesDeleteCall) Context(ctx context.Context) *SslCertificatesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SslCertificatesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "sslCertificate": c.sslCertificate, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.sslCertificates.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified SslCertificate resource.", - // "httpMethod": "DELETE", - // "id": "compute.sslCertificates.delete", - // "parameterOrder": [ - // "project", - // "sslCertificate" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/sslCertificates/{sslCertificate}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.sslCertificates.get": - -type SslCertificatesGetCall struct { - s *Service - project string - sslCertificate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified SslCertificate resource. Gets a list of -// available SSL certificates by making a list() request. -func (r *SslCertificatesService) Get(project string, sslCertificate string) *SslCertificatesGetCall { - c := &SslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.sslCertificate = sslCertificate - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
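// The Fields option documented just above requests a partial response, so the
// server returns only the listed fields. A small sketch under the same
// assumptions as the earlier examples; the chosen field paths ("name",
// "selfLink") are illustrative.
func getSslPolicyNameOnly(ctx context.Context, svc *compute.Service, project, name string) (string, error) {
	p, err := svc.SslPolicies.Get(project, name).
		Fields("name", "selfLink").
		Context(ctx).
		Do()
	if err != nil {
		return "", err
	}
	// Fields that were not requested come back as zero values, so only rely
	// on what was asked for.
	return p.Name, nil
}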
-func (c *SslCertificatesGetCall) Fields(s ...googleapi.Field) *SslCertificatesGetCall { +func (c *SslPoliciesInsertCall) Fields(s ...googleapi.Field) *SslPoliciesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SslCertificatesGetCall) IfNoneMatch(entityTag string) *SslCertificatesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslCertificatesGetCall) Context(ctx context.Context) *SslCertificatesGetCall { +func (c *SslPoliciesInsertCall) Context(ctx context.Context) *SslPoliciesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslCertificatesGetCall) Header() http.Header { +func (c *SslPoliciesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "sslCertificate": c.sslCertificate, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslCertificates.get" call. -// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx +// Do executes the "compute.sslPolicies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SslCertificate.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
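// A sketch of the compute.sslPolicies.insert request built above (JSON body,
// POST to {project}/global/sslPolicies), with the usual assumptions. The
// SslPolicy field names and the "MODERN"/"TLS_1_2" values are assumptions
// taken from the SslPolicy schema elsewhere in this generated file; verify
// them against the struct definition before relying on this.
func createModernSslPolicy(ctx context.Context, svc *compute.Service, project, name, requestID string) (*compute.Operation, error) {
	policy := &compute.SslPolicy{
		Name:          name,
		Profile:       "MODERN",  // assumed enum value
		MinTlsVersion: "TLS_1_2", // assumed enum value
	}
	// RequestId makes a retried insert idempotent, per the requestId doc above.
	return svc.SslPolicies.Insert(project, policy).
		RequestId(requestID).
		Context(ctx).
		Do()
}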
+func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105586,7 +120945,7 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SslCertificate{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105598,12 +120957,11 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica } return ret, nil // { - // "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.sslCertificates.get", + // "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request.", + // "httpMethod": "POST", + // "id": "compute.sslPolicies.insert", // "parameterOrder": [ - // "project", - // "sslCertificate" + // "project" // ], // "parameters": { // "project": { @@ -105613,108 +120971,159 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica // "required": true, // "type": "string" // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/sslCertificates/{sslCertificate}", + // "path": "{project}/global/sslPolicies", + // "request": { + // "$ref": "SslPolicy" + // }, // "response": { - // "$ref": "SslCertificate" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.sslCertificates.insert": +// method id "compute.sslPolicies.list": -type SslCertificatesInsertCall struct { - s *Service - project string - sslcertificate *SslCertificate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SslPoliciesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a SslCertificate resource in the specified project -// using the data included in the request. 
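// The filter grammar documented above (field, comparison operator, value,
// with parenthesised expressions joined by an implicit AND) can be exercised
// directly on the list call. A sketch with the usual assumptions; the filter
// string and page size are illustrative.
func listCustomSslPolicies(ctx context.Context, svc *compute.Service, project string) ([]*compute.SslPolicy, error) {
	resp, err := svc.SslPolicies.List(project).
		Filter(`profile = "CUSTOM"`).
		MaxResults(100).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	// Items holds at most one page of results here; see the Pages sketch
	// later in this section for walking every page.
	return resp.Items, nil
}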
-func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCertificate) *SslCertificatesInsertCall { - c := &SslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists all the SSL policies that have been configured for the +// specified project. +func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { + c := &SslPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.sslcertificate = sslcertificate return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *SslCertificatesInsertCall) RequestId(requestId string) *SslCertificatesInsertCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *SslPoliciesListCall) Filter(filter string) *SslPoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *SslPoliciesListCall) MaxResults(maxResults int64) *SslPoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. 
+// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *SslPoliciesListCall) OrderBy(orderBy string) *SslPoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SslPoliciesListCall) PageToken(pageToken string) *SslPoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslCertificatesInsertCall) Fields(s ...googleapi.Field) *SslCertificatesInsertCall { +func (c *SslPoliciesListCall) Fields(s ...googleapi.Field) *SslPoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SslPoliciesListCall) IfNoneMatch(entityTag string) *SslPoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslCertificatesInsertCall) Context(ctx context.Context) *SslCertificatesInsertCall { +func (c *SslPoliciesListCall) Context(ctx context.Context) *SslPoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslCertificatesInsertCall) Header() http.Header { +func (c *SslPoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -105725,14 +121134,14 @@ func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslCertificates.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.sslPolicies.list" call. +// Exactly one of *SslPoliciesList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *SslPoliciesList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105751,7 +121160,7 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &SslPoliciesList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105763,44 +121172,81 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.sslCertificates.insert", + // "description": "Lists all the SSL policies that have been configured for the specified project.", + // "httpMethod": "GET", + // "id": "compute.sslPolicies.list", // "parameterOrder": [ // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/sslCertificates", - // "request": { - // "$ref": "SslCertificate" - // }, + // "path": "{project}/global/sslPolicies", // "response": { - // "$ref": "Operation" + // "$ref": "SslPoliciesList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.sslCertificates.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *SslPoliciesListCall) Pages(ctx context.Context, f func(*SslPoliciesList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type SslCertificatesListCall struct { +// method id "compute.sslPolicies.listAvailableFeatures": + +type SslPoliciesListAvailableFeaturesCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -105809,10 +121255,10 @@ type SslCertificatesListCall struct { header_ http.Header } -// List: Retrieves the list of SslCertificate resources available to the -// specified project. -func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { - c := &SslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListAvailableFeatures: Lists all features that can be specified in +// the SSL policy when using custom profile. +func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesListAvailableFeaturesCall { + c := &SslPoliciesListAvailableFeaturesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -105839,7 +121285,7 @@ func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { +func (c *SslPoliciesListAvailableFeaturesCall) Filter(filter string) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("filter", filter) return c } @@ -105850,7 +121296,7 @@ func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *SslCertificatesListCall) MaxResults(maxResults int64) *SslCertificatesListCall { +func (c *SslPoliciesListAvailableFeaturesCall) MaxResults(maxResults int64) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -105867,7 +121313,7 @@ func (c *SslCertificatesListCall) MaxResults(maxResults int64) *SslCertificatesL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *SslCertificatesListCall) OrderBy(orderBy string) *SslCertificatesListCall { +func (c *SslPoliciesListAvailableFeaturesCall) OrderBy(orderBy string) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -105875,7 +121321,7 @@ func (c *SslCertificatesListCall) OrderBy(orderBy string) *SslCertificatesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesListCall { +func (c *SslPoliciesListAvailableFeaturesCall) PageToken(pageToken string) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -105883,7 +121329,7 @@ func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesLi // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslCertificatesListCall) Fields(s ...googleapi.Field) *SslCertificatesListCall { +func (c *SslPoliciesListAvailableFeaturesCall) Fields(s ...googleapi.Field) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -105893,7 +121339,7 @@ func (c *SslCertificatesListCall) Fields(s ...googleapi.Field) *SslCertificatesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SslCertificatesListCall) IfNoneMatch(entityTag string) *SslCertificatesListCall { +func (c *SslPoliciesListAvailableFeaturesCall) IfNoneMatch(entityTag string) *SslPoliciesListAvailableFeaturesCall { c.ifNoneMatch_ = entityTag return c } @@ -105901,21 +121347,21 @@ func (c *SslCertificatesListCall) IfNoneMatch(entityTag string) *SslCertificates // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslCertificatesListCall) Context(ctx context.Context) *SslCertificatesListCall { +func (c *SslPoliciesListAvailableFeaturesCall) Context(ctx context.Context) *SslPoliciesListAvailableFeaturesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslCertificatesListCall) Header() http.Header { +func (c *SslPoliciesListAvailableFeaturesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -105927,7 +121373,7 @@ func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/listAvailableFeatures") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -105940,14 +121386,16 @@ func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslCertificates.list" call. -// Exactly one of *SslCertificateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SslCertificateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { +// Do executes the "compute.sslPolicies.listAvailableFeatures" call. +// Exactly one of *SslPoliciesListAvailableFeaturesResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *SslPoliciesListAvailableFeaturesResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) (*SslPoliciesListAvailableFeaturesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -105966,7 +121414,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SslCertificateList{ + ret := &SslPoliciesListAvailableFeaturesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -105978,9 +121426,9 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific } return ret, nil // { - // "description": "Retrieves the list of SslCertificate resources available to the specified project.", + // "description": "Lists all features that can be specified in the SSL policy when using custom profile.", // "httpMethod": "GET", - // "id": "compute.sslCertificates.list", + // "id": "compute.sslPolicies.listAvailableFeatures", // "parameterOrder": [ // "project" // ], @@ -106016,9 +121464,9 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // "type": "string" // } // }, - // "path": "{project}/global/sslCertificates", + // "path": "{project}/global/sslPolicies/listAvailableFeatures", // "response": { - // "$ref": "SslCertificateList" + // "$ref": "SslPoliciesListAvailableFeaturesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -106029,30 +121477,183 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *SslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { +// method id "compute.sslPolicies.patch": + +type SslPoliciesPatchCall struct { + s *Service + project string + sslPolicy string + sslpolicy *SslPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified SSL policy with the data included in the +// request. +func (r *SslPoliciesService) Patch(project string, sslPolicy string, sslpolicy *SslPolicy) *SslPoliciesPatchCall { + c := &SslPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.sslPolicy = sslPolicy + c.sslpolicy = sslpolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
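// compute.sslPolicies.patch, introduced above, sends a partial SslPolicy and
// returns an Operation. A sketch with the usual assumptions: the caller must
// supply a real, non-zero UUID for requestID (no uuid library is assumed
// here), MinTlsVersion is an assumed field/enum value, and fingerprint-based
// concurrency handling is deliberately left out.
func raiseMinTlsVersion(ctx context.Context, svc *compute.Service, project, policy, requestID string) (*compute.Operation, error) {
	patch := &compute.SslPolicy{
		MinTlsVersion: "TLS_1_2", // assumed enum value
	}
	return svc.SslPolicies.Patch(project, policy, patch).
		RequestId(requestID).
		Context(ctx).
		Do()
}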
+func (c *SslPoliciesPatchCall) RequestId(requestId string) *SslPoliciesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslPoliciesPatchCall) Fields(s ...googleapi.Field) *SslPoliciesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesPatchCall) Context(ctx context.Context) *SslPoliciesPatchCall { c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SslPoliciesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "sslPolicy": c.sslPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() } - if x.NextPageToken == "" { - return nil + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, } - c.PageToken(x.NextPageToken) } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified SSL policy with the data included in the request.", + // "httpMethod": "PATCH", + // "id": "compute.sslPolicies.patch", + // "parameterOrder": [ + // "project", + // "sslPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sslPolicy": { + // "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslPolicies/{sslPolicy}", + // "request": { + // "$ref": "SslPolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + } -// method id "compute.sslCertificates.testIamPermissions": +// method id "compute.sslPolicies.testIamPermissions": -type SslCertificatesTestIamPermissionsCall struct { +type SslPoliciesTestIamPermissionsCall struct { s *Service project string resource string @@ -106064,8 +121665,8 @@ type SslCertificatesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. 
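// A sketch of compute.sslPolicies.testIamPermissions as introduced above:
// ask which of a candidate set of permissions the caller actually holds on a
// policy. Usual assumptions apply; the permission string is illustrative and
// the request/response types are assumed to carry a Permissions []string
// field as in the rest of this generated client.
func canDeleteSslPolicy(ctx context.Context, svc *compute.Service, project, policy string) (bool, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.sslPolicies.delete"},
	}
	resp, err := svc.SslPolicies.TestIamPermissions(project, policy, req).Context(ctx).Do()
	if err != nil {
		return false, err
	}
	for _, p := range resp.Permissions {
		if p == "compute.sslPolicies.delete" {
			return true, nil
		}
	}
	return false, nil
}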
-func (r *SslCertificatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SslCertificatesTestIamPermissionsCall { - c := &SslCertificatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *SslPoliciesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SslPoliciesTestIamPermissionsCall { + c := &SslPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource c.testpermissionsrequest = testpermissionsrequest @@ -106075,7 +121676,7 @@ func (r *SslCertificatesService) TestIamPermissions(project string, resource str // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslCertificatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SslCertificatesTestIamPermissionsCall { +func (c *SslPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SslPoliciesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106083,21 +121684,21 @@ func (c *SslCertificatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Ss // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslCertificatesTestIamPermissionsCall) Context(ctx context.Context) *SslCertificatesTestIamPermissionsCall { +func (c *SslPoliciesTestIamPermissionsCall) Context(ctx context.Context) *SslPoliciesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslCertificatesTestIamPermissionsCall) Header() http.Header { +func (c *SslPoliciesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *SslPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -106111,7 +121712,7 @@ func (c *SslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Res reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -106125,14 +121726,14 @@ func (c *SslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslCertificates.testIamPermissions" call. +// Do executes the "compute.sslPolicies.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
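// The Do documentation above points at error.(*googleapi.Error) for the
// status code and response headers of a failed call. A small sketch of that
// inspection pattern, with the usual assumptions; the returned string is
// purely illustrative.
func describeCallError(err error) string {
	if err == nil {
		return "ok"
	}
	if apiErr, ok := err.(*googleapi.Error); ok {
		// Code is the HTTP status; Header (not shown) carries the response
		// headers, including any Etag.
		return fmt.Sprintf("HTTP %d from server: %s", apiErr.Code, apiErr.Message)
	}
	return err.Error()
}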
-func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *SslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -106165,7 +121766,7 @@ func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.sslCertificates.testIamPermissions", + // "id": "compute.sslPolicies.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -106186,7 +121787,7 @@ func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/global/sslCertificates/{resource}/testIamPermissions", + // "path": "{project}/global/sslPolicies/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -106202,102 +121803,155 @@ func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } -// method id "compute.sslPolicies.delete": +// method id "compute.subnetworks.aggregatedList": -type SslPoliciesDeleteCall struct { - s *Service - project string - sslPolicy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SubnetworksAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified SSL policy. The SSL policy resource can -// be deleted only if it is not in use by any TargetHttpsProxy or -// TargetSslProxy resources. -func (r *SslPoliciesService) Delete(project string, sslPolicy string) *SslPoliciesDeleteCall { - c := &SslPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of subnetworks. +func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregatedListCall { + c := &SubnetworksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.sslPolicy = sslPolicy return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *SslPoliciesDeleteCall) RequestId(requestId string) *SslPoliciesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *SubnetworksAggregatedListCall) Filter(filter string) *SubnetworksAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *SubnetworksAggregatedListCall) MaxResults(maxResults int64) *SubnetworksAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *SubnetworksAggregatedListCall) OrderBy(orderBy string) *SubnetworksAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SubnetworksAggregatedListCall) PageToken(pageToken string) *SubnetworksAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslPoliciesDeleteCall) Fields(s ...googleapi.Field) *SslPoliciesDeleteCall { +func (c *SubnetworksAggregatedListCall) Fields(s ...googleapi.Field) *SubnetworksAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
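// compute.subnetworks.aggregatedList, documented above, groups results per
// scope (region) rather than returning a flat list. A sketch with the usual
// assumptions; the Items map and the SubnetworksScopedList.Subnetworks field
// name are assumptions based on the aggregated-list types defined elsewhere
// in this generated file.
func listAllSubnetworks(ctx context.Context, svc *compute.Service, project string) ([]*compute.Subnetwork, error) {
	var all []*compute.Subnetwork
	err := svc.Subnetworks.AggregatedList(project).Pages(ctx, func(page *compute.SubnetworkAggregatedList) error {
		for _, scoped := range page.Items { // keyed by "regions/<region>"
			all = append(all, scoped.Subnetworks...)
		}
		return nil
	})
	return all, err
}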
+func (c *SubnetworksAggregatedListCall) IfNoneMatch(entityTag string) *SubnetworksAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslPoliciesDeleteCall) Context(ctx context.Context) *SslPoliciesDeleteCall { +func (c *SubnetworksAggregatedListCall) Context(ctx context.Context) *SubnetworksAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslPoliciesDeleteCall) Header() http.Header { +func (c *SubnetworksAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "sslPolicy": c.sslPolicy, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslPolicies.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.subnetworks.aggregatedList" call. +// Exactly one of *SubnetworkAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *SubnetworkAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*SubnetworkAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -106316,7 +121970,7 @@ func (c *SslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &SubnetworkAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -106328,135 +121982,175 @@ func (c *SslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", - // "httpMethod": "DELETE", - // "id": "compute.sslPolicies.delete", + // "description": "Retrieves an aggregated list of subnetworks.", + // "httpMethod": "GET", + // "id": "compute.subnetworks.aggregatedList", // "parameterOrder": [ - // "project", - // "sslPolicy" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. 
Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" // }, - // "sslPolicy": { - // "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/sslPolicies/{sslPolicy}", + // "path": "{project}/aggregated/subnetworks", // "response": { - // "$ref": "Operation" + // "$ref": "SubnetworkAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.sslPolicies.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SubnetworksAggregatedListCall) Pages(ctx context.Context, f func(*SubnetworkAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type SslPoliciesGetCall struct { - s *Service - project string - sslPolicy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.subnetworks.delete": + +type SubnetworksDeleteCall struct { + s *Service + project string + region string + subnetwork string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Lists all of the ordered rules present in a single specified -// policy. -func (r *SslPoliciesService) Get(project string, sslPolicy string) *SslPoliciesGetCall { - c := &SslPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified subnetwork. 
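Before the delete call below, a minimal sketch of how a caller typically drives the compute.subnetworks.aggregatedList method added above, combining its Filter, MaxResults and Pages helpers. This is illustrative only and not part of the vendored file: "my-project" is a placeholder project ID, and google.DefaultClient / compute.New are the conventional constructors for this vintage of the generated client.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// Application-default credentials; the scope constant comes from the generated package.
	hc, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}
	// "my-project" is a placeholder; the filter uses the syntax documented above.
	call := svc.Subnetworks.AggregatedList("my-project").
		Filter("name != default").
		MaxResults(100)
	// Pages drives pageToken/nextPageToken automatically and stops on the last page.
	err = call.Pages(ctx, func(page *compute.SubnetworkAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, sn := range scoped.Subnetworks {
				fmt.Printf("%s: %s (%s)\n", scope, sn.Name, sn.IpCidrRange)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}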
+func (r *SubnetworksService) Delete(project string, region string, subnetwork string) *SubnetworksDeleteCall { + c := &SubnetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.sslPolicy = sslPolicy + c.region = region + c.subnetwork = subnetwork + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SubnetworksDeleteCall) RequestId(requestId string) *SubnetworksDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslPoliciesGetCall) Fields(s ...googleapi.Field) *SslPoliciesGetCall { +func (c *SubnetworksDeleteCall) Fields(s ...googleapi.Field) *SubnetworksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SslPoliciesGetCall) IfNoneMatch(entityTag string) *SslPoliciesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslPoliciesGetCall) Context(ctx context.Context) *SslPoliciesGetCall { +func (c *SubnetworksDeleteCall) Context(ctx context.Context) *SubnetworksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslPoliciesGetCall) Header() http.Header { +func (c *SubnetworksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "sslPolicy": c.sslPolicy, + "project": c.project, + "region": c.region, + "subnetwork": c.subnetwork, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslPolicies.get" call. -// Exactly one of *SslPolicy or error will be non-nil. Any non-2xx +// Do executes the "compute.subnetworks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SslPolicy.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error) { +func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -106475,7 +122169,7 @@ func (c *SslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SslPolicy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -106487,12 +122181,13 @@ func (c *SslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error } return ret, nil // { - // "description": "Lists all of the ordered rules present in a single specified policy.", - // "httpMethod": "GET", - // "id": "compute.sslPolicies.get", + // "description": "Deletes the specified subnetwork.", + // "httpMethod": "DELETE", + // "id": "compute.subnetworks.delete", // "parameterOrder": [ // "project", - // "sslPolicy" + // "region", + // "subnetwork" // ], // "parameters": { // "project": { @@ -106502,43 +122197,59 @@ func (c *SslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error // "required": true, // "type": "string" // }, - // "sslPolicy": { - // "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "subnetwork": { + // "description": "Name of the Subnetwork resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/sslPolicies/{sslPolicy}", + // "path": "{project}/regions/{region}/subnetworks/{subnetwork}", // "response": { - // "$ref": "SslPolicy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.sslPolicies.insert": +// method id "compute.subnetworks.expandIpCidrRange": -type SslPoliciesInsertCall struct { - s *Service - project string - sslpolicy *SslPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SubnetworksExpandIpCidrRangeCall struct { + s *Service + project string + region string + subnetwork string + subnetworksexpandipcidrrangerequest *SubnetworksExpandIpCidrRangeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Returns the specified SSL policy resource. Gets a list of -// available SSL policies by making a list() request. -func (r *SslPoliciesService) Insert(project string, sslpolicy *SslPolicy) *SslPoliciesInsertCall { - c := &SslPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ExpandIpCidrRange: Expands the IP CIDR range of the subnetwork to a +// specified value. +func (r *SubnetworksService) ExpandIpCidrRange(project string, region string, subnetwork string, subnetworksexpandipcidrrangerequest *SubnetworksExpandIpCidrRangeRequest) *SubnetworksExpandIpCidrRangeCall { + c := &SubnetworksExpandIpCidrRangeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.sslpolicy = sslpolicy + c.region = region + c.subnetwork = subnetwork + c.subnetworksexpandipcidrrangerequest = subnetworksexpandipcidrrangerequest return c } @@ -106556,7 +122267,7 @@ func (r *SslPoliciesService) Insert(project string, sslpolicy *SslPolicy) *SslPo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *SslPoliciesInsertCall) RequestId(requestId string) *SslPoliciesInsertCall { +func (c *SubnetworksExpandIpCidrRangeCall) RequestId(requestId string) *SubnetworksExpandIpCidrRangeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -106564,7 +122275,7 @@ func (c *SslPoliciesInsertCall) RequestId(requestId string) *SslPoliciesInsertCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslPoliciesInsertCall) Fields(s ...googleapi.Field) *SslPoliciesInsertCall { +func (c *SubnetworksExpandIpCidrRangeCall) Fields(s ...googleapi.Field) *SubnetworksExpandIpCidrRangeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106572,35 +122283,35 @@ func (c *SslPoliciesInsertCall) Fields(s ...googleapi.Field) *SslPoliciesInsertC // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslPoliciesInsertCall) Context(ctx context.Context) *SslPoliciesInsertCall { +func (c *SubnetworksExpandIpCidrRangeCall) Context(ctx context.Context) *SubnetworksExpandIpCidrRangeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslPoliciesInsertCall) Header() http.Header { +func (c *SubnetworksExpandIpCidrRangeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetworksexpandipcidrrangerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -106608,19 +122319,21 @@ func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "subnetwork": c.subnetwork, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslPolicies.insert" call. +// Do executes the "compute.subnetworks.expandIpCidrRange" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -106651,11 +122364,13 @@ func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Returns the specified SSL policy resource. 
Gets a list of available SSL policies by making a list() request.", + // "description": "Expands the IP CIDR range of the subnetwork to a specified value.", // "httpMethod": "POST", - // "id": "compute.sslPolicies.insert", + // "id": "compute.subnetworks.expandIpCidrRange", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "subnetwork" // ], // "parameters": { // "project": { @@ -106665,15 +122380,29 @@ func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "subnetwork": { + // "description": "Name of the Subnetwork resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/sslPolicies", + // "path": "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", // "request": { - // "$ref": "SslPolicy" + // "$ref": "SubnetworksExpandIpCidrRangeRequest" // }, // "response": { // "$ref": "Operation" @@ -106686,92 +122415,33 @@ func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.sslPolicies.list": +// method id "compute.subnetworks.get": -type SslPoliciesListCall struct { +type SubnetworksGetCall struct { s *Service project string + region string + subnetwork string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists all the SSL policies that have been configured for the -// specified project. -func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { - c := &SslPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified subnetwork. Gets a list of available +// subnetworks list() request. +func (r *SubnetworksService) Get(project string, region string, subnetwork string) *SubnetworksGetCall { + c := &SubnetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. 
-// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *SslPoliciesListCall) Filter(filter string) *SslPoliciesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *SslPoliciesListCall) MaxResults(maxResults int64) *SslPoliciesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *SslPoliciesListCall) OrderBy(orderBy string) *SslPoliciesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *SslPoliciesListCall) PageToken(pageToken string) *SslPoliciesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.subnetwork = subnetwork return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslPoliciesListCall) Fields(s ...googleapi.Field) *SslPoliciesListCall { +func (c *SubnetworksGetCall) Fields(s ...googleapi.Field) *SubnetworksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106781,7 +122451,7 @@ func (c *SslPoliciesListCall) Fields(s ...googleapi.Field) *SslPoliciesListCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SslPoliciesListCall) IfNoneMatch(entityTag string) *SslPoliciesListCall { +func (c *SubnetworksGetCall) IfNoneMatch(entityTag string) *SubnetworksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -106789,21 +122459,21 @@ func (c *SslPoliciesListCall) IfNoneMatch(entityTag string) *SslPoliciesListCall // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslPoliciesListCall) Context(ctx context.Context) *SslPoliciesListCall { +func (c *SubnetworksGetCall) Context(ctx context.Context) *SubnetworksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslPoliciesListCall) Header() http.Header { +func (c *SubnetworksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) { +func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -106815,7 +122485,7 @@ func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -106823,19 +122493,21 @@ func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "subnetwork": c.subnetwork, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslPolicies.list" call. -// Exactly one of *SslPoliciesList or error will be non-nil. Any non-2xx +// Do executes the "compute.subnetworks.get" call. +// Exactly one of *Subnetwork or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SslPoliciesList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList, error) { +// *Subnetwork.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -106854,7 +122526,7 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SslPoliciesList{ + ret := &Subnetwork{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -106866,47 +122538,40 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList } return ret, nil // { - // "description": "Lists all the SSL policies that have been configured for the specified project.", + // "description": "Returns the specified subnetwork. 
Gets a list of available subnetworks list() request.", // "httpMethod": "GET", - // "id": "compute.sslPolicies.list", + // "id": "compute.subnetworks.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "subnetwork" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "subnetwork": { + // "description": "Name of the Subnetwork resource to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/sslPolicies", + // "path": "{project}/regions/{region}/subnetworks/{subnetwork}", // "response": { - // "$ref": "SslPoliciesList" + // "$ref": "Subnetwork" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -106917,113 +122582,33 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *SslPoliciesListCall) Pages(ctx context.Context, f func(*SslPoliciesList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.sslPolicies.listAvailableFeatures": +// method id "compute.subnetworks.getIamPolicy": -type SslPoliciesListAvailableFeaturesCall struct { +type SubnetworksGetIamPolicyCall struct { s *Service project string + region string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// ListAvailableFeatures: Lists all features that can be specified in -// the SSL policy when using custom profile. -func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesListAvailableFeaturesCall { - c := &SslPoliciesListAvailableFeaturesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *SubnetworksService) GetIamPolicy(project string, region string, resource string) *SubnetworksGetIamPolicyCall { + c := &SubnetworksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. 
You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *SslPoliciesListAvailableFeaturesCall) Filter(filter string) *SslPoliciesListAvailableFeaturesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *SslPoliciesListAvailableFeaturesCall) MaxResults(maxResults int64) *SslPoliciesListAvailableFeaturesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *SslPoliciesListAvailableFeaturesCall) OrderBy(orderBy string) *SslPoliciesListAvailableFeaturesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *SslPoliciesListAvailableFeaturesCall) PageToken(pageToken string) *SslPoliciesListAvailableFeaturesCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslPoliciesListAvailableFeaturesCall) Fields(s ...googleapi.Field) *SslPoliciesListAvailableFeaturesCall { +func (c *SubnetworksGetIamPolicyCall) Fields(s ...googleapi.Field) *SubnetworksGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107033,7 +122618,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Fields(s ...googleapi.Field) *Ssl // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
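Alongside the SslPolicies removals in this hunk, compute.subnetworks.getIamPolicy is introduced. A rough sketch of reading the returned *compute.Policy, assuming a *compute.Service built as in the earlier sketch; the project, region and resource names are placeholders:

// printSubnetworkPolicy dumps the IAM bindings attached to a subnetwork.
// svc is a *compute.Service (see the earlier sketch); the literals are placeholders.
func printSubnetworkPolicy(ctx context.Context, svc *compute.Service) error {
	policy, err := svc.Subnetworks.GetIamPolicy("my-project", "us-central1", "my-subnet").
		Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, b := range policy.Bindings {
		fmt.Printf("%s -> %v\n", b.Role, b.Members)
	}
	return nil
}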
-func (c *SslPoliciesListAvailableFeaturesCall) IfNoneMatch(entityTag string) *SslPoliciesListAvailableFeaturesCall { +func (c *SubnetworksGetIamPolicyCall) IfNoneMatch(entityTag string) *SubnetworksGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -107041,21 +122626,21 @@ func (c *SslPoliciesListAvailableFeaturesCall) IfNoneMatch(entityTag string) *Ss // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslPoliciesListAvailableFeaturesCall) Context(ctx context.Context) *SslPoliciesListAvailableFeaturesCall { +func (c *SubnetworksGetIamPolicyCall) Context(ctx context.Context) *SubnetworksGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslPoliciesListAvailableFeaturesCall) Header() http.Header { +func (c *SubnetworksGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { +func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -107067,7 +122652,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Resp var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/listAvailableFeatures") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -107075,21 +122660,21 @@ func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Resp } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslPolicies.listAvailableFeatures" call. -// Exactly one of *SslPoliciesListAvailableFeaturesResponse or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *SslPoliciesListAvailableFeaturesResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) (*SslPoliciesListAvailableFeaturesResponse, error) { +// Do executes the "compute.subnetworks.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -107108,7 +122693,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SslPoliciesListAvailableFeaturesResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -107120,47 +122705,40 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.sslPolicies.listAvailableFeatures", + // "id": "compute.subnetworks.getIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/sslPolicies/listAvailableFeatures", + // "path": "{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", // "response": { - // "$ref": "SslPoliciesListAvailableFeaturesResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -107171,25 +122749,25 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) } -// method id "compute.sslPolicies.patch": +// method id "compute.subnetworks.insert": -type SslPoliciesPatchCall struct { +type SubnetworksInsertCall struct { s *Service project string - sslPolicy string - sslpolicy *SslPolicy + region string + subnetwork *Subnetwork urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Patches the specified SSL policy with the data included in the -// request. -func (r *SslPoliciesService) Patch(project string, sslPolicy string, sslpolicy *SslPolicy) *SslPoliciesPatchCall { - c := &SslPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a subnetwork in the specified project using the data +// included in the request. +func (r *SubnetworksService) Insert(project string, region string, subnetwork *Subnetwork) *SubnetworksInsertCall { + c := &SubnetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.sslPolicy = sslPolicy - c.sslpolicy = sslpolicy + c.region = region + c.subnetwork = subnetwork return c } @@ -107207,7 +122785,7 @@ func (r *SslPoliciesService) Patch(project string, sslPolicy string, sslpolicy * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *SslPoliciesPatchCall) RequestId(requestId string) *SslPoliciesPatchCall { +func (c *SubnetworksInsertCall) RequestId(requestId string) *SubnetworksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -107215,7 +122793,7 @@ func (c *SslPoliciesPatchCall) RequestId(requestId string) *SslPoliciesPatchCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
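The insert call and the requestId parameter documented above are meant to make retries idempotent: resending the same request with the same ID is ignored by the server. A hedged sketch, reusing the svc from the first example; the subnetwork values and the UUID literal are placeholders, not part of the generated client:

// createSubnetwork inserts a subnetwork and tags the request with a
// client-chosen request ID so a retried call is deduplicated server-side.
func createSubnetwork(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	sn := &compute.Subnetwork{
		Name:        "example-subnet",
		IpCidrRange: "10.128.0.0/20",
		Network:     "global/networks/default",
	}
	return svc.Subnetworks.Insert("my-project", "us-central1", sn).
		RequestId("3d7e0a94-0b5e-4c17-9a61-0c7f6a2e1b23"). // must be a valid, non-zero UUID
		Context(ctx).
		Do()
}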
-func (c *SslPoliciesPatchCall) Fields(s ...googleapi.Field) *SslPoliciesPatchCall { +func (c *SubnetworksInsertCall) Fields(s ...googleapi.Field) *SubnetworksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107223,56 +122801,56 @@ func (c *SslPoliciesPatchCall) Fields(s ...googleapi.Field) *SslPoliciesPatchCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslPoliciesPatchCall) Context(ctx context.Context) *SslPoliciesPatchCall { +func (c *SubnetworksInsertCall) Context(ctx context.Context) *SubnetworksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslPoliciesPatchCall) Header() http.Header { +func (c *SubnetworksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetwork) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "sslPolicy": c.sslPolicy, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslPolicies.patch" call. +// Do executes the "compute.subnetworks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -107303,12 +122881,12 @@ func (c *SslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Patches the specified SSL policy with the data included in the request.", - // "httpMethod": "PATCH", - // "id": "compute.sslPolicies.patch", + // "description": "Creates a subnetwork in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.subnetworks.insert", // "parameterOrder": [ // "project", - // "sslPolicy" + // "region" // ], // "parameters": { // "project": { @@ -107318,21 +122896,22 @@ func (c *SslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "sslPolicy": { - // "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/sslPolicies/{sslPolicy}", + // "path": "{project}/regions/{region}/subnetworks", // "request": { - // "$ref": "SslPolicy" + // "$ref": "Subnetwork" // }, // "response": { // "$ref": "Operation" @@ -107345,89 +122924,159 @@ func (c *SslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.sslPolicies.testIamPermissions": +// method id "compute.subnetworks.list": -type SslPoliciesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SubnetworksListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *SslPoliciesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SslPoliciesTestIamPermissionsCall { - c := &SslPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of subnetworks available to the specified +// project. 
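A sketch of walking every page of this per-region list by hand, using the pageToken/nextPageToken pair and the setters defined just below. As before, svc is a *compute.Service from the first sketch and the project/region names are placeholders:

// listSubnetworks iterates all pages of compute.subnetworks.list for one region.
func listSubnetworks(ctx context.Context, svc *compute.Service) error {
	pageToken := ""
	for {
		call := svc.Subnetworks.List("my-project", "us-central1").
			OrderBy("creationTimestamp desc").
			MaxResults(50).
			Context(ctx)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Do()
		if err != nil {
			return err
		}
		for _, sn := range page.Items {
			fmt.Println(sn.Name, sn.IpCidrRange)
		}
		if page.NextPageToken == "" {
			return nil
		}
		pageToken = page.NextPageToken
	}
}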
+func (r *SubnetworksService) List(project string, region string) *SubnetworksListCall { + c := &SubnetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *SubnetworksListCall) MaxResults(maxResults int64) *SubnetworksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *SubnetworksListCall) OrderBy(orderBy string) *SubnetworksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SubnetworksListCall) PageToken(pageToken string) *SubnetworksListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *SslPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SslPoliciesTestIamPermissionsCall { +func (c *SubnetworksListCall) Fields(s ...googleapi.Field) *SubnetworksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SubnetworksListCall) IfNoneMatch(entityTag string) *SubnetworksListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslPoliciesTestIamPermissionsCall) Context(ctx context.Context) *SslPoliciesTestIamPermissionsCall { +func (c *SubnetworksListCall) Context(ctx context.Context) *SubnetworksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslPoliciesTestIamPermissionsCall) Header() http.Header { +func (c *SubnetworksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslPolicies.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.subnetworks.list" call. +// Exactly one of *SubnetworkList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SubnetworkList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *SslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -107446,7 +123095,7 @@ func (c *SslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &SubnetworkList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -107458,14 +123107,37 @@ func (c *SslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.sslPolicies.testIamPermissions", + // "description": "Retrieves a list of subnetworks available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.subnetworks.list", // "parameterOrder": [ // "project", - // "resource" + // "region" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -107473,20 +123145,17 @@ func (c *SslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/sslPolicies/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } // }, + // "path": "{project}/regions/{region}/subnetworks", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "SubnetworkList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -107497,9 +123166,30 @@ func (c *SslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } -// method id "compute.subnetworks.aggregatedList": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SubnetworksListCall) Pages(ctx context.Context, f func(*SubnetworkList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type SubnetworksAggregatedListCall struct { +// method id "compute.subnetworks.listUsable": + +type SubnetworksListUsableCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -107508,9 +123198,9 @@ type SubnetworksAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of subnetworks. -func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregatedListCall { - c := &SubnetworksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListUsable: Retrieves an aggregated list of usable subnetworks. +func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCall { + c := &SubnetworksListUsableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -107537,7 +123227,7 @@ func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregat // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
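The list and listUsable calls follow the generated client's usual paged-list pattern: Filter, MaxResults, OrderBy and PageToken set query parameters, and Pages drives the NextPageToken loop. A minimal usage sketch, assuming an authenticated *compute.Service built elsewhere; google.DefaultClient and the project/region names below are placeholder assumptions.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Assumed setup: an OAuth2 HTTP client with the cloud-platform scope.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Page through the subnetworks of one region, filtered by name.
	call := svc.Subnetworks.List("my-project", "us-central1").
		Filter(`name != "default"`).
		MaxResults(100)
	err = call.Pages(ctx, func(page *compute.SubnetworkList) error {
		for _, sn := range page.Items {
			fmt.Println(sn.Name, sn.IpCidrRange)
		}
		return nil // a non-nil error here halts the iteration
	})
	if err != nil {
		log.Fatal(err)
	}
}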
-func (c *SubnetworksAggregatedListCall) Filter(filter string) *SubnetworksAggregatedListCall { +func (c *SubnetworksListUsableCall) Filter(filter string) *SubnetworksListUsableCall { c.urlParams_.Set("filter", filter) return c } @@ -107548,7 +123238,7 @@ func (c *SubnetworksAggregatedListCall) Filter(filter string) *SubnetworksAggreg // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *SubnetworksAggregatedListCall) MaxResults(maxResults int64) *SubnetworksAggregatedListCall { +func (c *SubnetworksListUsableCall) MaxResults(maxResults int64) *SubnetworksListUsableCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -107565,7 +123255,7 @@ func (c *SubnetworksAggregatedListCall) MaxResults(maxResults int64) *Subnetwork // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *SubnetworksAggregatedListCall) OrderBy(orderBy string) *SubnetworksAggregatedListCall { +func (c *SubnetworksListUsableCall) OrderBy(orderBy string) *SubnetworksListUsableCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -107573,7 +123263,7 @@ func (c *SubnetworksAggregatedListCall) OrderBy(orderBy string) *SubnetworksAggr // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *SubnetworksAggregatedListCall) PageToken(pageToken string) *SubnetworksAggregatedListCall { +func (c *SubnetworksListUsableCall) PageToken(pageToken string) *SubnetworksListUsableCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -107581,7 +123271,7 @@ func (c *SubnetworksAggregatedListCall) PageToken(pageToken string) *Subnetworks // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SubnetworksAggregatedListCall) Fields(s ...googleapi.Field) *SubnetworksAggregatedListCall { +func (c *SubnetworksListUsableCall) Fields(s ...googleapi.Field) *SubnetworksListUsableCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107591,7 +123281,7 @@ func (c *SubnetworksAggregatedListCall) Fields(s ...googleapi.Field) *Subnetwork // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SubnetworksAggregatedListCall) IfNoneMatch(entityTag string) *SubnetworksAggregatedListCall { +func (c *SubnetworksListUsableCall) IfNoneMatch(entityTag string) *SubnetworksListUsableCall { c.ifNoneMatch_ = entityTag return c } @@ -107599,21 +123289,21 @@ func (c *SubnetworksAggregatedListCall) IfNoneMatch(entityTag string) *Subnetwor // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksAggregatedListCall) Context(ctx context.Context) *SubnetworksAggregatedListCall { +func (c *SubnetworksListUsableCall) Context(ctx context.Context) *SubnetworksListUsableCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *SubnetworksAggregatedListCall) Header() http.Header { +func (c *SubnetworksListUsableCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -107625,7 +123315,7 @@ func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks/listUsable") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -107638,14 +123328,14 @@ func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.aggregatedList" call. -// Exactly one of *SubnetworkAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *SubnetworkAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was +// Do executes the "compute.subnetworks.listUsable" call. +// Exactly one of *UsableSubnetworksAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *UsableSubnetworksAggregatedList.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*SubnetworkAggregatedList, error) { +func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSubnetworksAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -107664,7 +123354,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SubnetworkAggregatedList{ + ret := &UsableSubnetworksAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -107676,9 +123366,9 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne } return ret, nil // { - // "description": "Retrieves an aggregated list of subnetworks.", + // "description": "Retrieves an aggregated list of usable subnetworks.", // "httpMethod": "GET", - // "id": "compute.subnetworks.aggregatedList", + // "id": "compute.subnetworks.listUsable", // "parameterOrder": [ // "project" // ], @@ -107714,9 +123404,9 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // "type": "string" // } // }, - // "path": "{project}/aggregated/subnetworks", + // "path": "{project}/aggregated/subnetworks/listUsable", // "response": { - // "$ref": "SubnetworkAggregatedList" + // "$ref": "UsableSubnetworksAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -107730,7 +123420,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *SubnetworksAggregatedListCall) Pages(ctx context.Context, f func(*SubnetworkAggregatedList) error) error { +func (c *SubnetworksListUsableCall) Pages(ctx context.Context, f func(*UsableSubnetworksAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -107748,24 +123438,44 @@ func (c *SubnetworksAggregatedListCall) Pages(ctx context.Context, f func(*Subne } } -// method id "compute.subnetworks.delete": +// method id "compute.subnetworks.patch": -type SubnetworksDeleteCall struct { - s *Service - project string - region string - subnetwork string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SubnetworksPatchCall struct { + s *Service + project string + region string + subnetwork string + subnetwork2 *Subnetwork + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified subnetwork. -func (r *SubnetworksService) Delete(project string, region string, subnetwork string) *SubnetworksDeleteCall { - c := &SubnetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified subnetwork with the data included in the +// request. Only certain fields can up updated with a patch request as +// indicated in the field descriptions. You must specify the current +// fingeprint of the subnetwork resource being patched. 
+func (r *SubnetworksService) Patch(project string, region string, subnetwork string, subnetwork2 *Subnetwork) *SubnetworksPatchCall { + c := &SubnetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.subnetwork = subnetwork + c.subnetwork2 = subnetwork2 + return c +} + +// DrainTimeoutSeconds sets the optional parameter +// "drainTimeoutSeconds": The drain timeout specifies the upper bound in +// seconds on the amount of time allowed to drain connections from the +// current ACTIVE subnetwork to the current BACKUP subnetwork. The drain +// timeout is only applicable when the following conditions are true: - +// the subnetwork being patched has purpose = +// INTERNAL_HTTPS_LOAD_BALANCER - the subnetwork being patched has role +// = BACKUP - the patch request is setting the role to ACTIVE. Note that +// after this patch operation the roles of the ACTIVE and BACKUP +// subnetworks will be swapped. +func (c *SubnetworksPatchCall) DrainTimeoutSeconds(drainTimeoutSeconds int64) *SubnetworksPatchCall { + c.urlParams_.Set("drainTimeoutSeconds", fmt.Sprint(drainTimeoutSeconds)) return c } @@ -107783,7 +123493,7 @@ func (r *SubnetworksService) Delete(project string, region string, subnetwork st // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *SubnetworksDeleteCall) RequestId(requestId string) *SubnetworksDeleteCall { +func (c *SubnetworksPatchCall) RequestId(requestId string) *SubnetworksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -107791,7 +123501,7 @@ func (c *SubnetworksDeleteCall) RequestId(requestId string) *SubnetworksDeleteCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SubnetworksDeleteCall) Fields(s ...googleapi.Field) *SubnetworksDeleteCall { +func (c *SubnetworksPatchCall) Fields(s ...googleapi.Field) *SubnetworksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107799,32 +123509,37 @@ func (c *SubnetworksDeleteCall) Fields(s ...googleapi.Field) *SubnetworksDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksDeleteCall) Context(ctx context.Context) *SubnetworksDeleteCall { +func (c *SubnetworksPatchCall) Context(ctx context.Context) *SubnetworksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SubnetworksDeleteCall) Header() http.Header { +func (c *SubnetworksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *SubnetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetwork2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -107837,14 +123552,14 @@ func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.delete" call. +// Do executes the "compute.subnetworks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -107875,15 +123590,21 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified subnetwork.", - // "httpMethod": "DELETE", - // "id": "compute.subnetworks.delete", + // "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can up updated with a patch request as indicated in the field descriptions. You must specify the current fingeprint of the subnetwork resource being patched.", + // "httpMethod": "PATCH", + // "id": "compute.subnetworks.patch", // "parameterOrder": [ // "project", // "region", // "subnetwork" // ], // "parameters": { + // "drainTimeoutSeconds": { + // "description": "The drain timeout specifies the upper bound in seconds on the amount of time allowed to drain connections from the current ACTIVE subnetwork to the current BACKUP subnetwork. The drain timeout is only applicable when the following conditions are true: - the subnetwork being patched has purpose = INTERNAL_HTTPS_LOAD_BALANCER - the subnetwork being patched has role = BACKUP - the patch request is setting the role to ACTIVE. 
Note that after this patch operation the roles of the ACTIVE and BACKUP subnetworks will be swapped.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -107904,14 +123625,17 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "subnetwork": { - // "description": "Name of the Subnetwork resource to delete.", + // "description": "Name of the Subnetwork resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, // "path": "{project}/regions/{region}/subnetworks/{subnetwork}", + // "request": { + // "$ref": "Subnetwork" + // }, // "response": { // "$ref": "Operation" // }, @@ -107923,27 +123647,190 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.subnetworks.expandIpCidrRange": +// method id "compute.subnetworks.setIamPolicy": -type SubnetworksExpandIpCidrRangeCall struct { - s *Service - project string - region string - subnetwork string - subnetworksexpandipcidrrangerequest *SubnetworksExpandIpCidrRangeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SubnetworksSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ExpandIpCidrRange: Expands the IP CIDR range of the subnetwork to a -// specified value. -func (r *SubnetworksService) ExpandIpCidrRange(project string, region string, subnetwork string, subnetworksexpandipcidrrangerequest *SubnetworksExpandIpCidrRangeRequest) *SubnetworksExpandIpCidrRangeCall { - c := &SubnetworksExpandIpCidrRangeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *SubnetworksService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *SubnetworksSetIamPolicyCall { + c := &SubnetworksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SubnetworksSetIamPolicyCall) Fields(s ...googleapi.Field) *SubnetworksSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SubnetworksSetIamPolicyCall) Context(ctx context.Context) *SubnetworksSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
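compute.subnetworks.setIamPolicy replaces the subnetwork's IAM policy with the one wrapped in a RegionSetPolicyRequest. A sketch under the assumption that Policy and Binding expose Bindings, Role and Members fields as in this client version; the role, member and resource names are illustrative placeholders.

package main

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// grantSubnetworkUser sketches replacing the IAM policy on a subnetwork so
// that a single member can use it (for example in Shared VPC setups).
func grantSubnetworkUser(svc *compute.Service, project, region, subnetwork string) {
	req := &compute.RegionSetPolicyRequest{
		Policy: &compute.Policy{
			Bindings: []*compute.Binding{{
				Role:    "roles/compute.networkUser",
				Members: []string{"user:alice@example.com"}, // placeholder member
			}},
		},
	}
	policy, err := svc.Subnetworks.SetIamPolicy(project, region, subnetwork, req).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("policy etag:", policy.Etag)
}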
+func (c *SubnetworksSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SubnetworksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.subnetworks.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *SubnetworksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.subnetworks.setIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/subnetworks/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.subnetworks.setPrivateIpGoogleAccess": + +type SubnetworksSetPrivateIpGoogleAccessCall struct { + s *Service + project string + region string + subnetwork string + subnetworkssetprivateipgoogleaccessrequest *SubnetworksSetPrivateIpGoogleAccessRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetPrivateIpGoogleAccess: Set whether VMs in this subnet can access +// Google services without assigning external IP addresses through +// Private Google Access. +func (r *SubnetworksService) SetPrivateIpGoogleAccess(project string, region string, subnetwork string, subnetworkssetprivateipgoogleaccessrequest *SubnetworksSetPrivateIpGoogleAccessRequest) *SubnetworksSetPrivateIpGoogleAccessCall { + c := &SubnetworksSetPrivateIpGoogleAccessCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.subnetwork = subnetwork - c.subnetworksexpandipcidrrangerequest = subnetworksexpandipcidrrangerequest + c.subnetworkssetprivateipgoogleaccessrequest = subnetworkssetprivateipgoogleaccessrequest return c } @@ -107961,7 +123848,7 @@ func (r *SubnetworksService) ExpandIpCidrRange(project string, region string, su // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *SubnetworksExpandIpCidrRangeCall) RequestId(requestId string) *SubnetworksExpandIpCidrRangeCall { +func (c *SubnetworksSetPrivateIpGoogleAccessCall) RequestId(requestId string) *SubnetworksSetPrivateIpGoogleAccessCall { c.urlParams_.Set("requestId", requestId) return c } @@ -107969,7 +123856,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) RequestId(requestId string) *Subnetwo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
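compute.subnetworks.setPrivateIpGoogleAccess toggles Private Google Access for a subnet through a small request body. A minimal sketch, assuming the request type exposes a PrivateIpGoogleAccess bool as generated here; project, region and subnetwork names are placeholders.

package main

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// enablePrivateGoogleAccess sketches turning on Private Google Access so VMs
// without external IPs in this subnet can reach Google APIs and services.
func enablePrivateGoogleAccess(svc *compute.Service, project, region, subnetwork string) {
	req := &compute.SubnetworksSetPrivateIpGoogleAccessRequest{
		PrivateIpGoogleAccess: true,
	}
	op, err := svc.Subnetworks.SetPrivateIpGoogleAccess(project, region, subnetwork, req).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, "status:", op.Status)
}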
-func (c *SubnetworksExpandIpCidrRangeCall) Fields(s ...googleapi.Field) *SubnetworksExpandIpCidrRangeCall { +func (c *SubnetworksSetPrivateIpGoogleAccessCall) Fields(s ...googleapi.Field) *SubnetworksSetPrivateIpGoogleAccessCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107977,35 +123864,35 @@ func (c *SubnetworksExpandIpCidrRangeCall) Fields(s ...googleapi.Field) *Subnetw // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksExpandIpCidrRangeCall) Context(ctx context.Context) *SubnetworksExpandIpCidrRangeCall { +func (c *SubnetworksSetPrivateIpGoogleAccessCall) Context(ctx context.Context) *SubnetworksSetPrivateIpGoogleAccessCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SubnetworksExpandIpCidrRangeCall) Header() http.Header { +func (c *SubnetworksSetPrivateIpGoogleAccessCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response, error) { +func (c *SubnetworksSetPrivateIpGoogleAccessCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetworksexpandipcidrrangerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetworkssetprivateipgoogleaccessrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -108020,14 +123907,14 @@ func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.expandIpCidrRange" call. +// Do executes the "compute.subnetworks.setPrivateIpGoogleAccess" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -108058,9 +123945,9 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Expands the IP CIDR range of the subnetwork to a specified value.", + // "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", // "httpMethod": "POST", - // "id": "compute.subnetworks.expandIpCidrRange", + // "id": "compute.subnetworks.setPrivateIpGoogleAccess", // "parameterOrder": [ // "project", // "region", @@ -108087,16 +123974,16 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op // "type": "string" // }, // "subnetwork": { - // "description": "Name of the Subnetwork resource to update.", + // "description": "Name of the Subnetwork resource.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange", + // "path": "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", // "request": { - // "$ref": "SubnetworksExpandIpCidrRangeRequest" + // "$ref": "SubnetworksSetPrivateIpGoogleAccessRequest" // }, // "response": { // "$ref": "Operation" @@ -108109,99 +123996,92 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.subnetworks.get": +// method id "compute.subnetworks.testIamPermissions": -type SubnetworksGetCall struct { - s *Service - project string - region string - subnetwork string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type SubnetworksTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified subnetwork. Gets a list of available -// subnetworks list() request. -func (r *SubnetworksService) Get(project string, region string, subnetwork string) *SubnetworksGetCall { - c := &SubnetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *SubnetworksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *SubnetworksTestIamPermissionsCall { + c := &SubnetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.subnetwork = subnetwork + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SubnetworksGetCall) Fields(s ...googleapi.Field) *SubnetworksGetCall { +func (c *SubnetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *SubnetworksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SubnetworksGetCall) IfNoneMatch(entityTag string) *SubnetworksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksGetCall) Context(ctx context.Context) *SubnetworksGetCall { +func (c *SubnetworksTestIamPermissionsCall) Context(ctx context.Context) *SubnetworksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SubnetworksGetCall) Header() http.Header { +func (c *SubnetworksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "subnetwork": c.subnetwork, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.get" call. -// Exactly one of *Subnetwork or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Subnetwork.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, error) { +// Do executes the "compute.subnetworks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -108220,7 +124100,7 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Subnetwork{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -108232,13 +124112,13 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro } return ret, nil // { - // "description": "Returns the specified subnetwork. Gets a list of available subnetworks list() request.", - // "httpMethod": "GET", - // "id": "compute.subnetworks.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.subnetworks.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "subnetwork" + // "resource" // ], // "parameters": { // "project": { @@ -108249,23 +124129,26 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "subnetwork": { - // "description": "Name of the Subnetwork resource to return.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/subnetworks/{subnetwork}", + // "path": "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Subnetwork" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -108276,33 +124159,92 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro } -// method id "compute.subnetworks.getIamPolicy": +// method id "compute.targetHttpProxies.aggregatedList": + +type TargetHttpProxiesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all TargetHttpProxy resources, +// regional and global, available to the specified project. +func (r *TargetHttpProxiesService) AggregatedList(project string) *TargetHttpProxiesAggregatedListCall { + c := &TargetHttpProxiesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *TargetHttpProxiesAggregatedListCall) Filter(filter string) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *TargetHttpProxiesAggregatedListCall) MaxResults(maxResults int64) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} -type SubnetworksGetIamPolicyCall struct { - s *Service - project string - region string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetHttpProxiesAggregatedListCall) OrderBy(orderBy string) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *SubnetworksService) GetIamPolicy(project string, region string, resource string) *SubnetworksGetIamPolicyCall { - c := &SubnetworksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetHttpProxiesAggregatedListCall) PageToken(pageToken string) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
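compute.targetHttpProxies.aggregatedList returns proxies grouped per scope (global plus each region) and supports the same paging helpers as the other list calls. A sketch of walking every page, assuming the aggregated list's Items map is keyed by scope and each scoped entry exposes a TargetHttpProxies slice (an assumption based on the client's usual aggregated-list shape); the project name is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listAllTargetHttpProxies sketches walking every page of the aggregated
// (regional plus global) TargetHttpProxy listing for a project.
func listAllTargetHttpProxies(ctx context.Context, svc *compute.Service, project string) {
	call := svc.TargetHttpProxies.AggregatedList(project).MaxResults(250)
	err := call.Pages(ctx, func(page *compute.TargetHttpProxyAggregatedList) error {
		for scope, scoped := range page.Items {
			// scoped.TargetHttpProxies is assumed to hold the proxies for this scope.
			fmt.Printf("%s: %d proxies\n", scope, len(scoped.TargetHttpProxies))
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}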
-func (c *SubnetworksGetIamPolicyCall) Fields(s ...googleapi.Field) *SubnetworksGetIamPolicyCall { +func (c *TargetHttpProxiesAggregatedListCall) Fields(s ...googleapi.Field) *TargetHttpProxiesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108312,7 +124254,7 @@ func (c *SubnetworksGetIamPolicyCall) Fields(s ...googleapi.Field) *SubnetworksG // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SubnetworksGetIamPolicyCall) IfNoneMatch(entityTag string) *SubnetworksGetIamPolicyCall { +func (c *TargetHttpProxiesAggregatedListCall) IfNoneMatch(entityTag string) *TargetHttpProxiesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -108320,21 +124262,21 @@ func (c *SubnetworksGetIamPolicyCall) IfNoneMatch(entityTag string) *Subnetworks // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksGetIamPolicyCall) Context(ctx context.Context) *SubnetworksGetIamPolicyCall { +func (c *TargetHttpProxiesAggregatedListCall) Context(ctx context.Context) *TargetHttpProxiesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SubnetworksGetIamPolicyCall) Header() http.Header { +func (c *TargetHttpProxiesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -108346,7 +124288,7 @@ func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetHttpProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -108354,21 +124296,19 @@ func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.targetHttpProxies.aggregatedList" call. +// Exactly one of *TargetHttpProxyAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. 
Response headers are in +// either *TargetHttpProxyAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -108387,7 +124327,7 @@ func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &TargetHttpProxyAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -108399,40 +124339,47 @@ func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Retrieves the list of all TargetHttpProxy resources, regional and global, available to the specified project.", // "httpMethod": "GET", - // "id": "compute.subnetworks.getIamPolicy", + // "id": "compute.targetHttpProxies.aggregatedList", // "parameterOrder": [ - // "project", - // "region", - // "resource" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/subnetworks/{resource}/getIamPolicy", + // "path": "{project}/aggregated/targetHttpProxies", // "response": { - // "$ref": "Policy" + // "$ref": "TargetHttpProxyAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -108443,25 +124390,44 @@ func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } -// method id "compute.subnetworks.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetHttpProxiesAggregatedListCall) Pages(ctx context.Context, f func(*TargetHttpProxyAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type SubnetworksInsertCall struct { - s *Service - project string - region string - subnetwork *Subnetwork - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.targetHttpProxies.delete": + +type TargetHttpProxiesDeleteCall struct { + s *Service + project string + targetHttpProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a subnetwork in the specified project using the data -// included in the request. -func (r *SubnetworksService) Insert(project string, region string, subnetwork *Subnetwork) *SubnetworksInsertCall { - c := &SubnetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified TargetHttpProxy resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/delete +func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string) *TargetHttpProxiesDeleteCall { + c := &TargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.subnetwork = subnetwork + c.targetHttpProxy = targetHttpProxy return c } @@ -108479,7 +124445,7 @@ func (r *SubnetworksService) Insert(project string, region string, subnetwork *S // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *SubnetworksInsertCall) RequestId(requestId string) *SubnetworksInsertCall { +func (c *TargetHttpProxiesDeleteCall) RequestId(requestId string) *TargetHttpProxiesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -108487,7 +124453,7 @@ func (c *SubnetworksInsertCall) RequestId(requestId string) *SubnetworksInsertCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SubnetworksInsertCall) Fields(s ...googleapi.Field) *SubnetworksInsertCall { +func (c *TargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpProxiesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108495,56 +124461,51 @@ func (c *SubnetworksInsertCall) Fields(s ...googleapi.Field) *SubnetworksInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksInsertCall) Context(ctx context.Context) *SubnetworksInsertCall { +func (c *TargetHttpProxiesDeleteCall) Context(ctx context.Context) *TargetHttpProxiesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SubnetworksInsertCall) Header() http.Header { +func (c *TargetHttpProxiesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetwork) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.insert" call. +// Do executes the "compute.targetHttpProxies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -108575,12 +124536,12 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a subnetwork in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.subnetworks.insert", + // "description": "Deletes the specified TargetHttpProxy resource.", + // "httpMethod": "DELETE", + // "id": "compute.targetHttpProxies.delete", // "parameterOrder": [ // "project", - // "region" + // "targetHttpProxy" // ], // "parameters": { // "project": { @@ -108590,23 +124551,20 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/subnetworks", - // "request": { - // "$ref": "Subnetwork" - // }, + // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", // "response": { // "$ref": "Operation" // }, @@ -108618,94 +124576,32 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.subnetworks.list": +// method id "compute.targetHttpProxies.get": -type SubnetworksListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type TargetHttpProxiesGetCall struct { + s *Service + project string + targetHttpProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of subnetworks available to the specified -// project. 
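For reviewers, a minimal usage sketch of the generated compute.targetHttpProxies.delete call added above. The import path (v0.beta), the project and proxy names, and the credential setup are illustrative assumptions and are not part of this vendored diff; the Delete/RequestId/Context/Do signatures match the hunk above.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed path; match the vendored API version
)

func main() {
	ctx := context.Background()

	// Application Default Credentials; any authenticated *http.Client works here.
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Delete a global TargetHttpProxy. RequestId makes retries idempotent:
	// a repeat of the same UUID is ignored by the server.
	op, err := svc.TargetHttpProxies.
		Delete("my-project", "my-proxy").                  // hypothetical names
		RequestId("123e4567-e89b-12d3-a456-426614174000"). // placeholder; use a fresh, non-zero UUID
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete started: operation %s, status %s", op.Name, op.Status)
}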
-func (r *SubnetworksService) List(project string, region string) *SubnetworksListCall { - c := &SubnetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified TargetHttpProxy resource. Gets a list of +// available target HTTP proxies by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/get +func (r *TargetHttpProxiesService) Get(project string, targetHttpProxy string) *TargetHttpProxiesGetCall { + c := &TargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *SubnetworksListCall) MaxResults(maxResults int64) *SubnetworksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *SubnetworksListCall) OrderBy(orderBy string) *SubnetworksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *SubnetworksListCall) PageToken(pageToken string) *SubnetworksListCall { - c.urlParams_.Set("pageToken", pageToken) + c.targetHttpProxy = targetHttpProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SubnetworksListCall) Fields(s ...googleapi.Field) *SubnetworksListCall { +func (c *TargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpProxiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108715,7 +124611,7 @@ func (c *SubnetworksListCall) Fields(s ...googleapi.Field) *SubnetworksListCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SubnetworksListCall) IfNoneMatch(entityTag string) *SubnetworksListCall { +func (c *TargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpProxiesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -108723,21 +124619,21 @@ func (c *SubnetworksListCall) IfNoneMatch(entityTag string) *SubnetworksListCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksListCall) Context(ctx context.Context) *SubnetworksListCall { +func (c *TargetHttpProxiesGetCall) Context(ctx context.Context) *TargetHttpProxiesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SubnetworksListCall) Header() http.Header { +func (c *TargetHttpProxiesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -108749,7 +124645,7 @@ func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -108757,20 +124653,20 @@ func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.list" call. -// Exactly one of *SubnetworkList or error will be non-nil. Any non-2xx +// Do executes the "compute.targetHttpProxies.get" call. +// Exactly one of *TargetHttpProxy or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SubnetworkList.ServerResponse.Header or (if a response was returned +// *TargetHttpProxy.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
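A hedged sketch of a conditional read with the get call above: IfNoneMatch replays a previously seen ETag and googleapi.IsNotModified detects the resulting 304. svc is a *compute.Service built as in the earlier delete sketch; the resource names are placeholders and the TargetHttpProxy fields used are assumed from the API's JSON surface.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed path
	"google.golang.org/api/googleapi"
)

func describeProxy(ctx context.Context, svc *compute.Service, lastETag string) {
	proxy, err := svc.TargetHttpProxies.
		Get("my-project", "my-proxy").
		Fields("name", "urlMap"). // partial response: only these JSON fields
		IfNoneMatch(lastETag).    // ask the server to skip the body if unchanged
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		log.Println("proxy unchanged since last fetch")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("proxy %s uses URL map %s", proxy.Name, proxy.UrlMap)
}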
-func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, error) { +func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -108789,7 +124685,7 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SubnetworkList{ + ret := &TargetHttpProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -108801,37 +124697,179 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, } return ret, nil // { - // "description": "Retrieves a list of subnetworks available to the specified project.", + // "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request.", // "httpMethod": "GET", - // "id": "compute.subnetworks.list", + // "id": "compute.targetHttpProxies.get", // "parameterOrder": [ // "project", - // "region" + // "targetHttpProxy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" - // }, + // } + // }, + // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + // "response": { + // "$ref": "TargetHttpProxy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetHttpProxies.insert": + +type TargetHttpProxiesInsertCall struct { + s *Service + project string + targethttpproxy *TargetHttpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a TargetHttpProxy resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/insert +func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesInsertCall { + c := &TargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targethttpproxy = targethttpproxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpProxiesInsertCall) RequestId(requestId string) *TargetHttpProxiesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpProxiesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
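A sketch of the insert call above, creating a proxy that points at an existing URL map. The TargetHttpProxy struct fields (Name, UrlMap) are assumed from the API's request schema; the names and the URL map link are placeholders, and the operation handling is only indicative.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed path
)

func createProxy(ctx context.Context, svc *compute.Service) error {
	proxy := &compute.TargetHttpProxy{
		Name:   "my-proxy",
		UrlMap: "global/urlMaps/my-url-map", // placeholder partial URL of an existing UrlMap
	}
	op, err := svc.TargetHttpProxies.Insert("my-project", proxy).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Insert is asynchronous: callers typically poll the returned Operation
	// (for example via GlobalOperations.Get) until its Status is "DONE".
	log.Printf("insert operation %s started", op.Name)
	return nil
}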
+func (c *TargetHttpProxiesInsertCall) Context(ctx context.Context) *TargetHttpProxiesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetHttpProxiesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetHttpProxies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.targetHttpProxies.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -108839,51 +124877,30 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/subnetworks", + // "path": "{project}/global/targetHttpProxies", + // "request": { + // "$ref": "TargetHttpProxy" + // }, // "response": { - // "$ref": "SubnetworkList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *SubnetworksListCall) Pages(ctx context.Context, f func(*SubnetworkList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.subnetworks.listUsable": +// method id "compute.targetHttpProxies.list": -type SubnetworksListUsableCall struct { +type TargetHttpProxiesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -108892,9 +124909,11 @@ type SubnetworksListUsableCall struct { header_ http.Header } -// ListUsable: Retrieves an aggregated list of usable subnetworks. -func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCall { - c := &SubnetworksListUsableCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of TargetHttpProxy resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/list +func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCall { + c := &TargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -108921,7 +124940,7 @@ func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCa // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *SubnetworksListUsableCall) Filter(filter string) *SubnetworksListUsableCall { +func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall { c.urlParams_.Set("filter", filter) return c } @@ -108932,7 +124951,7 @@ func (c *SubnetworksListUsableCall) Filter(filter string) *SubnetworksListUsable // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *SubnetworksListUsableCall) MaxResults(maxResults int64) *SubnetworksListUsableCall { +func (c *TargetHttpProxiesListCall) MaxResults(maxResults int64) *TargetHttpProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -108949,7 +124968,7 @@ func (c *SubnetworksListUsableCall) MaxResults(maxResults int64) *SubnetworksLis // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *SubnetworksListUsableCall) OrderBy(orderBy string) *SubnetworksListUsableCall { +func (c *TargetHttpProxiesListCall) OrderBy(orderBy string) *TargetHttpProxiesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -108957,7 +124976,7 @@ func (c *SubnetworksListUsableCall) OrderBy(orderBy string) *SubnetworksListUsab // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *SubnetworksListUsableCall) PageToken(pageToken string) *SubnetworksListUsableCall { +func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxiesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -108965,7 +124984,7 @@ func (c *SubnetworksListUsableCall) PageToken(pageToken string) *SubnetworksList // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SubnetworksListUsableCall) Fields(s ...googleapi.Field) *SubnetworksListUsableCall { +func (c *TargetHttpProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108975,7 +124994,7 @@ func (c *SubnetworksListUsableCall) Fields(s ...googleapi.Field) *SubnetworksLis // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SubnetworksListUsableCall) IfNoneMatch(entityTag string) *SubnetworksListUsableCall { +func (c *TargetHttpProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpProxiesListCall { c.ifNoneMatch_ = entityTag return c } @@ -108983,21 +125002,21 @@ func (c *SubnetworksListUsableCall) IfNoneMatch(entityTag string) *SubnetworksLi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksListUsableCall) Context(ctx context.Context) *SubnetworksListUsableCall { +func (c *TargetHttpProxiesListCall) Context(ctx context.Context) *TargetHttpProxiesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
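A sketch of paging through the list call above with the Pages helper that this hunk also adds for TargetHttpProxiesListCall. The filter string follows the grammar described in the Filter comment; the project name is a placeholder, and page.Items is assumed to follow the usual <Resource>List shape.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed path
)

func listProxies(ctx context.Context, svc *compute.Service) error {
	return svc.TargetHttpProxies.
		List("my-project").
		Filter("name != legacy-proxy"). // same filter grammar as documented above
		MaxResults(100).
		Pages(ctx, func(page *compute.TargetHttpProxyList) error {
			for _, p := range page.Items {
				log.Printf("proxy %s -> %s", p.Name, p.UrlMap)
			}
			return nil // a non-nil error here stops the iteration
		})
}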
-func (c *SubnetworksListUsableCall) Header() http.Header { +func (c *TargetHttpProxiesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -109009,7 +125028,7 @@ func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks/listUsable") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -109022,14 +125041,14 @@ func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.listUsable" call. -// Exactly one of *UsableSubnetworksAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *UsableSubnetworksAggregatedList.ServerResponse.Header or (if -// a response was returned at all) in error.(*googleapi.Error).Header. -// Use googleapi.IsNotModified to check whether the returned error was +// Do executes the "compute.targetHttpProxies.list" call. +// Exactly one of *TargetHttpProxyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetHttpProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSubnetworksAggregatedList, error) { +func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -109048,7 +125067,7 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &UsableSubnetworksAggregatedList{ + ret := &TargetHttpProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -109060,9 +125079,9 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub } return ret, nil // { - // "description": "Retrieves an aggregated list of usable subnetworks.", + // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.subnetworks.listUsable", + // "id": "compute.targetHttpProxies.list", // "parameterOrder": [ // "project" // ], @@ -109098,9 +125117,9 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub // "type": "string" // } // }, - // "path": "{project}/aggregated/subnetworks/listUsable", + // "path": "{project}/global/targetHttpProxies", // "response": { - // "$ref": "UsableSubnetworksAggregatedList" + // "$ref": "TargetHttpProxyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -109114,7 +125133,7 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *SubnetworksListUsableCall) Pages(ctx context.Context, f func(*UsableSubnetworksAggregatedList) error) error { +func (c *TargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpProxyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -109132,29 +125151,25 @@ func (c *SubnetworksListUsableCall) Pages(ctx context.Context, f func(*UsableSub } } -// method id "compute.subnetworks.patch": +// method id "compute.targetHttpProxies.setUrlMap": -type SubnetworksPatchCall struct { - s *Service - project string - region string - subnetwork string - subnetwork2 *Subnetwork - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetHttpProxiesSetUrlMapCall struct { + s *Service + project string + targetHttpProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified subnetwork with the data included in the -// request. Only certain fields can up updated with a patch request as -// indicated in the field descriptions. You must specify the current -// fingeprint of the subnetwork resource being patched. -func (r *SubnetworksService) Patch(project string, region string, subnetwork string, subnetwork2 *Subnetwork) *SubnetworksPatchCall { - c := &SubnetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetUrlMap: Changes the URL map for TargetHttpProxy. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/setUrlMap +func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy string, urlmapreference *UrlMapReference) *TargetHttpProxiesSetUrlMapCall { + c := &TargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.subnetwork = subnetwork - c.subnetwork2 = subnetwork2 + c.targetHttpProxy = targetHttpProxy + c.urlmapreference = urlmapreference return c } @@ -109172,7 +125187,7 @@ func (r *SubnetworksService) Patch(project string, region string, subnetwork str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *SubnetworksPatchCall) RequestId(requestId string) *SubnetworksPatchCall { +func (c *TargetHttpProxiesSetUrlMapCall) RequestId(requestId string) *TargetHttpProxiesSetUrlMapCall { c.urlParams_.Set("requestId", requestId) return c } @@ -109180,7 +125195,7 @@ func (c *SubnetworksPatchCall) RequestId(requestId string) *SubnetworksPatchCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SubnetworksPatchCall) Fields(s ...googleapi.Field) *SubnetworksPatchCall { +func (c *TargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpProxiesSetUrlMapCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -109188,57 +125203,56 @@ func (c *SubnetworksPatchCall) Fields(s ...googleapi.Field) *SubnetworksPatchCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksPatchCall) Context(ctx context.Context) *SubnetworksPatchCall { +func (c *TargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpProxiesSetUrlMapCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SubnetworksPatchCall) Header() http.Header { +func (c *TargetHttpProxiesSetUrlMapCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SubnetworksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetwork2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "subnetwork": c.subnetwork, + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.patch" call. +// Do executes the "compute.targetHttpProxies.setUrlMap" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -109269,13 +125283,12 @@ func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Patches the specified subnetwork with the data included in the request. Only certain fields can up updated with a patch request as indicated in the field descriptions. You must specify the current fingeprint of the subnetwork resource being patched.", - // "httpMethod": "PATCH", - // "id": "compute.subnetworks.patch", + // "description": "Changes the URL map for TargetHttpProxy.", + // "httpMethod": "POST", + // "id": "compute.targetHttpProxies.setUrlMap", // "parameterOrder": [ // "project", - // "region", - // "subnetwork" + // "targetHttpProxy" // ], // "parameters": { // "project": { @@ -109285,29 +125298,22 @@ func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "subnetwork": { - // "description": "Name of the Subnetwork resource to patch.", + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy to set a URL map for.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/subnetworks/{subnetwork}", + // "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", // "request": { - // "$ref": "Subnetwork" + // "$ref": "UrlMapReference" // }, // "response": { // "$ref": "Operation" @@ -109320,34 +125326,32 @@ func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.subnetworks.setIamPolicy": +// method id "compute.targetHttpProxies.testIamPermissions": -type SubnetworksSetIamPolicyCall struct { +type TargetHttpProxiesTestIamPermissionsCall struct { s *Service project string - region string resource string - regionsetpolicyrequest *RegionSetPolicyRequest + testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *SubnetworksService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *SubnetworksSetIamPolicyCall { - c := &SubnetworksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *TargetHttpProxiesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetHttpProxiesTestIamPermissionsCall { + c := &TargetHttpProxiesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region c.resource = resource - c.regionsetpolicyrequest = regionsetpolicyrequest + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SubnetworksSetIamPolicyCall) Fields(s ...googleapi.Field) *SubnetworksSetIamPolicyCall { +func (c *TargetHttpProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetHttpProxiesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -109355,35 +125359,35 @@ func (c *SubnetworksSetIamPolicyCall) Fields(s ...googleapi.Field) *SubnetworksS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksSetIamPolicyCall) Context(ctx context.Context) *SubnetworksSetIamPolicyCall { +func (c *TargetHttpProxiesTestIamPermissionsCall) Context(ctx context.Context) *TargetHttpProxiesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
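A sketch of the compute.targetHttpProxies.setUrlMap call covered above. The UrlMapReference.UrlMap field is assumed from the request schema; project, proxy, and UUID values are placeholders.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed path
)

func switchURLMap(ctx context.Context, svc *compute.Service, urlMapLink string) error {
	op, err := svc.TargetHttpProxies.
		SetUrlMap("my-project", "my-proxy", &compute.UrlMapReference{UrlMap: urlMapLink}).
		RequestId("123e4567-e89b-12d3-a456-426614174000"). // placeholder; reuse only when retrying the same logical request
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("setUrlMap operation %s started", op.Name)
	return nil
}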
-func (c *SubnetworksSetIamPolicyCall) Header() http.Header { +func (c *TargetHttpProxiesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SubnetworksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -109392,20 +125396,19 @@ func (c *SubnetworksSetIamPolicyCall) doRequest(alt string) (*http.Response, err req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *SubnetworksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.targetHttpProxies.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -109424,7 +125427,7 @@ func (c *SubnetworksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -109436,12 +125439,11 @@ func (c *SubnetworksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.subnetworks.setIamPolicy", + // "id": "compute.targetHttpProxies.testIamPermissions", // "parameterOrder": [ // "project", - // "region", // "resource" // ], // "parameters": { @@ -109452,309 +125454,180 @@ func (c *SubnetworksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "resource": { // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/subnetworks/{resource}/setIamPolicy", + // "path": "{project}/global/targetHttpProxies/{resource}/testIamPermissions", // "request": { - // "$ref": "RegionSetPolicyRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Policy" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.subnetworks.setPrivateIpGoogleAccess": +// method id "compute.targetHttpsProxies.aggregatedList": -type SubnetworksSetPrivateIpGoogleAccessCall struct { - s *Service - project string - region string - subnetwork string - subnetworkssetprivateipgoogleaccessrequest *SubnetworksSetPrivateIpGoogleAccessRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetHttpsProxiesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetPrivateIpGoogleAccess: Set whether VMs in this subnet can access -// Google services without assigning external IP addresses through -// Private Google Access. -func (r *SubnetworksService) SetPrivateIpGoogleAccess(project string, region string, subnetwork string, subnetworkssetprivateipgoogleaccessrequest *SubnetworksSetPrivateIpGoogleAccessRequest) *SubnetworksSetPrivateIpGoogleAccessCall { - c := &SubnetworksSetPrivateIpGoogleAccessCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of all TargetHttpsProxy resources, +// regional and global, available to the specified project. +func (r *TargetHttpsProxiesService) AggregatedList(project string) *TargetHttpsProxiesAggregatedListCall { + c := &TargetHttpsProxiesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.subnetwork = subnetwork - c.subnetworkssetprivateipgoogleaccessrequest = subnetworkssetprivateipgoogleaccessrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
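A sketch of the testIamPermissions call above, which reports the subset of the requested permissions that the caller actually holds. The permission strings are illustrative, and the TestPermissionsRequest/TestPermissionsResponse shapes are assumed from the request and response schemas referenced in the metadata.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed path
)

func checkProxyPermissions(ctx context.Context, svc *compute.Service) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.targetHttpProxies.get", // illustrative permission names
			"compute.targetHttpProxies.delete",
		},
	}
	resp, err := svc.TargetHttpProxies.
		TestIamPermissions("my-project", "my-proxy", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("caller holds: %v", resp.Permissions) // only the granted subset is echoed back
	return nil
}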
+// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *SubnetworksSetPrivateIpGoogleAccessCall) RequestId(requestId string) *SubnetworksSetPrivateIpGoogleAccessCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SubnetworksSetPrivateIpGoogleAccessCall) Fields(s ...googleapi.Field) *SubnetworksSetPrivateIpGoogleAccessCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *TargetHttpsProxiesAggregatedListCall) Filter(filter string) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("filter", filter) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *SubnetworksSetPrivateIpGoogleAccessCall) Context(ctx context.Context) *SubnetworksSetPrivateIpGoogleAccessCall { - c.ctx_ = ctx +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *TargetHttpsProxiesAggregatedListCall) MaxResults(maxResults int64) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *SubnetworksSetPrivateIpGoogleAccessCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SubnetworksSetPrivateIpGoogleAccessCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetworkssetprivateipgoogleaccessrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "subnetwork": c.subnetwork, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.subnetworks.setPrivateIpGoogleAccess" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Set whether VMs in this subnet can access Google services without assigning external IP addresses through Private Google Access.", - // "httpMethod": "POST", - // "id": "compute.subnetworks.setPrivateIpGoogleAccess", - // "parameterOrder": [ - // "project", - // "region", - // "subnetwork" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "subnetwork": { - // "description": "Name of the Subnetwork resource.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess", - // "request": { - // "$ref": "SubnetworksSetPrivateIpGoogleAccessRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.subnetworks.testIamPermissions": - -type SubnetworksTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *SubnetworksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *SubnetworksTestIamPermissionsCall { - c := &SubnetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetHttpsProxiesAggregatedListCall) OrderBy(orderBy string) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetHttpsProxiesAggregatedListCall) PageToken(pageToken string) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
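// Editor's sketch (not part of the vendored diff): the AggregatedList call added
// above exposes Filter, MaxResults, OrderBy and PageToken as chainable setters.
// A minimal usage sketch follows; the project name and filter value are
// placeholders, and the client construction via google.DefaultClient and
// compute.New is the conventional pattern for this generation of the library,
// not something shown in this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Real code would use Application Default Credentials or another
	// authenticated client; this is only a sketch.
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// The optional-parameter setters return the call, so they chain.
	resp, err := svc.TargetHttpsProxies.AggregatedList("my-project").
		Filter("name != example-proxy").
		OrderBy("creationTimestamp desc").
		MaxResults(100).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("next page token:", resp.NextPageToken)
}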
-func (c *SubnetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *SubnetworksTestIamPermissionsCall { +func (c *TargetHttpsProxiesAggregatedListCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetHttpsProxiesAggregatedListCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SubnetworksTestIamPermissionsCall) Context(ctx context.Context) *SubnetworksTestIamPermissionsCall { +func (c *TargetHttpsProxiesAggregatedListCall) Context(ctx context.Context) *TargetHttpsProxiesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SubnetworksTestIamPermissionsCall) Header() http.Header { +func (c *TargetHttpsProxiesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpsProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetHttpsProxies") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.subnetworks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetHttpsProxies.aggregatedList" call. +// Exactly one of *TargetHttpsProxyAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *TargetHttpsProxyAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. 
Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -109773,7 +125646,7 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &TargetHttpsProxyAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -109785,43 +125658,47 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.subnetworks.testIamPermissions", + // "description": "Retrieves the list of all TargetHttpsProxy resources, regional and global, available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.targetHttpsProxies.aggregatedList", // "parameterOrder": [ - // "project", - // "region", - // "resource" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/subnetworks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/aggregated/targetHttpsProxies", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "TargetHttpsProxyAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -109832,23 +125709,43 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } -// method id "compute.targetHttpProxies.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetHttpsProxiesAggregatedListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type TargetHttpProxiesDeleteCall struct { - s *Service - project string - targetHttpProxy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.targetHttpsProxies.delete": + +type TargetHttpsProxiesDeleteCall struct { + s *Service + project string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified TargetHttpProxy resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/delete -func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string) *TargetHttpProxiesDeleteCall { - c := &TargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified TargetHttpsProxy resource. 
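// Editor's sketch (not part of the vendored diff): the Pages helper added above
// drives the pageToken/nextPageToken loop for the caller. The Items map and the
// TargetHttpsProxies field on the scoped list follow the usual
// <Resource>AggregatedList / ScopedList shape and are assumptions here, since
// this diff only names the TargetHttpsProxyAggregatedList type.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listAllHTTPSProxies walks every page of the aggregated list and prints each
// proxy name under its scope key.
func listAllHTTPSProxies(ctx context.Context, svc *compute.Service, project string) error {
	return svc.TargetHttpsProxies.AggregatedList(project).Pages(ctx,
		func(page *compute.TargetHttpsProxyAggregatedList) error {
			for scope, scoped := range page.Items {
				for _, p := range scoped.TargetHttpsProxies {
					fmt.Printf("%s: %s\n", scope, p.Name)
				}
			}
			// Returning a non-nil error here halts the iteration, as the
			// Pages doc comment above describes.
			return nil
		})
}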
+func (r *TargetHttpsProxiesService) Delete(project string, targetHttpsProxy string) *TargetHttpsProxiesDeleteCall { + c := &TargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetHttpProxy = targetHttpProxy + c.targetHttpsProxy = targetHttpsProxy return c } @@ -109866,7 +125763,7 @@ func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetHttpProxiesDeleteCall) RequestId(requestId string) *TargetHttpProxiesDeleteCall { +func (c *TargetHttpsProxiesDeleteCall) RequestId(requestId string) *TargetHttpsProxiesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -109874,7 +125771,7 @@ func (c *TargetHttpProxiesDeleteCall) RequestId(requestId string) *TargetHttpPro // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpProxiesDeleteCall { +func (c *TargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -109882,21 +125779,21 @@ func (c *TargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpPr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpProxiesDeleteCall) Context(ctx context.Context) *TargetHttpProxiesDeleteCall { +func (c *TargetHttpsProxiesDeleteCall) Context(ctx context.Context) *TargetHttpsProxiesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetHttpProxiesDeleteCall) Header() http.Header { +func (c *TargetHttpsProxiesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -109905,7 +125802,7 @@ func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -109913,20 +125810,20 @@ func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpProxy": c.targetHttpProxy, + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpProxies.delete" call. +// Do executes the "compute.targetHttpsProxies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -109957,12 +125854,12 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified TargetHttpProxy resource.", + // "description": "Deletes the specified TargetHttpsProxy resource.", // "httpMethod": "DELETE", - // "id": "compute.targetHttpProxies.delete", + // "id": "compute.targetHttpsProxies.delete", // "parameterOrder": [ // "project", - // "targetHttpProxy" + // "targetHttpsProxy" // ], // "parameters": { // "project": { @@ -109977,15 +125874,15 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "location": "query", // "type": "string" // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy resource to delete.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", // "response": { // "$ref": "Operation" // }, @@ -109997,32 +125894,31 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.targetHttpProxies.get": +// method id "compute.targetHttpsProxies.get": -type TargetHttpProxiesGetCall struct { - s *Service - project string - targetHttpProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type TargetHttpsProxiesGetCall struct { + s *Service + project string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified TargetHttpProxy resource. Gets a list of -// available target HTTP proxies by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/get -func (r *TargetHttpProxiesService) Get(project string, targetHttpProxy string) *TargetHttpProxiesGetCall { - c := &TargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified TargetHttpsProxy resource. Gets a list of +// available target HTTPS proxies by making a list() request. +func (r *TargetHttpsProxiesService) Get(project string, targetHttpsProxy string) *TargetHttpsProxiesGetCall { + c := &TargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetHttpProxy = targetHttpProxy + c.targetHttpsProxy = targetHttpsProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
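// Editor's sketch (not part of the vendored diff): the requestId parameter
// documented above makes a retried delete idempotent on the server side. Only
// methods visible in this diff are used; the request ID value is a placeholder
// (any valid non-zero UUID), and waiting for the returned Operation to finish
// is omitted.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// deleteProxy issues the delete with a caller-chosen request ID so that a
// retry after a timeout is recognized by the server as the same request.
func deleteProxy(ctx context.Context, svc *compute.Service, project, proxy, requestID string) error {
	op, err := svc.TargetHttpsProxies.Delete(project, proxy).
		RequestId(requestID). // e.g. a UUID generated once per logical request
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("delete operation:", op.Name)
	return nil
}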
-func (c *TargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpProxiesGetCall { +func (c *TargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -110032,7 +125928,7 @@ func (c *TargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpProxi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpProxiesGetCall { +func (c *TargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -110040,21 +125936,21 @@ func (c *TargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpProx // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpProxiesGetCall) Context(ctx context.Context) *TargetHttpProxiesGetCall { +func (c *TargetHttpsProxiesGetCall) Context(ctx context.Context) *TargetHttpsProxiesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetHttpProxiesGetCall) Header() http.Header { +func (c *TargetHttpsProxiesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -110066,7 +125962,7 @@ func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -110074,20 +125970,20 @@ func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpProxy": c.targetHttpProxy, + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpProxies.get" call. -// Exactly one of *TargetHttpProxy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *TargetHttpProxy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetHttpsProxies.get" call. +// Exactly one of *TargetHttpsProxy or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetHttpsProxy.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxy, error) { +func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -110106,7 +126002,7 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetHttpProxy{ + ret := &TargetHttpsProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -110118,12 +126014,12 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp } return ret, nil // { - // "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request.", + // "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request.", // "httpMethod": "GET", - // "id": "compute.targetHttpProxies.get", + // "id": "compute.targetHttpsProxies.get", // "parameterOrder": [ // "project", - // "targetHttpProxy" + // "targetHttpsProxy" // ], // "parameters": { // "project": { @@ -110133,17 +126029,17 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp // "required": true, // "type": "string" // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy resource to return.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", // "response": { - // "$ref": "TargetHttpProxy" + // "$ref": "TargetHttpsProxy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -110154,24 +126050,23 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp } -// method id "compute.targetHttpProxies.insert": +// method id "compute.targetHttpsProxies.insert": -type TargetHttpProxiesInsertCall struct { - s *Service - project string - targethttpproxy *TargetHttpProxy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetHttpsProxiesInsertCall struct { + s *Service + project string + targethttpsproxy *TargetHttpsProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a TargetHttpProxy resource in the specified project +// Insert: Creates a TargetHttpsProxy resource in the specified project // using the data included in the request. 
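// Editor's sketch (not part of the vendored diff): the IfNoneMatch setter and
// googleapi.IsNotModified referenced in the doc comments above support
// ETag-based conditional reads. The cachedETag parameter is a placeholder for
// an ETag captured from an earlier response.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getIfChanged re-fetches the proxy only when it has changed since the ETag
// we cached earlier; a 304 surfaces as an error that IsNotModified recognizes.
func getIfChanged(ctx context.Context, svc *compute.Service, project, proxy, cachedETag string) (*compute.TargetHttpsProxy, error) {
	p, err := svc.TargetHttpsProxies.Get(project, proxy).
		IfNoneMatch(cachedETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("not modified; keep the cached copy")
		return nil, nil
	}
	return p, err
}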
-// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/insert -func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesInsertCall { - c := &TargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *TargetHttpsProxiesService) Insert(project string, targethttpsproxy *TargetHttpsProxy) *TargetHttpsProxiesInsertCall { + c := &TargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targethttpproxy = targethttpproxy + c.targethttpsproxy = targethttpsproxy return c } @@ -110189,7 +126084,7 @@ func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *Targe // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetHttpProxiesInsertCall) RequestId(requestId string) *TargetHttpProxiesInsertCall { +func (c *TargetHttpsProxiesInsertCall) RequestId(requestId string) *TargetHttpsProxiesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -110197,7 +126092,7 @@ func (c *TargetHttpProxiesInsertCall) RequestId(requestId string) *TargetHttpPro // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpProxiesInsertCall { +func (c *TargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -110205,35 +126100,35 @@ func (c *TargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpPr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpProxiesInsertCall) Context(ctx context.Context) *TargetHttpProxiesInsertCall { +func (c *TargetHttpsProxiesInsertCall) Context(ctx context.Context) *TargetHttpsProxiesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetHttpProxiesInsertCall) Header() http.Header { +func (c *TargetHttpsProxiesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -110246,14 +126141,14 @@ func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpProxies.insert" call. +// Do executes the "compute.targetHttpsProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -110284,9 +126179,9 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", + // "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.targetHttpProxies.insert", + // "id": "compute.targetHttpsProxies.insert", // "parameterOrder": [ // "project" // ], @@ -110304,9 +126199,9 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/global/targetHttpProxies", + // "path": "{project}/global/targetHttpsProxies", // "request": { - // "$ref": "TargetHttpProxy" + // "$ref": "TargetHttpsProxy" // }, // "response": { // "$ref": "Operation" @@ -110319,9 +126214,9 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.targetHttpProxies.list": +// method id "compute.targetHttpsProxies.list": -type TargetHttpProxiesListCall struct { +type TargetHttpsProxiesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -110330,11 +126225,10 @@ type TargetHttpProxiesListCall struct { header_ http.Header } -// List: Retrieves the list of TargetHttpProxy resources available to +// List: Retrieves the list of TargetHttpsProxy resources available to // the specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/list -func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCall { - c := &TargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesListCall { + c := &TargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -110361,7 +126255,7 @@ func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCa // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
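// Editor's sketch (not part of the vendored diff): a minimal insert of a
// TargetHttpsProxy using the Insert call above. The resource field values
// (name, URL map, certificate) are placeholders, and polling the returned
// Operation to completion is omitted.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// createProxy sketches compute.targetHttpsProxies.insert with placeholder
// resource references.
func createProxy(ctx context.Context, svc *compute.Service, project string) error {
	proxy := &compute.TargetHttpsProxy{
		Name:            "example-https-proxy",
		UrlMap:          "global/urlMaps/example-url-map",
		SslCertificates: []string{"global/sslCertificates/example-cert"},
	}
	op, err := svc.TargetHttpsProxies.Insert(project, proxy).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("insert operation:", op.Name)
	return nil
}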
-func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall { +func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesListCall { c.urlParams_.Set("filter", filter) return c } @@ -110372,7 +126266,7 @@ func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesList // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *TargetHttpProxiesListCall) MaxResults(maxResults int64) *TargetHttpProxiesListCall { +func (c *TargetHttpsProxiesListCall) MaxResults(maxResults int64) *TargetHttpsProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -110389,87 +126283,290 @@ func (c *TargetHttpProxiesListCall) MaxResults(maxResults int64) *TargetHttpProx // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *TargetHttpProxiesListCall) OrderBy(orderBy string) *TargetHttpProxiesListCall { +func (c *TargetHttpsProxiesListCall) OrderBy(orderBy string) *TargetHttpsProxiesListCall { c.urlParams_.Set("orderBy", orderBy) return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxiesListCall { - c.urlParams_.Set("pageToken", pageToken) +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetHttpsProxiesListCall) PageToken(pageToken string) *TargetHttpsProxiesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpsProxiesListCall) Context(ctx context.Context) *TargetHttpsProxiesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TargetHttpsProxiesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetHttpsProxies.list" call. +// Exactly one of *TargetHttpsProxyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetHttpsProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpsProxyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.targetHttpsProxies.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpsProxies", + // "response": { + // "$ref": "TargetHttpsProxyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.targetHttpsProxies.setQuicOverride": + +type TargetHttpsProxiesSetQuicOverrideCall struct { + s *Service + project string + targetHttpsProxy string + targethttpsproxiessetquicoverriderequest *TargetHttpsProxiesSetQuicOverrideRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetQuicOverride: Sets the QUIC override policy for TargetHttpsProxy. 
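// Editor's sketch (not part of the vendored diff): the Pages helper added above
// is usually the simpler choice, but the pageToken/nextPageToken doc comments
// describe a manual loop, sketched here for cases that need per-page control.
// The Items field is the conventional list payload and the page size is a
// placeholder.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listByPage pages through compute.targetHttpsProxies.list by hand.
func listByPage(ctx context.Context, svc *compute.Service, project string) error {
	pageToken := ""
	for {
		call := svc.TargetHttpsProxies.List(project).MaxResults(50).Context(ctx)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Do()
		if err != nil {
			return err
		}
		for _, p := range page.Items {
			fmt.Println(p.Name)
		}
		if page.NextPageToken == "" {
			return nil
		}
		pageToken = page.NextPageToken
	}
}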
+func (r *TargetHttpsProxiesService) SetQuicOverride(project string, targetHttpsProxy string, targethttpsproxiessetquicoverriderequest *TargetHttpsProxiesSetQuicOverrideRequest) *TargetHttpsProxiesSetQuicOverrideCall { + c := &TargetHttpsProxiesSetQuicOverrideCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpsProxy = targetHttpsProxy + c.targethttpsproxiessetquicoverriderequest = targethttpsproxiessetquicoverriderequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpsProxiesSetQuicOverrideCall) RequestId(requestId string) *TargetHttpsProxiesSetQuicOverrideCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpProxiesListCall { +func (c *TargetHttpsProxiesSetQuicOverrideCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetQuicOverrideCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetHttpProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpProxiesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpProxiesListCall) Context(ctx context.Context) *TargetHttpProxiesListCall { +func (c *TargetHttpsProxiesSetQuicOverrideCall) Context(ctx context.Context) *TargetHttpsProxiesSetQuicOverrideCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *TargetHttpProxiesListCall) Header() http.Header { +func (c *TargetHttpsProxiesSetQuicOverrideCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxiessetquicoverriderequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpProxies.list" call. -// Exactly one of *TargetHttpProxyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetHttpProxyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyList, error) { +// Do executes the "compute.targetHttpsProxies.setQuicOverride" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesSetQuicOverrideCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -110488,7 +126585,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetHttpProxyList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -110500,97 +126597,66 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt } return ret, nil // { - // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.targetHttpProxies.list", + // "description": "Sets the QUIC override policy for TargetHttpsProxy.", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.setQuicOverride", // "parameterOrder": [ - // "project" + // "project", + // "targetHttpsProxy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to set the QUIC override policy for. The name should conform to RFC1035.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetHttpProxies", + // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride", + // "request": { + // "$ref": "TargetHttpsProxiesSetQuicOverrideRequest" + // }, // "response": { - // "$ref": "TargetHttpProxyList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
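// Editor's sketch (not part of the vendored diff): usage of the new
// setQuicOverride call. This diff only references the
// TargetHttpsProxiesSetQuicOverrideRequest type by name, so the QuicOverride
// field and the "ENABLE" value are assumptions about its schema.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// enableQUIC sketches compute.targetHttpsProxies.setQuicOverride.
func enableQUIC(ctx context.Context, svc *compute.Service, project, proxy string) error {
	req := &compute.TargetHttpsProxiesSetQuicOverrideRequest{
		QuicOverride: "ENABLE", // assumed enum value; "NONE"/"DISABLE" are the likely alternatives
	}
	op, err := svc.TargetHttpsProxies.SetQuicOverride(project, proxy, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("setQuicOverride operation:", op.Name)
	return nil
}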
-func (c *TargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpProxyList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetHttpProxies.setUrlMap": +// method id "compute.targetHttpsProxies.setSslCertificates": -type TargetHttpProxiesSetUrlMapCall struct { - s *Service - project string - targetHttpProxy string - urlmapreference *UrlMapReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetHttpsProxiesSetSslCertificatesCall struct { + s *Service + project string + targetHttpsProxy string + targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpProxy. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/setUrlMap -func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy string, urlmapreference *UrlMapReference) *TargetHttpProxiesSetUrlMapCall { - c := &TargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. +func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHttpsProxy string, targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest) *TargetHttpsProxiesSetSslCertificatesCall { + c := &TargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetHttpProxy = targetHttpProxy - c.urlmapreference = urlmapreference + c.targetHttpsProxy = targetHttpsProxy + c.targethttpsproxiessetsslcertificatesrequest = targethttpsproxiessetsslcertificatesrequest return c } @@ -110608,7 +126674,7 @@ func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetHttpProxiesSetUrlMapCall) RequestId(requestId string) *TargetHttpProxiesSetUrlMapCall { +func (c *TargetHttpsProxiesSetSslCertificatesCall) RequestId(requestId string) *TargetHttpsProxiesSetSslCertificatesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -110616,7 +126682,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) RequestId(requestId string) *TargetHttp // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpProxiesSetUrlMapCall { +func (c *TargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetSslCertificatesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -110624,35 +126690,35 @@ func (c *TargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHtt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
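// The Pages helper removed above (and re-generated for other list calls later in this
// diff) wraps the Do / NextPageToken / PageToken loop for callers. A hedged sketch of
// driving it for targetHttpsProxies.list, assuming an already-constructed
// *compute.Service named svc and a placeholder project ID; the package name is arbitrary.
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta"
)

func listAllHTTPSProxies(ctx context.Context, svc *compute.Service) error {
	return svc.TargetHttpsProxies.List("my-project").
		MaxResults(50). // page size; Pages keeps fetching until NextPageToken is empty
		Pages(ctx, func(page *compute.TargetHttpsProxyList) error {
			for _, p := range page.Items {
				log.Printf("proxy %s -> url map %s", p.Name, p.UrlMap)
			}
			return nil // returning a non-nil error stops the iteration early
		})
}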
-func (c *TargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpProxiesSetUrlMapCall { +func (c *TargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) *TargetHttpsProxiesSetSslCertificatesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetHttpProxiesSetUrlMapCall) Header() http.Header { +func (c *TargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxiessetsslcertificatesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -110660,20 +126726,20 @@ func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpProxy": c.targetHttpProxy, + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpProxies.setUrlMap" call. +// Do executes the "compute.targetHttpsProxies.setSslCertificates" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -110704,12 +126770,12 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Changes the URL map for TargetHttpProxy.", + // "description": "Replaces SslCertificates for TargetHttpsProxy.", // "httpMethod": "POST", - // "id": "compute.targetHttpProxies.setUrlMap", + // "id": "compute.targetHttpsProxies.setSslCertificates", // "parameterOrder": [ // "project", - // "targetHttpProxy" + // "targetHttpsProxy" // ], // "parameters": { // "project": { @@ -110724,17 +126790,17 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper // "location": "query", // "type": "string" // }, - // "targetHttpProxy": { - // "description": "Name of the TargetHttpProxy to set a URL map for.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", // "request": { - // "$ref": "UrlMapReference" + // "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" // }, // "response": { // "$ref": "Operation" @@ -110747,32 +126813,54 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.targetHttpProxies.testIamPermissions": +// method id "compute.targetHttpsProxies.setSslPolicy": -type TargetHttpProxiesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetHttpsProxiesSetSslPolicyCall struct { + s *Service + project string + targetHttpsProxy string + sslpolicyreference *SslPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *TargetHttpProxiesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetHttpProxiesTestIamPermissionsCall { - c := &TargetHttpProxiesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetSslPolicy: Sets the SSL policy for TargetHttpsProxy. The SSL +// policy specifies the server-side support for SSL features. This +// affects connections between clients and the HTTPS proxy load +// balancer. They do not affect the connection between the load balancer +// and the backends. +func (r *TargetHttpsProxiesService) SetSslPolicy(project string, targetHttpsProxy string, sslpolicyreference *SslPolicyReference) *TargetHttpsProxiesSetSslPolicyCall { + c := &TargetHttpsProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.targetHttpsProxy = targetHttpsProxy + c.sslpolicyreference = sslpolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpsProxiesSetSslPolicyCall) RequestId(requestId string) *TargetHttpsProxiesSetSslPolicyCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetHttpProxiesTestIamPermissionsCall { +func (c *TargetHttpsProxiesSetSslPolicyCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetSslPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -110780,35 +126868,35 @@ func (c *TargetHttpProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpProxiesTestIamPermissionsCall) Context(ctx context.Context) *TargetHttpProxiesTestIamPermissionsCall { +func (c *TargetHttpsProxiesSetSslPolicyCall) Context(ctx context.Context) *TargetHttpsProxiesSetSslPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetHttpProxiesTestIamPermissionsCall) Header() http.Header { +func (c *TargetHttpsProxiesSetSslPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicyreference) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -110816,20 +126904,20 @@ func (c *TargetHttpProxiesTestIamPermissionsCall) doRequest(alt string) (*http.R } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpProxies.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.targetHttpsProxies.setSslPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -110848,7 +126936,7 @@ func (c *TargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -110860,12 +126948,12 @@ func (c *TargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends.", // "httpMethod": "POST", - // "id": "compute.targetHttpProxies.testIamPermissions", + // "id": "compute.targetHttpsProxies.setSslPolicy", // "parameterOrder": [ // "project", - // "resource" + // "targetHttpsProxy" // ], // "parameters": { // "project": { @@ -110875,46 +126963,51 @@ func (c *TargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetHttpProxies/{resource}/testIamPermissions", + // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "SslPolicyReference" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.targetHttpsProxies.delete": +// method id "compute.targetHttpsProxies.setUrlMap": -type TargetHttpsProxiesDeleteCall struct { +type TargetHttpsProxiesSetUrlMapCall struct { s *Service project string targetHttpsProxy string + urlmapreference *UrlMapReference urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified TargetHttpsProxy resource. -func (r *TargetHttpsProxiesService) Delete(project string, targetHttpsProxy string) *TargetHttpsProxiesDeleteCall { - c := &TargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetUrlMap: Changes the URL map for TargetHttpsProxy. +func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy string, urlmapreference *UrlMapReference) *TargetHttpsProxiesSetUrlMapCall { + c := &TargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.targetHttpsProxy = targetHttpsProxy + c.urlmapreference = urlmapreference return c } @@ -110932,7 +127025,7 @@ func (r *TargetHttpsProxiesService) Delete(project string, targetHttpsProxy stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetHttpsProxiesDeleteCall) RequestId(requestId string) *TargetHttpsProxiesDeleteCall { +func (c *TargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *TargetHttpsProxiesSetUrlMapCall { c.urlParams_.Set("requestId", requestId) return c } @@ -110940,7 +127033,7 @@ func (c *TargetHttpsProxiesDeleteCall) RequestId(requestId string) *TargetHttpsP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesDeleteCall { +func (c *TargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetUrlMapCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -110948,32 +127041,37 @@ func (c *TargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttps // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
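// The two methods generated above replace a proxy's certificate list and attach an SSL
// policy; both return an Operation. A hedged sketch, assuming an existing
// *compute.Service (svc), placeholder project/proxy names, and resource URLs that would
// come from earlier sslCertificates / sslPolicies insert calls. The requestId is the
// caller-chosen UUID described in the metadata above: retries that reuse it are
// deduplicated server-side.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta"
)

func rotateProxyTLS(ctx context.Context, svc *compute.Service, certURL, policyURL string) error {
	const project, proxy = "my-project", "my-https-proxy"

	certReq := &compute.TargetHttpsProxiesSetSslCertificatesRequest{
		SslCertificates: []string{certURL}, // full or partial resource URLs
	}
	if _, err := svc.TargetHttpsProxies.SetSslCertificates(project, proxy, certReq).
		RequestId("123e4567-e89b-12d3-a456-426614174000"). // any valid non-zero UUID
		Context(ctx).Do(); err != nil {
		return err
	}

	ref := &compute.SslPolicyReference{SslPolicy: policyURL}
	_, err := svc.TargetHttpsProxies.SetSslPolicy(project, proxy, ref).Context(ctx).Do()
	return err
}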
-func (c *TargetHttpsProxiesDeleteCall) Context(ctx context.Context) *TargetHttpsProxiesDeleteCall { +func (c *TargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpsProxiesSetUrlMapCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetHttpsProxiesDeleteCall) Header() http.Header { +func (c *TargetHttpsProxiesSetUrlMapCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -110985,14 +127083,14 @@ func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.delete" call. +// Do executes the "compute.targetHttpsProxies.setUrlMap" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -111023,9 +127121,9 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Deletes the specified TargetHttpsProxy resource.", - // "httpMethod": "DELETE", - // "id": "compute.targetHttpsProxies.delete", + // "description": "Changes the URL map for TargetHttpsProxy.", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.setUrlMap", // "parameterOrder": [ // "project", // "targetHttpsProxy" @@ -111043,51 +127141,267 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat // "location": "query", // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to delete.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetHttpsProxies.testIamPermissions": + +type TargetHttpsProxiesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *TargetHttpsProxiesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetHttpsProxiesTestIamPermissionsCall { + c := &TargetHttpsProxiesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpsProxiesTestIamPermissionsCall) Context(ctx context.Context) *TargetHttpsProxiesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
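// setUrlMap posts a UrlMapReference to {project}/targetHttpsProxies/{proxy}/setUrlMap,
// as the generated doRequest above shows. A minimal hedged example, assuming svc is an
// existing *compute.Service and that the names and URL map reference are placeholders:
package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta"
)

func pointProxyAtURLMap(ctx context.Context, svc *compute.Service, urlMapURL string) error {
	ref := &compute.UrlMapReference{UrlMap: urlMapURL} // e.g. a selfLink returned by urlMaps.insert
	op, err := svc.TargetHttpsProxies.SetUrlMap("my-project", "my-https-proxy", ref).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	_ = op // global Operation; poll GlobalOperations.Get until Status == "DONE" if needed
	return nil
}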
+func (c *TargetHttpsProxiesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpsProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetHttpsProxies.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + // "path": "{project}/global/targetHttpsProxies/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // 
"https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.targetHttpsProxies.get": +// method id "compute.targetInstances.aggregatedList": -type TargetHttpsProxiesGetCall struct { - s *Service - project string - targetHttpsProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type TargetInstancesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified TargetHttpsProxy resource. Gets a list of -// available target HTTPS proxies by making a list() request. -func (r *TargetHttpsProxiesService) Get(project string, targetHttpsProxy string) *TargetHttpsProxiesGetCall { - c := &TargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of target instances. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/aggregatedList +func (r *TargetInstancesService) AggregatedList(project string) *TargetInstancesAggregatedListCall { + c := &TargetInstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetHttpsProxy = targetHttpsProxy + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *TargetInstancesAggregatedListCall) MaxResults(maxResults int64) *TargetInstancesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. 
By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetInstancesAggregatedListCall) OrderBy(orderBy string) *TargetInstancesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetInstancesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesGetCall { +func (c *TargetInstancesAggregatedListCall) Fields(s ...googleapi.Field) *TargetInstancesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -111097,7 +127411,7 @@ func (c *TargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpsPro // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesGetCall { +func (c *TargetInstancesAggregatedListCall) IfNoneMatch(entityTag string) *TargetInstancesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -111105,21 +127419,21 @@ func (c *TargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpsPr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpsProxiesGetCall) Context(ctx context.Context) *TargetHttpsProxiesGetCall { +func (c *TargetInstancesAggregatedListCall) Context(ctx context.Context) *TargetInstancesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetHttpsProxiesGetCall) Header() http.Header { +func (c *TargetInstancesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -111131,7 +127445,7 @@ func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetInstances") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -111139,20 +127453,19 @@ func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.get" call. -// Exactly one of *TargetHttpsProxy or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetHttpsProxy.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetInstances.aggregatedList" call. +// Exactly one of *TargetInstanceAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *TargetInstanceAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxy, error) { +func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -111171,7 +127484,7 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetHttpsProxy{ + ret := &TargetInstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -111183,32 +127496,47 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt } return ret, nil // { - // "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request.", + // "description": "Retrieves an aggregated list of target instances.", // "httpMethod": "GET", - // "id": "compute.targetHttpsProxies.get", + // "id": "compute.targetInstances.aggregatedList", // "parameterOrder": [ - // "project", - // "targetHttpsProxy" + // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + // "path": "{project}/aggregated/targetInstances", // "response": { - // "$ref": "TargetHttpsProxy" + // "$ref": "TargetInstanceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -111219,23 +127547,46 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt } -// method id "compute.targetHttpsProxies.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
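// targetInstances.aggregatedList returns results grouped per zone; the Filter,
// MaxResults, OrderBy, and PageToken setters above map one-to-one onto the query
// parameters in the generated metadata. A hedged sketch that walks every zone's scoped
// list via Pages, assuming svc is an existing *compute.Service and reusing the filter
// example from the parameter description ("name != example-instance"):
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta"
)

func listTargetInstancesEverywhere(ctx context.Context, svc *compute.Service) error {
	call := svc.TargetInstances.AggregatedList("my-project").
		Filter("name != example-instance").
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.TargetInstanceAggregatedList) error {
		for zone, scoped := range page.Items { // keyed by "zones/<zone-name>"
			for _, ti := range scoped.TargetInstances {
				log.Printf("%s: %s -> %s", zone, ti.Name, ti.Instance)
			}
		}
		return nil
	})
}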
+func (c *TargetInstancesAggregatedListCall) Pages(ctx context.Context, f func(*TargetInstanceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type TargetHttpsProxiesInsertCall struct { - s *Service - project string - targethttpsproxy *TargetHttpsProxy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.targetInstances.delete": + +type TargetInstancesDeleteCall struct { + s *Service + project string + zone string + targetInstance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a TargetHttpsProxy resource in the specified project -// using the data included in the request. -func (r *TargetHttpsProxiesService) Insert(project string, targethttpsproxy *TargetHttpsProxy) *TargetHttpsProxiesInsertCall { - c := &TargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified TargetInstance resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/delete +func (r *TargetInstancesService) Delete(project string, zone string, targetInstance string) *TargetInstancesDeleteCall { + c := &TargetInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targethttpsproxy = targethttpsproxy + c.zone = zone + c.targetInstance = targetInstance return c } @@ -111253,7 +127604,7 @@ func (r *TargetHttpsProxiesService) Insert(project string, targethttpsproxy *Tar // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetHttpsProxiesInsertCall) RequestId(requestId string) *TargetHttpsProxiesInsertCall { +func (c *TargetInstancesDeleteCall) RequestId(requestId string) *TargetInstancesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -111261,7 +127612,7 @@ func (c *TargetHttpsProxiesInsertCall) RequestId(requestId string) *TargetHttpsP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesInsertCall { +func (c *TargetInstancesDeleteCall) Fields(s ...googleapi.Field) *TargetInstancesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -111269,55 +127620,52 @@ func (c *TargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttps // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpsProxiesInsertCall) Context(ctx context.Context) *TargetHttpsProxiesInsertCall { +func (c *TargetInstancesDeleteCall) Context(ctx context.Context) *TargetInstancesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *TargetHttpsProxiesInsertCall) Header() http.Header { +func (c *TargetInstancesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "targetInstance": c.targetInstance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.insert" call. +// Do executes the "compute.targetInstances.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -111348,11 +127696,13 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.targetHttpsProxies.insert", + // "description": "Deletes the specified TargetInstance resource.", + // "httpMethod": "DELETE", + // "id": "compute.targetInstances.delete", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "targetInstance" // ], // "parameters": { // "project": { @@ -111366,12 +127716,23 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "targetInstance": { + // "description": "Name of the TargetInstance resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/targetHttpsProxies", - // "request": { - // "$ref": "TargetHttpsProxy" - // }, + // "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", // "response": { // "$ref": "Operation" // }, @@ -111383,92 +127744,34 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.targetHttpsProxies.list": +// method id "compute.targetInstances.get": -type TargetHttpsProxiesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type TargetInstancesGetCall struct { + s *Service + project string + zone string + targetInstance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of TargetHttpsProxy resources available to -// the specified project. -func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesListCall { - c := &TargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified TargetInstance resource. Gets a list of +// available target instances by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/get +func (r *TargetInstancesService) Get(project string, zone string, targetInstance string) *TargetInstancesGetCall { + c := &TargetInstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
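// targetInstances.delete is zonal and returns an Operation; deletion is only final once
// that operation reports DONE. A hedged delete-and-wait sketch, assuming svc is an
// existing *compute.Service and that polling ZoneOperations.Get is an acceptable wait
// strategy for this client version (newer clients also expose a Wait call):
package example

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v0.beta"
)

func deleteTargetInstance(ctx context.Context, svc *compute.Service, zone, name string) error {
	const project = "my-project"
	op, err := svc.TargetInstances.Delete(project, zone, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		if op, err = svc.ZoneOperations.Get(project, zone, op.Name).Context(ctx).Do(); err != nil {
			return err
		}
	}
	if op.Error != nil && len(op.Error.Errors) > 0 {
		return fmt.Errorf("delete failed: %s", op.Error.Errors[0].Message)
	}
	return nil
}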
-func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *TargetHttpsProxiesListCall) MaxResults(maxResults int64) *TargetHttpsProxiesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *TargetHttpsProxiesListCall) OrderBy(orderBy string) *TargetHttpsProxiesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *TargetHttpsProxiesListCall) PageToken(pageToken string) *TargetHttpsProxiesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.zone = zone + c.targetInstance = targetInstance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesListCall { +func (c *TargetInstancesGetCall) Fields(s ...googleapi.Field) *TargetInstancesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -111478,7 +127781,7 @@ func (c *TargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpsPr // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesListCall { +func (c *TargetInstancesGetCall) IfNoneMatch(entityTag string) *TargetInstancesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -111486,21 +127789,21 @@ func (c *TargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpsP // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpsProxiesListCall) Context(ctx context.Context) *TargetHttpsProxiesListCall { +func (c *TargetInstancesGetCall) Context(ctx context.Context) *TargetInstancesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
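// The generated IfNoneMatch setter (the upstream comment's "In-None-Match" is a typo for
// the If-None-Match header) lets a caller skip re-downloading an unchanged resource. A
// hedged example of a conditional targetInstances.get, assuming svc is an existing
// *compute.Service and that the previous response's ETag was read from the embedded
// googleapi.ServerResponse header:
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta"
	"google.golang.org/api/googleapi"
)

func refreshTargetInstance(ctx context.Context, svc *compute.Service, zone, name, lastETag string) (*compute.TargetInstance, error) {
	ti, err := svc.TargetInstances.Get("my-project", zone, name).
		IfNoneMatch(lastETag). // server replies 304 if the resource is unchanged
		Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		log.Printf("%s unchanged since etag %s", name, lastETag)
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	// The fresh ETag is available via the embedded ServerResponse header.
	log.Printf("fetched %s, etag %s", ti.Name, ti.Header.Get("Etag"))
	return ti, nil
}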
-func (c *TargetHttpsProxiesListCall) Header() http.Header { +func (c *TargetInstancesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -111512,7 +127815,7 @@ func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -111520,19 +127823,21 @@ func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "targetInstance": c.targetInstance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.list" call. -// Exactly one of *TargetHttpsProxyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetHttpsProxyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetInstances.get" call. +// Exactly one of *TargetInstance or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetInstance.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyList, error) { +func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstance, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -111551,7 +127856,7 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetHttpsProxyList{ + ret := &TargetInstance{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -111563,47 +127868,40 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt } return ret, nil // { - // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", + // "description": "Returns the specified TargetInstance resource. Gets a list of available target instances by making a list() request.", // "httpMethod": "GET", - // "id": "compute.targetHttpsProxies.list", + // "id": "compute.targetInstances.get", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "targetInstance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "targetInstance": { + // "description": "Name of the TargetInstance resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "Name of the zone scoping this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetHttpsProxies", + // "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", // "response": { - // "$ref": "TargetHttpsProxyList" + // "$ref": "TargetInstance" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -111614,45 +127912,26 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetHttpsProxies.setQuicOverride": +// method id "compute.targetInstances.insert": -type TargetHttpsProxiesSetQuicOverrideCall struct { - s *Service - project string - targetHttpsProxy string - targethttpsproxiessetquicoverriderequest *TargetHttpsProxiesSetQuicOverrideRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetInstancesInsertCall struct { + s *Service + project string + zone string + targetinstance *TargetInstance + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetQuicOverride: Sets the QUIC override policy for TargetHttpsProxy. -func (r *TargetHttpsProxiesService) SetQuicOverride(project string, targetHttpsProxy string, targethttpsproxiessetquicoverriderequest *TargetHttpsProxiesSetQuicOverrideRequest) *TargetHttpsProxiesSetQuicOverrideCall { - c := &TargetHttpsProxiesSetQuicOverrideCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a TargetInstance resource in the specified project +// and zone using the data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/insert +func (r *TargetInstancesService) Insert(project string, zone string, targetinstance *TargetInstance) *TargetInstancesInsertCall { + c := &TargetInstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetHttpsProxy = targetHttpsProxy - c.targethttpsproxiessetquicoverriderequest = targethttpsproxiessetquicoverriderequest + c.zone = zone + c.targetinstance = targetinstance return c } @@ -111670,7 +127949,7 @@ func (r *TargetHttpsProxiesService) SetQuicOverride(project string, targetHttpsP // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetHttpsProxiesSetQuicOverrideCall) RequestId(requestId string) *TargetHttpsProxiesSetQuicOverrideCall { +func (c *TargetInstancesInsertCall) RequestId(requestId string) *TargetInstancesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -111678,7 +127957,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) RequestId(requestId string) *Tar // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesSetQuicOverrideCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetQuicOverrideCall { +func (c *TargetInstancesInsertCall) Fields(s ...googleapi.Field) *TargetInstancesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -111686,35 +127965,35 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Fields(s ...googleapi.Field) *Ta // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpsProxiesSetQuicOverrideCall) Context(ctx context.Context) *TargetHttpsProxiesSetQuicOverrideCall { +func (c *TargetInstancesInsertCall) Context(ctx context.Context) *TargetInstancesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetHttpsProxiesSetQuicOverrideCall) Header() http.Header { +func (c *TargetInstancesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxiessetquicoverriderequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetinstance) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -111722,20 +128001,20 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.setQuicOverride" call. +// Do executes the "compute.targetInstances.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetHttpsProxiesSetQuicOverrideCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -111766,12 +128045,12 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the QUIC override policy for TargetHttpsProxy.", + // "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.targetHttpsProxies.setQuicOverride", + // "id": "compute.targetInstances.insert", // "parameterOrder": [ // "project", - // "targetHttpsProxy" + // "zone" // ], // "parameters": { // "project": { @@ -111786,16 +128065,17 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Do(opts ...googleapi.CallOption) // "location": "query", // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to set the QUIC override policy for. The name should conform to RFC1035.", + // "zone": { + // "description": "Name of the zone scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride", + // "path": "{project}/zones/{zone}/targetInstances", // "request": { - // "$ref": "TargetHttpsProxiesSetQuicOverrideRequest" + // "$ref": "TargetInstance" // }, // "response": { // "$ref": "Operation" @@ -111808,107 +128088,160 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Do(opts ...googleapi.CallOption) } -// method id "compute.targetHttpsProxies.setSslCertificates": +// method id "compute.targetInstances.list": -type TargetHttpsProxiesSetSslCertificatesCall struct { - s *Service - project string - targetHttpsProxy string - targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetInstancesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. 
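Illustrative aside (not part of the vendored diff): a sketch of the new TargetInstances.Insert call together with the requestId retry semantics described above, assuming an existing *compute.Service; the resource values and field names are placeholders taken from the TargetInstance schema.

package gceexamples // hypothetical example package, not in this repository

import compute "google.golang.org/api/compute/v1"

// insertTargetInstance creates a TargetInstance. Passing the same requestID on a
// retry lets the server recognize and ignore the duplicate request.
func insertTargetInstance(svc *compute.Service, requestID string) (*compute.Operation, error) {
	ti := &compute.TargetInstance{
		Name:     "my-target-instance",                        // placeholder
		Instance: "zones/us-central1-a/instances/my-instance", // placeholder instance URL
	}
	call := svc.TargetInstances.Insert("my-project", "us-central1-a", ti)
	// requestID must be a valid, non-zero UUID per the parameter description above.
	return call.RequestId(requestID).Do()
}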
-func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHttpsProxy string, targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest) *TargetHttpsProxiesSetSslCertificatesCall { - c := &TargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of TargetInstance resources available to the +// specified project and zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/list +func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall { + c := &TargetInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetHttpsProxy = targetHttpsProxy - c.targethttpsproxiessetsslcertificatesrequest = targethttpsproxiessetsslcertificatesrequest + c.zone = zone return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetHttpsProxiesSetSslCertificatesCall) RequestId(requestId string) *TargetHttpsProxiesSetSslCertificatesCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetInstancesListCall) OrderBy(orderBy string) *TargetInstancesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetSslCertificatesCall { +func (c *TargetInstancesListCall) Fields(s ...googleapi.Field) *TargetInstancesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetInstancesListCall) IfNoneMatch(entityTag string) *TargetInstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) *TargetHttpsProxiesSetSslCertificatesCall { +func (c *TargetInstancesListCall) Context(ctx context.Context) *TargetInstancesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
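Illustrative aside (not part of the vendored diff): the filter, ordering, and page-size parameters documented above combine like this on the new TargetInstancesListCall; the filter expression mirrors the example given in the parameter description, and the project and zone are placeholders.

package gceexamples // hypothetical example package, not in this repository

import compute "google.golang.org/api/compute/v1"

// listNewestTargetInstances returns one page of target instances whose name is
// not "example-instance", newest first, at most 100 per page.
func listNewestTargetInstances(svc *compute.Service) (*compute.TargetInstanceList, error) {
	return svc.TargetInstances.List("my-project", "us-central1-a").
		Filter("name != example-instance").
		OrderBy("creationTimestamp desc").
		MaxResults(100).
		Do()
}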
-func (c *TargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { +func (c *TargetInstancesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxiessetsslcertificatesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.setSslCertificates" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.targetInstances.list" call. +// Exactly one of *TargetInstanceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetInstanceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -111927,7 +128260,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TargetInstanceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -111939,14 +128272,37 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Replaces SslCertificates for TargetHttpsProxy.", - // "httpMethod": "POST", - // "id": "compute.targetHttpsProxies.setSslCertificates", + // "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.targetInstances.list", // "parameterOrder": [ // "project", - // "targetHttpsProxy" + // "zone" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -111954,82 +128310,76 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + // "zone": { + // "description": "Name of the zone scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", - // "request": { - // "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" - // }, + // "path": "{project}/zones/{zone}/targetInstances", // "response": { - // "$ref": "Operation" + // "$ref": "TargetInstanceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.targetHttpsProxies.setSslPolicy": - -type TargetHttpsProxiesSetSslPolicyCall struct { - s *Service - project string - targetHttpsProxy string - sslpolicyreference *SslPolicyReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInstanceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// SetSslPolicy: Sets the SSL policy for TargetHttpsProxy. The SSL -// policy specifies the server-side support for SSL features. This -// affects connections between clients and the HTTPS proxy load -// balancer. They do not affect the connection between the load balancer -// and the backends. 
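Illustrative aside (not part of the vendored diff): the generated Pages helper shown in this hunk follows nextPageToken until it is empty, so callers rarely need to touch PageToken directly. A minimal sketch, assuming an existing *compute.Service and placeholder project and zone:

package gceexamples // hypothetical example package, not in this repository

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// forEachTargetInstance invokes visit for every target instance in the zone,
// letting Pages handle the paging loop.
func forEachTargetInstance(ctx context.Context, svc *compute.Service, visit func(*compute.TargetInstance)) error {
	return svc.TargetInstances.List("my-project", "us-central1-a").
		Pages(ctx, func(page *compute.TargetInstanceList) error {
			for _, ti := range page.Items {
				visit(ti)
			}
			return nil // returning a non-nil error here halts the iteration
		})
}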
-func (r *TargetHttpsProxiesService) SetSslPolicy(project string, targetHttpsProxy string, sslpolicyreference *SslPolicyReference) *TargetHttpsProxiesSetSslPolicyCall { - c := &TargetHttpsProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.targetHttpsProxy = targetHttpsProxy - c.sslpolicyreference = sslpolicyreference - return c +// method id "compute.targetInstances.testIamPermissions": + +type TargetInstancesTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetHttpsProxiesSetSslPolicyCall) RequestId(requestId string) *TargetHttpsProxiesSetSslPolicyCall { - c.urlParams_.Set("requestId", requestId) +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *TargetInstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetInstancesTestIamPermissionsCall { + c := &TargetInstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesSetSslPolicyCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetSslPolicyCall { +func (c *TargetInstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetInstancesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -112037,35 +128387,35 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Fields(s ...googleapi.Field) *Targe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpsProxiesSetSslPolicyCall) Context(ctx context.Context) *TargetHttpsProxiesSetSslPolicyCall { +func (c *TargetInstancesTestIamPermissionsCall) Context(ctx context.Context) *TargetInstancesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *TargetHttpsProxiesSetSslPolicyCall) Header() http.Header { +func (c *TargetInstancesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetInstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicyreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -112073,20 +128423,21 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.setSslPolicy" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetHttpsProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.targetInstances.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetInstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -112105,7 +128456,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -112117,12 +128468,13 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. 
They do not affect the connection between the load balancer and the backends.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.targetHttpsProxies.setSslPolicy", + // "id": "compute.targetInstances.testIamPermissions", // "parameterOrder": [ // "project", - // "targetHttpsProxy" + // "zone", + // "resource" // ], // "parameters": { // "project": { @@ -112132,51 +128484,58 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy", + // "path": "{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", // "request": { - // "$ref": "SslPolicyReference" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.targetHttpsProxies.setUrlMap": +// method id "compute.targetPools.addHealthCheck": -type TargetHttpsProxiesSetUrlMapCall struct { - s *Service - project string - targetHttpsProxy string - urlmapreference *UrlMapReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetPoolsAddHealthCheckCall struct { + s *Service + project string + region string + targetPool string + targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpsProxy. -func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy string, urlmapreference *UrlMapReference) *TargetHttpsProxiesSetUrlMapCall { - c := &TargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddHealthCheck: Adds health check URLs to a target pool. 
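Illustrative aside (not part of the vendored diff): the new compute.targetInstances.testIamPermissions method added in this hunk can be exercised as below, assuming an existing *compute.Service; the project, zone, resource, and permission string are placeholders.

package gceexamples // hypothetical example package, not in this repository

import compute "google.golang.org/api/compute/v1"

// canUseTargetInstance reports whether the caller holds the requested
// permissions on a target instance.
func canUseTargetInstance(svc *compute.Service) (bool, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.targetInstances.use"}, // placeholder permission
	}
	resp, err := svc.TargetInstances.TestIamPermissions("my-project", "us-central1-a", "my-target-instance", req).Do()
	if err != nil {
		return false, err
	}
	// The response echoes back only the permissions the caller actually has.
	return len(resp.Permissions) == len(req.Permissions), nil
}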
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addHealthCheck +func (r *TargetPoolsService) AddHealthCheck(project string, region string, targetPool string, targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest) *TargetPoolsAddHealthCheckCall { + c := &TargetPoolsAddHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetHttpsProxy = targetHttpsProxy - c.urlmapreference = urlmapreference + c.region = region + c.targetPool = targetPool + c.targetpoolsaddhealthcheckrequest = targetpoolsaddhealthcheckrequest return c } @@ -112194,7 +128553,7 @@ func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy s // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *TargetHttpsProxiesSetUrlMapCall { +func (c *TargetPoolsAddHealthCheckCall) RequestId(requestId string) *TargetPoolsAddHealthCheckCall { c.urlParams_.Set("requestId", requestId) return c } @@ -112202,7 +128561,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *TargetHtt // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetUrlMapCall { +func (c *TargetPoolsAddHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsAddHealthCheckCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -112210,35 +128569,35 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHt // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpsProxiesSetUrlMapCall { +func (c *TargetPoolsAddHealthCheckCall) Context(ctx context.Context) *TargetPoolsAddHealthCheckCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetHttpsProxiesSetUrlMapCall) Header() http.Header { +func (c *TargetPoolsAddHealthCheckCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddhealthcheckrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -112246,20 +128605,21 @@ func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.setUrlMap" call. +// Do executes the "compute.targetPools.addHealthCheck" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -112290,12 +128650,13 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Changes the URL map for TargetHttpsProxy.", + // "description": "Adds health check URLs to a target pool.", // "httpMethod": "POST", - // "id": "compute.targetHttpsProxies.setUrlMap", + // "id": "compute.targetPools.addHealthCheck", // "parameterOrder": [ // "project", - // "targetHttpsProxy" + // "region", + // "targetPool" // ], // "parameters": { // "project": { @@ -112305,22 +128666,29 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", + // "targetPool": { + // "description": "Name of the target pool to add a health check to.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", // "request": { - // "$ref": "UrlMapReference" + // "$ref": "TargetPoolsAddHealthCheckRequest" // }, // "response": { // "$ref": "Operation" @@ -112333,32 +128701,53 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.targetHttpsProxies.testIamPermissions": +// method id "compute.targetPools.addInstance": -type TargetHttpsProxiesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetPoolsAddInstanceCall struct { + s *Service + project string + region string + targetPool string + targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *TargetHttpsProxiesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetHttpsProxiesTestIamPermissionsCall { - c := &TargetHttpsProxiesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddInstance: Adds an instance to a target pool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addInstance +func (r *TargetPoolsService) AddInstance(project string, region string, targetPool string, targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest) *TargetPoolsAddInstanceCall { + c := &TargetPoolsAddInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.region = region + c.targetPool = targetPool + c.targetpoolsaddinstancerequest = targetpoolsaddinstancerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
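Illustrative aside (not part of the vendored diff): a sketch of the compute.targetPools.addHealthCheck method described above, assuming an existing *compute.Service; the project, region, pool name, and health-check URL are placeholders, and the request field names follow the TargetPoolsAddHealthCheckRequest schema referenced in this hunk.

package gceexamples // hypothetical example package, not in this repository

import compute "google.golang.org/api/compute/v1"

// addHealthCheck attaches an existing HTTP health check to a target pool.
func addHealthCheck(svc *compute.Service) (*compute.Operation, error) {
	req := &compute.TargetPoolsAddHealthCheckRequest{
		HealthChecks: []*compute.HealthCheckReference{
			{HealthCheck: "global/httpHealthChecks/my-health-check"}, // placeholder URL
		},
	}
	return svc.TargetPools.AddHealthCheck("my-project", "us-central1", "my-target-pool", req).Do()
}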
+func (c *TargetPoolsAddInstanceCall) RequestId(requestId string) *TargetPoolsAddInstanceCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetHttpsProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesTestIamPermissionsCall { +func (c *TargetPoolsAddInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsAddInstanceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -112366,35 +128755,35 @@ func (c *TargetHttpsProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetHttpsProxiesTestIamPermissionsCall) Context(ctx context.Context) *TargetHttpsProxiesTestIamPermissionsCall { +func (c *TargetPoolsAddInstanceCall) Context(ctx context.Context) *TargetPoolsAddInstanceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetHttpsProxiesTestIamPermissionsCall) Header() http.Header { +func (c *TargetPoolsAddInstanceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetHttpsProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddinstancerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addInstance") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -112402,20 +128791,21 @@ func (c *TargetHttpsProxiesTestIamPermissionsCall) doRequest(alt string) (*http. } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetHttpsProxies.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.targetPools.addInstance" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -112434,7 +128824,7 @@ func (c *TargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOpti if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -112446,12 +128836,13 @@ func (c *TargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Adds an instance to a target pool.", // "httpMethod": "POST", - // "id": "compute.targetHttpsProxies.testIamPermissions", + // "id": "compute.targetPools.addInstance", // "parameterOrder": [ // "project", - // "resource" + // "region", + // "targetPool" // ], // "parameters": { // "project": { @@ -112461,33 +128852,44 @@ func (c *TargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOpti // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to add instances to.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetHttpsProxies/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "TargetPoolsAddInstanceRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.targetInstances.aggregatedList": +// method id "compute.targetPools.aggregatedList": -type TargetInstancesAggregatedListCall struct { +type TargetPoolsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -112496,10 +128898,10 @@ type TargetInstancesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of target instances. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/aggregatedList -func (r *TargetInstancesService) AggregatedList(project string) *TargetInstancesAggregatedListCall { - c := &TargetInstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of target pools. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/aggregatedList +func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregatedListCall { + c := &TargetPoolsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -112526,7 +128928,7 @@ func (r *TargetInstancesService) AggregatedList(project string) *TargetInstances // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall { +func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -112537,7 +128939,7 @@ func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstanc // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *TargetInstancesAggregatedListCall) MaxResults(maxResults int64) *TargetInstancesAggregatedListCall { +func (c *TargetPoolsAggregatedListCall) MaxResults(maxResults int64) *TargetPoolsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -112554,7 +128956,7 @@ func (c *TargetInstancesAggregatedListCall) MaxResults(maxResults int64) *Target // // Currently, only sorting by name or creationTimestamp desc is // supported. 
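Illustrative aside (not part of the vendored diff): the compute.targetPools.addInstance method shown above takes a list of instance references, as sketched here with an existing *compute.Service; the project, region, pool name, instance URL, and request field names are placeholders drawn from the TargetPoolsAddInstanceRequest schema referenced in this hunk.

package gceexamples // hypothetical example package, not in this repository

import compute "google.golang.org/api/compute/v1"

// addInstanceToPool adds a VM, referenced by URL, to a target pool.
func addInstanceToPool(svc *compute.Service) (*compute.Operation, error) {
	req := &compute.TargetPoolsAddInstanceRequest{
		Instances: []*compute.InstanceReference{
			{Instance: "zones/us-central1-a/instances/my-instance"}, // placeholder URL
		},
	}
	return svc.TargetPools.AddInstance("my-project", "us-central1", "my-target-pool", req).Do()
}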
-func (c *TargetInstancesAggregatedListCall) OrderBy(orderBy string) *TargetInstancesAggregatedListCall { +func (c *TargetPoolsAggregatedListCall) OrderBy(orderBy string) *TargetPoolsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -112562,7 +128964,7 @@ func (c *TargetInstancesAggregatedListCall) OrderBy(orderBy string) *TargetInsta // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetInstancesAggregatedListCall { +func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPoolsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -112570,7 +128972,7 @@ func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetI // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetInstancesAggregatedListCall) Fields(s ...googleapi.Field) *TargetInstancesAggregatedListCall { +func (c *TargetPoolsAggregatedListCall) Fields(s ...googleapi.Field) *TargetPoolsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -112580,7 +128982,7 @@ func (c *TargetInstancesAggregatedListCall) Fields(s ...googleapi.Field) *Target // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetInstancesAggregatedListCall) IfNoneMatch(entityTag string) *TargetInstancesAggregatedListCall { +func (c *TargetPoolsAggregatedListCall) IfNoneMatch(entityTag string) *TargetPoolsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -112588,21 +128990,21 @@ func (c *TargetInstancesAggregatedListCall) IfNoneMatch(entityTag string) *Targe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetInstancesAggregatedListCall) Context(ctx context.Context) *TargetInstancesAggregatedListCall { +func (c *TargetPoolsAggregatedListCall) Context(ctx context.Context) *TargetPoolsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetInstancesAggregatedListCall) Header() http.Header { +func (c *TargetPoolsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -112614,7 +129016,7 @@ func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Respons var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetPools") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -112627,14 +129029,14 @@ func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetInstances.aggregatedList" call. -// Exactly one of *TargetInstanceAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *TargetInstanceAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetPools.aggregatedList" call. +// Exactly one of *TargetPoolAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TargetPoolAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceAggregatedList, error) { +func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetPoolAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -112653,7 +129055,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetInstanceAggregatedList{ + ret := &TargetPoolAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -112665,9 +129067,9 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Retrieves an aggregated list of target instances.", + // "description": "Retrieves an aggregated list of target pools.", // "httpMethod": "GET", - // "id": "compute.targetInstances.aggregatedList", + // "id": "compute.targetPools.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -112703,9 +129105,9 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // "type": "string" // } // }, - // "path": "{project}/aggregated/targetInstances", + // "path": "{project}/aggregated/targetPools", // "response": { - // "$ref": "TargetInstanceAggregatedList" + // "$ref": "TargetPoolAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -112719,7 +129121,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
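// Usage sketch (illustrative only): walking every page of an aggregated
// target-pool listing with a filter via the Pages helper documented above.
// svc is an assumed *compute.Service, and the scope-keyed Items map with a
// TargetPools slice is assumed from the usual aggregated-list schema.
// Assumed imports: context, fmt, google.golang.org/api/compute/v1.
func listAllTargetPools(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.TargetPools.AggregatedList(project).
		Filter(`name != "legacy-pool"`).
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.TargetPoolAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, tp := range scoped.TargetPools {
				fmt.Printf("%s: %s\n", scope, tp.Name)
			}
		}
		return nil // returning a non-nil error here stops the iteration
	})
}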
-func (c *TargetInstancesAggregatedListCall) Pages(ctx context.Context, f func(*TargetInstanceAggregatedList) error) error { +func (c *TargetPoolsAggregatedListCall) Pages(ctx context.Context, f func(*TargetPoolAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -112737,25 +129139,25 @@ func (c *TargetInstancesAggregatedListCall) Pages(ctx context.Context, f func(*T } } -// method id "compute.targetInstances.delete": +// method id "compute.targetPools.delete": -type TargetInstancesDeleteCall struct { - s *Service - project string - zone string - targetInstance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetPoolsDeleteCall struct { + s *Service + project string + region string + targetPool string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified TargetInstance resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/delete -func (r *TargetInstancesService) Delete(project string, zone string, targetInstance string) *TargetInstancesDeleteCall { - c := &TargetInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified target pool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/delete +func (r *TargetPoolsService) Delete(project string, region string, targetPool string) *TargetPoolsDeleteCall { + c := &TargetPoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.targetInstance = targetInstance + c.region = region + c.targetPool = targetPool return c } @@ -112773,7 +129175,7 @@ func (r *TargetInstancesService) Delete(project string, zone string, targetInsta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetInstancesDeleteCall) RequestId(requestId string) *TargetInstancesDeleteCall { +func (c *TargetPoolsDeleteCall) RequestId(requestId string) *TargetPoolsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -112781,7 +129183,7 @@ func (c *TargetInstancesDeleteCall) RequestId(requestId string) *TargetInstances // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetInstancesDeleteCall) Fields(s ...googleapi.Field) *TargetInstancesDeleteCall { +func (c *TargetPoolsDeleteCall) Fields(s ...googleapi.Field) *TargetPoolsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -112789,21 +129191,21 @@ func (c *TargetInstancesDeleteCall) Fields(s ...googleapi.Field) *TargetInstance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetInstancesDeleteCall) Context(ctx context.Context) *TargetInstancesDeleteCall { +func (c *TargetPoolsDeleteCall) Context(ctx context.Context) *TargetPoolsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
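// Usage sketch for the Delete call defined above: the method returns an
// Operation that completes asynchronously, so production code would poll the
// regional operation before assuming the pool is gone. Names are
// placeholders and svc is an assumed *compute.Service. Assumed imports:
// context, log, google.golang.org/api/compute/v1.
func deleteTargetPool(ctx context.Context, svc *compute.Service) {
	op, err := svc.TargetPools.Delete("my-project", "us-central1", "old-pool").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("delete failed: %v", err)
	}
	log.Printf("delete started, operation status: %s", op.Status)
}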
-func (c *TargetInstancesDeleteCall) Header() http.Header { +func (c *TargetPoolsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -112812,7 +129214,7 @@ func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -112820,21 +129222,21 @@ func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "targetInstance": c.targetInstance, + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetInstances.delete" call. +// Do executes the "compute.targetPools.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -112865,13 +129267,13 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified TargetInstance resource.", + // "description": "Deletes the specified target pool.", // "httpMethod": "DELETE", - // "id": "compute.targetInstances.delete", + // "id": "compute.targetPools.delete", // "parameterOrder": [ // "project", - // "zone", - // "targetInstance" + // "region", + // "targetPool" // ], // "parameters": { // "project": { @@ -112881,27 +129283,27 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "targetInstance": { - // "description": "Name of the TargetInstance resource to delete.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "Name of the zone scoping this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", + // "path": "{project}/regions/{region}/targetPools/{targetPool}", // "response": { // "$ref": "Operation" // }, @@ -112913,34 +129315,34 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.targetInstances.get": +// method id "compute.targetPools.get": -type TargetInstancesGetCall struct { - s *Service - project string - zone string - targetInstance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type TargetPoolsGetCall struct { + s *Service + project string + region string + targetPool string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified TargetInstance resource. Gets a list of -// available target instances by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/get -func (r *TargetInstancesService) Get(project string, zone string, targetInstance string) *TargetInstancesGetCall { - c := &TargetInstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified target pool. Gets a list of available +// target pools by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/get +func (r *TargetPoolsService) Get(project string, region string, targetPool string) *TargetPoolsGetCall { + c := &TargetPoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.targetInstance = targetInstance + c.region = region + c.targetPool = targetPool return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetInstancesGetCall) Fields(s ...googleapi.Field) *TargetInstancesGetCall { +func (c *TargetPoolsGetCall) Fields(s ...googleapi.Field) *TargetPoolsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -112950,7 +129352,7 @@ func (c *TargetInstancesGetCall) Fields(s ...googleapi.Field) *TargetInstancesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetInstancesGetCall) IfNoneMatch(entityTag string) *TargetInstancesGetCall { +func (c *TargetPoolsGetCall) IfNoneMatch(entityTag string) *TargetPoolsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -112958,21 +129360,21 @@ func (c *TargetInstancesGetCall) IfNoneMatch(entityTag string) *TargetInstancesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetInstancesGetCall) Context(ctx context.Context) *TargetInstancesGetCall { +func (c *TargetPoolsGetCall) Context(ctx context.Context) *TargetPoolsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetInstancesGetCall) Header() http.Header { +func (c *TargetPoolsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -112984,7 +129386,7 @@ func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -112992,21 +129394,21 @@ func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "targetInstance": c.targetInstance, + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetInstances.get" call. -// Exactly one of *TargetInstance or error will be non-nil. Any non-2xx +// Do executes the "compute.targetPools.get" call. +// Exactly one of *TargetPool or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *TargetInstance.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstance, error) { +// *TargetPool.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -113025,7 +129427,7 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetInstance{ + ret := &TargetPool{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -113037,13 +129439,13 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan } return ret, nil // { - // "description": "Returns the specified TargetInstance resource. Gets a list of available target instances by making a list() request.", + // "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request.", // "httpMethod": "GET", - // "id": "compute.targetInstances.get", + // "id": "compute.targetPools.get", // "parameterOrder": [ // "project", - // "zone", - // "targetInstance" + // "region", + // "targetPool" // ], // "parameters": { // "project": { @@ -113053,24 +129455,188 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan // "required": true, // "type": "string" // }, - // "targetInstance": { - // "description": "Name of the TargetInstance resource to return.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "Name of the zone scoping this request.", + // "targetPool": { + // "description": "Name of the TargetPool resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}", + // "response": { + // "$ref": "TargetPool" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetPools.getHealth": + +type TargetPoolsGetHealthCall struct { + s *Service + project string + region string + targetPool string + instancereference *InstanceReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetHealth: Gets the most recent health check results for each IP for +// the instance that is referenced by the given target pool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/getHealth +func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall { + c := &TargetPoolsGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.instancereference = instancereference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
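// Usage sketch for the GetHealth call introduced above: querying the health
// of a single instance behind a target pool. The InstanceReference.Instance
// field and the HealthStatus entry fields (Instance, HealthState) are
// assumptions taken from the v1 schema; URLs and names are placeholders.
// Assumed imports: context, fmt, google.golang.org/api/compute/v1.
func checkInstanceHealth(ctx context.Context, svc *compute.Service) error {
	ref := &compute.InstanceReference{
		Instance: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instances/web-1",
	}
	health, err := svc.TargetPools.GetHealth("my-project", "us-central1", "web-pool", ref).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, hs := range health.HealthStatus {
		fmt.Printf("instance %s: %s\n", hs.Instance, hs.HealthState)
	}
	return nil
}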
+func (c *TargetPoolsGetHealthCall) Fields(s ...googleapi.Field) *TargetPoolsGetHealthCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsGetHealthCall) Context(ctx context.Context) *TargetPoolsGetHealthCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetPoolsGetHealthCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancereference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/getHealth") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetPools.getHealth" call. +// Exactly one of *TargetPoolInstanceHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TargetPoolInstanceHealth.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPoolInstanceHealth, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetPoolInstanceHealth{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", + // "httpMethod": "POST", + // "id": "compute.targetPools.getHealth", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to which the queried instance belongs.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", + // "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth", + // "request": { + // "$ref": "InstanceReference" + // }, // "response": { - // "$ref": "TargetInstance" + // "$ref": "TargetPoolInstanceHealth" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -113081,26 +129647,26 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan } -// method id "compute.targetInstances.insert": +// method id "compute.targetPools.insert": -type TargetInstancesInsertCall struct { - s *Service - project string - zone string - targetinstance *TargetInstance - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetPoolsInsertCall struct { + s *Service + project string + region string + targetpool *TargetPool + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a TargetInstance resource in the specified project -// and zone using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/insert -func (r *TargetInstancesService) Insert(project string, zone string, targetinstance *TargetInstance) *TargetInstancesInsertCall { - c := &TargetInstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a target pool in the specified project and region +// using the data included in the request. 
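// Usage sketch for the Insert call introduced above: creating a small target
// pool from existing instance and health-check URLs. The TargetPool fields
// used here (Name, SessionAffinity, HealthChecks, Instances) are taken from
// the v1 schema; all URLs and names are placeholders. Assumed imports:
// context, google.golang.org/api/compute/v1.
func createTargetPool(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	pool := &compute.TargetPool{
		Name:            "web-pool",
		SessionAffinity: "NONE",
		HealthChecks: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/global/httpHealthChecks/basic-check",
		},
		Instances: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instances/web-1",
		},
	}
	return svc.TargetPools.Insert("my-project", "us-central1", pool).Context(ctx).Do()
}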
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/insert +func (r *TargetPoolsService) Insert(project string, region string, targetpool *TargetPool) *TargetPoolsInsertCall { + c := &TargetPoolsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.targetinstance = targetinstance + c.region = region + c.targetpool = targetpool return c } @@ -113118,7 +129684,7 @@ func (r *TargetInstancesService) Insert(project string, zone string, targetinsta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetInstancesInsertCall) RequestId(requestId string) *TargetInstancesInsertCall { +func (c *TargetPoolsInsertCall) RequestId(requestId string) *TargetPoolsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -113126,7 +129692,7 @@ func (c *TargetInstancesInsertCall) RequestId(requestId string) *TargetInstances // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetInstancesInsertCall) Fields(s ...googleapi.Field) *TargetInstancesInsertCall { +func (c *TargetPoolsInsertCall) Fields(s ...googleapi.Field) *TargetPoolsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -113134,35 +129700,35 @@ func (c *TargetInstancesInsertCall) Fields(s ...googleapi.Field) *TargetInstance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetInstancesInsertCall) Context(ctx context.Context) *TargetInstancesInsertCall { +func (c *TargetPoolsInsertCall) Context(ctx context.Context) *TargetPoolsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetInstancesInsertCall) Header() http.Header { +func (c *TargetPoolsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetinstance) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpool) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -113171,19 +129737,19 @@ func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetInstances.insert" call. +// Do executes the "compute.targetPools.insert" call. // Exactly one of *Operation or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -113214,12 +129780,12 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", + // "description": "Creates a target pool in the specified project and region using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.targetInstances.insert", + // "id": "compute.targetPools.insert", // "parameterOrder": [ // "project", - // "zone" + // "region" // ], // "parameters": { // "project": { @@ -113229,22 +129795,22 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone scoping this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/targetInstances", + // "path": "{project}/regions/{region}/targetPools", // "request": { - // "$ref": "TargetInstance" + // "$ref": "TargetPool" // }, // "response": { // "$ref": "Operation" @@ -113257,25 +129823,25 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.targetInstances.list": +// method id "compute.targetPools.list": -type TargetInstancesListCall struct { +type TargetPoolsListCall struct { s *Service project string - zone string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of TargetInstance resources available to the -// specified project and zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/list -func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall { - c := &TargetInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of target pools available to the specified +// project and region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/list +func (r *TargetPoolsService) List(project string, region string) *TargetPoolsListCall { + c := &TargetPoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone + c.region = region return c } @@ -113301,7 +129867,7 @@ func (r *TargetInstancesService) List(project string, zone string) *TargetInstan // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall { +func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { c.urlParams_.Set("filter", filter) return c } @@ -113312,7 +129878,7 @@ func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesListCall { +func (c *TargetPoolsListCall) MaxResults(maxResults int64) *TargetPoolsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -113329,7 +129895,7 @@ func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *TargetInstancesListCall) OrderBy(orderBy string) *TargetInstancesListCall { +func (c *TargetPoolsListCall) OrderBy(orderBy string) *TargetPoolsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -113337,7 +129903,7 @@ func (c *TargetInstancesListCall) OrderBy(orderBy string) *TargetInstancesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
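// Usage sketch: paging through the target pools of one region by hand with
// MaxResults and PageToken, mirroring the option documentation above (the
// Pages helper shown earlier does this loop for you). svc is an assumed
// *compute.Service. Assumed imports: context, fmt,
// google.golang.org/api/compute/v1.
func listRegionPools(ctx context.Context, svc *compute.Service, project, region string) error {
	pageToken := ""
	for {
		call := svc.TargetPools.List(project, region).MaxResults(50).Context(ctx)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Do()
		if err != nil {
			return err
		}
		for _, tp := range page.Items {
			fmt.Println(tp.Name)
		}
		if page.NextPageToken == "" {
			return nil
		}
		pageToken = page.NextPageToken
	}
}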
-func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesListCall { +func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -113345,7 +129911,7 @@ func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetInstancesListCall) Fields(s ...googleapi.Field) *TargetInstancesListCall { +func (c *TargetPoolsListCall) Fields(s ...googleapi.Field) *TargetPoolsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -113355,7 +129921,7 @@ func (c *TargetInstancesListCall) Fields(s ...googleapi.Field) *TargetInstancesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetInstancesListCall) IfNoneMatch(entityTag string) *TargetInstancesListCall { +func (c *TargetPoolsListCall) IfNoneMatch(entityTag string) *TargetPoolsListCall { c.ifNoneMatch_ = entityTag return c } @@ -113363,21 +129929,21 @@ func (c *TargetInstancesListCall) IfNoneMatch(entityTag string) *TargetInstances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetInstancesListCall) Context(ctx context.Context) *TargetInstancesListCall { +func (c *TargetPoolsListCall) Context(ctx context.Context) *TargetPoolsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetInstancesListCall) Header() http.Header { +func (c *TargetPoolsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -113389,7 +129955,7 @@ func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -113398,19 +129964,19 @@ func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetInstances.list" call. -// Exactly one of *TargetInstanceList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetInstanceList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetPools.list" call. +// Exactly one of *TargetPoolList or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *TargetPoolList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceList, error) { +func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -113429,7 +129995,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetInstanceList{ + ret := &TargetPoolList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -113441,12 +130007,12 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta } return ret, nil // { - // "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", + // "description": "Retrieves a list of target pools available to the specified project and region.", // "httpMethod": "GET", - // "id": "compute.targetInstances.list", + // "id": "compute.targetPools.list", // "parameterOrder": [ // "project", - // "zone" + // "region" // ], // "parameters": { // "filter": { @@ -113479,17 +130045,17 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // "required": true, // "type": "string" // }, - // "zone": { - // "description": "Name of the zone scoping this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/targetInstances", + // "path": "{project}/regions/{region}/targetPools", // "response": { - // "$ref": "TargetInstanceList" + // "$ref": "TargetPoolList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -113503,7 +130069,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInstanceList) error) error { +func (c *TargetPoolsListCall) Pages(ctx context.Context, f func(*TargetPoolList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -113521,190 +130087,27 @@ func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInsta } } -// method id "compute.targetInstances.testIamPermissions": - -type TargetInstancesTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
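// Usage sketch: a conditional re-fetch of a target pool using IfNoneMatch and
// googleapi.IsNotModified, as described in the Do documentation above.
// Reading the ETag back from the embedded ServerResponse header is an
// assumption about typical caching usage; names are placeholders. Assumed
// imports: context, fmt, google.golang.org/api/compute/v1,
// google.golang.org/api/googleapi.
func refreshIfChanged(ctx context.Context, svc *compute.Service, etag string) (*compute.TargetPool, string, error) {
	tp, err := svc.TargetPools.Get("my-project", "us-central1", "web-pool").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("cached copy is still current")
		return nil, etag, nil
	}
	if err != nil {
		return nil, "", err
	}
	return tp, tp.ServerResponse.Header.Get("Etag"), nil
}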
-func (r *TargetInstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetInstancesTestIamPermissionsCall { - c := &TargetInstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetInstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetInstancesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetInstancesTestIamPermissionsCall) Context(ctx context.Context) *TargetInstancesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetInstancesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetInstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetInstances.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetInstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.targetInstances.testIamPermissions", - // "parameterOrder": [ - // "project", - // "zone", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/targetInstances/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetPools.addHealthCheck": +// method id "compute.targetPools.removeHealthCheck": -type TargetPoolsAddHealthCheckCall struct { - s *Service - project string - region string - targetPool string - targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetPoolsRemoveHealthCheckCall struct { + s *Service + project string + region string + targetPool string + targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddHealthCheck: Adds health check URLs to a target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addHealthCheck -func (r *TargetPoolsService) AddHealthCheck(project string, region string, targetPool string, targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest) *TargetPoolsAddHealthCheckCall { - c := &TargetPoolsAddHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveHealthCheck: Removes health check URL from a target pool. 
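// Usage sketch for RemoveHealthCheck: detaching one health-check URL from a
// pool. The HealthChecks / HealthCheck field names on the request type are
// assumptions based on the v1 schema; the URL and names are placeholders.
// Assumed imports: context, google.golang.org/api/compute/v1.
func removeHealthCheck(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	req := &compute.TargetPoolsRemoveHealthCheckRequest{
		HealthChecks: []*compute.HealthCheckReference{
			{HealthCheck: "https://www.googleapis.com/compute/v1/projects/my-project/global/httpHealthChecks/basic-check"},
		},
	}
	return svc.TargetPools.RemoveHealthCheck("my-project", "us-central1", "web-pool", req).
		Context(ctx).
		Do()
}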
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeHealthCheck +func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, targetPool string, targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest) *TargetPoolsRemoveHealthCheckCall { + c := &TargetPoolsRemoveHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.targetPool = targetPool - c.targetpoolsaddhealthcheckrequest = targetpoolsaddhealthcheckrequest + c.targetpoolsremovehealthcheckrequest = targetpoolsremovehealthcheckrequest return c } @@ -113722,7 +130125,7 @@ func (r *TargetPoolsService) AddHealthCheck(project string, region string, targe // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetPoolsAddHealthCheckCall) RequestId(requestId string) *TargetPoolsAddHealthCheckCall { +func (c *TargetPoolsRemoveHealthCheckCall) RequestId(requestId string) *TargetPoolsRemoveHealthCheckCall { c.urlParams_.Set("requestId", requestId) return c } @@ -113730,7 +130133,7 @@ func (c *TargetPoolsAddHealthCheckCall) RequestId(requestId string) *TargetPools // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsAddHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsAddHealthCheckCall { +func (c *TargetPoolsRemoveHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveHealthCheckCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -113738,35 +130141,35 @@ func (c *TargetPoolsAddHealthCheckCall) Fields(s ...googleapi.Field) *TargetPool // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsAddHealthCheckCall) Context(ctx context.Context) *TargetPoolsAddHealthCheckCall { +func (c *TargetPoolsRemoveHealthCheckCall) Context(ctx context.Context) *TargetPoolsRemoveHealthCheckCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetPoolsAddHealthCheckCall) Header() http.Header { +func (c *TargetPoolsRemoveHealthCheckCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddhealthcheckrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremovehealthcheckrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -113781,14 +130184,14 @@ func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.addHealthCheck" call. +// Do executes the "compute.targetPools.removeHealthCheck" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -113819,9 +130222,9 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Adds health check URLs to a target pool.", + // "description": "Removes health check URL from a target pool.", // "httpMethod": "POST", - // "id": "compute.targetPools.addHealthCheck", + // "id": "compute.targetPools.removeHealthCheck", // "parameterOrder": [ // "project", // "region", @@ -113836,7 +130239,7 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -113848,16 +130251,16 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // }, // "targetPool": { - // "description": "Name of the target pool to add a health check to.", + // "description": "Name of the target pool to remove health checks from.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", + // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", // "request": { - // "$ref": "TargetPoolsAddHealthCheckRequest" + // "$ref": "TargetPoolsRemoveHealthCheckRequest" // }, // "response": { // "$ref": "Operation" @@ -113870,27 +130273,27 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.targetPools.addInstance": +// method id "compute.targetPools.removeInstance": -type TargetPoolsAddInstanceCall struct { - s *Service - project string - region string - targetPool string - targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetPoolsRemoveInstanceCall struct { + s *Service + project string + region string + targetPool string + targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddInstance: Adds an instance to a target pool. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addInstance -func (r *TargetPoolsService) AddInstance(project string, region string, targetPool string, targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest) *TargetPoolsAddInstanceCall { - c := &TargetPoolsAddInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveInstance: Removes instance URL from a target pool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeInstance +func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { + c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.targetPool = targetPool - c.targetpoolsaddinstancerequest = targetpoolsaddinstancerequest + c.targetpoolsremoveinstancerequest = targetpoolsremoveinstancerequest return c } @@ -113908,7 +130311,7 @@ func (r *TargetPoolsService) AddInstance(project string, region string, targetPo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetPoolsAddInstanceCall) RequestId(requestId string) *TargetPoolsAddInstanceCall { +func (c *TargetPoolsRemoveInstanceCall) RequestId(requestId string) *TargetPoolsRemoveInstanceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -113916,7 +130319,7 @@ func (c *TargetPoolsAddInstanceCall) RequestId(requestId string) *TargetPoolsAdd // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsAddInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsAddInstanceCall { +func (c *TargetPoolsRemoveInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveInstanceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -113924,35 +130327,35 @@ func (c *TargetPoolsAddInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsAd // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsAddInstanceCall) Context(ctx context.Context) *TargetPoolsAddInstanceCall { +func (c *TargetPoolsRemoveInstanceCall) Context(ctx context.Context) *TargetPoolsRemoveInstanceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
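// Usage sketch for RemoveInstance (the counterpart of AddInstance shown
// earlier): draining one instance out of a pool, e.g. before deleting the
// VM. The instance URL and names are placeholders and svc is an assumed
// *compute.Service. Assumed imports: context,
// google.golang.org/api/compute/v1.
func removeInstance(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	req := &compute.TargetPoolsRemoveInstanceRequest{
		Instances: []*compute.InstanceReference{
			{Instance: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instances/web-1"},
		},
	}
	return svc.TargetPools.RemoveInstance("my-project", "us-central1", "web-pool", req).
		Context(ctx).
		Do()
}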
-func (c *TargetPoolsAddInstanceCall) Header() http.Header { +func (c *TargetPoolsRemoveInstanceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddinstancerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremoveinstancerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addInstance") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeInstance") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -113967,14 +130370,14 @@ func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.addInstance" call. +// Do executes the "compute.targetPools.removeInstance" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -114005,9 +130408,9 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Adds an instance to a target pool.", + // "description": "Removes instance URL from a target pool.", // "httpMethod": "POST", - // "id": "compute.targetPools.addInstance", + // "id": "compute.targetPools.removeInstance", // "parameterOrder": [ // "project", // "region", @@ -114034,16 +130437,16 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // }, // "targetPool": { - // "description": "Name of the TargetPool resource to add instances to.", + // "description": "Name of the TargetPool resource to remove instances from.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance", + // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance", // "request": { - // "$ref": "TargetPoolsAddInstanceRequest" + // "$ref": "TargetPoolsRemoveInstanceRequest" // }, // "response": { // "$ref": "Operation" @@ -114056,156 +130459,118 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.targetPools.aggregatedList": +// method id "compute.targetPools.setBackup": -type TargetPoolsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type TargetPoolsSetBackupCall struct { + s *Service + project string + region string + targetPool string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of target pools. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/aggregatedList -func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregatedListCall { - c := &TargetPoolsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetBackup: Changes a backup target pool's configurations. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/setBackup +func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { + c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.targetPool = targetPool + c.targetreference = targetreference return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. 
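[Editor's note] To show how the renamed RemoveHealthCheck and RemoveInstance wrappers above are typically driven, here is a usage sketch. It assumes the beta Compute client that this diff appears to regenerate (import path google.golang.org/api/compute/v0.beta), Application Default Credentials, and placeholder project/region/pool names and resource URLs; none of those identifiers, nor the request struct field names, come from the diff itself.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // import path assumed from this diff
)

func main() {
	ctx := context.Background()

	// Application Default Credentials; the scope constant comes from the generated package.
	hc, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder identifiers for illustration only.
	const (
		project    = "my-project"
		region     = "us-central1"
		targetPool = "my-pool"
	)
	instanceURL := "https://www.googleapis.com/compute/beta/projects/my-project/zones/us-central1-a/instances/my-instance"
	healthCheckURL := "https://www.googleapis.com/compute/beta/projects/my-project/global/httpHealthChecks/my-check"

	// compute.targetPools.removeInstance: detach one instance from the pool.
	op, err := svc.TargetPools.RemoveInstance(project, region, targetPool,
		&compute.TargetPoolsRemoveInstanceRequest{
			Instances: []*compute.InstanceReference{{Instance: instanceURL}},
		}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("removeInstance operation: %s", op.Name)

	// compute.targetPools.removeHealthCheck: drop a legacy HTTP health check from the pool.
	op, err = svc.TargetPools.RemoveHealthCheck(project, region, targetPool,
		&compute.TargetPoolsRemoveHealthCheckRequest{
			HealthChecks: []*compute.HealthCheckReference{{HealthCheck: healthCheckURL}},
		}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("removeHealthCheck operation: %s", op.Name)
}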
You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *TargetPoolsAggregatedListCall) MaxResults(maxResults int64) *TargetPoolsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// FailoverRatio sets the optional parameter "failoverRatio": New +// failoverRatio value for the target pool. +func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetPoolsSetBackupCall { + c.urlParams_.Set("failoverRatio", fmt.Sprint(failoverRatio)) return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *TargetPoolsAggregatedListCall) OrderBy(orderBy string) *TargetPoolsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPoolsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
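[Editor's note] The SetBackup wrapper introduced here takes a TargetReference body plus the optional failoverRatio and requestId query parameters shown in its call methods. A minimal sketch, assuming an already-constructed *compute.Service; the backup-pool URL and the UUID are illustrative values, not taken from the diff.

package targetpools // illustrative package name

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed from this diff
)

// setBackupPool sketches compute.targetPools.setBackup; all names are placeholders.
func setBackupPool(ctx context.Context, svc *compute.Service) error {
	const (
		project   = "my-project"
		region    = "us-central1"
		primary   = "primary-pool"
		backupURL = "https://www.googleapis.com/compute/beta/projects/my-project/regions/us-central1/targetPools/backup-pool"
		requestID = "0b1d5f1e-0c2d-4b6a-9d6e-3f9a2c1d4e5f" // caller-generated UUID for idempotent retries
	)

	op, err := svc.TargetPools.SetBackup(project, region, primary,
		&compute.TargetReference{Target: backupURL}).
		FailoverRatio(0.1).   // fail over once fewer than 10% of primary instances are healthy
		RequestId(requestID). // server ignores duplicate submissions carrying the same ID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("setBackup operation: %s", op.Name)
	return nil
}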
+func (c *TargetPoolsSetBackupCall) RequestId(requestId string) *TargetPoolsSetBackupCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsAggregatedListCall) Fields(s ...googleapi.Field) *TargetPoolsAggregatedListCall { +func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetBackupCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetPoolsAggregatedListCall) IfNoneMatch(entityTag string) *TargetPoolsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsAggregatedListCall) Context(ctx context.Context) *TargetPoolsAggregatedListCall { +func (c *TargetPoolsSetBackupCall) Context(ctx context.Context) *TargetPoolsSetBackupCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetPoolsAggregatedListCall) Header() http.Header { +func (c *TargetPoolsSetBackupCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/setBackup") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.aggregatedList" call. -// Exactly one of *TargetPoolAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TargetPoolAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
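[Editor's note] Every mutating call in this hunk returns a *compute.Operation rather than the final resource, so callers usually poll the regional operation until it reports DONE. A rough polling sketch, assuming a *compute.Service and the standard RegionOperations wrapper; production code would add exponential backoff and an overall deadline.

package targetpools

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed from this diff
)

// waitRegionOp polls a regional Operation (as returned by the TargetPools
// calls above) until it reports DONE, then surfaces any operation errors.
func waitRegionOp(ctx context.Context, svc *compute.Service, project, region string, op *compute.Operation) error {
	for op.Status != "DONE" {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second): // fixed interval keeps the sketch short; prefer backoff
		}
		var err error
		op, err = svc.RegionOperations.Get(project, region, op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil && len(op.Error.Errors) > 0 {
		return fmt.Errorf("operation %s failed: %s", op.Name, op.Error.Errors[0].Message)
	}
	return nil
}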
-func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetPoolAggregatedList, error) { +// Do executes the "compute.targetPools.setBackup" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -114224,7 +130589,7 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetPoolAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -114236,123 +130601,91 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe } return ret, nil // { - // "description": "Retrieves an aggregated list of target pools.", - // "httpMethod": "GET", - // "id": "compute.targetPools.aggregatedList", + // "description": "Changes a backup target pool's configurations.", + // "httpMethod": "POST", + // "id": "compute.targetPools.setBackup", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "targetPool" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "failoverRatio": { + // "description": "New failoverRatio value for the target pool.", + // "format": "float", // "location": "query", - // "type": "string" + // "type": "number" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "targetPool": { + // "description": "Name of the TargetPool resource to set a backup pool for.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/targetPools", + // "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup", + // "request": { + // "$ref": "TargetReference" + // }, // "response": { - // "$ref": "TargetPoolAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
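[Editor's note] The Pages helper whose doc comment ends above is the usual way to walk aggregated results without tracking page tokens by hand; this hunk only relocates the aggregatedList wrapper within the regenerated file rather than dropping it. A sketch, assuming the aggregated-list types keep their generated names (an Items map keyed by scope, each entry holding a TargetPools slice).

package targetpools

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed from this diff
)

// listAllTargetPools walks every page of compute.targetPools.aggregatedList.
func listAllTargetPools(ctx context.Context, svc *compute.Service, project string) error {
	return svc.TargetPools.AggregatedList(project).Pages(ctx,
		func(page *compute.TargetPoolAggregatedList) error {
			// Items is keyed by scope, e.g. "regions/us-central1".
			for scope, scoped := range page.Items {
				for _, tp := range scoped.TargetPools {
					log.Printf("%s: %s", scope, tp.Name)
				}
			}
			return nil // returning a non-nil error stops the iteration
		})
}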
-func (c *TargetPoolsAggregatedListCall) Pages(ctx context.Context, f func(*TargetPoolAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetPools.delete": +// method id "compute.targetPools.testIamPermissions": -type TargetPoolsDeleteCall struct { - s *Service - project string - region string - targetPool string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetPoolsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/delete -func (r *TargetPoolsService) Delete(project string, region string, targetPool string) *TargetPoolsDeleteCall { - c := &TargetPoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *TargetPoolsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetPoolsTestIamPermissionsCall { + c := &TargetPoolsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targetPool = targetPool - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetPoolsDeleteCall) RequestId(requestId string) *TargetPoolsDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsDeleteCall) Fields(s ...googleapi.Field) *TargetPoolsDeleteCall { +func (c *TargetPoolsTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetPoolsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -114360,52 +130693,57 @@ func (c *TargetPoolsDeleteCall) Fields(s ...googleapi.Field) *TargetPoolsDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
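[Editor's note] Because each call type carries the same Context and RequestId plumbing seen above, a deletion is typically bounded by a context deadline and made idempotent with a caller-supplied UUID. A brief sketch with placeholder names; the UUID is illustrative and should be generated per request.

package targetpools

import (
	"context"
	"time"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed from this diff
)

// deleteTargetPool issues compute.targetPools.delete with a deadline and an
// idempotency key.
func deleteTargetPool(ctx context.Context, svc *compute.Service, project, region, pool string) (*compute.Operation, error) {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	return svc.TargetPools.Delete(project, region, pool).
		RequestId("6fa459ea-ee8a-3ca4-894e-db77e160355e"). // placeholder UUID
		Context(ctx).
		Do()
}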
-func (c *TargetPoolsDeleteCall) Context(ctx context.Context) *TargetPoolsDeleteCall { +func (c *TargetPoolsTestIamPermissionsCall) Context(ctx context.Context) *TargetPoolsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetPoolsDeleteCall) Header() http.Header { +func (c *TargetPoolsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetPoolsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.targetPools.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetPoolsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -114424,7 +130762,7 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -114436,13 +130774,13 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified target pool.", - // "httpMethod": "DELETE", - // "id": "compute.targetPools.delete", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.targetPools.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "targetPool" + // "resource" // ], // "parameters": { // "project": { @@ -114453,131 +130791,130 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to delete.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}", + // "path": "{project}/regions/{region}/targetPools/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.targetPools.get": +// method id "compute.targetSslProxies.delete": -type TargetPoolsGetCall struct { - s *Service - project string - region string - targetPool string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type TargetSslProxiesDeleteCall struct { + s *Service + project string + targetSslProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified target pool. Gets a list of available -// target pools by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/get -func (r *TargetPoolsService) Get(project string, region string, targetPool string) *TargetPoolsGetCall { - c := &TargetPoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified TargetSslProxy resource. +func (r *TargetSslProxiesService) Delete(project string, targetSslProxy string) *TargetSslProxiesDeleteCall { + c := &TargetSslProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetPool = targetPool + c.targetSslProxy = targetSslProxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetSslProxiesDeleteCall) RequestId(requestId string) *TargetSslProxiesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
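[Editor's note] The testIamPermissions wrapper added just above echoes back whichever of the supplied permission strings the caller actually holds on the pool. A sketch; the permission names below are ordinary Compute IAM permission strings listed purely as examples, and the struct field names are assumed from the generated TestPermissionsRequest/Response types.

package targetpools

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed from this diff
)

// checkPoolPermissions asks compute.targetPools.testIamPermissions which of
// the given permissions the caller has on the named pool (or numeric id).
func checkPoolPermissions(ctx context.Context, svc *compute.Service, project, region, pool string) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{ // example permission names
			"compute.targetPools.get",
			"compute.targetPools.delete",
		},
	}
	resp, err := svc.TargetPools.TestIamPermissions(project, region, pool, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("granted: %v", resp.Permissions) // subset of the requested permissions
	return nil
}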
-func (c *TargetPoolsGetCall) Fields(s ...googleapi.Field) *TargetPoolsGetCall { +func (c *TargetSslProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetSslProxiesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetPoolsGetCall) IfNoneMatch(entityTag string) *TargetPoolsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsGetCall) Context(ctx context.Context) *TargetPoolsGetCall { +func (c *TargetSslProxiesDeleteCall) Context(ctx context.Context) *TargetSslProxiesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetPoolsGetCall) Header() http.Header { +func (c *TargetSslProxiesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, + "project": c.project, + "targetSslProxy": c.targetSslProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.get" call. -// Exactly one of *TargetPool or error will be non-nil. Any non-2xx +// Do executes the "compute.targetSslProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *TargetPool.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, error) { +func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -114596,7 +130933,7 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetPool{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -114608,13 +130945,12 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro } return ret, nil // { - // "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.targetPools.get", + // "description": "Deletes the specified TargetSslProxy resource.", + // "httpMethod": "DELETE", + // "id": "compute.targetSslProxies.delete", // "parameterOrder": [ // "project", - // "region", - // "targetPool" + // "targetSslProxy" // ], // "parameters": { // "project": { @@ -114624,121 +130960,121 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to return.", + // "targetSslProxy": { + // "description": "Name of the TargetSslProxy resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}", + // "path": "{project}/global/targetSslProxies/{targetSslProxy}", // "response": { - // "$ref": "TargetPool" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.targetPools.getHealth": +// method id "compute.targetSslProxies.get": -type TargetPoolsGetHealthCall struct { - s *Service - project string - region string - targetPool string - instancereference *InstanceReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetSslProxiesGetCall struct { + s *Service + project string + targetSslProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// GetHealth: Gets the most recent health check results for each IP for -// the instance that is referenced by the given target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/getHealth -func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall { - c := &TargetPoolsGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified TargetSslProxy resource. Gets a list of +// available target SSL proxies by making a list() request. +func (r *TargetSslProxiesService) Get(project string, targetSslProxy string) *TargetSslProxiesGetCall { + c := &TargetSslProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetPool = targetPool - c.instancereference = instancereference + c.targetSslProxy = targetSslProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsGetHealthCall) Fields(s ...googleapi.Field) *TargetPoolsGetHealthCall { +func (c *TargetSslProxiesGetCall) Fields(s ...googleapi.Field) *TargetSslProxiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetSslProxiesGetCall) IfNoneMatch(entityTag string) *TargetSslProxiesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
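[Editor's note] The TargetSslProxies Get call above supports conditional reads via If-None-Match, and its doc comments point at googleapi.IsNotModified for detecting the 304 case. A sketch of a cached read, assuming the caller keeps the ETag from a previous response; it relies on the generated struct embedding googleapi.ServerResponse, as the Do doc comments in this hunk describe.

package targetsslproxies // illustrative package name

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed from this diff
	"google.golang.org/api/googleapi"
)

// getProxyIfChanged re-fetches a TargetSslProxy only if it changed since the
// ETag captured earlier; prevETag == "" means an unconditional read.
func getProxyIfChanged(ctx context.Context, svc *compute.Service, project, proxy, prevETag string) (*compute.TargetSslProxy, string, error) {
	call := svc.TargetSslProxies.Get(project, proxy).Context(ctx)
	if prevETag != "" {
		call = call.IfNoneMatch(prevETag)
	}
	p, err := call.Do()
	if googleapi.IsNotModified(err) {
		log.Printf("%s unchanged", proxy)
		return nil, prevETag, nil
	}
	if err != nil {
		return nil, "", err
	}
	// ServerResponse is embedded, so the new ETag is available from the headers.
	return p, p.Header.Get("Etag"), nil
}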
-func (c *TargetPoolsGetHealthCall) Context(ctx context.Context) *TargetPoolsGetHealthCall { +func (c *TargetSslProxiesGetCall) Context(ctx context.Context) *TargetSslProxiesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetPoolsGetHealthCall) Header() http.Header { +func (c *TargetSslProxiesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancereference) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/getHealth") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, + "project": c.project, + "targetSslProxy": c.targetSslProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.getHealth" call. -// Exactly one of *TargetPoolInstanceHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TargetPoolInstanceHealth.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetSslProxies.get" call. +// Exactly one of *TargetSslProxy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetSslProxy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPoolInstanceHealth, error) { +func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -114757,7 +131093,7 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetPoolInstanceHealth{ + ret := &TargetSslProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -114769,13 +131105,12 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool } return ret, nil // { - // "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", - // "httpMethod": "POST", - // "id": "compute.targetPools.getHealth", + // "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.targetSslProxies.get", // "parameterOrder": [ // "project", - // "region", - // "targetPool" + // "targetSslProxy" // ], // "parameters": { // "project": { @@ -114785,27 +131120,17 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to which the queried instance belongs.", + // "targetSslProxy": { + // "description": "Name of the TargetSslProxy resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth", - // "request": { - // "$ref": "InstanceReference" - // }, + // "path": "{project}/global/targetSslProxies/{targetSslProxy}", // "response": { - // "$ref": "TargetPoolInstanceHealth" + // "$ref": "TargetSslProxy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -114816,26 +131141,23 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool } -// method id "compute.targetPools.insert": +// method id "compute.targetSslProxies.insert": -type TargetPoolsInsertCall struct { - s *Service - project string - region string - targetpool *TargetPool - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetSslProxiesInsertCall struct { + s *Service + project string + targetsslproxy *TargetSslProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a target pool in the specified project and region +// Insert: Creates a TargetSslProxy resource in the specified project // using the data included in the request. 
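[Editor's note] The Insert wrapper above builds the proxy from a request body rather than URL parameters. A sketch of creating a TargetSslProxy; the field names (Name, Service, SslCertificates, ProxyHeader) are my reading of the generated struct rather than something shown in this hunk, and every resource URL is a placeholder.

package targetsslproxies

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed from this diff
)

// createSslProxy sketches compute.targetSslProxies.insert. Service points at
// the global backend service the proxy should front; URLs are placeholders.
func createSslProxy(ctx context.Context, svc *compute.Service, project string) error {
	proxy := &compute.TargetSslProxy{
		Name:            "example-ssl-proxy",
		Service:         "https://www.googleapis.com/compute/beta/projects/my-project/global/backendServices/example-backend",
		SslCertificates: []string{"https://www.googleapis.com/compute/beta/projects/my-project/global/sslCertificates/example-cert"},
		ProxyHeader:     "NONE", // or "PROXY_V1"
	}
	op, err := svc.TargetSslProxies.Insert(project, proxy).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("insert operation: %s", op.Name)
	return nil
}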
-// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/insert -func (r *TargetPoolsService) Insert(project string, region string, targetpool *TargetPool) *TargetPoolsInsertCall { - c := &TargetPoolsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *TargetSslProxiesService) Insert(project string, targetsslproxy *TargetSslProxy) *TargetSslProxiesInsertCall { + c := &TargetSslProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetpool = targetpool + c.targetsslproxy = targetsslproxy return c } @@ -114853,7 +131175,7 @@ func (r *TargetPoolsService) Insert(project string, region string, targetpool *T // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetPoolsInsertCall) RequestId(requestId string) *TargetPoolsInsertCall { +func (c *TargetSslProxiesInsertCall) RequestId(requestId string) *TargetSslProxiesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -114861,7 +131183,7 @@ func (c *TargetPoolsInsertCall) RequestId(requestId string) *TargetPoolsInsertCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsInsertCall) Fields(s ...googleapi.Field) *TargetPoolsInsertCall { +func (c *TargetSslProxiesInsertCall) Fields(s ...googleapi.Field) *TargetSslProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -114869,35 +131191,35 @@ func (c *TargetPoolsInsertCall) Fields(s ...googleapi.Field) *TargetPoolsInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsInsertCall) Context(ctx context.Context) *TargetPoolsInsertCall { +func (c *TargetSslProxiesInsertCall) Context(ctx context.Context) *TargetSslProxiesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetPoolsInsertCall) Header() http.Header { +func (c *TargetSslProxiesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpool) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -114906,19 +131228,18 @@ func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.insert" call. +// Do executes the "compute.targetSslProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -114949,12 +131270,11 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a target pool in the specified project and region using the data included in the request.", + // "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.targetPools.insert", + // "id": "compute.targetSslProxies.insert", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "project": { @@ -114964,22 +131284,15 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools", + // "path": "{project}/global/targetSslProxies", // "request": { - // "$ref": "TargetPool" + // "$ref": "TargetSslProxy" // }, // "response": { // "$ref": "Operation" @@ -114992,25 +131305,22 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.targetPools.list": +// method id "compute.targetSslProxies.list": -type TargetPoolsListCall struct { +type TargetSslProxiesListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of target pools available to the specified -// project and region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/list -func (r *TargetPoolsService) List(project string, region string) *TargetPoolsListCall { - c := &TargetPoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of TargetSslProxy resources available to the +// specified project. +func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall { + c := &TargetSslProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -115036,7 +131346,7 @@ func (r *TargetPoolsService) List(project string, region string) *TargetPoolsLis // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { +func (c *TargetSslProxiesListCall) Filter(filter string) *TargetSslProxiesListCall { c.urlParams_.Set("filter", filter) return c } @@ -115047,7 +131357,7 @@ func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *TargetPoolsListCall) MaxResults(maxResults int64) *TargetPoolsListCall { +func (c *TargetSslProxiesListCall) MaxResults(maxResults int64) *TargetSslProxiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -115064,7 +131374,7 @@ func (c *TargetPoolsListCall) MaxResults(maxResults int64) *TargetPoolsListCall // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *TargetPoolsListCall) OrderBy(orderBy string) *TargetPoolsListCall { +func (c *TargetSslProxiesListCall) OrderBy(orderBy string) *TargetSslProxiesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -115072,7 +131382,7 @@ func (c *TargetPoolsListCall) OrderBy(orderBy string) *TargetPoolsListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
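[Editor's note] Where the Pages helper is not convenient, the MaxResults and PageToken setters described above can drive pagination by hand using the NextPageToken carried in each response. A sketch against the list call in this hunk; it assumes the usual Items and NextPageToken fields on TargetSslProxyList.

package targetsslproxies

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed from this diff
)

// listSslProxiesByHand pages through compute.targetSslProxies.list manually.
func listSslProxiesByHand(ctx context.Context, svc *compute.Service, project string) error {
	token := ""
	for {
		call := svc.TargetSslProxies.List(project).MaxResults(100).Context(ctx)
		if token != "" {
			call = call.PageToken(token)
		}
		page, err := call.Do()
		if err != nil {
			return err
		}
		for _, p := range page.Items {
			log.Printf("proxy: %s", p.Name)
		}
		if page.NextPageToken == "" {
			return nil
		}
		token = page.NextPageToken
	}
}

In most callers the Pages helper is simpler; manual paging mainly matters when the token must be persisted between invocations.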
-func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall { +func (c *TargetSslProxiesListCall) PageToken(pageToken string) *TargetSslProxiesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -115080,7 +131390,7 @@ func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsListCall) Fields(s ...googleapi.Field) *TargetPoolsListCall { +func (c *TargetSslProxiesListCall) Fields(s ...googleapi.Field) *TargetSslProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -115090,7 +131400,7 @@ func (c *TargetPoolsListCall) Fields(s ...googleapi.Field) *TargetPoolsListCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetPoolsListCall) IfNoneMatch(entityTag string) *TargetPoolsListCall { +func (c *TargetSslProxiesListCall) IfNoneMatch(entityTag string) *TargetSslProxiesListCall { c.ifNoneMatch_ = entityTag return c } @@ -115098,21 +131408,21 @@ func (c *TargetPoolsListCall) IfNoneMatch(entityTag string) *TargetPoolsListCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsListCall) Context(ctx context.Context) *TargetPoolsListCall { +func (c *TargetSslProxiesListCall) Context(ctx context.Context) *TargetSslProxiesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetPoolsListCall) Header() http.Header { +func (c *TargetSslProxiesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -115124,7 +131434,7 @@ func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -115133,19 +131443,18 @@ func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.list" call. -// Exactly one of *TargetPoolList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *TargetPoolList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetSslProxies.list" call. +// Exactly one of *TargetSslProxyList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TargetSslProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, error) { +func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -115164,7 +131473,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetPoolList{ + ret := &TargetSslProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -115176,12 +131485,11 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, } return ret, nil // { - // "description": "Retrieves a list of target pools available to the specified project and region.", + // "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.targetPools.list", + // "id": "compute.targetSslProxies.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -115213,18 +131521,11 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools", + // "path": "{project}/global/targetSslProxies", // "response": { - // "$ref": "TargetPoolList" + // "$ref": "TargetSslProxyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -115238,7 +131539,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
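
As a hedged illustration of the list/paging surface shown in the hunks above (List, Filter, MaxResults and Pages on TargetSslProxiesListCall), the sketch below pages through a project's TargetSslProxy resources. The import path, the compute.New constructor, the svc.TargetSslProxies accessor, and the Items/Name fields are assumptions based on the usual shape of this generated package; they are not part of this diff.

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed; adjust to the vendored version
)

// listTargetSslProxies pages through all TargetSslProxy resources in a project.
func listTargetSslProxies(client *http.Client, project string) error {
	svc, err := compute.New(client) // constructor assumed from the usual generated-package shape
	if err != nil {
		return err
	}
	call := svc.TargetSslProxies.List(project).
		Filter("name != example-proxy"). // filter syntax as documented on the List call above
		MaxResults(100)
	// Pages follows nextPageToken automatically and stops when the callback returns an error.
	return call.Pages(context.Background(), func(page *compute.TargetSslProxyList) error {
		for _, proxy := range page.Items { // Items field assumed from the list-type convention
			fmt.Println(proxy.Name) // Name field assumed
		}
		return nil
	})
}

func main() {
	// A real caller would use an OAuth2-authorized *http.Client; DefaultClient is a placeholder.
	if err := listTargetSslProxies(http.DefaultClient, "my-project"); err != nil {
		log.Fatal(err)
	}
}
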
-func (c *TargetPoolsListCall) Pages(ctx context.Context, f func(*TargetPoolList) error) error { +func (c *TargetSslProxiesListCall) Pages(ctx context.Context, f func(*TargetSslProxyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -115256,27 +131557,24 @@ func (c *TargetPoolsListCall) Pages(ctx context.Context, f func(*TargetPoolList) } } -// method id "compute.targetPools.removeHealthCheck": +// method id "compute.targetSslProxies.setBackendService": -type TargetPoolsRemoveHealthCheckCall struct { - s *Service - project string - region string - targetPool string - targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetSslProxiesSetBackendServiceCall struct { + s *Service + project string + targetSslProxy string + targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveHealthCheck: Removes health check URL from a target pool. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeHealthCheck -func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, targetPool string, targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest) *TargetPoolsRemoveHealthCheckCall { - c := &TargetPoolsRemoveHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetBackendService: Changes the BackendService for TargetSslProxy. +func (r *TargetSslProxiesService) SetBackendService(project string, targetSslProxy string, targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest) *TargetSslProxiesSetBackendServiceCall { + c := &TargetSslProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetPool = targetPool - c.targetpoolsremovehealthcheckrequest = targetpoolsremovehealthcheckrequest + c.targetSslProxy = targetSslProxy + c.targetsslproxiessetbackendservicerequest = targetsslproxiessetbackendservicerequest return c } @@ -115294,7 +131592,7 @@ func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, ta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetPoolsRemoveHealthCheckCall) RequestId(requestId string) *TargetPoolsRemoveHealthCheckCall { +func (c *TargetSslProxiesSetBackendServiceCall) RequestId(requestId string) *TargetSslProxiesSetBackendServiceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -115302,7 +131600,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) RequestId(requestId string) *TargetPo // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsRemoveHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveHealthCheckCall { +func (c *TargetSslProxiesSetBackendServiceCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetBackendServiceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -115310,35 +131608,35 @@ func (c *TargetPoolsRemoveHealthCheckCall) Fields(s ...googleapi.Field) *TargetP // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
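
The requestId parameter documented above makes retries of mutating calls such as setBackendService idempotent: re-sending the same UUID lets the server drop the duplicate. A minimal sketch under stated assumptions follows; the Service field on TargetSslProxiesSetBackendServiceRequest, the Operation.Name field, and the compute.New constructor are assumed, since the diff only shows the call constructor and the RequestId setter.

package main

import (
	"crypto/rand"
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed
)

// newRequestID builds a random version-4 UUID, the format the requestId
// parameter requires (the zero UUID is rejected).
func newRequestID() (string, error) {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		return "", err
	}
	b[6] = (b[6] & 0x0f) | 0x40 // version 4
	b[8] = (b[8] & 0x3f) | 0x80 // RFC 4122 variant
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:16]), nil
}

func main() {
	svc, err := compute.New(http.DefaultClient) // placeholder client; use an authorized one
	if err != nil {
		log.Fatal(err)
	}
	reqID, err := newRequestID()
	if err != nil {
		log.Fatal(err)
	}
	req := &compute.TargetSslProxiesSetBackendServiceRequest{
		// Service field assumed: URL of the backend service to attach.
		Service: "https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-backend",
	}
	op, err := svc.TargetSslProxies.SetBackendService("my-project", "my-ssl-proxy", req).
		RequestId(reqID). // reuse the same UUID when retrying this logical request
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name) // Name field assumed on Operation
}
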
-func (c *TargetPoolsRemoveHealthCheckCall) Context(ctx context.Context) *TargetPoolsRemoveHealthCheckCall { +func (c *TargetSslProxiesSetBackendServiceCall) Context(ctx context.Context) *TargetSslProxiesSetBackendServiceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetPoolsRemoveHealthCheckCall) Header() http.Header { +func (c *TargetSslProxiesSetBackendServiceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremovehealthcheckrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxiessetbackendservicerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -115346,21 +131644,20 @@ func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, + "project": c.project, + "targetSslProxy": c.targetSslProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.removeHealthCheck" call. +// Do executes the "compute.targetSslProxies.setBackendService" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -115391,13 +131688,12 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Removes health check URL from a target pool.", + // "description": "Changes the BackendService for TargetSslProxy.", // "httpMethod": "POST", - // "id": "compute.targetPools.removeHealthCheck", + // "id": "compute.targetSslProxies.setBackendService", // "parameterOrder": [ // "project", - // "region", - // "targetPool" + // "targetSslProxy" // ], // "parameters": { // "project": { @@ -115407,29 +131703,22 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "targetPool": { - // "description": "Name of the target pool to remove health checks from.", + // "targetSslProxy": { + // "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService", // "request": { - // "$ref": "TargetPoolsRemoveHealthCheckRequest" + // "$ref": "TargetSslProxiesSetBackendServiceRequest" // }, // "response": { // "$ref": "Operation" @@ -115442,27 +131731,24 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.targetPools.removeInstance": +// method id "compute.targetSslProxies.setProxyHeader": -type TargetPoolsRemoveInstanceCall struct { - s *Service - project string - region string - targetPool string - targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetSslProxiesSetProxyHeaderCall struct { + s *Service + project string + targetSslProxy string + targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveInstance: Removes instance URL from a target pool. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeInstance -func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { - c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetProxyHeader: Changes the ProxyHeaderType for TargetSslProxy. +func (r *TargetSslProxiesService) SetProxyHeader(project string, targetSslProxy string, targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest) *TargetSslProxiesSetProxyHeaderCall { + c := &TargetSslProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetPool = targetPool - c.targetpoolsremoveinstancerequest = targetpoolsremoveinstancerequest + c.targetSslProxy = targetSslProxy + c.targetsslproxiessetproxyheaderrequest = targetsslproxiessetproxyheaderrequest return c } @@ -115480,7 +131766,7 @@ func (r *TargetPoolsService) RemoveInstance(project string, region string, targe // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetPoolsRemoveInstanceCall) RequestId(requestId string) *TargetPoolsRemoveInstanceCall { +func (c *TargetSslProxiesSetProxyHeaderCall) RequestId(requestId string) *TargetSslProxiesSetProxyHeaderCall { c.urlParams_.Set("requestId", requestId) return c } @@ -115488,7 +131774,7 @@ func (c *TargetPoolsRemoveInstanceCall) RequestId(requestId string) *TargetPools // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsRemoveInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveInstanceCall { +func (c *TargetSslProxiesSetProxyHeaderCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetProxyHeaderCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -115496,35 +131782,35 @@ func (c *TargetPoolsRemoveInstanceCall) Fields(s ...googleapi.Field) *TargetPool // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsRemoveInstanceCall) Context(ctx context.Context) *TargetPoolsRemoveInstanceCall { +func (c *TargetSslProxiesSetProxyHeaderCall) Context(ctx context.Context) *TargetSslProxiesSetProxyHeaderCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *TargetPoolsRemoveInstanceCall) Header() http.Header { +func (c *TargetSslProxiesSetProxyHeaderCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremoveinstancerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxiessetproxyheaderrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeInstance") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -115532,21 +131818,20 @@ func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, e } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, + "project": c.project, + "targetSslProxy": c.targetSslProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.removeInstance" call. +// Do executes the "compute.targetSslProxies.setProxyHeader" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -115577,13 +131862,12 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Removes instance URL from a target pool.", + // "description": "Changes the ProxyHeaderType for TargetSslProxy.", // "httpMethod": "POST", - // "id": "compute.targetPools.removeInstance", + // "id": "compute.targetSslProxies.setProxyHeader", // "parameterOrder": [ // "project", - // "region", - // "targetPool" + // "targetSslProxy" // ], // "parameters": { // "project": { @@ -115593,29 +131877,22 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to remove instances from.", + // "targetSslProxy": { + // "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader", // "request": { - // "$ref": "TargetPoolsRemoveInstanceRequest" + // "$ref": "TargetSslProxiesSetProxyHeaderRequest" // }, // "response": { // "$ref": "Operation" @@ -115628,34 +131905,24 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.targetPools.setBackup": +// method id "compute.targetSslProxies.setSslCertificates": -type TargetPoolsSetBackupCall struct { - s *Service - project string - region string - targetPool string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetSslProxiesSetSslCertificatesCall struct { + s *Service + project string + targetSslProxy string + targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetBackup: Changes a backup target pool's configurations. -// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/setBackup -func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { - c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetSslCertificates: Changes SslCertificates for TargetSslProxy. +func (r *TargetSslProxiesService) SetSslCertificates(project string, targetSslProxy string, targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest) *TargetSslProxiesSetSslCertificatesCall { + c := &TargetSslProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetPool = targetPool - c.targetreference = targetreference - return c -} - -// FailoverRatio sets the optional parameter "failoverRatio": New -// failoverRatio value for the target pool. 
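
Per the Do documentation in these hunks, any non-2xx response surfaces as a *googleapi.Error carrying the HTTP status code and response headers. A rough sketch of checking for that error type around the setSslCertificates call introduced above; the SslCertificates request field and the Operation.Name field are assumptions, not shown in this diff.

package main

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed
	"google.golang.org/api/googleapi"
)

func main() {
	svc, err := compute.New(http.DefaultClient) // placeholder; use an authorized client
	if err != nil {
		log.Fatal(err)
	}
	req := &compute.TargetSslProxiesSetSslCertificatesRequest{
		// SslCertificates field assumed: URLs of the certificates to attach.
		SslCertificates: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/global/sslCertificates/my-cert",
		},
	}
	op, err := svc.TargetSslProxies.SetSslCertificates("my-project", "my-ssl-proxy", req).Do()
	if err != nil {
		// Non-2xx responses are reported as *googleapi.Error with Code and Header set.
		if gerr, ok := err.(*googleapi.Error); ok {
			fmt.Printf("API error %d (headers: %v)\n", gerr.Code, gerr.Header)
			return
		}
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name) // Name field assumed on Operation
}
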
-func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetPoolsSetBackupCall { - c.urlParams_.Set("failoverRatio", fmt.Sprint(failoverRatio)) + c.targetSslProxy = targetSslProxy + c.targetsslproxiessetsslcertificatesrequest = targetsslproxiessetsslcertificatesrequest return c } @@ -115673,7 +131940,7 @@ func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetP // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetPoolsSetBackupCall) RequestId(requestId string) *TargetPoolsSetBackupCall { +func (c *TargetSslProxiesSetSslCertificatesCall) RequestId(requestId string) *TargetSslProxiesSetSslCertificatesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -115681,7 +131948,7 @@ func (c *TargetPoolsSetBackupCall) RequestId(requestId string) *TargetPoolsSetBa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetBackupCall { +func (c *TargetSslProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetSslCertificatesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -115689,35 +131956,35 @@ func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetB // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsSetBackupCall) Context(ctx context.Context) *TargetPoolsSetBackupCall { +func (c *TargetSslProxiesSetSslCertificatesCall) Context(ctx context.Context) *TargetSslProxiesSetSslCertificatesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetPoolsSetBackupCall) Header() http.Header { +func (c *TargetSslProxiesSetSslCertificatesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxiessetsslcertificatesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/setBackup") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -115725,21 +131992,20 @@ func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, + "project": c.project, + "targetSslProxy": c.targetSslProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.setBackup" call. +// Do executes the "compute.targetSslProxies.setSslCertificates" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -115770,21 +132036,14 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Changes a backup target pool's configurations.", + // "description": "Changes SslCertificates for TargetSslProxy.", // "httpMethod": "POST", - // "id": "compute.targetPools.setBackup", + // "id": "compute.targetSslProxies.setSslCertificates", // "parameterOrder": [ // "project", - // "region", - // "targetPool" + // "targetSslProxy" // ], // "parameters": { - // "failoverRatio": { - // "description": "New failoverRatio value for the target pool.", - // "format": "float", - // "location": "query", - // "type": "number" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -115792,29 +132051,22 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "targetPool": { - // "description": "Name of the TargetPool resource to set a backup pool for.", + // "targetSslProxy": { + // "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup", + // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates", // "request": { - // "$ref": "TargetReference" + // "$ref": "TargetSslProxiesSetSslCertificatesRequest" // }, // "response": { // "$ref": "Operation" @@ -115827,185 +132079,27 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.targetPools.testIamPermissions": - -type TargetPoolsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *TargetPoolsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetPoolsTestIamPermissionsCall { - c := &TargetPoolsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetPoolsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetPoolsTestIamPermissionsCall) Context(ctx context.Context) *TargetPoolsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetPoolsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{resource}/testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetPoolsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.targetPools.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetPools/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.targetSslProxies.delete": +// method id "compute.targetSslProxies.setSslPolicy": -type TargetSslProxiesDeleteCall struct { - s *Service - project string - targetSslProxy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetSslProxiesSetSslPolicyCall struct { + s *Service + project string + targetSslProxy string + sslpolicyreference *SslPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the 
specified TargetSslProxy resource. -func (r *TargetSslProxiesService) Delete(project string, targetSslProxy string) *TargetSslProxiesDeleteCall { - c := &TargetSslProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetSslPolicy: Sets the SSL policy for TargetSslProxy. The SSL policy +// specifies the server-side support for SSL features. This affects +// connections between clients and the SSL proxy load balancer. They do +// not affect the connection between the load balancer and the backends. +func (r *TargetSslProxiesService) SetSslPolicy(project string, targetSslProxy string, sslpolicyreference *SslPolicyReference) *TargetSslProxiesSetSslPolicyCall { + c := &TargetSslProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.targetSslProxy = targetSslProxy + c.sslpolicyreference = sslpolicyreference return c } @@ -116023,7 +132117,7 @@ func (r *TargetSslProxiesService) Delete(project string, targetSslProxy string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetSslProxiesDeleteCall) RequestId(requestId string) *TargetSslProxiesDeleteCall { +func (c *TargetSslProxiesSetSslPolicyCall) RequestId(requestId string) *TargetSslProxiesSetSslPolicyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -116031,7 +132125,7 @@ func (c *TargetSslProxiesDeleteCall) RequestId(requestId string) *TargetSslProxi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetSslProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetSslProxiesDeleteCall { +func (c *TargetSslProxiesSetSslPolicyCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetSslPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -116039,32 +132133,37 @@ func (c *TargetSslProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetSslProx // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetSslProxiesDeleteCall) Context(ctx context.Context) *TargetSslProxiesDeleteCall { +func (c *TargetSslProxiesSetSslPolicyCall) Context(ctx context.Context) *TargetSslProxiesSetSslPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetSslProxiesDeleteCall) Header() http.Header { +func (c *TargetSslProxiesSetSslPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -116076,14 +132175,14 @@ func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetSslProxies.delete" call. +// Do executes the "compute.targetSslProxies.setSslPolicy" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -116114,9 +132213,9 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified TargetSslProxy resource.", - // "httpMethod": "DELETE", - // "id": "compute.targetSslProxies.delete", + // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + // "httpMethod": "POST", + // "id": "compute.targetSslProxies.setSslPolicy", // "parameterOrder": [ // "project", // "targetSslProxy" @@ -116135,14 +132234,16 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // }, // "targetSslProxy": { - // "description": "Name of the TargetSslProxy resource to delete.", + // "description": "Name of the TargetSslProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetSslProxies/{targetSslProxy}", + // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", + // "request": { + // "$ref": "SslPolicyReference" + // }, // "response": { // "$ref": "Operation" // }, @@ -116154,96 +132255,89 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.targetSslProxies.get": +// method id "compute.targetSslProxies.testIamPermissions": -type TargetSslProxiesGetCall struct { - s *Service - project string - targetSslProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type TargetSslProxiesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified TargetSslProxy resource. Gets a list of -// available target SSL proxies by making a list() request. 
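
For the new setSslPolicy method introduced in the hunks above (which controls server-side SSL features on connections between clients and the SSL proxy load balancer), a tentative usage sketch; the SslPolicy URL field on SslPolicyReference is assumed, as the diff only names the type.

package main

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed
)

func main() {
	svc, err := compute.New(http.DefaultClient) // placeholder; use an authorized client
	if err != nil {
		log.Fatal(err)
	}
	ref := &compute.SslPolicyReference{
		// SslPolicy field assumed: URL of the SSL policy to apply to the proxy.
		SslPolicy: "https://www.googleapis.com/compute/v1/projects/my-project/global/sslPolicies/my-policy",
	}
	op, err := svc.TargetSslProxies.SetSslPolicy("my-project", "my-ssl-proxy", ref).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started:", op.Name) // Name field assumed on Operation
}
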
-func (r *TargetSslProxiesService) Get(project string, targetSslProxy string) *TargetSslProxiesGetCall { - c := &TargetSslProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *TargetSslProxiesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetSslProxiesTestIamPermissionsCall { + c := &TargetSslProxiesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetSslProxy = targetSslProxy + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetSslProxiesGetCall) Fields(s ...googleapi.Field) *TargetSslProxiesGetCall { +func (c *TargetSslProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetSslProxiesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetSslProxiesGetCall) IfNoneMatch(entityTag string) *TargetSslProxiesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetSslProxiesGetCall) Context(ctx context.Context) *TargetSslProxiesGetCall { +func (c *TargetSslProxiesTestIamPermissionsCall) Context(ctx context.Context) *TargetSslProxiesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetSslProxiesGetCall) Header() http.Header { +func (c *TargetSslProxiesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetSslProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetSslProxy": c.targetSslProxy, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetSslProxies.get" call. -// Exactly one of *TargetSslProxy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *TargetSslProxy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetSslProxies.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslProxy, error) { +func (c *TargetSslProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -116262,7 +132356,7 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetSslProxy{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -116274,12 +132368,12 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr } return ret, nil // { - // "description": "Returns the specified TargetSslProxy resource. 
Gets a list of available target SSL proxies by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.targetSslProxies.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.targetSslProxies.testIamPermissions", // "parameterOrder": [ // "project", - // "targetSslProxy" + // "resource" // ], // "parameters": { // "project": { @@ -116289,17 +132383,20 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr // "required": true, // "type": "string" // }, - // "targetSslProxy": { - // "description": "Name of the TargetSslProxy resource to return.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetSslProxies/{targetSslProxy}", + // "path": "{project}/global/targetSslProxies/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "TargetSslProxy" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -116310,23 +132407,22 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr } -// method id "compute.targetSslProxies.insert": +// method id "compute.targetTcpProxies.delete": -type TargetSslProxiesInsertCall struct { +type TargetTcpProxiesDeleteCall struct { s *Service project string - targetsslproxy *TargetSslProxy + targetTcpProxy string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a TargetSslProxy resource in the specified project -// using the data included in the request. -func (r *TargetSslProxiesService) Insert(project string, targetsslproxy *TargetSslProxy) *TargetSslProxiesInsertCall { - c := &TargetSslProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified TargetTcpProxy resource. +func (r *TargetTcpProxiesService) Delete(project string, targetTcpProxy string) *TargetTcpProxiesDeleteCall { + c := &TargetTcpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetsslproxy = targetsslproxy + c.targetTcpProxy = targetTcpProxy return c } @@ -116344,7 +132440,7 @@ func (r *TargetSslProxiesService) Insert(project string, targetsslproxy *TargetS // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetSslProxiesInsertCall) RequestId(requestId string) *TargetSslProxiesInsertCall { +func (c *TargetTcpProxiesDeleteCall) RequestId(requestId string) *TargetTcpProxiesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -116352,7 +132448,7 @@ func (c *TargetSslProxiesInsertCall) RequestId(requestId string) *TargetSslProxi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
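
The testIamPermissions method added above takes a TestPermissionsRequest and returns a TestPermissionsResponse. A speculative sketch of asking which permissions the caller holds on a target SSL proxy; the Permissions fields on both types are assumed from the usual test-permissions shape, since the diff only names the types.

package main

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed
)

func main() {
	svc, err := compute.New(http.DefaultClient) // placeholder; use an authorized client
	if err != nil {
		log.Fatal(err)
	}
	req := &compute.TestPermissionsRequest{
		// Permissions field assumed: the permissions to test for.
		Permissions: []string{"compute.targetSslProxies.get", "compute.targetSslProxies.delete"},
	}
	// The resource argument is the proxy name or id, per the method's parameter description.
	resp, err := svc.TargetSslProxies.TestIamPermissions("my-project", "my-ssl-proxy", req).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("granted:", resp.Permissions) // Permissions field assumed on the response
}
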
-func (c *TargetSslProxiesInsertCall) Fields(s ...googleapi.Field) *TargetSslProxiesInsertCall { +func (c *TargetTcpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetTcpProxiesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -116360,55 +132456,51 @@ func (c *TargetSslProxiesInsertCall) Fields(s ...googleapi.Field) *TargetSslProx // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetSslProxiesInsertCall) Context(ctx context.Context) *TargetSslProxiesInsertCall { +func (c *TargetTcpProxiesDeleteCall) Context(ctx context.Context) *TargetTcpProxiesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetSslProxiesInsertCall) Header() http.Header { +func (c *TargetTcpProxiesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "targetTcpProxy": c.targetTcpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetSslProxies.insert" call. +// Do executes the "compute.targetTcpProxies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -116439,11 +132531,12 @@ func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a TargetSslProxy resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.targetSslProxies.insert", + // "description": "Deletes the specified TargetTcpProxy resource.", + // "httpMethod": "DELETE", + // "id": "compute.targetTcpProxies.delete", // "parameterOrder": [ - // "project" + // "project", + // "targetTcpProxy" // ], // "parameters": { // "project": { @@ -116457,12 +132550,16 @@ func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "targetTcpProxy": { + // "description": "Name of the TargetTcpProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/targetSslProxies", - // "request": { - // "$ref": "TargetSslProxy" - // }, + // "path": "{project}/global/targetTcpProxies/{targetTcpProxy}", // "response": { // "$ref": "Operation" // }, @@ -116474,92 +132571,31 @@ func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.targetSslProxies.list": +// method id "compute.targetTcpProxies.get": -type TargetSslProxiesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type TargetTcpProxiesGetCall struct { + s *Service + project string + targetTcpProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of TargetSslProxy resources available to the -// specified project. -func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall { - c := &TargetSslProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified TargetTcpProxy resource. Gets a list of +// available target TCP proxies by making a list() request. +func (r *TargetTcpProxiesService) Get(project string, targetTcpProxy string) *TargetTcpProxiesGetCall { + c := &TargetTcpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. 
-// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *TargetSslProxiesListCall) Filter(filter string) *TargetSslProxiesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *TargetSslProxiesListCall) MaxResults(maxResults int64) *TargetSslProxiesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *TargetSslProxiesListCall) OrderBy(orderBy string) *TargetSslProxiesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *TargetSslProxiesListCall) PageToken(pageToken string) *TargetSslProxiesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.targetTcpProxy = targetTcpProxy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetSslProxiesListCall) Fields(s ...googleapi.Field) *TargetSslProxiesListCall { +func (c *TargetTcpProxiesGetCall) Fields(s ...googleapi.Field) *TargetTcpProxiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -116569,7 +132605,7 @@ func (c *TargetSslProxiesListCall) Fields(s ...googleapi.Field) *TargetSslProxie // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
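
The IfNoneMatch setter and googleapi.IsNotModified documented above support conditional reads: pass a previously seen ETag and a 304 comes back as a recognizable error instead of a payload. A sketch assuming the usual svc.TargetTcpProxies accessor and a Name field on TargetTcpProxy; where the stored ETag comes from (for example, an earlier response's headers) is left to the caller.

package main

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed
	"google.golang.org/api/googleapi"
)

// getIfChanged re-fetches a TargetTcpProxy only if it has changed since lastETag.
func getIfChanged(svc *compute.Service, project, proxy, lastETag string) (*compute.TargetTcpProxy, error) {
	call := svc.TargetTcpProxies.Get(project, proxy)
	if lastETag != "" {
		call = call.IfNoneMatch(lastETag)
	}
	p, err := call.Do()
	if googleapi.IsNotModified(err) {
		// The server answered 304: the cached copy is still current.
		return nil, nil
	}
	return p, err
}

func main() {
	svc, err := compute.New(http.DefaultClient) // placeholder; use an authorized client
	if err != nil {
		log.Fatal(err)
	}
	p, err := getIfChanged(svc, "my-project", "my-tcp-proxy", "")
	if err != nil {
		log.Fatal(err)
	}
	if p != nil {
		fmt.Println("fetched:", p.Name) // Name field assumed
	}
}
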
-func (c *TargetSslProxiesListCall) IfNoneMatch(entityTag string) *TargetSslProxiesListCall { +func (c *TargetTcpProxiesGetCall) IfNoneMatch(entityTag string) *TargetTcpProxiesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -116577,21 +132613,21 @@ func (c *TargetSslProxiesListCall) IfNoneMatch(entityTag string) *TargetSslProxi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetSslProxiesListCall) Context(ctx context.Context) *TargetSslProxiesListCall { +func (c *TargetTcpProxiesGetCall) Context(ctx context.Context) *TargetTcpProxiesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetSslProxiesListCall) Header() http.Header { +func (c *TargetTcpProxiesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -116603,7 +132639,7 @@ func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -116611,19 +132647,20 @@ func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "targetTcpProxy": c.targetTcpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetSslProxies.list" call. -// Exactly one of *TargetSslProxyList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetSslProxyList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetTcpProxies.get" call. +// Exactly one of *TargetTcpProxy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetTcpProxy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslProxyList, error) { +func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -116642,7 +132679,7 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetSslProxyList{ + ret := &TargetTcpProxy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -116654,47 +132691,32 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP } return ret, nil // { - // "description": "Retrieves the list of TargetSslProxy resources available to the specified project.", + // "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request.", // "httpMethod": "GET", - // "id": "compute.targetSslProxies.list", + // "id": "compute.targetTcpProxies.get", // "parameterOrder": [ - // "project" + // "project", + // "targetTcpProxy" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "targetTcpProxy": { + // "description": "Name of the TargetTcpProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/targetSslProxies", + // "path": "{project}/global/targetTcpProxies/{targetTcpProxy}", // "response": { - // "$ref": "TargetSslProxyList" + // "$ref": "TargetTcpProxy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -116705,45 +132727,23 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TargetSslProxiesListCall) Pages(ctx context.Context, f func(*TargetSslProxyList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.targetSslProxies.setBackendService": +// method id "compute.targetTcpProxies.insert": -type TargetSslProxiesSetBackendServiceCall struct { - s *Service - project string - targetSslProxy string - targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetTcpProxiesInsertCall struct { + s *Service + project string + targettcpproxy *TargetTcpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetBackendService: Changes the BackendService for TargetSslProxy. -func (r *TargetSslProxiesService) SetBackendService(project string, targetSslProxy string, targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest) *TargetSslProxiesSetBackendServiceCall { - c := &TargetSslProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a TargetTcpProxy resource in the specified project +// using the data included in the request. +func (r *TargetTcpProxiesService) Insert(project string, targettcpproxy *TargetTcpProxy) *TargetTcpProxiesInsertCall { + c := &TargetTcpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetSslProxy = targetSslProxy - c.targetsslproxiessetbackendservicerequest = targetsslproxiessetbackendservicerequest + c.targettcpproxy = targettcpproxy return c } @@ -116761,7 +132761,7 @@ func (r *TargetSslProxiesService) SetBackendService(project string, targetSslPro // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *TargetSslProxiesSetBackendServiceCall) RequestId(requestId string) *TargetSslProxiesSetBackendServiceCall { +func (c *TargetTcpProxiesInsertCall) RequestId(requestId string) *TargetTcpProxiesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -116769,7 +132769,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) RequestId(requestId string) *Tar // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetSslProxiesSetBackendServiceCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetBackendServiceCall { +func (c *TargetTcpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetTcpProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -116777,35 +132777,35 @@ func (c *TargetSslProxiesSetBackendServiceCall) Fields(s ...googleapi.Field) *Ta // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetSslProxiesSetBackendServiceCall) Context(ctx context.Context) *TargetSslProxiesSetBackendServiceCall { +func (c *TargetTcpProxiesInsertCall) Context(ctx context.Context) *TargetTcpProxiesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetSslProxiesSetBackendServiceCall) Header() http.Header { +func (c *TargetTcpProxiesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxiessetbackendservicerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targettcpproxy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -116813,20 +132813,19 @@ func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetSslProxy": c.targetSslProxy, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetSslProxies.setBackendService" call. +// Do executes the "compute.targetTcpProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -116857,12 +132856,11 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the BackendService for TargetSslProxy.", + // "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.targetSslProxies.setBackendService", + // "id": "compute.targetTcpProxies.insert", // "parameterOrder": [ - // "project", - // "targetSslProxy" + // "project" // ], // "parameters": { // "project": { @@ -116876,18 +132874,11 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "targetSslProxy": { - // "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setBackendService", + // "path": "{project}/global/targetTcpProxies", // "request": { - // "$ref": "TargetSslProxiesSetBackendServiceRequest" + // "$ref": "TargetTcpProxy" // }, // "response": { // "$ref": "Operation" @@ -116900,107 +132891,156 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) } -// method id "compute.targetSslProxies.setProxyHeader": +// method id "compute.targetTcpProxies.list": -type TargetSslProxiesSetProxyHeaderCall struct { - s *Service - project string - targetSslProxy string - targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetTcpProxiesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetProxyHeader: Changes the ProxyHeaderType for TargetSslProxy. -func (r *TargetSslProxiesService) SetProxyHeader(project string, targetSslProxy string, targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest) *TargetSslProxiesSetProxyHeaderCall { - c := &TargetSslProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of TargetTcpProxy resources available to the +// specified project. 
+func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall { + c := &TargetTcpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetSslProxy = targetSslProxy - c.targetsslproxiessetproxyheaderrequest = targetsslproxiessetproxyheaderrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetSslProxiesSetProxyHeaderCall) RequestId(requestId string) *TargetSslProxiesSetProxyHeaderCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *TargetTcpProxiesListCall) Filter(filter string) *TargetTcpProxiesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *TargetTcpProxiesListCall) MaxResults(maxResults int64) *TargetTcpProxiesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". 
This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetTcpProxiesListCall) OrderBy(orderBy string) *TargetTcpProxiesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetTcpProxiesListCall) PageToken(pageToken string) *TargetTcpProxiesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetSslProxiesSetProxyHeaderCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetProxyHeaderCall { +func (c *TargetTcpProxiesListCall) Fields(s ...googleapi.Field) *TargetTcpProxiesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetTcpProxiesListCall) IfNoneMatch(entityTag string) *TargetTcpProxiesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetSslProxiesSetProxyHeaderCall) Context(ctx context.Context) *TargetSslProxiesSetProxyHeaderCall { +func (c *TargetTcpProxiesListCall) Context(ctx context.Context) *TargetTcpProxiesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetSslProxiesSetProxyHeaderCall) Header() http.Header { +func (c *TargetTcpProxiesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxiessetproxyheaderrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetSslProxy": c.targetSslProxy, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetSslProxies.setProxyHeader" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.targetTcpProxies.list" call. +// Exactly one of *TargetTcpProxyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetTcpProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxyList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -117019,7 +133059,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TargetTcpProxyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -117031,67 +133071,96 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Changes the ProxyHeaderType for TargetSslProxy.", - // "httpMethod": "POST", - // "id": "compute.targetSslProxies.setProxyHeader", + // "description": "Retrieves the list of TargetTcpProxy resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.targetTcpProxies.list", // "parameterOrder": [ - // "project", - // "targetSslProxy" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" // }, - // "targetSslProxy": { - // "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setProxyHeader", - // "request": { - // "$ref": "TargetSslProxiesSetProxyHeaderRequest" - // }, + // "path": "{project}/global/targetTcpProxies", // "response": { - // "$ref": "Operation" + // "$ref": "TargetTcpProxyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.targetSslProxies.setSslCertificates": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetTcpProxiesListCall) Pages(ctx context.Context, f func(*TargetTcpProxyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type TargetSslProxiesSetSslCertificatesCall struct { - s *Service - project string - targetSslProxy string - targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.targetTcpProxies.setBackendService": + +type TargetTcpProxiesSetBackendServiceCall struct { + s *Service + project string + targetTcpProxy string + targettcpproxiessetbackendservicerequest *TargetTcpProxiesSetBackendServiceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetSslCertificates: Changes SslCertificates for TargetSslProxy. -func (r *TargetSslProxiesService) SetSslCertificates(project string, targetSslProxy string, targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest) *TargetSslProxiesSetSslCertificatesCall { - c := &TargetSslProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetBackendService: Changes the BackendService for TargetTcpProxy. 
+func (r *TargetTcpProxiesService) SetBackendService(project string, targetTcpProxy string, targettcpproxiessetbackendservicerequest *TargetTcpProxiesSetBackendServiceRequest) *TargetTcpProxiesSetBackendServiceCall { + c := &TargetTcpProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetSslProxy = targetSslProxy - c.targetsslproxiessetsslcertificatesrequest = targetsslproxiessetsslcertificatesrequest + c.targetTcpProxy = targetTcpProxy + c.targettcpproxiessetbackendservicerequest = targettcpproxiessetbackendservicerequest return c } @@ -117109,7 +133178,7 @@ func (r *TargetSslProxiesService) SetSslCertificates(project string, targetSslPr // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetSslProxiesSetSslCertificatesCall) RequestId(requestId string) *TargetSslProxiesSetSslCertificatesCall { +func (c *TargetTcpProxiesSetBackendServiceCall) RequestId(requestId string) *TargetTcpProxiesSetBackendServiceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -117117,7 +133186,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) RequestId(requestId string) *Ta // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetSslProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetSslCertificatesCall { +func (c *TargetTcpProxiesSetBackendServiceCall) Fields(s ...googleapi.Field) *TargetTcpProxiesSetBackendServiceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -117125,35 +133194,35 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *T // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetSslProxiesSetSslCertificatesCall) Context(ctx context.Context) *TargetSslProxiesSetSslCertificatesCall { +func (c *TargetTcpProxiesSetBackendServiceCall) Context(ctx context.Context) *TargetTcpProxiesSetBackendServiceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetSslProxiesSetSslCertificatesCall) Header() http.Header { +func (c *TargetTcpProxiesSetBackendServiceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetsslproxiessetsslcertificatesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targettcpproxiessetbackendservicerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -117162,19 +133231,19 @@ func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Re req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "targetSslProxy": c.targetSslProxy, + "targetTcpProxy": c.targetTcpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetSslProxies.setSslCertificates" call. +// Do executes the "compute.targetTcpProxies.setBackendService" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -117205,12 +133274,12 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Changes SslCertificates for TargetSslProxy.", + // "description": "Changes the BackendService for TargetTcpProxy.", // "httpMethod": "POST", - // "id": "compute.targetSslProxies.setSslCertificates", + // "id": "compute.targetTcpProxies.setBackendService", // "parameterOrder": [ // "project", - // "targetSslProxy" + // "targetTcpProxy" // ], // "parameters": { // "project": { @@ -117225,17 +133294,17 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption // "location": "query", // "type": "string" // }, - // "targetSslProxy": { - // "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", + // "targetTcpProxy": { + // "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslCertificates", + // "path": "{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService", // "request": { - // "$ref": "TargetSslProxiesSetSslCertificatesRequest" + // "$ref": "TargetTcpProxiesSetBackendServiceRequest" // }, // "response": { // "$ref": "Operation" @@ -117248,27 +133317,24 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption } -// method id "compute.targetSslProxies.setSslPolicy": +// method id "compute.targetTcpProxies.setProxyHeader": -type TargetSslProxiesSetSslPolicyCall struct { - s *Service - project string - targetSslProxy string - sslpolicyreference *SslPolicyReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetTcpProxiesSetProxyHeaderCall struct { + s *Service + project string + targetTcpProxy string + targettcpproxiessetproxyheaderrequest *TargetTcpProxiesSetProxyHeaderRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetSslPolicy: Sets the SSL policy for TargetSslProxy. 
The SSL policy -// specifies the server-side support for SSL features. This affects -// connections between clients and the SSL proxy load balancer. They do -// not affect the connection between the load balancer and the backends. -func (r *TargetSslProxiesService) SetSslPolicy(project string, targetSslProxy string, sslpolicyreference *SslPolicyReference) *TargetSslProxiesSetSslPolicyCall { - c := &TargetSslProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetProxyHeader: Changes the ProxyHeaderType for TargetTcpProxy. +func (r *TargetTcpProxiesService) SetProxyHeader(project string, targetTcpProxy string, targettcpproxiessetproxyheaderrequest *TargetTcpProxiesSetProxyHeaderRequest) *TargetTcpProxiesSetProxyHeaderCall { + c := &TargetTcpProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetSslProxy = targetSslProxy - c.sslpolicyreference = sslpolicyreference + c.targetTcpProxy = targetTcpProxy + c.targettcpproxiessetproxyheaderrequest = targettcpproxiessetproxyheaderrequest return c } @@ -117286,7 +133352,7 @@ func (r *TargetSslProxiesService) SetSslPolicy(project string, targetSslProxy st // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetSslProxiesSetSslPolicyCall) RequestId(requestId string) *TargetSslProxiesSetSslPolicyCall { +func (c *TargetTcpProxiesSetProxyHeaderCall) RequestId(requestId string) *TargetTcpProxiesSetProxyHeaderCall { c.urlParams_.Set("requestId", requestId) return c } @@ -117294,7 +133360,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) RequestId(requestId string) *TargetSs // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetSslProxiesSetSslPolicyCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetSslPolicyCall { +func (c *TargetTcpProxiesSetProxyHeaderCall) Fields(s ...googleapi.Field) *TargetTcpProxiesSetProxyHeaderCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -117302,35 +133368,35 @@ func (c *TargetSslProxiesSetSslPolicyCall) Fields(s ...googleapi.Field) *TargetS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetSslProxiesSetSslPolicyCall) Context(ctx context.Context) *TargetSslProxiesSetSslPolicyCall { +func (c *TargetTcpProxiesSetProxyHeaderCall) Context(ctx context.Context) *TargetTcpProxiesSetProxyHeaderCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *TargetSslProxiesSetSslPolicyCall) Header() http.Header { +func (c *TargetTcpProxiesSetProxyHeaderCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicyreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targettcpproxiessetproxyheaderrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -117339,19 +133405,19 @@ func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "targetSslProxy": c.targetSslProxy, + "targetTcpProxy": c.targetTcpProxy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetSslProxies.setSslPolicy" call. +// Do executes the "compute.targetTcpProxies.setProxyHeader" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -117382,12 +133448,12 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + // "description": "Changes the ProxyHeaderType for TargetTcpProxy.", // "httpMethod": "POST", - // "id": "compute.targetSslProxies.setSslPolicy", + // "id": "compute.targetTcpProxies.setProxyHeader", // "parameterOrder": [ // "project", - // "targetSslProxy" + // "targetTcpProxy" // ], // "parameters": { // "project": { @@ -117402,16 +133468,17 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op // "location": "query", // "type": "string" // }, - // "targetSslProxy": { - // "description": "Name of the TargetSslProxy resource whose SSL policy is to be set. 
The name must be 1-63 characters long, and comply with RFC1035.", + // "targetTcpProxy": { + // "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", + // "path": "{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader", // "request": { - // "$ref": "SslPolicyReference" + // "$ref": "TargetTcpProxiesSetProxyHeaderRequest" // }, // "response": { // "$ref": "Operation" @@ -117424,89 +133491,155 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.targetSslProxies.testIamPermissions": +// method id "compute.targetVpnGateways.aggregatedList": -type TargetSslProxiesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetVpnGatewaysAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *TargetSslProxiesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetSslProxiesTestIamPermissionsCall { - c := &TargetSslProxiesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of target VPN gateways. +func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGatewaysAggregatedListCall { + c := &TargetVpnGatewaysAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *TargetVpnGatewaysAggregatedListCall) Filter(filter string) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *TargetVpnGatewaysAggregatedListCall) MaxResults(maxResults int64) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *TargetVpnGatewaysAggregatedListCall) OrderBy(orderBy string) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetVpnGatewaysAggregatedListCall) PageToken(pageToken string) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetSslProxiesTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetSslProxiesTestIamPermissionsCall { +func (c *TargetVpnGatewaysAggregatedListCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetVpnGatewaysAggregatedListCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetSslProxiesTestIamPermissionsCall) Context(ctx context.Context) *TargetSslProxiesTestIamPermissionsCall { +func (c *TargetVpnGatewaysAggregatedListCall) Context(ctx context.Context) *TargetVpnGatewaysAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *TargetSslProxiesTestIamPermissionsCall) Header() http.Header { +func (c *TargetVpnGatewaysAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetSslProxiesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetVpnGateways") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetSslProxies.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetVpnGateways.aggregatedList" call. +// Exactly one of *TargetVpnGatewayAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *TargetVpnGatewayAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetSslProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetVpnGatewayAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -117525,7 +133658,7 @@ func (c *TargetSslProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &TargetVpnGatewayAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -117537,35 +133670,47 @@ func (c *TargetSslProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.targetSslProxies.testIamPermissions", + // "description": "Retrieves an aggregated list of target VPN gateways.", + // "httpMethod": "GET", + // "id": "compute.targetVpnGateways.aggregatedList", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/targetSslProxies/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/aggregated/targetVpnGateways", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "TargetVpnGatewayAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -117576,22 +133721,45 @@ func (c *TargetSslProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption } -// method id "compute.targetTcpProxies.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetVpnGatewaysAggregatedListCall) Pages(ctx context.Context, f func(*TargetVpnGatewayAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type TargetTcpProxiesDeleteCall struct { - s *Service - project string - targetTcpProxy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.targetVpnGateways.delete": + +type TargetVpnGatewaysDeleteCall struct { + s *Service + project string + region string + targetVpnGateway string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified TargetTcpProxy resource. -func (r *TargetTcpProxiesService) Delete(project string, targetTcpProxy string) *TargetTcpProxiesDeleteCall { - c := &TargetTcpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified target VPN gateway. +func (r *TargetVpnGatewaysService) Delete(project string, region string, targetVpnGateway string) *TargetVpnGatewaysDeleteCall { + c := &TargetVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetTcpProxy = targetTcpProxy + c.region = region + c.targetVpnGateway = targetVpnGateway return c } @@ -117609,7 +133777,7 @@ func (r *TargetTcpProxiesService) Delete(project string, targetTcpProxy string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetTcpProxiesDeleteCall) RequestId(requestId string) *TargetTcpProxiesDeleteCall { +func (c *TargetVpnGatewaysDeleteCall) RequestId(requestId string) *TargetVpnGatewaysDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -117617,7 +133785,7 @@ func (c *TargetTcpProxiesDeleteCall) RequestId(requestId string) *TargetTcpProxi // Fields allows partial responses to be retrieved. 
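// ---------------------------------------------------------------------------
// Usage sketch (editorial aside, not part of the generated client or of this
// diff): paging through the aggregated target-VPN-gateway listing shown above
// with Filter, MaxResults and Pages. It assumes a *Service built elsewhere
// (e.g. via New with an authenticated *http.Client), the standard "context"
// and "fmt" imports, and that TargetVpnGatewayAggregatedList.Items is the
// usual map of scope name to scoped list; the project name and filter value
// are placeholders.
func exampleAggregatedTargetVpnGateways(ctx context.Context, svc *Service) error {
	call := svc.TargetVpnGateways.AggregatedList("my-project").
		Filter("name != example-gateway"). // same syntax as the filter description above
		MaxResults(100)
	return call.Pages(ctx, func(page *TargetVpnGatewayAggregatedList) error {
		for scope, scoped := range page.Items { // Items and TargetVpnGateways are assumed field names
			for _, gw := range scoped.TargetVpnGateways {
				fmt.Printf("%s: %s\n", scope, gw.Name)
			}
		}
		return nil
	})
}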
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetTcpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetTcpProxiesDeleteCall { +func (c *TargetVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -117625,21 +133793,21 @@ func (c *TargetTcpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetTcpProx // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetTcpProxiesDeleteCall) Context(ctx context.Context) *TargetTcpProxiesDeleteCall { +func (c *TargetVpnGatewaysDeleteCall) Context(ctx context.Context) *TargetVpnGatewaysDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetTcpProxiesDeleteCall) Header() http.Header { +func (c *TargetVpnGatewaysDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -117648,7 +133816,7 @@ func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -117656,20 +133824,21 @@ func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetTcpProxy": c.targetTcpProxy, + "project": c.project, + "region": c.region, + "targetVpnGateway": c.targetVpnGateway, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetTcpProxies.delete" call. +// Do executes the "compute.targetVpnGateways.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -117700,12 +133869,13 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified TargetTcpProxy resource.", + // "description": "Deletes the specified target VPN gateway.", // "httpMethod": "DELETE", - // "id": "compute.targetTcpProxies.delete", + // "id": "compute.targetVpnGateways.delete", // "parameterOrder": [ // "project", - // "targetTcpProxy" + // "region", + // "targetVpnGateway" // ], // "parameters": { // "project": { @@ -117715,20 +133885,27 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "targetTcpProxy": { - // "description": "Name of the TargetTcpProxy resource to delete.", + // "targetVpnGateway": { + // "description": "Name of the target VPN gateway to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetTcpProxies/{targetTcpProxy}", + // "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", // "response": { // "$ref": "Operation" // }, @@ -117740,31 +133917,33 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.targetTcpProxies.get": +// method id "compute.targetVpnGateways.get": -type TargetTcpProxiesGetCall struct { - s *Service - project string - targetTcpProxy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type TargetVpnGatewaysGetCall struct { + s *Service + project string + region string + targetVpnGateway string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified TargetTcpProxy resource. Gets a list of -// available target TCP proxies by making a list() request. -func (r *TargetTcpProxiesService) Get(project string, targetTcpProxy string) *TargetTcpProxiesGetCall { - c := &TargetTcpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified target VPN gateway. Gets a list of +// available target VPN gateways by making a list() request. 
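// ---------------------------------------------------------------------------
// Usage sketch (editorial aside, not part of the generated client or of this
// diff): fetching a target VPN gateway and then deleting it with an
// idempotency token, as described by the RequestId documentation above.
// Assumes a *Service built elsewhere plus "context" and "fmt" imports; the
// Name/Status fields follow the usual Compute resource shape, and the
// project, region, gateway and UUID values are placeholders.
func exampleDeleteTargetVpnGateway(ctx context.Context, svc *Service) error {
	gw, err := svc.TargetVpnGateways.Get("my-project", "us-central1", "my-gateway").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("deleting %s\n", gw.Name)

	op, err := svc.TargetVpnGateways.Delete("my-project", "us-central1", gw.Name).
		RequestId("123e4567-e89b-12d3-a456-426614174000"). // retrying with the same ID is safe
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("delete operation %s is %s\n", op.Name, op.Status)
	return nil
}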
+func (r *TargetVpnGatewaysService) Get(project string, region string, targetVpnGateway string) *TargetVpnGatewaysGetCall { + c := &TargetVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetTcpProxy = targetTcpProxy + c.region = region + c.targetVpnGateway = targetVpnGateway return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetTcpProxiesGetCall) Fields(s ...googleapi.Field) *TargetTcpProxiesGetCall { +func (c *TargetVpnGatewaysGetCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -117774,7 +133953,7 @@ func (c *TargetTcpProxiesGetCall) Fields(s ...googleapi.Field) *TargetTcpProxies // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetTcpProxiesGetCall) IfNoneMatch(entityTag string) *TargetTcpProxiesGetCall { +func (c *TargetVpnGatewaysGetCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysGetCall { c.ifNoneMatch_ = entityTag return c } @@ -117782,21 +133961,21 @@ func (c *TargetTcpProxiesGetCall) IfNoneMatch(entityTag string) *TargetTcpProxie // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetTcpProxiesGetCall) Context(ctx context.Context) *TargetTcpProxiesGetCall { +func (c *TargetVpnGatewaysGetCall) Context(ctx context.Context) *TargetVpnGatewaysGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetTcpProxiesGetCall) Header() http.Header { +func (c *TargetVpnGatewaysGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -117808,7 +133987,7 @@ func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -117816,20 +133995,21 @@ func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetTcpProxy": c.targetTcpProxy, + "project": c.project, + "region": c.region, + "targetVpnGateway": c.targetVpnGateway, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetTcpProxies.get" call. -// Exactly one of *TargetTcpProxy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *TargetTcpProxy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.targetVpnGateways.get" call. 
+// Exactly one of *TargetVpnGateway or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetVpnGateway.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxy, error) { +func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnGateway, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -117848,7 +134028,7 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetTcpProxy{ + ret := &TargetVpnGateway{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -117860,12 +134040,13 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr } return ret, nil // { - // "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request.", + // "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request.", // "httpMethod": "GET", - // "id": "compute.targetTcpProxies.get", + // "id": "compute.targetVpnGateways.get", // "parameterOrder": [ // "project", - // "targetTcpProxy" + // "region", + // "targetVpnGateway" // ], // "parameters": { // "project": { @@ -117875,17 +134056,24 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr // "required": true, // "type": "string" // }, - // "targetTcpProxy": { - // "description": "Name of the TargetTcpProxy resource to return.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "targetVpnGateway": { + // "description": "Name of the target VPN gateway to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/targetTcpProxies/{targetTcpProxy}", + // "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", // "response": { - // "$ref": "TargetTcpProxy" + // "$ref": "TargetVpnGateway" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -117896,23 +134084,25 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr } -// method id "compute.targetTcpProxies.insert": +// method id "compute.targetVpnGateways.insert": -type TargetTcpProxiesInsertCall struct { - s *Service - project string - targettcpproxy *TargetTcpProxy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetVpnGatewaysInsertCall struct { + s *Service + project string + region string + targetvpngateway *TargetVpnGateway + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a TargetTcpProxy resource in the specified project -// using the data included in the request. 
-func (r *TargetTcpProxiesService) Insert(project string, targettcpproxy *TargetTcpProxy) *TargetTcpProxiesInsertCall { - c := &TargetTcpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a target VPN gateway in the specified project and +// region using the data included in the request. +func (r *TargetVpnGatewaysService) Insert(project string, region string, targetvpngateway *TargetVpnGateway) *TargetVpnGatewaysInsertCall { + c := &TargetVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targettcpproxy = targettcpproxy + c.region = region + c.targetvpngateway = targetvpngateway return c } @@ -117930,7 +134120,7 @@ func (r *TargetTcpProxiesService) Insert(project string, targettcpproxy *TargetT // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetTcpProxiesInsertCall) RequestId(requestId string) *TargetTcpProxiesInsertCall { +func (c *TargetVpnGatewaysInsertCall) RequestId(requestId string) *TargetVpnGatewaysInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -117938,7 +134128,7 @@ func (c *TargetTcpProxiesInsertCall) RequestId(requestId string) *TargetTcpProxi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetTcpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetTcpProxiesInsertCall { +func (c *TargetVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -117946,35 +134136,35 @@ func (c *TargetTcpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetTcpProx // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetTcpProxiesInsertCall) Context(ctx context.Context) *TargetTcpProxiesInsertCall { +func (c *TargetVpnGatewaysInsertCall) Context(ctx context.Context) *TargetVpnGatewaysInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetTcpProxiesInsertCall) Header() http.Header { +func (c *TargetVpnGatewaysInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targettcpproxy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetvpngateway) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -117983,18 +134173,19 @@ func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, erro req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetTcpProxies.insert" call. +// Do executes the "compute.targetVpnGateways.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -118025,11 +134216,12 @@ func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a TargetTcpProxy resource in the specified project using the data included in the request.", + // "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.targetTcpProxies.insert", + // "id": "compute.targetVpnGateways.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "project": { @@ -118039,15 +134231,22 @@ func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/targetTcpProxies", + // "path": "{project}/regions/{region}/targetVpnGateways", // "request": { - // "$ref": "TargetTcpProxy" + // "$ref": "TargetVpnGateway" // }, // "response": { // "$ref": "Operation" @@ -118060,22 +134259,24 @@ func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.targetTcpProxies.list": +// method id "compute.targetVpnGateways.list": -type TargetTcpProxiesListCall struct { +type TargetVpnGatewaysListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of TargetTcpProxy resources available to the -// specified project. -func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall { - c := &TargetTcpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of target VPN gateways available to the +// specified project and region. +func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVpnGatewaysListCall { + c := &TargetVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } @@ -118101,7 +134302,7 @@ func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *TargetTcpProxiesListCall) Filter(filter string) *TargetTcpProxiesListCall { +func (c *TargetVpnGatewaysListCall) Filter(filter string) *TargetVpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c } @@ -118112,7 +134313,7 @@ func (c *TargetTcpProxiesListCall) Filter(filter string) *TargetTcpProxiesListCa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *TargetTcpProxiesListCall) MaxResults(maxResults int64) *TargetTcpProxiesListCall { +func (c *TargetVpnGatewaysListCall) MaxResults(maxResults int64) *TargetVpnGatewaysListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -118129,7 +134330,7 @@ func (c *TargetTcpProxiesListCall) MaxResults(maxResults int64) *TargetTcpProxie // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *TargetTcpProxiesListCall) OrderBy(orderBy string) *TargetTcpProxiesListCall { +func (c *TargetVpnGatewaysListCall) OrderBy(orderBy string) *TargetVpnGatewaysListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -118137,7 +134338,7 @@ func (c *TargetTcpProxiesListCall) OrderBy(orderBy string) *TargetTcpProxiesList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
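// ---------------------------------------------------------------------------
// Usage sketch (editorial aside, not part of the generated client or of this
// diff): creating a gateway with Insert and then walking the regional List
// results using the MaxResults/OrderBy/PageToken setters documented above.
// Assumes a *Service built elsewhere plus "context" and "fmt" imports; the
// TargetVpnGateway.Network and TargetVpnGatewayList.Items fields are assumed
// from the usual generated shapes, and every literal value is a placeholder.
func exampleInsertAndListTargetVpnGateways(ctx context.Context, svc *Service) error {
	op, err := svc.TargetVpnGateways.Insert("my-project", "us-central1", &TargetVpnGateway{
		Name:    "my-gateway",
		Network: "global/networks/default",
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("insert operation: %s\n", op.Name)

	call := svc.TargetVpnGateways.List("my-project", "us-central1").
		MaxResults(50).
		OrderBy("creationTimestamp desc").
		Context(ctx)
	for {
		page, err := call.Do()
		if err != nil {
			return err
		}
		for _, gw := range page.Items {
			fmt.Println(gw.Name)
		}
		if page.NextPageToken == "" {
			return nil
		}
		call.PageToken(page.NextPageToken) // the same stepping that Pages performs internally
	}
}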
-func (c *TargetTcpProxiesListCall) PageToken(pageToken string) *TargetTcpProxiesListCall { +func (c *TargetVpnGatewaysListCall) PageToken(pageToken string) *TargetVpnGatewaysListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -118145,7 +134346,7 @@ func (c *TargetTcpProxiesListCall) PageToken(pageToken string) *TargetTcpProxies // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetTcpProxiesListCall) Fields(s ...googleapi.Field) *TargetTcpProxiesListCall { +func (c *TargetVpnGatewaysListCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -118155,7 +134356,7 @@ func (c *TargetTcpProxiesListCall) Fields(s ...googleapi.Field) *TargetTcpProxie // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetTcpProxiesListCall) IfNoneMatch(entityTag string) *TargetTcpProxiesListCall { +func (c *TargetVpnGatewaysListCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysListCall { c.ifNoneMatch_ = entityTag return c } @@ -118163,21 +134364,21 @@ func (c *TargetTcpProxiesListCall) IfNoneMatch(entityTag string) *TargetTcpProxi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetTcpProxiesListCall) Context(ctx context.Context) *TargetTcpProxiesListCall { +func (c *TargetVpnGatewaysListCall) Context(ctx context.Context) *TargetVpnGatewaysListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetTcpProxiesListCall) Header() http.Header { +func (c *TargetVpnGatewaysListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -118189,7 +134390,7 @@ func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -118198,18 +134399,19 @@ func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetTcpProxies.list" call. -// Exactly one of *TargetTcpProxyList or error will be non-nil. Any +// Do executes the "compute.targetVpnGateways.list" call. +// Exactly one of *TargetVpnGatewayList or error will be non-nil. Any // non-2xx status code is an error. 
Response headers are in either -// *TargetTcpProxyList.ServerResponse.Header or (if a response was +// *TargetVpnGatewayList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxyList, error) { +func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpnGatewayList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -118228,7 +134430,7 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetTcpProxyList{ + ret := &TargetVpnGatewayList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -118240,11 +134442,12 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP } return ret, nil // { - // "description": "Retrieves the list of TargetTcpProxy resources available to the specified project.", + // "description": "Retrieves a list of target VPN gateways available to the specified project and region.", // "httpMethod": "GET", - // "id": "compute.targetTcpProxies.list", + // "id": "compute.targetVpnGateways.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { @@ -118276,11 +134479,18 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/targetTcpProxies", + // "path": "{project}/regions/{region}/targetVpnGateways", // "response": { - // "$ref": "TargetTcpProxyList" + // "$ref": "TargetVpnGatewayList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -118294,7 +134504,7 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *TargetTcpProxiesListCall) Pages(ctx context.Context, f func(*TargetTcpProxyList) error) error { +func (c *TargetVpnGatewaysListCall) Pages(ctx context.Context, f func(*TargetVpnGatewayList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -118312,24 +134522,27 @@ func (c *TargetTcpProxiesListCall) Pages(ctx context.Context, f func(*TargetTcpP } } -// method id "compute.targetTcpProxies.setBackendService": +// method id "compute.targetVpnGateways.setLabels": -type TargetTcpProxiesSetBackendServiceCall struct { - s *Service - project string - targetTcpProxy string - targettcpproxiessetbackendservicerequest *TargetTcpProxiesSetBackendServiceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetVpnGatewaysSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetBackendService: Changes the BackendService for TargetTcpProxy. -func (r *TargetTcpProxiesService) SetBackendService(project string, targetTcpProxy string, targettcpproxiessetbackendservicerequest *TargetTcpProxiesSetBackendServiceRequest) *TargetTcpProxiesSetBackendServiceCall { - c := &TargetTcpProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on a TargetVpnGateway. To learn more about +// labels, read the Labeling Resources documentation. +func (r *TargetVpnGatewaysService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *TargetVpnGatewaysSetLabelsCall { + c := &TargetVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetTcpProxy = targetTcpProxy - c.targettcpproxiessetbackendservicerequest = targettcpproxiessetbackendservicerequest + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest return c } @@ -118347,7 +134560,7 @@ func (r *TargetTcpProxiesService) SetBackendService(project string, targetTcpPro // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetTcpProxiesSetBackendServiceCall) RequestId(requestId string) *TargetTcpProxiesSetBackendServiceCall { +func (c *TargetVpnGatewaysSetLabelsCall) RequestId(requestId string) *TargetVpnGatewaysSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -118355,7 +134568,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) RequestId(requestId string) *Tar // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetTcpProxiesSetBackendServiceCall) Fields(s ...googleapi.Field) *TargetTcpProxiesSetBackendServiceCall { +func (c *TargetVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -118363,35 +134576,35 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Fields(s ...googleapi.Field) *Ta // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
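// ---------------------------------------------------------------------------
// Usage sketch (editorial aside, not part of the generated client or of this
// diff): updating labels through the SetLabels call introduced above. It
// assumes a *Service built elsewhere plus "context" and "fmt" imports, and
// that TargetVpnGateway and RegionSetLabelsRequest carry the usual
// Labels/LabelFingerprint fields; project, region and names are placeholders.
func exampleSetTargetVpnGatewayLabels(ctx context.Context, svc *Service) error {
	gw, err := svc.TargetVpnGateways.Get("my-project", "us-central1", "my-gateway").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	op, err := svc.TargetVpnGateways.SetLabels("my-project", "us-central1", gw.Name, &RegionSetLabelsRequest{
		// The fingerprint from the current resource guards against concurrent label edits.
		LabelFingerprint: gw.LabelFingerprint,
		Labels:           map[string]string{"env": "staging", "team": "networking"},
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("setLabels operation: %s\n", op.Name)
	return nil
}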
-func (c *TargetTcpProxiesSetBackendServiceCall) Context(ctx context.Context) *TargetTcpProxiesSetBackendServiceCall { +func (c *TargetVpnGatewaysSetLabelsCall) Context(ctx context.Context) *TargetVpnGatewaysSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetTcpProxiesSetBackendServiceCall) Header() http.Header { +func (c *TargetVpnGatewaysSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targettcpproxiessetbackendservicerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -118399,20 +134612,21 @@ func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetTcpProxy": c.targetTcpProxy, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetTcpProxies.setBackendService" call. +// Do executes the "compute.targetVpnGateways.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -118443,12 +134657,13 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the BackendService for TargetTcpProxy.", + // "description": "Sets the labels on a TargetVpnGateway. 
To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.targetTcpProxies.setBackendService", + // "id": "compute.targetVpnGateways.setLabels", // "parameterOrder": [ // "project", - // "targetTcpProxy" + // "region", + // "resource" // ], // "parameters": { // "project": { @@ -118458,22 +134673,29 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "targetTcpProxy": { - // "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetTcpProxies/{targetTcpProxy}/setBackendService", + // "path": "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", // "request": { - // "$ref": "TargetTcpProxiesSetBackendServiceRequest" + // "$ref": "RegionSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -118486,50 +134708,34 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) } -// method id "compute.targetTcpProxies.setProxyHeader": +// method id "compute.targetVpnGateways.testIamPermissions": -type TargetTcpProxiesSetProxyHeaderCall struct { - s *Service - project string - targetTcpProxy string - targettcpproxiessetproxyheaderrequest *TargetTcpProxiesSetProxyHeaderRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetVpnGatewaysTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetProxyHeader: Changes the ProxyHeaderType for TargetTcpProxy. -func (r *TargetTcpProxiesService) SetProxyHeader(project string, targetTcpProxy string, targettcpproxiessetproxyheaderrequest *TargetTcpProxiesSetProxyHeaderRequest) *TargetTcpProxiesSetProxyHeaderCall { - c := &TargetTcpProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
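// ---------------------------------------------------------------------------
// Usage sketch (editorial aside, not part of the generated client or of this
// diff): probing the caller's permissions with the TestIamPermissions call
// described above. Assumes a *Service built elsewhere plus "context" and
// "fmt" imports; the permission strings and resource names are placeholders,
// and TestPermissionsRequest/TestPermissionsResponse are assumed to expose a
// Permissions []string field as elsewhere in the Compute API.
func exampleTargetVpnGatewayPermissions(ctx context.Context, svc *Service) error {
	resp, err := svc.TargetVpnGateways.TestIamPermissions("my-project", "us-central1", "my-gateway",
		&TestPermissionsRequest{
			Permissions: []string{
				"compute.targetVpnGateways.get",
				"compute.targetVpnGateways.delete",
			},
		}).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("granted: %v\n", resp.Permissions)
	return nil
}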
+func (r *TargetVpnGatewaysService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetVpnGatewaysTestIamPermissionsCall { + c := &TargetVpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.targetTcpProxy = targetTcpProxy - c.targettcpproxiessetproxyheaderrequest = targettcpproxiessetproxyheaderrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetTcpProxiesSetProxyHeaderCall) RequestId(requestId string) *TargetTcpProxiesSetProxyHeaderCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetTcpProxiesSetProxyHeaderCall) Fields(s ...googleapi.Field) *TargetTcpProxiesSetProxyHeaderCall { +func (c *TargetVpnGatewaysTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -118537,35 +134743,35 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Fields(s ...googleapi.Field) *Targe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetTcpProxiesSetProxyHeaderCall) Context(ctx context.Context) *TargetTcpProxiesSetProxyHeaderCall { +func (c *TargetVpnGatewaysTestIamPermissionsCall) Context(ctx context.Context) *TargetVpnGatewaysTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *TargetTcpProxiesSetProxyHeaderCall) Header() http.Header { +func (c *TargetVpnGatewaysTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targettcpproxiessetproxyheaderrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -118573,20 +134779,21 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "targetTcpProxy": c.targetTcpProxy, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetTcpProxies.setProxyHeader" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.targetVpnGateways.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -118605,7 +134812,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -118617,12 +134824,13 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Changes the ProxyHeaderType for TargetTcpProxy.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.targetTcpProxies.setProxyHeader", + // "id": "compute.targetVpnGateways.testIamPermissions", // "parameterOrder": [ // "project", - // "targetTcpProxy" + // "region", + // "resource" // ], // "parameters": { // "project": { @@ -118632,37 +134840,40 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "targetTcpProxy": { - // "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/targetTcpProxies/{targetTcpProxy}/setProxyHeader", + // "path": "{project}/regions/{region}/targetVpnGateways/{resource}/testIamPermissions", // "request": { - // "$ref": "TargetTcpProxiesSetProxyHeaderRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.targetVpnGateways.aggregatedList": +// method id "compute.urlMaps.aggregatedList": -type TargetVpnGatewaysAggregatedListCall struct { +type UrlMapsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -118671,9 +134882,10 @@ type TargetVpnGatewaysAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of target VPN gateways. 
-func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGatewaysAggregatedListCall { - c := &TargetVpnGatewaysAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of all UrlMap resources, regional +// and global, available to the specified project. +func (r *UrlMapsService) AggregatedList(project string) *UrlMapsAggregatedListCall { + c := &UrlMapsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -118700,7 +134912,7 @@ func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGate // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *TargetVpnGatewaysAggregatedListCall) Filter(filter string) *TargetVpnGatewaysAggregatedListCall { +func (c *UrlMapsAggregatedListCall) Filter(filter string) *UrlMapsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -118711,7 +134923,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Filter(filter string) *TargetVpnGa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *TargetVpnGatewaysAggregatedListCall) MaxResults(maxResults int64) *TargetVpnGatewaysAggregatedListCall { +func (c *UrlMapsAggregatedListCall) MaxResults(maxResults int64) *UrlMapsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -118728,7 +134940,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) MaxResults(maxResults int64) *Targ // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *TargetVpnGatewaysAggregatedListCall) OrderBy(orderBy string) *TargetVpnGatewaysAggregatedListCall { +func (c *UrlMapsAggregatedListCall) OrderBy(orderBy string) *UrlMapsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -118736,7 +134948,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) OrderBy(orderBy string) *TargetVpn // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *TargetVpnGatewaysAggregatedListCall) PageToken(pageToken string) *TargetVpnGatewaysAggregatedListCall { +func (c *UrlMapsAggregatedListCall) PageToken(pageToken string) *UrlMapsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -118744,7 +134956,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) PageToken(pageToken string) *Targe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetVpnGatewaysAggregatedListCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysAggregatedListCall { +func (c *UrlMapsAggregatedListCall) Fields(s ...googleapi.Field) *UrlMapsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -118754,7 +134966,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Fields(s ...googleapi.Field) *Targ // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
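// ---------------------------------------------------------------------------
// Usage sketch (editorial aside, not part of the generated client or of this
// diff): a conditional re-fetch of the UrlMaps aggregated list using
// IfNoneMatch and googleapi.IsNotModified, as the comments above describe.
// Assumes a *Service built elsewhere, imports of "context", "fmt" and
// "google.golang.org/api/googleapi", and that the server returns a usable
// Etag header for this listing; the project name is a placeholder.
func exampleConditionalUrlMapsList(ctx context.Context, svc *Service) error {
	first, err := svc.UrlMaps.AggregatedList("my-project").Context(ctx).Do()
	if err != nil {
		return err
	}
	etag := first.ServerResponse.Header.Get("Etag") // via the embedded googleapi.ServerResponse

	_, err = svc.UrlMaps.AggregatedList("my-project").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("url maps unchanged since the previous fetch")
		return nil
	}
	return err
}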
-func (c *TargetVpnGatewaysAggregatedListCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysAggregatedListCall { +func (c *UrlMapsAggregatedListCall) IfNoneMatch(entityTag string) *UrlMapsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -118762,21 +134974,21 @@ func (c *TargetVpnGatewaysAggregatedListCall) IfNoneMatch(entityTag string) *Tar // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetVpnGatewaysAggregatedListCall) Context(ctx context.Context) *TargetVpnGatewaysAggregatedListCall { +func (c *UrlMapsAggregatedListCall) Context(ctx context.Context) *UrlMapsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetVpnGatewaysAggregatedListCall) Header() http.Header { +func (c *UrlMapsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *UrlMapsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -118788,7 +135000,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Respo var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetVpnGateways") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/urlMaps") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -118801,14 +135013,14 @@ func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetVpnGateways.aggregatedList" call. -// Exactly one of *TargetVpnGatewayAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *TargetVpnGatewayAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.urlMaps.aggregatedList" call. +// Exactly one of *UrlMapsAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *UrlMapsAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetVpnGatewayAggregatedList, error) { +func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -118827,7 +135039,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetVpnGatewayAggregatedList{ + ret := &UrlMapsAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -118839,9 +135051,9 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves an aggregated list of target VPN gateways.", + // "description": "Retrieves the list of all UrlMap resources, regional and global, available to the specified project.", // "httpMethod": "GET", - // "id": "compute.targetVpnGateways.aggregatedList", + // "id": "compute.urlMaps.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -118870,16 +135082,16 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "Name of the project scoping this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/targetVpnGateways", + // "path": "{project}/aggregated/urlMaps", // "response": { - // "$ref": "TargetVpnGatewayAggregatedList" + // "$ref": "UrlMapsAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -118893,7 +135105,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *TargetVpnGatewaysAggregatedListCall) Pages(ctx context.Context, f func(*TargetVpnGatewayAggregatedList) error) error { +func (c *UrlMapsAggregatedListCall) Pages(ctx context.Context, f func(*UrlMapsAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -118911,24 +135123,23 @@ func (c *TargetVpnGatewaysAggregatedListCall) Pages(ctx context.Context, f func( } } -// method id "compute.targetVpnGateways.delete": +// method id "compute.urlMaps.delete": -type TargetVpnGatewaysDeleteCall struct { - s *Service - project string - region string - targetVpnGateway string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type UrlMapsDeleteCall struct { + s *Service + project string + urlMap string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified target VPN gateway. -func (r *TargetVpnGatewaysService) Delete(project string, region string, targetVpnGateway string) *TargetVpnGatewaysDeleteCall { - c := &TargetVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified UrlMap resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/delete +func (r *UrlMapsService) Delete(project string, urlMap string) *UrlMapsDeleteCall { + c := &UrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetVpnGateway = targetVpnGateway + c.urlMap = urlMap return c } @@ -118946,7 +135157,7 @@ func (r *TargetVpnGatewaysService) Delete(project string, region string, targetV // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetVpnGatewaysDeleteCall) RequestId(requestId string) *TargetVpnGatewaysDeleteCall { +func (c *UrlMapsDeleteCall) RequestId(requestId string) *UrlMapsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -118954,7 +135165,7 @@ func (c *TargetVpnGatewaysDeleteCall) RequestId(requestId string) *TargetVpnGate // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysDeleteCall { +func (c *UrlMapsDeleteCall) Fields(s ...googleapi.Field) *UrlMapsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -118962,21 +135173,21 @@ func (c *TargetVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *TargetVpnGat // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetVpnGatewaysDeleteCall) Context(ctx context.Context) *TargetVpnGatewaysDeleteCall { +func (c *UrlMapsDeleteCall) Context(ctx context.Context) *UrlMapsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetVpnGatewaysDeleteCall) Header() http.Header { +func (c *UrlMapsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -118985,7 +135196,7 @@ func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -118993,21 +135204,20 @@ func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetVpnGateway": c.targetVpnGateway, + "project": c.project, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetVpnGateways.delete" call. +// Do executes the "compute.urlMaps.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -119038,13 +135248,12 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified target VPN gateway.", + // "description": "Deletes the specified UrlMap resource.", // "httpMethod": "DELETE", - // "id": "compute.targetVpnGateways.delete", + // "id": "compute.urlMaps.delete", // "parameterOrder": [ // "project", - // "region", - // "targetVpnGateway" + // "urlMap" // ], // "parameters": { // "project": { @@ -119054,27 +135263,20 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "targetVpnGateway": { - // "description": "Name of the target VPN gateway to delete.", + // "urlMap": { + // "description": "Name of the UrlMap resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + // "path": "{project}/global/urlMaps/{urlMap}", // "response": { // "$ref": "Operation" // }, @@ -119086,33 +135288,32 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.targetVpnGateways.get": +// method id "compute.urlMaps.get": -type TargetVpnGatewaysGetCall struct { - s *Service - project string - region string - targetVpnGateway string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type UrlMapsGetCall struct { + s *Service + project string + urlMap string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified target VPN gateway. Gets a list of -// available target VPN gateways by making a list() request. 
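// Illustrative sketch, not part of this diff: deleting a URL map with the Delete
// call shown above and waiting for the returned global Operation to finish. The
// request ID is a placeholder UUID; in practice it should be freshly generated per
// logical request so retries are de-duplicated as the RequestId comment describes.
// Assumes imports "context", "fmt", "time" and compute "google.golang.org/api/compute/v1".
func deleteURLMap(ctx context.Context, svc *compute.Service, project, name string) error {
	op, err := svc.UrlMaps.Delete(project, name).
		RequestId("3f2c6c0e-9f6d-4e0e-8f7a-1c2b3d4e5f60"). // placeholder UUID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// Poll the global operation until it reaches DONE.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.GlobalOperations.Get(project, op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("delete failed: %+v", op.Error.Errors)
	}
	return nil
}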
-func (r *TargetVpnGatewaysService) Get(project string, region string, targetVpnGateway string) *TargetVpnGatewaysGetCall { - c := &TargetVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified UrlMap resource. Gets a list of available +// URL maps by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/get +func (r *UrlMapsService) Get(project string, urlMap string) *UrlMapsGetCall { + c := &UrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetVpnGateway = targetVpnGateway + c.urlMap = urlMap return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetVpnGatewaysGetCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysGetCall { +func (c *UrlMapsGetCall) Fields(s ...googleapi.Field) *UrlMapsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -119122,7 +135323,7 @@ func (c *TargetVpnGatewaysGetCall) Fields(s ...googleapi.Field) *TargetVpnGatewa // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *TargetVpnGatewaysGetCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysGetCall { +func (c *UrlMapsGetCall) IfNoneMatch(entityTag string) *UrlMapsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -119130,21 +135331,21 @@ func (c *TargetVpnGatewaysGetCall) IfNoneMatch(entityTag string) *TargetVpnGatew // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetVpnGatewaysGetCall) Context(ctx context.Context) *TargetVpnGatewaysGetCall { +func (c *UrlMapsGetCall) Context(ctx context.Context) *UrlMapsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetVpnGatewaysGetCall) Header() http.Header { +func (c *UrlMapsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { +func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -119156,7 +135357,7 @@ func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -119164,21 +135365,20 @@ func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetVpnGateway": c.targetVpnGateway, + "project": c.project, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetVpnGateways.get" call. -// Exactly one of *TargetVpnGateway or error will be non-nil. 
Any -// non-2xx status code is an error. Response headers are in either -// *TargetVpnGateway.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnGateway, error) { +// Do executes the "compute.urlMaps.get" call. +// Exactly one of *UrlMap or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *UrlMap.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -119197,7 +135397,7 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetVpnGateway{ + ret := &UrlMap{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -119209,13 +135409,12 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG } return ret, nil // { - // "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request.", + // "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", // "httpMethod": "GET", - // "id": "compute.targetVpnGateways.get", + // "id": "compute.urlMaps.get", // "parameterOrder": [ // "project", - // "region", - // "targetVpnGateway" + // "urlMap" // ], // "parameters": { // "project": { @@ -119225,53 +135424,211 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "urlMap": { + // "description": "Name of the UrlMap resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" - // }, - // "targetVpnGateway": { - // "description": "Name of the target VPN gateway to return.", + // } + // }, + // "path": "{project}/global/urlMaps/{urlMap}", + // "response": { + // "$ref": "UrlMap" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.urlMaps.insert": + +type UrlMapsInsertCall struct { + s *Service + project string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a UrlMap resource in the specified project using the +// data included in the request. 
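// Illustrative sketch, not part of this diff: a conditional read against the
// UrlMaps.Get call shown above, reusing an ETag cached from an earlier response.
// googleapi.IsNotModified separates the Not Modified case from real errors. The
// project and URL map names are whatever the caller supplies.
// Assumes imports "context", compute "google.golang.org/api/compute/v1" and
// "google.golang.org/api/googleapi".
func getURLMapIfChanged(ctx context.Context, svc *compute.Service, project, name, cachedETag string) (*compute.UrlMap, string, error) {
	um, err := svc.UrlMaps.Get(project, name).
		IfNoneMatch(cachedETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// The resource has not changed since cachedETag; keep the cached copy.
		return nil, cachedETag, nil
	}
	if err != nil {
		return nil, "", err
	}
	return um, um.ServerResponse.Header.Get("Etag"), nil
}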
+// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/insert +func (r *UrlMapsService) Insert(project string, urlmap *UrlMap) *UrlMapsInsertCall { + c := &UrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlmap = urlmap + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *UrlMapsInsertCall) RequestId(requestId string) *UrlMapsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsInsertCall) Fields(s ...googleapi.Field) *UrlMapsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsInsertCall) Context(ctx context.Context) *UrlMapsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *UrlMapsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.urlMaps.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.urlMaps.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + // "path": "{project}/global/urlMaps", + // "request": { + // "$ref": "UrlMap" + // }, // "response": { - // "$ref": "TargetVpnGateway" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.targetVpnGateways.insert": +// method id "compute.urlMaps.invalidateCache": -type TargetVpnGatewaysInsertCall struct { - s *Service - project string - region string - targetvpngateway *TargetVpnGateway - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type UrlMapsInvalidateCacheCall struct { + s *Service + project string + urlMap string + cacheinvalidationrule *CacheInvalidationRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a target VPN gateway in the specified project and -// region using the data included in the request. -func (r *TargetVpnGatewaysService) Insert(project string, region string, targetvpngateway *TargetVpnGateway) *TargetVpnGatewaysInsertCall { - c := &TargetVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// InvalidateCache: Initiates a cache invalidation operation, +// invalidating the specified path, scoped to the specified UrlMap. 
+func (r *UrlMapsService) InvalidateCache(project string, urlMap string, cacheinvalidationrule *CacheInvalidationRule) *UrlMapsInvalidateCacheCall { + c := &UrlMapsInvalidateCacheCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.targetvpngateway = targetvpngateway + c.urlMap = urlMap + c.cacheinvalidationrule = cacheinvalidationrule return c } @@ -119289,7 +135646,7 @@ func (r *TargetVpnGatewaysService) Insert(project string, region string, targetv // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetVpnGatewaysInsertCall) RequestId(requestId string) *TargetVpnGatewaysInsertCall { +func (c *UrlMapsInvalidateCacheCall) RequestId(requestId string) *UrlMapsInvalidateCacheCall { c.urlParams_.Set("requestId", requestId) return c } @@ -119297,7 +135654,7 @@ func (c *TargetVpnGatewaysInsertCall) RequestId(requestId string) *TargetVpnGate // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysInsertCall { +func (c *UrlMapsInvalidateCacheCall) Fields(s ...googleapi.Field) *UrlMapsInvalidateCacheCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -119305,35 +135662,35 @@ func (c *TargetVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *TargetVpnGat // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetVpnGatewaysInsertCall) Context(ctx context.Context) *TargetVpnGatewaysInsertCall { +func (c *UrlMapsInvalidateCacheCall) Context(ctx context.Context) *UrlMapsInvalidateCacheCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetVpnGatewaysInsertCall) Header() http.Header { +func (c *UrlMapsInvalidateCacheCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetvpngateway) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.cacheinvalidationrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/invalidateCache") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -119342,19 +135699,19 @@ func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, err req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetVpnGateways.insert" call. +// Do executes the "compute.urlMaps.invalidateCache" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -119385,12 +135742,12 @@ func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", + // "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", // "httpMethod": "POST", - // "id": "compute.targetVpnGateways.insert", + // "id": "compute.urlMaps.invalidateCache", // "parameterOrder": [ // "project", - // "region" + // "urlMap" // ], // "parameters": { // "project": { @@ -119400,22 +135757,22 @@ func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetVpnGateways", + // "path": "{project}/global/urlMaps/{urlMap}/invalidateCache", // "request": { - // "$ref": "TargetVpnGateway" + // "$ref": "CacheInvalidationRule" // }, // "response": { // "$ref": "Operation" @@ -119428,24 +135785,23 @@ func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.targetVpnGateways.list": +// method id "compute.urlMaps.list": -type TargetVpnGatewaysListCall struct { +type UrlMapsListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of target VPN gateways available to the -// specified project and region. 
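// Illustrative sketch, not part of this diff: requesting a cache invalidation
// through the InvalidateCache call documented above. The host and path pattern are
// placeholders supplied by the caller.
// Assumes imports "context" and compute "google.golang.org/api/compute/v1".
func invalidatePath(ctx context.Context, svc *compute.Service, project, urlMap string) (*compute.Operation, error) {
	rule := &compute.CacheInvalidationRule{
		Host: "www.example.com",
		Path: "/images/*",
	}
	return svc.UrlMaps.InvalidateCache(project, urlMap, rule).Context(ctx).Do()
}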
-func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVpnGatewaysListCall { - c := &TargetVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of UrlMap resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/list +func (r *UrlMapsService) List(project string) *UrlMapsListCall { + c := &UrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -119471,7 +135827,7 @@ func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVp // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *TargetVpnGatewaysListCall) Filter(filter string) *TargetVpnGatewaysListCall { +func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall { c.urlParams_.Set("filter", filter) return c } @@ -119482,7 +135838,7 @@ func (c *TargetVpnGatewaysListCall) Filter(filter string) *TargetVpnGatewaysList // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *TargetVpnGatewaysListCall) MaxResults(maxResults int64) *TargetVpnGatewaysListCall { +func (c *UrlMapsListCall) MaxResults(maxResults int64) *UrlMapsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -119499,7 +135855,7 @@ func (c *TargetVpnGatewaysListCall) MaxResults(maxResults int64) *TargetVpnGatew // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *TargetVpnGatewaysListCall) OrderBy(orderBy string) *TargetVpnGatewaysListCall { +func (c *UrlMapsListCall) OrderBy(orderBy string) *UrlMapsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -119507,7 +135863,7 @@ func (c *TargetVpnGatewaysListCall) OrderBy(orderBy string) *TargetVpnGatewaysLi // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *TargetVpnGatewaysListCall) PageToken(pageToken string) *TargetVpnGatewaysListCall { +func (c *UrlMapsListCall) PageToken(pageToken string) *UrlMapsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -119515,7 +135871,7 @@ func (c *TargetVpnGatewaysListCall) PageToken(pageToken string) *TargetVpnGatewa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetVpnGatewaysListCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysListCall { +func (c *UrlMapsListCall) Fields(s ...googleapi.Field) *UrlMapsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -119525,7 +135881,7 @@ func (c *TargetVpnGatewaysListCall) Fields(s ...googleapi.Field) *TargetVpnGatew // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *TargetVpnGatewaysListCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysListCall { +func (c *UrlMapsListCall) IfNoneMatch(entityTag string) *UrlMapsListCall { c.ifNoneMatch_ = entityTag return c } @@ -119533,21 +135889,21 @@ func (c *TargetVpnGatewaysListCall) IfNoneMatch(entityTag string) *TargetVpnGate // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetVpnGatewaysListCall) Context(ctx context.Context) *TargetVpnGatewaysListCall { +func (c *UrlMapsListCall) Context(ctx context.Context) *UrlMapsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *TargetVpnGatewaysListCall) Header() http.Header { +func (c *UrlMapsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { +func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -119559,7 +135915,7 @@ func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -119568,19 +135924,18 @@ func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetVpnGateways.list" call. -// Exactly one of *TargetVpnGatewayList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetVpnGatewayList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpnGatewayList, error) { +// Do executes the "compute.urlMaps.list" call. +// Exactly one of *UrlMapList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *UrlMapList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -119599,7 +135954,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TargetVpnGatewayList{ + ret := &UrlMapList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -119611,12 +135966,11 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn } return ret, nil // { - // "description": "Retrieves a list of target VPN gateways available to the specified project and region.", + // "description": "Retrieves the list of UrlMap resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.targetVpnGateways.list", + // "id": "compute.urlMaps.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -119648,18 +136002,11 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/targetVpnGateways", + // "path": "{project}/global/urlMaps", // "response": { - // "$ref": "TargetVpnGatewayList" + // "$ref": "UrlMapList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -119673,7 +136020,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *TargetVpnGatewaysListCall) Pages(ctx context.Context, f func(*TargetVpnGatewayList) error) error { +func (c *UrlMapsListCall) Pages(ctx context.Context, f func(*UrlMapList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -119691,372 +136038,27 @@ func (c *TargetVpnGatewaysListCall) Pages(ctx context.Context, f func(*TargetVpn } } -// method id "compute.targetVpnGateways.setLabels": - -type TargetVpnGatewaysSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetLabels: Sets the labels on a TargetVpnGateway. To learn more about -// labels, read the Labeling Resources documentation. -func (r *TargetVpnGatewaysService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *TargetVpnGatewaysSetLabelsCall { - c := &TargetVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
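// Illustrative sketch, not part of this diff: listing URL maps with explicit
// PageToken/NextPageToken handling instead of the Pages helper shown above, and
// using the creationTimestamp desc ordering that the OrderBy comment documents.
// Assumes imports "context" and compute "google.golang.org/api/compute/v1".
func listURLMaps(ctx context.Context, svc *compute.Service, project string) ([]*compute.UrlMap, error) {
	var all []*compute.UrlMap
	pageToken := ""
	for {
		resp, err := svc.UrlMaps.List(project).
			OrderBy("creationTimestamp desc").
			MaxResults(50).
			PageToken(pageToken).
			Context(ctx).
			Do()
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Items...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		pageToken = resp.NextPageToken
	}
}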
-// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *TargetVpnGatewaysSetLabelsCall) RequestId(requestId string) *TargetVpnGatewaysSetLabelsCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysSetLabelsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetVpnGatewaysSetLabelsCall) Context(ctx context.Context) *TargetVpnGatewaysSetLabelsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetVpnGatewaysSetLabelsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetVpnGateways.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the labels on a TargetVpnGateway. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.targetVpnGateways.setLabels", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", - // "request": { - // "$ref": "RegionSetLabelsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetVpnGateways.testIamPermissions": - -type TargetVpnGatewaysTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *TargetVpnGatewaysService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *TargetVpnGatewaysTestIamPermissionsCall { - c := &TargetVpnGatewaysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetVpnGatewaysTestIamPermissionsCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetVpnGatewaysTestIamPermissionsCall) Context(ctx context.Context) *TargetVpnGatewaysTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetVpnGatewaysTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetVpnGateways.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.targetVpnGateways.testIamPermissions", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/targetVpnGateways/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.urlMaps.delete": +// method id "compute.urlMaps.patch": -type UrlMapsDeleteCall struct { +type UrlMapsPatchCall struct { s *Service project string urlMap string + urlmap *UrlMap urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified UrlMap resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/delete -func (r *UrlMapsService) Delete(project string, urlMap string) *UrlMapsDeleteCall { - c := &UrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified UrlMap resource with the data included +// in the request. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/patch +func (r *UrlMapsService) Patch(project string, urlMap string, urlmap *UrlMap) *UrlMapsPatchCall { + c := &UrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.urlMap = urlMap + c.urlmap = urlmap return c } @@ -120074,7 +136076,7 @@ func (r *UrlMapsService) Delete(project string, urlMap string) *UrlMapsDeleteCal // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *UrlMapsDeleteCall) RequestId(requestId string) *UrlMapsDeleteCall { +func (c *UrlMapsPatchCall) RequestId(requestId string) *UrlMapsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -120082,7 +136084,7 @@ func (c *UrlMapsDeleteCall) RequestId(requestId string) *UrlMapsDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *UrlMapsDeleteCall) Fields(s ...googleapi.Field) *UrlMapsDeleteCall { +func (c *UrlMapsPatchCall) Fields(s ...googleapi.Field) *UrlMapsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -120090,32 +136092,37 @@ func (c *UrlMapsDeleteCall) Fields(s ...googleapi.Field) *UrlMapsDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *UrlMapsDeleteCall) Context(ctx context.Context) *UrlMapsDeleteCall { +func (c *UrlMapsPatchCall) Context(ctx context.Context) *UrlMapsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *UrlMapsDeleteCall) Header() http.Header { +func (c *UrlMapsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -120127,14 +136134,14 @@ func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.urlMaps.delete" call. +// Do executes the "compute.urlMaps.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -120165,9 +136172,9 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified UrlMap resource.", - // "httpMethod": "DELETE", - // "id": "compute.urlMaps.delete", + // "description": "Patches the specified UrlMap resource with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.urlMaps.patch", // "parameterOrder": [ // "project", // "urlMap" @@ -120186,14 +136193,17 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "type": "string" // }, // "urlMap": { - // "description": "Name of the UrlMap resource to delete.", + // "description": "Name of the UrlMap resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, // "path": "{project}/global/urlMaps/{urlMap}", + // "request": { + // "$ref": "UrlMap" + // }, // "response": { // "$ref": "Operation" // }, @@ -120205,97 +136215,89 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.urlMaps.get": +// method id "compute.urlMaps.testIamPermissions": -type UrlMapsGetCall struct { - s *Service - project string - urlMap string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type UrlMapsTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified UrlMap resource. Gets a list of available -// URL maps by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/get -func (r *UrlMapsService) Get(project string, urlMap string) *UrlMapsGetCall { - c := &UrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *UrlMapsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *UrlMapsTestIamPermissionsCall { + c := &UrlMapsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.urlMap = urlMap + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *UrlMapsGetCall) Fields(s ...googleapi.Field) *UrlMapsGetCall { +func (c *UrlMapsTestIamPermissionsCall) Fields(s ...googleapi.Field) *UrlMapsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *UrlMapsGetCall) IfNoneMatch(entityTag string) *UrlMapsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *UrlMapsGetCall) Context(ctx context.Context) *UrlMapsGetCall { +func (c *UrlMapsTestIamPermissionsCall) Context(ctx context.Context) *UrlMapsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
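// Illustrative sketch, not part of this diff: patching a URL map with the Patch
// call documented above. Because Patch uses JSON merge-patch semantics, only the
// fields set on the request body change; here only the default service is swapped.
// Reading the resource first to carry its fingerprint is an assumption based on the
// API's optimistic-locking behavior for URL maps; the names are placeholders.
// Assumes imports "context" and compute "google.golang.org/api/compute/v1".
func patchDefaultService(ctx context.Context, svc *compute.Service, project, urlMap, newBackendURL string) (*compute.Operation, error) {
	current, err := svc.UrlMaps.Get(project, urlMap).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	patch := &compute.UrlMap{
		DefaultService: newBackendURL,
		Fingerprint:    current.Fingerprint,
	}
	return svc.UrlMaps.Patch(project, urlMap, patch).Context(ctx).Do()
}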
-func (c *UrlMapsGetCall) Header() http.Header { +func (c *UrlMapsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *UrlMapsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "urlMap": c.urlMap, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.urlMaps.get" call. -// Exactly one of *UrlMap or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *UrlMap.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { +// Do executes the "compute.urlMaps.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *UrlMapsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -120314,7 +136316,7 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &UrlMap{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -120326,12 +136328,12 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { } return ret, nil // { - // "description": "Returns the specified UrlMap resource. 
Gets a list of available URL maps by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.urlMaps.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.urlMaps.testIamPermissions", // "parameterOrder": [ // "project", - // "urlMap" + // "resource" // ], // "parameters": { // "project": { @@ -120341,17 +136343,20 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { // "required": true, // "type": "string" // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to return.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/urlMaps/{urlMap}", + // "path": "{project}/global/urlMaps/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "UrlMap" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -120362,23 +136367,25 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { } -// method id "compute.urlMaps.insert": +// method id "compute.urlMaps.update": -type UrlMapsInsertCall struct { +type UrlMapsUpdateCall struct { s *Service project string + urlMap string urlmap *UrlMap urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a UrlMap resource in the specified project using the -// data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/insert -func (r *UrlMapsService) Insert(project string, urlmap *UrlMap) *UrlMapsInsertCall { - c := &UrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified UrlMap resource with the data included +// in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/update +func (r *UrlMapsService) Update(project string, urlMap string, urlmap *UrlMap) *UrlMapsUpdateCall { + c := &UrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.urlMap = urlMap c.urlmap = urlmap return c } @@ -120397,7 +136404,7 @@ func (r *UrlMapsService) Insert(project string, urlmap *UrlMap) *UrlMapsInsertCa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *UrlMapsInsertCall) RequestId(requestId string) *UrlMapsInsertCall { +func (c *UrlMapsUpdateCall) RequestId(requestId string) *UrlMapsUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -120405,7 +136412,7 @@ func (c *UrlMapsInsertCall) RequestId(requestId string) *UrlMapsInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *UrlMapsInsertCall) Fields(s ...googleapi.Field) *UrlMapsInsertCall { +func (c *UrlMapsUpdateCall) Fields(s ...googleapi.Field) *UrlMapsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -120413,21 +136420,21 @@ func (c *UrlMapsInsertCall) Fields(s ...googleapi.Field) *UrlMapsInsertCall { // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *UrlMapsInsertCall) Context(ctx context.Context) *UrlMapsInsertCall { +func (c *UrlMapsUpdateCall) Context(ctx context.Context) *UrlMapsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *UrlMapsInsertCall) Header() http.Header { +func (c *UrlMapsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -120441,27 +136448,28 @@ func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "urlMap": c.urlMap, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.urlMaps.insert" call. +// Do executes the "compute.urlMaps.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -120492,11 +136500,12 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a UrlMap resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.urlMaps.insert", + // "description": "Updates the specified UrlMap resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.urlMaps.update", // "parameterOrder": [ - // "project" + // "project", + // "urlMap" // ], // "parameters": { // "project": { @@ -120510,9 +136519,16 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/urlMaps", + // "path": "{project}/global/urlMaps/{urlMap}", // "request": { // "$ref": "UrlMap" // }, @@ -120527,51 +136543,34 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.urlMaps.invalidateCache": +// method id "compute.urlMaps.validate": -type UrlMapsInvalidateCacheCall struct { - s *Service - project string - urlMap string - cacheinvalidationrule *CacheInvalidationRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type UrlMapsValidateCall struct { + s *Service + project string + urlMap string + urlmapsvalidaterequest *UrlMapsValidateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// InvalidateCache: Initiates a cache invalidation operation, -// invalidating the specified path, scoped to the specified UrlMap. -func (r *UrlMapsService) InvalidateCache(project string, urlMap string, cacheinvalidationrule *CacheInvalidationRule) *UrlMapsInvalidateCacheCall { - c := &UrlMapsInvalidateCacheCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Validate: Runs static validation for the UrlMap. In particular, the +// tests of the provided UrlMap will be run. Calling this method does +// NOT create the UrlMap. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/validate +func (r *UrlMapsService) Validate(project string, urlMap string, urlmapsvalidaterequest *UrlMapsValidateRequest) *UrlMapsValidateCall { + c := &UrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.urlMap = urlMap - c.cacheinvalidationrule = cacheinvalidationrule - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *UrlMapsInvalidateCacheCall) RequestId(requestId string) *UrlMapsInvalidateCacheCall { - c.urlParams_.Set("requestId", requestId) + c.urlmapsvalidaterequest = urlmapsvalidaterequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *UrlMapsInvalidateCacheCall) Fields(s ...googleapi.Field) *UrlMapsInvalidateCacheCall { +func (c *UrlMapsValidateCall) Fields(s ...googleapi.Field) *UrlMapsValidateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -120579,35 +136578,35 @@ func (c *UrlMapsInvalidateCacheCall) Fields(s ...googleapi.Field) *UrlMapsInvali // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *UrlMapsInvalidateCacheCall) Context(ctx context.Context) *UrlMapsInvalidateCacheCall { +func (c *UrlMapsValidateCall) Context(ctx context.Context) *UrlMapsValidateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *UrlMapsInvalidateCacheCall) Header() http.Header { +func (c *UrlMapsValidateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { +func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.cacheinvalidationrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapsvalidaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/invalidateCache") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/validate") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -120621,14 +136620,14 @@ func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.urlMaps.invalidateCache" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.urlMaps.validate" call. +// Exactly one of *UrlMapsValidateResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *UrlMapsValidateResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidateResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -120647,7 +136646,7 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &UrlMapsValidateResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -120659,9 +136658,9 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", + // "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", // "httpMethod": "POST", - // "id": "compute.urlMaps.invalidateCache", + // "id": "compute.urlMaps.validate", // "parameterOrder": [ // "project", // "urlMap" @@ -120674,25 +136673,20 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "urlMap": { - // "description": "Name of the UrlMap scoping this request.", + // "description": "Name of the UrlMap resource to be validated as.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/urlMaps/{urlMap}/invalidateCache", + // "path": "{project}/global/urlMaps/{urlMap}/validate", // "request": { - // "$ref": "CacheInvalidationRule" + // "$ref": "UrlMapsValidateRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "UrlMapsValidateResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -120702,9 +136696,9 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.urlMaps.list": +// method id "compute.vpnGateways.aggregatedList": -type UrlMapsListCall struct { +type VpnGatewaysAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -120713,11 +136707,9 @@ type UrlMapsListCall struct { header_ http.Header } -// List: Retrieves the list of UrlMap resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/list -func (r *UrlMapsService) List(project string) *UrlMapsListCall { - c := &UrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of VPN gateways. 
+func (r *VpnGatewaysService) AggregatedList(project string) *VpnGatewaysAggregatedListCall { + c := &VpnGatewaysAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -120744,7 +136736,7 @@ func (r *UrlMapsService) List(project string) *UrlMapsListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall { +func (c *VpnGatewaysAggregatedListCall) Filter(filter string) *VpnGatewaysAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -120755,7 +136747,7 @@ func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *UrlMapsListCall) MaxResults(maxResults int64) *UrlMapsListCall { +func (c *VpnGatewaysAggregatedListCall) MaxResults(maxResults int64) *VpnGatewaysAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -120772,7 +136764,7 @@ func (c *UrlMapsListCall) MaxResults(maxResults int64) *UrlMapsListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *UrlMapsListCall) OrderBy(orderBy string) *UrlMapsListCall { +func (c *VpnGatewaysAggregatedListCall) OrderBy(orderBy string) *VpnGatewaysAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -120780,7 +136772,7 @@ func (c *UrlMapsListCall) OrderBy(orderBy string) *UrlMapsListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *UrlMapsListCall) PageToken(pageToken string) *UrlMapsListCall { +func (c *VpnGatewaysAggregatedListCall) PageToken(pageToken string) *VpnGatewaysAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -120788,7 +136780,7 @@ func (c *UrlMapsListCall) PageToken(pageToken string) *UrlMapsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *UrlMapsListCall) Fields(s ...googleapi.Field) *UrlMapsListCall { +func (c *VpnGatewaysAggregatedListCall) Fields(s ...googleapi.Field) *VpnGatewaysAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -120798,7 +136790,7 @@ func (c *UrlMapsListCall) Fields(s ...googleapi.Field) *UrlMapsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *UrlMapsListCall) IfNoneMatch(entityTag string) *UrlMapsListCall { +func (c *VpnGatewaysAggregatedListCall) IfNoneMatch(entityTag string) *VpnGatewaysAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -120806,21 +136798,21 @@ func (c *UrlMapsListCall) IfNoneMatch(entityTag string) *UrlMapsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *UrlMapsListCall) Context(ctx context.Context) *UrlMapsListCall { +func (c *VpnGatewaysAggregatedListCall) Context(ctx context.Context) *VpnGatewaysAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *UrlMapsListCall) Header() http.Header { +func (c *VpnGatewaysAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { +func (c *VpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -120832,7 +136824,7 @@ func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/vpnGateways") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -120845,14 +136837,14 @@ func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.urlMaps.list" call. -// Exactly one of *UrlMapList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *UrlMapList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) { +// Do executes the "compute.vpnGateways.aggregatedList" call. +// Exactly one of *VpnGatewayAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *VpnGatewayAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -120871,7 +136863,7 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &UrlMapList{ + ret := &VpnGatewayAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -120883,9 +136875,9 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) } return ret, nil // { - // "description": "Retrieves the list of UrlMap resources available to the specified project.", + // "description": "Retrieves an aggregated list of VPN gateways.", // "httpMethod": "GET", - // "id": "compute.urlMaps.list", + // "id": "compute.vpnGateways.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -120921,9 +136913,9 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // "type": "string" // } // }, - // "path": "{project}/global/urlMaps", + // "path": "{project}/aggregated/vpnGateways", // "response": { - // "$ref": "UrlMapList" + // "$ref": "VpnGatewayAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -120937,7 +136929,7 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *UrlMapsListCall) Pages(ctx context.Context, f func(*UrlMapList) error) error { +func (c *VpnGatewaysAggregatedListCall) Pages(ctx context.Context, f func(*VpnGatewayAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -120955,27 +136947,24 @@ func (c *UrlMapsListCall) Pages(ctx context.Context, f func(*UrlMapList) error) } } -// method id "compute.urlMaps.patch": +// method id "compute.vpnGateways.delete": -type UrlMapsPatchCall struct { +type VpnGatewaysDeleteCall struct { s *Service project string - urlMap string - urlmap *UrlMap + region string + vpnGateway string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Patches the specified UrlMap resource with the data included -// in the request. This method supports PATCH semantics and uses the -// JSON merge patch format and processing rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/patch -func (r *UrlMapsService) Patch(project string, urlMap string, urlmap *UrlMap) *UrlMapsPatchCall { - c := &UrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified VPN gateway. +func (r *VpnGatewaysService) Delete(project string, region string, vpnGateway string) *VpnGatewaysDeleteCall { + c := &VpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.urlMap = urlMap - c.urlmap = urlmap + c.region = region + c.vpnGateway = vpnGateway return c } @@ -120993,7 +136982,7 @@ func (r *UrlMapsService) Patch(project string, urlMap string, urlmap *UrlMap) *U // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *UrlMapsPatchCall) RequestId(requestId string) *UrlMapsPatchCall { +func (c *VpnGatewaysDeleteCall) RequestId(requestId string) *VpnGatewaysDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -121001,7 +136990,7 @@ func (c *UrlMapsPatchCall) RequestId(requestId string) *UrlMapsPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *UrlMapsPatchCall) Fields(s ...googleapi.Field) *UrlMapsPatchCall { +func (c *VpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *VpnGatewaysDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -121009,56 +136998,52 @@ func (c *UrlMapsPatchCall) Fields(s ...googleapi.Field) *UrlMapsPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *UrlMapsPatchCall) Context(ctx context.Context) *UrlMapsPatchCall { +func (c *VpnGatewaysDeleteCall) Context(ctx context.Context) *VpnGatewaysDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *UrlMapsPatchCall) Header() http.Header { +func (c *VpnGatewaysDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *VpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{vpnGateway}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "urlMap": c.urlMap, + "project": c.project, + "region": c.region, + "vpnGateway": c.vpnGateway, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.urlMaps.patch" call. +// Do executes the "compute.vpnGateways.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *VpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -121089,12 +137074,13 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Patches the specified UrlMap resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.urlMaps.patch", + // "description": "Deletes the specified VPN gateway.", + // "httpMethod": "DELETE", + // "id": "compute.vpnGateways.delete", // "parameterOrder": [ // "project", - // "urlMap" + // "region", + // "vpnGateway" // ], // "parameters": { // "project": { @@ -121104,23 +137090,27 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to patch.", + // "vpnGateway": { + // "description": "Name of the VPN gateway to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/urlMaps/{urlMap}", - // "request": { - // "$ref": "UrlMap" - // }, + // "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}", // "response": { // "$ref": "Operation" // }, @@ -121132,89 +137122,265 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.urlMaps.testIamPermissions": +// method id "compute.vpnGateways.get": -type UrlMapsTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type VpnGatewaysGetCall struct { + s *Service + project string + region string + vpnGateway string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *UrlMapsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *UrlMapsTestIamPermissionsCall { - c := &UrlMapsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified VPN gateway. Gets a list of available VPN +// gateways by making a list() request. 
+func (r *VpnGatewaysService) Get(project string, region string, vpnGateway string) *VpnGatewaysGetCall { + c := &VpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.region = region + c.vpnGateway = vpnGateway return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *UrlMapsTestIamPermissionsCall) Fields(s ...googleapi.Field) *UrlMapsTestIamPermissionsCall { +func (c *VpnGatewaysGetCall) Fields(s ...googleapi.Field) *VpnGatewaysGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *VpnGatewaysGetCall) IfNoneMatch(entityTag string) *VpnGatewaysGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *UrlMapsTestIamPermissionsCall) Context(ctx context.Context) *UrlMapsTestIamPermissionsCall { +func (c *VpnGatewaysGetCall) Context(ctx context.Context) *VpnGatewaysGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *UrlMapsTestIamPermissionsCall) Header() http.Header { +func (c *VpnGatewaysGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *UrlMapsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *VpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{vpnGateway}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "vpnGateway": c.vpnGateway, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnGateways.get" call. +// Exactly one of *VpnGateway or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *VpnGateway.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*VpnGateway, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &VpnGateway{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified VPN gateway. Gets a list of available VPN gateways by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.vpnGateways.get", + // "parameterOrder": [ + // "project", + // "region", + // "vpnGateway" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "vpnGateway": { + // "description": "Name of the VPN gateway to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}", + // "response": { + // "$ref": "VpnGateway" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.vpnGateways.getStatus": + +type VpnGatewaysGetStatusCall struct { + s *Service + project string + region string + vpnGateway string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetStatus: Returns the status for the specified VPN gateway. +func (r *VpnGatewaysService) GetStatus(project string, region string, vpnGateway string) *VpnGatewaysGetStatusCall { + c := &VpnGatewaysGetStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.vpnGateway = vpnGateway + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnGatewaysGetStatusCall) Fields(s ...googleapi.Field) *VpnGatewaysGetStatusCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *VpnGatewaysGetStatusCall) IfNoneMatch(entityTag string) *VpnGatewaysGetStatusCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *VpnGatewaysGetStatusCall) Context(ctx context.Context) *VpnGatewaysGetStatusCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnGatewaysGetStatusCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnGatewaysGetStatusCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{vpnGateway}/getStatus") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "region": c.region, + "vpnGateway": c.vpnGateway, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.urlMaps.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.vpnGateways.getStatus" call. +// Exactly one of *VpnGatewaysGetStatusResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *VpnGatewaysGetStatusResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *UrlMapsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *VpnGatewaysGetStatusCall) Do(opts ...googleapi.CallOption) (*VpnGatewaysGetStatusResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -121233,7 +137399,7 @@ func (c *UrlMapsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &VpnGatewaysGetStatusResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -121245,12 +137411,13 @@ func (c *UrlMapsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.urlMaps.testIamPermissions", + // "description": "Returns the status for the specified VPN gateway.", + // "httpMethod": "GET", + // "id": "compute.vpnGateways.getStatus", // "parameterOrder": [ // "project", - // "resource" + // "region", + // "vpnGateway" // ], // "parameters": { // "project": { @@ -121260,20 +137427,24 @@ func (c *UrlMapsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "vpnGateway": { + // "description": "Name of the VPN gateway to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/urlMaps/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/vpnGateways/{vpnGateway}/getStatus", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "VpnGatewaysGetStatusResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -121284,26 +137455,25 @@ func (c *UrlMapsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP } -// method id "compute.urlMaps.update": +// method id "compute.vpnGateways.insert": -type UrlMapsUpdateCall struct { +type VpnGatewaysInsertCall struct { s *Service project string - urlMap string - urlmap *UrlMap + region string + vpngateway *VpnGateway urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Update: Updates the specified UrlMap resource with the data included -// in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/update -func (r *UrlMapsService) Update(project string, urlMap string, urlmap *UrlMap) *UrlMapsUpdateCall { - c := &UrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a VPN gateway in the specified project and region +// using the data included in the request. 
+func (r *VpnGatewaysService) Insert(project string, region string, vpngateway *VpnGateway) *VpnGatewaysInsertCall { + c := &VpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.urlMap = urlMap - c.urlmap = urlmap + c.region = region + c.vpngateway = vpngateway return c } @@ -121321,7 +137491,7 @@ func (r *UrlMapsService) Update(project string, urlMap string, urlmap *UrlMap) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *UrlMapsUpdateCall) RequestId(requestId string) *UrlMapsUpdateCall { +func (c *VpnGatewaysInsertCall) RequestId(requestId string) *VpnGatewaysInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -121329,7 +137499,7 @@ func (c *UrlMapsUpdateCall) RequestId(requestId string) *UrlMapsUpdateCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *UrlMapsUpdateCall) Fields(s ...googleapi.Field) *UrlMapsUpdateCall { +func (c *VpnGatewaysInsertCall) Fields(s ...googleapi.Field) *VpnGatewaysInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -121337,56 +137507,56 @@ func (c *UrlMapsUpdateCall) Fields(s ...googleapi.Field) *UrlMapsUpdateCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *UrlMapsUpdateCall) Context(ctx context.Context) *UrlMapsUpdateCall { +func (c *VpnGatewaysInsertCall) Context(ctx context.Context) *VpnGatewaysInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *UrlMapsUpdateCall) Header() http.Header { +func (c *VpnGatewaysInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *VpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.vpngateway) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "urlMap": c.urlMap, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.urlMaps.update" call. +// Do executes the "compute.vpnGateways.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *VpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -121417,12 +137587,12 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Updates the specified UrlMap resource with the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.urlMaps.update", + // "description": "Creates a VPN gateway in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.vpnGateways.insert", // "parameterOrder": [ // "project", - // "urlMap" + // "region" // ], // "parameters": { // "project": { @@ -121432,62 +137602,344 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnGateways", + // "request": { + // "$ref": "VpnGateway" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.vpnGateways.list": + +type VpnGatewaysListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of VPN gateways available to the specified +// project and region. +func (r *VpnGatewaysService) List(project string, region string) *VpnGatewaysListCall { + c := &VpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
+// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *VpnGatewaysListCall) Filter(filter string) *VpnGatewaysListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *VpnGatewaysListCall) MaxResults(maxResults int64) *VpnGatewaysListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *VpnGatewaysListCall) OrderBy(orderBy string) *VpnGatewaysListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *VpnGatewaysListCall) PageToken(pageToken string) *VpnGatewaysListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnGatewaysListCall) Fields(s ...googleapi.Field) *VpnGatewaysListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *VpnGatewaysListCall) IfNoneMatch(entityTag string) *VpnGatewaysListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *VpnGatewaysListCall) Context(ctx context.Context) *VpnGatewaysListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnGatewaysListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnGateways.list" call. +// Exactly one of *VpnGatewayList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *VpnGatewayList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *VpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &VpnGatewayList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of VPN gateways available to the specified project and region.", + // "httpMethod": "GET", + // "id": "compute.vpnGateways.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). 
By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to update.", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/urlMaps/{urlMap}", - // "request": { - // "$ref": "UrlMap" - // }, + // "path": "{project}/regions/{region}/vpnGateways", // "response": { - // "$ref": "Operation" + // "$ref": "VpnGatewayList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.urlMaps.validate": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *VpnGatewaysListCall) Pages(ctx context.Context, f func(*VpnGatewayList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type UrlMapsValidateCall struct { +// method id "compute.vpnGateways.setLabels": + +type VpnGatewaysSetLabelsCall struct { s *Service project string - urlMap string - urlmapsvalidaterequest *UrlMapsValidateRequest + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Validate: Runs static validation for the UrlMap. In particular, the -// tests of the provided UrlMap will be run. Calling this method does -// NOT create the UrlMap. -// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/validate -func (r *UrlMapsService) Validate(project string, urlMap string, urlmapsvalidaterequest *UrlMapsValidateRequest) *UrlMapsValidateCall { - c := &UrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on a VpnGateway. To learn more about +// labels, read the Labeling Resources documentation. +func (r *VpnGatewaysService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *VpnGatewaysSetLabelsCall { + c := &VpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.urlMap = urlMap - c.urlmapsvalidaterequest = urlmapsvalidaterequest + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *VpnGatewaysSetLabelsCall) RequestId(requestId string) *VpnGatewaysSetLabelsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *UrlMapsValidateCall) Fields(s ...googleapi.Field) *UrlMapsValidateCall { +func (c *VpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *VpnGatewaysSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -121495,35 +137947,35 @@ func (c *UrlMapsValidateCall) Fields(s ...googleapi.Field) *UrlMapsValidateCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *UrlMapsValidateCall) Context(ctx context.Context) *UrlMapsValidateCall { +func (c *VpnGatewaysSetLabelsCall) Context(ctx context.Context) *VpnGatewaysSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *UrlMapsValidateCall) Header() http.Header { +func (c *VpnGatewaysSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { +func (c *VpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapsvalidaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/validate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnGateways/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -121531,20 +137983,21 @@ func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "urlMap": c.urlMap, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.urlMaps.validate" call. -// Exactly one of *UrlMapsValidateResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *UrlMapsValidateResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidateResponse, error) { +// Do executes the "compute.vpnGateways.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -121563,7 +138016,7 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &UrlMapsValidateResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -121575,12 +138028,13 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate } return ret, nil // { - // "description": "Runs static validation for the UrlMap. 
In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.", + // "description": "Sets the labels on a VpnGateway. To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.urlMaps.validate", + // "id": "compute.vpnGateways.setLabels", // "parameterOrder": [ // "project", - // "urlMap" + // "region", + // "resource" // ], // "parameters": { // "project": { @@ -121590,20 +138044,32 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate // "required": true, // "type": "string" // }, - // "urlMap": { - // "description": "Name of the UrlMap resource to be validated as.", + // "region": { + // "description": "The region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/urlMaps/{urlMap}/validate", + // "path": "{project}/regions/{region}/vpnGateways/{resource}/setLabels", // "request": { - // "$ref": "UrlMapsValidateRequest" + // "$ref": "RegionSetLabelsRequest" // }, // "response": { - // "$ref": "UrlMapsValidateResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -122022,7 +138488,7 @@ func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err // "vpnTunnel": { // "description": "Name of the VpnTunnel resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -122188,7 +138654,7 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) // "vpnTunnel": { // "description": "Name of the VpnTunnel resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -123089,7 +139555,7 @@ func (c *ZoneOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "operation": { // "description": "Name of the Operations resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -123252,7 +139718,7 @@ func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, er // 
"operation": { // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -123687,7 +140153,7 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { // "zone": { // "description": "Name of the zone resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } diff --git a/vendor/google.golang.org/api/compute/v1/BUILD.bazel b/vendor/google.golang.org/api/compute/v1/BUILD.bazel index 4f11357e59268..7987bf8c55fa7 100644 --- a/vendor/google.golang.org/api/compute/v1/BUILD.bazel +++ b/vendor/google.golang.org/api/compute/v1/BUILD.bazel @@ -9,5 +9,7 @@ go_library( deps = [ "//vendor/google.golang.org/api/gensupport:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/google.golang.org/api/option:go_default_library", + "//vendor/google.golang.org/api/transport/http:go_default_library", ], ) diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 2b7e314e4b1c0..39800a7c96ae2 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -29,7 +29,7 @@ "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/m6oCgMxscM7YlnV69HIdTmFUj8g\"", + "etag": "\"VPK3KBfpaEgZ16pozGOoMYfKc0U/isJ5vctr0fRB9VbwWEZjs0c-shw\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -150,7 +150,7 @@ "acceleratorType": { "description": "Name of the accelerator type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -302,7 +302,7 @@ "address": { "description": "Name of the address resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -348,7 +348,7 @@ "address": { "description": "Name of the address resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -541,7 +541,7 @@ "autoscaler": { "description": "Name of the autoscaler to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -587,7 +587,7 @@ "autoscaler": { "description": "Name of the autoscaler to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -726,7 +726,7 @@ "autoscaler": { "description": "Name of the autoscaler to patch.", "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "type": "string" }, "project": { @@ -773,7 +773,7 @@ "autoscaler": { "description": "Name of the autoscaler to update.", "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "type": "string" }, "project": { @@ -864,7 +864,7 @@ "backendBucket": { "description": "Name of the BackendBucket resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -946,7 +946,7 @@ "backendBucket": { "description": "Name of the BackendBucket resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1062,7 +1062,7 @@ "backendBucket": { "description": "Name of the BackendBucket resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1103,7 +1103,7 @@ "backendBucket": { "description": "Name of the BackendBucket resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1237,7 +1237,7 @@ "backendService": { "description": "Name of the BackendService resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1319,7 +1319,7 @@ "backendService": { "description": "Name of the BackendService resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1353,7 +1353,7 @@ "backendService": { "description": "Name of the BackendService resource to which the queried instance belongs.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1552,7 +1552,7 @@ "backendService": { "description": "Name of the BackendService resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1647,7 +1647,7 @@ "diskType": { "description": "Name of the disk type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -1804,6 +1804,7 @@ "type": "string" }, "guestFlush": { + "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. 
Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", "location": "query", "type": "boolean" }, @@ -2564,7 +2565,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -2610,7 +2611,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -2750,7 +2751,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource in which target is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -2802,7 +2803,7 @@ "address": { "description": "Name of the address resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -2840,7 +2841,7 @@ "address": { "description": "Name of the address resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -2960,7 +2961,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -2998,7 +2999,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3114,7 +3115,7 @@ "forwardingRule": { "description": "Name of the ForwardingRule resource in which target is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3208,7 +3209,7 @@ "operation": { "description": "Name of the Operations resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3238,7 +3239,7 @@ "operation": { "description": "Name of the Operations resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3325,7 +3326,7 @@ "healthCheck": { "description": "Name of the HealthCheck resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3363,7 +3364,7 @@ "healthCheck": { "description": "Name of the HealthCheck resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3479,7 +3480,7 @@ "healthCheck": { "description": 
"Name of the HealthCheck resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3520,7 +3521,7 @@ "healthCheck": { "description": "Name of the HealthCheck resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3565,7 +3566,7 @@ "httpHealthCheck": { "description": "Name of the HttpHealthCheck resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3603,7 +3604,7 @@ "httpHealthCheck": { "description": "Name of the HttpHealthCheck resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3719,7 +3720,7 @@ "httpHealthCheck": { "description": "Name of the HttpHealthCheck resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3760,7 +3761,7 @@ "httpHealthCheck": { "description": "Name of the HttpHealthCheck resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3805,7 +3806,7 @@ "httpsHealthCheck": { "description": "Name of the HttpsHealthCheck resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3843,7 +3844,7 @@ "httpsHealthCheck": { "description": "Name of the HttpsHealthCheck resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -3959,7 +3960,7 @@ "httpsHealthCheck": { "description": "Name of the HttpsHealthCheck resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -4000,7 +4001,7 @@ "httpsHealthCheck": { "description": "Name of the HttpsHealthCheck resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -5488,7 +5489,7 @@ "instanceTemplates": { "methods": { "delete": { - "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It's not possible to delete templates which are in use by an instance group.", + "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. 
It is not possible to delete templates that are already in use by a managed instance group.", "httpMethod": "DELETE", "id": "compute.instanceTemplates.delete", "parameterOrder": [ @@ -5499,7 +5500,7 @@ "instanceTemplate": { "description": "The name of the instance template to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -5537,7 +5538,7 @@ "instanceTemplate": { "description": "The name of the instance template.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -5627,7 +5628,7 @@ ] }, "list": { - "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", + "description": "Retrieves a list of instance templates that are contained within the specified project.", "httpMethod": "GET", "id": "compute.instanceTemplates.list", "parameterOrder": [ @@ -5868,7 +5869,7 @@ ], "parameters": { "forceAttach": { - "description": "Whether to force attach the disk even if it's currently attached to another instance. This is only available for regional disks.", + "description": "Whether to force attach the disk even if it's currently attached to another instance.", "location": "query", "type": "boolean" }, @@ -6211,6 +6212,48 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getShieldedInstanceIdentity": { + "description": "Returns the Shielded Instance Identity of an instance", + "httpMethod": "GET", + "id": "compute.instances.getShieldedInstanceIdentity", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name or id of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity", + "response": { + "$ref": "ShieldedInstanceIdentity" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "description": "Creates an instance resource in the specified project using the data included in the request.", "httpMethod": "POST", @@ -6380,7 +6423,7 @@ ] }, "reset": { - "description": "Performs a reset on the instance. For more information, see Resetting an instance.", + "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance.", "httpMethod": "POST", "id": "compute.instances.reset", "parameterOrder": [ @@ -6925,6 +6968,55 @@ "https://www.googleapis.com/auth/compute" ] }, + "setShieldedInstanceIntegrityPolicy": { + "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.instances.setShieldedInstanceIntegrityPolicy", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name or id of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + "request": { + "$ref": "ShieldedInstanceIntegrityPolicy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setTags": { "description": "Sets network tags for the specified instance to the data included in the request.", "httpMethod": "POST", @@ -7312,74 +7404,21 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "interconnectAttachments": { - "methods": { - "aggregatedList": { - "description": "Retrieves an aggregated list of interconnect attachments.", - "httpMethod": "GET", - "id": "compute.interconnectAttachments.aggregatedList", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - } - }, - "path": "{project}/aggregated/interconnectAttachments", - "response": { - "$ref": "InterconnectAttachmentAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] }, - "delete": { - "description": "Deletes the specified interconnect attachment.", - "httpMethod": "DELETE", - "id": "compute.interconnectAttachments.delete", + "updateShieldedInstanceConfig": { + "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.instances.updateShieldedInstanceConfig", "parameterOrder": [ "project", - "region", - "interconnectAttachment" + "zone", + "instance" ], "parameters": { - "interconnectAttachment": { - "description": "Name of the interconnect attachment to delete.", + "instance": { + "description": "Name or id of the instance scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -7390,102 +7429,22 @@ "required": true, "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" - } - }, - "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Returns the specified interconnect attachment.", - "httpMethod": "GET", - "id": "compute.interconnectAttachments.get", - "parameterOrder": [ - "project", - "region", - "interconnectAttachment" - ], - "parameters": { - "interconnectAttachment": { - "description": "Name of the interconnect attachment to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", - "response": { - "$ref": "InterconnectAttachment" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", - "httpMethod": "POST", - "id": "compute.interconnectAttachments.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", + "zone": { + "description": "The name of the zone for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "{project}/regions/{region}/interconnectAttachments", + "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig", "request": { - "$ref": "InterconnectAttachment" + "$ref": "ShieldedInstanceConfig" }, "response": { "$ref": "Operation" @@ -7494,14 +7453,196 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "list": { - "description": "Retrieves the list of interconnect attachments contained within the specified region.", + } + } + }, + "interconnectAttachments": { + "methods": { + "aggregatedList": { + "description": "Retrieves an aggregated list of interconnect attachments.", "httpMethod": "GET", - "id": "compute.interconnectAttachments.list", + "id": "compute.interconnectAttachments.aggregatedList", "parameterOrder": [ - "project", - "region" + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/interconnectAttachments", + "response": { + "$ref": "InterconnectAttachmentAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified interconnect attachment.", + "httpMethod": "DELETE", + "id": "compute.interconnectAttachments.delete", + "parameterOrder": [ + "project", + "region", + "interconnectAttachment" + ], + "parameters": { + "interconnectAttachment": { + "description": "Name of the interconnect attachment to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified interconnect attachment.", + "httpMethod": "GET", + "id": "compute.interconnectAttachments.get", + "parameterOrder": [ + "project", + "region", + "interconnectAttachment" + ], + "parameters": { + "interconnectAttachment": { + "description": "Name of the interconnect attachment to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "response": { + "$ref": "InterconnectAttachment" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.interconnectAttachments.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/interconnectAttachments", + "request": { + "$ref": "InterconnectAttachment" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of interconnect attachments contained within the specified region.", + "httpMethod": "GET", + "id": "compute.interconnectAttachments.list", + "parameterOrder": [ + "project", + "region" ], "parameters": { "filter": { @@ -7565,7 +7706,7 @@ "interconnectAttachment": { "description": "Name of the interconnect attachment to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -7617,7 +7758,7 @@ "interconnectLocation": { "description": "Name of the interconnect location to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -7704,7 +7845,7 @@ "interconnect": { "description": "Name of the interconnect to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -7742,7 +7883,7 @@ "interconnect": { "description": "Name of the interconnect to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -7776,7 +7917,7 @@ "interconnect": { "description": "Name of the interconnect resource to query.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -7892,7 +8033,7 @@ "interconnect": { "description": "Name of the interconnect to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -8012,7 +8153,7 @@ "license": { "description": "Name of the license resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -8050,7 +8191,7 @@ "license": { "description": "Name of the License resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -8330,7 +8471,7 @@ "machineType": { "description": "Name of the machine type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -8418,21 +8559,70 @@ } } }, - "networks": { + "networkEndpointGroups": { "methods": { - "addPeering": { - "description": "Adds a peering to the specified network.", + "aggregatedList": { + "description": "Retrieves the list of network endpoint groups and sorts them by zone.", + 
"httpMethod": "GET", + "id": "compute.networkEndpointGroups.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/aggregated/networkEndpointGroups", + "response": { + "$ref": "NetworkEndpointGroupAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "attachNetworkEndpoints": { + "description": "Attach a list of network endpoints to the specified network endpoint group.", "httpMethod": "POST", - "id": "compute.networks.addPeering", + "id": "compute.networkEndpointGroups.attachNetworkEndpoints", "parameterOrder": [ "project", - "network" + "zone", + "networkEndpointGroup" ], "parameters": { - "network": { - "description": "Name of the network resource to add peering to.", + "networkEndpointGroup": { + "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -8447,11 +8637,17 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "zone": { + "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" } }, - "path": "{project}/global/networks/{network}/addPeering", + "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", "request": { - "$ref": "NetworksAddPeeringRequest" + "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" }, "response": { "$ref": "Operation" @@ -8462,18 +8658,18 @@ ] }, "delete": { - "description": "Deletes the specified network.", + "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", "httpMethod": "DELETE", - "id": "compute.networks.delete", + "id": "compute.networkEndpointGroups.delete", "parameterOrder": [ "project", - "network" + "zone", + "networkEndpointGroup" ], "parameters": { - "network": { - "description": "Name of the network to delete.", + "networkEndpointGroup": { + "description": "The name of the network endpoint group to delete. 
It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -8488,9 +8684,62 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "zone": { + "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" } }, - "path": "{project}/global/networks/{network}", + "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "detachNetworkEndpoints": { + "description": "Detach a list of network endpoints from the specified network endpoint group.", + "httpMethod": "POST", + "id": "compute.networkEndpointGroups.detachNetworkEndpoints", + "parameterOrder": [ + "project", + "zone", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + "request": { + "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" + }, "response": { "$ref": "Operation" }, @@ -8500,18 +8749,18 @@ ] }, "get": { - "description": "Returns the specified network. 
Gets a list of available networks by making a list() request.", + "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", "httpMethod": "GET", - "id": "compute.networks.get", + "id": "compute.networkEndpointGroups.get", "parameterOrder": [ "project", - "network" + "zone", + "networkEndpointGroup" ], "parameters": { - "network": { - "description": "Name of the network to return.", + "networkEndpointGroup": { + "description": "The name of the network endpoint group. It should comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -8521,11 +8770,17 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "zone": { + "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" } }, - "path": "{project}/global/networks/{network}", + "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", "response": { - "$ref": "Network" + "$ref": "NetworkEndpointGroup" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -8534,11 +8789,12 @@ ] }, "insert": { - "description": "Creates a network in the specified project using the data included in the request.", + "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", "httpMethod": "POST", - "id": "compute.networks.insert", + "id": "compute.networkEndpointGroups.insert", "parameterOrder": [ - "project" + "project", + "zone" ], "parameters": { "project": { @@ -8552,11 +8808,17 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "zone": { + "description": "The name of the zone where you want to create the network endpoint group. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" } }, - "path": "{project}/global/networks", + "path": "{project}/zones/{zone}/networkEndpointGroups", "request": { - "$ref": "Network" + "$ref": "NetworkEndpointGroup" }, "response": { "$ref": "Operation" @@ -8567,11 +8829,328 @@ ] }, "list": { - "description": "Retrieves the list of networks available to the specified project.", + "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", "httpMethod": "GET", - "id": "compute.networks.list", + "id": "compute.networkEndpointGroups.list", "parameterOrder": [ - "project" + "project", + "zone" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/networkEndpointGroups", + "response": { + "$ref": "NetworkEndpointGroupList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "listNetworkEndpoints": { + "description": "Lists the network endpoints in the specified network endpoint group.", + "httpMethod": "POST", + "id": "compute.networkEndpointGroups.listNetworkEndpoints", + "parameterOrder": [ + "project", + "zone", + "networkEndpointGroup" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "networkEndpointGroup": { + "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + "request": { + "$ref": "NetworkEndpointGroupsListEndpointsRequest" + }, + "response": { + "$ref": "NetworkEndpointGroupsListNetworkEndpoints" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "httpMethod": "POST", + "id": "compute.networkEndpointGroups.testIamPermissions", + "parameterOrder": [ + "project", + "zone", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "networks": { + "methods": { + "addPeering": { + "description": "Adds a peering to the specified network.", + "httpMethod": "POST", + "id": "compute.networks.addPeering", + "parameterOrder": [ + "project", + "network" + ], + "parameters": { + "network": { + "description": "Name of the network resource to add peering to.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/networks/{network}/addPeering", + "request": { + "$ref": "NetworksAddPeeringRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "delete": { + "description": "Deletes the specified network.", + "httpMethod": "DELETE", + "id": "compute.networks.delete", + "parameterOrder": [ + "project", + "network" + ], + "parameters": { + "network": { + "description": "Name of the network to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/networks/{network}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified network. 
Gets a list of available networks by making a list() request.", + "httpMethod": "GET", + "id": "compute.networks.get", + "parameterOrder": [ + "project", + "network" + ], + "parameters": { + "network": { + "description": "Name of the network to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/networks/{network}", + "response": { + "$ref": "Network" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a network in the specified project using the data included in the request.", + "httpMethod": "POST", + "id": "compute.networks.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/networks", + "request": { + "$ref": "Network" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of networks available to the specified project.", + "httpMethod": "GET", + "id": "compute.networks.list", + "parameterOrder": [ + "project" ], "parameters": { "filter": { @@ -8752,7 +9331,7 @@ "nodeGroup": { "description": "Name of the NodeGroup resource.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -8850,7 +9429,7 @@ "nodeGroup": { "description": "Name of the NodeGroup resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -8896,7 +9475,7 @@ "nodeGroup": { "description": "Name of the NodeGroup resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -8945,7 +9524,7 @@ "nodeGroup": { "description": "Name of the node group to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9148,7 +9727,7 @@ "nodeGroup": { "description": "Name of the NodeGroup resource whose nodes you want to list.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9242,9 +9821,9 @@ ], "parameters": { "nodeGroup": { - "description": "Name of the NodeGroup resource to delete.", + "description": "Name of the NodeGroup resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9391,7 +9970,7 @@ "nodeTemplate": { "description": "Name of the NodeTemplate resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9437,7 +10016,7 @@ "nodeTemplate": { "description": "Name of the node template to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -9761,7 +10340,7 @@ "nodeType": { "description": "Name of the node type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10312,7 +10891,7 @@ "autoscaler": { "description": "Name of the autoscaler to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10358,7 +10937,7 @@ "autoscaler": { 
"description": "Name of the autoscaler to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10497,7 +11076,7 @@ "autoscaler": { "description": "Name of the autoscaler to patch.", "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "type": "string" }, "project": { @@ -10544,7 +11123,7 @@ "autoscaler": { "description": "Name of the autoscaler to update.", "location": "query", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "type": "string" }, "project": { @@ -10596,7 +11175,7 @@ "backendService": { "description": "Name of the BackendService resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10642,7 +11221,7 @@ "backendService": { "description": "Name of the BackendService resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10684,7 +11263,7 @@ "backendService": { "description": "Name of the BackendService resource for which to get health.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10875,7 +11454,7 @@ "backendService": { "description": "Name of the BackendService resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10977,7 +11556,7 @@ "commitment": { "description": "Name of the commitment to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -11121,7 +11700,7 @@ "diskType": { "description": "Name of the disk type to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12405,7 +12984,7 @@ "operation": { "description": "Name of the Operations resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12443,7 +13022,7 @@ "operation": { "description": "Name of the Operations resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -12552,7 +13131,7 @@ "region": { "description": "Name of the region resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -12701,7 +13280,7 @@ "router": { "description": "Name of the Router resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -12742,7 +13321,7 @@ "router": { 
"description": "Name of the Router resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -12807,7 +13386,7 @@ "router": { "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -12849,7 +13428,7 @@ "router": { "description": "Name of the Router resource to query.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -12994,7 +13573,7 @@ "router": { "description": "Name of the Router resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -13038,7 +13617,7 @@ "router": { "description": "Name of the Router resource to query.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -13088,7 +13667,7 @@ "router": { "description": "Name of the Router resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -13286,7 +13865,7 @@ "securityPolicy": { "description": "Name of the security policy to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -13327,7 +13906,7 @@ "securityPolicy": { "description": "Name of the security policy to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -13360,7 +13939,7 @@ "securityPolicy": { "description": "Name of the security policy to get.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -13400,7 +13979,7 @@ "securityPolicy": { "description": "Name of the security policy to which the queried rule belongs.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -13521,7 +14100,7 @@ "securityPolicy": { "description": "Name of the security policy to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -13563,7 +14142,7 @@ "securityPolicy": { "description": "Name of the security policy to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -13605,7 +14184,7 @@ "securityPolicy": { "description": "Name of the security policy to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ 
-13915,7 +14494,7 @@ "sslCertificate": { "description": "Name of the SslCertificate resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -13948,7 +14527,7 @@ "sslCertificate": { "description": "Name of the SslCertificate resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -14375,7 +14954,7 @@ "subnetwork": { "description": "Name of the Subnetwork resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -14421,7 +15000,7 @@ "subnetwork": { "description": "Name of the Subnetwork resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -14465,7 +15044,7 @@ "subnetwork": { "description": "Name of the Subnetwork resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -14701,7 +15280,7 @@ "subnetwork": { "description": "Name of the Subnetwork resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -14794,7 +15373,7 @@ "subnetwork": { "description": "Name of the Subnetwork resource.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -14884,7 +15463,7 @@ "targetHttpProxy": { "description": "Name of the TargetHttpProxy resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -14917,7 +15496,7 @@ "targetHttpProxy": { "description": "Name of the TargetHttpProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -15038,7 +15617,7 @@ "targetHttpProxy": { "description": "Name of the TargetHttpProxy to set a URL map for.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -15083,7 +15662,7 @@ "targetHttpsProxy": { "description": "Name of the TargetHttpsProxy resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -15116,7 +15695,7 @@ "targetHttpsProxy": { "description": "Name of the TargetHttpsProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -15277,7 +15856,7 @@ "targetHttpsProxy": { "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", "location": "path", - "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -15358,7 +15937,7 @@ "targetHttpsProxy": { "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -15453,7 +16032,7 @@ "targetInstance": { "description": "Name of the TargetInstance resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -15494,7 +16073,7 @@ "targetInstance": { "description": "Name of the TargetInstance resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -15650,7 +16229,7 @@ "targetPool": { "description": "Name of the target pool to add a health check to.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -15699,7 +16278,7 @@ "targetPool": { "description": "Name of the TargetPool resource to add instances to.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -15797,7 +16376,7 @@ "targetPool": { "description": "Name of the TargetPool resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -15838,7 +16417,7 @@ "targetPool": { "description": "Name of the TargetPool resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -15880,7 +16459,7 @@ "targetPool": { "description": "Name of the TargetPool resource to which the queried instance belongs.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16028,7 +16607,7 @@ "targetPool": { "description": "Name of the target pool to remove health checks from.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16077,7 +16656,7 @@ "targetPool": { "description": "Name of the TargetPool resource to remove instances from.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16132,7 +16711,7 @@ "targetPool": { "description": "Name of the TargetPool resource to set a backup pool for.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16177,7 +16756,7 @@ "targetSslProxy": { "description": "Name of the TargetSslProxy resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", 
"required": true, "type": "string" } @@ -16210,7 +16789,7 @@ "targetSslProxy": { "description": "Name of the TargetSslProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16331,7 +16910,7 @@ "targetSslProxy": { "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16372,7 +16951,7 @@ "targetSslProxy": { "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16413,7 +16992,7 @@ "targetSslProxy": { "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16498,7 +17077,7 @@ "targetTcpProxy": { "description": "Name of the TargetTcpProxy resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16531,7 +17110,7 @@ "targetTcpProxy": { "description": "Name of the TargetTcpProxy resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16652,7 +17231,7 @@ "targetTcpProxy": { "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16693,7 +17272,7 @@ "targetTcpProxy": { "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16795,7 +17374,7 @@ "targetVpnGateway": { "description": "Name of the target VPN gateway to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16836,7 +17415,7 @@ "targetVpnGateway": { "description": "Name of the target VPN gateway to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -16977,7 +17556,7 @@ "urlMap": { "description": "Name of the UrlMap resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -17010,7 +17589,7 @@ "urlMap": { "description": "Name of the UrlMap resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -17082,7 
+17661,7 @@ "urlMap": { "description": "Name of the UrlMap scoping this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -17172,7 +17751,7 @@ "urlMap": { "description": "Name of the UrlMap resource to patch.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -17213,7 +17792,7 @@ "urlMap": { "description": "Name of the UrlMap resource to update.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -17249,7 +17828,7 @@ "urlMap": { "description": "Name of the UrlMap resource to be validated as.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -17351,7 +17930,7 @@ "vpnTunnel": { "description": "Name of the VpnTunnel resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -17392,7 +17971,7 @@ "vpnTunnel": { "description": "Name of the VpnTunnel resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -17522,7 +18101,7 @@ "operation": { "description": "Name of the Operations resource to delete.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -17560,7 +18139,7 @@ "operation": { "description": "Name of the Operations resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -17669,7 +18248,7 @@ "zone": { "description": "Name of the zone resource to return.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } @@ -17736,7 +18315,7 @@ } } }, - "revision": "20181130", + "revision": "20190528", "rootUrl": "https://www.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -17749,7 +18328,7 @@ "type": "integer" }, "acceleratorType": { - "description": "Full or partial URL of the accelerator type resource to attach to this instance. If you are creating an instance template, specify only the accelerator name.", + "description": "Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. 
See GPUs on Compute Engine for a full list of accelerator types.", "type": "string" } }, @@ -18261,9 +18840,11 @@ "enum": [ "DNS_RESOLVER", "GCE_ENDPOINT", + "NAT_AUTO", "VPC_PEERING" ], "enumDescriptions": [ + "", "", "", "" @@ -18652,7 +19233,7 @@ "type": "boolean" }, "deviceName": { - "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks.", + "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks.", "type": "string" }, "diskEncryptionKey": { @@ -18765,6 +19346,14 @@ "sourceImageEncryptionKey": { "$ref": "CustomerEncryptionKey", "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys." + }, + "sourceSnapshot": { + "description": "The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or disks.source is required except for local SSD.\n\nTo create a disk with a snapshot that you created, specify the snapshot name in the following format:\nglobal/snapshots/my-backup\n\n\nIf the source snapshot is deleted later, this field will not be set.", + "type": "string" + }, + "sourceSnapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source snapshot." } }, "type": "object" @@ -19377,7 +19966,7 @@ "id": "AutoscalingPolicyLoadBalancingUtilization", "properties": { "utilizationTarget": { - "description": "Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.", + "description": "Fraction of backend capacity utilization (set in HTTP(S) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.", "format": "double", "type": "number" } @@ -19420,6 +20009,11 @@ "format": "int32", "type": "integer" }, + "maxConnectionsPerEndpoint": { + "description": "The max number of simultaneous connections that a single backend network endpoint can handle. This is used to calculate the capacity of the group. Can be used in either CONNECTION or UTILIZATION balancing modes. 
For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint must be set.\n\nThis cannot be used for internal load balancing.", + "format": "int32", + "type": "integer" + }, "maxConnectionsPerInstance": { "description": "The max number of simultaneous connections that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be set.\n\nThis cannot be used for internal load balancing.", "format": "int32", @@ -19430,6 +20024,11 @@ "format": "int32", "type": "integer" }, + "maxRatePerEndpoint": { + "description": "The max requests per second (RPS) that a single backend network endpoint can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint must be set.\n\nThis cannot be used for internal load balancing.", + "format": "float", + "type": "number" + }, "maxRatePerInstance": { "description": "The max requests per second (RPS) that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", "format": "float", @@ -19647,6 +20246,13 @@ "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" }, + "customRequestHeaders": { + "description": "Headers that the HTTP/S load balancer should add to proxied requests.", + "items": { + "type": "string" + }, + "type": "array" + }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", "type": "string" @@ -19685,9 +20291,11 @@ "enum": [ "EXTERNAL", "INTERNAL", + "INTERNAL_SELF_MANAGED", "INVALID_LOAD_BALANCING_SCHEME" ], "enumDescriptions": [ + "", "", "", "" @@ -19712,6 +20320,7 @@ "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, TCP, and SSL. The default is HTTP.\n\nFor internal load balancing, the possible values are TCP and UDP, and the default is TCP.", "enum": [ "HTTP", + "HTTP2", "HTTPS", "SSL", "TCP", @@ -19722,6 +20331,7 @@ "", "", "", + "", "" ], "type": "string" @@ -20050,6 +20660,15 @@ }, "type": "object" }, + "BackendServiceReference": { + "id": "BackendServiceReference", + "properties": { + "backendService": { + "type": "string" + } + }, + "type": "object" + }, "BackendServicesScopedList": { "id": "BackendServicesScopedList", "properties": { @@ -20150,10 +20769,10 @@ "properties": { "condition": { "$ref": "Expr", - "description": "Unimplemented. The condition that is associated with this binding. NOTE: an unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." + "description": "The condition that is associated with this binding. NOTE: An unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource. 
`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@gmail.com` .\n\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`.\n\n\n\n* `domain:{domain}`: A Google Apps domain name that represents all the users of that domain. For example, `google.com` or `example.com`.", + "description": "Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@gmail.com` .\n\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`.\n\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.", "items": { "type": "string" }, @@ -20685,12 +21304,8 @@ ], "type": "string" }, - "value": { - "description": "DEPRECATED. Use 'values' instead.", - "type": "string" - }, "values": { - "description": "The objects of the condition. This is mutually exclusive with 'value'.", + "description": "The objects of the condition.", "items": { "type": "string" }, @@ -20765,13 +21380,15 @@ "type": "string" }, "state": { - "description": "The deprecation state of this resource. This can be DEPRECATED, OBSOLETE, or DELETED. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error.", + "description": "The deprecation state of this resource. This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the end of life date for an image, can use ACTIVE. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error.", "enum": [ + "ACTIVE", "DELETED", "DEPRECATED", "OBSOLETE" ], "enumDescriptions": [ + "", "", "", "" @@ -20795,7 +21412,7 @@ }, "diskEncryptionKey": { "$ref": "CustomerEncryptionKey", - "description": "Encrypts the disk using a customer-supplied encryption key.\n\nAfter you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later (e.g. 
to create a disk snapshot or an image, or to attach the disk to a virtual machine).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later." + "description": "Encrypts the disk using a customer-supplied encryption key.\n\nAfter you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later (e.g. to create a disk snapshot, to create a disk image, to create a machine image, or to attach the disk to a virtual machine).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later." }, "guestOsFeatures": { "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", @@ -20916,6 +21533,7 @@ "description": "[Output Only] The status of disk creation.", "enum": [ "CREATING", + "DELETING", "FAILED", "READY", "RESTORING" @@ -20924,16 +21542,17 @@ "", "", "", + "", "" ], "type": "string" }, "type": { - "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. For example: project/zones/zone/diskTypes/pd-standard or pd-ssd", + "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. For example: projects/project/zones/zone/diskTypes/pd-standard or pd-ssd", "type": "string" }, "users": { - "description": "[Output Only] Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance", + "description": "[Output Only] Links to the users of the disk (attached instances) in form: projects/project/zones/zone/instances/instance", "items": { "type": "string" }, @@ -22028,6 +22647,28 @@ }, "type": "object" }, + "FixedOrPercent": { + "description": "Encapsulates numeric value that can be either absolute or relative.", + "id": "FixedOrPercent", + "properties": { + "calculated": { + "description": "[Output Only] Absolute value of VM instances calculated based on the specific mode.\n\n \n- If the value is fixed, then the calculated value is equal to the fixed value. \n- If the value is a percent, then the calculated value is percent/100 * targetSize. For example, the calculated value of a 80% of a managed instance group with 150 instances would be (80/100 * 150) = 120 VM instances. If there is a remainder, the number is rounded up.", + "format": "int32", + "type": "integer" + }, + "fixed": { + "description": "Specifies a fixed number of VM instances. This must be a positive integer.", + "format": "int32", + "type": "integer" + }, + "percent": { + "description": "Specifies a percentage of instances between 0 to 100%, inclusive. For example, specify 80 for 80%.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "ForwardingRule": { "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, ports] tuple. 
(== resource_for beta.forwardingRules ==) (== resource_for v1.forwardingRules ==) (== resource_for beta.globalForwardingRules ==) (== resource_for v1.globalForwardingRules ==) (== resource_for beta.regionForwardingRules ==) (== resource_for v1.regionForwardingRules ==)", "id": "ForwardingRule", @@ -22056,6 +22697,10 @@ ], "type": "string" }, + "allPorts": { + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. This field cannot be used with port or portRange fields.\n\nWhen the load balancing scheme is INTERNAL and protocol is TCP/UDP, specify this field to allow packets addressed to any ports will be forwarded to the backends configured with this forwarding rule.", + "type": "boolean" + }, "backendService": { "description": "This field is only used for INTERNAL load balancing.\n\nFor internal load balancing, this field identifies the BackendService resource to receive the matched traffic.", "type": "string" @@ -22097,9 +22742,11 @@ "enum": [ "EXTERNAL", "INTERNAL", + "INTERNAL_SELF_MANAGED", "INVALID" ], "enumDescriptions": [ + "", "", "", "" @@ -22146,12 +22793,21 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "serviceLabel": { + "description": "An optional prefix to the service name for this Forwarding Rule. If specified, will be the first label of the fully qualified service name.\n\nThe label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.\n\nThis field is only used for internal load balancing.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "serviceName": { + "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule.\n\nThis field is only used for internal load balancing.", + "type": "string" + }, "subnetwork": { "description": "This field is only used for INTERNAL load balancing.\n\nFor internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule.\n\nIf the network specified is in auto subnet mode, this field is optional. However, if the network is in custom subnet mode, a subnetwork must be specified.", "type": "string" }, "target": { - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For INTERNAL_SELF_MANAGED\" load balancing, only HTTP and HTTPS targets are valid.", + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. 
For INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are valid.", "type": "string" } }, @@ -22381,6 +23037,15 @@ }, "type": "object" }, + "ForwardingRuleReference": { + "id": "ForwardingRuleReference", + "properties": { + "forwardingRule": { + "type": "string" + } + }, + "type": "object" + }, "ForwardingRulesScopedList": { "id": "ForwardingRulesScopedList", "properties": { @@ -22542,6 +23207,59 @@ }, "type": "object" }, + "HTTP2HealthCheck": { + "id": "HTTP2HealthCheck", + "properties": { + "host": { + "description": "The value of the host header in the HTTP/2 health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", + "type": "string" + }, + "port": { + "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", + "format": "int32", + "type": "integer" + }, + "portName": { + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "type": "string" + }, + "portSpecification": { + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTP2 health check follows behavior specified in\nport\nand\nportName\nfields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "proxyHeader": { + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "requestPath": { + "description": "The request path of the HTTP/2 health check request. The default value is /.", + "type": "string" + }, + "response": { + "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", + "type": "string" + } + }, + "type": "object" + }, "HTTPHealthCheck": { "id": "HTTPHealthCheck", "properties": { @@ -22558,6 +23276,20 @@ "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", "type": "string" }, + "portSpecification": { + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTP health check follows behavior specified in\nport\nand\nportName\nfields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "proxyHeader": { "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", "enum": [ @@ -22597,6 +23329,20 @@ "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", "type": "string" }, + "portSpecification": { + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTPS health check follows behavior specified in\nport\nand\nportName\nfields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "proxyHeader": { "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", "enum": [ @@ -22642,6 +23388,9 @@ "format": "int32", "type": "integer" }, + "http2HealthCheck": { + "$ref": "HTTP2HealthCheck" + }, "httpHealthCheck": { "$ref": "HTTPHealthCheck" }, @@ -22679,9 +23428,10 @@ "type": "integer" }, "type": { - "description": "Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If not specified, the default is TCP. Exactly one of the protocol-specific health check field must be specified, which must match type field.", + "description": "Specifies the type of the healthCheck, either TCP, SSL, HTTP, HTTPS or HTTP2. If not specified, the default is TCP. Exactly one of the protocol-specific health check field must be specified, which must match type field.", "enum": [ "HTTP", + "HTTP2", "HTTPS", "INVALID", "SSL", @@ -22692,6 +23442,7 @@ "", "", "", + "", "" ], "type": "string" @@ -22857,6 +23608,40 @@ }, "type": "object" }, + "HealthStatusForNetworkEndpoint": { + "id": "HealthStatusForNetworkEndpoint", + "properties": { + "backendService": { + "$ref": "BackendServiceReference", + "description": "URL of the backend service associated with the health state of the network endpoint." + }, + "forwardingRule": { + "$ref": "ForwardingRuleReference", + "description": "URL of the forwarding rule associated with the health state of the network endpoint." + }, + "healthCheck": { + "$ref": "HealthCheckReference", + "description": "URL of the health check associated with the health state of the network endpoint." + }, + "healthState": { + "description": "Health state of the network endpoint determined based on the health checks configured.", + "enum": [ + "DRAINING", + "HEALTHY", + "UNHEALTHY", + "UNKNOWN" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "HostRule": { "description": "UrlMaps A host-matching rule for a URL. 
If matched, will use the named PathMatcher to select the BackendService.", "id": "HostRule", @@ -23339,7 +24124,7 @@ "type": "string" }, "sha1Checksum": { - "description": "An optional SHA1 checksum of the disk image before unpackaging; provided by the client when the disk image is created.", + "description": "[Deprecated] This field is deprecated. An optional SHA1 checksum of the disk image before unpackaging provided by the client when the disk image is created.", "pattern": "[a-f0-9]{40}", "type": "string" }, @@ -23409,11 +24194,13 @@ "status": { "description": "[Output Only] The status of the image. An image can be used to create other resources, such as instances, only after the image has been successfully created and the status is set to READY. Possible values are FAILED, PENDING, or READY.", "enum": [ + "DELETING", "FAILED", "PENDING", "READY" ], "enumDescriptions": [ + "", "", "", "" @@ -23573,6 +24360,9 @@ }, "type": "array" }, + "hostname": { + "type": "string" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -23644,6 +24434,12 @@ }, "type": "array" }, + "shieldedInstanceConfig": { + "$ref": "ShieldedInstanceConfig" + }, + "shieldedInstanceIntegrityPolicy": { + "$ref": "ShieldedInstanceIntegrityPolicy" + }, "startRestricted": { "description": "[Output Only] Whether a VM has been restricted for start because Compute Engine has detected suspicious activity.", "type": "boolean" @@ -23652,6 +24448,7 @@ "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, STOPPED, SUSPENDING, SUSPENDED, and TERMINATED.", "enum": [ "PROVISIONING", + "REPAIRING", "RUNNING", "STAGING", "STOPPED", @@ -23668,6 +24465,7 @@ "", "", "", + "", "" ], "type": "string" @@ -24099,6 +24897,13 @@ "description": "An Instance Group Manager resource. (== resource_for beta.instanceGroupManagers ==) (== resource_for v1.instanceGroupManagers ==) (== resource_for beta.regionInstanceGroupManagers ==) (== resource_for v1.regionInstanceGroupManagers ==)", "id": "InstanceGroupManager", "properties": { + "autoHealingPolicies": { + "description": "The autohealing policy for this managed instance group. You can specify only one value.", + "items": { + "$ref": "InstanceGroupManagerAutoHealingPolicy" + }, + "type": "array" + }, "baseInstanceName": { "annotations": { "required": [ @@ -24174,6 +24979,10 @@ "description": "[Output Only] The URL for this managed instance group. The server defines this URL.", "type": "string" }, + "status": { + "$ref": "InstanceGroupManagerStatus", + "description": "[Output Only] The status of this managed instance group." + }, "targetPools": { "description": "The URLs for all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.", "items": { @@ -24192,6 +25001,17 @@ "format": "int32", "type": "integer" }, + "updatePolicy": { + "$ref": "InstanceGroupManagerUpdatePolicy", + "description": "The update policy for this managed instance group." + }, + "versions": { + "description": "Specifies the instance templates used by this managed instance group to create instances.\n\nEach version is defined by an instanceTemplate and a name. Every version can appear at most once per instance group. This field overrides the top-level instanceTemplate field. Read more about the relationships between these fields. 
Exactly one version must leave the targetSize field unset. That version will be applied to all remaining instances. For more information, read about canary updates.", + "items": { + "$ref": "InstanceGroupManagerVersion" + }, + "type": "array" + }, "zone": { "description": "[Output Only] The URL of the zone where the managed instance group is located (for zonal resources).", "type": "string" @@ -24362,6 +25182,22 @@ }, "type": "object" }, + "InstanceGroupManagerAutoHealingPolicy": { + "description": "", + "id": "InstanceGroupManagerAutoHealingPolicy", + "properties": { + "healthCheck": { + "description": "The URL for the health check that signals autohealing.", + "type": "string" + }, + "initialDelaySec": { + "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600].", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "InstanceGroupManagerList": { "description": "[Output Only] A list of managed instance groups.", "id": "InstanceGroupManagerList", @@ -24474,6 +25310,71 @@ }, "type": "object" }, + "InstanceGroupManagerStatus": { + "id": "InstanceGroupManagerStatus", + "properties": { + "isStable": { + "description": "[Output Only] A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.", + "type": "boolean" + } + }, + "type": "object" + }, + "InstanceGroupManagerUpdatePolicy": { + "id": "InstanceGroupManagerUpdatePolicy", + "properties": { + "maxSurge": { + "$ref": "FixedOrPercent", + "description": "The maximum number of instances that can be created above the specified targetSize during the update process. By default, a fixed value of 1 is used. This value can be either a fixed number or a percentage if the instance group has 10 or more instances. If you set a percentage, the number of instances will be rounded up if necessary.\n\nAt least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxSurge." + }, + "maxUnavailable": { + "$ref": "FixedOrPercent", + "description": "The maximum number of instances that can be unavailable during the update process. An instance is considered available if all of the following conditions are satisfied:\n\n \n- The instance's status is RUNNING. \n- If there is a health check on the instance group, the instance's liveness health check result must be HEALTHY at least once. If there is no health check on the group, then the instance only needs to have a status of RUNNING to be considered available. By default, a fixed value of 1 is used. This value can be either a fixed number or a percentage if the instance group has 10 or more instances. If you set a percentage, the number of instances will be rounded up if necessary.\n\nAt least one of either maxSurge or maxUnavailable must be greater than 0. Learn more about maxUnavailable." + }, + "minimalAction": { + "description": "Minimal action to be taken on an instance. 
You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a RESTART, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action.", + "enum": [ + "REPLACE", + "RESTART" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "type": { + "enum": [ + "OPPORTUNISTIC", + "PROACTIVE" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "InstanceGroupManagerVersion": { + "id": "InstanceGroupManagerVersion", + "properties": { + "instanceTemplate": { + "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create new instances in the managed instance group until the `targetSize` for this version is reached.", + "type": "string" + }, + "name": { + "description": "Name of the version. Unique among all versions in the scope of this managed instance group.", + "type": "string" + }, + "targetSize": { + "$ref": "FixedOrPercent", + "description": "Specifies the intended number of instances to be created from the instanceTemplate. The final number of instances created from the template will be equal to: \n- If expressed as a fixed number, the minimum of either targetSize.fixed or instanceGroupManager.targetSize is used. \n- if expressed as a percent, the targetSize would be (targetSize.percent/100 * InstanceGroupManager.targetSize) If there is a remainder, the number is rounded up. If unset, this version will update any remaining instances not updated by another version. Read Starting a canary update for more information." + } + }, + "type": "object" + }, "InstanceGroupManagersAbandonInstancesRequest": { "id": "InstanceGroupManagersAbandonInstancesRequest", "properties": { @@ -25221,6 +26122,9 @@ }, "type": "array" }, + "shieldedInstanceConfig": { + "$ref": "ShieldedInstanceConfig" + }, "tags": { "$ref": "Tags", "description": "A list of tags to apply to the instances that are created from this template. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." @@ -25419,6 +26323,7 @@ "description": "[Output Only] The status of the instance.", "enum": [ "PROVISIONING", + "REPAIRING", "RUNNING", "STAGING", "STOPPED", @@ -25435,6 +26340,7 @@ "", "", "", + "", "" ], "type": "string" @@ -25655,7 +26561,7 @@ "type": "string" }, "googleReferenceId": { - "description": "[Output Only] Google reference ID; to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.", + "description": "[Output Only] Google reference ID to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.", "type": "string" }, "id": { @@ -25671,7 +26577,7 @@ "type": "array" }, "interconnectType": { - "description": "Type of interconnect. Note that \"IT_PRIVATE\" has been deprecated in favor of \"DEDICATED\"", + "description": "Type of interconnect, which can take one of the following values: \n- PARTNER: A partner-managed interconnection shared between customers though a partner. \n- DEDICATED: A dedicated physical interconnection with the customer. 
Note that a value IT_PRIVATE has been deprecated in favor of DEDICATED.", "enum": [ "DEDICATED", "IT_PRIVATE", @@ -25690,7 +26596,7 @@ "type": "string" }, "linkType": { - "description": "Type of link requested. This field indicates speed of each of the links in the bundle, not the entire bundle. Only 10G per link is allowed for a dedicated interconnect. Options: Ethernet_10G_LR", + "description": "Type of link requested, which can take one of the following values: \n- LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics \n- LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.", "enum": [ "LINK_TYPE_ETHERNET_10G_LR" ], @@ -25718,7 +26624,7 @@ "type": "string" }, "operationalStatus": { - "description": "[Output Only] The current status of whether or not this Interconnect is functional.", + "description": "[Output Only] The current status of this Interconnect's functionality, which can take one of the following values: \n- OS_ACTIVE: A valid Interconnect, which is turned up and is ready to use. Attachments may be provisioned on this Interconnect. \n- OS_UNPROVISIONED: An Interconnect that has not completed turnup. No attachments may be provisioned on this Interconnect. \n- OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect.", "enum": [ "OS_ACTIVE", "OS_UNPROVISIONED" @@ -25748,7 +26654,7 @@ "type": "string" }, "state": { - "description": "[Output Only] The current state of whether or not this Interconnect is functional.", + "description": "[Output Only] The current state of Interconnect functionality, which can take one of the following values: \n- ACTIVE: The Interconnect is valid, turned up and ready to use. Attachments may be provisioned on this Interconnect. \n- UNPROVISIONED: The Interconnect has not completed turnup. No attachments may be provisioned on this Interconnect. \n- UNDER_MAINTENANCE: The Interconnect is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect.", "enum": [ "ACTIVE", "UNPROVISIONED" @@ -25771,7 +26677,7 @@ "type": "boolean" }, "bandwidth": { - "description": "Provisioned bandwidth capacity for the interconnectAttachment. Can be set by the partner to update the customer's provisioned bandwidth. Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED.", + "description": "Provisioned bandwidth capacity for the interconnect attachment. For attachments of type DEDICATED, the user can set the bandwidth. For attachments of type PARTNER, the Google Partner that is operating the interconnect must set the bandwidth. Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, and can take one of the following values: \n- BPS_50M: 50 Mbit/s \n- BPS_100M: 100 Mbit/s \n- BPS_200M: 200 Mbit/s \n- BPS_300M: 300 Mbit/s \n- BPS_400M: 400 Mbit/s \n- BPS_500M: 500 Mbit/s \n- BPS_1G: 1 Gbit/s \n- BPS_2G: 2 Gbit/s \n- BPS_5G: 5 Gbit/s \n- BPS_10G: 10 Gbit/s", "enum": [ "BPS_100M", "BPS_10G", @@ -25822,7 +26728,7 @@ "type": "string" }, "edgeAvailabilityDomain": { - "description": "Desired availability domain for the attachment. Only available for type PARTNER, at creation time. For improved reliability, customers should configure a pair of attachments with one per availability domain. 
The selected availability domain will be provided to the Partner via the pairing key so that the provisioned circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.", + "description": "Desired availability domain for the attachment. Only available for type PARTNER, at creation time, and can take one of the following values: \n- AVAILABILITY_DOMAIN_ANY \n- AVAILABILITY_DOMAIN_1 \n- AVAILABILITY_DOMAIN_2 For improved reliability, customers should configure a pair of attachments, one per availability domain. The selected availability domain will be provided to the Partner via the pairing key, so that the provisioned circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.", "enum": [ "AVAILABILITY_DOMAIN_1", "AVAILABILITY_DOMAIN_2", @@ -25859,7 +26765,7 @@ "type": "string" }, "operationalStatus": { - "description": "[Output Only] The current status of whether or not this interconnect attachment is functional.", + "description": "[Output Only] The current status of whether or not this interconnect attachment is functional, which can take one of the following values: \n- OS_ACTIVE: The attachment has been turned up and is ready to use. \n- OS_UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete.", "enum": [ "OS_ACTIVE", "OS_UNPROVISIONED" @@ -25875,7 +26781,7 @@ "type": "string" }, "partnerAsn": { - "description": "Optional BGP ASN for the router that should be supplied by a layer 3 Partner if they configured BGP on behalf of the customer. Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED.", + "description": "Optional BGP ASN for the router supplied by a Layer 3 Partner if they configured BGP on behalf of the customer. Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED.", "format": "int64", "type": "string" }, @@ -25900,7 +26806,7 @@ "type": "string" }, "state": { - "description": "[Output Only] The current state of this attachment's functionality.", + "description": "[Output Only] The current state of this attachment's functionality. Enum values ACTIVE and UNPROVISIONED are shared by DEDICATED/PRIVATE, PARTNER, and PARTNER_PROVIDER interconnect attachments, while enum values PENDING_PARTNER, PARTNER_REQUEST_RECEIVED, and PENDING_CUSTOMER are used for only PARTNER and PARTNER_PROVIDER interconnect attachments. This state can take one of the following values: \n- ACTIVE: The attachment has been turned up and is ready to use. \n- UNPROVISIONED: The attachment is not ready to use yet, because turnup is not complete. \n- PENDING_PARTNER: A newly-created PARTNER attachment that has not yet been configured on the Partner side. \n- PARTNER_REQUEST_RECEIVED: A PARTNER attachment is in the process of provisioning after a PARTNER_PROVIDER attachment was created that references it. \n- PENDING_CUSTOMER: A PARTNER or PARTNER_PROVIDER attachment that is waiting for a customer to activate it. \n- DEFUNCT: The attachment was deleted externally and is no longer functional. This could be because the associated Interconnect was removed, or because the other side of a Partner attachment was deleted.", "enum": [ "ACTIVE", "DEFUNCT", @@ -25922,6 +26828,7 @@ "type": "string" }, "type": { + "description": "The type of interconnect attachment this is, which can take one of the following values: \n- DEDICATED: an attachment to a Dedicated Interconnect. 
\n- PARTNER: an attachment to a Partner Interconnect, created by the customer. \n- PARTNER_PROVIDER: an attachment to a Partner Interconnect, created by the partner.", "enum": [ "DEDICATED", "PARTNER", @@ -26179,7 +27086,7 @@ "type": "string" }, "portalUrl": { - "description": "URL of the Partner?s portal for this Attachment. Partners may customise this to be a deep-link to the specific resource on the Partner portal. This value may be validated to match approved Partner values.", + "description": "URL of the Partner?s portal for this Attachment. Partners may customise this to be a deep link to the specific resource on the Partner portal. This value may be validated to match approved Partner values.", "type": "string" } }, @@ -26362,6 +27269,7 @@ "type": "string" }, "state": { + "description": "The state of a LACP link, which can take one of the following values: \n- ACTIVE: The link is configured and active within the bundle. \n- DETACHED: The link is not configured within the bundle. This means that the rest of the object should be empty.", "enum": [ "ACTIVE", "DETACHED" @@ -26379,6 +27287,7 @@ "id": "InterconnectDiagnosticsLinkOpticalPower", "properties": { "state": { + "description": "The status of the current value when compared to the warning and alarm levels for the receiving or transmitting transceiver. Possible states include: \n- OK: The value has not crossed a warning threshold. \n- LOW_WARNING: The value has crossed below the low warning threshold. \n- HIGH_WARNING: The value has crossed above the high warning threshold. \n- LOW_ALARM: The value has crossed below the low alarm threshold. \n- HIGH_ALARM: The value has crossed above the high alarm threshold.", "enum": [ "HIGH_ALARM", "HIGH_WARNING", @@ -26396,7 +27305,7 @@ "type": "string" }, "value": { - "description": "Value of the current optical power, read in dBm. Take a known good optical value, give it a 10% margin and trigger warnings relative to that value. In general, a -7dBm warning and a -11dBm alarm are good optical value estimates for most links.", + "description": "Value of the current receiving or transmitting optical power, read in dBm. Take a known good optical value, give it a 10% margin and trigger warnings relative to that value. In general, a -7dBm warning and a -11dBm alarm are good optical value estimates for most links.", "format": "float", "type": "number" } @@ -26425,10 +27334,12 @@ "$ref": "InterconnectDiagnosticsLinkLACPStatus" }, "receivingOpticalPower": { - "$ref": "InterconnectDiagnosticsLinkOpticalPower" + "$ref": "InterconnectDiagnosticsLinkOpticalPower", + "description": "An InterconnectDiagnostics.LinkOpticalPower object, describing the current value and status of the received light level." }, "transmittingOpticalPower": { - "$ref": "InterconnectDiagnosticsLinkOpticalPower" + "$ref": "InterconnectDiagnosticsLinkOpticalPower", + "description": "An InterconnectDiagnostics.LinkOpticalPower object, describing the current value and status of the transmitted light level." 
} }, "type": "object" @@ -26562,7 +27473,7 @@ "type": "string" }, "continent": { - "description": "[Output Only] Continent for this location.", + "description": "[Output Only] Continent for this location, which can take one of the following values: \n- AFRICA \n- ASIA_PAC \n- EUROPE \n- NORTH_AMERICA \n- SOUTH_AMERICA", "enum": [ "AFRICA", "ASIA_PAC", @@ -26633,6 +27544,18 @@ "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" + }, + "status": { + "description": "[Output Only] The status of this InterconnectLocation, which can take one of the following values: \n- CLOSED: The InterconnectLocation is closed and is unavailable for provisioning new Interconnects. \n- AVAILABLE: The InterconnectLocation is available for provisioning new Interconnects.", + "enum": [ + "AVAILABLE", + "CLOSED" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" } }, "type": "object" @@ -26802,7 +27725,7 @@ "type": "string" }, "issueType": { - "description": "Form this outage is expected to take. Note that the \"IT_\" versions of this enum have been deprecated in favor of the unprefixed values.", + "description": "Form this outage is expected to take, which can take one of the following values: \n- OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. \n- PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", "enum": [ "IT_OUTAGE", "IT_PARTIAL_OUTAGE", @@ -26822,7 +27745,7 @@ "type": "string" }, "source": { - "description": "The party that generated this notification. Note that \"NSRC_GOOGLE\" has been deprecated in favor of \"GOOGLE\"", + "description": "The party that generated this notification, which can take the following value: \n- GOOGLE: this notification as generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", "enum": [ "GOOGLE", "NSRC_GOOGLE" @@ -26839,7 +27762,7 @@ "type": "string" }, "state": { - "description": "State of this notification. Note that the \"NS_\" versions of this enum have been deprecated in favor of the unprefixed values.", + "description": "State of this notification, which can take one of the following values: \n- ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. \n- CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", "enum": [ "ACTIVE", "CANCELLED", @@ -27189,7 +28112,7 @@ "id": "LogConfigDataAccessOptions", "properties": { "logMode": { - "description": "Whether Gin logging should happen in a fail-closed manner at the caller. This is relevant only in the LocalIAM implementation, for now.\n\nNOTE: Logging to Gin in a fail-closed manner is currently unsupported while work is being done to satisfy the requirements of go/345. Currently, setting LOG_FAIL_CLOSED mode will have no effect, but still exists because there is active work being done to support it (b/115874152).", + "description": "Whether Gin logging should happen in a fail-closed manner at the caller. 
This is relevant only in the LocalIAM implementation, for now.", "enum": [ "LOG_FAIL_CLOSED", "LOG_MODE_UNSPECIFIED" @@ -27606,239 +28529,829 @@ }, "type": "object" }, - "ManagedInstance": { - "description": "A Managed Instance resource.", - "id": "ManagedInstance", - "properties": { - "currentAction": { - "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: \n- NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. \n- CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. \n- CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's targetSize value is decreased instead. \n- RECREATING The managed instance group is recreating this instance. \n- DELETING The managed instance group is permanently deleting this instance. \n- ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. \n- RESTARTING The managed instance group is restarting the instance. \n- REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance. \n- VERIFYING The managed instance group has created the instance and it is in the process of being verified.", - "enum": [ - "ABANDONING", - "CREATING", - "CREATING_WITHOUT_RETRIES", - "DELETING", - "NONE", - "RECREATING", - "REFRESHING", - "RESTARTING", - "VERIFYING" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "id": { - "description": "[Output only] The unique identifier for this resource. This field is empty when instance does not exist.", - "format": "uint64", - "type": "string" - }, - "instance": { - "description": "[Output Only] The URL of the instance. The URL can exist even if the instance has not yet been created.", - "type": "string" - }, - "instanceStatus": { - "description": "[Output Only] The status of the instance. This field is empty when the instance does not exist.", - "enum": [ - "PROVISIONING", - "RUNNING", - "STAGING", - "STOPPED", - "STOPPING", - "SUSPENDED", - "SUSPENDING", - "TERMINATED" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "lastAttempt": { - "$ref": "ManagedInstanceLastAttempt", - "description": "[Output Only] Information about the last attempt to create or delete the instance." - } - }, - "type": "object" - }, - "ManagedInstanceLastAttempt": { - "id": "ManagedInstanceLastAttempt", - "properties": { - "errors": { - "description": "[Output Only] Encountered errors during the last attempt to create or delete the instance.", - "properties": { - "errors": { - "description": "[Output Only] The array of errors encountered while processing this operation.", - "items": { - "properties": { - "code": { - "description": "[Output Only] The error type identifier for this error.", - "type": "string" - }, - "location": { - "description": "[Output Only] Indicates the field in the request that caused the error. 
This property is optional.", - "type": "string" - }, - "message": { - "description": "[Output Only] An optional, human-readable error message.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "Metadata": { - "description": "A metadata key/value entry.", - "id": "Metadata", + "ManagedInstance": { + "description": "A Managed Instance resource.", + "id": "ManagedInstance", + "properties": { + "currentAction": { + "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: \n- NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. \n- CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. \n- CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's targetSize value is decreased instead. \n- RECREATING The managed instance group is recreating this instance. \n- DELETING The managed instance group is permanently deleting this instance. \n- ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. \n- RESTARTING The managed instance group is restarting the instance. \n- REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance. \n- VERIFYING The managed instance group has created the instance and it is in the process of being verified.", + "enum": [ + "ABANDONING", + "CREATING", + "CREATING_WITHOUT_RETRIES", + "DELETING", + "NONE", + "RECREATING", + "REFRESHING", + "RESTARTING", + "VERIFYING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "id": { + "description": "[Output only] The unique identifier for this resource. This field is empty when instance does not exist.", + "format": "uint64", + "type": "string" + }, + "instance": { + "description": "[Output Only] The URL of the instance. The URL can exist even if the instance has not yet been created.", + "type": "string" + }, + "instanceStatus": { + "description": "[Output Only] The status of the instance. This field is empty when the instance does not exist.", + "enum": [ + "PROVISIONING", + "REPAIRING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "lastAttempt": { + "$ref": "ManagedInstanceLastAttempt", + "description": "[Output Only] Information about the last attempt to create or delete the instance." + }, + "version": { + "$ref": "ManagedInstanceVersion", + "description": "[Output Only] Intended version of this instance." 
+ } + }, + "type": "object" + }, + "ManagedInstanceLastAttempt": { + "id": "ManagedInstanceLastAttempt", + "properties": { + "errors": { + "description": "[Output Only] Encountered errors during the last attempt to create or delete the instance.", + "properties": { + "errors": { + "description": "[Output Only] The array of errors encountered while processing this operation.", + "items": { + "properties": { + "code": { + "description": "[Output Only] The error type identifier for this error.", + "type": "string" + }, + "location": { + "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional.", + "type": "string" + }, + "message": { + "description": "[Output Only] An optional, human-readable error message.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ManagedInstanceVersion": { + "id": "ManagedInstanceVersion", + "properties": { + "instanceTemplate": { + "description": "[Output Only] The intended template of the instance. This field is empty when current_action is one of { DELETING, ABANDONING }.", + "type": "string" + }, + "name": { + "description": "[Output Only] Name of the version.", + "type": "string" + } + }, + "type": "object" + }, + "Metadata": { + "description": "A metadata key/value entry.", + "id": "Metadata", + "properties": { + "fingerprint": { + "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve the resource.", + "format": "byte", + "type": "string" + }, + "items": { + "description": "Array of key/value pairs. The total size of all keys and values must be less than 512 KB.", + "items": { + "properties": { + "key": { + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + }, + "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.", + "pattern": "[a-zA-Z0-9-_]{1,128}", + "type": "string" + }, + "value": { + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + }, + "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB).", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "kind": { + "default": "compute#metadata", + "description": "[Output Only] Type of the resource. Always compute#metadata for metadata.", + "type": "string" + } + }, + "type": "object" + }, + "NamedPort": { + "description": "The named port. For example: .", + "id": "NamedPort", + "properties": { + "name": { + "description": "The name for this named port. 
The name must be 1-63 characters long, and comply with RFC1035.", + "type": "string" + }, + "port": { + "description": "The port number, which can be a value between 1 and 65535.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "Network": { + "description": "Represents a Network resource. Read Virtual Private Cloud (VPC) Network Overview for more information. (== resource_for v1.networks ==) (== resource_for beta.networks ==)", + "id": "Network", + "properties": { + "IPv4Range": { + "description": "Deprecated in favor of subnet mode networks. The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}", + "type": "string" + }, + "autoCreateSubnetworks": { + "description": "When set to true, the VPC network is created in \"auto\" mode. When set to false, the VPC network is created in \"custom\" mode.\n\nAn auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges.", + "type": "boolean" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "gatewayIPv4": { + "description": "[Output Only] The gateway address for default routing out of the network. This value is read only and is selected by GCP.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#network", + "description": "[Output Only] Type of the resource. Always compute#network for networks.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.networks.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "peerings": { + "description": "[Output Only] A list of network peerings for the resource.", + "items": { + "$ref": "NetworkPeering" + }, + "type": "array" + }, + "routingConfig": { + "$ref": "NetworkRoutingConfig", + "description": "The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce." 
+ }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "subnetworks": { + "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this VPC network.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "NetworkEndpoint": { + "description": "The network endpoint.", + "id": "NetworkEndpoint", + "properties": { + "instance": { + "description": "The name for a specific VM instance that the IP address belongs to. This is required for network endpoints of type GCE_VM_IP_PORT. The instance must be in the same zone of network endpoint group.\n\nThe name must be 1-63 characters long, and comply with RFC1035.", + "type": "string" + }, + "ipAddress": { + "description": "Optional IPv4 address of network endpoint. The IP address must belong to a VM in GCE (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used.", + "type": "string" + }, + "port": { + "description": "Optional port number of network endpoint. If not specified and the NetworkEndpointGroup.network_endpoint_type is GCE_IP_PORT, the defaultPort for the network endpoint group will be used.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "NetworkEndpointGroup": { + "description": "Represents a collection of network endpoints.", + "id": "NetworkEndpointGroup", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "defaultPort": { + "description": "The default port used if the port number is not specified in the network endpoint.", + "format": "int32", + "type": "integer" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#networkEndpointGroup", + "description": "[Output Only] Type of the resource. Always compute#networkEndpointGroup for network endpoint group.", + "type": "string" + }, + "name": { + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "type": "string" + }, + "network": { + "description": "The URL of the network to which all network endpoints in the NEG belong. Uses \"default\" project network if unspecified.", + "type": "string" + }, + "networkEndpointType": { + "description": "Type of network endpoints in this network endpoint group. 
Currently the only supported value is GCE_VM_IP_PORT.", + "enum": [ + "GCE_VM_IP_PORT" + ], + "enumDescriptions": [ + "" + ], + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "size": { + "description": "[Output only] Number of network endpoints in the network endpoint group.", + "format": "int32", + "type": "integer" + }, + "subnetwork": { + "description": "Optional URL of the subnetwork to which all network endpoints in the NEG belong.", + "type": "string" + }, + "zone": { + "description": "[Output Only] The URL of the zone where the network endpoint group is located.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupAggregatedList": { + "id": "NetworkEndpointGroupAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "NetworkEndpointGroupsScopedList", + "description": "The name of the scope that contains this set of network endpoint groups." + }, + "description": "A list of NetworkEndpointGroupsScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#networkEndpointGroupAggregatedList", + "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupAggregatedList for aggregated lists of network endpoint groups.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "NetworkEndpointGroupList": { + "id": "NetworkEndpointGroupList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of NetworkEndpointGroup resources.", + "items": { + "$ref": "NetworkEndpointGroup" + }, + "type": "array" + }, + "kind": { + "default": "compute#networkEndpointGroupList", + "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupList for network endpoint group lists.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "NetworkEndpointGroupsAttachEndpointsRequest": { + "id": "NetworkEndpointGroupsAttachEndpointsRequest", + "properties": { + "networkEndpoints": { + "description": "The list of network endpoints to be attached.", + "items": { + "$ref": "NetworkEndpoint" + }, + "type": "array" + } + }, + "type": "object" + }, + "NetworkEndpointGroupsDetachEndpointsRequest": { + "id": "NetworkEndpointGroupsDetachEndpointsRequest", + "properties": { + "networkEndpoints": { + "description": "The list of network endpoints to be detached.", + "items": { + "$ref": "NetworkEndpoint" + }, + "type": "array" + } + }, + "type": "object" + }, + "NetworkEndpointGroupsListEndpointsRequest": { + "id": "NetworkEndpointGroupsListEndpointsRequest", + "properties": { + "healthStatus": { + "description": "Optional query parameter for showing the health status of each network endpoint. Valid options are SKIP or SHOW. If you don't specifiy this parameter, the health status of network endpoints will not be provided.", + "enum": [ + "SHOW", + "SKIP" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupsListNetworkEndpoints": { + "id": "NetworkEndpointGroupsListNetworkEndpoints", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of NetworkEndpointWithHealthStatus resources.", + "items": { + "$ref": "NetworkEndpointWithHealthStatus" + }, + "type": "array" + }, + "kind": { + "default": "compute#networkEndpointGroupsListNetworkEndpoints", + "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupsListNetworkEndpoints for the list of network endpoints in the specified network endpoint group.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "NetworkEndpointGroupsScopedList": { + "id": "NetworkEndpointGroupsScopedList", + "properties": { + "networkEndpointGroups": { + "description": "[Output Only] The list of network endpoint groups that are contained in this scope.", + "items": { + "$ref": "NetworkEndpointGroup" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] An informational warning that replaces the list of network endpoint groups when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "NetworkEndpointWithHealthStatus": { + "id": "NetworkEndpointWithHealthStatus", "properties": { - "fingerprint": { - "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet.\n\nTo see the latest fingerprint, make a get() request to retrieve the resource.", - "format": "byte", - "type": "string" - }, - "items": { - "description": "Array of key/value pairs. The total size of all keys and values must be less than 512 KB.", + "healths": { + "description": "[Output only] The health status of network endpoint;", "items": { - "properties": { - "key": { - "annotations": { - "required": [ - "compute.instances.insert", - "compute.projects.setCommonInstanceMetadata" - ] - }, - "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. 
Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.", - "pattern": "[a-zA-Z0-9-_]{1,128}", - "type": "string" - }, - "value": { - "annotations": { - "required": [ - "compute.instances.insert", - "compute.projects.setCommonInstanceMetadata" - ] - }, - "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB).", - "type": "string" - } - }, - "type": "object" + "$ref": "HealthStatusForNetworkEndpoint" }, "type": "array" }, - "kind": { - "default": "compute#metadata", - "description": "[Output Only] Type of the resource. Always compute#metadata for metadata.", - "type": "string" - } - }, - "type": "object" - }, - "NamedPort": { - "description": "The named port. For example: .", - "id": "NamedPort", - "properties": { - "name": { - "description": "The name for this named port. The name must be 1-63 characters long, and comply with RFC1035.", - "type": "string" - }, - "port": { - "description": "The port number, which can be a value between 1 and 65535.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "Network": { - "description": "Represents a Network resource. Read Virtual Private Cloud (VPC) Network Overview for more information. (== resource_for v1.networks ==) (== resource_for beta.networks ==)", - "id": "Network", - "properties": { - "IPv4Range": { - "description": "The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", - "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}", - "type": "string" - }, - "autoCreateSubnetworks": { - "description": "When set to true, the VPC network is created in \"auto\" mode. When set to false, the VPC network is created in \"custom\" mode.\n\nAn auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges.", - "type": "boolean" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "gatewayIPv4": { - "description": "[Output Only] The gateway address for default routing out of the network. This value is read only and is selected by GCP.", - "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#network", - "description": "[Output Only] Type of the resource. Always compute#network for networks.", - "type": "string" - }, - "name": { - "annotations": { - "required": [ - "compute.networks.insert" - ] - }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "peerings": { - "description": "[Output Only] A list of network peerings for the resource.", - "items": { - "$ref": "NetworkPeering" - }, - "type": "array" - }, - "routingConfig": { - "$ref": "NetworkRoutingConfig", - "description": "The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce." - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "subnetworks": { - "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this VPC network.", - "items": { - "type": "string" - }, - "type": "array" + "networkEndpoint": { + "$ref": "NetworkEndpoint", + "description": "[Output only] The network endpoint;" } }, "type": "object" @@ -28007,7 +29520,11 @@ "id": "NetworkPeering", "properties": { "autoCreateRoutes": { - "description": "Indicates whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network.", + "description": "This field will be deprecated soon. Prefer using exchange_subnet_routes instead. Indicates whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network.", + "type": "boolean" + }, + "exchangeSubnetRoutes": { + "description": "Whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the peering state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network.", "type": "boolean" }, "name": { @@ -28060,7 +29577,7 @@ "id": "NetworksAddPeeringRequest", "properties": { "autoCreateRoutes": { - "description": "Whether Google Compute Engine manages the routes automatically.", + "description": "This field will be deprecated soon. Prefer using exchange_subnet_routes in network_peering instead. Whether Google Compute Engine manages the routes automatically.", "type": "boolean" }, "name": { @@ -28072,6 +29589,10 @@ "description": "Name of the peering, which should conform to RFC1035.", "type": "string" }, + "networkPeering": { + "$ref": "NetworkPeering", + "description": "Network peering parameters. In order to specify route policies for peering using import/export custom routes, you will have to fill all peering related parameters (name, peer network, exchange_subnet_routes) in network_peeringfield. Corresponding fields in NetworksAddPeeringRequest will be deprecated soon." + }, "peerNetwork": { "description": "URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. 
If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network.", "type": "string" @@ -28090,7 +29611,7 @@ "type": "object" }, "NodeGroup": { - "description": "A NodeGroup resource.", + "description": "A NodeGroup resource. To create a node group, you must first create a node templates. To learn more about node groups and sole-tenant nodes, read the Sole-tenant nodes documentation. (== resource_for beta.nodeGroups ==) (== resource_for v1.nodeGroups ==)", "id": "NodeGroup", "properties": { "creationTimestamp": { @@ -28651,7 +30172,7 @@ "type": "object" }, "NodeTemplate": { - "description": "A Node Template resource.", + "description": "A Node Template resource. To learn more about node templates and sole-tenant nodes, read the Sole-tenant nodes documentation. (== resource_for beta.nodeTemplates ==) (== resource_for v1.nodeTemplates ==)", "id": "NodeTemplate", "properties": { "creationTimestamp": { @@ -29974,7 +31495,7 @@ "id": "PathMatcher", "properties": { "defaultService": { - "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService \nUse defaultService instead of defaultRouteAction when simple routing to a backend service is desired and other advanced capabilities like traffic splitting and URL rewrites are not required.\nOnly one of defaultService, defaultRouteAction or defaultUrlRedirect must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use", + "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. 
Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use", "type": "string" }, "description": { @@ -30007,7 +31528,7 @@ "type": "array" }, "service": { - "description": "The URL of the backend service resource if this rule is matched.\nUse service instead of routeAction when simple routing to a backend service is desired and other advanced capabilities like traffic splitting and rewrites are not required.\nOnly one of service, routeAction or urlRedirect should must be set.", + "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendService s. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", "type": "string" } }, @@ -30227,6 +31748,7 @@ "CPUS", "CPUS_ALL_REGIONS", "DISKS_TOTAL_GB", + "EXTERNAL_VPN_GATEWAYS", "FIREWALLS", "FORWARDING_RULES", "GLOBAL_INTERNAL_ADDRESSES", @@ -30243,8 +31765,10 @@ "INTERNAL_ADDRESSES", "IN_USE_ADDRESSES", "IN_USE_BACKUP_SCHEDULES", + "IN_USE_SNAPSHOT_SCHEDULES", "LOCAL_SSD_TOTAL_GB", "NETWORKS", + "NETWORK_ENDPOINT_GROUPS", "NVIDIA_K80_GPUS", "NVIDIA_P100_GPUS", "NVIDIA_P100_VWS_GPUS", @@ -30351,10 +31875,17 @@ "", "", "", + "", + "", + "", "" ], "type": "string" }, + "owner": { + "description": "[Output Only] Owning resource. This is the resource on which this quota is applied.", + "type": "string" + }, "usage": { "description": "[Output Only] Current usage of this metric.", "format": "double", @@ -31636,7 +33167,7 @@ "description": "BGP information specific to this router." }, "bgpPeers": { - "description": "BGP information that needs to be configured into the routing stack to establish the BGP peering. It must specify peer ASN and either interface name, IP, or peer IP. Please refer to RFC4273.", + "description": "BGP information that must be configured into the routing stack to establish BGP peering. This information must specify the peer ASN and either the interface name, IP address, or peer IP address. Please refer to RFC4273.", "items": { "$ref": "RouterBgpPeer" }, @@ -31656,7 +33187,7 @@ "type": "string" }, "interfaces": { - "description": "Router interfaces. Each interface requires either one linked resource (e.g. linkedVpnTunnel), or IP address and IP address range (e.g. ipRange), or both.", + "description": "Router interfaces. 
Each interface requires either one linked resource, (for example, linkedVpnTunnel), or IP address and IP address range (for example, ipRange), or both.", "items": { "$ref": "RouterInterface" }, @@ -31678,7 +33209,7 @@ "type": "string" }, "nats": { - "description": "A list of Nat services created in this router.", + "description": "A list of NAT services created in this router.", "items": { "$ref": "RouterNat" }, @@ -31836,7 +33367,7 @@ "id": "RouterBgp", "properties": { "advertiseMode": { - "description": "User-specified flag to indicate which mode to use for advertisement.", + "description": "User-specified flag to indicate which mode to use for advertisement. The options are DEFAULT or CUSTOM.", "enum": [ "CUSTOM", "DEFAULT" @@ -31891,7 +33422,7 @@ "type": "string" }, "advertisedGroups": { - "description": "User-specified list of prefix groups to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in Bgp message). These groups will be advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", + "description": "User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: \n- ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. \n- ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. \n- ALL_PEER_VPC_SUBNETS: Advertises peer subnets of the router's VPC network. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", "items": { "enum": [ "ALL_SUBNETS" @@ -31904,14 +33435,14 @@ "type": "array" }, "advertisedIpRanges": { - "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in Bgp message). These IP ranges will be advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges.", + "description": "User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). These IP ranges are advertised in addition to any specified groups. Leave this field blank to advertise no custom IP ranges.", "items": { "$ref": "RouterAdvertisedIpRange" }, "type": "array" }, "advertisedRoutePriority": { - "description": "The priority of routes advertised to this BGP peer. In the case where there is more than one matching route of maximum length, the routes with lowest priority value win.", + "description": "The priority of routes advertised to this BGP peer. Where there is more than one matching route of maximum length, the routes with the lowest priority value win.", "format": "uint32", "type": "integer" }, @@ -31924,7 +33455,7 @@ "type": "string" }, "managementType": { - "description": "[Output Only] The resource that configures and manages this BGP peer. MANAGED_BY_USER is the default value and can be managed by you or other users; MANAGED_BY_ATTACHMENT is a BGP peer that is configured and managed by Cloud Interconnect, specifically by an InterconnectAttachment of type PARTNER. 
Google will automatically create, update, and delete this type of BGP peer when the PARTNER InterconnectAttachment is created, updated, or deleted.", + "description": "[Output Only] The resource that configures and manages this BGP peer. \n- MANAGED_BY_USER is the default value and can be managed by you or other users \n- MANAGED_BY_ATTACHMENT is a BGP peer that is configured and managed by Cloud Interconnect, specifically by an InterconnectAttachment of type PARTNER. Google automatically creates, updates, and deletes this type of BGP peer when the PARTNER InterconnectAttachment is created, updated, or deleted.", "enum": [ "MANAGED_BY_ATTACHMENT", "MANAGED_BY_USER" @@ -31941,12 +33472,12 @@ "type": "string" }, "peerAsn": { - "description": "Peer BGP Autonomous System Number (ASN). For VPN use case, this value can be different for every tunnel.", + "description": "Peer BGP Autonomous System Number (ASN). Each BGP interface may use a different value.", "format": "uint32", "type": "integer" }, "peerIpAddress": { - "description": "IP address of the BGP interface outside Google cloud. Only IPv4 is supported.", + "description": "IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported.", "type": "string" } }, @@ -31956,19 +33487,19 @@ "id": "RouterInterface", "properties": { "ipRange": { - "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface.", + "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP address space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface.", "type": "string" }, "linkedInterconnectAttachment": { - "description": "URI of the linked interconnect attachment. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment.", + "description": "URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be either be a VPN tunnel or an Interconnect attachment.", "type": "string" }, "linkedVpnTunnel": { - "description": "URI of the linked VPN tunnel. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment.", + "description": "URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have one linked resource, which can be either a VPN tunnel or an Interconnect attachment.", "type": "string" }, "managementType": { - "description": "[Output Only] The resource that configures and manages this interface. MANAGED_BY_USER is the default value and can be managed by you or other users; MANAGED_BY_ATTACHMENT is an interface that is configured and managed by Cloud Interconnect, specifically by an InterconnectAttachment of type PARTNER. Google will automatically create, update, and delete this type of interface when the PARTNER InterconnectAttachment is created, updated, or deleted.", + "description": "[Output Only] The resource that configures and manages this interface. \n- MANAGED_BY_USER is the default value and can be managed directly by users. 
\n- MANAGED_BY_ATTACHMENT is an interface that is configured and managed by Cloud Interconnect, specifically, by an InterconnectAttachment of type PARTNER. Google automatically creates, updates, and deletes this type of interface when the PARTNER InterconnectAttachment is created, updated, or deleted.", "enum": [ "MANAGED_BY_ATTACHMENT", "MANAGED_BY_USER" @@ -32108,8 +33639,12 @@ "format": "int32", "type": "integer" }, + "logConfig": { + "$ref": "RouterNatLogConfig", + "description": "Configure logging on this NAT." + }, "minPortsPerVm": { - "description": "Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. This gets rounded up to the nearest power of 2. Eg. if the value of this field is 50, at least 64 ports will be allocated to a VM.", + "description": "Minimum number of ports allocated to a VM from this NAT config. If not set, a default number of ports is allocated to a VM. This is rounded up to the nearest power of 2. For example, if the value of this field is 50, at least 64 ports are allocated to a VM.", "format": "int32", "type": "integer" }, @@ -32119,7 +33654,7 @@ "type": "string" }, "natIpAllocateOption": { - "description": "Specify the NatIpAllocateOption. If it is AUTO_ONLY, then nat_ip should be empty.", + "description": "Specify the NatIpAllocateOption, which can take one of the following values: \n- MANUAL_ONLY: Uses only Nat IP addresses provided by customers. When there are not enough specified Nat IPs, the Nat service fails for new VMs. \n- AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; customers can't specify any Nat IPs. When choosing AUTO_ONLY, then nat_ip should be empty.", "enum": [ "AUTO_ONLY", "MANUAL_ONLY" @@ -32131,14 +33666,14 @@ "type": "string" }, "natIps": { - "description": "A list of URLs of the IP resources used for this Nat service. These IPs must be valid static external IP addresses assigned to the project. max_length is subject to change post alpha.", + "description": "A list of URLs of the IP resources used for this Nat service. These IP addresses must be valid static external IP addresses assigned to the project.", "items": { "type": "string" }, "type": "array" }, "sourceSubnetworkIpRangesToNat": { - "description": "Specify the Nat option. If this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", + "description": "Specify the Nat option, which can take one of the following values: \n- ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. \n- ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. \n- LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", "enum": [ "ALL_SUBNETWORKS_ALL_IP_RANGES", "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", @@ -32176,12 +33711,37 @@ }, "type": "object" }, + "RouterNatLogConfig": { + "description": "Configuration of logging on a NAT.", + "id": "RouterNatLogConfig", + "properties": { + "enable": { + "description": "Indicates whether or not to export logs. 
This is false by default.", + "type": "boolean" + }, + "filter": { + "description": "Specifies the desired filtering of logs on this NAT. If unspecified, logs are exported for all connections handled by this NAT.", + "enum": [ + "ALL", + "ERRORS_ONLY", + "TRANSLATIONS_ONLY" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "RouterNatSubnetworkToNat": { "description": "Defines the IP ranges that want to use NAT for a subnetwork.", "id": "RouterNatSubnetworkToNat", "properties": { "name": { - "description": "URL for the subnetwork resource to use NAT.", + "description": "URL for the subnetwork resource that will use NAT.", "type": "string" }, "secondaryIpRangeNames": { @@ -32192,7 +33752,7 @@ "type": "array" }, "sourceIpRangesToNat": { - "description": "Specify the options for NAT ranges in the Subnetwork. All usages of single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple values is: [\"PRIMARY_IP_RANGE\", \"LIST_OF_SECONDARY_IP_RANGES\"] Default: [ALL_IP_RANGES]", + "description": "Specify the options for NAT ranges in the Subnetwork. All options of a single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple values is: [\"PRIMARY_IP_RANGE\", \"LIST_OF_SECONDARY_IP_RANGES\"] Default: [ALL_IP_RANGES]", "items": { "enum": [ "ALL_IP_RANGES", @@ -32308,7 +33868,7 @@ "type": "object" }, "RouterStatusNatStatus": { - "description": "Status of a NAT contained in this router.", + "description": "Status of a NAT contained in this router. Next tag: 9", "id": "RouterStatusNatStatus", "properties": { "autoAllocatedNatIps": { @@ -32545,6 +34105,20 @@ "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", "type": "string" }, + "portSpecification": { + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, SSL health check follows behavior specified in\nport\nand\nportName\nfields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "proxyHeader": { "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. 
The default is NONE.", "enum": [ @@ -32911,6 +34485,71 @@ }, "type": "object" }, + "ShieldedInstanceConfig": { + "description": "A set of Shielded Instance options.", + "id": "ShieldedInstanceConfig", + "properties": { + "enableIntegrityMonitoring": { + "description": "Defines whether the instance has integrity monitoring enabled.", + "type": "boolean" + }, + "enableSecureBoot": { + "description": "Defines whether the instance has Secure Boot enabled.", + "type": "boolean" + }, + "enableVtpm": { + "description": "Defines whether the instance has the vTPM enabled.", + "type": "boolean" + } + }, + "type": "object" + }, + "ShieldedInstanceIdentity": { + "description": "A shielded Instance identity entry.", + "id": "ShieldedInstanceIdentity", + "properties": { + "encryptionKey": { + "$ref": "ShieldedInstanceIdentityEntry", + "description": "An Endorsement Key (EK) issued to the Shielded Instance's vTPM." + }, + "kind": { + "default": "compute#shieldedInstanceIdentity", + "description": "[Output Only] Type of the resource. Always compute#shieldedInstanceIdentity for shielded Instance identity entry.", + "type": "string" + }, + "signingKey": { + "$ref": "ShieldedInstanceIdentityEntry", + "description": "An Attestation Key (AK) issued to the Shielded Instance's vTPM." + } + }, + "type": "object" + }, + "ShieldedInstanceIdentityEntry": { + "description": "A Shielded Instance Identity Entry.", + "id": "ShieldedInstanceIdentityEntry", + "properties": { + "ekCert": { + "description": "A PEM-encoded X.509 certificate. This field can be empty.", + "type": "string" + }, + "ekPub": { + "description": "A PEM-encoded public key.", + "type": "string" + } + }, + "type": "object" + }, + "ShieldedInstanceIntegrityPolicy": { + "description": "The policy describes the baseline against which Instance boot integrity is measured.", + "id": "ShieldedInstanceIntegrityPolicy", + "properties": { + "updateAutoLearnPolicy": { + "description": "Updates the integrity policy baseline using the measurements from the VM instance's most recent boot.", + "type": "boolean" + } + }, + "type": "object" + }, "SignedUrlKey": { "description": "Represents a customer-supplied Signing Key used by Cloud CDN Signed URLs", "id": "SignedUrlKey", @@ -32992,7 +34631,7 @@ }, "snapshotEncryptionKey": { "$ref": "CustomerEncryptionKey", - "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the image later For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." + "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the snapshot later. 
For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the snapshot.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." }, "sourceDisk": { "description": "[Output Only] The source disk used to create this snapshot.", @@ -33040,6 +34679,13 @@ "" ], "type": "string" + }, + "storageLocations": { + "description": "GCS bucket storage location of the snapshot (regional or multi-regional).", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -34058,6 +35704,20 @@ "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", "type": "string" }, + "portSpecification": { + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, TCP health check follows behavior specified in\nport\nand\nportName\nfields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "proxyHeader": { "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", "enum": [ @@ -35720,7 +37380,7 @@ "type": "string" }, "forwardingRules": { - "description": "[Output Only] A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated to a VPN gateway.", + "description": "[Output Only] A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated with a VPN gateway.", "items": { "type": "string" }, @@ -35764,7 +37424,7 @@ "type": "string" }, "status": { - "description": "[Output Only] The status of the VPN gateway.", + "description": "[Output Only] The status of the VPN gateway, which can be one of the following: CREATING, READY, FAILED, or DELETING.", "enum": [ "CREATING", "DELETING", @@ -35780,7 +37440,7 @@ "type": "string" }, "tunnels": { - "description": "[Output Only] A list of URLs to VpnTunnel resources. VpnTunnels are created using compute.vpntunnels.insert method and associated to a VPN gateway.", + "description": "[Output Only] A list of URLs to VpnTunnel resources. 
VpnTunnels are created using the compute.vpntunnels.insert method and associated with a VPN gateway.", "items": { "type": "string" }, @@ -36017,7 +37677,7 @@ "id": "TargetVpnGatewaysScopedList", "properties": { "targetVpnGateways": { - "description": "[Output Only] A list of target vpn gateways contained in this scope.", + "description": "[Output Only] A list of target VPN gateways contained in this scope.", "items": { "$ref": "TargetVpnGateway" }, @@ -36160,7 +37820,7 @@ "type": "string" }, "defaultService": { - "description": "The URL of the backendService resource if none of the hostRules match.\nUse defaultService instead of defaultRouteAction when simple routing to a backendService is desired and other advanced capabilities like traffic splitting and rewrites are not required.\nOnly one of defaultService, defaultRouteAction or defaultUrlRedirect should must be set.", + "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.", "type": "string" }, "description": { @@ -36750,7 +38410,7 @@ "type": "string" }, "ikeVersion": { - "description": "IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. Default version is 2.", + "description": "IKE protocol version to use when establishing the VPN tunnel with the peer VPN gateway. Acceptable IKE versions are 1 or 2. The default version is 2.", "format": "int32", "type": "integer" }, @@ -36760,7 +38420,7 @@ "type": "string" }, "localTrafficSelector": { - "description": "Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", + "description": "Local traffic selector to use when establishing the VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges must be disjoint. Only IPv4 is supported.", "items": { "type": "string" }, @@ -36785,14 +38445,14 @@ "type": "string" }, "remoteTrafficSelector": { - "description": "Remote traffic selectors to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", + "description": "Remote traffic selectors to use when establishing the VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. 
Only IPv4 is supported.", "items": { "type": "string" }, "type": "array" }, "router": { - "description": "URL of router resource to be used for dynamic routing.", + "description": "URL of the router resource to be used for dynamic routing.", "type": "string" }, "selfLink": { @@ -36808,7 +38468,7 @@ "type": "string" }, "status": { - "description": "[Output Only] The status of the VPN tunnel.", + "description": "[Output Only] The status of the VPN tunnel, which can be one of the following: \n- PROVISIONING: Resource is being allocated for the VPN tunnel. \n- WAITING_FOR_FULL_CONFIG: Waiting to receive all VPN-related configs from the user. Network, TargetVpnGateway, VpnTunnel, ForwardingRule, and Route resources are needed to setup the VPN tunnel. \n- FIRST_HANDSHAKE: Successful first handshake with the peer VPN. \n- ESTABLISHED: Secure session is successfully established with the peer VPN. \n- NETWORK_ERROR: Deprecated, replaced by NO_INCOMING_PACKETS \n- AUTHORIZATION_ERROR: Auth error (for example, bad shared secret). \n- NEGOTIATION_FAILURE: Handshake failed. \n- DEPROVISIONING: Resources are being deallocated for the VPN tunnel. \n- FAILED: Tunnel creation has failed and the tunnel is not ready to be used.", "enum": [ "ALLOCATING_RESOURCES", "AUTHORIZATION_ERROR", @@ -36856,7 +38516,7 @@ "items": { "additionalProperties": { "$ref": "VpnTunnelsScopedList", - "description": "Name of the scope containing this set of vpn tunnels." + "description": "Name of the scope containing this set of VPN tunnels." }, "description": "A list of VpnTunnelsScopedList resources.", "type": "object" @@ -37074,7 +38734,7 @@ "id": "VpnTunnelsScopedList", "properties": { "vpnTunnels": { - "description": "A list of vpn tunnels contained in this scope.", + "description": "A list of VPN tunnels contained in this scope.", "items": { "$ref": "VpnTunnel" }, @@ -37299,7 +38959,7 @@ "type": "object" }, "Zone": { - "description": "A Zone resource. (== resource_for beta.zones ==) (== resource_for v1.zones ==)", + "description": "A Zone resource. (== resource_for beta.zones ==) (== resource_for v1.zones ==) Next ID: 17", "id": "Zone", "properties": { "availableCpuPlatforms": { diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index e00472231cddf..2c40ec8905598 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All rights reserved. +// Copyright 2019 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,13 +6,39 @@ // Package compute provides access to the Compute Engine API. // -// See https://developers.google.com/compute/docs/reference/latest/ +// For product documentation, see: https://developers.google.com/compute/docs/reference/latest/ +// +// Creating a client // // Usage example: // // import "google.golang.org/api/compute/v1" // ... -// computeService, err := compute.New(oauthHttpClient) +// ctx := context.Background() +// computeService, err := compute.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// Other authentication options +// +// By default, all available scopes (see "Constants") are used to authenticate. 
To restrict scopes, use option.WithScopes: +// +// computeService, err := compute.NewService(ctx, option.WithScopes(compute.DevstorageReadWriteScope)) +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// computeService, err := compute.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// computeService, err := compute.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. package compute // import "google.golang.org/api/compute/v1" import ( @@ -29,6 +55,8 @@ import ( gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + option "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code @@ -71,6 +99,37 @@ const ( DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" ) +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") @@ -102,6 +161,7 @@ func New(client *http.Client) (*Service, error) { s.LicenseCodes = NewLicenseCodesService(s) s.Licenses = NewLicensesService(s) s.MachineTypes = NewMachineTypesService(s) + s.NetworkEndpointGroups = NewNetworkEndpointGroupsService(s) s.Networks = NewNetworksService(s) s.NodeGroups = NewNodeGroupsService(s) s.NodeTemplates = NewNodeTemplatesService(s) @@ -194,6 +254,8 @@ type Service struct { MachineTypes *MachineTypesService + NetworkEndpointGroups *NetworkEndpointGroupsService + Networks *NetworksService NodeGroups *NodeGroupsService @@ -500,6 +562,15 @@ type MachineTypesService struct { s *Service } +func NewNetworkEndpointGroupsService(s *Service) *NetworkEndpointGroupsService { + rs := &NetworkEndpointGroupsService{s: s} + return rs +} + +type NetworkEndpointGroupsService struct { + s *Service +} + func NewNetworksService(s *Service) *NetworksService { rs := &NetworksService{s: s} return rs @@ -796,8 +867,11 @@ type AcceleratorConfig struct { AcceleratorCount int64 `json:"acceleratorCount,omitempty"` // AcceleratorType: Full or partial URL of the accelerator type resource - // to attach to this instance. If you are creating an instance template, - // specify only the accelerator name. + // to attach to this instance. For example: + // projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla- + // p100 If you are creating an instance template, specify only the + // accelerator name. See GPUs on Compute Engine for a full list of + // accelerator types. AcceleratorType string `json:"acceleratorType,omitempty"` // ForceSendFields is a list of field names (e.g. "AcceleratorCount") to @@ -1492,6 +1566,7 @@ type Address struct { // Possible values: // "DNS_RESOLVER" // "GCE_ENDPOINT" + // "NAT_AUTO" // "VPC_PEERING" Purpose string `json:"purpose,omitempty"` @@ -2053,7 +2128,7 @@ type AttachedDisk struct { // the instance. // // If not specified, the server chooses a default device name to apply - // to this disk, in the form persistent-disks-x, where x is a number + // to this disk, in the form persistent-disk-x, where x is a number // assigned by Google Compute Engine. This field is only applicable for // persistent disks. DeviceName string `json:"deviceName,omitempty"` @@ -2259,6 +2334,23 @@ type AttachedDiskInitializeParams struct { // the source images are encrypted with your own keys. SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` + // SourceSnapshot: The source snapshot to create this disk. When + // creating a new instance, one of initializeParams.sourceSnapshot or + // disks.source is required except for local SSD. + // + // To create a disk with a snapshot that you created, specify the + // snapshot name in the following + // format: + // global/snapshots/my-backup + // + // + // If the source snapshot is deleted later, this field will not be set. + SourceSnapshot string `json:"sourceSnapshot,omitempty"` + + // SourceSnapshotEncryptionKey: The customer-supplied encryption key of + // the source snapshot. + SourceSnapshotEncryptionKey *CustomerEncryptionKey `json:"sourceSnapshotEncryptionKey,omitempty"` + // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, @@ -3206,7 +3298,7 @@ func (s *AutoscalingPolicyCustomMetricUtilization) UnmarshalJSON(data []byte) er // of autoscaling based on load balancing. type AutoscalingPolicyLoadBalancingUtilization struct { // UtilizationTarget: Fraction of backend capacity utilization (set in - // HTTP(s) load balancing configuration) that autoscaler should + // HTTP(S) load balancing configuration) that autoscaler should // maintain. Must be a positive float value. If not defined, the default // is 0.8. UtilizationTarget float64 `json:"utilizationTarget,omitempty"` @@ -3311,6 +3403,15 @@ type Backend struct { // This cannot be used for internal load balancing. MaxConnections int64 `json:"maxConnections,omitempty"` + // MaxConnectionsPerEndpoint: The max number of simultaneous connections + // that a single backend network endpoint can handle. This is used to + // calculate the capacity of the group. Can be used in either CONNECTION + // or UTILIZATION balancing modes. For CONNECTION mode, either + // maxConnections or maxConnectionsPerEndpoint must be set. + // + // This cannot be used for internal load balancing. + MaxConnectionsPerEndpoint int64 `json:"maxConnectionsPerEndpoint,omitempty"` + // MaxConnectionsPerInstance: The max number of simultaneous connections // that a single backend instance can handle. This is used to calculate // the capacity of the group. Can be used in either CONNECTION or @@ -3328,6 +3429,14 @@ type Backend struct { // This cannot be used for internal load balancing. MaxRate int64 `json:"maxRate,omitempty"` + // MaxRatePerEndpoint: The max requests per second (RPS) that a single + // backend network endpoint can handle. This is used to calculate the + // capacity of the group. Can be used in either balancing mode. For RATE + // mode, either maxRate or maxRatePerEndpoint must be set. + // + // This cannot be used for internal load balancing. + MaxRatePerEndpoint float64 `json:"maxRatePerEndpoint,omitempty"` + // MaxRatePerInstance: The max requests per second (RPS) that a single // backend instance can handle. This is used to calculate the capacity // of the group. Can be used in either balancing mode. For RATE mode, @@ -3370,6 +3479,7 @@ func (s *Backend) UnmarshalJSON(data []byte) error { type NoMethod Backend var s1 struct { CapacityScaler gensupport.JSONFloat64 `json:"capacityScaler"` + MaxRatePerEndpoint gensupport.JSONFloat64 `json:"maxRatePerEndpoint"` MaxRatePerInstance gensupport.JSONFloat64 `json:"maxRatePerInstance"` MaxUtilization gensupport.JSONFloat64 `json:"maxUtilization"` *NoMethod @@ -3379,6 +3489,7 @@ func (s *Backend) UnmarshalJSON(data []byte) error { return err } s.CapacityScaler = float64(s1.CapacityScaler) + s.MaxRatePerEndpoint = float64(s1.MaxRatePerEndpoint) s.MaxRatePerInstance = float64(s1.MaxRatePerInstance) s.MaxUtilization = float64(s1.MaxUtilization) return nil @@ -3672,6 +3783,10 @@ type BackendService struct { // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` + // CustomRequestHeaders: Headers that the HTTP/S load balancer should + // add to proxied requests. + CustomRequestHeaders []string `json:"customRequestHeaders,omitempty"` + // Description: An optional description of this resource. Provide this // property when you create the resource. 
Description string `json:"description,omitempty"` @@ -3720,6 +3835,7 @@ type BackendService struct { // Possible values: // "EXTERNAL" // "INTERNAL" + // "INTERNAL_SELF_MANAGED" // "INVALID_LOAD_BALANCING_SCHEME" LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` @@ -3756,6 +3872,7 @@ type BackendService struct { // // Possible values: // "HTTP" + // "HTTP2" // "HTTPS" // "SSL" // "TCP" @@ -4255,6 +4372,33 @@ func (s *BackendServiceListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type BackendServiceReference struct { + BackendService string `json:"backendService,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackendService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackendService") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceReference) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type BackendServicesScopedList struct { // BackendServices: A list of BackendServices contained in this scope. BackendServices []*BackendService `json:"backendServices,omitempty"` @@ -4391,10 +4535,10 @@ func (s *BackendServicesScopedListWarningData) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: Unimplemented. The condition that is associated with this - // binding. NOTE: an unsatisfied condition will not allow user access - // via current binding. Different bindings, including their conditions, - // are examined independently. + // Condition: The condition that is associated with this binding. NOTE: + // An unsatisfied condition will not allow user access via current + // binding. Different bindings, including their conditions, are examined + // independently. Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud @@ -4421,7 +4565,7 @@ type Binding struct { // // // - // * `domain:{domain}`: A Google Apps domain name that represents all + // * `domain:{domain}`: The G Suite domain (primary) that represents all // the users of that domain. For example, `google.com` or `example.com`. Members []string `json:"members,omitempty"` @@ -5127,11 +5271,7 @@ type Condition struct { // "SERVICE" Sys string `json:"sys,omitempty"` - // Value: DEPRECATED. Use 'values' instead. - Value string `json:"value,omitempty"` - - // Values: The objects of the condition. This is mutually exclusive with - // 'value'. + // Values: The objects of the condition. Values []string `json:"values,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Iam") to @@ -5284,14 +5424,16 @@ type DeprecationStatus struct { // resource as the deprecated resource. Replacement string `json:"replacement,omitempty"` - // State: The deprecation state of this resource. This can be - // DEPRECATED, OBSOLETE, or DELETED. Operations which create a new - // resource using a DEPRECATED resource will return successfully, but - // with a warning indicating the deprecated resource and recommending - // its replacement. Operations which use OBSOLETE or DELETED resources - // will be rejected and result in an error. + // State: The deprecation state of this resource. This can be ACTIVE, + // DEPRECATED, OBSOLETE, or DELETED. Operations which communicate the + // end of life date for an image, can use ACTIVE. Operations which + // create a new resource using a DEPRECATED resource will return + // successfully, but with a warning indicating the deprecated resource + // and recommending its replacement. Operations which use OBSOLETE or + // DELETED resources will be rejected and result in an error. // // Possible values: + // "ACTIVE" // "DELETED" // "DEPRECATED" // "OBSOLETE" @@ -5336,8 +5478,8 @@ type Disk struct { // // After you encrypt a disk with a customer-supplied key, you must // provide the same key if you use the disk later (e.g. to create a disk - // snapshot or an image, or to attach the disk to a virtual - // machine). + // snapshot, to create a disk image, to create a machine image, or to + // attach the disk to a virtual machine). // // Customer-supplied encryption keys do not protect access to metadata // of the disk. @@ -5505,6 +5647,7 @@ type Disk struct { // // Possible values: // "CREATING" + // "DELETING" // "FAILED" // "READY" // "RESTORING" @@ -5512,11 +5655,11 @@ type Disk struct { // Type: URL of the disk type resource describing which disk type to use // to create the disk. Provide this when creating the disk. For example: - // project/zones/zone/diskTypes/pd-standard or pd-ssd + // projects/project/zones/zone/diskTypes/pd-standard or pd-ssd Type string `json:"type,omitempty"` // Users: [Output Only] Links to the users of the disk (attached - // instances) in form: project/zones/zone/instances/instance + // instances) in form: projects/project/zones/zone/instances/instance Users []string `json:"users,omitempty"` // Zone: [Output Only] URL of the zone where the disk resides. You must @@ -7184,6 +7327,52 @@ func (s *FirewallLogConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// FixedOrPercent: Encapsulates numeric value that can be either +// absolute or relative. +type FixedOrPercent struct { + // Calculated: [Output Only] Absolute value of VM instances calculated + // based on the specific mode. + // + // + // - If the value is fixed, then the calculated value is equal to the + // fixed value. + // - If the value is a percent, then the calculated value is percent/100 + // * targetSize. For example, the calculated value of a 80% of a managed + // instance group with 150 instances would be (80/100 * 150) = 120 VM + // instances. If there is a remainder, the number is rounded up. + Calculated int64 `json:"calculated,omitempty"` + + // Fixed: Specifies a fixed number of VM instances. This must be a + // positive integer. + Fixed int64 `json:"fixed,omitempty"` + + // Percent: Specifies a percentage of instances between 0 to 100%, + // inclusive. For example, specify 80 for 80%. + Percent int64 `json:"percent,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Calculated") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Calculated") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { + type NoMethod FixedOrPercent + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ForwardingRule: A ForwardingRule resource. A ForwardingRule resource // specifies which pool of target virtual machines to forward a packet // to if it matches the given [IPAddress, IPProtocol, ports] tuple. (== @@ -7247,6 +7436,16 @@ type ForwardingRule struct { // "UDP" IPProtocol string `json:"IPProtocol,omitempty"` + // AllPorts: This field is used along with the backend_service field for + // internal load balancing or with the target field for internal + // TargetInstance. This field cannot be used with port or portRange + // fields. + // + // When the load balancing scheme is INTERNAL and protocol is TCP/UDP, + // specify this field to allow packets addressed to any ports will be + // forwarded to the backends configured with this forwarding rule. + AllPorts bool `json:"allPorts,omitempty"` + // BackendService: This field is only used for INTERNAL load // balancing. // @@ -7292,6 +7491,7 @@ type ForwardingRule struct { // Possible values: // "EXTERNAL" // "INTERNAL" + // "INTERNAL_SELF_MANAGED" // "INVALID" LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` @@ -7368,6 +7568,26 @@ type ForwardingRule struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // ServiceLabel: An optional prefix to the service name for this + // Forwarding Rule. If specified, will be the first label of the fully + // qualified service name. + // + // The label must be 1-63 characters long, and comply with RFC1035. + // Specifically, the label must be 1-63 characters long and match the + // regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first + // character must be a lowercase letter, and all following characters + // must be a dash, lowercase letter, or digit, except the last + // character, which cannot be a dash. + // + // This field is only used for internal load balancing. + ServiceLabel string `json:"serviceLabel,omitempty"` + + // ServiceName: [Output Only] The internal fully qualified service name + // for this Forwarding Rule. + // + // This field is only used for internal load balancing. + ServiceName string `json:"serviceName,omitempty"` + // Subnetwork: This field is only used for INTERNAL load balancing. // // For internal load balancing, this field identifies the subnetwork @@ -7384,8 +7604,8 @@ type ForwardingRule struct { // same region as the forwarding rule. For global forwarding rules, this // target must be a global load balancing resource. 
The forwarded // traffic must be of a type appropriate to the target object. For - // INTERNAL_SELF_MANAGED" load balancing, only HTTP and HTTPS targets - // are valid. + // INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets are + // valid. Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -7725,6 +7945,33 @@ func (s *ForwardingRuleListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ForwardingRuleReference struct { + ForwardingRule string `json:"forwardingRule,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ForwardingRule") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ForwardingRule") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleReference) MarshalJSON() ([]byte, error) { + type NoMethod ForwardingRuleReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ForwardingRulesScopedList struct { // ForwardingRules: A list of forwarding rules contained in this scope. ForwardingRules []*ForwardingRule `json:"forwardingRules,omitempty"` @@ -7978,6 +8225,88 @@ func (s *GuestOsFeature) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type HTTP2HealthCheck struct { + // Host: The value of the host header in the HTTP/2 health check + // request. If left empty (default value), the IP on behalf of which + // this health check is performed will be used. + Host string `json:"host,omitempty"` + + // Port: The TCP port number for the health check request. The default + // value is 443. Valid values are 1 through 65535. + Port int64 `json:"port,omitempty"` + + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` + + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in + // port + // is used for health checking. + // USE_NAMED_PORT: The + // portName + // is used for health checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. + // + // + // If not specified, HTTP2 health check follows behavior specified + // in + // port + // and + // portName + // fields. 
+ // + // Possible values: + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // RequestPath: The request path of the HTTP/2 health check request. The + // default value is /. + RequestPath string `json:"requestPath,omitempty"` + + // Response: The string to match anywhere in the first 1024 bytes of the + // response body. If left empty (the default value), the status code + // determines health. The response data can only be ASCII. + Response string `json:"response,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Host") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Host") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HTTP2HealthCheck) MarshalJSON() ([]byte, error) { + type NoMethod HTTP2HealthCheck + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type HTTPHealthCheck struct { // Host: The value of the host header in the HTTP health check request. // If left empty (default value), the IP on behalf of which this health @@ -7992,6 +8321,33 @@ type HTTPHealthCheck struct { // both port and port_name are defined, port takes precedence. PortName string `json:"portName,omitempty"` + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in + // port + // is used for health checking. + // USE_NAMED_PORT: The + // portName + // is used for health checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. + // + // + // If not specified, HTTP health check follows behavior specified + // in + // port + // and + // portName + // fields. + // + // Possible values: + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` + // ProxyHeader: Specifies the type of proxy header to append before // sending data to the backend, either NONE or PROXY_V1. The default is // NONE. @@ -8047,6 +8403,33 @@ type HTTPSHealthCheck struct { // both port and port_name are defined, port takes precedence. 
PortName string `json:"portName,omitempty"` + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in + // port + // is used for health checking. + // USE_NAMED_PORT: The + // portName + // is used for health checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. + // + // + // If not specified, HTTPS health check follows behavior specified + // in + // port + // and + // portName + // fields. + // + // Possible values: + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` + // ProxyHeader: Specifies the type of proxy header to append before // sending data to the backend, either NONE or PROXY_V1. The default is // NONE. @@ -8108,6 +8491,8 @@ type HealthCheck struct { // after this many consecutive successes. The default value is 2. HealthyThreshold int64 `json:"healthyThreshold,omitempty"` + Http2HealthCheck *HTTP2HealthCheck `json:"http2HealthCheck,omitempty"` + HttpHealthCheck *HTTPHealthCheck `json:"httpHealthCheck,omitempty"` HttpsHealthCheck *HTTPSHealthCheck `json:"httpsHealthCheck,omitempty"` @@ -8140,13 +8525,14 @@ type HealthCheck struct { // greater value than checkIntervalSec. TimeoutSec int64 `json:"timeoutSec,omitempty"` - // Type: Specifies the type of the healthCheck, either TCP, SSL, HTTP or - // HTTPS. If not specified, the default is TCP. Exactly one of the - // protocol-specific health check field must be specified, which must - // match type field. + // Type: Specifies the type of the healthCheck, either TCP, SSL, HTTP, + // HTTPS or HTTP2. If not specified, the default is TCP. Exactly one of + // the protocol-specific health check field must be specified, which + // must match type field. // // Possible values: // "HTTP" + // "HTTP2" // "HTTPS" // "INVALID" // "SSL" @@ -8412,6 +8798,53 @@ func (s *HealthStatus) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type HealthStatusForNetworkEndpoint struct { + // BackendService: URL of the backend service associated with the health + // state of the network endpoint. + BackendService *BackendServiceReference `json:"backendService,omitempty"` + + // ForwardingRule: URL of the forwarding rule associated with the health + // state of the network endpoint. + ForwardingRule *ForwardingRuleReference `json:"forwardingRule,omitempty"` + + // HealthCheck: URL of the health check associated with the health state + // of the network endpoint. + HealthCheck *HealthCheckReference `json:"healthCheck,omitempty"` + + // HealthState: Health state of the network endpoint determined based on + // the health checks configured. + // + // Possible values: + // "DRAINING" + // "HEALTHY" + // "UNHEALTHY" + // "UNKNOWN" + HealthState string `json:"healthState,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackendService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
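A minimal sketch (not part of this vendored diff) of a HealthCheck that uses the new HTTP2 type together with the HTTP2HealthCheck settings added above; the import path and the concrete port/path values are assumptions for illustration:

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path for the generated client in this change
)

func main() {
	hc := &compute.HealthCheck{
		Type:             "HTTP2", // must match the protocol-specific block set below
		HealthyThreshold: 2,
		TimeoutSec:       5,
		Http2HealthCheck: &compute.HTTP2HealthCheck{
			Port:              443,              // placeholder; default is 443
			PortSpecification: "USE_FIXED_PORT", // use the fixed port above for health checking
			RequestPath:       "/healthz",       // placeholder request path
			ProxyHeader:       "NONE",
		},
	}
	fmt.Printf("%+v\n", hc)
}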
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackendService") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *HealthStatusForNetworkEndpoint) MarshalJSON() ([]byte, error) { + type NoMethod HealthStatusForNetworkEndpoint + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HostRule: UrlMaps A host-matching rule for a URL. If matched, will // use the named PathMatcher to select the BackendService. type HostRule struct { @@ -9109,6 +9542,7 @@ type Image struct { // Possible values are FAILED, PENDING, or READY. // // Possible values: + // "DELETING" // "FAILED" // "PENDING" // "READY" @@ -9153,8 +9587,9 @@ type ImageRawDisk struct { // "TAR" ContainerType string `json:"containerType,omitempty"` - // Sha1Checksum: An optional SHA1 checksum of the disk image before - // unpackaging; provided by the client when the disk image is created. + // Sha1Checksum: [Deprecated] This field is deprecated. An optional SHA1 + // checksum of the disk image before unpackaging provided by the client + // when the disk image is created. Sha1Checksum string `json:"sha1Checksum,omitempty"` // Source: The full Google Cloud Storage URL where the disk image is @@ -9371,6 +9806,8 @@ type Instance struct { // attached to the instance. GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` + Hostname string `json:"hostname,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -9458,6 +9895,10 @@ type Instance struct { // instance. See Service Accounts for more information. ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` + ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` + + ShieldedInstanceIntegrityPolicy *ShieldedInstanceIntegrityPolicy `json:"shieldedInstanceIntegrityPolicy,omitempty"` + // StartRestricted: [Output Only] Whether a VM has been restricted for // start because Compute Engine has detected suspicious activity. StartRestricted bool `json:"startRestricted,omitempty"` @@ -9468,6 +9909,7 @@ type Instance struct { // // Possible values: // "PROVISIONING" + // "REPAIRING" // "RUNNING" // "STAGING" // "STOPPED" @@ -10087,6 +10529,10 @@ func (s *InstanceGroupListWarningData) MarshalJSON() ([]byte, error) { // beta.regionInstanceGroupManagers ==) (== resource_for // v1.regionInstanceGroupManagers ==) type InstanceGroupManager struct { + // AutoHealingPolicies: The autohealing policy for this managed instance + // group. You can specify only one value. + AutoHealingPolicies []*InstanceGroupManagerAutoHealingPolicy `json:"autoHealingPolicies,omitempty"` + // BaseInstanceName: The base instance name to use for instances in this // group. The value must be 1-58 characters long. Instances are named by // appending a hyphen and a random four-character string to the base @@ -10152,6 +10598,9 @@ type InstanceGroupManager struct { // server defines this URL. SelfLink string `json:"selfLink,omitempty"` + // Status: [Output Only] The status of this managed instance group. 
+ Status *InstanceGroupManagerStatus `json:"status,omitempty"` + // TargetPools: The URLs for all TargetPool resources to which instances // in the instanceGroup field are added. The target pools automatically // apply to all of the instances in the managed instance group. @@ -10162,6 +10611,20 @@ type InstanceGroupManager struct { // Resizing the group changes this number. TargetSize int64 `json:"targetSize,omitempty"` + // UpdatePolicy: The update policy for this managed instance group. + UpdatePolicy *InstanceGroupManagerUpdatePolicy `json:"updatePolicy,omitempty"` + + // Versions: Specifies the instance templates used by this managed + // instance group to create instances. + // + // Each version is defined by an instanceTemplate and a name. Every + // version can appear at most once per instance group. This field + // overrides the top-level instanceTemplate field. Read more about the + // relationships between these fields. Exactly one version must leave + // the targetSize field unset. That version will be applied to all + // remaining instances. For more information, read about canary updates. + Versions []*InstanceGroupManagerVersion `json:"versions,omitempty"` + // Zone: [Output Only] The URL of the zone where the managed instance // group is located (for zonal resources). Zone string `json:"zone,omitempty"` @@ -10170,15 +10633,15 @@ type InstanceGroupManager struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "BaseInstanceName") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AutoHealingPolicies") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BaseInstanceName") to + // NullFields is a list of field names (e.g. "AutoHealingPolicies") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -10430,6 +10893,42 @@ func (s *InstanceGroupManagerAggregatedListWarningData) MarshalJSON() ([]byte, e return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstanceGroupManagerAutoHealingPolicy struct { + // HealthCheck: The URL for the health check that signals autohealing. + HealthCheck string `json:"healthCheck,omitempty"` + + // InitialDelaySec: The number of seconds that the managed instance + // group waits before it applies autohealing policies to new instances + // or recently recreated instances. This initial delay allows instances + // to initialize and run their startup scripts before the instance group + // determines that they are UNHEALTHY. This prevents the managed + // instance group from recreating its instances prematurely. This value + // must be from range [0, 3600]. + InitialDelaySec int64 `json:"initialDelaySec,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthCheck") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthCheck") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerAutoHealingPolicy) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerAutoHealingPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceGroupManagerList: [Output Only] A list of managed instance // groups. type InstanceGroupManagerList struct { @@ -10588,13 +11087,16 @@ func (s *InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type InstanceGroupManagersAbandonInstancesRequest struct { - // Instances: The URLs of one or more instances to abandon. This can be - // a full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` +type InstanceGroupManagerStatus struct { + // IsStable: [Output Only] A bit indicating whether the managed instance + // group is in a stable state. A stable state means that: none of the + // instances in the managed instance group is currently undergoing any + // type of change (for example, creation, restart, or deletion); no + // future changes are scheduled for instances in the managed instance + // group; and the managed instance group itself is not being modified. + IsStable bool `json:"isStable,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // ForceSendFields is a list of field names (e.g. "IsStable") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10602,7 +11104,7 @@ type InstanceGroupManagersAbandonInstancesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in + // NullFields is a list of field names (e.g. "IsStable") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -10611,19 +11113,62 @@ type InstanceGroupManagersAbandonInstancesRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { - type NoMethod InstanceGroupManagersAbandonInstancesRequest +func (s *InstanceGroupManagerStatus) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type InstanceGroupManagersDeleteInstancesRequest struct { - // Instances: The URLs of one or more instances to delete. 
This can be a - // full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` +type InstanceGroupManagerUpdatePolicy struct { + // MaxSurge: The maximum number of instances that can be created above + // the specified targetSize during the update process. By default, a + // fixed value of 1 is used. This value can be either a fixed number or + // a percentage if the instance group has 10 or more instances. If you + // set a percentage, the number of instances will be rounded up if + // necessary. + // + // At least one of either maxSurge or maxUnavailable must be greater + // than 0. Learn more about maxSurge. + MaxSurge *FixedOrPercent `json:"maxSurge,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // MaxUnavailable: The maximum number of instances that can be + // unavailable during the update process. An instance is considered + // available if all of the following conditions are satisfied: + // + // + // - The instance's status is RUNNING. + // - If there is a health check on the instance group, the instance's + // liveness health check result must be HEALTHY at least once. If there + // is no health check on the group, then the instance only needs to have + // a status of RUNNING to be considered available. By default, a fixed + // value of 1 is used. This value can be either a fixed number or a + // percentage if the instance group has 10 or more instances. If you set + // a percentage, the number of instances will be rounded up if + // necessary. + // + // At least one of either maxSurge or maxUnavailable must be greater + // than 0. Learn more about maxUnavailable. + MaxUnavailable *FixedOrPercent `json:"maxUnavailable,omitempty"` + + // MinimalAction: Minimal action to be taken on an instance. You can + // specify either RESTART to restart existing instances or REPLACE to + // delete and create new instances from the target template. If you + // specify a RESTART, the Updater will attempt to perform that action + // only. However, if the Updater determines that the minimal action you + // specify is not enough to perform the update, it might perform a more + // disruptive action. + // + // Possible values: + // "REPLACE" + // "RESTART" + MinimalAction string `json:"minimalAction,omitempty"` + + // Possible values: + // "OPPORTUNISTIC" + // "PROACTIVE" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxSurge") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10631,7 +11176,7 @@ type InstanceGroupManagersDeleteInstancesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in + // NullFields is a list of field names (e.g. "MaxSurge") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -10640,22 +11185,36 @@ type InstanceGroupManagersDeleteInstancesRequest struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { - type NoMethod InstanceGroupManagersDeleteInstancesRequest +func (s *InstanceGroupManagerUpdatePolicy) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerUpdatePolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type InstanceGroupManagersListManagedInstancesResponse struct { - // ManagedInstances: [Output Only] The list of instances in the managed - // instance group. - ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` +type InstanceGroupManagerVersion struct { + // InstanceTemplate: The URL of the instance template that is specified + // for this managed instance group. The group uses this template to + // create new instances in the managed instance group until the + // `targetSize` for this version is reached. + InstanceTemplate string `json:"instanceTemplate,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Name: Name of the version. Unique among all versions in the scope of + // this managed instance group. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "ManagedInstances") to + // TargetSize: Specifies the intended number of instances to be created + // from the instanceTemplate. The final number of instances created from + // the template will be equal to: + // - If expressed as a fixed number, the minimum of either + // targetSize.fixed or instanceGroupManager.targetSize is used. + // - if expressed as a percent, the targetSize would be + // (targetSize.percent/100 * InstanceGroupManager.targetSize) If there + // is a remainder, the number is rounded up. If unset, this version + // will update any remaining instances not updated by another version. + // Read Starting a canary update for more information. + TargetSize *FixedOrPercent `json:"targetSize,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10663,7 +11222,7 @@ type InstanceGroupManagersListManagedInstancesResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ManagedInstances") to + // NullFields is a list of field names (e.g. "InstanceTemplate") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -10673,14 +11232,105 @@ type InstanceGroupManagersListManagedInstancesResponse struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagersListManagedInstancesResponse) MarshalJSON() ([]byte, error) { - type NoMethod InstanceGroupManagersListManagedInstancesResponse +func (s *InstanceGroupManagerVersion) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerVersion raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type InstanceGroupManagersRecreateInstancesRequest struct { - // Instances: The URLs of one or more instances to recreate. This can be +type InstanceGroupManagersAbandonInstancesRequest struct { + // Instances: The URLs of one or more instances to abandon. This can be + // a full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagersAbandonInstancesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The URLs of one or more instances to delete. This can be a + // full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagersDeleteInstancesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupManagersListManagedInstancesResponse struct { + // ManagedInstances: [Output Only] The list of instances in the managed + // instance group. + ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ManagedInstances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ManagedInstances") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagersListManagedInstancesResponse) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagersListManagedInstancesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupManagersRecreateInstancesRequest struct { + // Instances: The URLs of one or more instances to recreate. This can be // a full URL or a partial URL, such as // zones/[ZONE]/instances/[INSTANCE_NAME]. Instances []string `json:"instances,omitempty"` @@ -11743,6 +12393,8 @@ type InstanceProperties struct { // to obtain the access tokens for these instances. ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` + ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` + // Tags: A list of tags to apply to the instances that are created from // this template. The tags identify valid sources or targets for network // firewalls. The setTags method can modify this list of tags. Each tag @@ -12042,6 +12694,7 @@ type InstanceWithNamedPorts struct { // // Possible values: // "PROVISIONING" + // "REPAIRING" // "RUNNING" // "STAGING" // "STOPPED" @@ -12428,7 +13081,7 @@ type Interconnect struct { // side of the Interconnect link. This can be used only for ping tests. GoogleIpAddress string `json:"googleIpAddress,omitempty"` - // GoogleReferenceId: [Output Only] Google reference ID; to be used when + // GoogleReferenceId: [Output Only] Google reference ID to be used when // raising support tickets with Google or otherwise to debug backend // connectivity issues. GoogleReferenceId string `json:"googleReferenceId,omitempty"` @@ -12441,8 +13094,13 @@ type Interconnect struct { // InterconnectAttachments configured to use this Interconnect. InterconnectAttachments []string `json:"interconnectAttachments,omitempty"` - // InterconnectType: Type of interconnect. 
Note that "IT_PRIVATE" has - // been deprecated in favor of "DEDICATED" + // InterconnectType: Type of interconnect, which can take one of the + // following values: + // - PARTNER: A partner-managed interconnection shared between customers + // though a partner. + // - DEDICATED: A dedicated physical interconnection with the customer. + // Note that a value IT_PRIVATE has been deprecated in favor of + // DEDICATED. // // Possible values: // "DEDICATED" @@ -12454,9 +13112,12 @@ type Interconnect struct { // for interconnects. Kind string `json:"kind,omitempty"` - // LinkType: Type of link requested. This field indicates speed of each - // of the links in the bundle, not the entire bundle. Only 10G per link - // is allowed for a dedicated interconnect. Options: Ethernet_10G_LR + // LinkType: Type of link requested, which can take one of the following + // values: + // - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics + // - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note + // that this field indicates the speed of each of the links in the + // bundle, not the speed of the entire bundle. // // Possible values: // "LINK_TYPE_ETHERNET_10G_LR" @@ -12482,8 +13143,16 @@ type Interconnect struct { // Notifications. NocContactEmail string `json:"nocContactEmail,omitempty"` - // OperationalStatus: [Output Only] The current status of whether or not - // this Interconnect is functional. + // OperationalStatus: [Output Only] The current status of this + // Interconnect's functionality, which can take one of the following + // values: + // - OS_ACTIVE: A valid Interconnect, which is turned up and is ready to + // use. Attachments may be provisioned on this Interconnect. + // - OS_UNPROVISIONED: An Interconnect that has not completed turnup. No + // attachments may be provisioned on this Interconnect. + // - OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal + // maintenance. No attachments may be provisioned or updated on this + // Interconnect. // // Possible values: // "OS_ACTIVE" @@ -12507,8 +13176,15 @@ type Interconnect struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // State: [Output Only] The current state of whether or not this - // Interconnect is functional. + // State: [Output Only] The current state of Interconnect functionality, + // which can take one of the following values: + // - ACTIVE: The Interconnect is valid, turned up and ready to use. + // Attachments may be provisioned on this Interconnect. + // - UNPROVISIONED: The Interconnect has not completed turnup. No + // attachments may be provisioned on this Interconnect. + // - UNDER_MAINTENANCE: The Interconnect is undergoing internal + // maintenance. No attachments may be provisioned or updated on this + // Interconnect. // // Possible values: // "ACTIVE" @@ -12551,10 +13227,22 @@ type InterconnectAttachment struct { // Not present for PARTNER_PROVIDER. AdminEnabled bool `json:"adminEnabled,omitempty"` - // Bandwidth: Provisioned bandwidth capacity for the - // interconnectAttachment. Can be set by the partner to update the - // customer's provisioned bandwidth. Output only for PARTNER type, - // mutable for PARTNER_PROVIDER and DEDICATED. + // Bandwidth: Provisioned bandwidth capacity for the interconnect + // attachment. For attachments of type DEDICATED, the user can set the + // bandwidth. For attachments of type PARTNER, the Google Partner that + // is operating the interconnect must set the bandwidth. 
Output only for + // PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, and can + // take one of the following values: + // - BPS_50M: 50 Mbit/s + // - BPS_100M: 100 Mbit/s + // - BPS_200M: 200 Mbit/s + // - BPS_300M: 300 Mbit/s + // - BPS_400M: 400 Mbit/s + // - BPS_500M: 500 Mbit/s + // - BPS_1G: 1 Gbit/s + // - BPS_2G: 2 Gbit/s + // - BPS_5G: 5 Gbit/s + // - BPS_10G: 10 Gbit/s // // Possible values: // "BPS_100M" @@ -12597,12 +13285,16 @@ type InterconnectAttachment struct { Description string `json:"description,omitempty"` // EdgeAvailabilityDomain: Desired availability domain for the - // attachment. Only available for type PARTNER, at creation time. For - // improved reliability, customers should configure a pair of - // attachments with one per availability domain. The selected - // availability domain will be provided to the Partner via the pairing - // key so that the provisioned circuit will lie in the specified domain. - // If not specified, the value will default to AVAILABILITY_DOMAIN_ANY. + // attachment. Only available for type PARTNER, at creation time, and + // can take one of the following values: + // - AVAILABILITY_DOMAIN_ANY + // - AVAILABILITY_DOMAIN_1 + // - AVAILABILITY_DOMAIN_2 For improved reliability, customers should + // configure a pair of attachments, one per availability domain. The + // selected availability domain will be provided to the Partner via the + // pairing key, so that the provisioned circuit will lie in the + // specified domain. If not specified, the value will default to + // AVAILABILITY_DOMAIN_ANY. // // Possible values: // "AVAILABILITY_DOMAIN_1" @@ -12637,7 +13329,12 @@ type InterconnectAttachment struct { Name string `json:"name,omitempty"` // OperationalStatus: [Output Only] The current status of whether or not - // this interconnect attachment is functional. + // this interconnect attachment is functional, which can take one of the + // following values: + // - OS_ACTIVE: The attachment has been turned up and is ready to use. + // + // - OS_UNPROVISIONED: The attachment is not ready to use yet, because + // turnup is not complete. // // Possible values: // "OS_ACTIVE" @@ -12650,10 +13347,10 @@ type InterconnectAttachment struct { // selected partner. Of the form "XXXXX/region/domain" PairingKey string `json:"pairingKey,omitempty"` - // PartnerAsn: Optional BGP ASN for the router that should be supplied - // by a layer 3 Partner if they configured BGP on behalf of the - // customer. Output only for PARTNER type, input only for - // PARTNER_PROVIDER, not available for DEDICATED. + // PartnerAsn: Optional BGP ASN for the router supplied by a Layer 3 + // Partner if they configured BGP on behalf of the customer. Output only + // for PARTNER type, input only for PARTNER_PROVIDER, not available for + // DEDICATED. PartnerAsn int64 `json:"partnerAsn,omitempty,string"` // PartnerMetadata: Informational metadata about Partner attachments @@ -12682,7 +13379,26 @@ type InterconnectAttachment struct { SelfLink string `json:"selfLink,omitempty"` // State: [Output Only] The current state of this attachment's - // functionality. + // functionality. Enum values ACTIVE and UNPROVISIONED are shared by + // DEDICATED/PRIVATE, PARTNER, and PARTNER_PROVIDER interconnect + // attachments, while enum values PENDING_PARTNER, + // PARTNER_REQUEST_RECEIVED, and PENDING_CUSTOMER are used for only + // PARTNER and PARTNER_PROVIDER interconnect attachments. 
This state can + // take one of the following values: + // - ACTIVE: The attachment has been turned up and is ready to use. + // - UNPROVISIONED: The attachment is not ready to use yet, because + // turnup is not complete. + // - PENDING_PARTNER: A newly-created PARTNER attachment that has not + // yet been configured on the Partner side. + // - PARTNER_REQUEST_RECEIVED: A PARTNER attachment is in the process of + // provisioning after a PARTNER_PROVIDER attachment was created that + // references it. + // - PENDING_CUSTOMER: A PARTNER or PARTNER_PROVIDER attachment that is + // waiting for a customer to activate it. + // - DEFUNCT: The attachment was deleted externally and is no longer + // functional. This could be because the associated Interconnect was + // removed, or because the other side of a Partner attachment was + // deleted. // // Possible values: // "ACTIVE" @@ -12694,6 +13410,14 @@ type InterconnectAttachment struct { // "UNPROVISIONED" State string `json:"state,omitempty"` + // Type: The type of interconnect attachment this is, which can take one + // of the following values: + // - DEDICATED: an attachment to a Dedicated Interconnect. + // - PARTNER: an attachment to a Partner Interconnect, created by the + // customer. + // - PARTNER_PROVIDER: an attachment to a Partner Interconnect, created + // by the partner. + // // Possible values: // "DEDICATED" // "PARTNER" @@ -13062,7 +13786,7 @@ type InterconnectAttachmentPartnerMetadata struct { PartnerName string `json:"partnerName,omitempty"` // PortalUrl: URL of the Partner?s portal for this Attachment. Partners - // may customise this to be a deep-link to the specific resource on the + // may customise this to be a deep link to the specific resource on the // Partner portal. This value may be validated to match approved Partner // values. PortalUrl string `json:"portalUrl,omitempty"` @@ -13378,6 +14102,12 @@ type InterconnectDiagnosticsLinkLACPStatus struct { // LACP exchange. NeighborSystemId string `json:"neighborSystemId,omitempty"` + // State: The state of a LACP link, which can take one of the following + // values: + // - ACTIVE: The link is configured and active within the bundle. + // - DETACHED: The link is not configured within the bundle. This means + // that the rest of the object should be empty. + // // Possible values: // "ACTIVE" // "DETACHED" @@ -13408,6 +14138,17 @@ func (s *InterconnectDiagnosticsLinkLACPStatus) MarshalJSON() ([]byte, error) { } type InterconnectDiagnosticsLinkOpticalPower struct { + // State: The status of the current value when compared to the warning + // and alarm levels for the receiving or transmitting transceiver. + // Possible states include: + // - OK: The value has not crossed a warning threshold. + // - LOW_WARNING: The value has crossed below the low warning threshold. + // + // - HIGH_WARNING: The value has crossed above the high warning + // threshold. + // - LOW_ALARM: The value has crossed below the low alarm threshold. + // - HIGH_ALARM: The value has crossed above the high alarm threshold. + // // Possible values: // "HIGH_ALARM" // "HIGH_WARNING" @@ -13416,10 +14157,11 @@ type InterconnectDiagnosticsLinkOpticalPower struct { // "OK" State string `json:"state,omitempty"` - // Value: Value of the current optical power, read in dBm. Take a known - // good optical value, give it a 10% margin and trigger warnings - // relative to that value. In general, a -7dBm warning and a -11dBm - // alarm are good optical value estimates for most links. 
+ // Value: Value of the current receiving or transmitting optical power, + // read in dBm. Take a known good optical value, give it a 10% margin + // and trigger warnings relative to that value. In general, a -7dBm + // warning and a -11dBm alarm are good optical value estimates for most + // links. Value float64 `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "State") to @@ -13475,8 +14217,14 @@ type InterconnectDiagnosticsLinkStatus struct { LacpStatus *InterconnectDiagnosticsLinkLACPStatus `json:"lacpStatus,omitempty"` + // ReceivingOpticalPower: An InterconnectDiagnostics.LinkOpticalPower + // object, describing the current value and status of the received light + // level. ReceivingOpticalPower *InterconnectDiagnosticsLinkOpticalPower `json:"receivingOpticalPower,omitempty"` + // TransmittingOpticalPower: An InterconnectDiagnostics.LinkOpticalPower + // object, describing the current value and status of the transmitted + // light level. TransmittingOpticalPower *InterconnectDiagnosticsLinkOpticalPower `json:"transmittingOpticalPower,omitempty"` // ForceSendFields is a list of field names (e.g. "ArpCaches") to @@ -13678,7 +14426,13 @@ type InterconnectLocation struct { // "Amsterdam, Netherlands". City string `json:"city,omitempty"` - // Continent: [Output Only] Continent for this location. + // Continent: [Output Only] Continent for this location, which can take + // one of the following values: + // - AFRICA + // - ASIA_PAC + // - EUROPE + // - NORTH_AMERICA + // - SOUTH_AMERICA // // Possible values: // "AFRICA" @@ -13731,6 +14485,18 @@ type InterconnectLocation struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // Status: [Output Only] The status of this InterconnectLocation, which + // can take one of the following values: + // - CLOSED: The InterconnectLocation is closed and is unavailable for + // provisioning new Interconnects. + // - AVAILABLE: The InterconnectLocation is available for provisioning + // new Interconnects. + // + // Possible values: + // "AVAILABLE" + // "CLOSED" + Status string `json:"status,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -13972,9 +14738,14 @@ type InterconnectOutageNotification struct { // epoch). EndTime int64 `json:"endTime,omitempty,string"` - // IssueType: Form this outage is expected to take. Note that the "IT_" - // versions of this enum have been deprecated in favor of the unprefixed - // values. + // IssueType: Form this outage is expected to take, which can take one + // of the following values: + // - OUTAGE: The Interconnect may be completely out of service for some + // or all of the specified window. + // - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a + // whole should remain up, but with reduced bandwidth. Note that the + // versions of this enum prefixed with "IT_" have been deprecated in + // favor of the unprefixed values. // // Possible values: // "IT_OUTAGE" @@ -13986,8 +14757,10 @@ type InterconnectOutageNotification struct { // Name: Unique identifier for this outage notification. Name string `json:"name,omitempty"` - // Source: The party that generated this notification. Note that - // "NSRC_GOOGLE" has been deprecated in favor of "GOOGLE" + // Source: The party that generated this notification, which can take + // the following value: + // - GOOGLE: this notification as generated by Google. 
Note that the + // value of NSRC_GOOGLE has been deprecated in favor of GOOGLE. // // Possible values: // "GOOGLE" @@ -13998,8 +14771,15 @@ type InterconnectOutageNotification struct { // Unix epoch). StartTime int64 `json:"startTime,omitempty,string"` - // State: State of this notification. Note that the "NS_" versions of - // this enum have been deprecated in favor of the unprefixed values. + // State: State of this notification, which can take one of the + // following values: + // - ACTIVE: This outage notification is active. The event could be in + // the past, present, or future. See start_time and end_time for + // scheduling. + // - CANCELLED: The outage associated with this notification was + // cancelled before the outage was due to start. Note that the versions + // of this enum prefixed with "NS_" have been deprecated in favor of the + // unprefixed values. // // Possible values: // "ACTIVE" @@ -14551,12 +15331,6 @@ type LogConfigDataAccessOptions struct { // the caller. This is relevant only in the LocalIAM implementation, for // now. // - // NOTE: Logging to Gin in a fail-closed manner is currently unsupported - // while work is being done to satisfy the requirements of go/345. - // Currently, setting LOG_FAIL_CLOSED mode will have no effect, but - // still exists because there is active work being done to support it - // (b/115874152). - // // Possible values: // "LOG_FAIL_CLOSED" // "LOG_MODE_UNSPECIFIED" @@ -15200,6 +15974,7 @@ type ManagedInstance struct { // // Possible values: // "PROVISIONING" + // "REPAIRING" // "RUNNING" // "STAGING" // "STOPPED" @@ -15213,6 +15988,9 @@ type ManagedInstance struct { // create or delete the instance. LastAttempt *ManagedInstanceLastAttempt `json:"lastAttempt,omitempty"` + // Version: [Output Only] Intended version of this instance. + Version *ManagedInstanceVersion `json:"version,omitempty"` + // ForceSendFields is a list of field names (e.g. "CurrentAction") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -15328,6 +16106,39 @@ func (s *ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ManagedInstanceVersion struct { + // InstanceTemplate: [Output Only] The intended template of the + // instance. This field is empty when current_action is one of { + // DELETING, ABANDONING }. + InstanceTemplate string `json:"instanceTemplate,omitempty"` + + // Name: [Output Only] Name of the version. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceTemplate") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *ManagedInstanceVersion) MarshalJSON() ([]byte, error) { + type NoMethod ManagedInstanceVersion + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Metadata: A metadata key/value entry. type Metadata struct { // Fingerprint: Specifies a fingerprint for this request, which is @@ -15446,9 +16257,10 @@ func (s *NamedPort) MarshalJSON() ([]byte, error) { // (VPC) Network Overview for more information. (== resource_for // v1.networks ==) (== resource_for beta.networks ==) type Network struct { - // IPv4Range: The range of internal addresses that are legal on this - // network. This range is a CIDR specification, for example: - // 192.168.0.0/16. Provided by the client when the network is created. + // IPv4Range: Deprecated in favor of subnet mode networks. The range of + // internal addresses that are legal on this network. This range is a + // CIDR specification, for example: 192.168.0.0/16. Provided by the + // client when the network is created. IPv4Range string `json:"IPv4Range,omitempty"` // AutoCreateSubnetworks: When set to true, the VPC network is created @@ -15531,104 +16343,147 @@ func (s *Network) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkInterface: A network interface resource attached to an -// instance. -type NetworkInterface struct { - // AccessConfigs: An array of configurations for this interface. - // Currently, only one access config, ONE_TO_ONE_NAT, is supported. If - // there are no accessConfigs specified, then this instance will have no - // external internet access. - AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"` +// NetworkEndpoint: The network endpoint. +type NetworkEndpoint struct { + // Instance: The name for a specific VM instance that the IP address + // belongs to. This is required for network endpoints of type + // GCE_VM_IP_PORT. The instance must be in the same zone as the network + // endpoint group. + // + // The name must be 1-63 characters long, and comply with RFC1035. + Instance string `json:"instance,omitempty"` - // AliasIpRanges: An array of alias IP ranges for this network - // interface. Can only be specified for network interfaces on - // subnet-mode networks. - AliasIpRanges []*AliasIpRange `json:"aliasIpRanges,omitempty"` + // IpAddress: Optional IPv4 address of network endpoint. The IP address + // must belong to a VM in GCE (either the primary IP or as part of an + // aliased IP range). If the IP address is not specified, then the + // primary IP address for the VM instance in the network that the + // network endpoint group belongs to will be used. + IpAddress string `json:"ipAddress,omitempty"` - // Fingerprint: Fingerprint hash of contents stored in this network - // interface. This field will be ignored when inserting an Instance or - // adding a NetworkInterface. An up-to-date fingerprint must be provided - // in order to update the NetworkInterface, otherwise the request will - // fail with error 412 conditionNotMet. - Fingerprint string `json:"fingerprint,omitempty"` + // Port: Optional port number of network endpoint. If not specified and + // the NetworkEndpointGroup.network_endpoint_type is GCE_IP_PORT, the + // defaultPort for the network endpoint group will be used. + Port int64 `json:"port,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instance") to + // unconditionally include in API requests.
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instance") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEndpoint) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpoint + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkEndpointGroup: Represents a collection of network endpoints. +type NetworkEndpointGroup struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // DefaultPort: The default port used if the port number is not + // specified in the network endpoint. + DefaultPort int64 `json:"defaultPort,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` // Kind: [Output Only] Type of the resource. Always - // compute#networkInterface for network interfaces. + // compute#networkEndpointGroup for network endpoint group. Kind string `json:"kind,omitempty"` - // Name: [Output Only] The name of the network interface, generated by - // the server. For network devices, these are eth0, eth1, etc. + // Name: Name of the resource; provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Network: URL of the network resource for this instance. When creating - // an instance, if neither the network nor the subnetwork is specified, - // the default network global/networks/default is used; if the network - // is not specified but the subnetwork is specified, the network is - // inferred. - // - // This field is optional when creating a firewall rule. If not - // specified when creating a firewall rule, the default network - // global/networks/default is used. - // - // If you specify this property, you can specify the network as a full - // or partial URL. For example, the following are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/global/networks/network - // - projects/project/global/networks/network - // - global/networks/default + // Network: The URL of the network to which all network endpoints in the + // NEG belong. Uses "default" project network if unspecified. 
Network string `json:"network,omitempty"` - // NetworkIP: An IPv4 internal network address to assign to the instance - // for this network interface. If not specified by the user, an unused - // internal IP is assigned by the system. - NetworkIP string `json:"networkIP,omitempty"` + // NetworkEndpointType: Type of network endpoints in this network + // endpoint group. Currently the only supported value is GCE_VM_IP_PORT. + // + // Possible values: + // "GCE_VM_IP_PORT" + NetworkEndpointType string `json:"networkEndpointType,omitempty"` - // Subnetwork: The URL of the Subnetwork resource for this instance. If - // the network resource is in legacy mode, do not provide this property. - // If the network is in auto subnet mode, providing the subnetwork is - // optional. If the network is in custom subnet mode, then this field - // should be specified. If you specify this property, you can specify - // the subnetwork as a full or partial URL. For example, the following - // are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork - // - regions/region/subnetworks/subnetwork + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Size: [Output only] Number of network endpoints in the network + // endpoint group. + Size int64 `json:"size,omitempty"` + + // Subnetwork: Optional URL of the subnetwork to which all network + // endpoints in the NEG belong. Subnetwork string `json:"subnetwork,omitempty"` - // ForceSendFields is a list of field names (e.g. "AccessConfigs") to - // unconditionally include in API requests. By default, fields with + // Zone: [Output Only] The URL of the zone where the network endpoint + // group is located. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AccessConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
NullFields []string `json:"-"` } -func (s *NetworkInterface) MarshalJSON() ([]byte, error) { - type NoMethod NetworkInterface +func (s *NetworkEndpointGroup) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroup raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkList: Contains a list of networks. -type NetworkList struct { +type NetworkEndpointGroupAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Network resources. - Items []*Network `json:"items,omitempty"` + // Items: A list of NetworkEndpointGroupsScopedList resources. + Items map[string]NetworkEndpointGroupsScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#networkList for - // lists of networks. + // Kind: [Output Only] The resource type, which is always + // compute#networkEndpointGroupAggregatedList for aggregated lists of + // network endpoint groups. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -15643,7 +16498,7 @@ type NetworkList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *NetworkListWarning `json:"warning,omitempty"` + Warning *NetworkEndpointGroupAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -15666,14 +16521,15 @@ type NetworkList struct { NullFields []string `json:"-"` } -func (s *NetworkList) MarshalJSON() ([]byte, error) { - type NoMethod NetworkList +func (s *NetworkEndpointGroupAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkListWarning: [Output Only] Informational warning message. -type NetworkListWarning struct { +// NetworkEndpointGroupAggregatedListWarning: [Output Only] +// Informational warning message. +type NetworkEndpointGroupAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -15707,7 +16563,7 @@ type NetworkListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*NetworkListWarningData `json:"data,omitempty"` + Data []*NetworkEndpointGroupAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -15730,13 +16586,13 @@ type NetworkListWarning struct { NullFields []string `json:"-"` } -func (s *NetworkListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NetworkListWarning +func (s *NetworkEndpointGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkListWarningData struct { +type NetworkEndpointGroupAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -15767,51 +16623,43 @@ type NetworkListWarningData struct { NullFields []string `json:"-"` } -func (s *NetworkListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NetworkListWarningData +func (s *NetworkEndpointGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkPeering: A network peering attached to a network resource. The -// message includes the peering name, peer network, peering state, and a -// flag indicating whether Google Compute Engine should automatically -// create routes for the peering. -type NetworkPeering struct { - // AutoCreateRoutes: Indicates whether full mesh connectivity is created - // and managed automatically. When it is set to true, Google Compute - // Engine will automatically create and manage the routes between two - // networks when the state is ACTIVE. Otherwise, user needs to create - // routes manually to route packets to peer network. - AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` +type NetworkEndpointGroupList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Name: Name of this peering. Provided by the client when the peering - // is created. The name must comply with RFC1035. Specifically, the name - // must be 1-63 characters long and match regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all the following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` + // Items: A list of NetworkEndpointGroup resources. + Items []*NetworkEndpointGroup `json:"items,omitempty"` - // Network: The URL of the peer network. It can be either full URL or - // partial URL. The peer network may belong to a different project. If - // the partial URL does not contain project, it is assumed that the peer - // network is in the same project as the current network. - Network string `json:"network,omitempty"` + // Kind: [Output Only] The resource type, which is always + // compute#networkEndpointGroupList for network endpoint group lists. + Kind string `json:"kind,omitempty"` - // State: [Output Only] State for the peering. - // - // Possible values: - // "ACTIVE" - // "INACTIVE" - State string `json:"state,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // StateDetails: [Output Only] Details about the current state of the - // peering. - StateDetails string `json:"stateDetails,omitempty"` + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to + // Warning: [Output Only] Informational warning message. 
+ Warning *NetworkEndpointGroupListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15819,39 +16667,64 @@ type NetworkPeering struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoCreateRoutes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworkPeering) MarshalJSON() ([]byte, error) { - type NoMethod NetworkPeering +func (s *NetworkEndpointGroupList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkRoutingConfig: A routing configuration attached to a network -// resource. The message includes the list of routers associated with -// the network, and a flag indicating the type of routing behavior to -// enforce network-wide. -type NetworkRoutingConfig struct { - // RoutingMode: The network-wide routing mode to use. If set to - // REGIONAL, this network's cloud routers will only advertise routes - // with subnets of this network in the same region as the router. If set - // to GLOBAL, this network's cloud routers will advertise routes with - // all subnets of this network, across regions. +// NetworkEndpointGroupListWarning: [Output Only] Informational warning +// message. +type NetworkEndpointGroupListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
// // Possible values: - // "GLOBAL" - // "REGIONAL" - RoutingMode string `json:"routingMode,omitempty"` + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "RoutingMode") to + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*NetworkEndpointGroupListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15859,36 +16732,63 @@ type NetworkRoutingConfig struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "RoutingMode") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworkRoutingConfig) MarshalJSON() ([]byte, error) { - type NoMethod NetworkRoutingConfig +func (s *NetworkEndpointGroupListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworksAddPeeringRequest struct { - // AutoCreateRoutes: Whether Google Compute Engine manages the routes - // automatically. - AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` +type NetworkEndpointGroupListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` - // Name: Name of the peering, which should conform to RFC1035. - Name string `json:"name,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // PeerNetwork: URL of the peer network. It can be either full URL or - // partial URL. The peer network may belong to a different project. If - // the partial URL does not contain project, it is assumed that the peer - // network is in the same project as the current network. - PeerNetwork string `json:"peerNetwork,omitempty"` + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEndpointGroupListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkEndpointGroupsAttachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be attached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15896,7 +16796,7 @@ type NetworksAddPeeringRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoCreateRoutes") to + // NullFields is a list of field names (e.g. "NetworkEndpoints") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -15906,17 +16806,17 @@ type NetworksAddPeeringRequest struct { NullFields []string `json:"-"` } -func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { - type NoMethod NetworksAddPeeringRequest +func (s *NetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsAttachEndpointsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworksRemovePeeringRequest struct { - // Name: Name of the peering, which should conform to RFC1035. - Name string `json:"name,omitempty"` +type NetworkEndpointGroupsDetachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be detached. 
+ NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` - // ForceSendFields is a list of field names (e.g. "Name") to + // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15924,108 +16824,67 @@ type NetworksRemovePeeringRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Name") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "NetworkEndpoints") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { - type NoMethod NetworksRemovePeeringRequest +func (s *NetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsDetachEndpointsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeGroup: A NodeGroup resource. -type NodeGroup struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] The type of the resource. Always - // compute#nodeGroup for node group. - Kind string `json:"kind,omitempty"` - - // Name: The name of the resource, provided by the client when initially - // creating the resource. The resource name must be 1-63 characters - // long, and comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` - - // NodeTemplate: The URL of the node template to which this node group - // belongs. - NodeTemplate string `json:"nodeTemplate,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Size: [Output Only] The total number of nodes in the node group. 
- Size int64 `json:"size,omitempty"` - +type NetworkEndpointGroupsListEndpointsRequest struct { + // HealthStatus: Optional query parameter for showing the health status + // of each network endpoint. Valid options are SKIP or SHOW. If you + // don't specify this parameter, the health status of network endpoints + // will not be provided. + // // Possible values: - // "CREATING" - // "DELETING" - // "INVALID" - // "READY" - Status string `json:"status,omitempty"` - - // Zone: [Output Only] The name of the zone where the node group - // resides, such as us-central1-a. - Zone string `json:"zone,omitempty"` + // "SHOW" + // "SKIP" + HealthStatus string `json:"healthStatus,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "HealthStatus") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NodeGroup) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroup +func (s *NetworkEndpointGroupsListEndpointsRequest) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsListEndpointsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupAggregatedList struct { +type NetworkEndpointGroupsListNetworkEndpoints struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NodeGroupsScopedList resources. - Items map[string]NodeGroupsScopedList `json:"items,omitempty"` + // Items: A list of NetworkEndpointWithHealthStatus resources. + Items []*NetworkEndpointWithHealthStatus `json:"items,omitempty"` - // Kind: [Output Only] Type of resource.Always - // compute#nodeGroupAggregatedList for aggregated lists of node groups. + // Kind: [Output Only] The resource type, which is always + // compute#networkEndpointGroupsListNetworkEndpoints for the list of + // network endpoints in the specified network endpoint group.
Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -16036,11 +16895,8 @@ type NodeGroupAggregatedList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *NodeGroupAggregatedListWarning `json:"warning,omitempty"` + Warning *NetworkEndpointGroupsListNetworkEndpointsWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -16063,15 +16919,15 @@ type NodeGroupAggregatedList struct { NullFields []string `json:"-"` } -func (s *NodeGroupAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupAggregatedList +func (s *NetworkEndpointGroupsListNetworkEndpoints) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsListNetworkEndpoints raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeGroupAggregatedListWarning: [Output Only] Informational warning -// message. -type NodeGroupAggregatedListWarning struct { +// NetworkEndpointGroupsListNetworkEndpointsWarning: [Output Only] +// Informational warning message. +type NetworkEndpointGroupsListNetworkEndpointsWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -16105,7 +16961,7 @@ type NodeGroupAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*NodeGroupAggregatedListWarningData `json:"data,omitempty"` + Data []*NetworkEndpointGroupsListNetworkEndpointsWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -16128,13 +16984,13 @@ type NodeGroupAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupAggregatedListWarning +func (s *NetworkEndpointGroupsListNetworkEndpointsWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsListNetworkEndpointsWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupAggregatedListWarningData struct { +type NetworkEndpointGroupsListNetworkEndpointsWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -16165,68 +17021,50 @@ type NodeGroupAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupAggregatedListWarningData +func (s *NetworkEndpointGroupsListNetworkEndpointsWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsListNetworkEndpointsWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeGroupList: Contains a list of nodeGroups. -type NodeGroupList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of NodeGroup resources. 
- Items []*NodeGroup `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource.Always compute#nodeGroupList for - // lists of node groups. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *NodeGroupListWarning `json:"warning,omitempty"` +type NetworkEndpointGroupsScopedList struct { + // NetworkEndpointGroups: [Output Only] The list of network endpoint + // groups that are contained in this scope. + NetworkEndpointGroups []*NetworkEndpointGroup `json:"networkEndpointGroups,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: [Output Only] An informational warning that replaces the + // list of network endpoint groups when the list is empty. + Warning *NetworkEndpointGroupsScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "NetworkEndpointGroups") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "NetworkEndpointGroups") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
NullFields []string `json:"-"` } -func (s *NodeGroupList) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupList +func (s *NetworkEndpointGroupsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeGroupListWarning: [Output Only] Informational warning message. -type NodeGroupListWarning struct { +// NetworkEndpointGroupsScopedListWarning: [Output Only] An +// informational warning that replaces the list of network endpoint +// groups when the list is empty. +type NetworkEndpointGroupsScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -16260,7 +17098,7 @@ type NodeGroupListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*NodeGroupListWarningData `json:"data,omitempty"` + Data []*NetworkEndpointGroupsScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -16283,13 +17121,13 @@ type NodeGroupListWarning struct { NullFields []string `json:"-"` } -func (s *NodeGroupListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupListWarning +func (s *NetworkEndpointGroupsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupListWarningData struct { +type NetworkEndpointGroupsScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -16320,31 +17158,20 @@ type NodeGroupListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeGroupListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupListWarningData +func (s *NetworkEndpointGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupsScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupNode struct { - // Instances: Instances scheduled on this node. - Instances []string `json:"instances,omitempty"` - - // Name: The name of the node. - Name string `json:"name,omitempty"` - - // NodeType: The type of this node. - NodeType string `json:"nodeType,omitempty"` +type NetworkEndpointWithHealthStatus struct { + // Healths: [Output only] The health status of network endpoint; + Healths []*HealthStatusForNetworkEndpoint `json:"healths,omitempty"` - // Possible values: - // "CREATING" - // "DELETING" - // "INVALID" - // "READY" - // "REPAIRING" - Status string `json:"status,omitempty"` + // NetworkEndpoint: [Output only] The network endpoint; + NetworkEndpoint *NetworkEndpoint `json:"networkEndpoint,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // ForceSendFields is a list of field names (e.g. "Healths") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16352,7 +17179,7 @@ type NodeGroupNode struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in + // NullFields is a list of field names (e.g. "Healths") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -16361,45 +17188,77 @@ type NodeGroupNode struct { NullFields []string `json:"-"` } -func (s *NodeGroupNode) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupNode +func (s *NetworkEndpointWithHealthStatus) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointWithHealthStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupsAddNodesRequest struct { - // AdditionalNodeCount: Count of additional nodes to be added to the - // node group. - AdditionalNodeCount int64 `json:"additionalNodeCount,omitempty"` +// NetworkInterface: A network interface resource attached to an +// instance. +type NetworkInterface struct { + // AccessConfigs: An array of configurations for this interface. + // Currently, only one access config, ONE_TO_ONE_NAT, is supported. If + // there are no accessConfigs specified, then this instance will have no + // external internet access. + AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"` - // ForceSendFields is a list of field names (e.g. "AdditionalNodeCount") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // AliasIpRanges: An array of alias IP ranges for this network + // interface. Can only be specified for network interfaces on + // subnet-mode networks. + AliasIpRanges []*AliasIpRange `json:"aliasIpRanges,omitempty"` - // NullFields is a list of field names (e.g. "AdditionalNodeCount") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} + // Fingerprint: Fingerprint hash of contents stored in this network + // interface. This field will be ignored when inserting an Instance or + // adding a NetworkInterface. An up-to-date fingerprint must be provided + // in order to update the NetworkInterface, otherwise the request will + // fail with error 412 conditionNotMet. + Fingerprint string `json:"fingerprint,omitempty"` -func (s *NodeGroupsAddNodesRequest) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsAddNodesRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Kind: [Output Only] Type of the resource. Always + // compute#networkInterface for network interfaces. 
+ Kind string `json:"kind,omitempty"` -type NodeGroupsDeleteNodesRequest struct { - Nodes []string `json:"nodes,omitempty"` + // Name: [Output Only] The name of the network interface, generated by + // the server. For network devices, these are eth0, eth1, etc. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "Nodes") to + // Network: URL of the network resource for this instance. When creating + // an instance, if neither the network nor the subnetwork is specified, + // the default network global/networks/default is used; if the network + // is not specified but the subnetwork is specified, the network is + // inferred. + // + // This field is optional when creating a firewall rule. If not + // specified when creating a firewall rule, the default network + // global/networks/default is used. + // + // If you specify this property, you can specify the network as a full + // or partial URL. For example, the following are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/global/networks/network + // - projects/project/global/networks/network + // - global/networks/default + Network string `json:"network,omitempty"` + + // NetworkIP: An IPv4 internal network address to assign to the instance + // for this network interface. If not specified by the user, an unused + // internal IP is assigned by the system. + NetworkIP string `json:"networkIP,omitempty"` + + // Subnetwork: The URL of the Subnetwork resource for this instance. If + // the network resource is in legacy mode, do not provide this property. + // If the network is in auto subnet mode, providing the subnetwork is + // optional. If the network is in custom subnet mode, then this field + // should be specified. If you specify this property, you can specify + // the subnetwork as a full or partial URL. For example, the following + // are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork + // - regions/region/subnetworks/subnetwork + Subnetwork string `json:"subnetwork,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AccessConfigs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16407,32 +17266,32 @@ type NodeGroupsDeleteNodesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Nodes") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AccessConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *NodeGroupsDeleteNodesRequest) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsDeleteNodesRequest +func (s *NetworkInterface) MarshalJSON() ([]byte, error) { + type NoMethod NetworkInterface raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupsListNodes struct { +// NetworkList: Contains a list of networks. +type NetworkList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Node resources. - Items []*NodeGroupNode `json:"items,omitempty"` + // Items: A list of Network resources. + Items []*Network `json:"items,omitempty"` - // Kind: [Output Only] The resource type, which is always - // compute.nodeGroupsListNodes for the list of nodes in the specified - // node group. + // Kind: [Output Only] Type of resource. Always compute#networkList for + // lists of networks. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -16447,7 +17306,7 @@ type NodeGroupsListNodes struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *NodeGroupsListNodesWarning `json:"warning,omitempty"` + Warning *NetworkListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -16470,15 +17329,14 @@ type NodeGroupsListNodes struct { NullFields []string `json:"-"` } -func (s *NodeGroupsListNodes) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsListNodes +func (s *NetworkList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeGroupsListNodesWarning: [Output Only] Informational warning -// message. -type NodeGroupsListNodesWarning struct { +// NetworkListWarning: [Output Only] Informational warning message. +type NetworkListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -16512,7 +17370,7 @@ type NodeGroupsListNodesWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*NodeGroupsListNodesWarningData `json:"data,omitempty"` + Data []*NetworkListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -16535,13 +17393,13 @@ type NodeGroupsListNodesWarning struct { NullFields []string `json:"-"` } -func (s *NodeGroupsListNodesWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsListNodesWarning +func (s *NetworkListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupsListNodesWarningData struct { +type NetworkListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -16572,22 +17430,59 @@ type NodeGroupsListNodesWarningData struct { NullFields []string `json:"-"` } -func (s *NodeGroupsListNodesWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsListNodesWarningData +func (s *NetworkListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupsScopedList struct { - // NodeGroups: [Output Only] A list of node groups contained in this - // scope. - NodeGroups []*NodeGroup `json:"nodeGroups,omitempty"` +// NetworkPeering: A network peering attached to a network resource. The +// message includes the peering name, peer network, peering state, and a +// flag indicating whether Google Compute Engine should automatically +// create routes for the peering. +type NetworkPeering struct { + // AutoCreateRoutes: This field will be deprecated soon. Prefer using + // exchange_subnet_routes instead. Indicates whether full mesh + // connectivity is created and managed automatically. When it is set to + // true, Google Compute Engine will automatically create and manage the + // routes between two networks when the state is ACTIVE. Otherwise, user + // needs to create routes manually to route packets to peer network. + AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - // Warning: [Output Only] An informational warning that appears when the - // nodeGroup list is empty. - Warning *NodeGroupsScopedListWarning `json:"warning,omitempty"` + // ExchangeSubnetRoutes: Whether full mesh connectivity is created and + // managed automatically. When it is set to true, Google Compute Engine + // will automatically create and manage the routes between two networks + // when the peering state is ACTIVE. Otherwise, user needs to create + // routes manually to route packets to peer network. + ExchangeSubnetRoutes bool `json:"exchangeSubnetRoutes,omitempty"` - // ForceSendFields is a list of field names (e.g. "NodeGroups") to + // Name: Name of this peering. Provided by the client when the peering + // is created. The name must comply with RFC1035. Specifically, the name + // must be 1-63 characters long and match regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all the following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + // Network: The URL of the peer network. It can be either full URL or + // partial URL. The peer network may belong to a different project. If + // the partial URL does not contain project, it is assumed that the peer + // network is in the same project as the current network. + Network string `json:"network,omitempty"` + + // State: [Output Only] State for the peering. + // + // Possible values: + // "ACTIVE" + // "INACTIVE" + State string `json:"state,omitempty"` + + // StateDetails: [Output Only] Details about the current state of the + // peering. + StateDetails string `json:"stateDetails,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16595,64 +17490,39 @@ type NodeGroupsScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NodeGroups") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AutoCreateRoutes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *NodeGroupsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsScopedList +func (s *NetworkPeering) MarshalJSON() ([]byte, error) { + type NoMethod NetworkPeering raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeGroupsScopedListWarning: [Output Only] An informational warning -// that appears when the nodeGroup list is empty. -type NodeGroupsScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. +// NetworkRoutingConfig: A routing configuration attached to a network +// resource. The message includes the list of routers associated with +// the network, and a flag indicating the type of routing behavior to +// enforce network-wide. +type NetworkRoutingConfig struct { + // RoutingMode: The network-wide routing mode to use. If set to + // REGIONAL, this network's cloud routers will only advertise routes + // with subnets of this network in the same region as the router. If set + // to GLOBAL, this network's cloud routers will advertise routes with + // all subnets of this network, across regions. // // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*NodeGroupsScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. 
- Message string `json:"message,omitempty"` + // "GLOBAL" + // "REGIONAL" + RoutingMode string `json:"routingMode,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "RoutingMode") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16660,36 +17530,44 @@ type NodeGroupsScopedListWarning struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "RoutingMode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NodeGroupsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsScopedListWarning +func (s *NetworkRoutingConfig) MarshalJSON() ([]byte, error) { + type NoMethod NetworkRoutingConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupsScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +type NetworksAddPeeringRequest struct { + // AutoCreateRoutes: This field will be deprecated soon. Prefer using + // exchange_subnet_routes in network_peering instead. Whether Google + // Compute Engine manages the routes automatically. + AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // Name: Name of the peering, which should conform to RFC1035. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // NetworkPeering: Network peering parameters. In order to specify route + // policies for peering using import/export custom routes, you will have + // to fill all peering related parameters (name, peer network, + // exchange_subnet_routes) in network_peeringfield. Corresponding fields + // in NetworksAddPeeringRequest will be deprecated soon. + NetworkPeering *NetworkPeering `json:"networkPeering,omitempty"` + + // PeerNetwork: URL of the peer network. It can be either full URL or + // partial URL. The peer network may belong to a different project. If + // the partial URL does not contain project, it is assumed that the peer + // network is in the same project as the current network. 
+ PeerNetwork string `json:"peerNetwork,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16697,27 +17575,27 @@ type NodeGroupsScopedListWarningData struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AutoCreateRoutes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *NodeGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsScopedListWarningData +func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { + type NoMethod NetworksAddPeeringRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeGroupsSetNodeTemplateRequest struct { - // NodeTemplate: Full or partial URL of the node template resource to be - // updated for this node group. - NodeTemplate string `json:"nodeTemplate,omitempty"` +type NetworksRemovePeeringRequest struct { + // Name: Name of the peering, which should conform to RFC1035. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "NodeTemplate") to + // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16725,23 +17603,26 @@ type NodeGroupsSetNodeTemplateRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NodeTemplate") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *NodeGroupsSetNodeTemplateRequest) MarshalJSON() ([]byte, error) { - type NoMethod NodeGroupsSetNodeTemplateRequest +func (s *NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { + type NoMethod NetworksRemovePeeringRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTemplate: A Node Template resource. -type NodeTemplate struct { +// NodeGroup: A NodeGroup resource. To create a node group, you must +// first create a node templates. To learn more about node groups and +// sole-tenant nodes, read the Sole-tenant nodes documentation. (== +// resource_for beta.nodeGroups ==) (== resource_for v1.nodeGroups ==) +type NodeGroup struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -16755,7 +17636,7 @@ type NodeTemplate struct { Id uint64 `json:"id,omitempty,string"` // Kind: [Output Only] The type of the resource. Always - // compute#nodeTemplate for node templates. + // compute#nodeGroup for node group. Kind string `json:"kind,omitempty"` // Name: The name of the resource, provided by the client when initially @@ -16768,32 +17649,16 @@ type NodeTemplate struct { // be a dash. Name string `json:"name,omitempty"` - // NodeAffinityLabels: Labels to use for node affinity, which will be - // used in instance scheduling. - NodeAffinityLabels map[string]string `json:"nodeAffinityLabels,omitempty"` - - // NodeType: The node type to use for nodes group that are created from - // this template. - NodeType string `json:"nodeType,omitempty"` - - // NodeTypeFlexibility: The flexible properties of the desired node - // type. Node groups that use this node template will create nodes of a - // type that matches these properties. - // - // This field is mutually exclusive with the node_type property; you can - // only define one or the other, but not both. - NodeTypeFlexibility *NodeTemplateNodeTypeFlexibility `json:"nodeTypeFlexibility,omitempty"` - - // Region: [Output Only] The name of the region where the node template - // resides, such as us-central1. - Region string `json:"region,omitempty"` + // NodeTemplate: The URL of the node template to which this node group + // belongs. + NodeTemplate string `json:"nodeTemplate,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] The status of the node template. One of the - // following values: CREATING, READY, and DELETING. - // + // Size: [Output Only] The total number of nodes in the node group. + Size int64 `json:"size,omitempty"` + // Possible values: // "CREATING" // "DELETING" @@ -16801,9 +17666,9 @@ type NodeTemplate struct { // "READY" Status string `json:"status,omitempty"` - // StatusMessage: [Output Only] An optional, human-readable explanation - // of the status. - StatusMessage string `json:"statusMessage,omitempty"` + // Zone: [Output Only] The name of the zone where the node group + // resides, such as us-central1-a. + Zone string `json:"zone,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. 
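A minimal sketch of how the peering request types above might be populated, assuming the generated google.golang.org/api/compute/v1 package; the peering and network names are placeholders, and ExchangeSubnetRoutes is set on NetworkPeering rather than the top-level AutoCreateRoutes field that the field comments mark for deprecation:

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Fill the peering parameters inside NetworkPeering; the corresponding
	// top-level fields on NetworksAddPeeringRequest are noted as deprecated soon.
	req := &compute.NetworksAddPeeringRequest{
		Name: "example-peering", // placeholder peering name
		NetworkPeering: &compute.NetworkPeering{
			Name:                 "example-peering",
			Network:              "projects/peer-project/global/networks/peer-net", // placeholder peer network URL
			ExchangeSubnetRoutes: true,
		},
	}

	// MarshalJSON (backed by gensupport) omits empty fields by default.
	b, err := json.MarshalIndent(req, "", "  ")
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(string(b))
}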
@@ -16827,23 +17692,22 @@ type NodeTemplate struct { NullFields []string `json:"-"` } -func (s *NodeTemplate) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplate +func (s *NodeGroup) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroup raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTemplateAggregatedList struct { +type NodeGroupAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NodeTemplatesScopedList resources. - Items map[string]NodeTemplatesScopedList `json:"items,omitempty"` + // Items: A list of NodeGroupsScopedList resources. + Items map[string]NodeGroupsScopedList `json:"items,omitempty"` // Kind: [Output Only] Type of resource.Always - // compute#nodeTemplateAggregatedList for aggregated lists of node - // templates. + // compute#nodeGroupAggregatedList for aggregated lists of node groups. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -16858,7 +17722,7 @@ type NodeTemplateAggregatedList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *NodeTemplateAggregatedListWarning `json:"warning,omitempty"` + Warning *NodeGroupAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -16881,15 +17745,15 @@ type NodeTemplateAggregatedList struct { NullFields []string `json:"-"` } -func (s *NodeTemplateAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateAggregatedList +func (s *NodeGroupAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTemplateAggregatedListWarning: [Output Only] Informational -// warning message. -type NodeTemplateAggregatedListWarning struct { +// NodeGroupAggregatedListWarning: [Output Only] Informational warning +// message. +type NodeGroupAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -16923,7 +17787,7 @@ type NodeTemplateAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*NodeTemplateAggregatedListWarningData `json:"data,omitempty"` + Data []*NodeGroupAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -16946,13 +17810,13 @@ type NodeTemplateAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTemplateAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateAggregatedListWarning +func (s *NodeGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTemplateAggregatedListWarningData struct { +type NodeGroupAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -16983,23 +17847,23 @@ type NodeTemplateAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTemplateAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateAggregatedListWarningData +func (s *NodeGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTemplateList: Contains a list of node templates. -type NodeTemplateList struct { +// NodeGroupList: Contains a list of nodeGroups. +type NodeGroupList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NodeTemplate resources. - Items []*NodeTemplate `json:"items,omitempty"` + // Items: A list of NodeGroup resources. + Items []*NodeGroup `json:"items,omitempty"` - // Kind: [Output Only] Type of resource.Always compute#nodeTemplateList - // for lists of node templates. + // Kind: [Output Only] Type of resource.Always compute#nodeGroupList for + // lists of node groups. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -17014,7 +17878,7 @@ type NodeTemplateList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *NodeTemplateListWarning `json:"warning,omitempty"` + Warning *NodeGroupListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -17037,14 +17901,14 @@ type NodeTemplateList struct { NullFields []string `json:"-"` } -func (s *NodeTemplateList) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateList +func (s *NodeGroupList) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTemplateListWarning: [Output Only] Informational warning message. -type NodeTemplateListWarning struct { +// NodeGroupListWarning: [Output Only] Informational warning message. +type NodeGroupListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -17078,7 +17942,7 @@ type NodeTemplateListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*NodeTemplateListWarningData `json:"data,omitempty"` + Data []*NodeGroupListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -17101,13 +17965,13 @@ type NodeTemplateListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTemplateListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateListWarning +func (s *NodeGroupListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTemplateListWarningData struct { +type NodeGroupListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -17138,20 +18002,31 @@ type NodeTemplateListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTemplateListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateListWarningData +func (s *NodeGroupListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTemplateNodeTypeFlexibility struct { - Cpus string `json:"cpus,omitempty"` +type NodeGroupNode struct { + // Instances: Instances scheduled on this node. + Instances []string `json:"instances,omitempty"` - LocalSsd string `json:"localSsd,omitempty"` + // Name: The name of the node. + Name string `json:"name,omitempty"` - Memory string `json:"memory,omitempty"` + // NodeType: The type of this node. + NodeType string `json:"nodeType,omitempty"` - // ForceSendFields is a list of field names (e.g. "Cpus") to + // Possible values: + // "CREATING" + // "DELETING" + // "INVALID" + // "READY" + // "REPAIRING" + Status string `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17159,7 +18034,62 @@ type NodeTemplateNodeTypeFlexibility struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Cpus") to include in API + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NodeGroupNode) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupNode + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NodeGroupsAddNodesRequest struct { + // AdditionalNodeCount: Count of additional nodes to be added to the + // node group. + AdditionalNodeCount int64 `json:"additionalNodeCount,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AdditionalNodeCount") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdditionalNodeCount") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *NodeGroupsAddNodesRequest) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsAddNodesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NodeGroupsDeleteNodesRequest struct { + Nodes []string `json:"nodes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Nodes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Nodes") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -17168,22 +18098,44 @@ type NodeTemplateNodeTypeFlexibility struct { NullFields []string `json:"-"` } -func (s *NodeTemplateNodeTypeFlexibility) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplateNodeTypeFlexibility +func (s *NodeGroupsDeleteNodesRequest) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsDeleteNodesRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTemplatesScopedList struct { - // NodeTemplates: [Output Only] A list of node templates contained in - // this scope. - NodeTemplates []*NodeTemplate `json:"nodeTemplates,omitempty"` +type NodeGroupsListNodes struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Warning: [Output Only] An informational warning that appears when the - // node templates list is empty. - Warning *NodeTemplatesScopedListWarning `json:"warning,omitempty"` + // Items: A list of Node resources. + Items []*NodeGroupNode `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "NodeTemplates") to + // Kind: [Output Only] The resource type, which is always + // compute.nodeGroupsListNodes for the list of nodes in the specified + // node group. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *NodeGroupsListNodesWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17191,24 +18143,24 @@ type NodeTemplatesScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NodeTemplates") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NodeTemplatesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplatesScopedList +func (s *NodeGroupsListNodes) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsListNodes raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTemplatesScopedListWarning: [Output Only] An informational -// warning that appears when the node templates list is empty. -type NodeTemplatesScopedListWarning struct { +// NodeGroupsListNodesWarning: [Output Only] Informational warning +// message. +type NodeGroupsListNodesWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -17242,7 +18194,7 @@ type NodeTemplatesScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*NodeTemplatesScopedListWarningData `json:"data,omitempty"` + Data []*NodeGroupsListNodesWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -17265,13 +18217,13 @@ type NodeTemplatesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTemplatesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplatesScopedListWarning +func (s *NodeGroupsListNodesWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsListNodesWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTemplatesScopedListWarningData struct { +type NodeGroupsListNodesWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -17302,64 +18254,22 @@ type NodeTemplatesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTemplatesScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeTemplatesScopedListWarningData +func (s *NodeGroupsListNodesWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsListNodesWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeType: A Node Type resource. -type NodeType struct { - // CpuPlatform: [Output Only] The CPU platform used by this node type. 
- CpuPlatform string `json:"cpuPlatform,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Deprecated: [Output Only] The deprecation status associated with this - // node type. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] An optional textual description of the - // resource. - Description string `json:"description,omitempty"` - - // GuestCpus: [Output Only] The number of virtual CPUs that are - // available to the node type. - GuestCpus int64 `json:"guestCpus,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] The type of the resource. Always compute#nodeType - // for node types. - Kind string `json:"kind,omitempty"` - - // LocalSsdGb: [Output Only] Local SSD available to the node type, - // defined in GB. - LocalSsdGb int64 `json:"localSsdGb,omitempty"` - - // MemoryMb: [Output Only] The amount of physical memory available to - // the node type, defined in MB. - MemoryMb int64 `json:"memoryMb,omitempty"` - - // Name: [Output Only] Name of the resource. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Zone: [Output Only] The name of the zone where the node type resides, - // such as us-central1-a. - Zone string `json:"zone,omitempty"` +type NodeGroupsScopedList struct { + // NodeGroups: [Output Only] A list of node groups contained in this + // scope. + NodeGroups []*NodeGroup `json:"nodeGroups,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: [Output Only] An informational warning that appears when the + // nodeGroup list is empty. + Warning *NodeGroupsScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "CpuPlatform") to + // ForceSendFields is a list of field names (e.g. "NodeGroups") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17367,61 +18277,8 @@ type NodeType struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CpuPlatform") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *NodeType) MarshalJSON() ([]byte, error) { - type NoMethod NodeType - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type NodeTypeAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of NodeTypesScopedList resources. 
- Items map[string]NodeTypesScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource.Always - // compute#nodeTypeAggregatedList for aggregated lists of node types. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *NodeTypeAggregatedListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "NodeGroups") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -17429,15 +18286,15 @@ type NodeTypeAggregatedList struct { NullFields []string `json:"-"` } -func (s *NodeTypeAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypeAggregatedList +func (s *NodeGroupsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTypeAggregatedListWarning: [Output Only] Informational warning -// message. -type NodeTypeAggregatedListWarning struct { +// NodeGroupsScopedListWarning: [Output Only] An informational warning +// that appears when the nodeGroup list is empty. +type NodeGroupsScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -17471,7 +18328,7 @@ type NodeTypeAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*NodeTypeAggregatedListWarningData `json:"data,omitempty"` + Data []*NodeGroupsScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
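A short sketch, under the same google.golang.org/api/compute/v1 assumption, of walking the per-scope aggregated structure described above, where each scope entry carries either node groups or an informational warning; the placeholder data stands in for a real aggregated-list response:

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printNodeGroups reports node groups per scope and surfaces the
// informational warning (for example NO_RESULTS_ON_PAGE) when a scope
// has no node groups.
func printNodeGroups(agg *compute.NodeGroupAggregatedList) {
	for scope, scoped := range agg.Items {
		if scoped.Warning != nil {
			fmt.Printf("%s: %s: %s\n", scope, scoped.Warning.Code, scoped.Warning.Message)
			continue
		}
		for _, ng := range scoped.NodeGroups {
			fmt.Printf("%s: node group %q has %d nodes\n", scope, ng.Name, ng.Size)
		}
	}
}

func main() {
	// Placeholder data; in practice agg would come from a
	// NodeGroups.AggregatedList(project) call.
	agg := &compute.NodeGroupAggregatedList{
		Items: map[string]compute.NodeGroupsScopedList{
			"zones/us-central1-a": {
				NodeGroups: []*compute.NodeGroup{{Name: "example-group", Size: 3}},
			},
		},
	}
	printNodeGroups(agg)
}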
@@ -17494,13 +18351,13 @@ type NodeTypeAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypeAggregatedListWarning +func (s *NodeGroupsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTypeAggregatedListWarningData struct { +type NodeGroupsScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -17531,23 +18388,147 @@ type NodeTypeAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypeAggregatedListWarningData +func (s *NodeGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTypeList: Contains a list of node types. -type NodeTypeList struct { +type NodeGroupsSetNodeTemplateRequest struct { + // NodeTemplate: Full or partial URL of the node template resource to be + // updated for this node group. + NodeTemplate string `json:"nodeTemplate,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NodeTemplate") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NodeTemplate") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NodeGroupsSetNodeTemplateRequest) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsSetNodeTemplateRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NodeTemplate: A Node Template resource. To learn more about node +// templates and sole-tenant nodes, read the Sole-tenant nodes +// documentation. (== resource_for beta.nodeTemplates ==) (== +// resource_for v1.nodeTemplates ==) +type NodeTemplate struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] The type of the resource. Always + // compute#nodeTemplate for node templates. 
+ Kind string `json:"kind,omitempty"` + + // Name: The name of the resource, provided by the client when initially + // creating the resource. The resource name must be 1-63 characters + // long, and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + // NodeAffinityLabels: Labels to use for node affinity, which will be + // used in instance scheduling. + NodeAffinityLabels map[string]string `json:"nodeAffinityLabels,omitempty"` + + // NodeType: The node type to use for nodes group that are created from + // this template. + NodeType string `json:"nodeType,omitempty"` + + // NodeTypeFlexibility: The flexible properties of the desired node + // type. Node groups that use this node template will create nodes of a + // type that matches these properties. + // + // This field is mutually exclusive with the node_type property; you can + // only define one or the other, but not both. + NodeTypeFlexibility *NodeTemplateNodeTypeFlexibility `json:"nodeTypeFlexibility,omitempty"` + + // Region: [Output Only] The name of the region where the node template + // resides, such as us-central1. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] The status of the node template. One of the + // following values: CREATING, READY, and DELETING. + // + // Possible values: + // "CREATING" + // "DELETING" + // "INVALID" + // "READY" + Status string `json:"status,omitempty"` + + // StatusMessage: [Output Only] An optional, human-readable explanation + // of the status. + StatusMessage string `json:"statusMessage,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NodeTemplate) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplate + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NodeTemplateAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NodeType resources. - Items []*NodeType `json:"items,omitempty"` + // Items: A list of NodeTemplatesScopedList resources. 
+ Items map[string]NodeTemplatesScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource.Always compute#nodeTypeList for - // lists of node types. + // Kind: [Output Only] Type of resource.Always + // compute#nodeTemplateAggregatedList for aggregated lists of node + // templates. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -17562,7 +18543,7 @@ type NodeTypeList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *NodeTypeListWarning `json:"warning,omitempty"` + Warning *NodeTemplateAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -17585,14 +18566,15 @@ type NodeTypeList struct { NullFields []string `json:"-"` } -func (s *NodeTypeList) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypeList +func (s *NodeTemplateAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTypeListWarning: [Output Only] Informational warning message. -type NodeTypeListWarning struct { +// NodeTemplateAggregatedListWarning: [Output Only] Informational +// warning message. +type NodeTemplateAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -17626,7 +18608,7 @@ type NodeTypeListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*NodeTypeListWarningData `json:"data,omitempty"` + Data []*NodeTemplateAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -17649,13 +18631,13 @@ type NodeTypeListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTypeListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypeListWarning +func (s *NodeTemplateAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTypeListWarningData struct { +type NodeTemplateAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -17686,22 +18668,44 @@ type NodeTypeListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTypeListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypeListWarningData +func (s *NodeTemplateAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTypesScopedList struct { - // NodeTypes: [Output Only] A list of node types contained in this - // scope. - NodeTypes []*NodeType `json:"nodeTypes,omitempty"` +// NodeTemplateList: Contains a list of node templates. +type NodeTemplateList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. 
+ Id string `json:"id,omitempty"` - // Warning: [Output Only] An informational warning that appears when the - // node types list is empty. - Warning *NodeTypesScopedListWarning `json:"warning,omitempty"` + // Items: A list of NodeTemplate resources. + Items []*NodeTemplate `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "NodeTypes") to + // Kind: [Output Only] Type of resource.Always compute#nodeTemplateList + // for lists of node templates. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *NodeTemplateListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17709,8 +18713,8 @@ type NodeTypesScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NodeTypes") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -17718,15 +18722,14 @@ type NodeTypesScopedList struct { NullFields []string `json:"-"` } -func (s *NodeTypesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypesScopedList +func (s *NodeTemplateList) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NodeTypesScopedListWarning: [Output Only] An informational warning -// that appears when the node types list is empty. -type NodeTypesScopedListWarning struct { +// NodeTemplateListWarning: [Output Only] Informational warning message. +type NodeTemplateListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -17760,7 +18763,7 @@ type NodeTypesScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*NodeTypesScopedListWarningData `json:"data,omitempty"` + Data []*NodeTemplateListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
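A small sketch of the ForceSendFields / NullFields behaviour that the comments above describe, again assuming the generated google.golang.org/api/compute/v1 package and using one of the request types above for illustration: an empty field is normally omitted from the request body, listing it in ForceSendFields sends its zero value, and listing it in NullFields sends JSON null.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Empty NodeTemplate field: omitted from the serialized request entirely.
	plain := &compute.NodeGroupsSetNodeTemplateRequest{}

	// Same empty field, but forced into the request body as "".
	forced := &compute.NodeGroupsSetNodeTemplateRequest{
		ForceSendFields: []string{"NodeTemplate"},
	}

	// Same empty field, but sent explicitly as null (useful in Patch requests).
	nulled := &compute.NodeGroupsSetNodeTemplateRequest{
		NullFields: []string{"NodeTemplate"},
	}

	for _, req := range []*compute.NodeGroupsSetNodeTemplateRequest{plain, forced, nulled} {
		b, err := json.Marshal(req)
		if err != nil {
			fmt.Println("marshal error:", err)
			continue
		}
		// Expected output: {}, then {"nodeTemplate":""}, then {"nodeTemplate":null}.
		fmt.Println(string(b))
	}
}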
@@ -17783,13 +18786,13 @@ type NodeTypesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *NodeTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypesScopedListWarning +func (s *NodeTemplateListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NodeTypesScopedListWarningData struct { +type NodeTemplateListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -17820,157 +18823,20 @@ type NodeTypesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *NodeTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod NodeTypesScopedListWarningData +func (s *NodeTemplateListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Operation: An Operation resource, used to manage asynchronous API -// requests. (== resource_for v1.globalOperations ==) (== resource_for -// beta.globalOperations ==) (== resource_for v1.regionOperations ==) -// (== resource_for beta.regionOperations ==) (== resource_for -// v1.zoneOperations ==) (== resource_for beta.zoneOperations ==) -type Operation struct { - // ClientOperationId: [Output Only] The value of `requestId` if you - // provided it in the request. Not present otherwise. - ClientOperationId string `json:"clientOperationId,omitempty"` - - // CreationTimestamp: [Deprecated] This field is deprecated. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: [Output Only] A textual description of the operation, - // which is set when the operation is created. - Description string `json:"description,omitempty"` - - // EndTime: [Output Only] The time that this operation was completed. - // This value is in RFC3339 text format. - EndTime string `json:"endTime,omitempty"` - - // Error: [Output Only] If errors are generated during processing of the - // operation, this field will be populated. - Error *OperationError `json:"error,omitempty"` - - // HttpErrorMessage: [Output Only] If the operation fails, this field - // contains the HTTP error message that was returned, such as NOT FOUND. - HttpErrorMessage string `json:"httpErrorMessage,omitempty"` - - // HttpErrorStatusCode: [Output Only] If the operation fails, this field - // contains the HTTP error status code that was returned. For example, a - // 404 means the resource was not found. - HttpErrorStatusCode int64 `json:"httpErrorStatusCode,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // InsertTime: [Output Only] The time that this operation was requested. - // This value is in RFC3339 text format. - InsertTime string `json:"insertTime,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#operation - // for Operation resources. - Kind string `json:"kind,omitempty"` - - // Name: [Output Only] Name of the resource. - Name string `json:"name,omitempty"` - - // OperationType: [Output Only] The type of operation, such as insert, - // update, or delete, and so on. 
- OperationType string `json:"operationType,omitempty"` - - // Progress: [Output Only] An optional progress indicator that ranges - // from 0 to 100. There is no requirement that this be linear or support - // any granularity of operations. This should not be used to guess when - // the operation will be complete. This number should monotonically - // increase as the operation progresses. - Progress int64 `json:"progress,omitempty"` - - // Region: [Output Only] The URL of the region where the operation - // resides. Only available when performing regional operations. You must - // specify this field as part of the HTTP request URL. It is not - // settable as a field in the request body. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // StartTime: [Output Only] The time that this operation was started by - // the server. This value is in RFC3339 text format. - StartTime string `json:"startTime,omitempty"` - - // Status: [Output Only] The status of the operation, which can be one - // of the following: PENDING, RUNNING, or DONE. - // - // Possible values: - // "DONE" - // "PENDING" - // "RUNNING" - Status string `json:"status,omitempty"` - - // StatusMessage: [Output Only] An optional textual description of the - // current status of the operation. - StatusMessage string `json:"statusMessage,omitempty"` - - // TargetId: [Output Only] The unique target ID, which identifies a - // specific incarnation of the target resource. - TargetId uint64 `json:"targetId,omitempty,string"` - - // TargetLink: [Output Only] The URL of the resource that the operation - // modifies. For operations related to creating a snapshot, this points - // to the persistent disk that the snapshot was created from. - TargetLink string `json:"targetLink,omitempty"` - - // User: [Output Only] User who requested the operation, for example: - // user@example.com. - User string `json:"user,omitempty"` - - // Warnings: [Output Only] If warning messages are generated during - // processing of the operation, this field will be populated. - Warnings []*OperationWarnings `json:"warnings,omitempty"` - - // Zone: [Output Only] The URL of the zone where the operation resides. - // Only available when performing per-zone operations. You must specify - // this field as part of the HTTP request URL. It is not settable as a - // field in the request body. - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ClientOperationId") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ClientOperationId") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} +type NodeTemplateNodeTypeFlexibility struct { + Cpus string `json:"cpus,omitempty"` -func (s *Operation) MarshalJSON() ([]byte, error) { - type NoMethod Operation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + LocalSsd string `json:"localSsd,omitempty"` -// OperationError: [Output Only] If errors are generated during -// processing of the operation, this field will be populated. -type OperationError struct { - // Errors: [Output Only] The array of errors encountered while - // processing this operation. - Errors []*OperationErrorErrors `json:"errors,omitempty"` + Memory string `json:"memory,omitempty"` - // ForceSendFields is a list of field names (e.g. "Errors") to + // ForceSendFields is a list of field names (e.g. "Cpus") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17978,7 +18844,7 @@ type OperationError struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Errors") to include in API + // NullFields is a list of field names (e.g. "Cpus") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -17987,24 +18853,22 @@ type OperationError struct { NullFields []string `json:"-"` } -func (s *OperationError) MarshalJSON() ([]byte, error) { - type NoMethod OperationError +func (s *NodeTemplateNodeTypeFlexibility) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplateNodeTypeFlexibility raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationErrorErrors struct { - // Code: [Output Only] The error type identifier for this error. - Code string `json:"code,omitempty"` - - // Location: [Output Only] Indicates the field in the request that - // caused the error. This property is optional. - Location string `json:"location,omitempty"` +type NodeTemplatesScopedList struct { + // NodeTemplates: [Output Only] A list of node templates contained in + // this scope. + NodeTemplates []*NodeTemplate `json:"nodeTemplates,omitempty"` - // Message: [Output Only] An optional, human-readable error message. - Message string `json:"message,omitempty"` + // Warning: [Output Only] An informational warning that appears when the + // node templates list is empty. + Warning *NodeTemplatesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "NodeTemplates") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18012,22 +18876,24 @@ type OperationErrorErrors struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. 
"NodeTemplates") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { - type NoMethod OperationErrorErrors +func (s *NodeTemplatesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplatesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationWarnings struct { +// NodeTemplatesScopedListWarning: [Output Only] An informational +// warning that appears when the node templates list is empty. +type NodeTemplatesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -18061,7 +18927,7 @@ type OperationWarnings struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*OperationWarningsData `json:"data,omitempty"` + Data []*NodeTemplatesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -18084,13 +18950,13 @@ type OperationWarnings struct { NullFields []string `json:"-"` } -func (s *OperationWarnings) MarshalJSON() ([]byte, error) { - type NoMethod OperationWarnings +func (s *NodeTemplatesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplatesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationWarningsData struct { +type NodeTemplatesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -18121,28 +18987,102 @@ type OperationWarningsData struct { NullFields []string `json:"-"` } -func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { - type NoMethod OperationWarningsData +func (s *NodeTemplatesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeTemplatesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` +// NodeType: A Node Type resource. +type NodeType struct { + // CpuPlatform: [Output Only] The CPU platform used by this node type. + CpuPlatform string `json:"cpuPlatform,omitempty"` - // Items: [Output Only] A map of scoped operation lists. - Items map[string]OperationsScopedList `json:"items,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#operationAggregatedList for aggregated lists of operations. - Kind string `json:"kind,omitempty"` + // Deprecated: [Output Only] The deprecation status associated with this + // node type. 
+ Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list + // Description: [Output Only] An optional textual description of the + // resource. + Description string `json:"description,omitempty"` + + // GuestCpus: [Output Only] The number of virtual CPUs that are + // available to the node type. + GuestCpus int64 `json:"guestCpus,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] The type of the resource. Always compute#nodeType + // for node types. + Kind string `json:"kind,omitempty"` + + // LocalSsdGb: [Output Only] Local SSD available to the node type, + // defined in GB. + LocalSsdGb int64 `json:"localSsdGb,omitempty"` + + // MemoryMb: [Output Only] The amount of physical memory available to + // the node type, defined in MB. + MemoryMb int64 `json:"memoryMb,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Zone: [Output Only] The name of the zone where the node type resides, + // such as us-central1-a. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CpuPlatform") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CpuPlatform") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NodeType) MarshalJSON() ([]byte, error) { + type NoMethod NodeType + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NodeTypeAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of NodeTypesScopedList resources. + Items map[string]NodeTypesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource.Always + // compute#nodeTypeAggregatedList for aggregated lists of node types. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. 
Subsequent list // requests will have their own nextPageToken to continue paging through // the results. NextPageToken string `json:"nextPageToken,omitempty"` @@ -18151,7 +19091,7 @@ type OperationAggregatedList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *OperationAggregatedListWarning `json:"warning,omitempty"` + Warning *NodeTypeAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -18174,15 +19114,15 @@ type OperationAggregatedList struct { NullFields []string `json:"-"` } -func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod OperationAggregatedList +func (s *NodeTypeAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypeAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationAggregatedListWarning: [Output Only] Informational warning +// NodeTypeAggregatedListWarning: [Output Only] Informational warning // message. -type OperationAggregatedListWarning struct { +type NodeTypeAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -18216,7 +19156,7 @@ type OperationAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*OperationAggregatedListWarningData `json:"data,omitempty"` + Data []*NodeTypeAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -18239,13 +19179,13 @@ type OperationAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *OperationAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod OperationAggregatedListWarning +func (s *NodeTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypeAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationAggregatedListWarningData struct { +type NodeTypeAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -18276,23 +19216,23 @@ type OperationAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *OperationAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod OperationAggregatedListWarningData +func (s *NodeTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypeAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationList: Contains a list of Operation resources. -type OperationList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. +// NodeTypeList: Contains a list of node types. +type NodeTypeList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` - // Items: [Output Only] A list of Operation resources. - Items []*Operation `json:"items,omitempty"` + // Items: A list of NodeType resources. 
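The NextPageToken fields documented above drive list pagination: pass the returned token back as the pageToken query parameter until it comes back empty. A minimal paging loop, assuming a hypothetical `listNodeTypes` helper and trimmed-down stand-in types in place of the generated service call:

```go
package main

import "fmt"

// Minimal stand-ins for the generated NodeType / NodeTypeList types; only the
// fields needed to demonstrate paging are included.
type NodeType struct{ Name string }

type NodeTypeList struct {
	Items         []*NodeType
	NextPageToken string
}

// listNodeTypes is a hypothetical placeholder for the generated list call;
// it serves two fixed pages so the loop below terminates.
func listNodeTypes(pageToken string) (*NodeTypeList, error) {
	if pageToken == "" {
		return &NodeTypeList{
			Items:         []*NodeType{{Name: "n1-node-96-624"}},
			NextPageToken: "page-2",
		}, nil
	}
	return &NodeTypeList{Items: []*NodeType{{Name: "c2-node-60-240"}}}, nil
}

func main() {
	// Keep requesting pages until NextPageToken comes back empty, exactly as
	// the generated comments describe.
	token := ""
	for {
		page, err := listNodeTypes(token)
		if err != nil {
			panic(err)
		}
		for _, nt := range page.Items {
			fmt.Println(nt.Name)
		}
		if page.NextPageToken == "" {
			break
		}
		token = page.NextPageToken
	}
}
```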
+ Items []*NodeType `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#operations for - // Operations resource. + // Kind: [Output Only] Type of resource.Always compute#nodeTypeList for + // lists of node types. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -18307,7 +19247,7 @@ type OperationList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *OperationListWarning `json:"warning,omitempty"` + Warning *NodeTypeListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -18330,14 +19270,14 @@ type OperationList struct { NullFields []string `json:"-"` } -func (s *OperationList) MarshalJSON() ([]byte, error) { - type NoMethod OperationList +func (s *NodeTypeList) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypeList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationListWarning: [Output Only] Informational warning message. -type OperationListWarning struct { +// NodeTypeListWarning: [Output Only] Informational warning message. +type NodeTypeListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -18371,7 +19311,7 @@ type OperationListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*OperationListWarningData `json:"data,omitempty"` + Data []*NodeTypeListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -18394,13 +19334,13 @@ type OperationListWarning struct { NullFields []string `json:"-"` } -func (s *OperationListWarning) MarshalJSON() ([]byte, error) { - type NoMethod OperationListWarning +func (s *NodeTypeListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypeListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationListWarningData struct { +type NodeTypeListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -18431,22 +19371,22 @@ type OperationListWarningData struct { NullFields []string `json:"-"` } -func (s *OperationListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod OperationListWarningData +func (s *NodeTypeListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypeListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationsScopedList struct { - // Operations: [Output Only] A list of operations contained in this +type NodeTypesScopedList struct { + // NodeTypes: [Output Only] A list of node types contained in this // scope. - Operations []*Operation `json:"operations,omitempty"` + NodeTypes []*NodeType `json:"nodeTypes,omitempty"` - // Warning: [Output Only] Informational warning which replaces the list - // of operations when the list is empty. - Warning *OperationsScopedListWarning `json:"warning,omitempty"` + // Warning: [Output Only] An informational warning that appears when the + // node types list is empty. 
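Every MarshalJSON in this file follows the same `type NoMethod X` pattern. The locally defined type copies X's fields but none of its methods, so marshaling the converted value uses the default encoder instead of re-entering MarshalJSON and recursing forever. A self-contained sketch of the idea, using plain encoding/json rather than gensupport:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Widget struct {
	Name string `json:"name,omitempty"`
}

func (w *Widget) MarshalJSON() ([]byte, error) {
	// NoMethod has Widget's fields but none of its methods, so the inner
	// json.Marshal call cannot call back into this MarshalJSON.
	type NoMethod Widget
	raw := NoMethod(*w)
	return json.Marshal(raw)
}

func main() {
	b, err := json.Marshal(&Widget{Name: "example"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"name":"example"}
}
```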
+ Warning *NodeTypesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Operations") to + // ForceSendFields is a list of field names (e.g. "NodeTypes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18454,7 +19394,7 @@ type OperationsScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Operations") to include in + // NullFields is a list of field names (e.g. "NodeTypes") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -18463,15 +19403,15 @@ type OperationsScopedList struct { NullFields []string `json:"-"` } -func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod OperationsScopedList +func (s *NodeTypesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationsScopedListWarning: [Output Only] Informational warning -// which replaces the list of operations when the list is empty. -type OperationsScopedListWarning struct { +// NodeTypesScopedListWarning: [Output Only] An informational warning +// that appears when the node types list is empty. +type NodeTypesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -18505,7 +19445,7 @@ type OperationsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*OperationsScopedListWarningData `json:"data,omitempty"` + Data []*NodeTypesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -18528,13 +19468,13 @@ type OperationsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod OperationsScopedListWarning +func (s *NodeTypesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationsScopedListWarningData struct { +type NodeTypesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -18565,62 +19505,134 @@ type OperationsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod OperationsScopedListWarningData +func (s *NodeTypesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NodeTypesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PathMatcher: A matcher for the path portion of the URL. The -// BackendService from the longest-matched rule will serve the URL. 
If -// no rule was matched, the default service will be used. -type PathMatcher struct { - // DefaultService: The full or partial URL to the BackendService - // resource. This will be used if none of the pathRules or routeRules - // defined by this PathMatcher are matched. For example, the following - // are all valid URLs to a BackendService resource: - // - - // https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService - // - compute/v1/projects/project/global/backendServices/backendService - // - // - global/backendServices/backendService - // Use defaultService instead of defaultRouteAction when simple routing - // to a backend service is desired and other advanced capabilities like - // traffic splitting and URL rewrites are not required. - // Only one of defaultService, defaultRouteAction or defaultUrlRedirect - // must be set. - // Authorization requires one or more of the following Google IAM - // permissions on the specified resource default_service: - // - compute.backendBuckets.use - // - compute.backendServices.use - DefaultService string `json:"defaultService,omitempty"` +// Operation: An Operation resource, used to manage asynchronous API +// requests. (== resource_for v1.globalOperations ==) (== resource_for +// beta.globalOperations ==) (== resource_for v1.regionOperations ==) +// (== resource_for beta.regionOperations ==) (== resource_for +// v1.zoneOperations ==) (== resource_for beta.zoneOperations ==) +type Operation struct { + // ClientOperationId: [Output Only] The value of `requestId` if you + // provided it in the request. Not present otherwise. + ClientOperationId string `json:"clientOperationId,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. + // CreationTimestamp: [Deprecated] This field is deprecated. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: [Output Only] A textual description of the operation, + // which is set when the operation is created. Description string `json:"description,omitempty"` - // Name: The name to which this PathMatcher is referred by the HostRule. + // EndTime: [Output Only] The time that this operation was completed. + // This value is in RFC3339 text format. + EndTime string `json:"endTime,omitempty"` + + // Error: [Output Only] If errors are generated during processing of the + // operation, this field will be populated. + Error *OperationError `json:"error,omitempty"` + + // HttpErrorMessage: [Output Only] If the operation fails, this field + // contains the HTTP error message that was returned, such as NOT FOUND. + HttpErrorMessage string `json:"httpErrorMessage,omitempty"` + + // HttpErrorStatusCode: [Output Only] If the operation fails, this field + // contains the HTTP error status code that was returned. For example, a + // 404 means the resource was not found. + HttpErrorStatusCode int64 `json:"httpErrorStatusCode,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // InsertTime: [Output Only] The time that this operation was requested. + // This value is in RFC3339 text format. + InsertTime string `json:"insertTime,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#operation + // for Operation resources. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. 
Name string `json:"name,omitempty"` - // PathRules: The list of path rules. Use this list instead of - // routeRules when routing based on simple path matching is all that's - // required. The order by which path rules are specified does not - // matter. Matches are always done on the longest-path-first basis. - // For example: a pathRule with a path /a/b/c/* will match before /a/b/* - // irrespective of the order in which those paths appear in this - // list. - // Only one of pathRules or routeRules must be set. - PathRules []*PathRule `json:"pathRules,omitempty"` + // OperationType: [Output Only] The type of operation, such as insert, + // update, or delete, and so on. + OperationType string `json:"operationType,omitempty"` - // ForceSendFields is a list of field names (e.g. "DefaultService") to - // unconditionally include in API requests. By default, fields with + // Progress: [Output Only] An optional progress indicator that ranges + // from 0 to 100. There is no requirement that this be linear or support + // any granularity of operations. This should not be used to guess when + // the operation will be complete. This number should monotonically + // increase as the operation progresses. + Progress int64 `json:"progress,omitempty"` + + // Region: [Output Only] The URL of the region where the operation + // resides. Only available when performing regional operations. You must + // specify this field as part of the HTTP request URL. It is not + // settable as a field in the request body. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // StartTime: [Output Only] The time that this operation was started by + // the server. This value is in RFC3339 text format. + StartTime string `json:"startTime,omitempty"` + + // Status: [Output Only] The status of the operation, which can be one + // of the following: PENDING, RUNNING, or DONE. + // + // Possible values: + // "DONE" + // "PENDING" + // "RUNNING" + Status string `json:"status,omitempty"` + + // StatusMessage: [Output Only] An optional textual description of the + // current status of the operation. + StatusMessage string `json:"statusMessage,omitempty"` + + // TargetId: [Output Only] The unique target ID, which identifies a + // specific incarnation of the target resource. + TargetId uint64 `json:"targetId,omitempty,string"` + + // TargetLink: [Output Only] The URL of the resource that the operation + // modifies. For operations related to creating a snapshot, this points + // to the persistent disk that the snapshot was created from. + TargetLink string `json:"targetLink,omitempty"` + + // User: [Output Only] User who requested the operation, for example: + // user@example.com. + User string `json:"user,omitempty"` + + // Warnings: [Output Only] If warning messages are generated during + // processing of the operation, this field will be populated. + Warnings []*OperationWarnings `json:"warnings,omitempty"` + + // Zone: [Output Only] The URL of the zone where the operation resides. + // Only available when performing per-zone operations. You must specify + // this field as part of the HTTP request URL. It is not settable as a + // field in the request body. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"ClientOperationId") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DefaultService") to + // NullFields is a list of field names (e.g. "ClientOperationId") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -18630,30 +19642,20 @@ type PathMatcher struct { NullFields []string `json:"-"` } -func (s *PathMatcher) MarshalJSON() ([]byte, error) { - type NoMethod PathMatcher +func (s *Operation) MarshalJSON() ([]byte, error) { + type NoMethod Operation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PathRule: A path-matching rule for a URL. If matched, will use the -// specified BackendService to handle the traffic arriving at this URL. -type PathRule struct { - // Paths: The list of path patterns to match. Each must start with / and - // the only place a * is allowed is at the end following a /. The string - // fed to the path matcher does not include any text after the first ? - // or #, and those chars are not allowed here. - Paths []string `json:"paths,omitempty"` - - // Service: The URL of the backend service resource if this rule is - // matched. - // Use service instead of routeAction when simple routing to a backend - // service is desired and other advanced capabilities like traffic - // splitting and rewrites are not required. - // Only one of service, routeAction or urlRedirect should must be set. - Service string `json:"service,omitempty"` +// OperationError: [Output Only] If errors are generated during +// processing of the operation, this field will be populated. +type OperationError struct { + // Errors: [Output Only] The array of errors encountered while + // processing this operation. + Errors []*OperationErrorErrors `json:"errors,omitempty"` - // ForceSendFields is a list of field names (e.g. "Paths") to + // ForceSendFields is a list of field names (e.g. "Errors") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18661,7 +19663,7 @@ type PathRule struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Paths") to include in API + // NullFields is a list of field names (e.g. "Errors") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -18670,86 +19672,24 @@ type PathRule struct { NullFields []string `json:"-"` } -func (s *PathRule) MarshalJSON() ([]byte, error) { - type NoMethod PathRule +func (s *OperationError) MarshalJSON() ([]byte, error) { + type NoMethod OperationError raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Policy: Defines an Identity and Access Management (IAM) policy. 
It is -// used to specify access control policies for Cloud Platform -// resources. -// -// -// -// A `Policy` consists of a list of `bindings`. A `binding` binds a list -// of `members` to a `role`, where the members can be user accounts, -// Google groups, Google domains, and service accounts. A `role` is a -// named list of permissions defined by IAM. -// -// **JSON Example** -// -// { "bindings": [ { "role": "roles/owner", "members": [ -// "user:mike@example.com", "group:admins@example.com", -// "domain:google.com", -// "serviceAccount:my-other-app@appspot.gserviceaccount.com" ] }, { -// "role": "roles/viewer", "members": ["user:sean@example.com"] } ] -// } -// -// **YAML Example** -// -// bindings: - members: - user:mike@example.com - -// group:admins@example.com - domain:google.com - -// serviceAccount:my-other-app@appspot.gserviceaccount.com role: -// roles/owner - members: - user:sean@example.com role: -// roles/viewer -// -// -// -// For a description of IAM and its features, see the [IAM developer's -// guide](https://cloud.google.com/iam/docs). -type Policy struct { - // AuditConfigs: Specifies cloud audit logging configuration for this - // policy. - AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` - - // Bindings: Associates a list of `members` to a `role`. `bindings` with - // no members will result in an error. - Bindings []*Binding `json:"bindings,omitempty"` - - // Etag: `etag` is used for optimistic concurrency control as a way to - // help prevent simultaneous updates of a policy from overwriting each - // other. It is strongly suggested that systems make use of the `etag` - // in the read-modify-write cycle to perform policy updates in order to - // avoid race conditions: An `etag` is returned in the response to - // `getIamPolicy`, and systems are expected to put that etag in the - // request to `setIamPolicy` to ensure that their change will be applied - // to the same version of the policy. - // - // If no `etag` is provided in the call to `setIamPolicy`, then the - // existing policy is overwritten blindly. - Etag string `json:"etag,omitempty"` - - IamOwned bool `json:"iamOwned,omitempty"` - - // Rules: If more than one rule is specified, the rules are applied in - // the following manner: - All matching LOG rules are always applied. - - // If any DENY/DENY_WITH_LOG rule matches, permission is denied. Logging - // will be applied if one or more matching rule requires logging. - - // Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is - // granted. Logging will be applied if one or more matching rule - // requires logging. - Otherwise, if no rule applies, permission is - // denied. - Rules []*Rule `json:"rules,omitempty"` +type OperationErrorErrors struct { + // Code: [Output Only] The error type identifier for this error. + Code string `json:"code,omitempty"` - // Version: Deprecated. - Version int64 `json:"version,omitempty"` + // Location: [Output Only] Indicates the field in the request that + // caused the error. This property is optional. + Location string `json:"location,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Message: [Output Only] An optional, human-readable error message. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "AuditConfigs") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. 
By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18757,360 +19697,70 @@ type Policy struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AuditConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { - type NoMethod Policy +func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { + type NoMethod OperationErrorErrors raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Project: A Project resource. For an overview of projects, see Cloud -// Platform Resource Hierarchy. (== resource_for v1.projects ==) (== -// resource_for beta.projects ==) -type Project struct { - // CommonInstanceMetadata: Metadata key/value pairs available to all - // instances contained in this project. See Custom metadata for more - // information. - CommonInstanceMetadata *Metadata `json:"commonInstanceMetadata,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // DefaultNetworkTier: This signifies the default network tier used for - // configuring resources of the project and can only take the following - // values: PREMIUM, STANDARD. Initially the default network tier is - // PREMIUM. +type OperationWarnings struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. // // Possible values: - // "PREMIUM" - // "STANDARD" - DefaultNetworkTier string `json:"defaultNetworkTier,omitempty"` - - // DefaultServiceAccount: [Output Only] Default service account used by - // VMs running in this project. - DefaultServiceAccount string `json:"defaultServiceAccount,omitempty"` + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Description: An optional textual description of the resource. 
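The Policy documentation removed a little earlier in this hunk describes etag-based optimistic concurrency: read the policy, modify it, and send the same etag back so a concurrent writer's change is not silently overwritten. The following read-modify-write sketch uses hypothetical getIamPolicy/setIamPolicy helpers and trimmed-down Policy/Binding types; none of these names come from the generated client.

```go
package main

import (
	"errors"
	"fmt"
)

// Trimmed-down stand-ins for the IAM Policy/Binding shapes described above.
type Binding struct {
	Role    string
	Members []string
}

type Policy struct {
	Bindings []*Binding
	Etag     string
}

// store simulates the server side purely so the example runs; a real caller
// would use the service's getIamPolicy/setIamPolicy methods instead.
var store = Policy{Etag: "etag-1"}

func getIamPolicy() Policy { return store }

func setIamPolicy(p Policy) error {
	if p.Etag != store.Etag {
		return errors.New("etag mismatch: policy changed concurrently, re-read and retry")
	}
	store = p
	store.Etag = "etag-2"
	return nil
}

func main() {
	// Read-modify-write: keep the etag from the read so the write fails fast
	// if someone else changed the policy in the meantime.
	p := getIamPolicy()
	p.Bindings = append(p.Bindings, &Binding{
		Role:    "roles/viewer",
		Members: []string{"user:sean@example.com"},
	})
	if err := setIamPolicy(p); err != nil {
		fmt.Println("retry needed:", err)
		return
	}
	fmt.Println("policy updated")
}
```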
- Description string `json:"description,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationWarningsData `json:"data,omitempty"` - // EnabledFeatures: Restricted features enabled for use on this project. - EnabledFeatures []string `json:"enabledFeatures,omitempty"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. This is not the project ID, and - // is just a unique ID used by Compute Engine to identify resources. - Id uint64 `json:"id,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Kind: [Output Only] Type of the resource. Always compute#project for - // projects. - Kind string `json:"kind,omitempty"` - - // Name: The project ID. For example: my-example-project. Use the - // project ID to make requests to Compute Engine. - Name string `json:"name,omitempty"` - - // Quotas: [Output Only] Quotas assigned to this project. - Quotas []*Quota `json:"quotas,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // UsageExportLocation: The naming prefix for daily usage reports and - // the Google Cloud Storage bucket where they are stored. - UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` - - // XpnProjectStatus: [Output Only] The role this project has in a shared - // VPC configuration. Currently only HOST projects are differentiated. - // - // Possible values: - // "HOST" - // "UNSPECIFIED_XPN_PROJECT_STATUS" - XpnProjectStatus string `json:"xpnProjectStatus,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. - // "CommonInstanceMetadata") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CommonInstanceMetadata") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *Project) MarshalJSON() ([]byte, error) { - type NoMethod Project - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ProjectsDisableXpnResourceRequest struct { - // XpnResource: Service resource (a.k.a service project) ID. - XpnResource *XpnResourceId `json:"xpnResource,omitempty"` - - // ForceSendFields is a list of field names (e.g. "XpnResource") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "XpnResource") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type NoMethod ProjectsDisableXpnResourceRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ProjectsEnableXpnResourceRequest struct { - // XpnResource: Service resource (a.k.a service project) ID. - XpnResource *XpnResourceId `json:"xpnResource,omitempty"` - - // ForceSendFields is a list of field names (e.g. "XpnResource") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "XpnResource") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type NoMethod ProjectsEnableXpnResourceRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ProjectsGetXpnResources struct { - // Kind: [Output Only] Type of resource. Always - // compute#projectsGetXpnResources for lists of service resources (a.k.a - // service projects) - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. 
- NextPageToken string `json:"nextPageToken,omitempty"` - - // Resources: Service resources (a.k.a service projects) attached to - // this project as their shared VPC host. - Resources []*XpnResourceId `json:"resources,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { - type NoMethod ProjectsGetXpnResources - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ProjectsListXpnHostsRequest struct { - // Organization: Optional organization ID managed by Cloud Resource - // Manager, for which to list shared VPC host projects. If not - // specified, the organization will be inferred from the project. - Organization string `json:"organization,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Organization") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Organization") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { - type NoMethod ProjectsListXpnHostsRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type ProjectsSetDefaultNetworkTierRequest struct { - // NetworkTier: Default network tier to be set. - // - // Possible values: - // "PREMIUM" - // "STANDARD" - NetworkTier string `json:"networkTier,omitempty"` - - // ForceSendFields is a list of field names (e.g. "NetworkTier") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "NetworkTier") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ProjectsSetDefaultNetworkTierRequest) MarshalJSON() ([]byte, error) { - type NoMethod ProjectsSetDefaultNetworkTierRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Quota: A quotas entry. -type Quota struct { - // Limit: [Output Only] Quota limit for this metric. - Limit float64 `json:"limit,omitempty"` - - // Metric: [Output Only] Name of the quota metric. - // - // Possible values: - // "AUTOSCALERS" - // "BACKEND_BUCKETS" - // "BACKEND_SERVICES" - // "COMMITMENTS" - // "CPUS" - // "CPUS_ALL_REGIONS" - // "DISKS_TOTAL_GB" - // "FIREWALLS" - // "FORWARDING_RULES" - // "GLOBAL_INTERNAL_ADDRESSES" - // "GPUS_ALL_REGIONS" - // "HEALTH_CHECKS" - // "IMAGES" - // "INSTANCES" - // "INSTANCE_GROUPS" - // "INSTANCE_GROUP_MANAGERS" - // "INSTANCE_TEMPLATES" - // "INTERCONNECTS" - // "INTERCONNECT_ATTACHMENTS_PER_REGION" - // "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS" - // "INTERNAL_ADDRESSES" - // "IN_USE_ADDRESSES" - // "IN_USE_BACKUP_SCHEDULES" - // "LOCAL_SSD_TOTAL_GB" - // "NETWORKS" - // "NVIDIA_K80_GPUS" - // "NVIDIA_P100_GPUS" - // "NVIDIA_P100_VWS_GPUS" - // "NVIDIA_P4_GPUS" - // "NVIDIA_P4_VWS_GPUS" - // "NVIDIA_T4_GPUS" - // "NVIDIA_T4_VWS_GPUS" - // "NVIDIA_V100_GPUS" - // "PREEMPTIBLE_CPUS" - // "PREEMPTIBLE_LOCAL_SSD_GB" - // "PREEMPTIBLE_NVIDIA_K80_GPUS" - // "PREEMPTIBLE_NVIDIA_P100_GPUS" - // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" - // "PREEMPTIBLE_NVIDIA_P4_GPUS" - // "PREEMPTIBLE_NVIDIA_P4_VWS_GPUS" - // "PREEMPTIBLE_NVIDIA_T4_GPUS" - // "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS" - // "PREEMPTIBLE_NVIDIA_V100_GPUS" - // "REGIONAL_AUTOSCALERS" - // "REGIONAL_INSTANCE_GROUP_MANAGERS" - // "RESOURCE_POLICIES" - // "ROUTERS" - // "ROUTES" - // "SECURITY_POLICIES" - // "SECURITY_POLICY_RULES" - // "SNAPSHOTS" - // "SSD_TOTAL_GB" - // "SSL_CERTIFICATES" - // "STATIC_ADDRESSES" - // "SUBNETWORKS" - // "TARGET_HTTPS_PROXIES" - // "TARGET_HTTP_PROXIES" - // "TARGET_INSTANCES" - // "TARGET_POOLS" - // "TARGET_SSL_PROXIES" - // "TARGET_TCP_PROXIES" - // "TARGET_VPN_GATEWAYS" - // "URL_MAPS" - // "VPN_GATEWAYS" - // "VPN_TUNNELS" - Metric string `json:"metric,omitempty"` - - // Usage: [Output Only] Current usage of this metric. - Usage float64 `json:"usage,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Limit") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Limit") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -19119,46 +19769,27 @@ type Quota struct { NullFields []string `json:"-"` } -func (s *Quota) MarshalJSON() ([]byte, error) { - type NoMethod Quota +func (s *OperationWarnings) MarshalJSON() ([]byte, error) { + type NoMethod OperationWarnings raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -func (s *Quota) UnmarshalJSON(data []byte) error { - type NoMethod Quota - var s1 struct { - Limit gensupport.JSONFloat64 `json:"limit"` - Usage gensupport.JSONFloat64 `json:"usage"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Limit = float64(s1.Limit) - s.Usage = float64(s1.Usage) - return nil -} - -// Reference: Represents a reference to a resource. -type Reference struct { - // Kind: [Output Only] Type of the resource. Always compute#reference - // for references. - Kind string `json:"kind,omitempty"` - - // ReferenceType: A description of the reference type with no implied - // semantics. Possible values include: - // - MEMBER_OF - ReferenceType string `json:"referenceType,omitempty"` - - // Referrer: URL of the resource which refers to the target. - Referrer string `json:"referrer,omitempty"` +type OperationWarningsData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Target: URL of the resource to which this reference points. - Target string `json:"target,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19166,7 +19797,7 @@ type Reference struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -19175,92 +19806,22 @@ type Reference struct { NullFields []string `json:"-"` } -func (s *Reference) MarshalJSON() ([]byte, error) { - type NoMethod Reference +func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { + type NoMethod OperationWarningsData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Region: Region resource. (== resource_for beta.regions ==) (== -// resource_for v1.regions ==) -type Region struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. 
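Quota's custom UnmarshalJSON (removed above) exists because the API may encode Limit and Usage either as JSON numbers or as strings; gensupport.JSONFloat64 accepts both forms, and the override copies the parsed values back onto the struct. A self-contained sketch of the same idea, with `flexFloat64` and `quota` as illustrative stand-ins:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// flexFloat64 mimics the role of gensupport.JSONFloat64: it unmarshals from
// either a JSON number or a JSON string containing a number.
type flexFloat64 float64

func (f *flexFloat64) UnmarshalJSON(data []byte) error {
	if len(data) > 0 && data[0] == '"' {
		var s string
		if err := json.Unmarshal(data, &s); err != nil {
			return err
		}
		v, err := strconv.ParseFloat(s, 64)
		if err != nil {
			return err
		}
		*f = flexFloat64(v)
		return nil
	}
	var v float64
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	*f = flexFloat64(v)
	return nil
}

// quota is a trimmed-down stand-in for the generated Quota type.
type quota struct {
	Metric string      `json:"metric"`
	Limit  flexFloat64 `json:"limit"`
	Usage  flexFloat64 `json:"usage"`
}

func main() {
	for _, in := range []string{
		`{"metric":"CPUS","limit":24,"usage":8}`,
		`{"metric":"CPUS","limit":"24","usage":"8"}`, // string-encoded numbers
	} {
		var q quota
		if err := json.Unmarshal([]byte(in), &q); err != nil {
			panic(err)
		}
		fmt.Printf("%s: %.0f/%.0f\n", q.Metric, float64(q.Usage), float64(q.Limit))
	}
}
```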
- CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Deprecated: [Output Only] The deprecation status associated with this - // region. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] Textual description of the resource. - Description string `json:"description,omitempty"` - +type OperationAggregatedList struct { // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#region for - // regions. - Kind string `json:"kind,omitempty"` - - // Name: [Output Only] Name of the resource. - Name string `json:"name,omitempty"` - - // Quotas: [Output Only] Quotas assigned to this region. - Quotas []*Quota `json:"quotas,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Status: [Output Only] Status of the region, either UP or DOWN. - // - // Possible values: - // "DOWN" - // "UP" - Status string `json:"status,omitempty"` - - // Zones: [Output Only] A list of zones available in this region, in the - // form of resource URLs. - Zones []string `json:"zones,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Region) MarshalJSON() ([]byte, error) { - type NoMethod Region - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionAutoscalerList: Contains a list of autoscalers. -type RegionAutoscalerList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. Id string `json:"id,omitempty"` - // Items: A list of Autoscaler resources. - Items []*Autoscaler `json:"items,omitempty"` + // Items: [Output Only] A map of scoped operation lists. + Items map[string]OperationsScopedList `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource. Always + // compute#operationAggregatedList for aggregated lists of operations. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -19275,7 +19836,7 @@ type RegionAutoscalerList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. 
- Warning *RegionAutoscalerListWarning `json:"warning,omitempty"` + Warning *OperationAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -19298,15 +19859,15 @@ type RegionAutoscalerList struct { NullFields []string `json:"-"` } -func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { - type NoMethod RegionAutoscalerList +func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod OperationAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionAutoscalerListWarning: [Output Only] Informational warning +// OperationAggregatedListWarning: [Output Only] Informational warning // message. -type RegionAutoscalerListWarning struct { +type OperationAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -19340,7 +19901,7 @@ type RegionAutoscalerListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RegionAutoscalerListWarningData `json:"data,omitempty"` + Data []*OperationAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -19363,13 +19924,13 @@ type RegionAutoscalerListWarning struct { NullFields []string `json:"-"` } -func (s *RegionAutoscalerListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionAutoscalerListWarning +func (s *OperationAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod OperationAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionAutoscalerListWarningData struct { +type OperationAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -19400,22 +19961,23 @@ type RegionAutoscalerListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionAutoscalerListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionAutoscalerListWarningData +func (s *OperationAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod OperationAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionDiskTypeList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. +// OperationList: Contains a list of Operation resources. +type OperationList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. Id string `json:"id,omitempty"` - // Items: A list of DiskType resources. - Items []*DiskType `json:"items,omitempty"` + // Items: [Output Only] A list of Operation resources. + Items []*Operation `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#regionDiskTypeList for region disk types. + // Kind: [Output Only] Type of resource. Always compute#operations for + // Operations resource. 
Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -19430,7 +19992,7 @@ type RegionDiskTypeList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *RegionDiskTypeListWarning `json:"warning,omitempty"` + Warning *OperationListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -19453,15 +20015,14 @@ type RegionDiskTypeList struct { NullFields []string `json:"-"` } -func (s *RegionDiskTypeList) MarshalJSON() ([]byte, error) { - type NoMethod RegionDiskTypeList +func (s *OperationList) MarshalJSON() ([]byte, error) { + type NoMethod OperationList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionDiskTypeListWarning: [Output Only] Informational warning -// message. -type RegionDiskTypeListWarning struct { +// OperationListWarning: [Output Only] Informational warning message. +type OperationListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -19495,7 +20056,7 @@ type RegionDiskTypeListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RegionDiskTypeListWarningData `json:"data,omitempty"` + Data []*OperationListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -19518,13 +20079,13 @@ type RegionDiskTypeListWarning struct { NullFields []string `json:"-"` } -func (s *RegionDiskTypeListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionDiskTypeListWarning +func (s *OperationListWarning) MarshalJSON() ([]byte, error) { + type NoMethod OperationListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionDiskTypeListWarningData struct { +type OperationListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -19555,18 +20116,22 @@ type RegionDiskTypeListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionDiskTypeListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionDiskTypeListWarningData +func (s *OperationListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod OperationListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionDisksResizeRequest struct { - // SizeGb: The new size of the regional persistent disk, which is - // specified in GB. - SizeGb int64 `json:"sizeGb,omitempty,string"` +type OperationsScopedList struct { + // Operations: [Output Only] A list of operations contained in this + // scope. + Operations []*Operation `json:"operations,omitempty"` - // ForceSendFields is a list of field names (e.g. "SizeGb") to + // Warning: [Output Only] Informational warning which replaces the list + // of operations when the list is empty. + Warning *OperationsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Operations") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19574,61 +20139,8 @@ type RegionDisksResizeRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SizeGb") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionDisksResizeRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionDisksResizeRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionInstanceGroupList: Contains a list of InstanceGroup resources. -type RegionInstanceGroupList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of InstanceGroup resources. - Items []*InstanceGroup `json:"items,omitempty"` - - // Kind: The resource type. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Operations") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -19636,15 +20148,15 @@ type RegionInstanceGroupList struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupList +func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod OperationsScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupListWarning: [Output Only] Informational warning -// message. 
-type RegionInstanceGroupListWarning struct { +// OperationsScopedListWarning: [Output Only] Informational warning +// which replaces the list of operations when the list is empty. +type OperationsScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -19678,7 +20190,7 @@ type RegionInstanceGroupListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` + Data []*OperationsScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -19701,13 +20213,13 @@ type RegionInstanceGroupListWarning struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupListWarning +func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod OperationsScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupListWarningData struct { +type OperationsScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -19738,46 +20250,57 @@ type RegionInstanceGroupListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupListWarningData +func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod OperationsScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupManagerList: Contains a list of managed instance -// groups. -type RegionInstanceGroupManagerList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of InstanceGroupManager resources. - Items []*InstanceGroupManager `json:"items,omitempty"` - - // Kind: [Output Only] The resource type, which is always - // compute#instanceGroupManagerList for a list of managed instance - // groups that exist in th regional scope. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` +// PathMatcher: A matcher for the path portion of the URL. The +// BackendService from the longest-matched rule will serve the URL. If +// no rule was matched, the default service will be used. +type PathMatcher struct { + // DefaultService: The full or partial URL to the BackendService + // resource. This will be used if none of the pathRules or routeRules + // defined by this PathMatcher are matched. 
For example, the following + // are all valid URLs to a BackendService resource: + // - + // https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService + // - compute/v1/projects/project/global/backendServices/backendService + // + // - global/backendServices/backendService If defaultRouteAction is + // additionally specified, advanced routing actions like URL Rewrites, + // etc. take effect prior to sending the request to the backend. + // However, if defaultService is specified, defaultRouteAction cannot + // contain any weightedBackendServices. Conversely, if + // defaultRouteAction specifies any weightedBackendServices, + // defaultService must not be specified. + // Only one of defaultService, defaultUrlRedirect or + // defaultRouteAction.weightedBackendService must be set. + // Authorization requires one or more of the following Google IAM + // permissions on the specified resource default_service: + // - compute.backendBuckets.use + // - compute.backendServices.use + DefaultService string `json:"defaultService,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"` + // Name: The name to which this PathMatcher is referred by the HostRule. + Name string `json:"name,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // PathRules: The list of path rules. Use this list instead of + // routeRules when routing based on simple path matching is all that's + // required. The order by which path rules are specified does not + // matter. Matches are always done on the longest-path-first basis. + // For example: a pathRule with a path /a/b/c/* will match before /a/b/* + // irrespective of the order in which those paths appear in this + // list. + // Only one of pathRules or routeRules must be set. + PathRules []*PathRule `json:"pathRules,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "DefaultService") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19785,64 +20308,43 @@ type RegionInstanceGroupManagerList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "DefaultService") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. 
It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagerList +func (s *PathMatcher) MarshalJSON() ([]byte, error) { + type NoMethod PathMatcher raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupManagerListWarning: [Output Only] Informational -// warning message. -type RegionInstanceGroupManagerListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RegionInstanceGroupManagerListWarningData `json:"data,omitempty"` +// PathRule: A path-matching rule for a URL. If matched, will use the +// specified BackendService to handle the traffic arriving at this URL. +type PathRule struct { + // Paths: The list of path patterns to match. Each must start with / and + // the only place a * is allowed is at the end following a /. The string + // fed to the path matcher does not include any text after the first ? + // or #, and those chars are not allowed here. + Paths []string `json:"paths,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // Service: The full or partial URL of the backend service resource to + // which traffic is directed if this rule is matched. If routeAction is + // additionally specified, advanced routing actions like URL Rewrites, + // etc. take effect prior to sending the request to the backend. + // However, if service is specified, routeAction cannot contain any + // weightedBackendService s. Conversely, if routeAction specifies any + // weightedBackendServices, service must not be specified. + // Only one of urlRedirect, service or + // routeAction.weightedBackendService must be set. + Service string `json:"service,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "Paths") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19850,7 +20352,7 @@ type RegionInstanceGroupManagerListWarning struct { // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API + // NullFields is a list of field names (e.g. "Paths") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -19859,27 +20361,86 @@ type RegionInstanceGroupManagerListWarning struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagerListWarning +func (s *PathRule) MarshalJSON() ([]byte, error) { + type NoMethod PathRule raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagerListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +// Policy: Defines an Identity and Access Management (IAM) policy. It is +// used to specify access control policies for Cloud Platform +// resources. +// +// +// +// A `Policy` consists of a list of `bindings`. A `binding` binds a list +// of `members` to a `role`, where the members can be user accounts, +// Google groups, Google domains, and service accounts. A `role` is a +// named list of permissions defined by IAM. +// +// **JSON Example** +// +// { "bindings": [ { "role": "roles/owner", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-other-app@appspot.gserviceaccount.com" ] }, { +// "role": "roles/viewer", "members": ["user:sean@example.com"] } ] +// } +// +// **YAML Example** +// +// bindings: - members: - user:mike@example.com - +// group:admins@example.com - domain:google.com - +// serviceAccount:my-other-app@appspot.gserviceaccount.com role: +// roles/owner - members: - user:sean@example.com role: +// roles/viewer +// +// +// +// For a description of IAM and its features, see the [IAM developer's +// guide](https://cloud.google.com/iam/docs). +type Policy struct { + // AuditConfigs: Specifies cloud audit logging configuration for this + // policy. + AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // Bindings: Associates a list of `members` to a `role`. `bindings` with + // no members will result in an error. + Bindings []*Binding `json:"bindings,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // Etag: `etag` is used for optimistic concurrency control as a way to + // help prevent simultaneous updates of a policy from overwriting each + // other. 
It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. + // + // If no `etag` is provided in the call to `setIamPolicy`, then the + // existing policy is overwritten blindly. + Etag string `json:"etag,omitempty"` + + IamOwned bool `json:"iamOwned,omitempty"` + + // Rules: If more than one rule is specified, the rules are applied in + // the following manner: - All matching LOG rules are always applied. - + // If any DENY/DENY_WITH_LOG rule matches, permission is denied. Logging + // will be applied if one or more matching rule requires logging. - + // Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is + // granted. Logging will be applied if one or more matching rule + // requires logging. - Otherwise, if no rule applies, permission is + // denied. + Rules []*Rule `json:"rules,omitempty"` + + // Version: Deprecated. + Version int64 `json:"version,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AuditConfigs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19887,28 +20448,119 @@ type RegionInstanceGroupManagerListWarningData struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AuditConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagerListWarningData +func (s *Policy) MarshalJSON() ([]byte, error) { + type NoMethod Policy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersAbandonInstancesRequest struct { - // Instances: The URLs of one or more instances to abandon. This can be - // a full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` +// Project: A Project resource. For an overview of projects, see Cloud +// Platform Resource Hierarchy. (== resource_for v1.projects ==) (== +// resource_for beta.projects ==) +type Project struct { + // CommonInstanceMetadata: Metadata key/value pairs available to all + // instances contained in this project. See Custom metadata for more + // information. 
+ CommonInstanceMetadata *Metadata `json:"commonInstanceMetadata,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // DefaultNetworkTier: This signifies the default network tier used for + // configuring resources of the project and can only take the following + // values: PREMIUM, STANDARD. Initially the default network tier is + // PREMIUM. + // + // Possible values: + // "PREMIUM" + // "STANDARD" + DefaultNetworkTier string `json:"defaultNetworkTier,omitempty"` + + // DefaultServiceAccount: [Output Only] Default service account used by + // VMs running in this project. + DefaultServiceAccount string `json:"defaultServiceAccount,omitempty"` + + // Description: An optional textual description of the resource. + Description string `json:"description,omitempty"` + + // EnabledFeatures: Restricted features enabled for use on this project. + EnabledFeatures []string `json:"enabledFeatures,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. This is not the project ID, and + // is just a unique ID used by Compute Engine to identify resources. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#project for + // projects. + Kind string `json:"kind,omitempty"` + + // Name: The project ID. For example: my-example-project. Use the + // project ID to make requests to Compute Engine. + Name string `json:"name,omitempty"` + + // Quotas: [Output Only] Quotas assigned to this project. + Quotas []*Quota `json:"quotas,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // UsageExportLocation: The naming prefix for daily usage reports and + // the Google Cloud Storage bucket where they are stored. + UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` + + // XpnProjectStatus: [Output Only] The role this project has in a shared + // VPC configuration. Currently only HOST projects are differentiated. + // + // Possible values: + // "HOST" + // "UNSPECIFIED_XPN_PROJECT_STATUS" + XpnProjectStatus string `json:"xpnProjectStatus,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "CommonInstanceMetadata") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CommonInstanceMetadata") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *Project) MarshalJSON() ([]byte, error) { + type NoMethod Project + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ProjectsDisableXpnResourceRequest struct { + // XpnResource: Service resource (a.k.a service project) ID. + XpnResource *XpnResourceId `json:"xpnResource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "XpnResource") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19916,28 +20568,26 @@ type RegionInstanceGroupManagersAbandonInstancesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "XpnResource") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersAbandonInstancesRequest +func (s *ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { + type NoMethod ProjectsDisableXpnResourceRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersDeleteInstancesRequest struct { - // Instances: The URLs of one or more instances to delete. This can be a - // full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` +type ProjectsEnableXpnResourceRequest struct { + // XpnResource: Service resource (a.k.a service project) ID. + XpnResource *XpnResourceId `json:"xpnResource,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // ForceSendFields is a list of field names (e.g. "XpnResource") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19945,30 +20595,44 @@ type RegionInstanceGroupManagersDeleteInstancesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "XpnResource") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. 
It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersDeleteInstancesRequest +func (s *ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { + type NoMethod ProjectsEnableXpnResourceRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersListInstancesResponse struct { - // ManagedInstances: A list of managed instances. - ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` +type ProjectsGetXpnResources struct { + // Kind: [Output Only] Type of resource. Always + // compute#projectsGetXpnResources for lists of service resources (a.k.a + // service projects) + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Resources: Service resources (a.k.a service projects) attached to + // this project as their shared VPC host. + Resources []*XpnResourceId `json:"resources,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "ManagedInstances") to + // ForceSendFields is a list of field names (e.g. "Kind") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19976,29 +20640,28 @@ type RegionInstanceGroupManagersListInstancesResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ManagedInstances") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersListInstancesResponse +func (s *ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { + type NoMethod ProjectsGetXpnResources raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersRecreateRequest struct { - // Instances: The URLs of one or more instances to recreate. This can be - // a full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` +type ProjectsListXpnHostsRequest struct { + // Organization: Optional organization ID managed by Cloud Resource + // Manager, for which to list shared VPC host projects. If not + // specified, the organization will be inferred from the project. + Organization string `json:"organization,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // ForceSendFields is a list of field names (e.g. "Organization") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -20006,33 +20669,30 @@ type RegionInstanceGroupManagersRecreateRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Organization") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersRecreateRequest +func (s *ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { + type NoMethod ProjectsListXpnHostsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersSetTargetPoolsRequest struct { - // Fingerprint: Fingerprint of the target pools information, which is a - // hash of the contents. This field is used for optimistic locking when - // you update the target pool entries. This field is optional. - Fingerprint string `json:"fingerprint,omitempty"` - - // TargetPools: The URL of all TargetPool resources to which instances - // in the instanceGroup field are added. The target pools automatically - // apply to all of the instances in the managed instance group. - TargetPools []string `json:"targetPools,omitempty"` +type ProjectsSetDefaultNetworkTierRequest struct { + // NetworkTier: Default network tier to be set. + // + // Possible values: + // "PREMIUM" + // "STANDARD" + NetworkTier string `json:"networkTier,omitempty"` - // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // ForceSendFields is a list of field names (e.g. 
"NetworkTier") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -20040,7 +20700,7 @@ type RegionInstanceGroupManagersSetTargetPoolsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Fingerprint") to include + // NullFields is a list of field names (e.g. "NetworkTier") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -20049,18 +20709,98 @@ type RegionInstanceGroupManagersSetTargetPoolsRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersSetTargetPoolsRequest +func (s *ProjectsSetDefaultNetworkTierRequest) MarshalJSON() ([]byte, error) { + type NoMethod ProjectsSetDefaultNetworkTierRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersSetTemplateRequest struct { - // InstanceTemplate: URL of the InstanceTemplate resource from which all - // new instances will be created. - InstanceTemplate string `json:"instanceTemplate,omitempty"` +// Quota: A quotas entry. +type Quota struct { + // Limit: [Output Only] Quota limit for this metric. + Limit float64 `json:"limit,omitempty"` - // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to + // Metric: [Output Only] Name of the quota metric. + // + // Possible values: + // "AUTOSCALERS" + // "BACKEND_BUCKETS" + // "BACKEND_SERVICES" + // "COMMITMENTS" + // "CPUS" + // "CPUS_ALL_REGIONS" + // "DISKS_TOTAL_GB" + // "EXTERNAL_VPN_GATEWAYS" + // "FIREWALLS" + // "FORWARDING_RULES" + // "GLOBAL_INTERNAL_ADDRESSES" + // "GPUS_ALL_REGIONS" + // "HEALTH_CHECKS" + // "IMAGES" + // "INSTANCES" + // "INSTANCE_GROUPS" + // "INSTANCE_GROUP_MANAGERS" + // "INSTANCE_TEMPLATES" + // "INTERCONNECTS" + // "INTERCONNECT_ATTACHMENTS_PER_REGION" + // "INTERCONNECT_ATTACHMENTS_TOTAL_MBPS" + // "INTERNAL_ADDRESSES" + // "IN_USE_ADDRESSES" + // "IN_USE_BACKUP_SCHEDULES" + // "IN_USE_SNAPSHOT_SCHEDULES" + // "LOCAL_SSD_TOTAL_GB" + // "NETWORKS" + // "NETWORK_ENDPOINT_GROUPS" + // "NVIDIA_K80_GPUS" + // "NVIDIA_P100_GPUS" + // "NVIDIA_P100_VWS_GPUS" + // "NVIDIA_P4_GPUS" + // "NVIDIA_P4_VWS_GPUS" + // "NVIDIA_T4_GPUS" + // "NVIDIA_T4_VWS_GPUS" + // "NVIDIA_V100_GPUS" + // "PREEMPTIBLE_CPUS" + // "PREEMPTIBLE_LOCAL_SSD_GB" + // "PREEMPTIBLE_NVIDIA_K80_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_GPUS" + // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_P4_GPUS" + // "PREEMPTIBLE_NVIDIA_P4_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_T4_GPUS" + // "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS" + // "PREEMPTIBLE_NVIDIA_V100_GPUS" + // "REGIONAL_AUTOSCALERS" + // "REGIONAL_INSTANCE_GROUP_MANAGERS" + // "RESOURCE_POLICIES" + // "ROUTERS" + // "ROUTES" + // "SECURITY_POLICIES" + // "SECURITY_POLICY_RULES" + // "SNAPSHOTS" + // "SSD_TOTAL_GB" + // "SSL_CERTIFICATES" + // "STATIC_ADDRESSES" + // "SUBNETWORKS" + // "TARGET_HTTPS_PROXIES" + // "TARGET_HTTP_PROXIES" + // "TARGET_INSTANCES" + // "TARGET_POOLS" + // "TARGET_SSL_PROXIES" + // "TARGET_TCP_PROXIES" + // "TARGET_VPN_GATEWAYS" + // "URL_MAPS" + // "VPN_GATEWAYS" + // "VPN_TUNNELS" + 
Metric string `json:"metric,omitempty"` + + // Owner: [Output Only] Owning resource. This is the resource on which + // this quota is applied. + Owner string `json:"owner,omitempty"` + + // Usage: [Output Only] Current usage of this metric. + Usage float64 `json:"usage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Limit") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -20068,7 +20808,132 @@ type RegionInstanceGroupManagersSetTemplateRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "InstanceTemplate") to + // NullFields is a list of field names (e.g. "Limit") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Quota) MarshalJSON() ([]byte, error) { + type NoMethod Quota + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *Quota) UnmarshalJSON(data []byte) error { + type NoMethod Quota + var s1 struct { + Limit gensupport.JSONFloat64 `json:"limit"` + Usage gensupport.JSONFloat64 `json:"usage"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Limit = float64(s1.Limit) + s.Usage = float64(s1.Usage) + return nil +} + +// Reference: Represents a reference to a resource. +type Reference struct { + // Kind: [Output Only] Type of the resource. Always compute#reference + // for references. + Kind string `json:"kind,omitempty"` + + // ReferenceType: A description of the reference type with no implied + // semantics. Possible values include: + // - MEMBER_OF + ReferenceType string `json:"referenceType,omitempty"` + + // Referrer: URL of the resource which refers to the target. + Referrer string `json:"referrer,omitempty"` + + // Target: URL of the resource to which this reference points. + Target string `json:"target,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Reference) MarshalJSON() ([]byte, error) { + type NoMethod Reference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Region: Region resource. 
(== resource_for beta.regions ==) (== +// resource_for v1.regions ==) +type Region struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: [Output Only] The deprecation status associated with this + // region. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] Textual description of the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#region for + // regions. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // Quotas: [Output Only] Quotas assigned to this region. + Quotas []*Quota `json:"quotas,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] Status of the region, either UP or DOWN. + // + // Possible values: + // "DOWN" + // "UP" + Status string `json:"status,omitempty"` + + // Zones: [Output Only] A list of zones available in this region, in the + // form of resource URLs. + Zones []string `json:"zones,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -20078,21 +20943,22 @@ type RegionInstanceGroupManagersSetTemplateRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagersSetTemplateRequest +func (s *Region) MarshalJSON() ([]byte, error) { + type NoMethod Region raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupsListInstances struct { +// RegionAutoscalerList: Contains a list of autoscalers. +type RegionAutoscalerList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of InstanceWithNamedPorts resources. - Items []*InstanceWithNamedPorts `json:"items,omitempty"` + // Items: A list of Autoscaler resources. + Items []*Autoscaler `json:"items,omitempty"` - // Kind: The resource type. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -20107,7 +20973,7 @@ type RegionInstanceGroupsListInstances struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. 
- Warning *RegionInstanceGroupsListInstancesWarning `json:"warning,omitempty"` + Warning *RegionAutoscalerListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -20130,15 +20996,15 @@ type RegionInstanceGroupsListInstances struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupsListInstances +func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { + type NoMethod RegionAutoscalerList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupsListInstancesWarning: [Output Only] Informational -// warning message. -type RegionInstanceGroupsListInstancesWarning struct { +// RegionAutoscalerListWarning: [Output Only] Informational warning +// message. +type RegionAutoscalerListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -20172,7 +21038,7 @@ type RegionInstanceGroupsListInstancesWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RegionInstanceGroupsListInstancesWarningData `json:"data,omitempty"` + Data []*RegionAutoscalerListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -20195,13 +21061,13 @@ type RegionInstanceGroupsListInstancesWarning struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupsListInstancesWarning +func (s *RegionAutoscalerListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionAutoscalerListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupsListInstancesWarningData struct { +type RegionAutoscalerListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -20232,96 +21098,22 @@ type RegionInstanceGroupsListInstancesWarningData struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupsListInstancesWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupsListInstancesRequest struct { - // InstanceState: Instances in which state should be returned. Valid - // options are: 'ALL', 'RUNNING'. By default, it lists all instances. - // - // Possible values: - // "ALL" - // "RUNNING" - InstanceState string `json:"instanceState,omitempty"` - - // PortName: Name of port user is interested in. It is optional. If it - // is set, only information about this ports will be returned. If it is - // not set, all the named ports will be returned. Always lists all - // instances. - PortName string `json:"portName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "InstanceState") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InstanceState") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupsListInstancesRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupsSetNamedPortsRequest struct { - // Fingerprint: The fingerprint of the named ports information for this - // instance group. Use this optional property to prevent conflicts when - // multiple users change the named ports settings concurrently. Obtain - // the fingerprint with the instanceGroups.get method. Then, include the - // fingerprint in your request to ensure that you do not overwrite - // changes that were applied from another concurrent request. - Fingerprint string `json:"fingerprint,omitempty"` - - // NamedPorts: The list of named ports to set for this instance group. - NamedPorts []*NamedPort `json:"namedPorts,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Fingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupsSetNamedPortsRequest +func (s *RegionAutoscalerListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionAutoscalerListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionList: Contains a list of region resources. -type RegionList struct { +type RegionDiskTypeList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Region resources. - Items []*Region `json:"items,omitempty"` + // Items: A list of DiskType resources. + Items []*DiskType `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#regionList for - // lists of regions. + // Kind: [Output Only] Type of resource. Always + // compute#regionDiskTypeList for region disk types. 
Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -20336,7 +21128,7 @@ type RegionList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *RegionListWarning `json:"warning,omitempty"` + Warning *RegionDiskTypeListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -20359,14 +21151,15 @@ type RegionList struct { NullFields []string `json:"-"` } -func (s *RegionList) MarshalJSON() ([]byte, error) { - type NoMethod RegionList +func (s *RegionDiskTypeList) MarshalJSON() ([]byte, error) { + type NoMethod RegionDiskTypeList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionListWarning: [Output Only] Informational warning message. -type RegionListWarning struct { +// RegionDiskTypeListWarning: [Output Only] Informational warning +// message. +type RegionDiskTypeListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -20400,7 +21193,7 @@ type RegionListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RegionListWarningData `json:"data,omitempty"` + Data []*RegionDiskTypeListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -20423,13 +21216,13 @@ type RegionListWarning struct { NullFields []string `json:"-"` } -func (s *RegionListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionListWarning +func (s *RegionDiskTypeListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionDiskTypeListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionListWarningData struct { +type RegionDiskTypeListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -20460,25 +21253,18 @@ type RegionListWarningData struct { NullFields []string `json:"-"` } -func (s *RegionListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionListWarningData +func (s *RegionDiskTypeListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionDiskTypeListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionSetLabelsRequest struct { - // LabelFingerprint: The fingerprint of the previous set of labels for - // this resource, used to detect conflicts. The fingerprint is initially - // generated by Compute Engine and changes after every request to modify - // or update labels. You must always provide an up-to-date fingerprint - // hash in order to update or change labels. Make a get() request to the - // resource to get the latest fingerprint. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: The labels to set for this resource. - Labels map[string]string `json:"labels,omitempty"` +type RegionDisksResizeRequest struct { + // SizeGb: The new size of the regional persistent disk, which is + // specified in GB. + SizeGb int64 `json:"sizeGb,omitempty,string"` - // ForceSendFields is a list of field names (e.g. 
"LabelFingerprint") to + // ForceSendFields is a list of field names (e.g. "SizeGb") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -20486,79 +21272,52 @@ type RegionSetLabelsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LabelFingerprint") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "SizeGb") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionSetLabelsRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionSetLabelsRequest +func (s *RegionDisksResizeRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionDisksResizeRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionSetPolicyRequest struct { - // Bindings: Flatten Policy to create a backwacd compatible wire-format. - // Deprecated. Use 'policy' to specify bindings. - Bindings []*Binding `json:"bindings,omitempty"` - - // Etag: Flatten Policy to create a backward compatible wire-format. - // Deprecated. Use 'policy' to specify the etag. - Etag string `json:"etag,omitempty"` +// RegionInstanceGroupList: Contains a list of InstanceGroup resources. +type RegionInstanceGroupList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Policy: REQUIRED: The complete policy to be applied to the - // 'resource'. The size of the policy is limited to a few 10s of KB. An - // empty policy is in general a valid policy but certain services (like - // Projects) might reject them. - Policy *Policy `json:"policy,omitempty"` + // Items: A list of InstanceGroup resources. + Items []*InstanceGroup `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "Bindings") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Kind: The resource type. + Kind string `json:"kind,omitempty"` - // NullFields is a list of field names (e.g. "Bindings") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. 
- // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` -func (s *RegionSetPolicyRequest) MarshalJSON() ([]byte, error) { - type NoMethod RegionSetPolicyRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` -// ResourceCommitment: Commitment for a particular resource (a -// Commitment is composed of one or more of these). -type ResourceCommitment struct { - // Amount: The amount of the resource purchased (in a type-dependent - // unit, such as bytes). For vCPUs, this can just be an integer. For - // memory, this must be provided in MB. Memory must be a multiple of 256 - // MB, with up to 6.5GB of memory per every vCPU. - Amount int64 `json:"amount,omitempty,string"` + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` - // Type: Type of resource for which this commitment applies. Possible - // values are VCPU and MEMORY - // - // Possible values: - // "MEMORY" - // "UNSPECIFIED" - // "VCPU" - Type string `json:"type,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Amount") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -20566,7 +21325,7 @@ type ResourceCommitment struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Amount") to include in API + // NullFields is a list of field names (e.g. "Id") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -20575,169 +21334,15 @@ type ResourceCommitment struct { NullFields []string `json:"-"` } -func (s *ResourceCommitment) MarshalJSON() ([]byte, error) { - type NoMethod ResourceCommitment +func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ResourceGroupReference struct { - // Group: A URI referencing one of the instance groups or network - // endpoint groups listed in the backend service. - Group string `json:"group,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Group") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Group") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { - type NoMethod ResourceGroupReference - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Route: Represents a Route resource. A route specifies how certain -// packets should be handled by the network. Routes are associated with -// instances by tags and the set of routes for a particular instance is -// called its routing table. -// -// For each packet leaving an instance, the system searches that -// instance's routing table for a single best matching route. Routes -// match packets by destination IP address, preferring smaller or more -// specific ranges over larger ones. If there is a tie, the system -// selects the route with the smallest priority value. If there is still -// a tie, it uses the layer three and four packet headers to select just -// one of the remaining matching routes. The packet is then forwarded as -// specified by the nextHop field of the winning route - either to -// another instance destination, an instance gateway, or a Google -// Compute Engine-operated gateway. -// -// Packets that do not match any route in the sending instance's routing -// table are dropped. (== resource_for beta.routes ==) (== resource_for -// v1.routes ==) -type Route struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // DestRange: The destination range of outgoing packets that this route - // applies to. Only IPv4 is supported. - DestRange string `json:"destRange,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of this resource. Always compute#routes for - // Route resources. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: Fully-qualified URL of the network that this route applies - // to. - Network string `json:"network,omitempty"` - - // NextHopGateway: The URL to a gateway that should handle matching - // packets. 
You can only specify the internet gateway using a full or - // partial valid URL: - // projects//global/gateways/default-internet-gateway - NextHopGateway string `json:"nextHopGateway,omitempty"` - - // NextHopInstance: The URL to an instance that should handle matching - // packets. You can specify this as a full or partial URL. For - // example: - // https://www.googleapis.com/compute/v1/projects/project/zones/ - // zone/instances/ - NextHopInstance string `json:"nextHopInstance,omitempty"` - - // NextHopIp: The network IP address of an instance that should handle - // matching packets. Only IPv4 is supported. - NextHopIp string `json:"nextHopIp,omitempty"` - - // NextHopNetwork: The URL of the local network if it should handle - // matching packets. - NextHopNetwork string `json:"nextHopNetwork,omitempty"` - - // NextHopPeering: [Output Only] The network peering name that should - // handle matching packets, which should conform to RFC1035. - NextHopPeering string `json:"nextHopPeering,omitempty"` - - // NextHopVpnTunnel: The URL to a VpnTunnel that should handle matching - // packets. - NextHopVpnTunnel string `json:"nextHopVpnTunnel,omitempty"` - - // Priority: The priority of this route. Priority is used to break ties - // in cases where there is more than one matching route of equal prefix - // length. In the case of two routes with equal prefix length, the one - // with the lowest-numbered priority value wins. Default value is 1000. - // Valid range is 0 through 65535. - Priority int64 `json:"priority,omitempty"` - - // SelfLink: [Output Only] Server-defined fully-qualified URL for this - // resource. - SelfLink string `json:"selfLink,omitempty"` - - // Tags: A list of instance tags to which this route applies. - Tags []string `json:"tags,omitempty"` - - // Warnings: [Output Only] If potential misconfigurations are detected - // for this route, this field will be populated with warning messages. - Warnings []*RouteWarnings `json:"warnings,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Route) MarshalJSON() ([]byte, error) { - type NoMethod Route - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RouteWarnings struct { +// RegionInstanceGroupListWarning: [Output Only] Informational warning +// message. +type RegionInstanceGroupListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. 
@@ -20771,7 +21376,7 @@ type RouteWarnings struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RouteWarningsData `json:"data,omitempty"` + Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -20794,13 +21399,13 @@ type RouteWarnings struct { NullFields []string `json:"-"` } -func (s *RouteWarnings) MarshalJSON() ([]byte, error) { - type NoMethod RouteWarnings +func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouteWarningsData struct { +type RegionInstanceGroupListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -20831,22 +21436,25 @@ type RouteWarningsData struct { NullFields []string `json:"-"` } -func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { - type NoMethod RouteWarningsData +func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouteList: Contains a list of Route resources. -type RouteList struct { +// RegionInstanceGroupManagerList: Contains a list of managed instance +// groups. +type RegionInstanceGroupManagerList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Route resources. - Items []*Route `json:"items,omitempty"` + // Items: A list of InstanceGroupManager resources. + Items []*InstanceGroupManager `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupManagerList for a list of managed instance + // groups that exist in th regional scope. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -20861,7 +21469,7 @@ type RouteList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *RouteListWarning `json:"warning,omitempty"` + Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -20884,14 +21492,15 @@ type RouteList struct { NullFields []string `json:"-"` } -func (s *RouteList) MarshalJSON() ([]byte, error) { - type NoMethod RouteList +func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagerList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouteListWarning: [Output Only] Informational warning message. -type RouteListWarning struct { +// RegionInstanceGroupManagerListWarning: [Output Only] Informational +// warning message. +type RegionInstanceGroupManagerListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. 
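The NextPageToken comments on the list types above (RegionInstanceGroupList, RegionInstanceGroupManagerList, and so on) describe the paging contract. A minimal sketch of the paging loop, assuming a constructed *compute.Service and the generated Regions.List call with its PageToken option; the call surface is an assumption from the generated client, while the empty-token termination follows the comments above.

package example

import compute "google.golang.org/api/compute/v1"

// listAllRegions pages through Regions.List using the NextPageToken contract
// documented above: keep passing the returned token as pageToken until it
// comes back empty.
func listAllRegions(svc *compute.Service, project string) ([]*compute.Region, error) {
	var all []*compute.Region
	pageToken := ""
	for {
		call := svc.Regions.List(project)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Items...)
		if page.NextPageToken == "" {
			return all, nil
		}
		pageToken = page.NextPageToken
	}
}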
@@ -20925,7 +21534,7 @@ type RouteListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RouteListWarningData `json:"data,omitempty"` + Data []*RegionInstanceGroupManagerListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -20948,13 +21557,13 @@ type RouteListWarning struct { NullFields []string `json:"-"` } -func (s *RouteListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RouteListWarning +func (s *RegionInstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagerListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouteListWarningData struct { +type RegionInstanceGroupManagerListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -20985,71 +21594,79 @@ type RouteListWarningData struct { NullFields []string `json:"-"` } -func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RouteListWarningData +func (s *RegionInstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagerListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Router: Router resource. -type Router struct { - // Bgp: BGP information specific to this router. - Bgp *RouterBgp `json:"bgp,omitempty"` - - // BgpPeers: BGP information that needs to be configured into the - // routing stack to establish the BGP peering. It must specify peer ASN - // and either interface name, IP, or peer IP. Please refer to RFC4273. - BgpPeers []*RouterBgpPeer `json:"bgpPeers,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` +type RegionInstanceGroupManagersAbandonInstancesRequest struct { + // Instances: The URLs of one or more instances to abandon. This can be + // a full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Interfaces: Router interfaces. Each interface requires either one - // linked resource (e.g. linkedVpnTunnel), or IP address and IP address - // range (e.g. ipRange), or both. - Interfaces []*RouterInterface `json:"interfaces,omitempty"` + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // Kind: [Output Only] Type of resource. Always compute#router for - // routers. - Kind string `json:"kind,omitempty"` +func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersAbandonInstancesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` +type RegionInstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The URLs of one or more instances to delete. This can be a + // full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` - // Nats: A list of Nat services created in this router. - Nats []*RouterNat `json:"nats,omitempty"` + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Network: URI of the network to which this router belongs. - Network string `json:"network,omitempty"` + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // Region: [Output Only] URI of the region where the router resides. You - // must specify this field as part of the HTTP request URL. It is not - // settable as a field in the request body. - Region string `json:"region,omitempty"` +func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersDeleteInstancesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` +type RegionInstanceGroupManagersListInstancesResponse struct { + // ManagedInstances: A list of managed instances. + ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. 
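The Instances comments above note that either full or partial instance URLs are accepted (zones/[ZONE]/instances/[INSTANCE_NAME]). A small sketch building an abandon request from the partial-URL form; the zone and instance names are placeholders, and passing the result to the generated RegionInstanceGroupManagers.AbandonInstances call is assumed rather than shown.

package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// abandonRequest builds the request body using the partial-URL form that the
// Instances comment above allows.
func abandonRequest(zone, instance string) *compute.RegionInstanceGroupManagersAbandonInstancesRequest {
	return &compute.RegionInstanceGroupManagersAbandonInstancesRequest{
		Instances: []string{fmt.Sprintf("zones/%s/instances/%s", zone, instance)},
	}
}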
googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Bgp") to + // ForceSendFields is a list of field names (e.g. "ManagedInstances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -21057,8 +21674,38 @@ type Router struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bgp") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "ManagedInstances") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersListInstancesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagersRecreateRequest struct { + // Instances: The URLs of one or more instances to recreate. This can be + // a full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -21066,23 +21713,24 @@ type Router struct { NullFields []string `json:"-"` } -func (s *Router) MarshalJSON() ([]byte, error) { - type NoMethod Router +func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersRecreateRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterAdvertisedIpRange: Description-tagged IP ranges for the router -// to advertise. -type RouterAdvertisedIpRange struct { - // Description: User-specified description for the IP range. - Description string `json:"description,omitempty"` +type RegionInstanceGroupManagersSetTargetPoolsRequest struct { + // Fingerprint: Fingerprint of the target pools information, which is a + // hash of the contents. This field is used for optimistic locking when + // you update the target pool entries. This field is optional. + Fingerprint string `json:"fingerprint,omitempty"` - // Range: The IP range to advertise. The value must be a CIDR-formatted - // string. 
- Range string `json:"range,omitempty"` + // TargetPools: The URL of all TargetPool resources to which instances + // in the instanceGroup field are added. The target pools automatically + // apply to all of the instances in the managed instance group. + TargetPools []string `json:"targetPools,omitempty"` - // ForceSendFields is a list of field names (e.g. "Description") to + // ForceSendFields is a list of field names (e.g. "Fingerprint") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -21090,7 +21738,7 @@ type RouterAdvertisedIpRange struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include + // NullFields is a list of field names (e.g. "Fingerprint") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -21099,22 +21747,50 @@ type RouterAdvertisedIpRange struct { NullFields []string `json:"-"` } -func (s *RouterAdvertisedIpRange) MarshalJSON() ([]byte, error) { - type NoMethod RouterAdvertisedIpRange +func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersSetTargetPoolsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterAggregatedList: Contains a list of routers. -type RouterAggregatedList struct { +type RegionInstanceGroupManagersSetTemplateRequest struct { + // InstanceTemplate: URL of the InstanceTemplate resource from which all + // new instances will be created. + InstanceTemplate string `json:"instanceTemplate,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceTemplate") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagersSetTemplateRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupsListInstances struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Router resources. - Items map[string]RoutersScopedList `json:"items,omitempty"` + // Items: A list of InstanceWithNamedPorts resources. + Items []*InstanceWithNamedPorts `json:"items,omitempty"` - // Kind: Type of resource. 
+ // Kind: The resource type. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -21129,7 +21805,7 @@ type RouterAggregatedList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *RouterAggregatedListWarning `json:"warning,omitempty"` + Warning *RegionInstanceGroupsListInstancesWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -21152,15 +21828,15 @@ type RouterAggregatedList struct { NullFields []string `json:"-"` } -func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod RouterAggregatedList +func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupsListInstances raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterAggregatedListWarning: [Output Only] Informational warning -// message. -type RouterAggregatedListWarning struct { +// RegionInstanceGroupsListInstancesWarning: [Output Only] Informational +// warning message. +type RegionInstanceGroupsListInstancesWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -21194,7 +21870,7 @@ type RouterAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RouterAggregatedListWarningData `json:"data,omitempty"` + Data []*RegionInstanceGroupsListInstancesWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -21217,13 +21893,13 @@ type RouterAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RouterAggregatedListWarning +func (s *RegionInstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupsListInstancesWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterAggregatedListWarningData struct { +type RegionInstanceGroupsListInstancesWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -21254,46 +21930,28 @@ type RouterAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RouterAggregatedListWarningData +func (s *RegionInstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupsListInstancesWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterBgp struct { - // AdvertiseMode: User-specified flag to indicate which mode to use for - // advertisement. - // - // Possible values: - // "CUSTOM" - // "DEFAULT" - AdvertiseMode string `json:"advertiseMode,omitempty"` - - // AdvertisedGroups: User-specified list of prefix groups to advertise - // in custom mode. This field can only be populated if advertise_mode is - // CUSTOM and is advertised to all peers of the router. 
These groups - // will be advertised in addition to any specified prefixes. Leave this - // field blank to advertise no custom groups. +type RegionInstanceGroupsListInstancesRequest struct { + // InstanceState: Instances in which state should be returned. Valid + // options are: 'ALL', 'RUNNING'. By default, it lists all instances. // // Possible values: - // "ALL_SUBNETS" - AdvertisedGroups []string `json:"advertisedGroups,omitempty"` - - // AdvertisedIpRanges: User-specified list of individual IP ranges to - // advertise in custom mode. This field can only be populated if - // advertise_mode is CUSTOM and is advertised to all peers of the - // router. These IP ranges will be advertised in addition to any - // specified groups. Leave this field blank to advertise no custom IP - // ranges. - AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` + // "ALL" + // "RUNNING" + InstanceState string `json:"instanceState,omitempty"` - // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 - // private ASN, either 16-bit or 32-bit. The value will be fixed for - // this router resource. All VPN tunnels that link to this router will - // have the same local ASN. - Asn int64 `json:"asn,omitempty"` + // PortName: Name of port user is interested in. It is optional. If it + // is set, only information about this ports will be returned. If it is + // not set, all the named ports will be returned. Always lists all + // instances. + PortName string `json:"portName,omitempty"` - // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to + // ForceSendFields is a list of field names (e.g. "InstanceState") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -21301,7 +21959,7 @@ type RouterBgp struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AdvertiseMode") to include + // NullFields is a list of field names (e.g. "InstanceState") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -21310,172 +21968,58 @@ type RouterBgp struct { NullFields []string `json:"-"` } -func (s *RouterBgp) MarshalJSON() ([]byte, error) { - type NoMethod RouterBgp +func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupsListInstancesRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterBgpPeer struct { - // AdvertiseMode: User-specified flag to indicate which mode to use for - // advertisement. - // - // Possible values: - // "CUSTOM" - // "DEFAULT" - AdvertiseMode string `json:"advertiseMode,omitempty"` +type RegionInstanceGroupsSetNamedPortsRequest struct { + // Fingerprint: The fingerprint of the named ports information for this + // instance group. Use this optional property to prevent conflicts when + // multiple users change the named ports settings concurrently. Obtain + // the fingerprint with the instanceGroups.get method. Then, include the + // fingerprint in your request to ensure that you do not overwrite + // changes that were applied from another concurrent request. 
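The Fingerprint comment above spells out a read-modify-write pattern for named ports. A minimal sketch of that flow, assuming a constructed *compute.Service and the generated RegionInstanceGroups.Get and RegionInstanceGroups.SetNamedPorts calls (those call names are assumptions from the generated surface); the fingerprint handling itself follows the comment above.

package example

import compute "google.golang.org/api/compute/v1"

// setHTTPNamedPort follows the optimistic-locking flow described above:
// read the group to obtain the current fingerprint, then echo it back so a
// concurrent change is detected instead of being silently overwritten.
func setHTTPNamedPort(svc *compute.Service, project, region, group string) error {
	ig, err := svc.RegionInstanceGroups.Get(project, region, group).Do()
	if err != nil {
		return err
	}
	req := &compute.RegionInstanceGroupsSetNamedPortsRequest{
		Fingerprint: ig.Fingerprint,
		NamedPorts: []*compute.NamedPort{
			{Name: "http", Port: 8080}, // illustrative port mapping
		},
	}
	_, err = svc.RegionInstanceGroups.SetNamedPorts(project, region, group, req).Do()
	return err
}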
+ Fingerprint string `json:"fingerprint,omitempty"` - // AdvertisedGroups: User-specified list of prefix groups to advertise - // in custom mode. This field can only be populated if advertise_mode is - // CUSTOM and overrides the list defined for the router (in Bgp - // message). These groups will be advertised in addition to any - // specified prefixes. Leave this field blank to advertise no custom - // groups. - // - // Possible values: - // "ALL_SUBNETS" - AdvertisedGroups []string `json:"advertisedGroups,omitempty"` + // NamedPorts: The list of named ports to set for this instance group. + NamedPorts []*NamedPort `json:"namedPorts,omitempty"` - // AdvertisedIpRanges: User-specified list of individual IP ranges to - // advertise in custom mode. This field can only be populated if - // advertise_mode is CUSTOM and overrides the list defined for the - // router (in Bgp message). These IP ranges will be advertised in - // addition to any specified groups. Leave this field blank to advertise - // no custom IP ranges. - AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` + // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // AdvertisedRoutePriority: The priority of routes advertised to this - // BGP peer. In the case where there is more than one matching route of - // maximum length, the routes with lowest priority value win. - AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // InterfaceName: Name of the interface the BGP peer is associated with. - InterfaceName string `json:"interfaceName,omitempty"` +func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupsSetNamedPortsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // IpAddress: IP address of the interface inside Google Cloud Platform. - // Only IPv4 is supported. - IpAddress string `json:"ipAddress,omitempty"` - - // ManagementType: [Output Only] The resource that configures and - // manages this BGP peer. MANAGED_BY_USER is the default value and can - // be managed by you or other users; MANAGED_BY_ATTACHMENT is a BGP peer - // that is configured and managed by Cloud Interconnect, specifically by - // an InterconnectAttachment of type PARTNER. Google will automatically - // create, update, and delete this type of BGP peer when the PARTNER - // InterconnectAttachment is created, updated, or deleted. - // - // Possible values: - // "MANAGED_BY_ATTACHMENT" - // "MANAGED_BY_USER" - ManagementType string `json:"managementType,omitempty"` - - // Name: Name of this BGP peer. 
The name must be 1-63 characters long - // and comply with RFC1035. - Name string `json:"name,omitempty"` - - // PeerAsn: Peer BGP Autonomous System Number (ASN). For VPN use case, - // this value can be different for every tunnel. - PeerAsn int64 `json:"peerAsn,omitempty"` - - // PeerIpAddress: IP address of the BGP interface outside Google cloud. - // Only IPv4 is supported. - PeerIpAddress string `json:"peerIpAddress,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AdvertiseMode") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { - type NoMethod RouterBgpPeer - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RouterInterface struct { - // IpRange: IP address and range of the interface. The IP range must be - // in the RFC3927 link-local IP space. The value must be a - // CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not - // truncate the address as it represents the IP address of the - // interface. - IpRange string `json:"ipRange,omitempty"` - - // LinkedInterconnectAttachment: URI of the linked interconnect - // attachment. It must be in the same region as the router. Each - // interface can have at most one linked resource and it could either be - // a VPN Tunnel or an interconnect attachment. - LinkedInterconnectAttachment string `json:"linkedInterconnectAttachment,omitempty"` - - // LinkedVpnTunnel: URI of the linked VPN tunnel. It must be in the same - // region as the router. Each interface can have at most one linked - // resource and it could either be a VPN Tunnel or an interconnect - // attachment. - LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` - - // ManagementType: [Output Only] The resource that configures and - // manages this interface. MANAGED_BY_USER is the default value and can - // be managed by you or other users; MANAGED_BY_ATTACHMENT is an - // interface that is configured and managed by Cloud Interconnect, - // specifically by an InterconnectAttachment of type PARTNER. Google - // will automatically create, update, and delete this type of interface - // when the PARTNER InterconnectAttachment is created, updated, or - // deleted. - // - // Possible values: - // "MANAGED_BY_ATTACHMENT" - // "MANAGED_BY_USER" - ManagementType string `json:"managementType,omitempty"` - - // Name: Name of this interface entry. The name must be 1-63 characters - // long and comply with RFC1035. - Name string `json:"name,omitempty"` - - // ForceSendFields is a list of field names (e.g. "IpRange") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "IpRange") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RouterInterface) MarshalJSON() ([]byte, error) { - type NoMethod RouterInterface - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RouterList: Contains a list of Router resources. -type RouterList struct { +// RegionList: Contains a list of region resources. +type RegionList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Router resources. - Items []*Router `json:"items,omitempty"` + // Items: A list of Region resources. + Items []*Region `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#router for - // routers. + // Kind: [Output Only] Type of resource. Always compute#regionList for + // lists of regions. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -21490,7 +22034,7 @@ type RouterList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *RouterListWarning `json:"warning,omitempty"` + Warning *RegionListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -21513,14 +22057,14 @@ type RouterList struct { NullFields []string `json:"-"` } -func (s *RouterList) MarshalJSON() ([]byte, error) { - type NoMethod RouterList +func (s *RegionList) MarshalJSON() ([]byte, error) { + type NoMethod RegionList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterListWarning: [Output Only] Informational warning message. -type RouterListWarning struct { +// RegionListWarning: [Output Only] Informational warning message. +type RegionListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -21554,7 +22098,7 @@ type RouterListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RouterListWarningData `json:"data,omitempty"` + Data []*RegionListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
@@ -21577,13 +22121,13 @@ type RouterListWarning struct { NullFields []string `json:"-"` } -func (s *RouterListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RouterListWarning +func (s *RegionListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterListWarningData struct { +type RegionListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -21614,84 +22158,33 @@ type RouterListWarningData struct { NullFields []string `json:"-"` } -func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RouterListWarningData +func (s *RegionListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterNat: Represents a Nat resource. It enables the VMs within the -// specified subnetworks to access Internet without external IP -// addresses. It specifies a list of subnetworks (and the ranges within) -// that want to use NAT. Customers can also provide the external IPs -// that would be used for NAT. GCP would auto-allocate ephemeral IPs if -// no external IPs are provided. -type RouterNat struct { - // IcmpIdleTimeoutSec: Timeout (in seconds) for ICMP connections. - // Defaults to 30s if not set. - IcmpIdleTimeoutSec int64 `json:"icmpIdleTimeoutSec,omitempty"` - - // MinPortsPerVm: Minimum number of ports allocated to a VM from this - // NAT config. If not set, a default number of ports is allocated to a - // VM. This gets rounded up to the nearest power of 2. Eg. if the value - // of this field is 50, at least 64 ports will be allocated to a VM. - MinPortsPerVm int64 `json:"minPortsPerVm,omitempty"` - - // Name: Unique name of this Nat service. The name must be 1-63 - // characters long and comply with RFC1035. - Name string `json:"name,omitempty"` - - // NatIpAllocateOption: Specify the NatIpAllocateOption. If it is - // AUTO_ONLY, then nat_ip should be empty. - // - // Possible values: - // "AUTO_ONLY" - // "MANUAL_ONLY" - NatIpAllocateOption string `json:"natIpAllocateOption,omitempty"` - - // NatIps: A list of URLs of the IP resources used for this Nat service. - // These IPs must be valid static external IP addresses assigned to the - // project. max_length is subject to change post alpha. - NatIps []string `json:"natIps,omitempty"` - - // SourceSubnetworkIpRangesToNat: Specify the Nat option. If this field - // contains ALL_SUBNETWORKS_ALL_IP_RANGES or - // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any - // other Router.Nat section in any Router for this network in this - // region. - // - // Possible values: - // "ALL_SUBNETWORKS_ALL_IP_RANGES" - // "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES" - // "LIST_OF_SUBNETWORKS" - SourceSubnetworkIpRangesToNat string `json:"sourceSubnetworkIpRangesToNat,omitempty"` - - // Subnetworks: A list of Subnetwork resources whose traffic should be - // translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS - // is selected for the SubnetworkIpRangeToNatOption above. - Subnetworks []*RouterNatSubnetworkToNat `json:"subnetworks,omitempty"` - - // TcpEstablishedIdleTimeoutSec: Timeout (in seconds) for TCP - // established connections. Defaults to 1200s if not set. 
- TcpEstablishedIdleTimeoutSec int64 `json:"tcpEstablishedIdleTimeoutSec,omitempty"` - - // TcpTransitoryIdleTimeoutSec: Timeout (in seconds) for TCP transitory - // connections. Defaults to 30s if not set. - TcpTransitoryIdleTimeoutSec int64 `json:"tcpTransitoryIdleTimeoutSec,omitempty"` +type RegionSetLabelsRequest struct { + // LabelFingerprint: The fingerprint of the previous set of labels for + // this resource, used to detect conflicts. The fingerprint is initially + // generated by Compute Engine and changes after every request to modify + // or update labels. You must always provide an up-to-date fingerprint + // hash in order to update or change labels. Make a get() request to the + // resource to get the latest fingerprint. + LabelFingerprint string `json:"labelFingerprint,omitempty"` - // UdpIdleTimeoutSec: Timeout (in seconds) for UDP connections. Defaults - // to 30s if not set. - UdpIdleTimeoutSec int64 `json:"udpIdleTimeoutSec,omitempty"` + // Labels: The labels to set for this resource. + Labels map[string]string `json:"labels,omitempty"` - // ForceSendFields is a list of field names (e.g. "IcmpIdleTimeoutSec") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IcmpIdleTimeoutSec") to + // NullFields is a list of field names (e.g. "LabelFingerprint") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -21701,37 +22194,28 @@ type RouterNat struct { NullFields []string `json:"-"` } -func (s *RouterNat) MarshalJSON() ([]byte, error) { - type NoMethod RouterNat +func (s *RegionSetLabelsRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionSetLabelsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterNatSubnetworkToNat: Defines the IP ranges that want to use NAT -// for a subnetwork. -type RouterNatSubnetworkToNat struct { - // Name: URL for the subnetwork resource to use NAT. - Name string `json:"name,omitempty"` +type RegionSetPolicyRequest struct { + // Bindings: Flatten Policy to create a backwacd compatible wire-format. + // Deprecated. Use 'policy' to specify bindings. + Bindings []*Binding `json:"bindings,omitempty"` - // SecondaryIpRangeNames: A list of the secondary ranges of the - // Subnetwork that are allowed to use NAT. This can be populated only if - // "LIST_OF_SECONDARY_IP_RANGES" is one of the values in - // source_ip_ranges_to_nat. - SecondaryIpRangeNames []string `json:"secondaryIpRangeNames,omitempty"` + // Etag: Flatten Policy to create a backward compatible wire-format. + // Deprecated. Use 'policy' to specify the etag. + Etag string `json:"etag,omitempty"` - // SourceIpRangesToNat: Specify the options for NAT ranges in the - // Subnetwork. All usages of single value are valid except - // NAT_IP_RANGE_OPTION_UNSPECIFIED. 
The only valid option with multiple - // values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] - // Default: [ALL_IP_RANGES] - // - // Possible values: - // "ALL_IP_RANGES" - // "LIST_OF_SECONDARY_IP_RANGES" - // "PRIMARY_IP_RANGE" - SourceIpRangesToNat []string `json:"sourceIpRangesToNat,omitempty"` + // Policy: REQUIRED: The complete policy to be applied to the + // 'resource'. The size of the policy is limited to a few 10s of KB. An + // empty policy is in general a valid policy but certain services (like + // Projects) might reject them. + Policy *Policy `json:"policy,omitempty"` - // ForceSendFields is a list of field names (e.g. "Name") to + // ForceSendFields is a list of field names (e.g. "Bindings") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -21739,8 +22223,8 @@ type RouterNatSubnetworkToNat struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Name") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Bindings") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -21748,27 +22232,31 @@ type RouterNatSubnetworkToNat struct { NullFields []string `json:"-"` } -func (s *RouterNatSubnetworkToNat) MarshalJSON() ([]byte, error) { - type NoMethod RouterNatSubnetworkToNat +func (s *RegionSetPolicyRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionSetPolicyRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatus struct { - // BestRoutes: Best routes for this router's network. - BestRoutes []*Route `json:"bestRoutes,omitempty"` - - // BestRoutesForRouter: Best routes learned by this router. - BestRoutesForRouter []*Route `json:"bestRoutesForRouter,omitempty"` - - BgpPeerStatus []*RouterStatusBgpPeerStatus `json:"bgpPeerStatus,omitempty"` - - NatStatus []*RouterStatusNatStatus `json:"natStatus,omitempty"` +// ResourceCommitment: Commitment for a particular resource (a +// Commitment is composed of one or more of these). +type ResourceCommitment struct { + // Amount: The amount of the resource purchased (in a type-dependent + // unit, such as bytes). For vCPUs, this can just be an integer. For + // memory, this must be provided in MB. Memory must be a multiple of 256 + // MB, with up to 6.5GB of memory per every vCPU. + Amount int64 `json:"amount,omitempty,string"` - // Network: URI of the network to which this router belongs. - Network string `json:"network,omitempty"` + // Type: Type of resource for which this commitment applies. Possible + // values are VCPU and MEMORY + // + // Possible values: + // "MEMORY" + // "UNSPECIFIED" + // "VCPU" + Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "BestRoutes") to + // ForceSendFields is a list of field names (e.g. "Amount") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -21776,8 +22264,8 @@ type RouterStatus struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BestRoutes") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Amount") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -21785,50 +22273,18 @@ type RouterStatus struct { NullFields []string `json:"-"` } -func (s *RouterStatus) MarshalJSON() ([]byte, error) { - type NoMethod RouterStatus +func (s *ResourceCommitment) MarshalJSON() ([]byte, error) { + type NoMethod ResourceCommitment raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatusBgpPeerStatus struct { - // AdvertisedRoutes: Routes that were advertised to the remote BGP peer - AdvertisedRoutes []*Route `json:"advertisedRoutes,omitempty"` - - // IpAddress: IP address of the local BGP interface. - IpAddress string `json:"ipAddress,omitempty"` - - // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. - LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` - - // Name: Name of this BGP peer. Unique within the Routers resource. - Name string `json:"name,omitempty"` - - // NumLearnedRoutes: Number of routes learned from the remote BGP Peer. - NumLearnedRoutes int64 `json:"numLearnedRoutes,omitempty"` - - // PeerIpAddress: IP address of the remote BGP interface. - PeerIpAddress string `json:"peerIpAddress,omitempty"` - - // State: BGP state as specified in RFC1771. - State string `json:"state,omitempty"` - - // Status: Status of the BGP peer: {UP, DOWN} - // - // Possible values: - // "DOWN" - // "UNKNOWN" - // "UP" - Status string `json:"status,omitempty"` - - // Uptime: Time this session has been up. Format: 14 years, 51 weeks, 6 - // days, 23 hours, 59 minutes, 59 seconds - Uptime string `json:"uptime,omitempty"` - - // UptimeSeconds: Time this session has been up, in seconds. Format: 145 - UptimeSeconds string `json:"uptimeSeconds,omitempty"` +type ResourceGroupReference struct { + // Group: A URI referencing one of the instance groups or network + // endpoint groups listed in the backend service. + Group string `json:"group,omitempty"` - // ForceSendFields is a list of field names (e.g. "AdvertisedRoutes") to + // ForceSendFields is a list of field names (e.g. "Group") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -21836,50 +22292,126 @@ type RouterStatusBgpPeerStatus struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AdvertisedRoutes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. 
This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Group") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { - type NoMethod RouterStatusBgpPeerStatus +func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { + type NoMethod ResourceGroupReference raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterStatusNatStatus: Status of a NAT contained in this router. -type RouterStatusNatStatus struct { - // AutoAllocatedNatIps: A list of IPs auto-allocated for NAT. Example: - // ["1.1.1.1", "129.2.16.89"] - AutoAllocatedNatIps []string `json:"autoAllocatedNatIps,omitempty"` +// Route: Represents a Route resource. A route specifies how certain +// packets should be handled by the network. Routes are associated with +// instances by tags and the set of routes for a particular instance is +// called its routing table. +// +// For each packet leaving an instance, the system searches that +// instance's routing table for a single best matching route. Routes +// match packets by destination IP address, preferring smaller or more +// specific ranges over larger ones. If there is a tie, the system +// selects the route with the smallest priority value. If there is still +// a tie, it uses the layer three and four packet headers to select just +// one of the remaining matching routes. The packet is then forwarded as +// specified by the nextHop field of the winning route - either to +// another instance destination, an instance gateway, or a Google +// Compute Engine-operated gateway. +// +// Packets that do not match any route in the sending instance's routing +// table are dropped. (== resource_for beta.routes ==) (== resource_for +// v1.routes ==) +type Route struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // MinExtraNatIpsNeeded: The number of extra IPs to allocate. This will - // be greater than 0 only if user-specified IPs are NOT enough to allow - // all configured VMs to use NAT. This value is meaningful only when - // auto-allocation of NAT IPs is *not* used. - MinExtraNatIpsNeeded int64 `json:"minExtraNatIpsNeeded,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // Name: Unique name of this NAT. + // DestRange: The destination range of outgoing packets that this route + // applies to. Only IPv4 is supported. + DestRange string `json:"destRange,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of this resource. Always compute#routes for + // Route resources. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. 
Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // NumVmEndpointsWithNatMappings: Number of VM endpoints (i.e., Nics) - // that can use NAT. - NumVmEndpointsWithNatMappings int64 `json:"numVmEndpointsWithNatMappings,omitempty"` + // Network: Fully-qualified URL of the network that this route applies + // to. + Network string `json:"network,omitempty"` - // UserAllocatedNatIpResources: A list of fully qualified URLs of - // reserved IP address resources. - UserAllocatedNatIpResources []string `json:"userAllocatedNatIpResources,omitempty"` + // NextHopGateway: The URL to a gateway that should handle matching + // packets. You can only specify the internet gateway using a full or + // partial valid URL: + // projects//global/gateways/default-internet-gateway + NextHopGateway string `json:"nextHopGateway,omitempty"` - // UserAllocatedNatIps: A list of IPs user-allocated for NAT. They will - // be raw IP strings like "179.12.26.133". - UserAllocatedNatIps []string `json:"userAllocatedNatIps,omitempty"` + // NextHopInstance: The URL to an instance that should handle matching + // packets. You can specify this as a full or partial URL. For + // example: + // https://www.googleapis.com/compute/v1/projects/project/zones/ + // zone/instances/ + NextHopInstance string `json:"nextHopInstance,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoAllocatedNatIps") + // NextHopIp: The network IP address of an instance that should handle + // matching packets. Only IPv4 is supported. + NextHopIp string `json:"nextHopIp,omitempty"` + + // NextHopNetwork: The URL of the local network if it should handle + // matching packets. + NextHopNetwork string `json:"nextHopNetwork,omitempty"` + + // NextHopPeering: [Output Only] The network peering name that should + // handle matching packets, which should conform to RFC1035. + NextHopPeering string `json:"nextHopPeering,omitempty"` + + // NextHopVpnTunnel: The URL to a VpnTunnel that should handle matching + // packets. + NextHopVpnTunnel string `json:"nextHopVpnTunnel,omitempty"` + + // Priority: The priority of this route. Priority is used to break ties + // in cases where there is more than one matching route of equal prefix + // length. In the case of two routes with equal prefix length, the one + // with the lowest-numbered priority value wins. Default value is 1000. + // Valid range is 0 through 65535. + Priority int64 `json:"priority,omitempty"` + + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. + SelfLink string `json:"selfLink,omitempty"` + + // Tags: A list of instance tags to which this route applies. + Tags []string `json:"tags,omitempty"` + + // Warnings: [Output Only] If potential misconfigurations are detected + // for this route, this field will be populated with warning messages. + Warnings []*RouteWarnings `json:"warnings,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -21887,7 +22419,7 @@ type RouterStatusNatStatus struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoAllocatedNatIps") to + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -21897,23 +22429,53 @@ type RouterStatusNatStatus struct { NullFields []string `json:"-"` } -func (s *RouterStatusNatStatus) MarshalJSON() ([]byte, error) { - type NoMethod RouterStatusNatStatus +func (s *Route) MarshalJSON() ([]byte, error) { + type NoMethod Route raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatusResponse struct { - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` +type RouteWarnings struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - Result *RouterStatus `json:"result,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouteWarningsData `json:"data,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -21921,7 +22483,7 @@ type RouterStatusResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -21930,21 +22492,27 @@ type RouterStatusResponse struct { NullFields []string `json:"-"` } -func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { - type NoMethod RouterStatusResponse +func (s *RouteWarnings) MarshalJSON() ([]byte, error) { + type NoMethod RouteWarnings raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersPreviewResponse struct { - // Resource: Preview of given router. - Resource *Router `json:"resource,omitempty"` +type RouteWarningsData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Resource") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -21952,8 +22520,8 @@ type RoutersPreviewResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Resource") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -21961,21 +22529,43 @@ type RoutersPreviewResponse struct { NullFields []string `json:"-"` } -func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { - type NoMethod RoutersPreviewResponse +func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { + type NoMethod RouteWarningsData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersScopedList struct { - // Routers: A list of routers contained in this scope. - Routers []*Router `json:"routers,omitempty"` +// RouteList: Contains a list of Route resources. +type RouteList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Warning: Informational warning which replaces the list of routers - // when the list is empty. - Warning *RoutersScopedListWarning `json:"warning,omitempty"` + // Items: A list of Route resources. + Items []*Route `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "Routers") to + // Kind: Type of resource. 
+ Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *RouteListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -21983,8 +22573,8 @@ type RoutersScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Routers") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -21992,15 +22582,14 @@ type RoutersScopedList struct { NullFields []string `json:"-"` } -func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { - type NoMethod RoutersScopedList +func (s *RouteList) MarshalJSON() ([]byte, error) { + type NoMethod RouteList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RoutersScopedListWarning: Informational warning which replaces the -// list of routers when the list is empty. -type RoutersScopedListWarning struct { +// RouteListWarning: [Output Only] Informational warning message. +type RouteListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -22034,7 +22623,7 @@ type RoutersScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RoutersScopedListWarningData `json:"data,omitempty"` + Data []*RouteListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -22057,13 +22646,13 @@ type RoutersScopedListWarning struct { NullFields []string `json:"-"` } -func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RoutersScopedListWarning +func (s *RouteListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RouteListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersScopedListWarningData struct { +type RouteListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -22094,103 +22683,72 @@ type RoutersScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RoutersScopedListWarningData +func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RouteListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Rule: A rule to be applied in a Policy. -type Rule struct { - // Action: Required - // - // Possible values: - // "ALLOW" - // "ALLOW_WITH_LOG" - // "DENY" - // "DENY_WITH_LOG" - // "LOG" - // "NO_ACTION" - Action string `json:"action,omitempty"` - - // Conditions: Additional restrictions that must be met. All conditions - // must pass for the rule to match. - Conditions []*Condition `json:"conditions,omitempty"` - - // Description: Human-readable description of the rule. - Description string `json:"description,omitempty"` +// Router: Router resource. +type Router struct { + // Bgp: BGP information specific to this router. + Bgp *RouterBgp `json:"bgp,omitempty"` - // Ins: If one or more 'in' clauses are specified, the rule matches if - // the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. - Ins []string `json:"ins,omitempty"` + // BgpPeers: BGP information that must be configured into the routing + // stack to establish BGP peering. This information must specify the + // peer ASN and either the interface name, IP address, or peer IP + // address. Please refer to RFC4273. + BgpPeers []*RouterBgpPeer `json:"bgpPeers,omitempty"` - // LogConfigs: The config returned to callers of - // tech.iam.IAM.CheckPolicy for any entries that match the LOG action. - LogConfigs []*LogConfig `json:"logConfigs,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // NotIns: If one or more 'not_in' clauses are specified, the rule - // matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the - // entries. - NotIns []string `json:"notIns,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // Permissions: A permission is a string of form '..' (e.g., - // 'storage.buckets.list'). A value of '*' matches all permissions, and - // a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs. - Permissions []string `json:"permissions,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` - // ForceSendFields is a list of field names (e.g. "Action") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Interfaces: Router interfaces. Each interface requires either one + // linked resource, (for example, linkedVpnTunnel), or IP address and IP + // address range (for example, ipRange), or both. 
+ Interfaces []*RouterInterface `json:"interfaces,omitempty"` - // NullFields is a list of field names (e.g. "Action") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // Kind: [Output Only] Type of resource. Always compute#router for + // routers. + Kind string `json:"kind,omitempty"` -func (s *Rule) MarshalJSON() ([]byte, error) { - type NoMethod Rule - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` -type SSLHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. - Port int64 `json:"port,omitempty"` + // Nats: A list of NAT services created in this router. + Nats []*RouterNat `json:"nats,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` + // Network: URI of the network to which this router belongs. + Network string `json:"network,omitempty"` - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` + // Region: [Output Only] URI of the region where the router resides. You + // must specify this field as part of the HTTP request URL. It is not + // settable as a field in the request body. + Region string `json:"region,omitempty"` - // Request: The application data to send once the SSL connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. - Request string `json:"request,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. - Response string `json:"response,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Port") to + // ForceSendFields is a list of field names (e.g. "Bgp") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -22198,7 +22756,7 @@ type SSLHealthCheck struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Port") to include in API + // NullFields is a list of field names (e.g. "Bgp") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -22207,85 +22765,23 @@ type SSLHealthCheck struct { NullFields []string `json:"-"` } -func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { - type NoMethod SSLHealthCheck - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Scheduling: Sets the scheduling options for an Instance. -type Scheduling struct { - // AutomaticRestart: Specifies whether the instance should be - // automatically restarted if it is terminated by Compute Engine (not - // terminated by a user). You can only set the automatic restart option - // for standard instances. Preemptible instances cannot be automatically - // restarted. - // - // By default, this is set to true so an instance is automatically - // restarted if it is terminated by Compute Engine. - AutomaticRestart *bool `json:"automaticRestart,omitempty"` - - // NodeAffinities: A set of node affinity and anti-affinity. - NodeAffinities []*SchedulingNodeAffinity `json:"nodeAffinities,omitempty"` - - // OnHostMaintenance: Defines the maintenance behavior for this - // instance. For standard instances, the default behavior is MIGRATE. - // For preemptible instances, the default and only possible behavior is - // TERMINATE. For more information, see Setting Instance Scheduling - // Options. - // - // Possible values: - // "MIGRATE" - // "TERMINATE" - OnHostMaintenance string `json:"onHostMaintenance,omitempty"` - - // Preemptible: Defines whether the instance is preemptible. This can - // only be set during instance creation, it cannot be set or changed - // after the instance has been created. - Preemptible bool `json:"preemptible,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AutomaticRestart") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Scheduling) MarshalJSON() ([]byte, error) { - type NoMethod Scheduling +func (s *Router) MarshalJSON() ([]byte, error) { + type NoMethod Router raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SchedulingNodeAffinity: Node Affinity: the configuration of desired -// nodes onto which this Instance could be scheduled. 
-type SchedulingNodeAffinity struct { - // Key: Corresponds to the label key of Node resource. - Key string `json:"key,omitempty"` - - // Operator: Defines the operation of node selection. - // - // Possible values: - // "IN" - // "NOT_IN" - // "OPERATOR_UNSPECIFIED" - Operator string `json:"operator,omitempty"` +// RouterAdvertisedIpRange: Description-tagged IP ranges for the router +// to advertise. +type RouterAdvertisedIpRange struct { + // Description: User-specified description for the IP range. + Description string `json:"description,omitempty"` - // Values: Corresponds to the label values of Node resource. - Values []string `json:"values,omitempty"` + // Range: The IP range to advertise. The value must be a CIDR-formatted + // string. + Range string `json:"range,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -22293,110 +22789,31 @@ type SchedulingNodeAffinity struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SchedulingNodeAffinity) MarshalJSON() ([]byte, error) { - type NoMethod SchedulingNodeAffinity - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SecurityPolicy: A security policy is comprised of one or more rules. -// It can also be associated with one or more 'targets'. (== -// resource_for v1.securityPolicies ==) (== resource_for -// beta.securityPolicies ==) -type SecurityPolicy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Fingerprint: Specifies a fingerprint for this resource, which is - // essentially a hash of the metadata's contents and used for optimistic - // locking. The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update metadata. You must - // always provide an up-to-date fingerprint hash in order to update or - // change metadata, otherwise the request will fail with error 412 - // conditionNotMet. - // - // To see the latest fingerprint, make get() request to the security - // policy. - Fingerprint string `json:"fingerprint,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. 
- Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output only] Type of the resource. Always - // compute#securityPolicyfor security policies - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Rules: A list of rules that belong to this policy. There must always - // be a default rule (rule with priority 2147483647 and match "*"). If - // no rules are provided when creating a security policy, a default rule - // with action "allow" will be added. - Rules []*SecurityPolicyRule `json:"rules,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *SecurityPolicy) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicy +func (s *RouterAdvertisedIpRange) MarshalJSON() ([]byte, error) { + type NoMethod RouterAdvertisedIpRange raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPolicyList struct { +// RouterAggregatedList: Contains a list of routers. +type RouterAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of SecurityPolicy resources. - Items []*SecurityPolicy `json:"items,omitempty"` + // Items: A list of Router resources. + Items map[string]RoutersScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#securityPolicyList for listsof securityPolicies + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -22407,8 +22824,11 @@ type SecurityPolicyList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. 
- Warning *SecurityPolicyListWarning `json:"warning,omitempty"` + Warning *RouterAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -22431,15 +22851,15 @@ type SecurityPolicyList struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyList) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyList +func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod RouterAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SecurityPolicyListWarning: [Output Only] Informational warning +// RouterAggregatedListWarning: [Output Only] Informational warning // message. -type SecurityPolicyListWarning struct { +type RouterAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -22473,7 +22893,7 @@ type SecurityPolicyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SecurityPolicyListWarningData `json:"data,omitempty"` + Data []*RouterAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -22496,13 +22916,13 @@ type SecurityPolicyListWarning struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyListWarning +func (s *RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RouterAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPolicyListWarningData struct { +type RouterAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -22533,16 +22953,46 @@ type SecurityPolicyListWarningData struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyListWarningData +func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RouterAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPolicyReference struct { - SecurityPolicy string `json:"securityPolicy,omitempty"` +type RouterBgp struct { + // AdvertiseMode: User-specified flag to indicate which mode to use for + // advertisement. The options are DEFAULT or CUSTOM. + // + // Possible values: + // "CUSTOM" + // "DEFAULT" + AdvertiseMode string `json:"advertiseMode,omitempty"` - // ForceSendFields is a list of field names (e.g. "SecurityPolicy") to + // AdvertisedGroups: User-specified list of prefix groups to advertise + // in custom mode. This field can only be populated if advertise_mode is + // CUSTOM and is advertised to all peers of the router. These groups + // will be advertised in addition to any specified prefixes. Leave this + // field blank to advertise no custom groups. + // + // Possible values: + // "ALL_SUBNETS" + AdvertisedGroups []string `json:"advertisedGroups,omitempty"` + + // AdvertisedIpRanges: User-specified list of individual IP ranges to + // advertise in custom mode. 
This field can only be populated if + // advertise_mode is CUSTOM and is advertised to all peers of the + // router. These IP ranges will be advertised in addition to any + // specified groups. Leave this field blank to advertise no custom IP + // ranges. + AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` + + // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 + // private ASN, either 16-bit or 32-bit. The value will be fixed for + // this router resource. All VPN tunnels that link to this router will + // have the same local ASN. + Asn int64 `json:"asn,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -22550,124 +23000,93 @@ type SecurityPolicyReference struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SecurityPolicy") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "AdvertiseMode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyReference +func (s *RouterBgp) MarshalJSON() ([]byte, error) { + type NoMethod RouterBgp raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SecurityPolicyRule: Represents a rule that describes one or more -// match conditions along with the action to be taken when traffic -// matches this condition (allow or deny). -type SecurityPolicyRule struct { - // Action: The Action to preform when the client connection triggers the - // rule. Can currently be either "allow" or "deny()" where valid values - // for status are 403, 404, and 502. - Action string `json:"action,omitempty"` +type RouterBgpPeer struct { + // AdvertiseMode: User-specified flag to indicate which mode to use for + // advertisement. + // + // Possible values: + // "CUSTOM" + // "DEFAULT" + AdvertiseMode string `json:"advertiseMode,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` + // AdvertisedGroups: User-specified list of prefix groups to advertise + // in custom mode, which can take one of the following options: + // - ALL_SUBNETS: Advertises all available subnets, including peer VPC + // subnets. + // - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. + // - ALL_PEER_VPC_SUBNETS: Advertises peer subnets of the router's VPC + // network. 
Note that this field can only be populated if advertise_mode + // is CUSTOM and overrides the list defined for the router (in the "bgp" + // message). These groups are advertised in addition to any specified + // prefixes. Leave this field blank to advertise no custom groups. + // + // Possible values: + // "ALL_SUBNETS" + AdvertisedGroups []string `json:"advertisedGroups,omitempty"` - // Kind: [Output only] Type of the resource. Always - // compute#securityPolicyRule for security policy rules - Kind string `json:"kind,omitempty"` + // AdvertisedIpRanges: User-specified list of individual IP ranges to + // advertise in custom mode. This field can only be populated if + // advertise_mode is CUSTOM and overrides the list defined for the + // router (in the "bgp" message). These IP ranges are advertised in + // addition to any specified groups. Leave this field blank to advertise + // no custom IP ranges. + AdvertisedIpRanges []*RouterAdvertisedIpRange `json:"advertisedIpRanges,omitempty"` - // Match: A match condition that incoming traffic is evaluated against. - // If it evaluates to true, the corresponding ?action? is enforced. - Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` + // AdvertisedRoutePriority: The priority of routes advertised to this + // BGP peer. Where there is more than one matching route of maximum + // length, the routes with the lowest priority value win. + AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` - // Preview: If set to true, the specified action is not enforced. - Preview bool `json:"preview,omitempty"` + // InterfaceName: Name of the interface the BGP peer is associated with. + InterfaceName string `json:"interfaceName,omitempty"` - // Priority: An integer indicating the priority of a rule in the list. - // The priority must be a positive value between 0 and 2147483647. Rules - // are evaluated from highest to lowest priority where 0 is the highest - // priority and 2147483647 is the lowest prority. - Priority int64 `json:"priority,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Action") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Action") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SecurityPolicyRule) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRule - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SecurityPolicyRuleMatcher: Represents a match condition that incoming -// traffic is evaluated against. Exactly one field must be specified. -type SecurityPolicyRuleMatcher struct { - // Config: The configuration options available when specifying - // versioned_expr. 
This field must be specified if versioned_expr is - // specified and cannot be specified if versioned_expr is not specified. - Config *SecurityPolicyRuleMatcherConfig `json:"config,omitempty"` + // IpAddress: IP address of the interface inside Google Cloud Platform. + // Only IPv4 is supported. + IpAddress string `json:"ipAddress,omitempty"` - // VersionedExpr: Preconfigured versioned expression. If this field is - // specified, config must also be specified. Available preconfigured - // expressions along with their requirements are: SRC_IPS_V1 - must - // specify the corresponding src_ip_range field in config. + // ManagementType: [Output Only] The resource that configures and + // manages this BGP peer. + // - MANAGED_BY_USER is the default value and can be managed by you or + // other users + // - MANAGED_BY_ATTACHMENT is a BGP peer that is configured and managed + // by Cloud Interconnect, specifically by an InterconnectAttachment of + // type PARTNER. Google automatically creates, updates, and deletes this + // type of BGP peer when the PARTNER InterconnectAttachment is created, + // updated, or deleted. // // Possible values: - // "SRC_IPS_V1" - VersionedExpr string `json:"versionedExpr,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Config") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // "MANAGED_BY_ATTACHMENT" + // "MANAGED_BY_USER" + ManagementType string `json:"managementType,omitempty"` - // NullFields is a list of field names (e.g. "Config") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // Name: Name of this BGP peer. The name must be 1-63 characters long + // and comply with RFC1035. + Name string `json:"name,omitempty"` -func (s *SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRuleMatcher - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // PeerAsn: Peer BGP Autonomous System Number (ASN). Each BGP interface + // may use a different value. + PeerAsn int64 `json:"peerAsn,omitempty"` -type SecurityPolicyRuleMatcherConfig struct { - // SrcIpRanges: CIDR IP address range. - SrcIpRanges []string `json:"srcIpRanges,omitempty"` + // PeerIpAddress: IP address of the BGP interface outside Google Cloud + // Platform. Only IPv4 is supported. + PeerIpAddress string `json:"peerIpAddress,omitempty"` - // ForceSendFields is a list of field names (e.g. "SrcIpRanges") to + // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -22675,7 +23094,7 @@ type SecurityPolicyRuleMatcherConfig struct { // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SrcIpRanges") to include + // NullFields is a list of field names (e.g. "AdvertiseMode") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -22684,111 +23103,52 @@ type SecurityPolicyRuleMatcherConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyRuleMatcherConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SerialPortOutput: An instance's serial console output. -type SerialPortOutput struct { - // Contents: [Output Only] The contents of the console output. - Contents string `json:"contents,omitempty"` - - // Kind: [Output Only] Type of the resource. Always - // compute#serialPortOutput for serial port output. - Kind string `json:"kind,omitempty"` - - // Next: [Output Only] The position of the next byte of content from the - // serial console output. Use this value in the next request as the - // start parameter. - Next int64 `json:"next,omitempty,string"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Start: The starting byte position of the output that was returned. - // This should match the start parameter sent with the request. If the - // serial console output exceeds the size of the buffer, older output - // will be overwritten by newer content and the start values will be - // mismatched. - Start int64 `json:"start,omitempty,string"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Contents") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Contents") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { - type NoMethod SerialPortOutput +func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { + type NoMethod RouterBgpPeer raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ServiceAccount: A service account. -type ServiceAccount struct { - // Email: Email address of the service account. - Email string `json:"email,omitempty"` - - // Scopes: The list of scopes to be made available for this service - // account. - Scopes []string `json:"scopes,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Email") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` +type RouterInterface struct { + // IpRange: IP address and range of the interface. The IP range must be + // in the RFC3927 link-local IP address space. The value must be a + // CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not + // truncate the address as it represents the IP address of the + // interface. + IpRange string `json:"ipRange,omitempty"` - // NullFields is a list of field names (e.g. "Email") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // LinkedInterconnectAttachment: URI of the linked Interconnect + // attachment. It must be in the same region as the router. Each + // interface can have one linked resource, which can be either be a VPN + // tunnel or an Interconnect attachment. + LinkedInterconnectAttachment string `json:"linkedInterconnectAttachment,omitempty"` -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { - type NoMethod ServiceAccount - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // LinkedVpnTunnel: URI of the linked VPN tunnel, which must be in the + // same region as the router. Each interface can have one linked + // resource, which can be either a VPN tunnel or an Interconnect + // attachment. + LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` -// SignedUrlKey: Represents a customer-supplied Signing Key used by -// Cloud CDN Signed URLs -type SignedUrlKey struct { - // KeyName: Name of the key. The name must be 1-63 characters long, and - // comply with RFC1035. Specifically, the name must be 1-63 characters - // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` - // which means the first character must be a lowercase letter, and all - // following characters must be a dash, lowercase letter, or digit, - // except the last character, which cannot be a dash. - KeyName string `json:"keyName,omitempty"` + // ManagementType: [Output Only] The resource that configures and + // manages this interface. + // - MANAGED_BY_USER is the default value and can be managed directly by + // users. + // - MANAGED_BY_ATTACHMENT is an interface that is configured and + // managed by Cloud Interconnect, specifically, by an + // InterconnectAttachment of type PARTNER. Google automatically creates, + // updates, and deletes this type of interface when the PARTNER + // InterconnectAttachment is created, updated, or deleted. + // + // Possible values: + // "MANAGED_BY_ATTACHMENT" + // "MANAGED_BY_USER" + ManagementType string `json:"managementType,omitempty"` - // KeyValue: 128-bit key value used for signing the URL. The key value - // must be a valid RFC 4648 Section 5 base64url encoded string. - KeyValue string `json:"keyValue,omitempty"` + // Name: Name of this interface entry. The name must be 1-63 characters + // long and comply with RFC1035. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"KeyName") to + // ForceSendFields is a list of field names (e.g. "IpRange") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -22796,7 +23156,7 @@ type SignedUrlKey struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "KeyName") to include in + // NullFields is a list of field names (e.g. "IpRange") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -22805,167 +23165,23 @@ type SignedUrlKey struct { NullFields []string `json:"-"` } -func (s *SignedUrlKey) MarshalJSON() ([]byte, error) { - type NoMethod SignedUrlKey - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Snapshot: A persistent disk snapshot resource. (== resource_for -// beta.snapshots ==) (== resource_for v1.snapshots ==) -type Snapshot struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // DiskSizeGb: [Output Only] Size of the snapshot, specified in GB. - DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#snapshot for - // Snapshot resources. - Kind string `json:"kind,omitempty"` - - // LabelFingerprint: A fingerprint for the labels being applied to this - // snapshot, which is essentially a hash of the labels set used for - // optimistic locking. The fingerprint is initially generated by Compute - // Engine and changes after every request to modify or update labels. - // You must always provide an up-to-date fingerprint hash in order to - // update or change labels, otherwise the request will fail with error - // 412 conditionNotMet. - // - // To see the latest fingerprint, make a get() request to retrieve a - // snapshot. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels to apply to this snapshot. These can be later modified - // by the setLabels method. Label values may be empty. - Labels map[string]string `json:"labels,omitempty"` - - // LicenseCodes: [Output Only] Integer license codes indicating which - // licenses are attached to this snapshot. - LicenseCodes googleapi.Int64s `json:"licenseCodes,omitempty"` - - // Licenses: [Output Only] A list of public visible licenses that apply - // to this snapshot. This can be because the original image had licenses - // attached (such as a Windows image). - Licenses []string `json:"licenses,omitempty"` - - // Name: Name of the resource; provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. 
Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SnapshotEncryptionKey: Encrypts the snapshot using a - // customer-supplied encryption key. - // - // After you encrypt a snapshot using a customer-supplied key, you must - // provide the same key if you use the image later For example, you must - // provide the encryption key when you create a disk from the encrypted - // snapshot in a future request. - // - // Customer-supplied encryption keys do not protect access to metadata - // of the disk. - // - // If you do not provide an encryption key when creating the snapshot, - // then the snapshot will be encrypted using an automatically generated - // key and you do not need to provide a key to use the snapshot later. - SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` - - // SourceDisk: [Output Only] The source disk used to create this - // snapshot. - SourceDisk string `json:"sourceDisk,omitempty"` - - // SourceDiskEncryptionKey: The customer-supplied encryption key of the - // source disk. Required if the source disk is protected by a - // customer-supplied encryption key. - SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` - - // SourceDiskId: [Output Only] The ID value of the disk used to create - // this snapshot. This value may be used to determine whether the - // snapshot was taken from the current or a previous instance of a given - // disk name. - SourceDiskId string `json:"sourceDiskId,omitempty"` - - // Status: [Output Only] The status of the snapshot. This can be - // CREATING, DELETING, FAILED, READY, or UPLOADING. - // - // Possible values: - // "CREATING" - // "DELETING" - // "FAILED" - // "READY" - // "UPLOADING" - Status string `json:"status,omitempty"` - - // StorageBytes: [Output Only] A size of the storage used by the - // snapshot. As snapshots share storage, this number is expected to - // change with snapshot creation/deletion. - StorageBytes int64 `json:"storageBytes,omitempty,string"` - - // StorageBytesStatus: [Output Only] An indicator whether storageBytes - // is in a stable state or it is being adjusted as a result of shared - // storage reallocation. This status can either be UPDATING, meaning the - // size of the snapshot is being updated, or UP_TO_DATE, meaning the - // size of the snapshot is up-to-date. - // - // Possible values: - // "UPDATING" - // "UP_TO_DATE" - StorageBytesStatus string `json:"storageBytesStatus,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Snapshot) MarshalJSON() ([]byte, error) { - type NoMethod Snapshot +func (s *RouterInterface) MarshalJSON() ([]byte, error) { + type NoMethod RouterInterface raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SnapshotList: Contains a list of Snapshot resources. -type SnapshotList struct { +// RouterList: Contains a list of Router resources. +type RouterList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Snapshot resources. - Items []*Snapshot `json:"items,omitempty"` + // Items: A list of Router resources. + Items []*Router `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource. Always compute#router for + // routers. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -22980,7 +23196,7 @@ type SnapshotList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *SnapshotListWarning `json:"warning,omitempty"` + Warning *RouterListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -23003,14 +23219,14 @@ type SnapshotList struct { NullFields []string `json:"-"` } -func (s *SnapshotList) MarshalJSON() ([]byte, error) { - type NoMethod SnapshotList +func (s *RouterList) MarshalJSON() ([]byte, error) { + type NoMethod RouterList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SnapshotListWarning: [Output Only] Informational warning message. -type SnapshotListWarning struct { +// RouterListWarning: [Output Only] Informational warning message. +type RouterListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -23044,7 +23260,7 @@ type SnapshotListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SnapshotListWarningData `json:"data,omitempty"` + Data []*RouterListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -23067,13 +23283,13 @@ type SnapshotListWarning struct { NullFields []string `json:"-"` } -func (s *SnapshotListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SnapshotListWarning +func (s *RouterListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RouterListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SnapshotListWarningData struct { +type RouterListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -23104,92 +23320,180 @@ type SnapshotListWarningData struct { NullFields []string `json:"-"` } -func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SnapshotListWarningData +func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RouterListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SourceInstanceParams: A specification of the parameters to use when -// creating the instance template from a source instance. -type SourceInstanceParams struct { - // DiskConfigs: Attached disks configuration. If not provided, defaults - // are applied: For boot disk and any other R/W disks, new custom images - // will be created from each disk. For read-only disks, they will be - // attached in read-only mode. Local SSD disks will be created as blank - // volumes. - DiskConfigs []*DiskInstantiationConfig `json:"diskConfigs,omitempty"` +// RouterNat: Represents a Nat resource. It enables the VMs within the +// specified subnetworks to access Internet without external IP +// addresses. It specifies a list of subnetworks (and the ranges within) +// that want to use NAT. Customers can also provide the external IPs +// that would be used for NAT. GCP would auto-allocate ephemeral IPs if +// no external IPs are provided. +type RouterNat struct { + // IcmpIdleTimeoutSec: Timeout (in seconds) for ICMP connections. + // Defaults to 30s if not set. + IcmpIdleTimeoutSec int64 `json:"icmpIdleTimeoutSec,omitempty"` - // ForceSendFields is a list of field names (e.g. "DiskConfigs") to - // unconditionally include in API requests. By default, fields with + // LogConfig: Configure logging on this NAT. + LogConfig *RouterNatLogConfig `json:"logConfig,omitempty"` + + // MinPortsPerVm: Minimum number of ports allocated to a VM from this + // NAT config. If not set, a default number of ports is allocated to a + // VM. This is rounded up to the nearest power of 2. For example, if the + // value of this field is 50, at least 64 ports are allocated to a VM. + MinPortsPerVm int64 `json:"minPortsPerVm,omitempty"` + + // Name: Unique name of this Nat service. The name must be 1-63 + // characters long and comply with RFC1035. + Name string `json:"name,omitempty"` + + // NatIpAllocateOption: Specify the NatIpAllocateOption, which can take + // one of the following values: + // - MANUAL_ONLY: Uses only Nat IP addresses provided by customers. When + // there are not enough specified Nat IPs, the Nat service fails for new + // VMs. + // - AUTO_ONLY: Nat IPs are allocated by Google Cloud Platform; + // customers can't specify any Nat IPs. When choosing AUTO_ONLY, then + // nat_ip should be empty. + // + // Possible values: + // "AUTO_ONLY" + // "MANUAL_ONLY" + NatIpAllocateOption string `json:"natIpAllocateOption,omitempty"` + + // NatIps: A list of URLs of the IP resources used for this Nat service. + // These IP addresses must be valid static external IP addresses + // assigned to the project. + NatIps []string `json:"natIps,omitempty"` + + // SourceSubnetworkIpRangesToNat: Specify the Nat option, which can take + // one of the following values: + // - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every + // Subnetwork are allowed to Nat. + // - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges + // in every Subnetwork are allowed to Nat. 
+ // - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat + // (specified in the field subnetwork below) The default is + // SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this + // field contains ALL_SUBNETWORKS_ALL_IP_RANGES or + // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any + // other Router.Nat section in any Router for this network in this + // region. + // + // Possible values: + // "ALL_SUBNETWORKS_ALL_IP_RANGES" + // "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES" + // "LIST_OF_SUBNETWORKS" + SourceSubnetworkIpRangesToNat string `json:"sourceSubnetworkIpRangesToNat,omitempty"` + + // Subnetworks: A list of Subnetwork resources whose traffic should be + // translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS + // is selected for the SubnetworkIpRangeToNatOption above. + Subnetworks []*RouterNatSubnetworkToNat `json:"subnetworks,omitempty"` + + // TcpEstablishedIdleTimeoutSec: Timeout (in seconds) for TCP + // established connections. Defaults to 1200s if not set. + TcpEstablishedIdleTimeoutSec int64 `json:"tcpEstablishedIdleTimeoutSec,omitempty"` + + // TcpTransitoryIdleTimeoutSec: Timeout (in seconds) for TCP transitory + // connections. Defaults to 30s if not set. + TcpTransitoryIdleTimeoutSec int64 `json:"tcpTransitoryIdleTimeoutSec,omitempty"` + + // UdpIdleTimeoutSec: Timeout (in seconds) for UDP connections. Defaults + // to 30s if not set. + UdpIdleTimeoutSec int64 `json:"udpIdleTimeoutSec,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IcmpIdleTimeoutSec") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DiskConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "IcmpIdleTimeoutSec") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *SourceInstanceParams) MarshalJSON() ([]byte, error) { - type NoMethod SourceInstanceParams +func (s *RouterNat) MarshalJSON() ([]byte, error) { + type NoMethod RouterNat raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificate: An SslCertificate resource. This resource provides a -// mechanism to upload an SSL key and certificate to the load balancer -// to serve secure connections from the user. (== resource_for -// beta.sslCertificates ==) (== resource_for v1.sslCertificates ==) -type SslCertificate struct { - // Certificate: A local certificate file. The certificate must be in PEM - // format. 
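Putting the RouterNat fields above together: auto-allocated NAT IPs, NAT for every IP range of every subnetwork, and the documented timeout defaults written out explicitly. An illustrative sketch assuming the same generated package; the values are examples, not recommendations.

// exampleRouterNat is a hypothetical NAT config. NatIps stays empty because
// AUTO_ONLY lets the platform allocate addresses, and Subnetworks stays empty
// because ALL_SUBNETWORKS_ALL_IP_RANGES covers everything. Per the field
// comment above, MinPortsPerVm of 50 is rounded up to 64 by the service.
func exampleRouterNat() *RouterNat {
	return &RouterNat{
		Name:                          "nat-config-1",
		NatIpAllocateOption:           "AUTO_ONLY",
		SourceSubnetworkIpRangesToNat: "ALL_SUBNETWORKS_ALL_IP_RANGES",
		MinPortsPerVm:                 50,
		IcmpIdleTimeoutSec:            30,   // documented default
		TcpEstablishedIdleTimeoutSec:  1200, // documented default
		TcpTransitoryIdleTimeoutSec:   30,   // documented default
		UdpIdleTimeoutSec:             30,   // documented default
	}
}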
The certificate chain must be no greater than 5 certs long. - // The chain must include at least one intermediate cert. - Certificate string `json:"certificate,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` +// RouterNatLogConfig: Configuration of logging on a NAT. +type RouterNatLogConfig struct { + // Enable: Indicates whether or not to export logs. This is false by + // default. + Enable bool `json:"enable,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` + // Filter: Specifies the desired filtering of logs on this NAT. If + // unspecified, logs are exported for all connections handled by this + // NAT. + // + // Possible values: + // "ALL" + // "ERRORS_ONLY" + // "TRANSLATIONS_ONLY" + Filter string `json:"filter,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "Enable") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Kind: [Output Only] Type of the resource. Always - // compute#sslCertificate for SSL certificates. - Kind string `json:"kind,omitempty"` + // NullFields is a list of field names (e.g. "Enable") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` +func (s *RouterNatLogConfig) MarshalJSON() ([]byte, error) { + type NoMethod RouterNatLogConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // PrivateKey: A write-only private key in PEM format. Only insert - // requests will include this field. - PrivateKey string `json:"privateKey,omitempty"` +// RouterNatSubnetworkToNat: Defines the IP ranges that want to use NAT +// for a subnetwork. +type RouterNatSubnetworkToNat struct { + // Name: URL for the subnetwork resource that will use NAT. + Name string `json:"name,omitempty"` - // SelfLink: [Output only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // SecondaryIpRangeNames: A list of the secondary ranges of the + // Subnetwork that are allowed to use NAT. 
This can be populated only if + // "LIST_OF_SECONDARY_IP_RANGES" is one of the values in + // source_ip_ranges_to_nat. + SecondaryIpRangeNames []string `json:"secondaryIpRangeNames,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // SourceIpRangesToNat: Specify the options for NAT ranges in the + // Subnetwork. All options of a single value are valid except + // NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple + // values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] + // Default: [ALL_IP_RANGES] + // + // Possible values: + // "ALL_IP_RANGES" + // "LIST_OF_SECONDARY_IP_RANGES" + // "PRIMARY_IP_RANGE" + SourceIpRangesToNat []string `json:"sourceIpRangesToNat,omitempty"` - // ForceSendFields is a list of field names (e.g. "Certificate") to + // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -23197,52 +23501,36 @@ type SslCertificate struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Certificate") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SslCertificate) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificate +func (s *RouterNatSubnetworkToNat) MarshalJSON() ([]byte, error) { + type NoMethod RouterNatSubnetworkToNat raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificateList: Contains a list of SslCertificate resources. -type SslCertificateList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of SslCertificate resources. - Items []*SslCertificate `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` +type RouterStatus struct { + // BestRoutes: Best routes for this router's network. + BestRoutes []*Route `json:"bestRoutes,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // BestRoutesForRouter: Best routes learned by this router. + BestRoutesForRouter []*Route `json:"bestRoutesForRouter,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. 
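When LIST_OF_SUBNETWORKS is chosen instead, each entry is a RouterNatSubnetworkToNat, and logging is switched on through RouterNatLogConfig. An illustrative sketch combining both, assuming the same generated package; the subnetwork URL and secondary range name are placeholders.

// exampleSubnetworkNat is a hypothetical per-subnetwork NAT entry plus a log
// config: the primary range and one named secondary range are translated
// (the only documented multi-value combination), and only errors are exported.
func exampleSubnetworkNat() (*RouterNatSubnetworkToNat, *RouterNatLogConfig) {
	subnet := &RouterNatSubnetworkToNat{
		Name:                  "https://www.googleapis.com/compute/v1/projects/example-project/regions/us-central1/subnetworks/subnet-a",
		SourceIpRangesToNat:   []string{"PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"},
		SecondaryIpRangeNames: []string{"pods"},
	}
	logCfg := &RouterNatLogConfig{
		Enable: true,
		Filter: "ERRORS_ONLY",
	}
	return subnet, logCfg
}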
- SelfLink string `json:"selfLink,omitempty"` + BgpPeerStatus []*RouterStatusBgpPeerStatus `json:"bgpPeerStatus,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *SslCertificateListWarning `json:"warning,omitempty"` + NatStatus []*RouterStatusNatStatus `json:"natStatus,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Network: URI of the network to which this router belongs. + Network string `json:"network,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "BestRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -23250,8 +23538,8 @@ type SslCertificateList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "BestRoutes") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -23259,55 +23547,50 @@ type SslCertificateList struct { NullFields []string `json:"-"` } -func (s *SslCertificateList) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateList +func (s *RouterStatus) MarshalJSON() ([]byte, error) { + type NoMethod RouterStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificateListWarning: [Output Only] Informational warning -// message. -type SslCertificateListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. +type RouterStatusBgpPeerStatus struct { + // AdvertisedRoutes: Routes that were advertised to the remote BGP peer + AdvertisedRoutes []*Route `json:"advertisedRoutes,omitempty"` + + // IpAddress: IP address of the local BGP interface. + IpAddress string `json:"ipAddress,omitempty"` + + // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. + LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` + + // Name: Name of this BGP peer. Unique within the Routers resource. + Name string `json:"name,omitempty"` + + // NumLearnedRoutes: Number of routes learned from the remote BGP Peer. + NumLearnedRoutes int64 `json:"numLearnedRoutes,omitempty"` + + // PeerIpAddress: IP address of the remote BGP interface. + PeerIpAddress string `json:"peerIpAddress,omitempty"` + + // State: BGP state as specified in RFC1771. 
+ State string `json:"state,omitempty"` + + // Status: Status of the BGP peer: {UP, DOWN} // // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` + // "DOWN" + // "UNKNOWN" + // "UP" + Status string `json:"status,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SslCertificateListWarningData `json:"data,omitempty"` + // Uptime: Time this session has been up. Format: 14 years, 51 weeks, 6 + // days, 23 hours, 59 minutes, 59 seconds + Uptime string `json:"uptime,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // UptimeSeconds: Time this session has been up, in seconds. Format: 145 + UptimeSeconds string `json:"uptimeSeconds,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "AdvertisedRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -23315,36 +23598,85 @@ type SslCertificateListWarning struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AdvertisedRoutes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *SslCertificateListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateListWarning +func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { + type NoMethod RouterStatusBgpPeerStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslCertificateListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. 
For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +// RouterStatusNatStatus: Status of a NAT contained in this router. Next +// tag: 9 +type RouterStatusNatStatus struct { + // AutoAllocatedNatIps: A list of IPs auto-allocated for NAT. Example: + // ["1.1.1.1", "129.2.16.89"] + AutoAllocatedNatIps []string `json:"autoAllocatedNatIps,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // MinExtraNatIpsNeeded: The number of extra IPs to allocate. This will + // be greater than 0 only if user-specified IPs are NOT enough to allow + // all configured VMs to use NAT. This value is meaningful only when + // auto-allocation of NAT IPs is *not* used. + MinExtraNatIpsNeeded int64 `json:"minExtraNatIpsNeeded,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // Name: Unique name of this NAT. + Name string `json:"name,omitempty"` + + // NumVmEndpointsWithNatMappings: Number of VM endpoints (i.e., Nics) + // that can use NAT. + NumVmEndpointsWithNatMappings int64 `json:"numVmEndpointsWithNatMappings,omitempty"` + + // UserAllocatedNatIpResources: A list of fully qualified URLs of + // reserved IP address resources. + UserAllocatedNatIpResources []string `json:"userAllocatedNatIpResources,omitempty"` + + // UserAllocatedNatIps: A list of IPs user-allocated for NAT. They will + // be raw IP strings like "179.12.26.133". + UserAllocatedNatIps []string `json:"userAllocatedNatIps,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoAllocatedNatIps") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AutoAllocatedNatIps") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RouterStatusNatStatus) MarshalJSON() ([]byte, error) { + type NoMethod RouterStatusNatStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RouterStatusResponse struct { + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + Result *RouterStatus `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -23352,7 +23684,7 @@ type SslCertificateListWarningData struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API + // NullFields is a list of field names (e.g. "Kind") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -23361,43 +23693,52 @@ type SslCertificateListWarningData struct { NullFields []string `json:"-"` } -func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SslCertificateListWarningData +func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { + type NoMethod RouterStatusResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +type RoutersPreviewResponse struct { + // Resource: Preview of given router. + Resource *Router `json:"resource,omitempty"` - // Items: A list of SslPolicy resources. - Items []*SslPolicy `json:"items,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // Kind: [Output Only] Type of the resource. Always - // compute#sslPoliciesList for lists of sslPolicies. - Kind string `json:"kind,omitempty"` + // ForceSendFields is a list of field names (e.g. "Resource") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // NullFields is a list of field names (e.g. "Resource") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { + type NoMethod RoutersPreviewResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // Warning: [Output Only] Informational warning message. - Warning *SslPoliciesListWarning `json:"warning,omitempty"` +type RoutersScopedList struct { + // Routers: A list of routers contained in this scope. 
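RouterStatusResponse above wraps a RouterStatus, which carries per-peer and per-NAT status. An illustrative sketch of walking that response to spot BGP sessions that are not UP and NATs that report needing extra IPs; it assumes the same generated package and says nothing about how the response was obtained.

// summarizeRouterStatus is a hypothetical helper over a RouterStatusResponse.
// It returns the names of BGP peers whose Status is not "UP" and the names of
// NATs whose MinExtraNatIpsNeeded is greater than zero.
func summarizeRouterStatus(resp *RouterStatusResponse) (downPeers, starvedNats []string) {
	if resp == nil || resp.Result == nil {
		return nil, nil
	}
	for _, peer := range resp.Result.BgpPeerStatus {
		if peer.Status != "UP" {
			downPeers = append(downPeers, peer.Name)
		}
	}
	for _, nat := range resp.Result.NatStatus {
		if nat.MinExtraNatIpsNeeded > 0 {
			starvedNats = append(starvedNats, nat.Name)
		}
	}
	return downPeers, starvedNats
}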
+ Routers []*Router `json:"routers,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: Informational warning which replaces the list of routers + // when the list is empty. + Warning *RoutersScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Routers") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -23405,8 +23746,8 @@ type SslPoliciesList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Routers") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -23414,14 +23755,15 @@ type SslPoliciesList struct { NullFields []string `json:"-"` } -func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesList +func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { + type NoMethod RoutersScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslPoliciesListWarning: [Output Only] Informational warning message. -type SslPoliciesListWarning struct { +// RoutersScopedListWarning: Informational warning which replaces the +// list of routers when the list is empty. +type RoutersScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -23455,7 +23797,7 @@ type SslPoliciesListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SslPoliciesListWarningData `json:"data,omitempty"` + Data []*RoutersScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -23478,13 +23820,13 @@ type SslPoliciesListWarning struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListWarning +func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RoutersScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesListWarningData struct { +type RoutersScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -23515,20 +23857,51 @@ type SslPoliciesListWarningData struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListWarningData +func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RoutersScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesListAvailableFeaturesResponse struct { - Features []string `json:"features,omitempty"` +// Rule: A rule to be applied in a Policy. +type Rule struct { + // Action: Required + // + // Possible values: + // "ALLOW" + // "ALLOW_WITH_LOG" + // "DENY" + // "DENY_WITH_LOG" + // "LOG" + // "NO_ACTION" + Action string `json:"action,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Conditions: Additional restrictions that must be met. All conditions + // must pass for the rule to match. + Conditions []*Condition `json:"conditions,omitempty"` - // ForceSendFields is a list of field names (e.g. "Features") to + // Description: Human-readable description of the rule. + Description string `json:"description,omitempty"` + + // Ins: If one or more 'in' clauses are specified, the rule matches if + // the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. + Ins []string `json:"ins,omitempty"` + + // LogConfigs: The config returned to callers of + // tech.iam.IAM.CheckPolicy for any entries that match the LOG action. + LogConfigs []*LogConfig `json:"logConfigs,omitempty"` + + // NotIns: If one or more 'not_in' clauses are specified, the rule + // matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the + // entries. + NotIns []string `json:"notIns,omitempty"` + + // Permissions: A permission is a string of form '..' (e.g., + // 'storage.buckets.list'). A value of '*' matches all permissions, and + // a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs. + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -23536,8 +23909,8 @@ type SslPoliciesListAvailableFeaturesResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Features") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
@@ -23545,163 +23918,123 @@ type SslPoliciesListAvailableFeaturesResponse struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListAvailableFeaturesResponse +func (s *Rule) MarshalJSON() ([]byte, error) { + type NoMethod Rule raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslPolicy: A SSL policy specifies the server-side support for SSL -// features. This can be attached to a TargetHttpsProxy or a -// TargetSslProxy. This affects connections between clients and the -// HTTPS or SSL proxy load balancer. They do not affect the connection -// between the load balancers and the backends. -type SslPolicy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // CustomFeatures: A list of features enabled when the selected profile - // is CUSTOM. The - // - method returns the set of features that can be specified in this - // list. This field must be empty if the profile is not CUSTOM. - CustomFeatures []string `json:"customFeatures,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` +type SSLHealthCheck struct { + // Port: The TCP port number for the health check request. The default + // value is 443. Valid values are 1 through 65535. + Port int64 `json:"port,omitempty"` - // EnabledFeatures: [Output Only] The list of features enabled in the - // SSL policy. - EnabledFeatures []string `json:"enabledFeatures,omitempty"` + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a SslPolicy. An up-to-date - // fingerprint must be provided in order to update the SslPolicy, - // otherwise the request will fail with error 412 conditionNotMet. + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in + // port + // is used for health checking. + // USE_NAMED_PORT: The + // portName + // is used for health checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. // - // To see the latest fingerprint, make a get() request to retrieve an - // SslPolicy. - Fingerprint string `json:"fingerprint,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output only] Type of the resource. Always compute#sslPolicyfor - // SSL policies. - Kind string `json:"kind,omitempty"` - - // MinTlsVersion: The minimum version of SSL protocol that can be used - // by the clients to establish a connection with the load balancer. This - // can be one of TLS_1_0, TLS_1_1, TLS_1_2. + // + // If not specified, SSL health check follows behavior specified + // in + // port + // and + // portName + // fields. 
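The Rule type above is the IAM-style rule carried in compute Policy messages: an action plus optional conditions, principal selectors, and log configs. A small illustrative sketch of a plain allow rule, assuming the same generated package; the permission string comes from the field comment above, while the principal entry is a placeholder, and LogConfigs is left nil rather than guessing at the LogConfig shape defined elsewhere in this file.

// exampleRule is a hypothetical rule: allow storage.buckets.list for the
// listed principals, with no extra conditions and no logging config.
func exampleRule() *Rule {
	return &Rule{
		Action:      "ALLOW",
		Description: "allow listing buckets for the selected principals",
		Ins:         []string{"user:alice@example.com"},
		Permissions: []string{"storage.buckets.list"},
	}
}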
// // Possible values: - // "TLS_1_0" - // "TLS_1_1" - // "TLS_1_2" - MinTlsVersion string `json:"minTlsVersion,omitempty"` - - // Name: Name of the resource. The name must be 1-63 characters long, - // and comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` - // Profile: Profile specifies the set of SSL features that can be used - // by the load balancer when negotiating SSL with clients. This can be - // one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, - // the set of SSL features to enable must be specified in the - // customFeatures field. + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. // // Possible values: - // "COMPATIBLE" - // "CUSTOM" - // "MODERN" - // "RESTRICTED" - Profile string `json:"profile,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` - // Warnings: [Output Only] If potential misconfigurations are detected - // for this SSL policy, this field will be populated with warning - // messages. - Warnings []*SslPolicyWarnings `json:"warnings,omitempty"` + // Request: The application data to send once the SSL connection has + // been established (default value is empty). If both request and + // response are empty, the connection establishment alone will indicate + // health. The request data can only be ASCII. + Request string `json:"request,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Response: The bytes to match against the beginning of the response + // data. If left empty (the default value), any response will indicate + // health. The response data can only be ASCII. + Response string `json:"response,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Port") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Port") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SslPolicy) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicy +func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { + type NoMethod SSLHealthCheck raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPolicyWarnings struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. +// Scheduling: Sets the scheduling options for an Instance. +type Scheduling struct { + // AutomaticRestart: Specifies whether the instance should be + // automatically restarted if it is terminated by Compute Engine (not + // terminated by a user). You can only set the automatic restart option + // for standard instances. Preemptible instances cannot be automatically + // restarted. // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` + // By default, this is set to true so an instance is automatically + // restarted if it is terminated by Compute Engine. + AutomaticRestart *bool `json:"automaticRestart,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SslPolicyWarningsData `json:"data,omitempty"` + // NodeAffinities: A set of node affinity and anti-affinity. + NodeAffinities []*SchedulingNodeAffinity `json:"nodeAffinities,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // OnHostMaintenance: Defines the maintenance behavior for this + // instance. For standard instances, the default behavior is MIGRATE. + // For preemptible instances, the default and only possible behavior is + // TERMINATE. For more information, see Setting Instance Scheduling + // Options. + // + // Possible values: + // "MIGRATE" + // "TERMINATE" + OnHostMaintenance string `json:"onHostMaintenance,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // Preemptible: Defines whether the instance is preemptible. This can + // only be set during instance creation, it cannot be set or changed + // after the instance has been created. + Preemptible bool `json:"preemptible,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -23709,34 +24042,38 @@ type SslPolicyWarnings struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AutomaticRestart") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *SslPolicyWarnings) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicyWarnings +func (s *Scheduling) MarshalJSON() ([]byte, error) { + type NoMethod Scheduling raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPolicyWarningsData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). +// SchedulingNodeAffinity: Node Affinity: the configuration of desired +// nodes onto which this Instance could be scheduled. +type SchedulingNodeAffinity struct { + // Key: Corresponds to the label key of Node resource. Key string `json:"key,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // Operator: Defines the operation of node selection. + // + // Possible values: + // "IN" + // "NOT_IN" + // "OPERATOR_UNSPECIFIED" + Operator string `json:"operator,omitempty"` + + // Values: Corresponds to the label values of Node resource. + Values []string `json:"values,omitempty"` // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with @@ -23755,118 +24092,59 @@ type SslPolicyWarningsData struct { NullFields []string `json:"-"` } -func (s *SslPolicyWarningsData) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicyWarningsData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SslPolicyReference struct { - // SslPolicy: URL of the SSL policy resource. Set this to empty string - // to clear any existing SSL policy associated with the target proxy - // resource. - SslPolicy string `json:"sslPolicy,omitempty"` - - // ForceSendFields is a list of field names (e.g. "SslPolicy") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "SslPolicy") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SslPolicyReference) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicyReference +func (s *SchedulingNodeAffinity) MarshalJSON() ([]byte, error) { + type NoMethod SchedulingNodeAffinity raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Subnetwork: A Subnetwork resource. (== resource_for beta.subnetworks -// ==) (== resource_for v1.subnetworks ==) -type Subnetwork struct { +// SecurityPolicy: A security policy is comprised of one or more rules. +// It can also be associated with one or more 'targets'. (== +// resource_for v1.securityPolicies ==) (== resource_for +// beta.securityPolicies ==) +type SecurityPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` // Description: An optional description of this resource. Provide this - // property when you create the resource. This field can be set only at - // resource creation time. + // property when you create the resource. Description string `json:"description,omitempty"` - // EnableFlowLogs: Whether to enable flow logging for this subnetwork. - // If this field is not explicitly set, it will not appear in get - // listings. If not set the default behavior is to disable flow logging. - EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` - - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a Subnetwork. An up-to-date - // fingerprint must be provided in order to update the Subnetwork, - // otherwise the request will fail with error 412 conditionNotMet. + // Fingerprint: Specifies a fingerprint for this resource, which is + // essentially a hash of the metadata's contents and used for optimistic + // locking. The fingerprint is initially generated by Compute Engine and + // changes after every request to modify or update metadata. You must + // always provide an up-to-date fingerprint hash in order to update or + // change metadata, otherwise the request will fail with error 412 + // conditionNotMet. // - // To see the latest fingerprint, make a get() request to retrieve a - // Subnetwork. + // To see the latest fingerprint, make get() request to the security + // policy. Fingerprint string `json:"fingerprint,omitempty"` - // GatewayAddress: [Output Only] The gateway address for default routes - // to reach destination addresses outside this subnetwork. - GatewayAddress string `json:"gatewayAddress,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. 
Id uint64 `json:"id,omitempty,string"` - // IpCidrRange: The range of internal addresses that are owned by this - // subnetwork. Provide this property when you create the subnetwork. For - // example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and - // non-overlapping within a network. Only IPv4 is supported. This field - // can be set only at resource creation time. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#subnetwork - // for Subnetwork resources. + // Kind: [Output only] Type of the resource. Always + // compute#securityPolicyfor security policies Kind string `json:"kind,omitempty"` - // Name: The name of the resource, provided by the client when initially - // creating the resource. The name must be 1-63 characters long, and - // comply with RFC1035. Specifically, the name must be 1-63 characters - // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` - // which means the first character must be a lowercase letter, and all - // following characters must be a dash, lowercase letter, or digit, - // except the last character, which cannot be a dash. + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Network: The URL of the network to which this subnetwork belongs, - // provided by the client when initially creating the subnetwork. Only - // networks that are in the distributed mode can have subnetworks. This - // field can be set only at resource creation time. - Network string `json:"network,omitempty"` - - // PrivateIpGoogleAccess: Whether the VMs in this subnet can access - // Google services without assigned external IP addresses. This field - // can be both set at resource creation time and updated using - // setPrivateIpGoogleAccess. - PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` - - // Region: URL of the region where the Subnetwork resides. This field - // can be set only at resource creation time. - Region string `json:"region,omitempty"` - - // SecondaryIpRanges: An array of configurations for secondary IP ranges - // for VM instances contained in this subnetwork. The primary IP of such - // VM must belong to the primary ipCidrRange of the subnetwork. The - // alias IPs may belong to either primary or secondary ranges. This - // field can be updated with a patch request. - SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` + // Rules: A list of rules that belong to this policy. There must always + // be a default rule (rule with priority 2147483647 and match "*"). If + // no rules are provided when creating a security policy, a default rule + // with action "allow" will be added. + Rules []*SecurityPolicyRule `json:"rules,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. 
SelfLink string `json:"selfLink,omitempty"` @@ -23893,22 +24171,22 @@ type Subnetwork struct { NullFields []string `json:"-"` } -func (s *Subnetwork) MarshalJSON() ([]byte, error) { - type NoMethod Subnetwork +func (s *SecurityPolicy) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworkAggregatedList struct { +type SecurityPolicyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of SubnetworksScopedList resources. - Items map[string]SubnetworksScopedList `json:"items,omitempty"` + // Items: A list of SecurityPolicy resources. + Items []*SecurityPolicy `json:"items,omitempty"` // Kind: [Output Only] Type of resource. Always - // compute#subnetworkAggregatedList for aggregated lists of subnetworks. + // compute#securityPolicyList for listsof securityPolicies Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -23919,11 +24197,8 @@ type SubnetworkAggregatedList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` + Warning *SecurityPolicyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -23946,15 +24221,15 @@ type SubnetworkAggregatedList struct { NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkAggregatedList +func (s *SecurityPolicyList) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkAggregatedListWarning: [Output Only] Informational warning +// SecurityPolicyListWarning: [Output Only] Informational warning // message. -type SubnetworkAggregatedListWarning struct { +type SecurityPolicyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -23988,7 +24263,7 @@ type SubnetworkAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SubnetworkAggregatedListWarningData `json:"data,omitempty"` + Data []*SecurityPolicyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -24011,13 +24286,13 @@ type SubnetworkAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkAggregatedListWarning +func (s *SecurityPolicyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworkAggregatedListWarningData struct { +type SecurityPolicyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -24048,44 +24323,16 @@ type SubnetworkAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkAggregatedListWarningData +func (s *SecurityPolicyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkList: Contains a list of Subnetwork resources. -type SubnetworkList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Subnetwork resources. - Items []*Subnetwork `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#subnetworkList - // for lists of subnetworks. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *SubnetworkListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type SecurityPolicyReference struct { + SecurityPolicy string `json:"securityPolicy,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "SecurityPolicy") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -24093,63 +24340,57 @@ type SubnetworkList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "SecurityPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
NullFields []string `json:"-"` } -func (s *SubnetworkList) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkList +func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyReference raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkListWarning: [Output Only] Informational warning message. -type SubnetworkListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` +// SecurityPolicyRule: Represents a rule that describes one or more +// match conditions along with the action to be taken when traffic +// matches this condition (allow or deny). +type SecurityPolicyRule struct { + // Action: The Action to preform when the client connection triggers the + // rule. Can currently be either "allow" or "deny()" where valid values + // for status are 403, 404, and 502. + Action string `json:"action,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SubnetworkListWarningData `json:"data,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // Kind: [Output only] Type of the resource. Always + // compute#securityPolicyRule for security policy rules + Kind string `json:"kind,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // Match: A match condition that incoming traffic is evaluated against. + // If it evaluates to true, the corresponding ?action? is enforced. + Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` + + // Preview: If set to true, the specified action is not enforced. + Preview bool `json:"preview,omitempty"` + + // Priority: An integer indicating the priority of a rule in the list. + // The priority must be a positive value between 0 and 2147483647. Rules + // are evaluated from highest to lowest priority where 0 is the highest + // priority and 2147483647 is the lowest prority. + Priority int64 `json:"priority,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Action") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -24157,7 +24398,7 @@ type SubnetworkListWarning struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API + // NullFields is a list of field names (e.g. "Action") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -24166,27 +24407,30 @@ type SubnetworkListWarning struct { NullFields []string `json:"-"` } -func (s *SubnetworkListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkListWarning +func (s *SecurityPolicyRule) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRule raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworkListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +// SecurityPolicyRuleMatcher: Represents a match condition that incoming +// traffic is evaluated against. Exactly one field must be specified. +type SecurityPolicyRuleMatcher struct { + // Config: The configuration options available when specifying + // versioned_expr. This field must be specified if versioned_expr is + // specified and cannot be specified if versioned_expr is not specified. + Config *SecurityPolicyRuleMatcherConfig `json:"config,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // VersionedExpr: Preconfigured versioned expression. If this field is + // specified, config must also be specified. Available preconfigured + // expressions along with their requirements are: SRC_IPS_V1 - must + // specify the corresponding src_ip_range field in config. + // + // Possible values: + // "SRC_IPS_V1" + VersionedExpr string `json:"versionedExpr,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "Config") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -24194,7 +24438,7 @@ type SubnetworkListWarningData struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API + // NullFields is a list of field names (e.g. "Config") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -24203,29 +24447,17 @@ type SubnetworkListWarningData struct { NullFields []string `json:"-"` } -func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkListWarningData +func (s *SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleMatcher raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkSecondaryRange: Represents a secondary IP range of a -// subnetwork. -type SubnetworkSecondaryRange struct { - // IpCidrRange: The range of IP addresses belonging to this subnetwork - // secondary range. Provide this property when you create the - // subnetwork. Ranges must be unique and non-overlapping with all - // primary and secondary IP ranges within a network. Only IPv4 is - // supported. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // RangeName: The name associated with this subnetwork secondary range, - // used when adding an alias IP range to a VM instance. The name must be - // 1-63 characters long, and comply with RFC1035. The name must be - // unique within the subnetwork. - RangeName string `json:"rangeName,omitempty"` +type SecurityPolicyRuleMatcherConfig struct { + // SrcIpRanges: CIDR IP address range. + SrcIpRanges []string `json:"srcIpRanges,omitempty"` - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // ForceSendFields is a list of field names (e.g. "SrcIpRanges") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -24233,7 +24465,7 @@ type SubnetworkSecondaryRange struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpCidrRange") to include + // NullFields is a list of field names (e.g. "SrcIpRanges") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -24242,21 +24474,41 @@ type SubnetworkSecondaryRange struct { NullFields []string `json:"-"` } -func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworkSecondaryRange +func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleMatcherConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksExpandIpCidrRangeRequest struct { - // IpCidrRange: The IP (in CIDR format or netmask) of internal addresses - // that are legal on this Subnetwork. This range should be disjoint from - // other subnetworks within this network. This range can only be larger - // than (i.e. a superset of) the range previously defined before the - // update. - IpCidrRange string `json:"ipCidrRange,omitempty"` +// SerialPortOutput: An instance's serial console output. +type SerialPortOutput struct { + // Contents: [Output Only] The contents of the console output. + Contents string `json:"contents,omitempty"` - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // Kind: [Output Only] Type of the resource. Always + // compute#serialPortOutput for serial port output. 
+ Kind string `json:"kind,omitempty"` + + // Next: [Output Only] The position of the next byte of content from the + // serial console output. Use this value in the next request as the + // start parameter. + Next int64 `json:"next,omitempty,string"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Start: The starting byte position of the output that was returned. + // This should match the start parameter sent with the request. If the + // serial console output exceeds the size of the buffer, older output + // will be overwritten by newer content and the start values will be + // mismatched. + Start int64 `json:"start,omitempty,string"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Contents") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -24264,30 +24516,31 @@ type SubnetworksExpandIpCidrRangeRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Contents") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksExpandIpCidrRangeRequest +func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { + type NoMethod SerialPortOutput raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksScopedList struct { - // Subnetworks: A list of subnetworks contained in this scope. - Subnetworks []*Subnetwork `json:"subnetworks,omitempty"` +// ServiceAccount: A service account. +type ServiceAccount struct { + // Email: Email address of the service account. + Email string `json:"email,omitempty"` - // Warning: An informational warning that appears when the list of - // addresses is empty. - Warning *SubnetworksScopedListWarning `json:"warning,omitempty"` + // Scopes: The list of scopes to be made available for this service + // account. + Scopes []string `json:"scopes,omitempty"` - // ForceSendFields is a list of field names (e.g. "Subnetworks") to + // ForceSendFields is a list of field names (e.g. "Email") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -24295,64 +24548,79 @@ type SubnetworksScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Subnetworks") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Email") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksScopedList +func (s *ServiceAccount) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAccount raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworksScopedListWarning: An informational warning that appears -// when the list of addresses is empty. -type SubnetworksScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` +// ShieldedInstanceConfig: A set of Shielded Instance options. +type ShieldedInstanceConfig struct { + // EnableIntegrityMonitoring: Defines whether the instance has integrity + // monitoring enabled. + EnableIntegrityMonitoring bool `json:"enableIntegrityMonitoring,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SubnetworksScopedListWarningData `json:"data,omitempty"` + // EnableSecureBoot: Defines whether the instance has Secure Boot + // enabled. + EnableSecureBoot bool `json:"enableSecureBoot,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // EnableVtpm: Defines whether the instance has the vTPM enabled. + EnableVtpm bool `json:"enableVtpm,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. + // "EnableIntegrityMonitoring") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "EnableIntegrityMonitoring") to include in API requests with the JSON + // null value. By default, fields with empty values are omitted from API + // requests. However, any field with an empty value appearing in + // NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ShieldedInstanceConfig) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedInstanceConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ShieldedInstanceIdentity: A shielded Instance identity entry. +type ShieldedInstanceIdentity struct { + // EncryptionKey: An Endorsement Key (EK) issued to the Shielded + // Instance's vTPM. + EncryptionKey *ShieldedInstanceIdentityEntry `json:"encryptionKey,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#shieldedInstanceIdentity for shielded Instance identity + // entry. + Kind string `json:"kind,omitempty"` + + // SigningKey: An Attestation Key (AK) issued to the Shielded Instance's + // vTPM. + SigningKey *ShieldedInstanceIdentityEntry `json:"signingKey,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "EncryptionKey") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -24360,36 +24628,30 @@ type SubnetworksScopedListWarning struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "EncryptionKey") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksScopedListWarning +func (s *ShieldedInstanceIdentity) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedInstanceIdentity raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). 
- Key string `json:"key,omitempty"` +// ShieldedInstanceIdentityEntry: A Shielded Instance Identity Entry. +type ShieldedInstanceIdentityEntry struct { + // EkCert: A PEM-encoded X.509 certificate. This field can be empty. + EkCert string `json:"ekCert,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + // EkPub: A PEM-encoded public key. + EkPub string `json:"ekPub,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "EkCert") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -24397,7 +24659,7 @@ type SubnetworksScopedListWarningData struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API + // NullFields is a list of field names (e.g. "EkCert") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -24406,17 +24668,21 @@ type SubnetworksScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksScopedListWarningData +func (s *ShieldedInstanceIdentityEntry) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedInstanceIdentityEntry raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksSetPrivateIpGoogleAccessRequest struct { - PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` +// ShieldedInstanceIntegrityPolicy: The policy describes the baseline +// against which Instance boot integrity is measured. +type ShieldedInstanceIntegrityPolicy struct { + // UpdateAutoLearnPolicy: Updates the integrity policy baseline using + // the measurements from the VM instance's most recent boot. + UpdateAutoLearnPolicy bool `json:"updateAutoLearnPolicy,omitempty"` // ForceSendFields is a list of field names (e.g. - // "PrivateIpGoogleAccess") to unconditionally include in API requests. + // "UpdateAutoLearnPolicy") to unconditionally include in API requests. // By default, fields with empty values are omitted from API requests. // However, any non-pointer, non-interface field appearing in // ForceSendFields will be sent to the server regardless of whether the @@ -24424,7 +24690,7 @@ type SubnetworksSetPrivateIpGoogleAccessRequest struct { // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "PrivateIpGoogleAccess") to + // NullFields is a list of field names (e.g. "UpdateAutoLearnPolicy") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -24434,42 +24700,28 @@ type SubnetworksSetPrivateIpGoogleAccessRequest struct { NullFields []string `json:"-"` } -func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { - type NoMethod SubnetworksSetPrivateIpGoogleAccessRequest +func (s *ShieldedInstanceIntegrityPolicy) MarshalJSON() ([]byte, error) { + type NoMethod ShieldedInstanceIntegrityPolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TCPHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 80. Valid values are 1 through 65535. - Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // Request: The application data to send once the TCP connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. - Request string `json:"request,omitempty"` +// SignedUrlKey: Represents a customer-supplied Signing Key used by +// Cloud CDN Signed URLs +type SignedUrlKey struct { + // KeyName: Name of the key. The name must be 1-63 characters long, and + // comply with RFC1035. Specifically, the name must be 1-63 characters + // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + // which means the first character must be a lowercase letter, and all + // following characters must be a dash, lowercase letter, or digit, + // except the last character, which cannot be a dash. + KeyName string `json:"keyName,omitempty"` - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. - Response string `json:"response,omitempty"` + // KeyValue: 128-bit key value used for signing the URL. The key value + // must be a valid RFC 4648 Section 5 base64url encoded string. + KeyValue string `json:"keyValue,omitempty"` - // ForceSendFields is a list of field names (e.g. "Port") to + // ForceSendFields is a list of field names (e.g. "KeyName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -24477,8 +24729,8 @@ type TCPHealthCheck struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Port") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "KeyName") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
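The SignedUrlKey type added in the hunk above only documents its two fields: KeyName (an RFC1035-style name) and KeyValue (a 128-bit key encoded as RFC 4648 section 5 base64url). A minimal sketch of populating it, assuming the generated package vendored here is imported as google.golang.org/api/compute/v1; the key name is hypothetical and the choice of the unpadded base64url variant is an assumption, not something the doc comment specifies:

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path for this vendored package
)

func main() {
	// 16 random bytes = the 128-bit key value the KeyValue doc comment requires.
	raw := make([]byte, 16)
	if _, err := rand.Read(raw); err != nil {
		panic(err)
	}
	key := &compute.SignedUrlKey{
		KeyName:  "example-cdn-key",                         // hypothetical name; must comply with RFC1035
		KeyValue: base64.RawURLEncoding.EncodeToString(raw), // base64url; unpadded variant is an assumption
	}
	fmt.Printf("%s: %s\n", key.KeyName, key.KeyValue)
}
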
@@ -24486,55 +24738,15 @@ type TCPHealthCheck struct { NullFields []string `json:"-"` } -func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { - type NoMethod TCPHealthCheck - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Tags: A set of instance tags. -type Tags struct { - // Fingerprint: Specifies a fingerprint for this request, which is - // essentially a hash of the tags' contents and used for optimistic - // locking. The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update tags. You must always - // provide an up-to-date fingerprint hash in order to update or change - // tags. - // - // To see the latest fingerprint, make get() request to the instance. - Fingerprint string `json:"fingerprint,omitempty"` - - // Items: An array of tags. Each tag must be 1-63 characters long, and - // comply with RFC1035. - Items []string `json:"items,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Fingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Tags) MarshalJSON() ([]byte, error) { - type NoMethod Tags +func (s *SignedUrlKey) MarshalJSON() ([]byte, error) { + type NoMethod SignedUrlKey raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxy: A TargetHttpProxy resource. This resource defines an -// HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== -// resource_for v1.targetHttpProxies ==) -type TargetHttpProxy struct { +// Snapshot: A persistent disk snapshot resource. (== resource_for +// beta.snapshots ==) (== resource_for v1.snapshots ==) +type Snapshot struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -24543,15 +24755,43 @@ type TargetHttpProxy struct { // property when you create the resource. Description string `json:"description,omitempty"` + // DiskSizeGb: [Output Only] Size of the snapshot, specified in GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy - // for target HTTP proxies. + // Kind: [Output Only] Type of the resource. Always compute#snapshot for + // Snapshot resources. Kind string `json:"kind,omitempty"` - // Name: Name of the resource. Provided by the client when the resource + // LabelFingerprint: A fingerprint for the labels being applied to this + // snapshot, which is essentially a hash of the labels set used for + // optimistic locking. 
The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels, otherwise the request will fail with error + // 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve a + // snapshot. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this snapshot. These can be later modified + // by the setLabels method. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // LicenseCodes: [Output Only] Integer license codes indicating which + // licenses are attached to this snapshot. + LicenseCodes googleapi.Int64s `json:"licenseCodes,omitempty"` + + // Licenses: [Output Only] A list of public visible licenses that apply + // to this snapshot. This can be because the original image had licenses + // attached (such as a Windows image). + Licenses []string `json:"licenses,omitempty"` + + // Name: Name of the resource; provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means @@ -24563,9 +24803,67 @@ type TargetHttpProxy struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // UrlMap: URL to the UrlMap resource that defines the mapping from URL - // to the BackendService. - UrlMap string `json:"urlMap,omitempty"` + // SnapshotEncryptionKey: Encrypts the snapshot using a + // customer-supplied encryption key. + // + // After you encrypt a snapshot using a customer-supplied key, you must + // provide the same key if you use the snapshot later. For example, you + // must provide the encryption key when you create a disk from the + // encrypted snapshot in a future request. + // + // Customer-supplied encryption keys do not protect access to metadata + // of the snapshot. + // + // If you do not provide an encryption key when creating the snapshot, + // then the snapshot will be encrypted using an automatically generated + // key and you do not need to provide a key to use the snapshot later. + SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` + + // SourceDisk: [Output Only] The source disk used to create this + // snapshot. + SourceDisk string `json:"sourceDisk,omitempty"` + + // SourceDiskEncryptionKey: The customer-supplied encryption key of the + // source disk. Required if the source disk is protected by a + // customer-supplied encryption key. + SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` + + // SourceDiskId: [Output Only] The ID value of the disk used to create + // this snapshot. This value may be used to determine whether the + // snapshot was taken from the current or a previous instance of a given + // disk name. + SourceDiskId string `json:"sourceDiskId,omitempty"` + + // Status: [Output Only] The status of the snapshot. This can be + // CREATING, DELETING, FAILED, READY, or UPLOADING. + // + // Possible values: + // "CREATING" + // "DELETING" + // "FAILED" + // "READY" + // "UPLOADING" + Status string `json:"status,omitempty"` + + // StorageBytes: [Output Only] A size of the storage used by the + // snapshot. 
As snapshots share storage, this number is expected to + // change with snapshot creation/deletion. + StorageBytes int64 `json:"storageBytes,omitempty,string"` + + // StorageBytesStatus: [Output Only] An indicator whether storageBytes + // is in a stable state or it is being adjusted as a result of shared + // storage reallocation. This status can either be UPDATING, meaning the + // size of the snapshot is being updated, or UP_TO_DATE, meaning the + // size of the snapshot is up-to-date. + // + // Possible values: + // "UPDATING" + // "UP_TO_DATE" + StorageBytesStatus string `json:"storageBytesStatus,omitempty"` + + // StorageLocations: GCS bucket storage location of the snapshot + // (regional or multi-regional). + StorageLocations []string `json:"storageLocations,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -24589,23 +24887,22 @@ type TargetHttpProxy struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxy +func (s *Snapshot) MarshalJSON() ([]byte, error) { + type NoMethod Snapshot raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxyList: A list of TargetHttpProxy resources. -type TargetHttpProxyList struct { +// SnapshotList: Contains a list of Snapshot resources. +type SnapshotList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetHttpProxy resources. - Items []*TargetHttpProxy `json:"items,omitempty"` + // Items: A list of Snapshot resources. + Items []*Snapshot `json:"items,omitempty"` - // Kind: Type of resource. Always compute#targetHttpProxyList for lists - // of target HTTP proxies. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -24620,7 +24917,7 @@ type TargetHttpProxyList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetHttpProxyListWarning `json:"warning,omitempty"` + Warning *SnapshotListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -24643,15 +24940,14 @@ type TargetHttpProxyList struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxyList +func (s *SnapshotList) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxyListWarning: [Output Only] Informational warning -// message. -type TargetHttpProxyListWarning struct { +// SnapshotListWarning: [Output Only] Informational warning message. +type SnapshotListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -24685,7 +24981,7 @@ type TargetHttpProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetHttpProxyListWarningData `json:"data,omitempty"` + Data []*SnapshotListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
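Nearly every struct in these hunks repeats the same ForceSendFields / NullFields contract in its doc comments. A small sketch of what that contract means in practice, using the Snapshot type and its MarshalJSON method introduced above; the import path is assumed to be google.golang.org/api/compute/v1, the snapshot name is hypothetical, and the printed JSON is only indicative:

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path for this vendored package
)

func main() {
	snap := &compute.Snapshot{
		Name:   "example-snapshot", // hypothetical resource name
		Labels: map[string]string{"env": "test"},
		// Description is empty, so by default it would be omitted from the
		// marshalled body. Listing it in NullFields sends an explicit JSON
		// null; listing it in ForceSendFields would send "" instead.
		NullFields: []string{"Description"},
	}
	body, err := snap.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // e.g. {"description":null,"labels":{"env":"test"},"name":"example-snapshot"}
}
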
@@ -24708,13 +25004,13 @@ type TargetHttpProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxyListWarning +func (s *SnapshotListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpProxyListWarningData struct { +type SnapshotListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -24745,22 +25041,23 @@ type TargetHttpProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpProxyListWarningData +func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpsProxiesSetQuicOverrideRequest struct { - // QuicOverride: QUIC policy for the TargetHttpsProxy resource. - // - // Possible values: - // "DISABLE" - // "ENABLE" - // "NONE" - QuicOverride string `json:"quicOverride,omitempty"` +// SourceInstanceParams: A specification of the parameters to use when +// creating the instance template from a source instance. +type SourceInstanceParams struct { + // DiskConfigs: Attached disks configuration. If not provided, defaults + // are applied: For boot disk and any other R/W disks, new custom images + // will be created from each disk. For read-only disks, they will be + // attached in read-only mode. Local SSD disks will be created as blank + // volumes. + DiskConfigs []*DiskInstantiationConfig `json:"diskConfigs,omitempty"` - // ForceSendFields is a list of field names (e.g. "QuicOverride") to + // ForceSendFields is a list of field names (e.g. "DiskConfigs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -24768,7 +25065,7 @@ type TargetHttpsProxiesSetQuicOverrideRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "QuicOverride") to include + // NullFields is a list of field names (e.g. "DiskConfigs") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -24777,46 +25074,22 @@ type TargetHttpsProxiesSetQuicOverrideRequest struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesSetQuicOverrideRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxiesSetQuicOverrideRequest +func (s *SourceInstanceParams) MarshalJSON() ([]byte, error) { + type NoMethod SourceInstanceParams raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpsProxiesSetSslCertificatesRequest struct { - // SslCertificates: New set of SslCertificate resources to associate - // with this TargetHttpsProxy resource. Currently exactly one - // SslCertificate resource must be specified. 
- SslCertificates []string `json:"sslCertificates,omitempty"` - - // ForceSendFields is a list of field names (e.g. "SslCertificates") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "SslCertificates") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxiesSetSslCertificatesRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +// SslCertificate: An SslCertificate resource. This resource provides a +// mechanism to upload an SSL key and certificate to the load balancer +// to serve secure connections from the user. (== resource_for +// beta.sslCertificates ==) (== resource_for v1.sslCertificates ==) +type SslCertificate struct { + // Certificate: A local certificate file. The certificate must be in PEM + // format. The certificate chain must be no greater than 5 certs long. + // The chain must include at least one intermediate cert. + Certificate string `json:"certificate,omitempty"` -// TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines -// an HTTPS proxy. (== resource_for beta.targetHttpsProxies ==) (== -// resource_for v1.targetHttpsProxies ==) -type TargetHttpsProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -24829,8 +25102,8 @@ type TargetHttpsProxy struct { // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of resource. Always compute#targetHttpsProxy - // for target HTTPS proxies. + // Kind: [Output Only] Type of the resource. Always + // compute#sslCertificate for SSL certificates. Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -24842,83 +25115,50 @@ type TargetHttpsProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // QuicOverride: Specifies the QUIC override policy for this - // TargetHttpsProxy resource. This determines whether the load balancer - // will attempt to negotiate QUIC with clients or not. Can specify one - // of NONE, ENABLE, or DISABLE. Specify ENABLE to always enable QUIC, - // Enables QUIC when set to ENABLE, and disables QUIC when set to - // DISABLE. If NONE is specified, uses the QUIC policy with no user - // overrides, which is equivalent to DISABLE. Not specifying this field - // is equivalent to specifying NONE. - // - // Possible values: - // "DISABLE" - // "ENABLE" - // "NONE" - QuicOverride string `json:"quicOverride,omitempty"` + // PrivateKey: A write-only private key in PEM format. Only insert + // requests will include this field. 
+ PrivateKey string `json:"privateKey,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // SelfLink: [Output only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SslCertificates: URLs to SslCertificate resources that are used to - // authenticate connections between users and the load balancer. At - // least one SSL certificate must be specified. Currently, you may - // specify up to 15 SSL certificates. - SslCertificates []string `json:"sslCertificates,omitempty"` - - // SslPolicy: URL of SslPolicy resource that will be associated with the - // TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource - // will not have any SSL policy configured. - SslPolicy string `json:"sslPolicy,omitempty"` - - // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource - // that defines the mapping from URL to the BackendService. For example, - // the following are all valid URLs for specifying a URL map: - // - - // https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map - // - projects/project/global/urlMaps/url-map - // - global/urlMaps/url-map - UrlMap string `json:"urlMap,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Certificate") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Certificate") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxy +func (s *SslCertificate) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificate raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. -type TargetHttpsProxyList struct { +// SslCertificateList: Contains a list of SslCertificate resources. +type SslCertificateList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetHttpsProxy resources. 
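As described above, SslCertificate carries the PEM certificate chain and a write-only private key that is only sent on insert. A hedged sketch of filling it in; the file paths and resource name are placeholders, and the google.golang.org/api/compute/v1 import path is assumed.

package main

import (
	"fmt"
	"os"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	certPEM, err := os.ReadFile("server.crt") // PEM chain, at most 5 certificates
	if err != nil {
		panic(err)
	}
	keyPEM, err := os.ReadFile("server.key") // write-only: included in insert requests only
	if err != nil {
		panic(err)
	}
	cert := &compute.SslCertificate{
		Name:        "example-cert", // placeholder
		Certificate: string(certPEM),
		PrivateKey:  string(keyPEM),
	}
	body, err := cert.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Printf("insert body is %d bytes\n", len(body)) // avoid printing the key
}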
- Items []*TargetHttpsProxy `json:"items,omitempty"` + // Items: A list of SslCertificate resources. + Items []*SslCertificate `json:"items,omitempty"` - // Kind: Type of resource. Always compute#targetHttpsProxyList for lists - // of target HTTPS proxies. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -24933,7 +25173,7 @@ type TargetHttpsProxyList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetHttpsProxyListWarning `json:"warning,omitempty"` + Warning *SslCertificateListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -24956,15 +25196,15 @@ type TargetHttpsProxyList struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxyList +func (s *SslCertificateList) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxyListWarning: [Output Only] Informational warning +// SslCertificateListWarning: [Output Only] Informational warning // message. -type TargetHttpsProxyListWarning struct { +type SslCertificateListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -24998,7 +25238,7 @@ type TargetHttpsProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetHttpsProxyListWarningData `json:"data,omitempty"` + Data []*SslCertificateListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -25021,13 +25261,13 @@ type TargetHttpsProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxyListWarning +func (s *SslCertificateListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpsProxyListWarningData struct { +type SslCertificateListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -25058,104 +25298,22 @@ type TargetHttpsProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetHttpsProxyListWarningData +func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslCertificateListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstance: A TargetInstance resource. This resource defines an -// endpoint instance that terminates traffic of certain protocols. (== -// resource_for beta.targetInstances ==) (== resource_for -// v1.targetInstances ==) -type TargetInstance struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. 
- CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Instance: A URL to the virtual machine instance that handles traffic - // for this target instance. When creating a target instance, you can - // provide the fully-qualified URL or a valid partial URL to the desired - // virtual machine. For example, the following are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance - // - projects/project/zones/zone/instances/instance - // - zones/zone/instances/instance - Instance string `json:"instance,omitempty"` - - // Kind: [Output Only] The type of the resource. Always - // compute#targetInstance for target instances. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // NatPolicy: NAT option controlling how IPs are NAT'ed to the instance. - // Currently only NO_NAT (default value) is supported. - // - // Possible values: - // "NO_NAT" - NatPolicy string `json:"natPolicy,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Zone: [Output Only] URL of the zone where the target instance - // resides. You must specify this field as part of the HTTP request URL. - // It is not settable as a field in the request body. - Zone string `json:"zone,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *TargetInstance) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstance - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetInstanceAggregatedList struct { +type SslPoliciesList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetInstance resources. - Items map[string]TargetInstancesScopedList `json:"items,omitempty"` + // Items: A list of SslPolicy resources. + Items []*SslPolicy `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of the resource. Always + // compute#sslPoliciesList for lists of sslPolicies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -25170,7 +25328,7 @@ type TargetInstanceAggregatedList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetInstanceAggregatedListWarning `json:"warning,omitempty"` + Warning *SslPoliciesListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -25193,15 +25351,14 @@ type TargetInstanceAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceAggregatedList +func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstanceAggregatedListWarning: [Output Only] Informational -// warning message. -type TargetInstanceAggregatedListWarning struct { +// SslPoliciesListWarning: [Output Only] Informational warning message. +type SslPoliciesListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -25235,7 +25392,7 @@ type TargetInstanceAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetInstanceAggregatedListWarningData `json:"data,omitempty"` + Data []*SslPoliciesListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -25258,13 +25415,13 @@ type TargetInstanceAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceAggregatedListWarning +func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstanceAggregatedListWarningData struct { +type SslPoliciesListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -25295,68 +25452,153 @@ type TargetInstanceAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceAggregatedListWarningData +func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstanceList: Contains a list of TargetInstance resources. -type TargetInstanceList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the +type SslPoliciesListAvailableFeaturesResponse struct { + Features []string `json:"features,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the // server. - Id string `json:"id,omitempty"` + googleapi.ServerResponse `json:"-"` - // Items: A list of TargetInstance resources. - Items []*TargetInstance `json:"items,omitempty"` + // ForceSendFields is a list of field names (e.g. "Features") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Kind: Type of resource. + // NullFields is a list of field names (e.g. "Features") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListAvailableFeaturesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SslPolicy: A SSL policy specifies the server-side support for SSL +// features. This can be attached to a TargetHttpsProxy or a +// TargetSslProxy. This affects connections between clients and the +// HTTPS or SSL proxy load balancer. They do not affect the connection +// between the load balancers and the backends. +type SslPolicy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // CustomFeatures: A list of features enabled when the selected profile + // is CUSTOM. The + // - method returns the set of features that can be specified in this + // list. This field must be empty if the profile is not CUSTOM. + CustomFeatures []string `json:"customFeatures,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // EnabledFeatures: [Output Only] The list of features enabled in the + // SSL policy. + EnabledFeatures []string `json:"enabledFeatures,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. 
This field is used in optimistic locking. This + // field will be ignored when inserting a SslPolicy. An up-to-date + // fingerprint must be provided in order to update the SslPolicy, + // otherwise the request will fail with error 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve an + // SslPolicy. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output only] Type of the resource. Always compute#sslPolicyfor + // SSL policies. Kind string `json:"kind,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // MinTlsVersion: The minimum version of SSL protocol that can be used + // by the clients to establish a connection with the load balancer. This + // can be one of TLS_1_0, TLS_1_1, TLS_1_2. + // + // Possible values: + // "TLS_1_0" + // "TLS_1_1" + // "TLS_1_2" + MinTlsVersion string `json:"minTlsVersion,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. + // Name: Name of the resource. The name must be 1-63 characters long, + // and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + // Profile: Profile specifies the set of SSL features that can be used + // by the load balancer when negotiating SSL with clients. This can be + // one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, + // the set of SSL features to enable must be specified in the + // customFeatures field. + // + // Possible values: + // "COMPATIBLE" + // "CUSTOM" + // "MODERN" + // "RESTRICTED" + Profile string `json:"profile,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *TargetInstanceListWarning `json:"warning,omitempty"` + // Warnings: [Output Only] If potential misconfigurations are detected + // for this SSL policy, this field will be populated with warning + // messages. + Warnings []*SslPolicyWarnings `json:"warnings,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
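SslPolicy above ties three things together: a CUSTOM profile must enumerate its features in CustomFeatures, minTlsVersion bounds the protocol version, and an update must echo the current Fingerprint or fail with 412 conditionNotMet. A hedged sketch; the cipher-feature name, policy name, and import path are assumptions.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	policy := &compute.SslPolicy{
		Name:          "example-ssl-policy", // placeholder
		MinTlsVersion: "TLS_1_2",
		Profile:       "CUSTOM",
		// With Profile CUSTOM the enabled features must be listed explicitly;
		// for any other profile this list must stay empty.
		CustomFeatures: []string{
			"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // assumed feature name
		},
		// For an update, copy Fingerprint from a prior get() so the server's
		// optimistic-locking check passes.
		Fingerprint: "fingerprint-from-get", // placeholder
	}
	body, err := policy.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}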
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceList +func (s *SslPolicy) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstanceListWarning: [Output Only] Informational warning -// message. -type TargetInstanceListWarning struct { +type SslPolicyWarnings struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -25390,7 +25632,7 @@ type TargetInstanceListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetInstanceListWarningData `json:"data,omitempty"` + Data []*SslPolicyWarningsData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -25413,13 +25655,13 @@ type TargetInstanceListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstanceListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceListWarning +func (s *SslPolicyWarnings) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicyWarnings raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstanceListWarningData struct { +type SslPolicyWarningsData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -25450,21 +25692,19 @@ type TargetInstanceListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstanceListWarningData +func (s *SslPolicyWarningsData) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicyWarningsData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstancesScopedList struct { - // TargetInstances: A list of target instances contained in this scope. - TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` - - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"` +type SslPolicyReference struct { + // SslPolicy: URL of the SSL policy resource. 
Set this to empty string + // to clear any existing SSL policy associated with the target proxy + // resource. + SslPolicy string `json:"sslPolicy,omitempty"` - // ForceSendFields is a list of field names (e.g. "TargetInstances") to + // ForceSendFields is a list of field names (e.g. "SslPolicy") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -25472,7 +25712,115 @@ type TargetInstancesScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "TargetInstances") to + // NullFields is a list of field names (e.g. "SslPolicy") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPolicyReference) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicyReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Subnetwork: A Subnetwork resource. (== resource_for beta.subnetworks +// ==) (== resource_for v1.subnetworks ==) +type Subnetwork struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. This field can be set only at + // resource creation time. + Description string `json:"description,omitempty"` + + // EnableFlowLogs: Whether to enable flow logging for this subnetwork. + // If this field is not explicitly set, it will not appear in get + // listings. If not set the default behavior is to disable flow logging. + EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a Subnetwork. An up-to-date + // fingerprint must be provided in order to update the Subnetwork, + // otherwise the request will fail with error 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve a + // Subnetwork. + Fingerprint string `json:"fingerprint,omitempty"` + + // GatewayAddress: [Output Only] The gateway address for default routes + // to reach destination addresses outside this subnetwork. + GatewayAddress string `json:"gatewayAddress,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // IpCidrRange: The range of internal addresses that are owned by this + // subnetwork. Provide this property when you create the subnetwork. For + // example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and + // non-overlapping within a network. Only IPv4 is supported. This field + // can be set only at resource creation time. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#subnetwork + // for Subnetwork resources. 
+ Kind string `json:"kind,omitempty"` + + // Name: The name of the resource, provided by the client when initially + // creating the resource. The name must be 1-63 characters long, and + // comply with RFC1035. Specifically, the name must be 1-63 characters + // long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + // which means the first character must be a lowercase letter, and all + // following characters must be a dash, lowercase letter, or digit, + // except the last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: The URL of the network to which this subnetwork belongs, + // provided by the client when initially creating the subnetwork. Only + // networks that are in the distributed mode can have subnetworks. This + // field can be set only at resource creation time. + Network string `json:"network,omitempty"` + + // PrivateIpGoogleAccess: Whether the VMs in this subnet can access + // Google services without assigned external IP addresses. This field + // can be both set at resource creation time and updated using + // setPrivateIpGoogleAccess. + PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` + + // Region: URL of the region where the Subnetwork resides. This field + // can be set only at resource creation time. + Region string `json:"region,omitempty"` + + // SecondaryIpRanges: An array of configurations for secondary IP ranges + // for VM instances contained in this subnetwork. The primary IP of such + // VM must belong to the primary ipCidrRange of the subnetwork. The + // alias IPs may belong to either primary or secondary ranges. This + // field can be updated with a patch request. + SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -25482,15 +25830,68 @@ type TargetInstancesScopedList struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstancesScopedList +func (s *Subnetwork) MarshalJSON() ([]byte, error) { + type NoMethod Subnetwork raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstancesScopedListWarning: Informational warning which -// replaces the list of addresses when the list is empty. -type TargetInstancesScopedListWarning struct { +type SubnetworkAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of SubnetworksScopedList resources. 
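The Subnetwork fields above distinguish creation-time settings (ipCidrRange, network) from patchable ones (secondaryIpRanges) and flags changed through dedicated methods (privateIpGoogleAccess via setPrivateIpGoogleAccess). A hedged sketch of a request body; names, CIDRs, and the partial network URL are placeholders, and the import path is assumed.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	subnet := &compute.Subnetwork{
		Name:        "example-subnet",                                        // placeholder
		Network:     "projects/example-project/global/networks/example-net", // partial URL placeholder
		IpCidrRange: "10.10.0.0/20",                                          // primary range, creation-time only
		SecondaryIpRanges: []*compute.SubnetworkSecondaryRange{
			// Named secondary range for alias IPs; must not overlap other ranges.
			{RangeName: "pods", IpCidrRange: "10.20.0.0/16"},
		},
		PrivateIpGoogleAccess: true,
		EnableFlowLogs:        true,
		// Both flags are booleans, so a deliberate false would need to be
		// named in ForceSendFields to reach the server.
	}
	body, err := subnet.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}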
+ Items map[string]SubnetworksScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#subnetworkAggregatedList for aggregated lists of subnetworks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SubnetworkAggregatedListWarning: [Output Only] Informational warning +// message. +type SubnetworkAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -25524,7 +25925,7 @@ type TargetInstancesScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"` + Data []*SubnetworkAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -25547,13 +25948,13 @@ type TargetInstancesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstancesScopedListWarning +func (s *SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstancesScopedListWarningData struct { +type SubnetworkAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -25584,168 +25985,24 @@ type TargetInstancesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetInstancesScopedListWarningData +func (s *SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPool: A TargetPool resource. This resource defines a pool of -// instances, an associated HttpHealthCheck resource, and the fallback -// target pool. (== resource_for beta.targetPools ==) (== resource_for -// v1.targetPools ==) -type TargetPool struct { - // BackupPool: This field is applicable only when the containing target - // pool is serving a forwarding rule as the primary pool, and its - // failoverRatio field is properly set to a value between [0, - // 1]. - // - // backupPool and failoverRatio together define the fallback behavior of - // the primary target pool: if the ratio of the healthy instances in the - // primary pool is at or below failoverRatio, traffic arriving at the - // load-balanced IP will be directed to the backup pool. - // - // In case where failoverRatio and backupPool are not set, or all the - // instances in the backup pool are unhealthy, the traffic will be - // directed back to the primary pool in the "force" mode, where traffic - // will be spread to the healthy instances with the best effort, or to - // all instances when no instance is healthy. - BackupPool string `json:"backupPool,omitempty"` +// SubnetworkList: Contains a list of Subnetwork resources. +type SubnetworkList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` + // Items: A list of Subnetwork resources. + Items []*Subnetwork `json:"items,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // FailoverRatio: This field is applicable only when the containing - // target pool is serving a forwarding rule as the primary pool (i.e., - // not as a backup pool to some other target pool). The value of the - // field must be in [0, 1]. - // - // If set, backupPool must also be set. They together define the - // fallback behavior of the primary target pool: if the ratio of the - // healthy instances in the primary pool is at or below this number, - // traffic arriving at the load-balanced IP will be directed to the - // backup pool. - // - // In case where failoverRatio is not set or all the instances in the - // backup pool are unhealthy, the traffic will be directed back to the - // primary pool in the "force" mode, where traffic will be spread to the - // healthy instances with the best effort, or to all instances when no - // instance is healthy. - FailoverRatio float64 `json:"failoverRatio,omitempty"` - - // HealthChecks: The URL of the HttpHealthCheck resource. A member - // instance in this pool is considered healthy if and only if the health - // checks pass. An empty list means all member instances will be - // considered healthy at all times. 
Only HttpHealthChecks are supported. - // Only one health check may be specified. - HealthChecks []string `json:"healthChecks,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Instances: A list of resource URLs to the virtual machine instances - // serving this pool. They must live in zones contained in the same - // region as this pool. - Instances []string `json:"instances,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#targetPool - // for target pools. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Region: [Output Only] URL of the region where the target pool - // resides. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SessionAffinity: Session affinity option, must be one of the - // following values: - // NONE: Connections from the same client IP may go to any instance in - // the pool. - // CLIENT_IP: Connections from the same client IP will go to the same - // instance in the pool while that instance remains - // healthy. - // CLIENT_IP_PROTO: Connections from the same client IP with the same IP - // protocol will go to the same instance in the pool while that instance - // remains healthy. - // - // Possible values: - // "CLIENT_IP" - // "CLIENT_IP_PORT_PROTO" - // "CLIENT_IP_PROTO" - // "GENERATED_COOKIE" - // "NONE" - SessionAffinity string `json:"sessionAffinity,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "BackupPool") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "BackupPool") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *TargetPool) MarshalJSON() ([]byte, error) { - type NoMethod TargetPool - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *TargetPool) UnmarshalJSON(data []byte) error { - type NoMethod TargetPool - var s1 struct { - FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` - *NoMethod - } - s1.NoMethod = (*NoMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.FailoverRatio = float64(s1.FailoverRatio) - return nil -} - -type TargetPoolAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetPool resources. - Items map[string]TargetPoolsScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#targetPoolAggregatedList for aggregated lists of target - // pools. - Kind string `json:"kind,omitempty"` + // Kind: [Output Only] Type of resource. Always compute#subnetworkList + // for lists of subnetworks. + Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next // page of results for list requests. If the number of results is larger @@ -25759,7 +26016,7 @@ type TargetPoolAggregatedList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetPoolAggregatedListWarning `json:"warning,omitempty"` + Warning *SubnetworkListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -25782,15 +26039,14 @@ type TargetPoolAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolAggregatedList +func (s *SubnetworkList) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolAggregatedListWarning: [Output Only] Informational warning -// message. -type TargetPoolAggregatedListWarning struct { +// SubnetworkListWarning: [Output Only] Informational warning message. +type SubnetworkListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -25824,7 +26080,7 @@ type TargetPoolAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetPoolAggregatedListWarningData `json:"data,omitempty"` + Data []*SubnetworkListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -25847,13 +26103,13 @@ type TargetPoolAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolAggregatedListWarning +func (s *SubnetworkListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolAggregatedListWarningData struct { +type SubnetworkListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -25884,25 +26140,29 @@ type TargetPoolAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolAggregatedListWarningData +func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolInstanceHealth struct { - HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#targetPoolInstanceHealth when checking the health of an - // instance. - Kind string `json:"kind,omitempty"` +// SubnetworkSecondaryRange: Represents a secondary IP range of a +// subnetwork. +type SubnetworkSecondaryRange struct { + // IpCidrRange: The range of IP addresses belonging to this subnetwork + // secondary range. Provide this property when you create the + // subnetwork. Ranges must be unique and non-overlapping with all + // primary and secondary IP ranges within a network. Only IPv4 is + // supported. + IpCidrRange string `json:"ipCidrRange,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // RangeName: The name associated with this subnetwork secondary range, + // used when adding an alias IP range to a VM instance. The name must be + // 1-63 characters long, and comply with RFC1035. The name must be + // unique within the subnetwork. + RangeName string `json:"rangeName,omitempty"` - // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -25910,7 +26170,7 @@ type TargetPoolInstanceHealth struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthStatus") to include + // NullFields is a list of field names (e.g. "IpCidrRange") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -25919,44 +26179,52 @@ type TargetPoolInstanceHealth struct { NullFields []string `json:"-"` } -func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolInstanceHealth +func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworkSecondaryRange raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolList: Contains a list of TargetPool resources. -type TargetPoolList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetPool resources. - Items []*TargetPool `json:"items,omitempty"` +type SubnetworksExpandIpCidrRangeRequest struct { + // IpCidrRange: The IP (in CIDR format or netmask) of internal addresses + // that are legal on this Subnetwork. This range should be disjoint from + // other subnetworks within this network. 
This range can only be larger + // than (i.e. a superset of) the range previously defined before the + // update. + IpCidrRange string `json:"ipCidrRange,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#targetPoolList - // for lists of target pools. - Kind string `json:"kind,omitempty"` + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksExpandIpCidrRangeRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // Warning: [Output Only] Informational warning message. - Warning *TargetPoolListWarning `json:"warning,omitempty"` +type SubnetworksScopedList struct { + // Subnetworks: A list of subnetworks contained in this scope. + Subnetworks []*Subnetwork `json:"subnetworks,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: An informational warning that appears when the list of + // addresses is empty. + Warning *SubnetworksScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Subnetworks") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -25964,23 +26232,24 @@ type TargetPoolList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Subnetworks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetPoolList) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolList +func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolListWarning: [Output Only] Informational warning message. -type TargetPoolListWarning struct { +// SubnetworksScopedListWarning: An informational warning that appears +// when the list of addresses is empty. +type SubnetworksScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -26014,7 +26283,7 @@ type TargetPoolListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetPoolListWarningData `json:"data,omitempty"` + Data []*SubnetworksScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -26037,13 +26306,13 @@ type TargetPoolListWarning struct { NullFields []string `json:"-"` } -func (s *TargetPoolListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolListWarning +func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolListWarningData struct { +type SubnetworksScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -26074,50 +26343,97 @@ type TargetPoolListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolListWarningData +func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsAddHealthCheckRequest struct { - // HealthChecks: The HttpHealthCheck to add to the target pool. - HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` +type SubnetworksSetPrivateIpGoogleAccessRequest struct { + PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` - // ForceSendFields is a list of field names (e.g. "HealthChecks") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "PrivateIpGoogleAccess") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. 
+ // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthChecks") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "PrivateIpGoogleAccess") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsAddHealthCheckRequest +func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { + type NoMethod SubnetworksSetPrivateIpGoogleAccessRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsAddInstanceRequest struct { - // Instances: A full or partial URL to an instance to add to this target - // pool. This can be a full or partial URL. For example, the following - // are valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name - // - projects/project-id/zones/zone/instances/instance-name - // - zones/zone/instances/instance-name - Instances []*InstanceReference `json:"instances,omitempty"` +type TCPHealthCheck struct { + // Port: The TCP port number for the health check request. The default + // value is 80. Valid values are 1 through 65535. + Port int64 `json:"port,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` + + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in + // port + // is used for health checking. + // USE_NAMED_PORT: The + // portName + // is used for health checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. + // + // + // If not specified, TCP health check follows behavior specified + // in + // port + // and + // portName + // fields. + // + // Possible values: + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. 
+ // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // Request: The application data to send once the TCP connection has + // been established (default value is empty). If both request and + // response are empty, the connection establishment alone will indicate + // health. The request data can only be ASCII. + Request string `json:"request,omitempty"` + + // Response: The bytes to match against the beginning of the response + // data. If left empty (the default value), any response will indicate + // health. The response data can only be ASCII. + Response string `json:"response,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Port") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -26125,8 +26441,8 @@ type TargetPoolsAddInstanceRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Port") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -26134,22 +26450,29 @@ type TargetPoolsAddInstanceRequest struct { NullFields []string `json:"-"` } -func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsAddInstanceRequest +func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { + type NoMethod TCPHealthCheck raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsRemoveHealthCheckRequest struct { - // HealthChecks: Health check URL to be removed. This can be a full or - // valid partial URL. For example, the following are valid URLs: - // - - // https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check - // - projects/project/global/httpHealthChecks/health-check - // - global/httpHealthChecks/health-check - HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` +// Tags: A set of instance tags. +type Tags struct { + // Fingerprint: Specifies a fingerprint for this request, which is + // essentially a hash of the tags' contents and used for optimistic + // locking. The fingerprint is initially generated by Compute Engine and + // changes after every request to modify or update tags. You must always + // provide an up-to-date fingerprint hash in order to update or change + // tags. + // + // To see the latest fingerprint, make get() request to the instance. + Fingerprint string `json:"fingerprint,omitempty"` - // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // Items: An array of tags. Each tag must be 1-63 characters long, and + // comply with RFC1035. + Items []string `json:"items,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -26157,7 +26480,7 @@ type TargetPoolsRemoveHealthCheckRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthChecks") to include + // NullFields is a list of field names (e.g. "Fingerprint") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -26166,48 +26489,108 @@ type TargetPoolsRemoveHealthCheckRequest struct { NullFields []string `json:"-"` } -func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsRemoveHealthCheckRequest +func (s *Tags) MarshalJSON() ([]byte, error) { + type NoMethod Tags raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsRemoveInstanceRequest struct { - // Instances: URLs of the instances to be removed from target pool. - Instances []*InstanceReference `json:"instances,omitempty"` +// TargetHttpProxy: A TargetHttpProxy resource. This resource defines an +// HTTP proxy. (== resource_for beta.targetHttpProxies ==) (== +// resource_for v1.targetHttpProxies ==) +type TargetHttpProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy + // for target HTTP proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // UrlMap: URL to the UrlMap resource that defines the mapping from URL + // to the BackendService. + UrlMap string `json:"urlMap,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsRemoveInstanceRequest +func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsScopedList struct { - // TargetPools: A list of target pools contained in this scope. - TargetPools []*TargetPool `json:"targetPools,omitempty"` +// TargetHttpProxyList: A list of TargetHttpProxy resources. +type TargetHttpProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"` + // Items: A list of TargetHttpProxy resources. + Items []*TargetHttpProxy `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "TargetPools") to + // Kind: Type of resource. Always compute#targetHttpProxyList for lists + // of target HTTP proxies. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetHttpProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -26215,24 +26598,24 @@ type TargetPoolsScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "TargetPools") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. 
However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsScopedList +func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolsScopedListWarning: Informational warning which replaces -// the list of addresses when the list is empty. -type TargetPoolsScopedListWarning struct { +// TargetHttpProxyListWarning: [Output Only] Informational warning +// message. +type TargetHttpProxyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -26266,7 +26649,7 @@ type TargetPoolsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"` + Data []*TargetHttpProxyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -26289,13 +26672,13 @@ type TargetPoolsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsScopedListWarning +func (s *TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsScopedListWarningData struct { +type TargetHttpProxyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -26326,76 +26709,22 @@ type TargetPoolsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetPoolsScopedListWarningData +func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpProxyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetReference struct { - Target string `json:"target,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Target") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Target") to include in API - // requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetReference) MarshalJSON() ([]byte, error) { - type NoMethod TargetReference - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetSslProxiesSetBackendServiceRequest struct { - // Service: The URL of the new BackendService resource for the - // targetSslProxy. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Service") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Service") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxiesSetBackendServiceRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetSslProxiesSetProxyHeaderRequest struct { - // ProxyHeader: The new type of proxy header to append before sending - // data to the backend. NONE or PROXY_V1 are allowed. +type TargetHttpsProxiesSetQuicOverrideRequest struct { + // QuicOverride: QUIC policy for the TargetHttpsProxy resource. // // Possible values: + // "DISABLE" + // "ENABLE" // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` + QuicOverride string `json:"quicOverride,omitempty"` - // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // ForceSendFields is a list of field names (e.g. "QuicOverride") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -26403,7 +26732,7 @@ type TargetSslProxiesSetProxyHeaderRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ProxyHeader") to include + // NullFields is a list of field names (e.g. "QuicOverride") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. 
However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -26412,16 +26741,16 @@ type TargetSslProxiesSetProxyHeaderRequest struct { NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxiesSetProxyHeaderRequest +func (s *TargetHttpsProxiesSetQuicOverrideRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesSetQuicOverrideRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxiesSetSslCertificatesRequest struct { - // SslCertificates: New set of URLs to SslCertificate resources to - // associate with this TargetSslProxy. Currently exactly one ssl - // certificate must be specified. +type TargetHttpsProxiesSetSslCertificatesRequest struct { + // SslCertificates: New set of SslCertificate resources to associate + // with this TargetHttpsProxy resource. Currently exactly one + // SslCertificate resource must be specified. SslCertificates []string `json:"sslCertificates,omitempty"` // ForceSendFields is a list of field names (e.g. "SslCertificates") to @@ -26442,16 +26771,16 @@ type TargetSslProxiesSetSslCertificatesRequest struct { NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxiesSetSslCertificatesRequest +func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxiesSetSslCertificatesRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetSslProxy: A TargetSslProxy resource. This resource defines an -// SSL proxy. (== resource_for beta.targetSslProxies ==) (== -// resource_for v1.targetSslProxies ==) -type TargetSslProxy struct { +// TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines +// an HTTPS proxy. (== resource_for beta.targetHttpsProxies ==) (== +// resource_for v1.targetHttpsProxies ==) +type TargetHttpsProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -26464,8 +26793,8 @@ type TargetSslProxy struct { // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always - // compute#targetSslProxy for target SSL proxies. + // Kind: [Output Only] Type of resource. Always compute#targetHttpsProxy + // for target HTTPS proxies. Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -26477,32 +26806,44 @@ type TargetSslProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. + // QuicOverride: Specifies the QUIC override policy for this + // TargetHttpsProxy resource. This determines whether the load balancer + // will attempt to negotiate QUIC with clients or not. Can specify one + // of NONE, ENABLE, or DISABLE. Specify ENABLE to always enable QUIC, + // Enables QUIC when set to ENABLE, and disables QUIC when set to + // DISABLE. If NONE is specified, uses the QUIC policy with no user + // overrides, which is equivalent to DISABLE. Not specifying this field + // is equivalent to specifying NONE. 
// // Possible values: + // "DISABLE" + // "ENABLE" // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` + QuicOverride string `json:"quicOverride,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Service: URL to the BackendService resource. - Service string `json:"service,omitempty"` - // SslCertificates: URLs to SslCertificate resources that are used to - // authenticate connections to Backends. At least one SSL certificate - // must be specified. Currently, you may specify up to 15 SSL - // certificates. + // authenticate connections between users and the load balancer. At + // least one SSL certificate must be specified. Currently, you may + // specify up to 15 SSL certificates. SslCertificates []string `json:"sslCertificates,omitempty"` // SslPolicy: URL of SslPolicy resource that will be associated with the - // TargetSslProxy resource. If not set, the TargetSslProxy resource will - // not have any SSL policy configured. + // TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource + // will not have any SSL policy configured. SslPolicy string `json:"sslPolicy,omitempty"` + // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource + // that defines the mapping from URL to the BackendService. For example, + // the following are all valid URLs for specifying a URL map: + // - + // https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map + // - projects/project/global/urlMaps/url-map + // - global/urlMaps/url-map + UrlMap string `json:"urlMap,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -26525,22 +26866,23 @@ type TargetSslProxy struct { NullFields []string `json:"-"` } -func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxy +func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetSslProxyList: Contains a list of TargetSslProxy resources. -type TargetSslProxyList struct { +// TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. +type TargetHttpsProxyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetSslProxy resources. - Items []*TargetSslProxy `json:"items,omitempty"` + // Items: A list of TargetHttpsProxy resources. + Items []*TargetHttpsProxy `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: Type of resource. Always compute#targetHttpsProxyList for lists + // of target HTTPS proxies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -26555,7 +26897,7 @@ type TargetSslProxyList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetSslProxyListWarning `json:"warning,omitempty"` + Warning *TargetHttpsProxyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. 
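A minimal sketch of how these generated proxy types behave at request-encoding time may help here; the google.golang.org/api/compute/v1 import path, the field values, and the program below are illustrative assumptions, not part of this diff. The generated MarshalJSON methods delegate to gensupport.MarshalJSON, so empty fields are dropped from the request body unless they are named in ForceSendFields, and fields named in NullFields are sent as explicit JSON null, as the field comments above describe.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated package
)

func main() {
	proxy := &compute.TargetHttpsProxy{
		Name:            "example-https-proxy",        // illustrative value
		UrlMap:          "global/urlMaps/example-map", // partial-URL form described in the UrlMap docs
		SslCertificates: []string{"global/sslCertificates/example-cert"},
		// QuicOverride is empty, so it would normally be omitted from the
		// payload; naming it in ForceSendFields includes it anyway.
		ForceSendFields: []string{"QuicOverride"},
	}

	body, err := json.Marshal(proxy) // invokes the generated MarshalJSON shown above
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}

The same ForceSendFields/NullFields convention is repeated on every struct in this file, which is why each type carries the same Patch-oriented comments.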
@@ -26578,15 +26920,15 @@ type TargetSslProxyList struct { NullFields []string `json:"-"` } -func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxyList +func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetSslProxyListWarning: [Output Only] Informational warning +// TargetHttpsProxyListWarning: [Output Only] Informational warning // message. -type TargetSslProxyListWarning struct { +type TargetHttpsProxyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -26620,7 +26962,7 @@ type TargetSslProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetSslProxyListWarningData `json:"data,omitempty"` + Data []*TargetHttpsProxyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -26643,13 +26985,13 @@ type TargetSslProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxyListWarning +func (s *TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxyListWarningData struct { +type TargetHttpsProxyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -26680,76 +27022,17 @@ type TargetSslProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetSslProxyListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetTcpProxiesSetBackendServiceRequest struct { - // Service: The URL of the new BackendService resource for the - // targetTcpProxy. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Service") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Service") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxiesSetBackendServiceRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetTcpProxiesSetProxyHeaderRequest struct { - // ProxyHeader: The new type of proxy header to append before sending - // data to the backend. NONE or PROXY_V1 are allowed. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ProxyHeader") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ProxyHeader") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxiesSetProxyHeaderRequest +func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetHttpsProxyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxy: A TargetTcpProxy resource. This resource defines a -// TCP proxy. (== resource_for beta.targetTcpProxies ==) (== -// resource_for v1.targetTcpProxies ==) -type TargetTcpProxy struct { +// TargetInstance: A TargetInstance resource. This resource defines an +// endpoint instance that terminates traffic of certain protocols. (== +// resource_for beta.targetInstances ==) (== resource_for +// v1.targetInstances ==) +type TargetInstance struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -26762,8 +27045,18 @@ type TargetTcpProxy struct { // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always - // compute#targetTcpProxy for target TCP proxies. + // Instance: A URL to the virtual machine instance that handles traffic + // for this target instance. When creating a target instance, you can + // provide the fully-qualified URL or a valid partial URL to the desired + // virtual machine. For example, the following are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance + // - projects/project/zones/zone/instances/instance + // - zones/zone/instances/instance + Instance string `json:"instance,omitempty"` + + // Kind: [Output Only] The type of the resource. Always + // compute#targetInstance for target instances. Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -26775,20 +27068,20 @@ type TargetTcpProxy struct { // last character, which cannot be a dash. 
Name string `json:"name,omitempty"` - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. + // NatPolicy: NAT option controlling how IPs are NAT'ed to the instance. + // Currently only NO_NAT (default value) is supported. // // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` + // "NO_NAT" + NatPolicy string `json:"natPolicy,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Service: URL to the BackendService resource. - Service string `json:"service,omitempty"` + // Zone: [Output Only] URL of the zone where the target instance + // resides. You must specify this field as part of the HTTP request URL. + // It is not settable as a field in the request body. + Zone string `json:"zone,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -26812,20 +27105,19 @@ type TargetTcpProxy struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxy +func (s *TargetInstance) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstance raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. -type TargetTcpProxyList struct { +type TargetInstanceAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetTcpProxy resources. - Items []*TargetTcpProxy `json:"items,omitempty"` + // Items: A list of TargetInstance resources. + Items map[string]TargetInstancesScopedList `json:"items,omitempty"` // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -26842,7 +27134,7 @@ type TargetTcpProxyList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` + Warning *TargetInstanceAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -26865,15 +27157,15 @@ type TargetTcpProxyList struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyList +func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxyListWarning: [Output Only] Informational warning -// message. -type TargetTcpProxyListWarning struct { +// TargetInstanceAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetInstanceAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -26907,7 +27199,7 @@ type TargetTcpProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` + Data []*TargetInstanceAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
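To make the aggregated-list shape above concrete, here is a minimal consumption sketch under the same assumed import path; the helper name, print format, and sample data are illustrative. Items maps a scope key to a TargetInstancesScopedList, a scope that has no results carries only the informational warning (for example NO_RESULTS_ON_PAGE) in place of instances, and a non-empty NextPageToken means another page should be requested with it as the pageToken query parameter.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path for this generated package
)

// printTargetInstances walks one TargetInstanceAggregatedList page, skipping
// scopes whose warning replaces the (empty) list of target instances.
func printTargetInstances(agg *compute.TargetInstanceAggregatedList) {
	for scope, scoped := range agg.Items {
		if w := scoped.Warning; w != nil && w.Code == "NO_RESULTS_ON_PAGE" {
			continue // empty scope; only the informational warning is present
		}
		for _, ti := range scoped.TargetInstances {
			fmt.Printf("%s: %s -> %s (natPolicy=%s)\n", scope, ti.Name, ti.Instance, ti.NatPolicy)
		}
	}
	if agg.NextPageToken != "" {
		fmt.Println("more results: pass NextPageToken as pageToken in the next request")
	}
}

func main() {
	// Hypothetical page with one empty scope, matching the warning example
	// ("key": "scope", "value": "zones/us-east1-d") used in these comments.
	page := &compute.TargetInstanceAggregatedList{
		Items: map[string]compute.TargetInstancesScopedList{
			"zones/us-east1-d": {
				Warning: &compute.TargetInstancesScopedListWarning{Code: "NO_RESULTS_ON_PAGE"},
			},
		},
	}
	printTargetInstances(page)
}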
@@ -26930,13 +27222,13 @@ type TargetTcpProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyListWarning +func (s *TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetTcpProxyListWarningData struct { +type TargetInstanceAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -26967,110 +27259,22 @@ type TargetTcpProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetVpnGateway: Represents a Target VPN gateway resource. (== -// resource_for beta.targetVpnGateways ==) (== resource_for -// v1.targetVpnGateways ==) -type TargetVpnGateway struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule - // resources. ForwardingRules are created using - // compute.forwardingRules.insert and associated to a VPN gateway. - ForwardingRules []string `json:"forwardingRules,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: URL of the network to which this VPN gateway is attached. - // Provided by the client when the VPN gateway is created. - Network string `json:"network,omitempty"` - - // Region: [Output Only] URL of the region where the target VPN gateway - // resides. You must specify this field as part of the HTTP request URL. - // It is not settable as a field in the request body. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Status: [Output Only] The status of the VPN gateway. - // - // Possible values: - // "CREATING" - // "DELETING" - // "FAILED" - // "READY" - Status string `json:"status,omitempty"` - - // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. - // VpnTunnels are created using compute.vpntunnels.insert method and - // associated to a VPN gateway. 
- Tunnels []string `json:"tunnels,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGateway +func (s *TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewayAggregatedList struct { +// TargetInstanceList: Contains a list of TargetInstance resources. +type TargetInstanceList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetVpnGateway resources. - Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` + // Items: A list of TargetInstance resources. + Items []*TargetInstance `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -27085,7 +27289,7 @@ type TargetVpnGatewayAggregatedList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` + Warning *TargetInstanceListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -27108,15 +27312,15 @@ type TargetVpnGatewayAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayAggregatedList +func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGatewayAggregatedListWarning: [Output Only] Informational -// warning message. -type TargetVpnGatewayAggregatedListWarning struct { +// TargetInstanceListWarning: [Output Only] Informational warning +// message. +type TargetInstanceListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -27150,7 +27354,7 @@ type TargetVpnGatewayAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. 
// For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetVpnGatewayAggregatedListWarningData `json:"data,omitempty"` + Data []*TargetInstanceListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -27173,13 +27377,13 @@ type TargetVpnGatewayAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayAggregatedListWarning +func (s *TargetInstanceListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewayAggregatedListWarningData struct { +type TargetInstanceListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -27210,44 +27414,21 @@ type TargetVpnGatewayAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayAggregatedListWarningData +func (s *TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstanceListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. -type TargetVpnGatewayList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetVpnGateway resources. - Items []*TargetVpnGateway `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *TargetVpnGatewayListWarning `json:"warning,omitempty"` +type TargetInstancesScopedList struct { + // TargetInstances: A list of target instances contained in this scope. + TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "TargetInstances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27255,24 +27436,25 @@ type TargetVpnGatewayList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "TargetInstances") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayList +func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstancesScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGatewayListWarning: [Output Only] Informational warning -// message. -type TargetVpnGatewayListWarning struct { +// TargetInstancesScopedListWarning: Informational warning which +// replaces the list of addresses when the list is empty. +type TargetInstancesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -27306,7 +27488,7 @@ type TargetVpnGatewayListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetVpnGatewayListWarningData `json:"data,omitempty"` + Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -27329,13 +27511,13 @@ type TargetVpnGatewayListWarning struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayListWarning +func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstancesScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewayListWarningData struct { +type TargetInstancesScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -27366,48 +27548,213 @@ type TargetVpnGatewayListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewayListWarningData +func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetInstancesScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewaysScopedList struct { - // TargetVpnGateways: [Output Only] A list of target vpn gateways - // contained in this scope. - TargetVpnGateways []*TargetVpnGateway `json:"targetVpnGateways,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of addresses when the list is empty. - Warning *TargetVpnGatewaysScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "TargetVpnGateways") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "TargetVpnGateways") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. +// TargetPool: A TargetPool resource. This resource defines a pool of +// instances, an associated HttpHealthCheck resource, and the fallback +// target pool. (== resource_for beta.targetPools ==) (== resource_for +// v1.targetPools ==) +type TargetPool struct { + // BackupPool: This field is applicable only when the containing target + // pool is serving a forwarding rule as the primary pool, and its + // failoverRatio field is properly set to a value between [0, + // 1]. + // + // backupPool and failoverRatio together define the fallback behavior of + // the primary target pool: if the ratio of the healthy instances in the + // primary pool is at or below failoverRatio, traffic arriving at the + // load-balanced IP will be directed to the backup pool. + // + // In case where failoverRatio and backupPool are not set, or all the + // instances in the backup pool are unhealthy, the traffic will be + // directed back to the primary pool in the "force" mode, where traffic + // will be spread to the healthy instances with the best effort, or to + // all instances when no instance is healthy. + BackupPool string `json:"backupPool,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. 
+ Description string `json:"description,omitempty"` + + // FailoverRatio: This field is applicable only when the containing + // target pool is serving a forwarding rule as the primary pool (i.e., + // not as a backup pool to some other target pool). The value of the + // field must be in [0, 1]. + // + // If set, backupPool must also be set. They together define the + // fallback behavior of the primary target pool: if the ratio of the + // healthy instances in the primary pool is at or below this number, + // traffic arriving at the load-balanced IP will be directed to the + // backup pool. + // + // In case where failoverRatio is not set or all the instances in the + // backup pool are unhealthy, the traffic will be directed back to the + // primary pool in the "force" mode, where traffic will be spread to the + // healthy instances with the best effort, or to all instances when no + // instance is healthy. + FailoverRatio float64 `json:"failoverRatio,omitempty"` + + // HealthChecks: The URL of the HttpHealthCheck resource. A member + // instance in this pool is considered healthy if and only if the health + // checks pass. An empty list means all member instances will be + // considered healthy at all times. Only HttpHealthChecks are supported. + // Only one health check may be specified. + HealthChecks []string `json:"healthChecks,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Instances: A list of resource URLs to the virtual machine instances + // serving this pool. They must live in zones contained in the same + // region as this pool. + Instances []string `json:"instances,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#targetPool + // for target pools. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Region: [Output Only] URL of the region where the target pool + // resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SessionAffinity: Session affinity option, must be one of the + // following values: + // NONE: Connections from the same client IP may go to any instance in + // the pool. + // CLIENT_IP: Connections from the same client IP will go to the same + // instance in the pool while that instance remains + // healthy. + // CLIENT_IP_PROTO: Connections from the same client IP with the same IP + // protocol will go to the same instance in the pool while that instance + // remains healthy. + // + // Possible values: + // "CLIENT_IP" + // "CLIENT_IP_PORT_PROTO" + // "CLIENT_IP_PROTO" + // "GENERATED_COOKIE" + // "NONE" + SessionAffinity string `json:"sessionAffinity,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"BackupPool") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackupPool") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewaysScopedList +func (s *TargetPool) MarshalJSON() ([]byte, error) { + type NoMethod TargetPool raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGatewaysScopedListWarning: [Output Only] Informational -// warning which replaces the list of addresses when the list is empty. -type TargetVpnGatewaysScopedListWarning struct { +func (s *TargetPool) UnmarshalJSON(data []byte) error { + type NoMethod TargetPool + var s1 struct { + FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FailoverRatio = float64(s1.FailoverRatio) + return nil +} + +type TargetPoolAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetPool resources. + Items map[string]TargetPoolsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#targetPoolAggregatedList for aggregated lists of target + // pools. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetPoolAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolAggregatedListWarning: [Output Only] Informational warning +// message. +type TargetPoolAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -27441,7 +27788,7 @@ type TargetVpnGatewaysScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetVpnGatewaysScopedListWarningData `json:"data,omitempty"` + Data []*TargetPoolAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -27464,13 +27811,13 @@ type TargetVpnGatewaysScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewaysScopedListWarning +func (s *TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewaysScopedListWarningData struct { +type TargetPoolAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -27501,83 +27848,25 @@ type TargetVpnGatewaysScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGatewaysScopedListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TestFailure struct { - ActualService string `json:"actualService,omitempty"` - - ExpectedService string `json:"expectedService,omitempty"` - - Host string `json:"host,omitempty"` - - Path string `json:"path,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ActualService") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ActualService") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *TestFailure) MarshalJSON() ([]byte, error) { - type NoMethod TestFailure +func (s *TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TestPermissionsRequest struct { - // Permissions: The set of permissions to check for the 'resource'. - // Permissions with wildcards (such as '*' or 'storage.*') are not - // allowed. - Permissions []string `json:"permissions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Permissions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TestPermissionsRequest) MarshalJSON() ([]byte, error) { - type NoMethod TestPermissionsRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +type TargetPoolInstanceHealth struct { + HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` -type TestPermissionsResponse struct { - // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is allowed. - Permissions []string `json:"permissions,omitempty"` + // Kind: [Output Only] Type of resource. Always + // compute#targetPoolInstanceHealth when checking the health of an + // instance. + Kind string `json:"kind,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Permissions") to + // ForceSendFields is a list of field names (e.g. "HealthStatus") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27585,7 +27874,7 @@ type TestPermissionsResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Permissions") to include + // NullFields is a list of field names (e.g. "HealthStatus") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -27594,112 +27883,23 @@ type TestPermissionsResponse struct { NullFields []string `json:"-"` } -func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { - type NoMethod TestPermissionsResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// UrlMap: A UrlMap resource. 
This resource defines the mapping from URL -// to the BackendService resource, based on the "longest-match" of the -// URL's host and path. -type UrlMap struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // DefaultService: The URL of the backendService resource if none of the - // hostRules match. - // Use defaultService instead of defaultRouteAction when simple routing - // to a backendService is desired and other advanced capabilities like - // traffic splitting and rewrites are not required. - // Only one of defaultService, defaultRouteAction or defaultUrlRedirect - // should must be set. - DefaultService string `json:"defaultService,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a UrlMap. An up-to-date - // fingerprint must be provided in order to update the UrlMap, otherwise - // the request will fail with error 412 conditionNotMet. - // - // To see the latest fingerprint, make a get() request to retrieve a - // UrlMap. - Fingerprint string `json:"fingerprint,omitempty"` - - // HostRules: The list of HostRules to use against the URL. - HostRules []*HostRule `json:"hostRules,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#urlMaps for - // url maps. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // PathMatchers: The list of named PathMatchers to use against the URL. - PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Tests: The list of expected URL mapping tests. Request to update this - // UrlMap will succeed only if all of the test cases pass. You can - // specify a maximum of 100 tests per UrlMap. - Tests []*UrlMapTest `json:"tests,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. 
By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *UrlMap) MarshalJSON() ([]byte, error) { - type NoMethod UrlMap +func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolInstanceHealth raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapList: Contains a list of UrlMap resources. -type UrlMapList struct { +// TargetPoolList: Contains a list of TargetPool resources. +type TargetPoolList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of UrlMap resources. - Items []*UrlMap `json:"items,omitempty"` + // Items: A list of TargetPool resources. + Items []*TargetPool `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource. Always compute#targetPoolList + // for lists of target pools. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -27714,7 +27914,7 @@ type UrlMapList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *UrlMapListWarning `json:"warning,omitempty"` + Warning *TargetPoolListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -27737,14 +27937,14 @@ type UrlMapList struct { NullFields []string `json:"-"` } -func (s *UrlMapList) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapList +func (s *TargetPoolList) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapListWarning: [Output Only] Informational warning message. -type UrlMapListWarning struct { +// TargetPoolListWarning: [Output Only] Informational warning message. +type TargetPoolListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -27778,7 +27978,7 @@ type UrlMapListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*UrlMapListWarningData `json:"data,omitempty"` + Data []*TargetPoolListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -27801,13 +28001,13 @@ type UrlMapListWarning struct { NullFields []string `json:"-"` } -func (s *UrlMapListWarning) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapListWarning +func (s *TargetPoolListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapListWarningData struct { +type TargetPoolListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -27838,16 +28038,17 @@ type UrlMapListWarningData struct { NullFields []string `json:"-"` } -func (s *UrlMapListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapListWarningData +func (s *TargetPoolListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapReference struct { - UrlMap string `json:"urlMap,omitempty"` +type TargetPoolsAddHealthCheckRequest struct { + // HealthChecks: The HttpHealthCheck to add to the target pool. + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` - // ForceSendFields is a list of field names (e.g. "UrlMap") to + // ForceSendFields is a list of field names (e.g. "HealthChecks") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27855,37 +28056,32 @@ type UrlMapReference struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "UrlMap") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *UrlMapReference) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapReference +func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsAddHealthCheckRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapTest: Message for the expected URL mappings. -type UrlMapTest struct { - // Description: Description of this test case. - Description string `json:"description,omitempty"` - - // Host: Host portion of the URL. - Host string `json:"host,omitempty"` - - // Path: Path portion of the URL. - Path string `json:"path,omitempty"` - - // Service: Expected BackendService resource the given URL should be - // mapped to. - Service string `json:"service,omitempty"` +type TargetPoolsAddInstanceRequest struct { + // Instances: A full or partial URL to an instance to add to this target + // pool. This can be a full or partial URL. For example, the following + // are valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name + // - projects/project-id/zones/zone/instances/instance-name + // - zones/zone/instances/instance-name + Instances []*InstanceReference `json:"instances,omitempty"` - // ForceSendFields is a list of field names (e.g. "Description") to + // ForceSendFields is a list of field names (e.g. "Instances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27893,38 +28089,31 @@ type UrlMapTest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *UrlMapTest) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapTest +func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsAddInstanceRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapValidationResult: Message representing the validation result -// for a UrlMap. -type UrlMapValidationResult struct { - LoadErrors []string `json:"loadErrors,omitempty"` - - // LoadSucceeded: Whether the given UrlMap can be successfully loaded. - // If false, 'loadErrors' indicates the reasons. - LoadSucceeded bool `json:"loadSucceeded,omitempty"` - - TestFailures []*TestFailure `json:"testFailures,omitempty"` - - // TestPassed: If successfully loaded, this field indicates whether the - // test passed. If false, 'testFailures's indicate the reason of - // failure. - TestPassed bool `json:"testPassed,omitempty"` +type TargetPoolsRemoveHealthCheckRequest struct { + // HealthChecks: Health check URL to be removed. This can be a full or + // valid partial URL. For example, the following are valid URLs: + // - + // https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check + // - projects/project/global/httpHealthChecks/health-check + // - global/httpHealthChecks/health-check + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` - // ForceSendFields is a list of field names (e.g. "LoadErrors") to + // ForceSendFields is a list of field names (e.g. "HealthChecks") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27932,26 +28121,26 @@ type UrlMapValidationResult struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LoadErrors") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
// This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapValidationResult +func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsRemoveHealthCheckRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapsValidateRequest struct { - // Resource: Content of the UrlMap to be validated. - Resource *UrlMap `json:"resource,omitempty"` +type TargetPoolsRemoveInstanceRequest struct { + // Instances: URLs of the instances to be removed from target pool. + Instances []*InstanceReference `json:"instances,omitempty"` - // ForceSendFields is a list of field names (e.g. "Resource") to + // ForceSendFields is a list of field names (e.g. "Instances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27959,7 +28148,7 @@ type UrlMapsValidateRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Resource") to include in + // NullFields is a list of field names (e.g. "Instances") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -27968,20 +28157,21 @@ type UrlMapsValidateRequest struct { NullFields []string `json:"-"` } -func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapsValidateRequest +func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsRemoveInstanceRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapsValidateResponse struct { - Result *UrlMapValidationResult `json:"result,omitempty"` +type TargetPoolsScopedList struct { + // TargetPools: A list of target pools contained in this scope. + TargetPools []*TargetPool `json:"targetPools,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "Result") to + // ForceSendFields is a list of field names (e.g. "TargetPools") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -27989,7 +28179,72 @@ type UrlMapsValidateResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Result") to include in API + // NullFields is a list of field names (e.g. "TargetPools") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolsScopedListWarning: Informational warning which replaces +// the list of addresses when the list is empty. +type TargetPoolsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -27998,29 +28253,53 @@ type UrlMapsValidateResponse struct { NullFields []string `json:"-"` } -func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { - type NoMethod UrlMapsValidateResponse +func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsableSubnetwork: Subnetwork which the current user has -// compute.subnetworks.use permission on. -type UsableSubnetwork struct { - // IpCidrRange: The range of internal addresses that are owned by this - // subnetwork. - IpCidrRange string `json:"ipCidrRange,omitempty"` +type TargetPoolsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Network: Network URL. - Network string `json:"network,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // SecondaryIpRanges: Secondary IP ranges. - SecondaryIpRanges []*UsableSubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Subnetwork: Subnetwork URL. - Subnetwork string `json:"subnetwork,omitempty"` + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to +func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetPoolsScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetReference struct { + Target string `json:"target,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Target") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28028,35 +28307,59 @@ type UsableSubnetwork struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Target") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *UsableSubnetwork) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetwork +func (s *TargetReference) MarshalJSON() ([]byte, error) { + type NoMethod TargetReference raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsableSubnetworkSecondaryRange: Secondary IP range of a usable -// subnetwork. 
-type UsableSubnetworkSecondaryRange struct { - // IpCidrRange: The range of IP addresses belonging to this subnetwork - // secondary range. - IpCidrRange string `json:"ipCidrRange,omitempty"` +type TargetSslProxiesSetBackendServiceRequest struct { + // Service: The URL of the new BackendService resource for the + // targetSslProxy. + Service string `json:"service,omitempty"` - // RangeName: The name associated with this subnetwork secondary range, - // used when adding an alias IP range to a VM instance. The name must be - // 1-63 characters long, and comply with RFC1035. The name must be - // unique within the subnetwork. - RangeName string `json:"rangeName,omitempty"` + // ForceSendFields is a list of field names (e.g. "Service") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxiesSetBackendServiceRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetProxyHeaderRequest struct { + // ProxyHeader: The new type of proxy header to append before sending + // data to the backend. NONE or PROXY_V1 are allowed. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProxyHeader") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28064,7 +28367,7 @@ type UsableSubnetworkSecondaryRange struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpCidrRange") to include + // NullFields is a list of field names (e.g. "ProxyHeader") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. 
However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -28073,23 +28376,135 @@ type UsableSubnetworkSecondaryRange struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetworkSecondaryRange +func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxiesSetProxyHeaderRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UsableSubnetworksAggregatedList struct { +type TargetSslProxiesSetSslCertificatesRequest struct { + // SslCertificates: New set of URLs to SslCertificate resources to + // associate with this TargetSslProxy. Currently exactly one ssl + // certificate must be specified. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslCertificates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxiesSetSslCertificatesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetSslProxy: A TargetSslProxy resource. This resource defines an +// SSL proxy. (== resource_for beta.targetSslProxies ==) (== +// resource_for v1.targetSslProxies ==) +type TargetSslProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#targetSslProxy for target SSL proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. 
The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Service: URL to the BackendService resource. + Service string `json:"service,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to + // authenticate connections to Backends. At least one SSL certificate + // must be specified. Currently, you may specify up to 15 SSL + // certificates. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // SslPolicy: URL of SslPolicy resource that will be associated with the + // TargetSslProxy resource. If not set, the TargetSslProxy resource will + // not have any SSL policy configured. + SslPolicy string `json:"sslPolicy,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetSslProxyList: Contains a list of TargetSslProxy resources. +type TargetSslProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` - // Items: [Output] A list of usable subnetwork URLs. - Items []*UsableSubnetwork `json:"items,omitempty"` + // Items: A list of TargetSslProxy resources. + Items []*TargetSslProxy `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#usableSubnetworksAggregatedList for aggregated lists of - // usable subnetworks. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -28104,7 +28519,7 @@ type UsableSubnetworksAggregatedList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *UsableSubnetworksAggregatedListWarning `json:"warning,omitempty"` + Warning *TargetSslProxyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. 
@@ -28127,15 +28542,15 @@ type UsableSubnetworksAggregatedList struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworksAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetworksAggregatedList +func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsableSubnetworksAggregatedListWarning: [Output Only] Informational -// warning message. -type UsableSubnetworksAggregatedListWarning struct { +// TargetSslProxyListWarning: [Output Only] Informational warning +// message. +type TargetSslProxyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -28169,7 +28584,7 @@ type UsableSubnetworksAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*UsableSubnetworksAggregatedListWarningData `json:"data,omitempty"` + Data []*TargetSslProxyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -28192,13 +28607,13 @@ type UsableSubnetworksAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworksAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetworksAggregatedListWarning +func (s *TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UsableSubnetworksAggregatedListWarningData struct { +type TargetSslProxyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -28229,33 +28644,18 @@ type UsableSubnetworksAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *UsableSubnetworksAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod UsableSubnetworksAggregatedListWarningData +func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetSslProxyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsageExportLocation: The location in Cloud Storage and naming method -// of the daily usage report. Contains bucket_name and report_name -// prefix. -type UsageExportLocation struct { - // BucketName: The name of an existing bucket in Cloud Storage where the - // usage report object is stored. The Google Service Account is granted - // write access to this bucket. This can either be the bucket name by - // itself, such as example-bucket, or the bucket name with gs:// or - // https://storage.googleapis.com/ in front of it, such as - // gs://example-bucket. - BucketName string `json:"bucketName,omitempty"` - - // ReportNamePrefix: An optional prefix for the name of the usage report - // object stored in bucketName. If not supplied, defaults to usage. The - // report is stored as a CSV file named - // report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the - // usage according to Pacific Time. If you supply a prefix, it should - // conform to Cloud Storage object naming conventions. 
- ReportNamePrefix string `json:"reportNamePrefix,omitempty"` +type TargetTcpProxiesSetBackendServiceRequest struct { + // Service: The URL of the new BackendService resource for the + // targetTcpProxy. + Service string `json:"service,omitempty"` - // ForceSendFields is a list of field names (e.g. "BucketName") to + // ForceSendFields is a list of field names (e.g. "Service") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28263,7 +28663,7 @@ type UsageExportLocation struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BucketName") to include in + // NullFields is a list of field names (e.g. "Service") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -28272,21 +28672,22 @@ type UsageExportLocation struct { NullFields []string `json:"-"` } -func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { - type NoMethod UsageExportLocation +func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesSetBackendServiceRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VmEndpointNatMappings: Contain information of Nat mapping for a VM -// endpoint (i.e., NIC). -type VmEndpointNatMappings struct { - // InstanceName: Name of the VM instance which the endpoint belongs to - InstanceName string `json:"instanceName,omitempty"` - - InterfaceNatMappings []*VmEndpointNatMappingsInterfaceNatMappings `json:"interfaceNatMappings,omitempty"` +type TargetTcpProxiesSetProxyHeaderRequest struct { + // ProxyHeader: The new type of proxy header to append before sending + // data to the backend. NONE or PROXY_V1 are allowed. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` - // ForceSendFields is a list of field names (e.g. "InstanceName") to + // ForceSendFields is a list of field names (e.g. "ProxyHeader") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28294,7 +28695,7 @@ type VmEndpointNatMappings struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "InstanceName") to include + // NullFields is a list of field names (e.g. "ProxyHeader") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. 
However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -28303,43 +28704,69 @@ type VmEndpointNatMappings struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappings) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappings +func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesSetProxyHeaderRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VmEndpointNatMappingsInterfaceNatMappings: Contain information of Nat -// mapping for an interface of this endpoint. -type VmEndpointNatMappingsInterfaceNatMappings struct { - // NatIpPortRanges: A list of all IP:port-range mappings assigned to - // this interface. These ranges are inclusive, that is, both the first - // and the last ports can be used for NAT. Example: - // ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. - NatIpPortRanges []string `json:"natIpPortRanges,omitempty"` +// TargetTcpProxy: A TargetTcpProxy resource. This resource defines a +// TCP proxy. (== resource_for beta.targetTcpProxies ==) (== +// resource_for v1.targetTcpProxies ==) +type TargetTcpProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // NumTotalNatPorts: Total number of ports across all NAT IPs allocated - // to this interface. It equals to the aggregated port number in the - // field nat_ip_port_ranges. - NumTotalNatPorts int64 `json:"numTotalNatPorts,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // SourceAliasIpRange: Alias IP range for this interface endpoint. It - // will be a private (RFC 1918) IP range. Examples: "10.33.4.55/32", or - // "192.168.5.0/24". - SourceAliasIpRange string `json:"sourceAliasIpRange,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` - // SourceVirtualIp: Primary IP of the VM for this NIC. - SourceVirtualIp string `json:"sourceVirtualIp,omitempty"` + // Kind: [Output Only] Type of the resource. Always + // compute#targetTcpProxy for target TCP proxies. + Kind string `json:"kind,omitempty"` - // ForceSendFields is a list of field names (e.g. "NatIpPortRanges") to - // unconditionally include in API requests. By default, fields with + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Service: URL to the BackendService resource. 
+ Service string `json:"service,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NatIpPortRanges") to + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -28349,21 +28776,22 @@ type VmEndpointNatMappingsInterfaceNatMappings struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsInterfaceNatMappings) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappingsInterfaceNatMappings +func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VmEndpointNatMappingsList: Contains a list of VmEndpointNatMappings. -type VmEndpointNatMappingsList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. +// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. +type TargetTcpProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#vmEndpointNatMappingsList for lists of Nat mappings of VM - // endpoints. + // Items: A list of TargetTcpProxy resources. + Items []*TargetTcpProxy `json:"items,omitempty"` + + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -28374,15 +28802,11 @@ type VmEndpointNatMappingsList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // Result: [Output Only] A list of Nat mapping information of VM - // endpoints. - Result []*VmEndpointNatMappings `json:"result,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *VmEndpointNatMappingsListWarning `json:"warning,omitempty"` + Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -28405,15 +28829,15 @@ type VmEndpointNatMappingsList struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsList) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappingsList +func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VmEndpointNatMappingsListWarning: [Output Only] Informational warning +// TargetTcpProxyListWarning: [Output Only] Informational warning // message. 
-type VmEndpointNatMappingsListWarning struct { +type TargetTcpProxyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -28447,7 +28871,7 @@ type VmEndpointNatMappingsListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*VmEndpointNatMappingsListWarningData `json:"data,omitempty"` + Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -28470,13 +28894,13 @@ type VmEndpointNatMappingsListWarning struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappingsListWarning +func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VmEndpointNatMappingsListWarningData struct { +type TargetTcpProxyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -28507,15 +28931,16 @@ type VmEndpointNatMappingsListWarningData struct { NullFields []string `json:"-"` } -func (s *VmEndpointNatMappingsListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VmEndpointNatMappingsListWarningData +func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnel: VPN tunnel resource. (== resource_for beta.vpnTunnels ==) -// (== resource_for v1.vpnTunnels ==) -type VpnTunnel struct { +// TargetVpnGateway: Represents a Target VPN gateway resource. (== +// resource_for beta.targetVpnGateways ==) (== resource_for +// v1.targetVpnGateways ==) +type TargetVpnGateway struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -28524,29 +28949,19 @@ type VpnTunnel struct { // property when you create the resource. Description string `json:"description,omitempty"` - // DetailedStatus: [Output Only] Detailed status message for the VPN - // tunnel. - DetailedStatus string `json:"detailedStatus,omitempty"` + // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule + // resources. ForwardingRules are created using + // compute.forwardingRules.insert and associated with a VPN gateway. + ForwardingRules []string `json:"forwardingRules,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // IkeVersion: IKE protocol version to use when establishing the VPN - // tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. - // Default version is 2. - IkeVersion int64 `json:"ikeVersion,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. 
Kind string `json:"kind,omitempty"` - // LocalTrafficSelector: Local traffic selector to use when establishing - // the VPN tunnel with peer VPN gateway. The value should be a CIDR - // formatted string, for example: 192.168.0.0/16. The ranges should be - // disjoint. Only IPv4 is supported. - LocalTrafficSelector []string `json:"localTrafficSelector,omitempty"` - // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -28556,54 +28971,32 @@ type VpnTunnel struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. - PeerIp string `json:"peerIp,omitempty"` + // Network: URL of the network to which this VPN gateway is attached. + // Provided by the client when the VPN gateway is created. + Network string `json:"network,omitempty"` - // Region: [Output Only] URL of the region where the VPN tunnel resides. - // You must specify this field as part of the HTTP request URL. It is - // not settable as a field in the request body. + // Region: [Output Only] URL of the region where the target VPN gateway + // resides. You must specify this field as part of the HTTP request URL. + // It is not settable as a field in the request body. Region string `json:"region,omitempty"` - // RemoteTrafficSelector: Remote traffic selectors to use when - // establishing the VPN tunnel with peer VPN gateway. The value should - // be a CIDR formatted string, for example: 192.168.0.0/16. The ranges - // should be disjoint. Only IPv4 is supported. - RemoteTrafficSelector []string `json:"remoteTrafficSelector,omitempty"` - - // Router: URL of router resource to be used for dynamic routing. - Router string `json:"router,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SharedSecret: Shared secret used to set the secure session between - // the Cloud VPN gateway and the peer VPN gateway. - SharedSecret string `json:"sharedSecret,omitempty"` - - // SharedSecretHash: Hash of the shared secret. - SharedSecretHash string `json:"sharedSecretHash,omitempty"` - - // Status: [Output Only] The status of the VPN tunnel. + // Status: [Output Only] The status of the VPN gateway, which can be one + // of the following: CREATING, READY, FAILED, or DELETING. // // Possible values: - // "ALLOCATING_RESOURCES" - // "AUTHORIZATION_ERROR" - // "DEPROVISIONING" - // "ESTABLISHED" + // "CREATING" + // "DELETING" // "FAILED" - // "FIRST_HANDSHAKE" - // "NEGOTIATION_FAILURE" - // "NETWORK_ERROR" - // "NO_INCOMING_PACKETS" - // "PROVISIONING" - // "REJECTED" - // "WAITING_FOR_FULL_CONFIG" + // "READY" Status string `json:"status,omitempty"` - // TargetVpnGateway: URL of the Target VPN gateway with which this VPN - // tunnel is associated. Provided by the client when the VPN tunnel is - // created. - TargetVpnGateway string `json:"targetVpnGateway,omitempty"` + // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. + // VpnTunnels are created using the compute.vpntunnels.insert method and + // associated with a VPN gateway. + Tunnels []string `json:"tunnels,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. 
@@ -28627,22 +29020,22 @@ type VpnTunnel struct { NullFields []string `json:"-"` } -func (s *VpnTunnel) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnel +func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGateway raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelAggregatedList struct { +type TargetVpnGatewayAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of VpnTunnelsScopedList resources. - Items map[string]VpnTunnelsScopedList `json:"items,omitempty"` + // Items: A list of TargetVpnGateway resources. + Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -28657,7 +29050,7 @@ type VpnTunnelAggregatedList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *VpnTunnelAggregatedListWarning `json:"warning,omitempty"` + Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -28680,15 +29073,15 @@ type VpnTunnelAggregatedList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelAggregatedList +func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelAggregatedListWarning: [Output Only] Informational warning -// message. -type VpnTunnelAggregatedListWarning struct { +// TargetVpnGatewayAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetVpnGatewayAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -28722,7 +29115,7 @@ type VpnTunnelAggregatedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*VpnTunnelAggregatedListWarningData `json:"data,omitempty"` + Data []*TargetVpnGatewayAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -28745,13 +29138,13 @@ type VpnTunnelAggregatedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelAggregatedListWarning +func (s *TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelAggregatedListWarningData struct { +type TargetVpnGatewayAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -28782,23 +29175,23 @@ type VpnTunnelAggregatedListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelAggregatedListWarningData +func (s *TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelList: Contains a list of VpnTunnel resources. -type VpnTunnelList struct { +// TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. +type TargetVpnGatewayList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of VpnTunnel resources. - Items []*VpnTunnel `json:"items,omitempty"` + // Items: A list of TargetVpnGateway resources. + Items []*TargetVpnGateway `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -28813,7 +29206,7 @@ type VpnTunnelList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *VpnTunnelListWarning `json:"warning,omitempty"` + Warning *TargetVpnGatewayListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -28836,14 +29229,15 @@ type VpnTunnelList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelList +func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelListWarning: [Output Only] Informational warning message. -type VpnTunnelListWarning struct { +// TargetVpnGatewayListWarning: [Output Only] Informational warning +// message. +type TargetVpnGatewayListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -28877,7 +29271,7 @@ type VpnTunnelListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*VpnTunnelListWarningData `json:"data,omitempty"` + Data []*TargetVpnGatewayListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -28900,13 +29294,13 @@ type VpnTunnelListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelListWarning +func (s *TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelListWarningData struct { +type TargetVpnGatewayListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -28937,46 +29331,48 @@ type VpnTunnelListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelListWarningData +func (s *TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewayListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelsScopedList struct { - // VpnTunnels: A list of vpn tunnels contained in this scope. - VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` +type TargetVpnGatewaysScopedList struct { + // TargetVpnGateways: [Output Only] A list of target VPN gateways + // contained in this scope. + TargetVpnGateways []*TargetVpnGateway `json:"targetVpnGateways,omitempty"` - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *VpnTunnelsScopedListWarning `json:"warning,omitempty"` + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *TargetVpnGatewaysScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "VpnTunnels") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "TargetVpnGateways") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "VpnTunnels") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "TargetVpnGateways") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelsScopedList +func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewaysScopedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelsScopedListWarning: Informational warning which replaces the -// list of addresses when the list is empty. -type VpnTunnelsScopedListWarning struct { +// TargetVpnGatewaysScopedListWarning: [Output Only] Informational +// warning which replaces the list of addresses when the list is empty. +type TargetVpnGatewaysScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. 
For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -29010,7 +29406,7 @@ type VpnTunnelsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*VpnTunnelsScopedListWarningData `json:"data,omitempty"` + Data []*TargetVpnGatewaysScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -29033,13 +29429,13 @@ type VpnTunnelsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelsScopedListWarning +func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewaysScopedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelsScopedListWarningData struct { +type TargetVpnGatewaysScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -29070,107 +29466,22 @@ type VpnTunnelsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod VpnTunnelsScopedListWarningData +func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGatewaysScopedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type XpnHostList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of shared VPC host project URLs. - Items []*Project `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#xpnHostList for - // lists of shared VPC hosts. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *XpnHostListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *XpnHostList) MarshalJSON() ([]byte, error) { - type NoMethod XpnHostList - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +type TestFailure struct { + ActualService string `json:"actualService,omitempty"` -// XpnHostListWarning: [Output Only] Informational warning message. -type XpnHostListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DEPRECATED_TYPE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "EXPERIMENTAL_TYPE_USED" - // "EXTERNAL_API_WARNING" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "MISSING_TYPE_DEPENDENCY" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SCHEMA_VALIDATION_IGNORED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNDECLARED_PROPERTIES" - // "UNREACHABLE" - Code string `json:"code,omitempty"` + ExpectedService string `json:"expectedService,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*XpnHostListWarningData `json:"data,omitempty"` + Host string `json:"host,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + Path string `json:"path,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "ActualService") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -29178,36 +29489,28 @@ type XpnHostListWarning struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "ActualService") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *XpnHostListWarning) MarshalJSON() ([]byte, error) { - type NoMethod XpnHostListWarning +func (s *TestFailure) MarshalJSON() ([]byte, error) { + type NoMethod TestFailure raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type XpnHostListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` +type TestPermissionsRequest struct { + // Permissions: The set of permissions to check for the 'resource'. + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. + Permissions []string `json:"permissions,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "Permissions") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -29215,36 +29518,31 @@ type XpnHostListWarningData struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *XpnHostListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod XpnHostListWarningData +func (s *TestPermissionsRequest) MarshalJSON() ([]byte, error) { + type NoMethod TestPermissionsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// XpnResourceId: Service resource (a.k.a service project) ID. -type XpnResourceId struct { - // Id: The ID of the service resource. In the case of projects, this - // field supports project id (e.g., my-project-123) and project number - // (e.g. 12345678). - Id string `json:"id,omitempty"` +type TestPermissionsResponse struct { + // Permissions: A subset of `TestPermissionsRequest.permissions` that + // the caller is allowed. + Permissions []string `json:"permissions,omitempty"` - // Type: The type of the service resource. - // - // Possible values: - // "PROJECT" - // "XPN_RESOURCE_TYPE_UNSPECIFIED" - Type string `json:"type,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Permissions") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -29252,78 +29550,99 @@ type XpnResourceId struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *XpnResourceId) MarshalJSON() ([]byte, error) { - type NoMethod XpnResourceId +func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { + type NoMethod TestPermissionsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Zone: A Zone resource. (== resource_for beta.zones ==) (== -// resource_for v1.zones ==) -type Zone struct { - // AvailableCpuPlatforms: [Output Only] Available cpu/platform - // selections for the zone. - AvailableCpuPlatforms []string `json:"availableCpuPlatforms,omitempty"` - +// UrlMap: A UrlMap resource. This resource defines the mapping from URL +// to the BackendService resource, based on the "longest-match" of the +// URL's host and path. +type UrlMap struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Deprecated: [Output Only] The deprecation status associated with this - // zone. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + // DefaultService: The full or partial URL of the defaultService + // resource to which traffic is directed if none of the hostRules match. + // If defaultRouteAction is additionally specified, advanced routing + // actions like URL Rewrites, etc. take effect prior to sending the + // request to the backend. However, if defaultService is specified, + // defaultRouteAction cannot contain any weightedBackendServices. + // Conversely, if routeAction specifies any weightedBackendServices, + // service must not be specified. + // Only one of defaultService, defaultUrlRedirect or + // defaultRouteAction.weightedBackendService must be set. + DefaultService string `json:"defaultService,omitempty"` - // Description: [Output Only] Textual description of the resource. + // Description: An optional description of this resource. Provide this + // property when you create the resource. Description string `json:"description,omitempty"` + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a UrlMap. 
An up-to-date + // fingerprint must be provided in order to update the UrlMap, otherwise + // the request will fail with error 412 conditionNotMet. + // + // To see the latest fingerprint, make a get() request to retrieve a + // UrlMap. + Fingerprint string `json:"fingerprint,omitempty"` + + // HostRules: The list of HostRules to use against the URL. + HostRules []*HostRule `json:"hostRules,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always compute#zone for - // zones. + // Kind: [Output Only] Type of the resource. Always compute#urlMaps for + // url maps. Kind string `json:"kind,omitempty"` - // Name: [Output Only] Name of the resource. + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Region: [Output Only] Full URL reference to the region which hosts - // the zone. - Region string `json:"region,omitempty"` + // PathMatchers: The list of named PathMatchers to use against the URL. + PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] Status of the zone, either UP or DOWN. - // - // Possible values: - // "DOWN" - // "UP" - Status string `json:"status,omitempty"` + // Tests: The list of expected URL mapping tests. Request to update this + // UrlMap will succeed only if all of the test cases pass. You can + // specify a maximum of 100 tests per UrlMap. + Tests []*UrlMapTest `json:"tests,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. - // "AvailableCpuPlatforms") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AvailableCpuPlatforms") to + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -29333,20 +29652,20 @@ type Zone struct { NullFields []string `json:"-"` } -func (s *Zone) MarshalJSON() ([]byte, error) { - type NoMethod Zone +func (s *UrlMap) MarshalJSON() ([]byte, error) { + type NoMethod UrlMap raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ZoneList: Contains a list of zone resources. -type ZoneList struct { +// UrlMapList: Contains a list of UrlMap resources. +type UrlMapList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Zone resources. - Items []*Zone `json:"items,omitempty"` + // Items: A list of UrlMap resources. + Items []*UrlMap `json:"items,omitempty"` // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -29363,7 +29682,7 @@ type ZoneList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *ZoneListWarning `json:"warning,omitempty"` + Warning *UrlMapListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -29386,14 +29705,14 @@ type ZoneList struct { NullFields []string `json:"-"` } -func (s *ZoneList) MarshalJSON() ([]byte, error) { - type NoMethod ZoneList +func (s *UrlMapList) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ZoneListWarning: [Output Only] Informational warning message. -type ZoneListWarning struct { +// UrlMapListWarning: [Output Only] Informational warning message. +type UrlMapListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -29427,7 +29746,7 @@ type ZoneListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*ZoneListWarningData `json:"data,omitempty"` + Data []*UrlMapListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -29450,13 +29769,13 @@ type ZoneListWarning struct { NullFields []string `json:"-"` } -func (s *ZoneListWarning) MarshalJSON() ([]byte, error) { - type NoMethod ZoneListWarning +func (s *UrlMapListWarning) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ZoneListWarningData struct { +type UrlMapListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -29487,25 +29806,16 @@ type ZoneListWarningData struct { NullFields []string `json:"-"` } -func (s *ZoneListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod ZoneListWarningData +func (s *UrlMapListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ZoneSetLabelsRequest struct { - // LabelFingerprint: The fingerprint of the previous set of labels for - // this resource, used to detect conflicts. 
The fingerprint is initially - // generated by Compute Engine and changes after every request to modify - // or update labels. You must always provide an up-to-date fingerprint - // hash in order to update or change labels. Make a get() request to the - // resource to get the latest fingerprint. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: The labels to set for this resource. - Labels map[string]string `json:"labels,omitempty"` +type UrlMapReference struct { + UrlMap string `json:"urlMap,omitempty"` - // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to + // ForceSendFields is a list of field names (e.g. "UrlMap") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -29513,75 +29823,4235 @@ type ZoneSetLabelsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LabelFingerprint") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "UrlMap") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { - type NoMethod ZoneSetLabelsRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +func (s *UrlMapReference) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapTest: Message for the expected URL mappings. +type UrlMapTest struct { + // Description: Description of this test case. + Description string `json:"description,omitempty"` + + // Host: Host portion of the URL. + Host string `json:"host,omitempty"` + + // Path: Path portion of the URL. + Path string `json:"path,omitempty"` + + // Service: Expected BackendService resource the given URL should be + // mapped to. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapTest) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapTest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapValidationResult: Message representing the validation result +// for a UrlMap. +type UrlMapValidationResult struct { + LoadErrors []string `json:"loadErrors,omitempty"` + + // LoadSucceeded: Whether the given UrlMap can be successfully loaded. + // If false, 'loadErrors' indicates the reasons. + LoadSucceeded bool `json:"loadSucceeded,omitempty"` + + TestFailures []*TestFailure `json:"testFailures,omitempty"` + + // TestPassed: If successfully loaded, this field indicates whether the + // test passed. If false, 'testFailures's indicate the reason of + // failure. + TestPassed bool `json:"testPassed,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LoadErrors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LoadErrors") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapValidationResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsValidateRequest struct { + // Resource: Content of the UrlMap to be validated. + Resource *UrlMap `json:"resource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Resource") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Resource") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsValidateRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsValidateResponse struct { + Result *UrlMapValidationResult `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { + type NoMethod UrlMapsValidateResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsableSubnetwork: Subnetwork which the current user has +// compute.subnetworks.use permission on. +type UsableSubnetwork struct { + // IpCidrRange: The range of internal addresses that are owned by this + // subnetwork. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // Network: Network URL. + Network string `json:"network,omitempty"` + + // SecondaryIpRanges: Secondary IP ranges. + SecondaryIpRanges []*UsableSubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` + + // Subnetwork: Subnetwork URL. + Subnetwork string `json:"subnetwork,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetwork) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetwork + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsableSubnetworkSecondaryRange: Secondary IP range of a usable +// subnetwork. +type UsableSubnetworkSecondaryRange struct { + // IpCidrRange: The range of IP addresses belonging to this subnetwork + // secondary range. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // RangeName: The name associated with this subnetwork secondary range, + // used when adding an alias IP range to a VM instance. The name must be + // 1-63 characters long, and comply with RFC1035. The name must be + // unique within the subnetwork. + RangeName string `json:"rangeName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetworkSecondaryRange + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UsableSubnetworksAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output] A list of usable subnetwork URLs. + Items []*UsableSubnetwork `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#usableSubnetworksAggregatedList for aggregated lists of + // usable subnetworks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *UsableSubnetworksAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetworksAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetworksAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsableSubnetworksAggregatedListWarning: [Output Only] Informational +// warning message. 
+type UsableSubnetworksAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*UsableSubnetworksAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetworksAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetworksAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UsableSubnetworksAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetworksAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetworksAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsageExportLocation: The location in Cloud Storage and naming method +// of the daily usage report. Contains bucket_name and report_name +// prefix. +type UsageExportLocation struct { + // BucketName: The name of an existing bucket in Cloud Storage where the + // usage report object is stored. The Google Service Account is granted + // write access to this bucket. This can either be the bucket name by + // itself, such as example-bucket, or the bucket name with gs:// or + // https://storage.googleapis.com/ in front of it, such as + // gs://example-bucket. + BucketName string `json:"bucketName,omitempty"` + + // ReportNamePrefix: An optional prefix for the name of the usage report + // object stored in bucketName. If not supplied, defaults to usage. The + // report is stored as a CSV file named + // report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the + // usage according to Pacific Time. If you supply a prefix, it should + // conform to Cloud Storage object naming conventions. + ReportNamePrefix string `json:"reportNamePrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BucketName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { + type NoMethod UsageExportLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +}
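A short usage sketch for UsageExportLocation (the bucket and project names are placeholders; svc is an initialized *Service, and the Projects.SetUsageExportBucket call is assumed from the rest of this generated package rather than shown in this excerpt):

        loc := &UsageExportLocation{
                BucketName:       "gs://example-bucket",
                ReportNamePrefix: "usage", // reports are written as usage_gce_YYYYMMDD.csv
        }
        op, err := svc.Projects.SetUsageExportBucket("my-project", loc).Context(ctx).Do()
        if err != nil {
                // handle the error
        }
        _ = op // an Operation describing the project update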
+ +// VmEndpointNatMappings: Contains NAT mapping information for a VM +// endpoint (i.e., NIC). +type VmEndpointNatMappings struct { + // InstanceName: Name of the VM instance to which the endpoint belongs. + InstanceName string `json:"instanceName,omitempty"` + + InterfaceNatMappings []*VmEndpointNatMappingsInterfaceNatMappings `json:"interfaceNatMappings,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappings) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VmEndpointNatMappingsInterfaceNatMappings: Contains NAT mapping +// information for an interface of this endpoint. +type VmEndpointNatMappingsInterfaceNatMappings struct { + // NatIpPortRanges: A list of all IP:port-range mappings assigned to + // this interface. These ranges are inclusive, that is, both the first + // and the last ports can be used for NAT. Example: + // ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. + NatIpPortRanges []string `json:"natIpPortRanges,omitempty"` + + // NumTotalNatPorts: Total number of ports across all NAT IPs allocated + // to this interface. It equals the aggregate number of ports listed in + // the field nat_ip_port_ranges. + NumTotalNatPorts int64 `json:"numTotalNatPorts,omitempty"` + + // SourceAliasIpRange: Alias IP range for this interface endpoint. It + // will be a private (RFC 1918) IP range. Examples: "10.33.4.55/32", or + // "192.168.5.0/24". + SourceAliasIpRange string `json:"sourceAliasIpRange,omitempty"` + + // SourceVirtualIp: Primary IP of the VM for this NIC. + SourceVirtualIp string `json:"sourceVirtualIp,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NatIpPortRanges") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NatIpPortRanges") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests.
+ NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsInterfaceNatMappings) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsInterfaceNatMappings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VmEndpointNatMappingsList: Contains a list of VmEndpointNatMappings. +type VmEndpointNatMappingsList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#vmEndpointNatMappingsList for lists of Nat mappings of VM + // endpoints. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Result: [Output Only] A list of Nat mapping information of VM + // endpoints. + Result []*VmEndpointNatMappings `json:"result,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VmEndpointNatMappingsListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsList) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VmEndpointNatMappingsListWarning: [Output Only] Informational warning +// message. +type VmEndpointNatMappingsListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VmEndpointNatMappingsListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VmEndpointNatMappingsListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VmEndpointNatMappingsListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VmEndpointNatMappingsListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnel: VPN tunnel resource. (== resource_for beta.vpnTunnels ==) +// (== resource_for v1.vpnTunnels ==) +type VpnTunnel struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // DetailedStatus: [Output Only] Detailed status message for the VPN + // tunnel. + DetailedStatus string `json:"detailedStatus,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // IkeVersion: IKE protocol version to use when establishing the VPN + // tunnel with the peer VPN gateway. Acceptable IKE versions are 1 or 2. + // The default version is 2. + IkeVersion int64 `json:"ikeVersion,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // LocalTrafficSelector: Local traffic selector to use when establishing + // the VPN tunnel with the peer VPN gateway. The value should be a CIDR + // formatted string, for example: 192.168.0.0/16. The ranges must be + // disjoint. Only IPv4 is supported. + LocalTrafficSelector []string `json:"localTrafficSelector,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. + PeerIp string `json:"peerIp,omitempty"` + + // Region: [Output Only] URL of the region where the VPN tunnel resides. + // You must specify this field as part of the HTTP request URL. It is + // not settable as a field in the request body. + Region string `json:"region,omitempty"` + + // RemoteTrafficSelector: Remote traffic selectors to use when + // establishing the VPN tunnel with the peer VPN gateway. The value + // should be a CIDR formatted string, for example: 192.168.0.0/16. The + // ranges should be disjoint. Only IPv4 is supported. + RemoteTrafficSelector []string `json:"remoteTrafficSelector,omitempty"` + + // Router: URL of the router resource to be used for dynamic routing. + Router string `json:"router,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. 
+ SelfLink string `json:"selfLink,omitempty"` + + // SharedSecret: Shared secret used to set the secure session between + // the Cloud VPN gateway and the peer VPN gateway. + SharedSecret string `json:"sharedSecret,omitempty"` + + // SharedSecretHash: Hash of the shared secret. + SharedSecretHash string `json:"sharedSecretHash,omitempty"` + + // Status: [Output Only] The status of the VPN tunnel, which can be one + // of the following: + // - PROVISIONING: Resource is being allocated for the VPN tunnel. + // - WAITING_FOR_FULL_CONFIG: Waiting to receive all VPN-related configs + // from the user. Network, TargetVpnGateway, VpnTunnel, ForwardingRule, + // and Route resources are needed to setup the VPN tunnel. + // - FIRST_HANDSHAKE: Successful first handshake with the peer VPN. + // - ESTABLISHED: Secure session is successfully established with the + // peer VPN. + // - NETWORK_ERROR: Deprecated, replaced by NO_INCOMING_PACKETS + // - AUTHORIZATION_ERROR: Auth error (for example, bad shared secret). + // + // - NEGOTIATION_FAILURE: Handshake failed. + // - DEPROVISIONING: Resources are being deallocated for the VPN tunnel. + // + // - FAILED: Tunnel creation has failed and the tunnel is not ready to + // be used. + // + // Possible values: + // "ALLOCATING_RESOURCES" + // "AUTHORIZATION_ERROR" + // "DEPROVISIONING" + // "ESTABLISHED" + // "FAILED" + // "FIRST_HANDSHAKE" + // "NEGOTIATION_FAILURE" + // "NETWORK_ERROR" + // "NO_INCOMING_PACKETS" + // "PROVISIONING" + // "REJECTED" + // "WAITING_FOR_FULL_CONFIG" + Status string `json:"status,omitempty"` + + // TargetVpnGateway: URL of the Target VPN gateway with which this VPN + // tunnel is associated. Provided by the client when the VPN tunnel is + // created. + TargetVpnGateway string `json:"targetVpnGateway,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnel) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnel + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnTunnelsScopedList resources. + Items map[string]VpnTunnelsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. 
+ Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnTunnelAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelAggregatedListWarning: [Output Only] Informational warning +// message. +type VpnTunnelAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelList: Contains a list of VpnTunnel resources. +type VpnTunnelList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnTunnel resources. + Items []*VpnTunnel `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnTunnelListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelListWarning: [Output Only] Informational warning message. +type VpnTunnelListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelsScopedList struct { + // VpnTunnels: A list of VPN tunnels contained in this scope. + VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *VpnTunnelsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VpnTunnels") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VpnTunnels") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelsScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelsScopedListWarning: Informational warning which replaces the +// list of addresses when the list is empty. +type VpnTunnelsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelsScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod VpnTunnelsScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type XpnHostList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of shared VPC host project URLs. + Items []*Project `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#xpnHostList for + // lists of shared VPC hosts. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *XpnHostListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostList) MarshalJSON() ([]byte, error) { + type NoMethod XpnHostList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// XpnHostListWarning: [Output Only] Informational warning message. +type XpnHostListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*XpnHostListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostListWarning) MarshalJSON() ([]byte, error) { + type NoMethod XpnHostListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type XpnHostListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod XpnHostListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// XpnResourceId: Service resource (a.k.a service project) ID. +type XpnResourceId struct { + // Id: The ID of the service resource. In the case of projects, this + // field supports project id (e.g., my-project-123) and project number + // (e.g. 12345678). + Id string `json:"id,omitempty"` + + // Type: The type of the service resource. + // + // Possible values: + // "PROJECT" + // "XPN_RESOURCE_TYPE_UNSPECIFIED" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnResourceId) MarshalJSON() ([]byte, error) { + type NoMethod XpnResourceId + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Zone: A Zone resource. (== resource_for beta.zones ==) (== +// resource_for v1.zones ==) Next ID: 17 +type Zone struct { + // AvailableCpuPlatforms: [Output Only] Available cpu/platform + // selections for the zone. + AvailableCpuPlatforms []string `json:"availableCpuPlatforms,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: [Output Only] The deprecation status associated with this + // zone. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] Textual description of the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. 
Always compute#zone for + // zones. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // Region: [Output Only] Full URL reference to the region which hosts + // the zone. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] Status of the zone, either UP or DOWN. + // + // Possible values: + // "DOWN" + // "UP" + Status string `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "AvailableCpuPlatforms") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AvailableCpuPlatforms") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Zone) MarshalJSON() ([]byte, error) { + type NoMethod Zone + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ZoneList: Contains a list of zone resources. +type ZoneList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Zone resources. + Items []*Zone `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *ZoneListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneList) MarshalJSON() ([]byte, error) { + type NoMethod ZoneList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ZoneListWarning: [Output Only] Informational warning message. +type ZoneListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ZoneListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneListWarning) MarshalJSON() ([]byte, error) { + type NoMethod ZoneListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod ZoneListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneSetLabelsRequest struct { + // LabelFingerprint: The fingerprint of the previous set of labels for + // this resource, used to detect conflicts. The fingerprint is initially + // generated by Compute Engine and changes after every request to modify + // or update labels. You must always provide an up-to-date fingerprint + // hash in order to update or change labels. Make a get() request to the + // resource to get the latest fingerprint. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: The labels to set for this resource. + Labels map[string]string `json:"labels,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LabelFingerprint") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { + type NoMethod ZoneSetLabelsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneSetPolicyRequest struct { + // Bindings: Flatten Policy to create a backward compatible wire-format. + // Deprecated. Use 'policy' to specify bindings. + Bindings []*Binding `json:"bindings,omitempty"` + + // Etag: Flatten Policy to create a backward compatible wire-format.
+ // Deprecated. Use 'policy' to specify the etag. + Etag string `json:"etag,omitempty"` + + // Policy: REQUIRED: The complete policy to be applied to the + // 'resource'. The size of the policy is limited to a few 10s of KB. An + // empty policy is in general a valid policy but certain services (like + // Projects) might reject them. + Policy *Policy `json:"policy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Bindings") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bindings") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneSetPolicyRequest) MarshalJSON() ([]byte, error) { + type NoMethod ZoneSetPolicyRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "compute.acceleratorTypes.aggregatedList": + +type AcceleratorTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of accelerator types. +func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall { + c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
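+//
+// Illustrative sketch only (not part of the generated surface): for a
+// previously constructed call value c, a filter string built from the
+// example expressions above could be applied like this; the expression
+// shown is a placeholder, not a required value.
+//
+//	c.Filter(`(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell")`)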
+func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *AcceleratorTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesAggregatedListCall) Context(ctx context.Context) *AcceleratorTypesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
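+//
+// Illustrative sketch only: setting a custom header before executing the
+// call; the header name and value are placeholders, not headers this API
+// defines or requires.
+//
+//	c.Header().Set("X-Example-Debug", "trace-request")
+//	if _, err := c.Do(); err != nil {
+//		// handle the error
+//	}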
+func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.aggregatedList" call. +// Exactly one of *AcceleratorTypeAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *AcceleratorTypeAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of accelerator types.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/acceleratorTypes", + // "response": { + // "$ref": "AcceleratorTypeAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(*AcceleratorTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.acceleratorTypes.get": + +type AcceleratorTypesGetCall struct { + s *Service + project string + zone string + acceleratorType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified accelerator type. +func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { + c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.acceleratorType = acceleratorType + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
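+//
+// Illustrative sketch only: the field names below are assumptions chosen
+// for the example; any valid AcceleratorType field paths can be supplied.
+//
+//	at, err := c.Fields("name", "description").Do()
+//	// at carries only the requested fields; err reports transport or API errors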
+func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesGetCall) Context(ctx context.Context) *AcceleratorTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AcceleratorTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "acceleratorType": c.acceleratorType, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.get" call. +// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AcceleratorType.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified accelerator type.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "acceleratorType" + // ], + // "parameters": { + // "acceleratorType": { + // "description": "Name of the accelerator type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + // "response": { + // "$ref": "AcceleratorType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.acceleratorTypes.list": + +type AcceleratorTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of accelerator types available to the +// specified project. +func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { + c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. 
However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesListCall) Context(ctx context.Context) *AcceleratorTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AcceleratorTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.list" call. +// Exactly one of *AcceleratorTypeList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *AcceleratorTypeList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of accelerator types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/acceleratorTypes", + // "response": { + // "$ref": "AcceleratorTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*AcceleratorTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.aggregatedList": + +type AddressesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of addresses. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList +func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { + c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
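+//
+// Illustrative sketch only: restricting the response to the listed
+// top-level fields of the aggregated list; the names are examples, not a
+// required set.
+//
+//	c.Fields("nextPageToken", "items")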
+func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.aggregatedList" call. +// Exactly one of *AddressAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *AddressAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of addresses.", + // "httpMethod": "GET", + // "id": "compute.addresses.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/addresses", + // "response": { + // "$ref": "AddressAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
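+//
+// Illustrative sketch only: walking every page with a caller-supplied
+// context; the page-processing body is a placeholder.
+//
+//	err := c.Pages(ctx, func(page *AddressAggregatedList) error {
+//		// inspect page here; returning a non-nil error stops the iteration
+//		return nil
+//	})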
+func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.delete": + +type AddressesDeleteCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete +func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { + c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified address resource.", + // "httpMethod": "DELETE", + // "id": "compute.addresses.delete", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.get": + +type AddressesGetCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get +func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { + c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Address{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified address resource.", + // "httpMethod": "GET", + // "id": "compute.addresses.get", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Address" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.addresses.insert": + +type AddressesInsertCall struct { + s *Service + project string + region string + address *Address + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an address resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert +func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { + c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an address resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.addresses.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "request": { + // "$ref": "Address" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.list": + +type AddressesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of addresses contained within the specified +// region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list +func (r *AddressesService) List(project string, region string) *AddressesListCall { + c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
+// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *AddressesListCall) Filter(filter string) *AddressesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
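// Editor's usage sketch for the compute.addresses.insert builder above: a
// minimal, self-contained program showing RequestId-based idempotent retries.
// The project, region, address name and request UUID are placeholders, and the
// client construction (google.DefaultClient + compute.New) is one assumed way
// to obtain an authenticated service for this vendored client version.
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials with the compute scope (assumed setup).
	hc, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	addr := &compute.Address{Name: "example-address"}

	// The same request UUID can be re-sent on retry; the server then ignores
	// the duplicate instead of creating a second address.
	op, err := svc.Addresses.Insert("my-project", "us-central1", addr).
		RequestId("7b2e1f7a-3f0e-4c1d-9a6b-2d5f8c9e0a11"). // placeholder UUID
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation %s started (status %s)", op.Name, op.Status)
}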
+func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.list" call. +// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AddressList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of addresses contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.addresses.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). 
By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "response": { + // "$ref": "AddressList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.aggregatedList": + +type AutoscalersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of autoscalers. 
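// Editor's usage sketch for AddressesListCall and its Pages helper above:
// Pages follows nextPageToken exactly as the generated loop does. svc is
// assumed to be an authenticated *compute.Service, and the project, region
// and filter expression are placeholders.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listReservedAddresses(ctx context.Context, svc *compute.Service) error {
	call := svc.Addresses.List("my-project", "us-central1").
		Filter("status = RESERVED"). // filter syntax as documented above
		MaxResults(100)

	return call.Pages(ctx, func(page *compute.AddressList) error {
		for _, a := range page.Items {
			fmt.Printf("%s\t%s\n", a.Name, a.Address)
		}
		return nil
	})
}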
+func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { + c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
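// Editor's usage sketch for the autoscalers aggregatedList builder above.
// svc is assumed to be an authenticated *compute.Service, the project id is a
// placeholder, and the per-scope result shape (Items keyed by "zones/<zone>",
// each entry carrying an Autoscalers slice) reflects this package's
// AutoscalerAggregatedList / AutoscalersScopedList types.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func countAutoscalersPerZone(ctx context.Context, svc *compute.Service) error {
	call := svc.Autoscalers.AggregatedList("my-project").MaxResults(250)
	return call.Pages(ctx, func(page *compute.AutoscalerAggregatedList) error {
		for scope, scoped := range page.Items {
			if n := len(scoped.Autoscalers); n > 0 {
				fmt.Printf("%s: %d autoscaler(s)\n", scope, n)
			}
		}
		return nil
	})
}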
+func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.aggregatedList" call. +// Exactly one of *AutoscalerAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AutoscalerAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of autoscalers.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/autoscalers", + // "response": { + // "$ref": "AutoscalerAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.delete": + +type AutoscalersDeleteCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified autoscaler. +func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { + c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
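// Editor's usage sketch for the autoscalers.delete builder above. Delete
// returns a zonal Operation; the loop below polls it with ZoneOperations.Get
// from this same package. svc is assumed to be an authenticated
// *compute.Service; project, zone, autoscaler name and the request UUID are
// placeholders.
package example

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func deleteAutoscaler(ctx context.Context, svc *compute.Service) error {
	const project, zone, name = "my-project", "us-central1-a", "example-autoscaler"

	op, err := svc.Autoscalers.Delete(project, zone, name).
		RequestId("9c1b2a7e-5d4f-4a38-9b60-1f2e3d4c5b6a"). // placeholder UUID; safe to reuse on retry
		Context(ctx).
		Do()
	if err != nil {
		return err
	}

	// Poll the returned operation until it reaches DONE.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		if op, err = svc.ZoneOperations.Get(project, zone, op.Name).Context(ctx).Do(); err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("delete failed: %+v", op.Error.Errors)
	}
	return nil
}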
+func (c *AutoscalersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified autoscaler.", + // "httpMethod": "DELETE", + // "id": "compute.autoscalers.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.get": + +type AutoscalersGetCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified autoscaler resource. Gets a list of +// available autoscalers by making a list() request. +func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { + c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.get" call. 
+// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Autoscaler.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Autoscaler{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.get", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Autoscaler" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.insert": + +type AutoscalersInsertCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an autoscaler in the specified project using the data +// included in the request. +func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { + c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c } -type ZoneSetPolicyRequest struct { - // Bindings: Flatten Policy to create a backwacd compatible wire-format. - // Deprecated. Use 'policy' to specify bindings. - Bindings []*Binding `json:"bindings,omitempty"` +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} - // Etag: Flatten Policy to create a backward compatible wire-format. - // Deprecated. Use 'policy' to specify the etag. - Etag string `json:"etag,omitempty"` +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} - // Policy: REQUIRED: The complete policy to be applied to the - // 'resource'. The size of the policy is limited to a few 10s of KB. An - // empty policy is in general a valid policy but certain services (like - // Projects) might reject them. - Policy *Policy `json:"policy,omitempty"` +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { + c.ctx_ = ctx + return c +} - // ForceSendFields is a list of field names (e.g. "Bindings") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} - // NullFields is a list of field names (e.g. "Bindings") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` +func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } -func (s *ZoneSetPolicyRequest) MarshalJSON() ([]byte, error) { - type NoMethod ZoneSetPolicyRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +// Do executes the "compute.autoscalers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.autoscalers.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + } -// method id "compute.acceleratorTypes.aggregatedList": +// method id "compute.autoscalers.list": -type AcceleratorTypesAggregatedListCall struct { +type AutoscalersListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of accelerator types. -func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall { - c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of autoscalers contained within the specified +// zone. +func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { + c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -29607,7 +34077,7 @@ func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTyp // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { +func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { c.urlParams_.Set("filter", filter) return c } @@ -29618,7 +34088,7 @@ func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorT // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *AcceleratorTypesAggregatedListCall { +func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -29635,7 +34105,7 @@ func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *Accel // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *AcceleratorTypesAggregatedListCall { +func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -29643,7 +34113,7 @@ func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *Accelerato // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
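// Editor's usage sketch for the autoscalers.insert builder above. svc is
// assumed to be an authenticated *compute.Service; the names, the target
// instance-group-manager URL and the policy numbers are placeholders.
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func createAutoscaler(ctx context.Context, svc *compute.Service) error {
	const project, zone = "my-project", "us-central1-a"

	as := &compute.Autoscaler{
		Name:   "example-autoscaler",
		Target: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instanceGroupManagers/example-igm",
		AutoscalingPolicy: &compute.AutoscalingPolicy{
			MinNumReplicas:    1,
			MaxNumReplicas:    5,
			CoolDownPeriodSec: 60,
			CpuUtilization:    &compute.AutoscalingPolicyCpuUtilization{UtilizationTarget: 0.6},
		},
	}

	op, err := svc.Autoscalers.Insert(project, zone, as).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("insert started: operation %s", op.Name)
	return nil
}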
-func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *AcceleratorTypesAggregatedListCall { +func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -29651,7 +34121,7 @@ func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *Accele // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *AcceleratorTypesAggregatedListCall { +func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -29661,7 +34131,7 @@ func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *Accel // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *AcceleratorTypesAggregatedListCall { +func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { c.ifNoneMatch_ = entityTag return c } @@ -29669,21 +34139,21 @@ func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *Acce // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesAggregatedListCall) Context(ctx context.Context) *AcceleratorTypesAggregatedListCall { +func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { +func (c *AutoscalersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -29695,7 +34165,7 @@ func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -29704,18 +34174,19 @@ func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Respon req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.aggregatedList" call. -// Exactly one of *AcceleratorTypeAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *AcceleratorTypeAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.autoscalers.list" call. +// Exactly one of *AutoscalerList or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *AutoscalerList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeAggregatedList, error) { +func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29734,7 +34205,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorTypeAggregatedList{ + ret := &AutoscalerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29746,11 +34217,12 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of accelerator types.", + // "description": "Retrieves a list of autoscalers contained within the specified zone.", // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.aggregatedList", + // "id": "compute.autoscalers.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -29782,11 +34254,18 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/acceleratorTypes", + // "path": "{project}/zones/{zone}/autoscalers", // "response": { - // "$ref": "AcceleratorTypeAggregatedList" + // "$ref": "AutoscalerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -29800,7 +34279,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(*AcceleratorTypeAggregatedList) error) error { +func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -29818,98 +34297,304 @@ func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(* } } -// method id "compute.acceleratorTypes.get": +// method id "compute.autoscalers.patch": -type AcceleratorTypesGetCall struct { - s *Service - project string - zone string - acceleratorType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type AutoscalersPatchCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified accelerator type. 
-func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { - c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates an autoscaler in the specified project using the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { + c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.acceleratorType = acceleratorType + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to patch. +func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypesGetCall { +func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorTypesGetCall { - c.ifNoneMatch_ = entityTag +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
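// Editor's usage sketch for the autoscalers.patch builder above. The target
// autoscaler is named through the optional Autoscaler(...) query parameter,
// while the request body carries only the fields being changed (JSON merge
// patch). svc is assumed to be an authenticated *compute.Service; the names
// and the replica count are placeholders.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func raiseMaxReplicas(ctx context.Context, svc *compute.Service) error {
	patch := &compute.Autoscaler{
		AutoscalingPolicy: &compute.AutoscalingPolicy{MaxNumReplicas: 10},
	}
	_, err := svc.Autoscalers.Patch("my-project", "us-central1-a", patch).
		Autoscaler("example-autoscaler").
		Context(ctx).
		Do()
	return err
}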
+func (c *AutoscalersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.autoscalers.patch", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to patch.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.update": + +type AutoscalersUpdateCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an autoscaler in the specified project using the data +// included in the request. +func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { + c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to update. +func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesGetCall) Context(ctx context.Context) *AcceleratorTypesGetCall { +func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
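// Editor's usage sketch for the autoscalers.update builder above. Update is a
// PUT, i.e. a full replacement, so the usual pattern is read-modify-write via
// the Get call earlier in this file. svc is assumed to be an authenticated
// *compute.Service; project, zone, autoscaler name and the request UUID are
// placeholders.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func setCooldown(ctx context.Context, svc *compute.Service, seconds int64) error {
	const project, zone, name = "my-project", "us-central1-a", "example-autoscaler"

	as, err := svc.Autoscalers.Get(project, zone, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	if as.AutoscalingPolicy == nil {
		as.AutoscalingPolicy = &compute.AutoscalingPolicy{}
	}
	as.AutoscalingPolicy.CoolDownPeriodSec = seconds

	_, err = svc.Autoscalers.Update(project, zone, as).
		Autoscaler(name).
		RequestId("f47ac10b-58cc-4372-a567-0e02b2c3d479"). // placeholder UUID; safe to reuse on retry
		Context(ctx).
		Do()
	return err
}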
-func (c *AcceleratorTypesGetCall) Header() http.Header { +func (c *AutoscalersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "acceleratorType": c.acceleratorType, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.get" call. -// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx +// Do executes the "compute.autoscalers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *AcceleratorType.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29928,7 +34613,7 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29940,20 +34625,18 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator } return ret, nil // { - // "description": "Returns the specified accelerator type.", - // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.get", + // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.autoscalers.update", // "parameterOrder": [ // "project", - // "zone", - // "acceleratorType" + // "zone" // ], // "parameters": { - // "acceleratorType": { - // "description": "Name of the accelerator type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "type": "string" // }, // "project": { @@ -29963,180 +34646,136 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "Name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, // "response": { - // "$ref": "AcceleratorType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.acceleratorTypes.list": +// method id "compute.backendBuckets.addSignedUrlKey": -type AcceleratorTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendBucketsAddSignedUrlKeyCall struct { + s *Service + project string + backendBucket string + signedurlkey *SignedUrlKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of accelerator types available to the -// specified project. -func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { - c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddSignedUrlKey: Adds a key for validating requests with signed URLs +// for this backend bucket. +func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { + c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
-func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.backendBucket = backendBucket + c.signedurlkey = signedurlkey return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsAddSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorTypesListCall { +func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsAddSignedUrlKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesListCall) Context(ctx context.Context) *AcceleratorTypesListCall { +func (c *BackendBucketsAddSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsAddSignedUrlKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AcceleratorTypesListCall) Header() http.Header { +func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "backendBucket": c.backendBucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.list" call. -// Exactly one of *AcceleratorTypeList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *AcceleratorTypeList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeList, error) { +// Do executes the "compute.backendBuckets.addSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30155,7 +34794,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorTypeList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -30167,35 +34806,18 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato } return ret, nil // { - // "description": "Retrieves a list of accelerator types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.list", + // "description": "Adds a key for validating requests with signed URLs for this backend bucket.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.addSignedUrlKey", // "parameterOrder": [ // "project", - // "zone" + // "backendBucket" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -30205,198 +34827,121 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/acceleratorTypes", + // "path": "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + // "request": { + // "$ref": "SignedUrlKey" + // }, // "response": { - // "$ref": "AcceleratorTypeList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*AcceleratorTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.addresses.aggregatedList": +// method id "compute.backendBuckets.delete": -type AddressesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendBucketsDeleteCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of addresses. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList -func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { - c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified BackendBucket resource. 
+func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { + c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.backendBucket = backendBucket return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. 
-func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { +func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { +func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesAggregatedListCall) Header() http.Header { +func (c *BackendBucketsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "backendBucket": c.backendBucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.aggregatedList" call. -// Exactly one of *AddressAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *AddressAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { +// Do executes the "compute.backendBuckets.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30415,7 +34960,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -30427,34 +34972,19 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address } return ret, nil // { - // "description": "Retrieves an aggregated list of addresses.", - // "httpMethod": "GET", - // "id": "compute.addresses.aggregatedList", + // "description": "Deletes the specified BackendBucket resource.", + // "httpMethod": "DELETE", + // "id": "compute.backendBuckets.delete", // "parameterOrder": [ - // "project" + // "project", + // "backendBucket" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "backendBucket": { + // "description": "Name of the BackendBucket resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -30463,61 +34993,43 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/addresses", + // "path": "{project}/global/backendBuckets/{backendBucket}", // "response": { - // "$ref": "AddressAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.addresses.delete": +// method id "compute.backendBuckets.deleteSignedUrlKey": -type AddressesDeleteCall struct { - s *Service - project string - region string - address string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendBucketsDeleteSignedUrlKeyCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete -func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { - c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteSignedUrlKey: Deletes a key for validating requests with signed +// URLs for this backend bucket. +func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { + c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address + c.backendBucket = backendBucket + c.urlParams_.Set("keyName", keyName) return c } @@ -30535,7 +35047,7 @@ func (r *AddressesService) Delete(project string, region string, address string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { +func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsDeleteSignedUrlKeyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -30543,7 +35055,7 @@ func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { +func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteSignedUrlKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -30551,21 +35063,21 @@ func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { +func (c *BackendBucketsDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsDeleteSignedUrlKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AddressesDeleteCall) Header() http.Header { +func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -30574,29 +35086,28 @@ func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, + "project": c.project, + "backendBucket": c.backendBucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.delete" call. +// Do executes the "compute.backendBuckets.deleteSignedUrlKey" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30627,33 +35138,31 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified address resource.", - // "httpMethod": "DELETE", - // "id": "compute.addresses.delete", + // "description": "Deletes a key for validating requests with signed URLs for this backend bucket.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.deleteSignedUrlKey", // "parameterOrder": [ // "project", - // "region", - // "address" + // "backendBucket", + // "keyName" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "keyName": { + // "description": "The name of the Signed URL Key to delete.", + // "location": "query", // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -30663,7 +35172,7 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{address}", + // "path": "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", // "response": { // "$ref": "Operation" // }, @@ -30675,33 +35184,31 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.addresses.get": +// method id "compute.backendBuckets.get": -type AddressesGetCall struct { - s *Service - project string - region string - address string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendBucketsGetCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get -func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { - c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified BackendBucket resource. Gets a list of +// available backend buckets by making a list() request. +func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { + c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address + c.backendBucket = backendBucket return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { +func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -30711,7 +35218,7 @@ func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { +func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -30719,21 +35226,21 @@ func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { +func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesGetCall) Header() http.Header { +func (c *BackendBucketsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -30745,7 +35252,7 @@ func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -30753,21 +35260,20 @@ func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, + "project": c.project, + "backendBucket": c.backendBucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { +// Do executes the "compute.backendBuckets.get" call. +// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *BackendBucket.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30786,7 +35292,7 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Address{ + ret := &BackendBucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -30798,19 +35304,18 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { } return ret, nil // { - // "description": "Returns the specified address resource.", + // "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request.", // "httpMethod": "GET", - // "id": "compute.addresses.get", + // "id": "compute.backendBuckets.get", // "parameterOrder": [ // "project", - // "region", - // "address" + // "backendBucket" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", + // "backendBucket": { + // "description": "Name of the BackendBucket resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -30820,18 +35325,11 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{address}", + // "path": "{project}/global/backendBuckets/{backendBucket}", // "response": { - // "$ref": "Address" + // "$ref": "BackendBucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -30842,26 +35340,23 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { } -// method id "compute.addresses.insert": +// method id "compute.backendBuckets.insert": -type AddressesInsertCall struct { - s *Service - project string - region string - address *Address - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendBucketsInsertCall struct { + s *Service + project string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an address resource in the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert -func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { - c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a BackendBucket resource in the specified project +// using the data included in the request. 
+func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { + c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address + c.backendbucket = backendbucket return c } @@ -30879,7 +35374,7 @@ func (r *AddressesService) Insert(project string, region string, address *Addres // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { +func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -30887,7 +35382,7 @@ func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { +func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -30895,35 +35390,35 @@ func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { +func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesInsertCall) Header() http.Header { +func (c *BackendBucketsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -30932,19 +35427,18 @@ func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.insert" call. +// Do executes the "compute.backendBuckets.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30975,12 +35469,11 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an address resource in the specified project using the data included in the request.", + // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.addresses.insert", + // "id": "compute.backendBuckets.insert", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "project": { @@ -30990,22 +35483,15 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses", + // "path": "{project}/global/backendBuckets", // "request": { - // "$ref": "Address" + // "$ref": "BackendBucket" // }, // "response": { // "$ref": "Operation" @@ -31018,273 +35504,9 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.addresses.list": - -type AddressesListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of addresses contained within the specified -// region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list -func (r *AddressesService) List(project string, region string) *AddressesListCall { - c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. 
-// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *AddressesListCall) Filter(filter string) *AddressesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AddressesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.addresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &AddressList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of addresses contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.addresses.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). 
By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/addresses", - // "response": { - // "$ref": "AddressList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.autoscalers.aggregatedList": +// method id "compute.backendBuckets.list": -type AutoscalersAggregatedListCall struct { +type BackendBucketsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -31293,9 +35515,10 @@ type AutoscalersAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of autoscalers. 
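The filter, maxResults and orderBy parameters documented in the hunks above carry over unchanged to the new BackendBucketsListCall. A small sketch under the same assumptions as the earlier insert example (svc is an initialized *compute.Service; the filter expression is illustrative):

package computesketches

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// listCdnBackendBuckets asks for CDN-enabled BackendBuckets, newest first,
// in pages of at most 100 results.
func listCdnBackendBuckets(ctx context.Context, svc *compute.Service, project string) (*compute.BackendBucketList, error) {
	return svc.BackendBuckets.List(project).
		Filter("enableCdn = true").        // field, operator, value — per the doc comment above
		OrderBy("creationTimestamp desc"). // newest resources first
		MaxResults(100).                   // 0–500, default 500
		Context(ctx).
		Do()
}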
-func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { - c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of BackendBucket resources available to the +// specified project. +func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { + c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -31322,7 +35545,7 @@ func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregat // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { +func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { c.urlParams_.Set("filter", filter) return c } @@ -31333,7 +35556,7 @@ func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggreg // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { +func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -31350,7 +35573,7 @@ func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *Autoscaler // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { +func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -31358,7 +35581,7 @@ func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggr // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { +func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -31366,7 +35589,7 @@ func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *Autoscalers // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { +func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31376,7 +35599,7 @@ func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *Autoscaler // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { +func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { c.ifNoneMatch_ = entityTag return c } @@ -31384,21 +35607,21 @@ func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *Autoscale // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { +func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersAggregatedListCall) Header() http.Header { +func (c *BackendBucketsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -31410,7 +35633,7 @@ func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -31423,14 +35646,14 @@ func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.aggregatedList" call. -// Exactly one of *AutoscalerAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was +// Do executes the "compute.backendBuckets.list" call. +// Exactly one of *BackendBucketList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendBucketList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { +func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31449,7 +35672,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AutoscalerAggregatedList{ + ret := &BackendBucketList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -31461,9 +35684,9 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos } return ret, nil // { - // "description": "Retrieves an aggregated list of autoscalers.", + // "description": "Retrieves the list of BackendBucket resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.autoscalers.aggregatedList", + // "id": "compute.backendBuckets.list", // "parameterOrder": [ // "project" // ], @@ -31499,9 +35722,9 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // "type": "string" // } // }, - // "path": "{project}/aggregated/autoscalers", + // "path": "{project}/global/backendBuckets", // "response": { - // "$ref": "AutoscalerAggregatedList" + // "$ref": "BackendBucketList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -31515,7 +35738,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { +func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -31533,24 +35756,26 @@ func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*Autos } } -// method id "compute.autoscalers.delete": +// method id "compute.backendBuckets.patch": -type AutoscalersDeleteCall struct { - s *Service - project string - zone string - autoscaler string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendBucketsPatchCall struct { + s *Service + project string + backendBucket string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified autoscaler. -func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { - c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified BackendBucket resource with the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { + c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler + c.backendBucket = backendBucket + c.backendbucket = backendbucket return c } @@ -31568,7 +35793,7 @@ func (r *AutoscalersService) Delete(project string, zone string, autoscaler stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
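The Pages helper renamed above drives the pageToken / nextPageToken loop for the caller. A sketch of page-wise iteration over all BackendBuckets in a project, under the same assumptions as before; the Patch call whose documentation starts above continues after this sketch.

package computesketches

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// printAllBackendBuckets walks every page of the list response.
func printAllBackendBuckets(ctx context.Context, svc *compute.Service, project string) error {
	return svc.BackendBuckets.List(project).Pages(ctx, func(page *compute.BackendBucketList) error {
		for _, bb := range page.Items {
			fmt.Printf("%s -> gs://%s\n", bb.Name, bb.BucketName)
		}
		return nil // a non-nil error here halts the iteration
	})
}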
-func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { +func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -31576,7 +35801,7 @@ func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { +func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31584,52 +35809,56 @@ func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { +func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersDeleteCall) Header() http.Header { +func (c *BackendBucketsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, + "project": c.project, + "backendBucket": c.backendBucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.delete" call. +// Do executes the "compute.backendBuckets.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31660,19 +35889,18 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified autoscaler.", - // "httpMethod": "DELETE", - // "id": "compute.autoscalers.delete", + // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.backendBuckets.patch", // "parameterOrder": [ // "project", - // "zone", - // "autoscaler" + // "backendBucket" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", + // "backendBucket": { + // "description": "Name of the BackendBucket resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -31687,16 +35915,12 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "request": { + // "$ref": "BackendBucket" + // }, // "response": { // "$ref": "Operation" // }, @@ -31708,99 +35932,108 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.autoscalers.get": +// method id "compute.backendBuckets.update": -type AutoscalersGetCall struct { - s *Service - project string - zone string - autoscaler string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendBucketsUpdateCall struct { + s *Service + project string + backendBucket string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified autoscaler resource. Gets a list of -// available autoscalers by making a list() request. -func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { - c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified BackendBucket resource with the data +// included in the request. 
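Because compute.backendBuckets.patch uses JSON merge-patch semantics, only fields that are actually serialized in the request body are modified. A sketch under the same assumptions; note that the generated structs only serialize zero values (false, "", 0) when they are named in ForceSendFields.

package computesketches

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// setBackendBucketDescription patches a single field and leaves the rest of
// the resource untouched.
func setBackendBucketDescription(ctx context.Context, svc *compute.Service, project, name, desc string) (*compute.Operation, error) {
	patch := &compute.BackendBucket{
		Description: desc,
		// Force-send so an empty description still reaches the server and
		// clears the field instead of being dropped from the JSON body.
		ForceSendFields: []string{"Description"},
	}
	return svc.BackendBuckets.Patch(project, name, patch).Context(ctx).Do()
}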
+func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { + c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler + c.backendBucket = backendBucket + c.backendbucket = backendbucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { +func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { +func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
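Fields and Header, shown above on the update call, behave the same on every generated call builder: Fields trims the response to a partial projection, and Header exposes the outgoing http.Header. A sketch follows; the extra header name is purely illustrative.

package computesketches

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// listBackendBucketNames requests only the fields it needs and attaches an
// additional request header.
func listBackendBucketNames(ctx context.Context, svc *compute.Service, project string) (*compute.BackendBucketList, error) {
	call := svc.BackendBuckets.List(project).
		Fields("nextPageToken", "items(name,selfLink)") // partial response projection
	call.Header().Set("X-Example-Trace", "docs-sketch") // illustrative header, not part of the API
	return call.Context(ctx).Do()
}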
-func (c *AutoscalersGetCall) Header() http.Header { +func (c *BackendBucketsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, + "project": c.project, + "backendBucket": c.backendBucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// Do executes the "compute.backendBuckets.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { +func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31819,7 +36052,7 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Autoscaler{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -31831,19 +36064,18 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } return ret, nil // { - // "description": "Returns the specified autoscaler resource. 
Gets a list of available autoscalers by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.autoscalers.get", + // "description": "Updates the specified BackendBucket resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.backendBuckets.update", // "parameterOrder": [ // "project", - // "zone", - // "autoscaler" + // "backendBucket" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", + // "backendBucket": { + // "description": "Name of the BackendBucket resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -31854,46 +36086,46 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro // "required": true, // "type": "string" // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "request": { + // "$ref": "BackendBucket" + // }, // "response": { - // "$ref": "Autoscaler" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.autoscalers.insert": +// method id "compute.backendServices.addSignedUrlKey": -type AutoscalersInsertCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesAddSignedUrlKeyCall struct { + s *Service + project string + backendService string + signedurlkey *SignedUrlKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. -func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { - c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddSignedUrlKey: Adds a key for validating requests with signed URLs +// for this backend service. 
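Unlike Patch, compute.backendBuckets.update (the PUT variant whose descriptor appears above) replaces the whole resource, so the body should carry the complete desired state, typically obtained by a read-modify-write. A sketch under the same assumptions:

package computesketches

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// replaceBackendBucket sends the full desired state of the resource; any
// mutable field left unset here is cleared on the server.
func replaceBackendBucket(ctx context.Context, svc *compute.Service, project string, desired *compute.BackendBucket) (*compute.Operation, error) {
	return svc.BackendBuckets.Update(project, desired.Name, desired).Context(ctx).Do()
}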
+func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { + c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler + c.backendService = backendService + c.signedurlkey = signedurlkey return c } @@ -31911,7 +36143,7 @@ func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Aut // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { +func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *BackendServicesAddSignedUrlKeyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -31919,7 +36151,7 @@ func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { +func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesAddSignedUrlKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31927,35 +36159,35 @@ func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { +func (c *BackendServicesAddSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesAddSignedUrlKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersInsertCall) Header() http.Header { +func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -31963,20 +36195,20 @@ func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.insert" call. 
+// Do executes the "compute.backendServices.addSignedUrlKey" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32007,14 +36239,20 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "description": "Adds a key for validating requests with signed URLs for this backend service.", // "httpMethod": "POST", - // "id": "compute.autoscalers.insert", + // "id": "compute.backendServices.addSignedUrlKey", // "parameterOrder": [ // "project", - // "zone" + // "backendService" // ], // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -32026,18 +36264,11 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/global/backendServices/{backendService}/addSignedUrlKey", // "request": { - // "$ref": "Autoscaler" + // "$ref": "SignedUrlKey" // }, // "response": { // "$ref": "Operation" @@ -32050,24 +36281,22 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.autoscalers.list": +// method id "compute.backendServices.aggregatedList": -type AutoscalersListCall struct { +type BackendServicesAggregatedListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of autoscalers contained within the specified -// zone. 
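A sketch of the new addSignedUrlKey call above, which attaches a Cloud CDN signed-URL key to a backend service. The key value must be a base64url-encoded 128-bit secret; the literal below is a placeholder only, and svc is again an initialized *compute.Service.

package computesketches

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// addSignedURLKey registers a named signing key on the backend service.
func addSignedURLKey(ctx context.Context, svc *compute.Service, project, backendService string) (*compute.Operation, error) {
	key := &compute.SignedUrlKey{
		KeyName:  "example-key",              // RFC1035-style name (placeholder)
		KeyValue: "aaaaaaaaaaaaaaaaaaaaaA==", // placeholder; supply a real base64url 128-bit key
	}
	return svc.BackendServices.AddSignedUrlKey(project, backendService, key).Context(ctx).Do()
}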
-func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { - c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of all BackendService resources, +// regional and global, available to the specified project. +func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { + c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -32093,7 +36322,7 @@ func (r *AutoscalersService) List(project string, zone string) *AutoscalersListC // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { +func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -32104,7 +36333,7 @@ func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { +func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -32121,7 +36350,7 @@ func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { +func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -32129,7 +36358,7 @@ func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { +func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -32137,7 +36366,7 @@ func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { +func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32147,7 +36376,7 @@ func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
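The IfNoneMatch option documented above ("In-None-Match" in the generated comment is a long-standing typo for the If-None-Match header) turns a repeated read into a conditional request. A sketch of reusing a cached aggregated list while the server-side ETag is unchanged; the fresh ETag, when present, is available from the response's embedded ServerResponse header.

package computesketches

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed import path
	"google.golang.org/api/googleapi"
)

// refreshBackendServices re-fetches the aggregated list only if it changed
// since cachedETag was recorded.
func refreshBackendServices(ctx context.Context, svc *compute.Service, project, cachedETag string, cached *compute.BackendServiceAggregatedList) (*compute.BackendServiceAggregatedList, error) {
	res, err := svc.BackendServices.AggregatedList(project).
		IfNoneMatch(cachedETag). // sends If-None-Match: <etag>
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // 304: the cached copy is still current
	}
	if err != nil {
		return nil, err
	}
	// res.Header.Get("Etag") carries the new tag for the next refresh.
	return res, nil
}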
-func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { +func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -32155,21 +36384,21 @@ func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { +func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersListCall) Header() http.Header { +func (c *BackendServicesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -32181,7 +36410,7 @@ func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -32190,19 +36419,18 @@ func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.list" call. -// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AutoscalerList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.backendServices.aggregatedList" call. +// Exactly one of *BackendServiceAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *BackendServiceAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { +func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32221,7 +36449,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AutoscalerList{ + ret := &BackendServiceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -32233,12 +36461,11 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified zone.", + // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", // "httpMethod": "GET", - // "id": "compute.autoscalers.list", + // "id": "compute.backendServices.aggregatedList", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -32265,23 +36492,16 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "Name of the project scoping this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/aggregated/backendServices", // "response": { - // "$ref": "AutoscalerList" + // "$ref": "BackendServiceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -32295,7 +36515,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { +func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -32313,33 +36533,23 @@ func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) } } -// method id "compute.autoscalers.patch": +// method id "compute.backendServices.delete": -type AutoscalersPatchCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesDeleteCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates an autoscaler in the specified project using the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. 
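The aggregatedList response above groups results per scope (global plus each region) rather than returning a flat slice. A sketch of walking every scope with the Pages helper; the BackendServices field name on the scoped list is assumed from the usual generated ScopedList pattern. The delete call whose documentation starts above continues after this sketch.

package computesketches

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// printAllBackendServices lists regional and global backend services by scope.
func printAllBackendServices(ctx context.Context, svc *compute.Service, project string) error {
	return svc.BackendServices.AggregatedList(project).Pages(ctx, func(page *compute.BackendServiceAggregatedList) error {
		for scope, scoped := range page.Items { // scope is "global" or e.g. "regions/us-central1"
			for _, bs := range scoped.BackendServices { // field name assumed (ScopedList convention)
				fmt.Printf("%s: %s\n", scope, bs.Name)
			}
		}
		return nil
	})
}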
-func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { - c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified BackendService resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete +func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { + c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.backendService = backendService return c } @@ -32357,7 +36567,7 @@ func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { +func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -32365,7 +36575,7 @@ func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { +func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32373,56 +36583,51 @@ func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { +func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersPatchCall) Header() http.Header { +func (c *BackendServicesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.patch" call. +// Do executes the "compute.backendServices.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32453,18 +36658,19 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.autoscalers.patch", + // "description": "Deletes the specified BackendService resource.", + // "httpMethod": "DELETE", + // "id": "compute.backendServices.delete", // "parameterOrder": [ // "project", - // "zone" + // "backendService" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -32478,19 +36684,9 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, + // "path": "{project}/global/backendServices/{backendService}", // "response": { // "$ref": "Operation" // }, @@ -32502,32 +36698,24 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.autoscalers.update": +// method id "compute.backendServices.deleteSignedUrlKey": -type AutoscalersUpdateCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesDeleteSignedUrlKeyCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates an autoscaler in the specified project using the data -// included in the request. -func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { - c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteSignedUrlKey: Deletes a key for validating requests with signed +// URLs for this backend service. +func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { + c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. -func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.backendService = backendService + c.urlParams_.Set("keyName", keyName) return c } @@ -32545,7 +36733,7 @@ func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdate // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { +func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendServicesDeleteSignedUrlKeyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -32553,7 +36741,7 @@ func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { +func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesDeleteSignedUrlKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32561,56 +36749,51 @@ func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateC // Context sets the context to be used in this call's Do method. 
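Reviewer note: the compute.backendServices.delete and deleteSignedUrlKey calls generated above are normally driven through the package's Service handle rather than by hand-built requests. A minimal sketch follows, assuming the package's standard New constructor and an application-default client via golang.org/x/oauth2/google; the project, backend, key names and the request ID are placeholders, not values from this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Assumed setup: an authenticated HTTP client with a Compute scope.
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// compute.backendServices.delete: RequestId makes a retried call a server-side
	// no-op if the first attempt already completed.
	op, err := svc.BackendServices.Delete("my-project", "my-backend").
		RequestId("3c2f30f8-8a4e-4b1f-9d27-000000000001"). // hypothetical UUID
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("delete operation:", op.Name)

	// compute.backendServices.deleteSignedUrlKey: keyName travels as a query
	// parameter, matching the generated doRequest above.
	if _, err := svc.BackendServices.DeleteSignedUrlKey("my-project", "my-backend", "my-key").Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}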
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { +func (c *BackendServicesDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesDeleteSignedUrlKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersUpdateCall) Header() http.Header { +func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.update" call. +// Do executes the "compute.backendServices.deleteSignedUrlKey" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32641,18 +36824,25 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.autoscalers.update", + // "description": "Deletes a key for validating requests with signed URLs for this backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.deleteSignedUrlKey", // "parameterOrder": [ // "project", - // "zone" + // "backendService", + // "keyName" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. 
The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "keyName": { + // "description": "The name of the Signed URL Key to delete.", // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -32666,19 +36856,9 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, + // "path": "{project}/global/backendServices/{backendService}/deleteSignedUrlKey", // "response": { // "$ref": "Operation" // }, @@ -32690,108 +36870,97 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.backendBuckets.addSignedUrlKey": +// method id "compute.backendServices.get": -type BackendBucketsAddSignedUrlKeyCall struct { - s *Service - project string - backendBucket string - signedurlkey *SignedUrlKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesGetCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AddSignedUrlKey: Adds a key for validating requests with signed URLs -// for this backend bucket. -func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { - c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified BackendService resource. Gets a list of +// available backend services. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get +func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { + c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.signedurlkey = signedurlkey - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsAddSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) + c.backendService = backendService return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsAddSignedUrlKeyCall { +func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsAddSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsAddSignedUrlKeyCall { +func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { +func (c *BackendServicesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.addSignedUrlKey" call. 
-// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.backendServices.get" call. +// Exactly one of *BackendService or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *BackendService.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32810,7 +36979,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -32822,17 +36991,18 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Adds a key for validating requests with signed URLs for this backend bucket.", - // "httpMethod": "POST", - // "id": "compute.backendBuckets.addSignedUrlKey", + // "description": "Returns the specified BackendService resource. Gets a list of available backend services.", + // "httpMethod": "GET", + // "id": "compute.backendServices.get", // "parameterOrder": [ // "project", - // "backendBucket" + // "backendService" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "backendService": { + // "description": "Name of the BackendService resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -32842,70 +37012,48 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", - // "request": { - // "$ref": "SignedUrlKey" - // }, + // "path": "{project}/global/backendServices/{backendService}", // "response": { - // "$ref": "Operation" + // "$ref": "BackendService" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendBuckets.delete": +// method id "compute.backendServices.getHealth": -type BackendBucketsDeleteCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesGetHealthCall struct { + s *Service + project string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified BackendBucket resource. -func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { - c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetHealth: Gets the most recent health check results for this +// BackendService. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth +func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { + c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
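Reviewer note: the IfNoneMatch plumbing added to the get call above is what enables conditional fetches; a 304 surfaces as an error that googleapi.IsNotModified recognizes, as the generated Do comment states. A sketch under those assumptions (the package, helper name, and cached ETag variable are illustrative):

package backendsvc

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getIfChanged refetches a backend service only when its ETag differs from
// the one the caller has cached; on 304 it reports "unchanged" instead of
// treating the response as a failure.
func getIfChanged(ctx context.Context, svc *compute.Service, project, name, cachedETag string) (*compute.BackendService, bool, error) {
	bs, err := svc.BackendServices.Get(project, name).
		IfNoneMatch(cachedETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, false, nil // unchanged since cachedETag
	}
	if err != nil {
		return nil, false, err
	}
	return bs, true, nil
}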
-func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { +func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32913,51 +37061,56 @@ func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { +func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsDeleteCall) Header() http.Header { +func (c *BackendServicesGetHealthCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.backendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32976,7 +37129,7 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &BackendServiceGroupHealth{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -32988,64 +37141,64 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified BackendBucket resource.", - // "httpMethod": "DELETE", - // "id": "compute.backendBuckets.delete", + // "description": "Gets the most recent health check results for this BackendService.", + // "httpMethod": "POST", + // "id": "compute.backendServices.getHealth", // "parameterOrder": [ // "project", - // "backendBucket" + // "backendService" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to delete.", + // "backendService": { + // "description": "Name of the BackendService resource to which the queried instance belongs.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/backendServices/{backendService}/getHealth", + // "request": { + // "$ref": "ResourceGroupReference" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "BackendServiceGroupHealth" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendBuckets.deleteSignedUrlKey": +// method id "compute.backendServices.insert": -type BackendBucketsDeleteSignedUrlKeyCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesInsertCall struct { + s *Service + project string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteSignedUrlKey: Deletes a key for validating requests with signed -// URLs for this backend bucket. 
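Reviewer note: the getHealth call above POSTs a ResourceGroupReference naming the instance group whose health is being queried and returns a BackendServiceGroupHealth. A hedged sketch (group URL, helper, and package name are placeholders; field names are taken from the compute/v1 types):

package backendsvc

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printBackendHealth asks compute.backendServices.getHealth for the most
// recent health check results of one backing instance group.
func printBackendHealth(ctx context.Context, svc *compute.Service, project, name, groupURL string) error {
	ref := &compute.ResourceGroupReference{Group: groupURL}
	health, err := svc.BackendServices.GetHealth(project, name, ref).Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, h := range health.HealthStatus {
		fmt.Printf("%s: %s\n", h.Instance, h.HealthState)
	}
	return nil
}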
-func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { - c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a BackendService resource in the specified project +// using the data included in the request. There are several +// restrictions and guidelines to keep in mind when creating a backend +// service. Read Restrictions and Guidelines for more information. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert +func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { + c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.urlParams_.Set("keyName", keyName) + c.backendservice = backendservice return c } @@ -33063,7 +37216,7 @@ func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsDeleteSignedUrlKeyCall { +func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -33071,7 +37224,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *Back // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteSignedUrlKeyCall { +func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33079,30 +37232,35 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *Bac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsDeleteSignedUrlKeyCall { +func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { +func (c *BackendServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -33110,20 +37268,19 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Resp } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.deleteSignedUrlKey" call. +// Do executes the "compute.backendServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33154,27 +37311,13 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes a key for validating requests with signed URLs for this backend bucket.", + // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", // "httpMethod": "POST", - // "id": "compute.backendBuckets.deleteSignedUrlKey", + // "id": "compute.backendServices.insert", // "parameterOrder": [ - // "project", - // "backendBucket", - // "keyName" + // "project" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. 
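Reviewer note: the insert call above serializes a BackendService body with WithoutDataWrapper.JSONReader and returns an Operation that the caller still has to wait on. A minimal caller sketch; the backend name, protocol settings, and health check URL are assumptions for illustration, not values from this diff.

package backendsvc

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// createBackendService creates a global backend service pointing at an
// existing health check; polling the returned Operation is left to the caller.
func createBackendService(ctx context.Context, svc *compute.Service, project, healthCheckURL string) (*compute.Operation, error) {
	bs := &compute.BackendService{
		Name:         "example-backend", // hypothetical name
		Protocol:     "HTTP",
		PortName:     "http",
		TimeoutSec:   30,
		HealthChecks: []string{healthCheckURL},
	}
	return svc.BackendServices.Insert(project, bs).Context(ctx).Do()
}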
The name should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "keyName": { - // "description": "The name of the Signed URL Key to delete.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -33188,7 +37331,10 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", + // "path": "{project}/global/backendServices", + // "request": { + // "$ref": "BackendService" + // }, // "response": { // "$ref": "Operation" // }, @@ -33200,31 +37346,93 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } -// method id "compute.backendBuckets.get": +// method id "compute.backendServices.list": -type BackendBucketsGetCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendServicesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified BackendBucket resource. Gets a list of -// available backend buckets by making a list() request. -func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { - c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of BackendService resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list +func (r *BackendServicesService) List(project string) *BackendServicesListCall { + c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { +func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33234,7 +37442,7 @@ func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { +func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { c.ifNoneMatch_ = entityTag return c } @@ -33242,21 +37450,21 @@ func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { +func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendBucketsGetCall) Header() http.Header { +func (c *BackendServicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -33268,7 +37476,7 @@ func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -33276,20 +37484,19 @@ func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.get" call. -// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendBucket.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.backendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendServiceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { +func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33308,7 +37515,7 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendBucket{ + ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -33320,19 +37527,34 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } return ret, nil // { - // "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request.", + // "description": "Retrieves the list of BackendService resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.backendBuckets.get", + // "id": "compute.backendServices.list", // "parameterOrder": [ - // "project", - // "backendBucket" + // "project" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -33343,9 +37565,9 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/backendServices", // "response": { - // "$ref": "BackendBucket" + // "$ref": "BackendServiceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -33356,23 +37578,51 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } -// method id "compute.backendBuckets.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type BackendBucketsInsertCall struct { - s *Service - project string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.backendServices.patch": + +type BackendServicesPatchCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a BackendBucket resource in the specified project -// using the data included in the request. -func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { - c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified BackendService resource with the data +// included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch +func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { + c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendbucket = backendbucket + c.backendService = backendService + c.backendservice = backendservice return c } @@ -33390,7 +37640,7 @@ func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBuc // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { +func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -33398,7 +37648,7 @@ func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsIn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { +func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33406,55 +37656,56 @@ func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { +func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
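Reviewer note: the list call plus the Pages helper added above give callers a pagination loop without touching nextPageToken directly; Filter and MaxResults feed straight into the query parameters documented in the metadata. A sketch, with an illustrative filter expression:

package backendsvc

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listHTTPBackends walks every page of compute.backendServices.list, letting
// Pages drive the nextPageToken handling exactly as in the generated loop above.
func listHTTPBackends(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.BackendServices.List(project).
		Filter(`protocol = "HTTP"`). // illustrative filter expression
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.BackendServiceList) error {
		for _, bs := range page.Items {
			fmt.Println(bs.Name, bs.SelfLink)
		}
		return nil
	})
}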
-func (c *BackendBucketsInsertCall) Header() http.Header { +func (c *BackendServicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.insert" call. +// Do executes the "compute.backendServices.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33485,13 +37736,21 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.backendBuckets.insert", + // "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.backendServices.patch", // "parameterOrder": [ - // "project" + // "project", + // "backendService" // ], // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -33505,9 +37764,9 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets", + // "path": "{project}/global/backendServices/{backendService}", // "request": { - // "$ref": "BackendBucket" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -33520,156 +37779,108 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.list": +// method id "compute.backendServices.setSecurityPolicy": -type BackendBucketsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendServicesSetSecurityPolicyCall struct { + s *Service + project string + backendService string + securitypolicyreference *SecurityPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of BackendBucket resources available to the -// specified project. -func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { - c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetSecurityPolicy: Sets the security policy for the specified backend +// service. +func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { + c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.backendService = backendService + c.securitypolicyreference = securitypolicyreference return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
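Reviewer note: the patch call above uses JSON merge-patch semantics, so only fields carried in the request body change, and the new setSecurityPolicy call takes a SecurityPolicyReference. A combined sketch under those assumptions; the timeout value, policy URL, and helper names are placeholders.

package backendsvc

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// bumpTimeout patches a single field; fields omitted from the merge-patch
// body are left untouched on the server.
func bumpTimeout(ctx context.Context, svc *compute.Service, project, name string) (*compute.Operation, error) {
	patch := &compute.BackendService{TimeoutSec: 60}
	return svc.BackendServices.Patch(project, name, patch).Context(ctx).Do()
}

// attachSecurityPolicy points the backend service at an existing security
// policy via compute.backendServices.setSecurityPolicy.
func attachSecurityPolicy(ctx context.Context, svc *compute.Service, project, name, policyURL string) (*compute.Operation, error) {
	ref := &compute.SecurityPolicyReference{SecurityPolicy: policyURL}
	return svc.BackendServices.SetSecurityPolicy(project, name, ref).Context(ctx).Do()
}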
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { +func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetSecurityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { +func (c *BackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetSecurityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsListCall) Header() http.Header { +func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.list" call. -// Exactly one of *BackendBucketList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BackendBucketList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { +// Do executes the "compute.backendServices.setSecurityPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33688,7 +37899,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendBucketList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -33700,34 +37911,18 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke } return ret, nil // { - // "description": "Retrieves the list of BackendBucket resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.backendBuckets.list", + // "description": "Sets the security policy for the specified backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.setSecurityPolicy", // "parameterOrder": [ - // "project" + // "project", + // "backendService" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "backendService": { + // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -33736,62 +37931,50 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets", + // "path": "{project}/global/backendServices/{backendService}/setSecurityPolicy", + // "request": { + // "$ref": "SecurityPolicyReference" + // }, // "response": { - // "$ref": "BackendBucketList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.backendBuckets.patch": +// method id "compute.backendServices.update": -type BackendBucketsPatchCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesUpdateCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified BackendBucket resource with the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. 
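// Editor's sketch (not part of the generated diff): given the new
// compute.backendServices.setSecurityPolicy method above, attaching a security
// policy to a backend service from client code might look roughly like the
// following. The project, backend-service and policy names, plus ctx and the
// authenticated httpClient, are placeholders.
//
//	svc, err := compute.New(httpClient) // assumes an OAuth2-authorized *http.Client
//	if err != nil {
//		log.Fatal(err)
//	}
//	ref := &compute.SecurityPolicyReference{
//		SecurityPolicy: "https://www.googleapis.com/compute/v1/projects/my-project/global/securityPolicies/my-policy",
//	}
//	op, err := svc.BackendServices.
//		SetSecurityPolicy("my-project", "my-backend-service", ref).
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("operation %s: %s", op.Name, op.Status)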
-func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { - c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified BackendService resource with the data +// included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update +func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { + c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket + c.backendService = backendService + c.backendservice = backendservice return c } @@ -33809,7 +37992,7 @@ func (r *BackendBucketsService) Patch(project string, backendBucket string, back // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { +func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -33817,7 +38000,7 @@ func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPat // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { +func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33825,56 +38008,56 @@ func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { +func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsPatchCall) Header() http.Header { +func (c *BackendServicesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.patch" call. +// Do executes the "compute.backendServices.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33905,18 +38088,18 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.backendBuckets.patch", + // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + // "httpMethod": "PUT", + // "id": "compute.backendServices.update", // "parameterOrder": [ // "project", - // "backendBucket" + // "backendService" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to patch.", + // "backendService": { + // "description": "Name of the BackendService resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -33933,9 +38116,9 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/backendServices/{backendService}", // "request": { - // "$ref": "BackendBucket" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -33948,108 +38131,156 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.update": +// method id "compute.diskTypes.aggregatedList": -type BackendBucketsUpdateCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DiskTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified BackendBucket resource with the data -// included in the request. 
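// Editor's sketch (not part of the generated diff): because
// compute.backendServices.update is a full-resource PUT, callers usually read
// the current resource, modify it, and write it back; svc is an initialized
// *compute.Service and the names are placeholders.
//
//	bs, err := svc.BackendServices.Get("my-project", "my-backend-service").Context(ctx).Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	bs.TimeoutSec = 60 // example tweak; any updatable field works the same way
//	op, err := svc.BackendServices.Update("my-project", "my-backend-service", bs).Context(ctx).Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("update operation: %s", op.Name)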
-func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { - c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of disk types. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList +func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { + c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { +func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { +func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
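// Editor's sketch (not part of the generated diff): the Filter, MaxResults,
// OrderBy and PageToken setters above compose fluently onto the call before
// Do() is invoked; svc, ctx and the project name are placeholders.
//
//	list, err := svc.DiskTypes.AggregatedList("my-project").
//		Filter(`name = "pd-ssd"`).
//		MaxResults(100).
//		OrderBy("creationTimestamp desc").
//		Context(ctx).
//		Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	// list.NextPageToken, when non-empty, feeds PageToken() for the next page
//	// (or use the Pages helper shown further below).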
-func (c *BackendBucketsUpdateCall) Header() http.Header { +func (c *DiskTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.diskTypes.aggregatedList" call. +// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34068,7 +38299,7 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &DiskTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34080,19 +38311,34 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.backendBuckets.update", + // "description": "Retrieves an aggregated list of disk types.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "backendBucket" + // "project" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -34101,130 +38347,136 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", - // "request": { - // "$ref": "BackendBucket" - // }, + // "path": "{project}/aggregated/diskTypes", // "response": { - // "$ref": "Operation" + // "$ref": "DiskTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendServices.addSignedUrlKey": - -type BackendServicesAddSignedUrlKeyCall struct { - s *Service - project string - backendService string - signedurlkey *SignedUrlKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// AddSignedUrlKey: Adds a key for validating requests with signed URLs -// for this backend service. -func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { - c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.signedurlkey = signedurlkey - return c +// method id "compute.diskTypes.get": + +type DiskTypesGetCall struct { + s *Service + project string + zone string + diskType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
-// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *BackendServicesAddSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) +// Get: Returns the specified disk type. Gets a list of available disk +// types by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get +func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { + c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.diskType = diskType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesAddSignedUrlKeyCall { +func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesAddSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesAddSignedUrlKeyCall { +func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { +func (c *DiskTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "diskType": c.diskType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.addSignedUrlKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.diskTypes.get" call. +// Exactly one of *DiskType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskType.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34243,7 +38495,7 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &DiskType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34255,17 +38507,19 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Adds a key for validating requests with signed URLs for this backend service.", - // "httpMethod": "POST", - // "id": "compute.backendServices.addSignedUrlKey", + // "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.get", // "parameterOrder": [ // "project", - // "backendService" + // "zone", + // "diskType" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "diskType": { + // "description": "Name of the disk type to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -34276,43 +38530,46 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/addSignedUrlKey", - // "request": { - // "$ref": "SignedUrlKey" - // }, + // "path": "{project}/zones/{zone}/diskTypes/{diskType}", // "response": { - // "$ref": "Operation" + // "$ref": "DiskType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendServices.aggregatedList": +// method id "compute.diskTypes.list": -type BackendServicesAggregatedListCall struct { +type DiskTypesListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves the list of all BackendService resources, -// regional and global, available to the specified project. -func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { - c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of disk types available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list +func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { + c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -34338,7 +38595,7 @@ func (r *BackendServicesService) AggregatedList(project string) *BackendServices // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { +func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { c.urlParams_.Set("filter", filter) return c } @@ -34349,7 +38606,7 @@ func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServic // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { +func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -34366,7 +38623,7 @@ func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *Backen // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { +func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -34374,7 +38631,7 @@ func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServ // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { +func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -34382,7 +38639,7 @@ func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *Backend // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { +func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34392,7 +38649,7 @@ func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *Backen // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { +func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { c.ifNoneMatch_ = entityTag return c } @@ -34400,21 +38657,21 @@ func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *Backe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { +func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesAggregatedListCall) Header() http.Header { +func (c *DiskTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -34426,7 +38683,7 @@ func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Respons var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -34435,18 +38692,19 @@ func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Respons req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.aggregatedList" call. -// Exactly one of *BackendServiceAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *BackendServiceAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { +// Do executes the "compute.diskTypes.list" call. +// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *DiskTypeList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34465,7 +38723,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceAggregatedList{ + ret := &DiskTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34477,11 +38735,12 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B } return ret, nil // { - // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", + // "description": "Retrieves a list of disk types available to the specified project.", // "httpMethod": "GET", - // "id": "compute.backendServices.aggregatedList", + // "id": "compute.diskTypes.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -34508,16 +38767,23 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // "type": "string" // }, // "project": { - // "description": "Name of the project scoping this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/backendServices", + // "path": "{project}/zones/{zone}/diskTypes", // "response": { - // "$ref": "BackendServiceAggregatedList" + // "$ref": "DiskTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -34531,7 +38797,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
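// Editor's sketch (not part of the generated diff): Pages hides the
// nextPageToken bookkeeping, so iterating every disk type in a zone reduces
// to a callback; svc, ctx, project and zone are placeholders.
//
//	err := svc.DiskTypes.List("my-project", "us-central1-a").
//		Pages(ctx, func(page *compute.DiskTypeList) error {
//			for _, dt := range page.Items {
//				fmt.Println(dt.Name, dt.ValidDiskSize)
//			}
//			return nil // returning an error here stops the iteration
//		})
//	if err != nil {
//		log.Fatal(err)
//	}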
-func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { +func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -34549,101 +38815,156 @@ func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*B } } -// method id "compute.backendServices.delete": +// method id "compute.disks.aggregatedList": -type BackendServicesDeleteCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified BackendService resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete -func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { - c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of persistent disks. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList +func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { + c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. 
However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { +func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { +func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
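// Editor's sketch (not part of the generated diff): an aggregated list is
// keyed by scope, so results are usually consumed by walking the Items map;
// svc, ctx and the project name are placeholders.
//
//	agg, err := svc.Disks.AggregatedList("my-project").Context(ctx).Do()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for zone, scoped := range agg.Items {
//		for _, d := range scoped.Disks {
//			fmt.Printf("%s: %s (%d GB)\n", zone, d.Name, d.SizeGb)
//		}
//	}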
-func (c *BackendServicesDeleteCall) Header() http.Header { +func (c *DisksAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.disks.aggregatedList" call. +// Exactly one of *DiskAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34662,7 +38983,7 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &DiskAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34674,19 +38995,34 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified BackendService resource.", - // "httpMethod": "DELETE", - // "id": "compute.backendServices.delete", + // "description": "Retrieves an aggregated list of persistent disks.", + // "httpMethod": "GET", + // "id": "compute.disks.aggregatedList", // "parameterOrder": [ - // "project", - // "backendService" + // "project" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -34695,43 +39031,72 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/aggregated/disks", // "response": { - // "$ref": "Operation" + // "$ref": "DiskAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendServices.deleteSignedUrlKey": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type BackendServicesDeleteSignedUrlKeyCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.disks.createSnapshot": + +type DisksCreateSnapshotCall struct { + s *Service + project string + zone string + disk string + snapshot *Snapshot + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteSignedUrlKey: Deletes a key for validating requests with signed -// URLs for this backend service. -func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { - c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateSnapshot: Creates a snapshot of a specified persistent disk. 
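Editor's note: the Pages helper added above drives the nextPageToken loop for compute.disks.aggregatedList, restoring the original page token when it returns (per the "reset paging to original point" comment). A minimal sketch of how a caller might combine it with Filter and MaxResults; the project ID, scope choice, and filter expression are placeholders, and credentials are assumed to come from Application Default Credentials.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Authenticated HTTP client via Application Default Credentials (assumed configured).
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" and the filter expression are placeholders.
	call := svc.Disks.AggregatedList("my-project").
		Filter(`status = "READY"`).
		MaxResults(100)

	// Pages invokes the callback once per page and follows nextPageToken internally.
	err = call.Pages(ctx, func(page *compute.DiskAggregatedList) error {
		for zone, scoped := range page.Items {
			for _, d := range scoped.Disks {
				fmt.Printf("%s: %s (%d GB)\n", zone, d.Name, d.SizeGb)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}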
+// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot +func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { + c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.urlParams_.Set("keyName", keyName) + c.zone = zone + c.disk = disk + c.snapshot = snapshot + return c +} + +// GuestFlush sets the optional parameter "guestFlush": [Input Only] +// Specifies to create an application consistent snapshot by informing +// the OS to prepare for the snapshot process. Currently only supported +// on Windows instances using the Volume Shadow Copy Service (VSS). +func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { + c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) return c } @@ -34749,7 +39114,7 @@ func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendServi // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendServicesDeleteSignedUrlKeyCall { +func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { c.urlParams_.Set("requestId", requestId) return c } @@ -34757,7 +39122,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *Bac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesDeleteSignedUrlKeyCall { +func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34765,218 +39130,57 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *Ba // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesDeleteSignedUrlKeyCall { +func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { +func (c *DisksCreateSnapshotCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.deleteSignedUrlKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) if err != nil { return nil, err } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes a key for validating requests with signed URLs for this backend service.", - // "httpMethod": "POST", - // "id": "compute.backendServices.deleteSignedUrlKey", - // "parameterOrder": [ - // "project", - // "backendService", - // "keyName" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "keyName": { - // "description": "The name of the Signed URL Key to delete.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}/deleteSignedUrlKey", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendServices.get": - -type BackendServicesGetCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified BackendService resource. Gets a list of -// available backend services. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get -func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { - c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.get" call. -// Exactly one of *BackendService or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { +// Do executes the "compute.disks.createSnapshot" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34995,7 +39199,7 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendService{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35007,69 +39211,110 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi } return ret, nil // { - // "description": "Returns the specified BackendService resource. Gets a list of available backend services.", - // "httpMethod": "GET", - // "id": "compute.backendServices.get", + // "description": "Creates a snapshot of a specified persistent disk.", + // "httpMethod": "POST", + // "id": "compute.disks.createSnapshot", // "parameterOrder": [ // "project", - // "backendService" + // "zone", + // "disk" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", + // "disk": { + // "description": "Name of the persistent disk to snapshot.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "guestFlush": { + // "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + // "location": "query", + // "type": "boolean" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", + // "request": { + // "$ref": "Snapshot" + // }, // "response": { - // "$ref": "BackendService" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.backendServices.getHealth": +// method id "compute.disks.delete": -type BackendServicesGetHealthCall struct { - s *Service - project string - backendService string - resourcegroupreference *ResourceGroupReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksDeleteCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetHealth: Gets the most recent health check results for this -// BackendService. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth -func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { - c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified persistent disk. Deleting a disk +// removes its data permanently and is irreversible. However, deleting a +// disk does not delete any snapshots previously made from the disk. You +// must separately delete snapshots. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete +func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { + c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference + c.zone = zone + c.disk = disk + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
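Editor's note: a hedged sketch of the compute.disks.createSnapshot call added in this hunk. svc is assumed to be a *compute.Service built as in the earlier sketch; the project, zone, disk and snapshot names and the request ID are placeholders, and GuestFlush only applies to Windows guests using VSS.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// createSnapshot starts a snapshot of a placeholder disk and returns once the
// Operation has been accepted (not once the snapshot is finished).
func createSnapshot(ctx context.Context, svc *compute.Service) error {
	snap := &compute.Snapshot{Name: "example-snapshot"}

	op, err := svc.Disks.CreateSnapshot("my-project", "us-central1-a", "example-disk", snap).
		GuestFlush(true).                                  // application-consistent snapshot (Windows/VSS only)
		RequestId("b1d9c6d0-1111-4222-8333-444444444444"). // idempotency token so retries are deduplicated
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("createSnapshot operation:", op.Name, op.Status)
	return nil
}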
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { +func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35077,56 +39322,52 @@ func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServ // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { +func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesGetHealthCall) Header() http.Header { +func (c *DisksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.getHealth" call. -// Exactly one of *BackendServiceGroupHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { +// Do executes the "compute.disks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35145,7 +39386,7 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceGroupHealth{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35157,146 +39398,147 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen } return ret, nil // { - // "description": "Gets the most recent health check results for this BackendService.", - // "httpMethod": "POST", - // "id": "compute.backendServices.getHealth", + // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + // "httpMethod": "DELETE", + // "id": "compute.disks.delete", // "parameterOrder": [ // "project", - // "backendService" + // "zone", + // "disk" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the queried instance belongs.", + // "disk": { + // "description": "Name of the persistent disk to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "project": { + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
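Editor's note: compute.disks.delete returns an *Operation, and non-2xx responses surface as *googleapi.Error (via googleapi.CheckResponse in Do). A sketch, with placeholder identifiers, of treating a 404 as already-deleted; snapshots made from the disk are not removed by this call.

package example

import (
	"context"
	"fmt"
	"net/http"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// deleteDisk deletes a placeholder disk, tolerating the case where it no longer exists.
func deleteDisk(ctx context.Context, svc *compute.Service) error {
	op, err := svc.Disks.Delete("my-project", "us-central1-a", "example-disk").
		RequestId("0f3a1b2c-5555-4666-8777-888888888888"). // placeholder idempotency token
		Context(ctx).
		Do()
	if err != nil {
		// Non-2xx responses are returned as *googleapi.Error with the HTTP status code.
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound {
			fmt.Println("disk already gone")
			return nil
		}
		return err
	}
	fmt.Println("delete operation:", op.Name)
	return nil
}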
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/getHealth", - // "request": { - // "$ref": "ResourceGroupReference" - // }, + // "path": "{project}/zones/{zone}/disks/{disk}", // "response": { - // "$ref": "BackendServiceGroupHealth" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.backendServices.insert": +// method id "compute.disks.get": -type BackendServicesInsertCall struct { - s *Service - project string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksGetCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a BackendService resource in the specified project -// using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a backend -// service. Read Restrictions and Guidelines for more information. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert -func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { - c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns a specified persistent disk. Gets a list of available +// persistent disks by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get +func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { + c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendservice = backendservice - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.disk = disk return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { +func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { +func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesInsertCall) Header() http.Header { +func (c *DisksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.disks.get" call. +// Exactly one of *Disk or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Disk.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35315,7 +39557,7 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Disk{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35327,13 +39569,22 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", - // "httpMethod": "POST", - // "id": "compute.backendServices.insert", + // "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.disks.get", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "disk" // ], // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -35341,114 +39592,54 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendServices", - // "request": { - // "$ref": "BackendService" - // }, + // "path": "{project}/zones/{zone}/disks/{disk}", // "response": { - // "$ref": "Operation" + // "$ref": "Disk" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendServices.list": +// method id "compute.disks.getIamPolicy": -type BackendServicesListCall struct { +type DisksGetIamPolicyCall struct { s *Service project string + zone string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of BackendService resources available to the -// specified project. 
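Editor's note: IfNoneMatch and googleapi.IsNotModified, as described in the comments above, let a caller re-fetch a disk only when its ETag has changed. A sketch assuming svc is a *compute.Service and cachedETag was saved from a prior response's ServerResponse header; resource names are placeholders.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getDiskCached returns the disk only if it changed since cachedETag was recorded.
func getDiskCached(ctx context.Context, svc *compute.Service, cachedETag string) (*compute.Disk, error) {
	disk, err := svc.Disks.Get("my-project", "us-central1-a", "example-disk").
		IfNoneMatch(cachedETag). // server replies 304 if the resource is unchanged
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("disk unchanged since last fetch; reuse the cached copy")
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	fmt.Printf("disk %s: %d GB, status %s\n", disk.Name, disk.SizeGb, disk.Status)
	return disk, nil
}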
-// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list -func (r *BackendServicesService) List(project string) *BackendServicesListCall { - c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { + c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.zone = zone + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { +func (c *DisksGetIamPolicyCall) Fields(s ...googleapi.Field) *DisksGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35458,7 +39649,7 @@ func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { +func (c *DisksGetIamPolicyCall) IfNoneMatch(entityTag string) *DisksGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -35466,21 +39657,21 @@ func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServices // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { +func (c *DisksGetIamPolicyCall) Context(ctx context.Context) *DisksGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesListCall) Header() http.Header { +func (c *DisksGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -35492,7 +39683,7 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -35500,19 +39691,21 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.list" call. -// Exactly one of *BackendServiceList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { +// Do executes the "compute.disks.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. 
Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35531,7 +39724,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35543,47 +39736,40 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ } return ret, nil // { - // "description": "Retrieves the list of BackendService resources available to the specified project.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.backendServices.list", + // "id": "compute.disks.getIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
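Editor's note: a short sketch of the compute.disks.getIamPolicy call added in this hunk; identifiers are placeholders, and the returned Policy may be empty when no bindings exist on the disk.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printDiskPolicy lists the IAM role bindings attached to a placeholder disk.
func printDiskPolicy(ctx context.Context, svc *compute.Service) error {
	policy, err := svc.Disks.GetIamPolicy("my-project", "us-central1-a", "example-disk").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, b := range policy.Bindings {
		fmt.Printf("%s -> %v\n", b.Role, b.Members)
	}
	return nil
}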
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendServices", + // "path": "{project}/zones/{zone}/disks/{resource}/getIamPolicy", // "response": { - // "$ref": "BackendServiceList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -35591,54 +39777,32 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // "https://www.googleapis.com/auth/compute.readonly" // ] // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.backendServices.patch": - -type BackendServicesPatchCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + } -// Patch: Patches the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch -func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { - c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// method id "compute.disks.insert": + +type DisksInsertCall struct { + s *Service + project string + zone string + disk *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a persistent disk in the specified project using the +// data in the request. You can create a disk with a sourceImage, a +// sourceSnapshot, or create an empty 500 GB data disk by omitting all +// properties. You can also create a disk that is larger than the +// default size by specifying the sizeGb property. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert +func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { + c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.backendservice = backendservice + c.zone = zone + c.disk = disk return c } @@ -35656,15 +39820,22 @@ func (r *BackendServicesService) Patch(project string, backendService string, ba // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { +func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// SourceImage sets the optional parameter "sourceImage": Source image +// to restore onto a disk. +func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { + c.urlParams_.Set("sourceImage", sourceImage) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { +func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35672,56 +39843,56 @@ func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServices // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { +func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesPatchCall) Header() http.Header { +func (c *DisksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.patch" call. +// Do executes the "compute.disks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35752,21 +39923,14 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.backendServices.patch", + // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. 
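Editor's note: compute.disks.insert returns a zonal Operation rather than the finished disk, so callers typically poll ZoneOperations until it reports DONE. A sketch under those assumptions; the project, zone, disk name, size, and source-image URL are placeholders.

package example

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v1"
)

// insertDisk creates a placeholder disk from a source image and waits for the
// zonal operation to finish.
func insertDisk(ctx context.Context, svc *compute.Service) error {
	disk := &compute.Disk{
		Name:   "example-disk",
		SizeGb: 50,
	}
	op, err := svc.Disks.Insert("my-project", "us-central1-a", disk).
		SourceImage("projects/debian-cloud/global/images/family/debian-9"). // placeholder image URL
		Context(ctx).
		Do()
	if err != nil {
		return err
	}

	// Poll the zonal operation until Status is DONE, then check for errors.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.ZoneOperations.Get("my-project", "us-central1-a", op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil && len(op.Error.Errors) > 0 {
		return fmt.Errorf("disk insert failed: %s", op.Error.Errors[0].Message)
	}
	fmt.Println("disk created")
	return nil
}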
You can also create a disk that is larger than the default size by specifying the sizeGb property.", + // "httpMethod": "POST", + // "id": "compute.disks.insert", // "parameterOrder": [ // "project", - // "backendService" + // "zone" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -35778,11 +39942,23 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "sourceImage": { + // "description": "Optional. Source image to restore onto a disk.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/zones/{zone}/disks", // "request": { - // "$ref": "BackendService" + // "$ref": "Disk" // }, // "response": { // "$ref": "Operation" @@ -35795,108 +39971,160 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendServices.setSecurityPolicy": +// method id "compute.disks.list": -type BackendServicesSetSecurityPolicyCall struct { - s *Service - project string - backendService string - securitypolicyreference *SecurityPolicyReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetSecurityPolicy: Sets the security policy for the specified backend -// service. -func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { - c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of persistent disks contained within the +// specified zone. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list +func (r *DisksService) List(project string, zone string) *DisksListCall { + c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.securitypolicyreference = securitypolicyreference + c.zone = zone return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetSecurityPolicyCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *DisksListCall) Filter(filter string) *DisksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". 
This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetSecurityPolicyCall { +func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetSecurityPolicyCall { +func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { +func (c *DisksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.setSecurityPolicy" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.disks.list" call. +// Exactly one of *DiskList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35915,7 +40143,7 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &DiskList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35927,18 +40155,35 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the security policy for the specified backend service.", - // "httpMethod": "POST", - // "id": "compute.backendServices.setSecurityPolicy", + // "description": "Retrieves a list of persistent disks contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.disks.list", // "parameterOrder": [ // "project", - // "backendService" + // "zone" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -35948,49 +40193,69 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/setSecurityPolicy", - // "request": { - // "$ref": "SecurityPolicyReference" - // }, + // "path": "{project}/zones/{zone}/disks", // "response": { - // "$ref": "Operation" + // "$ref": "DiskList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendServices.update": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type BackendServicesUpdateCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.disks.resize": + +type DisksResizeCall struct { + s *Service + project string + zone string + disk string + disksresizerequest *DisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update -func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { - c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the specified persistent disk. You can only increase +// the size of the disk. +func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { + c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.backendservice = backendservice + c.zone = zone + c.disk = disk + c.disksresizerequest = disksresizerequest return c } @@ -36008,7 +40273,7 @@ func (r *BackendServicesService) Update(project string, backendService string, b // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { +func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -36016,7 +40281,7 @@ func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { +func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36024,56 +40289,57 @@ func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { +func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
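For orientation, a minimal usage sketch of the Disks methods added above (Insert with its RequestId/SourceImage options, and List driven through Pages). It assumes a *compute.Service constructed elsewhere from an authenticated HTTP client, and the Disk field names Name and SizeGb, which come from the Disk schema rather than from this hunk.

package example

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func createAndListDisks(ctx context.Context, svc *compute.Service, project, zone string) error {
	// Insert builds a DisksInsertCall; RequestId makes the POST safe to retry,
	// and Do submits it, returning a long-running *compute.Operation.
	op, err := svc.Disks.Insert(project, zone, &compute.Disk{
		Name:   "example-disk", // assumed field names; see the Disk schema
		SizeGb: 100,
	}).RequestId("11111111-2222-3333-4444-555555555555").Context(ctx).Do()
	if err != nil {
		return fmt.Errorf("disks.insert: %v", err)
	}
	log.Printf("insert operation: %s", op.Name)

	// List plus Pages walks every page of results; Pages follows NextPageToken
	// itself and restores the original pageToken when it returns.
	return svc.Disks.List(project, zone).
		Filter("name != example-instance").
		MaxResults(50).
		Pages(ctx, func(page *compute.DiskList) error {
			for _, d := range page.Items {
				fmt.Println(d.Name)
			}
			return nil
		})
}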
-func (c *BackendServicesUpdateCall) Header() http.Header { +func (c *DisksResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.update" call. +// Do executes the "compute.disks.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36104,18 +40370,19 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", - // "httpMethod": "PUT", - // "id": "compute.backendServices.update", + // "description": "Resizes the specified persistent disk. You can only increase the size of the disk.", + // "httpMethod": "POST", + // "id": "compute.disks.resize", // "parameterOrder": [ // "project", - // "backendService" + // "zone", + // "disk" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", + // "disk": { + // "description": "The name of the persistent disk.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -36130,11 +40397,18 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/zones/{zone}/disks/{disk}/resize", // "request": { - // "$ref": "BackendService" + // "$ref": "DisksResizeRequest" // }, // "response": { // "$ref": "Operation" @@ -36147,156 +40421,92 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.diskTypes.aggregatedList": +// method id "compute.disks.setIamPolicy": -type DiskTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type DisksSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of disk types. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList -func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { - c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *DisksService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *DisksSetIamPolicyCall { + c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. 
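Similarly, the resize call added above can be driven as below, under the same package, imports, and *compute.Service assumptions as the previous sketch; the SizeGb field name is taken from the DisksResizeRequest schema, not from this hunk. Sizes can only grow.

// growDisk asks Compute Engine to enlarge an existing persistent disk and
// returns the resulting long-running operation.
func growDisk(ctx context.Context, svc *compute.Service, project, zone, disk string) (*compute.Operation, error) {
	return svc.Disks.Resize(project, zone, disk, &compute.DisksResizeRequest{
		SizeGb: 200, // assumed field name: target size in GB, larger than the current size
	}).Context(ctx).Do()
}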
However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.zone = zone + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { +func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { +func (c *DisksSetIamPolicyCall) Context(ctx context.Context) *DisksSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DiskTypesAggregatedListCall) Header() http.Header { +func (c *DisksSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.aggregatedList" call. -// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { +// Do executes the "compute.disks.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36315,7 +40525,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskTypeAggregatedList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36327,152 +40537,137 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of disk types.", - // "httpMethod": "GET", - // "id": "compute.diskTypes.aggregatedList", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.disks.setIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/diskTypes", + // "path": "{project}/zones/{zone}/disks/{resource}/setIamPolicy", + // "request": { + // "$ref": "ZoneSetPolicyRequest" + // }, // "response": { - // "$ref": "DiskTypeAggregatedList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.diskTypes.get": +// method id "compute.disks.setLabels": -type DiskTypesGetCall struct { - s *Service - project string - zone string - diskType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type DisksSetLabelsCall struct { + s *Service + project string + zone string + resource string + zonesetlabelsrequest *ZoneSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified disk type. Gets a list of available disk -// types by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get -func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { - c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on a disk. To learn more about labels, +// read the Labeling Resources documentation. +func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { + c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.diskType = diskType + c.resource = resource + c.zonesetlabelsrequest = zonesetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { +func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { +func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesGetCall) Header() http.Header { +func (c *DisksSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -36480,19 +40675,19 @@ func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "diskType": c.diskType, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.get" call. -// Exactly one of *DiskType or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *DiskType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.disks.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { +func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36511,7 +40706,7 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36523,26 +40718,31 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { } return ret, nil // { - // "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.diskTypes.get", + // "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.disks.setLabels", // "parameterOrder": [ // "project", // "zone", - // "diskType" + // "resource" // ], // "parameters": { - // "diskType": { - // "description": "Name of the disk type to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -36554,173 +40754,107 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/diskTypes/{diskType}", + // "path": "{project}/zones/{zone}/disks/{resource}/setLabels", + // "request": { + // "$ref": "ZoneSetLabelsRequest" + // }, // "response": { - // "$ref": "DiskType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.diskTypes.list": +// method id "compute.disks.testIamPermissions": -type DiskTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type DisksTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of disk types available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list -func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { - c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { + c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. 
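A last sketch, under the same assumptions as the earlier ones, pairing the setLabels and testIamPermissions calls added above. The ZoneSetLabelsRequest and TestPermissionsRequest field names and the permission strings are taken from the request schemas and IAM documentation rather than from this hunk.

// labelAndCheckDisk replaces the disk's labels and then reports which of the
// named permissions the caller actually holds on that disk.
func labelAndCheckDisk(ctx context.Context, svc *compute.Service, project, zone, disk, labelFingerprint string) error {
	// setLabels replaces the whole label set; the current labelFingerprint is
	// required so concurrent label updates are rejected server-side.
	if _, err := svc.Disks.SetLabels(project, zone, disk, &compute.ZoneSetLabelsRequest{
		LabelFingerprint: labelFingerprint,
		Labels:           map[string]string{"env": "test"},
	}).Context(ctx).Do(); err != nil {
		return err
	}

	resp, err := svc.Disks.TestIamPermissions(project, zone, disk, &compute.TestPermissionsRequest{
		Permissions: []string{"compute.disks.get", "compute.disks.setLabels"},
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("granted: %v", resp.Permissions)
	return nil
}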
However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { +func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { +func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DiskTypesListCall) Header() http.Header { +func (c *DisksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.list" call. -// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *DiskTypeList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { +// Do executes the "compute.disks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36739,7 +40873,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskTypeList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36751,37 +40885,15 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err } return ret, nil // { - // "description": "Retrieves a list of disk types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.diskTypes.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.disks.testIamPermissions", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -36789,6 +40901,13 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // "required": true, // "type": "string" // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -36797,9 +40916,12 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/diskTypes", + // "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "DiskTypeList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -36810,177 +40932,101 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.aggregatedList": - -type DisksAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of persistent disks. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList -func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { - c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} +// method id "compute.firewalls.delete": -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). 
By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c +type FirewallsDeleteCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Delete: Deletes the specified firewall. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete +func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { + c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { +func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { +func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksAggregatedListCall) Header() http.Header { +func (c *FirewallsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.aggregatedList" call. -// Exactly one of *DiskAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { +// Do executes the "compute.firewalls.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36999,7 +41045,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -37011,34 +41057,19 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega } return ret, nil // { - // "description": "Retrieves an aggregated list of persistent disks.", - // "httpMethod": "GET", - // "id": "compute.disks.aggregatedList", + // "description": "Deletes the specified firewall.", + // "httpMethod": "DELETE", + // "id": "compute.firewalls.delete", // "parameterOrder": [ - // "project" + // "project", + // "firewall" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "firewall": { + // "description": "Name of the firewall rule to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -37047,153 +41078,115 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/disks", + // "path": "{project}/global/firewalls/{firewall}", // "response": { - // "$ref": "DiskAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.createSnapshot": +// method id "compute.firewalls.get": -type DisksCreateSnapshotCall struct { - s *Service - project string - zone string - disk string - snapshot *Snapshot - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsGetCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// CreateSnapshot: Creates a snapshot of a specified persistent disk. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot -func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { - c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified firewall. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get +func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { + c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.snapshot = snapshot - return c -} - -// GuestFlush sets the optional parameter "guestFlush": -func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { - c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { - c.urlParams_.Set("requestId", requestId) + c.firewall = firewall return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { +func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { +func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DisksCreateSnapshotCall) Header() http.Header { +func (c *FirewallsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.createSnapshot" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.firewalls.get" call. +// Exactly one of *Firewall or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Firewall.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37212,7 +41205,7 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Firewall{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -37224,83 +41217,60 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a snapshot of a specified persistent disk.", - // "httpMethod": "POST", - // "id": "compute.disks.createSnapshot", + // "description": "Returns the specified firewall.", + // "httpMethod": "GET", + // "id": "compute.firewalls.get", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "firewall" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to snapshot.", + // "firewall": { + // "description": "Name of the firewall rule to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "guestFlush": { - // "location": "query", - // "type": "boolean" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", - // "request": { - // "$ref": "Snapshot" - // }, + // "path": "{project}/global/firewalls/{firewall}", // "response": { - // "$ref": "Operation" + // "$ref": "Firewall" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.delete": +// method id "compute.firewalls.insert": -type DisksDeleteCall struct { +type FirewallsInsertCall struct { s *Service project string - zone string - disk string + firewall *Firewall urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified persistent disk. Deleting a disk -// removes its data permanently and is irreversible. However, deleting a -// disk does not delete any snapshots previously made from the disk. You -// must separately delete snapshots. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete -func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { - c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a firewall rule in the specified project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert +func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { + c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + c.firewall = firewall return c } @@ -37318,7 +41288,7 @@ func (r *DisksService) Delete(project string, zone string, disk string) *DisksDe // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { +func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -37326,7 +41296,7 @@ func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { +func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37334,52 +41304,55 @@ func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { +func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksDeleteCall) Header() http.Header { +func (c *FirewallsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.delete" call. +// Do executes the "compute.firewalls.insert" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37410,21 +41383,13 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", - // "httpMethod": "DELETE", - // "id": "compute.disks.delete", + // "description": "Creates a firewall rule in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.firewalls.insert", // "parameterOrder": [ - // "project", - // "zone", - // "disk" + // "project" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -37436,16 +41401,12 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}", + // "path": "{project}/global/firewalls", + // "request": { + // "$ref": "Firewall" + // }, // "response": { // "$ref": "Operation" // }, @@ -37457,34 +41418,93 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.get": +// method id "compute.firewalls.list": -type DisksGetCall struct { +type FirewallsListCall struct { s *Service project string - zone string - disk string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns a specified persistent disk. Gets a list of available -// persistent disks by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get -func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { - c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of firewall rules available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list +func (r *FirewallsService) List(project string) *FirewallsListCall { + c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
+func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { +func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37494,7 +41514,7 @@ func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { +func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { c.ifNoneMatch_ = entityTag return c } @@ -37502,21 +41522,21 @@ func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { +func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksGetCall) Header() http.Header { +func (c *FirewallsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -37528,7 +41548,7 @@ func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -37537,20 +41557,18 @@ func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.get" call. -// Exactly one of *Disk or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Disk.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { +// Do executes the "compute.firewalls.list" call. +// Exactly one of *FirewallList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *FirewallList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37569,7 +41587,7 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Disk{ + ret := &FirewallList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -37581,20 +41599,34 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", + // "description": "Retrieves the list of firewall rules available to the specified project.", // "httpMethod": "GET", - // "id": "compute.disks.get", + // "id": "compute.firewalls.list", // "parameterOrder": [ - // "project", - // "zone", - // "disk" + // "project" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -37603,18 +41635,11 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}", + // "path": "{project}/global/firewalls", // "response": { - // "$ref": "Disk" + // "$ref": "FirewallList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -37625,99 +41650,131 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } -// method id "compute.disks.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type DisksGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.firewalls.patch": + +type FirewallsPatchCall struct { + s *Service + project string + firewall string + firewall2 *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { - c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified firewall rule with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch +func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { + c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource + c.firewall = firewall + c.firewall2 = firewall2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksGetIamPolicyCall) Fields(s ...googleapi.Field) *DisksGetIamPolicyCall { +func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksGetIamPolicyCall) IfNoneMatch(entityTag string) *DisksGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksGetIamPolicyCall) Context(ctx context.Context) *DisksGetIamPolicyCall { +func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksGetIamPolicyCall) Header() http.Header { +func (c *FirewallsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "resource": c.resource, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.firewalls.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37736,7 +41793,7 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -37748,73 +41805,71 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.disks.getIamPolicy", + // "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.firewalls.patch", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "firewall" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "firewall": { + // "description": "Name of the firewall rule to patch.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/getIamPolicy", + // "path": "{project}/global/firewalls/{firewall}", + // "request": { + // "$ref": "Firewall" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.disks.insert": +// method id "compute.firewalls.update": -type DisksInsertCall struct { +type FirewallsUpdateCall struct { s *Service project string - zone string - disk *Disk + firewall string + firewall2 *Firewall urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a persistent disk in the specified project using the -// data in the request. You can create a disk with a sourceImage, a -// sourceSnapshot, or create an empty 500 GB data disk by omitting all -// properties. You can also create a disk that is larger than the -// default size by specifying the sizeGb property. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert -func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { - c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified firewall rule with the data included in +// the request. The PUT method can only update the following fields of +// firewall rule: allowed, description, sourceRanges, sourceTags, +// targetTags. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update +func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { + c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + c.firewall = firewall + c.firewall2 = firewall2 return c } @@ -37832,22 +41887,15 @@ func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksIns // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { +func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { c.urlParams_.Set("requestId", requestId) return c } -// SourceImage sets the optional parameter "sourceImage": Source image -// to restore onto a disk. -func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { - c.urlParams_.Set("sourceImage", sourceImage) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { +func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37855,56 +41903,56 @@ func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { +func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksInsertCall) Header() http.Header { +func (c *FirewallsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.insert" call. +// Do executes the "compute.firewalls.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37935,14 +41983,21 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", - // "httpMethod": "POST", - // "id": "compute.disks.insert", + // "description": "Updates the specified firewall rule with the data included in the request. 
The PUT method can only update the following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", + // "httpMethod": "PUT", + // "id": "compute.firewalls.update", // "parameterOrder": [ // "project", - // "zone" + // "firewall" // ], // "parameters": { + // "firewall": { + // "description": "Name of the firewall rule to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -37954,23 +42009,11 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "sourceImage": { - // "description": "Optional. Source image to restore onto a disk.", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks", + // "path": "{project}/global/firewalls/{firewall}", // "request": { - // "$ref": "Disk" + // "$ref": "Firewall" // }, // "response": { // "$ref": "Operation" @@ -37983,25 +42026,22 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.list": +// method id "compute.forwardingRules.aggregatedList": -type DisksListCall struct { +type ForwardingRulesAggregatedListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of persistent disks contained within the -// specified zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list -func (r *DisksService) List(project string, zone string) *DisksListCall { - c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of forwarding rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList +func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { + c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -38027,7 +42067,7 @@ func (r *DisksService) List(project string, zone string) *DisksListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
-func (c *DisksListCall) Filter(filter string) *DisksListCall { +func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -38038,7 +42078,7 @@ func (c *DisksListCall) Filter(filter string) *DisksListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { +func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -38055,7 +42095,7 @@ func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { +func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -38063,7 +42103,7 @@ func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { +func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -38071,7 +42111,7 @@ func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { +func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38081,7 +42121,7 @@ func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { +func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -38089,21 +42129,21 @@ func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { +func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
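Editor's note: the Filter, MaxResults, OrderBy and PageToken options documented in the doc comments above apply to every generated list call. The sketch below is an illustration only, not part of the vendored change: it shows one plausible way to drive the new ForwardingRules.AggregatedList call with a filter and a smaller page size. The project name, the filter expression, and the client construction via golang.org/x/oauth2/google.DefaultClient are placeholders/assumptions; the shape of the per-scope Items map follows the generated ForwardingRuleAggregatedList type.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Authenticated HTTP client from Application Default Credentials
	// (assumed environment; any *http.Client with Compute scope works).
	hc, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Aggregated list across all regions, narrowed by a filter expression
	// and a smaller page size, as described in the option doc comments.
	list, err := svc.ForwardingRules.AggregatedList("my-project").
		Filter("name != example-rule").
		MaxResults(100).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}

	// Items is keyed by scope (for example "regions/us-central1"); each
	// scoped entry carries the forwarding rules found in that scope.
	for scope, scoped := range list.Items {
		for _, fr := range scoped.ForwardingRules {
			fmt.Printf("%s: %s\n", scope, fr.Name)
		}
	}
}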
-func (c *DisksListCall) Header() http.Header { +func (c *ForwardingRulesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -38115,7 +42155,7 @@ func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -38124,19 +42164,18 @@ func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.list" call. -// Exactly one of *DiskList or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { +// Do executes the "compute.forwardingRules.aggregatedList" call. +// Exactly one of *ForwardingRuleAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38155,7 +42194,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskList{ + ret := &ForwardingRuleAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38167,12 +42206,11 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { } return ret, nil // { - // "description": "Retrieves a list of persistent disks contained within the specified zone.", + // "description": "Retrieves an aggregated list of forwarding rules.", // "httpMethod": "GET", - // "id": "compute.disks.list", + // "id": "compute.forwardingRules.aggregatedList", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -38204,18 +42242,11 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks", + // "path": "{project}/aggregated/forwardingRules", // "response": { - // "$ref": "DiskList" + // "$ref": "ForwardingRuleAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -38229,7 +42260,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { +func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -38247,111 +42278,275 @@ func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) erro } } -// method id "compute.disks.resize": +// method id "compute.forwardingRules.delete": + +type ForwardingRulesDeleteCall struct { + s *Service + project string + region string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified ForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete +func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { + c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingRule = forwardingRule + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRulesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ForwardingRulesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.forwardingRules.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified ForwardingRule resource.", + // "httpMethod": "DELETE", + // "id": "compute.forwardingRules.delete", + // "parameterOrder": [ + // "project", + // "region", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.forwardingRules.get": -type DisksResizeCall struct { - s *Service - project string - zone string - disk string - disksresizerequest *DisksResizeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesGetCall struct { + s *Service + project string + region string + forwardingRule string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the specified persistent disk. You can only increase -// the size of the disk. -func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { - c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified ForwardingRule resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get +func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { + c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksresizerequest = disksresizerequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.forwardingRule = forwardingRule return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { +func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { +func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
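Editor's note (illustrative sketch, not part of the vendored change): the IfNoneMatch option and googleapi.IsNotModified, described in the comments above, are intended to be used together for conditional reads. The sketch assumes svc is a *compute.Service constructed as in the earlier sketch and that the caller cached an ETag from a prior Get response header; the function and variable names are invented for illustration.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getIfChanged re-fetches a forwarding rule only when it has changed since a
// previously recorded ETag. A 304 Not Modified surfaces from Do as an error
// that googleapi.IsNotModified recognizes, as the doc comments above state.
func getIfChanged(ctx context.Context, svc *compute.Service, project, region, rule, etag string) (*compute.ForwardingRule, error) {
	fr, err := svc.ForwardingRules.Get(project, region, rule).
		IfNoneMatch(etag). // etag recorded from an earlier response header
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("forwarding rule unchanged; keep using the cached copy")
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return fr, nil
}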
-func (c *DisksResizeCall) Header() http.Header { +func (c *ForwardingRulesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.forwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38370,7 +42565,7 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38382,17 +42577,17 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Resizes the specified persistent disk. 
You can only increase the size of the disk.", - // "httpMethod": "POST", - // "id": "compute.disks.resize", + // "description": "Returns the specified ForwardingRule resource.", + // "httpMethod": "GET", + // "id": "compute.forwardingRules.get", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "region", + // "forwardingRule" // ], // "parameters": { - // "disk": { - // "description": "The name of the persistent disk.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -38405,62 +42600,73 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/resize", - // "request": { - // "$ref": "DisksResizeRequest" - // }, + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "Operation" + // "$ref": "ForwardingRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.setIamPolicy": +// method id "compute.forwardingRules.insert": -type DisksSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesInsertCall struct { + s *Service + project string + region string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *DisksService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *DisksSetIamPolicyCall { - c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a ForwardingRule resource in the specified project +// and region using the data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert +func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { + c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + c.region = region + c.forwardingrule = forwardingrule + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRulesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyCall { +func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38468,35 +42674,35 @@ func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksSetIamPolicyCall) Context(ctx context.Context) *DisksSetIamPolicyCall { +func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksSetIamPolicyCall) Header() http.Header { +func (c *ForwardingRulesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -38504,21 +42710,20 @@ func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.forwardingRules.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38537,7 +42742,7 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38549,13 +42754,12 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.disks.setIamPolicy", + // "id": "compute.forwardingRules.insert", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "region" // ], // "parameters": { // "project": { @@ -38565,27 +42769,25 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/setIamPolicy", + // "path": "{project}/regions/{region}/forwardingRules", // "request": { - // "$ref": "ZoneSetPolicyRequest" + // "$ref": "ForwardingRule" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -38595,111 +42797,160 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } -// method id "compute.disks.setLabels": +// method id "compute.forwardingRules.list": -type DisksSetLabelsCall struct { - s *Service - project string - zone string - resource string - zonesetlabelsrequest *ZoneSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on a disk. To learn more about labels, -// read the Labeling Resources documentation. -func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { - c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of ForwardingRule resources available to the +// specified project and region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list +func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { + c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetlabelsrequest = zonesetlabelsrequest + c.region = region return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
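Editor's note: the requestId parameter documented above gives the generated mutating calls (Insert, Delete, Patch, SetTarget, and so on) at-most-once retry semantics. The sketch below is an assumption-laden illustration rather than vendored code: svc is a *compute.Service as in the earlier sketch, and requestID is expected to be a client-generated UUID (for example from a UUID library), which is not shown here.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// insertForwardingRule creates a forwarding rule idempotently: retrying with
// the same client-generated requestID lets the server ignore duplicates, as
// the RequestId doc comment above explains.
func insertForwardingRule(ctx context.Context, svc *compute.Service, project, region, requestID string, rule *compute.ForwardingRule) (*compute.Operation, error) {
	op, err := svc.ForwardingRules.Insert(project, region, rule).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	// Insert is asynchronous; callers typically poll the returned Operation
	// (for example via svc.RegionOperations.Get) until its Status is "DONE".
	return op, nil
}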
-func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { +func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { +func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksSetLabelsCall) Header() http.Header { +func (c *ForwardingRulesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.forwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ForwardingRuleList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38718,7 +42969,7 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38730,85 +42981,134 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Sets the labels on a disk. 
To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.disks.setLabels", + // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", + // "httpMethod": "GET", + // "id": "compute.forwardingRules.list", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "region" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/setLabels", - // "request": { - // "$ref": "ZoneSetLabelsRequest" - // }, + // "path": "{project}/regions/{region}/forwardingRules", // "response": { - // "$ref": "Operation" + // "$ref": "ForwardingRuleList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type DisksTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.forwardingRules.setTarget": + +type ForwardingRulesSetTargetCall struct { + s *Service + project string + region string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetTarget: Changes target URL for forwarding rule. The new target +// should be of the same type as the old target. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget +func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { + c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingRule = forwardingRule + c.targetreference = targetreference + return c } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
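Editor's note: the Pages helper added for each list call, documented above, hides the pageToken/nextPageToken bookkeeping behind a per-page callback. A minimal sketch follows (not part of the diff; svc constructed as in the earlier sketch, names invented):

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// listForwardingRuleNames walks every page of forwarding rules in one region.
// Pages feeds each ForwardingRuleList to the callback and follows
// NextPageToken automatically; a non-nil error from the callback stops early.
func listForwardingRuleNames(ctx context.Context, svc *compute.Service, project, region string) ([]string, error) {
	var names []string
	err := svc.ForwardingRules.List(project, region).
		MaxResults(50).
		Pages(ctx, func(page *compute.ForwardingRuleList) error {
			for _, fr := range page.Items {
				names = append(names, fr.Name)
			}
			return nil
		})
	return names, err
}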
-func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { - c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { +func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38816,35 +43116,35 @@ func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIam // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { +func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksTestIamPermissionsCall) Header() http.Header { +func (c *ForwardingRulesSetTargetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -38852,21 +43152,21 @@ func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.forwardingRules.setTarget" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38885,7 +43185,7 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38897,70 +43197,74 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Changes target URL for forwarding rule. 
The new target should be of the same type as the old target.", // "httpMethod": "POST", - // "id": "compute.disks.testIamPermissions", + // "id": "compute.forwardingRules.setTarget", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "region", + // "forwardingRule" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "TargetReference" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.firewalls.delete": +// method id "compute.globalAddresses.delete": -type FirewallsDeleteCall struct { +type GlobalAddressesDeleteCall struct { s *Service project string - firewall string + address string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified firewall. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete -func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { - c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified address resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete +func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { + c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.address = address return c } @@ -38978,7 +43282,7 @@ func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDel // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { +func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -38986,7 +43290,7 @@ func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { +func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38994,21 +43298,21 @@ func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { +func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsDeleteCall) Header() http.Header { +func (c *GlobalAddressesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -39017,7 +43321,7 @@ func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -39025,20 +43329,20 @@ func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.delete" call. +// Do executes the "compute.globalAddresses.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39069,16 +43373,16 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified firewall.", + // "description": "Deletes the specified address resource.", // "httpMethod": "DELETE", - // "id": "compute.firewalls.delete", + // "id": "compute.globalAddresses.delete", // "parameterOrder": [ // "project", - // "firewall" + // "address" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to delete.", + // "address": { + // "description": "Name of the address resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -39097,7 +43401,7 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/global/addresses/{address}", // "response": { // "$ref": "Operation" // }, @@ -39109,31 +43413,32 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.firewalls.get": +// method id "compute.globalAddresses.get": -type FirewallsGetCall struct { +type GlobalAddressesGetCall struct { s *Service project string - firewall string + address string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified firewall. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get -func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { - c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified address resource. Gets a list of available +// addresses by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get +func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { + c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.address = address return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { +func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39143,7 +43448,7 @@ func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { +func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -39151,21 +43456,21 @@ func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { +func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsGetCall) Header() http.Header { +func (c *GlobalAddressesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -39177,7 +43482,7 @@ func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -39185,20 +43490,20 @@ func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.get" call. -// Exactly one of *Firewall or error will be non-nil. Any non-2xx status +// Do executes the "compute.globalAddresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Firewall.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39217,7 +43522,7 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Firewall{ + ret := &Address{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39229,16 +43534,16 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { } return ret, nil // { - // "description": "Returns the specified firewall.", + // "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request.", // "httpMethod": "GET", - // "id": "compute.firewalls.get", + // "id": "compute.globalAddresses.get", // "parameterOrder": [ // "project", - // "firewall" + // "address" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to return.", + // "address": { + // "description": "Name of the address resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -39252,9 +43557,9 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/global/addresses/{address}", // "response": { - // "$ref": "Firewall" + // "$ref": "Address" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -39265,24 +43570,24 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { } -// method id "compute.firewalls.insert": +// method id "compute.globalAddresses.insert": -type FirewallsInsertCall struct { +type GlobalAddressesInsertCall struct { s *Service project string - firewall *Firewall + address *Address urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a firewall rule in the specified project using the -// data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert -func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { - c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an address resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert +func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { + c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.address = address return c } @@ -39300,7 +43605,7 @@ func (r *FirewallsService) Insert(project string, firewall *Firewall) *Firewalls // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { +func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -39308,7 +43613,7 @@ func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { +func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39316,35 +43621,35 @@ func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { +func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsInsertCall) Header() http.Header { +func (c *GlobalAddressesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -39357,14 +43662,14 @@ func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.insert" call. +// Do executes the "compute.globalAddresses.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39395,9 +43700,9 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates a firewall rule in the specified project using the data included in the request.", + // "description": "Creates an address resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.firewalls.insert", + // "id": "compute.globalAddresses.insert", // "parameterOrder": [ // "project" // ], @@ -39415,9 +43720,9 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/global/firewalls", + // "path": "{project}/global/addresses", // "request": { - // "$ref": "Firewall" + // "$ref": "Address" // }, // "response": { // "$ref": "Operation" @@ -39430,9 +43735,9 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.firewalls.list": +// method id "compute.globalAddresses.list": -type FirewallsListCall struct { +type GlobalAddressesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -39441,11 +43746,10 @@ type FirewallsListCall struct { header_ http.Header } -// List: Retrieves the list of firewall rules available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list -func (r *FirewallsService) List(project string) *FirewallsListCall { - c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of global addresses. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list +func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { + c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -39472,7 +43776,7 @@ func (r *FirewallsService) List(project string) *FirewallsListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { +func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { c.urlParams_.Set("filter", filter) return c } @@ -39483,7 +43787,7 @@ func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { +func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -39500,7 +43804,7 @@ func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { +func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -39508,7 +43812,7 @@ func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { +func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -39516,7 +43820,7 @@ func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { +func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39526,7 +43830,7 @@ func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { +func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall { c.ifNoneMatch_ = entityTag return c } @@ -39534,21 +43838,21 @@ func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { +func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsListCall) Header() http.Header { +func (c *GlobalAddressesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -39560,7 +43864,7 @@ func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -39573,14 +43877,14 @@ func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.list" call. -// Exactly one of *FirewallList or error will be non-nil. Any non-2xx +// Do executes the "compute.globalAddresses.list" call. +// Exactly one of *AddressList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *FirewallList.ServerResponse.Header or (if a response was returned at +// *AddressList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { +func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39599,7 +43903,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &FirewallList{ + ret := &AddressList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39611,9 +43915,9 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err } return ret, nil // { - // "description": "Retrieves the list of firewall rules available to the specified project.", + // "description": "Retrieves a list of global addresses.", // "httpMethod": "GET", - // "id": "compute.firewalls.list", + // "id": "compute.globalAddresses.list", // "parameterOrder": [ // "project" // ], @@ -39649,9 +43953,9 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // "type": "string" // } // }, - // "path": "{project}/global/firewalls", + // "path": "{project}/global/addresses", // "response": { - // "$ref": "FirewallList" + // "$ref": "AddressList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -39665,7 +43969,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { +func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -39683,27 +43987,23 @@ func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) err } } -// method id "compute.firewalls.patch": +// method id "compute.globalForwardingRules.delete": -type FirewallsPatchCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesDeleteCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified firewall rule with the data included in -// the request. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch -func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { - c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified GlobalForwardingRule resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete +func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { + c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall - c.firewall2 = firewall2 + c.forwardingRule = forwardingRule return c } @@ -39721,7 +44021,7 @@ func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Fir // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { +func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -39729,7 +44029,7 @@ func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { +func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39737,56 +44037,212 @@ func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { +func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsPatchCall) Header() http.Header { +func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "forwardingRule": c.forwardingRule, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.globalForwardingRules.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified GlobalForwardingRule resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalForwardingRules.delete", + // "parameterOrder": [ + // "project", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalForwardingRules.get": + +type GlobalForwardingRulesGetCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified GlobalForwardingRule resource. Gets a list +// of available forwarding rules by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get +func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { + c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.forwardingRule = forwardingRule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalForwardingRulesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.globalForwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39805,7 +44261,7 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39817,16 +44273,16 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.firewalls.patch", + // "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.globalForwardingRules.get", // "parameterOrder": [ // "project", - // "firewall" + // "forwardingRule" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to patch.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -39838,50 +44294,39 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", - // "request": { - // "$ref": "Firewall" - // }, + // "path": "{project}/global/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "Operation" + // "$ref": "ForwardingRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.firewalls.update": +// method id "compute.globalForwardingRules.insert": -type FirewallsUpdateCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesInsertCall struct { + s *Service + project string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified firewall rule with the data included in -// the request. 
The PUT method can only update the following fields of -// firewall rule: allowed, description, sourceRanges, sourceTags, -// targetTags. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update -func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { - c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a GlobalForwardingRule resource in the specified +// project using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert +func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { + c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall - c.firewall2 = firewall2 + c.forwardingrule = forwardingrule return c } @@ -39899,7 +44344,7 @@ func (r *FirewallsService) Update(project string, firewall string, firewall2 *Fi // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { +func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalForwardingRulesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -39907,7 +44352,7 @@ func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { +func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39915,56 +44360,55 @@ func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { +func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsUpdateCall) Header() http.Header { +func (c *GlobalForwardingRulesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.update" call. +// Do executes the "compute.globalForwardingRules.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39995,21 +44439,13 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. The PUT method can only update the following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", - // "httpMethod": "PUT", - // "id": "compute.firewalls.update", + // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.insert", // "parameterOrder": [ - // "project", - // "firewall" + // "project" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -40023,9 +44459,9 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/global/forwardingRules", // "request": { - // "$ref": "Firewall" + // "$ref": "ForwardingRule" // }, // "response": { // "$ref": "Operation" @@ -40038,9 +44474,9 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.forwardingRules.aggregatedList": +// method id "compute.globalForwardingRules.list": -type ForwardingRulesAggregatedListCall struct { +type GlobalForwardingRulesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -40049,10 +44485,11 @@ type ForwardingRulesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of forwarding rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList -func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { - c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of GlobalForwardingRule resources available to +// the specified project. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list +func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { + c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -40079,7 +44516,7 @@ func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRules // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { +func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { c.urlParams_.Set("filter", filter) return c } @@ -40090,7 +44527,7 @@ func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRul // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { +func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -40107,7 +44544,7 @@ func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *Forwar // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { +func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -40115,7 +44552,7 @@ func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingR // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { +func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -40123,7 +44560,7 @@ func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *Forward // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { +func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40133,7 +44570,7 @@ func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *Forwar // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { +func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall { c.ifNoneMatch_ = entityTag return c } @@ -40141,21 +44578,21 @@ func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *Forwa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { +func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesAggregatedListCall) Header() http.Header { +func (c *GlobalForwardingRulesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -40167,7 +44604,7 @@ func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Respons var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -40180,14 +44617,14 @@ func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.aggregatedList" call. -// Exactly one of *ForwardingRuleAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalForwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ForwardingRuleList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { +func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40206,7 +44643,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleAggregatedList{ + ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -40218,9 +44655,9 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F } return ret, nil // { - // "description": "Retrieves an aggregated list of forwarding rules.", + // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.forwardingRules.aggregatedList", + // "id": "compute.globalForwardingRules.list", // "parameterOrder": [ // "project" // ], @@ -40256,9 +44693,9 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // "type": "string" // } // }, - // "path": "{project}/aggregated/forwardingRules", + // "path": "{project}/global/forwardingRules", // "response": { - // "$ref": "ForwardingRuleAggregatedList" + // "$ref": "ForwardingRuleList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -40272,7 +44709,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { +func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -40290,25 +44727,26 @@ func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*F } } -// method id "compute.forwardingRules.delete": +// method id "compute.globalForwardingRules.setTarget": -type ForwardingRulesDeleteCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesSetTargetCall struct { + s *Service + project string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified ForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete -func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { - c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTarget: Changes target URL for the GlobalForwardingRule resource. +// The new target should be of the same type as the old target. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget +func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { + c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region c.forwardingRule = forwardingRule + c.targetreference = targetreference return c } @@ -40326,7 +44764,7 @@ func (r *ForwardingRulesService) Delete(project string, region string, forwardin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRulesDeleteCall { +func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *GlobalForwardingRulesSetTargetCall { c.urlParams_.Set("requestId", requestId) return c } @@ -40334,7 +44772,7 @@ func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRules // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { +func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40342,52 +44780,56 @@ func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRule // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { +func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesDeleteCall) Header() http.Header { +func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.delete" call. 
+// Do executes the "compute.globalForwardingRules.setTarget" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40418,19 +44860,18 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified ForwardingRule resource.", - // "httpMethod": "DELETE", - // "id": "compute.forwardingRules.delete", + // "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.setTarget", // "parameterOrder": [ // "project", - // "region", // "forwardingRule" // ], // "parameters": { // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", + // "description": "Name of the ForwardingRule resource in which target is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -40441,20 +44882,16 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", + // "request": { + // "$ref": "TargetReference" + // }, // "response": { // "$ref": "Operation" // }, @@ -40466,142 +44903,351 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.forwardingRules.get": +// method id "compute.globalOperations.aggregatedList": + +type GlobalOperationsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of all operations. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList +func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { + c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. 
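[editor's usage sketch] Before the aggregated-operations options continue, here is a hedged sketch of the setTarget call completed just above. The project, rule name, and target proxy URL are placeholders, and svc is the *compute.Service from the first sketch.

// setGlobalRuleTarget points an existing global forwarding rule at a new
// target proxy. All identifiers here are illustrative placeholders.
func setGlobalRuleTarget(ctx context.Context, svc *compute.Service) error {
    ref := &compute.TargetReference{
        Target: "https://www.googleapis.com/compute/v1/projects/my-project/global/targetHttpProxies/my-proxy",
    }
    op, err := svc.GlobalForwardingRules.SetTarget("my-project", "my-rule", ref).Context(ctx).Do()
    if err != nil {
        return err
    }
    log.Printf("setTarget operation %s is %s", op.Name, op.Status)
    return nil
}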
+// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalOperationsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.globalOperations.aggregatedList" call. +// Exactly one of *OperationAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *OperationAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &OperationAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of all operations.", + // "httpMethod": "GET", + // "id": "compute.globalOperations.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/operations", + // "response": { + // "$ref": "OperationAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.globalOperations.delete": -type ForwardingRulesGetCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalOperationsDeleteCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified ForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get -func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { - c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete +func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { + c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { +func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. 
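[editor's usage sketch] To show how the AggregatedList call and its Pages helper above fit together, a sketch that reuses svc from the first example. The Items map keyed by scope ("global", "zones/...") and the OperationsScopedList field names follow the usual layout of the generated OperationAggregatedList type; treat them as assumptions to verify.

// listAllOperations walks every page of the aggregated operations list.
// Returning a non-nil error from the callback stops the iteration early.
func listAllOperations(ctx context.Context, svc *compute.Service) error {
    call := svc.GlobalOperations.AggregatedList("my-project").MaxResults(250)
    return call.Pages(ctx, func(page *compute.OperationAggregatedList) error {
        for scope, scoped := range page.Items {
            for _, op := range scoped.Operations {
                log.Printf("%s: %s (%s)", scope, op.Name, op.Status)
            }
        }
        return nil
    })
}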
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { +func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesGetCall) Header() http.Header { +func (c *GlobalOperationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.get" call. -// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { +// Do executes the "compute.globalOperations.delete" call. +func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ForwardingRule{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return err } - return ret, nil + return nil // { - // "description": "Returns the specified ForwardingRule resource.", - // "httpMethod": "GET", - // "id": "compute.forwardingRules.get", + // "description": "Deletes the specified Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalOperations.delete", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "operation" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", + // "operation": { + // "description": "Name of the Operations resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -40611,131 +45257,108 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", - // "response": { - // "$ref": "ForwardingRule" - // }, + // "path": "{project}/global/operations/{operation}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.forwardingRules.insert": +// method id "compute.globalOperations.get": -type ForwardingRulesInsertCall struct { - s *Service - project string - region string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOperationsGetCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a ForwardingRule resource in the specified project -// and region using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert -func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { - c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Retrieves the specified Operations resource. Gets a list of +// operations by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get +func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { + c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingrule = forwardingrule - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRulesInsertCall { - c.urlParams_.Set("requestId", requestId) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { +func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { +func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
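[editor's usage sketch] A minimal polling sketch around the globalOperations.get call above. The "DONE" status value and the Operation.Error shape follow the conventional compute Operation schema and are assumptions of this sketch; a production poller would add backoff and a deadline. Imports fmt and time are assumed alongside those from the first sketch.

// waitForGlobalOperation polls a named global operation until it reports DONE.
func waitForGlobalOperation(ctx context.Context, svc *compute.Service, name string) error {
    for {
        op, err := svc.GlobalOperations.Get("my-project", name).Context(ctx).Do()
        if err != nil {
            return err
        }
        if op.Status == "DONE" {
            if op.Error != nil && len(op.Error.Errors) > 0 {
                return fmt.Errorf("operation %s failed: %s", name, op.Error.Errors[0].Message)
            }
            return nil
        }
        time.Sleep(2 * time.Second) // fixed interval; real code would back off
    }
}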
-func (c *ForwardingRulesInsertCall) Header() http.Header { +func (c *GlobalOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.insert" call. +// Do executes the "compute.globalOperations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40766,68 +45389,59 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.insert", + // "description": "Retrieves the specified Operations resource. 
Gets a list of operations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.globalOperations.get", // "parameterOrder": [ // "project", - // "region" + // "operation" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules", - // "request": { - // "$ref": "ForwardingRule" - // }, + // "path": "{project}/global/operations/{operation}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.forwardingRules.list": +// method id "compute.globalOperations.list": -type ForwardingRulesListCall struct { +type GlobalOperationsListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of ForwardingRule resources available to the -// specified project and region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list -func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { - c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Operation resources contained within the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list +func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { + c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -40853,7 +45467,7 @@ func (r *ForwardingRulesService) List(project string, region string) *Forwarding // explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { +func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { c.urlParams_.Set("filter", filter) return c } @@ -40864,7 +45478,7 @@ func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall { +func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -40881,7 +45495,7 @@ func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { +func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -40889,7 +45503,7 @@ func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall { +func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -40897,7 +45511,7 @@ func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall { +func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40907,7 +45521,7 @@ func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { +func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -40915,21 +45529,21 @@ func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRules // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { +func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
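[editor's usage sketch] The filter grammar documented above (space-separated parenthesized expressions, ANDed by default) is easiest to see in a concrete call. The expression, the printed Operation fields, and the project ID below are illustrative, and svc is again the service from the first sketch.

// listDoneInserts asks for completed insert operations, newest first.
// Whether this exact filter string is accepted depends on the API; it simply
// follows the grammar described in the Filter documentation above.
func listDoneInserts(ctx context.Context, svc *compute.Service) error {
    page, err := svc.GlobalOperations.List("my-project").
        Filter("(operationType = insert) (status = DONE)").
        OrderBy("creationTimestamp desc").
        MaxResults(50).
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    for _, op := range page.Items {
        log.Printf("%s %s at %s", op.OperationType, op.TargetLink, op.InsertTime)
    }
    // page.NextPageToken would feed a follow-up PageToken call; the Pages
    // helper shown below wraps exactly that loop.
    return nil
}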
-func (c *ForwardingRulesListCall) Header() http.Header { +func (c *GlobalOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -40941,7 +45555,7 @@ func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -40950,19 +45564,18 @@ func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { +func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40981,7 +45594,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleList{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -40993,12 +45606,11 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR } return ret, nil // { - // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", + // "description": "Retrieves a list of Operation resources contained within the specified project.", // "httpMethod": "GET", - // "id": "compute.forwardingRules.list", + // "id": "compute.globalOperations.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -41030,18 +45642,11 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules", + // "path": "{project}/global/operations", // "response": { - // "$ref": "ForwardingRuleList" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -41055,7 +45660,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { +func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -41073,28 +45678,22 @@ func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingR } } -// method id "compute.forwardingRules.setTarget": +// method id "compute.healthChecks.delete": -type ForwardingRulesSetTargetCall struct { - s *Service - project string - region string - forwardingRule string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksDeleteCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTarget: Changes target URL for forwarding rule. The new target -// should be of the same type as the old target. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget -func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { - c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HealthCheck resource. 
+func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { + c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule - c.targetreference = targetreference + c.healthCheck = healthCheck return c } @@ -41112,7 +45711,7 @@ func (r *ForwardingRulesService) SetTarget(project string, region string, forwar // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { +func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -41120,65 +45719,219 @@ func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRu // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { +func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HealthChecksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "healthCheck": c.healthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.healthChecks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified HealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.healthChecks.delete", + // "parameterOrder": [ + // "project", + // "healthCheck" + // ], + // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/healthChecks/{healthCheck}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.healthChecks.get": + +type HealthChecksGetCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified HealthCheck resource. Gets a list of +// available health checks by making a list() request. +func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { + c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.healthCheck = healthCheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
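[editor's usage sketch] A sketch of the ETag flow that the IfNoneMatch setter here supports, reusing svc from the first example. The cached-etag handling and the extra googleapi import (google.golang.org/api/googleapi) are assumptions of this sketch; note also that the Delete call added just above returns an *Operation that the earlier waitForGlobalOperation sketch could poll.

// refreshHealthCheck re-fetches a health check only if it has changed since
// the caller's cached ETag. Names and caching strategy are placeholders.
func refreshHealthCheck(ctx context.Context, svc *compute.Service, name, cachedETag string) (*compute.HealthCheck, error) {
    hc, err := svc.HealthChecks.Get("my-project", name).
        IfNoneMatch(cachedETag).
        Context(ctx).
        Do()
    if googleapi.IsNotModified(err) {
        return nil, nil // unchanged since cachedETag; keep the cached copy
    }
    if err != nil {
        return nil, err
    }
    log.Printf("health check %s (type %s) refreshed", hc.Name, hc.Type)
    return hc, nil
}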
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { +func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesSetTargetCall) Header() http.Header { +func (c *HealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.setTarget" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.healthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *HealthCheck.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41197,7 +45950,7 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41209,19 +45962,18 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.setTarget", + // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.healthChecks.get", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "healthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -41231,52 +45983,38 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", - // "request": { - // "$ref": "TargetReference" - // }, + // "path": "{project}/global/healthChecks/{healthCheck}", // "response": { - // "$ref": "Operation" + // "$ref": "HealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.globalAddresses.delete": +// method id "compute.healthChecks.insert": -type GlobalAddressesDeleteCall struct { - s *Service - project string - address string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksInsertCall struct { + s *Service + project string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete -func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { - c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { + c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.healthcheck = healthcheck return c } @@ -41294,7 +46032,7 @@ func (r *GlobalAddressesService) Delete(project string, address string) *GlobalA // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall { +func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -41302,7 +46040,7 @@ func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddresses // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { +func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41310,51 +46048,55 @@ func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddresse // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { +func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
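The requestId parameter documented above is what makes retries of these mutating calls safe to repeat. As a rough illustration only, here is a minimal sketch of a caller using it; the package name, the health-check field values, and the surrounding wiring (how *compute.Service is constructed) are assumptions and not part of this diff:

package computeexamples // hypothetical example package, not part of the vendored code

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// insertWithRequestID creates a TCP health check, passing a caller-generated
// request ID so that a retried call is deduplicated by the server instead of
// creating a second resource. The ID must be a valid, non-zero UUID.
func insertWithRequestID(ctx context.Context, svc *compute.Service, project, requestID string) {
	hc := &compute.HealthCheck{
		Name:           "example-tcp-check", // hypothetical name
		Type:           "TCP",
		TcpHealthCheck: &compute.TCPHealthCheck{Port: 8080}, // TCPHealthCheck assumed from the same generated package
	}
	op, err := svc.HealthChecks.Insert(project, hc).
		RequestId(requestID). // reuse the same ID when retrying this exact request
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("healthChecks.insert: %v", err)
	}
	fmt.Println("started operation:", op.Name)
}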
-func (c *GlobalAddressesDeleteCall) Header() http.Header { +func (c *HealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.delete" call. +// Do executes the "compute.healthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41385,21 +46127,13 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified address resource.", - // "httpMethod": "DELETE", - // "id": "compute.globalAddresses.delete", + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.healthChecks.insert", // "parameterOrder": [ - // "project", - // "address" + // "project" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -41413,7 +46147,10 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/addresses/{address}", + // "path": "{project}/global/healthChecks", + // "request": { + // "$ref": "HealthCheck" + // }, // "response": { // "$ref": "Operation" // }, @@ -41425,32 +46162,92 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.globalAddresses.get": +// method id "compute.healthChecks.list": -type GlobalAddressesGetCall struct { +type HealthChecksListCall struct { s *Service project string - address string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified address resource. Gets a list of available -// addresses by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get -func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { - c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HealthCheck resources available to the +// specified project. +func (r *HealthChecksService) List(project string) *HealthChecksListCall { + c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall { +func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41460,7 +46257,7 @@ func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall { +func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -41468,21 +46265,21 @@ func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { +func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
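The Filter, MaxResults, OrderBy and PageToken setters added above all chain onto the same list call. A small sketch under the same assumptions as the earlier example (the filter expression is hypothetical):

// listTCPHealthChecks returns the first page of TCP health checks, newest
// first, capped at 50 results. Later pages come back via NextPageToken, or
// via the Pages helper shown further down.
func listTCPHealthChecks(ctx context.Context, svc *compute.Service, project string) (*compute.HealthCheckList, error) {
	return svc.HealthChecks.List(project).
		Filter(`type = "TCP"`).            // hypothetical filter; syntax as documented above
		OrderBy("creationTimestamp desc"). // newest first
		MaxResults(50).                    // per-page size; the server caps this at 500
		Context(ctx).
		Do()
}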
-func (c *GlobalAddressesGetCall) Header() http.Header { +func (c *HealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -41494,7 +46291,7 @@ func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -41503,19 +46300,18 @@ func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { +// Do executes the "compute.healthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41534,7 +46330,7 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Address{ + ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41546,19 +46342,34 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err } return ret, nil // { - // "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request.", + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.globalAddresses.get", + // "id": "compute.healthChecks.list", // "parameterOrder": [ - // "project", - // "address" + // "project" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -41569,9 +46380,9 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err // "type": "string" // } // }, - // "path": "{project}/global/addresses/{address}", + // "path": "{project}/global/healthChecks", // "response": { - // "$ref": "Address" + // "$ref": "HealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -41582,24 +46393,47 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err } -// method id "compute.globalAddresses.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type GlobalAddressesInsertCall struct { - s *Service - project string - address *Address - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.healthChecks.patch": + +type HealthChecksPatchCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an address resource in the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert -func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { - c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { + c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -41617,7 +46451,7 @@ func (r *GlobalAddressesService) Insert(project string, address *Address) *Globa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall { +func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -41625,7 +46459,7 @@ func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddresses // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { +func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41633,55 +46467,56 @@ func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddresse // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { +func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
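The Pages helper added above wraps the NextPageToken loop so callers do not have to manage the token themselves. A minimal usage sketch, same assumptions as the earlier examples:

// printAllHealthChecks visits every page of results; returning a non-nil
// error from the callback halts the iteration early.
func printAllHealthChecks(ctx context.Context, svc *compute.Service, project string) error {
	return svc.HealthChecks.List(project).Pages(ctx, func(page *compute.HealthCheckList) error {
		for _, hc := range page.Items {
			fmt.Printf("%s (%s)\n", hc.Name, hc.Type)
		}
		return nil
	})
}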
-func (c *GlobalAddressesInsertCall) Header() http.Header { +func (c *HealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.insert" call. +// Do executes the "compute.healthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41712,13 +46547,21 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates an address resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.globalAddresses.insert", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.healthChecks.patch", // "parameterOrder": [ - // "project" + // "project", + // "healthCheck" // ], // "parameters": { + // "healthCheck": { + // "description": "Name of the HealthCheck resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -41732,9 +46575,9 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/addresses", + // "path": "{project}/global/healthChecks/{healthCheck}", // "request": { - // "$ref": "Address" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -41747,156 +46590,108 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.globalAddresses.list": +// method id "compute.healthChecks.update": -type GlobalAddressesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HealthChecksUpdateCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of global addresses. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list -func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { - c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { + c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). 
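Because patch uses JSON merge semantics, only the fields that actually serialize are changed; with Go zero values that generally means also naming them in the struct's ForceSendFields (a field of the generated types, assumed here and not shown in this hunk). A sketch of a partial update under the same assumptions as above; Update, by contrast, is a PUT that sends the full resource:

// patchCheckInterval changes only checkIntervalSec on an existing health
// check. CheckIntervalSec is assumed from the same generated package; a zero
// value would be dropped from the request body unless it is listed in
// ForceSendFields.
func patchCheckInterval(ctx context.Context, svc *compute.Service, project, name string, seconds int64) (*compute.Operation, error) {
	patch := &compute.HealthCheck{
		CheckIntervalSec: seconds,
		ForceSendFields:  []string{"CheckIntervalSec"},
	}
	return svc.HealthChecks.Patch(project, name, patch).Context(ctx).Do()
}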
-func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall { +func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall { +func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesListCall) Header() http.Header { +func (c *HealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// Do executes the "compute.healthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { +func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41915,7 +46710,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41927,34 +46722,19 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList } return ret, nil // { - // "description": "Retrieves a list of global addresses.", - // "httpMethod": "GET", - // "id": "compute.globalAddresses.list", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.healthChecks.update", // "parameterOrder": [ - // "project" + // "project", + // "healthCheck" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -41963,59 +46743,45 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/addresses", + // "path": "{project}/global/healthChecks/{healthCheck}", + // "request": { + // "$ref": "HealthCheck" + // }, // "response": { - // "$ref": "AddressList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalForwardingRules.delete": +// method id "compute.httpHealthChecks.delete": -type GlobalForwardingRulesDeleteCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksDeleteCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified GlobalForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete -func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { - c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HttpHealthCheck resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete +func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { + c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule + c.httpHealthCheck = httpHealthCheck return c } @@ -42033,7 +46799,7 @@ func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { +func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -42041,7 +46807,7 @@ func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalFor // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { +func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42049,21 +46815,21 @@ func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalFo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { +func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { +func (c *HttpHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -42072,7 +46838,7 @@ func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -42080,20 +46846,20 @@ func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.delete" call. +// Do executes the "compute.httpHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42124,18 +46890,18 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified GlobalForwardingRule resource.", + // "description": "Deletes the specified HttpHealthCheck resource.", // "httpMethod": "DELETE", - // "id": "compute.globalForwardingRules.delete", + // "id": "compute.httpHealthChecks.delete", // "parameterOrder": [ // "project", - // "forwardingRule" + // "httpHealthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -42152,7 +46918,7 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { // "$ref": "Operation" // }, @@ -42164,32 +46930,32 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.globalForwardingRules.get": +// method id "compute.httpHealthChecks.get": -type GlobalForwardingRulesGetCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpHealthChecksGetCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified GlobalForwardingRule resource. Gets a list -// of available forwarding rules by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get -func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { - c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HttpHealthCheck resource. Gets a list of +// available HTTP health checks by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get +func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { + c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule + c.httpHealthCheck = httpHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
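Delete, like the other mutating calls in this file, returns an Operation rather than blocking until the resource is gone. A rough polling sketch, same assumptions as above plus a "time" import; GlobalOperations.Get and the Operation error fields are assumed from the same generated package, and the fixed poll interval is arbitrary:

// deleteAndWait issues the delete and then polls the returned global
// operation until its status reaches DONE. Production code would add
// backoff and rely on ctx for an overall deadline.
func deleteAndWait(ctx context.Context, svc *compute.Service, project, name string) error {
	op, err := svc.HttpHealthChecks.Delete(project, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.GlobalOperations.Get(project, op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil && len(op.Error.Errors) > 0 {
		return fmt.Errorf("delete failed: %s", op.Error.Errors[0].Message)
	}
	return nil
}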
-func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall { +func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42199,7 +46965,7 @@ func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwa // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall { +func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -42207,21 +46973,21 @@ func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForw // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { +func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesGetCall) Header() http.Header { +func (c *HttpHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -42233,7 +46999,7 @@ func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -42241,20 +47007,20 @@ func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.get" call. -// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// Do executes the "compute.httpHealthChecks.get" call. +// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned +// *HttpHealthCheck.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
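The IfNoneMatch setter above turns the get into a conditional request against an ETag from an earlier response; a 304 comes back as an error that googleapi.IsNotModified recognizes. A sketch under the same assumptions as above, plus the google.golang.org/api/googleapi import; whether the server actually returns an Etag header for this resource is not guaranteed:

// refreshIfChanged re-fetches an HttpHealthCheck only if it has changed since
// the copy we already hold.
func refreshIfChanged(ctx context.Context, svc *compute.Service, project string, cached *compute.HttpHealthCheck) (*compute.HttpHealthCheck, error) {
	etag := cached.ServerResponse.Header.Get("Etag")
	fresh, err := svc.HttpHealthChecks.Get(project, cached.Name).
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // unchanged since the last fetch
	}
	if err != nil {
		return nil, err
	}
	return fresh, nil
}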
-func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { +func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42273,7 +47039,7 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRule{ + ret := &HttpHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -42285,18 +47051,18 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar } return ret, nil // { - // "description": "Returns the specified GlobalForwardingRule resource. Gets a list of available forwarding rules by making a list() request.", + // "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.get", + // "id": "compute.httpHealthChecks.get", // "parameterOrder": [ // "project", - // "forwardingRule" + // "httpHealthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -42308,9 +47074,9 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { - // "$ref": "ForwardingRule" + // "$ref": "HttpHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -42321,24 +47087,24 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar } -// method id "compute.globalForwardingRules.insert": +// method id "compute.httpHealthChecks.insert": -type GlobalForwardingRulesInsertCall struct { - s *Service - project string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksInsertCall struct { + s *Service + project string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a GlobalForwardingRule resource in the specified -// project using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert -func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { - c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HttpHealthCheck resource in the specified project +// using the data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert +func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { + c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingrule = forwardingrule + c.httphealthcheck = httphealthcheck return c } @@ -42356,7 +47122,7 @@ func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *Fo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalForwardingRulesInsertCall { +func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -42364,7 +47130,7 @@ func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalFor // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { +func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42372,35 +47138,35 @@ func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalFo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { +func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesInsertCall) Header() http.Header { +func (c *HttpHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -42413,14 +47179,14 @@ func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.insert" call. +// Do executes the "compute.httpHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42451,9 +47217,9 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", + // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.insert", + // "id": "compute.httpHealthChecks.insert", // "parameterOrder": [ // "project" // ], @@ -42471,9 +47237,9 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules", + // "path": "{project}/global/httpHealthChecks", // "request": { - // "$ref": "ForwardingRule" + // "$ref": "HttpHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -42486,9 +47252,9 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.globalForwardingRules.list": +// method id "compute.httpHealthChecks.list": -type GlobalForwardingRulesListCall struct { +type HttpHealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -42497,11 +47263,11 @@ type GlobalForwardingRulesListCall struct { header_ http.Header } -// List: Retrieves a list of GlobalForwardingRule resources available to +// List: Retrieves the list of HttpHealthCheck resources available to // the specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list -func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { - c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list +func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { + c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -42528,7 +47294,7 @@ func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRul // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { +func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -42539,7 +47305,7 @@ func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingR // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall { +func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -42556,7 +47322,7 @@ func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForw // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { +func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -42564,7 +47330,7 @@ func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardin // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall { +func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -42572,7 +47338,7 @@ func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall { +func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42582,7 +47348,7 @@ func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForw // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall { +func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -42590,21 +47356,21 @@ func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalFor // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall { +func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
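IfNoneMatch pairs with googleapi.IsNotModified for cheap revalidation of a cached response. A sketch of re-listing only when the server reports a different ETag (illustrative, not from the vendored code; assumes a previously fetched list and the same *compute.Service as above):

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// refreshIfChanged re-lists HTTP health checks only if the server's ETag
// differs from the cached response; on a 304 it keeps the cached list.
func refreshIfChanged(ctx context.Context, svc *compute.Service, project string, cached *compute.HttpHealthCheckList) (*compute.HttpHealthCheckList, error) {
	etag := cached.ServerResponse.Header.Get("Etag")
	fresh, err := svc.HttpHealthChecks.List(project).IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return cached, nil
	}
	if err != nil {
		return nil, err
	}
	return fresh, nil
}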
-func (c *GlobalForwardingRulesListCall) Header() http.Header { +func (c *HttpHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -42616,7 +47382,7 @@ func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -42629,14 +47395,220 @@ func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any +// Do executes the "compute.httpHealthChecks.list" call. +// Exactly one of *HttpHealthCheckList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was +// *HttpHealthCheckList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { +func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HttpHealthCheckList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.httpHealthChecks.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpHealthChecks", + // "response": { + // "$ref": "HttpHealthCheckList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.httpHealthChecks.patch": + +type HttpHealthChecksPatchCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. 
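The Pages helper defined above drives the pageToken loop for the caller. A sketch that walks every page with a server-side filter (filter syntax as in the Filter doc comment; names are placeholders):

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// listHealthCheckNames collects the names of all matching HTTP health checks
// in the project, letting Pages follow nextPageToken until it is empty.
func listHealthCheckNames(ctx context.Context, svc *compute.Service, project string) ([]string, error) {
	var names []string
	call := svc.HttpHealthChecks.List(project).Filter("name != example-instance").MaxResults(100)
	err := call.Pages(ctx, func(page *compute.HttpHealthCheckList) error {
		for _, hc := range page.Items {
			names = append(names, hc.Name)
		}
		return nil // returning a non-nil error here would stop the iteration
	})
	return names, err
}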
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch +func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { + c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpHealthChecksPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.httpHealthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42655,7 +47627,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -42667,34 +47639,19 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa } return ret, nil // { - // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.list", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.httpHealthChecks.patch", // "parameterOrder": [ - // "project" + // "project", + // "httpHealthCheck" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -42703,62 +47660,48 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "request": { + // "$ref": "HttpHealthCheck" + // }, // "response": { - // "$ref": "ForwardingRuleList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalForwardingRules.setTarget": +// method id "compute.httpHealthChecks.update": -type GlobalForwardingRulesSetTargetCall struct { +type HttpHealthChecksUpdateCall struct { s *Service project string - forwardingRule string - targetreference *TargetReference + httpHealthCheck string + httphealthcheck *HttpHealthCheck urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetTarget: Changes target URL for the GlobalForwardingRule resource. -// The new target should be of the same type as the old target. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget -func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { - c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update +func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { + c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule - c.targetreference = targetreference + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck return c } @@ -42776,7 +47719,7 @@ func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *GlobalForwardingRulesSetTargetCall { +func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -42784,7 +47727,7 @@ func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *Global // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { +func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42792,56 +47735,56 @@ func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *Globa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { +func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
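Patch above uses JSON merge-patch semantics, while Update issues a PUT that replaces the resource. A sketch of a minimal patch; note that zero-valued Go fields are omitted from the request body unless listed in ForceSendFields:

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// adjustInterval changes only the check interval of an existing HTTP health
// check, leaving every other field as it is on the server.
func adjustInterval(ctx context.Context, svc *compute.Service, project, name string, seconds int64) (*compute.Operation, error) {
	patch := &compute.HttpHealthCheck{
		CheckIntervalSec: seconds,
		// If seconds could legitimately be 0, force it into the payload:
		// ForceSendFields: []string{"CheckIntervalSec"},
	}
	return svc.HttpHealthChecks.Patch(project, name, patch).Context(ctx).Do()
}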
-func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { +func (c *HttpHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.setTarget" call. +// Do executes the "compute.httpHealthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42872,18 +47815,18 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Changes target URL for the GlobalForwardingRule resource. 
The new target should be of the same type as the old target.", - // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.setTarget", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpHealthChecks.update", // "parameterOrder": [ // "project", - // "forwardingRule" + // "httpHealthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -42900,9 +47843,9 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "request": { - // "$ref": "TargetReference" + // "$ref": "HttpHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -42915,156 +47858,100 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.globalOperations.aggregatedList": +// method id "compute.httpsHealthChecks.delete": -type GlobalOperationsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksDeleteCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of all operations. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList -func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { - c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HttpsHealthCheck resource. +func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { + c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.httpsHealthCheck = httpsHealthCheck return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). 
By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { +func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { +func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsAggregatedListCall) Header() http.Header { +func (c *HttpsHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.aggregatedList" call. -// Exactly one of *OperationAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *OperationAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { +// Do executes the "compute.httpsHealthChecks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
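Delete, like the other mutating calls here, returns a global Operation rather than completing synchronously. A sketch that deletes an HTTPS health check and polls the operation to completion (a real caller would add a timeout and backoff; GlobalOperations.Get is part of the same generated service):

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v1"
)

// deleteHTTPSCheck deletes an HTTPS health check and waits for the resulting
// global operation to reach DONE.
func deleteHTTPSCheck(ctx context.Context, svc *compute.Service, project, name string) error {
	op, err := svc.HttpsHealthChecks.Delete(project, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		if op, err = svc.GlobalOperations.Get(project, op.Name).Context(ctx).Do(); err != nil {
			return err
		}
	}
	if op.Error != nil && len(op.Error.Errors) > 0 {
		return fmt.Errorf("deleting %s: %s", name, op.Error.Errors[0].Message)
	}
	return nil
}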
+func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43083,7 +47970,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43095,34 +47982,19 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of all operations.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.aggregatedList", + // "description": "Deletes the specified HttpsHealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.httpsHealthChecks.delete", // "parameterOrder": [ - // "project" + // "project", + // "httpsHealthCheck" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -43131,135 +48003,157 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/operations", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "response": { - // "$ref": "OperationAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalOperations.delete": +// method id "compute.httpsHealthChecks.get": -type GlobalOperationsDeleteCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksGetCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete -func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { - c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HttpsHealthCheck resource. Gets a list of +// available HTTPS health checks by making a list() request. 
+func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { + c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.httpsHealthCheck = httpsHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { +func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { +func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsDeleteCall) Header() http.Header { +func (c *HttpsHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.delete" call. -func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "compute.httpsHealthChecks.get" call. +// Exactly one of *HttpsHealthCheck or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheck.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return nil, err } - return nil + ret := &HttpsHealthCheck{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Deletes the specified Operations resource.", - // "httpMethod": "DELETE", - // "id": "compute.globalOperations.delete", + // "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.httpsHealthChecks.get", // "parameterOrder": [ // "project", - // "operation" + // "httpsHealthCheck" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -43271,106 +48165,118 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // } // }, - // "path": "{project}/global/operations/{operation}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "response": { + // "$ref": "HttpsHealthCheck" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.globalOperations.get": +// method id "compute.httpsHealthChecks.insert": -type GlobalOperationsGetCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksInsertCall struct { + s *Service + project string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Retrieves the specified Operations resource. Gets a list of -// operations by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get -func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { - c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HttpsHealthCheck resource in the specified project +// using the data included in the request. 
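For symmetry with the HTTP variant, a sketch that creates an HTTPS health check (field names come from the generated HttpsHealthCheck struct; the values are placeholders):

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// insertHTTPSCheck creates an HTTPS health check probing /healthz on port 443.
func insertHTTPSCheck(ctx context.Context, svc *compute.Service, project string) (*compute.Operation, error) {
	hc := &compute.HttpsHealthCheck{
		Name:               "web-tls-healthz",
		Port:               443,
		RequestPath:        "/healthz",
		HealthyThreshold:   2,
		UnhealthyThreshold: 3,
	}
	return svc.HttpsHealthChecks.Insert(project, hc).Context(ctx).Do()
}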
+func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { + c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.httpshealthcheck = httpshealthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { +func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { +func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *GlobalOperationsGetCall) Header() http.Header { +func (c *HttpsHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.get" call. +// Do executes the "compute.httpsHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43401,45 +48307,44 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified Operations resource. Gets a list of operations by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.get", + // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.httpsHealthChecks.insert", // "parameterOrder": [ - // "project", - // "operation" + // "project" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/operations/{operation}", + // "path": "{project}/global/httpsHealthChecks", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalOperations.list": +// method id "compute.httpsHealthChecks.list": -type GlobalOperationsListCall struct { +type HttpsHealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -43448,11 +48353,10 @@ type GlobalOperationsListCall struct { header_ http.Header } -// List: Retrieves a list of Operation resources contained within the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list -func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { - c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HttpsHealthCheck resources available to +// the specified project. +func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { + c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -43479,7 +48383,7 @@ func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { +func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -43490,7 +48394,7 @@ func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { +func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -43507,7 +48411,7 @@ func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperation // // Currently, only sorting by name or creationTimestamp desc is // supported. 
-func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { +func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -43515,7 +48419,7 @@ func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { +func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -43523,7 +48427,7 @@ func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperations // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { +func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43533,7 +48437,7 @@ func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperation // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { +func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -43541,21 +48445,21 @@ func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperatio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { +func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsListCall) Header() http.Header { +func (c *HttpsHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -43567,7 +48471,7 @@ func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -43580,14 +48484,14 @@ func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.list" call. 
-// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.httpsHealthChecks.list" call. +// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43606,7 +48510,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationList{ + ret := &HttpsHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43618,9 +48522,9 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified project.", + // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.globalOperations.list", + // "id": "compute.httpsHealthChecks.list", // "parameterOrder": [ // "project" // ], @@ -43656,9 +48560,9 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "type": "string" // } // }, - // "path": "{project}/global/operations", + // "path": "{project}/global/httpsHealthChecks", // "response": { - // "$ref": "OperationList" + // "$ref": "HttpsHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -43672,7 +48576,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { +func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -43690,22 +48594,26 @@ func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationL } } -// method id "compute.healthChecks.delete": +// method id "compute.httpsHealthChecks.patch": -type HealthChecksDeleteCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksPatchCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HealthCheck resource. 
-func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { - c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { + c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck return c } @@ -43723,7 +48631,7 @@ func (r *HealthChecksService) Delete(project string, healthCheck string) *Health // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { +func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -43731,7 +48639,7 @@ func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDelete // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { +func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43739,51 +48647,56 @@ func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDelet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { +func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksDeleteCall) Header() http.Header { +func (c *HttpsHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.delete" call. +// Do executes the "compute.httpsHealthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43814,18 +48727,18 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Deletes the specified HealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.healthChecks.delete", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.httpsHealthChecks.patch", // "parameterOrder": [ // "project", - // "healthCheck" + // "httpsHealthCheck" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to delete.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -43842,7 +48755,10 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, // "response": { // "$ref": "Operation" // }, @@ -43854,179 +48770,25 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.healthChecks.get": - -type HealthChecksGetCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. -func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { - c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.healthCheck = healthCheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HealthChecksGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.healthChecks.get" call. -// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheck.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &HealthCheck{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified HealthCheck resource. 
Gets a list of available health checks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.healthChecks.get", - // "parameterOrder": [ - // "project", - // "healthCheck" - // ], - // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/healthChecks/{healthCheck}", - // "response": { - // "$ref": "HealthCheck" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.healthChecks.insert": +// method id "compute.httpsHealthChecks.update": -type HealthChecksInsertCall struct { - s *Service - project string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksUpdateCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { - c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. +func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { + c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthcheck = healthcheck + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck return c } @@ -44044,7 +48806,7 @@ func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { +func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -44052,7 +48814,7 @@ func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsert // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { +func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44060,55 +48822,56 @@ func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInser // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { +func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksInsertCall) Header() http.Header { +func (c *HttpsHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.insert" call. +// Do executes the "compute.httpsHealthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44139,13 +48902,21 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.healthChecks.insert", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpsHealthChecks.update", // "parameterOrder": [ - // "project" + // "project", + // "httpsHealthCheck" // ], // "parameters": { + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -44159,9 +48930,9 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/healthChecks", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "request": { - // "$ref": "HealthCheck" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -44174,156 +48945,101 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.healthChecks.list": +// method id "compute.images.delete": -type HealthChecksListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ImagesDeleteCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of HealthCheck resources available to the -// specified project. -func (r *HealthChecksService) List(project string) *HealthChecksListCall { - c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified image. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete +func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { + c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.image = image return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. 
However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { - c.urlParams_.Set("pageToken", pageToken) +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { +func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { +func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksListCall) Header() http.Header { +func (c *ImagesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.list" call. -// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// Do executes the "compute.images.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *HealthCheckList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44342,7 +49058,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheckList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44354,34 +49070,19 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis } return ret, nil // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.healthChecks.list", + // "description": "Deletes the specified image.", + // "httpMethod": "DELETE", + // "id": "compute.images.delete", // "parameterOrder": [ - // "project" + // "project", + // "image" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "image": { + // "description": "Name of the image resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -44390,62 +49091,47 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/healthChecks", + // "path": "{project}/global/images/{image}", // "response": { - // "$ref": "HealthCheckList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.healthChecks.patch": +// method id "compute.images.deprecate": -type HealthChecksPatchCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesDeprecateCall struct { + s *Service + project string + image string + deprecationstatus *DeprecationStatus + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HealthCheck resource in the specified project using -// the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { - c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Deprecate: Sets the deprecation status of an image. +// +// If an empty request body is given, clears the deprecation status +// instead. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate +func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { + c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck + c.image = image + c.deprecationstatus = deprecationstatus return c } @@ -44463,7 +49149,7 @@ func (r *HealthChecksService) Patch(project string, healthCheck string, healthch // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { +func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -44471,7 +49157,7 @@ func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { +func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44479,56 +49165,56 @@ func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { +func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksPatchCall) Header() http.Header { +func (c *ImagesDeprecateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.patch" call. +// Do executes the "compute.images.deprecate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44559,18 +49245,18 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.healthChecks.patch", + // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + // "httpMethod": "POST", + // "id": "compute.images.deprecate", // "parameterOrder": [ // "project", - // "healthCheck" + // "image" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to patch.", + // "image": { + // "description": "Image name.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -44587,9 +49273,9 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/images/{image}/deprecate", // "request": { - // "$ref": "HealthCheck" + // "$ref": "DeprecationStatus" // }, // "response": { // "$ref": "Operation" @@ -44602,108 +49288,97 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.healthChecks.update": +// method id "compute.images.get": -type HealthChecksUpdateCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesGetCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { - c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified image. Gets a list of available images by +// making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get +func (r *ImagesService) Get(project string, image string) *ImagesGetCall { + c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { - c.urlParams_.Set("requestId", requestId) + c.image = image return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { +func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { +func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksUpdateCall) Header() http.Header { +func (c *ImagesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.images.get" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44722,7 +49397,7 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44734,18 +49409,18 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.healthChecks.update", + // "description": "Returns the specified image. Gets a list of available images by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.images.get", // "parameterOrder": [ // "project", - // "healthCheck" + // "image" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to update.", + // "image": { + // "description": "Name of the image resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -44755,123 +49430,111 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", - // "request": { - // "$ref": "HealthCheck" - // }, + // "path": "{project}/global/images/{image}", // "response": { - // "$ref": "Operation" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.httpHealthChecks.delete": +// method id "compute.images.getFromFamily": -type HttpHealthChecksDeleteCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesGetFromFamilyCall struct { + s *Service + project string + family string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpHealthCheck resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete -func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { - c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetFromFamily: Returns the latest image that is part of an image +// family and is not deprecated. +func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { + c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.family = family return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { +func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { +func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksDeleteCall) Header() http.Header { +func (c *ImagesGetFromFamilyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "family": c.family, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.images.getFromFamily" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44890,7 +49553,7 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44902,18 +49565,18 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified HttpHealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.httpHealthChecks.delete", + // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "httpMethod": "GET", + // "id": "compute.images.getFromFamily", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "family" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to delete.", + // "family": { + // "description": "Name of the image family to search for.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -44923,51 +49586,46 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/global/images/family/{family}", // "response": { - // "$ref": "Operation" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.httpHealthChecks.get": +// method id "compute.images.getIamPolicy": -type HttpHealthChecksGetCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ImagesGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HttpHealthCheck resource. Gets a list of -// available HTTP health checks by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get -func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { - c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { + c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { +func (c *ImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44977,7 +49635,7 @@ func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecks // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { +func (c *ImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *ImagesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -44985,21 +49643,21 @@ func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthCheck // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { +func (c *ImagesGetIamPolicyCall) Context(ctx context.Context) *ImagesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksGetCall) Header() http.Header { +func (c *ImagesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -45011,7 +49669,7 @@ func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -45019,20 +49677,20 @@ func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.get" call. 
-// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HttpHealthCheck.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { +// Do executes the "compute.images.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45051,7 +49709,7 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheck{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45063,32 +49721,32 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } return ret, nil // { - // "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.get", + // "id": "compute.images.getIamPolicy", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "resource" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/global/images/{resource}/getIamPolicy", // "response": { - // "$ref": "HttpHealthCheck" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -45099,24 +49757,31 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } -// method id "compute.httpHealthChecks.insert": +// method id "compute.images.insert": -type HttpHealthChecksInsertCall struct { - s *Service - project string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ 
http.Header +type ImagesInsertCall struct { + s *Service + project string + image *Image + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpHealthCheck resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert -func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { - c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an image in the specified project using the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert +func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { + c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httphealthcheck = httphealthcheck + c.image = image + return c +} + +// ForceCreate sets the optional parameter "forceCreate": Force image +// creation if true. +func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { + c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) return c } @@ -45134,7 +49799,7 @@ func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHe // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { +func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -45142,7 +49807,7 @@ func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { +func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45150,35 +49815,35 @@ func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { +func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpHealthChecksInsertCall) Header() http.Header { +func (c *ImagesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -45191,14 +49856,14 @@ func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.insert" call. +// Do executes the "compute.images.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45229,13 +49894,18 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "description": "Creates an image in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.insert", + // "id": "compute.images.insert", // "parameterOrder": [ // "project" // ], // "parameters": { + // "forceCreate": { + // "description": "Force image creation if true.", + // "location": "query", + // "type": "boolean" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -45249,24 +49919,27 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", + // "path": "{project}/global/images", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "Image" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.httpHealthChecks.list": +// method id "compute.images.list": -type HttpHealthChecksListCall struct { +type ImagesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -45275,11 +49948,16 @@ type HttpHealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HttpHealthCheck resources available to -// the specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list -func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { - c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of custom images available to the specified +// project. Custom images are images you create that belong to your +// project. This method does not get any images that belong to other +// projects, including publicly-available images, like Debian 8. If you +// want to get a list of publicly-available images, use this method to +// make a request to the respective image project, such as debian-cloud +// or windows-cloud. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list +func (r *ImagesService) List(project string) *ImagesListCall { + c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -45306,7 +49984,7 @@ func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
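Editor's note: the ImagesInsertCall and ImagesListCall builders added in the hunks above follow the generated client's usual pattern: build the call from the service, chain optional parameters (ForceCreate, Filter, MaxResults, ...), then invoke Do, or Pages for list calls. The sketch below is illustrative only and is not part of the vendored diff; it assumes an already-authenticated *compute.Service, and the image name, raw-disk URL, and filter expression are placeholders.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// createAndListImages sketches the call-builder pattern of the generated
// Images service. svc is assumed to be an authenticated *compute.Service.
func createAndListImages(ctx context.Context, svc *compute.Service, project string) error {
	// Create a custom image from a raw disk tarball in GCS. ForceCreate is
	// the optional query parameter added in this diff.
	img := &compute.Image{
		Name: "example-image", // placeholder
		RawDisk: &compute.ImageRawDisk{
			Source: "https://storage.googleapis.com/example-bucket/disk.tar.gz", // placeholder
		},
	}
	op, err := svc.Images.Insert(project, img).
		ForceCreate(true).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("insert operation:", op.Name)

	// List custom images in the project, filtered by family, paging through
	// results with Pages.
	return svc.Images.List(project).
		Filter(`family = "example-family"`). // placeholder filter
		MaxResults(100).
		Pages(ctx, func(page *compute.ImageList) error {
			for _, im := range page.Items {
				fmt.Println(im.Name, im.Family)
			}
			return nil
		})
}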
-func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { +func (c *ImagesListCall) Filter(filter string) *ImagesListCall { c.urlParams_.Set("filter", filter) return c } @@ -45317,7 +49995,7 @@ func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { +func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -45334,7 +50012,7 @@ func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthCheck // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { +func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -45342,7 +50020,7 @@ func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { +func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -45350,7 +50028,7 @@ func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecks // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { +func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45360,7 +50038,7 @@ func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthCheck // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { +func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { c.ifNoneMatch_ = entityTag return c } @@ -45368,21 +50046,21 @@ func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { +func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpHealthChecksListCall) Header() http.Header { +func (c *ImagesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -45394,7 +50072,7 @@ func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -45407,14 +50085,14 @@ func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.list" call. -// Exactly one of *HttpHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpHealthCheckList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { +// Do executes the "compute.images.list" call. +// Exactly one of *ImageList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ImageList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45433,7 +50111,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheckList{ + ret := &ImageList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45445,9 +50123,9 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth } return ret, nil // { - // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.list", + // "id": "compute.images.list", // "parameterOrder": [ // "project" // ], @@ -45483,9 +50161,9 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", + // "path": "{project}/global/images", // "response": { - // "$ref": "HttpHealthCheckList" + // "$ref": "ImageList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -45499,7 +50177,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { +func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -45517,53 +50195,32 @@ func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealth } } -// method id "compute.httpHealthChecks.patch": +// method id "compute.images.setIamPolicy": -type HttpHealthChecksPatchCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch -func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { - c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *ImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *ImagesSetIamPolicyCall { + c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
-// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { +func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45571,56 +50228,56 @@ func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { +func (c *ImagesSetIamPolicyCall) Context(ctx context.Context) *ImagesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksPatchCall) Header() http.Header { +func (c *ImagesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.images.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45639,7 +50296,7 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45651,21 +50308,14 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.httpHealthChecks.patch", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.images.setIamPolicy", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "resource" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -45673,18 +50323,20 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/global/images/{resource}/setIamPolicy", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "GlobalSetPolicyRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -45694,52 +50346,32 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.httpHealthChecks.update": +// method id "compute.images.setLabels": -type HttpHealthChecksUpdateCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update -func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { - c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on an image. To learn more about labels, +// read the Labeling Resources documentation. +func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { + c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
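Editor's note: the new getIamPolicy/setIamPolicy methods support the usual read-modify-write flow for IAM policies, with GlobalSetPolicyRequest as the write body named in the metadata above. A minimal sketch, not part of the vendored diff, assuming an authenticated *compute.Service; the role and member below are placeholders.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// grantImageUser reads the IAM policy of an image, appends a binding, and
// writes it back via setIamPolicy. svc is assumed to be an authenticated
// *compute.Service.
func grantImageUser(ctx context.Context, svc *compute.Service, project, image string) error {
	policy, err := svc.Images.GetIamPolicy(project, image).Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &compute.Binding{
		Role:    "roles/compute.imageUser",            // placeholder role
		Members: []string{"user:alice@example.com"},   // placeholder principal
	})
	updated, err := svc.Images.SetIamPolicy(project, image, &compute.GlobalSetPolicyRequest{
		Policy: policy,
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("policy etag:", updated.Etag)
	return nil
}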
-func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { +func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45747,223 +50379,56 @@ func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { +func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksUpdateCall) Header() http.Header { +func (c *ImagesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.httpHealthChecks.update", - // "parameterOrder": [ - // "project", - // "httpHealthCheck" - // ], - // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - // "request": { - // "$ref": "HttpHealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.httpsHealthChecks.delete": - -type HttpsHealthChecksDeleteCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified HttpsHealthCheck resource. -func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { - c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpsHealthCheck = httpsHealthCheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpsHealthChecksDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.delete" call. +// Do executes the "compute.images.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45994,21 +50459,14 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified HttpsHealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.httpsHealthChecks.delete", + // "description": "Sets the labels on an image. 
To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.images.setLabels", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "resource" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -46016,13 +50474,18 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/global/images/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -46034,96 +50497,89 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.httpsHealthChecks.get": +// method id "compute.images.testIamPermissions": -type HttpsHealthChecksGetCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ImagesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HttpsHealthCheck resource. Gets a list of -// available HTTPS health checks by making a list() request. -func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { - c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { + c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
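Editor's note: setLabels takes the GlobalSetLabelsRequest body shown above, which carries the full replacement label map plus the current label fingerprint for optimistic concurrency. A small sketch under the same assumptions as the previous examples (authenticated *compute.Service, placeholder names); not part of the vendored diff.

package main

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// labelImage replaces the labels on an image. The label fingerprint is read
// from the image first so the server can detect concurrent edits.
// svc is assumed to be an authenticated *compute.Service.
func labelImage(ctx context.Context, svc *compute.Service, project, image string) error {
	img, err := svc.Images.Get(project, image).Context(ctx).Do()
	if err != nil {
		return err
	}
	_, err = svc.Images.SetLabels(project, image, &compute.GlobalSetLabelsRequest{
		LabelFingerprint: img.LabelFingerprint,
		Labels: map[string]string{
			"env":  "test", // placeholder labels
			"team": "infra",
		},
	}).Context(ctx).Do()
	return err
}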
-func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { +func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { +func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksGetCall) Header() http.Header { +func (c *ImagesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.get" call. -// Exactly one of *HttpsHealthCheck or error will be non-nil. Any +// Do executes the "compute.images.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheck.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { +func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46142,7 +50598,7 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheck{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46154,32 +50610,35 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } return ret, nil // { - // "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.images.testIamPermissions", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "resource" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/global/images/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "HttpsHealthCheck" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -46190,23 +50649,42 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } -// method id "compute.httpsHealthChecks.insert": +// method id "compute.instanceGroupManagers.abandonInstances": -type HttpsHealthChecksInsertCall struct { - s *Service - project string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpsHealthCheck resource in the specified project -// using the data included in the request. 
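// Illustrative sketch (not part of the vendored diff): one way a caller might use the
// generated compute.images.testIamPermissions call shown above. Client construction via
// golang.org/x/oauth2/google Application Default Credentials and the placeholder project,
// image and permission names are assumptions for this example; later sketches below reuse
// a *compute.Service built the same way.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// Build an authenticated HTTP client from Application Default Credentials.
	hc, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}
	// Ask which of these permissions the caller actually holds on the image.
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.images.get", "compute.images.useReadOnly"},
	}
	resp, err := svc.Images.TestIamPermissions("my-project", "my-image", req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("granted permissions:", resp.Permissions)
}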
-func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { - c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AbandonInstances: Flags the specified instances to be removed from +// the managed instance group. Abandoning an instance does not delete +// the instance, but it does remove the instance from any target pools +// that are applied by the managed instance group. This method reduces +// the targetSize of the managed instance group by the number of +// instances that you abandon. This operation is marked as DONE when the +// action is scheduled even if the instances have not yet been removed +// from the group. You must separately verify the status of the +// abandoning action with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { + c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpshealthcheck = httpshealthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest return c } @@ -46224,7 +50702,7 @@ func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *Http // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { +func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -46232,7 +50710,7 @@ func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthCh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46240,35 +50718,35 @@ func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpsHealthChecksInsertCall) Header() http.Header { +func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -46276,19 +50754,21 @@ func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.insert" call. +// Do executes the "compute.instanceGroupManagers.abandonInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46319,13 +50799,21 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.insert", + // "id": "compute.instanceGroupManagers.abandonInstances", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -46337,11 +50825,17 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "InstanceGroupManagersAbandonInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -46354,9 +50848,9 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.httpsHealthChecks.list": +// method id "compute.instanceGroupManagers.aggregatedList": -type HttpsHealthChecksListCall struct { +type InstanceGroupManagersAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -46365,10 +50859,10 @@ type HttpsHealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HttpsHealthCheck resources available to -// the specified project. -func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { - c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of managed instance groups and +// groups them by zone. +func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { + c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -46395,7 +50889,7 @@ func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCa // explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -46406,7 +50900,7 @@ func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksList // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -46423,7 +50917,7 @@ func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChe // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -46431,7 +50925,7 @@ func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksLi // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -46439,7 +50933,7 @@ func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46449,7 +50943,7 @@ func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -46457,21 +50951,21 @@ func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
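// Illustrative sketch (not part of the vendored diff): abandoning two instances with the
// generated compute.instanceGroupManagers.abandonInstances call documented earlier in this
// hunk. The project, zone, group name and partial instance URLs are placeholders, and the
// Instances field is assumed to accept partial instance URLs.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func abandonTwoInstances(ctx context.Context, svc *compute.Service) {
	req := &compute.InstanceGroupManagersAbandonInstancesRequest{
		Instances: []string{
			"zones/us-central1-a/instances/node-1",
			"zones/us-central1-a/instances/node-2",
		},
	}
	op, err := svc.InstanceGroupManagers.
		AbandonInstances("my-project", "us-central1-a", "my-igm", req).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	// The operation reports DONE once the abandon action is scheduled; the instances may
	// still be attached. Check listManagedInstances separately to confirm removal.
	log.Printf("abandon scheduled: operation %s, status %s", op.Name, op.Status)
}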
-func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksListCall) Header() http.Header { +func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -46483,7 +50977,7 @@ func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -46496,14 +50990,15 @@ func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.list" call. -// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheckList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { +// Do executes the "compute.instanceGroupManagers.aggregatedList" call. +// Exactly one of *InstanceGroupManagerAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46522,7 +51017,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheckList{ + ret := &InstanceGroupManagerAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46534,9 +51029,9 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal } return ret, nil // { - // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + // "description": "Retrieves the list of managed instance groups and groups them by zone.", // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.list", + // "id": "compute.instanceGroupManagers.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -46572,9 +51067,9 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks", + // "path": "{project}/aggregated/instanceGroupManagers", // "response": { - // "$ref": "HttpsHealthCheckList" + // "$ref": "InstanceGroupManagerAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -46588,7 +51083,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { +func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -46606,26 +51101,27 @@ func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHeal } } -// method id "compute.httpsHealthChecks.patch": +// method id "compute.instanceGroupManagers.delete": -type HttpsHealthChecksPatchCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeleteCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { - c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. Note that the instance group must not belong +// to a backend service. Read Deleting an instance group for more +// information. 
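// Illustrative sketch (not part of the vendored diff): paging through the aggregated
// managed-instance-group listing with the generated
// compute.instanceGroupManagers.aggregatedList call and its Pages helper shown above.
// The filter expression and the Items/InstanceGroupManagers field names follow the
// discovery schema quoted in this file; project and filter values are placeholders.
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listAllManagedGroups(ctx context.Context, svc *compute.Service) error {
	call := svc.InstanceGroupManagers.
		AggregatedList("my-project").
		Filter("name != example-instance-group").
		MaxResults(100)
	// Pages invokes the callback once per page and follows nextPageToken automatically;
	// returning a non-nil error from the callback halts the iteration.
	return call.Pages(ctx, func(page *compute.InstanceGroupManagerAggregatedList) error {
		for zone, scoped := range page.Items {
			for _, igm := range scoped.InstanceGroupManagers {
				fmt.Printf("%s: %s (target size %d)\n", zone, igm.Name, igm.TargetSize)
			}
		}
		return nil
	})
}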
+func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { + c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } @@ -46643,7 +51139,7 @@ func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { +func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -46651,7 +51147,7 @@ func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { +func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46659,56 +51155,52 @@ func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { +func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksPatchCall) Header() http.Header { +func (c *InstanceGroupManagersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.patch" call. 
+// Do executes the "compute.instanceGroupManagers.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46739,18 +51231,18 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.httpsHealthChecks.patch", + // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroupManagers.delete", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to patch.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -46765,12 +51257,15 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - // "request": { - // "$ref": "HttpsHealthCheck" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "response": { // "$ref": "Operation" // }, @@ -46782,25 +51277,41 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpsHealthChecks.update": +// method id "compute.instanceGroupManagers.deleteInstances": -type HttpsHealthChecksUpdateCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. -func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { - c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteInstances: Flags the specified instances in the managed +// instance group for immediate deletion. The instances are also removed +// from any target pools of which they were a member. This method +// reduces the targetSize of the managed instance group by the number of +// instances that you delete. This operation is marked as DONE when the +// action is scheduled even if the instances are still being deleted. +// You must separately verify the status of the deleting action with the +// listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { + c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest return c } @@ -46818,7 +51329,7 @@ func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
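// Illustrative sketch (not part of the vendored diff): deleting one managed instance with
// the generated compute.instanceGroupManagers.deleteInstances call, passing the optional
// requestId described just above so a retried request is ignored server-side instead of
// being applied twice. The UUID is a placeholder; callers must supply their own valid,
// unique UUID per logical request.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func deleteOneInstance(ctx context.Context, svc *compute.Service) {
	const requestID = "4d9b2f5e-1234-4c6a-9f6e-000000000001" // placeholder UUID
	req := &compute.InstanceGroupManagersDeleteInstancesRequest{
		Instances: []string{"zones/us-central1-a/instances/node-1"},
	}
	op, err := svc.InstanceGroupManagers.
		DeleteInstances("my-project", "us-central1-a", "my-igm", req).
		RequestId(requestID). // a retry with the same ID is recognized and not re-executed
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete scheduled: operation %s", op.Name)
}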
-func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { +func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -46826,7 +51337,7 @@ func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthCh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { +func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46834,56 +51345,57 @@ func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { +func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksUpdateCall) Header() http.Header { +func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.update" call. +// Do executes the "compute.instanceGroupManagers.deleteInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46914,18 +51426,18 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.httpsHealthChecks.update", + // "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.deleteInstances", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to update.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -46940,11 +51452,17 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "InstanceGroupManagersDeleteInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -46957,101 +51475,100 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.images.delete": +// method id "compute.instanceGroupManagers.get": -type ImagesDeleteCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersGetCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified image. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete -func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { - c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns all of the details about the specified managed instance +// group. Gets a list of available managed instance groups by making a +// list() request. +func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { + c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { +func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { +func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47070,7 +51587,7 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47082,18 +51599,18 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified image.", - // "httpMethod": "DELETE", - // "id": "compute.images.delete", + // "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.get", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to delete.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -47104,46 +51621,53 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{image}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupManager" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.images.deprecate": +// method id "compute.instanceGroupManagers.insert": -type ImagesDeprecateCall struct { - s *Service - project string - image string - deprecationstatus *DeprecationStatus - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersInsertCall struct { + s *Service + project string + zone string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Deprecate: Sets the deprecation status of an image. +// Insert: Creates a managed instance group using the information that +// you specify in the request. 
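// Illustrative sketch (not part of the vendored diff): a conditional read of a managed
// instance group using the IfNoneMatch support shown above for
// compute.instanceGroupManagers.get. Reading the ETag from the embedded
// googleapi.ServerResponse header, and the header name, are assumptions for this example.
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func getIfChanged(ctx context.Context, svc *compute.Service, lastETag string) (*compute.InstanceGroupManager, string, error) {
	call := svc.InstanceGroupManagers.Get("my-project", "us-central1-a", "my-igm").Context(ctx)
	if lastETag != "" {
		call = call.IfNoneMatch(lastETag) // ask the server to answer 304 if nothing changed
	}
	igm, err := call.Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("group unchanged since last read")
		return nil, lastETag, nil
	}
	if err != nil {
		return nil, "", err
	}
	return igm, igm.Header.Get("Etag"), nil
}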
After the group is created, instances in +// the group are created using the specified instance template. This +// operation is marked as DONE when the group is created even if the +// instances in the group have not yet been created. You must separately +// verify the status of the individual instances with the +// listmanagedinstances method. // -// If an empty request body is given, clears the deprecation status -// instead. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate -func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { - c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// A managed instance group can have up to 1000 VM instances per group. +// Please contact Cloud Support if you need an increase in this limit. +func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { + c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - c.deprecationstatus = deprecationstatus + c.zone = zone + c.instancegroupmanager = instancegroupmanager return c } @@ -47161,7 +51685,7 @@ func (r *ImagesService) Deprecate(project string, image string, deprecationstatu // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { +func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -47169,7 +51693,7 @@ func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { +func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47177,35 +51701,35 @@ func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { +func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
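// Illustrative sketch (not part of the vendored diff): creating a managed instance group
// with the generated compute.instanceGroupManagers.insert call described above. The
// project, zone, template URL and group settings are placeholders; the insert returns an
// Operation that is DONE once the group exists, while its instances may still be booting.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func createManagedGroup(ctx context.Context, svc *compute.Service) {
	igm := &compute.InstanceGroupManager{
		Name:             "my-igm",
		BaseInstanceName: "my-igm-node",
		InstanceTemplate: "global/instanceTemplates/my-template", // partial URL placeholder
		TargetSize:       3,
	}
	op, err := svc.InstanceGroupManagers.
		Insert("my-project", "us-central1-a", igm).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("insert accepted: operation %s", op.Name)
}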
-func (c *ImagesDeprecateCall) Header() http.Header { +func (c *InstanceGroupManagersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -47214,19 +51738,19 @@ func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "image": c.image, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.deprecate" call. +// Do executes the "compute.instanceGroupManagers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47257,21 +51781,14 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. 
Please contact Cloud Support if you need an increase in this limit.", // "httpMethod": "POST", - // "id": "compute.images.deprecate", + // "id": "compute.instanceGroupManagers.insert", // "parameterOrder": [ // "project", - // "image" + // "zone" // ], // "parameters": { - // "image": { - // "description": "Image name.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -47283,11 +51800,17 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images/{image}/deprecate", + // "path": "{project}/zones/{zone}/instanceGroupManagers", // "request": { - // "$ref": "DeprecationStatus" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -47300,188 +51823,94 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.images.get": +// method id "compute.instanceGroupManagers.list": -type ImagesGetCall struct { +type InstanceGroupManagersListCall struct { s *Service project string - image string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified image. Gets a list of available images by -// making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get -func (r *ImagesService) Get(project string, image string) *ImagesGetCall { - c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of managed instance groups that are contained +// within the specified project and zone. +func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { + c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + c.zone = zone return c } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. 
The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { + c.urlParams_.Set("filter", filter) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { - c.ifNoneMatch_ = entityTag +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { - c.ctx_ = ctx +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { + c.urlParams_.Set("orderBy", orderBy) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
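A minimal usage sketch of the compute.instanceGroupManagers.insert call added in this hunk, assuming the google.golang.org/api/compute/v1 package that this tree vendors; the project, zone, template URL, and group name are placeholders, and credentials come from application default credentials.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}
	igm := &compute.InstanceGroupManager{
		Name:             "example-igm",
		BaseInstanceName: "example",
		InstanceTemplate: "global/instanceTemplates/example-template", // placeholder template URL
		TargetSize:       3,
	}
	// The returned Operation is DONE once the group resource exists; the
	// instances themselves may still be creating, per the description above.
	op, err := svc.InstanceGroupManagers.Insert("my-project", "us-central1-a", igm).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("insert operation %s: %s", op.Name, op.Status)
}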
-func (c *ImagesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.get" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Image{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified image. Gets a list of available images by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.images.get", - // "parameterOrder": [ - // "project", - // "image" - // ], - // "parameters": { - // "image": { - // "description": "Name of the image resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/images/{image}", - // "response": { - // "$ref": "Image" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.images.getFromFamily": - -type ImagesGetFromFamilyCall struct { - s *Service - project string - family string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetFromFamily: Returns the latest image that is part of an image -// family and is not deprecated. 
-func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { - c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.family = family +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { +func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47491,7 +51920,7 @@ func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFam // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { +func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { c.ifNoneMatch_ = entityTag return c } @@ -47499,21 +51928,21 @@ func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { +func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesGetFromFamilyCall) Header() http.Header { +func (c *InstanceGroupManagersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -47525,7 +51954,7 @@ func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -47534,19 +51963,19 @@ func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "family": c.family, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.getFromFamily" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { +// Do executes the "compute.instanceGroupManagers.list" call. +// Exactly one of *InstanceGroupManagerList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupManagerList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47565,7 +51994,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Image{ + ret := &InstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47577,19 +52006,35 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro } return ret, nil // { - // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", // "httpMethod": "GET", - // "id": "compute.images.getFromFamily", + // "id": "compute.instanceGroupManagers.list", // "parameterOrder": [ // "project", - // "family" + // "zone" // ], // "parameters": { - // "family": { - // "description": "Name of the image family to search for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -47598,11 +52043,17 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images/family/{family}", + // "path": "{project}/zones/{zone}/instanceGroupManagers", // "response": { - // "$ref": "Image" + // "$ref": "InstanceGroupManagerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -47613,96 +52064,175 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro } -// method id "compute.images.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ImagesGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroupManagers.listManagedInstances": + +type InstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. 
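A sketch of driving the new List call together with the Pages helper defined above, assuming the same vendored compute/v1 package and an already-constructed *compute.Service; the filter expression and identifiers are illustrative only.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listManagedGroups(ctx context.Context, svc *compute.Service, project, zone string) error {
	call := svc.InstanceGroupManagers.List(project, zone).
		Filter("name != example-igm"). // same expression syntax as documented above
		MaxResults(100)
	// Pages follows nextPageToken until it is empty, invoking the callback once per page.
	return call.Pages(ctx, func(page *compute.InstanceGroupManagerList) error {
		for _, igm := range page.Items {
			fmt.Printf("%s targetSize=%d\n", igm.Name, igm.TargetSize)
		}
		return nil
	})
}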
-func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { - c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListManagedInstances: Lists all of the instances in the managed +// instance group. Each instance in the list has a currentAction, which +// indicates the action that the managed instance group is performing on +// the instance. For example, if the group is still creating an +// instance, the currentAction is CREATING. If a previous action failed, +// the list displays the errors for that failed action. +func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { + c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "order_by": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. 
+// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("order_by", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesGetIamPolicyCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *ImagesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetIamPolicyCall) Context(ctx context.Context) *ImagesGetIamPolicyCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesGetIamPolicyCall) Header() http.Header { +func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.getIamPolicy" call. 
-// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. +// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head +// er or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47721,7 +52251,7 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &InstanceGroupManagersListManagedInstancesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47733,14 +52263,44 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.images.getIamPolicy", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.listManagedInstances", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -47748,17 +52308,16 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "response": { - // "$ref": "Policy" + // "$ref": "InstanceGroupManagersListManagedInstancesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -47769,31 +52328,32 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } -// method id "compute.images.insert": +// method id "compute.instanceGroupManagers.patch": -type ImagesInsertCall struct { - s *Service - project string - image *Image - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersPatchCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an image in the specified project using the data -// included in the request. 
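A sketch of the new listManagedInstances call described above, assuming the vendored compute/v1 package; it prints each instance URL with the currentAction the group is currently performing on it.

package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func printManagedInstances(svc *compute.Service, project, zone, groupName string) error {
	resp, err := svc.InstanceGroupManagers.ListManagedInstances(project, zone, groupName).Do()
	if err != nil {
		return err
	}
	for _, mi := range resp.ManagedInstances {
		// currentAction is e.g. NONE, CREATING, RECREATING or DELETING.
		fmt.Printf("%s %s\n", mi.Instance, mi.CurrentAction)
	}
	return nil
}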
-// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert -func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { - c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is patched even if the instances in the group are still in the +// process of being patched. You must separately verify the status of +// the individual instances with the listManagedInstances method. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. +func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { + c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - return c -} - -// ForceCreate sets the optional parameter "forceCreate": Force image -// creation if true. -func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { - c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager return c } @@ -47811,7 +52371,7 @@ func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { +func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -47819,7 +52379,7 @@ func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { +func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47827,55 +52387,57 @@ func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { +func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
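A sketch of the new Patch call, assuming the vendored compute/v1 package; only the fields set on the patch body are sent, per the JSON merge-patch semantics noted above, and the requestId shown is an arbitrary placeholder UUID used for idempotent retries.

package example

import (
	compute "google.golang.org/api/compute/v1"
)

func setTargetSize(svc *compute.Service, project, zone, groupName string, size int64) (*compute.Operation, error) {
	patch := &compute.InstanceGroupManager{TargetSize: size}
	// A zero value would be dropped by omitempty; list the field in
	// ForceSendFields if size can legitimately be 0.
	return svc.InstanceGroupManagers.Patch(project, zone, groupName, patch).
		RequestId("11111111-2222-3333-4444-555555555555").
		Do()
}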
-func (c *ImagesInsertCall) Header() http.Header { +func (c *InstanceGroupManagersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.insert" call. +// Do executes the "compute.instanceGroupManagers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47906,17 +52468,20 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates an image in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.images.insert", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instanceGroupManagers.patch", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "forceCreate": { - // "description": "Force image creation if true.", - // "location": "query", - // "type": "boolean" + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", + // "location": "path", + // "required": true, + // "type": "string" // }, // "project": { // "description": "Project ID for this request.", @@ -47929,182 +52494,147 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "request": { - // "$ref": "Image" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.list": - -type ImagesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} +// method id "compute.instanceGroupManagers.recreateInstances": -// List: Retrieves the list of custom images available to the specified -// project. Custom images are images you create that belong to your -// project. This method does not get any images that belong to other -// projects, including publicly-available images, like Debian 8. If you -// want to get a list of publicly-available images, use this method to -// make a request to the respective image project, such as debian-cloud -// or windows-cloud. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list -func (r *ImagesService) List(project string) *ImagesListCall { - c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c +type InstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. +// RecreateInstances: Flags the specified instances in the managed +// instance group to be immediately recreated. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the flag is set +// even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. // -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *ImagesListCall) Filter(filter string) *ImagesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// You can specify a maximum of 1000 instances with this method per +// request. 
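A sketch of the RecreateInstances call just described, assuming the vendored compute/v1 package; the instance URLs are placeholders, and at most 1000 instances may be named per request.

package example

import (
	compute "google.golang.org/api/compute/v1"
)

func recreateTwo(svc *compute.Service, project, zone, groupName string) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersRecreateInstancesRequest{
		Instances: []string{
			"zones/us-central1-a/instances/example-0001",
			"zones/us-central1-a/instances/example-0002",
		},
	}
	// DONE on the returned operation means the recreate flag was set, not that
	// the replacement VMs are already running.
	return svc.InstanceGroupManagers.RecreateInstances(project, zone, groupName, req).Do()
}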
+func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { + c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesListCall) Header() http.Header { +func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.list" call. -// Exactly one of *ImageList or error will be non-nil. Any non-2xx +// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *ImageList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { +func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48123,7 +52653,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ImageList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48135,104 +52665,120 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { } return ret, nil // { - // "description": "Retrieves the list of custom images available to the specified project. Custom images are images you create that belong to your project. 
This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", - // "httpMethod": "GET", - // "id": "compute.images.list", + // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.recreateInstances", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "request": { + // "$ref": "InstanceGroupManagersRecreateInstancesRequest" + // }, // "response": { - // "$ref": "ImageList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.images.setIamPolicy": +// method id "compute.instanceGroupManagers.resize": -type ImagesSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersResizeCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. 
-func (r *ImagesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *ImagesSetIamPolicyCall { - c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the managed instance group. If you increase the size, +// the group creates new instances using the current instance template. +// If you decrease the size, the group deletes instances. The resize +// operation is marked DONE when the resize actions are scheduled even +// if the group has not yet added or deleted any instances. You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. +// +// When resizing down, the instance group arbitrarily chooses the order +// in which VMs are deleted. The group takes into account some VM +// attributes when making the selection including: +// +// + The status of the VM instance. + The health of the VM instance. + +// The instance template version the VM is based on. + For regional +// managed instance groups, the location of the VM instance. +// +// This list is subject to change. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. +func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { + c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolicyCall { +func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48240,35 +52786,30 @@ func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolic // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
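A sketch of the new Resize call, assuming the vendored compute/v1 package; the polling loop uses ZoneOperations.Get from the same package, which is not part of this hunk, and the sleep interval is arbitrary.

package example

import (
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func resizeAndWait(svc *compute.Service, project, zone, groupName string, size int64) error {
	op, err := svc.InstanceGroupManagers.Resize(project, zone, groupName, size).Do()
	if err != nil {
		return err
	}
	// DONE means the resize was scheduled; the group may still be adding or
	// deleting instances afterwards, as the description above notes.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		if op, err = svc.ZoneOperations.Get(project, zone, op.Name).Do(); err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("resize failed: %+v", op.Error.Errors)
	}
	return nil
}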
-func (c *ImagesSetIamPolicyCall) Context(ctx context.Context) *ImagesSetIamPolicyCall { +func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesSetIamPolicyCall) Header() http.Header { +func (c *InstanceGroupManagersResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -48276,20 +52817,21 @@ func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instanceGroupManagers.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48308,7 +52850,7 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48320,14 +52862,22 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", // "httpMethod": "POST", - // "id": "compute.images.setIamPolicy", + // "id": "compute.instanceGroupManagers.resize", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager", + // "size" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -48335,20 +52885,28 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "size": { + // "description": "The number of running instances that the managed instance group should maintain at any given time. 
The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + // "format": "int32", + // "location": "query", + // "required": true, + // "type": "integer" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/setIamPolicy", - // "request": { - // "$ref": "GlobalSetPolicyRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -48358,32 +52916,54 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } -// method id "compute.images.setLabels": +// method id "compute.instanceGroupManagers.setInstanceTemplate": -type ImagesSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an image. To learn more about labels, -// read the Labeling Resources documentation. -func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { - c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetInstanceTemplate: Specifies the instance template to use when +// creating new instances in this group. The templates for existing +// instances in the group do not change unless you recreate them. +func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { + c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
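For reference, a minimal usage sketch of the Resize call added above, assuming the generated client is vendored as google.golang.org/api/compute/v1 and that svc is a *compute.Service already built from an authenticated HTTP client; Operation.Name is taken from the wider compute schema rather than this hunk:

package computesketch

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// resizeMIG schedules a resize of a zonal managed instance group. The returned
// Operation is only scheduled; the group may not have added or deleted any
// instances yet, so callers poll the operation or use listmanagedinstances.
func resizeMIG(ctx context.Context, svc *compute.Service, project, zone, mig string, size int64, requestID string) error {
	op, err := svc.InstanceGroupManagers.
		Resize(project, zone, mig, size).
		RequestId(requestID). // optional; a non-zero UUID so retries are deduplicated
		Context(ctx).
		Do()
	if err != nil {
		return fmt.Errorf("resize %s/%s/%s: %w", project, zone, mig, err)
	}
	fmt.Printf("resize to %d scheduled: operation %s\n", size, op.Name)
	return nil
}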
+func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48391,35 +52971,35 @@ func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesSetLabelsCall) Header() http.Header { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -48427,20 +53007,21 @@ func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.setLabels" call. +// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48471,14 +53052,21 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation.", + // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", // "httpMethod": "POST", - // "id": "compute.images.setLabels", + // "id": "compute.instanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -48486,17 +53074,21 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/setLabels", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", // "request": { - // "$ref": "GlobalSetLabelsRequest" + // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" // }, // "response": { // "$ref": "Operation" @@ -48509,32 +53101,58 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.images.testIamPermissions": +// method id "compute.instanceGroupManagers.setTargetPools": -type ImagesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
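A similar sketch for the setInstanceTemplate call wired up above; the InstanceTemplate field of the request body is assumed from the public compute v1 schema (only the call surface appears in this diff), and templateURL is expected to be a full instance-template URL:

package computesketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// setTemplate points a managed instance group at a new instance template.
// Existing VMs keep their old template until they are recreated.
func setTemplate(ctx context.Context, svc *compute.Service, project, zone, mig, templateURL string) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersSetInstanceTemplateRequest{
		InstanceTemplate: templateURL, // assumed field name; not shown in this hunk
	}
	return svc.InstanceGroupManagers.
		SetInstanceTemplate(project, zone, mig, req).
		Context(ctx).
		Do()
}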
-func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { - c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTargetPools: Modifies the target pools to which all instances in +// this managed instance group are assigned. The target pools +// automatically apply to all of the instances in the managed instance +// group. This operation is marked DONE when you make the request even +// if the instances have not yet been added to their target pools. The +// change might take some time to apply to all of the instances in the +// group depending on the size of the group. +func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { + c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48542,35 +53160,35 @@ func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
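And a sketch for setTargetPools, whose generated plumbing continues below; the TargetPools field is assumed from the public request schema and takes full target-pool URLs that will apply to all instances in the group:

package computesketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// setTargetPools reassigns the target pools for every instance in the group.
// The Operation is marked DONE when the request is accepted, not when the
// change has propagated to each instance.
func setTargetPools(ctx context.Context, svc *compute.Service, project, zone, mig string, poolURLs []string) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersSetTargetPoolsRequest{
		TargetPools: poolURLs, // assumed field name; not shown in this hunk
	}
	return svc.InstanceGroupManagers.
		SetTargetPools(project, zone, mig, req).
		Context(ctx).
		Do()
}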
-func (c *ImagesTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -48578,20 +53196,21 @@ func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48610,7 +53229,7 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48622,14 +53241,21 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. 
The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", // "httpMethod": "POST", - // "id": "compute.images.testIamPermissions", + // "id": "compute.instanceGroupManagers.setTargetPools", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -48637,66 +53263,55 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.abandonInstances": +// method id "compute.instanceGroups.addInstances": -type InstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsAddInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Flags the specified instances to be removed from -// the managed instance group. 
Abandoning an instance does not delete -// the instance, but it does remove the instance from any target pools -// that are applied by the managed instance group. This method reduces -// the targetSize of the managed instance group by the number of -// instances that you abandon. This operation is marked as DONE when the -// action is scheduled even if the instances have not yet been removed -// from the group. You must separately verify the status of the -// abandoning action with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { - c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddInstances: Adds a list of instances to the specified instance +// group. All of the instances in the instance group must be in the same +// network/subnetwork. Read Adding instances for more information. +func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { + c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest + c.instanceGroup = instanceGroup + c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest return c } @@ -48714,7 +53329,7 @@ func (r *InstanceGroupManagersService) AbandonInstances(project string, zone str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -48722,7 +53337,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48730,35 +53345,35 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
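For the unmanaged-group counterpart introduced here, compute.instanceGroups.addInstances, a sketch along the same lines; the Instances/InstanceReference body fields are assumed from the public schema, and every URL must point at an instance in the group's network/subnetwork:

package computesketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// addToGroup adds existing VM instances (by self-link URL) to an unmanaged
// instance group in the given zone.
func addToGroup(ctx context.Context, svc *compute.Service, project, zone, group string, instanceURLs []string) (*compute.Operation, error) {
	req := &compute.InstanceGroupsAddInstancesRequest{}
	for _, u := range instanceURLs {
		req.Instances = append(req.Instances, &compute.InstanceReference{Instance: u}) // assumed field names
	}
	return svc.InstanceGroups.
		AddInstances(project, zone, group, req).
		Context(ctx).
		Do()
}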
-func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *InstanceGroupsAddInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -48766,21 +53381,21 @@ func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.abandonInstances" call. +// Do executes the "compute.instanceGroups.addInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48811,17 +53426,17 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Flags the specified instances to be removed from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.abandonInstances", + // "id": "compute.instanceGroups.addInstances", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceGroup": { + // "description": "The name of the instance group where you are adding instances.", // "location": "path", // "required": true, // "type": "string" @@ -48839,15 +53454,15 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", // "request": { - // "$ref": "InstanceGroupManagersAbandonInstancesRequest" + // "$ref": "InstanceGroupsAddInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -48860,9 +53475,9 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } -// method id "compute.instanceGroupManagers.aggregatedList": +// method id "compute.instanceGroups.aggregatedList": -type InstanceGroupManagersAggregatedListCall struct { +type InstanceGroupsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -48871,10 +53486,10 @@ type InstanceGroupManagersAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves the list of managed instance groups and -// groups them by zone. -func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { - c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of instance groups and sorts them +// by zone. +func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { + c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -48901,7 +53516,7 @@ func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceG // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
-func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -48912,7 +53527,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *Instanc // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -48929,7 +53544,7 @@ func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) * // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -48937,7 +53552,7 @@ func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *Insta // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -48945,7 +53560,7 @@ func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *I // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48955,7 +53570,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) * // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -48963,21 +53578,21 @@ func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
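The aggregated-list plumbing being renamed here pairs naturally with the Pages helper further down; a sketch of iterating every zone's instance groups with a filter, where the Items layout (scope key to a scoped list with an InstanceGroups slice) is assumed from the wider compute v1 schema:

package computesketch

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listAllGroups walks every page of the aggregated instance-group listing.
// Pages handles the nextPageToken bookkeeping described above.
func listAllGroups(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.InstanceGroups.AggregatedList(project).
		Filter("name != example-instance"). // same expression syntax as the Filter documentation
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.InstanceGroupAggregatedList) error {
		for scope, scoped := range page.Items { // assumed map layout
			fmt.Printf("%s: %d instance groups\n", scope, len(scoped.InstanceGroups))
		}
		return nil
	})
}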
-func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { +func (c *InstanceGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -48989,7 +53604,7 @@ func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.R var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -49002,15 +53617,14 @@ func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.R return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.aggregatedList" call. -// Exactly one of *InstanceGroupManagerAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { +// Do executes the "compute.instanceGroups.aggregatedList" call. +// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49029,7 +53643,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerAggregatedList{ + ret := &InstanceGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49041,9 +53655,9 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of managed instance groups and groups them by zone.", + // "description": "Retrieves the list of instance groups and sorts them by zone.", // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.aggregatedList", + // "id": "compute.instanceGroups.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -49079,9 +53693,9 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // "type": "string" // } // }, - // "path": "{project}/aggregated/instanceGroupManagers", + // "path": "{project}/aggregated/instanceGroups", // "response": { - // "$ref": "InstanceGroupManagerAggregatedList" + // "$ref": "InstanceGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -49095,7 +53709,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { +func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -49113,27 +53727,27 @@ func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f f } } -// method id "compute.instanceGroupManagers.delete": +// method id "compute.instanceGroups.delete": -type InstanceGroupManagersDeleteCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsDeleteCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. Note that the instance group must not belong -// to a backend service. Read Deleting an instance group for more +// Delete: Deletes the specified instance group. The instances in the +// group are not deleted. Note that instance group must not belong to a +// backend service. Read Deleting an instance group for more // information. 
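A sketch of the delete path for an unmanaged group (the group must not be referenced by a backend service; member VMs survive), using the structured error type the Do documentation mentions:

package computesketch

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// deleteGroup deletes the instance group itself, leaving its instances running.
func deleteGroup(ctx context.Context, svc *compute.Service, project, zone, group, requestID string) error {
	op, err := svc.InstanceGroups.
		Delete(project, zone, group).
		RequestId(requestID). // optional non-zero UUID for safe retries
		Context(ctx).
		Do()
	if err != nil {
		if apiErr, ok := err.(*googleapi.Error); ok {
			return fmt.Errorf("delete %s: HTTP %d: %s", group, apiErr.Code, apiErr.Message)
		}
		return err
	}
	fmt.Printf("delete scheduled: operation %s\n", op.Name)
	return nil
}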
-func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { - c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { + c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager + c.instanceGroup = instanceGroup return c } @@ -49151,7 +53765,7 @@ func (r *InstanceGroupManagersService) Delete(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { +func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -49159,7 +53773,7 @@ func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { +func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49167,21 +53781,21 @@ func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { +func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteCall) Header() http.Header { +func (c *InstanceGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -49190,7 +53804,7 @@ func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -49198,21 +53812,21 @@ func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.delete" call. +// Do executes the "compute.instanceGroups.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49243,212 +53857,17 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", // "httpMethod": "DELETE", - // "id": "compute.instanceGroupManagers.delete", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroupManagers.deleteInstances": - -type InstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// DeleteInstances: Flags the specified instances in the managed -// instance group for immediate deletion. The instances are also removed -// from any target pools of which they were a member. This method -// reduces the targetSize of the managed instance group by the number of -// instances that you delete. This operation is marked as DONE when the -// action is scheduled even if the instances are still being deleted. -// You must separately verify the status of the deleting action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { - c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.deleteInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Flags the specified instances in the managed instance group for immediate deletion. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. 
You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.deleteInstances", + // "id": "compute.instanceGroups.delete", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceGroup": { + // "description": "The name of the instance group to delete.", // "location": "path", // "required": true, // "type": "string" @@ -49466,16 +53885,13 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - // "request": { - // "$ref": "InstanceGroupManagersDeleteInstancesRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", // "response": { // "$ref": "Operation" // }, @@ -49487,34 +53903,33 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } -// method id "compute.instanceGroupManagers.get": +// method id "compute.instanceGroups.get": -type InstanceGroupManagersGetCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupsGetCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. Gets a list of available managed instance groups by making a -// list() request. -func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { - c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified instance group. Gets a list of available +// instance groups by making a list() request. +func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { + c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager + c.instanceGroup = instanceGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { +func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49524,7 +53939,7 @@ func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGro // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { +func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -49532,21 +53947,21 @@ func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { +func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersGetCall) Header() http.Header { +func (c *InstanceGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -49558,7 +53973,7 @@ func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -49566,21 +53981,21 @@ func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49599,7 +54014,7 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManager{ + ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49611,17 +54026,17 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", + // "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request.", // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.get", + // "id": "compute.instanceGroups.get", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceGroup": { + // "description": "The name of the instance group.", // "location": "path", // "required": true, // "type": "string" @@ -49634,15 +54049,15 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "InstanceGroupManager" + // "$ref": "InstanceGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -49653,33 +54068,25 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } -// method id "compute.instanceGroupManagers.insert": +// method id "compute.instanceGroups.insert": -type InstanceGroupManagersInsertCall struct { - s *Service - project string - zone string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsInsertCall struct { + s *Service + project string + zone string + instancegroup *InstanceGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, instances in -// the group are created using the specified instance template. This -// operation is marked as DONE when the group is created even if the -// instances in the group have not yet been created. You must separately -// verify the status of the individual instances with the -// listmanagedinstances method. -// -// A managed instance group can have up to 1000 VM instances per group. -// Please contact Cloud Support if you need an increase in this limit. 
-func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { - c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance group in the specified project using the +// parameters that are included in the request. +func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { + c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instancegroupmanager = instancegroupmanager + c.instancegroup = instancegroup return c } @@ -49697,7 +54104,7 @@ func (r *InstanceGroupManagersService) Insert(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { +func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -49705,7 +54112,7 @@ func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { +func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49713,35 +54120,35 @@ func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { +func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersInsertCall) Header() http.Header { +func (c *InstanceGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -49755,14 +54162,14 @@ func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.insert" call. +// Do executes the "compute.instanceGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49793,9 +54200,9 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, instances in the group are created using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", + // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.insert", + // "id": "compute.instanceGroups.insert", // "parameterOrder": [ // "project", // "zone" @@ -49814,15 +54221,15 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "description": "The name of the zone where you want to create the instance group.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "path": "{project}/zones/{zone}/instanceGroups", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "InstanceGroup" // }, // "response": { // "$ref": "Operation" @@ -49835,9 +54242,9 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.list": +// method id "compute.instanceGroups.list": -type InstanceGroupManagersListCall struct { +type InstanceGroupsListCall struct { s *Service project string zone string @@ -49847,10 +54254,10 @@ type InstanceGroupManagersListCall struct { header_ http.Header } -// List: Retrieves a list of managed instance groups that are contained -// within the specified project and zone. -func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { - c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instance groups that are located in the +// specified project and zone. 
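For orientation, the InstanceGroups calls introduced on the "+" side of this hunk follow the usual pattern of the generated client: obtain a *compute.Service, build a call from the service, chain optional parameters, then invoke Do (or Pages for list calls). The sketch below is illustrative only and is not part of the vendored file; the project, zone and group names are placeholders, and credentials are assumed to come from Application Default Credentials.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Authenticated HTTP client from Application Default Credentials.
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	const project, zone = "my-project", "us-central1-a" // placeholders

	// Create an unmanaged instance group (compute.instanceGroups.insert).
	op, err := svc.InstanceGroups.Insert(project, zone, &compute.InstanceGroup{
		Name: "example-group", // placeholder
	}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("insert operation:", op.Name)

	// List the instance groups in the zone (compute.instanceGroups.list),
	// letting the generated Pages helper follow nextPageToken.
	err = svc.InstanceGroups.List(project, zone).Pages(ctx, func(page *compute.InstanceGroupList) error {
		for _, g := range page.Items {
			fmt.Println(g.Name, g.Size)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}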
+func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { + c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone return c @@ -49878,7 +54285,7 @@ func (r *InstanceGroupManagersService) List(project string, zone string) *Instan // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { +func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { c.urlParams_.Set("filter", filter) return c } @@ -49889,7 +54296,7 @@ func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupMana // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { +func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -49906,7 +54313,7 @@ func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGr // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { +func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -49914,7 +54321,7 @@ func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupMa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { +func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -49922,7 +54329,7 @@ func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGro // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { +func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49932,7 +54339,7 @@ func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGr // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { +func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -49940,21 +54347,21 @@ func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { +func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersListCall) Header() http.Header { +func (c *InstanceGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -49966,7 +54373,7 @@ func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -49980,14 +54387,14 @@ func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.list" call. -// Exactly one of *InstanceGroupManagerList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupManagerList.ServerResponse.Header or (if a response was +// Do executes the "compute.instanceGroups.list" call. +// Exactly one of *InstanceGroupList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { +func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50006,7 +54413,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerList{ + ret := &InstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50018,9 +54425,9 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta } return ret, nil // { - // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", + // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.list", + // "id": "compute.instanceGroups.list", // "parameterOrder": [ // "project", // "zone" @@ -50057,15 +54464,15 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "path": "{project}/zones/{zone}/instanceGroups", // "response": { - // "$ref": "InstanceGroupManagerList" + // "$ref": "InstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -50079,7 +54486,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { +func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -50097,29 +54504,26 @@ func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*Insta } } -// method id "compute.instanceGroupManagers.listManagedInstances": +// method id "compute.instanceGroups.listInstances": -type InstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsListInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListManagedInstances: Lists all of the instances in the managed -// instance group. Each instance in the list has a currentAction, which -// indicates the action that the managed instance group is performing on -// the instance. For example, if the group is still creating an -// instance, the currentAction is CREATING. If a previous action failed, -// the list displays the errors for that failed action. 
-func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { - c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListInstances: Lists the instances in the specified instance group. +func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { + c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager + c.instanceGroup = instanceGroup + c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest return c } @@ -50145,7 +54549,7 @@ func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { +func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c } @@ -50156,12 +54560,12 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *I // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { +func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// OrderBy sets the optional parameter "order_by": Sorts list results by +// OrderBy sets the optional parameter "orderBy": Sorts list results by // a certain order. By default, results are returned in alphanumerical // order based on the resource name. // @@ -50173,15 +54577,15 @@ func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults in // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("order_by", orderBy) +func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { +func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -50189,7 +54593,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken stri // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { +func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50197,30 +54601,35 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { +func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { +func (c *InstanceGroupsListInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -50228,23 +54637,21 @@ func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (* } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. -// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head -// er or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { +// Do executes the "compute.instanceGroups.listInstances" call. +// Exactly one of *InstanceGroupsListInstances or error will be non-nil. +// Any non-2xx status code is an error. 
Response headers are in either +// *InstanceGroupsListInstances.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50263,7 +54670,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagersListManagedInstancesResponse{ + ret := &InstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50275,13 +54682,13 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + // "description": "Lists the instances in the specified instance group.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.listManagedInstances", + // "id": "compute.instanceGroups.listInstances", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { // "filter": { @@ -50289,8 +54696,8 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal // "location": "query", // "type": "string" // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceGroup": { + // "description": "The name of the instance group from which you want to generate a list of included instances.", // "location": "path", // "required": true, // "type": "string" @@ -50303,7 +54710,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal // "minimum": "0", // "type": "integer" // }, - // "order_by": { + // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" @@ -50321,15 +54728,18 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "InstanceGroupsListInstancesRequest" + // }, // "response": { - // "$ref": "InstanceGroupManagersListManagedInstancesResponse" + // "$ref": "InstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -50340,32 +54750,52 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } -// method id "compute.instanceGroupManagers.patch": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupManagersPatchCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroups.removeInstances": + +type InstanceGroupsRemoveInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listManagedInstances method. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. -func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { - c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveInstances: Removes one or more instances from the specified +// instance group, but does not delete those instances. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration before the VM instance is removed or deleted. 
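The listInstances call shown above differs from the managed-group listManagedInstances call it replaces in this file: it takes a JSON request body, uses the camel-cased orderBy query parameter, and gains a Pages helper for nextPageToken handling. The following sketch is illustrative only, not part of the vendored file; the identifiers are placeholders, and the InstanceState value is just one of the accepted filter settings (RUNNING or ALL).

package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listGroupInstances prints the instance URL and status of every running
// instance in the given unmanaged instance group, following pagination via
// the generated Pages helper.
func listGroupInstances(ctx context.Context, svc *compute.Service, project, zone, group string) error {
	req := &compute.InstanceGroupsListInstancesRequest{
		// Restrict the listing to running instances; "ALL" is the other
		// accepted value for this field.
		InstanceState: "RUNNING",
	}
	return svc.InstanceGroups.ListInstances(project, zone, group, req).
		MaxResults(100).
		Pages(ctx, func(page *compute.InstanceGroupsListInstances) error {
			for _, item := range page.Items {
				fmt.Println(item.Instance, item.Status)
			}
			return nil
		})
}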
+func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { + c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.instanceGroup = instanceGroup + c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest return c } @@ -50383,7 +54813,7 @@ func (r *InstanceGroupManagersService) Patch(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { +func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -50391,7 +54821,7 @@ func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { +func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50399,57 +54829,57 @@ func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { +func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersPatchCall) Header() http.Header { +func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.patch" call. +// Do executes the "compute.instanceGroups.removeInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50480,17 +54910,17 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.instanceGroupManagers.patch", + // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.removeInstances", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "instanceGroup": { + // "description": "The name of the instance group where the specified instances will be removed.", // "location": "path", // "required": true, // "type": "string" @@ -50508,15 +54938,15 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "InstanceGroupsRemoveInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -50529,40 +54959,26 @@ func (c *InstanceGroupManagersPatchCall) Do(opts 
...googleapi.CallOption) (*Oper } -// method id "compute.instanceGroupManagers.recreateInstances": +// method id "compute.instanceGroups.setNamedPorts": -type InstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsSetNamedPortsCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RecreateInstances: Flags the specified instances in the managed -// instance group to be immediately recreated. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the flag is set -// even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { - c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNamedPorts: Sets the named ports for the specified instance group. +func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { + c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest + c.instanceGroup = instanceGroup + c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest return c } @@ -50580,7 +54996,7 @@ func (r *InstanceGroupManagersService) RecreateInstances(project string, zone st // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -50588,7 +55004,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50596,35 +55012,35 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -50632,21 +55048,21 @@ func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*htt } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Do executes the "compute.instanceGroups.setNamedPorts" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50677,17 +55093,17 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Flags the specified instances in the managed instance group to be immediately recreated. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the flag is set even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Sets the named ports for the specified instance group.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.recreateInstances", + // "id": "compute.instanceGroups.setNamedPorts", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceGroup": { + // "description": "The name of the instance group where the named ports are updated.", // "location": "path", // "required": true, // "type": "string" @@ -50705,15 +55121,15 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", // "request": { - // "$ref": "InstanceGroupManagersRecreateInstancesRequest" + // "$ref": "InstanceGroupsSetNamedPortsRequest" // }, // "response": { // "$ref": "Operation" @@ -50726,45 +55142,25 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } -// method id "compute.instanceGroupManagers.resize": +// method id "compute.instanceTemplates.delete": -type InstanceGroupManagersResizeCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesDeleteCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the managed instance group. If you increase the size, -// the group creates new instances using the current instance template. -// If you decrease the size, the group deletes instances. The resize -// operation is marked DONE when the resize actions are scheduled even -// if the group has not yet added or deleted any instances. You must -// separately verify the status of the creating or deleting actions with -// the listmanagedinstances method. -// -// When resizing down, the instance group arbitrarily chooses the order -// in which VMs are deleted. 
The group takes into account some VM -// attributes when making the selection including: -// -// + The status of the VM instance. + The health of the VM instance. + -// The instance template version the VM is based on. + For regional -// managed instance groups, the location of the VM instance. -// -// This list is subject to change. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { - c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified instance template. Deleting an instance +// template is permanent and cannot be undone. It is not possible to +// delete templates that are already in use by a managed instance group. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete +func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { + c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) + c.instanceTemplate = instanceTemplate return c } @@ -50782,7 +55178,7 @@ func (r *InstanceGroupManagersService) Resize(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { +func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -50790,7 +55186,7 @@ func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { +func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50798,21 +55194,21 @@ func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { +func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersResizeCall) Header() http.Header { +func (c *InstanceTemplatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -50821,29 +55217,28 @@ func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "instanceTemplate": c.instanceTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.resize" call. +// Do executes the "compute.instanceTemplates.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50874,19 +55269,18 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nWhen resizing down, the instance group arbitrarily chooses the order in which VMs are deleted. The group takes into account some VM attributes when making the selection including:\n\n+ The status of the VM instance. + The health of the VM instance. + The instance template version the VM is based on. + For regional managed instance groups, the location of the VM instance.\n\nThis list is subject to change.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resize", + // "description": "Deletes the specified instance template. 
Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group.", + // "httpMethod": "DELETE", + // "id": "compute.instanceTemplates.delete", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager", - // "size" + // "instanceTemplate" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceTemplate": { + // "description": "The name of the instance template to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -50901,22 +55295,9 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "size": { - // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - // "format": "int32", - // "location": "query", - // "required": true, - // "type": "integer" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", // "response": { // "$ref": "Operation" // }, @@ -50928,112 +55309,97 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.setInstanceTemplate": +// method id "compute.instanceTemplates.get": -type InstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesGetCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Specifies the instance template to use when -// creating new instances in this group. The templates for existing -// instances in the group do not change unless you recreate them. 
-func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { - c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified instance template. Gets a list of +// available instance templates by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get +func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { + c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { - c.urlParams_.Set("requestId", requestId) + c.instanceTemplate = instanceTemplate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *InstanceTemplatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "instanceTemplate": c.instanceTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceTemplates.get" call. +// Exactly one of *InstanceTemplate or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplate.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51052,7 +55418,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceTemplate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -51064,18 +55430,18 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Specifies the instance template to use when creating new instances in this group. 
The templates for existing instances in the group do not change unless you recreate them.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setInstanceTemplate", + // "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.get", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "instanceTemplate" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceTemplate": { + // "description": "The name of the instance template.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -51085,144 +55451,111 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - // "request": { - // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" - // }, + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceTemplate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.setTargetPools": +// method id "compute.instanceTemplates.getIamPolicy": -type InstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all instances in -// this managed instance group are assigned. The target pools -// automatically apply to all of the instances in the managed instance -// group. 
This operation is marked DONE when you make the request even -// if the instances have not yet been added to their target pools. The -// change might take some time to apply to all of the instances in the -// group depending on the size of the group. -func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { - c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *InstanceTemplatesService) GetIamPolicy(project string, resource string) *InstanceTemplatesGetIamPolicyCall { + c := &InstanceTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstanceTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstanceTemplatesGetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setTargetPools" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceTemplates.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51241,7 +55574,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -51253,21 +55586,14 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. 
This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setTargetPools", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.getIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -51275,55 +55601,48 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - // "request": { - // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" - // }, + // "path": "{project}/global/instanceTemplates/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.addInstances": +// method id "compute.instanceTemplates.insert": -type InstanceGroupsAddInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesInsertCall struct { + s *Service + project string + instancetemplate *InstanceTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddInstances: Adds a list of instances to the specified instance -// group. All of the instances in the instance group must be in the same -// network/subnetwork. Read Adding instances for more information. 
-func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { - c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance template in the specified project using +// the data that is included in the request. If you are creating a new +// template to update an existing instance group, your new instance +// template must use the same network or, if applicable, the same +// subnetwork as the original template. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert +func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { + c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest + c.instancetemplate = instancetemplate return c } @@ -51341,7 +55660,7 @@ func (r *InstanceGroupsService) AddInstances(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { +func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -51349,7 +55668,7 @@ func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { +func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51357,35 +55676,35 @@ func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { +func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsAddInstancesCall) Header() http.Header { +func (c *InstanceTemplatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -51393,21 +55712,19 @@ func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.addInstances" call. +// Do executes the "compute.instanceTemplates.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51438,21 +55755,13 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", + // "description": "Creates an instance template in the specified project using the data that is included in the request. 
If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.addInstances", + // "id": "compute.instanceTemplates.insert", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" + // "project" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where you are adding instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -51464,17 +55773,11 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + // "path": "{project}/global/instanceTemplates", // "request": { - // "$ref": "InstanceGroupsAddInstancesRequest" + // "$ref": "InstanceTemplate" // }, // "response": { // "$ref": "Operation" @@ -51487,9 +55790,9 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instanceGroups.aggregatedList": +// method id "compute.instanceTemplates.list": -type InstanceGroupsAggregatedListCall struct { +type InstanceTemplatesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -51498,10 +55801,11 @@ type InstanceGroupsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves the list of instance groups and sorts them -// by zone. -func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { - c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of instance templates that are contained +// within the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list +func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { + c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -51528,317 +55832,115 @@ func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAg // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). 
-func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *InstanceGroupsAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.aggregatedList" call. -// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroupAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of instance groups and sorts them by zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/instanceGroups", - // "response": { - // "$ref": "InstanceGroupAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.delete": - -type InstanceGroupsDeleteCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { + c.urlParams_.Set("filter", filter) + return c } -// Delete: Deletes the specified instance group. The instances in the -// group are not deleted. Note that instance group must not belong to a -// backend service. Read Deleting an instance group for more -// information. 
-func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { - c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroup = instanceGroup +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { - c.urlParams_.Set("requestId", requestId) +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { +func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { +func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsDeleteCall) Header() http.Header { +func (c *InstanceTemplatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceTemplates.list" call. +// Exactly one of *InstanceTemplateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51857,7 +55959,7 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceTemplateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -51869,145 +55971,161 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroups.delete", + // "description": "Retrieves a list of instance templates that are contained within the specified project.", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.list", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" + // "project" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group to delete.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "path": "{project}/global/instanceTemplates", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceTemplateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupsGetCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.instanceTemplates.setIamPolicy": + +type InstanceTemplatesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance group. Gets a list of available -// instance groups by making a list() request. 
-func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { - c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *InstanceTemplatesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InstanceTemplatesSetIamPolicyCall { + c := &InstanceTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { +func (c *InstanceTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { +func (c *InstanceTemplatesSetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsGetCall) Header() http.Header { +func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/setIamPolicy") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { +// Do executes the "compute.instanceTemplates.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52026,7 +56144,7 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroup{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52038,21 +56156,14 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup } return ret, nil // { - // "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.get", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.instanceTemplates.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroup" + // "resource" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -52060,71 +56171,55 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "path": "{project}/global/instanceTemplates/{resource}/setIamPolicy", + // "request": { + // "$ref": "GlobalSetPolicyRequest" + // }, // "response": { - // "$ref": "InstanceGroup" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroups.insert": +// method id "compute.instanceTemplates.testIamPermissions": -type InstanceGroupsInsertCall struct { - s *Service - project string - zone string - instancegroup *InstanceGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance group in the specified project using the -// parameters that are included in the request. -func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { - c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { + c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instancegroup = instancegroup - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
-// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { +func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52132,35 +56227,35 @@ func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { +func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsInsertCall) Header() http.Header { +func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -52168,20 +56263,20 @@ func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceTemplates.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. 
Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52200,7 +56295,7 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52212,12 +56307,12 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.insert", + // "id": "compute.instanceTemplates.testIamPermissions", // "parameterOrder": [ // "project", - // "zone" + // "resource" // ], // "parameters": { // "project": { @@ -52227,186 +56322,137 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the instance group.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups", + // "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", // "request": { - // "$ref": "InstanceGroup" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.list": +// method id "compute.instances.addAccessConfig": -type InstanceGroupsListCall struct { +type InstancesAddAccessConfigCall struct { s *Service project string zone string + instance string + accessconfig *AccessConfig urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of instance groups that are located in the -// specified project and zone. -func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { - c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddAccessConfig: Adds an access config to an instance's network +// interface. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig +func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { + c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { +func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { +func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsListCall) Header() http.Header { +func (c *InstancesAddAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.list" call. -// Exactly one of *InstanceGroupList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { +// Do executes the "compute.instances.addAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52425,7 +56471,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52437,35 +56483,27 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou } return ret, nil // { - // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.list", + // "description": "Adds an access config to an instance's network interface.", + // "httpMethod": "POST", + // "id": "compute.instances.addAccessConfig", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance", + // "networkInterface" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "networkInterface": { + // "description": "The name of the network interface to add to this instance.", // "location": "query", + // "required": true, // "type": "string" // }, // "project": { @@ -52475,67 +56513,51 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups", + // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", + // "request": { + // "$ref": "AccessConfig" + // }, // "response": { - // "$ref": "InstanceGroupList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.listInstances": +// method id "compute.instances.aggregatedList": -type InstanceGroupsListInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group. -func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { - c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves aggregated list of all of the instances in +// your project across all regions and zones. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList +func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { + c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest return c } @@ -52561,7 +56583,7 @@ func (r *InstanceGroupsService) ListInstances(project string, zone string, insta // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { +func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -52572,7 +56594,7 @@ func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsL // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { +func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -52589,7 +56611,7 @@ func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *Instance // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { +func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -52597,7 +56619,7 @@ func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroup // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { +func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -52605,65 +56627,71 @@ func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { +func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { +func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsListInstancesCall) Header() http.Header { +func (c *InstancesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.listInstances" call. -// Exactly one of *InstanceGroupsListInstances or error will be non-nil. -// Any non-2xx status code is an error. 
Response headers are in either -// *InstanceGroupsListInstances.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instances.aggregatedList" call. +// Exactly one of *InstanceAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { +func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52682,7 +56710,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupsListInstances{ + ret := &InstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52694,13 +56722,11 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins } return ret, nil // { - // "description": "Lists the instances in the specified instance group.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.listInstances", + // "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones.", + // "httpMethod": "GET", + // "id": "compute.instances.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" + // "project" // ], // "parameters": { // "filter": { @@ -52708,12 +56734,6 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // "location": "query", // "type": "string" // }, - // "instanceGroup": { - // "description": "The name of the instance group from which you want to generate a list of included instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", @@ -52738,20 +56758,11 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", - // "request": { - // "$ref": "InstanceGroupsListInstancesRequest" - // }, + // "path": "{project}/aggregated/instances", // "response": { - // "$ref": "InstanceGroupsListInstances" + // "$ref": "InstanceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -52765,7 +56776,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { +func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -52783,214 +56794,38 @@ func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*Ins } } -// method id "compute.instanceGroups.removeInstances": +// method id "compute.instances.attachDisk": -type InstanceGroupsRemoveInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAttachDiskCall struct { + s *Service + project string + zone string + instance string + attacheddisk *AttachedDisk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveInstances: Removes one or more instances from the specified -// instance group, but does not delete those instances. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration before the VM instance is removed or deleted. -func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { - c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AttachDisk: Attaches an existing Disk resource to an instance. You +// must first create the disk before you can attach it. It is not +// possible to create and attach a disk at the same time. For more +// information, read Adding a persistent disk to your instance. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk +func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { + c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { - c.ctx_ = ctx + c.instance = instance + c.attacheddisk = attacheddisk return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.removeInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.removeInstances", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the specified instances will be removed.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", - // "request": { - // "$ref": "InstanceGroupsRemoveInstancesRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceGroups.setNamedPorts": - -type InstanceGroupsSetNamedPortsCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetNamedPorts: Sets the named ports for the specified instance group. -func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { - c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest +// ForceAttach sets the optional parameter "forceAttach": Whether to +// force attach the disk even if it's currently attached to another +// instance. +func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { + c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) return c } @@ -53008,7 +56843,7 @@ func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { c.urlParams_.Set("requestId", requestId) return c } @@ -53016,7 +56851,7 @@ func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53024,35 +56859,35 @@ func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { +func (c *InstancesAttachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -53060,21 +56895,21 @@ func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.setNamedPorts" call. +// Do executes the "compute.instances.attachDisk" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53105,18 +56940,24 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Sets the named ports for the specified instance group.", + // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. 
For more information, read Adding a persistent disk to your instance.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.setNamedPorts", + // "id": "compute.instances.attachDisk", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the named ports are updated.", + // "forceAttach": { + // "description": "Whether to force attach the disk even if it's currently attached to another instance.", + // "location": "query", + // "type": "boolean" + // }, + // "instance": { + // "description": "The instance name for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -53133,15 +56974,16 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", // "request": { - // "$ref": "InstanceGroupsSetNamedPortsRequest" + // "$ref": "AttachedDisk" // }, // "response": { // "$ref": "Operation" @@ -53154,25 +56996,26 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceTemplates.delete": +// method id "compute.instances.delete": -type InstanceTemplatesDeleteCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance template. Deleting an instance -// template is permanent and cannot be undone. It's not possible to -// delete templates which are in use by an instance group. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete -func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { - c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Instance resource. For more +// information, see Stopping or Deleting an Instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete +func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { + c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.zone = zone + c.instance = instance return c } @@ -53190,7 +57033,7 @@ func (r *InstanceTemplatesService) Delete(project string, instanceTemplate strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { +func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -53198,7 +57041,7 @@ func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTempl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { +func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53206,21 +57049,21 @@ func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { +func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesDeleteCall) Header() http.Header { +func (c *InstancesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -53229,7 +57072,7 @@ func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -53237,20 +57080,21 @@ func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.delete" call. +// Do executes the "compute.instances.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53281,18 +57125,19 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It's not possible to delete templates which are in use by an instance group.", + // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", // "httpMethod": "DELETE", - // "id": "compute.instanceTemplates.delete", + // "id": "compute.instances.delete", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "zone", + // "instance" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template to delete.", + // "instance": { + // "description": "Name of the instance resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -53307,9 +57152,16 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/zones/{zone}/instances/{instance}", // "response": { // "$ref": "Operation" // }, @@ -53321,97 +57173,107 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.get": +// method id "compute.instances.deleteAccessConfig": -type InstanceTemplatesGetCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesDeleteAccessConfigCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance template. Gets a list of -// available instance templates by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get -func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { - c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteAccessConfig: Deletes an access config from an instance's +// network interface. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig +func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { + c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.zone = zone + c.instance = instance + c.urlParams_.Set("accessConfig", accessConfig) + c.urlParams_.Set("networkInterface", networkInterface) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { +func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { +func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceTemplatesGetCall) Header() http.Header { +func (c *InstancesDeleteAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.get" call. -// Exactly one of *InstanceTemplate or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplate.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { +// Do executes the "compute.instances.deleteAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53430,7 +57292,7 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceTemplate{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53442,18 +57304,33 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe } return ret, nil // { - // "description": "Returns the specified instance template. 
Gets a list of available instance templates by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.get", + // "description": "Deletes an access config from an instance's network interface.", + // "httpMethod": "POST", + // "id": "compute.instances.deleteAccessConfig", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "zone", + // "instance", + // "accessConfig", + // "networkInterface" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template.", + // "accessConfig": { + // "description": "The name of the access config to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface.", + // "location": "query", // "required": true, // "type": "string" // }, @@ -53463,111 +57340,131 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", // "response": { - // "$ref": "InstanceTemplate" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceTemplates.getIamPolicy": +// method id "compute.instances.detachDisk": -type InstanceTemplatesGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesDetachDiskCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. 
-func (r *InstanceTemplatesService) GetIamPolicy(project string, resource string) *InstanceTemplatesGetIamPolicyCall { - c := &InstanceTemplatesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DetachDisk: Detaches a disk from an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk +func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { + c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.zone = zone + c.instance = instance + c.urlParams_.Set("deviceName", deviceName) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetIamPolicyCall { +func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesGetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesGetIamPolicyCall { +func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { +func (c *InstancesDetachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.detachDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53586,7 +57483,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53598,14 +57495,29 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.getIamPolicy", + // "description": "Detaches a disk from an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.detachDisk", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance", + // "deviceName" // ], // "parameters": { + // "deviceName": { + // "description": "The device name of the disk to detach. 
Make a get() request on the instance to view currently attached disks and device names.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "Instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -53613,130 +57525,125 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceTemplates.insert": +// method id "compute.instances.get": -type InstanceTemplatesInsertCall struct { - s *Service - project string - instancetemplate *InstanceTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance template in the specified project using -// the data that is included in the request. If you are creating a new -// template to update an existing instance group, your new instance -// template must use the same network or, if applicable, the same -// subnetwork as the original template. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert -func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { - c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Instance resource. Gets a list of +// available instances by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get +func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { + c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instancetemplate = instancetemplate - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { +func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { +func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceTemplatesInsertCall) Header() http.Header { +func (c *InstancesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.instances.get" call. +// Exactly one of *Instance or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Instance.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53755,7 +57662,7 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Instance{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53767,128 +57674,77 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", - // "httpMethod": "POST", - // "id": "compute.instanceTemplates.insert", + // "description": "Returns the specified Instance resource. 
Gets a list of available instances by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instances.get", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/instanceTemplates", - // "request": { - // "$ref": "InstanceTemplate" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instanceTemplates.list": - -type InstanceTemplatesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of instance templates that are contained -// within the specified project and zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list -func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { - c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. 
However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { - c.urlParams_.Set("filter", filter) - return c -} + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}", + // "response": { + // "$ref": "Instance" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c +// method id "compute.instances.getIamPolicy": + +type InstancesGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { - c.urlParams_.Set("pageToken", pageToken) +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { + c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { +func (c *InstancesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53898,7 +57754,7 @@ func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTempla // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { +func (c *InstancesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstancesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -53906,21 +57762,21 @@ func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTempl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { +func (c *InstancesGetIamPolicyCall) Context(ctx context.Context) *InstancesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesListCall) Header() http.Header { +func (c *InstancesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -53932,7 +57788,7 @@ func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -53940,19 +57796,21 @@ func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.list" call. -// Exactly one of *InstanceTemplateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { +// Do executes the "compute.instances.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53971,7 +57829,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceTemplateList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53983,47 +57841,40 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT } return ret, nil // { - // "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.instanceTemplates.list", + // "id": "compute.instances.getIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates", + // "path": "{project}/zones/{zone}/instances/{resource}/getIamPolicy", // "response": { - // "$ref": "InstanceTemplateList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -54034,110 +57885,118 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } +// method id "compute.instances.getSerialPortOutput": + +type InstancesGetSerialPortOutputCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// method id "compute.instanceTemplates.setIamPolicy": +// GetSerialPortOutput: Returns the last 1 MB of serial port output from +// the specified instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput +func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { + c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} -type InstanceTemplatesSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Port sets the optional parameter "port": Specifies which COM or +// serial port to retrieve data from. 
+func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("port", fmt.Sprint(port)) + return c } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *InstanceTemplatesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *InstanceTemplatesSetIamPolicyCall { - c := &InstanceTemplatesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest +// Start sets the optional parameter "start": Returns output starting +// from a specific byte position. Use this to page through output when +// the output is too large to return in a single request. For the +// initial request, leave this field unspecified. For subsequent calls, +// this field should be set to the next value returned in the previous +// call. +func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("start", fmt.Sprint(start)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstanceTemplatesSetIamPolicyCall { +func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesSetIamPolicyCall) Context(ctx context.Context) *InstanceTemplatesSetIamPolicyCall { +func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { +func (c *InstancesGetSerialPortOutputCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.getSerialPortOutput" call. +// Exactly one of *SerialPortOutput or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SerialPortOutput.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54156,7 +58015,7 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &SerialPortOutput{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54168,14 +58027,31 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.instanceTemplates.setIamPolicy", + // "description": "Returns the last 1 MB of serial port output from the specified instance.", + // "httpMethod": "GET", + // "id": "compute.instances.getSerialPortOutput", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "port": { + // "default": "1", + // "description": "Specifies which COM or serial port to retrieve data from.", + // "format": "int32", + // "location": "query", + // "maximum": "4", + // "minimum": "1", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -54183,112 +58059,126 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "start": { + // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/setIamPolicy", - // "request": { - // "$ref": "GlobalSetPolicyRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", // "response": { - // "$ref": "Policy" + // "$ref": "SerialPortOutput" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceTemplates.testIamPermissions": +// method id "compute.instances.getShieldedInstanceIdentity": -type InstanceTemplatesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetShieldedInstanceIdentityCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { - c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetShieldedInstanceIdentity: Returns the Shielded Instance Identity +// of an instance +func (r *InstancesService) GetShieldedInstanceIdentity(project string, zone string, instance string) *InstancesGetShieldedInstanceIdentityCall { + c := &InstancesGetShieldedInstanceIdentityCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { +func (c *InstancesGetShieldedInstanceIdentityCall) Fields(s ...googleapi.Field) *InstancesGetShieldedInstanceIdentityCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetShieldedInstanceIdentityCall) IfNoneMatch(entityTag string) *InstancesGetShieldedInstanceIdentityCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { +func (c *InstancesGetShieldedInstanceIdentityCall) Context(ctx context.Context) *InstancesGetShieldedInstanceIdentityCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { +func (c *InstancesGetShieldedInstanceIdentityCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetShieldedInstanceIdentityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// Do executes the "compute.instances.getShieldedInstanceIdentity" call. +// Exactly one of *ShieldedInstanceIdentity or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ShieldedInstanceIdentity.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOption) (*ShieldedInstanceIdentity, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54307,7 +58197,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &ShieldedInstanceIdentity{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54319,14 +58209,22 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instanceTemplates.testIamPermissions", + // "description": "Returns the Shielded Instance Identity of an instance", + // "httpMethod": "GET", + // "id": "compute.instances.getShieldedInstanceIdentity", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name or id of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -54334,20 +58232,17 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity", // "response": { - // "$ref": 
"TestPermissionsResponse" + // "$ref": "ShieldedInstanceIdentity" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -54358,29 +58253,26 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } -// method id "compute.instances.addAccessConfig": +// method id "compute.instances.insert": -type InstancesAddAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesInsertCall struct { + s *Service + project string + zone string + instance *Instance + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddAccessConfig: Adds an access config to an instance's network -// interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig -func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { - c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert +func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { + c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig return c } @@ -54398,15 +58290,30 @@ func (r *InstancesService) AddAccessConfig(project string, zone string, instance // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { +func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// SourceInstanceTemplate sets the optional parameter +// "sourceInstanceTemplate": Specifies instance template to create the +// instance. +// +// This field is optional. It can be a full or partial URL. For example, +// the following are all valid URLs to an instance template: +// - +// https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate +// - projects/project/global/instanceTemplates/instanceTemplate +// - global/instanceTemplates/instanceTemplate +func (c *InstancesInsertCall) SourceInstanceTemplate(sourceInstanceTemplate string) *InstancesInsertCall { + c.urlParams_.Set("sourceInstanceTemplate", sourceInstanceTemplate) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { +func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54414,35 +58321,35 @@ func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAd // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { +func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAddAccessConfigCall) Header() http.Header { +func (c *InstancesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -54450,21 +58357,20 @@ func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.addAccessConfig" call. +// Do executes the "compute.instances.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54495,29 +58401,14 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Adds an access config to an instance's network interface.", + // "description": "Creates an instance resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.addAccessConfig", + // "id": "compute.instances.insert", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "networkInterface" + // "zone" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "networkInterface": { - // "description": "The name of the network interface to add to this instance.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -54530,6 +58421,11 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat // "location": "query", // "type": "string" // }, + // "sourceInstanceTemplate": { + // "description": "Specifies instance template to create the instance.\n\nThis field is optional. It can be a full or partial URL. For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate \n- projects/project/global/instanceTemplates/instanceTemplate \n- global/instanceTemplates/instanceTemplate", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -54538,9 +58434,9 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", + // "path": "{project}/zones/{zone}/instances", // "request": { - // "$ref": "AccessConfig" + // "$ref": "Instance" // }, // "response": { // "$ref": "Operation" @@ -54553,23 +58449,25 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.instances.aggregatedList": +// method id "compute.instances.list": -type InstancesAggregatedListCall struct { +type InstancesListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves aggregated list of all of the instances in -// your project across all regions and zones. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList -func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { - c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instances contained within the specified +// zone. 
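Since the new sourceInstanceTemplate parameter is the main change to compute.instances.insert here, a usage sketch may help. It is illustrative only, reuses the earlier imports, and every name (project, zone, instance, template path, request ID) is a placeholder; the partial template URL follows one of the forms listed in the SourceInstanceTemplate comment above.

// createFromTemplate creates an instance from an instance template, overriding only the name.
func createFromTemplate(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	// Only the fields not supplied by the template need to appear in the body.
	inst := &compute.Instance{Name: "my-instance"}
	return svc.Instances.Insert("my-project", "us-central1-a", inst).
		SourceInstanceTemplate("global/instanceTemplates/my-template"). // partial URL form
		RequestId("11111111-2222-4333-8444-555555555555").              // idempotent retry key; any non-zero UUID
		Context(ctx).
		Do()
}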
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list +func (r *InstancesService) List(project string, zone string) *InstancesListCall { + c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -54595,7 +58493,7 @@ func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedLi // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { +func (c *InstancesListCall) Filter(filter string) *InstancesListCall { c.urlParams_.Set("filter", filter) return c } @@ -54606,7 +58504,7 @@ func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregated // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { +func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -54623,7 +58521,7 @@ func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAgg // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { +func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -54631,7 +58529,7 @@ func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregat // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { +func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -54639,7 +58537,7 @@ func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { +func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54649,7 +58547,7 @@ func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { +func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { c.ifNoneMatch_ = entityTag return c } @@ -54657,21 +58555,21 @@ func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { +func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAggregatedListCall) Header() http.Header { +func (c *InstancesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -54683,7 +58581,7 @@ func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -54692,18 +58590,19 @@ func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, err req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.aggregatedList" call. -// Exactly one of *InstanceAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { +// Do executes the "compute.instances.list" call. +// Exactly one of *InstanceList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *InstanceList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54722,7 +58621,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceAggregatedList{ + ret := &InstanceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54734,11 +58633,12 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Retrieves aggregated list of all of the instances in your project across all regions and zones.", + // "description": "Retrieves the list of instances contained within the specified zone.", // "httpMethod": "GET", - // "id": "compute.instances.aggregatedList", + // "id": "compute.instances.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -54770,11 +58670,18 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/instances", + // "path": "{project}/zones/{zone}/instances", // "response": { - // "$ref": "InstanceAggregatedList" + // "$ref": "InstanceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -54788,7 +58695,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { +func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -54806,102 +58713,143 @@ func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*Instanc } } -// method id "compute.instances.attachDisk": +// method id "compute.instances.listReferrers": -type InstancesAttachDiskCall struct { +type InstancesListReferrersCall struct { s *Service project string zone string instance string - attacheddisk *AttachedDisk urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AttachDisk: Attaches an existing Disk resource to an instance. You -// must first create the disk before you can attach it. It is not -// possible to create and attach a disk at the same time. For more -// information, read Adding a persistent disk to your instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk -func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { - c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListReferrers: Retrieves the list of referrers to instances contained +// within the specified zone. 
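The compute.instances.list call and its Pages helper, both completed above, are typically combined as below. The sketch is illustrative, reuses the earlier imports, uses placeholder project/zone names, and writes the filter in the expression syntax the Filter comment describes.

// listRunning prints the running instances in one zone, one page at a time.
func listRunning(ctx context.Context, svc *compute.Service) error {
	return svc.Instances.List("my-project", "us-central1-a").
		Filter(`status = "RUNNING"`).
		MaxResults(100). // per-page size; Pages keeps fetching until NextPageToken is empty
		Pages(ctx, func(page *compute.InstanceList) error {
			for _, inst := range page.Items {
				fmt.Println(inst.Name, inst.Status)
			}
			return nil
		})
}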
For more information, read Viewing +// Referrers to VM Instances. +func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { + c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.attacheddisk = attacheddisk return c } -// ForceAttach sets the optional parameter "forceAttach": Whether to -// force attach the disk even if it's currently attached to another -// instance. This is only available for regional disks. -func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { - c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { + c.urlParams_.Set("filter", filter) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
+// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { - c.urlParams_.Set("requestId", requestId) +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { +func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { +func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesAttachDiskCall) Header() http.Header { +func (c *InstancesListReferrersCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -54914,14 +58862,14 @@ func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.attachDisk" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.listReferrers" call. +// Exactly one of *InstanceListReferrers or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceListReferrers.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54940,7 +58888,7 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceListReferrers{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54952,27 +58900,45 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", - // "httpMethod": "POST", - // "id": "compute.instances.attachDisk", + // "description": "Retrieves the list of referrers to instances contained within the specified zone. 
For more information, read Viewing Referrers to VM Instances.", + // "httpMethod": "GET", + // "id": "compute.instances.listReferrers", // "parameterOrder": [ // "project", // "zone", // "instance" // ], // "parameters": { - // "forceAttach": { - // "description": "Whether to force attach the disk even if it's currently attached to another instance. This is only available for regional disks.", + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", - // "type": "boolean" + // "type": "string" // }, // "instance": { - // "description": "The instance name for this request.", + // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -54980,11 +58946,6 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -54993,24 +58954,43 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", - // "request": { - // "$ref": "AttachedDisk" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/referrers", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceListReferrers" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesDeleteCall struct { +// method id "compute.instances.reset": + +type InstancesResetCall struct { s *Service project string zone string @@ -55020,11 +59000,12 @@ type InstancesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified Instance resource. For more -// information, see Stopping or Deleting an Instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete -func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { - c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Reset: Performs a reset on the instance. This is a hard reset the VM +// does not do a graceful shutdown. For more information, see Resetting +// an instance. 
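For compute.instances.listReferrers, completed above, the "-" wildcard from the instance parameter pattern spans all instances in the zone. The sketch below is illustrative, reuses the earlier imports, and the Referrer/ReferenceType field names are assumed from the Reference schema, which is not shown in this hunk.

// listReferrers prints everything that refers to any instance in the zone.
func listReferrers(ctx context.Context, svc *compute.Service) error {
	return svc.Instances.ListReferrers("my-project", "us-central1-a", "-"). // "-" = all instances in the zone
		Pages(ctx, func(page *compute.InstanceListReferrers) error {
			for _, ref := range page.Items {
				fmt.Println(ref.Referrer, ref.ReferenceType)
			}
			return nil
		})
}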
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset +func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { + c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance @@ -55045,7 +59026,7 @@ func (r *InstancesService) Delete(project string, zone string, instance string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { +func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { c.urlParams_.Set("requestId", requestId) return c } @@ -55053,7 +59034,7 @@ func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { +func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55061,21 +59042,21 @@ func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { +func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDeleteCall) Header() http.Header { +func (c *InstancesResetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -55084,9 +59065,9 @@ func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -55099,14 +59080,14 @@ func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.delete" call. +// Do executes the "compute.instances.reset" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55137,9 +59118,9 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", - // "httpMethod": "DELETE", - // "id": "compute.instances.delete", + // "description": "Performs a reset on the instance. This is a hard reset the VM does not do a graceful shutdown. For more information, see Resetting an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.reset", // "parameterOrder": [ // "project", // "zone", @@ -55147,7 +59128,7 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // ], // "parameters": { // "instance": { - // "description": "Name of the instance resource to delete.", + // "description": "Name of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -55173,7 +59154,7 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/zones/{zone}/instances/{instance}/reset", // "response": { // "$ref": "Operation" // }, @@ -55185,28 +59166,31 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.deleteAccessConfig": +// method id "compute.instances.setDeletionProtection": -type InstancesDeleteAccessConfigCall struct { +type InstancesSetDeletionProtectionCall struct { s *Service project string zone string - instance string + resource string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// DeleteAccessConfig: Deletes an access config from an instance's -// network interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig -func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { - c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDeletionProtection: Sets deletion protection on the instance. +func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { + c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.urlParams_.Set("accessConfig", accessConfig) - c.urlParams_.Set("networkInterface", networkInterface) + c.resource = resource + return c +} + +// DeletionProtection sets the optional parameter "deletionProtection": +// Whether the resource should be protected against deletion. 
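compute.instances.reset, completed above, is a hard power-cycle, so a caller usually pairs it with a request ID to make retries safe. The sketch is illustrative, reuses the earlier imports, and the UUID and resource names are placeholders.

// resetInstance hard-resets a VM; anything not flushed to disk is lost.
func resetInstance(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	return svc.Instances.Reset("my-project", "us-central1-a", "my-instance").
		RequestId("11111111-2222-4333-8444-666666666666"). // retrying with the same ID is ignored server-side
		Context(ctx).
		Do()
}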
+func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) return c } @@ -55224,7 +59208,7 @@ func (r *InstancesService) DeleteAccessConfig(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { +func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { c.urlParams_.Set("requestId", requestId) return c } @@ -55232,7 +59216,7 @@ func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *Instances // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { +func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55240,21 +59224,21 @@ func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { +func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDeleteAccessConfigCall) Header() http.Header { +func (c *InstancesSetDeletionProtectionCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -55263,7 +59247,7 @@ func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -55273,19 +59257,19 @@ func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.deleteAccessConfig" call. +// Do executes the "compute.instances.setDeletionProtection" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55316,35 +59300,20 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes an access config from an instance's network interface.", + // "description": "Sets deletion protection on the instance.", // "httpMethod": "POST", - // "id": "compute.instances.deleteAccessConfig", + // "id": "compute.instances.setDeletionProtection", // "parameterOrder": [ // "project", // "zone", - // "instance", - // "accessConfig", - // "networkInterface" + // "resource" // ], // "parameters": { - // "accessConfig": { - // "description": "The name of the access config to delete.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "networkInterface": { - // "description": "The name of the network interface.", + // "deletionProtection": { + // "default": "true", + // "description": "Whether the resource should be protected against deletion.", // "location": "query", - // "required": true, - // "type": "string" + // "type": "boolean" // }, // "project": { // "description": "Project ID for this request.", @@ -55358,6 +59327,13 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "location": "query", // "type": "string" // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -55366,7 +59342,7 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + // "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", // "response": { // "$ref": "Operation" // }, @@ -55378,9 +59354,9 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instances.detachDisk": +// method id "compute.instances.setDiskAutoDelete": -type InstancesDetachDiskCall struct { +type InstancesSetDiskAutoDeleteCall struct { s *Service project string zone string @@ -55390,13 +59366,15 @@ type InstancesDetachDiskCall struct { header_ http.Header } -// DetachDisk: Detaches a disk from an instance. 
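A short sketch of the new compute.instances.setDeletionProtection call completed above, reusing the earlier imports; note that the generated builder names its third argument resource rather than instance, matching the {project}/zones/{zone}/instances/{resource}/setDeletionProtection path. All names are placeholders.

// protectInstance turns on deletion protection for a VM.
func protectInstance(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	return svc.Instances.SetDeletionProtection("my-project", "us-central1-a", "my-instance").
		DeletionProtection(true). // optional; the metadata above defaults it to "true"
		Context(ctx).
		Do()
}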
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk -func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { - c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to +// an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete +func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { + c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) c.urlParams_.Set("deviceName", deviceName) return c } @@ -55415,7 +59393,7 @@ func (r *InstancesService) DetachDisk(project string, zone string, instance stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { +func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -55423,7 +59401,7 @@ func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { +func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55431,21 +59409,21 @@ func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { +func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDetachDiskCall) Header() http.Header { +func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -55454,7 +59432,7 @@ func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -55469,14 +59447,14 @@ func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.detachDisk" call. +// Do executes the "compute.instances.setDiskAutoDelete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55507,24 +59485,32 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Detaches a disk from an instance.", + // "description": "Sets the auto-delete flag for a disk attached to an instance.", // "httpMethod": "POST", - // "id": "compute.instances.detachDisk", + // "id": "compute.instances.setDiskAutoDelete", // "parameterOrder": [ // "project", // "zone", // "instance", + // "autoDelete", // "deviceName" // ], // "parameters": { + // "autoDelete": { + // "description": "Whether to auto-delete the disk when the instance is deleted.", + // "location": "query", + // "required": true, + // "type": "boolean" + // }, // "deviceName": { - // "description": "The device name of the disk to detach. Make a get() request on the instance to view currently attached disks and device names.", + // "description": "The device name of the disk to modify. Make a get() request on the instance to view currently attached disks and device names.", // "location": "query", + // "pattern": "\\w[\\w.-]{0,254}", // "required": true, // "type": "string" // }, // "instance": { - // "description": "Instance name for this request.", + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -55550,7 +59536,7 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", // "response": { // "$ref": "Operation" // }, @@ -55562,80 +59548,72 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instances.get": +// method id "compute.instances.setIamPolicy": -type InstancesGetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + zonesetpolicyrequest *ZoneSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Instance resource. Gets a list of -// available instances by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get -func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { - c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstancesSetIamPolicyCall { + c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance + c.resource = resource + c.zonesetpolicyrequest = zonesetpolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { +func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { +func (c *InstancesSetIamPolicyCall) Context(ctx context.Context) *InstancesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetCall) Header() http.Header { +func (c *InstancesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setIamPolicy") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -55643,19 +59621,19 @@ func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.get" call. -// Exactly one of *Instance or error will be non-nil. Any non-2xx status +// Do executes the "compute.instances.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Instance.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55674,7 +59652,7 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Instance{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55686,26 +59664,26 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } return ret, nil // { - // "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instances.get", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.instances.setIamPolicy", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -55717,92 +59695,106 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/zones/{zone}/instances/{resource}/setIamPolicy", + // "request": { + // "$ref": "ZoneSetPolicyRequest" + // }, // "response": { - // "$ref": "Instance" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getIamPolicy": +// method id "compute.instances.setLabels": -type InstancesGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetLabelsCall struct { + s *Service + project string + zone string + instance string + instancessetlabelsrequest *InstancesSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { - c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets labels on an instance. To learn more about labels, +// read the Labeling Resources documentation. +func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { + c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource + c.instance = instance + c.instancessetlabelsrequest = instancessetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
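// Editor's note (not part of the vendored diff): a minimal usage sketch for the
// generated compute.instances.setIamPolicy wrapper added in this hunk. Only the
// call chain shown above (SetIamPolicy, Context, Do returning *Policy) is relied
// on; the svc.Instances accessor, the package name, the import path, and the
// projectID/zoneName/instanceName identifiers are illustrative assumptions.
package computeusage

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// setInstanceIamPolicy replaces whatever IAM policy is currently attached to
// the instance (addressed via the {resource} path parameter) with the policy
// carried in req, and returns the resulting Policy.
func setInstanceIamPolicy(ctx context.Context, svc *compute.Service,
	projectID, zoneName, instanceName string,
	req *compute.ZoneSetPolicyRequest) (*compute.Policy, error) {
	return svc.Instances.
		SetIamPolicy(projectID, zoneName, instanceName, req).
		Context(ctx).
		Do()
}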
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesGetIamPolicyCall { +func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstancesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetIamPolicyCall) Context(ctx context.Context) *InstancesGetIamPolicyCall { +func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetIamPolicyCall) Header() http.Header { +func (c *InstancesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -55810,19 +59802,19 @@ func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "resource": c.resource, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. 
Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55841,7 +59833,7 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55853,15 +59845,22 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.instances.getIamPolicy", + // "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.instances.setLabels", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -55869,11 +59868,9 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, // "zone": { @@ -55884,111 +59881,106 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/instances/{instance}/setLabels", + // "request": { + // "$ref": "InstancesSetLabelsRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getSerialPortOutput": +// method id "compute.instances.setMachineResources": -type InstancesGetSerialPortOutputCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetMachineResourcesCall struct { + s *Service + project string + zone string + instance string + instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetSerialPortOutput: Returns the last 1 MB of serial port output from -// the specified instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput -func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { - c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMachineResources: Changes the number and/or type of accelerator +// for a stopped instance to the values specified in the request. +func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { + c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest return c } -// Port sets the optional parameter "port": Specifies which COM or -// serial port to retrieve data from. -func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("port", fmt.Sprint(port)) - return c -} - -// Start sets the optional parameter "start": Returns output starting -// from a specific byte position. Use this to page through output when -// the output is too large to return in a single request. For the -// initial request, leave this field unspecified. For subsequent calls, -// this field should be set to the next value returned in the previous -// call. -func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("start", fmt.Sprint(start)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
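// Editor's note (not part of the vendored diff): a usage sketch for the
// compute.instances.setLabels wrapper above. The SetLabels/RequestId/Context/Do
// chain comes from this hunk; the Labels and LabelFingerprint fields on
// InstancesSetLabelsRequest, and every identifier below, are assumptions based
// on the wider compute/v1 surface rather than on anything shown in this section.
package computeusage

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// setInstanceLabels applies the given label map to an instance. The fingerprint
// is assumed to come from a prior Instances.Get, guarding against concurrent
// label updates; requestID makes the mutation safely retryable.
func setInstanceLabels(ctx context.Context, svc *compute.Service,
	projectID, zoneName, instanceName, fingerprint, requestID string,
	labels map[string]string) (*compute.Operation, error) {
	req := &compute.InstancesSetLabelsRequest{
		LabelFingerprint: fingerprint,
		Labels:           labels,
	}
	return svc.Instances.
		SetLabels(projectID, zoneName, instanceName, req).
		RequestId(requestID).
		Context(ctx).
		Do()
}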
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { +func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { +func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetSerialPortOutputCall) Header() http.Header { +func (c *InstancesSetMachineResourcesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -56001,14 +59993,14 @@ func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getSerialPortOutput" call. -// Exactly one of *SerialPortOutput or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SerialPortOutput.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { +// Do executes the "compute.instances.setMachineResources" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56027,7 +60019,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SerialPortOutput{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56039,9 +60031,9 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se } return ret, nil // { - // "description": "Returns the last 1 MB of serial port output from the specified instance.", - // "httpMethod": "GET", - // "id": "compute.instances.getSerialPortOutput", + // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMachineResources", // "parameterOrder": [ // "project", // "zone", @@ -56055,15 +60047,6 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // "required": true, // "type": "string" // }, - // "port": { - // "default": "1", - // "description": "Specifies which COM or serial port to retrieve data from.", - // "format": "int32", - // "location": "query", - // "maximum": "4", - // "minimum": "1", - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -56071,9 +60054,8 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // "required": true, // "type": "string" // }, - // "start": { - // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", - // "format": "int64", + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, @@ -56085,39 +60067,42 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", + // "request": { + // "$ref": "InstancesSetMachineResourcesRequest" + // }, // "response": { - // "$ref": "SerialPortOutput" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.insert": +// method id "compute.instances.setMachineType": -type InstancesInsertCall struct { - s *Service - project string - zone string - instance *Instance - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMachineTypeCall struct { + s *Service + project string + zone string + instance string + instancessetmachinetyperequest *InstancesSetMachineTypeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance resource in the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert -func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { - c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMachineType: Changes the machine type for a stopped instance to +// the machine type specified in the request. +func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { + c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.instancessetmachinetyperequest = instancessetmachinetyperequest return c } @@ -56135,30 +60120,15 @@ func (r *InstancesService) Insert(project string, zone string, instance *Instanc // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { +func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { c.urlParams_.Set("requestId", requestId) return c } -// SourceInstanceTemplate sets the optional parameter -// "sourceInstanceTemplate": Specifies instance template to create the -// instance. -// -// This field is optional. It can be a full or partial URL. 
For example, -// the following are all valid URLs to an instance template: -// - -// https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate -// - projects/project/global/instanceTemplates/instanceTemplate -// - global/instanceTemplates/instanceTemplate -func (c *InstancesInsertCall) SourceInstanceTemplate(sourceInstanceTemplate string) *InstancesInsertCall { - c.urlParams_.Set("sourceInstanceTemplate", sourceInstanceTemplate) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { +func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56166,35 +60136,35 @@ func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { +func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesInsertCall) Header() http.Header { +func (c *InstancesSetMachineTypeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -56202,20 +60172,21 @@ func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.insert" call. +// Do executes the "compute.instances.setMachineType" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
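// Editor's note (not part of the vendored diff): a usage sketch for the
// compute.instances.setMachineType wrapper above, which per its description may
// only be called on a stopped instance. The MachineType field (a machine-type
// URL such as "zones/<zone>/machineTypes/n1-standard-2") and all identifiers
// below are assumptions; only the call chain itself comes from this hunk.
package computeusage

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// resizeStoppedInstance switches a stopped instance to a new machine type and
// returns the zonal Operation that tracks the change.
func resizeStoppedInstance(ctx context.Context, svc *compute.Service,
	projectID, zoneName, instanceName, machineTypeURL string) (*compute.Operation, error) {
	req := &compute.InstancesSetMachineTypeRequest{MachineType: machineTypeURL}
	return svc.Instances.
		SetMachineType(projectID, zoneName, instanceName, req).
		Context(ctx).
		Do()
}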
-func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56246,14 +60217,22 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an instance resource in the specified project using the data included in the request.", + // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", // "httpMethod": "POST", - // "id": "compute.instances.insert", + // "id": "compute.instances.setMachineType", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -56266,11 +60245,6 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "location": "query", // "type": "string" // }, - // "sourceInstanceTemplate": { - // "description": "Specifies instance template to create the instance.\n\nThis field is optional. It can be a full or partial URL. For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/instanceTemplates/instanceTemplate \n- projects/project/global/instanceTemplates/instanceTemplate \n- global/instanceTemplates/instanceTemplate", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -56279,9 +60253,9 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", // "request": { - // "$ref": "Instance" + // "$ref": "InstancesSetMachineTypeRequest" // }, // "response": { // "$ref": "Operation" @@ -56294,160 +60268,112 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.list": +// method id "compute.instances.setMetadata": -type InstancesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetMetadataCall struct { + s *Service + project string + zone string + instance string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of instances contained within the specified -// zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list -func (r *InstancesService) List(project string, zone string) *InstancesListCall { - c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMetadata: Sets metadata for the specified instance to the data +// included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata +func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { + c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone + c.instance = instance + c.metadata = metadata return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstancesListCall) Filter(filter string) *InstancesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. 
-func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { - c.ifNoneMatch_ = entityTag +func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { +func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesListCall) Header() http.Header { +func (c *InstancesSetMetadataCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.list" call. 
-// Exactly one of *InstanceList or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.setMetadata" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *InstanceList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { +func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56466,7 +60392,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56478,35 +60404,20 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err } return ret, nil // { - // "description": "Retrieves the list of instances contained within the specified zone.", - // "httpMethod": "GET", - // "id": "compute.instances.list", + // "description": "Sets metadata for the specified instance to the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMetadata", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "project": { @@ -56516,6 +60427,11 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -56524,177 +60440,108 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", + // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", + // "request": { + // "$ref": "Metadata" + // }, // "response": { - // "$ref": "InstanceList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
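// Editor's note (not part of the vendored diff): a usage sketch for the
// compute.instances.setMetadata wrapper above. A typical caller first fetches
// the instance to obtain its current metadata fingerprint and then sends the
// full desired Metadata value; that fingerprint convention and every identifier
// below are assumptions, while the call chain itself comes from this hunk.
package computeusage

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// replaceInstanceMetadata overwrites the instance's metadata with md and
// returns the zonal Operation tracking the update.
func replaceInstanceMetadata(ctx context.Context, svc *compute.Service,
	projectID, zoneName, instanceName string,
	md *compute.Metadata) (*compute.Operation, error) {
	return svc.Instances.
		SetMetadata(projectID, zoneName, instanceName, md).
		Context(ctx).
		Do()
}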
-func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.listReferrers": +// method id "compute.instances.setMinCpuPlatform": -type InstancesListReferrersCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetMinCpuPlatformCall struct { + s *Service + project string + zone string + instance string + instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListReferrers: Retrieves the list of referrers to instances contained -// within the specified zone. For more information, read Viewing -// Referrers to VM Instances. -func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { - c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMinCpuPlatform: Changes the minimum CPU platform that this +// instance should use. This method can only be called on a stopped +// instance. For more information, read Specifying a Minimum CPU +// Platform. +func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { + c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { +func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { +func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesListReferrersCall) Header() http.Header { +func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -56707,14 +60554,14 @@ func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.listReferrers" call. -// Exactly one of *InstanceListReferrers or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceListReferrers.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { +// Do executes the "compute.instances.setMinCpuPlatform" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56733,7 +60580,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceListReferrers{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56745,45 +60592,22 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance } return ret, nil // { - // "description": "Retrieves the list of referrers to instances contained within the specified zone. 
For more information, read Viewing Referrers to VM Instances.", - // "httpMethod": "GET", - // "id": "compute.instances.listReferrers", + // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.", + // "httpMethod": "POST", + // "id": "compute.instances.setMinCpuPlatform", // "parameterOrder": [ // "project", // "zone", // "instance" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, // "instance": { - // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", + // "description": "Name of the instance scoping this request.", // "location": "path", - // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -56791,6 +60615,11 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -56799,60 +60628,42 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/referrers", + // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", + // "request": { + // "$ref": "InstancesSetMinCpuPlatformRequest" + // }, // "response": { - // "$ref": "InstanceListReferrers" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.reset": +// method id "compute.instances.setScheduling": -type InstancesResetCall struct { +type InstancesSetSchedulingCall struct { s *Service project string zone string instance string + scheduling *Scheduling urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Reset: Performs a reset on the instance. For more information, see -// Resetting an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset -func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { - c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetScheduling: Sets an instance's scheduling options. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling +func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { + c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.scheduling = scheduling return c } @@ -56870,7 +60681,7 @@ func (r *InstancesService) Reset(project string, zone string, instance string) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { +func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { c.urlParams_.Set("requestId", requestId) return c } @@ -56878,7 +60689,7 @@ func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { +func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56886,30 +60697,35 @@ func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { +func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesResetCall) Header() http.Header { +func (c *InstancesSetSchedulingCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -56924,14 +60740,14 @@ func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.reset" call. +// Do executes the "compute.instances.setScheduling" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56962,9 +60778,9 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Performs a reset on the instance. For more information, see Resetting an instance.", + // "description": "Sets an instance's scheduling options.", // "httpMethod": "POST", - // "id": "compute.instances.reset", + // "id": "compute.instances.setScheduling", // "parameterOrder": [ // "project", // "zone", @@ -56972,7 +60788,7 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -56998,7 +60814,10 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/reset", + // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + // "request": { + // "$ref": "Scheduling" + // }, // "response": { // "$ref": "Operation" // }, @@ -57010,31 +60829,28 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.instances.setDeletionProtection": +// method id "compute.instances.setServiceAccount": -type InstancesSetDeletionProtectionCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetServiceAccountCall struct { + s *Service + project string + zone string + instance string + instancessetserviceaccountrequest *InstancesSetServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetDeletionProtection: Sets deletion protection on the instance. -func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { - c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetServiceAccount: Sets the service account on the instance. For more +// information, read Changing the service account and access scopes for +// an instance. +func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { + c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - return c -} - -// DeletionProtection sets the optional parameter "deletionProtection": -// Whether the resource should be protected against deletion. 
-func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { - c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) + c.instance = instance + c.instancessetserviceaccountrequest = instancessetserviceaccountrequest return c } @@ -57052,7 +60868,7 @@ func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtecti // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { +func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { c.urlParams_.Set("requestId", requestId) return c } @@ -57060,7 +60876,7 @@ func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *Instan // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { +func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57068,30 +60884,35 @@ func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *Insta // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { +func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetDeletionProtectionCall) Header() http.Header { +func (c *InstancesSetServiceAccountCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -57101,19 +60922,19 @@ func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Respon googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "resource": c.resource, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setDeletionProtection" call. +// Do executes the "compute.instances.setServiceAccount" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57144,20 +60965,21 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets deletion protection on the instance.", + // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", // "httpMethod": "POST", - // "id": "compute.instances.setDeletionProtection", + // "id": "compute.instances.setServiceAccount", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instance" // ], // "parameters": { - // "deletionProtection": { - // "default": "true", - // "description": "Whether the resource should be protected against deletion.", - // "location": "query", - // "type": "boolean" + // "instance": { + // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // }, // "project": { // "description": "Project ID for this request.", @@ -57171,13 +60993,6 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -57186,7 +61001,10 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", + // "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "request": { + // "$ref": "InstancesSetServiceAccountRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -57198,28 +61016,29 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.instances.setDiskAutoDelete": +// method id "compute.instances.setShieldedInstanceIntegrityPolicy": -type InstancesSetDiskAutoDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetShieldedInstanceIntegrityPolicyCall struct { + s *Service + project string + zone string + instance string + shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to -// an instance. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete -func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { - c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance +// integrity policy for an instance. You can only use this method on a +// running instance. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. +func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) - c.urlParams_.Set("deviceName", deviceName) + c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy return c } @@ -57237,7 +61056,7 @@ func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -57245,7 +61064,7 @@ func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57253,32 +61072,37 @@ func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *Instances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -57291,14 +61115,14 @@ func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setDiskAutoDelete" call. +// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57329,32 +61153,17 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the auto-delete flag for a disk attached to an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.setDiskAutoDelete", + // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", // "parameterOrder": [ // "project", // "zone", - // "instance", - // "autoDelete", - // "deviceName" + // "instance" // ], // "parameters": { - // "autoDelete": { - // "description": "Whether to auto-delete the disk when the instance is deleted.", - // "location": "query", - // "required": true, - // "type": "boolean" - // }, - // "deviceName": { - // "description": "The device name of the disk to modify. 
Make a get() request on the instance to view currently attached disks and device names.", - // "location": "query", - // "pattern": "\\w[\\w.-]{0,254}", - // "required": true, - // "type": "string" - // }, // "instance": { - // "description": "The instance name for this request.", + // "description": "Name or id of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -57380,7 +61189,10 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + // "path": "{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + // "request": { + // "$ref": "ShieldedInstanceIntegrityPolicy" + // }, // "response": { // "$ref": "Operation" // }, @@ -57392,34 +61204,54 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instances.setIamPolicy": +// method id "compute.instances.setTags": -type InstancesSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - zonesetpolicyrequest *ZoneSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetTagsCall struct { + s *Service + project string + zone string + instance string + tags *Tags + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, zonesetpolicyrequest *ZoneSetPolicyRequest) *InstancesSetIamPolicyCall { - c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTags: Sets network tags for the specified instance to the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags +func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { + c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - c.zonesetpolicyrequest = zonesetpolicyrequest + c.instance = instance + c.tags = tags + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIamPolicyCall { +func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57427,35 +61259,35 @@ func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetIamPolicyCall) Context(ctx context.Context) *InstancesSetIamPolicyCall { +func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetIamPolicyCall) Header() http.Header { +func (c *InstancesSetTagsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetpolicyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -57465,19 +61297,19 @@ func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "resource": c.resource, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.setTags" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57496,7 +61328,7 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57508,15 +61340,22 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Sets network tags for the specified instance to the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.setIamPolicy", + // "id": "compute.instances.setTags", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -57524,11 +61363,9 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, // "zone": { @@ -57539,12 +61376,12 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/setIamPolicy", + // "path": "{project}/zones/{zone}/instances/{instance}/setTags", // "request": { - // "$ref": "ZoneSetPolicyRequest" + // "$ref": "Tags" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -57554,53 +61391,32 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } -// method id "compute.instances.setLabels": +// method id "compute.instances.simulateMaintenanceEvent": -type InstancesSetLabelsCall struct { - s *Service - project string - zone string - instance string - instancessetlabelsrequest *InstancesSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSimulateMaintenanceEventCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets labels on an instance. To learn more about labels, -// read the Labeling Resources documentation. -func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { - c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SimulateMaintenanceEvent: Simulates a maintenance event on the +// instance. +func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { + c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetlabelsrequest = instancessetlabelsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { - c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { +func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57608,35 +61424,30 @@ func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { +func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetLabelsCall) Header() http.Header { +func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -57651,14 +61462,14 @@ func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setLabels" call. +// Do executes the "compute.instances.simulateMaintenanceEvent" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57689,9 +61500,9 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets labels on an instance. 
To learn more about labels, read the Labeling Resources documentation.", + // "description": "Simulates a maintenance event on the instance.", // "httpMethod": "POST", - // "id": "compute.instances.setLabels", + // "id": "compute.instances.simulateMaintenanceEvent", // "parameterOrder": [ // "project", // "zone", @@ -57712,11 +61523,6 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -57725,10 +61531,7 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setLabels", - // "request": { - // "$ref": "InstancesSetLabelsRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", // "response": { // "$ref": "Operation" // }, @@ -57740,27 +61543,26 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.instances.setMachineResources": +// method id "compute.instances.start": -type InstancesSetMachineResourcesCall struct { - s *Service - project string - zone string - instance string - instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesStartCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMachineResources: Changes the number and/or type of accelerator -// for a stopped instance to the values specified in the request. -func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { - c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Start: Starts an instance that was stopped using the instances().stop +// method. For more information, see Restart an instance. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start +func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { + c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest return c } @@ -57778,7 +61580,7 @@ func (r *InstancesService) SetMachineResources(project string, zone string, inst // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { +func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { c.urlParams_.Set("requestId", requestId) return c } @@ -57786,7 +61588,7 @@ func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *Instance // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { +func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57794,35 +61596,30 @@ func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *Instanc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { +func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMachineResourcesCall) Header() http.Header { +func (c *InstancesStartCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -57837,14 +61634,14 @@ func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineResources" call. +// Do executes the "compute.instances.start" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57875,9 +61672,9 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", + // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", // "httpMethod": "POST", - // "id": "compute.instances.setMachineResources", + // "id": "compute.instances.start", // "parameterOrder": [ // "project", // "zone", @@ -57885,7 +61682,7 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name of the instance resource to start.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -57911,10 +61708,7 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", - // "request": { - // "$ref": "InstancesSetMachineResourcesRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/start", // "response": { // "$ref": "Operation" // }, @@ -57926,27 +61720,28 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.instances.setMachineType": +// method id "compute.instances.startWithEncryptionKey": -type InstancesSetMachineTypeCall struct { - s *Service - project string - zone string - instance string - instancessetmachinetyperequest *InstancesSetMachineTypeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesStartWithEncryptionKeyCall struct { + s *Service + project string + zone string + instance string + instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMachineType: Changes the machine type for a stopped instance to -// the machine type specified in the request. -func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { - c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// StartWithEncryptionKey: Starts an instance that was stopped using the +// instances().stop method. For more information, see Restart an +// instance. 
+func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { + c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetmachinetyperequest = instancessetmachinetyperequest + c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest return c } @@ -57964,7 +61759,7 @@ func (r *InstancesService) SetMachineType(project string, zone string, instance // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { +func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -57972,7 +61767,7 @@ func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetM // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { +func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57980,35 +61775,35 @@ func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { +func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMachineTypeCall) Header() http.Header { +func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -58023,14 +61818,14 @@ func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineType" call. +// Do executes the "compute.instances.startWithEncryptionKey" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58061,9 +61856,9 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", + // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", // "httpMethod": "POST", - // "id": "compute.instances.setMachineType", + // "id": "compute.instances.startWithEncryptionKey", // "parameterOrder": [ // "project", // "zone", @@ -58071,7 +61866,7 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name of the instance resource to start.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -58097,9 +61892,9 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", + // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", // "request": { - // "$ref": "InstancesSetMachineTypeRequest" + // "$ref": "InstancesStartWithEncryptionKeyRequest" // }, // "response": { // "$ref": "Operation" @@ -58112,28 +61907,30 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instances.setMetadata": +// method id "compute.instances.stop": -type InstancesSetMetadataCall struct { +type InstancesStopCall struct { s *Service project string zone string instance string - metadata *Metadata urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetMetadata: Sets metadata for the specified instance to the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata -func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { - c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Stop: Stops a running instance, shutting it down cleanly, and allows +// you to restart the instance at a later time. Stopped instances do not +// incur VM usage charges while they are stopped. 
However, resources +// that the VM is using, such as persistent disks and static IP +// addresses, will continue to be charged until they are deleted. For +// more information, see Stopping an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop +func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { + c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.metadata = metadata return c } @@ -58151,7 +61948,7 @@ func (r *InstancesService) SetMetadata(project string, zone string, instance str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { +func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { c.urlParams_.Set("requestId", requestId) return c } @@ -58159,7 +61956,7 @@ func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMeta // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { +func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58167,35 +61964,30 @@ func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { +func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMetadataCall) Header() http.Header { +func (c *InstancesStopCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -58210,14 +62002,14 @@ func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMetadata" call. +// Do executes the "compute.instances.stop" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58248,9 +62040,9 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets metadata for the specified instance to the data included in the request.", + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", // "httpMethod": "POST", - // "id": "compute.instances.setMetadata", + // "id": "compute.instances.stop", // "parameterOrder": [ // "project", // "zone", @@ -58258,7 +62050,7 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name of the instance resource to stop.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -58284,10 +62076,7 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", - // "request": { - // "$ref": "Metadata" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/stop", // "response": { // "$ref": "Operation" // }, @@ -58299,55 +62088,34 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instances.setMinCpuPlatform": +// method id "compute.instances.testIamPermissions": -type InstancesSetMinCpuPlatformCall struct { - s *Service - project string - zone string - instance string - instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMinCpuPlatform: Changes the minimum CPU platform that this -// instance should use. This method can only be called on a stopped -// instance. For more information, read Specifying a Minimum CPU -// Platform. -func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { - c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
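// Editorial sketch, not part of the vendored diff: the new
// compute.instances.testIamPermissions call returns the subset of the supplied
// permission names that the caller actually holds on the instance. Service
// construction is as in the earlier sketch; project, zone and instance names
// are placeholders, and the permission strings are only illustrative.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func checkInstancePermissions(ctx context.Context, svc *compute.Service) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.instances.start",
			"compute.instances.stop",
			"compute.instances.setMetadata",
		},
	}
	resp, err := svc.Instances.TestIamPermissions(
		"my-project", "us-central1-a", "my-instance", req,
	).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("granted permissions: %v", resp.Permissions)
}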
+func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { + c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { +func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58355,35 +62123,35 @@ func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *Instances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { +func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { +func (c *InstancesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -58393,19 +62161,19 @@ func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMinCpuPlatform" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58424,7 +62192,7 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58436,22 +62204,15 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. 
For more information, read Specifying a Minimum CPU Platform.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instances.setMinCpuPlatform", + // "id": "compute.instances.testIamPermissions", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -58459,9 +62220,11 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // }, // "zone": { @@ -58472,42 +62235,46 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", + // "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", // "request": { - // "$ref": "InstancesSetMinCpuPlatformRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setScheduling": +// method id "compute.instances.updateAccessConfig": -type InstancesSetSchedulingCall struct { - s *Service - project string - zone string - instance string - scheduling *Scheduling - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetScheduling: Sets an instance's scheduling options. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling -func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { - c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateAccessConfig: Updates the specified access config from an +// instance's network interface with the data included in the request. +// This method supports PATCH semantics and uses the JSON merge patch +// format and processing rules. +func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { + c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.scheduling = scheduling + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig return c } @@ -58525,7 +62292,7 @@ func (r *InstancesService) SetScheduling(project string, zone string, instance s // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { +func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *InstancesUpdateAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -58533,7 +62300,7 @@ func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { +func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58541,35 +62308,35 @@ func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { +func (c *InstancesUpdateAccessConfigCall) Context(ctx context.Context) *InstancesUpdateAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
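// Editorial sketch, not part of the vendored diff: updating an existing access
// config on an instance's network interface with the new
// compute.instances.updateAccessConfig method. The networkInterface argument
// ("nic0") is sent as a query parameter, while the AccessConfig body is merged
// using PATCH semantics. All names, and the static IP, are placeholders.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func updateExternalIP(ctx context.Context, svc *compute.Service) {
	ac := &compute.AccessConfig{
		Name:  "External NAT",
		Type:  "ONE_TO_ONE_NAT",
		NatIP: "203.0.113.10", // placeholder static address
	}
	op, err := svc.Instances.UpdateAccessConfig(
		"my-project", "us-central1-a", "my-instance", "nic0", ac,
	).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("updateAccessConfig operation: %s", op.Name)
}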
-func (c *InstancesSetSchedulingCall) Header() http.Header { +func (c *InstancesUpdateAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -58584,14 +62351,14 @@ func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setScheduling" call. +// Do executes the "compute.instances.updateAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58622,22 +62389,29 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Sets an instance's scheduling options.", + // "description": "Updates the specified access config from an instance's network interface with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "POST", - // "id": "compute.instances.setScheduling", + // "id": "compute.instances.updateAccessConfig", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instance", + // "networkInterface" // ], // "parameters": { // "instance": { - // "description": "Instance name for this request.", + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "networkInterface": { + // "description": "The name of the network interface where the access config is attached.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -58658,9 +62432,9 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + // "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", // "request": { - // "$ref": "Scheduling" + // "$ref": "AccessConfig" // }, // "response": { // "$ref": "Operation" @@ -58673,28 +62447,28 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.instances.setServiceAccount": +// method id "compute.instances.updateNetworkInterface": -type InstancesSetServiceAccountCall struct { - s *Service - project string - zone string - instance string - instancessetserviceaccountrequest *InstancesSetServiceAccountRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateNetworkInterfaceCall struct { + s *Service + project string + zone string + instance string + networkinterface *NetworkInterface + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetServiceAccount: Sets the service account on the instance. For more -// information, read Changing the service account and access scopes for -// an instance. -func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { - c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateNetworkInterface: Updates an instance's network interface. This +// method follows PATCH semantics. +func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { + c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetserviceaccountrequest = instancessetserviceaccountrequest + c.urlParams_.Set("networkInterface", networkInterface) + c.networkinterface = networkinterface return c } @@ -58712,7 +62486,7 @@ func (r *InstancesService) SetServiceAccount(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
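// Editorial sketch, not part of the vendored diff: patching an instance's
// network interface with compute.instances.updateNetworkInterface (a PATCH
// request, unlike most instance methods above). The sketch reads the current
// interface first so the request carries the interface's fingerprint; the alias
// range and all resource names are placeholders.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func addAliasRange(ctx context.Context, svc *compute.Service) {
	inst, err := svc.Instances.Get("my-project", "us-central1-a", "my-instance").
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	nic := inst.NetworkInterfaces[0]
	nic.AliasIpRanges = append(nic.AliasIpRanges, &compute.AliasIpRange{
		IpCidrRange: "10.128.10.0/28", // placeholder secondary range
	})
	op, err := svc.Instances.UpdateNetworkInterface(
		"my-project", "us-central1-a", "my-instance", nic.Name, nic,
	).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("updateNetworkInterface operation: %s", op.Name)
}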
-func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { +func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *InstancesUpdateNetworkInterfaceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -58720,7 +62494,7 @@ func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { +func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesUpdateNetworkInterfaceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58728,37 +62502,37 @@ func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *Instances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { +func (c *InstancesUpdateNetworkInterfaceCall) Context(ctx context.Context) *InstancesUpdateNetworkInterfaceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetServiceAccountCall) Header() http.Header { +func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkinterface) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -58771,14 +62545,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setServiceAccount" call. +// Do executes the "compute.instances.updateNetworkInterface" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58809,22 +62583,29 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.setServiceAccount", + // "description": "Updates an instance's network interface. This method follows PATCH semantics.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateNetworkInterface", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instance", + // "networkInterface" // ], // "parameters": { // "instance": { - // "description": "Name of the instance resource to start.", + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "networkInterface": { + // "description": "The name of the network interface to update.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -58845,9 +62626,9 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "path": "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", // "request": { - // "$ref": "InstancesSetServiceAccountRequest" + // "$ref": "NetworkInterface" // }, // "response": { // "$ref": "Operation" @@ -58860,28 +62641,29 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instances.setTags": +// method id "compute.instances.updateShieldedInstanceConfig": -type InstancesSetTagsCall struct { - s *Service - project string - zone string - instance string - tags *Tags - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateShieldedInstanceConfigCall struct { + s *Service + project string + zone string + instance string + shieldedinstanceconfig *ShieldedInstanceConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTags: Sets network tags for the specified instance to the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags -func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { - c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateShieldedInstanceConfig: Updates the Shielded Instance config +// for an instance. You can only use this method on a stopped instance. +// This method supports PATCH semantics and uses the JSON merge patch +// format and processing rules. 
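// Editorial sketch, not part of the vendored diff: enabling Shielded VM options
// through the new compute.instances.updateShieldedInstanceConfig method and
// waiting for the returned zone operation. The method only works on a stopped
// instance; the names are placeholders and the simple polling loop is
// illustrative rather than production-grade.
package main

import (
	"context"
	"log"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func enableShieldedVM(ctx context.Context, svc *compute.Service) {
	cfg := &compute.ShieldedInstanceConfig{
		EnableSecureBoot:          true,
		EnableVtpm:                true,
		EnableIntegrityMonitoring: true,
	}
	op, err := svc.Instances.UpdateShieldedInstanceConfig(
		"my-project", "us-central1-a", "my-instance", cfg,
	).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	// Poll the zone operation until it reports DONE.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.ZoneOperations.Get("my-project", "us-central1-a", op.Name).
			Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
	}
	if op.Error != nil {
		log.Fatalf("operation failed: %+v", op.Error.Errors)
	}
}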
+func (r *InstancesService) UpdateShieldedInstanceConfig(project string, zone string, instance string, shieldedinstanceconfig *ShieldedInstanceConfig) *InstancesUpdateShieldedInstanceConfigCall { + c := &InstancesUpdateShieldedInstanceConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.tags = tags + c.shieldedinstanceconfig = shieldedinstanceconfig return c } @@ -58899,7 +62681,7 @@ func (r *InstancesService) SetTags(project string, zone string, instance string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) RequestId(requestId string) *InstancesUpdateShieldedInstanceConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -58907,7 +62689,7 @@ func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateShieldedInstanceConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58915,37 +62697,37 @@ func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { +func (c *InstancesUpdateShieldedInstanceConfigCall) Context(ctx context.Context) *InstancesUpdateShieldedInstanceConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetTagsCall) Header() http.Header { +func (c *InstancesUpdateShieldedInstanceConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceconfig) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -58958,14 +62740,14 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setTags" call. 
+// Do executes the "compute.instances.updateShieldedInstanceConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58996,9 +62778,9 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Sets network tags for the specified instance to the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setTags", + // "description": "Updates the Shielded Instance config for an instance. You can only use this method on a stopped instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateShieldedInstanceConfig", // "parameterOrder": [ // "project", // "zone", @@ -59006,7 +62788,7 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name or id of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -59032,9 +62814,9 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setTags", + // "path": "{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig", // "request": { - // "$ref": "Tags" + // "$ref": "ShieldedInstanceConfig" // }, // "response": { // "$ref": "Operation" @@ -59047,85 +62829,157 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.instances.simulateMaintenanceEvent": +// method id "compute.interconnectAttachments.aggregatedList": -type InstancesSimulateMaintenanceEventCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SimulateMaintenanceEvent: Simulates a maintenance event on the -// instance. -func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { - c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of interconnect +// attachments. 
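// Editorial sketch, not part of the vendored diff: walking the aggregated list
// of interconnect attachments with the generated Pages helper, which follows
// nextPageToken automatically. The filter expression and project name are
// placeholders, and the Items map layout (scope -> scoped list) follows the
// usual shape of compute aggregated-list responses.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func listAttachments(ctx context.Context, svc *compute.Service) {
	call := svc.InterconnectAttachments.AggregatedList("my-project").
		Filter("name != example-attachment").
		MaxResults(100)
	err := call.Pages(ctx, func(page *compute.InterconnectAttachmentAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, att := range scoped.InterconnectAttachments {
				log.Printf("%s: %s", scope, att.Name)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}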
+func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { + c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { +func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { +func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { +func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.simulateMaintenanceEvent" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectAttachments.aggregatedList" call. +// Exactly one of *InterconnectAttachmentAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. 
Response headers are in +// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59144,7 +62998,7 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59156,20 +63010,34 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Simulates a maintenance event on the instance.", - // "httpMethod": "POST", - // "id": "compute.instances.simulateMaintenanceEvent", + // "description": "Retrieves an aggregated list of interconnect attachments.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -59178,47 +63046,60 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "path": "{project}/aggregated/interconnectAttachments", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectAttachmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.start": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesStartCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.interconnectAttachments.delete": + +type InterconnectAttachmentsDeleteCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Start: Starts an instance that was stopped using the instances().stop -// method. For more information, see Restart an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start -func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { - c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified interconnect attachment. 
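// Editorial sketch, not part of the vendored diff: deleting an interconnect
// attachment, using the optional requestId parameter so a retried request is
// treated as idempotent by the server. The UUID shown is a placeholder; a real
// caller would generate a fresh one per logical request.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func deleteAttachment(ctx context.Context, svc *compute.Service) {
	op, err := svc.InterconnectAttachments.Delete(
		"my-project", "us-central1", "my-attachment",
	).RequestId("3e0d3b5e-3f4d-4a53-9f3b-0d1c2e4f5a6b"). // placeholder UUID
		Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete operation: %s", op.Name)
}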
+func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { + c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + c.interconnectAttachment = interconnectAttachment return c } @@ -59236,7 +63117,7 @@ func (r *InstancesService) Start(project string, zone string, instance string) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { +func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59244,7 +63125,7 @@ func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { +func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59252,21 +63133,21 @@ func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { +func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStartCall) Header() http.Header { +func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -59275,29 +63156,29 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.start" call. +// Do executes the "compute.interconnectAttachments.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59328,17 +63209,17 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.start", + // "description": "Deletes the specified interconnect attachment.", + // "httpMethod": "DELETE", + // "id": "compute.interconnectAttachments.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -59351,20 +63232,20 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/start", + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "response": { // "$ref": "Operation" // }, @@ -59376,112 +63257,98 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.instances.startWithEncryptionKey": +// method id "compute.interconnectAttachments.get": -type InstancesStartWithEncryptionKeyCall struct { - s *Service - project string - zone string - instance string - instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsGetCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// StartWithEncryptionKey: Starts an instance that was stopped using the -// instances().stop method. For more information, see Restart an -// instance. -func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { - c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified interconnect attachment. +func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { + c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.interconnectAttachment = interconnectAttachment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
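The requestId parameter documented above is the caller's handle for safe retries: reusing the same UUID on a retry lets the server drop the duplicate request. A sketch of a delete call that uses it, assuming a *compute.Service built as in the earlier sketch; the project, region, attachment name, and UUID are placeholders.

package sketch

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v1" // version path is an assumption
)

// deleteAttachment issues compute.interconnectAttachments.delete with a
// caller-chosen request ID so the call can be retried without creating
// duplicate work on the server.
func deleteAttachment(ctx context.Context, svc *compute.Service) error {
    // In real code this would be a freshly generated UUID; reusing the same
    // value when retrying this particular call is what makes it idempotent.
    const requestID = "3f2504e0-4f89-11d3-9a0c-0305e82c3301" // placeholder UUID

    op, err := svc.InterconnectAttachments.
        Delete("my-project", "us-central1", "my-attachment"). // placeholders
        RequestId(requestID).
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    fmt.Printf("delete started: operation %s is %s\n", op.Name, op.Status)
    return nil
}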
-func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { +func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { +func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { +func (c *InterconnectAttachmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.startWithEncryptionKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectAttachments.get" call. 
+// Exactly one of *InterconnectAttachment or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectAttachment.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59500,7 +63367,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectAttachment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59512,17 +63379,17 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.startWithEncryptionKey", + // "description": "Returns the specified interconnect attachment.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -59535,58 +63402,46 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", - // "request": { - // "$ref": "InstancesStartWithEncryptionKeyRequest" - // }, + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectAttachment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.stop": +// method id "compute.interconnectAttachments.insert": -type InstancesStopCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsInsertCall struct { + s *Service + project string + region string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Stop: Stops a running instance, shutting it down cleanly, and allows -// you to restart the instance at a later time. Stopped instances do not -// incur VM usage charges while they are stopped. However, resources -// that the VM is using, such as persistent disks and static IP -// addresses, will continue to be charged until they are deleted. For -// more information, see Stopping an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop -func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { - c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an InterconnectAttachment in the specified project +// using the data included in the request. +func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { + c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + c.interconnectattachment = interconnectattachment return c } @@ -59604,7 +63459,7 @@ func (r *InstancesService) Stop(project string, zone string, instance string) *I // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { +func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59612,7 +63467,7 @@ func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
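The get call added above supports conditional reads: IfNoneMatch sends an If-None-Match header, and a 304 surfaces through googleapi.IsNotModified rather than as a normal response. A sketch, again assuming a *compute.Service from the first sketch and placeholder names; reading the new ETag off ServerResponse.Header follows the generated embedding shown in the Do body, but is otherwise an assumption.

package sketch

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v1" // version path is an assumption
    "google.golang.org/api/googleapi"
)

// getIfChanged refetches an interconnect attachment only when it has changed
// since the ETag recorded from a previous read.
func getIfChanged(ctx context.Context, svc *compute.Service, lastETag string) (*compute.InterconnectAttachment, error) {
    att, err := svc.InterconnectAttachments.
        Get("my-project", "us-central1", "my-attachment"). // placeholders
        IfNoneMatch(lastETag).
        Context(ctx).
        Do()
    if googleapi.IsNotModified(err) {
        // Server returned 304: the cached copy is still current.
        return nil, nil
    }
    if err != nil {
        return nil, err
    }
    fmt.Printf("fetched %s, new etag %q\n", att.Name, att.ServerResponse.Header.Get("Etag"))
    return att, nil
}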
-func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { +func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59620,30 +63475,35 @@ func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { +func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStopCall) Header() http.Header { +func (c *InterconnectAttachmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -59651,21 +63511,20 @@ func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.stop" call. +// Do executes the "compute.interconnectAttachments.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59696,26 +63555,25 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. 
For more information, see Stopping an instance.", + // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.stop", + // "id": "compute.interconnectAttachments.insert", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to stop.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -59723,16 +63581,12 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/stop", + // "path": "{project}/regions/{region}/interconnectAttachments", + // "request": { + // "$ref": "InterconnectAttachment" + // }, // "response": { // "$ref": "Operation" // }, @@ -59744,92 +63598,159 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.instances.testIamPermissions": +// method id "compute.interconnectAttachments.list": -type InstancesTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
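compute.interconnectAttachments.insert, wired up above, POSTs an InterconnectAttachment body and returns a regional Operation. A sketch of a create call; the Name and Router fields are assumptions drawn from the InterconnectAttachment schema defined elsewhere in this file, and all identifiers are placeholders.

package sketch

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v1" // version path is an assumption
)

// createAttachment creates an interconnect attachment in the given region and
// reports the operation that tracks it.
func createAttachment(ctx context.Context, svc *compute.Service) error {
    att := &compute.InterconnectAttachment{
        // Field names are assumptions based on the InterconnectAttachment
        // schema; only the request/response plumbing appears in this hunk.
        Name:   "my-attachment",
        Router: "https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/routers/my-router",
    }
    op, err := svc.InterconnectAttachments.
        Insert("my-project", "us-central1", att). // placeholders
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    fmt.Printf("insert started: operation %s\n", op.Name)
    return nil
}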
-func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { - c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect attachments contained within +// the specified region. +func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { + c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { +func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { +func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesTestIamPermissionsCall) Header() http.Header { +func (c *InterconnectAttachmentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use +// Do executes the "compute.interconnectAttachments.list" call. +// Exactly one of *InterconnectAttachmentList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectAttachmentList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59848,7 +63769,7 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &InterconnectAttachmentList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59860,15 +63781,37 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instances.testIamPermissions", + // "description": "Retrieves the list of interconnect attachments contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.list", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "region" // ], // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -59876,27 +63819,17 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/interconnectAttachments", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "InterconnectAttachmentList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -59907,30 +63840,49 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } -// method id "compute.instances.updateAccessConfig": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesUpdateAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.interconnectAttachments.patch": + +type InterconnectAttachmentsPatchCall struct { + s *Service + project string + region string + interconnectAttachment string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateAccessConfig: Updates the specified access config from an -// instance's network interface with the data included in the request. -// This method supports PATCH semantics and uses the JSON merge patch -// format and processing rules. 
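The regional list call above accepts the filter grammar documented in its Filter setter, and its Pages helper (also added above) performs the nextPageToken loop. A sketch combining the two; the filter string and names are placeholders, and the Items field on InterconnectAttachmentList is assumed from the generated list-type convention rather than shown in this hunk.

package sketch

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v1" // version path is an assumption
)

// listSmallPages walks every interconnect attachment in a region whose name
// is not "example-attachment", fetching 50 results per page.
func listSmallPages(ctx context.Context, svc *compute.Service) error {
    return svc.InterconnectAttachments.
        List("my-project", "us-central1").    // placeholders
        Filter(`name != example-attachment`). // filter grammar from the Filter doc above
        MaxResults(50).
        Pages(ctx, func(page *compute.InterconnectAttachmentList) error {
            for _, att := range page.Items { // Items assumed from the generated list types
                fmt.Println(att.Name)
            }
            return nil
        })
}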
-func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { - c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified interconnect attachment with the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { + c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig + c.region = region + c.interconnectAttachment = interconnectAttachment + c.interconnectattachment = interconnectattachment return c } @@ -59948,7 +63900,7 @@ func (r *InstancesService) UpdateAccessConfig(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *InstancesUpdateAccessConfigCall { +func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59956,7 +63908,7 @@ func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *Instances // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateAccessConfigCall { +func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59964,57 +63916,57 @@ func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateAccessConfigCall) Context(ctx context.Context) *InstancesUpdateAccessConfigCall { +func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesUpdateAccessConfigCall) Header() http.Header { +func (c *InterconnectAttachmentsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateAccessConfig" call. +// Do executes the "compute.interconnectAttachments.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60045,29 +63997,22 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "POST", - // "id": "compute.instances.updateAccessConfig", + // "description": "Updates the specified interconnect attachment with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.interconnectAttachments.patch", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "networkInterface" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface where the access config is attached.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -60075,22 +64020,22 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "request": { - // "$ref": "AccessConfig" + // "$ref": "InterconnectAttachment" // }, // "response": { // "$ref": "Operation" @@ -60103,112 +64048,97 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instances.updateNetworkInterface": +// method id "compute.interconnectLocations.get": -type InstancesUpdateNetworkInterfaceCall struct { - s *Service - project string - zone string - instance string - networkinterface *NetworkInterface - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectLocationsGetCall struct { + s *Service + project string + interconnectLocation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// UpdateNetworkInterface: Updates an instance's network interface. This -// method follows PATCH semantics. -func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { - c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the details for the specified interconnect location. +// Gets a list of available interconnect locations by making a list() +// request. +func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { + c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.networkinterface = networkinterface - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *InstancesUpdateNetworkInterfaceCall { - c.urlParams_.Set("requestId", requestId) + c.interconnectLocation = interconnectLocation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
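The patch method above uses JSON merge-patch semantics, so only the fields set on the request body are changed. A sketch that updates a single field; Description is an assumption drawn from the InterconnectAttachment schema, and the requestId shown is a placeholder.

package sketch

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v1" // version path is an assumption
)

// updateDescription patches only the description of an existing attachment;
// fields left unset in the request body are untouched by merge patch.
func updateDescription(ctx context.Context, svc *compute.Service, desc string) error {
    op, err := svc.InterconnectAttachments.
        Patch("my-project", "us-central1", "my-attachment", // placeholders
            &compute.InterconnectAttachment{
                Description: desc, // field name assumed from the schema
            }).
        RequestId("c56a4180-65aa-42ec-a945-5fd21dec0538"). // placeholder UUID for safe retries
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    fmt.Printf("patch started: operation %s\n", op.Name)
    return nil
}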
-func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesUpdateNetworkInterfaceCall { +func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateNetworkInterfaceCall) Context(ctx context.Context) *InstancesUpdateNetworkInterfaceCall { +func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { +func (c *InterconnectLocationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkinterface) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "interconnectLocation": c.interconnectLocation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateNetworkInterface" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectLocations.get" call. +// Exactly one of *InterconnectLocation or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *InterconnectLocation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60227,7 +64157,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectLocation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60239,67 +64169,45 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Updates an instance's network interface. This method follows PATCH semantics.", - // "httpMethod": "PATCH", - // "id": "compute.instances.updateNetworkInterface", + // "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.get", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "networkInterface" + // "interconnectLocation" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "interconnectLocation": { + // "description": "Name of the interconnect location to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface to update.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", - // "request": { - // "$ref": "NetworkInterface" - // }, + // "path": "{project}/global/interconnectLocations/{interconnectLocation}", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectLocation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.interconnectAttachments.aggregatedList": +// method id "compute.interconnectLocations.list": -type InterconnectAttachmentsAggregatedListCall struct { +type InterconnectLocationsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -60308,10 +64216,10 @@ type InterconnectAttachmentsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of interconnect -// attachments. -func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { - c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect locations available to the +// specified project. +func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { + c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -60338,7 +64246,7 @@ func (r *InterconnectAttachmentsService) AggregatedList(project string) *Interco // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { +func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { c.urlParams_.Set("filter", filter) return c } @@ -60349,7 +64257,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *Inter // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { +func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -60366,7 +64274,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) // // Currently, only sorting by name or creationTimestamp desc is // supported. 
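The interconnectLocations list call above is a read-only, project-scoped list, and the OrderBy documentation just above names the only two supported orders. A sketch asking for newest-first results; the project and the Items / Name fields on InterconnectLocationList are assumptions from the generated schema.

package sketch

import (
    "context"
    "fmt"

    compute "google.golang.org/api/compute/v1" // version path is an assumption
)

// newestLocations prints interconnect locations, newest first, using the only
// non-default sort order the API documents.
func newestLocations(ctx context.Context, svc *compute.Service) error {
    list, err := svc.InterconnectLocations.
        List("my-project").                // placeholder
        OrderBy("creationTimestamp desc"). // from the OrderBy documentation above
        Context(ctx).
        Do()
    if err != nil {
        return err
    }
    for _, loc := range list.Items { // Items/Name assumed from the generated schema
        fmt.Println(loc.Name)
    }
    return nil
}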
-func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { +func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -60374,7 +64282,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *Int // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { +func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -60382,7 +64290,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { +func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60392,7 +64300,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { +func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -60400,21 +64308,21 @@ func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { +func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { +func (c *InterconnectLocationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -60426,7 +64334,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -60439,15 +64347,14 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.aggregatedList" call. -// Exactly one of *InterconnectAttachmentAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { +// Do executes the "compute.interconnectLocations.list" call. +// Exactly one of *InterconnectLocationList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectLocationList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60466,7 +64373,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachmentAggregatedList{ + ret := &InterconnectLocationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60478,9 +64385,9 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Retrieves an aggregated list of interconnect attachments.", + // "description": "Retrieves the list of interconnect locations available to the specified project.", // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.aggregatedList", + // "id": "compute.interconnectLocations.list", // "parameterOrder": [ // "project" // ], @@ -60516,9 +64423,9 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // "type": "string" // } // }, - // "path": "{project}/aggregated/interconnectAttachments", + // "path": "{project}/global/interconnectLocations", // "response": { - // "$ref": "InterconnectAttachmentAggregatedList" + // "$ref": "InterconnectLocationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -60532,7 +64439,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
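A minimal sketch of how the renamed compute.interconnectLocations.list surface introduced in this hunk is typically driven, assuming this file belongs to the google.golang.org/api/compute/v1 package, that Application Default Credentials are available, and that "my-project" is a placeholder project ID; it relies only on the List, MaxResults, and Pages methods shown above.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// Assumes Application Default Credentials are configured in the environment.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// Pages follows nextPageToken for the global interconnectLocations.list
	// call; returning a non-nil error from the callback stops the iteration.
	err = svc.InterconnectLocations.List("my-project").MaxResults(100).
		Pages(ctx, func(page *compute.InterconnectLocationList) error {
			for _, loc := range page.Items {
				fmt.Println(loc.Name)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}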
-func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { +func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -60550,24 +64457,22 @@ func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f } } -// method id "compute.interconnectAttachments.delete": +// method id "compute.interconnects.delete": -type InterconnectAttachmentsDeleteCall struct { - s *Service - project string - region string - interconnectAttachment string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsDeleteCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified interconnect attachment. -func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { - c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified interconnect. +func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { + c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment + c.interconnect = interconnect return c } @@ -60585,7 +64490,7 @@ func (r *InterconnectAttachmentsService) Delete(project string, region string, i // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { +func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -60593,7 +64498,7 @@ func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *Interco // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { +func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60601,21 +64506,21 @@ func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *Interc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { +func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { +func (c *InterconnectsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -60624,7 +64529,7 @@ func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Respons var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -60632,21 +64537,20 @@ func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Respons } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.delete" call. +// Do executes the "compute.interconnects.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60677,19 +64581,18 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Deletes the specified interconnect attachment.", + // "description": "Deletes the specified interconnect.", // "httpMethod": "DELETE", - // "id": "compute.interconnectAttachments.delete", + // "id": "compute.interconnects.delete", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "interconnect" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to delete.", + // "interconnect": { + // "description": "Name of the interconnect to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -60700,20 +64603,13 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "{project}/global/interconnects/{interconnect}", // "response": { // "$ref": "Operation" // }, @@ -60725,32 +64621,187 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.interconnectAttachments.get": +// method id "compute.interconnects.get": -type InterconnectAttachmentsGetCall struct { - s *Service - project string - region string - interconnectAttachment string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InterconnectsGetCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified interconnect. Get a list of available +// interconnects by making a list() request. +func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { + c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnect = interconnect + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InterconnectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.get" call. +// Exactly one of *Interconnect or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Interconnect.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Interconnect{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified interconnect. 
Get a list of available interconnects by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnects.get", + // "parameterOrder": [ + // "project", + // "interconnect" + // ], + // "parameters": { + // "interconnect": { + // "description": "Name of the interconnect to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnects/{interconnect}", + // "response": { + // "$ref": "Interconnect" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.interconnects.getDiagnostics": + +type InterconnectsGetDiagnosticsCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified interconnect attachment. -func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { - c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetDiagnostics: Returns the interconnectDiagnostics for the specified +// interconnect. +func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { + c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment + c.interconnect = interconnect return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { +func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60760,7 +64811,7 @@ func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *Interconn // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { +func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { c.ifNoneMatch_ = entityTag return c } @@ -60768,21 +64819,21 @@ func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *Intercon // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
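A sketch of a conditional fetch through the renamed compute.interconnects.get call, using the IfNoneMatch setter and googleapi.IsNotModified exactly as the generated comments above describe; it assumes an already-authorized *compute.Service and the google.golang.org/api/compute/v1 package, and the helper name is illustrative.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getInterconnectIfChanged re-fetches an interconnect only when it has changed
// since lastETag and returns the (possibly unchanged) ETag for the next call.
func getInterconnectIfChanged(ctx context.Context, svc *compute.Service, project, name, lastETag string) (string, error) {
	call := svc.Interconnects.Get(project, name).Context(ctx)
	if lastETag != "" {
		call = call.IfNoneMatch(lastETag)
	}
	ic, err := call.Do()
	if googleapi.IsNotModified(err) {
		// 304 Not Modified: the cached copy is still current.
		return lastETag, nil
	}
	if err != nil {
		return "", err
	}
	fmt.Println("interconnect:", ic.Name)
	return ic.Header.Get("Etag"), nil
}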
-func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { +func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsGetCall) Header() http.Header { +func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -60794,7 +64845,7 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}/getDiagnostics") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -60802,21 +64853,21 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.get" call. -// Exactly one of *InterconnectAttachment or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InterconnectAttachment.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { +// Do executes the "compute.interconnects.getDiagnostics" call. +// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60835,7 +64886,7 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachment{ + ret := &InterconnectsGetDiagnosticsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60847,19 +64898,18 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte } return ret, nil // { - // "description": "Returns the specified interconnect attachment.", + // "description": "Returns the interconnectDiagnostics for the specified interconnect.", // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.get", + // "id": "compute.interconnects.getDiagnostics", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "interconnect" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to return.", + // "interconnect": { + // "description": "Name of the interconnect resource to query.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -60869,18 +64919,11 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "{project}/global/interconnects/{interconnect}/getDiagnostics", // "response": { - // "$ref": "InterconnectAttachment" + // "$ref": "InterconnectsGetDiagnosticsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -60891,25 +64934,23 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte } -// method id "compute.interconnectAttachments.insert": +// method id "compute.interconnects.insert": -type InterconnectAttachmentsInsertCall struct { - s *Service - project string - region string - interconnectattachment *InterconnectAttachment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsInsertCall struct { + s *Service + project string + interconnect *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an InterconnectAttachment in the specified project -// using the data included in the request. -func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { - c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Interconnect in the specified project using the +// data included in the request. 
+func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { + c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectattachment = interconnectattachment + c.interconnect = interconnect return c } @@ -60927,7 +64968,7 @@ func (r *InterconnectAttachmentsService) Insert(project string, region string, i // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { +func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -60935,7 +64976,7 @@ func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *Interco // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { +func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60943,35 +64984,35 @@ func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *Interc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { +func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsInsertCall) Header() http.Header { +func (c *InterconnectsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -60980,19 +65021,18 @@ func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Respons req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.insert" call. +// Do executes the "compute.interconnects.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61023,12 +65063,11 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", + // "description": "Creates a Interconnect in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.interconnectAttachments.insert", + // "id": "compute.interconnects.insert", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "project": { @@ -61038,22 +65077,15 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments", + // "path": "{project}/global/interconnects", // "request": { - // "$ref": "InterconnectAttachment" + // "$ref": "Interconnect" // }, // "response": { // "$ref": "Operation" @@ -61066,24 +65098,22 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.interconnectAttachments.list": +// method id "compute.interconnects.list": -type InterconnectAttachmentsListCall struct { +type InterconnectsListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of interconnect attachments contained within -// the specified region. -func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { - c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect available to the specified +// project. 
+func (r *InterconnectsService) List(project string) *InterconnectsListCall { + c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -61109,7 +65139,7 @@ func (r *InterconnectAttachmentsService) List(project string, region string) *In // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { +func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { c.urlParams_.Set("filter", filter) return c } @@ -61120,7 +65150,7 @@ func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAtt // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { +func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -61137,7 +65167,7 @@ func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *Intercon // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { +func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -61145,7 +65175,7 @@ func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectA // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { +func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -61153,7 +65183,7 @@ func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *Interconn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { +func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61163,7 +65193,7 @@ func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *Intercon // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { +func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { c.ifNoneMatch_ = entityTag return c } @@ -61171,21 +65201,21 @@ func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *Interco // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { +func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsListCall) Header() http.Header { +func (c *InterconnectsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -61197,7 +65227,7 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -61206,19 +65236,18 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.list" call. -// Exactly one of *InterconnectAttachmentList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InterconnectAttachmentList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.interconnects.list" call. +// Exactly one of *InterconnectList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { +func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61237,7 +65266,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachmentList{ + ret := &InterconnectList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61249,12 +65278,11 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Retrieves the list of interconnect attachments contained within the specified region.", + // "description": "Retrieves the list of interconnect available to the specified project.", // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.list", + // "id": "compute.interconnects.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -61286,18 +65314,11 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments", + // "path": "{project}/global/interconnects", // "response": { - // "$ref": "InterconnectAttachmentList" + // "$ref": "InterconnectList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -61311,7 +65332,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { +func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -61329,28 +65350,26 @@ func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*Int } } -// method id "compute.interconnectAttachments.patch": +// method id "compute.interconnects.patch": -type InterconnectAttachmentsPatchCall struct { - s *Service - project string - region string - interconnectAttachment string - interconnectattachment *InterconnectAttachment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsPatchCall struct { + s *Service + project string + interconnect string + interconnect2 *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified interconnect attachment with the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. 
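A sketch of the merge-patch style update exposed by the renamed compute.interconnects.patch call: only fields set on the sparse Interconnect body are sent, and requestID should be a caller-generated UUID so a retried request is de-duplicated server-side. Assumes an authorized *compute.Service; the helper name is illustrative.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// updateInterconnectDescription patches just the description field.
func updateInterconnectDescription(ctx context.Context, svc *compute.Service, project, name, desc, requestID string) error {
	patch := &compute.Interconnect{Description: desc} // sparse body: JSON merge patch
	op, err := svc.Interconnects.Patch(project, name, patch).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("operation:", op.Name, op.Status)
	return nil
}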
-func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { - c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified interconnect with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. +func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { + c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment - c.interconnectattachment = interconnectattachment + c.interconnect = interconnect + c.interconnect2 = interconnect2 return c } @@ -61368,7 +65387,7 @@ func (r *InterconnectAttachmentsService) Patch(project string, region string, in // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *InterconnectAttachmentsPatchCall { +func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -61376,7 +65395,7 @@ func (c *InterconnectAttachmentsPatchCall) RequestId(requestId string) *Intercon // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsPatchCall { +func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61384,35 +65403,35 @@ func (c *InterconnectAttachmentsPatchCall) Fields(s ...googleapi.Field) *Interco // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsPatchCall) Context(ctx context.Context) *InterconnectAttachmentsPatchCall { +func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectAttachmentsPatchCall) Header() http.Header { +func (c *InterconnectsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { @@ -61420,21 +65439,20 @@ func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.patch" call. +// Do executes the "compute.interconnects.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61465,19 +65483,18 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Updates the specified interconnect attachment with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified interconnect with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", - // "id": "compute.interconnectAttachments.patch", + // "id": "compute.interconnects.patch", // "parameterOrder": [ // "project", - // "region", - // "interconnectAttachment" + // "interconnect" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to patch.", + // "interconnect": { + // "description": "Name of the interconnect to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -61488,22 +65505,15 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "{project}/global/interconnects/{interconnect}", // "request": { - // "$ref": "InterconnectAttachment" + // "$ref": "Interconnect" // }, // "response": { // "$ref": "Operation" @@ -61516,32 +65526,31 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.interconnectLocations.get": +// method id "compute.licenseCodes.get": -type InterconnectLocationsGetCall struct { - s *Service - project string - interconnectLocation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type LicenseCodesGetCall struct { + s *Service + project string + licenseCode string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the details for the specified interconnect location. -// Gets a list of available interconnect locations by making a list() -// request. -func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { - c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Return a specified license code. License codes are mirrored +// across all projects that have permissions to read the License Code. 
+func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { + c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnectLocation = interconnectLocation + c.licenseCode = licenseCode return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { +func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61551,7 +65560,7 @@ func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *Interconnec // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { +func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -61559,21 +65568,21 @@ func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *Interconne // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { +func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectLocationsGetCall) Header() http.Header { +func (c *LicenseCodesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -61585,7 +65594,7 @@ func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{licenseCode}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -61593,20 +65602,20 @@ func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnectLocation": c.interconnectLocation, + "project": c.project, + "licenseCode": c.licenseCode, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectLocations.get" call. -// Exactly one of *InterconnectLocation or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InterconnectLocation.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { +// Do executes the "compute.licenseCodes.get" call. +// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LicenseCode.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61625,7 +65634,7 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectLocation{ + ret := &LicenseCode{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61637,18 +65646,18 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc } return ret, nil // { - // "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", + // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code.", // "httpMethod": "GET", - // "id": "compute.interconnectLocations.get", + // "id": "compute.licenseCodes.get", // "parameterOrder": [ // "project", - // "interconnectLocation" + // "licenseCode" // ], // "parameters": { - // "interconnectLocation": { - // "description": "Name of the interconnect location to return.", + // "licenseCode": { + // "description": "Number corresponding to the License code resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[0-9]{0,61}?", // "required": true, // "type": "string" // }, @@ -61660,9 +65669,9 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc // "type": "string" // } // }, - // "path": "{project}/global/interconnectLocations/{interconnectLocation}", + // "path": "{project}/global/licenseCodes/{licenseCode}", // "response": { - // "$ref": "InterconnectLocation" + // "$ref": "LicenseCode" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -61673,156 +65682,89 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc } -// method id "compute.interconnectLocations.list": +// method id "compute.licenseCodes.testIamPermissions": -type InterconnectLocationsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type LicenseCodesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of interconnect locations available to the -// specified project. -func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { - c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
+func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { + c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { +func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicenseCodesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { +func (c *LicenseCodesTestIamPermissionsCall) Context(ctx context.Context) *LicenseCodesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectLocationsListCall) Header() http.Header { +func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectLocations.list" call. -// Exactly one of *InterconnectLocationList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InterconnectLocationList.ServerResponse.Header or (if a response was +// Do executes the "compute.licenseCodes.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { +func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61841,7 +65783,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectLocationList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61853,47 +65795,35 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter } return ret, nil // { - // "description": "Retrieves the list of interconnect locations available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.interconnectLocations.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.licenseCodes.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnectLocations", + // "path": "{project}/global/licenseCodes/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "InterconnectLocationList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -61904,43 +65834,22 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.interconnects.delete": +// method id "compute.licenses.delete": -type InterconnectsDeleteCall struct { - s *Service - project string - interconnect string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesDeleteCall struct { + s *Service + project string + license string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified interconnect. -func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { - c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified license. +func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { + c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.license = license return c } @@ -61958,7 +65867,7 @@ func (r *InterconnectsService) Delete(project string, interconnect string) *Inte // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
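The companion testIamPermissions call posts a TestPermissionsRequest and echoes back only the permissions the caller actually holds. A sketch reusing a *compute.Service built as in the earlier example; the permission name and license code are illustrative:

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func checkLicenseCodePerms(ctx context.Context, svc *compute.Service) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.licenseCodes.get"},
	}
	resp, err := svc.LicenseCodes.TestIamPermissions("my-project", "1000201", req).Context(ctx).Do()
	if err != nil {
		return err
	}
	// resp.Permissions is the subset of the requested permissions the caller has.
	fmt.Println(resp.Permissions)
	return nil
}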
-func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { +func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -61966,7 +65875,7 @@ func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDele // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { +func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61974,21 +65883,21 @@ func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { +func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsDeleteCall) Header() http.Header { +func (c *LicensesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -61997,7 +65906,7 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -62005,20 +65914,20 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.delete" call. +// Do executes the "compute.licenses.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62049,18 +65958,18 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified interconnect.", + // "description": "Deletes the specified license.", // "httpMethod": "DELETE", - // "id": "compute.interconnects.delete", + // "id": "compute.licenses.delete", // "parameterOrder": [ // "project", - // "interconnect" + // "license" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to delete.", + // "license": { + // "description": "Name of the license resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -62077,7 +65986,7 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/global/licenses/{license}", // "response": { // "$ref": "Operation" // }, @@ -62089,31 +65998,31 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.interconnects.get": +// method id "compute.licenses.get": -type InterconnectsGetCall struct { +type LicensesGetCall struct { s *Service project string - interconnect string + license string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified interconnect. Get a list of available -// interconnects by making a list() request. -func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { - c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified License resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get +func (r *LicensesService) Get(project string, license string) *LicensesGetCall { + c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.license = license return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { +func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62123,7 +66032,7 @@ func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { +func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -62131,21 +66040,21 @@ func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
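License deletion is asynchronous and returns an Operation; the optional requestId described above makes retried requests idempotent. A rough sketch under the same assumptions, with a purely illustrative UUID and license name:

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func deleteLicense(ctx context.Context, svc *compute.Service) error {
	op, err := svc.Licenses.Delete("my-project", "my-custom-license").
		RequestId("3d7e9c3a-8f1b-4c5d-9a2e-1b2c3d4e5f60"). // dedupes accidental retries
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// The Operation can then be polled (e.g. via the GlobalOperations service) until DONE.
	fmt.Println(op.Name, op.Status)
	return nil
}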
-func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { +func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsGetCall) Header() http.Header { +func (c *LicensesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -62157,7 +66066,7 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -62165,20 +66074,20 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.get" call. -// Exactly one of *Interconnect or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Interconnect.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { +// Do executes the "compute.licenses.get" call. +// Exactly one of *License or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *License.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62197,7 +66106,7 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Interconnect{ + ret := &License{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62209,18 +66118,18 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, } return ret, nil // { - // "description": "Returns the specified interconnect. 
Get a list of available interconnects by making a list() request.", + // "description": "Returns the specified License resource.", // "httpMethod": "GET", - // "id": "compute.interconnects.get", + // "id": "compute.licenses.get", // "parameterOrder": [ // "project", - // "interconnect" + // "license" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to return.", + // "license": { + // "description": "Name of the License resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -62232,9 +66141,9 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/global/licenses/{license}", // "response": { - // "$ref": "Interconnect" + // "$ref": "License" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -62245,31 +66154,31 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, } -// method id "compute.interconnects.getDiagnostics": +// method id "compute.licenses.getIamPolicy": -type InterconnectsGetDiagnosticsCall struct { +type LicensesGetIamPolicyCall struct { s *Service project string - interconnect string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetDiagnostics: Returns the interconnectDiagnostics for the specified -// interconnect. -func (r *InterconnectsService) GetDiagnostics(project string, interconnect string) *InterconnectsGetDiagnosticsCall { - c := &InterconnectsGetDiagnosticsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { + c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *InterconnectsGetDiagnosticsCall { +func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62279,7 +66188,7 @@ func (c *InterconnectsGetDiagnosticsCall) Fields(s ...googleapi.Field) *Intercon // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *InterconnectsGetDiagnosticsCall { +func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -62287,21 +66196,21 @@ func (c *InterconnectsGetDiagnosticsCall) IfNoneMatch(entityTag string) *Interco // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
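Reads support conditional requests through IfNoneMatch, as the doc comments above describe: if the ETag still matches, Do returns an error for which googleapi.IsNotModified reports true. A small sketch; the license name and ETag handling are illustrative:

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func getLicenseIfChanged(ctx context.Context, svc *compute.Service, etag string) (*compute.License, error) {
	lic, err := svc.Licenses.Get("my-project", "my-custom-license").
		IfNoneMatch(etag). // skip the body when the resource is unchanged
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("license unchanged since", etag)
		return nil, nil
	}
	return lic, err
}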
-func (c *InterconnectsGetDiagnosticsCall) Context(ctx context.Context) *InterconnectsGetDiagnosticsCall { +func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { +func (c *LicensesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -62313,7 +66222,7 @@ func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}/getDiagnostics") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -62321,21 +66230,20 @@ func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.getDiagnostics" call. -// Exactly one of *InterconnectsGetDiagnosticsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InterconnectsGetDiagnosticsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetDiagnosticsResponse, error) { +// Do executes the "compute.licenses.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62354,7 +66262,7 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectsGetDiagnosticsResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62366,32 +66274,32 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Returns the interconnectDiagnostics for the specified interconnect.", + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.interconnects.getDiagnostics", + // "id": "compute.licenses.getIamPolicy", // "parameterOrder": [ // "project", - // "interconnect" + // "resource" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect resource to query.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name or id of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}/getDiagnostics", + // "path": "{project}/global/licenses/{resource}/getIamPolicy", // "response": { - // "$ref": "InterconnectsGetDiagnosticsResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -62402,23 +66310,22 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int } -// method id "compute.interconnects.insert": +// method id "compute.licenses.insert": -type InterconnectsInsertCall struct { - s *Service - project string - interconnect *Interconnect - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesInsertCall struct { + s *Service + project string + license *License + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a Interconnect in the specified project using the -// data included in the request. -func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { - c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Create a License resource in the specified project. +func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { + c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.license = license return c } @@ -62436,7 +66343,7 @@ func (r *InterconnectsService) Insert(project string, interconnect *Interconnect // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { +func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -62444,7 +66351,7 @@ func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInse // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
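GetIamPolicy returns the resource's Policy, whose bindings map roles to members. A sketch that just prints them, with placeholder project and license names:

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func dumpLicensePolicy(ctx context.Context, svc *compute.Service) error {
	pol, err := svc.Licenses.GetIamPolicy("my-project", "my-custom-license").Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, b := range pol.Bindings {
		fmt.Printf("%s -> %v\n", b.Role, b.Members)
	}
	return nil
}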
-func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { +func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62452,35 +66359,35 @@ func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { +func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsInsertCall) Header() http.Header { +func (c *LicensesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -62493,14 +66400,14 @@ func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.insert" call. +// Do executes the "compute.licenses.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62531,9 +66438,9 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a Interconnect in the specified project using the data included in the request.", + // "description": "Create a License resource in the specified project.", // "httpMethod": "POST", - // "id": "compute.interconnects.insert", + // "id": "compute.licenses.insert", // "parameterOrder": [ // "project" // ], @@ -62551,24 +66458,27 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/interconnects", + // "path": "{project}/global/licenses", // "request": { - // "$ref": "Interconnect" + // "$ref": "License" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.interconnects.list": +// method id "compute.licenses.list": -type InterconnectsListCall struct { +type LicensesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -62577,10 +66487,14 @@ type InterconnectsListCall struct { header_ http.Header } -// List: Retrieves the list of interconnect available to the specified -// project. -func (r *InterconnectsService) List(project string) *InterconnectsListCall { - c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of licenses available in the specified +// project. This method does not get any licenses that belong to other +// projects, including licenses attached to publicly-available images, +// like Debian 9. If you want to get a list of publicly-available +// licenses, use this method to make a request to the respective image +// project, such as debian-cloud or windows-cloud. +func (r *LicensesService) List(project string) *LicensesListCall { + c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -62607,7 +66521,7 @@ func (r *InterconnectsService) List(project string) *InterconnectsListCall { // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { +func (c *LicensesListCall) Filter(filter string) *LicensesListCall { c.urlParams_.Set("filter", filter) return c } @@ -62618,7 +66532,7 @@ func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { +func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -62635,7 +66549,7 @@ func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListC // // Currently, only sorting by name or creationTimestamp desc is // supported. 
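Insert creates a custom License resource and, like delete, returns an Operation; the extra devstorage scopes in the method metadata above come into play when the license is associated with an image stored in Cloud Storage. A sketch with placeholder values:

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func createLicense(ctx context.Context, svc *compute.Service) error {
	lic := &compute.License{
		Name:        "my-custom-license",
		Description: "example custom license", // hypothetical values
	}
	op, err := svc.Licenses.Insert("my-project", lic).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("insert operation:", op.Name)
	return nil
}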
-func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { +func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -62643,7 +66557,7 @@ func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { +func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -62651,7 +66565,7 @@ func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { +func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62661,7 +66575,7 @@ func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { +func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { c.ifNoneMatch_ = entityTag return c } @@ -62669,21 +66583,21 @@ func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsList // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { +func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsListCall) Header() http.Header { +func (c *LicensesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -62695,7 +66609,7 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -62708,14 +66622,14 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.list" call. -// Exactly one of *InterconnectList or error will be non-nil. Any +// Do executes the "compute.licenses.list" call. +// Exactly one of *LicensesListResponse or error will be non-nil. 
Any // non-2xx status code is an error. Response headers are in either -// *InterconnectList.ServerResponse.Header or (if a response was +// *LicensesListResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { +func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62734,7 +66648,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectList{ + ret := &LicensesListResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62746,9 +66660,9 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL } return ret, nil // { - // "description": "Retrieves the list of interconnect available to the specified project.", + // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", // "httpMethod": "GET", - // "id": "compute.interconnects.list", + // "id": "compute.licenses.list", // "parameterOrder": [ // "project" // ], @@ -62784,9 +66698,9 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // "type": "string" // } // }, - // "path": "{project}/global/interconnects", + // "path": "{project}/global/licenses", // "response": { - // "$ref": "InterconnectList" + // "$ref": "LicensesListResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -62800,7 +66714,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { +func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -62818,52 +66732,183 @@ func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectL } } -// method id "compute.interconnects.patch": +// method id "compute.licenses.setIamPolicy": + +type LicensesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. 
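Listing is paginated; instead of driving pageToken by hand, the generated Pages helper shown above walks every page and stops at the first error returned by the callback. A brief sketch:

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listProjectLicenses(ctx context.Context, svc *compute.Service) error {
	return svc.Licenses.List("my-project").
		MaxResults(100). // smaller pages, purely for illustration
		Pages(ctx, func(page *compute.LicensesListResponse) error {
			for _, lic := range page.Items {
				fmt.Println(lic.Name)
			}
			return nil // a non-nil error here would halt the iteration
		})
}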
+func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { + c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LicensesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.licenses.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.licenses.setIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/licenses/{resource}/setIamPolicy", + // "request": { + // "$ref": "GlobalSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.licenses.testIamPermissions": -type InterconnectsPatchCall struct { - s *Service - project string - interconnect string - interconnect2 *Interconnect - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified interconnect with the data included in -// the request. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. -func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { - c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { + c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect - c.interconnect2 = interconnect2 - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
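Because SetIamPolicy replaces the whole policy, the usual pattern is read-modify-write: fetch the current policy, append a binding, and send it back inside a GlobalSetPolicyRequest (the policy's ETag travels with it for optimistic concurrency). A hedged sketch; the role and member are placeholders:

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

func grantLicenseReader(ctx context.Context, svc *compute.Service) error {
	pol, err := svc.Licenses.GetIamPolicy("my-project", "my-custom-license").Context(ctx).Do()
	if err != nil {
		return err
	}
	pol.Bindings = append(pol.Bindings, &compute.Binding{
		Role:    "roles/compute.viewer",
		Members: []string{"user:alice@example.com"},
	})
	req := &compute.GlobalSetPolicyRequest{Policy: pol}
	_, err = svc.Licenses.SetIamPolicy("my-project", "my-custom-license", req).Context(ctx).Do()
	return err
}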
-func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { +func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62871,56 +66916,56 @@ func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { +func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsPatchCall) Header() http.Header { +func (c *LicensesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.licenses.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62939,7 +66984,7 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62951,21 +66996,14 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.interconnects.patch", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.licenses.testIamPermissions", // "parameterOrder": [ // "project", - // "interconnect" + // "resource" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -62973,52 +67011,116 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/global/licenses/{resource}/testIamPermissions", // "request": { - // "$ref": "Interconnect" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.licenseCodes.get": +// method id "compute.machineTypes.aggregatedList": -type LicenseCodesGetCall struct { +type MachineTypesAggregatedListCall struct { s *Service project string - licenseCode string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Return a specified license code. License codes are mirrored -// across all projects that have permissions to read the License Code. 
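A minimal sketch of how the Licenses.TestIamPermissions call generated above might be driven from client code, assuming a *compute.Service constructed elsewhere; the Permissions fields on TestPermissionsRequest/TestPermissionsResponse and the permission string are assumptions from the compute v1 schema rather than anything shown in this hunk.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// checkLicensePermissions asks the API which of the listed permissions the
// caller holds on a license resource, using the generated
// Licenses.TestIamPermissions call.
func checkLicensePermissions(ctx context.Context, svc *compute.Service, project, license string) error {
	// Permission name and the Permissions field are assumed from the
	// compute v1 schema; they are not part of this hunk.
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.licenses.get"},
	}
	resp, err := svc.Licenses.TestIamPermissions(project, license, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("granted permissions:", resp.Permissions)
	return nil
}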
-func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { - c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of machine types. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList +func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { + c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.licenseCode = licenseCode + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
+func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { +func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63028,7 +67130,7 @@ func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { +func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -63036,21 +67138,21 @@ func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { +func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicenseCodesGetCall) Header() http.Header { +func (c *MachineTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -63062,7 +67164,7 @@ func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{licenseCode}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -63070,20 +67172,19 @@ func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "licenseCode": c.licenseCode, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenseCodes.get" call. -// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *LicenseCode.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { +// Do executes the "compute.machineTypes.aggregatedList" call. +// Exactly one of *MachineTypeAggregatedList or error will be non-nil. 
+// Any non-2xx status code is an error. Response headers are in either +// *MachineTypeAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63102,7 +67203,7 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LicenseCode{ + ret := &MachineTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63114,19 +67215,34 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er } return ret, nil // { - // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code.", + // "description": "Retrieves an aggregated list of machine types.", // "httpMethod": "GET", - // "id": "compute.licenseCodes.get", + // "id": "compute.machineTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "licenseCode" + // "project" // ], // "parameters": { - // "licenseCode": { - // "description": "Number corresponding to the License code resource to return.", - // "location": "path", - // "pattern": "[0-9]{0,61}?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -63137,9 +67253,9 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er // "type": "string" // } // }, - // "path": "{project}/global/licenseCodes/{licenseCode}", + // "path": "{project}/aggregated/machineTypes", // "response": { - // "$ref": "LicenseCode" + // "$ref": "MachineTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -63150,89 +67266,121 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er } -// method id "compute.licenseCodes.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type LicenseCodesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.machineTypes.get": + +type MachineTypesGetCall struct { + s *Service + project string + zone string + machineType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *LicenseCodesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicenseCodesTestIamPermissionsCall { - c := &LicenseCodesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified machine type. Gets a list of available +// machine types by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get +func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { + c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.machineType = machineType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
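A minimal sketch of the MachineTypes.AggregatedList call and the Pages helper added above, again assuming a *compute.Service built elsewhere; the Items map on MachineTypeAggregatedList and the MachineTypes field on the scoped list are assumptions from the compute v1 schema, not shown in this hunk.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listMachineTypesEverywhere combines a filter expression, a page size and
// the generated Pages helper, which follows nextPageToken automatically.
func listMachineTypesEverywhere(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.MachineTypes.AggregatedList(project).
		Filter(`name = "n1-standard-1"`). // filter syntax as documented above
		MaxResults(250)                   // acceptable values are 0-500, default 500
	return call.Pages(ctx, func(page *compute.MachineTypeAggregatedList) error {
		// Items is assumed to be the per-zone map from the compute v1 schema.
		for zone, scoped := range page.Items {
			fmt.Printf("%s: %d machine type(s)\n", zone, len(scoped.MachineTypes))
		}
		return nil // a non-nil error here stops the iteration
	})
}

Returning a non-nil error from the callback halts iteration, and the deferred PageToken call in the generated Pages method restores the caller's original page token once iteration finishes.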
-func (c *LicenseCodesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicenseCodesTestIamPermissionsCall { +func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicenseCodesTestIamPermissionsCall) Context(ctx context.Context) *LicenseCodesTestIamPermissionsCall { +func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { +func (c *MachineTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "machineType": c.machineType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenseCodes.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.machineTypes.get" call. +// Exactly one of *MachineType or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *MachineType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63251,7 +67399,7 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &MachineType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63263,14 +67411,22 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.licenseCodes.testIamPermissions", + // "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.get", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "machineType" // ], // "parameters": { + // "machineType": { + // "description": "Name of the machine type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -63278,20 +67434,17 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenseCodes/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/machineTypes/{machineType}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "MachineType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -63302,100 +67455,160 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.licenses.delete": +// method id "compute.machineTypes.list": -type LicensesDeleteCall struct { - s *Service - project string - license string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified license. -func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { - c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of machine types available to the specified +// project. 
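A minimal sketch of the MachineTypes.Get call added above, combined with the If-None-Match / googleapi.IsNotModified pattern its comments describe; the etag handling and the MachineType field names printed here are assumptions, not part of this hunk.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getMachineType fetches a single machine type, skipping the body when the
// cached etag still matches (the server answers 304 in that case).
func getMachineType(ctx context.Context, svc *compute.Service, project, zone, name, etag string) error {
	call := svc.MachineTypes.Get(project, zone, name).Context(ctx)
	if etag != "" {
		call = call.IfNoneMatch(etag) // conditional fetch, as documented above
	}
	mt, err := call.Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("machine type unchanged since last fetch")
		return nil
	}
	if err != nil {
		return err
	}
	// Name, GuestCpus and MemoryMb are assumed from the compute v1 schema.
	fmt.Printf("%s: %d vCPUs, %d MB\n", mt.Name, mt.GuestCpus, mt.MemoryMb)
	return nil
}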
+// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list +func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { + c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.zone = zone return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). 
Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { +func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { +func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesDeleteCall) Header() http.Header { +func (c *MachineTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "license": c.license, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.machineTypes.list" call. +// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *MachineTypeList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63414,7 +67627,7 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &MachineTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63426,19 +67639,35 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified license.", - // "httpMethod": "DELETE", - // "id": "compute.licenses.delete", + // "description": "Retrieves a list of machine types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.list", // "parameterOrder": [ // "project", - // "license" + // "zone" // ], // "parameters": { - // "license": { - // "description": "Name of the license resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -63448,49 +67677,134 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenses/{license}", + // "path": "{project}/zones/{zone}/machineTypes", // "response": { - // "$ref": "Operation" + // "$ref": "MachineTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.licenses.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type LicensesGetCall struct { +// method id "compute.networkEndpointGroups.aggregatedList": + +type NetworkEndpointGroupsAggregatedListCall struct { s *Service project string - license string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified License resource. 
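A minimal sketch of the per-zone MachineTypes.List call added above, combining a partial response via Fields with the Pages helper; the field expressions and the Items/Name names are assumptions from the compute v1 schema, not shown in this hunk. When Fields is paired with Pages, nextPageToken has to be requested as well, since the generated Pages loop stops as soon as NextPageToken comes back empty.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listZoneMachineTypes prints the machine type names available in one zone,
// asking the server to return only the fields that are actually used.
func listZoneMachineTypes(ctx context.Context, svc *compute.Service, project, zone string) error {
	call := svc.MachineTypes.List(project, zone).
		Fields("items/name", "nextPageToken"). // request only what we print, plus the paging token
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.MachineTypeList) error {
		for _, mt := range page.Items {
			fmt.Println(mt.Name)
		}
		return nil
	})
}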
-// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get -func (r *LicensesService) Get(project string, license string) *LicensesGetCall { - c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of network endpoint groups and +// sorts them by zone. +func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { + c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. +// +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). +func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. 
Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63500,7 +67814,7 @@ func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { +func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -63508,21 +67822,21 @@ func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesGetCall) Header() http.Header { +func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -63534,7 +67848,7 @@ func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -63543,19 +67857,19 @@ func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.get" call. -// Exactly one of *License or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *License.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
-func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { +// Do executes the "compute.networkEndpointGroups.aggregatedList" call. +// Exactly one of *NetworkEndpointGroupAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63574,7 +67888,7 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &License{ + ret := &NetworkEndpointGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63586,19 +67900,34 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { } return ret, nil // { - // "description": "Returns the specified License resource.", + // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", // "httpMethod": "GET", - // "id": "compute.licenses.get", + // "id": "compute.networkEndpointGroups.aggregatedList", // "parameterOrder": [ - // "project", - // "license" + // "project" // ], // "parameters": { - // "license": { - // "description": "Name of the License resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -63609,9 +67938,9 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { // "type": "string" // } // }, - // "path": "{project}/global/licenses/{license}", + // "path": "{project}/aggregated/networkEndpointGroups", // "response": { - // "$ref": "License" + // "$ref": "NetworkEndpointGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -63622,96 +67951,132 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { } -// method id "compute.licenses.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type LicensesGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.networkEndpointGroups.attachNetworkEndpoints": + +type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { - c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AttachNetworkEndpoints: Attach a list of network endpoints to the +// specified network endpoint group. 
+func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *LicensesGetIamPolicyCall) Header() http.Header { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63730,7 +68095,7 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63742,14 +68107,21 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.licenses.getIamPolicy", + // "description": "Attach a list of network endpoints to the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -63757,43 +68129,54 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenses/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "request": { + // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.licenses.insert": +// method id "compute.networkEndpointGroups.delete": -type LicensesInsertCall struct { - s *Service - project string - license *License - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsDeleteCall struct { + s *Service + project string + zone string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Create a License resource in the specified project. -func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { - c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network endpoint group. The network +// endpoints in the NEG and the VM instances they belong to are not +// terminated when the NEG is deleted. 
Note that the NEG cannot be +// deleted if there are backend services referencing it. +func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { + c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup return c } @@ -63811,7 +68194,7 @@ func (r *LicensesService) Insert(project string, license *License) *LicensesInse // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { +func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -63819,7 +68202,7 @@ func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { +func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63827,55 +68210,52 @@ func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { +func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesInsertCall) Header() http.Header { +func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.insert" call. +// Do executes the "compute.networkEndpointGroups.delete" call. // Exactly one of *Operation or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63906,13 +68286,21 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Create a License resource in the specified project.", - // "httpMethod": "POST", - // "id": "compute.licenses.insert", + // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", + // "httpMethod": "DELETE", + // "id": "compute.networkEndpointGroups.delete", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "networkEndpointGroup" // ], // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -63924,180 +68312,131 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/licenses", - // "request": { - // "$ref": "License" - // }, + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.licenses.list": +// method id "compute.networkEndpointGroups.detachNetworkEndpoints": -type LicensesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of licenses available in the specified -// project. This method does not get any licenses that belong to other -// projects, including licenses attached to publicly-available images, -// like Debian 9. If you want to get a list of publicly-available -// licenses, use this method to make a request to the respective image -// project, such as debian-cloud or windows-cloud. -func (r *LicensesService) List(project string) *LicensesListCall { - c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DetachNetworkEndpoints: Detach a list of network endpoints from the +// specified network endpoint group. +func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest return c } -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. The expression must specify -// the field name, a comparison operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The comparison operator must be either =, !=, >, or <. -// -// For example, if you are filtering Compute Engine instances, you can -// exclude instances named example-instance by specifying name != -// example-instance. -// -// You can also filter nested fields. For example, you could specify -// scheduling.automaticRestart = false to include instances only if they -// are not scheduled for automatic restarts. You can use filtering on -// nested fields to filter based on resource labels. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart = true) -// (cpuPlatform = "Intel Skylake"). By default, each expression is an -// AND expression. 
However, you can include AND and OR expressions -// explicitly. For example, (cpuPlatform = "Intel Skylake") OR -// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = -// true). -func (c *LicensesListCall) Filter(filter string) *LicensesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { - c.urlParams_.Set("pageToken", pageToken) +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesListCall) Header() http.Header { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.list" call. -// Exactly one of *LicensesListResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *LicensesListResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { +// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64116,7 +68455,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LicensesListResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64128,161 +68467,148 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon } return ret, nil // { - // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 9. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", - // "httpMethod": "GET", - // "id": "compute.licenses.list", + // "description": "Detach a list of network endpoints from the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "networkEndpointGroup" // ], // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", - // "location": "query", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenses", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "request": { + // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" + // }, // "response": { - // "$ref": "LicensesListResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
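The detachNetworkEndpoints method documented in this hunk is the mirror image of attach: the same endpoint descriptor goes into a NetworkEndpointGroupsDetachEndpointsRequest and a zonal Operation comes back. A short sketch that reuses the svc client built in the attach example above; all names remain placeholders.

package negexample

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// detachEndpoint removes one endpoint from a NEG. The endpoint descriptor
// must match what was attached earlier; project/zone/NEG/instance names and
// the address are placeholders.
func detachEndpoint(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	req := &compute.NetworkEndpointGroupsDetachEndpointsRequest{
		NetworkEndpoints: []*compute.NetworkEndpoint{
			{Instance: "my-instance", IpAddress: "10.128.0.5", Port: 8080},
		},
	}
	return svc.NetworkEndpointGroups.
		DetachNetworkEndpoints("my-project", "us-central1-a", "my-neg", req).
		Context(ctx).
		Do()
}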
-func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.licenses.setIamPolicy": +// method id "compute.networkEndpointGroups.get": -type LicensesSetIamPolicyCall struct { - s *Service - project string - resource string - globalsetpolicyrequest *GlobalSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsGetCall struct { + s *Service + project string + zone string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *LicensesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *LicensesSetIamPolicyCall { - c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network endpoint group. Gets a list of +// available network endpoint groups by making a list() request. +func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { + c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetpolicyrequest = globalsetpolicyrequest + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { +func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { +func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
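The Get call added here keeps the IfNoneMatch option, so a caller can avoid re-downloading an unchanged NEG and use googleapi.IsNotModified to tell a 304 apart from a real error. A sketch under the same placeholder-name assumptions; lastETag stands in for the ETag recorded from an earlier response.

package negexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getNEG fetches a NEG only if it changed since the response identified by
// lastETag. A nil, nil return means the cached copy is still current.
func getNEG(ctx context.Context, svc *compute.Service, lastETag string) (*compute.NetworkEndpointGroup, error) {
	neg, err := svc.NetworkEndpointGroups.
		Get("my-project", "us-central1-a", "my-neg").
		IfNoneMatch(lastETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// HTTP 304: nothing changed since lastETag.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	fmt.Println("NEG:", neg.Name, "type:", neg.NetworkEndpointType)
	return neg, nil
}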
-func (c *LicensesSetIamPolicyCall) Header() http.Header { +func (c *NetworkEndpointGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.networkEndpointGroups.get" call. +// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroup.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64301,7 +68627,7 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64313,14 +68639,21 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.licenses.setIamPolicy", + // "description": "Returns the specified network endpoint group. 
Gets a list of available network endpoint groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.networkEndpointGroups.get", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -64328,55 +68661,71 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenses/{resource}/setIamPolicy", - // "request": { - // "$ref": "GlobalSetPolicyRequest" - // }, + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", // "response": { - // "$ref": "Policy" + // "$ref": "NetworkEndpointGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.licenses.testIamPermissions": +// method id "compute.networkEndpointGroups.insert": -type LicensesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsInsertCall struct { + s *Service + project string + zone string + networkendpointgroup *NetworkEndpointGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { - c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a network endpoint group in the specified project +// using the parameters that are included in the request. +func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { + c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.networkendpointgroup = networkendpointgroup + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { +func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64384,35 +68733,35 @@ func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { +func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesTestIamPermissionsCall) Header() http.Header { +func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -64420,20 +68769,20 @@ func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
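Insert, like the other mutating NEG calls above, returns a zonal Operation rather than the created resource, so a caller normally polls ZoneOperations until the operation reports DONE. A hedged sketch: the GCE_VM_IP_PORT endpoint type, the network/subnetwork paths and every name are illustrative assumptions, and real code would bound the polling loop with a timeout or backoff.

package negexample

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v1"
)

// createNEG creates a zonal NEG and blocks until the returned Operation is
// DONE. All resource names and URLs below are placeholders.
func createNEG(ctx context.Context, svc *compute.Service) error {
	neg := &compute.NetworkEndpointGroup{
		Name:                "my-neg",
		NetworkEndpointType: "GCE_VM_IP_PORT",
		Network:             "global/networks/default",
		Subnetwork:          "regions/us-central1/subnetworks/default",
		DefaultPort:         8080,
	}
	op, err := svc.NetworkEndpointGroups.
		Insert("my-project", "us-central1-a", neg).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// Naive polling loop; production code would add a deadline.
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		op, err = svc.ZoneOperations.Get("my-project", "us-central1-a", op.Name).Context(ctx).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("insert failed: %+v", op.Error.Errors)
	}
	return nil
}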
-func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.networkEndpointGroups.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64452,7 +68801,7 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64464,12 +68813,12 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.licenses.testIamPermissions", + // "id": "compute.networkEndpointGroups.insert", // "parameterOrder": [ // "project", - // "resource" + // "zone" // ], // "parameters": { // "project": { @@ -64479,46 +68828,51 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the network endpoint group. 
It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenses/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/networkEndpointGroups", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "NetworkEndpointGroup" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.machineTypes.aggregatedList": +// method id "compute.networkEndpointGroups.list": -type MachineTypesAggregatedListCall struct { +type NetworkEndpointGroupsListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of machine types. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList -func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { - c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of network endpoint groups that are located +// in the specified project and zone. +func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { + c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -64544,7 +68898,7 @@ func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggreg // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { +func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("filter", filter) return c } @@ -64555,7 +68909,7 @@ func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggr // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { +func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -64572,7 +68926,7 @@ func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTy // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { +func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -64580,7 +68934,7 @@ func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAg // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
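The List call accepts the filter, maxResults, orderBy and pageToken parameters described above, and the generated Pages helper (shown further down in this hunk) drives the pagination loop. A sketch whose filter simply echoes the documented example expression; names are placeholders.

package negexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listNEGs prints every NEG in one zone, page by page. The filter string
// mirrors the "name != example-instance" example from the parameter docs.
func listNEGs(ctx context.Context, svc *compute.Service) error {
	return svc.NetworkEndpointGroups.
		List("my-project", "us-central1-a").
		Filter("name != example-instance").
		MaxResults(100).
		Pages(ctx, func(page *compute.NetworkEndpointGroupList) error {
			for _, neg := range page.Items {
				fmt.Println(neg.Name, "endpoints:", neg.Size)
			}
			return nil
		})
}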
-func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { +func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -64588,7 +68942,7 @@ func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTyp // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { +func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64598,7 +68952,7 @@ func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTy // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { +func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -64606,21 +68960,21 @@ func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { +func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesAggregatedListCall) Header() http.Header { +func (c *NetworkEndpointGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -64632,7 +68986,7 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -64641,18 +68995,19 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.aggregatedList" call. -// Exactly one of *MachineTypeAggregatedList or error will be non-nil. +// Do executes the "compute.networkEndpointGroups.list" call. +// Exactly one of *NetworkEndpointGroupList or error will be non-nil. // Any non-2xx status code is an error. 
Response headers are in either -// *MachineTypeAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { +func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64671,7 +69026,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineTypeAggregatedList{ + ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64683,11 +69038,12 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach } return ret, nil // { - // "description": "Retrieves an aggregated list of machine types.", + // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", // "httpMethod": "GET", - // "id": "compute.machineTypes.aggregatedList", + // "id": "compute.networkEndpointGroups.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -64719,11 +69075,17 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/machineTypes", + // "path": "{project}/zones/{zone}/networkEndpointGroups", // "response": { - // "$ref": "MachineTypeAggregatedList" + // "$ref": "NetworkEndpointGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -64737,7 +69099,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { +func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -64755,193 +69117,27 @@ func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*Mach } } -// method id "compute.machineTypes.get": - -type MachineTypesGetCall struct { - s *Service - project string - zone string - machineType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified machine type. 
Gets a list of available -// machine types by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get -func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { - c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.machineType = machineType - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *MachineTypesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "machineType": c.machineType, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.machineTypes.get" call. -// Exactly one of *MachineType or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *MachineType.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &MachineType{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.get", - // "parameterOrder": [ - // "project", - // "zone", - // "machineType" - // ], - // "parameters": { - // "machineType": { - // "description": "Name of the machine type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/machineTypes/{machineType}", - // "response": { - // "$ref": "MachineType" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.machineTypes.list": +// method id "compute.networkEndpointGroups.listNetworkEndpoints": -type MachineTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsListNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of machine types available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list -func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { - c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListNetworkEndpoints: Lists the network endpoints in the specified +// network endpoint group. 
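The listNetworkEndpoints method introduced here (constructor just below) takes a request body whose healthStatus field selects whether health information is returned, and its response pairs each endpoint with any health records. A sketch under the same placeholder assumptions; the response field names used (Items, NetworkEndpoint, Healths) come from the generated schema types, which are not part of this hunk.

package negexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listEndpoints prints each endpoint in a NEG together with the number of
// health records reported for it. Names are placeholders; pagination via
// NextPageToken is omitted for brevity.
func listEndpoints(ctx context.Context, svc *compute.Service) error {
	req := &compute.NetworkEndpointGroupsListEndpointsRequest{HealthStatus: "SHOW"}
	resp, err := svc.NetworkEndpointGroups.
		ListNetworkEndpoints("my-project", "us-central1-a", "my-neg", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, item := range resp.Items {
		ep := item.NetworkEndpoint
		fmt.Printf("%s %s:%d healths=%d\n", ep.Instance, ep.IpAddress, ep.Port, len(item.Healths))
	}
	return nil
}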
+func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { + c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest return c } @@ -64967,7 +69163,7 @@ func (r *MachineTypesService) List(project string, zone string) *MachineTypesLis // explicitly. For example, (cpuPlatform = "Intel Skylake") OR // (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = // true). -func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("filter", filter) return c } @@ -64978,7 +69174,7 @@ func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -64995,7 +69191,7 @@ func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCal // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -65003,7 +69199,7 @@ func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -65011,72 +69207,67 @@ func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesListCall) Header() http.Header { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.list" call. -// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *MachineTypeList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { +// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. +// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65095,7 +69286,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineTypeList{ + ret := &NetworkEndpointGroupsListNetworkEndpoints{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65107,12 +69298,13 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis } return ret, nil // { - // "description": "Retrieves a list of machine types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.list", + // "description": "Lists the network endpoints in the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.listNetworkEndpoints", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { // "filter": { @@ -65128,6 +69320,12 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "minimum": "0", // "type": "integer" // }, + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", @@ -65146,16 +69344,18 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/machineTypes", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + // "request": { + // "$ref": "NetworkEndpointGroupsListEndpointsRequest" + // }, // "response": { - // "$ref": "MachineTypeList" + // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -65169,7 +69369,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
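The Pages helper documented just above handles the pageToken bookkeeping itself; a minimal sketch of walking every page with it, reusing the placeholder names, imports, and assumed response field names from the earlier sketch:

    // listAllNetworkEndpoints visits every page of results; returning a
    // non-nil error from the callback halts the iteration early.
    func listAllNetworkEndpoints(ctx context.Context, svc *compute.Service, project, zone, neg string) error {
        req := &compute.NetworkEndpointGroupsListEndpointsRequest{}
        call := svc.NetworkEndpointGroups.ListNetworkEndpoints(project, zone, neg, req)
        return call.Pages(ctx, func(page *compute.NetworkEndpointGroupsListNetworkEndpoints) error {
            for _, item := range page.Items {
                fmt.Println(item.NetworkEndpoint.IpAddress)
            }
            return nil
        })
    }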
-func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -65187,6 +69387,169 @@ func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeLis } } +// method id "compute.networkEndpointGroups.testIamPermissions": + +type NetworkEndpointGroupsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { + c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.testIamPermissions", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.networks.addPeering": type NetworksAddPeeringCall struct { @@ -66762,7 +71125,7 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e // "nodeGroup": { // "description": "Name of the NodeGroup resource.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -67192,189 +71555,189 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err // "nodeGroup": { // "description": "Name of the NodeGroup resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.nodeGroups.deleteNodes": - -type NodeGroupsDeleteNodesCall struct { - s *Service - project string - zone string - nodeGroup string - nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// DeleteNodes: Deletes specified nodes from the node group. -func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { - c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.nodeGroup = nodeGroup - c.nodegroupsdeletenodesrequest = nodegroupsdeletenodesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NodeGroupsDeleteNodesCall) RequestId(requestId string) *NodeGroupsDeleteNodesCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *NodeGroupsDeleteNodesCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteNodesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *NodeGroupsDeleteNodesCall) Context(ctx context.Context) *NodeGroupsDeleteNodesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *NodeGroupsDeleteNodesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsdeletenodesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "nodeGroup": c.nodeGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.nodeGroups.deleteNodes" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes specified nodes from the node group.", - // "httpMethod": "POST", - // "id": "compute.nodeGroups.deleteNodes", - // "parameterOrder": [ - // "project", - // "zone", - // "nodeGroup" - // ], - // "parameters": { - // "nodeGroup": { - // "description": "Name of the NodeGroup resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/nodeGroups/{nodeGroup}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.nodeGroups.deleteNodes": + +type NodeGroupsDeleteNodesCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteNodes: Deletes specified nodes from the node group. +func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { + c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.nodeGroup = nodeGroup + c.nodegroupsdeletenodesrequest = nodegroupsdeletenodesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsDeleteNodesCall) RequestId(requestId string) *NodeGroupsDeleteNodesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeGroupsDeleteNodesCall) Fields(s ...googleapi.Field) *NodeGroupsDeleteNodesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
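Given the requestId semantics described above, a retry-safe wrapper around DeleteNodes might pin one UUID per logical request so retries are ignored by the server; a minimal sketch, assuming the imports from the first sketch plus github.com/google/uuid, and assuming the Nodes field of NodeGroupsDeleteNodesRequest, which is not shown in this hunk:

    // deleteNode removes a single node from a node group. Reusing the same
    // requestId on retries lets the server drop duplicate submissions.
    func deleteNode(ctx context.Context, svc *compute.Service, project, zone, nodeGroup, node string) (*compute.Operation, error) {
        req := &compute.NodeGroupsDeleteNodesRequest{Nodes: []string{node}}
        return svc.NodeGroups.
            DeleteNodes(project, zone, nodeGroup, req).
            RequestId(uuid.New().String()).
            Context(ctx).
            Do()
    }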
+func (c *NodeGroupsDeleteNodesCall) Context(ctx context.Context) *NodeGroupsDeleteNodesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeGroupsDeleteNodesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupsdeletenodesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/nodeGroups/{nodeGroup}/deleteNodes") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeGroups.deleteNodes" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes specified nodes from the node group.", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.deleteNodes", + // "parameterOrder": [ + // "project", + // "zone", + // "nodeGroup" + // ], + // "parameters": { + // "nodeGroup": { + // "description": "Name of the NodeGroup resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -67549,7 +71912,7 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) // "nodeGroup": { // "description": "Name of the node group to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -68392,7 +72755,7 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL // "nodeGroup": { // "description": "Name of the NodeGroup resource whose nodes you want to list.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -68761,9 +73124,9 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera // ], // "parameters": { // "nodeGroup": { - // "description": "Name of the NodeGroup resource to delete.", + // "description": "Name of the NodeGroup resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -69355,7 +73718,7 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "nodeTemplate": { // "description": "Name of the NodeTemplate resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -69526,7 +73889,7 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, // "nodeTemplate": { // "description": "Name of the node template to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -70874,7 +75237,7 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { // "nodeType": { // "description": "Name of the node type to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -73558,7 +77921,7 @@ func (c *RegionAutoscalersDeleteCall) Do(opts 
...googleapi.CallOption) (*Operati // "autoscaler": { // "description": "Name of the autoscaler to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -73728,7 +78091,7 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler // "autoscaler": { // "description": "Name of the autoscaler to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -74349,7 +78712,7 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "autoscaler": { // "description": "Name of the autoscaler to patch.", // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "type": "string" // }, // "project": { @@ -74537,7 +78900,7 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "autoscaler": { // "description": "Name of the autoscaler to update.", // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "type": "string" // }, // "project": { @@ -74714,7 +79077,7 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "backendService": { // "description": "Name of the BackendService resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -74884,7 +79247,7 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen // "backendService": { // "description": "Name of the BackendService resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -75044,7 +79407,7 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* // "backendService": { // "description": "Name of the BackendService resource for which to get health.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -75858,7 +80221,7 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // "backendService": { // "description": "Name of the BackendService resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -76283,7 +80646,7 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment // "commitment": { // "description": "Name of the commitment to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -76888,7 +81251,7 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er // "diskType": { // "description": "Name of the disk type to return.", // "location": "path", - // 
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -82043,7 +86406,7 @@ func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "operation": { // "description": "Name of the Operations resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -82206,7 +86569,7 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, // "operation": { // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -82641,7 +87004,7 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { // "region": { // "description": "Name of the region resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -83321,7 +87684,7 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "router": { // "description": "Name of the Router resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -83487,7 +87850,7 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { // "router": { // "description": "Name of the Router resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -83740,7 +88103,7 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp // "router": { // "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -83928,7 +88291,7 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt // "router": { // "description": "Name of the Router resource to query.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -84551,7 +88914,7 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "router": { // "description": "Name of the Router resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -84714,7 +89077,7 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe // "router": { // "description": "Name of the Router resource to query.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // 
} @@ -84901,7 +89264,7 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "router": { // "description": "Name of the Router resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -85791,7 +90154,7 @@ func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati // "securityPolicy": { // "description": "Name of the security policy to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -85958,7 +90321,7 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "securityPolicy": { // "description": "Name of the security policy to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -86113,7 +90476,7 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol // "securityPolicy": { // "description": "Name of the security policy to get.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -86281,7 +90644,7 @@ func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Securit // "securityPolicy": { // "description": "Name of the security policy to which the queried rule belongs.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -86870,7 +91233,7 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "securityPolicy": { // "description": "Name of the security policy to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -87033,7 +91396,7 @@ func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera // "securityPolicy": { // "description": "Name of the security policy to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -87189,7 +91552,7 @@ func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper // "securityPolicy": { // "description": "Name of the security policy to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -88544,7 +92907,7 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "sslCertificate": { // "description": "Name of the SslCertificate resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -88699,7 +93062,7 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica // 
"sslCertificate": { // "description": "Name of the SslCertificate resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -90685,7 +95048,7 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "subnetwork": { // "description": "Name of the Subnetwork resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -90868,7 +95231,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op // "subnetwork": { // "description": "Name of the Subnetwork resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -91037,7 +95400,7 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro // "subnetwork": { // "description": "Name of the Subnetwork resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -92079,7 +96442,7 @@ func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err // "subnetwork": { // "description": "Name of the Subnetwork resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -92428,7 +96791,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio // "subnetwork": { // "description": "Name of the Subnetwork resource.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -92759,7 +97122,7 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "targetHttpProxy": { // "description": "Name of the TargetHttpProxy resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -92915,7 +97278,7 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp // "targetHttpProxy": { // "description": "Name of the TargetHttpProxy resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -93506,7 +97869,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper // "targetHttpProxy": { // "description": "Name of the TargetHttpProxy to set a URL map for.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -93673,7 +98036,7 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat // "targetHttpsProxy": { // "description": "Name of the TargetHttpsProxy resource to delete.", // 
"location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -93828,7 +98191,7 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt // "targetHttpsProxy": { // "description": "Name of the TargetHttpsProxy resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -94589,7 +98952,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti // "targetHttpsProxy": { // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -94940,7 +99303,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope // "targetHttpsProxy": { // "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -95364,7 +99727,7 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "targetInstance": { // "description": "Name of the TargetInstance resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -95531,7 +99894,7 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan // "targetInstance": { // "description": "Name of the TargetInstance resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -96162,7 +100525,7 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera // "targetPool": { // "description": "Name of the target pool to add a health check to.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -96348,7 +100711,7 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio // "targetPool": { // "description": "Name of the TargetPool resource to add instances to.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -96779,7 +101142,7 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "targetPool": { // "description": "Name of the TargetPool resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -96946,7 +101309,7 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro // "targetPool": { // "description": "Name of the TargetPool resource to return.", // "location": 
"path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -97107,7 +101470,7 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool // "targetPool": { // "description": "Name of the TargetPool resource to which the queried instance belongs.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -97734,7 +102097,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op // "targetPool": { // "description": "Name of the target pool to remove health checks from.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -97920,7 +102283,7 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera // "targetPool": { // "description": "Name of the TargetPool resource to remove instances from.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -98119,7 +102482,7 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, // "targetPool": { // "description": "Name of the TargetPool resource to set a backup pool for.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -98286,7 +102649,7 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "targetSslProxy": { // "description": "Name of the TargetSslProxy resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -98441,7 +102804,7 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr // "targetSslProxy": { // "description": "Name of the TargetSslProxy resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -99029,7 +103392,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) // "targetSslProxy": { // "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -99203,7 +103566,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* // "targetSslProxy": { // "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -99377,7 +103740,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption // "targetSslProxy": { // "description": "Name of the TargetSslProxy 
resource whose SslCertificate resource is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -99720,7 +104083,7 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "targetTcpProxy": { // "description": "Name of the TargetTcpProxy resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -99875,7 +104238,7 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr // "targetTcpProxy": { // "description": "Name of the TargetTcpProxy resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -100463,7 +104826,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) // "targetTcpProxy": { // "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -100637,7 +105000,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* // "targetTcpProxy": { // "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -101066,7 +105429,7 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "targetVpnGateway": { // "description": "Name of the target VPN gateway to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -101232,7 +105595,7 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG // "targetVpnGateway": { // "description": "Name of the target VPN gateway to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -101836,7 +106199,7 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "urlMap": { // "description": "Name of the UrlMap resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -101992,7 +106355,7 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { // "urlMap": { // "description": "Name of the UrlMap resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -102330,7 +106693,7 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio // "urlMap": { // "description": "Name of the UrlMap scoping this 
request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -102760,7 +107123,7 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "urlMap": { // "description": "Name of the UrlMap resource to patch.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -102936,7 +107299,7 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "urlMap": { // "description": "Name of the UrlMap resource to update.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -103089,7 +107452,7 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate // "urlMap": { // "description": "Name of the UrlMap resource to be validated as.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -103518,7 +107881,7 @@ func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err // "vpnTunnel": { // "description": "Name of the VpnTunnel resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -103684,7 +108047,7 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) // "vpnTunnel": { // "description": "Name of the VpnTunnel resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } @@ -104236,7 +108599,7 @@ func (c *ZoneOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "operation": { // "description": "Name of the Operations resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -104399,7 +108762,7 @@ func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, er // "operation": { // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -104834,7 +109197,7 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { // "zone": { // "description": "Name of the zone resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } diff --git a/vendor/google.golang.org/api/container/v1/BUILD.bazel b/vendor/google.golang.org/api/container/v1/BUILD.bazel index b4c78badd6dcf..2e7f067620a14 100644 --- a/vendor/google.golang.org/api/container/v1/BUILD.bazel +++ b/vendor/google.golang.org/api/container/v1/BUILD.bazel @@ -9,5 +9,7 @@ go_library( deps = [ 
"//vendor/google.golang.org/api/gensupport:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/google.golang.org/api/option:go_default_library", + "//vendor/google.golang.org/api/transport/http:go_default_library", ], ) diff --git a/vendor/google.golang.org/api/container/v1/container-api.json b/vendor/google.golang.org/api/container/v1/container-api.json index e8718f8f34811..7cb9634b7835f 100644 --- a/vendor/google.golang.org/api/container/v1/container-api.json +++ b/vendor/google.golang.org/api/container/v1/container-api.json @@ -106,10 +106,59 @@ "resources": { "projects": { "resources": { + "aggregated": { + "resources": { + "usableSubnetworks": { + "methods": { + "list": { + "description": "Lists subnetworks that are usable for creating clusters in a project.", + "flatPath": "v1/projects/{projectsId}/aggregated/usableSubnetworks", + "httpMethod": "GET", + "id": "container.projects.aggregated.usableSubnetworks.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Filtering currently only supports equality on the networkProjectId and must\nbe in the form: \"networkProjectId=[PROJECTID]\", where `networkProjectId`\nis the project which owns the listed subnetworks. This defaults to the\nparent project ID.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "The max number of results per page that should be returned. If the number\nof available results is larger than `page_size`, a `next_page_token` is\nreturned which can be used to get the next page of results in subsequent\nrequests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Specifies a page token to use. Set this to the nextPageToken returned by\nprevious list requests to get the next page of results.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "The parent project where subnetworks are usable.\nSpecified in the format 'projects/*'.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/aggregated/usableSubnetworks", + "response": { + "$ref": "ListUsableSubnetworksResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + }, "locations": { "methods": { "getServerConfig": { - "description": "Returns configuration info about the Kubernetes Engine service.", + "description": "Returns configuration info about the Google Kubernetes Engine service.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/serverConfig", "httpMethod": "GET", "id": "container.projects.locations.getServerConfig", @@ -118,7 +167,7 @@ ], "parameters": { "name": { - "description": "The name (project and location) of the server config to get\nSpecified in the format 'projects/*/locations/*'.", + "description": "The name (project and location) of the server config to get,\nspecified in the format 'projects/*/locations/*'.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, @@ -176,7 +225,7 @@ ] }, "create": { - "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. 
After cluster creation,\nthe cluster creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range is being used by the cluster.", + "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe Kubelet creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range the cluster is using.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters", "httpMethod": "POST", "id": "container.projects.locations.clusters.create", @@ -204,7 +253,7 @@ ] }, "delete": { - "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster\n(e.g. load balancer resources) will not be deleted if they weren't present\nat the initial create time.", + "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster,\nsuch as load balancer resources, are not deleted if they weren't present\nwhen the cluster was initially created.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}", "httpMethod": "DELETE", "id": "container.projects.locations.clusters.delete", @@ -283,6 +332,28 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "getJwks": { + "description": "Gets the public component of the cluster signing keys in\nJSON Web Key format.\nThis API is not yet intended for general use, and is not available for all\nclusters.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/jwks", + "httpMethod": "GET", + "id": "container.projects.locations.clusters.getJwks", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The cluster (project, location, cluster id) to get keys for. Specified in\nthe format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/jwks", + "response": { + "$ref": "GetJSONWebKeysResponse" + } + }, "list": { "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters", @@ -459,7 +530,7 @@ ] }, "setMasterAuth": { - "description": "Used to set master auth materials. Currently supports :-\nChanging the admin password for a specific cluster.\nThis can be either via password generation or explicitly set the password.", + "description": "Sets master auth materials. 
Currently supports changing the admin password\nor a specific cluster, either via password generation or explicitly setting\nthe password.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMasterAuth", "httpMethod": "POST", "id": "container.projects.locations.clusters.setMasterAuth", @@ -515,7 +586,7 @@ ] }, "setNetworkPolicy": { - "description": "Enables/Disables Network Policy for a cluster.", + "description": "Enables or disables Network Policy for a cluster.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setNetworkPolicy", "httpMethod": "POST", "id": "container.projects.locations.clusters.setNetworkPolicy", @@ -571,7 +642,7 @@ ] }, "startIpRotation": { - "description": "Start master IP rotation.", + "description": "Starts master IP rotation.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:startIpRotation", "httpMethod": "POST", "id": "container.projects.locations.clusters.startIpRotation", @@ -732,7 +803,7 @@ ] }, "get": { - "description": "Retrieves the node pool requested.", + "description": "Retrieves the requested node pool.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}", "httpMethod": "GET", "id": "container.projects.locations.clusters.nodePools.get", @@ -817,7 +888,7 @@ ] }, "rollback": { - "description": "Roll back the previously Aborted or Failed NodePool upgrade.\nThis will be an no-op if the last upgrade successfully completed.", + "description": "Rolls back a previously Aborted or Failed NodePool upgrade.\nThis makes no changes if the last upgrade successfully completed.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:rollback", "httpMethod": "POST", "id": "container.projects.locations.clusters.nodePools.rollback", @@ -845,7 +916,7 @@ ] }, "setAutoscaling": { - "description": "Sets the autoscaling settings for a specific node pool.", + "description": "Sets the autoscaling settings for the specified node pool.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:setAutoscaling", "httpMethod": "POST", "id": "container.projects.locations.clusters.nodePools.setAutoscaling", @@ -929,7 +1000,7 @@ ] }, "update": { - "description": "Updates the version and/or image type for a specific node pool.", + "description": "Updates the version and/or image type for the specified node pool.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}", "httpMethod": "PUT", "id": "container.projects.locations.clusters.nodePools.update", @@ -957,6 +1028,32 @@ ] } } + }, + "well-known": { + "methods": { + "getOpenid-configuration": { + "description": "Gets the OIDC discovery document for the cluster.\nSee the\n[OpenID Connect Discovery 1.0\nspecification](https://openid.net/specs/openid-connect-discovery-1_0.html)\nfor details.\nThis API is not yet intended for general use, and is not available for all\nclusters.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/.well-known/openid-configuration", + "httpMethod": "GET", + "id": "container.projects.locations.clusters.well-known.getOpenid-configuration", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The cluster (project, location, cluster id) to get the discovery document\nfor. 
Specified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/.well-known/openid-configuration", + "response": { + "$ref": "GetOpenIDConfigResponse" + } + } + } } } }, @@ -1072,7 +1169,7 @@ "zones": { "methods": { "getServerconfig": { - "description": "Returns configuration info about the Kubernetes Engine service.", + "description": "Returns configuration info about the Google Kubernetes Engine service.", "flatPath": "v1/projects/{projectId}/zones/{zone}/serverconfig", "httpMethod": "GET", "id": "container.projects.zones.getServerconfig", @@ -1082,7 +1179,7 @@ ], "parameters": { "name": { - "description": "The name (project and location) of the server config to get\nSpecified in the format 'projects/*/locations/*'.", + "description": "The name (project and location) of the server config to get,\nspecified in the format 'projects/*/locations/*'.", "location": "query", "type": "string" }, @@ -1194,7 +1291,7 @@ ] }, "create": { - "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe cluster creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range is being used by the cluster.", + "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe Kubelet creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range the cluster is using.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters", "httpMethod": "POST", "id": "container.projects.zones.clusters.create", @@ -1228,7 +1325,7 @@ ] }, "delete": { - "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster\n(e.g. load balancer resources) will not be deleted if they weren't present\nat the initial create time.", + "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster,\nsuch as load balancer resources, are not deleted if they weren't present\nwhen the cluster was initially created.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", "httpMethod": "DELETE", "id": "container.projects.zones.clusters.delete", @@ -1637,7 +1734,7 @@ ] }, "setMasterAuth": { - "description": "Used to set master auth materials. 
Currently supports :-\nChanging the admin password for a specific cluster.\nThis can be either via password generation or explicitly set the password.", + "description": "Sets master auth materials. Currently supports changing the admin password\nor a specific cluster, either via password generation or explicitly setting\nthe password.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth", "httpMethod": "POST", "id": "container.projects.zones.clusters.setMasterAuth", @@ -1678,7 +1775,7 @@ ] }, "setNetworkPolicy": { - "description": "Enables/Disables Network Policy for a cluster.", + "description": "Enables or disables Network Policy for a cluster.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setNetworkPolicy", "httpMethod": "POST", "id": "container.projects.zones.clusters.setNetworkPolicy", @@ -1719,7 +1816,7 @@ ] }, "startIpRotation": { - "description": "Start master IP rotation.", + "description": "Starts master IP rotation.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:startIpRotation", "httpMethod": "POST", "id": "container.projects.zones.clusters.startIpRotation", @@ -1805,7 +1902,7 @@ "nodePools": { "methods": { "autoscaling": { - "description": "Sets the autoscaling settings for a specific node pool.", + "description": "Sets the autoscaling settings for the specified node pool.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/autoscaling", "httpMethod": "POST", "id": "container.projects.zones.clusters.nodePools.autoscaling", @@ -1944,7 +2041,7 @@ ] }, "get": { - "description": "Retrieves the node pool requested.", + "description": "Retrieves the requested node pool.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", "httpMethod": "GET", "id": "container.projects.zones.clusters.nodePools.get", @@ -2037,7 +2134,7 @@ ] }, "rollback": { - "description": "Roll back the previously Aborted or Failed NodePool upgrade.\nThis will be an no-op if the last upgrade successfully completed.", + "description": "Rolls back a previously Aborted or Failed NodePool upgrade.\nThis makes no changes if the last upgrade successfully completed.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback", "httpMethod": "POST", "id": "container.projects.zones.clusters.nodePools.rollback", @@ -2181,7 +2278,7 @@ ] }, "update": { - "description": "Updates the version and/or image type for a specific node pool.", + "description": "Updates the version and/or image type for the specified node pool.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/update", "httpMethod": "POST", "id": "container.projects.zones.clusters.nodePools.update", @@ -2361,7 +2458,7 @@ } } }, - "revision": "20181109", + "revision": "20190514", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2500,9 +2597,13 @@ "type": "integer" }, "currentNodeVersion": { - "description": "[Output only] Deprecated, use\n[NodePool.version](/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters.nodePool)\ninstead. The current version of the node software components. 
If they are\ncurrently at multiple versions because they're in the process of being\nupgraded, this reflects the minimum version of all nodes.", + "description": "[Output only] Deprecated, use\n[NodePools.version](/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters.nodePools)\ninstead. The current version of the node software components. If they are\ncurrently at multiple versions because they're in the process of being\nupgraded, this reflects the minimum version of all nodes.", "type": "string" }, + "defaultMaxPodsConstraint": { + "$ref": "MaxPodsConstraint", + "description": "The default constraint on the maximum number of pods that can be run\nsimultaneously on a node in the node pool of this cluster. Only honored\nif cluster created with IP Alias support." + }, "description": { "description": "An optional description of this cluster.", "type": "string" @@ -2511,6 +2612,10 @@ "description": "Kubernetes alpha features are enabled on this cluster. This includes alpha\nAPI groups (e.g. v1alpha1) and features that may not be production ready in\nthe kubernetes version of the master and nodes.\nThe cluster has no SLA for uptime and master/node upgrades are disabled.\nAlpha enabled clusters are automatically deleted thirty days after\ncreation.", "type": "boolean" }, + "enableTpu": { + "description": "Enable the ability to use Cloud TPUs in this cluster.", + "type": "boolean" + }, "endpoint": { "description": "[Output only] The IP address of this cluster's master endpoint.\nThe endpoint can be accessed from the internet at\n`https://username:password@endpoint/`.\n\nSee the `masterAuth` property of this resource for username and\npassword information.", "type": "string" @@ -2524,7 +2629,7 @@ "type": "string" }, "initialNodeCount": { - "description": "The number of nodes to create in this cluster. You must ensure that your\nCompute Engine \u003ca href=\"/compute/docs/resource-quotas\"\u003eresource quota\u003c/a\u003e\nis sufficient for this number of instances. You must also have available\nfirewall and routes quota.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"node_config\") will be used to create a \"NodePool\" object with an\nauto-generated name. Do not use this and a node_pool at the same time.", + "description": "The number of nodes to create in this cluster. You must ensure that your\nCompute Engine \u003ca href=\"/compute/docs/resource-quotas\"\u003eresource quota\u003c/a\u003e\nis sufficient for this number of instances. You must also have available\nfirewall and routes quota.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"node_config\") will be used to create a \"NodePool\" object with an\nauto-generated name. Do not use this and a node_pool at the same time.\n\nThis field is deprecated, use node_pool.initial_node_count instead.", "format": "int32", "type": "integer" }, @@ -2596,10 +2701,10 @@ }, "nodeConfig": { "$ref": "NodeConfig", - "description": "Parameters used in creating the cluster's nodes.\nSee `nodeConfig` for the description of its properties.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"initial_node_count\") will be used to create a \"NodePool\" object with an\nauto-generated name. 
Do not use this and a node_pool at the same time.\nFor responses, this field will be populated with the node configuration of\nthe first node pool.\n\nIf unspecified, the defaults are used." + "description": "Parameters used in creating the cluster's nodes.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"initial_node_count\") will be used to create a \"NodePool\" object with an\nauto-generated name. Do not use this and a node_pool at the same time.\nFor responses, this field will be populated with the node configuration of\nthe first node pool. (For configuration of each node pool, see\n`node_pool.config`)\n\nIf unspecified, the defaults are used.\nThis field is deprecated, use node_pool.config instead." }, "nodeIpv4CidrSize": { - "description": "[Output only] The size of the address space on each node for hosting\ncontainers. This is provisioned from within the `container_ipv4_cidr`\nrange.", + "description": "[Output only] The size of the address space on each node for hosting\ncontainers. This is provisioned from within the `container_ipv4_cidr`\nrange. This field will only be set when cluster is in route-based network\nmode.", "format": "int32", "type": "integer" }, @@ -2659,6 +2764,10 @@ "description": "The name of the Google Compute Engine\n[subnetwork](/compute/docs/subnetworks) to which the\ncluster is connected.", "type": "string" }, + "tpuIpv4CidrBlock": { + "description": "[Output only] The IP address range of the Cloud TPUs in this cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `1.2.3.4/29`).", + "type": "string" + }, "zone": { "description": "[Output only] The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field is deprecated, use location instead.", "type": "string" @@ -2794,7 +2903,7 @@ "type": "string" }, "startTime": { - "description": "Time within the maintenance window to start the maintenance operations.\nTime format should be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)\nformat \"HH:MM”, where HH : [00-23] and MM : [00-59] GMT.", + "description": "Time within the maintenance window to start the maintenance operations.\nTime format should be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)\nformat \"HH:MM\", where HH : [00-23] and MM : [00-59] GMT.", "type": "string" } }, @@ -2806,6 +2915,70 @@ "properties": {}, "type": "object" }, + "GetJSONWebKeysResponse": { + "description": "GetJSONWebKeysResponse is a valid JSON Web Key Set as specififed in rfc 7517", + "id": "GetJSONWebKeysResponse", + "properties": { + "keys": { + "description": "The public component of the keys used by the cluster to sign token\nrequests.", + "items": { + "$ref": "Jwk" + }, + "type": "array" + } + }, + "type": "object" + }, + "GetOpenIDConfigResponse": { + "description": "GetOpenIDConfigResponse is an OIDC discovery document for the cluster.\nSee the OpenID Connect Discovery 1.0 specification for details.", + "id": "GetOpenIDConfigResponse", + "properties": { + "claims_supported": { + "description": "Supported claims.", + "items": { + "type": "string" + }, + "type": "array" + }, + "grant_types": { + "description": "Supported grant types.", + "items": { + "type": "string" + }, + "type": "array" + }, + "id_token_signing_alg_values_supported": { + "description": "supported ID Token signing Algorithms.", + "items": { + "type": "string" + }, + "type": "array" + }, + "issuer": { + "description": "OIDC 
Issuer.", + "type": "string" + }, + "jwks_uri": { + "description": "JSON Web Key uri.", + "type": "string" + }, + "response_types_supported": { + "description": "Supported response types.", + "items": { + "type": "string" + }, + "type": "array" + }, + "subject_types_supported": { + "description": "Supported subject types.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "HorizontalPodAutoscaling": { "description": "Configuration options for the horizontal pod autoscaling feature, which\nincreases or decreases the number of replica pods a replication controller\nhas based on the resource usage of the existing pods.", "id": "HorizontalPodAutoscaling", @@ -2872,6 +3045,10 @@ "description": "A custom subnetwork name to be used if `create_subnetwork` is true. If\nthis field is empty, then an automatic name will be chosen for the new\nsubnetwork.", "type": "string" }, + "tpuIpv4CidrBlock": { + "description": "The IP address range of the Cloud TPUs in this cluster. If unspecified, a\nrange will be automatically chosen with the default size.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nIf unspecified, the range will use the default size.\n\nSet to /netmask (e.g. `/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "type": "string" + }, "useIpAliases": { "description": "Whether alias IPs will be used for pod IPs in the cluster.", "type": "boolean" @@ -2879,6 +3056,49 @@ }, "type": "object" }, + "Jwk": { + "description": "Jwk is a JSON Web Key as specified in RFC 7517", + "id": "Jwk", + "properties": { + "alg": { + "description": "Algorithm.", + "type": "string" + }, + "crv": { + "description": "Used for ECDSA keys.", + "type": "string" + }, + "e": { + "description": "Used for RSA keys.", + "type": "string" + }, + "kid": { + "description": "Key ID.", + "type": "string" + }, + "kty": { + "description": "Key Type.", + "type": "string" + }, + "n": { + "description": "Used for RSA keys.", + "type": "string" + }, + "use": { + "description": "Permitted uses for the public keys.", + "type": "string" + }, + "x": { + "description": "Used for ECDSA keys.", + "type": "string" + }, + "y": { + "description": "Used for ECDSA keys.", + "type": "string" + } + }, + "type": "object" + }, "KubernetesDashboard": { "description": "Configuration for the Kubernetes Dashboard.", "id": "KubernetesDashboard", @@ -2957,6 +3177,24 @@ }, "type": "object" }, + "ListUsableSubnetworksResponse": { + "description": "ListUsableSubnetworksResponse is the response of\nListUsableSubnetworksRequest.", + "id": "ListUsableSubnetworksResponse", + "properties": { + "nextPageToken": { + "description": "This token allows you to get the next page of results for list requests.\nIf the number of results is larger than `page_size`, use the\n`next_page_token` as a value for the query parameter `page_token` in the\nnext request. 
The value will become empty when there are no more pages.", + "type": "string" + }, + "subnetworks": { + "description": "A list of usable subnetworks in the specified network project.", + "items": { + "$ref": "UsableSubnetwork" + }, + "type": "array" + } + }, + "type": "object" + }, "MaintenancePolicy": { "description": "MaintenancePolicy defines the maintenance policy to be used for the cluster.", "id": "MaintenancePolicy", @@ -3015,7 +3253,7 @@ "id": "MasterAuthorizedNetworksConfig", "properties": { "cidrBlocks": { - "description": "cidr_blocks define up to 10 external networks that could access\nKubernetes master through HTTPS.", + "description": "cidr_blocks define up to 50 external networks that could access\nKubernetes master through HTTPS.", "items": { "$ref": "CidrBlock" }, @@ -3028,6 +3266,18 @@ }, "type": "object" }, + "MaxPodsConstraint": { + "description": "Constraints applied to pods.", + "id": "MaxPodsConstraint", + "properties": { + "maxPodsPerNode": { + "description": "Constraint enforced on the max num of pods per node.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "NetworkConfig": { "description": "NetworkConfig reports the relative names of network \u0026 subnetwork.", "id": "NetworkConfig", @@ -3121,7 +3371,7 @@ "additionalProperties": { "type": "string" }, - "description": "The metadata key/value pairs assigned to instances in the cluster.\n\nKeys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes\nin length. These are reflected as part of a URL in the metadata server.\nAdditionally, to avoid ambiguity, keys must not conflict with any other\nmetadata keys for the project or be one of the reserved keys:\n \"cluster-location\"\n \"cluster-name\"\n \"cluster-uid\"\n \"configure-sh\"\n \"enable-os-login\"\n \"gci-update-strategy\"\n \"gci-ensure-gke-docker\"\n \"instance-template\"\n \"kube-env\"\n \"startup-script\"\n \"user-data\"\n\nValues are free-form strings, and only have meaning as interpreted by\nthe image running in the instance. The only restriction placed on them is\nthat each value's size must be less than or equal to 32 KB.\n\nThe total size of all keys and values must be less than 512 KB.", + "description": "The metadata key/value pairs assigned to instances in the cluster.\n\nKeys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes\nin length. These are reflected as part of a URL in the metadata server.\nAdditionally, to avoid ambiguity, keys must not conflict with any other\nmetadata keys for the project or be one of the reserved keys:\n \"cluster-location\"\n \"cluster-name\"\n \"cluster-uid\"\n \"configure-sh\"\n \"containerd-configure-sh\"\n \"enable-os-login\"\n \"gci-update-strategy\"\n \"gci-ensure-gke-docker\"\n \"instance-template\"\n \"kube-env\"\n \"startup-script\"\n \"user-data\"\n\nValues are free-form strings, and only have meaning as interpreted by\nthe image running in the instance. The only restriction placed on them is\nthat each value's size must be less than or equal to 32 KB.\n\nThe total size of all keys and values must be less than 512 KB.", "type": "object" }, "minCpuPlatform": { @@ -3214,10 +3464,19 @@ "$ref": "NodeManagement", "description": "NodeManagement configuration for this NodePool." }, + "maxPodsConstraint": { + "$ref": "MaxPodsConstraint", + "description": "The constraint on the maximum number of pods that can be run\nsimultaneously on a node in the node pool." 
+ }, "name": { "description": "The name of the node pool.", "type": "string" }, + "podIpv4CidrSize": { + "description": "[Output only] The pod CIDR block size per node in this node pool.", + "format": "int32", + "type": "integer" + }, "selfLink": { "description": "[Output only] Server-defined URL for the resource.", "type": "string" @@ -4021,6 +4280,69 @@ } }, "type": "object" + }, + "UsableSubnetwork": { + "description": "UsableSubnetwork resource returns the subnetwork name, its associated network\nand the primary CIDR range.", + "id": "UsableSubnetwork", + "properties": { + "ipCidrRange": { + "description": "The range of internal addresses that are owned by this subnetwork.", + "type": "string" + }, + "network": { + "description": "Network Name.\nExample: projects/my-project/global/networks/my-network", + "type": "string" + }, + "secondaryIpRanges": { + "description": "Secondary IP ranges.", + "items": { + "$ref": "UsableSubnetworkSecondaryRange" + }, + "type": "array" + }, + "statusMessage": { + "description": "A human readable status message representing the reasons for cases where\nthe caller cannot use the secondary ranges under the subnet. For example if\nthe secondary_ip_ranges is empty due to a permission issue, an insufficient\npermission message will be given by status_message.", + "type": "string" + }, + "subnetwork": { + "description": "Subnetwork Name.\nExample: projects/my-project/regions/us-central1/subnetworks/my-subnet", + "type": "string" + } + }, + "type": "object" + }, + "UsableSubnetworkSecondaryRange": { + "description": "Secondary IP range of a usable subnetwork.", + "id": "UsableSubnetworkSecondaryRange", + "properties": { + "ipCidrRange": { + "description": "The range of IP addresses belonging to this subnetwork secondary range.", + "type": "string" + }, + "rangeName": { + "description": "The name associated with this subnetwork secondary range, used when adding\nan alias IP range to a VM instance.", + "type": "string" + }, + "status": { + "description": "This field is to determine the status of the secondary range programmably.", + "enum": [ + "UNKNOWN", + "UNUSED", + "IN_USE_SERVICE", + "IN_USE_SHAREABLE_POD", + "IN_USE_MANAGED_POD" + ], + "enumDescriptions": [ + "UNKNOWN is the zero value of the Status enum. It's not a valid status.", + "UNUSED denotes that this range is unclaimed by any cluster.", + "IN_USE_SERVICE denotes that this range is claimed by a cluster for\nservices. It cannot be used for other clusters.", + "IN_USE_SHAREABLE_POD denotes this range was created by the network admin\nand is currently claimed by a cluster for pods. It can only be used by\nother clusters as a pod range.", + "IN_USE_MANAGED_POD denotes this range was created by GKE and is claimed\nfor pods. It cannot be used for other clusters." + ], + "type": "string" + } + }, + "type": "object" } }, "servicePath": "", diff --git a/vendor/google.golang.org/api/container/v1/container-gen.go b/vendor/google.golang.org/api/container/v1/container-gen.go index 33719b12642c0..c6d02b0fd8cf3 100644 --- a/vendor/google.golang.org/api/container/v1/container-gen.go +++ b/vendor/google.golang.org/api/container/v1/container-gen.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All rights reserved. +// Copyright 2019 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,13 +6,35 @@ // Package container provides access to the Kubernetes Engine API. 
// -// See https://cloud.google.com/container-engine/ +// For product documentation, see: https://cloud.google.com/container-engine/ +// +// Creating a client // // Usage example: // // import "google.golang.org/api/container/v1" // ... -// containerService, err := container.New(oauthHttpClient) +// ctx := context.Background() +// containerService, err := container.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// Other authentication options +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// containerService, err := container.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// containerService, err := container.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. package container // import "google.golang.org/api/container/v1" import ( @@ -29,6 +51,8 @@ import ( gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + option "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code @@ -56,6 +80,32 @@ const ( CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" ) +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
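The regenerated container/v1 package documentation above introduces the option-based NewService constructor alongside the deprecated New(client). As a rough, hypothetical usage sketch (not part of the vendored diff): the project/location values below are placeholders, Application Default Credentials are assumed to be available, and option.WithUserAgent is used only as an example of an extra ClientOption.

// sketch_newservice.go — illustrative caller of the regenerated client.
package main

import (
	"context"
	"fmt"
	"log"

	container "google.golang.org/api/container/v1"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// NewService prepends the cloud-platform scope option, so caller-supplied
	// options such as a custom user agent are not overridden.
	svc, err := container.NewService(ctx, option.WithUserAgent("vendor-diff-example/0.1"))
	if err != nil {
		log.Fatalf("container.NewService: %v", err)
	}

	// List clusters for a parent in the 'projects/*/locations/*' format.
	// "my-project" and "us-central1" are placeholder values.
	resp, err := svc.Projects.Locations.Clusters.List("projects/my-project/locations/us-central1").Context(ctx).Do()
	if err != nil {
		log.Fatalf("Clusters.List: %v", err)
	}
	for _, c := range resp.Clusters {
		fmt.Printf("%s\t%s\n", c.Name, c.CurrentNodeVersion)
	}
}
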
func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") @@ -82,6 +132,7 @@ func (s *Service) userAgent() string { func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} + rs.Aggregated = NewProjectsAggregatedService(s) rs.Locations = NewProjectsLocationsService(s) rs.Zones = NewProjectsZonesService(s) return rs @@ -90,11 +141,34 @@ func NewProjectsService(s *Service) *ProjectsService { type ProjectsService struct { s *Service + Aggregated *ProjectsAggregatedService + Locations *ProjectsLocationsService Zones *ProjectsZonesService } +func NewProjectsAggregatedService(s *Service) *ProjectsAggregatedService { + rs := &ProjectsAggregatedService{s: s} + rs.UsableSubnetworks = NewProjectsAggregatedUsableSubnetworksService(s) + return rs +} + +type ProjectsAggregatedService struct { + s *Service + + UsableSubnetworks *ProjectsAggregatedUsableSubnetworksService +} + +func NewProjectsAggregatedUsableSubnetworksService(s *Service) *ProjectsAggregatedUsableSubnetworksService { + rs := &ProjectsAggregatedUsableSubnetworksService{s: s} + return rs +} + +type ProjectsAggregatedUsableSubnetworksService struct { + s *Service +} + func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { rs := &ProjectsLocationsService{s: s} rs.Clusters = NewProjectsLocationsClustersService(s) @@ -113,6 +187,7 @@ type ProjectsLocationsService struct { func NewProjectsLocationsClustersService(s *Service) *ProjectsLocationsClustersService { rs := &ProjectsLocationsClustersService{s: s} rs.NodePools = NewProjectsLocationsClustersNodePoolsService(s) + rs.WellKnown = NewProjectsLocationsClustersWellKnownService(s) return rs } @@ -120,6 +195,8 @@ type ProjectsLocationsClustersService struct { s *Service NodePools *ProjectsLocationsClustersNodePoolsService + + WellKnown *ProjectsLocationsClustersWellKnownService } func NewProjectsLocationsClustersNodePoolsService(s *Service) *ProjectsLocationsClustersNodePoolsService { @@ -131,6 +208,15 @@ type ProjectsLocationsClustersNodePoolsService struct { s *Service } +func NewProjectsLocationsClustersWellKnownService(s *Service) *ProjectsLocationsClustersWellKnownService { + rs := &ProjectsLocationsClustersWellKnownService{s: s} + return rs +} + +type ProjectsLocationsClustersWellKnownService struct { + s *Service +} + func NewProjectsLocationsOperationsService(s *Service) *ProjectsLocationsOperationsService { rs := &ProjectsLocationsOperationsService{s: s} return rs @@ -461,8 +547,8 @@ type Cluster struct { // CurrentNodeVersion: [Output only] Deprecated, // use - // [NodePool.version](/kubernetes-engine/docs/reference/rest/v1/proje - // cts.zones.clusters.nodePool) + // [NodePools.version](/kubernetes-engine/docs/reference/rest/v1/proj + // ects.zones.clusters.nodePools) // instead. The current version of the node software components. If they // are // currently at multiple versions because they're in the process of @@ -470,6 +556,13 @@ type Cluster struct { // upgraded, this reflects the minimum version of all nodes. CurrentNodeVersion string `json:"currentNodeVersion,omitempty"` + // DefaultMaxPodsConstraint: The default constraint on the maximum + // number of pods that can be run + // simultaneously on a node in the node pool of this cluster. Only + // honored + // if cluster created with IP Alias support. + DefaultMaxPodsConstraint *MaxPodsConstraint `json:"defaultMaxPodsConstraint,omitempty"` + // Description: An optional description of this cluster. 
Description string `json:"description,omitempty"` @@ -485,6 +578,9 @@ type Cluster struct { // creation. EnableKubernetesAlpha bool `json:"enableKubernetesAlpha,omitempty"` + // EnableTpu: Enable the ability to use Cloud TPUs in this cluster. + EnableTpu bool `json:"enableTpu,omitempty"` + // Endpoint: [Output only] The IP address of this cluster's master // endpoint. // The endpoint can be accessed from the internet @@ -536,6 +632,8 @@ type Cluster struct { // an // auto-generated name. Do not use this and a node_pool at the same // time. + // + // This field is deprecated, use node_pool.initial_node_count instead. InitialNodeCount int64 `json:"initialNodeCount,omitempty"` // InstanceGroupUrls: Deprecated. Use node_pools.instance_group_urls. @@ -629,7 +727,6 @@ type Cluster struct { NetworkPolicy *NetworkPolicy `json:"networkPolicy,omitempty"` // NodeConfig: Parameters used in creating the cluster's nodes. - // See `nodeConfig` for the description of its properties. // For requests, this field should only be used in lieu of a // "node_pool" object, since this configuration (along with // the @@ -639,16 +736,21 @@ type Cluster struct { // time. // For responses, this field will be populated with the node // configuration of - // the first node pool. + // the first node pool. (For configuration of each node pool, + // see + // `node_pool.config`) // // If unspecified, the defaults are used. + // This field is deprecated, use node_pool.config instead. NodeConfig *NodeConfig `json:"nodeConfig,omitempty"` // NodeIpv4CidrSize: [Output only] The size of the address space on each // node for hosting // containers. This is provisioned from within the // `container_ipv4_cidr` - // range. + // range. This field will only be set when cluster is in route-based + // network + // mode. NodeIpv4CidrSize int64 `json:"nodeIpv4CidrSize,omitempty"` // NodePools: The node pools associated with this cluster. @@ -714,6 +816,14 @@ type Cluster struct { // cluster is connected. Subnetwork string `json:"subnetwork,omitempty"` + // TpuIpv4CidrBlock: [Output only] The IP address range of the Cloud + // TPUs in this cluster, + // in + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // + // notation (e.g. `1.2.3.4/29`). + TpuIpv4CidrBlock string `json:"tpuIpv4CidrBlock,omitempty"` + // Zone: [Output only] The name of the Google Compute // Engine // [zone](/compute/docs/zones#available) in which the @@ -1030,7 +1140,7 @@ type DailyMaintenanceWindow struct { // maintenance operations. // Time format should be in // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) - // format "HH:MM”, where HH : [00-23] and MM : [00-59] GMT. + // format "HH:MM", where HH : [00-23] and MM : [00-59] GMT. StartTime string `json:"startTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Duration") to @@ -1074,6 +1184,95 @@ type Empty struct { googleapi.ServerResponse `json:"-"` } +// GetJSONWebKeysResponse: GetJSONWebKeysResponse is a valid JSON Web +// Key Set as specififed in rfc 7517 +type GetJSONWebKeysResponse struct { + // Keys: The public component of the keys used by the cluster to sign + // token + // requests. + Keys []*Jwk `json:"keys,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Keys") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Keys") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GetJSONWebKeysResponse) MarshalJSON() ([]byte, error) { + type NoMethod GetJSONWebKeysResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GetOpenIDConfigResponse: GetOpenIDConfigResponse is an OIDC discovery +// document for the cluster. +// See the OpenID Connect Discovery 1.0 specification for details. +type GetOpenIDConfigResponse struct { + // ClaimsSupported: Supported claims. + ClaimsSupported []string `json:"claims_supported,omitempty"` + + // GrantTypes: Supported grant types. + GrantTypes []string `json:"grant_types,omitempty"` + + // IdTokenSigningAlgValuesSupported: supported ID Token signing + // Algorithms. + IdTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported,omitempty"` + + // Issuer: OIDC Issuer. + Issuer string `json:"issuer,omitempty"` + + // JwksUri: JSON Web Key uri. + JwksUri string `json:"jwks_uri,omitempty"` + + // ResponseTypesSupported: Supported response types. + ResponseTypesSupported []string `json:"response_types_supported,omitempty"` + + // SubjectTypesSupported: Supported subject types. + SubjectTypesSupported []string `json:"subject_types_supported,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ClaimsSupported") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClaimsSupported") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GetOpenIDConfigResponse) MarshalJSON() ([]byte, error) { + type NoMethod GetOpenIDConfigResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HorizontalPodAutoscaling: Configuration options for the horizontal // pod autoscaling feature, which // increases or decreases the number of replica pods a replication @@ -1262,6 +1461,29 @@ type IPAllocationPolicy struct { // subnetwork. 
SubnetworkName string `json:"subnetworkName,omitempty"` + // TpuIpv4CidrBlock: The IP address range of the Cloud TPUs in this + // cluster. If unspecified, a + // range will be automatically chosen with the default size. + // + // This field is only applicable when `use_ip_aliases` is true. + // + // If unspecified, the range will use the default size. + // + // Set to /netmask (e.g. `/14`) to have a range chosen with a + // specific + // netmask. + // + // Set to + // a + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // + // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks + // (e.g. + // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific + // range + // to use. + TpuIpv4CidrBlock string `json:"tpuIpv4CidrBlock,omitempty"` + // UseIpAliases: Whether alias IPs will be used for pod IPs in the // cluster. UseIpAliases bool `json:"useIpAliases,omitempty"` @@ -1290,6 +1512,58 @@ func (s *IPAllocationPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Jwk: Jwk is a JSON Web Key as specified in RFC 7517 +type Jwk struct { + // Alg: Algorithm. + Alg string `json:"alg,omitempty"` + + // Crv: Used for ECDSA keys. + Crv string `json:"crv,omitempty"` + + // E: Used for RSA keys. + E string `json:"e,omitempty"` + + // Kid: Key ID. + Kid string `json:"kid,omitempty"` + + // Kty: Key Type. + Kty string `json:"kty,omitempty"` + + // N: Used for RSA keys. + N string `json:"n,omitempty"` + + // Use: Permitted uses for the public keys. + Use string `json:"use,omitempty"` + + // X: Used for ECDSA keys. + X string `json:"x,omitempty"` + + // Y: Used for ECDSA keys. + Y string `json:"y,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Alg") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Alg") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Jwk) MarshalJSON() ([]byte, error) { + type NoMethod Jwk + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // KubernetesDashboard: Configuration for the Kubernetes Dashboard. type KubernetesDashboard struct { // Disabled: Whether the Kubernetes Dashboard is enabled for this @@ -1467,6 +1741,51 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ListUsableSubnetworksResponse: ListUsableSubnetworksResponse is the +// response of +// ListUsableSubnetworksRequest. +type ListUsableSubnetworksResponse struct { + // NextPageToken: This token allows you to get the next page of results + // for list requests. 
+ // If the number of results is larger than `page_size`, use + // the + // `next_page_token` as a value for the query parameter `page_token` in + // the + // next request. The value will become empty when there are no more + // pages. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Subnetworks: A list of usable subnetworks in the specified network + // project. + Subnetworks []*UsableSubnetwork `json:"subnetworks,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListUsableSubnetworksResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListUsableSubnetworksResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // MaintenancePolicy: MaintenancePolicy defines the maintenance policy // to be used for the cluster. type MaintenancePolicy struct { @@ -1605,7 +1924,7 @@ func (s *MasterAuth) MarshalJSON() ([]byte, error) { // blocks, // Google Compute Engine Public IPs and Google Prod IPs. type MasterAuthorizedNetworksConfig struct { - // CidrBlocks: cidr_blocks define up to 10 external networks that could + // CidrBlocks: cidr_blocks define up to 50 external networks that could // access // Kubernetes master through HTTPS. CidrBlocks []*CidrBlock `json:"cidrBlocks,omitempty"` @@ -1636,6 +1955,35 @@ func (s *MasterAuthorizedNetworksConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MaxPodsConstraint: Constraints applied to pods. +type MaxPodsConstraint struct { + // MaxPodsPerNode: Constraint enforced on the max num of pods per node. + MaxPodsPerNode int64 `json:"maxPodsPerNode,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "MaxPodsPerNode") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxPodsPerNode") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *MaxPodsConstraint) MarshalJSON() ([]byte, error) { + type NoMethod MaxPodsConstraint + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // NetworkConfig: NetworkConfig reports the relative names of network & // subnetwork. type NetworkConfig struct { @@ -1824,6 +2172,7 @@ type NodeConfig struct { // "cluster-name" // "cluster-uid" // "configure-sh" + // "containerd-configure-sh" // "enable-os-login" // "gci-update-strategy" // "gci-ensure-gke-docker" @@ -2020,9 +2369,18 @@ type NodePool struct { // Management: NodeManagement configuration for this NodePool. Management *NodeManagement `json:"management,omitempty"` + // MaxPodsConstraint: The constraint on the maximum number of pods that + // can be run + // simultaneously on a node in the node pool. + MaxPodsConstraint *MaxPodsConstraint `json:"maxPodsConstraint,omitempty"` + // Name: The name of the node pool. Name string `json:"name,omitempty"` + // PodIpv4CidrSize: [Output only] The pod CIDR block size per node in + // this node pool. + PodIpv4CidrSize int64 `json:"podIpv4CidrSize,omitempty"` + // SelfLink: [Output only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -3435,19 +3793,340 @@ type UpdateNodePoolRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClusterId") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ClusterId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UpdateNodePoolRequest) MarshalJSON() ([]byte, error) { + type NoMethod UpdateNodePoolRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsableSubnetwork: UsableSubnetwork resource returns the subnetwork +// name, its associated network +// and the primary CIDR range. +type UsableSubnetwork struct { + // IpCidrRange: The range of internal addresses that are owned by this + // subnetwork. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // Network: Network Name. + // Example: projects/my-project/global/networks/my-network + Network string `json:"network,omitempty"` + + // SecondaryIpRanges: Secondary IP ranges. + SecondaryIpRanges []*UsableSubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` + + // StatusMessage: A human readable status message representing the + // reasons for cases where + // the caller cannot use the secondary ranges under the subnet. For + // example if + // the secondary_ip_ranges is empty due to a permission issue, an + // insufficient + // permission message will be given by status_message. 
+ StatusMessage string `json:"statusMessage,omitempty"` + + // Subnetwork: Subnetwork Name. + // Example: + // projects/my-project/regions/us-central1/subnetworks/my-subnet + Subnetwork string `json:"subnetwork,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetwork) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetwork + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsableSubnetworkSecondaryRange: Secondary IP range of a usable +// subnetwork. +type UsableSubnetworkSecondaryRange struct { + // IpCidrRange: The range of IP addresses belonging to this subnetwork + // secondary range. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // RangeName: The name associated with this subnetwork secondary range, + // used when adding + // an alias IP range to a VM instance. + RangeName string `json:"rangeName,omitempty"` + + // Status: This field is to determine the status of the secondary range + // programmably. + // + // Possible values: + // "UNKNOWN" - UNKNOWN is the zero value of the Status enum. It's not + // a valid status. + // "UNUSED" - UNUSED denotes that this range is unclaimed by any + // cluster. + // "IN_USE_SERVICE" - IN_USE_SERVICE denotes that this range is + // claimed by a cluster for + // services. It cannot be used for other clusters. + // "IN_USE_SHAREABLE_POD" - IN_USE_SHAREABLE_POD denotes this range + // was created by the network admin + // and is currently claimed by a cluster for pods. It can only be used + // by + // other clusters as a pod range. + // "IN_USE_MANAGED_POD" - IN_USE_MANAGED_POD denotes this range was + // created by GKE and is claimed + // for pods. It cannot be used for other clusters. + Status string `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *UsableSubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { + type NoMethod UsableSubnetworkSecondaryRange + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "container.projects.aggregated.usableSubnetworks.list": + +type ProjectsAggregatedUsableSubnetworksListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists subnetworks that are usable for creating clusters in a +// project. +func (r *ProjectsAggregatedUsableSubnetworksService) List(parent string) *ProjectsAggregatedUsableSubnetworksListCall { + c := &ProjectsAggregatedUsableSubnetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": Filtering currently only +// supports equality on the networkProjectId and must +// be in the form: "networkProjectId=[PROJECTID]", where +// `networkProjectId` +// is the project which owns the listed subnetworks. This defaults to +// the +// parent project ID. +func (c *ProjectsAggregatedUsableSubnetworksListCall) Filter(filter string) *ProjectsAggregatedUsableSubnetworksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The max number of +// results per page that should be returned. If the number +// of available results is larger than `page_size`, a `next_page_token` +// is +// returned which can be used to get the next page of results in +// subsequent +// requests. Acceptable values are 0 to 500, inclusive. (Default: 500) +func (c *ProjectsAggregatedUsableSubnetworksListCall) PageSize(pageSize int64) *ProjectsAggregatedUsableSubnetworksListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set this to the nextPageToken returned by +// previous list requests to get the next page of results. +func (c *ProjectsAggregatedUsableSubnetworksListCall) PageToken(pageToken string) *ProjectsAggregatedUsableSubnetworksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAggregatedUsableSubnetworksListCall) Fields(s ...googleapi.Field) *ProjectsAggregatedUsableSubnetworksListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsAggregatedUsableSubnetworksListCall) IfNoneMatch(entityTag string) *ProjectsAggregatedUsableSubnetworksListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
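// For illustration, a minimal sketch of issuing this call with a context:
// svc is assumed to be an already-constructed *Service, the project and
// network-project IDs are placeholders, and error handling is elided.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//	resp, err := svc.Projects.Aggregated.UsableSubnetworks.
//		List("projects/my-project").
//		Filter("networkProjectId=my-network-project").
//		PageSize(100).
//		Context(ctx).
//		Do()
//	if err != nil {
//		// handle the error
//	}
//	for _, sn := range resp.Subnetworks {
//		fmt.Println(sn.Subnetwork, sn.IpCidrRange)
//	}
//
// For automatic iteration over all result pages, the Pages helper defined
// further below can be used in place of Do.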
+func (c *ProjectsAggregatedUsableSubnetworksListCall) Context(ctx context.Context) *ProjectsAggregatedUsableSubnetworksListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAggregatedUsableSubnetworksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAggregatedUsableSubnetworksListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/aggregated/usableSubnetworks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.aggregated.usableSubnetworks.list" call. +// Exactly one of *ListUsableSubnetworksResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ListUsableSubnetworksResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAggregatedUsableSubnetworksListCall) Do(opts ...googleapi.CallOption) (*ListUsableSubnetworksResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListUsableSubnetworksResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists subnetworks that are usable for creating clusters in a project.", + // "flatPath": "v1/projects/{projectsId}/aggregated/usableSubnetworks", + // "httpMethod": "GET", + // "id": "container.projects.aggregated.usableSubnetworks.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "filter": { + // "description": "Filtering currently only supports equality on the networkProjectId and must\nbe in the form: \"networkProjectId=[PROJECTID]\", where `networkProjectId`\nis the project which owns the listed subnetworks. This defaults to the\nparent project ID.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "The max number of results per page that should be returned. If the number\nof available results is larger than `page_size`, a `next_page_token` is\nreturned which can be used to get the next page of results in subsequent\nrequests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set this to the nextPageToken returned by\nprevious list requests to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The parent project where subnetworks are usable.\nSpecified in the format 'projects/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/aggregated/usableSubnetworks", + // "response": { + // "$ref": "ListUsableSubnetworksResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + } -func (s *UpdateNodePoolRequest) MarshalJSON() ([]byte, error) { - type NoMethod UpdateNodePoolRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsAggregatedUsableSubnetworksListCall) Pages(ctx context.Context, f func(*ListUsableSubnetworksResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } // method id "container.projects.locations.getServerConfig": @@ -3461,8 +4140,8 @@ type ProjectsLocationsGetServerConfigCall struct { header_ http.Header } -// GetServerConfig: Returns configuration info about the Kubernetes -// Engine service. +// GetServerConfig: Returns configuration info about the Google +// Kubernetes Engine service. func (r *ProjectsLocationsService) GetServerConfig(name string) *ProjectsLocationsGetServerConfigCall { c := &ProjectsLocationsGetServerConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -3587,7 +4266,7 @@ func (c *ProjectsLocationsGetServerConfigCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns configuration info about the Kubernetes Engine service.", + // "description": "Returns configuration info about the Google Kubernetes Engine service.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/serverConfig", // "httpMethod": "GET", // "id": "container.projects.locations.getServerConfig", @@ -3596,7 +4275,7 @@ func (c *ProjectsLocationsGetServerConfigCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "The name (project and location) of the server config to get\nSpecified in the format 'projects/*/locations/*'.", + // "description": "The name (project and location) of the server config to get,\nspecified in the format 'projects/*/locations/*'.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+$", // "required": true, @@ -3783,14 +4462,14 @@ type ProjectsLocationsClustersCreateCall struct { // network](/compute/docs/networks-and-firewalls#networks). // // One firewall is added for the cluster. After cluster creation, -// the cluster creates routes for each node to allow the containers +// the Kubelet creates routes for each node to allow the containers // on that node to communicate with all other instances in // the // cluster. 
// // Finally, an entry is added to the project's global metadata // indicating -// which CIDR range is being used by the cluster. +// which CIDR range the cluster is using. func (r *ProjectsLocationsClustersService) Create(parent string, createclusterrequest *CreateClusterRequest) *ProjectsLocationsClustersCreateCall { c := &ProjectsLocationsClustersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -3888,7 +4567,7 @@ func (c *ProjectsLocationsClustersCreateCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe cluster creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range is being used by the cluster.", + // "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe Kubelet creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range the cluster is using.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.create", @@ -3936,10 +4615,10 @@ type ProjectsLocationsClustersDeleteCall struct { // are also deleted. // // Other Google Compute Engine resources that might be in use by the -// cluster -// (e.g. load balancer resources) will not be deleted if they weren't +// cluster, +// such as load balancer resources, are not deleted if they weren't // present -// at the initial create time. +// when the cluster was initially created. func (r *ProjectsLocationsClustersService) Delete(name string) *ProjectsLocationsClustersDeleteCall { c := &ProjectsLocationsClustersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4061,7 +4740,7 @@ func (c *ProjectsLocationsClustersDeleteCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster\n(e.g. 
load balancer resources) will not be deleted if they weren't present\nat the initial create time.", + // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster,\nsuch as load balancer resources, are not deleted if they weren't present\nwhen the cluster was initially created.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}", // "httpMethod": "DELETE", // "id": "container.projects.locations.clusters.delete", @@ -4291,6 +4970,151 @@ func (c *ProjectsLocationsClustersGetCall) Do(opts ...googleapi.CallOption) (*Cl } +// method id "container.projects.locations.clusters.getJwks": + +type ProjectsLocationsClustersGetJwksCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetJwks: Gets the public component of the cluster signing keys +// in +// JSON Web Key format. +// This API is not yet intended for general use, and is not available +// for all +// clusters. +func (r *ProjectsLocationsClustersService) GetJwks(parent string) *ProjectsLocationsClustersGetJwksCall { + c := &ProjectsLocationsClustersGetJwksCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersGetJwksCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersGetJwksCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsClustersGetJwksCall) IfNoneMatch(entityTag string) *ProjectsLocationsClustersGetJwksCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersGetJwksCall) Context(ctx context.Context) *ProjectsLocationsClustersGetJwksCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersGetJwksCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersGetJwksCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/jwks") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.getJwks" call. +// Exactly one of *GetJSONWebKeysResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GetJSONWebKeysResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersGetJwksCall) Do(opts ...googleapi.CallOption) (*GetJSONWebKeysResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GetJSONWebKeysResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the public component of the cluster signing keys in\nJSON Web Key format.\nThis API is not yet intended for general use, and is not available for all\nclusters.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/jwks", + // "httpMethod": "GET", + // "id": "container.projects.locations.clusters.getJwks", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The cluster (project, location, cluster id) to get keys for. Specified in\nthe format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/jwks", + // "response": { + // "$ref": "GetJSONWebKeysResponse" + // } + // } + +} + // method id "container.projects.locations.clusters.list": type ProjectsLocationsClustersListCall struct { @@ -5174,11 +5998,11 @@ type ProjectsLocationsClustersSetMasterAuthCall struct { header_ http.Header } -// SetMasterAuth: Used to set master auth materials. Currently supports -// :- -// Changing the admin password for a specific cluster. -// This can be either via password generation or explicitly set the -// password. +// SetMasterAuth: Sets master auth materials. Currently supports +// changing the admin password +// or a specific cluster, either via password generation or explicitly +// setting +// the password. func (r *ProjectsLocationsClustersService) SetMasterAuth(name string, setmasterauthrequest *SetMasterAuthRequest) *ProjectsLocationsClustersSetMasterAuthCall { c := &ProjectsLocationsClustersSetMasterAuthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5276,7 +6100,7 @@ func (c *ProjectsLocationsClustersSetMasterAuthCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Used to set master auth materials. 
Currently supports :-\nChanging the admin password for a specific cluster.\nThis can be either via password generation or explicitly set the password.", + // "description": "Sets master auth materials. Currently supports changing the admin password\nor a specific cluster, either via password generation or explicitly setting\nthe password.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMasterAuth", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.setMasterAuth", @@ -5456,7 +6280,7 @@ type ProjectsLocationsClustersSetNetworkPolicyCall struct { header_ http.Header } -// SetNetworkPolicy: Enables/Disables Network Policy for a cluster. +// SetNetworkPolicy: Enables or disables Network Policy for a cluster. func (r *ProjectsLocationsClustersService) SetNetworkPolicy(name string, setnetworkpolicyrequest *SetNetworkPolicyRequest) *ProjectsLocationsClustersSetNetworkPolicyCall { c := &ProjectsLocationsClustersSetNetworkPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5554,7 +6378,7 @@ func (c *ProjectsLocationsClustersSetNetworkPolicyCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Enables/Disables Network Policy for a cluster.", + // "description": "Enables or disables Network Policy for a cluster.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setNetworkPolicy", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.setNetworkPolicy", @@ -5734,7 +6558,7 @@ type ProjectsLocationsClustersStartIpRotationCall struct { header_ http.Header } -// StartIpRotation: Start master IP rotation. +// StartIpRotation: Starts master IP rotation. func (r *ProjectsLocationsClustersService) StartIpRotation(name string, startiprotationrequest *StartIPRotationRequest) *ProjectsLocationsClustersStartIpRotationCall { c := &ProjectsLocationsClustersStartIpRotationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5832,7 +6656,7 @@ func (c *ProjectsLocationsClustersStartIpRotationCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Start master IP rotation.", + // "description": "Starts master IP rotation.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:startIpRotation", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.startIpRotation", @@ -6477,7 +7301,7 @@ type ProjectsLocationsClustersNodePoolsGetCall struct { header_ http.Header } -// Get: Retrieves the node pool requested. +// Get: Retrieves the requested node pool. func (r *ProjectsLocationsClustersNodePoolsService) Get(name string) *ProjectsLocationsClustersNodePoolsGetCall { c := &ProjectsLocationsClustersNodePoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6620,7 +7444,7 @@ func (c *ProjectsLocationsClustersNodePoolsGetCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Retrieves the node pool requested.", + // "description": "Retrieves the requested node pool.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}", // "httpMethod": "GET", // "id": "container.projects.locations.clusters.nodePools.get", @@ -6866,9 +7690,9 @@ type ProjectsLocationsClustersNodePoolsRollbackCall struct { header_ http.Header } -// Rollback: Roll back the previously Aborted or Failed NodePool +// Rollback: Rolls back a previously Aborted or Failed NodePool // upgrade. 
-// This will be an no-op if the last upgrade successfully completed. +// This makes no changes if the last upgrade successfully completed. func (r *ProjectsLocationsClustersNodePoolsService) Rollback(name string, rollbacknodepoolupgraderequest *RollbackNodePoolUpgradeRequest) *ProjectsLocationsClustersNodePoolsRollbackCall { c := &ProjectsLocationsClustersNodePoolsRollbackCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -6966,7 +7790,7 @@ func (c *ProjectsLocationsClustersNodePoolsRollbackCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Roll back the previously Aborted or Failed NodePool upgrade.\nThis will be an no-op if the last upgrade successfully completed.", + // "description": "Rolls back a previously Aborted or Failed NodePool upgrade.\nThis makes no changes if the last upgrade successfully completed.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:rollback", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.nodePools.rollback", @@ -7007,7 +7831,7 @@ type ProjectsLocationsClustersNodePoolsSetAutoscalingCall struct { header_ http.Header } -// SetAutoscaling: Sets the autoscaling settings for a specific node +// SetAutoscaling: Sets the autoscaling settings for the specified node // pool. func (r *ProjectsLocationsClustersNodePoolsService) SetAutoscaling(name string, setnodepoolautoscalingrequest *SetNodePoolAutoscalingRequest) *ProjectsLocationsClustersNodePoolsSetAutoscalingCall { c := &ProjectsLocationsClustersNodePoolsSetAutoscalingCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -7106,7 +7930,7 @@ func (c *ProjectsLocationsClustersNodePoolsSetAutoscalingCall) Do(opts ...google } return ret, nil // { - // "description": "Sets the autoscaling settings for a specific node pool.", + // "description": "Sets the autoscaling settings for the specified node pool.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:setAutoscaling", // "httpMethod": "POST", // "id": "container.projects.locations.clusters.nodePools.setAutoscaling", @@ -7425,7 +8249,7 @@ type ProjectsLocationsClustersNodePoolsUpdateCall struct { header_ http.Header } -// Update: Updates the version and/or image type for a specific node +// Update: Updates the version and/or image type for the specified node // pool. 
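// As a hedged illustration of this method, the sketch below bumps a node
// pool's Kubernetes version; svc is assumed to be an already-constructed
// *Service, and the resource name and version string are placeholders.
//
//	req := &UpdateNodePoolRequest{
//		NodeVersion: "1.14.10-gke.27",
//	}
//	op, err := svc.Projects.Locations.Clusters.NodePools.
//		Update("projects/my-project/locations/us-central1/clusters/my-cluster/nodePools/default-pool", req).
//		Do()
//	if err != nil {
//		// handle the error
//	}
//	// op is a long-running Operation that can be polled until the update completes.
//	_ = op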
func (r *ProjectsLocationsClustersNodePoolsService) Update(name string, updatenodepoolrequest *UpdateNodePoolRequest) *ProjectsLocationsClustersNodePoolsUpdateCall { c := &ProjectsLocationsClustersNodePoolsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -7524,7 +8348,7 @@ func (c *ProjectsLocationsClustersNodePoolsUpdateCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Updates the version and/or image type for a specific node pool.", + // "description": "Updates the version and/or image type for the specified node pool.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}", // "httpMethod": "PUT", // "id": "container.projects.locations.clusters.nodePools.update", @@ -7554,6 +8378,156 @@ func (c *ProjectsLocationsClustersNodePoolsUpdateCall) Do(opts ...googleapi.Call } +// method id "container.projects.locations.clusters.well-known.getOpenid-configuration": + +type ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetOpenidConfiguration: Gets the OIDC discovery document for the +// cluster. +// See the +// [OpenID Connect Discovery +// 1.0 +// specification](https://openid.net/specs/openid-connect-discovery-1 +// _0.html) +// for details. +// This API is not yet intended for general use, and is not available +// for all +// clusters. +func (r *ProjectsLocationsClustersWellKnownService) GetOpenidConfiguration(parent string) *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall { + c := &ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) IfNoneMatch(entityTag string) *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Context(ctx context.Context) *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
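// As a small illustration, an extra request header could be attached before
// executing the call; the header name and value below are arbitrary
// placeholders, c is assumed to be a call previously built with
// GetOpenidConfiguration, and handling of the results is elided.
//
//	c.Header().Set("X-Example-Trace", "debug-run-1")
//	resp, err := c.Do()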
+func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/.well-known/openid-configuration") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.well-known.getOpenid-configuration" call. +// Exactly one of *GetOpenIDConfigResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GetOpenIDConfigResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersWellKnownGetOpenidConfigurationCall) Do(opts ...googleapi.CallOption) (*GetOpenIDConfigResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GetOpenIDConfigResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the OIDC discovery document for the cluster.\nSee the\n[OpenID Connect Discovery 1.0\nspecification](https://openid.net/specs/openid-connect-discovery-1_0.html)\nfor details.\nThis API is not yet intended for general use, and is not available for all\nclusters.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/.well-known/openid-configuration", + // "httpMethod": "GET", + // "id": "container.projects.locations.clusters.well-known.getOpenid-configuration", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The cluster (project, location, cluster id) to get the discovery document\nfor. 
Specified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/.well-known/openid-configuration", + // "response": { + // "$ref": "GetOpenIDConfigResponse" + // } + // } + +} + // method id "container.projects.locations.operations.cancel": type ProjectsLocationsOperationsCancelCall struct { @@ -8069,8 +9043,8 @@ type ProjectsZonesGetServerconfigCall struct { header_ http.Header } -// GetServerconfig: Returns configuration info about the Kubernetes -// Engine service. +// GetServerconfig: Returns configuration info about the Google +// Kubernetes Engine service. func (r *ProjectsZonesService) GetServerconfig(projectId string, zone string) *ProjectsZonesGetServerconfigCall { c := &ProjectsZonesGetServerconfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -8079,8 +9053,8 @@ func (r *ProjectsZonesService) GetServerconfig(projectId string, zone string) *P } // Name sets the optional parameter "name": The name (project and -// location) of the server config to get -// Specified in the format 'projects/*/locations/*'. +// location) of the server config to get, +// specified in the format 'projects/*/locations/*'. func (c *ProjectsZonesGetServerconfigCall) Name(name string) *ProjectsZonesGetServerconfigCall { c.urlParams_.Set("name", name) return c @@ -8185,7 +9159,7 @@ func (c *ProjectsZonesGetServerconfigCall) Do(opts ...googleapi.CallOption) (*Se } return ret, nil // { - // "description": "Returns configuration info about the Kubernetes Engine service.", + // "description": "Returns configuration info about the Google Kubernetes Engine service.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/serverconfig", // "httpMethod": "GET", // "id": "container.projects.zones.getServerconfig", @@ -8195,7 +9169,7 @@ func (c *ProjectsZonesGetServerconfigCall) Do(opts ...googleapi.CallOption) (*Se // ], // "parameters": { // "name": { - // "description": "The name (project and location) of the server config to get\nSpecified in the format 'projects/*/locations/*'.", + // "description": "The name (project and location) of the server config to get,\nspecified in the format 'projects/*/locations/*'.", // "location": "query", // "type": "string" // }, @@ -8560,14 +9534,14 @@ type ProjectsZonesClustersCreateCall struct { // network](/compute/docs/networks-and-firewalls#networks). // // One firewall is added for the cluster. After cluster creation, -// the cluster creates routes for each node to allow the containers +// the Kubelet creates routes for each node to allow the containers // on that node to communicate with all other instances in // the // cluster. // // Finally, an entry is added to the project's global metadata // indicating -// which CIDR range is being used by the cluster. +// which CIDR range the cluster is using. 
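// A hedged sketch of a minimal create call, assuming svc is an
// already-constructed *Service; the project, zone and cluster names are
// placeholders and error handling is elided.
//
//	req := &CreateClusterRequest{
//		Cluster: &Cluster{
//			Name:             "example-cluster",
//			InitialNodeCount: 3,
//		},
//	}
//	op, err := svc.Projects.Zones.Clusters.Create("my-project", "us-central1-a", req).Do()
//
// The returned Operation can then be polled (for example with
// Projects.Zones.Operations.Get) until the cluster is running.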
func (r *ProjectsZonesClustersService) Create(projectId string, zone string, createclusterrequest *CreateClusterRequest) *ProjectsZonesClustersCreateCall { c := &ProjectsZonesClustersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -8667,7 +9641,7 @@ func (c *ProjectsZonesClustersCreateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe cluster creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range is being used by the cluster.", + // "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe Kubelet creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range the cluster is using.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.create", @@ -8723,10 +9697,10 @@ type ProjectsZonesClustersDeleteCall struct { // are also deleted. // // Other Google Compute Engine resources that might be in use by the -// cluster -// (e.g. load balancer resources) will not be deleted if they weren't +// cluster, +// such as load balancer resources, are not deleted if they weren't // present -// at the initial create time. +// when the cluster was initially created. func (r *ProjectsZonesClustersService) Delete(projectId string, zone string, clusterId string) *ProjectsZonesClustersDeleteCall { c := &ProjectsZonesClustersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -8830,7 +9804,7 @@ func (c *ProjectsZonesClustersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster\n(e.g. load balancer resources) will not be deleted if they weren't present\nat the initial create time.", + // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster,\nsuch as load balancer resources, are not deleted if they weren't present\nwhen the cluster was initially created.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", // "httpMethod": "DELETE", // "id": "container.projects.zones.clusters.delete", @@ -10338,11 +11312,11 @@ type ProjectsZonesClustersSetMasterAuthCall struct { header_ http.Header } -// SetMasterAuth: Used to set master auth materials. 
Currently supports -// :- -// Changing the admin password for a specific cluster. -// This can be either via password generation or explicitly set the -// password. +// SetMasterAuth: Sets master auth materials. Currently supports +// changing the admin password +// or a specific cluster, either via password generation or explicitly +// setting +// the password. func (r *ProjectsZonesClustersService) SetMasterAuth(projectId string, zone string, clusterId string, setmasterauthrequest *SetMasterAuthRequest) *ProjectsZonesClustersSetMasterAuthCall { c := &ProjectsZonesClustersSetMasterAuthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -10444,7 +11418,7 @@ func (c *ProjectsZonesClustersSetMasterAuthCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Used to set master auth materials. Currently supports :-\nChanging the admin password for a specific cluster.\nThis can be either via password generation or explicitly set the password.", + // "description": "Sets master auth materials. Currently supports changing the admin password\nor a specific cluster, either via password generation or explicitly setting\nthe password.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.setMasterAuth", @@ -10500,7 +11474,7 @@ type ProjectsZonesClustersSetNetworkPolicyCall struct { header_ http.Header } -// SetNetworkPolicy: Enables/Disables Network Policy for a cluster. +// SetNetworkPolicy: Enables or disables Network Policy for a cluster. func (r *ProjectsZonesClustersService) SetNetworkPolicy(projectId string, zone string, clusterId string, setnetworkpolicyrequest *SetNetworkPolicyRequest) *ProjectsZonesClustersSetNetworkPolicyCall { c := &ProjectsZonesClustersSetNetworkPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -10602,7 +11576,7 @@ func (c *ProjectsZonesClustersSetNetworkPolicyCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Enables/Disables Network Policy for a cluster.", + // "description": "Enables or disables Network Policy for a cluster.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setNetworkPolicy", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.setNetworkPolicy", @@ -10658,7 +11632,7 @@ type ProjectsZonesClustersStartIpRotationCall struct { header_ http.Header } -// StartIpRotation: Start master IP rotation. +// StartIpRotation: Starts master IP rotation. func (r *ProjectsZonesClustersService) StartIpRotation(projectId string, zone string, clusterId string, startiprotationrequest *StartIPRotationRequest) *ProjectsZonesClustersStartIpRotationCall { c := &ProjectsZonesClustersStartIpRotationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -10760,7 +11734,7 @@ func (c *ProjectsZonesClustersStartIpRotationCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Start master IP rotation.", + // "description": "Starts master IP rotation.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:startIpRotation", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.startIpRotation", @@ -10975,7 +11949,8 @@ type ProjectsZonesClustersNodePoolsAutoscalingCall struct { header_ http.Header } -// Autoscaling: Sets the autoscaling settings for a specific node pool. 
+// Autoscaling: Sets the autoscaling settings for the specified node +// pool. func (r *ProjectsZonesClustersNodePoolsService) Autoscaling(projectId string, zone string, clusterId string, nodePoolId string, setnodepoolautoscalingrequest *SetNodePoolAutoscalingRequest) *ProjectsZonesClustersNodePoolsAutoscalingCall { c := &ProjectsZonesClustersNodePoolsAutoscalingCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -11079,7 +12054,7 @@ func (c *ProjectsZonesClustersNodePoolsAutoscalingCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Sets the autoscaling settings for a specific node pool.", + // "description": "Sets the autoscaling settings for the specified node pool.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/autoscaling", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.nodePools.autoscaling", @@ -11474,7 +12449,7 @@ type ProjectsZonesClustersNodePoolsGetCall struct { header_ http.Header } -// Get: Retrieves the node pool requested. +// Get: Retrieves the requested node pool. func (r *ProjectsZonesClustersNodePoolsService) Get(projectId string, zone string, clusterId string, nodePoolId string) *ProjectsZonesClustersNodePoolsGetCall { c := &ProjectsZonesClustersNodePoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -11595,7 +12570,7 @@ func (c *ProjectsZonesClustersNodePoolsGetCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Retrieves the node pool requested.", + // "description": "Retrieves the requested node pool.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", // "httpMethod": "GET", // "id": "container.projects.zones.clusters.nodePools.get", @@ -11836,9 +12811,9 @@ type ProjectsZonesClustersNodePoolsRollbackCall struct { header_ http.Header } -// Rollback: Roll back the previously Aborted or Failed NodePool +// Rollback: Rolls back a previously Aborted or Failed NodePool // upgrade. -// This will be an no-op if the last upgrade successfully completed. +// This makes no changes if the last upgrade successfully completed. func (r *ProjectsZonesClustersNodePoolsService) Rollback(projectId string, zone string, clusterId string, nodePoolId string, rollbacknodepoolupgraderequest *RollbackNodePoolUpgradeRequest) *ProjectsZonesClustersNodePoolsRollbackCall { c := &ProjectsZonesClustersNodePoolsRollbackCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -11942,7 +12917,7 @@ func (c *ProjectsZonesClustersNodePoolsRollbackCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Roll back the previously Aborted or Failed NodePool upgrade.\nThis will be an no-op if the last upgrade successfully completed.", + // "description": "Rolls back a previously Aborted or Failed NodePool upgrade.\nThis makes no changes if the last upgrade successfully completed.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.nodePools.rollback", @@ -12342,7 +13317,7 @@ type ProjectsZonesClustersNodePoolsUpdateCall struct { header_ http.Header } -// Update: Updates the version and/or image type for a specific node +// Update: Updates the version and/or image type for the specified node // pool. 
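// A zone-scoped illustration (the locations-scoped variant is sketched
// earlier): switching a node pool's image type. svc is assumed to be an
// already-constructed *Service, all identifiers are placeholders, and
// handling of op and err is elided.
//
//	req := &UpdateNodePoolRequest{ImageType: "COS"}
//	op, err := svc.Projects.Zones.Clusters.NodePools.
//		Update("my-project", "us-central1-a", "my-cluster", "default-pool", req).
//		Do()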
func (r *ProjectsZonesClustersNodePoolsService) Update(projectId string, zone string, clusterId string, nodePoolId string, updatenodepoolrequest *UpdateNodePoolRequest) *ProjectsZonesClustersNodePoolsUpdateCall { c := &ProjectsZonesClustersNodePoolsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -12447,7 +13422,7 @@ func (c *ProjectsZonesClustersNodePoolsUpdateCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Updates the version and/or image type for a specific node pool.", + // "description": "Updates the version and/or image type for the specified node pool.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/update", // "httpMethod": "POST", // "id": "container.projects.zones.clusters.nodePools.update", diff --git a/vendor/google.golang.org/api/dns/v1/BUILD.bazel b/vendor/google.golang.org/api/dns/v1/BUILD.bazel index 4bacab5e4fb5c..6f0b05cd69d85 100644 --- a/vendor/google.golang.org/api/dns/v1/BUILD.bazel +++ b/vendor/google.golang.org/api/dns/v1/BUILD.bazel @@ -9,5 +9,7 @@ go_library( deps = [ "//vendor/google.golang.org/api/gensupport:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/google.golang.org/api/option:go_default_library", + "//vendor/google.golang.org/api/transport/http:go_default_library", ], ) diff --git a/vendor/google.golang.org/api/dns/v1/dns-api.json b/vendor/google.golang.org/api/dns/v1/dns-api.json index ab721983f5c2e..a663247b453e2 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-api.json +++ b/vendor/google.golang.org/api/dns/v1/dns-api.json @@ -23,7 +23,7 @@ "description": "Configures and serves authoritative DNS records.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/cloud-dns", - "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/EQMOijfSxjBH7q8fB_7QzVLzbgs\"", + "etag": "\"VPK3KBfpaEgZ16pozGOoMYfKc0U/lpqiZRIZVeruudGbozkBqz2t94g\"", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" @@ -760,11 +760,11 @@ } } }, - "revision": "20180808", + "revision": "20190418", "rootUrl": "https://www.googleapis.com/", "schemas": { "Change": { - "description": "An atomic update to a collection of ResourceRecordSets.", + "description": "A Change represents a set of ResourceRecordSet additions and deletions applied atomically to a ManagedZone. ResourceRecordSets within a ManagedZone are modified by creating a new Change element in the Changes collection. In turn the Changes collection also records the past modifications to the ResourceRecordSets in a ManagedZone. The current state of the ManagedZone is the sum effect of applying all Change elements in the Changes collection in sequence.", "id": "Change", "properties": { "additions": { @@ -799,7 +799,7 @@ "type": "string" }, "status": { - "description": "Status of the operation (output only).", + "description": "Status of the operation (output only). A status of \"done\" means that the request to update the authoritative servers has been sent, but the servers might not be updated yet.", "enum": [ "done", "pending" @@ -970,7 +970,7 @@ "type": "integer" }, "keyType": { - "description": "One of \"KEY_SIGNING\" or \"ZONE_SIGNING\". Keys of type KEY_SIGNING have the Secure Entry Point flag set and, when active, will be used to sign only resource record sets of type DNSKEY. 
Otherwise, the Secure Entry Point flag will be cleared and this key will be used to sign only resource record sets of other types.", + "description": "Specifies whether this is a key signing key (KSK) or a zone signing key (ZSK). Key signing keys have the Secure Entry Point flag set and, when active, will only be used to sign resource record sets of type DNSKEY. Zone signing keys do not have the Secure Entry Point flag set and will be used to sign all other types of resource record sets.", "enum": [ "keySigning", "zoneSigning" @@ -1066,6 +1066,22 @@ "type": "string" }, "type": "array" + }, + "privateVisibilityConfig": { + "$ref": "ManagedZonePrivateVisibilityConfig", + "description": "For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from." + }, + "visibility": { + "description": "The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources.", + "enum": [ + "private", + "public" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" } }, "type": "object" @@ -1139,6 +1155,39 @@ }, "type": "object" }, + "ManagedZonePrivateVisibilityConfig": { + "id": "ManagedZonePrivateVisibilityConfig", + "properties": { + "kind": { + "default": "dns#managedZonePrivateVisibilityConfig", + "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZonePrivateVisibilityConfig\".", + "type": "string" + }, + "networks": { + "description": "The list of VPC networks that can see this zone.", + "items": { + "$ref": "ManagedZonePrivateVisibilityConfigNetwork" + }, + "type": "array" + } + }, + "type": "object" + }, + "ManagedZonePrivateVisibilityConfigNetwork": { + "id": "ManagedZonePrivateVisibilityConfigNetwork", + "properties": { + "kind": { + "default": "dns#managedZonePrivateVisibilityConfigNetwork", + "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZonePrivateVisibilityConfigNetwork\".", + "type": "string" + }, + "networkUrl": { + "description": "The fully qualified URL of the VPC network to bind to. This should be formatted like https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}", + "type": "string" + } + }, + "type": "object" + }, "ManagedZonesListResponse": { "id": "ManagedZonesListResponse", "properties": { @@ -1186,7 +1235,7 @@ "type": "string" }, "status": { - "description": "Status of the operation. Can be one of the following: \"PENDING\" or \"DONE\" (output only).", + "description": "Status of the operation. Can be one of the following: \"PENDING\" or \"DONE\" (output only). 
A status of \"DONE\" means that the request to update the authoritative servers has been sent, but the servers might not be updated yet.", "enum": [ "done", "pending" @@ -1284,6 +1333,16 @@ "format": "int32", "type": "integer" }, + "managedZonesPerNetwork": { + "description": "Maximum allowed number of managed zones which can be attached to a network.", + "format": "int32", + "type": "integer" + }, + "networksPerManagedZone": { + "description": "Maximum allowed number of networks to which a privately scoped zone can be attached.", + "format": "int32", + "type": "integer" + }, "resourceRecordsPerRrset": { "description": "Maximum allowed number of ResourceRecords per ResourceRecordSet.", "format": "int32", @@ -1333,7 +1392,7 @@ "type": "string" }, "rrdatas": { - "description": "As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1).", + "description": "As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) -- see examples.", "items": { "type": "string" }, @@ -1352,7 +1411,7 @@ "type": "integer" }, "type": { - "description": "The identifier of a supported record type, for example, A, AAAA, MX, TXT, and so on.", + "description": "The identifier of a supported record type. See the list of Supported DNS record types.", "type": "string" } }, diff --git a/vendor/google.golang.org/api/dns/v1/dns-gen.go b/vendor/google.golang.org/api/dns/v1/dns-gen.go index 9bee01d842252..f900962e8dbf2 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v1/dns-gen.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All rights reserved. +// Copyright 2019 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,13 +6,39 @@ // Package dns provides access to the Google Cloud DNS API. // -// See https://developers.google.com/cloud-dns +// For product documentation, see: https://developers.google.com/cloud-dns +// +// Creating a client // // Usage example: // // import "google.golang.org/api/dns/v1" // ... -// dnsService, err := dns.New(oauthHttpClient) +// ctx := context.Background() +// dnsService, err := dns.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// Other authentication options +// +// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// +// dnsService, err := dns.NewService(ctx, option.WithScopes(dns.NdevClouddnsReadwriteScope)) +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// dnsService, err := dns.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// dnsService, err := dns.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. 
package dns // import "google.golang.org/api/dns/v1" import ( @@ -29,6 +55,8 @@ import ( gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + option "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code @@ -65,6 +93,35 @@ const ( NdevClouddnsReadwriteScope = "https://www.googleapis.com/auth/ndev.clouddns.readwrite" ) +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") @@ -158,7 +215,13 @@ type ResourceRecordSetsService struct { s *Service } -// Change: An atomic update to a collection of ResourceRecordSets. +// Change: A Change represents a set of ResourceRecordSet additions and +// deletions applied atomically to a ManagedZone. ResourceRecordSets +// within a ManagedZone are modified by creating a new Change element in +// the Changes collection. In turn the Changes collection also records +// the past modifications to the ResourceRecordSets in a ManagedZone. +// The current state of the ManagedZone is the sum effect of applying +// all Change elements in the Changes collection in sequence. type Change struct { // Additions: Which ResourceRecordSets to add? Additions []*ResourceRecordSet `json:"additions,omitempty"` @@ -182,7 +245,9 @@ type Change struct { // (output only). This is in RFC3339 text format. StartTime string `json:"startTime,omitempty"` - // Status: Status of the operation (output only). + // Status: Status of the operation (output only). A status of "done" + // means that the request to update the authoritative servers has been + // sent, but the servers might not be updated yet. // // Possible values: // "done" @@ -416,11 +481,12 @@ type DnsKeySpec struct { // KeyLength: Length of the keys in bits. KeyLength int64 `json:"keyLength,omitempty"` - // KeyType: One of "KEY_SIGNING" or "ZONE_SIGNING". Keys of type - // KEY_SIGNING have the Secure Entry Point flag set and, when active, - // will be used to sign only resource record sets of type DNSKEY. - // Otherwise, the Secure Entry Point flag will be cleared and this key - // will be used to sign only resource record sets of other types. + // KeyType: Specifies whether this is a key signing key (KSK) or a zone + // signing key (ZSK). Key signing keys have the Secure Entry Point flag + // set and, when active, will only be used to sign resource record sets + // of type DNSKEY. 
Zone signing keys do not have the Secure Entry Point + // flag set and will be used to sign all other types of resource record + // sets. // // Possible values: // "keySigning" @@ -551,6 +617,19 @@ type ManagedZone struct { // servers; defined by the server (output only) NameServers []string `json:"nameServers,omitempty"` + // PrivateVisibilityConfig: For privately visible zones, the set of + // Virtual Private Cloud resources that the zone is visible from. + PrivateVisibilityConfig *ManagedZonePrivateVisibilityConfig `json:"privateVisibilityConfig,omitempty"` + + // Visibility: The zone's visibility: public zones are exposed to the + // Internet, while private zones are visible only to Virtual Private + // Cloud resources. + // + // Possible values: + // "private" + // "public" + Visibility string `json:"visibility,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -677,6 +756,70 @@ func (s *ManagedZoneOperationsListResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ManagedZonePrivateVisibilityConfig struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#managedZonePrivateVisibilityConfig". + Kind string `json:"kind,omitempty"` + + // Networks: The list of VPC networks that can see this zone. + Networks []*ManagedZonePrivateVisibilityConfigNetwork `json:"networks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedZonePrivateVisibilityConfig) MarshalJSON() ([]byte, error) { + type NoMethod ManagedZonePrivateVisibilityConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ManagedZonePrivateVisibilityConfigNetwork struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#managedZonePrivateVisibilityConfigNetwork". + Kind string `json:"kind,omitempty"` + + // NetworkUrl: The fully qualified URL of the VPC network to bind to. + // This should be formatted like + // https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} + NetworkUrl string `json:"networkUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
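The Visibility and PrivateVisibilityConfig fields added to ManagedZone above combine as in this sketch; the helper name and the network URL parameter are illustrative, not part of the generated client:

    package example

    import (
        dns "google.golang.org/api/dns/v1"
    )

    // privateZone returns a ManagedZone fragment that is visible only from the
    // given VPC network; other ManagedZone fields (name, DNS name, ...) are
    // omitted here.
    func privateZone(networkURL string) *dns.ManagedZone {
        return &dns.ManagedZone{
            Visibility: "private", // the other allowed value is "public"
            PrivateVisibilityConfig: &dns.ManagedZonePrivateVisibilityConfig{
                Networks: []*dns.ManagedZonePrivateVisibilityConfigNetwork{
                    // Per the schema, networkUrl should look like
                    // https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}
                    {NetworkUrl: networkURL},
                },
            },
        }
    }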
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedZonePrivateVisibilityConfigNetwork) MarshalJSON() ([]byte, error) { + type NoMethod ManagedZonePrivateVisibilityConfigNetwork + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ManagedZonesListResponse struct { Header *ResponseHeader `json:"header,omitempty"` @@ -752,7 +895,9 @@ type Operation struct { StartTime string `json:"startTime,omitempty"` // Status: Status of the operation. Can be one of the following: - // "PENDING" or "DONE" (output only). + // "PENDING" or "DONE" (output only). A status of "DONE" means that the + // request to update the authoritative servers has been sent, but the + // servers might not be updated yet. // // Possible values: // "done" @@ -917,6 +1062,14 @@ type Quota struct { // ManagedZones: Maximum allowed number of managed zones in the project. ManagedZones int64 `json:"managedZones,omitempty"` + // ManagedZonesPerNetwork: Maximum allowed number of managed zones which + // can be attached to a network. + ManagedZonesPerNetwork int64 `json:"managedZonesPerNetwork,omitempty"` + + // NetworksPerManagedZone: Maximum allowed number of networks to which a + // privately scoped zone can be attached. + NetworksPerManagedZone int64 `json:"networksPerManagedZone,omitempty"` + // ResourceRecordsPerRrset: Maximum allowed number of ResourceRecords // per ResourceRecordSet. ResourceRecordsPerRrset int64 `json:"resourceRecordsPerRrset,omitempty"` @@ -977,7 +1130,7 @@ type ResourceRecordSet struct { Name string `json:"name,omitempty"` // Rrdatas: As defined in RFC 1035 (section 5) and RFC 1034 (section - // 3.6.1). + // 3.6.1) -- see examples. Rrdatas []string `json:"rrdatas,omitempty"` // SignatureRrdatas: As defined in RFC 4034 (section 3.2). @@ -987,8 +1140,8 @@ type ResourceRecordSet struct { // resolvers. Ttl int64 `json:"ttl,omitempty"` - // Type: The identifier of a supported record type, for example, A, - // AAAA, MX, TXT, and so on. + // Type: The identifier of a supported record type. See the list of + // Supported DNS record types. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Kind") to diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go index 4cef4adbb660d..0ef96b3f102b9 100644 --- a/vendor/google.golang.org/api/gensupport/media.go +++ b/vendor/google.golang.org/api/gensupport/media.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "io/ioutil" + "mime" "mime/multipart" "net/http" "net/textproto" @@ -115,11 +116,15 @@ type multipartReader struct { pipeOpen bool } -func newMultipartReader(parts []typeReader) *multipartReader { +// boundary optionally specifies the MIME boundary +func newMultipartReader(parts []typeReader, boundary string) *multipartReader { mp := &multipartReader{pipeOpen: true} var pw *io.PipeWriter mp.pr, pw = io.Pipe() mpw := multipart.NewWriter(pw) + if boundary != "" { + mpw.SetBoundary(boundary) + } mp.ctype = "multipart/related; boundary=" + mpw.Boundary() go func() { for _, part := range parts { @@ -163,10 +168,15 @@ func (mp *multipartReader) Close() error { // // The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF. func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { + return combineBodyMedia(body, bodyContentType, media, mediaContentType, "") +} + +// combineBodyMedia is CombineBodyMedia but with an optional mimeBoundary field. +func combineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType, mimeBoundary string) (io.ReadCloser, string) { mp := newMultipartReader([]typeReader{ {body, bodyContentType}, {media, mediaContentType}, - }) + }, mimeBoundary) return mp, mp.ctype } @@ -284,7 +294,11 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB getBody = func() (io.ReadCloser, error) { rb := ioutil.NopCloser(fb()) rm := ioutil.NopCloser(fm()) - r, _ := CombineBodyMedia(rb, "application/json", rm, mi.mType) + var mimeBoundary string + if _, params, err := mime.ParseMediaType(ctype); err == nil { + mimeBoundary = params["boundary"] + } + r, _ := combineBodyMedia(rb, "application/json", rm, mi.mType, mimeBoundary) return r, nil } } @@ -336,7 +350,14 @@ func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { } } -// SetGetBody sets the GetBody field of req to f. +// SetGetBody sets the GetBody field of req to f. This was once needed +// to gracefully support Go 1.7 and earlier which didn't have that +// field. +// +// Deprecated: the code generator no longer uses this as of +// 2019-02-19. Nothing else should be calling this anyway, but we +// won't delete this immediately; it will be deleted in as early as 6 +// months. func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) { req.GetBody = f } diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 8cdb03bb3fcf4..ab53767624a3a 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -189,32 +189,6 @@ func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { return buf, nil } -// endingWithErrorReader from r until it returns an error. If the -// final error from r is io.EOF and e is non-nil, e is used instead. 
-type endingWithErrorReader struct { - r io.Reader - e error -} - -func (er endingWithErrorReader) Read(p []byte) (n int, err error) { - n, err = er.r.Read(p) - if err == io.EOF && er.e != nil { - err = er.e - } - return -} - -// countingWriter counts the number of bytes it receives to write, but -// discards them. -type countingWriter struct { - n *int64 -} - -func (w countingWriter) Write(p []byte) (int, error) { - *w.n += int64(len(p)) - return len(p), nil -} - // ProgressUpdater is a function that is called upon every progress update of a resumable upload. // This is the only part of a resumable upload (from googleapi) that is usable by the developer. // The remaining usable pieces of resumable uploads is exposed in each auto-generated API. diff --git a/vendor/google.golang.org/api/googleapi/transport/BUILD.bazel b/vendor/google.golang.org/api/googleapi/transport/BUILD.bazel new file mode 100644 index 0000000000000..56dcbb3da4b2f --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/transport/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["apikey.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/api/googleapi/transport", + importpath = "google.golang.org/api/googleapi/transport", + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go new file mode 100644 index 0000000000000..eca1ea2507711 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -0,0 +1,38 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport contains HTTP transports used to make +// authenticated API requests. +package transport + +import ( + "errors" + "net/http" +) + +// APIKey is an HTTP Transport which wraps an underlying transport and +// appends an API Key "key" parameter to the URL of outgoing requests. +type APIKey struct { + // Key is the API Key to set on requests. + Key string + + // Transport is the underlying HTTP transport. + // If nil, http.DefaultTransport is used. 
+ Transport http.RoundTripper +} + +func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.Transport + if rt == nil { + rt = http.DefaultTransport + if rt == nil { + return nil, errors.New("googleapi/transport: no Transport specified or available") + } + } + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return rt.RoundTrip(&newReq) +} diff --git a/vendor/google.golang.org/api/iam/v1/BUILD.bazel b/vendor/google.golang.org/api/iam/v1/BUILD.bazel index 1e0d70bc4ecc3..bdc2afbdc3f98 100644 --- a/vendor/google.golang.org/api/iam/v1/BUILD.bazel +++ b/vendor/google.golang.org/api/iam/v1/BUILD.bazel @@ -9,5 +9,7 @@ go_library( deps = [ "//vendor/google.golang.org/api/gensupport:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/google.golang.org/api/option:go_default_library", + "//vendor/google.golang.org/api/transport/http:go_default_library", ], ) diff --git a/vendor/google.golang.org/api/iam/v1/iam-api.json b/vendor/google.golang.org/api/iam/v1/iam-api.json index 4536f018d484f..8664e06927ccf 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-api.json +++ b/vendor/google.golang.org/api/iam/v1/iam-api.json @@ -265,7 +265,7 @@ "type": "boolean" }, "view": { - "description": "Optional view for the returned Role objects.", + "description": "Optional view for the returned Role objects. When `FULL` is specified,\nthe `includedPermissions` field is returned, which includes a list of all\npermissions in the role. The default value is `BASIC`, which does not\nreturn the `includedPermissions` field.", "enum": [ "BASIC", "FULL" @@ -491,7 +491,7 @@ "type": "boolean" }, "view": { - "description": "Optional view for the returned Role objects.", + "description": "Optional view for the returned Role objects. When `FULL` is specified,\nthe `includedPermissions` field is returned, which includes a list of all\npermissions in the role. The default value is `BASIC`, which does not\nreturn the `includedPermissions` field.", "enum": [ "BASIC", "FULL" @@ -627,6 +627,62 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "disable": { + "description": "DisableServiceAccount is currently in the alpha launch stage.\n\nDisables a ServiceAccount,\nwhich immediately prevents the service account from authenticating and\ngaining access to APIs.\n\nDisabled service accounts can be safely restored by using\nEnableServiceAccount at any point. Deleted service accounts cannot be\nrestored using this method.\n\nDisabling a service account that is bound to VMs, Apps, Functions, or\nother jobs will cause those jobs to lose access to resources if they are\nusing the disabled service account.\n\nTo improve reliability of your services and avoid unexpected outages, it\nis recommended to first disable a service account rather than delete it.\nAfter disabling the service account, wait at least 24 hours to verify there\nare no unintended consequences, and then delete the service account.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:disable", + "httpMethod": "POST", + "id": "iam.projects.serviceAccounts.disable", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. 
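The googleapi/transport package vendored above wraps a RoundTripper to append a "key" query parameter; a minimal usage sketch, with the key value supplied by the caller:

    package example

    import (
        "net/http"

        "google.golang.org/api/googleapi/transport"
    )

    // clientWithAPIKey returns an *http.Client whose requests carry the given
    // API key as a "key" URL parameter.
    func clientWithAPIKey(key string) *http.Client {
        return &http.Client{
            Transport: &transport.APIKey{
                Key: key,
                // Transport left nil: http.DefaultTransport is used.
            },
        }
    }

For the generated services themselves, the deprecation note on New in this diff points to option.WithAPIKey with NewService instead.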
The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:disable", + "request": { + "$ref": "DisableServiceAccountRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "enable": { + "description": "EnableServiceAccount is currently in the alpha launch stage.\n\n Restores a disabled ServiceAccount\n that has been manually disabled by using DisableServiceAccount. Service\n accounts that have been disabled by other means or for other reasons,\n such as abuse, cannot be restored using this method.\n\n EnableServiceAccount will have no effect on a service account that is\n not disabled. Enabling an already enabled service account will have no\n effect.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:enable", + "httpMethod": "POST", + "id": "iam.projects.serviceAccounts.enable", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}'.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:enable", + "request": { + "$ref": "EnableServiceAccountRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "get": { "description": "Gets a ServiceAccount.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", @@ -653,7 +709,7 @@ ] }, "getIamPolicy": { - "description": "Returns the IAM access control policy for a\nServiceAccount.", + "description": "Returns the Cloud IAM access control policy for a\nServiceAccount.\n\nNote: Service accounts are both\n[resources and\nidentities](/iam/docs/service-accounts#service_account_permissions). This\nmethod treats the service account as a resource. It returns the Cloud IAM\npolicy that reflects what members have access to the service account.\n\nThis method does not return what resources the service account has access\nto. To see if a service account has access to a resource, call the\n`getIamPolicy` method on the target resource. For example, to view grants\nfor a project, call the\n[projects.getIamPolicy](/resource-manager/reference/rest/v1/projects/getIamPolicy)\nmethod.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:getIamPolicy", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.getIamPolicy", @@ -713,8 +769,36 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "patch": { + "description": "Patches a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` and `description`.\n\nOnly fields specified in the request are guaranteed to be returned in\nthe response. 
Other fields in the response may be empty.\n\nNote: The field mask is required.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", + "httpMethod": "PATCH", + "id": "iam.projects.serviceAccounts.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nRequests using `-` as a wildcard for the `PROJECT_ID` will infer the\nproject from the `account` and the `ACCOUNT` value can be the `email`\naddress or the `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "PatchServiceAccountRequest" + }, + "response": { + "$ref": "ServiceAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "setIamPolicy": { - "description": "Sets the IAM access control policy for a\nServiceAccount.", + "description": "Sets the Cloud IAM access control policy for a\nServiceAccount.\n\nNote: Service accounts are both\n[resources and\nidentities](/iam/docs/service-accounts#service_account_permissions). This\nmethod treats the service account as a resource. Use it to grant members\naccess to the service account, such as when they need to impersonate it.\n\nThis method does not grant the service account access to other resources,\nsuch as projects. To grant a service account access to resources, include\nthe service account in the Cloud IAM policy for the desired resource, then\ncall the appropriate `setIamPolicy` method on the target resource. For\nexample, to grant a service account access to a project, call the\n[projects.setIamPolicy](/resource-manager/reference/rest/v1/projects/setIamPolicy)\nmethod.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:setIamPolicy", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.setIamPolicy", @@ -742,7 +826,7 @@ ] }, "signBlob": { - "description": "Signs a blob using a service account's system-managed private key.", + "description": "**Note**: This method is in the process of being deprecated. Call the\n[`signBlob()`](/iam/credentials/reference/rest/v1/projects.serviceAccounts/signBlob)\nmethod of the Cloud IAM Service Account Credentials API instead.\n\nSigns a blob using a service account's system-managed private key.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.signBlob", @@ -770,7 +854,7 @@ ] }, "signJwt": { - "description": "Signs a JWT using a service account's system-managed private key.\n\nIf no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM sets an\nan expiry time of one hour by default. If you request an expiry time of\nmore than one hour, the request will fail.", + "description": "**Note**: This method is in the process of being deprecated. Call the\n[`signJwt()`](/iam/credentials/reference/rest/v1/projects.serviceAccounts/signJwt)\nmethod of the Cloud IAM Service Account Credentials API instead.\n\nSigns a JWT using a service account's system-managed private key.\n\nIf no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM sets an\nan expiry time of one hour by default. 
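The new patch method above takes a PatchServiceAccountRequest and requires a field mask; a sketch of the request body, where the comma-separated mask string is an assumption based on the standard field-mask encoding and the two updatable fields named in the description:

    package example

    import (
        iam "google.golang.org/api/iam/v1"
    )

    // patchRequest updates only the two fields the API documents as updatable.
    func patchRequest(displayName, description string) *iam.PatchServiceAccountRequest {
        return &iam.PatchServiceAccountRequest{
            ServiceAccount: &iam.ServiceAccount{
                DisplayName: displayName,
                Description: description,
            },
            // The discovery document marks updateMask as required
            // (format: google-fieldmask).
            UpdateMask: "display_name,description",
        }
    }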
If you request an expiry time of\nmore than one hour, the request will fail.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", "httpMethod": "POST", "id": "iam.projects.serviceAccounts.signJwt", @@ -825,8 +909,36 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "undelete": { + "description": "Restores a deleted ServiceAccount.\nThis is to be used as an action of last resort. A service account may\nnot always be restorable.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:undelete", + "httpMethod": "POST", + "id": "iam.projects.serviceAccounts.undelete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}'.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:undelete", + "request": { + "$ref": "UndeleteServiceAccountRequest" + }, + "response": { + "$ref": "UndeleteServiceAccountResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "update": { - "description": "Updates a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` .\nThe `etag` is mandatory.", + "description": "Note: This method is in the process of being deprecated. Use\nPatchServiceAccount instead.\n\nUpdates a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` and `description`.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", "httpMethod": "PUT", "id": "iam.projects.serviceAccounts.update", @@ -1043,7 +1155,7 @@ "type": "boolean" }, "view": { - "description": "Optional view for the returned Role objects.", + "description": "Optional view for the returned Role objects. When `FULL` is specified,\nthe `includedPermissions` field is returned, which includes a list of all\npermissions in the role. The default value is `BASIC`, which does not\nreturn the `includedPermissions` field.", "enum": [ "BASIC", "FULL" @@ -1081,9 +1193,20 @@ } } }, - "revision": "20181005", + "revision": "20190429", "rootUrl": "https://iam.googleapis.com/", "schemas": { + "AdminAuditData": { + "description": "Audit log information specific to Cloud IAM admin APIs. This message is\nserialized as an `Any` type in the `ServiceData` message of an\n`AuditLog` message.", + "id": "AdminAuditData", + "properties": { + "permissionDelta": { + "$ref": "PermissionDelta", + "description": "The permission_delta when when creating or updating a Role." 
+ } + }, + "type": "object" + }, "AuditConfig": { "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"fooservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:bar@gmail.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor fooservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts foo@gmail.com from DATA_READ logging, and\nbar@gmail.com from DATA_WRITE logging.", "id": "AuditConfig", @@ -1160,10 +1283,10 @@ "properties": { "condition": { "$ref": "Expr", - "description": "Unimplemented. The condition that is associated with this binding.\nNOTE: an unsatisfied condition will not allow user access via current\nbinding. Different bindings, including their conditions, are examined\nindependently." + "description": "The condition that is associated with this binding.\nNOTE: An unsatisfied condition will not allow user access via current\nbinding. Different bindings, including their conditions, are examined\nindependently." }, "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. 
For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", "items": { "type": "string" }, @@ -1269,17 +1392,29 @@ }, "serviceAccount": { "$ref": "ServiceAccount", - "description": "The ServiceAccount resource to create.\nCurrently, only the following values are user assignable:\n`display_name` ." + "description": "The ServiceAccount resource to\ncreate. Currently, only the following values are user assignable:\n`display_name` ." } }, "type": "object" }, + "DisableServiceAccountRequest": { + "description": "The service account disable request.", + "id": "DisableServiceAccountRequest", + "properties": {}, + "type": "object" + }, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" }, + "EnableServiceAccountRequest": { + "description": "The service account enable request.", + "id": "EnableServiceAccountRequest", + "properties": {}, + "type": "object" + }, "Expr": { "description": "Represents an expression text. Example:\n\n title: \"User account presence\"\n description: \"Determines whether the request has a user account\"\n expression: \"size(request.user) \u003e 0\"", "id": "Expr", @@ -1463,6 +1598,20 @@ }, "type": "object" }, + "PatchServiceAccountRequest": { + "description": "The patch service account request.", + "id": "PatchServiceAccountRequest", + "properties": { + "serviceAccount": { + "$ref": "ServiceAccount" + }, + "updateMask": { + "format": "google-fieldmask", + "type": "string" + } + }, + "type": "object" + }, "Permission": { "description": "A permission which can be included by a role.", "id": "Permission", @@ -1520,6 +1669,27 @@ }, "type": "object" }, + "PermissionDelta": { + "description": "A PermissionDelta message to record the added_permissions and\nremoved_permissions inside a role.", + "id": "PermissionDelta", + "properties": { + "addedPermissions": { + "description": "Added permissions.", + "items": { + "type": "string" + }, + "type": "array" + }, + "removedPermissions": { + "description": "Removed permissions.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "Policy": { "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. 
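The member formats enumerated in the Binding description above map directly onto the generated struct; an illustrative value, noting that the Role field is part of the iam v1 schema even though its declaration is not shown in this hunk:

    package example

    import (
        iam "google.golang.org/api/iam/v1"
    )

    // exampleBinding uses the member formats listed above: user, service
    // account, group and (G Suite) domain identities.
    func exampleBinding() *iam.Binding {
        return &iam.Binding{
            Role: "roles/viewer",
            Members: []string{
                "user:alice@gmail.com",
                "serviceAccount:my-other-app@appspot.gserviceaccount.com",
                "group:admins@example.com",
                "domain:example.com",
            },
        }
    }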
A `role` is a named list of permissions\ndefined by IAM.\n\n**JSON Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\n**YAML Example**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-other-app@appspot.gserviceaccount.com\n role: roles/owner\n - members:\n - user:sean@example.com\n role: roles/viewer\n\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam/docs).", "id": "Policy", @@ -1736,6 +1906,14 @@ "description": "A service account in the Identity and Access Management API.\n\nTo create a service account, specify the `project_id` and the `account_id`\nfor the account. The `account_id` is unique within the project, and is used\nto generate the service account email address and a stable\n`unique_id`.\n\nIf the account already exists, the account's resource name is returned\nin the format of projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}. The caller\ncan use the name in other methods to access the account.\n\nAll other methods can identify the service account using the format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", "id": "ServiceAccount", "properties": { + "description": { + "description": "Optional. A user-specified opaque description of the service account.\nMust be less than or equal to 256 UTF-8 bytes.", + "type": "string" + }, + "disabled": { + "description": "@OutputOnly A bool indicate if the service account is disabled.\nThe field is currently in alpha phase.", + "type": "boolean" + }, "displayName": { "description": "Optional. A user-specified name for the service account.\nMust be less than or equal to 100 UTF-8 bytes.", "type": "string" @@ -1936,6 +2114,22 @@ } }, "type": "object" + }, + "UndeleteServiceAccountRequest": { + "description": "The service account undelete request.", + "id": "UndeleteServiceAccountRequest", + "properties": {}, + "type": "object" + }, + "UndeleteServiceAccountResponse": { + "id": "UndeleteServiceAccountResponse", + "properties": { + "restoredAccount": { + "$ref": "ServiceAccount", + "description": "Metadata for the restored service account." + } + }, + "type": "object" } }, "servicePath": "", diff --git a/vendor/google.golang.org/api/iam/v1/iam-gen.go b/vendor/google.golang.org/api/iam/v1/iam-gen.go index 8645f10ae6cc7..6ec73553ad273 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-gen.go +++ b/vendor/google.golang.org/api/iam/v1/iam-gen.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All rights reserved. +// Copyright 2019 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,13 +6,35 @@ // Package iam provides access to the Identity and Access Management (IAM) API. // -// See https://cloud.google.com/iam/ +// For product documentation, see: https://cloud.google.com/iam/ +// +// Creating a client // // Usage example: // // import "google.golang.org/api/iam/v1" // ... 
-// iamService, err := iam.New(oauthHttpClient) +// ctx := context.Background() +// iamService, err := iam.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// Other authentication options +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// iamService, err := iam.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// iamService, err := iam.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. package iam // import "google.golang.org/api/iam/v1" import ( @@ -29,6 +51,8 @@ import ( gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + option "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code @@ -56,6 +80,32 @@ const ( CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" ) +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") @@ -185,6 +235,40 @@ type RolesService struct { s *Service } +// AdminAuditData: Audit log information specific to Cloud IAM admin +// APIs. This message is +// serialized as an `Any` type in the `ServiceData` message of +// an +// `AuditLog` message. +type AdminAuditData struct { + // PermissionDelta: The permission_delta when when creating or updating + // a Role. + PermissionDelta *PermissionDelta `json:"permissionDelta,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PermissionDelta") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PermissionDelta") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AdminAuditData) MarshalJSON() ([]byte, error) { + type NoMethod AdminAuditData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AuditConfig: Specifies the audit configuration for a service. // The configuration determines which permission types are logged, and // what @@ -402,9 +486,8 @@ func (s *AuditableService) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // Condition: Unimplemented. The condition that is associated with this - // binding. - // NOTE: an unsatisfied condition will not allow user access via + // Condition: The condition that is associated with this binding. + // NOTE: An unsatisfied condition will not allow user access via // current // binding. Different bindings, including their conditions, are // examined @@ -438,7 +521,7 @@ type Binding struct { // For example, `admins@example.com`. // // - // * `domain:{domain}`: A Google Apps domain name that represents all + // * `domain:{domain}`: The G Suite domain (primary) that represents all // the // users of that domain. For example, `google.com` or // `example.com`. @@ -620,8 +703,8 @@ type CreateServiceAccountRequest struct { // `[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035. AccountId string `json:"accountId,omitempty"` - // ServiceAccount: The ServiceAccount resource to create. - // Currently, only the following values are user + // ServiceAccount: The ServiceAccount resource to + // create. Currently, only the following values are user // assignable: // `display_name` . ServiceAccount *ServiceAccount `json:"serviceAccount,omitempty"` @@ -649,6 +732,10 @@ func (s *CreateServiceAccountRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DisableServiceAccountRequest: The service account disable request. +type DisableServiceAccountRequest struct { +} + // Empty: A generic empty message that you can re-use to avoid defining // duplicated // empty messages in your APIs. A typical example is to use it as the @@ -667,6 +754,10 @@ type Empty struct { googleapi.ServerResponse `json:"-"` } +// EnableServiceAccountRequest: The service account enable request. +type EnableServiceAccountRequest struct { +} + // Expr: Represents an expression text. Example: // // title: "User account presence" @@ -1078,6 +1169,36 @@ func (s *ListServiceAccountsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// PatchServiceAccountRequest: The patch service account request. +type PatchServiceAccountRequest struct { + ServiceAccount *ServiceAccount `json:"serviceAccount,omitempty"` + + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ServiceAccount") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"ServiceAccount") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *PatchServiceAccountRequest) MarshalJSON() ([]byte, error) { + type NoMethod PatchServiceAccountRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Permission: A permission which can be included by a role. type Permission struct { // ApiDisabled: The service API associated with the permission is not @@ -1138,6 +1259,40 @@ func (s *Permission) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// PermissionDelta: A PermissionDelta message to record the +// added_permissions and +// removed_permissions inside a role. +type PermissionDelta struct { + // AddedPermissions: Added permissions. + AddedPermissions []string `json:"addedPermissions,omitempty"` + + // RemovedPermissions: Removed permissions. + RemovedPermissions []string `json:"removedPermissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AddedPermissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AddedPermissions") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *PermissionDelta) MarshalJSON() ([]byte, error) { + type NoMethod PermissionDelta + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Policy: Defines an Identity and Access Management (IAM) policy. It is // used to // specify access control policies for Cloud Platform resources. @@ -1627,6 +1782,16 @@ func (s *Role) MarshalJSON() ([]byte, error) { // the // `unique_id` of the service account. type ServiceAccount struct { + // Description: Optional. A user-specified opaque description of the + // service account. + // Must be less than or equal to 256 UTF-8 bytes. + Description string `json:"description,omitempty"` + + // Disabled: @OutputOnly A bool indicate if the service account is + // disabled. + // The field is currently in alpha phase. + Disabled bool `json:"disabled,omitempty"` + // DisplayName: Optional. A user-specified name for the service // account. // Must be less than or equal to 100 UTF-8 bytes. @@ -1674,7 +1839,7 @@ type ServiceAccount struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "DisplayName") to + // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. 
By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1682,7 +1847,7 @@ type ServiceAccount struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DisplayName") to include + // NullFields is a list of field names (e.g. "Description") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -2075,6 +2240,42 @@ func (s *UndeleteRoleRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UndeleteServiceAccountRequest: The service account undelete request. +type UndeleteServiceAccountRequest struct { +} + +type UndeleteServiceAccountResponse struct { + // RestoredAccount: Metadata for the restored service account. + RestoredAccount *ServiceAccount `json:"restoredAccount,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "RestoredAccount") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RestoredAccount") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UndeleteServiceAccountResponse) MarshalJSON() ([]byte, error) { + type NoMethod UndeleteServiceAccountResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // method id "iam.iamPolicies.lintPolicy": type IamPoliciesLintPolicyCall struct { @@ -2830,7 +3031,12 @@ func (c *OrganizationsRolesListCall) ShowDeleted(showDeleted bool) *Organization } // View sets the optional parameter "view": Optional view for the -// returned Role objects. +// returned Role objects. When `FULL` is specified, +// the `includedPermissions` field is returned, which includes a list of +// all +// permissions in the role. The default value is `BASIC`, which does +// not +// return the `includedPermissions` field. // // Possible values: // "BASIC" @@ -2970,7 +3176,7 @@ func (c *OrganizationsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRole // "type": "boolean" // }, // "view": { - // "description": "Optional view for the returned Role objects.", + // "description": "Optional view for the returned Role objects. When `FULL` is specified,\nthe `includedPermissions` field is returned, which includes a list of all\npermissions in the role. 
The default value is `BASIC`, which does not\nreturn the `includedPermissions` field.", // "enum": [ // "BASIC", // "FULL" @@ -3925,7 +4131,12 @@ func (c *ProjectsRolesListCall) ShowDeleted(showDeleted bool) *ProjectsRolesList } // View sets the optional parameter "view": Optional view for the -// returned Role objects. +// returned Role objects. When `FULL` is specified, +// the `includedPermissions` field is returned, which includes a list of +// all +// permissions in the role. The default value is `BASIC`, which does +// not +// return the `includedPermissions` field. // // Possible values: // "BASIC" @@ -4065,7 +4276,7 @@ func (c *ProjectsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRolesResp // "type": "boolean" // }, // "view": { - // "description": "Optional view for the returned Role objects.", + // "description": "Optional view for the returned Role objects. When `FULL` is specified,\nthe `includedPermissions` field is returned, which includes a list of all\npermissions in the role. The default value is `BASIC`, which does not\nreturn the `includedPermissions` field.", // "enum": [ // "BASIC", // "FULL" @@ -4666,74 +4877,93 @@ func (c *ProjectsServiceAccountsDeleteCall) Do(opts ...googleapi.CallOption) (*E } -// method id "iam.projects.serviceAccounts.get": +// method id "iam.projects.serviceAccounts.disable": -type ProjectsServiceAccountsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsServiceAccountsDisableCall struct { + s *Service + name string + disableserviceaccountrequest *DisableServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets a ServiceAccount. -func (r *ProjectsServiceAccountsService) Get(name string) *ProjectsServiceAccountsGetCall { - c := &ProjectsServiceAccountsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Disable: DisableServiceAccount is currently in the alpha launch +// stage. +// +// Disables a ServiceAccount, +// which immediately prevents the service account from authenticating +// and +// gaining access to APIs. +// +// Disabled service accounts can be safely restored by +// using +// EnableServiceAccount at any point. Deleted service accounts cannot +// be +// restored using this method. +// +// Disabling a service account that is bound to VMs, Apps, Functions, +// or +// other jobs will cause those jobs to lose access to resources if they +// are +// using the disabled service account. +// +// To improve reliability of your services and avoid unexpected outages, +// it +// is recommended to first disable a service account rather than delete +// it. +// After disabling the service account, wait at least 24 hours to verify +// there +// are no unintended consequences, and then delete the service account. +func (r *ProjectsServiceAccountsService) Disable(name string, disableserviceaccountrequest *DisableServiceAccountRequest) *ProjectsServiceAccountsDisableCall { + c := &ProjectsServiceAccountsDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name + c.disableserviceaccountrequest = disableserviceaccountrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsServiceAccountsGetCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGetCall { +func (c *ProjectsServiceAccountsDisableCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsDisableCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsServiceAccountsGetCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsServiceAccountsGetCall) Context(ctx context.Context) *ProjectsServiceAccountsGetCall { +func (c *ProjectsServiceAccountsDisableCall) Context(ctx context.Context) *ProjectsServiceAccountsDisableCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsServiceAccountsGetCall) Header() http.Header { +func (c *ProjectsServiceAccountsDisableCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsServiceAccountsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsServiceAccountsDisableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disableserviceaccountrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:disable") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -4744,14 +4974,14 @@ func (c *ProjectsServiceAccountsGetCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.serviceAccounts.get" call. -// Exactly one of *ServiceAccount or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ServiceAccount.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsServiceAccountsGetCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) { +// Do executes the "iam.projects.serviceAccounts.disable" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsServiceAccountsDisableCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4770,7 +5000,7 @@ func (c *ProjectsServiceAccountsGetCall) Do(opts ...googleapi.CallOption) (*Serv if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ServiceAccount{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4782,10 +5012,10 @@ func (c *ProjectsServiceAccountsGetCall) Do(opts ...googleapi.CallOption) (*Serv } return ret, nil // { - // "description": "Gets a ServiceAccount.", - // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", - // "httpMethod": "GET", - // "id": "iam.projects.serviceAccounts.get", + // "description": "DisableServiceAccount is currently in the alpha launch stage.\n\nDisables a ServiceAccount,\nwhich immediately prevents the service account from authenticating and\ngaining access to APIs.\n\nDisabled service accounts can be safely restored by using\nEnableServiceAccount at any point. Deleted service accounts cannot be\nrestored using this method.\n\nDisabling a service account that is bound to VMs, Apps, Functions, or\nother jobs will cause those jobs to lose access to resources if they are\nusing the disabled service account.\n\nTo improve reliability of your services and avoid unexpected outages, it\nis recommended to first disable a service account rather than delete it.\nAfter disabling the service account, wait at least 24 hours to verify there\nare no unintended consequences, and then delete the service account.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:disable", + // "httpMethod": "POST", + // "id": "iam.projects.serviceAccounts.disable", // "parameterOrder": [ // "name" // ], @@ -4798,9 +5028,12 @@ func (c *ProjectsServiceAccountsGetCall) Do(opts ...googleapi.CallOption) (*Serv // "type": "string" // } // }, - // "path": "v1/{+name}", + // "path": "v1/{+name}:disable", + // "request": { + // "$ref": "DisableServiceAccountRequest" + // }, // "response": { - // "$ref": "ServiceAccount" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -4809,29 +5042,43 @@ func (c *ProjectsServiceAccountsGetCall) Do(opts ...googleapi.CallOption) (*Serv } -// method id "iam.projects.serviceAccounts.getIamPolicy": +// method id "iam.projects.serviceAccounts.enable": -type ProjectsServiceAccountsGetIamPolicyCall struct { - s *Service - resource string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsServiceAccountsEnableCall struct { + s *Service + name string + enableserviceaccountrequest *EnableServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Returns the IAM access control policy for -// a -// ServiceAccount. -func (r *ProjectsServiceAccountsService) GetIamPolicy(resource string) *ProjectsServiceAccountsGetIamPolicyCall { - c := &ProjectsServiceAccountsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource +// Enable: EnableServiceAccount is currently in the alpha launch +// stage. 
+// +// Restores a disabled ServiceAccount +// that has been manually disabled by using DisableServiceAccount. +// Service +// accounts that have been disabled by other means or for other +// reasons, +// such as abuse, cannot be restored using this method. +// +// EnableServiceAccount will have no effect on a service account that +// is +// not disabled. Enabling an already enabled service account will have +// no +// effect. +func (r *ProjectsServiceAccountsService) Enable(name string, enableserviceaccountrequest *EnableServiceAccountRequest) *ProjectsServiceAccountsEnableCall { + c := &ProjectsServiceAccountsEnableCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.enableserviceaccountrequest = enableserviceaccountrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsServiceAccountsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGetIamPolicyCall { +func (c *ProjectsServiceAccountsEnableCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsEnableCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4839,30 +5086,35 @@ func (c *ProjectsServiceAccountsGetIamPolicyCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsServiceAccountsGetIamPolicyCall) Context(ctx context.Context) *ProjectsServiceAccountsGetIamPolicyCall { +func (c *ProjectsServiceAccountsEnableCall) Context(ctx context.Context) *ProjectsServiceAccountsEnableCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsServiceAccountsGetIamPolicyCall) Header() http.Header { +func (c *ProjectsServiceAccountsEnableCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsServiceAccountsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsServiceAccountsEnableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.enableserviceaccountrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:enable") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -4870,19 +5122,19 @@ func (c *ProjectsServiceAccountsGetIamPolicyCall) doRequest(alt string) (*http.R } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.serviceAccounts.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// Do executes the "iam.projects.serviceAccounts.enable" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. 
Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) +// *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ProjectsServiceAccountsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *ProjectsServiceAccountsEnableCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4901,7 +5153,7 @@ func (c *ProjectsServiceAccountsGetIamPolicyCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4913,25 +5165,28 @@ func (c *ProjectsServiceAccountsGetIamPolicyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns the IAM access control policy for a\nServiceAccount.", - // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:getIamPolicy", + // "description": "EnableServiceAccount is currently in the alpha launch stage.\n\n Restores a disabled ServiceAccount\n that has been manually disabled by using DisableServiceAccount. Service\n accounts that have been disabled by other means or for other reasons,\n such as abuse, cannot be restored using this method.\n\n EnableServiceAccount will have no effect on a service account that is\n not disabled. Enabling an already enabled service account will have no\n effect.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:enable", // "httpMethod": "POST", - // "id": "iam.projects.serviceAccounts.getIamPolicy", + // "id": "iam.projects.serviceAccounts.enable", // "parameterOrder": [ - // "resource" + // "name" // ], // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "name": { + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}'.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v1/{+resource}:getIamPolicy", + // "path": "v1/{+name}:enable", + // "request": { + // "$ref": "EnableServiceAccountRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -4940,9 +5195,9 @@ func (c *ProjectsServiceAccountsGetIamPolicyCall) Do(opts ...googleapi.CallOptio } -// method id "iam.projects.serviceAccounts.list": +// method id "iam.projects.serviceAccounts.get": -type ProjectsServiceAccountsListCall struct { +type ProjectsServiceAccountsGetCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -4951,15 +5206,311 @@ type ProjectsServiceAccountsListCall struct { header_ http.Header } -// List: Lists ServiceAccounts for a project. 
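// Illustrative sketch (editorial example, not part of the vendored generated code):
// exercising the new Disable/Enable calls added above. The svc.Projects.ServiceAccounts
// accessor chain follows this package's standard generated layout but is assumed here;
// the *iam.Service itself is assumed to be constructed elsewhere (e.g. iam.New with an
// authenticated *http.Client).
package example

import (
	"context"
	"fmt"

	iam "google.golang.org/api/iam/v1"
)

// disableThenEnable follows the flow recommended in the Disable documentation:
// disable first, watch for unintended breakage, and only then delete -- or, as
// here, restore the account with Enable if something still depends on it.
func disableThenEnable(ctx context.Context, svc *iam.Service, uniqueID string) error {
	// Per the docs, `-` as the project infers the project from the account's unique ID.
	name := "projects/-/serviceAccounts/" + uniqueID

	if _, err := svc.Projects.ServiceAccounts.Disable(name, &iam.DisableServiceAccountRequest{}).Context(ctx).Do(); err != nil {
		return fmt.Errorf("disable: %v", err)
	}
	// ... wait (the docs suggest at least 24 hours) and verify nothing broke ...
	if _, err := svc.Projects.ServiceAccounts.Enable(name, &iam.EnableServiceAccountRequest{}).Context(ctx).Do(); err != nil {
		return fmt.Errorf("enable: %v", err)
	}
	return nil
}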
-func (r *ProjectsServiceAccountsService) List(name string) *ProjectsServiceAccountsListCall { - c := &ProjectsServiceAccountsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Gets a ServiceAccount. +func (r *ProjectsServiceAccountsService) Get(name string) *ProjectsServiceAccountsGetCall { + c := &ProjectsServiceAccountsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } -// PageSize sets the optional parameter "pageSize": Optional limit on -// the number of service accounts to include in the +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServiceAccountsGetCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsServiceAccountsGetCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountsGetCall) Context(ctx context.Context) *ProjectsServiceAccountsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.serviceAccounts.get" call. +// Exactly one of *ServiceAccount or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ServiceAccount.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsGetCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ServiceAccount{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a ServiceAccount.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", + // "httpMethod": "GET", + // "id": "iam.projects.serviceAccounts.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "ServiceAccount" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iam.projects.serviceAccounts.getIamPolicy": + +type ProjectsServiceAccountsGetIamPolicyCall struct { + s *Service + resource string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Returns the Cloud IAM access control policy for +// a +// ServiceAccount. +// +// Note: Service accounts are both +// [resources +// and +// identities](/iam/docs/service-accounts#service_account_permissions +// ). This +// method treats the service account as a resource. It returns the Cloud +// IAM +// policy that reflects what members have access to the service +// account. +// +// This method does not return what resources the service account has +// access +// to. To see if a service account has access to a resource, call +// the +// `getIamPolicy` method on the target resource. For example, to view +// grants +// for a project, call +// the +// [projects.getIamPolicy](/resource-manager/reference/rest/v1/projec +// ts/getIamPolicy) +// method. +func (r *ProjectsServiceAccountsService) GetIamPolicy(resource string) *ProjectsServiceAccountsGetIamPolicyCall { + c := &ProjectsServiceAccountsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServiceAccountsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsServiceAccountsGetIamPolicyCall) Context(ctx context.Context) *ProjectsServiceAccountsGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.serviceAccounts.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsServiceAccountsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the Cloud IAM access control policy for a\nServiceAccount.\n\nNote: Service accounts are both\n[resources and\nidentities](/iam/docs/service-accounts#service_account_permissions). This\nmethod treats the service account as a resource. It returns the Cloud IAM\npolicy that reflects what members have access to the service account.\n\nThis method does not return what resources the service account has access\nto. To see if a service account has access to a resource, call the\n`getIamPolicy` method on the target resource. 
For example, to view grants\nfor a project, call the\n[projects.getIamPolicy](/resource-manager/reference/rest/v1/projects/getIamPolicy)\nmethod.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:getIamPolicy", + // "httpMethod": "POST", + // "id": "iam.projects.serviceAccounts.getIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iam.projects.serviceAccounts.list": + +type ProjectsServiceAccountsListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists ServiceAccounts for a project. +func (r *ProjectsServiceAccountsService) List(name string) *ProjectsServiceAccountsListCall { + c := &ProjectsServiceAccountsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// PageSize sets the optional parameter "pageSize": Optional limit on +// the number of service accounts to include in the // response. Further accounts can subsequently be obtained by including // the // ListServiceAccountsResponse.next_page_token @@ -5135,6 +5686,154 @@ func (c *ProjectsServiceAccountsListCall) Pages(ctx context.Context, f func(*Lis } } +// method id "iam.projects.serviceAccounts.patch": + +type ProjectsServiceAccountsPatchCall struct { + s *Service + name string + patchserviceaccountrequest *PatchServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches a ServiceAccount. +// +// Currently, only the following fields are updatable: +// `display_name` and `description`. +// +// Only fields specified in the request are guaranteed to be returned +// in +// the response. Other fields in the response may be empty. +// +// Note: The field mask is required. +func (r *ProjectsServiceAccountsService) Patch(name string, patchserviceaccountrequest *PatchServiceAccountRequest) *ProjectsServiceAccountsPatchCall { + c := &ProjectsServiceAccountsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.patchserviceaccountrequest = patchserviceaccountrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServiceAccountsPatchCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountsPatchCall) Context(ctx context.Context) *ProjectsServiceAccountsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
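// Illustrative sketch (editorial example, not part of the vendored generated code):
// as the getIamPolicy documentation above stresses, this call answers "who can act on
// this service account", not "what can this service account access". The accessor chain
// and the Policy/Binding field names follow this package's standard generated layout and
// are assumed here, since they are not reproduced in this hunk.
package example

import (
	"context"
	"fmt"

	iam "google.golang.org/api/iam/v1"
)

// printServiceAccountPolicy lists the members holding each role on the service
// account resource itself (for example roles/iam.serviceAccountUser, which allows
// impersonation of the account).
func printServiceAccountPolicy(ctx context.Context, svc *iam.Service, projectID, email string) error {
	resource := "projects/" + projectID + "/serviceAccounts/" + email
	policy, err := svc.Projects.ServiceAccounts.GetIamPolicy(resource).Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, b := range policy.Bindings {
		fmt.Printf("%s: %v\n", b.Role, b.Members)
	}
	return nil
}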
+func (c *ProjectsServiceAccountsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.patchserviceaccountrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.serviceAccounts.patch" call. +// Exactly one of *ServiceAccount or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ServiceAccount.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsPatchCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ServiceAccount{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` and `description`.\n\nOnly fields specified in the request are guaranteed to be returned in\nthe response. 
Other fields in the response may be empty.\n\nNote: The field mask is required.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", + // "httpMethod": "PATCH", + // "id": "iam.projects.serviceAccounts.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nRequests using `-` as a wildcard for the `PROJECT_ID` will infer the\nproject from the `account` and the `ACCOUNT` value can be the `email`\naddress or the `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "request": { + // "$ref": "PatchServiceAccountRequest" + // }, + // "response": { + // "$ref": "ServiceAccount" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "iam.projects.serviceAccounts.setIamPolicy": type ProjectsServiceAccountsSetIamPolicyCall struct { @@ -5146,9 +5845,33 @@ type ProjectsServiceAccountsSetIamPolicyCall struct { header_ http.Header } -// SetIamPolicy: Sets the IAM access control policy for +// SetIamPolicy: Sets the Cloud IAM access control policy for // a // ServiceAccount. +// +// Note: Service accounts are both +// [resources +// and +// identities](/iam/docs/service-accounts#service_account_permissions +// ). This +// method treats the service account as a resource. Use it to grant +// members +// access to the service account, such as when they need to impersonate +// it. +// +// This method does not grant the service account access to other +// resources, +// such as projects. To grant a service account access to resources, +// include +// the service account in the Cloud IAM policy for the desired resource, +// then +// call the appropriate `setIamPolicy` method on the target resource. +// For +// example, to grant a service account access to a project, call +// the +// [projects.setIamPolicy](/resource-manager/reference/rest/v1/projec +// ts/setIamPolicy) +// method. func (r *ProjectsServiceAccountsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsServiceAccountsSetIamPolicyCall { c := &ProjectsServiceAccountsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5246,7 +5969,7 @@ func (c *ProjectsServiceAccountsSetIamPolicyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Sets the IAM access control policy for a\nServiceAccount.", + // "description": "Sets the Cloud IAM access control policy for a\nServiceAccount.\n\nNote: Service accounts are both\n[resources and\nidentities](/iam/docs/service-accounts#service_account_permissions). This\nmethod treats the service account as a resource. Use it to grant members\naccess to the service account, such as when they need to impersonate it.\n\nThis method does not grant the service account access to other resources,\nsuch as projects. To grant a service account access to resources, include\nthe service account in the Cloud IAM policy for the desired resource, then\ncall the appropriate `setIamPolicy` method on the target resource. 
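// Illustrative sketch (editorial example, not part of the vendored generated code):
// the new Patch call above updates the two mutable fields and requires a field mask.
// This assumes PatchServiceAccountRequest exposes ServiceAccount and UpdateMask fields
// (mirroring the REST message) and that ServiceAccount has DisplayName and Description
// fields; those structs are defined elsewhere in this file, not in this hunk.
package example

import (
	"context"

	iam "google.golang.org/api/iam/v1"
)

// relabelServiceAccount patches only display_name and description. Only the masked
// fields are guaranteed to be populated in the returned ServiceAccount.
func relabelServiceAccount(ctx context.Context, svc *iam.Service, projectID, email string) (*iam.ServiceAccount, error) {
	name := "projects/" + projectID + "/serviceAccounts/" + email
	req := &iam.PatchServiceAccountRequest{
		ServiceAccount: &iam.ServiceAccount{
			DisplayName: "CI builder",
			Description: "Used by the nightly build pipeline",
		},
		UpdateMask: "display_name,description",
	}
	return svc.Projects.ServiceAccounts.Patch(name, req).Context(ctx).Do()
}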
For\nexample, to grant a service account access to a project, call the\n[projects.setIamPolicy](/resource-manager/reference/rest/v1/projects/setIamPolicy)\nmethod.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:setIamPolicy", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.setIamPolicy", @@ -5287,8 +6010,15 @@ type ProjectsServiceAccountsSignBlobCall struct { header_ http.Header } -// SignBlob: Signs a blob using a service account's system-managed -// private key. +// SignBlob: **Note**: This method is in the process of being +// deprecated. Call +// the +// [`signBlob()`](/iam/credentials/reference/rest/v1/projects.service +// Accounts/signBlob) +// method of the Cloud IAM Service Account Credentials API +// instead. +// +// Signs a blob using a service account's system-managed private key. func (r *ProjectsServiceAccountsService) SignBlob(name string, signblobrequest *SignBlobRequest) *ProjectsServiceAccountsSignBlobCall { c := &ProjectsServiceAccountsSignBlobCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5386,7 +6116,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Signs a blob using a service account's system-managed private key.", + // "description": "**Note**: This method is in the process of being deprecated. Call the\n[`signBlob()`](/iam/credentials/reference/rest/v1/projects.serviceAccounts/signBlob)\nmethod of the Cloud IAM Service Account Credentials API instead.\n\nSigns a blob using a service account's system-managed private key.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.signBlob", @@ -5427,8 +6157,15 @@ type ProjectsServiceAccountsSignJwtCall struct { header_ http.Header } -// SignJwt: Signs a JWT using a service account's system-managed private -// key. +// SignJwt: **Note**: This method is in the process of being deprecated. +// Call +// the +// [`signJwt()`](/iam/credentials/reference/rest/v1/projects.serviceA +// ccounts/signJwt) +// method of the Cloud IAM Service Account Credentials API +// instead. +// +// Signs a JWT using a service account's system-managed private key. // // If no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM // sets an @@ -5532,7 +6269,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Signs a JWT using a service account's system-managed private key.\n\nIf no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM sets an\nan expiry time of one hour by default. If you request an expiry time of\nmore than one hour, the request will fail.", + // "description": "**Note**: This method is in the process of being deprecated. Call the\n[`signJwt()`](/iam/credentials/reference/rest/v1/projects.serviceAccounts/signJwt)\nmethod of the Cloud IAM Service Account Credentials API instead.\n\nSigns a JWT using a service account's system-managed private key.\n\nIf no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM sets an\nan expiry time of one hour by default. 
If you request an expiry time of\nmore than one hour, the request will fail.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", // "httpMethod": "POST", // "id": "iam.projects.serviceAccounts.signJwt", @@ -5703,6 +6440,148 @@ func (c *ProjectsServiceAccountsTestIamPermissionsCall) Do(opts ...googleapi.Cal } +// method id "iam.projects.serviceAccounts.undelete": + +type ProjectsServiceAccountsUndeleteCall struct { + s *Service + name string + undeleteserviceaccountrequest *UndeleteServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Undelete: Restores a deleted ServiceAccount. +// This is to be used as an action of last resort. A service account +// may +// not always be restorable. +func (r *ProjectsServiceAccountsService) Undelete(name string, undeleteserviceaccountrequest *UndeleteServiceAccountRequest) *ProjectsServiceAccountsUndeleteCall { + c := &ProjectsServiceAccountsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.undeleteserviceaccountrequest = undeleteserviceaccountrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServiceAccountsUndeleteCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsUndeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountsUndeleteCall) Context(ctx context.Context) *ProjectsServiceAccountsUndeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountsUndeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsUndeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleteserviceaccountrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:undelete") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.serviceAccounts.undelete" call. +// Exactly one of *UndeleteServiceAccountResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *UndeleteServiceAccountResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
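// Illustrative sketch (editorial example, not part of the vendored generated code):
// Undelete is documented above as a last-resort restore, and its parameter description
// requires the numeric unique ID form of the name (a deleted account's email no longer
// resolves). The svc.Projects.ServiceAccounts chain and the ServiceAccount.Name field
// follow this package's standard generated layout and are assumed here.
package example

import (
	"context"
	"fmt"

	iam "google.golang.org/api/iam/v1"
)

func undeleteServiceAccount(ctx context.Context, svc *iam.Service, uniqueID string) error {
	name := "projects/-/serviceAccounts/" + uniqueID
	resp, err := svc.Projects.ServiceAccounts.Undelete(name, &iam.UndeleteServiceAccountRequest{}).Context(ctx).Do()
	if err != nil {
		return err
	}
	// RestoredAccount carries the metadata of the recovered service account.
	fmt.Println("restored:", resp.RestoredAccount.Name)
	return nil
}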
+func (c *ProjectsServiceAccountsUndeleteCall) Do(opts ...googleapi.CallOption) (*UndeleteServiceAccountResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UndeleteServiceAccountResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Restores a deleted ServiceAccount.\nThis is to be used as an action of last resort. A service account may\nnot always be restorable.", + // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:undelete", + // "httpMethod": "POST", + // "id": "iam.projects.serviceAccounts.undelete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_UNIQUE_ID}'.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account.", + // "location": "path", + // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:undelete", + // "request": { + // "$ref": "UndeleteServiceAccountRequest" + // }, + // "response": { + // "$ref": "UndeleteServiceAccountResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "iam.projects.serviceAccounts.update": type ProjectsServiceAccountsUpdateCall struct { @@ -5714,11 +6593,14 @@ type ProjectsServiceAccountsUpdateCall struct { header_ http.Header } -// Update: Updates a ServiceAccount. +// Update: Note: This method is in the process of being deprecated. +// Use +// PatchServiceAccount instead. +// +// Updates a ServiceAccount. // // Currently, only the following fields are updatable: -// `display_name` . -// The `etag` is mandatory. +// `display_name` and `description`. func (r *ProjectsServiceAccountsService) Update(name string, serviceaccount *ServiceAccount) *ProjectsServiceAccountsUpdateCall { c := &ProjectsServiceAccountsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5816,7 +6698,7 @@ func (c *ProjectsServiceAccountsUpdateCall) Do(opts ...googleapi.CallOption) (*S } return ret, nil // { - // "description": "Updates a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` .\nThe `etag` is mandatory.", + // "description": "Note: This method is in the process of being deprecated. Use\nPatchServiceAccount instead.\n\nUpdates a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` and `description`.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", // "httpMethod": "PUT", // "id": "iam.projects.serviceAccounts.update", @@ -6642,7 +7524,12 @@ func (c *RolesListCall) ShowDeleted(showDeleted bool) *RolesListCall { } // View sets the optional parameter "view": Optional view for the -// returned Role objects. +// returned Role objects. 
When `FULL` is specified, +// the `includedPermissions` field is returned, which includes a list of +// all +// permissions in the role. The default value is `BASIC`, which does +// not +// return the `includedPermissions` field. // // Possible values: // "BASIC" @@ -6775,7 +7662,7 @@ func (c *RolesListCall) Do(opts ...googleapi.CallOption) (*ListRolesResponse, er // "type": "boolean" // }, // "view": { - // "description": "Optional view for the returned Role objects.", + // "description": "Optional view for the returned Role objects. When `FULL` is specified,\nthe `includedPermissions` field is returned, which includes a list of all\npermissions in the role. The default value is `BASIC`, which does not\nreturn the `includedPermissions` field.", // "enum": [ // "BASIC", // "FULL" diff --git a/vendor/google.golang.org/api/internal/BUILD.bazel b/vendor/google.golang.org/api/internal/BUILD.bazel new file mode 100644 index 0000000000000..011f72cf4c4e6 --- /dev/null +++ b/vendor/google.golang.org/api/internal/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "creds.go", + "pool.go", + "settings.go", + ], + importmap = "k8s.io/kops/vendor/google.golang.org/api/internal", + importpath = "google.golang.org/api/internal", + visibility = ["//vendor/google.golang.org/api:__subpackages__"], + deps = [ + "//vendor/golang.org/x/oauth2:go_default_library", + "//vendor/golang.org/x/oauth2/google:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/google.golang.org/grpc/naming:go_default_library", + ], +) diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go new file mode 100644 index 0000000000000..69b8659fddbd1 --- /dev/null +++ b/vendor/google.golang.org/api/internal/creds.go @@ -0,0 +1,102 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + + "golang.org/x/oauth2" + + "golang.org/x/oauth2/google" +) + +// Creds returns credential information obtained from DialSettings, or if none, then +// it returns default credential information. +func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + if ds.Credentials != nil { + return ds.Credentials, nil + } + if ds.CredentialsJSON != nil { + return credentialsFromJSON(ctx, ds.CredentialsJSON, ds.Endpoint, ds.Scopes, ds.Audiences) + } + if ds.CredentialsFile != "" { + data, err := ioutil.ReadFile(ds.CredentialsFile) + if err != nil { + return nil, fmt.Errorf("cannot read credentials file: %v", err) + } + return credentialsFromJSON(ctx, data, ds.Endpoint, ds.Scopes, ds.Audiences) + } + if ds.TokenSource != nil { + return &google.Credentials{TokenSource: ds.TokenSource}, nil + } + cred, err := google.FindDefaultCredentials(ctx, ds.Scopes...) 
+ if err != nil { + return nil, err + } + if len(cred.JSON) > 0 { + return credentialsFromJSON(ctx, cred.JSON, ds.Endpoint, ds.Scopes, ds.Audiences) + } + // For GAE and GCE, the JSON is empty so return the default credentials directly. + return cred, nil +} + +// JSON key file type. +const ( + serviceAccountKey = "service_account" +) + +// credentialsFromJSON returns a google.Credentials based on the input. +// +// - If the JSON is a service account and no scopes provided, returns self-signed JWT auth flow +// - Otherwise, returns OAuth 2.0 flow. +func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scopes []string, audiences []string) (*google.Credentials, error) { + cred, err := google.CredentialsFromJSON(ctx, data, scopes...) + if err != nil { + return nil, err + } + if len(data) > 0 && len(scopes) == 0 { + var f struct { + Type string `json:"type"` + // The rest JSON fields are omitted because they are not used. + } + if err := json.Unmarshal(cred.JSON, &f); err != nil { + return nil, err + } + if f.Type == serviceAccountKey { + ts, err := selfSignedJWTTokenSource(data, endpoint, audiences) + if err != nil { + return nil, err + } + cred.TokenSource = ts + } + } + return cred, err +} + +func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) (oauth2.TokenSource, error) { + // Use the API endpoint as the default audience + audience := endpoint + if len(audiences) > 0 { + // TODO(shinfan): Update golang oauth to support multiple audiences. + if len(audiences) > 1 { + return nil, fmt.Errorf("multiple audiences support is not implemented") + } + audience = audiences[0] + } + return google.JWTAccessTokenSourceFromJSON(data, audience) +} diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go new file mode 100644 index 0000000000000..a4426dcb700fd --- /dev/null +++ b/vendor/google.golang.org/api/internal/pool.go @@ -0,0 +1,61 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "errors" + + "google.golang.org/grpc/naming" +) + +// PoolResolver provides a fixed list of addresses to load balance between +// and does not provide further updates. +type PoolResolver struct { + poolSize int + dialOpt *DialSettings + ch chan []*naming.Update +} + +// NewPoolResolver returns a PoolResolver +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func NewPoolResolver(size int, o *DialSettings) *PoolResolver { + return &PoolResolver{poolSize: size, dialOpt: o} +} + +// Resolve returns a Watcher for the endpoint defined by the DialSettings +// provided to NewPoolResolver. 
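// Illustrative sketch (editorial example, not part of the vendored code): the
// PoolResolver introduced above feeds the gRPC naming machinery a fixed list of N
// copies of a single endpoint, so a connection pool of size N can be balanced over it.
// Because google.golang.org/api/internal is an internal package, this sketch is written
// as if it lived in a test file next to pool.go (package internal); the Next/Close
// behaviour it checks is the one described by the code and comments in this diff.
package internal

import "testing"

func TestPoolResolverSketch(t *testing.T) {
	r := NewPoolResolver(3, &DialSettings{Endpoint: "example.googleapis.com:443"})

	// Resolve returns the resolver itself as the naming.Watcher.
	w, err := r.Resolve("ignored-target")
	if err != nil {
		t.Fatal(err)
	}
	// The first Next delivers the three identical address updates; subsequent calls
	// block until Close releases them.
	updates, err := w.Next()
	if err != nil {
		t.Fatal(err)
	}
	if len(updates) != 3 || updates[0].Addr != "example.googleapis.com:443" {
		t.Fatalf("unexpected updates: %+v", updates)
	}
	r.Close()
}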
+func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) { + if r.dialOpt.Endpoint == "" { + return nil, errors.New("no endpoint configured") + } + addrs := make([]*naming.Update, 0, r.poolSize) + for i := 0; i < r.poolSize; i++ { + addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i}) + } + r.ch = make(chan []*naming.Update, 1) + r.ch <- addrs + return r, nil +} + +// Next returns a static list of updates on the first call, +// and blocks indefinitely until Close is called on subsequent calls. +func (r *PoolResolver) Next() ([]*naming.Update, error) { + return <-r.ch, nil +} + +// Close releases resources associated with the pool and causes Next to unblock. +func (r *PoolResolver) Close() { + close(r.ch) +} diff --git a/vendor/google.golang.org/api/internal/service-account.json b/vendor/google.golang.org/api/internal/service-account.json new file mode 100644 index 0000000000000..6b36a92961ead --- /dev/null +++ b/vendor/google.golang.org/api/internal/service-account.json @@ -0,0 +1,12 @@ +{ + "type": "service_account", + "project_id": "project_id", + "private_key_id": "private_key_id", + "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCzd9ZdbPLAR4/g\nj+Rodu15kEasMpxf/Mz+gKRb2fmgR2Y18Y/iRBYZ4SkmF2pBSfzvwE/aTCzSPBGl\njHhPzohXnSN029eWoItmxVONlqCbR29pD07aLzv08LGeIGdHIEdhVjhvRwTkYZIF\ndXmlHNDRUU/EbJN9D+3ahw22BNnC4PaDgfIWTs3xIlTCSf2rL39I4DSNLTS/LzxK\n/XrQfBMtfwMWwyQaemXbc7gRgzOy8L56wa1W1zyXx99th97j1bLnoAXBGplhB4Co\n25ohyDAuhxRm+XGMEaO0Mzo7u97kvhj48a569RH1QRhOf7EBf60jO4h5eOmfi5P5\nPV3l7041AgMBAAECggEAEZ0RTNoEeRqM5F067YW+iM/AH+ZXspP9Cn1VpC4gcbqQ\nLXsnw+0qvh97CmIB66Z3TJBzRdl0DK4YjUbcB/kdKHwjnrR01DOtesijCqJd4N+B\n762w73jzSXbV9872U+S3HLZ5k3JE6KUqz55X8fyCAgkY6w4862lEzs2yasrPFHEV\nRoQp3PM0Miif8R3hGDhOWcHxcobullthG6JHAQFfc1ctwEjZI4TK0iWqlzfWGyKN\nT9UgvjUDud5cGvS9el0AiLN6keAf77tcPn1zetUVhxN1KN4bVAm1Q+6O8esl63Rj\n7JXpHzxaRnit9S6/aH/twHsGGtLg5Puw6jey6xs4AQKBgQD2JNy1wzewCRkD+jug\n8CHbJ+LIJVRNIaWa/RK1QD8/UjmFPkIzRQSF3AKC5mRAWSa2FL3yVK3N/DD7hazW\n85XSBB7IDcnoJnA9SkUeWwqQGkDx3EntlU3gX8Kn/+ofF8O9jLXxAa901MAVXVuf\n5YDzrl4PNE3bFnPCdiNmSdRfhQKBgQC6p4DsCpwqbeTu9f5ak9VW/fQP47Fgt+Mf\nwGjBnKP5PbbNJpHCfamF7jqSRH83Xy0KNssH7jD/NZ2oT594sMmiQPUC5ni9VYY6\nsuYB0JbD5Mq+EjKIVhYtxaQJ76LzHreEI+G4z6k3H7/hRpr3/C48n9G/uVkT9DbJ\noplxxEx68QKBgQCdJ23vcwO0Firtmi/GEmtbVHz70rGfSXNFoHz4UlvPXv0wsE5u\nE4vOt2i3EMhDOWh46odYGG6bzH+tp2xyFTW70Dui+QLHgPs6dpfoyLHWzZxXj5F3\n6lK9hgZvYvqk/XRRKmzjwnK2wjsdqOyeC1covlR5mqh20D/6kZkKbur0TQKBgAwy\nCZBimRWEnKKoW/gbFKNccGfhXqONID/g2Hdd/rC4QYth68AjacIgcJ9B7nX1uAGk\n1tsryvPB0w0+NpMyKdp6GAgaeuUUA3MuYSzZLiCagEyu77JMvaI7+Z3UlHcCGMd/\neK4Uk1/QqT7U2Cc/yN2ZK6E1QQa2vCWshA4U31JhAoGAbtbSSSsul1c+PsJ13Cfk\n6qVnqYzPqt23QTyOZmGAvUHH/M4xRiQpOE0cDF4t/r5PwenAQPQzTvMmWRzj6uAY\n3eaU0eAK7ZfoweCoOIAPnpFbbRLrXfoY46H7MYh7euWGXOKEpxz5yzuEkd9ByNUE\n86vSEidqbMIiXVgEgnu/k08=\n-----END PRIVATE KEY-----\n", + "client_email": "xyz@developer.gserviceaccount.com", + "client_id": "123", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/xyz%40developer.gserviceaccount.com" +} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go new file mode 100644 index 0000000000000..062301c65fd5c --- /dev/null +++ b/vendor/google.golang.org/api/internal/settings.go @@ -0,0 
+1,96 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal supports the options and transport packages. +package internal + +import ( + "errors" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/grpc" +) + +// DialSettings holds information needed to establish a connection with a +// Google API service. +type DialSettings struct { + Endpoint string + Scopes []string + TokenSource oauth2.TokenSource + Credentials *google.Credentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + UserAgent string + APIKey string + Audiences []string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + NoAuth bool + + // Google API system parameters. For more information please read: + // https://cloud.google.com/apis/docs/system-parameters + QuotaProject string + RequestReason string +} + +// Validate reports an error if ds is invalid. +func (ds *DialSettings) Validate() error { + hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil + if ds.NoAuth && hasCreds { + return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") + } + // Credentials should not appear with other options. + // We currently allow TokenSource and CredentialsFile to coexist. + // TODO(jba): make TokenSource & CredentialsFile an error (breaking change). + nCreds := 0 + if ds.Credentials != nil { + nCreds++ + } + if ds.CredentialsJSON != nil { + nCreds++ + } + if ds.CredentialsFile != "" { + nCreds++ + } + if ds.APIKey != "" { + nCreds++ + } + if ds.TokenSource != nil { + nCreds++ + } + if len(ds.Scopes) > 0 && len(ds.Audiences) > 0 { + return errors.New("WithScopes is incompatible with WithAudience") + } + // Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility. 
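// Illustrative sketch (editorial example, not part of the vendored code): Validate,
// whose remaining checks continue below, rejects option combinations that would supply
// more than one credential source, with the grandfathered exception of TokenSource plus
// CredentialsFile noted in the comments above. Written as a test next to settings.go,
// since the package is internal to google.golang.org/api.
package internal

import (
	"testing"

	"golang.org/x/oauth2"
)

func TestValidateCredentialCombinations(t *testing.T) {
	// Two independent credential sources: rejected.
	bad := DialSettings{APIKey: "fake-api-key", CredentialsFile: "sa.json"}
	if err := bad.Validate(); err == nil {
		t.Fatal("expected an error for multiple credential options")
	}

	// TokenSource together with CredentialsFile is still allowed for backwards
	// compatibility.
	ok := DialSettings{
		TokenSource:     oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "fake"}),
		CredentialsFile: "sa.json",
	}
	if err := ok.Validate(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}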
+ if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { + return errors.New("multiple credential options provided") + } + if ds.HTTPClient != nil && ds.GRPCConn != nil { + return errors.New("WithHTTPClient is incompatible with WithGRPCConn") + } + if ds.HTTPClient != nil && ds.GRPCDialOpts != nil { + return errors.New("WithHTTPClient is incompatible with gRPC dial options") + } + if ds.HTTPClient != nil && ds.QuotaProject != "" { + return errors.New("WithHTTPClient is incompatible with QuotaProject") + } + if ds.HTTPClient != nil && ds.RequestReason != "" { + return errors.New("WithHTTPClient is incompatible with RequestReason") + } + + return nil +} diff --git a/vendor/google.golang.org/api/oauth2/v2/BUILD.bazel b/vendor/google.golang.org/api/oauth2/v2/BUILD.bazel index b2af0a8b3ee62..ba1bd46e23c6c 100644 --- a/vendor/google.golang.org/api/oauth2/v2/BUILD.bazel +++ b/vendor/google.golang.org/api/oauth2/v2/BUILD.bazel @@ -9,5 +9,7 @@ go_library( deps = [ "//vendor/google.golang.org/api/gensupport:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/google.golang.org/api/option:go_default_library", + "//vendor/google.golang.org/api/transport/http:go_default_library", ], ) diff --git a/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json b/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json index 18204dc0d2d39..060d052ae68bb 100644 --- a/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json +++ b/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json @@ -2,17 +2,14 @@ "auth": { "oauth2": { "scopes": { - "https://www.googleapis.com/auth/plus.login": { - "description": "Know the list of people in your circles, your age range, and language" - }, "https://www.googleapis.com/auth/plus.me": { - "description": "Know who you are on Google" + "description": "Associate you with your personal info on Google" }, "https://www.googleapis.com/auth/userinfo.email": { "description": "View your email address" }, "https://www.googleapis.com/auth/userinfo.profile": { - "description": "View your basic profile info" + "description": "See your personal info, including any personal info you've made publicly available" } } } @@ -23,7 +20,7 @@ "description": "Obtains end-user authorization grants for use with other Google APIs.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/accounts/docs/OAuth2", - "etag": "\"Zkyw9ACJZUvcYmlFaKGChzhmtnE/aQYw8bQfY3xIJbtzdw5QvIFJYtI\"", + "etag": "\"VPK3KBfpaEgZ16pozGOoMYfKc0U/W-JRJm0Bex0eEJ4XYBVnRKTeRQ8\"", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" @@ -122,7 +119,6 @@ "$ref": "Userinfoplus" }, "scopes": [ - "https://www.googleapis.com/auth/plus.login", "https://www.googleapis.com/auth/plus.me", "https://www.googleapis.com/auth/userinfo.email", "https://www.googleapis.com/auth/userinfo.profile" @@ -142,7 +138,6 @@ "$ref": "Userinfoplus" }, "scopes": [ - "https://www.googleapis.com/auth/plus.login", "https://www.googleapis.com/auth/plus.me", "https://www.googleapis.com/auth/userinfo.email", "https://www.googleapis.com/auth/userinfo.profile" @@ -155,7 +150,7 @@ } } }, - "revision": "20180208", + "revision": "20190313", "rootUrl": "https://www.googleapis.com/", "schemas": { "Jwk": { diff --git a/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go b/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go index 4a1eea3d951a0..29cc6fbf1266d 100644 --- 
a/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go +++ b/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All rights reserved. +// Copyright 2019 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,13 +6,39 @@ // Package oauth2 provides access to the Google OAuth2 API. // -// See https://developers.google.com/accounts/docs/OAuth2 +// For product documentation, see: https://developers.google.com/accounts/docs/OAuth2 +// +// Creating a client // // Usage example: // // import "google.golang.org/api/oauth2/v2" // ... -// oauth2Service, err := oauth2.New(oauthHttpClient) +// ctx := context.Background() +// oauth2Service, err := oauth2.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// Other authentication options +// +// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// +// oauth2Service, err := oauth2.NewService(ctx, option.WithScopes(oauth2.UserinfoProfileScope)) +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// oauth2Service, err := oauth2.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// oauth2Service, err := oauth2.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. package oauth2 // import "google.golang.org/api/oauth2/v2" import ( @@ -29,6 +55,8 @@ import ( gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + option "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code @@ -52,19 +80,45 @@ const basePath = "https://www.googleapis.com/" // OAuth2 scopes used by this API. const ( - // Know the list of people in your circles, your age range, and language - PlusLoginScope = "https://www.googleapis.com/auth/plus.login" - - // Know who you are on Google + // Associate you with your personal info on Google PlusMeScope = "https://www.googleapis.com/auth/plus.me" // View your email address UserinfoEmailScope = "https://www.googleapis.com/auth/userinfo.email" - // View your basic profile info + // See your personal info, including any personal info you've made + // publicly available UserinfoProfileScope = "https://www.googleapis.com/auth/userinfo.profile" ) +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := option.WithScopes( + "https://www.googleapis.com/auth/plus.me", + "https://www.googleapis.com/auth/userinfo.email", + "https://www.googleapis.com/auth/userinfo.profile", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + client, endpoint, err := htransport.NewClient(ctx, opts...) 
+ if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") @@ -693,7 +747,6 @@ func (c *UserinfoGetCall) Do(opts ...googleapi.CallOption) (*Userinfoplus, error // "$ref": "Userinfoplus" // }, // "scopes": [ - // "https://www.googleapis.com/auth/plus.login", // "https://www.googleapis.com/auth/plus.me", // "https://www.googleapis.com/auth/userinfo.email", // "https://www.googleapis.com/auth/userinfo.profile" @@ -820,7 +873,6 @@ func (c *UserinfoV2MeGetCall) Do(opts ...googleapi.CallOption) (*Userinfoplus, e // "$ref": "Userinfoplus" // }, // "scopes": [ - // "https://www.googleapis.com/auth/plus.login", // "https://www.googleapis.com/auth/plus.me", // "https://www.googleapis.com/auth/userinfo.email", // "https://www.googleapis.com/auth/userinfo.profile" diff --git a/vendor/google.golang.org/api/option/BUILD.bazel b/vendor/google.golang.org/api/option/BUILD.bazel new file mode 100644 index 0000000000000..d320aca06acfc --- /dev/null +++ b/vendor/google.golang.org/api/option/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "credentials_go19.go", + "credentials_notgo19.go", + "option.go", + ], + importmap = "k8s.io/kops/vendor/google.golang.org/api/option", + importpath = "google.golang.org/api/option", + visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/oauth2:go_default_library", + "//vendor/golang.org/x/oauth2/google:go_default_library", + "//vendor/google.golang.org/api/internal:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go new file mode 100644 index 0000000000000..0636a8294547a --- /dev/null +++ b/vendor/google.golang.org/api/option/credentials_go19.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.Credentials)(w) +} + +// WithCredentials returns a ClientOption that authenticates API calls. 
+func WithCredentials(creds *google.Credentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go new file mode 100644 index 0000000000000..74d3a4b5b918c --- /dev/null +++ b/vendor/google.golang.org/api/option/credentials_notgo19.go @@ -0,0 +1,32 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.DefaultCredentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.DefaultCredentials)(w) +} + +func WithCredentials(creds *google.DefaultCredentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go new file mode 100644 index 0000000000000..0a1c2dba9e382 --- /dev/null +++ b/vendor/google.golang.org/api/option/option.go @@ -0,0 +1,235 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package option contains options for Google API clients. +package option + +import ( + "net/http" + + "golang.org/x/oauth2" + "google.golang.org/api/internal" + "google.golang.org/grpc" +) + +// A ClientOption is an option for a Google API client. +type ClientOption interface { + Apply(*internal.DialSettings) +} + +// WithTokenSource returns a ClientOption that specifies an OAuth2 token +// source to be used as the basis for authentication. +func WithTokenSource(s oauth2.TokenSource) ClientOption { + return withTokenSource{s} +} + +type withTokenSource struct{ ts oauth2.TokenSource } + +func (w withTokenSource) Apply(o *internal.DialSettings) { + o.TokenSource = w.ts +} + +type withCredFile string + +func (w withCredFile) Apply(o *internal.DialSettings) { + o.CredentialsFile = string(w) +} + +// WithCredentialsFile returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials file. +func WithCredentialsFile(filename string) ClientOption { + return withCredFile(filename) +} + +// WithServiceAccountFile returns a ClientOption that uses a Google service +// account credentials file to authenticate. +// +// Deprecated: Use WithCredentialsFile instead. 
+func WithServiceAccountFile(filename string) ClientOption { + return WithCredentialsFile(filename) +} + +// WithCredentialsJSON returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials. +func WithCredentialsJSON(p []byte) ClientOption { + return withCredentialsJSON(p) +} + +type withCredentialsJSON []byte + +func (w withCredentialsJSON) Apply(o *internal.DialSettings) { + o.CredentialsJSON = make([]byte, len(w)) + copy(o.CredentialsJSON, w) +} + +// WithEndpoint returns a ClientOption that overrides the default endpoint +// to be used for a service. +func WithEndpoint(url string) ClientOption { + return withEndpoint(url) +} + +type withEndpoint string + +func (w withEndpoint) Apply(o *internal.DialSettings) { + o.Endpoint = string(w) +} + +// WithScopes returns a ClientOption that overrides the default OAuth2 scopes +// to be used for a service. +func WithScopes(scope ...string) ClientOption { + return withScopes(scope) +} + +type withScopes []string + +func (w withScopes) Apply(o *internal.DialSettings) { + o.Scopes = make([]string, len(w)) + copy(o.Scopes, w) +} + +// WithUserAgent returns a ClientOption that sets the User-Agent. +func WithUserAgent(ua string) ClientOption { + return withUA(ua) +} + +type withUA string + +func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) } + +// WithHTTPClient returns a ClientOption that specifies the HTTP client to use +// as the basis of communications. This option may only be used with services +// that support HTTP as their communication transport. When used, the +// WithHTTPClient option takes precedent over all other supplied options. +func WithHTTPClient(client *http.Client) ClientOption { + return withHTTPClient{client} +} + +type withHTTPClient struct{ client *http.Client } + +func (w withHTTPClient) Apply(o *internal.DialSettings) { + o.HTTPClient = w.client +} + +// WithGRPCConn returns a ClientOption that specifies the gRPC client +// connection to use as the basis of communications. This option many only be +// used with services that support gRPC as their communication transport. When +// used, the WithGRPCConn option takes precedent over all other supplied +// options. +func WithGRPCConn(conn *grpc.ClientConn) ClientOption { + return withGRPCConn{conn} +} + +type withGRPCConn struct{ conn *grpc.ClientConn } + +func (w withGRPCConn) Apply(o *internal.DialSettings) { + o.GRPCConn = w.conn +} + +// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption +// to an underlying gRPC dial. It does not work with WithGRPCConn. +func WithGRPCDialOption(opt grpc.DialOption) ClientOption { + return withGRPCDialOption{opt} +} + +type withGRPCDialOption struct{ opt grpc.DialOption } + +func (w withGRPCDialOption) Apply(o *internal.DialSettings) { + o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt) +} + +// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC +// connections that requests will be balanced between. +// This is an EXPERIMENTAL API and may be changed or removed in the future. 
+func WithGRPCConnectionPool(size int) ClientOption { + return withGRPCConnectionPool(size) +} + +type withGRPCConnectionPool int + +func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { + balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o)) + o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer)) +} + +// WithAPIKey returns a ClientOption that specifies an API key to be used +// as the basis for authentication. +// +// API Keys can only be used for JSON-over-HTTP APIs, including those under +// the import path google.golang.org/api/.... +func WithAPIKey(apiKey string) ClientOption { + return withAPIKey(apiKey) +} + +type withAPIKey string + +func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) } + +// WithAudiences returns a ClientOption that specifies an audience to be used +// as the audience field ("aud") for the JWT token authentication. +func WithAudiences(audience ...string) ClientOption { + return withAudiences(audience) +} + +type withAudiences []string + +func (w withAudiences) Apply(o *internal.DialSettings) { + o.Audiences = make([]string, len(w)) + copy(o.Audiences, w) +} + +// WithoutAuthentication returns a ClientOption that specifies that no +// authentication should be used. It is suitable only for testing and for +// accessing public resources, like public Google Cloud Storage buckets. +// It is an error to provide both WithoutAuthentication and any of WithAPIKey, +// WithTokenSource, WithCredentialsFile or WithServiceAccountFile. +func WithoutAuthentication() ClientOption { + return withoutAuthentication{} +} + +type withoutAuthentication struct{} + +func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true } + +// WithQuotaProject returns a ClientOption that specifies the project used +// for quota and billing purposes. +// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithQuotaProject(quotaProject string) ClientOption { + return withQuotaProject(quotaProject) +} + +type withQuotaProject string + +func (w withQuotaProject) Apply(o *internal.DialSettings) { + o.QuotaProject = string(w) +} + +// WithRequestReason returns a ClientOption that specifies a reason for +// making the request, which is intended to be recorded in audit logging. +// An example reason would be a support-case ticket number. 
+// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithRequestReason(requestReason string) ClientOption { + return withRequestReason(requestReason) +} + +type withRequestReason string + +func (w withRequestReason) Apply(o *internal.DialSettings) { + o.RequestReason = string(w) +} diff --git a/vendor/google.golang.org/api/storage/v1/BUILD.bazel b/vendor/google.golang.org/api/storage/v1/BUILD.bazel index 99e91c1c666f1..4c057e7eb5a44 100644 --- a/vendor/google.golang.org/api/storage/v1/BUILD.bazel +++ b/vendor/google.golang.org/api/storage/v1/BUILD.bazel @@ -9,5 +9,7 @@ go_library( deps = [ "//vendor/google.golang.org/api/gensupport:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/google.golang.org/api/option:go_default_library", + "//vendor/google.golang.org/api/transport/http:go_default_library", ], ) diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 49d0a518115a3..ca630362b2863 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/KPalWULMnQfaqumeaBhBrVfHFNM\"", + "etag": "\"VPK3KBfpaEgZ16pozGOoMYfKc0U/g26t9M1iNI-j1-5ry7bpKWNPs60\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -109,6 +109,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -142,6 +147,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -171,6 +181,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -203,6 +218,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", "location": "query", @@ -239,6 +259,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -278,6 +303,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -326,6 +356,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -378,6 +413,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -410,6 +450,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -422,10 +467,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" + "https://www.googleapis.com/auth/devstorage.full_control" ] }, "insert": { @@ -495,6 +537,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request.", "location": "query", @@ -559,6 +606,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request.", "location": "query", @@ -599,6 +651,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", "location": "query", @@ -694,6 +751,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -726,6 +788,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -741,8 +808,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" + "https://www.googleapis.com/auth/devstorage.full_control" ] }, "testIamPermissions": { @@ -767,6 +833,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -864,6 +935,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -928,6 +1004,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -961,6 +1042,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -990,6 +1076,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1034,6 +1125,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", "location": "query", @@ -1070,6 +1166,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1109,6 +1210,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1152,6 +1258,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1186,6 +1297,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1218,6 +1334,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1251,6 +1372,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1307,6 +1433,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1353,6 +1484,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1395,6 +1531,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", "location": "query", @@ -1440,6 +1581,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1489,6 +1635,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1541,6 +1692,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1622,6 +1778,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1746,6 +1907,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "sourceBucket": { "description": "Name of the bucket in which to find the source object.", "location": "path", @@ -1834,6 +2000,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1911,6 +2082,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1958,6 +2134,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2078,6 +2259,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", "location": "query", @@ -2153,6 +2339,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2262,6 +2453,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request, for Requester Pays buckets.", "location": "query", @@ -2396,6 +2592,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "rewriteToken": { "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", "location": "query", @@ -2465,6 +2666,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2519,6 +2725,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2622,6 +2833,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2695,6 +2911,11 @@ "location": "query", "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", "location": "query", @@ -2727,6 +2948,210 @@ }, "projects": { "resources": { + "hmacKeys": { + "methods": { + "create": { + "description": "Creates a new HMAC key for the specified service account.", + "httpMethod": "POST", + "id": "storage.projects.hmacKeys.create", + "parameterOrder": [ + "projectId", + "serviceAccountEmail" + ], + "parameters": { + "projectId": { + "description": "Project ID owning the service account.", + "location": "path", + "required": true, + "type": "string" + }, + "serviceAccountEmail": { + "description": "Email address of the service account.", + "location": "query", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys", + "response": { + "$ref": "HmacKey" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "delete": { + "description": "Deletes an HMAC key.", + "httpMethod": "DELETE", + "id": "storage.projects.hmacKeys.delete", + "parameterOrder": [ + "projectId", + "accessId" + ], + "parameters": { + "accessId": { + "description": "Name of the HMAC key to be deleted.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Project ID owning the requested key", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys/{accessId}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Retrieves an HMAC key's metadata", + "httpMethod": "GET", + "id": "storage.projects.hmacKeys.get", + "parameterOrder": [ + "projectId", + "accessId" + ], + "parameters": { + "accessId": { + "description": "Name of the HMAC key.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Project ID owning the service account of the requested key.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys/{accessId}", + "response": { + "$ref": "HmacKeyMetadata" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.read_only" + ] + }, + "list": { + "description": "Retrieves a list of HMAC keys matching the criteria.", + "httpMethod": "GET", + "id": "storage.projects.hmacKeys.list", + "parameterOrder": [ + "projectId" + ], + "parameters": { + "maxResults": { + "default": "250", + "description": "Maximum number of items to return in a single page of responses. The service uses this parameter or 250 items, whichever is smaller. The max number of items per page will also be limited by the number of distinct service accounts in the response. 
If the number of service accounts in a single response is too high, the page will truncated and a next page token will be returned.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "projectId": { + "description": "Name of the project in which to look for HMAC keys.", + "location": "path", + "required": true, + "type": "string" + }, + "serviceAccountEmail": { + "description": "If present, only keys for the given service account are returned.", + "location": "query", + "type": "string" + }, + "showDeletedKeys": { + "description": "Whether or not to show keys in the DELETED state.", + "location": "query", + "type": "boolean" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys", + "response": { + "$ref": "HmacKeysMetadata" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only" + ] + }, + "update": { + "description": "Updates the state of an HMAC key. See the HMAC Key resource descriptor for valid states.", + "httpMethod": "PUT", + "id": "storage.projects.hmacKeys.update", + "parameterOrder": [ + "projectId", + "accessId" + ], + "parameters": { + "accessId": { + "description": "Name of the HMAC key being updated.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Project ID owning the service account of the updated key.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys/{accessId}", + "request": { + "$ref": "HmacKeyMetadata" + }, + "response": { + "$ref": "HmacKeyMetadata" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, "serviceAccount": { "methods": { "get": { @@ -2743,6 +3168,11 @@ "required": true, "type": "string" }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request.", "location": "query", @@ -2766,7 +3196,7 @@ } } }, - "revision": "20181109", + "revision": "20190523", "rootUrl": "https://www.googleapis.com/", "schemas": { "Bucket": { @@ -2859,6 +3289,7 @@ "description": "The bucket's IAM configuration.", "properties": { "bucketPolicyOnly": { + "description": "The bucket's Bucket Policy Only configuration.", "properties": { "enabled": { "description": "If set, access checks only use bucket-level IAM policies or above.", @@ -2961,6 +3392,10 @@ "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. 
See the developer's guide for the authoritative list.", "type": "string" }, + "locationType": { + "description": "The type of the bucket location.", + "type": "string" + }, "logging": { "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.", "properties": { @@ -3295,6 +3730,122 @@ }, "type": "object" }, + "Expr": { + "description": "Represents an expression text. Example: title: \"User account presence\" description: \"Determines whether the request has a user account\" expression: \"size(request.user) \u003e 0\"", + "id": "Expr", + "properties": { + "description": { + "description": "An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", + "type": "string" + }, + "expression": { + "description": "Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported.", + "type": "string" + }, + "location": { + "description": "An optional string indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + "type": "string" + }, + "title": { + "description": "An optional title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "type": "string" + } + }, + "type": "object" + }, + "HmacKey": { + "description": "JSON template to produce a JSON-style HMAC Key resource for Create responses.", + "id": "HmacKey", + "properties": { + "kind": { + "default": "storage#hmacKey", + "description": "The kind of item this is. For HMAC keys, this is always storage#hmacKey.", + "type": "string" + }, + "metadata": { + "$ref": "HmacKeyMetadata", + "description": "Key metadata." + }, + "secret": { + "description": "HMAC secret key material.", + "type": "string" + } + }, + "type": "object" + }, + "HmacKeyMetadata": { + "description": "JSON template to produce a JSON-style HMAC Key metadata resource.", + "id": "HmacKeyMetadata", + "properties": { + "accessId": { + "description": "The ID of the HMAC Key.", + "type": "string" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the HMAC key.", + "type": "string" + }, + "id": { + "description": "The ID of the HMAC key, including the Project ID and the Access ID.", + "type": "string" + }, + "kind": { + "default": "storage#hmacKeyMetadata", + "description": "The kind of item this is. For HMAC Key metadata, this is always storage#hmacKeyMetadata.", + "type": "string" + }, + "projectId": { + "description": "Project ID owning the service account to which the key authenticates.", + "type": "string" + }, + "selfLink": { + "description": "The link to this resource.", + "type": "string" + }, + "serviceAccountEmail": { + "description": "The email address of the key's associated service account.", + "type": "string" + }, + "state": { + "description": "The state of the key. 
Can be one of ACTIVE, INACTIVE, or DELETED.", + "type": "string" + }, + "timeCreated": { + "description": "The creation time of the HMAC key in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "updated": { + "description": "The last modification time of the HMAC key metadata in RFC 3339 format.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "HmacKeysMetadata": { + "description": "A list of hmacKeys.", + "id": "HmacKeysMetadata", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "HmacKeyMetadata" + }, + "type": "array" + }, + "kind": { + "default": "storage#hmacKeysMetadata", + "description": "The kind of item this is. For lists of hmacKeys, this is always storage#hmacKeysMetadata.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, "Notification": { "description": "A subscription to receive Google PubSub notifications.", "id": "Notification", @@ -3695,7 +4246,8 @@ "items": { "properties": { "condition": { - "type": "any" + "$ref": "Expr", + "description": "The condition that is associated with this binding. NOTE: an unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." }, "members": { "annotations": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 606686f874a2d..36e37a1a593f4 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All rights reserved. +// Copyright 2019 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,13 +8,39 @@ // // This package is DEPRECATED. Use package cloud.google.com/go/storage instead. // -// See https://developers.google.com/storage/docs/json_api/ +// For product documentation, see: https://developers.google.com/storage/docs/json_api/ +// +// Creating a client // // Usage example: // // import "google.golang.org/api/storage/v1" // ... -// storageService, err := storage.New(oauthHttpClient) +// ctx := context.Background() +// storageService, err := storage.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// Other authentication options +// +// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// +// storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope)) +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) 
+// storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. package storage // import "google.golang.org/api/storage/v1" import ( @@ -31,6 +57,8 @@ import ( gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + option "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code @@ -70,6 +98,36 @@ const ( DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" ) +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") @@ -180,6 +238,7 @@ type ObjectsService struct { func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} + rs.HmacKeys = NewProjectsHmacKeysService(s) rs.ServiceAccount = NewProjectsServiceAccountService(s) return rs } @@ -187,9 +246,20 @@ func NewProjectsService(s *Service) *ProjectsService { type ProjectsService struct { s *Service + HmacKeys *ProjectsHmacKeysService + ServiceAccount *ProjectsServiceAccountService } +func NewProjectsHmacKeysService(s *Service) *ProjectsHmacKeysService { + rs := &ProjectsHmacKeysService{s: s} + return rs +} + +type ProjectsHmacKeysService struct { + s *Service +} + func NewProjectsServiceAccountService(s *Service) *ProjectsServiceAccountService { rs := &ProjectsServiceAccountService{s: s} return rs @@ -260,6 +330,9 @@ type Bucket struct { // US. See the developer's guide for the authoritative list. Location string `json:"location,omitempty"` + // LocationType: The type of the bucket location. + LocationType string `json:"locationType,omitempty"` + // Logging: The bucket's logging configuration, which defines the // destination bucket and optional name prefix for the current bucket's // logs. @@ -449,6 +522,7 @@ func (s *BucketEncryption) MarshalJSON() ([]byte, error) { // BucketIamConfiguration: The bucket's IAM configuration. type BucketIamConfiguration struct { + // BucketPolicyOnly: The bucket's Bucket Policy Only configuration. BucketPolicyOnly *BucketIamConfigurationBucketPolicyOnly `json:"bucketPolicyOnly,omitempty"` // ForceSendFields is a list of field names (e.g. 
"BucketPolicyOnly") to @@ -475,6 +549,8 @@ func (s *BucketIamConfiguration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BucketIamConfigurationBucketPolicyOnly: The bucket's Bucket Policy +// Only configuration. type BucketIamConfigurationBucketPolicyOnly struct { // Enabled: If set, access checks only use bucket-level IAM policies or // above. @@ -1201,6 +1277,200 @@ func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Expr: Represents an expression text. Example: title: "User account +// presence" description: "Determines whether the request has a user +// account" expression: "size(request.user) > 0" +type Expr struct { + // Description: An optional description of the expression. This is a + // longer text which describes the expression, e.g. when hovered over it + // in a UI. + Description string `json:"description,omitempty"` + + // Expression: Textual representation of an expression in Common + // Expression Language syntax. The application context of the containing + // message determines which well-known feature set of CEL is supported. + Expression string `json:"expression,omitempty"` + + // Location: An optional string indicating the location of the + // expression for error reporting, e.g. a file name and a position in + // the file. + Location string `json:"location,omitempty"` + + // Title: An optional title for the expression, i.e. a short string + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Expr) MarshalJSON() ([]byte, error) { + type NoMethod Expr + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HmacKey: JSON template to produce a JSON-style HMAC Key resource for +// Create responses. +type HmacKey struct { + // Kind: The kind of item this is. For HMAC keys, this is always + // storage#hmacKey. + Kind string `json:"kind,omitempty"` + + // Metadata: Key metadata. + Metadata *HmacKeyMetadata `json:"metadata,omitempty"` + + // Secret: HMAC secret key material. + Secret string `json:"secret,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HmacKey) MarshalJSON() ([]byte, error) { + type NoMethod HmacKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HmacKeyMetadata: JSON template to produce a JSON-style HMAC Key +// metadata resource. +type HmacKeyMetadata struct { + // AccessId: The ID of the HMAC Key. + AccessId string `json:"accessId,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the HMAC key. + Etag string `json:"etag,omitempty"` + + // Id: The ID of the HMAC key, including the Project ID and the Access + // ID. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For HMAC Key metadata, this is always + // storage#hmacKeyMetadata. + Kind string `json:"kind,omitempty"` + + // ProjectId: Project ID owning the service account to which the key + // authenticates. + ProjectId string `json:"projectId,omitempty"` + + // SelfLink: The link to this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServiceAccountEmail: The email address of the key's associated + // service account. + ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` + + // State: The state of the key. Can be one of ACTIVE, INACTIVE, or + // DELETED. + State string `json:"state,omitempty"` + + // TimeCreated: The creation time of the HMAC key in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + + // Updated: The last modification time of the HMAC key metadata in RFC + // 3339 format. + Updated string `json:"updated,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AccessId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccessId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HmacKeyMetadata) MarshalJSON() ([]byte, error) { + type NoMethod HmacKeyMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HmacKeysMetadata: A list of hmacKeys. 
+type HmacKeysMetadata struct { + // Items: The list of items. + Items []*HmacKeyMetadata `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of hmacKeys, this is always + // storage#hmacKeysMetadata. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HmacKeysMetadata) MarshalJSON() ([]byte, error) { + type NoMethod HmacKeysMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Notification: A subscription to receive Google PubSub notifications. type Notification struct { // CustomAttributes: An optional list of additional attributes to attach @@ -1784,7 +2054,11 @@ func (s *Policy) MarshalJSON() ([]byte, error) { } type PolicyBindings struct { - Condition interface{} `json:"condition,omitempty"` + // Condition: The condition that is associated with this binding. NOTE: + // an unsatisfied condition will not allow user access via current + // binding. Different bindings, including their conditions, are examined + // independently. + Condition *Expr `json:"condition,omitempty"` // Members: A collection of identifiers for members who may assume the // provided role. Recognized identifiers are as follows: @@ -2031,6 +2305,14 @@ func (r *BucketAccessControlsService) Delete(bucket string, entity string) *Buck return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketAccessControlsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsDeleteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { @@ -2119,6 +2401,11 @@ func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -2155,6 +2442,14 @@ func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketA return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketAccessControlsGetCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsGetCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall { @@ -2281,6 +2576,11 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -2318,6 +2618,14 @@ func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketAccessControlsInsertCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsInsertCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { @@ -2428,6 +2736,11 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -2467,6 +2780,14 @@ func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsL return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
+func (c *BucketAccessControlsListCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsListCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall { @@ -2585,6 +2906,11 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -2624,6 +2950,14 @@ func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucket return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketAccessControlsPatchCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsPatchCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { @@ -2742,6 +3076,11 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -2784,6 +3123,14 @@ func (r *BucketAccessControlsService) Update(bucket string, entity string, bucke return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketAccessControlsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsUpdateCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { @@ -2902,6 +3249,11 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", @@ -2956,6 +3308,14 @@ func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *BucketsDeleteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { @@ -3048,6 +3408,11 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -3111,6 +3476,14 @@ func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsGetCall) ProvisionalUserProject(provisionalUserProject string) *BucketsGetCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { @@ -3254,6 +3627,11 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -3293,6 +3671,14 @@ func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsGetIamPolicyCall) ProvisionalUserProject(provisionalUserProject string) *BucketsGetIamPolicyCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall { @@ -3411,6 +3797,11 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -3423,10 +3814,7 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -3502,6 +3890,14 @@ func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsInsertCall) ProvisionalUserProject(provisionalUserProject string) *BucketsInsertCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { @@ -3662,6 +4058,11 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request.", // "location": "query", @@ -3735,6 +4136,14 @@ func (c *BucketsListCall) Projection(projection string) *BucketsListCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsListCall) ProvisionalUserProject(provisionalUserProject string) *BucketsListCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. 
func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { @@ -3881,6 +4290,11 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request.", // "location": "query", @@ -3941,6 +4355,14 @@ func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatc return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsLockRetentionPolicyCall) ProvisionalUserProject(provisionalUserProject string) *BucketsLockRetentionPolicyCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall { @@ -4054,6 +4476,11 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -4162,6 +4589,14 @@ func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsPatchCall) ProvisionalUserProject(provisionalUserProject string) *BucketsPatchCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { @@ -4337,6 +4772,11 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -4377,6 +4817,14 @@ func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSet return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
+func (c *BucketsSetIamPolicyCall) ProvisionalUserProject(provisionalUserProject string) *BucketsSetIamPolicyCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { @@ -4487,6 +4935,11 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -4502,8 +4955,7 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -4529,6 +4981,14 @@ func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsTestIamPermissionsCall) ProvisionalUserProject(provisionalUserProject string) *BucketsTestIamPermissionsCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall { @@ -4655,6 +5115,11 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -4765,8 +5230,16 @@ func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *BucketsUpdateCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { c.urlParams_.Set("userProject", userProject) return c @@ -4940,6 +5413,11 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -5079,6 +5557,14 @@ func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *DefaultObjectAccessControlsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsDeleteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { @@ -5167,6 +5653,11 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -5203,6 +5694,14 @@ func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) * return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *DefaultObjectAccessControlsGetCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsGetCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall { @@ -5329,6 +5828,11 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -5367,6 +5871,14 @@ func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccessc return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
+func (c *DefaultObjectAccessControlsInsertCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsInsertCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { @@ -5477,6 +5989,11 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -5533,6 +6050,14 @@ func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagen return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *DefaultObjectAccessControlsListCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall { @@ -5663,6 +6188,11 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -5702,6 +6232,14 @@ func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *DefaultObjectAccessControlsPatchCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsPatchCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { @@ -5820,6 +6358,11 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", @@ -5862,6 +6405,14 @@ func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *DefaultObjectAccessControlsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsUpdateCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { @@ -5980,6 +6531,11 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -6020,6 +6576,14 @@ func (r *NotificationsService) Delete(bucket string, notification string) *Notif return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *NotificationsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *NotificationsDeleteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { @@ -6108,6 +6672,11 @@ func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -6144,6 +6713,14 @@ func (r *NotificationsService) Get(bucket string, notification string) *Notifica return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *NotificationsGetCall) ProvisionalUserProject(provisionalUserProject string) *NotificationsGetCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { @@ -6270,6 +6847,11 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -6310,6 +6892,14 @@ func (r *NotificationsService) Insert(bucket string, notification *Notification) return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *NotificationsInsertCall) ProvisionalUserProject(provisionalUserProject string) *NotificationsInsertCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { @@ -6420,6 +7010,11 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -6461,6 +7056,14 @@ func (r *NotificationsService) List(bucket string) *NotificationsListCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *NotificationsListCall) ProvisionalUserProject(provisionalUserProject string) *NotificationsListCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall { @@ -6579,6 +7182,11 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -6630,6 +7238,14 @@ func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAcc return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
+func (c *ObjectAccessControlsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { @@ -6732,6 +7348,11 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -6778,6 +7399,14 @@ func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccess return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectAccessControlsGetCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsGetCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall { @@ -6918,6 +7547,11 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -6965,6 +7599,14 @@ func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAcc return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectAccessControlsInsertCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { @@ -7089,6 +7731,11 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", @@ -7138,6 +7785,14 @@ func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAcces return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectAccessControlsListCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsListCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { @@ -7270,6 +7925,11 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -7319,6 +7979,14 @@ func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAcce return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectAccessControlsPatchCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { @@ -7451,6 +8119,11 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -7503,6 +8176,14 @@ func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAcc return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectAccessControlsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { @@ -7635,6 +8316,11 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -7726,6 +8412,14 @@ func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsComposeCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsComposeCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { @@ -7882,6 +8576,11 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -8036,6 +8735,14 @@ func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsCopyCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsCopyCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // SourceGeneration sets the optional parameter "sourceGeneration": If // present, selects a specific revision of the source object (as opposed // to the latest version, the default). @@ -8248,6 +8955,11 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "sourceBucket": { // "description": "Name of the bucket in which to find the source object.", // "location": "path", @@ -8353,6 +9065,14 @@ func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsDeleteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. 
Required for Requester Pays buckets. func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { @@ -8471,6 +9191,11 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -8562,6 +9287,14 @@ func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsGetCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsGetCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { @@ -8747,6 +9480,11 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -8798,6 +9536,14 @@ func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPol return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsGetIamPolicyCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall { @@ -8930,6 +9676,11 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -9069,6 +9820,14 @@ func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
+func (c *ObjectsInsertCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsInsertCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall { @@ -9174,7 +9933,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { return nil, err } req.Header = reqHeaders - gensupport.SetGetBody(req, getBody) + req.GetBody = getBody googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) @@ -9337,6 +10096,11 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -9434,6 +10198,14 @@ func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsListCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsListCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { @@ -9601,6 +10373,11 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -9745,6 +10522,14 @@ func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsPatchCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsPatchCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request, for Requester Pays buckets. 
func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { @@ -9927,6 +10712,11 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request, for Requester Pays buckets.", // "location": "query", @@ -10104,6 +10894,14 @@ func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsRewriteCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsRewriteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // RewriteToken sets the optional parameter "rewriteToken": Include this // field (from the previous rewrite response) on each rewrite request // after the first one, until the rewrite response 'done' flag is true. @@ -10338,6 +11136,11 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "rewriteToken": { // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", // "location": "query", @@ -10412,6 +11215,14 @@ func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPol return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsSetIamPolicyCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { @@ -10536,6 +11347,11 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -10588,6 +11404,14 @@ func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTes return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
+func (c *ObjectsTestIamPermissionsCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { @@ -10728,6 +11552,11 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "required": true, // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -10845,6 +11674,14 @@ func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsUpdateCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { @@ -11027,6 +11864,11 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -11123,6 +11965,14 @@ func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall return c } +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsWatchAllCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsWatchAllCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { @@ -11282,6 +12132,11 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) // "location": "query", // "type": "string" // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", @@ -11313,28 +12168,27 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) } -// method id "storage.projects.serviceAccount.get": +// method id "storage.projects.hmacKeys.create": -type ProjectsServiceAccountGetCall struct { - s *Service - projectId string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsHmacKeysCreateCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Get the email address of this project's Google Cloud Storage -// service account. -func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAccountGetCall { - c := &ProjectsServiceAccountGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Create: Creates a new HMAC key for the specified service account. +func (r *ProjectsHmacKeysService) Create(projectId string, serviceAccountEmail string) *ProjectsHmacKeysCreateCall { + c := &ProjectsHmacKeysCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId + c.urlParams_.Set("serviceAccountEmail", serviceAccountEmail) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. -func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall { +func (c *ProjectsHmacKeysCreateCall) UserProject(userProject string) *ProjectsHmacKeysCreateCall { c.urlParams_.Set("userProject", userProject) return c } @@ -11342,53 +12196,40 @@ func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *Project // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsServiceAccountGetCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountGetCall { +func (c *ProjectsHmacKeysCreateCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsServiceAccountGetCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsServiceAccountGetCall) Context(ctx context.Context) *ProjectsServiceAccountGetCall { +func (c *ProjectsHmacKeysCreateCall) Context(ctx context.Context) *ProjectsHmacKeysCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsServiceAccountGetCall) Header() http.Header { +func (c *ProjectsHmacKeysCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -11399,14 +12240,14 @@ func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.projects.serviceAccount.get" call. -// Exactly one of *ServiceAccount or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ServiceAccount.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) { +// Do executes the "storage.projects.hmacKeys.create" call. +// Exactly one of *HmacKey or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *HmacKey.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -11425,7 +12266,7 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ServiceAccount{ + ret := &HmacKey{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11437,19 +12278,878 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi } return ret, nil // { - // "description": "Get the email address of this project's Google Cloud Storage service account.", - // "httpMethod": "GET", - // "id": "storage.projects.serviceAccount.get", + // "description": "Creates a new HMAC key for the specified service account.", + // "httpMethod": "POST", + // "id": "storage.projects.hmacKeys.create", // "parameterOrder": [ - // "projectId" + // "projectId", + // "serviceAccountEmail" // ], // "parameters": { // "projectId": { - // "description": "Project ID", + // "description": "Project ID owning the service account.", // "location": "path", // "required": true, // "type": "string" // }, + // "serviceAccountEmail": { + // "description": "Email address of the service account.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/hmacKeys", + // "response": { + // "$ref": "HmacKey" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.projects.hmacKeys.delete": + +type ProjectsHmacKeysDeleteCall struct { + s *Service + projectId string + accessId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an HMAC key. +func (r *ProjectsHmacKeysService) Delete(projectId string, accessId string) *ProjectsHmacKeysDeleteCall { + c := &ProjectsHmacKeysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.accessId = accessId + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *ProjectsHmacKeysDeleteCall) UserProject(userProject string) *ProjectsHmacKeysDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHmacKeysDeleteCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHmacKeysDeleteCall) Context(ctx context.Context) *ProjectsHmacKeysDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
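The descriptor above completes the new storage.projects.hmacKeys.create method. A hedged sketch of calling it follows; it assumes the HmacKey and HmacKeyMetadata structs generated earlier in this file expose Secret, Metadata and AccessId fields (they are not visible in this hunk), and the project and service-account values are placeholders.

package hmacexample // hypothetical helper package, for illustration only

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// createHmacKey creates an HMAC key for saEmail and returns its access ID and
// secret. The secret is only returned by the create call, so the caller must
// persist it immediately.
func createHmacKey(ctx context.Context, svc *storage.Service, projectID, saEmail string) (accessID, secret string, err error) {
	key, err := svc.Projects.HmacKeys.Create(projectID, saEmail).Context(ctx).Do()
	if err != nil {
		return "", "", err
	}
	return key.Metadata.AccessId, key.Secret, nil
}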
+func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "accessId": c.accessId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.delete" call. +func (c *ProjectsHmacKeysDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes an HMAC key.", + // "httpMethod": "DELETE", + // "id": "storage.projects.hmacKeys.delete", + // "parameterOrder": [ + // "projectId", + // "accessId" + // ], + // "parameters": { + // "accessId": { + // "description": "Name of the HMAC key to be deleted.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID owning the requested key", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/hmacKeys/{accessId}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.projects.hmacKeys.get": + +type ProjectsHmacKeysGetCall struct { + s *Service + projectId string + accessId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves an HMAC key's metadata +func (r *ProjectsHmacKeysService) Get(projectId string, accessId string) *ProjectsHmacKeysGetCall { + c := &ProjectsHmacKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.accessId = accessId + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *ProjectsHmacKeysGetCall) UserProject(userProject string) *ProjectsHmacKeysGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHmacKeysGetCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsHmacKeysGetCall) IfNoneMatch(entityTag string) *ProjectsHmacKeysGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHmacKeysGetCall) Context(ctx context.Context) *ProjectsHmacKeysGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHmacKeysGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "accessId": c.accessId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.get" call. +// Exactly one of *HmacKeyMetadata or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HmacKeyMetadata.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMetadata, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HmacKeyMetadata{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an HMAC key's metadata", + // "httpMethod": "GET", + // "id": "storage.projects.hmacKeys.get", + // "parameterOrder": [ + // "projectId", + // "accessId" + // ], + // "parameters": { + // "accessId": { + // "description": "Name of the HMAC key.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID owning the service account of the requested key.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/hmacKeys/{accessId}", + // "response": { + // "$ref": "HmacKeyMetadata" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.read_only" + // ] + // } + +} + +// method id "storage.projects.hmacKeys.list": + +type ProjectsHmacKeysListCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of HMAC keys matching the criteria. +func (r *ProjectsHmacKeysService) List(projectId string) *ProjectsHmacKeysListCall { + c := &ProjectsHmacKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items to return in a single page of responses. The service uses +// this parameter or 250 items, whichever is smaller. The max number of +// items per page will also be limited by the number of distinct service +// accounts in the response. If the number of service accounts in a +// single response is too high, the page will truncated and a next page +// token will be returned. +func (c *ProjectsHmacKeysListCall) MaxResults(maxResults int64) *ProjectsHmacKeysListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ProjectsHmacKeysListCall) PageToken(pageToken string) *ProjectsHmacKeysListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ServiceAccountEmail sets the optional parameter +// "serviceAccountEmail": If present, only keys for the given service +// account are returned. +func (c *ProjectsHmacKeysListCall) ServiceAccountEmail(serviceAccountEmail string) *ProjectsHmacKeysListCall { + c.urlParams_.Set("serviceAccountEmail", serviceAccountEmail) + return c +} + +// ShowDeletedKeys sets the optional parameter "showDeletedKeys": +// Whether or not to show keys in the DELETED state. 
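The list call above takes the usual optional filters plus a page token; the Do and Pages plumbing that follows runs the same loop shown here by hand. A hedged sketch, assuming HmacKeysMetadata.Items holds the per-key metadata (the NextPageToken field is used by the generated Pages helper below); identifiers are placeholders.

package hmacexample // hypothetical helper package, for illustration only

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// listHmacKeys returns the metadata of every HMAC key owned by saEmail,
// including keys in the DELETED state, paging through results manually.
func listHmacKeys(ctx context.Context, svc *storage.Service, projectID, saEmail string) ([]*storage.HmacKeyMetadata, error) {
	var keys []*storage.HmacKeyMetadata
	pageToken := ""
	for {
		call := svc.Projects.HmacKeys.List(projectID).
			ServiceAccountEmail(saEmail).
			ShowDeletedKeys(true).
			MaxResults(100).
			Context(ctx)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		keys = append(keys, resp.Items...)
		if resp.NextPageToken == "" {
			return keys, nil
		}
		pageToken = resp.NextPageToken
	}
}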
+func (c *ProjectsHmacKeysListCall) ShowDeletedKeys(showDeletedKeys bool) *ProjectsHmacKeysListCall { + c.urlParams_.Set("showDeletedKeys", fmt.Sprint(showDeletedKeys)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *ProjectsHmacKeysListCall) UserProject(userProject string) *ProjectsHmacKeysListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHmacKeysListCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsHmacKeysListCall) IfNoneMatch(entityTag string) *ProjectsHmacKeysListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHmacKeysListCall) Context(ctx context.Context) *ProjectsHmacKeysListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHmacKeysListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.list" call. +// Exactly one of *HmacKeysMetadata or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HmacKeysMetadata.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMetadata, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HmacKeysMetadata{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of HMAC keys matching the criteria.", + // "httpMethod": "GET", + // "id": "storage.projects.hmacKeys.list", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "maxResults": { + // "default": "250", + // "description": "Maximum number of items to return in a single page of responses. The service uses this parameter or 250 items, whichever is smaller. The max number of items per page will also be limited by the number of distinct service accounts in the response. If the number of service accounts in a single response is too high, the page will truncated and a next page token will be returned.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "Name of the project in which to look for HMAC keys.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "serviceAccountEmail": { + // "description": "If present, only keys for the given service account are returned.", + // "location": "query", + // "type": "string" + // }, + // "showDeletedKeys": { + // "description": "Whether or not to show keys in the DELETED state.", + // "location": "query", + // "type": "boolean" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/hmacKeys", + // "response": { + // "$ref": "HmacKeysMetadata" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsHmacKeysListCall) Pages(ctx context.Context, f func(*HmacKeysMetadata) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.projects.hmacKeys.update": + +type ProjectsHmacKeysUpdateCall struct { + s *Service + projectId string + accessId string + hmackeymetadata *HmacKeyMetadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the state of an HMAC key. 
See the HMAC Key resource +// descriptor for valid states. +func (r *ProjectsHmacKeysService) Update(projectId string, accessId string, hmackeymetadata *HmacKeyMetadata) *ProjectsHmacKeysUpdateCall { + c := &ProjectsHmacKeysUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.accessId = accessId + c.hmackeymetadata = hmackeymetadata + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *ProjectsHmacKeysUpdateCall) UserProject(userProject string) *ProjectsHmacKeysUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHmacKeysUpdateCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHmacKeysUpdateCall) Context(ctx context.Context) *ProjectsHmacKeysUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.hmackeymetadata) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "accessId": c.accessId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.update" call. +// Exactly one of *HmacKeyMetadata or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HmacKeyMetadata.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyMetadata, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HmacKeyMetadata{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the state of an HMAC key. See the HMAC Key resource descriptor for valid states.", + // "httpMethod": "PUT", + // "id": "storage.projects.hmacKeys.update", + // "parameterOrder": [ + // "projectId", + // "accessId" + // ], + // "parameters": { + // "accessId": { + // "description": "Name of the HMAC key being updated.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID owning the service account of the updated key.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/hmacKeys/{accessId}", + // "request": { + // "$ref": "HmacKeyMetadata" + // }, + // "response": { + // "$ref": "HmacKeyMetadata" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.projects.serviceAccount.get": + +type ProjectsServiceAccountGetCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Get the email address of this project's Google Cloud Storage +// service account. +func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAccountGetCall { + c := &ProjectsServiceAccountGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ProjectsServiceAccountGetCall) ProvisionalUserProject(provisionalUserProject string) *ProjectsServiceAccountGetCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServiceAccountGetCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
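The update and delete methods above are typically used together: the service rejects deletion of a key that is still ACTIVE, so it is first updated to INACTIVE. A hedged sketch, assuming the generated HmacKeyMetadata struct has a State field matching the schema's ACTIVE/INACTIVE/DELETED enum; identifiers are placeholders.

package hmacexample // hypothetical helper package, for illustration only

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// deactivateAndDeleteHmacKey moves the key to the INACTIVE state and then
// deletes it; deleting an ACTIVE key fails on the server side.
func deactivateAndDeleteHmacKey(ctx context.Context, svc *storage.Service, projectID, accessID string) error {
	if _, err := svc.Projects.HmacKeys.Update(projectID, accessID,
		&storage.HmacKeyMetadata{State: "INACTIVE"}).Context(ctx).Do(); err != nil {
		return err
	}
	return svc.Projects.HmacKeys.Delete(projectID, accessID).Context(ctx).Do()
}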
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsServiceAccountGetCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountGetCall) Context(ctx context.Context) *ProjectsServiceAccountGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.serviceAccount.get" call. +// Exactly one of *ServiceAccount or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ServiceAccount.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ServiceAccount{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Get the email address of this project's Google Cloud Storage service account.", + // "httpMethod": "GET", + // "id": "storage.projects.serviceAccount.get", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "projectId": { + // "description": "Project ID", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request.", // "location": "query", diff --git a/vendor/google.golang.org/api/tpu/v1/BUILD.bazel b/vendor/google.golang.org/api/tpu/v1/BUILD.bazel index 60942bd5b85d2..ccc55356caef5 100644 --- a/vendor/google.golang.org/api/tpu/v1/BUILD.bazel +++ b/vendor/google.golang.org/api/tpu/v1/BUILD.bazel @@ -9,5 +9,7 @@ go_library( deps = [ "//vendor/google.golang.org/api/gensupport:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/google.golang.org/api/option:go_default_library", + "//vendor/google.golang.org/api/transport/http:go_default_library", ], ) diff --git a/vendor/google.golang.org/api/tpu/v1/tpu-api.json b/vendor/google.golang.org/api/tpu/v1/tpu-api.json index aed51d83d097a..acd7f0256e0bd 100644 --- a/vendor/google.golang.org/api/tpu/v1/tpu-api.json +++ b/vendor/google.golang.org/api/tpu/v1/tpu-api.json @@ -658,7 +658,7 @@ } } }, - "revision": "20181010", + "revision": "20190509", "rootUrl": "https://tpu.googleapis.com/", "schemas": { "AcceleratorType": { @@ -855,14 +855,18 @@ "enum": [ "HEALTH_UNSPECIFIED", "HEALTHY", - "UNHEALTHY", - "TIMEOUT" + "DEPRECATED_UNHEALTHY", + "TIMEOUT", + "UNHEALTHY_TENSORFLOW", + "UNHEALTHY_MAINTENANCE" ], "enumDescriptions": [ "Health status is unknown: not initialized or failed to retrieve.", "The resource is healthy.", "The resource is unhealthy.", - "The resource is unresponsive." + "The resource is unresponsive.", + "The in-guest ML stack is unhealthy.", + "The node is under maintenance/priority boost caused rescheduling and\nwill resume running once rescheduled." ], "type": "string" }, @@ -890,7 +894,7 @@ "type": "string" }, "networkEndpoints": { - "description": "Output only. The network endpoints where TPU workers can be accessed and sent work.\nIt is recommended that Tensorflow clients of the node reach out to the 0th\nentry in this map first.", + "description": "Output only. The network endpoints where TPU workers can be accessed and\nsent work. 
It is recommended that Tensorflow clients of the node reach out\nto the 0th entry in this map first.", "items": { "$ref": "NetworkEndpoint" }, @@ -921,7 +925,10 @@ "STOPPING", "STARTING", "PREEMPTED", - "TERMINATED" + "TERMINATED", + "HIDING", + "HIDDEN", + "UNHIDING" ], "enumDescriptions": [ "TPU node state is not known/set.", @@ -931,11 +938,14 @@ "TPU node is undergoing reimaging.", "TPU node is being deleted.", "TPU node is being repaired and may be unusable. Details can be\nfound in the `help_description` field.", - "7 - Reserved. Was SUSPENDED.\nTPU node is stopped.", + "TPU node is stopped.", "TPU node is currently stopping.", "TPU node is currently starting.", "TPU node has been preempted. Only applies to Preemptible TPU Nodes.", - "TPU node has been terminated due to maintenance or has reached the end of\nits life cycle (for preemptible nodes)." + "TPU node has been terminated due to maintenance or has reached the end of\nits life cycle (for preemptible nodes).", + "TPU node is currently hiding.", + "TPU node has been hidden.", + "TPU node is currently unhiding." ], "type": "string" }, @@ -967,7 +977,7 @@ "type": "object" }, "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", + "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", "type": "string" }, "response": { @@ -1034,6 +1044,10 @@ "properties": { "preemptible": { "type": "boolean" + }, + "reserved": { + "description": "Whether the node is created under a reservation.", + "type": "boolean" } }, "type": "object" @@ -1045,7 +1059,7 @@ "type": "object" }, "Status": { - "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` that can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. 
For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error\nmessage, and error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` that can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. 
If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", "id": "Status", "properties": { "code": { diff --git a/vendor/google.golang.org/api/tpu/v1/tpu-gen.go b/vendor/google.golang.org/api/tpu/v1/tpu-gen.go index 14401983aae02..7e88f6e98d9a0 100644 --- a/vendor/google.golang.org/api/tpu/v1/tpu-gen.go +++ b/vendor/google.golang.org/api/tpu/v1/tpu-gen.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All rights reserved. +// Copyright 2019 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -6,13 +6,35 @@ // Package tpu provides access to the Cloud TPU API. // -// See https://cloud.google.com/tpu/ +// For product documentation, see: https://cloud.google.com/tpu/ +// +// Creating a client // // Usage example: // // import "google.golang.org/api/tpu/v1" // ... -// tpuService, err := tpu.New(oauthHttpClient) +// ctx := context.Background() +// tpuService, err := tpu.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// Other authentication options +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// tpuService, err := tpu.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// tpuService, err := tpu.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. package tpu // import "google.golang.org/api/tpu/v1" import ( @@ -29,6 +51,8 @@ import ( gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + option "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code @@ -56,6 +80,32 @@ const ( CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" ) +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") @@ -510,8 +560,12 @@ type Node struct { // "HEALTH_UNSPECIFIED" - Health status is unknown: not initialized or // failed to retrieve. // "HEALTHY" - The resource is healthy. 
- // "UNHEALTHY" - The resource is unhealthy. + // "DEPRECATED_UNHEALTHY" - The resource is unhealthy. // "TIMEOUT" - The resource is unresponsive. + // "UNHEALTHY_TENSORFLOW" - The in-guest ML stack is unhealthy. + // "UNHEALTHY_MAINTENANCE" - The node is under maintenance/priority + // boost caused rescheduling and + // will resume running once rescheduled. Health string `json:"health,omitempty"` // HealthDescription: Output only. @@ -542,10 +596,10 @@ type Node struct { Network string `json:"network,omitempty"` // NetworkEndpoints: Output only. The network endpoints where TPU - // workers can be accessed and sent work. - // It is recommended that Tensorflow clients of the node reach out to - // the 0th - // entry in this map first. + // workers can be accessed and + // sent work. It is recommended that Tensorflow clients of the node + // reach out + // to the 0th entry in this map first. NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` // Port: Output only. @@ -579,8 +633,7 @@ type Node struct { // "REPAIRING" - TPU node is being repaired and may be unusable. // Details can be // found in the `help_description` field. - // "STOPPED" - 7 - Reserved. Was SUSPENDED. - // TPU node is stopped. + // "STOPPED" - TPU node is stopped. // "STOPPING" - TPU node is currently stopping. // "STARTING" - TPU node is currently starting. // "PREEMPTED" - TPU node has been preempted. Only applies to @@ -588,6 +641,9 @@ type Node struct { // "TERMINATED" - TPU node has been terminated due to maintenance or // has reached the end of // its life cycle (for preemptible nodes). + // "HIDING" - TPU node is currently hiding. + // "HIDDEN" - TPU node has been hidden. + // "UNHIDING" - TPU node is currently unhiding. State string `json:"state,omitempty"` // TensorflowVersion: The version of Tensorflow running in the @@ -651,7 +707,8 @@ type Operation struct { // service that // originally returns it. If you use the default HTTP mapping, // the - // `name` should have the format of `operations/some/unique/name`. + // `name` should be a resource name ending with + // `operations/{unique_id}`. Name string `json:"name,omitempty"` // Response: The normal response of the operation in case of success. @@ -785,6 +842,9 @@ func (s *ReimageNodeRequest) MarshalJSON() ([]byte, error) { type SchedulingConfig struct { Preemptible bool `json:"preemptible,omitempty"` + // Reserved: Whether the node is created under a reservation. + Reserved bool `json:"reserved,omitempty"` + // ForceSendFields is a list of field names (e.g. "Preemptible") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -813,20 +873,20 @@ type StartNodeRequest struct { } // Status: The `Status` type defines a logical error model that is -// suitable for different -// programming environments, including REST APIs and RPC APIs. It is -// used by -// [gRPC](https://github.com/grpc). The error model is designed to -// be: +// suitable for +// different programming environments, including REST APIs and RPC APIs. +// It is +// used by [gRPC](https://github.com/grpc). The error model is designed +// to be: // // - Simple to use and understand for most users // - Flexible enough to meet unexpected needs // // # Overview // -// The `Status` message contains three pieces of data: error code, error -// message, -// and error details. 
The error code should be an enum value +// The `Status` message contains three pieces of data: error code, +// error +// message, and error details. The error code should be an enum value // of // google.rpc.Code, but it may accept additional error codes if needed. // The diff --git a/vendor/google.golang.org/api/transport/http/BUILD.bazel b/vendor/google.golang.org/api/transport/http/BUILD.bazel new file mode 100644 index 0000000000000..2d2f1079f7379 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["dial.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/api/transport/http", + importpath = "google.golang.org/api/transport/http", + visibility = ["//visibility:public"], + deps = [ + "//vendor/go.opencensus.io/plugin/ochttp:go_default_library", + "//vendor/golang.org/x/oauth2:go_default_library", + "//vendor/google.golang.org/api/googleapi/transport:go_default_library", + "//vendor/google.golang.org/api/internal:go_default_library", + "//vendor/google.golang.org/api/option:go_default_library", + "//vendor/google.golang.org/api/transport/http/internal/propagation:go_default_library", + ], +) diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go new file mode 100644 index 0000000000000..c0d8bf20b0223 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -0,0 +1,161 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package http supports network connections to HTTP servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package http + +import ( + "context" + "errors" + "net/http" + + "go.opencensus.io/plugin/ochttp" + "golang.org/x/oauth2" + "google.golang.org/api/googleapi/transport" + "google.golang.org/api/internal" + "google.golang.org/api/option" + "google.golang.org/api/transport/http/internal/propagation" +) + +// NewClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, "", err + } + // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? + if settings.HTTPClient != nil { + return settings.HTTPClient, settings.Endpoint, nil + } + trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings) + if err != nil { + return nil, "", err + } + return &http.Client{Transport: trans}, settings.Endpoint, nil +} + +// NewTransport creates an http.RoundTripper for use communicating with a Google +// cloud service, configured with the given ClientOptions. 
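NewClient above is what the regenerated clients call from their NewService constructors (compare the tpu.NewService body earlier in this diff). A hedged sketch of using it directly and handing the result to the legacy constructor; it assumes Application Default Credentials are available, and the final log line is illustrative only.

package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	tpu "google.golang.org/api/tpu/v1"
	htransport "google.golang.org/api/transport/http"
)

func main() {
	ctx := context.Background()
	// Resolve options and credentials into an authenticated *http.Client,
	// plus any endpoint override supplied via option.WithEndpoint.
	client, endpoint, err := htransport.NewClient(ctx,
		option.WithScopes(tpu.CloudPlatformScope),
	)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := tpu.New(client) // deprecated constructor, still supported
	if err != nil {
		log.Fatal(err)
	}
	if endpoint != "" {
		svc.BasePath = endpoint // mirror what the generated NewService does
	}
	log.Println("TPU API base path:", svc.BasePath)
}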
Its RoundTrip method delegates to base. +func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, err + } + if settings.HTTPClient != nil { + return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport") + } + return newTransport(ctx, base, settings) +} + +func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) { + trans := base + trans = parameterTransport{ + base: trans, + userAgent: settings.UserAgent, + quotaProject: settings.QuotaProject, + requestReason: settings.RequestReason, + } + trans = addOCTransport(trans) + switch { + case settings.NoAuth: + // Do nothing. + case settings.APIKey != "": + trans = &transport.APIKey{ + Transport: trans, + Key: settings.APIKey, + } + default: + creds, err := internal.Creds(ctx, settings) + if err != nil { + return nil, err + } + trans = &oauth2.Transport{ + Base: trans, + Source: creds.TokenSource, + } + } + return trans, nil +} + +func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + if o.GRPCConn != nil { + return nil, errors.New("unsupported gRPC connection specified") + } + return &o, nil +} + +type parameterTransport struct { + userAgent string + quotaProject string + requestReason string + + base http.RoundTripper +} + +func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base + if rt == nil { + return nil, errors.New("transport: no Transport specified") + } + if t.userAgent == "" { + return rt.RoundTrip(req) + } + newReq := *req + newReq.Header = make(http.Header) + for k, vv := range req.Header { + newReq.Header[k] = vv + } + // TODO(cbro): append to existing User-Agent header? + newReq.Header.Set("User-Agent", t.userAgent) + + // Attach system parameters into the header + if t.quotaProject != "" { + newReq.Header.Set("X-Goog-User-Project", t.quotaProject) + } + if t.requestReason != "" { + newReq.Header.Set("X-Goog-Request-Reason", t.requestReason) + } + + return rt.RoundTrip(&newReq) +} + +// Set at init time by dial_appengine.go. If nil, we're not on App Engine. +var appengineUrlfetchHook func(context.Context) http.RoundTripper + +// defaultBaseTransport returns the base HTTP transport. +// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport. +func defaultBaseTransport(ctx context.Context) http.RoundTripper { + if appengineUrlfetchHook != nil { + return appengineUrlfetchHook(ctx) + } + return http.DefaultTransport +} + +func addOCTransport(trans http.RoundTripper) http.RoundTripper { + return &ochttp.Transport{ + Base: trans, + Propagation: &propagation.HTTPFormat{}, + } +} diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/google.golang.org/api/transport/http/dial_appengine.go new file mode 100644 index 0000000000000..04c81413c5198 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/dial_appengine.go @@ -0,0 +1,30 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build appengine + +package http + +import ( + "context" + "net/http" + + "google.golang.org/appengine/urlfetch" +) + +func init() { + appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper { + return &urlfetch.Transport{Context: ctx} + } +} diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/BUILD.bazel b/vendor/google.golang.org/api/transport/http/internal/propagation/BUILD.bazel new file mode 100644 index 0000000000000..3904efcc80898 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/internal/propagation/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["http.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/api/transport/http/internal/propagation", + importpath = "google.golang.org/api/transport/http/internal/propagation", + visibility = ["//vendor/google.golang.org/api/transport/http:__subpackages__"], + deps = [ + "//vendor/go.opencensus.io/trace:go_default_library", + "//vendor/go.opencensus.io/trace/propagation:go_default_library", + ], +) diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go new file mode 100644 index 0000000000000..24b4f0d291561 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go @@ -0,0 +1,96 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +// Package propagation implements X-Cloud-Trace-Context header propagation used +// by Google Cloud products. +package propagation + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "net/http" + "strconv" + "strings" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +const ( + httpHeaderMaxSize = 200 + httpHeader = `X-Cloud-Trace-Context` +) + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace. +type HTTPFormat struct{} + +// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + h := req.Header.Get(httpHeader) + // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. + // Return if the header is empty or missing, or if the header is unreasonably + // large, to avoid making unnecessary copies of a large string. 
+ if h == "" || len(h) > httpHeaderMaxSize { + return trace.SpanContext{}, false + } + + // Parse the trace id field. + slash := strings.Index(h, `/`) + if slash == -1 { + return trace.SpanContext{}, false + } + tid, h := h[:slash], h[slash+1:] + + buf, err := hex.DecodeString(tid) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], buf) + + // Parse the span id field. + spanstr := h + semicolon := strings.Index(h, `;`) + if semicolon != -1 { + spanstr, h = h[:semicolon], h[semicolon+1:] + } + sid, err := strconv.ParseUint(spanstr, 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + binary.BigEndian.PutUint64(sc.SpanID[:], sid) + + // Parse the options field, options field is optional. + if !strings.HasPrefix(h, "o=") { + return sc, true + } + o, err := strconv.ParseUint(h[2:], 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(o) + return sc, true +} + +// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + sid := binary.BigEndian.Uint64(sc.SpanID[:]) + header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) + req.Header.Set(httpHeader, header) +} diff --git a/vendor/k8s.io/kubernetes/LICENSE b/vendor/google.golang.org/genproto/LICENSE similarity index 100% rename from vendor/k8s.io/kubernetes/LICENSE rename to vendor/google.golang.org/genproto/LICENSE diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/BUILD.bazel b/vendor/google.golang.org/genproto/googleapis/rpc/status/BUILD.bazel new file mode 100644 index 0000000000000..73b0a240356a7 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["status.pb.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/genproto/googleapis/rpc/status", + importpath = "google.golang.org/genproto/googleapis/rpc/status", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes/any:go_default_library", + ], +) diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 0000000000000..57ae35f6b5281 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/rpc/status.proto + +package status // import "google.golang.org/genproto/googleapis/rpc/status" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import any "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). The error model is designed to be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error +// message, and error details. The error code should be an enum value of +// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes +// if needed. The error message should be a developer-facing English message +// that helps developers *understand* and *resolve* the error. If a localized +// user-facing error message is needed, put the localized message in the error +// details or localize it in the client. The optional error details may contain +// arbitrary information about the error. There is a predefined set of error +// detail types in the package `google.rpc` that can be used for common error +// conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error model, but it +// is not necessarily the actual wire format. When the `Status` message is +// exposed in different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety of +// environments, either with or without APIs, to provide a +// consistent developer experience across different environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the client, +// it may embed the `Status` in the normal response to indicate the partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch response, the +// `Status` message should be used directly inside batch response, one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message `Status` could +// be used directly after any stripping needed for security/privacy reasons. +type Status struct { + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
+ Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_status_ced6ddf76350620b, []int{0} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (dst *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(dst, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*Status)(nil), "google.rpc.Status") +} + +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_status_ced6ddf76350620b) } + +var fileDescriptor_status_ced6ddf76350620b = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, + 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, + 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, + 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, + 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, + 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, + 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, + 0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, + 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, + 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1, + 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml new file mode 100644 index 0000000000000..024408e64625d --- /dev/null +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -0,0 +1,39 @@ +language: go + +matrix: + include: + - go: 1.12.x + env: VET=1 GO111MODULE=on + - go: 1.12.x + env: RACE=1 GO111MODULE=on + - go: 1.12.x + env: RUN386=1 + - go: 1.12.x + env: GRPC_GO_RETRY=on + - go: 1.11.x + env: GO111MODULE=on + - go: 1.10.x + - go: 1.9.x + - go: 1.9.x + env: GAE=1 + +go_import_path: google.golang.org/grpc + +before_install: + - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; 
fi + - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi + - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi + - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then VET_SKIP_PROTO=1; fi + +install: + - try3() { eval "$*" || eval "$*" || eval "$*"; } + - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' + - if [[ "${GAE}" = 1 ]]; then source ./install_gae.sh; make testappenginedeps; fi + - if [[ "${VET}" = 1 ]]; then ./vet.sh -install; fi + +script: + - set -e + - if [[ "${VET}" = 1 ]]; then ./vet.sh; fi + - if [[ "${GAE}" = 1 ]]; then make testappengine; exit 0; fi + - if [[ "${RACE}" = 1 ]]; then make testrace; exit 0; fi + - make test diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 0000000000000..e491a9e7f7831 --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/BUILD.bazel b/vendor/google.golang.org/grpc/BUILD.bazel new file mode 100644 index 0000000000000..dbcb331cea759 --- /dev/null +++ b/vendor/google.golang.org/grpc/BUILD.bazel @@ -0,0 +1,62 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "backoff.go", + "balancer.go", + "balancer_conn_wrappers.go", + "balancer_v1_wrapper.go", + "call.go", + "clientconn.go", + "codec.go", + "dialoptions.go", + "doc.go", + "interceptor.go", + "picker_wrapper.go", + "pickfirst.go", + "preloader.go", + "proxy.go", + "resolver_conn_wrapper.go", + "rpc_util.go", + "server.go", + "service_config.go", + "stream.go", + "trace.go", + "version.go", + ], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc", + importpath = "google.golang.org/grpc", + visibility = ["//visibility:public"], + deps = [ + "//vendor/golang.org/x/net/trace:go_default_library", + "//vendor/google.golang.org/grpc/balancer:go_default_library", + "//vendor/google.golang.org/grpc/balancer/roundrobin:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/connectivity:go_default_library", + "//vendor/google.golang.org/grpc/credentials:go_default_library", + "//vendor/google.golang.org/grpc/encoding:go_default_library", + "//vendor/google.golang.org/grpc/encoding/proto:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/internal:go_default_library", + "//vendor/google.golang.org/grpc/internal/backoff:go_default_library", + "//vendor/google.golang.org/grpc/internal/balancerload:go_default_library", + "//vendor/google.golang.org/grpc/internal/binarylog:go_default_library", + "//vendor/google.golang.org/grpc/internal/channelz:go_default_library", + "//vendor/google.golang.org/grpc/internal/envconfig:go_default_library", + "//vendor/google.golang.org/grpc/internal/grpcrand:go_default_library", + "//vendor/google.golang.org/grpc/internal/grpcsync:go_default_library", + "//vendor/google.golang.org/grpc/internal/transport:go_default_library", + "//vendor/google.golang.org/grpc/keepalive:go_default_library", + "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/google.golang.org/grpc/naming:go_default_library", + "//vendor/google.golang.org/grpc/peer:go_default_library", + "//vendor/google.golang.org/grpc/resolver:go_default_library", + "//vendor/google.golang.org/grpc/resolver/dns:go_default_library", + 
"//vendor/google.golang.org/grpc/resolver/passthrough:go_default_library", + "//vendor/google.golang.org/grpc/serviceconfig:go_default_library", + "//vendor/google.golang.org/grpc/stats:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + "//vendor/google.golang.org/grpc/tap:go_default_library", + ], +) diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 0000000000000..6e69b28c27060 --- /dev/null +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,60 @@ +# How to contribute + +We definitely welcome your patches and contributions to gRPC! + +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). + +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single + concern**. We often times receive PRs that are trying to fix several things at + a time, but only one fix is considered acceptable, nothing gets merged and + both author's & review's time is wasted. Create more PRs to address different + concerns and everyone will be happy. + +- The grpc package should only depend on standard Go packages and a small number + of exceptions. If your contribution introduces new dependencies which are NOT + in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a + discussion with gRPC-Go authors and consultants. + +- For speculative changes, consider opening an issue and discussing it first. If + you are suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made + and **why** it was made. Link to a github issue if it exists. + +- Don't fix code style and formatting unless you are already changing that line + to address an issue. PRs with irrelevant changes won't be merged. If you do + want to fix formatting or style, do that in a separate PR. + +- Unless your PR is trivial, you should expect there will be reviewer comments + that you'll need to address before merging. We expect you to be reasonably + responsive to those comments, otherwise the PR will be closed after 2-3 weeks + of inactivity. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs + with messy commit history are difficult to review and won't be merged. Use + `rebase -i upstream/master` to curate your commit history and/or to bring in + latest changes from master (but avoid rebasing in the middle of a code + review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, we + can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We + recommend you **run tests locally** before creating your PR to catch breakages + early on. + - `make all` to test everything, OR + - `make vet` to catch vet errors + - `make test` to run the tests + - `make testrace` to run tests in race mode + - optional `make testappengine` to run tests with appengine + +- Exceptions to the rules can be made if there's a compelling reason for doing so. 
diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 0000000000000..db982aabde6f7 --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,60 @@ +all: vet test testrace + +build: deps + go build google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... + +deps: + go get -d -v google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +test: testdeps + go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testappengine: testappenginedeps + goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testappenginedeps: + goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... + +testdeps: + go get -d -v -t google.golang.org/grpc/... + +testrace: testdeps + go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +updatedeps: + go get -d -v -u -f google.golang.org/grpc/... + +updatetestdeps: + go get -d -v -t -u -f google.golang.org/grpc/... + +vet: vetdeps + ./vet.sh + +vetdeps: + ./vet.sh -install + +.PHONY: \ + all \ + build \ + clean \ + deps \ + proto \ + test \ + testappengine \ + testappenginedeps \ + testdeps \ + testrace \ + updatedeps \ + updatetestdeps \ + vet \ + vetdeps diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 0000000000000..afbc43db5105e --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,121 @@ +# gRPC-Go + +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) +[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) +[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) + +The Go implementation of [gRPC](https://grpc.io/): A high performance, open +source, general RPC framework that puts mobile and HTTP/2 first. For more +information see the [gRPC Quick Start: +Go](https://grpc.io/docs/quickstart/go.html) guide. + +Installation +------------ + +To install this package, you need to install Go and setup your Go workspace on +your computer. The simplest way to install the library is to run: + +``` +$ go get -u google.golang.org/grpc +``` + +With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in +your source code and `go [build|run|test]` will automatically download the +necessary dependencies ([Go modules +ref](https://github.com/golang/go/wiki/Modules)). + +If you are trying to access grpc-go from within China, please see the +[FAQ](#FAQ) below. + +Prerequisites +------------- +gRPC-Go requires Go 1.9 or later. + +Documentation +------------- +- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API + descriptions. +- Documentation on specific topics can be found in the [Documentation + directory](Documentation/). +- Examples can be found in the [examples directory](examples/). 
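As a companion to the installation notes above, the following is a minimal sketch of dialing a server with this version of the package. The target address is a placeholder, and `grpc.WithInsecure()` is used only to keep the sketch self-contained; real clients should pass transport credentials and the generated service stubs from `protoc --go_out=plugins=grpc`.

```
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Block until the connection is up or the timeout expires.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// "localhost:50051" is a placeholder address for this sketch.
	conn, err := grpc.DialContext(ctx, "localhost:50051", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()

	// Generated client stubs would be constructed from conn here, e.g.
	// pb.NewGreeterClient(conn); omitted because no .proto file is part of this change.
	log.Println("connected")
}
```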
+ +Performance +----------- +Performance benchmark data for grpc-go and other languages is maintained in +[this +dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). + +Status +------ +General Availability [Google Cloud Platform Launch +Stages](https://cloud.google.com/terms/launch-stages). + +FAQ +--- + +#### I/O Timeout Errors + +The `golang.org` domain may be blocked from some countries. `go get` usually +produces an error like the following when this happens: + +``` +$ go get -u google.golang.org/grpc +package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) +``` + +To build Go code, there are several options: + +- Set up a VPN and access google.golang.org through that. + +- Without Go module support: `git clone` the repo manually: + + ``` + git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc + ``` + + You will need to do the same for all of grpc's dependencies in `golang.org`, + e.g. `golang.org/x/net`. + +- With Go module support: it is possible to use the `replace` feature of `go + mod` to create aliases for golang.org packages. In your project's directory: + + ``` + go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest + go mod tidy + go mod vendor + go build -mod=vendor + ``` + + Again, this will need to be done for all transitive dependencies hosted on + golang.org as well. Please refer to [this + issue](https://github.com/golang/go/issues/28652) in the golang repo regarding + this concern. + +#### Compiling error, undefined: grpc.SupportPackageIsVersion + +Please update proto package, gRPC package and rebuild the proto files: + - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` + - `go get -u google.golang.org/grpc` + - `protoc --go_out=plugins=grpc:. *.proto` + +#### How to turn on logging + +The default logger is controlled by the environment variables. Turn everything +on by setting: + +``` +GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +#### The RPC failed with error `"code = Unavailable desc = transport is closing"` + +This error means the connection the RPC is using was closed, and there are many +possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown + +It can be tricky to debug this because the error happens on the client side but +the root cause of the connection being closed is on the server side. Turn on +logging on __both client and server__, and see if there are any transport +errors. diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 0000000000000..97c6e2568f452 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,38 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatibility. + +package grpc + +import ( + "time" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go new file mode 100644 index 0000000000000..a8eb0f4760919 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer.go @@ -0,0 +1,391 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "net" + "sync" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/status" +) + +// Address represents a server the client connects to. +// +// Deprecated: please use package balancer. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BalancerConfig specifies the configurations for Balancer. +// +// Deprecated: please use package balancer. May be removed in a future 1.x release. +type BalancerConfig struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) +} + +// BalancerGetOptions configures a Get call. +// +// Deprecated: please use package balancer. May be removed in a future 1.x release. +type BalancerGetOptions struct { + // BlockingWait specifies whether Get should block when there is no + // connected address. + BlockingWait bool +} + +// Balancer chooses network addresses for RPCs. +// +// Deprecated: please use package balancer. May be removed in a future 1.x release. +type Balancer interface { + // Start does the initialization work to bootstrap a Balancer. For example, + // this function may start the name resolution and watch the updates. It will + // be called when dialing. 
+ Start(target string, config BalancerConfig) error + // Up informs the Balancer that gRPC has a connection to the server at + // addr. It returns down which is called once the connection to addr gets + // lost or closed. + // TODO: It is not clear how to construct and take advantage of the meaningful error + // parameter for down. Need realistic demands to guide. + Up(addr Address) (down func(error)) + // Get gets the address of a server for the RPC corresponding to ctx. + // i) If it returns a connected address, gRPC internals issues the RPC on the + // connection to this address; + // ii) If it returns an address on which the connection is under construction + // (initiated by Notify(...)) but not connected, gRPC internals + // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or + // Shutdown state; + // or + // * issues RPC on the connection otherwise. + // iii) If it returns an address on which the connection does not exist, gRPC + // internals treats it as an error and will fail the corresponding RPC. + // + // Therefore, the following is the recommended rule when writing a custom Balancer. + // If opts.BlockingWait is true, it should return a connected address or + // block if there is no connected address. It should respect the timeout or + // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast + // RPCs), it should return an address it has notified via Notify(...) immediately + // instead of blocking. + // + // The function returns put which is called once the rpc has completed or failed. + // put can collect and report RPC stats to a remote load balancer. + // + // This function should only return the errors Balancer cannot recover by itself. + // gRPC internals will fail the RPC if an error is returned. + Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) + // Notify returns a channel that is used by gRPC internals to watch the addresses + // gRPC needs to connect. The addresses might be from a name resolver or remote + // load balancer. gRPC internals will compare it with the existing connected + // addresses. If the address Balancer notified is not in the existing connected + // addresses, gRPC starts to connect the address. If an address in the existing + // connected addresses is not in the notification list, the corresponding connection + // is shutdown gracefully. Otherwise, there are no operations to take. Note that + // the Address slice must be the full list of the Addresses which should be connected. + // It is NOT delta. + Notify() <-chan []Address + // Close shuts down the balancer. + Close() error +} + +// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch +// the name resolution updates and updates the addresses available correspondingly. +// +// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release. +func RoundRobin(r naming.Resolver) Balancer { + return &roundRobin{r: r} +} + +type addrInfo struct { + addr Address + connected bool +} + +type roundRobin struct { + r naming.Resolver + w naming.Watcher + addrs []*addrInfo // all the addresses the client should potentially connect + mu sync.Mutex + addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. + next int // index of the next address to return for Get() + waitCh chan struct{} // the channel to block when there is no connected address available + done bool // The Balancer is closed. 
+} + +func (rr *roundRobin) watchAddrUpdates() error { + updates, err := rr.w.Next() + if err != nil { + grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) + return err + } + rr.mu.Lock() + defer rr.mu.Unlock() + for _, update := range updates { + addr := Address{ + Addr: update.Addr, + Metadata: update.Metadata, + } + switch update.Op { + case naming.Add: + var exist bool + for _, v := range rr.addrs { + if addr == v.addr { + exist = true + grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) + break + } + } + if exist { + continue + } + rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) + case naming.Delete: + for i, v := range rr.addrs { + if addr == v.addr { + copy(rr.addrs[i:], rr.addrs[i+1:]) + rr.addrs = rr.addrs[:len(rr.addrs)-1] + break + } + } + default: + grpclog.Errorln("Unknown update.Op ", update.Op) + } + } + // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. + open := make([]Address, len(rr.addrs)) + for i, v := range rr.addrs { + open[i] = v.addr + } + if rr.done { + return ErrClientConnClosing + } + select { + case <-rr.addrCh: + default: + } + rr.addrCh <- open + return nil +} + +func (rr *roundRobin) Start(target string, config BalancerConfig) error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return ErrClientConnClosing + } + if rr.r == nil { + // If there is no name resolver installed, it is not needed to + // do name resolution. In this case, target is added into rr.addrs + // as the only address available and rr.addrCh stays nil. + rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) + return nil + } + w, err := rr.r.Resolve(target) + if err != nil { + return err + } + rr.w = w + rr.addrCh = make(chan []Address, 1) + go func() { + for { + if err := rr.watchAddrUpdates(); err != nil { + return + } + } + }() + return nil +} + +// Up sets the connected state of addr and sends notification if there are pending +// Get() calls. +func (rr *roundRobin) Up(addr Address) func(error) { + rr.mu.Lock() + defer rr.mu.Unlock() + var cnt int + for _, a := range rr.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected { + cnt++ + } + } + // addr is only one which is connected. Notify the Get() callers who are blocking. + if cnt == 1 && rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + return func(err error) { + rr.down(addr, err) + } +} + +// down unsets the connected state of addr. +func (rr *roundRobin) down(addr Address, err error) { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, a := range rr.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +// Get returns the next addr in the rotation. +func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + if !opts.BlockingWait { + if len(rr.addrs) == 0 { + rr.mu.Unlock() + err = status.Errorf(codes.Unavailable, "there is no address available") + return + } + // Returns the next addr on rr.addrs for failfast RPCs. 
+ addr = rr.addrs[rr.next].addr + rr.next++ + rr.mu.Unlock() + return + } + // Wait on rr.waitCh for non-failfast RPCs. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-ch: + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + // The newly added addr got removed by Down() again. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + } + } +} + +func (rr *roundRobin) Notify() <-chan []Address { + return rr.addrCh +} + +func (rr *roundRobin) Close() error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return errBalancerClosed + } + rr.done = true + if rr.w != nil { + rr.w.Close() + } + if rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + if rr.addrCh != nil { + close(rr.addrCh) + } + return nil +} + +// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. +// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() +// returns the only address Up by resetTransport(). +type pickFirst struct { + *roundRobin +} diff --git a/vendor/google.golang.org/grpc/balancer/BUILD.bazel b/vendor/google.golang.org/grpc/balancer/BUILD.bazel new file mode 100644 index 0000000000000..2cbd2b0786dc6 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["balancer.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/balancer", + importpath = "google.golang.org/grpc/balancer", + visibility = ["//visibility:public"], + deps = [ + "//vendor/google.golang.org/grpc/connectivity:go_default_library", + "//vendor/google.golang.org/grpc/credentials:go_default_library", + "//vendor/google.golang.org/grpc/internal:go_default_library", + "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/google.golang.org/grpc/resolver:go_default_library", + "//vendor/google.golang.org/grpc/serviceconfig:go_default_library", + ], +) diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 0000000000000..c266f4ec102c7 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,364 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. 
+// All APIs in this package are experimental. +package balancer + +import ( + "context" + "encoding/json" + "errors" + "net" + "strings" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) +) + +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder. If the +// Builder implements ConfigParser, ParseConfig will be called when new service +// configs are received by the resolver, and the result will be provided to the +// Balancer in UpdateClientConnState. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Balancers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[strings.ToLower(b.Name())] = b +} + +// unregisterForTesting deletes the balancer with the given name from the +// balancer map. +// +// This function is not thread-safe. +func unregisterForTesting(name string) { + delete(m, name) +} + +func init() { + internal.BalancerUnregister = unregisterForTesting +} + +// Get returns the resolver builder registered with the given name. +// Note that the compare is done in a case-insensitive fashion. +// If no builder is register with the name, nil will be returned. +func Get(name string) Builder { + if b, ok := m[strings.ToLower(name)]; ok { + return b + } + return nil +} + +// SubConn represents a gRPC sub connection. +// Each sub connection contains a list of addresses. gRPC will +// try to connect to them (in sequence), and stop trying the +// remainder once one connection is successful. +// +// The reconnect backoff will be applied on the list, not a single address. +// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger +// the connecting, Balancers must call Connect. +// When the connection encounters an error, it will reconnect immediately. +// When the connection becomes IDLE, it will not reconnect unless Connect is +// called. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct { + // CredsBundle is the credentials bundle that will be used in the created + // SubConn. If it's nil, the original creds from grpc DialOptions will be + // used. 
+ CredsBundle credentials.Bundle + // HealthCheckEnabled indicates whether health check service should be + // enabled on this SubConn + HealthCheckEnabled bool +} + +// ClientConn represents a gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + RemoveSubConn(SubConn) + + // UpdateBalancerState is called by balancer to notify gRPC that some internal + // state in balancer has changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConn. + UpdateBalancerState(s connectivity.State, p Picker) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOption) + + // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle that the Balancer can use. + CredsBundle credentials.Bundle + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) + // ChannelzParentID is the entity parent's channelz unique identification number. + ChannelzParentID int64 + // Target contains the parsed address info of the dial target. It is the same resolver.Target as + // passed to the resolver. + // See the documentation for the resolver.Target type for details about what it contains. + Target resolver.Target +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// ConfigParser parses load balancer configs. +type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickOptions contains addition information for the Pick operation. +type PickOptions struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. 
+ FullMethodName string +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // Trailer contains the metadata from the RPC's trailer, if present. + Trailer metadata.MD + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool + // ServerLoad is the load received from server. It's usually sent as part of + // trailing metadata. + // + // The only supported type now is *orca_v1.LoadReport. + ServerLoad interface{} +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +type Picker interface { + // Pick returns the SubConn to be used to send the RPC. + // The returned SubConn must be one returned by NewSubConn(). + // + // This functions is expected to return: + // - a SubConn that is known to be READY; + // - ErrNoSubConnAvailable if no SubConn is available, but progress is being + // made (for example, some SubConn is in CONNECTING mode); + // - other errors if no active connecting is happening (for example, all SubConn + // are in TRANSIENT_FAILURE mode). + // + // If a SubConn is returned: + // - If it is READY, gRPC will send the RPC on it; + // - If it is not ready, or becomes not ready after it's returned, gRPC will + // block until UpdateBalancerState() is called and will call pick on the + // new picker. The done function returned from Pick(), if not nil, will be + // called with nil error, no bytes sent and no bytes received. + // + // If the returned error is not nil: + // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() + // - If the error is ErrTransientFailure: + // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() + // is called to pick again; + // - Otherwise, RPC will fail with unavailable error. + // - Else (error is other non-nil error): + // - The RPC will fail with unavailable error. + // + // The returned done() function will be called once the rpc has finished, + // with the final status of that RPC. If the SubConn returned is not a + // valid SubConn type, done may not be called. done may be nil if balancer + // doesn't care about the RPC status. + Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed +// to be called synchronously from the same goroutine. +// There's no guarantee on picker.Pick, it may be called anytime. 
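
To make the Pick contract described above concrete, here is a minimal sketch of a picker that serves a single SubConn. singlePicker is a hypothetical name; the SubConn is assumed to have come from NewSubConn and to be READY.

package example

import (
	"context"

	"google.golang.org/grpc/balancer"
)

// singlePicker serves every RPC from one SubConn. With no SubConn it reports
// ErrNoSubConnAvailable, so gRPC blocks until a new picker is supplied via
// UpdateBalancerState.
type singlePicker struct {
	sc balancer.SubConn
}

func (p *singlePicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	if p.sc == nil {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}
	// The done callback is optional; it receives the final status of the RPC
	// (DoneInfo.Err, BytesSent, BytesReceived, ...) once the RPC finishes.
	done := func(di balancer.DoneInfo) {}
	return p.sc, done, nil
}
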
+type Balancer interface { + // HandleSubConnStateChange is called by gRPC when the connectivity state + // of sc has changed. + // Balancer is expected to aggregate all the state of SubConn and report + // that back to gRPC. + // Balancer should also generate and update Pickers when its internal state has + // been changed by the new state. + // + // Deprecated: if V2Balancer is implemented by the Balancer, + // UpdateSubConnState will be called instead. + HandleSubConnStateChange(sc SubConn, state connectivity.State) + // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to + // balancers. + // Balancer can create new SubConn or remove SubConn with the addresses. + // An empty address slice and a non-nil error will be passed if the resolver returns + // non-nil error to gRPC. + // + // Deprecated: if V2Balancer is implemented by the Balancer, + // UpdateClientConnState will be called instead. + HandleResolvedAddrs([]resolver.Address, error) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + ConnectivityState connectivity.State + // TODO: add last connection error +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + +// V2Balancer is defined for documentation purposes. If a Balancer also +// implements V2Balancer, its UpdateClientConnState method will be called +// instead of HandleResolvedAddrs and its UpdateSubConnState will be called +// instead of HandleSubConnStateChange. +type V2Balancer interface { + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. + UpdateClientConnState(ClientConnState) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +// +// Idle and Shutdown are not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. 
+ switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/base/BUILD.bazel b/vendor/google.golang.org/grpc/balancer/base/BUILD.bazel new file mode 100644 index 0000000000000..ce31305bbb2eb --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "balancer.go", + "base.go", + ], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/balancer/base", + importpath = "google.golang.org/grpc/balancer/base", + visibility = ["//visibility:public"], + deps = [ + "//vendor/google.golang.org/grpc/balancer:go_default_library", + "//vendor/google.golang.org/grpc/connectivity:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/resolver:go_default_library", + ], +) diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 0000000000000..1af88f0a3f1b3 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,184 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( + "context" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder + config Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + // Initialize picker to a picker that always return + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateBalancerState with this picker. 
+ picker: NewErrPicker(balancer.ErrNoSubConnAvailable), + config: bb.config, + } +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]balancer.SubConn + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker + config Config +} + +func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + panic("not implemented") +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) { + // TODO: handle s.ResolverState.Err (log if not nil) once implemented. + // TODO: handle s.ResolverState.ServiceConfig? + if grpclog.V(2) { + grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) + } + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := make(map[resolver.Address]struct{}) + for _, a := range s.ResolverState.Addresses { + addrsSet[a] = struct{}{} + if _, ok := b.subConns[a]; !ok { + // a is a new address (not existing in b.subConns). + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + if err != nil { + grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns[a] = sc + b.scStates[sc] = connectivity.Idle + sc.Connect() + } + } + for a, sc := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(sc) + delete(b.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + } + } +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +// - errPicker with ErrTransientFailure if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = NewErrPicker(balancer.ErrTransientFailure) + return + } + readySCs := make(map[resolver.Address]balancer.SubConn) + + // Filter out all ready SCs from full subConn map. + for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[addr] = sc + } + } + b.picker = b.pickerBuilder.Build(readySCs) +} + +func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + panic("not implemented") +} + +func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if grpclog.V(2) { + grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + } + oldS, ok := b.scStates[sc] + if !ok { + if grpclog.V(2) { + grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. 
+ delete(b.scStates, sc) + } + + oldAggrState := b.state + b.state = b.csEvltr.RecordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc became ready from not-ready + // - this sc became not-ready from ready + // - the aggregated state of balancer became TransientFailure from non-TransientFailure + // - the aggregated state of balancer became non-TransientFailure from TransientFailure + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + b.regeneratePicker() + } + + b.cc.UpdateBalancerState(b.state, b.picker) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. +func (b *baseBalancer) Close() { +} + +// NewErrPicker returns a picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + return nil, nil, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 0000000000000..34b1f2994a7d6 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build takes a slice of ready SubConns, and returns a picker that will be + // used by gRPC to pick a SubConn. + Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker +} + +// NewBalancerBuilder returns a balancer builder. The balancers +// built by this builder will use the picker builder to build pickers. +func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { + return NewBalancerBuilderWithConfig(name, pb, Config{}) +} + +// Config contains the config info about the base balancer builder. +type Config struct { + // HealthCheck indicates whether health checking should be enabled for this specific balancer. 
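
The PickerBuilder plus NewBalancerBuilder combination documented here is how the base package is meant to be reused: the base balancer manages SubConns, and only the picking policy is custom. A sketch under those assumptions; firstReadyPickerBuilder, fixedPicker, and the "first_ready_example" name are illustrative, not part of the vendored code.

package example

import (
	"context"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/resolver"
)

// firstReadyPickerBuilder builds a picker that always uses the first entry of
// the ready-SubConn map handed to it by the base balancer.
type firstReadyPickerBuilder struct{}

func (firstReadyPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	if len(readySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	for _, sc := range readySCs {
		return &fixedPicker{sc: sc}
	}
	return nil // unreachable; the map is non-empty here
}

type fixedPicker struct{ sc balancer.SubConn }

func (p *fixedPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	return p.sc, nil, nil
}

func init() {
	// base supplies SubConn management; only the picking policy is custom.
	balancer.Register(base.NewBalancerBuilder("first_ready_example", firstReadyPickerBuilder{}))
}
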
+ HealthCheck bool +} + +// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. +func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + config: config, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/BUILD.bazel b/vendor/google.golang.org/grpc/balancer/roundrobin/BUILD.bazel new file mode 100644 index 0000000000000..e842c8e072d7e --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["roundrobin.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/balancer/roundrobin", + importpath = "google.golang.org/grpc/balancer/roundrobin", + visibility = ["//visibility:public"], + deps = [ + "//vendor/google.golang.org/grpc/balancer:go_default_library", + "//vendor/google.golang.org/grpc/balancer/base:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/internal/grpcrand:go_default_library", + "//vendor/google.golang.org/grpc/resolver:go_default_library", + ], +) diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 0000000000000..29f7a4ddd68f5 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,83 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "context" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/resolver" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +// newBuilder creates a new roundrobin balancer builder. 
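
On the client side, the registered name is all that is needed to select this policy at dial time. A sketch, assuming the grpc version vendored here still exposes grpc.WithBalancerName (setting "loadBalancingPolicy": "round_robin" in the service config is the other route); dialRoundRobin is an illustrative helper.

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer/roundrobin"
)

// dialRoundRobin connects to target and asks gRPC to spread RPCs across all
// resolved addresses using the registered round_robin policy.
func dialRoundRobin(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(), // no transport security, for brevity only
		grpc.WithBalancerName(roundrobin.Name),
	)
}
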
+func newBuilder() balancer.Builder { + return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker { + grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs) + if len(readySCs) == 0 { + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) + } + var scs []balancer.SubConn + for _, sc := range readySCs { + scs = append(scs, sc) + } + return &rrPicker{ + subConns: scs, + // Start at a random index, as the same RR balancer rebuilds a new + // picker when SubConn states change, and we don't want to apply excess + // load to the first server in the list. + next: grpcrand.Intn(len(scs)), + } +} + +type rrPicker struct { + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Get() will do a round robin + // selection from it and return the selected SubConn. + subConns []balancer.SubConn + + mu sync.Mutex + next int +} + +func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + p.mu.Lock() + sc := p.subConns[p.next] + p.next = (p.next + 1) % len(p.subConns) + p.mu.Unlock() + return sc, nil, nil +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 0000000000000..8df4095ca9517 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,318 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// scStateUpdate contains the subConn and the new state it changed to. +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State +} + +// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple. +// TODO make a general purpose buffer that uses interface{}. +type scStateUpdateBuffer struct { + c chan *scStateUpdate + mu sync.Mutex + backlog []*scStateUpdate +} + +func newSCStateUpdateBuffer() *scStateUpdateBuffer { + return &scStateUpdateBuffer{ + c: make(chan *scStateUpdate, 1), + } +} + +func (b *scStateUpdateBuffer) put(t *scStateUpdate) { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + return + default: + } + } + b.backlog = append(b.backlog, t) +} + +func (b *scStateUpdateBuffer) load() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } +} + +// get returns the channel that the scStateUpdate will be sent to. 
+// +// Upon receiving, the caller should call load to send another +// scStateChangeTuple onto the channel if there is any. +func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { + return b.c +} + +// ccBalancerWrapper is a wrapper on top of cc for balancers. +// It implements balancer.ClientConn interface. +type ccBalancerWrapper struct { + cc *ClientConn + balancer balancer.Balancer + stateChangeQueue *scStateUpdateBuffer + ccUpdateCh chan *balancer.ClientConnState + done chan struct{} + + mu sync.Mutex + subConns map[*acBalancerWrapper]struct{} +} + +func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + stateChangeQueue: newSCStateUpdateBuffer(), + ccUpdateCh: make(chan *balancer.ClientConnState, 1), + done: make(chan struct{}), + subConns: make(map[*acBalancerWrapper]struct{}), + } + go ccb.watcher() + ccb.balancer = b.Build(ccb, bopts) + return ccb +} + +// watcher balancer functions sequentially, so the balancer can be implemented +// lock-free. +func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case t := <-ccb.stateChangeQueue.get(): + ccb.stateChangeQueue.load() + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state}) + } else { + ccb.balancer.HandleSubConnStateChange(t.sc, t.state) + } + case s := <-ccb.ccUpdateCh: + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + ub.UpdateClientConnState(*s) + } else { + ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil) + } + case <-ccb.done: + } + + select { + case <-ccb.done: + ccb.balancer.Close() + ccb.mu.Lock() + scs := ccb.subConns + ccb.subConns = nil + ccb.mu.Unlock() + for acbw := range scs { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + } + ccb.UpdateBalancerState(connectivity.Connecting, nil) + return + default: + } + ccb.cc.firstResolveEvent.Fire() + } +} + +func (ccb *ccBalancerWrapper) close() { + close(ccb.done) +} + +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. + if sc == nil { + return + } + ccb.stateChangeQueue.put(&scStateUpdate{ + sc: sc, + state: s, + }) +} + +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) { + if ccb.cc.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. 
+ s := &ccs.ResolverState + for i := 0; i < len(s.Addresses); { + if s.Addresses[i].Type == resolver.GRPCLB { + copy(s.Addresses[i:], s.Addresses[i+1:]) + s.Addresses = s.Addresses[:len(s.Addresses)-1] + continue + } + i++ + } + } + select { + case <-ccb.ccUpdateCh: + default: + } + ccb.ccUpdateCh <- ccs +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) <= 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") + } + ac, err := ccb.cc.newAddrConn(addrs, opts) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + acbw.ac.mu.Lock() + ac.acbw = acbw + acbw.ac.mu.Unlock() + ccb.subConns[acbw] = struct{}{} + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + delete(ccb.subConns, acbw) + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + ccb.cc.blockingpicker.updatePicker(p) + ccb.cc.csMgr.updateState(s) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) { + ccb.cc.resolveNow(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + if len(addrs) <= 0 { + acbw.ac.tearDown(errConnDrain) + return + } + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + opts := acbw.ac.scopts + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. 
+ acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.tearDown(errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs, opts) + if err != nil { + grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + if acState != connectivity.Idle { + ac.connect() + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go new file mode 100644 index 0000000000000..66e9a44ac4dae --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -0,0 +1,334 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type balancerWrapperBuilder struct { + b Balancer // The v1 balancer. +} + +func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + bwb.b.Start(opts.Target.Endpoint, BalancerConfig{ + DialCreds: opts.DialCreds, + Dialer: opts.Dialer, + }) + _, pickfirst := bwb.b.(*pickFirst) + bw := &balancerWrapper{ + balancer: bwb.b, + pickfirst: pickfirst, + cc: cc, + targetAddr: opts.Target.Endpoint, + startCh: make(chan struct{}), + conns: make(map[resolver.Address]balancer.SubConn), + connSt: make(map[balancer.SubConn]*scState), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + state: connectivity.Idle, + } + cc.UpdateBalancerState(connectivity.Idle, bw) + go bw.lbWatcher() + return bw +} + +func (bwb *balancerWrapperBuilder) Name() string { + return "wrapper" +} + +type scState struct { + addr Address // The v1 address type. + s connectivity.State + down func(error) +} + +type balancerWrapper struct { + balancer Balancer // The v1 balancer. + pickfirst bool + + cc balancer.ClientConn + targetAddr string // Target without the scheme. + + mu sync.Mutex + conns map[resolver.Address]balancer.SubConn + connSt map[balancer.SubConn]*scState + // This channel is closed when handling the first resolver result. + // lbWatcher blocks until this is closed, to avoid race between + // - NewSubConn is created, cc wants to notify balancer of state changes; + // - Build hasn't return, cc doesn't have access to balancer. + startCh chan struct{} + + // To aggregate the connectivity state. + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State +} + +// lbWatcher watches the Notify channel of the balancer and manages +// connections accordingly. 
+func (bw *balancerWrapper) lbWatcher() { + <-bw.startCh + notifyCh := bw.balancer.Notify() + if notifyCh == nil { + // There's no resolver in the balancer. Connect directly. + a := resolver.Address{ + Addr: bw.targetAddr, + Type: resolver.Backend, + } + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: Address{Addr: bw.targetAddr}, + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + return + } + + for addrs := range notifyCh { + grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs) + if bw.pickfirst { + var ( + oldA resolver.Address + oldSC balancer.SubConn + ) + bw.mu.Lock() + for oldA, oldSC = range bw.conns { + break + } + bw.mu.Unlock() + if len(addrs) <= 0 { + if oldSC != nil { + // Teardown old sc. + bw.mu.Lock() + delete(bw.conns, oldA) + delete(bw.connSt, oldSC) + bw.mu.Unlock() + bw.cc.RemoveSubConn(oldSC) + } + continue + } + + var newAddrs []resolver.Address + for _, a := range addrs { + newAddr := resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + } + newAddrs = append(newAddrs, newAddr) + } + if oldSC == nil { + // Create new sc. + sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) + } else { + bw.mu.Lock() + // For pickfirst, there should be only one SubConn, so the + // address doesn't matter. All states updating (up and down) + // and picking should all happen on that only SubConn. + bw.conns[resolver.Address{}] = sc + bw.connSt[sc] = &scState{ + addr: addrs[0], // Use the first address. + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } else { + bw.mu.Lock() + bw.connSt[oldSC].addr = addrs[0] + bw.mu.Unlock() + oldSC.UpdateAddresses(newAddrs) + } + } else { + var ( + add []resolver.Address // Addresses need to setup connections. + del []balancer.SubConn // Connections need to tear down. + ) + resAddrs := make(map[resolver.Address]Address) + for _, a := range addrs { + resAddrs[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + }] = a + } + bw.mu.Lock() + for a := range resAddrs { + if _, ok := bw.conns[a]; !ok { + add = append(add, a) + } + } + for a, c := range bw.conns { + if _, ok := resAddrs[a]; !ok { + del = append(del, c) + delete(bw.conns, a) + // Keep the state of this sc in bw.connSt until its state becomes Shutdown. + } + } + bw.mu.Unlock() + for _, a := range add { + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. 
Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: resAddrs[a], + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } + for _, c := range del { + bw.cc.RemoveSubConn(c) + } + } + } +} + +func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + bw.mu.Lock() + defer bw.mu.Unlock() + scSt, ok := bw.connSt[sc] + if !ok { + return + } + if s == connectivity.Idle { + sc.Connect() + } + oldS := scSt.s + scSt.s = s + if oldS != connectivity.Ready && s == connectivity.Ready { + scSt.down = bw.balancer.Up(scSt.addr) + } else if oldS == connectivity.Ready && s != connectivity.Ready { + if scSt.down != nil { + scSt.down(errConnClosing) + } + } + sa := bw.csEvltr.RecordTransition(oldS, s) + if bw.state != sa { + bw.state = sa + } + bw.cc.UpdateBalancerState(bw.state, bw) + if s == connectivity.Shutdown { + // Remove state for this sc. + delete(bw.connSt, sc) + } +} + +func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + // There should be a resolver inside the balancer. + // All updates here, if any, are ignored. +} + +func (bw *balancerWrapper) Close() { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + bw.balancer.Close() +} + +// The picker is the balancerWrapper itself. +// It either blocks or returns error, consistent with v1 balancer Get(). +func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) { + failfast := true // Default failfast is true. + if ss, ok := rpcInfoFromContext(ctx); ok { + failfast = ss.failfast + } + a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast}) + if err != nil { + return nil, nil, err + } + if p != nil { + done = func(balancer.DoneInfo) { p() } + defer func() { + if err != nil { + p() + } + }() + } + + bw.mu.Lock() + defer bw.mu.Unlock() + if bw.pickfirst { + // Get the first sc in conns. + for _, sc := range bw.conns { + return sc, done, nil + } + return nil, nil, balancer.ErrNoSubConnAvailable + } + sc, ok1 := bw.conns[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, + ServerName: "", + Metadata: a.Metadata, + }] + s, ok2 := bw.connSt[sc] + if !ok1 || !ok2 { + // This can only happen due to a race where Get() returned an address + // that was subsequently removed by Notify. In this case we should + // retry always. + return nil, nil, balancer.ErrNoSubConnAvailable + } + switch s.s { + case connectivity.Ready, connectivity.Idle: + return sc, done, nil + case connectivity.Shutdown, connectivity.TransientFailure: + // If the returned sc has been shut down or is in transient failure, + // return error, and this RPC will fail or wait for another picker (if + // non-failfast). + return nil, nil, balancer.ErrTransientFailure + default: + // For other states (connecting or unknown), the v1 balancer would + // traditionally wait until ready and then issue the RPC. Returning + // ErrNoSubConnAvailable will be a slight improvement in that it will + // allow the balancer to choose another address in case others are + // connected. 
+ return nil, nil, balancer.ErrNoSubConnAvailable + } +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/BUILD.bazel b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/BUILD.bazel new file mode 100644 index 0000000000000..7eaa964db6535 --- /dev/null +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["binarylog.pb.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1", + importpath = "google.golang.org/grpc/binarylog/grpc_binarylog_v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes/duration:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes/timestamp:go_default_library", + ], +) diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go new file mode 100644 index 0000000000000..f393bb66187e8 --- /dev/null +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -0,0 +1,900 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto + +package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. +type GrpcLogEntry_EventType int32 + +const ( + GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 + // Header sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 + // Header sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 + // Message sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 + // Message sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 + // A signal that client is done sending + GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 + // Trailer indicates the end of the RPC. + // On client side, this event means a trailer was either received + // from the network or the gRPC library locally generated a status + // to inform the application about a failure. + // On server side, this event means the server application requested + // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after + // this due to races on server side. + GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 + // A signal that the RPC is cancelled. On client side, this + // indicates the client application requests a cancellation. 
+ // On server side, this indicates that cancellation was detected. + // Note: This marks the end of the RPC. Events may arrive after + // this due to races. For example, on client side a trailer + // may arrive even though the application requested to cancel the RPC. + GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +var GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", +} +var GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, +} + +func (x GrpcLogEntry_EventType) String() string { + return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) +} +func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( + GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 + GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 + GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 +) + +var GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", +} +var GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, +} + +func (x GrpcLogEntry_Logger) String() string { + return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) +} +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} +} + +type Address_Type int32 + +const ( + Address_TYPE_UNKNOWN Address_Type = 0 + // address is in 1.2.3.4 form + Address_TYPE_IPV4 Address_Type = 1 + // address is in IPv6 canonical form (RFC5952 section 4) + // The scope is NOT included in the address string. + Address_TYPE_IPV6 Address_Type = 2 + // address is UDS string + Address_TYPE_UNIX Address_Type = 3 +) + +var Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", +} +var Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, +} + +func (x Address_Type) String() string { + return proto.EnumName(Address_Type_name, int32(x)) +} +func (Address_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { + // The timestamp of the binary log message + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries, they will all have the same call_id. + // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. + CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` + // The entry sequence id for this call. The first GrpcLogEntry has a + // value of 1, to disambiguate from an unset value. 
The purpose of + // this field is to detect missing entries in environments where + // durability or ordering is not guaranteed. + SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` + Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` + // The logger uses one of the following fields to record the payload, + // according to the type of the log entry. + // + // Types that are valid to be assigned to Payload: + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message + // *GrpcLogEntry_Trailer + Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` + // true if payload does not represent the full message or metadata. + PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Peer address information, will only be recorded on the first + // incoming event. On client side, peer is logged on + // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in + // the case of trailers-only. On server side, peer is always + // logged on EVENT_TYPE_CLIENT_HEADER. + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } +func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } +func (*GrpcLogEntry) ProtoMessage() {} +func (*GrpcLogEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} +} +func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) +} +func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) +} +func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrpcLogEntry.Merge(dst, src) +} +func (m *GrpcLogEntry) XXX_Size() int { + return xxx_messageInfo_GrpcLogEntry.Size(m) +} +func (m *GrpcLogEntry) XXX_DiscardUnknown() { + xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo + +func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *GrpcLogEntry) GetCallId() uint64 { + if m != nil { + return m.CallId + } + return 0 +} + +func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { + if m != nil { + return m.SequenceIdWithinCall + } + return 0 +} + +func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType { + if m != nil { + return m.Type + } + return GrpcLogEntry_EVENT_TYPE_UNKNOWN +} + +func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { + if m != nil { + return m.Logger + } + return GrpcLogEntry_LOGGER_UNKNOWN +} + +type isGrpcLogEntry_Payload interface { + isGrpcLogEntry_Payload() +} + +type GrpcLogEntry_ClientHeader struct { + ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` +} + +type GrpcLogEntry_ServerHeader struct { + ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` +} + +type 
GrpcLogEntry_Message struct { + Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` +} + +type GrpcLogEntry_Trailer struct { + Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` +} + +func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} + +func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *GrpcLogEntry) GetClientHeader() *ClientHeader { + if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok { + return x.ClientHeader + } + return nil +} + +func (m *GrpcLogEntry) GetServerHeader() *ServerHeader { + if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok { + return x.ServerHeader + } + return nil +} + +func (m *GrpcLogEntry) GetMessage() *Message { + if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok { + return x.Message + } + return nil +} + +func (m *GrpcLogEntry) GetTrailer() *Trailer { + if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok { + return x.Trailer + } + return nil +} + +func (m *GrpcLogEntry) GetPayloadTruncated() bool { + if m != nil { + return m.PayloadTruncated + } + return false +} + +func (m *GrpcLogEntry) GetPeer() *Address { + if m != nil { + return m.Peer + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } +} + +func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GrpcLogEntry) + // payload + switch x := m.Payload.(type) { + case *GrpcLogEntry_ClientHeader: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClientHeader); err != nil { + return err + } + case *GrpcLogEntry_ServerHeader: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ServerHeader); err != nil { + return err + } + case *GrpcLogEntry_Message: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Message); err != nil { + return err + } + case *GrpcLogEntry_Trailer: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Trailer); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x) + } + return nil +} + +func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GrpcLogEntry) + switch tag { + case 6: // payload.client_header + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClientHeader) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_ClientHeader{msg} + return true, err + case 7: // payload.server_header + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ServerHeader) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_ServerHeader{msg} + return true, err + case 8: // payload.message + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg 
:= new(Message) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_Message{msg} + return true, err + case 9: // payload.trailer + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Trailer) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_Trailer{msg} + return true, err + default: + return false, nil + } +} + +func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GrpcLogEntry) + // payload + switch x := m.Payload.(type) { + case *GrpcLogEntry_ClientHeader: + s := proto.Size(x.ClientHeader) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *GrpcLogEntry_ServerHeader: + s := proto.Size(x.ServerHeader) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *GrpcLogEntry_Message: + s := proto.Size(x.Message) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *GrpcLogEntry_Trailer: + s := proto.Size(x.Trailer) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ClientHeader struct { + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The name of the RPC method, which looks something like: + // // + // Note the leading "/" character. + MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` + // A single process may be used to run multiple virtual + // servers with different identities. + // The authority is the name of such a server identitiy. + // It is typically a portion of the URI in the form of + // or : . + Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + // the RPC timeout + Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientHeader) Reset() { *m = ClientHeader{} } +func (m *ClientHeader) String() string { return proto.CompactTextString(m) } +func (*ClientHeader) ProtoMessage() {} +func (*ClientHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{1} +} +func (m *ClientHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientHeader.Unmarshal(m, b) +} +func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic) +} +func (dst *ClientHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientHeader.Merge(dst, src) +} +func (m *ClientHeader) XXX_Size() int { + return xxx_messageInfo_ClientHeader.Size(m) +} +func (m *ClientHeader) XXX_DiscardUnknown() { + xxx_messageInfo_ClientHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientHeader proto.InternalMessageInfo + +func (m *ClientHeader) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *ClientHeader) GetMethodName() string { + if m != nil { + return m.MethodName + } + return "" +} + +func (m *ClientHeader) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *ClientHeader) GetTimeout() *duration.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +type ServerHeader struct { + // This contains only the metadata from the application. 
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerHeader) Reset() { *m = ServerHeader{} } +func (m *ServerHeader) String() string { return proto.CompactTextString(m) } +func (*ServerHeader) ProtoMessage() {} +func (*ServerHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} +} +func (m *ServerHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerHeader.Unmarshal(m, b) +} +func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) +} +func (dst *ServerHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerHeader.Merge(dst, src) +} +func (m *ServerHeader) XXX_Size() int { + return xxx_messageInfo_ServerHeader.Size(m) +} +func (m *ServerHeader) XXX_DiscardUnknown() { + xxx_messageInfo_ServerHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerHeader proto.InternalMessageInfo + +func (m *ServerHeader) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +type Trailer struct { + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The gRPC status code. + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // An original status message before any transport specific + // encoding. + StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the 'grpc-status-details-bin' metadata key. If + // present, this is always an encoded 'google.rpc.Status' message. + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Trailer) Reset() { *m = Trailer{} } +func (m *Trailer) String() string { return proto.CompactTextString(m) } +func (*Trailer) ProtoMessage() {} +func (*Trailer) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} +} +func (m *Trailer) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Trailer.Unmarshal(m, b) +} +func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) +} +func (dst *Trailer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Trailer.Merge(dst, src) +} +func (m *Trailer) XXX_Size() int { + return xxx_messageInfo_Trailer.Size(m) +} +func (m *Trailer) XXX_DiscardUnknown() { + xxx_messageInfo_Trailer.DiscardUnknown(m) +} + +var xxx_messageInfo_Trailer proto.InternalMessageInfo + +func (m *Trailer) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Trailer) GetStatusCode() uint32 { + if m != nil { + return m.StatusCode + } + return 0 +} + +func (m *Trailer) GetStatusMessage() string { + if m != nil { + return m.StatusMessage + } + return "" +} + +func (m *Trailer) GetStatusDetails() []byte { + if m != nil { + return m.StatusDetails + } + return nil +} + +// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE +type Message struct { + // Length of the message. 
It may not be the same as the length of the + // data field, as the logging payload can be truncated or omitted. + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + // May be truncated or omitted. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Message.Unmarshal(m, b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) +} +func (dst *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(dst, src) +} +func (m *Message) XXX_Size() int { + return xxx_messageInfo_Message.Size(m) +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +func (m *Message) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// A list of metadata pairs, used in the payload of client header, +// server header, and server trailer. +// Implementations may omit some entries to honor the header limits +// of GRPC_BINARY_LOG_CONFIG. +// +// Header keys added by gRPC are omitted. To be more specific, +// implementations will not log the following entries, and this is +// not to be treated as a truncation: +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// +// Implementations must always log grpc-trace-bin if it is present. +// Practically speaking it will only be visible on server side because +// grpc-trace-bin is managed by low level client side mechanisms +// inaccessible from the application level. On server side, the +// header is just a normal metadata key. +// The pair will not count towards the size limit. 
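The generated Message, Metadata, and MetadataEntry types above are normally filled in by gRPC's binary-logging sink rather than by application code. A minimal sketch of building them by hand, assuming the generated package is importable from google.golang.org/grpc/binarylog/grpc_binarylog_v1 (path inferred from the vendor layout, so treat it as an assumption):

package main

import (
	"fmt"

	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" // assumed import path
)

func main() {
	// Header-style metadata, as carried by client/server header payloads.
	md := &binlogpb.Metadata{
		Entry: []*binlogpb.MetadataEntry{
			{Key: "user-agent", Value: []byte("grpc-go")},
		},
	}

	// A message payload; Length may exceed len(Data) when the sink truncates.
	msg := &binlogpb.Message{Length: 5, Data: []byte("hello")}

	// The generated getters shown above are nil-safe accessors.
	fmt.Println(md.GetEntry()[0].GetKey(), msg.GetLength(), len(msg.GetData()))
}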
+type Metadata struct { + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metadata.Unmarshal(m, b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) +} +func (dst *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(dst, src) +} +func (m *Metadata) XXX_Size() int { + return xxx_messageInfo_Metadata.Size(m) +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetEntry() []*MetadataEntry { + if m != nil { + return m.Entry + } + return nil +} + +// A metadata key value pair +type MetadataEntry struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } +func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } +func (*MetadataEntry) ProtoMessage() {} +func (*MetadataEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{6} +} +func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) +} +func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) +} +func (dst *MetadataEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetadataEntry.Merge(dst, src) +} +func (m *MetadataEntry) XXX_Size() int { + return xxx_messageInfo_MetadataEntry.Size(m) +} +func (m *MetadataEntry) XXX_DiscardUnknown() { + xxx_messageInfo_MetadataEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo + +func (m *MetadataEntry) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *MetadataEntry) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// Address information +type Address struct { + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Address) Reset() { *m = Address{} } +func (m *Address) String() string { return proto.CompactTextString(m) } +func (*Address) ProtoMessage() {} +func (*Address) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} +} +func (m *Address) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Address.Unmarshal(m, b) +} +func (m *Address) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Address.Marshal(b, m, deterministic) +} +func (dst *Address) XXX_Merge(src proto.Message) { + xxx_messageInfo_Address.Merge(dst, src) +} +func (m *Address) XXX_Size() int { + return xxx_messageInfo_Address.Size(m) +} +func (m *Address) XXX_DiscardUnknown() { + xxx_messageInfo_Address.DiscardUnknown(m) +} + +var xxx_messageInfo_Address proto.InternalMessageInfo + +func (m *Address) GetType() Address_Type { + if m != nil { + return m.Type + } + return Address_TYPE_UNKNOWN +} + +func (m *Address) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *Address) GetIpPort() uint32 { + if m != nil { + return m.IpPort + } + return 0 +} + +func init() { + proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") + proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") + proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") + proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer") + proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message") + proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") + proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") + proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) + proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) +} + +func init() { + proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911) +} + +var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ + // 900 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, + 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, + 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, + 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, + 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf, + 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, + 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, + 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, + 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, + 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, + 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, + 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, + 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, + 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, + 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, + 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, + 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 
0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84, + 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1, + 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, + 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, + 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, + 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, + 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, + 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, + 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, + 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, + 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, + 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, + 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, + 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, + 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, + 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, + 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, + 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, + 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, + 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, + 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, + 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, + 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, + 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, + 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, + 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, + 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, + 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, + 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, + 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, + 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, + 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, + 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, + 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, + 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, + 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, + 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 
0x28, 0x75, 0x7f, + 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, + 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, + 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, + 0xd4, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 0000000000000..9e20e4d385f9d --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // we don't use append because o1 could have extra capacity whose + // elements would be overwritten, which could cause inadvertent + // sharing (and race conditions) between concurrent calls + if len(o1) == 0 { + return o2 + } else if len(o2) == 0 { + return o1 + } + ret := make([]CallOption, len(o1)+len(o2)) + copy(ret, o1) + copy(ret[len(o1):], o2) + return ret +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(req); err != nil { + return err + } + return cs.RecvMsg(reply) +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 0000000000000..a7643df7d297a --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1447 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
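ClientConn.Invoke in call.go above is the low-level unary entry point that generated stubs call: it opens a one-off stream, sends the request, and receives the reply. A minimal sketch of driving it directly, using a placeholder method path and the golang/protobuf Empty message only so that a registered proto type exists on both sides:

package example

import (
	"context"

	"github.com/golang/protobuf/ptypes/empty"
	"google.golang.org/grpc"
)

// ping issues a unary RPC the same way generated code does, via Invoke.
func ping(ctx context.Context, cc *grpc.ClientConn) error {
	in, out := &empty.Empty{}, &empty.Empty{}
	// "/example.Pinger/Ping" is a placeholder full method name.
	return cc.Invoke(ctx, "/example.Pinger/Ping", in, out)
}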
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "math" + "net" + "reflect" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second + // must match grpclbName in grpclb/grpclb.go + grpclbName = "grpclb" +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errBalancerClosed indicates that the balancer is closed. + errBalancerClosed = errors.New("grpc: balancer is closed") + // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default + // service config. + invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredsAndBundle indicates that creds bundle is used together + // with other individual Transport Credentials. + errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") + // errTransportCredentialsMissing indicates that users want to transmit security + // information (e.g., OAuth2 token) which requires secure connection on an insecure + // connection. 
+ errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") + // errCredentialsConflict indicates that grpc.WithTransportCredentials() + // and grpc.WithInsecure() are both called for a connection. + errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") +) + +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 + // http2IOBufSize specifies the buffer size for sending frames. + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 +) + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. 
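A short sketch of the blocking-dial pattern the DialContext comment describes, with a placeholder target; WithBlock makes the call wait for a Ready connection, and the context bounds only the setup steps:

package example

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func dialBlocking() *grpc.ClientConn {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "dns:///greeter.example.com:50051",
		grpc.WithInsecure(), // placeholder; production code should set transport credentials
		grpc.WithBlock(),    // block until Ready or ctx expires
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	return conn
}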
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + blockingpicker: newPickerWrapper(), + czData: new(channelzData), + firstResolveEvent: grpcsync.NewEvent(), + } + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) + + defer func() { + if err != nil { + cc.Close() + } + }() + + if channelz.IsOn() { + if cc.dopts.channelzParentID != 0 { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), + Severity: channelz.CtINFO, + }, + }) + } else { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtINFO, + }) + } + cc.csMgr.channelzID = cc.channelzID + } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + } else { + if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { + sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err) + } + cc.dopts.defaultServiceConfig = sc + } + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.Dialer == nil { + cc.dopts.copts.Dialer = newProxyDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + network, addr := parseDialTarget(addr) + return (&net.Dialer{}).DialContext(ctx, network, addr) + }, + ) + } + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + defer func() { + select { + case <-ctx.Done(): + conn, err = nil, ctx.Err() + default: + } + }() + + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + scSet = true + } + default: + } + } + if cc.dopts.bs == nil { + cc.dopts.bs = backoff.Exponential{ + MaxDelay: DefaultBackoffConfig.MaxDelay, + } + } + if cc.dopts.resolverBuilder == nil { + // Only try to parse target when resolver builder is not already set. 
+ cc.parsedTarget = parseTarget(cc.target) + grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) + cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) + if cc.dopts.resolverBuilder == nil { + // If resolver builder is still nil, the parsed target's scheme is + // not registered. Fallback to default resolver and set Endpoint to + // the original target. + grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + cc.parsedTarget = resolver.Target{ + Scheme: resolver.GetDefaultScheme(), + Endpoint: target, + } + cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) + } + } else { + cc.parsedTarget = resolver.Target{Endpoint: target} + } + creds := cc.dopts.copts.TransportCredentials + if creds != nil && creds.Info().ServerName != "" { + cc.authority = creds.Info().ServerName + } else if cc.dopts.insecure && cc.dopts.authority != "" { + cc.authority = cc.dopts.authority + } else { + // Use endpoint from "scheme://authority/endpoint" as the default + // authority for ClientConn. + cc.authority = cc.parsedTarget.Endpoint + } + + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if cc.dopts.scChan != nil { + go cc.scWatcher() + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + cc.balancerBuildOpts = balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + } + + // Build the resolver. + rWrapper, err := newCCResolverWrapper(cc) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + + cc.mu.Lock() + cc.resolverWrapper = rWrapper + cc.mu.Unlock() + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + s := cc.GetState() + if s == connectivity.Ready { + break + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.blockingpicker.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + return nil, ctx.Err() + } + } + } + + return cc, nil +} + +// chainUnaryClientInterceptors chains all unary client interceptors into one. +func chainUnaryClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainUnaryInts + // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + if cc.dopts.unaryInt != nil { + interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...) + } + var chainedInt UnaryClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) + } + } + cc.dopts.unaryInt = chainedInt +} + +// getChainUnaryInvoker recursively generate the chained unary invoker. 
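chainUnaryClientInterceptors above composes unary interceptors front-to-back, with any dial-option interceptor running first. A sketch of an interceptor compatible with that chain follows; wiring it through WithChainUnaryInterceptor is an assumption about the DialOption that populates dopts.chainUnaryInts in this version:

package example

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// logUnary times each unary RPC and then delegates to the next invoker in the chain.
func logUnary(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("rpc=%s dur=%v err=%v", method, time.Since(start), err)
	return err
}

// Typical wiring (target is a placeholder):
//   conn, err := grpc.Dial(target, grpc.WithInsecure(),
//       grpc.WithChainUnaryInterceptor(logUnary))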
+func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { + if curr == len(interceptors)-1 { + return finalInvoker + } + return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) + } +} + +// chainStreamClientInterceptors chains all stream client interceptors into one. +func chainStreamClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainStreamInts + // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + if cc.dopts.streamInt != nil { + interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) + } + var chainedInt StreamClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { + return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) + } + } + cc.dopts.streamInt = chainedInt +} + +// getChainStreamer recursively generate the chained client stream constructor. +func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { + if curr == len(interceptors)-1 { + return finalStreamer + } + return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) + } +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} + channelzID int64 +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + if channelz.IsOn() { + channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel Connectivity change to %v", state), + Severity: channelz.CtINFO, + }) + } + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConn represents a client connection to an RPC server. 
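The ClientConn type below exposes the experimental GetState and WaitForStateChange APIs. A small sketch of looping on them until the channel is Ready or the context ends:

package example

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// waitForReady blocks until cc reports Ready, or returns the context error.
func waitForReady(ctx context.Context, cc *grpc.ClientConn) error {
	for {
		s := cc.GetState()
		if s == connectivity.Ready {
			return nil
		}
		// WaitForStateChange returns false only when ctx is done.
		if !cc.WaitForStateChange(ctx, s) {
			return ctx.Err()
		}
	}
}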
+type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + + target string + parsedTarget resolver.Target + authority string + dopts dialOptions + csMgr *connectivityStateManager + + balancerBuildOpts balancer.BuildOptions + blockingpicker *pickerWrapper + + mu sync.RWMutex + resolverWrapper *ccResolverWrapper + sc *ServiceConfig + conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. + mkp keepalive.ClientParameters + curBalancerName string + balancerWrapper *ccBalancerWrapper + retryThrottler atomic.Value + + firstResolveEvent *grpcsync.Event + + channelzID int64 // channelz unique identification number + czData *channelzData +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + +func (cc *ClientConn) scWatcher() { + for { + select { + case sc, ok := <-cc.dopts.scChan: + if !ok { + return + } + cc.mu.Lock() + // TODO: load balance policy runtime change is ignored. + // We may revisit this decision in the future. + cc.sc = &sc + cc.mu.Unlock() + case <-cc.ctx.Done(): + return + } + } +} + +// waitForResolvedAddrs blocks until the resolver has provided addresses or the +// context expires. Returns nil unless the context expires first; otherwise +// returns a status error based on the context. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { + // This is on the RPC path, so we use a fast path to avoid the + // more-expensive "select" below after the resolver has returned once. + if cc.firstResolveEvent.HasFired() { + return nil + } + select { + case <-cc.firstResolveEvent.Done(): + return nil + case <-ctx.Done(): + return status.FromContextError(ctx.Err()).Err() + case <-cc.ctx.Done(): + return ErrClientConnClosing + } +} + +func (cc *ClientConn) updateResolverState(s resolver.State) error { + cc.mu.Lock() + defer cc.mu.Unlock() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + return nil + } + + if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + if cc.dopts.defaultServiceConfig != nil && cc.sc == nil { + cc.applyServiceConfig(cc.dopts.defaultServiceConfig) + } + } else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok { + cc.applyServiceConfig(sc) + } + + var balCfg serviceconfig.LoadBalancingConfig + if cc.dopts.balancerBuilder == nil { + // Only look at balancer types and switch balancer if balancer dial + // option is not set. 
+ var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + balCfg = cc.sc.lbConfig.cfg + } else { + var isGRPCLB bool + for _, a := range s.Addresses { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } + } + cc.switchBalancer(newBalancerName) + } else if cc.balancerWrapper == nil { + // Balancer dial option was set, and this is the first time handling + // resolved addresses. Build a balancer with dopts.balancerBuilder. + cc.curBalancerName = cc.dopts.balancerBuilder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + } + + cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + return nil +} + +// switchBalancer starts the switching from current balancer to the balancer +// with the given name. +// +// It will NOT send the current address list to the new balancer. If needed, +// caller of this function should send address list to the new balancer after +// this function returns. +// +// Caller must hold cc.mu. +func (cc *ClientConn) switchBalancer(name string) { + if strings.EqualFold(cc.curBalancerName, name) { + return + } + + grpclog.Infof("ClientConn switching balancer to %q", name) + if cc.dopts.balancerBuilder != nil { + grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") + return + } + if cc.balancerWrapper != nil { + cc.balancerWrapper.close() + } + + builder := balancer.Get(name) + if channelz.IsOn() { + if builder == nil { + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName), + Severity: channelz.CtWarning, + }) + } else { + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel switches to new LB policy %q", name), + Severity: channelz.CtINFO, + }) + } + } + if builder == nil { + grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) + builder = newPickfirstBuilder() + } + + cc.curBalancerName = builder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + // TODO(bar switching) send updates to all balancer wrappers when balancer + // gracefully switching is supported. + cc.balancerWrapper.handleSubConnStateChange(sc, s) + cc.mu.Unlock() +} + +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + ac := &addrConn{ + cc: cc, + addrs: addrs, + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), + resetBackoff: make(chan struct{}), + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. This needs to be done before any getTransport(...) is called. 
+ cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return nil, ErrClientConnClosing + } + if channelz.IsOn() { + ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel Created", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), + Severity: channelz.CtINFO, + }, + }) + } + cc.conns[ac] = struct{}{} + cc.mu.Unlock() + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { + return &channelz.ChannelInternalMetric{ + State: cc.GetState(), + Target: cc.target, + CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), + } +} + +// Target returns the target string of the ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) Target() string { + return cc.target +} + +func (cc *ClientConn) incrCallsStarted() { + atomic.AddInt64(&cc.czData.callsStarted, 1) + atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (cc *ClientConn) incrCallsSucceeded() { + atomic.AddInt64(&cc.czData.callsSucceeded, 1) +} + +func (cc *ClientConn) incrCallsFailed() { + atomic.AddInt64(&cc.czData.callsFailed, 1) +} + +// connect starts creating a transport. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +func (ac *addrConn) connect() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + // Update connectivity state within the lock to prevent subsequent or + // concurrent calls from resetting the transport more than once. + ac.updateConnectivityState(connectivity.Connecting) + ac.mu.Unlock() + + // Start a goroutine connecting to the server asynchronously. + go ac.resetTransport() + return nil +} + +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// +// If ac is TransientFailure, it updates ac.addrs and returns true. The updated +// addresses will be picked up by retry in the next iteration after backoff. +// +// If ac is Shutdown or Idle, it updates ac.addrs and returns true. +// +// If ac is Ready, it checks whether current connected address of ac is in the +// new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. 
+func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { + ac.addrs = addrs + return true + } + + if ac.state == connectivity.Connecting { + return false + } + + // ac.state is Ready, try to find the connected address. + var curAddrFound bool + for _, a := range addrs { + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + } + + return curAddrFound +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the default config +// under the service (i.e /service/). If there is a default MethodConfig for +// the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return MethodConfig{} + } + m, ok := cc.sc.Methods[method] + if !ok { + i := strings.LastIndex(method, "/") + m = cc.sc.Methods[method[:i+1]] + } + return m +} + +func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return nil + } + return cc.sc.healthCheckConfig +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{ + FullMethodName: method, + }) + if err != nil { + return nil, nil, toRPCErr(err) + } + return t, done, nil +} + +func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error { + if sc == nil { + // should never reach here. + return fmt.Errorf("got nil pointer for service config") + } + cc.sc = sc + + if cc.sc.retryThrottling != nil { + newThrottler := &retryThrottler{ + tokens: cc.sc.retryThrottling.MaxTokens, + max: cc.sc.retryThrottling.MaxTokens, + thresh: cc.sc.retryThrottling.MaxTokens / 2, + ratio: cc.sc.retryThrottling.TokenRatio, + } + cc.retryThrottler.Store(newThrottler) + } else { + cc.retryThrottler.Store((*retryThrottler)(nil)) + } + + return nil +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { + cc.mu.RLock() + r := cc.resolverWrapper + cc.mu.RUnlock() + if r == nil { + return + } + go r.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. Typical service or network +// outages result in a reasonable client reconnection strategy by default. +// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// This API is EXPERIMENTAL. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + defer cc.mu.Unlock() + for ac := range cc.conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. 
+func (cc *ClientConn) Close() error { + defer cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper + cc.balancerWrapper = nil + cc.mu.Unlock() + + cc.blockingpicker.close() + + if rWrapper != nil { + rWrapper.close() + } + if bWrapper != nil { + bWrapper.close() + } + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + if channelz.IsOn() { + ted := &channelz.TraceEventDesc{ + Desc: "Channel Deleted", + Severity: channelz.CtINFO, + } + if cc.dopts.channelzParentID != 0 { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), + Severity: channelz.CtINFO, + } + } + channelz.AddTraceEvent(cc.channelzID, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity being deleted, and thus prevent it from being deleted right away. + channelz.RemoveEntry(cc.channelzID) + } + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + dopts dialOptions + acbw balancer.SubConn + scopts balancer.NewSubConnOptions + + // transport is set when there's a viable transport (note: ac state may not be READY as LB channel + // health checking may require server to report healthy to set ac to READY), and is reset + // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway + // is received, transport is closed, ac has been torn down). + transport transport.ClientTransport // The current transport. + + mu sync.Mutex + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + + backoffIdx int // Needs to be stateful for resetConnectBackoff. + resetBackoff chan struct{} + + channelzID int64 // channelz unique identification number. + czData *channelzData +} + +// Note: this requires a lock on ac.mu. +func (ac *addrConn) updateConnectivityState(s connectivity.State) { + if ac.state == s { + return + } + + updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s) + ac.state = s + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: updateMsg, + Severity: channelz.CtINFO, + }) + } + ac.cc.handleSubConnStateChange(ac.acbw, s) +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.GoAwayTooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +func (ac *addrConn) resetTransport() { + for i := 0; ; i++ { + if i > 0 { + ac.cc.resolveNow(resolver.ResolveNowOption{}) + } + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. 
+ dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting) + ac.transport = nil + ac.mu.Unlock() + + newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) + if err != nil { + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + ac.updateConnectivityState(connectivity.TransientFailure) + + // Backoff. + b := ac.resetBackoff + ac.mu.Unlock() + + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: + ac.mu.Lock() + ac.backoffIdx++ + ac.mu.Unlock() + case <-b: + timer.Stop() + case <-ac.ctx.Done(): + timer.Stop() + return + } + continue + } + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + newTr.Close() + return + } + ac.curAddr = addr + ac.transport = newTr + ac.backoffIdx = 0 + + hctx, hcancel := context.WithCancel(ac.ctx) + ac.startHealthCheck(hctx) + ac.mu.Unlock() + + // Block until the created transport is down. And when this happens, + // we restart from the top of the addr list. + <-reconnect.Done() + hcancel() + // restart connecting - the top of the loop will set state to + // CONNECTING. This is against the current connectivity semantics doc, + // however it allows for graceful behavior for RPCs not yet dispatched + // - unfortunate timing would otherwise lead to the RPC failing even + // though the TRANSIENT_FAILURE state (called for by the doc) would be + // instantaneous. + // + // Ideally we should transition to Idle here and block until there is + // RPC activity that leads to the balancer requesting a reconnect of + // the associated SubConn. + } +} + +// tryAllAddrs tries to creates a connection to the addresses, and stop when at the +// first successful one. It returns the transport, the address and a Event in +// the successful case. The Event fires when the returned transport disconnects. +func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { + for _, addr := range addrs { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return nil, resolver.Address{}, nil, errConnClosing + } + + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + + copts := ac.dopts.copts + if ac.scopts.CredsBundle != nil { + copts.CredsBundle = ac.scopts.CredsBundle + } + ac.mu.Unlock() + + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), + Severity: channelz.CtINFO, + }) + } + + newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + if err == nil { + return newTr, addr, reconnect, nil + } + ac.cc.blockingpicker.updateConnectionError(err) + } + + // Couldn't connect to any address. 
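resetTransport above retries with the exponential backoff configured at dial time, sleeping backoffFor between rounds. A sketch of capping that delay; BackoffConfig and WithBackoffConfig are assumed to be the knobs this vendored version exposes (they correspond to the DefaultBackoffConfig referenced earlier):

package example

import (
	"time"

	"google.golang.org/grpc"
)

func dialWithCappedBackoff(target string) (*grpc.ClientConn, error) {
	// Caps the delay between the reconnect attempts driven by resetTransport.
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithBackoffConfig(grpc.BackoffConfig{MaxDelay: 10 * time.Second}),
	)
}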
+ return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address") +} + +// createTransport creates a connection to addr. It returns the transport and a +// Event in the successful case. The Event fires when the returned transport +// disconnects. +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { + prefaceReceived := make(chan struct{}) + onCloseCalled := make(chan struct{}) + reconnect := grpcsync.NewEvent() + + target := transport.TargetInfo{ + Addr: addr.Addr, + Metadata: addr.Metadata, + Authority: ac.cc.authority, + } + + once := sync.Once{} + onGoAway := func(r transport.GoAwayReason) { + ac.mu.Lock() + ac.adjustParams(r) + once.Do(func() { + if ac.state == connectivity.Ready { + // Prevent this SubConn from being used for new RPCs by setting its + // state to Connecting. + // + // TODO: this should be Idle when grpc-go properly supports it. + ac.updateConnectivityState(connectivity.Connecting) + } + }) + ac.mu.Unlock() + reconnect.Fire() + } + + onClose := func() { + ac.mu.Lock() + once.Do(func() { + if ac.state == connectivity.Ready { + // Prevent this SubConn from being used for new RPCs by setting its + // state to Connecting. + // + // TODO: this should be Idle when grpc-go properly supports it. + ac.updateConnectivityState(connectivity.Connecting) + } + }) + ac.mu.Unlock() + close(onCloseCalled) + reconnect.Fire() + } + + onPrefaceReceipt := func() { + close(prefaceReceived) + } + + connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + defer cancel() + if channelz.IsOn() { + copts.ChannelzParentID = ac.channelzID + } + + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) + if err != nil { + // newTr is either nil, or closed. + grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) + return nil, nil, err + } + + select { + case <-time.After(connectDeadline.Sub(time.Now())): + // We didn't get the preface in time. + newTr.Close() + grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) + return nil, nil, errors.New("timed out waiting for server handshake") + case <-prefaceReceived: + // We got the preface - huzzah! things are good. + case <-onCloseCalled: + // The transport has already closed - noop. + return nil, nil, errors.New("connection closed") + // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + } + return newTr, reconnect, nil +} + +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package +// 3. a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. 
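The startHealthCheck requirements listed above translate into three pieces of client wiring: import the health package so the health-check function gets registered, supply a service config with a healthCheckConfig, and use a balancer that enables health checking on its SubConns (round_robin does, pick_first does not). A sketch under those assumptions, with the option and field names themselves treated as assumptions about this vendored version:

package example

import (
	"google.golang.org/grpc"
	_ "google.golang.org/grpc/health" // assumed to set internal.HealthCheckFunc on import
)

func dialWithHealthChecking(target string) (*grpc.ClientConn, error) {
	// serviceName "" watches the server's overall health status.
	sc := `{"loadBalancingPolicy": "round_robin", "healthCheckConfig": {"serviceName": ""}}`
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(sc),
	)
}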
+func (ac *addrConn) startHealthCheck(ctx context.Context) { + var healthcheckManagingState bool + defer func() { + if !healthcheckManagingState { + ac.updateConnectivityState(connectivity.Ready) + } + }() + + if ac.cc.dopts.disableHealthCheck { + return + } + healthCheckConfig := ac.cc.healthCheckConfig() + if healthCheckConfig == nil { + return + } + if !ac.scopts.HealthCheckEnabled { + return + } + healthCheckFunc := ac.cc.dopts.healthCheckFunc + if healthCheckFunc == nil { + // The health package is not imported to set health check function. + // + // TODO: add a link to the health check doc in the error message. + grpclog.Error("Health check is requested but health check function is not set.") + return + } + + healthcheckManagingState = true + + // Set up the health check helper functions. + currentTr := ac.transport + newStream := func(method string) (interface{}, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() + return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") + } + ac.mu.Unlock() + return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) + } + setConnectivityState := func(s connectivity.State) { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.transport != currentTr { + return + } + ac.updateConnectivityState(s) + } + // Start the health checking stream. + go func() { + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + if err != nil { + if status.Code(err) == codes.Unimplemented { + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", + Severity: channelz.CtError, + }) + } + grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { + grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) + } + } + }() +} + +func (ac *addrConn) resetConnectBackoff() { + ac.mu.Lock() + close(ac.resetBackoff) + ac.backoffIdx = 0 + ac.resetBackoff = make(chan struct{}) + ac.mu.Unlock() +} + +// getReadyTransport returns the transport if ac's state is READY. +// Otherwise it returns nil, false. +// If ac's state is IDLE, it will trigger ac to connect. +func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { + ac.mu.Lock() + if ac.state == connectivity.Ready && ac.transport != nil { + t := ac.transport + ac.mu.Unlock() + return t, true + } + var idle bool + if ac.state == connectivity.Idle { + idle = true + } + ac.mu.Unlock() + // Trigger idle ac to connect. + if idle { + ac.connect() + } + return nil, false +} + +// tearDown starts to tear down the addrConn. +// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in +// some edge cases (e.g., the caller opens and closes many addrConn's in a +// tight loop. +// tearDown doesn't remove ac from ac.cc.conns. +func (ac *addrConn) tearDown(err error) { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + curTr := ac.transport + ac.transport = nil + // We have to set the state to Shutdown before anything else to prevent races + // between setting the state and logic that waits on context cancelation / etc. + ac.updateConnectivityState(connectivity.Shutdown) + ac.cancel() + ac.curAddr = resolver.Address{} + if err == errConnDrain && curTr != nil { + // GracefulClose(...) 
+		// i) receiving multiple GoAway frames from the server; or
+		// ii) there are concurrent name resolver/Balancer triggered
+		// address removal and GoAway.
+		// We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu.
+		ac.mu.Unlock()
+		curTr.GracefulClose()
+		ac.mu.Lock()
+	}
+	if channelz.IsOn() {
+		channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
+			Desc:     "Subchannel Deleted",
+			Severity: channelz.CtINFO,
+			Parent: &channelz.TraceEventDesc{
+				Desc:     fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID),
+				Severity: channelz.CtINFO,
+			},
+		})
+		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
+		// the entity being deleted, and thus prevent it from being deleted right away.
+		channelz.RemoveEntry(ac.channelzID)
+	}
+	ac.mu.Unlock()
+}
+
+func (ac *addrConn) getState() connectivity.State {
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	return ac.state
+}
+
+func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
+	ac.mu.Lock()
+	addr := ac.curAddr.Addr
+	ac.mu.Unlock()
+	return &channelz.ChannelInternalMetric{
+		State:                    ac.getState(),
+		Target:                   addr,
+		CallsStarted:             atomic.LoadInt64(&ac.czData.callsStarted),
+		CallsSucceeded:           atomic.LoadInt64(&ac.czData.callsSucceeded),
+		CallsFailed:              atomic.LoadInt64(&ac.czData.callsFailed),
+		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)),
+	}
+}
+
+func (ac *addrConn) incrCallsStarted() {
+	atomic.AddInt64(&ac.czData.callsStarted, 1)
+	atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano())
+}
+
+func (ac *addrConn) incrCallsSucceeded() {
+	atomic.AddInt64(&ac.czData.callsSucceeded, 1)
+}
+
+func (ac *addrConn) incrCallsFailed() {
+	atomic.AddInt64(&ac.czData.callsFailed, 1)
+}
+
+type retryThrottler struct {
+	max    float64
+	thresh float64
+	ratio  float64
+
+	mu     sync.Mutex
+	tokens float64 // TODO(dfawley): replace with atomic and remove lock.
+}
+
+// throttle subtracts a retry token from the pool and returns whether a retry
+// should be throttled (disallowed) based upon the retry throttling policy in
+// the service config.
+func (rt *retryThrottler) throttle() bool {
+	if rt == nil {
+		return false
+	}
+	rt.mu.Lock()
+	defer rt.mu.Unlock()
+	rt.tokens--
+	if rt.tokens < 0 {
+		rt.tokens = 0
+	}
+	return rt.tokens <= rt.thresh
+}
+
+func (rt *retryThrottler) successfulRPC() {
+	if rt == nil {
+		return
+	}
+	rt.mu.Lock()
+	defer rt.mu.Unlock()
+	rt.tokens += rt.ratio
+	if rt.tokens > rt.max {
+		rt.tokens = rt.max
+	}
+}
+
+type channelzChannel struct {
+	cc *ClientConn
+}
+
+func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
+	return c.cc.channelzMetric()
+}
+
+// ErrClientConnTimeout indicates that the ClientConn cannot establish the
+// underlying connections within the specified timeout.
+//
+// Deprecated: This error is never returned by grpc and should not be
+// referenced by users.
+var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
new file mode 100644
index 0000000000000..129776547811b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" +) + +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. +type baseCodec interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. + String() string +} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh new file mode 100644 index 0000000000000..4cdc6ba7c0909 --- /dev/null +++ b/vendor/google.golang.org/grpc/codegen.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# This script serves as an example to demonstrate how to generate the gRPC-Go +# interface and the related messages from .proto file. +# +# It assumes the installation of i) Google proto buffer compiler at +# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen +# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have +# not, please install them first. +# +# We recommend running this script at $GOPATH/src. +# +# If this is not what you need, feel free to make your own scripts. Again, this +# script is for demonstration purpose. +# +proto=$1 +protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/BUILD.bazel b/vendor/google.golang.org/grpc/codes/BUILD.bazel new file mode 100644 index 0000000000000..f0fa0550d5ccc --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "code_string.go", + "codes.go", + ], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/codes", + importpath = "google.golang.org/grpc/codes", + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 0000000000000..0b206a57822af --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package codes + +import "strconv" + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 0000000000000..02738839dd98f --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,198 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. 
For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. 
+ Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + Unauthenticated Code = 16 + + _maxCode = 17 +) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/vendor/google.golang.org/grpc/connectivity/BUILD.bazel b/vendor/google.golang.org/grpc/connectivity/BUILD.bazel new file mode 100644 index 0000000000000..0648ee209ef4a --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["connectivity.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/connectivity", + importpath = "google.golang.org/grpc/connectivity", + visibility = ["//visibility:public"], + deps = ["//vendor/google.golang.org/grpc/grpclog:go_default_library"], +) diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 0000000000000..34ec36fbf6d9c --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. 
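As a small illustration of the Code JSON handling implemented by UnmarshalJSON above, both the quoted name and the numeric value decode to the same Code; this is a sketch assuming only the vendored codes package.

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	var byName, byNumber codes.Code
	// The string form uses the canonical upper-case names from the spec.
	if err := json.Unmarshal([]byte(`"NOT_FOUND"`), &byName); err != nil {
		panic(err)
	}
	// The numeric form is the raw uint32 value of the code.
	if err := json.Unmarshal([]byte(`5`), &byNumber); err != nil {
		panic(err)
	}
	fmt.Println(byName, byNumber) // NotFound NotFound
}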
+// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +// All APIs in this package are experimental. +package connectivity + +import ( + "context" + + "google.golang.org/grpc/grpclog" +) + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + grpclog.Errorf("unknown connectivity state: %d", s) + return "Invalid-State" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// Reporter reports the connectivity states. +type Reporter interface { + // CurrentState returns the current state of the reporter. + CurrentState() State + // WaitForStateChange blocks until the reporter's state is different from the given state, + // and returns true. + // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). + WaitForStateChange(context.Context, State) bool +} diff --git a/vendor/google.golang.org/grpc/credentials/BUILD.bazel b/vendor/google.golang.org/grpc/credentials/BUILD.bazel new file mode 100644 index 0000000000000..84473a2a3a4b4 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "credentials.go", + "tls13.go", + ], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/credentials", + importpath = "google.golang.org/grpc/credentials", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/google.golang.org/grpc/credentials/internal:go_default_library", + ], +) diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 0000000000000..8ea3d4a1dc284 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,336 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. 
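The Reporter semantics above are what grpc.ClientConn exposes through its GetState and WaitForStateChange methods; a minimal, hypothetical helper that blocks until a connection is Ready might look like the following sketch.

package grpcutil // hypothetical package

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// waitUntilReady polls the current state and blocks on WaitForStateChange
// until the connection reaches Ready or ctx expires.
func waitUntilReady(ctx context.Context, conn *grpc.ClientConn) bool {
	for {
		s := conn.GetState()
		if s == connectivity.Ready {
			return true
		}
		if !conn.WaitForStateChange(ctx, s) {
			return false // ctx timed out or was canceled
		}
	}
}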
+package credentials // import "google.golang.org/grpc/credentials" + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "net" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/credentials/internal" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing + // tokens if required. This should be called by the transport layer on + // each request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status + // for the RPC. uri is the URI of the entry point for the request. + // When supported by the underlying implementation, ctx can be used for + // timeout and cancellation. + // TODO(zhaoq): Define the set of the qualified keys instead of leaving + // it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the corresponding + // authentication protocol on rawConn for clients. It returns the authenticated + // connection and the corresponding auth information about the connection. + // Implementations must use the provided context to implement timely cancellation. + // gRPC will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). + // If the returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. 
+ Clone() TransportCredentials + // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. + // gRPC internals also use it to override the virtual hosting name if it is set. + // It must be called before dialing. Currently, this is only used by grpclb. + OverrideServerName(string) error +} + +// Bundle is a combination of TransportCredentials and PerRPCCredentials. +// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { + TransportCredentials() TransportCredentials + PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // + // NewWithMode returns nil if the requested mode is not supported. + NewWithMode(mode string) (Bundle, error) +} + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. 
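For the PerRPCCredentials interface described above, a minimal static-token implementation could look like this sketch; the token and header name are illustrative only.

package tokencreds // hypothetical package

import "context"

// bearerToken attaches a fixed bearer token to every outbound RPC.
type bearerToken struct{ token string }

func (b bearerToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + b.token}, nil
}

// RequireTransportSecurity reports that these credentials must only be sent
// over a secure (e.g. TLS) connection.
func (b bearerToken) RequireTransportSecurity() bool { return true }

// Usage (sketch): grpc.Dial(target, grpc.WithTransportCredentials(tlsCreds),
//                           grpc.WithPerRPCCredentials(bearerToken{token: "..."}))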
+type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := cloneTLSConfig(c.config) + if cfg.ServerName == "" { + colonPos := strings.LastIndex(authority, ":") + if colonPos == -1 { + colonPos = len(authority) + } + cfg.ServerName = authority[:colonPos] + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + }() + select { + case err := <-errChannel: + if err != nil { + return nil, nil, err + } + case <-ctx.Done(): + return nil, nil, ctx.Err() + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + return nil, nil, err + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +const alpnProtoStrH2 = "h2" + +func appendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{cloneTLSConfig(c)} + tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. 
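Putting the TLS constructors above together, a hedged client/server sketch could look like this; the file paths and addresses are hypothetical.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Server side: load a certificate/key pair (hypothetical paths).
	serverCreds, err := credentials.NewServerTLSFromFile("server.crt", "server.key")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer(grpc.Creds(serverCreds))
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	go srv.Serve(lis)

	// Client side: trust a CA bundle and dial with TLS.
	clientCreds, err := credentials.NewClientTLSFromFile("ca.crt", "")
	if err != nil {
		log.Fatal(err)
	}
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(clientCreds))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}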
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// ChannelzSecurityInfo defines the interface that security protocols should implement +// in order to provide security info to channelz. +type ChannelzSecurityInfo interface { + GetSecurityValue() ChannelzSecurityValue +} + +// ChannelzSecurityValue defines the interface that GetSecurityValue() return value +// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue +// and *OtherChannelzSecurityValue. +type ChannelzSecurityValue interface { + isChannelzSecurityValue() +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return +// from GetSecurityValue(), which contains protocol specific security info. Note +// the Value field will be sent to users of channelz requesting channel info, and +// thus sensitive info should better be avoided. +type OtherChannelzSecurityValue struct { + ChannelzSecurityValue + Name string + Value proto.Message +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +} + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. 
+// +// TODO: inline this function if possible. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/credentials/internal/BUILD.bazel b/vendor/google.golang.org/grpc/credentials/internal/BUILD.bazel new file mode 100644 index 0000000000000..96812607a9af7 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/internal/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["syscallconn.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/credentials/internal", + importpath = "google.golang.org/grpc/credentials/internal", + visibility = ["//vendor/google.golang.org/grpc/credentials:__subpackages__"], +) diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go new file mode 100644 index 0000000000000..2f4472becc7c9 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go @@ -0,0 +1,61 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains credentials-internal code. +package internal + +import ( + "net" + "syscall" +) + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns +// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { + net.Conn + // sysConn is a type alias of syscall.Conn. It's necessary because the name + // `Conn` collides with `net.Conn`. + sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + sysConn, ok := rawConn.(syscall.Conn) + if !ok { + return newConn + } + return &syscallConn{ + Conn: newConn, + sysConn: sysConn, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go new file mode 100644 index 0000000000000..d4346e9eabe6f --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go @@ -0,0 +1,30 @@ +// +build appengine + +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// WrapSyscallConn returns newConn on appengine. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + return newConn +} diff --git a/vendor/google.golang.org/grpc/credentials/tls13.go b/vendor/google.golang.org/grpc/credentials/tls13.go new file mode 100644 index 0000000000000..ccbf35b331258 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls13.go @@ -0,0 +1,30 @@ +// +build go1.12 + +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import "crypto/tls" + +// This init function adds cipher suite constants only defined in Go 1.12. +func init() { + cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" + cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" + cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 0000000000000..e8f34d0d6eaad --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,554 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "net" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. 
+type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + + cp Compressor + dc Decompressor + bs backoff.Strategy + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by v1 balancer dial option WithBalancer to support v1 + // balancer, and also by WithBalancerName dial option. + balancerBuilder balancer.Builder + // This is to support grpclb. + resolverBuilder resolver.Builder + channelzParentID int64 + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker + minConnectTimeout func() time.Duration + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string +} + +// DialOption configures how we set up the connection. +type DialOption interface { + apply(*dialOptions) +} + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// This API is EXPERIMENTAL. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The corresponding memory allocation for this buffer will +// be twice the size to keep syscalls low. The default value for this buffer is +// 32KB. +// +// Zero will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. +func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s + }) +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how +// much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero will disable read buffer for +// a connection so data framer can access the underlying conn directly. +func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s + }) +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial +// window size on a stream. The lower bound for window size is 64K and any value +// smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + }) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. The lower bound for window size is 64K +// and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + }) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. 
Will +// be supported throughout 1.x. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + }) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be +// supported throughout 1.x. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. Will be supported throughout 1.x. +func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithBalancer returns a DialOption which sets a load balancer with the v1 API. +// Name resolver will be ignored if this DialOption is specified. +// +// Deprecated: use the new balancer APIs in balancer package and +// WithBalancerName. Will be removed in a future 1.x release. +func WithBalancer(b Balancer) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = &balancerWrapperBuilder{ + b: b, + } + }) +} + +// WithBalancerName sets the balancer that the ClientConn will be initialized +// with. Balancer registered with balancerName will be used. This function +// panics if no balancer was registered by balancerName. +// +// The balancer cannot be overridden by balancer option specified by service +// config. +// +// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig +// instead. Will be removed in a future 1.x release. +func WithBalancerName(balancerName string) DialOption { + builder := balancer.Get(balancerName) + if builder == nil { + panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) + } + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = builder + }) +} + +// withResolverBuilder is only for grpclb. +func withResolverBuilder(b resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolverBuilder = b + }) +} + +// WithServiceConfig returns a DialOption which has a channel to read the +// service configuration. +// +// Deprecated: service config should be received through name resolver or via +// WithDefaultServiceConfig, as specified at +// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be +// removed in a future 1.x release. 
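Following the deprecation notes above, the non-deprecated way to bound message sizes or pick a compressor is through WithDefaultCallOptions; a minimal sketch, with a hypothetical target, is shown below.

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // assumed: registers the "gzip" compressor used by UseCompressor
)

func main() {
	conn, err := grpc.Dial(
		"example.com:50051", // hypothetical target
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(16*1024*1024), // preferred over the deprecated WithMaxMsgSize
			grpc.UseCompressor("gzip"),            // preferred over the deprecated WithCompressor
		),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}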
+func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.scChan = c + }) +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up +// for use. +func WithBackoffConfig(b BackoffConfig) DialOption { + return withBackoff(backoff.Exponential{ + MaxDelay: b.MaxDelay, + }) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a failed +// connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs backoff.Strategy) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = bs + }) +} + +// WithBlock returns a DialOption which makes caller of Dial blocks until the +// underlying connection is up. Without this, Dial returns immediately and +// connecting the server happens in background. +func WithBlock() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + }) +} + +// WithInsecure returns a DialOption which disables transport security for this +// ClientConn. Note that transport security is required unless WithInsecure is +// set. +func WithInsecure() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.insecure = true + }) +} + +// WithTransportCredentials returns a DialOption which configures a connection +// level security credentials (e.g., TLS/SSL). This should not be used together +// with WithCredentialsBundle. +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = creds + }) +} + +// WithPerRPCCredentials returns a DialOption which sets credentials and places +// auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + }) +} + +// WithCredentialsBundle returns a DialOption to set a credentials bundle for +// the ClientConn.WithCreds. This should not be used together with +// WithTransportCredentials. +// +// This API is experimental. +func WithCredentialsBundle(b credentials.Bundle) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.CredsBundle = b + }) +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a +// ClientConn initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: use DialContext and context.WithTimeout instead. Will be +// supported throughout 1.x. +func WithTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.timeout = d + }) +} + +// WithContextDialer returns a DialOption that sets a dialer to create +// connections. If FailOnNonTempDialError() is set to true, and an error is +// returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. 
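A minimal sketch of the context dialer option described above, routing all connections through a custom net.Dialer; the target is hypothetical.

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	d := &net.Dialer{}
	conn, err := grpc.Dial(
		"example.com:50051", // hypothetical target
		grpc.WithInsecure(),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			// Any custom transport (proxy, unix socket, in-memory pipe) could go here.
			return d.DialContext(ctx, "tcp", addr)
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}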
+func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.Dialer = f + }) +} + +func init() { + internal.WithResolverBuilder = withResolverBuilder + internal.WithHealthCheckFunc = withHealthCheckFunc +} + +// WithDialer returns a DialOption that specifies a function to use for dialing +// network addresses. If FailOnNonTempDialError() is set to true, and an error +// is returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +// +// Deprecated: use WithContextDialer instead. Will be supported throughout +// 1.x. +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return WithContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, time.Until(deadline)) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler for +// all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.StatsHandler = h + }) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. If f is true, and dialer returns a non-temporary +// error, gRPC will fail the connection to the network address and won't try to +// reconnect. The default value of FailOnNonTempDialError is false. +// +// FailOnNonTempDialError only affects the initial dial, and does not do +// anything useful unless you are also using WithBlock(). +// +// This is an EXPERIMENTAL API. +func FailOnNonTempDialError(f bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + }) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. +func WithUserAgent(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UserAgent = s + }) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + if kp.Time < internal.KeepaliveMinPingTime { + grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + kp.Time = internal.KeepaliveMinPingTime + } + return newFuncDialOption(func(o *dialOptions) { + o.copts.KeepaliveParams = kp + }) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.unaryInt = f + }) +} + +// WithChainUnaryInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithUnaryInterceptor will always be prepended to the chain. +func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. 
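As a hedged illustration of the interceptor options above, a simple client-side logging interceptor installed via WithChainUnaryInterceptor might look like this; the target is hypothetical.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// logUnary times each unary RPC and logs the method, duration, and error.
func logUnary(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("%s took %v (err=%v)", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.Dial(
		"example.com:50051", // hypothetical target
		grpc.WithInsecure(),
		grpc.WithChainUnaryInterceptor(logUnary),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}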
+func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.streamInt = f + }) +} + +// WithChainStreamInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithStreamInterceptor will always be prepended to the chain. +func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// WithAuthority returns a DialOption that specifies the value to be used as the +// :authority pseudo-header. This value only works with WithInsecure and has no +// effect if TransportCredentials are present. +func WithAuthority(a string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.authority = a + }) +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of +// current ClientConn's parent. This function is used in nested channel creation +// (e.g. grpclb dial). +func WithChannelzParentID(id int64) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.channelzParentID = id + }) +} + +// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +// +// Note that this dial option only disables service config from resolver. If +// default service config is provided, gRPC will use the default service config. +func WithDisableServiceConfig() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableServiceConfig = true + }) +} + +// WithDefaultServiceConfig returns a DialOption that configures the default +// service config, which will be used in cases where: +// +// 1. WithDisableServiceConfig is also used. +// 2. Resolver does not return a service config or if the resolver returns an +// invalid service config. +// +// This API is EXPERIMENTAL. +func WithDefaultServiceConfig(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultServiceConfigRawJSON = &s + }) +} + +// WithDisableRetry returns a DialOption that disables retries, even if the +// service config enables them. This does not impact transparent retries, which +// will happen automatically if no data is written to the wire or if the RPC is +// unprocessed by the remote server. +// +// Retry support is currently disabled by default, but will be enabled by +// default in the future. Until then, it may be enabled by setting the +// environment variable "GRPC_GO_RETRY" to "on". +// +// This API is EXPERIMENTAL. +func WithDisableRetry() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableRetry = true + }) +} + +// WithMaxHeaderListSize returns a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +func WithMaxHeaderListSize(s uint32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.MaxHeaderListSize = &s + }) +} + +// WithDisableHealthCheck disables the LB channel health checking for all +// SubConns of this ClientConn. +// +// This API is EXPERIMENTAL. 
+func WithDisableHealthCheck() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableHealthCheck = true + }) +} + +// withHealthCheckFunc replaces the default health check function with the +// provided one. It makes tests easier to change the health check function. +// +// For testing purpose only. +func withHealthCheckFunc(f internal.HealthChecker) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.healthCheckFunc = f + }) +} + +func defaultDialOptions() dialOptions { + return dialOptions{ + disableRetry: !envconfig.Retry, + healthCheckFunc: internal.HealthCheckFunc, + copts: transport.ConnectOptions{ + WriteBufferSize: defaultWriteBufSize, + ReadBufferSize: defaultReadBufSize, + }, + } +} + +// withGetMinConnectDeadline specifies the function that clientconn uses to +// get minConnectDeadline. This can be used to make connection attempts happen +// faster/slower. +// +// For testing purpose only. +func withMinConnectDeadline(f func() time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.minConnectTimeout = f + }) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 0000000000000..187adbb117f2e --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,24 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/BUILD.bazel b/vendor/google.golang.org/grpc/encoding/BUILD.bazel new file mode 100644 index 0000000000000..4992afcae7164 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["encoding.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/encoding", + importpath = "google.golang.org/grpc/encoding", + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 0000000000000..30a75da99d5e9 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,118 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// This package is EXPERIMENTAL. +package encoding + +import ( + "io" + "strings" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. +const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned + // instead. + Compress(w io.Writer) (io.WriteCloser, error) + // Decompress reads data from r, decompresses it, and provides the + // uncompressed data via the returned io.Reader. If an error occurs while + // initializing the decompressor, that error is returned instead. + Decompress(r io.Reader) (io.Reader, error) + // Name is the name of the compression codec and is used to set the content + // coding header. The result must be static; the result cannot change + // between calls. + Name() string +} + +var registeredCompressor = make(map[string]Compressor) + +// RegisterCompressor registers the compressor with gRPC by its name. It can +// be activated when sending an RPC via grpc.UseCompressor(). It will be +// automatically accessed when receiving a message based on the content coding +// header. Servers also use it to send a response with the same encoding as +// the request. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCompressor(c Compressor) { + registeredCompressor[c.Name()] = c +} + +// GetCompressor returns Compressor for the given compressor name. +func GetCompressor(name string) Compressor { + return registeredCompressor[name] +} + +// Codec defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +var registeredCodecs = make(map[string]Codec) + +// RegisterCodec registers the provided Codec for use with all gRPC clients and +// servers. +// +// The Codec will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the Codec. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodec will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. 
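Illustrative sketch (not part of the vendored change): a minimal gzip-backed Compressor that satisfies the interface above and registers itself at init time. The name "gzip-example" is invented; gRPC ships its own gzip compressor in the encoding/gzip subpackage, so this exists only to show the contract.

package gzipexample

import (
	"compress/gzip"
	"io"

	"google.golang.org/grpc/encoding"
)

type compressor struct{}

// Compress wraps w in a gzip writer; closing the returned WriteCloser
// flushes the compressed stream.
func (compressor) Compress(w io.Writer) (io.WriteCloser, error) {
	return gzip.NewWriter(w), nil
}

// Decompress wraps r in a gzip reader.
func (compressor) Decompress(r io.Reader) (io.Reader, error) {
	return gzip.NewReader(r)
}

// Name is the content-coding this compressor is registered under.
func (compressor) Name() string { return "gzip-example" }

func init() {
	// Per the note above, registration must happen at init time.
	encoding.RegisterCompressor(compressor{})
}

A client could then opt in per call with grpc.UseCompressor("gzip-example"), the hook mentioned in the RegisterCompressor comment above.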
+func RegisterCodec(codec Codec) { + if codec == nil { + panic("cannot register a nil Codec") + } + if codec.Name() == "" { + panic("cannot register Codec with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodec(contentSubtype string) Codec { + return registeredCodecs[contentSubtype] +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/BUILD.bazel b/vendor/google.golang.org/grpc/encoding/proto/BUILD.bazel new file mode 100644 index 0000000000000..548af745951ba --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/proto/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["proto.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/encoding/proto", + importpath = "google.golang.org/grpc/encoding/proto", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/google.golang.org/grpc/encoding:go_default_library", + ], +) diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go new file mode 100644 index 0000000000000..66b97a6f692a6 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "math" + "sync" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the proto compressor. +const Name = "proto" + +func init() { + encoding.RegisterCodec(codec{}) +} + +// codec is a Codec implementation with protobuf. It is the default codec for gRPC. 
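Illustrative sketch (not part of the vendored change): the proto codec that follows is the stock Codec; by contrast, a user-provided Codec only has to implement the three methods above and register itself. The naive JSON codec below, with an invented name, shows the mechanics.

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// codec is a toy Codec that (de)serializes messages as JSON. It exists only
// to illustrate registration; protobuf messages are better served by the
// proto codec registered below.
type codec struct{}

func (codec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (codec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (codec) Name() string                               { return "json-example" }

func init() {
	encoding.RegisterCodec(codec{})
}

Callers can then select it for an individual RPC via the content-subtype call option (grpc.CallContentSubtype("json-example")).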
+type codec struct{} + +type cachedProtoBuffer struct { + lastMarshaledSize uint32 + proto.Buffer +} + +func capToMaxInt32(val int) uint32 { + if val > math.MaxInt32 { + return uint32(math.MaxInt32) + } + return uint32(val) +} + +func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { + protoMsg := v.(proto.Message) + newSlice := make([]byte, 0, cb.lastMarshaledSize) + + cb.SetBuf(newSlice) + cb.Reset() + if err := cb.Marshal(protoMsg); err != nil { + return nil, err + } + out := cb.Bytes() + cb.lastMarshaledSize = capToMaxInt32(len(out)) + return out, nil +} + +func (codec) Marshal(v interface{}) ([]byte, error) { + if pm, ok := v.(proto.Marshaler); ok { + // object can marshal itself, no need for buffer + return pm.Marshal() + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + out, err := marshal(v, cb) + + // put back buffer and lose the ref to the slice + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return out, err +} + +func (codec) Unmarshal(data []byte, v interface{}) error { + protoMsg := v.(proto.Message) + protoMsg.Reset() + + if pu, ok := protoMsg.(proto.Unmarshaler); ok { + // object can unmarshal itself, no need for buffer + return pu.Unmarshal(data) + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + cb.SetBuf(data) + err := cb.Unmarshal(protoMsg) + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return err +} + +func (codec) Name() string { + return Name +} + +var protoBufferPool = &sync.Pool{ + New: func() interface{} { + return &cachedProtoBuffer{ + Buffer: proto.Buffer{}, + lastMarshaledSize: 16, + } + }, +} diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod new file mode 100644 index 0000000000000..c1a8340c5baef --- /dev/null +++ b/vendor/google.golang.org/grpc/go.mod @@ -0,0 +1,19 @@ +module google.golang.org/grpc + +require ( + cloud.google.com/go v0.26.0 // indirect + github.com/BurntSushi/toml v0.3.1 // indirect + github.com/client9/misspell v0.3.4 + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b + github.com/golang/mock v1.1.1 + github.com/golang/protobuf v1.2.0 + github.com/google/go-cmp v0.2.0 + golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 + golang.org/x/net v0.0.0-20190311183353-d8887717615a + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a + golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 + google.golang.org/appengine v1.1.0 // indirect + google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 + honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc +) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum new file mode 100644 index 0000000000000..741677d2e81a6 --- /dev/null +++ b/vendor/google.golang.org/grpc/go.sum @@ -0,0 +1,37 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1 
h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/grpclog/BUILD.bazel b/vendor/google.golang.org/grpc/grpclog/BUILD.bazel new file mode 100644 index 0000000000000..40c211400d641 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "grpclog.go", + "logger.go", + "loggerv2.go", + ], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/grpclog", + importpath = "google.golang.org/grpc/grpclog", + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 
index 0000000000000..51bb9457cdabe --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport and grpclb packages only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import "os" + +var logger = newLoggerV2() + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { + logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...interface{}) { + logger.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...interface{}) { + logger.Warning(args...) +} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...interface{}) { + logger.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +func Warningln(args ...interface{}) { + logger.Warningln(args...) +} + +// Error logs to the ERROR log. +func Error(args ...interface{}) { + logger.Error(args...) +} + +// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +func Errorf(format string, args ...interface{}) { + logger.Errorf(format, args...) +} + +// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +func Errorln(args ...interface{}) { + logger.Errorln(args...) +} + +// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. +// It calls os.Exit() with exit code 1. +func Fatal(args ...interface{}) { + logger.Fatal(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. +// It calles os.Exit() with exit code 1. +func Fatalf(format string, args ...interface{}) { + logger.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. +// It calle os.Exit()) with exit code 1. +func Fatalln(args ...interface{}) { + logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// +// Deprecated: use Info. 
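Illustrative sketch (not part of the vendored change): per the package comment above, the default logger's severity and verbosity come from GRPC_GO_LOG_SEVERITY_LEVEL and GRPC_GO_LOG_VERBOSITY_LEVEL, read once at init. The guard pattern below avoids building verbose messages unless they will actually be emitted; the message contents are invented.

package main

import "google.golang.org/grpc/grpclog"

func main() {
	// With GRPC_GO_LOG_SEVERITY_LEVEL=info and GRPC_GO_LOG_VERBOSITY_LEVEL=2
	// set in the environment, everything below reaches stderr; with the
	// defaults, only ERROR-level output does.
	grpclog.Warningf("connection to %s is flaky", "example.invalid")

	if grpclog.V(2) { // skip the expensive formatting unless verbose
		grpclog.Infof("detailed state: %+v", struct{ Ready bool }{true})
	}
}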
+func Print(args ...interface{}) { + logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// +// Deprecated: use Infof. +func Printf(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// +// Deprecated: use Infoln. +func Println(args ...interface{}) { + logger.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 0000000000000..097494f710f5b --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) +} + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. +// +// Deprecated: use SetLoggerV2. +func SetLogger(l Logger) { + logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 0000000000000..d4932577695f8 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,195 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "io" + "io/ioutil" + "log" + "os" + "strconv" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + logger = l +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. 
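Illustrative sketch (not part of the vendored change): installing a custom LoggerV2 built with the constructors defined just below. The writer choices and verbosity level are arbitrary, and, per the SetLoggerV2 comment above, the call has to happen before any other gRPC activity, hence the init function.

package main

import (
	"io/ioutil"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Everything (INFO/WARNING/ERROR) lands on stdout exactly once: higher
	// severities are fanned out to the lower-severity writers, so Discard is
	// passed for the warning and error writers to avoid duplicate lines.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, ioutil.Discard, ioutil.Discard, 2))
}

func main() {}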
+func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) +} + +func (g *loggerT) Info(args ...interface{}) { + g.m[infoLog].Print(args...) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.m[infoLog].Println(args...) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.m[infoLog].Printf(format, args...) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.m[warningLog].Print(args...) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.m[warningLog].Println(args...) +} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.m[warningLog].Printf(format, args...) +} + +func (g *loggerT) Error(args ...interface{}) { + g.m[errorLog].Print(args...) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.m[errorLog].Println(args...) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.m[errorLog].Printf(format, args...) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.m[fatalLog].Fatal(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.m[fatalLog].Fatalln(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.m[fatalLog].Fatalf(format, args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). 
+} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh new file mode 100644 index 0000000000000..7c7bcada50442 --- /dev/null +++ b/vendor/google.golang.org/grpc/install_gae.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +TMP=$(mktemp -d /tmp/sdk.XXX) \ +&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ +&& unzip -q $TMP.zip -d $TMP \ +&& export PATH="$PATH:$TMP/go_appengine" diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 0000000000000..8b7350022ad73 --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,77 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC +// and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O +// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the +// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as +// the status message of the RPC. 
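Illustrative sketch (not part of the vendored change): a minimal client-side unary interceptor matching the UnaryClientInterceptor type above. It times each RPC and then delegates to the invoker; the target and log wording are invented. It is installed with grpc.WithUnaryInterceptor, or chained via the WithChainUnaryInterceptor option added earlier in this diff.

package main

import (
	"context"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"
)

// timing is a grpc.UnaryClientInterceptor that reports how long each RPC took.
func timing(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	grpclog.Infof("%s took %v (err=%v)", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.Dial("example.invalid:443",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(timing),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}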
+type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on. And handler is the +// service method implementation. It is the responsibility of the interceptor to invoke handler to +// complete the RPC. +type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/BUILD.bazel b/vendor/google.golang.org/grpc/internal/BUILD.bazel new file mode 100644 index 0000000000000..8a92b27412567 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["internal.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/internal", + importpath = "google.golang.org/grpc/internal", + visibility = ["//vendor/google.golang.org/grpc:__subpackages__"], + deps = ["//vendor/google.golang.org/grpc/connectivity:go_default_library"], +) diff --git a/vendor/google.golang.org/grpc/internal/backoff/BUILD.bazel b/vendor/google.golang.org/grpc/internal/backoff/BUILD.bazel new file mode 100644 index 0000000000000..265735777010e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/backoff/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["backoff.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/internal/backoff", + importpath = "google.golang.org/grpc/internal/backoff", + visibility = ["//vendor/google.golang.org/grpc:__subpackages__"], + deps = ["//vendor/google.golang.org/grpc/internal/grpcrand:go_default_library"], +) diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go new file mode 100644 index 0000000000000..1bd0cce5abdfa --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -0,0 +1,78 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff implement the backoff strategy for gRPC. +// +// This is kept in internal until the gRPC project decides whether or not to +// allow alternative backoff strategies. +package backoff + +import ( + "time" + + "google.golang.org/grpc/internal/grpcrand" +) + +// Strategy defines the methodology for backing off after a grpc connection +// failure. +// +type Strategy interface { + // Backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + Backoff(retries int) time.Duration +} + +const ( + // baseDelay is the amount of time to wait before retrying after the first + // failure. + baseDelay = 1.0 * time.Second + // factor is applied to the backoff after each retry. + factor = 1.6 + // jitter provides a range to randomize backoff delays. + jitter = 0.2 +) + +// Exponential implements exponential backoff algorithm as defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +type Exponential struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// Backoff returns the amount of time to wait before the next retry given the +// number of retries. +func (bc Exponential) Backoff(retries int) time.Duration { + if retries == 0 { + return baseDelay + } + backoff, max := float64(baseDelay), float64(bc.MaxDelay) + for backoff < max && retries > 0 { + backoff *= factor + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + jitter*(grpcrand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/vendor/google.golang.org/grpc/internal/balancerload/BUILD.bazel b/vendor/google.golang.org/grpc/internal/balancerload/BUILD.bazel new file mode 100644 index 0000000000000..d8b17506cc7b4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancerload/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["load.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/internal/balancerload", + importpath = "google.golang.org/grpc/internal/balancerload", + visibility = ["//vendor/google.golang.org/grpc:__subpackages__"], + deps = ["//vendor/google.golang.org/grpc/metadata:go_default_library"], +) diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go new file mode 100644 index 0000000000000..3a905d96657e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -0,0 +1,46 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package balancerload defines APIs to parse server loads in trailers. The +// parsed loads are sent to balancers in DoneInfo. +package balancerload + +import ( + "google.golang.org/grpc/metadata" +) + +// Parser converts loads from metadata into a concrete type. +type Parser interface { + // Parse parses loads from metadata. + Parse(md metadata.MD) interface{} +} + +var parser Parser + +// SetParser sets the load parser. +// +// Not mutex-protected, should be called before any gRPC functions. +func SetParser(lr Parser) { + parser = lr +} + +// Parse calls parser.Read(). +func Parse(md metadata.MD) interface{} { + if parser == nil { + return nil + } + return parser.Parse(md) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/BUILD.bazel b/vendor/google.golang.org/grpc/internal/binarylog/BUILD.bazel new file mode 100644 index 0000000000000..335362e3ab176 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "binarylog.go", + "binarylog_testutil.go", + "env_config.go", + "method_logger.go", + "sink.go", + "util.go", + ], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/internal/binarylog", + importpath = "google.golang.org/grpc/internal/binarylog", + visibility = ["//vendor/google.golang.org/grpc:__subpackages__"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes:go_default_library", + "//vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + ], +) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go new file mode 100644 index 0000000000000..fee6aecd08f60 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package binarylog implementation binary logging as defined in +// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md. +package binarylog + +import ( + "fmt" + "os" + + "google.golang.org/grpc/grpclog" +) + +// Logger is the global binary logger. It can be used to get binary logger for +// each method. 
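Illustrative sketch (not part of the vendored change): the internal/backoff package a few hunks above is not importable outside gRPC itself, so the standalone function below re-derives the same calculation (1s base, factor 1.6, ±20% jitter, capped at MaxDelay), with math/rand standing in for the internal grpcrand helper.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff mirrors internal/backoff.Exponential.Backoff from the hunk above.
func backoff(retries int, maxDelay time.Duration) time.Duration {
	const (
		factor = 1.6
		jitter = 0.2
	)
	baseDelay := float64(time.Second)
	if retries == 0 {
		return time.Duration(baseDelay)
	}
	cur, max := baseDelay, float64(maxDelay)
	for cur < max && retries > 0 {
		cur *= factor
		retries--
	}
	if cur > max {
		cur = max
	}
	// Randomize so that a burst of connections does not retry in lockstep.
	cur *= 1 + jitter*(rand.Float64()*2-1)
	if cur < 0 {
		return 0
	}
	return time.Duration(cur)
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Printf("retry %d -> %v\n", i, backoff(i, 120*time.Second))
	}
}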
+type Logger interface { + getMethodLogger(methodName string) *MethodLogger +} + +// binLogger is the global binary logger for the binary. One of this should be +// built at init time from the configuration (environment varialbe or flags). +// +// It is used to get a methodLogger for each individual method. +var binLogger Logger + +// SetLogger sets the binarg logger. +// +// Only call this at init time. +func SetLogger(l Logger) { + binLogger = l +} + +// GetMethodLogger returns the methodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each methodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func GetMethodLogger(methodName string) *MethodLogger { + if binLogger == nil { + return nil + } + return binLogger.getMethodLogger(methodName) +} + +func init() { + const envStr = "GRPC_BINARY_LOG_FILTER" + configStr := os.Getenv(envStr) + binLogger = NewLoggerFromConfigString(configStr) +} + +type methodLoggerConfig struct { + // Max length of header and message. + hdr, msg uint64 +} + +type logger struct { + all *methodLoggerConfig + services map[string]*methodLoggerConfig + methods map[string]*methodLoggerConfig + + blacklist map[string]struct{} +} + +// newEmptyLogger creates an empty logger. The map fields need to be filled in +// using the set* functions. +func newEmptyLogger() *logger { + return &logger{} +} + +// Set method logger for "*". +func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { + if l.all != nil { + return fmt.Errorf("conflicting global rules found") + } + l.all = ml + return nil +} + +// Set method logger for "service/*". +// +// New methodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { + if _, ok := l.services[service]; ok { + return fmt.Errorf("conflicting rules for service %v found", service) + } + if l.services == nil { + l.services = make(map[string]*methodLoggerConfig) + } + l.services[service] = ml + return nil +} + +// Set method logger for "service/method". +// +// New methodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { + if _, ok := l.blacklist[method]; ok { + return fmt.Errorf("conflicting rules for method %v found", method) + } + if _, ok := l.methods[method]; ok { + return fmt.Errorf("conflicting rules for method %v found", method) + } + if l.methods == nil { + l.methods = make(map[string]*methodLoggerConfig) + } + l.methods[method] = ml + return nil +} + +// Set blacklist method for "-service/method". +func (l *logger) setBlacklist(method string) error { + if _, ok := l.blacklist[method]; ok { + return fmt.Errorf("conflicting rules for method %v found", method) + } + if _, ok := l.methods[method]; ok { + return fmt.Errorf("conflicting rules for method %v found", method) + } + if l.blacklist == nil { + l.blacklist = make(map[string]struct{}) + } + l.blacklist[method] = struct{}{} + return nil +} + +// getMethodLogger returns the methodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each methodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. 
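Illustrative sketch (not part of the vendored change): binarylog is internal and is normally driven by the GRPC_BINARY_LOG_FILTER environment variable read in init above, but the precedence applied by getMethodLogger below (method rule, then blacklist, then service rule, then the "*" default) can be shown with the package's own helpers. The service and method names are invented, the filter syntax is the one documented just below in NewLoggerFromConfigString, and the snippet would only compile inside the grpc module because of the internal package restriction.

// Only compiles inside google.golang.org/grpc itself; shown for illustration.
package binarylogexample

import (
	"fmt"

	"google.golang.org/grpc/internal/binarylog"
)

func example() {
	// "Log headers (up to 256 bytes) for every Foo method except Foo/Bar."
	binarylog.SetLogger(binarylog.NewLoggerFromConfigString("Foo/*{h:256},-Foo/Bar"))

	matched := binarylog.GetMethodLogger("/Foo/Baz") // service rule applies -> non-nil
	blocked := binarylog.GetMethodLogger("/Foo/Bar") // blacklisted -> nil
	fmt.Println(matched != nil, blocked == nil)      // true true
}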
+func (l *logger) getMethodLogger(methodName string) *MethodLogger { + s, m, err := parseMethodName(methodName) + if err != nil { + grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err) + return nil + } + if ml, ok := l.methods[s+"/"+m]; ok { + return newMethodLogger(ml.hdr, ml.msg) + } + if _, ok := l.blacklist[s+"/"+m]; ok { + return nil + } + if ml, ok := l.services[s]; ok { + return newMethodLogger(ml.hdr, ml.msg) + } + if l.all == nil { + return nil + } + return newMethodLogger(l.all.hdr, l.all.msg) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go new file mode 100644 index 0000000000000..1ee00a39ac7c7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains exported variables/functions that are exported for testing +// only. +// +// An ideal way for this would be to put those in a *_test.go but in binarylog +// package. But this doesn't work with staticcheck with go module. Error was: +// "MdToMetadataProto not declared by package binarylog". This could be caused +// by the way staticcheck looks for files for a certain package, which doesn't +// support *_test.go files. +// +// Move those to binary_test.go when staticcheck is fixed. + +package binarylog + +var ( + // AllLogger is a logger that logs all headers/messages for all RPCs. It's + // for testing only. + AllLogger = NewLoggerFromConfigString("*") + // MdToMetadataProto converts metadata to a binary logging proto message. + // It's for testing only. + MdToMetadataProto = mdToMetadataProto + // AddrToProto converts an address to a binary logging proto message. It's + // for testing only. + AddrToProto = addrToProto +) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go new file mode 100644 index 0000000000000..4cc2525df7fbe --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -0,0 +1,210 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "google.golang.org/grpc/grpclog" +) + +// NewLoggerFromConfigString reads the string and build a logger. 
It can be used +// to build a new logger and assign it to binarylog.Logger. +// +// Example filter config strings: +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. +// +// If two configs exist for one certain method or service, the one specified +// later overrides the privous config. +func NewLoggerFromConfigString(s string) Logger { + if s == "" { + return nil + } + l := newEmptyLogger() + methods := strings.Split(s, ",") + for _, method := range methods { + if err := l.fillMethodLoggerWithConfigString(method); err != nil { + grpclog.Warningf("failed to parse binary log config: %v", err) + return nil + } + } + return l +} + +// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// it to the right map in the logger. +func (l *logger) fillMethodLoggerWithConfigString(config string) error { + // "" is invalid. + if config == "" { + return errors.New("empty string is not a valid method binary logging config") + } + + // "-service/method", blacklist, no * or {} allowed. + if config[0] == '-' { + s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if m == "*" { + return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config") + } + if suffix != "" { + return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") + } + if err := l.setBlacklist(s + "/" + m); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + // "*{h:256;m:256}" + if config[0] == '*' { + hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + s, m, suffix, err := parseMethodConfigAndSuffix(config) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + hdr, msg, err := parseHeaderMessageLengthConfig(suffix) + if err != nil { + return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) + } + if m == "*" { + if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } else { + if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } + return nil +} + +const ( + // TODO: this const is only used by env_config now. But could be useful for + // other config. Move to binarylog.go if necessary. + maxUInt = ^uint64(0) + + // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for + // expected output. + longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` + + // For suffix from above, "{h:123,m:123}". See test for expected output. + optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". 
+ headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` + messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` + headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` +) + +var ( + longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) + headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) + messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) + headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) +) + +// Turn "service/method{h;m}" into "service", "method", "{h;m}". +func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { + // Regexp result: + // + // in: "p.s/m{h:123,m:123}", + // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, + match := longMethodConfigRegexp.FindStringSubmatch(c) + if match == nil { + return "", "", "", fmt.Errorf("%q contains invalid substring", c) + } + service = match[1] + method = match[2] + suffix = match[3] + return +} + +// Turn "{h:123;m:345}" into 123, 345. +// +// Return maxUInt if length is unspecified. +func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { + if c == "" { + return maxUInt, maxUInt, nil + } + // Header config only. + if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return hdrLenStr, 0, nil + } + return maxUInt, 0, nil + } + + // Message config only. + if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return 0, msgLenStr, nil + } + return 0, maxUInt, nil + } + + // Header and message config both. + if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { + // Both hdr and msg are specified, but one or two of them might be empty. + hdrLenStr = maxUInt + msgLenStr = maxUInt + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + if s := match[2]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + return hdrLenStr, msgLenStr, nil + } + return 0, 0, fmt.Errorf("%q contains invalid substring", c) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go new file mode 100644 index 0000000000000..160f6e8616f5b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -0,0 +1,423 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package binarylog + +import ( + "net" + "strings" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +type callIDGenerator struct { + id uint64 +} + +func (g *callIDGenerator) next() uint64 { + id := atomic.AddUint64(&g.id, 1) + return id +} + +// reset is for testing only, and doesn't need to be thread safe. +func (g *callIDGenerator) reset() { + g.id = 0 +} + +var idGen callIDGenerator + +// MethodLogger is the sub-logger for each method. +type MethodLogger struct { + headerMaxLen, messageMaxLen uint64 + + callID uint64 + idWithinCallGen *callIDGenerator + + sink Sink // TODO(blog): make this plugable. +} + +func newMethodLogger(h, m uint64) *MethodLogger { + return &MethodLogger{ + headerMaxLen: h, + messageMaxLen: m, + + callID: idGen.next(), + idWithinCallGen: &callIDGenerator{}, + + sink: defaultSink, // TODO(blog): make it plugable. + } +} + +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *MethodLogger) Log(c LogEntryConfig) { + m := c.toProto() + timestamp, _ := ptypes.TimestampProto(time.Now()) + m.Timestamp = timestamp + m.CallId = ml.callID + m.SequenceIdWithinCall = ml.idWithinCallGen.next() + + switch pay := m.Payload.(type) { + case *pb.GrpcLogEntry_ClientHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) + case *pb.GrpcLogEntry_ServerHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) + case *pb.GrpcLogEntry_Message: + m.PayloadTruncated = ml.truncateMessage(pay.Message) + } + + ml.sink.Write(m) +} + +func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { + if ml.headerMaxLen == maxUInt { + return false + } + var ( + bytesLimit = ml.headerMaxLen + index int + ) + // At the end of the loop, index will be the first entry where the total + // size is greater than the limit: + // + // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. + for ; index < len(mdPb.Entry); index++ { + entry := mdPb.Entry[index] + if entry.Key == "grpc-trace-bin" { + // "grpc-trace-bin" is a special key. It's kept in the log entry, + // but not counted towards the size limit. + continue + } + currentEntryLen := uint64(len(entry.Value)) + if currentEntryLen > bytesLimit { + break + } + bytesLimit -= currentEntryLen + } + truncated = index < len(mdPb.Entry) + mdPb.Entry = mdPb.Entry[:index] + return truncated +} + +func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { + if ml.messageMaxLen == maxUInt { + return false + } + if ml.messageMaxLen >= uint64(len(msgPb.Data)) { + return false + } + msgPb.Data = msgPb.Data[:ml.messageMaxLen] + return true +} + +// LogEntryConfig represents the configuration for binary log entry. +type LogEntryConfig interface { + toProto() *pb.GrpcLogEntry +} + +// ClientHeader configs the binary log entry to be a ClientHeader entry. +type ClientHeader struct { + OnClientSide bool + Header metadata.MD + MethodName string + Authority string + Timeout time.Duration + // PeerAddr is required only when it's on server side. + PeerAddr net.Addr +} + +func (c *ClientHeader) toProto() *pb.GrpcLogEntry { + // This function doesn't need to set all the fields (e.g. seq ID). The Log + // function will set the fields when necessary. 
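To make the header-truncation budget concrete, here is a hand-worked sketch (all values invented, written as if inside this package): with a 10-byte cap, two 4-byte entries fit and the third exceeds the remaining budget of 2 bytes, so it is dropped and the payload is marked truncated.

func exampleHeaderTruncation() {
	ml := newMethodLogger(10, maxUInt) // 10-byte header cap, no message cap
	md := &pb.Metadata{Entry: []*pb.MetadataEntry{
		{Key: "a", Value: []byte("aaaa")}, // 4 bytes: kept, budget 10 -> 6
		{Key: "b", Value: []byte("bbbb")}, // 4 bytes: kept, budget 6 -> 2
		{Key: "c", Value: []byte("cccc")}, // 4 bytes: over the remaining 2, dropped
	}}
	truncated := ml.truncateMetadata(md) // truncated == true, len(md.Entry) == 2
	_ = truncated
}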
+ clientHeader := &pb.ClientHeader{ + Metadata: mdToMetadataProto(c.Header), + MethodName: c.MethodName, + Authority: c.Authority, + } + if c.Timeout > 0 { + clientHeader.Timeout = ptypes.DurationProto(c.Timeout) + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &pb.GrpcLogEntry_ClientHeader{ + ClientHeader: clientHeader, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ServerHeader configs the binary log entry to be a ServerHeader entry. +type ServerHeader struct { + OnClientSide bool + Header metadata.MD + // PeerAddr is required only when it's on client side. + PeerAddr net.Addr +} + +func (c *ServerHeader) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &pb.GrpcLogEntry_ServerHeader{ + ServerHeader: &pb.ServerHeader{ + Metadata: mdToMetadataProto(c.Header), + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ClientMessage configs the binary log entry to be a ClientMessage entry. +type ClientMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message interface{} +} + +func (c *ClientMessage) toProto() *pb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerMessage configs the binary log entry to be a ServerMessage entry. +type ServerMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message interface{} +} + +func (c *ServerMessage) toProto() *pb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry. 
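As a hypothetical end-to-end use of the LogEntryConfig types above (method name, authority, and metadata all invented; the real callers are grpc's transport and stream code), a client-side caller hands one of them to MethodLogger.Log:

func exampleLogClientHeader(ml *MethodLogger) {
	ml.Log(&ClientHeader{
		OnClientSide: true,
		MethodName:   "/example.Service/Method",
		Authority:    "example.com:443",
		Timeout:      10 * time.Second,
		Header:       metadata.Pairs("custom-key", "custom-value"),
	})
}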
+type ClientHalfClose struct { + OnClientSide bool +} + +func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Payload: nil, // No payload here. + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerTrailer configs the binary log entry to be a ServerTrailer entry. +type ServerTrailer struct { + OnClientSide bool + Trailer metadata.MD + // Err is the status error. + Err error + // PeerAddr is required only when it's on client side and the RPC is trailer + // only. + PeerAddr net.Addr +} + +func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { + st, ok := status.FromError(c.Err) + if !ok { + grpclog.Info("binarylogging: error in trailer is not a status error") + } + var ( + detailsBytes []byte + err error + ) + stProto := st.Proto() + if stProto != nil && len(stProto.Details) != 0 { + detailsBytes, err = proto.Marshal(stProto) + if err != nil { + grpclog.Infof("binarylogging: failed to marshal status proto: %v", err) + } + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &pb.GrpcLogEntry_Trailer{ + Trailer: &pb.Trailer{ + Metadata: mdToMetadataProto(c.Trailer), + StatusCode: uint32(st.Code()), + StatusMessage: st.Message(), + StatusDetails: detailsBytes, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// Cancel configs the binary log entry to be a Cancel entry. +type Cancel struct { + OnClientSide bool +} + +func (c *Cancel) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Payload: nil, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// metadataKeyOmit returns whether the metadata entry with this key should be +// omitted. +func metadataKeyOmit(key string) bool { + switch key { + case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": + return true + case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. + return false + } + return strings.HasPrefix(key, "grpc-") +} + +func mdToMetadataProto(md metadata.MD) *pb.Metadata { + ret := &pb.Metadata{} + for k, vv := range md { + if metadataKeyOmit(k) { + continue + } + for _, v := range vv { + ret.Entry = append(ret.Entry, + &pb.MetadataEntry{ + Key: k, + Value: []byte(v), + }, + ) + } + } + return ret +} + +func addrToProto(addr net.Addr) *pb.Address { + ret := &pb.Address{} + switch a := addr.(type) { + case *net.TCPAddr: + if a.IP.To4() != nil { + ret.Type = pb.Address_TYPE_IPV4 + } else if a.IP.To16() != nil { + ret.Type = pb.Address_TYPE_IPV6 + } else { + ret.Type = pb.Address_TYPE_UNKNOWN + // Do not set address and port fields. 
+ break + } + ret.Address = a.IP.String() + ret.IpPort = uint32(a.Port) + case *net.UnixAddr: + ret.Type = pb.Address_TYPE_UNIX + ret.Address = a.String() + default: + ret.Type = pb.Address_TYPE_UNKNOWN + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh new file mode 100644 index 0000000000000..113d40cbe16c6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2018 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eux -o pipefail + +TMP=$(mktemp -d) + +function finish { + rm -rf "$TMP" +} +trap finish EXIT + +pushd "$TMP" +mkdir -p grpc/binarylog/grpc_binarylog_v1 +curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto + +protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto +popd +rm -f ./grpc_binarylog_v1/*.pb.go +cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/ + diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go new file mode 100644 index 0000000000000..20d044f0fd711 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "sync" + "time" + + "github.com/golang/protobuf/proto" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/grpclog" +) + +var ( + defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). +) + +// SetDefaultSink sets the sink where binary logs will be written to. +// +// Not thread safe. Only set during initialization. +func SetDefaultSink(s Sink) { + if defaultSink != nil { + defaultSink.Close() + } + defaultSink = s +} + +// Sink writes log entry into the binary log sink. +type Sink interface { + // Write will be called to write the log entry into the sink. + // + // It should be thread-safe so it can be called in parallel. + Write(*pb.GrpcLogEntry) error + // Close will be called when the Sink is replaced by a new Sink. 
+ Close() error +} + +type noopSink struct{} + +func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } + +// newWriterSink creates a binary log sink with the given writer. +// +// Write() marshalls the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// No buffer is done, Close() doesn't try to close the writer. +func newWriterSink(w io.Writer) *writerSink { + return &writerSink{out: w} +} + +type writerSink struct { + out io.Writer +} + +func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { + b, err := proto.Marshal(e) + if err != nil { + grpclog.Infof("binary logging: failed to marshal proto message: %v", err) + } + hdr := make([]byte, 4) + binary.BigEndian.PutUint32(hdr, uint32(len(b))) + if _, err := ws.out.Write(hdr); err != nil { + return err + } + if _, err := ws.out.Write(b); err != nil { + return err + } + return nil +} + +func (ws *writerSink) Close() error { return nil } + +type bufWriteCloserSink struct { + mu sync.Mutex + closer io.Closer + out *writerSink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + + writeStartOnce sync.Once + writeTicker *time.Ticker +} + +func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error { + // Start the write loop when Write is called. + fs.writeStartOnce.Do(fs.startFlushGoroutine) + fs.mu.Lock() + if err := fs.out.Write(e); err != nil { + fs.mu.Unlock() + return err + } + fs.mu.Unlock() + return nil +} + +const ( + bufFlushDuration = 60 * time.Second +) + +func (fs *bufWriteCloserSink) startFlushGoroutine() { + fs.writeTicker = time.NewTicker(bufFlushDuration) + go func() { + for range fs.writeTicker.C { + fs.mu.Lock() + fs.buf.Flush() + fs.mu.Unlock() + } + }() +} + +func (fs *bufWriteCloserSink) Close() error { + if fs.writeTicker != nil { + fs.writeTicker.Stop() + } + fs.mu.Lock() + fs.buf.Flush() + fs.closer.Close() + fs.out.Close() + fs.mu.Unlock() + return nil +} + +func newBufWriteCloserSink(o io.WriteCloser) Sink { + bufW := bufio.NewWriter(o) + return &bufWriteCloserSink{ + closer: o, + out: newWriterSink(bufW), + buf: bufW, + } +} + +// NewTempFileSink creates a temp file and returns a Sink that writes to this +// file. +func NewTempFileSink() (Sink, error) { + tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt") + if err != nil { + return nil, fmt.Errorf("failed to create temp file: %v", err) + } + return newBufWriteCloserSink(tempFile), nil +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go new file mode 100644 index 0000000000000..15dc7803d8bff --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/util.go @@ -0,0 +1,41 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
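For orientation, a hedged sketch of how the sink helpers above are meant to be wired together during initialization (not part of the vendored file; error handling kept minimal):

func exampleInstallTempFileSink() error {
	s, err := NewTempFileSink() // writes to /tmp/grpcgo_binarylog_*.txt
	if err != nil {
		return err
	}
	SetDefaultSink(s) // documented as not thread safe; call only during setup
	return nil
}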
+ * + */ + +package binarylog + +import ( + "errors" + "strings" +) + +// parseMethodName splits service and method from the input. It expects format +// "/service/method". +// +// TODO: move to internal/grpcutil. +func parseMethodName(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/BUILD.bazel b/vendor/google.golang.org/grpc/internal/channelz/BUILD.bazel new file mode 100644 index 0000000000000..e9f18f1051a9f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/BUILD.bazel @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "funcs.go", + "types.go", + "types_linux.go", + "types_nonlinux.go", + "util_linux.go", + "util_nonlinux.go", + ], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/internal/channelz", + importpath = "google.golang.org/grpc/internal/channelz", + visibility = ["//vendor/google.golang.org/grpc:__subpackages__"], + deps = [ + "//vendor/google.golang.org/grpc/connectivity:go_default_library", + "//vendor/google.golang.org/grpc/credentials:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go new file mode 100644 index 0000000000000..f0744f9937e94 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -0,0 +1,727 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz defines APIs for enabling channelz service, entry +// registration/deletion, and accessing channelz data. It also defines channelz +// metric struct formats. +// +// All APIs in this package are experimental. +package channelz + +import ( + "fmt" + "sort" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultMaxTraceEntry int32 = 30 +) + +var ( + db dbWrapper + idGen idGenerator + // EntryPerPage defines the number of channelz entries to be shown on a web page. + EntryPerPage = int64(50) + curState int32 + maxTraceEntry = defaultMaxTraceEntry +) + +// TurnOn turns on channelz data collection. +func TurnOn() { + if !IsOn() { + NewChannelzStorage() + atomic.StoreInt32(&curState, 1) + } +} + +// IsOn returns whether channelz data collection is on. 
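A small sketch of the input shape parseMethodName expects (the method name is invented):

func exampleParseMethodName() {
	service, method, err := parseMethodName("/helloworld.Greeter/SayHello")
	// service == "helloworld.Greeter", method == "SayHello", err == nil
	_, _, _ = service, method, err
}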
+func IsOn() bool { + return atomic.CompareAndSwapInt32(&curState, 1, 1) +} + +// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). +// Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + +// dbWarpper wraps around a reference to internal channelz data storage, and +// provide synchronized functionality to set and get the reference. +type dbWrapper struct { + mu sync.RWMutex + DB *channelMap +} + +func (d *dbWrapper) set(db *channelMap) { + d.mu.Lock() + d.DB = db + d.mu.Unlock() +} + +func (d *dbWrapper) get() *channelMap { + d.mu.RLock() + defer d.mu.RUnlock() + return d.DB +} + +// NewChannelzStorage initializes channelz data storage and id generator. +// +// This function returns a cleanup function to wait for all channelz state to be reset by the +// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests +// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen +// to remove some entity just register by the new test, since the id space is the same. +// +// Note: This function is exported for testing purpose only. User should not call +// it in most cases. +func NewChannelzStorage() (cleanup func() error) { + db.set(&channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + }) + idGen.reset() + return func() error { + var err error + cm := db.get() + if cm == nil { + return nil + } + for i := 0; i < 1000; i++ { + cm.mu.Lock() + if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { + cm.mu.Unlock() + // all things stored in the channelz map have been cleared. + return nil + } + cm.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + cm.mu.Lock() + err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) + cm.mu.Unlock() + return err + } +} + +// GetTopChannels returns a slice of top channel's ChannelMetric, along with a +// boolean indicating whether there's more top channels to be queried for. +// +// The arg id specifies that only top channel with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + return db.get().GetTopChannels(id, maxResults) +} + +// GetServers returns a slice of server's ServerMetric, along with a +// boolean indicating whether there's more servers to be queried for. +// +// The arg id specifies that only server with id at or above it will be included +// in the result. 
The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { + return db.get().GetServers(id, maxResults) +} + +// GetServerSockets returns a slice of server's (identified by id) normal socket's +// SocketMetric, along with a boolean indicating whether there's more sockets to +// be queried for. +// +// The arg startID specifies that only sockets with id at or above it will be +// included in the result. The returned slice is up to a length of the arg maxResults +// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + return db.get().GetServerSockets(id, startID, maxResults) +} + +// GetChannel returns the ChannelMetric for the channel (identified by id). +func GetChannel(id int64) *ChannelMetric { + return db.get().GetChannel(id) +} + +// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannelMetric { + return db.get().GetSubChannel(id) +} + +// GetSocket returns the SocketInternalMetric for the socket (identified by id). +func GetSocket(id int64) *SocketMetric { + return db.get().GetSocket(id) +} + +// GetServer returns the ServerMetric for the server (identified by id). +func GetServer(id int64) *ServerMetric { + return db.get().GetServer(id) +} + +// RegisterChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). pid = 0 means no parent. It returns the unique channelz tracking id +// assigned to this channel. +func RegisterChannel(c Channel, pid int64, ref string) int64 { + id := idGen.genID() + cn := &channel{ + refName: ref, + c: c, + subChans: make(map[int64]string), + nestedChans: make(map[int64]string), + id: id, + pid: pid, + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + } + if pid == 0 { + db.get().addChannel(id, cn, true, pid, ref) + } else { + db.get().addChannel(id, cn, false, pid, ref) + } + return id +} + +// RegisterSubChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). It returns the unique channelz tracking id assigned to this subchannel. +func RegisterSubChannel(c Channel, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a SubChannel's parent id cannot be 0") + return 0 + } + id := idGen.genID() + sc := &subChannel{ + refName: ref, + c: c, + sockets: make(map[int64]string), + id: id, + pid: pid, + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + } + db.get().addSubChannel(id, sc, pid, ref) + return id +} + +// RegisterServer registers the given server s in channelz database. It returns +// the unique channelz tracking id assigned to this server. +func RegisterServer(s Server, ref string) int64 { + id := idGen.genID() + svr := &server{ + refName: ref, + s: s, + sockets: make(map[int64]string), + listenSockets: make(map[int64]string), + id: id, + } + db.get().addServer(id, svr) + return id +} + +// RegisterListenSocket registers the given listen socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). 
It returns the unique channelz tracking id assigned to +// this listen socket. +func RegisterListenSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a ListenSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addListenSocket(id, ls, pid, ref) + return id +} + +// RegisterNormalSocket registers the given normal socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this normal socket. +func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a NormalSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addNormalSocket(id, ns, pid, ref) + return id +} + +// RemoveEntry removes an entry with unique channelz trakcing id to be id from +// channelz database. +func RemoveEntry(id int64) { + db.get().removeEntry(id) +} + +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added +// to the channel trace. +// The Parent field is optional. It is used for event that will be recorded in the entity's parent +// trace also. +type TraceEventDesc struct { + Desc string + Severity Severity + Parent *TraceEventDesc +} + +// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. +func AddTraceEvent(id int64, desc *TraceEventDesc) { + if getMaxTraceEntry() == 0 { + return + } + db.get().traceEvent(id, desc) +} + +// channelMap is the storage data structure for channelz. +// Methods of channelMap can be divided in two two categories with respect to locking. +// 1. Methods acquire the global lock. +// 2. Methods that can only be called when global lock is held. +// A second type of method need always to be called inside a first type of method. +type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + servers map[int64]*server + channels map[int64]*channel + subChannels map[int64]*subChannel + listenSockets map[int64]*listenSocket + normalSockets map[int64]*normalSocket +} + +func (c *channelMap) addServer(id int64, s *server) { + c.mu.Lock() + s.cm = c + c.servers[id] = s + c.mu.Unlock() +} + +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { + c.mu.Lock() + cn.cm = c + cn.trace.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else { + c.findEntry(pid).addChild(id, cn) + } + c.mu.Unlock() +} + +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { + c.mu.Lock() + sc.cm = c + sc.trace.cm = c + c.subChannels[id] = sc + c.findEntry(pid).addChild(id, sc) + c.mu.Unlock() +} + +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { + c.mu.Lock() + ls.cm = c + c.listenSockets[id] = ls + c.findEntry(pid).addChild(id, ls) + c.mu.Unlock() +} + +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { + c.mu.Lock() + ns.cm = c + c.normalSockets[id] = ns + c.findEntry(pid).addChild(id, ns) + c.mu.Unlock() +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to +// wait on the deletion of its children and until no other entity's channel trace references it. 
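To show how the registration functions above nest parent and child entities, here is a hypothetical sketch (reference names invented; in practice grpc's server and transport internals make these calls):

func exampleRegisterServerTree(s Server, lis Socket) {
	srvID := RegisterServer(s, "example-server")
	lisID := RegisterListenSocket(lis, srvID, "example-listener")
	// ... serve traffic ...
	RemoveEntry(lisID) // children first; the server entry is held until they are gone
	RemoveEntry(srvID)
}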
+// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully +// shutting down server will lead to the server being also deleted. +func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + c.findEntry(id).triggerDelete() + c.mu.Unlock() +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + var v entry + var ok bool + if v, ok = c.channels[id]; ok { + return v + } + if v, ok = c.subChannels[id]; ok { + return v + } + if v, ok = c.servers[id]; ok { + return v + } + if v, ok = c.listenSockets[id]; ok { + return v + } + if v, ok = c.normalSockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// deleteEntry simply deletes an entry from the channelMap. Before calling this +// method, caller must check this entry is ready to be deleted, i.e removeEntry() +// has been called on it, and no children still exist. +// Conditionals are ordered by the expected frequency of deletion of each entity +// type, in order to optimize performance. +func (c *channelMap) deleteEntry(id int64) { + var ok bool + if _, ok = c.normalSockets[id]; ok { + delete(c.normalSockets, id) + return + } + if _, ok = c.subChannels[id]; ok { + delete(c.subChannels, id) + return + } + if _, ok = c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return + } + if _, ok = c.listenSockets[id]; ok { + delete(c.listenSockets, id) + return + } + if _, ok = c.servers[id]; ok { + delete(c.servers, id) + return + } +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { + c.mu.Lock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + c.mu.Unlock() + return + } + childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *channel: + chanType = RefChannel + case *subChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&TraceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } + c.mu.Unlock() +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + cns := make([]*channel, 0, min(l, maxResults)) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var t []*ChannelMetric + for i, v := range ids[idx:] 
{ + if count == maxResults { + break + } + if cn, ok := c.channels[v]; ok { + cns = append(cns, cn) + t = append(t, &ChannelMetric{ + NestedChans: copyMap(cn.nestedChans), + SubChans: copyMap(cn.subChans), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, cn := range cns { + t[i].ChannelData = cn.c.ChannelzMetric() + t[i].ID = cn.id + t[i].RefName = cn.refName + t[i].Trace = cn.trace.dumpData() + } + return t, end +} + +func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.servers)) + ids := make([]int64, 0, l) + ss := make([]*server, 0, min(l, maxResults)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var s []*ServerMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if svr, ok := c.servers[v]; ok { + ss = append(ss, svr) + s = append(s, &ServerMetric{ + ListenSockets: copyMap(svr.listenSockets), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, svr := range ss { + s[i].ServerData = svr.s.ChannelzMetric() + s[i].ID = svr.id + s[i].RefName = svr.refName + } + return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + // server with id doesn't exist. + c.mu.RUnlock() + return nil, true + } + svrskts := svr.sockets + l := int64(len(svrskts)) + ids := make([]int64, 0, l) + sks := make([]*normalSocket, 0, min(l, maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + count := int64(0) + var end bool + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if ns, ok := c.normalSockets[v]; ok { + sks = append(sks, ns) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + var s []*SocketMetric + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + s = append(s, sm) + } + return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { + cm := &ChannelMetric{} + var cn *channel + var ok bool + c.mu.RLock() + if cn, ok = c.channels[id]; !ok { + // channel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.NestedChans = copyMap(cn.nestedChans) + cm.SubChans = copyMap(cn.subChans) + // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when + // holding the lock to prevent potential data race. + chanCopy := cn.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = cn.id + cm.RefName = cn.refName + cm.Trace = cn.trace.dumpData() + return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { + cm := &SubChannelMetric{} + var sc *subChannel + var ok bool + c.mu.RLock() + if sc, ok = c.subChannels[id]; !ok { + // subchannel with id doesn't exist. 
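The (id, maxResults) paging contract used by GetTopChannels, GetServers, and GetServerSockets can be consumed roughly as follows (a hypothetical sketch; the helper name is invented):

func exampleWalkTopChannels() []*ChannelMetric {
	var all []*ChannelMetric
	var start int64
	for {
		page, end := GetTopChannels(start, 0) // 0 falls back to EntryPerPage
		all = append(all, page...)
		if end || len(page) == 0 {
			break
		}
		start = page[len(page)-1].ID + 1
	}
	return all
}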
+ c.mu.RUnlock() + return nil + } + cm.Sockets = copyMap(sc.sockets) + // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when + // holding the lock to prevent potential data race. + chanCopy := sc.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + cm.Trace = sc.trace.dumpData() + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +func (c *channelMap) GetServer(id int64) *ServerMetric { + sm := &ServerMetric{} + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + c.mu.RUnlock() + return nil + } + sm.ListenSockets = copyMap(svr.listenSockets) + c.mu.RUnlock() + sm.ID = svr.id + sm.RefName = svr.refName + sm.ServerData = svr.s.ChannelzMetric() + return sm +} + +type idGenerator struct { + id int64 +} + +func (i *idGenerator) reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *idGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go new file mode 100644 index 0000000000000..17c2274cb3de5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -0,0 +1,702 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "net" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +// entry represents a node in the channelz database. +type entry interface { + // addChild adds a child e, whose channelz id is id to child list + addChild(id int64, e entry) + // deleteChild deletes a child with channelz id to be id from child list + deleteChild(id int64) + // triggerDelete tries to delete self from channelz database. However, if child + // list is not empty, then deletion from the database is on hold until the last + // child is deleted from database. + triggerDelete() + // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child + // list is now empty. If both conditions are met, then delete self from database. + deleteSelfIfReady() + // getParentID returns parent ID of the entry. 0 value parent ID means no parent. + getParentID() int64 +} + +// dummyEntry is a fake entry to handle entry not found case. +type dummyEntry struct { + idNotFound int64 +} + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race condition. 
+ // For example, there could be a race between ClientConn.Close() info being propagated + // to addrConn and http2Client. ClientConn.Close() cancel the context and result + // in http2Client to error. The error info is then caught by transport monitor + // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, + // the addrConn will create a new transport. And when registering the new transport in + // channelz, its parent addrConn could have already been torn down and deleted + // from channelz tracking, and thus reach the code here. + grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). + grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +func (*dummyEntry) getParentID() int64 { + return 0 +} + +// ChannelMetric defines the info channelz provides for a specific Channel, which +// includes ChannelInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ChannelMetric struct { + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + // ChannelData contains channel internal metric reported by the channel through + // ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this channel in the format of + // a map from nested channel channelz id to corresponding reference string. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this channel in the format of a + // map from subchannel channelz id to corresponding reference string. + SubChans map[int64]string + // Sockets tracks the socket type children of this channel in the format of a map + // from socket channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow channel having sockets directly, + // therefore, this is field is unused. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// SubChannelMetric defines the info channelz provides for a specific SubChannel, +// which includes ChannelInternalMetric and channelz-specific data, such as +// channelz id, child list, etc. +type SubChannelMetric struct { + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + // ChannelData contains subchannel internal metric reported by the subchannel + // through ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this subchannel in the format of + // a map from nested channel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have nested channels + // as children, therefore, this field is unused. 
+ NestedChans map[int64]string + // SubChans tracks the subchannel type children of this subchannel in the format of a + // map from subchannel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have subchannels + // as children, therefore, this field is unused. + SubChans map[int64]string + // Sockets tracks the socket type children of this subchannel in the format of a map + // from socket channelz id to corresponding reference string. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// ChannelInternalMetric defines the struct that the implementor of Channel interface +// should return from ChannelzMetric(). +type ChannelInternalMetric struct { + // current connectivity state of the channel. + State connectivity.State + // The target this channel originally tried to connect to. May be absent + Target string + // The number of calls started on the channel. + CallsStarted int64 + // The number of calls that have completed with an OK status. + CallsSucceeded int64 + // The number of calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the channel. + LastCallStartedTimestamp time.Time +} + +// ChannelTrace stores traced events on a channel/subchannel and related info. +type ChannelTrace struct { + // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) + EventNum int64 + // CreationTime is the creation time of the trace. + CreationTime time.Time + // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the + // oldest one) + Events []*TraceEvent +} + +// TraceEvent represent a single trace event +type TraceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// Channel is the interface that should be satisfied in order to be tracked by +// channelz as Channel or SubChannel. +type Channel interface { + ChannelzMetric() *ChannelInternalMetric +} + +type dummyChannel struct{} + +func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { + return &ChannelInternalMetric{} +} + +type channel struct { + refName string + c Channel + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + // traceRefCount is the number of trace events that reference this channel. + // Non-zero traceRefCount means the trace of this channel cannot be deleted. 
+ traceRefCount int32 +} + +func (c *channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *subChannel: + c.subChans[id] = v.refName + case *channel: + c.nestedChans[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *channel) getParentID() int64 { + return c.pid +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (c *channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *channel) deleteSelfFromMap() (delete bool) { + if c.getTraceRefCount() != 0 { + c.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. 
+func (c *channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + c.cm.deleteEntry(c.id) + c.trace.clear() +} + +func (c *channel) getChannelTrace() *channelTrace { + return c.trace +} + +func (c *channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *channel) getRefName() string { + return c.refName +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + traceRefCount int32 +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) getParentID() int64 { + return sc.pid +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (sc *subChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.cm.findEntry(sc.pid).deleteChild(sc.id) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *subChannel) deleteSelfFromMap() (delete bool) { + if sc.getTraceRefCount() != 0 { + // free the grpc struct (i.e. addrConn) + sc.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. 
+func (sc *subChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + sc.cm.deleteEntry(sc.id) + sc.trace.clear() +} + +func (sc *subChannel) getChannelTrace() *channelTrace { + return sc.trace +} + +func (sc *subChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *subChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *subChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *subChannel) getRefName() string { + return sc.refName +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp time.Time + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. + RemoteName string + SocketOptions *SocketOptionData + Security credentials.ChannelzSecurityValue +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. 
+type Socket interface { + ChannelzMetric() *SocketInternalMetric +} + +type listenSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ls *listenSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *listenSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *listenSocket) triggerDelete() { + ls.cm.deleteEntry(ls.id) + ls.cm.findEntry(ls.pid).deleteChild(ls.id) +} + +func (ls *listenSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *listenSocket) getParentID() int64 { + return ls.pid +} + +type normalSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ns *normalSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) +} + +func (ns *normalSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) +} + +func (ns *normalSocket) triggerDelete() { + ns.cm.deleteEntry(ns.id) + ns.cm.findEntry(ns.pid).deleteChild(ns.id) +} + +func (ns *normalSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") +} + +func (ns *normalSocket) getParentID() int64 { + return ns.pid +} + +// ServerMetric defines the info channelz provides for a specific Server, which +// includes ServerInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ServerMetric struct { + // ID is the channelz id of this server. + ID int64 + // RefName is the human readable reference string of this server. + RefName string + // ServerData contains server internal metric reported by the server through + // ChannelzMetric(). + ServerData *ServerInternalMetric + // ListenSockets tracks the listener socket type children of this server in the + // format of a map from socket channelz id to corresponding reference string. + ListenSockets map[int64]string +} + +// ServerInternalMetric defines the struct that the implementor of Server interface +// should return from ChannelzMetric(). +type ServerInternalMetric struct { + // The number of incoming calls started on the server. + CallsStarted int64 + // The number of incoming calls that have completed with an OK status. + CallsSucceeded int64 + // The number of incoming calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the server. + LastCallStartedTimestamp time.Time +} + +// Server is the interface to be satisfied in order to be tracked by channelz as +// Server. 
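Editorial aside, not part of the vendored diff: a minimal sketch of how a transport-side type might satisfy the channelz Socket interface defined above (the Server interface declared next follows the same ChannelzMetric pattern). The wrappedConn type and its single counter are hypothetical, a real transport would report live values for many more SocketInternalMetric fields; assumed imports are net and sync/atomic.

type wrappedConn struct {
	conn           net.Conn
	streamsStarted int64 // incremented elsewhere via atomic.AddInt64
}

// ChannelzMetric reports a snapshot of this socket's state to channelz.
func (w *wrappedConn) ChannelzMetric() *SocketInternalMetric {
	return &SocketInternalMetric{
		StreamsStarted: atomic.LoadInt64(&w.streamsStarted),
		LocalAddr:      w.conn.LocalAddr(),
		RemoteAddr:     w.conn.RemoteAddr(),
	}
}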
+type Server interface { + ChannelzMetric() *ServerInternalMetric +} + +type server struct { + refName string + s Server + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + id int64 + cm *channelMap +} + +func (s *server) addChild(id int64, e entry) { + switch v := e.(type) { + case *normalSocket: + s.sockets[id] = v.refName + case *listenSocket: + s.listenSockets[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.id) +} + +func (s *server) getParentID() int64 { + return 0 +} + +type tracedChannel interface { + getChannelTrace() *channelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +type channelTrace struct { + cm *channelMap + createdTime time.Time + eventCount int64 + mu sync.Mutex + events []*TraceEvent +} + +func (c *channelTrace) append(e *TraceEvent) { + c.mu.Lock() + if len(c.events) == getMaxTraceEntry() { + del := c.events[0] + c.events = c.events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.events = append(c.events, e) + c.eventCount++ + c.mu.Unlock() +} + +func (c *channelTrace) clear() { + c.mu.Lock() + for _, e := range c.events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUNKNOWN indicates unknown severity of a trace event. + CtUNKNOWN Severity = iota + // CtINFO indicates info level severity of a trace event. + CtINFO + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. + CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefChannel indicates the referenced entity is a Channel. + RefChannel RefChannelType = iota + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel +) + +func (c *channelTrace) dumpData() *ChannelTrace { + c.mu.Lock() + ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} + ct.Events = c.events[:len(c.events)] + c.mu.Unlock() + return ct +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 0000000000000..692dd61817788 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,53 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 0000000000000..79edbefc43318 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,44 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "sync" + + "google.golang.org/grpc/grpclog" +) + +var once sync.Once + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +// Windows OS doesn't support Socket Option +type SocketOptionData struct { +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +// Windows OS doesn't support Socket Option +func (s *SocketOptionData) Getsockopt(fd uintptr) { + once.Do(func() { + grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") + }) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go new file mode 100644 index 0000000000000..fdf409d55de3b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -0,0 +1,39 @@ +// +build linux,!appengine + +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" +) + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(socket interface{}) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go new file mode 100644 index 0000000000000..8864a08111642 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -0,0 +1,26 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(c interface{}) *SocketOptionData { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/BUILD.bazel b/vendor/google.golang.org/grpc/internal/envconfig/BUILD.bazel new file mode 100644 index 0000000000000..bab6d800f5398 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["envconfig.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/internal/envconfig", + importpath = "google.golang.org/grpc/internal/envconfig", + visibility = ["//vendor/google.golang.org/grpc:__subpackages__"], +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 0000000000000..3ee8740f1f93c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,35 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( + "os" + "strings" +) + +const ( + prefix = "GRPC_GO_" + retryStr = prefix + "RETRY" +) + +var ( + // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". + Retry = strings.EqualFold(os.Getenv(retryStr), "on") +) diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/BUILD.bazel b/vendor/google.golang.org/grpc/internal/grpcrand/BUILD.bazel new file mode 100644 index 0000000000000..ce1490bc34e48 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["grpcrand.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/internal/grpcrand", + importpath = "google.golang.org/grpc/internal/grpcrand", + visibility = ["//vendor/google.golang.org/grpc:__subpackages__"], +) diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 0000000000000..200b115ca209e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand" + "sync" + "time" +) + +var ( + r = rand.New(rand.NewSource(time.Now().UnixNano())) + mu sync.Mutex +) + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + res := r.Int63n(n) + mu.Unlock() + return res +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + res := r.Intn(n) + mu.Unlock() + return res +} + +// Float64 implements rand.Float64 on the grpcrand global source. 
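Editorial aside, not part of the vendored diff: a small sketch of how a caller inside grpc might combine the two helper packages above, gating retries on the GRPC_GO_RETRY flag and drawing jitter from the concurrency-safe grpcrand source. The backoffWithJitter helper and the roughly-20% jitter policy are illustrative assumptions, not grpc's actual retry logic; assumed imports are time plus the envconfig and grpcrand packages shown above.

func backoffWithJitter(base time.Duration) time.Duration {
	if !envconfig.Retry { // retries stay off unless GRPC_GO_RETRY=on
		return 0
	}
	// Add up to ~20% jitter; grpcrand.Int63n is safe to call from many goroutines.
	jitter := time.Duration(grpcrand.Int63n(int64(base)/5 + 1))
	return base + jitter
}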
+func Float64() float64 { + mu.Lock() + res := r.Float64() + mu.Unlock() + return res +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/BUILD.bazel b/vendor/google.golang.org/grpc/internal/grpcsync/BUILD.bazel new file mode 100644 index 0000000000000..9b9ca4782ebcf --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["event.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/internal/grpcsync", + importpath = "google.golang.org/grpc/internal/grpcsync", + visibility = ["//vendor/google.golang.org/grpc:__subpackages__"], +) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go new file mode 100644 index 0000000000000..fbe697c37684c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcsync implements additional synchronization primitives built upon +// the sync package. +package grpcsync + +import ( + "sync" + "sync/atomic" +) + +// Event represents a one-time event that may occur in the future. +type Event struct { + fired int32 + c chan struct{} + o sync.Once +} + +// Fire causes e to complete. It is safe to call multiple times, and +// concurrently. It returns true iff this call to Fire caused the signaling +// channel returned by Done to close. +func (e *Event) Fire() bool { + ret := false + e.o.Do(func() { + atomic.StoreInt32(&e.fired, 1) + close(e.c) + ret = true + }) + return ret +} + +// Done returns a channel that will be closed when Fire is called. +func (e *Event) Done() <-chan struct{} { + return e.c +} + +// HasFired returns true if Fire has been called. +func (e *Event) HasFired() bool { + return atomic.LoadInt32(&e.fired) == 1 +} + +// NewEvent returns a new, ready-to-use Event. +func NewEvent() *Event { + return &Event{c: make(chan struct{})} +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 0000000000000..bc1f99ac80306 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,71 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
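Editorial aside, not part of the vendored diff: a usage sketch for the grpcsync.Event type above as a one-shot shutdown signal shared between goroutines. The runWorker function is hypothetical and assumes imports of time and the grpcsync package.

func runWorker(stop *grpcsync.Event) {
	for {
		select {
		case <-stop.Done(): // closed by the first effective Fire()
			return
		case <-time.After(time.Second):
			// periodic work would go here; stop.HasFired() can also be
			// polled from code that has no select loop.
		}
	}
}

// Caller side:
//   stop := grpcsync.NewEvent()
//   go runWorker(stop)
//   ...
//   stop.Fire() // returns true only for the call that actually fired the event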
+ * + */ + +// Package internal contains gRPC-internal code, to avoid polluting +// the godoc of the top-level grpc package. It must not import any grpc +// symbols to avoid circular dependencies. +package internal + +import ( + "context" + "time" + + "google.golang.org/grpc/connectivity" +) + +var ( + // WithResolverBuilder is exported by dialoptions.go + WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption + // WithHealthCheckFunc is not exported by dialoptions.go + WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + // HealthCheckFunc is used to provide client-side LB channel health checking + HealthCheckFunc HealthChecker + // BalancerUnregister is exported by package balancer to unregister a balancer. + BalancerUnregister func(name string) + // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by + // default, but tests may wish to set it lower for convenience. + KeepaliveMinPingTime = 10 * time.Second + // ParseServiceConfig is a function to parse JSON service configs into + // opaque data structures. + ParseServiceConfig func(sc string) (interface{}, error) + // StatusRawProto is exported by status/status.go. This func returns a + // pointer to the wrapped Status proto for a given status.Status without a + // call to proto.Clone(). The returned Status proto should not be mutated by + // the caller. + StatusRawProto interface{} // func (*status.Status) *spb.Status +) + +// HealthChecker defines the signature of the client-side LB channel health checking function. +// +// The implementation is expected to create a health checking RPC stream by +// calling newStream(), watch for the health status of serviceName, and report +// it's health back by calling setConnectivityState(). +// +// The health checking protocol is defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error + +const ( + // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. + CredsBundleModeFallback = "fallback" + // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer + // mode. + CredsBundleModeBalancer = "balancer" + // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode + // that supports backend returned by grpclb balancer. 
+ CredsBundleModeBackendFromBalancer = "backend-from-balancer" +) diff --git a/vendor/google.golang.org/grpc/internal/syscall/BUILD.bazel b/vendor/google.golang.org/grpc/internal/syscall/BUILD.bazel new file mode 100644 index 0000000000000..03c7296c81702 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/BUILD.bazel @@ -0,0 +1,53 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "syscall_linux.go", + "syscall_nonlinux.go", + ], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/internal/syscall", + importpath = "google.golang.org/grpc/internal/syscall", + visibility = ["//vendor/google.golang.org/grpc:__subpackages__"], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:ios": [ + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + ], + "//conditions:default": [], + }), +) diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go new file mode 100644 index 0000000000000..43281a3e078db --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -0,0 +1,114 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package syscall provides functionalities that grpc uses to get low-level operating system +// stats/info. +package syscall + +import ( + "fmt" + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" + "google.golang.org/grpc/grpclog" +) + +// GetCPUTime returns the how much CPU time has passed since the start of this process. 
+func GetCPUTime() int64 { + var ts unix.Timespec + if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { + grpclog.Fatal(err) + } + return ts.Nano() +} + +// Rusage is an alias for syscall.Rusage under linux non-appengine environment. +type Rusage syscall.Rusage + +// GetRusage returns the resource usage of current process. +func GetRusage() (rusage *Rusage) { + rusage = new(Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage)) + return +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + f := (*syscall.Rusage)(first) + l := (*syscall.Rusage)(latest) + var ( + utimeDiffs = l.Utime.Sec - f.Utime.Sec + utimeDiffus = l.Utime.Usec - f.Utime.Usec + stimeDiffs = l.Stime.Sec - f.Stime.Sec + stimeDiffus = l.Stime.Usec - f.Stime.Usec + ) + + uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 + sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 + + return uTimeElapsed, sTimeElapsed +} + +// SetTCPUserTimeout sets the TCP user timeout on a connection's socket +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + // not a TCP connection. exit early + return nil + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + return fmt.Errorf("error getting raw connection: %v", err) + } + err = rawConn.Control(func(fd uintptr) { + err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) + }) + if err != nil { + return fmt.Errorf("error setting option on socket: %v", err) + } + + return nil +} + +// GetTCPUserTimeout gets the TCP user timeout on a connection's socket +func GetTCPUserTimeout(conn net.Conn) (opt int, err error) { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn) + return + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + err = fmt.Errorf("error getting raw connection: %v", err) + return + } + err = rawConn.Control(func(fd uintptr) { + opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT) + }) + if err != nil { + err = fmt.Errorf("error getting option on socket: %v", err) + return + } + + return +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go new file mode 100644 index 0000000000000..d3fd9dab33317 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -0,0 +1,73 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
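Editorial aside, not part of the vendored diff, and assuming the linux build of this package shown above: a sketch of measuring user/system CPU time around a block of work with GetRusage and CPUTimeDiff, then applying a TCP user timeout to a dialed connection. The example function, the stand-in workload, the address argument, and the 20s timeout are hypothetical; assumed imports are net, time, and grpclog.

func example(addr string) error {
	before := GetRusage()
	sum := 0
	for i := 0; i < 1e7; i++ { // stand-in workload being measured
		sum += i
	}
	_ = sum
	user, sys := CPUTimeDiff(before, GetRusage())
	grpclog.Infof("workload used user=%.3fs sys=%.3fs of CPU time", user, sys)

	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return err
	}
	defer conn.Close()
	// Fail pending writes if the peer stops acknowledging data for 20s.
	return SetTCPUserTimeout(conn, 20*time.Second)
}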
+ * + */ + +package syscall + +import ( + "net" + "sync" + "time" + + "google.golang.org/grpc/grpclog" +) + +var once sync.Once + +func log() { + once.Do(func() { + grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.") + }) +} + +// GetCPUTime returns the how much CPU time has passed since the start of this process. +// It always returns 0 under non-linux or appengine environment. +func GetCPUTime() int64 { + log() + return 0 +} + +// Rusage is an empty struct under non-linux or appengine environment. +type Rusage struct{} + +// GetRusage is a no-op function under non-linux or appengine environment. +func GetRusage() (rusage *Rusage) { + log() + return nil +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. It a no-op function for non-linux or appengine environment. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + log() + return 0, 0 +} + +// SetTCPUserTimeout is a no-op function under non-linux or appengine environments +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + log() + return nil +} + +// GetTCPUserTimeout is a no-op function under non-linux or appengine environments +// a negative return value indicates the operation is not supported +func GetTCPUserTimeout(conn net.Conn) (int, error) { + log() + return -1, nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/BUILD.bazel b/vendor/google.golang.org/grpc/internal/transport/BUILD.bazel new file mode 100644 index 0000000000000..9226237ad0acd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/BUILD.bazel @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "bdp_estimator.go", + "controlbuf.go", + "defaults.go", + "flowcontrol.go", + "handler_server.go", + "http2_client.go", + "http2_server.go", + "http_util.go", + "log.go", + "transport.go", + ], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/internal/transport", + importpath = "google.golang.org/grpc/internal/transport", + visibility = ["//vendor/google.golang.org/grpc:__subpackages__"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/http2:go_default_library", + "//vendor/golang.org/x/net/http2/hpack:go_default_library", + "//vendor/google.golang.org/genproto/googleapis/rpc/status:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/credentials:go_default_library", + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/internal:go_default_library", + "//vendor/google.golang.org/grpc/internal/channelz:go_default_library", + "//vendor/google.golang.org/grpc/internal/grpcrand:go_default_library", + "//vendor/google.golang.org/grpc/internal/syscall:go_default_library", + "//vendor/google.golang.org/grpc/keepalive:go_default_library", + "//vendor/google.golang.org/grpc/metadata:go_default_library", + "//vendor/google.golang.org/grpc/peer:go_default_library", + "//vendor/google.golang.org/grpc/stats:go_default_library", + "//vendor/google.golang.org/grpc/status:go_default_library", + "//vendor/google.golang.org/grpc/tap:go_default_library", + ], +) diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go new file mode 100644 index 0000000000000..070680edbac12 --- /dev/null +++ 
b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync" + "time" +) + +const ( + // bdpLimit is the maximum value the flow control windows will be increased + // to. TCP typically limits this to 4MB, but some systems go up to 16MB. + // Since this is only a limit, it is safe to make it optimistic. + bdpLimit = (1 << 20) * 16 + // alpha is a constant factor used to keep a moving average + // of RTTs. + alpha = 0.9 + // If the current bdp sample is greater than or equal to + // our beta * our estimated bdp and the current bandwidth + // sample is the maximum bandwidth observed so far, we + // increase our bbp estimate by a factor of gamma. + beta = 0.66 + // To put our bdp to be smaller than or equal to twice the real BDP, + // we should multiply our current sample with 4/3, however to round things out + // we use 2 as the multiplication factor. + gamma = 2 +) + +// Adding arbitrary data to ping so that its ack can be identified. +// Easter-egg: what does the ping message say? +var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} + +type bdpEstimator struct { + // sentAt is the time when the ping was sent. + sentAt time.Time + + mu sync.Mutex + // bdp is the current bdp estimate. + bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. 
+ if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. + if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go new file mode 100644 index 0000000000000..b8e0aa4db275f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -0,0 +1,930 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "fmt" + "runtime" + "sync" + "sync/atomic" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + e.SetMaxDynamicTableSizeLimit(v) +} + +type itemNode struct { + it interface{} + next *itemNode +} + +type itemList struct { + head *itemNode + tail *itemNode +} + +func (il *itemList) enqueue(i interface{}) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n + return + } + il.tail.next = n + il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. +func (il *itemList) peek() interface{} { + return il.head.it +} + +func (il *itemList) dequeue() interface{} { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// maxQueuedTransportResponseFrames is the most queued "transport response" +// frames we will buffer before preventing new reads from occurring on the +// transport. 
These are control frames sent in response to client requests, +// such as RST_STREAM due to bad headers or settings acks. +const maxQueuedTransportResponseFrames = 50 + +type cbItem interface { + isTransportResponseFrame() bool +} + +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { + streamID uint32 + wq *writeQuota +} + +func (*registerStream) isTransportResponseFrame() bool { return false } + +// headerFrame is also used to register stream on the client-side. +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) (bool, error) // Used only on the client side. + onWrite func() + wq *writeQuota // write quota for the stream created. + cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +func (h *headerFrame) isTransportResponseFrame() bool { + return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM +} + +type cleanupStream struct { + streamID uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + d []byte + // onEachWrite is called every time + // a part of d is written out. + onEachWrite func() +} + +func (*dataFrame) isTransportResponseFrame() bool { return false } + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { + return false // window updates are throttled by thresholds +} + +type incomingSettings struct { + ss []http2.Setting +} + +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + +type outgoingSettings struct { + ss []http2.Setting +} + +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + +type incomingGoAway struct { +} + +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn bool +} + +func (*goAway) isTransportResponseFrame() bool { return false } + +type ping struct { + ack bool + data [8]byte +} + +func (*ping) isTransportResponseFrame() bool { return true } + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. 
+ head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. +// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +type controlBuffer struct { + ch chan struct{} + done <-chan struct{} + mu sync.Mutex + consumerWaiting bool + list *itemList + err error + + // transportResponseFrames counts the number of queued items that represent + // the response of an action initiated by the peer. trfChan is created + // when transportResponseFrames >= maxQueuedTransportResponseFrames and is + // closed and nilled when transportResponseFrames drops below the + // threshold. Both fields are protected by mu. + transportResponseFrames int + trfChan atomic.Value // *chan struct{} +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + ch: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +// throttle blocks if there are too many incomingSettings/cleanupStreams in the +// controlbuf. +func (c *controlBuffer) throttle() { + ch, _ := c.trfChan.Load().(*chan struct{}) + if ch != nil { + select { + case <-*ch: + case <-c.done: + } + } +} + +func (c *controlBuffer) put(it cbItem) error { + _, err := c.executeAndPut(nil, it) + return err +} + +func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if f != nil { + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + } + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + if it.isTransportResponseFrame() { + c.transportResponseFrames++ + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are adding the frame that puts us over the threshold; create + // a throttling channel. + ch := make(chan struct{}) + c.trfChan.Store(&ch) + } + } + c.mu.Unlock() + if wakeUp { + select { + case c.ch <- struct{}{}: + default: + } + } + return true, nil +} + +// Note argument f should never be nil. 
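Editorial aside, not part of the vendored diff: a sketch of the producer/consumer pattern around controlBuffer described above. Transport handlers put control items while the loopy writer drains them with a blocking get; the sketch elides error handling and the actual frame writing, and the function name is hypothetical.

func controlBufferSketch(done <-chan struct{}) {
	cbuf := newControlBuffer(done)

	// Producer side (e.g. a frame handler): queue a ping to be written out.
	_ = cbuf.put(&ping{data: [8]byte{}})

	// Consumer side (loopy): block until an item is available, then handle it.
	go func() {
		for {
			it, err := cbuf.get(true) // returns ErrConnClosing once done is closed
			if err != nil {
				return
			}
			switch it.(type) {
			case *ping:
				// a real consumer would write a PING frame on the wire here
			}
		}
	}()
}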
+func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + c.mu.Unlock() + return true, nil +} + +func (c *controlBuffer) get(block bool) (interface{}, error) { + for { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return nil, c.err + } + if !c.list.isEmpty() { + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Load().(*chan struct{}) + close(*ch) + c.trfChan.Store((*chan struct{})(nil)) + } + c.transportResponseFrames-- + } + c.mu.Unlock() + return h, nil + } + if !block { + c.mu.Unlock() + return nil, nil + } + c.consumerWaiting = true + c.mu.Unlock() + select { + case <-c.ch: + case <-c.done: + c.finish() + return nil, ErrConnClosing + } + } +} + +func (c *controlBuffer) finish() { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + c.err = ErrConnClosing + // There may be headers for streams in the control buffer. + // These streams need to be cleaned out since the transport + // is still not aware of these yet. + for head := c.list.dequeueAll(); head != nil; head = head.next { + hdr, ok := head.it.(*headerFrame) + if !ok { + continue + } + if hdr.onOrphaned != nil { // It will be nil on the server-side. + hdr.onOrphaned(ErrConnClosing) + } + } + c.mu.Unlock() +} + +type side int + +const ( + clientSide side = iota + serverSide +) + +// Loopy receives frames from the control buffer. +// Each frame is handled individually; most of the work done by loopy goes +// into handling data frames. Loopy maintains a queue of active streams, and each +// stream maintains a queue of data frames; as loopy receives data frames +// it gets added to the queue of the relevant stream. +// Loopy goes over this list of active streams by processing one node every iteration, +// thereby closely resemebling to a round-robin scheduling over all streams. While +// processing a stream, loopy writes out data bytes from this stream capped by the min +// of http2MaxFrameLen, connection-level flow control and stream-level flow control. +type loopyWriter struct { + side side + cbuf *controlBuffer + sendQuota uint32 + oiws uint32 // outbound initial window size. + // estdStreams is map of all established streams that are not cleaned-up yet. + // On client-side, this is all streams whose headers were sent out. + // On server-side, this is all streams whose headers were received. + estdStreams map[uint32]*outStream // Established streams. + // activeStreams is a linked-list of all streams that have data to send and some + // stream-level flow control quota. + // Each of these streams internally have a list of data items(and perhaps trailers + // on the server-side) to be sent out. + activeStreams *outStreamList + framer *framer + hBuf *bytes.Buffer // The buffer for HPACK encoding. + hEnc *hpack.Encoder // HPACK encoder. 
+ bdpEst *bdpEstimator + draining bool + + // Side-specific handlers + ssGoAwayHandler func(*goAway) (bool, error) +} + +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { + var buf bytes.Buffer + l := &loopyWriter{ + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + } + return l +} + +const minBatchSize = 1000 + +// run should be run in a separate goroutine. +// It reads control frames from controlBuf and processes them by: +// 1. Updating loopy's internal state, or/and +// 2. Writing out HTTP2 frames on the wire. +// +// Loopy keeps all active streams with data to send in a linked-list. +// All streams in the activeStreams linked-list must have both: +// 1. Data to send, and +// 2. Stream level flow control quota available. +// +// In each iteration of run loop, other than processing the incoming control +// frame, loopy calls processData, which processes one node from the activeStreams linked-list. +// This results in writing of HTTP2 frames into an underlying write buffer. +// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. +// As an optimization, to increase the batch size for each flush, loopy yields the processor, once +// if the batch size is too low to give stream goroutines a chance to fill it up. +func (l *loopyWriter) run() (err error) { + defer func() { + if err == ErrConnClosing { + // Don't log ErrConnClosing as error since it happens + // 1. When the connection is closed by some other known issue. + // 2. User closed the connection. + // 3. A graceful close of connection. + infof("transport: loopyWriter.run returning. %v", err) + err = nil + } + }() + for { + it, err := l.cbuf.get(true) + if err != nil { + return err + } + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + gosched := true + hasdata: + for { + it, err := l.cbuf.get(false) + if err != nil { + return err + } + if it != nil { + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + continue hasdata + } + isEmpty, err := l.processData() + if err != nil { + return err + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return nil + } + // Find the stream and update it. + if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return nil + } + } + return nil +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) 
+} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + if err := l.applySettings(s.ss); err != nil { + return err + } + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str + return nil +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { + warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 2: Client wants to originate stream. + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + str.itl.enqueue(h) + return l.originateStream(str) +} + +func (l *loopyWriter) originateStream(str *outStream) error { + hdr := str.itl.dequeue().(*headerFrame) + sendPing, err := hdr.initStream(str.id) + if err != nil { + if err == ErrConnClosing { + return err + } + // Other errors(errStreamDrain) need not close transport. + return nil + } + if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } + l.estdStreams[str.id] = str + if sendPing { + return l.pingHandler(&ping{data: [8]byte{}}) + } + return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { + if onWrite != nil { + onWrite() + } + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { + warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err) + } + } + var ( + err error + endHeaders, first bool + ) + first = true + for !endHeaders { + size := l.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: l.hBuf.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + }) + } else { + err = l.framer.fr.WriteContinuation( + streamID, + endHeaders, + l.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) error { + str, ok := l.estdStreams[df.streamID] + if !ok { + return nil + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. 
+ str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } + return nil +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { + o.resp <- l.sendQuota + return nil +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { + return ErrConnClosing + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + return ErrConnClosing + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side. + if l.ssGoAwayHandler != nil { + draining, err := l.ssGoAwayHandler(g) + if err != nil { + return err + } + l.draining = draining + } + return nil +} + +func (l *loopyWriter) handle(i interface{}) error { + switch i := i.(type) { + case *incomingWindowUpdate: + return l.incomingWindowUpdateHandler(i) + case *outgoingWindowUpdate: + return l.outgoingWindowUpdateHandler(i) + case *incomingSettings: + return l.incomingSettingsHandler(i) + case *outgoingSettings: + return l.outgoingSettingsHandler(i) + case *headerFrame: + return l.headerHandler(i) + case *registerStream: + return l.registerStreamHandler(i) + case *cleanupStream: + return l.cleanupStreamHandler(i) + case *incomingGoAway: + return l.incomingGoAwayHandler(i) + case *dataFrame: + return l.preprocessData(i) + case *ping: + return l.pingHandler(i) + case *goAway: + return l.goAwayHandler(i) + case *outFlowControlSizeRequest: + return l.outFlowControlSizeRequestHandler(i) + default: + return fmt.Errorf("transport: unknown control message type %T", i) + } +} + +func (l *loopyWriter) applySettings(ss []http2.Setting) error { + for _, s := range ss { + switch s.ID { + case http2.SettingInitialWindowSize: + o := l.oiws + l.oiws = s.Val + if o < l.oiws { + // If the new limit is greater make all depleted streams active. + for _, stream := range l.estdStreams { + if stream.state == waitingOnStreamQuota { + stream.state = active + l.activeStreams.enqueue(stream) + } + } + } + case http2.SettingHeaderTableSize: + updateHeaderTblSize(l.hEnc, s.Val) + } + } + return nil +} + +// processData removes the first stream from active streams, writes out at most 16KB +// of its data and then puts it at the end of activeStreams if there's still more data +// to be sent and stream has some stream-level flow control. +func (l *loopyWriter) processData() (bool, error) { + if l.sendQuota == 0 { + return true, nil + } + str := l.activeStreams.dequeue() // Remove the first stream. + if str == nil { + return true, nil + } + dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + // A data item is represented by a dataFrame, since it later translates into + // multiple HTTP2 data frames. 
+ // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the + // maximum possilbe HTTP2 frame size. + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + // Client sends out empty data frame with endStream = true + if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + return false, err + } + str.itl.dequeue() // remove the empty data item from stream + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, nil + } + } else { + l.activeStreams.enqueue(str) + } + return false, nil + } + var ( + idx int + buf []byte + ) + if len(dataItem.h) != 0 { // data header has not been written out yet. + buf = dataItem.h + } else { + idx = 1 + buf = dataItem.d + } + size := http2MaxFrameLen + if len(buf) < size { + size = len(buf) + } + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. + str.state = waitingOnStreamQuota + return false, nil + } else if strQuota < size { + size = strQuota + } + + if l.sendQuota < uint32(size) { // connection-level flow control. + size = int(l.sendQuota) + } + // Now that outgoing flow controls are checked we can replenish str's write quota + str.wq.replenish(size) + var endStream bool + // If this is the last data message on this stream and all of it can be written in this iteration. + if dataItem.endStream && size == len(buf) { + // buf contains either data or it contains header but data is empty. + if idx == 1 || len(dataItem.d) == 0 { + endStream = true + } + } + if dataItem.onEachWrite != nil { + dataItem.onEachWrite() + } + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + return false, err + } + buf = buf[size:] + str.bytesOutStanding += size + l.sendQuota -= uint32(size) + if idx == 0 { + dataItem.h = buf + } else { + dataItem.d = buf + } + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + str.itl.dequeue() + } + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. + str.state = waitingOnStreamQuota + } else { // Otherwise add it back to the list of active streams. + l.activeStreams.enqueue(str) + } + return false, nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go new file mode 100644 index 0000000000000..9fa306b2e07a2 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -0,0 +1,49 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "math" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = 20 * time.Second + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = 2 * time.Hour + defaultServerKeepaliveTimeout = 20 * time.Second + defaultKeepalivePolicyMinTime = 5 * time.Minute + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultWriteQuota is the default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. + defaultWriteQuota = 64 * 1024 + defaultClientMaxHeaderListSize = uint32(16 << 20) + defaultServerMaxHeaderListSize = uint32(16 << 20) +) diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go new file mode 100644 index 0000000000000..f262edd8ecda3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -0,0 +1,217 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + "math" + "sync" + "sync/atomic" +) + +// writeQuota is a soft limit on the amount of data a stream can +// schedule before some of it is written out. +type writeQuota struct { + quota int32 + // get waits on read from when quota goes less than or equal to zero. + // replenish writes on it when quota goes positive again. + ch chan struct{} + // done is triggered in error case. + done <-chan struct{} + // replenish is called by loopyWriter to give quota back to. + // It is implemented as a field so that it can be updated + // by tests. 
+ replenish func(n int) +} + +func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { + w := &writeQuota{ + quota: sz, + ch: make(chan struct{}, 1), + done: done, + } + w.replenish = w.realReplenish + return w +} + +func (w *writeQuota) get(sz int32) error { + for { + if atomic.LoadInt32(&w.quota) > 0 { + atomic.AddInt32(&w.quota, -sz) + return nil + } + select { + case <-w.ch: + continue + case <-w.done: + return errStreamDone + } + } +} + +func (w *writeQuota) realReplenish(n int) { + sz := int32(n) + a := atomic.AddInt32(&w.quota, sz) + b := a - sz + if b <= 0 && a > 0 { + select { + case w.ch <- struct{}{}: + default: + } + } +} + +type trInFlow struct { + limit uint32 + unacked uint32 + effectiveWindowSize uint32 +} + +func (f *trInFlow) newLimit(n uint32) uint32 { + d := n - f.limit + f.limit = n + f.updateEffectiveWindowSize() + return d +} + +func (f *trInFlow) onData(n uint32) uint32 { + f.unacked += n + if f.unacked >= f.limit/4 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w + } + f.updateEffectiveWindowSize() + return 0 +} + +func (f *trInFlow) reset() uint32 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w +} + +func (f *trInFlow) updateEffectiveWindowSize() { + atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked) +} + +func (f *trInFlow) getSize() uint32 { + return atomic.LoadUint32(&f.effectiveWindowSize) +} + +// TODO(mmukhi): Simplify this code. +// inFlow deals with inbound flow control +type inFlow struct { + mu sync.Mutex + // The inbound flow control limit for pending data. + limit uint32 + // pendingData is the overall data which have been received but not been + // consumed by applications. + pendingData uint32 + // The amount of data the application has consumed but grpc has not sent + // window update for them. Used to reduce window update frequency. + pendingUpdate uint32 + // delta is the extra window update given by receiver when an application + // is reading data bigger in size than the inFlow limit. + delta uint32 +} + +// newLimit updates the inflow window to a new value n. +// It assumes that n is always greater than the old limit. +func (f *inFlow) newLimit(n uint32) uint32 { + f.mu.Lock() + d := n - f.limit + f.limit = n + f.mu.Unlock() + return d +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { + if n > uint32(math.MaxInt32) { + n = uint32(math.MaxInt32) + } + f.mu.Lock() + defer f.mu.Unlock() + // estSenderQuota is the receiver's view of the maximum number of bytes the sender + // can send without a window update. + estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) + // estUntransmittedData is the maximum number of bytes the sends might not have put + // on the wire yet. A value of 0 or less means that we have already received all or + // more bytes than the application is requesting to read. + estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. + // This implies that unless we send a window update, the sender won't be able to send all the bytes + // for this message. Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec. 
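The writeQuota type above pairs an atomic counter with a one-slot wake-up channel so writers block only when the quota is exhausted and are woken as soon as loopyWriter replenishes it. A small self-contained sketch of the same pattern follows; the type and names are illustrative, not the vendored implementation.

package main

import (
	"fmt"
	"sync/atomic"
)

// quota mirrors the counter-plus-wakeup idea: get spins on the atomic
// counter and parks on ch when it is depleted; replenish signals ch only
// on the transition from depleted to positive.
type quota struct {
	n    int32
	ch   chan struct{}
	done <-chan struct{}
}

func newQuota(sz int32, done <-chan struct{}) *quota {
	return &quota{n: sz, ch: make(chan struct{}, 1), done: done}
}

func (q *quota) get(sz int32) error {
	for {
		if atomic.LoadInt32(&q.n) > 0 {
			atomic.AddInt32(&q.n, -sz)
			return nil
		}
		select {
		case <-q.ch: // quota may be positive again; recheck
		case <-q.done:
			return fmt.Errorf("stream done")
		}
	}
}

func (q *quota) replenish(n int32) {
	after := atomic.AddInt32(&q.n, n)
	if after-n <= 0 && after > 0 {
		select {
		case q.ch <- struct{}{}: // wake a waiter; non-blocking if none
		default:
		}
	}
}

func main() {
	done := make(chan struct{})
	q := newQuota(4, done)
	_ = q.get(4)      // consumes all quota
	go q.replenish(8) // unblocks the next get
	_ = q.get(2)
	fmt.Println("remaining quota:", atomic.LoadInt32(&q.n))
}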
+ if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + return f.delta + } + return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. +func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + limit := f.limit + rcvd := f.pendingData + f.pendingUpdate + f.mu.Unlock() + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) + } + f.mu.Unlock() + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + if f.pendingData == 0 { + f.mu.Unlock() + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + f.mu.Unlock() + return wu + } + f.mu.Unlock() + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go new file mode 100644 index 0000000000000..78f9ddc3d3aba --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -0,0 +1,431 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? 
we did before + contentSubtype, validContentType := contentSubtype(contentType) + if !validContentType { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + didCommonHeaders bool + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats stats.Handler +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. 
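The serverHandlerTransport above funnels every write through a channel of closures so that all ResponseWriter access happens on the single ServeHTTP goroutine. Below is a minimal sketch of that serialization pattern under illustrative names; it is not the vendored type itself.

package main

import (
	"errors"
	"fmt"
)

// runner executes submitted closures on one serving goroutine, the same
// way the handler transport serializes writes onto ServeHTTP.
type runner struct {
	writes chan func()
	closed chan struct{}
}

var errClosed = errors.New("transport closed")

// do schedules fn on the serving goroutine, or fails once it has exited.
func (r *runner) do(fn func()) error {
	select {
	case <-r.closed:
		return errClosed
	case r.writes <- fn:
		return nil
	}
}

// serve runs on the single goroutine: execute closures until closed.
func (r *runner) serve() {
	for {
		select {
		case fn := <-r.writes:
			fn()
		case <-r.closed:
			return
		}
	}
}

func main() {
	r := &runner{writes: make(chan func()), closed: make(chan struct{})}
	done := make(chan struct{})
	go func() { r.serve(); close(done) }()
	_ = r.do(func() { fmt.Println("header written from the serving goroutine") })
	close(r.closed)
	<-done
}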
+func (ht *serverHandlerTransport) do(fn func()) error { + select { + case <-ht.closedCh: + return ErrConnClosing + case ht.writes <- fn: + return nil + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + err := ht.do(func() { + ht.writeCommonHeaders(s) + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } + + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + + if err == nil { // transport has not been closed + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + } + } + ht.Close() + return err +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + if ht.didCommonHeaders { + return + } + ht.didCommonHeaders = true + + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + ht.rw.Write(hdr) + ht.rw.Write(data) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + err := ht.do(func() { + ht.writeCommonHeaders(s) + h := ht.rw.Header() + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + v = encodeMetadataHeader(k, v) + h.Add(k, v) + } + } + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{}) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. 
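WriteStatus and writeCommonHeaders above rely on the net/http trailer contract: declare the trailer names in the "Trailer" header before the body, then set their values after the body has been written. A plain net/http sketch of that mechanism, with gRPC-style header names used only for illustration:

package main

import (
	"fmt"
	"net/http"
)

// handler declares trailers up front, writes the body, then sets the
// trailer values; the server sends them after the body.
func handler(w http.ResponseWriter, r *http.Request) {
	w.Header().Add("Trailer", "Grpc-Status")
	w.Header().Add("Trailer", "Grpc-Message")
	w.Header().Set("Content-Type", "application/grpc")

	w.WriteHeader(http.StatusOK)
	fmt.Fprint(w, "length-prefixed gRPC messages would go here")
	if f, ok := w.(http.Flusher); ok {
		f.Flush() // push headers and body out before the trailers
	}

	// Values set after the body become trailers because their names were
	// declared in the "Trailer" header above.
	w.Header().Set("Grpc-Status", "0")
	w.Header().Set("Grpc-Message", "")
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe("localhost:8080", nil) // plaintext; real gRPC needs HTTP/2
}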
+ + ctx := ht.req.Context() + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when the status has been written via WriteStatus. + requestOver := make(chan struct{}) + go func() { + select { + case <-requestOver: + case <-ht.closedCh: + case <-ht.req.Context().Done(): + } + cancel() + ht.Close() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + s.ctx = peer.NewContext(ctx, pr) + if ht.stats != nil { + s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } + ht.stats.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn := <-ht.writes: + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} + +func (ht *serverHandlerTransport) Drain() { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. 
+// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return status.Error(code, se.Error()) + } + } + if strings.Contains(err.Error(), "body closed by handler") { + return status.Error(codes.Canceled, err.Error()) + } + return connectionErrorf(true, err, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go new file mode 100644 index 0000000000000..41a79c567027a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -0,0 +1,1411 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "fmt" + "io" + "math" + "net" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + md interface{} + conn net.Conn // underlying communication channel + loopy *loopyWriter + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) + // that the server sent GoAway on this transport. + goAway chan struct{} + // awakenKeepalive is used to wake up keepalive when after it has gone dormant. + awakenKeepalive chan struct{} + + framer *framer + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + // The scheme used: https if TLS is on, http otherwise. + scheme string + + isSecure bool + + perRPCCreds []credentials.PerRPCCredentials + + // Boolean to keep track of reading activity on transport. + // 1 is true and 0 is false. + activity uint32 // Accessed atomically. 
+ kp keepalive.ClientParameters + keepaliveEnabled bool + + statsHandler stats.Handler + + initialWindowSize int32 + + // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE + maxSendHeaderListSize *uint32 + + bdpEst *bdpEstimator + // onPrefaceReceipt is a callback that client transport calls upon + // receiving server preface to signal that a succefull HTTP2 + // connection was established. + onPrefaceReceipt func() + + maxConcurrentStreams uint32 + streamQuota int64 + streamsQuotaAvailable chan struct{} + waitingStreams uint32 + nextID uint32 + + mu sync.Mutex // guard the following variables + state transportState + activeStreams map[uint32]*Stream + // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. + prevGoAwayID uint32 + // goAwayReason records the http2.ErrCode and debug data received with the + // GoAway frame. + goAwayReason GoAwayReason + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czData *channelzData + + onGoAway func(GoAwayReason) + onClose func() + + bufferPool *bufferPool +} + +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { + if fn != nil { + return fn(ctx, addr) + } + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) +} + +func isTemporary(err error) bool { + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return true +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + } + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams + // Validate keepalive parameters. 
+ if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + keepaliveEnabled := false + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + keepaliveEnabled = true + } + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { + scheme = "https" + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + isSecure = true + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize + } + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. + cancel: cancel, + userAgent: opts.UserAgent, + md: addr.Metadata, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + awakenKeepalive: make(chan struct{}, 1), + framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + onPrefaceReceipt: onPrefaceReceipt, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), + onGoAway: onGoAway, + onClose: onClose, + keepaliveEnabled: keepaliveEnabled, + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + // Make sure awakenKeepalive can't be written upon. + // keepalive routine will make it writable, if need be. + t.awakenKeepalive <- struct{}{} + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + } + if t.keepaliveEnabled { + go t.keepalive() + } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. 
+ go t.reader() + + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + } + } + + if err := t.framer.writer.Flush(); err != nil { + return nil, err + } + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + err := t.loopy.run() + if err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } + // If it's a connection error, let reader goroutine handle it + // since there might be data in the buffers. + if _, ok := err.(net.Error); !ok { + t.conn.Close() + } + close(t.writerDone) + }() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. + s := &Stream{ + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. + s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, + closeStream: func(err error) { + t.CloseStream(s, err) + }, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + return s +} + +func (t *http2Client) getPeer() *peer.Peer { + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + return pr +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + authData, err := t.getTrAuthData(ctx, aud) + if err != nil { + return nil, err + } + callAuthData, err := t.getCallAuthData(ctx, aud, callHdr) + if err != nil { + return nil, err + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. 
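The connection setup above writes the client preface, an initial SETTINGS frame, and, when the connection window has been raised, a WINDOW_UPDATE on stream 0. The sketch below reproduces just that wire sequence with golang.org/x/net/http2; the buffer stands in for the net.Conn and the window sizes are illustrative, not the transport's defaults.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var conn bytes.Buffer // stands in for the TCP/TLS connection
	fr := http2.NewFramer(&conn, nil)

	// 1. Connection preface.
	if _, err := conn.WriteString(http2.ClientPreface); err != nil {
		panic(err)
	}
	// 2. Initial SETTINGS (per-stream window shown here as 1MB).
	settings := []http2.Setting{
		{ID: http2.SettingInitialWindowSize, Val: 1 << 20},
	}
	if err := fr.WriteSettings(settings...); err != nil {
		panic(err)
	}
	// 3. Grow the connection-level window beyond the 64KB default.
	if err := fr.WriteWindowUpdate(0, 1<<20-65535); err != nil {
		panic(err)
	}
	fmt.Println("preface + settings + window update:", conn.Len(), "bytes")
}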
+ hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.PreviousAttempts > 0 { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) + } + + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + } + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. + timeout := time.Until(dl) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + var k string + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + for _, vv := range added { + for i, v := range vv { + if i%2 == 0 { + k = v + continue + } + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) + } + } + } + if md, ok := t.md.(*metadata.MD); ok { + for k, vv := range *md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { + return "" + } + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. 
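createAudience, whose body follows, derives the per-RPC credentials audience by stripping a default :443 port from the host and dropping the method name from the full method path. A tiny standalone sketch of that derivation; the host and method below are made up for illustration.

package main

import (
	"fmt"
	"strings"
)

// audience builds "https://<host-without-:443><service-path>" from a call's
// host and full method name, mirroring the logic shown in createAudience.
func audience(host, method string) string {
	host = strings.TrimSuffix(host, ":443")
	pos := strings.LastIndex(method, "/")
	if pos == -1 {
		pos = len(method)
	}
	return "https://" + host + method[:pos]
}

func main() {
	fmt.Println(audience("greeter.example.com:443", "/helloworld.Greeter/SayHello"))
	// prints: https://greeter.example.com/helloworld.Greeter
}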
+ host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + if len(t.perRPCCreds) == 0 { + return nil, nil + } + authData := map[string]string{} + for _, c := range t.perRPCCreds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if _, ok := status.FromError(err); ok { + return nil, err + } + + return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + var callAuthData map[string]string + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, status.Errorf(codes.Internal, "transport: %v", err) + } + callAuthData = make(map[string]string, len(data)) + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + ctx = peer.NewContext(ctx, t.getPeer()) + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + return nil, err + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + close(s.headerChan) + } + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(id uint32) (bool, error) { + t.mu.Lock() + if state := t.state; state != reachable { + t.mu.Unlock() + // Do a quick cleanup. + err := error(errStreamDrain) + if state == closing { + err = ErrConnClosing + } + cleanup(err) + return false, err + } + t.activeStreams[id] = s + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + var sendPing bool + // If the number of active streams change from 0 to 1, then check if keepalive + // has gone dormant. If so, wake it up. + if len(t.activeStreams) == 1 && t.keepaliveEnabled { + select { + case t.awakenKeepalive <- struct{}{}: + sendPing = true + // Fill the awakenKeepalive channel again as this channel must be + // kept non-writable except at the point that the keepalive() + // goroutine is waiting either to be awaken or shutdown. 
+ t.awakenKeepalive <- struct{}{} + default: + } + } + t.mu.Unlock() + return sendPing, nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + checkForStreamQuota := func(it interface{}) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. + if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + if !checkForStreamQuota(it) { + return false + } + if !checkForHeaderListSize(it) { + return false + } + return true + }, hdr) + if err != nil { + return nil, err + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, hdrListSizeErr + } + firstTry = false + select { + case <-ch: + case <-s.ctx.Done(): + return nil, ContextErr(s.ctx.Err()) + case <-t.goAway: + return nil, errStreamDrain + case <-t.ctx.Done(): + return nil, ErrConnClosing + } + } + if t.statsHandler != nil { + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + } + t.statsHandler.HandleRPC(s.ctx, outHeader) + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // If headerChan isn't closed, then close it. 
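Both cleanup paths guard close(s.headerChan) with an atomic compare-and-swap so the channel is closed exactly once even when closeStream races with header processing. A minimal sketch of that close-once idiom, with illustrative names:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// once closes ch exactly once no matter how many goroutines race to do it.
type once struct {
	closed uint32
	ch     chan struct{}
}

func (o *once) closeCh() {
	if atomic.CompareAndSwapUint32(&o.closed, 0, 1) {
		close(o.ch) // only the first caller reaches this
	}
}

func main() {
	o := &once{ch: make(chan struct{})}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ { // several racing closers, one actual close
		wg.Add(1)
		go func() { defer wg.Done(); o.closeCh() }()
	}
	wg.Wait()
	<-o.ch // already closed; receive returns immediately
	fmt.Println("channel closed exactly once")
}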
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(interface{}) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +// +// This method blocks until the addrConn that initiated this transport is +// re-connected. This happens because t.onClose() begins reconnect logic at the +// addrConn level and blocks until the addrConn is successfully connected. +func (t *http2Client) Close() error { + t.mu.Lock() + // Make sure we only Close once. + if t.state == closing { + t.mu.Unlock() + return nil + } + // Call t.onClose before setting the state to closing to prevent the client + // from attempting to create new streams ASAP. + t.onClose() + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Notify all active streams. + for _, s := range streams { + t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + } + if t.statsHandler != nil { + connEnd := &stats.ConnEnd{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connEnd) + } + return err +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. +func (t *http2Client) GracefulClose() { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close() + return + } + t.controlBuf.put(&incomingGoAway{}) +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone + } + } else if s.getState() != streamActive { + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + } + if hdr != nil || data != nil { // If it's not an empty data frame. + // Add some data to grpc message header so that we can equally + // distribute bytes across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) 
+ data = data[emptyLen:] + df.h, df.d = hdr, data + // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } + } + return t.controlBuf.put(df) +} + +func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + s, ok := t.activeStreams[f.Header().StreamID] + return s, ok +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + updateIWS := func(interface{}) bool { + t.initialWindowSize = int32(n) + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? 
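The DATA handling below copies each frame's payload into a pooled buffer, since the framer may reuse its backing slice on the next ReadFrame. A sketch of that pooling pattern using sync.Pool; the helper names are illustrative and the transport's own bufferPool is a separate small wrapper.

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// copyFrameData takes ownership of a copy of data in a reused buffer.
func copyFrameData(data []byte) *bytes.Buffer {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	buf.Write(data) // copy now; the caller's slice may be overwritten later
	return buf
}

// release returns the buffer to the pool once the message has been consumed.
func release(buf *bytes.Buffer) {
	bufPool.Put(buf)
}

func main() {
	b := copyFrameData([]byte("payload bytes from a DATA frame"))
	fmt.Println("copied", b.Len(), "bytes")
	release(b)
}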
+ if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s, ok := t.getStream(f) + if !ok { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { + // Our deadline was already exceeded, and that was likely the cause + // of this cancelation. Alter the status code accordingly. + statusCode = codes.DeadlineExceeded + } + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + if f.IsAck() { + return + } + var maxStreams *uint32 + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + maxStreams = new(uint32) + *maxStreams = s.Val + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams != nil { + updateStreamQuota := func() { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. + t.streamsQuotaAvailable = make(chan struct{}, 1) + } + } + updateFuncs = append(updateFuncs, updateStreamQuota) + } + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, sf) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + if f.IsAck() { + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } + id := f.LastStreamID + if id > 0 && id%2 != 1 { + t.mu.Unlock() + t.Close() + return + } + // A client can receive multiple GoAways from the server (see + // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first + // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be + // sent after an RTT delay with the ID of the last stream the server will + // process. 
+ // + // Therefore, when we get the first GoAway we don't necessarily close any + // streams. While in case of second GoAway we close all streams created after + // the GoAwayId. This way streams that were in-flight while the GoAway from + // server was being sent don't get killed. + select { + case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). + // If there are multiple GoAways the first one should always have an ID greater than the following ones. + if id > t.prevGoAwayID { + t.mu.Unlock() + t.Close() + return + } + default: + t.setGoAwayReason(f) + close(t.goAway) + t.controlBuf.put(&incomingGoAway{}) + // Notify the clientconn about the GOAWAY before we set the state to + // draining, to allow the client to stop attempting to create streams + // before disallowing new streams on this connection. + t.onGoAway(t.goAwayReason) + t.state = draining + } + // All streams with IDs greater than the GoAwayId + // and smaller than the previous GoAway ID should be killed. + upperLimit := t.prevGoAwayID + if upperLimit == 0 { // This is the first GoAway Frame. + upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. + } + for streamID, stream := range t.activeStreams { + if streamID > id && streamID <= upperLimit { + // The stream was unprocessed by the server. + atomic.StoreUint32(&stream.unprocessed, 1) + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + } + } + t.prevGoAwayID = id + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close() + } +} + +// setGoAwayReason sets the value of t.goAwayReason based +// on the GoAway frame received. +// It expects a lock on transport's mutext to be held by +// the caller. +func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = GoAwayNoReason + switch f.ErrCode { + case http2.ErrCodeEnhanceYourCalm: + if string(f.DebugData()) == "too_many_pings" { + t.goAwayReason = GoAwayTooManyPings + } + } +} + +func (t *http2Client) GetGoAwayReason() GoAwayReason { + t.mu.Lock() + defer t.mu.Unlock() + return t.goAwayReason +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s, ok := t.getStream(frame) + if !ok { + return + } + endStream := frame.StreamEnded() + atomic.StoreUint32(&s.bytesReceived, 1) + initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 + + if !initialHeader && !endStream { + // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. + st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") + t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) + return + } + + state := &decodeState{} + // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. 
+ state.data.isGRPC = !initialHeader + if err := state.decodeHeader(frame); err != nil { + t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream) + return + } + + isHeader := false + defer func() { + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + }() + + // If headerChan hasn't been closed yet + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + if !endStream { + // HEADERS frame block carries a Response-Headers. + isHeader = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = state.data.encoding + if len(state.data.mdata) > 0 { + s.header = state.data.mdata + } + } else { + // HEADERS frame block carries a Trailers-Only. + s.noHeaders = true + } + close(s.headerChan) + } + + if !endStream { + return + } + + // if client received END_STREAM from server while stream was still active, send RST_STREAM + rst := s.getState() == streamActive + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) +} + +// reader runs as a separate goroutine in charge of reading data from network +// connection. +// +// TODO(zhaoq): currently one reader per transport. Investigate whether this is +// optimal. +// TODO(zhaoq): Check the validity of the incoming frame sequence. +func (t *http2Client) reader() { + defer close(t.readerDone) + // Check the validity of server preface. + frame, err := t.framer.fr.ReadFrame() + if err != nil { + t.Close() // this kicks off resetTransport, so must be last before return + return + } + t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) + if t.keepaliveEnabled { + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + t.Close() // this kicks off resetTransport, so must be last before return + return + } + t.onPrefaceReceipt() + t.handleSettings(sf, true) + + // loop to keep reading incoming messages on this transport. + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + if t.keepaliveEnabled { + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + } + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + code := http2ErrConvTab[se.Code] + msg := t.framer.fr.ErrorDetail().Error() + t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) + } + continue + } else { + // Transport error. 
+ t.Close() + return + } + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame, false) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } +} + +// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. +func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + timer.Reset(t.kp.Time) + continue + } + // Check if keepalive should go dormant. + t.mu.Lock() + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // Make awakenKeepalive writable. + <-t.awakenKeepalive + t.mu.Unlock() + select { + case <-t.awakenKeepalive: + // If the control gets here a ping has been sent + // need to reset the timer with keepalive.Timeout. + case <-t.ctx.Done(): + return + } + } else { + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + // Send ping. + t.controlBuf.put(p) + } + + // By the time control gets here a ping has been sent one way or the other. + timer.Reset(t.kp.Timeout) + select { + case <-timer.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + timer.Reset(t.kp.Time) + continue + } + infof("transport: closing client transport due to idleness.") + t.Close() + return + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } + +func (t *http2Client) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan 
uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 0000000000000..83439b5627d9b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1220 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. + ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + // statusRawProto is a function to get to the raw status proto wrapped in a + // status.Status without a proto.Clone(). + statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status) +) + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + ctx context.Context + ctxDone <-chan struct{} // Cache the context.Done() chan + cancel context.CancelFunc + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats stats.Handler + // Flag to keep track of reading activity on transport. + // 1 is true and 0 is false. + activity uint32 // Accessed atomically. 
+ // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainChan is initialized when drain(...) is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. + idle time.Time + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czData *channelzData + bufferPool *bufferPool +} + +// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is +// returned if something goes wrong. +func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } + framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + // Send initial settings as connection preface to client. + var isettings []http2.Setting + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. 
+ if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. + kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + ctx, cancel := context.WithCancel(context.Background()) + t := &http2Server{ + ctx: ctx, + cancel: cancel, + ctxDone: ctx.Done(), + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + czData: new(channelzData), + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.ctxDone) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.stats != nil { + t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + t.stats.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + } + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close() + } + }() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreUint32(&t.activity, 1) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + if err := t.loopy.run(); err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } + t.conn.Close() + close(t.writerDone) + }() + go t.keepalive() + return t, nil +} + +// operateHeader takes action on the decoded headers. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + streamID := frame.Header().StreamID + state := &decodeState{ + serverSide: true, + } + if err := state.decodeHeader(frame); err != nil { + if se, ok := status.FromError(err); ok { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: statusCodeConvTab[se.Code()], + onWrite: func() {}, + }) + } + return false + } + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.data.encoding, + method: state.data.method, + contentSubtype: state.data.contentSubtype, + } + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if state.data.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. + if len(state.data.mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) + } + if state.data.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) + } + if state.data.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) + } + if t.inTapHandle != nil { + var err error + info := &tap.Info{ + FullMethodName: state.data.method, + } + s.ctx, err = t.inTapHandle(s.ctx, info) + if err != nil { + warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + return false + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return false + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + return false + } + if streamID%2 != 1 || streamID <= t.maxStreamID { + t.mu.Unlock() + // illegal gRPC stream id. + errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + return true + } + t.maxStreamID = streamID + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + if t.stats != nil { + s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + } + t.stats.HandleRPC(s.ctx, inHeader) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + // Register the stream with loopy. 
+ t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) + handle(s) + return false +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. +func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + defer close(t.readerDone) + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + atomic.StoreUint32(&t.activity, 1) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + t.Close() + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle, traceCtx) { + t.Close() + break + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. 
+func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + if f.Header().Flags.Has(http2.FlagDataEndStream) { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + // If the stream is not deleted from the transport's active streams map, then do a regular close stream. + if s, ok := t.getStream(f); ok { + t.closeStream(s, false, 0, false) + return + } + // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. 
+ t.controlBuf.put(&cleanupStream{ + streamID: f.Header().StreamID, + rst: false, + rstCode: 0, + onWrite: func() {}, + }) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, &incomingSettings{ + ss: ss, + }) +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) + return + } + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) + + now := time.Now() + defer func() { + t.lastPingAt = now + }() + // A reset ping strikes means that we don't need to check for policy + // violation for this ping and the pingStrikes counter should be set + // to 0. + if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { + t.pingStrikes = 0 + return + } + t.mu.Lock() + ns := len(t.activeStreams) + t.mu.Unlock() + if ns < 1 && !t.kep.PermitWithoutStream { + // Keepalive shouldn't be active thus, this new ping should + // have come after at least defaultPingTimeout. + if t.lastPingAt.Add(defaultPingTimeout).After(now) { + t.pingStrikes++ + } + } else { + // Check if keepalive policy is respected. + if t.lastPingAt.Add(t.kep.MinTime).After(now) { + t.pingStrikes++ + } + } + + if t.pingStrikes > maxPingStrikes { + // Send goaway and close the connection. + errorf("transport: Got too many pings from the client, closing the connection.") + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + } +} + +func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { + for k, vv := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields +} + +func (t *http2Server) checkForHeaderListSize(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + return false + } + } + return true +} + +// WriteHeader sends the header metedata md back to the client. 
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { + if s.updateHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + if md.Len() > 0 { + if s.header.Len() > 0 { + s.header = metadata.Join(s.header, md) + } else { + s.header = md + } + } + if err := t.writeHeaderLocked(s); err != nil { + s.hdrMu.Unlock() + return err + } + s.hdrMu.Unlock() + return nil +} + +func (t *http2Server) setResetPingStrikes() { + atomic.StoreUint32(&t.resetPingStrikes, 1) +} + +func (t *http2Server) writeHeaderLocked(s *Stream) error { + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + if s.sendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } + headerFields = appendHeaderFieldsFromMD(headerFields, s.header) + success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + onWrite: t.setResetPingStrikes, + }) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, false) + return ErrHeaderListSizeLimitViolation + } + if t.stats != nil { + // Note: WireLength is not set in outHeader. + // TODO(mmukhi): Revisit this later, if needed. + outHeader := &stats.OutHeader{} + t.stats.HandleRPC(s.Context(), outHeader) + } + return nil +} + +// WriteStatus sends stream status to the client and terminates the stream. +// There is no further I/O operations being able to perform on this stream. +// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early +// OK is adopted. +func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + if s.getState() == streamDone { + return nil + } + s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. + if !s.updateHeaderSent() { // No headers have been sent. + if len(s.header) > 0 { // Send a separate header frame. + if err := t.writeHeaderLocked(s); err != nil { + s.hdrMu.Unlock() + return err + } + } else { // Send a trailer only response. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + } + } + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + + if p := statusRawProto(st); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. 
+ grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + } else { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + } + } + + // Attach the trailer metadata. + headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) + trailingHeader := &headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: true, + onWrite: t.setResetPingStrikes, + } + s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, false) + return ErrHeaderListSizeLimitViolation + } + // Send a RST_STREAM after the trailers if the client has not already half-closed. + rst := s.getState() == streamActive + t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) + if t.stats != nil { + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + } + return nil +} + +// Write converts the data into HTTP2 data frame and sends it out. Non-nil error +// is returns if it fails (e.g., framing error, transport error). +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if !s.isHeaderSent() { // Headers haven't been written yet. + if err := t.WriteHeader(s, nil); err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // TODO(mmukhi, dfawley): Make sure this is the right code to return. + return status.Errorf(codes.Internal, "transport: %v", err) + } + } else { + // Writing headers checks for this condition. + if s.getState() == streamDone { + // TODO(mmukhi, dfawley): Should the server write also return io.EOF? + s.cancel() + select { + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) + } + } + // Add some data to header frame so that we can equally distribute bytes across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + df := &dataFrame{ + streamID: s.id, + h: hdr, + d: data, + onEachWrite: t.setResetPingStrikes, + } + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + select { + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) + } + return t.controlBuf.put(df) +} + +// keepalive running in a separate goroutine does the following: +// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. +// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. +// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection +// after an additional duration of keepalive.Timeout. +func (t *http2Server) keepalive() { + p := &ping{} + var pingSent bool + maxIdle := time.NewTimer(t.kp.MaxConnectionIdle) + maxAge := time.NewTimer(t.kp.MaxConnectionAge) + keepalive := time.NewTimer(t.kp.Time) + // NOTE: All exit paths of this function should reset their + // respective timers. A failure to do so will cause the + // following clean-up to deadlock and eventually leak. 
+ defer func() { + if !maxIdle.Stop() { + <-maxIdle.C + } + if !maxAge.Stop() { + <-maxAge.C + } + if !keepalive.Stop() { + <-keepalive.C + } + }() + for { + select { + case <-maxIdle.C: + t.mu.Lock() + idle := t.idle + if idle.IsZero() { // The connection is non-idle. + t.mu.Unlock() + maxIdle.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. + t.drain(http2.ErrCodeNo, []byte{}) + // Resetting the timer so that the clean-up doesn't deadlock. + maxIdle.Reset(infinity) + return + } + maxIdle.Reset(val) + case <-maxAge.C: + t.drain(http2.ErrCodeNo, []byte{}) + maxAge.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-maxAge.C: + // Close the connection after grace period. + infof("transport: closing server transport due to maximum connection age.") + t.Close() + // Resetting the timer so that the clean-up doesn't deadlock. + maxAge.Reset(infinity) + case <-t.ctx.Done(): + } + return + case <-keepalive.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + pingSent = false + keepalive.Reset(t.kp.Time) + continue + } + if pingSent { + infof("transport: closing server transport due to idleness.") + t.Close() + // Resetting the timer so that the clean-up doesn't deadlock. + keepalive.Reset(infinity) + return + } + pingSent = true + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + keepalive.Reset(t.kp.Timeout) + case <-t.ctx.Done(): + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() error { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + if t.stats != nil { + connEnd := &stats.ConnEnd{} + t.stats.HandleConn(t.ctx, connEnd) + } + return err +} + +// deleteStream deletes the stream s from transport's active streams. +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + t.mu.Lock() + if _, ok := t.activeStreams[s.id]; ok { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } +} + +// finishStream closes the stream and puts the trailing headerFrame into controlbuf. +func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + oldState := s.swapState(streamDone) + if oldState == streamDone { + // If the stream was already done, return. 
+ return + } + + hdr.cleanup = &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.deleteStream(s, eosReceived) + }, + } + t.controlBuf.put(hdr) +} + +// closeStream clears the footprint of a stream when the stream is not needed any more. +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + s.swapState(streamDone) + t.deleteStream(s, eosReceived) + + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() {}, + }) +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain() { + t.drain(http2.ErrCodeNo, []byte{}) +} + +func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + sid := t.maxStreamID + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + if len(t.activeStreams) == 0 { + g.closeConn = true + } + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + if g.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return false, fmt.Errorf("transport: Connection closing") + } + return true, nil + } + t.mu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. 
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainChan: + case <-timer.C: + case <-t.ctx.Done(): + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Server) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Server) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := grpcrand.Int63n(2*r) - r + return time.Duration(j) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go new file mode 100644 index 0000000000000..9d212867ce2e5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -0,0 +1,676 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + "math" + "net" + "net/http" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // http://http2.github.io/http2-spec/#SettingValues + http2InitHeaderTableSize = 4096 + // baseContentType is the base content-type for gRPC. This is a valid + // content-type on it's own, but can also include a content-subtype such as + // "proto" as a suffix after "+" or ";". See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + baseContentType = "application/grpc" +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.Internal, + } + statusCodeConvTab = map[codes.Code]http2.ErrCode{ + codes.Internal: http2.ErrCodeInternal, + codes.Canceled: http2.ErrCodeCancel, + codes.Unavailable: http2.ErrCodeRefusedStream, + codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, + codes.PermissionDenied: http2.ErrCodeInadequateSecurity, + } + // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. + HTTPStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } +) + +type parsedHeaderData struct { + encoding string + // statusGen caches the stream status received from the trailer the server + // sent. Client side only. Do not access directly. After all trailers are + // parsed, use the status method to retrieve the status. + statusGen *status.Status + // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not + // intended for direct access outside of parsing. + rawStatusCode *int + rawStatusMsg string + httpStatus *int + // Server side only fields. + timeoutSet bool + timeout time.Duration + method string + // key-value metadata map from the peer. 
+ mdata map[string][]string + statsTags []byte + statsTrace []byte + contentSubtype string + + // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). + // + // We are in gRPC mode (peer speaking gRPC) if: + // * We are client side and have already received a HEADER frame that indicates gRPC peer. + // * The header contains valid a content-type, i.e. a string starts with "application/grpc" + // And we should handle error specific to gRPC. + // + // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we + // are in HTTP fallback mode, and should handle error specific to HTTP. + isGRPC bool + grpcErr error + httpErr error + contentTypeErr string +} + +// decodeState configures decoding criteria and records the decoded data. +type decodeState struct { + // whether decoding on server side or not + serverSide bool + + // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS + // frame once decodeHeader function has been invoked and returned. + data parsedHeaderData +} + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "user-agent", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "grpc-status-details-bin", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. + "te": + return true + default: + return false + } +} + +// isWhitelistedHeader checks whether hdr should be propagated into metadata +// visible to users, even though it is classified as "reserved", above. +func isWhitelistedHeader(hdr string) bool { + switch hdr { + case ":authority", "user-agent": + return true + default: + return false + } +} + +// contentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. 
+func contentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// contentSubtype is assumed to be lowercase +func contentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} + +func (d *decodeState) status() *status.Status { + if d.data.statusGen == nil { + // No status-details were provided; generate status using code/msg. + d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) + } + return d.data.statusGen +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + return status.Error(codes.Internal, "peer header list size exceeded limit") + } + + for _, hf := range frame.Fields { + d.processHeaderField(hf) + } + + if d.data.isGRPC { + if d.data.grpcErr != nil { + return d.data.grpcErr + } + if d.serverSide { + return nil + } + if d.data.rawStatusCode == nil && d.data.statusGen == nil { + // gRPC status doesn't exist. + // Set rawStatusCode to be unknown and return nil error. + // So that, if the stream has ended this Unknown status + // will be propagated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propagated. + code := int(codes.Unknown) + d.data.rawStatusCode = &code + } + return nil + } + + // HTTP fallback mode + if d.data.httpErr != nil { + return d.data.httpErr + } + + var ( + code = codes.Internal // when header does not include HTTP status, return INTERNAL + ok bool + ) + + if d.data.httpStatus != nil { + code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] + if !ok { + code = codes.Unknown + } + } + + return status.Error(code, d.constructHTTPErrMsg()) +} + +// constructErrMsg constructs error message to be returned in HTTP fallback mode. +// Format: HTTP status code and its corresponding message + content-type error message. 
+func (d *decodeState) constructHTTPErrMsg() string { + var errMsgs []string + + if d.data.httpStatus == nil { + errMsgs = append(errMsgs, "malformed header: missing HTTP status") + } else { + errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) + } + + if d.data.contentTypeErr == "" { + errMsgs = append(errMsgs, "transport: missing content-type field") + } else { + errMsgs = append(errMsgs, d.data.contentTypeErr) + } + + return strings.Join(errMsgs, "; ") +} + +func (d *decodeState) addMetadata(k, v string) { + if d.data.mdata == nil { + d.data.mdata = make(map[string][]string) + } + d.data.mdata[k] = append(d.data.mdata[k], v) +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) { + switch f.Name { + case "content-type": + contentSubtype, validContentType := contentSubtype(f.Value) + if !validContentType { + d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) + return + } + d.data.contentSubtype = contentSubtype + // TODO: do we want to propagate the whole content-type in the metadata, + // or come up with a way to just propagate the content-subtype if it was set? + // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} + // in the metadata? + d.addMetadata(f.Name, f.Value) + d.data.isGRPC = true + case "grpc-encoding": + d.data.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) + return + } + d.data.rawStatusCode = &code + case "grpc-message": + d.data.rawStatusMsg = decodeGrpcMessage(f.Value) + case "grpc-status-details-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return + } + s := &spb.Status{} + if err := proto.Unmarshal(v, s); err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return + } + d.data.statusGen = status.FromProto(s) + case "grpc-timeout": + d.data.timeoutSet = true + var err error + if d.data.timeout, err = decodeTimeout(f.Value); err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) + } + case ":path": + d.data.method = f.Value + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) + return + } + d.data.httpStatus = &code + case "grpc-tags-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + return + } + d.data.statsTags = v + d.addMetadata(f.Name, string(v)) + case "grpc-trace-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) + return + } + d.data.statsTrace = v + d.addMetadata(f.Name, string(v)) + default: + if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { + break + } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + return + } + d.addMetadata(f.Name, v) + } +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 
'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if m := d % r; m > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. +func encodeTimeout(t time.Duration) string { + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + if size > 9 { + // Spec allows for 8 digits plus the unit. + return 0, fmt.Errorf("transport: timeout string is too long: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + const maxHours = math.MaxInt64 / int64(time.Hour) + if d == time.Hour && t > maxHours { + // This timeout would overflow math.MaxInt64; clamp it. + return time.Duration(math.MaxInt64), nil + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildeByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + buf.WriteString(fmt.Sprintf("%%%02X", b)) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. 
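The encodeTimeout and decodeTimeout functions shown earlier in this hunk implement the grpc-timeout header format: at most eight ASCII digits followed by a single unit letter, with the value rounded up and promoted to a coarser unit whenever it would not fit in eight digits. A standalone sketch of that selection rule, assuming only the standard library (an illustration, not the vendored code path):

package main

import (
	"fmt"
	"strconv"
	"time"
)

const maxDigits = 99999999 // at most 8 digits on the wire

func roundUpDiv(d, unit time.Duration) int64 {
	if d%unit > 0 {
		return int64(d/unit) + 1
	}
	return int64(d / unit)
}

func encode(d time.Duration) string {
	units := []struct {
		u time.Duration
		s string
	}{
		{time.Nanosecond, "n"}, {time.Microsecond, "u"}, {time.Millisecond, "m"},
		{time.Second, "S"}, {time.Minute, "M"}, {time.Hour, "H"},
	}
	for _, x := range units {
		if v := roundUpDiv(d, x.u); v <= maxDigits {
			return strconv.FormatInt(v, 10) + x.s
		}
	}
	return strconv.FormatInt(roundUpDiv(d, time.Hour), 10) + "H"
}

func main() {
	for _, d := range []time.Duration{250 * time.Millisecond, 30 * time.Second, 200000 * time.Hour} {
		fmt.Printf("%v -> %q\n", d, encode(d))
	}
}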
+ // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + buf.WriteByte(b) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", b)) + } + } + msg = msg[size:] + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + +type bufWriter struct { + buf []byte + offset int + batchSize int + conn net.Conn + err error + + onFlush func() +} + +func newBufWriter(conn net.Conn, batchSize int) *bufWriter { + return &bufWriter{ + buf: make([]byte, batchSize*2), + batchSize: batchSize, + conn: conn, + } +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. + return w.conn.Write(b) + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + if w.onFlush != nil { + w.onFlush() + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.offset = 0 + return w.err +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + w := newBufWriter(conn, writeBufferSize) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. + f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go new file mode 100644 index 0000000000000..879df80c4de7f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/log.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains wrappers for grpclog functions. 
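newBufWriter above trades a little latency for fewer syscalls: writes accumulate in a buffer sized at twice the batch size and only reach the connection once the offset crosses batchSize or Flush is called explicitly (and buffering is bypassed entirely when batchSize is 0). The effect is easy to see with the standard library's bufio.Writer, which applies the same batching idea even though its exact flush condition differs from bufWriter's; countingWriter below is an illustrative stand-in for a net.Conn:

package main

import (
	"bufio"
	"fmt"
)

type countingWriter struct{ calls, bytes int }

func (c *countingWriter) Write(p []byte) (int, error) {
	c.calls++
	c.bytes += len(p)
	return len(p), nil
}

func main() {
	under := &countingWriter{}
	w := bufio.NewWriterSize(under, 32*1024) // stdlib analogue of a 32 KiB write batch
	for i := 0; i < 1000; i++ {
		w.Write(make([]byte, 100)) // many small header/data frames
	}
	w.Flush()
	fmt.Printf("wrote %d bytes in %d underlying writes\n", under.bytes, under.calls)
}

With a 32 KiB buffer, the thousand 100-byte writes collapse into a handful of writes against the underlying connection.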
+// The transport package only logs to verbose level 2 by default. + +package transport + +import "google.golang.org/grpc/grpclog" + +const logLevel = 2 + +func infof(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Infof(format, args...) + } +} + +func warningf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Warningf(format, args...) + } +} + +func errorf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Errorf(format, args...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 0000000000000..1c1d106709ac0 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,816 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *bufferPool) get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func (p *bufferPool) put(b *bytes.Buffer) { + p.pool.Put(b) +} + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + buffer *bytes.Buffer + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// Note recvBuffer differs from controlBuffer only in that recvBuffer +// holds a channel of only recvMsg structs instead of objects implementing "item" interface. +// recvBuffer is written to much more often than +// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg + err error +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if b.err != nil { + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. 
+ return + } + b.err = r.err + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *recvBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives a recvMsg in the buffer. +// +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { + return b.c +} + +// recvBufferReader implements io.Reader interface to read the data from +// recvBuffer. +type recvBufferReader struct { + closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last *bytes.Buffer // Stores the remaining data in the previous calls. + err error + freeBuffer func(*bytes.Buffer) +} + +// Read reads the next len(p) bytes from last. If last is drained, it tries to +// read additional data from recv. It blocks if there no additional data available +// in recv. If Read returns any non-nil error, it will continue to return that error. +func (r *recvBufferReader) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + if r.last != nil { + // Read remaining data left in last call. + copied, _ := r.last.Read(p) + if r.last.Len() == 0 { + r.freeBuffer(r.last) + r.last = nil + } + return copied, nil + } + if r.closeStream != nil { + n, r.err = r.readClient(p) + } else { + n, r.err = r.read(p) + } + return n, r.err +} + +func (r *recvBufferReader) read(p []byte) (n int, err error) { + select { + case <-r.ctxDone: + return 0, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, p) + } +} + +func (r *recvBufferReader) readClient(p []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. 
+ r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readAdditional(m, p) + case m := <-r.recv.get(): + return r.readAdditional(m, p) + } +} + +func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { + r.recv.load() + if m.err != nil { + return 0, m.err + } + copied, _ := m.buffer.Read(p) + if m.buffer.Len() == 0 { + r.freeBuffer(m.buffer) + r.last = nil + } else { + r.last = m.buffer + } + return copied, nil +} + +type streamState uint32 + +const ( + streamActive streamState = iota + streamWriteDone // EndStream sent + streamReadDone // EndStream received + streamDone // the entire stream is finished. +) + +// Stream represents an RPC in the transport layer. +type Stream struct { + id uint32 + st ServerTransport // nil for client side Stream + ctx context.Context // the associated context of the stream + cancel context.CancelFunc // always nil for client side Stream + done chan struct{} // closed at the end of stream to unblock writers. On the client side. + ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) + method string // the associated RPC method of the stream + recvCompress string + sendCompress string + buf *recvBuffer + trReader io.Reader + fc *inFlow + wq *writeQuota + + // Callback to state application's intentions to read data. This + // is used to adjust flow control, if needed. + requestRead func(int) + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + + // hdrMu protects header and trailer metadata on the server-side. + hdrMu sync.Mutex + // On client side, header keeps the received header metadata. + // + // On server side, header keeps the header set by SetHeader(). The complete + // header will merged into this after t.WriteHeader() is called. + header metadata.MD + trailer metadata.MD // the key-value map of trailer metadata. + + noHeaders bool // set if the client never received headers (set only after the stream is done). + + // On the server-side, headerSent is atomically set to 1 when the headers are sent out. + headerSent uint32 + + state streamState + + // On client-side it is the status error received from the server. + // On server-side it is unused. + status *status.Status + + bytesReceived uint32 // indicates whether any bytes have been received on this stream + unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream + + // contentSubtype is the content-subtype for requests. + // this must be lowercase or the behavior is undefined. + contentSubtype string +} + +// isHeaderSent is only valid on the server-side. +func (s *Stream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was alreay set. It is valid only on server-side. 
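The Stream defined above keeps headerSent and state as plain uint32 fields and, as the helpers that follow show, mutates them with atomic Swap and CompareAndSwap rather than a mutex: Swap gives a cheap "do this exactly once", CompareAndSwap gives race-free state transitions. A small standalone sketch of that pattern, using stand-in names rather than the vendored types:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const (
	active uint32 = iota
	done
)

type stream struct {
	headerSent uint32
	state      uint32
}

// sendHeaderOnce returns true only for the goroutine that flipped 0 -> 1.
func (s *stream) sendHeaderOnce() bool {
	return atomic.SwapUint32(&s.headerSent, 1) == 0
}

// finish returns true only for the goroutine that moved active -> done.
func (s *stream) finish() bool {
	return atomic.CompareAndSwapUint32(&s.state, active, done)
}

func main() {
	s := &stream{}
	var wg sync.WaitGroup
	var sent, finished int32
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if s.sendHeaderOnce() {
				atomic.AddInt32(&sent, 1)
			}
			if s.finish() {
				atomic.AddInt32(&finished, 1)
			}
		}()
	}
	wg.Wait()
	fmt.Println(sent, finished) // always prints: 1 1
}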
+func (s *Stream) updateHeaderSent() bool { + return atomic.SwapUint32(&s.headerSent, 1) == 1 +} + +func (s *Stream) swapState(st streamState) streamState { + return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) +} + +func (s *Stream) compareAndSwapState(oldState, newState streamState) bool { + return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState)) +} + +func (s *Stream) getState() streamState { + return streamState(atomic.LoadUint32((*uint32)(&s.state))) +} + +func (s *Stream) waitOnHeader() error { + if s.headerChan == nil { + // On the server headerChan is always nil since a stream originates + // only after having received headers. + return nil + } + select { + case <-s.ctx.Done(): + // We prefer success over failure when reading messages because we delay + // context error in stream.Read(). To keep behavior consistent, we also + // prefer success here. + select { + case <-s.headerChan: + return nil + default: + } + return ContextErr(s.ctx.Err()) + case <-s.headerChan: + return nil + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + if err := s.waitOnHeader(); err != nil { + return "" + } + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(str string) { + s.sendCompress = str +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *Stream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. +// +// On client side, it acquires the key-value pairs of header metadata once it is +// available. It blocks until i) the metadata is ready or ii) there is no header +// metadata or iii) the stream is canceled/expired. +// +// On server side, it returns the out header after t.WriteHeader is called. +func (s *Stream) Header() (metadata.MD, error) { + if s.headerChan == nil && s.header != nil { + // On server side, return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil + } + err := s.waitOnHeader() + // Even if the stream is closed, header is returned if available. + select { + case <-s.headerChan: + if s.header == nil { + return nil, nil + } + return s.header.Copy(), nil + default: + } + return nil, err +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, returns true, nil. If a context error happens +// first, returns it as a status error. Client-side only. +func (s *Stream) TrailersOnly() (bool, error) { + err := s.waitOnHeader() + if err != nil { + return false, err + } + return s.noHeaders, nil +} + +// Trailer returns the cached trailer metedata. Note that if it is not called +// after the entire stream is done, it could return an empty MD. Client +// side only. +// It can be safely read only after stream has ended that is either read +// or write have returned io.EOF. +func (s *Stream) Trailer() metadata.MD { + c := s.trailer.Copy() + return c +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. 
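waitOnHeader above resolves a race deliberately: when both the context and headerChan are ready, the closed headerChan wins, so a caller that raced with cancellation still sees the header. The mechanism is a nested non-blocking select; a standalone sketch of that "prefer success" pattern with plain channels standing in for the stream internals:

package main

import (
	"context"
	"fmt"
)

func wait(ctx context.Context, header <-chan struct{}) error {
	select {
	case <-ctx.Done():
		// ctx fired, but give the preferred channel one last
		// non-blocking look before reporting the error.
		select {
		case <-header:
			return nil
		default:
		}
		return ctx.Err()
	case <-header:
		return nil
	}
}

func main() {
	header := make(chan struct{})
	close(header)
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	// Both channels are ready; the header still wins.
	fmt.Println(wait(ctx, header)) // <nil>
}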
See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *Stream) ContentSubtype() string { + return s.contentSubtype +} + +// Context returns the context of the stream. +func (s *Stream) Context() context.Context { + return s.ctx +} + +// Method returns the method for the stream. +func (s *Stream) Method() string { + return s.method +} + +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, after Done() is closed. +func (s *Stream) Status() *status.Status { + return s.status +} + +// SetHeader sets the header metadata. This can be called multiple times. +// Server side only. +// This should not be called in parallel to other data writes. +func (s *Stream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SendHeader sends the given header metadata. The given metadata is +// combined with any metadata set by previous calls to SetHeader and +// then written to the transport stream. +func (s *Stream) SendHeader(md metadata.MD) error { + return s.st.WriteHeader(s, md) +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. Server side only. +// This should not be called parallel to other data writes. +func (s *Stream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} + +func (s *Stream) write(m recvMsg) { + s.buf.put(m) +} + +// Read reads all p bytes from the wire for this stream. +func (s *Stream) Read(p []byte) (n int, err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.(*transportReader).er; er != nil { + return 0, er + } + s.requestRead(len(p)) + return io.ReadFull(s.trReader, p) +} + +// tranportReader reads all the data available for this Stream from the transport and +// passes them into the decoder, which converts them into a gRPC message stream. +// The error is io.EOF when the stream is done or another non-nil error if +// the stream broke. +type transportReader struct { + reader io.Reader + // The handler to control the window update procedure for both this + // particular stream and the associated transport. + windowHandler func(int) + er error +} + +func (t *transportReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) + if err != nil { + t.er = err + return + } + t.windowHandler(n) + return +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *Stream) BytesReceived() bool { + return atomic.LoadUint32(&s.bytesReceived) == 1 +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. +func (s *Stream) Unprocessed() bool { + return atomic.LoadUint32(&s.unprocessed) == 1 +} + +// GoString is implemented by Stream so context.String() won't +// race when printing %#v. 
+func (s *Stream) GoString() string {
+ return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+ reachable transportState = iota
+ closing
+ draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct {
+ MaxStreams uint32
+ AuthInfo credentials.AuthInfo
+ InTapHandle tap.ServerInHandle
+ StatsHandler stats.Handler
+ KeepaliveParams keepalive.ServerParameters
+ KeepalivePolicy keepalive.EnforcementPolicy
+ InitialWindowSize int32
+ InitialConnWindowSize int32
+ WriteBufferSize int
+ ReadBufferSize int
+ ChannelzParentID int64
+ MaxHeaderListSize *uint32
+}
+
+// NewServerTransport creates a ServerTransport with conn or non-nil error
+// if it fails.
+func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
+ return newHTTP2Server(conn, config)
+}
+
+// ConnectOptions covers all relevant options for communicating with the server.
+type ConnectOptions struct {
+ // UserAgent is the application user agent.
+ UserAgent string
+ // Dialer specifies how to dial a network address.
+ Dialer func(context.Context, string) (net.Conn, error)
+ // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
+ FailOnNonTempDialError bool
+ // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
+ PerRPCCredentials []credentials.PerRPCCredentials
+ // TransportCredentials stores the Authenticator required to setup a client
+ // connection. Only one of TransportCredentials and CredsBundle is non-nil.
+ TransportCredentials credentials.TransportCredentials
+ // CredsBundle is the credentials bundle to be used. Only one of
+ // TransportCredentials and CredsBundle is non-nil.
+ CredsBundle credentials.Bundle
+ // KeepaliveParams stores the keepalive parameters.
+ KeepaliveParams keepalive.ClientParameters
+ // StatsHandler stores the handler for stats.
+ StatsHandler stats.Handler
+ // InitialWindowSize sets the initial window size for a stream.
+ InitialWindowSize int32
+ // InitialConnWindowSize sets the initial window size for a connection.
+ InitialConnWindowSize int32
+ // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire.
+ WriteBufferSize int
+ // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
+ ReadBufferSize int
+ // ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
+ ChannelzParentID int64
+ // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
+ MaxHeaderListSize *uint32
+}
+
+// TargetInfo contains the information of the target such as network address and metadata.
+type TargetInfo struct {
+ Addr string
+ Metadata interface{}
+ Authority string
+}
+
+// NewClientTransport establishes the transport with the required ConnectOptions
+// and returns it to the caller.
+func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
+ return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
+}
+
+// Options provides additional hints and information for message
+// transmission.
+type Options struct {
+ // Last indicates whether this write is the last piece for
+ // this stream. 
+ Last bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + + // ContentSubtype specifies the content-subtype for a request. For example, a + // content-subtype of "proto" will result in a content-type of + // "application/grpc+proto". The value of ContentSubtype must be all + // lowercase, otherwise the behavior is undefined. See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + ContentSubtype string + + PreviousAttempts int // value of grpc-previous-rpc-attempts header to set +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close() error + + // GracefulClose starts to tear down the transport: the transport will stop + // accepting new RPCs and NewStream will return error. Once all streams are + // finished, the transport will close. + // + // It does not block. + GracefulClose() + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // NewStream creates a Stream for an RPC. + NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) + + // Error returns a channel that is closed when some I/O error + // happens. Typically the caller should have a goroutine to monitor + // this in order to take action (e.g., close the current transport + // and create a new one) in error case. It should not return nil + // once the transport is initiated. + Error() <-chan struct{} + + // GoAway returns a channel that is closed when ClientTransport + // receives the draining signal from the server (e.g., GOAWAY frame in + // HTTP/2). + GoAway() <-chan struct{} + + // GetGoAwayReason returns the reason why GoAway frame was received. + GetGoAwayReason() GoAwayReason + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// ServerTransport is the common interface for all gRPC server-side transport +// implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. +type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. + HandleStreams(func(*Stream), func(context.Context, string) context.Context) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. 
+ WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error + + // Close tears down the transport. Once it is called, the transport + // should not be accessed any more. All the pending streams and their + // handlers will be terminated asynchronously. + Close() error + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // Drain notifies the client this ServerTransport stops accepting new RPCs. + Drain() + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// connectionErrorf creates an ConnectionError with the specified error description. +func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + temp: temp, + err: e, + } +} + +// ConnectionError is an error that results in the termination of the +// entire connection and the retry of all the active streams. +type ConnectionError struct { + Desc string + temp bool + err error +} + +func (e ConnectionError) Error() string { + return fmt.Sprintf("connection error: desc = %q", e.Desc) +} + +// Temporary indicates if this connection error is temporary or fatal. +func (e ConnectionError) Temporary() bool { + return e.temp +} + +// Origin returns the original error of this connection error. +func (e ConnectionError) Origin() error { + // Never return nil error here. + // If the original error is nil, return itself. + if e.err == nil { + return e + } + return e.err +} + +var ( + // ErrConnClosing indicates that the transport is closing. + ErrConnClosing = connectionErrorf(true, nil, "transport is closing") + // errStreamDrain indicates that the stream is rejected because the + // connection is draining. This could be caused by goaway or balancer + // removing the address. + errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") + // errStreamDone is returned from write at the client side to indiacte application + // layer of an error. + errStreamDone = errors.New("the stream is done") + // StatusGoAway indicates that the server sent a GOAWAY that included this + // stream's ID in unprocessed RPCs. + statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") +) + +// GoAwayReason contains the reason for the GoAway frame received. +type GoAwayReason uint8 + +const ( + // GoAwayInvalid indicates that no GoAway frame is received. + GoAwayInvalid GoAwayReason = 0 + // GoAwayNoReason is the default value when GoAway frame is received. + GoAwayNoReason GoAwayReason = 1 + // GoAwayTooManyPings indicates that a GoAway frame with + // ErrCodeEnhanceYourCalm was received and that the debug data said + // "too_many_pings". + GoAwayTooManyPings GoAwayReason = 2 +) + +// channelzData is used to store channelz related data for http2Client and http2Server. +// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. 
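ConnectionError above carries a temp flag and exposes it through Temporary(), the conventional Go shape for letting callers separate retryable transport failures from fatal ones. A standalone sketch of consuming that convention via an interface assertion; connErr here is a stand-in type, not the vendored one:

package main

import (
	"errors"
	"fmt"
)

type connErr struct {
	desc string
	temp bool
}

func (e connErr) Error() string   { return "connection error: " + e.desc }
func (e connErr) Temporary() bool { return e.temp }

// shouldRetry reports whether err advertises itself as temporary.
func shouldRetry(err error) bool {
	var t interface{ Temporary() bool }
	return errors.As(err, &t) && t.Temporary()
}

func main() {
	fmt.Println(shouldRetry(connErr{"transport is closing", true})) // true
	fmt.Println(shouldRetry(errors.New("permanent failure")))       // false
}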
+// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + kpCount int64 + // The number of streams that have started, including already finished ones. + streamsStarted int64 + // Client side: The number of streams that have ended successfully by receiving + // EoS bit set frame from server. + // Server side: The number of streams that have ended successfully by sending + // frame with EoS bit set. + streamsSucceeded int64 + streamsFailed int64 + // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type + // instead of time.Time since it's more costly to atomically update time.Time variable than int64 + // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. + lastStreamCreatedTime int64 + msgSent int64 + msgRecv int64 + lastMsgSentTime int64 + lastMsgRecvTime int64 +} + +// ContextErr converts the error from context package into a status error. +func ContextErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err) +} diff --git a/vendor/google.golang.org/grpc/keepalive/BUILD.bazel b/vendor/google.golang.org/grpc/keepalive/BUILD.bazel new file mode 100644 index 0000000000000..96b18cff2b149 --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["keepalive.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/keepalive", + importpath = "google.golang.org/grpc/keepalive", + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 0000000000000..34d31b5e7d311 --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point +// healthcheck. +package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. +type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. + // If set below 10s, a minimum value of 10s will be used instead. 
+ Time time.Duration // The current default value is infinity. + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. + PermitWithoutStream bool // false by default. +} + +// ServerParameters is used to set keepalive and max-age parameters on the +// server-side. +type ServerParameters struct { + // MaxConnectionIdle is a duration for the amount of time after which an + // idle connection would be closed by sending a GoAway. Idleness duration is + // defined since the most recent time the number of outstanding RPCs became + // zero or the connection establishment. + MaxConnectionIdle time.Duration // The current default value is infinity. + // MaxConnectionAge is a duration for the maximum amount of time a + // connection may exist before it will be closed by sending a GoAway. A + // random jitter of +/-10% will be added to MaxConnectionAge to spread out + // connection storms. + MaxConnectionAge time.Duration // The current default value is infinity. + // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after + // which the connection will be forcibly closed. + MaxConnectionAgeGrace time.Duration // The current default value is infinity. + // After a duration of this time if the server doesn't see any activity it + // pings the client to see if the transport is still alive. + // If set below 1s, a minimum value of 1s will be used instead. + Time time.Duration // The current default value is 2 hours. + // After having pinged for keepalive check, the server waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. +} + +// EnforcementPolicy is used to set keepalive enforcement policy on the +// server-side. Server will close connection with a client that violates this +// policy. +type EnforcementPolicy struct { + // MinTime is the minimum amount of time a client should wait before sending + // a keepalive ping. + MinTime time.Duration // The current default value is 5 minutes. + // If true, server allows keepalive pings even when there are no active + // streams(RPCs). If false, and client sends ping when there are no active + // streams, server will send GOAWAY and close the connection. + PermitWithoutStream bool // false by default. +} diff --git a/vendor/google.golang.org/grpc/metadata/BUILD.bazel b/vendor/google.golang.org/grpc/metadata/BUILD.bazel new file mode 100644 index 0000000000000..fcb34f5b33f5d --- /dev/null +++ b/vendor/google.golang.org/grpc/metadata/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["metadata.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/metadata", + importpath = "google.golang.org/grpc/metadata", + visibility = ["//visibility:public"], +) diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go new file mode 100644 index 0000000000000..cf6d1b94781ca --- /dev/null +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -0,0 +1,209 @@ +/* + * + * Copyright 2014 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata define the structure of the metadata supported by gRPC library. +// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md +// for more information about custom-metadata. +package metadata // import "google.golang.org/grpc/metadata" + +import ( + "context" + "fmt" + "strings" +) + +// DecodeKeyValue returns k, v, nil. +// +// Deprecated: use k and v directly instead. +func DecodeKeyValue(k, v string) (string, string, error) { + return k, v, nil +} + +// MD is a mapping from metadata keys to values. Users should use the following +// two convenience functions New and Pairs to generate MD. +type MD map[string][]string + +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func New(m map[string]string) MD { + md := MD{} + for k, val := range m { + key := strings.ToLower(k) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := MD{} + var key string + for i, s := range kv { + if i%2 == 0 { + key = strings.ToLower(s) + continue + } + md[key] = append(md[key], s) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + return Join(md) +} + +// Get obtains the values for a given key. +func (md MD) Get(k string) []string { + k = strings.ToLower(k) + return md[k] +} + +// Set sets the value of a given key with a slice of values. +func (md MD) Set(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} + +// Append adds the values to key k, not overwriting what was already stored at that key. +func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + +// Join joins any number of mds into a single MD. +// The order of values for each key is determined by the order in which +// the mds containing those values are presented to Join. 
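The MD helpers above normalize keys to lowercase and accumulate values, so Append and Join add to earlier entries instead of overwriting them. A short usage sketch against the package as vendored here:

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("X-Request-Id", "42", "authorization", "Bearer abc")
	md.Append("x-request-id", "43") // same key once lowercased

	more := metadata.New(map[string]string{"user-agent": "example/1.0"})
	all := metadata.Join(md, more)

	fmt.Println(all.Get("x-request-id")) // [42 43]
	fmt.Println(all.Len())               // 3 distinct keys
}

As the doc comments above note, keys beginning with "grpc-" are reserved for internal use and may be rejected if set by applications.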
+func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the +// documentation of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + added[len(added)-1] = make([]string, len(kv)) + copy(added[len(added)-1], kv) + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromIncomingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdIncomingKey{}).(MD) + return +} + +// FromOutgoingContextRaw returns the un-merged, intermediary contents +// of rawMD. Remember to perform strings.ToLower on the keys. The returned +// MD should not be modified. Writing to it may cause races. Modification +// should be made to copies of the returned MD. +// +// This is intended for gRPC-internal use ONLY. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. 
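NewOutgoingContext, AppendToOutgoingContext and FromOutgoingContext above form the client-side metadata flow: the first installs a base MD, the second layers extra pairs on top without merging eagerly, and the third merges everything on demand. A brief usage sketch against the vendored package:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	ctx := metadata.NewOutgoingContext(context.Background(),
		metadata.Pairs("authorization", "Bearer abc"))
	ctx = metadata.AppendToOutgoingContext(ctx, "x-request-id", "42")

	md, ok := metadata.FromOutgoingContext(ctx)
	fmt.Println(ok, md.Get("authorization"), md.Get("x-request-id"))
	// true [Bearer abc] [42]
}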
+func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + mds := make([]MD, 0, len(raw.added)+1) + mds = append(mds, raw.md) + for _, vv := range raw.added { + mds = append(mds, Pairs(vv...)) + } + return Join(mds...), ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/vendor/google.golang.org/grpc/naming/BUILD.bazel b/vendor/google.golang.org/grpc/naming/BUILD.bazel new file mode 100644 index 0000000000000..98c380f36934c --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "dns_resolver.go", + "naming.go", + ], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/naming", + importpath = "google.golang.org/grpc/naming", + visibility = ["//visibility:public"], + deps = ["//vendor/google.golang.org/grpc/grpclog:go_default_library"], +) diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go new file mode 100644 index 0000000000000..c9f79dc533624 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -0,0 +1,293 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "context" + "errors" + "fmt" + "net" + "strconv" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 +) + +var ( + errMissingAddr = errors.New("missing address") + errWatcherClose = errors.New("watcher has been closed") + + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV +) + +// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and +// create watchers that poll the DNS server using the frequency set by freq. +func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { + return &dnsResolver{freq: freq}, nil +} + +// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create +// watchers that poll the DNS server using the default frequency defined by defaultFreq. +func NewDNSResolver() (Resolver, error) { + return NewDNSResolverWithFreq(defaultFreq) +} + +// dnsResolver handles name resolution for names following the DNS scheme +type dnsResolver struct { + // frequency of polling the DNS server that the watchers created by this resolver will use. + freq time.Duration +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. 
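The dnsResolver above polls DNS at a fixed frequency and reports address deltas through the deprecated naming.Watcher interface. A usage sketch of that flow against the package as vendored; it needs live DNS to actually resolve anything and is shown only to illustrate the Resolve/Next/Close cycle:

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/naming"
)

func main() {
	r, err := naming.NewDNSResolverWithFreq(5 * time.Minute)
	if err != nil {
		panic(err)
	}
	w, err := r.Resolve("example.com:443")
	if err != nil {
		panic(err)
	}
	defer w.Close()

	updates, err := w.Next() // the first call returns the full address set
	if err != nil {
		panic(err)
	}
	for _, u := range updates {
		fmt.Println(u.Op, u.Addr) // Op 0 is Add, per the Operation constants below
	}
}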
+func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. +// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err := net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v", target) +} + +// Resolve creates a watcher that watches the name resolution of the target. +func (r *dnsResolver) Resolve(target string) (Watcher, error) { + host, port, err := parseTarget(target) + if err != nil { + return nil, err + } + + if net.ParseIP(host) != nil { + ipWatcher := &ipWatcher{ + updateChan: make(chan *Update, 1), + } + host, _ = formatIP(host) + ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} + return ipWatcher, nil + } + + ctx, cancel := context.WithCancel(context.Background()) + return &dnsWatcher{ + r: r, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + t: time.NewTimer(0), + }, nil +} + +// dnsWatcher watches for the name resolution update for a specific target +type dnsWatcher struct { + r *dnsResolver + host string + port string + // The latest resolved address set + curAddrs map[string]*Update + ctx context.Context + cancel context.CancelFunc + t *time.Timer +} + +// ipWatcher watches for the name resolution update for an IP address. +type ipWatcher struct { + updateChan chan *Update +} + +// Next returns the address resolution Update for the target. For IP address, +// the resolution is itself, thus polling name server is unnecessary. Therefore, +// Next() will return an Update the first time it is called, and will be blocked +// for all following calls as no Update exists until watcher is closed. +func (i *ipWatcher) Next() ([]*Update, error) { + u, ok := <-i.updateChan + if !ok { + return nil, errWatcherClose + } + return []*Update{u}, nil +} + +// Close closes the ipWatcher. +func (i *ipWatcher) Close() { + close(i.updateChan) +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the server is a backend server. 
+ Backend AddressType = iota + // GRPCLB indicates the server is a grpclb load balancer. + GRPCLB +) + +// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The +// name resolver used by the grpclb balancer is required to provide this type of metadata in +// its address updates. +type AddrMetadataGRPCLB struct { + // AddrType is the type of server (grpc load balancer or backend). + AddrType AddressType + // ServerName is the name of the grpc load balancer. Used for authentication. + ServerName string +} + +// compileUpdate compares the old resolved addresses and newly resolved addresses, +// and generates an update list +func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { + var res []*Update + for a, u := range w.curAddrs { + if _, ok := newAddrs[a]; !ok { + u.Op = Delete + res = append(res, u) + } + } + for a, u := range newAddrs { + if _, ok := w.curAddrs[a]; !ok { + res = append(res, u) + } + } + return res +} + +func (w *dnsWatcher) lookupSRV() map[string]*Update { + newAddrs := make(map[string]*Update) + _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(w.ctx, s.Target) + if err != nil { + grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs[addr] = &Update{Addr: addr, + Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} + } + } + return newAddrs +} + +func (w *dnsWatcher) lookupHost() map[string]*Update { + newAddrs := make(map[string]*Update) + addrs, err := lookupHost(w.ctx, w.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + w.port + newAddrs[addr] = &Update{Addr: addr} + } + return newAddrs +} + +func (w *dnsWatcher) lookup() []*Update { + newAddrs := w.lookupSRV() + if newAddrs == nil { + // If failed to get any balancer address (either no corresponding SRV for the + // target, or caused by failure during resolution/parsing of the balancer target), + // return any A record info available. + newAddrs = w.lookupHost() + } + result := w.compileUpdate(newAddrs) + w.curAddrs = newAddrs + return result +} + +// Next returns the resolved address update(delta) for the target. If there's no +// change, it will sleep for 30 mins and try to resolve again after that. +func (w *dnsWatcher) Next() ([]*Update, error) { + for { + select { + case <-w.ctx.Done(): + return nil, errWatcherClose + case <-w.t.C: + } + result := w.lookup() + // Next lookup should happen after an interval defined by w.r.freq. + w.t.Reset(w.r.freq) + if len(result) > 0 { + return result, nil + } + } +} + +func (w *dnsWatcher) Close() { + w.cancel() +} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go new file mode 100644 index 0000000000000..f4c1c8b689474 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -0,0 +1,68 @@ +/* + * + * Copyright 2014 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package naming defines the naming API and related data structures for gRPC. +// +// This package is deprecated: please use package resolver instead. +package naming + +// Operation defines the corresponding operations for a name resolution change. +// +// Deprecated: please use package resolver. +type Operation uint8 + +const ( + // Add indicates a new address is added. + Add Operation = iota + // Delete indicates an existing address is deleted. + Delete +) + +// Update defines a name resolution update. Notice that it is not valid having both +// empty string Addr and nil Metadata in an Update. +// +// Deprecated: please use package resolver. +type Update struct { + // Op indicates the operation of the update. + Op Operation + // Addr is the updated address. It is empty string if there is no address update. + Addr string + // Metadata is the updated metadata. It is nil if there is no metadata update. + // Metadata is not required for a custom naming implementation. + Metadata interface{} +} + +// Resolver creates a Watcher for a target to track its resolution changes. +// +// Deprecated: please use package resolver. +type Resolver interface { + // Resolve creates a Watcher for target. + Resolve(target string) (Watcher, error) +} + +// Watcher watches for the updates on the specified target. +// +// Deprecated: please use package resolver. +type Watcher interface { + // Next blocks until an update or error happens. It may return one or more + // updates. The first call should get the full set of the results. It should + // return an error if and only if Watcher cannot recover. + Next() ([]*Update, error) + // Close closes the Watcher. + Close() +} diff --git a/vendor/google.golang.org/grpc/peer/BUILD.bazel b/vendor/google.golang.org/grpc/peer/BUILD.bazel new file mode 100644 index 0000000000000..97a07eb951eeb --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["peer.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/peer", + importpath = "google.golang.org/grpc/peer", + visibility = ["//visibility:public"], + deps = ["//vendor/google.golang.org/grpc/credentials:go_default_library"], +) diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 0000000000000..e01d219ffbc58 --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 0000000000000..45baa2ae13da9 --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "io" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" +) + +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker + + // The latest connection happened. + connErrMu sync.Mutex + connErr error +} + +func newPickerWrapper() *pickerWrapper { + bp := &pickerWrapper{blockingCh: make(chan struct{})} + return bp +} + +func (bp *pickerWrapper) updateConnectionError(err error) { + bp.connErrMu.Lock() + bp.connErr = err + bp.connErrMu.Unlock() +} + +func (bp *pickerWrapper) connectionError() error { + bp.connErrMu.Lock() + err := bp.connErr + bp.connErrMu.Unlock() + return err +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (bp *pickerWrapper) updatePicker(p balancer.Picker) { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return + } + bp.picker = p + // bp.blockingCh should never be nil. 
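Illustrative sketch (not vendored code): the peer package introduced above is normally read back out of a handler or interceptor context. A minimal server-side unary interceptor, assuming the interceptor name and log format are placeholders:

    package main

    import (
        "context"
        "log"
        "net"

        "google.golang.org/grpc"
        "google.golang.org/grpc/peer"
    )

    // logPeer records the remote address that the transport attached to the
    // RPC context via peer.NewContext.
    func logPeer(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
        if p, ok := peer.FromContext(ctx); ok {
            log.Printf("%s called from %s", info.FullMethod, p.Addr)
        }
        return handler(ctx, req)
    }

    func main() {
        lis, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            log.Fatal(err)
        }
        s := grpc.NewServer(grpc.UnaryInterceptor(logPeer))
        // Register services here before serving.
        log.Fatal(s.Serve(lis))
    }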
+ close(bp.blockingCh) + bp.blockingCh = make(chan struct{}) + bp.mu.Unlock() +} + +func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { + acw.mu.Lock() + ac := acw.ac + acw.mu.Unlock() + ac.incrCallsStarted() + return func(b balancer.DoneInfo) { + if b.Err != nil && b.Err != io.EOF { + ac.incrCallsFailed() + } else { + ac.incrCallsSucceeded() + } + if done != nil { + done(b) + } + } +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { + var ch chan struct{} + + for { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if bp.picker == nil { + ch = bp.blockingCh + } + if ch == bp.blockingCh { + // This could happen when either: + // - bp.picker is nil (the previous if condition), or + // - has called pick on the current picker. + bp.mu.Unlock() + select { + case <-ctx.Done(): + if connectionErr := bp.connectionError(); connectionErr != nil { + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr) + case context.Canceled: + return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr) + } + } + return nil, nil, ctx.Err() + case <-ch: + } + continue + } + + ch = bp.blockingCh + p := bp.picker + bp.mu.Unlock() + + subConn, done, err := p.Pick(ctx, opts) + + if err != nil { + switch err { + case balancer.ErrNoSubConnAvailable: + continue + case balancer.ErrTransientFailure: + if !failfast { + continue + } + return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError()) + case context.DeadlineExceeded: + return nil, nil, status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return nil, nil, status.Error(codes.Canceled, err.Error()) + default: + if _, ok := status.FromError(err); ok { + return nil, nil, err + } + // err is some other error. + return nil, nil, status.Error(codes.Unknown, err.Error()) + } + } + + acw, ok := subConn.(*acBalancerWrapper) + if !ok { + grpclog.Error("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if channelz.IsOn() { + return t, doneChannelzWrapper(acw, done), nil + } + return t, done, nil + } + if done != nil { + // Calling done with nil error, no bytes sent and no bytes received. + // DoneInfo with default value works. + done(balancer.DoneInfo{}) + } + grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. 
+ } +} + +func (bp *pickerWrapper) close() { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.done { + return + } + bp.done = true + close(bp.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 0000000000000..ed05b02ed96e0 --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,118 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// PickFirstBalancerName is the name of the pick_first balancer. +const PickFirstBalancerName = "pick_first" + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName +} + +type pickfirstBalancer struct { + cc balancer.ClientConn + sc balancer.SubConn +} + +func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) + } + return + } + if b.sc == nil { + b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + //TODO(yuxuanli): why not change the cc state to Idle? 
+ if grpclog.V(2) { + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + } + return + } + b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) + b.sc.Connect() + } else { + b.sc.UpdateAddresses(addrs) + b.sc.Connect() + } +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + } + if b.sc != sc { + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + } + return + } + if s == connectivity.Shutdown { + b.sc = nil + return + } + + switch s { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateBalancerState(s, &picker{sc: sc}) + case connectivity.Connecting: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) + case connectivity.TransientFailure: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + err error + sc balancer.SubConn +} + +func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + return p.sc, nil, nil +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go new file mode 100644 index 0000000000000..76acbbcc93b91 --- /dev/null +++ b/vendor/google.golang.org/grpc/preloader.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// PreparedMsg is responsible for creating a Marshalled and Compressed object. +// +// This API is EXPERIMENTAL. +type PreparedMsg struct { + // Struct for preparing msg before sending them + encodedData []byte + hdr []byte + payload []byte +} + +// Encode marshalls and compresses the message using the codec and compressor for the stream. 
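Illustrative sketch (not vendored code): the pick_first balancer registered above can be requested by name at dial time. A minimal example, assuming an insecure connection and a placeholder target; pick_first is also the default policy when no other load-balancing configuration is supplied:

    package main

    import (
        "log"

        "google.golang.org/grpc"
    )

    func main() {
        // PickFirstBalancerName is exported by the grpc package itself.
        conn, err := grpc.Dial("dns:///lb.example.com:443",
            grpc.WithInsecure(),
            grpc.WithBalancerName(grpc.PickFirstBalancerName),
        )
        if err != nil {
            log.Fatalf("dial: %v", err)
        }
        defer conn.Close()
    }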
+func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { + ctx := s.Context() + rpcInfo, ok := rpcInfoFromContext(ctx) + if !ok { + return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo") + } + + // check if the context has the relevant information to prepareMsg + if rpcInfo.preloaderInfo == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") + } + if rpcInfo.preloaderInfo.codec == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") + } + + // prepare the msg + data, err := encode(rpcInfo.preloaderInfo.codec, msg) + if err != nil { + return err + } + p.encodedData = data + compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + if err != nil { + return err + } + p.hdr, p.payload = msgHeader(data, compData) + return nil +} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go new file mode 100644 index 0000000000000..f8f69bfb70fd9 --- /dev/null +++ b/vendor/google.golang.org/grpc/proxy.go @@ -0,0 +1,152 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bufio" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" +) + +const proxyAuthHeaderKey = "Proxy-Authorization" + +var ( + // errDisabled indicates that proxy is disabled for the address. + errDisabled = errors.New("proxy is disabled for the address") + // The following variable will be overwritten in the tests. + httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(ctx context.Context, address string) (*url.URL, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return nil, err + } + if url == nil { + return nil, errDisabled + } + return url, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. 
+type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: backendAddr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + } + if t := proxyURL.User; t != nil { + u := t.Username() + p, _ := t.Password() + req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) + } + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + + return &bufConn{Conn: conn, r: r}, nil +} + +// newProxyDialer returns a dialer that connects to proxy first if necessary. +// The returned dialer checks if a proxy is necessary, dial to the proxy with the +// provided dialer, does HTTP CONNECT handshake and returns the connection. +func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { + return func(ctx context.Context, addr string) (conn net.Conn, err error) { + var newAddr string + proxyURL, err := mapAddress(ctx, addr) + if err != nil { + if err != errDisabled { + return nil, err + } + newAddr = addr + } else { + newAddr = proxyURL.Host + } + + conn, err = dialer(ctx, newAddr) + if err != nil { + return + } + if proxyURL != nil { + // proxy is disabled if proxyURL is nil. 
+ conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL) + } + return + } +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/resolver/BUILD.bazel b/vendor/google.golang.org/grpc/resolver/BUILD.bazel new file mode 100644 index 0000000000000..bcf1e756ea6ca --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["resolver.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/resolver", + importpath = "google.golang.org/grpc/resolver", + visibility = ["//visibility:public"], + deps = ["//vendor/google.golang.org/grpc/serviceconfig:go_default_library"], +) diff --git a/vendor/google.golang.org/grpc/resolver/dns/BUILD.bazel b/vendor/google.golang.org/grpc/resolver/dns/BUILD.bazel new file mode 100644 index 0000000000000..6e8fc98cb432c --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/BUILD.bazel @@ -0,0 +1,15 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["dns_resolver.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/resolver/dns", + importpath = "google.golang.org/grpc/resolver/dns", + visibility = ["//visibility:public"], + deps = [ + "//vendor/google.golang.org/grpc/grpclog:go_default_library", + "//vendor/google.golang.org/grpc/internal/backoff:go_default_library", + "//vendor/google.golang.org/grpc/internal/grpcrand:go_default_library", + "//vendor/google.golang.org/grpc/resolver:go_default_library", + ], +) diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 0000000000000..297492e87af45 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,457 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/resolver" +) + +func init() { + resolver.Register(NewBuilder()) +} + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 + defaultDNSSvrPort = "53" + golang = "GO" + // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + txtPrefix = "_grpc_config." + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. 
+ txtAttribute = "grpc_config=" +) + +var ( + errMissingAddr = errors.New("dns resolver: missing address") + + // Addresses ending with a colon that is supposed to be the separator + // between host and port is not allowed. E.g. "::" is a valid address as + // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with + // a colon as the host and port separator + errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +var ( + defaultResolver netResolver = net.DefaultResolver + // To prevent excessive re-resolution, we enforce a rate limit on DNS + // resolution requests. + minDNSResRate = 30 * time.Second +) + +var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + var dialer net.Dialer + return dialer.DialContext(ctx, network, authority) + } +} + +var customAuthorityResolver = func(authority string) (netResolver, error) { + host, port, err := parseTarget(authority, defaultDNSSvrPort) + if err != nil { + return nil, err + } + + authorityWithPort := net.JoinHostPort(host, port) + + return &net.Resolver{ + PreferGo: true, + Dial: customAuthorityDialler(authorityWithPort), + }, nil +} + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { + return &dnsBuilder{minFreq: defaultFreq} +} + +type dnsBuilder struct { + // minimum frequency of polling the DNS server. + minFreq time.Duration +} + +// Build creates and starts a DNS resolver that watches the name resolution of the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint, defaultPort) + if err != nil { + return nil, err + } + + // IP address. + if net.ParseIP(host) != nil { + host, _ = formatIP(host) + addr := []resolver.Address{{Addr: host + ":" + port}} + i := &ipResolver{ + cc: cc, + ip: addr, + rn: make(chan struct{}, 1), + q: make(chan struct{}), + } + cc.NewAddress(addr) + go i.watcher() + return i, nil + } + + // DNS address (non-IP). + ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + freq: b.minFreq, + backoff: backoff.Exponential{MaxDelay: b.minFreq}, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + t: time.NewTimer(0), + rn: make(chan struct{}, 1), + disableServiceConfig: opts.DisableServiceConfig, + } + + if target.Authority == "" { + d.resolver = defaultResolver + } else { + d.resolver, err = customAuthorityResolver(target.Authority) + if err != nil { + return nil, err + } + } + + d.wg.Add(1) + go d.watcher() + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +type netResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +// ipResolver watches for the name resolution update for an IP address. +type ipResolver struct { + cc resolver.ClientConn + ip []resolver.Address + // rn channel is used by ResolveNow() to force an immediate resolution of the target. 
+ rn chan struct{} + q chan struct{} +} + +// ResolveNow resend the address it stores, no resolution is needed. +func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) { + select { + case i.rn <- struct{}{}: + default: + } +} + +// Close closes the ipResolver. +func (i *ipResolver) Close() { + close(i.q) +} + +func (i *ipResolver) watcher() { + for { + select { + case <-i.rn: + i.cc.NewAddress(i.ip) + case <-i.q: + return + } + } +} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + freq time.Duration + backoff backoff.Exponential + retryCount int + host string + port string + resolver netResolver + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + t *time.Timer + // wg is used to enforce Close() to return after the watcher() goroutine has finished. + // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we + // replace the real lookup functions with mocked ones to facilitate testing. + // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes + // will warns lookup (READ the lookup function pointers) inside watcher() goroutine + // has data race with replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup + disableServiceConfig bool +} + +// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() + d.t.Stop() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + for { + select { + case <-d.ctx.Done(): + return + case <-d.t.C: + case <-d.rn: + if !d.t.Stop() { + // Before resetting a timer, it should be stopped to prevent racing with + // reads on it's channel. + <-d.t.C + } + } + + result, sc := d.lookup() + // Next lookup should happen within an interval defined by d.freq. It may be + // more often due to exponential retry on empty address list. + if len(result) == 0 { + d.retryCount++ + d.t.Reset(d.backoff.Backoff(d.retryCount)) + } else { + d.retryCount = 0 + d.t.Reset(d.freq) + } + d.cc.NewServiceConfig(sc) + d.cc.NewAddress(result) + + // Sleep to prevent excessive re-resolutions. Incoming resolution requests + // will be queued in d.rn. 
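Illustrative sketch (not vendored code): the dns builder above registers under the "dns" scheme, and a non-empty authority in the target is dialed as the DNS server via customAuthorityResolver. A minimal example, assuming a placeholder backend name; the blank import only guarantees the scheme is registered:

    package main

    import (
        "log"

        "google.golang.org/grpc"
        _ "google.golang.org/grpc/resolver/dns" // init() registers the "dns" scheme
    )

    func main() {
        // Authority "8.8.8.8:53" is used as the DNS server; endpoint
        // "lb.example.com:443" is the name to resolve.
        conn, err := grpc.Dial("dns://8.8.8.8:53/lb.example.com:443", grpc.WithInsecure())
        if err != nil {
            log.Fatalf("dial: %v", err)
        }
        defer conn.Close()
    }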
+ t := time.NewTimer(minDNSResRate) + select { + case <-t.C: + case <-d.ctx.Done(): + t.Stop() + return + } + } +} + +func (d *dnsResolver) lookupSRV() []resolver.Address { + var newAddrs []resolver.Address + _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + if err != nil { + grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) + } + } + return newAddrs +} + +func (d *dnsResolver) lookupTXT() string { + ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) + if err != nil { + grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err) + return "" + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as service config. + if !strings.HasPrefix(res, txtAttribute) { + grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute) + return "" + } + return strings.TrimPrefix(res, txtAttribute) +} + +func (d *dnsResolver) lookupHost() []resolver.Address { + var newAddrs []resolver.Address + addrs, err := d.resolver.LookupHost(d.ctx, d.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs +} + +func (d *dnsResolver) lookup() ([]resolver.Address, string) { + newAddrs := d.lookupSRV() + // Support fallback to non-balancer address. + newAddrs = append(newAddrs, d.lookupHost()...) + if d.disableServiceConfig { + return newAddrs, "" + } + sc := d.lookupTXT() + return newAddrs, canaryingSC(sc) +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. 
+// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + return "", "", errEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return grpcrand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/BUILD.bazel b/vendor/google.golang.org/grpc/resolver/passthrough/BUILD.bazel new file mode 100644 index 0000000000000..b730fe07df8b1 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/passthrough/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["passthrough.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/resolver/passthrough", + importpath = "google.golang.org/grpc/resolver/passthrough", + visibility = ["//visibility:public"], + deps = ["//vendor/google.golang.org/grpc/resolver:go_default_library"], +) diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go new file mode 100644 index 0000000000000..893d5d12cb0cf --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go @@ -0,0 +1,57 @@ +/* + * + * 
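Illustrative sketch (not vendored code): canaryingSC above expects the TXT record _grpc_config.<host> to carry a grpc_config= attribute followed by a JSON array whose entries match the rawChoice fields. A minimal example of parsing such a record; the round_robin service config is a placeholder:

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"
    )

    // exampleTXT mirrors what a _grpc_config.<host> lookup might return.
    const exampleTXT = `grpc_config=[{"clientLanguage":["GO"],"percentage":100,` +
        `"serviceConfig":{"loadBalancingPolicy":"round_robin"}}]`

    func main() {
        js := strings.TrimPrefix(exampleTXT, "grpc_config=")
        var choices []map[string]json.RawMessage
        if err := json.Unmarshal([]byte(js), &choices); err != nil {
            panic(err)
        }
        fmt.Printf("selected service config: %s\n", choices[0]["serviceConfig"])
    }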
Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. +package passthrough + +import "google.golang.org/grpc/resolver" + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 0000000000000..e83da346a5cde --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,193 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver defines APIs for name resolution in gRPC. +// All APIs in this package are experimental. +package resolver + +import ( + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from scheme to resolver builder. + m = make(map[string]Builder) + // defaultScheme is the default scheme to use. + defaultScheme = "passthrough" +) + +// TODO(bar) install dns resolver in init(){}. + +// Register registers the resolver builder to the resolver map. b.Scheme will be +// used as the scheme registered with this builder. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Resolvers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[b.Scheme()] = b +} + +// Get returns the resolver builder registered with the given scheme. +// +// If no builder is register with the scheme, nil will be returned. 
+func Get(scheme string) Builder { + if b, ok := m[scheme]; ok { + return b + } + return nil +} + +// SetDefaultScheme sets the default scheme that will be used. The default +// default scheme is "passthrough". +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. The scheme set last overrides +// previously set values. +func SetDefaultScheme(scheme string) { + defaultScheme = scheme +} + +// GetDefaultScheme gets the default scheme that will be used. +func GetDefaultScheme() string { + return defaultScheme +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the address is for a backend server. + Backend AddressType = iota + // GRPCLB indicates the address is for a grpclb load balancer. + GRPCLB +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Type is the type of this address. + Type AddressType + // ServerName is the name of this address. + // + // e.g. if Type is GRPCLB, ServerName should be the name of the remote load + // balancer, not the name of the backend. + ServerName string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BuildOption includes additional information for the builder to create +// the resolver. +type BuildOption struct { + // DisableServiceConfig indicates whether resolver should fetch service config data. + DisableServiceConfig bool +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { + Addresses []Address // Resolved addresses for the target + // ServiceConfig is the parsed service config; obtained from + // serviceconfig.Parse. + ServiceConfig serviceconfig.Config + + // TODO: add Err error +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // UpdateState updates the state of the ClientConn appropriately. + UpdateState(State) + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + // + // Deprecated: Use UpdateState instead. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + // + // Deprecated: Use UpdateState instead. + NewServiceConfig(serviceConfig string) +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext by the user. And +// grpc passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will +// parse the target string according to the spec. e.g. 
"dns://some_authority/foo.bar" will be parsed +// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// +// If the target does not contain a scheme, we will apply the default scheme, and set the Target to +// be the full target string. e.g. "foo.bar" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// +// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the +// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target +// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +type Target struct { + Scheme string + Authority string + Endpoint string +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOption includes additional information for ResolveNow. +type ResolveNowOption struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint, resolver can ignore this if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOption) + // Close closes the resolver. + Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 0000000000000..6934905b0f6cb --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,168 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "strings" + "sync/atomic" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/resolver" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConnection interface. +type ccResolverWrapper struct { + cc *ClientConn + resolver resolver.Resolver + addrCh chan []resolver.Address + scCh chan string + done uint32 // accessed atomically; set to 1 when closed. 
+ curState resolver.State +} + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. +func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// parseTarget splits target into a struct containing scheme, authority and +// endpoint. +// +// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: +// target}. +func parseTarget(target string) (ret resolver.Target) { + var ok bool + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } + return ret +} + +// newCCResolverWrapper parses cc.target for scheme and gets the resolver +// builder for this scheme and builds the resolver. The monitoring goroutine +// for it is not started yet and can be created by calling start(). +// +// If withResolverBuilder dial option is set, the specified resolver will be +// used instead. +func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { + rb := cc.dopts.resolverBuilder + if rb == nil { + return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) + } + + ccr := &ccResolverWrapper{ + cc: cc, + addrCh: make(chan []resolver.Address, 1), + scCh: make(chan string, 1), + } + + var err error + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig}) + if err != nil { + return nil, err + } + return ccr, nil +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) { + ccr.resolver.ResolveNow(o) +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolver.Close() + atomic.StoreUint32(&ccr.done, 1) +} + +func (ccr *ccResolverWrapper) isDone() bool { + return atomic.LoadUint32(&ccr.done) == 1 +} + +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { + if ccr.isDone() { + return + } + grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(s) + } + ccr.cc.updateResolverState(s) + ccr.curState = s +} + +// NewAddress is called by the resolver implementation to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + if ccr.isDone() { + return + } + grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + } + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState) +} + +// NewServiceConfig is called by the resolver implementation to send service +// configs to gRPC. 
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + if ccr.isDone() { + return + } + grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + c, err := parseServiceConfig(sc) + if err != nil { + return + } + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c}) + } + ccr.curState.ServiceConfig = c + ccr.cc.updateResolverState(ccr.curState) +} + +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig) + newSC, newOK := s.ServiceConfig.(*ServiceConfig) + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), + Severity: channelz.CtINFO, + }) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 0000000000000..088c3f1b25289 --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,863 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math" + "net/url" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +type gzipCompressor struct { + pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressor() Compressor { + c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) + return c +} + +// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead +// of assuming DefaultCompression. +// +// The error returned will be nil if the level is valid. +// +// Deprecated: use package encoding/gzip. 
+func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return nil, fmt.Errorf("grpc: invalid compression level: %d", level) + } + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { + w, err := gzip.NewWriterLevel(ioutil.Discard, level) + if err != nil { + panic(err) + } + return w + }, + }, + }, nil +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +// +// Deprecated: use package encoding. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { + pool sync.Pool +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } + } + + defer func() { + z.Close() + d.pool.Put(z) + }() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + compressorType string + failFast bool + stream ClientStream + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials + contentSubtype string + codec baseCodec + maxRetryRPCBufferSize int +} + +func defaultCallInfo() *callInfo { + return &callInfo{ + failFast: true, + maxRetryRPCBufferSize: 256 * 1024, // 256KB + } +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo) {} + +// Header returns a CallOptions that retrieves the header metadata +// for a unary RPC. +func Header(md *metadata.MD) CallOption { + return HeaderCallOption{HeaderAddr: md} +} + +// HeaderCallOption is a CallOption for collecting response header metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. 
+type HeaderCallOption struct { + HeaderAddr *metadata.MD +} + +func (o HeaderCallOption) before(c *callInfo) error { return nil } +func (o HeaderCallOption) after(c *callInfo) { + if c.stream != nil { + *o.HeaderAddr, _ = c.stream.Header() + } +} + +// Trailer returns a CallOptions that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return TrailerCallOption{TrailerAddr: md} +} + +// TrailerCallOption is a CallOption for collecting response trailer metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type TrailerCallOption struct { + TrailerAddr *metadata.MD +} + +func (o TrailerCallOption) before(c *callInfo) error { return nil } +func (o TrailerCallOption) after(c *callInfo) { + if c.stream != nil { + *o.TrailerAddr = c.stream.Trailer() + } +} + +// Peer returns a CallOption that retrieves peer information for a unary RPC. +// The peer field will be populated *after* the RPC completes. +func Peer(p *peer.Peer) CallOption { + return PeerCallOption{PeerAddr: p} +} + +// PeerCallOption is a CallOption for collecting the identity of the remote +// peer. The peer field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type PeerCallOption struct { + PeerAddr *peer.Peer +} + +func (o PeerCallOption) before(c *callInfo) error { return nil } +func (o PeerCallOption) after(c *callInfo) { + if c.stream != nil { + if x, ok := peer.FromContext(c.stream.Context()); ok { + *o.PeerAddr = *x + } + } +} + +// WaitForReady configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If waitForReady is false, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will +// retry the call if it fails due to a transient error. gRPC will not retry if +// data was written to the wire unless the server indicates it did not process +// the data. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// +// By default, RPCs don't "wait for ready". +func WaitForReady(waitForReady bool) CallOption { + return FailFastCallOption{FailFast: !waitForReady} +} + +// FailFast is the opposite of WaitForReady. +// +// Deprecated: use WaitForReady. +func FailFast(failFast bool) CallOption { + return FailFastCallOption{FailFast: failFast} +} + +// FailFastCallOption is a CallOption for indicating whether an RPC should fail +// fast or not. +// This is an EXPERIMENTAL API. +type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(c *callInfo) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. +func MaxCallRecvMsgSize(s int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size the client can receive. +// This is an EXPERIMENTAL API. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. 
+func MaxCallSendMsgSize(s int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size the client can send. +// This is an EXPERIMENTAL API. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// This is an EXPERIMENTAL API. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// This API is EXPERIMENTAL. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// This is an EXPERIMENTAL API. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// This is an EXPERIMENTAL API. +type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo) {} + +// ForceCodec returns a CallOption that will set the given Codec to be +// used for all request and response messages for a call. The result of calling +// String() will be used as the content-subtype in a case-insensitive manner. 
+// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// This is an EXPERIMENTAL API. +func ForceCodec(codec encoding.Codec) CallOption { + return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// This is an EXPERIMENTAL API. +type ForceCodecCallOption struct { + Codec encoding.Codec +} + +func (o ForceCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o ForceCodecCallOption) after(c *callInfo) {} + +// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of +// an encoding.Codec. +// +// Deprecated: use ForceCodec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// This is an EXPERIMENTAL API. +type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. +// +// This API is EXPERIMENTAL. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// This is an EXPERIMENTAL API. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. 
+func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg interface{}) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. If both +// compressors are nil, returns nil. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, err := compressor.Compress(cbuf) + if err != nil { + return nil, wrapErr(err) + } + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) + } + if err := z.Close(); err != nil { + return nil, wrapErr(err) + } + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) + } + } + return cbuf.Bytes(), nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. 
+func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + hdr = make([]byte, headerLen) + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData + } else { + hdr[0] = byte(compressionNone) + } + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data +} + +func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + SentTime: t, + } +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +type payloadInfo struct { + wireLength int // The compressed length got from wire. + uncompressedBytes []byte +} + +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { + pf, d, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + if payInfo != nil { + payInfo.wireLength = len(d) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return nil, st.Err() + } + + if pf == compressionMade { + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { + d, err = dc.Do(bytes.NewReader(d)) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } else { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } + } + if len(d) > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) + } + return d, nil +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? 
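// Editorial sketch (not part of the vendored patch): the length-prefixed framing
// used by parser.recvMsg and msgHeader above, reproduced with only the standard
// library. A frame is 1 byte of compression flag (0 = none, 1 = compressed)
// followed by a 4-byte big-endian payload length, then the payload itself.
package framing

import (
	"encoding/binary"
	"fmt"
	"io"
)

// readFrame reads one gRPC-style frame from r, enforcing a payload size limit.
func readFrame(r io.Reader, maxSize int) (compressed bool, payload []byte, err error) {
	var hdr [5]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return false, nil, err
	}
	length := binary.BigEndian.Uint32(hdr[1:])
	if int64(length) > int64(maxSize) {
		return false, nil, fmt.Errorf("frame of %d bytes exceeds limit %d", length, maxSize)
	}
	payload = make([]byte, length)
	if _, err := io.ReadFull(r, payload); err != nil {
		return false, nil, err
	}
	return hdr[0] == 1, payload, nil
}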
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + if err != nil { + return err + } + if err := c.Unmarshal(d, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + if payInfo != nil { + payInfo.uncompressedBytes = d + } + return nil +} + +// Information about RPC +type rpcInfo struct { + failfast bool + preloaderInfo *compressorInfo +} + +// Information about Preloader +// Responsible for storing codec, and compressors +// If stream (s) has context s.Context which stores rpcInfo that has non nil +// pointers to codec, and compressors, then we can use preparedMsg for Async message prep +// and reuse marshalled bytes +type compressorInfo struct { + codec baseCodec + cp Compressor + comp encoding.Compressor +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ + failfast: failfast, + preloaderInfo: &compressorInfo{ + codec: codec, + cp: cp, + comp: comp, + }, + }) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.Code instead. +func Code(err error) codes.Code { + return status.Code(err) +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.Convert and Message method instead. +func ErrorDesc(err error) string { + return status.Convert(err).Message() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...interface{}) error { + return status.Errorf(c, format, a...) +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if err == nil || err == io.EOF { + return err + } + if err == io.ErrUnexpectedEOF { + return status.Error(codes.Internal, err.Error()) + } + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it. + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. 
+ c.codec = encoding.GetCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = encoding.GetCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// parseDialTarget returns the network and address to pass to dialer +func parseDialTarget(target string) (net string, addr string) { + net = "tcp" + + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + net = n + addr = target[m1+1:] + return net, addr + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr = t.Path + if scheme == "unix" { + net = scheme + if addr == "" { + addr = t.Host + } + return net, addr + } + } + + return net, target +} + +// channelzData is used to store channelz related data for ClientConn, addrConn and Server. +// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of + // time.Time since it's more costly to atomically update time.Time variable than int64 variable. + lastCallStartedTime int64 +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 5. +// +// Older versions are kept for compatibility. They may be removed if +// compatibility cannot be maintained. +// +// These constants should not be referenced from any other code. +const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true +) + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 0000000000000..f064b73e555d4 --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,1510 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/trace" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 +) + +var statusOK = status.New(codes.OK, "") + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +// service consists of the information of the server serving this service and +// the methods in this service. +type service struct { + server interface{} // the server for service methods + md map[string]*MethodDesc + sd map[string]*StreamDesc + mdata interface{} +} + +// Server is a gRPC server to serve RPC requests. +type Server struct { + opts serverOptions + + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + m map[string]*service // service name -> service info + events trace.EventLog + + quit *grpcsync.Event + done *grpcsync.Event + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + + channelzID int64 // channelz unique identification number + czData *channelzData +} + +type serverOptions struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration + maxHeaderListSize *uint32 +} + +var defaultServerOptions = serverOptions{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, +} + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. 
+type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. It can be embedded +// in another structure to build custom server options. +// +// This API is EXPERIMENTAL. +type EmptyServerOption struct{} + +func (EmptyServerOption) apply(*serverOptions) {} + +// funcServerOption wraps a function that modifies serverOptions into an +// implementation of the ServerOption interface. +type funcServerOption struct { + f func(*serverOptions) +} + +func (fdo *funcServerOption) apply(do *serverOptions) { + fdo.f(do) +} + +func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + return &funcServerOption{ + f: f, + } +} + +// WriteBufferSize determines how much data can be batched before doing a write on the wire. +// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. +// The default value for this buffer is 32KB. +// Zero will disable the write buffer such that each write will be on underlying connection. +// Note: A Send call may not directly translate to a write. +func WriteBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.writeBufferSize = s + }) +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. +// The default value for this buffer is 32KB. +// Zero will disable read buffer for a connection so data framer can access the underlying +// conn directly. +func ReadBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.readBufferSize = s + }) +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + }) +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + }) +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + if kp.Time > 0 && kp.Time < time.Second { + grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s") + kp.Time = time.Second + } + + return newFuncServerOption(func(o *serverOptions) { + o.keepaliveParams = kp + }) +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.keepalivePolicy = kep + }) +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +func CustomCodec(codec Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. 
By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. +func RPCCompressor(cp Compressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.cp = cp + }) +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. +func RPCDecompressor(dc Decompressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.dc = dc + }) +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxReceiveMessageSize = m + }) +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default `math.MaxInt32`. +func MaxSendMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxSendMessageSize = m + }) +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.creds = c + }) +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + }) +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + }) +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + }) +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. 
+func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.statsHandler = h + }) +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function has full access to the Context of the request and the +// stream, and the invocation bypasses interceptors. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + }) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// This API is EXPERIMENTAL. +func ConnectionTimeout(d time.Duration) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.connectionTimeout = d + }) +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxHeaderListSize = &s + }) +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range opt { + o.apply(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[transport.ServerTransport]bool), + m: make(map[string]*service), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + czData: new(channelzData), + } + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if channelz.IsOn() { + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. 
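// Editorial sketch (not part of the vendored patch): wiring several of the
// ServerOptions above into NewServer. Service registration is omitted because it
// depends on user-generated pb.Register... helpers; the listen address is arbitrary.
package servermain

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func run() error {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		return err
	}
	s := grpc.NewServer(
		grpc.MaxRecvMsgSize(8<<20),             // raise the default 4MB receive limit
		grpc.ConnectionTimeout(30*time.Second), // handshake deadline for new connections
		grpc.KeepaliveParams(keepalive.ServerParameters{MaxConnectionIdle: 5 * time.Minute}),
	)
	// pb.RegisterFooServer(s, &fooServer{}) would go here.
	log.Printf("serving on %s", lis.Addr())
	return s.Serve(lis)
}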
+func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.m[sd.ServiceName]; ok { + grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + srv := &service{ + server: ss, + md: make(map[string]*MethodDesc), + sd: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + srv.md[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + srv.sd[d.StreamName] = d + } + s.m[sd.ServiceName] = srv +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata interface{} +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.m { + methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) + for m := range srv.md { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.sd { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") + +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if s.opts.creds == nil { + return rawConn, nil, nil + } + return s.opts.creds.ServerHandshake(rawConn) +} + +type listenSocket struct { + net.Listener + channelzID int64 +} + +func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { + return &channelz.SocketInternalMetric{ + SocketOptions: channelz.GetSocketOption(l.Listener), + LocalAddr: l.Listener.Addr(), + } +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + if channelz.IsOn() { + channelz.RemoveEntry(l.channelzID) + } + return err +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. 
lis will be closed when +// this method returns. +// Serve will return a non-nil error unless Stop or GracefulStop is called. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + if s.quit.HasFired() { + // Stop or GracefulStop called; block until done and return nil. + <-s.done.Done() + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + if channelz.IsOn() { + ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + } + s.mu.Unlock() + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + var tempDelay time.Duration // how long to sleep on accept failure + + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit.Done(): + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + if s.quit.HasFired() { + return nil + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. +func (s *Server) handleRawConn(rawConn net.Conn) { + if s.quit.HasFired() { + rawConn.Close() + return + } + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + rawConn.Close() + } + rawConn.SetDeadline(time.Time{}) + return + } + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return + } + + rawConn.SetDeadline(time.Time{}) + if !s.addConn(st) { + return + } + go func() { + s.serveStreams(st) + s.removeConn(st) + }() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). 
+func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + } + st, err := transport.NewServerTransport("http2", c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + return nil + } + + return st +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL +// and subject to change. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. 
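// Editorial sketch (not part of the vendored patch): a complete root handler built
// around the routing snippet shown in the ServeHTTP comment above. It assumes the
// HTTP server is started with TLS, since ServeHTTP requires requests that arrived
// over HTTP/2; mixedHandler is an illustrative name.
package sharedport

import (
	"net/http"
	"strings"

	"google.golang.org/grpc"
)

// mixedHandler routes gRPC traffic to grpcServer and everything else to httpMux.
func mixedHandler(grpcServer *grpc.Server, httpMux http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
			return
		}
		httpMux.ServeHTTP(w, r)
	})
}

// Typically served with: http.ListenAndServeTLS(":443", certFile, keyFile, mixedHandler(gs, mux))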
+func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: st.RemoteAddr(), + }, + } + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = time.Until(dl) + } + return trInfo +} + +func (s *Server) addConn(st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + st.Close() + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. + st.Drain() + } + s.conns[st] = true + return true +} + +func (s *Server) removeConn(st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, st) + s.cv.Broadcast() + } +} + +func (s *Server) channelzMetric() *channelz.ServerInternalMetric { + return &channelz.ServerInternalMetric{ + CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + } +} + +func (s *Server) incrCallsStarted() { + atomic.AddInt64(&s.czData.callsStarted, 1) + atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + atomic.AddInt64(&s.czData.callsSucceeded, 1) +} + +func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + grpclog.Errorln("grpc: server failed to encode response: ", err) + return err + } + compData, err := compress(data, cp, comp) + if err != nil { + grpclog.Errorln("grpc: server failed to compress response: ", err) + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil && s.opts.statsHandler != nil { + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } + return err +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + defer func() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + }() + } + sh := s.opts.statsHandler + if sh != nil { + beginTime := time.Now() + begin := &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), begin) + defer func() { + end := &stats.End{ + BeginTime: beginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + }() + } + if trInfo != nil { + defer trInfo.tr.Finish() + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + }() + } + + binlog := binarylog.GetMethodLogger(stream.Method()) + if binlog != nil { + ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } + binlog.Log(logEntry) + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + stream.SetSendCompress(cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. 
+ comp = encoding.GetCompressor(rc) + if comp != nil { + stream.SetSendCompress(rc) + } + } + + var payInfo *payloadInfo + if sh != nil || binlog != nil { + payInfo = &payloadInfo{} + } + d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + } + return err + } + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v interface{}) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if sh != nil { + sh.HandleRPC(stream.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + WireLength: payInfo.wireLength, + Data: d, + Length: len(d), + }) + } + if binlog != nil { + binlog.Log(&binarylog.ClientMessage{ + Message: d, + }) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(codes.Unknown, appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + if binlog != nil { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + } + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if s, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, s); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerMessage{ + Message: reply, + }) + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? 
+ err = t.WriteStatus(stream, statusOK) + if binlog != nil { + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + defer func() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + }() + } + sh := s.opts.statsHandler + if sh != nil { + beginTime := time.Now() + begin := &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), begin) + defer func() { + end := &stats.End{ + BeginTime: beginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + }() + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, + } + + ss.binlog = binarylog.GetMethodLogger(stream.Method()) + if ss.binlog != nil { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + ss.binlog.Log(logEntry) + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + stream.SetSendCompress(s.opts.cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. 
+ ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + stream.SetSendCompress(rc) + } + } + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + }() + } + var appErr error + var server interface{} + if srv != nil { + server = srv.server + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + appStatus = status.New(codes.Unknown, appErr.Error()) + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + t.WriteStatus(ss.s, appStatus) + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + }) + } + // TODO: Should we log an error from WriteStatus here and below? + return appErr + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } + err = t.WriteStatus(ss.s, statusOK) + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + }) + } + return err +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + + srv, knownService := s.m[service] + if knownService { + if md, ok := srv.md[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) + return + } + if sd, ok := srv.sd[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return + } + } + // Unknown service, or known server unknown method. + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + var errDesc string + if !knownService { + errDesc = fmt.Sprintf("unknown service %v", service) + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } + if trInfo != nil { + trInfo.tr.LazyPrintf("%s", errDesc) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } +} + +// The key to save ServerTransportStream in the context. 
+type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// This API is EXPERIMENTAL. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// This API is EXPERIMENTAL. +type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). +// +// This API is EXPERIMENTAL. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.quit.Fire() + + defer func() { + s.serveWG.Wait() + s.done.Fire() + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + + s.mu.Lock() + listeners := s.lis + s.lis = nil + st := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for c := range st { + c.Close() + } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + return + } + + for lis := range s.lis { + lis.Close() + } + s.lis = nil + if !s.drain { + for st := range s.conns { + st.Drain() + } + s.drain = true + } + + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + return encoding.GetCodec(proto.Name) + } + return codec +} + +// SetHeader sets the header metadata. +// When called multiple times, all the provided metadata will be merged. 
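The ServerTransportStream plumbing above exists largely so that helpers such as grpc.SetHeader (defined just below) can find the stream in a handler's context, and so that tests can substitute a fake, as the doc comment suggests. A sketch of such a test double; the type and test names are hypothetical:

package grpcutil_test

import (
	"context"
	"testing"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// fakeStream satisfies grpc.ServerTransportStream so handler code that calls
// grpc.SetHeader / grpc.SetTrailer can be exercised without a real transport.
type fakeStream struct {
	header, trailer metadata.MD
}

func (f *fakeStream) Method() string                  { return "/test.Service/Method" }
func (f *fakeStream) SetHeader(md metadata.MD) error  { f.header = metadata.Join(f.header, md); return nil }
func (f *fakeStream) SendHeader(md metadata.MD) error { return f.SetHeader(md) }
func (f *fakeStream) SetTrailer(md metadata.MD) error { f.trailer = metadata.Join(f.trailer, md); return nil }

func TestHandlerSetsHeader(t *testing.T) {
	fs := &fakeStream{}
	ctx := grpc.NewContextWithServerTransportStream(context.Background(), fs)

	if err := grpc.SetHeader(ctx, metadata.Pairs("x-request-id", "42")); err != nil {
		t.Fatal(err)
	}
	if got := fs.header.Get("x-request-id"); len(got) != 1 || got[0] != "42" {
		t.Fatalf("header not recorded, got %v", fs.header)
	}
}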
+// All the metadata will be sent out when one of the following happens: +// - grpc.SendHeader() is called; +// - The first response is sent out; +// - An RPC status is sent out (error or success). +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once. +// The provided md and headers set by SetHeader() will be sent. +func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + if err := stream.SendHeader(md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} + +type channelzServer struct { + s *Server +} + +func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 0000000000000..d0787f1e2a1c4 --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,429 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/serviceconfig" +) + +const maxInt = int(^uint(0) >> 1) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). 
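The SetHeader / SendHeader / SetTrailer helpers here are normally called from handlers or interceptors, whose contexts already carry the ServerTransportStream. A small sketch of a unary server interceptor that stamps a header and a timing trailer; the metadata keys and values are illustrative:

package main

import (
	"context"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// timingInterceptor sets a header that goes out with the first response (or
// earlier via grpc.SendHeader) and a trailer that rides along with the final
// RPC status, matching the rules documented above.
func timingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-served-by", "example-1")); err != nil {
		return nil, err
	}
	resp, err := handler(ctx, req)
	// Best effort: the RPC outcome should not change just because trailer
	// metadata could not be attached.
	_ = grpc.SetTrailer(ctx, metadata.Pairs("x-elapsed", time.Since(start).String()))
	return resp, err
}

func main() {
	_ = grpc.NewServer(grpc.UnaryInterceptor(timingInterceptor))
}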
The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. + retryPolicy *retryPolicy +} + +type lbConfig struct { + name string + cfg serviceconfig.LoadBalancingConfig +} + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type ServiceConfig struct { + serviceconfig.Config + + // LB is the load balancer the service providers recommends. The balancer + // specified via grpc.WithBalancer will override this. This is deprecated; + // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig + // will be used. + LB *string + + // lbConfig is the service config's load balancing configuration. If + // lbConfig and LB are both present, lbConfig will be used. + lbConfig *lbConfig + + // Methods contains a map for the methods in this service. If there is an + // exact match for a method (i.e. /service/method) in the map, use the + // corresponding MethodConfig. If there's no exact match, look for the + // default config for the service (/service/) and use the corresponding + // MethodConfig if it exists. Otherwise, the method has no MethodConfig to + // use. + Methods map[string]MethodConfig + + // If a retryThrottlingPolicy is provided, gRPC will automatically throttle + // retry attempts and hedged RPCs when the client’s ratio of failures to + // successes exceeds a threshold. + // + // For each server name, the gRPC client will maintain a token_count which is + // initially set to maxTokens, and can take values between 0 and maxTokens. + // + // Every outgoing RPC (regardless of service or method invoked) will change + // token_count as follows: + // + // - Every failed RPC will decrement the token_count by 1. + // - Every successful RPC will increment the token_count by tokenRatio. + // + // If token_count is less than or equal to maxTokens / 2, then RPCs will not + // be retried and hedged RPCs will not be sent. + retryThrottling *retryThrottlingPolicy + // healthCheckConfig must be set as one of the requirement to enable LB channel + // health check. + healthCheckConfig *healthCheckConfig + // rawJSONString stores service config json string that get parsed into + // this service config struct. 
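A service config in the JSON shape parsed later in this file can be supplied directly at dial time. The sketch below, whose service name, address, and limits are all illustrative, shows how the JSON fields land in the MethodConfig fields documented above:

package main

import (
	"log"

	"google.golang.org/grpc"
)

const exampleServiceConfig = `{
  "methodConfig": [{
    "name": [{ "service": "example.Echo" }],
    "waitForReady": true,
    "timeout": "2.5s",
    "maxRequestMessageBytes": 1048576,
    "maxResponseMessageBytes": 4194304
  }]
}`

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		// Used as a fallback when the name resolver does not supply a
		// service config of its own.
		grpc.WithDefaultServiceConfig(exampleServiceConfig),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}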
+ rawJSONString string +} + +// healthCheckConfig defines the go-native version of the LB channel health check config. +type healthCheckConfig struct { + // serviceName is the service name to use in the health-checking request. + ServiceName string +} + +// retryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + maxAttempts int + + // Exponential backoff parameters. The initial retry attempt will occur at + // random(0, initialBackoffMS). In general, the nth attempt will occur at + // random(0, + // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)). + // + // These fields are required and must be greater than zero. + initialBackoff time.Duration + maxBackoff time.Duration + backoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + retryableStatusCodes map[codes.Code]bool +} + +type jsonRetryPolicy struct { + MaxAttempts int + InitialBackoff string + MaxBackoff string + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code +} + +// retryThrottlingPolicy defines the go-native version of the retry throttling +// policy defined by the service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryThrottlingPolicy struct { + // The number of tokens starts at maxTokens. The token_count will always be + // between 0 and maxTokens. + // + // This field is required and must be greater than zero. + MaxTokens float64 + // The amount of tokens to add on each successful RPC. Typically this will + // be some number between 0 and 1, e.g., 0.1. + // + // This field is required and must be greater than zero. Up to 3 decimal + // places are supported. + TokenRatio float64 +} + +func parseDuration(s *string) (*time.Duration, error) { + if s == nil { + return nil, nil + } + if !strings.HasSuffix(*s, "s") { + return nil, fmt.Errorf("malformed duration %q", *s) + } + ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) + if len(ss) > 2 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. 
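The retryPolicy and retryThrottlingPolicy structs above unmarshal from the per-method and top-level sections of the same service config JSON. A sketch of that shape, with illustrative values; note that convertRetryPolicy further down logs a warning and drops a policy whose fields violate the documented constraints, and caps maxAttempts at 5:

package retrycfg

// retryServiceConfig can be handed to grpc.WithDefaultServiceConfig exactly
// like the earlier example; retryableStatusCodes uses the code names accepted
// by codes.Code's JSON unmarshaler (e.g. "UNAVAILABLE").
const retryServiceConfig = `{
  "methodConfig": [{
    "name": [{ "service": "example.Echo", "method": "Ping" }],
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }],
  "retryThrottling": {
    "maxTokens": 10,
    "tokenRatio": 0.1
  }
}`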
+ hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + +type jsonName struct { + Service *string + Method *string +} + +func (j jsonName) generatePath() (string, bool) { + if j.Service == nil { + return "", false + } + res := "/" + *j.Service + "/" + if j.Method != nil { + res += *j.Method + } + return res, true +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +type loadBalancingConfig map[string]json.RawMessage + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonSC struct { + LoadBalancingPolicy *string + LoadBalancingConfig *[]loadBalancingConfig + MethodConfig *[]jsonMC + RetryThrottling *retryThrottlingPolicy + HealthCheckConfig *healthCheckConfig +} + +func init() { + internal.ParseServiceConfig = func(sc string) (interface{}, error) { + return parseServiceConfig(sc) + } +} + +func parseServiceConfig(js string) (*ServiceConfig, error) { + if len(js) == 0 { + return nil, fmt.Errorf("no JSON service config provided") + } + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return nil, err + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + retryThrottling: rsc.RetryThrottling, + healthCheckConfig: rsc.HealthCheckConfig, + rawJSONString: js, + } + if rsc.LoadBalancingConfig != nil { + for i, lbcfg := range *rsc.LoadBalancingConfig { + if len(lbcfg) != 1 { + err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + grpclog.Warningf(err.Error()) + return nil, err + } + var name string + var jsonCfg json.RawMessage + for name, jsonCfg = range lbcfg { + } + builder := balancer.Get(name) + if builder == nil { + continue + } + sc.lbConfig = &lbConfig{name: name} + if parser, ok := builder.(balancer.ConfigParser); ok { + var err error + sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg) + if err != nil { + return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + } else if string(jsonCfg) != "{}" { + grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + break + } + } + + if rsc.MethodConfig == nil { + return &sc, nil + } + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + d, err := parseDuration(m.Timeout) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return nil, err + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: d, + } + if mc.retryPolicy, err = 
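The loadBalancingConfig handling in parseServiceConfig above walks the list in order and keeps the first entry whose policy name has a registered balancer, delegating to that balancer's ConfigParser when it implements one. A sketch of the corresponding JSON; the first policy name is deliberately made up to show the skip-and-continue behaviour, while round_robin and pick_first are policies grpc-go registers:

package lbcfg

// lbServiceConfig ends up selecting round_robin, provided that balancer is
// linked into the binary; the unknown "my_custom_policy" entry is skipped
// because balancer.Get returns nil for it, as in the loop above.
const lbServiceConfig = `{
  "loadBalancingConfig": [
    { "my_custom_policy": {} },
    { "round_robin": {} },
    { "pick_first": {} }
  ]
}`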
convertRetryPolicy(m.RetryPolicy); err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return nil, err + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for _, n := range *m.Name { + if path, valid := n.generatePath(); valid { + sc.Methods[path] = mc + } + } + } + + if sc.retryThrottling != nil { + if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { + return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt) + } + if tr := sc.retryThrottling.TokenRatio; tr <= 0 { + return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr) + } + } + return &sc, nil +} + +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { + if jrp == nil { + return nil, nil + } + ib, err := parseDuration(&jrp.InitialBackoff) + if err != nil { + return nil, err + } + mb, err := parseDuration(&jrp.MaxBackoff) + if err != nil { + return nil, err + } + + if jrp.MaxAttempts <= 1 || + *ib <= 0 || + *mb <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + return nil, nil + } + + rp := &retryPolicy{ + maxAttempts: jrp.MaxAttempts, + initialBackoff: *ib, + maxBackoff: *mb, + backoffMultiplier: jrp.BackoffMultiplier, + retryableStatusCodes: make(map[codes.Code]bool), + } + if rp.maxAttempts > 5 { + // TODO(retry): Make the max maxAttempts configurable. + rp.maxAttempts = 5 + } + for _, code := range jrp.RetryableStatusCodes { + rp.retryableStatusCodes[code] = true + } + return rp, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/BUILD.bazel b/vendor/google.golang.org/grpc/serviceconfig/BUILD.bazel new file mode 100644 index 0000000000000..6e412b75c0212 --- /dev/null +++ b/vendor/google.golang.org/grpc/serviceconfig/BUILD.bazel @@ -0,0 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["serviceconfig.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/serviceconfig", + importpath = "google.golang.org/grpc/serviceconfig", + visibility = ["//visibility:public"], + deps = ["//vendor/google.golang.org/grpc/internal:go_default_library"], +) diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go new file mode 100644 index 0000000000000..53b27875a1ac2 --- /dev/null +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -0,0 +1,48 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig defines types and methods for operating on gRPC +// service configs. +// +// This package is EXPERIMENTAL. +package serviceconfig + +import ( + "google.golang.org/grpc/internal" +) + +// Config represents an opaque data structure holding a service config. +type Config interface { + isConfig() +} + +// LoadBalancingConfig represents an opaque data structure holding a load +// balancer config. +type LoadBalancingConfig interface { + isLoadBalancingConfig() +} + +// Parse parses the JSON service config provided into an internal form or +// returns an error if the config is invalid. +func Parse(ServiceConfigJSON string) (Config, error) { + c, err := internal.ParseServiceConfig(ServiceConfigJSON) + if err != nil { + return nil, err + } + return c.(Config), err +} diff --git a/vendor/google.golang.org/grpc/stats/BUILD.bazel b/vendor/google.golang.org/grpc/stats/BUILD.bazel new file mode 100644 index 0000000000000..2c91f856d5989 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "handlers.go", + "stats.go", + ], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/stats", + importpath = "google.golang.org/grpc/stats", + visibility = ["//visibility:public"], + deps = ["//vendor/google.golang.org/grpc/metadata:go_default_library"], +) diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 0000000000000..dc03731e45efa --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + "net" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). 
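serviceconfig.Parse above only forwards to internal.ParseServiceConfig, which the grpc package installs in its init (see the init earlier in service_config.go), so a program that does not otherwise import grpc needs a blank import for Parse to work. A small sketch with an illustrative JSON document:

package main

import (
	"fmt"
	"log"

	_ "google.golang.org/grpc" // installs internal.ParseServiceConfig via init
	"google.golang.org/grpc/serviceconfig"
)

func main() {
	cfg, err := serviceconfig.Parse(`{"loadBalancingPolicy": "round_robin"}`)
	if err != nil {
		log.Fatalf("invalid service config: %v", err)
	}
	// The concrete type behind the opaque Config interface is the grpc
	// package's *ServiceConfig shown earlier in this diff.
	fmt.Printf("parsed service config of type %T\n", cfg)
}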
+type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. + TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. + HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. + // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 0000000000000..f3f593c844323 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,300 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/metadata" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. 
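A Handler implementation typically type-switches on the concrete RPCStats values defined in this file and is attached with the grpc.StatsHandler server option (or grpc.WithStatsHandler on the client). A minimal sketch; what it logs and the context key are illustrative:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

type methodKey struct{}

// rpcLogger records a few of the RPCStats events; all other events and the
// connection-level callbacks are ignored.
type rpcLogger struct{}

func (rpcLogger) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	// The returned context is reused for the remaining events of this RPC.
	return context.WithValue(ctx, methodKey{}, info.FullMethodName)
}

func (rpcLogger) HandleRPC(ctx context.Context, s stats.RPCStats) {
	method, _ := ctx.Value(methodKey{}).(string)
	switch ev := s.(type) {
	case *stats.InPayload:
		log.Printf("%s: received %d bytes (%d on the wire)", method, ev.Length, ev.WireLength)
	case *stats.End:
		log.Printf("%s: finished after %v, err=%v", method, ev.EndTime.Sub(ev.BeginTime), ev.Error)
	}
}

func (rpcLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (rpcLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func main() {
	_ = grpc.NewServer(grpc.StatsHandler(rpcLogger{}))
}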
+func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this End is from the client side. 
+ Trailer metadata.MD + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. 
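SetTags and Tags above are a matched pair split across the two sides of an RPC: the client attaches bytes to its outgoing context, the transport carries them as the grpc-tags-bin header, and on the server side they are meant to reappear under the incoming-tags key (set by the transport via SetIncomingTags). A sketch with an illustrative tag value; the functions stay separate because, per the comments above, the outgoing and incoming keys are distinct:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

// tagOutgoing is used on the client: RPCs issued with the returned context
// carry the bytes in the grpc-tags-bin header.
func tagOutgoing(ctx context.Context) context.Context {
	return stats.SetTags(ctx, []byte("tenant=acme"))
}

// logTags is used inside a server handler or stats.Handler, where the
// transport is expected to have stored the header under the incoming key.
func logTags(ctx context.Context) {
	if b := stats.Tags(ctx); len(b) > 0 {
		log.Printf("request tags: %s", b)
	}
}

func main() {
	_ = tagOutgoing
	_ = logTags
}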
+func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/status/BUILD.bazel b/vendor/google.golang.org/grpc/status/BUILD.bazel new file mode 100644 index 0000000000000..1dda4bff6bdec --- /dev/null +++ b/vendor/google.golang.org/grpc/status/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["status.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/status", + importpath = "google.golang.org/grpc/status", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/protobuf/proto:go_default_library", + "//vendor/github.com/golang/protobuf/ptypes:go_default_library", + "//vendor/google.golang.org/genproto/googleapis/rpc/status:go_default_library", + "//vendor/google.golang.org/grpc/codes:go_default_library", + "//vendor/google.golang.org/grpc/internal:go_default_library", + ], +) diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 0000000000000..a1348e9b16bd6 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,228 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. 
+package status + +import ( + "context" + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" +) + +func init() { + internal.StatusRawProto = statusRawProto +} + +func statusRawProto(s *Status) *spb.Status { return s.s } + +// statusError is an alias of a status proto. It implements error and Status, +// and a nil statusError should never be returned by this package. +type statusError spb.Status + +func (se *statusError) Error() string { + p := (*spb.Status)(se) + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +} + +func (se *statusError) GRPCStatus() *Status { + return &Status{s: (*spb.Status)(se)} +} + +// Is implements future error.Is functionality. +// A statusError is equivalent if the code and message are identical. +func (se *statusError) Is(target error) bool { + tse, ok := target.(*statusError) + if !ok { + return false + } + + return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) +} + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is +// OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return (*statusError)(s.s) +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// FromError returns a Status representing err if it was produced from this +// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a +// Status is returned with codes.Unknown and the original error message. 
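The usual flow for these constructors is that server-side code fails with an error built by this package, and the caller recovers the code and message with FromError or Convert (both defined just below). A small sketch with a hypothetical lookup function:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// lookup fails the way a gRPC handler is expected to: with a status error,
// so the client sees codes.NotFound rather than codes.Unknown.
func lookup(ctx context.Context, id string) (string, error) {
	return "", status.Errorf(codes.NotFound, "no record with id %q", id)
}

func main() {
	_, err := lookup(context.Background(), "42")

	if st, ok := status.FromError(err); ok {
		fmt.Println(st.Code(), st.Message()) // NotFound no record with id "42"
	}
}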
+func FromError(err error) (s *Status, ok bool) { + if err == nil { + return nil, true + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus(), true + } + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. +func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. + if err == nil { + return codes.OK + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus().Code() + } + return codes.Unknown +} + +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. +func FromContextError(err error) *Status { + switch err { + case nil: + return nil + case context.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) + } +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 0000000000000..134a624a15dfd --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1529 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
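WithDetails and Details above round-trip arbitrary proto messages through the status's Details field. The sketch below uses the errdetails messages from google.golang.org/genproto, an assumption about what is available in the build; any proto.Message works the same way:

package main

import (
	"fmt"
	"log"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Server side: refuse the request and attach a structured description of
	// what was wrong with it.
	st := status.New(codes.InvalidArgument, "bad request")
	st, err := st.WithDetails(&errdetails.BadRequest{
		FieldViolations: []*errdetails.BadRequest_FieldViolation{
			{Field: "name", Description: "must not be empty"},
		},
	})
	if err != nil {
		log.Fatalf("attaching details: %v", err)
	}

	// Client side: Details unpacks each Any back into its registered Go type.
	for _, d := range status.Convert(st.Err()).Details() {
		if br, ok := d.(*errdetails.BadRequest); ok {
			fmt.Println(br.FieldViolations[0].Field, "-", br.FieldViolations[0].Description)
		}
	}
}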
+ * + */ + +package grpc + +import ( + "context" + "errors" + "io" + "math" + "strconv" + "sync" + "time" + + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. If a StreamHandler returns an error, it +// should be produced by the status package, or else gRPC will use +// codes.Unknown as the status code and err.Error() as the status message +// of the RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. +type StreamDesc struct { + StreamName string + Handler StreamHandler + + // At least one of these is true. + ServerStreams bool + ClientStreams bool +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. +type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. + SendMsg(m interface{}) error + // Deprecated: See ClientStream and ServerStream documentation instead. + RecvMsg(m interface{}) error +} + +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. 
+ // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + c := defaultCallInfo() + // Provide an opportunity for the first RPC to see the first service config + // provided by the resolver. + if err := cc.waitForResolvedAddrs(ctx); err != nil { + return nil, err + } + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. 
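NewStream's cleanup rules above are easiest to see with the raw stream API, which generated code normally hides. A sketch of consuming a server-streaming method; the address, the method name, and the use of empty messages are all illustrative:

package main

import (
	"context"
	"io"
	"log"
	"time"

	epb "github.com/golang/protobuf/ptypes/empty"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Cancelling the context is one of the documented ways to guarantee the
	// stream's resources are released (rule 2 above).
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	desc := &grpc.StreamDesc{ServerStreams: true}
	cs, err := conn.NewStream(ctx, desc, "/example.Echo/Watch")
	if err != nil {
		log.Fatalf("NewStream: %v", err)
	}

	var req, msg epb.Empty
	if err := cs.SendMsg(&req); err != nil {
		log.Fatalf("SendMsg: %v", err)
	}
	if err := cs.CloseSend(); err != nil {
		log.Fatalf("CloseSend: %v", err)
	}
	for {
		// Receiving until a non-nil error (io.EOF on success) is rule 3 above.
		if err := cs.RecvMsg(&msg); err != nil {
			if err != io.EOF {
				log.Printf("stream ended: %v", err)
			}
			return
		}
	}
}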
+ var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + } + ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) + sh := cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: c.failFast, + } + sh.HandleRPC(ctx, begin) + } + + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + beginTime: beginTime, + firstAttempt: true, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + cs.binlog = binarylog.GetMethodLogger(method) + + cs.callInfo.stream = cs + // Only this initial attempt has stats/tracing. + // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. 
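Several of the callInfo fields populated here map directly onto per-call options: the deadline logic layers any methodConfig timeout on top of the application context (the earlier deadline wins), and the WaitForReady call option overrides the failFast default derived from the service config. A sketch of a call that exercises both; the method name and empty messages are illustrative:

package main

import (
	"context"
	"log"
	"time"

	epb "github.com/golang/protobuf/ptypes/empty"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Application deadline; if the method's service config also sets a
	// timeout, the code above applies WithTimeout to this context, so
	// whichever deadline is earlier takes effect.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	var in, out epb.Empty
	err = conn.Invoke(ctx, "/example.Echo/Ping", &in, &out,
		// Per-call override of waitForReady: block through transient
		// connection failures instead of failing fast.
		grpc.WaitForReady(true),
	)
	log.Printf("Invoke returned: %v", err)
}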
+ if err := cs.newAttemptLocked(sh, trInfo); err != nil { + cs.finish(err) + return nil, err + } + + op := func(a *csAttempt) error { return a.newStream() } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + cs.finish(err) + return nil, err + } + + if cs.binlog != nil { + md, _ := metadata.FromOutgoingContext(ctx) + logEntry := &binarylog.ClientHeader{ + OnClientSide: true, + Header: md, + MethodName: method, + Authority: cs.cc.authority, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + cs.binlog.Log(logEntry) + } + + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() + } + return cs, nil +} + +// newAttemptLocked creates a new attempt with a transport. +// If it succeeds, then it replaces clientStream's attempt with this new attempt. +func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { + newAttempt := &csAttempt{ + cs: cs, + dc: cs.cc.dopts.dc, + statsHandler: sh, + trInfo: trInfo, + } + defer func() { + if retErr != nil { + // This attempt is not set in the clientStream, so it's finish won't + // be called. Call it here for stats and trace in case they are not + // nil. + newAttempt.finish(retErr) + } + }() + + if err := cs.ctx.Err(); err != nil { + return toRPCErr(err) + } + t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method) + if err != nil { + return err + } + if trInfo != nil { + trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + } + newAttempt.t = t + newAttempt.done = done + cs.attempt = newAttempt + return nil +} + +func (a *csAttempt) newStream() error { + cs := a.cs + cs.callHdr.PreviousAttempts = cs.numRetries + s, err := a.t.NewStream(cs.ctx, cs.callHdr) + if err != nil { + return toRPCErr(err) + } + cs.attempt.s = s + cs.attempt.p = &parser{r: s} + return nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + callHdr *transport.CallHdr + opts []CallOption + callInfo *callInfo + cc *ClientConn + desc *StreamDesc + + codec baseCodec + cp Compressor + comp encoding.Compressor + + cancel context.CancelFunc // cancels all attempts + + sentLast bool // sent an end stream + beginTime time.Time + + methodConfig *MethodConfig + + ctx context.Context // the application's context, wrapped by stats/tracing + + retryThrottler *retryThrottler // The throttler active when the RPC began. + + binlog *binarylog.MethodLogger // Binary logger, can be nil. + // serverHeaderBinlogged is a boolean for whether server header has been + // logged. Server header will be logged when the first time one of those + // happens: stream.Header(), stream.Recv(). + // + // It's only read and used by Recv() and Header(), so it doesn't need to be + // synchronized. 
+ serverHeaderBinlogged bool + + mu sync.Mutex + firstAttempt bool // if true, transparent retry is valid + numRetries int // exclusive of transparent retry attempt(s) + numRetriesSincePushback int // retries since pushback; to reset backoff + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + // attempt is the active client stream attempt. + // The only place where it is written is the newAttemptLocked method and this method never writes nil. + // So, attempt can be nil only inside newClientStream function when clientStream is first created. + // One of the first things done after clientStream's creation, is to call newAttemptLocked which either + // assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked, + // then newClientStream calls finish on the clientStream and returns. So, finish method is the only + // place where we need to check if the attempt is nil. + attempt *csAttempt + // TODO(hedging): hedging will have multiple attempts simultaneously. + committed bool // active attempt committed for retry? + buffer []func(a *csAttempt) error // operations to replay on retry + bufferSize int // current size of buffer +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. +type csAttempt struct { + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + done func(balancer.DoneInfo) + + finished bool + dc Decompressor + decomp encoding.Compressor + decompSet bool + + mu sync.Mutex // guards trInfo.tr + // trInfo may be nil (if EnableTracing is false). + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. + trInfo *traceInfo + + statsHandler stats.Handler +} + +func (cs *clientStream) commitAttemptLocked() { + cs.committed = true + cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. +func (cs *clientStream) shouldRetry(err error) error { + if cs.attempt.s == nil && !cs.callInfo.failFast { + // In the event of any error from NewStream (attempt.s == nil), we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } + if cs.finished || cs.committed { + // RPC is finished or committed; cannot retry. + return err + } + // Wait for the trailers. + if cs.attempt.s != nil { + <-cs.attempt.s.Done() + } + if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { + // First attempt, stream unprocessed: transparently retry. + cs.firstAttempt = false + return nil + } + cs.firstAttempt = false + if cs.cc.dopts.disableRetry { + return err + } + + pushback := 0 + hasPushback := false + if cs.attempt.s != nil { + if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to { + return err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. + sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. 
+ return err + } + hasPushback = true + } else if len(sps) > 1 { + grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + } + + var code codes.Code + if cs.attempt.s != nil { + code = cs.attempt.s.Status().Code() + } else { + code = status.Convert(err).Code() + } + + rp := cs.methodConfig.retryPolicy + if rp == nil || !rp.retryableStatusCodes[code] { + return err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. + if cs.retryThrottler.throttle() { + return err + } + if cs.numRetries+1 >= rp.maxAttempts { + return err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.initialBackoff) * fact + if max := float64(rp.maxBackoff); cur > max { + cur = max + } + dur = time.Duration(grpcrand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return nil + case <-cs.ctx.Done(): + t.Stop() + return status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. +func (cs *clientStream) retryLocked(lastErr error) error { + for { + cs.attempt.finish(lastErr) + if err := cs.shouldRetry(lastErr); err != nil { + cs.commitAttemptLocked() + return err + } + if err := cs.newAttemptLocked(nil, nil); err != nil { + return err + } + if lastErr = cs.replayBufferLocked(); lastErr == nil { + return nil + } + } +} + +func (cs *clientStream) Context() context.Context { + cs.commitAttempt() + // No need to lock before using attempt, since we know it is committed and + // cannot change. + return cs.attempt.s.Context() +} + +func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { + cs.mu.Lock() + for { + if cs.committed { + cs.mu.Unlock() + return op(cs.attempt) + } + a := cs.attempt + cs.mu.Unlock() + err := op(a) + cs.mu.Lock() + if a != cs.attempt { + // We started another attempt already. + continue + } + if err == io.EOF { + <-a.s.Done() + } + if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { + onSuccess() + cs.mu.Unlock() + return err + } + if err := cs.retryLocked(err); err != nil { + cs.mu.Unlock() + return err + } + } +} + +func (cs *clientStream) Header() (metadata.MD, error) { + var m metadata.MD + err := cs.withRetry(func(a *csAttempt) error { + var err error + m, err = a.s.Header() + return toRPCErr(err) + }, cs.commitAttemptLocked) + if err != nil { + cs.finish(err) + return nil, err + } + if cs.binlog != nil && !cs.serverHeaderBinlogged { + // Only log if binary log is on and header has not been logged. + logEntry := &binarylog.ServerHeader{ + OnClientSide: true, + Header: m, + PeerAddr: nil, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.binlog.Log(logEntry) + cs.serverHeaderBinlogged = true + } + return m, err +} + +func (cs *clientStream) Trailer() metadata.MD { + // On RPC failure, we never need to retry, because usage requires that + // RecvMsg() returned a non-nil error before calling this function is valid. 
+ // We would have retried earlier if necessary. + // + // Commit the attempt anyway, just in case users are not following those + // directions -- it will prevent races and should not meaningfully impact + // performance. + cs.commitAttempt() + if cs.attempt.s == nil { + return nil + } + return cs.attempt.s.Trailer() +} + +func (cs *clientStream) replayBufferLocked() error { + a := cs.attempt + for _, f := range cs.buffer { + if err := f(a); err != nil { + return err + } + } + return nil +} + +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { + // Note: we still will buffer if retry is disabled (for transparent retries). + if cs.committed { + return + } + cs.bufferSize += sz + if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.commitAttemptLocked() + return + } + cs.buffer = append(cs.buffer, op) +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + cs.finish(err) + } + }() + if cs.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !cs.desc.ClientStreams { + cs.sentLast = true + } + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + } + msgBytes := data // Store the pointer before setting to nil. For binary logging. + op := func(a *csAttempt) error { + err := a.sendMsg(m, hdr, payload, data) + // nil out the message and uncomp when replaying; they are only needed for + // stats which is disabled for subsequent attempts. + m, data = nil, nil + return err + } + err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) + if cs.binlog != nil && err == nil { + cs.binlog.Log(&binarylog.ClientMessage{ + OnClientSide: true, + Message: msgBytes, + }) + } + return +} + +func (cs *clientStream) RecvMsg(m interface{}) error { + if cs.binlog != nil && !cs.serverHeaderBinlogged { + // Call Header() to binary log header if it's not already logged. + cs.Header() + } + var recvInfo *payloadInfo + if cs.binlog != nil { + recvInfo = &payloadInfo{} + } + err := cs.withRetry(func(a *csAttempt) error { + return a.recvMsg(m, recvInfo) + }, cs.commitAttemptLocked) + if cs.binlog != nil && err == nil { + cs.binlog.Log(&binarylog.ServerMessage{ + OnClientSide: true, + Message: recvInfo.uncompressedBytes, + }) + } + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + + if cs.binlog != nil { + // finish will not log Trailer. Log Trailer here. 
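// A minimal sketch of the client-side knobs behind the retry machinery above:
// a service config that populates methodConfig.retryPolicy (field names follow
// the gRPC service-config schema; the service, method, and target here are
// placeholders) and the experimental MaxRetryRPCBufferSize CallOption checked
// by bufferForRetryLocked. In this gRPC-Go version client retries may
// additionally be gated by the GRPC_GO_RETRY environment variable.
package clientexample

import "google.golang.org/grpc"

const retryServiceConfig = `{
  "methodConfig": [{
    "name": [{ "service": "pkg.Service", "method": "Method" }],
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func dialWithRetries(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(retryServiceConfig),
		// Commit to the current attempt once 1 MiB of outgoing messages has
		// been buffered for replay on this RPC.
		grpc.WithDefaultCallOptions(grpc.MaxRetryRPCBufferSize(1<<20)),
	)
}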
+ logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if logEntry.Err == io.EOF { + logEntry.Err = nil + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.binlog.Log(logEntry) + } + } + return err +} + +func (cs *clientStream) CloseSend() error { + if cs.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + cs.sentLast = true + op := func(a *csAttempt) error { + a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil + } + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + if cs.binlog != nil { + cs.binlog.Log(&binarylog.ClientHalfClose{ + OnClientSide: true, + }) + } + // We never returned an error here for reasons. + return nil +} + +func (cs *clientStream) finish(err error) { + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + cs.mu.Lock() + if cs.finished { + cs.mu.Unlock() + return + } + cs.finished = true + cs.commitAttemptLocked() + cs.mu.Unlock() + // For binary logging. only log cancel in finish (could be caused by RPC ctx + // canceled or ClientConn closed). Trailer will be logged in RecvMsg. + // + // Only one of cancel or trailer needs to be logged. In the cases where + // users don't call RecvMsg, users must have already canceled the RPC. + if cs.binlog != nil && status.Code(err) == codes.Canceled { + cs.binlog.Log(&binarylog.Cancel{ + OnClientSide: true, + }) + } + if err == nil { + cs.retryThrottler.successfulRPC() + } + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo) + } + } + } + cs.cancel() +} + +func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { + cs := a.cs + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + a.mu.Unlock() + } + if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if !cs.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + } + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil +} + +func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { + cs := a.cs + if a.statsHandler != nil && payInfo == nil { + payInfo = &payloadInfo{} + } + + if !a.decompSet { + // Block until we receive headers containing received message encoding. + if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.dc == nil || a.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. 
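// A short sketch of the registration side of the lookup above: a custom
// encoding.Compressor wrapping compress/zlib, registered under an illustrative
// name. Once registered, encoding.GetCompressor("zlib"), as called from
// newClientStream and recvMsg, will find it for matching grpc-encoding values.
package clientexample

import (
	"compress/zlib"
	"io"

	"google.golang.org/grpc/encoding"
)

type zlibCompressor struct{}

func (zlibCompressor) Name() string { return "zlib" }

func (zlibCompressor) Compress(w io.Writer) (io.WriteCloser, error) {
	return zlib.NewWriter(w), nil
}

func (zlibCompressor) Decompress(r io.Reader) (io.Reader, error) {
	return zlib.NewReader(r)
}

func init() { encoding.RegisterCompressor(zlibCompressor{}) }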
+ a.dc = nil + a.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + a.dc = nil + } + // Only initialize this state once per stream. + a.decompSet = true + } + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) + if err != nil { + if err == io.EOF { + if statusErr := a.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + return toRPCErr(err) + } + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + a.mu.Unlock() + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + Client: true, + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength, + Length: len(payInfo.uncompressedBytes), + }) + } + if channelz.IsOn() { + a.t.IncrMsgRecv() + } + if cs.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { + return a.s.Status().Err() // non-server streaming Recv returns nil on success + } + return toRPCErr(err) +} + +func (a *csAttempt) finish(err error) { + a.mu.Lock() + if a.finished { + a.mu.Unlock() + return + } + a.finished = true + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + var tr metadata.MD + if a.s != nil { + a.t.CloseStream(a.s, err) + tr = a.s.Trailer() + } + + if a.done != nil { + br := false + if a.s != nil { + br = a.s.BytesReceived() + } + a.done(balancer.DoneInfo{ + Err: err, + Trailer: tr, + BytesSent: a.s != nil, + BytesReceived: br, + ServerLoad: balancerload.Parse(tr), + }) + } + if a.statsHandler != nil { + end := &stats.End{ + Client: true, + BeginTime: a.cs.beginTime, + EndTime: time.Now(), + Trailer: tr, + Error: err, + } + a.statsHandler.HandleRPC(a.cs.ctx, end) + } + if a.trInfo != nil && a.trInfo.tr != nil { + if err == nil { + a.trInfo.tr.LazyPrintf("RPC: [OK]") + } else { + a.trInfo.tr.LazyPrintf("RPC: [%v]", err) + a.trInfo.tr.SetError() + } + a.trInfo.tr.Finish() + a.trInfo.tr = nil + } + a.mu.Unlock() +} + +// newClientStream creates a ClientStream with the specified transport, on the +// given addrConn. +// +// It's expected that the given transport is either the same one in addrConn, or +// is already closed. To avoid race, transport is specified separately, instead +// of using ac.transpot. +// +// Main difference between this and ClientConn.NewStream: +// - no retry +// - no service config (or wait for service config) +// - no tracing or stats +func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) { + if t == nil { + // TODO: return RPC error here? + return nil, errors.New("transport provided is nil") + } + // defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct. 
+ c := &callInfo{} + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: ac.cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if ac.cc.dopts.cp != nil { + callHdr.SendCompress = ac.cc.dopts.cp.Type() + cp = ac.cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + // Use a special addrConnStream to avoid retry. + as := &addrConnStream{ + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + t: t, + } + + as.callInfo.stream = as + s, err := as.t.NewStream(as.ctx, as.callHdr) + if err != nil { + err = toRPCErr(err) + return nil, err + } + as.s = s + as.p = &parser{r: s} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-ac.ctx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) + } + }() + } + return as, nil +} + +type addrConnStream struct { + s *transport.Stream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + t transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + cp Compressor + comp encoding.Compressor + decompSet bool + dc Decompressor + decomp encoding.Compressor + p *parser + mu sync.Mutex + finished bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { + m, err := as.s.Header() + if err != nil { + as.finish(toRPCErr(err)) + } + return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { + return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { + if as.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? 
+ return nil + } + as.sentLast = true + + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil +} + +func (as *addrConnStream) Context() context.Context { + return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + as.finish(err) + } + }() + if as.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !as.desc.ClientStreams { + as.sentLast = true + } + + // load hdr, payload, data + hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payld) > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + } + + if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if !as.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + + if channelz.IsOn() { + as.t.IncrMsgSent() + } + return nil +} + +func (as *addrConnStream) RecvMsg(m interface{}) (err error) { + defer func() { + if err != nil || !as.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + as.finish(err) + } + }() + + if !as.decompSet { + // Block until we receive headers containing received message encoding. + if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if as.dc == nil || as.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + as.dc = nil + as.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + as.dc = nil + } + // Only initialize this state once per stream. + as.decompSet = true + } + err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err != nil { + if err == io.EOF { + if statusErr := as.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + return toRPCErr(err) + } + + if channelz.IsOn() { + as.t.IncrMsgRecv() + } + if as.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. 
+ err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { + return as.s.Status().Err() // non-server streaming Recv returns nil on success + } + return toRPCErr(err) +} + +func (as *addrConnStream) finish(err error) { + as.mu.Lock() + if as.finished { + as.mu.Unlock() + return + } + as.finished = true + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + if as.s != nil { + as.t.CloseStream(as.s, err) + } + + if err != nil { + as.ac.incrCallsFailed() + } else { + as.ac.incrCallsSucceeded() + } + as.cancel() + as.mu.Unlock() +} + +// ServerStream defines the server-side behavior of a streaming RPC. +// +// All errors returned from ServerStream methods are compatible with the +// status package. +type ServerStream interface { + // SetHeader sets the header metadata. It may be called multiple times. + // When call multiple times, all the provided metadata will be merged. + // All the metadata will be sent out when one of the following happens: + // - ServerStream.SendHeader() is called; + // - The first response is sent out; + // - An RPC status is sent out (error or success). + SetHeader(metadata.MD) error + // SendHeader sends the header metadata. + // The provided md and headers set by SetHeader() will be sent. + // It fails if called multiple times. + SendHeader(metadata.MD) error + // SetTrailer sets the trailer metadata which will be sent with the RPC status. + // When called more than once, all the provided metadata will be merged. + SetTrailer(metadata.MD) + // Context returns the context for this stream. + Context() context.Context + // SendMsg sends a message. On error, SendMsg aborts the stream and the + // error is returned directly. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the client. An + // untimely stream closure may result in lost messages. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On + // any non-EOF error, the stream is aborted and the error contains the + // RPC status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// serverStream implements a server side Stream. +type serverStream struct { + ctx context.Context + t transport.ServerTransport + s *transport.Stream + p *parser + codec baseCodec + + cp Compressor + dc Decompressor + comp encoding.Compressor + decomp encoding.Compressor + + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo + + statsHandler stats.Handler + + binlog *binarylog.MethodLogger + // serverHeaderBinlogged indicates whether server header has been logged. It + // will happen when one of the following two happens: stream.SendHeader(), + // stream.Send(). 
+ // + // It's only checked in send and sendHeader, doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex // protects trInfo.tr after the service handler runs. +} + +func (ss *serverStream) Context() context.Context { + return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + err := ss.t.WriteHeader(ss.s, md) + if ss.binlog != nil && !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + ss.binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + ss.serverHeaderBinlogged = true + } + return err +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } + }() + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + if ss.binlog != nil { + if !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + ss.binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + ss.serverHeaderBinlogged = true + } + ss.binlog.Log(&binarylog.ServerMessage{ + Message: data, + }) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } + return nil +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. 
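// A sketch of a handler honoring the ServerStream contract documented above:
// SetHeader calls are merged until SendHeader or the first SendMsg flushes
// them, and SetTrailer metadata travels with the final RPC status. The stream
// is used generically here; generated code normally wraps grpc.ServerStream in
// a typed interface, and the message values are placeholders.
package serverexample

import (
	"strconv"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

func streamItems(items []interface{}, stream grpc.ServerStream) error {
	if err := stream.SetHeader(metadata.Pairs("cache", "miss")); err != nil {
		return err
	}
	sent := 0
	for _, it := range items {
		if err := stream.SendMsg(it); err != nil {
			return err // aborts the stream; the returned error carries the RPC status
		}
		sent++
	}
	stream.SetTrailer(metadata.Pairs("items-sent", strconv.Itoa(sent)))
	return nil // the OK status is sent together with the trailer metadata
}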
+ } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var payInfo *payloadInfo + if ss.statsHandler != nil || ss.binlog != nil { + payInfo = &payloadInfo{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err == io.EOF { + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientHalfClose{}) + } + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength, + Length: len(payInfo.uncompressedBytes), + }) + } + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes, + }) + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} + +// prepareMsg returns the hdr, payload and data +// using the compressors passed or using the +// passed preparedmsg +func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + } + // The input interface is not a prepared msg. + // Marshal and Compress the data at this point + data, err = encode(codec, m) + if err != nil { + return nil, nil, nil, err + } + compData, err := compress(data, cp, comp) + if err != nil { + return nil, nil, nil, err + } + hdr, payload = msgHeader(data, compData) + return hdr, payload, data, nil +} diff --git a/vendor/github.com/fatih/camelcase/BUILD.bazel b/vendor/google.golang.org/grpc/tap/BUILD.bazel similarity index 50% rename from vendor/github.com/fatih/camelcase/BUILD.bazel rename to vendor/google.golang.org/grpc/tap/BUILD.bazel index c458178912791..de269b4ac6fa2 100644 --- a/vendor/github.com/fatih/camelcase/BUILD.bazel +++ b/vendor/google.golang.org/grpc/tap/BUILD.bazel @@ -2,8 +2,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["camelcase.go"], - importmap = "k8s.io/kops/vendor/github.com/fatih/camelcase", - importpath = "github.com/fatih/camelcase", + srcs = ["tap.go"], + importmap = "k8s.io/kops/vendor/google.golang.org/grpc/tap", + importpath = "google.golang.org/grpc/tap", visibility = ["//visibility:public"], ) diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 0000000000000..584360f681b81 --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. +package tap + +import ( + "context" +) + +// Info defines the relevant information needed by the handles. +type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string + // TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is created +// on the server side. If it returns a non-nil error, the stream will not be +// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. +// The client will receive an RPC error "code = Unavailable, desc = stream +// terminated by RST_STREAM with error code: REFUSED_STREAM". +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). And the content of +// the error will be ignored and won't be sent back to the client. For other +// general usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 0000000000000..0a57b9994812b --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "sync" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + if i := strings.LastIndex(m, "."); i >= 0 { + m = m[i+1:] // cut down to last dotted component + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +// It may be mutated after construction; remoteAddr specifically may change +// during client-side use. 
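// A sketch of a ServerInHandle of the kind described in the tap package above:
// a non-blocking rate limiter that refuses streams before they are created.
// The limiter parameters are illustrative, and, as the tap comment notes, the
// error text is not sent to the client (it sees the REFUSED_STREAM-derived
// Unavailable error).
package serverexample

import (
	"context"

	"golang.org/x/time/rate"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

type rateTap struct{ lim *rate.Limiter }

func (t *rateTap) handle(ctx context.Context, info *tap.Info) (context.Context, error) {
	// Never block here: this runs on the connection's I/O goroutine.
	if !t.lim.Allow() {
		return nil, status.Errorf(codes.ResourceExhausted, "rejecting %s", info.FullMethodName)
	}
	return ctx, nil
}

func newServerWithTap() *grpc.Server {
	t := &rateTap{lim: rate.NewLimiter(100, 10)} // ~100 new streams/s, burst of 10
	return grpc.NewServer(grpc.InTapHandle(t.handle))
}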
+type firstLine struct { + mu sync.Mutex + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) SetRemoteAddr(addr net.Addr) { + f.mu.Lock() + f.remoteAddr = addr + f.mu.Unlock() +} + +func (f *firstLine) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go new file mode 100644 index 0000000000000..5411a73a22e31 --- /dev/null +++ b/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. +const Version = "1.23.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100644 index 0000000000000..661e1e1de9b68 --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,134 @@ +#!/bin/bash + +if [[ `uname -a` = *"Darwin"* ]]; then + echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047" + exit 1 +fi + +set -ex # Exit on error; debugging enabled. +set -o pipefail # Fail a pipe if any sub-command fails. + +die() { + echo "$@" >&2 + exit 1 +} + +fail_on_output() { + tee /dev/stderr | (! read) +} + +# Check to make sure it's safe to modify the user's git repo. +git status --porcelain | fail_on_output + +# Undo any edits made by this script. 
+cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT + +PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" + +if [[ "$1" = "-install" ]]; then + # Check for module support + if go help mod >& /dev/null; then + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell \ + github.com/golang/protobuf/protoc-gen-go + else + # Ye olde `go get` incantation. + # Note: this gets the latest version of all tools (vs. the pinned versions + # with Go modules). + go get -u \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell \ + github.com/golang/protobuf/protoc-gen-go + fi + if [[ -z "${VET_SKIP_PROTO}" ]]; then + if [[ "${TRAVIS}" = "true" ]]; then + PROTOBUF_VERSION=3.3.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/travis + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif ! which protoc > /dev/null; then + die "Please install protoc into your path" + fi + fi + exit 0 +elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" +fi + +# - Ensure all source files contain a copyright message. +git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | fail_on_output + +# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. +(! grep 'func Test[^(]' *_test.go) +(! grep 'func Test[^(]' test/*.go) + +# - Do not import math/rand for real library code. Use internal/grpcrand for +# thread safety. +git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|wrr_test') + +# - Ensure all ptypes proto packages are renamed when importing. +git ls-files "*.go" | (! xargs grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/") + +# - Check imports that are illegal in appengine (until Go 1.11). +# TODO: Remove when we drop Go 1.10 support +go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go + +# - gofmt, goimports, golint (with exceptions for generated code), go vet. +gofmt -s -d -l . 2>&1 | fail_on_output +goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") | fail_on_output +golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") +go vet -all . + +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + PATH="/home/travis/bin:${PATH}" make proto && \ + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +# - Check that our module is tidy. +if go help mod >& /dev/null; then + go mod tidy && \ + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +# - Collection of static analysis checks +# TODO(dfawley): don't use deprecated functions in examples. 
+staticcheck -go 1.9 -checks 'inherit,-ST1015' -ignore ' +google.golang.org/grpc/balancer.go:SA1019 +google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go:SA1019 +google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go:SA1019 +google.golang.org/grpc/xds/internal/balancer/edsbalancer/balancergroup.go:SA1019 +google.golang.org/grpc/xds/internal/balancer/xds.go:SA1019 +google.golang.org/grpc/xds/internal/balancer/xds_client.go:SA1019 +google.golang.org/grpc/balancer_conn_wrappers.go:SA1019 +google.golang.org/grpc/balancer_test.go:SA1019 +google.golang.org/grpc/benchmark/benchmain/main.go:SA1019 +google.golang.org/grpc/benchmark/worker/benchmark_client.go:SA1019 +google.golang.org/grpc/clientconn.go:S1024 +google.golang.org/grpc/clientconn_state_transition_test.go:SA1019 +google.golang.org/grpc/clientconn_test.go:SA1019 +google.golang.org/grpc/examples/features/debugging/client/main.go:SA1019 +google.golang.org/grpc/examples/features/load_balancing/client/main.go:SA1019 +google.golang.org/grpc/internal/transport/handler_server.go:SA1019 +google.golang.org/grpc/internal/transport/handler_server_test.go:SA1019 +google.golang.org/grpc/resolver/dns/dns_resolver.go:SA1019 +google.golang.org/grpc/stats/stats_test.go:SA1019 +google.golang.org/grpc/test/balancer_test.go:SA1019 +google.golang.org/grpc/test/channelz_test.go:SA1019 +google.golang.org/grpc/test/end2end_test.go:SA1019 +google.golang.org/grpc/test/healthcheck_test.go:SA1019 +' ./... +misspell -error . diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go index e4e56e28e0e80..531087655559e 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -229,6 +229,10 @@ type decoder struct { mapType reflect.Type terrors []string strict bool + + decodeCount int + aliasCount int + aliasDepth int } var ( @@ -314,7 +318,39 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm return out, false, false } +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + // 4,000,000 decode operations is ~5MB of dense object declarations, or ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } switch n.kind { case documentNode: return d.document(n, out) @@ -353,7 +389,9 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) { failf("anchor '%s' value contains itself", n.value) } d.aliases[n] = true + d.aliasDepth++ good = d.unmarshal(n.alias, out) + d.aliasDepth-- delete(d.aliases, n) return good } diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go index 6c151db6fbd58..4120e0c9160a1 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -81,7 +81,7 @@ func resolvableTag(tag string) bool { return false } -var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) func resolve(tag string, in string) (rtag string, out interface{}) { if !resolvableTag(tag) { diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index 077fd1dd2d446..570b8ecd10fd5 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -906,6 +906,9 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { return true } +// max_flow_level limits the flow_level +const max_flow_level = 10000 + // Increase the flow level and resize the simple key list if needed. func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { // Reset the simple key on the next level. @@ -913,6 +916,11 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { // Increase the flow level. parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } return true } @@ -925,6 +933,9 @@ func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { return true } +// max_indents limits the indents stack size +const max_indents = 10000 + // Push the current indentation level to the stack and set the new level // the current column is greater than the indentation level. In this case, // append or insert the specified token into the token queue. @@ -939,6 +950,11 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml // indentation level. parser.indents = append(parser.indents, parser.indent) parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } // Create a token and insert it into the queue. 
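// A compact sketch of what the yaml.v2 limits introduced above mean in
// practice, with illustrative numbers. aliasBudget mirrors the allowedAliasRatio
// scaling (it is not the library API), and the map lists inputs accepted or
// rejected by the widened yamlStyleFloat pattern.
package yamlexample

// aliasBudget returns roughly how many alias-driven decode operations a
// document of the given size may perform before "document contains excessive
// aliasing" is reported: 99% for small documents, scaling down linearly to 10%
// at 4,000,000 decode operations (for example, about 84% at 1,000,000).
func aliasBudget(decodeCount int) int {
	const low, high = 400000, 4000000
	switch {
	case decodeCount <= low:
		return decodeCount * 99 / 100
	case decodeCount >= high:
		return decodeCount / 10
	default:
		ratio := 0.99 - 0.89*float64(decodeCount-low)/float64(high-low)
		return int(float64(decodeCount) * ratio)
	}
}

var yamlFloatCandidates = map[string]bool{
	"685230.15": true,  // plain decimal
	".5":        true,  // leading dot now allowed
	"3.":        true,  // trailing dot now allowed
	"12e03":     true,  // exponent sign is now optional
	"1.2.3":     false, // still not a float; resolves as a string
}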
token := yaml_token_t{ diff --git a/vendor/k8s.io/api/admission/v1/BUILD.bazel b/vendor/k8s.io/api/admission/v1/BUILD.bazel new file mode 100644 index 0000000000000..c1e8a920d1608 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.deepcopy.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/api/admission/v1", + importpath = "k8s.io/api/admission/v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", + "//vendor/k8s.io/api/authentication/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go b/vendor/k8s.io/api/admission/v1/doc.go similarity index 58% rename from vendor/k8s.io/kubernetes/pkg/kubectl/doc.go rename to vendor/k8s.io/api/admission/v1/doc.go index 52afc49f19f4d..cbc6bb59dd961 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go +++ b/vendor/k8s.io/api/admission/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,8 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package kubectl provides the functions used by the kubectl command line tool -// under k8s.io/kubernetes/cmd. The functions are kept in this package to better -// support unit testing. The main() method for kubectl is only an entry point -// and should contain no functionality. -package kubectl // import "k8s.io/kubernetes/pkg/kubectl" +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=false + +// +groupName=admission.k8s.io + +package v1 // import "k8s.io/api/admission/v1" diff --git a/vendor/k8s.io/api/admission/v1/generated.pb.go b/vendor/k8s.io/api/admission/v1/generated.pb.go new file mode 100644 index 0000000000000..e6b4f7240efc9 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/generated.pb.go @@ -0,0 +1,1769 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
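// A minimal sketch of an admission webhook handler built on the admission/v1
// types vendored above; it echoes the request UID and allows everything. TLS
// setup, error reporting, and patching are omitted, and the package name and
// HTTP wiring are illustrative.
package webhook

import (
	"encoding/json"
	"io/ioutil"
	"net/http"

	admissionv1 "k8s.io/api/admission/v1"
)

func serveAdmission(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	review := admissionv1.AdmissionReview{}
	if err := json.Unmarshal(body, &review); err != nil || review.Request == nil {
		http.Error(w, "could not decode AdmissionReview", http.StatusBadRequest)
		return
	}
	review.Response = &admissionv1.AdmissionResponse{
		UID:     review.Request.UID, // must match the request UID
		Allowed: true,
	}
	out, err := json.Marshal(review)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(out)
}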
+// source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } +func (*AdmissionRequest) ProtoMessage() {} +func (*AdmissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4b73421fd5edef9f, []int{0} +} +func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionRequest.Merge(m, src) +} +func (m *AdmissionRequest) XXX_Size() int { + return m.Size() +} +func (m *AdmissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo + +func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } +func (*AdmissionResponse) ProtoMessage() {} +func (*AdmissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4b73421fd5edef9f, []int{1} +} +func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionResponse.Merge(m, src) +} +func (m *AdmissionResponse) XXX_Size() int { + return m.Size() +} +func (m *AdmissionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo + +func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } +func (*AdmissionReview) ProtoMessage() {} +func (*AdmissionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_4b73421fd5edef9f, []int{2} +} +func (m *AdmissionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionReview.Merge(m, src) +} +func (m *AdmissionReview) XXX_Size() int { + return m.Size() +} +func (m *AdmissionReview) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionReview.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo + +func init() { + 
proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1.AdmissionRequest") + proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1.AdmissionResponse") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1.AdmissionResponse.AuditAnnotationsEntry") + proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1.AdmissionReview") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1/generated.proto", fileDescriptor_4b73421fd5edef9f) +} + +var fileDescriptor_4b73421fd5edef9f = []byte{ + // 898 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xf6, 0xc6, 0x89, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0x58, 0xf9, 0xb0, 0x36, 0x39, 0x20, + 0x17, 0xb5, 0xb3, 0x24, 0x82, 0x2a, 0xaa, 0x38, 0x34, 0x4b, 0x2a, 0x14, 0x90, 0x9a, 0x68, 0xda, + 0xa0, 0x8a, 0x03, 0xd2, 0xd8, 0x3b, 0xb5, 0x17, 0xdb, 0x33, 0xcb, 0xce, 0xac, 0x83, 0x6f, 0x9c, + 0x38, 0xf3, 0x0f, 0xf8, 0x1d, 0xfc, 0x83, 0x1c, 0x7b, 0xec, 0xc9, 0x22, 0xe6, 0x5f, 0x44, 0x42, + 0x42, 0x33, 0x3b, 0xfb, 0xd1, 0x7c, 0x88, 0xd0, 0xf4, 0xe4, 0x79, 0x3f, 0x9e, 0xe7, 0x7d, 0xfd, + 0xbc, 0x3b, 0xef, 0x80, 0x27, 0x93, 0x5d, 0x81, 0x42, 0xee, 0x4d, 0x92, 0x01, 0x8d, 0x19, 0x95, + 0x54, 0x78, 0x73, 0xca, 0x02, 0x1e, 0x7b, 0x26, 0x40, 0xa2, 0xd0, 0x23, 0xc1, 0x2c, 0x14, 0x22, + 0xe4, 0xcc, 0x9b, 0x6f, 0x7b, 0x23, 0xca, 0x68, 0x4c, 0x24, 0x0d, 0x50, 0x14, 0x73, 0xc9, 0xe1, + 0xc7, 0x69, 0x22, 0x22, 0x51, 0x88, 0xf2, 0x44, 0x34, 0xdf, 0xee, 0x3c, 0x1c, 0x85, 0x72, 0x9c, + 0x0c, 0xd0, 0x90, 0xcf, 0xbc, 0x11, 0x1f, 0x71, 0x4f, 0xe7, 0x0f, 0x92, 0x57, 0xda, 0xd2, 0x86, + 0x3e, 0xa5, 0x3c, 0x9d, 0x07, 0xe5, 0x82, 0x89, 0x1c, 0x53, 0x26, 0xc3, 0x21, 0x91, 0x57, 0x57, + 0xed, 0x7c, 0x51, 0x64, 0xcf, 0xc8, 0x70, 0x1c, 0x32, 0x1a, 0x2f, 0xbc, 0x68, 0x32, 0x52, 0x0e, + 0xe1, 0xcd, 0xa8, 0x24, 0x57, 0xa1, 0xbc, 0xeb, 0x50, 0x71, 0xc2, 0x64, 0x38, 0xa3, 0x97, 0x00, + 0x8f, 0xfe, 0x0b, 0x20, 0x86, 0x63, 0x3a, 0x23, 0x17, 0x71, 0x5b, 0x7f, 0xd8, 0xa0, 0xbd, 0x97, + 0x89, 0x81, 0xe9, 0xcf, 0x09, 0x15, 0x12, 0xfa, 0xa0, 0x9a, 0x84, 0x81, 0x63, 0xf5, 0xac, 0xbe, + 0xed, 0x7f, 0x7e, 0xba, 0xec, 0x56, 0x56, 0xcb, 0x6e, 0xf5, 0xf8, 0x60, 0xff, 0x7c, 0xd9, 0xfd, + 0xe4, 0xba, 0x42, 0x72, 0x11, 0x51, 0x81, 0x8e, 0x0f, 0xf6, 0xb1, 0x02, 0xc3, 0x97, 0x60, 0x7d, + 0x12, 0xb2, 0xc0, 0x59, 0xeb, 0x59, 0xfd, 0xe6, 0xce, 0x23, 0x54, 0x88, 0x9f, 0xc3, 0x50, 0x34, + 0x19, 0x29, 0x87, 0x40, 0x4a, 0x06, 0x34, 0xdf, 0x46, 0xdf, 0xc4, 0x3c, 0x89, 0xbe, 0xa7, 0xb1, + 0x6a, 0xe6, 0xbb, 0x90, 0x05, 0xfe, 0xa6, 0x29, 0xbe, 0xae, 0x2c, 0xac, 0x19, 0xe1, 0x18, 0x34, + 0x62, 0x2a, 0x78, 0x12, 0x0f, 0xa9, 0x53, 0xd5, 0xec, 0x8f, 0xff, 0x3f, 0x3b, 0x36, 0x0c, 0x7e, + 0xdb, 0x54, 0x68, 0x64, 0x1e, 0x9c, 0xb3, 0xc3, 0x2f, 0x41, 0x53, 0x24, 0x83, 0x2c, 0xe0, 0xac, + 0x6b, 0x3d, 0xee, 0x19, 0x40, 0xf3, 0x79, 0x11, 0xc2, 0xe5, 0x3c, 0x18, 0x82, 0x66, 0x9c, 0x2a, + 0xa9, 0xba, 0x76, 0x3e, 0xb8, 0x95, 0x02, 0x2d, 0x55, 0x0a, 0x17, 0x74, 0xb8, 0xcc, 0x0d, 0x17, + 0xa0, 0x65, 0xcc, 0xbc, 0xcb, 0x3b, 0xb7, 0x96, 0xe4, 0xde, 0x6a, 0xd9, 0x6d, 0xe1, 0xb7, 0x69, + 0xf1, 0xc5, 0x3a, 0xf0, 0x5b, 0x00, 0x8d, 0xab, 0x24, 0x84, 0xd3, 0xd2, 0x1a, 0x75, 0x8c, 0x46, + 0x10, 0x5f, 0xca, 0xc0, 0x57, 0xa0, 0x60, 0x0f, 0xac, 0x33, 0x32, 0xa3, 0xce, 0x86, 0x46, 0xe7, + 0x43, 0x7f, 0x46, 0x66, 0x14, 0xeb, 0x08, 0xf4, 0x80, 0xad, 0x7e, 0x45, 0x44, 0x86, 0xd4, 0xa9, + 0xe9, 0xb4, 0xbb, 0x26, 0xcd, 0x7e, 0x96, 0x05, 0x70, 0x91, 0x03, 0xbf, 0x02, 
0x36, 0x8f, 0xd4, + 0xa7, 0x1e, 0x72, 0xe6, 0xd4, 0x35, 0xc0, 0xcd, 0x00, 0x87, 0x59, 0xe0, 0xbc, 0x6c, 0xe0, 0x02, + 0x00, 0x5f, 0x80, 0x46, 0x22, 0x68, 0x7c, 0xc0, 0x5e, 0x71, 0xa7, 0xa1, 0x05, 0xfd, 0x14, 0x95, + 0xd7, 0xc7, 0x5b, 0xd7, 0x5e, 0x09, 0x79, 0x6c, 0xb2, 0x8b, 0xef, 0x29, 0xf3, 0xe0, 0x9c, 0x09, + 0x1e, 0x83, 0x1a, 0x1f, 0xfc, 0x44, 0x87, 0xd2, 0xb1, 0x35, 0xe7, 0xc3, 0x6b, 0x87, 0x64, 0x6e, + 0x2d, 0xc2, 0xe4, 0xe4, 0xe9, 0x2f, 0x92, 0x32, 0x35, 0x1f, 0xff, 0x8e, 0xa1, 0xae, 0x1d, 0x6a, + 0x12, 0x6c, 0xc8, 0xe0, 0x8f, 0xc0, 0xe6, 0xd3, 0x20, 0x75, 0x3a, 0xe0, 0x5d, 0x98, 0x73, 0x29, + 0x0f, 0x33, 0x1e, 0x5c, 0x50, 0xc2, 0x2d, 0x50, 0x0b, 0xe2, 0x05, 0x4e, 0x98, 0xd3, 0xec, 0x59, + 0xfd, 0x86, 0x0f, 0x54, 0x0f, 0xfb, 0xda, 0x83, 0x4d, 0x04, 0xbe, 0x04, 0x75, 0x1e, 0x29, 0x31, + 0x84, 0xb3, 0xf9, 0x2e, 0x1d, 0xb4, 0x4c, 0x07, 0xf5, 0xc3, 0x94, 0x05, 0x67, 0x74, 0x5b, 0xff, + 0x54, 0xc1, 0xdd, 0xd2, 0x86, 0x12, 0x11, 0x67, 0x82, 0xbe, 0x97, 0x15, 0x75, 0x1f, 0xd4, 0xc9, + 0x74, 0xca, 0x4f, 0x68, 0xba, 0xa5, 0x1a, 0x45, 0x13, 0x7b, 0xa9, 0x1b, 0x67, 0x71, 0x78, 0x04, + 0x6a, 0x42, 0x12, 0x99, 0x08, 0xb3, 0x71, 0x1e, 0xdc, 0xec, 0x7a, 0x3d, 0xd7, 0x98, 0x54, 0x30, + 0x4c, 0x45, 0x32, 0x95, 0xd8, 0xf0, 0xc0, 0x2e, 0xd8, 0x88, 0x88, 0x1c, 0x8e, 0xf5, 0x56, 0xd9, + 0xf4, 0xed, 0xd5, 0xb2, 0xbb, 0x71, 0xa4, 0x1c, 0x38, 0xf5, 0xc3, 0x5d, 0x60, 0xeb, 0xc3, 0x8b, + 0x45, 0x94, 0x5d, 0x8c, 0x8e, 0x1a, 0xd1, 0x51, 0xe6, 0x3c, 0x2f, 0x1b, 0xb8, 0x48, 0x86, 0xbf, + 0x59, 0xa0, 0x4d, 0x92, 0x20, 0x94, 0x7b, 0x8c, 0x71, 0x49, 0xd2, 0xa9, 0xd4, 0x7a, 0xd5, 0x7e, + 0x73, 0xe7, 0x09, 0xba, 0xe6, 0x11, 0x44, 0x97, 0x24, 0x46, 0x7b, 0x17, 0x28, 0x9e, 0x32, 0x19, + 0x2f, 0x7c, 0xc7, 0x68, 0xd4, 0xbe, 0x18, 0xc6, 0x97, 0x6a, 0x76, 0xbe, 0x06, 0x1f, 0x5d, 0x49, + 0x02, 0xdb, 0xa0, 0x3a, 0xa1, 0x8b, 0x74, 0x7a, 0x58, 0x1d, 0xe1, 0x87, 0x60, 0x63, 0x4e, 0xa6, + 0x09, 0xd5, 0x93, 0xb0, 0x71, 0x6a, 0x3c, 0x5e, 0xdb, 0xb5, 0xb6, 0xfe, 0xb4, 0x40, 0xab, 0xd4, + 0xdc, 0x3c, 0xa4, 0x27, 0xf0, 0x08, 0xd4, 0xcd, 0x16, 0xd1, 0x1c, 0xcd, 0x9d, 0xfb, 0x37, 0xf9, + 0x5f, 0x1a, 0xe0, 0x37, 0xd5, 0x80, 0xb3, 0xed, 0x96, 0xd1, 0xa8, 0x0b, 0x1f, 0x9b, 0x3f, 0x6e, + 0x9e, 0xac, 0xcf, 0x6e, 0x2e, 0x95, 0xbf, 0x69, 0x1e, 0x10, 0x6d, 0xe1, 0x9c, 0xc9, 0xef, 0x9f, + 0x9e, 0xb9, 0x95, 0xd7, 0x67, 0x6e, 0xe5, 0xcd, 0x99, 0x5b, 0xf9, 0x75, 0xe5, 0x5a, 0xa7, 0x2b, + 0xd7, 0x7a, 0xbd, 0x72, 0xad, 0x37, 0x2b, 0xd7, 0xfa, 0x6b, 0xe5, 0x5a, 0xbf, 0xff, 0xed, 0x56, + 0x7e, 0x58, 0x9b, 0x6f, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x37, 0xc7, 0x3f, 0x71, 0xdf, 0x08, + 0x00, 0x00, +} + +func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RequestSubResource) + copy(dAtA[i:], m.RequestSubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource))) + i-- + dAtA[i] = 0x7a + if m.RequestResource != nil { + { + size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.RequestKind != nil { + { + size, err := 
m.RequestKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + if m.DryRun != nil { + i-- + if *m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + { + size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x3a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x32 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x2a + i -= len(m.SubResource) + copy(dAtA[i:], m.SubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AuditAnnotations) > 0 { + keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) + for k := range m.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if m.PatchType != nil { + i -= len(*m.PatchType) + copy(dAtA[i:], *m.PatchType) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) + i-- + dAtA[i] = 0x2a + } + if m.Patch != nil { + i -= len(m.Patch) + copy(dAtA[i:], m.Patch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) + i-- + dAtA[i] = 0x22 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AdmissionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Kind.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubResource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operation) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.OldObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DryRun != nil { + n += 2 + } + l = m.Options.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RequestKind != nil { + l = m.RequestKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RequestResource != nil { + l = m.RequestResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.RequestSubResource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AdmissionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Patch != nil { + l = len(m.Patch) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PatchType != nil { + l = len(*m.PatchType) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for k, v := range m.AuditAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + 
sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *AdmissionReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AdmissionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionRequest{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, + `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `DryRun:` + valueToStringGenerated(this.DryRun) + `,`, + `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`, + `RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionResponse) String() string { + if this == nil { + return "nil" + } + keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations)) + for k := range this.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + mapStringForAuditAnnotations := "map[string]string{" + for _, k := range keysForAuditAnnotations { + mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k]) + } + mapStringForAuditAnnotations += "}" + s := strings.Join([]string{`&AdmissionResponse{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`, + `Patch:` + valueToStringGenerated(this.Patch) + `,`, + `PatchType:` + valueToStringGenerated(this.PatchType) + `,`, + `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionReview{`, + `Request:` + strings.Replace(this.Request.String(), 
"AdmissionRequest", "AdmissionRequest", 1) + `,`, + `Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operation = Operation(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DryRun = &b + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestKind == nil { + m.RequestKind = &v1.GroupVersionKind{} + } + if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestResource == nil { + m.RequestResource = &v1.GroupVersionResource{} + } + if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
RequestSubResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestSubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &v1.Status{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...) + if m.Patch == nil { + m.Patch = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PatchType(dAtA[iNdEx:postIndex]) + m.PatchType = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuditAnnotations == nil { + m.AuditAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return 
ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AuditAnnotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &AdmissionRequest{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &AdmissionResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/admission/v1/generated.proto b/vendor/k8s.io/api/admission/v1/generated.proto new file mode 100644 index 0000000000000..8d960a17d326c --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/generated.proto @@ -0,0 +1,160 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.admission.v1; + +import "k8s.io/api/authentication/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// AdmissionRequest describes the admission.Attributes for the admission request. 
+message AdmissionRequest { + // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + optional string uid = 1; + + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; + + // Resource is the fully-qualified resource being requested (for example, v1.pods) + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; + + // SubResource is the subresource being requested, if any (for example, "status" or "scale") + // +optional + optional string subResource = 4; + + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; + + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; + + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + optional string requestSubResource = 15; + + // Name is the name of the object as presented in the request. 
On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this field will contain an empty string. + // +optional + optional string name = 5; + + // Namespace is the namespace associated with the request (if any). + // +optional + optional string namespace = 6; + + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. + optional string operation = 7; + + // UserInfo is information about the requesting user + optional k8s.io.api.authentication.v1.UserInfo userInfo = 8; + + // Object is the object from the incoming request. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; + + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; + + // DryRun indicates that modifications will definitely not be persisted for this request. + // Defaults to false. + // +optional + optional bool dryRun = 11; + + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; +} + +// AdmissionResponse describes an admission response. +message AdmissionResponse { + // UID is an identifier for the individual request/response. + // This must be copied over from the corresponding AdmissionRequest. + optional string uid = 1; + + // Allowed indicates whether or not the admission request was permitted. + optional bool allowed = 2; + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + optional bytes patch = 4; + + // The type of Patch. Currently we only allow "JSONPatch". + // +optional + optional string patchType = 5; + + // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). + // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with + // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by + // the admission webhook to add additional context to the audit log for this request. + // +optional + map<string, string> auditAnnotations = 6; +} + +// AdmissionReview describes an admission review request/response. +message AdmissionReview { + // Request describes the attributes for the admission request. + // +optional + optional AdmissionRequest request = 1; + + // Response describes the attributes for the admission response.
+ // +optional + optional AdmissionResponse response = 2; +} + diff --git a/vendor/k8s.io/api/admission/v1/register.go b/vendor/k8s.io/api/admission/v1/register.go new file mode 100644 index 0000000000000..b548509ab3257 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "admission.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AdmissionReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admission/v1/types.go b/vendor/k8s.io/api/admission/v1/types.go new file mode 100644 index 0000000000000..a40cb0d52e270 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/types.go @@ -0,0 +1,162 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AdmissionReview describes an admission review request/response. +type AdmissionReview struct { + metav1.TypeMeta `json:",inline"` + // Request describes the attributes for the admission request. + // +optional + Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` + // Response describes the attributes for the admission response. 
+ // +optional + Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` +} + +// AdmissionRequest describes the admission.Attributes for the admission request. +type AdmissionRequest struct { + // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) + Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Resource is the fully-qualified resource being requested (for example, v1.pods) + Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"` + // SubResource is the subresource being requested, if any (for example, "status" or "scale") + // +optional + SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"` + + // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). + // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), + // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type for more details. + // +optional + RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"` + // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). + // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. + // + // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of + // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, + // an API request to apps/v1beta1 deployments would be converted and sent to the webhook + // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), + // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). + // + // See documentation for the "matchPolicy" field in the webhook configuration type. 
+ // +optional + RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"` + // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") + // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. + // See documentation for the "matchPolicy" field in the webhook configuration type. + // +optional + RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"` + + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this field will contain an empty string. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` + // Namespace is the namespace associated with the request (if any). + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` + // Operation is the operation being performed. This may be different than the operation + // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. + Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"` + // UserInfo is information about the requesting user + UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"` + // Object is the object from the incoming request. + // +optional + Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"` + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. + // +optional + OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"` + // DryRun indicates that modifications will definitely not be persisted for this request. + // Defaults to false. + // +optional + DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"` + // Options is the operation option structure of the operation being performed. + // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be + // different than the options the caller provided. e.g. for a patch request the performed + // Operation might be a CREATE, in which case the Options will a + // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. + // +optional + Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"` +} + +// AdmissionResponse describes an admission response. +type AdmissionResponse struct { + // UID is an identifier for the individual request/response. + // This must be copied over from the corresponding AdmissionRequest. + UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + + // Allowed indicates whether or not the admission request was permitted. + Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"` + + // The type of Patch. Currently we only allow "JSONPatch". 
+ // +optional + PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"` + + // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). + // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with + // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by + // the admission webhook to add additional context to the audit log for this request. + // +optional + AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"` +} + +// PatchType is the type of patch being used to represent the mutated object +type PatchType string + +// PatchType constants. +const ( + PatchTypeJSONPatch PatchType = "JSONPatch" +) + +// Operation is the type of resource operation being checked for admission control +type Operation string + +// Operation constants +const ( + Create Operation = "CREATE" + Update Operation = "UPDATE" + Delete Operation = "DELETE" + Connect Operation = "CONNECT" +) diff --git a/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000000..62351b161774e --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go @@ -0,0 +1,77 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AdmissionRequest = map[string]string{ + "": "AdmissionRequest describes the admission.Attributes for the admission request.", + "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. 
It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.", + "kind": "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)", + "resource": "Resource is the fully-qualified resource being requested (for example, v1.pods)", + "subResource": "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")", + "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.", + "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.", + "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.", + "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this field will contain an empty string.", + "namespace": "Namespace is the namespace associated with the request (if any).", + "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.", + "userInfo": "UserInfo is information about the requesting user", + "object": "Object is the object from the incoming request.", + "oldObject": "OldObject is the existing object. Only populated for DELETE and UPDATE requests.", + "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.", + "options": "Options is the operation option structure of the operation being performed. e.g. 
`meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.", +} + +func (AdmissionRequest) SwaggerDoc() map[string]string { + return map_AdmissionRequest +} + +var map_AdmissionResponse = map[string]string{ + "": "AdmissionResponse describes an admission response.", + "uid": "UID is an identifier for the individual request/response. This must be copied over from the corresponding AdmissionRequest.", + "allowed": "Allowed indicates whether or not the admission request was permitted.", + "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".", + "patch": "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.", + "patchType": "The type of Patch. Currently we only allow \"JSONPatch\".", + "auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.", +} + +func (AdmissionResponse) SwaggerDoc() map[string]string { + return map_AdmissionResponse +} + +var map_AdmissionReview = map[string]string{ + "": "AdmissionReview describes an admission review request/response.", + "request": "Request describes the attributes for the admission request.", + "response": "Response describes the attributes for the admission response.", +} + +func (AdmissionReview) SwaggerDoc() map[string]string { + return map_AdmissionReview +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..42954ca427270 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go @@ -0,0 +1,136 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { + *out = *in + out.Kind = in.Kind + out.Resource = in.Resource + if in.RequestKind != nil { + in, out := &in.RequestKind, &out.RequestKind + *out = new(metav1.GroupVersionKind) + **out = **in + } + if in.RequestResource != nil { + in, out := &in.RequestResource, &out.RequestResource + *out = new(metav1.GroupVersionResource) + **out = **in + } + in.UserInfo.DeepCopyInto(&out.UserInfo) + in.Object.DeepCopyInto(&out.Object) + in.OldObject.DeepCopyInto(&out.OldObject) + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = new(bool) + **out = **in + } + in.Options.DeepCopyInto(&out.Options) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest. +func (in *AdmissionRequest) DeepCopy() *AdmissionRequest { + if in == nil { + return nil + } + out := new(AdmissionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) { + *out = *in + if in.Result != nil { + in, out := &in.Result, &out.Result + *out = new(metav1.Status) + (*in).DeepCopyInto(*out) + } + if in.Patch != nil { + in, out := &in.Patch, &out.Patch + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.PatchType != nil { + in, out := &in.PatchType, &out.PatchType + *out = new(PatchType) + **out = **in + } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse. +func (in *AdmissionResponse) DeepCopy() *AdmissionResponse { + if in == nil { + return nil + } + out := new(AdmissionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(AdmissionRequest) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(AdmissionResponse) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. +func (in *AdmissionReview) DeepCopy() *AdmissionReview { + if in == nil { + return nil + } + out := new(AdmissionReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdmissionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.pb.go b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go index 3bf0d1c622556..10d3bead6f01c 100644 --- a/vendor/k8s.io/api/admission/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go @@ -17,37 +17,23 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto - - It has these top-level messages: - AdmissionRequest - AdmissionResponse - AdmissionReview -*/ package v1beta1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" + strings "strings" - io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they are not otherwise used. @@ -61,27 +47,166 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } -func (*AdmissionRequest) ProtoMessage() {} -func (*AdmissionRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } +func (*AdmissionRequest) ProtoMessage() {} +func (*AdmissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{0} +} +func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionRequest.Merge(m, src) +} +func (m *AdmissionRequest) XXX_Size() int { + return m.Size() +} +func (m *AdmissionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo + +func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } +func (*AdmissionResponse) ProtoMessage() {} +func (*AdmissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{1} +} +func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionResponse.Merge(m, src) +} +func (m *AdmissionResponse) XXX_Size() int { + return m.Size() +} +func (m *AdmissionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo -func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } -func (*AdmissionResponse) ProtoMessage() {} -func (*AdmissionResponse) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } +func (*AdmissionReview) ProtoMessage() {} +func (*AdmissionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_b87c2352de86eab9, []int{2} +} +func (m *AdmissionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionReview.Merge(m, src) +} +func (m *AdmissionReview) XXX_Size() int { + return m.Size() +} +func (m *AdmissionReview) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionReview.DiscardUnknown(m) +} -func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } -func (*AdmissionReview) ProtoMessage() {} -func (*AdmissionReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo func init() { proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1beta1.AdmissionRequest") proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse.AuditAnnotationsEntry") proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1beta1.AdmissionReview") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto", fileDescriptor_b87c2352de86eab9) +} + +var fileDescriptor_b87c2352de86eab9 = []byte{ + // 902 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xd6, 0x8e, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0xb4, 0xb2, 0xd0, 0xda, 0xe4, 0x80, + 0x82, 0xd4, 0xcc, 0x92, 0x08, 0xaa, 0xa8, 0xe2, 0x92, 0x25, 0x11, 0x0a, 0x48, 0x4d, 0x34, 0xad, + 0x51, 0xe1, 0x80, 0x34, 0xf6, 0x4e, 0xed, 0xc5, 0xf6, 0xcc, 0xb2, 0x33, 0xeb, 0xe0, 0x1b, 0xe2, + 0xca, 0x85, 0x6f, 0xc0, 0x87, 0xe1, 0x92, 0x63, 0x8f, 0x3d, 0x59, 0xc4, 0x7c, 0x8b, 0x9c, 0xd0, + 0xcc, 0xce, 0x7a, 0xb7, 0x76, 0x02, 0xfd, 0xc3, 0xc9, 0xf3, 0xfe, 0xfc, 0x7e, 0xef, 0xf9, 0xf7, + 0x76, 0xde, 0x80, 0x93, 0xf1, 0xa1, 0x40, 0x21, 0xf7, 0xc6, 0x49, 0x9f, 0xc6, 0x8c, 0x4a, 0x2a, + 0xbc, 0x19, 0x65, 0x01, 0x8f, 0x3d, 0x13, 0x20, 0x51, 0xe8, 0x91, 0x60, 0x1a, 0x0a, 0x11, 0x72, + 0xe6, 0xcd, 0xf6, 0xfb, 0x54, 0x92, 0x7d, 0x6f, 0x48, 0x19, 0x8d, 0x89, 0xa4, 0x01, 0x8a, 0x62, + 0x2e, 0x39, 0xfc, 0x30, 0xcd, 0x46, 0x24, 0x0a, 0xd1, 0x2a, 0x1b, 0x99, 0xec, 0xf6, 0xde, 0x30, + 0x94, 0xa3, 0xa4, 0x8f, 0x06, 0x7c, 0xea, 0x0d, 0xf9, 0x90, 0x7b, 0x1a, 0xd4, 0x4f, 0x9e, 0x6b, + 0x4b, 0x1b, 0xfa, 0x94, 0x92, 0xb5, 0x1f, 0x14, 0x4b, 0x27, 0x72, 0x44, 0x99, 0x0c, 0x07, 0x44, + 0xa6, 0xf5, 0xd7, 0x4b, 0xb7, 0x3f, 0xcb, 0xb3, 0xa7, 0x64, 0x30, 0x0a, 0x19, 0x8d, 0xe7, 0x5e, + 0x34, 0x1e, 0x2a, 0x87, 0xf0, 0xa6, 0x54, 0x92, 0x9b, 0x50, 0xde, 0x6d, 0xa8, 0x38, 0x61, 0x32, + 0x9c, 0xd2, 0x0d, 0xc0, 0xc3, 0xff, 0x02, 0x88, 0xc1, 0x88, 0x4e, 0xc9, 0x3a, 0x6e, 0xe7, 0x0f, + 0x1b, 0xb4, 0x8e, 0x32, 0x45, 0x30, 0xfd, 0x29, 0xa1, 0x42, 0x42, 0x1f, 0x94, 0x93, 0x30, 0x70, + 0xac, 0xae, 0xb5, 0x6b, 0xfb, 0x9f, 0x5e, 0x2e, 0x3a, 0xa5, 0xe5, 0xa2, 0x53, 0xee, 0x9d, 0x1e, + 0x5f, 0x2f, 0x3a, 0x1f, 0xdd, 0x56, 0x48, 0xce, 0x23, 0x2a, 0x50, 0xef, 0xf4, 0x18, 0x2b, 0x30, + 0x7c, 0x06, 0x2a, 0xe3, 0x90, 0x05, 0xce, 0x9d, 0xae, 0xb5, 0xdb, 0x38, 0x78, 0x88, 0xf2, 0x09, + 0xac, 0x60, 0x28, 0x1a, 0x0f, 0x95, 0x43, 0x20, 0x25, 0x03, 0x9a, 0xed, 0xa3, 0xaf, 0x62, 0x9e, + 0x44, 0xdf, 0xd2, 0x58, 0x35, 0xf3, 0x4d, 0xc8, 0x02, 0x7f, 0xdb, 0x14, 0xaf, 0x28, 0x0b, 0x6b, + 0x46, 0x38, 0x02, 0xf5, 0x98, 0x0a, 0x9e, 0xc4, 0x03, 0xea, 0x94, 0x35, 0xfb, 0xa3, 0x37, 0x67, + 0xc7, 0x86, 0xc1, 0x6f, 0x99, 0x0a, 0xf5, 0xcc, 0x83, 0x57, 
0xec, 0xf0, 0x73, 0xd0, 0x10, 0x49, + 0x3f, 0x0b, 0x38, 0x15, 0xad, 0xc7, 0x7d, 0x03, 0x68, 0x3c, 0xc9, 0x43, 0xb8, 0x98, 0x07, 0x43, + 0xd0, 0x88, 0x53, 0x25, 0x55, 0xd7, 0xce, 0x7b, 0xef, 0xa4, 0x40, 0x53, 0x95, 0xc2, 0x39, 0x1d, + 0x2e, 0x72, 0xc3, 0x39, 0x68, 0x1a, 0x73, 0xd5, 0xe5, 0xdd, 0x77, 0x96, 0xe4, 0xfe, 0x72, 0xd1, + 0x69, 0xe2, 0x57, 0x69, 0xf1, 0x7a, 0x1d, 0xf8, 0x35, 0x80, 0xc6, 0x55, 0x10, 0xc2, 0x69, 0x6a, + 0x8d, 0xda, 0x46, 0x23, 0x88, 0x37, 0x32, 0xf0, 0x0d, 0x28, 0xd8, 0x05, 0x15, 0x46, 0xa6, 0xd4, + 0xd9, 0xd2, 0xe8, 0xd5, 0xd0, 0x1f, 0x93, 0x29, 0xc5, 0x3a, 0x02, 0x3d, 0x60, 0xab, 0x5f, 0x11, + 0x91, 0x01, 0x75, 0xaa, 0x3a, 0xed, 0x9e, 0x49, 0xb3, 0x1f, 0x67, 0x01, 0x9c, 0xe7, 0xc0, 0x2f, + 0x80, 0xcd, 0x23, 0xf5, 0xa9, 0x87, 0x9c, 0x39, 0x35, 0x0d, 0x70, 0x33, 0xc0, 0x59, 0x16, 0xb8, + 0x2e, 0x1a, 0x38, 0x07, 0xc0, 0xa7, 0xa0, 0x9e, 0x08, 0x1a, 0x9f, 0xb2, 0xe7, 0xdc, 0xa9, 0x6b, + 0x41, 0x3f, 0x46, 0xc5, 0x1d, 0xf2, 0xca, 0xb5, 0x57, 0x42, 0xf6, 0x4c, 0x76, 0xfe, 0x3d, 0x65, + 0x1e, 0xbc, 0x62, 0x82, 0x3d, 0x50, 0xe5, 0xfd, 0x1f, 0xe9, 0x40, 0x3a, 0xb6, 0xe6, 0xdc, 0xbb, + 0x75, 0x48, 0xe6, 0xd6, 0x22, 0x4c, 0x2e, 0x4e, 0x7e, 0x96, 0x94, 0xa9, 0xf9, 0xf8, 0x77, 0x0d, + 0x75, 0xf5, 0x4c, 0x93, 0x60, 0x43, 0x06, 0x7f, 0x00, 0x36, 0x9f, 0x04, 0xa9, 0xd3, 0x01, 0x6f, + 0xc3, 0xbc, 0x92, 0xf2, 0x2c, 0xe3, 0xc1, 0x39, 0x25, 0xdc, 0x01, 0xd5, 0x20, 0x9e, 0xe3, 0x84, + 0x39, 0x8d, 0xae, 0xb5, 0x5b, 0xf7, 0x81, 0xea, 0xe1, 0x58, 0x7b, 0xb0, 0x89, 0xc0, 0x67, 0xa0, + 0xc6, 0x23, 0x25, 0x86, 0x70, 0xb6, 0xdf, 0xa6, 0x83, 0xa6, 0xe9, 0xa0, 0x76, 0x96, 0xb2, 0xe0, + 0x8c, 0x6e, 0xe7, 0xd7, 0x0a, 0xb8, 0x57, 0xd8, 0x50, 0x22, 0xe2, 0x4c, 0xd0, 0xff, 0x65, 0x45, + 0x7d, 0x02, 0x6a, 0x64, 0x32, 0xe1, 0x17, 0x34, 0xdd, 0x52, 0xf5, 0xbc, 0x89, 0xa3, 0xd4, 0x8d, + 0xb3, 0x38, 0x3c, 0x07, 0x55, 0x21, 0x89, 0x4c, 0x84, 0xd9, 0x38, 0x0f, 0x5e, 0xef, 0x7a, 0x3d, + 0xd1, 0x98, 0x54, 0x30, 0x4c, 0x45, 0x32, 0x91, 0xd8, 0xf0, 0xc0, 0x0e, 0xd8, 0x8a, 0x88, 0x1c, + 0x8c, 0xf4, 0x56, 0xd9, 0xf6, 0xed, 0xe5, 0xa2, 0xb3, 0x75, 0xae, 0x1c, 0x38, 0xf5, 0xc3, 0x43, + 0x60, 0xeb, 0xc3, 0xd3, 0x79, 0x94, 0x5d, 0x8c, 0xb6, 0x1a, 0xd1, 0x79, 0xe6, 0xbc, 0x2e, 0x1a, + 0x38, 0x4f, 0x86, 0xbf, 0x59, 0xa0, 0x45, 0x92, 0x20, 0x94, 0x47, 0x8c, 0x71, 0x49, 0xd2, 0xa9, + 0x54, 0xbb, 0xe5, 0xdd, 0xc6, 0xc1, 0x09, 0xfa, 0xb7, 0x97, 0x10, 0x6d, 0xe8, 0x8c, 0x8e, 0xd6, + 0x78, 0x4e, 0x98, 0x8c, 0xe7, 0xbe, 0x63, 0x84, 0x6a, 0xad, 0x87, 0xf1, 0x46, 0xe1, 0xf6, 0x97, + 0xe0, 0x83, 0x1b, 0x49, 0x60, 0x0b, 0x94, 0xc7, 0x74, 0x9e, 0x8e, 0x10, 0xab, 0x23, 0x7c, 0x1f, + 0x6c, 0xcd, 0xc8, 0x24, 0xa1, 0x7a, 0x1c, 0x36, 0x4e, 0x8d, 0x47, 0x77, 0x0e, 0xad, 0x9d, 0x3f, + 0x2d, 0xd0, 0x2c, 0x34, 0x37, 0x0b, 0xe9, 0x05, 0xec, 0x81, 0x9a, 0x59, 0x25, 0x9a, 0xa3, 0x71, + 0x80, 0x5e, 0xfb, 0xcf, 0x69, 0x94, 0xdf, 0x50, 0xa3, 0xce, 0xf6, 0x5c, 0xc6, 0x05, 0xbf, 0xd3, + 0xcf, 0x8b, 0xfe, 0xf7, 0xe6, 0xf1, 0xf2, 0xde, 0x50, 0x34, 0x7f, 0xdb, 0xbc, 0x27, 0xda, 0xc2, + 0x2b, 0x3a, 0x7f, 0xef, 0xf2, 0xca, 0x2d, 0xbd, 0xb8, 0x72, 0x4b, 0x2f, 0xaf, 0xdc, 0xd2, 0x2f, + 0x4b, 0xd7, 0xba, 0x5c, 0xba, 0xd6, 0x8b, 0xa5, 0x6b, 0xbd, 0x5c, 0xba, 0xd6, 0x5f, 0x4b, 0xd7, + 0xfa, 0xfd, 0x6f, 0xb7, 0xf4, 0x7d, 0xcd, 0x10, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x8b, 0xd1, + 0x27, 0x74, 0xfd, 0x08, 0x00, 0x00, +} + func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -89,119 +214,146 @@ 
func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { } func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Kind.Size())) - n1, err := m.Kind.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n2, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) - i += copy(dAtA[i:], m.SubResource) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i += copy(dAtA[i:], m.Operation) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UserInfo.Size())) - n3, err := m.UserInfo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.RequestSubResource) + copy(dAtA[i:], m.RequestSubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource))) + i-- + dAtA[i] = 0x7a + if m.RequestResource != nil { + { + size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 } - i += n3 - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n4, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.RequestKind != nil { + { + size, err := m.RequestKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a } - i += n4 - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.OldObject.Size())) - n5, err := m.OldObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- + dAtA[i] = 0x62 if m.DryRun != nil { - dAtA[i] = 0x58 - i++ + i-- if *m.DryRun { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x58 } - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Options.Size())) - n6, err := m.Options.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - if m.RequestKind != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RequestKind.Size())) - n7, err := m.RequestKind.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x52 + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n7 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RequestResource != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RequestResource.Size())) - n8, err := 
m.RequestResource.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x4a + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n8 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource))) - i += copy(dAtA[i:], m.RequestSubResource) - return i, nil + i-- + dAtA[i] = 0x42 + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x3a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x32 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x2a + i -= len(m.SubResource) + copy(dAtA[i:], m.SubResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -209,73 +361,85 @@ func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { } func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x10 - i++ - if m.Allowed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.Result != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n9, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - if m.Patch != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) - i += copy(dAtA[i:], m.Patch) - } - if m.PatchType != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) - i += copy(dAtA[i:], *m.PatchType) - } if len(m.AuditAnnotations) > 0 { keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) for k := range m.AuditAnnotations { keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) - for _, k := range keysForAuditAnnotations { - dAtA[i] = 0x32 - i++ - v := m.AuditAnnotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := 
i + i -= len(v) + copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if m.PatchType != nil { + i -= len(*m.PatchType) + copy(dAtA[i:], *m.PatchType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) + i-- + dAtA[i] = 0x2a + } + if m.Patch != nil { + i -= len(m.Patch) + copy(dAtA[i:], m.Patch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) + i-- + dAtA[i] = 0x22 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a + } + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x10 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -283,43 +447,57 @@ func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { } func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Request != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Request.Size())) - n10, err := m.Request.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Response.Size())) - n11, err := m.Response.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AdmissionRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.UID) @@ -361,6 +539,9 @@ func (m *AdmissionRequest) Size() (n int) { } func (m *AdmissionResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.UID) @@ -390,6 +571,9 @@ func (m *AdmissionResponse) Size() (n int) { } func (m *AdmissionReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Request != nil { @@ -404,14 +588,7 @@ func (m *AdmissionReview) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n 
int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -422,19 +599,19 @@ func (this *AdmissionRequest) String() string { } s := strings.Join([]string{`&AdmissionRequest{`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Kind:` + strings.Replace(strings.Replace(this.Kind.String(), "GroupVersionKind", "k8s_io_apimachinery_pkg_apis_meta_v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, - `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "GroupVersionResource", "k8s_io_apimachinery_pkg_apis_meta_v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, + `Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, - `UserInfo:` + strings.Replace(strings.Replace(this.UserInfo.String(), "UserInfo", "k8s_io_api_authentication_v1.UserInfo", 1), `&`, ``, 1) + `,`, - `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `OldObject:` + strings.Replace(strings.Replace(this.OldObject.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `DryRun:` + valueToStringGenerated(this.DryRun) + `,`, - `Options:` + strings.Replace(strings.Replace(this.Options.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "k8s_io_apimachinery_pkg_apis_meta_v1.GroupVersionKind", 1) + `,`, - `RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "k8s_io_apimachinery_pkg_apis_meta_v1.GroupVersionResource", 1) + `,`, + `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`, `RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`, `}`, }, "") @@ -457,7 +634,7 @@ func (this *AdmissionResponse) String() string { s := strings.Join([]string{`&AdmissionResponse{`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, - `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "k8s_io_apimachinery_pkg_apis_meta_v1.Status", 1) + `,`, + `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`, `Patch:` + valueToStringGenerated(this.Patch) + `,`, `PatchType:` + 
valueToStringGenerated(this.PatchType) + `,`, `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, @@ -470,8 +647,8 @@ func (this *AdmissionReview) String() string { return "nil" } s := strings.Join([]string{`&AdmissionReview{`, - `Request:` + strings.Replace(fmt.Sprintf("%v", this.Request), "AdmissionRequest", "AdmissionRequest", 1) + `,`, - `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "AdmissionResponse", "AdmissionResponse", 1) + `,`, + `Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`, + `Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`, `}`, }, "") return s @@ -499,7 +676,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -527,7 +704,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -537,6 +714,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -556,7 +736,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -565,6 +745,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -586,7 +769,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -595,6 +778,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -616,7 +802,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -626,6 +812,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -645,7 +834,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -655,6 +844,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -674,7 +866,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -684,6 +876,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -703,7 +898,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -713,6 +908,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -732,7 +930,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -741,6 +939,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -762,7 +963,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -771,6 +972,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -792,7 +996,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -801,6 +1005,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -822,7 +1029,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -843,7 +1050,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -852,6 +1059,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -873,7 +1083,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -882,11 +1092,14 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.RequestKind == nil { - m.RequestKind = &k8s_io_apimachinery_pkg_apis_meta_v1.GroupVersionKind{} + m.RequestKind = &v1.GroupVersionKind{} } if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -906,7 +1119,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -915,11 +1128,14 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.RequestResource == nil { - m.RequestResource = &k8s_io_apimachinery_pkg_apis_meta_v1.GroupVersionResource{} + m.RequestResource = &v1.GroupVersionResource{} } if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -939,7 +1155,7 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -949,6 +1165,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -963,6 +1182,9 @@ func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -990,7 +1212,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1018,7 +1240,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1028,6 +1250,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1047,7 +1272,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1067,7 +1292,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1076,11 +1301,14 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Result == nil { - m.Result = &k8s_io_apimachinery_pkg_apis_meta_v1.Status{} + m.Result = &v1.Status{} } if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1100,7 +1328,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1109,6 +1337,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1131,7 +1362,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1141,6 +1372,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1161,7 +1395,7 @@ func (m *AdmissionResponse) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1170,6 +1404,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1190,7 +1427,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1207,7 +1444,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1217,6 +1454,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1233,7 +1473,7 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1243,6 +1483,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -1274,6 +1517,9 @@ func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1301,7 +1547,7 @@ func (m *AdmissionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1329,7 +1575,7 @@ func (m *AdmissionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1338,6 +1584,9 @@ func (m *AdmissionReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1362,7 +1611,7 @@ func (m *AdmissionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1371,6 +1620,9 @@ func (m *AdmissionReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1390,6 +1642,9 @@ func (m *AdmissionReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1456,10 +1711,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1488,6 +1746,9 
@@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1506,68 +1767,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 905 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4d, 0x6f, 0x23, 0x35, - 0x18, 0xce, 0x6c, 0xd2, 0x24, 0xe3, 0x94, 0x4d, 0xd6, 0x0b, 0xd2, 0x28, 0x42, 0x93, 0xd0, 0x03, - 0x2a, 0xd2, 0xd6, 0x43, 0x2b, 0x58, 0x55, 0x2b, 0x2e, 0x1d, 0x5a, 0xa1, 0x82, 0xb4, 0xad, 0xbc, - 0x1b, 0xb4, 0x70, 0x40, 0x72, 0x32, 0xde, 0x64, 0x48, 0x62, 0x0f, 0x63, 0x4f, 0x4a, 0x6e, 0x88, - 0x2b, 0x17, 0xfe, 0x01, 0x3f, 0x86, 0x4b, 0x8f, 0x7b, 0xdc, 0x53, 0x44, 0xc3, 0xbf, 0xe8, 0x09, - 0xd9, 0xe3, 0xc9, 0xcc, 0x26, 0x2d, 0xec, 0x07, 0xa7, 0x99, 0xf7, 0xe3, 0x79, 0x5e, 0xfb, 0x79, - 0x5f, 0xdb, 0xe0, 0x64, 0x7c, 0x28, 0x50, 0xc8, 0xbd, 0x71, 0xd2, 0xa7, 0x31, 0xa3, 0x92, 0x0a, - 0x6f, 0x46, 0x59, 0xc0, 0x63, 0xcf, 0x04, 0x48, 0x14, 0x7a, 0x24, 0x98, 0x86, 0x42, 0x84, 0x9c, - 0x79, 0xb3, 0xfd, 0x3e, 0x95, 0x64, 0xdf, 0x1b, 0x52, 0x46, 0x63, 0x22, 0x69, 0x80, 0xa2, 0x98, - 0x4b, 0x0e, 0x3f, 0x4c, 0xb3, 0x11, 0x89, 0x42, 0xb4, 0xca, 0x46, 0x26, 0xbb, 0xbd, 0x37, 0x0c, - 0xe5, 0x28, 0xe9, 0xa3, 0x01, 0x9f, 0x7a, 0x43, 0x3e, 0xe4, 0x9e, 0x06, 0xf5, 0x93, 0xe7, 0xda, - 0xd2, 0x86, 0xfe, 0x4b, 0xc9, 0xda, 0x0f, 0x8a, 0xa5, 0x13, 0x39, 0xa2, 0x4c, 0x86, 0x03, 0x22, - 0xd3, 0xfa, 0xeb, 0xa5, 0xdb, 0x9f, 0xe5, 0xd9, 0x53, 0x32, 0x18, 0x85, 0x8c, 0xc6, 0x73, 0x2f, - 0x1a, 0x0f, 0x95, 0x43, 0x78, 0x53, 0x2a, 0xc9, 0x4d, 0x28, 0xef, 0x36, 0x54, 0x9c, 0x30, 0x19, - 0x4e, 0xe9, 0x06, 0xe0, 0xe1, 0x7f, 0x01, 0xc4, 0x60, 0x44, 0xa7, 0x64, 0x1d, 0xb7, 0xf3, 0x87, - 0x0d, 0x5a, 0x47, 0x99, 0x22, 0x98, 0xfe, 0x94, 0x50, 0x21, 0xa1, 0x0f, 0xca, 0x49, 0x18, 0x38, - 0x56, 0xd7, 0xda, 0xb5, 0xfd, 0x4f, 0x2f, 0x17, 0x9d, 0xd2, 0x72, 0xd1, 0x29, 0xf7, 0x4e, 0x8f, - 0xaf, 0x17, 0x9d, 0x8f, 0x6e, 0x2b, 0x24, 0xe7, 0x11, 0x15, 0xa8, 0x77, 0x7a, 0x8c, 0x15, 0x18, - 0x3e, 0x03, 0x95, 0x71, 0xc8, 0x02, 0xe7, 0x4e, 0xd7, 0xda, 0x6d, 0x1c, 0x3c, 0x44, 0x79, 0x07, - 0x56, 0x30, 0x14, 0x8d, 0x87, 0xca, 0x21, 0x90, 0x92, 0x01, 0xcd, 0xf6, 0xd1, 0x57, 0x31, 0x4f, - 0xa2, 0x6f, 0x69, 0xac, 0x16, 0xf3, 0x4d, 0xc8, 0x02, 0x7f, 0xdb, 0x14, 0xaf, 0x28, 0x0b, 0x6b, - 0x46, 0x38, 0x02, 0xf5, 0x98, 0x0a, 0x9e, 0xc4, 0x03, 0xea, 0x94, 0x35, 0xfb, 0xa3, 0x37, 0x67, - 0xc7, 0x86, 0xc1, 0x6f, 0x99, 0x0a, 0xf5, 0xcc, 0x83, 0x57, 0xec, 0xf0, 0x73, 0xd0, 0x10, 0x49, - 0x3f, 0x0b, 0x38, 0x15, 0xad, 0xc7, 0x7d, 0x03, 0x68, 0x3c, 0xc9, 0x43, 0xb8, 0x98, 0x07, 0xbb, - 0xa0, 0xc2, 0xc8, 0x94, 0x3a, 0x5b, 0x3a, 0x7f, 0xb5, 0x85, 0xc7, 0x64, 0x4a, 0xb1, 0x8e, 0x40, - 0x0f, 0xd8, 0xea, 0x2b, 0x22, 0x32, 0xa0, 0x4e, 0x55, 0xa7, 0xdd, 0x33, 0x69, 0xf6, 0xe3, 0x2c, - 0x80, 0xf3, 0x1c, 0xf8, 0x05, 0xb0, 0x79, 0xa4, 0x1a, 0x17, 0x72, 0xe6, 0xd4, 0x34, 0xc0, 0xcd, - 0x00, 0x67, 0x59, 0xe0, 0xba, 0x68, 0xe0, 0x1c, 0x00, 0x9f, 0x82, 0x7a, 0x22, 0x68, 0x7c, 0xca, - 0x9e, 0x73, 0xa7, 0xae, 0x15, 0xfb, 0x18, 0x15, 0x4f, 0xc4, 0x2b, 0x43, 0xac, 0x94, 0xea, 0x99, - 0xec, 0x5c, 0x9d, 0xcc, 0x83, 0x57, 0x4c, 0xb0, 0x07, 0xaa, 0xbc, 0xff, 0x23, 0x1d, 0x48, 0xc7, - 0xd6, 0x9c, 0x7b, 
0xb7, 0x76, 0xc1, 0xcc, 0x20, 0xc2, 0xe4, 0xe2, 0xe4, 0x67, 0x49, 0x99, 0x6a, - 0x80, 0x7f, 0xd7, 0x50, 0x57, 0xcf, 0x34, 0x09, 0x36, 0x64, 0xf0, 0x07, 0x60, 0xf3, 0x49, 0x90, - 0x3a, 0x1d, 0xf0, 0x36, 0xcc, 0x2b, 0x29, 0xcf, 0x32, 0x1e, 0x9c, 0x53, 0xc2, 0x1d, 0x50, 0x0d, - 0xe2, 0x39, 0x4e, 0x98, 0xd3, 0xe8, 0x5a, 0xbb, 0x75, 0x1f, 0xa8, 0x35, 0x1c, 0x6b, 0x0f, 0x36, - 0x11, 0xf8, 0x0c, 0xd4, 0x78, 0xa4, 0xc4, 0x10, 0xce, 0xf6, 0xdb, 0xac, 0xa0, 0x69, 0x56, 0x50, - 0x3b, 0x4b, 0x59, 0x70, 0x46, 0x07, 0x43, 0xd0, 0x88, 0xd3, 0x53, 0xa6, 0x26, 0xda, 0x79, 0xef, - 0x9d, 0x4e, 0x47, 0x53, 0x8d, 0x21, 0xce, 0xe9, 0x70, 0x91, 0x1b, 0xce, 0x41, 0xd3, 0x98, 0xab, - 0x09, 0xbe, 0xfb, 0xce, 0xc7, 0xe5, 0xfe, 0x72, 0xd1, 0x69, 0xe2, 0x57, 0x69, 0xf1, 0x7a, 0x1d, - 0xf8, 0x35, 0x80, 0xc6, 0x55, 0x38, 0x24, 0x4e, 0x53, 0xcf, 0x6d, 0xdb, 0x68, 0x03, 0xf1, 0x46, - 0x06, 0xbe, 0x01, 0xb5, 0xf3, 0x6b, 0x05, 0xdc, 0x2b, 0xdc, 0x50, 0x22, 0xe2, 0x4c, 0xd0, 0xff, - 0xe5, 0x8a, 0xfa, 0x04, 0xd4, 0xc8, 0x64, 0xc2, 0x2f, 0x68, 0x7a, 0x4b, 0xd5, 0xf3, 0xb6, 0x1d, - 0xa5, 0x6e, 0x9c, 0xc5, 0xe1, 0x39, 0xa8, 0x0a, 0x49, 0x64, 0x22, 0xcc, 0x8d, 0xf3, 0xe0, 0xf5, - 0x24, 0x7c, 0xa2, 0x31, 0xe9, 0x88, 0x61, 0x2a, 0x92, 0x89, 0xc4, 0x86, 0x07, 0x76, 0xc0, 0x56, - 0x44, 0xe4, 0x60, 0xa4, 0x6f, 0x95, 0x6d, 0xdf, 0x5e, 0x2e, 0x3a, 0x5b, 0xe7, 0xca, 0x81, 0x53, - 0x3f, 0x3c, 0x04, 0xb6, 0xfe, 0x79, 0x3a, 0x8f, 0xb2, 0xab, 0xa4, 0xad, 0x86, 0xfa, 0x3c, 0x73, - 0x5e, 0x17, 0x0d, 0x9c, 0x27, 0xc3, 0xdf, 0x2c, 0xd0, 0x22, 0x49, 0x10, 0xca, 0x23, 0xc6, 0xb8, - 0x24, 0xe9, 0x1c, 0x57, 0xbb, 0xe5, 0xdd, 0xc6, 0xc1, 0x09, 0xfa, 0xb7, 0x97, 0x10, 0x6d, 0xe8, - 0x8c, 0x8e, 0xd6, 0x78, 0x4e, 0x98, 0x8c, 0xe7, 0xbe, 0x63, 0x84, 0x6a, 0xad, 0x87, 0xf1, 0x46, - 0xe1, 0xf6, 0x97, 0xe0, 0x83, 0x1b, 0x49, 0x60, 0x0b, 0x94, 0xc7, 0x74, 0x9e, 0xb6, 0x10, 0xab, - 0x5f, 0xf8, 0x3e, 0xd8, 0x9a, 0x91, 0x49, 0x42, 0x75, 0x3b, 0x6c, 0x9c, 0x1a, 0x8f, 0xee, 0x1c, - 0x5a, 0x3b, 0x7f, 0x5a, 0xa0, 0x59, 0x58, 0xdc, 0x2c, 0xa4, 0x17, 0xb0, 0x07, 0x6a, 0x66, 0x5c, - 0x34, 0x47, 0xe3, 0x00, 0xbd, 0xf6, 0xe6, 0x34, 0xca, 0x6f, 0xa8, 0x56, 0x67, 0xb3, 0x9c, 0x71, - 0xc1, 0xef, 0xf4, 0xf3, 0xa2, 0x77, 0x6f, 0x1e, 0x2f, 0xef, 0x0d, 0x45, 0xf3, 0xb7, 0xcd, 0x7b, - 0xa2, 0x2d, 0xbc, 0xa2, 0xf3, 0xf7, 0x2e, 0xaf, 0xdc, 0xd2, 0x8b, 0x2b, 0xb7, 0xf4, 0xf2, 0xca, - 0x2d, 0xfd, 0xb2, 0x74, 0xad, 0xcb, 0xa5, 0x6b, 0xbd, 0x58, 0xba, 0xd6, 0xcb, 0xa5, 0x6b, 0xfd, - 0xb5, 0x74, 0xad, 0xdf, 0xff, 0x76, 0x4b, 0xdf, 0xd7, 0x0c, 0xf1, 0x3f, 0x01, 0x00, 0x00, 0xff, - 0xff, 0xda, 0xe1, 0x0b, 0x41, 0xfd, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.proto b/vendor/k8s.io/api/admission/v1beta1/generated.proto index 59f92a70d2b49..6999b80c27a05 100644 --- a/vendor/k8s.io/api/admission/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admission/v1beta1/generated.proto @@ -80,7 +80,7 @@ message AdmissionRequest { optional string requestSubResource = 15; // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and - // rely on the server to generate the name. If that is the case, this method will return the empty string. + // rely on the server to generate the name. If that is the case, this field will contain an empty string. 
// +optional optional string name = 5; @@ -95,11 +95,11 @@ message AdmissionRequest { // UserInfo is information about the requesting user optional k8s.io.api.authentication.v1.UserInfo userInfo = 8; - // Object is the object from the incoming request prior to default values being applied + // Object is the object from the incoming request. // +optional optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; - // OldObject is the existing object. Only populated for UPDATE requests. + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. // +optional optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; diff --git a/vendor/k8s.io/api/admission/v1beta1/types.go b/vendor/k8s.io/api/admission/v1beta1/types.go index e968720e71ada..2cb9ea55a3892 100644 --- a/vendor/k8s.io/api/admission/v1beta1/types.go +++ b/vendor/k8s.io/api/admission/v1beta1/types.go @@ -82,7 +82,7 @@ type AdmissionRequest struct { RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"` // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and - // rely on the server to generate the name. If that is the case, this method will return the empty string. + // rely on the server to generate the name. If that is the case, this field will contain an empty string. // +optional Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` // Namespace is the namespace associated with the request (if any). @@ -93,10 +93,10 @@ type AdmissionRequest struct { Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"` // UserInfo is information about the requesting user UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"` - // Object is the object from the incoming request prior to default values being applied + // Object is the object from the incoming request. // +optional Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"` - // OldObject is the existing object. Only populated for UPDATE requests. + // OldObject is the existing object. Only populated for DELETE and UPDATE requests. // +optional OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"` // DryRun indicates that modifications will definitely not be persisted for this request. diff --git a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go index 727080d0d4311..2ef98db8729f3 100644 --- a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go @@ -36,12 +36,12 @@ var map_AdmissionRequest = map[string]string{ "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). 
If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.", "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.", "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.", - "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this method will return the empty string.", + "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this field will contain an empty string.", "namespace": "Namespace is the namespace associated with the request (if any).", "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.", "userInfo": "UserInfo is information about the requesting user", - "object": "Object is the object from the incoming request prior to default values being applied", - "oldObject": "OldObject is the existing object. Only populated for UPDATE requests.", + "object": "Object is the object from the incoming request.", + "oldObject": "OldObject is the existing object. Only populated for DELETE and UPDATE requests.", "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.", "options": "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. 
for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.", } diff --git a/vendor/k8s.io/api/admissionregistration/v1/BUILD.bazel b/vendor/k8s.io/api/admissionregistration/v1/BUILD.bazel new file mode 100644 index 0000000000000..490c21c8e1188 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.deepcopy.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/api/admissionregistration/v1", + importpath = "k8s.io/api/admissionregistration/v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) diff --git a/vendor/k8s.io/api/admissionregistration/v1/doc.go b/vendor/k8s.io/api/admissionregistration/v1/doc.go new file mode 100644 index 0000000000000..c3940f090c8cd --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io + +// Package v1 is the v1 version of the API. +// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration +// MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the +// new dynamic admission controller configuration. +package v1 // import "k8s.io/api/admissionregistration/v1" diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go new file mode 100644 index 0000000000000..1acb6345a62ae --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go @@ -0,0 +1,3469 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } +func (*MutatingWebhook) ProtoMessage() {} +func (*MutatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{0} +} +func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhook.Merge(m, src) +} +func (m *MutatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo + +func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } +func (*MutatingWebhookConfiguration) ProtoMessage() {} +func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{1} +} +func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfiguration.Merge(m, src) +} +func (m *MutatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo + +func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } +func (*MutatingWebhookConfigurationList) ProtoMessage() {} +func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{2} +} +func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfigurationList.Merge(m, src) +} +func (m *MutatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { + 
xxx_messageInfo_MutatingWebhookConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{3} +} +func (m *Rule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Rule.Merge(m, src) +} +func (m *Rule) XXX_Size() int { + return m.Size() +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{4} +} +func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleWithOperations.Merge(m, src) +} +func (m *RuleWithOperations) XXX_Size() int { + return m.Size() +} +func (m *RuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_RuleWithOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{5} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } +func (*ValidatingWebhook) ProtoMessage() {} +func (*ValidatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{6} +} +func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhook.Merge(m, src) +} +func (m *ValidatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo + +func (m *ValidatingWebhookConfiguration) Reset() { 
*m = ValidatingWebhookConfiguration{} } +func (*ValidatingWebhookConfiguration) ProtoMessage() {} +func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{7} +} +func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfiguration.Merge(m, src) +} +func (m *ValidatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo + +func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } +func (*ValidatingWebhookConfigurationList) ProtoMessage() {} +func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{8} +} +func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfigurationList.Merge(m, src) +} +func (m *ValidatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{9} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhook") + proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfiguration") + proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfigurationList") + proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1.Rule") + proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1.RuleWithOperations") + 
proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1.ServiceReference") + proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhook") + proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfiguration") + proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfigurationList") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1.WebhookClientConfig") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1/generated.proto", fileDescriptor_aaac5994f79683e8) +} + +var fileDescriptor_aaac5994f79683e8 = []byte{ + // 1104 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xc6, 0x76, 0x63, 0x8f, 0xf3, 0xa7, 0x19, 0xa0, 0x35, 0xa1, 0xf2, 0x5a, 0x46, 0x42, + 0x46, 0xc0, 0x6e, 0x13, 0x4a, 0xa9, 0xb8, 0xa0, 0x6c, 0xf8, 0xa3, 0x88, 0xa4, 0x8d, 0x26, 0x6d, + 0x8a, 0x50, 0x0e, 0x1d, 0xaf, 0xc7, 0xf6, 0x10, 0x7b, 0x67, 0x35, 0x33, 0xeb, 0x92, 0x1b, 0x1f, + 0x81, 0xaf, 0x00, 0x9f, 0x82, 0x1b, 0xe2, 0x96, 0x63, 0x8f, 0x39, 0xa0, 0x85, 0x2c, 0x17, 0x0e, + 0x7c, 0x82, 0x9c, 0xd0, 0xcc, 0xae, 0x77, 0xfd, 0x27, 0x09, 0x56, 0x0e, 0x3d, 0xe5, 0xb6, 0xf3, + 0x7b, 0xf3, 0x7e, 0x6f, 0xde, 0xdb, 0xf7, 0xde, 0x0f, 0xec, 0x1c, 0x3d, 0x12, 0x16, 0x65, 0xf6, + 0x51, 0xd0, 0x24, 0xdc, 0x23, 0x92, 0x08, 0x7b, 0x40, 0xbc, 0x16, 0xe3, 0x76, 0x62, 0xc0, 0x3e, + 0xb5, 0x71, 0xab, 0x4f, 0x85, 0xa0, 0xcc, 0xe3, 0xa4, 0x43, 0x85, 0xe4, 0x58, 0x52, 0xe6, 0xd9, + 0x83, 0x75, 0xbb, 0x43, 0x3c, 0xc2, 0xb1, 0x24, 0x2d, 0xcb, 0xe7, 0x4c, 0x32, 0xf8, 0x6e, 0xec, + 0x64, 0x61, 0x9f, 0x5a, 0x17, 0x3a, 0x59, 0x83, 0xf5, 0xb5, 0x8f, 0x3a, 0x54, 0x76, 0x83, 0xa6, + 0xe5, 0xb2, 0xbe, 0xdd, 0x61, 0x1d, 0x66, 0x6b, 0xdf, 0x66, 0xd0, 0xd6, 0x27, 0x7d, 0xd0, 0x5f, + 0x31, 0xe7, 0xda, 0x83, 0xec, 0x21, 0x7d, 0xec, 0x76, 0xa9, 0x47, 0xf8, 0xb1, 0xed, 0x1f, 0x75, + 0x14, 0x20, 0xec, 0x3e, 0x91, 0xf8, 0x82, 0x97, 0xac, 0xd9, 0x97, 0x79, 0xf1, 0xc0, 0x93, 0xb4, + 0x4f, 0xa6, 0x1c, 0x1e, 0xfe, 0x9f, 0x83, 0x70, 0xbb, 0xa4, 0x8f, 0x27, 0xfd, 0xea, 0xbf, 0x2d, + 0x80, 0x95, 0xdd, 0x40, 0x62, 0x49, 0xbd, 0xce, 0x73, 0xd2, 0xec, 0x32, 0x76, 0x04, 0x6b, 0x20, + 0xef, 0xe1, 0x3e, 0xa9, 0x18, 0x35, 0xa3, 0x51, 0x72, 0x16, 0x4f, 0x42, 0x73, 0x2e, 0x0a, 0xcd, + 0xfc, 0x63, 0xdc, 0x27, 0x48, 0x5b, 0x20, 0x07, 0x8b, 0x6e, 0x8f, 0x12, 0x4f, 0x6e, 0x31, 0xaf, + 0x4d, 0x3b, 0x95, 0xf9, 0x9a, 0xd1, 0x28, 0x6f, 0x3c, 0xb2, 0x66, 0xa8, 0x9f, 0x95, 0x44, 0xd9, + 0x1a, 0xf1, 0x77, 0xde, 0x4c, 0x62, 0x2c, 0x8e, 0xa2, 0x68, 0x2c, 0x06, 0x3c, 0x04, 0x05, 0x1e, + 0xf4, 0x88, 0xa8, 0xe4, 0x6a, 0xb9, 0x46, 0x79, 0xe3, 0xd3, 0x99, 0x82, 0xa1, 0xa0, 0x47, 0x9e, + 0x53, 0xd9, 0x7d, 0xe2, 0x93, 0x18, 0x14, 0xce, 0x52, 0x12, 0xab, 0xa0, 0x6c, 0x02, 0xc5, 0xa4, + 0x70, 0x07, 0x2c, 0xb5, 0x31, 0xed, 0x05, 0x9c, 0xec, 0xb1, 0x1e, 0x75, 0x8f, 0x2b, 0x79, 0x9d, + 0xfc, 0x7b, 0x51, 0x68, 0x2e, 0x7d, 0x35, 0x6a, 0x38, 0x0f, 0xcd, 0xd5, 0x31, 0xe0, 0xe9, 0xb1, + 0x4f, 0xd0, 0xb8, 0x33, 0xfc, 0x02, 0x94, 0xfb, 0x58, 0xba, 0xdd, 0x84, 0xab, 0xa4, 0xb9, 0xea, + 0x51, 0x68, 0x96, 0x77, 0x33, 0xf8, 0x3c, 0x34, 0x57, 0x46, 0x8e, 0x9a, 0x67, 0xd4, 0x0d, 0xfe, + 0x00, 0x56, 0x55, 0xb5, 0x85, 0x8f, 0x5d, 0xb2, 0x4f, 0x7a, 0xc4, 0x95, 0x8c, 0x57, 0x0a, 0xba, + 0xd4, 0x1f, 0x8f, 0x64, 0x9f, 0xfe, 0x6f, 0xcb, 0x3f, 0xea, 0x28, 
0x40, 0x58, 0xaa, 0xad, 0x54, + 0xfa, 0x3b, 0xb8, 0x49, 0x7a, 0x43, 0x57, 0xe7, 0xad, 0x28, 0x34, 0x57, 0x1f, 0x4f, 0x32, 0xa2, + 0xe9, 0x20, 0x90, 0x81, 0x65, 0xd6, 0xfc, 0x9e, 0xb8, 0x32, 0x0d, 0x5b, 0xbe, 0x7e, 0x58, 0x18, + 0x85, 0xe6, 0xf2, 0x93, 0x31, 0x3a, 0x34, 0x41, 0xaf, 0x0a, 0x26, 0x68, 0x8b, 0x7c, 0xd9, 0x6e, + 0x13, 0x57, 0x8a, 0xca, 0xad, 0xac, 0x60, 0xfb, 0x19, 0xac, 0x0a, 0x96, 0x1d, 0xb7, 0x7a, 0x58, + 0x08, 0x34, 0xea, 0x06, 0x3f, 0x03, 0xcb, 0xaa, 0xd7, 0x59, 0x20, 0xf7, 0x89, 0xcb, 0xbc, 0x96, + 0xa8, 0x2c, 0xd4, 0x8c, 0x46, 0x21, 0x7e, 0xc1, 0xd3, 0x31, 0x0b, 0x9a, 0xb8, 0x09, 0x9f, 0x81, + 0xbb, 0x69, 0x17, 0x21, 0x32, 0xa0, 0xe4, 0xe5, 0x01, 0xe1, 0xea, 0x20, 0x2a, 0xc5, 0x5a, 0xae, + 0x51, 0x72, 0xde, 0x89, 0x42, 0xf3, 0xee, 0xe6, 0xc5, 0x57, 0xd0, 0x65, 0xbe, 0xf0, 0x05, 0x80, + 0x9c, 0x50, 0x6f, 0xc0, 0x5c, 0xdd, 0x7e, 0x49, 0x43, 0x00, 0x9d, 0xdf, 0xfd, 0x28, 0x34, 0x21, + 0x9a, 0xb2, 0x9e, 0x87, 0xe6, 0x9d, 0x69, 0x54, 0xb7, 0xc7, 0x05, 0x5c, 0xf5, 0x53, 0x03, 0xdc, + 0x9b, 0x98, 0xe0, 0x78, 0x62, 0x82, 0xb8, 0xe3, 0xe1, 0x0b, 0x50, 0x54, 0x3f, 0xa6, 0x85, 0x25, + 0xd6, 0x23, 0x5d, 0xde, 0xb8, 0x3f, 0xdb, 0x6f, 0x8c, 0xff, 0xd9, 0x2e, 0x91, 0xd8, 0x81, 0xc9, + 0xd0, 0x80, 0x0c, 0x43, 0x29, 0x2b, 0x3c, 0x00, 0xc5, 0x24, 0xb2, 0xa8, 0xcc, 0xeb, 0xe9, 0x7c, + 0x30, 0xd3, 0x74, 0x4e, 0x3c, 0xdb, 0xc9, 0xab, 0x28, 0xa8, 0xf8, 0x32, 0xe1, 0xaa, 0xff, 0x63, + 0x80, 0xda, 0x55, 0xa9, 0xed, 0x50, 0x21, 0xe1, 0xe1, 0x54, 0x7a, 0xd6, 0x8c, 0x5d, 0x4a, 0x45, + 0x9c, 0xdc, 0xed, 0x24, 0xb9, 0xe2, 0x10, 0x19, 0x49, 0xad, 0x0d, 0x0a, 0x54, 0x92, 0xfe, 0x30, + 0xaf, 0xcd, 0xeb, 0xe4, 0x35, 0xf6, 0xe6, 0x6c, 0xff, 0x6c, 0x2b, 0x5e, 0x14, 0xd3, 0xd7, 0x7f, + 0x37, 0x40, 0x5e, 0x2d, 0x24, 0xf8, 0x01, 0x28, 0x61, 0x9f, 0x7e, 0xcd, 0x59, 0xe0, 0x8b, 0x8a, + 0xa1, 0x3b, 0x6f, 0x29, 0x0a, 0xcd, 0xd2, 0xe6, 0xde, 0x76, 0x0c, 0xa2, 0xcc, 0x0e, 0xd7, 0x41, + 0x19, 0xfb, 0x34, 0x6d, 0xd4, 0x79, 0x7d, 0x7d, 0x45, 0x8d, 0xcd, 0xe6, 0xde, 0x76, 0xda, 0x9c, + 0xa3, 0x77, 0x14, 0x3f, 0x27, 0x82, 0x05, 0xdc, 0x4d, 0x56, 0x69, 0xc2, 0x8f, 0x86, 0x20, 0xca, + 0xec, 0xf0, 0x43, 0x50, 0x10, 0x2e, 0xf3, 0x49, 0xb2, 0x0d, 0xef, 0xa8, 0x67, 0xef, 0x2b, 0xe0, + 0x3c, 0x34, 0x4b, 0xfa, 0x43, 0xb7, 0x65, 0x7c, 0xa9, 0xfe, 0x8b, 0x01, 0xe0, 0xf4, 0xc2, 0x85, + 0x9f, 0x03, 0xc0, 0xd2, 0x53, 0x92, 0x92, 0xa9, 0x7b, 0x29, 0x45, 0xcf, 0x43, 0x73, 0x29, 0x3d, + 0x69, 0xca, 0x11, 0x17, 0xf8, 0x0d, 0xc8, 0xab, 0x25, 0x9d, 0xa8, 0xcc, 0xfb, 0x33, 0x2f, 0xfe, + 0x4c, 0xba, 0xd4, 0x09, 0x69, 0x92, 0xfa, 0xcf, 0x06, 0xb8, 0xbd, 0x4f, 0xf8, 0x80, 0xba, 0x04, + 0x91, 0x36, 0xe1, 0xc4, 0x73, 0x09, 0xb4, 0x41, 0x29, 0x5d, 0x82, 0x89, 0xec, 0xad, 0x26, 0xbe, + 0xa5, 0x74, 0x61, 0xa2, 0xec, 0x4e, 0x2a, 0x91, 0xf3, 0x97, 0x4a, 0xe4, 0x3d, 0x90, 0xf7, 0xb1, + 0xec, 0x56, 0x72, 0xfa, 0x46, 0x51, 0x59, 0xf7, 0xb0, 0xec, 0x22, 0x8d, 0x6a, 0x2b, 0xe3, 0x52, + 0xd7, 0xb5, 0x90, 0x58, 0x19, 0x97, 0x48, 0xa3, 0xf5, 0x3f, 0x6f, 0x81, 0xd5, 0x03, 0xdc, 0xa3, + 0xad, 0x1b, 0x59, 0xbe, 0x91, 0xe5, 0x2b, 0x65, 0x19, 0xdc, 0xc8, 0xf2, 0x75, 0x64, 0xb9, 0xfe, + 0x87, 0x01, 0xaa, 0x53, 0x13, 0xf6, 0xba, 0x65, 0xf3, 0xdb, 0x29, 0xd9, 0x7c, 0x38, 0xd3, 0xf4, + 0x4c, 0x3d, 0x7c, 0x4a, 0x38, 0xff, 0x35, 0x40, 0xfd, 0xea, 0xf4, 0x5e, 0x83, 0x74, 0x76, 0xc7, + 0xa5, 0x73, 0xeb, 0x7a, 0xb9, 0xcd, 0x22, 0x9e, 0xbf, 0x1a, 0xe0, 0x8d, 0x0b, 0xf6, 0x17, 0x7c, + 0x1b, 0xe4, 0x02, 0xde, 0x4b, 0x56, 0xf0, 0x42, 0x14, 0x9a, 0xb9, 0x67, 0x68, 0x07, 0x29, 0x0c, + 0x1e, 0x82, 0x05, 0x11, 0xab, 0x40, 0x92, 0xf9, 0x27, 0x33, 0x3d, 0x6f, 0x52, 0x39, 0x9c, 
0x72, + 0x14, 0x9a, 0x0b, 0x43, 0x74, 0x48, 0x09, 0x1b, 0xa0, 0xe8, 0x62, 0x27, 0xf0, 0x5a, 0x89, 0x6a, + 0x2d, 0x3a, 0x8b, 0xaa, 0x48, 0x5b, 0x9b, 0x31, 0x86, 0x52, 0xab, 0xd3, 0x38, 0x39, 0xab, 0xce, + 0xbd, 0x3a, 0xab, 0xce, 0x9d, 0x9e, 0x55, 0xe7, 0x7e, 0x8c, 0xaa, 0xc6, 0x49, 0x54, 0x35, 0x5e, + 0x45, 0x55, 0xe3, 0x34, 0xaa, 0x1a, 0x7f, 0x45, 0x55, 0xe3, 0xa7, 0xbf, 0xab, 0x73, 0xdf, 0xcd, + 0x0f, 0xd6, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x57, 0x91, 0x2c, 0x7b, 0xeb, 0x0e, 0x00, 0x00, +} + +func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.ReinvocationPolicy != nil { + i -= len(*m.ReinvocationPolicy) + copy(dAtA[i:], *m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scope != nil { + i -= len(*m.Scope) + copy(dAtA[i:], *m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) + i-- + dAtA[i] = 0x22 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIVersions) > 0 { + for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIVersions[iNdEx]) + copy(dAtA[i:], m.APIVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Operations) > 0 { + for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Operations[iNdEx]) + copy(dAtA[i:], m.Operations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MutatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MutatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MutatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Rule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Scope != nil { + l = len(*m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Rule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + 
sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *MutatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, + 
`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `Scope:` + valueToStringGenerated(this.Scope) + `,`, + `}`, + }, "") + return s +} +func (this *RuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleWithOperations{`, + `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, + `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + 
`AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated 
+ } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, MutatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ScopeType(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + 
} + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto new file mode 100644 index 0000000000000..f102f3a7ec504 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -0,0 +1,479 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.admissionregistration.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +message MutatingWebhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 9; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. 
If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + repeated string admissionReviewVersions = 8; + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". 
+ // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + optional string reinvocationPolicy = 10; +} + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +message MutatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated MutatingWebhook Webhooks = 2; +} + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +message MutatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of MutatingWebhookConfiguration. + repeated MutatingWebhookConfiguration items = 2; +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +message Rule { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiGroups = 1; + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiVersions = 2; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + repeated string resources = 3; + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. + // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. 
+ // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + optional string scope = 4; +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +message RuleWithOperations { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string operations = 1; + + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + optional Rule rule = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // `namespace` is the namespace of the service. + // Required + optional string namespace = 1; + + // `name` is the name of the service. + // Required + optional string name = 2; + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + optional string path = 3; + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + optional int32 port = 4; +} + +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +message ValidatingWebhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. 
+ // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 9; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. 
+ // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + repeated string admissionReviewVersions = 8; +} + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +message ValidatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated ValidatingWebhook Webhooks = 2; +} + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +message ValidatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingWebhookConfiguration. + repeated ValidatingWebhookConfiguration items = 2; +} + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +message WebhookClientConfig { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 3; + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + optional ServiceReference service = 1; + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + optional bytes caBundle = 2; +} + diff --git a/vendor/k8s.io/api/admissionregistration/v1/register.go b/vendor/k8s.io/api/admissionregistration/v1/register.go new file mode 100644 index 0000000000000..716ce7fc5d266 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "admissionregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ValidatingWebhookConfiguration{}, + &ValidatingWebhookConfigurationList{}, + &MutatingWebhookConfiguration{}, + &MutatingWebhookConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go new file mode 100644 index 0000000000000..114a4c68a9375 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -0,0 +1,551 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +type Rule struct { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. 
+ // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. + // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. + // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + Scope *ScopeType `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"` +} + +type ScopeType string + +const ( + // ClusterScope means that scope is limited to cluster-scoped objects. + // Namespace objects are cluster-scoped. + ClusterScope ScopeType = "Cluster" + // NamespacedScope means that scope is limited to namespaced objects. + NamespacedScope ScopeType = "Namespaced" + // AllScopes means that all scopes are included. + AllScopes ScopeType = "*" +) + +type FailurePolicyType string + +const ( + // Ignore means that an error calling the webhook is ignored. + Ignore FailurePolicyType = "Ignore" + // Fail means that an error calling the webhook causes the admission to fail. + Fail FailurePolicyType = "Fail" +) + +// MatchPolicyType specifies the type of match policy +type MatchPolicyType string + +const ( + // Exact means requests should only be sent to the webhook if they exactly match a given rule + Exact MatchPolicyType = "Exact" + // Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version. + Equivalent MatchPolicyType = "Equivalent" +) + +type SideEffectClass string + +const ( + // SideEffectClassUnknown means that no information is known about the side effects of calling the webhook. + // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. + SideEffectClassUnknown SideEffectClass = "Unknown" + // SideEffectClassNone means that calling the webhook will have no side effects. + SideEffectClassNone SideEffectClass = "None" + // SideEffectClassSome means that calling the webhook will possibly have side effects. + // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. + SideEffectClassSome SideEffectClass = "Some" + // SideEffectClassNoneOnDryRun means that calling the webhook will possibly have side effects, but if the + // request being reviewed has the dry-run attribute, the side effects will be suppressed. + SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +type ValidatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +type ValidatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingWebhookConfiguration. + Items []ValidatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +type MutatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +type MutatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of MutatingWebhookConfiguration. + Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +type ValidatingWebhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. 
+ // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. 
objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,10,opt,name=objectSelector"` + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + SideEffects *SideEffectClass `json:"sideEffects" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` +} + +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +type MutatingWebhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. 
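The namespaceSelector examples in the comments above are written as JSON; expressed with the metav1 types these fields actually use, the "runlevel NotIn 0,1" selector would look roughly like this sketch (illustrative only, not part of the generated file):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Go equivalent of the "runlevel NotIn [0, 1]" namespaceSelector shown in the comments.
	selector := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "runlevel",
			Operator: metav1.LabelSelectorOpNotIn,
			Values:   []string{"0", "1"},
		}},
	}
	fmt.Printf("%+v\n", selector)
}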
+ Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. 
+ // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"` + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. + SideEffects *SideEffectClass `json:"sideEffects" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 10 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` +} + +// ReinvocationPolicyType specifies what type of policy the admission hook uses. +type ReinvocationPolicyType string + +const ( + // NeverReinvocationPolicy indicates that the webhook must not be called more than once in a + // single admission evaluation. 
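A rough sketch of the MutatingWebhook-specific knobs documented above (sideEffects, timeoutSeconds, admissionReviewVersions, reinvocationPolicy); the webhook name and values are hypothetical:

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	reinvoke := admissionregistrationv1.IfNeededReinvocationPolicy
	sideEffects := admissionregistrationv1.SideEffectClassNoneOnDryRun
	timeout := int32(10)

	webhook := admissionregistrationv1.MutatingWebhook{
		Name:                    "example.mutator.k8s.example.com", // hypothetical name
		SideEffects:             &sideEffects,
		TimeoutSeconds:          &timeout,
		AdmissionReviewVersions: []string{"v1"},
		// IfNeeded asks the API server to call the webhook again if later
		// admission plugins modify the object; such a webhook must be idempotent.
		ReinvocationPolicy: &reinvoke,
		// ClientConfig is omitted in this sketch; it is required on a real object.
	}
	fmt.Println(webhook.Name, *webhook.ReinvocationPolicy)
}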
+ NeverReinvocationPolicy ReinvocationPolicyType = "Never" + // IfNeededReinvocationPolicy indicates that the webhook may be called at least one + // additional time as part of the admission evaluation if the object being admitted is + // modified by other admission plugins after the initial webhook call. + IfNeededReinvocationPolicy ReinvocationPolicyType = "IfNeeded" +) + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +type RuleWithOperations struct { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` +} + +type OperationType string + +// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. +const ( + OperationAll OperationType = "*" + Create OperationType = "CREATE" + Update OperationType = "UPDATE" + Delete OperationType = "DELETE" + Connect OperationType = "CONNECT" +) + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. 
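Putting the pieces together (RuleWithOperations, the operation constants, WebhookClientConfig, and the ServiceReference defined just below), a validating configuration might be assembled roughly as follows; the names, namespace, and path are made up for illustration:

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	failurePolicy := admissionregistrationv1.Fail
	sideEffects := admissionregistrationv1.SideEffectClassNone
	scope := admissionregistrationv1.NamespacedScope
	path := "/validate" // hypothetical path served by the webhook
	port := int32(443)

	cfg := admissionregistrationv1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-validator"}, // hypothetical name
		Webhooks: []admissionregistrationv1.ValidatingWebhook{{
			Name: "deployments.example.com", // hypothetical, fully qualified
			ClientConfig: admissionregistrationv1.WebhookClientConfig{
				// In-cluster webhooks are addressed through a ServiceReference.
				Service: &admissionregistrationv1.ServiceReference{
					Namespace: "example-system",
					Name:      "example-webhook",
					Path:      &path,
					Port:      &port,
				},
			},
			Rules: []admissionregistrationv1.RuleWithOperations{{
				Operations: []admissionregistrationv1.OperationType{
					admissionregistrationv1.Create,
					admissionregistrationv1.Update,
				},
				Rule: admissionregistrationv1.Rule{
					APIGroups:   []string{"apps"},
					APIVersions: []string{"v1"},
					Resources:   []string{"deployments"},
					Scope:       &scope,
				},
			}},
			FailurePolicy:           &failurePolicy,
			SideEffects:             &sideEffects,
			AdmissionReviewVersions: []string{"v1"},
		}},
	}
	fmt.Println(cfg.Name, len(cfg.Webhooks))
}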
+ // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000000..2fde0ce37de0b --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MutatingWebhook = map[string]string{ + "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. 
However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). 
Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", +} + +func (MutatingWebhook) SwaggerDoc() map[string]string { + return map_MutatingWebhook +} + +var map_MutatingWebhookConfiguration = map[string]string{ + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfiguration +} + +var map_MutatingWebhookConfigurationList = map[string]string{ + "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of MutatingWebhookConfiguration.", +} + +func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfigurationList +} + +var map_Rule = map[string]string{ + "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", + "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. 
If '*' is present, the length of the slice must be one. Required.", + "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + "scope": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", +} + +func (Rule) SwaggerDoc() map[string]string { + return map_Rule +} + +var map_RuleWithOperations = map[string]string{ + "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", + "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. Required.", +} + +func (RuleWithOperations) SwaggerDoc() map[string]string { + return map_RuleWithOperations +} + +var map_ServiceReference = map[string]string{ + "": "ServiceReference holds a reference to Service.legacy.k8s.io", + "namespace": "`namespace` is the namespace of the service. Required", + "name": "`name` is the name of the service. Required", + "path": "`path` is an optional URL path which will be sent in any request to this service.", + "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", +} + +func (ServiceReference) SwaggerDoc() map[string]string { + return map_ServiceReference +} + +var map_ValidatingWebhook = map[string]string{ + "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. 
Defaults to Fail.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. 
Default to 10 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", +} + +func (ValidatingWebhook) SwaggerDoc() map[string]string { + return map_ValidatingWebhook +} + +var map_ValidatingWebhookConfiguration = map[string]string{ + "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfiguration +} + +var map_ValidatingWebhookConfigurationList = map[string]string{ + "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ValidatingWebhookConfiguration.", +} + +func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfigurationList +} + +var map_WebhookClientConfig = map[string]string{ + "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", + "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. 
If unspecified, system trust roots on the apiserver are used.", +} + +func (WebhookClientConfig) SwaggerDoc() map[string]string { + return map_WebhookClientConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..3afb7467377c2 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go @@ -0,0 +1,396 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ReinvocationPolicy != nil { + in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy + *out = new(ReinvocationPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhook. +func (in *MutatingWebhook) DeepCopy() *MutatingWebhook { + if in == nil { + return nil + } + out := new(MutatingWebhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
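The SwaggerDoc maps above are meant for documentation generators, but they can also be read directly; a small sketch:

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	// Each generated type exposes its field documentation as a map keyed by JSON field name.
	docs := (admissionregistrationv1.ValidatingWebhook{}).SwaggerDoc()
	fmt.Println(docs["sideEffects"])
	fmt.Println(docs[""]) // the empty key documents the type itself
}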
+func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]MutatingWebhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration. +func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(MutatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList. +func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(MutatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIVersions != nil { + in, out := &in.APIVersions, &out.APIVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. +func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { + *out = *in + if in.Operations != nil { + in, out := &in.Operations, &out.Operations + *out = make([]OperationType, len(*in)) + copy(*out, *in) + } + in.Rule.DeepCopyInto(&out.Rule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. 
+func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { + if in == nil { + return nil + } + out := new(RuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhook. +func (in *ValidatingWebhook) DeepCopy() *ValidatingWebhook { + if in == nil { + return nil + } + out := new(ValidatingWebhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]ValidatingWebhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration. +func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList. +func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index 9c3742e587f8c..d84d8b63428d5 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -17,40 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto - - It has these top-level messages: - MutatingWebhook - MutatingWebhookConfiguration - MutatingWebhookConfigurationList - Rule - RuleWithOperations - ServiceReference - ValidatingWebhook - ValidatingWebhookConfiguration - ValidatingWebhookConfigurationList - WebhookClientConfig -*/ package v1beta1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -64,53 +44,285 @@ var _ = math.Inf // proto package needs to be updated. 
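The deepcopy helpers above exist so callers can mutate a copy of an object without touching shared or cached state; a brief sketch of the intended semantics:

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	timeout := int32(5)
	original := &admissionregistrationv1.ValidatingWebhook{
		Name:           "a.example.com", // hypothetical name
		TimeoutSeconds: &timeout,
	}

	// DeepCopy duplicates nested pointers and slices, so edits to the copy
	// do not leak back into the original.
	copied := original.DeepCopy()
	*copied.TimeoutSeconds = 30

	fmt.Println(*original.TimeoutSeconds, *copied.TimeoutSeconds) // 5 30
}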
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } -func (*MutatingWebhook) ProtoMessage() {} -func (*MutatingWebhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } +func (*MutatingWebhook) ProtoMessage() {} +func (*MutatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{0} +} +func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhook.Merge(m, src) +} +func (m *MutatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_abeea74cbc46f55a, []int{1} +} +func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfiguration.Merge(m, src) +} +func (m *MutatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfiguration.DiscardUnknown(m) } +var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo + func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptor_abeea74cbc46f55a, []int{2} +} +func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfigurationList.Merge(m, src) +} +func (m *MutatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfigurationList.DiscardUnknown(m) } -func (m *Rule) Reset() { *m = Rule{} } -func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo -func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } -func (*RuleWithOperations) ProtoMessage() {} -func 
(*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{3} +} +func (m *Rule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Rule.Merge(m, src) +} +func (m *Rule) XXX_Size() int { + return m.Size() +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_Rule proto.InternalMessageInfo -func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } -func (*ValidatingWebhook) ProtoMessage() {} -func (*ValidatingWebhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{4} +} +func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleWithOperations.Merge(m, src) +} +func (m *RuleWithOperations) XXX_Size() int { + return m.Size() +} +func (m *RuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_RuleWithOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{5} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } +func (*ValidatingWebhook) ProtoMessage() {} +func (*ValidatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{6} +} +func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhook) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_ValidatingWebhook.Merge(m, src) +} +func (m *ValidatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_abeea74cbc46f55a, []int{7} } +func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfiguration.Merge(m, src) +} +func (m *ValidatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} + return fileDescriptor_abeea74cbc46f55a, []int{8} +} +func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfigurationList.Merge(m, src) +} +func (m *ValidatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfigurationList.DiscardUnknown(m) } -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{9} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var 
xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") @@ -124,10 +336,89 @@ func init() { proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_abeea74cbc46f55a) +} + +var fileDescriptor_abeea74cbc46f55a = []byte{ + // 1114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4d, 0x6f, 0x23, 0x45, + 0x13, 0xce, 0xc4, 0xf6, 0xda, 0x6e, 0x27, 0xbb, 0x9b, 0x7e, 0x5f, 0x76, 0x4d, 0x58, 0x79, 0x2c, + 0x1f, 0x90, 0x25, 0xd8, 0x99, 0x4d, 0x40, 0x08, 0x16, 0x10, 0x8a, 0x03, 0x0b, 0x91, 0x92, 0xdd, + 0xd0, 0xd9, 0x0f, 0x89, 0x0f, 0x69, 0xdb, 0xe3, 0xb2, 0xdd, 0xd8, 0x9e, 0x1e, 0x4d, 0xf7, 0x38, + 0xe4, 0xc6, 0x4f, 0xe0, 0x2f, 0x70, 0xe2, 0x57, 0x70, 0xe0, 0x16, 0x6e, 0x7b, 0xdc, 0x0b, 0x23, + 0x32, 0x9c, 0x38, 0x70, 0xe0, 0x9a, 0x13, 0x9a, 0x9e, 0xf6, 0xf8, 0x2b, 0x59, 0x4c, 0x90, 0xf6, + 0x94, 0xdb, 0xf4, 0x53, 0x5d, 0x4f, 0x75, 0xd5, 0x54, 0xd5, 0x83, 0x3e, 0xef, 0xbd, 0x2b, 0x2c, + 0xc6, 0xed, 0x5e, 0xd0, 0x04, 0xdf, 0x05, 0x09, 0xc2, 0x1e, 0x82, 0xdb, 0xe2, 0xbe, 0xad, 0x0d, + 0xd4, 0x63, 0x36, 0x6d, 0x0d, 0x98, 0x10, 0x8c, 0xbb, 0x3e, 0x74, 0x98, 0x90, 0x3e, 0x95, 0x8c, + 0xbb, 0xf6, 0x70, 0xa3, 0x09, 0x92, 0x6e, 0xd8, 0x1d, 0x70, 0xc1, 0xa7, 0x12, 0x5a, 0x96, 0xe7, + 0x73, 0xc9, 0x71, 0x3d, 0xf1, 0xb4, 0xa8, 0xc7, 0xac, 0x33, 0x3d, 0x2d, 0xed, 0xb9, 0x7e, 0xbb, + 0xc3, 0x64, 0x37, 0x68, 0x5a, 0x0e, 0x1f, 0xd8, 0x1d, 0xde, 0xe1, 0xb6, 0x22, 0x68, 0x06, 0x6d, + 0x75, 0x52, 0x07, 0xf5, 0x95, 0x10, 0xaf, 0xbf, 0x3d, 0x7e, 0xd2, 0x80, 0x3a, 0x5d, 0xe6, 0x82, + 0x7f, 0x64, 0x7b, 0xbd, 0x4e, 0x0c, 0x08, 0x7b, 0x00, 0x92, 0xda, 0xc3, 0xb9, 0xe7, 0xac, 0xdb, + 0xe7, 0x79, 0xf9, 0x81, 0x2b, 0xd9, 0x00, 0xe6, 0x1c, 0xde, 0xf9, 0x27, 0x07, 0xe1, 0x74, 0x61, + 0x40, 0x67, 0xfd, 0x6a, 0xbf, 0xe4, 0xd1, 0xb5, 0xbd, 0x40, 0x52, 0xc9, 0xdc, 0xce, 0x13, 0x68, + 0x76, 0x39, 0xef, 0xe1, 0x2a, 0xca, 0xba, 0x74, 0x00, 0x65, 0xa3, 0x6a, 0xd4, 0x8b, 0x8d, 0x95, + 0xe3, 0xd0, 0x5c, 0x8a, 0x42, 0x33, 0x7b, 0x9f, 0x0e, 0x80, 0x28, 0x0b, 0x3e, 0x44, 0x2b, 0x4e, + 0x9f, 0x81, 0x2b, 0xb7, 0xb9, 0xdb, 0x66, 0x9d, 0xf2, 0x72, 0xd5, 0xa8, 0x97, 0x36, 0x3f, 0xb4, + 0x16, 0x2d, 0xa2, 0xa5, 0x43, 0x6d, 0x4f, 0x90, 0x34, 0xfe, 0xaf, 0x03, 0xad, 0x4c, 0xa2, 0x64, + 0x2a, 0x10, 0xa6, 0x28, 0xe7, 0x07, 0x7d, 0x10, 0xe5, 0x4c, 0x35, 0x53, 0x2f, 0x6d, 0x7e, 0xb0, + 0x78, 0x44, 0x12, 0xf4, 0xe1, 0x09, 0x93, 0xdd, 0x07, 0x1e, 0x24, 0x16, 0xd1, 0x58, 0xd5, 0x01, + 0x73, 0xb1, 0x4d, 0x90, 0x84, 0x19, 0xef, 0xa2, 0xd5, 0x36, 0x65, 0xfd, 0xc0, 0x87, 0x7d, 0xde, + 0x67, 0xce, 0x51, 0x39, 0xab, 0xca, 0xf0, 0x7a, 0x14, 0x9a, 0xab, 0xf7, 0x26, 0x0d, 0xa7, 0xa1, + 0xb9, 0x36, 0x05, 0x3c, 0x3c, 0xf2, 0x80, 0x4c, 0x3b, 0xe3, 0x8f, 0x51, 0x69, 0x40, 0xa5, 0xd3, + 0xd5, 0x5c, 0x45, 0xc5, 0x55, 0x8b, 0x42, 0xb3, 0xb4, 0x37, 0x86, 0x4f, 0x43, 0xf3, 0xda, 0xc4, + 0x51, 0xf1, 0x4c, 0xba, 0xe1, 0x6f, 0xd1, 0x5a, 0x5c, 0x77, 0xe1, 0x51, 0x07, 0x0e, 0xa0, 0x0f, + 0x8e, 0xe4, 0x7e, 0x39, 0xa7, 0x8a, 0xfe, 0xd6, 0x44, 0x09, 0xd2, 0x3f, 0x6f, 0x79, 0xbd, 0x4e, + 0x0c, 0x08, 0x2b, 0x6e, 0x30, 0x6b, 0xb8, 0x61, 0xed, 0xd2, 0x26, 0xf4, 0x47, 
0xae, 0x8d, 0x57, + 0xa2, 0xd0, 0x5c, 0xbb, 0x3f, 0xcb, 0x48, 0xe6, 0x83, 0x60, 0x8e, 0xae, 0xf2, 0xe6, 0x37, 0xe0, + 0xc8, 0x34, 0x6c, 0xe9, 0xe2, 0x61, 0x71, 0x14, 0x9a, 0x57, 0x1f, 0x4c, 0xd1, 0x91, 0x19, 0xfa, + 0xb8, 0x60, 0x82, 0xb5, 0xe0, 0x93, 0x76, 0x1b, 0x1c, 0x29, 0xca, 0x57, 0xc6, 0x05, 0x3b, 0x18, + 0xc3, 0x71, 0xc1, 0xc6, 0xc7, 0xed, 0x3e, 0x15, 0x82, 0x4c, 0xba, 0xe1, 0xbb, 0xe8, 0x6a, 0xdc, + 0xf5, 0x3c, 0x90, 0x07, 0xe0, 0x70, 0xb7, 0x25, 0xca, 0xf9, 0xaa, 0x51, 0xcf, 0x25, 0x2f, 0x78, + 0x38, 0x65, 0x21, 0x33, 0x37, 0xf1, 0x23, 0x74, 0x33, 0x6d, 0x25, 0x02, 0x43, 0x06, 0x87, 0x8f, + 0xc1, 0x8f, 0x0f, 0xa2, 0x5c, 0xa8, 0x66, 0xea, 0xc5, 0xc6, 0x6b, 0x51, 0x68, 0xde, 0xdc, 0x3a, + 0xfb, 0x0a, 0x39, 0xcf, 0x17, 0x3f, 0x45, 0xd8, 0x07, 0xe6, 0x0e, 0xb9, 0xa3, 0xda, 0x4f, 0x37, + 0x04, 0x52, 0xf9, 0xdd, 0x89, 0x42, 0x13, 0x93, 0x39, 0xeb, 0x69, 0x68, 0xde, 0x98, 0x47, 0x55, + 0x7b, 0x9c, 0xc1, 0x55, 0xfb, 0xd5, 0x40, 0xb7, 0x66, 0x66, 0x39, 0x19, 0x9b, 0x20, 0xe9, 0x78, + 0xfc, 0x14, 0x15, 0xe2, 0x1f, 0xd3, 0xa2, 0x92, 0xaa, 0xe1, 0x2e, 0x6d, 0xde, 0x59, 0xec, 0x37, + 0x26, 0xff, 0x6c, 0x0f, 0x24, 0x6d, 0x60, 0x3d, 0x34, 0x68, 0x8c, 0x91, 0x94, 0x15, 0x7f, 0x89, + 0x0a, 0x3a, 0xb2, 0x28, 0x2f, 0xab, 0x11, 0x7d, 0x6f, 0xf1, 0x11, 0x9d, 0x79, 0x7b, 0x23, 0x1b, + 0x87, 0x22, 0x85, 0x43, 0x4d, 0x58, 0xfb, 0xd3, 0x40, 0xd5, 0x17, 0xe5, 0xb7, 0xcb, 0x84, 0xc4, + 0x5f, 0xcd, 0xe5, 0x68, 0x2d, 0xd8, 0xaa, 0x4c, 0x24, 0x19, 0x5e, 0xd7, 0x19, 0x16, 0x46, 0xc8, + 0x44, 0x7e, 0x3d, 0x94, 0x63, 0x12, 0x06, 0xa3, 0xe4, 0xee, 0x5d, 0x38, 0xb9, 0xa9, 0x87, 0x8f, + 0x37, 0xd1, 0x4e, 0x4c, 0x4e, 0x92, 0x18, 0xb5, 0x9f, 0x0d, 0x94, 0x8d, 0x57, 0x13, 0x7e, 0x03, + 0x15, 0xa9, 0xc7, 0x3e, 0xf5, 0x79, 0xe0, 0x89, 0xb2, 0xa1, 0x7a, 0x70, 0x35, 0x0a, 0xcd, 0xe2, + 0xd6, 0xfe, 0x4e, 0x02, 0x92, 0xb1, 0x1d, 0x6f, 0xa0, 0x12, 0xf5, 0x58, 0xda, 0xb2, 0xcb, 0xea, + 0xfa, 0xb5, 0x78, 0x80, 0xb6, 0xf6, 0x77, 0xd2, 0x36, 0x9d, 0xbc, 0x13, 0xf3, 0xfb, 0x20, 0x78, + 0xe0, 0x3b, 0x7a, 0xb3, 0x6a, 0x7e, 0x32, 0x02, 0xc9, 0xd8, 0x8e, 0xdf, 0x44, 0x39, 0xe1, 0x70, + 0x0f, 0xf4, 0x5e, 0xbc, 0x11, 0x3f, 0xfb, 0x20, 0x06, 0x4e, 0x43, 0xb3, 0xa8, 0x3e, 0x54, 0x83, + 0x26, 0x97, 0x6a, 0x3f, 0x1a, 0x08, 0xcf, 0xaf, 0x5e, 0xfc, 0x11, 0x42, 0x3c, 0x3d, 0xe9, 0x94, + 0x4c, 0xd5, 0x55, 0x29, 0x7a, 0x1a, 0x9a, 0xab, 0xe9, 0x49, 0x51, 0x4e, 0xb8, 0xe0, 0x7d, 0x94, + 0x8d, 0xd7, 0xb5, 0x56, 0x1e, 0xeb, 0xdf, 0xe9, 0xc0, 0x58, 0xd3, 0xe2, 0x13, 0x51, 0x4c, 0xb5, + 0x1f, 0x0c, 0x74, 0xfd, 0x00, 0xfc, 0x21, 0x73, 0x80, 0x40, 0x1b, 0x7c, 0x70, 0x1d, 0xc0, 0x36, + 0x2a, 0xa6, 0x3b, 0x51, 0xeb, 0xe1, 0x9a, 0xf6, 0x2d, 0xa6, 0xfb, 0x93, 0x8c, 0xef, 0xa4, 0xda, + 0xb9, 0x7c, 0xae, 0x76, 0xde, 0x42, 0x59, 0x8f, 0xca, 0x6e, 0x39, 0xa3, 0x6e, 0x14, 0x62, 0xeb, + 0x3e, 0x95, 0x5d, 0xa2, 0x50, 0x65, 0xe5, 0xbe, 0x54, 0xc5, 0xcd, 0x69, 0x2b, 0xf7, 0x25, 0x51, + 0x68, 0xed, 0x8f, 0x2b, 0x68, 0xed, 0x31, 0xed, 0xb3, 0xd6, 0xa5, 0x5e, 0x5f, 0xea, 0xf5, 0x82, + 0x7a, 0x8d, 0x2e, 0xf5, 0xfa, 0x22, 0x7a, 0x5d, 0x3b, 0x31, 0x50, 0x65, 0x6e, 0xd6, 0x5e, 0xb6, + 0x9e, 0x7e, 0x3d, 0xa7, 0xa7, 0xef, 0x2f, 0x3e, 0x42, 0x73, 0xaf, 0x9f, 0x53, 0xd4, 0xbf, 0x0c, + 0x54, 0x7b, 0x71, 0x8e, 0x2f, 0x41, 0x53, 0x07, 0xd3, 0x9a, 0xfa, 0xd9, 0x7f, 0x48, 0x70, 0x11, + 0x55, 0xfd, 0xc9, 0x40, 0xff, 0x3b, 0x63, 0x9d, 0xe1, 0x57, 0x51, 0x26, 0xf0, 0xfb, 0x7a, 0x2d, + 0xe7, 0xa3, 0xd0, 0xcc, 0x3c, 0x22, 0xbb, 0x24, 0xc6, 0x30, 0x45, 0x79, 0x91, 0x28, 0x83, 0x4e, + 0xff, 0xee, 0xe2, 0x6f, 0x9c, 0x95, 0x94, 0x46, 0x29, 0x0a, 0xcd, 0xfc, 0x08, 0x1d, 0xf1, 0xe2, + 0x3a, 
0x2a, 0x38, 0xb4, 0x11, 0xb8, 0x2d, 0xad, 0x69, 0x2b, 0x8d, 0x95, 0xb8, 0x5c, 0xdb, 0x5b, + 0x09, 0x46, 0x52, 0x6b, 0xe3, 0xf6, 0xf1, 0x49, 0x65, 0xe9, 0xd9, 0x49, 0x65, 0xe9, 0xf9, 0x49, + 0x65, 0xe9, 0xbb, 0xa8, 0x62, 0x1c, 0x47, 0x15, 0xe3, 0x59, 0x54, 0x31, 0x9e, 0x47, 0x15, 0xe3, + 0xb7, 0xa8, 0x62, 0x7c, 0xff, 0x7b, 0x65, 0xe9, 0x8b, 0xbc, 0x8e, 0xff, 0x77, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xae, 0x27, 0x2b, 0xcb, 0x2c, 0x0f, 0x00, 0x00, +} + func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -135,105 +426,117 @@ func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { } func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n1, err := m.ClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x5a } - if m.FailurePolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i += copy(dAtA[i:], *m.FailurePolicy) + if m.ReinvocationPolicy != nil { + i -= len(*m.ReinvocationPolicy) + copy(dAtA[i:], *m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x52 } - if m.NamespaceSelector != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n2, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a } - if m.SideEffects != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i += copy(dAtA[i:], *m.SideEffects) + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } } if m.TimeoutSeconds != nil { - dAtA[i] = 0x38 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - dAtA[i] = 0x42 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := 
m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if m.MatchPolicy != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) - i += copy(dAtA[i:], *m.MatchPolicy) + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 } - if m.ReinvocationPolicy != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) - i += copy(dAtA[i:], *m.ReinvocationPolicy) + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - if m.ObjectSelector != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectSelector.Size())) - n3, err := m.ObjectSelector.MarshalTo(dAtA[i:]) + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n3 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -241,37 +544,46 @@ func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { } func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Webhooks) > 0 { - for _, msg := range m.Webhooks { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -279,37 +591,46 @@ func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { } func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Rule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -317,68 +638,56 @@ func (m *Rule) Marshal() (dAtA []byte, err error) { } func (m *Rule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if m.Scope != nil { + i -= len(*m.Scope) + copy(dAtA[i:], *m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) + i-- + dAtA[i] = 0x22 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a } } if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { + for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIVersions[iNdEx]) + copy(dAtA[i:], m.APIVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0xa } } - if m.Scope != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) - i += copy(dAtA[i:], *m.Scope) - } - return i, nil + return len(dAtA) - i, nil } func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -386,40 
+695,41 @@ func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { } func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) - n6, err := m.Rule.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Operations) > 0 { + for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Operations[iNdEx]) + copy(dAtA[i:], m.Operations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - i += n6 - return i, nil + return len(dAtA) - i, nil } func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -427,36 +737,44 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) { } func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.Path != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i += copy(dAtA[i:], *m.Path) - } if m.Port != nil { - dAtA[i] = 0x20 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -464,99 +782,110 @@ func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { } func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n7, err := m.ClientConfig.MarshalTo(dAtA[i:]) - 
if err != nil { - return 0, err - } - i += n7 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x52 } - if m.FailurePolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i += copy(dAtA[i:], *m.FailurePolicy) + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a } - if m.NamespaceSelector != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n8, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 } - i += n8 - } - if m.SideEffects != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i += copy(dAtA[i:], *m.SideEffects) } if m.TimeoutSeconds != nil { - dAtA[i] = 0x38 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - dAtA[i] = 0x42 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if m.MatchPolicy != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) - i += copy(dAtA[i:], *m.MatchPolicy) + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 } - if m.ObjectSelector != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectSelector.Size())) - n9, err := m.ObjectSelector.MarshalTo(dAtA[i:]) + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n9 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() 
dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -564,37 +893,46 @@ func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { } func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n10, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 if len(m.Webhooks) > 0 { - for _, msg := range m.Webhooks { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -602,37 +940,46 @@ func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) } func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -640,45 +987,59 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Service != nil { - 
dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n12, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a } if m.CABundle != nil { - dAtA[i] = 0x12 - i++ + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) + i-- + dAtA[i] = 0x12 } - if m.URL != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i += copy(dAtA[i:], *m.URL) + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *MutatingWebhook) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -728,6 +1089,9 @@ func (m *MutatingWebhook) Size() (n int) { } func (m *MutatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -742,6 +1106,9 @@ func (m *MutatingWebhookConfiguration) Size() (n int) { } func (m *MutatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -756,6 +1123,9 @@ func (m *MutatingWebhookConfigurationList) Size() (n int) { } func (m *Rule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.APIGroups) > 0 { @@ -784,6 +1154,9 @@ func (m *Rule) Size() (n int) { } func (m *RuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Operations) > 0 { @@ -798,6 +1171,9 @@ func (m *RuleWithOperations) Size() (n int) { } func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -815,6 +1191,9 @@ func (m *ServiceReference) Size() (n int) { } func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -860,6 +1239,9 @@ func (m *ValidatingWebhook) Size() (n int) { } func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -874,6 +1256,9 @@ func (m *ValidatingWebhookConfiguration) Size() (n int) { } func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -888,6 +1273,9 @@ func (m *ValidatingWebhookConfigurationList) Size() (n int) { } func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Service != nil { @@ -906,14 +1294,7 @@ func (m *WebhookClientConfig) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -922,18 +1303,23 @@ func (this *MutatingWebhook) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += 
strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&MutatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -942,9 +1328,14 @@ func (this *MutatingWebhookConfiguration) String() string { if this == nil { return "nil" } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" s := strings.Join([]string{`&MutatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, `}`, }, "") return s @@ -953,9 +1344,14 @@ func (this *MutatingWebhookConfigurationList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ 
-1001,17 +1397,22 @@ func (this *ValidatingWebhook) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&ValidatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1020,9 +1421,14 @@ func (this *ValidatingWebhookConfiguration) String() string { if this == nil { return "nil" } + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, `}`, }, "") return s @@ -1031,9 +1437,14 @@ func (this *ValidatingWebhookConfigurationList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `ListMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1043,7 +1454,7 @@ func (this *WebhookClientConfig) String() string { return "nil" } s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, `URL:` + valueToStringGenerated(this.URL) + `,`, `}`, @@ -1073,7 +1484,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1101,7 +1512,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1111,6 +1522,9 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1130,7 +1544,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1139,6 +1553,9 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1160,7 +1577,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1169,6 +1586,9 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1191,7 +1611,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1201,6 +1621,9 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1221,7 +1644,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1230,11 +1653,14 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.NamespaceSelector = &v1.LabelSelector{} } if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1254,7 +1680,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << 
shift if b < 0x80 { break } @@ -1264,6 +1690,9 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1284,7 +1713,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1304,7 +1733,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1314,6 +1743,9 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1333,7 +1765,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1343,6 +1775,9 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1363,7 +1798,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1373,6 +1808,9 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1393,7 +1831,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1402,11 +1840,14 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.ObjectSelector == nil { - m.ObjectSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.ObjectSelector = &v1.LabelSelector{} } if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1421,6 +1862,9 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1448,7 +1892,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1476,7 +1920,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1485,6 +1929,9 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1506,7 +1953,7 @@ func (m 
*MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1515,6 +1962,9 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1532,6 +1982,9 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1559,7 +2012,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1587,7 +2040,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1596,6 +2049,9 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1617,7 +2073,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1626,6 +2082,9 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1643,6 +2102,9 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1670,7 +2132,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1698,7 +2160,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1708,6 +2170,9 @@ func (m *Rule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1727,7 +2192,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1737,6 +2202,9 @@ func (m *Rule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1756,7 +2224,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1766,6 +2234,9 @@ func (m *Rule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1785,7 +2256,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1795,6 +2266,9 @@ func (m *Rule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1810,6 +2284,9 @@ func (m *Rule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1837,7 +2314,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1865,7 +2342,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1875,6 +2352,9 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1894,7 +2374,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1903,6 +2383,9 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1919,6 +2402,9 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1946,7 +2432,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1974,7 +2460,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1984,6 +2470,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2003,7 +2492,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2013,6 +2502,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2032,7 +2524,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-2042,6 +2534,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2062,7 +2557,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2077,6 +2572,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2104,7 +2602,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2132,7 +2630,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2142,6 +2640,9 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2161,7 +2662,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2170,6 +2671,9 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2191,7 +2695,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2200,6 +2704,9 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2222,7 +2729,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2232,6 +2739,9 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2252,7 +2762,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2261,11 +2771,14 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.NamespaceSelector = &v1.LabelSelector{} } if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2285,7 +2798,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2295,6 +2808,9 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2315,7 +2831,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2335,7 +2851,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2345,6 +2861,9 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2364,7 +2883,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2374,6 +2893,9 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2394,7 +2916,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2403,11 +2925,14 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.ObjectSelector == nil { - m.ObjectSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.ObjectSelector = &v1.LabelSelector{} } if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2422,6 +2947,9 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2449,7 +2977,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2477,7 +3005,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2486,6 +3014,9 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2507,7 +3038,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2516,6 +3047,9 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + 
} if postIndex > l { return io.ErrUnexpectedEOF } @@ -2533,6 +3067,9 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2560,7 +3097,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2588,7 +3125,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2597,6 +3134,9 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2618,7 +3158,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2627,6 +3167,9 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2644,6 +3187,9 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2671,7 +3217,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2699,7 +3245,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2708,6 +3254,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2732,7 +3281,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2741,6 +3290,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2763,7 +3315,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2773,6 +3325,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2788,6 +3343,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2854,10 +3412,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2886,6 +3447,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2904,81 +3468,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1113 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xcf, 0xc6, 0x76, 0x6d, 0x8f, 0x93, 0xa6, 0x99, 0xff, 0x9f, 0xd6, 0x84, 0xca, 0x6b, 0xf9, - 0x80, 0x2c, 0x41, 0x77, 0x9b, 0x80, 0x10, 0x14, 0x10, 0xca, 0x06, 0x0a, 0x91, 0x92, 0x36, 0x4c, - 0xfa, 0x22, 0xf1, 0x22, 0x75, 0xbc, 0x1e, 0xdb, 0x83, 0xed, 0x9d, 0xd5, 0xce, 0xac, 0x43, 0x6e, - 0x7c, 0x04, 0xbe, 0x02, 0x27, 0x3e, 0x05, 0x07, 0x6e, 0xe1, 0xd6, 0x63, 0x2f, 0xac, 0xc8, 0x72, - 0xe2, 0xc0, 0x81, 0x6b, 0x4e, 0x68, 0x66, 0xc7, 0xeb, 0x97, 0x4d, 0x8a, 0x29, 0xa2, 0x17, 0x7a, - 0xdb, 0xf9, 0x3d, 0xf3, 0xfc, 0x9e, 0x97, 0xd9, 0xe7, 0xf9, 0x81, 0x4f, 0xfb, 0x6f, 0x73, 0x8b, - 0x32, 0xbb, 0x1f, 0xb6, 0x48, 0xe0, 0x11, 0x41, 0xb8, 0x3d, 0x22, 0x5e, 0x9b, 0x05, 0xb6, 0x36, - 0x60, 0x9f, 0xda, 0xb8, 0x3d, 0xa4, 0x9c, 0x53, 0xe6, 0x05, 0xa4, 0x4b, 0xb9, 0x08, 0xb0, 0xa0, - 0xcc, 0xb3, 0x47, 0x9b, 0x2d, 0x22, 0xf0, 0xa6, 0xdd, 0x25, 0x1e, 0x09, 0xb0, 0x20, 0x6d, 0xcb, - 0x0f, 0x98, 0x60, 0xb0, 0x99, 0x78, 0x5a, 0xd8, 0xa7, 0xd6, 0xb9, 0x9e, 0x96, 0xf6, 0xdc, 0xb8, - 0xd1, 0xa5, 0xa2, 0x17, 0xb6, 0x2c, 0x97, 0x0d, 0xed, 0x2e, 0xeb, 0x32, 0x5b, 0x11, 0xb4, 0xc2, - 0x8e, 0x3a, 0xa9, 0x83, 0xfa, 0x4a, 0x88, 0x37, 0xde, 0x9c, 0xa4, 0x34, 0xc4, 0x6e, 0x8f, 0x7a, - 0x24, 0x38, 0xb6, 0xfd, 0x7e, 0x57, 0x02, 0xdc, 0x1e, 0x12, 0x81, 0xed, 0x51, 0x26, 0x9d, 0x0d, - 0xfb, 0x22, 0xaf, 0x20, 0xf4, 0x04, 0x1d, 0x92, 0x8c, 0xc3, 0x5b, 0x7f, 0xe5, 0xc0, 0xdd, 0x1e, - 0x19, 0xe2, 0x79, 0xbf, 0xc6, 0x4f, 0x45, 0xb0, 0xb6, 0x1f, 0x0a, 0x2c, 0xa8, 0xd7, 0x7d, 0x48, - 0x5a, 0x3d, 0xc6, 0xfa, 0xb0, 0x0e, 0xf2, 0x1e, 0x1e, 0x92, 0xaa, 0x51, 0x37, 0x9a, 0x65, 0x67, - 0xe5, 0x24, 0x32, 0x97, 0xe2, 0xc8, 0xcc, 0xdf, 0xc1, 0x43, 0x82, 0x94, 0x05, 0x1e, 0x81, 0x15, - 0x77, 0x40, 0x89, 0x27, 0x76, 0x98, 0xd7, 0xa1, 0xdd, 0xea, 0x72, 0xdd, 0x68, 0x56, 0xb6, 0xde, - 0xb7, 0x16, 0x6d, 0xa2, 0xa5, 0x43, 0xed, 0x4c, 0x91, 0x38, 0xff, 0xd7, 0x81, 0x56, 0xa6, 0x51, - 0x34, 0x13, 0x08, 0x62, 0x50, 0x08, 0xc2, 0x01, 0xe1, 0xd5, 0x5c, 0x3d, 0xd7, 0xac, 0x6c, 0xbd, - 0xb7, 0x78, 0x44, 0x14, 0x0e, 0xc8, 0x43, 0x2a, 0x7a, 0x77, 0x7d, 0x92, 0x58, 0xb8, 0xb3, 0xaa, - 0x03, 0x16, 0xa4, 0x8d, 0xa3, 0x84, 0x19, 0xee, 0x81, 0xd5, 0x0e, 0xa6, 0x83, 0x30, 0x20, 0x07, - 0x6c, 0x40, 0xdd, 0xe3, 0x6a, 0x5e, 0xb5, 0xe1, 0xd5, 0x38, 0x32, 0x57, 0x6f, 0x4f, 0x1b, 0xce, - 0x22, 0x73, 0x7d, 0x06, 0xb8, 0x77, 0xec, 0x13, 0x34, 0xeb, 0x0c, 0xbf, 0x06, 0xeb, 0xb2, 0x63, - 0xdc, 0xc7, 0x2e, 0x39, 0x24, 0x03, 0xe2, 0x0a, 0x16, 0x54, 0x0b, 0xaa, 0x5d, 0x6f, 0x4c, 0x25, - 0x9f, 0xbe, 0x99, 0xe5, 
0xf7, 0xbb, 0x12, 0xe0, 0x96, 0xfc, 0x35, 0xac, 0xd1, 0xa6, 0xb5, 0x87, - 0x5b, 0x64, 0x30, 0x76, 0x75, 0x5e, 0x8a, 0x23, 0x73, 0xfd, 0xce, 0x3c, 0x23, 0xca, 0x06, 0x81, - 0x1f, 0x82, 0x0a, 0xa7, 0x6d, 0xf2, 0x51, 0xa7, 0x43, 0x5c, 0xc1, 0xab, 0x97, 0x54, 0x15, 0x8d, - 0x38, 0x32, 0x2b, 0x87, 0x13, 0xf8, 0x2c, 0x32, 0xd7, 0x26, 0xc7, 0x9d, 0x01, 0xe6, 0x1c, 0x4d, - 0xbb, 0xc1, 0x5b, 0xe0, 0xb2, 0xfc, 0x7d, 0x58, 0x28, 0x0e, 0x89, 0xcb, 0xbc, 0x36, 0xaf, 0x16, - 0xeb, 0x46, 0xb3, 0xe0, 0xc0, 0x38, 0x32, 0x2f, 0xdf, 0x9b, 0xb1, 0xa0, 0xb9, 0x9b, 0xf0, 0x3e, - 0xb8, 0x96, 0xbe, 0x09, 0x22, 0x23, 0x4a, 0x8e, 0x1e, 0x90, 0x40, 0x1e, 0x78, 0xb5, 0x54, 0xcf, - 0x35, 0xcb, 0xce, 0x2b, 0x71, 0x64, 0x5e, 0xdb, 0x3e, 0xff, 0x0a, 0xba, 0xc8, 0x57, 0x16, 0x36, - 0xc4, 0xc2, 0xed, 0xe9, 0xe7, 0x29, 0x4f, 0x0a, 0xdb, 0x9f, 0xc0, 0xb2, 0xb0, 0xa9, 0xa3, 0x7a, - 0x9a, 0x69, 0x37, 0xf8, 0x08, 0xc0, 0x80, 0x50, 0x6f, 0xc4, 0x5c, 0xf5, 0x37, 0x68, 0x32, 0xa0, - 0xc8, 0x6e, 0xc6, 0x91, 0x09, 0x51, 0xc6, 0x7a, 0x16, 0x99, 0x57, 0xb3, 0xa8, 0xa2, 0x3e, 0x87, - 0x0b, 0x32, 0x70, 0x99, 0xb5, 0xbe, 0x22, 0xae, 0x48, 0xdf, 0xbd, 0xf2, 0xec, 0xef, 0xae, 0xfa, - 0x7d, 0x77, 0x86, 0x0e, 0xcd, 0xd1, 0x37, 0x7e, 0x36, 0xc0, 0xf5, 0xb9, 0x59, 0x4e, 0xc6, 0x26, - 0x4c, 0xfe, 0x78, 0xf8, 0x08, 0x94, 0x24, 0x7b, 0x1b, 0x0b, 0xac, 0x86, 0xbb, 0xb2, 0x75, 0x73, - 0xb1, 0x5c, 0x92, 0xc0, 0xfb, 0x44, 0x60, 0x07, 0xea, 0xa1, 0x01, 0x13, 0x0c, 0xa5, 0xac, 0xf0, - 0x73, 0x50, 0xd2, 0x91, 0x79, 0x75, 0x59, 0x8d, 0xe8, 0x3b, 0x8b, 0x8f, 0xe8, 0x5c, 0xee, 0x4e, - 0x5e, 0x86, 0x42, 0xa5, 0x23, 0x4d, 0xd8, 0xf8, 0xdd, 0x00, 0xf5, 0xa7, 0xd5, 0xb7, 0x47, 0xb9, - 0x80, 0x5f, 0x64, 0x6a, 0xb4, 0x16, 0xec, 0x37, 0xe5, 0x49, 0x85, 0x57, 0x74, 0x85, 0xa5, 0x31, - 0x32, 0x55, 0x5f, 0x1f, 0x14, 0xa8, 0x20, 0xc3, 0x71, 0x71, 0xb7, 0x9f, 0xb9, 0xb8, 0x99, 0xc4, - 0x27, 0x9b, 0x68, 0x57, 0x92, 0xa3, 0x24, 0x46, 0xe3, 0x47, 0x03, 0xe4, 0xe5, 0x6a, 0x82, 0xaf, - 0x81, 0x32, 0xf6, 0xe9, 0xc7, 0x01, 0x0b, 0x7d, 0x5e, 0x35, 0xd4, 0xe8, 0xac, 0xc6, 0x91, 0x59, - 0xde, 0x3e, 0xd8, 0x4d, 0x40, 0x34, 0xb1, 0xc3, 0x4d, 0x50, 0xc1, 0x3e, 0x4d, 0x27, 0x6d, 0x59, - 0x5d, 0x5f, 0x93, 0xe3, 0xb1, 0x7d, 0xb0, 0x9b, 0x4e, 0xd7, 0xf4, 0x1d, 0xc9, 0x1f, 0x10, 0xce, - 0xc2, 0xc0, 0xd5, 0x9b, 0x55, 0xf3, 0xa3, 0x31, 0x88, 0x26, 0x76, 0xf8, 0x3a, 0x28, 0x70, 0x97, - 0xf9, 0x44, 0xef, 0xc5, 0xab, 0x32, 0xed, 0x43, 0x09, 0x9c, 0x45, 0x66, 0x59, 0x7d, 0xa8, 0x89, - 0x48, 0x2e, 0x35, 0xbe, 0x37, 0x00, 0xcc, 0xae, 0x5e, 0xf8, 0x01, 0x00, 0x2c, 0x3d, 0xe9, 0x92, - 0x4c, 0xf5, 0x57, 0xa5, 0xe8, 0x59, 0x64, 0xae, 0xa6, 0x27, 0x45, 0x39, 0xe5, 0x02, 0x0f, 0x40, - 0x5e, 0xae, 0x6b, 0xad, 0x3c, 0xd6, 0xdf, 0xd3, 0x81, 0x89, 0xa6, 0xc9, 0x13, 0x52, 0x4c, 0x8d, - 0xef, 0x0c, 0x70, 0xe5, 0x90, 0x04, 0x23, 0xea, 0x12, 0x44, 0x3a, 0x24, 0x20, 0x9e, 0x4b, 0xa0, - 0x0d, 0xca, 0xe9, 0x66, 0xd5, 0x7a, 0xb8, 0xae, 0x7d, 0xcb, 0xe9, 0x16, 0x46, 0x93, 0x3b, 0xa9, - 0x76, 0x2e, 0x5f, 0xa8, 0x9d, 0xd7, 0x41, 0xde, 0xc7, 0xa2, 0x57, 0xcd, 0xa9, 0x1b, 0x25, 0x69, - 0x3d, 0xc0, 0xa2, 0x87, 0x14, 0xaa, 0xac, 0x2c, 0x10, 0xaa, 0xb9, 0x05, 0x6d, 0x65, 0x81, 0x40, - 0x0a, 0x6d, 0xfc, 0x76, 0x09, 0xac, 0x3f, 0xc0, 0x03, 0xda, 0x7e, 0xa1, 0xd7, 0x2f, 0xf4, 0xfa, - 0xbf, 0xa5, 0xd7, 0x59, 0x35, 0x05, 0xff, 0xae, 0x9a, 0x9e, 0x1a, 0xa0, 0x96, 0x99, 0xb5, 0xe7, - 0xad, 0xa7, 0x5f, 0x66, 0xf4, 0xf4, 0xdd, 0xc5, 0x47, 0x28, 0x93, 0x7d, 0x46, 0x51, 0xff, 0x30, - 0x40, 0xe3, 0xe9, 0x35, 0x3e, 0x07, 0x4d, 0x1d, 0xce, 0x6a, 0xea, 0x27, 0xff, 0xa0, 0xc0, 0x45, - 0x54, 0xf5, 0x07, 0x03, 0xfc, 0xef, 0x9c, 0x75, 
0x06, 0x31, 0x28, 0xf2, 0x64, 0xfd, 0xeb, 0x1a, - 0x6f, 0x2d, 0x9e, 0xc8, 0xbc, 0x6e, 0x38, 0x95, 0x38, 0x32, 0x8b, 0x63, 0x74, 0xcc, 0x0b, 0x9b, - 0xa0, 0xe4, 0x62, 0x27, 0xf4, 0xda, 0x5a, 0xb8, 0x56, 0x9c, 0x15, 0xd9, 0x93, 0x9d, 0xed, 0x04, - 0x43, 0xa9, 0x15, 0xbe, 0x0c, 0x72, 0x61, 0x30, 0xd0, 0x1a, 0x51, 0x8c, 0x23, 0x33, 0x77, 0x1f, - 0xed, 0x21, 0x89, 0x39, 0x37, 0x4e, 0x4e, 0x6b, 0x4b, 0x8f, 0x4f, 0x6b, 0x4b, 0x4f, 0x4e, 0x6b, - 0x4b, 0xdf, 0xc4, 0x35, 0xe3, 0x24, 0xae, 0x19, 0x8f, 0xe3, 0x9a, 0xf1, 0x24, 0xae, 0x19, 0xbf, - 0xc4, 0x35, 0xe3, 0xdb, 0x5f, 0x6b, 0x4b, 0x9f, 0x15, 0x75, 0x6a, 0x7f, 0x06, 0x00, 0x00, 0xff, - 0xff, 0xc3, 0x6f, 0x8b, 0x7e, 0x2c, 0x0f, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index c133192647d8c..17de1a587aadc 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -179,8 +179,9 @@ message MutatingWebhook { } // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead. message MutatingWebhookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -194,7 +195,7 @@ message MutatingWebhookConfiguration { // MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. message MutatingWebhookConfigurationList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -414,8 +415,9 @@ message ValidatingWebhook { } // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead. message ValidatingWebhookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -429,7 +431,7 @@ message ValidatingWebhookConfiguration { // ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. message ValidatingWebhookConfigurationList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 6b8c5a23a724c..cf065a286c6fe 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -115,9 +115,10 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead. type ValidatingWebhookConfiguration struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Webhooks is a list of webhooks and the affected resources and operations. @@ -133,7 +134,7 @@ type ValidatingWebhookConfiguration struct { type ValidatingWebhookConfigurationList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingWebhookConfiguration. @@ -145,9 +146,10 @@ type ValidatingWebhookConfigurationList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead. type MutatingWebhookConfiguration struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Webhooks is a list of webhooks and the affected resources and operations. @@ -163,7 +165,7 @@ type MutatingWebhookConfiguration struct { type MutatingWebhookConfigurationList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of MutatingWebhookConfiguration. 
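The hunks above apply the same two mechanical changes to every generated Unmarshal method: the varint accumulation is rewritten from `(uint64(b) & 0x7F) << shift` to the equivalent `uint64(b&0x7F) << shift`, and a `postIndex < 0` (or `(iNdEx + skippy) < 0`) guard is added next to each existing bounds check. The standalone sketch below is not the generated code itself; the function and error names are invented for illustration, and it only shows why decoding a length-delimited protobuf field wants both guards.

package main

import (
	"errors"
	"fmt"
)

var (
	errInvalidLength = errors.New("proto: negative length found during unmarshaling")
	errIntOverflow   = errors.New("proto: integer overflow")
	errUnexpectedEOF = errors.New("unexpected EOF")
)

// readLengthDelimited decodes a varint length prefix at dAtA[iNdEx:] and
// returns the field bytes plus the index just past them. It mirrors the two
// guards used in the regenerated code: postIndex > len(dAtA) catches a
// truncated buffer, while postIndex < 0 catches a length so large that the
// addition wraps around int on the host platform.
func readLengthDelimited(dAtA []byte, iNdEx int) ([]byte, int, error) {
	l := len(dAtA)
	var length int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errIntOverflow
		}
		if iNdEx >= l {
			return nil, 0, errUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		length |= int(b&0x7F) << shift // same form as the regenerated decoder
		if b < 0x80 {
			break
		}
	}
	if length < 0 {
		return nil, 0, errInvalidLength
	}
	postIndex := iNdEx + length
	if postIndex < 0 { // the overflow guard added throughout the diff
		return nil, 0, errInvalidLength
	}
	if postIndex > l {
		return nil, 0, errUnexpectedEOF
	}
	return dAtA[iNdEx:postIndex], postIndex, nil
}

func main() {
	// 0x03 is the varint length prefix, followed by three payload bytes.
	buf := []byte{0x03, 'f', 'o', 'o'}
	field, next, err := readLengthDelimited(buf, 0)
	fmt.Println(string(field), next, err) // foo 4 <nil>
}

Without the `postIndex < 0` check, a crafted length near MaxInt64 can make `iNdEx + length` wrap to a negative value; that value slips past the `postIndex > l` test and the subsequent slice expression panics, which is presumably the failure mode the regenerated guards are defending against.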
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index 39e86db9769fd..f40a15d50cb4f 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -47,8 +47,8 @@ func (MutatingWebhook) SwaggerDoc() map[string]string { } var map_MutatingWebhookConfiguration = map[string]string{ - "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", } @@ -58,7 +58,7 @@ func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { var map_MutatingWebhookConfigurationList = map[string]string{ "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of MutatingWebhookConfiguration.", } @@ -118,8 +118,8 @@ func (ValidatingWebhook) SwaggerDoc() map[string]string { } var map_ValidatingWebhookConfiguration = map[string]string{ - "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", } @@ -129,7 +129,7 @@ func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { var map_ValidatingWebhookConfigurationList = map[string]string{ "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ValidatingWebhookConfiguration.", } diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index 89bc75f4d4892..425144d857bfe 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -17,62 +17,24 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentSpec - DeploymentStatus - DeploymentStrategy - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - RollingUpdateDaemonSet - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ package v1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" + io "io" + proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" - - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" + strings "strings" - io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // Reference imports to suppress errors if they are not otherwise used. @@ -86,124 +48,790 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{2} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{3} +} +func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m 
*DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{4} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{5} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{6} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{7} +} +func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) 
XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{8} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} + +var xxx_messageInfo_Deployment proto.InternalMessageInfo + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{9} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{10} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{11} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != 
nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{12} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{13} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, 
[]int{14} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{15} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{16} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{14} } +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{17} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{18} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{19} +} +func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() 
{} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_e1014cab6f31e43b, []int{20} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} + return fileDescriptor_e1014cab6f31e43b, []int{21} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{22} +} +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{23} +} +func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err 
!= nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{24} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) } -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{25} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{26} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{27} + return fileDescriptor_e1014cab6f31e43b, []int{27} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) } +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo + func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision") proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1.ControllerRevisionList") @@ -234,10 +862,146 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptor_e1014cab6f31e43b) +} + +var fileDescriptor_e1014cab6f31e43b = []byte{ + // 2031 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, + 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, + 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, + 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, + 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65, + 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea, + 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x38, 0x24, 0xca, 0xcd, 0x53, 0xf5, 0x7e, 0xaf, 0x7f, 0x55, + 0xf5, 0xab, 0x7a, 0xaf, 0xab, 0x0d, 0xee, 0x1d, 0x7f, 0xdb, 0x53, 0x75, 0xbb, 0x7d, 0xec, 0x1f, + 0x12, 0xd7, 0x22, 0x94, 0x78, 0xed, 0x21, 0xb1, 0x34, 0xdb, 0x6d, 0x8b, 0x0e, 0xec, 0xe8, 0x6d, + 0xec, 0x38, 0x5e, 0x7b, 0x78, 0xbb, 0x3d, 0x20, 0x16, 0x71, 0x31, 0x25, 0x9a, 0xea, 0xb8, 0x36, + 0xb5, 0x21, 0x0c, 0x30, 0x2a, 0x76, 0x74, 0x95, 0x61, 0xd4, 0xe1, 0xed, 0xab, 0xb7, 0x06, 0x3a, + 0x3d, 0xf2, 0x0f, 0xd5, 0xbe, 0x6d, 0xb6, 0x07, 0xf6, 0xc0, 0x6e, 0x73, 0xe8, 0xa1, 0xff, 0x88, + 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x01, 0xc5, 0xd5, 0x56, 0xe2, 0x31, 0x7d, 0xdb, 0x25, 0x92, 0xc7, + 0x5c, 0xbd, 0x1b, 0x63, 0x4c, 0xdc, 0x3f, 0xd2, 0x2d, 0xe2, 0x8e, 0xda, 0xce, 0xf1, 0x80, 0x35, + 
0x78, 0x6d, 0x93, 0x50, 0x2c, 0x8b, 0x6a, 0x17, 0x45, 0xb9, 0xbe, 0x45, 0x75, 0x93, 0xe4, 0x02, + 0x5e, 0x9b, 0x14, 0xe0, 0xf5, 0x8f, 0x88, 0x89, 0x73, 0x71, 0xaf, 0x14, 0xc5, 0xf9, 0x54, 0x37, + 0xda, 0xba, 0x45, 0x3d, 0xea, 0x66, 0x83, 0x5a, 0xff, 0x51, 0x00, 0xec, 0xda, 0x16, 0x75, 0x6d, + 0xc3, 0x20, 0x2e, 0x22, 0x43, 0xdd, 0xd3, 0x6d, 0x0b, 0xfe, 0x14, 0xd4, 0xd8, 0x78, 0x34, 0x4c, + 0x71, 0x5d, 0xb9, 0xae, 0x6c, 0x2c, 0xdc, 0xf9, 0x96, 0x1a, 0x4f, 0x72, 0x44, 0xaf, 0x3a, 0xc7, + 0x03, 0xd6, 0xe0, 0xa9, 0x0c, 0xad, 0x0e, 0x6f, 0xab, 0xbb, 0x87, 0xef, 0x92, 0x3e, 0xed, 0x11, + 0x8a, 0x3b, 0xf0, 0xc9, 0x49, 0x73, 0x66, 0x7c, 0xd2, 0x04, 0x71, 0x1b, 0x8a, 0x58, 0xe1, 0x2e, + 0xa8, 0x70, 0xf6, 0x12, 0x67, 0xbf, 0x55, 0xc8, 0x2e, 0x06, 0xad, 0x22, 0xfc, 0xb3, 0x37, 0x1e, + 0x53, 0x62, 0xb1, 0xf4, 0x3a, 0x97, 0x04, 0x75, 0x65, 0x0b, 0x53, 0x8c, 0x38, 0x11, 0x7c, 0x19, + 0xd4, 0x5c, 0x91, 0x7e, 0xbd, 0x7c, 0x5d, 0xd9, 0x28, 0x77, 0x2e, 0x0b, 0x54, 0x2d, 0x1c, 0x16, + 0x8a, 0x10, 0xad, 0xbf, 0x2a, 0x60, 0x2d, 0x3f, 0xee, 0x6d, 0xdd, 0xa3, 0xf0, 0x9d, 0xdc, 0xd8, + 0xd5, 0xe9, 0xc6, 0xce, 0xa2, 0xf9, 0xc8, 0xa3, 0x07, 0x87, 0x2d, 0x89, 0x71, 0xbf, 0x0d, 0xaa, + 0x3a, 0x25, 0xa6, 0x57, 0x2f, 0x5d, 0x2f, 0x6f, 0x2c, 0xdc, 0xb9, 0xa1, 0xe6, 0x6b, 0x57, 0xcd, + 0x27, 0xd6, 0x59, 0x14, 0x94, 0xd5, 0xb7, 0x58, 0x30, 0x0a, 0x38, 0x5a, 0xff, 0x55, 0xc0, 0xfc, + 0x16, 0x26, 0xa6, 0x6d, 0xed, 0x13, 0x7a, 0x01, 0x8b, 0xd6, 0x05, 0x15, 0xcf, 0x21, 0x7d, 0xb1, + 0x68, 0x5f, 0x93, 0xe5, 0x1e, 0xa5, 0xb3, 0xef, 0x90, 0x7e, 0xbc, 0x50, 0xec, 0x17, 0xe2, 0xc1, + 0xf0, 0x6d, 0x30, 0xeb, 0x51, 0x4c, 0x7d, 0x8f, 0x2f, 0xd3, 0xc2, 0x9d, 0xaf, 0x9f, 0x4e, 0xc3, + 0xa1, 0x9d, 0x25, 0x41, 0x34, 0x1b, 0xfc, 0x46, 0x82, 0xa2, 0xf5, 0xaf, 0x12, 0x80, 0x11, 0xb6, + 0x6b, 0x5b, 0x9a, 0x4e, 0x59, 0xfd, 0xbe, 0x0e, 0x2a, 0x74, 0xe4, 0x10, 0x3e, 0x0d, 0xf3, 0x9d, + 0x1b, 0x61, 0x16, 0xf7, 0x47, 0x0e, 0xf9, 0xf8, 0xa4, 0xb9, 0x96, 0x8f, 0x60, 0x3d, 0x88, 0xc7, + 0xc0, 0xed, 0x28, 0xbf, 0x12, 0x8f, 0xbe, 0x9b, 0x7e, 0xf4, 0xc7, 0x27, 0x4d, 0xc9, 0x61, 0xa1, + 0x46, 0x4c, 0xe9, 0x04, 0xe1, 0x10, 0x40, 0x03, 0x7b, 0xf4, 0xbe, 0x8b, 0x2d, 0x2f, 0x78, 0x92, + 0x6e, 0x12, 0x31, 0xf2, 0x97, 0xa6, 0x5b, 0x1e, 0x16, 0xd1, 0xb9, 0x2a, 0xb2, 0x80, 0xdb, 0x39, + 0x36, 0x24, 0x79, 0x02, 0xbc, 0x01, 0x66, 0x5d, 0x82, 0x3d, 0xdb, 0xaa, 0x57, 0xf8, 0x28, 0xa2, + 0x09, 0x44, 0xbc, 0x15, 0x89, 0x5e, 0xf8, 0x22, 0x98, 0x33, 0x89, 0xe7, 0xe1, 0x01, 0xa9, 0x57, + 0x39, 0x70, 0x59, 0x00, 0xe7, 0x7a, 0x41, 0x33, 0x0a, 0xfb, 0x5b, 0xbf, 0x57, 0xc0, 0x62, 0x34, + 0x73, 0x17, 0xb0, 0x55, 0x3a, 0xe9, 0xad, 0xf2, 0xfc, 0xa9, 0x75, 0x52, 0xb0, 0x43, 0xde, 0x2b, + 0x27, 0x72, 0x66, 0x45, 0x08, 0x7f, 0x02, 0x6a, 0x1e, 0x31, 0x48, 0x9f, 0xda, 0xae, 0xc8, 0xf9, + 0x95, 0x29, 0x73, 0xc6, 0x87, 0xc4, 0xd8, 0x17, 0xa1, 0x9d, 0x4b, 0x2c, 0xe9, 0xf0, 0x17, 0x8a, + 0x28, 0xe1, 0x8f, 0x40, 0x8d, 0x12, 0xd3, 0x31, 0x30, 0x25, 0x62, 0x9b, 0xa4, 0xea, 0x9b, 0x95, + 0x0b, 0x23, 0xdb, 0xb3, 0xb5, 0xfb, 0x02, 0xc6, 0x37, 0x4a, 0x34, 0x0f, 0x61, 0x2b, 0x8a, 0x68, + 0xe0, 0x31, 0x58, 0xf2, 0x1d, 0x8d, 0x21, 0x29, 0x3b, 0xba, 0x07, 0x23, 0x51, 0x3e, 0x37, 0x4f, + 0x9d, 0x90, 0x83, 0x54, 0x48, 0x67, 0x4d, 0x3c, 0x60, 0x29, 0xdd, 0x8e, 0x32, 0xd4, 0x70, 0x13, + 0x2c, 0x9b, 0xba, 0x85, 0x08, 0xd6, 0x46, 0xfb, 0xa4, 0x6f, 0x5b, 0x9a, 0xc7, 0x0b, 0xa8, 0xda, + 0x59, 0x17, 0x04, 0xcb, 0xbd, 0x74, 0x37, 0xca, 0xe2, 0xe1, 0x36, 0x58, 0x0d, 0xcf, 0xd9, 0x1f, + 0xe8, 0x1e, 0xb5, 0xdd, 0xd1, 0xb6, 0x6e, 0xea, 0xb4, 0x3e, 0xcb, 0x79, 0xea, 0xe3, 0x93, 0xe6, + 0x2a, 0x92, 0xf4, 0x23, 
0x69, 0x54, 0xeb, 0x37, 0xb3, 0x60, 0x39, 0x73, 0x1a, 0xc0, 0x07, 0x60, + 0xad, 0xef, 0xbb, 0x2e, 0xb1, 0xe8, 0x8e, 0x6f, 0x1e, 0x12, 0x77, 0xbf, 0x7f, 0x44, 0x34, 0xdf, + 0x20, 0x1a, 0x5f, 0xd1, 0x6a, 0xa7, 0x21, 0x72, 0x5d, 0xeb, 0x4a, 0x51, 0xa8, 0x20, 0x1a, 0xfe, + 0x10, 0x40, 0x8b, 0x37, 0xf5, 0x74, 0xcf, 0x8b, 0x38, 0x4b, 0x9c, 0x33, 0xda, 0x80, 0x3b, 0x39, + 0x04, 0x92, 0x44, 0xb1, 0x1c, 0x35, 0xe2, 0xe9, 0x2e, 0xd1, 0xb2, 0x39, 0x96, 0xd3, 0x39, 0x6e, + 0x49, 0x51, 0xa8, 0x20, 0x1a, 0xbe, 0x0a, 0x16, 0x82, 0xa7, 0xf1, 0x39, 0x17, 0x8b, 0xb3, 0x22, + 0xc8, 0x16, 0x76, 0xe2, 0x2e, 0x94, 0xc4, 0xb1, 0xa1, 0xd9, 0x87, 0x1e, 0x71, 0x87, 0x44, 0x7b, + 0x33, 0xf0, 0x00, 0x4c, 0x28, 0xab, 0x5c, 0x28, 0xa3, 0xa1, 0xed, 0xe6, 0x10, 0x48, 0x12, 0xc5, + 0x86, 0x16, 0x54, 0x4d, 0x6e, 0x68, 0xb3, 0xe9, 0xa1, 0x1d, 0x48, 0x51, 0xa8, 0x20, 0x9a, 0xd5, + 0x5e, 0x90, 0xf2, 0xe6, 0x10, 0xeb, 0x06, 0x3e, 0x34, 0x48, 0x7d, 0x2e, 0x5d, 0x7b, 0x3b, 0xe9, + 0x6e, 0x94, 0xc5, 0xc3, 0x37, 0xc1, 0x95, 0xa0, 0xe9, 0xc0, 0xc2, 0x11, 0x49, 0x8d, 0x93, 0x3c, + 0x27, 0x48, 0xae, 0xec, 0x64, 0x01, 0x28, 0x1f, 0x03, 0x5f, 0x07, 0x4b, 0x7d, 0xdb, 0x30, 0x78, + 0x3d, 0x76, 0x6d, 0xdf, 0xa2, 0xf5, 0x79, 0xce, 0x02, 0xd9, 0x1e, 0xea, 0xa6, 0x7a, 0x50, 0x06, + 0x09, 0x1f, 0x02, 0xd0, 0x0f, 0xe5, 0xc0, 0xab, 0x83, 0x62, 0xa1, 0xcf, 0xeb, 0x50, 0x2c, 0xc0, + 0x51, 0x93, 0x87, 0x12, 0x6c, 0xad, 0xf7, 0x14, 0xb0, 0x5e, 0xb0, 0xc7, 0xe1, 0xf7, 0x52, 0xaa, + 0x77, 0x33, 0xa3, 0x7a, 0xd7, 0x0a, 0xc2, 0x12, 0xd2, 0xd7, 0x07, 0x8b, 0xcc, 0x77, 0xe8, 0xd6, + 0x20, 0x80, 0x88, 0x13, 0xec, 0x25, 0x59, 0xee, 0x28, 0x09, 0x8c, 0x8f, 0xe1, 0x2b, 0xe3, 0x93, + 0xe6, 0x62, 0xaa, 0x0f, 0xa5, 0x39, 0x5b, 0xbf, 0x2c, 0x01, 0xb0, 0x45, 0x1c, 0xc3, 0x1e, 0x99, + 0xc4, 0xba, 0x08, 0xd7, 0xb2, 0x95, 0x72, 0x2d, 0x2d, 0xe9, 0x42, 0x44, 0xf9, 0x14, 0xda, 0x96, + 0xed, 0x8c, 0x6d, 0xf9, 0xc6, 0x04, 0x9e, 0xd3, 0x7d, 0xcb, 0x3f, 0xca, 0x60, 0x25, 0x06, 0xc7, + 0xc6, 0xe5, 0x5e, 0x6a, 0x09, 0x5f, 0xc8, 0x2c, 0xe1, 0xba, 0x24, 0xe4, 0x53, 0x73, 0x2e, 0xef, + 0x82, 0x25, 0xe6, 0x2b, 0x82, 0x55, 0xe3, 0xae, 0x65, 0xf6, 0xcc, 0xae, 0x25, 0x52, 0x9d, 0xed, + 0x14, 0x13, 0xca, 0x30, 0x17, 0xb8, 0xa4, 0xb9, 0xcf, 0xa3, 0x4b, 0xfa, 0x83, 0x02, 0x96, 0xe2, + 0x65, 0xba, 0x00, 0x9b, 0xd4, 0x4d, 0xdb, 0xa4, 0xc6, 0xe9, 0x75, 0x59, 0xe0, 0x93, 0xfe, 0x5e, + 0x49, 0x66, 0xcd, 0x8d, 0xd2, 0x06, 0x7b, 0xa1, 0x72, 0x0c, 0xbd, 0x8f, 0x3d, 0x21, 0xab, 0x97, + 0x82, 0x97, 0xa9, 0xa0, 0x0d, 0x45, 0xbd, 0x29, 0x4b, 0x55, 0xfa, 0x74, 0x2d, 0x55, 0xf9, 0x93, + 0xb1, 0x54, 0xf7, 0x41, 0xcd, 0x0b, 0xcd, 0x54, 0x85, 0x53, 0xde, 0x98, 0xb4, 0x9d, 0x85, 0x8f, + 0x8a, 0x58, 0x23, 0x07, 0x15, 0x31, 0xc9, 0xbc, 0x53, 0xf5, 0xb3, 0xf4, 0x4e, 0xac, 0xbc, 0x1d, + 0xec, 0x7b, 0x44, 0xe3, 0x5b, 0xa9, 0x16, 0x97, 0xf7, 0x1e, 0x6f, 0x45, 0xa2, 0x17, 0x1e, 0x80, + 0x75, 0xc7, 0xb5, 0x07, 0x2e, 0xf1, 0xbc, 0x2d, 0x82, 0x35, 0x43, 0xb7, 0x48, 0x38, 0x80, 0x40, + 0xf5, 0xae, 0x8d, 0x4f, 0x9a, 0xeb, 0x7b, 0x72, 0x08, 0x2a, 0x8a, 0x6d, 0xfd, 0xb9, 0x02, 0x2e, + 0x67, 0x4f, 0xc4, 0x02, 0x23, 0xa2, 0x9c, 0xcb, 0x88, 0xbc, 0x9c, 0x28, 0xd1, 0xc0, 0xa5, 0x25, + 0xde, 0xf9, 0x73, 0x65, 0xba, 0x09, 0x96, 0x85, 0xf1, 0x08, 0x3b, 0x85, 0x15, 0x8b, 0x96, 0xe7, + 0x20, 0xdd, 0x8d, 0xb2, 0x78, 0x78, 0x0f, 0x2c, 0xba, 0xdc, 0x5b, 0x85, 0x04, 0x81, 0x3f, 0xf9, + 0x8a, 0x20, 0x58, 0x44, 0xc9, 0x4e, 0x94, 0xc6, 0x32, 0x6f, 0x12, 0x5b, 0x8e, 0x90, 0xa0, 0x92, + 0xf6, 0x26, 0x9b, 0x59, 0x00, 0xca, 0xc7, 0xc0, 0x1e, 0x58, 0xf1, 0xad, 0x3c, 0x55, 0x50, 0x6b, + 0xd7, 0x04, 0xd5, 0xca, 0x41, 0x1e, 0x82, 0x64, 
0x71, 0xf0, 0xc7, 0x29, 0xbb, 0x32, 0xcb, 0x4f, + 0x91, 0x17, 0x4e, 0xdf, 0x0e, 0x53, 0xfb, 0x15, 0x89, 0x8f, 0xaa, 0x4d, 0xeb, 0xa3, 0x5a, 0x7f, + 0x52, 0x00, 0xcc, 0x6f, 0xc1, 0x89, 0x2f, 0xf7, 0xb9, 0x88, 0x84, 0x44, 0x6a, 0x72, 0x87, 0x73, + 0x73, 0xb2, 0xc3, 0x89, 0x4f, 0xd0, 0xe9, 0x2c, 0x8e, 0x98, 0xde, 0x8b, 0xb9, 0x98, 0x99, 0xc2, + 0xe2, 0xc4, 0xf9, 0x3c, 0x9b, 0xc5, 0x49, 0xf0, 0x9c, 0x6e, 0x71, 0xfe, 0x5d, 0x02, 0x2b, 0x31, + 0x78, 0x6a, 0x8b, 0x23, 0x09, 0xf9, 0xf2, 0x72, 0x66, 0x3a, 0xdb, 0x11, 0x4f, 0xdd, 0xff, 0x89, + 0xed, 0x88, 0x13, 0x2a, 0xb0, 0x1d, 0xbf, 0x2b, 0x25, 0xb3, 0x3e, 0xa3, 0xed, 0xf8, 0x04, 0xae, + 0x2a, 0x3e, 0x77, 0xce, 0xa5, 0xf5, 0x97, 0x32, 0xb8, 0x9c, 0xdd, 0x82, 0x29, 0x1d, 0x54, 0x26, + 0xea, 0xe0, 0x1e, 0x58, 0x7d, 0xe4, 0x1b, 0xc6, 0x88, 0x8f, 0x21, 0x21, 0x86, 0x81, 0x82, 0x7e, + 0x55, 0x44, 0xae, 0x7e, 0x5f, 0x82, 0x41, 0xd2, 0xc8, 0xbc, 0x2c, 0x56, 0x9e, 0x55, 0x16, 0xab, + 0xe7, 0x90, 0x45, 0xb9, 0xb3, 0x28, 0x9f, 0xcb, 0x59, 0x4c, 0xad, 0x89, 0x92, 0xe3, 0x6a, 0xe2, + 0x3b, 0xfc, 0xaf, 0x15, 0xb0, 0x26, 0x7f, 0x7d, 0x86, 0x06, 0x58, 0x32, 0xf1, 0xe3, 0xe4, 0xe5, + 0xc5, 0x24, 0xc1, 0xf0, 0xa9, 0x6e, 0xa8, 0xc1, 0xd7, 0x1d, 0xf5, 0x2d, 0x8b, 0xee, 0xba, 0xfb, + 0xd4, 0xd5, 0xad, 0x41, 0x20, 0xb0, 0xbd, 0x14, 0x17, 0xca, 0x70, 0xb7, 0x3e, 0x54, 0xc0, 0x7a, + 0x81, 0xca, 0x5d, 0x6c, 0x26, 0xf0, 0x21, 0xa8, 0x99, 0xf8, 0xf1, 0xbe, 0xef, 0x0e, 0x42, 0x49, + 0x3e, 0xfb, 0x73, 0xf8, 0x2e, 0xec, 0x09, 0x16, 0x14, 0xf1, 0xb5, 0x76, 0xc1, 0xf5, 0xd4, 0x20, + 0xd9, 0xa6, 0x21, 0x8f, 0x7c, 0x83, 0xef, 0x1f, 0xe1, 0x29, 0x6e, 0x82, 0x79, 0x07, 0xbb, 0x54, + 0x8f, 0xcc, 0x68, 0xb5, 0xb3, 0x38, 0x3e, 0x69, 0xce, 0xef, 0x85, 0x8d, 0x28, 0xee, 0x6f, 0xfd, + 0xaa, 0x04, 0x16, 0x12, 0x24, 0x17, 0xa0, 0xef, 0x6f, 0xa4, 0xf4, 0x5d, 0xfa, 0xc5, 0x24, 0x39, + 0xaa, 0x22, 0x81, 0xef, 0x65, 0x04, 0xfe, 0x9b, 0x93, 0x88, 0x4e, 0x57, 0xf8, 0x8f, 0x4a, 0x60, + 0x35, 0x81, 0x8e, 0x25, 0xfe, 0x3b, 0x29, 0x89, 0xdf, 0xc8, 0x48, 0x7c, 0x5d, 0x16, 0xf3, 0xa5, + 0xc6, 0x4f, 0xd6, 0xf8, 0x3f, 0x2a, 0x60, 0x39, 0x31, 0x77, 0x17, 0x20, 0xf2, 0x5b, 0x69, 0x91, + 0x6f, 0x4e, 0xa8, 0x97, 0x02, 0x95, 0x7f, 0x52, 0x4d, 0xe5, 0xfd, 0x85, 0xbf, 0x5d, 0xf8, 0x39, + 0x58, 0x1d, 0xda, 0x86, 0x6f, 0x92, 0xae, 0x81, 0x75, 0x33, 0x04, 0x30, 0x55, 0x64, 0x93, 0xf8, + 0xa2, 0x94, 0x9e, 0xb8, 0x9e, 0xee, 0x51, 0x62, 0xd1, 0x07, 0x71, 0x64, 0xac, 0xc5, 0x0f, 0x24, + 0x74, 0x48, 0xfa, 0x10, 0xf8, 0x2a, 0x58, 0x60, 0x6a, 0xa6, 0xf7, 0xc9, 0x0e, 0x36, 0xc3, 0x9a, + 0x8a, 0xbe, 0x0f, 0xec, 0xc7, 0x5d, 0x28, 0x89, 0x83, 0x47, 0x60, 0xc5, 0xb1, 0xb5, 0x1e, 0xb6, + 0xf0, 0x80, 0xb0, 0xf3, 0x7f, 0xcf, 0x36, 0xf4, 0xfe, 0x88, 0xdf, 0x3b, 0xcc, 0x77, 0x5e, 0x0b, + 0xdf, 0x29, 0xf7, 0xf2, 0x10, 0xe6, 0xd9, 0x25, 0xcd, 0x7c, 0x3f, 0xcb, 0x28, 0xa1, 0x99, 0xfb, + 0x9c, 0x35, 0x97, 0xfb, 0x1f, 0x00, 0x59, 0x71, 0x9d, 0xf3, 0x83, 0x56, 0xd1, 0x8d, 0x4a, 0xed, + 0x5c, 0x5f, 0xa3, 0x3e, 0xaa, 0x80, 0x2b, 0xb9, 0x03, 0xf2, 0x33, 0xbc, 0xd3, 0xc8, 0x39, 0xaf, + 0xf2, 0x19, 0x9c, 0xd7, 0x26, 0x58, 0x16, 0x1f, 0xc2, 0x32, 0xc6, 0x2d, 0x32, 0xd0, 0xdd, 0x74, + 0x37, 0xca, 0xe2, 0x65, 0x77, 0x2a, 0xd5, 0x33, 0xde, 0xa9, 0x24, 0xb3, 0x10, 0xff, 0xbf, 0x11, + 0x54, 0x5d, 0x3e, 0x0b, 0xf1, 0x6f, 0x1c, 0x59, 0x3c, 0xfc, 0x6e, 0x58, 0x52, 0x11, 0xc3, 0x1c, + 0x67, 0xc8, 0xd4, 0x48, 0x44, 0x90, 0x41, 0x3f, 0xd3, 0xc7, 0x9e, 0x77, 0x24, 0x1f, 0x7b, 0x36, + 0x26, 0x94, 0xf2, 0xf4, 0x56, 0xf1, 0x6f, 0x0a, 0x78, 0xae, 0x70, 0x0f, 0xc0, 0xcd, 0x94, 0xce, + 0xde, 0xca, 0xe8, 0xec, 0xf3, 0x85, 0x81, 0x09, 0xb1, 0x35, 0xe5, 0x17, 
0x22, 0x77, 0x27, 0x5e, + 0x88, 0x48, 0x5c, 0xd4, 0xe4, 0x9b, 0x91, 0xce, 0xc6, 0x93, 0xa7, 0x8d, 0x99, 0xf7, 0x9f, 0x36, + 0x66, 0x3e, 0x78, 0xda, 0x98, 0xf9, 0xc5, 0xb8, 0xa1, 0x3c, 0x19, 0x37, 0x94, 0xf7, 0xc7, 0x0d, + 0xe5, 0x83, 0x71, 0x43, 0xf9, 0xe7, 0xb8, 0xa1, 0xfc, 0xf6, 0xc3, 0xc6, 0xcc, 0xc3, 0xd2, 0xf0, + 0xf6, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x59, 0xb3, 0x11, 0xc0, 0x12, 0x26, 0x00, 0x00, +} + func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -245,36 +1009,45 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -282,37 +1055,46 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != 
nil { return nil, err } @@ -320,41 +1102,52 @@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -362,41 +1155,52 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -404,37 +1208,46 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -442,51 +1255,62 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n9, err := m.Selector.MarshalTo(dAtA[i:]) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n9 - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n10, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -494,58 +1318,65 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -553,31 +1384,39 @@ func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil 
{ - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -585,41 +1424,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n14, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n15, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -627,49 +1477,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - 
n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -677,37 +1540,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n18, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -715,69 +1587,80 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + 
dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n19 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x38 - i++ - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i++ - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -785,52 +1668,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + 
i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -838,31 +1728,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -870,41 +1768,52 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -912,41 +1821,52 @@ func (m 
*ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -954,37 +1874,46 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -992,43 +1921,52 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n29, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n29 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1036,44 +1974,51 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + _ = l if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1081,27 +2026,34 @@ func (m 
*RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n30 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1109,37 +2061,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1147,22 +2108,27 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Partition != nil { - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1170,41 +2136,52 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n35, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n35 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1212,41 +2189,52 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n36, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1254,37 +2242,46 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n37, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1292,73 +2289,88 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n38, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n38 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n39, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n39 + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + 
dAtA[i] = 0x12 } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n40, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1366,57 +2378,66 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m 
*StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1424,37 +2445,50 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n41, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n41 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1466,6 +2500,9 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1480,6 +2517,9 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1492,6 +2532,9 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1508,6 +2551,9 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1522,6 +2568,9 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Selector != nil { @@ -1540,6 +2589,9 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -1563,6 +2615,9 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1575,6 +2630,9 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1587,6 +2645,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1605,6 +2666,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1619,6 +2683,9 @@ func (m *DeploymentList) Size() (n int) { } func (m 
*DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1644,6 +2711,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1665,6 +2735,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1677,6 +2750,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1689,6 +2765,9 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1705,6 +2784,9 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1719,6 +2801,9 @@ func (m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1735,6 +2820,9 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1752,6 +2840,9 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1762,6 +2853,9 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1776,6 +2870,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Partition != nil { @@ -1785,6 +2882,9 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1797,6 +2897,9 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1813,6 +2916,9 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1827,6 +2933,9 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1857,6 +2966,9 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1881,6 +2993,9 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1893,14 +3008,7 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1910,8 +3018,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := 
strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -1921,9 +3029,14 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1933,7 +3046,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1947,7 +3060,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1958,9 +3071,14 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), 
"DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1970,8 +3088,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -1983,6 +3101,11 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -1993,7 +3116,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2004,7 +3127,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -2014,7 +3137,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2030,8 +3153,8 @@ func (this *DeploymentCondition) String() string 
{ `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2040,9 +3163,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2053,8 +3181,8 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -2068,13 +3196,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), 
"DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -2087,7 +3220,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -2097,7 +3230,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2111,7 +3244,7 @@ func (this *ReplicaSetCondition) String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2122,9 +3255,14 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2135,8 +3273,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -2146,13 +3284,18 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2162,7 +3305,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2172,8 +3315,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2193,7 +3336,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2207,7 +3350,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", 
"v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2218,9 +3361,14 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2229,11 +3377,16 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -2246,6 +3399,11 @@ func (this *StatefulSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -2255,7 +3413,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", 
"StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2266,7 +3424,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -2294,7 +3452,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2322,7 +3480,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2331,6 +3489,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2352,7 +3513,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2361,6 +3522,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2382,7 +3546,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2396,6 +3560,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2423,7 +3590,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2451,7 +3618,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2460,6 +3627,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2481,7 +3651,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2490,6 +3660,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2507,6 +3680,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if 
(iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2534,7 +3710,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2562,7 +3738,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2571,6 +3747,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2592,7 +3771,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2601,6 +3780,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2622,7 +3804,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2631,6 +3813,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2647,6 +3832,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2674,7 +3862,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2702,7 +3890,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2712,6 +3900,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2731,7 +3922,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2741,6 +3932,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2760,7 +3954,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2769,6 +3963,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2790,7 +3987,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2800,6 +3997,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2819,7 +4019,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2829,6 +4029,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2843,6 +4046,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2870,7 +4076,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2898,7 +4104,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2907,6 +4113,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2928,7 +4137,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2937,6 +4146,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2954,6 +4166,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2981,7 +4196,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3009,7 +4224,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3018,11 +4233,14 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3042,7 +4260,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3051,6 +4269,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3072,7 +4293,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3081,6 +4302,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3102,7 +4326,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3121,7 +4345,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3136,6 +4360,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3163,7 +4390,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3191,7 +4418,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3210,7 +4437,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3229,7 +4456,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + m.DesiredNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3248,7 +4475,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift + m.NumberReady |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3267,7 +4494,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3286,7 +4513,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3305,7 +4532,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift + m.NumberAvailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3324,7 +4551,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift + m.NumberUnavailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3343,7 +4570,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3363,7 +4590,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3372,6 +4599,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3389,6 +4619,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3416,7 +4649,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3444,7 +4677,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3454,6 +4687,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3473,7 +4709,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3482,6 +4718,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3501,6 +4740,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3528,7 +4770,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3556,7 +4798,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3565,6 +4807,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3586,7 +4831,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3595,6 +4840,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3616,7 +4864,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3625,6 +4873,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3641,6 
+4892,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3668,7 +4922,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3696,7 +4950,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3706,6 +4960,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3725,7 +4982,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3735,6 +4992,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3754,7 +5014,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3764,6 +5024,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3783,7 +5046,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3793,6 +5056,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3812,7 +5078,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3821,6 +5087,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3842,7 +5111,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3851,6 +5120,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3867,6 +5139,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3894,7 +5169,7 @@ func (m *DeploymentList) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3922,7 +5197,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3931,6 +5206,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3952,7 +5230,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3961,6 +5239,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3978,6 +5259,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4005,7 +5289,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4033,7 +5317,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4053,7 +5337,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4062,11 +5346,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4086,7 +5373,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4095,6 +5382,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4116,7 +5406,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4125,6 +5415,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4146,7 +5439,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4165,7 +5458,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4185,7 +5478,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4205,7 +5498,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4220,6 +5513,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4247,7 +5543,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4275,7 +5571,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4294,7 +5590,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4313,7 +5609,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4332,7 +5628,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4351,7 +5647,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4370,7 +5666,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4379,6 +5675,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4401,7 +5700,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4420,7 +5719,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4435,6 +5734,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4462,7 +5764,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4490,7 +5792,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) 
<< shift if b < 0x80 { break } @@ -4500,6 +5802,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4519,7 +5824,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4528,6 +5833,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4547,6 +5855,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4574,7 +5885,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4602,7 +5913,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4611,6 +5922,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4632,7 +5946,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4641,6 +5955,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4662,7 +5979,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4671,6 +5988,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4687,6 +6007,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4714,7 +6037,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4742,7 +6065,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4752,6 +6075,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4771,7 +6097,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen 
|= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4781,6 +6107,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4800,7 +6129,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4809,6 +6138,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4830,7 +6162,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4840,6 +6172,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4859,7 +6194,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4869,6 +6204,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4883,6 +6221,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4910,7 +6251,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4938,7 +6279,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4947,6 +6288,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4968,7 +6312,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4977,6 +6321,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4994,6 +6341,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5021,7 +6371,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } 
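The hunks before and after this point repeat the same two mechanical changes through every regenerated unmarshaler: the varint accumulation is rewritten from (uint64(b) & 0x7F) << shift to uint64(b&0x7F) << shift (mask first, then convert), and extra guards are added so offsets that wrap negative (postIndex < 0, (iNdEx + skippy) < 0, iNdEx < 0) return ErrInvalidLengthGenerated instead of panicking on an out-of-range slice index. A minimal standalone sketch of that guarded length-prefix pattern, using only the standard library; the names readLength and errInvalidLength are hypothetical and are not part of the generated file:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical sketch of the guarded varint/length pattern; not the generated code itself.
var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// readLength decodes a varint length prefix at iNdEx and returns the start and
// end offsets of the field body, rejecting lengths that overflow int.
func readLength(dAtA []byte, iNdEx int) (start, end int, err error) {
	l := len(dAtA)
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		if iNdEx >= l {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := dAtA[iNdEx]
		iNdEx++
		msglen |= int(b&0x7F) << shift // same form as the regenerated code
		if b < 0x80 {
			break
		}
	}
	if msglen < 0 {
		return 0, 0, errInvalidLength
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 { // the added guard: iNdEx+msglen wrapped around
		return 0, 0, errInvalidLength
	}
	if postIndex > l {
		return 0, 0, errors.New("unexpected EOF")
	}
	return iNdEx, postIndex, nil
}

func main() {
	// 0x03 is a length prefix of 3, followed by three payload bytes.
	start, end, err := readLength([]byte{0x03, 'a', 'b', 'c'}, 0)
	fmt.Println(start, end, err) // 1 4 <nil>
}
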
@@ -5049,7 +6399,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5069,7 +6419,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5078,11 +6428,14 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5102,7 +6455,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5111,6 +6464,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5132,7 +6488,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5146,6 +6502,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5173,7 +6532,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5201,7 +6560,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5220,7 +6579,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5239,7 +6598,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5258,7 +6617,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5277,7 +6636,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5296,7 +6655,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5305,6 +6664,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-5322,6 +6684,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5349,7 +6714,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5377,7 +6742,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5386,11 +6751,14 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5405,6 +6773,9 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5432,7 +6803,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5460,7 +6831,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5469,11 +6840,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5493,7 +6867,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5502,11 +6876,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5521,6 +6898,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5548,7 +6928,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5576,7 +6956,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5591,6 +6971,9 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5618,7 +7001,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5646,7 +7029,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5655,6 +7038,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5676,7 +7062,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5685,6 +7071,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5706,7 +7095,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5715,6 +7104,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5731,6 +7123,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5758,7 +7153,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5786,7 +7181,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5796,6 +7191,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5815,7 +7213,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5825,6 +7223,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5844,7 +7245,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5853,6 +7254,9 @@ func (m 
*StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5874,7 +7278,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5884,6 +7288,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5903,7 +7310,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5913,6 +7320,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5927,6 +7337,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5954,7 +7367,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5982,7 +7395,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5991,6 +7404,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6012,7 +7428,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6021,6 +7437,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6038,6 +7457,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6065,7 +7487,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6093,7 +7515,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6113,7 +7535,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6122,11 +7544,14 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6146,7 +7571,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6155,6 +7580,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6176,7 +7604,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6185,10 +7613,13 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -6207,7 +7638,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6217,6 +7648,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6236,7 +7670,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6246,6 +7680,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6265,7 +7702,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6274,6 +7711,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6295,7 +7735,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6310,6 +7750,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6337,7 +7780,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6365,7 +7808,7 @@ func (m 
*StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6384,7 +7827,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6403,7 +7846,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6422,7 +7865,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6441,7 +7884,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6460,7 +7903,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6470,6 +7913,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6489,7 +7935,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6499,6 +7945,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6518,7 +7967,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6538,7 +7987,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6547,6 +7996,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6564,6 +8016,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6591,7 +8046,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6619,7 +8074,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6629,6 +8084,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6648,7 +8106,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6657,6 +8115,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6676,6 +8137,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6742,10 +8206,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -6774,6 +8241,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -6792,139 +8262,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2037 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, - 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, - 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, - 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, - 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65, - 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea, - 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x75, 0x50, 0x94, 0x9b, 0xa7, 0xea, 0xfd, 0x5e, 0xff, 0xaa, - 0xea, 0x57, 0xf5, 0x5e, 0x57, 0x1b, 0xdc, 0x3b, 0xfe, 0xb6, 0xa7, 0xea, 0x76, 0xfb, 0xd8, 0x3f, - 0x24, 0xae, 0x45, 0x28, 0xf1, 0xda, 0x43, 0x62, 0x69, 0xb6, 0xdb, 0x16, 0x1d, 0xd8, 0xd1, 0xdb, - 0xd8, 0x71, 0xbc, 0xf6, 0xf0, 0x76, 0x7b, 0x40, 0x2c, 0xe2, 0x62, 0x4a, 0x34, 0xd5, 0x71, 0x6d, - 0x6a, 0x43, 0x18, 0x60, 0x54, 0xec, 0xe8, 0x2a, 0xc3, 0xa8, 0xc3, 0xdb, 0x57, 0x6f, 0x0d, 0x74, - 0x7a, 0xe4, 0x1f, 0xaa, 0x7d, 0xdb, 0x6c, 0x0f, 0xec, 0x81, 0xdd, 0xe6, 0xd0, 0x43, 0xff, 0x11, - 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x02, 0x8a, 0xab, 0xad, 0xc4, 0x63, 0xfa, 0xb6, 0x4b, 0x24, 0x8f, - 0xb9, 0x7a, 0x37, 0xc6, 0x98, 0xb8, 0x7f, 0xa4, 0x5b, 0xc4, 0x1d, 0xb5, 0x9d, 0xe3, 0x01, 0x6b, - 0xf0, 0xda, 0x26, 0xa1, 0x58, 0x16, 0xd5, 0x2e, 0x8a, 0x72, 0x7d, 0x8b, 0xea, 0x26, 0xc9, 0x05, - 0xbc, 0x31, 0x29, 0xc0, 0xeb, 0x1f, 0x11, 0x13, 0xe7, 0xe2, 0x5e, 0x2b, 0x8a, 0xf3, 0xa9, 0x6e, - 0xb4, 0x75, 0x8b, 0x7a, 0xd4, 0xcd, 0x06, 0xb5, 0xfe, 0xa3, 0x00, 0xd8, 0xb5, 0x2d, 0xea, 0xda, - 0x86, 0x41, 0x5c, 0x44, 0x86, 0xba, 0xa7, 0xdb, 0x16, 0xfc, 0x29, 0xa8, 0xb1, 0xf1, 0x68, 0x98, - 0xe2, 0xba, 0x72, 0x5d, 0xd9, 0x58, 0xb8, 0xf3, 0x2d, 0x35, 0x9e, 0xe4, 0x88, 0x5e, 0x75, 0x8e, - 0x07, 
- (remainder of the 2037-byte gzipped FileDescriptorProto hex literal elided; the whole descriptor is deleted together with the init() registration above) -} diff --git
a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index fea81922f3b6b..6c55279745761 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -41,7 +41,7 @@ option go_package = "v1"; // depend on its stability. It is primarily for internal use by controllers. message ControllerRevision { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -54,7 +54,7 @@ message ControllerRevision { // ControllerRevisionList is a resource containing a list of ControllerRevision objects. message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -65,12 +65,12 @@ message ControllerRevisionList { // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetSpec spec = 2; @@ -78,7 +78,7 @@ message DaemonSet { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetStatus status = 3; } @@ -107,7 +107,7 @@ message DaemonSetCondition { // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -366,12 +366,12 @@ message DeploymentStrategy { message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetSpec spec = 2; @@ -379,7 +379,7 @@ message ReplicaSet { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetStatus status = 3; } @@ -408,7 +408,7 @@ message ReplicaSetCondition { // ReplicaSetList is a collection of ReplicaSets. message ReplicaSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 2fe8574581211..e003a0c4f7c05 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -95,13 +95,13 @@ const ( // ordering constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -618,12 +618,12 @@ type DaemonSetCondition struct { type DaemonSet struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -631,7 +631,7 @@ type DaemonSet struct { // out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -649,7 +649,7 @@ const ( type DaemonSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -668,12 +668,12 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -681,7 +681,7 @@ type ReplicaSet struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -692,7 +692,7 @@ type ReplicaSet struct { type ReplicaSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -800,7 +800,7 @@ type ReplicaSetCondition struct { type ControllerRevision struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -817,7 +817,7 @@ type ControllerRevision struct { type ControllerRevisionList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 7e992c58469a1..3f0299d03335e 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", } @@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string { var map_ControllerRevisionList = map[string]string{ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of ControllerRevisions", } @@ -50,9 +50,9 @@ func (ControllerRevisionList) SwaggerDoc() map[string]string { var map_DaemonSet = map[string]string{ "": "DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -74,7 +74,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "A list of daemon sets.", } @@ -202,9 +202,9 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -226,7 +226,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ReplicaSets. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index d2d49187c0556..921e055cf8b6e 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -17,57 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - Deployment - DeploymentCondition - DeploymentList - DeploymentRollback - DeploymentSpec - DeploymentStatus - DeploymentStrategy - RollbackConfig - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - Scale - ScaleSpec - ScaleStatus - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ package v1beta1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" + strings "strings" - io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // Reference imports to suppress errors if they are not otherwise used. @@ -81,96 +49,594 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{2} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} + +var xxx_messageInfo_Deployment proto.InternalMessageInfo + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{3} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) 
+} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{4} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo + +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{5} +} +func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentRollback.Merge(m, src) +} +func (m *DeploymentRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentRollback) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentRollback.DiscardUnknown(m) +} -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{6} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} 
-func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{7} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} -func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } -func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{8} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{9} +} +func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollbackConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackConfig.Merge(m, src) +} +func (m *RollbackConfig) XXX_Size() int { + return m.Size() +} +func (m *RollbackConfig) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackConfig.DiscardUnknown(m) +} -func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } -func (*RollbackConfig) ProtoMessage() 
{} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptor_2a07313e8f66e805, []int{10} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_2a07313e8f66e805, []int{11} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{12} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{13} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func 
(m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{14} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) } -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{15} +} +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{16} +} +func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{17} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{18} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{19} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return 
fileDescriptorGenerated, []int{20} + return fileDescriptor_2a07313e8f66e805, []int{20} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) } +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo + func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta1.ControllerRevision") proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta1.ControllerRevisionList") @@ -178,6 +644,7 @@ func init() { proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta1.DeploymentCondition") proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta1.DeploymentList") proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry") proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta1.DeploymentStrategy") @@ -187,6 +654,7 @@ func init() { proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus.SelectorEntry") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") @@ -194,10 +662,135 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetUpdateStrategy") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptor_2a07313e8f66e805) +} + +var fileDescriptor_2a07313e8f66e805 = []byte{ + // 1855 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47, + 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0x35, + 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, + 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0xab, + 0x87, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82, + 0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f, + 0x77, 0xdb, 0x6d, 0xa4, 0xb5, 0x04, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf, + 0xc0, 0x0f, 0xce, 0xde, 0xf3, 0x54, 0xdd, 0xee, 0x9d, 0xf9, 
0x03, 0xea, 0x5a, 0x94, 0x51, 0xaf, + 0x37, 0xa6, 0xd6, 0xd0, 0x76, 0x7b, 0x52, 0x40, 0x1c, 0xbd, 0x47, 0x1c, 0xc7, 0xeb, 0x8d, 0x6f, + 0x0f, 0x28, 0x23, 0xb7, 0x7b, 0x23, 0x6a, 0x51, 0x97, 0x30, 0x3a, 0x54, 0x1d, 0xd7, 0x66, 0x36, + 0x5a, 0x0b, 0x14, 0x55, 0xe2, 0xe8, 0x2a, 0x57, 0x54, 0xa5, 0xe2, 0xfa, 0xf6, 0x48, 0x67, 0x8f, + 0xfc, 0x81, 0xaa, 0xd9, 0x66, 0x6f, 0x64, 0x8f, 0xec, 0x9e, 0xd0, 0x1f, 0xf8, 0x0f, 0xc5, 0x97, + 0xf8, 0x10, 0xbf, 0x02, 0x9c, 0xf5, 0x6e, 0xc2, 0xa1, 0x66, 0xbb, 0xb4, 0x37, 0xce, 0xf9, 0x5a, + 0x7f, 0x27, 0xd6, 0x31, 0x89, 0xf6, 0x48, 0xb7, 0xa8, 0x3b, 0xe9, 0x39, 0x67, 0x23, 0xbe, 0xe0, + 0xf5, 0x4c, 0xca, 0x48, 0x91, 0x55, 0xaf, 0xcc, 0xca, 0xf5, 0x2d, 0xa6, 0x9b, 0x34, 0x67, 0xf0, + 0xee, 0x65, 0x06, 0x9e, 0xf6, 0x88, 0x9a, 0x24, 0x67, 0xf7, 0x76, 0x99, 0x9d, 0xcf, 0x74, 0xa3, + 0xa7, 0x5b, 0xcc, 0x63, 0x6e, 0xd6, 0xa8, 0xfb, 0x2f, 0x05, 0xd0, 0x9e, 0x6d, 0x31, 0xd7, 0x36, + 0x0c, 0xea, 0x62, 0x3a, 0xd6, 0x3d, 0xdd, 0xb6, 0xd0, 0xe7, 0xd0, 0xe4, 0xe7, 0x19, 0x12, 0x46, + 0xda, 0xca, 0xa6, 0xb2, 0xb5, 0xb8, 0xf3, 0x96, 0x1a, 0xdf, 0x74, 0x04, 0xaf, 0x3a, 0x67, 0x23, + 0xbe, 0xe0, 0xa9, 0x5c, 0x5b, 0x1d, 0xdf, 0x56, 0x0f, 0x07, 0x5f, 0x50, 0x8d, 0x1d, 0x50, 0x46, + 0xfa, 0xe8, 0xc9, 0xf9, 0xc6, 0xcc, 0xf4, 0x7c, 0x03, 0xe2, 0x35, 0x1c, 0xa1, 0xa2, 0x43, 0xa8, + 0x0b, 0xf4, 0x9a, 0x40, 0xdf, 0x2e, 0x45, 0x97, 0x87, 0x56, 0x31, 0xf9, 0xc5, 0xfd, 0xc7, 0x8c, + 0x5a, 0x7c, 0x7b, 0xfd, 0x17, 0x24, 0x74, 0xfd, 0x1e, 0x61, 0x04, 0x0b, 0x20, 0xf4, 0x26, 0x34, + 0x5d, 0xb9, 0xfd, 0xf6, 0xec, 0xa6, 0xb2, 0x35, 0xdb, 0xbf, 0x21, 0xb5, 0x9a, 0xe1, 0xb1, 0x70, + 0xa4, 0xd1, 0x7d, 0xa2, 0xc0, 0xad, 0xfc, 0xb9, 0xf7, 0x75, 0x8f, 0xa1, 0x9f, 0xe5, 0xce, 0xae, + 0x56, 0x3b, 0x3b, 0xb7, 0x16, 0x27, 0x8f, 0x1c, 0x87, 0x2b, 0x89, 0x73, 0x1f, 0x41, 0x43, 0x67, + 0xd4, 0xf4, 0xda, 0xb5, 0xcd, 0xd9, 0xad, 0xc5, 0x9d, 0x37, 0xd4, 0x92, 0x00, 0x56, 0xf3, 0xbb, + 0xeb, 0x2f, 0x49, 0xdc, 0xc6, 0x03, 0x8e, 0x80, 0x03, 0xa0, 0xee, 0xaf, 0x6b, 0x00, 0xf7, 0xa8, + 0x63, 0xd8, 0x13, 0x93, 0x5a, 0xec, 0x1a, 0x9e, 0xee, 0x01, 0xd4, 0x3d, 0x87, 0x6a, 0xf2, 0xe9, + 0x5e, 0x2d, 0x3d, 0x41, 0xbc, 0xa9, 0x63, 0x87, 0x6a, 0xf1, 0xa3, 0xf1, 0x2f, 0x2c, 0x20, 0xd0, + 0x27, 0x30, 0xe7, 0x31, 0xc2, 0x7c, 0x4f, 0x3c, 0xd9, 0xe2, 0xce, 0x6b, 0x55, 0xc0, 0x84, 0x41, + 0xbf, 0x25, 0xe1, 0xe6, 0x82, 0x6f, 0x2c, 0x81, 0xba, 0x7f, 0x9d, 0x85, 0x95, 0x58, 0x79, 0xcf, + 0xb6, 0x86, 0x3a, 0xe3, 0x21, 0x7d, 0x17, 0xea, 0x6c, 0xe2, 0x50, 0x71, 0x27, 0x0b, 0xfd, 0x57, + 0xc3, 0xcd, 0x9c, 0x4c, 0x1c, 0xfa, 0xec, 0x7c, 0x63, 0xad, 0xc0, 0x84, 0x8b, 0xb0, 0x30, 0x42, + 0xfb, 0xd1, 0x3e, 0x6b, 0xc2, 0xfc, 0x9d, 0xb4, 0xf3, 0x67, 0xe7, 0x1b, 0x05, 0x05, 0x44, 0x8d, + 0x90, 0xd2, 0x5b, 0x44, 0x5f, 0x40, 0xcb, 0x20, 0x1e, 0x3b, 0x75, 0x86, 0x84, 0xd1, 0x13, 0xdd, + 0xa4, 0xed, 0x39, 0x71, 0xfa, 0xd7, 0xab, 0x3d, 0x14, 0xb7, 0xe8, 0xdf, 0x92, 0x3b, 0x68, 0xed, + 0xa7, 0x90, 0x70, 0x06, 0x19, 0x8d, 0x01, 0xf1, 0x95, 0x13, 0x97, 0x58, 0x5e, 0x70, 0x2a, 0xee, + 0x6f, 0xfe, 0xca, 0xfe, 0xd6, 0xa5, 0x3f, 0xb4, 0x9f, 0x43, 0xc3, 0x05, 0x1e, 0xd0, 0x2b, 0x30, + 0xe7, 0x52, 0xe2, 0xd9, 0x56, 0xbb, 0x2e, 0x6e, 0x2c, 0x7a, 0x2e, 0x2c, 0x56, 0xb1, 0x94, 0xa2, + 0xd7, 0x60, 0xde, 0xa4, 0x9e, 0x47, 0x46, 0xb4, 0xdd, 0x10, 0x8a, 0xcb, 0x52, 0x71, 0xfe, 0x20, + 0x58, 0xc6, 0xa1, 0xbc, 0xfb, 0x7b, 0x05, 0x5a, 0xf1, 0x33, 0x5d, 0x43, 0xae, 0x7e, 0x94, 0xce, + 0xd5, 0xef, 0x56, 0x08, 0xce, 0x92, 0x1c, 0xfd, 0x7b, 0x0d, 0x50, 0xac, 0x84, 0x6d, 0xc3, 0x18, + 0x10, 0xed, 0x0c, 0x6d, 0x42, 0xdd, 0x22, 0x66, 0x18, 0x93, 0x51, 0x82, 0xfc, 0x88, 
0x98, 0x14, + 0x0b, 0x09, 0xfa, 0x52, 0x01, 0xe4, 0x8b, 0xd7, 0x1c, 0xee, 0x5a, 0x96, 0xcd, 0x08, 0xbf, 0xe0, + 0x70, 0x43, 0x7b, 0x15, 0x36, 0x14, 0xfa, 0x52, 0x4f, 0x73, 0x28, 0xf7, 0x2d, 0xe6, 0x4e, 0xe2, + 0x87, 0xcd, 0x2b, 0xe0, 0x02, 0xd7, 0xe8, 0xa7, 0x00, 0xae, 0xc4, 0x3c, 0xb1, 0x65, 0xda, 0x96, + 0xd7, 0x80, 0xd0, 0xfd, 0x9e, 0x6d, 0x3d, 0xd4, 0x47, 0x71, 0x61, 0xc1, 0x11, 0x04, 0x4e, 0xc0, + 0xad, 0xdf, 0x87, 0xb5, 0x92, 0x7d, 0xa2, 0x1b, 0x30, 0x7b, 0x46, 0x27, 0xc1, 0x55, 0x61, 0xfe, + 0x13, 0xad, 0x42, 0x63, 0x4c, 0x0c, 0x9f, 0x06, 0x39, 0x89, 0x83, 0x8f, 0x3b, 0xb5, 0xf7, 0x94, + 0xee, 0xef, 0x1a, 0xc9, 0x48, 0xe1, 0xf5, 0x06, 0x6d, 0xf1, 0xf6, 0xe0, 0x18, 0xba, 0x46, 0x3c, + 0x81, 0xd1, 0xe8, 0xbf, 0x10, 0xb4, 0x86, 0x60, 0x0d, 0x47, 0x52, 0xf4, 0x73, 0x68, 0x7a, 0xd4, + 0xa0, 0x1a, 0xb3, 0x5d, 0x59, 0xe2, 0xde, 0xae, 0x18, 0x53, 0x64, 0x40, 0x8d, 0x63, 0x69, 0x1a, + 0xc0, 0x87, 0x5f, 0x38, 0x82, 0x44, 0x9f, 0x40, 0x93, 0x51, 0xd3, 0x31, 0x08, 0xa3, 0xf2, 0xf6, + 0x52, 0x71, 0xc5, 0x6b, 0x07, 0x07, 0x3b, 0xb2, 0x87, 0x27, 0x52, 0x4d, 0x54, 0xcf, 0x28, 0x4e, + 0xc3, 0x55, 0x1c, 0xc1, 0xa0, 0x9f, 0x40, 0xd3, 0x63, 0xbc, 0xab, 0x8f, 0x26, 0x22, 0xdb, 0x2e, + 0x6a, 0x2b, 0xc9, 0x3a, 0x1a, 0x98, 0xc4, 0xd0, 0xe1, 0x0a, 0x8e, 0xe0, 0xd0, 0x2e, 0x2c, 0x9b, + 0xba, 0x85, 0x29, 0x19, 0x4e, 0x8e, 0xa9, 0x66, 0x5b, 0x43, 0x4f, 0xa4, 0x69, 0xa3, 0xbf, 0x26, + 0x8d, 0x96, 0x0f, 0xd2, 0x62, 0x9c, 0xd5, 0x47, 0xfb, 0xb0, 0x1a, 0xb6, 0xdd, 0x8f, 0x74, 0x8f, + 0xd9, 0xee, 0x64, 0x5f, 0x37, 0x75, 0x26, 0x6a, 0x5e, 0xa3, 0xdf, 0x9e, 0x9e, 0x6f, 0xac, 0xe2, + 0x02, 0x39, 0x2e, 0xb4, 0xe2, 0x75, 0xc5, 0x21, 0xbe, 0x47, 0x87, 0xa2, 0x86, 0x35, 0xe3, 0xba, + 0x72, 0x24, 0x56, 0xb1, 0x94, 0xa2, 0x1f, 0xa7, 0xc2, 0xb4, 0x79, 0xb5, 0x30, 0x6d, 0x95, 0x87, + 0x28, 0x3a, 0x85, 0x35, 0xc7, 0xb5, 0x47, 0x2e, 0xf5, 0xbc, 0x7b, 0x94, 0x0c, 0x0d, 0xdd, 0xa2, + 0xe1, 0xcd, 0x2c, 0x88, 0x13, 0xbd, 0x34, 0x3d, 0xdf, 0x58, 0x3b, 0x2a, 0x56, 0xc1, 0x65, 0xb6, + 0xdd, 0x3f, 0xd5, 0xe1, 0x46, 0xb6, 0xc7, 0xa1, 0x8f, 0x01, 0xd9, 0x03, 0x8f, 0xba, 0x63, 0x3a, + 0xfc, 0x30, 0x18, 0xdc, 0xf8, 0x74, 0xa3, 0x88, 0xe9, 0x26, 0xca, 0xdb, 0xc3, 0x9c, 0x06, 0x2e, + 0xb0, 0x0a, 0xe6, 0x23, 0x99, 0x00, 0x35, 0xb1, 0xd1, 0xc4, 0x7c, 0x94, 0x4b, 0x82, 0x5d, 0x58, + 0x96, 0xb9, 0x1f, 0x0a, 0x45, 0xb0, 0x26, 0xde, 0xfd, 0x34, 0x2d, 0xc6, 0x59, 0x7d, 0x74, 0x17, + 0x96, 0x5c, 0x1e, 0x07, 0x11, 0xc0, 0xbc, 0x00, 0xf8, 0x96, 0x04, 0x58, 0xc2, 0x49, 0x21, 0x4e, + 0xeb, 0xa2, 0x0f, 0xe1, 0x26, 0x19, 0x13, 0xdd, 0x20, 0x03, 0x83, 0x46, 0x00, 0x75, 0x01, 0xf0, + 0xa2, 0x04, 0xb8, 0xb9, 0x9b, 0x55, 0xc0, 0x79, 0x1b, 0x74, 0x00, 0x2b, 0xbe, 0x95, 0x87, 0x0a, + 0x82, 0xf8, 0x25, 0x09, 0xb5, 0x72, 0x9a, 0x57, 0xc1, 0x45, 0x76, 0xe8, 0x73, 0x00, 0x2d, 0xec, + 0xea, 0x5e, 0x7b, 0x4e, 0x94, 0xe1, 0x37, 0x2b, 0x24, 0x5b, 0x34, 0x0a, 0xc4, 0x25, 0x30, 0x5a, + 0xf2, 0x70, 0x02, 0x13, 0xdd, 0x81, 0x96, 0x66, 0x1b, 0x86, 0x88, 0xfc, 0x3d, 0xdb, 0xb7, 0x98, + 0x08, 0xde, 0x46, 0x1f, 0xf1, 0x66, 0xbf, 0x97, 0x92, 0xe0, 0x8c, 0x66, 0xf7, 0x8f, 0x4a, 0xb2, + 0xcd, 0x84, 0xe9, 0x8c, 0xee, 0xa4, 0x46, 0x9f, 0x57, 0x32, 0xa3, 0xcf, 0xad, 0xbc, 0x45, 0x62, + 0xf2, 0xd1, 0x61, 0x89, 0x07, 0xbf, 0x6e, 0x8d, 0x82, 0x07, 0x97, 0x25, 0xf1, 0xad, 0x0b, 0x53, + 0x29, 0xd2, 0x4e, 0x34, 0xc6, 0x9b, 0xe2, 0xcd, 0x93, 0x42, 0x9c, 0x46, 0xee, 0x7e, 0x00, 0xad, + 0x74, 0x1e, 0xa6, 0x66, 0x7a, 0xe5, 0xd2, 0x99, 0xfe, 0x1b, 0x05, 0xd6, 0x4a, 0xbc, 0x23, 0x03, + 0x5a, 0x26, 0x79, 0x9c, 0x78, 0xe6, 0x4b, 0x67, 0x63, 0xce, 0x9a, 0xd4, 0x80, 0x35, 0xa9, 0x0f, + 0x2c, 0x76, 
0xe8, 0x1e, 0x33, 0x57, 0xb7, 0x46, 0xc1, 0x3b, 0x1c, 0xa4, 0xb0, 0x70, 0x06, 0x1b, + 0x7d, 0x06, 0x4d, 0x93, 0x3c, 0x3e, 0xf6, 0xdd, 0x51, 0xd1, 0x7d, 0x55, 0xf3, 0x23, 0xfa, 0xc7, + 0x81, 0x44, 0xc1, 0x11, 0x5e, 0xf7, 0x10, 0x36, 0x53, 0x87, 0xe4, 0xa5, 0x82, 0x3e, 0xf4, 0x8d, + 0x63, 0x1a, 0x3f, 0xf8, 0x1b, 0xb0, 0xe0, 0x10, 0x97, 0xe9, 0x51, 0xb9, 0x68, 0xf4, 0x97, 0xa6, + 0xe7, 0x1b, 0x0b, 0x47, 0xe1, 0x22, 0x8e, 0xe5, 0xdd, 0x7f, 0x2b, 0xd0, 0x38, 0xd6, 0x88, 0x41, + 0xaf, 0x81, 0x3a, 0xdc, 0x4b, 0x51, 0x87, 0x6e, 0x69, 0x10, 0x89, 0xfd, 0x94, 0xb2, 0x86, 0xfd, + 0x0c, 0x6b, 0x78, 0xf9, 0x12, 0x9c, 0x8b, 0x09, 0xc3, 0xfb, 0xb0, 0x10, 0xb9, 0x4b, 0x55, 0x49, + 0xe5, 0xb2, 0x2a, 0xd9, 0xfd, 0x6d, 0x0d, 0x16, 0x13, 0x2e, 0xae, 0x66, 0xcd, 0xaf, 0x3b, 0x31, + 0x68, 0xf0, 0x4a, 0xb2, 0x53, 0xe5, 0x20, 0x6a, 0x38, 0x54, 0x04, 0xf3, 0x5b, 0xdc, 0xbd, 0xf3, + 0xb3, 0xc6, 0x07, 0xd0, 0x62, 0xc4, 0x1d, 0x51, 0x16, 0xca, 0xc4, 0x85, 0x2d, 0xc4, 0xe4, 0xe1, + 0x24, 0x25, 0xc5, 0x19, 0xed, 0xf5, 0xbb, 0xb0, 0x94, 0x72, 0x76, 0xa5, 0x21, 0xec, 0x4b, 0x7e, + 0x39, 0x71, 0x70, 0x5e, 0x43, 0x74, 0x7d, 0x9c, 0x8a, 0xae, 0xad, 0xf2, 0xcb, 0x4c, 0xa4, 0x4c, + 0x59, 0x8c, 0xe1, 0x4c, 0x8c, 0xbd, 0x5e, 0x09, 0xed, 0xe2, 0x48, 0xfb, 0x47, 0x0d, 0x56, 0x13, + 0xda, 0x31, 0x37, 0xfd, 0x7e, 0xaa, 0x40, 0x6f, 0x65, 0x0a, 0x74, 0xbb, 0xc8, 0xe6, 0xb9, 0x91, + 0xd3, 0x62, 0xc2, 0x38, 0xfb, 0xbf, 0x48, 0x18, 0xff, 0xa0, 0xc0, 0x72, 0xe2, 0xee, 0xae, 0x81, + 0x31, 0x3e, 0x48, 0x33, 0xc6, 0x97, 0xab, 0x04, 0x4d, 0x09, 0x65, 0xfc, 0x73, 0x23, 0xb5, 0xf9, + 0xff, 0x7b, 0x12, 0xf3, 0x4b, 0x58, 0x1d, 0xdb, 0x86, 0x6f, 0xd2, 0x3d, 0x83, 0xe8, 0x66, 0xa8, + 0xc0, 0x87, 0xbe, 0xd9, 0xec, 0x1f, 0x43, 0x11, 0x3c, 0x75, 0x3d, 0xdd, 0x63, 0xd4, 0x62, 0x9f, + 0xc6, 0x96, 0xfd, 0x6f, 0x4b, 0x27, 0xab, 0x9f, 0x16, 0xc0, 0xe1, 0x42, 0x27, 0xe8, 0x7b, 0xb0, + 0xc8, 0x07, 0x66, 0x5d, 0xa3, 0x9c, 0x7b, 0xcb, 0xc0, 0x5a, 0x91, 0x40, 0x8b, 0xc7, 0xb1, 0x08, + 0x27, 0xf5, 0xd0, 0x23, 0x58, 0x71, 0xec, 0xe1, 0x01, 0xb1, 0xc8, 0x88, 0xf2, 0x31, 0xe3, 0xc8, + 0x36, 0x74, 0x6d, 0x22, 0x98, 0xcd, 0x42, 0xff, 0xdd, 0x70, 0xb8, 0x3c, 0xca, 0xab, 0x3c, 0xe3, + 0x14, 0x21, 0xbf, 0x2c, 0x92, 0xba, 0x08, 0x12, 0xb9, 0xd0, 0xf2, 0x65, 0xbb, 0x97, 0x44, 0x2f, + 0xf8, 0x0b, 0x67, 0xa7, 0x4a, 0x84, 0x9d, 0xa6, 0x2c, 0xe3, 0xea, 0x9f, 0x5e, 0xc7, 0x19, 0x0f, + 0xa5, 0xc4, 0xad, 0xf9, 0xdf, 0x10, 0xb7, 0xee, 0x3f, 0xeb, 0x70, 0x33, 0x57, 0x2a, 0xd1, 0x0f, + 0x2f, 0x60, 0x38, 0xb7, 0x9e, 0x1b, 0xbb, 0xc9, 0x51, 0x93, 0xd9, 0x2b, 0x50, 0x93, 0x5d, 0x58, + 0xd6, 0x7c, 0xd7, 0xa5, 0x16, 0xcb, 0x10, 0x93, 0x88, 0x1a, 0xed, 0xa5, 0xc5, 0x38, 0xab, 0x5f, + 0xc4, 0xae, 0x1a, 0x57, 0x64, 0x57, 0xc9, 0x5d, 0xc8, 0x09, 0x39, 0x08, 0xbb, 0xfc, 0x2e, 0xe4, + 0xa0, 0x9c, 0xd5, 0xe7, 0xd3, 0x41, 0x80, 0x1a, 0x21, 0xcc, 0xa7, 0xa7, 0x83, 0xd3, 0x94, 0x14, + 0x67, 0xb4, 0x0b, 0x98, 0xca, 0x42, 0x55, 0xa6, 0x82, 0x48, 0x8a, 0x47, 0x81, 0xc8, 0xf1, 0xed, + 0x2a, 0xb1, 0x5c, 0x99, 0x48, 0x75, 0xff, 0xa2, 0xc0, 0x8b, 0xa5, 0x49, 0x80, 0x76, 0x53, 0x2d, + 0x77, 0x3b, 0xd3, 0x72, 0xbf, 0x53, 0x6a, 0x98, 0xe8, 0xbb, 0x6e, 0x31, 0x35, 0x7a, 0xbf, 0x1a, + 0x35, 0x2a, 0x98, 0xdb, 0x2f, 0xe7, 0x48, 0xfd, 0xed, 0x27, 0x4f, 0x3b, 0x33, 0x5f, 0x3d, 0xed, + 0xcc, 0x7c, 0xfd, 0xb4, 0x33, 0xf3, 0xab, 0x69, 0x47, 0x79, 0x32, 0xed, 0x28, 0x5f, 0x4d, 0x3b, + 0xca, 0xd7, 0xd3, 0x8e, 0xf2, 0xb7, 0x69, 0x47, 0xf9, 0xcd, 0x37, 0x9d, 0x99, 0xcf, 0xe6, 0xa5, + 0xc7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x99, 0x8d, 0x1e, 0xaf, 0x61, 0x1b, 0x00, 0x00, +} + func (m *ControllerRevision) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -205,36 +798,45 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -242,37 +844,46 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -280,41 +891,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -322,49 +944,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n7, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != 
nil { return nil, err } @@ -372,37 +1007,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -410,51 +1054,61 @@ func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { } func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a if len(m.UpdatedAnnotations) > 0 { keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) for k := range m.UpdatedAnnotations { keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for _, k := range keysForUpdatedAnnotations { + for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.UpdatedAnnotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForUpdatedAnnotations[iNdEx]) + copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n10, err := m.RollbackTo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += 
n10 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -462,79 +1116,92 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n11, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.RollbackTo != nil { + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n12, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n13, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x42 } - dAtA[i] = 0x38 - i++ + i-- if m.Paused { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.RollbackTo != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n14, err := m.RollbackTo.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n14 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x22 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err 
!= nil { return nil, err } @@ -542,52 +1209,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -595,31 +1269,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n15, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RollbackConfig) Marshal() (dAtA []byte, err error) 
{ size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -627,20 +1309,25 @@ func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { } func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -648,37 +1335,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n16, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n17, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -686,22 +1382,27 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Partition != nil { - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -709,41 +1410,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n18, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n19, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n20, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -751,20 +1463,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -772,46 +1489,54 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i 
+= copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -819,41 +1544,52 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n21, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n22, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n23, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n23 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -861,41 +1597,52 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n24, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -903,37 +1650,46 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n25, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -941,73 +1697,88 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n26, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n26 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n27, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n27 + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { + for iNdEx := 
len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n28, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1015,59 +1786,68 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] 
= 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1075,37 +1855,50 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n29, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n29 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1117,6 +1910,9 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1131,6 +1927,9 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1143,6 +1942,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1161,6 +1963,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1175,6 +1980,9 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1193,6 +2001,9 @@ func (m *DeploymentRollback) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas 
!= nil { @@ -1222,6 +2033,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1243,6 +2057,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1255,6 +2072,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *RollbackConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Revision)) @@ -1262,6 +2082,9 @@ func (m *RollbackConfig) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1276,6 +2099,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Partition != nil { @@ -1285,6 +2111,9 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1297,6 +2126,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1304,6 +2136,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1321,6 +2156,9 @@ func (m *ScaleStatus) Size() (n int) { } func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1333,6 +2171,9 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1349,6 +2190,9 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1363,6 +2207,9 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1393,6 +2240,9 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1419,6 +2269,9 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1431,14 +2284,7 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1448,8 +2294,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) 
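Editorial aside, not part of the vendored diff: the sovGenerated hunk above replaces the shift loop with an arithmetic form based on math/bits.Len64; or-ing with 1 keeps the zero value at one byte. A small, hypothetical check that the two forms agree:

package main

import (
	"fmt"
	"math/bits"
)

// sovLoop is the old loop-based varint size.
func sovLoop(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// sovBits is the new bit-length form: ceil(bitlen(x|1) / 7).
func sovBits(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1} {
		fmt.Println(x, sovLoop(x), sovBits(x)) // the last two columns match
	}
}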
+ `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -1459,9 +2305,14 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1471,7 +2322,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1487,8 +2338,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1497,9 +2348,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1532,13 +2388,13 @@ func 
(this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, `}`, }, "") @@ -1548,13 +2404,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -1567,7 +2428,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -1587,8 +2448,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + 
strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -1608,7 +2469,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1652,7 +2513,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1666,7 +2527,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1677,9 +2538,14 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1688,11 +2554,16 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 
1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -1705,6 +2576,11 @@ func (this *StatefulSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -1714,7 +2590,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1725,7 +2601,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -1753,7 +2629,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1781,7 +2657,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1790,6 +2666,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1811,7 +2690,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1820,6 +2699,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1841,7 +2723,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1855,6 +2737,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1882,7 +2767,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1910,7 +2795,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1919,6 +2804,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1940,7 +2828,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1949,6 +2837,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1966,6 +2857,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1993,7 +2887,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2021,7 +2915,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2030,6 +2924,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2051,7 +2948,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2060,6 +2957,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2081,7 +2981,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2090,6 +2990,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2106,6 +3009,9 @@ 
func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2133,7 +3039,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2161,7 +3067,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2171,6 +3077,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2190,7 +3099,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2200,6 +3109,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2219,7 +3131,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2229,6 +3141,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2248,7 +3163,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2258,6 +3173,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2277,7 +3195,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2286,6 +3204,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2307,7 +3228,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2316,6 +3237,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2332,6 +3256,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2359,7 +3286,7 @@ func (m *DeploymentList) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2387,7 +3314,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2396,6 +3323,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2417,7 +3347,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2426,6 +3356,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2443,6 +3376,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2470,7 +3406,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2498,7 +3434,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2508,6 +3444,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2527,7 +3466,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2536,6 +3475,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2556,7 +3498,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2573,7 +3515,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2583,6 +3525,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2599,7 +3544,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2609,6 +3554,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if 
postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -2645,7 +3593,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2654,6 +3602,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2670,6 +3621,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2697,7 +3651,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2725,7 +3679,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2745,7 +3699,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2754,11 +3708,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2778,7 +3735,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2787,6 +3744,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2808,7 +3768,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2817,6 +3777,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2838,7 +3801,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2857,7 +3820,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2877,7 +3840,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2897,7 +3860,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift 
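Editorial aside, not part of the vendored diff: the Unmarshal hunks in this region add "if postIndex < 0" and "if (iNdEx + skippy) < 0" guards so that a hostile varint length that overflows int cannot wrap negative and slip past the existing "> l" bounds checks. A minimal, hypothetical length-delimited reader with the same guards; readBytes is an illustrative helper, not the generated API.

package main

import (
	"errors"
	"fmt"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// readBytes decodes a varint length at dAtA[iNdEx:] and returns the following
// length-delimited payload, rejecting lengths that went negative via overflow.
func readBytes(dAtA []byte, iNdEx int) ([]byte, int, error) {
	l := len(dAtA)
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errors.New("proto: integer overflow")
		}
		if iNdEx >= l {
			return nil, 0, errors.New("unexpected EOF")
		}
		b := dAtA[iNdEx]
		iNdEx++
		msglen |= int(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	if msglen < 0 {
		return nil, 0, errInvalidLength
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 { // guards int overflow of iNdEx+msglen, as in the generated code
		return nil, 0, errInvalidLength
	}
	if postIndex > l {
		return nil, 0, errors.New("unexpected EOF")
	}
	return dAtA[iNdEx:postIndex], postIndex, nil
}

func main() {
	payload, next, err := readBytes([]byte{0x03, 'f', 'o', 'o'}, 0)
	fmt.Println(string(payload), next, err) // foo 4 <nil>
}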
if b < 0x80 { break } @@ -2906,6 +3869,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2930,7 +3896,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2945,6 +3911,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2972,7 +3941,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3000,7 +3969,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3019,7 +3988,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3038,7 +4007,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3057,7 +4026,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3076,7 +4045,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3095,7 +4064,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3104,6 +4073,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3126,7 +4098,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3145,7 +4117,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3160,6 +4132,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3187,7 +4162,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3215,7 +4190,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift 
if b < 0x80 { break } @@ -3225,6 +4200,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3244,7 +4222,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3253,6 +4231,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3272,6 +4253,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3299,7 +4283,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3327,7 +4311,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3341,6 +4325,9 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3368,7 +4355,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3396,7 +4383,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3405,11 +4392,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3429,7 +4419,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3438,11 +4428,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3457,6 +4450,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3484,7 +4480,7 @@ func (m *RollingUpdateStatefulSetStrategy) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3512,7 +4508,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3527,6 +4523,9 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3554,7 +4553,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3582,7 +4581,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3591,6 +4590,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3612,7 +4614,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3621,6 +4623,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3642,7 +4647,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3651,6 +4656,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3667,6 +4675,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3694,7 +4705,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3722,7 +4733,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3736,6 +4747,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3763,7 +4777,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3791,7 +4805,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3810,7 +4824,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) 
<< shift if b < 0x80 { break } @@ -3819,6 +4833,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3839,7 +4856,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3856,7 +4873,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3866,6 +4883,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -3882,7 +4902,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3892,6 +4912,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -3928,7 +4951,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3938,6 +4961,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3952,6 +4978,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3979,7 +5008,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4007,7 +5036,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4016,6 +5045,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4037,7 +5069,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4046,6 +5078,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4067,7 +5102,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4076,6 +5111,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) 
error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4092,6 +5130,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4119,7 +5160,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4147,7 +5188,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4157,6 +5198,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4176,7 +5220,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4186,6 +5230,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4205,7 +5252,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4214,6 +5261,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4235,7 +5285,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4245,6 +5295,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4264,7 +5317,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4274,6 +5327,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4288,6 +5344,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4315,7 +5374,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4343,7 +5402,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4352,6 +5411,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4373,7 +5435,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4382,6 +5444,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4399,6 +5464,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4426,7 +5494,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4454,7 +5522,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4474,7 +5542,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4483,11 +5551,14 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4507,7 +5578,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4516,6 +5587,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4537,7 +5611,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4546,10 +5620,13 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -4568,7 +5645,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4578,6 +5655,9 @@ func (m 
*StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4597,7 +5677,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4607,6 +5687,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4626,7 +5709,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4635,6 +5718,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4656,7 +5742,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4671,6 +5757,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4698,7 +5787,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4726,7 +5815,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4746,7 +5835,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4765,7 +5854,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4784,7 +5873,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4803,7 +5892,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4822,7 +5911,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4832,6 +5921,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4851,7 +5943,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-4861,6 +5953,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4880,7 +5975,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4900,7 +5995,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4909,6 +6004,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4926,6 +6024,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4953,7 +6054,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4981,7 +6082,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4991,6 +6092,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5010,7 +6114,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5019,6 +6123,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5038,6 +6145,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5104,10 +6214,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -5136,6 +6249,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -5154,128 +6270,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1859 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 
0x47, - 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0xd5, - 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, - 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0x6b, - 0x86, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82, - 0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f, - 0x77, 0xdb, 0x6d, 0xa4, 0xf5, 0x21, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf, - 0xc0, 0x8f, 0xce, 0xde, 0xf3, 0x35, 0xc3, 0xe9, 0x9e, 0x8d, 0xfa, 0xd4, 0xb3, 0x29, 0xa3, 0x7e, - 0x77, 0x4c, 0xed, 0x81, 0xe3, 0x75, 0xa5, 0x80, 0xb8, 0x46, 0x97, 0xb8, 0xae, 0xdf, 0x1d, 0xdf, - 0xee, 0x53, 0x46, 0x6e, 0x77, 0x87, 0xd4, 0xa6, 0x1e, 0x61, 0x74, 0xa0, 0xb9, 0x9e, 0xc3, 0x1c, - 0xb4, 0x16, 0x28, 0x6a, 0xc4, 0x35, 0x34, 0xae, 0xa8, 0x49, 0xc5, 0xf5, 0xed, 0xa1, 0xc1, 0x1e, - 0x8d, 0xfa, 0x9a, 0xee, 0x58, 0xdd, 0xa1, 0x33, 0x74, 0xba, 0x42, 0xbf, 0x3f, 0x7a, 0x28, 0xbe, - 0xc4, 0x87, 0xf8, 0x15, 0xe0, 0xac, 0xab, 0x09, 0x87, 0xba, 0xe3, 0xd1, 0xee, 0x38, 0xe7, 0x6b, - 0xfd, 0x9d, 0x58, 0xc7, 0x22, 0xfa, 0x23, 0xc3, 0xa6, 0xde, 0xa4, 0xeb, 0x9e, 0x0d, 0xf9, 0x82, - 0xdf, 0xb5, 0x28, 0x23, 0x45, 0x56, 0xdd, 0x32, 0x2b, 0x6f, 0x64, 0x33, 0xc3, 0xa2, 0x39, 0x83, - 0x77, 0x2f, 0x33, 0xf0, 0xf5, 0x47, 0xd4, 0x22, 0x39, 0xbb, 0xb7, 0xcb, 0xec, 0x46, 0xcc, 0x30, - 0xbb, 0x86, 0xcd, 0x7c, 0xe6, 0x65, 0x8d, 0xd4, 0xff, 0x28, 0x80, 0xf6, 0x1c, 0x9b, 0x79, 0x8e, - 0x69, 0x52, 0x0f, 0xd3, 0xb1, 0xe1, 0x1b, 0x8e, 0x8d, 0x3e, 0x87, 0x26, 0x3f, 0xcf, 0x80, 0x30, - 0xd2, 0x56, 0x36, 0x95, 0xad, 0xc5, 0x9d, 0xb7, 0xb4, 0xf8, 0xa6, 0x23, 0x78, 0xcd, 0x3d, 0x1b, - 0xf2, 0x05, 0x5f, 0xe3, 0xda, 0xda, 0xf8, 0xb6, 0x76, 0xd8, 0xff, 0x82, 0xea, 0xec, 0x80, 0x32, - 0xd2, 0x43, 0x4f, 0xce, 0x37, 0x66, 0xa6, 0xe7, 0x1b, 0x10, 0xaf, 0xe1, 0x08, 0x15, 0x1d, 0x42, - 0x5d, 0xa0, 0xd7, 0x04, 0xfa, 0x76, 0x29, 0xba, 0x3c, 0xb4, 0x86, 0xc9, 0xaf, 0xee, 0x3f, 0x66, - 0xd4, 0xe6, 0xdb, 0xeb, 0xbd, 0x20, 0xa1, 0xeb, 0xf7, 0x08, 0x23, 0x58, 0x00, 0xa1, 0x37, 0xa1, - 0xe9, 0xc9, 0xed, 0xb7, 0x67, 0x37, 0x95, 0xad, 0xd9, 0xde, 0x0d, 0xa9, 0xd5, 0x0c, 0x8f, 0x85, - 0x23, 0x0d, 0xf5, 0x89, 0x02, 0xb7, 0xf2, 0xe7, 0xde, 0x37, 0x7c, 0x86, 0x7e, 0x91, 0x3b, 0xbb, - 0x56, 0xed, 0xec, 0xdc, 0x5a, 0x9c, 0x3c, 0x72, 0x1c, 0xae, 0x24, 0xce, 0x7d, 0x04, 0x0d, 0x83, - 0x51, 0xcb, 0x6f, 0xd7, 0x36, 0x67, 0xb7, 0x16, 0x77, 0xde, 0xd0, 0x4a, 0x02, 0x58, 0xcb, 0xef, - 0xae, 0xb7, 0x24, 0x71, 0x1b, 0x0f, 0x38, 0x02, 0x0e, 0x80, 0xd4, 0xdf, 0xd6, 0x00, 0xee, 0x51, - 0xd7, 0x74, 0x26, 0x16, 0xb5, 0xd9, 0x35, 0x3c, 0xdd, 0x03, 0xa8, 0xfb, 0x2e, 0xd5, 0xe5, 0xd3, - 0xbd, 0x5a, 0x7a, 0x82, 0x78, 0x53, 0xc7, 0x2e, 0xd5, 0xe3, 0x47, 0xe3, 0x5f, 0x58, 0x40, 0xa0, - 0x4f, 0x60, 0xce, 0x67, 0x84, 0x8d, 0x7c, 0xf1, 0x64, 0x8b, 0x3b, 0xaf, 0x55, 0x01, 0x13, 0x06, - 0xbd, 0x96, 0x84, 0x9b, 0x0b, 0xbe, 0xb1, 0x04, 0x52, 0xff, 0x3e, 0x0b, 0x2b, 0xb1, 0xf2, 0x9e, - 0x63, 0x0f, 0x0c, 0xc6, 0x43, 0xfa, 0x2e, 0xd4, 0xd9, 0xc4, 0xa5, 0xe2, 0x4e, 0x16, 0x7a, 0xaf, - 0x86, 0x9b, 0x39, 0x99, 0xb8, 0xf4, 0xd9, 0xf9, 0xc6, 0x5a, 0x81, 0x09, 0x17, 0x61, 0x61, 0x84, - 0xf6, 0xa3, 0x7d, 0xd6, 0x84, 0xf9, 0x3b, 0x69, 0xe7, 0xcf, 0xce, 0x37, 0x0a, 0x0a, 0x88, 0x16, - 0x21, 0xa5, 0xb7, 0x88, 0x5e, 0x81, 0x39, 0x8f, 0x12, 0xdf, 0xb1, 0xdb, 0x75, 0x81, 0x16, 0x1d, - 0x05, 0x8b, 0x55, 0x2c, 0xa5, 0xe8, 0x35, 0x98, 0xb7, 0xa8, 0xef, 0x93, 0x21, 0x6d, 0x37, 0x84, - 0xe2, 0xb2, 0x54, 
0x9c, 0x3f, 0x08, 0x96, 0x71, 0x28, 0x47, 0x5f, 0x40, 0xcb, 0x24, 0x3e, 0x3b, - 0x75, 0x07, 0x84, 0xd1, 0x13, 0xc3, 0xa2, 0xed, 0x39, 0x71, 0xa1, 0xaf, 0x57, 0x7b, 0x7b, 0x6e, - 0xd1, 0xbb, 0x25, 0xd1, 0x5b, 0xfb, 0x29, 0x24, 0x9c, 0x41, 0x46, 0x63, 0x40, 0x7c, 0xe5, 0xc4, - 0x23, 0xb6, 0x1f, 0x5c, 0x14, 0xf7, 0x37, 0x7f, 0x65, 0x7f, 0xeb, 0xd2, 0x1f, 0xda, 0xcf, 0xa1, - 0xe1, 0x02, 0x0f, 0xea, 0x1f, 0x15, 0x68, 0xc5, 0xcf, 0x74, 0x0d, 0xb9, 0xfa, 0x51, 0x3a, 0x57, - 0xbf, 0x5f, 0x21, 0x38, 0x4b, 0x72, 0xf4, 0x9f, 0x35, 0x40, 0xb1, 0x12, 0x76, 0x4c, 0xb3, 0x4f, - 0xf4, 0x33, 0xb4, 0x09, 0x75, 0x9b, 0x58, 0x61, 0x4c, 0x46, 0x09, 0xf2, 0x13, 0x62, 0x51, 0x2c, - 0x24, 0xe8, 0x4b, 0x05, 0xd0, 0x48, 0x5c, 0xfd, 0x60, 0xd7, 0xb6, 0x1d, 0x46, 0xf8, 0x6d, 0x84, - 0x1b, 0xda, 0xab, 0xb0, 0xa1, 0xd0, 0x97, 0x76, 0x9a, 0x43, 0xb9, 0x6f, 0x33, 0x6f, 0x12, 0xbf, - 0x42, 0x5e, 0x01, 0x17, 0xb8, 0x46, 0x3f, 0x07, 0xf0, 0x24, 0xe6, 0x89, 0x23, 0xd3, 0xb6, 0xbc, - 0x06, 0x84, 0xee, 0xf7, 0x1c, 0xfb, 0xa1, 0x31, 0x8c, 0x0b, 0x0b, 0x8e, 0x20, 0x70, 0x02, 0x6e, - 0xfd, 0x3e, 0xac, 0x95, 0xec, 0x13, 0xdd, 0x80, 0xd9, 0x33, 0x3a, 0x09, 0xae, 0x0a, 0xf3, 0x9f, - 0x68, 0x15, 0x1a, 0x63, 0x62, 0x8e, 0x68, 0x90, 0x93, 0x38, 0xf8, 0xb8, 0x53, 0x7b, 0x4f, 0x51, - 0xff, 0xd0, 0x48, 0x46, 0x0a, 0xaf, 0x37, 0x68, 0x8b, 0xb7, 0x07, 0xd7, 0x34, 0x74, 0xe2, 0x0b, - 0x8c, 0x46, 0xef, 0x85, 0xa0, 0x35, 0x04, 0x6b, 0x38, 0x92, 0xa2, 0x5f, 0x42, 0xd3, 0xa7, 0x26, - 0xd5, 0x99, 0xe3, 0xc9, 0x12, 0xf7, 0x76, 0xc5, 0x98, 0x22, 0x7d, 0x6a, 0x1e, 0x4b, 0xd3, 0x00, - 0x3e, 0xfc, 0xc2, 0x11, 0x24, 0xfa, 0x04, 0x9a, 0x8c, 0x5a, 0xae, 0x49, 0x18, 0x95, 0xb7, 0x97, - 0x8a, 0x2b, 0x5e, 0x3b, 0x38, 0xd8, 0x91, 0x33, 0x38, 0x91, 0x6a, 0xa2, 0x7a, 0x46, 0x71, 0x1a, - 0xae, 0xe2, 0x08, 0x06, 0xfd, 0x0c, 0x9a, 0x3e, 0xe3, 0x5d, 0x7d, 0x38, 0x11, 0x15, 0xe5, 0xa2, - 0xb6, 0x92, 0xac, 0xa3, 0x81, 0x49, 0x0c, 0x1d, 0xae, 0xe0, 0x08, 0x0e, 0xed, 0xc2, 0xb2, 0x65, - 0xd8, 0x98, 0x92, 0xc1, 0xe4, 0x98, 0xea, 0x8e, 0x3d, 0xf0, 0x45, 0x29, 0x6a, 0xf4, 0xd6, 0xa4, - 0xd1, 0xf2, 0x41, 0x5a, 0x8c, 0xb3, 0xfa, 0x68, 0x1f, 0x56, 0xc3, 0xb6, 0xfb, 0x91, 0xe1, 0x33, - 0xc7, 0x9b, 0xec, 0x1b, 0x96, 0xc1, 0x44, 0x81, 0x6a, 0xf4, 0xda, 0xd3, 0xf3, 0x8d, 0x55, 0x5c, - 0x20, 0xc7, 0x85, 0x56, 0xbc, 0x76, 0xba, 0x64, 0xe4, 0xd3, 0x81, 0x28, 0x38, 0xcd, 0xb8, 0x76, - 0x1e, 0x89, 0x55, 0x2c, 0xa5, 0xe8, 0xa7, 0xa9, 0x30, 0x6d, 0x5e, 0x2d, 0x4c, 0x5b, 0xe5, 0x21, - 0x8a, 0x4e, 0x61, 0xcd, 0xf5, 0x9c, 0xa1, 0x47, 0x7d, 0xff, 0x1e, 0x25, 0x03, 0xd3, 0xb0, 0x69, - 0x78, 0x33, 0x0b, 0xe2, 0x44, 0x2f, 0x4d, 0xcf, 0x37, 0xd6, 0x8e, 0x8a, 0x55, 0x70, 0x99, 0xad, - 0xfa, 0x97, 0x3a, 0xdc, 0xc8, 0xf6, 0x38, 0xf4, 0x31, 0x20, 0xa7, 0xef, 0x53, 0x6f, 0x4c, 0x07, - 0x1f, 0x06, 0x83, 0x1b, 0x9f, 0x6e, 0x14, 0x31, 0xdd, 0x44, 0x79, 0x7b, 0x98, 0xd3, 0xc0, 0x05, - 0x56, 0xc1, 0x7c, 0x24, 0x13, 0xa0, 0x26, 0x36, 0x9a, 0x98, 0x8f, 0x72, 0x49, 0xb0, 0x0b, 0xcb, - 0x32, 0xf7, 0x43, 0xa1, 0x08, 0xd6, 0xc4, 0xbb, 0x9f, 0xa6, 0xc5, 0x38, 0xab, 0x8f, 0x3e, 0x84, - 0x9b, 0x64, 0x4c, 0x0c, 0x93, 0xf4, 0x4d, 0x1a, 0x81, 0xd4, 0x05, 0xc8, 0x8b, 0x12, 0xe4, 0xe6, - 0x6e, 0x56, 0x01, 0xe7, 0x6d, 0xd0, 0x01, 0xac, 0x8c, 0xec, 0x3c, 0x54, 0x10, 0x87, 0x2f, 0x49, - 0xa8, 0x95, 0xd3, 0xbc, 0x0a, 0x2e, 0xb2, 0x43, 0x9f, 0x03, 0xe8, 0x61, 0x63, 0xf6, 0xdb, 0x73, - 0xa2, 0x92, 0xbe, 0x59, 0x21, 0x5f, 0xa2, 0x6e, 0x1e, 0x57, 0xb1, 0x68, 0xc9, 0xc7, 0x09, 0x4c, - 0x74, 0x17, 0x96, 0x3c, 0x9e, 0x01, 0xd1, 0x56, 0xe7, 0xc5, 0x56, 0xbf, 0x23, 0xcd, 0x96, 0x70, - 0x52, 0x88, 0xd3, 0xba, 0xe8, 0x0e, 0xb4, 
0x74, 0xc7, 0x34, 0x45, 0xe4, 0xef, 0x39, 0x23, 0x9b, - 0x89, 0xe0, 0x6d, 0xf4, 0x10, 0xef, 0xcc, 0x7b, 0x29, 0x09, 0xce, 0x68, 0xaa, 0x7f, 0x56, 0x92, - 0x6d, 0x26, 0x4c, 0x67, 0x74, 0x27, 0x35, 0xfa, 0xbc, 0x92, 0x19, 0x7d, 0x6e, 0xe5, 0x2d, 0x12, - 0x93, 0x8f, 0x01, 0x4b, 0x3c, 0xf8, 0x0d, 0x7b, 0x18, 0x3c, 0xb8, 0x2c, 0x89, 0x6f, 0x5d, 0x98, - 0x4a, 0x91, 0x76, 0xa2, 0x31, 0xde, 0x14, 0x27, 0x4f, 0x0a, 0x71, 0x1a, 0x59, 0xfd, 0x00, 0x5a, - 0xe9, 0x3c, 0x4c, 0xcd, 0xf4, 0xca, 0xa5, 0x33, 0xfd, 0x37, 0x0a, 0xac, 0x95, 0x78, 0x47, 0x26, - 0xb4, 0x2c, 0xf2, 0x38, 0x11, 0x23, 0x97, 0xce, 0xc6, 0x9c, 0x35, 0x69, 0x01, 0x6b, 0xd2, 0x1e, - 0xd8, 0xec, 0xd0, 0x3b, 0x66, 0x9e, 0x61, 0x0f, 0x83, 0x77, 0x38, 0x48, 0x61, 0xe1, 0x0c, 0x36, - 0xfa, 0x0c, 0x9a, 0x16, 0x79, 0x7c, 0x3c, 0xf2, 0x86, 0x45, 0xf7, 0x55, 0xcd, 0x8f, 0xe8, 0x1f, - 0x07, 0x12, 0x05, 0x47, 0x78, 0xea, 0x21, 0x6c, 0xa6, 0x0e, 0xc9, 0x4b, 0x05, 0x7d, 0x38, 0x32, - 0x8f, 0x69, 0xfc, 0xe0, 0x6f, 0xc0, 0x82, 0x4b, 0x3c, 0x66, 0x44, 0xe5, 0xa2, 0xd1, 0x5b, 0x9a, - 0x9e, 0x6f, 0x2c, 0x1c, 0x85, 0x8b, 0x38, 0x96, 0xab, 0xff, 0x55, 0xa0, 0x71, 0xac, 0x13, 0x93, - 0x5e, 0x03, 0x75, 0xb8, 0x97, 0xa2, 0x0e, 0x6a, 0x69, 0x10, 0x89, 0xfd, 0x94, 0xb2, 0x86, 0xfd, - 0x0c, 0x6b, 0x78, 0xf9, 0x12, 0x9c, 0x8b, 0x09, 0xc3, 0xfb, 0xb0, 0x10, 0xb9, 0x4b, 0x55, 0x49, - 0xe5, 0xb2, 0x2a, 0xa9, 0xfe, 0xbe, 0x06, 0x8b, 0x09, 0x17, 0x57, 0xb3, 0xe6, 0xd7, 0x9d, 0x18, - 0x34, 0x78, 0x19, 0xda, 0xa9, 0x72, 0x10, 0x2d, 0x1c, 0x2a, 0x82, 0xf9, 0x2d, 0xee, 0xde, 0xf9, - 0x59, 0xe3, 0x03, 0x68, 0x31, 0xe2, 0x0d, 0x29, 0x0b, 0x65, 0xe2, 0xc2, 0x16, 0xe2, 0x49, 0xff, - 0x24, 0x25, 0xc5, 0x19, 0xed, 0xf5, 0xbb, 0xb0, 0x94, 0x72, 0x76, 0xa5, 0x21, 0xec, 0x4b, 0x7e, - 0x39, 0x71, 0x70, 0x5e, 0x43, 0x74, 0x7d, 0x9c, 0x8a, 0xae, 0xad, 0xf2, 0xcb, 0x4c, 0xa4, 0x4c, - 0x59, 0x8c, 0xe1, 0x4c, 0x8c, 0xbd, 0x5e, 0x09, 0xed, 0xe2, 0x48, 0xfb, 0x57, 0x0d, 0x56, 0x13, - 0xda, 0x31, 0x37, 0xfd, 0x61, 0xaa, 0x40, 0x6f, 0x65, 0x0a, 0x74, 0xbb, 0xc8, 0xe6, 0xb9, 0x91, - 0xd3, 0x62, 0x76, 0x37, 0xfb, 0xbc, 0xd9, 0xdd, 0x73, 0x20, 0xc5, 0xea, 0x9f, 0x14, 0x58, 0x4e, - 0xdc, 0xdd, 0x35, 0x30, 0xc6, 0x07, 0x69, 0xc6, 0xf8, 0x72, 0x95, 0xa0, 0x29, 0xa1, 0x8c, 0x7f, - 0x6d, 0xa4, 0x36, 0xff, 0xad, 0x27, 0x31, 0xbf, 0x86, 0xd5, 0xb1, 0x63, 0x8e, 0x2c, 0xba, 0x67, - 0x12, 0xc3, 0x0a, 0x15, 0xf8, 0xc4, 0x38, 0x9b, 0xfd, 0x63, 0x28, 0x82, 0xa7, 0x9e, 0x6f, 0xf8, - 0x8c, 0xda, 0xec, 0xd3, 0xd8, 0xb2, 0xf7, 0x5d, 0xe9, 0x64, 0xf5, 0xd3, 0x02, 0x38, 0x5c, 0xe8, - 0x04, 0xfd, 0x00, 0x16, 0xf9, 0xc0, 0x6c, 0xe8, 0x94, 0x73, 0x6f, 0x19, 0x58, 0x2b, 0x12, 0x68, - 0xf1, 0x38, 0x16, 0xe1, 0xa4, 0x1e, 0x7a, 0x04, 0x2b, 0xae, 0x33, 0x38, 0x20, 0x36, 0x19, 0x52, - 0x3e, 0x66, 0x1c, 0x39, 0xa6, 0xa1, 0x4f, 0x04, 0xb3, 0x59, 0xe8, 0xbd, 0x1b, 0x4e, 0xa6, 0x47, - 0x79, 0x95, 0x67, 0x9c, 0x22, 0xe4, 0x97, 0x45, 0x52, 0x17, 0x41, 0x22, 0x0f, 0x5a, 0x23, 0xd9, - 0xee, 0x25, 0xd1, 0x0b, 0xfe, 0x6f, 0xd9, 0xa9, 0x12, 0x61, 0xa7, 0x29, 0xcb, 0xb8, 0xfa, 0xa7, - 0xd7, 0x71, 0xc6, 0x43, 0x29, 0x71, 0x6b, 0xfe, 0x3f, 0xc4, 0x4d, 0xfd, 0x77, 0x1d, 0x6e, 0xe6, - 0x4a, 0x25, 0xfa, 0xf1, 0x05, 0x0c, 0xe7, 0xd6, 0x73, 0x63, 0x37, 0xb9, 0x01, 0x7d, 0xf6, 0x0a, - 0x03, 0xfa, 0x2e, 0x2c, 0xeb, 0x23, 0xcf, 0xa3, 0x36, 0xcb, 0xb0, 0x9a, 0x88, 0x1a, 0xed, 0xa5, - 0xc5, 0x38, 0xab, 0x5f, 0xc4, 0xae, 0x1a, 0x57, 0x64, 0x57, 0xc9, 0x5d, 0xc8, 0x09, 0x39, 0x08, - 0xbb, 0xfc, 0x2e, 0xe4, 0xa0, 0x9c, 0xd5, 0xe7, 0xd3, 0x41, 0x80, 0x1a, 0x21, 0xcc, 0xa7, 0xa7, - 0x83, 0xd3, 0x94, 0x14, 0x67, 0xb4, 0x0b, 0x98, 0xca, 0x42, 0x55, 
0xa6, 0x82, 0x48, 0x8a, 0x84, - 0x81, 0xc8, 0xf1, 0xed, 0x2a, 0xb1, 0x5c, 0x99, 0x85, 0xa9, 0x7f, 0x53, 0xe0, 0xc5, 0xd2, 0x24, - 0x40, 0xbb, 0xa9, 0x96, 0xbb, 0x9d, 0x69, 0xb9, 0xdf, 0x2b, 0x35, 0x4c, 0xf4, 0x5d, 0xaf, 0x98, - 0x1a, 0xbd, 0x5f, 0x8d, 0x1a, 0x15, 0xcc, 0xed, 0x97, 0x73, 0xa4, 0xde, 0xf6, 0x93, 0xa7, 0x9d, - 0x99, 0xaf, 0x9e, 0x76, 0x66, 0xbe, 0x7e, 0xda, 0x99, 0xf9, 0xcd, 0xb4, 0xa3, 0x3c, 0x99, 0x76, - 0x94, 0xaf, 0xa6, 0x1d, 0xe5, 0xeb, 0x69, 0x47, 0xf9, 0xc7, 0xb4, 0xa3, 0xfc, 0xee, 0x9b, 0xce, - 0xcc, 0x67, 0xf3, 0xd2, 0xe3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x89, 0x29, 0x5c, 0x61, - 0x1b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 7942b997b60dd..694f6570d8c42 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -43,7 +43,7 @@ option go_package = "v1beta1"; // depend on its stability. It is primarily for internal use by controllers. message ControllerRevision { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -56,7 +56,7 @@ message ControllerRevision { // ControllerRevisionList is a resource containing a list of ControllerRevision objects. message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -277,15 +277,15 @@ message RollingUpdateStatefulSetStrategy { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index cf6039df693df..b77fcf7af21a4 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -60,15 +60,15 @@ type ScaleStatus struct { // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. 
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -134,13 +134,13 @@ const ( // ordering constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -541,7 +541,7 @@ type DeploymentList struct { type ControllerRevision struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -558,7 +558,7 @@ type ControllerRevision struct { type ControllerRevisionList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index da1eb5996eb37..504b858639ea7 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. 
Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", } @@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string { var map_ControllerRevisionList = map[string]string{ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of ControllerRevisions", } @@ -167,9 +167,9 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index 05f47d5f7e1f9..624bb94259d73 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -17,67 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto -/* - Package v1beta2 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentSpec - DeploymentStatus - DeploymentStrategy - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - RollingUpdateDaemonSet - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - Scale - ScaleSpec - ScaleStatus - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ package v1beta2 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" + strings "strings" - io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // Reference imports to suppress errors if they are not otherwise used. @@ -91,136 +49,874 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() 
{ + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{2} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{3} +} +func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m *DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{4} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{5} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) 
ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{6} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{7} +} +func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{8} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} + +var xxx_messageInfo_Deployment proto.InternalMessageInfo + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{9} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_42fe616264472f7e, []int{10} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{11} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{12} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{13} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{14} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{15} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{16} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{17} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{18} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) 
XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{19} +} +func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_42fe616264472f7e, []int{20} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} + return fileDescriptor_42fe616264472f7e, []int{21} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Size() 
int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{22} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{23} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{24} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) } -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{25} +} +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} 
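Aside, not part of the vendored patch: the hunks above and below come from regenerating generated.pb.go with a newer gogo/protobuf. The regeneration renames the descriptor to fileDescriptor_42fe616264472f7e, adds XXX_Unmarshal/XXX_Marshal/XXX_Merge/XXX_Size/XXX_DiscardUnknown wrappers backed by per-type proto.InternalMessageInfo values, and replaces the forward-writing MarshalTo bodies with MarshalToSizedBuffer, which sizes the buffer once and fills it from the end toward the front. The public signatures stay the same. A minimal sketch of how callers exercise these generated methods, assuming only the real k8s.io/api/apps/v1beta2 import and the Marshal/Unmarshal signatures visible in this diff:

package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

func main() {
	replicas := int32(3)

	in := appsv1beta2.Deployment{}
	in.Name = "example" // Name is promoted from the embedded ObjectMeta
	in.Spec.Replicas = &replicas

	// Marshal() computes Size() once, allocates the buffer, and delegates to
	// MarshalToSizedBuffer, which writes fields back to front (hence the i--
	// steps in the generated code in this diff).
	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	out := appsv1beta2.Deployment{}
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, *out.Spec.Replicas) // example 3
}

Only the internal write order changes; the encoded output stays compatible, so callers round-trip messages exactly as before.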
+func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{26} +} +func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{27} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{28} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +var xxx_messageInfo_StatefulSetSpec 
proto.InternalMessageInfo -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{29} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{30} + return fileDescriptor_42fe616264472f7e, []int{30} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) } +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo + func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta2.ControllerRevision") proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta2.ControllerRevisionList") @@ -247,6 +943,7 @@ func init() { proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta2.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta2.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus.SelectorEntry") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") @@ -254,10 +951,155 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetUpdateStrategy") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptor_42fe616264472f7e) +} + +var 
fileDescriptor_42fe616264472f7e = []byte{ + // 2171 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1c, 0xb7, + 0xf9, 0xd6, 0xec, 0x87, 0xb4, 0xa2, 0x2c, 0xc9, 0xa6, 0xf4, 0x93, 0x36, 0xf2, 0xaf, 0x2b, 0x63, + 0x13, 0x38, 0x4a, 0x6c, 0xcd, 0xda, 0xca, 0x07, 0x12, 0xbb, 0x68, 0xab, 0x95, 0x52, 0xdb, 0x81, + 0xbe, 0x42, 0x59, 0x06, 0x1a, 0xb4, 0xa8, 0xa9, 0x5d, 0x7a, 0x35, 0xd1, 0x7c, 0x61, 0x86, 0xb3, + 0xf5, 0xa2, 0x97, 0x9e, 0x0a, 0x14, 0x28, 0xd0, 0xf6, 0xda, 0x7f, 0xa2, 0xb7, 0xa2, 0x68, 0x6f, + 0x45, 0x50, 0xf8, 0x52, 0x20, 0xe8, 0x25, 0x39, 0x09, 0xf5, 0xe6, 0x54, 0x14, 0xbd, 0x14, 0xe8, + 0x25, 0x40, 0x81, 0x82, 0x1c, 0xce, 0x07, 0xe7, 0xc3, 0x3b, 0x52, 0x1c, 0xa5, 0x29, 0x72, 0xd3, + 0x92, 0xcf, 0xfb, 0xf0, 0x7d, 0xc9, 0x97, 0x7c, 0x1f, 0x72, 0x04, 0xbe, 0x73, 0xfc, 0x96, 0xab, + 0x6a, 0x56, 0xeb, 0xd8, 0x3b, 0x24, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, + 0x25, 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0xcd, 0x43, 0x42, 0xf1, 0x5a, 0xab, + 0x47, 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x8b, 0x3e, 0x50, 0xc5, 0xb6, + 0xa6, 0x32, 0xa0, 0x2a, 0x80, 0x4b, 0xab, 0x3d, 0x8d, 0x1e, 0x79, 0x87, 0x6a, 0xc7, 0x32, 0x5a, + 0x3d, 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xd0, 0x7b, 0xc4, 0x7f, 0xf1, 0x1f, 0xfc, 0x2f, 0x9f, 0x67, + 0xa9, 0x19, 0x1b, 0xb0, 0x63, 0x39, 0xa4, 0xd5, 0xbf, 0x99, 0x1c, 0x6b, 0xe9, 0xf5, 0x08, 0x63, + 0xe0, 0xce, 0x91, 0x66, 0x12, 0x67, 0xd0, 0xb2, 0x8f, 0x7b, 0xac, 0xc1, 0x6d, 0x19, 0x84, 0xe2, + 0x2c, 0xab, 0x56, 0x9e, 0x95, 0xe3, 0x99, 0x54, 0x33, 0x48, 0xca, 0xe0, 0xcd, 0x51, 0x06, 0x6e, + 0xe7, 0x88, 0x18, 0x38, 0x65, 0xf7, 0x5a, 0x9e, 0x9d, 0x47, 0x35, 0xbd, 0xa5, 0x99, 0xd4, 0xa5, + 0x4e, 0xd2, 0xa8, 0xf9, 0x2f, 0x05, 0xc0, 0x0d, 0xcb, 0xa4, 0x8e, 0xa5, 0xeb, 0xc4, 0x41, 0xa4, + 0xaf, 0xb9, 0x9a, 0x65, 0xc2, 0x87, 0xa0, 0xc6, 0xe2, 0xe9, 0x62, 0x8a, 0xeb, 0xca, 0x15, 0x65, + 0x65, 0x6a, 0xed, 0x86, 0x1a, 0xcd, 0x74, 0x48, 0xaf, 0xda, 0xc7, 0x3d, 0xd6, 0xe0, 0xaa, 0x0c, + 0xad, 0xf6, 0x6f, 0xaa, 0xbb, 0x87, 0x1f, 0x90, 0x0e, 0xdd, 0x26, 0x14, 0xb7, 0xe1, 0x93, 0x93, + 0xe5, 0xb1, 0xe1, 0xc9, 0x32, 0x88, 0xda, 0x50, 0xc8, 0x0a, 0x77, 0x41, 0x85, 0xb3, 0x97, 0x38, + 0xfb, 0x6a, 0x2e, 0xbb, 0x08, 0x5a, 0x45, 0xf8, 0x47, 0xef, 0x3c, 0xa6, 0xc4, 0x64, 0xee, 0xb5, + 0x2f, 0x08, 0xea, 0xca, 0x26, 0xa6, 0x18, 0x71, 0x22, 0x78, 0x1d, 0xd4, 0x1c, 0xe1, 0x7e, 0xbd, + 0x7c, 0x45, 0x59, 0x29, 0xb7, 0x2f, 0x0a, 0x54, 0x2d, 0x08, 0x0b, 0x85, 0x88, 0xe6, 0x13, 0x05, + 0x2c, 0xa4, 0xe3, 0xde, 0xd2, 0x5c, 0x0a, 0xbf, 0x9f, 0x8a, 0x5d, 0x2d, 0x16, 0x3b, 0xb3, 0xe6, + 0x91, 0x87, 0x03, 0x07, 0x2d, 0xb1, 0xb8, 0xf7, 0x40, 0x55, 0xa3, 0xc4, 0x70, 0xeb, 0xa5, 0x2b, + 0xe5, 0x95, 0xa9, 0xb5, 0x6b, 0x6a, 0x4e, 0x02, 0xab, 0x69, 0xef, 0xda, 0xd3, 0x82, 0xb7, 0x7a, + 0x8f, 0x31, 0x20, 0x9f, 0xa8, 0xf9, 0xb3, 0x12, 0x98, 0xdc, 0xc4, 0xc4, 0xb0, 0xcc, 0x7d, 0x42, + 0xcf, 0x61, 0xe5, 0xee, 0x82, 0x8a, 0x6b, 0x93, 0x8e, 0x58, 0xb9, 0xab, 0xb9, 0x01, 0x84, 0x3e, + 0xed, 0xdb, 0xa4, 0x13, 0x2d, 0x19, 0xfb, 0x85, 0x38, 0x03, 0xdc, 0x03, 0xe3, 0x2e, 0xc5, 0xd4, + 0x73, 0xf9, 0x82, 0x4d, 0xad, 0xad, 0x14, 0xe0, 0xe2, 0xf8, 0xf6, 0x8c, 0x60, 0x1b, 0xf7, 0x7f, + 0x23, 0xc1, 0xd3, 0xfc, 0x5b, 0x09, 0xc0, 0x10, 0xbb, 0x61, 0x99, 0x5d, 0x8d, 0xb2, 0x74, 0xbe, + 0x05, 0x2a, 0x74, 0x60, 0x13, 0x3e, 0x21, 0x93, 0xed, 0xab, 0x81, 0x2b, 0xf7, 0x07, 0x36, 0xf9, + 0xec, 0x64, 0x79, 0x21, 0x6d, 0xc1, 0x7a, 0x10, 0xb7, 0x81, 0x5b, 0xa1, 0x93, 0x25, 0x6e, 0xfd, + 0xba, 0x3c, 0xf4, 0x67, 0x27, 
0xcb, 0x19, 0x67, 0x87, 0x1a, 0x32, 0xc9, 0x0e, 0xc2, 0x3e, 0x80, + 0x3a, 0x76, 0xe9, 0x7d, 0x07, 0x9b, 0xae, 0x3f, 0x92, 0x66, 0x10, 0x11, 0xfe, 0xab, 0xc5, 0x16, + 0x8a, 0x59, 0xb4, 0x97, 0x84, 0x17, 0x70, 0x2b, 0xc5, 0x86, 0x32, 0x46, 0x80, 0x57, 0xc1, 0xb8, + 0x43, 0xb0, 0x6b, 0x99, 0xf5, 0x0a, 0x8f, 0x22, 0x9c, 0x40, 0xc4, 0x5b, 0x91, 0xe8, 0x85, 0xaf, + 0x80, 0x09, 0x83, 0xb8, 0x2e, 0xee, 0x91, 0x7a, 0x95, 0x03, 0x67, 0x05, 0x70, 0x62, 0xdb, 0x6f, + 0x46, 0x41, 0x7f, 0xf3, 0xb7, 0x0a, 0x98, 0x0e, 0x67, 0xee, 0x1c, 0x76, 0xce, 0x1d, 0x79, 0xe7, + 0x34, 0x47, 0x27, 0x4b, 0xce, 0x86, 0xf9, 0xb0, 0x1c, 0x73, 0x9c, 0xa5, 0x23, 0xfc, 0x01, 0xa8, + 0xb9, 0x44, 0x27, 0x1d, 0x6a, 0x39, 0xc2, 0xf1, 0xd7, 0x0a, 0x3a, 0x8e, 0x0f, 0x89, 0xbe, 0x2f, + 0x4c, 0xdb, 0x17, 0x98, 0xe7, 0xc1, 0x2f, 0x14, 0x52, 0xc2, 0xf7, 0x40, 0x8d, 0x12, 0xc3, 0xd6, + 0x31, 0x25, 0x62, 0xd7, 0xbc, 0x18, 0x77, 0x9e, 0xe5, 0x0c, 0x23, 0xdb, 0xb3, 0xba, 0xf7, 0x05, + 0x8c, 0x6f, 0x99, 0x70, 0x32, 0x82, 0x56, 0x14, 0xd2, 0x40, 0x1b, 0xcc, 0x78, 0x76, 0x97, 0x21, + 0x29, 0x3b, 0xce, 0x7b, 0x03, 0x91, 0x43, 0x37, 0x46, 0xcf, 0xca, 0x81, 0x64, 0xd7, 0x5e, 0x10, + 0xa3, 0xcc, 0xc8, 0xed, 0x28, 0xc1, 0x0f, 0xd7, 0xc1, 0xac, 0xa1, 0x99, 0x88, 0xe0, 0xee, 0x60, + 0x9f, 0x74, 0x2c, 0xb3, 0xeb, 0xf2, 0x54, 0xaa, 0xb6, 0x17, 0x05, 0xc1, 0xec, 0xb6, 0xdc, 0x8d, + 0x92, 0x78, 0xb8, 0x05, 0xe6, 0x83, 0x03, 0xf8, 0xae, 0xe6, 0x52, 0xcb, 0x19, 0x6c, 0x69, 0x86, + 0x46, 0xeb, 0xe3, 0x9c, 0xa7, 0x3e, 0x3c, 0x59, 0x9e, 0x47, 0x19, 0xfd, 0x28, 0xd3, 0xaa, 0xf9, + 0xab, 0x71, 0x30, 0x9b, 0x38, 0x17, 0xe0, 0x03, 0xb0, 0xd0, 0xf1, 0x1c, 0x87, 0x98, 0x74, 0xc7, + 0x33, 0x0e, 0x89, 0xb3, 0xdf, 0x39, 0x22, 0x5d, 0x4f, 0x27, 0x5d, 0xbe, 0xac, 0xd5, 0x76, 0x43, + 0xf8, 0xba, 0xb0, 0x91, 0x89, 0x42, 0x39, 0xd6, 0xf0, 0x5d, 0x00, 0x4d, 0xde, 0xb4, 0xad, 0xb9, + 0x6e, 0xc8, 0x59, 0xe2, 0x9c, 0xe1, 0x56, 0xdc, 0x49, 0x21, 0x50, 0x86, 0x15, 0xf3, 0xb1, 0x4b, + 0x5c, 0xcd, 0x21, 0xdd, 0xa4, 0x8f, 0x65, 0xd9, 0xc7, 0xcd, 0x4c, 0x14, 0xca, 0xb1, 0x86, 0x6f, + 0x80, 0x29, 0x7f, 0x34, 0x3e, 0xe7, 0x62, 0x71, 0xe6, 0x04, 0xd9, 0xd4, 0x4e, 0xd4, 0x85, 0xe2, + 0x38, 0x16, 0x9a, 0x75, 0xe8, 0x12, 0xa7, 0x4f, 0xba, 0x77, 0x7c, 0x71, 0xc0, 0x2a, 0x68, 0x95, + 0x57, 0xd0, 0x30, 0xb4, 0xdd, 0x14, 0x02, 0x65, 0x58, 0xb1, 0xd0, 0xfc, 0xac, 0x49, 0x85, 0x36, + 0x2e, 0x87, 0x76, 0x90, 0x89, 0x42, 0x39, 0xd6, 0x2c, 0xf7, 0x7c, 0x97, 0xd7, 0xfb, 0x58, 0xd3, + 0xf1, 0xa1, 0x4e, 0xea, 0x13, 0x72, 0xee, 0xed, 0xc8, 0xdd, 0x28, 0x89, 0x87, 0x77, 0xc0, 0x25, + 0xbf, 0xe9, 0xc0, 0xc4, 0x21, 0x49, 0x8d, 0x93, 0xbc, 0x20, 0x48, 0x2e, 0xed, 0x24, 0x01, 0x28, + 0x6d, 0x03, 0x6f, 0x81, 0x99, 0x8e, 0xa5, 0xeb, 0x3c, 0x1f, 0x37, 0x2c, 0xcf, 0xa4, 0xf5, 0x49, + 0xce, 0x02, 0xd9, 0x1e, 0xda, 0x90, 0x7a, 0x50, 0x02, 0x09, 0x7f, 0x08, 0x40, 0x27, 0x28, 0x0c, + 0x6e, 0x1d, 0x8c, 0x50, 0x00, 0xe9, 0xb2, 0x14, 0x55, 0xe6, 0xb0, 0xc9, 0x45, 0x31, 0xca, 0xe6, + 0x87, 0x0a, 0x58, 0xcc, 0xd9, 0xe8, 0xf0, 0xdb, 0x52, 0x11, 0xbc, 0x96, 0x28, 0x82, 0x97, 0x73, + 0xcc, 0x62, 0x95, 0xf0, 0x08, 0x4c, 0x33, 0x41, 0xa2, 0x99, 0x3d, 0x1f, 0x22, 0xce, 0xb2, 0x56, + 0x6e, 0x00, 0x28, 0x8e, 0x8e, 0x4e, 0xe5, 0x4b, 0xc3, 0x93, 0xe5, 0x69, 0xa9, 0x0f, 0xc9, 0xc4, + 0xcd, 0x9f, 0x97, 0x00, 0xd8, 0x24, 0xb6, 0x6e, 0x0d, 0x0c, 0x62, 0x9e, 0x87, 0xa6, 0xb9, 0x27, + 0x69, 0x9a, 0x97, 0xf3, 0x97, 0x24, 0x74, 0x2a, 0x57, 0xd4, 0xbc, 0x97, 0x10, 0x35, 0xaf, 0x14, + 0x21, 0x7b, 0xb6, 0xaa, 0xf9, 0xb8, 0x0c, 0xe6, 0x22, 0x70, 0x24, 0x6b, 0x6e, 0x4b, 0x2b, 0xfa, + 0x72, 0x62, 0x45, 0x17, 0x33, 0x4c, 0xbe, 0x30, 0x5d, 
0xf3, 0x01, 0x98, 0x61, 0xaa, 0xc3, 0x5f, + 0x3f, 0xae, 0x69, 0xc6, 0x4f, 0xad, 0x69, 0xc2, 0x4a, 0xb4, 0x25, 0x31, 0xa1, 0x04, 0x73, 0x8e, + 0x86, 0x9a, 0xf8, 0x2a, 0x6a, 0xa8, 0xdf, 0x29, 0x60, 0x26, 0x5a, 0xa6, 0x73, 0x10, 0x51, 0x77, + 0x65, 0x11, 0xf5, 0x62, 0x81, 0xe4, 0xcc, 0x51, 0x51, 0x1f, 0x57, 0xe2, 0xae, 0x73, 0x19, 0xb5, + 0xc2, 0xae, 0x60, 0xb6, 0xae, 0x75, 0xb0, 0x2b, 0xea, 0xed, 0x05, 0xff, 0xfa, 0xe5, 0xb7, 0xa1, + 0xb0, 0x57, 0x12, 0x5c, 0xa5, 0x2f, 0x56, 0x70, 0x95, 0x9f, 0x8f, 0xe0, 0xfa, 0x1e, 0xa8, 0xb9, + 0x81, 0xd4, 0xaa, 0x70, 0xca, 0x6b, 0x85, 0x36, 0xb6, 0x50, 0x59, 0x21, 0x75, 0xa8, 0xaf, 0x42, + 0xba, 0x2c, 0x65, 0x55, 0xfd, 0x32, 0x95, 0x15, 0x4b, 0x74, 0x1b, 0x7b, 0x2e, 0xe9, 0xf2, 0x4d, + 0x55, 0x8b, 0x12, 0x7d, 0x8f, 0xb7, 0x22, 0xd1, 0x0b, 0x0f, 0xc0, 0xa2, 0xed, 0x58, 0x3d, 0x87, + 0xb8, 0xee, 0x26, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x10, 0x80, 0x5f, 0x13, 0x2f, 0x0f, 0x4f, 0x96, + 0x17, 0xf7, 0xb2, 0x21, 0x28, 0xcf, 0xb6, 0xf9, 0xc7, 0x0a, 0xb8, 0x98, 0x3c, 0x1b, 0x73, 0x64, + 0x8a, 0x72, 0x26, 0x99, 0x72, 0x3d, 0x96, 0xa7, 0xbe, 0x86, 0x8b, 0x3d, 0x15, 0xa4, 0x72, 0x75, + 0x1d, 0xcc, 0x0a, 0x59, 0x12, 0x74, 0x0a, 0xa1, 0x16, 0x2e, 0xcf, 0x81, 0xdc, 0x8d, 0x92, 0x78, + 0x78, 0x1b, 0x4c, 0x3b, 0x5c, 0x79, 0x05, 0x04, 0xbe, 0x7a, 0xf9, 0x3f, 0x41, 0x30, 0x8d, 0xe2, + 0x9d, 0x48, 0xc6, 0x32, 0xe5, 0x12, 0x09, 0x92, 0x80, 0xa0, 0x22, 0x2b, 0x97, 0xf5, 0x24, 0x00, + 0xa5, 0x6d, 0xe0, 0x36, 0x98, 0xf3, 0xcc, 0x34, 0x95, 0x9f, 0x6b, 0x97, 0x05, 0xd5, 0xdc, 0x41, + 0x1a, 0x82, 0xb2, 0xec, 0xe0, 0x43, 0x49, 0xcc, 0x8c, 0xf3, 0xf3, 0xe4, 0x7a, 0x81, 0x3d, 0x51, + 0x58, 0xcd, 0x64, 0x48, 0xad, 0x5a, 0x51, 0xa9, 0xd5, 0xfc, 0x83, 0x02, 0x60, 0x7a, 0x1f, 0x8e, + 0x7c, 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0x6e, 0x14, 0xd4, 0x3f, 0xd1, 0x81, + 0x5a, 0x4c, 0x00, 0x89, 0x89, 0x3e, 0x9f, 0x47, 0x9d, 0xa2, 0x02, 0x28, 0x72, 0xea, 0x39, 0x08, + 0xa0, 0x18, 0xd9, 0xb3, 0x05, 0xd0, 0xdf, 0x4b, 0x60, 0x2e, 0x02, 0x17, 0x16, 0x40, 0x19, 0x26, + 0x5f, 0x3f, 0xec, 0x14, 0x13, 0x25, 0xd1, 0xd4, 0xfd, 0x37, 0x89, 0x92, 0xc8, 0xab, 0x1c, 0x51, + 0xf2, 0x9b, 0x52, 0xdc, 0xf5, 0x53, 0x8a, 0x92, 0xe7, 0xf0, 0xc2, 0xf1, 0x95, 0xd3, 0x35, 0xcd, + 0x3f, 0x95, 0xc1, 0xc5, 0xe4, 0x3e, 0x94, 0x0a, 0xa4, 0x32, 0xb2, 0x40, 0xee, 0x81, 0xf9, 0x47, + 0x9e, 0xae, 0x0f, 0x78, 0x0c, 0xb1, 0x2a, 0xe9, 0x97, 0xd6, 0xff, 0x17, 0x96, 0xf3, 0xdf, 0xcd, + 0xc0, 0xa0, 0x4c, 0xcb, 0x74, 0xbd, 0xac, 0x7c, 0xde, 0x7a, 0x59, 0x3d, 0x43, 0xbd, 0xcc, 0x96, + 0x1c, 0xe5, 0x33, 0x49, 0x8e, 0xd3, 0x15, 0xcb, 0x8c, 0x83, 0x6b, 0xe4, 0xd5, 0xff, 0xa7, 0x0a, + 0x58, 0xc8, 0xbe, 0x70, 0x43, 0x1d, 0xcc, 0x18, 0xf8, 0x71, 0xfc, 0xe1, 0x63, 0x54, 0x11, 0xf1, + 0xa8, 0xa6, 0xab, 0xfe, 0x27, 0x23, 0xf5, 0x9e, 0x49, 0x77, 0x9d, 0x7d, 0xea, 0x68, 0x66, 0xcf, + 0xaf, 0xbc, 0xdb, 0x12, 0x17, 0x4a, 0x70, 0x37, 0x3f, 0x55, 0xc0, 0x62, 0x4e, 0xe5, 0x3b, 0x5f, + 0x4f, 0xe0, 0xfb, 0xa0, 0x66, 0xe0, 0xc7, 0xfb, 0x9e, 0xd3, 0xcb, 0xaa, 0xd5, 0xc5, 0xc6, 0xe1, + 0x5b, 0x71, 0x5b, 0xb0, 0xa0, 0x90, 0xaf, 0xb9, 0x0b, 0xae, 0x48, 0x41, 0xb2, 0x9d, 0x43, 0x1e, + 0x79, 0x3a, 0xdf, 0x44, 0x42, 0x6c, 0x5c, 0x03, 0x93, 0x36, 0x76, 0xa8, 0x16, 0x4a, 0xd5, 0x6a, + 0x7b, 0x7a, 0x78, 0xb2, 0x3c, 0xb9, 0x17, 0x34, 0xa2, 0xa8, 0xbf, 0xf9, 0x6f, 0x05, 0x54, 0xf7, + 0x3b, 0x58, 0x27, 0xe7, 0x50, 0xed, 0x37, 0xa5, 0x6a, 0x9f, 0xff, 0x92, 0xce, 0xfd, 0xc9, 0x2d, + 0xf4, 0x5b, 0x89, 0x42, 0xff, 0xd2, 0x08, 0x9e, 0x67, 0xd7, 0xf8, 0xb7, 0xc1, 0x64, 0x38, 0xdc, + 0xe9, 0x0e, 0xa0, 0xe6, 0xaf, 0x4b, 0x60, 0x2a, 0x36, 0xc4, 0x29, 0x8f, 0xaf, 
0x87, 0xd2, 0x99, + 0xcd, 0x36, 0xe6, 0x5a, 0x91, 0x40, 0xd4, 0xe0, 0x7c, 0x7e, 0xc7, 0xa4, 0x4e, 0xfc, 0x82, 0x97, + 0x3e, 0xb6, 0xbf, 0x05, 0x66, 0x28, 0x76, 0x7a, 0x84, 0x06, 0x7d, 0x7c, 0xc2, 0x26, 0xa3, 0x07, + 0x8f, 0xfb, 0x52, 0x2f, 0x4a, 0xa0, 0x97, 0x6e, 0x83, 0x69, 0x69, 0x30, 0x78, 0x11, 0x94, 0x8f, + 0xc9, 0xc0, 0x97, 0x3d, 0x88, 0xfd, 0x09, 0xe7, 0x41, 0xb5, 0x8f, 0x75, 0xcf, 0xcf, 0xf3, 0x49, + 0xe4, 0xff, 0xb8, 0x55, 0x7a, 0x4b, 0x69, 0xfe, 0x82, 0x4d, 0x4e, 0x94, 0x9c, 0xe7, 0x90, 0x5d, + 0xef, 0x4a, 0xd9, 0x95, 0xff, 0x51, 0x2f, 0xbe, 0x65, 0xf2, 0x72, 0x0c, 0x25, 0x72, 0xec, 0xd5, + 0x42, 0x6c, 0xcf, 0xce, 0xb4, 0x7f, 0x94, 0xc0, 0x7c, 0x0c, 0x1d, 0xc9, 0xc9, 0x6f, 0x4a, 0x72, + 0x72, 0x25, 0x21, 0x27, 0xeb, 0x59, 0x36, 0x5f, 0xeb, 0xc9, 0xd1, 0x7a, 0xf2, 0xf7, 0x0a, 0x98, + 0x8d, 0xcd, 0xdd, 0x39, 0x08, 0xca, 0x7b, 0xb2, 0xa0, 0x7c, 0xa9, 0x48, 0xd2, 0xe4, 0x28, 0xca, + 0x3f, 0x57, 0x25, 0xe7, 0xff, 0xe7, 0xdf, 0xb9, 0x7e, 0x0c, 0xe6, 0xfb, 0x96, 0xee, 0x19, 0x64, + 0x43, 0xc7, 0x9a, 0x11, 0x00, 0x98, 0x02, 0x2b, 0x27, 0xef, 0x72, 0x21, 0x3d, 0x71, 0x5c, 0xcd, + 0xa5, 0xc4, 0xa4, 0x0f, 0x22, 0xcb, 0x48, 0xf7, 0x3d, 0xc8, 0xa0, 0x43, 0x99, 0x83, 0xc0, 0x37, + 0xc0, 0x14, 0x53, 0x4e, 0x5a, 0x87, 0xec, 0x60, 0x23, 0x48, 0xac, 0xf0, 0x13, 0xd6, 0x7e, 0xd4, + 0x85, 0xe2, 0x38, 0x78, 0x04, 0xe6, 0x6c, 0xab, 0xbb, 0x8d, 0x4d, 0xdc, 0x23, 0x4c, 0x66, 0xec, + 0x59, 0xba, 0xd6, 0x19, 0xf0, 0xc7, 0xaf, 0xc9, 0xf6, 0x9b, 0xc1, 0xc3, 0xc6, 0x5e, 0x1a, 0xc2, + 0x2e, 0x89, 0x19, 0xcd, 0x7c, 0x53, 0x67, 0x51, 0x42, 0x27, 0xf5, 0xd9, 0xd5, 0x7f, 0x76, 0x5e, + 0x2b, 0x92, 0x61, 0x67, 0xfc, 0xf0, 0x9a, 0xf7, 0xb6, 0x57, 0x3b, 0xd3, 0x57, 0xd3, 0x7f, 0x56, + 0xc0, 0xa5, 0xd4, 0x51, 0xf9, 0x25, 0xbe, 0xae, 0xa5, 0xa4, 0x7e, 0xf9, 0x14, 0x52, 0x7f, 0x1d, + 0xcc, 0x8a, 0x0f, 0xb6, 0x89, 0x9b, 0x42, 0x78, 0x63, 0xdb, 0x90, 0xbb, 0x51, 0x12, 0x9f, 0xf5, + 0xba, 0x57, 0x3d, 0xe5, 0xeb, 0x5e, 0xdc, 0x0b, 0xf1, 0x0f, 0x48, 0x7e, 0xea, 0xa5, 0xbd, 0x10, + 0xff, 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0x13, 0xb2, 0x42, 0x38, 0x90, 0x7a, + 0x51, 0x02, 0xfd, 0xb9, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0xab, 0x45, 0xf2, 0xb9, 0xf8, 0xdd, + 0xe4, 0x2f, 0x0a, 0x78, 0x21, 0x77, 0x23, 0xc0, 0x75, 0xa9, 0xec, 0xae, 0x26, 0xca, 0xee, 0x37, + 0x72, 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0x7e, 0x9a, 0x7b, 0xbb, 0xd8, 0xd3, 0x5c, 0x86, 0x76, 0x1f, + 0xfd, 0x46, 0xd7, 0x5e, 0x7d, 0xf2, 0xb4, 0x31, 0xf6, 0xd1, 0xd3, 0xc6, 0xd8, 0x27, 0x4f, 0x1b, + 0x63, 0x3f, 0x19, 0x36, 0x94, 0x27, 0xc3, 0x86, 0xf2, 0xd1, 0xb0, 0xa1, 0x7c, 0x32, 0x6c, 0x28, + 0x7f, 0x1d, 0x36, 0x94, 0x5f, 0x7e, 0xda, 0x18, 0x7b, 0x7f, 0x42, 0x8c, 0xf8, 0x9f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x70, 0x2d, 0x26, 0x9d, 0xec, 0x28, 0x00, 0x00, +} + func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -265,36 +1107,45 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + 
dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -302,37 +1153,46 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -340,41 +1200,52 @@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, 
err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -382,41 +1253,52 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -424,37 +1306,46 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- 
+ dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -462,51 +1353,62 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n9, err := m.Selector.MarshalTo(dAtA[i:]) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n9 - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n10, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -514,58 +1416,65 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -573,31 +1482,39 @@ func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -605,41 +1522,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n14, err := 
m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n15, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -647,49 +1575,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -697,37 +1638,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, 
error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n18, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -735,69 +1685,80 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 + } + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n19 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x38 - i++ - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i++ - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -805,52 +1766,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -858,31 +1826,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := 
m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -890,41 +1866,52 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -932,41 +1919,52 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 
0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -974,37 +1972,46 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1012,43 +2019,52 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n29, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n29 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + if m.Replicas != nil { + i = 
encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1056,44 +2072,51 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1101,27 +2124,34 @@ func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n30 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1129,37 +2159,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1167,22 +2206,27 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Partition != nil { - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1190,41 +2234,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n35, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n35 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) 
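// ---------------------------------------------------------------------------
// Editor's annotation (not part of the vendored diff): every Marshal hunk in
// this file follows the same regenerated gogo/protobuf convention. Marshal
// still allocates a buffer of exactly Size() bytes, but MarshalToSizedBuffer
// now fills it from the END: fields are written in reverse field-number order,
// each as payload first, then its length varint, then the one-byte tag, and
// the method returns len(dAtA) - i, the number of bytes occupied at the tail.
// A minimal hand-written sketch of that shape, using a hypothetical toyMessage
// type and the encodeVarintGenerated helper defined later in this file, is:
// ---------------------------------------------------------------------------

type toyMessage struct {
	Name     string // field 1, wire type 2 (length-delimited) -> tag byte 0xa
	Replicas *int32 // field 2, wire type 0 (varint)            -> tag byte 0x10
}

// marshalToSizedBuffer assumes len(dAtA) is at least the message's size.
func (m *toyMessage) marshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	if m.Replicas != nil { // highest-numbered field is emitted first
		i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas))
		i--
		dAtA[i] = 0x10
	}
	i -= len(m.Name) // field 1: payload, then its length varint, then the tag
	copy(dAtA[i:], m.Name)
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
	i--
	dAtA[i] = 0xa
	return len(dAtA) - i, nil // bytes written at the tail of dAtA
}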
{ size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1232,20 +2287,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1253,46 +2313,54 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1300,41 +2368,52 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n37, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n37 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n38, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n38 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1342,41 +2421,52 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n39, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1384,37 +2474,46 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n40, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != 
nil { - return 0, err - } - i += n40 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1422,73 +2521,88 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n41, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n41 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n42, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n42 + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - 
i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n43, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1496,57 +2610,66 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1554,37 +2677,50 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) 
{ } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n44, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n44 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1596,6 +2732,9 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1610,6 +2749,9 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1622,6 +2764,9 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1638,6 +2783,9 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1652,6 +2800,9 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Selector != nil { @@ -1670,6 +2821,9 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -1693,6 +2847,9 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1705,6 +2862,9 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1717,6 +2877,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1735,6 +2898,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1749,6 +2915,9 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1774,6 +2943,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ 
-1795,6 +2967,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1807,6 +2982,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1819,6 +2997,9 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1835,6 +3016,9 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1849,6 +3033,9 @@ func (m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -1865,6 +3052,9 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1882,6 +3072,9 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1892,6 +3085,9 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -1906,6 +3102,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Partition != nil { @@ -1915,6 +3114,9 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1927,6 +3129,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1934,6 +3139,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1951,6 +3159,9 @@ func (m *ScaleStatus) Size() (n int) { } func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1963,6 +3174,9 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1979,6 +3193,9 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1993,6 +3210,9 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -2023,6 +3243,9 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -2047,6 +3270,9 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2059,14 +3285,7 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return 
(math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2076,8 +3295,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -2087,9 +3306,14 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2099,7 +3323,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2113,7 +3337,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2124,9 +3348,14 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` 
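// ---------------------------------------------------------------------------
// Editor's annotation (not part of the vendored diff): the regenerated
// encodeVarintGenerated above no longer appends at offset and returns
// offset+1; it first steps offset back by sovGenerated(v), writes the varint
// into that slot, and returns the new start offset, so callers keep filling
// the buffer right to left. sovGenerated is now branch-free:
// (bits.Len64(x|1)+6)/7 is ceil(bitlen/7) with a minimum of one byte. A
// self-contained check of both helpers, with hypothetical local names, is:
// ---------------------------------------------------------------------------

package main

import (
	"fmt"
	"math/bits"
)

// sov is the encoded size of x as a protobuf varint (1..10 bytes).
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint writes x so that it ends where offset previously pointed and
// returns the new, smaller offset, mirroring encodeVarintGenerated above.
func encodeVarint(dAtA []byte, offset int, x uint64) int {
	offset -= sov(x)
	base := offset
	for x >= 1<<7 {
		dAtA[offset] = uint8(x&0x7f | 0x80)
		x >>= 7
		offset++
	}
	dAtA[offset] = uint8(x)
	return base
}

func main() {
	buf := make([]byte, 16)
	i := len(buf)
	i = encodeVarint(buf, i, 300)                       // 300 encodes as 0xac 0x02
	fmt.Printf("size=%d bytes=%x\n", sov(300), buf[i:]) // size=2 bytes=ac02
}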
+ strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2136,8 +3365,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -2149,6 +3378,11 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -2159,7 +3393,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2170,7 +3404,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -2180,7 +3414,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, 
``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2196,8 +3430,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2206,9 +3440,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2219,8 +3458,8 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -2234,13 +3473,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + 
fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -2253,7 +3497,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -2263,7 +3507,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2277,7 +3521,7 @@ func (this *ReplicaSetCondition) String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2288,9 +3532,14 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2301,8 +3550,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", 
"k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -2312,13 +3561,18 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2328,7 +3582,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2338,8 +3592,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2359,7 +3613,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2403,7 +3657,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2417,7 +3671,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2428,9 +3682,14 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2439,11 +3698,16 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -2456,6 +3720,11 @@ func (this *StatefulSetStatus) String() string { if this == 
nil { return "nil" } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -2465,7 +3734,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -2476,7 +3745,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -2504,7 +3773,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2532,7 +3801,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2541,6 +3810,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2562,7 +3834,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2571,6 +3843,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2592,7 +3867,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2606,6 +3881,9 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2633,7 +3911,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2661,7 +3939,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2670,6 +3948,9 @@ func (m *ControllerRevisionList) 
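// ---------------------------------------------------------------------------
// Editor's annotation (not part of the vendored diff): the String() hunks
// above replace fmt.Sprintf("%v", <slice>) with an explicit per-element
// builder and shorten the replaced type names to the new import aliases
// (v1, v11, runtime, intstr). Calling f.String() per element means the
// generated pointer-receiver Stringers are actually used, which plain %v on a
// value slice would not do. The builder shape, written as a hypothetical
// helper that assumes this file's existing fmt and strings imports, is:
// ---------------------------------------------------------------------------

// buildRepeatedString mirrors the generated repeatedStringForItems pattern.
// The no-op strings.Replace(s, elem, elem, 1) mimics the codegen template; it
// only matters when the proto and Go type names differ.
func buildRepeatedString(items []fmt.Stringer, elem string) string {
	s := "[]" + elem + "{"
	for _, f := range items {
		s += strings.Replace(strings.Replace(f.String(), elem, elem, 1), `&`, ``, 1) + ","
	}
	return s + "}"
}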
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2691,7 +3972,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2700,6 +3981,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2717,6 +4001,9 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2744,7 +4031,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2772,7 +4059,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2781,6 +4068,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2802,7 +4092,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2811,6 +4101,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2832,7 +4125,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2841,6 +4134,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2857,6 +4153,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2884,7 +4183,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2912,7 +4211,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2922,6 +4221,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2941,7 +4243,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -2951,6 +4253,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2970,7 +4275,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2979,6 +4284,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3000,7 +4308,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3010,6 +4318,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3029,7 +4340,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3039,6 +4350,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3053,6 +4367,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3080,7 +4397,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3108,7 +4425,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3117,6 +4434,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3138,7 +4458,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3147,6 +4467,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3164,6 +4487,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3191,7 +4517,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3219,7 +4545,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3228,11 +4554,14 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3252,7 +4581,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3261,6 +4590,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3282,7 +4614,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3291,6 +4623,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3312,7 +4647,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3331,7 +4666,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3346,6 +4681,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3373,7 +4711,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3401,7 +4739,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3420,7 +4758,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3439,7 +4777,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + m.DesiredNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3458,7 +4796,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift + m.NumberReady |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3477,7 +4815,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3496,7 +4834,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3515,7 +4853,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift + m.NumberAvailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3534,7 +4872,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift + m.NumberUnavailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3553,7 +4891,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3573,7 +4911,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3582,6 +4920,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3599,6 +4940,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3626,7 +4970,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3654,7 +4998,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3664,6 +5008,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3683,7 +5030,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3692,6 +5039,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3711,6 +5061,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3738,7 +5091,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3766,7 +5119,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3775,6 +5128,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3796,7 
+5152,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3805,6 +5161,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3826,7 +5185,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3835,6 +5194,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3851,6 +5213,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3878,7 +5243,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3906,7 +5271,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3916,6 +5281,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3935,7 +5303,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3945,6 +5313,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3964,7 +5335,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3974,6 +5345,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3993,7 +5367,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4003,6 +5377,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4022,7 +5399,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4031,6 +5408,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4052,7 +5432,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4061,6 +5441,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4077,6 +5460,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4104,7 +5490,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4132,7 +5518,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4141,6 +5527,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4162,7 +5551,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4171,6 +5560,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4188,6 +5580,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4215,7 +5610,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4243,7 +5638,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4263,7 +5658,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4272,11 +5667,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4296,7 +5694,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4305,6 +5703,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4326,7 +5727,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4335,6 +5736,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4356,7 +5760,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4375,7 +5779,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4395,7 +5799,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4415,7 +5819,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4430,6 +5834,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4457,7 +5864,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4485,7 +5892,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4504,7 +5911,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4523,7 +5930,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4542,7 +5949,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4561,7 +5968,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4580,7 +5987,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4589,6 +5996,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4611,7 +6021,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= 
int32(b&0x7F) << shift if b < 0x80 { break } @@ -4630,7 +6040,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4645,6 +6055,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4672,7 +6085,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4700,7 +6113,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4710,6 +6123,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4729,7 +6145,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4738,6 +6154,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4757,6 +6176,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4784,7 +6206,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4812,7 +6234,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4821,6 +6243,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4842,7 +6267,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4851,6 +6276,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4872,7 +6300,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4881,6 +6309,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4897,6 +6328,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4924,7 +6358,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4952,7 +6386,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4962,6 +6396,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4981,7 +6418,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4991,6 +6428,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5010,7 +6450,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5019,6 +6459,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5040,7 +6483,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5050,6 +6493,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5069,7 +6515,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5079,6 +6525,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5093,6 +6542,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5120,7 +6572,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5148,7 +6600,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5157,6 +6609,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5178,7 
+6633,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5187,6 +6642,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5204,6 +6662,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5231,7 +6692,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5259,7 +6720,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5279,7 +6740,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5288,11 +6749,14 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5312,7 +6776,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5321,6 +6785,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5342,7 +6809,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5356,6 +6823,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5383,7 +6853,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5411,7 +6881,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5430,7 +6900,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5449,7 +6919,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5468,7 +6938,7 @@ 
func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5487,7 +6957,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5506,7 +6976,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5515,6 +6985,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5532,6 +7005,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5559,7 +7035,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5587,7 +7063,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5596,11 +7072,14 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5615,6 +7094,9 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5642,7 +7124,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5670,7 +7152,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5679,11 +7161,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5703,7 +7188,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5712,11 +7197,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen 
+ if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5731,6 +7219,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5758,7 +7249,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5786,7 +7277,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5801,6 +7292,9 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5828,7 +7322,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5856,7 +7350,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5865,6 +7359,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5886,7 +7383,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5895,6 +7392,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5916,7 +7416,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5925,6 +7425,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5941,6 +7444,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5968,7 +7474,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5996,7 +7502,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6010,6 +7516,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6037,7 +7546,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6065,7 +7574,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6084,7 +7593,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6093,6 +7602,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6113,7 +7625,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6130,7 +7642,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6140,6 +7652,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6156,7 +7671,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6166,6 +7681,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -6202,7 +7720,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6212,6 +7730,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6226,6 +7747,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6253,7 +7777,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6281,7 +7805,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6290,6 +7814,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6311,7 +7838,7 @@ func (m *StatefulSet) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6320,6 +7847,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6341,7 +7871,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6350,6 +7880,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6366,6 +7899,9 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6393,7 +7929,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6421,7 +7957,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6431,6 +7967,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6450,7 +7989,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6460,6 +7999,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6479,7 +8021,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6488,6 +8030,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6509,7 +8054,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6519,6 +8064,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6538,7 +8086,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6548,6 +8096,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6562,6 +8113,9 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6589,7 +8143,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6617,7 +8171,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6626,6 +8180,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6647,7 +8204,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6656,6 +8213,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6673,6 +8233,9 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6700,7 +8263,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6728,7 +8291,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6748,7 +8311,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6757,11 +8320,14 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6781,7 +8347,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6790,6 +8356,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6811,7 +8380,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6820,10 +8389,13 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -6842,7 +8414,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6852,6 +8424,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6871,7 +8446,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6881,6 +8456,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6900,7 +8478,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6909,6 +8487,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6930,7 +8511,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6945,6 +8526,9 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6972,7 +8556,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7000,7 +8584,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7019,7 +8603,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7038,7 +8622,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7057,7 +8641,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7076,7 +8660,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { 
break } @@ -7095,7 +8679,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7105,6 +8689,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7124,7 +8711,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7134,6 +8721,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7153,7 +8743,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7173,7 +8763,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7182,6 +8772,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7199,6 +8792,9 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7226,7 +8822,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7254,7 +8850,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7264,6 +8860,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7283,7 +8882,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7292,6 +8891,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7311,6 +8913,9 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7377,10 +8982,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -7409,6 
+9017,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -7427,147 +9038,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2176 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1c, 0xb7, - 0x19, 0xd7, 0xec, 0x43, 0x5a, 0x51, 0x91, 0x64, 0x53, 0xaa, 0xb4, 0x91, 0xdb, 0x95, 0xb1, 0x09, - 0x1c, 0x25, 0xb6, 0x66, 0x6d, 0xe5, 0x81, 0xc4, 0x2e, 0xda, 0x6a, 0xa5, 0xd4, 0x76, 0xa0, 0x57, - 0x28, 0xcb, 0x40, 0x83, 0x16, 0x35, 0xb5, 0x4b, 0xaf, 0x26, 0x9a, 0x17, 0x66, 0x38, 0x5b, 0x2f, - 0x7a, 0xe9, 0xa9, 0x40, 0x81, 0x02, 0x6d, 0xaf, 0xfd, 0x27, 0x7a, 0x2b, 0x8a, 0xf6, 0x56, 0x04, - 0x85, 0x2f, 0x05, 0x82, 0x5e, 0x92, 0x93, 0x50, 0x6f, 0x4e, 0x45, 0xd1, 0x4b, 0x81, 0x5e, 0x02, - 0x14, 0x28, 0xc8, 0xe1, 0x3c, 0x38, 0x0f, 0xef, 0x48, 0xb1, 0x95, 0x22, 0xc8, 0x6d, 0x87, 0xfc, - 0x7d, 0x3f, 0x7e, 0x24, 0xbf, 0x8f, 0xdf, 0x6f, 0x38, 0x0b, 0xbe, 0x77, 0xfc, 0xb6, 0xab, 0x6a, - 0x56, 0xeb, 0xd8, 0x3b, 0x24, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, 0x25, - 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0x8d, 0x43, 0x42, 0xf1, 0x5a, 0xab, 0x47, - 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x8b, 0x3e, 0x50, 0xc5, 0xb6, 0xa6, - 0x32, 0xa0, 0x2a, 0x80, 0x4b, 0xab, 0x3d, 0x8d, 0x1e, 0x79, 0x87, 0x6a, 0xc7, 0x32, 0x5a, 0x3d, - 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xd0, 0x7b, 0xc8, 0x9f, 0xf8, 0x03, 0xff, 0xe5, 0xf3, 0x2c, 0x35, - 0x63, 0x03, 0x76, 0x2c, 0x87, 0xb4, 0xfa, 0x37, 0x92, 0x63, 0x2d, 0xbd, 0x11, 0x61, 0x0c, 0xdc, - 0x39, 0xd2, 0x4c, 0xe2, 0x0c, 0x5a, 0xf6, 0x71, 0x8f, 0x35, 0xb8, 0x2d, 0x83, 0x50, 0x9c, 0x65, - 0xd5, 0xca, 0xb3, 0x72, 0x3c, 0x93, 0x6a, 0x06, 0x49, 0x19, 0xbc, 0x35, 0xca, 0xc0, 0xed, 0x1c, - 0x11, 0x03, 0xa7, 0xec, 0x5e, 0xcf, 0xb3, 0xf3, 0xa8, 0xa6, 0xb7, 0x34, 0x93, 0xba, 0xd4, 0x49, - 0x1a, 0x35, 0xff, 0xa3, 0x00, 0xb8, 0x61, 0x99, 0xd4, 0xb1, 0x74, 0x9d, 0x38, 0x88, 0xf4, 0x35, - 0x57, 0xb3, 0x4c, 0xf8, 0x00, 0xd4, 0xd8, 0x7c, 0xba, 0x98, 0xe2, 0xba, 0x72, 0x59, 0x59, 0x99, - 0x5a, 0xbb, 0xae, 0x46, 0x2b, 0x1d, 0xd2, 0xab, 0xf6, 0x71, 0x8f, 0x35, 0xb8, 0x2a, 0x43, 0xab, - 0xfd, 0x1b, 0xea, 0xee, 0xe1, 0x87, 0xa4, 0x43, 0xb7, 0x09, 0xc5, 0x6d, 0xf8, 0xf8, 0x64, 0x79, - 0x6c, 0x78, 0xb2, 0x0c, 0xa2, 0x36, 0x14, 0xb2, 0xc2, 0x5d, 0x50, 0xe1, 0xec, 0x25, 0xce, 0xbe, - 0x9a, 0xcb, 0x2e, 0x26, 0xad, 0x22, 0xfc, 0x93, 0x77, 0x1f, 0x51, 0x62, 0x32, 0xf7, 0xda, 0x2f, - 0x08, 0xea, 0xca, 0x26, 0xa6, 0x18, 0x71, 0x22, 0x78, 0x0d, 0xd4, 0x1c, 0xe1, 0x7e, 0xbd, 0x7c, - 0x59, 0x59, 0x29, 0xb7, 0x2f, 0x08, 0x54, 0x2d, 0x98, 0x16, 0x0a, 0x11, 0xcd, 0xc7, 0x0a, 0x58, - 0x48, 0xcf, 0x7b, 0x4b, 0x73, 0x29, 0xfc, 0x61, 0x6a, 0xee, 0x6a, 0xb1, 0xb9, 0x33, 0x6b, 0x3e, - 0xf3, 0x70, 0xe0, 0xa0, 0x25, 0x36, 0xef, 0x3d, 0x50, 0xd5, 0x28, 0x31, 0xdc, 0x7a, 0xe9, 0x72, - 0x79, 0x65, 0x6a, 0xed, 0xaa, 0x9a, 0x13, 0xc0, 0x6a, 0xda, 0xbb, 0xf6, 0xb4, 0xe0, 0xad, 0xde, - 0x65, 0x0c, 0xc8, 0x27, 0x6a, 0xfe, 0xa2, 0x04, 0x26, 0x37, 0x31, 0x31, 0x2c, 0x73, 0x9f, 0xd0, - 0x73, 0xd8, 0xb9, 0x3b, 0xa0, 0xe2, 0xda, 0xa4, 0x23, 0x76, 0xee, 0x4a, 0xee, 0x04, 0x42, 0x9f, - 0xf6, 0x6d, 
0xd2, 0x89, 0xb6, 0x8c, 0x3d, 0x21, 0xce, 0x00, 0xf7, 0xc0, 0xb8, 0x4b, 0x31, 0xf5, - 0x5c, 0xbe, 0x61, 0x53, 0x6b, 0x2b, 0x05, 0xb8, 0x38, 0xbe, 0x3d, 0x23, 0xd8, 0xc6, 0xfd, 0x67, - 0x24, 0x78, 0x9a, 0xff, 0x28, 0x01, 0x18, 0x62, 0x37, 0x2c, 0xb3, 0xab, 0x51, 0x16, 0xce, 0x37, - 0x41, 0x85, 0x0e, 0x6c, 0xc2, 0x17, 0x64, 0xb2, 0x7d, 0x25, 0x70, 0xe5, 0xde, 0xc0, 0x26, 0x9f, - 0x9f, 0x2c, 0x2f, 0xa4, 0x2d, 0x58, 0x0f, 0xe2, 0x36, 0x70, 0x2b, 0x74, 0xb2, 0xc4, 0xad, 0xdf, - 0x90, 0x87, 0xfe, 0xfc, 0x64, 0x39, 0xe3, 0xec, 0x50, 0x43, 0x26, 0xd9, 0x41, 0xd8, 0x07, 0x50, - 0xc7, 0x2e, 0xbd, 0xe7, 0x60, 0xd3, 0xf5, 0x47, 0xd2, 0x0c, 0x22, 0xa6, 0xff, 0x5a, 0xb1, 0x8d, - 0x62, 0x16, 0xed, 0x25, 0xe1, 0x05, 0xdc, 0x4a, 0xb1, 0xa1, 0x8c, 0x11, 0xe0, 0x15, 0x30, 0xee, - 0x10, 0xec, 0x5a, 0x66, 0xbd, 0xc2, 0x67, 0x11, 0x2e, 0x20, 0xe2, 0xad, 0x48, 0xf4, 0xc2, 0x57, - 0xc1, 0x84, 0x41, 0x5c, 0x17, 0xf7, 0x48, 0xbd, 0xca, 0x81, 0xb3, 0x02, 0x38, 0xb1, 0xed, 0x37, - 0xa3, 0xa0, 0xbf, 0xf9, 0x7b, 0x05, 0x4c, 0x87, 0x2b, 0x77, 0x0e, 0x99, 0x73, 0x5b, 0xce, 0x9c, - 0xe6, 0xe8, 0x60, 0xc9, 0x49, 0x98, 0x8f, 0xca, 0x31, 0xc7, 0x59, 0x38, 0xc2, 0x1f, 0x81, 0x9a, - 0x4b, 0x74, 0xd2, 0xa1, 0x96, 0x23, 0x1c, 0x7f, 0xbd, 0xa0, 0xe3, 0xf8, 0x90, 0xe8, 0xfb, 0xc2, - 0xb4, 0xfd, 0x02, 0xf3, 0x3c, 0x78, 0x42, 0x21, 0x25, 0x7c, 0x1f, 0xd4, 0x28, 0x31, 0x6c, 0x1d, - 0x53, 0x22, 0xb2, 0xe6, 0xa5, 0xb8, 0xf3, 0x2c, 0x66, 0x18, 0xd9, 0x9e, 0xd5, 0xbd, 0x27, 0x60, - 0x3c, 0x65, 0xc2, 0xc5, 0x08, 0x5a, 0x51, 0x48, 0x03, 0x6d, 0x30, 0xe3, 0xd9, 0x5d, 0x86, 0xa4, - 0xec, 0x38, 0xef, 0x0d, 0x44, 0x0c, 0x5d, 0x1f, 0xbd, 0x2a, 0x07, 0x92, 0x5d, 0x7b, 0x41, 0x8c, - 0x32, 0x23, 0xb7, 0xa3, 0x04, 0x3f, 0x5c, 0x07, 0xb3, 0x86, 0x66, 0x22, 0x82, 0xbb, 0x83, 0x7d, - 0xd2, 0xb1, 0xcc, 0xae, 0xcb, 0x43, 0xa9, 0xda, 0x5e, 0x14, 0x04, 0xb3, 0xdb, 0x72, 0x37, 0x4a, - 0xe2, 0xe1, 0x16, 0x98, 0x0f, 0x0e, 0xe0, 0x3b, 0x9a, 0x4b, 0x2d, 0x67, 0xb0, 0xa5, 0x19, 0x1a, - 0xad, 0x8f, 0x73, 0x9e, 0xfa, 0xf0, 0x64, 0x79, 0x1e, 0x65, 0xf4, 0xa3, 0x4c, 0xab, 0xe6, 0x6f, - 0xc6, 0xc1, 0x6c, 0xe2, 0x5c, 0x80, 0xf7, 0xc1, 0x42, 0xc7, 0x73, 0x1c, 0x62, 0xd2, 0x1d, 0xcf, - 0x38, 0x24, 0xce, 0x7e, 0xe7, 0x88, 0x74, 0x3d, 0x9d, 0x74, 0xf9, 0xb6, 0x56, 0xdb, 0x0d, 0xe1, - 0xeb, 0xc2, 0x46, 0x26, 0x0a, 0xe5, 0x58, 0xc3, 0xf7, 0x00, 0x34, 0x79, 0xd3, 0xb6, 0xe6, 0xba, - 0x21, 0x67, 0x89, 0x73, 0x86, 0xa9, 0xb8, 0x93, 0x42, 0xa0, 0x0c, 0x2b, 0xe6, 0x63, 0x97, 0xb8, - 0x9a, 0x43, 0xba, 0x49, 0x1f, 0xcb, 0xb2, 0x8f, 0x9b, 0x99, 0x28, 0x94, 0x63, 0x0d, 0xdf, 0x04, - 0x53, 0xfe, 0x68, 0x7c, 0xcd, 0xc5, 0xe6, 0xcc, 0x09, 0xb2, 0xa9, 0x9d, 0xa8, 0x0b, 0xc5, 0x71, - 0x6c, 0x6a, 0xd6, 0xa1, 0x4b, 0x9c, 0x3e, 0xe9, 0xde, 0xf6, 0xc5, 0x01, 0xab, 0xa0, 0x55, 0x5e, - 0x41, 0xc3, 0xa9, 0xed, 0xa6, 0x10, 0x28, 0xc3, 0x8a, 0x4d, 0xcd, 0x8f, 0x9a, 0xd4, 0xd4, 0xc6, - 0xe5, 0xa9, 0x1d, 0x64, 0xa2, 0x50, 0x8e, 0x35, 0x8b, 0x3d, 0xdf, 0xe5, 0xf5, 0x3e, 0xd6, 0x74, - 0x7c, 0xa8, 0x93, 0xfa, 0x84, 0x1c, 0x7b, 0x3b, 0x72, 0x37, 0x4a, 0xe2, 0xe1, 0x6d, 0x70, 0xd1, - 0x6f, 0x3a, 0x30, 0x71, 0x48, 0x52, 0xe3, 0x24, 0x2f, 0x0a, 0x92, 0x8b, 0x3b, 0x49, 0x00, 0x4a, - 0xdb, 0xc0, 0x9b, 0x60, 0xa6, 0x63, 0xe9, 0x3a, 0x8f, 0xc7, 0x0d, 0xcb, 0x33, 0x69, 0x7d, 0x92, - 0xb3, 0x40, 0x96, 0x43, 0x1b, 0x52, 0x0f, 0x4a, 0x20, 0xe1, 0x8f, 0x01, 0xe8, 0x04, 0x85, 0xc1, - 0xad, 0x83, 0x11, 0x0a, 0x20, 0x5d, 0x96, 0xa2, 0xca, 0x1c, 0x36, 0xb9, 0x28, 0x46, 0xd9, 0xfc, - 0x48, 0x01, 0x8b, 0x39, 0x89, 0x0e, 0xbf, 0x2b, 0x15, 0xc1, 0xab, 0x89, 0x22, 0x78, 0x29, 0xc7, - 0x2c, 0x56, 0x09, 0x8f, 0xc0, 0x34, 
0x13, 0x24, 0x9a, 0xd9, 0xf3, 0x21, 0xe2, 0x2c, 0x6b, 0xe5, - 0x4e, 0x00, 0xc5, 0xd1, 0xd1, 0xa9, 0x7c, 0x71, 0x78, 0xb2, 0x3c, 0x2d, 0xf5, 0x21, 0x99, 0xb8, - 0xf9, 0xcb, 0x12, 0x00, 0x9b, 0xc4, 0xd6, 0xad, 0x81, 0x41, 0xcc, 0xf3, 0xd0, 0x34, 0x77, 0x25, - 0x4d, 0xf3, 0x4a, 0xfe, 0x96, 0x84, 0x4e, 0xe5, 0x8a, 0x9a, 0xf7, 0x13, 0xa2, 0xe6, 0xd5, 0x22, - 0x64, 0x4f, 0x57, 0x35, 0x9f, 0x94, 0xc1, 0x5c, 0x04, 0x8e, 0x64, 0xcd, 0x2d, 0x69, 0x47, 0x5f, - 0x49, 0xec, 0xe8, 0x62, 0x86, 0xc9, 0x73, 0xd3, 0x35, 0xcf, 0x5e, 0x5f, 0xc0, 0x0f, 0xc1, 0x0c, - 0x13, 0x32, 0x7e, 0x48, 0x70, 0x99, 0x34, 0x7e, 0x6a, 0x99, 0x14, 0x16, 0xb7, 0x2d, 0x89, 0x09, - 0x25, 0x98, 0x73, 0x64, 0xd9, 0xc4, 0xf3, 0x96, 0x65, 0xcd, 0x3f, 0x28, 0x60, 0x26, 0xda, 0xa6, - 0x73, 0x10, 0x51, 0x77, 0x64, 0x11, 0xf5, 0x52, 0x81, 0xe0, 0xcc, 0x51, 0x51, 0x9f, 0x54, 0xe2, - 0xae, 0x73, 0x19, 0xb5, 0xc2, 0x5e, 0xc1, 0x6c, 0x5d, 0xeb, 0x60, 0x57, 0xd4, 0xdb, 0x17, 0xfc, - 0xd7, 0x2f, 0xbf, 0x0d, 0x85, 0xbd, 0x92, 0xe0, 0x2a, 0x3d, 0x5f, 0xc1, 0x55, 0x7e, 0x36, 0x82, - 0xeb, 0x07, 0xa0, 0xe6, 0x06, 0x52, 0xab, 0xc2, 0x29, 0xaf, 0x16, 0x4a, 0x6c, 0xa1, 0xb2, 0x42, - 0xea, 0x50, 0x5f, 0x85, 0x74, 0x59, 0xca, 0xaa, 0xfa, 0x65, 0x2a, 0x2b, 0x96, 0xcc, 0x36, 0xf6, - 0x5c, 0xd2, 0xe5, 0x19, 0x50, 0x8b, 0x92, 0x79, 0x8f, 0xb7, 0x22, 0xd1, 0x0b, 0x0f, 0xc0, 0xa2, - 0xed, 0x58, 0x3d, 0x87, 0xb8, 0xee, 0x26, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x30, 0x01, 0xbf, 0x26, - 0x5e, 0x1a, 0x9e, 0x2c, 0x2f, 0xee, 0x65, 0x43, 0x50, 0x9e, 0x6d, 0xf3, 0xcf, 0x15, 0x70, 0x21, - 0x79, 0x36, 0xe6, 0xc8, 0x14, 0xe5, 0x4c, 0x32, 0xe5, 0x5a, 0x2c, 0x4e, 0x7d, 0x0d, 0x17, 0xbb, - 0x2a, 0x48, 0xc5, 0xea, 0x3a, 0x98, 0x15, 0xb2, 0x24, 0xe8, 0x14, 0x42, 0x2d, 0xdc, 0x9e, 0x03, - 0xb9, 0x1b, 0x25, 0xf1, 0x4c, 0x7c, 0x44, 0x9a, 0x22, 0x20, 0xa9, 0xc8, 0xe2, 0x63, 0x3d, 0x09, - 0x40, 0x69, 0x1b, 0xb8, 0x0d, 0xe6, 0x3c, 0x33, 0x4d, 0xe5, 0x87, 0xcb, 0x25, 0x41, 0x35, 0x77, - 0x90, 0x86, 0xa0, 0x2c, 0x3b, 0xf8, 0x40, 0xd2, 0x23, 0xe3, 0xfc, 0x48, 0xb8, 0x56, 0x20, 0xac, - 0x0b, 0x0b, 0x12, 0x78, 0x0b, 0x4c, 0x3b, 0x5c, 0x73, 0x06, 0xae, 0xfa, 0xba, 0xed, 0x1b, 0xc2, - 0x6c, 0x1a, 0xc5, 0x3b, 0x91, 0x8c, 0xcd, 0x90, 0x5a, 0xb5, 0xa2, 0x52, 0xab, 0xf9, 0x27, 0x05, - 0xc0, 0x74, 0x1e, 0x8e, 0xbc, 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0xae, 0x17, - 0xd4, 0x3f, 0xd1, 0x81, 0x5a, 0x4c, 0x00, 0x89, 0x65, 0x38, 0x9f, 0x4b, 0x9d, 0xa2, 0x02, 0x28, - 0x72, 0xea, 0x19, 0x08, 0xa0, 0x18, 0xd9, 0xd3, 0x05, 0xd0, 0x3f, 0x4b, 0x60, 0x2e, 0x02, 0x17, - 0x16, 0x40, 0x19, 0x26, 0x5f, 0x5f, 0xec, 0x8c, 0xbe, 0xd8, 0x61, 0xa2, 0x24, 0x5a, 0xba, 0xff, - 0x27, 0x51, 0x12, 0x79, 0x95, 0x23, 0x4a, 0x7e, 0x57, 0x8a, 0xbb, 0xfe, 0x95, 0x17, 0x25, 0x5f, - 0xfc, 0x4e, 0xa6, 0xf9, 0x97, 0x32, 0xb8, 0x90, 0xcc, 0x43, 0xa9, 0x40, 0x2a, 0x23, 0x0b, 0xe4, - 0x1e, 0x98, 0x7f, 0xe8, 0xe9, 0xfa, 0x80, 0x2f, 0x43, 0xac, 0x4a, 0xfa, 0xa5, 0xf5, 0x9b, 0xc2, - 0x72, 0xfe, 0xfb, 0x19, 0x18, 0x94, 0x69, 0x99, 0x53, 0xec, 0xcb, 0x67, 0x2a, 0xf6, 0xa9, 0x0a, - 0x54, 0x39, 0x45, 0x05, 0xca, 0x2c, 0xdc, 0xd5, 0x33, 0x14, 0xee, 0xd3, 0x55, 0xda, 0x8c, 0x83, - 0x6b, 0xe4, 0xab, 0xff, 0xcf, 0x15, 0xb0, 0x90, 0xfd, 0xc2, 0x0d, 0x75, 0x30, 0x63, 0xe0, 0x47, - 0xf1, 0x8b, 0x8f, 0x51, 0x45, 0xc4, 0xa3, 0x9a, 0xae, 0xfa, 0x9f, 0x8c, 0xd4, 0xbb, 0x26, 0xdd, - 0x75, 0xf6, 0xa9, 0xa3, 0x99, 0x3d, 0xbf, 0xf2, 0x6e, 0x4b, 0x5c, 0x28, 0xc1, 0xdd, 0xfc, 0x4c, - 0x01, 0x8b, 0x39, 0x95, 0xef, 0x7c, 0x3d, 0x81, 0x1f, 0x80, 0x9a, 0x81, 0x1f, 0xed, 0x7b, 0x4e, - 0x2f, 0xab, 0x56, 0x17, 0x1b, 0x87, 0x67, 0xf3, 0xb6, 0x60, 
0x41, 0x21, 0x5f, 0x73, 0x17, 0x5c, - 0x96, 0x26, 0xc9, 0x32, 0x87, 0x3c, 0xf4, 0x74, 0x9e, 0x44, 0x42, 0x6c, 0x5c, 0x05, 0x93, 0x36, - 0x76, 0xa8, 0x16, 0x4a, 0xd5, 0x6a, 0x7b, 0x7a, 0x78, 0xb2, 0x3c, 0xb9, 0x17, 0x34, 0xa2, 0xa8, - 0xbf, 0xf9, 0x5f, 0x05, 0x54, 0xf7, 0x3b, 0x58, 0x27, 0xe7, 0x50, 0xed, 0x37, 0xa5, 0x6a, 0x9f, - 0x7f, 0x93, 0xce, 0xfd, 0xc9, 0x2d, 0xf4, 0x5b, 0x89, 0x42, 0xff, 0xf2, 0x08, 0x9e, 0xa7, 0xd7, - 0xf8, 0x77, 0xc0, 0x64, 0x38, 0xdc, 0xe9, 0x0e, 0xa0, 0xe6, 0x6f, 0x4b, 0x60, 0x2a, 0x36, 0xc4, - 0x29, 0x8f, 0xaf, 0x07, 0xd2, 0xb1, 0xcf, 0x12, 0x73, 0xad, 0xc8, 0x44, 0xd4, 0xe0, 0x88, 0x7f, - 0xd7, 0xa4, 0x4e, 0xfc, 0x05, 0x2f, 0x7d, 0xf2, 0x7f, 0x07, 0xcc, 0x50, 0xec, 0xf4, 0x08, 0x0d, - 0xfa, 0xf8, 0x82, 0x4d, 0x46, 0xb7, 0x13, 0xf7, 0xa4, 0x5e, 0x94, 0x40, 0x2f, 0xdd, 0x02, 0xd3, - 0xd2, 0x60, 0xf0, 0x02, 0x28, 0x1f, 0x93, 0x81, 0x2f, 0x7b, 0x10, 0xfb, 0x09, 0xe7, 0x41, 0xb5, - 0x8f, 0x75, 0xcf, 0x8f, 0xf3, 0x49, 0xe4, 0x3f, 0xdc, 0x2c, 0xbd, 0xad, 0x34, 0x7f, 0xc5, 0x16, - 0x27, 0x0a, 0xce, 0x73, 0x88, 0xae, 0xf7, 0xa4, 0xe8, 0xca, 0xff, 0xa8, 0x17, 0x4f, 0x99, 0xbc, - 0x18, 0x43, 0x89, 0x18, 0x7b, 0xad, 0x10, 0xdb, 0xd3, 0x23, 0xed, 0x5f, 0x25, 0x30, 0x1f, 0x43, - 0x47, 0x72, 0xf2, 0xdb, 0x92, 0x9c, 0x5c, 0x49, 0xc8, 0xc9, 0x7a, 0x96, 0xcd, 0xd7, 0x7a, 0x72, - 0xb4, 0x9e, 0xfc, 0xa3, 0x02, 0x66, 0x63, 0x6b, 0x77, 0x0e, 0x82, 0xf2, 0xae, 0x2c, 0x28, 0x5f, - 0x2e, 0x12, 0x34, 0x39, 0x8a, 0xf2, 0xaf, 0x55, 0xc9, 0xf9, 0xaf, 0xbc, 0xa4, 0xfc, 0x29, 0x98, - 0xef, 0x5b, 0xba, 0x67, 0x90, 0x0d, 0x1d, 0x6b, 0x46, 0x00, 0x60, 0xaa, 0xa9, 0x9c, 0x7c, 0x97, - 0x0b, 0xe9, 0x89, 0xe3, 0x6a, 0x2e, 0x25, 0x26, 0xbd, 0x1f, 0x59, 0x46, 0xba, 0xef, 0x7e, 0x06, - 0x1d, 0xca, 0x1c, 0x04, 0xbe, 0x09, 0xa6, 0x98, 0x7e, 0xd3, 0x3a, 0x64, 0x07, 0x1b, 0x41, 0x60, - 0x85, 0x9f, 0xb0, 0xf6, 0xa3, 0x2e, 0x14, 0xc7, 0xc1, 0x23, 0x30, 0x67, 0x5b, 0xdd, 0x6d, 0x6c, - 0xe2, 0x1e, 0x61, 0x32, 0x63, 0xcf, 0xd2, 0xb5, 0xce, 0x80, 0x5f, 0x7e, 0x4d, 0xb6, 0xdf, 0x0a, - 0x6e, 0x45, 0xf6, 0xd2, 0x10, 0xf6, 0x92, 0x98, 0xd1, 0xcc, 0x93, 0x3a, 0x8b, 0x12, 0x3a, 0xa9, - 0xcf, 0xae, 0xfe, 0x1d, 0xf1, 0x5a, 0x91, 0x08, 0x3b, 0xe3, 0x87, 0xd7, 0xbc, 0xbb, 0xbd, 0xda, - 0x99, 0xbe, 0x9a, 0xfe, 0xbb, 0x02, 0x2e, 0xa6, 0x8e, 0xca, 0x2f, 0xf1, 0x76, 0x2d, 0x25, 0xcf, - 0xcb, 0xa7, 0x90, 0xe7, 0xeb, 0x60, 0x56, 0x7c, 0xb0, 0x4d, 0xa8, 0xfb, 0xf0, 0xfd, 0x67, 0x43, - 0xee, 0x46, 0x49, 0x7c, 0xd6, 0xed, 0x5e, 0xf5, 0x94, 0xb7, 0x7b, 0x71, 0x2f, 0xc4, 0x1f, 0x90, - 0xfc, 0xd0, 0x4b, 0x7b, 0x21, 0xfe, 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0x13, - 0xb2, 0x42, 0x38, 0x90, 0x7a, 0x51, 0x02, 0xfd, 0x85, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0xab, - 0x45, 0xe2, 0xb9, 0xf8, 0xbb, 0xc9, 0xdf, 0x14, 0xf0, 0x62, 0x6e, 0x22, 0xc0, 0x75, 0xa9, 0xec, - 0xae, 0x26, 0xca, 0xee, 0xb7, 0x72, 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0xbe, 0x9a, 0x7b, 0xa7, 0xd8, - 0xd5, 0x5c, 0x86, 0x76, 0x1f, 0x7d, 0x47, 0xd7, 0x5e, 0x7d, 0xfc, 0xa4, 0x31, 0xf6, 0xf1, 0x93, - 0xc6, 0xd8, 0xa7, 0x4f, 0x1a, 0x63, 0x3f, 0x1b, 0x36, 0x94, 0xc7, 0xc3, 0x86, 0xf2, 0xf1, 0xb0, - 0xa1, 0x7c, 0x3a, 0x6c, 0x28, 0x7f, 0x1f, 0x36, 0x94, 0x5f, 0x7f, 0xd6, 0x18, 0xfb, 0x60, 0x42, - 0x8c, 0xf8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x80, 0x85, 0x43, 0x0a, 0xec, 0x28, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index 39e07e278ae2e..d358455f0eb64 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -141,13 +141,13 @@ const ( // ordering 
constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go index 8acf47deeeb1a..003cc30bfbdd4 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go @@ -17,36 +17,19 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto - - It has these top-level messages: - AuditSink - AuditSinkList - AuditSinkSpec - Policy - ServiceReference - Webhook - WebhookClientConfig - WebhookThrottleConfig -*/ package v1alpha1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" math "math" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -60,37 +43,229 @@ var _ = math.Inf // proto package needs to be updated. 
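
Most hunks in these regenerated unmarshalers repeat the same two-part change: the varint accumulation is rewritten as `x |= uint64(b&0x7F) << shift` (identical result, the mask is simply applied before widening), and new `postIndex < 0` / `(iNdEx + skippy) < 0` guards reject a decoded length whose addition to the current offset overflows a signed int, returning ErrInvalidLengthGenerated instead of producing a negative slice bound. A minimal sketch of that decode step follows; the names readLengthPrefixed, errInvalidLength, errIntOverflow and errUnexpectedEOF are illustrative, not part of the generated code.

package main

import (
	"errors"
	"fmt"
)

var (
	errInvalidLength = errors.New("proto: negative length found during unmarshaling")
	errIntOverflow   = errors.New("proto: integer overflow")
	errUnexpectedEOF = errors.New("unexpected EOF")
)

// readLengthPrefixed mirrors the guarded pattern used by the regenerated
// Unmarshal methods: decode a varint length, then check that both the
// length and the resulting end offset are non-negative before slicing.
func readLengthPrefixed(dAtA []byte, iNdEx int) (field []byte, next int, err error) {
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errIntOverflow
		}
		if iNdEx >= len(dAtA) {
			return nil, 0, errUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		msglen |= int(b&0x7F) << shift // same value as (int(b) & 0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	if msglen < 0 {
		return nil, 0, errInvalidLength
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 { // new guard: iNdEx+msglen overflowed int
		return nil, 0, errInvalidLength
	}
	if postIndex > len(dAtA) {
		return nil, 0, errUnexpectedEOF
	}
	return dAtA[iNdEx:postIndex], postIndex, nil
}

func main() {
	// 0x03 = length 3, followed by three payload bytes.
	field, next, err := readLengthPrefixed([]byte{0x03, 'f', 'o', 'o'}, 0)
	fmt.Println(string(field), next, err) // foo 4 <nil>
}
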
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AuditSink) Reset() { *m = AuditSink{} } -func (*AuditSink) ProtoMessage() {} -func (*AuditSink) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AuditSink) Reset() { *m = AuditSink{} } +func (*AuditSink) ProtoMessage() {} +func (*AuditSink) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{0} +} +func (m *AuditSink) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSink) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSink.Merge(m, src) +} +func (m *AuditSink) XXX_Size() int { + return m.Size() +} +func (m *AuditSink) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSink.DiscardUnknown(m) +} -func (m *AuditSinkList) Reset() { *m = AuditSinkList{} } -func (*AuditSinkList) ProtoMessage() {} -func (*AuditSinkList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_AuditSink proto.InternalMessageInfo -func (m *AuditSinkSpec) Reset() { *m = AuditSinkSpec{} } -func (*AuditSinkSpec) ProtoMessage() {} -func (*AuditSinkSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *AuditSinkList) Reset() { *m = AuditSinkList{} } +func (*AuditSinkList) ProtoMessage() {} +func (*AuditSinkList) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{1} +} +func (m *AuditSinkList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSinkList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSinkList) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSinkList.Merge(m, src) +} +func (m *AuditSinkList) XXX_Size() int { + return m.Size() +} +func (m *AuditSinkList) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSinkList.DiscardUnknown(m) +} -func (m *Policy) Reset() { *m = Policy{} } -func (*Policy) ProtoMessage() {} -func (*Policy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_AuditSinkList proto.InternalMessageInfo -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *AuditSinkSpec) Reset() { *m = AuditSinkSpec{} } +func (*AuditSinkSpec) ProtoMessage() {} +func (*AuditSinkSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{2} +} +func (m *AuditSinkSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSinkSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSinkSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSinkSpec.Merge(m, src) +} +func (m *AuditSinkSpec) XXX_Size() int { + return m.Size() +} +func (m *AuditSinkSpec) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSinkSpec.DiscardUnknown(m) +} -func (m *Webhook) Reset() { *m = Webhook{} } -func (*Webhook) ProtoMessage() {} -func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{5} } +var xxx_messageInfo_AuditSinkSpec proto.InternalMessageInfo -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *Policy) Reset() { *m = Policy{} } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{3} +} +func (m *Policy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Policy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Policy.Merge(m, src) +} +func (m *Policy) XXX_Size() int { + return m.Size() +} +func (m *Policy) XXX_DiscardUnknown() { + xxx_messageInfo_Policy.DiscardUnknown(m) +} -func (m *WebhookThrottleConfig) Reset() { *m = WebhookThrottleConfig{} } -func (*WebhookThrottleConfig) ProtoMessage() {} -func (*WebhookThrottleConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_Policy proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{4} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *Webhook) Reset() { *m = Webhook{} } +func (*Webhook) ProtoMessage() {} +func (*Webhook) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{5} +} +func (m *Webhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Webhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Webhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_Webhook.Merge(m, src) +} +func (m *Webhook) XXX_Size() int { + return m.Size() +} +func (m *Webhook) XXX_DiscardUnknown() { + xxx_messageInfo_Webhook.DiscardUnknown(m) +} + +var xxx_messageInfo_Webhook proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{6} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m 
*WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func (m *WebhookThrottleConfig) Reset() { *m = WebhookThrottleConfig{} } +func (*WebhookThrottleConfig) ProtoMessage() {} +func (*WebhookThrottleConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{7} +} +func (m *WebhookThrottleConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookThrottleConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookThrottleConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookThrottleConfig.Merge(m, src) +} +func (m *WebhookThrottleConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookThrottleConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookThrottleConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookThrottleConfig proto.InternalMessageInfo func init() { proto.RegisterType((*AuditSink)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSink") @@ -102,10 +277,67 @@ func init() { proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookClientConfig") proto.RegisterType((*WebhookThrottleConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookThrottleConfig") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto", fileDescriptor_642d3597c6afa8ba) +} + +var fileDescriptor_642d3597c6afa8ba = []byte{ + // 765 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0x13, 0x47, + 0x14, 0xf6, 0xc6, 0x76, 0x6c, 0x4f, 0x9c, 0x36, 0x9d, 0xb4, 0x95, 0x1b, 0x55, 0x6b, 0x6b, 0xa5, + 0x4a, 0x91, 0xda, 0xcc, 0x36, 0x55, 0xd4, 0x56, 0x88, 0x4b, 0x36, 0x27, 0xa4, 0x10, 0xc2, 0x98, + 0x80, 0x40, 0x08, 0x31, 0x5e, 0x3f, 0xef, 0x0e, 0xb6, 0x77, 0x97, 0xdd, 0x59, 0xa3, 0xdc, 0xf8, + 0x09, 0xfc, 0x05, 0xfe, 0x06, 0x37, 0x24, 0x90, 0x72, 0xcc, 0x31, 0xa7, 0x88, 0x98, 0x03, 0xff, + 0x81, 0x13, 0x9a, 0xd9, 0x59, 0xdb, 0xc4, 0x41, 0x38, 0xb7, 0x79, 0xdf, 0x7b, 0xdf, 0xf7, 0xbe, + 0xf7, 0xde, 0xa0, 0x83, 0xfe, 0xff, 0x09, 0xe1, 0xa1, 0xdd, 0x4f, 0x3b, 0x10, 0x07, 0x20, 0x20, + 0xb1, 0x47, 0x10, 0x74, 0xc3, 0xd8, 0xd6, 0x09, 0x16, 0x71, 0x9b, 0xa5, 0x5d, 0x2e, 0x62, 0xf0, + 0x78, 0x22, 0x62, 0x26, 0x78, 0x18, 0xd8, 0xa3, 0x6d, 0x36, 0x88, 0x7c, 0xb6, 0x6d, 0x7b, 0x10, + 0x40, 0xcc, 0x04, 0x74, 0x49, 0x14, 0x87, 0x22, 0xc4, 0x7f, 0x64, 0x34, 0xc2, 0x22, 0x4e, 0xe6, + 0x68, 0x24, 0xa7, 0x6d, 0x6c, 0x79, 0x5c, 0xf8, 0x69, 0x87, 0xb8, 0xe1, 0xd0, 0xf6, 0x42, 0x2f, + 0xb4, 0x15, 0xbb, 0x93, 0xf6, 0x54, 0xa4, 0x02, 0xf5, 0xca, 0x54, 0x37, 0x76, 0xa6, 0x66, 0x86, + 0xcc, 0xf5, 0x79, 0x00, 0xf1, 0xb1, 0x1d, 0xf5, 0x3d, 0x09, 0x24, 0xf6, 0x10, 0x04, 0xb3, 0x47, + 0x73, 0x5e, 0x36, 0xec, 0x6f, 0xb1, 0xe2, 0x34, 0x10, 0x7c, 0x08, 0x73, 0x84, 0x7f, 0xbf, 0x47, + 0x48, 0x5c, 0x1f, 0x86, 0xec, 0x32, 0xcf, 0x7a, 0x6f, 0xa0, 0xda, 0xae, 0x1c, 0xb6, 0xcd, 0x83, + 0x3e, 0x7e, 0x8a, 0xaa, 0xd2, 0x51, 0x97, 0x09, 0xd6, 0x30, 0x5a, 0xc6, 0xe6, 0xca, 0x3f, 0x7f, + 0x93, 0xe9, 0x56, 0x26, 0xc2, 0x24, 0xea, 0x7b, 0x12, 0x48, 0x88, 0xac, 0x26, 0xa3, 0x6d, 0x72, + 0xa7, 0xf3, 0x0c, 0x5c, 0x71, 0x1b, 0x04, 0x73, 0xf0, 0xc9, 0x79, 0xb3, 0x30, 0x3e, 0x6f, 0xa2, + 0x29, 
0x46, 0x27, 0xaa, 0xf8, 0x3e, 0x2a, 0x25, 0x11, 0xb8, 0x8d, 0x25, 0xa5, 0xbe, 0x43, 0x16, + 0xda, 0x39, 0x99, 0x38, 0x6c, 0x47, 0xe0, 0x3a, 0x75, 0xdd, 0xa1, 0x24, 0x23, 0xaa, 0xf4, 0xac, + 0x77, 0x06, 0x5a, 0x9d, 0x54, 0xed, 0xf3, 0x44, 0xe0, 0xc7, 0x73, 0xb3, 0x90, 0xc5, 0x66, 0x91, + 0x6c, 0x35, 0xc9, 0x9a, 0xee, 0x53, 0xcd, 0x91, 0x99, 0x39, 0x8e, 0x50, 0x99, 0x0b, 0x18, 0x26, + 0x8d, 0xa5, 0x56, 0xf1, 0xd2, 0x9a, 0x16, 0x1a, 0xc4, 0x59, 0xd5, 0xe2, 0xe5, 0x5b, 0x52, 0x86, + 0x66, 0x6a, 0xd6, 0xdb, 0xd9, 0x31, 0xe4, 0x78, 0xf8, 0x08, 0x2d, 0x47, 0xe1, 0x80, 0xbb, 0xc7, + 0x7a, 0x88, 0xad, 0x05, 0x3b, 0x1d, 0x2a, 0x92, 0xf3, 0x83, 0x6e, 0xb3, 0x9c, 0xc5, 0x54, 0x8b, + 0xe1, 0x87, 0xa8, 0xf2, 0x02, 0x3a, 0x7e, 0x18, 0xf6, 0xf5, 0x29, 0xc8, 0x82, 0xba, 0x0f, 0x32, + 0x96, 0xf3, 0xa3, 0x16, 0xae, 0x68, 0x80, 0xe6, 0x7a, 0x96, 0x8b, 0x74, 0x33, 0xfc, 0x17, 0x2a, + 0x0f, 0x60, 0x04, 0x03, 0x65, 0xbd, 0xe6, 0xfc, 0x9a, 0x8f, 0xbc, 0x2f, 0xc1, 0xcf, 0xf9, 0x83, + 0x66, 0x45, 0xf8, 0x4f, 0xb4, 0x9c, 0x08, 0xe6, 0x41, 0xb6, 0xd3, 0x9a, 0xb3, 0x2e, 0x6d, 0xb7, + 0x15, 0x22, 0x6b, 0xd5, 0x8b, 0xea, 0x12, 0xeb, 0xb5, 0x81, 0xd6, 0xda, 0x10, 0x8f, 0xb8, 0x0b, + 0x14, 0x7a, 0x10, 0x43, 0xe0, 0x02, 0xb6, 0x51, 0x2d, 0x60, 0x43, 0x48, 0x22, 0xe6, 0x82, 0xee, + 0xf9, 0x93, 0xee, 0x59, 0x3b, 0xc8, 0x13, 0x74, 0x5a, 0x83, 0x5b, 0xa8, 0x24, 0x03, 0xb5, 0x82, + 0xda, 0xf4, 0x5f, 0xc9, 0x5a, 0xaa, 0x32, 0xf8, 0x77, 0x54, 0x8a, 0x98, 0xf0, 0x1b, 0x45, 0x55, + 0x51, 0x95, 0xd9, 0x43, 0x26, 0x7c, 0xaa, 0x50, 0x95, 0x0d, 0x63, 0xd1, 0x28, 0xb5, 0x8c, 0xcd, + 0xb2, 0xce, 0x86, 0xb1, 0xa0, 0x0a, 0xb5, 0x3e, 0x19, 0x28, 0xdf, 0x0e, 0xee, 0xa1, 0xaa, 0xf0, + 0xe3, 0x50, 0x88, 0x01, 0xe8, 0x43, 0xde, 0xbc, 0xde, 0xc2, 0xef, 0x69, 0xf6, 0x5e, 0x18, 0xf4, + 0xb8, 0xe7, 0xd4, 0xe5, 0xbf, 0xcc, 0x31, 0x3a, 0xd1, 0xc6, 0x02, 0xd5, 0xdd, 0x01, 0x87, 0x40, + 0x64, 0x75, 0xfa, 0xb8, 0x37, 0xae, 0xd7, 0x6b, 0x6f, 0x46, 0xc1, 0xf9, 0x59, 0x6f, 0xa5, 0x3e, + 0x8b, 0xd2, 0xaf, 0xba, 0x58, 0x6f, 0x0c, 0xb4, 0x7e, 0x05, 0x17, 0xff, 0x86, 0x8a, 0x69, 0x9c, + 0x9f, 0xbf, 0x32, 0x3e, 0x6f, 0x16, 0x8f, 0xe8, 0x3e, 0x95, 0x18, 0x7e, 0x82, 0x2a, 0x49, 0x76, + 0x3f, 0xed, 0xf1, 0xbf, 0x05, 0x3d, 0x5e, 0xbe, 0xba, 0xb3, 0x22, 0x7f, 0x61, 0x8e, 0xe6, 0xa2, + 0x78, 0x13, 0x55, 0x5d, 0xe6, 0xa4, 0x41, 0x77, 0x00, 0xea, 0x78, 0xf5, 0x6c, 0x65, 0x7b, 0xbb, + 0x19, 0x46, 0x27, 0x59, 0xab, 0x8d, 0x7e, 0xb9, 0x72, 0xc7, 0xd2, 0xfd, 0xf3, 0x28, 0x51, 0xee, + 0x8b, 0x99, 0xfb, 0xbb, 0x87, 0x6d, 0x2a, 0x31, 0xdc, 0x44, 0xe5, 0x4e, 0x1a, 0x27, 0x42, 0x79, + 0x2f, 0x3a, 0x35, 0xf9, 0xab, 0x1d, 0x09, 0xd0, 0x0c, 0x77, 0xc8, 0xc9, 0x85, 0x59, 0x38, 0xbd, + 0x30, 0x0b, 0x67, 0x17, 0x66, 0xe1, 0xe5, 0xd8, 0x34, 0x4e, 0xc6, 0xa6, 0x71, 0x3a, 0x36, 0x8d, + 0xb3, 0xb1, 0x69, 0x7c, 0x18, 0x9b, 0xc6, 0xab, 0x8f, 0x66, 0xe1, 0x51, 0x35, 0x9f, 0xea, 0x4b, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x6c, 0xff, 0x86, 0xcd, 0x06, 0x00, 0x00, +} + func (m *AuditSink) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -113,33 +345,42 @@ func (m *AuditSink) Marshal() (dAtA []byte, err error) { } func (m *AuditSink) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSink) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, 
err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AuditSinkList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -147,37 +388,46 @@ func (m *AuditSinkList) Marshal() (dAtA []byte, err error) { } func (m *AuditSinkList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSinkList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AuditSinkSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -185,33 +435,42 @@ func (m *AuditSinkSpec) Marshal() (dAtA []byte, err error) { } func (m *AuditSinkSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSinkSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Policy.Size())) - n4, err := m.Policy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Webhook.Size())) - n5, err := m.Webhook.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Policy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Policy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
if err != nil { return nil, err } @@ -219,36 +478,36 @@ func (m *Policy) Marshal() (dAtA []byte, err error) { } func (m *Policy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) - i += copy(dAtA[i:], m.Level) if len(m.Stages) > 0 { - for _, s := range m.Stages { + for iNdEx := len(m.Stages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Stages[iNdEx]) + copy(dAtA[i:], m.Stages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Stages[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -256,36 +515,44 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) { } func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.Path != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i += copy(dAtA[i:], *m.Path) - } if m.Port != nil { - dAtA[i] = 0x20 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Webhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -293,35 +560,44 @@ func (m *Webhook) Marshal() (dAtA []byte, err error) { } func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Webhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Throttle != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Throttle.Size())) - n6, err := m.Throttle.MarshalTo(dAtA[i:]) + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n6 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n7, err := 
m.ClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Throttle != nil { + { + size, err := m.Throttle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n7 - return i, nil + return len(dAtA) - i, nil } func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -329,39 +605,48 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.URL != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i += copy(dAtA[i:], *m.URL) + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x1a } if m.Service != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n8, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x12 } - if m.CABundle != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *WebhookThrottleConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -369,33 +654,43 @@ func (m *WebhookThrottleConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookThrottleConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookThrottleConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.QPS != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.QPS)) - } if m.Burst != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Burst)) + i-- + dAtA[i] = 0x10 + } + if m.QPS != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.QPS)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AuditSink) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -406,6 +701,9 @@ func (m *AuditSink) Size() (n int) { } func (m *AuditSinkList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -420,6 +718,9 @@ func (m *AuditSinkList) Size() (n int) { } func (m *AuditSinkSpec) Size() (n int) { + if m == nil { + return 0 + } var l int 
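
The marshal side is rewritten the same way in every message: Marshal still allocates a buffer of Size() bytes, but the work moves to MarshalToSizedBuffer, which writes fields in reverse order from the end of the buffer (i starts at len(dAtA) and is decremented), so nested messages no longer need the intermediate n1/n2 offsets. encodeVarintGenerated now receives the write position, subtracts the varint's encoded size, and returns the new base, and sovGenerated computes that size with math/bits.Len64 instead of a shift loop. A minimal sketch of the pattern, under the assumption that two length-delimited string fields are being encoded; sov and putUvarintBackward are illustrative names, not the generated helpers.

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// sov reports how many bytes the varint encoding of x occupies,
// matching the regenerated sovGenerated: (bits.Len64(x|1) + 6) / 7.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// putUvarintBackward writes x immediately before offset and returns the
// new (smaller) offset, mirroring the new encodeVarintGenerated shape.
func putUvarintBackward(buf []byte, offset int, x uint64) int {
	offset -= sov(x)
	binary.PutUvarint(buf[offset:], x)
	return offset
}

func main() {
	// Encode two length-delimited string fields (tags 1 and 2) back to front,
	// the way MarshalToSizedBuffer does: last field first, tag byte written last.
	level, stage := "Metadata", "ResponseComplete"
	size := 1 + sov(uint64(len(level))) + len(level) +
		1 + sov(uint64(len(stage))) + len(stage)
	buf := make([]byte, size)
	i := size

	i -= len(stage)
	copy(buf[i:], stage)
	i = putUvarintBackward(buf, i, uint64(len(stage)))
	i--
	buf[i] = 0x12 // field 2, wire type 2

	i -= len(level)
	copy(buf[i:], level)
	i = putUvarintBackward(buf, i, uint64(len(level)))
	i--
	buf[i] = 0x0a // field 1, wire type 2

	fmt.Println(size-i, buf) // bytes written (== size) and the encoded message
}
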
_ = l l = m.Policy.Size() @@ -430,6 +731,9 @@ func (m *AuditSinkSpec) Size() (n int) { } func (m *Policy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Level) @@ -444,6 +748,9 @@ func (m *Policy) Size() (n int) { } func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -461,6 +768,9 @@ func (m *ServiceReference) Size() (n int) { } func (m *Webhook) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Throttle != nil { @@ -473,6 +783,9 @@ func (m *Webhook) Size() (n int) { } func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.URL != nil { @@ -491,6 +804,9 @@ func (m *WebhookClientConfig) Size() (n int) { } func (m *WebhookThrottleConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.QPS != nil { @@ -503,14 +819,7 @@ func (m *WebhookThrottleConfig) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -520,7 +829,7 @@ func (this *AuditSink) String() string { return "nil" } s := strings.Join([]string{`&AuditSink{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "AuditSinkSpec", "AuditSinkSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -530,9 +839,14 @@ func (this *AuditSinkList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]AuditSink{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "AuditSink", "AuditSink", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&AuditSinkList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "AuditSink", "AuditSink", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -577,7 +891,7 @@ func (this *Webhook) String() string { return "nil" } s := strings.Join([]string{`&Webhook{`, - `Throttle:` + strings.Replace(fmt.Sprintf("%v", this.Throttle), "WebhookThrottleConfig", "WebhookThrottleConfig", 1) + `,`, + `Throttle:` + strings.Replace(this.Throttle.String(), "WebhookThrottleConfig", "WebhookThrottleConfig", 1) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -589,7 +903,7 @@ func (this *WebhookClientConfig) String() string { } s := strings.Join([]string{`&WebhookClientConfig{`, `URL:` + valueToStringGenerated(this.URL) + `,`, - `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, `CABundle:` + 
valueToStringGenerated(this.CABundle) + `,`, `}`, }, "") @@ -629,7 +943,7 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -657,7 +971,7 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -666,6 +980,9 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -687,7 +1004,7 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -696,6 +1013,9 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -712,6 +1032,9 @@ func (m *AuditSink) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -739,7 +1062,7 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -767,7 +1090,7 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -776,6 +1099,9 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -797,7 +1123,7 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -806,6 +1132,9 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -823,6 +1152,9 @@ func (m *AuditSinkList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -850,7 +1182,7 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -878,7 +1210,7 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -887,6 +1219,9 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -908,7 +1243,7 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -917,6 +1252,9 @@ func (m 
*AuditSinkSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -933,6 +1271,9 @@ func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -960,7 +1301,7 @@ func (m *Policy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -988,7 +1329,7 @@ func (m *Policy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -998,6 +1339,9 @@ func (m *Policy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1017,7 +1361,7 @@ func (m *Policy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1027,6 +1371,9 @@ func (m *Policy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1041,6 +1388,9 @@ func (m *Policy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1068,7 +1418,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1096,7 +1446,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1106,6 +1456,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1125,7 +1478,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1135,6 +1488,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1154,7 +1510,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1164,6 +1520,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1184,7 +1543,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= 
int32(b&0x7F) << shift if b < 0x80 { break } @@ -1199,6 +1558,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1226,7 +1588,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1254,7 +1616,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1263,6 +1625,9 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1287,7 +1652,7 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1296,6 +1661,9 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1312,6 +1680,9 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1339,7 +1710,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1367,7 +1738,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1377,6 +1748,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1397,7 +1771,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1406,6 +1780,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1430,7 +1807,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1439,6 +1816,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1456,6 +1836,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1483,7 +1866,7 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1511,7 +1894,7 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1531,7 +1914,7 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1546,6 +1929,9 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1612,10 +1998,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1644,6 +2033,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1662,59 +2054,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 765 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0x13, 0x47, - 0x14, 0xf6, 0xc6, 0x76, 0x6c, 0x4f, 0x9c, 0x36, 0x9d, 0xb4, 0x95, 0x1b, 0x55, 0x6b, 0x6b, 0xa5, - 0x4a, 0x91, 0xda, 0xcc, 0x36, 0x55, 0xd4, 0x56, 0x88, 0x4b, 0x36, 0x27, 0xa4, 0x10, 0xc2, 0x98, - 0x80, 0x40, 0x08, 0x31, 0x5e, 0x3f, 0xef, 0x0e, 0xb6, 0x77, 0x97, 0xdd, 0x59, 0xa3, 0xdc, 0xf8, - 0x09, 0xfc, 0x05, 0xfe, 0x06, 0x37, 0x24, 0x90, 0x72, 0xcc, 0x31, 0xa7, 0x88, 0x98, 0x03, 0xff, - 0x81, 0x13, 0x9a, 0xd9, 0x59, 0xdb, 0xc4, 0x41, 0x38, 0xb7, 0x79, 0xdf, 0x7b, 0xdf, 0xf7, 0xbe, - 0xf7, 0xde, 0xa0, 0x83, 0xfe, 0xff, 0x09, 0xe1, 0xa1, 0xdd, 0x4f, 0x3b, 0x10, 0x07, 0x20, 0x20, - 0xb1, 0x47, 0x10, 0x74, 0xc3, 0xd8, 0xd6, 0x09, 0x16, 0x71, 0x9b, 0xa5, 0x5d, 0x2e, 0x62, 0xf0, - 0x78, 0x22, 0x62, 0x26, 0x78, 0x18, 0xd8, 0xa3, 0x6d, 0x36, 0x88, 0x7c, 0xb6, 0x6d, 0x7b, 0x10, - 0x40, 0xcc, 0x04, 0x74, 0x49, 0x14, 0x87, 0x22, 0xc4, 0x7f, 0x64, 0x34, 0xc2, 0x22, 0x4e, 0xe6, - 0x68, 0x24, 0xa7, 0x6d, 0x6c, 0x79, 0x5c, 0xf8, 0x69, 0x87, 0xb8, 0xe1, 0xd0, 0xf6, 0x42, 0x2f, - 0xb4, 0x15, 0xbb, 0x93, 0xf6, 0x54, 0xa4, 0x02, 0xf5, 0xca, 0x54, 0x37, 0x76, 0xa6, 0x66, 0x86, - 0xcc, 0xf5, 0x79, 0x00, 0xf1, 0xb1, 0x1d, 0xf5, 0x3d, 0x09, 0x24, 0xf6, 0x10, 0x04, 0xb3, 0x47, - 0x73, 0x5e, 0x36, 0xec, 0x6f, 0xb1, 0xe2, 0x34, 0x10, 0x7c, 0x08, 0x73, 0x84, 0x7f, 0xbf, 0x47, - 0x48, 0x5c, 0x1f, 0x86, 0xec, 0x32, 0xcf, 0x7a, 0x6f, 0xa0, 0xda, 0xae, 0x1c, 0xb6, 0xcd, 0x83, - 0x3e, 0x7e, 0x8a, 0xaa, 0xd2, 0x51, 0x97, 0x09, 0xd6, 0x30, 0x5a, 0xc6, 0xe6, 0xca, 0x3f, 0x7f, - 0x93, 0xe9, 0x56, 0x26, 0xc2, 0x24, 0xea, 0x7b, 0x12, 0x48, 0x88, 0xac, 0x26, 0xa3, 0x6d, 0x72, - 0xa7, 0xf3, 0x0c, 0x5c, 0x71, 0x1b, 0x04, 0x73, 0xf0, 0xc9, 0x79, 0xb3, 0x30, 0x3e, 0x6f, 0xa2, - 0x29, 0x46, 0x27, 0xaa, 0xf8, 0x3e, 0x2a, 0x25, 0x11, 0xb8, 0x8d, 0x25, 0xa5, 0xbe, 0x43, 0x16, - 0xda, 0x39, 0x99, 0x38, 0x6c, 0x47, 0xe0, 
0x3a, 0x75, 0xdd, 0xa1, 0x24, 0x23, 0xaa, 0xf4, 0xac, - 0x77, 0x06, 0x5a, 0x9d, 0x54, 0xed, 0xf3, 0x44, 0xe0, 0xc7, 0x73, 0xb3, 0x90, 0xc5, 0x66, 0x91, - 0x6c, 0x35, 0xc9, 0x9a, 0xee, 0x53, 0xcd, 0x91, 0x99, 0x39, 0x8e, 0x50, 0x99, 0x0b, 0x18, 0x26, - 0x8d, 0xa5, 0x56, 0xf1, 0xd2, 0x9a, 0x16, 0x1a, 0xc4, 0x59, 0xd5, 0xe2, 0xe5, 0x5b, 0x52, 0x86, - 0x66, 0x6a, 0xd6, 0xdb, 0xd9, 0x31, 0xe4, 0x78, 0xf8, 0x08, 0x2d, 0x47, 0xe1, 0x80, 0xbb, 0xc7, - 0x7a, 0x88, 0xad, 0x05, 0x3b, 0x1d, 0x2a, 0x92, 0xf3, 0x83, 0x6e, 0xb3, 0x9c, 0xc5, 0x54, 0x8b, - 0xe1, 0x87, 0xa8, 0xf2, 0x02, 0x3a, 0x7e, 0x18, 0xf6, 0xf5, 0x29, 0xc8, 0x82, 0xba, 0x0f, 0x32, - 0x96, 0xf3, 0xa3, 0x16, 0xae, 0x68, 0x80, 0xe6, 0x7a, 0x96, 0x8b, 0x74, 0x33, 0xfc, 0x17, 0x2a, - 0x0f, 0x60, 0x04, 0x03, 0x65, 0xbd, 0xe6, 0xfc, 0x9a, 0x8f, 0xbc, 0x2f, 0xc1, 0xcf, 0xf9, 0x83, - 0x66, 0x45, 0xf8, 0x4f, 0xb4, 0x9c, 0x08, 0xe6, 0x41, 0xb6, 0xd3, 0x9a, 0xb3, 0x2e, 0x6d, 0xb7, - 0x15, 0x22, 0x6b, 0xd5, 0x8b, 0xea, 0x12, 0xeb, 0xb5, 0x81, 0xd6, 0xda, 0x10, 0x8f, 0xb8, 0x0b, - 0x14, 0x7a, 0x10, 0x43, 0xe0, 0x02, 0xb6, 0x51, 0x2d, 0x60, 0x43, 0x48, 0x22, 0xe6, 0x82, 0xee, - 0xf9, 0x93, 0xee, 0x59, 0x3b, 0xc8, 0x13, 0x74, 0x5a, 0x83, 0x5b, 0xa8, 0x24, 0x03, 0xb5, 0x82, - 0xda, 0xf4, 0x5f, 0xc9, 0x5a, 0xaa, 0x32, 0xf8, 0x77, 0x54, 0x8a, 0x98, 0xf0, 0x1b, 0x45, 0x55, - 0x51, 0x95, 0xd9, 0x43, 0x26, 0x7c, 0xaa, 0x50, 0x95, 0x0d, 0x63, 0xd1, 0x28, 0xb5, 0x8c, 0xcd, - 0xb2, 0xce, 0x86, 0xb1, 0xa0, 0x0a, 0xb5, 0x3e, 0x19, 0x28, 0xdf, 0x0e, 0xee, 0xa1, 0xaa, 0xf0, - 0xe3, 0x50, 0x88, 0x01, 0xe8, 0x43, 0xde, 0xbc, 0xde, 0xc2, 0xef, 0x69, 0xf6, 0x5e, 0x18, 0xf4, - 0xb8, 0xe7, 0xd4, 0xe5, 0xbf, 0xcc, 0x31, 0x3a, 0xd1, 0xc6, 0x02, 0xd5, 0xdd, 0x01, 0x87, 0x40, - 0x64, 0x75, 0xfa, 0xb8, 0x37, 0xae, 0xd7, 0x6b, 0x6f, 0x46, 0xc1, 0xf9, 0x59, 0x6f, 0xa5, 0x3e, - 0x8b, 0xd2, 0xaf, 0xba, 0x58, 0x6f, 0x0c, 0xb4, 0x7e, 0x05, 0x17, 0xff, 0x86, 0x8a, 0x69, 0x9c, - 0x9f, 0xbf, 0x32, 0x3e, 0x6f, 0x16, 0x8f, 0xe8, 0x3e, 0x95, 0x18, 0x7e, 0x82, 0x2a, 0x49, 0x76, - 0x3f, 0xed, 0xf1, 0xbf, 0x05, 0x3d, 0x5e, 0xbe, 0xba, 0xb3, 0x22, 0x7f, 0x61, 0x8e, 0xe6, 0xa2, - 0x78, 0x13, 0x55, 0x5d, 0xe6, 0xa4, 0x41, 0x77, 0x00, 0xea, 0x78, 0xf5, 0x6c, 0x65, 0x7b, 0xbb, - 0x19, 0x46, 0x27, 0x59, 0xab, 0x8d, 0x7e, 0xb9, 0x72, 0xc7, 0xd2, 0xfd, 0xf3, 0x28, 0x51, 0xee, - 0x8b, 0x99, 0xfb, 0xbb, 0x87, 0x6d, 0x2a, 0x31, 0xdc, 0x44, 0xe5, 0x4e, 0x1a, 0x27, 0x42, 0x79, - 0x2f, 0x3a, 0x35, 0xf9, 0xab, 0x1d, 0x09, 0xd0, 0x0c, 0x77, 0xc8, 0xc9, 0x85, 0x59, 0x38, 0xbd, - 0x30, 0x0b, 0x67, 0x17, 0x66, 0xe1, 0xe5, 0xd8, 0x34, 0x4e, 0xc6, 0xa6, 0x71, 0x3a, 0x36, 0x8d, - 0xb3, 0xb1, 0x69, 0x7c, 0x18, 0x9b, 0xc6, 0xab, 0x8f, 0x66, 0xe1, 0x51, 0x35, 0x9f, 0xea, 0x4b, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x6c, 0xff, 0x86, 0xcd, 0x06, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index f7248e105df3b..02be20dec53d7 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -17,41 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto - - It has these top-level messages: - BoundObjectReference - ExtraValue - TokenRequest - TokenRequestSpec - TokenRequestStatus - TokenReview - TokenReviewSpec - TokenReviewStatus - UserInfo -*/ package v1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" + strings "strings" - io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they are not otherwise used. @@ -65,41 +46,257 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } -func (*BoundObjectReference) ProtoMessage() {} -func (*BoundObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } +func (*BoundObjectReference) ProtoMessage() {} +func (*BoundObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{0} +} +func (m *BoundObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BoundObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BoundObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoundObjectReference.Merge(m, src) +} +func (m *BoundObjectReference) XXX_Size() int { + return m.Size() +} +func (m *BoundObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_BoundObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_BoundObjectReference proto.InternalMessageInfo + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{1} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo + +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{2} +} +func (m *TokenRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequest.Merge(m, src) +} +func (m *TokenRequest) XXX_Size() int { + return 
m.Size() +} +func (m *TokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenRequest proto.InternalMessageInfo + +func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } +func (*TokenRequestSpec) ProtoMessage() {} +func (*TokenRequestSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{3} +} +func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequestSpec.Merge(m, src) +} +func (m *TokenRequestSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequestSpec.DiscardUnknown(m) +} -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo + +func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } +func (*TokenRequestStatus) ProtoMessage() {} +func (*TokenRequestStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{4} +} +func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequestStatus.Merge(m, src) +} +func (m *TokenRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequestStatus.DiscardUnknown(m) +} -func (m *TokenRequest) Reset() { *m = TokenRequest{} } -func (*TokenRequest) ProtoMessage() {} -func (*TokenRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo -func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } -func (*TokenRequestSpec) ProtoMessage() {} -func (*TokenRequestSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{5} +} +func (m *TokenReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReview.Merge(m, src) +} +func (m *TokenReview) XXX_Size() int { + return m.Size() +} +func (m *TokenReview) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReview.DiscardUnknown(m) +} -func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } -func (*TokenRequestStatus) ProtoMessage() {} -func (*TokenRequestStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_TokenReview proto.InternalMessageInfo -func (m *TokenReview) Reset() { 
*m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{6} +} +func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewSpec.Merge(m, src) +} +func (m *TokenReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m) +} -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{7} +} +func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewStatus.Merge(m, src) +} +func (m *TokenReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m) +} -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{8} +} +func (m *UserInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserInfo.Merge(m, src) +} +func (m *UserInfo) XXX_Size() int { + return m.Size() +} +func (m *UserInfo) XXX_DiscardUnknown() { + xxx_messageInfo_UserInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference") @@ -111,11 +308,78 @@ func init() { proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), 
"k8s.io.api.authentication.v1.TokenReviewStatus") proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1.UserInfo.ExtraEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptor_2953ea822e7ffe1e) } + +var fileDescriptor_2953ea822e7ffe1e = []byte{ + // 903 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0x20, 0x2e, 0x5e, 0x09, + 0x55, 0xc0, 0xda, 0x9b, 0x08, 0xc1, 0x6a, 0x91, 0x90, 0x6a, 0x1a, 0x41, 0x84, 0x60, 0x57, 0xb3, + 0xdb, 0x82, 0x38, 0x31, 0xb1, 0x5f, 0x53, 0x13, 0x3c, 0x36, 0xf6, 0x38, 0x6c, 0x6e, 0xfb, 0x27, + 0x70, 0x04, 0x89, 0x03, 0x7f, 0x04, 0x12, 0xff, 0x42, 0x8f, 0x2b, 0x4e, 0x3d, 0xa0, 0x88, 0x9a, + 0x2b, 0x47, 0x4e, 0x9c, 0xd0, 0x8c, 0xa7, 0x71, 0x9c, 0xb4, 0x69, 0x4e, 0x7b, 0x8b, 0xdf, 0xfb, + 0xde, 0xf7, 0xde, 0xfb, 0xe6, 0xcb, 0x0c, 0xea, 0x8d, 0x1e, 0xc4, 0xa6, 0x17, 0x58, 0xa3, 0x64, + 0x00, 0x11, 0x03, 0x0e, 0xb1, 0x35, 0x06, 0xe6, 0x06, 0x91, 0xa5, 0x12, 0x34, 0xf4, 0x2c, 0x9a, + 0xf0, 0x53, 0x60, 0xdc, 0x73, 0x28, 0xf7, 0x02, 0x66, 0x8d, 0x3b, 0xd6, 0x10, 0x18, 0x44, 0x94, + 0x83, 0x6b, 0x86, 0x51, 0xc0, 0x03, 0xfc, 0x7a, 0x86, 0x36, 0x69, 0xe8, 0x99, 0x45, 0xb4, 0x39, + 0xee, 0xec, 0xde, 0x1b, 0x7a, 0xfc, 0x34, 0x19, 0x98, 0x4e, 0xe0, 0x5b, 0xc3, 0x60, 0x18, 0x58, + 0xb2, 0x68, 0x90, 0x9c, 0xc8, 0x2f, 0xf9, 0x21, 0x7f, 0x65, 0x64, 0xbb, 0xef, 0xe5, 0xad, 0x7d, + 0xea, 0x9c, 0x7a, 0x0c, 0xa2, 0x89, 0x15, 0x8e, 0x86, 0x22, 0x10, 0x5b, 0x3e, 0x70, 0x7a, 0xc5, + 0x08, 0xbb, 0xd6, 0x75, 0x55, 0x51, 0xc2, 0xb8, 0xe7, 0xc3, 0x52, 0xc1, 0xfb, 0x37, 0x15, 0xc4, + 0xce, 0x29, 0xf8, 0x74, 0xb1, 0xce, 0xf8, 0x43, 0x43, 0xaf, 0xda, 0x41, 0xc2, 0xdc, 0x47, 0x83, + 0x6f, 0xc1, 0xe1, 0x04, 0x4e, 0x20, 0x02, 0xe6, 0x00, 0xde, 0x43, 0xd5, 0x91, 0xc7, 0xdc, 0x96, + 0xb6, 0xa7, 0xed, 0x37, 0xec, 0x5b, 0x67, 0x53, 0xbd, 0x94, 0x4e, 0xf5, 0xea, 0x67, 0x1e, 0x73, + 0x89, 0xcc, 0xe0, 0x2e, 0x42, 0xf4, 0x71, 0xff, 0x18, 0xa2, 0xd8, 0x0b, 0x58, 0xab, 0x2c, 0x71, + 0x58, 0xe1, 0xd0, 0xc1, 0x2c, 0x43, 0xe6, 0x50, 0x82, 0x95, 0x51, 0x1f, 0x5a, 0x95, 0x22, 0xeb, + 0x17, 0xd4, 0x07, 0x22, 0x33, 0xd8, 0x46, 0x95, 0xa4, 0x7f, 0xd8, 0xaa, 0x4a, 0xc0, 0x7d, 0x05, + 0xa8, 0x1c, 0xf5, 0x0f, 0xff, 0x9b, 0xea, 0x6f, 0x5e, 0xb7, 0x24, 0x9f, 0x84, 0x10, 0x9b, 0x47, + 0xfd, 0x43, 0x22, 0x8a, 0x8d, 0x0f, 0x10, 0xea, 0x3d, 0xe3, 0x11, 0x3d, 0xa6, 0xdf, 0x25, 0x80, + 0x75, 0x54, 0xf3, 0x38, 0xf8, 0x71, 0x4b, 0xdb, 0xab, 0xec, 0x37, 0xec, 0x46, 0x3a, 0xd5, 0x6b, + 0x7d, 0x11, 0x20, 0x59, 0xfc, 0x61, 0xfd, 0xa7, 0x5f, 0xf5, 0xd2, 0xf3, 0x3f, 0xf7, 0x4a, 0xc6, + 0x2f, 0x65, 0x74, 0xeb, 0x69, 0x30, 0x02, 0x46, 0xe0, 0xfb, 0x04, 0x62, 0x8e, 0xbf, 0x41, 0x75, + 0x71, 0x44, 0x2e, 0xe5, 0x54, 0x2a, 0xd1, 0xec, 0xde, 0x37, 0x73, 0x77, 0xcc, 0x86, 0x30, 0xc3, + 0xd1, 0x50, 0x04, 0x62, 0x53, 0xa0, 0xcd, 0x71, 0xc7, 0xcc, 0xe4, 0xfc, 0x1c, 0x38, 0xcd, 0x35, + 0xc9, 0x63, 0x64, 0xc6, 0x8a, 0x1f, 0xa3, 0x6a, 0x1c, 0x82, 0x23, 0xf5, 0x6b, 0x76, 0x4d, 0x73, + 0x95, 0xf7, 0xcc, 0xf9, 0xd9, 0x9e, 0x84, 0xe0, 0xe4, 0x0a, 0x8a, 0x2f, 0x22, 0x99, 0xf0, 0x57, + 0x68, 0x23, 0xe6, 0x94, 0x27, 0xb1, 0x54, 0xb9, 0x38, 0xf1, 0x4d, 0x9c, 0xb2, 0xce, 0xde, 0x52, + 0xac, 0x1b, 0xd9, 0x37, 0x51, 0x7c, 0xc6, 0xbf, 0x1a, 0xda, 0x5e, 0x1c, 0x01, 0xbf, 0x83, 0x1a, + 0x34, 0x71, 0x3d, 0x61, 0x9a, 
0x4b, 0x89, 0x37, 0xd3, 0xa9, 0xde, 0x38, 0xb8, 0x0c, 0x92, 0x3c, + 0x8f, 0x3f, 0x46, 0x3b, 0xf0, 0x2c, 0xf4, 0x22, 0xd9, 0xfd, 0x09, 0x38, 0x01, 0x73, 0x63, 0x79, + 0xd6, 0x15, 0xfb, 0x4e, 0x3a, 0xd5, 0x77, 0x7a, 0x8b, 0x49, 0xb2, 0x8c, 0xc7, 0x0c, 0x6d, 0x0d, + 0x0a, 0x96, 0x55, 0x8b, 0x76, 0x57, 0x2f, 0x7a, 0x95, 0xcd, 0x6d, 0x9c, 0x4e, 0xf5, 0xad, 0x62, + 0x86, 0x2c, 0xb0, 0x1b, 0xbf, 0x69, 0x08, 0x2f, 0xab, 0x84, 0xef, 0xa2, 0x1a, 0x17, 0x51, 0xf5, + 0x17, 0xd9, 0x54, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x13, 0x74, 0x3b, 0x5f, 0xe0, 0xa9, 0xe7, + 0x43, 0xcc, 0xa9, 0x1f, 0xaa, 0xd3, 0x7e, 0x7b, 0x3d, 0x2f, 0x89, 0x32, 0xfb, 0x35, 0x45, 0x7f, + 0xbb, 0xb7, 0x4c, 0x47, 0xae, 0xea, 0x61, 0xfc, 0x5c, 0x46, 0x4d, 0x35, 0xf6, 0xd8, 0x83, 0x1f, + 0x5e, 0x82, 0x97, 0x1f, 0x15, 0xbc, 0x7c, 0x6f, 0x2d, 0xdf, 0x89, 0xd1, 0xae, 0xb5, 0xf2, 0x97, + 0x0b, 0x56, 0xb6, 0xd6, 0xa7, 0x5c, 0xed, 0x64, 0x07, 0xbd, 0xb2, 0xd0, 0x7f, 0xbd, 0xe3, 0x2c, + 0x98, 0xbd, 0xbc, 0xda, 0xec, 0xc6, 0x3f, 0x1a, 0xda, 0x59, 0x1a, 0x09, 0x7f, 0x88, 0x36, 0xe7, + 0x26, 0x87, 0xec, 0x86, 0xad, 0xdb, 0x77, 0x54, 0xbf, 0xcd, 0x83, 0xf9, 0x24, 0x29, 0x62, 0xf1, + 0xa7, 0xa8, 0x9a, 0xc4, 0x10, 0x29, 0x85, 0xdf, 0x5a, 0x2d, 0xc7, 0x51, 0x0c, 0x51, 0x9f, 0x9d, + 0x04, 0xb9, 0xb4, 0x22, 0x42, 0x24, 0x43, 0x71, 0x93, 0xea, 0x0d, 0x7f, 0xdb, 0xbb, 0xa8, 0x06, + 0x51, 0x14, 0x44, 0xea, 0xde, 0x9e, 0x69, 0xd3, 0x13, 0x41, 0x92, 0xe5, 0x8c, 0xdf, 0xcb, 0xa8, + 0x7e, 0xd9, 0x12, 0xbf, 0x8b, 0xea, 0xa2, 0x8d, 0xbc, 0xec, 0x33, 0x41, 0xb7, 0x55, 0x91, 0xc4, + 0x88, 0x38, 0x99, 0x21, 0xf0, 0x1b, 0xa8, 0x92, 0x78, 0xae, 0x7a, 0x43, 0x9a, 0x73, 0x97, 0x3e, + 0x11, 0x71, 0x6c, 0xa0, 0x8d, 0x61, 0x14, 0x24, 0xa1, 0xb0, 0x81, 0x18, 0x14, 0x89, 0x13, 0xfd, + 0x44, 0x46, 0x88, 0xca, 0xe0, 0x63, 0x54, 0x03, 0x71, 0xe7, 0xcb, 0x5d, 0x9a, 0xdd, 0xce, 0x7a, + 0xd2, 0x98, 0xf2, 0x9d, 0xe8, 0x31, 0x1e, 0x4d, 0xe6, 0xb6, 0x12, 0x31, 0x92, 0xd1, 0xed, 0x0e, + 0xd4, 0x5b, 0x22, 0x31, 0x78, 0x1b, 0x55, 0x46, 0x30, 0xc9, 0x36, 0x22, 0xe2, 0x27, 0xfe, 0x08, + 0xd5, 0xc6, 0xe2, 0x99, 0x51, 0x47, 0xb2, 0xbf, 0xba, 0x6f, 0xfe, 0x2c, 0x91, 0xac, 0xec, 0x61, + 0xf9, 0x81, 0x66, 0xef, 0x9f, 0x5d, 0xb4, 0x4b, 0x2f, 0x2e, 0xda, 0xa5, 0xf3, 0x8b, 0x76, 0xe9, + 0x79, 0xda, 0xd6, 0xce, 0xd2, 0xb6, 0xf6, 0x22, 0x6d, 0x6b, 0xe7, 0x69, 0x5b, 0xfb, 0x2b, 0x6d, + 0x6b, 0x3f, 0xfe, 0xdd, 0x2e, 0x7d, 0x5d, 0x1e, 0x77, 0xfe, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x8c, + 0x44, 0x87, 0xd0, 0xe2, 0x08, 0x00, 0x00, +} + func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -123,33 +387,42 @@ func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { } func (m *BoundObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BoundObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.UID) + copy(dAtA[i:], m.UID) i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return 
i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -157,32 +430,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -190,41 +462,52 @@ func (m *TokenRequest) Marshal() (dAtA []byte, err error) { } func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -232,47 +515,48 @@ func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenRequestSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequestSpec) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x20 } if m.BoundObjectRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.BoundObjectRef.Size())) - n4, err := m.BoundObjectRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.BoundObjectRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x1a } - if m.ExpirationSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -280,29 +564,37 @@ func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenRequestStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExpirationTimestamp.Size())) - n5, err := m.ExpirationTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ExpirationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -310,41 +602,52 @@ func (m *TokenReview) Marshal() (dAtA []byte, err error) { } func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n7, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n8, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -352,36 +655,36 @@ func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) if len(m.Audiences) > 0 { - for _, s := range m.Audiences { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -389,52 +692,54 @@ func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x1a + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i-- if m.Authenticated { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) - n9, err := m.User.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0x22 - i++ - l = len(s) - for l 
>= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *UserInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -442,77 +747,81 @@ func (m *UserInfo) Marshal() (dAtA []byte, err error) { } func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x22 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n10, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *BoundObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -527,6 +836,9 @@ func (m *BoundObjectReference) Size() (n int) { } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var 
l int _ = l if len(m) > 0 { @@ -539,6 +851,9 @@ func (m ExtraValue) Size() (n int) { } func (m *TokenRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -551,6 +866,9 @@ func (m *TokenRequest) Size() (n int) { } func (m *TokenRequestSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Audiences) > 0 { @@ -570,6 +888,9 @@ func (m *TokenRequestSpec) Size() (n int) { } func (m *TokenRequestStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Token) @@ -580,6 +901,9 @@ func (m *TokenRequestStatus) Size() (n int) { } func (m *TokenReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -592,6 +916,9 @@ func (m *TokenReview) Size() (n int) { } func (m *TokenReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Token) @@ -606,6 +933,9 @@ func (m *TokenReviewSpec) Size() (n int) { } func (m *TokenReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -623,6 +953,9 @@ func (m *TokenReviewStatus) Size() (n int) { } func (m *UserInfo) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Username) @@ -648,14 +981,7 @@ func (m *UserInfo) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -678,7 +1004,7 @@ func (this *TokenRequest) String() string { return "nil" } s := strings.Join([]string{`&TokenRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenRequestSpec", "TokenRequestSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenRequestStatus", "TokenRequestStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -691,7 +1017,7 @@ func (this *TokenRequestSpec) String() string { } s := strings.Join([]string{`&TokenRequestSpec{`, `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, - `BoundObjectRef:` + strings.Replace(fmt.Sprintf("%v", this.BoundObjectRef), "BoundObjectReference", "BoundObjectReference", 1) + `,`, + `BoundObjectRef:` + strings.Replace(this.BoundObjectRef.String(), "BoundObjectReference", "BoundObjectReference", 1) + `,`, `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, `}`, }, "") @@ -703,7 +1029,7 @@ func (this *TokenRequestStatus) String() string { } s := strings.Join([]string{`&TokenRequestStatus{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `ExpirationTimestamp:` + strings.Replace(strings.Replace(this.ExpirationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `ExpirationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExpirationTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -713,7 +1039,7 @@ func (this *TokenReview) String() string { return "nil" } s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, 
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -790,7 +1116,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -818,7 +1144,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -828,6 +1154,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -847,7 +1176,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -857,6 +1186,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -876,7 +1208,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -886,6 +1218,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -905,7 +1240,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -915,6 +1250,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -929,6 +1267,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -956,7 +1297,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -984,7 +1325,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -994,6 +1335,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1008,6 +1352,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 
{ + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1035,7 +1382,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1063,7 +1410,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1072,6 +1419,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1093,7 +1443,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1102,6 +1452,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1123,7 +1476,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1132,6 +1485,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1148,6 +1504,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1175,7 +1534,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1203,7 +1562,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1213,6 +1572,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1232,7 +1594,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1241,6 +1603,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1265,7 +1630,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1280,6 +1645,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1307,7 +1675,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1335,7 +1703,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1345,6 +1713,9 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1364,7 +1735,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1373,6 +1744,9 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1389,6 +1763,9 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1416,7 +1793,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1444,7 +1821,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1453,6 +1830,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1474,7 +1854,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1483,6 +1863,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1504,7 +1887,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1513,6 +1896,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1529,6 +1915,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1556,7 +1945,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1584,7 +1973,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1594,6 +1983,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1613,7 +2005,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1623,6 +2015,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1637,6 +2032,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1664,7 +2062,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1692,7 +2090,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1712,7 +2110,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1721,6 +2119,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1742,7 +2143,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1752,6 +2153,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1771,7 +2175,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1781,6 +2185,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1795,6 +2202,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1822,7 +2232,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1850,7 +2260,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1860,6 +2270,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1879,7 
+2292,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1889,6 +2302,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1908,7 +2324,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1918,6 +2334,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1937,7 +2356,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1946,6 +2365,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1966,7 +2388,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1983,7 +2405,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1993,6 +2415,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2009,7 +2434,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2018,7 +2443,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -2055,6 +2480,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2121,10 +2549,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2153,6 +2584,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2171,68 +2605,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 900 
bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0xc0, 0x2e, 0x41, 0x42, - 0x15, 0xb0, 0xf6, 0x26, 0x42, 0xb0, 0x5a, 0x24, 0xa4, 0x9a, 0x46, 0x10, 0x21, 0xd8, 0xd5, 0xec, - 0xb6, 0x20, 0x4e, 0x4c, 0xec, 0xd7, 0xc4, 0x04, 0x8f, 0x8d, 0x3d, 0x0e, 0x9b, 0xdb, 0xfe, 0x09, - 0x1c, 0x41, 0xe2, 0xc0, 0x1f, 0x81, 0xc4, 0xbf, 0xd0, 0xe3, 0x8a, 0xd3, 0x1e, 0x50, 0x44, 0xcd, - 0x95, 0x23, 0x27, 0x4e, 0x68, 0xc6, 0xd3, 0x38, 0x4e, 0xda, 0x34, 0x27, 0x6e, 0x9e, 0xf7, 0xbe, - 0xf7, 0xbd, 0x37, 0xdf, 0x7c, 0x9e, 0x41, 0xbd, 0xf1, 0xfd, 0xd8, 0xf4, 0x02, 0x6b, 0x9c, 0x0c, - 0x20, 0x62, 0xc0, 0x21, 0xb6, 0x26, 0xc0, 0xdc, 0x20, 0xb2, 0x54, 0x82, 0x86, 0x9e, 0x45, 0x13, - 0x3e, 0x02, 0xc6, 0x3d, 0x87, 0x72, 0x2f, 0x60, 0xd6, 0xa4, 0x63, 0x0d, 0x81, 0x41, 0x44, 0x39, - 0xb8, 0x66, 0x18, 0x05, 0x3c, 0xc0, 0xaf, 0x66, 0x68, 0x93, 0x86, 0x9e, 0x59, 0x44, 0x9b, 0x93, - 0xce, 0xfe, 0xdd, 0xa1, 0xc7, 0x47, 0xc9, 0xc0, 0x74, 0x02, 0xdf, 0x1a, 0x06, 0xc3, 0xc0, 0x92, - 0x45, 0x83, 0xe4, 0x4c, 0xae, 0xe4, 0x42, 0x7e, 0x65, 0x64, 0xfb, 0xef, 0xe6, 0xad, 0x7d, 0xea, - 0x8c, 0x3c, 0x06, 0xd1, 0xd4, 0x0a, 0xc7, 0x43, 0x11, 0x88, 0x2d, 0x1f, 0x38, 0xbd, 0x62, 0x84, - 0x7d, 0xeb, 0xba, 0xaa, 0x28, 0x61, 0xdc, 0xf3, 0x61, 0xa5, 0xe0, 0xbd, 0x9b, 0x0a, 0x62, 0x67, - 0x04, 0x3e, 0x5d, 0xae, 0x6b, 0xff, 0xae, 0xa1, 0x97, 0xed, 0x20, 0x61, 0xee, 0xc3, 0xc1, 0x37, - 0xe0, 0x70, 0x02, 0x67, 0x10, 0x01, 0x73, 0x00, 0x1f, 0xa0, 0xea, 0xd8, 0x63, 0x6e, 0x4b, 0x3b, - 0xd0, 0x0e, 0x1b, 0xf6, 0xad, 0xf3, 0x99, 0x51, 0x4a, 0x67, 0x46, 0xf5, 0x53, 0x8f, 0xb9, 0x44, - 0x66, 0x70, 0x17, 0x21, 0xfa, 0xa8, 0x7f, 0x0a, 0x51, 0xec, 0x05, 0xac, 0x55, 0x96, 0x38, 0xac, - 0x70, 0xe8, 0x68, 0x9e, 0x21, 0x0b, 0x28, 0xc1, 0xca, 0xa8, 0x0f, 0xad, 0x4a, 0x91, 0xf5, 0x73, - 0xea, 0x03, 0x91, 0x19, 0x6c, 0xa3, 0x4a, 0xd2, 0x3f, 0x6e, 0x55, 0x25, 0xe0, 0x9e, 0x02, 0x54, - 0x4e, 0xfa, 0xc7, 0xff, 0xce, 0x8c, 0xd7, 0xaf, 0xdb, 0x24, 0x9f, 0x86, 0x10, 0x9b, 0x27, 0xfd, - 0x63, 0x22, 0x8a, 0xdb, 0xef, 0x23, 0xd4, 0x7b, 0xca, 0x23, 0x7a, 0x4a, 0xbf, 0x4d, 0x00, 0x1b, - 0xa8, 0xe6, 0x71, 0xf0, 0xe3, 0x96, 0x76, 0x50, 0x39, 0x6c, 0xd8, 0x8d, 0x74, 0x66, 0xd4, 0xfa, - 0x22, 0x40, 0xb2, 0xf8, 0x83, 0xfa, 0x8f, 0xbf, 0x18, 0xa5, 0x67, 0x7f, 0x1c, 0x94, 0xda, 0x3f, - 0x97, 0xd1, 0xad, 0x27, 0xc1, 0x18, 0x18, 0x81, 0xef, 0x12, 0x88, 0x39, 0xfe, 0x1a, 0xd5, 0xc5, - 0x11, 0xb9, 0x94, 0x53, 0xa9, 0x44, 0xb3, 0x7b, 0xcf, 0xcc, 0xdd, 0x31, 0x1f, 0xc2, 0x0c, 0xc7, - 0x43, 0x11, 0x88, 0x4d, 0x81, 0x36, 0x27, 0x1d, 0x33, 0x93, 0xf3, 0x33, 0xe0, 0x34, 0xd7, 0x24, - 0x8f, 0x91, 0x39, 0x2b, 0x7e, 0x84, 0xaa, 0x71, 0x08, 0x8e, 0xd4, 0xaf, 0xd9, 0x35, 0xcd, 0x75, - 0xde, 0x33, 0x17, 0x67, 0x7b, 0x1c, 0x82, 0x93, 0x2b, 0x28, 0x56, 0x44, 0x32, 0xe1, 0x2f, 0xd1, - 0x56, 0xcc, 0x29, 0x4f, 0x62, 0xa9, 0x72, 0x71, 0xe2, 0x9b, 0x38, 0x65, 0x9d, 0xbd, 0xa3, 0x58, - 0xb7, 0xb2, 0x35, 0x51, 0x7c, 0xed, 0x7f, 0x34, 0xb4, 0xbb, 0x3c, 0x02, 0x7e, 0x1b, 0x35, 0x68, - 0xe2, 0x7a, 0xc2, 0x34, 0x97, 0x12, 0x6f, 0xa7, 0x33, 0xa3, 0x71, 0x74, 0x19, 0x24, 0x79, 0x1e, - 0x33, 0xb4, 0x33, 0x28, 0xb8, 0x4d, 0xcd, 0xd8, 0x5d, 0x3f, 0xe3, 0x55, 0x0e, 0xb5, 0x71, 0x3a, - 0x33, 0x76, 0x8a, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x42, 0x7b, 0xf0, 0x34, 0xf4, 0x22, 0xc9, 0xf4, - 0x18, 0x9c, 0x80, 0xb9, 0xb1, 0xf4, 0x56, 0xc5, 0xbe, 0x93, 0xce, 0x8c, 0xbd, 0xde, 0x72, 0x92, - 0xac, 0xe2, 0xdb, 0xbf, 0x6a, 0x08, 0xaf, 0xaa, 0x84, 0xdf, 0x40, 0x35, 0x2e, 0xa2, 
0xea, 0x17, - 0xd9, 0x56, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x53, 0x74, 0x3b, 0x27, 0x7c, 0xe2, 0xf9, 0x10, - 0x73, 0xea, 0x87, 0xea, 0xb4, 0xdf, 0xda, 0xcc, 0x4b, 0xa2, 0xcc, 0x7e, 0x45, 0xd1, 0xdf, 0xee, - 0xad, 0xd2, 0x91, 0xab, 0x7a, 0xb4, 0x7f, 0x2a, 0xa3, 0xa6, 0x1a, 0x7b, 0xe2, 0xc1, 0xf7, 0xff, - 0x83, 0x97, 0x1f, 0x16, 0xbc, 0x7c, 0x77, 0x23, 0xdf, 0x89, 0xd1, 0xae, 0xb5, 0xf2, 0x17, 0x4b, - 0x56, 0xb6, 0x36, 0xa7, 0x5c, 0xef, 0x64, 0x07, 0xbd, 0xb4, 0xd4, 0x7f, 0xb3, 0xe3, 0x2c, 0x98, - 0xbd, 0xbc, 0xde, 0xec, 0xed, 0xbf, 0x35, 0xb4, 0xb7, 0x32, 0x12, 0xfe, 0x00, 0x6d, 0x2f, 0x4c, - 0x0e, 0xd9, 0x0d, 0x5b, 0xb7, 0xef, 0xa8, 0x7e, 0xdb, 0x47, 0x8b, 0x49, 0x52, 0xc4, 0xe2, 0x4f, - 0x50, 0x35, 0x89, 0x21, 0x52, 0x0a, 0xbf, 0xb9, 0x5e, 0x8e, 0x93, 0x18, 0xa2, 0x3e, 0x3b, 0x0b, - 0x72, 0x69, 0x45, 0x84, 0x48, 0x06, 0xb1, 0x5d, 0x88, 0xa2, 0x20, 0x52, 0x57, 0xf1, 0x7c, 0xbb, - 0x3d, 0x11, 0x24, 0x59, 0xae, 0xb8, 0xdd, 0xea, 0x0d, 0xdb, 0xfd, 0xad, 0x8c, 0xea, 0x97, 0x2d, - 0xf1, 0x3b, 0xa8, 0x2e, 0xda, 0xc8, 0xcb, 0x3e, 0x13, 0x74, 0x57, 0x75, 0x90, 0x18, 0x11, 0x27, - 0x73, 0x04, 0x7e, 0x0d, 0x55, 0x12, 0xcf, 0x55, 0x6f, 0x48, 0x73, 0xe1, 0xd2, 0x27, 0x22, 0x8e, - 0xdb, 0x68, 0x6b, 0x18, 0x05, 0x49, 0x28, 0x6c, 0x20, 0x66, 0x40, 0xe2, 0x44, 0x3f, 0x96, 0x11, - 0xa2, 0x32, 0xf8, 0x14, 0xd5, 0x40, 0xdc, 0xf9, 0x72, 0xcc, 0x66, 0xb7, 0xb3, 0x99, 0x34, 0xa6, - 0x7c, 0x27, 0x7a, 0x8c, 0x47, 0xd3, 0x05, 0x09, 0x44, 0x8c, 0x64, 0x74, 0xfb, 0x03, 0xf5, 0x96, - 0x48, 0x0c, 0xde, 0x45, 0x95, 0x31, 0x4c, 0xb3, 0x1d, 0x11, 0xf1, 0x89, 0x3f, 0x44, 0xb5, 0x89, - 0x78, 0x66, 0xd4, 0x91, 0x1c, 0xae, 0xef, 0x9b, 0x3f, 0x4b, 0x24, 0x2b, 0x7b, 0x50, 0xbe, 0xaf, - 0xd9, 0x87, 0xe7, 0x17, 0x7a, 0xe9, 0xf9, 0x85, 0x5e, 0x7a, 0x71, 0xa1, 0x97, 0x9e, 0xa5, 0xba, - 0x76, 0x9e, 0xea, 0xda, 0xf3, 0x54, 0xd7, 0x5e, 0xa4, 0xba, 0xf6, 0x67, 0xaa, 0x6b, 0x3f, 0xfc, - 0xa5, 0x97, 0xbe, 0x2a, 0x4f, 0x3a, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x04, 0x81, 0x6f, - 0xe2, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 245d278f57ec2..0721bda875372 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -17,35 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto - - It has these top-level messages: - ExtraValue - TokenReview - TokenReviewSpec - TokenReviewStatus - UserInfo -*/ package v1beta1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -59,25 +44,145 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{1} +} +func (m *TokenReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReview.Merge(m, src) +} +func (m *TokenReview) XXX_Size() int { + return m.Size() +} +func (m *TokenReview) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReview.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReview proto.InternalMessageInfo + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{2} +} +func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewSpec.Merge(m, src) +} +func (m *TokenReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{3} +} +func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewStatus.Merge(m, src) +} +func (m 
*TokenReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m) +} -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{4} +} +func (m *UserInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserInfo.Merge(m, src) +} +func (m *UserInfo) XXX_Size() int { + return m.Size() +} +func (m *UserInfo) XXX_DiscardUnknown() { + xxx_messageInfo_UserInfo.DiscardUnknown(m) +} -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") @@ -85,11 +190,63 @@ func init() { proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1beta1.UserInfo") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.UserInfo.ExtraEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptor_77c9b20d3ad27844) +} + +var fileDescriptor_77c9b20d3ad27844 = []byte{ + // 663 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xb6, 0xf3, 0x53, 0x92, 0x0d, 0x81, 0xb2, 0x12, 0x52, 0x14, 0x09, 0xa7, 0x84, 0x4b, 0xa5, + 0xd2, 0x35, 0xad, 0xaa, 0x52, 0x95, 0x53, 0x0d, 0x15, 0x2a, 0x52, 0x85, 0xb4, 0xb4, 0x1c, 0x80, + 0x03, 0x1b, 0x67, 0xea, 0x98, 0xe0, 0x1f, 0xad, 0xd7, 0x81, 0xde, 0xfa, 0x08, 0x1c, 0x39, 0x22, + 0xf1, 0x24, 0xdc, 0x7a, 0xec, 0xb1, 0x07, 0x14, 0x51, 0xf3, 0x04, 0xbc, 0x01, 0xda, 0xf5, 0xb6, + 0x4e, 0x1b, 0x41, 0xdb, 0x9b, 0xf7, 0x9b, 0xf9, 0xbe, 0x99, 0xf9, 0xc6, 0x83, 0x5e, 0x0c, 0xd7, + 0x12, 0xe2, 0x47, 0xf6, 0x30, 0xed, 0x01, 0x0f, 0x41, 0x40, 0x62, 0x8f, 0x20, 0xec, 0x47, 0xdc, + 0xd6, 0x01, 0x16, 0xfb, 0x36, 0x4b, 0xc5, 0x00, 0x42, 0xe1, 0xbb, 0x4c, 0xf8, 0x51, 0x68, 0x8f, + 0x96, 0x7a, 0x20, 0xd8, 0x92, 0xed, 0x41, 0x08, 0x9c, 0x09, 0xe8, 0x93, 0x98, 0x47, 0x22, 0xc2, + 0xf7, 0x73, 0x0a, 0x61, 0xb1, 0x4f, 0xce, 0x53, 0x88, 0xa6, 0xb4, 0x17, 0x3d, 0x5f, 0x0c, 0xd2, + 0x1e, 0x71, 0xa3, 0xc0, 0xf6, 0x22, 0x2f, 0xb2, 0x15, 0xb3, 0x97, 0xee, 0xa9, 0x97, 0x7a, 0xa8, + 0xaf, 0x5c, 0xb1, 0xbd, 0x52, 0x34, 0x11, 0x30, 0x77, 0xe0, 0x87, 0xc0, 0xf7, 0xed, 0x78, 0xe8, + 0x49, 
0x20, 0xb1, 0x03, 0x10, 0xcc, 0x1e, 0x4d, 0xf5, 0xd1, 0xb6, 0xff, 0xc5, 0xe2, 0x69, 0x28, + 0xfc, 0x00, 0xa6, 0x08, 0xab, 0x97, 0x11, 0x12, 0x77, 0x00, 0x01, 0xbb, 0xc8, 0xeb, 0x3e, 0x46, + 0x68, 0xf3, 0xb3, 0xe0, 0xec, 0x35, 0xfb, 0x98, 0x02, 0xee, 0xa0, 0xaa, 0x2f, 0x20, 0x48, 0x5a, + 0xe6, 0x5c, 0x79, 0xbe, 0xee, 0xd4, 0xb3, 0x71, 0xa7, 0xba, 0x25, 0x01, 0x9a, 0xe3, 0xeb, 0xb5, + 0xaf, 0xdf, 0x3a, 0xc6, 0xc1, 0xcf, 0x39, 0xa3, 0xfb, 0xbd, 0x84, 0x1a, 0x3b, 0xd1, 0x10, 0x42, + 0x0a, 0x23, 0x1f, 0x3e, 0xe1, 0xf7, 0xa8, 0x26, 0x87, 0xe9, 0x33, 0xc1, 0x5a, 0xe6, 0x9c, 0x39, + 0xdf, 0x58, 0x7e, 0x44, 0x0a, 0x33, 0xcf, 0x7a, 0x22, 0xf1, 0xd0, 0x93, 0x40, 0x42, 0x64, 0x36, + 0x19, 0x2d, 0x91, 0x97, 0xbd, 0x0f, 0xe0, 0x8a, 0x6d, 0x10, 0xcc, 0xc1, 0x87, 0xe3, 0x8e, 0x91, + 0x8d, 0x3b, 0xa8, 0xc0, 0xe8, 0x99, 0x2a, 0xde, 0x41, 0x95, 0x24, 0x06, 0xb7, 0x55, 0x52, 0xea, + 0xcb, 0xe4, 0xd2, 0x55, 0x91, 0x89, 0xfe, 0x5e, 0xc5, 0xe0, 0x3a, 0x37, 0xb5, 0x7e, 0x45, 0xbe, + 0xa8, 0x52, 0xc3, 0xef, 0xd0, 0x4c, 0x22, 0x98, 0x48, 0x93, 0x56, 0x59, 0xe9, 0xae, 0x5c, 0x53, + 0x57, 0x71, 0x9d, 0x5b, 0x5a, 0x79, 0x26, 0x7f, 0x53, 0xad, 0xd9, 0x75, 0xd1, 0xed, 0x0b, 0x4d, + 0xe0, 0x07, 0xa8, 0x2a, 0x24, 0xa4, 0x5c, 0xaa, 0x3b, 0x4d, 0xcd, 0xac, 0xe6, 0x79, 0x79, 0x0c, + 0x2f, 0xa0, 0x3a, 0x4b, 0xfb, 0x3e, 0x84, 0x2e, 0x24, 0xad, 0x92, 0x5a, 0x46, 0x33, 0x1b, 0x77, + 0xea, 0x1b, 0xa7, 0x20, 0x2d, 0xe2, 0xdd, 0x3f, 0x26, 0xba, 0x33, 0xd5, 0x12, 0x7e, 0x82, 0x9a, + 0x13, 0xed, 0x43, 0x5f, 0xd5, 0xab, 0x39, 0x77, 0x75, 0xbd, 0xe6, 0xc6, 0x64, 0x90, 0x9e, 0xcf, + 0xc5, 0xdb, 0xa8, 0x92, 0x26, 0xc0, 0xb5, 0xd7, 0x0b, 0x57, 0xf0, 0x64, 0x37, 0x01, 0xbe, 0x15, + 0xee, 0x45, 0x85, 0xc9, 0x12, 0xa1, 0x4a, 0xe6, 0xfc, 0x38, 0x95, 0xff, 0x8f, 0x23, 0x0d, 0x02, + 0xce, 0x23, 0xae, 0x16, 0x32, 0x61, 0xd0, 0xa6, 0x04, 0x69, 0x1e, 0xeb, 0xfe, 0x28, 0xa1, 0xda, + 0x69, 0x49, 0xfc, 0x10, 0xd5, 0x64, 0x99, 0x90, 0x05, 0xa0, 0x5d, 0x9d, 0xd5, 0x24, 0x95, 0x23, + 0x71, 0x7a, 0x96, 0x81, 0xef, 0xa1, 0x72, 0xea, 0xf7, 0xd5, 0x68, 0x75, 0xa7, 0xa1, 0x13, 0xcb, + 0xbb, 0x5b, 0xcf, 0xa8, 0xc4, 0x71, 0x17, 0xcd, 0x78, 0x3c, 0x4a, 0x63, 0xf9, 0x43, 0xc8, 0x46, + 0x91, 0x5c, 0xeb, 0x73, 0x85, 0x50, 0x1d, 0xc1, 0x6f, 0x51, 0x15, 0xe4, 0xd5, 0xa8, 0x59, 0x1a, + 0xcb, 0xab, 0xd7, 0xf0, 0x87, 0xa8, 0x73, 0xdb, 0x0c, 0x05, 0xdf, 0x9f, 0x18, 0x4d, 0x62, 0x34, + 0xd7, 0x6c, 0x7b, 0xfa, 0x24, 0x55, 0x0e, 0x9e, 0x45, 0xe5, 0x21, 0xec, 0xe7, 0x63, 0x51, 0xf9, + 0x89, 0x9f, 0xa2, 0xea, 0x48, 0x5e, 0xab, 0x5e, 0xce, 0xe2, 0x15, 0x8a, 0x17, 0x27, 0x4e, 0x73, + 0xee, 0x7a, 0x69, 0xcd, 0x74, 0x16, 0x0f, 0x4f, 0x2c, 0xe3, 0xe8, 0xc4, 0x32, 0x8e, 0x4f, 0x2c, + 0xe3, 0x20, 0xb3, 0xcc, 0xc3, 0xcc, 0x32, 0x8f, 0x32, 0xcb, 0x3c, 0xce, 0x2c, 0xf3, 0x57, 0x66, + 0x99, 0x5f, 0x7e, 0x5b, 0xc6, 0x9b, 0x1b, 0x5a, 0xe4, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, + 0xbb, 0x89, 0x53, 0x68, 0x05, 0x00, 0x00, } + func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -97,32 +254,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], 
m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -130,41 +286,52 @@ func (m *TokenReview) Marshal() (dAtA []byte, err error) { } func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -172,36 +339,36 @@ func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) if len(m.Audiences) > 0 { - for _, s := range m.Audiences { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -209,52 +376,54 @@ func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { } func (m 
*TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x1a + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i-- if m.Authenticated { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) - n4, err := m.User.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *UserInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -262,77 +431,81 @@ func (m *UserInfo) Marshal() (dAtA []byte, err error) { } func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x22 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n5, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- 
+ dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 } } - return i, nil + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -345,6 +518,9 @@ func (m ExtraValue) Size() (n int) { } func (m *TokenReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -357,6 +533,9 @@ func (m *TokenReview) Size() (n int) { } func (m *TokenReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Token) @@ -371,6 +550,9 @@ func (m *TokenReviewSpec) Size() (n int) { } func (m *TokenReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -388,6 +570,9 @@ func (m *TokenReviewStatus) Size() (n int) { } func (m *UserInfo) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Username) @@ -413,14 +598,7 @@ func (m *UserInfo) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -430,7 +608,7 @@ func (this *TokenReview) String() string { return "nil" } s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -507,7 +685,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -535,7 +713,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -545,6 +723,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -559,6 +740,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -586,7 +770,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -614,7 +798,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -623,6 +807,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -644,7 +831,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -653,6 +840,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -674,7 +864,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -683,6 +873,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -699,6 +892,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -726,7 +922,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -754,7 +950,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -764,6 +960,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -783,7 +982,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -793,6 +992,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -807,6 +1009,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -834,7 +1039,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -862,7 +1067,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } 
b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -882,7 +1087,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -891,6 +1096,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -912,7 +1120,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -922,6 +1130,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -941,7 +1152,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -951,6 +1162,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -965,6 +1179,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -992,7 +1209,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1020,7 +1237,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1030,6 +1247,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1049,7 +1269,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1059,6 +1279,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1078,7 +1301,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1088,6 +1311,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1107,7 +1333,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1116,6 +1342,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error 
{ return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1136,7 +1365,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1153,7 +1382,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1163,6 +1392,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1179,7 +1411,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1188,7 +1420,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -1225,6 +1457,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1291,10 +1526,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1323,6 +1561,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1341,53 +1582,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 663 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x4e, 0x14, 0x4d, - 0x14, 0xed, 0x9e, 0x1f, 0xbe, 0x99, 0x9a, 0x6f, 0x14, 0x2b, 0x31, 0x99, 0x4c, 0x62, 0x0f, 0x8e, - 0x1b, 0x12, 0xa4, 0x5a, 0x08, 0x41, 0x82, 0x2b, 0x5a, 0x89, 0xc1, 0x84, 0x98, 0x94, 0xe0, 0x42, - 0x5d, 0x58, 0xd3, 0x73, 0xe9, 0x69, 0xc7, 0xfe, 0x49, 0x55, 0xf5, 0x28, 0x3b, 0x1e, 0xc1, 0xa5, - 0x4b, 0x13, 0x9f, 0xc4, 0x1d, 0x4b, 0x96, 0x2c, 0xcc, 0x44, 0xda, 0x27, 0xf0, 0x0d, 0x4c, 0x55, - 0x17, 0xcc, 0x00, 0x31, 0xc0, 0xae, 0xeb, 0xdc, 0x7b, 0xce, 0x3d, 0xf7, 0x54, 0x17, 0x7a, 0x31, - 0x5c, 0x13, 0x24, 0x4c, 0xdc, 0x61, 0xd6, 0x03, 0x1e, 0x83, 0x04, 0xe1, 0x8e, 0x20, 0xee, 0x27, - 0xdc, 0x35, 0x05, 0x96, 0x86, 0x2e, 0xcb, 0xe4, 0x00, 0x62, 0x19, 0xfa, 0x4c, 0x86, 0x49, 0xec, - 0x8e, 0x96, 0x7a, 0x20, 0xd9, 0x92, 0x1b, 0x40, 0x0c, 0x9c, 0x49, 0xe8, 0x93, 0x94, 0x27, 0x32, - 0xc1, 0xf7, 0x0b, 0x0a, 0x61, 0x69, 0x48, 0xce, 0x53, 0x88, 0xa1, 0xb4, 0x17, 0x83, 0x50, 0x0e, - 0xb2, 0x1e, 0xf1, 0x93, 0xc8, 0x0d, 0x92, 0x20, 0x71, 0x35, 0xb3, 0x97, 0xed, 0xe9, 0x93, 0x3e, - 0xe8, 0xaf, 0x42, 0xb1, 0xbd, 
0x32, 0x31, 0x11, 0x31, 0x7f, 0x10, 0xc6, 0xc0, 0xf7, 0xdd, 0x74, - 0x18, 0x28, 0x40, 0xb8, 0x11, 0x48, 0xe6, 0x8e, 0x2e, 0xf9, 0x68, 0xbb, 0xff, 0x62, 0xf1, 0x2c, - 0x96, 0x61, 0x04, 0x97, 0x08, 0xab, 0x57, 0x11, 0x84, 0x3f, 0x80, 0x88, 0x5d, 0xe4, 0x75, 0x1f, - 0x23, 0xb4, 0xf9, 0x59, 0x72, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 0x50, 0x35, 0x94, 0x10, 0x89, - 0x96, 0x3d, 0x57, 0x9e, 0xaf, 0x7b, 0xf5, 0x7c, 0xdc, 0xa9, 0x6e, 0x29, 0x80, 0x16, 0xf8, 0x7a, - 0xed, 0xeb, 0xb7, 0x8e, 0x75, 0xf0, 0x73, 0xce, 0xea, 0x7e, 0x2f, 0xa1, 0xc6, 0x4e, 0x32, 0x84, - 0x98, 0xc2, 0x28, 0x84, 0x4f, 0xf8, 0x3d, 0xaa, 0xa9, 0x65, 0xfa, 0x4c, 0xb2, 0x96, 0x3d, 0x67, - 0xcf, 0x37, 0x96, 0x1f, 0x91, 0x49, 0x98, 0x67, 0x9e, 0x48, 0x3a, 0x0c, 0x14, 0x20, 0x88, 0xea, - 0x26, 0xa3, 0x25, 0xf2, 0xb2, 0xf7, 0x01, 0x7c, 0xb9, 0x0d, 0x92, 0x79, 0xf8, 0x70, 0xdc, 0xb1, - 0xf2, 0x71, 0x07, 0x4d, 0x30, 0x7a, 0xa6, 0x8a, 0x77, 0x50, 0x45, 0xa4, 0xe0, 0xb7, 0x4a, 0x5a, - 0x7d, 0x99, 0x5c, 0x79, 0x55, 0x64, 0xca, 0xdf, 0xab, 0x14, 0x7c, 0xef, 0x7f, 0xa3, 0x5f, 0x51, - 0x27, 0xaa, 0xd5, 0xf0, 0x3b, 0x34, 0x23, 0x24, 0x93, 0x99, 0x68, 0x95, 0xb5, 0xee, 0xca, 0x0d, - 0x75, 0x35, 0xd7, 0xbb, 0x65, 0x94, 0x67, 0x8a, 0x33, 0x35, 0x9a, 0x5d, 0x1f, 0xdd, 0xbe, 0x60, - 0x02, 0x3f, 0x40, 0x55, 0xa9, 0x20, 0x9d, 0x52, 0xdd, 0x6b, 0x1a, 0x66, 0xb5, 0xe8, 0x2b, 0x6a, - 0x78, 0x01, 0xd5, 0x59, 0xd6, 0x0f, 0x21, 0xf6, 0x41, 0xb4, 0x4a, 0xfa, 0x32, 0x9a, 0xf9, 0xb8, - 0x53, 0xdf, 0x38, 0x05, 0xe9, 0xa4, 0xde, 0xfd, 0x63, 0xa3, 0x3b, 0x97, 0x2c, 0xe1, 0x27, 0xa8, - 0x39, 0x65, 0x1f, 0xfa, 0x7a, 0x5e, 0xcd, 0xbb, 0x6b, 0xe6, 0x35, 0x37, 0xa6, 0x8b, 0xf4, 0x7c, - 0x2f, 0xde, 0x46, 0x95, 0x4c, 0x00, 0x37, 0x59, 0x2f, 0x5c, 0x23, 0x93, 0x5d, 0x01, 0x7c, 0x2b, - 0xde, 0x4b, 0x26, 0x21, 0x2b, 0x84, 0x6a, 0x19, 0xb5, 0x33, 0x70, 0x9e, 0x70, 0x9d, 0xf1, 0xd4, - 0xce, 0x9b, 0x0a, 0xa4, 0x45, 0xed, 0xfc, 0xce, 0x95, 0x2b, 0x76, 0xfe, 0x51, 0x42, 0xb5, 0xd3, - 0x91, 0xf8, 0x21, 0xaa, 0xa9, 0x31, 0x31, 0x8b, 0xc0, 0xa4, 0x3a, 0x6b, 0x26, 0xe8, 0x1e, 0x85, - 0xd3, 0xb3, 0x0e, 0x7c, 0x0f, 0x95, 0xb3, 0xb0, 0xaf, 0x57, 0xab, 0x7b, 0x0d, 0xd3, 0x58, 0xde, - 0xdd, 0x7a, 0x46, 0x15, 0x8e, 0xbb, 0x68, 0x26, 0xe0, 0x49, 0x96, 0xaa, 0x1f, 0x42, 0x79, 0x40, - 0xea, 0x5a, 0x9f, 0x6b, 0x84, 0x9a, 0x0a, 0x7e, 0x8b, 0xaa, 0xa0, 0x5e, 0x8d, 0xb6, 0xd9, 0x58, - 0x5e, 0xbd, 0x41, 0x3e, 0x44, 0x3f, 0xb7, 0xcd, 0x58, 0xf2, 0xfd, 0xa9, 0x1c, 0x14, 0x46, 0x0b, - 0xcd, 0x76, 0x60, 0x9e, 0xa4, 0xee, 0xc1, 0xb3, 0xa8, 0x3c, 0x84, 0xfd, 0x62, 0x2d, 0xaa, 0x3e, - 0xf1, 0x53, 0x54, 0x1d, 0xa9, 0xd7, 0x6a, 0x2e, 0x67, 0xf1, 0x1a, 0xc3, 0x27, 0x4f, 0x9c, 0x16, - 0xdc, 0xf5, 0xd2, 0x9a, 0xed, 0x2d, 0x1e, 0x9e, 0x38, 0xd6, 0xd1, 0x89, 0x63, 0x1d, 0x9f, 0x38, - 0xd6, 0x41, 0xee, 0xd8, 0x87, 0xb9, 0x63, 0x1f, 0xe5, 0x8e, 0x7d, 0x9c, 0x3b, 0xf6, 0xaf, 0xdc, - 0xb1, 0xbf, 0xfc, 0x76, 0xac, 0x37, 0xff, 0x19, 0x91, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf7, - 0xd6, 0x32, 0x28, 0x68, 0x05, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index 03040aa5a5541..0dc01bc92d440 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -17,44 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto - - It has these top-level messages: - ExtraValue - LocalSubjectAccessReview - NonResourceAttributes - NonResourceRule - ResourceAttributes - ResourceRule - SelfSubjectAccessReview - SelfSubjectAccessReviewSpec - SelfSubjectRulesReview - SelfSubjectRulesReviewSpec - SubjectAccessReview - SubjectAccessReviewSpec - SubjectAccessReviewStatus - SubjectRulesReviewStatus -*/ package v1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -68,73 +44,397 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_e50da13573e369bd, []int{1} +} +func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) +} +func (m *LocalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo + +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{2} +} +func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*NonResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceAttributes.Merge(m, src) +} +func (m *NonResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *NonResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo + +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{3} +} +func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceRule.Merge(m, src) +} +func (m *NonResourceRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceRule.DiscardUnknown(m) } -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo -func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } -func (*NonResourceRule) ProtoMessage() {} -func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{4} +} +func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributes.Merge(m, src) +} +func (m *ResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributes.DiscardUnknown(m) +} -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo -func (m *ResourceRule) Reset() { *m = ResourceRule{} } -func (*ResourceRule) ProtoMessage() {} -func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{5} +} +func (m *ResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRule.Merge(m, src) +} +func (m 
*ResourceRule) XXX_Size() int { + return m.Size() +} +func (m *ResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRule.DiscardUnknown(m) +} -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_ResourceRule proto.InternalMessageInfo + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{6} +} +func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src) +} +func (m *SelfSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_e50da13573e369bd, []int{7} +} +func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m) } -func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } -func (*SelfSubjectRulesReview) ProtoMessage() {} -func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{8} +} +func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) +} +func (m *SelfSubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) +} + +var 
xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptor_e50da13573e369bd, []int{9} +} +func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) } -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{10} +} +func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReview.Merge(m, src) +} +func (m *SubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_e50da13573e369bd, []int{11} +} +func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src) +} +func (m *SubjectAccessReviewSpec) XXX_Size() int { + return m.Size() } +func (m *SubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} + return fileDescriptor_e50da13573e369bd, []int{12} +} +func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) 
error { + return m.Unmarshal(b) } +func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src) +} +func (m *SubjectAccessReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptor_e50da13573e369bd, []int{13} +} +func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) +} +func (m *SubjectRulesReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue") @@ -149,13 +449,95 @@ func init() { proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReviewSpec") proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SubjectAccessReview") proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec.ExtraEntry") proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewStatus") proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectRulesReviewStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptor_e50da13573e369bd) +} + +var fileDescriptor_e50da13573e369bd = []byte{ + // 1140 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0xdf, 0xa4, 0x13, 0xa5, 0xd9, 0xa6, 0xfa, 0xda, 0xd1, + 0x22, 0x41, 0x2a, 0xca, 0x2e, 0xb1, 0xda, 0x26, 0xaa, 0x54, 0xa1, 0x58, 0x89, 0x50, 0xa4, 0xb6, + 0x54, 0x13, 0x25, 0x12, 0x45, 0x20, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 0xde, 0x5d, 0x66, 0x66, 0x1d, + 0xc2, 0xa9, 0x12, 0xff, 0x00, 0x47, 0x0e, 0x1c, 0xf8, 0x0f, 0xb8, 0x20, 0x71, 0xe3, 0xc0, 0x01, + 0xe5, 0xd8, 0x63, 0x91, 0x90, 0x45, 0x96, 0x33, 0xff, 0x03, 0x9a, 0xd9, 0xb1, 0x77, 0x9d, 0xac, + 0xdd, 0x84, 0x03, 0xbd, 0xf4, 0xb6, 0xfb, 0x3e, 0x9f, 0xf7, 0xe6, 0xcd, 0xfb, 0x35, 0x0f, 0x6c, + 0x1f, 0x6d, 0x32, 0xcb, 0xf5, 0xed, 0xa3, 0xb0, 0x49, 0xa8, 0x47, 0x38, 0x61, 0x76, 0x9f, 0x78, + 0x2d, 0x9f, 0xda, 0x0a, 0xc0, 0x81, 0x6b, 0xe3, 0x90, 0x77, 0x7c, 0xea, 0x7e, 
0x8d, 0xb9, 0xeb, + 0x7b, 0x76, 0x7f, 0xdd, 0x6e, 0x13, 0x8f, 0x50, 0xcc, 0x49, 0xcb, 0x0a, 0xa8, 0xcf, 0x7d, 0x78, + 0x2b, 0x26, 0x5b, 0x38, 0x70, 0xad, 0x31, 0xb2, 0xd5, 0x5f, 0x5f, 0x79, 0xaf, 0xed, 0xf2, 0x4e, + 0xd8, 0xb4, 0x1c, 0xbf, 0x67, 0xb7, 0xfd, 0xb6, 0x6f, 0x4b, 0x9d, 0x66, 0x78, 0x28, 0xff, 0xe4, + 0x8f, 0xfc, 0x8a, 0x6d, 0xad, 0xdc, 0x4d, 0x0e, 0xee, 0x61, 0xa7, 0xe3, 0x7a, 0x84, 0x9e, 0xd8, + 0xc1, 0x51, 0x5b, 0x08, 0x98, 0xdd, 0x23, 0x1c, 0x67, 0x78, 0xb0, 0x62, 0x4f, 0xd2, 0xa2, 0xa1, + 0xc7, 0xdd, 0x1e, 0xb9, 0xa0, 0x70, 0xff, 0x55, 0x0a, 0xcc, 0xe9, 0x90, 0x1e, 0x3e, 0xaf, 0x67, + 0x6e, 0x00, 0xb0, 0xf3, 0x15, 0xa7, 0xf8, 0x00, 0x77, 0x43, 0x02, 0x6b, 0xa0, 0xe8, 0x72, 0xd2, + 0x63, 0x86, 0xb6, 0x9a, 0x5f, 0x2b, 0x37, 0xca, 0xd1, 0xa0, 0x56, 0xdc, 0x15, 0x02, 0x14, 0xcb, + 0x1f, 0x94, 0xbe, 0xfb, 0xa1, 0x96, 0x7b, 0xfe, 0xc7, 0x6a, 0xce, 0xfc, 0x49, 0x07, 0xc6, 0x23, + 0xdf, 0xc1, 0xdd, 0xbd, 0xb0, 0xf9, 0x05, 0x71, 0xf8, 0x96, 0xe3, 0x10, 0xc6, 0x10, 0xe9, 0xbb, + 0xe4, 0x18, 0x7e, 0x0e, 0x4a, 0xe2, 0x66, 0x2d, 0xcc, 0xb1, 0xa1, 0xad, 0x6a, 0x6b, 0x95, 0xfa, + 0xfb, 0x56, 0x12, 0xd3, 0x91, 0x83, 0x56, 0x70, 0xd4, 0x16, 0x02, 0x66, 0x09, 0xb6, 0xd5, 0x5f, + 0xb7, 0x3e, 0x92, 0xb6, 0x1e, 0x13, 0x8e, 0x1b, 0xf0, 0x74, 0x50, 0xcb, 0x45, 0x83, 0x1a, 0x48, + 0x64, 0x68, 0x64, 0x15, 0x1e, 0x80, 0x02, 0x0b, 0x88, 0x63, 0xe8, 0xd2, 0xfa, 0x5d, 0x6b, 0x4a, + 0xc6, 0xac, 0x0c, 0x0f, 0xf7, 0x02, 0xe2, 0x34, 0xae, 0xa9, 0x13, 0x0a, 0xe2, 0x0f, 0x49, 0x7b, + 0xf0, 0x33, 0x30, 0xc3, 0x38, 0xe6, 0x21, 0x33, 0xf2, 0xd2, 0xf2, 0xfd, 0x2b, 0x5b, 0x96, 0xda, + 0x8d, 0xff, 0x29, 0xdb, 0x33, 0xf1, 0x3f, 0x52, 0x56, 0xcd, 0x4f, 0xc0, 0xd2, 0x13, 0xdf, 0x43, + 0x84, 0xf9, 0x21, 0x75, 0xc8, 0x16, 0xe7, 0xd4, 0x6d, 0x86, 0x9c, 0x30, 0xb8, 0x0a, 0x0a, 0x01, + 0xe6, 0x1d, 0x19, 0xae, 0x72, 0xe2, 0xda, 0x53, 0xcc, 0x3b, 0x48, 0x22, 0x82, 0xd1, 0x27, 0xb4, + 0x29, 0xaf, 0x9c, 0x62, 0x1c, 0x10, 0xda, 0x44, 0x12, 0x31, 0xbf, 0x04, 0xf3, 0x29, 0xe3, 0x28, + 0xec, 0xca, 0x8c, 0x0a, 0x68, 0x2c, 0xa3, 0x42, 0x83, 0xa1, 0x58, 0x0e, 0x1f, 0x82, 0x79, 0x2f, + 0xd1, 0xd9, 0x47, 0x8f, 0x98, 0xa1, 0x4b, 0xea, 0x62, 0x34, 0xa8, 0xa5, 0xcd, 0x09, 0x08, 0x9d, + 0xe7, 0x9a, 0xbf, 0xe8, 0x00, 0x66, 0xdc, 0xc6, 0x06, 0x65, 0x0f, 0xf7, 0x08, 0x0b, 0xb0, 0x43, + 0xd4, 0x95, 0xae, 0x2b, 0x87, 0xcb, 0x4f, 0x86, 0x00, 0x4a, 0x38, 0xaf, 0xbe, 0x1c, 0x7c, 0x0b, + 0x14, 0xdb, 0xd4, 0x0f, 0x03, 0x99, 0x98, 0x72, 0x63, 0x4e, 0x51, 0x8a, 0x1f, 0x0a, 0x21, 0x8a, + 0x31, 0x78, 0x1b, 0xcc, 0xf6, 0x09, 0x65, 0xae, 0xef, 0x19, 0x05, 0x49, 0x9b, 0x57, 0xb4, 0xd9, + 0x83, 0x58, 0x8c, 0x86, 0x38, 0xbc, 0x03, 0x4a, 0x54, 0x39, 0x6e, 0x14, 0x25, 0x77, 0x41, 0x71, + 0x4b, 0xa3, 0x08, 0x8e, 0x18, 0xf0, 0x1e, 0xa8, 0xb0, 0xb0, 0x39, 0x52, 0x98, 0x91, 0x0a, 0x8b, + 0x4a, 0xa1, 0xb2, 0x97, 0x40, 0x28, 0xcd, 0x13, 0xd7, 0x12, 0x77, 0x34, 0x66, 0xc7, 0xaf, 0x25, + 0x42, 0x80, 0x24, 0x62, 0xfe, 0xaa, 0x81, 0x6b, 0x57, 0xcb, 0xd8, 0xbb, 0xa0, 0x8c, 0x03, 0x57, + 0x5e, 0x7b, 0x98, 0xab, 0x39, 0x11, 0xd7, 0xad, 0xa7, 0xbb, 0xb1, 0x10, 0x25, 0xb8, 0x20, 0x0f, + 0x9d, 0x11, 0x25, 0x3d, 0x22, 0x0f, 0x8f, 0x64, 0x28, 0xc1, 0xe1, 0x06, 0x98, 0x1b, 0xfe, 0xc8, + 0x24, 0x19, 0x05, 0xa9, 0x70, 0x3d, 0x1a, 0xd4, 0xe6, 0x50, 0x1a, 0x40, 0xe3, 0x3c, 0xf3, 0x67, + 0x1d, 0x2c, 0xef, 0x91, 0xee, 0xe1, 0xeb, 0x99, 0x05, 0xcf, 0xc6, 0x66, 0xc1, 0xe6, 0xf4, 0x8e, + 0xcd, 0xf6, 0xf2, 0xb5, 0xcd, 0x83, 0xef, 0x75, 0x70, 0x6b, 0x8a, 0x4f, 0xf0, 0x18, 0x40, 0x7a, + 0xa1, 0xbd, 0x54, 0x1c, 0xed, 0xa9, 0xbe, 0x5c, 0xec, 0xca, 0xc6, 0x8d, 0x68, 0x50, 0xcb, 0xe8, + 0x56, 
0x94, 0x71, 0x04, 0xfc, 0x46, 0x03, 0x4b, 0x5e, 0xd6, 0xa4, 0x52, 0x61, 0xae, 0x4f, 0x3d, + 0x3c, 0x73, 0xc6, 0x35, 0x6e, 0x46, 0x83, 0x5a, 0xf6, 0xf8, 0x43, 0xd9, 0x67, 0x89, 0x57, 0xe6, + 0x46, 0x2a, 0x3c, 0xa2, 0x41, 0xfe, 0xbb, 0xba, 0xfa, 0x78, 0xac, 0xae, 0x36, 0x2e, 0x5b, 0x57, + 0x29, 0x27, 0x27, 0x96, 0xd5, 0xa7, 0xe7, 0xca, 0xea, 0xde, 0x65, 0xca, 0x2a, 0x6d, 0x78, 0x7a, + 0x55, 0x3d, 0x06, 0x2b, 0x93, 0x1d, 0xba, 0xf2, 0x70, 0x36, 0x7f, 0xd4, 0xc1, 0xe2, 0x9b, 0x67, + 0xfe, 0x2a, 0x6d, 0xfd, 0x5b, 0x01, 0x2c, 0xbf, 0x69, 0xe9, 0x49, 0x8b, 0x4e, 0xc8, 0x08, 0x55, + 0xcf, 0xf8, 0x28, 0x39, 0xfb, 0x8c, 0x50, 0x24, 0x11, 0x68, 0x82, 0x99, 0x76, 0xfc, 0xba, 0xc5, + 0xef, 0x0f, 0x10, 0x01, 0x56, 0x4f, 0x9b, 0x42, 0x60, 0x0b, 0x14, 0x89, 0xd8, 0x5b, 0x8d, 0xe2, + 0x6a, 0x7e, 0xad, 0x52, 0xff, 0xe0, 0xdf, 0x54, 0x86, 0x25, 0x37, 0xdf, 0x1d, 0x8f, 0xd3, 0x93, + 0x64, 0x9d, 0x90, 0x32, 0x14, 0x1b, 0x87, 0xff, 0x07, 0xf9, 0xd0, 0x6d, 0xa9, 0xd7, 0xbe, 0xa2, + 0x28, 0xf9, 0xfd, 0xdd, 0x6d, 0x24, 0xe4, 0x2b, 0x58, 0x2d, 0xcf, 0xd2, 0x04, 0x5c, 0x00, 0xf9, + 0x23, 0x72, 0x12, 0x37, 0x14, 0x12, 0x9f, 0xf0, 0x21, 0x28, 0xf6, 0xc5, 0x5e, 0xad, 0xe2, 0xfb, + 0xce, 0x54, 0x27, 0x93, 0x35, 0x1c, 0xc5, 0x5a, 0x0f, 0xf4, 0x4d, 0xcd, 0xfc, 0x5d, 0x03, 0x37, + 0x27, 0x96, 0x9f, 0x58, 0x77, 0x70, 0xb7, 0xeb, 0x1f, 0x93, 0x96, 0x3c, 0xb6, 0x94, 0xac, 0x3b, + 0x5b, 0xb1, 0x18, 0x0d, 0x71, 0xf8, 0x36, 0x98, 0x69, 0x11, 0xcf, 0x25, 0x2d, 0xb9, 0x18, 0x95, + 0x92, 0xca, 0xdd, 0x96, 0x52, 0xa4, 0x50, 0xc1, 0xa3, 0x04, 0x33, 0xdf, 0x53, 0xab, 0xd8, 0x88, + 0x87, 0xa4, 0x14, 0x29, 0x14, 0x6e, 0x81, 0x79, 0x22, 0xdc, 0x94, 0xfe, 0xef, 0x50, 0xea, 0x0f, + 0x33, 0xba, 0xac, 0x14, 0xe6, 0x77, 0xc6, 0x61, 0x74, 0x9e, 0x6f, 0xfe, 0xad, 0x03, 0x63, 0xd2, + 0x68, 0x83, 0x87, 0xc9, 0x2e, 0x22, 0x41, 0xb9, 0x0e, 0x55, 0xea, 0xb7, 0x2f, 0xd5, 0x20, 0x42, + 0xa3, 0xb1, 0xa4, 0x1c, 0x99, 0x4b, 0x4b, 0x53, 0xab, 0x8b, 0xfc, 0x85, 0x14, 0x2c, 0x78, 0xe3, + 0x3b, 0x73, 0xbc, 0x54, 0x55, 0xea, 0x77, 0x2e, 0xdb, 0x0e, 0xf2, 0x34, 0x43, 0x9d, 0xb6, 0x70, + 0x0e, 0x60, 0xe8, 0x82, 0x7d, 0x58, 0x07, 0xc0, 0xf5, 0x1c, 0xbf, 0x17, 0x74, 0x09, 0x27, 0x32, + 0x6c, 0xa5, 0x64, 0x0e, 0xee, 0x8e, 0x10, 0x94, 0x62, 0x65, 0xc5, 0xbb, 0x70, 0xb5, 0x78, 0x37, + 0xd6, 0x4e, 0xcf, 0xaa, 0xb9, 0x17, 0x67, 0xd5, 0xdc, 0xcb, 0xb3, 0x6a, 0xee, 0x79, 0x54, 0xd5, + 0x4e, 0xa3, 0xaa, 0xf6, 0x22, 0xaa, 0x6a, 0x2f, 0xa3, 0xaa, 0xf6, 0x67, 0x54, 0xd5, 0xbe, 0xfd, + 0xab, 0x9a, 0x7b, 0xa6, 0xf7, 0xd7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x99, 0x87, 0xb8, 0x24, + 0x47, 0x0f, 0x00, 0x00, +} + func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -163,32 +545,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m 
*LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -196,41 +577,52 @@ func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -238,25 +630,32 @@ func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -264,47 +663,40 @@ func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { } func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if 
len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -312,45 +704,57 @@ func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) - i += copy(dAtA[i:], m.Subresource) - dAtA[i] = 0x3a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x3a + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x32 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x2a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x1a + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -358,77 +762,58 @@ func (m *ResourceRule) Marshal() (dAtA []byte, err error) { } func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRule) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - return i, nil + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -436,41 +821,52 @@ func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil 
{ + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -478,37 +874,46 @@ func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n7, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n8, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -516,41 +921,52 @@ func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -558,21 +974,27 @@ func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -580,41 +1002,52 @@ func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -622,91 +1055,94 @@ func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) } - i += n15 - } - if m.NonResourceAttributes != nil { - dAtA[i] = 0x12 - i++ - i 
= encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a } - i += n16 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) if len(m.Groups) > 0 { - for _, s := range m.Groups { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.Extra) > 0 { - keysForExtra := make([]string, 0, len(m.Extra)) - for k := range m.Extra { - keysForExtra = append(keysForExtra, string(k)) + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x2a - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n17, err := (&v).MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n17 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + return len(dAtA) - i, nil } func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -714,41 +1150,48 @@ func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Allowed { + i-- + if m.Denied { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) + i-- dAtA[i] = 0x20 - i++ - if m.Denied { + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -756,59 +1199,74 @@ func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.ResourceRules) > 0 { - for _, msg := range m.ResourceRules { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x22 + i-- + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 if len(m.NonResourceRules) > 0 { - for _, msg := range m.NonResourceRules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x18 - i++ - if m.Incomplete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -821,6 +1279,9 @@ func (m ExtraValue) Size() (n int) { } func (m *LocalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -833,6 +1294,9 @@ func (m *LocalSubjectAccessReview) Size() (n int) { } func (m *NonResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = 
len(m.Path) @@ -843,6 +1307,9 @@ func (m *NonResourceAttributes) Size() (n int) { } func (m *NonResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -861,6 +1328,9 @@ func (m *NonResourceRule) Size() (n int) { } func (m *ResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -881,6 +1351,9 @@ func (m *ResourceAttributes) Size() (n int) { } func (m *ResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -911,6 +1384,9 @@ func (m *ResourceRule) Size() (n int) { } func (m *SelfSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -923,6 +1399,9 @@ func (m *SelfSubjectAccessReview) Size() (n int) { } func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -937,6 +1416,9 @@ func (m *SelfSubjectAccessReviewSpec) Size() (n int) { } func (m *SelfSubjectRulesReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -949,6 +1431,9 @@ func (m *SelfSubjectRulesReview) Size() (n int) { } func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -957,6 +1442,9 @@ func (m *SelfSubjectRulesReviewSpec) Size() (n int) { } func (m *SubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -969,6 +1457,9 @@ func (m *SubjectAccessReview) Size() (n int) { } func (m *SubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -1002,6 +1493,9 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { } func (m *SubjectAccessReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1014,6 +1508,9 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { } func (m *SubjectRulesReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ResourceRules) > 0 { @@ -1035,14 +1532,7 @@ func (m *SubjectRulesReviewStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1052,7 +1542,7 @@ func (this *LocalSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1115,7 +1605,7 @@ func (this *SelfSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1127,8 +1617,8 @@ func (this *SelfSubjectAccessReviewSpec) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `}`, }, "") return s @@ -1138,7 +1628,7 @@ func (this *SelfSubjectRulesReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectRulesReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1160,7 +1650,7 @@ func (this *SubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1182,8 +1672,8 @@ func (this *SubjectAccessReviewSpec) String() string { } mapStringForExtra += "}" s := strings.Join([]string{`&SubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, `Extra:` + mapStringForExtra + `,`, @@ -1209,9 +1699,19 @@ func (this 
*SubjectRulesReviewStatus) String() string { if this == nil { return "nil" } + repeatedStringForResourceRules := "[]ResourceRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourceRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" s := strings.Join([]string{`&SubjectRulesReviewStatus{`, - `ResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRules), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + `,`, - `NonResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NonResourceRules), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, `}`, @@ -1241,7 +1741,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1269,7 +1769,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1279,6 +1779,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1293,6 +1796,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1320,7 +1826,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1348,7 +1854,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1357,6 +1863,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1378,7 +1887,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1387,6 +1896,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1408,7 +1920,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1417,6 +1929,9 @@ func (m 
*LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1433,6 +1948,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1460,7 +1978,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1488,7 +2006,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1498,6 +2016,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1517,7 +2038,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1527,6 +2048,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1541,6 +2065,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1568,7 +2095,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1596,7 +2123,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1606,6 +2133,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1625,7 +2155,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1635,6 +2165,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1649,6 +2182,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1676,7 +2212,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1704,7 +2240,7 @@ func (m *ResourceAttributes) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1714,6 +2250,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1733,7 +2272,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1743,6 +2282,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1762,7 +2304,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1772,6 +2314,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1791,7 +2336,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1801,6 +2346,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1820,7 +2368,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1830,6 +2378,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1849,7 +2400,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1859,6 +2410,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1878,7 +2432,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1888,6 +2442,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1902,6 +2459,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1929,7 +2489,7 @@ func (m *ResourceRule) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1957,7 +2517,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1967,6 +2527,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1986,7 +2549,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1996,6 +2559,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2015,7 +2581,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2025,6 +2591,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2044,7 +2613,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2054,6 +2623,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2068,6 +2640,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2095,7 +2670,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2123,7 +2698,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2132,6 +2707,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2153,7 +2731,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2162,6 +2740,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2183,7 +2764,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << 
shift if b < 0x80 { break } @@ -2192,6 +2773,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2208,6 +2792,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2235,7 +2822,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2263,7 +2850,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2272,6 +2859,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2296,7 +2886,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2305,6 +2895,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2324,6 +2917,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2351,7 +2947,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2379,7 +2975,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2388,6 +2984,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2409,7 +3008,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2418,6 +3017,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2439,7 +3041,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2448,6 +3050,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -2464,6 +3069,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2491,7 +3099,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2519,7 +3127,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2529,6 +3137,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2543,6 +3154,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2570,7 +3184,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2598,7 +3212,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2607,6 +3221,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2628,7 +3245,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2637,6 +3254,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2658,7 +3278,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2667,6 +3287,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2683,6 +3306,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2710,7 +3336,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2738,7 +3364,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2747,6 +3373,9 @@ func (m *SubjectAccessReviewSpec) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2771,7 +3400,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2780,6 +3409,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2804,7 +3436,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2814,6 +3446,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2833,7 +3468,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2843,6 +3478,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2862,7 +3500,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2871,6 +3509,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2891,7 +3532,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2908,7 +3549,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2918,6 +3559,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2934,7 +3578,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2943,7 +3587,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -2985,7 +3629,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2995,6 +3639,9 @@ func 
(m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3009,6 +3656,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3036,7 +3686,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3064,7 +3714,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3084,7 +3734,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3094,6 +3744,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3113,7 +3766,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3123,6 +3776,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3142,7 +3798,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3157,6 +3813,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3184,7 +3843,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3212,7 +3871,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3221,6 +3880,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3243,7 +3905,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3252,6 +3914,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3274,7 +3939,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3294,7 +3959,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3304,6 +3969,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3318,6 +3986,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3384,10 +4055,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -3416,6 +4090,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -3434,83 +4111,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1140 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0xff, 0xa4, 0x13, 0xa5, 0xd9, 0xa6, 0xfa, 0xdb, 0xd1, - 0x22, 0x41, 0x2a, 0xca, 0x2e, 0xb1, 0xda, 0x26, 0xaa, 0x54, 0xa1, 0x58, 0x89, 0x50, 0xa4, 0xb6, - 0x54, 0x13, 0x25, 0x12, 0x45, 0x20, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 0xde, 0x5d, 0x66, 0x66, 0x1d, - 0xc2, 0xa9, 0x12, 0x5f, 0x80, 0x23, 0x07, 0x0e, 0x7c, 0x03, 0x2e, 0x48, 0xdc, 0x38, 0x70, 0x40, - 0x39, 0xf6, 0x58, 0x24, 0x64, 0x91, 0xe5, 0xcc, 0x77, 0x40, 0x33, 0x3b, 0xf6, 0xae, 0x93, 0xb5, - 0x9b, 0x70, 0xa0, 0x97, 0xde, 0x76, 0x9f, 0xdf, 0xef, 0x79, 0x99, 0xe7, 0x65, 0xe6, 0x01, 0xdb, - 0x47, 0x9b, 0xcc, 0x72, 0x7d, 0xfb, 0x28, 0x6c, 0x12, 0xea, 0x11, 0x4e, 0x98, 0xdd, 0x27, 0x5e, - 0xcb, 0xa7, 0xb6, 0x02, 0x70, 0xe0, 0xda, 0x38, 0xe4, 0x1d, 0x9f, 0xba, 0x5f, 0x63, 0xee, 0xfa, - 0x9e, 0xdd, 0x5f, 0xb7, 0xdb, 0xc4, 0x23, 0x14, 0x73, 0xd2, 0xb2, 0x02, 0xea, 0x73, 0x1f, 0xde, - 0x8a, 0xc9, 0x16, 0x0e, 0x5c, 0x6b, 0x8c, 0x6c, 0xf5, 0xd7, 0x57, 0xde, 0x6b, 0xbb, 0xbc, 0x13, - 0x36, 0x2d, 0xc7, 0xef, 0xd9, 0x6d, 0xbf, 0xed, 0xdb, 0x52, 0xa7, 0x19, 0x1e, 0xca, 0x3f, 0xf9, - 0x23, 0xbf, 0x62, 0x5b, 0x2b, 0x77, 0x13, 0xc7, 0x3d, 0xec, 0x74, 0x5c, 0x8f, 0xd0, 0x13, 0x3b, - 0x38, 0x6a, 0x0b, 0x01, 0xb3, 0x7b, 0x84, 0xe3, 0x8c, 0x08, 0x56, 0xec, 0x49, 0x5a, 0x34, 0xf4, - 0xb8, 0xdb, 0x23, 0x17, 0x14, 0xee, 0xbf, 0x4a, 0x81, 0x39, 0x1d, 0xd2, 0xc3, 0xe7, 0xf5, 0xcc, - 0x0d, 0x00, 0x76, 0xbe, 0xe2, 0x14, 0x1f, 0xe0, 0x6e, 0x48, 0x60, 0x0d, 0x14, 0x5d, 0x4e, 0x7a, - 0xcc, 0xd0, 0x56, 0xf3, 0x6b, 0xe5, 0x46, 0x39, 0x1a, 0xd4, 0x8a, 0xbb, 0x42, 0x80, 0x62, 0xf9, - 0x83, 0xd2, 0x77, 0x3f, 0xd4, 0x72, 0xcf, 0xff, 0x58, 0xcd, 0x99, 0x3f, 0xe9, 0xc0, 0x78, 0xe4, - 0x3b, 0xb8, 0xbb, 0x17, 0x36, 0xbf, 
0x20, 0x0e, 0xdf, 0x72, 0x1c, 0xc2, 0x18, 0x22, 0x7d, 0x97, - 0x1c, 0xc3, 0xcf, 0x41, 0x49, 0x9c, 0xac, 0x85, 0x39, 0x36, 0xb4, 0x55, 0x6d, 0xad, 0x52, 0x7f, - 0xdf, 0x4a, 0x72, 0x3a, 0x0a, 0xd0, 0x0a, 0x8e, 0xda, 0x42, 0xc0, 0x2c, 0xc1, 0xb6, 0xfa, 0xeb, - 0xd6, 0x47, 0xd2, 0xd6, 0x63, 0xc2, 0x71, 0x03, 0x9e, 0x0e, 0x6a, 0xb9, 0x68, 0x50, 0x03, 0x89, - 0x0c, 0x8d, 0xac, 0xc2, 0x03, 0x50, 0x60, 0x01, 0x71, 0x0c, 0x5d, 0x5a, 0xbf, 0x6b, 0x4d, 0xa9, - 0x98, 0x95, 0x11, 0xe1, 0x5e, 0x40, 0x9c, 0xc6, 0x35, 0xe5, 0xa1, 0x20, 0xfe, 0x90, 0xb4, 0x07, - 0x3f, 0x03, 0x33, 0x8c, 0x63, 0x1e, 0x32, 0x23, 0x2f, 0x2d, 0xdf, 0xbf, 0xb2, 0x65, 0xa9, 0xdd, - 0xf8, 0x9f, 0xb2, 0x3d, 0x13, 0xff, 0x23, 0x65, 0xd5, 0xfc, 0x04, 0x2c, 0x3d, 0xf1, 0x3d, 0x44, - 0x98, 0x1f, 0x52, 0x87, 0x6c, 0x71, 0x4e, 0xdd, 0x66, 0xc8, 0x09, 0x83, 0xab, 0xa0, 0x10, 0x60, - 0xde, 0x91, 0xe9, 0x2a, 0x27, 0xa1, 0x3d, 0xc5, 0xbc, 0x83, 0x24, 0x22, 0x18, 0x7d, 0x42, 0x9b, - 0xf2, 0xc8, 0x29, 0xc6, 0x01, 0xa1, 0x4d, 0x24, 0x11, 0xf3, 0x4b, 0x30, 0x9f, 0x32, 0x8e, 0xc2, - 0xae, 0xac, 0xa8, 0x80, 0xc6, 0x2a, 0x2a, 0x34, 0x18, 0x8a, 0xe5, 0xf0, 0x21, 0x98, 0xf7, 0x12, - 0x9d, 0x7d, 0xf4, 0x88, 0x19, 0xba, 0xa4, 0x2e, 0x46, 0x83, 0x5a, 0xda, 0x9c, 0x80, 0xd0, 0x79, - 0xae, 0xf9, 0x8b, 0x0e, 0x60, 0xc6, 0x69, 0x6c, 0x50, 0xf6, 0x70, 0x8f, 0xb0, 0x00, 0x3b, 0x44, - 0x1d, 0xe9, 0xba, 0x0a, 0xb8, 0xfc, 0x64, 0x08, 0xa0, 0x84, 0xf3, 0xea, 0xc3, 0xc1, 0xb7, 0x40, - 0xb1, 0x4d, 0xfd, 0x30, 0x90, 0x85, 0x29, 0x37, 0xe6, 0x14, 0xa5, 0xf8, 0xa1, 0x10, 0xa2, 0x18, - 0x83, 0xb7, 0xc1, 0x6c, 0x9f, 0x50, 0xe6, 0xfa, 0x9e, 0x51, 0x90, 0xb4, 0x79, 0x45, 0x9b, 0x3d, - 0x88, 0xc5, 0x68, 0x88, 0xc3, 0x3b, 0xa0, 0x44, 0x55, 0xe0, 0x46, 0x51, 0x72, 0x17, 0x14, 0xb7, - 0x34, 0xca, 0xe0, 0x88, 0x01, 0xef, 0x81, 0x0a, 0x0b, 0x9b, 0x23, 0x85, 0x19, 0xa9, 0xb0, 0xa8, - 0x14, 0x2a, 0x7b, 0x09, 0x84, 0xd2, 0x3c, 0x71, 0x2c, 0x71, 0x46, 0x63, 0x76, 0xfc, 0x58, 0x22, - 0x05, 0x48, 0x22, 0xe6, 0xaf, 0x1a, 0xb8, 0x76, 0xb5, 0x8a, 0xbd, 0x0b, 0xca, 0x38, 0x70, 0xe5, - 0xb1, 0x87, 0xb5, 0x9a, 0x13, 0x79, 0xdd, 0x7a, 0xba, 0x1b, 0x0b, 0x51, 0x82, 0x0b, 0xf2, 0x30, - 0x18, 0xd1, 0xd2, 0x23, 0xf2, 0xd0, 0x25, 0x43, 0x09, 0x0e, 0x37, 0xc0, 0xdc, 0xf0, 0x47, 0x16, - 0xc9, 0x28, 0x48, 0x85, 0xeb, 0xd1, 0xa0, 0x36, 0x87, 0xd2, 0x00, 0x1a, 0xe7, 0x99, 0x3f, 0xeb, - 0x60, 0x79, 0x8f, 0x74, 0x0f, 0x5f, 0xcf, 0x5d, 0xf0, 0x6c, 0xec, 0x2e, 0xd8, 0x9c, 0x3e, 0xb1, - 0xd9, 0x51, 0xbe, 0xb6, 0xfb, 0xe0, 0x7b, 0x1d, 0xdc, 0x9a, 0x12, 0x13, 0x3c, 0x06, 0x90, 0x5e, - 0x18, 0x2f, 0x95, 0x47, 0x7b, 0x6a, 0x2c, 0x17, 0xa7, 0xb2, 0x71, 0x23, 0x1a, 0xd4, 0x32, 0xa6, - 0x15, 0x65, 0xb8, 0x80, 0xdf, 0x68, 0x60, 0xc9, 0xcb, 0xba, 0xa9, 0x54, 0x9a, 0xeb, 0x53, 0x9d, - 0x67, 0xde, 0x71, 0x8d, 0x9b, 0xd1, 0xa0, 0x96, 0x7d, 0xfd, 0xa1, 0x6c, 0x5f, 0xe2, 0x95, 0xb9, - 0x91, 0x4a, 0x8f, 0x18, 0x90, 0xff, 0xae, 0xaf, 0x3e, 0x1e, 0xeb, 0xab, 0x8d, 0xcb, 0xf6, 0x55, - 0x2a, 0xc8, 0x89, 0x6d, 0xf5, 0xe9, 0xb9, 0xb6, 0xba, 0x77, 0x99, 0xb6, 0x4a, 0x1b, 0x9e, 0xde, - 0x55, 0x8f, 0xc1, 0xca, 0xe4, 0x80, 0xae, 0x7c, 0x39, 0x9b, 0x3f, 0xea, 0x60, 0xf1, 0xcd, 0x33, - 0x7f, 0x95, 0xb1, 0xfe, 0xad, 0x00, 0x96, 0xdf, 0x8c, 0xf4, 0xa4, 0x45, 0x27, 0x64, 0x84, 0xaa, - 0x67, 0x7c, 0x54, 0x9c, 0x7d, 0x46, 0x28, 0x92, 0x08, 0x34, 0xc1, 0x4c, 0x3b, 0x7e, 0xdd, 0xe2, - 0xf7, 0x07, 0x88, 0x04, 0xab, 0xa7, 0x4d, 0x21, 0xb0, 0x05, 0x8a, 0x44, 0xec, 0xad, 0x46, 0x71, - 0x35, 0xbf, 0x56, 0xa9, 0x7f, 0xf0, 0x6f, 0x3a, 0xc3, 0x92, 0x9b, 0xef, 0x8e, 0xc7, 0xe9, 0x49, - 0xb2, 0x4e, 0x48, 0x19, 0x8a, 0x8d, 0xc3, 0xff, 0x83, 0x7c, 
0xe8, 0xb6, 0xd4, 0x6b, 0x5f, 0x51, - 0x94, 0xfc, 0xfe, 0xee, 0x36, 0x12, 0xf2, 0x15, 0xac, 0x96, 0x67, 0x69, 0x02, 0x2e, 0x80, 0xfc, - 0x11, 0x39, 0x89, 0x07, 0x0a, 0x89, 0x4f, 0xf8, 0x10, 0x14, 0xfb, 0x62, 0xaf, 0x56, 0xf9, 0x7d, - 0x67, 0x6a, 0x90, 0xc9, 0x1a, 0x8e, 0x62, 0xad, 0x07, 0xfa, 0xa6, 0x66, 0xfe, 0xae, 0x81, 0x9b, - 0x13, 0xdb, 0x4f, 0xac, 0x3b, 0xb8, 0xdb, 0xf5, 0x8f, 0x49, 0x4b, 0xba, 0x2d, 0x25, 0xeb, 0xce, - 0x56, 0x2c, 0x46, 0x43, 0x1c, 0xbe, 0x0d, 0x66, 0x28, 0xc1, 0xcc, 0xf7, 0xd4, 0x8a, 0x35, 0xea, - 0x5c, 0x24, 0xa5, 0x48, 0xa1, 0x70, 0x0b, 0xcc, 0x13, 0xe1, 0x5e, 0xc6, 0xb5, 0x43, 0xa9, 0x3f, - 0xac, 0xd4, 0xb2, 0x52, 0x98, 0xdf, 0x19, 0x87, 0xd1, 0x79, 0xbe, 0x70, 0xd5, 0x22, 0x9e, 0x4b, - 0x5a, 0x72, 0x07, 0x2b, 0x25, 0xae, 0xb6, 0xa5, 0x14, 0x29, 0xd4, 0xfc, 0x5b, 0x07, 0xc6, 0xa4, - 0xab, 0x0d, 0x1e, 0x26, 0xbb, 0x88, 0x04, 0xe5, 0x3a, 0x54, 0xa9, 0xdf, 0xbe, 0xd4, 0x80, 0x08, - 0x8d, 0xc6, 0x92, 0x72, 0x3b, 0x97, 0x96, 0xa6, 0x56, 0x17, 0xf9, 0x0b, 0x29, 0x58, 0xf0, 0xc6, - 0x77, 0xe6, 0x78, 0xa9, 0xaa, 0xd4, 0xef, 0x5c, 0x76, 0x1c, 0xa4, 0x37, 0x43, 0x79, 0x5b, 0x38, - 0x07, 0x30, 0x74, 0xc1, 0x3e, 0xac, 0x03, 0xe0, 0x7a, 0x8e, 0xdf, 0x0b, 0xba, 0x84, 0x13, 0x99, - 0xde, 0x52, 0x72, 0x0f, 0xee, 0x8e, 0x10, 0x94, 0x62, 0x65, 0xd5, 0xa5, 0x70, 0xb5, 0xba, 0x34, - 0xd6, 0x4e, 0xcf, 0xaa, 0xb9, 0x17, 0x67, 0xd5, 0xdc, 0xcb, 0xb3, 0x6a, 0xee, 0x79, 0x54, 0xd5, - 0x4e, 0xa3, 0xaa, 0xf6, 0x22, 0xaa, 0x6a, 0x2f, 0xa3, 0xaa, 0xf6, 0x67, 0x54, 0xd5, 0xbe, 0xfd, - 0xab, 0x9a, 0x7b, 0xa6, 0xf7, 0xd7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x9f, 0x85, 0x45, 0x74, - 0x47, 0x0f, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index 9769af115c39d..f0def20b90995 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -17,44 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto - - It has these top-level messages: - ExtraValue - LocalSubjectAccessReview - NonResourceAttributes - NonResourceRule - ResourceAttributes - ResourceRule - SelfSubjectAccessReview - SelfSubjectAccessReviewSpec - SelfSubjectRulesReview - SelfSubjectRulesReviewSpec - SubjectAccessReview - SubjectAccessReviewSpec - SubjectAccessReviewStatus - SubjectRulesReviewStatus -*/ package v1beta1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -68,74 +44,398 @@ var _ = math.Inf // proto package needs to be updated. 
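Editorial note on the Unmarshal and skipGenerated hunks above: wherever the regenerated code computes an end offset from a decoded varint length (postIndex, postStringIndexmapkey, postmsgIndex, iNdEx + skippy, iNdEx + length), it now adds a "< 0" guard before the existing bounds check. The decoded length is already rejected when negative, but the addition itself can wrap negative on a hostile, near-MaxInt length, and a negative slice bound would panic at the subsequent slice expression. The following is a minimal standalone sketch of that guard under the same decoding conventions; readLengthDelimited and the error variables are hypothetical names for illustration, not code from the generated files.

package main

import (
	"errors"
	"fmt"
)

var (
	errInvalidLength = errors.New("proto: negative length found during unmarshaling")
	errIntOverflow   = errors.New("proto: integer overflow")
	errUnexpectedEOF = errors.New("proto: unexpected end of input")
)

// readLengthDelimited decodes a varint length starting at dAtA[iNdEx] and
// returns the field bytes plus the index just past them, applying the same
// three checks as the regenerated code: reject a negative decoded length,
// reject a wrapped (negative) end offset, and reject an end offset past the
// end of the buffer.
func readLengthDelimited(dAtA []byte, iNdEx int) (field []byte, next int, err error) {
	var length int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errIntOverflow
		}
		if iNdEx >= len(dAtA) {
			return nil, 0, errUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		length |= int(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	if length < 0 {
		return nil, 0, errInvalidLength
	}
	postIndex := iNdEx + length
	if postIndex < 0 {
		// The guard added throughout the regenerated code: the addition above
		// can wrap negative even when length itself is positive, and without
		// this check the negative bound would panic in the slice below.
		return nil, 0, errInvalidLength
	}
	if postIndex > len(dAtA) {
		return nil, 0, errUnexpectedEOF
	}
	return dAtA[iNdEx:postIndex], postIndex, nil
}

func main() {
	// 0x03 = length 3, followed by three payload bytes.
	field, next, err := readLengthDelimited([]byte{0x03, 'f', 'o', 'o'}, 0)
	fmt.Println(string(field), next, err) // foo 4 <nil>
}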
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_43130d8376f09103, []int{1} +} +func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) +} +func (m *LocalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) } -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo -func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } -func (*NonResourceRule) ProtoMessage() {} -func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{2} +} +func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceAttributes.Merge(m, src) +} +func (m *NonResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *NonResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m) +} -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var 
xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo -func (m *ResourceRule) Reset() { *m = ResourceRule{} } -func (*ResourceRule) ProtoMessage() {} -func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{3} +} +func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceRule.Merge(m, src) +} +func (m *NonResourceRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{4} +} +func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributes.Merge(m, src) +} +func (m *ResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo + +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{5} +} +func (m *ResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRule.Merge(m, src) +} +func (m *ResourceRule) XXX_Size() int { + return m.Size() +} +func (m *ResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceRule proto.InternalMessageInfo + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{6} +} +func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src) +} +func (m *SelfSubjectAccessReview) XXX_Size() int { + return 
m.Size() +} +func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m) +} -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_43130d8376f09103, []int{7} +} +func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{8} +} +func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) +} +func (m *SelfSubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) } -func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } -func (*SelfSubjectRulesReview) ProtoMessage() {} -func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptor_43130d8376f09103, []int{9} +} +func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) } -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{10} +} +func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReview.Merge(m, src) +} +func (m *SubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_43130d8376f09103, []int{11} +} +func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src) } +func (m *SubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} + return fileDescriptor_43130d8376f09103, []int{12} } +func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src) +} +func (m *SubjectAccessReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptor_43130d8376f09103, []int{13} +} 
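Editorial note on the marshalling hunks that follow: the regenerated code replaces the forward-writing MarshalTo bodies with MarshalToSizedBuffer, which starts at i := len(dAtA) and writes each field backwards (payload first, then its length varint, then the one-byte tag), emitting fields in descending field-number order and returning len(dAtA) - i. Marshal allocates exactly Size() bytes, so the reverse fill ends at offset 0. encodeVarintGenerated is adjusted to reserve its bytes by subtracting the varint size from the offset, and sovGenerated now computes that size as (math_bits.Len64(x|1) + 6) / 7 instead of looping. The sketch below reproduces the pattern for a single string field; varintSize, encodeVarint, and marshalStringField are hypothetical stand-ins for the generated helpers.

package main

import (
	"fmt"
	"math/bits"
)

// varintSize mirrors the new sovGenerated: one byte per 7 significant bits,
// with x|1 making the zero value occupy one byte.
func varintSize(v uint64) int {
	return (bits.Len64(v|1) + 6) / 7
}

// encodeVarint mirrors the new encodeVarintGenerated: it reserves
// varintSize(v) bytes ending at offset, writes the varint forward inside that
// window, and returns the new (smaller) base offset.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= varintSize(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// marshalStringField writes one length-delimited field (field number 1, wire
// type 2) backwards into the tail of dAtA: payload, then length, then tag.
// It returns the number of bytes written, as MarshalToSizedBuffer does.
func marshalStringField(dAtA []byte, s string) int {
	i := len(dAtA)
	i -= len(s)
	copy(dAtA[i:], s)
	i = encodeVarint(dAtA, i, uint64(len(s)))
	i--
	dAtA[i] = 0xa // tag byte: (1 << 3) | 2
	return len(dAtA) - i
}

func main() {
	// Size the buffer exactly, as the generated Marshal does, so the reverse
	// fill ends at offset 0 and the whole slice is the encoded message.
	s := "path"
	size := 1 + varintSize(uint64(len(s))) + len(s)
	buf := make([]byte, size)
	n := marshalStringField(buf, s)
	fmt.Printf("%d bytes: % x\n", n, buf[len(buf)-n:]) // 6 bytes: 0a 04 70 61 74 68
}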
+func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) +} +func (m *SubjectRulesReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) } +var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo + func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.ExtraValue") proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.LocalSubjectAccessReview") @@ -149,13 +449,95 @@ func init() { proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReviewSpec") proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReview") proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec.ExtraEntry") proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewStatus") proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectRulesReviewStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptor_43130d8376f09103) +} + +var fileDescriptor_43130d8376f09103 = []byte{ + // 1141 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xfa, 0x4f, 0x62, 0x8f, 0x1b, 0x92, 0x4e, 0x94, 0x66, 0x1b, 0x84, 0x6d, 0x19, 0x09, + 0x05, 0xd1, 0xee, 0x92, 0xa8, 0x90, 0x12, 0xe8, 0x21, 0x56, 0x22, 0x14, 0xa9, 0x2d, 0xd5, 0x44, + 0xc9, 0x81, 0x4a, 0xc0, 0xec, 0x7a, 0x62, 0x2f, 0xb6, 0x77, 0x97, 0x99, 0x59, 0x87, 0x20, 0x0e, + 0x3d, 0x72, 0xe4, 0xc8, 0x91, 0x13, 0xdf, 0x81, 0x0b, 0x12, 0x9c, 0x72, 0xec, 0x31, 0x48, 0xc8, + 0x22, 0xcb, 0x87, 0xe0, 0x8a, 0x66, 0x76, 0xec, 0x5d, 0x27, 0x9b, 0x38, 0xce, 0x81, 0x5e, 0x7a, + 0xdb, 0x79, 0xbf, 0xdf, 0x7b, 0xf3, 0xde, 0x9b, 0xf7, 0xde, 0x3e, 0xb0, 0xdb, 0x79, 0xc8, 0x0c, + 0xc7, 0x33, 0x3b, 0x81, 0x45, 0xa8, 0x4b, 0x38, 0x61, 0x66, 0x9f, 0xb8, 0x4d, 0x8f, 0x9a, 0x0a, + 0xc0, 0xbe, 0x63, 0xe2, 0x80, 0xb7, 0x3d, 0xea, 0x7c, 0x87, 0xb9, 0xe3, 0xb9, 0x66, 0x7f, 0xcd, + 0x22, 0x1c, 0xaf, 0x99, 0x2d, 0xe2, 0x12, 0x8a, 0x39, 0x69, 0x1a, 0x3e, 0xf5, 0xb8, 0x07, 0x6b, + 0x91, 0x86, 0x81, 0x7d, 0xc7, 0x18, 0xd3, 0x30, 0x94, 0xc6, 0xca, 0xfd, 0x96, 0xc3, 0xdb, 0x81, + 0x65, 0xd8, 0x5e, 0xcf, 0x6c, 0x79, 0x2d, 0xcf, 0x94, 0x8a, 0x56, 0x70, 0x28, 0x4f, 0xf2, 0x20, + 0xbf, 0x22, 0x83, 0x2b, 0x0f, 0x62, 0x17, 0x7a, 0xd8, 0x6e, 0x3b, 0x2e, 0xa1, 0xc7, 0xa6, 0xdf, + 0x69, 0x09, 0x01, 0x33, 0x7b, 0x84, 0x63, 0xb3, 0x7f, 0xc1, 0x8d, 0x15, 0xf3, 0x32, 0x2d, 0x1a, + 0xb8, 0xdc, 0xe9, 0x91, 0x0b, 0x0a, 0x1f, 0x4e, 0x52, 0x60, 0x76, 0x9b, 0xf4, 0xf0, 0x79, 0xbd, + 0xfa, 0x06, 0x00, 0x3b, 0xdf, 0x72, 0x8a, 0x0f, 0x70, 0x37, 0x20, 0xb0, 0x0a, 0x0a, 0x0e, 0x27, + 0x3d, 0xa6, 0x6b, 0xb5, 0xdc, 0x6a, 0xa9, 0x51, 0x0a, 0x07, 0xd5, 0xc2, 0xae, 0x10, 
0xa0, 0x48, + 0xbe, 0x59, 0xfc, 0xe9, 0xe7, 0x6a, 0xe6, 0xc5, 0x5f, 0xb5, 0x4c, 0xfd, 0xb7, 0x2c, 0xd0, 0x1f, + 0x7b, 0x36, 0xee, 0xee, 0x05, 0xd6, 0xd7, 0xc4, 0xe6, 0x5b, 0xb6, 0x4d, 0x18, 0x43, 0xa4, 0xef, + 0x90, 0x23, 0xf8, 0x15, 0x28, 0x8a, 0xc8, 0x9a, 0x98, 0x63, 0x5d, 0xab, 0x69, 0xab, 0xe5, 0xf5, + 0xf7, 0x8d, 0x38, 0xb1, 0x23, 0x07, 0x0d, 0xbf, 0xd3, 0x12, 0x02, 0x66, 0x08, 0xb6, 0xd1, 0x5f, + 0x33, 0x3e, 0x93, 0xb6, 0x9e, 0x10, 0x8e, 0x1b, 0xf0, 0x64, 0x50, 0xcd, 0x84, 0x83, 0x2a, 0x88, + 0x65, 0x68, 0x64, 0x15, 0x3e, 0x07, 0x79, 0xe6, 0x13, 0x5b, 0xcf, 0x4a, 0xeb, 0x1f, 0x19, 0x93, + 0x9e, 0xcd, 0x48, 0x71, 0x73, 0xcf, 0x27, 0x76, 0xe3, 0x96, 0xba, 0x26, 0x2f, 0x4e, 0x48, 0x1a, + 0x85, 0x36, 0x98, 0x61, 0x1c, 0xf3, 0x80, 0xe9, 0x39, 0x69, 0xfe, 0xe3, 0x9b, 0x99, 0x97, 0x26, + 0x1a, 0x6f, 0xa8, 0x0b, 0x66, 0xa2, 0x33, 0x52, 0xa6, 0xeb, 0xcf, 0xc1, 0xd2, 0x53, 0xcf, 0x45, + 0x84, 0x79, 0x01, 0xb5, 0xc9, 0x16, 0xe7, 0xd4, 0xb1, 0x02, 0x4e, 0x18, 0xac, 0x81, 0xbc, 0x8f, + 0x79, 0x5b, 0x26, 0xae, 0x14, 0xfb, 0xf7, 0x0c, 0xf3, 0x36, 0x92, 0x88, 0x60, 0xf4, 0x09, 0xb5, + 0x64, 0xf0, 0x09, 0xc6, 0x01, 0xa1, 0x16, 0x92, 0x48, 0xfd, 0x1b, 0x30, 0x9f, 0x30, 0x8e, 0x82, + 0xae, 0x7c, 0x5b, 0x01, 0x8d, 0xbd, 0xad, 0xd0, 0x60, 0x28, 0x92, 0xc3, 0x47, 0x60, 0xde, 0x8d, + 0x75, 0xf6, 0xd1, 0x63, 0xa6, 0x67, 0x25, 0x75, 0x31, 0x1c, 0x54, 0x93, 0xe6, 0x04, 0x84, 0xce, + 0x73, 0x45, 0x41, 0xc0, 0x94, 0x68, 0x4c, 0x50, 0x72, 0x71, 0x8f, 0x30, 0x1f, 0xdb, 0x44, 0x85, + 0x74, 0x5b, 0x39, 0x5c, 0x7a, 0x3a, 0x04, 0x50, 0xcc, 0x99, 0x1c, 0x1c, 0x7c, 0x1b, 0x14, 0x5a, + 0xd4, 0x0b, 0x7c, 0xf9, 0x3a, 0xa5, 0xc6, 0x9c, 0xa2, 0x14, 0x3e, 0x15, 0x42, 0x14, 0x61, 0xf0, + 0x5d, 0x30, 0xdb, 0x27, 0x94, 0x39, 0x9e, 0xab, 0xe7, 0x25, 0x6d, 0x5e, 0xd1, 0x66, 0x0f, 0x22, + 0x31, 0x1a, 0xe2, 0xf0, 0x1e, 0x28, 0x52, 0xe5, 0xb8, 0x5e, 0x90, 0xdc, 0x05, 0xc5, 0x2d, 0x8e, + 0x32, 0x38, 0x62, 0xc0, 0x0f, 0x40, 0x99, 0x05, 0xd6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x2a, 0x85, + 0xf2, 0x5e, 0x0c, 0xa1, 0x24, 0x4f, 0x84, 0x25, 0x62, 0xd4, 0x67, 0xc7, 0xc3, 0x12, 0x29, 0x40, + 0x12, 0xa9, 0xff, 0xa1, 0x81, 0x5b, 0xd3, 0xbd, 0xd8, 0x7b, 0xa0, 0x84, 0x7d, 0x47, 0x86, 0x3d, + 0x7c, 0xab, 0x39, 0x91, 0xd7, 0xad, 0x67, 0xbb, 0x91, 0x10, 0xc5, 0xb8, 0x20, 0x0f, 0x9d, 0x11, + 0x75, 0x3d, 0x22, 0x0f, 0xaf, 0x64, 0x28, 0xc6, 0xe1, 0x06, 0x98, 0x1b, 0x1e, 0xe4, 0x23, 0xe9, + 0x79, 0xa9, 0x70, 0x3b, 0x1c, 0x54, 0xe7, 0x50, 0x12, 0x40, 0xe3, 0xbc, 0xfa, 0xef, 0x59, 0xb0, + 0xbc, 0x47, 0xba, 0x87, 0xaf, 0x66, 0x2a, 0x7c, 0x39, 0x36, 0x15, 0x1e, 0x5d, 0xa3, 0x6d, 0xd3, + 0x5d, 0x7d, 0xb5, 0x93, 0xe1, 0x97, 0x2c, 0x78, 0xf3, 0x0a, 0xc7, 0xe0, 0xf7, 0x00, 0xd2, 0x0b, + 0x8d, 0xa6, 0x32, 0xfa, 0x60, 0xb2, 0x43, 0x17, 0x9b, 0xb4, 0x71, 0x27, 0x1c, 0x54, 0x53, 0x9a, + 0x17, 0xa5, 0xdc, 0x03, 0x7f, 0xd0, 0xc0, 0x92, 0x9b, 0x36, 0xb8, 0x54, 0xd6, 0x37, 0x26, 0x7b, + 0x90, 0x3a, 0xf7, 0x1a, 0x77, 0xc3, 0x41, 0x35, 0x7d, 0x24, 0xa2, 0xf4, 0x0b, 0xc5, 0xc8, 0xb9, + 0x93, 0x48, 0x94, 0x68, 0x9a, 0xff, 0xaf, 0xd6, 0xbe, 0x18, 0xab, 0xb5, 0x4f, 0xa6, 0xaa, 0xb5, + 0x84, 0xa7, 0x97, 0x96, 0x9a, 0x75, 0xae, 0xd4, 0x36, 0xaf, 0x5d, 0x6a, 0x49, 0xeb, 0x57, 0x57, + 0xda, 0x13, 0xb0, 0x72, 0xb9, 0x57, 0x53, 0x8f, 0xee, 0xfa, 0xaf, 0x59, 0xb0, 0xf8, 0x7a, 0x1d, + 0xb8, 0x59, 0xd3, 0x9f, 0xe6, 0xc1, 0xf2, 0xeb, 0x86, 0xbf, 0xba, 0xe1, 0xc5, 0x4f, 0x34, 0x60, + 0x84, 0xaa, 0x1f, 0xff, 0xe8, 0xad, 0xf6, 0x19, 0xa1, 0x48, 0x22, 0xb0, 0x36, 0xdc, 0x0d, 0xa2, + 0x1f, 0x16, 0x10, 0x99, 0x56, 0xff, 0x42, 0xb5, 0x18, 0x38, 0xa0, 0x40, 0xc4, 0xc6, 0xab, 0x17, + 0x6a, 0xb9, 
0xd5, 0xf2, 0xfa, 0xf6, 0x8d, 0x6b, 0xc5, 0x90, 0x8b, 0xf3, 0x8e, 0xcb, 0xe9, 0x71, + 0xbc, 0x83, 0x48, 0x19, 0x8a, 0x6e, 0x80, 0x6f, 0x81, 0x5c, 0xe0, 0x34, 0xd5, 0x8a, 0x50, 0x56, + 0x94, 0xdc, 0xfe, 0xee, 0x36, 0x12, 0xf2, 0x95, 0x43, 0xb5, 0x7b, 0x4b, 0x13, 0x70, 0x01, 0xe4, + 0x3a, 0xe4, 0x38, 0xea, 0x33, 0x24, 0x3e, 0x61, 0x03, 0x14, 0xfa, 0x62, 0x2d, 0x57, 0x79, 0xbe, + 0x37, 0xd9, 0xd3, 0x78, 0x95, 0x47, 0x91, 0xea, 0x66, 0xf6, 0xa1, 0x56, 0xff, 0x53, 0x03, 0x77, + 0x2f, 0x2d, 0x48, 0xb1, 0x28, 0xe1, 0x6e, 0xd7, 0x3b, 0x22, 0x4d, 0x79, 0x77, 0x31, 0x5e, 0x94, + 0xb6, 0x22, 0x31, 0x1a, 0xe2, 0xf0, 0x1d, 0x30, 0xd3, 0x24, 0xae, 0x43, 0x9a, 0x72, 0xa5, 0x2a, + 0xc6, 0xb5, 0xbc, 0x2d, 0xa5, 0x48, 0xa1, 0x82, 0x47, 0x09, 0x66, 0x9e, 0xab, 0x96, 0xb8, 0x11, + 0x0f, 0x49, 0x29, 0x52, 0x28, 0xdc, 0x02, 0xf3, 0x44, 0xb8, 0x29, 0x83, 0xd8, 0xa1, 0xd4, 0x1b, + 0xbe, 0xec, 0xb2, 0x52, 0x98, 0xdf, 0x19, 0x87, 0xd1, 0x79, 0x7e, 0xfd, 0xdf, 0x2c, 0xd0, 0x2f, + 0x1b, 0x7b, 0xb0, 0x13, 0x6f, 0x31, 0x12, 0x94, 0x8b, 0x54, 0x79, 0xdd, 0xb8, 0x7e, 0xcb, 0x08, + 0xb5, 0xc6, 0x92, 0xf2, 0x66, 0x2e, 0x29, 0x4d, 0x6c, 0x3e, 0xf2, 0x08, 0x8f, 0xc0, 0x82, 0x3b, + 0xbe, 0x72, 0x47, 0x3b, 0x59, 0x79, 0x7d, 0x6d, 0xaa, 0x06, 0x91, 0x57, 0xea, 0xea, 0xca, 0x85, + 0x73, 0x00, 0x43, 0x17, 0x2e, 0x81, 0xeb, 0x00, 0x38, 0xae, 0xed, 0xf5, 0xfc, 0x2e, 0xe1, 0x44, + 0x26, 0xb0, 0x18, 0x4f, 0xcb, 0xdd, 0x11, 0x82, 0x12, 0xac, 0xb4, 0xcc, 0xe7, 0xa7, 0xcb, 0x7c, + 0xe3, 0xfe, 0xc9, 0x59, 0x25, 0xf3, 0xf2, 0xac, 0x92, 0x39, 0x3d, 0xab, 0x64, 0x5e, 0x84, 0x15, + 0xed, 0x24, 0xac, 0x68, 0x2f, 0xc3, 0x8a, 0x76, 0x1a, 0x56, 0xb4, 0xbf, 0xc3, 0x8a, 0xf6, 0xe3, + 0x3f, 0x95, 0xcc, 0xe7, 0xb3, 0x2a, 0xc2, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xc9, 0xa5, + 0x34, 0xa4, 0x0f, 0x00, 0x00, +} + func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -163,32 +545,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -196,41 +577,52 @@ func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i 
= encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -238,25 +630,32 @@ func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -264,47 +663,40 @@ func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { } func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { size 
:= m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -312,45 +704,57 @@ func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) - i += copy(dAtA[i:], m.Subresource) - dAtA[i] = 0x3a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x3a + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x32 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x2a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x1a + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -358,77 +762,58 @@ func (m *ResourceRule) Marshal() (dAtA []byte, err error) { } func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - 
for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - return i, nil + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -436,41 +821,52 @@ func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -478,37 +874,46 @@ func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n7, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n8, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -516,41 +921,52 @@ func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -558,21 +974,27 @@ func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -580,133 +1002,147 @@ func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - return i, nil -} - -func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n15 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.NonResourceAttributes != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n16 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x2a - i++ - v := m.Extra[string(k)] - msgSize 
:= 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n17, err := (&v).MarshalTo(dAtA[i:]) + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n17 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -714,41 +1150,48 @@ func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Allowed { + i-- + if m.Denied { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) + i-- dAtA[i] = 0x20 - i++ - if m.Denied { + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -756,59 +1199,74 @@ func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.ResourceRules) > 0 { - for _, msg := range m.ResourceRules { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x22 + i-- + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 if len(m.NonResourceRules) > 0 { - for _, msg := range m.NonResourceRules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x18 - i++ - if m.Incomplete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -821,6 +1279,9 @@ func (m ExtraValue) Size() (n int) { } func (m *LocalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -833,6 +1294,9 @@ func (m *LocalSubjectAccessReview) Size() (n int) { } func (m *NonResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -843,6 +1307,9 @@ func (m *NonResourceAttributes) Size() (n int) { } func (m *NonResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -861,6 +1328,9 @@ func (m *NonResourceRule) Size() (n int) { } func (m *ResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -881,6 +1351,9 @@ func (m *ResourceAttributes) Size() (n int) { } func (m *ResourceRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -911,6 +1384,9 @@ func (m *ResourceRule) Size() (n int) { } func (m *SelfSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -923,6 +1399,9 @@ func (m *SelfSubjectAccessReview) Size() (n int) { } func (m 
*SelfSubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -937,6 +1416,9 @@ func (m *SelfSubjectAccessReviewSpec) Size() (n int) { } func (m *SelfSubjectRulesReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -949,6 +1431,9 @@ func (m *SelfSubjectRulesReview) Size() (n int) { } func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -957,6 +1442,9 @@ func (m *SelfSubjectRulesReviewSpec) Size() (n int) { } func (m *SubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -969,6 +1457,9 @@ func (m *SubjectAccessReview) Size() (n int) { } func (m *SubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ResourceAttributes != nil { @@ -1002,6 +1493,9 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { } func (m *SubjectAccessReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1014,6 +1508,9 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { } func (m *SubjectRulesReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ResourceRules) > 0 { @@ -1035,14 +1532,7 @@ func (m *SubjectRulesReviewStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1052,7 +1542,7 @@ func (this *LocalSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1115,7 +1605,7 @@ func (this *SelfSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1127,8 +1617,8 @@ func (this *SelfSubjectAccessReviewSpec) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", 
"NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `}`, }, "") return s @@ -1138,7 +1628,7 @@ func (this *SelfSubjectRulesReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectRulesReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1160,7 +1650,7 @@ func (this *SubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1182,8 +1672,8 @@ func (this *SubjectAccessReviewSpec) String() string { } mapStringForExtra += "}" s := strings.Join([]string{`&SubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, `Extra:` + mapStringForExtra + `,`, @@ -1209,9 +1699,19 @@ func (this *SubjectRulesReviewStatus) String() string { if this == nil { return "nil" } + repeatedStringForResourceRules := "[]ResourceRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourceRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" s := strings.Join([]string{`&SubjectRulesReviewStatus{`, - `ResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRules), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + `,`, - 
`NonResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NonResourceRules), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, `}`, @@ -1241,7 +1741,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1269,7 +1769,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1279,6 +1779,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1293,6 +1796,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1320,7 +1826,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1348,7 +1854,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1357,6 +1863,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1378,7 +1887,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1387,6 +1896,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1408,7 +1920,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1417,6 +1929,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1433,6 +1948,9 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1460,7 +1978,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1488,7 +2006,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1498,6 +2016,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1517,7 +2038,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1527,6 +2048,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1541,6 +2065,9 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1568,7 +2095,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1596,7 +2123,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1606,6 +2133,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1625,7 +2155,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1635,6 +2165,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1649,6 +2182,9 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1676,7 +2212,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1704,7 +2240,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1714,6 +2250,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1733,7 +2272,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1743,6 +2282,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -1762,7 +2304,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1772,6 +2314,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1791,7 +2336,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1801,6 +2346,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1820,7 +2368,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1830,6 +2378,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1849,7 +2400,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1859,6 +2410,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1878,7 +2432,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1888,6 +2442,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1902,6 +2459,9 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1929,7 +2489,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1957,7 +2517,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1967,6 +2527,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1986,7 +2549,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1996,6 +2559,9 @@ func (m *ResourceRule) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2015,7 +2581,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2025,6 +2591,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2044,7 +2613,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2054,6 +2623,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2068,6 +2640,9 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2095,7 +2670,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2123,7 +2698,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2132,6 +2707,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2153,7 +2731,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2162,6 +2740,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2183,7 +2764,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2192,6 +2773,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2208,6 +2792,9 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2235,7 +2822,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2263,7 +2850,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2272,6 +2859,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2296,7 +2886,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2305,6 +2895,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2324,6 +2917,9 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2351,7 +2947,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2379,7 +2975,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2388,6 +2984,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2409,7 +3008,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2418,6 +3017,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2439,7 +3041,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2448,6 +3050,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2464,6 +3069,9 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2491,7 +3099,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2519,7 +3127,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2529,6 +3137,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2543,6 +3154,9 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2570,7 +3184,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2598,7 +3212,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2607,6 +3221,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2628,7 +3245,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2637,6 +3254,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2658,7 +3278,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2667,6 +3287,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2683,6 +3306,9 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2710,7 +3336,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2738,7 +3364,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2747,6 +3373,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2771,7 +3400,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2780,6 +3409,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2804,7 +3436,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-2814,6 +3446,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2833,7 +3468,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2843,6 +3478,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2862,7 +3500,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2871,6 +3509,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2891,7 +3532,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2908,7 +3549,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2918,6 +3559,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2934,7 +3578,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2943,7 +3587,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -2985,7 +3629,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2995,6 +3639,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3009,6 +3656,9 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3036,7 +3686,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3064,7 +3714,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b 
< 0x80 { break } @@ -3084,7 +3734,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3094,6 +3744,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3113,7 +3766,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3123,6 +3776,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3142,7 +3798,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3157,6 +3813,9 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3184,7 +3843,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3212,7 +3871,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3221,6 +3880,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3243,7 +3905,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3252,6 +3914,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3274,7 +3939,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3294,7 +3959,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3304,6 +3969,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3318,6 +3986,9 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3384,10 +4055,13 
@@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -3416,6 +4090,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -3434,83 +4111,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1137 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcb, 0x6f, 0x1b, 0x45, - 0x18, 0xf7, 0xfa, 0x91, 0xd8, 0xe3, 0x86, 0xa4, 0x13, 0xa5, 0xd9, 0x06, 0x61, 0x5b, 0x46, 0x42, - 0x41, 0xb4, 0xbb, 0x24, 0x2a, 0xa4, 0x04, 0x7a, 0x88, 0x95, 0x08, 0x45, 0x6a, 0x4b, 0x35, 0x51, - 0x72, 0xa0, 0x12, 0x30, 0xbb, 0x9e, 0xd8, 0x8b, 0xed, 0xdd, 0x65, 0x66, 0xd6, 0x21, 0x88, 0x43, - 0x8f, 0x1c, 0x39, 0x72, 0xe4, 0xc4, 0xff, 0xc0, 0x05, 0x09, 0x4e, 0x39, 0xf6, 0x18, 0x24, 0x64, - 0x91, 0xe5, 0x8f, 0xe0, 0x8a, 0x66, 0x76, 0xec, 0x5d, 0x27, 0x9b, 0x38, 0xce, 0x81, 0x5e, 0x7a, - 0xdb, 0xf9, 0x7e, 0xdf, 0xfb, 0xb5, 0x1f, 0xd8, 0xed, 0x3c, 0x64, 0x86, 0xe3, 0x99, 0x9d, 0xc0, - 0x22, 0xd4, 0x25, 0x9c, 0x30, 0xb3, 0x4f, 0xdc, 0xa6, 0x47, 0x4d, 0x05, 0x60, 0xdf, 0x31, 0x71, - 0xc0, 0xdb, 0x1e, 0x75, 0xbe, 0xc3, 0xdc, 0xf1, 0x5c, 0xb3, 0xbf, 0x66, 0x11, 0x8e, 0xd7, 0xcc, - 0x16, 0x71, 0x09, 0xc5, 0x9c, 0x34, 0x0d, 0x9f, 0x7a, 0xdc, 0x83, 0xb5, 0x48, 0xc2, 0xc0, 0xbe, - 0x63, 0x8c, 0x49, 0x18, 0x4a, 0x62, 0xe5, 0x7e, 0xcb, 0xe1, 0xed, 0xc0, 0x32, 0x6c, 0xaf, 0x67, - 0xb6, 0xbc, 0x96, 0x67, 0x4a, 0x41, 0x2b, 0x38, 0x94, 0x2f, 0xf9, 0x90, 0x5f, 0x91, 0xc2, 0x95, - 0x07, 0xb1, 0x0b, 0x3d, 0x6c, 0xb7, 0x1d, 0x97, 0xd0, 0x63, 0xd3, 0xef, 0xb4, 0x04, 0x81, 0x99, - 0x3d, 0xc2, 0xb1, 0xd9, 0xbf, 0xe0, 0xc6, 0x8a, 0x79, 0x99, 0x14, 0x0d, 0x5c, 0xee, 0xf4, 0xc8, - 0x05, 0x81, 0x0f, 0x27, 0x09, 0x30, 0xbb, 0x4d, 0x7a, 0xf8, 0xbc, 0x5c, 0x7d, 0x03, 0x80, 0x9d, - 0x6f, 0x39, 0xc5, 0x07, 0xb8, 0x1b, 0x10, 0x58, 0x05, 0x05, 0x87, 0x93, 0x1e, 0xd3, 0xb5, 0x5a, - 0x6e, 0xb5, 0xd4, 0x28, 0x85, 0x83, 0x6a, 0x61, 0x57, 0x10, 0x50, 0x44, 0xdf, 0x2c, 0xfe, 0xf4, - 0x73, 0x35, 0xf3, 0xe2, 0xaf, 0x5a, 0xa6, 0xfe, 0x5b, 0x16, 0xe8, 0x8f, 0x3d, 0x1b, 0x77, 0xf7, - 0x02, 0xeb, 0x6b, 0x62, 0xf3, 0x2d, 0xdb, 0x26, 0x8c, 0x21, 0xd2, 0x77, 0xc8, 0x11, 0xfc, 0x0a, - 0x14, 0x45, 0x64, 0x4d, 0xcc, 0xb1, 0xae, 0xd5, 0xb4, 0xd5, 0xf2, 0xfa, 0xfb, 0x46, 0x9c, 0xd8, - 0x91, 0x83, 0x86, 0xdf, 0x69, 0x09, 0x02, 0x33, 0x04, 0xb7, 0xd1, 0x5f, 0x33, 0x3e, 0x93, 0xba, - 0x9e, 0x10, 0x8e, 0x1b, 0xf0, 0x64, 0x50, 0xcd, 0x84, 0x83, 0x2a, 0x88, 0x69, 0x68, 0xa4, 0x15, - 0x3e, 0x07, 0x79, 0xe6, 0x13, 0x5b, 0xcf, 0x4a, 0xed, 0x1f, 0x19, 0x93, 0xca, 0x66, 0xa4, 0xb8, - 0xb9, 0xe7, 0x13, 0xbb, 0x71, 0x4b, 0x99, 0xc9, 0x8b, 0x17, 0x92, 0x4a, 0xa1, 0x0d, 0x66, 0x18, - 0xc7, 0x3c, 0x60, 0x7a, 0x4e, 0xaa, 0xff, 0xf8, 0x66, 0xea, 0xa5, 0x8a, 0xc6, 0x1b, 0xca, 0xc0, - 0x4c, 0xf4, 0x46, 0x4a, 0x75, 0xfd, 0x39, 0x58, 0x7a, 0xea, 0xb9, 0x88, 0x30, 0x2f, 0xa0, 0x36, - 0xd9, 0xe2, 0x9c, 0x3a, 0x56, 0xc0, 0x09, 0x83, 0x35, 0x90, 0xf7, 0x31, 0x6f, 0xcb, 0xc4, 0x95, - 0x62, 0xff, 0x9e, 0x61, 0xde, 0x46, 0x12, 
0x11, 0x1c, 0x7d, 0x42, 0x2d, 0x19, 0x7c, 0x82, 0xe3, - 0x80, 0x50, 0x0b, 0x49, 0xa4, 0xfe, 0x0d, 0x98, 0x4f, 0x28, 0x47, 0x41, 0x57, 0xd6, 0x56, 0x40, - 0x63, 0xb5, 0x15, 0x12, 0x0c, 0x45, 0x74, 0xf8, 0x08, 0xcc, 0xbb, 0xb1, 0xcc, 0x3e, 0x7a, 0xcc, - 0xf4, 0xac, 0x64, 0x5d, 0x0c, 0x07, 0xd5, 0xa4, 0x3a, 0x01, 0xa1, 0xf3, 0xbc, 0xa2, 0x21, 0x60, - 0x4a, 0x34, 0x26, 0x28, 0xb9, 0xb8, 0x47, 0x98, 0x8f, 0x6d, 0xa2, 0x42, 0xba, 0xad, 0x1c, 0x2e, - 0x3d, 0x1d, 0x02, 0x28, 0xe6, 0x99, 0x1c, 0x1c, 0x7c, 0x1b, 0x14, 0x5a, 0xd4, 0x0b, 0x7c, 0x59, - 0x9d, 0x52, 0x63, 0x4e, 0xb1, 0x14, 0x3e, 0x15, 0x44, 0x14, 0x61, 0xf0, 0x5d, 0x30, 0xdb, 0x27, - 0x94, 0x39, 0x9e, 0xab, 0xe7, 0x25, 0xdb, 0xbc, 0x62, 0x9b, 0x3d, 0x88, 0xc8, 0x68, 0x88, 0xc3, - 0x7b, 0xa0, 0x48, 0x95, 0xe3, 0x7a, 0x41, 0xf2, 0x2e, 0x28, 0xde, 0xe2, 0x28, 0x83, 0x23, 0x0e, - 0xf8, 0x01, 0x28, 0xb3, 0xc0, 0x1a, 0x09, 0xcc, 0x48, 0x81, 0x45, 0x25, 0x50, 0xde, 0x8b, 0x21, - 0x94, 0xe4, 0x13, 0x61, 0x89, 0x18, 0xf5, 0xd9, 0xf1, 0xb0, 0x44, 0x0a, 0x90, 0x44, 0xea, 0x7f, - 0x68, 0xe0, 0xd6, 0x74, 0x15, 0x7b, 0x0f, 0x94, 0xb0, 0xef, 0xc8, 0xb0, 0x87, 0xb5, 0x9a, 0x13, - 0x79, 0xdd, 0x7a, 0xb6, 0x1b, 0x11, 0x51, 0x8c, 0x0b, 0xe6, 0xa1, 0x33, 0xa2, 0xaf, 0x47, 0xcc, - 0x43, 0x93, 0x0c, 0xc5, 0x38, 0xdc, 0x00, 0x73, 0xc3, 0x87, 0x2c, 0x92, 0x9e, 0x97, 0x02, 0xb7, - 0xc3, 0x41, 0x75, 0x0e, 0x25, 0x01, 0x34, 0xce, 0x57, 0xff, 0x3d, 0x0b, 0x96, 0xf7, 0x48, 0xf7, - 0xf0, 0xd5, 0x6c, 0x85, 0x2f, 0xc7, 0xb6, 0xc2, 0xa3, 0x6b, 0x8c, 0x6d, 0xba, 0xab, 0xaf, 0x76, - 0x33, 0xfc, 0x92, 0x05, 0x6f, 0x5e, 0xe1, 0x18, 0xfc, 0x1e, 0x40, 0x7a, 0x61, 0xd0, 0x54, 0x46, - 0x1f, 0x4c, 0x76, 0xe8, 0xe2, 0x90, 0x36, 0xee, 0x84, 0x83, 0x6a, 0xca, 0xf0, 0xa2, 0x14, 0x3b, - 0xf0, 0x07, 0x0d, 0x2c, 0xb9, 0x69, 0x8b, 0x4b, 0x65, 0x7d, 0x63, 0xb2, 0x07, 0xa9, 0x7b, 0xaf, - 0x71, 0x37, 0x1c, 0x54, 0xd3, 0x57, 0x22, 0x4a, 0x37, 0x28, 0x56, 0xce, 0x9d, 0x44, 0xa2, 0xc4, - 0xd0, 0xfc, 0x7f, 0xbd, 0xf6, 0xc5, 0x58, 0xaf, 0x7d, 0x32, 0x55, 0xaf, 0x25, 0x3c, 0xbd, 0xb4, - 0xd5, 0xac, 0x73, 0xad, 0xb6, 0x79, 0xed, 0x56, 0x4b, 0x6a, 0xbf, 0xba, 0xd3, 0x9e, 0x80, 0x95, - 0xcb, 0xbd, 0x9a, 0x7a, 0x75, 0xd7, 0x7f, 0xcd, 0x82, 0xc5, 0xd7, 0xe7, 0xc0, 0xcd, 0x86, 0xfe, - 0x34, 0x0f, 0x96, 0x5f, 0x0f, 0xfc, 0xd5, 0x03, 0x2f, 0x7e, 0xa2, 0x01, 0x23, 0x54, 0xfd, 0xf8, - 0x47, 0xb5, 0xda, 0x67, 0x84, 0x22, 0x89, 0xc0, 0xda, 0xf0, 0x36, 0x88, 0x7e, 0x58, 0x40, 0x64, - 0x5a, 0xfd, 0x0b, 0xd5, 0x61, 0xe0, 0x80, 0x02, 0x11, 0x17, 0xaf, 0x5e, 0xa8, 0xe5, 0x56, 0xcb, - 0xeb, 0xdb, 0x37, 0xee, 0x15, 0x43, 0x1e, 0xce, 0x3b, 0x2e, 0xa7, 0xc7, 0xf1, 0x0d, 0x22, 0x69, - 0x28, 0xb2, 0x00, 0xdf, 0x02, 0xb9, 0xc0, 0x69, 0xaa, 0x13, 0xa1, 0xac, 0x58, 0x72, 0xfb, 0xbb, - 0xdb, 0x48, 0xd0, 0x57, 0x0e, 0xd5, 0xed, 0x2d, 0x55, 0xc0, 0x05, 0x90, 0xeb, 0x90, 0xe3, 0x68, - 0xce, 0x90, 0xf8, 0x84, 0x0d, 0x50, 0xe8, 0x8b, 0xb3, 0x5c, 0xe5, 0xf9, 0xde, 0x64, 0x4f, 0xe3, - 0x53, 0x1e, 0x45, 0xa2, 0x9b, 0xd9, 0x87, 0x5a, 0xfd, 0x4f, 0x0d, 0xdc, 0xbd, 0xb4, 0x21, 0xc5, - 0xa1, 0x84, 0xbb, 0x5d, 0xef, 0x88, 0x34, 0xa5, 0xed, 0x62, 0x7c, 0x28, 0x6d, 0x45, 0x64, 0x34, - 0xc4, 0xe1, 0x3b, 0x60, 0x86, 0x12, 0xcc, 0x3c, 0x57, 0x1d, 0x67, 0xa3, 0x5e, 0x46, 0x92, 0x8a, - 0x14, 0x0a, 0xb7, 0xc0, 0x3c, 0x11, 0xe6, 0xa5, 0x73, 0x3b, 0x94, 0x7a, 0xc3, 0x8a, 0x2d, 0x2b, - 0x81, 0xf9, 0x9d, 0x71, 0x18, 0x9d, 0xe7, 0x17, 0xa6, 0x9a, 0xc4, 0x75, 0x48, 0x53, 0x5e, 0x6f, - 0xc5, 0xd8, 0xd4, 0xb6, 0xa4, 0x22, 0x85, 0xd6, 0xff, 0xcd, 0x02, 0xfd, 0xb2, 0xb5, 0x07, 0x3b, - 0xf1, 0x15, 0x23, 0x41, 0x79, 0x48, 0x95, 0xd7, 0x8d, 0xeb, 0x8f, 
0x8c, 0x10, 0x6b, 0x2c, 0x29, - 0xdb, 0x73, 0x49, 0x6a, 0xe2, 0xf2, 0x91, 0x4f, 0x78, 0x04, 0x16, 0xdc, 0xf1, 0x93, 0x3b, 0xba, - 0xc9, 0xca, 0xeb, 0x6b, 0x53, 0x0d, 0x88, 0x34, 0xa9, 0x2b, 0x93, 0x0b, 0xe7, 0x00, 0x86, 0x2e, - 0x18, 0x81, 0xeb, 0x00, 0x38, 0xae, 0xed, 0xf5, 0xfc, 0x2e, 0xe1, 0x44, 0x26, 0xba, 0x18, 0x6f, - 0xcb, 0xdd, 0x11, 0x82, 0x12, 0x5c, 0x69, 0x15, 0xca, 0x4f, 0x57, 0xa1, 0xc6, 0xfd, 0x93, 0xb3, - 0x4a, 0xe6, 0xe5, 0x59, 0x25, 0x73, 0x7a, 0x56, 0xc9, 0xbc, 0x08, 0x2b, 0xda, 0x49, 0x58, 0xd1, - 0x5e, 0x86, 0x15, 0xed, 0x34, 0xac, 0x68, 0x7f, 0x87, 0x15, 0xed, 0xc7, 0x7f, 0x2a, 0x99, 0xcf, - 0x67, 0x55, 0x84, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc5, 0xba, 0xf8, 0x96, 0xa4, 0x0f, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go index 82c393899ef89..174e6f5f8826d 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -17,53 +17,23 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricSpec - MetricStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus - Scale - ScaleSpec - ScaleStatus -*/ package v1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + io "io" - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -80,114 +50,664 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_2bb1f2101a7f10e2, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() } +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_2bb1f2101a7f10e2, []int{4} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) } +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo + func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_2bb1f2101a7f10e2, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_2bb1f2101a7f10e2, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() } +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_2bb1f2101a7f10e2, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{8} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{9} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{10} +} 
+func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{11} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{12} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{13} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{14} +} +func (m *ResourceMetricSource) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{15} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{16} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{17} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{18} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} 
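Editorial note (not part of the vendored diff): further down in this hunk, sovGenerated is rewritten from a shift loop into (math_bits.Len64(x|1) + 6) / 7. A hedged sketch, assuming only the standard library math/bits package, showing that the closed form agrees with the old loop over representative values:

package main

import (
	"fmt"
	"math/bits"
)

// sovLoop is the old loop-based varint length: one byte per 7 bits.
func sovLoop(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// sovBits is the closed form used by the regenerated code:
// ceil(bitlen(x|1) / 7); the |1 makes zero still occupy one byte.
func sovBits(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1, 1 << 63} {
		fmt.Println(x, sovLoop(x), sovBits(x), sovLoop(x) == sovBits(x))
	}
}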
+func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") + proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricSource") + proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") + proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptor_2bb1f2101a7f10e2) +} + +var fileDescriptor_2bb1f2101a7f10e2 = []byte{ + // 1516 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, + 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, + 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, + 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, + 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, + 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, + 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, + 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, + 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, + 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, + 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 0xf9, 0x56, 0x6b, 0xd3, 0xb2, + 0x3d, 0xa7, 
0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, + 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, + 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, + 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, + 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, + 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, + 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, + 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, + 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 0x9e, 0xc0, 0x65, 0x00, + 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, + 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, + 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, + 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, + 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, + 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, + 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, + 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, + 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, + 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, + 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, + 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, + 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, + 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, + 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, + 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, + 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, + 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, + 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, + 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, + 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, + 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, + 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, + 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, + 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, + 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, + 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 0x34, + 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 
0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, + 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, + 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, + 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, + 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, + 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, + 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, + 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, + 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, + 0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, + 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, + 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, + 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, + 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 0x02, 0x66, 0x75, + 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, + 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, + 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, + 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, + 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, + 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, + 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, + 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, + 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, + 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, + 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, + 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, + 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, + 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, + 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, + 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, + 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, + 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, + 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, + 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, + 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, + 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, + 0xde, 0x1d, 0x30, 0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 
0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, + 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, + 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, + 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, + 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, + 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, + 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, + 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, + 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, + 0xf6, 0xbc, 0x38, 0xf6, 0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, + 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, + 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, } -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func init() { - proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") - proto.RegisterType((*ExternalMetricSource)(nil), 
"k8s.io.api.autoscaling.v1.ExternalMetricSource") - proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") - proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec") - proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") - proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") - proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") - proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") - proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") - proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") - proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") - proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") -} func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -195,29 +715,37 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -225,51 +753,63 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n1, err := m.MetricSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- + dAtA[i] = 0x22 } if m.TargetValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- + dAtA[i] = 0x1a } - if m.TargetAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n3, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -277,49 +817,61 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n4, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if m.CurrentAverageValue != nil { + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n4 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n5, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.CurrentAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n6, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -327,41 +879,52 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n8, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n9, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -369,41 +932,52 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n10, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := 
m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -411,37 +985,46 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -449,38 +1032,45 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n12, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TargetCPUUtilizationPercentage != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetCPUUtilizationPercentage)) + i-- + dAtA[i] = 0x20 } - i += n12 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if m.TargetCPUUtilizationPercentage != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetCPUUtilizationPercentage)) + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -488,43 +1078,50 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + if m.CurrentCPUUtilizationPercentage != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentCPUUtilizationPercentage)) + i-- + dAtA[i] = 0x28 } + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n13, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) - if m.CurrentCPUUtilizationPercentage != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentCPUUtilizationPercentage)) + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -532,61 +1129,75 @@ func (m *MetricSpec) Marshal() (dAtA []byte, err error) { } func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n14, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n15, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n16, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x22 } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n17, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -594,61 +1205,75 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n18, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n19, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n20, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n21, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n21 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil 
{ return nil, err } @@ -656,57 +1281,71 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n22, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n23, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n23 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n24, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n24 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n25, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n25 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -714,57 +1353,71 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n27, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n27 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n29, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n29 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -772,39 +1425,49 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n30, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n31, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n31 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -812,39 +1475,49 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n32, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if 
err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n33, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n33 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -852,36 +1525,44 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.TargetAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) - } if m.TargetAverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n34, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- + dAtA[i] = 0x1a + } + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -889,34 +1570,42 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.CurrentAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n35, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 } 
- i += n35 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -924,41 +1613,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n37, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n37 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n38, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n38 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -966,20 +1666,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -987,30 +1692,41 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x12 - i++ + i -= len(m.Selector) + copy(dAtA[i:], m.Selector) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) - i += copy(dAtA[i:], m.Selector) - return i, nil + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base 
:= offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -1023,6 +1739,9 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1043,6 +1762,9 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1061,6 +1783,9 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1073,6 +1798,9 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1089,6 +1817,9 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1103,6 +1834,9 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1118,6 +1852,9 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1136,6 +1873,9 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1160,6 +1900,9 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1184,6 +1927,9 @@ func (m *MetricStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1204,6 +1950,9 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1224,6 +1973,9 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1238,6 +1990,9 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1252,6 +2007,9 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1267,6 +2025,9 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1280,6 +2041,9 @@ func (m *ResourceMetricStatus) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1292,6 +2056,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1299,6 +2066,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n 
int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -1308,14 +2078,7 @@ func (m *ScaleStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1338,9 +2101,9 @@ func (this *ExternalMetricSource) String() string { } s := strings.Join([]string{`&ExternalMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1351,9 +2114,9 @@ func (this *ExternalMetricStatus) String() string { } s := strings.Join([]string{`&ExternalMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1363,7 +2126,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1377,7 +2140,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - 
`LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1388,9 +2151,14 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1414,7 +2182,7 @@ func (this *HorizontalPodAutoscalerStatus) String() string { } s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, `CurrentCPUUtilizationPercentage:` + valueToStringGenerated(this.CurrentCPUUtilizationPercentage) + `,`, @@ -1428,10 +2196,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -1442,10 +2210,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - 
`Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -1457,9 +2225,9 @@ func (this *ObjectMetricSource) String() string { s := strings.Join([]string{`&ObjectMetricSource{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1471,9 +2239,9 @@ func (this *ObjectMetricStatus) String() string { s := strings.Join([]string{`&ObjectMetricStatus{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1484,8 +2252,8 @@ func (this *PodsMetricSource) String() string { } s := strings.Join([]string{`&PodsMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", 
"k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1496,8 +2264,8 @@ func (this *PodsMetricStatus) String() string { } s := strings.Join([]string{`&PodsMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1509,7 +2277,7 @@ func (this *ResourceMetricSource) String() string { s := strings.Join([]string{`&ResourceMetricSource{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1521,7 +2289,7 @@ func (this *ResourceMetricStatus) String() string { s := strings.Join([]string{`&ResourceMetricStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1531,7 +2299,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1582,7 +2350,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1610,7 +2378,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1620,6 +2388,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1639,7 +2410,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1649,6 +2420,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1668,7 +2442,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1678,6 +2452,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1692,6 +2469,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1719,7 +2499,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1747,7 +2527,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1757,6 +2537,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1776,7 +2559,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1785,11 +2568,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1809,7 +2595,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1818,11 +2604,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetValue == nil { - m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetValue = &resource.Quantity{} } if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1842,7 +2631,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1851,11 +2640,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1870,6 +2662,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1897,7 +2692,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1925,7 +2720,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1935,6 +2730,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1954,7 +2752,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1963,11 +2761,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1987,7 +2788,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1996,6 +2797,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2017,7 +2821,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2026,11 +2830,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.CurrentAverageValue = &resource.Quantity{} } if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2045,6 +2852,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if 
(iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2072,7 +2882,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2100,7 +2910,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2109,6 +2919,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2130,7 +2943,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2139,6 +2952,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2160,7 +2976,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2169,6 +2985,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2185,6 +3004,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2212,7 +3034,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2240,7 +3062,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2250,6 +3072,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2269,7 +3094,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2279,6 +3104,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2298,7 +3126,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2307,6 +3135,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2328,7 +3159,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2338,6 +3169,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2357,7 +3191,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2367,6 +3201,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2381,6 +3218,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2408,7 +3248,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2436,7 +3276,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2445,6 +3285,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2466,7 +3309,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2475,6 +3318,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2492,6 +3338,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2519,7 +3368,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2547,7 +3396,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2556,6 +3405,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2577,7 +3429,7 @@ func (m 
*HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2597,7 +3449,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + m.MaxReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2616,7 +3468,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2631,6 +3483,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2658,7 +3513,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2686,7 +3541,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2706,7 +3561,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2715,11 +3570,14 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScaleTime = &v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2739,7 +3597,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2758,7 +3616,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + m.DesiredReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2777,7 +3635,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2792,6 +3650,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2819,7 +3680,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2847,7 +3708,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2857,6 +3718,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -2876,7 +3740,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2885,6 +3749,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2909,7 +3776,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2918,6 +3785,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2942,7 +3812,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2951,6 +3821,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2975,7 +3848,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2984,6 +3857,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3003,6 +3879,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3030,7 +3909,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3058,7 +3937,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3068,6 +3947,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3087,7 +3969,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3096,6 +3978,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3120,7 +4005,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3129,6 +4014,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -3153,7 +4041,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3162,6 +4050,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3186,7 +4077,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3195,6 +4086,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3214,6 +4108,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3241,7 +4138,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3269,7 +4166,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3278,6 +4175,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3299,7 +4199,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3309,6 +4209,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3328,7 +4231,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3337,6 +4240,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3358,7 +4264,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3367,11 +4273,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3391,7 +4300,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + 
msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3400,11 +4309,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3419,6 +4331,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3446,7 +4361,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3474,7 +4389,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3483,6 +4398,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3504,7 +4422,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3514,6 +4432,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3533,7 +4454,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3542,6 +4463,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3563,7 +4487,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3572,11 +4496,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3596,7 +4523,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3605,11 +4532,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = 
&k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3624,6 +4554,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3651,7 +4584,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3679,7 +4612,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3689,6 +4622,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3708,7 +4644,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3717,6 +4653,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3738,7 +4677,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3747,11 +4686,14 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3766,6 +4708,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3793,7 +4738,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3821,7 +4766,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3831,6 +4776,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3850,7 +4798,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3859,6 +4807,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -3880,7 +4831,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3889,11 +4840,14 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3908,6 +4862,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3935,7 +4892,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3963,7 +4920,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3973,6 +4930,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3992,7 +4952,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4012,7 +4972,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4021,11 +4981,14 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4040,6 +5003,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4067,7 +5033,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4095,7 +5061,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4105,6 +5071,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4124,7 +5093,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4144,7 +5113,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4153,6 +5122,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4169,6 +5141,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4196,7 +5171,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4224,7 +5199,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4233,6 +5208,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4254,7 +5232,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4263,6 +5241,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4284,7 +5265,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4293,6 +5274,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4309,6 +5293,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4336,7 +5323,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4364,7 +5351,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4378,6 +5365,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4405,7 +5395,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4433,7 +5423,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << 
shift if b < 0x80 { break } @@ -4452,7 +5442,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4462,6 +5452,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4476,6 +5469,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4542,10 +5538,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4574,6 +5573,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4592,106 +5594,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1516 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, - 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, - 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, - 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, - 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, - 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, - 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, - 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, - 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, - 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, - 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 0xf9, 0x56, 0x6b, 0xd3, 0xb2, - 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, - 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, - 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, - 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, - 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, - 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, - 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, - 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, - 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 
0x9e, 0xc0, 0x65, 0x00, - 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, - 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, - 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, - 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, - 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, - 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, - 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, - 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, - 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, - 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, - 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, - 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, - 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, - 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, - 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, - 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, - 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, - 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, - 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, - 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, - 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, - 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, - 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, - 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, - 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, - 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, - 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 0x34, - 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, - 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, - 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, - 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, - 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, - 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, - 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, - 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, - 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, - 
0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, - 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, - 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, - 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, - 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 0x02, 0x66, 0x75, - 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, - 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, - 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, - 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, - 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, - 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, - 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, - 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, - 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, - 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, - 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, - 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, - 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, - 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, - 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, - 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, - 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, - 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, - 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, - 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, - 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, - 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, - 0xde, 0x1d, 0x30, 0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, - 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, - 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, - 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, - 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, - 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, - 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, - 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, - 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, - 0xf6, 0xbc, 0x38, 0xf6, 
0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, - 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, - 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 5b56b2ac83166..f50ed9d1f0906 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v1"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -88,11 +88,11 @@ message ExternalMetricStatus { // configuration of a horizontal pod autoscaler. message HorizontalPodAutoscaler { - // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -141,7 +141,11 @@ message HorizontalPodAutoscalerSpec { // and will set the desired number of pods by using its Scale subresource. optional CrossVersionObjectReference scaleTargetRef = 1; - // lower limit for the number of pods that can be set by the autoscaler, default 1. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional optional int32 minReplicas = 2; @@ -380,15 +384,15 @@ message ResourceMetricStatus { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index c03af13ae21d4..519bd78694073 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -17,14 +17,14 @@ limitations under the License. package v1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` @@ -38,7 +38,11 @@ type HorizontalPodAutoscalerSpec struct { // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption // and will set the desired number of pods by using its Scale subresource. ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // lower limit for the number of pods that can be set by the autoscaler, default 1. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. @@ -78,11 +82,11 @@ type HorizontalPodAutoscalerStatus struct { // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -109,15 +113,15 @@ type HorizontalPodAutoscalerList struct { // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
// +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 72ac97271218d..129ce2b484e9a 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -64,8 +64,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", "status": "current information about the autoscaler.", } @@ -99,7 +99,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "specification of a horizontal pod autoscaler.", "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", - "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. 
Scaling is active as long as at least one metric value is available.", "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", } @@ -219,9 +219,9 @@ func (ResourceMetricStatus) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index ecb5ee7120cd0..0b6ed38158c65 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -17,50 +17,23 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto -/* - Package v2beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricSpec - MetricStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus -*/ package v2beta1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + io "io" - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. 
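The expanded minReplicas documentation above spells out the scale-to-zero rule for the v1 types in this diff: the field still defaults to 1, and a value of 0 is only honoured when the alpha HPAScaleToZero feature gate is enabled and at least one Object or External metric is configured. A minimal sketch of a spec built from these autoscaling/v1 types, assuming a hypothetical Deployment target and arbitrary replica/CPU values that are not part of the change:

package main

import (
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
)

// int32Ptr is an illustrative helper for the pointer fields; it is not part of the diff.
func int32Ptr(v int32) *int32 { return &v }

func exampleSpec() autoscalingv1.HorizontalPodAutoscalerSpec {
	return autoscalingv1.HorizontalPodAutoscalerSpec{
		ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
			Kind:       "Deployment",
			Name:       "web", // hypothetical target workload
			APIVersion: "apps/v1",
		},
		// Per the updated comment, 0 is accepted only with the alpha HPAScaleToZero
		// gate enabled and an Object or External metric configured; otherwise the
		// default of 1 applies.
		MinReplicas: int32Ptr(0),
		MaxReplicas: 5,
		// Average CPU target as a percentage of requested CPU; 50 is an arbitrary example.
		TargetCPUUtilizationPercentage: int32Ptr(50),
	}
}

func main() {
	s := exampleSpec()
	fmt.Println("minReplicas:", *s.MinReplicas, "maxReplicas:", s.MaxReplicas)
}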
@@ -77,76 +50,450 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_26c1bfc7a52d0478, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo + +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) } -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_26c1bfc7a52d0478, []int{4} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_26c1bfc7a52d0478, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) } +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo + func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_26c1bfc7a52d0478, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) } +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_26c1bfc7a52d0478, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{8} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{9} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{10} +} 
+func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) } -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{11} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{12} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{13} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{14} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{15} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta1.CrossVersionObjectReference") @@ -166,10 +513,112 @@ func init() { proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricSource") proto.RegisterType((*ResourceMetricStatus)(nil), 
"k8s.io.api.autoscaling.v2beta1.ResourceMetricStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptor_26c1bfc7a52d0478) +} + +var fileDescriptor_26c1bfc7a52d0478 = []byte{ + // 1475 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, + 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, + 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, + 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, + 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, + 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, + 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, + 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, + 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, + 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, + 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, + 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, + 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, + 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, + 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, + 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, + 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, + 0xf5, 0x4f, 0x88, 0xc5, 0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, + 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, + 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 0xb8, 0x82, 0x1d, + 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, + 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, + 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, + 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, + 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, + 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, + 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, + 0x81, 0x9b, 0x6d, 0x22, 0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, + 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, + 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, + 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, + 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, + 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, + 0xb7, 0x78, 
0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, + 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, + 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, + 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, + 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, + 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, + 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 0x65, 0xd5, 0x73, 0x6d, + 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, + 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, + 0x74, 0x02, 0x4b, 0x2f, 0xeb, 0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, + 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 0xae, 0x9f, 0x9d, 0x8c, + 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 0xf8, 0x3c, 0x58, + 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, + 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, + 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, + 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, + 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, + 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 0xbf, 0x89, 0x84, + 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, + 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, + 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, + 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, + 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, + 0x8e, 0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 0x40, 0x21, 0x56, 0xe9, 0xa7, + 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 0xf1, 0x3b, 0xc4, 0xbe, 0xa8, + 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, + 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, + 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, + 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, + 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, + 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, + 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, + 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, + 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, + 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, + 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 0x1f, 0x87, + 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 
0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, + 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, + 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, + 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, + 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, + 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, + 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, + 0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, + 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, + 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, + 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, + 0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, + 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, + 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, + 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, + 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, + 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, + 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, + 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, + 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, + 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, + 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, + 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, + 0x18, 0x00, 0x00, +} + func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -177,29 +626,37 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -207,51 +664,63 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n1, err := m.MetricSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- + dAtA[i] = 0x22 } if m.TargetValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- + dAtA[i] = 0x1a } - if m.TargetAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n3, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -259,49 +728,61 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n4, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if m.CurrentAverageValue != nil { + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n4 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n5, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.CurrentAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n6, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -309,41 +790,52 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n8, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n9, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -351,41 +843,52 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n10, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } 
- i += n10 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -393,37 +896,46 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -431,45 +943,54 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n12, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } } - i += n12 + i = encodeVarintGenerated(dAtA, 
i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, msg := range m.Metrics { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -477,62 +998,73 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n13, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - i += n13 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) if len(m.CurrentMetrics) > 0 { - for _, msg := range m.CurrentMetrics { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x2a } } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 + if m.LastScaleTime != nil { + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -540,61 +1072,75 @@ func (m *MetricSpec) Marshal() (dAtA []byte, err error) { } func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n14, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n15, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n16, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n17, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -602,61 +1148,75 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n18, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n19, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 
0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n20, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n21, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n21 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -664,57 +1224,71 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n22, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n23, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n23 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n24, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n24 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n25, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n25 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricStatus) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -722,57 +1296,71 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n27, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n27 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n29, err := m.AverageValue.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n29 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -780,39 +1368,49 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n30, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n31, err := m.Selector.MarshalTo(dAtA[i:]) + } + { 
+ size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n31 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -820,39 +1418,49 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n32, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n33, err := m.Selector.MarshalTo(dAtA[i:]) + } + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n33 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -860,36 +1468,44 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.TargetAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) - } if m.TargetAverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n34, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 + i-- + dAtA[i] = 0x1a } - return i, nil + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + 
return len(dAtA) - i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -897,40 +1513,53 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.CurrentAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n35, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 } - i += n35 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -943,6 +1572,9 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -963,6 +1595,9 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -981,6 +1616,9 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -993,6 +1631,9 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1009,6 +1650,9 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1023,6 +1667,9 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1041,6 +1688,9 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1068,6 +1718,9 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1092,6 +1745,9 @@ func (m *MetricSpec) Size() 
(n int) { } func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1116,6 +1772,9 @@ func (m *MetricStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1136,6 +1795,9 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Target.Size() @@ -1156,6 +1818,9 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1170,6 +1835,9 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.MetricName) @@ -1184,6 +1852,9 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1199,6 +1870,9 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1212,14 +1886,7 @@ func (m *ResourceMetricStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1242,9 +1909,9 @@ func (this *ExternalMetricSource) String() string { } s := strings.Join([]string{`&ExternalMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1255,9 +1922,9 @@ func (this *ExternalMetricStatus) String() string { } s := strings.Join([]string{`&ExternalMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + 
strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1267,7 +1934,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1281,7 +1948,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1292,9 +1959,14 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1303,11 +1975,16 @@ func (this *HorizontalPodAutoscalerSpec) String() string { if this == nil { return "nil" } + repeatedStringForMetrics := "[]MetricSpec{" + for _, f := range this.Metrics { + repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForMetrics += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, `}`, }, "") return s @@ -1316,13 +1993,23 @@ func (this *HorizontalPodAutoscalerStatus) String() string { if this == nil { return "nil" } + 
repeatedStringForCurrentMetrics := "[]MetricStatus{" + for _, f := range this.CurrentMetrics { + repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForCurrentMetrics += "}" + repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, + `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1333,10 +2020,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -1347,10 +2034,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + 
strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -1362,9 +2049,9 @@ func (this *ObjectMetricSource) String() string { s := strings.Join([]string{`&ObjectMetricSource{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1376,9 +2063,9 @@ func (this *ObjectMetricStatus) String() string { s := strings.Join([]string{`&ObjectMetricStatus{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1389,8 +2076,8 @@ func (this *PodsMetricSource) String() string { } s := strings.Join([]string{`&PodsMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1401,8 +2088,8 @@ func (this *PodsMetricStatus) String() string { } s := strings.Join([]string{`&PodsMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", 
this.MetricName) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1414,7 +2101,7 @@ func (this *ResourceMetricSource) String() string { s := strings.Join([]string{`&ResourceMetricSource{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1426,7 +2113,7 @@ func (this *ResourceMetricStatus) String() string { s := strings.Join([]string{`&ResourceMetricStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1454,7 +2141,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1482,7 +2169,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1492,6 +2179,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1511,7 +2201,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1521,6 +2211,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1540,7 +2233,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1550,6 +2243,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1561,7 +2257,10 @@ func (m 
*CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1591,7 +2290,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1619,7 +2318,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1629,6 +2328,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1648,7 +2350,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1657,11 +2359,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1681,7 +2386,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1690,11 +2395,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetValue == nil { - m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetValue = &resource.Quantity{} } if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1714,7 +2422,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1723,11 +2431,14 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1742,6 +2453,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1769,7 +2483,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1797,7 +2511,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } 
b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1807,6 +2521,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1826,7 +2543,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1835,11 +2552,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.MetricSelector = &v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1859,7 +2579,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1868,6 +2588,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1889,7 +2612,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1898,11 +2621,14 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.CurrentAverageValue = &resource.Quantity{} } if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1917,6 +2643,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1944,7 +2673,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1972,7 +2701,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1981,6 +2710,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2002,7 +2734,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2011,6 +2743,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { 
+ return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2032,7 +2767,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2041,6 +2776,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2057,6 +2795,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2084,7 +2825,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2112,7 +2853,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2122,6 +2863,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2141,7 +2885,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2151,6 +2895,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2170,7 +2917,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2179,6 +2926,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2200,7 +2950,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2210,6 +2960,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2229,7 +2982,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2239,6 +2992,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2253,6 +3009,9 @@ func (m 
*HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2280,7 +3039,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2308,7 +3067,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2317,6 +3076,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2338,7 +3100,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2347,6 +3109,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2364,6 +3129,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2391,7 +3159,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2419,7 +3187,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2428,6 +3196,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2449,7 +3220,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2469,7 +3240,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + m.MaxReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2488,7 +3259,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2497,6 +3268,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2514,6 +3288,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2541,7 +3318,7 @@ func (m 
*HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2569,7 +3346,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2589,7 +3366,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2598,11 +3375,14 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScaleTime = &v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2622,7 +3402,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2641,7 +3421,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + m.DesiredReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2660,7 +3440,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2669,6 +3449,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2691,7 +3474,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2700,6 +3483,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2717,6 +3503,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2744,7 +3533,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2772,7 +3561,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2782,6 +3571,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2801,7 +3593,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2810,6 +3602,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2834,7 +3629,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2843,6 +3638,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2867,7 +3665,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2876,6 +3674,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2900,7 +3701,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2909,6 +3710,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2928,6 +3732,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2955,7 +3762,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2983,7 +3790,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2993,6 +3800,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3012,7 +3822,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3021,6 +3831,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3045,7 +3858,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3054,6 +3867,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3078,7 +3894,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + 
msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3087,6 +3903,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3111,7 +3930,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3120,6 +3939,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3139,6 +3961,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3166,7 +3991,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3194,7 +4019,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3203,6 +4028,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3224,7 +4052,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3234,6 +4062,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3253,7 +4084,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3262,6 +4093,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3283,7 +4117,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3292,11 +4126,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3316,7 +4153,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3325,11 +4162,14 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3344,6 +4184,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3371,7 +4214,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3399,7 +4242,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3408,6 +4251,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3429,7 +4275,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3439,6 +4285,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3458,7 +4307,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3467,6 +4316,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3488,7 +4340,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3497,11 +4349,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3521,7 +4376,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3530,11 +4385,14 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
return err @@ -3549,6 +4407,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3576,7 +4437,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3604,7 +4465,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3614,6 +4475,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3633,7 +4497,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3642,6 +4506,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3663,7 +4530,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3672,11 +4539,14 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3691,6 +4561,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3718,7 +4591,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3746,7 +4619,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3756,6 +4629,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3775,7 +4651,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3784,6 +4660,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3805,7 +4684,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3814,11 +4693,14 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3833,6 +4715,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3860,7 +4745,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3888,7 +4773,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3898,6 +4783,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3917,7 +4805,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3937,7 +4825,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3946,11 +4834,14 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.TargetAverageValue = &resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3965,6 +4856,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3992,7 +4886,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4020,7 +4914,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4030,6 +4924,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4049,7 +4946,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4069,7 +4966,7 @@ func (m *ResourceMetricStatus) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4078,6 +4975,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4094,6 +4994,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4160,10 +5063,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4192,6 +5098,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4210,104 +5119,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1475 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, - 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, - 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, - 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, - 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, - 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, - 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, - 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, - 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, - 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, - 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, - 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, - 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, - 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, - 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, - 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, - 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, - 0xf5, 0x4f, 0x88, 0xc5, 0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, - 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, - 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 0xb8, 0x82, 0x1d, - 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 
0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, - 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, - 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, - 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, - 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, - 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, - 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, - 0x81, 0x9b, 0x6d, 0x22, 0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, - 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, - 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, - 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, - 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, - 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, - 0xb7, 0x78, 0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, - 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, - 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, - 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, - 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, - 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, - 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 0x65, 0xd5, 0x73, 0x6d, - 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, - 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, - 0x74, 0x02, 0x4b, 0x2f, 0xeb, 0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, - 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 0xae, 0x9f, 0x9d, 0x8c, - 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 0xf8, 0x3c, 0x58, - 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, - 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, - 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, - 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, - 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, - 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 0xbf, 0x89, 0x84, - 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, - 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, - 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, - 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, - 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, - 0x8e, 0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 
0x40, 0x21, 0x56, 0xe9, 0xa7, - 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 0xf1, 0x3b, 0xc4, 0xbe, 0xa8, - 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, - 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, - 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, - 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, - 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, - 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, - 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, - 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, - 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, - 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, - 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 0x1f, 0x87, - 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, - 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, - 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, - 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, - 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, - 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, - 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, - 0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, - 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, - 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, - 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, - 0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, - 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, - 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, - 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, - 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, - 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, - 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, - 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, - 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, - 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, - 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, - 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, - 0x18, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto 
b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 04bc0ed60143f..f90f93f9f4026 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v2beta1"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -92,12 +92,12 @@ message ExternalMetricStatus { // implementing the scale subresource based on the metrics specified. message HorizontalPodAutoscaler { // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -146,8 +146,11 @@ message HorizontalPodAutoscalerSpec { // should be collected, as well as to actually change the replica count. optional CrossVersionObjectReference scaleTargetRef = 1; - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional optional int32 minReplicas = 2; diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index 6a30e6774db51..92fe5a2b156cf 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -17,14 +17,14 @@ limitations under the License. package v2beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` @@ -38,8 +38,11 @@ type HorizontalPodAutoscalerSpec struct { // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics // should be collected, as well as to actually change the replica count. ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. @@ -377,12 +380,12 @@ type ExternalMetricStatus struct { type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index 589408ace0cea..a0d5f533758ec 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v2beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
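The minReplicas doc change above, repeated in generated.proto and types.go, is the one behavioural note in these files: 0 becomes a legal lower bound when the alpha HPAScaleToZero feature gate is enabled and at least one Object or External metric is configured. A minimal sketch of what that looks like from the Go side, using only types touched in this diff; the target names are hypothetical, not taken from the patch:

```go
package main

import (
	"fmt"

	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
)

func main() {
	zero := int32(0)
	spec := autoscalingv2beta1.HorizontalPodAutoscalerSpec{
		// scaleTargetRef points at the resource to scale; names are illustrative.
		ScaleTargetRef: autoscalingv2beta1.CrossVersionObjectReference{
			Kind:       "Deployment",
			Name:       "example-app",
			APIVersion: "apps/v1",
		},
		// Per the updated doc comment, 0 is only accepted when the alpha
		// HPAScaleToZero gate is on and at least one Object or External
		// metric is configured in Metrics.
		MinReplicas: &zero,
		MaxReplicas: 10,
	}
	fmt.Printf("minReplicas=%d maxReplicas=%d\n", *spec.MinReplicas, spec.MaxReplicas)
}
```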
var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -64,8 +64,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", - "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", "status": "status is the current information about the autoscaler.", } @@ -99,7 +99,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. 
See the individual metric source types for more information about how each type of metric must respond.", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index 1007d2756c2f1..23bc5b9832a6d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -17,53 +17,23 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto -/* - Package v2beta2 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricIdentifier - MetricSpec - MetricStatus - MetricTarget - MetricValueStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus -*/ package v2beta2 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + io "io" - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. 
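Every Unmarshal hunk in these regenerated files adds the same two guards after a varint length decode: postIndex (and iNdEx + skippy) must not go negative, on top of the existing comparison against l. A standalone sketch of that decode-and-guard pattern, with helper names of my own rather than the generated ones:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// readLengthPrefixed mimics the hardened pattern: decode a varint length,
// then reject both negative and out-of-range offsets before slicing.
func readLengthPrefixed(dAtA []byte, iNdEx int) (payload []byte, next int, err error) {
	l := len(dAtA)
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errors.New("proto: integer overflow")
		}
		if iNdEx >= l {
			return nil, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		msglen |= int(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	if msglen < 0 {
		return nil, 0, errInvalidLength
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 { // new guard: catches int overflow of iNdEx+msglen
		return nil, 0, errInvalidLength
	}
	if postIndex > l {
		return nil, 0, io.ErrUnexpectedEOF
	}
	return dAtA[iNdEx:postIndex], postIndex, nil
}

func main() {
	// 0x03 = length 3, followed by three payload bytes.
	buf := []byte{0x03, 'f', 'o', 'o'}
	payload, next, err := readLengthPrefixed(buf, 0)
	fmt.Println(string(payload), next, err) // foo 4 <nil>
}
```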
@@ -80,88 +50,534 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_592ad94d7d6be24f, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) } -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + 
xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_592ad94d7d6be24f, []int{4} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_592ad94d7d6be24f, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_592ad94d7d6be24f, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) } +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo + func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_592ad94d7d6be24f, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) } -func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } -func (*MetricIdentifier) ProtoMessage() {} -func (*MetricIdentifier) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } +func (*MetricIdentifier) ProtoMessage() {} +func (*MetricIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{8} +} +func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricIdentifier.Merge(m, src) +} +func (m *MetricIdentifier) XXX_Size() int { + return m.Size() +} +func (m *MetricIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_MetricIdentifier.DiscardUnknown(m) +} -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo -func (m *MetricTarget) Reset() { *m = MetricTarget{} } -func (*MetricTarget) ProtoMessage() {} -func (*MetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return 
fileDescriptor_592ad94d7d6be24f, []int{9} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} -func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } -func (*MetricValueStatus) ProtoMessage() {} -func (*MetricValueStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{10} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (m *MetricTarget) Reset() { *m = MetricTarget{} } +func (*MetricTarget) ProtoMessage() {} +func (*MetricTarget) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{11} +} +func (m *MetricTarget) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricTarget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricTarget) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricTarget.Merge(m, src) +} +func (m *MetricTarget) XXX_Size() int { + return m.Size() +} +func (m *MetricTarget) XXX_DiscardUnknown() { + xxx_messageInfo_MetricTarget.DiscardUnknown(m) +} -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_MetricTarget proto.InternalMessageInfo -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } 
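The XXX_Unmarshal / XXX_Marshal / XXX_Merge / XXX_Size / XXX_DiscardUnknown wrappers being added for every message type are plumbing for the gogo/protobuf runtime; callers do not invoke them directly. A small usage sketch, assuming the vendored packages in this diff, of round-tripping one regenerated v2beta2 message through the proto package, which dispatches into this generated code:

```go
package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
)

func main() {
	in := &autoscalingv2beta2.CrossVersionObjectReference{
		Kind:       "Deployment", // values are illustrative
		Name:       "example-app",
		APIVersion: "apps/v1",
	}
	raw, err := proto.Marshal(in) // uses the generated marshal path
	if err != nil {
		panic(err)
	}
	out := &autoscalingv2beta2.CrossVersionObjectReference{}
	if err := proto.Unmarshal(raw, out); err != nil { // uses the generated Unmarshal
		panic(err)
	}
	fmt.Println(out.Kind, out.Name, out.APIVersion)
}
```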
+func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } +func (*MetricValueStatus) ProtoMessage() {} +func (*MetricValueStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{12} +} +func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricValueStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricValueStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricValueStatus.Merge(m, src) +} +func (m *MetricValueStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricValueStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricValueStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{13} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{14} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{15} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) 
Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{16} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{17} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{18} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func init() { proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta2.CrossVersionObjectReference") @@ -184,10 +600,109 @@ func init() { proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricSource") proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptor_592ad94d7d6be24f) +} + +var fileDescriptor_592ad94d7d6be24f = []byte{ + // 1425 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x02, 0xff, 0xd4, 0x58, 0xdd, 0x6f, 0x1b, 0xc5, + 0x16, 0xcf, 0xda, 0x8e, 0x93, 0x8e, 0xd3, 0x24, 0x9d, 0x5b, 0xb5, 0x56, 0xaa, 0x6b, 0x47, 0xab, + 0xab, 0xab, 0x52, 0xd1, 0x35, 0x31, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x01, 0xda, 0x8a, 0xa4, 0x2d, + 0x93, 0xb4, 0x42, 0xa8, 0x45, 0x8c, 0x77, 0x4f, 0xdc, 0x21, 0xde, 0x5d, 0x6b, 0x76, 0x6c, 0x35, + 0x45, 0x42, 0xbc, 0xf0, 0x8e, 0x40, 0xfc, 0x13, 0x88, 0x17, 0x5e, 0x90, 0x78, 0xe4, 0x43, 0xa8, + 0x42, 0x08, 0xf5, 0xb1, 0x08, 0xc9, 0xa2, 0xe6, 0xbf, 0xe8, 0x13, 0xda, 0x99, 0xd9, 0xf5, 0xae, + 0xed, 0xc4, 0x4e, 0x95, 0x14, 0xf5, 0xcd, 0x33, 0xe7, 0x9c, 0xdf, 0xf9, 0x9c, 0x73, 0xce, 0x1a, + 0x5d, 0xda, 0x7d, 0x35, 0xb0, 0x98, 0x5f, 0xd9, 0x6d, 0xd7, 0x81, 0x7b, 0x20, 0x20, 0xa8, 0x74, + 0xc0, 0x73, 0x7c, 0x5e, 0xd1, 0x04, 0xda, 0x62, 0x15, 0xda, 0x16, 0x7e, 0x60, 0xd3, 0x26, 0xf3, + 0x1a, 0x95, 0x4e, 0xb5, 0x0e, 0x82, 0x56, 0x2b, 0x0d, 0xf0, 0x80, 0x53, 0x01, 0x8e, 0xd5, 0xe2, + 0xbe, 0xf0, 0x71, 0x49, 0xf1, 0x5b, 0xb4, 0xc5, 0xac, 0x04, 0xbf, 0xa5, 0xf9, 0x97, 0x2e, 0x36, + 0x98, 0xb8, 0xd3, 0xae, 0x5b, 0xb6, 0xef, 0x56, 0x1a, 0x7e, 0xc3, 0xaf, 0x48, 0xb1, 0x7a, 0x7b, + 0x47, 0x9e, 0xe4, 0x41, 0xfe, 0x52, 0x70, 0x4b, 0x66, 0x42, 0xbd, 0xed, 0x73, 0xa8, 0x74, 0x56, + 0x06, 0x55, 0x2e, 0xad, 0xf6, 0x79, 0x5c, 0x6a, 0xdf, 0x61, 0x1e, 0xf0, 0xbd, 0x4a, 0x6b, 0xb7, + 0x21, 0x85, 0x38, 0x04, 0x7e, 0x9b, 0xdb, 0x70, 0x28, 0xa9, 0xa0, 0xe2, 0x82, 0xa0, 0xa3, 0x74, + 0x55, 0xf6, 0x93, 0xe2, 0x6d, 0x4f, 0x30, 0x77, 0x58, 0xcd, 0xcb, 0xe3, 0x04, 0x02, 0xfb, 0x0e, + 0xb8, 0x74, 0x50, 0xce, 0xfc, 0xca, 0x40, 0xe7, 0xd6, 0xb9, 0x1f, 0x04, 0x37, 0x81, 0x07, 0xcc, + 0xf7, 0xae, 0xd5, 0x3f, 0x02, 0x5b, 0x10, 0xd8, 0x01, 0x0e, 0x9e, 0x0d, 0x78, 0x19, 0xe5, 0x76, + 0x99, 0xe7, 0x14, 0x8d, 0x65, 0xe3, 0xfc, 0x89, 0xda, 0xdc, 0xfd, 0x6e, 0x79, 0xaa, 0xd7, 0x2d, + 0xe7, 0xde, 0x61, 0x9e, 0x43, 0x24, 0x25, 0xe4, 0xf0, 0xa8, 0x0b, 0xc5, 0x4c, 0x9a, 0xe3, 0x2a, + 0x75, 0x81, 0x48, 0x0a, 0xae, 0x22, 0x44, 0x5b, 0x4c, 0x2b, 0x28, 0x66, 0x25, 0x1f, 0xd6, 0x7c, + 0x68, 0xed, 0xfa, 0x15, 0x4d, 0x21, 0x09, 0x2e, 0xf3, 0x17, 0x03, 0x9d, 0x7e, 0xeb, 0xae, 0x00, + 0xee, 0xd1, 0xe6, 0x26, 0x08, 0xce, 0xec, 0x2d, 0x19, 0x5f, 0xfc, 0x1e, 0xca, 0xbb, 0xf2, 0x2c, + 0x4d, 0x2a, 0x54, 0x5f, 0xb0, 0x0e, 0xae, 0x04, 0x4b, 0x49, 0x5f, 0x71, 0xc0, 0x13, 0x6c, 0x87, + 0x01, 0xaf, 0xcd, 0x6b, 0xd5, 0x79, 0x45, 0x21, 0x1a, 0x0f, 0x6f, 0xa3, 0xbc, 0xa0, 0xbc, 0x01, + 0x42, 0xba, 0x52, 0xa8, 0x3e, 0x3f, 0x19, 0xf2, 0xb6, 0x94, 0xe9, 0xa3, 0xaa, 0x33, 0xd1, 0x58, + 0xe6, 0xef, 0xc3, 0x8e, 0x08, 0x2a, 0xda, 0xc1, 0x31, 0x3a, 0x72, 0x0b, 0xcd, 0xd8, 0x6d, 0xce, + 0xc1, 0x8b, 0x3c, 0x59, 0x99, 0x0c, 0xfa, 0x26, 0x6d, 0xb6, 0x41, 0x59, 0x57, 0x5b, 0xd0, 0xd8, + 0x33, 0xeb, 0x0a, 0x89, 0x44, 0x90, 0xe6, 0x0f, 0x19, 0x74, 0xf6, 0xb2, 0xcf, 0xd9, 0x3d, 0xdf, + 0x13, 0xb4, 0x79, 0xdd, 0x77, 0xd6, 0x34, 0x20, 0x70, 0xfc, 0x21, 0x9a, 0x0d, 0x2b, 0xda, 0xa1, + 0x82, 0x8e, 0xf0, 0x2a, 0x2e, 0x4c, 0xab, 0xb5, 0xdb, 0x08, 0x2f, 0x02, 0x2b, 0xe4, 0xb6, 0x3a, + 0x2b, 0x96, 0x2a, 0xbb, 0x4d, 0x10, 0xb4, 0x5f, 0x19, 0xfd, 0x3b, 0x12, 0xa3, 0xe2, 0xdb, 0x28, + 0x17, 0xb4, 0xc0, 0xd6, 0x8e, 0xbd, 0x36, 0xce, 0xb1, 0x7d, 0x0c, 0xdd, 0x6a, 0x81, 0xdd, 0x2f, + 0xd5, 0xf0, 0x44, 0x24, 0x2c, 0x06, 0x94, 0x0f, 0x64, 0x00, 0x64, 0x99, 0x16, 0xaa, 0xaf, 0x3f, + 0xa9, 0x02, 0x15, 0xc5, 0x38, 0x43, 0xea, 0x4c, 0x34, 0xb8, 0xf9, 0x59, 0x16, 0x2d, 0xef, 0x23, + 0xb9, 0xee, 0x7b, 0x0e, 0x13, 0xcc, 0xf7, 0xf0, 0x65, 0x94, 0x13, 0x7b, 0x2d, 0xd0, 0x4f, 0x6f, + 0x35, 0xb2, 0x76, 0x7b, 0xaf, 0x05, 0x8f, 0xbb, 0xe5, 0xff, 0x8d, 
0x93, 0x0f, 0xf9, 0x88, 0x44, + 0xc0, 0x1b, 0xb1, 0x57, 0x99, 0x14, 0x96, 0x36, 0xeb, 0x71, 0xb7, 0x3c, 0xa2, 0xff, 0x59, 0x31, + 0x52, 0xda, 0x78, 0xdc, 0x41, 0xb8, 0x49, 0x03, 0xb1, 0xcd, 0xa9, 0x17, 0x28, 0x4d, 0xcc, 0x05, + 0x1d, 0xaf, 0x0b, 0x93, 0xa5, 0x3b, 0x94, 0xa8, 0x2d, 0x69, 0x2b, 0xf0, 0xc6, 0x10, 0x1a, 0x19, + 0xa1, 0x01, 0xff, 0x1f, 0xe5, 0x39, 0xd0, 0xc0, 0xf7, 0x8a, 0x39, 0xe9, 0x45, 0x1c, 0x5c, 0x22, + 0x6f, 0x89, 0xa6, 0xe2, 0xe7, 0xd0, 0x8c, 0x0b, 0x41, 0x40, 0x1b, 0x50, 0x9c, 0x96, 0x8c, 0x71, + 0x2d, 0x6f, 0xaa, 0x6b, 0x12, 0xd1, 0xcd, 0x3f, 0x0c, 0x74, 0x6e, 0x9f, 0x38, 0x6e, 0xb0, 0x40, + 0xe0, 0x5b, 0x43, 0xf5, 0x6c, 0x4d, 0xe6, 0x60, 0x28, 0x2d, 0xab, 0x79, 0x51, 0xeb, 0x9e, 0x8d, + 0x6e, 0x12, 0xb5, 0x7c, 0x0b, 0x4d, 0x33, 0x01, 0x6e, 0x98, 0x95, 0xec, 0xf9, 0x42, 0xf5, 0x95, + 0x27, 0xac, 0xb5, 0xda, 0x49, 0xad, 0x63, 0xfa, 0x4a, 0x88, 0x46, 0x14, 0xa8, 0xf9, 0x67, 0x66, + 0x5f, 0xdf, 0xc2, 0x82, 0xc7, 0x1f, 0xa3, 0x79, 0x79, 0xd2, 0xfd, 0x0a, 0x76, 0xb4, 0x87, 0x63, + 0xdf, 0xd4, 0x01, 0xe3, 0xa2, 0x76, 0x46, 0x9b, 0x32, 0xbf, 0x95, 0x82, 0x26, 0x03, 0xaa, 0xf0, + 0x0a, 0x2a, 0xb8, 0xcc, 0x23, 0xd0, 0x6a, 0x32, 0x9b, 0xaa, 0xb2, 0x9c, 0xae, 0x2d, 0xf4, 0xba, + 0xe5, 0xc2, 0x66, 0xff, 0x9a, 0x24, 0x79, 0xf0, 0x4b, 0xa8, 0xe0, 0xd2, 0xbb, 0xb1, 0x48, 0x56, + 0x8a, 0xfc, 0x47, 0xeb, 0x2b, 0x6c, 0xf6, 0x49, 0x24, 0xc9, 0x87, 0x6f, 0x84, 0xd5, 0x10, 0x76, + 0xb7, 0xa0, 0x98, 0x93, 0x61, 0xbe, 0x30, 0x59, 0x33, 0x94, 0x2d, 0x22, 0x51, 0x39, 0x12, 0x82, + 0x44, 0x58, 0xe6, 0x77, 0x39, 0xf4, 0xdf, 0x03, 0xdf, 0x3e, 0x7e, 0x1b, 0x61, 0xbf, 0x1e, 0x00, + 0xef, 0x80, 0x73, 0x49, 0x0d, 0xdd, 0x70, 0xfa, 0x85, 0x31, 0xce, 0xd6, 0xce, 0x84, 0x65, 0x7f, + 0x6d, 0x88, 0x4a, 0x46, 0x48, 0x60, 0x1b, 0x9d, 0x0c, 0x1f, 0x83, 0x0a, 0x28, 0xd3, 0x83, 0xf6, + 0x70, 0x2f, 0xed, 0x54, 0xaf, 0x5b, 0x3e, 0xb9, 0x91, 0x04, 0x21, 0x69, 0x4c, 0xbc, 0x86, 0x16, + 0x74, 0x7f, 0x1f, 0x08, 0xf0, 0x59, 0x1d, 0x81, 0x85, 0xf5, 0x34, 0x99, 0x0c, 0xf2, 0x87, 0x10, + 0x0e, 0x04, 0x8c, 0x83, 0x13, 0x43, 0xe4, 0xd2, 0x10, 0x6f, 0xa6, 0xc9, 0x64, 0x90, 0x1f, 0x37, + 0xd1, 0xbc, 0x46, 0xd5, 0xf1, 0x2e, 0x4e, 0xcb, 0x94, 0x4d, 0x38, 0x89, 0x75, 0xd3, 0x8d, 0x6b, + 0x70, 0x3d, 0x85, 0x45, 0x06, 0xb0, 0xb1, 0x40, 0xc8, 0x8e, 0x5a, 0x5c, 0x50, 0xcc, 0x4b, 0x4d, + 0x6f, 0x3c, 0xe1, 0x1b, 0x8c, 0x7b, 0x65, 0x7f, 0x7c, 0xc5, 0x57, 0x01, 0x49, 0xe8, 0x31, 0xbf, + 0x34, 0xd0, 0xe2, 0xe0, 0x24, 0x8f, 0x77, 0x28, 0x63, 0xdf, 0x1d, 0xea, 0x36, 0x9a, 0x0d, 0xa0, + 0x09, 0xb6, 0xf0, 0xb9, 0x2e, 0x80, 0x17, 0x27, 0xec, 0x44, 0xb4, 0x0e, 0xcd, 0x2d, 0x2d, 0x5a, + 0x9b, 0x0b, 0x5b, 0x51, 0x74, 0x22, 0x31, 0xa4, 0xf9, 0x75, 0x16, 0xa1, 0x7e, 0xdd, 0xe3, 0xd5, + 0xd4, 0xe8, 0x59, 0x1e, 0x18, 0x3d, 0x8b, 0xc9, 0x85, 0x2c, 0x31, 0x66, 0x6e, 0xa2, 0xbc, 0x2f, + 0xfb, 0x81, 0xb6, 0xb0, 0x3a, 0x2e, 0x98, 0xf1, 0x84, 0x8f, 0xd1, 0x6a, 0x28, 0x6c, 0xe8, 0xba, + 0xab, 0x68, 0x34, 0x7c, 0x15, 0xe5, 0x5a, 0xbe, 0x13, 0x8d, 0xe4, 0xb1, 0x7b, 0xd2, 0x75, 0xdf, + 0x09, 0x52, 0x98, 0xb3, 0xa1, 0xed, 0xe1, 0x2d, 0x91, 0x38, 0xf8, 0x03, 0x34, 0x1b, 0xad, 0xeb, + 0xb2, 0x44, 0x0b, 0xd5, 0xd5, 0x71, 0x98, 0x44, 0xf3, 0xa7, 0x70, 0x65, 0x30, 0x23, 0x0a, 0x89, + 0x31, 0x43, 0x7c, 0xd0, 0x1b, 0x9f, 0x9c, 0x40, 0x13, 0xe0, 0x8f, 0x5a, 0x75, 0x15, 0x7e, 0x44, + 0x21, 0x31, 0xa6, 0xf9, 0x4d, 0x16, 0xcd, 0xa5, 0x56, 0xc9, 0x7f, 0x23, 0x5d, 0xea, 0xad, 0x1d, + 0x6d, 0xba, 0x14, 0xe6, 0xd1, 0xa7, 0x4b, 0xe1, 0x1e, 0x5f, 0xba, 0x12, 0xf8, 0x23, 0xd2, 0xf5, + 0x53, 0x26, 0x4a, 0x97, 0x9a, 0x7f, 0x93, 0xa5, 0x4b, 0xf1, 0x26, 0xd2, 0x75, 0x0d, 0x4d, 
0x77, + 0xc2, 0x05, 0x5d, 0x67, 0xeb, 0xc0, 0x45, 0xc4, 0x8a, 0x9c, 0xb3, 0xde, 0x6d, 0x53, 0x4f, 0x30, + 0xb1, 0x57, 0x3b, 0x11, 0x2e, 0x08, 0x72, 0xc3, 0x27, 0x0a, 0x07, 0x3b, 0x68, 0x8e, 0x76, 0x80, + 0xd3, 0x06, 0xc8, 0x6b, 0x9d, 0xaf, 0xc3, 0xe2, 0x2e, 0xf6, 0xba, 0xe5, 0xb9, 0xb5, 0x04, 0x0e, + 0x49, 0xa1, 0x86, 0x63, 0x50, 0x9f, 0x6f, 0x08, 0xd6, 0x64, 0xf7, 0xd4, 0x18, 0x54, 0x93, 0x41, + 0x8e, 0xc1, 0xb5, 0x21, 0x2a, 0x19, 0x21, 0x61, 0x7e, 0x91, 0x41, 0xa7, 0x86, 0x3e, 0x53, 0xfa, + 0x41, 0x31, 0x8e, 0x29, 0x28, 0x99, 0xa7, 0x18, 0x94, 0xec, 0xa1, 0x83, 0xf2, 0x73, 0x06, 0xe1, + 0xe1, 0x26, 0x8a, 0x3f, 0x91, 0xa3, 0xd8, 0xe6, 0xac, 0x0e, 0x8e, 0x22, 0x1f, 0xc5, 0x6e, 0x97, + 0x9c, 0xe3, 0x49, 0x6c, 0x32, 0xa8, 0xec, 0x78, 0xbe, 0xa4, 0x13, 0x1f, 0xcc, 0xd9, 0xa3, 0xfd, + 0x60, 0x36, 0x7f, 0x1b, 0x0c, 0xe3, 0x33, 0xfd, 0x85, 0x3e, 0x2a, 0xfd, 0xd9, 0xa7, 0x98, 0x7e, + 0xf3, 0x47, 0x03, 0x2d, 0x0e, 0x0e, 0xe1, 0x67, 0xee, 0x7f, 0x9b, 0x5f, 0xd3, 0x4e, 0x3c, 0xdb, + 0xff, 0xd9, 0x7c, 0x6b, 0xa0, 0xd3, 0xa3, 0x56, 0x18, 0xbc, 0x9e, 0x5a, 0x3c, 0x2b, 0xc9, 0xc5, + 0xf3, 0x71, 0xb7, 0x5c, 0x1e, 0xf1, 0xaf, 0x40, 0x04, 0x93, 0xd8, 0x4d, 0x8f, 0x27, 0x01, 0xdf, + 0x0f, 0xdb, 0xac, 0x92, 0x70, 0x24, 0x36, 0x1f, 0x6b, 0xbc, 0x6b, 0x17, 0xef, 0x3f, 0x2a, 0x4d, + 0x3d, 0x78, 0x54, 0x9a, 0x7a, 0xf8, 0xa8, 0x34, 0xf5, 0x69, 0xaf, 0x64, 0xdc, 0xef, 0x95, 0x8c, + 0x07, 0xbd, 0x92, 0xf1, 0xb0, 0x57, 0x32, 0xfe, 0xea, 0x95, 0x8c, 0xcf, 0xff, 0x2e, 0x4d, 0xbd, + 0x3f, 0xa3, 0xa1, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xa0, 0xce, 0xf5, 0x16, 0x17, 0x00, + 0x00, +} + func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -195,29 +710,37 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -225,33 +748,42 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n1, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n2, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -259,33 +791,42 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n3, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n4, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -293,41 +834,52 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n6, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n7, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -335,41 +887,52 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -377,37 +940,46 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -415,45 +987,54 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n10, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } } - i += n10 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, msg := range m.Metrics { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -461,62 +1042,73 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n11, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - i += n11 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) if len(m.CurrentMetrics) > 0 { - for _, msg := range m.CurrentMetrics { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) 
- n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x2a } } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 + if m.LastScaleTime != nil { + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -524,31 +1116,39 @@ func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { } func (m *MetricIdentifier) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricIdentifier) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n12, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -556,61 +1156,75 @@ func (m *MetricSpec) Marshal() (dAtA []byte, err error) { } func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n13, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n14, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, 
i, uint64(size)) } - i += n14 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n15, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n15 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n16, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -618,61 +1232,75 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n17, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n18, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n18 + i-- + dAtA[i] = 0x2a } if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n19, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0x1a } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n20, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricTarget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -680,46 +1308,56 @@ func (m *MetricTarget) Marshal() (dAtA []byte, err error) { } func (m *MetricTarget) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricTarget) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Value != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value.Size())) - n21, err := m.Value.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x20 } if m.AverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n22, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 + i-- + dAtA[i] = 0x1a } - if m.AverageUtilization != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MetricValueStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -727,42 +1365,51 @@ func (m *MetricValueStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricValueStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricValueStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Value != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value.Size())) - n23, err := m.Value.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x18 } if m.AverageValue != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n24, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- + dAtA[i] = 0x12 } - if m.AverageUtilization != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -770,41 +1417,52 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DescribedObject.Size())) - n25, err := m.DescribedObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n26 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n27, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n27 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -812,41 +1470,52 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n28, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n29, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n29 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DescribedObject.Size())) - n30, err := m.DescribedObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n30 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil 
} func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -854,33 +1523,42 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n31, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n31 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n32, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -888,33 +1566,42 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n33, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n33 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n34, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n34 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -922,29 +1609,37 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n35, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err 
!= nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n35 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -952,35 +1647,48 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n36, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n36 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -993,6 +1701,9 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1003,6 +1714,9 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1013,6 +1727,9 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1025,6 +1742,9 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1041,6 +1761,9 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1055,6 +1778,9 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1073,6 +1799,9 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.ObservedGeneration != nil { @@ -1100,6 +1829,9 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricIdentifier) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1112,6 +1844,9 @@ func (m *MetricIdentifier) Size() (n int) { } func 
(m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1136,6 +1871,9 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1160,6 +1898,9 @@ func (m *MetricStatus) Size() (n int) { } func (m *MetricTarget) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1179,6 +1920,9 @@ func (m *MetricTarget) Size() (n int) { } func (m *MetricValueStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Value != nil { @@ -1196,6 +1940,9 @@ func (m *MetricValueStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.DescribedObject.Size() @@ -1208,6 +1955,9 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1220,6 +1970,9 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1230,6 +1983,9 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Metric.Size() @@ -1240,6 +1996,9 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1250,6 +2009,9 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1260,14 +2022,7 @@ func (m *ResourceMetricStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1311,7 +2066,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1325,7 +2080,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1336,9 +2091,14 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return 
"nil" } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1347,11 +2107,16 @@ func (this *HorizontalPodAutoscalerSpec) String() string { if this == nil { return "nil" } + repeatedStringForMetrics := "[]MetricSpec{" + for _, f := range this.Metrics { + repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForMetrics += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, `}`, }, "") return s @@ -1360,13 +2125,23 @@ func (this *HorizontalPodAutoscalerStatus) String() string { if this == nil { return "nil" } + repeatedStringForCurrentMetrics := "[]MetricStatus{" + for _, f := range this.CurrentMetrics { + repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForCurrentMetrics += "}" + repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, + `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -1377,7 +2152,7 @@ func (this *MetricIdentifier) String() string { } s := 
strings.Join([]string{`&MetricIdentifier{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -1388,10 +2163,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -1402,10 +2177,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -1416,8 +2191,8 @@ func (this *MetricTarget) String() string { } s := strings.Join([]string{`&MetricTarget{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, `}`, }, "") @@ -1428,8 +2203,8 @@ func (this *MetricValueStatus) String() string { return "nil" } s := strings.Join([]string{`&MetricValueStatus{`, - `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - 
`AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, `}`, }, "") @@ -1526,7 +2301,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1554,7 +2329,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1564,6 +2339,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1583,7 +2361,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1593,6 +2371,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1612,7 +2393,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1622,6 +2403,9 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1633,7 +2417,10 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -1663,7 +2450,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1691,7 +2478,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1700,6 +2487,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1721,7 +2511,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1730,6 +2520,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1746,6 
+2539,9 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1773,7 +2569,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1801,7 +2597,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1810,6 +2606,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1831,7 +2630,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1840,6 +2639,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1856,6 +2658,9 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1883,7 +2688,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1911,7 +2716,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1920,6 +2725,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1941,7 +2749,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1950,6 +2758,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1971,7 +2782,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1980,6 +2791,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1996,6 +2810,9 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2023,7 +2840,7 @@ func (m *HorizontalPodAutoscalerCondition) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2051,7 +2868,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2061,6 +2878,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2080,7 +2900,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2090,6 +2910,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2109,7 +2932,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2118,6 +2941,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2139,7 +2965,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2149,6 +2975,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2168,7 +2997,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2178,6 +3007,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2192,6 +3024,9 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2219,7 +3054,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2247,7 +3082,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2256,6 +3091,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > 
l { return io.ErrUnexpectedEOF } @@ -2277,7 +3115,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2286,6 +3124,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2303,6 +3144,9 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2330,7 +3174,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2358,7 +3202,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2367,6 +3211,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2388,7 +3235,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2408,7 +3255,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift + m.MaxReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2427,7 +3274,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2436,6 +3283,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2453,6 +3303,9 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2480,7 +3333,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2508,7 +3361,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2528,7 +3381,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2537,11 +3390,14 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - 
m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScaleTime = &v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2561,7 +3417,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift + m.CurrentReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2580,7 +3436,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift + m.DesiredReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2599,7 +3455,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2608,6 +3464,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2630,7 +3489,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2639,6 +3498,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2656,6 +3518,9 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2683,7 +3548,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2711,7 +3576,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2721,6 +3586,9 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2740,7 +3608,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2749,11 +3617,14 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2768,6 +3639,9 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2795,7 +3669,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2823,7 +3697,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2833,6 +3707,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2852,7 +3729,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2861,6 +3738,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2885,7 +3765,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2894,6 +3774,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2918,7 +3801,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2927,6 +3810,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2951,7 +3837,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2960,6 +3846,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2979,6 +3868,9 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3006,7 +3898,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3034,7 +3926,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3044,6 +3936,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3063,7 +3958,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3072,6 +3967,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3096,7 +3994,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3105,6 +4003,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3129,7 +4030,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3138,6 +4039,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3162,7 +4066,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3171,6 +4075,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3190,6 +4097,9 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3217,7 +4127,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3245,7 +4155,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3255,6 +4165,9 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3274,7 +4187,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3283,11 +4196,14 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Value == nil { - m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.Value = &resource.Quantity{} } if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3307,7 +4223,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3316,11 +4232,14 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if 
err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3340,7 +4259,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3355,6 +4274,9 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3382,7 +4304,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3410,7 +4332,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3419,11 +4341,14 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Value == nil { - m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.Value = &resource.Quantity{} } if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3443,7 +4368,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3452,11 +4377,14 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + m.AverageValue = &resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3476,7 +4404,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3491,6 +4419,9 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3518,7 +4449,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3546,7 +4477,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3555,6 +4486,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3576,7 +4510,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3585,6 +4519,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -3606,7 +4543,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3615,6 +4552,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3631,6 +4571,9 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3658,7 +4601,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3686,7 +4629,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3695,6 +4638,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3716,7 +4662,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3725,6 +4671,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3746,7 +4695,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3755,6 +4704,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3771,6 +4723,9 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3798,7 +4753,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3826,7 +4781,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3835,6 +4790,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3856,7 +4814,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3865,6 +4823,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3881,6 +4842,9 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3908,7 +4872,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3936,7 +4900,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3945,6 +4909,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3966,7 +4933,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3975,6 +4942,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3991,6 +4961,9 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4018,7 +4991,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4046,7 +5019,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4056,6 +5029,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4075,7 +5051,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4084,6 +5060,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4100,6 +5079,9 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4127,7 +5109,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4155,7 +5137,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-4165,6 +5147,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4184,7 +5169,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4193,6 +5178,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4209,6 +5197,9 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4275,10 +5266,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4307,6 +5301,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4325,101 +5322,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1425 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdd, 0x6f, 0x1b, 0xc5, - 0x16, 0xcf, 0xda, 0x8e, 0x93, 0x8e, 0xd3, 0x24, 0x9d, 0x5b, 0xb5, 0x56, 0xaa, 0x6b, 0x47, 0xab, - 0xab, 0xab, 0x52, 0xd1, 0x35, 0x31, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x01, 0xda, 0x8a, 0xa4, 0x2d, - 0x93, 0xb4, 0x42, 0xa8, 0x45, 0x8c, 0x77, 0x4f, 0xdc, 0x21, 0xde, 0x5d, 0x6b, 0x76, 0x6c, 0x35, - 0x45, 0x42, 0xbc, 0xf0, 0x8e, 0x40, 0xfc, 0x13, 0x88, 0x17, 0x5e, 0x90, 0x78, 0xe4, 0x43, 0xa8, - 0x42, 0x08, 0xf5, 0xb1, 0x08, 0xc9, 0xa2, 0xe6, 0xbf, 0xe8, 0x13, 0xda, 0x99, 0xd9, 0xf5, 0xae, - 0xed, 0xc4, 0x4e, 0x95, 0x14, 0xf5, 0xcd, 0x33, 0xe7, 0x9c, 0xdf, 0xf9, 0x9c, 0x73, 0xce, 0x1a, - 0x5d, 0xda, 0x7d, 0x35, 0xb0, 0x98, 0x5f, 0xd9, 0x6d, 0xd7, 0x81, 0x7b, 0x20, 0x20, 0xa8, 0x74, - 0xc0, 0x73, 0x7c, 0x5e, 0xd1, 0x04, 0xda, 0x62, 0x15, 0xda, 0x16, 0x7e, 0x60, 0xd3, 0x26, 0xf3, - 0x1a, 0x95, 0x4e, 0xb5, 0x0e, 0x82, 0x56, 0x2b, 0x0d, 0xf0, 0x80, 0x53, 0x01, 0x8e, 0xd5, 0xe2, - 0xbe, 0xf0, 0x71, 0x49, 0xf1, 0x5b, 0xb4, 0xc5, 0xac, 0x04, 0xbf, 0xa5, 0xf9, 0x97, 0x2e, 0x36, - 0x98, 0xb8, 0xd3, 0xae, 0x5b, 0xb6, 0xef, 0x56, 0x1a, 0x7e, 0xc3, 0xaf, 0x48, 0xb1, 0x7a, 0x7b, - 0x47, 0x9e, 0xe4, 0x41, 0xfe, 0x52, 0x70, 0x4b, 0x66, 0x42, 0xbd, 0xed, 0x73, 0xa8, 0x74, 0x56, - 0x06, 0x55, 0x2e, 0xad, 0xf6, 0x79, 0x5c, 0x6a, 0xdf, 0x61, 0x1e, 0xf0, 0xbd, 0x4a, 0x6b, 0xb7, - 0x21, 0x85, 0x38, 0x04, 0x7e, 0x9b, 0xdb, 0x70, 0x28, 0xa9, 0xa0, 0xe2, 0x82, 0xa0, 0xa3, 0x74, - 0x55, 0xf6, 0x93, 0xe2, 0x6d, 0x4f, 0x30, 0x77, 0x58, 0xcd, 0xcb, 0xe3, 0x04, 0x02, 0xfb, 0x0e, - 0xb8, 0x74, 0x50, 0xce, 0xfc, 0xca, 0x40, 0xe7, 0xd6, 0xb9, 0x1f, 0x04, 0x37, 0x81, 0x07, 0xcc, - 0xf7, 0xae, 0xd5, 0x3f, 
0x02, 0x5b, 0x10, 0xd8, 0x01, 0x0e, 0x9e, 0x0d, 0x78, 0x19, 0xe5, 0x76, - 0x99, 0xe7, 0x14, 0x8d, 0x65, 0xe3, 0xfc, 0x89, 0xda, 0xdc, 0xfd, 0x6e, 0x79, 0xaa, 0xd7, 0x2d, - 0xe7, 0xde, 0x61, 0x9e, 0x43, 0x24, 0x25, 0xe4, 0xf0, 0xa8, 0x0b, 0xc5, 0x4c, 0x9a, 0xe3, 0x2a, - 0x75, 0x81, 0x48, 0x0a, 0xae, 0x22, 0x44, 0x5b, 0x4c, 0x2b, 0x28, 0x66, 0x25, 0x1f, 0xd6, 0x7c, - 0x68, 0xed, 0xfa, 0x15, 0x4d, 0x21, 0x09, 0x2e, 0xf3, 0x17, 0x03, 0x9d, 0x7e, 0xeb, 0xae, 0x00, - 0xee, 0xd1, 0xe6, 0x26, 0x08, 0xce, 0xec, 0x2d, 0x19, 0x5f, 0xfc, 0x1e, 0xca, 0xbb, 0xf2, 0x2c, - 0x4d, 0x2a, 0x54, 0x5f, 0xb0, 0x0e, 0xae, 0x04, 0x4b, 0x49, 0x5f, 0x71, 0xc0, 0x13, 0x6c, 0x87, - 0x01, 0xaf, 0xcd, 0x6b, 0xd5, 0x79, 0x45, 0x21, 0x1a, 0x0f, 0x6f, 0xa3, 0xbc, 0xa0, 0xbc, 0x01, - 0x42, 0xba, 0x52, 0xa8, 0x3e, 0x3f, 0x19, 0xf2, 0xb6, 0x94, 0xe9, 0xa3, 0xaa, 0x33, 0xd1, 0x58, - 0xe6, 0xef, 0xc3, 0x8e, 0x08, 0x2a, 0xda, 0xc1, 0x31, 0x3a, 0x72, 0x0b, 0xcd, 0xd8, 0x6d, 0xce, - 0xc1, 0x8b, 0x3c, 0x59, 0x99, 0x0c, 0xfa, 0x26, 0x6d, 0xb6, 0x41, 0x59, 0x57, 0x5b, 0xd0, 0xd8, - 0x33, 0xeb, 0x0a, 0x89, 0x44, 0x90, 0xe6, 0x0f, 0x19, 0x74, 0xf6, 0xb2, 0xcf, 0xd9, 0x3d, 0xdf, - 0x13, 0xb4, 0x79, 0xdd, 0x77, 0xd6, 0x34, 0x20, 0x70, 0xfc, 0x21, 0x9a, 0x0d, 0x2b, 0xda, 0xa1, - 0x82, 0x8e, 0xf0, 0x2a, 0x2e, 0x4c, 0xab, 0xb5, 0xdb, 0x08, 0x2f, 0x02, 0x2b, 0xe4, 0xb6, 0x3a, - 0x2b, 0x96, 0x2a, 0xbb, 0x4d, 0x10, 0xb4, 0x5f, 0x19, 0xfd, 0x3b, 0x12, 0xa3, 0xe2, 0xdb, 0x28, - 0x17, 0xb4, 0xc0, 0xd6, 0x8e, 0xbd, 0x36, 0xce, 0xb1, 0x7d, 0x0c, 0xdd, 0x6a, 0x81, 0xdd, 0x2f, - 0xd5, 0xf0, 0x44, 0x24, 0x2c, 0x06, 0x94, 0x0f, 0x64, 0x00, 0x64, 0x99, 0x16, 0xaa, 0xaf, 0x3f, - 0xa9, 0x02, 0x15, 0xc5, 0x38, 0x43, 0xea, 0x4c, 0x34, 0xb8, 0xf9, 0x59, 0x16, 0x2d, 0xef, 0x23, - 0xb9, 0xee, 0x7b, 0x0e, 0x13, 0xcc, 0xf7, 0xf0, 0x65, 0x94, 0x13, 0x7b, 0x2d, 0xd0, 0x4f, 0x6f, - 0x35, 0xb2, 0x76, 0x7b, 0xaf, 0x05, 0x8f, 0xbb, 0xe5, 0xff, 0x8d, 0x93, 0x0f, 0xf9, 0x88, 0x44, - 0xc0, 0x1b, 0xb1, 0x57, 0x99, 0x14, 0x96, 0x36, 0xeb, 0x71, 0xb7, 0x3c, 0xa2, 0xff, 0x59, 0x31, - 0x52, 0xda, 0x78, 0xdc, 0x41, 0xb8, 0x49, 0x03, 0xb1, 0xcd, 0xa9, 0x17, 0x28, 0x4d, 0xcc, 0x05, - 0x1d, 0xaf, 0x0b, 0x93, 0xa5, 0x3b, 0x94, 0xa8, 0x2d, 0x69, 0x2b, 0xf0, 0xc6, 0x10, 0x1a, 0x19, - 0xa1, 0x01, 0xff, 0x1f, 0xe5, 0x39, 0xd0, 0xc0, 0xf7, 0x8a, 0x39, 0xe9, 0x45, 0x1c, 0x5c, 0x22, - 0x6f, 0x89, 0xa6, 0xe2, 0xe7, 0xd0, 0x8c, 0x0b, 0x41, 0x40, 0x1b, 0x50, 0x9c, 0x96, 0x8c, 0x71, - 0x2d, 0x6f, 0xaa, 0x6b, 0x12, 0xd1, 0xcd, 0x3f, 0x0c, 0x74, 0x6e, 0x9f, 0x38, 0x6e, 0xb0, 0x40, - 0xe0, 0x5b, 0x43, 0xf5, 0x6c, 0x4d, 0xe6, 0x60, 0x28, 0x2d, 0xab, 0x79, 0x51, 0xeb, 0x9e, 0x8d, - 0x6e, 0x12, 0xb5, 0x7c, 0x0b, 0x4d, 0x33, 0x01, 0x6e, 0x98, 0x95, 0xec, 0xf9, 0x42, 0xf5, 0x95, - 0x27, 0xac, 0xb5, 0xda, 0x49, 0xad, 0x63, 0xfa, 0x4a, 0x88, 0x46, 0x14, 0xa8, 0xf9, 0x67, 0x66, - 0x5f, 0xdf, 0xc2, 0x82, 0xc7, 0x1f, 0xa3, 0x79, 0x79, 0xd2, 0xfd, 0x0a, 0x76, 0xb4, 0x87, 0x63, - 0xdf, 0xd4, 0x01, 0xe3, 0xa2, 0x76, 0x46, 0x9b, 0x32, 0xbf, 0x95, 0x82, 0x26, 0x03, 0xaa, 0xf0, - 0x0a, 0x2a, 0xb8, 0xcc, 0x23, 0xd0, 0x6a, 0x32, 0x9b, 0xaa, 0xb2, 0x9c, 0xae, 0x2d, 0xf4, 0xba, - 0xe5, 0xc2, 0x66, 0xff, 0x9a, 0x24, 0x79, 0xf0, 0x4b, 0xa8, 0xe0, 0xd2, 0xbb, 0xb1, 0x48, 0x56, - 0x8a, 0xfc, 0x47, 0xeb, 0x2b, 0x6c, 0xf6, 0x49, 0x24, 0xc9, 0x87, 0x6f, 0x84, 0xd5, 0x10, 0x76, - 0xb7, 0xa0, 0x98, 0x93, 0x61, 0xbe, 0x30, 0x59, 0x33, 0x94, 0x2d, 0x22, 0x51, 0x39, 0x12, 0x82, - 0x44, 0x58, 0xe6, 0x77, 0x39, 0xf4, 0xdf, 0x03, 0xdf, 0x3e, 0x7e, 0x1b, 0x61, 0xbf, 0x1e, 0x00, - 0xef, 0x80, 0x73, 0x49, 0x0d, 0xdd, 0x70, 0xfa, 
0x85, 0x31, 0xce, 0xd6, 0xce, 0x84, 0x65, 0x7f, - 0x6d, 0x88, 0x4a, 0x46, 0x48, 0x60, 0x1b, 0x9d, 0x0c, 0x1f, 0x83, 0x0a, 0x28, 0xd3, 0x83, 0xf6, - 0x70, 0x2f, 0xed, 0x54, 0xaf, 0x5b, 0x3e, 0xb9, 0x91, 0x04, 0x21, 0x69, 0x4c, 0xbc, 0x86, 0x16, - 0x74, 0x7f, 0x1f, 0x08, 0xf0, 0x59, 0x1d, 0x81, 0x85, 0xf5, 0x34, 0x99, 0x0c, 0xf2, 0x87, 0x10, - 0x0e, 0x04, 0x8c, 0x83, 0x13, 0x43, 0xe4, 0xd2, 0x10, 0x6f, 0xa6, 0xc9, 0x64, 0x90, 0x1f, 0x37, - 0xd1, 0xbc, 0x46, 0xd5, 0xf1, 0x2e, 0x4e, 0xcb, 0x94, 0x4d, 0x38, 0x89, 0x75, 0xd3, 0x8d, 0x6b, - 0x70, 0x3d, 0x85, 0x45, 0x06, 0xb0, 0xb1, 0x40, 0xc8, 0x8e, 0x5a, 0x5c, 0x50, 0xcc, 0x4b, 0x4d, - 0x6f, 0x3c, 0xe1, 0x1b, 0x8c, 0x7b, 0x65, 0x7f, 0x7c, 0xc5, 0x57, 0x01, 0x49, 0xe8, 0x31, 0xbf, - 0x34, 0xd0, 0xe2, 0xe0, 0x24, 0x8f, 0x77, 0x28, 0x63, 0xdf, 0x1d, 0xea, 0x36, 0x9a, 0x0d, 0xa0, - 0x09, 0xb6, 0xf0, 0xb9, 0x2e, 0x80, 0x17, 0x27, 0xec, 0x44, 0xb4, 0x0e, 0xcd, 0x2d, 0x2d, 0x5a, - 0x9b, 0x0b, 0x5b, 0x51, 0x74, 0x22, 0x31, 0xa4, 0xf9, 0x75, 0x16, 0xa1, 0x7e, 0xdd, 0xe3, 0xd5, - 0xd4, 0xe8, 0x59, 0x1e, 0x18, 0x3d, 0x8b, 0xc9, 0x85, 0x2c, 0x31, 0x66, 0x6e, 0xa2, 0xbc, 0x2f, - 0xfb, 0x81, 0xb6, 0xb0, 0x3a, 0x2e, 0x98, 0xf1, 0x84, 0x8f, 0xd1, 0x6a, 0x28, 0x6c, 0xe8, 0xba, - 0xab, 0x68, 0x34, 0x7c, 0x15, 0xe5, 0x5a, 0xbe, 0x13, 0x8d, 0xe4, 0xb1, 0x7b, 0xd2, 0x75, 0xdf, - 0x09, 0x52, 0x98, 0xb3, 0xa1, 0xed, 0xe1, 0x2d, 0x91, 0x38, 0xf8, 0x03, 0x34, 0x1b, 0xad, 0xeb, - 0xb2, 0x44, 0x0b, 0xd5, 0xd5, 0x71, 0x98, 0x44, 0xf3, 0xa7, 0x70, 0x65, 0x30, 0x23, 0x0a, 0x89, - 0x31, 0x43, 0x7c, 0xd0, 0x1b, 0x9f, 0x9c, 0x40, 0x13, 0xe0, 0x8f, 0x5a, 0x75, 0x15, 0x7e, 0x44, - 0x21, 0x31, 0xa6, 0xf9, 0x4d, 0x16, 0xcd, 0xa5, 0x56, 0xc9, 0x7f, 0x23, 0x5d, 0xea, 0xad, 0x1d, - 0x6d, 0xba, 0x14, 0xe6, 0xd1, 0xa7, 0x4b, 0xe1, 0x1e, 0x5f, 0xba, 0x12, 0xf8, 0x23, 0xd2, 0xf5, - 0x53, 0x26, 0x4a, 0x97, 0x9a, 0x7f, 0x93, 0xa5, 0x4b, 0xf1, 0x26, 0xd2, 0x75, 0x0d, 0x4d, 0x77, - 0xc2, 0x05, 0x5d, 0x67, 0xeb, 0xc0, 0x45, 0xc4, 0x8a, 0x9c, 0xb3, 0xde, 0x6d, 0x53, 0x4f, 0x30, - 0xb1, 0x57, 0x3b, 0x11, 0x2e, 0x08, 0x72, 0xc3, 0x27, 0x0a, 0x07, 0x3b, 0x68, 0x8e, 0x76, 0x80, - 0xd3, 0x06, 0xc8, 0x6b, 0x9d, 0xaf, 0xc3, 0xe2, 0x2e, 0xf6, 0xba, 0xe5, 0xb9, 0xb5, 0x04, 0x0e, - 0x49, 0xa1, 0x86, 0x63, 0x50, 0x9f, 0x6f, 0x08, 0xd6, 0x64, 0xf7, 0xd4, 0x18, 0x54, 0x93, 0x41, - 0x8e, 0xc1, 0xb5, 0x21, 0x2a, 0x19, 0x21, 0x61, 0x7e, 0x91, 0x41, 0xa7, 0x86, 0x3e, 0x53, 0xfa, - 0x41, 0x31, 0x8e, 0x29, 0x28, 0x99, 0xa7, 0x18, 0x94, 0xec, 0xa1, 0x83, 0xf2, 0x73, 0x06, 0xe1, - 0xe1, 0x26, 0x8a, 0x3f, 0x91, 0xa3, 0xd8, 0xe6, 0xac, 0x0e, 0x8e, 0x22, 0x1f, 0xc5, 0x6e, 0x97, - 0x9c, 0xe3, 0x49, 0x6c, 0x32, 0xa8, 0xec, 0x78, 0xbe, 0xa4, 0x13, 0x1f, 0xcc, 0xd9, 0xa3, 0xfd, - 0x60, 0x36, 0x7f, 0x1b, 0x0c, 0xe3, 0x33, 0xfd, 0x85, 0x3e, 0x2a, 0xfd, 0xd9, 0xa7, 0x98, 0x7e, - 0xf3, 0x47, 0x03, 0x2d, 0x0e, 0x0e, 0xe1, 0x67, 0xee, 0x7f, 0x9b, 0x5f, 0xd3, 0x4e, 0x3c, 0xdb, - 0xff, 0xd9, 0x7c, 0x6b, 0xa0, 0xd3, 0xa3, 0x56, 0x18, 0xbc, 0x9e, 0x5a, 0x3c, 0x2b, 0xc9, 0xc5, - 0xf3, 0x71, 0xb7, 0x5c, 0x1e, 0xf1, 0xaf, 0x40, 0x04, 0x93, 0xd8, 0x4d, 0x8f, 0x27, 0x01, 0xdf, - 0x0f, 0xdb, 0xac, 0x92, 0x70, 0x24, 0x36, 0x1f, 0x6b, 0xbc, 0x6b, 0x17, 0xef, 0x3f, 0x2a, 0x4d, - 0x3d, 0x78, 0x54, 0x9a, 0x7a, 0xf8, 0xa8, 0x34, 0xf5, 0x69, 0xaf, 0x64, 0xdc, 0xef, 0x95, 0x8c, - 0x07, 0xbd, 0x92, 0xf1, 0xb0, 0x57, 0x32, 0xfe, 0xea, 0x95, 0x8c, 0xcf, 0xff, 0x2e, 0x4d, 0xbd, - 0x3f, 0xa3, 0xa1, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xa0, 0xce, 0xf5, 0x16, 0x17, 0x00, - 0x00, -} diff --git 
a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index b4e4c95a3b1ac..80f1d345d4411 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -32,7 +32,7 @@ option go_package = "v2beta2"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -69,12 +69,12 @@ message ExternalMetricStatus { // implementing the scale subresource based on the metrics specified. message HorizontalPodAutoscaler { // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -123,8 +123,11 @@ message HorizontalPodAutoscalerSpec { // should be collected, as well as to actually change the replica count. optional CrossVersionObjectReference scaleTargetRef = 1; - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional optional int32 minReplicas = 2; diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 2d337953740e6..314d70950c575 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -19,7 +19,7 @@ limitations under the License. package v2beta2 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,12 +33,12 @@ import ( type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. 
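[Editor's note] The generated.proto hunk above and the types.go hunk that follows carry the same doc change: minReplicas may now be 0 when the alpha HPAScaleToZero feature gate is enabled and at least one Object or External metric is configured. Below is a hedged illustration of such a spec built with the v2beta2 types from this vendored package; the workload and metric names are made up, and an API server will only accept minReplicas: 0 if the feature gate is actually on.

package main

import (
	"fmt"

	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	zero := int32(0)
	spec := autoscalingv2beta2.HorizontalPodAutoscalerSpec{
		ScaleTargetRef: autoscalingv2beta2.CrossVersionObjectReference{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
			Name:       "queue-worker", // hypothetical workload
		},
		// 0 is only valid with the HPAScaleToZero alpha gate enabled and at
		// least one Object or External metric configured, per the doc change.
		MinReplicas: &zero,
		MaxReplicas: 10,
		Metrics: []autoscalingv2beta2.MetricSpec{{
			Type: autoscalingv2beta2.ExternalMetricSourceType,
			External: &autoscalingv2beta2.ExternalMetricSource{
				Metric: autoscalingv2beta2.MetricIdentifier{Name: "queue_depth"}, // hypothetical metric
				Target: autoscalingv2beta2.MetricTarget{
					Type:         autoscalingv2beta2.AverageValueMetricType,
					AverageValue: resource.NewQuantity(30, resource.DecimalSI),
				},
			},
		}},
	}
	fmt.Printf("%+v\n", spec)
}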
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -52,8 +52,11 @@ type HorizontalPodAutoscalerSpec struct { // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics // should be collected, as well as to actually change the replica count. ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. @@ -73,7 +76,7 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index 996dc18401d75..bb85b9f0f45af 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v2beta2 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -60,8 +60,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", - "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "spec is the specification for the behaviour of the autoscaler. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", "status": "status is the current information about the autoscaler.", } @@ -95,7 +95,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.", } diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index 8f4cc88403663..fb9d21e17b029 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -17,37 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto - - It has these top-level messages: - Job - JobCondition - JobList - JobSpec - JobStatus -*/ package v1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" + io "io" + proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -61,25 +45,145 @@ var _ = math.Inf // proto package needs to be updated. 
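[Editor's note] The Marshal hunks later in this regenerated batch/v1 file switch from MarshalTo, which appends fields front-to-back, to MarshalToSizedBuffer, which fills a pre-sized buffer from the back toward the front, and sovGenerated is rewritten in terms of math/bits.Len64 instead of a shift loop. The following is a small self-contained sketch of both patterns using illustrative helpers, not the vendored functions, encoding a single varint field the way the new code does.

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors the regenerated sovGenerated: the encoded size of a varint is
// ceil(bitlen(x)/7), with x|1 making the zero value occupy one byte.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarint mirrors the regenerated encodeVarintGenerated: it writes the
// varint so that it *ends* at offset and returns the new, smaller offset,
// which is how MarshalToSizedBuffer fills the buffer back to front.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func main() {
	// Emulate MarshalToSizedBuffer for one varint field (field number 4,
	// wire type 0), e.g. JobStatus.Active = 300.
	size := 1 + sov(300) // 1 tag byte + varint payload
	dAtA := make([]byte, size)
	i := len(dAtA)
	i = encodeVarint(dAtA, i, 300)
	i--
	dAtA[i] = 0x20                // tag: (4 << 3) | 0
	fmt.Printf("% x\n", dAtA[i:]) // 20 ac 02
}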
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Job) Reset() { *m = Job{} } -func (*Job) ProtoMessage() {} -func (*Job) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Job) Reset() { *m = Job{} } +func (*Job) ProtoMessage() {} +func (*Job) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{0} +} +func (m *Job) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Job) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Job) XXX_Merge(src proto.Message) { + xxx_messageInfo_Job.Merge(m, src) +} +func (m *Job) XXX_Size() int { + return m.Size() +} +func (m *Job) XXX_DiscardUnknown() { + xxx_messageInfo_Job.DiscardUnknown(m) +} + +var xxx_messageInfo_Job proto.InternalMessageInfo + +func (m *JobCondition) Reset() { *m = JobCondition{} } +func (*JobCondition) ProtoMessage() {} +func (*JobCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{1} +} +func (m *JobCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobCondition.Merge(m, src) +} +func (m *JobCondition) XXX_Size() int { + return m.Size() +} +func (m *JobCondition) XXX_DiscardUnknown() { + xxx_messageInfo_JobCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_JobCondition proto.InternalMessageInfo + +func (m *JobList) Reset() { *m = JobList{} } +func (*JobList) ProtoMessage() {} +func (*JobList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{2} +} +func (m *JobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobList.Merge(m, src) +} +func (m *JobList) XXX_Size() int { + return m.Size() +} +func (m *JobList) XXX_DiscardUnknown() { + xxx_messageInfo_JobList.DiscardUnknown(m) +} + +var xxx_messageInfo_JobList proto.InternalMessageInfo -func (m *JobCondition) Reset() { *m = JobCondition{} } -func (*JobCondition) ProtoMessage() {} -func (*JobCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *JobSpec) Reset() { *m = JobSpec{} } +func (*JobSpec) ProtoMessage() {} +func (*JobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{3} +} +func (m *JobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobSpec.Merge(m, src) +} +func (m *JobSpec) XXX_Size() int { + return m.Size() +} +func (m *JobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobSpec.DiscardUnknown(m) +} -func (m *JobList) Reset() { *m = JobList{} } -func (*JobList) ProtoMessage() {} -func (*JobList) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_JobSpec proto.InternalMessageInfo -func (m *JobSpec) Reset() { *m = JobSpec{} } -func (*JobSpec) ProtoMessage() {} -func (*JobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (*JobStatus) ProtoMessage() {} +func (*JobStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{4} +} +func (m *JobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobStatus.Merge(m, src) +} +func (m *JobStatus) XXX_Size() int { + return m.Size() +} +func (m *JobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_JobStatus.DiscardUnknown(m) +} -func (m *JobStatus) Reset() { *m = JobStatus{} } -func (*JobStatus) ProtoMessage() {} -func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_JobStatus proto.InternalMessageInfo func init() { proto.RegisterType((*Job)(nil), "k8s.io.api.batch.v1.Job") @@ -88,10 +192,78 @@ func init() { proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec") proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptor_3b52da57c93de713) +} + +var fileDescriptor_3b52da57c93de713 = []byte{ + // 929 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5d, 0x6f, 0xe3, 0x44, + 0x14, 0xad, 0x9b, 0xa6, 0x4d, 0xa6, 0x1f, 0x5b, 0x06, 0x55, 0x1b, 0x0a, 0xb2, 0x97, 0x20, 0xa1, + 0x82, 0x84, 0x4d, 0x4b, 0x85, 0x10, 0x02, 0xa4, 0x75, 0x51, 0x25, 0xaa, 0x54, 0x5b, 0x26, 0x59, + 0x21, 0x21, 0x90, 0x18, 0xdb, 0x37, 0x89, 0x89, 0xed, 0xb1, 0x3c, 0x93, 0x48, 0x7d, 0xe3, 0x27, + 0xf0, 0x23, 0x10, 0x7f, 0x82, 0x77, 0xd4, 0xc7, 0x7d, 0xdc, 0x27, 0x8b, 0x9a, 0x1f, 0xc0, 0xfb, + 0x3e, 0xa1, 0x19, 0x3b, 0xb6, 0xd3, 0x26, 0xa2, 0xcb, 0x5b, 0xe6, 0xcc, 0x39, 0xe7, 0x5e, 0xcf, + 0x3d, 0xb9, 0xe8, 0x8b, 0xc9, 0x67, 0xdc, 0xf4, 0x99, 0x35, 0x99, 0x3a, 0x90, 0x44, 0x20, 0x80, + 0x5b, 0x33, 0x88, 0x3c, 0x96, 0x58, 0xc5, 0x05, 0x8d, 0x7d, 0xcb, 0xa1, 0xc2, 0x1d, 0x5b, 0xb3, + 0x63, 0x6b, 0x04, 0x11, 0x24, 0x54, 0x80, 0x67, 0xc6, 0x09, 0x13, 0x0c, 0xbf, 0x99, 0x93, 0x4c, + 0x1a, 0xfb, 0xa6, 0x22, 0x99, 0xb3, 0xe3, 0xc3, 0x8f, 0x46, 0xbe, 0x18, 0x4f, 0x1d, 0xd3, 0x65, + 0xa1, 0x35, 0x62, 0x23, 0x66, 0x29, 0xae, 0x33, 0x1d, 0xaa, 0x93, 0x3a, 0xa8, 0x5f, 0xb9, 0xc7, + 0x61, 0xb7, 0x56, 0xc8, 0x65, 0x09, 0x2c, 0xa9, 0x73, 0x78, 0x5a, 0x71, 0x42, 0xea, 0x8e, 0xfd, + 0x08, 0x92, 0x6b, 0x2b, 0x9e, 0x8c, 0x24, 0xc0, 0xad, 0x10, 0x04, 0x5d, 0xa6, 0xb2, 0x56, 0xa9, + 0x92, 0x69, 0x24, 0xfc, 0x10, 0xee, 0x09, 0x3e, 0xfd, 0x2f, 0x01, 0x77, 0xc7, 0x10, 0xd2, 0xbb, + 0xba, 0xee, 0x3f, 0x1a, 0x6a, 0x5c, 0x30, 0x07, 0xff, 0x84, 0x5a, 0xb2, 0x17, 0x8f, 0x0a, 0xda, + 0xd1, 0x9e, 0x68, 0x47, 0xdb, 0x27, 0x1f, 0x9b, 0xd5, 0x0b, 0x95, 0x96, 0x66, 0x3c, 0x19, 0x49, + 0x80, 0x9b, 0x92, 0x6d, 0xce, 0x8e, 0xcd, 0x67, 0xce, 0xcf, 0xe0, 0x8a, 0x4b, 0x10, 0xd4, 0xc6, + 0x37, 0xa9, 0xb1, 0x96, 0xa5, 0x06, 0xaa, 0x30, 0x52, 0xba, 0xe2, 0xaf, 0xd0, 0x06, 0x8f, 0xc1, + 0xed, 0xac, 0x2b, 0xf7, 0x77, 0xcc, 0x25, 0xef, 0x6f, 0x5e, 0x30, 0xa7, 0x1f, 0x83, 
0x6b, 0xef, + 0x14, 0x4e, 0x1b, 0xf2, 0x44, 0x94, 0x0e, 0x9f, 0xa3, 0x4d, 0x2e, 0xa8, 0x98, 0xf2, 0x4e, 0x43, + 0x39, 0xe8, 0x2b, 0x1d, 0x14, 0xcb, 0xde, 0x2b, 0x3c, 0x36, 0xf3, 0x33, 0x29, 0xd4, 0xdd, 0x3f, + 0x1b, 0x68, 0xe7, 0x82, 0x39, 0x67, 0x2c, 0xf2, 0x7c, 0xe1, 0xb3, 0x08, 0x9f, 0xa2, 0x0d, 0x71, + 0x1d, 0x83, 0xfa, 0xec, 0xb6, 0xfd, 0x64, 0x5e, 0x7a, 0x70, 0x1d, 0xc3, 0xab, 0xd4, 0xd8, 0xaf, + 0x73, 0x25, 0x46, 0x14, 0x1b, 0xf7, 0xca, 0x76, 0xd6, 0x95, 0xee, 0x74, 0xb1, 0xdc, 0xab, 0xd4, + 0x58, 0x92, 0x0e, 0xb3, 0x74, 0x5a, 0x6c, 0x0a, 0x8f, 0xd0, 0x6e, 0x40, 0xb9, 0xb8, 0x4a, 0x98, + 0x03, 0x03, 0x3f, 0x84, 0xe2, 0x1b, 0x3f, 0x7c, 0xd8, 0x0c, 0xa4, 0xc2, 0x3e, 0x28, 0x1a, 0xd8, + 0xed, 0xd5, 0x8d, 0xc8, 0xa2, 0x2f, 0x9e, 0x21, 0x2c, 0x81, 0x41, 0x42, 0x23, 0x9e, 0x7f, 0x92, + 0xac, 0xb6, 0xf1, 0xda, 0xd5, 0x0e, 0x8b, 0x6a, 0xb8, 0x77, 0xcf, 0x8d, 0x2c, 0xa9, 0x80, 0xdf, + 0x47, 0x9b, 0x09, 0x50, 0xce, 0xa2, 0x4e, 0x53, 0x3d, 0x57, 0x39, 0x1d, 0xa2, 0x50, 0x52, 0xdc, + 0xe2, 0x0f, 0xd0, 0x56, 0x08, 0x9c, 0xd3, 0x11, 0x74, 0x36, 0x15, 0xf1, 0x51, 0x41, 0xdc, 0xba, + 0xcc, 0x61, 0x32, 0xbf, 0xef, 0xfe, 0xae, 0xa1, 0xad, 0x0b, 0xe6, 0xf4, 0x7c, 0x2e, 0xf0, 0x0f, + 0xf7, 0xe2, 0x6b, 0x3e, 0xec, 0x63, 0xa4, 0x5a, 0x85, 0x77, 0xbf, 0xa8, 0xd3, 0x9a, 0x23, 0xb5, + 0xe8, 0x7e, 0x89, 0x9a, 0xbe, 0x80, 0x50, 0x8e, 0xba, 0x71, 0xb4, 0x7d, 0xd2, 0x59, 0x95, 0x3c, + 0x7b, 0xb7, 0x30, 0x69, 0x7e, 0x23, 0xe9, 0x24, 0x57, 0x75, 0xff, 0xd8, 0x50, 0x8d, 0xca, 0x2c, + 0xe3, 0x63, 0xb4, 0x1d, 0xd3, 0x84, 0x06, 0x01, 0x04, 0x3e, 0x0f, 0x55, 0xaf, 0x4d, 0xfb, 0x51, + 0x96, 0x1a, 0xdb, 0x57, 0x15, 0x4c, 0xea, 0x1c, 0x29, 0x71, 0x59, 0x18, 0x07, 0x20, 0x1f, 0x33, + 0x8f, 0x5b, 0x21, 0x39, 0xab, 0x60, 0x52, 0xe7, 0xe0, 0x67, 0xe8, 0x80, 0xba, 0xc2, 0x9f, 0xc1, + 0xd7, 0x40, 0xbd, 0xc0, 0x8f, 0xa0, 0x0f, 0x2e, 0x8b, 0xbc, 0xfc, 0xaf, 0xd3, 0xb0, 0xdf, 0xca, + 0x52, 0xe3, 0xe0, 0xe9, 0x32, 0x02, 0x59, 0xae, 0xc3, 0xa7, 0x68, 0xc7, 0xa1, 0xee, 0x84, 0x0d, + 0x87, 0x3d, 0x3f, 0xf4, 0x45, 0x67, 0x4b, 0x35, 0xb1, 0x9f, 0xa5, 0xc6, 0x8e, 0x5d, 0xc3, 0xc9, + 0x02, 0x0b, 0xff, 0x88, 0x5a, 0x1c, 0x02, 0x70, 0x05, 0x4b, 0x8a, 0x88, 0x7d, 0xf2, 0xc0, 0xa9, + 0x50, 0x07, 0x82, 0x7e, 0x21, 0xb5, 0x77, 0xe4, 0x58, 0xe6, 0x27, 0x52, 0x5a, 0xe2, 0xcf, 0xd1, + 0x5e, 0x48, 0xa3, 0x29, 0x2d, 0x99, 0x2a, 0x5b, 0x2d, 0x1b, 0x67, 0xa9, 0xb1, 0x77, 0xb9, 0x70, + 0x43, 0xee, 0x30, 0xf1, 0xb7, 0xa8, 0x25, 0x20, 0x8c, 0x03, 0x2a, 0xf2, 0xa0, 0x6d, 0x9f, 0xbc, + 0x57, 0x9f, 0xaa, 0xfc, 0xbf, 0xca, 0x46, 0xae, 0x98, 0x37, 0x28, 0x68, 0x6a, 0x31, 0x95, 0x29, + 0x99, 0xa3, 0xa4, 0xb4, 0xc1, 0xcf, 0xd1, 0x63, 0x21, 0x82, 0xe2, 0xc5, 0x9e, 0x0e, 0x05, 0x24, + 0xe7, 0x7e, 0xe4, 0xf3, 0x31, 0x78, 0x9d, 0x96, 0x7a, 0xae, 0xb7, 0xb3, 0xd4, 0x78, 0x3c, 0x18, + 0xf4, 0x96, 0x51, 0xc8, 0x2a, 0x6d, 0xf7, 0xb7, 0x06, 0x6a, 0x97, 0x5b, 0x0d, 0x3f, 0x47, 0xc8, + 0x9d, 0xef, 0x10, 0xde, 0xd1, 0x54, 0x1e, 0xdf, 0x5d, 0x95, 0xc7, 0x72, 0xdb, 0x54, 0xab, 0xb9, + 0x84, 0x38, 0xa9, 0x19, 0xe1, 0xef, 0x50, 0x9b, 0x0b, 0x9a, 0x08, 0xb5, 0x0d, 0xd6, 0x5f, 0x7b, + 0x1b, 0xec, 0x66, 0xa9, 0xd1, 0xee, 0xcf, 0x0d, 0x48, 0xe5, 0x85, 0x87, 0x68, 0xaf, 0x0a, 0xe6, + 0xff, 0xdc, 0x6c, 0x6a, 0x9e, 0x67, 0x0b, 0x2e, 0xe4, 0x8e, 0xab, 0xdc, 0x2f, 0x79, 0x72, 0x55, + 0xd0, 0x9a, 0xd5, 0x7e, 0xc9, 0x63, 0x4e, 0x8a, 0x5b, 0x6c, 0xa1, 0x36, 0x9f, 0xba, 0x2e, 0x80, + 0x07, 0x9e, 0x8a, 0x4b, 0xd3, 0x7e, 0xa3, 0xa0, 0xb6, 0xfb, 0xf3, 0x0b, 0x52, 0x71, 0xa4, 0xf1, + 0x90, 0xfa, 0x01, 0x78, 0x2a, 0x26, 0x35, 0xe3, 0x73, 0x85, 0x92, 0xe2, 0xd6, 0x3e, 0xba, 0xb9, + 0xd5, 0xd7, 
0x5e, 0xdc, 0xea, 0x6b, 0x2f, 0x6f, 0xf5, 0xb5, 0x5f, 0x32, 0x5d, 0xbb, 0xc9, 0x74, + 0xed, 0x45, 0xa6, 0x6b, 0x2f, 0x33, 0x5d, 0xfb, 0x2b, 0xd3, 0xb5, 0x5f, 0xff, 0xd6, 0xd7, 0xbe, + 0x5f, 0x9f, 0x1d, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x73, 0xe7, 0x7a, 0xb8, 0x08, 0x00, + 0x00, +} + func (m *Job) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -99,41 +271,52 @@ func (m *Job) Marshal() (dAtA []byte, err error) { } func (m *Job) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Job) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -141,49 +324,62 @@ func (m *JobCondition) Marshal() (dAtA []byte, err error) { } func (m *JobCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n4, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := 
m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -191,37 +387,46 @@ func (m *JobList) Marshal() (dAtA []byte, err error) { } func (m *JobList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -229,70 +434,79 @@ func (m *JobSpec) Marshal() (dAtA []byte, err error) { } func (m *JobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Parallelism != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) - } - if m.Completions != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Completions)) + if m.TTLSecondsAfterFinished != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TTLSecondsAfterFinished)) + i-- + dAtA[i] = 0x40 } - if m.ActiveDeadlineSeconds != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + if m.BackoffLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) + i-- + dAtA[i] = 0x38 } - if m.Selector != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(dAtA[i:]) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n7 + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x32 if m.ManualSelector != nil { - dAtA[i] = 0x28 - i++ + i-- if *m.ManualSelector { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x28 } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i += n8 - if m.BackoffLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x18 } - if m.TTLSecondsAfterFinished != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TTLSecondsAfterFinished)) + if m.Completions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Completions)) + i-- + dAtA[i] = 0x10 } - return i, nil + if m.Parallelism != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *JobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -300,64 +514,80 @@ func (m *JobStatus) Marshal() (dAtA []byte, err error) { } func (m *JobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Failed)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.Active)) + i-- + dAtA[i] = 0x20 + if m.CompletionTime != nil { + { + size, err := m.CompletionTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } if m.StartTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n9, err := m.StartTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- + dAtA[i] = 0x12 } - if m.CompletionTime != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CompletionTime.Size())) - n10, err := m.CompletionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n10 } - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Active)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded)) - dAtA[i] = 0x30 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Failed)) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Job) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -370,6 +600,9 @@ func (m *Job) Size() (n int) { } func (m *JobCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -388,6 +621,9 @@ func (m *JobCondition) Size() (n int) { } func (m *JobList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -402,6 +638,9 @@ func (m *JobList) Size() (n int) { } func (m *JobSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Parallelism != nil { @@ -432,6 +671,9 @@ func (m *JobSpec) Size() (n int) { } func (m *JobStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Conditions) > 0 { @@ -455,14 +697,7 @@ func (m *JobStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -472,7 +707,7 @@ func (this *Job) String() string { return "nil" } s := strings.Join([]string{`&Job{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "JobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "JobStatus", "JobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -486,8 +721,8 @@ func (this *JobCondition) String() string { s := strings.Join([]string{`&JobCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -498,9 +733,14 @@ func (this *JobList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Job{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Job", "Job", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&JobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Job", "Job", 1), 
`&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -513,9 +753,9 @@ func (this *JobSpec) String() string { `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, `Completions:` + valueToStringGenerated(this.Completions) + `,`, `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `ManualSelector:` + valueToStringGenerated(this.ManualSelector) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `BackoffLimit:` + valueToStringGenerated(this.BackoffLimit) + `,`, `TTLSecondsAfterFinished:` + valueToStringGenerated(this.TTLSecondsAfterFinished) + `,`, `}`, @@ -526,10 +766,15 @@ func (this *JobStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]JobCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "JobCondition", "JobCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&JobStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "JobCondition", "JobCondition", 1), `&`, ``, 1) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`, + `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "v1.Time", 1) + `,`, `Active:` + fmt.Sprintf("%v", this.Active) + `,`, `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`, `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, @@ -560,7 +805,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -588,7 +833,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -597,6 +842,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -618,7 +866,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -627,6 +875,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -648,7 +899,7 @@ func 
(m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -657,6 +908,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -673,6 +927,9 @@ func (m *Job) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -700,7 +957,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -728,7 +985,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -738,6 +995,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -757,7 +1017,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -767,6 +1027,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -786,7 +1049,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -795,6 +1058,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -816,7 +1082,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -825,6 +1091,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -846,7 +1115,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -856,6 +1125,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -875,7 +1147,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -885,6 +1157,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -899,6 
+1174,9 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -926,7 +1204,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -954,7 +1232,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -963,6 +1241,9 @@ func (m *JobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -984,7 +1265,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -993,6 +1274,9 @@ func (m *JobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1010,6 +1294,9 @@ func (m *JobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1037,7 +1324,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1065,7 +1352,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1085,7 +1372,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1105,7 +1392,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1125,7 +1412,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1134,11 +1421,14 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1158,7 +1448,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1179,7 +1469,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1188,6 +1478,9 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1209,7 +1502,7 @@ 
func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1229,7 +1522,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1244,6 +1537,9 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1271,7 +1567,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1299,7 +1595,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1308,6 +1604,9 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1330,7 +1629,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1339,11 +1638,14 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.StartTime = &v1.Time{} } if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1363,7 +1665,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1372,11 +1674,14 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.CompletionTime == nil { - m.CompletionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.CompletionTime = &v1.Time{} } if err := m.CompletionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1396,7 +1701,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Active |= (int32(b) & 0x7F) << shift + m.Active |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1415,7 +1720,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Succeeded |= (int32(b) & 0x7F) << shift + m.Succeeded |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1434,7 +1739,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Failed |= (int32(b) & 0x7F) << shift + m.Failed |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1448,6 +1753,9 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1514,10 +1822,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + 
return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1546,6 +1857,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1564,70 +1878,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 929 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5d, 0x6f, 0xe3, 0x44, - 0x14, 0xad, 0x9b, 0xa6, 0x4d, 0xa6, 0x1f, 0x5b, 0x06, 0x55, 0x1b, 0x0a, 0xb2, 0x97, 0x20, 0xa1, - 0x82, 0x84, 0x4d, 0x4b, 0x85, 0x10, 0x02, 0xa4, 0x75, 0x51, 0x25, 0xaa, 0x54, 0x5b, 0x26, 0x59, - 0x21, 0x21, 0x90, 0x18, 0xdb, 0x37, 0x89, 0x89, 0xed, 0xb1, 0x3c, 0x93, 0x48, 0x7d, 0xe3, 0x27, - 0xf0, 0x23, 0x10, 0x7f, 0x82, 0x77, 0xd4, 0xc7, 0x7d, 0xdc, 0x27, 0x8b, 0x9a, 0x1f, 0xc0, 0xfb, - 0x3e, 0xa1, 0x19, 0x3b, 0xb6, 0xd3, 0x26, 0xa2, 0xcb, 0x5b, 0xe6, 0xcc, 0x39, 0xe7, 0x5e, 0xdf, - 0x39, 0xb9, 0xe8, 0x8b, 0xc9, 0x67, 0xdc, 0xf4, 0x99, 0x35, 0x99, 0x3a, 0x90, 0x44, 0x20, 0x80, - 0x5b, 0x33, 0x88, 0x3c, 0x96, 0x58, 0xc5, 0x05, 0x8d, 0x7d, 0xcb, 0xa1, 0xc2, 0x1d, 0x5b, 0xb3, - 0x63, 0x6b, 0x04, 0x11, 0x24, 0x54, 0x80, 0x67, 0xc6, 0x09, 0x13, 0x0c, 0xbf, 0x99, 0x93, 0x4c, - 0x1a, 0xfb, 0xa6, 0x22, 0x99, 0xb3, 0xe3, 0xc3, 0x8f, 0x46, 0xbe, 0x18, 0x4f, 0x1d, 0xd3, 0x65, - 0xa1, 0x35, 0x62, 0x23, 0x66, 0x29, 0xae, 0x33, 0x1d, 0xaa, 0x93, 0x3a, 0xa8, 0x5f, 0xb9, 0xc7, - 0x61, 0xb7, 0x56, 0xc8, 0x65, 0x09, 0x2c, 0xa9, 0x73, 0x78, 0x5a, 0x71, 0x42, 0xea, 0x8e, 0xfd, - 0x08, 0x92, 0x6b, 0x2b, 0x9e, 0x8c, 0x24, 0xc0, 0xad, 0x10, 0x04, 0x5d, 0xa6, 0xb2, 0x56, 0xa9, - 0x92, 0x69, 0x24, 0xfc, 0x10, 0xee, 0x09, 0x3e, 0xfd, 0x2f, 0x01, 0x77, 0xc7, 0x10, 0xd2, 0xbb, - 0xba, 0xee, 0x3f, 0x1a, 0x6a, 0x5c, 0x30, 0x07, 0xff, 0x84, 0x5a, 0xb2, 0x17, 0x8f, 0x0a, 0xda, - 0xd1, 0x9e, 0x68, 0x47, 0xdb, 0x27, 0x1f, 0x9b, 0xd5, 0x84, 0x4a, 0x4b, 0x33, 0x9e, 0x8c, 0x24, - 0xc0, 0x4d, 0xc9, 0x36, 0x67, 0xc7, 0xe6, 0x33, 0xe7, 0x67, 0x70, 0xc5, 0x25, 0x08, 0x6a, 0xe3, - 0x9b, 0xd4, 0x58, 0xcb, 0x52, 0x03, 0x55, 0x18, 0x29, 0x5d, 0xf1, 0x57, 0x68, 0x83, 0xc7, 0xe0, - 0x76, 0xd6, 0x95, 0xfb, 0x3b, 0xe6, 0x92, 0xf9, 0x9b, 0x17, 0xcc, 0xe9, 0xc7, 0xe0, 0xda, 0x3b, - 0x85, 0xd3, 0x86, 0x3c, 0x11, 0xa5, 0xc3, 0xe7, 0x68, 0x93, 0x0b, 0x2a, 0xa6, 0xbc, 0xd3, 0x50, - 0x0e, 0xfa, 0x4a, 0x07, 0xc5, 0xb2, 0xf7, 0x0a, 0x8f, 0xcd, 0xfc, 0x4c, 0x0a, 0x75, 0xf7, 0xcf, - 0x06, 0xda, 0xb9, 0x60, 0xce, 0x19, 0x8b, 0x3c, 0x5f, 0xf8, 0x2c, 0xc2, 0xa7, 0x68, 0x43, 0x5c, - 0xc7, 0xa0, 0x3e, 0xbb, 0x6d, 0x3f, 0x99, 0x97, 0x1e, 0x5c, 0xc7, 0xf0, 0x2a, 0x35, 0xf6, 0xeb, - 0x5c, 0x89, 0x11, 0xc5, 0xc6, 0xbd, 0xb2, 0x9d, 0x75, 0xa5, 0x3b, 0x5d, 0x2c, 0xf7, 0x2a, 0x35, - 0x96, 0xa4, 0xc3, 0x2c, 0x9d, 0x16, 0x9b, 0xc2, 0x23, 0xb4, 0x1b, 0x50, 0x2e, 0xae, 0x12, 0xe6, - 0xc0, 0xc0, 0x0f, 0xa1, 0xf8, 0xc6, 0x0f, 0x1f, 0xf6, 0x06, 0x52, 0x61, 0x1f, 0x14, 0x0d, 0xec, - 0xf6, 0xea, 0x46, 0x64, 0xd1, 0x17, 0xcf, 0x10, 0x96, 0xc0, 0x20, 0xa1, 0x11, 0xcf, 0x3f, 0x49, - 0x56, 0xdb, 0x78, 0xed, 0x6a, 0x87, 0x45, 0x35, 0xdc, 0xbb, 0xe7, 0x46, 0x96, 0x54, 0xc0, 0xef, - 0xa3, 0xcd, 0x04, 0x28, 0x67, 0x51, 0xa7, 0xa9, 0xc6, 0x55, 0xbe, 0x0e, 0x51, 0x28, 0x29, 0x6e, - 0xf1, 0x07, 0x68, 0x2b, 0x04, 
0xce, 0xe9, 0x08, 0x3a, 0x9b, 0x8a, 0xf8, 0xa8, 0x20, 0x6e, 0x5d, - 0xe6, 0x30, 0x99, 0xdf, 0x77, 0x7f, 0xd7, 0xd0, 0xd6, 0x05, 0x73, 0x7a, 0x3e, 0x17, 0xf8, 0x87, - 0x7b, 0xf1, 0x35, 0x1f, 0xf6, 0x31, 0x52, 0xad, 0xc2, 0xbb, 0x5f, 0xd4, 0x69, 0xcd, 0x91, 0x5a, - 0x74, 0xbf, 0x44, 0x4d, 0x5f, 0x40, 0x28, 0x9f, 0xba, 0x71, 0xb4, 0x7d, 0xd2, 0x59, 0x95, 0x3c, - 0x7b, 0xb7, 0x30, 0x69, 0x7e, 0x23, 0xe9, 0x24, 0x57, 0x75, 0xff, 0xd8, 0x50, 0x8d, 0xca, 0x2c, - 0xe3, 0x63, 0xb4, 0x1d, 0xd3, 0x84, 0x06, 0x01, 0x04, 0x3e, 0x0f, 0x55, 0xaf, 0x4d, 0xfb, 0x51, - 0x96, 0x1a, 0xdb, 0x57, 0x15, 0x4c, 0xea, 0x1c, 0x29, 0x71, 0x59, 0x18, 0x07, 0x20, 0x87, 0x99, - 0xc7, 0xad, 0x90, 0x9c, 0x55, 0x30, 0xa9, 0x73, 0xf0, 0x33, 0x74, 0x40, 0x5d, 0xe1, 0xcf, 0xe0, - 0x6b, 0xa0, 0x5e, 0xe0, 0x47, 0xd0, 0x07, 0x97, 0x45, 0x5e, 0xfe, 0xd7, 0x69, 0xd8, 0x6f, 0x65, - 0xa9, 0x71, 0xf0, 0x74, 0x19, 0x81, 0x2c, 0xd7, 0xe1, 0x1f, 0x51, 0x8b, 0x43, 0x00, 0xae, 0x60, - 0x49, 0x11, 0x96, 0x4f, 0x1e, 0x38, 0x5f, 0xea, 0x40, 0xd0, 0x2f, 0xa4, 0xf6, 0x8e, 0x1c, 0xf0, - 0xfc, 0x44, 0x4a, 0x4b, 0xfc, 0x39, 0xda, 0x0b, 0x69, 0x34, 0xa5, 0x25, 0x53, 0xa5, 0xa4, 0x65, - 0xe3, 0x2c, 0x35, 0xf6, 0x2e, 0x17, 0x6e, 0xc8, 0x1d, 0x26, 0xfe, 0x16, 0xb5, 0x04, 0x84, 0x71, - 0x40, 0x45, 0x1e, 0x99, 0xed, 0x93, 0xf7, 0xea, 0xef, 0x23, 0xff, 0x79, 0xb2, 0x91, 0x2b, 0xe6, - 0x0d, 0x0a, 0x9a, 0x5a, 0x31, 0xe5, 0x7b, 0xcf, 0x51, 0x52, 0xda, 0xe0, 0x53, 0xb4, 0xe3, 0x50, - 0x77, 0xc2, 0x86, 0xc3, 0x9e, 0x1f, 0xfa, 0xa2, 0xb3, 0xa5, 0x46, 0xbe, 0x9f, 0xa5, 0xc6, 0x8e, - 0x5d, 0xc3, 0xc9, 0x02, 0x0b, 0x3f, 0x47, 0x8f, 0x85, 0x08, 0x8a, 0x89, 0x3d, 0x1d, 0x0a, 0x48, - 0xce, 0xfd, 0xc8, 0xe7, 0x63, 0xf0, 0x3a, 0x2d, 0x65, 0xf0, 0x76, 0x96, 0x1a, 0x8f, 0x07, 0x83, - 0xde, 0x32, 0x0a, 0x59, 0xa5, 0xed, 0xfe, 0xd6, 0x40, 0xed, 0x72, 0xab, 0xe1, 0xe7, 0x08, 0xb9, - 0xf3, 0x1d, 0xc2, 0x3b, 0x9a, 0xca, 0xe3, 0xbb, 0xab, 0xf2, 0x58, 0x6e, 0x9b, 0x6a, 0x35, 0x97, - 0x10, 0x27, 0x35, 0x23, 0xfc, 0x1d, 0x6a, 0x73, 0x41, 0x13, 0xa1, 0xb6, 0xc1, 0xfa, 0x6b, 0x6f, - 0x83, 0xdd, 0x2c, 0x35, 0xda, 0xfd, 0xb9, 0x01, 0xa9, 0xbc, 0xf0, 0x10, 0xed, 0x55, 0xc1, 0xfc, - 0x9f, 0x9b, 0x4d, 0xa5, 0xe0, 0x6c, 0xc1, 0x85, 0xdc, 0x71, 0x95, 0xfb, 0x25, 0x4f, 0xae, 0x8a, - 0x67, 0xb3, 0xda, 0x2f, 0x79, 0xcc, 0x49, 0x71, 0x8b, 0x2d, 0xd4, 0xe6, 0x53, 0xd7, 0x05, 0xf0, - 0xc0, 0x53, 0x21, 0x6b, 0xda, 0x6f, 0x14, 0xd4, 0x76, 0x7f, 0x7e, 0x41, 0x2a, 0x8e, 0x34, 0x1e, - 0x52, 0x3f, 0x00, 0x4f, 0x85, 0xab, 0x66, 0x7c, 0xae, 0x50, 0x52, 0xdc, 0xda, 0x47, 0x37, 0xb7, - 0xfa, 0xda, 0x8b, 0x5b, 0x7d, 0xed, 0xe5, 0xad, 0xbe, 0xf6, 0x4b, 0xa6, 0x6b, 0x37, 0x99, 0xae, - 0xbd, 0xc8, 0x74, 0xed, 0x65, 0xa6, 0x6b, 0x7f, 0x65, 0xba, 0xf6, 0xeb, 0xdf, 0xfa, 0xda, 0xf7, - 0xeb, 0xb3, 0xe3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x13, 0xdb, 0x98, 0xf9, 0xb8, 0x08, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 039149daba2b6..75de45f1710d6 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -32,17 +32,17 @@ option go_package = "v1"; // Job represents the configuration of a single job. message Job { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a job. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobSpec spec = 2; // Current status of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobStatus status = 3; } @@ -75,7 +75,7 @@ message JobCondition { // JobList is a collection of jobs. message JobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 8dad9043d5cb4..646a3cd7efa1c 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -28,17 +28,17 @@ import ( type Job struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -49,7 +49,7 @@ type Job struct { type JobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index d8e2bdd780fc9..0120e07d45e28 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Job = map[string]string{ "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a job. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Job) SwaggerDoc() map[string]string { @@ -54,7 +54,7 @@ func (JobCondition) SwaggerDoc() map[string]string { var map_JobList = map[string]string{ "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of Jobs.", } diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index 524b7b2261624..837a2f9c1c2c9 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -17,38 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto - - It has these top-level messages: - CronJob - CronJobList - CronJobSpec - CronJobStatus - JobTemplate - JobTemplateSpec -*/ package v1beta1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -62,29 +45,173 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *CronJob) Reset() { *m = CronJob{} } -func (*CronJob) ProtoMessage() {} -func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{0} +} +func (m *CronJob) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJob) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJob.Merge(m, src) +} +func (m *CronJob) XXX_Size() int { + return m.Size() +} +func (m *CronJob) XXX_DiscardUnknown() { + xxx_messageInfo_CronJob.DiscardUnknown(m) +} -func (m *CronJobList) Reset() { *m = CronJobList{} } -func (*CronJobList) ProtoMessage() {} -func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CronJob proto.InternalMessageInfo -func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } -func (*CronJobSpec) ProtoMessage() {} -func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{1} +} +func (m *CronJobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobList.Merge(m, src) +} +func (m *CronJobList) XXX_Size() int { + return m.Size() +} +func (m *CronJobList) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobList.DiscardUnknown(m) +} -func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } -func (*CronJobStatus) ProtoMessage() {} -func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_CronJobList proto.InternalMessageInfo -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{2} +} +func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobSpec.Merge(m, src) +} +func (m *CronJobSpec) XXX_Size() int { + return m.Size() +} +func (m *CronJobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobSpec.DiscardUnknown(m) +} -func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } -func (*JobTemplateSpec) ProtoMessage() {} -func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_CronJobSpec 
proto.InternalMessageInfo + +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{3} +} +func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobStatus.Merge(m, src) +} +func (m *CronJobStatus) XXX_Size() int { + return m.Size() +} +func (m *CronJobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{4} +} +func (m *JobTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplate.Merge(m, src) +} +func (m *JobTemplate) XXX_Size() int { + return m.Size() +} +func (m *JobTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplate proto.InternalMessageInfo + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{5} +} +func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplateSpec.Merge(m, src) +} +func (m *JobTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *JobTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo func init() { proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v1beta1.CronJob") @@ -94,10 +221,68 @@ func init() { proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v1beta1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptor_e57b277b05179ae7) +} + +var fileDescriptor_e57b277b05179ae7 = []byte{ + // 771 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x76, 0xc2, 0x42, 0xd7, 0xa0, 0x5d, 0x2b, 0x20, 0x27, 0x64, 0xb5, + 0x22, 0x20, 0x76, 0x4c, 0x2b, 0x84, 0x38, 0x21, 0xad, 0x17, 0x2d, 0x50, 0x8a, 0x16, 0x39, 0x45, + 0x48, 0xa8, 0x42, 0x1d, 0x8f, 0x5f, 0x92, 0x69, 0x6c, 0x8f, 0xe5, 0x19, 0x47, 0xca, 0x8d, 0x0b, + 0x77, 0xfe, 0x11, 0x4e, 0xfc, 0x13, 0x11, 0xa7, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x2f, 0x38, 0x21, + 0x4f, 0x9c, 0x1f, 
0xcd, 0x8f, 0xb6, 0x7b, 0xe9, 0xcd, 0xf3, 0xe6, 0xfb, 0xfd, 0xcc, 0xf3, 0x7b, + 0x6f, 0x06, 0xbd, 0x18, 0x7e, 0x29, 0x30, 0xe3, 0xd6, 0x30, 0x71, 0x21, 0x0e, 0x41, 0x82, 0xb0, + 0x46, 0x10, 0x7a, 0x3c, 0xb6, 0xf2, 0x0d, 0x12, 0x31, 0xcb, 0x25, 0x92, 0x0e, 0xac, 0xd1, 0x81, + 0x0b, 0x92, 0x1c, 0x58, 0x7d, 0x08, 0x21, 0x26, 0x12, 0x3c, 0x1c, 0xc5, 0x5c, 0x72, 0xdd, 0x98, + 0x29, 0x31, 0x89, 0x18, 0x56, 0x4a, 0x9c, 0x2b, 0x1b, 0xcf, 0xfb, 0x4c, 0x0e, 0x12, 0x17, 0x53, + 0x1e, 0x58, 0x7d, 0xde, 0xe7, 0x96, 0x32, 0xb8, 0x49, 0x4f, 0xad, 0xd4, 0x42, 0x7d, 0xcd, 0x40, + 0x8d, 0xa7, 0x5b, 0x8e, 0x5c, 0x3f, 0xad, 0xd1, 0x5e, 0x11, 0x51, 0x1e, 0xc3, 0x36, 0xcd, 0xe7, + 0x4b, 0x4d, 0x40, 0xe8, 0x80, 0x85, 0x10, 0x8f, 0xad, 0x68, 0xd8, 0xcf, 0x02, 0xc2, 0x0a, 0x40, + 0x92, 0x6d, 0x2e, 0x6b, 0x97, 0x2b, 0x4e, 0x42, 0xc9, 0x02, 0xd8, 0x30, 0x7c, 0x71, 0x9b, 0x41, + 0xd0, 0x01, 0x04, 0x64, 0xdd, 0xd7, 0xfe, 0xbd, 0x88, 0xaa, 0x2f, 0x63, 0x1e, 0x1e, 0x71, 0x57, + 0x3f, 0x43, 0xb5, 0x2c, 0x1f, 0x8f, 0x48, 0x62, 0x68, 0x2d, 0xad, 0x53, 0x3f, 0xfc, 0x0c, 0x2f, + 0xeb, 0xb9, 0xc0, 0xe2, 0x68, 0xd8, 0xcf, 0x02, 0x02, 0x67, 0x6a, 0x3c, 0x3a, 0xc0, 0xaf, 0xdd, + 0x73, 0xa0, 0xf2, 0x07, 0x90, 0xc4, 0xd6, 0x27, 0xd3, 0x66, 0x21, 0x9d, 0x36, 0xd1, 0x32, 0xe6, + 0x2c, 0xa8, 0xfa, 0x37, 0xa8, 0x24, 0x22, 0xa0, 0x46, 0x51, 0xd1, 0x9f, 0xe1, 0x5d, 0xdd, 0xc2, + 0x79, 0x4a, 0xdd, 0x08, 0xa8, 0xfd, 0x56, 0x8e, 0x2c, 0x65, 0x2b, 0x47, 0x01, 0xf4, 0xd7, 0xa8, + 0x22, 0x24, 0x91, 0x89, 0x30, 0xf6, 0x14, 0xea, 0xa3, 0xdb, 0x51, 0x4a, 0x6e, 0xbf, 0x9d, 0xc3, + 0x2a, 0xb3, 0xb5, 0x93, 0x63, 0xda, 0x7f, 0x69, 0xa8, 0x9e, 0x2b, 0x8f, 0x99, 0x90, 0xfa, 0xe9, + 0x46, 0x2d, 0xf0, 0xdd, 0x6a, 0x91, 0xb9, 0x55, 0x25, 0xf6, 0xf3, 0x93, 0x6a, 0xf3, 0xc8, 0x4a, + 0x1d, 0x5e, 0xa1, 0x32, 0x93, 0x10, 0x08, 0xa3, 0xd8, 0xda, 0xeb, 0xd4, 0x0f, 0x3f, 0xbc, 0x35, + 0x7b, 0xfb, 0x61, 0x4e, 0x2b, 0x7f, 0x97, 0xf9, 0x9c, 0x99, 0xbd, 0xfd, 0x67, 0x69, 0x91, 0x75, + 0x56, 0x1c, 0xfd, 0x53, 0x54, 0xcb, 0xfa, 0xec, 0x25, 0x3e, 0xa8, 0xac, 0x1f, 0x2c, 0xb3, 0xe8, + 0xe6, 0x71, 0x67, 0xa1, 0xd0, 0x7f, 0x42, 0x4f, 0x84, 0x24, 0xb1, 0x64, 0x61, 0xff, 0x6b, 0x20, + 0x9e, 0xcf, 0x42, 0xe8, 0x02, 0xe5, 0xa1, 0x27, 0x54, 0x83, 0xf6, 0xec, 0xf7, 0xd3, 0x69, 0xf3, + 0x49, 0x77, 0xbb, 0xc4, 0xd9, 0xe5, 0xd5, 0x4f, 0xd1, 0x23, 0xca, 0x43, 0x9a, 0xc4, 0x31, 0x84, + 0x74, 0xfc, 0x23, 0xf7, 0x19, 0x1d, 0xab, 0x36, 0x3d, 0xb0, 0x71, 0x9e, 0xcd, 0xa3, 0x97, 0xeb, + 0x82, 0xff, 0xb6, 0x05, 0x9d, 0x4d, 0x90, 0xfe, 0x0c, 0x55, 0x45, 0x22, 0x22, 0x08, 0x3d, 0xa3, + 0xd4, 0xd2, 0x3a, 0x35, 0xbb, 0x9e, 0x4e, 0x9b, 0xd5, 0xee, 0x2c, 0xe4, 0xcc, 0xf7, 0xf4, 0x33, + 0x54, 0x3f, 0xe7, 0xee, 0x09, 0x04, 0x91, 0x4f, 0x24, 0x18, 0x65, 0xd5, 0xc2, 0x8f, 0x77, 0xd7, + 0xf9, 0x68, 0x29, 0x56, 0x43, 0xf7, 0x6e, 0x9e, 0x69, 0x7d, 0x65, 0xc3, 0x59, 0x45, 0xea, 0xbf, + 0xa2, 0x86, 0x48, 0x28, 0x05, 0x21, 0x7a, 0x89, 0x7f, 0xc4, 0x5d, 0xf1, 0x2d, 0x13, 0x92, 0xc7, + 0xe3, 0x63, 0x16, 0x30, 0x69, 0x54, 0x5a, 0x5a, 0xa7, 0x6c, 0x9b, 0xe9, 0xb4, 0xd9, 0xe8, 0xee, + 0x54, 0x39, 0x37, 0x10, 0x74, 0x07, 0x3d, 0xee, 0x11, 0xe6, 0x83, 0xb7, 0xc1, 0xae, 0x2a, 0x76, + 0x23, 0x9d, 0x36, 0x1f, 0xbf, 0xda, 0xaa, 0x70, 0x76, 0x38, 0xdb, 0x7f, 0x6b, 0xe8, 0xe1, 0xb5, + 0xfb, 0xa0, 0x7f, 0x8f, 0x2a, 0x84, 0x4a, 0x36, 0xca, 0xe6, 0x25, 0x1b, 0xc5, 0xa7, 0xab, 0x25, + 0xca, 0xde, 0xb4, 0xe5, 0xfd, 0x76, 0xa0, 0x07, 0x59, 0x27, 0x60, 0x79, 0x89, 0x5e, 0x28, 0xab, + 0x93, 0x23, 0x74, 0x1f, 0xed, 0xfb, 0x44, 0xc8, 0xf9, 0xa8, 0x9d, 0xb0, 0x00, 0x54, 0x93, 0xea, + 0x87, 0x9f, 0xdc, 0xed, 0xf2, 0x64, 0x0e, 
0xfb, 0xbd, 0x74, 0xda, 0xdc, 0x3f, 0x5e, 0xe3, 0x38, + 0x1b, 0xe4, 0xf6, 0x44, 0x43, 0xab, 0xdd, 0xb9, 0x87, 0xe7, 0xeb, 0x67, 0x54, 0x93, 0xf3, 0x89, + 0x2a, 0xbe, 0xe9, 0x44, 0x2d, 0x6e, 0xe2, 0x62, 0x9c, 0x16, 0xb0, 0xec, 0xf5, 0x79, 0x67, 0x4d, + 0x7f, 0x0f, 0xbf, 0xf3, 0xd5, 0xb5, 0xd7, 0xf8, 0x83, 0x6d, 0xbf, 0x82, 0x6f, 0x78, 0x84, 0xed, + 0xe7, 0x93, 0x2b, 0xb3, 0x70, 0x71, 0x65, 0x16, 0x2e, 0xaf, 0xcc, 0xc2, 0x6f, 0xa9, 0xa9, 0x4d, + 0x52, 0x53, 0xbb, 0x48, 0x4d, 0xed, 0x32, 0x35, 0xb5, 0x7f, 0x52, 0x53, 0xfb, 0xe3, 0x5f, 0xb3, + 0xf0, 0x4b, 0x35, 0x2f, 0xc8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x9f, 0xb3, 0xdd, 0xdf, + 0x07, 0x00, 0x00, +} + func (m *CronJob) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -105,41 +290,52 @@ func (m *CronJob) Marshal() (dAtA []byte, err error) { } func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -147,37 +343,46 @@ func (m *CronJobList) Marshal() (dAtA []byte, err error) { } func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -185,58 +390,67 @@ func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { } func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i += copy(dAtA[i:], m.Schedule) - if m.StartingDeadlineSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + if m.FailedJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i-- + dAtA[i] = 0x38 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) - i += copy(dAtA[i:], m.ConcurrencyPolicy) + if m.SuccessfulJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a if m.Suspend != nil { - dAtA[i] = 0x20 - i++ + i-- if *m.Suspend { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) - n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.SuccessfulJobsHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x20 } - if m.FailedJobsHistoryLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i -= len(m.ConcurrencyPolicy) + copy(dAtA[i:], m.ConcurrencyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i-- + dAtA[i] = 0x1a + if m.StartingDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Schedule) + copy(dAtA[i:], m.Schedule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -244,39 +458,48 @@ func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { } func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Active) > 0 { - for _, msg := range m.Active { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.LastScheduleTime != nil { + { + size, err := m.LastScheduleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { 
return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.LastScheduleTime != nil { + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) - n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if len(m.Active) > 0 { + for iNdEx := len(m.Active) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n6 } - return i, nil + return len(dAtA) - i, nil } func (m *JobTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -284,33 +507,42 @@ func (m *JobTemplate) Marshal() (dAtA []byte, err error) { } func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -318,39 +550,53 @@ func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { } func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CronJob) Size() (n int) { + if m == 
nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -363,6 +609,9 @@ func (m *CronJob) Size() (n int) { } func (m *CronJobList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -377,6 +626,9 @@ func (m *CronJobList) Size() (n int) { } func (m *CronJobSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Schedule) @@ -401,6 +653,9 @@ func (m *CronJobSpec) Size() (n int) { } func (m *CronJobStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Active) > 0 { @@ -417,6 +672,9 @@ func (m *CronJobStatus) Size() (n int) { } func (m *JobTemplate) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -427,6 +685,9 @@ func (m *JobTemplate) Size() (n int) { } func (m *JobTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -437,14 +698,7 @@ func (m *JobTemplateSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -454,7 +708,7 @@ func (this *CronJob) String() string { return "nil" } s := strings.Join([]string{`&CronJob{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -465,9 +719,14 @@ func (this *CronJobList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CronJob{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronJob", "CronJob", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CronJobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -492,9 +751,14 @@ func (this *CronJobStatus) String() string { if this == nil { return "nil" } + repeatedStringForActive := "[]ObjectReference{" + for _, f := range this.Active { + repeatedStringForActive += fmt.Sprintf("%v", f) + "," + } + repeatedStringForActive += "}" s := strings.Join([]string{`&CronJobStatus{`, - `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Active:` + repeatedStringForActive + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "v1.Time", 1) + `,`, `}`, }, "") return s @@ -504,7 +768,7 @@ func 
(this *JobTemplate) String() string { return "nil" } s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -515,8 +779,8 @@ func (this *JobTemplateSpec) String() string { return "nil" } s := strings.Join([]string{`&JobTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "JobSpec", "v12.JobSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -544,7 +808,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -572,7 +836,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -581,6 +845,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -602,7 +869,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -611,6 +878,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -632,7 +902,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -641,6 +911,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -657,6 +930,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -684,7 +960,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -712,7 +988,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -721,6 +997,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -742,7 +1021,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -751,6 +1030,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -768,6 +1050,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -795,7 +1080,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -823,7 +1108,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -833,6 +1118,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -852,7 +1140,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -872,7 +1160,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -882,6 +1170,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -901,7 +1192,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -922,7 +1213,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -931,6 +1222,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -952,7 +1246,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -972,7 +1266,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -987,6 +1281,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1014,7 +1311,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1042,7 +1339,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1051,10 +1348,13 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) + m.Active = append(m.Active, v11.ObjectReference{}) if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1073,7 +1373,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1082,11 +1382,14 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScheduleTime == nil { - m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScheduleTime = &v1.Time{} } if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1101,6 +1404,9 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1128,7 +1434,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1156,7 +1462,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1165,6 +1471,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1186,7 +1495,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1195,6 +1504,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1211,6 +1523,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1238,7 +1553,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1266,7 +1581,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1275,6 +1590,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1296,7 +1614,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1305,6 +1623,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1321,6 +1642,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1387,10 +1711,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1419,6 +1746,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1437,60 +1767,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 771 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x76, 0xc2, 0x42, 0xd7, 0xa0, 0x5d, 0x2b, 0x20, 0x27, 0x64, 0xb5, - 0x22, 0x20, 0x76, 0x4c, 0x2b, 0x84, 0x38, 0x21, 0xad, 0x17, 0x2d, 0x50, 0x8a, 0x16, 0x39, 0x45, - 0x48, 0xa8, 0x42, 0x1d, 0x8f, 0x5f, 0x92, 0x69, 0x6c, 0x8f, 0xe5, 0x19, 0x47, 0xca, 0x8d, 0x0b, - 0x77, 0xfe, 0x11, 0x4e, 0xfc, 0x13, 0x11, 0xa7, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x2f, 0x38, 0x21, - 0x4f, 0x9c, 0x1f, 0xcd, 0x8f, 0xb6, 0x7b, 0xe9, 0xcd, 0xf3, 0xe6, 0xfb, 0xfd, 0xcc, 0xf3, 0x7b, - 0x6f, 0x06, 0xbd, 0x18, 0x7e, 0x29, 0x30, 0xe3, 0xd6, 0x30, 0x71, 0x21, 0x0e, 0x41, 0x82, 0xb0, - 0x46, 0x10, 0x7a, 0x3c, 0xb6, 0xf2, 0x0d, 0x12, 0x31, 0xcb, 0x25, 0x92, 0x0e, 0xac, 0xd1, 0x81, - 0x0b, 0x92, 0x1c, 0x58, 0x7d, 0x08, 0x21, 0x26, 0x12, 0x3c, 0x1c, 0xc5, 0x5c, 0x72, 0xdd, 0x98, - 0x29, 0x31, 0x89, 0x18, 0x56, 0x4a, 0x9c, 0x2b, 0x1b, 0xcf, 0xfb, 0x4c, 0x0e, 0x12, 0x17, 0x53, - 0x1e, 0x58, 0x7d, 0xde, 0xe7, 0x96, 0x32, 0xb8, 0x49, 0x4f, 0xad, 0xd4, 0x42, 0x7d, 0xcd, 0x40, - 0x8d, 0xa7, 0x5b, 0x8e, 0x5c, 0x3f, 0xad, 0xd1, 0x5e, 0x11, 0x51, 0x1e, 0xc3, 0x36, 0xcd, 0xe7, - 0x4b, 0x4d, 0x40, 0xe8, 0x80, 0x85, 0x10, 0x8f, 0xad, 0x68, 0xd8, 0xcf, 0x02, 0xc2, 0x0a, 0x40, - 0x92, 0x6d, 0x2e, 0x6b, 0x97, 0x2b, 0x4e, 0x42, 0xc9, 0x02, 0xd8, 0x30, 0x7c, 0x71, 0x9b, 0x41, - 0xd0, 0x01, 0x04, 0x64, 0xdd, 0xd7, 0xfe, 0xbd, 0x88, 0xaa, 0x2f, 0x63, 0x1e, 0x1e, 0x71, 0x57, - 0x3f, 0x43, 0xb5, 0x2c, 0x1f, 0x8f, 0x48, 0x62, 0x68, 0x2d, 0xad, 0x53, 0x3f, 0xfc, 0x0c, 0x2f, - 0xeb, 0xb9, 0xc0, 0xe2, 0x68, 0xd8, 0xcf, 0x02, 0x02, 0x67, 0x6a, 0x3c, 0x3a, 0xc0, 0xaf, 0xdd, - 0x73, 0xa0, 0xf2, 0x07, 0x90, 0xc4, 0xd6, 0x27, 0xd3, 0x66, 0x21, 0x9d, 0x36, 0xd1, 0x32, 0xe6, - 0x2c, 0xa8, 0xfa, 0x37, 0xa8, 0x24, 0x22, 0xa0, 0x46, 0x51, 0xd1, 0x9f, 0xe1, 0x5d, 0xdd, 0xc2, - 0x79, 0x4a, 0xdd, 0x08, 0xa8, 0xfd, 0x56, 0x8e, 0x2c, 0x65, 0x2b, 0x47, 0x01, 0xf4, 0xd7, 0xa8, - 0x22, 0x24, 0x91, 0x89, 0x30, 0xf6, 0x14, 0xea, 0xa3, 0xdb, 0x51, 0x4a, 0x6e, 0xbf, 0x9d, 0xc3, - 
0x2a, 0xb3, 0xb5, 0x93, 0x63, 0xda, 0x7f, 0x69, 0xa8, 0x9e, 0x2b, 0x8f, 0x99, 0x90, 0xfa, 0xe9, - 0x46, 0x2d, 0xf0, 0xdd, 0x6a, 0x91, 0xb9, 0x55, 0x25, 0xf6, 0xf3, 0x93, 0x6a, 0xf3, 0xc8, 0x4a, - 0x1d, 0x5e, 0xa1, 0x32, 0x93, 0x10, 0x08, 0xa3, 0xd8, 0xda, 0xeb, 0xd4, 0x0f, 0x3f, 0xbc, 0x35, - 0x7b, 0xfb, 0x61, 0x4e, 0x2b, 0x7f, 0x97, 0xf9, 0x9c, 0x99, 0xbd, 0xfd, 0x67, 0x69, 0x91, 0x75, - 0x56, 0x1c, 0xfd, 0x53, 0x54, 0xcb, 0xfa, 0xec, 0x25, 0x3e, 0xa8, 0xac, 0x1f, 0x2c, 0xb3, 0xe8, - 0xe6, 0x71, 0x67, 0xa1, 0xd0, 0x7f, 0x42, 0x4f, 0x84, 0x24, 0xb1, 0x64, 0x61, 0xff, 0x6b, 0x20, - 0x9e, 0xcf, 0x42, 0xe8, 0x02, 0xe5, 0xa1, 0x27, 0x54, 0x83, 0xf6, 0xec, 0xf7, 0xd3, 0x69, 0xf3, - 0x49, 0x77, 0xbb, 0xc4, 0xd9, 0xe5, 0xd5, 0x4f, 0xd1, 0x23, 0xca, 0x43, 0x9a, 0xc4, 0x31, 0x84, - 0x74, 0xfc, 0x23, 0xf7, 0x19, 0x1d, 0xab, 0x36, 0x3d, 0xb0, 0x71, 0x9e, 0xcd, 0xa3, 0x97, 0xeb, - 0x82, 0xff, 0xb6, 0x05, 0x9d, 0x4d, 0x90, 0xfe, 0x0c, 0x55, 0x45, 0x22, 0x22, 0x08, 0x3d, 0xa3, - 0xd4, 0xd2, 0x3a, 0x35, 0xbb, 0x9e, 0x4e, 0x9b, 0xd5, 0xee, 0x2c, 0xe4, 0xcc, 0xf7, 0xf4, 0x33, - 0x54, 0x3f, 0xe7, 0xee, 0x09, 0x04, 0x91, 0x4f, 0x24, 0x18, 0x65, 0xd5, 0xc2, 0x8f, 0x77, 0xd7, - 0xf9, 0x68, 0x29, 0x56, 0x43, 0xf7, 0x6e, 0x9e, 0x69, 0x7d, 0x65, 0xc3, 0x59, 0x45, 0xea, 0xbf, - 0xa2, 0x86, 0x48, 0x28, 0x05, 0x21, 0x7a, 0x89, 0x7f, 0xc4, 0x5d, 0xf1, 0x2d, 0x13, 0x92, 0xc7, - 0xe3, 0x63, 0x16, 0x30, 0x69, 0x54, 0x5a, 0x5a, 0xa7, 0x6c, 0x9b, 0xe9, 0xb4, 0xd9, 0xe8, 0xee, - 0x54, 0x39, 0x37, 0x10, 0x74, 0x07, 0x3d, 0xee, 0x11, 0xe6, 0x83, 0xb7, 0xc1, 0xae, 0x2a, 0x76, - 0x23, 0x9d, 0x36, 0x1f, 0xbf, 0xda, 0xaa, 0x70, 0x76, 0x38, 0xdb, 0x7f, 0x6b, 0xe8, 0xe1, 0xb5, - 0xfb, 0xa0, 0x7f, 0x8f, 0x2a, 0x84, 0x4a, 0x36, 0xca, 0xe6, 0x25, 0x1b, 0xc5, 0xa7, 0xab, 0x25, - 0xca, 0xde, 0xb4, 0xe5, 0xfd, 0x76, 0xa0, 0x07, 0x59, 0x27, 0x60, 0x79, 0x89, 0x5e, 0x28, 0xab, - 0x93, 0x23, 0x74, 0x1f, 0xed, 0xfb, 0x44, 0xc8, 0xf9, 0xa8, 0x9d, 0xb0, 0x00, 0x54, 0x93, 0xea, - 0x87, 0x9f, 0xdc, 0xed, 0xf2, 0x64, 0x0e, 0xfb, 0xbd, 0x74, 0xda, 0xdc, 0x3f, 0x5e, 0xe3, 0x38, - 0x1b, 0xe4, 0xf6, 0x44, 0x43, 0xab, 0xdd, 0xb9, 0x87, 0xe7, 0xeb, 0x67, 0x54, 0x93, 0xf3, 0x89, - 0x2a, 0xbe, 0xe9, 0x44, 0x2d, 0x6e, 0xe2, 0x62, 0x9c, 0x16, 0xb0, 0xec, 0xf5, 0x79, 0x67, 0x4d, - 0x7f, 0x0f, 0xbf, 0xf3, 0xd5, 0xb5, 0xd7, 0xf8, 0x83, 0x6d, 0xbf, 0x82, 0x6f, 0x78, 0x84, 0xed, - 0xe7, 0x93, 0x2b, 0xb3, 0x70, 0x71, 0x65, 0x16, 0x2e, 0xaf, 0xcc, 0xc2, 0x6f, 0xa9, 0xa9, 0x4d, - 0x52, 0x53, 0xbb, 0x48, 0x4d, 0xed, 0x32, 0x35, 0xb5, 0x7f, 0x52, 0x53, 0xfb, 0xe3, 0x5f, 0xb3, - 0xf0, 0x4b, 0x35, 0x2f, 0xc8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x9f, 0xb3, 0xdd, 0xdf, - 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index 043b3551b08b9..995b4f3f9d93c 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -33,17 +33,17 @@ option go_package = "v1beta1"; // CronJob represents the configuration of a single cron job. message CronJob { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobSpec spec = 2; // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobStatus status = 3; } @@ -51,7 +51,7 @@ message CronJob { // CronJobList is a collection of cron jobs. message CronJobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -112,12 +112,12 @@ message CronJobStatus { // JobTemplate describes a template for creating copies of a predefined pod. message JobTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobTemplateSpec template = 2; } @@ -125,12 +125,12 @@ message JobTemplate { // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go index cb5c9bad29464..2978747a488ec 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -28,12 +28,12 @@ import ( type JobTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Defines jobs that will be created from this template. 
- // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -41,12 +41,12 @@ type JobTemplate struct { // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -58,17 +58,17 @@ type JobTemplateSpec struct { type CronJob struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -80,7 +80,7 @@ type CronJobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index abbdfec010136..ecc914446067a 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ "": "CronJob represents the configuration of a single cron job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a cron job, including the schedule. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (CronJob) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (CronJob) SwaggerDoc() map[string]string { var map_CronJobList = map[string]string{ "": "CronJobList is a collection of cron jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CronJobs.", } @@ -75,8 +75,8 @@ func (CronJobStatus) SwaggerDoc() map[string]string { var map_JobTemplate = map[string]string{ "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplate) SwaggerDoc() map[string]string { @@ -85,8 +85,8 @@ func (JobTemplate) SwaggerDoc() map[string]string { var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplateSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go index e6f17cce62771..8271c84111300 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -17,38 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto -/* - Package v2alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto - - It has these top-level messages: - CronJob - CronJobList - CronJobSpec - CronJobStatus - JobTemplate - JobTemplateSpec -*/ package v2alpha1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -62,29 +45,173 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *CronJob) Reset() { *m = CronJob{} } -func (*CronJob) ProtoMessage() {} -func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{0} +} +func (m *CronJob) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJob) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJob.Merge(m, src) +} +func (m *CronJob) XXX_Size() int { + return m.Size() +} +func (m *CronJob) XXX_DiscardUnknown() { + xxx_messageInfo_CronJob.DiscardUnknown(m) +} -func (m *CronJobList) Reset() { *m = CronJobList{} } -func (*CronJobList) ProtoMessage() {} -func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CronJob proto.InternalMessageInfo -func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } -func (*CronJobSpec) ProtoMessage() {} -func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{1} +} +func (m *CronJobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobList.Merge(m, src) +} +func (m *CronJobList) XXX_Size() int { + return m.Size() +} +func (m *CronJobList) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobList.DiscardUnknown(m) +} -func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } -func (*CronJobStatus) ProtoMessage() {} -func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_CronJobList proto.InternalMessageInfo -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) 
ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{2} +} +func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobSpec.Merge(m, src) +} +func (m *CronJobSpec) XXX_Size() int { + return m.Size() +} +func (m *CronJobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobSpec.DiscardUnknown(m) +} -func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } -func (*JobTemplateSpec) ProtoMessage() {} -func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo + +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{3} +} +func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobStatus.Merge(m, src) +} +func (m *CronJobStatus) XXX_Size() int { + return m.Size() +} +func (m *CronJobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{4} +} +func (m *JobTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplate.Merge(m, src) +} +func (m *JobTemplate) XXX_Size() int { + return m.Size() +} +func (m *JobTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplate proto.InternalMessageInfo + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{5} +} +func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplateSpec.Merge(m, src) +} +func (m *JobTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *JobTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo func init() { proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v2alpha1.CronJob") @@ -94,10 +221,68 @@ func init() { 
proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v2alpha1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v2alpha1.JobTemplateSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptor_5495a0550fe29c46) +} + +var fileDescriptor_5495a0550fe29c46 = []byte{ + // 774 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xdb, 0x36, + 0x18, 0xc7, 0x2d, 0xc7, 0x6f, 0xa1, 0x97, 0x2d, 0xd1, 0x86, 0xc4, 0xf3, 0x06, 0xd9, 0x50, 0xb0, + 0xc1, 0x18, 0x36, 0x6a, 0x09, 0x86, 0x61, 0xa7, 0x01, 0x53, 0x86, 0x36, 0x4d, 0x53, 0x34, 0x90, + 0x53, 0xa0, 0x28, 0x82, 0xa2, 0x14, 0x45, 0xdb, 0x8c, 0x25, 0x51, 0x10, 0x29, 0x03, 0xbe, 0xf5, + 0xd6, 0x6b, 0x3f, 0x49, 0x2f, 0xed, 0x87, 0x48, 0x7b, 0xca, 0x31, 0x27, 0xa3, 0x51, 0xbf, 0x45, + 0x4f, 0x85, 0x68, 0xf9, 0x25, 0x7e, 0x49, 0xd2, 0x4b, 0x6e, 0xe2, 0xa3, 0xff, 0xff, 0xc7, 0x87, + 0xcf, 0xf3, 0x90, 0xc0, 0xec, 0xfe, 0xc3, 0x21, 0x65, 0x46, 0x37, 0xb2, 0x49, 0xe8, 0x13, 0x41, + 0xb8, 0xd1, 0x23, 0xbe, 0xc3, 0x42, 0x23, 0xfd, 0x81, 0x02, 0x6a, 0xd8, 0x48, 0xe0, 0x8e, 0xd1, + 0xdb, 0x45, 0x6e, 0xd0, 0x41, 0x3b, 0x46, 0x9b, 0xf8, 0x24, 0x44, 0x82, 0x38, 0x30, 0x08, 0x99, + 0x60, 0xea, 0x8f, 0x43, 0x29, 0x44, 0x01, 0x85, 0x52, 0x0a, 0x47, 0xd2, 0xea, 0x1f, 0x6d, 0x2a, + 0x3a, 0x91, 0x0d, 0x31, 0xf3, 0x8c, 0x36, 0x6b, 0x33, 0x43, 0x3a, 0xec, 0xa8, 0x25, 0x57, 0x72, + 0x21, 0xbf, 0x86, 0xa4, 0xea, 0xf6, 0xfc, 0xa6, 0x73, 0xdb, 0x55, 0xf5, 0x29, 0x11, 0x66, 0x21, + 0x59, 0xa4, 0xf9, 0x6b, 0xa2, 0xf1, 0x10, 0xee, 0x50, 0x9f, 0x84, 0x7d, 0x23, 0xe8, 0xb6, 0x93, + 0x00, 0x37, 0x3c, 0x22, 0xd0, 0x22, 0x97, 0xb1, 0xcc, 0x15, 0x46, 0xbe, 0xa0, 0x1e, 0x99, 0x33, + 0xfc, 0x7d, 0x93, 0x81, 0xe3, 0x0e, 0xf1, 0xd0, 0xac, 0x4f, 0x7f, 0x95, 0x05, 0xc5, 0xbd, 0x90, + 0xf9, 0x07, 0xcc, 0x56, 0x5f, 0x80, 0x52, 0x92, 0x8f, 0x83, 0x04, 0xaa, 0x28, 0x75, 0xa5, 0x51, + 0xde, 0xfd, 0x13, 0x4e, 0x0a, 0x3a, 0xc6, 0xc2, 0xa0, 0xdb, 0x4e, 0x02, 0x1c, 0x26, 0x6a, 0xd8, + 0xdb, 0x81, 0x8f, 0xed, 0x53, 0x82, 0xc5, 0x23, 0x22, 0x90, 0xa9, 0x9e, 0x0d, 0x6a, 0x99, 0x78, + 0x50, 0x03, 0x93, 0x98, 0x35, 0xa6, 0xaa, 0xfb, 0x20, 0xc7, 0x03, 0x82, 0x2b, 0x59, 0x49, 0xff, + 0x15, 0x2e, 0x6d, 0x17, 0x4c, 0x73, 0x6a, 0x06, 0x04, 0x9b, 0xdf, 0xa4, 0xcc, 0x5c, 0xb2, 0xb2, + 0x24, 0x41, 0x3d, 0x02, 0x05, 0x2e, 0x90, 0x88, 0x78, 0x65, 0x45, 0xb2, 0x1a, 0xb7, 0x60, 0x49, + 0xbd, 0xf9, 0x6d, 0x4a, 0x2b, 0x0c, 0xd7, 0x56, 0xca, 0xd1, 0xdf, 0x29, 0xa0, 0x9c, 0x2a, 0x0f, + 0x29, 0x17, 0xea, 0xc9, 0x5c, 0x35, 0xe0, 0xed, 0xaa, 0x91, 0xb8, 0x65, 0x2d, 0xd6, 0xd3, 0x9d, + 0x4a, 0xa3, 0xc8, 0x54, 0x25, 0xee, 0x83, 0x3c, 0x15, 0xc4, 0xe3, 0x95, 0x6c, 0x7d, 0xa5, 0x51, + 0xde, 0xd5, 0x6f, 0x4e, 0xdf, 0x5c, 0x4b, 0x71, 0xf9, 0x07, 0x89, 0xd1, 0x1a, 0xfa, 0xf5, 0x37, + 0xb9, 0x71, 0xda, 0x49, 0x79, 0xd4, 0xdf, 0x41, 0x29, 0x69, 0xb5, 0x13, 0xb9, 0x44, 0xa6, 0xbd, + 0x3a, 0x49, 0xa3, 0x99, 0xc6, 0xad, 0xb1, 0x42, 0x7d, 0x02, 0xb6, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, + 0xfb, 0x7f, 0x82, 0x1c, 0x97, 0xfa, 0xa4, 0x49, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x62, 0xfe, + 0x14, 0x0f, 0x6a, 0x5b, 0xcd, 0xc5, 0x12, 0x6b, 0x99, 0x57, 0x3d, 0x01, 0x1b, 0x98, 0xf9, 0x38, + 0x0a, 0x43, 0xe2, 0xe3, 0xfe, 0x11, 0x73, 0x29, 0xee, 0xcb, 0x46, 0xad, 0x9a, 0x30, 0xcd, 0x66, + 0x63, 0x6f, 0x56, 0xf0, 0x79, 0x51, 0xd0, 0x9a, 0x07, 0xa9, 0xbf, 0x80, 0x22, 0x8f, 0x78, 0x40, + 0x7c, 0xa7, 0x92, 0xab, 0x2b, 0x8d, 0x92, 0x59, 0x8e, 0x07, 0xb5, 0x62, 0x73, 0x18, 0xb2, 0x46, + 
0xff, 0x54, 0x04, 0xca, 0xa7, 0xcc, 0x3e, 0x26, 0x5e, 0xe0, 0x22, 0x41, 0x2a, 0x79, 0xd9, 0xc3, + 0xdf, 0xae, 0x29, 0xf4, 0xc1, 0x44, 0x2d, 0xe7, 0xee, 0xfb, 0x34, 0xd5, 0xf2, 0xd4, 0x0f, 0x6b, + 0x9a, 0xa9, 0x3e, 0x07, 0x55, 0x1e, 0x61, 0x4c, 0x38, 0x6f, 0x45, 0xee, 0x01, 0xb3, 0xf9, 0x3e, + 0xe5, 0x82, 0x85, 0xfd, 0x43, 0xea, 0x51, 0x51, 0x29, 0xd4, 0x95, 0x46, 0xde, 0xd4, 0xe2, 0x41, + 0xad, 0xda, 0x5c, 0xaa, 0xb2, 0xae, 0x21, 0xa8, 0x16, 0xd8, 0x6c, 0x21, 0xea, 0x12, 0x67, 0x8e, + 0x5d, 0x94, 0xec, 0x6a, 0x3c, 0xa8, 0x6d, 0xde, 0x5b, 0xa8, 0xb0, 0x96, 0x38, 0xf5, 0x0f, 0x0a, + 0x58, 0xbb, 0x72, 0x23, 0xd4, 0x87, 0xa0, 0x80, 0xb0, 0xa0, 0xbd, 0x64, 0x60, 0x92, 0x61, 0xdc, + 0x9e, 0xae, 0x51, 0xf2, 0xae, 0x4d, 0xee, 0xb8, 0x45, 0x5a, 0x24, 0x69, 0x05, 0x99, 0x5c, 0xa3, + 0xff, 0xa4, 0xd5, 0x4a, 0x11, 0xaa, 0x0b, 0xd6, 0x5d, 0xc4, 0xc5, 0x68, 0xd6, 0x8e, 0xa9, 0x47, + 0x64, 0x97, 0xae, 0x96, 0xfe, 0x9a, 0xeb, 0x93, 0x38, 0xcc, 0x1f, 0xe2, 0x41, 0x6d, 0xfd, 0x70, + 0x86, 0x63, 0xcd, 0x91, 0xf5, 0xf7, 0x0a, 0x98, 0xee, 0xce, 0x1d, 0x3c, 0x61, 0x4f, 0x41, 0x49, + 0x8c, 0x46, 0x2a, 0xfb, 0xd5, 0x23, 0x35, 0xbe, 0x8b, 0xe3, 0x79, 0x1a, 0xd3, 0xf4, 0xb7, 0x0a, + 0xf8, 0x6e, 0x46, 0x7f, 0x07, 0xe7, 0xf9, 0xf7, 0xca, 0x93, 0xfc, 0xf3, 0x82, 0xb3, 0xc8, 0x53, + 0x2c, 0x7b, 0x88, 0x4d, 0x78, 0x76, 0xa9, 0x65, 0xce, 0x2f, 0xb5, 0xcc, 0xc5, 0xa5, 0x96, 0x79, + 0x19, 0x6b, 0xca, 0x59, 0xac, 0x29, 0xe7, 0xb1, 0xa6, 0x5c, 0xc4, 0x9a, 0xf2, 0x31, 0xd6, 0x94, + 0xd7, 0x9f, 0xb4, 0xcc, 0xb3, 0xd2, 0xa8, 0x22, 0x5f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x20, 0x1c, + 0xcf, 0x94, 0xe7, 0x07, 0x00, 0x00, +} + func (m *CronJob) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -105,41 +290,52 @@ func (m *CronJob) Marshal() (dAtA []byte, err error) { } func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -147,37 +343,46 @@ func (m *CronJobList) Marshal() (dAtA []byte, err error) { } func (m 
*CronJobList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -185,58 +390,67 @@ func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { } func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i += copy(dAtA[i:], m.Schedule) - if m.StartingDeadlineSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + if m.FailedJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i-- + dAtA[i] = 0x38 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) - i += copy(dAtA[i:], m.ConcurrencyPolicy) + if m.SuccessfulJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a if m.Suspend != nil { - dAtA[i] = 0x20 - i++ + i-- if *m.Suspend { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) - n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.SuccessfulJobsHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x20 } - if m.FailedJobsHistoryLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i -= len(m.ConcurrencyPolicy) + copy(dAtA[i:], m.ConcurrencyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i-- + dAtA[i] = 0x1a + if m.StartingDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Schedule) + copy(dAtA[i:], m.Schedule) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Schedule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -244,39 +458,48 @@ func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { } func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Active) > 0 { - for _, msg := range m.Active { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.LastScheduleTime != nil { + { + size, err := m.LastScheduleTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.LastScheduleTime != nil { + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) - n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if len(m.Active) > 0 { + for iNdEx := len(m.Active) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n6 } - return i, nil + return len(dAtA) - i, nil } func (m *JobTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -284,33 +507,42 @@ func (m *JobTemplate) Marshal() (dAtA []byte, err error) { } func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -318,39 +550,53 @@ func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { } func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CronJob) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -363,6 +609,9 @@ func (m *CronJob) Size() (n int) { } func (m *CronJobList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -377,6 +626,9 @@ func (m *CronJobList) Size() (n int) { } func (m *CronJobSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Schedule) @@ -401,6 +653,9 @@ func (m *CronJobSpec) Size() (n int) { } func (m *CronJobStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Active) > 0 { @@ -417,6 +672,9 @@ func (m *CronJobStatus) Size() (n int) { } func (m *JobTemplate) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -427,6 +685,9 @@ func (m *JobTemplate) Size() (n int) { } func (m *JobTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -437,14 +698,7 @@ func (m *JobTemplateSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -454,7 +708,7 @@ func (this *CronJob) String() string { return "nil" } s := strings.Join([]string{`&CronJob{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -465,9 +719,14 @@ func (this *CronJobList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CronJob{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronJob", "CronJob", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CronJobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), 
"ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -492,9 +751,14 @@ func (this *CronJobStatus) String() string { if this == nil { return "nil" } + repeatedStringForActive := "[]ObjectReference{" + for _, f := range this.Active { + repeatedStringForActive += fmt.Sprintf("%v", f) + "," + } + repeatedStringForActive += "}" s := strings.Join([]string{`&CronJobStatus{`, - `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Active:` + repeatedStringForActive + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "v1.Time", 1) + `,`, `}`, }, "") return s @@ -504,7 +768,7 @@ func (this *JobTemplate) String() string { return "nil" } s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -515,8 +779,8 @@ func (this *JobTemplateSpec) String() string { return "nil" } s := strings.Join([]string{`&JobTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "JobSpec", "v12.JobSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -544,7 +808,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -572,7 +836,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -581,6 +845,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -602,7 +869,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -611,6 +878,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -632,7 +902,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -641,6 +911,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -657,6 +930,9 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -684,7 +960,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -712,7 +988,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -721,6 +997,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -742,7 +1021,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -751,6 +1030,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -768,6 +1050,9 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -795,7 +1080,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -823,7 +1108,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -833,6 +1118,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -852,7 +1140,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -872,7 +1160,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -882,6 +1170,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -901,7 +1192,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -922,7 +1213,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -931,6 +1222,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > 
l { return io.ErrUnexpectedEOF } @@ -952,7 +1246,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -972,7 +1266,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -987,6 +1281,9 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1014,7 +1311,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1042,7 +1339,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1051,10 +1348,13 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) + m.Active = append(m.Active, v11.ObjectReference{}) if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1073,7 +1373,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1082,11 +1382,14 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScheduleTime == nil { - m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.LastScheduleTime = &v1.Time{} } if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1101,6 +1404,9 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1128,7 +1434,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1156,7 +1462,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1165,6 +1471,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1186,7 +1495,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1195,6 +1504,9 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1211,6 +1523,9 @@ func (m *JobTemplate) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1238,7 +1553,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1266,7 +1581,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1275,6 +1590,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1296,7 +1614,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1305,6 +1623,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1321,6 +1642,9 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1387,10 +1711,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1419,6 +1746,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1437,60 +1767,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 774 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xdb, 0x36, - 0x18, 0xc7, 0x2d, 0xc7, 0x6f, 0xa1, 0x97, 0x2d, 0xd1, 0x86, 0xc4, 0xf3, 0x06, 0xd9, 0x50, 0xb0, - 0xc1, 0x18, 0x36, 0x6a, 0x09, 0x86, 0x61, 0xa7, 0x01, 0x53, 0x86, 0x36, 0x4d, 0x53, 0x34, 0x90, - 0x53, 0xa0, 0x28, 0x82, 0xa2, 0x14, 0x45, 0xdb, 0x8c, 0x25, 0x51, 0x10, 0x29, 0x03, 0xbe, 0xf5, - 0xd6, 0x6b, 0x3f, 0x49, 0x2f, 0xed, 0x87, 0x48, 0x7b, 0xca, 0x31, 0x27, 0xa3, 0x51, 0xbf, 0x45, - 0x4f, 0x85, 0x68, 0xf9, 0x25, 0x7e, 0x49, 0xd2, 0x4b, 0x6e, 0xe2, 0xa3, 0xff, 0xff, 0xc7, 0x87, - 0xcf, 0xf3, 0x90, 0xc0, 0xec, 0xfe, 0xc3, 0x21, 0x65, 0x46, 0x37, 0xb2, 0x49, 0xe8, 0x13, 0x41, - 0xb8, 0xd1, 0x23, 0xbe, 0xc3, 0x42, 0x23, 0xfd, 0x81, 0x02, 0x6a, 0xd8, 0x48, 0xe0, 0x8e, 0xd1, - 0xdb, 0x45, 0x6e, 0xd0, 0x41, 0x3b, 0x46, 0x9b, 0xf8, 0x24, 0x44, 0x82, 0x38, 0x30, 0x08, 0x99, - 0x60, 0xea, 0x8f, 0x43, 0x29, 0x44, 0x01, 0x85, 0x52, 0x0a, 0x47, 0xd2, 0xea, 0x1f, 0x6d, 0x2a, - 0x3a, 0x91, 0x0d, 0x31, 0xf3, 0x8c, 0x36, 0x6b, 0x33, 0x43, 0x3a, 0xec, 0xa8, 0x25, 0x57, 0x72, - 0x21, 0xbf, 0x86, 0xa4, 0xea, 0xf6, 0xfc, 
0xa6, 0x73, 0xdb, 0x55, 0xf5, 0x29, 0x11, 0x66, 0x21, - 0x59, 0xa4, 0xf9, 0x6b, 0xa2, 0xf1, 0x10, 0xee, 0x50, 0x9f, 0x84, 0x7d, 0x23, 0xe8, 0xb6, 0x93, - 0x00, 0x37, 0x3c, 0x22, 0xd0, 0x22, 0x97, 0xb1, 0xcc, 0x15, 0x46, 0xbe, 0xa0, 0x1e, 0x99, 0x33, - 0xfc, 0x7d, 0x93, 0x81, 0xe3, 0x0e, 0xf1, 0xd0, 0xac, 0x4f, 0x7f, 0x95, 0x05, 0xc5, 0xbd, 0x90, - 0xf9, 0x07, 0xcc, 0x56, 0x5f, 0x80, 0x52, 0x92, 0x8f, 0x83, 0x04, 0xaa, 0x28, 0x75, 0xa5, 0x51, - 0xde, 0xfd, 0x13, 0x4e, 0x0a, 0x3a, 0xc6, 0xc2, 0xa0, 0xdb, 0x4e, 0x02, 0x1c, 0x26, 0x6a, 0xd8, - 0xdb, 0x81, 0x8f, 0xed, 0x53, 0x82, 0xc5, 0x23, 0x22, 0x90, 0xa9, 0x9e, 0x0d, 0x6a, 0x99, 0x78, - 0x50, 0x03, 0x93, 0x98, 0x35, 0xa6, 0xaa, 0xfb, 0x20, 0xc7, 0x03, 0x82, 0x2b, 0x59, 0x49, 0xff, - 0x15, 0x2e, 0x6d, 0x17, 0x4c, 0x73, 0x6a, 0x06, 0x04, 0x9b, 0xdf, 0xa4, 0xcc, 0x5c, 0xb2, 0xb2, - 0x24, 0x41, 0x3d, 0x02, 0x05, 0x2e, 0x90, 0x88, 0x78, 0x65, 0x45, 0xb2, 0x1a, 0xb7, 0x60, 0x49, - 0xbd, 0xf9, 0x6d, 0x4a, 0x2b, 0x0c, 0xd7, 0x56, 0xca, 0xd1, 0xdf, 0x29, 0xa0, 0x9c, 0x2a, 0x0f, - 0x29, 0x17, 0xea, 0xc9, 0x5c, 0x35, 0xe0, 0xed, 0xaa, 0x91, 0xb8, 0x65, 0x2d, 0xd6, 0xd3, 0x9d, - 0x4a, 0xa3, 0xc8, 0x54, 0x25, 0xee, 0x83, 0x3c, 0x15, 0xc4, 0xe3, 0x95, 0x6c, 0x7d, 0xa5, 0x51, - 0xde, 0xd5, 0x6f, 0x4e, 0xdf, 0x5c, 0x4b, 0x71, 0xf9, 0x07, 0x89, 0xd1, 0x1a, 0xfa, 0xf5, 0x37, - 0xb9, 0x71, 0xda, 0x49, 0x79, 0xd4, 0xdf, 0x41, 0x29, 0x69, 0xb5, 0x13, 0xb9, 0x44, 0xa6, 0xbd, - 0x3a, 0x49, 0xa3, 0x99, 0xc6, 0xad, 0xb1, 0x42, 0x7d, 0x02, 0xb6, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, - 0xfb, 0x7f, 0x82, 0x1c, 0x97, 0xfa, 0xa4, 0x49, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x62, 0xfe, - 0x14, 0x0f, 0x6a, 0x5b, 0xcd, 0xc5, 0x12, 0x6b, 0x99, 0x57, 0x3d, 0x01, 0x1b, 0x98, 0xf9, 0x38, - 0x0a, 0x43, 0xe2, 0xe3, 0xfe, 0x11, 0x73, 0x29, 0xee, 0xcb, 0x46, 0xad, 0x9a, 0x30, 0xcd, 0x66, - 0x63, 0x6f, 0x56, 0xf0, 0x79, 0x51, 0xd0, 0x9a, 0x07, 0xa9, 0xbf, 0x80, 0x22, 0x8f, 0x78, 0x40, - 0x7c, 0xa7, 0x92, 0xab, 0x2b, 0x8d, 0x92, 0x59, 0x8e, 0x07, 0xb5, 0x62, 0x73, 0x18, 0xb2, 0x46, - 0xff, 0x54, 0x04, 0xca, 0xa7, 0xcc, 0x3e, 0x26, 0x5e, 0xe0, 0x22, 0x41, 0x2a, 0x79, 0xd9, 0xc3, - 0xdf, 0xae, 0x29, 0xf4, 0xc1, 0x44, 0x2d, 0xe7, 0xee, 0xfb, 0x34, 0xd5, 0xf2, 0xd4, 0x0f, 0x6b, - 0x9a, 0xa9, 0x3e, 0x07, 0x55, 0x1e, 0x61, 0x4c, 0x38, 0x6f, 0x45, 0xee, 0x01, 0xb3, 0xf9, 0x3e, - 0xe5, 0x82, 0x85, 0xfd, 0x43, 0xea, 0x51, 0x51, 0x29, 0xd4, 0x95, 0x46, 0xde, 0xd4, 0xe2, 0x41, - 0xad, 0xda, 0x5c, 0xaa, 0xb2, 0xae, 0x21, 0xa8, 0x16, 0xd8, 0x6c, 0x21, 0xea, 0x12, 0x67, 0x8e, - 0x5d, 0x94, 0xec, 0x6a, 0x3c, 0xa8, 0x6d, 0xde, 0x5b, 0xa8, 0xb0, 0x96, 0x38, 0xf5, 0x0f, 0x0a, - 0x58, 0xbb, 0x72, 0x23, 0xd4, 0x87, 0xa0, 0x80, 0xb0, 0xa0, 0xbd, 0x64, 0x60, 0x92, 0x61, 0xdc, - 0x9e, 0xae, 0x51, 0xf2, 0xae, 0x4d, 0xee, 0xb8, 0x45, 0x5a, 0x24, 0x69, 0x05, 0x99, 0x5c, 0xa3, - 0xff, 0xa4, 0xd5, 0x4a, 0x11, 0xaa, 0x0b, 0xd6, 0x5d, 0xc4, 0xc5, 0x68, 0xd6, 0x8e, 0xa9, 0x47, - 0x64, 0x97, 0xae, 0x96, 0xfe, 0x9a, 0xeb, 0x93, 0x38, 0xcc, 0x1f, 0xe2, 0x41, 0x6d, 0xfd, 0x70, - 0x86, 0x63, 0xcd, 0x91, 0xf5, 0xf7, 0x0a, 0x98, 0xee, 0xce, 0x1d, 0x3c, 0x61, 0x4f, 0x41, 0x49, - 0x8c, 0x46, 0x2a, 0xfb, 0xd5, 0x23, 0x35, 0xbe, 0x8b, 0xe3, 0x79, 0x1a, 0xd3, 0xf4, 0xb7, 0x0a, - 0xf8, 0x6e, 0x46, 0x7f, 0x07, 0xe7, 0xf9, 0xf7, 0xca, 0x93, 0xfc, 0xf3, 0x82, 0xb3, 0xc8, 0x53, - 0x2c, 0x7b, 0x88, 0x4d, 0x78, 0x76, 0xa9, 0x65, 0xce, 0x2f, 0xb5, 0xcc, 0xc5, 0xa5, 0x96, 0x79, - 0x19, 0x6b, 0xca, 0x59, 0xac, 0x29, 0xe7, 0xb1, 0xa6, 0x5c, 0xc4, 0x9a, 0xf2, 0x31, 0xd6, 0x94, - 0xd7, 0x9f, 0xb4, 0xcc, 0xb3, 0xd2, 0xa8, 0x22, 0x5f, 0x02, 0x00, 
0x00, 0xff, 0xff, 0x20, 0x1c, - 0xcf, 0x94, 0xe7, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto index 4321c3361e1a6..0bba13b86a8df 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto @@ -33,17 +33,17 @@ option go_package = "v2alpha1"; // CronJob represents the configuration of a single cron job. message CronJob { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobSpec spec = 2; // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional CronJobStatus status = 3; } @@ -51,7 +51,7 @@ message CronJob { // CronJobList is a collection of cron jobs. message CronJobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -110,12 +110,12 @@ message CronJobStatus { // JobTemplate describes a template for creating copies of a predefined pod. message JobTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional JobTemplateSpec template = 2; } @@ -123,12 +123,12 @@ message JobTemplate { // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/api/batch/v2alpha1/types.go b/vendor/k8s.io/api/batch/v2alpha1/types.go index cccff94ff2ace..465e614aec396 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types.go @@ -28,12 +28,12 @@ import ( type JobTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -41,12 +41,12 @@ type JobTemplate struct { // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -58,17 +58,17 @@ type JobTemplateSpec struct { type CronJob struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -80,7 +80,7 @@ type CronJobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go index f448a92cfe049..bc80eca48f0de 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v2alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ "": "CronJob represents the configuration of a single cron job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (CronJob) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (CronJob) SwaggerDoc() map[string]string { var map_CronJobList = map[string]string{ "": "CronJobList is a collection of cron jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CronJobs.", } @@ -75,8 +75,8 @@ func (CronJobStatus) SwaggerDoc() map[string]string { var map_JobTemplate = map[string]string{ "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplate) SwaggerDoc() map[string]string { @@ -85,8 +85,8 @@ func (JobTemplate) SwaggerDoc() map[string]string { var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (JobTemplateSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index 8b6964c8dbd94..2e61b568e85a4 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -17,36 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto - - It has these top-level messages: - CertificateSigningRequest - CertificateSigningRequestCondition - CertificateSigningRequestList - CertificateSigningRequestSpec - CertificateSigningRequestStatus - ExtraValue -*/ package v1beta1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. 
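This regeneration adds a math_bits import because, in the hunks further down, the varint size helper sovGenerated is rewritten from a shift loop into an equivalent math/bits expression, and Marshal switches to sizing each message first and then filling the buffer back to front with MarshalToSizedBuffer. The following is a minimal standalone sketch, assuming only the Go standard library, that compares the two size-helper forms appearing in this diff; the names sovLoop and sovBits are illustrative, not names from the generated code.

package main

import (
	"fmt"
	"math/bits"
)

// sovLoop mirrors the old generated helper: count varint bytes by repeatedly shifting right by 7.
func sovLoop(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// sovBits mirrors the regenerated helper: ceil(bitlen(x)/7), with x|1 so that x == 0 still costs one byte.
func sovBits(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1} {
		// The two columns agree for every input: 1, 1, 1, 2, 2, 3, 9 bytes respectively.
		fmt.Println(x, sovLoop(x), sovBits(x))
	}
}

The branch-free form avoids a data-dependent loop while sizing every field, which matters because Size() runs over the whole message before MarshalToSizedBuffer writes it from the end of the presized buffer toward the front.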
@@ -63,49 +47,244 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } func (*CertificateSigningRequest) ProtoMessage() {} func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_09d156762b8218ef, []int{0} +} +func (m *CertificateSigningRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequest.Merge(m, src) +} +func (m *CertificateSigningRequest) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequest.DiscardUnknown(m) } +var xxx_messageInfo_CertificateSigningRequest proto.InternalMessageInfo + func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } func (*CertificateSigningRequestCondition) ProtoMessage() {} func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_09d156762b8218ef, []int{1} +} +func (m *CertificateSigningRequestCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *CertificateSigningRequestCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestCondition.Merge(m, src) +} +func (m *CertificateSigningRequestCondition) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestCondition) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestCondition proto.InternalMessageInfo func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } func (*CertificateSigningRequestList) ProtoMessage() {} func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptor_09d156762b8218ef, []int{2} +} +func (m *CertificateSigningRequestList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestList.Merge(m, src) +} +func (m *CertificateSigningRequestList) XXX_Size() int { + return m.Size() } +func (m *CertificateSigningRequestList) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestList.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestList proto.InternalMessageInfo func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } func (*CertificateSigningRequestSpec) ProtoMessage() {} func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} + return 
fileDescriptor_09d156762b8218ef, []int{3} +} +func (m *CertificateSigningRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestSpec.Merge(m, src) +} +func (m *CertificateSigningRequestSpec) XXX_Size() int { + return m.Size() } +func (m *CertificateSigningRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestSpec proto.InternalMessageInfo func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } func (*CertificateSigningRequestStatus) ProtoMessage() {} func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_09d156762b8218ef, []int{4} +} +func (m *CertificateSigningRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestStatus.Merge(m, src) +} +func (m *CertificateSigningRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_09d156762b8218ef, []int{5} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) } -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func init() { proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequest") proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestCondition") proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestList") proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec.ExtraEntry") proto.RegisterType((*CertificateSigningRequestStatus)(nil), 
"k8s.io.api.certificates.v1beta1.CertificateSigningRequestStatus") proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.ExtraValue") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptor_09d156762b8218ef) +} + +var fileDescriptor_09d156762b8218ef = []byte{ + // 805 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4b, 0x8f, 0x1b, 0x45, + 0x10, 0xf6, 0xf8, 0xb5, 0x76, 0x7b, 0xd9, 0x44, 0x2d, 0x14, 0x0d, 0x2b, 0x65, 0x66, 0x35, 0x02, + 0xb4, 0x3c, 0xd2, 0xc3, 0x46, 0x08, 0x56, 0x7b, 0x40, 0x30, 0x4b, 0x04, 0x2b, 0x12, 0x21, 0x75, + 0x62, 0x0e, 0x08, 0x89, 0xb4, 0xc7, 0x95, 0x71, 0xc7, 0x99, 0x07, 0xd3, 0x3d, 0x06, 0xdf, 0xf2, + 0x13, 0x38, 0x72, 0x41, 0xe2, 0x97, 0x70, 0x5e, 0x0e, 0x48, 0x39, 0xe6, 0x80, 0x2c, 0xd6, 0xfc, + 0x8b, 0x9c, 0x50, 0xf7, 0xb4, 0x3d, 0xc6, 0x2b, 0xe3, 0x28, 0x7b, 0x9b, 0xfa, 0xaa, 0xbe, 0xaf, + 0x1e, 0x5d, 0x35, 0xe8, 0xcb, 0xf1, 0xb1, 0x20, 0x3c, 0xf5, 0xc7, 0xc5, 0x00, 0xf2, 0x04, 0x24, + 0x08, 0x7f, 0x02, 0xc9, 0x30, 0xcd, 0x7d, 0xe3, 0x60, 0x19, 0xf7, 0x43, 0xc8, 0x25, 0x7f, 0xc4, + 0x43, 0xa6, 0xdd, 0x47, 0x03, 0x90, 0xec, 0xc8, 0x8f, 0x20, 0x81, 0x9c, 0x49, 0x18, 0x92, 0x2c, + 0x4f, 0x65, 0x8a, 0xdd, 0x92, 0x40, 0x58, 0xc6, 0xc9, 0x2a, 0x81, 0x18, 0xc2, 0xfe, 0xad, 0x88, + 0xcb, 0x51, 0x31, 0x20, 0x61, 0x1a, 0xfb, 0x51, 0x1a, 0xa5, 0xbe, 0xe6, 0x0d, 0x8a, 0x47, 0xda, + 0xd2, 0x86, 0xfe, 0x2a, 0xf5, 0xf6, 0x3f, 0xac, 0x0a, 0x88, 0x59, 0x38, 0xe2, 0x09, 0xe4, 0x53, + 0x3f, 0x1b, 0x47, 0x0a, 0x10, 0x7e, 0x0c, 0x92, 0xf9, 0x93, 0x4b, 0x55, 0xec, 0xfb, 0x9b, 0x58, + 0x79, 0x91, 0x48, 0x1e, 0xc3, 0x25, 0xc2, 0x47, 0xdb, 0x08, 0x22, 0x1c, 0x41, 0xcc, 0xd6, 0x79, + 0xde, 0x1f, 0x75, 0xf4, 0xc6, 0x69, 0xd5, 0xe6, 0x7d, 0x1e, 0x25, 0x3c, 0x89, 0x28, 0xfc, 0x50, + 0x80, 0x90, 0xf8, 0x21, 0xea, 0xa8, 0x0a, 0x87, 0x4c, 0x32, 0xdb, 0x3a, 0xb0, 0x0e, 0x7b, 0xb7, + 0x3f, 0x20, 0xd5, 0x7c, 0x96, 0x89, 0x48, 0x36, 0x8e, 0x14, 0x20, 0x88, 0x8a, 0x26, 0x93, 0x23, + 0xf2, 0xf5, 0xe0, 0x31, 0x84, 0xf2, 0x1e, 0x48, 0x16, 0xe0, 0xf3, 0x99, 0x5b, 0x9b, 0xcf, 0x5c, + 0x54, 0x61, 0x74, 0xa9, 0x8a, 0x1f, 0xa2, 0xa6, 0xc8, 0x20, 0xb4, 0xeb, 0x5a, 0xfd, 0x13, 0xb2, + 0x65, 0xfa, 0x64, 0x63, 0xad, 0xf7, 0x33, 0x08, 0x83, 0x5d, 0x93, 0xab, 0xa9, 0x2c, 0xaa, 0x95, + 0xf1, 0x08, 0xb5, 0x85, 0x64, 0xb2, 0x10, 0x76, 0x43, 0xe7, 0xf8, 0xf4, 0x0a, 0x39, 0xb4, 0x4e, + 0xb0, 0x67, 0xb2, 0xb4, 0x4b, 0x9b, 0x1a, 0x7d, 0xef, 0xd7, 0x3a, 0xf2, 0x36, 0x72, 0x4f, 0xd3, + 0x64, 0xc8, 0x25, 0x4f, 0x13, 0x7c, 0x8c, 0x9a, 0x72, 0x9a, 0x81, 0x1e, 0x68, 0x37, 0x78, 0x73, + 0x51, 0xf2, 0x83, 0x69, 0x06, 0x2f, 0x66, 0xee, 0xeb, 0xeb, 0xf1, 0x0a, 0xa7, 0x9a, 0x81, 0xdf, + 0x46, 0xed, 0x1c, 0x98, 0x48, 0x13, 0x3d, 0xae, 0x6e, 0x55, 0x08, 0xd5, 0x28, 0x35, 0x5e, 0xfc, + 0x0e, 0xda, 0x89, 0x41, 0x08, 0x16, 0x81, 0xee, 0xb9, 0x1b, 0x5c, 0x33, 0x81, 0x3b, 0xf7, 0x4a, + 0x98, 0x2e, 0xfc, 0xf8, 0x31, 0xda, 0x7b, 0xc2, 0x84, 0xec, 0x67, 0x43, 0x26, 0xe1, 0x01, 0x8f, + 0xc1, 0x6e, 0xea, 0x29, 0xbd, 0xfb, 0x72, 0xef, 0xac, 0x18, 0xc1, 0x0d, 0xa3, 0xbe, 0x77, 0xf7, + 0x3f, 0x4a, 0x74, 0x4d, 0xd9, 0x9b, 0x59, 0xe8, 0xe6, 0xc6, 0xf9, 0xdc, 0xe5, 0x42, 0xe2, 0xef, + 0x2e, 0xed, 0x1b, 0x79, 0xb9, 0x3a, 0x14, 0x5b, 0x6f, 0xdb, 0x75, 0x53, 0x4b, 0x67, 0x81, 0xac, + 0xec, 0xda, 0xf7, 0xa8, 0xc5, 0x25, 0xc4, 0xc2, 0xae, 0x1f, 0x34, 0x0e, 0x7b, 0xb7, 0x4f, 0x5e, + 0x7d, 0x11, 0x82, 0xd7, 0x4c, 0x9a, 0xd6, 0x99, 0x12, 0xa4, 0xa5, 0xae, 0xf7, 0x7b, 0xe3, 0x7f, + 0x1a, 0x54, 
0x2b, 0x89, 0xdf, 0x42, 0x3b, 0x79, 0x69, 0xea, 0xfe, 0x76, 0x83, 0x9e, 0x7a, 0x15, + 0x13, 0x41, 0x17, 0x3e, 0x4c, 0x50, 0xbb, 0x50, 0xcf, 0x23, 0xec, 0xd6, 0x41, 0xe3, 0xb0, 0x1b, + 0xdc, 0x50, 0x8f, 0xdc, 0xd7, 0xc8, 0x8b, 0x99, 0xdb, 0xf9, 0x0a, 0xa6, 0xda, 0xa0, 0x26, 0x0a, + 0xbf, 0x8f, 0x3a, 0x85, 0x80, 0x3c, 0x61, 0x31, 0x98, 0xd5, 0x58, 0xce, 0xa1, 0x6f, 0x70, 0xba, + 0x8c, 0xc0, 0x37, 0x51, 0xa3, 0xe0, 0x43, 0xb3, 0x1a, 0x3d, 0x13, 0xd8, 0xe8, 0x9f, 0x7d, 0x4e, + 0x15, 0x8e, 0x3d, 0xd4, 0x8e, 0xf2, 0xb4, 0xc8, 0x84, 0xdd, 0xd4, 0xc9, 0x91, 0x4a, 0xfe, 0x85, + 0x46, 0xa8, 0xf1, 0xe0, 0x04, 0xb5, 0xe0, 0x27, 0x99, 0x33, 0xbb, 0xad, 0x47, 0x79, 0x76, 0xb5, + 0xbb, 0x25, 0x77, 0x94, 0xd6, 0x9d, 0x44, 0xe6, 0xd3, 0x6a, 0xb2, 0x1a, 0xa3, 0x65, 0x9a, 0x7d, + 0x40, 0xa8, 0x8a, 0xc1, 0xd7, 0x51, 0x63, 0x0c, 0xd3, 0xf2, 0x80, 0xa8, 0xfa, 0xc4, 0x9f, 0xa1, + 0xd6, 0x84, 0x3d, 0x29, 0xc0, 0xfc, 0x47, 0xde, 0xdb, 0x5a, 0x8f, 0x56, 0xfb, 0x46, 0x51, 0x68, + 0xc9, 0x3c, 0xa9, 0x1f, 0x5b, 0xde, 0x9f, 0x16, 0x72, 0xb7, 0x5c, 0x3f, 0xfe, 0x11, 0xa1, 0x70, + 0x71, 0x9b, 0xc2, 0xb6, 0x74, 0xff, 0xa7, 0xaf, 0xde, 0xff, 0xf2, 0xce, 0xab, 0x1f, 0xe5, 0x12, + 0x12, 0x74, 0x25, 0x15, 0x3e, 0x42, 0xbd, 0x15, 0x69, 0xdd, 0xe9, 0x6e, 0x70, 0x6d, 0x3e, 0x73, + 0x7b, 0x2b, 0xe2, 0x74, 0x35, 0xc6, 0xfb, 0xd8, 0x8c, 0x4d, 0x37, 0x8a, 0xdd, 0xc5, 0xfe, 0x5b, + 0xfa, 0x5d, 0xbb, 0xeb, 0xfb, 0x7b, 0xd2, 0xf9, 0xe5, 0x37, 0xb7, 0xf6, 0xf4, 0xaf, 0x83, 0x5a, + 0x70, 0xeb, 0xfc, 0xc2, 0xa9, 0x3d, 0xbb, 0x70, 0x6a, 0xcf, 0x2f, 0x9c, 0xda, 0xd3, 0xb9, 0x63, + 0x9d, 0xcf, 0x1d, 0xeb, 0xd9, 0xdc, 0xb1, 0x9e, 0xcf, 0x1d, 0xeb, 0xef, 0xb9, 0x63, 0xfd, 0xfc, + 0x8f, 0x53, 0xfb, 0x76, 0xc7, 0x74, 0xf7, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x39, 0x0e, 0xb6, + 0xcd, 0x7f, 0x07, 0x00, 0x00, +} + func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -113,41 +292,52 @@ func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CertificateSigningRequestCondition) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -155,37 +345,47 @@ func (m *CertificateSigningRequestCondition) Marshal() (dAtA []byte, err error) } func (m *CertificateSigningRequestCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n4, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -193,37 +393,46 @@ func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CertificateSigningRequestSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -231,92 +440,86 @@ func (m *CertificateSigningRequestSpec) Marshal() (dAtA []byte, 
err error) { } func (m *CertificateSigningRequestSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Request != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) - i += copy(dAtA[i:], m.Request) - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Usages) > 0 { - for _, s := range m.Usages { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x32 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n6, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 } } - return i, nil + if len(m.Usages) > 0 { + for iNdEx := len(m.Usages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Usages[iNdEx]) + copy(dAtA[i:], m.Usages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Usages[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x1a + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x12 + if m.Request != nil { + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) 
+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -324,35 +527,43 @@ func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.Certificate != nil { + i -= len(m.Certificate) + copy(dAtA[i:], m.Certificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) + i-- + dAtA[i] = 0x12 + } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - if m.Certificate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) - i += copy(dAtA[i:], m.Certificate) - } - return i, nil + return len(dAtA) - i, nil } func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -360,38 +571,42 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CertificateSigningRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -404,6 +619,9 @@ func (m *CertificateSigningRequest) Size() (n int) { } func (m *CertificateSigningRequestCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -418,6 +636,9 @@ func (m *CertificateSigningRequestCondition) Size() (n int) { } func (m *CertificateSigningRequestList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -432,6 +653,9 @@ func (m *CertificateSigningRequestList) Size() (n int) { } func (m *CertificateSigningRequestSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Request != nil { @@ -467,6 +691,9 @@ func (m *CertificateSigningRequestSpec) Size() (n int) { } func (m *CertificateSigningRequestStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Conditions) > 0 { @@ -483,6 +710,9 @@ func (m *CertificateSigningRequestStatus) 
Size() (n int) { } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -495,14 +725,7 @@ func (m ExtraValue) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -512,7 +735,7 @@ func (this *CertificateSigningRequest) String() string { return "nil" } s := strings.Join([]string{`&CertificateSigningRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CertificateSigningRequestSpec", "CertificateSigningRequestSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CertificateSigningRequestStatus", "CertificateSigningRequestStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -527,7 +750,7 @@ func (this *CertificateSigningRequestCondition) String() string { `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -536,9 +759,14 @@ func (this *CertificateSigningRequestList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CertificateSigningRequest{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CertificateSigningRequestList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -572,8 +800,13 @@ func (this *CertificateSigningRequestStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]CertificateSigningRequestCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&CertificateSigningRequestStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `Certificate:` + valueToStringGenerated(this.Certificate) + `,`, `}`, }, "") @@ -602,7 +835,7 @@ 
func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -630,7 +863,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -639,6 +872,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -660,7 +896,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -669,6 +905,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -690,7 +929,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -699,6 +938,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -715,6 +957,9 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -742,7 +987,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -770,7 +1015,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -780,6 +1025,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -799,7 +1047,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -809,6 +1057,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -828,7 +1079,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -838,6 +1089,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -857,7 +1111,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -866,6 +1120,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -882,6 +1139,9 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -909,7 +1169,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -937,7 +1197,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -946,6 +1206,9 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -967,7 +1230,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -976,6 +1239,9 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -993,6 +1259,9 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1020,7 +1289,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1048,7 +1317,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1057,6 +1326,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1079,7 +1351,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1089,6 +1361,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1108,7 +1383,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1118,6 +1393,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1137,7 +1415,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1147,6 +1425,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1166,7 +1447,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1176,6 +1457,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1195,7 +1479,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1204,6 +1488,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1224,7 +1511,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1241,7 +1528,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1251,6 +1538,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1267,7 +1557,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1276,7 +1566,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -1313,6 +1603,9 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1340,7 +1633,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1368,7 +1661,7 @@ func (m 
*CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1377,6 +1670,9 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1399,7 +1695,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1408,6 +1704,9 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1425,6 +1724,9 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1452,7 +1754,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1480,7 +1782,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1490,6 +1792,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1504,6 +1809,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1570,10 +1878,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1602,6 +1913,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1620,62 +1934,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 804 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x8f, 0xdb, 0x44, - 0x18, 0x8e, 0xf3, 0xb5, 0xc9, 0x64, 0xd9, 0x56, 0x23, 0x54, 0x99, 0x95, 0x6a, 0xaf, 0x2c, 0x40, - 0xcb, 0x47, 0xc7, 0x6c, 0x85, 0x60, 0xb5, 0x07, 0x04, 0x5e, 0x2a, 0x58, 0xd1, 0x0a, 0x69, 0xda, - 0x70, 0x40, 0x48, 0x74, 0xe2, 0xbc, 0x75, 0xa6, 0xa9, 0x3f, 0xf0, 0x8c, 0x03, 0xb9, 0xf5, 0x27, - 0x70, 0xe4, 0x82, 0xc4, 0x2f, 0xe1, 0xbc, 0x1c, 0x90, 0x7a, 0xec, 0x01, 0x45, 0x6c, 0xf8, 0x17, - 0x3d, 0xa1, 0x19, 0x4f, 0xe2, 0x90, 0x55, 0x48, 0xd5, 0xbd, 0x79, 0x9e, 0xf7, 
0x79, 0x9e, 0xf7, - 0x63, 0xde, 0x31, 0xfa, 0x72, 0x7c, 0x2c, 0x08, 0x4f, 0xfd, 0x71, 0x31, 0x80, 0x3c, 0x01, 0x09, - 0xc2, 0x9f, 0x40, 0x32, 0x4c, 0x73, 0xdf, 0x04, 0x58, 0xc6, 0xfd, 0x10, 0x72, 0xc9, 0x1f, 0xf1, - 0x90, 0xe9, 0xf0, 0xd1, 0x00, 0x24, 0x3b, 0xf2, 0x23, 0x48, 0x20, 0x67, 0x12, 0x86, 0x24, 0xcb, - 0x53, 0x99, 0x62, 0xb7, 0x14, 0x10, 0x96, 0x71, 0xb2, 0x2a, 0x20, 0x46, 0xb0, 0x7f, 0x2b, 0xe2, - 0x72, 0x54, 0x0c, 0x48, 0x98, 0xc6, 0x7e, 0x94, 0x46, 0xa9, 0xaf, 0x75, 0x83, 0xe2, 0x91, 0x3e, - 0xe9, 0x83, 0xfe, 0x2a, 0xfd, 0xf6, 0x3f, 0xac, 0x0a, 0x88, 0x59, 0x38, 0xe2, 0x09, 0xe4, 0x53, - 0x3f, 0x1b, 0x47, 0x0a, 0x10, 0x7e, 0x0c, 0x92, 0xf9, 0x93, 0x4b, 0x55, 0xec, 0xfb, 0x9b, 0x54, - 0x79, 0x91, 0x48, 0x1e, 0xc3, 0x25, 0xc1, 0x47, 0xdb, 0x04, 0x22, 0x1c, 0x41, 0xcc, 0xd6, 0x75, - 0xde, 0x1f, 0x75, 0xf4, 0xc6, 0x69, 0xd5, 0xe6, 0x7d, 0x1e, 0x25, 0x3c, 0x89, 0x28, 0xfc, 0x50, - 0x80, 0x90, 0xf8, 0x21, 0xea, 0xa8, 0x0a, 0x87, 0x4c, 0x32, 0xdb, 0x3a, 0xb0, 0x0e, 0x7b, 0xb7, - 0x3f, 0x20, 0xd5, 0x7c, 0x96, 0x89, 0x48, 0x36, 0x8e, 0x14, 0x20, 0x88, 0x62, 0x93, 0xc9, 0x11, - 0xf9, 0x7a, 0xf0, 0x18, 0x42, 0x79, 0x0f, 0x24, 0x0b, 0xf0, 0xf9, 0xcc, 0xad, 0xcd, 0x67, 0x2e, - 0xaa, 0x30, 0xba, 0x74, 0xc5, 0x0f, 0x51, 0x53, 0x64, 0x10, 0xda, 0x75, 0xed, 0xfe, 0x09, 0xd9, - 0x32, 0x7d, 0xb2, 0xb1, 0xd6, 0xfb, 0x19, 0x84, 0xc1, 0xae, 0xc9, 0xd5, 0x54, 0x27, 0xaa, 0x9d, - 0xf1, 0x08, 0xb5, 0x85, 0x64, 0xb2, 0x10, 0x76, 0x43, 0xe7, 0xf8, 0xf4, 0x0a, 0x39, 0xb4, 0x4f, - 0xb0, 0x67, 0xb2, 0xb4, 0xcb, 0x33, 0x35, 0xfe, 0xde, 0xaf, 0x75, 0xe4, 0x6d, 0xd4, 0x9e, 0xa6, - 0xc9, 0x90, 0x4b, 0x9e, 0x26, 0xf8, 0x18, 0x35, 0xe5, 0x34, 0x03, 0x3d, 0xd0, 0x6e, 0xf0, 0xe6, - 0xa2, 0xe4, 0x07, 0xd3, 0x0c, 0x5e, 0xcc, 0xdc, 0xd7, 0xd7, 0xf9, 0x0a, 0xa7, 0x5a, 0x81, 0xdf, - 0x46, 0xed, 0x1c, 0x98, 0x48, 0x13, 0x3d, 0xae, 0x6e, 0x55, 0x08, 0xd5, 0x28, 0x35, 0x51, 0xfc, - 0x0e, 0xda, 0x89, 0x41, 0x08, 0x16, 0x81, 0xee, 0xb9, 0x1b, 0x5c, 0x33, 0xc4, 0x9d, 0x7b, 0x25, - 0x4c, 0x17, 0x71, 0xfc, 0x18, 0xed, 0x3d, 0x61, 0x42, 0xf6, 0xb3, 0x21, 0x93, 0xf0, 0x80, 0xc7, - 0x60, 0x37, 0xf5, 0x94, 0xde, 0x7d, 0xb9, 0x7b, 0x56, 0x8a, 0xe0, 0x86, 0x71, 0xdf, 0xbb, 0xfb, - 0x1f, 0x27, 0xba, 0xe6, 0xec, 0xcd, 0x2c, 0x74, 0x73, 0xe3, 0x7c, 0xee, 0x72, 0x21, 0xf1, 0x77, - 0x97, 0xf6, 0x8d, 0xbc, 0x5c, 0x1d, 0x4a, 0xad, 0xb7, 0xed, 0xba, 0xa9, 0xa5, 0xb3, 0x40, 0x56, - 0x76, 0xed, 0x7b, 0xd4, 0xe2, 0x12, 0x62, 0x61, 0xd7, 0x0f, 0x1a, 0x87, 0xbd, 0xdb, 0x27, 0xaf, - 0xbe, 0x08, 0xc1, 0x6b, 0x26, 0x4d, 0xeb, 0x4c, 0x19, 0xd2, 0xd2, 0xd7, 0xfb, 0xbd, 0xf1, 0x3f, - 0x0d, 0xaa, 0x95, 0xc4, 0x6f, 0xa1, 0x9d, 0xbc, 0x3c, 0xea, 0xfe, 0x76, 0x83, 0x9e, 0xba, 0x15, - 0xc3, 0xa0, 0x8b, 0x18, 0x7e, 0x1f, 0x75, 0x0a, 0x01, 0x79, 0xc2, 0x62, 0x30, 0x57, 0xbd, 0xec, - 0xab, 0x6f, 0x70, 0xba, 0x64, 0xe0, 0x9b, 0xa8, 0x51, 0xf0, 0xa1, 0xb9, 0xea, 0x9e, 0x21, 0x36, - 0xfa, 0x67, 0x9f, 0x53, 0x85, 0x63, 0x0f, 0xb5, 0xa3, 0x3c, 0x2d, 0x32, 0x61, 0x37, 0x0f, 0x1a, - 0x87, 0xdd, 0x00, 0xa9, 0x8d, 0xf9, 0x42, 0x23, 0xd4, 0x44, 0x30, 0x41, 0xed, 0x42, 0xed, 0x83, - 0xb0, 0x5b, 0x9a, 0x73, 0x43, 0x71, 0xfa, 0x1a, 0x79, 0x31, 0x73, 0x3b, 0x5f, 0xc1, 0x54, 0x1f, - 0xa8, 0x61, 0xe1, 0x04, 0xb5, 0xe0, 0x27, 0x99, 0x33, 0xbb, 0xad, 0x47, 0x79, 0x76, 0xb5, 0x77, - 0x4b, 0xee, 0x28, 0xaf, 0x3b, 0x89, 0xcc, 0xa7, 0xd5, 0x64, 0x35, 0x46, 0xcb, 0x34, 0xfb, 0x80, - 0x50, 0xc5, 0xc1, 0xd7, 0x51, 0x63, 0x0c, 0xd3, 0xf2, 0x01, 0x51, 0xf5, 0x89, 0x3f, 0x43, 0xad, - 0x09, 0x7b, 0x52, 0x80, 0xf9, 0x8f, 0xbc, 0xb7, 0xb5, 0x1e, 0xed, 0xf6, 0x8d, 0x92, 0xd0, 0x52, - 0x79, 
0x52, 0x3f, 0xb6, 0xbc, 0x3f, 0x2d, 0xe4, 0x6e, 0x79, 0xfd, 0xf8, 0x47, 0x84, 0xc2, 0xc5, - 0xdb, 0x14, 0xb6, 0xa5, 0xfb, 0x3f, 0x7d, 0xf5, 0xfe, 0x97, 0xef, 0xbc, 0xfa, 0x51, 0x2e, 0x21, - 0x41, 0x57, 0x52, 0xe1, 0x23, 0xd4, 0x5b, 0xb1, 0xd6, 0x9d, 0xee, 0x06, 0xd7, 0xe6, 0x33, 0xb7, - 0xb7, 0x62, 0x4e, 0x57, 0x39, 0xde, 0xc7, 0x66, 0x6c, 0xba, 0x51, 0xec, 0x2e, 0xf6, 0xdf, 0xd2, - 0x77, 0xdc, 0x5d, 0xdf, 0xdf, 0x93, 0xce, 0x2f, 0xbf, 0xb9, 0xb5, 0xa7, 0x7f, 0x1d, 0xd4, 0x82, - 0x5b, 0xe7, 0x17, 0x4e, 0xed, 0xd9, 0x85, 0x53, 0x7b, 0x7e, 0xe1, 0xd4, 0x9e, 0xce, 0x1d, 0xeb, - 0x7c, 0xee, 0x58, 0xcf, 0xe6, 0x8e, 0xf5, 0x7c, 0xee, 0x58, 0x7f, 0xcf, 0x1d, 0xeb, 0xe7, 0x7f, - 0x9c, 0xda, 0xb7, 0x3b, 0xa6, 0xbb, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x6b, 0x5b, 0xf9, - 0x7f, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go index 1a87eaff7fe25..7e78be191c416 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go @@ -17,33 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto - - It has these top-level messages: - Lease - LeaseList - LeaseSpec -*/ package v1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -57,27 +44,142 @@ var _ = math.Inf // proto package needs to be updated. 
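
Editor's note: nearly every Unmarshal hunk above follows the same two-part pattern — the varint accumulation is rewritten from `msglen |= (int(b) & 0x7F) << shift` to the equivalent `msglen |= int(b&0x7F) << shift`, and a new `if postIndex < 0` (or `if (iNdEx + skippy) < 0`) guard rejects lengths whose addition to the current index overflows a signed int; `skipGenerated` likewise now checks the range before advancing. The following is only a minimal, self-contained sketch of that hardened decode in plain Go; the helper names (`readLengthDelimited`, `errInvalidLength`) are illustrative and not taken from the generated files.

package main

import (
	"errors"
	"fmt"
	"io"
)

var (
	errInvalidLength = errors.New("proto: negative length found during unmarshaling")
	errIntOverflow   = errors.New("proto: integer overflow")
)

// readLengthDelimited decodes a varint length starting at dAtA[iNdEx] and
// returns the field bytes plus the index just past them. It carries the same
// guards the regenerated hunks add: a negative accumulated length and a
// postIndex that wrapped negative are both rejected before any slicing.
func readLengthDelimited(dAtA []byte, iNdEx int) ([]byte, int, error) {
	l := len(dAtA)
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errIntOverflow
		}
		if iNdEx >= l {
			return nil, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		msglen |= int(b&0x7F) << shift // same unparenthesized form as the new code
		if b < 0x80 {
			break
		}
	}
	if msglen < 0 {
		return nil, 0, errInvalidLength
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 { // new guard: the addition overflowed int
		return nil, 0, errInvalidLength
	}
	if postIndex > l {
		return nil, 0, io.ErrUnexpectedEOF
	}
	return dAtA[iNdEx:postIndex], postIndex, nil
}

func main() {
	// 0x0a = field 1, wire type 2 (length-delimited); then length 3 and "foo".
	buf := []byte{0x0a, 0x03, 'f', 'o', 'o'}
	payload, next, err := readLengthDelimited(buf, 1) // start at 1 to skip the tag byte
	fmt.Println(string(payload), next, err)
}

Without the extra guard, a hostile length close to the int maximum could make `iNdEx + msglen` wrap negative and slip past the `postIndex > l` bounds check.
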
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{0} +} +func (m *Lease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lease) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lease.Merge(m, src) +} +func (m *Lease) XXX_Size() int { + return m.Size() +} +func (m *Lease) XXX_DiscardUnknown() { + xxx_messageInfo_Lease.DiscardUnknown(m) +} + +var xxx_messageInfo_Lease proto.InternalMessageInfo + +func (m *LeaseList) Reset() { *m = LeaseList{} } +func (*LeaseList) ProtoMessage() {} +func (*LeaseList) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{1} +} +func (m *LeaseList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseList.Merge(m, src) +} +func (m *LeaseList) XXX_Size() int { + return m.Size() +} +func (m *LeaseList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseList proto.InternalMessageInfo -func (m *LeaseList) Reset() { *m = LeaseList{} } -func (*LeaseList) ProtoMessage() {} -func (*LeaseList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } +func (*LeaseSpec) ProtoMessage() {} +func (*LeaseSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{2} +} +func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseSpec.Merge(m, src) +} +func (m *LeaseSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseSpec.DiscardUnknown(m) +} -func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } -func (*LeaseSpec) ProtoMessage() {} -func (*LeaseSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo func init() { proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1.Lease") proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1.LeaseList") proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1.LeaseSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto", fileDescriptor_929e1148ad9baca3) +} + +var fileDescriptor_929e1148ad9baca3 = []byte{ + // 535 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0x36, 0x91, 0x9a, 0x0d, 0x2d, 0x91, 0x95, 0x83, 0x95, 0x83, 
0x5d, 0x22, 0x21, + 0xe5, 0xc2, 0x2e, 0xa9, 0x10, 0x42, 0x9c, 0xc0, 0x20, 0xa0, 0x52, 0x2a, 0x24, 0xb7, 0x27, 0xd4, + 0x03, 0x1b, 0x7b, 0x70, 0x96, 0xd4, 0x5e, 0xb3, 0xbb, 0x0e, 0xea, 0x8d, 0x47, 0xe0, 0xca, 0x63, + 0xc0, 0x53, 0xe4, 0xd8, 0x63, 0x4f, 0x16, 0x31, 0x2f, 0x82, 0x76, 0x93, 0x36, 0x21, 0x49, 0xd5, + 0x8a, 0xdb, 0xee, 0xcc, 0xfc, 0xdf, 0xfc, 0xf3, 0xa3, 0x57, 0xa3, 0x67, 0x12, 0x33, 0x4e, 0x46, + 0xf9, 0x00, 0x44, 0x0a, 0x0a, 0x24, 0x19, 0x43, 0x1a, 0x71, 0x41, 0xe6, 0x0d, 0x9a, 0x31, 0x12, + 0x72, 0x2e, 0x22, 0x96, 0x52, 0xc5, 0x78, 0x4a, 0xc6, 0x3d, 0x12, 0x43, 0x0a, 0x82, 0x2a, 0x88, + 0x70, 0x26, 0xb8, 0xe2, 0x76, 0x7b, 0x36, 0x8b, 0x69, 0xc6, 0xf0, 0xf2, 0x2c, 0x1e, 0xf7, 0xda, + 0x8f, 0x62, 0xa6, 0x86, 0xf9, 0x00, 0x87, 0x3c, 0x21, 0x31, 0x8f, 0x39, 0x31, 0x92, 0x41, 0xfe, + 0xc9, 0xfc, 0xcc, 0xc7, 0xbc, 0x66, 0xa8, 0xf6, 0x93, 0xc5, 0xda, 0x84, 0x86, 0x43, 0x96, 0x82, + 0x38, 0x27, 0xd9, 0x28, 0xd6, 0x05, 0x49, 0x12, 0x50, 0x74, 0x83, 0x81, 0x36, 0xb9, 0x49, 0x25, + 0xf2, 0x54, 0xb1, 0x04, 0xd6, 0x04, 0x4f, 0x6f, 0x13, 0xc8, 0x70, 0x08, 0x09, 0x5d, 0xd5, 0x75, + 0x7e, 0x59, 0xa8, 0xd6, 0x07, 0x2a, 0xc1, 0xfe, 0x88, 0x76, 0xb4, 0x9b, 0x88, 0x2a, 0xea, 0x58, + 0xfb, 0x56, 0xb7, 0x71, 0xf0, 0x18, 0x2f, 0x62, 0xb8, 0x86, 0xe2, 0x6c, 0x14, 0xeb, 0x82, 0xc4, + 0x7a, 0x1a, 0x8f, 0x7b, 0xf8, 0xfd, 0xe0, 0x33, 0x84, 0xea, 0x08, 0x14, 0xf5, 0xed, 0x49, 0xe1, + 0x55, 0xca, 0xc2, 0x43, 0x8b, 0x5a, 0x70, 0x4d, 0xb5, 0xdf, 0xa2, 0xaa, 0xcc, 0x20, 0x74, 0xb6, + 0x0c, 0xfd, 0x21, 0xbe, 0x39, 0x64, 0x6c, 0x2c, 0x1d, 0x67, 0x10, 0xfa, 0xf7, 0xe6, 0xc8, 0xaa, + 0xfe, 0x05, 0x06, 0xd0, 0xf9, 0x69, 0xa1, 0xba, 0x99, 0xe8, 0x33, 0xa9, 0xec, 0xd3, 0x35, 0xe3, + 0xf8, 0x6e, 0xc6, 0xb5, 0xda, 0xd8, 0x6e, 0xce, 0x77, 0xec, 0x5c, 0x55, 0x96, 0x4c, 0xbf, 0x41, + 0x35, 0xa6, 0x20, 0x91, 0xce, 0xd6, 0xfe, 0x76, 0xb7, 0x71, 0xf0, 0xe0, 0x56, 0xd7, 0xfe, 0xee, + 0x9c, 0x56, 0x3b, 0xd4, 0xba, 0x60, 0x26, 0xef, 0xfc, 0xd8, 0x9e, 0x7b, 0xd6, 0x77, 0xd8, 0xcf, + 0xd1, 0xde, 0x90, 0x9f, 0x45, 0x20, 0x0e, 0x23, 0x48, 0x15, 0x53, 0xe7, 0xc6, 0x79, 0xdd, 0xb7, + 0xcb, 0xc2, 0xdb, 0x7b, 0xf7, 0x4f, 0x27, 0x58, 0x99, 0xb4, 0xfb, 0xa8, 0x75, 0xa6, 0x41, 0xaf, + 0x73, 0x61, 0x36, 0x1f, 0x43, 0xc8, 0xd3, 0x48, 0x9a, 0x58, 0x6b, 0xbe, 0x53, 0x16, 0x5e, 0xab, + 0xbf, 0xa1, 0x1f, 0x6c, 0x54, 0xd9, 0x03, 0xd4, 0xa0, 0xe1, 0x97, 0x9c, 0x09, 0x38, 0x61, 0x09, + 0x38, 0xdb, 0x26, 0x40, 0x72, 0xb7, 0x00, 0x8f, 0x58, 0x28, 0xb8, 0x96, 0xf9, 0xf7, 0xcb, 0xc2, + 0x6b, 0xbc, 0x5c, 0x70, 0x82, 0x65, 0xa8, 0x7d, 0x8a, 0xea, 0x02, 0x52, 0xf8, 0x6a, 0x36, 0x54, + 0xff, 0x6f, 0xc3, 0x6e, 0x59, 0x78, 0xf5, 0xe0, 0x8a, 0x12, 0x2c, 0x80, 0xf6, 0x0b, 0xd4, 0x34, + 0x97, 0x9d, 0x08, 0x9a, 0x4a, 0xa6, 0x6f, 0x93, 0x4e, 0xcd, 0x64, 0xd1, 0x2a, 0x0b, 0xaf, 0xd9, + 0x5f, 0xe9, 0x05, 0x6b, 0xd3, 0x7e, 0x77, 0x32, 0x75, 0x2b, 0x17, 0x53, 0xb7, 0x72, 0x39, 0x75, + 0x2b, 0xdf, 0x4a, 0xd7, 0x9a, 0x94, 0xae, 0x75, 0x51, 0xba, 0xd6, 0x65, 0xe9, 0x5a, 0xbf, 0x4b, + 0xd7, 0xfa, 0xfe, 0xc7, 0xad, 0x7c, 0xd8, 0x1a, 0xf7, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x41, + 0x5e, 0x94, 0x96, 0x5e, 0x04, 0x00, 0x00, +} + func (m *Lease) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -85,33 +187,42 @@ func (m *Lease) Marshal() (dAtA []byte, err error) { } func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lease) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -119,37 +230,46 @@ func (m *LeaseList) Marshal() (dAtA []byte, err error) { } func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -157,59 +277,74 @@ func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { } func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.HolderIdentity != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) - i += copy(dAtA[i:], *m.HolderIdentity) + if m.LeaseTransitions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + i-- + dAtA[i] = 0x28 } - if m.LeaseDurationSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } if m.AcquireTime != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AcquireTime.Size())) - n4, err := 
m.AcquireTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AcquireTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x1a } - if m.RenewTime != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RenewTime.Size())) - n5, err := m.RenewTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 + if m.LeaseDurationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + i-- + dAtA[i] = 0x10 } - if m.LeaseTransitions != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + if m.HolderIdentity != nil { + i -= len(*m.HolderIdentity) + copy(dAtA[i:], *m.HolderIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -220,6 +355,9 @@ func (m *Lease) Size() (n int) { } func (m *LeaseList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -234,6 +372,9 @@ func (m *LeaseList) Size() (n int) { } func (m *LeaseSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HolderIdentity != nil { @@ -258,14 +399,7 @@ func (m *LeaseSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -275,7 +409,7 @@ func (this *Lease) String() string { return "nil" } s := strings.Join([]string{`&Lease{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -285,9 +419,14 @@ func (this *LeaseList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Lease{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Lease", "Lease", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&LeaseList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Lease", "Lease", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -299,8 +438,8 @@ func (this *LeaseSpec) String() string { s := strings.Join([]string{`&LeaseSpec{`, `HolderIdentity:` + valueToStringGenerated(this.HolderIdentity) + `,`, `LeaseDurationSeconds:` + 
valueToStringGenerated(this.LeaseDurationSeconds) + `,`, - `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, - `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, + `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, `}`, }, "") @@ -329,7 +468,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -357,7 +496,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -366,6 +505,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -387,7 +529,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -396,6 +538,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -412,6 +557,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -439,7 +587,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -467,7 +615,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -476,6 +624,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -497,7 +648,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -506,6 +657,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -523,6 +677,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -550,7 +707,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -578,7 +735,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << 
shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -588,6 +745,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -608,7 +768,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -628,7 +788,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -637,11 +797,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AcquireTime == nil { - m.AcquireTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.AcquireTime = &v1.MicroTime{} } if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -661,7 +824,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -670,11 +833,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.RenewTime == nil { - m.RenewTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.RenewTime = &v1.MicroTime{} } if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -694,7 +860,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -709,6 +875,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -775,10 +944,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -807,6 +979,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -825,45 +1000,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 535 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xe3, 0x36, 0x91, 0x9a, 0x0d, 0x2d, 0x91, 0x95, 0x83, 0x95, 0x83, 0x5d, 0x22, 0x21, - 0xe5, 0xc2, 0x2e, 0xa9, 0x10, 0x42, 0x9c, 0xc0, 0x20, 0xa0, 0x52, 0x2a, 0x24, 0xb7, 0x27, 0xd4, - 0x03, 0x1b, 0x7b, 0x70, 0x96, 0xd4, 0x5e, 0xb3, 0xbb, 0x0e, 0xea, 0x8d, 0x47, 0xe0, 0xca, 0x63, - 0xc0, 0x53, 0xe4, 0xd8, 0x63, 0x4f, 0x16, 0x31, 0x2f, 0x82, 0x76, 0x93, 
0x36, 0x21, 0x49, 0xd5, - 0x8a, 0xdb, 0xee, 0xcc, 0xfc, 0xdf, 0xfc, 0xf3, 0xa3, 0x57, 0xa3, 0x67, 0x12, 0x33, 0x4e, 0x46, - 0xf9, 0x00, 0x44, 0x0a, 0x0a, 0x24, 0x19, 0x43, 0x1a, 0x71, 0x41, 0xe6, 0x0d, 0x9a, 0x31, 0x12, - 0x72, 0x2e, 0x22, 0x96, 0x52, 0xc5, 0x78, 0x4a, 0xc6, 0x3d, 0x12, 0x43, 0x0a, 0x82, 0x2a, 0x88, - 0x70, 0x26, 0xb8, 0xe2, 0x76, 0x7b, 0x36, 0x8b, 0x69, 0xc6, 0xf0, 0xf2, 0x2c, 0x1e, 0xf7, 0xda, - 0x8f, 0x62, 0xa6, 0x86, 0xf9, 0x00, 0x87, 0x3c, 0x21, 0x31, 0x8f, 0x39, 0x31, 0x92, 0x41, 0xfe, - 0xc9, 0xfc, 0xcc, 0xc7, 0xbc, 0x66, 0xa8, 0xf6, 0x93, 0xc5, 0xda, 0x84, 0x86, 0x43, 0x96, 0x82, - 0x38, 0x27, 0xd9, 0x28, 0xd6, 0x05, 0x49, 0x12, 0x50, 0x74, 0x83, 0x81, 0x36, 0xb9, 0x49, 0x25, - 0xf2, 0x54, 0xb1, 0x04, 0xd6, 0x04, 0x4f, 0x6f, 0x13, 0xc8, 0x70, 0x08, 0x09, 0x5d, 0xd5, 0x75, - 0x7e, 0x59, 0xa8, 0xd6, 0x07, 0x2a, 0xc1, 0xfe, 0x88, 0x76, 0xb4, 0x9b, 0x88, 0x2a, 0xea, 0x58, - 0xfb, 0x56, 0xb7, 0x71, 0xf0, 0x18, 0x2f, 0x62, 0xb8, 0x86, 0xe2, 0x6c, 0x14, 0xeb, 0x82, 0xc4, - 0x7a, 0x1a, 0x8f, 0x7b, 0xf8, 0xfd, 0xe0, 0x33, 0x84, 0xea, 0x08, 0x14, 0xf5, 0xed, 0x49, 0xe1, - 0x55, 0xca, 0xc2, 0x43, 0x8b, 0x5a, 0x70, 0x4d, 0xb5, 0xdf, 0xa2, 0xaa, 0xcc, 0x20, 0x74, 0xb6, - 0x0c, 0xfd, 0x21, 0xbe, 0x39, 0x64, 0x6c, 0x2c, 0x1d, 0x67, 0x10, 0xfa, 0xf7, 0xe6, 0xc8, 0xaa, - 0xfe, 0x05, 0x06, 0xd0, 0xf9, 0x69, 0xa1, 0xba, 0x99, 0xe8, 0x33, 0xa9, 0xec, 0xd3, 0x35, 0xe3, - 0xf8, 0x6e, 0xc6, 0xb5, 0xda, 0xd8, 0x6e, 0xce, 0x77, 0xec, 0x5c, 0x55, 0x96, 0x4c, 0xbf, 0x41, - 0x35, 0xa6, 0x20, 0x91, 0xce, 0xd6, 0xfe, 0x76, 0xb7, 0x71, 0xf0, 0xe0, 0x56, 0xd7, 0xfe, 0xee, - 0x9c, 0x56, 0x3b, 0xd4, 0xba, 0x60, 0x26, 0xef, 0xfc, 0xd8, 0x9e, 0x7b, 0xd6, 0x77, 0xd8, 0xcf, - 0xd1, 0xde, 0x90, 0x9f, 0x45, 0x20, 0x0e, 0x23, 0x48, 0x15, 0x53, 0xe7, 0xc6, 0x79, 0xdd, 0xb7, - 0xcb, 0xc2, 0xdb, 0x7b, 0xf7, 0x4f, 0x27, 0x58, 0x99, 0xb4, 0xfb, 0xa8, 0x75, 0xa6, 0x41, 0xaf, - 0x73, 0x61, 0x36, 0x1f, 0x43, 0xc8, 0xd3, 0x48, 0x9a, 0x58, 0x6b, 0xbe, 0x53, 0x16, 0x5e, 0xab, - 0xbf, 0xa1, 0x1f, 0x6c, 0x54, 0xd9, 0x03, 0xd4, 0xa0, 0xe1, 0x97, 0x9c, 0x09, 0x38, 0x61, 0x09, - 0x38, 0xdb, 0x26, 0x40, 0x72, 0xb7, 0x00, 0x8f, 0x58, 0x28, 0xb8, 0x96, 0xf9, 0xf7, 0xcb, 0xc2, - 0x6b, 0xbc, 0x5c, 0x70, 0x82, 0x65, 0xa8, 0x7d, 0x8a, 0xea, 0x02, 0x52, 0xf8, 0x6a, 0x36, 0x54, - 0xff, 0x6f, 0xc3, 0x6e, 0x59, 0x78, 0xf5, 0xe0, 0x8a, 0x12, 0x2c, 0x80, 0xf6, 0x0b, 0xd4, 0x34, - 0x97, 0x9d, 0x08, 0x9a, 0x4a, 0xa6, 0x6f, 0x93, 0x4e, 0xcd, 0x64, 0xd1, 0x2a, 0x0b, 0xaf, 0xd9, - 0x5f, 0xe9, 0x05, 0x6b, 0xd3, 0x7e, 0x77, 0x32, 0x75, 0x2b, 0x17, 0x53, 0xb7, 0x72, 0x39, 0x75, - 0x2b, 0xdf, 0x4a, 0xd7, 0x9a, 0x94, 0xae, 0x75, 0x51, 0xba, 0xd6, 0x65, 0xe9, 0x5a, 0xbf, 0x4b, - 0xd7, 0xfa, 0xfe, 0xc7, 0xad, 0x7c, 0xd8, 0x1a, 0xf7, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x41, - 0x5e, 0x94, 0x96, 0x5e, 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto index 99692e958d6fc..4206746d8323c 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -30,12 +30,12 @@ option go_package = "v1"; // Lease defines a lease concept. message Lease { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the Lease. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; } @@ -43,7 +43,7 @@ message Lease { // LeaseList is a list of Lease objects. message LeaseList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go index 8f9f24d04ebfb..7a5605ace171d 100644 --- a/vendor/k8s.io/api/coordination/v1/types.go +++ b/vendor/k8s.io/api/coordination/v1/types.go @@ -26,12 +26,12 @@ import ( // Lease defines a lease concept. type Lease struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -65,7 +65,7 @@ type LeaseSpec struct { type LeaseList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go index bd02ad5daa64e..0f14404308c72 100644 --- a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -29,8 +29,8 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index 62493ae17eda9..2463d6258e469 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -17,33 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto - - It has these top-level messages: - Lease - LeaseList - LeaseSpec -*/ package v1beta1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -57,27 +44,142 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{0} +} +func (m *Lease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lease) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lease.Merge(m, src) +} +func (m *Lease) XXX_Size() int { + return m.Size() +} +func (m *Lease) XXX_DiscardUnknown() { + xxx_messageInfo_Lease.DiscardUnknown(m) +} + +var xxx_messageInfo_Lease proto.InternalMessageInfo + +func (m *LeaseList) Reset() { *m = LeaseList{} } +func (*LeaseList) ProtoMessage() {} +func (*LeaseList) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{1} +} +func (m *LeaseList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseList.Merge(m, src) +} +func (m *LeaseList) XXX_Size() int { + return m.Size() +} +func (m *LeaseList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseList proto.InternalMessageInfo -func (m *LeaseList) Reset() { *m = LeaseList{} } -func (*LeaseList) ProtoMessage() {} -func (*LeaseList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } +func (*LeaseSpec) ProtoMessage() {} +func (*LeaseSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{2} +} +func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
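
Editor's note: on the marshaling side, these regenerated files switch from forward writes (`MarshalTo`, `i += copy(...)`, `n1, err := ...`) to `MarshalToSizedBuffer`, which starts at `i := len(dAtA)` and emits fields in reverse order while decrementing `i`; `encodeVarintGenerated` correspondingly subtracts `sovGenerated(v)` from the offset before writing, and `sovGenerated` itself becomes `(math_bits.Len64(x|1) + 6) / 7` instead of a shift loop. Below is a minimal sketch of that back-to-front scheme under those assumptions; the helper names (`sov`, `encodeVarint`, `marshalStringField`) are illustrative, not the generated identifiers.

package main

import (
	"fmt"
	"math/bits"
)

// sov reports how many bytes the varint encoding of x occupies, using the
// math/bits form the regenerated files adopt.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarint writes the varint for v so that it ends at offset and returns
// the new, smaller offset — the same shape as the updated encodeVarintGenerated.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// marshalStringField prepends a length-delimited string field with the given
// single-byte tag, mirroring the HolderIdentity hunk above. The field layout
// here is an example, not one of the generated Lease types.
func marshalStringField(dAtA []byte, i int, tag byte, s string) int {
	i -= len(s)
	copy(dAtA[i:], s)
	i = encodeVarint(dAtA, i, uint64(len(s)))
	i--
	dAtA[i] = tag
	return i
}

func main() {
	// Size the buffer up front, then fill it from the end toward the front.
	s := "node-1"
	size := 1 + sov(uint64(len(s))) + len(s)
	buf := make([]byte, size)
	i := marshalStringField(buf, len(buf), 0x0a, s) // field 1, wire type 2
	fmt.Printf("% x (payload starts at %d)\n", buf[i:], i)
}

Writing tail-first lets each submessage be sized exactly once and removes the `n1, err := ... i += n1` bookkeeping that the old forward `MarshalTo` needed, which is why the diffs above also reverse the order in which fields appear in the marshal functions.
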
+func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseSpec.Merge(m, src) +} +func (m *LeaseSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseSpec.DiscardUnknown(m) +} -func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } -func (*LeaseSpec) ProtoMessage() {} -func (*LeaseSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo func init() { proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1beta1.Lease") proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1beta1.LeaseList") proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptor_daca6bcd2ff63a80) +} + +var fileDescriptor_daca6bcd2ff63a80 = []byte{ + // 540 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0xb6, 0x11, 0xcd, 0x86, 0x96, 0xc8, 0xca, 0xc1, 0xca, 0xc1, 0xae, 0x72, 0x40, + 0x15, 0x52, 0x77, 0x49, 0x85, 0x10, 0xe2, 0x04, 0x16, 0x87, 0x56, 0xb8, 0x42, 0x72, 0x7b, 0x42, + 0x3d, 0xb0, 0xb6, 0x07, 0x67, 0x49, 0xed, 0x35, 0xbb, 0xeb, 0xa0, 0xde, 0x78, 0x04, 0xae, 0xbc, + 0x08, 0xbc, 0x42, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x22, 0xc8, 0x1b, 0xb7, 0x09, 0x49, 0x51, + 0x23, 0x6e, 0xde, 0x99, 0xf9, 0xbf, 0xf9, 0xe7, 0x37, 0x3a, 0x1a, 0xbd, 0x90, 0x98, 0x71, 0x32, + 0xca, 0x03, 0x10, 0x29, 0x28, 0x90, 0x64, 0x0c, 0x69, 0xc4, 0x05, 0xa9, 0x1b, 0x34, 0x63, 0x24, + 0xe4, 0x5c, 0x44, 0x2c, 0xa5, 0x8a, 0xf1, 0x94, 0x8c, 0x07, 0x01, 0x28, 0x3a, 0x20, 0x31, 0xa4, + 0x20, 0xa8, 0x82, 0x08, 0x67, 0x82, 0x2b, 0x6e, 0x3a, 0x33, 0x01, 0xa6, 0x19, 0xc3, 0x8b, 0x02, + 0x5c, 0x0b, 0x7a, 0x07, 0x31, 0x53, 0xc3, 0x3c, 0xc0, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0xb4, + 0x2e, 0xc8, 0x3f, 0xea, 0x97, 0x7e, 0xe8, 0xaf, 0x19, 0xaf, 0xf7, 0x6c, 0x6e, 0x20, 0xa1, 0xe1, + 0x90, 0xa5, 0x20, 0x2e, 0x49, 0x36, 0x8a, 0xab, 0x82, 0x24, 0x09, 0x28, 0x4a, 0xc6, 0x2b, 0x2e, + 0x7a, 0xe4, 0x5f, 0x2a, 0x91, 0xa7, 0x8a, 0x25, 0xb0, 0x22, 0x78, 0x7e, 0x9f, 0x40, 0x86, 0x43, + 0x48, 0xe8, 0xb2, 0xae, 0xff, 0xd3, 0x40, 0x4d, 0x0f, 0xa8, 0x04, 0xf3, 0x03, 0xda, 0xae, 0xdc, + 0x44, 0x54, 0x51, 0xcb, 0xd8, 0x33, 0xf6, 0xdb, 0x87, 0x4f, 0xf1, 0x3c, 0x8b, 0x5b, 0x28, 0xce, + 0x46, 0x71, 0x55, 0x90, 0xb8, 0x9a, 0xc6, 0xe3, 0x01, 0x7e, 0x17, 0x7c, 0x82, 0x50, 0x9d, 0x80, + 0xa2, 0xae, 0x39, 0x29, 0x9c, 0x46, 0x59, 0x38, 0x68, 0x5e, 0xf3, 0x6f, 0xa9, 0xa6, 0x87, 0xb6, + 0x64, 0x06, 0xa1, 0xb5, 0xa1, 0xe9, 0x4f, 0xf0, 0x3d, 0x49, 0x63, 0xed, 0xeb, 0x34, 0x83, 0xd0, + 0x7d, 0x58, 0x73, 0xb7, 0xaa, 0x97, 0xaf, 0x29, 0xfd, 0x1f, 0x06, 0x6a, 0xe9, 0x09, 0x8f, 0x49, + 0x65, 0x9e, 0xaf, 0xb8, 0xc7, 0xeb, 0xb9, 0xaf, 0xd4, 0xda, 0x7b, 0xa7, 0xde, 0xb1, 0x7d, 0x53, + 0x59, 0x70, 0xfe, 0x16, 0x35, 0x99, 0x82, 0x44, 0x5a, 0x1b, 0x7b, 0x9b, 0xfb, 0xed, 0xc3, 0xc7, + 0xeb, 0x59, 0x77, 0x77, 0x6a, 0x64, 0xf3, 0xb8, 0x12, 0xfb, 0x33, 0x46, 0xff, 0xfb, 0x66, 0x6d, + 0xbc, 0x3a, 0xc6, 0x7c, 0x89, 0x76, 0x87, 0xfc, 0x22, 0x02, 0x71, 0x1c, 0x41, 0xaa, 0x98, 0xba, + 0xd4, 0xf6, 0x5b, 0xae, 0x59, 0x16, 0xce, 0xee, 0xd1, 0x5f, 0x1d, 0x7f, 0x69, 0xd2, 0xf4, 0x50, + 0xf7, 0xa2, 
0x02, 0xbd, 0xc9, 0x85, 0x5e, 0x7f, 0x0a, 0x21, 0x4f, 0x23, 0xa9, 0x03, 0x6e, 0xba, + 0x56, 0x59, 0x38, 0x5d, 0xef, 0x8e, 0xbe, 0x7f, 0xa7, 0xca, 0x0c, 0x50, 0x9b, 0x86, 0x9f, 0x73, + 0x26, 0xe0, 0x8c, 0x25, 0x60, 0x6d, 0xea, 0x14, 0xc9, 0x7a, 0x29, 0x9e, 0xb0, 0x50, 0xf0, 0x4a, + 0xe6, 0x3e, 0x2a, 0x0b, 0xa7, 0xfd, 0x7a, 0xce, 0xf1, 0x17, 0xa1, 0xe6, 0x39, 0x6a, 0x09, 0x48, + 0xe1, 0x8b, 0xde, 0xb0, 0xf5, 0x7f, 0x1b, 0x76, 0xca, 0xc2, 0x69, 0xf9, 0x37, 0x14, 0x7f, 0x0e, + 0x34, 0x5f, 0xa1, 0x8e, 0xbe, 0xec, 0x4c, 0xd0, 0x54, 0xb2, 0xea, 0x36, 0x69, 0x35, 0x75, 0x16, + 0xdd, 0xb2, 0x70, 0x3a, 0xde, 0x52, 0xcf, 0x5f, 0x99, 0x76, 0x0f, 0x26, 0x53, 0xbb, 0x71, 0x35, + 0xb5, 0x1b, 0xd7, 0x53, 0xbb, 0xf1, 0xb5, 0xb4, 0x8d, 0x49, 0x69, 0x1b, 0x57, 0xa5, 0x6d, 0x5c, + 0x97, 0xb6, 0xf1, 0xab, 0xb4, 0x8d, 0x6f, 0xbf, 0xed, 0xc6, 0xfb, 0x07, 0xf5, 0x6f, 0xfe, 0x13, + 0x00, 0x00, 0xff, 0xff, 0x51, 0x34, 0x6a, 0x0f, 0x77, 0x04, 0x00, 0x00, +} + func (m *Lease) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -85,33 +187,42 @@ func (m *Lease) Marshal() (dAtA []byte, err error) { } func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -119,37 +230,46 @@ func (m *LeaseList) Marshal() (dAtA []byte, err error) { } func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -157,59 +277,74 @@ func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { } func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.HolderIdentity != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) - i += copy(dAtA[i:], *m.HolderIdentity) + if m.LeaseTransitions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + i-- + dAtA[i] = 0x28 } - if m.LeaseDurationSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } if m.AcquireTime != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AcquireTime.Size())) - n4, err := m.AcquireTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AcquireTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x1a } - if m.RenewTime != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RenewTime.Size())) - n5, err := m.RenewTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 + if m.LeaseDurationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + i-- + dAtA[i] = 0x10 } - if m.LeaseTransitions != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + if m.HolderIdentity != nil { + i -= len(*m.HolderIdentity) + copy(dAtA[i:], *m.HolderIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -220,6 +355,9 @@ func (m *Lease) Size() (n int) { } func (m *LeaseList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -234,6 +372,9 @@ func (m *LeaseList) Size() (n int) { } func (m *LeaseSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HolderIdentity != nil { @@ -258,14 +399,7 @@ func (m *LeaseSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -275,7 +409,7 @@ func (this *Lease) String() string { return "nil" } s := strings.Join([]string{`&Lease{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), 
"ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -285,9 +419,14 @@ func (this *LeaseList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Lease{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Lease", "Lease", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&LeaseList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Lease", "Lease", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -299,8 +438,8 @@ func (this *LeaseSpec) String() string { s := strings.Join([]string{`&LeaseSpec{`, `HolderIdentity:` + valueToStringGenerated(this.HolderIdentity) + `,`, `LeaseDurationSeconds:` + valueToStringGenerated(this.LeaseDurationSeconds) + `,`, - `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, - `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, + `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, `}`, }, "") @@ -329,7 +468,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -357,7 +496,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -366,6 +505,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -387,7 +529,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -396,6 +538,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -412,6 +557,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -439,7 +587,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -467,7 +615,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -476,6 +624,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -497,7 +648,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -506,6 +657,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -523,6 +677,9 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -550,7 +707,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -578,7 +735,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -588,6 +745,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -608,7 +768,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -628,7 +788,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -637,11 +797,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.AcquireTime == nil { - m.AcquireTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.AcquireTime = &v1.MicroTime{} } if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -661,7 +824,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -670,11 +833,14 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.RenewTime == nil { - m.RenewTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + m.RenewTime = &v1.MicroTime{} } if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -694,7 +860,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -709,6 +875,9 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { 
return io.ErrUnexpectedEOF } @@ -775,10 +944,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -807,6 +979,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -825,45 +1000,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 540 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xe3, 0xb6, 0x11, 0xcd, 0x86, 0x96, 0xc8, 0xca, 0xc1, 0xca, 0xc1, 0xae, 0x72, 0x40, - 0x15, 0x52, 0x77, 0x49, 0x85, 0x10, 0xe2, 0x04, 0x16, 0x87, 0x56, 0xb8, 0x42, 0x72, 0x7b, 0x42, - 0x3d, 0xb0, 0xb6, 0x07, 0x67, 0x49, 0xed, 0x35, 0xbb, 0xeb, 0xa0, 0xde, 0x78, 0x04, 0xae, 0xbc, - 0x08, 0xbc, 0x42, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x22, 0xc8, 0x1b, 0xb7, 0x09, 0x49, 0x51, - 0x23, 0x6e, 0xde, 0x99, 0xf9, 0xbf, 0xf9, 0xe7, 0x37, 0x3a, 0x1a, 0xbd, 0x90, 0x98, 0x71, 0x32, - 0xca, 0x03, 0x10, 0x29, 0x28, 0x90, 0x64, 0x0c, 0x69, 0xc4, 0x05, 0xa9, 0x1b, 0x34, 0x63, 0x24, - 0xe4, 0x5c, 0x44, 0x2c, 0xa5, 0x8a, 0xf1, 0x94, 0x8c, 0x07, 0x01, 0x28, 0x3a, 0x20, 0x31, 0xa4, - 0x20, 0xa8, 0x82, 0x08, 0x67, 0x82, 0x2b, 0x6e, 0x3a, 0x33, 0x01, 0xa6, 0x19, 0xc3, 0x8b, 0x02, - 0x5c, 0x0b, 0x7a, 0x07, 0x31, 0x53, 0xc3, 0x3c, 0xc0, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0xb4, - 0x2e, 0xc8, 0x3f, 0xea, 0x97, 0x7e, 0xe8, 0xaf, 0x19, 0xaf, 0xf7, 0x6c, 0x6e, 0x20, 0xa1, 0xe1, - 0x90, 0xa5, 0x20, 0x2e, 0x49, 0x36, 0x8a, 0xab, 0x82, 0x24, 0x09, 0x28, 0x4a, 0xc6, 0x2b, 0x2e, - 0x7a, 0xe4, 0x5f, 0x2a, 0x91, 0xa7, 0x8a, 0x25, 0xb0, 0x22, 0x78, 0x7e, 0x9f, 0x40, 0x86, 0x43, - 0x48, 0xe8, 0xb2, 0xae, 0xff, 0xd3, 0x40, 0x4d, 0x0f, 0xa8, 0x04, 0xf3, 0x03, 0xda, 0xae, 0xdc, - 0x44, 0x54, 0x51, 0xcb, 0xd8, 0x33, 0xf6, 0xdb, 0x87, 0x4f, 0xf1, 0x3c, 0x8b, 0x5b, 0x28, 0xce, - 0x46, 0x71, 0x55, 0x90, 0xb8, 0x9a, 0xc6, 0xe3, 0x01, 0x7e, 0x17, 0x7c, 0x82, 0x50, 0x9d, 0x80, - 0xa2, 0xae, 0x39, 0x29, 0x9c, 0x46, 0x59, 0x38, 0x68, 0x5e, 0xf3, 0x6f, 0xa9, 0xa6, 0x87, 0xb6, - 0x64, 0x06, 0xa1, 0xb5, 0xa1, 0xe9, 0x4f, 0xf0, 0x3d, 0x49, 0x63, 0xed, 0xeb, 0x34, 0x83, 0xd0, - 0x7d, 0x58, 0x73, 0xb7, 0xaa, 0x97, 0xaf, 0x29, 0xfd, 0x1f, 0x06, 0x6a, 0xe9, 0x09, 0x8f, 0x49, - 0x65, 0x9e, 0xaf, 0xb8, 0xc7, 0xeb, 0xb9, 0xaf, 0xd4, 0xda, 0x7b, 0xa7, 0xde, 0xb1, 0x7d, 0x53, - 0x59, 0x70, 0xfe, 0x16, 0x35, 0x99, 0x82, 0x44, 0x5a, 0x1b, 0x7b, 0x9b, 0xfb, 0xed, 0xc3, 0xc7, - 0xeb, 0x59, 0x77, 0x77, 0x6a, 0x64, 0xf3, 0xb8, 0x12, 0xfb, 0x33, 0x46, 0xff, 0xfb, 0x66, 0x6d, - 0xbc, 0x3a, 0xc6, 0x7c, 0x89, 0x76, 0x87, 0xfc, 0x22, 0x02, 0x71, 0x1c, 0x41, 0xaa, 0x98, 0xba, - 0xd4, 0xf6, 0x5b, 0xae, 0x59, 0x16, 0xce, 0xee, 0xd1, 0x5f, 0x1d, 0x7f, 0x69, 0xd2, 0xf4, 0x50, - 0xf7, 0xa2, 0x02, 0xbd, 0xc9, 0x85, 0x5e, 0x7f, 0x0a, 0x21, 0x4f, 0x23, 0xa9, 0x03, 0x6e, 0xba, - 0x56, 0x59, 0x38, 0x5d, 0xef, 0x8e, 0xbe, 0x7f, 0xa7, 0xca, 0x0c, 0x50, 0x9b, 0x86, 0x9f, 0x73, - 0x26, 0xe0, 0x8c, 0x25, 0x60, 0x6d, 0xea, 0x14, 0xc9, 0x7a, 0x29, 0x9e, 0xb0, 0x50, 0xf0, 0x4a, 
- 0xe6, 0x3e, 0x2a, 0x0b, 0xa7, 0xfd, 0x7a, 0xce, 0xf1, 0x17, 0xa1, 0xe6, 0x39, 0x6a, 0x09, 0x48, - 0xe1, 0x8b, 0xde, 0xb0, 0xf5, 0x7f, 0x1b, 0x76, 0xca, 0xc2, 0x69, 0xf9, 0x37, 0x14, 0x7f, 0x0e, - 0x34, 0x5f, 0xa1, 0x8e, 0xbe, 0xec, 0x4c, 0xd0, 0x54, 0xb2, 0xea, 0x36, 0x69, 0x35, 0x75, 0x16, - 0xdd, 0xb2, 0x70, 0x3a, 0xde, 0x52, 0xcf, 0x5f, 0x99, 0x76, 0x0f, 0x26, 0x53, 0xbb, 0x71, 0x35, - 0xb5, 0x1b, 0xd7, 0x53, 0xbb, 0xf1, 0xb5, 0xb4, 0x8d, 0x49, 0x69, 0x1b, 0x57, 0xa5, 0x6d, 0x5c, - 0x97, 0xb6, 0xf1, 0xab, 0xb4, 0x8d, 0x6f, 0xbf, 0xed, 0xc6, 0xfb, 0x07, 0xf5, 0x6f, 0xfe, 0x13, - 0x00, 0x00, 0xff, 0xff, 0x51, 0x34, 0x6a, 0x0f, 0x77, 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 918e0de1c74c4..cfc2711c677b8 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -30,12 +30,12 @@ option go_package = "v1beta1"; // Lease defines a lease concept. message Lease { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; } @@ -43,7 +43,7 @@ message Lease { // LeaseList is a list of Lease objects. message LeaseList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go index 846f72802808e..da88f675c0acd 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -26,12 +26,12 @@ import ( // Lease defines a lease concept. type Lease struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -65,7 +65,7 @@ type LeaseSpec struct { type LeaseList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index 4532d322ab6f0..f557d265d4c52 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -29,8 +29,8 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index a22473ec0cdb4..8f788035e6242 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -17,236 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto - - It has these top-level messages: - AWSElasticBlockStoreVolumeSource - Affinity - AttachedVolume - AvoidPods - AzureDiskVolumeSource - AzureFilePersistentVolumeSource - AzureFileVolumeSource - Binding - CSIPersistentVolumeSource - CSIVolumeSource - Capabilities - CephFSPersistentVolumeSource - CephFSVolumeSource - CinderPersistentVolumeSource - CinderVolumeSource - ClientIPConfig - ComponentCondition - ComponentStatus - ComponentStatusList - ConfigMap - ConfigMapEnvSource - ConfigMapKeySelector - ConfigMapList - ConfigMapNodeConfigSource - ConfigMapProjection - ConfigMapVolumeSource - Container - ContainerImage - ContainerPort - ContainerState - ContainerStateRunning - ContainerStateTerminated - ContainerStateWaiting - ContainerStatus - DaemonEndpoint - DownwardAPIProjection - DownwardAPIVolumeFile - DownwardAPIVolumeSource - EmptyDirVolumeSource - EndpointAddress - EndpointPort - EndpointSubset - Endpoints - EndpointsList - EnvFromSource - EnvVar - EnvVarSource - Event - EventList - EventSeries - EventSource - ExecAction - FCVolumeSource - FlexPersistentVolumeSource - FlexVolumeSource - FlockerVolumeSource - GCEPersistentDiskVolumeSource - GitRepoVolumeSource - GlusterfsPersistentVolumeSource - GlusterfsVolumeSource - HTTPGetAction - HTTPHeader - Handler - HostAlias - HostPathVolumeSource - ISCSIPersistentVolumeSource - ISCSIVolumeSource - KeyToPath - Lifecycle - LimitRange - LimitRangeItem - LimitRangeList - LimitRangeSpec - List - LoadBalancerIngress - LoadBalancerStatus - LocalObjectReference - LocalVolumeSource - NFSVolumeSource - Namespace - NamespaceList - NamespaceSpec - NamespaceStatus - Node - NodeAddress - NodeAffinity - NodeCondition - NodeConfigSource - NodeConfigStatus - NodeDaemonEndpoints - NodeList - NodeProxyOptions - NodeResources - NodeSelector - NodeSelectorRequirement - NodeSelectorTerm - NodeSpec - NodeStatus - NodeSystemInfo - ObjectFieldSelector - ObjectReference - PersistentVolume - PersistentVolumeClaim - PersistentVolumeClaimCondition - PersistentVolumeClaimList - PersistentVolumeClaimSpec - PersistentVolumeClaimStatus - PersistentVolumeClaimVolumeSource - PersistentVolumeList - PersistentVolumeSource - PersistentVolumeSpec - PersistentVolumeStatus - PhotonPersistentDiskVolumeSource - Pod - PodAffinity - PodAffinityTerm - PodAntiAffinity - PodAttachOptions - PodCondition - PodDNSConfig - PodDNSConfigOption - PodExecOptions - PodList - PodLogOptions - PodPortForwardOptions - PodProxyOptions - PodReadinessGate - PodSecurityContext - PodSignature - PodSpec - PodStatus - PodStatusResult - PodTemplate - PodTemplateList - PodTemplateSpec - PortworxVolumeSource - Preconditions - PreferAvoidPodsEntry - PreferredSchedulingTerm - Probe - ProjectedVolumeSource - QuobyteVolumeSource - RBDPersistentVolumeSource - RBDVolumeSource - RangeAllocation - ReplicationController - ReplicationControllerCondition - ReplicationControllerList - ReplicationControllerSpec - ReplicationControllerStatus - ResourceFieldSelector - ResourceQuota - ResourceQuotaList - ResourceQuotaSpec - ResourceQuotaStatus - ResourceRequirements - SELinuxOptions - ScaleIOPersistentVolumeSource - ScaleIOVolumeSource - ScopeSelector - ScopedResourceSelectorRequirement - Secret - SecretEnvSource - SecretKeySelector - SecretList - SecretProjection - SecretReference - SecretVolumeSource - SecurityContext - SerializedReference - Service - ServiceAccount - ServiceAccountList - ServiceAccountTokenProjection - 
ServiceList - ServicePort - ServiceProxyOptions - ServiceSpec - ServiceStatus - SessionAffinityConfig - StorageOSPersistentVolumeSource - StorageOSVolumeSource - Sysctl - TCPSocketAction - Taint - Toleration - TopologySelectorLabelRequirement - TopologySelectorTerm - TypedLocalObjectReference - Volume - VolumeDevice - VolumeMount - VolumeNodeAffinity - VolumeProjection - VolumeSource - VsphereVirtualDiskVolumeSource - WeightedPodAffinityTerm - WindowsSecurityContextOptions -*/ package v1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" + strings "strings" - io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they are not otherwise used. @@ -263,17398 +52,27957 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_83c10c24ec417dc9, []int{0} +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AWSElasticBlockStoreVolumeSource.Merge(m, src) +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AWSElasticBlockStoreVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_AWSElasticBlockStoreVolumeSource proto.InternalMessageInfo + +func (m *Affinity) Reset() { *m = Affinity{} } +func (*Affinity) ProtoMessage() {} +func (*Affinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{1} +} +func (m *Affinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Affinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Affinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Affinity.Merge(m, src) +} +func (m *Affinity) XXX_Size() int { + return m.Size() +} +func (m *Affinity) XXX_DiscardUnknown() { + xxx_messageInfo_Affinity.DiscardUnknown(m) +} + +var xxx_messageInfo_Affinity proto.InternalMessageInfo + +func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } +func (*AttachedVolume) ProtoMessage() {} +func (*AttachedVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{2} +} 
+func (m *AttachedVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AttachedVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AttachedVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttachedVolume.Merge(m, src) +} +func (m *AttachedVolume) XXX_Size() int { + return m.Size() +} +func (m *AttachedVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AttachedVolume.DiscardUnknown(m) } -func (m *Affinity) Reset() { *m = Affinity{} } -func (*Affinity) ProtoMessage() {} -func (*Affinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_AttachedVolume proto.InternalMessageInfo -func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } -func (*AttachedVolume) ProtoMessage() {} -func (*AttachedVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *AvoidPods) Reset() { *m = AvoidPods{} } +func (*AvoidPods) ProtoMessage() {} +func (*AvoidPods) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{3} +} +func (m *AvoidPods) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AvoidPods) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AvoidPods) XXX_Merge(src proto.Message) { + xxx_messageInfo_AvoidPods.Merge(m, src) +} +func (m *AvoidPods) XXX_Size() int { + return m.Size() +} +func (m *AvoidPods) XXX_DiscardUnknown() { + xxx_messageInfo_AvoidPods.DiscardUnknown(m) +} + +var xxx_messageInfo_AvoidPods proto.InternalMessageInfo -func (m *AvoidPods) Reset() { *m = AvoidPods{} } -func (*AvoidPods) ProtoMessage() {} -func (*AvoidPods) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } +func (*AzureDiskVolumeSource) ProtoMessage() {} +func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{4} +} +func (m *AzureDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureDiskVolumeSource.Merge(m, src) +} +func (m *AzureDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureDiskVolumeSource.DiscardUnknown(m) +} -func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } -func (*AzureDiskVolumeSource) ProtoMessage() {} -func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_AzureDiskVolumeSource proto.InternalMessageInfo func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } func (*AzureFilePersistentVolumeSource) ProtoMessage() {} func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_83c10c24ec417dc9, []int{5} +} +func (m *AzureFilePersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*AzureFilePersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureFilePersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureFilePersistentVolumeSource.Merge(m, src) +} +func (m *AzureFilePersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureFilePersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureFilePersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_AzureFilePersistentVolumeSource proto.InternalMessageInfo + +func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } +func (*AzureFileVolumeSource) ProtoMessage() {} +func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{6} +} +func (m *AzureFileVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureFileVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureFileVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureFileVolumeSource.Merge(m, src) +} +func (m *AzureFileVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureFileVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureFileVolumeSource.DiscardUnknown(m) } -func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } -func (*AzureFileVolumeSource) ProtoMessage() {} -func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_AzureFileVolumeSource proto.InternalMessageInfo + +func (m *Binding) Reset() { *m = Binding{} } +func (*Binding) ProtoMessage() {} +func (*Binding) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{7} +} +func (m *Binding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Binding) XXX_Merge(src proto.Message) { + xxx_messageInfo_Binding.Merge(m, src) +} +func (m *Binding) XXX_Size() int { + return m.Size() +} +func (m *Binding) XXX_DiscardUnknown() { + xxx_messageInfo_Binding.DiscardUnknown(m) +} -func (m *Binding) Reset() { *m = Binding{} } -func (*Binding) ProtoMessage() {} -func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_Binding proto.InternalMessageInfo func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } func (*CSIPersistentVolumeSource) ProtoMessage() {} func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} + return fileDescriptor_83c10c24ec417dc9, []int{8} } +func (m *CSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIPersistentVolumeSource.Merge(m, src) +} +func (m *CSIPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m 
*CSIPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CSIPersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIPersistentVolumeSource proto.InternalMessageInfo + +func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} } +func (*CSIVolumeSource) ProtoMessage() {} +func (*CSIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{9} +} +func (m *CSIVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIVolumeSource.Merge(m, src) +} +func (m *CSIVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CSIVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CSIVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIVolumeSource proto.InternalMessageInfo -func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} } -func (*CSIVolumeSource) ProtoMessage() {} -func (*CSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *Capabilities) Reset() { *m = Capabilities{} } +func (*Capabilities) ProtoMessage() {} +func (*Capabilities) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{10} +} +func (m *Capabilities) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Capabilities) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Capabilities) XXX_Merge(src proto.Message) { + xxx_messageInfo_Capabilities.Merge(m, src) +} +func (m *Capabilities) XXX_Size() int { + return m.Size() +} +func (m *Capabilities) XXX_DiscardUnknown() { + xxx_messageInfo_Capabilities.DiscardUnknown(m) +} -func (m *Capabilities) Reset() { *m = Capabilities{} } -func (*Capabilities) ProtoMessage() {} -func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_Capabilities proto.InternalMessageInfo func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } func (*CephFSPersistentVolumeSource) ProtoMessage() {} func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_83c10c24ec417dc9, []int{11} +} +func (m *CephFSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CephFSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CephFSPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CephFSPersistentVolumeSource.Merge(m, src) +} +func (m *CephFSPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CephFSPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CephFSPersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_CephFSPersistentVolumeSource proto.InternalMessageInfo + +func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } +func (*CephFSVolumeSource) ProtoMessage() {} +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{12} +} +func (m 
*CephFSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CephFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CephFSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CephFSVolumeSource.Merge(m, src) +} +func (m *CephFSVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CephFSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CephFSVolumeSource.DiscardUnknown(m) } -func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } -func (*CephFSVolumeSource) ProtoMessage() {} -func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_CephFSVolumeSource proto.InternalMessageInfo func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } func (*CinderPersistentVolumeSource) ProtoMessage() {} func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptor_83c10c24ec417dc9, []int{13} +} +func (m *CinderPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CinderPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CinderPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CinderPersistentVolumeSource.Merge(m, src) +} +func (m *CinderPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CinderPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CinderPersistentVolumeSource.DiscardUnknown(m) } -func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } -func (*CinderVolumeSource) ProtoMessage() {} -func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - -func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } -func (*ClientIPConfig) ProtoMessage() {} -func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } -func (*ComponentCondition) ProtoMessage() {} -func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_CinderPersistentVolumeSource proto.InternalMessageInfo -func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } -func (*ComponentStatus) ProtoMessage() {} -func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } +func (*CinderVolumeSource) ProtoMessage() {} +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{14} +} +func (m *CinderVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CinderVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CinderVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CinderVolumeSource.Merge(m, src) +} +func (m *CinderVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CinderVolumeSource) XXX_DiscardUnknown() { + 
xxx_messageInfo_CinderVolumeSource.DiscardUnknown(m) +} -func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } -func (*ComponentStatusList) ProtoMessage() {} -func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo -func (m *ConfigMap) Reset() { *m = ConfigMap{} } -func (*ConfigMap) ProtoMessage() {} -func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } +func (*ClientIPConfig) ProtoMessage() {} +func (*ClientIPConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{15} +} +func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientIPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClientIPConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientIPConfig.Merge(m, src) +} +func (m *ClientIPConfig) XXX_Size() int { + return m.Size() +} +func (m *ClientIPConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ClientIPConfig.DiscardUnknown(m) +} -func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } -func (*ConfigMapEnvSource) ProtoMessage() {} -func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo -func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } -func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } +func (*ComponentCondition) ProtoMessage() {} +func (*ComponentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{16} +} +func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentCondition.Merge(m, src) +} +func (m *ComponentCondition) XXX_Size() int { + return m.Size() +} +func (m *ComponentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentCondition.DiscardUnknown(m) +} -func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } -func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo -func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } -func (*ConfigMapNodeConfigSource) ProtoMessage() {} -func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{23} +func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } +func (*ComponentStatus) ProtoMessage() {} +func (*ComponentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{17} +} +func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentStatus.Merge(m, src) +} +func (m *ComponentStatus) XXX_Size() int { + return m.Size() +} +func (m *ComponentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentStatus.DiscardUnknown(m) } -func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } -func (*ConfigMapProjection) ProtoMessage() {} -func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo -func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } -func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } +func (*ComponentStatusList) ProtoMessage() {} +func (*ComponentStatusList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{18} +} +func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentStatusList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentStatusList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentStatusList.Merge(m, src) +} +func (m *ComponentStatusList) XXX_Size() int { + return m.Size() +} +func (m *ComponentStatusList) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentStatusList.DiscardUnknown(m) +} -func (m *Container) Reset() { *m = Container{} } -func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo -func (m *ContainerImage) Reset() { *m = ContainerImage{} } -func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (m *ConfigMap) Reset() { *m = ConfigMap{} } +func (*ConfigMap) ProtoMessage() {} +func (*ConfigMap) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{19} +} +func (m *ConfigMap) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMap.Merge(m, src) +} +func (m *ConfigMap) XXX_Size() int { + return m.Size() +} +func (m *ConfigMap) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMap.DiscardUnknown(m) +} -func (m *ContainerPort) Reset() { *m = ContainerPort{} } -func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +var xxx_messageInfo_ConfigMap proto.InternalMessageInfo -func (m *ContainerState) Reset() { *m = ContainerState{} } -func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } +func (*ConfigMapEnvSource) ProtoMessage() {} +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, 
[]int{20} +} +func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapEnvSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapEnvSource.Merge(m, src) +} +func (m *ConfigMapEnvSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapEnvSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapEnvSource.DiscardUnknown(m) +} -func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } -func (*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo -func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } -func (*ContainerStateTerminated) ProtoMessage() {} -func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{31} +func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } +func (*ConfigMapKeySelector) ProtoMessage() {} +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{21} +} +func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapKeySelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapKeySelector.Merge(m, src) +} +func (m *ConfigMapKeySelector) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapKeySelector) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapKeySelector.DiscardUnknown(m) } -func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } -func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo -func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } -func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } +func (*ConfigMapList) ProtoMessage() {} +func (*ConfigMapList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{22} +} +func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapList.Merge(m, src) +} +func (m *ConfigMapList) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapList) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapList.DiscardUnknown(m) +} -func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } -func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo -func (m *DownwardAPIProjection) Reset() { *m 
= DownwardAPIProjection{} } -func (*DownwardAPIProjection) ProtoMessage() {} -func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } +func (*ConfigMapNodeConfigSource) ProtoMessage() {} +func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{23} +} +func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapNodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapNodeConfigSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapNodeConfigSource.Merge(m, src) +} +func (m *ConfigMapNodeConfigSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapNodeConfigSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapNodeConfigSource.DiscardUnknown(m) +} -func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } -func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo -func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } -func (*DownwardAPIVolumeSource) ProtoMessage() {} -func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{37} +func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } +func (*ConfigMapProjection) ProtoMessage() {} +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{24} +} +func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapProjection.Merge(m, src) +} +func (m *ConfigMapProjection) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapProjection) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapProjection.DiscardUnknown(m) } -func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } -func (*EmptyDirVolumeSource) ProtoMessage() {} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo -func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } -func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } +func (*ConfigMapVolumeSource) ProtoMessage() {} +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{25} +} +func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapVolumeSource) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ConfigMapVolumeSource.Merge(m, src) +} +func (m *ConfigMapVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapVolumeSource.DiscardUnknown(m) +} -func (m *EndpointPort) Reset() { *m = EndpointPort{} } -func (*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo -func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } -func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (m *Container) Reset() { *m = Container{} } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{26} +} +func (m *Container) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Container) XXX_Merge(src proto.Message) { + xxx_messageInfo_Container.Merge(m, src) +} +func (m *Container) XXX_Size() int { + return m.Size() +} +func (m *Container) XXX_DiscardUnknown() { + xxx_messageInfo_Container.DiscardUnknown(m) +} -func (m *Endpoints) Reset() { *m = Endpoints{} } -func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +var xxx_messageInfo_Container proto.InternalMessageInfo -func (m *EndpointsList) Reset() { *m = EndpointsList{} } -func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (m *ContainerImage) Reset() { *m = ContainerImage{} } +func (*ContainerImage) ProtoMessage() {} +func (*ContainerImage) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{27} +} +func (m *ContainerImage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerImage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerImage.Merge(m, src) +} +func (m *ContainerImage) XXX_Size() int { + return m.Size() +} +func (m *ContainerImage) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerImage.DiscardUnknown(m) +} -func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } -func (*EnvFromSource) ProtoMessage() {} -func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +var xxx_messageInfo_ContainerImage proto.InternalMessageInfo -func (m *EnvVar) Reset() { *m = EnvVar{} } -func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (m *ContainerPort) Reset() { *m = ContainerPort{} } +func (*ContainerPort) ProtoMessage() {} +func (*ContainerPort) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{28} +} +func (m *ContainerPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], 
nil +} +func (m *ContainerPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerPort.Merge(m, src) +} +func (m *ContainerPort) XXX_Size() int { + return m.Size() +} +func (m *ContainerPort) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerPort.DiscardUnknown(m) +} -func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } -func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +var xxx_messageInfo_ContainerPort proto.InternalMessageInfo -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (*ContainerState) ProtoMessage() {} +func (*ContainerState) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{29} +} +func (m *ContainerState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerState.Merge(m, src) +} +func (m *ContainerState) XXX_Size() int { + return m.Size() +} +func (m *ContainerState) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerState.DiscardUnknown(m) +} -func (m *EventList) Reset() { *m = EventList{} } -func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +var xxx_messageInfo_ContainerState proto.InternalMessageInfo -func (m *EventSeries) Reset() { *m = EventSeries{} } -func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } +func (*ContainerStateRunning) ProtoMessage() {} +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{30} +} +func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStateRunning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStateRunning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateRunning.Merge(m, src) +} +func (m *ContainerStateRunning) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateRunning) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStateRunning.DiscardUnknown(m) +} -func (m *EventSource) Reset() { *m = EventSource{} } -func (*EventSource) ProtoMessage() {} -func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo -func (m *ExecAction) Reset() { *m = ExecAction{} } -func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } +func (*ContainerStateTerminated) ProtoMessage() {} +func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{31} +} +func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*ContainerStateTerminated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStateTerminated) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateTerminated.Merge(m, src) +} +func (m *ContainerStateTerminated) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateTerminated) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStateTerminated.DiscardUnknown(m) +} -func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } -func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo -func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } -func (*FlexPersistentVolumeSource) ProtoMessage() {} -func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{53} +func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } +func (*ContainerStateWaiting) ProtoMessage() {} +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{32} +} +func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStateWaiting) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStateWaiting) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateWaiting.Merge(m, src) +} +func (m *ContainerStateWaiting) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateWaiting) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStateWaiting.DiscardUnknown(m) } -func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } -func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } - -func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } -func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo -func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } -func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} -func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{56} +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{33} +} +func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStatus.Merge(m, src) +} +func (m *ContainerStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStatus.DiscardUnknown(m) } -func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } -func (*GitRepoVolumeSource) 
ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } +var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo -func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } -func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} -func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{58} +func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } +func (*DaemonEndpoint) ProtoMessage() {} +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{34} +} +func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonEndpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonEndpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonEndpoint.Merge(m, src) +} +func (m *DaemonEndpoint) XXX_Size() int { + return m.Size() +} +func (m *DaemonEndpoint) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonEndpoint.DiscardUnknown(m) } -func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } -func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } - -func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } -func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo -func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } -func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } +func (*DownwardAPIProjection) ProtoMessage() {} +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{35} +} +func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownwardAPIProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DownwardAPIProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownwardAPIProjection.Merge(m, src) +} +func (m *DownwardAPIProjection) XXX_Size() int { + return m.Size() +} +func (m *DownwardAPIProjection) XXX_DiscardUnknown() { + xxx_messageInfo_DownwardAPIProjection.DiscardUnknown(m) +} -func (m *Handler) Reset() { *m = Handler{} } -func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } +var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo -func (m *HostAlias) Reset() { *m = HostAlias{} } -func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } +func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } +func (*DownwardAPIVolumeFile) ProtoMessage() {} +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{36} +} +func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownwardAPIVolumeFile) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DownwardAPIVolumeFile) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownwardAPIVolumeFile.Merge(m, src) +} +func (m *DownwardAPIVolumeFile) XXX_Size() int { + return m.Size() +} +func (m *DownwardAPIVolumeFile) XXX_DiscardUnknown() { + xxx_messageInfo_DownwardAPIVolumeFile.DiscardUnknown(m) +} -func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } -func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo -func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } -func (*ISCSIPersistentVolumeSource) ProtoMessage() {} -func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{65} +func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } +func (*DownwardAPIVolumeSource) ProtoMessage() {} +func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{37} +} +func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownwardAPIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DownwardAPIVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownwardAPIVolumeSource.Merge(m, src) +} +func (m *DownwardAPIVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *DownwardAPIVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_DownwardAPIVolumeSource.DiscardUnknown(m) } -func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } -func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } +var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo -func (m *KeyToPath) Reset() { *m = KeyToPath{} } -func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } +func (*EmptyDirVolumeSource) ProtoMessage() {} +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{38} +} +func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EmptyDirVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EmptyDirVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EmptyDirVolumeSource.Merge(m, src) +} +func (m *EmptyDirVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *EmptyDirVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_EmptyDirVolumeSource.DiscardUnknown(m) +} -func (m *Lifecycle) Reset() { *m = Lifecycle{} } -func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo -func (m *LimitRange) Reset() { *m = LimitRange{} } -func (*LimitRange) ProtoMessage() {} 
-func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } +func (*EndpointAddress) ProtoMessage() {} +func (*EndpointAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{39} +} +func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointAddress.Merge(m, src) +} +func (m *EndpointAddress) XXX_Size() int { + return m.Size() +} +func (m *EndpointAddress) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointAddress.DiscardUnknown(m) +} -func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } -func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo -func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } -func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{40} +} +func (m *EndpointPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointPort.Merge(m, src) +} +func (m *EndpointPort) XXX_Size() int { + return m.Size() +} +func (m *EndpointPort) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointPort.DiscardUnknown(m) +} -func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } -func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } +var xxx_messageInfo_EndpointPort proto.InternalMessageInfo -func (m *List) Reset() { *m = List{} } -func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } +func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } +func (*EndpointSubset) ProtoMessage() {} +func (*EndpointSubset) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{41} +} +func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSubset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSubset) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSubset.Merge(m, src) +} +func (m *EndpointSubset) XXX_Size() int { + return m.Size() +} +func (m *EndpointSubset) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSubset.DiscardUnknown(m) +} -func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } -func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +var xxx_messageInfo_EndpointSubset 
proto.InternalMessageInfo -func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } -func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +func (m *Endpoints) Reset() { *m = Endpoints{} } +func (*Endpoints) ProtoMessage() {} +func (*Endpoints) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{42} +} +func (m *Endpoints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Endpoints) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoints.Merge(m, src) +} +func (m *Endpoints) XXX_Size() int { + return m.Size() +} +func (m *Endpoints) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoints.DiscardUnknown(m) +} -func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } -func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +var xxx_messageInfo_Endpoints proto.InternalMessageInfo -func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } -func (*LocalVolumeSource) ProtoMessage() {} -func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } +func (m *EndpointsList) Reset() { *m = EndpointsList{} } +func (*EndpointsList) ProtoMessage() {} +func (*EndpointsList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{43} +} +func (m *EndpointsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointsList.Merge(m, src) +} +func (m *EndpointsList) XXX_Size() int { + return m.Size() +} +func (m *EndpointsList) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointsList.DiscardUnknown(m) +} -func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } -func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +var xxx_messageInfo_EndpointsList proto.InternalMessageInfo -func (m *Namespace) Reset() { *m = Namespace{} } -func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } +func (*EnvFromSource) ProtoMessage() {} +func (*EnvFromSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{44} +} +func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvFromSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EnvFromSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvFromSource.Merge(m, src) +} +func (m *EnvFromSource) XXX_Size() int { + return m.Size() +} +func (m *EnvFromSource) XXX_DiscardUnknown() { + xxx_messageInfo_EnvFromSource.DiscardUnknown(m) +} -func (m *NamespaceList) Reset() { *m = NamespaceList{} } -func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo -func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } -func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } +func (m *EnvVar) Reset() { *m = EnvVar{} } +func (*EnvVar) ProtoMessage() {} +func (*EnvVar) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{45} +} +func (m *EnvVar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvVar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EnvVar) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvVar.Merge(m, src) +} +func (m *EnvVar) XXX_Size() int { + return m.Size() +} +func (m *EnvVar) XXX_DiscardUnknown() { + xxx_messageInfo_EnvVar.DiscardUnknown(m) +} -func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } -func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +var xxx_messageInfo_EnvVar proto.InternalMessageInfo -func (m *Node) Reset() { *m = Node{} } -func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } +func (*EnvVarSource) ProtoMessage() {} +func (*EnvVarSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{46} +} +func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvVarSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EnvVarSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvVarSource.Merge(m, src) +} +func (m *EnvVarSource) XXX_Size() int { + return m.Size() +} +func (m *EnvVarSource) XXX_DiscardUnknown() { + xxx_messageInfo_EnvVarSource.DiscardUnknown(m) +} -func (m *NodeAddress) Reset() { *m = NodeAddress{} } -func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } +var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo -func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } -func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } +func (*EphemeralContainer) ProtoMessage() {} +func (*EphemeralContainer) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{47} +} +func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EphemeralContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EphemeralContainer) XXX_Merge(src proto.Message) { + xxx_messageInfo_EphemeralContainer.Merge(m, src) +} +func (m *EphemeralContainer) XXX_Size() int { + return m.Size() +} +func (m *EphemeralContainer) XXX_DiscardUnknown() { + xxx_messageInfo_EphemeralContainer.DiscardUnknown(m) +} -func (m *NodeCondition) Reset() { *m = NodeCondition{} } -func (*NodeCondition) ProtoMessage() {} 
-func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } +var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo -func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } -func (*NodeConfigSource) ProtoMessage() {} -func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } +func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } +func (*EphemeralContainerCommon) ProtoMessage() {} +func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{48} +} +func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EphemeralContainerCommon) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EphemeralContainerCommon) XXX_Merge(src proto.Message) { + xxx_messageInfo_EphemeralContainerCommon.Merge(m, src) +} +func (m *EphemeralContainerCommon) XXX_Size() int { + return m.Size() +} +func (m *EphemeralContainerCommon) XXX_DiscardUnknown() { + xxx_messageInfo_EphemeralContainerCommon.DiscardUnknown(m) +} -func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } -func (*NodeConfigStatus) ProtoMessage() {} -func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } +var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo -func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } -func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } +func (m *EphemeralContainers) Reset() { *m = EphemeralContainers{} } +func (*EphemeralContainers) ProtoMessage() {} +func (*EphemeralContainers) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{49} +} +func (m *EphemeralContainers) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EphemeralContainers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EphemeralContainers) XXX_Merge(src proto.Message) { + xxx_messageInfo_EphemeralContainers.Merge(m, src) +} +func (m *EphemeralContainers) XXX_Size() int { + return m.Size() +} +func (m *EphemeralContainers) XXX_DiscardUnknown() { + xxx_messageInfo_EphemeralContainers.DiscardUnknown(m) +} -func (m *NodeList) Reset() { *m = NodeList{} } -func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } +var xxx_messageInfo_EphemeralContainers proto.InternalMessageInfo -func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } -func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{50} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Event) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} -func (m *NodeResources) Reset() { *m = NodeResources{} } -func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } +var xxx_messageInfo_Event proto.InternalMessageInfo -func (m *NodeSelector) Reset() { *m = NodeSelector{} } -func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } - -func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } -func (*NodeSelectorRequirement) ProtoMessage() {} -func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{94} +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{51} +} +func (m *EventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) +} +func (m *EventList) XXX_Size() int { + return m.Size() +} +func (m *EventList) XXX_DiscardUnknown() { + xxx_messageInfo_EventList.DiscardUnknown(m) } -func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } -func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } - -func (m *NodeSpec) Reset() { *m = NodeSpec{} } -func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } +var xxx_messageInfo_EventList proto.InternalMessageInfo -func (m *NodeStatus) Reset() { *m = NodeStatus{} } -func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{52} +} +func (m *EventSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSeries.Merge(m, src) +} +func (m *EventSeries) XXX_Size() int { + return m.Size() +} +func (m *EventSeries) XXX_DiscardUnknown() { + xxx_messageInfo_EventSeries.DiscardUnknown(m) +} -func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } -func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } +var xxx_messageInfo_EventSeries proto.InternalMessageInfo -func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } -func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } +func (m *EventSource) Reset() { *m = EventSource{} } +func (*EventSource) ProtoMessage() {} +func (*EventSource) 
Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{53} +} +func (m *EventSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSource.Merge(m, src) +} +func (m *EventSource) XXX_Size() int { + return m.Size() +} +func (m *EventSource) XXX_DiscardUnknown() { + xxx_messageInfo_EventSource.DiscardUnknown(m) +} -func (m *ObjectReference) Reset() { *m = ObjectReference{} } -func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } +var xxx_messageInfo_EventSource proto.InternalMessageInfo -func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } -func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} } +func (m *ExecAction) Reset() { *m = ExecAction{} } +func (*ExecAction) ProtoMessage() {} +func (*ExecAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{54} +} +func (m *ExecAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExecAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecAction.Merge(m, src) +} +func (m *ExecAction) XXX_Size() int { + return m.Size() +} +func (m *ExecAction) XXX_DiscardUnknown() { + xxx_messageInfo_ExecAction.DiscardUnknown(m) +} -func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } -func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} } +var xxx_messageInfo_ExecAction proto.InternalMessageInfo -func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } -func (*PersistentVolumeClaimCondition) ProtoMessage() {} -func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{103} +func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } +func (*FCVolumeSource) ProtoMessage() {} +func (*FCVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{55} } - -func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } -func (*PersistentVolumeClaimList) ProtoMessage() {} -func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{104} +func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } -func (*PersistentVolumeClaimSpec) ProtoMessage() {} -func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{105} +func (m *FCVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - -func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } -func (*PersistentVolumeClaimStatus) ProtoMessage() {} -func 
(*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{106} +func (m *FCVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FCVolumeSource.Merge(m, src) } - -func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } -func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} -func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{107} +func (m *FCVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FCVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FCVolumeSource.DiscardUnknown(m) } -func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } -func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } +var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo -func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } -func (*PersistentVolumeSource) ProtoMessage() {} -func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{109} +func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } +func (*FlexPersistentVolumeSource) ProtoMessage() {} +func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{56} +} +func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlexPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlexPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlexPersistentVolumeSource.Merge(m, src) +} +func (m *FlexPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FlexPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FlexPersistentVolumeSource.DiscardUnknown(m) } -func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } -func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } +var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo -func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } -func (*PersistentVolumeStatus) ProtoMessage() {} -func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{111} +func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } +func (*FlexVolumeSource) ProtoMessage() {} +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{57} } - -func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } -func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} -func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{112} +func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlexVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlexVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlexVolumeSource.Merge(m, src) +} +func (m *FlexVolumeSource) 
XXX_Size() int { + return m.Size() +} +func (m *FlexVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FlexVolumeSource.DiscardUnknown(m) } -func (m *Pod) Reset() { *m = Pod{} } -func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo -func (m *PodAffinity) Reset() { *m = PodAffinity{} } -func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } +func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } +func (*FlockerVolumeSource) ProtoMessage() {} +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{58} +} +func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlockerVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlockerVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlockerVolumeSource.Merge(m, src) +} +func (m *FlockerVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FlockerVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FlockerVolumeSource.DiscardUnknown(m) +} -func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } -func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo -func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } -func (*PodAntiAffinity) ProtoMessage() {} -func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } +func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } +func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} +func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{59} +} +func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GCEPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GCEPersistentDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GCEPersistentDiskVolumeSource.Merge(m, src) +} +func (m *GCEPersistentDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GCEPersistentDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GCEPersistentDiskVolumeSource.DiscardUnknown(m) +} -func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } -func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } +var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo -func (m *PodCondition) Reset() { *m = PodCondition{} } -func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } +func (*GitRepoVolumeSource) ProtoMessage() {} +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{60} +} +func (m *GitRepoVolumeSource) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitRepoVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitRepoVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitRepoVolumeSource.Merge(m, src) +} +func (m *GitRepoVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GitRepoVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GitRepoVolumeSource.DiscardUnknown(m) +} -func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } -func (*PodDNSConfig) ProtoMessage() {} -func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo -func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } -func (*PodDNSConfigOption) ProtoMessage() {} -func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } +func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } +func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} +func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{61} +} +func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GlusterfsPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GlusterfsPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GlusterfsPersistentVolumeSource.Merge(m, src) +} +func (m *GlusterfsPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GlusterfsPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GlusterfsPersistentVolumeSource.DiscardUnknown(m) +} -func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } -func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo -func (m *PodList) Reset() { *m = PodList{} } -func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } +func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } +func (*GlusterfsVolumeSource) ProtoMessage() {} +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{62} +} +func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GlusterfsVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GlusterfsVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GlusterfsVolumeSource.Merge(m, src) +} +func (m *GlusterfsVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GlusterfsVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GlusterfsVolumeSource.DiscardUnknown(m) +} -func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } -func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } +var 
xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo -func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } -func (*PodPortForwardOptions) ProtoMessage() {} -func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } +func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } +func (*HTTPGetAction) ProtoMessage() {} +func (*HTTPGetAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{63} +} +func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPGetAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPGetAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPGetAction.Merge(m, src) +} +func (m *HTTPGetAction) XXX_Size() int { + return m.Size() +} +func (m *HTTPGetAction) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPGetAction.DiscardUnknown(m) +} -func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } -func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } +var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo -func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } -func (*PodReadinessGate) ProtoMessage() {} -func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } +func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } +func (*HTTPHeader) ProtoMessage() {} +func (*HTTPHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{64} +} +func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPHeader.Merge(m, src) +} +func (m *HTTPHeader) XXX_Size() int { + return m.Size() +} +func (m *HTTPHeader) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPHeader.DiscardUnknown(m) +} -func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } -func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo -func (m *PodSignature) Reset() { *m = PodSignature{} } -func (*PodSignature) ProtoMessage() {} -func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (m *Handler) Reset() { *m = Handler{} } +func (*Handler) ProtoMessage() {} +func (*Handler) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{65} +} +func (m *Handler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Handler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Handler) XXX_Merge(src proto.Message) { + xxx_messageInfo_Handler.Merge(m, src) +} +func (m *Handler) XXX_Size() int { + return m.Size() +} +func (m *Handler) XXX_DiscardUnknown() { + xxx_messageInfo_Handler.DiscardUnknown(m) +} -func (m *PodSpec) Reset() { *m = PodSpec{} } -func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{129} } +var xxx_messageInfo_Handler proto.InternalMessageInfo -func (m *PodStatus) Reset() { *m = PodStatus{} } -func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } +func (m *HostAlias) Reset() { *m = HostAlias{} } +func (*HostAlias) ProtoMessage() {} +func (*HostAlias) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{66} +} +func (m *HostAlias) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostAlias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostAlias) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostAlias.Merge(m, src) +} +func (m *HostAlias) XXX_Size() int { + return m.Size() +} +func (m *HostAlias) XXX_DiscardUnknown() { + xxx_messageInfo_HostAlias.DiscardUnknown(m) +} -func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } -func (*PodStatusResult) ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } +var xxx_messageInfo_HostAlias proto.InternalMessageInfo -func (m *PodTemplate) Reset() { *m = PodTemplate{} } -func (*PodTemplate) ProtoMessage() {} -func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } +func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } +func (*HostPathVolumeSource) ProtoMessage() {} +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{67} +} +func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPathVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPathVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPathVolumeSource.Merge(m, src) +} +func (m *HostPathVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *HostPathVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_HostPathVolumeSource.DiscardUnknown(m) +} -func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } -func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } +var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo -func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } -func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } +func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } +func (*ISCSIPersistentVolumeSource) ProtoMessage() {} +func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{68} +} +func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ISCSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ISCSIPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ISCSIPersistentVolumeSource.Merge(m, src) +} +func (m *ISCSIPersistentVolumeSource) XXX_Size() int { + 
return m.Size() +} +func (m *ISCSIPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ISCSIPersistentVolumeSource.DiscardUnknown(m) +} -func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } -func (*PortworxVolumeSource) ProtoMessage() {} -func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo -func (m *Preconditions) Reset() { *m = Preconditions{} } -func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } +func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } +func (*ISCSIVolumeSource) ProtoMessage() {} +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{69} +} +func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ISCSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ISCSIVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ISCSIVolumeSource.Merge(m, src) +} +func (m *ISCSIVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ISCSIVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ISCSIVolumeSource.DiscardUnknown(m) +} -func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } -func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } +var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo -func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } -func (*PreferredSchedulingTerm) ProtoMessage() {} -func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{138} +func (m *KeyToPath) Reset() { *m = KeyToPath{} } +func (*KeyToPath) ProtoMessage() {} +func (*KeyToPath) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{70} +} +func (m *KeyToPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KeyToPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KeyToPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyToPath.Merge(m, src) +} +func (m *KeyToPath) XXX_Size() int { + return m.Size() +} +func (m *KeyToPath) XXX_DiscardUnknown() { + xxx_messageInfo_KeyToPath.DiscardUnknown(m) } -func (m *Probe) Reset() { *m = Probe{} } -func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } +var xxx_messageInfo_KeyToPath proto.InternalMessageInfo -func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } -func (*ProjectedVolumeSource) ProtoMessage() {} -func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } +func (m *Lifecycle) Reset() { *m = Lifecycle{} } +func (*Lifecycle) ProtoMessage() {} +func (*Lifecycle) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{71} +} +func (m *Lifecycle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lifecycle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lifecycle) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lifecycle.Merge(m, src) +} +func (m *Lifecycle) XXX_Size() int { + return m.Size() +} +func (m *Lifecycle) XXX_DiscardUnknown() { + xxx_messageInfo_Lifecycle.DiscardUnknown(m) +} -func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } -func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } +var xxx_messageInfo_Lifecycle proto.InternalMessageInfo -func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } -func (*RBDPersistentVolumeSource) ProtoMessage() {} -func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{142} +func (m *LimitRange) Reset() { *m = LimitRange{} } +func (*LimitRange) ProtoMessage() {} +func (*LimitRange) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{72} +} +func (m *LimitRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRange.Merge(m, src) +} +func (m *LimitRange) XXX_Size() int { + return m.Size() +} +func (m *LimitRange) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRange.DiscardUnknown(m) } -func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } -func (*RBDVolumeSource) ProtoMessage() {} -func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } +var xxx_messageInfo_LimitRange proto.InternalMessageInfo -func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } -func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } +func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } +func (*LimitRangeItem) ProtoMessage() {} +func (*LimitRangeItem) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{73} +} +func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRangeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRangeItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRangeItem.Merge(m, src) +} +func (m *LimitRangeItem) XXX_Size() int { + return m.Size() +} +func (m *LimitRangeItem) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRangeItem.DiscardUnknown(m) +} -func (m *ReplicationController) Reset() { *m = ReplicationController{} } -func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } +var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo -func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } -func (*ReplicationControllerCondition) ProtoMessage() {} -func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{146} +func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } +func (*LimitRangeList) ProtoMessage() {} +func (*LimitRangeList) Descriptor() ([]byte, []int) { + 
return fileDescriptor_83c10c24ec417dc9, []int{74} } - -func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } -func (*ReplicationControllerList) ProtoMessage() {} -func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{147} +func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } -func (*ReplicationControllerSpec) ProtoMessage() {} -func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{148} +func (m *LimitRangeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - -func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } -func (*ReplicationControllerStatus) ProtoMessage() {} -func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{149} +func (m *LimitRangeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRangeList.Merge(m, src) +} +func (m *LimitRangeList) XXX_Size() int { + return m.Size() +} +func (m *LimitRangeList) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRangeList.DiscardUnknown(m) } -func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } -func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo -func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } -func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } +func (*LimitRangeSpec) ProtoMessage() {} +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{75} +} +func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRangeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRangeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRangeSpec.Merge(m, src) +} +func (m *LimitRangeSpec) XXX_Size() int { + return m.Size() +} +func (m *LimitRangeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRangeSpec.DiscardUnknown(m) +} -func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } -func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo -func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } -func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{76} +} +func (m *List) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], 
nil +} +func (m *List) XXX_Merge(src proto.Message) { + xxx_messageInfo_List.Merge(m, src) +} +func (m *List) XXX_Size() int { + return m.Size() +} +func (m *List) XXX_DiscardUnknown() { + xxx_messageInfo_List.DiscardUnknown(m) +} -func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } -func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } +var xxx_messageInfo_List proto.InternalMessageInfo -func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } -func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } +func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } +func (*LoadBalancerIngress) ProtoMessage() {} +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{77} +} +func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LoadBalancerIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalancerIngress.Merge(m, src) +} +func (m *LoadBalancerIngress) XXX_Size() int { + return m.Size() +} +func (m *LoadBalancerIngress) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalancerIngress.DiscardUnknown(m) +} -func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } -func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } +var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo -func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } -func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} -func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{157} +func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } +func (*LoadBalancerStatus) ProtoMessage() {} +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{78} +} +func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LoadBalancerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalancerStatus.Merge(m, src) +} +func (m *LoadBalancerStatus) XXX_Size() int { + return m.Size() +} +func (m *LoadBalancerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalancerStatus.DiscardUnknown(m) } -func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } -func (*ScaleIOVolumeSource) ProtoMessage() {} -func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } - -func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } -func (*ScopeSelector) ProtoMessage() {} -func (*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } +var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo -func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } -func (*ScopedResourceSelectorRequirement) ProtoMessage() 
{} -func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{160} +func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } +func (*LocalObjectReference) ProtoMessage() {} +func (*LocalObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{79} +} +func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalObjectReference.Merge(m, src) +} +func (m *LocalObjectReference) XXX_Size() int { + return m.Size() +} +func (m *LocalObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_LocalObjectReference.DiscardUnknown(m) } -func (m *Secret) Reset() { *m = Secret{} } -func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } - -func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } -func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } +var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo -func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } -func (*SecretKeySelector) ProtoMessage() {} -func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } +func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } +func (*LocalVolumeSource) ProtoMessage() {} +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{80} +} +func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalVolumeSource.Merge(m, src) +} +func (m *LocalVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *LocalVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_LocalVolumeSource.DiscardUnknown(m) +} -func (m *SecretList) Reset() { *m = SecretList{} } -func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } +var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo -func (m *SecretProjection) Reset() { *m = SecretProjection{} } -func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } +func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } +func (*NFSVolumeSource) ProtoMessage() {} +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{81} +} +func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NFSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_NFSVolumeSource.Merge(m, src) +} +func (m *NFSVolumeSource) XXX_Size() int { + 
return m.Size() +} +func (m *NFSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_NFSVolumeSource.DiscardUnknown(m) +} -func (m *SecretReference) Reset() { *m = SecretReference{} } -func (*SecretReference) ProtoMessage() {} -func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } +var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo -func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } -func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } +func (m *Namespace) Reset() { *m = Namespace{} } +func (*Namespace) ProtoMessage() {} +func (*Namespace) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{82} +} +func (m *Namespace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Namespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Namespace) XXX_Merge(src proto.Message) { + xxx_messageInfo_Namespace.Merge(m, src) +} +func (m *Namespace) XXX_Size() int { + return m.Size() +} +func (m *Namespace) XXX_DiscardUnknown() { + xxx_messageInfo_Namespace.DiscardUnknown(m) +} -func (m *SecurityContext) Reset() { *m = SecurityContext{} } -func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } +var xxx_messageInfo_Namespace proto.InternalMessageInfo -func (m *SerializedReference) Reset() { *m = SerializedReference{} } -func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } +func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } +func (*NamespaceCondition) ProtoMessage() {} +func (*NamespaceCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{83} +} +func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceCondition.Merge(m, src) +} +func (m *NamespaceCondition) XXX_Size() int { + return m.Size() +} +func (m *NamespaceCondition) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceCondition.DiscardUnknown(m) +} -func (m *Service) Reset() { *m = Service{} } -func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } +var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo -func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } -func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } +func (m *NamespaceList) Reset() { *m = NamespaceList{} } +func (*NamespaceList) ProtoMessage() {} +func (*NamespaceList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{84} +} +func (m *NamespaceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + 
return b[:n], nil +} +func (m *NamespaceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceList.Merge(m, src) +} +func (m *NamespaceList) XXX_Size() int { + return m.Size() +} +func (m *NamespaceList) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceList.DiscardUnknown(m) +} -func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } -func (*ServiceAccountList) ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } +var xxx_messageInfo_NamespaceList proto.InternalMessageInfo -func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } -func (*ServiceAccountTokenProjection) ProtoMessage() {} -func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{173} +func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } +func (*NamespaceSpec) ProtoMessage() {} +func (*NamespaceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{85} +} +func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceSpec.Merge(m, src) +} +func (m *NamespaceSpec) XXX_Size() int { + return m.Size() +} +func (m *NamespaceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceSpec.DiscardUnknown(m) } -func (m *ServiceList) Reset() { *m = ServiceList{} } -func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } - -func (m *ServicePort) Reset() { *m = ServicePort{} } -func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } +var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo -func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } -func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } +func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } +func (*NamespaceStatus) ProtoMessage() {} +func (*NamespaceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{86} +} +func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceStatus.Merge(m, src) +} +func (m *NamespaceStatus) XXX_Size() int { + return m.Size() +} +func (m *NamespaceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceStatus.DiscardUnknown(m) +} -func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } -func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } +var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo -func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } -func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } +func (m *Node) Reset() { *m = Node{} } +func (*Node) 
ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{87} +} +func (m *Node) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return m.Size() +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} -func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } -func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } +var xxx_messageInfo_Node proto.InternalMessageInfo -func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } -func (*StorageOSPersistentVolumeSource) ProtoMessage() {} -func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{180} +func (m *NodeAddress) Reset() { *m = NodeAddress{} } +func (*NodeAddress) ProtoMessage() {} +func (*NodeAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{88} +} +func (m *NodeAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeAddress.Merge(m, src) +} +func (m *NodeAddress) XXX_Size() int { + return m.Size() +} +func (m *NodeAddress) XXX_DiscardUnknown() { + xxx_messageInfo_NodeAddress.DiscardUnknown(m) } -func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } -func (*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } +var xxx_messageInfo_NodeAddress proto.InternalMessageInfo -func (m *Sysctl) Reset() { *m = Sysctl{} } -func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } +func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } +func (*NodeAffinity) ProtoMessage() {} +func (*NodeAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{89} +} +func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeAffinity.Merge(m, src) +} +func (m *NodeAffinity) XXX_Size() int { + return m.Size() +} +func (m *NodeAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_NodeAffinity.DiscardUnknown(m) +} -func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } -func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } +var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo -func (m *Taint) Reset() { *m = Taint{} } -func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{184} } +func 
(m *NodeCondition) Reset() { *m = NodeCondition{} } +func (*NodeCondition) ProtoMessage() {} +func (*NodeCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{90} +} +func (m *NodeCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeCondition.Merge(m, src) +} +func (m *NodeCondition) XXX_Size() int { + return m.Size() +} +func (m *NodeCondition) XXX_DiscardUnknown() { + xxx_messageInfo_NodeCondition.DiscardUnknown(m) +} -func (m *Toleration) Reset() { *m = Toleration{} } -func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} } +var xxx_messageInfo_NodeCondition proto.InternalMessageInfo -func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } -func (*TopologySelectorLabelRequirement) ProtoMessage() {} -func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{186} +func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } +func (*NodeConfigSource) ProtoMessage() {} +func (*NodeConfigSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{91} +} +func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeConfigSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeConfigSource.Merge(m, src) +} +func (m *NodeConfigSource) XXX_Size() int { + return m.Size() +} +func (m *NodeConfigSource) XXX_DiscardUnknown() { + xxx_messageInfo_NodeConfigSource.DiscardUnknown(m) } -func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } -func (*TopologySelectorTerm) ProtoMessage() {} -func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{187} } +var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo -func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } -func (*TypedLocalObjectReference) ProtoMessage() {} -func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{188} +func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } +func (*NodeConfigStatus) ProtoMessage() {} +func (*NodeConfigStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{92} +} +func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeConfigStatus.Merge(m, src) +} +func (m *NodeConfigStatus) XXX_Size() int { + return m.Size() } +func (m *NodeConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NodeConfigStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo -func (m *Volume) Reset() { *m = Volume{} } -func (*Volume) ProtoMessage() {} 
-func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } +func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } +func (*NodeDaemonEndpoints) ProtoMessage() {} +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{93} +} +func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeDaemonEndpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeDaemonEndpoints) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeDaemonEndpoints.Merge(m, src) +} +func (m *NodeDaemonEndpoints) XXX_Size() int { + return m.Size() +} +func (m *NodeDaemonEndpoints) XXX_DiscardUnknown() { + xxx_messageInfo_NodeDaemonEndpoints.DiscardUnknown(m) +} -func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } -func (*VolumeDevice) ProtoMessage() {} -func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} } +var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo -func (m *VolumeMount) Reset() { *m = VolumeMount{} } -func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} } +func (m *NodeList) Reset() { *m = NodeList{} } +func (*NodeList) ProtoMessage() {} +func (*NodeList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{94} +} +func (m *NodeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeList.Merge(m, src) +} +func (m *NodeList) XXX_Size() int { + return m.Size() +} +func (m *NodeList) XXX_DiscardUnknown() { + xxx_messageInfo_NodeList.DiscardUnknown(m) +} -func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } -func (*VolumeNodeAffinity) ProtoMessage() {} -func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} } +var xxx_messageInfo_NodeList proto.InternalMessageInfo -func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } -func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{193} } +func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } +func (*NodeProxyOptions) ProtoMessage() {} +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{95} +} +func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeProxyOptions.Merge(m, src) +} +func (m *NodeProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *NodeProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_NodeProxyOptions.DiscardUnknown(m) +} -func (m *VolumeSource) Reset() { *m = VolumeSource{} } -func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{194} 
} +var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo -func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } -func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} -func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{195} +func (m *NodeResources) Reset() { *m = NodeResources{} } +func (*NodeResources) ProtoMessage() {} +func (*NodeResources) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{96} +} +func (m *NodeResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeResources.Merge(m, src) +} +func (m *NodeResources) XXX_Size() int { + return m.Size() +} +func (m *NodeResources) XXX_DiscardUnknown() { + xxx_messageInfo_NodeResources.DiscardUnknown(m) } -func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } -func (*WeightedPodAffinityTerm) ProtoMessage() {} -func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{196} +var xxx_messageInfo_NodeResources proto.InternalMessageInfo + +func (m *NodeSelector) Reset() { *m = NodeSelector{} } +func (*NodeSelector) ProtoMessage() {} +func (*NodeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{97} +} +func (m *NodeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelector.Merge(m, src) +} +func (m *NodeSelector) XXX_Size() int { + return m.Size() +} +func (m *NodeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelector.DiscardUnknown(m) } -func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } -func (*WindowsSecurityContextOptions) ProtoMessage() {} -func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{197} +var xxx_messageInfo_NodeSelector proto.InternalMessageInfo + +func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } +func (*NodeSelectorRequirement) ProtoMessage() {} +func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{98} +} +func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelectorRequirement.Merge(m, src) +} +func (m *NodeSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *NodeSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelectorRequirement.DiscardUnknown(m) } -func init() { - proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") - proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") - 
proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") - proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") - proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") - proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") - proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") - proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") - proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") - proto.RegisterType((*CSIVolumeSource)(nil), "k8s.io.api.core.v1.CSIVolumeSource") - proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") - proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") - proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") - proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") - proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") - proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") - proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") - proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") - proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") - proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap") - proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource") - proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector") - proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList") - proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource") - proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection") - proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource") - proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") - proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") - proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") - proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") - proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") - proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") - proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") - proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") - proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") - proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") - proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") - proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource") - proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource") - proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress") - proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort") - proto.RegisterType((*EndpointSubset)(nil), 
"k8s.io.api.core.v1.EndpointSubset") - proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints") - proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList") - proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource") - proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar") - proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") - proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") - proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") - proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") - proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") - proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") - proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") - proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource") - proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource") - proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") - proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") - proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") - proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource") - proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") - proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") - proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") - proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") - proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") - proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") - proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") - proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") - proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") - proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") - proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") - proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") - proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") - proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") - proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") - proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") - proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") - proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") - proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") - proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") - proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") - proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList") - proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec") - proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus") - proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node") - proto.RegisterType((*NodeAddress)(nil), 
"k8s.io.api.core.v1.NodeAddress") - proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity") - proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition") - proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") - proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus") - proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") - proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") - proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") - proto.RegisterType((*NodeResources)(nil), "k8s.io.api.core.v1.NodeResources") - proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") - proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") - proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") - proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec") - proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus") - proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo") - proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector") - proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference") - proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume") - proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim") - proto.RegisterType((*PersistentVolumeClaimCondition)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimCondition") - proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") - proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") - proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") - proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") - proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList") - proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource") - proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec") - proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus") - proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource") - proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod") - proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity") - proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm") - proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") - proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") - proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") - proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") - proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") - proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") - proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") - proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") - proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") 
- proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") - proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") - proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext") - proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") - proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") - proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus") - proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult") - proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") - proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") - proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") - proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") - proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") - proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") - proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm") - proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") - proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") - proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") - proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") - proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource") - proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation") - proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController") - proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition") - proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList") - proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec") - proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") - proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") - proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") - proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") - proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") - proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus") - proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") - proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") - proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") - proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") - proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector") - proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement") - proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret") - proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource") - proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector") - proto.RegisterType((*SecretList)(nil), 
"k8s.io.api.core.v1.SecretList") - proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection") - proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference") - proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource") - proto.RegisterType((*SecurityContext)(nil), "k8s.io.api.core.v1.SecurityContext") - proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference") - proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service") - proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount") - proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList") - proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection") - proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList") - proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort") - proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions") - proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec") - proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") - proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") - proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") - proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") - proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") - proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction") - proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") - proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") - proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement") - proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm") - proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference") - proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") - proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") - proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") - proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") - proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") - proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") - proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") - proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") - proto.RegisterType((*WindowsSecurityContextOptions)(nil), "k8s.io.api.core.v1.WindowsSecurityContextOptions") +var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo + +func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } +func (*NodeSelectorTerm) ProtoMessage() {} +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{99} } -func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeSelectorTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelectorTerm.Merge(m, src) +} +func (m *NodeSelectorTerm) XXX_Size() int { + return m.Size() +} +func (m *NodeSelectorTerm) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelectorTerm.DiscardUnknown(m) } -func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo + +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (*NodeSpec) ProtoMessage() {} +func (*NodeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{100} +} +func (m *NodeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - return i, nil + return b[:n], nil +} +func (m *NodeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSpec.Merge(m, src) +} +func (m *NodeSpec) XXX_Size() int { + return m.Size() +} +func (m *NodeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSpec.DiscardUnknown(m) } -func (m *Affinity) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeSpec proto.InternalMessageInfo + +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (*NodeStatus) ProtoMessage() {} +func (*NodeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{101} +} +func (m *NodeStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *NodeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStatus.Merge(m, src) +} +func (m *NodeStatus) XXX_Size() int { + return m.Size() +} +func (m *NodeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStatus.DiscardUnknown(m) } -func (m *Affinity) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.NodeAffinity != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) - n1, err := m.NodeAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.PodAffinity != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinity.Size())) - n2, err := m.PodAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.PodAntiAffinity != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodAntiAffinity.Size())) - n3, err := m.PodAntiAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 +var xxx_messageInfo_NodeStatus proto.InternalMessageInfo + +func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} 
} +func (*NodeSystemInfo) ProtoMessage() {} +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{102} +} +func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSystemInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *NodeSystemInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSystemInfo.Merge(m, src) +} +func (m *NodeSystemInfo) XXX_Size() int { + return m.Size() +} +func (m *NodeSystemInfo) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSystemInfo.DiscardUnknown(m) } -func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo + +func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } +func (*ObjectFieldSelector) ProtoMessage() {} +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{103} +} +func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) - i += copy(dAtA[i:], m.DevicePath) - return i, nil +func (m *ObjectFieldSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectFieldSelector.Merge(m, src) +} +func (m *ObjectFieldSelector) XXX_Size() int { + return m.Size() +} +func (m *ObjectFieldSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectFieldSelector.DiscardUnknown(m) } -func (m *AvoidPods) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo + +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (*ObjectReference) ProtoMessage() {} +func (*ObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{104} +} +func (m *ObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectReference.Merge(m, src) +} +func (m *ObjectReference) XXX_Size() int { + return m.Size() +} +func (m *ObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectReference.DiscardUnknown(m) } -func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.PreferAvoidPods) > 0 { - for _, msg := range m.PreferAvoidPods { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } +var xxx_messageInfo_ObjectReference proto.InternalMessageInfo + +func 
(m *PersistentVolume) Reset() { *m = PersistentVolume{} } +func (*PersistentVolume) ProtoMessage() {} +func (*PersistentVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{105} +} +func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PersistentVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolume.Merge(m, src) +} +func (m *PersistentVolume) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolume) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolume.DiscardUnknown(m) } -func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo + +func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } +func (*PersistentVolumeClaim) ProtoMessage() {} +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{106} +} +func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaim.Merge(m, src) +} +func (m *PersistentVolumeClaim) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaim) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaim.DiscardUnknown(m) } -func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName))) - i += copy(dAtA[i:], m.DiskName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI))) - i += copy(dAtA[i:], m.DataDiskURI) - if m.CachingMode != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode))) - i += copy(dAtA[i:], *m.CachingMode) - } - if m.FSType != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i += copy(dAtA[i:], *m.FSType) - } - if m.ReadOnly != nil { - dAtA[i] = 0x28 - i++ - if *m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ +var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo + +func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } +func (*PersistentVolumeClaimCondition) ProtoMessage() {} +func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{107} +} +func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - if m.Kind != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) + return b[:n], nil +} +func (m *PersistentVolumeClaimCondition) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_PersistentVolumeClaimCondition.Merge(m, src) +} +func (m *PersistentVolumeClaimCondition) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo + +func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } +func (*PersistentVolumeClaimList) ProtoMessage() {} +func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{108} +} +func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PersistentVolumeClaimList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimList.Merge(m, src) +} +func (m *PersistentVolumeClaimList) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimList) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimList.DiscardUnknown(m) } -func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo + +func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } +func (*PersistentVolumeClaimSpec) ProtoMessage() {} +func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{109} +} +func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeClaimSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimSpec.Merge(m, src) +} +func (m *PersistentVolumeClaimSpec) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimSpec.DiscardUnknown(m) } -func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) - i += copy(dAtA[i:], m.ShareName) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo + +func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } +func (*PersistentVolumeClaimStatus) ProtoMessage() {} +func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{110} +} +func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - if m.SecretNamespace != nil { - dAtA[i] = 0x22 - i++ - i 
= encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace))) - i += copy(dAtA[i:], *m.SecretNamespace) + return b[:n], nil +} +func (m *PersistentVolumeClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimStatus.Merge(m, src) +} +func (m *PersistentVolumeClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo + +func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } +func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} +func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{111} +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimVolumeSource.Merge(m, src) +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimVolumeSource.DiscardUnknown(m) } -func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo + +func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } +func (*PersistentVolumeList) ProtoMessage() {} +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{112} +} +func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeList.Merge(m, src) +} +func (m *PersistentVolumeList) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeList) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeList.DiscardUnknown(m) } -func (m *AzureFileVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) - i += copy(dAtA[i:], m.ShareName) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo + +func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } +func (*PersistentVolumeSource) ProtoMessage() {} +func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{113} +} +func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i++ - return i, nil + return b[:n], nil +} +func (m *PersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeSource.Merge(m, src) +} +func (m *PersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeSource.DiscardUnknown(m) } -func (m *Binding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo + +func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } +func (*PersistentVolumeSpec) ProtoMessage() {} +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{114} +} +func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PersistentVolumeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeSpec.Merge(m, src) +} +func (m *PersistentVolumeSpec) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeSpec.DiscardUnknown(m) } -func (m *Binding) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo + +func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } +func (*PersistentVolumeStatus) ProtoMessage() {} +func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{115} +} +func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n5, err := m.Target.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *PersistentVolumeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeStatus.Merge(m, src) +} +func (m *PersistentVolumeStatus) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo + +func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } +func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} +func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{116} +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n5 - return i, nil + return 
b[:n], nil +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PhotonPersistentDiskVolumeSource.Merge(m, src) +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PhotonPersistentDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PhotonPersistentDiskVolumeSource.DiscardUnknown(m) } -func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo + +func (m *Pod) Reset() { *m = Pod{} } +func (*Pod) ProtoMessage() {} +func (*Pod) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{117} +} +func (m *Pod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Pod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Pod) XXX_Merge(src proto.Message) { + xxx_messageInfo_Pod.Merge(m, src) +} +func (m *Pod) XXX_Size() int { + return m.Size() +} +func (m *Pod) XXX_DiscardUnknown() { + xxx_messageInfo_Pod.DiscardUnknown(m) } -func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) - i += copy(dAtA[i:], m.VolumeHandle) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if len(m.VolumeAttributes) > 0 { - keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) - for k := range m.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - for _, k := range keysForVolumeAttributes { - dAtA[i] = 0x2a - i++ - v := m.VolumeAttributes[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.ControllerPublishSecretRef != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ControllerPublishSecretRef.Size())) - n6, err := m.ControllerPublishSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.NodeStageSecretRef != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeStageSecretRef.Size())) - n7, err := m.NodeStageSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.NodePublishSecretRef != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodePublishSecretRef.Size())) - n8, err := m.NodePublishSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.ControllerExpandSecretRef != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ControllerExpandSecretRef.Size())) - n9, err := m.ControllerExpandSecretRef.MarshalTo(dAtA[i:]) - if 
err != nil { - return 0, err - } - i += n9 - } - return i, nil -} +var xxx_messageInfo_Pod proto.InternalMessageInfo -func (m *CSIVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *PodAffinity) Reset() { *m = PodAffinity{} } +func (*PodAffinity) ProtoMessage() {} +func (*PodAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{118} +} +func (m *PodAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - if m.ReadOnly != nil { - dAtA[i] = 0x10 - i++ - if *m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.FSType != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i += copy(dAtA[i:], *m.FSType) - } - if len(m.VolumeAttributes) > 0 { - keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) - for k := range m.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - for _, k := range keysForVolumeAttributes { - dAtA[i] = 0x22 - i++ - v := m.VolumeAttributes[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.NodePublishSecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodePublishSecretRef.Size())) - n10, err := m.NodePublishSecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - return i, nil +func (m *PodAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAffinity.Merge(m, src) +} +func (m *PodAffinity) XXX_Size() int { + return m.Size() +} +func (m *PodAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_PodAffinity.DiscardUnknown(m) } -func (m *Capabilities) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodAffinity proto.InternalMessageInfo + +func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } +func (*PodAffinityTerm) ProtoMessage() {} +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{119} +} +func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Add) > 0 { - for _, s := range m.Add { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += 
copy(dAtA[i:], s) - } - } - if len(m.Drop) > 0 { - for _, s := range m.Drop { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil +func (m *PodAffinityTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAffinityTerm.Merge(m, src) +} +func (m *PodAffinityTerm) XXX_Size() int { + return m.Size() +} +func (m *PodAffinityTerm) XXX_DiscardUnknown() { + xxx_messageInfo_PodAffinityTerm.DiscardUnknown(m) } -func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo + +func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } +func (*PodAntiAffinity) ProtoMessage() {} +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{120} +} +func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAntiAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) - i += copy(dAtA[i:], m.SecretFile) - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n11, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil +func (m *PodAntiAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAntiAffinity.Merge(m, src) +} +func (m *PodAntiAffinity) XXX_Size() int { + return m.Size() +} +func (m *PodAntiAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_PodAntiAffinity.DiscardUnknown(m) } -func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo + +func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } +func (*PodAttachOptions) ProtoMessage() {} +func (*PodAttachOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{121} +} +func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAttachOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - 
dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) - i += copy(dAtA[i:], m.SecretFile) - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n12, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil +func (m *PodAttachOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAttachOptions.Merge(m, src) +} +func (m *PodAttachOptions) XXX_Size() int { + return m.Size() +} +func (m *PodAttachOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodAttachOptions.DiscardUnknown(m) } -func (m *CinderPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo + +func (m *PodCondition) Reset() { *m = PodCondition{} } +func (*PodCondition) ProtoMessage() {} +func (*PodCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{122} +} +func (m *PodCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n13, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - return i, nil +func (m *PodCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodCondition.Merge(m, src) +} +func (m *PodCondition) XXX_Size() int { + return m.Size() +} +func (m *PodCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PodCondition.DiscardUnknown(m) } -func (m *CinderVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodCondition proto.InternalMessageInfo + +func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } +func (*PodDNSConfig) ProtoMessage() {} +func (*PodDNSConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{123} +} +func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDNSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - 
dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n14, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - return i, nil +func (m *PodDNSConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDNSConfig.Merge(m, src) +} +func (m *PodDNSConfig) XXX_Size() int { + return m.Size() +} +func (m *PodDNSConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PodDNSConfig.DiscardUnknown(m) } -func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo + +func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } +func (*PodDNSConfigOption) ProtoMessage() {} +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{124} +} +func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDNSConfigOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.TimeoutSeconds != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - } - return i, nil +func (m *PodDNSConfigOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDNSConfigOption.Merge(m, src) +} +func (m *PodDNSConfigOption) XXX_Size() int { + return m.Size() +} +func (m *PodDNSConfigOption) XXX_DiscardUnknown() { + xxx_messageInfo_PodDNSConfigOption.DiscardUnknown(m) } -func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo + +func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } +func (*PodExecOptions) ProtoMessage() {} +func (*PodExecOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{125} +} +func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodExecOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ComponentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil +func (m *PodExecOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodExecOptions.Merge(m, src) +} +func (m *PodExecOptions) XXX_Size() int { 
+ return m.Size() +} +func (m *PodExecOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodExecOptions.DiscardUnknown(m) } -func (m *ComponentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo + +func (m *PodIP) Reset() { *m = PodIP{} } +func (*PodIP) ProtoMessage() {} +func (*PodIP) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{126} +} +func (m *PodIP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodIP.Merge(m, src) +} +func (m *PodIP) XXX_Size() int { + return m.Size() +} +func (m *PodIP) XXX_DiscardUnknown() { + xxx_messageInfo_PodIP.DiscardUnknown(m) } -func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n15, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodIP proto.InternalMessageInfo + +func (m *PodList) Reset() { *m = PodList{} } +func (*PodList) ProtoMessage() {} +func (*PodList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{127} +} +func (m *PodList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n15 - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodList.Merge(m, src) +} +func (m *PodList) XXX_Size() int { + return m.Size() +} +func (m *PodList) XXX_DiscardUnknown() { + xxx_messageInfo_PodList.DiscardUnknown(m) } -func (m *ComponentStatusList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodList proto.InternalMessageInfo + +func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } +func (*PodLogOptions) ProtoMessage() {} +func (*PodLogOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{128} +} +func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodLogOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodLogOptions.Merge(m, src) +} +func (m *PodLogOptions) XXX_Size() int { + return m.Size() +} +func (m *PodLogOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodLogOptions.DiscardUnknown(m) } -func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n16, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodLogOptions 
proto.InternalMessageInfo + +func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } +func (*PodPortForwardOptions) ProtoMessage() {} +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{129} +} +func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPortForwardOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n16 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodPortForwardOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPortForwardOptions.Merge(m, src) +} +func (m *PodPortForwardOptions) XXX_Size() int { + return m.Size() +} +func (m *PodPortForwardOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodPortForwardOptions.DiscardUnknown(m) } -func (m *ConfigMap) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo + +func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } +func (*PodProxyOptions) ProtoMessage() {} +func (*PodProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{130} +} +func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodProxyOptions.Merge(m, src) +} +func (m *PodProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *PodProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodProxyOptions.DiscardUnknown(m) } -func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n17, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo + +func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } +func (*PodReadinessGate) ProtoMessage() {} +func (*PodReadinessGate) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{131} +} +func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodReadinessGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n17 - if len(m.Data) > 0 { - keysForData := make([]string, 0, len(m.Data)) - for k := range m.Data { - keysForData = append(keysForData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - for _, k := range keysForData { - dAtA[i] = 0x12 - i++ - v := m.Data[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.BinaryData) > 0 { - keysForBinaryData := make([]string, 0, len(m.BinaryData)) - for k := range m.BinaryData { - keysForBinaryData = append(keysForBinaryData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) - for _, k := range keysForBinaryData { - dAtA[i] = 0x1a - i++ - v := m.BinaryData[string(k)] - byteSize := 0 - if v != nil { - byteSize = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + byteSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - if v != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodReadinessGate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodReadinessGate.Merge(m, src) +} +func (m *PodReadinessGate) XXX_Size() int { + return m.Size() +} +func (m *PodReadinessGate) XXX_DiscardUnknown() { + xxx_messageInfo_PodReadinessGate.DiscardUnknown(m) } -func (m *ConfigMapEnvSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo + +func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } +func (*PodSecurityContext) ProtoMessage() {} +func (*PodSecurityContext) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{132} +} +func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodSecurityContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityContext.Merge(m, src) +} +func (m *PodSecurityContext) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityContext) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityContext.DiscardUnknown(m) } -func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n18, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo + +func (m *PodSignature) Reset() { *m = PodSignature{} } +func (*PodSignature) ProtoMessage() {} +func (*PodSignature) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{133} +} +func (m *PodSignature) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n18 - if m.Optional != nil { - dAtA[i] = 0x10 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodSignature) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSignature.Merge(m, src) +} +func (m *PodSignature) XXX_Size() int { + return m.Size() +} +func (m *PodSignature) XXX_DiscardUnknown() { + xxx_messageInfo_PodSignature.DiscardUnknown(m) } -func (m *ConfigMapKeySelector) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodSignature proto.InternalMessageInfo + +func (m *PodSpec) Reset() { *m = PodSpec{} } +func (*PodSpec) ProtoMessage() {} +func (*PodSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{134} +} +func (m *PodSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSpec.Merge(m, src) +} +func (m *PodSpec) XXX_Size() int { + return m.Size() +} +func (m *PodSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSpec.DiscardUnknown(m) } -func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n19, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodSpec proto.InternalMessageInfo + +func (m *PodStatus) Reset() { *m = PodStatus{} } +func (*PodStatus) ProtoMessage() {} +func (*PodStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{135} +} +func (m *PodStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n19 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - if m.Optional != nil { - dAtA[i] = 0x18 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodStatus.Merge(m, src) +} +func (m *PodStatus) XXX_Size() int { + return m.Size() +} +func (m *PodStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodStatus.DiscardUnknown(m) } -func (m *ConfigMapList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodStatus proto.InternalMessageInfo + +func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } +func (*PodStatusResult) ProtoMessage() {} +func (*PodStatusResult) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{136} +} +func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodStatusResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodStatusResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodStatusResult.Merge(m, src) +} +func (m *PodStatusResult) XXX_Size() int { + return m.Size() +} +func (m *PodStatusResult) XXX_DiscardUnknown() { + xxx_messageInfo_PodStatusResult.DiscardUnknown(m) } -func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n20, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo + +func (m *PodTemplate) Reset() { *m = PodTemplate{} } +func (*PodTemplate) ProtoMessage() {} +func (*PodTemplate) 
Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{137} +} +func (m *PodTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n20 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PodTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplate.Merge(m, src) +} +func (m *PodTemplate) XXX_Size() int { + return m.Size() +} +func (m *PodTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplate.DiscardUnknown(m) } -func (m *ConfigMapNodeConfigSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PodTemplate proto.InternalMessageInfo + +func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } +func (*PodTemplateList) ProtoMessage() {} +func (*PodTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{138} +} +func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplateList.Merge(m, src) +} +func (m *PodTemplateList) XXX_Size() int { + return m.Size() +} +func (m *PodTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplateList.DiscardUnknown(m) } -func (m *ConfigMapNodeConfigSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletConfigKey))) - i += copy(dAtA[i:], m.KubeletConfigKey) - return i, nil -} +var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo -func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } +func (*PodTemplateSpec) ProtoMessage() {} +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{139} +} +func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PodTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplateSpec.Merge(m, src) +} +func (m *PodTemplateSpec) XXX_Size() int { + return m.Size() 
+} +func (m *PodTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplateSpec.DiscardUnknown(m) } -func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n21, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo + +func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } +func (*PortworxVolumeSource) ProtoMessage() {} +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{140} +} +func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortworxVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n21 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PortworxVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortworxVolumeSource.Merge(m, src) +} +func (m *PortworxVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PortworxVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PortworxVolumeSource.DiscardUnknown(m) } -func (m *ConfigMapVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{141} +} +func (m *Preconditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Preconditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Preconditions.Merge(m, src) +} +func (m *Preconditions) XXX_Size() int { + return m.Size() +} +func (m *Preconditions) XXX_DiscardUnknown() { + xxx_messageInfo_Preconditions.DiscardUnknown(m) } -func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n22, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) +var xxx_messageInfo_Preconditions proto.InternalMessageInfo + +func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } +func (*PreferAvoidPodsEntry) ProtoMessage() {} +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{142} +} +func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreferAvoidPodsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n22 - if len(m.Items) > 0 { - for _, msg := 
range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil, err } - return i, nil + return b[:n], nil +} +func (m *PreferAvoidPodsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreferAvoidPodsEntry.Merge(m, src) +} +func (m *PreferAvoidPodsEntry) XXX_Size() int { + return m.Size() +} +func (m *PreferAvoidPodsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_PreferAvoidPodsEntry.DiscardUnknown(m) } -func (m *Container) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo + +func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } +func (*PreferredSchedulingTerm) ProtoMessage() {} +func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{143} +} +func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreferredSchedulingTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *PreferredSchedulingTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreferredSchedulingTerm.Merge(m, src) +} +func (m *PreferredSchedulingTerm) XXX_Size() int { + return m.Size() +} +func (m *PreferredSchedulingTerm) XXX_DiscardUnknown() { + xxx_messageInfo_PreferredSchedulingTerm.DiscardUnknown(m) } -func (m *Container) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i += copy(dAtA[i:], m.Image) - if len(m.Command) > 0 { - for _, s := range m.Command { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Args) > 0 { - for _, s := range m.Args { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) - i += copy(dAtA[i:], m.WorkingDir) - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Env) > 0 { - for _, msg := range m.Env { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n23, err := m.Resources.MarshalTo(dAtA[i:]) +var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo + +func (m *Probe) Reset() { *m = Probe{} } +func (*Probe) ProtoMessage() {} +func (*Probe) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_83c10c24ec417dc9, []int{144} +} +func (m *Probe) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Probe) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n23 - if len(m.VolumeMounts) > 0 { - for _, msg := range m.VolumeMounts { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.LivenessProbe != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LivenessProbe.Size())) - n24, err := m.LivenessProbe.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - } - if m.ReadinessProbe != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadinessProbe.Size())) - n25, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - } - if m.Lifecycle != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Lifecycle.Size())) - n26, err := m.Lifecycle.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - } - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) - i += copy(dAtA[i:], m.TerminationMessagePath) - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) - i += copy(dAtA[i:], m.ImagePullPolicy) - if m.SecurityContext != nil { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n27, err := m.SecurityContext.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - } - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - if m.StdinOnce { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x90 - i++ - dAtA[i] = 0x1 - i++ - if m.TTY { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.EnvFrom) > 0 { - for _, msg := range m.EnvFrom { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) - i += copy(dAtA[i:], m.TerminationMessagePolicy) - if len(m.VolumeDevices) > 0 { - for _, msg := range m.VolumeDevices { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *Probe) XXX_Merge(src proto.Message) { + xxx_messageInfo_Probe.Merge(m, src) +} +func (m *Probe) XXX_Size() int { + return m.Size() +} +func (m *Probe) XXX_DiscardUnknown() { + xxx_messageInfo_Probe.DiscardUnknown(m) } -func (m *ContainerImage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Probe proto.InternalMessageInfo + +func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } +func (*ProjectedVolumeSource) ProtoMessage() {} +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{145} +} +func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *ProjectedVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerImage) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SizeBytes)) - return i, nil +func (m *ProjectedVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectedVolumeSource.Merge(m, src) +} +func (m *ProjectedVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ProjectedVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectedVolumeSource.DiscardUnknown(m) } -func (m *ContainerPort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo + +func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } +func (*QuobyteVolumeSource) ProtoMessage() {} +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{146} +} +func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuobyteVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i += copy(dAtA[i:], m.Protocol) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) - i += copy(dAtA[i:], m.HostIP) - return i, nil +func (m *QuobyteVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuobyteVolumeSource.Merge(m, src) +} +func (m *QuobyteVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *QuobyteVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_QuobyteVolumeSource.DiscardUnknown(m) } -func (m *ContainerState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo + +func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } +func (*RBDPersistentVolumeSource) ProtoMessage() {} +func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{147} +} +func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RBDPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *RBDPersistentVolumeSource) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_RBDPersistentVolumeSource.Merge(m, src) +} +func (m *RBDPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *RBDPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_RBDPersistentVolumeSource.DiscardUnknown(m) } -func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Waiting != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Waiting.Size())) - n28, err := m.Waiting.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - } - if m.Running != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Running.Size())) - n29, err := m.Running.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n29 - } - if m.Terminated != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Terminated.Size())) - n30, err := m.Terminated.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 +var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo + +func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } +func (*RBDVolumeSource) ProtoMessage() {} +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{148} +} +func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RBDVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *RBDVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_RBDVolumeSource.Merge(m, src) +} +func (m *RBDVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *RBDVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_RBDVolumeSource.DiscardUnknown(m) } -func (m *ContainerStateRunning) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo + +func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } +func (*RangeAllocation) ProtoMessage() {} +func (*RangeAllocation) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{149} +} +func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RangeAllocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *RangeAllocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_RangeAllocation.Merge(m, src) +} +func (m *RangeAllocation) XXX_Size() int { + return m.Size() +} +func (m *RangeAllocation) XXX_DiscardUnknown() { + xxx_messageInfo_RangeAllocation.DiscardUnknown(m) } -func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n31, err := m.StartedAt.MarshalTo(dAtA[i:]) +var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo + +func (m *ReplicationController) Reset() { *m = ReplicationController{} } +func (*ReplicationController) ProtoMessage() {} +func (*ReplicationController) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{150} +} +func (m *ReplicationController) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationController) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n31 - return i, nil + return b[:n], nil +} +func (m *ReplicationController) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationController.Merge(m, src) +} +func (m *ReplicationController) XXX_Size() int { + return m.Size() +} +func (m *ReplicationController) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationController.DiscardUnknown(m) } -func (m *ContainerStateTerminated) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ReplicationController proto.InternalMessageInfo + +func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } +func (*ReplicationControllerCondition) ProtoMessage() {} +func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{151} +} +func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ReplicationControllerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerCondition.Merge(m, src) +} +func (m *ReplicationControllerCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerCondition.DiscardUnknown(m) } -func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExitCode)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Signal)) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n32, err := m.StartedAt.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo + +func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } +func (*ReplicationControllerList) ProtoMessage() {} +func (*ReplicationControllerList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{152} +} +func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n32 - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FinishedAt.Size())) - n33, err := m.FinishedAt.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ReplicationControllerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerList.Merge(m, src) +} +func (m *ReplicationControllerList) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerList.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationControllerList 
proto.InternalMessageInfo + +func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } +func (*ReplicationControllerSpec) ProtoMessage() {} +func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{153} +} +func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n33 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) - i += copy(dAtA[i:], m.ContainerID) - return i, nil + return b[:n], nil +} +func (m *ReplicationControllerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerSpec.Merge(m, src) +} +func (m *ReplicationControllerSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerSpec.DiscardUnknown(m) } -func (m *ContainerStateWaiting) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo + +func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } +func (*ReplicationControllerStatus) ProtoMessage() {} +func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{154} +} +func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ContainerStateWaiting) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil +func (m *ReplicationControllerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerStatus.Merge(m, src) +} +func (m *ReplicationControllerStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerStatus.DiscardUnknown(m) } -func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo + +func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } +func (*ResourceFieldSelector) ProtoMessage() {} +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{155} +} +func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ResourceFieldSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceFieldSelector.Merge(m, src) +} +func (m *ResourceFieldSelector) XXX_Size() int { 
+ return m.Size() +} +func (m *ResourceFieldSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceFieldSelector.DiscardUnknown(m) } -func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.State.Size())) - n34, err := m.State.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo + +func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } +func (*ResourceQuota) ProtoMessage() {} +func (*ResourceQuota) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{156} +} +func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n34 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTerminationState.Size())) - n35, err := m.LastTerminationState.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ResourceQuota) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuota.Merge(m, src) +} +func (m *ResourceQuota) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuota) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuota.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo + +func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } +func (*ResourceQuotaList) ProtoMessage() {} +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{157} +} +func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n35 - dAtA[i] = 0x20 - i++ - if m.Ready { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + return nil, err } - i++ - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RestartCount)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i += copy(dAtA[i:], m.Image) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) - i += copy(dAtA[i:], m.ImageID) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) - i += copy(dAtA[i:], m.ContainerID) - return i, nil + return b[:n], nil +} +func (m *ResourceQuotaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaList.Merge(m, src) +} +func (m *ResourceQuotaList) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaList.DiscardUnknown(m) } -func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo + +func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } +func (*ResourceQuotaSpec) ProtoMessage() {} +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{158} +} +func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - return i, nil +func (m *ResourceQuotaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaSpec.Merge(m, src) +} +func (m *ResourceQuotaSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaSpec.DiscardUnknown(m) } -func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo + +func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } +func (*ResourceQuotaStatus) ProtoMessage() {} +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{159} +} +func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ResourceQuotaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaStatus.Merge(m, src) +} +func (m *ResourceQuotaStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaStatus.DiscardUnknown(m) } -func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } +var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo + +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (*ResourceRequirements) ProtoMessage() {} +func (*ResourceRequirements) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{160} +} +func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *ResourceRequirements) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRequirements.Merge(m, src) +} +func (m *ResourceRequirements) XXX_Size() int { + return m.Size() +} +func (m *ResourceRequirements) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRequirements.DiscardUnknown(m) } -func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo + +func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } +func (*SELinuxOptions) ProtoMessage() {} +func (*SELinuxOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{161} +} +func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, 
err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SELinuxOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxOptions.Merge(m, src) +} +func (m *SELinuxOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxOptions.DiscardUnknown(m) } -func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.FieldRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n36, err := m.FieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - } - if m.ResourceFieldRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n37, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - } - if m.Mode != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) - } - return i, nil -} +var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo -func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } +func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} +func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{162} +} +func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleIOPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - return i, nil +func (m *ScaleIOPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleIOPersistentVolumeSource.Merge(m, src) +} +func (m *ScaleIOPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ScaleIOPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleIOPersistentVolumeSource.DiscardUnknown(m) } -func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo + +func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } +func (*ScaleIOVolumeSource) ProtoMessage() {} +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{163} +} +func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleIOVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return 
dAtA[:n], nil + return b[:n], nil } - -func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) - i += copy(dAtA[i:], m.Medium) - if m.SizeLimit != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) - n38, err := m.SizeLimit.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n38 - } - return i, nil +func (m *ScaleIOVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleIOVolumeSource.Merge(m, src) +} +func (m *ScaleIOVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ScaleIOVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleIOVolumeSource.DiscardUnknown(m) } -func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo + +func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } +func (*ScopeSelector) ProtoMessage() {} +func (*ScopeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{164} +} +func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - if m.TargetRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetRef.Size())) - n39, err := m.TargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - if m.NodeName != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) - i += copy(dAtA[i:], *m.NodeName) - } - return i, nil +func (m *ScopeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopeSelector.Merge(m, src) +} +func (m *ScopeSelector) XXX_Size() int { + return m.Size() +} +func (m *ScopeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ScopeSelector.DiscardUnknown(m) } -func (m *EndpointPort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo + +func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } +func (*ScopedResourceSelectorRequirement) ProtoMessage() {} +func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{165} +} +func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopedResourceSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 
- i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i += copy(dAtA[i:], m.Protocol) - return i, nil +func (m *ScopedResourceSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopedResourceSelectorRequirement.Merge(m, src) +} +func (m *ScopedResourceSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *ScopedResourceSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_ScopedResourceSelectorRequirement.DiscardUnknown(m) } -func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo + +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{166} +} +func (m *Secret) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Secret) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Addresses) > 0 { - for _, msg := range m.Addresses { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.NotReadyAddresses) > 0 { - for _, msg := range m.NotReadyAddresses { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil +func (m *Secret) XXX_Merge(src proto.Message) { + xxx_messageInfo_Secret.Merge(m, src) +} +func (m *Secret) XXX_Size() int { + return m.Size() +} +func (m *Secret) XXX_DiscardUnknown() { + xxx_messageInfo_Secret.DiscardUnknown(m) } -func (m *Endpoints) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Secret proto.InternalMessageInfo + +func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } +func (*SecretEnvSource) ProtoMessage() {} +func (*SecretEnvSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{167} +} +func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SecretEnvSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretEnvSource.Merge(m, src) +} +func (m *SecretEnvSource) XXX_Size() int { + return m.Size() +} +func (m *SecretEnvSource) XXX_DiscardUnknown() { + xxx_messageInfo_SecretEnvSource.DiscardUnknown(m) } -func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n40, err := m.ObjectMeta.MarshalTo(dAtA[i:]) +var 
xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo + +func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } +func (*SecretKeySelector) ProtoMessage() {} +func (*SecretKeySelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{168} +} +func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n40 - if len(m.Subsets) > 0 { - for _, msg := range m.Subsets { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecretKeySelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretKeySelector.Merge(m, src) +} +func (m *SecretKeySelector) XXX_Size() int { + return m.Size() +} +func (m *SecretKeySelector) XXX_DiscardUnknown() { + xxx_messageInfo_SecretKeySelector.DiscardUnknown(m) } -func (m *EndpointsList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo + +func (m *SecretList) Reset() { *m = SecretList{} } +func (*SecretList) ProtoMessage() {} +func (*SecretList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{169} +} +func (m *SecretList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SecretList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretList.Merge(m, src) +} +func (m *SecretList) XXX_Size() int { + return m.Size() +} +func (m *SecretList) XXX_DiscardUnknown() { + xxx_messageInfo_SecretList.DiscardUnknown(m) } -func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n41, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_SecretList proto.InternalMessageInfo + +func (m *SecretProjection) Reset() { *m = SecretProjection{} } +func (*SecretProjection) ProtoMessage() {} +func (*SecretProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{170} +} +func (m *SecretProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n41 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *SecretProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretProjection.Merge(m, src) +} +func (m *SecretProjection) XXX_Size() int { + return m.Size() +} +func (m *SecretProjection) XXX_DiscardUnknown() { + xxx_messageInfo_SecretProjection.DiscardUnknown(m) } -func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, 
size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecretProjection proto.InternalMessageInfo + +func (m *SecretReference) Reset() { *m = SecretReference{} } +func (*SecretReference) ProtoMessage() {} +func (*SecretReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{171} +} +func (m *SecretReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) - i += copy(dAtA[i:], m.Prefix) - if m.ConfigMapRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) - n42, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n42 - } - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n43, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 - } - return i, nil +func (m *SecretReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretReference.Merge(m, src) +} +func (m *SecretReference) XXX_Size() int { + return m.Size() +} +func (m *SecretReference) XXX_DiscardUnknown() { + xxx_messageInfo_SecretReference.DiscardUnknown(m) } -func (m *EnvVar) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecretReference proto.InternalMessageInfo + +func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } +func (*SecretVolumeSource) ProtoMessage() {} +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{172} +} +func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - if m.ValueFrom != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ValueFrom.Size())) - n44, err := m.ValueFrom.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n44 - } - return i, nil +func (m *SecretVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretVolumeSource.Merge(m, src) +} +func (m *SecretVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *SecretVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_SecretVolumeSource.DiscardUnknown(m) } -func (m *EnvVarSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo + +func (m *SecurityContext) Reset() { *m = SecurityContext{} } +func (*SecurityContext) ProtoMessage() {} +func (*SecurityContext) Descriptor() ([]byte, []int) { + return 
fileDescriptor_83c10c24ec417dc9, []int{173} +} +func (m *SecurityContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.FieldRef != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n45, err := m.FieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n45 - } - if m.ResourceFieldRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n46, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n46 - } - if m.ConfigMapKeyRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapKeyRef.Size())) - n47, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n47 - } - if m.SecretKeyRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretKeyRef.Size())) - n48, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n48 - } - return i, nil +func (m *SecurityContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityContext.Merge(m, src) +} +func (m *SecurityContext) XXX_Size() int { + return m.Size() +} +func (m *SecurityContext) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityContext.DiscardUnknown(m) } -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SecurityContext proto.InternalMessageInfo + +func (m *SerializedReference) Reset() { *m = SerializedReference{} } +func (*SerializedReference) ProtoMessage() {} +func (*SerializedReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{174} +} +func (m *SerializedReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SerializedReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *SerializedReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_SerializedReference.Merge(m, src) +} +func (m *SerializedReference) XXX_Size() int { + return m.Size() +} +func (m *SerializedReference) XXX_DiscardUnknown() { + xxx_messageInfo_SerializedReference.DiscardUnknown(m) } -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n49, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n49 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InvolvedObject.Size())) - n50, err := m.InvolvedObject.MarshalTo(dAtA[i:]) +var xxx_messageInfo_SerializedReference proto.InternalMessageInfo + +func (m *Service) Reset() { *m = Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{175} +} +func (m *Service) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n50 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n51, err := m.Source.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service.Merge(m, src) +} +func (m *Service) XXX_Size() int { + return m.Size() +} +func (m *Service) XXX_DiscardUnknown() { + xxx_messageInfo_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_Service proto.InternalMessageInfo + +func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } +func (*ServiceAccount) ProtoMessage() {} +func (*ServiceAccount) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{176} +} +func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n51 - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FirstTimestamp.Size())) - n52, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceAccount) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccount.Merge(m, src) +} +func (m *ServiceAccount) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccount) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccount.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo + +func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } +func (*ServiceAccountList) ProtoMessage() {} +func (*ServiceAccountList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{177} +} +func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n52 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTimestamp.Size())) - n53, err := m.LastTimestamp.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceAccountList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountList.Merge(m, src) +} +func (m *ServiceAccountList) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo + +func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } +func (*ServiceAccountTokenProjection) ProtoMessage() {} +func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{178} +} +func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountTokenProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n53 - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x4a - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) - n54, err := m.EventTime.MarshalTo(dAtA[i:]) + return b[:n], nil +} +func (m *ServiceAccountTokenProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountTokenProjection.Merge(m, src) +} +func (m *ServiceAccountTokenProjection) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountTokenProjection) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountTokenProjection.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo + +func (m *ServiceList) Reset() { *m = ServiceList{} } +func (*ServiceList) ProtoMessage() {} +func (*ServiceList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{179} +} +func (m *ServiceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n54 - if m.Series != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) - n55, err := m.Series.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n55 - } - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) - i += copy(dAtA[i:], m.Action) - if m.Related != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) - n56, err := m.Related.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n56 + return nil, err } - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) - i += copy(dAtA[i:], m.ReportingController) - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) - i += copy(dAtA[i:], m.ReportingInstance) - return i, nil + return b[:n], nil +} +func (m *ServiceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceList.Merge(m, src) +} +func (m *ServiceList) XXX_Size() int { + return m.Size() +} +func (m *ServiceList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceList.DiscardUnknown(m) } -func (m *EventList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ServiceList proto.InternalMessageInfo + +func (m *ServicePort) Reset() { *m = ServicePort{} } +func (*ServicePort) ProtoMessage() {} +func (*ServicePort) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{180} +} +func (m *ServicePort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServicePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ServicePort) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServicePort.Merge(m, src) +} +func (m *ServicePort) XXX_Size() int { + return m.Size() +} +func (m *ServicePort) XXX_DiscardUnknown() { + xxx_messageInfo_ServicePort.DiscardUnknown(m) } -func (m *EventList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n57, err := m.ListMeta.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ServicePort proto.InternalMessageInfo + +func (m *ServiceProxyOptions) Reset() { *m = 
ServiceProxyOptions{} } +func (*ServiceProxyOptions) ProtoMessage() {} +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{181} +} +func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n57 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *ServiceProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceProxyOptions.Merge(m, src) +} +func (m *ServiceProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *ServiceProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceProxyOptions.DiscardUnknown(m) } -func (m *EventSeries) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo + +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (*ServiceSpec) ProtoMessage() {} +func (*ServiceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{182} +} +func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *ServiceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceSpec.Merge(m, src) +} +func (m *ServiceSpec) XXX_Size() int { + return m.Size() +} +func (m *ServiceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceSpec.DiscardUnknown(m) } -func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) - n58, err := m.LastObservedTime.MarshalTo(dAtA[i:]) +var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo + +func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } +func (*ServiceStatus) ProtoMessage() {} +func (*ServiceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{183} +} +func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i += n58 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) - i += copy(dAtA[i:], m.State) - return i, nil + return b[:n], nil +} +func (m *ServiceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceStatus.Merge(m, src) +} +func (m *ServiceStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceStatus.DiscardUnknown(m) } -func (m *EventSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo + +func (m *SessionAffinityConfig) 
Reset() { *m = SessionAffinityConfig{} } +func (*SessionAffinityConfig) ProtoMessage() {} +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{184} +} +func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionAffinityConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) - i += copy(dAtA[i:], m.Component) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - return i, nil +func (m *SessionAffinityConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionAffinityConfig.Merge(m, src) +} +func (m *SessionAffinityConfig) XXX_Size() int { + return m.Size() +} +func (m *SessionAffinityConfig) XXX_DiscardUnknown() { + xxx_messageInfo_SessionAffinityConfig.DiscardUnknown(m) } -func (m *ExecAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo + +func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } +func (*StorageOSPersistentVolumeSource) ProtoMessage() {} +func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{185} +} +func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageOSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *StorageOSPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageOSPersistentVolumeSource.Merge(m, src) +} +func (m *StorageOSPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *StorageOSPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_StorageOSPersistentVolumeSource.DiscardUnknown(m) } -func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Command) > 0 { - for _, s := range m.Command { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } +var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo + +func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } +func (*StorageOSVolumeSource) ProtoMessage() {} +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{186} +} +func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageOSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - return i, nil + return b[:n], nil +} +func (m *StorageOSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageOSVolumeSource.Merge(m, src) +} +func (m *StorageOSVolumeSource) XXX_Size() int { + return m.Size() +} +func (m 
*StorageOSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_StorageOSVolumeSource.DiscardUnknown(m) } -func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo + +func (m *Sysctl) Reset() { *m = Sysctl{} } +func (*Sysctl) ProtoMessage() {} +func (*Sysctl) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{187} +} +func (m *Sysctl) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sysctl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Sysctl) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sysctl.Merge(m, src) +} +func (m *Sysctl) XXX_Size() int { + return m.Size() +} +func (m *Sysctl) XXX_DiscardUnknown() { + xxx_messageInfo_Sysctl.DiscardUnknown(m) } -func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.TargetWWNs) > 0 { - for _, s := range m.TargetWWNs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.Lun != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.WWIDs) > 0 { - for _, s := range m.WWIDs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} +var xxx_messageInfo_Sysctl proto.InternalMessageInfo -func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } +func (*TCPSocketAction) ProtoMessage() {} +func (*TCPSocketAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{188} +} +func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TCPSocketAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n59, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n59 - } - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Options) > 0 { - keysForOptions := make([]string, 0, len(m.Options)) - for k := range m.Options { - keysForOptions = append(keysForOptions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - for _, k := range 
keysForOptions { - dAtA[i] = 0x2a - i++ - v := m.Options[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil +func (m *TCPSocketAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_TCPSocketAction.Merge(m, src) +} +func (m *TCPSocketAction) XXX_Size() int { + return m.Size() +} +func (m *TCPSocketAction) XXX_DiscardUnknown() { + xxx_messageInfo_TCPSocketAction.DiscardUnknown(m) } -func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo + +func (m *Taint) Reset() { *m = Taint{} } +func (*Taint) ProtoMessage() {} +func (*Taint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{189} +} +func (m *Taint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Taint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n60, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n60 - } - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Options) > 0 { - keysForOptions := make([]string, 0, len(m.Options)) - for k := range m.Options { - keysForOptions = append(keysForOptions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - for _, k := range keysForOptions { - dAtA[i] = 0x2a - i++ - v := m.Options[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil +func (m *Taint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Taint.Merge(m, src) +} +func (m *Taint) XXX_Size() int { + return m.Size() +} +func (m *Taint) XXX_DiscardUnknown() { + xxx_messageInfo_Taint.DiscardUnknown(m) } -func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Taint proto.InternalMessageInfo + +func (m *Toleration) Reset() { *m = Toleration{} } +func (*Toleration) ProtoMessage() {} +func (*Toleration) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{190} +} +func (m *Toleration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Toleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) - i += copy(dAtA[i:], m.DatasetName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) - i += copy(dAtA[i:], m.DatasetUUID) - return i, nil +func (m *Toleration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Toleration.Merge(m, src) +} +func (m *Toleration) XXX_Size() int { + return m.Size() +} +func (m *Toleration) XXX_DiscardUnknown() { + xxx_messageInfo_Toleration.DiscardUnknown(m) } -func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_Toleration proto.InternalMessageInfo + +func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } +func (*TopologySelectorLabelRequirement) ProtoMessage() {} +func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{191} +} +func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySelectorLabelRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) - i += copy(dAtA[i:], m.PDName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil +func (m *TopologySelectorLabelRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySelectorLabelRequirement.Merge(m, src) +} +func (m *TopologySelectorLabelRequirement) XXX_Size() int { + return m.Size() +} +func (m *TopologySelectorLabelRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySelectorLabelRequirement.DiscardUnknown(m) } -func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo + +func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } +func (*TopologySelectorTerm) ProtoMessage() {} +func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{192} +} +func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) - i += copy(dAtA[i:], m.Repository) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Revision))) - i += copy(dAtA[i:], m.Revision) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) - i += copy(dAtA[i:], m.Directory) - return i, nil +func (m *TopologySelectorTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySelectorTerm.Merge(m, src) +} +func (m *TopologySelectorTerm) XXX_Size() int { + return m.Size() +} +func (m *TopologySelectorTerm) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySelectorTerm.DiscardUnknown(m) } -func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo + +func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } +func (*TopologySpreadConstraint) ProtoMessage() {} +func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{193} +} +func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySpreadConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) - i += copy(dAtA[i:], m.EndpointsName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.EndpointsNamespace != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace))) - i += copy(dAtA[i:], *m.EndpointsNamespace) - } - return i, nil +func (m *TopologySpreadConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySpreadConstraint.Merge(m, src) +} +func (m *TopologySpreadConstraint) XXX_Size() int { + return m.Size() +} +func (m *TopologySpreadConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySpreadConstraint.DiscardUnknown(m) } -func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo + +func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } +func (*TypedLocalObjectReference) ProtoMessage() {} +func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{194} +} +func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypedLocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) - i += copy(dAtA[i:], m.EndpointsName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil +func (m 
*TypedLocalObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypedLocalObjectReference.Merge(m, src) +} +func (m *TypedLocalObjectReference) XXX_Size() int { + return m.Size() +} +func (m *TypedLocalObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_TypedLocalObjectReference.DiscardUnknown(m) } -func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo + +func (m *Volume) Reset() { *m = Volume{} } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{195} +} +func (m *Volume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (m *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(m, src) +} +func (m *Volume) XXX_Size() int { + return m.Size() +} +func (m *Volume) XXX_DiscardUnknown() { + xxx_messageInfo_Volume.DiscardUnknown(m) } -func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n61, err := m.Port.MarshalTo(dAtA[i:]) +var xxx_messageInfo_Volume proto.InternalMessageInfo + +func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } +func (*VolumeDevice) ProtoMessage() {} +func (*VolumeDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{196} +} +func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err - } - i += n61 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) - i += copy(dAtA[i:], m.Scheme) - if len(m.HTTPHeaders) > 0 { - for _, msg := range m.HTTPHeaders { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return nil, err } - return i, nil + return b[:n], nil +} +func (m *VolumeDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeDevice.Merge(m, src) +} +func (m *VolumeDevice) XXX_Size() int { + return m.Size() +} +func (m *VolumeDevice) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeDevice.DiscardUnknown(m) } -func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo + +func (m *VolumeMount) Reset() { *m = VolumeMount{} } +func (*VolumeMount) ProtoMessage() {} +func (*VolumeMount) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{197} +} +func (m *VolumeMount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return 
dAtA[:n], nil + return b[:n], nil } - -func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - return i, nil +func (m *VolumeMount) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeMount.Merge(m, src) +} +func (m *VolumeMount) XXX_Size() int { + return m.Size() +} +func (m *VolumeMount) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeMount.DiscardUnknown(m) } -func (m *Handler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeMount proto.InternalMessageInfo + +func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } +func (*VolumeNodeAffinity) ProtoMessage() {} +func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{198} +} +func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeNodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *Handler) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Exec != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) - n62, err := m.Exec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n62 - } - if m.HTTPGet != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) - n63, err := m.HTTPGet.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n63 - } - if m.TCPSocket != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) - n64, err := m.TCPSocket.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n64 - } - return i, nil +func (m *VolumeNodeAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeNodeAffinity.Merge(m, src) +} +func (m *VolumeNodeAffinity) XXX_Size() int { + return m.Size() +} +func (m *VolumeNodeAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeNodeAffinity.DiscardUnknown(m) } -func (m *HostAlias) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo + +func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } +func (*VolumeProjection) ProtoMessage() {} +func (*VolumeProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{199} +} +func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - if len(m.Hostnames) > 0 { - for _, s := range m.Hostnames { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += 
copy(dAtA[i:], s) - } - } - return i, nil +func (m *VolumeProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeProjection.Merge(m, src) +} +func (m *VolumeProjection) XXX_Size() int { + return m.Size() +} +func (m *VolumeProjection) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeProjection.DiscardUnknown(m) } -func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo + +func (m *VolumeSource) Reset() { *m = VolumeSource{} } +func (*VolumeSource) ProtoMessage() {} +func (*VolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{200} +} +func (m *VolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.Type != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) - i += copy(dAtA[i:], *m.Type) - } - return i, nil +func (m *VolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeSource.Merge(m, src) +} +func (m *VolumeSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeSource.DiscardUnknown(m) } -func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VolumeSource proto.InternalMessageInfo + +func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } +func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} +func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{201} +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) - i += copy(dAtA[i:], m.TargetPortal) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) - i += copy(dAtA[i:], m.IQN) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) - i += copy(dAtA[i:], m.ISCSIInterface) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Portals) > 0 { - for _, s := range m.Portals { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x40 - i++ - if 
m.DiscoveryCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n65, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n65 - } - dAtA[i] = 0x58 - i++ - if m.SessionCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.InitiatorName != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) - i += copy(dAtA[i:], *m.InitiatorName) - } - return i, nil +func (m *VsphereVirtualDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VsphereVirtualDiskVolumeSource.Merge(m, src) +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *VsphereVirtualDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VsphereVirtualDiskVolumeSource.DiscardUnknown(m) } -func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo + +func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } +func (*WeightedPodAffinityTerm) ProtoMessage() {} +func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{202} +} +func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WeightedPodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) - i += copy(dAtA[i:], m.TargetPortal) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) - i += copy(dAtA[i:], m.IQN) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) - i += copy(dAtA[i:], m.ISCSIInterface) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Portals) > 0 { - for _, s := range m.Portals { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x40 - i++ - if m.DiscoveryCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n66, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n66 - } - dAtA[i] = 0x58 - i++ - if m.SessionCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.InitiatorName != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) - i += copy(dAtA[i:], *m.InitiatorName) - } - return i, nil +func (m *WeightedPodAffinityTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_WeightedPodAffinityTerm.Merge(m, src) +} +func (m *WeightedPodAffinityTerm) XXX_Size() int { + return m.Size() +} +func (m *WeightedPodAffinityTerm) XXX_DiscardUnknown() { + 
xxx_messageInfo_WeightedPodAffinityTerm.DiscardUnknown(m) } -func (m *KeyToPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo + +func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } +func (*WindowsSecurityContextOptions) ProtoMessage() {} +func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{203} +} +func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsSecurityContextOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil } - -func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.Mode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) - } - return i, nil +func (m *WindowsSecurityContextOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsSecurityContextOptions.Merge(m, src) } - -func (m *Lifecycle) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *WindowsSecurityContextOptions) XXX_Size() int { + return m.Size() +} +func (m *WindowsSecurityContextOptions) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsSecurityContextOptions.DiscardUnknown(m) } -func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PostStart != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) - n67, err := m.PostStart.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n67 - } - if m.PreStop != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) - n68, err := m.PreStop.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n68 - } - return i, nil +var xxx_messageInfo_WindowsSecurityContextOptions proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") + proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") + proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") + proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") + proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") + proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") + proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") + proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") + proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource.VolumeAttributesEntry") + proto.RegisterType((*CSIVolumeSource)(nil), "k8s.io.api.core.v1.CSIVolumeSource") + 
proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIVolumeSource.VolumeAttributesEntry") + proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") + proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") + proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") + proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") + proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") + proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") + proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") + proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") + proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") + proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap") + proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.ConfigMap.BinaryDataEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ConfigMap.DataEntry") + proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource") + proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector") + proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList") + proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource") + proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection") + proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource") + proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") + proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") + proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") + proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") + proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") + proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") + proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") + proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") + proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") + proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") + proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") + proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource") + proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource") + proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort") + proto.RegisterType((*EndpointSubset)(nil), "k8s.io.api.core.v1.EndpointSubset") + proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints") + proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList") + proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource") + proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar") + proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") + 
proto.RegisterType((*EphemeralContainer)(nil), "k8s.io.api.core.v1.EphemeralContainer") + proto.RegisterType((*EphemeralContainerCommon)(nil), "k8s.io.api.core.v1.EphemeralContainerCommon") + proto.RegisterType((*EphemeralContainers)(nil), "k8s.io.api.core.v1.EphemeralContainers") + proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") + proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") + proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") + proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") + proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource.OptionsEntry") + proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexVolumeSource.OptionsEntry") + proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") + proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") + proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource") + proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") + proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") + proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") + proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") + proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") + proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") + proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") + proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") + proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") + proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") + proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultRequestEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxLimitRequestRatioEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MinEntry") + proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") + proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") + proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") + proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") + proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") + proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") + 
proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") + proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") + proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") + proto.RegisterType((*NamespaceCondition)(nil), "k8s.io.api.core.v1.NamespaceCondition") + proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList") + proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec") + proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus") + proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node") + proto.RegisterType((*NodeAddress)(nil), "k8s.io.api.core.v1.NodeAddress") + proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity") + proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition") + proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") + proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus") + proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") + proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") + proto.RegisterType((*NodeResources)(nil), "k8s.io.api.core.v1.NodeResources") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeResources.CapacityEntry") + proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") + proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") + proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") + proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec") + proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.AllocatableEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.CapacityEntry") + proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo") + proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference") + proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume") + proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim") + proto.RegisterType((*PersistentVolumeClaimCondition)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimCondition") + proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") + proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") + proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.CapacityEntry") + proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") + proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList") + proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource") + proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec.CapacityEntry") + 
proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus") + proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource") + proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod") + proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity") + proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm") + proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") + proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") + proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") + proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") + proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") + proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") + proto.RegisterType((*PodIP)(nil), "k8s.io.api.core.v1.PodIP") + proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") + proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") + proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") + proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") + proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") + proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext") + proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") + proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.PodSpec.NodeSelectorEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PodSpec.OverheadEntry") + proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus") + proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult") + proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") + proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") + proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") + proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") + proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") + proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") + proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm") + proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") + proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") + proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") + proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") + proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource") + proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation") + proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController") + proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition") + proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList") + proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec") + 
proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec.SelectorEntry") + proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") + proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") + proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec.HardEntry") + proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.HardEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.UsedEntry") + proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.LimitsEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.RequestsEntry") + proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") + proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") + proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") + proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector") + proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement") + proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret") + proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.Secret.DataEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.Secret.StringDataEntry") + proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource") + proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector") + proto.RegisterType((*SecretList)(nil), "k8s.io.api.core.v1.SecretList") + proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection") + proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference") + proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource") + proto.RegisterType((*SecurityContext)(nil), "k8s.io.api.core.v1.SecurityContext") + proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference") + proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service") + proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount") + proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList") + proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection") + proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList") + proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort") + proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions") + proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ServiceSpec.SelectorEntry") + proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") + 
proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") + proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") + proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") + proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") + proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction") + proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") + proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") + proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement") + proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm") + proto.RegisterType((*TopologySpreadConstraint)(nil), "k8s.io.api.core.v1.TopologySpreadConstraint") + proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference") + proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") + proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") + proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") + proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") + proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") + proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") + proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") + proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") + proto.RegisterType((*WindowsSecurityContextOptions)(nil), "k8s.io.api.core.v1.WindowsSecurityContextOptions") } -func (m *LimitRange) Marshal() (dAtA []byte, err error) { +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptor_83c10c24ec417dc9) +} + +var fileDescriptor_83c10c24ec417dc9 = []byte{ + // 13567 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7b, 0x70, 0x24, 0x49, + 0x5a, 0x18, 0x7e, 0xd5, 0xad, 0x47, 0xf7, 0xa7, 0x77, 0xce, 0x63, 0x35, 0xda, 0x9d, 0xd1, 0x6c, + 0xed, 0xdd, 0xec, 0xec, 0xed, 0xae, 0xe6, 0xf6, 0x75, 0xbb, 0xdc, 0xde, 0x2d, 0x48, 0x6a, 0x69, + 0xa6, 0x77, 0x46, 0x9a, 0xde, 0x6c, 0xcd, 0xcc, 0xdd, 0xb2, 0x77, 0x5c, 0xa9, 0x2b, 0x25, 0xd5, + 0xaa, 0xbb, 0xaa, 0xb7, 0xaa, 0x5a, 0x33, 0xda, 0x1f, 0xc4, 0x0f, 0x1f, 0xcf, 0x33, 0xe0, 0xb8, + 0xb0, 0x09, 0x3f, 0x80, 0xc0, 0x11, 0x18, 0x07, 0x60, 0xb0, 0xc3, 0x18, 0x0c, 0x98, 0xc3, 0x36, + 0x06, 0xdb, 0x81, 0xfd, 0x07, 0xc6, 0x0e, 0xdb, 0x47, 0x04, 0x61, 0x19, 0x06, 0x87, 0x89, 0xfb, + 0xc3, 0x40, 0x18, 0xfc, 0x87, 0x65, 0xc2, 0x38, 0xf2, 0x59, 0x99, 0xd5, 0x55, 0xdd, 0xad, 0x59, + 0x8d, 0x6e, 0xb9, 0xd8, 0xff, 0xba, 0xf3, 0xfb, 0xf2, 0xcb, 0xac, 0x7c, 0x7e, 0xf9, 0x3d, 0xe1, + 0xd5, 0xdd, 0x57, 0xa2, 0x05, 0x2f, 0xb8, 0xb2, 0xdb, 0xd9, 0x24, 0xa1, 0x4f, 0x62, 0x12, 0x5d, + 0xd9, 0x23, 0xbe, 0x1b, 0x84, 0x57, 0x04, 0xc0, 0x69, 0x7b, 0x57, 0x1a, 0x41, 0x48, 0xae, 0xec, + 0x3d, 0x77, 0x65, 0x9b, 0xf8, 0x24, 0x74, 0x62, 0xe2, 0x2e, 0xb4, 0xc3, 0x20, 0x0e, 0x10, 0xe2, + 0x38, 0x0b, 0x4e, 0xdb, 0x5b, 0xa0, 0x38, 0x0b, 0x7b, 0xcf, 0xcd, 0x3d, 0xbb, 0xed, 0xc5, 0x3b, + 0x9d, 0xcd, 0x85, 0x46, 0xd0, 0xba, 0xb2, 0x1d, 0x6c, 0x07, 0x57, 0x18, 0xea, 0x66, 0x67, 0x8b, + 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0x4e, 0x62, 0xee, 0xc5, 0xa4, 0x99, 0x96, 
0xd3, 0xd8, 0xf1, 0x7c, + 0x12, 0xee, 0x5f, 0x69, 0xef, 0x6e, 0xb3, 0x76, 0x43, 0x12, 0x05, 0x9d, 0xb0, 0x41, 0xd2, 0x0d, + 0xf7, 0xac, 0x15, 0x5d, 0x69, 0x91, 0xd8, 0xc9, 0xe8, 0xee, 0xdc, 0x95, 0xbc, 0x5a, 0x61, 0xc7, + 0x8f, 0xbd, 0x56, 0x77, 0x33, 0x1f, 0xef, 0x57, 0x21, 0x6a, 0xec, 0x90, 0x96, 0xd3, 0x55, 0xef, + 0x85, 0xbc, 0x7a, 0x9d, 0xd8, 0x6b, 0x5e, 0xf1, 0xfc, 0x38, 0x8a, 0xc3, 0x74, 0x25, 0xfb, 0x2b, + 0x16, 0x5c, 0x5c, 0xbc, 0x53, 0x5f, 0x69, 0x3a, 0x51, 0xec, 0x35, 0x96, 0x9a, 0x41, 0x63, 0xb7, + 0x1e, 0x07, 0x21, 0xb9, 0x1d, 0x34, 0x3b, 0x2d, 0x52, 0x67, 0x03, 0x81, 0x9e, 0x81, 0xd2, 0x1e, + 0xfb, 0x5f, 0xad, 0xcc, 0x5a, 0x17, 0xad, 0xcb, 0xe5, 0xa5, 0xe9, 0xdf, 0x38, 0x98, 0xff, 0xd0, + 0xfd, 0x83, 0xf9, 0xd2, 0x6d, 0x51, 0x8e, 0x15, 0x06, 0xba, 0x04, 0x23, 0x5b, 0xd1, 0xc6, 0x7e, + 0x9b, 0xcc, 0x16, 0x18, 0xee, 0xa4, 0xc0, 0x1d, 0x59, 0xad, 0xd3, 0x52, 0x2c, 0xa0, 0xe8, 0x0a, + 0x94, 0xdb, 0x4e, 0x18, 0x7b, 0xb1, 0x17, 0xf8, 0xb3, 0xc5, 0x8b, 0xd6, 0xe5, 0xe1, 0xa5, 0x19, + 0x81, 0x5a, 0xae, 0x49, 0x00, 0x4e, 0x70, 0x68, 0x37, 0x42, 0xe2, 0xb8, 0x37, 0xfd, 0xe6, 0xfe, + 0xec, 0xd0, 0x45, 0xeb, 0x72, 0x29, 0xe9, 0x06, 0x16, 0xe5, 0x58, 0x61, 0xd8, 0x3f, 0x54, 0x80, + 0xd2, 0xe2, 0xd6, 0x96, 0xe7, 0x7b, 0xf1, 0x3e, 0xba, 0x0d, 0xe3, 0x7e, 0xe0, 0x12, 0xf9, 0x9f, + 0x7d, 0xc5, 0xd8, 0xf3, 0x17, 0x17, 0xba, 0x97, 0xd2, 0xc2, 0xba, 0x86, 0xb7, 0x34, 0x7d, 0xff, + 0x60, 0x7e, 0x5c, 0x2f, 0xc1, 0x06, 0x1d, 0x84, 0x61, 0xac, 0x1d, 0xb8, 0x8a, 0x6c, 0x81, 0x91, + 0x9d, 0xcf, 0x22, 0x5b, 0x4b, 0xd0, 0x96, 0xa6, 0xee, 0x1f, 0xcc, 0x8f, 0x69, 0x05, 0x58, 0x27, + 0x82, 0x36, 0x61, 0x8a, 0xfe, 0xf5, 0x63, 0x4f, 0xd1, 0x2d, 0x32, 0xba, 0x4f, 0xe4, 0xd1, 0xd5, + 0x50, 0x97, 0x4e, 0xdd, 0x3f, 0x98, 0x9f, 0x4a, 0x15, 0xe2, 0x34, 0x41, 0xfb, 0x5d, 0x98, 0x5c, + 0x8c, 0x63, 0xa7, 0xb1, 0x43, 0x5c, 0x3e, 0x83, 0xe8, 0x45, 0x18, 0xf2, 0x9d, 0x16, 0x11, 0xf3, + 0x7b, 0x51, 0x0c, 0xec, 0xd0, 0xba, 0xd3, 0x22, 0x87, 0x07, 0xf3, 0xd3, 0xb7, 0x7c, 0xef, 0x9d, + 0x8e, 0x58, 0x15, 0xb4, 0x0c, 0x33, 0x6c, 0xf4, 0x3c, 0x80, 0x4b, 0xf6, 0xbc, 0x06, 0xa9, 0x39, + 0xf1, 0x8e, 0x98, 0x6f, 0x24, 0xea, 0x42, 0x45, 0x41, 0xb0, 0x86, 0x65, 0xdf, 0x83, 0xf2, 0xe2, + 0x5e, 0xe0, 0xb9, 0xb5, 0xc0, 0x8d, 0xd0, 0x2e, 0x4c, 0xb5, 0x43, 0xb2, 0x45, 0x42, 0x55, 0x34, + 0x6b, 0x5d, 0x2c, 0x5e, 0x1e, 0x7b, 0xfe, 0x72, 0xe6, 0xc7, 0x9a, 0xa8, 0x2b, 0x7e, 0x1c, 0xee, + 0x2f, 0x3d, 0x22, 0xda, 0x9b, 0x4a, 0x41, 0x71, 0x9a, 0xb2, 0xfd, 0x2f, 0x0b, 0x70, 0x66, 0xf1, + 0xdd, 0x4e, 0x48, 0x2a, 0x5e, 0xb4, 0x9b, 0x5e, 0xe1, 0xae, 0x17, 0xed, 0xae, 0x27, 0x23, 0xa0, + 0x96, 0x56, 0x45, 0x94, 0x63, 0x85, 0x81, 0x9e, 0x85, 0x51, 0xfa, 0xfb, 0x16, 0xae, 0x8a, 0x4f, + 0x3e, 0x25, 0x90, 0xc7, 0x2a, 0x4e, 0xec, 0x54, 0x38, 0x08, 0x4b, 0x1c, 0xb4, 0x06, 0x63, 0x0d, + 0xb6, 0x21, 0xb7, 0xd7, 0x02, 0x97, 0xb0, 0xc9, 0x2c, 0x2f, 0x3d, 0x4d, 0xd1, 0x97, 0x93, 0xe2, + 0xc3, 0x83, 0xf9, 0x59, 0xde, 0x37, 0x41, 0x42, 0x83, 0x61, 0xbd, 0x3e, 0xb2, 0xd5, 0xfe, 0x1a, + 0x62, 0x94, 0x20, 0x63, 0x6f, 0x5d, 0xd6, 0xb6, 0xca, 0x30, 0xdb, 0x2a, 0xe3, 0xd9, 0xdb, 0x04, + 0x3d, 0x07, 0x43, 0xbb, 0x9e, 0xef, 0xce, 0x8e, 0x30, 0x5a, 0xe7, 0xe9, 0x9c, 0x5f, 0xf7, 0x7c, + 0xf7, 0xf0, 0x60, 0x7e, 0xc6, 0xe8, 0x0e, 0x2d, 0xc4, 0x0c, 0xd5, 0xfe, 0x13, 0x0b, 0xe6, 0x19, + 0x6c, 0xd5, 0x6b, 0x92, 0x1a, 0x09, 0x23, 0x2f, 0x8a, 0x89, 0x1f, 0x1b, 0x03, 0xfa, 0x3c, 0x40, + 0x44, 0x1a, 0x21, 0x89, 0xb5, 0x21, 0x55, 0x0b, 0xa3, 0xae, 0x20, 0x58, 0xc3, 0xa2, 0x07, 0x42, + 0xb4, 0xe3, 0x84, 0x6c, 0x7d, 0x89, 0x81, 0x55, 0x07, 0x42, 0x5d, 0x02, 0x70, 0x82, 0x63, 0x1c, + 
0x08, 0xc5, 0x7e, 0x07, 0x02, 0xfa, 0x14, 0x4c, 0x25, 0x8d, 0x45, 0x6d, 0xa7, 0x21, 0x07, 0x90, + 0x6d, 0x99, 0xba, 0x09, 0xc2, 0x69, 0x5c, 0xfb, 0xef, 0x59, 0x62, 0xf1, 0xd0, 0xaf, 0x7e, 0x9f, + 0x7f, 0xab, 0xfd, 0x4b, 0x16, 0x8c, 0x2e, 0x79, 0xbe, 0xeb, 0xf9, 0xdb, 0xe8, 0xf3, 0x50, 0xa2, + 0x77, 0x93, 0xeb, 0xc4, 0x8e, 0x38, 0xf7, 0x3e, 0xa6, 0xed, 0x2d, 0x75, 0x55, 0x2c, 0xb4, 0x77, + 0xb7, 0x69, 0x41, 0xb4, 0x40, 0xb1, 0xe9, 0x6e, 0xbb, 0xb9, 0xf9, 0x36, 0x69, 0xc4, 0x6b, 0x24, + 0x76, 0x92, 0xcf, 0x49, 0xca, 0xb0, 0xa2, 0x8a, 0xae, 0xc3, 0x48, 0xec, 0x84, 0xdb, 0x24, 0x16, + 0x07, 0x60, 0xe6, 0x41, 0xc5, 0x6b, 0x62, 0xba, 0x23, 0x89, 0xdf, 0x20, 0xc9, 0xb5, 0xb0, 0xc1, + 0xaa, 0x62, 0x41, 0xc2, 0xfe, 0x81, 0x51, 0x38, 0xb7, 0x5c, 0xaf, 0xe6, 0xac, 0xab, 0x4b, 0x30, + 0xe2, 0x86, 0xde, 0x1e, 0x09, 0xc5, 0x38, 0x2b, 0x2a, 0x15, 0x56, 0x8a, 0x05, 0x14, 0xbd, 0x02, + 0xe3, 0xfc, 0x42, 0xba, 0xe6, 0xf8, 0x6e, 0x53, 0x0e, 0xf1, 0x69, 0x81, 0x3d, 0x7e, 0x5b, 0x83, + 0x61, 0x03, 0xf3, 0x88, 0x8b, 0xea, 0x52, 0x6a, 0x33, 0xe6, 0x5d, 0x76, 0x5f, 0xb4, 0x60, 0x9a, + 0x37, 0xb3, 0x18, 0xc7, 0xa1, 0xb7, 0xd9, 0x89, 0x49, 0x34, 0x3b, 0xcc, 0x4e, 0xba, 0xe5, 0xac, + 0xd1, 0xca, 0x1d, 0x81, 0x85, 0xdb, 0x29, 0x2a, 0xfc, 0x10, 0x9c, 0x15, 0xed, 0x4e, 0xa7, 0xc1, + 0xb8, 0xab, 0x59, 0xf4, 0x1d, 0x16, 0xcc, 0x35, 0x02, 0x3f, 0x0e, 0x83, 0x66, 0x93, 0x84, 0xb5, + 0xce, 0x66, 0xd3, 0x8b, 0x76, 0xf8, 0x3a, 0xc5, 0x64, 0x8b, 0x9d, 0x04, 0x39, 0x73, 0xa8, 0x90, + 0xc4, 0x1c, 0x5e, 0xb8, 0x7f, 0x30, 0x3f, 0xb7, 0x9c, 0x4b, 0x0a, 0xf7, 0x68, 0x06, 0xed, 0x02, + 0xa2, 0x57, 0x69, 0x3d, 0x76, 0xb6, 0x49, 0xd2, 0xf8, 0xe8, 0xe0, 0x8d, 0x9f, 0xbd, 0x7f, 0x30, + 0x8f, 0xd6, 0xbb, 0x48, 0xe0, 0x0c, 0xb2, 0xe8, 0x1d, 0x38, 0x4d, 0x4b, 0xbb, 0xbe, 0xb5, 0x34, + 0x78, 0x73, 0xb3, 0xf7, 0x0f, 0xe6, 0x4f, 0xaf, 0x67, 0x10, 0xc1, 0x99, 0xa4, 0xd1, 0xb7, 0x5b, + 0x70, 0x2e, 0xf9, 0xfc, 0x95, 0x7b, 0x6d, 0xc7, 0x77, 0x93, 0x86, 0xcb, 0x83, 0x37, 0x4c, 0xcf, + 0xe4, 0x73, 0xcb, 0x79, 0x94, 0x70, 0x7e, 0x23, 0x73, 0xcb, 0x70, 0x26, 0x73, 0xb5, 0xa0, 0x69, + 0x28, 0xee, 0x12, 0xce, 0x05, 0x95, 0x31, 0xfd, 0x89, 0x4e, 0xc3, 0xf0, 0x9e, 0xd3, 0xec, 0x88, + 0x8d, 0x82, 0xf9, 0x9f, 0x4f, 0x14, 0x5e, 0xb1, 0xec, 0x7f, 0x55, 0x84, 0xa9, 0xe5, 0x7a, 0xf5, + 0x81, 0x76, 0xa1, 0x7e, 0x0d, 0x15, 0x7a, 0x5e, 0x43, 0xc9, 0xa5, 0x56, 0xcc, 0xbd, 0xd4, 0xfe, + 0xff, 0x8c, 0x2d, 0x34, 0xc4, 0xb6, 0xd0, 0x37, 0xe4, 0x6c, 0xa1, 0x63, 0xde, 0x38, 0x7b, 0x39, + 0xab, 0x68, 0x98, 0x4d, 0x66, 0x26, 0xc7, 0x72, 0x23, 0x68, 0x38, 0xcd, 0xf4, 0xd1, 0x77, 0xc4, + 0xa5, 0x74, 0x3c, 0xf3, 0xd8, 0x80, 0xf1, 0x65, 0xa7, 0xed, 0x6c, 0x7a, 0x4d, 0x2f, 0xf6, 0x48, + 0x84, 0x9e, 0x84, 0xa2, 0xe3, 0xba, 0x8c, 0xdb, 0x2a, 0x2f, 0x9d, 0xb9, 0x7f, 0x30, 0x5f, 0x5c, + 0x74, 0xe9, 0xb5, 0x0f, 0x0a, 0x6b, 0x1f, 0x53, 0x0c, 0xf4, 0x51, 0x18, 0x72, 0xc3, 0xa0, 0x3d, + 0x5b, 0x60, 0x98, 0x74, 0xd7, 0x0d, 0x55, 0xc2, 0xa0, 0x9d, 0x42, 0x65, 0x38, 0xf6, 0xaf, 0x16, + 0xe0, 0xb1, 0x65, 0xd2, 0xde, 0x59, 0xad, 0xe7, 0x9c, 0xdf, 0x97, 0xa1, 0xd4, 0x0a, 0x7c, 0x2f, + 0x0e, 0xc2, 0x48, 0x34, 0xcd, 0x56, 0xc4, 0x9a, 0x28, 0xc3, 0x0a, 0x8a, 0x2e, 0xc2, 0x50, 0x3b, + 0x61, 0x2a, 0xc7, 0x25, 0x43, 0xca, 0xd8, 0x49, 0x06, 0xa1, 0x18, 0x9d, 0x88, 0x84, 0x62, 0xc5, + 0x28, 0x8c, 0x5b, 0x11, 0x09, 0x31, 0x83, 0x24, 0x37, 0x33, 0xbd, 0xb3, 0xc5, 0x09, 0x9d, 0xba, + 0x99, 0x29, 0x04, 0x6b, 0x58, 0xa8, 0x06, 0xe5, 0x28, 0x35, 0xb3, 0x03, 0x6d, 0xd3, 0x09, 0x76, + 0x75, 0xab, 0x99, 0x4c, 0x88, 0x18, 0x37, 0xca, 0x48, 0xdf, 0xab, 0xfb, 0xcb, 0x05, 0x40, 0x7c, + 0x08, 0xff, 0x82, 0x0d, 
0xdc, 0xad, 0xee, 0x81, 0x1b, 0x7c, 0x4b, 0x1c, 0xd7, 0xe8, 0xfd, 0xa9, + 0x05, 0x8f, 0x2d, 0x7b, 0xbe, 0x4b, 0xc2, 0x9c, 0x05, 0xf8, 0x70, 0xde, 0xb2, 0x47, 0x63, 0x1a, + 0x8c, 0x25, 0x36, 0x74, 0x0c, 0x4b, 0xcc, 0xfe, 0x23, 0x0b, 0x10, 0xff, 0xec, 0xf7, 0xdd, 0xc7, + 0xde, 0xea, 0xfe, 0xd8, 0x63, 0x58, 0x16, 0xf6, 0x0d, 0x98, 0x5c, 0x6e, 0x7a, 0xc4, 0x8f, 0xab, + 0xb5, 0xe5, 0xc0, 0xdf, 0xf2, 0xb6, 0xd1, 0x27, 0x60, 0x32, 0xf6, 0x5a, 0x24, 0xe8, 0xc4, 0x75, + 0xd2, 0x08, 0x7c, 0xf6, 0x92, 0xb4, 0x2e, 0x0f, 0x2f, 0xa1, 0xfb, 0x07, 0xf3, 0x93, 0x1b, 0x06, + 0x04, 0xa7, 0x30, 0xed, 0xdf, 0xa1, 0xe3, 0x17, 0xb4, 0xda, 0x81, 0x4f, 0xfc, 0x78, 0x39, 0xf0, + 0x5d, 0x2e, 0x71, 0xf8, 0x04, 0x0c, 0xc5, 0x74, 0x3c, 0xf8, 0xd8, 0x5d, 0x92, 0x1b, 0x85, 0x8e, + 0xc2, 0xe1, 0xc1, 0xfc, 0xd9, 0xee, 0x1a, 0x6c, 0x9c, 0x58, 0x1d, 0xf4, 0x0d, 0x30, 0x12, 0xc5, + 0x4e, 0xdc, 0x89, 0xc4, 0x68, 0x3e, 0x2e, 0x47, 0xb3, 0xce, 0x4a, 0x0f, 0x0f, 0xe6, 0xa7, 0x54, + 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x14, 0x8c, 0xb6, 0x48, 0x14, 0x39, 0xdb, 0xf2, 0x36, 0x9c, + 0x12, 0x75, 0x47, 0xd7, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x04, 0x0c, 0x93, 0x30, 0x0c, 0x42, 0xb1, + 0x47, 0x27, 0x04, 0xe2, 0xf0, 0x0a, 0x2d, 0xc4, 0x1c, 0x66, 0xff, 0x3b, 0x0b, 0xa6, 0x54, 0x5f, + 0x79, 0x5b, 0x27, 0xf0, 0x2a, 0x78, 0x13, 0xa0, 0x21, 0x3f, 0x30, 0x62, 0xb7, 0xc7, 0xd8, 0xf3, + 0x97, 0x32, 0x2f, 0xea, 0xae, 0x61, 0x4c, 0x28, 0xab, 0xa2, 0x08, 0x6b, 0xd4, 0xec, 0x7f, 0x6a, + 0xc1, 0xa9, 0xd4, 0x17, 0xdd, 0xf0, 0xa2, 0x18, 0xbd, 0xd5, 0xf5, 0x55, 0x0b, 0x83, 0x7d, 0x15, + 0xad, 0xcd, 0xbe, 0x49, 0x2d, 0x65, 0x59, 0xa2, 0x7d, 0xd1, 0x35, 0x18, 0xf6, 0x62, 0xd2, 0x92, + 0x1f, 0xf3, 0x44, 0xcf, 0x8f, 0xe1, 0xbd, 0x4a, 0x66, 0xa4, 0x4a, 0x6b, 0x62, 0x4e, 0xc0, 0xfe, + 0x6b, 0x45, 0x28, 0xf3, 0x65, 0xbb, 0xe6, 0xb4, 0x4f, 0x60, 0x2e, 0xaa, 0x30, 0xc4, 0xa8, 0xf3, + 0x8e, 0x3f, 0x99, 0xdd, 0x71, 0xd1, 0x9d, 0x05, 0xfa, 0xe4, 0xe7, 0xcc, 0x91, 0xba, 0x1a, 0x68, + 0x11, 0x66, 0x24, 0x90, 0x03, 0xb0, 0xe9, 0xf9, 0x4e, 0xb8, 0x4f, 0xcb, 0x66, 0x8b, 0x8c, 0xe0, + 0xb3, 0xbd, 0x09, 0x2e, 0x29, 0x7c, 0x4e, 0x56, 0xf5, 0x35, 0x01, 0x60, 0x8d, 0xe8, 0xdc, 0xcb, + 0x50, 0x56, 0xc8, 0x47, 0xe1, 0x71, 0xe6, 0x3e, 0x05, 0x53, 0xa9, 0xb6, 0xfa, 0x55, 0x1f, 0xd7, + 0x59, 0xa4, 0x5f, 0x66, 0xa7, 0x80, 0xe8, 0xf5, 0x8a, 0xbf, 0x27, 0x4e, 0xd1, 0x77, 0xe1, 0x74, + 0x33, 0xe3, 0x70, 0x12, 0x53, 0x35, 0xf8, 0x61, 0xf6, 0x98, 0xf8, 0xec, 0xd3, 0x59, 0x50, 0x9c, + 0xd9, 0x06, 0xbd, 0xf6, 0x83, 0x36, 0x5d, 0xf3, 0x4e, 0x53, 0xe7, 0xa0, 0x6f, 0x8a, 0x32, 0xac, + 0xa0, 0xf4, 0x08, 0x3b, 0xad, 0x3a, 0x7f, 0x9d, 0xec, 0xd7, 0x49, 0x93, 0x34, 0xe2, 0x20, 0xfc, + 0x9a, 0x76, 0xff, 0x3c, 0x1f, 0x7d, 0x7e, 0x02, 0x8e, 0x09, 0x02, 0xc5, 0xeb, 0x64, 0x9f, 0x4f, + 0x85, 0xfe, 0x75, 0xc5, 0x9e, 0x5f, 0xf7, 0xb3, 0x16, 0x4c, 0xa8, 0xaf, 0x3b, 0x81, 0xad, 0xbe, + 0x64, 0x6e, 0xf5, 0xf3, 0x3d, 0x17, 0x78, 0xce, 0x26, 0xff, 0x72, 0x01, 0xce, 0x29, 0x1c, 0xca, + 0xee, 0xf3, 0x3f, 0x62, 0x55, 0x5d, 0x81, 0xb2, 0xaf, 0x04, 0x51, 0x96, 0x29, 0x01, 0x4a, 0xc4, + 0x50, 0x09, 0x0e, 0xe5, 0xda, 0xfc, 0x44, 0x5a, 0x34, 0xae, 0x4b, 0x68, 0x85, 0x34, 0x76, 0x09, + 0x8a, 0x1d, 0xcf, 0x15, 0x77, 0xc6, 0xc7, 0xe4, 0x68, 0xdf, 0xaa, 0x56, 0x0e, 0x0f, 0xe6, 0x1f, + 0xcf, 0xd3, 0x0e, 0xd0, 0xcb, 0x2a, 0x5a, 0xb8, 0x55, 0xad, 0x60, 0x5a, 0x19, 0x2d, 0xc2, 0x94, + 0x54, 0x80, 0xdc, 0xa6, 0x1c, 0x54, 0xe0, 0x8b, 0xab, 0x45, 0x89, 0x59, 0xb1, 0x09, 0xc6, 0x69, + 0x7c, 0x54, 0x81, 0xe9, 0xdd, 0xce, 0x26, 0x69, 0x92, 0x98, 0x7f, 0xf0, 0x75, 0xc2, 0x85, 0x90, + 0xe5, 0xe4, 0xb1, 0x75, 0x3d, 0x05, 0xc7, 0x5d, 
0x35, 0xec, 0x3f, 0x67, 0x47, 0xbc, 0x18, 0xbd, + 0x5a, 0x18, 0xd0, 0x85, 0x45, 0xa9, 0x7f, 0x2d, 0x97, 0xf3, 0x20, 0xab, 0xe2, 0x3a, 0xd9, 0xdf, + 0x08, 0x28, 0xb3, 0x9d, 0xbd, 0x2a, 0x8c, 0x35, 0x3f, 0xd4, 0x73, 0xcd, 0xff, 0x7c, 0x01, 0xce, + 0xa8, 0x11, 0x30, 0xf8, 0xba, 0xbf, 0xe8, 0x63, 0xf0, 0x1c, 0x8c, 0xb9, 0x64, 0xcb, 0xe9, 0x34, + 0x63, 0x25, 0x11, 0x1f, 0xe6, 0x5a, 0x91, 0x4a, 0x52, 0x8c, 0x75, 0x9c, 0x23, 0x0c, 0xdb, 0xff, + 0x1a, 0x63, 0x77, 0x6b, 0xec, 0xd0, 0x35, 0xae, 0x76, 0x8d, 0x95, 0xbb, 0x6b, 0x9e, 0x80, 0x61, + 0xaf, 0x45, 0x79, 0xad, 0x82, 0xc9, 0x42, 0x55, 0x69, 0x21, 0xe6, 0x30, 0xf4, 0x11, 0x18, 0x6d, + 0x04, 0xad, 0x96, 0xe3, 0xbb, 0xec, 0xca, 0x2b, 0x2f, 0x8d, 0x51, 0x76, 0x6c, 0x99, 0x17, 0x61, + 0x09, 0x43, 0x8f, 0xc1, 0x90, 0x13, 0x6e, 0x73, 0xb1, 0x44, 0x79, 0xa9, 0x44, 0x5b, 0x5a, 0x0c, + 0xb7, 0x23, 0xcc, 0x4a, 0xe9, 0xab, 0xea, 0x6e, 0x10, 0xee, 0x7a, 0xfe, 0x76, 0xc5, 0x0b, 0xc5, + 0x96, 0x50, 0x77, 0xe1, 0x1d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x85, 0xe1, 0x76, 0x10, 0xc6, 0xd1, + 0xec, 0x08, 0x1b, 0xee, 0xc7, 0x73, 0x0e, 0x22, 0xfe, 0xb5, 0xb5, 0x20, 0x8c, 0x93, 0x0f, 0xa0, + 0xff, 0x22, 0xcc, 0xab, 0xa3, 0x1b, 0x30, 0x4a, 0xfc, 0xbd, 0xd5, 0x30, 0x68, 0xcd, 0x9e, 0xca, + 0xa7, 0xb4, 0xc2, 0x51, 0xf8, 0x32, 0x4b, 0xd8, 0x4e, 0x51, 0x8c, 0x25, 0x09, 0xf4, 0x0d, 0x50, + 0x24, 0xfe, 0xde, 0xec, 0x28, 0xa3, 0x34, 0x97, 0x43, 0xe9, 0xb6, 0x13, 0x26, 0x67, 0xfe, 0x8a, + 0xbf, 0x87, 0x69, 0x1d, 0xf4, 0x19, 0x28, 0xcb, 0x03, 0x23, 0x12, 0xf2, 0xb7, 0xcc, 0x05, 0x2b, + 0x8f, 0x19, 0x4c, 0xde, 0xe9, 0x78, 0x21, 0x69, 0x11, 0x3f, 0x8e, 0x92, 0x13, 0x52, 0x42, 0x23, + 0x9c, 0x50, 0x43, 0x9f, 0x91, 0x42, 0xdf, 0xb5, 0xa0, 0xe3, 0xc7, 0xd1, 0x6c, 0x99, 0x75, 0x2f, + 0x53, 0x1d, 0x77, 0x3b, 0xc1, 0x4b, 0x4b, 0x85, 0x79, 0x65, 0x6c, 0x90, 0x42, 0x9f, 0x85, 0x09, + 0xfe, 0x9f, 0x2b, 0xb5, 0xa2, 0xd9, 0x33, 0x8c, 0xf6, 0xc5, 0x7c, 0xda, 0x1c, 0x71, 0xe9, 0x8c, + 0x20, 0x3e, 0xa1, 0x97, 0x46, 0xd8, 0xa4, 0x86, 0x30, 0x4c, 0x34, 0xbd, 0x3d, 0xe2, 0x93, 0x28, + 0xaa, 0x85, 0xc1, 0x26, 0x99, 0x05, 0x36, 0x30, 0xe7, 0xb2, 0x95, 0x60, 0xc1, 0x26, 0x59, 0x9a, + 0xa1, 0x34, 0x6f, 0xe8, 0x75, 0xb0, 0x49, 0x02, 0xdd, 0x82, 0x49, 0xfa, 0x08, 0xf3, 0x12, 0xa2, + 0x63, 0xfd, 0x88, 0xb2, 0xa7, 0x12, 0x36, 0x2a, 0xe1, 0x14, 0x11, 0x74, 0x13, 0xc6, 0xa3, 0xd8, + 0x09, 0xe3, 0x4e, 0x9b, 0x13, 0x3d, 0xdb, 0x8f, 0x28, 0xd3, 0xa1, 0xd6, 0xb5, 0x2a, 0xd8, 0x20, + 0x80, 0x5e, 0x87, 0x72, 0xd3, 0xdb, 0x22, 0x8d, 0xfd, 0x46, 0x93, 0xcc, 0x8e, 0x33, 0x6a, 0x99, + 0x87, 0xca, 0x0d, 0x89, 0xc4, 0x5f, 0x85, 0xea, 0x2f, 0x4e, 0xaa, 0xa3, 0xdb, 0x70, 0x36, 0x26, + 0x61, 0xcb, 0xf3, 0x1d, 0x7a, 0x18, 0x88, 0xd7, 0x12, 0xd3, 0x4d, 0x4e, 0xb0, 0xdd, 0x76, 0x41, + 0xcc, 0xc6, 0xd9, 0x8d, 0x4c, 0x2c, 0x9c, 0x53, 0x1b, 0xdd, 0x83, 0xd9, 0x0c, 0x48, 0xd0, 0xf4, + 0x1a, 0xfb, 0xb3, 0xa7, 0x19, 0xe5, 0x4f, 0x0a, 0xca, 0xb3, 0x1b, 0x39, 0x78, 0x87, 0x3d, 0x60, + 0x38, 0x97, 0x3a, 0xba, 0x09, 0x53, 0xec, 0x04, 0xaa, 0x75, 0x9a, 0x4d, 0xd1, 0xe0, 0x24, 0x6b, + 0xf0, 0x23, 0xf2, 0x3e, 0xae, 0x9a, 0xe0, 0xc3, 0x83, 0x79, 0x48, 0xfe, 0xe1, 0x74, 0x6d, 0xb4, + 0xc9, 0xd4, 0x60, 0x9d, 0xd0, 0x8b, 0xf7, 0xe9, 0xb9, 0x41, 0xee, 0xc5, 0xb3, 0x53, 0x3d, 0x45, + 0x10, 0x3a, 0xaa, 0xd2, 0x95, 0xe9, 0x85, 0x38, 0x4d, 0x90, 0x1e, 0xa9, 0x51, 0xec, 0x7a, 0xfe, + 0xec, 0x34, 0x3b, 0xa9, 0xd5, 0x89, 0x54, 0xa7, 0x85, 0x98, 0xc3, 0x98, 0x0a, 0x8c, 0xfe, 0xb8, + 0x49, 0x6f, 0xae, 0x19, 0x86, 0x98, 0xa8, 0xc0, 0x24, 0x00, 0x27, 0x38, 0x94, 0x99, 0x8c, 0xe3, + 0xfd, 0x59, 0xc4, 0x50, 0xd5, 0xc1, 0xb2, 0xb1, 0xf1, 0x19, 0x4c, 0xcb, 
0xed, 0x4d, 0x98, 0x54, + 0x07, 0x21, 0x1b, 0x13, 0x34, 0x0f, 0xc3, 0x8c, 0x7d, 0x12, 0x02, 0xb3, 0x32, 0xed, 0x02, 0x63, + 0xad, 0x30, 0x2f, 0x67, 0x5d, 0xf0, 0xde, 0x25, 0x4b, 0xfb, 0x31, 0xe1, 0xcf, 0xf4, 0xa2, 0xd6, + 0x05, 0x09, 0xc0, 0x09, 0x8e, 0xfd, 0x7f, 0x39, 0x1b, 0x9a, 0x9c, 0xb6, 0x03, 0xdc, 0x2f, 0xcf, + 0x40, 0x69, 0x27, 0x88, 0x62, 0x8a, 0xcd, 0xda, 0x18, 0x4e, 0x18, 0xcf, 0x6b, 0xa2, 0x1c, 0x2b, + 0x0c, 0xf4, 0x2a, 0x4c, 0x34, 0xf4, 0x06, 0xc4, 0xe5, 0xa8, 0x8e, 0x11, 0xa3, 0x75, 0x6c, 0xe2, + 0xa2, 0x57, 0xa0, 0xc4, 0xcc, 0x3a, 0x1a, 0x41, 0x53, 0x70, 0x6d, 0xf2, 0x86, 0x2f, 0xd5, 0x44, + 0xf9, 0xa1, 0xf6, 0x1b, 0x2b, 0x6c, 0x74, 0x09, 0x46, 0x68, 0x17, 0xaa, 0x35, 0x71, 0x2d, 0x29, + 0xd9, 0xcf, 0x35, 0x56, 0x8a, 0x05, 0xd4, 0xfe, 0xab, 0x05, 0x6d, 0x94, 0xe9, 0x13, 0x97, 0xa0, + 0x1a, 0x8c, 0xde, 0x75, 0xbc, 0xd8, 0xf3, 0xb7, 0x05, 0xff, 0xf1, 0x54, 0xcf, 0x3b, 0x8a, 0x55, + 0xba, 0xc3, 0x2b, 0xf0, 0x5b, 0x54, 0xfc, 0xc1, 0x92, 0x0c, 0xa5, 0x18, 0x76, 0x7c, 0x9f, 0x52, + 0x2c, 0x0c, 0x4a, 0x11, 0xf3, 0x0a, 0x9c, 0xa2, 0xf8, 0x83, 0x25, 0x19, 0xf4, 0x16, 0x80, 0xdc, + 0x61, 0xc4, 0x15, 0xe6, 0x14, 0xcf, 0xf4, 0x27, 0xba, 0xa1, 0xea, 0x2c, 0x4d, 0xd2, 0x3b, 0x3a, + 0xf9, 0x8f, 0x35, 0x7a, 0x76, 0xcc, 0xf8, 0xb4, 0xee, 0xce, 0xa0, 0x6f, 0xa6, 0x4b, 0xdc, 0x09, + 0x63, 0xe2, 0x2e, 0xc6, 0x62, 0x70, 0x3e, 0x3a, 0xd8, 0x23, 0x65, 0xc3, 0x6b, 0x11, 0x7d, 0x3b, + 0x08, 0x22, 0x38, 0xa1, 0x67, 0xff, 0x62, 0x11, 0x66, 0xf3, 0xba, 0x4b, 0x17, 0x1d, 0xb9, 0xe7, + 0xc5, 0xcb, 0x94, 0xbd, 0xb2, 0xcc, 0x45, 0xb7, 0x22, 0xca, 0xb1, 0xc2, 0xa0, 0xb3, 0x1f, 0x79, + 0xdb, 0xf2, 0x8d, 0x39, 0x9c, 0xcc, 0x7e, 0x9d, 0x95, 0x62, 0x01, 0xa5, 0x78, 0x21, 0x71, 0x22, + 0x61, 0xaf, 0xa3, 0xad, 0x12, 0xcc, 0x4a, 0xb1, 0x80, 0xea, 0x02, 0xac, 0xa1, 0x3e, 0x02, 0x2c, + 0x63, 0x88, 0x86, 0x8f, 0x77, 0x88, 0xd0, 0xe7, 0x00, 0xb6, 0x3c, 0xdf, 0x8b, 0x76, 0x18, 0xf5, + 0x91, 0x23, 0x53, 0x57, 0xcc, 0xd9, 0xaa, 0xa2, 0x82, 0x35, 0x8a, 0xe8, 0x25, 0x18, 0x53, 0x1b, + 0xb0, 0x5a, 0x61, 0xca, 0x4b, 0xcd, 0x18, 0x24, 0x39, 0x8d, 0x2a, 0x58, 0xc7, 0xb3, 0xdf, 0x4e, + 0xaf, 0x17, 0xb1, 0x03, 0xb4, 0xf1, 0xb5, 0x06, 0x1d, 0xdf, 0x42, 0xef, 0xf1, 0xb5, 0xbf, 0x5a, + 0x84, 0x29, 0xa3, 0xb1, 0x4e, 0x34, 0xc0, 0x99, 0x75, 0x95, 0x1e, 0xe0, 0x4e, 0x4c, 0xc4, 0xfe, + 0xb3, 0xfb, 0x6f, 0x15, 0xfd, 0x90, 0xa7, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x83, 0x72, 0xd3, 0x89, + 0x98, 0x30, 0x8c, 0x88, 0x7d, 0x37, 0x08, 0xb1, 0xe4, 0x61, 0xe2, 0x44, 0xb1, 0x76, 0x6b, 0x72, + 0xda, 0x09, 0x49, 0x7a, 0xd3, 0x50, 0xfe, 0x44, 0x1a, 0x84, 0xa9, 0x4e, 0x50, 0x26, 0x66, 0x1f, + 0x73, 0x18, 0x7a, 0x05, 0xc6, 0x43, 0xc2, 0x56, 0xc5, 0x32, 0xe5, 0xe6, 0xd8, 0x32, 0x1b, 0x4e, + 0xd8, 0x3e, 0xac, 0xc1, 0xb0, 0x81, 0x99, 0xbc, 0x0d, 0x46, 0x7a, 0xbc, 0x0d, 0x9e, 0x82, 0x51, + 0xf6, 0x43, 0xad, 0x00, 0x35, 0x1b, 0x55, 0x5e, 0x8c, 0x25, 0x3c, 0xbd, 0x60, 0x4a, 0x83, 0x2d, + 0x18, 0xfa, 0xfa, 0x10, 0x8b, 0x9a, 0x29, 0x8e, 0x4b, 0xfc, 0x94, 0x13, 0x4b, 0x1e, 0x4b, 0x98, + 0xfd, 0x51, 0x98, 0xac, 0x38, 0xa4, 0x15, 0xf8, 0x2b, 0xbe, 0xdb, 0x0e, 0x3c, 0x3f, 0x46, 0xb3, + 0x30, 0xc4, 0x2e, 0x11, 0x7e, 0x04, 0x0c, 0xd1, 0x86, 0xf0, 0x10, 0x7d, 0x10, 0xd8, 0xdb, 0x70, + 0xa6, 0x12, 0xdc, 0xf5, 0xef, 0x3a, 0xa1, 0xbb, 0x58, 0xab, 0x6a, 0xef, 0xeb, 0x75, 0xf9, 0xbe, + 0xe3, 0x76, 0x58, 0x99, 0x47, 0xaf, 0x56, 0x93, 0xb3, 0xb5, 0xab, 0x5e, 0x93, 0xe4, 0x48, 0x41, + 0xfe, 0x46, 0xc1, 0x68, 0x29, 0xc1, 0x57, 0x8a, 0x2a, 0x2b, 0x57, 0x51, 0xf5, 0x06, 0x94, 0xb6, + 0x3c, 0xd2, 0x74, 0x31, 0xd9, 0x12, 0x2b, 0xf1, 0xc9, 0x7c, 0xd3, 0x92, 0x55, 0x8a, 0x29, 0xa5, + 
0x5e, 0xfc, 0x75, 0xb8, 0x2a, 0x2a, 0x63, 0x45, 0x06, 0xed, 0xc2, 0xb4, 0x7c, 0x30, 0x48, 0xa8, + 0x58, 0x97, 0x4f, 0xf5, 0x7a, 0x85, 0x98, 0xc4, 0x4f, 0xdf, 0x3f, 0x98, 0x9f, 0xc6, 0x29, 0x32, + 0xb8, 0x8b, 0x30, 0x7d, 0x0e, 0xb6, 0xe8, 0x09, 0x3c, 0xc4, 0x86, 0x9f, 0x3d, 0x07, 0xd9, 0xcb, + 0x96, 0x95, 0xda, 0x3f, 0x62, 0xc1, 0x23, 0x5d, 0x23, 0x23, 0x5e, 0xf8, 0xc7, 0x3c, 0x0b, 0xe9, + 0x17, 0x77, 0xa1, 0xff, 0x8b, 0xdb, 0xfe, 0x19, 0x0b, 0x4e, 0xaf, 0xb4, 0xda, 0xf1, 0x7e, 0xc5, + 0x33, 0xb5, 0x4a, 0x2f, 0xc3, 0x48, 0x8b, 0xb8, 0x5e, 0xa7, 0x25, 0x66, 0x6e, 0x5e, 0x9e, 0x52, + 0x6b, 0xac, 0xf4, 0xf0, 0x60, 0x7e, 0xa2, 0x1e, 0x07, 0xa1, 0xb3, 0x4d, 0x78, 0x01, 0x16, 0xe8, + 0xec, 0xac, 0xf7, 0xde, 0x25, 0x37, 0xbc, 0x96, 0x27, 0x4d, 0x85, 0x7a, 0xca, 0xec, 0x16, 0xe4, + 0x80, 0x2e, 0xbc, 0xd1, 0x71, 0xfc, 0xd8, 0x8b, 0xf7, 0x85, 0x42, 0x48, 0x12, 0xc1, 0x09, 0x3d, + 0xfb, 0x2b, 0x16, 0x4c, 0xc9, 0x75, 0xbf, 0xe8, 0xba, 0x21, 0x89, 0x22, 0x34, 0x07, 0x05, 0xaf, + 0x2d, 0x7a, 0x09, 0xa2, 0x97, 0x85, 0x6a, 0x0d, 0x17, 0xbc, 0xb6, 0x64, 0xcb, 0xd8, 0x41, 0x58, + 0x34, 0x75, 0x63, 0xd7, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x32, 0x94, 0xfc, 0xc0, 0xe5, 0xe6, 0x5a, + 0xfc, 0x4a, 0x63, 0x0b, 0x6c, 0x5d, 0x94, 0x61, 0x05, 0x45, 0x35, 0x28, 0x73, 0x4b, 0xa6, 0x64, + 0xd1, 0x0e, 0x64, 0x0f, 0xc5, 0xbe, 0x6c, 0x43, 0xd6, 0xc4, 0x09, 0x11, 0xfb, 0xfb, 0x2d, 0x18, + 0x97, 0x5f, 0x36, 0x20, 0xcf, 0x49, 0xb7, 0x56, 0xc2, 0x6f, 0x26, 0x5b, 0x8b, 0xf2, 0x8c, 0x0c, + 0x62, 0xb0, 0x8a, 0xc5, 0xa3, 0xb0, 0x8a, 0xf6, 0x0f, 0x17, 0x60, 0x52, 0x76, 0xa7, 0xde, 0xd9, + 0x8c, 0x48, 0x8c, 0x36, 0xa0, 0xec, 0xf0, 0x21, 0x27, 0x72, 0xc5, 0x3e, 0x91, 0x2d, 0x14, 0x30, + 0xe6, 0x27, 0xb9, 0xbd, 0x17, 0x65, 0x6d, 0x9c, 0x10, 0x42, 0x4d, 0x98, 0xf1, 0x83, 0x98, 0x9d, + 0xe4, 0x0a, 0xde, 0x4b, 0xf5, 0x92, 0xa6, 0x7e, 0x4e, 0x50, 0x9f, 0x59, 0x4f, 0x53, 0xc1, 0xdd, + 0x84, 0xd1, 0x8a, 0x14, 0xb4, 0x14, 0xf3, 0x5f, 0xf6, 0xfa, 0x2c, 0x64, 0xcb, 0x59, 0xec, 0x5f, + 0xb1, 0xa0, 0x2c, 0xd1, 0x4e, 0x42, 0xcb, 0xb6, 0x06, 0xa3, 0x11, 0x9b, 0x04, 0x39, 0x34, 0x76, + 0xaf, 0x8e, 0xf3, 0xf9, 0x4a, 0x2e, 0x28, 0xfe, 0x3f, 0xc2, 0x92, 0x06, 0x93, 0xb3, 0xab, 0xee, + 0xbf, 0x4f, 0xe4, 0xec, 0xaa, 0x3f, 0x39, 0x37, 0xcc, 0x1f, 0xb0, 0x3e, 0x6b, 0x82, 0x2b, 0xca, + 0x47, 0xb5, 0x43, 0xb2, 0xe5, 0xdd, 0x4b, 0xf3, 0x51, 0x35, 0x56, 0x8a, 0x05, 0x14, 0xbd, 0x05, + 0xe3, 0x0d, 0x29, 0x60, 0x4d, 0xb6, 0xeb, 0xa5, 0x9e, 0xc2, 0x7e, 0xa5, 0x17, 0xe2, 0x82, 0x8d, + 0x65, 0xad, 0x3e, 0x36, 0xa8, 0x99, 0x6a, 0xfe, 0x62, 0x3f, 0x35, 0x7f, 0x42, 0x37, 0x5f, 0xe9, + 0xfd, 0xa3, 0x16, 0x8c, 0x70, 0xc1, 0xda, 0x60, 0x72, 0x4d, 0x4d, 0x4d, 0x96, 0x8c, 0xdd, 0x6d, + 0x5a, 0x28, 0xd4, 0x5e, 0x68, 0x0d, 0xca, 0xec, 0x07, 0x13, 0x0c, 0x16, 0xf3, 0xad, 0xe2, 0x79, + 0xab, 0x7a, 0x07, 0x6f, 0xcb, 0x6a, 0x38, 0xa1, 0x60, 0xff, 0x60, 0x91, 0x1e, 0x55, 0x09, 0xaa, + 0x71, 0x83, 0x5b, 0x0f, 0xef, 0x06, 0x2f, 0x3c, 0xac, 0x1b, 0x7c, 0x1b, 0xa6, 0x1a, 0x9a, 0x52, + 0x2d, 0x99, 0xc9, 0xcb, 0x3d, 0x17, 0x89, 0xa6, 0x7f, 0xe3, 0x22, 0x93, 0x65, 0x93, 0x08, 0x4e, + 0x53, 0x45, 0xdf, 0x0c, 0xe3, 0x7c, 0x9e, 0x45, 0x2b, 0xdc, 0x52, 0xe2, 0x23, 0xf9, 0xeb, 0x45, + 0x6f, 0x82, 0x8b, 0xd8, 0xb4, 0xea, 0xd8, 0x20, 0x66, 0xff, 0xb1, 0x05, 0x68, 0xa5, 0xbd, 0x43, + 0x5a, 0x24, 0x74, 0x9a, 0x89, 0x6c, 0xfc, 0x2f, 0x5b, 0x30, 0x4b, 0xba, 0x8a, 0x97, 0x83, 0x56, + 0x4b, 0xbc, 0x40, 0x72, 0x1e, 0xc9, 0x2b, 0x39, 0x75, 0x94, 0xdb, 0xc0, 0x6c, 0x1e, 0x06, 0xce, + 0x6d, 0x0f, 0xad, 0xc1, 0x29, 0x7e, 0xe5, 0x29, 0x80, 0x66, 0x1b, 0xfd, 0xa8, 0x20, 0x7c, 0x6a, + 0xa3, 0x1b, 0x05, 0x67, 
0xd5, 0xb3, 0xbf, 0x73, 0x1c, 0x72, 0x7b, 0xf1, 0x81, 0x52, 0xe0, 0x03, + 0xa5, 0xc0, 0x07, 0x4a, 0x81, 0x0f, 0x94, 0x02, 0x1f, 0x28, 0x05, 0xbe, 0xee, 0x95, 0x02, 0x7f, + 0x68, 0xc1, 0xa9, 0xee, 0x6b, 0xe0, 0x24, 0x18, 0xf3, 0x0e, 0x9c, 0xea, 0xbe, 0xeb, 0x7a, 0xda, + 0xc1, 0x75, 0xf7, 0x33, 0xb9, 0xf7, 0x32, 0xbe, 0x01, 0x67, 0xd1, 0xb7, 0x7f, 0xb1, 0x04, 0xc3, + 0x2b, 0x7b, 0xc4, 0x8f, 0x4f, 0xe0, 0x13, 0x1b, 0x30, 0xe9, 0xf9, 0x7b, 0x41, 0x73, 0x8f, 0xb8, + 0x1c, 0x7e, 0x94, 0xf7, 0xee, 0x59, 0x41, 0x7a, 0xb2, 0x6a, 0x90, 0xc0, 0x29, 0x92, 0x0f, 0x43, + 0xe6, 0x7c, 0x15, 0x46, 0xf8, 0xed, 0x20, 0x04, 0xce, 0x99, 0x97, 0x01, 0x1b, 0x44, 0x71, 0xe7, + 0x25, 0xf2, 0x70, 0x7e, 0xfb, 0x88, 0xea, 0xe8, 0x6d, 0x98, 0xdc, 0xf2, 0xc2, 0x28, 0xde, 0xf0, + 0x5a, 0x24, 0x8a, 0x9d, 0x56, 0xfb, 0x01, 0x64, 0xcc, 0x6a, 0x1c, 0x56, 0x0d, 0x4a, 0x38, 0x45, + 0x19, 0x6d, 0xc3, 0x44, 0xd3, 0xd1, 0x9b, 0x1a, 0x3d, 0x72, 0x53, 0xea, 0xda, 0xb9, 0xa1, 0x13, + 0xc2, 0x26, 0x5d, 0xba, 0x4f, 0x1b, 0x4c, 0x4c, 0x5a, 0x62, 0xc2, 0x03, 0xb5, 0x4f, 0xb9, 0x7c, + 0x94, 0xc3, 0x28, 0x07, 0xc5, 0x2c, 0x63, 0xcb, 0x26, 0x07, 0xa5, 0xd9, 0xbf, 0x7e, 0x1e, 0xca, + 0x84, 0x0e, 0x21, 0x25, 0x2c, 0x6e, 0xae, 0x2b, 0x83, 0xf5, 0x75, 0xcd, 0x6b, 0x84, 0x81, 0x29, + 0xdd, 0x5f, 0x91, 0x94, 0x70, 0x42, 0x14, 0x2d, 0xc3, 0x48, 0x44, 0x42, 0x8f, 0x44, 0xe2, 0x0e, + 0xeb, 0x31, 0x8d, 0x0c, 0x8d, 0x3b, 0x95, 0xf0, 0xdf, 0x58, 0x54, 0xa5, 0xcb, 0xcb, 0x61, 0x82, + 0x4f, 0x76, 0xcb, 0x68, 0xcb, 0x6b, 0x91, 0x95, 0x62, 0x01, 0x45, 0xaf, 0xc3, 0x68, 0x48, 0x9a, + 0x4c, 0x7d, 0x34, 0x31, 0xf8, 0x22, 0xe7, 0xda, 0x28, 0x5e, 0x0f, 0x4b, 0x02, 0xe8, 0x3a, 0xa0, + 0x90, 0x50, 0x0e, 0xcc, 0xf3, 0xb7, 0x95, 0xbd, 0xa8, 0x38, 0xc1, 0xd5, 0x8e, 0xc7, 0x09, 0x86, + 0xf4, 0xef, 0xc1, 0x19, 0xd5, 0xd0, 0x55, 0x98, 0x51, 0xa5, 0x55, 0x3f, 0x8a, 0x1d, 0x7a, 0x72, + 0x4e, 0x31, 0x5a, 0x4a, 0x00, 0x82, 0xd3, 0x08, 0xb8, 0xbb, 0x8e, 0xfd, 0x53, 0x16, 0xf0, 0x71, + 0x3e, 0x81, 0x67, 0xff, 0x6b, 0xe6, 0xb3, 0xff, 0x5c, 0xee, 0xcc, 0xe5, 0x3c, 0xf9, 0xef, 0x5b, + 0x30, 0xa6, 0xcd, 0x6c, 0xb2, 0x66, 0xad, 0x1e, 0x6b, 0xb6, 0x03, 0xd3, 0x74, 0xa5, 0xdf, 0xdc, + 0x8c, 0x48, 0xb8, 0x47, 0x5c, 0xb6, 0x30, 0x0b, 0x0f, 0xb6, 0x30, 0x95, 0x21, 0xdb, 0x8d, 0x14, + 0x41, 0xdc, 0xd5, 0x04, 0x7a, 0x59, 0xea, 0x52, 0x8a, 0x86, 0x1d, 0x38, 0xd7, 0x93, 0x1c, 0x1e, + 0xcc, 0x4f, 0x6b, 0x1f, 0xa2, 0xeb, 0x4e, 0xec, 0xcf, 0xcb, 0x6f, 0x54, 0x06, 0x83, 0x0d, 0xb5, + 0x58, 0x52, 0x06, 0x83, 0x6a, 0x39, 0xe0, 0x04, 0x87, 0xee, 0xd1, 0x9d, 0x20, 0x8a, 0xd3, 0x06, + 0x83, 0xd7, 0x82, 0x28, 0xc6, 0x0c, 0x62, 0xbf, 0x00, 0xb0, 0x72, 0x8f, 0x34, 0xf8, 0x52, 0xd7, + 0x9f, 0x33, 0x56, 0xfe, 0x73, 0xc6, 0xfe, 0x0f, 0x16, 0x4c, 0xae, 0x2e, 0x1b, 0x12, 0xe1, 0x05, + 0x00, 0xfe, 0x06, 0xbb, 0x73, 0x67, 0x5d, 0x6a, 0xdb, 0xb9, 0xc2, 0x54, 0x95, 0x62, 0x0d, 0x03, + 0x9d, 0x83, 0x62, 0xb3, 0xe3, 0x0b, 0xe9, 0xe4, 0x28, 0xbd, 0xb0, 0x6f, 0x74, 0x7c, 0x4c, 0xcb, + 0x34, 0x27, 0x84, 0xe2, 0xc0, 0x4e, 0x08, 0x7d, 0x83, 0x01, 0xa0, 0x79, 0x18, 0xbe, 0x7b, 0xd7, + 0x73, 0xb9, 0xcb, 0xa5, 0xb0, 0x04, 0xb8, 0x73, 0xa7, 0x5a, 0x89, 0x30, 0x2f, 0xb7, 0xbf, 0x54, + 0x84, 0xb9, 0xd5, 0x26, 0xb9, 0xf7, 0x1e, 0xdd, 0x4e, 0x07, 0x75, 0xa1, 0x38, 0x9a, 0x68, 0xe8, + 0xa8, 0x6e, 0x32, 0xfd, 0xc7, 0x63, 0x0b, 0x46, 0xb9, 0xbd, 0x9c, 0x74, 0x42, 0x7d, 0x35, 0xab, + 0xf5, 0xfc, 0x01, 0x59, 0xe0, 0x76, 0x77, 0xc2, 0x87, 0x4e, 0xdd, 0xb4, 0xa2, 0x14, 0x4b, 0xe2, + 0x73, 0x9f, 0x80, 0x71, 0x1d, 0xf3, 0x48, 0x0e, 0x6b, 0x7f, 0xa9, 0x08, 0xd3, 0xb4, 0x07, 0x0f, + 0x75, 0x22, 0x6e, 0x75, 0x4f, 0xc4, 0x71, 0x3b, 
0x2d, 0xf5, 0x9f, 0x8d, 0xb7, 0xd2, 0xb3, 0xf1, + 0x5c, 0xde, 0x6c, 0x9c, 0xf4, 0x1c, 0x7c, 0x87, 0x05, 0xa7, 0x56, 0x9b, 0x41, 0x63, 0x37, 0xe5, + 0x58, 0xf4, 0x12, 0x8c, 0xd1, 0x73, 0x3c, 0x32, 0x7c, 0xde, 0x8d, 0x28, 0x08, 0x02, 0x84, 0x75, + 0x3c, 0xad, 0xda, 0xad, 0x5b, 0xd5, 0x4a, 0x56, 0xf0, 0x04, 0x01, 0xc2, 0x3a, 0x9e, 0xfd, 0x9b, + 0x16, 0x9c, 0xbf, 0xba, 0xbc, 0x92, 0x2c, 0xc5, 0xae, 0xf8, 0x0d, 0x97, 0x60, 0xa4, 0xed, 0x6a, + 0x5d, 0x49, 0x04, 0xbe, 0x15, 0xd6, 0x0b, 0x01, 0x7d, 0xbf, 0xc4, 0x26, 0xf9, 0x49, 0x0b, 0x4e, + 0x5d, 0xf5, 0x62, 0x7a, 0x2d, 0xa7, 0x23, 0x09, 0xd0, 0x7b, 0x39, 0xf2, 0xe2, 0x20, 0xdc, 0x4f, + 0x47, 0x12, 0xc0, 0x0a, 0x82, 0x35, 0x2c, 0xde, 0xf2, 0x9e, 0xc7, 0x2c, 0xb5, 0x0b, 0xa6, 0x1e, + 0x0b, 0x8b, 0x72, 0xac, 0x30, 0xe8, 0x87, 0xb9, 0x5e, 0xc8, 0xa4, 0x86, 0xfb, 0xe2, 0x84, 0x55, + 0x1f, 0x56, 0x91, 0x00, 0x9c, 0xe0, 0xd0, 0x07, 0xd4, 0xfc, 0xd5, 0x66, 0x27, 0x8a, 0x49, 0xb8, + 0x15, 0xe5, 0x9c, 0x8e, 0x2f, 0x40, 0x99, 0x48, 0x19, 0xbd, 0xe8, 0xb5, 0x62, 0x35, 0x95, 0xf0, + 0x9e, 0x07, 0x34, 0x50, 0x78, 0x03, 0xb8, 0x29, 0x1e, 0xcd, 0xcf, 0x6c, 0x15, 0x10, 0xd1, 0xdb, + 0xd2, 0x23, 0x3c, 0x30, 0x57, 0xf1, 0x95, 0x2e, 0x28, 0xce, 0xa8, 0x61, 0xff, 0x88, 0x05, 0x67, + 0xd4, 0x07, 0xbf, 0xef, 0x3e, 0xd3, 0xfe, 0xb9, 0x02, 0x4c, 0x5c, 0xdb, 0xd8, 0xa8, 0x5d, 0x25, + 0xb1, 0xb8, 0xb6, 0xfb, 0xab, 0xd1, 0xb1, 0xa6, 0x0d, 0xec, 0xf5, 0x0a, 0xec, 0xc4, 0x5e, 0x73, + 0x81, 0x07, 0x0a, 0x5a, 0xa8, 0xfa, 0xf1, 0xcd, 0xb0, 0x1e, 0x87, 0x9e, 0xbf, 0x9d, 0xa9, 0x3f, + 0x94, 0xcc, 0x45, 0x31, 0x8f, 0xb9, 0x40, 0x2f, 0xc0, 0x08, 0x8b, 0x54, 0x24, 0x27, 0xe1, 0x51, + 0xf5, 0x88, 0x62, 0xa5, 0x87, 0x07, 0xf3, 0xe5, 0x5b, 0xb8, 0xca, 0xff, 0x60, 0x81, 0x8a, 0x6e, + 0xc1, 0xd8, 0x4e, 0x1c, 0xb7, 0xaf, 0x11, 0xc7, 0xa5, 0xaf, 0x65, 0x7e, 0x1c, 0x5e, 0xc8, 0x3a, + 0x0e, 0xe9, 0x20, 0x70, 0xb4, 0xe4, 0x04, 0x49, 0xca, 0x22, 0xac, 0xd3, 0xb1, 0xeb, 0x00, 0x09, + 0xec, 0x98, 0x74, 0x27, 0xf6, 0xef, 0x5b, 0x30, 0xca, 0x83, 0x46, 0x84, 0xe8, 0x93, 0x30, 0x44, + 0xee, 0x91, 0x86, 0x60, 0x95, 0x33, 0x3b, 0x9c, 0x70, 0x5a, 0x5c, 0x06, 0x4c, 0xff, 0x63, 0x56, + 0x0b, 0x5d, 0x83, 0x51, 0xda, 0xdb, 0xab, 0x2a, 0x82, 0xc6, 0xe3, 0x79, 0x5f, 0xac, 0xa6, 0x9d, + 0x33, 0x67, 0xa2, 0x08, 0xcb, 0xea, 0x4c, 0xfb, 0xdc, 0x68, 0xd7, 0xe9, 0x89, 0x1d, 0xf7, 0x62, + 0x2c, 0x36, 0x96, 0x6b, 0x1c, 0x49, 0x50, 0xe3, 0xda, 0x67, 0x59, 0x88, 0x13, 0x22, 0xf6, 0x06, + 0x94, 0xe9, 0xa4, 0x2e, 0x36, 0x3d, 0xa7, 0xb7, 0x42, 0xfd, 0x69, 0x28, 0x4b, 0x75, 0x79, 0x24, + 0x9c, 0xc5, 0x19, 0x55, 0xa9, 0x4d, 0x8f, 0x70, 0x02, 0xb7, 0xb7, 0xe0, 0x34, 0x33, 0x7e, 0x74, + 0xe2, 0x1d, 0x63, 0x8f, 0xf5, 0x5f, 0xcc, 0xcf, 0x88, 0x97, 0x27, 0x9f, 0x99, 0x59, 0xcd, 0x1f, + 0x73, 0x5c, 0x52, 0x4c, 0x5e, 0xa1, 0xf6, 0x57, 0x87, 0xe0, 0xd1, 0x6a, 0x3d, 0x3f, 0x9e, 0xc8, + 0x2b, 0x30, 0xce, 0xf9, 0x52, 0xba, 0xb4, 0x9d, 0xa6, 0x68, 0x57, 0x09, 0x7f, 0x37, 0x34, 0x18, + 0x36, 0x30, 0xd1, 0x79, 0x28, 0x7a, 0xef, 0xf8, 0x69, 0xd7, 0xa6, 0xea, 0x1b, 0xeb, 0x98, 0x96, + 0x53, 0x30, 0x65, 0x71, 0xf9, 0xdd, 0xa1, 0xc0, 0x8a, 0xcd, 0x7d, 0x0d, 0x26, 0xbd, 0xa8, 0x11, + 0x79, 0x55, 0x9f, 0x9e, 0x33, 0xda, 0x49, 0xa5, 0xa4, 0x22, 0xb4, 0xd3, 0x0a, 0x8a, 0x53, 0xd8, + 0xda, 0x45, 0x36, 0x3c, 0x30, 0x9b, 0xdc, 0xd7, 0x7b, 0x9a, 0xbe, 0x00, 0xda, 0xec, 0xeb, 0x22, + 0x26, 0xc5, 0x17, 0x2f, 0x00, 0xfe, 0xc1, 0x11, 0x96, 0x30, 0xfa, 0xe4, 0x6c, 0xec, 0x38, 0xed, + 0xc5, 0x4e, 0xbc, 0x53, 0xf1, 0xa2, 0x46, 0xb0, 0x47, 0xc2, 0x7d, 0x26, 0x2d, 0x28, 0x25, 0x4f, + 0x4e, 0x05, 0x58, 0xbe, 0xb6, 0x58, 0xa3, 0x98, 0xb8, 0xbb, 0x0e, 0x5a, 
0x84, 0x29, 0x59, 0x58, + 0x27, 0x11, 0xbb, 0xc2, 0xc6, 0x18, 0x19, 0xe5, 0x6c, 0x24, 0x8a, 0x15, 0x91, 0x34, 0xbe, 0xc9, + 0x49, 0xc3, 0x71, 0x70, 0xd2, 0x2f, 0xc3, 0x84, 0xe7, 0x7b, 0xb1, 0xe7, 0xc4, 0x01, 0x57, 0x41, + 0x71, 0xc1, 0x00, 0x93, 0xad, 0x57, 0x75, 0x00, 0x36, 0xf1, 0xec, 0xff, 0x36, 0x04, 0x33, 0x6c, + 0xda, 0x3e, 0x58, 0x61, 0x5f, 0x4f, 0x2b, 0xec, 0x56, 0xf7, 0x0a, 0x3b, 0x8e, 0x27, 0xc2, 0x03, + 0x2f, 0xb3, 0xb7, 0xa1, 0xac, 0xfc, 0xab, 0xa4, 0x83, 0xa5, 0x95, 0xe3, 0x60, 0xd9, 0x9f, 0xfb, + 0x90, 0x26, 0x6a, 0xc5, 0x4c, 0x13, 0xb5, 0xbf, 0x65, 0x41, 0xa2, 0x53, 0x41, 0xd7, 0xa0, 0xdc, + 0x0e, 0x98, 0xe5, 0x65, 0x28, 0xcd, 0x99, 0x1f, 0xcd, 0xbc, 0xa8, 0xf8, 0xa5, 0xc8, 0x3f, 0xbe, + 0x26, 0x6b, 0xe0, 0xa4, 0x32, 0x5a, 0x82, 0xd1, 0x76, 0x48, 0xea, 0x31, 0x0b, 0x2b, 0xd2, 0x97, + 0x0e, 0x5f, 0x23, 0x1c, 0x1f, 0xcb, 0x8a, 0xf6, 0xcf, 0x5b, 0x00, 0xdc, 0x0a, 0xcc, 0xf1, 0xb7, + 0xc9, 0x09, 0x88, 0xbb, 0x2b, 0x30, 0x14, 0xb5, 0x49, 0xa3, 0x97, 0x4d, 0x6c, 0xd2, 0x9f, 0x7a, + 0x9b, 0x34, 0x92, 0x01, 0xa7, 0xff, 0x30, 0xab, 0x6d, 0x7f, 0x17, 0xc0, 0x64, 0x82, 0x56, 0x8d, + 0x49, 0x0b, 0x3d, 0x6b, 0x84, 0x19, 0x38, 0x97, 0x0a, 0x33, 0x50, 0x66, 0xd8, 0x9a, 0x64, 0xf5, + 0x6d, 0x28, 0xb6, 0x9c, 0x7b, 0x42, 0x74, 0xf6, 0x74, 0xef, 0x6e, 0x50, 0xfa, 0x0b, 0x6b, 0xce, + 0x3d, 0xfe, 0x48, 0x7c, 0x5a, 0x2e, 0x90, 0x35, 0xe7, 0xde, 0x21, 0xb7, 0x7c, 0x65, 0x87, 0xd4, + 0x0d, 0x2f, 0x8a, 0xbf, 0xf0, 0x5f, 0x93, 0xff, 0x6c, 0xd9, 0xd1, 0x46, 0x58, 0x5b, 0x9e, 0x2f, + 0x6c, 0xa2, 0x06, 0x6a, 0xcb, 0xf3, 0xd3, 0x6d, 0x79, 0xfe, 0x00, 0x6d, 0x79, 0x3e, 0x7a, 0x17, + 0x46, 0x85, 0xfd, 0xa1, 0x08, 0xeb, 0x73, 0x65, 0x80, 0xf6, 0x84, 0xf9, 0x22, 0x6f, 0xf3, 0x8a, + 0x7c, 0x04, 0x8b, 0xd2, 0xbe, 0xed, 0xca, 0x06, 0xd1, 0x5f, 0xb7, 0x60, 0x52, 0xfc, 0xc6, 0xe4, + 0x9d, 0x0e, 0x89, 0x62, 0xc1, 0x7b, 0x7e, 0x7c, 0xf0, 0x3e, 0x88, 0x8a, 0xbc, 0x2b, 0x1f, 0x97, + 0xc7, 0xac, 0x09, 0xec, 0xdb, 0xa3, 0x54, 0x2f, 0xd0, 0x3f, 0xb0, 0xe0, 0x74, 0xcb, 0xb9, 0xc7, + 0x5b, 0xe4, 0x65, 0xd8, 0x89, 0xbd, 0x40, 0xa8, 0xfe, 0x3f, 0x39, 0xd8, 0xf4, 0x77, 0x55, 0xe7, + 0x9d, 0x94, 0xfa, 0xc9, 0xd3, 0x59, 0x28, 0x7d, 0xbb, 0x9a, 0xd9, 0xaf, 0xb9, 0x2d, 0x28, 0xc9, + 0xf5, 0x96, 0x21, 0x6a, 0xa8, 0xe8, 0x8c, 0xf5, 0x91, 0xcd, 0x3f, 0x75, 0x5f, 0x7f, 0xda, 0x8e, + 0x58, 0x6b, 0x0f, 0xb5, 0x9d, 0xb7, 0x61, 0x5c, 0x5f, 0x63, 0x0f, 0xb5, 0xad, 0x77, 0xe0, 0x54, + 0xc6, 0x5a, 0x7a, 0xa8, 0x4d, 0xde, 0x85, 0x73, 0xb9, 0xeb, 0xe3, 0x61, 0x36, 0x6c, 0xff, 0x9c, + 0xa5, 0x9f, 0x83, 0x27, 0xa0, 0x73, 0x58, 0x36, 0x75, 0x0e, 0x17, 0x7a, 0xef, 0x9c, 0x1c, 0xc5, + 0xc3, 0x5b, 0x7a, 0xa7, 0xe9, 0xa9, 0x8e, 0x5e, 0x87, 0x91, 0x26, 0x2d, 0x91, 0x86, 0xaf, 0x76, + 0xff, 0x1d, 0x99, 0xf0, 0x52, 0xac, 0x3c, 0xc2, 0x82, 0x82, 0xfd, 0x4b, 0x16, 0x0c, 0x9d, 0xc0, + 0x48, 0x60, 0x73, 0x24, 0x9e, 0xcd, 0x25, 0x2d, 0x22, 0x0e, 0x2f, 0x60, 0xe7, 0xee, 0xca, 0xbd, + 0x98, 0xf8, 0x11, 0x7b, 0x2a, 0x66, 0x0e, 0xcc, 0xb7, 0xc0, 0xa9, 0x1b, 0x81, 0xe3, 0x2e, 0x39, + 0x4d, 0xc7, 0x6f, 0x90, 0xb0, 0xea, 0x6f, 0x1f, 0xc9, 0x02, 0xbb, 0xd0, 0xcf, 0x02, 0xdb, 0xde, + 0x01, 0xa4, 0x37, 0x20, 0x5c, 0x59, 0x30, 0x8c, 0x7a, 0xbc, 0x29, 0x31, 0xfc, 0x4f, 0x66, 0xb3, + 0x66, 0x5d, 0x3d, 0xd3, 0x9c, 0x34, 0x78, 0x01, 0x96, 0x84, 0xec, 0x57, 0x20, 0xd3, 0x1f, 0xbe, + 0xbf, 0xd8, 0xc0, 0xfe, 0x0c, 0xcc, 0xb0, 0x9a, 0x47, 0x7c, 0xd2, 0xda, 0x29, 0xa9, 0x64, 0x46, + 0xf0, 0x3b, 0xfb, 0x8b, 0x16, 0x4c, 0xad, 0xa7, 0x62, 0x82, 0x5d, 0x62, 0x0a, 0xd0, 0x0c, 0x61, + 0x78, 0x9d, 0x95, 0x62, 0x01, 0x3d, 0x76, 0x19, 0xd4, 0x9f, 0x5b, 0x90, 0x84, 0xa8, 0x38, 0x01, + 
0xc6, 0x6b, 0xd9, 0x60, 0xbc, 0x32, 0x65, 0x23, 0xaa, 0x3b, 0x79, 0x7c, 0x17, 0xba, 0xae, 0xe2, + 0x31, 0xf5, 0x10, 0x8b, 0x24, 0x64, 0x78, 0xf4, 0x9e, 0x49, 0x33, 0x68, 0x93, 0x8c, 0xd0, 0x64, + 0xff, 0xe7, 0x02, 0x20, 0x85, 0x3b, 0x70, 0xbc, 0xa8, 0xee, 0x1a, 0xc7, 0x13, 0x2f, 0x6a, 0x0f, + 0x10, 0x53, 0xe1, 0x87, 0x8e, 0x1f, 0x71, 0xb2, 0x9e, 0x90, 0xba, 0x1d, 0xcd, 0x3e, 0x60, 0x4e, + 0x34, 0x89, 0x6e, 0x74, 0x51, 0xc3, 0x19, 0x2d, 0x68, 0xa6, 0x19, 0xc3, 0x83, 0x9a, 0x66, 0x8c, + 0xf4, 0x71, 0x57, 0xfb, 0x59, 0x0b, 0x26, 0xd4, 0x30, 0xbd, 0x4f, 0xec, 0xcf, 0x55, 0x7f, 0x72, + 0x8e, 0xbe, 0x9a, 0xd6, 0x65, 0x76, 0x25, 0x7c, 0x23, 0x73, 0x3b, 0x74, 0x9a, 0xde, 0xbb, 0x44, + 0x45, 0xeb, 0x9b, 0x17, 0x6e, 0x84, 0xa2, 0xf4, 0xf0, 0x60, 0x7e, 0x42, 0xfd, 0xe3, 0xd1, 0x81, + 0x93, 0x2a, 0xf6, 0x8f, 0xd3, 0xcd, 0x6e, 0x2e, 0x45, 0xf4, 0x12, 0x0c, 0xb7, 0x77, 0x9c, 0x88, + 0xa4, 0x9c, 0x6e, 0x86, 0x6b, 0xb4, 0xf0, 0xf0, 0x60, 0x7e, 0x52, 0x55, 0x60, 0x25, 0x98, 0x63, + 0x0f, 0x1e, 0x85, 0xab, 0x7b, 0x71, 0xf6, 0x8d, 0xc2, 0xf5, 0xc7, 0x16, 0x0c, 0xad, 0x07, 0xee, + 0x49, 0x1c, 0x01, 0xaf, 0x19, 0x47, 0xc0, 0x63, 0x79, 0x81, 0xdb, 0x73, 0x77, 0xff, 0x6a, 0x6a, + 0xf7, 0x5f, 0xc8, 0xa5, 0xd0, 0x7b, 0xe3, 0xb7, 0x60, 0x8c, 0x85, 0x83, 0x17, 0x0e, 0x46, 0x2f, + 0x18, 0x1b, 0x7e, 0x3e, 0xb5, 0xe1, 0xa7, 0x34, 0x54, 0x6d, 0xa7, 0x3f, 0x05, 0xa3, 0xc2, 0xc9, + 0x25, 0xed, 0xbd, 0x29, 0x70, 0xb1, 0x84, 0xdb, 0x3f, 0x5a, 0x04, 0x23, 0xfc, 0x3c, 0xfa, 0x15, + 0x0b, 0x16, 0x42, 0x6e, 0xfc, 0xea, 0x56, 0x3a, 0xa1, 0xe7, 0x6f, 0xd7, 0x1b, 0x3b, 0xc4, 0xed, + 0x34, 0x3d, 0x7f, 0xbb, 0xba, 0xed, 0x07, 0xaa, 0x78, 0xe5, 0x1e, 0x69, 0x74, 0x98, 0xfa, 0xaa, + 0x4f, 0xac, 0x7b, 0x65, 0x44, 0xfe, 0xfc, 0xfd, 0x83, 0xf9, 0x05, 0x7c, 0x24, 0xda, 0xf8, 0x88, + 0x7d, 0x41, 0xbf, 0x69, 0xc1, 0x15, 0x1e, 0x95, 0x7d, 0xf0, 0xfe, 0xf7, 0x78, 0xe7, 0xd6, 0x24, + 0xa9, 0x84, 0xc8, 0x06, 0x09, 0x5b, 0x4b, 0x2f, 0x8b, 0x01, 0xbd, 0x52, 0x3b, 0x5a, 0x5b, 0xf8, + 0xa8, 0x9d, 0xb3, 0xff, 0x45, 0x11, 0x26, 0x44, 0x68, 0x27, 0x71, 0x07, 0xbc, 0x64, 0x2c, 0x89, + 0xc7, 0x53, 0x4b, 0x62, 0xc6, 0x40, 0x3e, 0x9e, 0xe3, 0x3f, 0x82, 0x19, 0x7a, 0x38, 0x5f, 0x23, + 0x4e, 0x18, 0x6f, 0x12, 0x87, 0x5b, 0x5c, 0x15, 0x8f, 0x7c, 0xfa, 0x2b, 0xc1, 0xda, 0x8d, 0x34, + 0x31, 0xdc, 0x4d, 0xff, 0xeb, 0xe9, 0xce, 0xf1, 0x61, 0xba, 0x2b, 0x3a, 0xd7, 0x9b, 0x50, 0x56, + 0x1e, 0x1a, 0xe2, 0xd0, 0xe9, 0x1d, 0xe4, 0x2e, 0x4d, 0x81, 0x0b, 0xbf, 0x12, 0xef, 0xa0, 0x84, + 0x9c, 0xfd, 0x0f, 0x0b, 0x46, 0x83, 0x7c, 0x12, 0xd7, 0xa1, 0xe4, 0x44, 0x91, 0xb7, 0xed, 0x13, + 0x57, 0xec, 0xd8, 0x0f, 0xe7, 0xed, 0x58, 0xa3, 0x19, 0xe6, 0x25, 0xb3, 0x28, 0x6a, 0x62, 0x45, + 0x03, 0x5d, 0xe3, 0x76, 0x6d, 0x7b, 0xf2, 0xa5, 0x36, 0x18, 0x35, 0x90, 0x96, 0x6f, 0x7b, 0x04, + 0x8b, 0xfa, 0xe8, 0xb3, 0xdc, 0xf0, 0xf0, 0xba, 0x1f, 0xdc, 0xf5, 0xaf, 0x06, 0x81, 0x0c, 0x9f, + 0x30, 0x18, 0xc1, 0x19, 0x69, 0x6e, 0xa8, 0xaa, 0x63, 0x93, 0xda, 0x60, 0x11, 0x2c, 0xbf, 0x15, + 0x4e, 0x51, 0xd2, 0xa6, 0x77, 0x73, 0x84, 0x08, 0x4c, 0x89, 0xb8, 0x61, 0xb2, 0x4c, 0x8c, 0x5d, + 0xe6, 0x23, 0xcc, 0xac, 0x9d, 0x48, 0x80, 0xaf, 0x9b, 0x24, 0x70, 0x9a, 0xa6, 0xfd, 0x13, 0x16, + 0x30, 0x4f, 0xcf, 0x13, 0xe0, 0x47, 0x3e, 0x65, 0xf2, 0x23, 0xb3, 0x79, 0x83, 0x9c, 0xc3, 0x8a, + 0xbc, 0xc8, 0x57, 0x56, 0x2d, 0x0c, 0xee, 0xed, 0x0b, 0xa3, 0x8f, 0xfe, 0xef, 0x0f, 0xfb, 0xff, + 0x58, 0xfc, 0x10, 0x53, 0xfe, 0x13, 0xe8, 0xdb, 0xa0, 0xd4, 0x70, 0xda, 0x4e, 0x83, 0xe7, 0x4a, + 0xc9, 0x95, 0xc5, 0x19, 0x95, 0x16, 0x96, 0x45, 0x0d, 0x2e, 0x5b, 0x92, 0xf1, 0xe7, 0x4a, 0xb2, + 0xb8, 0xaf, 0x3c, 0x49, 
0x35, 0x39, 0xb7, 0x0b, 0x13, 0x06, 0xb1, 0x87, 0x2a, 0x88, 0xf8, 0x36, + 0x7e, 0xc5, 0xaa, 0x78, 0x89, 0x2d, 0x98, 0xf1, 0xb5, 0xff, 0xf4, 0x42, 0x91, 0x8f, 0xcb, 0x0f, + 0xf7, 0xbb, 0x44, 0xd9, 0xed, 0xa3, 0xf9, 0x9d, 0xa6, 0xc8, 0xe0, 0x6e, 0xca, 0xf6, 0x8f, 0x59, + 0xf0, 0x88, 0x8e, 0xa8, 0xb9, 0xb6, 0xf4, 0x93, 0xee, 0x57, 0xa0, 0x14, 0xb4, 0x49, 0xe8, 0xc4, + 0x41, 0x28, 0x6e, 0x8d, 0xcb, 0x72, 0xd0, 0x6f, 0x8a, 0xf2, 0x43, 0x11, 0x69, 0x5c, 0x52, 0x97, + 0xe5, 0x58, 0xd5, 0xa4, 0xaf, 0x4f, 0x36, 0x18, 0x91, 0x70, 0x62, 0x62, 0x67, 0x00, 0x53, 0x74, + 0x47, 0x58, 0x40, 0xec, 0xaf, 0x5a, 0x7c, 0x61, 0xe9, 0x5d, 0x47, 0xef, 0xc0, 0x74, 0xcb, 0x89, + 0x1b, 0x3b, 0x2b, 0xf7, 0xda, 0x21, 0xd7, 0x95, 0xc8, 0x71, 0x7a, 0xba, 0xdf, 0x38, 0x69, 0x1f, + 0x99, 0xd8, 0x52, 0xae, 0xa5, 0x88, 0xe1, 0x2e, 0xf2, 0x68, 0x13, 0xc6, 0x58, 0x19, 0xf3, 0xcf, + 0x8b, 0x7a, 0xb1, 0x06, 0x79, 0xad, 0x29, 0x5b, 0x81, 0xb5, 0x84, 0x0e, 0xd6, 0x89, 0xda, 0x3f, + 0x53, 0xe4, 0xbb, 0x9d, 0xb1, 0xf2, 0x4f, 0xc1, 0x68, 0x3b, 0x70, 0x97, 0xab, 0x15, 0x2c, 0x66, + 0x41, 0x5d, 0x23, 0x35, 0x5e, 0x8c, 0x25, 0x1c, 0x5d, 0x86, 0x92, 0xf8, 0x29, 0x75, 0x5b, 0xec, + 0x6c, 0x16, 0x78, 0x11, 0x56, 0x50, 0xf4, 0x3c, 0x40, 0x3b, 0x0c, 0xf6, 0x3c, 0x97, 0x05, 0x81, + 0x28, 0x9a, 0x66, 0x3e, 0x35, 0x05, 0xc1, 0x1a, 0x16, 0x7a, 0x15, 0x26, 0x3a, 0x7e, 0xc4, 0xd9, + 0x11, 0x67, 0x53, 0x04, 0xe5, 0x2e, 0x25, 0x06, 0x28, 0xb7, 0x74, 0x20, 0x36, 0x71, 0xd1, 0x22, + 0x8c, 0xc4, 0x0e, 0x33, 0x5b, 0x19, 0xce, 0xb7, 0xb7, 0xdd, 0xa0, 0x18, 0x7a, 0x5a, 0x0e, 0x5a, + 0x01, 0x8b, 0x8a, 0xe8, 0x4d, 0xe9, 0x2a, 0xcb, 0x0f, 0x76, 0x61, 0xe8, 0x3e, 0xd8, 0x25, 0xa0, + 0x39, 0xca, 0x0a, 0x03, 0x7a, 0x83, 0x16, 0x7a, 0x15, 0x80, 0xdc, 0x8b, 0x49, 0xe8, 0x3b, 0x4d, + 0x65, 0x15, 0xa6, 0xec, 0xa0, 0x2b, 0xc1, 0x7a, 0x10, 0xdf, 0x8a, 0xc8, 0xb7, 0xac, 0x28, 0x14, + 0xac, 0xa1, 0xdb, 0xbf, 0x59, 0x06, 0x48, 0x18, 0x77, 0xf4, 0x6e, 0xd7, 0xc9, 0xf5, 0x4c, 0x6f, + 0x56, 0xff, 0xf8, 0x8e, 0x2d, 0xf4, 0xdd, 0x16, 0x8c, 0x39, 0xcd, 0x66, 0xd0, 0x70, 0x62, 0x36, + 0x45, 0x85, 0xde, 0x27, 0xa7, 0x68, 0x7f, 0x31, 0xa9, 0xc1, 0xbb, 0xf0, 0x82, 0x5c, 0xa2, 0x1a, + 0xa4, 0x6f, 0x2f, 0xf4, 0x86, 0xd1, 0xc7, 0xe4, 0x5b, 0x91, 0xaf, 0xad, 0xb9, 0xf4, 0x5b, 0xb1, + 0xcc, 0x2e, 0x09, 0xfd, 0x99, 0x78, 0xcb, 0x78, 0x26, 0x0e, 0xe5, 0x3b, 0x03, 0x1a, 0xfc, 0x6b, + 0xbf, 0x17, 0x22, 0xaa, 0xe9, 0x81, 0x01, 0x86, 0xf3, 0x3d, 0xef, 0xb4, 0x87, 0x52, 0x9f, 0xa0, + 0x00, 0x6f, 0xc3, 0x94, 0x6b, 0x72, 0x01, 0x62, 0x29, 0x3e, 0x99, 0x47, 0x37, 0xc5, 0x34, 0x24, + 0xf7, 0x7e, 0x0a, 0x80, 0xd3, 0x84, 0x51, 0x8d, 0x07, 0x7d, 0xa8, 0xfa, 0x5b, 0x81, 0xf0, 0xb6, + 0xb0, 0x73, 0xe7, 0x72, 0x3f, 0x8a, 0x49, 0x8b, 0x62, 0x26, 0xd7, 0xfb, 0xba, 0xa8, 0x8b, 0x15, + 0x15, 0xf4, 0x3a, 0x8c, 0x30, 0xd7, 0xab, 0x68, 0xb6, 0x94, 0x2f, 0x2c, 0x36, 0xa3, 0x98, 0x25, + 0x3b, 0x92, 0xfd, 0x8d, 0xb0, 0xa0, 0x80, 0xae, 0x49, 0xc7, 0xc6, 0xa8, 0xea, 0xdf, 0x8a, 0x08, + 0x73, 0x6c, 0x2c, 0x2f, 0x7d, 0x38, 0xf1, 0x59, 0xe4, 0xe5, 0x99, 0xd9, 0xbb, 0x8c, 0x9a, 0x94, + 0x8d, 0x12, 0xff, 0x65, 0x52, 0xb0, 0x59, 0xc8, 0xef, 0x9e, 0x99, 0x38, 0x2c, 0x19, 0xce, 0xdb, + 0x26, 0x09, 0x9c, 0xa6, 0x49, 0x59, 0x52, 0xbe, 0xed, 0x85, 0xbf, 0x46, 0xbf, 0xc3, 0x83, 0xbf, + 0xc4, 0xd9, 0x75, 0xc4, 0x4b, 0xb0, 0xa8, 0x7f, 0xa2, 0xfc, 0xc1, 0x9c, 0x0f, 0xd3, 0xe9, 0x2d, + 0xfa, 0x50, 0xf9, 0x91, 0xdf, 0x1f, 0x82, 0x49, 0x73, 0x49, 0xa1, 0x2b, 0x50, 0x16, 0x44, 0x54, + 0x20, 0x7f, 0xb5, 0x4b, 0xd6, 0x24, 0x00, 0x27, 0x38, 0x2c, 0x7f, 0x03, 0xab, 0xae, 0xd9, 0xd9, + 0x26, 0xf9, 0x1b, 0x14, 0x04, 0x6b, 0x58, 0xf4, 
0x65, 0xb5, 0x19, 0x04, 0xb1, 0xba, 0x91, 0xd4, + 0xba, 0x5b, 0x62, 0xa5, 0x58, 0x40, 0xe9, 0x4d, 0xb4, 0x4b, 0x42, 0x9f, 0x34, 0xcd, 0xf8, 0xc0, + 0xea, 0x26, 0xba, 0xae, 0x03, 0xb1, 0x89, 0x4b, 0xef, 0xd3, 0x20, 0x62, 0x0b, 0x59, 0xbc, 0xdf, + 0x12, 0xbb, 0xe5, 0x3a, 0xf7, 0xad, 0x96, 0x70, 0xf4, 0x19, 0x78, 0x44, 0xc5, 0x40, 0xc2, 0x5c, + 0x11, 0x21, 0x5b, 0x1c, 0x31, 0xc4, 0x2d, 0x8f, 0x2c, 0x67, 0xa3, 0xe1, 0xbc, 0xfa, 0xe8, 0x35, + 0x98, 0x14, 0x3c, 0xbe, 0xa4, 0x38, 0x6a, 0xda, 0xc6, 0x5c, 0x37, 0xa0, 0x38, 0x85, 0x2d, 0x23, + 0x1c, 0x33, 0x36, 0x5b, 0x52, 0x28, 0x75, 0x47, 0x38, 0xd6, 0xe1, 0xb8, 0xab, 0x06, 0x5a, 0x84, + 0x29, 0xce, 0x84, 0x79, 0xfe, 0x36, 0x9f, 0x13, 0xe1, 0x4e, 0xa5, 0xb6, 0xd4, 0x4d, 0x13, 0x8c, + 0xd3, 0xf8, 0xe8, 0x15, 0x18, 0x77, 0xc2, 0xc6, 0x8e, 0x17, 0x93, 0x46, 0xdc, 0x09, 0xb9, 0x9f, + 0x95, 0x66, 0x5c, 0xb4, 0xa8, 0xc1, 0xb0, 0x81, 0x69, 0xbf, 0x0b, 0xa7, 0x32, 0x82, 0x2e, 0xd0, + 0x85, 0xe3, 0xb4, 0x3d, 0xf9, 0x4d, 0x29, 0x0b, 0xe4, 0xc5, 0x5a, 0x55, 0x7e, 0x8d, 0x86, 0x45, + 0x57, 0x27, 0x0b, 0xce, 0xa0, 0xe5, 0x00, 0x54, 0xab, 0x73, 0x55, 0x02, 0x70, 0x82, 0x63, 0xff, + 0xcf, 0x02, 0x4c, 0x65, 0x28, 0x57, 0x58, 0x1e, 0xba, 0xd4, 0x2b, 0x25, 0x49, 0x3b, 0x67, 0x06, + 0xcc, 0x2e, 0x1c, 0x21, 0x60, 0x76, 0xb1, 0x5f, 0xc0, 0xec, 0xa1, 0xf7, 0x12, 0x30, 0xdb, 0x1c, + 0xb1, 0xe1, 0x81, 0x46, 0x2c, 0x23, 0xc8, 0xf6, 0xc8, 0x11, 0x83, 0x6c, 0x1b, 0x83, 0x3e, 0x3a, + 0xc0, 0xa0, 0xff, 0x60, 0x01, 0xa6, 0xd3, 0x46, 0x90, 0x27, 0x20, 0xb8, 0x7d, 0xdd, 0x10, 0xdc, + 0x66, 0x67, 0x75, 0x4c, 0x9b, 0x66, 0xe6, 0x09, 0x71, 0x71, 0x4a, 0x88, 0xfb, 0xd1, 0x81, 0xa8, + 0xf5, 0x16, 0xe8, 0xfe, 0x9d, 0x02, 0x9c, 0x49, 0x57, 0x59, 0x6e, 0x3a, 0x5e, 0xeb, 0x04, 0xc6, + 0xe6, 0xa6, 0x31, 0x36, 0xcf, 0x0e, 0xf2, 0x35, 0xac, 0x6b, 0xb9, 0x03, 0x74, 0x27, 0x35, 0x40, + 0x57, 0x06, 0x27, 0xd9, 0x7b, 0x94, 0xbe, 0x52, 0x84, 0x0b, 0x99, 0xf5, 0x12, 0xb9, 0xe7, 0xaa, + 0x21, 0xf7, 0x7c, 0x3e, 0x25, 0xf7, 0xb4, 0x7b, 0xd7, 0x3e, 0x1e, 0x41, 0xa8, 0x70, 0x91, 0x65, + 0x11, 0x04, 0x1e, 0x50, 0x08, 0x6a, 0xb8, 0xc8, 0x2a, 0x42, 0xd8, 0xa4, 0xfb, 0xf5, 0x24, 0xfc, + 0xfc, 0x37, 0x16, 0x9c, 0xcb, 0x9c, 0x9b, 0x13, 0x10, 0x76, 0xad, 0x9b, 0xc2, 0xae, 0xa7, 0x06, + 0x5e, 0xad, 0x39, 0xd2, 0xaf, 0x5f, 0x1f, 0xca, 0xf9, 0x16, 0xf6, 0x94, 0xbf, 0x09, 0x63, 0x4e, + 0xa3, 0x41, 0xa2, 0x68, 0x2d, 0x70, 0x55, 0x4c, 0xe0, 0x67, 0xd9, 0x3b, 0x2b, 0x29, 0x3e, 0x3c, + 0x98, 0x9f, 0x4b, 0x93, 0x48, 0xc0, 0x58, 0xa7, 0x80, 0x3e, 0x0b, 0xa5, 0x48, 0xdc, 0x9b, 0x62, + 0xee, 0x5f, 0x18, 0x70, 0x70, 0x9c, 0x4d, 0xd2, 0x34, 0xe3, 0x1c, 0x29, 0x51, 0x85, 0x22, 0x69, + 0xc6, 0x44, 0x29, 0x1c, 0x6b, 0x4c, 0x94, 0xe7, 0x01, 0xf6, 0xd4, 0x63, 0x20, 0x2d, 0x80, 0xd0, + 0x9e, 0x09, 0x1a, 0x16, 0xfa, 0x26, 0x98, 0x8e, 0x78, 0x54, 0xbf, 0xe5, 0xa6, 0x13, 0x31, 0x3f, + 0x17, 0xb1, 0x0a, 0x59, 0x2c, 0xa5, 0x7a, 0x0a, 0x86, 0xbb, 0xb0, 0xd1, 0xaa, 0x6c, 0x95, 0x85, + 0x20, 0xe4, 0x0b, 0xf3, 0x52, 0xd2, 0xa2, 0xc8, 0x82, 0x7b, 0x3a, 0x3d, 0xfc, 0x6c, 0xe0, 0xb5, + 0x9a, 0xe8, 0xb3, 0x00, 0x74, 0xf9, 0x08, 0x41, 0xc4, 0x68, 0xfe, 0xe1, 0x49, 0x4f, 0x15, 0x37, + 0xd3, 0x2c, 0x97, 0x39, 0xa7, 0x56, 0x14, 0x11, 0xac, 0x11, 0xb4, 0x7f, 0x70, 0x08, 0x1e, 0xed, + 0x71, 0x46, 0xa2, 0x45, 0x53, 0x11, 0xfb, 0x74, 0xfa, 0x71, 0x3d, 0x97, 0x59, 0xd9, 0x78, 0x6d, + 0xa7, 0x96, 0x62, 0xe1, 0x3d, 0x2f, 0xc5, 0xef, 0xb3, 0x34, 0xb1, 0x07, 0x37, 0xd6, 0xfc, 0xd4, + 0x11, 0xcf, 0xfe, 0x63, 0x94, 0x83, 0x6c, 0x65, 0x08, 0x13, 0x9e, 0x1f, 0xb8, 0x3b, 0x03, 0x4b, + 0x17, 0x4e, 0x56, 0x4c, 0xfc, 0x05, 0x0b, 0x1e, 0xcf, 0xec, 0xaf, 0x61, 
0x92, 0x73, 0x05, 0xca, + 0x0d, 0x5a, 0xa8, 0xf9, 0x22, 0x26, 0x4e, 0xda, 0x12, 0x80, 0x13, 0x1c, 0xc3, 0xf2, 0xa6, 0xd0, + 0xd7, 0xf2, 0xe6, 0x9f, 0x5b, 0xd0, 0xb5, 0x3f, 0x4e, 0xe0, 0xa0, 0xae, 0x9a, 0x07, 0xf5, 0x87, + 0x07, 0x99, 0xcb, 0x9c, 0x33, 0xfa, 0x8f, 0xa6, 0xe0, 0x6c, 0x8e, 0x2f, 0xce, 0x1e, 0xcc, 0x6c, + 0x37, 0x88, 0xe9, 0xe5, 0x29, 0x3e, 0x26, 0xd3, 0x21, 0xb6, 0xa7, 0x4b, 0x28, 0x4b, 0x69, 0x39, + 0xd3, 0x85, 0x82, 0xbb, 0x9b, 0x40, 0x5f, 0xb0, 0xe0, 0xb4, 0x73, 0x37, 0xea, 0xca, 0x81, 0x2f, + 0xd6, 0xcc, 0x8b, 0x99, 0x42, 0x90, 0x3e, 0x39, 0xf3, 0x79, 0x8e, 0xcf, 0x2c, 0x2c, 0x9c, 0xd9, + 0x16, 0xc2, 0x22, 0x4a, 0x3c, 0x65, 0xe7, 0x7b, 0xf8, 0x21, 0x67, 0x39, 0x4d, 0xf1, 0x1b, 0x44, + 0x42, 0xb0, 0xa2, 0x83, 0x3e, 0x0f, 0xe5, 0x6d, 0xe9, 0xc9, 0x98, 0x71, 0x43, 0x25, 0x03, 0xd9, + 0xdb, 0xbf, 0x93, 0xab, 0x32, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0xd7, 0xa0, 0xe8, 0x6f, 0x45, 0xbd, + 0xd2, 0x64, 0xa6, 0x6c, 0xd6, 0xb8, 0xb7, 0xff, 0xfa, 0x6a, 0x1d, 0xd3, 0x8a, 0xe8, 0x1a, 0x14, + 0xc3, 0x4d, 0x57, 0x48, 0xf0, 0x32, 0xcf, 0x70, 0xbc, 0x54, 0xc9, 0xe9, 0x15, 0xa3, 0x84, 0x97, + 0x2a, 0x98, 0x92, 0x40, 0x35, 0x18, 0x66, 0x0e, 0x2c, 0xe2, 0x3e, 0xc8, 0xe4, 0x7c, 0x7b, 0x38, + 0x82, 0xf1, 0x90, 0x00, 0x0c, 0x01, 0x73, 0x42, 0x68, 0x03, 0x46, 0x1a, 0x2c, 0xa5, 0xa2, 0x08, + 0x48, 0xf6, 0xb1, 0x4c, 0x59, 0x5d, 0x8f, 0x5c, 0x93, 0x42, 0x74, 0xc5, 0x30, 0xb0, 0xa0, 0xc5, + 0xa8, 0x92, 0xf6, 0xce, 0x56, 0x24, 0x52, 0x00, 0x67, 0x53, 0xed, 0x91, 0x42, 0x55, 0x50, 0x65, + 0x18, 0x58, 0xd0, 0x42, 0x9f, 0x80, 0xc2, 0x56, 0x43, 0x38, 0xa7, 0x64, 0x0a, 0xed, 0xcc, 0x80, + 0x0d, 0x4b, 0x23, 0xf7, 0x0f, 0xe6, 0x0b, 0xab, 0xcb, 0xb8, 0xb0, 0xd5, 0x40, 0xeb, 0x30, 0xba, + 0xc5, 0x5d, 0xbc, 0x85, 0x5c, 0xee, 0xc9, 0x6c, 0xef, 0xf3, 0x2e, 0x2f, 0x70, 0xee, 0x97, 0x21, + 0x00, 0x58, 0x12, 0x61, 0x41, 0xd7, 0x95, 0xab, 0xba, 0x88, 0xdd, 0xb5, 0x70, 0xb4, 0xf0, 0x02, + 0xfc, 0x7e, 0x4e, 0x1c, 0xde, 0xb1, 0x46, 0x91, 0xae, 0x6a, 0x47, 0xe6, 0x61, 0x17, 0xb1, 0x58, + 0x32, 0x57, 0x75, 0x9f, 0x14, 0xf5, 0x7c, 0x55, 0x2b, 0x24, 0x9c, 0x10, 0x45, 0xbb, 0x30, 0xb1, + 0x17, 0xb5, 0x77, 0x88, 0xdc, 0xd2, 0x2c, 0x34, 0x4b, 0xce, 0x15, 0x76, 0x5b, 0x20, 0x7a, 0x61, + 0xdc, 0x71, 0x9a, 0x5d, 0xa7, 0x10, 0xd3, 0x7f, 0xdf, 0xd6, 0x89, 0x61, 0x93, 0x36, 0x1d, 0xfe, + 0x77, 0x3a, 0xc1, 0xe6, 0x7e, 0x4c, 0x44, 0xc8, 0xad, 0xcc, 0xe1, 0x7f, 0x83, 0xa3, 0x74, 0x0f, + 0xbf, 0x00, 0x60, 0x49, 0x04, 0xdd, 0x16, 0xc3, 0xc3, 0x4e, 0xcf, 0xe9, 0xfc, 0xb8, 0x98, 0x8b, + 0x12, 0x29, 0x67, 0x50, 0xd8, 0x69, 0x99, 0x90, 0x62, 0xa7, 0x64, 0x7b, 0x27, 0x88, 0x03, 0x3f, + 0x75, 0x42, 0xcf, 0xe4, 0x9f, 0x92, 0xb5, 0x0c, 0xfc, 0xee, 0x53, 0x32, 0x0b, 0x0b, 0x67, 0xb6, + 0x85, 0x5c, 0x98, 0x6c, 0x07, 0x61, 0x7c, 0x37, 0x08, 0xe5, 0xfa, 0x42, 0x3d, 0xe4, 0x0a, 0x06, + 0xa6, 0x68, 0x91, 0x45, 0xb3, 0x33, 0x21, 0x38, 0x45, 0x13, 0x7d, 0x1a, 0x46, 0xa3, 0x86, 0xd3, + 0x24, 0xd5, 0x9b, 0xb3, 0xa7, 0xf2, 0xaf, 0x9f, 0x3a, 0x47, 0xc9, 0x59, 0x5d, 0x3c, 0x42, 0x3b, + 0x47, 0xc1, 0x92, 0x1c, 0x5a, 0x85, 0x61, 0x96, 0x54, 0x8b, 0xc5, 0x87, 0xcb, 0x09, 0xef, 0xd9, + 0x65, 0x41, 0xcc, 0xcf, 0x26, 0x56, 0x8c, 0x79, 0x75, 0xba, 0x07, 0x04, 0x7b, 0x1d, 0x44, 0xb3, + 0x67, 0xf2, 0xf7, 0x80, 0xe0, 0xca, 0x6f, 0xd6, 0x7b, 0xed, 0x01, 0x85, 0x84, 0x13, 0xa2, 0xf4, + 0x64, 0xa6, 0xa7, 0xe9, 0xd9, 0x1e, 0xa6, 0x2f, 0xb9, 0x67, 0x29, 0x3b, 0x99, 0xe9, 0x49, 0x4a, + 0x49, 0xd8, 0xbf, 0x3b, 0xda, 0xcd, 0xb3, 0xb0, 0x07, 0xd9, 0x77, 0x5a, 0x5d, 0xba, 0xba, 0x8f, + 0x0f, 0x2a, 0x1f, 0x3a, 0x46, 0x6e, 0xf5, 0x0b, 0x16, 0x9c, 0x6d, 0x67, 0x7e, 0x88, 0x60, 0x00, + 
0x06, 0x13, 0x33, 0xf1, 0x4f, 0x57, 0xb1, 0x04, 0xb3, 0xe1, 0x38, 0xa7, 0xa5, 0xf4, 0x8b, 0xa0, + 0xf8, 0x9e, 0x5f, 0x04, 0x6b, 0x50, 0x62, 0x4c, 0x66, 0x9f, 0x14, 0xc3, 0xe9, 0x87, 0x11, 0x63, + 0x25, 0x96, 0x45, 0x45, 0xac, 0x48, 0xa0, 0xef, 0xb7, 0xe0, 0x7c, 0xba, 0xeb, 0x98, 0x30, 0xb0, + 0x08, 0x40, 0xc8, 0xdf, 0x82, 0xab, 0xe2, 0xfb, 0xcf, 0xd7, 0x7a, 0x21, 0x1f, 0xf6, 0x43, 0xc0, + 0xbd, 0x1b, 0x43, 0x95, 0x8c, 0xc7, 0xe8, 0x88, 0x29, 0x80, 0x1f, 0xe0, 0x41, 0xfa, 0x22, 0x8c, + 0xb7, 0x82, 0x8e, 0x1f, 0x0b, 0x4b, 0x19, 0xa1, 0xb5, 0x67, 0xda, 0xea, 0x35, 0xad, 0x1c, 0x1b, + 0x58, 0xa9, 0x67, 0x6c, 0xe9, 0x81, 0x9f, 0xb1, 0x6f, 0xc1, 0xb8, 0xaf, 0x99, 0x76, 0x0a, 0x7e, + 0xe0, 0x52, 0x7e, 0xf0, 0x50, 0xdd, 0x10, 0x94, 0xf7, 0x52, 0x2f, 0xc1, 0x06, 0xb5, 0x93, 0x7d, + 0x1b, 0xfd, 0x94, 0x95, 0xc1, 0xd4, 0xf3, 0xd7, 0xf2, 0x27, 0xcd, 0xd7, 0xf2, 0xa5, 0xf4, 0x6b, + 0xb9, 0x4b, 0xf8, 0x6a, 0x3c, 0x94, 0x07, 0x4f, 0x74, 0x32, 0x68, 0x9c, 0x40, 0xbb, 0x09, 0x17, + 0xfb, 0x5d, 0x4b, 0xcc, 0x64, 0xca, 0x55, 0xaa, 0xb6, 0xc4, 0x64, 0xca, 0xad, 0x56, 0x30, 0x83, + 0x0c, 0x1a, 0x48, 0xc6, 0xfe, 0x1f, 0x16, 0x14, 0x6b, 0x81, 0x7b, 0x02, 0xc2, 0xe4, 0x4f, 0x19, + 0xc2, 0xe4, 0x47, 0xb3, 0x2f, 0x44, 0x37, 0x57, 0x74, 0xbc, 0x92, 0x12, 0x1d, 0x9f, 0xcf, 0x23, + 0xd0, 0x5b, 0x50, 0xfc, 0xe3, 0x45, 0x18, 0xab, 0x05, 0xae, 0xb2, 0x57, 0xfe, 0xf5, 0x07, 0xb1, + 0x57, 0xce, 0x8d, 0xf0, 0xaf, 0x51, 0x66, 0x96, 0x56, 0xd2, 0xc9, 0xf2, 0x2f, 0x98, 0xd9, 0xf2, + 0x1d, 0xe2, 0x6d, 0xef, 0xc4, 0xc4, 0x4d, 0x7f, 0xce, 0xc9, 0x99, 0x2d, 0xff, 0x77, 0x0b, 0xa6, + 0x52, 0xad, 0xa3, 0x26, 0x4c, 0x34, 0x75, 0xc1, 0xa4, 0x58, 0xa7, 0x0f, 0x24, 0xd3, 0x14, 0x66, + 0x9f, 0x5a, 0x11, 0x36, 0x89, 0xa3, 0x05, 0x00, 0xa5, 0xa9, 0x93, 0x12, 0x30, 0xc6, 0xf5, 0x2b, + 0x55, 0x5e, 0x84, 0x35, 0x0c, 0xf4, 0x12, 0x8c, 0xc5, 0x41, 0x3b, 0x68, 0x06, 0xdb, 0xfb, 0xd7, + 0x89, 0x0c, 0x5d, 0xa4, 0x8c, 0xb9, 0x36, 0x12, 0x10, 0xd6, 0xf1, 0xec, 0x9f, 0x2c, 0xf2, 0x0f, + 0xf5, 0x63, 0xef, 0x83, 0x35, 0xf9, 0xfe, 0x5e, 0x93, 0x5f, 0xb1, 0x60, 0x9a, 0xb6, 0xce, 0xcc, + 0x45, 0xe4, 0x65, 0xab, 0x82, 0x06, 0x5b, 0x3d, 0x82, 0x06, 0x5f, 0xa2, 0x67, 0x97, 0x1b, 0x74, + 0x62, 0x21, 0x41, 0xd3, 0x0e, 0x27, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, 0x30, 0x14, 0x3e, 0x6e, + 0x3a, 0x1e, 0x09, 0x43, 0x2c, 0xa0, 0x32, 0xa6, 0xf0, 0x50, 0x76, 0x4c, 0x61, 0x1e, 0x88, 0x51, + 0x18, 0x16, 0x08, 0xb6, 0x47, 0x0b, 0xc4, 0x28, 0x2d, 0x0e, 0x12, 0x1c, 0xfb, 0xe7, 0x8a, 0x30, + 0x5e, 0x0b, 0xdc, 0x44, 0x57, 0xf6, 0xa2, 0xa1, 0x2b, 0xbb, 0x98, 0xd2, 0x95, 0x4d, 0xeb, 0xb8, + 0x1f, 0x68, 0xc6, 0xbe, 0x56, 0x9a, 0xb1, 0x7f, 0x66, 0xb1, 0x59, 0xab, 0xac, 0xd7, 0xb9, 0xf5, + 0x11, 0x7a, 0x0e, 0xc6, 0xd8, 0x81, 0xc4, 0x9c, 0x2a, 0xa5, 0x02, 0x89, 0xe5, 0x50, 0x5a, 0x4f, + 0x8a, 0xb1, 0x8e, 0x83, 0x2e, 0x43, 0x29, 0x22, 0x4e, 0xd8, 0xd8, 0x51, 0x67, 0x9c, 0xd0, 0xf6, + 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0x8d, 0x24, 0x06, 0x60, 0x31, 0xdf, 0x49, 0x4b, 0xef, 0x0f, 0xdf, + 0x22, 0xf9, 0x81, 0xff, 0xec, 0x3b, 0x80, 0xba, 0xf1, 0x07, 0x08, 0x7e, 0x35, 0x6f, 0x06, 0xbf, + 0x2a, 0x77, 0x05, 0xbe, 0xfa, 0x33, 0x0b, 0x26, 0x6b, 0x81, 0x4b, 0xb7, 0xee, 0xd7, 0xd3, 0x3e, + 0xd5, 0x03, 0xa0, 0x8e, 0xf4, 0x08, 0x80, 0xfa, 0x04, 0x0c, 0xd7, 0x02, 0xb7, 0x5a, 0xeb, 0xe5, + 0xdc, 0x6c, 0xff, 0x5d, 0x0b, 0x46, 0x6b, 0x81, 0x7b, 0x02, 0xc2, 0xf9, 0x4f, 0x9a, 0xc2, 0xf9, + 0x47, 0x72, 0xd6, 0x4d, 0x8e, 0x3c, 0xfe, 0x17, 0x8a, 0x30, 0x41, 0xfb, 0x19, 0x6c, 0xcb, 0xa9, + 0x34, 0x86, 0xcd, 0x1a, 0x60, 0xd8, 0x28, 0x2f, 0x1c, 0x34, 0x9b, 0xc1, 0xdd, 0xf4, 0xb4, 0xae, + 0xb2, 0x52, 0x2c, 0xa0, 
0xe8, 0x19, 0x28, 0xb5, 0x43, 0xb2, 0xe7, 0x05, 0x82, 0xc9, 0xd4, 0x54, + 0x1d, 0x35, 0x51, 0x8e, 0x15, 0x06, 0x7d, 0x9c, 0x45, 0x9e, 0xdf, 0x20, 0x75, 0xd2, 0x08, 0x7c, + 0x97, 0xcb, 0xaf, 0x8b, 0x22, 0x6f, 0x80, 0x56, 0x8e, 0x0d, 0x2c, 0x74, 0x07, 0xca, 0xec, 0x3f, + 0x3b, 0x76, 0x8e, 0x9e, 0x4e, 0x52, 0xa4, 0x17, 0x13, 0x04, 0x70, 0x42, 0x0b, 0x3d, 0x0f, 0x10, + 0xcb, 0x10, 0xd9, 0x91, 0x08, 0x74, 0xa4, 0x18, 0x72, 0x15, 0x3c, 0x3b, 0xc2, 0x1a, 0x16, 0x7a, + 0x1a, 0xca, 0xb1, 0xe3, 0x35, 0x6f, 0x78, 0x3e, 0x89, 0x98, 0x5c, 0xba, 0x28, 0xb3, 0x7c, 0x89, + 0x42, 0x9c, 0xc0, 0x29, 0x43, 0xc4, 0xa2, 0x00, 0xf0, 0x64, 0xb4, 0x25, 0x86, 0xcd, 0x18, 0xa2, + 0x1b, 0xaa, 0x14, 0x6b, 0x18, 0xf6, 0x2b, 0x70, 0xa6, 0x16, 0xb8, 0xb5, 0x20, 0x8c, 0x57, 0x83, + 0xf0, 0xae, 0x13, 0xba, 0x72, 0xfe, 0xe6, 0x65, 0x72, 0x10, 0x7a, 0x40, 0x0d, 0xf3, 0xed, 0x6b, + 0xa4, 0xa8, 0x7a, 0x81, 0xb1, 0x44, 0x47, 0xf4, 0x11, 0x69, 0xb0, 0xcb, 0x59, 0xa5, 0x81, 0xb8, + 0xea, 0xc4, 0x04, 0xdd, 0x64, 0xb9, 0x6a, 0x93, 0x7b, 0x4a, 0x54, 0x7f, 0x4a, 0xcb, 0x55, 0x9b, + 0x00, 0x33, 0x2f, 0x36, 0xb3, 0xbe, 0xfd, 0x33, 0x43, 0xec, 0xc8, 0x4a, 0xa5, 0x12, 0x40, 0x9f, + 0x83, 0xc9, 0x88, 0xdc, 0xf0, 0xfc, 0xce, 0x3d, 0xf9, 0x52, 0xef, 0xe1, 0xe5, 0x53, 0x5f, 0xd1, + 0x31, 0xb9, 0xbc, 0xcf, 0x2c, 0xc3, 0x29, 0x6a, 0xa8, 0x05, 0x93, 0x77, 0x3d, 0xdf, 0x0d, 0xee, + 0x46, 0x92, 0x7e, 0x29, 0x5f, 0xec, 0x77, 0x87, 0x63, 0xa6, 0xfa, 0x68, 0x34, 0x77, 0xc7, 0x20, + 0x86, 0x53, 0xc4, 0xe9, 0xb2, 0x08, 0x3b, 0xfe, 0x62, 0x74, 0x2b, 0x22, 0xa1, 0xc8, 0x3a, 0xcc, + 0x96, 0x05, 0x96, 0x85, 0x38, 0x81, 0xd3, 0x65, 0xc1, 0xfe, 0x5c, 0x0d, 0x83, 0x0e, 0x0f, 0x2f, + 0x2f, 0x96, 0x05, 0x56, 0xa5, 0x58, 0xc3, 0xa0, 0xdb, 0x86, 0xfd, 0x5b, 0x0f, 0x7c, 0x1c, 0x04, + 0xb1, 0xdc, 0x68, 0x2c, 0xcf, 0xa5, 0x56, 0x8e, 0x0d, 0x2c, 0xb4, 0x0a, 0x28, 0xea, 0xb4, 0xdb, + 0x4d, 0x66, 0x3d, 0xe0, 0x34, 0x19, 0x29, 0xae, 0xb9, 0x2d, 0xf2, 0xe0, 0x99, 0xf5, 0x2e, 0x28, + 0xce, 0xa8, 0x41, 0x4f, 0xd0, 0x2d, 0xd1, 0xd5, 0x61, 0xd6, 0x55, 0xae, 0x22, 0xa8, 0xf3, 0x7e, + 0x4a, 0x18, 0x5a, 0x81, 0xd1, 0x68, 0x3f, 0x6a, 0xc4, 0x22, 0x0a, 0x58, 0x4e, 0xb6, 0x98, 0x3a, + 0x43, 0xd1, 0x92, 0x95, 0xf1, 0x2a, 0x58, 0xd6, 0xb5, 0xbf, 0x8d, 0x5d, 0xd0, 0x2c, 0x47, 0x6d, + 0xdc, 0x09, 0x09, 0x6a, 0xc1, 0x44, 0x9b, 0xad, 0x30, 0x11, 0x2f, 0x5d, 0x2c, 0x93, 0x17, 0x07, + 0x7c, 0x69, 0xdf, 0xa5, 0xe7, 0x9a, 0x92, 0x84, 0xb1, 0x27, 0x4c, 0x4d, 0x27, 0x87, 0x4d, 0xea, + 0xf6, 0x57, 0xce, 0xb2, 0x23, 0xbe, 0xce, 0x9f, 0xcf, 0xa3, 0xc2, 0xdc, 0x59, 0xbc, 0x15, 0xe6, + 0xf2, 0xe5, 0x38, 0xc9, 0x17, 0x09, 0x93, 0x69, 0x2c, 0xeb, 0xa2, 0xcf, 0xc2, 0x24, 0x65, 0xbd, + 0xb5, 0x7c, 0x11, 0xa7, 0xf3, 0xfd, 0xd2, 0x93, 0x34, 0x11, 0x5a, 0x2e, 0x05, 0xbd, 0x32, 0x4e, + 0x11, 0x43, 0x6f, 0x30, 0xc5, 0xbc, 0x99, 0x8a, 0xa2, 0x0f, 0x69, 0x5d, 0x07, 0x2f, 0xc9, 0x6a, + 0x44, 0xf2, 0xd2, 0x5c, 0xd8, 0x0f, 0x37, 0xcd, 0x05, 0xba, 0x01, 0x13, 0x22, 0x51, 0xab, 0x10, + 0x3f, 0x16, 0x0d, 0xf1, 0xd2, 0x04, 0xd6, 0x81, 0x87, 0xe9, 0x02, 0x6c, 0x56, 0x46, 0xdb, 0x70, + 0x5e, 0xcb, 0xb5, 0x72, 0x35, 0x74, 0x98, 0x8e, 0xd8, 0x63, 0x27, 0x91, 0x76, 0xf9, 0x3c, 0x7e, + 0xff, 0x60, 0xfe, 0xfc, 0x46, 0x2f, 0x44, 0xdc, 0x9b, 0x0e, 0xba, 0x09, 0x67, 0xb8, 0x57, 0x65, + 0x85, 0x38, 0x6e, 0xd3, 0xf3, 0xd5, 0xed, 0xc6, 0x77, 0xcb, 0xb9, 0xfb, 0x07, 0xf3, 0x67, 0x16, + 0xb3, 0x10, 0x70, 0x76, 0x3d, 0xf4, 0x49, 0x28, 0xbb, 0x7e, 0x24, 0xc6, 0x60, 0xc4, 0x48, 0x67, + 0x53, 0xae, 0xac, 0xd7, 0xd5, 0xf7, 0x27, 0x7f, 0x70, 0x52, 0x01, 0x6d, 0x73, 0x11, 0xa4, 0x7a, + 0xf1, 0x8f, 0x76, 0xc5, 0x83, 0x49, 0xcb, 0x8e, 
0x0c, 0xbf, 0x2a, 0x2e, 0x7b, 0x57, 0xd6, 0xc6, + 0x86, 0xcb, 0x95, 0x41, 0x18, 0xbd, 0x0e, 0x88, 0xb2, 0xc4, 0x5e, 0x83, 0x2c, 0x36, 0x58, 0x30, + 0x7e, 0x26, 0xb1, 0x2d, 0x19, 0xde, 0x29, 0xa8, 0xde, 0x85, 0x81, 0x33, 0x6a, 0xa1, 0x6b, 0xf4, + 0x36, 0xd0, 0x4b, 0x85, 0xd5, 0xb4, 0x4a, 0x3e, 0x56, 0x21, 0xed, 0x90, 0x34, 0x9c, 0x98, 0xb8, + 0x26, 0x45, 0x9c, 0xaa, 0x87, 0x5c, 0x78, 0xcc, 0xe9, 0xc4, 0x01, 0x93, 0xee, 0x9a, 0xa8, 0x1b, + 0xc1, 0x2e, 0xf1, 0x99, 0x62, 0xa5, 0xb4, 0x74, 0xf1, 0xfe, 0xc1, 0xfc, 0x63, 0x8b, 0x3d, 0xf0, + 0x70, 0x4f, 0x2a, 0x94, 0xed, 0x51, 0xa9, 0x43, 0xc1, 0x0c, 0x73, 0x93, 0x91, 0x3e, 0xf4, 0x25, + 0x18, 0xdb, 0x09, 0xa2, 0x78, 0x9d, 0xc4, 0x77, 0x83, 0x70, 0x57, 0x04, 0x2b, 0x4c, 0x02, 0xdc, + 0x26, 0x20, 0xac, 0xe3, 0xd1, 0x77, 0x0d, 0x53, 0xfb, 0x57, 0x2b, 0x4c, 0xe3, 0x5a, 0x4a, 0xce, + 0x98, 0x6b, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0xad, 0xd6, 0x96, 0x99, 0xf6, 0x34, 0x85, 0x5a, 0xad, + 0x2d, 0x63, 0x09, 0xa7, 0xcb, 0x35, 0xda, 0x71, 0x42, 0x52, 0x0b, 0x83, 0x06, 0x89, 0xb4, 0xb0, + 0xca, 0x8f, 0xf2, 0x50, 0x8c, 0x74, 0xb9, 0xd6, 0xb3, 0x10, 0x70, 0x76, 0x3d, 0x44, 0xba, 0xf3, + 0x0c, 0x4d, 0xe6, 0x8b, 0xbd, 0xbb, 0x59, 0x81, 0x01, 0x53, 0x0d, 0xf9, 0x30, 0xad, 0x32, 0x1c, + 0xf1, 0xe0, 0x8b, 0xd1, 0xec, 0x14, 0x5b, 0xdb, 0x83, 0x47, 0x6e, 0x54, 0x8a, 0x84, 0x6a, 0x8a, + 0x12, 0xee, 0xa2, 0x6d, 0x44, 0x32, 0x9a, 0xee, 0x9b, 0x4b, 0xf6, 0x0a, 0x94, 0xa3, 0xce, 0xa6, + 0x1b, 0xb4, 0x1c, 0xcf, 0x67, 0xda, 0x53, 0x8d, 0xc1, 0xae, 0x4b, 0x00, 0x4e, 0x70, 0xd0, 0x2a, + 0x94, 0x1c, 0xa9, 0x25, 0x40, 0xf9, 0x01, 0x30, 0x94, 0x6e, 0x80, 0xfb, 0x84, 0x4b, 0xbd, 0x80, + 0xaa, 0x8b, 0x5e, 0x85, 0x09, 0xe1, 0x15, 0x28, 0x92, 0xeb, 0x9d, 0x32, 0x3d, 0x37, 0xea, 0x3a, + 0x10, 0x9b, 0xb8, 0xe8, 0x16, 0x8c, 0xc5, 0x41, 0x93, 0xb9, 0x1f, 0x50, 0x0e, 0xe9, 0x6c, 0x7e, + 0x10, 0xad, 0x0d, 0x85, 0xa6, 0x0b, 0xe8, 0x54, 0x55, 0xac, 0xd3, 0x41, 0x1b, 0x7c, 0xbd, 0xb3, + 0xf0, 0xc2, 0x24, 0x9a, 0x7d, 0x24, 0xff, 0x4e, 0x52, 0x51, 0x88, 0xcd, 0xed, 0x20, 0x6a, 0x62, + 0x9d, 0x0c, 0xba, 0x0a, 0x33, 0xed, 0xd0, 0x0b, 0xd8, 0x9a, 0x50, 0x0a, 0xa2, 0x59, 0x33, 0x29, + 0x4a, 0x2d, 0x8d, 0x80, 0xbb, 0xeb, 0x30, 0xa7, 0x4e, 0x51, 0x38, 0x7b, 0x8e, 0x27, 0xd3, 0xe5, + 0xef, 0x15, 0x5e, 0x86, 0x15, 0x14, 0xad, 0xb1, 0x93, 0x98, 0x3f, 0xb5, 0x67, 0xe7, 0xf2, 0x63, + 0x6e, 0xe8, 0x4f, 0x72, 0xce, 0xf7, 0xa9, 0xbf, 0x38, 0xa1, 0x80, 0x5c, 0x2d, 0x51, 0x1b, 0x65, + 0xb6, 0xa3, 0xd9, 0xc7, 0x7a, 0xd8, 0x5e, 0xa5, 0x38, 0xf3, 0x84, 0x21, 0x30, 0x8a, 0x23, 0x9c, + 0xa2, 0x89, 0xbe, 0x09, 0xa6, 0x45, 0x8c, 0xaf, 0x64, 0x98, 0xce, 0x27, 0x46, 0x9d, 0x38, 0x05, + 0xc3, 0x5d, 0xd8, 0x3c, 0xec, 0xba, 0xb3, 0xd9, 0x24, 0xe2, 0xe8, 0xbb, 0xe1, 0xf9, 0xbb, 0xd1, + 0xec, 0x05, 0x76, 0x3e, 0x88, 0xb0, 0xeb, 0x69, 0x28, 0xce, 0xa8, 0x81, 0x36, 0x60, 0xba, 0x1d, + 0x12, 0xd2, 0x62, 0x3c, 0xb2, 0xb8, 0xcf, 0xe6, 0xb9, 0x4f, 0x33, 0xed, 0x49, 0x2d, 0x05, 0x3b, + 0xcc, 0x28, 0xc3, 0x5d, 0x14, 0xd0, 0x5d, 0x28, 0x05, 0x7b, 0x24, 0xdc, 0x21, 0x8e, 0x3b, 0x7b, + 0xb1, 0x87, 0x91, 0xb1, 0xb8, 0xdc, 0x6e, 0x0a, 0xdc, 0x94, 0x52, 0x59, 0x16, 0xf7, 0x57, 0x2a, + 0xcb, 0xc6, 0xd0, 0x0f, 0x58, 0x70, 0x4e, 0xca, 0xa1, 0xeb, 0x6d, 0x3a, 0xea, 0xcb, 0x81, 0x1f, + 0xc5, 0x21, 0xf7, 0xc2, 0x7d, 0x3c, 0xdf, 0x31, 0x75, 0x23, 0xa7, 0x92, 0x92, 0xf6, 0x9d, 0xcb, + 0xc3, 0x88, 0x70, 0x7e, 0x8b, 0x73, 0xdf, 0x08, 0x33, 0x5d, 0x37, 0xf7, 0x51, 0x32, 0x41, 0xcc, + 0xed, 0xc2, 0x84, 0x31, 0x3a, 0x0f, 0x55, 0x9f, 0xf8, 0xaf, 0x47, 0xa1, 0xac, 0x74, 0x4d, 0xe8, + 0x8a, 0xa9, 0x42, 0x3c, 0x97, 0x56, 0x21, 0x96, 0xe8, 0x6b, 0x56, 0xd7, 
0x1a, 0x6e, 0x64, 0xc4, + 0x3c, 0xca, 0xdb, 0x8b, 0x83, 0xfb, 0xb2, 0x6a, 0xa2, 0xc3, 0xe2, 0xc0, 0xba, 0xc8, 0xa1, 0x9e, + 0xd2, 0xc8, 0xab, 0x30, 0xe3, 0x07, 0x8c, 0x5d, 0x24, 0xae, 0xe4, 0x05, 0xd8, 0x95, 0x5f, 0xd6, + 0x83, 0x08, 0xa4, 0x10, 0x70, 0x77, 0x1d, 0xda, 0x20, 0xbf, 0xb3, 0xd3, 0xe2, 0x4f, 0x7e, 0xa5, + 0x63, 0x01, 0x45, 0x4f, 0xc0, 0x70, 0x3b, 0x70, 0xab, 0x35, 0xc1, 0x2a, 0x6a, 0x59, 0x41, 0xdd, + 0x6a, 0x0d, 0x73, 0x18, 0x5a, 0x84, 0x11, 0xf6, 0x23, 0x9a, 0x1d, 0xcf, 0xf7, 0x16, 0x67, 0x35, + 0xb4, 0x3c, 0x1b, 0xac, 0x02, 0x16, 0x15, 0x99, 0x18, 0x86, 0xf2, 0xd7, 0x4c, 0x0c, 0x33, 0xfa, + 0x80, 0x62, 0x18, 0x49, 0x00, 0x27, 0xb4, 0xd0, 0x3d, 0x38, 0x63, 0xbc, 0x69, 0xf8, 0x12, 0x21, + 0x91, 0x70, 0x58, 0x7d, 0xa2, 0xe7, 0x63, 0x46, 0xe8, 0x2e, 0xcf, 0x8b, 0x4e, 0x9f, 0xa9, 0x66, + 0x51, 0xc2, 0xd9, 0x0d, 0xa0, 0x26, 0xcc, 0x34, 0xba, 0x5a, 0x2d, 0x0d, 0xde, 0xaa, 0x9a, 0xd0, + 0xee, 0x16, 0xbb, 0x09, 0xa3, 0x57, 0xa1, 0xf4, 0x4e, 0x10, 0xb1, 0x63, 0x56, 0xb0, 0xb7, 0xd2, + 0xdb, 0xb1, 0xf4, 0xc6, 0xcd, 0x3a, 0x2b, 0x3f, 0x3c, 0x98, 0x1f, 0xab, 0x05, 0xae, 0xfc, 0x8b, + 0x55, 0x05, 0xf4, 0x3d, 0x16, 0xcc, 0x75, 0x3f, 0x9a, 0x54, 0xa7, 0x27, 0x06, 0xef, 0xb4, 0x2d, + 0x1a, 0x9d, 0x5b, 0xc9, 0x25, 0x87, 0x7b, 0x34, 0x65, 0xff, 0x32, 0xd7, 0x33, 0x0a, 0x6d, 0x04, + 0x89, 0x3a, 0xcd, 0x93, 0xc8, 0x4b, 0xb8, 0x62, 0x28, 0x4a, 0x1e, 0x58, 0x97, 0xfd, 0x6b, 0x16, + 0xd3, 0x65, 0x6f, 0x90, 0x56, 0xbb, 0xe9, 0xc4, 0x27, 0xe1, 0x2c, 0xf7, 0x06, 0x94, 0x62, 0xd1, + 0x5a, 0xaf, 0x54, 0x8a, 0x5a, 0xa7, 0x98, 0x3e, 0x5f, 0x31, 0x9b, 0xb2, 0x14, 0x2b, 0x32, 0xf6, + 0x3f, 0xe6, 0x33, 0x20, 0x21, 0x27, 0x20, 0x8f, 0xae, 0x98, 0xf2, 0xe8, 0xf9, 0x3e, 0x5f, 0x90, + 0x23, 0x97, 0xfe, 0x47, 0x66, 0xbf, 0x99, 0x90, 0xe5, 0xfd, 0x6e, 0x44, 0x61, 0xff, 0x90, 0x05, + 0xa7, 0xb3, 0xac, 0x0e, 0xe9, 0x03, 0x81, 0x8b, 0x78, 0x94, 0x51, 0x89, 0x1a, 0xc1, 0xdb, 0xa2, + 0x1c, 0x2b, 0x8c, 0x81, 0xb3, 0x14, 0x1d, 0x2d, 0x6a, 0xe7, 0x4d, 0x98, 0xa8, 0x85, 0x44, 0xbb, + 0xd0, 0x5e, 0xe3, 0xde, 0xaf, 0xbc, 0x3f, 0xcf, 0x1c, 0xd9, 0xf3, 0xd5, 0xfe, 0xe9, 0x02, 0x9c, + 0xe6, 0x5a, 0xe1, 0xc5, 0xbd, 0xc0, 0x73, 0x6b, 0x81, 0x2b, 0x32, 0x4c, 0xbd, 0x09, 0xe3, 0x6d, + 0x4d, 0x2e, 0xd7, 0x2b, 0x02, 0x9d, 0x2e, 0xbf, 0x4b, 0x24, 0x09, 0x7a, 0x29, 0x36, 0x68, 0x21, + 0x17, 0xc6, 0xc9, 0x9e, 0xd7, 0x50, 0xaa, 0xc5, 0xc2, 0x91, 0x2f, 0x17, 0xd5, 0xca, 0x8a, 0x46, + 0x07, 0x1b, 0x54, 0x1f, 0x42, 0xd2, 0x51, 0xfb, 0x87, 0x2d, 0x78, 0x24, 0x27, 0x5e, 0x1d, 0x6d, + 0xee, 0x2e, 0xd3, 0xbf, 0x8b, 0xfc, 0x85, 0xaa, 0x39, 0xae, 0x95, 0xc7, 0x02, 0x8a, 0x3e, 0x0d, + 0xc0, 0xb5, 0xea, 0xf4, 0x85, 0xda, 0x2f, 0xb0, 0x97, 0x11, 0x93, 0x48, 0x0b, 0x2f, 0x23, 0xeb, + 0x63, 0x8d, 0x96, 0xfd, 0x13, 0x45, 0x18, 0xe6, 0x99, 0x97, 0x57, 0x61, 0x74, 0x87, 0xc7, 0xdd, + 0x1f, 0x24, 0xc4, 0x7f, 0x22, 0x3b, 0xe0, 0x05, 0x58, 0x56, 0x46, 0x6b, 0x70, 0x8a, 0xe7, 0x2d, + 0x68, 0x56, 0x48, 0xd3, 0xd9, 0x97, 0x82, 0x2e, 0x9e, 0xf3, 0x4f, 0x09, 0xfc, 0xaa, 0xdd, 0x28, + 0x38, 0xab, 0x1e, 0x7a, 0x0d, 0x26, 0xe9, 0xc3, 0x23, 0xe8, 0xc4, 0x92, 0x12, 0xcf, 0x58, 0xa0, + 0x5e, 0x3a, 0x1b, 0x06, 0x14, 0xa7, 0xb0, 0xe9, 0xdb, 0xb7, 0xdd, 0x25, 0xd2, 0x1b, 0x4e, 0xde, + 0xbe, 0xa6, 0x18, 0xcf, 0xc4, 0x65, 0xe6, 0x86, 0x1d, 0x66, 0x5c, 0xb9, 0xb1, 0x13, 0x92, 0x68, + 0x27, 0x68, 0xba, 0x8c, 0xd1, 0x1a, 0xd6, 0xcc, 0x0d, 0x53, 0x70, 0xdc, 0x55, 0x83, 0x52, 0xd9, + 0x72, 0xbc, 0x66, 0x27, 0x24, 0x09, 0x95, 0x11, 0x93, 0xca, 0x6a, 0x0a, 0x8e, 0xbb, 0x6a, 0xd0, + 0x75, 0x74, 0xa6, 0x16, 0x06, 0xf4, 0xf0, 0x92, 0x31, 0x38, 0x94, 0x0d, 0xe9, 0xa8, 0x74, 0x17, + 
0xec, 0x11, 0xae, 0x4a, 0x58, 0xd9, 0x71, 0x0a, 0x86, 0x02, 0xb9, 0x2e, 0x1c, 0x05, 0x25, 0x15, + 0xf4, 0x1c, 0x8c, 0x89, 0x68, 0xf4, 0xcc, 0xd4, 0x91, 0x4f, 0x1d, 0x53, 0x78, 0x57, 0x92, 0x62, + 0xac, 0xe3, 0xd8, 0xdf, 0x5b, 0x80, 0x53, 0x19, 0xb6, 0xea, 0xfc, 0xa8, 0xda, 0xf6, 0xa2, 0x58, + 0xe5, 0x35, 0xd3, 0x8e, 0x2a, 0x5e, 0x8e, 0x15, 0x06, 0xdd, 0x0f, 0xfc, 0x30, 0x4c, 0x1f, 0x80, + 0xc2, 0x16, 0x54, 0x40, 0x8f, 0x98, 0x21, 0xec, 0x22, 0x0c, 0x75, 0x22, 0x22, 0x03, 0xcd, 0xa9, + 0xf3, 0x9b, 0x69, 0x5c, 0x18, 0x84, 0xb2, 0xc7, 0xdb, 0x4a, 0x79, 0xa1, 0xb1, 0xc7, 0x5c, 0x7d, + 0xc1, 0x61, 0xb4, 0x73, 0x31, 0xf1, 0x1d, 0x3f, 0x16, 0x4c, 0x74, 0x12, 0x31, 0x89, 0x95, 0x62, + 0x01, 0xb5, 0xbf, 0x54, 0x84, 0x73, 0xb9, 0xde, 0x2b, 0xb4, 0xeb, 0xad, 0xc0, 0xf7, 0xe2, 0x40, + 0x59, 0x12, 0xf0, 0x28, 0x49, 0xa4, 0xbd, 0xb3, 0x26, 0xca, 0xb1, 0xc2, 0x40, 0x97, 0x60, 0x98, + 0x09, 0x9d, 0xba, 0x32, 0xbc, 0x2d, 0x55, 0x78, 0xd4, 0x0c, 0x0e, 0x1e, 0x38, 0x7b, 0xe6, 0x13, + 0x30, 0xd4, 0x0e, 0x82, 0x66, 0xfa, 0xd0, 0xa2, 0xdd, 0x0d, 0x82, 0x26, 0x66, 0x40, 0xf4, 0x11, + 0x31, 0x5e, 0x29, 0xd5, 0x39, 0x76, 0xdc, 0x20, 0xd2, 0x06, 0xed, 0x29, 0x18, 0xdd, 0x25, 0xfb, + 0xa1, 0xe7, 0x6f, 0xa7, 0x4d, 0x2a, 0xae, 0xf3, 0x62, 0x2c, 0xe1, 0x66, 0xb2, 0x9e, 0xd1, 0xe3, + 0x4e, 0x7b, 0x59, 0xea, 0x7b, 0x05, 0x7e, 0x5f, 0x11, 0xa6, 0xf0, 0x52, 0xe5, 0x83, 0x89, 0xb8, + 0xd5, 0x3d, 0x11, 0xc7, 0x9d, 0xf6, 0xb2, 0xff, 0x6c, 0xfc, 0x82, 0x05, 0x53, 0x2c, 0x26, 0xbe, + 0x08, 0xaf, 0xe3, 0x05, 0xfe, 0x09, 0xb0, 0x78, 0x4f, 0xc0, 0x70, 0x48, 0x1b, 0x4d, 0xa7, 0x76, + 0x63, 0x3d, 0xc1, 0x1c, 0x86, 0x1e, 0x83, 0x21, 0xd6, 0x05, 0x3a, 0x79, 0xe3, 0x3c, 0x2b, 0x4e, + 0xc5, 0x89, 0x1d, 0xcc, 0x4a, 0x59, 0xcc, 0x08, 0x4c, 0xda, 0x4d, 0x8f, 0x77, 0x3a, 0x51, 0x09, + 0xbe, 0x3f, 0x62, 0x46, 0x64, 0x76, 0xed, 0xbd, 0xc5, 0x8c, 0xc8, 0x26, 0xd9, 0xfb, 0xf9, 0xf4, + 0x87, 0x05, 0xb8, 0x90, 0x59, 0x6f, 0xe0, 0x98, 0x11, 0xbd, 0x6b, 0x3f, 0xcc, 0xd8, 0xe9, 0xc5, + 0x13, 0x34, 0x58, 0x1b, 0x1a, 0x94, 0xc3, 0x1c, 0x1e, 0x20, 0x94, 0x43, 0xe6, 0x90, 0xbd, 0x4f, + 0x42, 0x39, 0x64, 0xf6, 0x2d, 0xe7, 0xf9, 0xf7, 0xe7, 0x85, 0x9c, 0x6f, 0x61, 0x0f, 0xc1, 0xcb, + 0xf4, 0x9c, 0x61, 0xc0, 0x48, 0x70, 0xcc, 0xe3, 0xfc, 0x8c, 0xe1, 0x65, 0x58, 0x41, 0xd1, 0x22, + 0x4c, 0xb5, 0x3c, 0x9f, 0x1e, 0x3e, 0xfb, 0x26, 0xe3, 0xa7, 0x22, 0xed, 0xac, 0x99, 0x60, 0x9c, + 0xc6, 0x47, 0x9e, 0x16, 0xe6, 0xa1, 0x90, 0x9f, 0x2c, 0x39, 0xb7, 0xb7, 0x0b, 0xa6, 0xba, 0x54, + 0x8d, 0x62, 0x46, 0xc8, 0x87, 0x35, 0xed, 0xfd, 0x5f, 0x1c, 0xfc, 0xfd, 0x3f, 0x9e, 0xfd, 0xf6, + 0x9f, 0x7b, 0x15, 0x26, 0x1e, 0x58, 0xe0, 0x6b, 0x7f, 0xa5, 0x08, 0x8f, 0xf6, 0xd8, 0xf6, 0xfc, + 0xac, 0x37, 0xe6, 0x40, 0x3b, 0xeb, 0xbb, 0xe6, 0xa1, 0x06, 0xa7, 0xb7, 0x3a, 0xcd, 0xe6, 0x3e, + 0xb3, 0x09, 0x27, 0xae, 0xc4, 0x10, 0x3c, 0xe5, 0x63, 0x32, 0x0f, 0xd1, 0x6a, 0x06, 0x0e, 0xce, + 0xac, 0x49, 0x19, 0x7a, 0x7a, 0x93, 0xec, 0x2b, 0x52, 0x29, 0x86, 0x1e, 0xeb, 0x40, 0x6c, 0xe2, + 0xa2, 0xab, 0x30, 0xe3, 0xec, 0x39, 0x1e, 0x0f, 0x96, 0x29, 0x09, 0x70, 0x8e, 0x5e, 0xc9, 0xe9, + 0x16, 0xd3, 0x08, 0xb8, 0xbb, 0x0e, 0x7a, 0x1d, 0x50, 0x20, 0x92, 0xbd, 0x5f, 0x25, 0xbe, 0xd0, + 0x6a, 0xb1, 0xb9, 0x2b, 0x26, 0x47, 0xc2, 0xcd, 0x2e, 0x0c, 0x9c, 0x51, 0x2b, 0x15, 0x36, 0x61, + 0x24, 0x3f, 0x6c, 0x42, 0xef, 0x73, 0xb1, 0x6f, 0xd8, 0xfe, 0xff, 0x62, 0xd1, 0xeb, 0x8b, 0x33, + 0xf9, 0x66, 0xf4, 0xaf, 0x57, 0x99, 0x41, 0x17, 0x97, 0xe1, 0x69, 0x11, 0x0c, 0xce, 0x68, 0x06, + 0x5d, 0x09, 0x10, 0x9b, 0xb8, 0x7c, 0x41, 0x44, 0x89, 0xe3, 0x9c, 0xc1, 0xe2, 0x8b, 0x10, 0x25, + 0x0a, 0x03, 0x7d, 0x06, 
0x46, 0x5d, 0x6f, 0xcf, 0x8b, 0x82, 0x50, 0xac, 0xf4, 0x23, 0xaa, 0x0b, + 0x92, 0x73, 0xb0, 0xc2, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0xbe, 0x02, 0x4c, 0xc8, 0x16, 0xdf, 0xe8, + 0x04, 0xb1, 0x73, 0x02, 0xd7, 0xf2, 0x55, 0xe3, 0x5a, 0xfe, 0x48, 0xaf, 0x38, 0x2d, 0xac, 0x4b, + 0xb9, 0xd7, 0xf1, 0xcd, 0xd4, 0x75, 0xfc, 0x64, 0x7f, 0x52, 0xbd, 0xaf, 0xe1, 0x7f, 0x62, 0xc1, + 0x8c, 0x81, 0x7f, 0x02, 0xb7, 0xc1, 0xaa, 0x79, 0x1b, 0x3c, 0xde, 0xf7, 0x1b, 0x72, 0x6e, 0x81, + 0xef, 0x2a, 0xa6, 0xfa, 0xce, 0x4e, 0xff, 0x77, 0x60, 0x68, 0xc7, 0x09, 0xdd, 0x5e, 0x81, 0xa9, + 0xbb, 0x2a, 0x2d, 0x5c, 0x73, 0x42, 0xa1, 0xd6, 0x7b, 0x46, 0xe5, 0x2a, 0x76, 0xc2, 0xfe, 0x2a, + 0x3d, 0xd6, 0x14, 0x7a, 0x05, 0x46, 0xa2, 0x46, 0xd0, 0x56, 0x56, 0xdc, 0x17, 0x79, 0x1e, 0x63, + 0x5a, 0x72, 0x78, 0x30, 0x8f, 0xcc, 0xe6, 0x68, 0x31, 0x16, 0xf8, 0xe8, 0x4d, 0x98, 0x60, 0xbf, + 0x94, 0x8d, 0x4d, 0x31, 0x3f, 0x89, 0x4d, 0x5d, 0x47, 0xe4, 0x06, 0x68, 0x46, 0x11, 0x36, 0x49, + 0xcd, 0x6d, 0x43, 0x59, 0x7d, 0xd6, 0x43, 0xd5, 0xc7, 0xfd, 0xfb, 0x22, 0x9c, 0xca, 0x58, 0x73, + 0x28, 0x32, 0x66, 0xe2, 0xb9, 0x01, 0x97, 0xea, 0x7b, 0x9c, 0x8b, 0x88, 0xbd, 0x86, 0x5c, 0xb1, + 0xb6, 0x06, 0x6e, 0xf4, 0x56, 0x44, 0xd2, 0x8d, 0xd2, 0xa2, 0xfe, 0x8d, 0xd2, 0xc6, 0x4e, 0x6c, + 0xa8, 0x69, 0x43, 0xaa, 0xa7, 0x0f, 0x75, 0x4e, 0xff, 0xa4, 0x08, 0xa7, 0xb3, 0x42, 0x47, 0xa1, + 0x6f, 0x4d, 0x25, 0x34, 0x7b, 0x71, 0xd0, 0xa0, 0x53, 0x3c, 0xcb, 0x19, 0x97, 0x01, 0x2f, 0x2d, + 0x98, 0x29, 0xce, 0xfa, 0x0e, 0xb3, 0x68, 0x93, 0x39, 0x85, 0x87, 0x3c, 0x11, 0x9d, 0x3c, 0x3e, + 0x3e, 0x3e, 0x70, 0x07, 0x44, 0x06, 0xbb, 0x28, 0xa5, 0xbf, 0x97, 0xc5, 0xfd, 0xf5, 0xf7, 0xb2, + 0xe5, 0x39, 0x0f, 0xc6, 0xb4, 0xaf, 0x79, 0xa8, 0x33, 0xbe, 0x4b, 0x6f, 0x2b, 0xad, 0xdf, 0x0f, + 0x75, 0xd6, 0x7f, 0xd8, 0x82, 0x94, 0x35, 0xb4, 0x12, 0x8b, 0x59, 0xb9, 0x62, 0xb1, 0x8b, 0x30, + 0x14, 0x06, 0x4d, 0x92, 0xce, 0x1f, 0x86, 0x83, 0x26, 0xc1, 0x0c, 0x42, 0x31, 0xe2, 0x44, 0xd8, + 0x31, 0xae, 0x3f, 0xe4, 0xc4, 0x13, 0xed, 0x09, 0x18, 0x6e, 0x92, 0x3d, 0xd2, 0x4c, 0xa7, 0x79, + 0xb8, 0x41, 0x0b, 0x31, 0x87, 0xd9, 0xbf, 0x30, 0x04, 0xe7, 0x7b, 0x86, 0x55, 0xa0, 0xcf, 0xa1, + 0x6d, 0x27, 0x26, 0x77, 0x9d, 0xfd, 0x74, 0x3c, 0xf6, 0xab, 0xbc, 0x18, 0x4b, 0x38, 0xf3, 0x22, + 0xe1, 0x51, 0x55, 0x53, 0x42, 0x44, 0x11, 0x4c, 0x55, 0x40, 0x4d, 0xa1, 0x54, 0xf1, 0x38, 0x84, + 0x52, 0xcf, 0x03, 0x44, 0x51, 0x93, 0x1b, 0xbe, 0xb8, 0xc2, 0x3d, 0x25, 0x89, 0xbe, 0x5b, 0xbf, + 0x21, 0x20, 0x58, 0xc3, 0x42, 0x15, 0x98, 0x6e, 0x87, 0x41, 0xcc, 0x65, 0xb2, 0x15, 0x6e, 0x1b, + 0x36, 0x6c, 0x7a, 0xb4, 0xd7, 0x52, 0x70, 0xdc, 0x55, 0x03, 0xbd, 0x04, 0x63, 0xc2, 0xcb, 0xbd, + 0x16, 0x04, 0x4d, 0x21, 0x06, 0x52, 0xe6, 0x52, 0xf5, 0x04, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x13, + 0xf4, 0x8e, 0x66, 0x56, 0xe3, 0xc2, 0x5e, 0x0d, 0x2f, 0x15, 0x46, 0xae, 0x34, 0x50, 0x18, 0xb9, + 0x44, 0x30, 0x56, 0x1e, 0x58, 0xb7, 0x05, 0x7d, 0x45, 0x49, 0x3f, 0x3b, 0x04, 0xa7, 0xc4, 0xc2, + 0x79, 0xd8, 0xcb, 0xe5, 0x56, 0xf7, 0x72, 0x39, 0x0e, 0xd1, 0xd9, 0x07, 0x6b, 0xe6, 0xa4, 0xd7, + 0xcc, 0xf7, 0x5b, 0x60, 0xb2, 0x57, 0xe8, 0xff, 0xcb, 0x4d, 0x68, 0xf1, 0x52, 0x2e, 0xbb, 0xe6, + 0xca, 0x0b, 0xe4, 0x3d, 0xa6, 0xb6, 0xb0, 0xff, 0x93, 0x05, 0x8f, 0xf7, 0xa5, 0x88, 0x56, 0xa0, + 0xcc, 0x78, 0x40, 0xed, 0x75, 0xf6, 0xa4, 0xb2, 0x1d, 0x95, 0x80, 0x1c, 0x96, 0x34, 0xa9, 0x89, + 0x56, 0xba, 0x32, 0x87, 0x3c, 0x95, 0x91, 0x39, 0xe4, 0x8c, 0x31, 0x3c, 0x0f, 0x98, 0x3a, 0xe4, + 0x97, 0x8b, 0x30, 0xc2, 0x57, 0xfc, 0x09, 0x3c, 0xc3, 0x56, 0x85, 0xdc, 0xb6, 0x47, 0x9c, 0x3a, + 0xde, 0x97, 0x85, 0x8a, 0x13, 0x3b, 0x9c, 0x4d, 
0x50, 0xb7, 0x55, 0x22, 0xe1, 0x45, 0x9f, 0x03, + 0x88, 0xe2, 0xd0, 0xf3, 0xb7, 0x69, 0x99, 0x88, 0x60, 0xf8, 0xd1, 0x1e, 0xd4, 0xea, 0x0a, 0x99, + 0xd3, 0x4c, 0x76, 0xae, 0x02, 0x60, 0x8d, 0x22, 0x5a, 0x30, 0xee, 0xcb, 0xb9, 0x94, 0xe0, 0x13, + 0x38, 0xd5, 0xe4, 0xf6, 0x9c, 0x7b, 0x19, 0xca, 0x8a, 0x78, 0x3f, 0x29, 0xce, 0xb8, 0xce, 0x5c, + 0x7c, 0x0a, 0xa6, 0x52, 0x7d, 0x3b, 0x92, 0x10, 0xe8, 0x17, 0x2d, 0x98, 0xe2, 0x9d, 0x59, 0xf1, + 0xf7, 0xc4, 0x99, 0xfa, 0x2e, 0x9c, 0x6e, 0x66, 0x9c, 0x6d, 0x62, 0x46, 0x07, 0x3f, 0x0b, 0x95, + 0xd0, 0x27, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0x97, 0xe9, 0xba, 0xa5, 0x67, 0x97, 0xd3, 0x14, 0xce, + 0x86, 0xe3, 0x7c, 0xcd, 0xf2, 0x32, 0xac, 0xa0, 0xf6, 0x6f, 0x5b, 0x30, 0xc3, 0x7b, 0x7e, 0x9d, + 0xec, 0xab, 0x1d, 0xfe, 0xb5, 0xec, 0xbb, 0x48, 0xe6, 0x53, 0xc8, 0x49, 0xe6, 0xa3, 0x7f, 0x5a, + 0xb1, 0xe7, 0xa7, 0xfd, 0xb4, 0x05, 0x62, 0x85, 0x9c, 0xc0, 0x53, 0xfe, 0x1b, 0xcd, 0xa7, 0xfc, + 0x5c, 0xfe, 0x26, 0xc8, 0x79, 0xc3, 0xff, 0x99, 0x05, 0xd3, 0x1c, 0x21, 0xd1, 0x39, 0x7f, 0x4d, + 0xe7, 0x61, 0x90, 0x94, 0x9f, 0xd7, 0xc9, 0xfe, 0x46, 0x50, 0x73, 0xe2, 0x9d, 0xec, 0x8f, 0x32, + 0x26, 0x6b, 0xa8, 0xe7, 0x64, 0xb9, 0x72, 0x03, 0x1d, 0x21, 0x8f, 0xf0, 0x91, 0x43, 0xdd, 0xdb, + 0x5f, 0xb5, 0x00, 0xf1, 0x66, 0x0c, 0xf6, 0x87, 0x32, 0x15, 0xac, 0x54, 0xbb, 0x2e, 0x92, 0xa3, + 0x49, 0x41, 0xb0, 0x86, 0x75, 0x2c, 0xc3, 0x93, 0x32, 0x1c, 0x28, 0xf6, 0x37, 0x1c, 0x38, 0xc2, + 0x88, 0xfe, 0xc1, 0x30, 0xa4, 0x3d, 0x40, 0xd0, 0x6d, 0x18, 0x6f, 0x38, 0x6d, 0x67, 0xd3, 0x6b, + 0x7a, 0xb1, 0x47, 0xa2, 0x5e, 0x16, 0x47, 0xcb, 0x1a, 0x9e, 0x50, 0xf5, 0x6a, 0x25, 0xd8, 0xa0, + 0x83, 0x16, 0x00, 0xda, 0xa1, 0xb7, 0xe7, 0x35, 0xc9, 0x36, 0x93, 0x38, 0x30, 0xf7, 0x66, 0x6e, + 0x46, 0x23, 0x4b, 0xb1, 0x86, 0x91, 0xe1, 0xa9, 0x5a, 0x7c, 0xc8, 0x9e, 0xaa, 0x70, 0x62, 0x9e, + 0xaa, 0x43, 0x47, 0xf2, 0x54, 0x2d, 0x1d, 0xd9, 0x53, 0x75, 0x78, 0x20, 0x4f, 0x55, 0x0c, 0x67, + 0x25, 0x07, 0x47, 0xff, 0xaf, 0x7a, 0x4d, 0x22, 0xd8, 0x76, 0xee, 0x93, 0x3d, 0x77, 0xff, 0x60, + 0xfe, 0x2c, 0xce, 0xc4, 0xc0, 0x39, 0x35, 0xd1, 0xa7, 0x61, 0xd6, 0x69, 0x36, 0x83, 0xbb, 0x6a, + 0x52, 0x57, 0xa2, 0x86, 0xd3, 0xe4, 0xa2, 0xfc, 0x51, 0x46, 0xf5, 0xb1, 0xfb, 0x07, 0xf3, 0xb3, + 0x8b, 0x39, 0x38, 0x38, 0xb7, 0x36, 0xfa, 0x24, 0x94, 0xdb, 0x61, 0xd0, 0x58, 0xd3, 0xdc, 0xd4, + 0x2e, 0xd0, 0x01, 0xac, 0xc9, 0xc2, 0xc3, 0x83, 0xf9, 0x09, 0xf5, 0x87, 0x5d, 0xf8, 0x49, 0x05, + 0x7b, 0x17, 0x4e, 0xd5, 0x49, 0xe8, 0xb1, 0xac, 0xc0, 0x6e, 0x72, 0x7e, 0x6c, 0x40, 0x39, 0x4c, + 0x9d, 0x98, 0x03, 0xc5, 0x76, 0xd3, 0x62, 0x82, 0xcb, 0x13, 0x32, 0x21, 0x64, 0xff, 0x6f, 0x0b, + 0x46, 0x85, 0x47, 0xc6, 0x09, 0x30, 0x6a, 0x8b, 0x86, 0xbc, 0x7c, 0x3e, 0xfb, 0x56, 0x61, 0x9d, + 0xc9, 0x95, 0x94, 0x57, 0x53, 0x92, 0xf2, 0xc7, 0x7b, 0x11, 0xe9, 0x2d, 0x23, 0xff, 0x9b, 0x45, + 0x98, 0x34, 0x5d, 0xf7, 0x4e, 0x60, 0x08, 0xd6, 0x61, 0x34, 0x12, 0xbe, 0x69, 0x85, 0x7c, 0x8b, + 0xec, 0xf4, 0x24, 0x26, 0xd6, 0x5a, 0xc2, 0x1b, 0x4d, 0x12, 0xc9, 0x74, 0x7a, 0x2b, 0x3e, 0x44, + 0xa7, 0xb7, 0x7e, 0xde, 0x93, 0x43, 0xc7, 0xe1, 0x3d, 0x69, 0x7f, 0x99, 0xdd, 0x6c, 0x7a, 0xf9, + 0x09, 0x30, 0x3d, 0x57, 0xcd, 0x3b, 0xd0, 0xee, 0xb1, 0xb2, 0x44, 0xa7, 0x72, 0x98, 0x9f, 0x9f, + 0xb7, 0xe0, 0x7c, 0xc6, 0x57, 0x69, 0x9c, 0xd0, 0x33, 0x50, 0x72, 0x3a, 0xae, 0xa7, 0xf6, 0xb2, + 0xa6, 0x35, 0x5b, 0x14, 0xe5, 0x58, 0x61, 0xa0, 0x65, 0x98, 0x21, 0xf7, 0xda, 0x1e, 0x57, 0x18, + 0xea, 0x26, 0x95, 0x45, 0x1e, 0xef, 0x7a, 0x25, 0x0d, 0xc4, 0xdd, 0xf8, 0x2a, 0xd8, 0x43, 0x31, + 0x37, 0xd8, 0xc3, 0xdf, 0xb7, 0x60, 0x4c, 0x79, 0x67, 0x3d, 0xf4, 0xd1, 
0xfe, 0x26, 0x73, 0xb4, + 0x1f, 0xed, 0x31, 0xda, 0x39, 0xc3, 0xfc, 0xb7, 0x0b, 0xaa, 0xbf, 0xb5, 0x20, 0x8c, 0x07, 0xe0, + 0xb0, 0x5e, 0x81, 0x52, 0x3b, 0x0c, 0xe2, 0xa0, 0x11, 0x34, 0x05, 0x83, 0xf5, 0x58, 0x12, 0x8b, + 0x84, 0x97, 0x1f, 0x6a, 0xbf, 0xb1, 0xc2, 0x66, 0xa3, 0x17, 0x84, 0xb1, 0x60, 0x6a, 0x92, 0xd1, + 0x0b, 0xc2, 0x18, 0x33, 0x08, 0x72, 0x01, 0x62, 0x27, 0xdc, 0x26, 0x31, 0x2d, 0x13, 0xb1, 0x8f, + 0xf2, 0x0f, 0x8f, 0x4e, 0xec, 0x35, 0x17, 0x3c, 0x3f, 0x8e, 0xe2, 0x70, 0xa1, 0xea, 0xc7, 0x37, + 0x43, 0xfe, 0x5e, 0xd3, 0x82, 0x8b, 0x28, 0x5a, 0x58, 0xa3, 0x2b, 0xdd, 0x8a, 0x59, 0x1b, 0xc3, + 0xa6, 0xfe, 0x7d, 0x5d, 0x94, 0x63, 0x85, 0x61, 0xbf, 0xcc, 0xae, 0x12, 0x36, 0x40, 0x47, 0x8b, + 0xfb, 0xf1, 0x9d, 0x65, 0x35, 0xb4, 0x4c, 0xf9, 0x56, 0xd1, 0xa3, 0x8b, 0xf4, 0x3e, 0xb9, 0x69, + 0xc3, 0xba, 0x8b, 0x51, 0x12, 0x82, 0x04, 0x7d, 0x73, 0x97, 0x4d, 0xc5, 0xb3, 0x7d, 0xae, 0x80, + 0x23, 0x58, 0x51, 0xb0, 0x18, 0xfc, 0x2c, 0x42, 0x79, 0xb5, 0x26, 0x16, 0xb9, 0x16, 0x83, 0x5f, + 0x00, 0x70, 0x82, 0x83, 0xae, 0x88, 0xd7, 0xf8, 0x90, 0x91, 0x79, 0x52, 0xbe, 0xc6, 0xe5, 0xe7, + 0x6b, 0xc2, 0xec, 0xe7, 0x60, 0x4c, 0x65, 0xa0, 0xac, 0xf1, 0xc4, 0x86, 0x22, 0x12, 0xd4, 0x4a, + 0x52, 0x8c, 0x75, 0x1c, 0xb4, 0x01, 0x53, 0x11, 0x17, 0xf5, 0xa8, 0x80, 0x9f, 0x5c, 0x64, 0xf6, + 0x51, 0x69, 0x88, 0x52, 0x37, 0xc1, 0x87, 0xac, 0x88, 0x1f, 0x1d, 0xd2, 0x95, 0x37, 0x4d, 0x02, + 0xbd, 0x06, 0x93, 0xcd, 0xc0, 0x71, 0x97, 0x9c, 0xa6, 0xe3, 0x37, 0xd8, 0xf7, 0x96, 0xcc, 0x44, + 0x66, 0x37, 0x0c, 0x28, 0x4e, 0x61, 0x53, 0xce, 0x47, 0x2f, 0x11, 0x41, 0x6a, 0x1d, 0x7f, 0x9b, + 0x44, 0x22, 0x9f, 0x20, 0xe3, 0x7c, 0x6e, 0xe4, 0xe0, 0xe0, 0xdc, 0xda, 0xe8, 0x15, 0x18, 0x97, + 0x9f, 0xaf, 0x79, 0xbe, 0x27, 0xb6, 0xf7, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x2e, 0x9c, 0x91, 0xff, + 0x37, 0x42, 0x67, 0x6b, 0xcb, 0x6b, 0x08, 0x77, 0x50, 0xee, 0x18, 0xb7, 0x28, 0xbd, 0xb7, 0x56, + 0xb2, 0x90, 0x0e, 0x0f, 0xe6, 0x2f, 0x8a, 0x51, 0xcb, 0x84, 0xb3, 0x49, 0xcc, 0xa6, 0x8f, 0xd6, + 0xe0, 0xd4, 0x0e, 0x71, 0x9a, 0xf1, 0xce, 0xf2, 0x0e, 0x69, 0xec, 0xca, 0x4d, 0xc4, 0xfc, 0xe9, + 0x35, 0x8b, 0xf5, 0x6b, 0xdd, 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x0b, 0x66, 0xdb, 0x9d, 0xcd, 0xa6, + 0x17, 0xed, 0xac, 0x07, 0x31, 0xb3, 0x46, 0x51, 0x09, 0x2d, 0x85, 0xe3, 0xbd, 0x8a, 0x58, 0x50, + 0xcb, 0xc1, 0xc3, 0xb9, 0x14, 0xd0, 0xbb, 0x70, 0x26, 0xb5, 0x18, 0x84, 0xeb, 0xf1, 0x64, 0x7e, + 0xc8, 0xef, 0x7a, 0x56, 0x05, 0xe1, 0xc5, 0x9f, 0x05, 0xc2, 0xd9, 0x4d, 0xa0, 0x17, 0xa1, 0xe4, + 0xb5, 0x57, 0x9d, 0x96, 0xd7, 0xdc, 0x67, 0x31, 0xcb, 0xcb, 0x2c, 0x8e, 0x77, 0xa9, 0x5a, 0xe3, + 0x65, 0x87, 0xda, 0x6f, 0xac, 0x30, 0xdf, 0x9b, 0x35, 0xd2, 0x3b, 0xb4, 0xb2, 0xc6, 0xca, 0xa1, + 0xcf, 0xc3, 0xb8, 0xbe, 0xf6, 0xc4, 0xb5, 0x74, 0x29, 0x9b, 0xd3, 0xd1, 0xd6, 0x28, 0x67, 0x04, + 0xd5, 0x3a, 0xd4, 0x61, 0xd8, 0xa0, 0x68, 0x13, 0xc8, 0x1e, 0x15, 0x74, 0x03, 0x4a, 0x8d, 0xa6, + 0x47, 0xfc, 0xb8, 0x5a, 0xeb, 0x15, 0x88, 0x68, 0x59, 0xe0, 0x88, 0x61, 0x16, 0x91, 0x95, 0x79, + 0x19, 0x56, 0x14, 0xec, 0x5f, 0x2d, 0xc0, 0x7c, 0x9f, 0x30, 0xdd, 0x29, 0xa1, 0xb9, 0x35, 0x90, + 0xd0, 0x7c, 0x51, 0x26, 0xf5, 0x5c, 0x4f, 0x49, 0x12, 0x52, 0x09, 0x3b, 0x13, 0x79, 0x42, 0x1a, + 0x7f, 0x60, 0x23, 0x66, 0x5d, 0xee, 0x3e, 0xd4, 0xd7, 0x0c, 0xdf, 0xd0, 0xb7, 0x0d, 0x0f, 0xfe, + 0x7c, 0xc9, 0xd5, 0x9d, 0xd8, 0x5f, 0x2e, 0xc0, 0x19, 0x35, 0x84, 0x5f, 0xbf, 0x03, 0x77, 0xab, + 0x7b, 0xe0, 0x8e, 0x41, 0xf3, 0x64, 0xdf, 0x84, 0x11, 0x1e, 0x59, 0x69, 0x00, 0xb6, 0xe9, 0x09, + 0x33, 0x34, 0xa0, 0xba, 0xdc, 0x8d, 0xf0, 0x80, 0xdf, 0x63, 0xc1, 0xd4, 0xc6, 0x72, 0xad, 0x1e, + 
0x34, 0x76, 0x49, 0xbc, 0xc8, 0xd9, 0x5c, 0x2c, 0xb8, 0x26, 0xeb, 0x01, 0xb9, 0xa1, 0x2c, 0x3e, + 0xeb, 0x22, 0x0c, 0xed, 0x04, 0x51, 0x9c, 0x56, 0x4b, 0x5f, 0x0b, 0xa2, 0x18, 0x33, 0x88, 0xfd, + 0x3b, 0x16, 0x0c, 0xb3, 0x3c, 0xd6, 0xfd, 0x32, 0xa9, 0x0f, 0xf2, 0x5d, 0xe8, 0x25, 0x18, 0x21, + 0x5b, 0x5b, 0xa4, 0x11, 0x8b, 0x59, 0x95, 0x7e, 0xc4, 0x23, 0x2b, 0xac, 0x94, 0xb2, 0x0a, 0xac, + 0x31, 0xfe, 0x17, 0x0b, 0x64, 0x74, 0x07, 0xca, 0xb1, 0xd7, 0x22, 0x8b, 0xae, 0x2b, 0x14, 0x7b, + 0x0f, 0xe0, 0x0b, 0xbd, 0x21, 0x09, 0xe0, 0x84, 0x96, 0xfd, 0xa5, 0x02, 0x40, 0x12, 0x57, 0xa3, + 0xdf, 0x27, 0x2e, 0x75, 0xa9, 0x7c, 0x2e, 0x65, 0xa8, 0x7c, 0x50, 0x42, 0x30, 0x43, 0xdf, 0xa3, + 0x86, 0xa9, 0x38, 0xd0, 0x30, 0x0d, 0x1d, 0x65, 0x98, 0x96, 0x61, 0x26, 0x89, 0x0b, 0x62, 0x86, + 0x45, 0x62, 0x4f, 0x9b, 0x8d, 0x34, 0x10, 0x77, 0xe3, 0xdb, 0x04, 0x2e, 0xaa, 0xf0, 0x08, 0xe2, + 0xae, 0x61, 0x76, 0xa3, 0x47, 0x48, 0xaa, 0x9f, 0xe8, 0xb4, 0x0a, 0xb9, 0x3a, 0xad, 0x1f, 0xb3, + 0xe0, 0x74, 0xba, 0x1d, 0xe6, 0xc8, 0xf7, 0x45, 0x0b, 0xce, 0x30, 0xcd, 0x1e, 0x6b, 0xb5, 0x5b, + 0x8f, 0xf8, 0x62, 0xcf, 0x90, 0x0f, 0x39, 0x3d, 0x4e, 0x1c, 0xd6, 0xd7, 0xb2, 0x48, 0xe3, 0xec, + 0x16, 0xed, 0xff, 0x58, 0x80, 0xd9, 0xbc, 0x58, 0x11, 0xcc, 0xac, 0xdc, 0xb9, 0x57, 0xdf, 0x25, + 0x77, 0x85, 0xf1, 0x6e, 0x62, 0x56, 0xce, 0x8b, 0xb1, 0x84, 0xa7, 0x23, 0x2f, 0x17, 0x06, 0x8b, + 0xbc, 0x8c, 0x76, 0x60, 0xe6, 0xee, 0x0e, 0xf1, 0x6f, 0xf9, 0x91, 0x13, 0x7b, 0xd1, 0x96, 0xc7, + 0x32, 0xa2, 0xf3, 0x75, 0xf3, 0x09, 0x69, 0x62, 0x7b, 0x27, 0x8d, 0x70, 0x78, 0x30, 0x7f, 0xde, + 0x28, 0x48, 0xba, 0xcc, 0x0f, 0x12, 0xdc, 0x4d, 0xb4, 0x3b, 0x70, 0xf5, 0xd0, 0x43, 0x0c, 0x5c, + 0x6d, 0x7f, 0xd1, 0x82, 0x73, 0xb9, 0x89, 0xe5, 0xd0, 0x65, 0x28, 0x39, 0x6d, 0x8f, 0x8b, 0x40, + 0xc5, 0x31, 0xca, 0x9e, 0xf2, 0xb5, 0x2a, 0x17, 0x80, 0x2a, 0xa8, 0x4a, 0x78, 0x5b, 0xc8, 0x4d, + 0x78, 0xdb, 0x37, 0x7f, 0xad, 0xfd, 0xdd, 0x16, 0x08, 0x97, 0xb8, 0x01, 0xce, 0xee, 0x37, 0x65, + 0xbe, 0x70, 0x23, 0xb9, 0xc5, 0xc5, 0x7c, 0x1f, 0x41, 0x91, 0xd2, 0x42, 0xf1, 0x4a, 0x46, 0x22, + 0x0b, 0x83, 0x96, 0xed, 0x82, 0x80, 0x56, 0x08, 0x13, 0x20, 0xf6, 0xef, 0xcd, 0xf3, 0x00, 0x2e, + 0xc3, 0xd5, 0xb2, 0x06, 0xab, 0x9b, 0xb9, 0xa2, 0x20, 0x58, 0xc3, 0xb2, 0xff, 0x6d, 0x01, 0xc6, + 0x64, 0x32, 0x85, 0x8e, 0x3f, 0xc8, 0x33, 0xff, 0x48, 0xd9, 0xd5, 0x58, 0x9a, 0x6d, 0x4a, 0xb8, + 0x96, 0x48, 0x47, 0x92, 0x34, 0xdb, 0x12, 0x80, 0x13, 0x1c, 0xba, 0x8b, 0xa2, 0xce, 0x26, 0x43, + 0x4f, 0x39, 0x70, 0xd5, 0x79, 0x31, 0x96, 0x70, 0xf4, 0x69, 0x98, 0xe6, 0xf5, 0xc2, 0xa0, 0xed, + 0x6c, 0x73, 0xd9, 0xf2, 0xb0, 0xf2, 0xbc, 0x9e, 0x5e, 0x4b, 0xc1, 0x0e, 0x0f, 0xe6, 0x4f, 0xa7, + 0xcb, 0x98, 0xd2, 0xa4, 0x8b, 0x0a, 0x33, 0xc4, 0xe0, 0x8d, 0xd0, 0xdd, 0xdf, 0x65, 0xbf, 0x91, + 0x80, 0xb0, 0x8e, 0x67, 0x7f, 0x1e, 0x50, 0x77, 0x5a, 0x09, 0xf4, 0x3a, 0xb7, 0xbe, 0xf3, 0x42, + 0xe2, 0xf6, 0x52, 0xa2, 0xe8, 0xfe, 0xc5, 0xd2, 0xf7, 0x82, 0xd7, 0xc2, 0xaa, 0xbe, 0xfd, 0x57, + 0x8a, 0x30, 0x9d, 0xf6, 0x36, 0x45, 0xd7, 0x60, 0x84, 0xb3, 0x1e, 0x82, 0x7c, 0x0f, 0x1d, 0xbd, + 0xe6, 0xa3, 0xca, 0x0e, 0x61, 0xc1, 0xbd, 0x88, 0xfa, 0xe8, 0x2d, 0x18, 0x73, 0x83, 0xbb, 0xfe, + 0x5d, 0x27, 0x74, 0x17, 0x6b, 0x55, 0xb1, 0x9c, 0x33, 0xdf, 0x3d, 0x95, 0x04, 0x4d, 0xf7, 0x7b, + 0x65, 0xfa, 0xa8, 0x04, 0x84, 0x75, 0x72, 0x68, 0x83, 0x45, 0xc1, 0xdd, 0xf2, 0xb6, 0xd7, 0x9c, + 0x76, 0x2f, 0x53, 0xec, 0x65, 0x89, 0xa4, 0x51, 0x9e, 0x10, 0xa1, 0x72, 0x39, 0x00, 0x27, 0x84, + 0xd0, 0xb7, 0xc2, 0xa9, 0x28, 0x47, 0x54, 0x9a, 0x97, 0x65, 0xa8, 0x97, 0xf4, 0x70, 0xe9, 0x11, + 0xfa, 0x22, 0xcd, 0x12, 
0xaa, 0x66, 0x35, 0x63, 0xff, 0xda, 0x29, 0x30, 0x36, 0xb1, 0x91, 0x74, + 0xce, 0x3a, 0xa6, 0xa4, 0x73, 0x18, 0x4a, 0xa4, 0xd5, 0x8e, 0xf7, 0x2b, 0x5e, 0xd8, 0x2b, 0x6b, + 0xe9, 0x8a, 0xc0, 0xe9, 0xa6, 0x29, 0x21, 0x58, 0xd1, 0xc9, 0xce, 0x0c, 0x58, 0xfc, 0x1a, 0x66, + 0x06, 0x1c, 0x3a, 0xc1, 0xcc, 0x80, 0xeb, 0x30, 0xba, 0xed, 0xc5, 0x98, 0xb4, 0x03, 0xc1, 0xf4, + 0x67, 0xae, 0xc3, 0xab, 0x1c, 0xa5, 0x3b, 0x07, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xae, 0x76, + 0xe0, 0x48, 0xfe, 0x9b, 0xb9, 0x5b, 0x99, 0x9c, 0xb9, 0x07, 0x45, 0xfe, 0xbf, 0xd1, 0x07, 0xcd, + 0xff, 0xb7, 0x2a, 0xb3, 0xf6, 0x95, 0xf2, 0xfd, 0x26, 0x58, 0x52, 0xbe, 0x3e, 0xb9, 0xfa, 0x6e, + 0xeb, 0x99, 0x0e, 0xcb, 0xf9, 0x27, 0x81, 0x4a, 0x62, 0x38, 0x60, 0x7e, 0xc3, 0xef, 0xb6, 0xe0, + 0x4c, 0x3b, 0x2b, 0xe9, 0xa7, 0xd0, 0xbb, 0xbe, 0x34, 0x70, 0x56, 0x53, 0xa3, 0x41, 0x26, 0x72, + 0xc9, 0x44, 0xc3, 0xd9, 0xcd, 0xd1, 0x81, 0x0e, 0x37, 0x5d, 0x91, 0xa0, 0xef, 0x89, 0x9c, 0x44, + 0x89, 0x3d, 0xd2, 0x23, 0x6e, 0x64, 0x24, 0xe5, 0xfb, 0x70, 0x5e, 0x52, 0xbe, 0x81, 0x53, 0xf1, + 0xbd, 0xae, 0x52, 0x24, 0x4e, 0xe4, 0x2f, 0x25, 0x9e, 0x00, 0xb1, 0x6f, 0x62, 0xc4, 0xd7, 0x55, + 0x62, 0xc4, 0x1e, 0x11, 0x21, 0x79, 0xda, 0xc3, 0xbe, 0xe9, 0x10, 0xb5, 0x94, 0x86, 0x53, 0xc7, + 0x93, 0xd2, 0xd0, 0xb8, 0x6a, 0x78, 0x56, 0xbd, 0xa7, 0xfb, 0x5c, 0x35, 0x06, 0xdd, 0xde, 0x97, + 0x0d, 0x4f, 0xdf, 0x38, 0xf3, 0x40, 0xe9, 0x1b, 0x6f, 0xeb, 0xe9, 0x10, 0x51, 0x9f, 0x7c, 0x7f, + 0x14, 0x69, 0xc0, 0x24, 0x88, 0xb7, 0xf5, 0x0b, 0xf0, 0x54, 0x3e, 0x5d, 0x75, 0xcf, 0x75, 0xd3, + 0xcd, 0xbc, 0x02, 0xbb, 0x92, 0x2b, 0x9e, 0x3e, 0x99, 0xe4, 0x8a, 0x67, 0x8e, 0x3d, 0xb9, 0xe2, + 0xd9, 0x13, 0x48, 0xae, 0xf8, 0xc8, 0x09, 0x26, 0x57, 0xbc, 0xcd, 0x8c, 0x15, 0x78, 0x60, 0x11, + 0x11, 0xc1, 0x32, 0x3b, 0x5a, 0x62, 0x56, 0xf4, 0x11, 0xfe, 0x71, 0x0a, 0x84, 0x13, 0x52, 0x19, + 0x49, 0x1b, 0x67, 0x1f, 0x42, 0xd2, 0xc6, 0xf5, 0x24, 0x69, 0xe3, 0xb9, 0xfc, 0xa9, 0xce, 0x30, + 0x12, 0xcf, 0x49, 0xd5, 0x78, 0x5b, 0x4f, 0xb1, 0xf8, 0x68, 0x0f, 0xa1, 0x7a, 0x96, 0xe0, 0xb1, + 0x47, 0x62, 0xc5, 0xd7, 0x78, 0x62, 0xc5, 0xc7, 0xf2, 0x4f, 0xf2, 0xf4, 0x75, 0x67, 0xa6, 0x53, + 0xfc, 0xde, 0x02, 0x5c, 0xe8, 0xbd, 0x2f, 0x12, 0xa9, 0x67, 0x2d, 0xd1, 0xed, 0xa5, 0xa4, 0x9e, + 0xfc, 0x6d, 0x95, 0x60, 0x0d, 0x1c, 0x73, 0xea, 0x2a, 0xcc, 0x28, 0x2b, 0xf0, 0xa6, 0xd7, 0xd8, + 0xd7, 0x32, 0xc8, 0x2b, 0xcf, 0xd9, 0x7a, 0x1a, 0x01, 0x77, 0xd7, 0x41, 0x8b, 0x30, 0x65, 0x14, + 0x56, 0x2b, 0xe2, 0x0d, 0xa5, 0xc4, 0xac, 0x75, 0x13, 0x8c, 0xd3, 0xf8, 0xf6, 0x4f, 0x59, 0xf0, + 0x48, 0x4e, 0xde, 0xa2, 0x81, 0x43, 0x2a, 0x6d, 0xc1, 0x54, 0xdb, 0xac, 0xda, 0x27, 0xf2, 0x9a, + 0x91, 0x1d, 0x49, 0xf5, 0x35, 0x05, 0xc0, 0x69, 0xa2, 0xf6, 0x9f, 0x5a, 0x70, 0xbe, 0xa7, 0x41, + 0x16, 0xc2, 0x70, 0x76, 0xbb, 0x15, 0x39, 0xcb, 0x21, 0x71, 0x89, 0x1f, 0x7b, 0x4e, 0xb3, 0xde, + 0x26, 0x0d, 0x4d, 0x6e, 0xcd, 0x2c, 0x9b, 0xae, 0xae, 0xd5, 0x17, 0xbb, 0x31, 0x70, 0x4e, 0x4d, + 0xb4, 0x0a, 0xa8, 0x1b, 0x22, 0x66, 0x98, 0x45, 0x67, 0xed, 0xa6, 0x87, 0x33, 0x6a, 0xa0, 0x97, + 0x61, 0x42, 0x19, 0x7a, 0x69, 0x33, 0xce, 0x0e, 0x60, 0xac, 0x03, 0xb0, 0x89, 0xb7, 0x74, 0xf9, + 0x37, 0x7e, 0xef, 0xc2, 0x87, 0x7e, 0xeb, 0xf7, 0x2e, 0x7c, 0xe8, 0xb7, 0x7f, 0xef, 0xc2, 0x87, + 0xbe, 0xfd, 0xfe, 0x05, 0xeb, 0x37, 0xee, 0x5f, 0xb0, 0x7e, 0xeb, 0xfe, 0x05, 0xeb, 0xb7, 0xef, + 0x5f, 0xb0, 0x7e, 0xf7, 0xfe, 0x05, 0xeb, 0x4b, 0xbf, 0x7f, 0xe1, 0x43, 0x6f, 0x16, 0xf6, 0x9e, + 0xfb, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x11, 0xe2, 0x4d, 0x14, 0xfc, 0x00, 0x00, +} + +func (m *AWSElasticBlockStoreVolumeSource) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AWSElasticBlockStoreVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n69, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n69 + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n70, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n70 - return i, nil + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) { +func (m *Affinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Affinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Affinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.Max) > 0 { - keysForMax := make([]string, 0, len(m.Max)) - for k := range m.Max { - keysForMax = append(keysForMax, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMax) - for _, k := range keysForMax { - dAtA[i] = 0x12 - i++ - v := m.Max[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n71, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n71 - } - } - if len(m.Min) > 0 { - keysForMin := make([]string, 0, len(m.Min)) - for k := range m.Min { - keysForMin = append(keysForMin, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMin) - for _, k := range keysForMin { - dAtA[i] = 0x1a - i++ - v := m.Min[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, 
i, uint64((&v).Size())) - n72, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n72 - } - } - if len(m.Default) > 0 { - keysForDefault := make([]string, 0, len(m.Default)) - for k := range m.Default { - keysForDefault = append(keysForDefault, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) - for _, k := range keysForDefault { - dAtA[i] = 0x22 - i++ - v := m.Default[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n73, err := (&v).MarshalTo(dAtA[i:]) + if m.PodAntiAffinity != nil { + { + size, err := m.PodAntiAffinity.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n73 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.DefaultRequest) > 0 { - keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) - for k := range m.DefaultRequest { - keysForDefaultRequest = append(keysForDefaultRequest, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) - for _, k := range keysForDefaultRequest { - dAtA[i] = 0x2a - i++ - v := m.DefaultRequest[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n74, err := (&v).MarshalTo(dAtA[i:]) + if m.PodAffinity != nil { + { + size, err := m.PodAffinity.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n74 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.MaxLimitRequestRatio) > 0 { - keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) - for k := range m.MaxLimitRequestRatio { - keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) - for _, k := range keysForMaxLimitRequestRatio { - dAtA[i] = 0x32 - i++ - v := m.MaxLimitRequestRatio[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n75, err := (&v).MarshalTo(dAtA[i:]) + if m.NodeAffinity != nil { + { + size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n75 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { +func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttachedVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.DevicePath) + copy(dAtA[i:], m.DevicePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n76, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n76 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil + return len(dAtA) - i, nil } -func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { +func (m *AvoidPods) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Limits) > 0 { - for _, msg := range m.Limits { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *List) Marshal() (dAtA []byte, err error) { +func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *List) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AvoidPods) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n77, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n77 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.PreferAvoidPods) > 0 { + for iNdEx := len(m.PreferAvoidPods) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferAvoidPods[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { +func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - return i, nil -} - -func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { +func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AzureDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if m.Kind != nil { + i -= len(*m.Kind) + copy(dAtA[i:], *m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) + i-- + dAtA[i] = 0x32 + } + if m.ReadOnly != nil { + i-- + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x28 + } + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x22 + } + if m.CachingMode != nil { + i -= len(*m.CachingMode) + copy(dAtA[i:], *m.CachingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode))) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.DataDiskURI) + copy(dAtA[i:], m.DataDiskURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI))) + i-- + dAtA[i] = 0x12 + i -= len(m.DiskName) + copy(dAtA[i:], m.DiskName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { +func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil -} - -func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AzureFilePersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.FSType != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i += copy(dAtA[i:], *m.FSType) + if m.SecretNamespace != nil { + i -= len(*m.SecretNamespace) + copy(dAtA[i:], *m.SecretNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace))) + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.ShareName) + copy(dAtA[i:], m.ShareName) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ShareName))) + i-- + dAtA[i] = 0x12 + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *AzureFileVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AzureFileVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) - i += copy(dAtA[i:], m.Server) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.ShareName) + copy(dAtA[i:], m.ShareName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) + i-- + dAtA[i] = 0x12 + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Namespace) Marshal() (dAtA []byte, err error) { +func (m *Binding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Binding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Binding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n78, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n78 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n79, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n79 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n80, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n80 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamespaceList) Marshal() (dAtA []byte, err error) { +func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n81, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ControllerExpandSecretRef != nil { + { + size, err := m.ControllerExpandSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a } - i += n81 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.NodePublishSecretRef != nil { + { + size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - return i, nil -} + if m.NodeStageSecretRef != nil { + { + size, err := m.NodeStageSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.ControllerPublishSecretRef != nil { + { + size, err := m.ControllerPublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.VolumeAttributes) > 0 { + keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) + for k := range m.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForVolumeAttributes[iNdEx]) + copy(dAtA[i:], keysForVolumeAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x22 + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.VolumeHandle) + copy(dAtA[i:], m.VolumeHandle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} -func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { +func (m *CSIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Finalizers) > 0 { - for _, s := range 
m.Finalizers { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.NodePublishSecretRef != nil { + { + size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.VolumeAttributes) > 0 { + keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) + for k := range m.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForVolumeAttributes[iNdEx]) + copy(dAtA[i:], keysForVolumeAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x1a + } + if m.ReadOnly != nil { + i-- + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 } - return i, nil + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { +func (m *Capabilities) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Capabilities) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - return i, nil + if len(m.Drop) > 0 { + for iNdEx := len(m.Drop) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Drop[iNdEx]) + copy(dAtA[i:], m.Drop[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Drop[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Add) > 0 { + for iNdEx := len(m.Add) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Add[iNdEx]) + copy(dAtA[i:], m.Add[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Add[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *Node) Marshal() (dAtA []byte, err error) { +func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Node) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CephFSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n82, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n82 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n83, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x30 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - i += n83 + i -= len(m.SecretFile) + copy(dAtA[i:], m.SecretFile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) + i-- + dAtA[i] = 0x22 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n84, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + if len(m.Monitors) > 0 { + for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Monitors[iNdEx]) + copy(dAtA[i:], m.Monitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - i += n84 - return i, nil + return len(dAtA) - i, nil } -func (m *NodeAddress) Marshal() (dAtA []byte, err error) { +func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address))) - i += copy(dAtA[i:], m.Address) - return i, nil -} - -func (m *NodeAffinity) Marshal() (dAtA []byte, err error) { +func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CephFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n85, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n85 + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x30 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x2a + } + i -= len(m.SecretFile) + copy(dAtA[i:], m.SecretFile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) + i-- + dAtA[i] = 0x22 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + if len(m.Monitors) > 0 { + for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Monitors[iNdEx]) + copy(dAtA[i:], m.Monitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *NodeCondition) Marshal() (dAtA []byte, err error) { +func (m *CinderPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CinderPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) - n86, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i += n86 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n87, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n87 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) { +func (m *CinderVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CinderVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ConfigMap != nil { - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n88, err := m.ConfigMap.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n88 + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) { +func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Assigned != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Assigned.Size())) - n89, err := m.Assigned.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n89 - } - if m.Active != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Active.Size())) - n90, err := m.Active.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n90 - } - if m.LastKnownGood != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastKnownGood.Size())) - n91, err := m.LastKnownGood.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n91 + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x8 } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil + return len(dAtA) - i, nil } -func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { +func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ComponentCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) - 
n92, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n92 - return i, nil + return len(dAtA) - i, nil } -func (m *NodeList) Marshal() (dAtA []byte, err error) { +func (m *ComponentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n93, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n93 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeProxyOptions) Marshal() (dAtA []byte, err error) { +func (m *ComponentStatusList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeProxyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentStatusList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil + return len(dAtA) - i, nil } -func (m *NodeResources) Marshal() (dAtA []byte, err error) { +func (m *ConfigMap) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if 
len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + if len(m.BinaryData) > 0 { + keysForBinaryData := make([]string, 0, len(m.BinaryData)) + for k := range m.BinaryData { + keysForBinaryData = append(keysForBinaryData, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { + github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) + for iNdEx := len(keysForBinaryData) - 1; iNdEx >= 0; iNdEx-- { + v := m.BinaryData[string(keysForBinaryData[iNdEx])] + baseI := i + if v != nil { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(keysForBinaryData[iNdEx]) + copy(dAtA[i:], keysForBinaryData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBinaryData[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Data) > 0 { + keysForData := make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- { + v := m.Data[string(keysForData[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForData[iNdEx]) + copy(dAtA[i:], keysForData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForData[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n94, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n94 } } - return i, nil + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSelector) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapEnvSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapEnvSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.NodeSelectorTerms) > 0 { - for _, msg := range m.NodeSelectorTerms { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size 
+ i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSelectorRequirement) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapKeySelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + i -= len(m.Key) + copy(dAtA[i:], m.Key) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSelectorTerm) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.MatchFields) > 0 { - for _, msg := range m.MatchFields { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSpec) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapNodeConfigSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func 
(m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapNodeConfigSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapNodeConfigSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR))) - i += copy(dAtA[i:], m.PodCIDR) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUse_ExternalID))) - i += copy(dAtA[i:], m.DoNotUse_ExternalID) + i -= len(m.KubeletConfigKey) + copy(dAtA[i:], m.KubeletConfigKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletConfigKey))) + i-- + dAtA[i] = 0x2a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x22 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) - i += copy(dAtA[i:], m.ProviderID) - dAtA[i] = 0x20 - i++ - if m.Unschedulable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - i++ - if len(m.Taints) > 0 { - for _, msg := range m.Taints { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.ConfigSource != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size())) - n95, err := m.ConfigSource.MarshalTo(dAtA[i:]) + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n95 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeStatus) Marshal() (dAtA []byte, err error) { +func (m *ConfigMapVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { - dAtA[i] = 0xa - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i-- + dAtA[i] = 0x20 + } + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x18 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n96, err := (&v).MarshalTo(dAtA[i:]) + } + } + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Container) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Container) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StartupProbe != nil { + { + size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n96 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if len(m.Allocatable) > 0 { - keysForAllocatable := make([]string, 0, len(m.Allocatable)) - for k := range m.Allocatable { - keysForAllocatable = append(keysForAllocatable, string(k)) + if len(m.VolumeDevices) > 0 { + for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } - github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) - for _, k := range keysForAllocatable { - dAtA[i] = 0x12 - i++ - v := m.Allocatable[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n97, err := (&v).MarshalTo(dAtA[i:]) + } + i -= len(m.TerminationMessagePolicy) + copy(dAtA[i:], 
m.TerminationMessagePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + if len(m.EnvFrom) > 0 { + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + i-- + if m.StdinOnce { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n97 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x7a } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i -= len(m.ImagePullPolicy) + copy(dAtA[i:], m.ImagePullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) + i-- + dAtA[i] = 0x72 + i -= len(m.TerminationMessagePath) + copy(dAtA[i:], m.TerminationMessagePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) + i-- + dAtA[i] = 0x6a + if m.Lifecycle != nil { + { + size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x62 } - if len(m.Addresses) > 0 { - for _, msg := range m.Addresses { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ReadinessProbe != nil { + { + size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x5a } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) - n98, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n98 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) - n99, err := m.NodeInfo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n99 - if len(m.Images) > 0 { - for _, msg := range m.Images { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.LivenessProbe != nil { + { + size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x52 } - if len(m.VolumesInUse) > 0 { - for _, s := range m.VolumesInUse { + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x4a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + } + } + { + size, err := 
m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x3a } } - if len(m.VolumesAttached) > 0 { - for _, msg := range m.VolumesAttached { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - if m.Config != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Config.Size())) - n100, err := m.Config.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.WorkingDir) + copy(dAtA[i:], m.WorkingDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) + i-- + dAtA[i] = 0x2a + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a } - i += n100 } - return i, nil + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) { +func (m *ContainerImage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSystemInfo) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerImage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerImage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineID))) - i += copy(dAtA[i:], m.MachineID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SystemUUID))) - i += copy(dAtA[i:], m.SystemUUID) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.BootID))) - i += copy(dAtA[i:], m.BootID) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KernelVersion))) - i += copy(dAtA[i:], m.KernelVersion) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OSImage))) - i += copy(dAtA[i:], m.OSImage) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerRuntimeVersion))) - i += copy(dAtA[i:], m.ContainerRuntimeVersion) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.KubeletVersion))) - i += copy(dAtA[i:], m.KubeletVersion) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeProxyVersion))) - i += copy(dAtA[i:], m.KubeProxyVersion) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OperatingSystem))) - i += copy(dAtA[i:], m.OperatingSystem) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) - i += copy(dAtA[i:], m.Architecture) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.SizeBytes)) + i-- + dAtA[i] = 0x10 + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *ObjectFieldSelector) Marshal() (dAtA []byte, err error) { +func (m *ContainerPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.HostIP) + copy(dAtA[i:], m.HostIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i-- + dAtA[i] = 0x2a + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) - i += copy(dAtA[i:], m.FieldPath) - return i, nil + return len(dAtA) - i, nil } -func (m *ObjectReference) Marshal() (dAtA []byte, err error) { +func (m *ContainerState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) - i += copy(dAtA[i:], m.FieldPath) - return i, nil + if m.Terminated != nil { + { + size, err := m.Terminated.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Running != nil { + { + size, err := m.Running.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Waiting != nil { + { + size, err := m.Waiting.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *PersistentVolume) Marshal() (dAtA []byte, err error) { +func (m *ContainerStateRunning) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateRunning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n101, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n101 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n102, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n102 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n103, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n103 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaim) Marshal() (dAtA []byte, err error) { +func (m *ContainerStateTerminated) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateTerminated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n104, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0x3a + { + size, err := m.FinishedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n104 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Spec.Size())) - n105, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x32 + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n105 + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n106, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n106 - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Signal)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ExitCode)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimCondition) Marshal() (dAtA []byte, err error) { +func (m *ContainerStateWaiting) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStateWaiting) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateWaiting) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n107, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n107 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n108, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n108 - dAtA[i] = 0x2a - i++ + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimList) Marshal() (dAtA []byte, err error) { +func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n109, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Started != nil { 
+ i-- + if *m.Started { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 } - i += n109 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0x42 + i -= len(m.ImageID) + copy(dAtA[i:], m.ImageID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) + i-- + dAtA[i] = 0x3a + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.RestartCount)) + i-- + dAtA[i] = 0x28 + i-- + if m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + { + size, err := m.LastTerminationState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.State.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimSpec) Marshal() (dAtA []byte, err error) { +func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonEndpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n110, err := m.Resources.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err - } - i += n110 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - if m.Selector != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n111, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n111 - } - if m.StorageClassName != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) - i += copy(dAtA[i:], *m.StorageClassName) - } - if m.VolumeMode != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) - i += copy(dAtA[i:], *m.VolumeMode) + return nil, err } - if m.DataSource != nil { - dAtA[i] = 0x3a - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.DataSource.Size())) - n112, err := m.DataSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n112 } - return i, nil + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimStatus) Marshal() (dAtA []byte, err error) { +func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIVolumeFile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if m.Mode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + i-- + dAtA[i] = 0x20 } - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { - dAtA[i] = 0x1a - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n113, err := (&v).MarshalTo(dAtA[i:]) + if m.ResourceFieldRef != nil { + { + size, err := m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n113 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.FieldRef != nil { + { + size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimVolumeSource) Marshal() (dAtA []byte, err 
error) { +func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClaimName))) - i += copy(dAtA[i:], m.ClaimName) - dAtA[i] = 0x10 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x10 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i++ - return i, nil + return len(dAtA) - i, nil } -func (m *PersistentVolumeList) Marshal() (dAtA []byte, err error) { +func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EmptyDirVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n114, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n114 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.SizeLimit != nil { + { + size, err := m.SizeLimit.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Medium) + copy(dAtA[i:], m.Medium) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.GCEPersistentDisk != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n115, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n115 - } 
- if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n116, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n116 - } - if m.HostPath != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n117, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n117 - } - if m.Glusterfs != nil { + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n118, err := m.Glusterfs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n118 - } - if m.NFS != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n119, err := m.NFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n119 - } - if m.RBD != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n120, err := m.RBD.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n120 - } - if m.ISCSI != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n121, err := m.ISCSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n121 - } - if m.Cinder != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n122, err := m.Cinder.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n122 - } - if m.CephFS != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n123, err := m.CephFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n123 - } - if m.FC != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n124, err := m.FC.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n124 - } - if m.Flocker != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n125, err := m.Flocker.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n125 - } - if m.FlexVolume != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n126, err := m.FlexVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n126 - } - if m.AzureFile != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n127, err := m.AzureFile.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n127 - } - if m.VsphereVolume != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n128, err := m.VsphereVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n128 - } - if m.Quobyte != nil { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n129, err := m.Quobyte.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n129 - } - if m.AzureDisk != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n130, err := m.AzureDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n130 - } - if m.PhotonPersistentDisk != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n131, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) 
- if err != nil { - return 0, err - } - i += n131 - } - if m.PortworxVolume != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n132, err := m.PortworxVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n132 - } - if m.ScaleIO != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n133, err := m.ScaleIO.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n133 - } - if m.Local != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) - n134, err := m.Local.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n134 - } - if m.StorageOS != nil { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n135, err := m.StorageOS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n135 - } - if m.CSI != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) - n136, err := m.CSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n136 - } - return i, nil -} - -func (m *PersistentVolumeSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err } - return dAtA[:n], nil -} - -func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for _, k := range keysForCapacity { - dAtA[i] = 0xa - i++ - v := m.Capacity[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n137, err := (&v).MarshalTo(dAtA[i:]) + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1a + if m.TargetRef != nil { + { + size, err := m.TargetRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n137 - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) - n138, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n138 - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.ClaimRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size())) - n139, err := m.ClaimRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n139 - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PersistentVolumeReclaimPolicy))) - i += copy(dAtA[i:], m.PersistentVolumeReclaimPolicy) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName))) - i += copy(dAtA[i:], 
m.StorageClassName) - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.VolumeMode != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) - i += copy(dAtA[i:], *m.VolumeMode) - } - if m.NodeAffinity != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) - n140, err := m.NodeAffinity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n140 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeStatus) Marshal() (dAtA []byte, err error) { +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - return i, nil -} - -func (m *PhotonPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PhotonPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID))) - i += copy(dAtA[i:], m.PdID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - return i, nil + return len(dAtA) - i, nil } -func (m *Pod) Marshal() (dAtA []byte, err error) { +func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Pod) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSubset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n141, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != 
nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - i += n141 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n142, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.NotReadyAddresses) > 0 { + for iNdEx := len(m.NotReadyAddresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NotReadyAddresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i += n142 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n143, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i += n143 - return i, nil + return len(dAtA) - i, nil } -func (m *PodAffinity) Marshal() (dAtA []byte, err error) { +func (m *Endpoints) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Subsets) > 0 { + for iNdEx := len(m.Subsets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subsets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAffinityTerm) Marshal() (dAtA []byte, err error) { +func (m *EndpointsList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*EndpointsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.LabelSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) - n144, err := m.LabelSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n144 } - if len(m.Namespaces) > 0 { - for _, s := range m.Namespaces { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) - i += copy(dAtA[i:], m.TopologyKey) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) { +func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvFromSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ConfigMapRef != nil { + { + size, err := m.ConfigMapRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) { +func (m *EnvVar) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x10 - i++ - if m.Stdout { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x18 - i++ - if m.Stderr 
{ - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x20 - i++ - if m.TTY { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i += copy(dAtA[i:], m.Container) - return i, nil -} - -func (m *PodCondition) Marshal() (dAtA []byte, err error) { +func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EnvVar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n145, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n145 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n146, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.ValueFrom != nil { + { + size, err := m.ValueFrom.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n146 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { +func (m *EnvVarSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvVarSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Nameservers) > 0 { - for _, s := range m.Nameservers { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.SecretKeyRef != nil { + { + size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - if len(m.Searches) > 0 { - for _, s := range m.Searches { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.ConfigMapKeyRef != nil { + { + size, err := m.ConfigMapKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i 
-= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.Options) > 0 { - for _, msg := range m.Options { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ResourceFieldRef != nil { + { + size, err := m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.FieldRef != nil { + { + size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EphemeralContainer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EphemeralContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.Value != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) - i += copy(dAtA[i:], *m.Value) + i -= len(m.TargetContainerName) + copy(dAtA[i:], m.TargetContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetContainerName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.EphemeralContainerCommon.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainerCommon) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EphemeralContainerCommon) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.StartupProbe != nil { + { + size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - i++ - dAtA[i] = 0x10 - i++ - if m.Stdout { + if len(m.VolumeDevices) > 0 { + for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + i -= len(m.TerminationMessagePolicy) + copy(dAtA[i:], m.TerminationMessagePolicy) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.TerminationMessagePolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + if len(m.EnvFrom) > 0 { + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + i-- + if m.TTY { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x18 - i++ - if m.Stderr { + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + i-- + if m.StdinOnce { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x20 - i++ - if m.TTY { + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + i-- + if m.Stdin { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i += copy(dAtA[i:], m.Container) - if len(m.Command) > 0 { - for _, s := range m.Command { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x7a } - return i, nil -} - -func (m *PodList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n147, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n147 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i -= len(m.ImagePullPolicy) + copy(dAtA[i:], m.ImagePullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) + i-- + dAtA[i] = 0x72 + i -= len(m.TerminationMessagePath) + copy(dAtA[i:], m.TerminationMessagePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) + i-- + dAtA[i] = 0x6a + if m.Lifecycle != nil { + { + size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x62 } - return i, nil -} - -func (m *PodLogOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i += copy(dAtA[i:], m.Container) - dAtA[i] = 0x10 - i++ - if m.Follow { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.ReadinessProbe != nil { + { + size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } - i++ - dAtA[i] = 0x18 - i++ - if m.Previous { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.LivenessProbe != nil { + { + size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - i++ - if m.SinceSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } } - if m.SinceTime != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) - n148, err := m.SinceTime.MarshalTo(dAtA[i:]) + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n148 - } - dAtA[i] = 0x30 - i++ - if m.Timestamps { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.TailLines != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) - } - if m.LimitBytes != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil -} - -func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + i-- + dAtA[i] = 0x42 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } } - return dAtA[:n], nil -} - -func (m *PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l if len(m.Ports) > 0 { - for _, num := range m.Ports { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(num)) + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } } - return i, nil -} - -func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + i -= len(m.WorkingDir) + copy(dAtA[i:], m.WorkingDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) + i-- + dAtA[i] = 0x2a + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } - return dAtA[:n], nil -} - -func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil + return len(dAtA) - i, nil } -func (m *PodReadinessGate) 
Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainers) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType))) - i += copy(dAtA[i:], m.ConditionType) - return i, nil -} - -func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { +func (m *EphemeralContainers) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EphemeralContainers) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.SELinuxOptions != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n149, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n149 - } - if m.RunAsUser != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - dAtA[i] = 0x18 - i++ - if *m.RunAsNonRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.SupplementalGroups) > 0 { - for _, num := range m.SupplementalGroups { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(num)) - } - } - if m.FSGroup != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup)) - } - if m.RunAsGroup != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) - } - if len(m.Sysctls) > 0 { - for _, msg := range m.Sysctls { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.EphemeralContainers) > 0 { + for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.WindowsOptions != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.WindowsOptions.Size())) - n150, err := m.WindowsOptions.MarshalTo(dAtA[i:]) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n150 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodSignature) Marshal() (dAtA []byte, err error) { +func (m *Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PodController != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) - n151, err := m.PodController.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n151 - } - return i, nil -} - -func (m *PodSpec) Marshal() (dAtA []byte, err 
error) { +func (m *Event) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Volumes) > 0 { - for _, msg := range m.Volumes { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i -= len(m.ReportingInstance) + copy(dAtA[i:], m.ReportingInstance) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i-- + dAtA[i] = 0x7a + i -= len(m.ReportingController) + copy(dAtA[i:], m.ReportingController) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i-- + dAtA[i] = 0x72 + if m.Related != nil { + { + size, err := m.Related.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x6a } - if len(m.Containers) > 0 { - for _, msg := range m.Containers { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0x62 + if m.Series != nil { + { + size, err := m.Series.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x5a } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) - i += copy(dAtA[i:], m.RestartPolicy) - if m.TerminationGracePeriodSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) - } - if m.ActiveDeadlineSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) - } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) - i += copy(dAtA[i:], m.DNSPolicy) - if len(m.NodeSelector) > 0 { - keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) - for k := range m.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - for _, k := range keysForNodeSelector { - dAtA[i] = 0x3a - i++ - v := m.NodeSelector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + { + size, err := m.EventTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) - i += copy(dAtA[i:], m.ServiceAccountName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) - i += copy(dAtA[i:], m.DeprecatedServiceAccount) + i-- dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - dAtA[i] = 0x58 - i++ - if m.HostNetwork { - dAtA[i] 
= 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x60 - i++ - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x68 - i++ - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecurityContext != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n152, err := m.SecurityContext.MarshalTo(dAtA[i:]) + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x4a + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x40 + { + size, err := m.LastTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n152 - } - if len(m.ImagePullSecrets) > 0 { - for _, msg := range m.ImagePullSecrets { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) - i += copy(dAtA[i:], m.Subdomain) - if m.Affinity != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) - n153, err := m.Affinity.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x3a + { + size, err := m.FirstTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n153 - } - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) - i += copy(dAtA[i:], m.SchedulerName) - if len(m.InitContainers) > 0 { - for _, msg := range m.InitContainers { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.AutomountServiceAccountToken != nil { - dAtA[i] = 0xa8 - i++ - dAtA[i] = 0x1 - i++ - if *m.AutomountServiceAccountToken { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Tolerations) > 0 { - for _, msg := range m.Tolerations { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.HostAliases) > 0 { - for _, msg := range m.HostAliases { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) - i += copy(dAtA[i:], m.PriorityClassName) - if m.Priority != nil { - dAtA[i] = 0xc8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.DNSConfig != nil { - dAtA[i] = 0xd2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size())) - n154, err := m.DNSConfig.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x32 + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n154 - } - if m.ShareProcessNamespace != nil { - dAtA[i] = 0xd8 - i++ - dAtA[i] = 0x1 - i++ - if *m.ShareProcessNamespace { - dAtA[i] = 1 - } else { - 
dAtA[i] = 0 - } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.ReadinessGates) > 0 { - for _, msg := range m.ReadinessGates { - dAtA[i] = 0xe2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + { + size, err := m.InvolvedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RuntimeClassName != nil { - dAtA[i] = 0xea - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) - i += copy(dAtA[i:], *m.RuntimeClassName) - } - if m.EnableServiceLinks != nil { - dAtA[i] = 0xf0 - i++ - dAtA[i] = 0x1 - i++ - if *m.EnableServiceLinks { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ - } - if m.PreemptionPolicy != nil { - dAtA[i] = 0xfa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) - i += copy(dAtA[i:], *m.PreemptionPolicy) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodStatus) Marshal() (dAtA []byte, err error) { +func (m *EventList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) - i += copy(dAtA[i:], m.HostIP) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) - i += copy(dAtA[i:], m.PodIP) - if m.StartTime != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n155, err := m.StartTime.MarshalTo(dAtA[i:]) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n155 - } - if 
len(m.ContainerStatuses) > 0 { - for _, msg := range m.ContainerStatuses { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass))) - i += copy(dAtA[i:], m.QOSClass) - if len(m.InitContainerStatuses) > 0 { - for _, msg := range m.InitContainerStatuses { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NominatedNodeName))) - i += copy(dAtA[i:], m.NominatedNodeName) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodStatusResult) Marshal() (dAtA []byte, err error) { +func (m *EventSeries) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n156, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x1a + { + size, err := m.LastObservedTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n156 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n157, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n157 - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *PodTemplate) Marshal() (dAtA []byte, err error) { +func (m *EventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n158, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n158 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n159, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n159 - return i, nil + i -= len(m.Component) + copy(dAtA[i:], m.Component) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil } -func (m *PodTemplateList) Marshal() (dAtA []byte, err error) { +func (m *ExecAction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n160, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n160 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *PodTemplateSpec) Marshal() (dAtA []byte, err error) { +func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FCVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n161, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n161 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n162, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.WWIDs) > 0 { + for iNdEx := len(m.WWIDs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.WWIDs[iNdEx]) + copy(dAtA[i:], m.WWIDs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WWIDs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } - i += n162 - return i, nil -} + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + if m.Lun != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) + i-- + dAtA[i] = 0x10 + } + if len(m.TargetWWNs) > 0 { + for iNdEx := len(m.TargetWWNs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TargetWWNs[iNdEx]) + copy(dAtA[i:], m.TargetWWNs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetWWNs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} -func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PortworxVolumeSource) MarshalTo(dAtA []byte) 
(int, error) { - var i int +func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlexPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i += copy(dAtA[i:], m.VolumeID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Options[string(keysForOptions[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForOptions[iNdEx]) + copy(dAtA[i:], keysForOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Preconditions) Marshal() (dAtA []byte, err error) { +func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.UID != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) - i += copy(dAtA[i:], *m.UID) - } - return i, nil -} - -func (m *PreferAvoidPodsEntry) Marshal() (dAtA []byte, err error) { +func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FlexVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) - n163, err := m.PodSignature.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Options[string(keysForOptions[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForOptions[iNdEx]) + copy(dAtA[i:], keysForOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } } - i += n163 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) - n164, err := m.EvictionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n164 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PreferredSchedulingTerm) Marshal() (dAtA []byte, err error) { +func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlockerVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + i -= len(m.DatasetUUID) + copy(dAtA[i:], m.DatasetUUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) - n165, err := m.Preference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n165 - return i, nil + i -= len(m.DatasetName) + copy(dAtA[i:], m.DatasetName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Probe) Marshal() (dAtA []byte, err error) { +func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Probe) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GCEPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) - n166, err := m.Handler.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n166 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.InitialDelaySeconds)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TimeoutSeconds)) + i-- dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SuccessThreshold)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FailureThreshold)) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.PDName) + copy(dAtA[i:], m.PDName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ProjectedVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitRepoVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Sources) > 0 { - for _, msg := range m.Sources { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - return i, nil + i -= len(m.Directory) + copy(dAtA[i:], m.Directory) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) + i-- + dAtA[i] = 0x1a + i -= len(m.Revision) + copy(dAtA[i:], m.Revision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) + i-- + dAtA[i] = 0x12 + i -= len(m.Repository) + copy(dAtA[i:], m.Repository) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *QuobyteVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GlusterfsPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) - i += copy(dAtA[i:], m.Registry) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volume))) - i += copy(dAtA[i:], m.Volume) - dAtA[i] = 0x18 - i++ + if m.EndpointsNamespace != nil { + i -= len(*m.EndpointsNamespace) + copy(dAtA[i:], *m.EndpointsNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace))) + i-- + dAtA[i] = 0x22 + } + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += 
copy(dAtA[i:], m.User) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tenant))) - i += copy(dAtA[i:], m.Tenant) - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.EndpointsName) + copy(dAtA[i:], m.EndpointsName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RBDPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GlusterfsVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) - i += copy(dAtA[i:], m.RBDImage) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) - i += copy(dAtA[i:], m.RBDPool) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) - i += copy(dAtA[i:], m.RadosUser) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) - i += copy(dAtA[i:], m.Keyring) - if m.SecretRef != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n167, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n167 - } - dAtA[i] = 0x40 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.EndpointsName) + copy(dAtA[i:], m.EndpointsName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPGetAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if 
len(m.HTTPHeaders) > 0 { + for iNdEx := len(m.HTTPHeaders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HTTPHeaders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x2a } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) - i += copy(dAtA[i:], m.RBDImage) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) + i -= len(m.Scheme) + copy(dAtA[i:], m.Scheme) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) + i-- dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) - i += copy(dAtA[i:], m.RBDPool) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) - i += copy(dAtA[i:], m.RadosUser) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) - i += copy(dAtA[i:], m.Keyring) - if m.SecretRef != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n168, err := m.SecretRef.MarshalTo(dAtA[i:]) + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n168 - } - dAtA[i] = 0x40 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i++ - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RangeAllocation) Marshal() (dAtA []byte, err error) { +func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n169, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n169 + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) - i += copy(dAtA[i:], m.Range) - if m.Data != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationController) Marshal() (dAtA []byte, err error) { +func (m *Handler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Handler) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Handler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n170, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TCPSocket != nil { + { + size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n170 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n171, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.HTTPGet != nil { + { + size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n171 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n172, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Exec != nil { + { + size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n172 - return i, nil + return len(dAtA) - i, nil } -func (m *ReplicationControllerCondition) Marshal() (dAtA []byte, err error) { +func (m *HostAlias) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostAlias) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n173, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Hostnames) > 0 { + for iNdEx := len(m.Hostnames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hostnames[iNdEx]) + copy(dAtA[i:], m.Hostnames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostnames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - i += n173 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerList) Marshal() (dAtA []byte, err error) { +func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *HostPathVolumeSource) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPathVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n174, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n174 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if m.Type != nil { + i -= len(*m.Type) + copy(dAtA[i:], *m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerSpec) Marshal() (dAtA []byte, err error) { +func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ISCSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.InitiatorName != nil { + i -= len(*m.InitiatorName) + copy(dAtA[i:], *m.InitiatorName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i-- + dAtA[i] = 0x62 } - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { - dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i-- + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x52 } - if m.Template != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n175, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.Portals) > 0 { + for iNdEx := len(m.Portals) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Portals[iNdEx]) + copy(dAtA[i:], m.Portals[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx]))) + i-- + dAtA[i] = 0x3a } - i += n175 } - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) 
- return i, nil + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x2a + i -= len(m.ISCSIInterface) + copy(dAtA[i:], m.ISCSIInterface) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + i-- + dAtA[i] = 0x18 + i -= len(m.IQN) + copy(dAtA[i:], m.IQN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetPortal) + copy(dAtA[i:], m.TargetPortal) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerStatus) Marshal() (dAtA []byte, err error) { +func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ReplicationControllerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ISCSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.InitiatorName != nil { + i -= len(*m.InitiatorName) + copy(dAtA[i:], *m.InitiatorName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i-- + dAtA[i] = 0x62 + } + i-- + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + i-- + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.Portals) > 0 { + for iNdEx := len(m.Portals) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Portals[iNdEx]) + copy(dAtA[i:], m.Portals[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx]))) + i-- + dAtA[i] = 0x3a } } - return i, nil + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x2a + i -= len(m.ISCSIInterface) + copy(dAtA[i:], m.ISCSIInterface) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + i-- + dAtA[i] = 0x18 + i -= len(m.IQN) + copy(dAtA[i:], m.IQN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetPortal) + copy(dAtA[i:], m.TargetPortal) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) { +func (m *KeyToPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KeyToPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) - i += copy(dAtA[i:], m.ContainerName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) - n176, err := m.Divisor.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Mode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + i-- + dAtA[i] = 0x18 } - i += n176 - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { +func (m *Lifecycle) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n177, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n177 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n178, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n178 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n179, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n179 - return i, nil -} - -func (m *ResourceQuotaList) Marshal() (dAtA []byte, err error) { +func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n180, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreStop != nil { + { + size, err := m.PreStop.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n180 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(dAtA[i:]) + if m.PostStart != nil { + { + size, err := m.PostStart.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *ResourceQuotaSpec) Marshal() (dAtA []byte, err error) { +func (m *LimitRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Hard) > 0 { - keysForHard := make([]string, 0, len(m.Hard)) - for k := range m.Hard { - keysForHard = append(keysForHard, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - for _, k := range keysForHard { - dAtA[i] = 0xa - i++ - v := m.Hard[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n181, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n181 - } - } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ScopeSelector != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScopeSelector.Size())) - n182, err := m.ScopeSelector.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n182 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceQuotaStatus) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Hard) > 0 { - keysForHard := make([]string, 0, len(m.Hard)) - for k := range m.Hard { - keysForHard = append(keysForHard, string(k)) + if len(m.MaxLimitRequestRatio) > 0 { + keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) + for k := range m.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - for _, k := range keysForHard { + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + for iNdEx := len(keysForMaxLimitRequestRatio) - 1; iNdEx >= 0; iNdEx-- { + v := m.MaxLimitRequestRatio[ResourceName(keysForMaxLimitRequestRatio[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForMaxLimitRequestRatio[iNdEx]) + copy(dAtA[i:], keysForMaxLimitRequestRatio[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMaxLimitRequestRatio[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - v := m.Hard[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.DefaultRequest) > 0 { + keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) + for k := range m.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + for iNdEx := len(keysForDefaultRequest) - 1; iNdEx >= 0; iNdEx-- { + v := m.DefaultRequest[ResourceName(keysForDefaultRequest[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefaultRequest[iNdEx]) + copy(dAtA[i:], keysForDefaultRequest[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefaultRequest[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Default) > 0 { + keysForDefault := make([]string, 0, len(m.Default)) + for k := range m.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + for iNdEx := len(keysForDefault) - 1; iNdEx >= 0; iNdEx-- { + v := m.Default[ResourceName(keysForDefault[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n183, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(keysForDefault[iNdEx]) + copy(dAtA[i:], keysForDefault[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefault[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Min) > 0 { + keysForMin := make([]string, 0, len(m.Min)) + for k := range m.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + for iNdEx := len(keysForMin) - 1; iNdEx >= 0; iNdEx-- { + v := m.Min[ResourceName(keysForMin[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n183 + i-- + dAtA[i] = 0x12 + i -= len(keysForMin[iNdEx]) + copy(dAtA[i:], keysForMin[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(keysForMin[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a } } - if len(m.Used) > 0 { - keysForUsed := make([]string, 0, len(m.Used)) - for k := range m.Used { - keysForUsed = append(keysForUsed, string(k)) + if len(m.Max) > 0 { + keysForMax := make([]string, 0, len(m.Max)) + for k := range m.Max { + keysForMax = append(keysForMax, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) - for _, k := range keysForUsed { + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + for iNdEx := len(keysForMax) - 1; iNdEx >= 0; iNdEx-- { + v := m.Max[ResourceName(keysForMax[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - v := m.Used[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForMax[iNdEx]) + copy(dAtA[i:], keysForMax[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMax[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n184, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n184 } } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Limits) > 0 { - keysForLimits := make([]string, 0, len(m.Limits)) - for k := range m.Limits { - keysForLimits = append(keysForLimits, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) - for _, k := range keysForLimits { - dAtA[i] = 0xa - i++ - v := m.Limits[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n185, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n185 + i-- + dAtA[i] = 0x12 } } - if len(m.Requests) > 0 { - 
keysForRequests := make([]string, 0, len(m.Requests)) - for k := range m.Requests { - keysForRequests = append(keysForRequests, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) - for _, k := range keysForRequests { - dAtA[i] = 0x12 - i++ - v := m.Requests[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n186, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n186 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) - i += copy(dAtA[i:], m.Level) - return i, nil + if len(m.Limits) > 0 { + for iNdEx := len(m.Limits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Limits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *List) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) - i += copy(dAtA[i:], m.Gateway) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) - i += copy(dAtA[i:], m.System) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n187, err := m.SecretRef.MarshalTo(dAtA[i:]) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n187 - } - dAtA[i] = 0x20 - i++ - if m.SSLEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) - i += copy(dAtA[i:], m.ProtectionDomain) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) - i += copy(dAtA[i:], m.StoragePool) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) - i += copy(dAtA[i:], m.StorageMode) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x50 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i++ - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) - i += copy(dAtA[i:], m.Gateway) + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) - i += copy(dAtA[i:], m.System) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n188, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n188 - } - dAtA[i] = 0x20 - i++ - if m.SSLEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) - i += copy(dAtA[i:], m.ProtectionDomain) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) - i += copy(dAtA[i:], m.StoragePool) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) - i += copy(dAtA[i:], m.StorageMode) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x50 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScopeSelector) Marshal() (dAtA []byte, err error) { +func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScopeSelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *ScopedResourceSelectorRequirement) Marshal() (dAtA []byte, err error) { +func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ScopedResourceSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScopeName))) - i += copy(dAtA[i:], m.ScopeName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil + return len(dAtA) - i, nil } -func (m *Secret) Marshal() (dAtA []byte, err error) { +func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Secret) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n189, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n189 - if len(m.Data) > 0 { - keysForData := make([]string, 0, len(m.Data)) - for k := range m.Data { - keysForData = append(keysForData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - for _, k := range keysForData { - dAtA[i] = 0x12 - i++ - v := m.Data[string(k)] - byteSize := 0 - if v != nil { - byteSize = 1 + len(v) + sovGenerated(uint64(len(v))) - } - 
mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + byteSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - if v != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.StringData) > 0 { - keysForStringData := make([]string, 0, len(m.StringData)) - for k := range m.StringData { - keysForStringData = append(keysForStringData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) - for _, k := range keysForStringData { - dAtA[i] = 0x22 - i++ - v := m.StringData[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretEnvSource) Marshal() (dAtA []byte, err error) { +func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n190, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n190 - if m.Optional != nil { - dAtA[i] = 0x10 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Server) + copy(dAtA[i:], m.Server) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretKeySelector) Marshal() (dAtA []byte, err error) { +func (m *Namespace) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Namespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n191, 
err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n191 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - if m.Optional != nil { - dAtA[i] = 0x18 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretList) Marshal() (dAtA []byte, err error) { +func (m *NamespaceCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n192, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n192 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretProjection) Marshal() (dAtA []byte, err error) { +func (m *NamespaceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n193, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n193 if len(m.Items) > 0 { - for 
_, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretReference) Marshal() (dAtA []byte, err error) { +func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecurityContext) Marshal() (dAtA []byte, err error) { +func (m *Node) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Node) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Capabilities != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) - n194, err := m.Capabilities.MarshalTo(dAtA[i:]) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n194 - } - if m.Privileged != nil { - dAtA[i] = 0x10 - i++ - if *m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.SELinuxOptions != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n195, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n195 - } - if m.RunAsUser != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - dAtA[i] = 0x28 - i++ - if *m.RunAsNonRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.ReadOnlyRootFilesystem != nil { - dAtA[i] = 0x30 - i++ - if *m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.AllowPrivilegeEscalation != nil { - dAtA[i] = 0x38 - i++ - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.RunAsGroup != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) - } - if m.ProcMount != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount))) - i += copy(dAtA[i:], *m.ProcMount) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.WindowsOptions != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.WindowsOptions.Size())) - n196, err := m.WindowsOptions.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n196 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SerializedReference) Marshal() (dAtA []byte, err error) { +func (m *NodeAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) - n197, err := m.Reference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n197 - return i, nil -} - -func (m *Service) Marshal() (dAtA []byte, err error) { +func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Service) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n198, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n198 + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n199, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n199 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n200, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n200 - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ServiceAccount) Marshal() (dAtA []byte, err error) { +func (m *NodeAffinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n201, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n201 - if len(m.Secrets) > 0 { - for _, msg := range m.Secrets { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.ImagePullSecrets) > 0 { - for _, msg := range m.ImagePullSecrets { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + { + size, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n - } - } - if m.AutomountServiceAccountToken != nil { - dAtA[i] = 0x20 - i++ - if *m.AutomountServiceAccountToken { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i++ + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) { +func (m *NodeCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], 
nil } -func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n202, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n202 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + dAtA[i] = 0x22 + { + size, err := m.LastHeartbeatTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) { +func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) - i += copy(dAtA[i:], m.Audience) - if m.ExpirationSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil -} - -func (m *ServiceList) Marshal() (dAtA []byte, err error) { +func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeConfigSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n203, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n203 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + 
i-- + dAtA[i] = 0x12 } - return i, nil -} - -func (m *ServicePort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i += copy(dAtA[i:], m.Protocol) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) - n204, err := m.TargetPort.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n204 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) - return i, nil + return len(dAtA) - i, nil } -func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) { +func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ServiceProxyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil -} - -func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { +func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + if m.LastKnownGood != nil { + { + size, err := m.LastKnownGood.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n - } - } - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { - dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) - i += copy(dAtA[i:], m.ClusterIP) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.ExternalIPs) > 0 { - for _, s := range m.ExternalIPs { - dAtA[i] = 0x2a - i++ - l 
= len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Active != nil { + { + size, err := m.Active.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity))) - i += copy(dAtA[i:], m.SessionAffinity) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP))) - i += copy(dAtA[i:], m.LoadBalancerIP) - if len(m.LoadBalancerSourceRanges) > 0 { - for _, s := range m.LoadBalancerSourceRanges { - dAtA[i] = 0x4a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Assigned != nil { + { + size, err := m.Assigned.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalName))) - i += copy(dAtA[i:], m.ExternalName) - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy))) - i += copy(dAtA[i:], m.ExternalTrafficPolicy) - dAtA[i] = 0x60 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort)) - dAtA[i] = 0x68 - i++ - if m.PublishNotReadyAddresses { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SessionAffinityConfig != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size())) - n205, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n205 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *ServiceStatus) Marshal() (dAtA []byte, err error) { +func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n206, err := m.LoadBalancer.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n206 - return i, nil -} - -func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) { +func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeDaemonEndpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.ClientIP != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size())) - n207, err := m.ClientIP.MarshalTo(dAtA[i:]) + { + size, err := m.KubeletEndpoint.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n207 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NodeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) - i += copy(dAtA[i:], m.VolumeNamespace) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n208, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n208 - } - return i, nil -} - -func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) - i += copy(dAtA[i:], m.VolumeNamespace) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n209, err := m.SecretRef.MarshalTo(dAtA[i:]) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n209 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Sysctl) Marshal() (dAtA []byte, err error) { +func (m *NodeProxyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeProxyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) 
- return i, nil + return len(dAtA) - i, nil } -func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) { +func (m *NodeResources) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n210, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } - i += n210 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - return i, nil + return len(dAtA) - i, nil } -func (m *Taint) Marshal() (dAtA []byte, err error) { +func (m *NodeSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Taint) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) - i += copy(dAtA[i:], m.Effect) - if m.TimeAdded != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) - n211, err := m.TimeAdded.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.NodeSelectorTerms) > 0 { + for iNdEx := len(m.NodeSelectorTerms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NodeSelectorTerms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i += n211 } - return i, nil + return len(dAtA) - i, nil } -func (m *Toleration) Marshal() (dAtA []byte, err error) { +func (m *NodeSelectorRequirement) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) - i += copy(dAtA[i:], m.Effect) - if m.TolerationSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } - return i, nil + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) { +func (m *NodeSelectorTerm) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSelectorTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - if len(m.Values) > 0 { - for _, s := range m.Values { + if len(m.MatchFields) > 0 { + for iNdEx := len(m.MatchFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + } + } + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } -func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) { +func (m *NodeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) { - 
var i int +func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.MatchLabelExpressions) > 0 { - for _, msg := range m.MatchLabelExpressions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if len(m.PodCIDRs) > 0 { + for iNdEx := len(m.PodCIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PodCIDRs[iNdEx]) + copy(dAtA[i:], m.PodCIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDRs[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if m.ConfigSource != nil { + { + size, err := m.ConfigSource.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x32 } - return i, nil -} - -func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + if len(m.Taints) > 0 { + for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } } - return dAtA[:n], nil -} - -func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.APIGroup != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) - i += copy(dAtA[i:], *m.APIGroup) + i-- + if m.Unschedulable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) + i-- + dAtA[i] = 0x20 + i -= len(m.ProviderID) + copy(dAtA[i:], m.ProviderID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i -= len(m.DoNotUse_ExternalID) + copy(dAtA[i:], m.DoNotUse_ExternalID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUse_ExternalID))) + i-- + dAtA[i] = 0x12 + i -= len(m.PodCIDR) + copy(dAtA[i:], m.PodCIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Volume) Marshal() (dAtA []byte, err error) { +func (m *NodeStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Volume) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) - n212, err := m.VolumeSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if len(m.VolumesAttached) > 0 { + for iNdEx := len(m.VolumesAttached) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumesAttached[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if len(m.VolumesInUse) > 0 { + for iNdEx := len(m.VolumesInUse) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VolumesInUse[iNdEx]) + copy(dAtA[i:], m.VolumesInUse[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumesInUse[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + { + size, err := m.NodeInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n212 - return i, nil + i-- + dAtA[i] = 0x3a + { + size, err := m.DaemonEndpoints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0x1a + if len(m.Allocatable) > 0 { + keysForAllocatable := make([]string, 0, len(m.Allocatable)) + for k := range m.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + for iNdEx := len(keysForAllocatable) - 1; iNdEx >= 0; iNdEx-- { + v := m.Allocatable[ResourceName(keysForAllocatable[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAllocatable[iNdEx]) + copy(dAtA[i:], keysForAllocatable[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatable[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { +func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *NodeSystemInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSystemInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + i -= len(m.Architecture) + copy(dAtA[i:], m.Architecture) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) + i-- + dAtA[i] = 0x52 + i -= len(m.OperatingSystem) + copy(dAtA[i:], m.OperatingSystem) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OperatingSystem))) + i-- + dAtA[i] = 0x4a + i -= len(m.KubeProxyVersion) + copy(dAtA[i:], m.KubeProxyVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeProxyVersion))) + i-- + dAtA[i] = 0x42 + i -= len(m.KubeletVersion) + copy(dAtA[i:], m.KubeletVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletVersion))) + i-- + dAtA[i] = 0x3a + i -= len(m.ContainerRuntimeVersion) + copy(dAtA[i:], m.ContainerRuntimeVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerRuntimeVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.OSImage) + copy(dAtA[i:], m.OSImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OSImage))) + i-- + dAtA[i] = 0x2a + i -= len(m.KernelVersion) + copy(dAtA[i:], m.KernelVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KernelVersion))) + i-- + dAtA[i] = 0x22 + i -= len(m.BootID) + copy(dAtA[i:], m.BootID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BootID))) + i-- + dAtA[i] = 0x1a + i -= len(m.SystemUUID) + copy(dAtA[i:], m.SystemUUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SystemUUID))) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) - i += copy(dAtA[i:], m.DevicePath) - return i, nil + i -= len(m.MachineID) + copy(dAtA[i:], m.MachineID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *VolumeMount) Marshal() (dAtA []byte, err error) { +func (m *ObjectFieldSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.FieldPath) + copy(dAtA[i:], m.FieldPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += 
copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) - i += copy(dAtA[i:], m.MountPath) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) - i += copy(dAtA[i:], m.SubPath) - if m.MountPropagation != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) - i += copy(dAtA[i:], *m.MountPropagation) - } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr))) - i += copy(dAtA[i:], m.SubPathExpr) - return i, nil + return len(dAtA) - i, nil } -func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { +func (m *ObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Required != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Required.Size())) - n213, err := m.Required.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n213 - } - return i, nil + i -= len(m.FieldPath) + copy(dAtA[i:], m.FieldPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i-- + dAtA[i] = 0x3a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *VolumeProjection) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Secret != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n214, err := m.Secret.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n214 - } - if m.DownwardAPI != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n215, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + 
{ + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n215 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ConfigMap != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n216, err := m.ConfigMap.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n216 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ServiceAccountToken != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ServiceAccountToken.Size())) - n217, err := m.ServiceAccountToken.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n217 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *VolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaim) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.HostPath != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n218, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n218 - } - if m.EmptyDir != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) - n219, err := m.EmptyDir.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n219 - } - if m.GCEPersistentDisk != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n220, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n220 - } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n221, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n221 - } - if m.GitRepo != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) - n222, err := m.GitRepo.MarshalTo(dAtA[i:]) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n222 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.Secret != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n223, err := m.Secret.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n223 - } - if m.NFS != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n224, err := m.NFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n224 - } - if m.ISCSI != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n225, err := m.ISCSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n225 - } - if m.Glusterfs != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Glusterfs.Size())) - n226, err := m.Glusterfs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n226 - } - if m.PersistentVolumeClaim != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) - n227, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n227 - } - if m.RBD != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n228, err := m.RBD.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n228 - } - if m.FlexVolume != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n229, err := m.FlexVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n229 - } - if m.Cinder != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n230, err := m.Cinder.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n230 - } - if m.CephFS != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n231, err := m.CephFS.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n231 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.Flocker != nil { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n232, err := m.Flocker.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n232 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.DownwardAPI != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n233, err := m.DownwardAPI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n233 + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeClaimCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.FC != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n234, err := m.FC.MarshalTo(dAtA[i:]) + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n234 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AzureFile != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n235, err := m.AzureFile.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n235 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ConfigMap != nil { - dAtA[i] = 0x9a - i++ - 
dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n236, err := m.ConfigMap.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n236 + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.VsphereVolume != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n237, err := m.VsphereVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n237 } - if m.Quobyte != nil { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n238, err := m.Quobyte.MarshalTo(dAtA[i:]) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n238 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.AzureDisk != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n239, err := m.AzureDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n239 + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeClaimSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.PhotonPersistentDisk != nil { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n240, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DataSource != nil { + { + size, err := m.DataSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n240 + i-- + dAtA[i] = 0x3a } - if m.PortworxVolume != nil { - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n241, err := m.PortworxVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n241 + if m.VolumeMode != nil { + i -= len(*m.VolumeMode) + copy(dAtA[i:], *m.VolumeMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i-- + 
dAtA[i] = 0x32 } - if m.ScaleIO != nil { - dAtA[i] = 0xca - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n242, err := m.ScaleIO.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n242 + if m.StorageClassName != nil { + i -= len(*m.StorageClassName) + copy(dAtA[i:], *m.StorageClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) + i-- + dAtA[i] = 0x2a } - if m.Projected != nil { - dAtA[i] = 0xd2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) - n243, err := m.Projected.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n243 + i-- + dAtA[i] = 0x22 } - if m.StorageOS != nil { - dAtA[i] = 0xda - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n244, err := m.StorageOS.MarshalTo(dAtA[i:]) + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n244 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.CSI != nil { - dAtA[i] = 0xe2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) - n245, err := m.CSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x12 + if len(m.AccessModes) > 0 { + for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AccessModes[iNdEx]) + copy(dAtA[i:], m.AccessModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) + i-- + dAtA[i] = 0xa } - i += n245 } - return i, nil + return len(dAtA) - i, nil } -func (m *VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.AccessModes) > 0 { + for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AccessModes[iNdEx]) + copy(dAtA[i:], m.AccessModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath))) - i += copy(dAtA[i:], m.VolumePath) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyName))) - i += copy(dAtA[i:], m.StoragePolicyName) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID))) - i += copy(dAtA[i:], m.StoragePolicyID) - return i, nil + return len(dAtA) - i, nil } -func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeClaimVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) - n246, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i += n246 - return i, nil + i-- + dAtA[i] = 0x10 + i -= len(m.ClaimName) + copy(dAtA[i:], m.ClaimName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClaimName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *WindowsSecurityContextOptions) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *WindowsSecurityContextOptions) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.GMSACredentialSpecName != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpecName))) - i += copy(dAtA[i:], *m.GMSACredentialSpecName) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - if m.GMSACredentialSpec != nil { - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpec))) - i += copy(dAtA[i:], *m.GMSACredentialSpec) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *PersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return offset + 1 + return dAtA[:n], nil } -func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Partition)) - n += 2 - return n + +func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Affinity) Size() (n int) { +func (m *PersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.NodeAffinity != nil { - l = m.NodeAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.CSI != nil { + { + size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if m.PodAffinity != nil { - l = m.PodAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.StorageOS != nil { + { + size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } - if m.PodAntiAffinity != nil { - l = m.PodAntiAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Local != nil { + { + size, err := m.Local.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - return n + if m.ScaleIO != nil { + { + size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.PortworxVolume != nil { + { + size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.PhotonPersistentDisk != nil { + { + size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.AzureDisk != nil { + { + size, err := m.AzureDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Quobyte != nil { + { + size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if 
m.VsphereVolume != nil { + { + size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.AzureFile != nil { + { + size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + if m.FlexVolume != nil { + { + size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.Flocker != nil { + { + size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.FC != nil { + { + size, err := m.FC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.CephFS != nil { + { + size, err := m.CephFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.Cinder != nil { + { + size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.ISCSI != nil { + { + size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.RBD != nil { + { + size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.NFS != nil { + { + size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Glusterfs != nil { + { + size, err := m.Glusterfs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.HostPath != nil { + { + size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.AWSElasticBlockStore != nil { + { + size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.GCEPersistentDisk != nil { + { + size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *AttachedVolume) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DevicePath) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PersistentVolumeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *AvoidPods) Size() (n int) { - var 
l int - _ = l - if len(m.PreferAvoidPods) > 0 { - for _, e := range m.PreferAvoidPods { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n +func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AzureDiskVolumeSource) Size() (n int) { +func (m *PersistentVolumeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.DiskName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DataDiskURI) - n += 1 + l + sovGenerated(uint64(l)) - if m.CachingMode != nil { - l = len(*m.CachingMode) - n += 1 + l + sovGenerated(uint64(l)) + if m.NodeAffinity != nil { + { + size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a } - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) + if m.VolumeMode != nil { + i -= len(*m.VolumeMode) + copy(dAtA[i:], *m.VolumeMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i-- + dAtA[i] = 0x42 } - if m.ReadOnly != nil { - n += 2 + if len(m.MountOptions) > 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x3a + } } - if m.Kind != nil { - l = len(*m.Kind) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.StorageClassName) + copy(dAtA[i:], m.StorageClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName))) + i-- + dAtA[i] = 0x32 + i -= len(m.PersistentVolumeReclaimPolicy) + copy(dAtA[i:], m.PersistentVolumeReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PersistentVolumeReclaimPolicy))) + i-- + dAtA[i] = 0x2a + if m.ClaimRef != nil { + { + size, err := m.ClaimRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - return n + if len(m.AccessModes) > 0 { + for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AccessModes[iNdEx]) + copy(dAtA[i:], m.AccessModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.PersistentVolumeSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *AzureFilePersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.SecretName) - n += 1 
+ l + sovGenerated(uint64(l)) - l = len(m.ShareName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretNamespace != nil { - l = len(*m.SecretNamespace) - n += 1 + l + sovGenerated(uint64(l)) +func (m *PersistentVolumeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *AzureFileVolumeSource) Size() (n int) { +func (m *PersistentVolumeStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ShareName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Binding) Size() (n int) { +func (m *PhotonPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PhotonPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PhotonPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.PdID) + copy(dAtA[i:], m.PdID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *CSIPersistentVolumeSource) Size() (n int) { +func (m *Pod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Pod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Pod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeHandle) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeAttributes) > 0 { - for k, v := range m.VolumeAttributes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ControllerPublishSecretRef != nil { - l = m.ControllerPublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if 
m.NodeStageSecretRef != nil { - l = m.NodeStageSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.NodePublishSecretRef != nil { - l = m.NodePublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ControllerExpandSecretRef != nil { - l = m.ControllerExpandSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *CSIVolumeSource) Size() (n int) { +func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - if m.ReadOnly != nil { - n += 2 - } - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - if len(m.VolumeAttributes) > 0 { - for k, v := range m.VolumeAttributes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - if m.NodePublishSecretRef != nil { - l = m.NodePublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *PodAffinityTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Capabilities) Size() (n int) { +func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Add) > 0 { - for _, s := range m.Add { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.TopologyKey) + copy(dAtA[i:], m.TopologyKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) + i-- + dAtA[i] = 0x1a + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= 
len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if len(m.Drop) > 0 { - for _, s := range m.Drop { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *CephFSPersistentVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SecretFile) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - n += 2 - return n + return dAtA[:n], nil } -func (m *CephFSVolumeSource) Size() (n int) { +func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAntiAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SecretFile) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - n += 2 - return n + return len(dAtA) - i, nil } -func (m *CinderPersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *CinderVolumeSource) Size() (n int) { +func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAttachOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + 
i := len(dAtA) + _ = i var l int _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x2a + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x20 + i-- + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *ClientIPConfig) Size() (n int) { - var l int - _ = l - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) +func (m *PodCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ComponentCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Error) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ComponentStatus) Size() (n int) { +func (m *PodCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x22 + { + size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ComponentStatusList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ConfigMap) Size() (n int) { +func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*PodDNSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Data) > 0 { - for k, v := range m.Data { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - if len(m.BinaryData) > 0 { - for k, v := range m.BinaryData { - _ = k - _ = v - l = 0 - if v != nil { - l = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.Searches) > 0 { + for iNdEx := len(m.Searches) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Searches[iNdEx]) + copy(dAtA[i:], m.Searches[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Searches[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - return n + if len(m.Nameservers) > 0 { + for iNdEx := len(m.Nameservers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Nameservers[iNdEx]) + copy(dAtA[i:], m.Nameservers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Nameservers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *ConfigMapEnvSource) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 +func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ConfigMapKeySelector) Size() (n int) { +func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDNSConfigOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 + if m.Value != nil { + i -= len(*m.Value) + copy(dAtA[i:], *m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i-- + dAtA[i] = 0x12 } - return n + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ConfigMapList) Size() (n int) { +func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodExecOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], 
m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x32 } } - return n + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x2a + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i-- + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *ConfigMapNodeConfigSource) Size() (n int) { - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeletConfigKey) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ConfigMapProjection) Size() (n int) { +func (m *PodIP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Optional != nil { - n += 2 + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ConfigMapVolumeSource) Size() (n int) { +func (m *PodList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.Optional != nil { - n += 2 + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodLogOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Container) Size() (n int) { +func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.LimitBytes != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + i-- + dAtA[i] = 0x40 } - l = len(m.WorkingDir) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.TailLines != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + i-- + dAtA[i] = 0x38 } - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + if m.Timestamps { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x30 + if m.SinceTime != nil { + { + size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if m.LivenessProbe != nil { - l = m.LivenessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReadinessProbe != nil { - l = m.ReadinessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Lifecycle != nil { - l = m.Lifecycle.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.SinceSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + i-- + dAtA[i] = 0x20 } - l = len(m.TerminationMessagePath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImagePullPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + if m.Previous { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - n += 3 - n += 3 - n += 3 - if len(m.EnvFrom) > 0 { - for _, e := range m.EnvFrom { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + i-- + dAtA[i] = 0x18 + i-- + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - l = len(m.TerminationMessagePolicy) - n += 2 + l + sovGenerated(uint64(l)) - if len(m.VolumeDevices) > 0 { - for _, e := range m.VolumeDevices { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + i-- + dAtA[i] = 0x10 + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ContainerImage) Size() (n int) { +func (m *PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPortForwardOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; 
iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.Ports[iNdEx])) + i-- + dAtA[i] = 0x8 } } - n += 1 + sovGenerated(uint64(m.SizeBytes)) - return n + return len(dAtA) - i, nil } -func (m *ContainerPort) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.HostPort)) - n += 1 + sovGenerated(uint64(m.ContainerPort)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HostIP) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ContainerState) Size() (n int) { - var l int - _ = l - if m.Waiting != nil { - l = m.Waiting.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Running != nil { - l = m.Running.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Terminated != nil { - l = m.Terminated.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ContainerStateRunning) Size() (n int) { - var l int - _ = l - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ContainerStateTerminated) Size() (n int) { +func (m *PodProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 1 + sovGenerated(uint64(m.ExitCode)) - n += 1 + sovGenerated(uint64(m.Signal)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FinishedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerID) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ContainerStateWaiting) Size() (n int) { - var l int - _ = l - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ContainerStatus) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.State.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTerminationState.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 1 + sovGenerated(uint64(m.RestartCount)) - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImageID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerID) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *DaemonEndpoint) Size() (n int) { +func (m *PodReadinessGate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 1 + sovGenerated(uint64(m.Port)) - return n + i -= len(m.ConditionType) + copy(dAtA[i:], m.ConditionType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType))) + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil } -func (m *DownwardAPIProjection) Size() (n int) { - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *DownwardAPIVolumeFile) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.FieldRef != nil { - l = m.FieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceFieldRef != nil { - l = m.ResourceFieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Mode != nil { - n += 1 + sovGenerated(uint64(*m.Mode)) - } - return n +func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *DownwardAPIVolumeSource) Size() (n int) { +func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.WindowsOptions != nil { + { + size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) - } - return n -} - -func (m *EmptyDirVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Medium) - n += 1 + l + sovGenerated(uint64(l)) - if m.SizeLimit != nil { - l = m.SizeLimit.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Sysctls) > 0 { + for iNdEx := len(m.Sysctls) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sysctls[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } } - return n -} - -func (m *EndpointAddress) Size() (n int) { - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - if m.TargetRef != nil { - l = m.TargetRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RunAsGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + i-- + dAtA[i] = 0x30 } - l = len(m.Hostname) - n += 1 + l + sovGenerated(uint64(l)) - if m.NodeName != nil { - l = len(*m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) + if m.FSGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup)) + i-- + dAtA[i] = 0x28 } - return n -} - -func (m *EndpointPort) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *EndpointSubset) Size() (n int) { - var l int - _ = l - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.SupplementalGroups) > 0 { + for iNdEx := len(m.SupplementalGroups) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups[iNdEx])) + i-- + dAtA[i] = 0x20 } } - if len(m.NotReadyAddresses) > 0 { - for _, e := range m.NotReadyAddresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RunAsNonRoot != nil { + i-- + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] 
= 0x18 } - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.RunAsUser != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + i-- + dAtA[i] = 0x10 } - return n -} - -func (m *Endpoints) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subsets) > 0 { - for _, e := range m.Subsets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *EndpointsList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *PodSignature) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *EnvFromSource) Size() (n int) { - var l int - _ = l - l = len(m.Prefix) - n += 1 + l + sovGenerated(uint64(l)) - if m.ConfigMapRef != nil { - l = m.ConfigMapRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EnvVar) Size() (n int) { +func (m *PodSignature) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - if m.ValueFrom != nil { - l = m.ValueFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.PodController != nil { + { + size, err := m.PodController.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *EnvVarSource) Size() (n int) { - var l int - _ = l - if m.FieldRef != nil { - l = m.FieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceFieldRef != nil { - l = m.ResourceFieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ConfigMapKeyRef != nil { - l = m.ConfigMapKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SecretKeyRef != nil { - l = m.SecretKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *PodSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Event) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.InvolvedObject.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FirstTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Count)) - l = len(m.Type) - n += 1 + l 
+ sovGenerated(uint64(l)) - l = m.EventTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Series != nil { - l = m.Series.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Action) - n += 1 + l + sovGenerated(uint64(l)) - if m.Related != nil { - l = m.Related.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.ReportingController) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ReportingInstance) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EventList) Size() (n int) { +func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.EphemeralContainers) > 0 { + for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 } } - return n -} - -func (m *EventSeries) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Count)) - l = m.LastObservedTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.State) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *EventSource) Size() (n int) { - var l int - _ = l - l = len(m.Component) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ExecAction) Size() (n int) { - var l int - _ = l - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.TopologySpreadConstraints) > 0 { + for iNdEx := len(m.TopologySpreadConstraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TopologySpreadConstraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a } } - return n -} - -func (m *FCVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.TargetWWNs) > 0 { - for _, s := range m.TargetWWNs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Overhead) > 0 { + keysForOverhead := make([]string, 0, len(m.Overhead)) + for k := range m.Overhead { + keysForOverhead = append(keysForOverhead, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead) + for iNdEx := len(keysForOverhead) - 1; iNdEx >= 0; iNdEx-- { + v := m.Overhead[ResourceName(keysForOverhead[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForOverhead[iNdEx]) + copy(dAtA[i:], keysForOverhead[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOverhead[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 } } - if m.Lun != nil { - n += 1 + sovGenerated(uint64(*m.Lun)) + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa } - l = 
len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.WWIDs) > 0 { - for _, s := range m.WWIDs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.EnableServiceLinks != nil { + i-- + if *m.EnableServiceLinks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 } - return n -} - -func (m *FlexPersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RuntimeClassName != nil { + i -= len(*m.RuntimeClassName) + copy(dAtA[i:], *m.RuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea } - n += 2 - if len(m.Options) > 0 { - for k, v := range m.Options { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.ReadinessGates) > 0 { + for iNdEx := len(m.ReadinessGates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReadinessGates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 } } - return n -} - -func (m *FlexVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ShareProcessNamespace != nil { + i-- + if *m.ShareProcessNamespace { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 } - n += 2 - if len(m.Options) > 0 { - for k, v := range m.Options { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.DNSConfig != nil { + { + size, err := m.DNSConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 } - return n -} - -func (m *FlockerVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.DatasetName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DatasetUUID) - n += 1 + l + sovGenerated(uint64(l)) - return n + if m.Priority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } + i -= len(m.PriorityClassName) + copy(dAtA[i:], m.PriorityClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + if len(m.HostAliases) > 0 { + for iNdEx := len(m.HostAliases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostAliases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if 
m.AutomountServiceAccountToken != nil { + i-- + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if len(m.InitContainers) > 0 { + for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + i -= len(m.SchedulerName) + copy(dAtA[i:], m.SchedulerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + if m.Affinity != nil { + { + size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + i -= len(m.Subdomain) + copy(dAtA[i:], m.Subdomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + } + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + i-- + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x52 + i -= len(m.DeprecatedServiceAccount) + copy(dAtA[i:], m.DeprecatedServiceAccount) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) + i-- + dAtA[i] = 0x4a + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i-- + dAtA[i] = 0x42 + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.DNSPolicy) + copy(dAtA[i:], m.DNSPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) + i-- + dAtA[i] = 0x32 + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, 
uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x28 + } + if m.TerminationGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) + i-- + dAtA[i] = 0x20 + } + i -= len(m.RestartPolicy) + copy(dAtA[i:], m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i-- + dAtA[i] = 0x1a + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *GCEPersistentDiskVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.PDName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Partition)) - n += 2 - return n +func (m *PodStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *GitRepoVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Repository) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Revision) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Directory) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GlusterfsPersistentVolumeSource) Size() (n int) { +func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.EndpointsName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.EndpointsNamespace != nil { - l = len(*m.EndpointsNamespace) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.EphemeralContainerStatuses) > 0 { + for iNdEx := len(m.EphemeralContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EphemeralContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } } - return n -} - -func (m *GlusterfsVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.EndpointsName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + if len(m.PodIPs) > 0 { + for iNdEx := len(m.PodIPs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PodIPs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + i -= len(m.NominatedNodeName) + copy(dAtA[i:], m.NominatedNodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NominatedNodeName))) + i-- + dAtA[i] = 0x5a + if len(m.InitContainerStatuses) > 0 { + for iNdEx := len(m.InitContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InitContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + i -= len(m.QOSClass) + copy(dAtA[i:], m.QOSClass) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass))) + i-- + dAtA[i] = 0x4a + if len(m.ContainerStatuses) > 0 { + for iNdEx := len(m.ContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.StartTime != nil { + { + size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.PodIP) + copy(dAtA[i:], m.PodIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) + i-- + dAtA[i] = 0x32 + i -= len(m.HostIP) + copy(dAtA[i:], m.HostIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *HTTPGetAction) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Scheme) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.HTTPHeaders) > 0 { - for _, e := range m.HTTPHeaders { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *PodStatusResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *HTTPHeader) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Handler) Size() (n int) { +func (m *PodStatusResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Exec != nil { - l = m.Exec.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.HTTPGet != nil { - l = m.HTTPGet.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.TCPSocket != nil { - l = m.TCPSocket.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodTemplate) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *HostAlias) Size() (n int) { +func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Hostnames) > 0 { - for _, s := range m.Hostnames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *HostPathVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.Type != nil { - l = len(*m.Type) - n += 1 + l + sovGenerated(uint64(l)) +func (m *PodTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ISCSIPersistentVolumeSource) Size() (n int) { +func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.TargetPortal) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IQN) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Lun)) - l = len(m.ISCSIInterface) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Portals) > 0 { - for _, s := range m.Portals { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - n += 2 - if m.InitiatorName != nil { - l = len(*m.InitiatorName) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ISCSIVolumeSource) Size() (n int) { +func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.TargetPortal) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IQN) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + 
sovGenerated(uint64(m.Lun)) - l = len(m.ISCSIInterface) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Portals) > 0 { - for _, s := range m.Portals { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - n += 2 - if m.InitiatorName != nil { - l = len(*m.InitiatorName) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *KeyToPath) Size() (n int) { +func (m *PortworxVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortworxVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.Mode != nil { - n += 1 + sovGenerated(uint64(*m.Mode)) + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Lifecycle) Size() (n int) { +func (m *Preconditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Preconditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.PostStart != nil { - l = m.PostStart.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.UID != nil { + i -= len(*m.UID) + copy(dAtA[i:], *m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i-- + dAtA[i] = 0xa } - if m.PreStop != nil { - l = m.PreStop.Size() - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *PreferAvoidPodsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *LimitRange) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LimitRangeItem) Size() (n int) { +func (m *PreferAvoidPodsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Max) > 0 { - for k, v := range m.Max { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Min) > 0 { - for k, v := range m.Min { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Default) > 0 { - for k, v := range m.Default { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.DefaultRequest) > 0 { - for k, v := range m.DefaultRequest { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + { + size, err := m.EvictionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.MaxLimitRequestRatio) > 0 { - for k, v := range m.MaxLimitRequestRatio { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i-- + dAtA[i] = 0x12 + { + size, err := m.PodSignature.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LimitRangeList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *PreferredSchedulingTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *LimitRangeSpec) Size() (n int) { +func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PreferredSchedulingTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Limits) > 0 { - for _, e := range m.Limits { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Preference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *List) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *Probe) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA 
= make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *LoadBalancerIngress) Size() (n int) { - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Hostname) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Probe) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LoadBalancerStatus) Size() (n int) { +func (m *Probe) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i = encodeVarintGenerated(dAtA, i, uint64(m.FailureThreshold)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.SuccessThreshold)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.TimeoutSeconds)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) + i-- + dAtA[i] = 0x10 + { + size, err := m.Handler.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *LocalObjectReference) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *LocalVolumeSource) Size() (n int) { +func (m *ProjectedVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectedVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x10 } - return n + if len(m.Sources) > 0 { + for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *NFSVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Server) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n +func (m *QuobyteVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Namespace) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NamespaceList) Size() (n int) { +func (m *QuobyteVolumeSource) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.Tenant) + copy(dAtA[i:], m.Tenant) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tenant))) + i-- + dAtA[i] = 0x32 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x2a + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x22 + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x18 + i -= len(m.Volume) + copy(dAtA[i:], m.Volume) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volume))) + i-- + dAtA[i] = 0x12 + i -= len(m.Registry) + copy(dAtA[i:], m.Registry) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamespaceSpec) Size() (n int) { - var l int - _ = l - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *RBDPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *NamespaceStatus) Size() (n int) { - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Node) Size() (n int) { +func (m *RBDPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.Keyring) + copy(dAtA[i:], m.Keyring) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i-- + dAtA[i] = 0x32 + i -= len(m.RadosUser) + copy(dAtA[i:], m.RadosUser) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i-- + dAtA[i] = 0x2a + i -= len(m.RBDPool) + copy(dAtA[i:], m.RBDPool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i-- + dAtA[i] = 0x22 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.RBDImage) + copy(dAtA[i:], m.RBDImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i-- + dAtA[i] = 0x12 + if len(m.CephMonitors) > 0 { + for iNdEx := len(m.CephMonitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CephMonitors[iNdEx]) + copy(dAtA[i:], m.CephMonitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CephMonitors[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *NodeAddress) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Address) - n += 1 + l + 
sovGenerated(uint64(l)) - return n +func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *NodeAffinity) Size() (n int) { +func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RBDVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { - l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x40 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x3a } - return n + i -= len(m.Keyring) + copy(dAtA[i:], m.Keyring) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i-- + dAtA[i] = 0x32 + i -= len(m.RadosUser) + copy(dAtA[i:], m.RadosUser) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i-- + dAtA[i] = 0x2a + i -= len(m.RBDPool) + copy(dAtA[i:], m.RBDPool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i-- + dAtA[i] = 0x22 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.RBDImage) + copy(dAtA[i:], m.RBDImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i-- + dAtA[i] = 0x12 + if len(m.CephMonitors) > 0 { + for iNdEx := len(m.CephMonitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CephMonitors[iNdEx]) + copy(dAtA[i:], m.CephMonitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CephMonitors[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *NodeCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastHeartbeatTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *RangeAllocation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *NodeConfigSource) Size() (n int) { - var l int - _ = l - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeConfigStatus) Size() (n int) { +func (m *RangeAllocation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Assigned != nil { - l = m.Assigned.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i-- 
+ dAtA[i] = 0x1a } - if m.Active != nil { - l = m.Active.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Range) + copy(dAtA[i:], m.Range) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.LastKnownGood != nil { - l = m.LastKnownGood.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicationController) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.Error) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *NodeDaemonEndpoints) Size() (n int) { - var l int - _ = l - l = m.KubeletEndpoint.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeList) Size() (n int) { +func (m *ReplicationController) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeProxyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ReplicationControllerCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *NodeResources) Size() (n int) { - var l int - _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n +func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeSelector) Size() (n int) { +func (m *ReplicationControllerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.NodeSelectorTerms) > 0 { - for _, e := range m.NodeSelectorTerms { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if 
err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSelectorRequirement) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ReplicationControllerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *NodeSelectorTerm) Size() (n int) { +func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if len(m.MatchFields) > 0 { - for _, e := range m.MatchFields { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NodeSpec) Size() (n int) { - var l int - _ = l - l = len(m.PodCIDR) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DoNotUse_ExternalID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ProviderID) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Taints) > 0 { - for _, e := range m.Taints { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.ConfigSource != nil { - l = m.ConfigSource.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *ReplicationControllerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *NodeStatus) Size() (n int) { +func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Allocatable) > 0 { - for k, v := range m.Allocatable { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + 
sovGenerated(uint64(mapEntrySize)) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + if m.Template != nil { + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) } - } - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - l = m.DaemonEndpoints.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.NodeInfo.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Images) > 0 { - for _, e := range m.Images { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - if len(m.VolumesInUse) > 0 { - for _, s := range m.VolumesInUse { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + return len(dAtA) - i, nil +} + +func (m *ReplicationControllerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.VolumesAttached) > 0 { - for _, e := range m.VolumesAttached { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *ReplicationControllerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } } - if m.Config != nil { - l = m.Config.Size() - n += 1 + l + sovGenerated(uint64(l)) + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - 
return n + return dAtA[:n], nil } -func (m *NodeSystemInfo) Size() (n int) { - var l int - _ = l - l = len(m.MachineID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SystemUUID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.BootID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KernelVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.OSImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerRuntimeVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeletVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeProxyVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.OperatingSystem) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Architecture) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ObjectFieldSelector) Size() (n int) { +func (m *ResourceFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldPath) - n += 1 + l + sovGenerated(uint64(l)) - return n + { + size, err := m.Divisor.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ObjectReference) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldPath) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PersistentVolume) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PersistentVolumeClaim) Size() (n int) { +func (m *ResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ResourceQuotaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PersistentVolumeClaimList) Size() (n int) { +func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimSpec) Size() (n int) { +func (m *ResourceQuotaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.ScopeSelector != nil { + { + size, err := m.ScopeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.StorageClassName != nil { - l = len(*m.StorageClassName) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - if m.VolumeMode != nil { - l = len(*m.VolumeMode) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) + 
} + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for iNdEx := len(keysForHard) - 1; iNdEx >= 0; iNdEx-- { + v := m.Hard[ResourceName(keysForHard[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForHard[iNdEx]) + copy(dAtA[i:], keysForHard[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHard[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } - if m.DataSource != nil { - l = m.DataSource.Size() - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *ResourceQuotaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PersistentVolumeClaimStatus) Size() (n int) { +func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuotaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Used) > 0 { + keysForUsed := make([]string, 0, len(m.Used)) + for k := range m.Used { + keysForUsed = append(keysForUsed, string(k)) } - } - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + for iNdEx := len(keysForUsed) - 1; iNdEx >= 0; iNdEx-- { + v := m.Used[ResourceName(keysForUsed[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsed[iNdEx]) + copy(dAtA[i:], keysForUsed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for iNdEx := len(keysForHard) - 1; iNdEx >= 0; iNdEx-- { + v := m.Hard[ResourceName(keysForHard[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForHard[iNdEx]) + copy(dAtA[i:], keysForHard[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHard[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.ClaimName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n +func (m 
*ResourceRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PersistentVolumeList) Size() (n int) { +func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requests) > 0 { + keysForRequests := make([]string, 0, len(m.Requests)) + for k := range m.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- { + v := m.Requests[ResourceName(keysForRequests[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForRequests[iNdEx]) + copy(dAtA[i:], keysForRequests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - return n + if len(m.Limits) > 0 { + keysForLimits := make([]string, 0, len(m.Limits)) + for k := range m.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + for iNdEx := len(keysForLimits) - 1; iNdEx >= 0; iNdEx-- { + v := m.Limits[ResourceName(keysForLimits[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForLimits[iNdEx]) + copy(dAtA[i:], keysForLimits[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLimits[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *PersistentVolumeSource) Size() (n int) { +func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.GCEPersistentDisk != nil { - l = m.GCEPersistentDisk.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0x22 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x1a + i -= len(m.Role) + copy(dAtA[i:], m.Role) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) + i-- + dAtA[i] = 0x12 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.AWSElasticBlockStore != nil { - l = m.AWSElasticBlockStore.Size() - n += 1 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleIOPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.HostPath != nil { - l = m.HostPath.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Glusterfs != nil { - l = m.Glusterfs.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NFS != nil { - l = m.NFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RBD != nil { - l = m.RBD.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ISCSI != nil { - l = m.ISCSI.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Cinder != nil { - l = m.Cinder.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CephFS != nil { - l = m.CephFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.FC != nil { - l = m.FC.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Flocker != nil { - l = m.Flocker.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.FlexVolume != nil { - l = m.FlexVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AzureFile != nil { - l = m.AzureFile.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.VsphereVolume != nil { - l = m.VsphereVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Quobyte != nil { - l = m.Quobyte.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AzureDisk != nil { - l = m.AzureDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.PhotonPersistentDisk != nil { - l = m.PhotonPersistentDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.PortworxVolume != nil { - l = m.PortworxVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ScaleIO != nil { - l = m.ScaleIO.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.Local != nil { - l = m.Local.Size() - n += 2 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x50 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x4a + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0x42 + i -= len(m.StorageMode) + copy(dAtA[i:], m.StorageMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i-- + dAtA[i] = 0x3a + i -= len(m.StoragePool) + copy(dAtA[i:], m.StoragePool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i-- + dAtA[i] = 0x32 + i -= len(m.ProtectionDomain) + copy(dAtA[i:], m.ProtectionDomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) + i-- + dAtA[i] = 0x2a + i-- + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.StorageOS != nil { - l = m.StorageOS.Size() - n += 2 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - if m.CSI != nil { - l = m.CSI.Size() - n += 2 + l + sovGenerated(uint64(l)) + i -= len(m.System) + copy(dAtA[i:], 
m.System) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i-- + dAtA[i] = 0x12 + i -= len(m.Gateway) + copy(dAtA[i:], m.Gateway) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PersistentVolumeSpec) Size() (n int) { +func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleIOVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = m.PersistentVolumeSource.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.ClaimRef != nil { - l = m.ClaimRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x50 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x4a + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0x42 + i -= len(m.StorageMode) + copy(dAtA[i:], m.StorageMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i-- + dAtA[i] = 0x3a + i -= len(m.StoragePool) + copy(dAtA[i:], m.StoragePool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i-- + dAtA[i] = 0x32 + i -= len(m.ProtectionDomain) + copy(dAtA[i:], m.ProtectionDomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) + i-- + dAtA[i] = 0x2a + i-- + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - l = len(m.PersistentVolumeReclaimPolicy) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageClassName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if m.VolumeMode != nil { - l = len(*m.VolumeMode) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NodeAffinity != nil { - l = m.NodeAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PersistentVolumeStatus) Size() (n int) { - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.System) + copy(dAtA[i:], m.System) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i-- + dAtA[i] = 0x12 + i -= len(m.Gateway) + copy(dAtA[i:], m.Gateway) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PhotonPersistentDiskVolumeSource) Size() (n 
int) { - var l int - _ = l - l = len(m.PdID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ScopeSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Pod) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ScopeSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodAffinity) Size() (n int) { +func (m *ScopeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *PodAffinityTerm) Size() (n int) { - var l int - _ = l - if m.LabelSelector != nil { - l = m.LabelSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Namespaces) > 0 { - for _, s := range m.Namespaces { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ScopedResourceSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.TopologyKey) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *PodAntiAffinity) Size() (n int) { +func (m *ScopedResourceSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopedResourceSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - return n + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.ScopeName) + copy(dAtA[i:], m.ScopeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScopeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodAttachOptions) 
Size() (n int) { - var l int - _ = l - n += 2 - n += 2 - n += 2 - n += 2 - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Secret) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PodCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Secret) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodDNSConfig) Size() (n int) { +func (m *Secret) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Nameservers) > 0 { - for _, s := range m.Nameservers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.StringData) > 0 { + keysForStringData := make([]string, 0, len(m.StringData)) + for k := range m.StringData { + keysForStringData = append(keysForStringData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + for iNdEx := len(keysForStringData) - 1; iNdEx >= 0; iNdEx-- { + v := m.StringData[string(keysForStringData[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForStringData[iNdEx]) + copy(dAtA[i:], keysForStringData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStringData[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 } } - if len(m.Searches) > 0 { - for _, s := range m.Searches { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x1a + if len(m.Data) > 0 { + keysForData := make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- { + v := m.Data[string(keysForData[iNdEx])] + baseI := i + if v != nil { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(keysForData[iNdEx]) + copy(dAtA[i:], keysForData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForData[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - if len(m.Options) > 0 { - for _, e := range m.Options { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodDNSConfigOption) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Value != nil { - l = len(*m.Value) - n += 1 + l + sovGenerated(uint64(l)) +func (m *SecretEnvSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodExecOptions) Size() (n int) { +func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretEnvSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 2 - n += 2 - n += 2 - n += 2 - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 } - return n + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *SecretKeySelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodLogOptions) Size() (n int) { +func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - if m.SinceSeconds != nil { - n += 1 + sovGenerated(uint64(*m.SinceSeconds)) - } - if m.SinceTime != nil { - l = m.SinceTime.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 } - n += 2 - if m.TailLines != nil { - n += 1 + sovGenerated(uint64(*m.TailLines)) + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.LimitBytes != nil { - n += 1 + sovGenerated(uint64(*m.LimitBytes)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodPortForwardOptions) Size() (n int) { +func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - n += 1 + sovGenerated(uint64(e)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodProxyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SecretProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PodReadinessGate) Size() (n int) { - var l int - _ = l - l = len(m.ConditionType) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityContext) Size() (n int) { +func (m *SecretProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RunAsUser != nil { - n += 1 + sovGenerated(uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - n += 2 - } - if len(m.SupplementalGroups) > 0 { - for _, e := range m.SupplementalGroups { - n += 1 + sovGenerated(uint64(e)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } - if m.FSGroup != nil { - n += 1 + sovGenerated(uint64(*m.FSGroup)) - } - if m.RunAsGroup != nil { - n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - if len(m.Sysctls) > 0 { - for _, e := range m.Sysctls { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.WindowsOptions != nil { - l = m.WindowsOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodSignature) Size() (n int) { +func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.PodController != nil { - l = m.PodController.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodSpec) Size() (n int) { +func (m *SecretVolumeSource) MarshalTo(dAtA 
[]byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } - if len(m.Containers) > 0 { - for _, e := range m.Containers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x18 } - l = len(m.RestartPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.TerminationGracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - if m.ActiveDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.DNSPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + return dAtA[:n], nil +} + +func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.WindowsOptions != nil { + { + size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x52 } - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DeprecatedServiceAccount) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - n += 2 - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ProcMount != nil { + i -= len(*m.ProcMount) + copy(dAtA[i:], *m.ProcMount) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount))) + i-- + dAtA[i] = 0x4a } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RunAsGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + i-- + dAtA[i] = 0x40 + } + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x38 } - l = len(m.Hostname) - n += 2 + l + sovGenerated(uint64(l)) - l = len(m.Subdomain) - n += 2 + l + sovGenerated(uint64(l)) - if m.Affinity != nil { - l = m.Affinity.Size() - n += 2 + l + sovGenerated(uint64(l)) + if 
m.ReadOnlyRootFilesystem != nil { + i-- + if *m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 } - l = len(m.SchedulerName) - n += 2 + l + sovGenerated(uint64(l)) - if len(m.InitContainers) > 0 { - for _, e := range m.InitContainers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.RunAsNonRoot != nil { + i-- + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x28 } - if m.AutomountServiceAccountToken != nil { - n += 3 + if m.RunAsUser != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + i-- + dAtA[i] = 0x20 } - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.HostAliases) > 0 { - for _, e := range m.HostAliases { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.Privileged != nil { + i-- + if *m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 } - l = len(m.PriorityClassName) - n += 2 + l + sovGenerated(uint64(l)) - if m.Priority != nil { - n += 2 + sovGenerated(uint64(*m.Priority)) + if m.Capabilities != nil { + { + size, err := m.Capabilities.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - if m.DNSConfig != nil { - l = m.DNSConfig.Size() - n += 2 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *SerializedReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.ShareProcessNamespace != nil { - n += 3 + return dAtA[:n], nil +} + +func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SerializedReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Reference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.ReadinessGates) > 0 { - for _, e := range m.ReadinessGates { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Service) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RuntimeClassName != nil { - l = len(*m.RuntimeClassName) - n += 2 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.EnableServiceLinks != nil { - n += 3 + i-- + dAtA[i] = 0x12 + { + 
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.PreemptionPolicy != nil { - l = len(*m.PreemptionPolicy) - n += 2 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceAccount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PodStatus) Size() (n int) { +func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AutomountServiceAccountToken != nil { + i-- + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HostIP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PodIP) - n += 1 + l + sovGenerated(uint64(l)) - if m.StartTime != nil { - l = m.StartTime.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - if len(m.ContainerStatuses) > 0 { - for _, e := range m.ContainerStatuses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - l = len(m.QOSClass) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.InitContainerStatuses) > 0 { - for _, e := range m.InitContainerStatuses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - l = len(m.NominatedNodeName) - n += 1 + l + sovGenerated(uint64(l)) - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodStatusResult) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PodTemplate) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodTemplateList) Size() (n int) { +func (m *ServiceAccountList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodTemplateSpec) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PortworxVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n +func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Preconditions) Size() (n int) { +func (m *ServiceAccountTokenProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.UID != nil { - l = len(*m.UID) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x10 } - return n -} - -func (m *PreferAvoidPodsEntry) Size() (n int) { - var l int - _ = l - l = m.PodSignature.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.EvictionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PreferredSchedulingTerm) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Weight)) - l = m.Preference.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Probe) Size() (n int) { - var l int - _ = l - l = m.Handler.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) - n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) - n += 1 + sovGenerated(uint64(m.PeriodSeconds)) - n += 1 + sovGenerated(uint64(m.SuccessThreshold)) - n += 1 + sovGenerated(uint64(m.FailureThreshold)) - return n +func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ProjectedVolumeSource) Size() (n int) { +func (m *ServiceList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Sources) > 0 { - for _, e := range m.Sources { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *QuobyteVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Registry) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Volume) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Tenant) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServicePort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RBDPersistentVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.RBDImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RBDPool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RadosUser) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Keyring) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n +func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RBDVolumeSource) Size() (n int) { +func (m *ServicePort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) + i-- + dAtA[i] = 0x28 + { + size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - l = len(m.RBDImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RBDPool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RadosUser) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Keyring) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x18 + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RangeAllocation) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 
+ l + sovGenerated(uint64(l)) - l = len(m.Range) - n += 1 + l + sovGenerated(uint64(l)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) +func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ReplicationController) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ServiceProxyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicationControllerCondition) Size() (n int) { +func (m *ServiceProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ReplicationControllerList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ReplicationControllerSpec) Size() (n int) { +func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) + if m.IPFamily != nil { + i -= len(*m.IPFamily) + copy(dAtA[i:], *m.IPFamily) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamily))) + i-- + dAtA[i] = 0x7a + } + if m.SessionAffinityConfig != nil { + { + size, err := m.SessionAffinityConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + i-- + if m.PublishNotReadyAddresses { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort)) + i-- + dAtA[i] = 0x60 + i -= len(m.ExternalTrafficPolicy) + copy(dAtA[i:], m.ExternalTrafficPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy))) + i-- + dAtA[i] = 0x5a + i -= len(m.ExternalName) + copy(dAtA[i:], m.ExternalName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalName))) + i-- + dAtA[i] = 0x52 + if len(m.LoadBalancerSourceRanges) > 0 { + for iNdEx := len(m.LoadBalancerSourceRanges) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LoadBalancerSourceRanges[iNdEx]) + copy(dAtA[i:], m.LoadBalancerSourceRanges[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerSourceRanges[iNdEx]))) + i-- + dAtA[i] = 
0x4a + } + } + i -= len(m.LoadBalancerIP) + copy(dAtA[i:], m.LoadBalancerIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP))) + i-- + dAtA[i] = 0x42 + i -= len(m.SessionAffinity) + copy(dAtA[i:], m.SessionAffinity) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity))) + i-- + dAtA[i] = 0x3a + if len(m.ExternalIPs) > 0 { + for iNdEx := len(m.ExternalIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExternalIPs[iNdEx]) + copy(dAtA[i:], m.ExternalIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalIPs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x22 + i -= len(m.ClusterIP) + copy(dAtA[i:], m.ClusterIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - if m.Template != nil { - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n + return len(dAtA) - i, nil } -func (m *ReplicationControllerStatus) Size() (n int) { +func (m *ServiceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceFieldSelector) Size() (n int) { - var l int - _ = l - l = len(m.ContainerName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + 
sovGenerated(uint64(l)) - l = m.Divisor.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ResourceQuota) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceQuotaList) Size() (n int) { +func (m *SessionAffinityConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ClientIP != nil { + { + size, err := m.ClientIP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ResourceQuotaSpec) Size() (n int) { +func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageOSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Hard) > 0 { - for k, v := range m.Hard { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.ScopeSelector != nil { - l = m.ScopeSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.VolumeNamespace) + copy(dAtA[i:], m.VolumeNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ResourceQuotaStatus) Size() (n int) { +func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*StorageOSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Hard) > 0 { - for k, v := range m.Hard { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if len(m.Used) > 0 { - for k, v := range m.Used { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.VolumeNamespace) + copy(dAtA[i:], m.VolumeNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ResourceRequirements) Size() (n int) { - var l int - _ = l - if len(m.Limits) > 0 { - for k, v := range m.Limits { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Requests) > 0 { - for k, v := range m.Requests { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } +func (m *Sysctl) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *SELinuxOptions) Size() (n int) { - var l int - _ = l - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Role) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Level) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ScaleIOPersistentVolumeSource) Size() (n int) { +func (m *Sysctl) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Gateway) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.System) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - l = len(m.ProtectionDomain) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageMode) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScaleIOVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Gateway) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.System) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - n += 2 - l = len(m.ProtectionDomain) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageMode) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + return dAtA[:n], nil } -func (m *ScopeSelector) Size() (n int) { +func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TCPSocketAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ScopedResourceSelectorRequirement) Size() (n int) { - var l int - _ = l - l = len(m.ScopeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *Taint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Secret) Size() (n int) { +func (m *Taint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Taint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Data) > 0 { - for k, v := range m.Data { - _ = k - _ = v - l = 0 - if v != nil { - l = 1 + len(v) + sovGenerated(uint64(len(v))) + if m.TimeAdded != nil { + { + size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.StringData) > 0 { - for k, v := range m.StringData { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - return n + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x1a + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretEnvSource) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 +func (m *Toleration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *SecretKeySelector) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 - } - return n +func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SecretList) Size() (n int) { +func (m *Toleration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.TolerationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) + i-- + dAtA[i] = 0x28 } - return n + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x22 + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecretProjection) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Optional != nil { - n += 2 +func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *SecretReference) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SecretVolumeSource) Size() (n int) { +func (m *TopologySelectorLabelRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) - } - if m.Optional != nil { 
- n += 2 - } - return n + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SecurityContext) Size() (n int) { - var l int - _ = l - if m.Capabilities != nil { - l = m.Capabilities.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Privileged != nil { - n += 2 - } - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RunAsUser != nil { - n += 1 + sovGenerated(uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - n += 2 - } - if m.ReadOnlyRootFilesystem != nil { - n += 2 - } - if m.AllowPrivilegeEscalation != nil { - n += 2 - } - if m.RunAsGroup != nil { - n += 1 + sovGenerated(uint64(*m.RunAsGroup)) - } - if m.ProcMount != nil { - l = len(*m.ProcMount) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.WindowsOptions != nil { - l = m.WindowsOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n -} - -func (m *SerializedReference) Size() (n int) { - var l int - _ = l - l = m.Reference.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *Service) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceAccount) Size() (n int) { +func (m *TopologySelectorTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Secrets) > 0 { - for _, e := range m.Secrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MatchLabelExpressions) > 0 { + for iNdEx := len(m.MatchLabelExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchLabelExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - if m.AutomountServiceAccountToken != nil { - n += 2 - } - return n + return len(dAtA) - i, nil } -func (m *ServiceAccountList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *TopologySpreadConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ServiceAccountTokenProjection) Size() (n int) { - var l int - _ = l - l = len(m.Audience) - n += 1 + l + sovGenerated(uint64(l)) - if m.ExpirationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) - } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TopologySpreadConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceList) Size() (n int) { +func (m *TopologySpreadConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - return n + i -= len(m.WhenUnsatisfiable) + copy(dAtA[i:], m.WhenUnsatisfiable) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenUnsatisfiable))) + i-- + dAtA[i] = 0x1a + i -= len(m.TopologyKey) + copy(dAtA[i:], m.TopologyKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSkew)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *ServicePort) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - l = m.TargetPort.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.NodePort)) - return n +func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ServiceProxyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceSpec) Size() (n int) { +func (m *TypedLocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.ClusterIP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ExternalIPs) > 0 { - for _, s := range m.ExternalIPs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.SessionAffinity) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.LoadBalancerIP) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.LoadBalancerSourceRanges) > 0 { - for _, s := range m.LoadBalancerSourceRanges { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + if m.APIGroup != nil { + i -= len(*m.APIGroup) + copy(dAtA[i:], *m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) + i-- + dAtA[i] = 0xa } - l = len(m.ExternalName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ExternalTrafficPolicy) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) - n += 
2 - if m.SessionAffinityConfig != nil { - l = m.SessionAffinityConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *Volume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ServiceStatus) Size() (n int) { - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *Volume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SessionAffinityConfig) Size() (n int) { +func (m *Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.ClientIP != nil { - l = m.ClientIP.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.VolumeSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *StorageOSPersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeNamespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *StorageOSVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeNamespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Sysctl) Size() (n int) { +func (m *VolumeDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.DevicePath) + copy(dAtA[i:], m.DevicePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *TCPSocketAction) Size() (n int) { - var l int - _ = l - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *VolumeMount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Taint) Size() (n int) { +func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) 
{ + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Effect) - n += 1 + l + sovGenerated(uint64(l)) - if m.TimeAdded != nil { - l = m.TimeAdded.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.SubPathExpr) + copy(dAtA[i:], m.SubPathExpr) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr))) + i-- + dAtA[i] = 0x32 + if m.MountPropagation != nil { + i -= len(*m.MountPropagation) + copy(dAtA[i:], *m.MountPropagation) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) + i-- + dAtA[i] = 0x2a } - return n + i -= len(m.SubPath) + copy(dAtA[i:], m.SubPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) + i-- + dAtA[i] = 0x22 + i -= len(m.MountPath) + copy(dAtA[i:], m.MountPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) + i-- + dAtA[i] = 0x1a + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Toleration) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Effect) - n += 1 + l + sovGenerated(uint64(l)) - if m.TolerationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) +func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *TopologySelectorLabelRequirement) Size() (n int) { +func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeNodeAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.Required != nil { + { + size, err := m.Required.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *TopologySelectorTerm) Size() (n int) { - var l int - _ = l - if len(m.MatchLabelExpressions) > 0 { - for _, e := range m.MatchLabelExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *VolumeProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *TypedLocalObjectReference) Size() (n int) { - var l int - _ = l - if m.APIGroup != nil { - l = len(*m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Volume) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.VolumeSource.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VolumeDevice) Size() (n int) { +func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DevicePath) - n += 1 + l + sovGenerated(uint64(l)) - return n + if m.ServiceAccountToken != nil { + { + size, err := m.ServiceAccountToken.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.DownwardAPI != nil { + { + size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *VolumeMount) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.MountPath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SubPath) - n += 1 + l + sovGenerated(uint64(l)) - if m.MountPropagation != nil { - l = len(*m.MountPropagation) - n += 1 + l + sovGenerated(uint64(l)) +func (m *VolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.SubPathExpr) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *VolumeNodeAffinity) Size() (n int) { - var l int - _ = l - if m.Required != nil { - l = m.Required.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n +func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VolumeProjection) Size() (n int) { +func (m *VolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Secret != nil { - l = m.Secret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DownwardAPI != nil { - l = m.DownwardAPI.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.CSI != nil { + { + size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 } - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.StorageOS != nil { + { + size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda } - if m.ServiceAccountToken != nil { - l = m.ServiceAccountToken.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Projected != nil { + { + size, err := m.Projected.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 } - return n -} - -func (m *VolumeSource) Size() (n int) { - var l int - _ = l - if m.HostPath != nil 
{ - l = m.HostPath.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ScaleIO != nil { + { + size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca } - if m.EmptyDir != nil { - l = m.EmptyDir.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.PortworxVolume != nil { + { + size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - if m.GCEPersistentDisk != nil { - l = m.GCEPersistentDisk.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.PhotonPersistentDisk != nil { + { + size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } - if m.AWSElasticBlockStore != nil { - l = m.AWSElasticBlockStore.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AzureDisk != nil { + { + size, err := m.AzureDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if m.GitRepo != nil { - l = m.GitRepo.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Quobyte != nil { + { + size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } - if m.Secret != nil { - l = m.Secret.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.VsphereVolume != nil { + { + size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - if m.NFS != nil { - l = m.NFS.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a } - if m.ISCSI != nil { - l = m.ISCSI.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AzureFile != nil { + { + size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 } - if m.Glusterfs != nil { - l = m.Glusterfs.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.FC != nil { + { + size, err := m.FC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a } - if m.PersistentVolumeClaim != nil { - l = m.PersistentVolumeClaim.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.DownwardAPI != nil { + { + size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 } - if m.RBD != nil { - l = m.RBD.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Flocker != nil { + { + size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a } - if m.FlexVolume != nil { - l = m.FlexVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.CephFS != nil { + { + size, err := m.CephFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 } if m.Cinder != nil { - l = m.Cinder.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a } - if m.CephFS != nil { - l = m.CephFS.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.FlexVolume != nil { + { + size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 } - if m.Flocker != nil { - l = m.Flocker.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RBD != nil { + { + size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } - if m.DownwardAPI != nil { - l = m.DownwardAPI.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.PersistentVolumeClaim != nil { + { + size, err := m.PersistentVolumeClaim.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - if m.FC != nil { - l = m.FC.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.Glusterfs != nil { + { + size, err := m.Glusterfs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a } - if m.AzureFile != nil { - l = m.AzureFile.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.ISCSI != nil { + { + size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.NFS != nil { + { + size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } - if m.VsphereVolume != nil { - l = m.VsphereVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - if m.Quobyte != nil { - l = m.Quobyte.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.GitRepo != nil { + { + size, err := m.GitRepo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - if m.AzureDisk != nil { - l = m.AzureDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.AWSElasticBlockStore != nil { + { + size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - if m.PhotonPersistentDisk != nil { - l = m.PhotonPersistentDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.GCEPersistentDisk != nil 
{ + { + size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - if m.PortworxVolume != nil { - l = m.PortworxVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.EmptyDir != nil { + { + size, err := m.EmptyDir.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - if m.ScaleIO != nil { - l = m.ScaleIO.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.HostPath != nil { + { + size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - if m.Projected != nil { - l = m.Projected.Size() - n += 2 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.StorageOS != nil { - l = m.StorageOS.Size() - n += 2 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.StoragePolicyID) + copy(dAtA[i:], m.StoragePolicyID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID))) + i-- + dAtA[i] = 0x22 + i -= len(m.StoragePolicyName) + copy(dAtA[i:], m.StoragePolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyName))) + i-- + dAtA[i] = 0x1a + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumePath) + copy(dAtA[i:], m.VolumePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.CSI != nil { - l = m.CSI.Size() - n += 2 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WeightedPodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.PodAffinityTerm.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { +func (m *WindowsSecurityContextOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsSecurityContextOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*WindowsSecurityContextOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.VolumePath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePolicyName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePolicyID) - n += 1 + l + sovGenerated(uint64(l)) - return n + if m.RunAsUserName != nil { + i -= len(*m.RunAsUserName) + copy(dAtA[i:], *m.RunAsUserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RunAsUserName))) + i-- + dAtA[i] = 0x1a + } + if m.GMSACredentialSpec != nil { + i -= len(*m.GMSACredentialSpec) + copy(dAtA[i:], *m.GMSACredentialSpec) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpec))) + i-- + dAtA[i] = 0x12 + } + if m.GMSACredentialSpecName != nil { + i -= len(*m.GMSACredentialSpecName) + copy(dAtA[i:], *m.GMSACredentialSpecName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpecName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *WeightedPodAffinityTerm) Size() (n int) { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - n += 1 + sovGenerated(uint64(m.Weight)) - l = m.PodAffinityTerm.Size() + l = len(m.VolumeID) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 return n } -func (m *WindowsSecurityContextOptions) Size() (n int) { +func (m *Affinity) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.GMSACredentialSpecName != nil { - l = len(*m.GMSACredentialSpecName) + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.GMSACredentialSpec != nil { - l = len(*m.GMSACredentialSpec) + if m.PodAffinity != nil { + l = m.PodAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAntiAffinity != nil { + l = m.PodAntiAffinity.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } +func (m *AttachedVolume) Size() (n int) { + if m == nil { + return 0 } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) return n } -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AWSElasticBlockStoreVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *AvoidPods) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Affinity) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.PreferAvoidPods) > 0 { + for _, e := range m.PreferAvoidPods { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Affinity{`, - `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), 
"NodeAffinity", "NodeAffinity", 1) + `,`, - `PodAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAffinity), "PodAffinity", "PodAffinity", 1) + `,`, - `PodAntiAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAntiAffinity), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *AttachedVolume) String() string { - if this == nil { - return "nil" + +func (m *AzureDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AttachedVolume{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, - `}`, - }, "") - return s -} -func (this *AvoidPods) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.DiskName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DataDiskURI) + n += 1 + l + sovGenerated(uint64(l)) + if m.CachingMode != nil { + l = len(*m.CachingMode) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&AvoidPods{`, - `PreferAvoidPods:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferAvoidPods), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *AzureDiskVolumeSource) String() string { - if this == nil { - return "nil" + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&AzureDiskVolumeSource{`, - `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, - `DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`, - `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, - `Kind:` + valueToStringGenerated(this.Kind) + `,`, - `}`, - }, "") - return s + if m.ReadOnly != nil { + n += 2 + } + if m.Kind != nil { + l = len(*m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *AzureFilePersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *AzureFilePersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretNamespace:` + valueToStringGenerated(this.SecretNamespace) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretNamespace != nil { + l = len(*m.SecretNamespace) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *AzureFileVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *AzureFileVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AzureFileVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (this *Binding) String() string { - if this == nil { - return "nil" + +func (m *Binding) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Binding{`, - 
`ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *CSIPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *CSIPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) - for k := range this.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, k) + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeHandle) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeAttributes) > 0 { + for k, v := range m.VolumeAttributes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - mapStringForVolumeAttributes := "map[string]string{" - for _, k := range keysForVolumeAttributes { - mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + if m.ControllerPublishSecretRef != nil { + l = m.ControllerPublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForVolumeAttributes += "}" - s := strings.Join([]string{`&CSIPersistentVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, - `ControllerPublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerPublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `NodeStageSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodeStageSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ControllerExpandSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerExpandSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSIVolumeSource) String() string { - if this == nil { - return "nil" + if m.NodeStageSecretRef != nil { + l = m.NodeStageSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) - for k := range this.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, k) + if m.NodePublishSecretRef != nil { + l = m.NodePublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - mapStringForVolumeAttributes := "map[string]string{" - for _, k := range keysForVolumeAttributes { - mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + if m.ControllerExpandSecretRef != nil { + l = m.ControllerExpandSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - 
mapStringForVolumeAttributes += "}" - s := strings.Join([]string{`&CSIVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, - `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *Capabilities) String() string { - if this == nil { - return "nil" + +func (m *CSIVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Capabilities{`, - `Add:` + fmt.Sprintf("%v", this.Add) + `,`, - `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, - `}`, - }, "") - return s -} -func (this *CephFSPersistentVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + if m.ReadOnly != nil { + n += 2 } - s := strings.Join([]string{`&CephFSPersistentVolumeSource{`, - `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *CephFSVolumeSource) String() string { - if this == nil { - return "nil" + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&CephFSVolumeSource{`, - `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *CinderPersistentVolumeSource) String() string { - if this == nil { - return "nil" + if len(m.VolumeAttributes) > 0 { + for k, v := range m.VolumeAttributes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&CinderPersistentVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CinderVolumeSource) String() string { - if this == nil { - return "nil" + if m.NodePublishSecretRef != nil { + l = m.NodePublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&CinderVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ClientIPConfig) String() string { - if this == nil { - return 
"nil" + +func (m *Capabilities) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ClientIPConfig{`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ComponentCondition) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Add) > 0 { + for _, s := range m.Add { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ComponentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *ComponentStatus) String() string { - if this == nil { - return "nil" + if len(m.Drop) > 0 { + for _, s := range m.Drop { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ComponentStatus{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ComponentStatusList) String() string { - if this == nil { - return "nil" + +func (m *CephFSPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ComponentStatusList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMap) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - keysForData := make([]string, 0, len(this.Data)) - for k := range this.Data { - keysForData = append(keysForData, k) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - mapStringForData := "map[string]string{" - for _, k := range keysForData { - mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + n += 2 + return n +} + +func (m *CephFSVolumeSource) Size() (n int) { + if m == nil { + return 0 } - mapStringForData += "}" - keysForBinaryData := make([]string, 0, len(this.BinaryData)) - for k := range this.BinaryData { - keysForBinaryData = append(keysForBinaryData, k) + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) - mapStringForBinaryData := "map[string][]byte{" - for _, k := range keysForBinaryData { - mapStringForBinaryData += fmt.Sprintf("%v: %v,", k, this.BinaryData[k]) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 
+ l + sovGenerated(uint64(l)) } - mapStringForBinaryData += "}" - s := strings.Join([]string{`&ConfigMap{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + mapStringForData + `,`, - `BinaryData:` + mapStringForBinaryData + `,`, - `}`, - }, "") - return s + n += 2 + return n } -func (this *ConfigMapEnvSource) String() string { - if this == nil { - return "nil" + +func (m *CinderPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapEnvSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapKeySelector) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ConfigMapKeySelector{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + return n } -func (this *ConfigMapList) String() string { - if this == nil { - return "nil" + +func (m *CinderVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapNodeConfigSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ConfigMapNodeConfigSource{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`, - `}`, - }, "") - return s + return n } -func (this *ConfigMapProjection) String() string { - if this == nil { - return "nil" + +func (m *ClientIPConfig) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ConfigMapProjection{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if 
m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } - s := strings.Join([]string{`&ConfigMapVolumeSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + return n } -func (this *Container) String() string { - if this == nil { - return "nil" + +func (m *ComponentCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Container{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `Args:` + fmt.Sprintf("%v", this.Args) + `,`, - `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + `,`, - `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "EnvVar", 1), `&`, ``, 1) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, - `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + `,`, - `LivenessProbe:` + strings.Replace(fmt.Sprintf("%v", this.LivenessProbe), "Probe", "Probe", 1) + `,`, - `ReadinessProbe:` + strings.Replace(fmt.Sprintf("%v", this.ReadinessProbe), "Probe", "Probe", 1) + `,`, - `Lifecycle:` + strings.Replace(fmt.Sprintf("%v", this.Lifecycle), "Lifecycle", "Lifecycle", 1) + `,`, - `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, - `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "SecurityContext", 1) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, - `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, - `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, - `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, - `VolumeDevices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeDevices), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ContainerImage) String() string { - if this == nil { - return "nil" + +func (m *ComponentStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ContainerImage{`, - `Names:` + fmt.Sprintf("%v", this.Names) + `,`, - `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerPort) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e 
:= range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ContainerPort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, - `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, - `}`, - }, "") - return s + return n } -func (this *ContainerState) String() string { - if this == nil { - return "nil" + +func (m *ComponentStatusList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ContainerState{`, - `Waiting:` + strings.Replace(fmt.Sprintf("%v", this.Waiting), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, - `Running:` + strings.Replace(fmt.Sprintf("%v", this.Running), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, - `Terminated:` + strings.Replace(fmt.Sprintf("%v", this.Terminated), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStateRunning) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ContainerStateRunning{`, - `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ContainerStateTerminated) String() string { - if this == nil { - return "nil" + +func (m *ConfigMap) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ContainerStateTerminated{`, - `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, - `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `FinishedAt:` + strings.Replace(strings.Replace(this.FinishedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStateWaiting) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&ContainerStateWaiting{`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStatus) String() string { - if this == nil { - return "nil" + if len(m.BinaryData) > 0 { + for k, v := range m.BinaryData { + _ = k + _ = v + l = 0 + if v != nil { + l = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&ContainerStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `State:` + 
strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, - `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, - `Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, - `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `}`, - }, "") - return s + return n } -func (this *DaemonEndpoint) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapEnvSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&DaemonEndpoint{`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `}`, - }, "") - return s -} -func (this *DownwardAPIProjection) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 } - s := strings.Join([]string{`&DownwardAPIProjection{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *DownwardAPIVolumeFile) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapKeySelector) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&DownwardAPIVolumeFile{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, - `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, - `Mode:` + valueToStringGenerated(this.Mode) + `,`, - `}`, - }, "") - return s -} -func (this *DownwardAPIVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 } - s := strings.Join([]string{`&DownwardAPIVolumeSource{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `}`, - }, "") - return s + return n } -func (this *EmptyDirVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EmptyDirVolumeSource{`, - `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, - `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointAddress) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EndpointAddress{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "ObjectReference", 1) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `NodeName:` + 
valueToStringGenerated(this.NodeName) + `,`, - `}`, - }, "") - return s + return n } -func (this *EndpointPort) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapNodeConfigSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EndpointPort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletConfigKey) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *EndpointSubset) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapProjection) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EndpointSubset{`, - `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, - `NotReadyAddresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NotReadyAddresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Endpoints) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Endpoints{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subsets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subsets), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointsList) String() string { - if this == nil { - return "nil" + if m.Optional != nil { + n += 2 } - s := strings.Join([]string{`&EndpointsList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Endpoints", "Endpoints", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *EnvFromSource) String() string { - if this == nil { - return "nil" + +func (m *ConfigMapVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EnvFromSource{`, - `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, - `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretEnvSource", "SecretEnvSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *EnvVar) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EnvVar{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - 
`Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `ValueFrom:` + strings.Replace(fmt.Sprintf("%v", this.ValueFrom), "EnvVarSource", "EnvVarSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *EnvVarSource) String() string { - if this == nil { - return "nil" + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) } - s := strings.Join([]string{`&EnvVarSource{`, - `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, - `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, - `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, - `SecretKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeyRef), "SecretKeySelector", "SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Event) String() string { - if this == nil { - return "nil" + if m.Optional != nil { + n += 2 } - s := strings.Join([]string{`&Event{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, - `FirstTimestamp:` + strings.Replace(strings.Replace(this.FirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTimestamp:` + strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, - `Action:` + fmt.Sprintf("%v", this.Action) + `,`, - `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "ObjectReference", 1) + `,`, - `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, - `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, - `}`, - }, "") - return s + return n } -func (this *EventList) String() string { - if this == nil { - return "nil" + +func (m *Container) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EventList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *EventSeries) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EventSeries{`, - `Count:` + 
fmt.Sprintf("%v", this.Count) + `,`, - `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `State:` + fmt.Sprintf("%v", this.State) + `,`, - `}`, - }, "") - return s -} -func (this *EventSource) String() string { - if this == nil { - return "nil" + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EventSource{`, - `Component:` + fmt.Sprintf("%v", this.Component) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s -} -func (this *ExecAction) String() string { - if this == nil { - return "nil" + l = len(m.WorkingDir) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ExecAction{`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `}`, - }, "") - return s -} -func (this *FCVolumeSource) String() string { - if this == nil { - return "nil" + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&FCVolumeSource{`, - `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, - `Lun:` + valueToStringGenerated(this.Lun) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `WWIDs:` + fmt.Sprintf("%v", this.WWIDs) + `,`, - `}`, - }, "") - return s -} -func (this *FlexPersistentVolumeSource) String() string { - if this == nil { - return "nil" + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - keysForOptions := make([]string, 0, len(this.Options)) - for k := range this.Options { - keysForOptions = append(keysForOptions, k) + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - mapStringForOptions := "map[string]string{" - for _, k := range keysForOptions { - mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForOptions += "}" - s := strings.Join([]string{`&FlexPersistentVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Options:` + mapStringForOptions + `,`, - `}`, - }, "") - return s -} -func (this *FlexVolumeSource) String() string { - if this == nil { - return "nil" + if m.Lifecycle != nil { + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForOptions := make([]string, 0, len(this.Options)) - for k := range this.Options { - keysForOptions = append(keysForOptions, k) + l = len(m.TerminationMessagePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - mapStringForOptions := "map[string]string{" - for _, k := range keysForOptions { - 
mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + n += 3 + n += 3 + n += 3 + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - mapStringForOptions += "}" - s := strings.Join([]string{`&FlexVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Options:` + mapStringForOptions + `,`, - `}`, - }, "") - return s + l = len(m.TerminationMessagePolicy) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.VolumeDevices) > 0 { + for _, e := range m.VolumeDevices { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.StartupProbe != nil { + l = m.StartupProbe.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n } -func (this *FlockerVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ContainerImage) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&FlockerVolumeSource{`, - `DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`, - `DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`, - `}`, - }, "") - return s -} -func (this *GCEPersistentDiskVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`, - `PDName:` + fmt.Sprintf("%v", this.PDName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + n += 1 + sovGenerated(uint64(m.SizeBytes)) + return n } -func (this *GitRepoVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ContainerPort) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&GitRepoVolumeSource{`, - `Repository:` + fmt.Sprintf("%v", this.Repository) + `,`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HostPort)) + n += 1 + sovGenerated(uint64(m.ContainerPort)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *GlusterfsPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ContainerState) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&GlusterfsPersistentVolumeSource{`, - `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `EndpointsNamespace:` + valueToStringGenerated(this.EndpointsNamespace) + `,`, - `}`, - }, "") - return s -} -func (this *GlusterfsVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.Waiting != nil { + l = m.Waiting.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&GlusterfsVolumeSource{`, - `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `ReadOnly:` + 
fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPGetAction) String() string { - if this == nil { - return "nil" + if m.Running != nil { + l = m.Running.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&HTTPGetAction{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, - `HTTPHeaders:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HTTPHeaders), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPHeader) String() string { - if this == nil { - return "nil" + if m.Terminated != nil { + l = m.Terminated.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&HTTPHeader{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s + return n } -func (this *Handler) String() string { - if this == nil { - return "nil" + +func (m *ContainerStateRunning) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Handler{`, - `Exec:` + strings.Replace(fmt.Sprintf("%v", this.Exec), "ExecAction", "ExecAction", 1) + `,`, - `HTTPGet:` + strings.Replace(fmt.Sprintf("%v", this.HTTPGet), "HTTPGetAction", "HTTPGetAction", 1) + `,`, - `TCPSocket:` + strings.Replace(fmt.Sprintf("%v", this.TCPSocket), "TCPSocketAction", "TCPSocketAction", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *HostAlias) String() string { - if this == nil { - return "nil" + +func (m *ContainerStateTerminated) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&HostAlias{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `Hostnames:` + fmt.Sprintf("%v", this.Hostnames) + `,`, - `}`, - }, "") - return s + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ExitCode)) + n += 1 + sovGenerated(uint64(m.Signal)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FinishedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *HostPathVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ContainerStateWaiting) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&HostPathVolumeSource{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Type:` + valueToStringGenerated(this.Type) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ISCSIPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *ContainerStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, - `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, - `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, - `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, - `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + 
fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, - `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, - `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, - `}`, - }, "") - return s -} -func (this *ISCSIVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.State.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTerminationState.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 1 + sovGenerated(uint64(m.RestartCount)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImageID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + if m.Started != nil { + n += 2 } - s := strings.Join([]string{`&ISCSIVolumeSource{`, - `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, - `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, - `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, - `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, - `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, - `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, - `}`, - }, "") - return s + return n } -func (this *KeyToPath) String() string { - if this == nil { - return "nil" + +func (m *DaemonEndpoint) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&KeyToPath{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Mode:` + valueToStringGenerated(this.Mode) + `,`, - `}`, - }, "") - return s + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + return n } -func (this *Lifecycle) String() string { - if this == nil { - return "nil" + +func (m *DownwardAPIProjection) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Lifecycle{`, - `PostStart:` + strings.Replace(fmt.Sprintf("%v", this.PostStart), "Handler", "Handler", 1) + `,`, - `PreStop:` + strings.Replace(fmt.Sprintf("%v", this.PreStop), "Handler", "Handler", 1) + `,`, - `}`, - }, "") - return s -} -func (this *LimitRange) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&LimitRange{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *LimitRangeItem) String() string { - if this == nil { - return "nil" + +func (m *DownwardAPIVolumeFile) Size() (n int) { + if m == nil { + return 0 } - keysForMax := make([]string, 0, len(this.Max)) - for k := range this.Max { - keysForMax = 
append(keysForMax, string(k)) + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMax) - mapStringForMax := "ResourceList{" - for _, k := range keysForMax { - mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)]) + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMax += "}" - keysForMin := make([]string, 0, len(this.Min)) - for k := range this.Min { - keysForMin = append(keysForMin, string(k)) + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMin) - mapStringForMin := "ResourceList{" - for _, k := range keysForMin { - mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)]) + return n +} + +func (m *DownwardAPIVolumeSource) Size() (n int) { + if m == nil { + return 0 } - mapStringForMin += "}" - keysForDefault := make([]string, 0, len(this.Default)) - for k := range this.Default { - keysForDefault = append(keysForDefault, string(k)) + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) - mapStringForDefault := "ResourceList{" - for _, k := range keysForDefault { - mapStringForDefault += fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)]) + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) } - mapStringForDefault += "}" - keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest)) - for k := range this.DefaultRequest { - keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + return n +} + +func (m *EmptyDirVolumeSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) - mapStringForDefaultRequest := "ResourceList{" - for _, k := range keysForDefaultRequest { - mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)]) + var l int + _ = l + l = len(m.Medium) + n += 1 + l + sovGenerated(uint64(l)) + if m.SizeLimit != nil { + l = m.SizeLimit.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForDefaultRequest += "}" - keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio)) - for k := range this.MaxLimitRequestRatio { - keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + return n +} + +func (m *EndpointAddress) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) - mapStringForMaxLimitRequestRatio := "ResourceList{" - for _, k := range keysForMaxLimitRequestRatio { - mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)]) + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMaxLimitRequestRatio += "}" - s := strings.Join([]string{`&LimitRangeItem{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Max:` + mapStringForMax + `,`, - `Min:` + mapStringForMin + `,`, - `Default:` + mapStringForDefault + `,`, - `DefaultRequest:` + mapStringForDefaultRequest + `,`, - `MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`, - `}`, - }, "") - return s -} -func (this *LimitRangeList) 
String() string { - if this == nil { - return "nil" + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&LimitRangeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "LimitRange", "LimitRange", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *LimitRangeSpec) String() string { - if this == nil { - return "nil" + +func (m *EndpointPort) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&LimitRangeSpec{`, - `Limits:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Limits), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *List) String() string { - if this == nil { - return "nil" + +func (m *EndpointSubset) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&List{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LoadBalancerIngress) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&LoadBalancerIngress{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `}`, - }, "") - return s -} -func (this *LoadBalancerStatus) String() string { - if this == nil { - return "nil" + if len(m.NotReadyAddresses) > 0 { + for _, e := range m.NotReadyAddresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&LoadBalancerStatus{`, - `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "LoadBalancerIngress", "LoadBalancerIngress", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LocalObjectReference) String() string { - if this == nil { - return "nil" + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&LocalObjectReference{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s + return n } -func (this *LocalVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *Endpoints) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&LocalVolumeSource{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `}`, - }, "") - return s -} -func (this *NFSVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subsets) > 0 { + for _, e := range m.Subsets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&NFSVolumeSource{`, - 
`Server:` + fmt.Sprintf("%v", this.Server) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + return n } -func (this *Namespace) String() string { - if this == nil { - return "nil" + +func (m *EndpointsList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Namespace{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NamespaceList) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&NamespaceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *NamespaceSpec) String() string { - if this == nil { - return "nil" + +func (m *EnvFromSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NamespaceSpec{`, - `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, - `}`, - }, "") - return s -} -func (this *NamespaceStatus) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + sovGenerated(uint64(l)) + if m.ConfigMapRef != nil { + l = m.ConfigMapRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NamespaceStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `}`, - }, "") - return s -} -func (this *Node) String() string { - if this == nil { - return "nil" + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&Node{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *NodeAddress) String() string { - if this == nil { - return "nil" + +func (m *EnvVar) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NodeAddress{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Address:` + fmt.Sprintf("%v", this.Address) + `,`, - `}`, - }, "") - return s -} -func (this *NodeAffinity) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + if m.ValueFrom != nil { + l = m.ValueFrom.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeAffinity{`, - `RequiredDuringSchedulingIgnoredDuringExecution:` + 
strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "NodeSelector", "NodeSelector", 1) + `,`, - `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *NodeCondition) String() string { - if this == nil { - return "nil" + +func (m *EnvVarSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NodeCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastHeartbeatTime:` + strings.Replace(strings.Replace(this.LastHeartbeatTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *NodeConfigSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeConfigSource{`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapNodeConfigSource", "ConfigMapNodeConfigSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeConfigStatus) String() string { - if this == nil { - return "nil" + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeConfigStatus{`, - `Assigned:` + strings.Replace(fmt.Sprintf("%v", this.Assigned), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `Active:` + strings.Replace(fmt.Sprintf("%v", this.Active), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `LastKnownGood:` + strings.Replace(fmt.Sprintf("%v", this.LastKnownGood), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *NodeDaemonEndpoints) String() string { - if this == nil { - return "nil" + if m.ConfigMapKeyRef != nil { + l = m.ConfigMapKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeDaemonEndpoints{`, - `KubeletEndpoint:` + strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", "DaemonEndpoint", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeList) String() string { - if this == nil { - return "nil" + if m.SecretKeyRef != nil { + l = m.SecretKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Node", "Node", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *NodeProxyOptions) String() string { - if this == nil { - return "nil" + +func (m *EphemeralContainer) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NodeProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.EphemeralContainerCommon.Size() 
+ n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TargetContainerName) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *NodeResources) String() string { - if this == nil { - return "nil" + +func (m *EphemeralContainerCommon) Size() (n int) { + if m == nil { + return 0 } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForCapacity += "}" - s := strings.Join([]string{`&NodeResources{`, - `Capacity:` + mapStringForCapacity + `,`, - `}`, - }, "") - return s -} -func (this *NodeSelector) String() string { - if this == nil { - return "nil" + l = len(m.WorkingDir) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&NodeSelector{`, - `NodeSelectorTerms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NodeSelectorTerms), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeSelectorRequirement) String() string { - if this == nil { - return "nil" + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&NodeSelectorRequirement{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s -} -func (this *NodeSelectorTerm) String() string { - if this == nil { - return "nil" + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&NodeSelectorTerm{`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, - `MatchFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchFields), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeSpec) String() string { - if this == nil { - return "nil" + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeSpec{`, - `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, - `DoNotUse_ExternalID:` + fmt.Sprintf("%v", this.DoNotUse_ExternalID) + `,`, - `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, - `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, - `Taints:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Taints), "Taint", "Taint", 1), `&`, ``, 1) + `,`, - `ConfigSource:` + strings.Replace(fmt.Sprintf("%v", this.ConfigSource), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `}`, - }, "") - return s 
-} -func (this *NodeStatus) String() string { - if this == nil { - return "nil" + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + if m.Lifecycle != nil { + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + l = len(m.TerminationMessagePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForCapacity += "}" - keysForAllocatable := make([]string, 0, len(this.Allocatable)) - for k := range this.Allocatable { - keysForAllocatable = append(keysForAllocatable, string(k)) + n += 3 + n += 3 + n += 3 + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) - mapStringForAllocatable := "ResourceList{" - for _, k := range keysForAllocatable { - mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) + l = len(m.TerminationMessagePolicy) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.VolumeDevices) > 0 { + for _, e := range m.VolumeDevices { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } } - mapStringForAllocatable += "}" - s := strings.Join([]string{`&NodeStatus{`, - `Capacity:` + mapStringForCapacity + `,`, - `Allocatable:` + mapStringForAllocatable + `,`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + `,`, - `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + `,`, - `DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`, - `NodeInfo:` + strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`, - `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + `,`, - `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, - `VolumesAttached:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumesAttached), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + `,`, - `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeSystemInfo) String() string { - if this == nil { - return "nil" + if m.StartupProbe != nil { + l = m.StartupProbe.Size() + n += 2 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&NodeSystemInfo{`, - `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, - `SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`, - `BootID:` + fmt.Sprintf("%v", this.BootID) + `,`, - `KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`, - `OSImage:` + fmt.Sprintf("%v", this.OSImage) + `,`, - `ContainerRuntimeVersion:` + fmt.Sprintf("%v", 
this.ContainerRuntimeVersion) + `,`, - `KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`, - `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, - `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, - `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, - `}`, - }, "") - return s + return n } -func (this *ObjectFieldSelector) String() string { - if this == nil { - return "nil" + +func (m *EphemeralContainers) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ObjectFieldSelector{`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectReference) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.EphemeralContainers) > 0 { + for _, e := range m.EphemeralContainers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ObjectReference{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, - `}`, - }, "") - return s + return n } -func (this *PersistentVolume) String() string { - if this == nil { - return "nil" + +func (m *Event) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistentVolume{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaim) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.InvolvedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FirstTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PersistentVolumeClaim{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this 
*PersistentVolumeClaimCondition) String() string { - if this == nil { - return "nil" + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PersistentVolumeClaimCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s + l = len(m.ReportingController) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PersistentVolumeClaimList) String() string { - if this == nil { - return "nil" + +func (m *EventList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistentVolumeClaimList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaimSpec) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, - `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, - `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, - `DataSource:` + strings.Replace(fmt.Sprintf("%v", this.DataSource), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *PersistentVolumeClaimStatus) String() string { - if this == nil { - return "nil" - } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + +func (m *EventSeries) Size() (n int) { + if m == nil { + return 0 } - mapStringForCapacity += "}" - s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `Capacity:` + mapStringForCapacity + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), 
"PersistentVolumeClaimCondition", "PersistentVolumeClaimCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Count)) + l = m.LastObservedTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PersistentVolumeClaimVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *EventSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`, - `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Component) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PersistentVolumeList) String() string { - if this == nil { - return "nil" + +func (m *ExecAction) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistentVolumeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *PersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *FCVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistentVolumeSource{`, - `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, - `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, - `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, - `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsPersistentVolumeSource", "GlusterfsPersistentVolumeSource", 1) + `,`, - `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, - `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, - `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderPersistentVolumeSource", "CinderPersistentVolumeSource", 1) + `,`, - `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, - `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, - `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, - `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexPersistentVolumeSource", "FlexPersistentVolumeSource", 1) + `,`, - `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`, - `VsphereVolume:` + 
strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, - `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, - `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, - `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, - `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, - `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, - `Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, - `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, - `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeSpec) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.TargetWWNs) > 0 { + for _, s := range m.TargetWWNs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + if m.Lun != nil { + n += 1 + sovGenerated(uint64(*m.Lun)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.WWIDs) > 0 { + for _, s := range m.WWIDs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForCapacity += "}" - s := strings.Join([]string{`&PersistentVolumeSpec{`, - `Capacity:` + mapStringForCapacity + `,`, - `PersistentVolumeSource:` + strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), "PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`, - `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `ClaimRef:` + strings.Replace(fmt.Sprintf("%v", this.ClaimRef), "ObjectReference", "ObjectReference", 1) + `,`, - `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, - `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, - `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, - `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, - `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *PersistentVolumeStatus) String() string { - if this == nil { - return "nil" + +func (m *FlexPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistentVolumeStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `}`, - }, "") - return s -} -func (this *PhotonPersistentDiskVolumeSource) String() string { - if this == nil { - return "nil" + 
var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`, - `PdID:` + fmt.Sprintf("%v", this.PdID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `}`, - }, "") - return s -} -func (this *Pod) String() string { - if this == nil { - return "nil" + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&Pod{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodAffinity) String() string { - if this == nil { - return "nil" + +func (m *FlexVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodAffinity{`, - `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodAffinityTerm) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodAffinityTerm{`, - `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, - `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, - `}`, - }, "") - return s -} -func (this *PodAntiAffinity) String() string { - if this == nil { - return "nil" + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&PodAntiAffinity{`, - `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodAttachOptions) String() string { - if this == nil { - return "nil" + +func (m 
*FlockerVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodAttachOptions{`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.DatasetName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DatasetUUID) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PodCondition) String() string { - if this == nil { - return "nil" + +func (m *GCEPersistentDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.PDName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n } -func (this *PodDNSConfig) String() string { - if this == nil { - return "nil" + +func (m *GitRepoVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodDNSConfig{`, - `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, - `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, - `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Repository) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Revision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Directory) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PodDNSConfigOption) String() string { - if this == nil { - return "nil" + +func (m *GlusterfsPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodDNSConfigOption{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + valueToStringGenerated(this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *PodExecOptions) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.EndpointsNamespace != nil { + l = len(*m.EndpointsNamespace) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodExecOptions{`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodList) String() string { - if this == nil { - return "nil" + +func (m *GlusterfsVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := 
strings.Join([]string{`&PodList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Pod", "Pod", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (this *PodLogOptions) String() string { - if this == nil { - return "nil" + +func (m *HTTPGetAction) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodLogOptions{`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, - `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, - `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, - `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, - `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, - `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, - `}`, - }, "") - return s -} -func (this *PodPortForwardOptions) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Scheme) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.HTTPHeaders) > 0 { + for _, e := range m.HTTPHeaders { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PodPortForwardOptions{`, - `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodProxyOptions) String() string { - if this == nil { - return "nil" + +func (m *HTTPHeader) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *PodReadinessGate) String() string { - if this == nil { - return "nil" + +func (m *Handler) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodReadinessGate{`, - `ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityContext) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodSecurityContext{`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, - `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, - `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, - `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, - `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, - `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, - `Sysctls:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sysctls), "Sysctl", "Sysctl", 1), `&`, ``, 1) + `,`, - `WindowsOptions:` + strings.Replace(fmt.Sprintf("%v", this.WindowsOptions), "WindowsSecurityContextOptions", 
"WindowsSecurityContextOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSignature) String() string { - if this == nil { - return "nil" + if m.HTTPGet != nil { + l = m.HTTPGet.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodSignature{`, - `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSpec) String() string { - if this == nil { - return "nil" + if m.TCPSocket != nil { + l = m.TCPSocket.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) + return n +} + +func (m *HostAlias) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Hostnames) > 0 { + for _, s := range m.Hostnames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForNodeSelector += "}" - s := strings.Join([]string{`&PodSpec{`, - `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "Volume", 1), `&`, ``, 1) + `,`, - `Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Container", "Container", 1), `&`, ``, 1) + `,`, - `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, - `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, - `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "PodSecurityContext", 1) + `,`, - `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, - `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "Affinity", 1) + `,`, - `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, - `InitContainers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainers), "Container", "Container", 1), `&`, ``, 1) + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `Tolerations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tolerations), "Toleration", "Toleration", 1), `&`, ``, 1) + `,`, - `HostAliases:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostAliases), "HostAlias", "HostAlias", 1), `&`, ``, 1) + `,`, - `PriorityClassName:` + fmt.Sprintf("%v", 
this.PriorityClassName) + `,`, - `Priority:` + valueToStringGenerated(this.Priority) + `,`, - `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "PodDNSConfig", 1) + `,`, - `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, - `ReadinessGates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ReadinessGates), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + `,`, - `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, - `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, - `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodStatus) String() string { - if this == nil { - return "nil" + +func (m *HostPathVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PodCondition", "PodCondition", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, - `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `ContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, - `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, - `InitContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, - `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, - `}`, - }, "") - return s -} -func (this *PodStatusResult) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodStatusResult{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *PodTemplate) String() string { - if this == nil { - return "nil" + +func (m *ISCSIPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PodTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodTemplateList) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = 
len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PodTemplateList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodTemplateSpec) String() string { - if this == nil { - return "nil" + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PodTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PortworxVolumeSource) String() string { - if this == nil { - return "nil" + n += 2 + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PortworxVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + return n } -func (this *Preconditions) String() string { - if this == nil { - return "nil" + +func (m *ISCSIVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Preconditions{`, - `UID:` + valueToStringGenerated(this.UID) + `,`, - `}`, - }, "") - return s -} -func (this *PreferAvoidPodsEntry) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&PreferAvoidPodsEntry{`, - `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`, - `EvictionTime:` + strings.Replace(strings.Replace(this.EvictionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *PreferredSchedulingTerm) String() string { - if this == nil { - return "nil" + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&PreferredSchedulingTerm{`, - `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, - `Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Probe) String() string { - if this == nil { - return "nil" + n += 2 + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&Probe{`, - `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, - `InitialDelaySeconds:` + fmt.Sprintf("%v", 
this.InitialDelaySeconds) + `,`, - `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, - `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, - `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, - `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, - `}`, - }, "") - return s + return n } -func (this *ProjectedVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *KeyToPath) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ProjectedVolumeSource{`, - `Sources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sources), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `}`, - }, "") - return s -} -func (this *QuobyteVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) } - s := strings.Join([]string{`&QuobyteVolumeSource{`, - `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, - `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`, - `}`, - }, "") - return s + return n } -func (this *RBDPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *Lifecycle) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&RBDPersistentVolumeSource{`, - `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, - `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, - `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, - `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *RBDVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.PostStart != nil { + l = m.PostStart.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&RBDVolumeSource{`, - `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, - `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, - `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, - `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *RangeAllocation) String() string { - if this == nil { - return "nil" + if m.PreStop != nil { + l = m.PreStop.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&RangeAllocation{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Range:` + fmt.Sprintf("%v", this.Range) + `,`, - `Data:` + valueToStringGenerated(this.Data) + `,`, - `}`, - }, "") - 
return s + return n } -func (this *ReplicationController) String() string { - if this == nil { - return "nil" + +func (m *LimitRange) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ReplicationController{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ReplicationControllerCondition) String() string { - if this == nil { - return "nil" + +func (m *LimitRangeItem) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ReplicationControllerCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerList) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Max) > 0 { + for k, v := range m.Max { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&ReplicationControllerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerSpec) String() string { - if this == nil { - return "nil" + if len(m.Min) > 0 { + for k, v := range m.Min { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) + if len(m.Default) > 0 { + for k, v := range m.Default { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + if len(m.DefaultRequest) > 0 { + for k, v := range m.DefaultRequest { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForSelector 
+= "}" - s := strings.Join([]string{`&ReplicationControllerSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + mapStringForSelector + `,`, - `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerStatus) String() string { - if this == nil { - return "nil" + if len(m.MaxLimitRequestRatio) > 0 { + for k, v := range m.MaxLimitRequestRatio { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&ReplicationControllerStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ResourceFieldSelector) String() string { - if this == nil { - return "nil" + +func (m *LimitRangeList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ResourceFieldSelector{`, - `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Divisor:` + strings.Replace(strings.Replace(this.Divisor.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceQuota) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ResourceQuota{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ResourceQuotaList) String() string { - if this == nil { - return "nil" + +func (m *LimitRangeSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ResourceQuotaList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Limits) > 0 { + for _, e := range m.Limits { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *ResourceQuotaSpec) String() string { - if this == nil { - return "nil" + +func (m *List) Size() (n int) { + if m == nil { + return 0 } - keysForHard := 
make([]string, 0, len(this.Hard)) - for k := range this.Hard { - keysForHard = append(keysForHard, string(k)) + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - mapStringForHard := "ResourceList{" - for _, k := range keysForHard { - mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + return n +} + +func (m *LoadBalancerIngress) Size() (n int) { + if m == nil { + return 0 } - mapStringForHard += "}" - s := strings.Join([]string{`&ResourceQuotaSpec{`, - `Hard:` + mapStringForHard + `,`, - `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, - `ScopeSelector:` + strings.Replace(fmt.Sprintf("%v", this.ScopeSelector), "ScopeSelector", "ScopeSelector", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ResourceQuotaStatus) String() string { - if this == nil { - return "nil" + +func (m *LoadBalancerStatus) Size() (n int) { + if m == nil { + return 0 } - keysForHard := make([]string, 0, len(this.Hard)) - for k := range this.Hard { - keysForHard = append(keysForHard, string(k)) + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - mapStringForHard := "ResourceList{" - for _, k := range keysForHard { - mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + return n +} + +func (m *LocalObjectReference) Size() (n int) { + if m == nil { + return 0 } - mapStringForHard += "}" - keysForUsed := make([]string, 0, len(this.Used)) - for k := range this.Used { - keysForUsed = append(keysForUsed, string(k)) + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LocalVolumeSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) - mapStringForUsed := "ResourceList{" - for _, k := range keysForUsed { - mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForUsed += "}" - s := strings.Join([]string{`&ResourceQuotaStatus{`, - `Hard:` + mapStringForHard + `,`, - `Used:` + mapStringForUsed + `,`, - `}`, - }, "") - return s + return n } -func (this *ResourceRequirements) String() string { - if this == nil { - return "nil" + +func (m *NFSVolumeSource) Size() (n int) { + if m == nil { + return 0 } - keysForLimits := make([]string, 0, len(this.Limits)) - for k := range this.Limits { - keysForLimits = append(keysForLimits, string(k)) + var l int + _ = l + l = len(m.Server) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Namespace) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) - mapStringForLimits := "ResourceList{" - for _, k := range keysForLimits { - mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamespaceCondition) Size() (n int) { + if m == nil { + return 0 } - mapStringForLimits += "}" - keysForRequests := make([]string, 0, len(this.Requests)) - for k := range this.Requests { - keysForRequests = append(keysForRequests, string(k)) + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamespaceList) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) - mapStringForRequests := "ResourceList{" - for _, k := range keysForRequests { - mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForRequests += "}" - s := strings.Join([]string{`&ResourceRequirements{`, - `Limits:` + mapStringForLimits + `,`, - `Requests:` + mapStringForRequests + `,`, - `}`, - }, "") - return s + return n } -func (this *SELinuxOptions) String() string { - if this == nil { - return "nil" + +func (m *NamespaceSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SELinuxOptions{`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Role:` + fmt.Sprintf("%v", this.Role) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Level:` + fmt.Sprintf("%v", this.Level) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *ScaleIOPersistentVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NamespaceStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, - `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, - `System:` + fmt.Sprintf("%v", this.System) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, - `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, - `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, - `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleIOVolumeSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ScaleIOVolumeSource{`, - `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, - `System:` + fmt.Sprintf("%v", this.System) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, - `ProtectionDomain:` + 
fmt.Sprintf("%v", this.ProtectionDomain) + `,`, - `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, - `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s + return n } -func (this *ScopeSelector) String() string { - if this == nil { - return "nil" + +func (m *Node) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ScopeSelector{`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ScopedResourceSelectorRequirement) String() string { - if this == nil { - return "nil" + +func (m *NodeAddress) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`, - `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Address) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Secret) String() string { - if this == nil { - return "nil" - } - keysForData := make([]string, 0, len(this.Data)) - for k := range this.Data { - keysForData = append(keysForData, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - mapStringForData := "map[string][]byte{" - for _, k := range keysForData { - mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + +func (m *NodeAffinity) Size() (n int) { + if m == nil { + return 0 } - mapStringForData += "}" - keysForStringData := make([]string, 0, len(this.StringData)) - for k := range this.StringData { - keysForStringData = append(keysForStringData, k) + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) - mapStringForStringData := "map[string]string{" - for _, k := range keysForStringData { - mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForStringData += "}" - s := strings.Join([]string{`&Secret{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + mapStringForData + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `StringData:` + mapStringForStringData + `,`, - `}`, - }, "") - return s + return n } -func (this *SecretEnvSource) String() string { - if this == nil { - return "nil" + +func (m *NodeCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SecretEnvSource{`, - `LocalObjectReference:` + 
strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastHeartbeatTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *SecretKeySelector) String() string { - if this == nil { - return "nil" + +func (m *NodeConfigSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SecretKeySelector{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecretList) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&SecretList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Secret", "Secret", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *SecretProjection) String() string { - if this == nil { - return "nil" + +func (m *NodeConfigStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SecretProjection{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecretReference) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.Assigned != nil { + l = m.Assigned.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&SecretReference{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func (this *SecretVolumeSource) String() string { - if this == nil { - return "nil" + if m.Active != nil { + l = m.Active.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&SecretVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecurityContext) String() string { - if this == nil { - return "nil" + if m.LastKnownGood != nil { + l = m.LastKnownGood.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&SecurityContext{`, - `Capabilities:` + strings.Replace(fmt.Sprintf("%v", this.Capabilities), 
"Capabilities", "Capabilities", 1) + `,`, - `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, - `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, - `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, - `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, - `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, - `WindowsOptions:` + strings.Replace(fmt.Sprintf("%v", this.WindowsOptions), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, - `}`, - }, "") - return s + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *SerializedReference) String() string { - if this == nil { - return "nil" + +func (m *NodeDaemonEndpoints) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SerializedReference{`, - `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.KubeletEndpoint.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Service) String() string { - if this == nil { - return "nil" + +func (m *NodeList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Service{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceAccount) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ServiceAccount{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Secrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secrets), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServiceAccountList) String() string { - if this == nil { - return "nil" + +func (m *NodeProxyOptions) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceAccountList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Path) + n += 1 + l 
+ sovGenerated(uint64(l)) + return n } -func (this *ServiceAccountTokenProjection) String() string { - if this == nil { - return "nil" + +func (m *NodeResources) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceAccountTokenProjection{`, - `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, - `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceList) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&ServiceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Service", "Service", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServicePort) String() string { - if this == nil { - return "nil" + +func (m *NodeSelector) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServicePort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `TargetPort:` + strings.Replace(strings.Replace(this.TargetPort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceProxyOptions) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for _, e := range m.NodeSelectorTerms { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&ServiceProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServiceSpec) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) + +func (m *NodeSelectorRequirement) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForSelector += "}" - s := strings.Join([]string{`&ServiceSpec{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ServicePort", "ServicePort", 1), `&`, ``, 1) + `,`, - `Selector:` + mapStringForSelector + `,`, - `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, - `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, - `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, - `LoadBalancerSourceRanges:` + 
fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, - `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, - `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, - `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, - `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, - `SessionAffinityConfig:` + strings.Replace(fmt.Sprintf("%v", this.SessionAffinityConfig), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ServiceStatus) String() string { - if this == nil { - return "nil" + +func (m *NodeSelectorTerm) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SessionAffinityConfig) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&SessionAffinityConfig{`, - `ClientIP:` + strings.Replace(fmt.Sprintf("%v", this.ClientIP), "ClientIPConfig", "ClientIPConfig", 1) + `,`, - `}`, - }, "") - return s -} -func (this *StorageOSPersistentVolumeSource) String() string { - if this == nil { - return "nil" + if len(m.MatchFields) > 0 { + for _, e := range m.MatchFields { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "ObjectReference", "ObjectReference", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *StorageOSVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *NodeSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&StorageOSVolumeSource{`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Sysctl) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.PodCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DoNotUse_ExternalID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Taints) > 0 { + for _, e := range m.Taints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Sysctl{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *TCPSocketAction) String() string { - if this == nil { - return "nil" + if m.ConfigSource != nil { + l = m.ConfigSource.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&TCPSocketAction{`, - `Port:` + strings.Replace(strings.Replace(this.Port.String(), 
"IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s -} -func (this *Taint) String() string { - if this == nil { - return "nil" + if len(m.PodCIDRs) > 0 { + for _, s := range m.PodCIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Taint{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *Toleration) String() string { - if this == nil { - return "nil" + +func (m *NodeStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Toleration{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Allocatable) > 0 { + for k, v := range m.Allocatable { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DaemonEndpoints.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.NodeInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumesInUse) > 0 { + for _, s := range m.VolumesInUse { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumesAttached) > 0 { + for _, e := range m.VolumesAttached { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *TopologySelectorLabelRequirement) String() string { - if this == nil { - return "nil" + +func (m *NodeSystemInfo) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&TopologySelectorLabelRequirement{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.MachineID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SystemUUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.BootID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KernelVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OSImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerRuntimeVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeProxyVersion) + n += 1 + 
l + sovGenerated(uint64(l)) + l = len(m.OperatingSystem) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Architecture) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *TopologySelectorTerm) String() string { - if this == nil { - return "nil" + +func (m *ObjectFieldSelector) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&TopologySelectorTerm{`, - `MatchLabelExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchLabelExpressions), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *TypedLocalObjectReference) String() string { - if this == nil { - return "nil" + +func (m *ObjectReference) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&TypedLocalObjectReference{`, - `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *Volume) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolume) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&Volume{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *VolumeDevice) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaim) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeDevice{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *VolumeMount) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeMount{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, - `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, - `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, - `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *VolumeNodeAffinity) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeNodeAffinity{`, - `Required:` + strings.Replace(fmt.Sprintf("%v", this.Required), "NodeSelector", "NodeSelector", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *VolumeProjection) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeProjection{`, - `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretProjection", "SecretProjection", 1) + `,`, - `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, - `ServiceAccountToken:` + strings.Replace(fmt.Sprintf("%v", this.ServiceAccountToken), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.StorageClassName != nil { + l = len(*m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DataSource != nil { + l = m.DataSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *VolumeSource) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VolumeSource{`, - `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, - `EmptyDir:` + strings.Replace(fmt.Sprintf("%v", this.EmptyDir), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, - `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, - `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, - `GitRepo:` + strings.Replace(fmt.Sprintf("%v", this.GitRepo), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, - `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, - `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", 
"ISCSIVolumeSource", 1) + `,`, - `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, - `PersistentVolumeClaim:` + strings.Replace(fmt.Sprintf("%v", this.PersistentVolumeClaim), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, - `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, - `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, - `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, - `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, - `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, - `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, - `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, - `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, - `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, - `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, - `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, - `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, - `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, - `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, - `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, - `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, - `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *VsphereVirtualDiskVolumeSource) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, - `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, - `FSType:` + 
fmt.Sprintf("%v", this.FSType) + `,`, - `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, - `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.ClaimName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (this *WeightedPodAffinityTerm) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeList) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&WeightedPodAffinityTerm{`, - `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, - `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *WindowsSecurityContextOptions) String() string { - if this == nil { - return "nil" + +func (m *PersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&WindowsSecurityContextOptions{`, - `GMSACredentialSpecName:` + valueToStringGenerated(this.GMSACredentialSpecName) + `,`, - `GMSACredentialSpec:` + valueToStringGenerated(this.GMSACredentialSpec) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Local != nil { + l = m.Local.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n } -func valueToStringGenerated(v interface{}) 
string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + +func (m *PersistentVolumeSpec) Size() (n int) { + if m == nil { + return 0 } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.PersistentVolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ClaimRef != nil { + l = m.ClaimRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.PersistentVolumeReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PhotonPersistentDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PdID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Pod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAffinityTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAntiAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAttachOptions) Size() 
(n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodDNSConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDNSConfigOption) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodExecOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodIP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodLogOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + return n +} + +func (m *PodPortForwardOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + +func (m *PodProxyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodReadinessGate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConditionType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 
2 + } + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) + } + } + if m.FSGroup != nil { + n += 1 + sovGenerated(uint64(*m.FSGroup)) + } + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + } + if len(m.Sysctls) > 0 { + for _, e := range m.Sysctls { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.WindowsOptions != nil { + l = m.WindowsOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodSignature) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PodController != nil { + l = m.PodController.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.TerminationGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + l = len(m.DNSPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + n += 2 + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Hostname) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 2 + l + sovGenerated(uint64(l)) + if m.Affinity != nil { + l = m.Affinity.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + l = len(m.SchedulerName) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 3 + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.HostAliases) > 0 { + for _, e := range m.HostAliases { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.PriorityClassName) + n += 2 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 2 + sovGenerated(uint64(*m.Priority)) + } + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ShareProcessNamespace != nil { + n += 3 + } + if len(m.ReadinessGates) > 0 { + for _, e := range m.ReadinessGates { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RuntimeClassName != nil { + l = len(*m.RuntimeClassName) + n += 2 + l + sovGenerated(uint64(l)) + } + if m.EnableServiceLinks != nil { + n += 3 + } + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 2 + l + sovGenerated(uint64(l)) + 
} + if len(m.Overhead) > 0 { + for k, v := range m.Overhead { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.TopologySpreadConstraints) > 0 { + for _, e := range m.TopologySpreadConstraints { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.EphemeralContainers) > 0 { + for _, e := range m.EphemeralContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodIP) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ContainerStatuses) > 0 { + for _, e := range m.ContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.QOSClass) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.InitContainerStatuses) > 0 { + for _, e := range m.InitContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.NominatedNodeName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.PodIPs) > 0 { + for _, e := range m.PodIPs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.EphemeralContainerStatuses) > 0 { + for _, e := range m.EphemeralContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatusResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PortworxVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Preconditions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PreferAvoidPodsEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PodSignature.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EvictionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + 
l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PreferredSchedulingTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.Preference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Probe) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Handler.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) + n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) + n += 1 + sovGenerated(uint64(m.PeriodSeconds)) + n += 1 + sovGenerated(uint64(m.SuccessThreshold)) + n += 1 + sovGenerated(uint64(m.FailureThreshold)) + return n +} + +func (m *ProjectedVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Sources) > 0 { + for _, e := range m.Sources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + return n +} + +func (m *QuobyteVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Volume) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Tenant) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RBDPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *RBDVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *RangeAllocation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Range) + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicationController) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicationControllerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicationControllerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceFieldSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Divisor.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuotaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ScopeSelector != nil { + l = m.ScopeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceQuotaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Used) > 0 { + for 
k, v := range m.Used { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ResourceRequirements) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Limits) > 0 { + for k, v := range m.Limits { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SELinuxOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Role) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleIOPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ScaleIOVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ScopeSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ScopedResourceSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ScopeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Secret) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + l = 0 + if v != nil { + l = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 1 + 
sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.StringData) > 0 { + for k, v := range m.StringData { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SecretEnvSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretKeySelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecretProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecurityContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Capabilities != nil { + l = m.Capabilities.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Privileged != nil { + n += 2 + } + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if m.ReadOnlyRootFilesystem != nil { + n += 2 + } + if m.AllowPrivilegeEscalation != nil { + n += 2 + } + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + } + if m.ProcMount != nil { + l = len(*m.ProcMount) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.WindowsOptions != nil { + l = m.WindowsOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SerializedReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Reference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Service) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceAccount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = 
l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 2 + } + return n +} + +func (m *ServiceAccountList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountTokenProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Audience) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServicePort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.NodePort)) + return n +} + +func (m *ServiceProxyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ClusterIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SessionAffinity) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.LoadBalancerIP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ExternalName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExternalTrafficPolicy) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) + n += 2 + if m.SessionAffinityConfig != nil { + l = m.SessionAffinityConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPFamily != nil { + l = len(*m.IPFamily) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ServiceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SessionAffinityConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + 
if m.ClientIP != nil { + l = m.ClientIP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *StorageOSPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *StorageOSVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Sysctl) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TCPSocketAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Taint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeAdded != nil { + l = m.TimeAdded.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Toleration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TolerationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) + } + return n +} + +func (m *TopologySelectorLabelRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TopologySelectorTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MatchLabelExpressions) > 0 { + for _, e := range m.MatchLabelExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TopologySpreadConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.MaxSkew)) + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.WhenUnsatisfiable) + n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypedLocalObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.APIGroup != nil { + l = len(*m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Volume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l 
+ l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.VolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeMount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubPath) + n += 1 + l + sovGenerated(uint64(l)) + if m.MountPropagation != nil { + l = len(*m.MountPropagation) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.SubPathExpr) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeNodeAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Required != nil { + l = m.Required.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccountToken != nil { + l = m.ServiceAccountToken.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EmptyDir != nil { + l = m.EmptyDir.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitRepo != nil { + l = m.GitRepo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PersistentVolumeClaim != nil { + l = m.PersistentVolumeClaim.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = 
m.Quobyte.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Projected != nil { + l = m.Projected.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WeightedPodAffinityTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.PodAffinityTerm.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WindowsSecurityContextOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GMSACredentialSpecName != nil { + l = len(*m.GMSACredentialSpecName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GMSACredentialSpec != nil { + l = len(*m.GMSACredentialSpec) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUserName != nil { + l = len(*m.RunAsUserName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AWSElasticBlockStoreVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Affinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Affinity{`, + `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "NodeAffinity", "NodeAffinity", 1) + `,`, + `PodAffinity:` + strings.Replace(this.PodAffinity.String(), "PodAffinity", "PodAffinity", 1) + `,`, + `PodAntiAffinity:` + strings.Replace(this.PodAntiAffinity.String(), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AttachedVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachedVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *AvoidPods) String() string { + if this == nil { + return "nil" + } + repeatedStringForPreferAvoidPods := "[]PreferAvoidPodsEntry{" + for _, f := range this.PreferAvoidPods { + repeatedStringForPreferAvoidPods += strings.Replace(strings.Replace(f.String(), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + "," + } + 
repeatedStringForPreferAvoidPods += "}" + s := strings.Join([]string{`&AvoidPods{`, + `PreferAvoidPods:` + repeatedStringForPreferAvoidPods + `,`, + `}`, + }, "") + return s +} +func (this *AzureDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureDiskVolumeSource{`, + `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, + `DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`, + `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `Kind:` + valueToStringGenerated(this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFilePersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretNamespace:` + valueToStringGenerated(this.SecretNamespace) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFileVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFileVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Binding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Binding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) + for k := range this.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + mapStringForVolumeAttributes := "map[string]string{" + for _, k := range keysForVolumeAttributes { + mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + } + mapStringForVolumeAttributes += "}" + s := strings.Join([]string{`&CSIPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, + `ControllerPublishSecretRef:` + strings.Replace(this.ControllerPublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `NodeStageSecretRef:` + strings.Replace(this.NodeStageSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ControllerExpandSecretRef:` + strings.Replace(this.ControllerExpandSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForVolumeAttributes := make([]string, 0, 
len(this.VolumeAttributes)) + for k := range this.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + mapStringForVolumeAttributes := "map[string]string{" + for _, k := range keysForVolumeAttributes { + mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + } + mapStringForVolumeAttributes += "}" + s := strings.Join([]string{`&CSIVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, + `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Capabilities) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Capabilities{`, + `Add:` + fmt.Sprintf("%v", this.Add) + `,`, + `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSPersistentVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CinderPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CinderPersistentVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CinderVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CinderVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClientIPConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClientIPConfig{`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentCondition{`, + 
`Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ComponentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ComponentStatus{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatusList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ComponentStatus{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ComponentStatusList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMap) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string]string{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + keysForBinaryData := make([]string, 0, len(this.BinaryData)) + for k := range this.BinaryData { + keysForBinaryData = append(keysForBinaryData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) + mapStringForBinaryData := "map[string][]byte{" + for _, k := range keysForBinaryData { + mapStringForBinaryData += fmt.Sprintf("%v: %v,", k, this.BinaryData[k]) + } + mapStringForBinaryData += "}" + s := strings.Join([]string{`&ConfigMap{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `BinaryData:` + mapStringForBinaryData + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapList) String() string { + if this == nil { + return "nil" + } + 
repeatedStringForItems := "[]ConfigMap{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapNodeConfigSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapNodeConfigSource{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapVolumeSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *Container) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ContainerPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += strings.Replace(strings.Replace(f.String(), "EnvVar", "EnvVar", 1), `&`, ``, 1) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += strings.Replace(strings.Replace(f.String(), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumeDevices := "[]VolumeDevice{" + for _, f := range this.VolumeDevices { + 
repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeDevices += "}" + s := strings.Join([]string{`&Container{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, + `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerImage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerImage{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, + `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerState) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerState{`, + `Waiting:` + strings.Replace(this.Waiting.String(), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, + `Running:` + strings.Replace(this.Running.String(), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, + `Terminated:` + strings.Replace(this.Terminated.String(), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateRunning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateRunning{`, + `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateTerminated) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateTerminated{`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `Signal:` + 
fmt.Sprintf("%v", this.Signal) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FinishedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateWaiting) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateWaiting{`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, + `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `Started:` + valueToStringGenerated(this.Started) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonEndpoint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonEndpoint{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DownwardAPIVolumeFile{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DownwardAPIProjection{`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeFile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIVolumeFile{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FieldRef:` + strings.Replace(this.FieldRef.String(), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DownwardAPIVolumeFile{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DownwardAPIVolumeSource{`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *EmptyDirVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EmptyDirVolumeSource{`, + `Medium:` + 
fmt.Sprintf("%v", this.Medium) + `,`, + `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointAddress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `TargetRef:` + strings.Replace(this.TargetRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSubset) String() string { + if this == nil { + return "nil" + } + repeatedStringForAddresses := "[]EndpointAddress{" + for _, f := range this.Addresses { + repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForAddresses += "}" + repeatedStringForNotReadyAddresses := "[]EndpointAddress{" + for _, f := range this.NotReadyAddresses { + repeatedStringForNotReadyAddresses += strings.Replace(strings.Replace(f.String(), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForNotReadyAddresses += "}" + repeatedStringForPorts := "[]EndpointPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&EndpointSubset{`, + `Addresses:` + repeatedStringForAddresses + `,`, + `NotReadyAddresses:` + repeatedStringForNotReadyAddresses + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s +} +func (this *Endpoints) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubsets := "[]EndpointSubset{" + for _, f := range this.Subsets { + repeatedStringForSubsets += strings.Replace(strings.Replace(f.String(), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + "," + } + repeatedStringForSubsets += "}" + s := strings.Join([]string{`&Endpoints{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subsets:` + repeatedStringForSubsets + `,`, + `}`, + }, "") + return s +} +func (this *EndpointsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Endpoints{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Endpoints", "Endpoints", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EndpointsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EnvFromSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvFromSource{`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `ConfigMapRef:` + strings.Replace(this.ConfigMapRef.String(), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, + `SecretRef:` + 
strings.Replace(this.SecretRef.String(), "SecretEnvSource", "SecretEnvSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvVar) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVar{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `ValueFrom:` + strings.Replace(this.ValueFrom.String(), "EnvVarSource", "EnvVarSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvVarSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVarSource{`, + `FieldRef:` + strings.Replace(this.FieldRef.String(), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `ConfigMapKeyRef:` + strings.Replace(this.ConfigMapKeyRef.String(), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, + `SecretKeyRef:` + strings.Replace(this.SecretKeyRef.String(), "SecretKeySelector", "SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EphemeralContainer{`, + `EphemeralContainerCommon:` + strings.Replace(strings.Replace(this.EphemeralContainerCommon.String(), "EphemeralContainerCommon", "EphemeralContainerCommon", 1), `&`, ``, 1) + `,`, + `TargetContainerName:` + fmt.Sprintf("%v", this.TargetContainerName) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainerCommon) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ContainerPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += strings.Replace(strings.Replace(f.String(), "EnvVar", "EnvVar", 1), `&`, ``, 1) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += strings.Replace(strings.Replace(f.String(), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumeDevices := "[]VolumeDevice{" + for _, f := range this.VolumeDevices { + repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeDevices += "}" + s := strings.Join([]string{`&EphemeralContainerCommon{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `LivenessProbe:` + 
strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, + `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainers) String() string { + if this == nil { + return "nil" + } + repeatedStringForEphemeralContainers := "[]EphemeralContainer{" + for _, f := range this.EphemeralContainers { + repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainers += "}" + s := strings.Join([]string{`&EphemeralContainers{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, + `}`, + }, "") + return s +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, + `FirstTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FirstTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `EventTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EventTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(this.Series.String(), "EventSeries", "EventSeries", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Related:` + strings.Replace(this.Related.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + 
repeatedStringForItems += "}" + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastObservedTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func (this *EventSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSource{`, + `Component:` + fmt.Sprintf("%v", this.Component) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *ExecAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecAction{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *FCVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FCVolumeSource{`, + `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, + `Lun:` + valueToStringGenerated(this.Lun) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `WWIDs:` + fmt.Sprintf("%v", this.WWIDs) + `,`, + `}`, + }, "") + return s +} +func (this *FlexPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *FlexVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *FlockerVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlockerVolumeSource{`, + `DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`, + 
`DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`, + `}`, + }, "") + return s +} +func (this *GCEPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`, + `PDName:` + fmt.Sprintf("%v", this.PDName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *GitRepoVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitRepoVolumeSource{`, + `Repository:` + fmt.Sprintf("%v", this.Repository) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, + `}`, + }, "") + return s +} +func (this *GlusterfsPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsPersistentVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `EndpointsNamespace:` + valueToStringGenerated(this.EndpointsNamespace) + `,`, + `}`, + }, "") + return s +} +func (this *GlusterfsVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPGetAction) String() string { + if this == nil { + return "nil" + } + repeatedStringForHTTPHeaders := "[]HTTPHeader{" + for _, f := range this.HTTPHeaders { + repeatedStringForHTTPHeaders += strings.Replace(strings.Replace(f.String(), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + "," + } + repeatedStringForHTTPHeaders += "}" + s := strings.Join([]string{`&HTTPGetAction{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Port:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, + `HTTPHeaders:` + repeatedStringForHTTPHeaders + `,`, + `}`, + }, "") + return s +} +func (this *HTTPHeader) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPHeader{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Handler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Handler{`, + `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, + `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, + `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HostAlias) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostAlias{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostnames:` + fmt.Sprintf("%v", this.Hostnames) + `,`, + `}`, + }, "") + return s +} +func (this *HostPathVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPathVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + 
`Type:` + valueToStringGenerated(this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s +} +func (this *KeyToPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KeyToPath{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *Lifecycle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Lifecycle{`, + `PostStart:` + strings.Replace(this.PostStart.String(), "Handler", "Handler", 1) + `,`, + `PreStop:` + strings.Replace(this.PreStop.String(), "Handler", "Handler", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitRange{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeItem) String() string { + if this == nil { + return "nil" + } + keysForMax := make([]string, 0, len(this.Max)) + for k := range this.Max { + keysForMax = append(keysForMax, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + mapStringForMax := "ResourceList{" + for _, k := range keysForMax { + mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)]) + } + mapStringForMax += "}" + keysForMin := make([]string, 0, len(this.Min)) + for k := range this.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + mapStringForMin 
:= "ResourceList{" + for _, k := range keysForMin { + mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)]) + } + mapStringForMin += "}" + keysForDefault := make([]string, 0, len(this.Default)) + for k := range this.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + mapStringForDefault := "ResourceList{" + for _, k := range keysForDefault { + mapStringForDefault += fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)]) + } + mapStringForDefault += "}" + keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest)) + for k := range this.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + mapStringForDefaultRequest := "ResourceList{" + for _, k := range keysForDefaultRequest { + mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)]) + } + mapStringForDefaultRequest += "}" + keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio)) + for k := range this.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + mapStringForMaxLimitRequestRatio := "ResourceList{" + for _, k := range keysForMaxLimitRequestRatio { + mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)]) + } + mapStringForMaxLimitRequestRatio += "}" + s := strings.Join([]string{`&LimitRangeItem{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Max:` + mapStringForMax + `,`, + `Min:` + mapStringForMin + `,`, + `Default:` + mapStringForDefault + `,`, + `DefaultRequest:` + mapStringForDefaultRequest + `,`, + `MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]LimitRange{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LimitRange", "LimitRange", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LimitRangeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForLimits := "[]LimitRangeItem{" + for _, f := range this.Limits { + repeatedStringForLimits += strings.Replace(strings.Replace(f.String(), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + "," + } + repeatedStringForLimits += "}" + s := strings.Join([]string{`&LimitRangeSpec{`, + `Limits:` + repeatedStringForLimits + `,`, + `}`, + }, "") + return s +} +func (this *List) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RawExtension{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&List{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerIngress) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&LoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]LoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "LoadBalancerIngress", "LoadBalancerIngress", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + s := strings.Join([]string{`&LoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s +} +func (this *LocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalObjectReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LocalVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *NFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NFSVolumeSource{`, + `Server:` + fmt.Sprintf("%v", this.Server) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Namespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Namespace{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Namespace{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NamespaceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceSpec{`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]NamespaceCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions 
+= strings.Replace(strings.Replace(f.String(), "NamespaceCondition", "NamespaceCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&NamespaceStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Node{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeAddress{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Address:` + fmt.Sprintf("%v", this.Address) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]PreferredSchedulingTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&NodeAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(this.RequiredDuringSchedulingIgnoredDuringExecution.String(), "NodeSelector", "NodeSelector", 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *NodeCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastHeartbeatTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastHeartbeatTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NodeConfigSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeConfigSource{`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapNodeConfigSource", "ConfigMapNodeConfigSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeConfigStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeConfigStatus{`, + `Assigned:` + strings.Replace(this.Assigned.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `Active:` + strings.Replace(this.Active.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `LastKnownGood:` + strings.Replace(this.LastKnownGood.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *NodeDaemonEndpoints) String() string 
{ + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeDaemonEndpoints{`, + `KubeletEndpoint:` + strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", "DaemonEndpoint", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Node{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Node", "Node", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NodeProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *NodeResources) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&NodeResources{`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelector) String() string { + if this == nil { + return "nil" + } + repeatedStringForNodeSelectorTerms := "[]NodeSelectorTerm{" + for _, f := range this.NodeSelectorTerms { + repeatedStringForNodeSelectorTerms += strings.Replace(strings.Replace(f.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForNodeSelectorTerms += "}" + s := strings.Join([]string{`&NodeSelector{`, + `NodeSelectorTerms:` + repeatedStringForNodeSelectorTerms + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorTerm) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchExpressions := "[]NodeSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" + repeatedStringForMatchFields := "[]NodeSelectorRequirement{" + for _, f := range this.MatchFields { + repeatedStringForMatchFields += strings.Replace(strings.Replace(f.String(), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchFields += "}" + s := strings.Join([]string{`&NodeSelectorTerm{`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, + `MatchFields:` + repeatedStringForMatchFields + `,`, + `}`, + }, "") + return s +} +func (this *NodeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTaints := "[]Taint{" + for _, f := range 
this.Taints { + repeatedStringForTaints += strings.Replace(strings.Replace(f.String(), "Taint", "Taint", 1), `&`, ``, 1) + "," + } + repeatedStringForTaints += "}" + s := strings.Join([]string{`&NodeSpec{`, + `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, + `DoNotUse_ExternalID:` + fmt.Sprintf("%v", this.DoNotUse_ExternalID) + `,`, + `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, + `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, + `Taints:` + repeatedStringForTaints + `,`, + `ConfigSource:` + strings.Replace(this.ConfigSource.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `PodCIDRs:` + fmt.Sprintf("%v", this.PodCIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]NodeCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + repeatedStringForAddresses := "[]NodeAddress{" + for _, f := range this.Addresses { + repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForAddresses += "}" + repeatedStringForImages := "[]ContainerImage{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + repeatedStringForVolumesAttached := "[]AttachedVolume{" + for _, f := range this.VolumesAttached { + repeatedStringForVolumesAttached += strings.Replace(strings.Replace(f.String(), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumesAttached += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + keysForAllocatable := make([]string, 0, len(this.Allocatable)) + for k := range this.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + mapStringForAllocatable := "ResourceList{" + for _, k := range keysForAllocatable { + mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) + } + mapStringForAllocatable += "}" + s := strings.Join([]string{`&NodeStatus{`, + `Capacity:` + mapStringForCapacity + `,`, + `Allocatable:` + mapStringForAllocatable + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Addresses:` + repeatedStringForAddresses + `,`, + `DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`, + `NodeInfo:` + strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, + `VolumesAttached:` + repeatedStringForVolumesAttached + `,`, + `Config:` + strings.Replace(this.Config.String(), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, + 
`}`, + }, "") + return s +} +func (this *NodeSystemInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSystemInfo{`, + `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, + `SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`, + `BootID:` + fmt.Sprintf("%v", this.BootID) + `,`, + `KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`, + `OSImage:` + fmt.Sprintf("%v", this.OSImage) + `,`, + `ContainerRuntimeVersion:` + fmt.Sprintf("%v", this.ContainerRuntimeVersion) + `,`, + `KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`, + `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, + `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectFieldSelector{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolume{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, 
"") + return s +} +func (this *PersistentVolumeClaimList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PersistentVolumeClaim{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PersistentVolumeClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, + `DataSource:` + strings.Replace(this.DataSource.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PersistentVolumeClaimCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PersistentVolumeClaimCondition", "PersistentVolumeClaimCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`, + `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PersistentVolume{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PersistentVolumeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this 
*PersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeSource{`, + `GCEPersistentDisk:` + strings.Replace(this.GCEPersistentDisk.String(), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(this.AWSElasticBlockStore.String(), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `HostPath:` + strings.Replace(this.HostPath.String(), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(this.Glusterfs.String(), "GlusterfsPersistentVolumeSource", "GlusterfsPersistentVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(this.NFS.String(), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(this.RBD.String(), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(this.ISCSI.String(), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(this.Cinder.String(), "CinderPersistentVolumeSource", "CinderPersistentVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(this.CephFS.String(), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, + `FC:` + strings.Replace(this.FC.String(), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(this.Flocker.String(), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(this.FlexVolume.String(), "FlexPersistentVolumeSource", "FlexPersistentVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(this.AzureFile.String(), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(this.VsphereVolume.String(), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(this.Quobyte.String(), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(this.AzureDisk.String(), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(this.PhotonPersistentDisk.String(), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(this.PortworxVolume.String(), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(this.ScaleIO.String(), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, + `Local:` + strings.Replace(this.Local.String(), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(this.CSI.String(), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSpec) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeSpec{`, + `Capacity:` + mapStringForCapacity + `,`, + `PersistentVolumeSource:` 
+ strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), "PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `ClaimRef:` + strings.Replace(this.ClaimRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, + `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, + `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `}`, + }, "") + return s +} +func (this *PhotonPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`, + `PdID:` + fmt.Sprintf("%v", this.PdID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *Pod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Pod{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution := "[]PodAffinityTerm{" + for _, f := range this.RequiredDuringSchedulingIgnoredDuringExecution { + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += "}" + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]WeightedPodAffinityTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&PodAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAffinityTerm{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + 
`}`, + }, "") + return s +} +func (this *PodAntiAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution := "[]PodAffinityTerm{" + for _, f := range this.RequiredDuringSchedulingIgnoredDuringExecution { + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += "}" + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]WeightedPodAffinityTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&PodAntiAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *PodAttachOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAttachOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *PodCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfig) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]PodDNSConfigOption{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(strings.Replace(f.String(), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&PodDNSConfig{`, + `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, + `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfigOption) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDNSConfigOption{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *PodExecOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodExecOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + 
fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *PodIP) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodIP{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `}`, + }, "") + return s +} +func (this *PodList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Pod{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Pod", "Pod", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v1.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `}`, + }, "") + return s +} +func (this *PodPortForwardOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPortForwardOptions{`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `}`, + }, "") + return s +} +func (this *PodProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *PodReadinessGate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodReadinessGate{`, + `ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityContext) String() string { + if this == nil { + return "nil" + } + repeatedStringForSysctls := "[]Sysctl{" + for _, f := range this.Sysctls { + repeatedStringForSysctls += strings.Replace(strings.Replace(f.String(), "Sysctl", "Sysctl", 1), `&`, ``, 1) + "," + } + repeatedStringForSysctls += "}" + s := strings.Join([]string{`&PodSecurityContext{`, + `SELinuxOptions:` + strings.Replace(this.SELinuxOptions.String(), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, + `Sysctls:` + repeatedStringForSysctls + `,`, + `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSignature) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSignature{`, + `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "v1.OwnerReference", 1) + 
`,`, + `}`, + }, "") + return s +} +func (this *PodSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "Volume", "Volume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForContainers := "[]Container{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + "," + } + repeatedStringForImagePullSecrets += "}" + repeatedStringForInitContainers := "[]Container{" + for _, f := range this.InitContainers { + repeatedStringForInitContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + "," + } + repeatedStringForInitContainers += "}" + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + "," + } + repeatedStringForTolerations += "}" + repeatedStringForHostAliases := "[]HostAlias{" + for _, f := range this.HostAliases { + repeatedStringForHostAliases += strings.Replace(strings.Replace(f.String(), "HostAlias", "HostAlias", 1), `&`, ``, 1) + "," + } + repeatedStringForHostAliases += "}" + repeatedStringForReadinessGates := "[]PodReadinessGate{" + for _, f := range this.ReadinessGates { + repeatedStringForReadinessGates += strings.Replace(strings.Replace(f.String(), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + "," + } + repeatedStringForReadinessGates += "}" + repeatedStringForTopologySpreadConstraints := "[]TopologySpreadConstraint{" + for _, f := range this.TopologySpreadConstraints { + repeatedStringForTopologySpreadConstraints += strings.Replace(strings.Replace(f.String(), "TopologySpreadConstraint", "TopologySpreadConstraint", 1), `&`, ``, 1) + "," + } + repeatedStringForTopologySpreadConstraints += "}" + repeatedStringForEphemeralContainers := "[]EphemeralContainer{" + for _, f := range this.EphemeralContainers { + repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainers += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + keysForOverhead := make([]string, 0, len(this.Overhead)) + for k := range this.Overhead { + keysForOverhead = append(keysForOverhead, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead) + mapStringForOverhead := "ResourceList{" + for _, k := range keysForOverhead { + mapStringForOverhead += fmt.Sprintf("%v: %v,", k, this.Overhead[ResourceName(k)]) + } + mapStringForOverhead += "}" + s := strings.Join([]string{`&PodSpec{`, + `Volumes:` + 
repeatedStringForVolumes + `,`, + `Containers:` + repeatedStringForContainers + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "PodSecurityContext", "PodSecurityContext", 1) + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `Affinity:` + strings.Replace(this.Affinity.String(), "Affinity", "Affinity", 1) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `InitContainers:` + repeatedStringForInitContainers + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `HostAliases:` + repeatedStringForHostAliases + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `DNSConfig:` + strings.Replace(this.DNSConfig.String(), "PodDNSConfig", "PodDNSConfig", 1) + `,`, + `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, + `ReadinessGates:` + repeatedStringForReadinessGates + `,`, + `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, + `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, + `Overhead:` + mapStringForOverhead + `,`, + `TopologySpreadConstraints:` + repeatedStringForTopologySpreadConstraints + `,`, + `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, + `}`, + }, "") + return s +} +func (this *PodStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PodCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PodCondition", "PodCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + repeatedStringForContainerStatuses := "[]ContainerStatus{" + for _, f := range this.ContainerStatuses { + repeatedStringForContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForContainerStatuses += "}" + repeatedStringForInitContainerStatuses := "[]ContainerStatus{" + for _, f := range this.InitContainerStatuses { + repeatedStringForInitContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForInitContainerStatuses += "}" + repeatedStringForPodIPs := "[]PodIP{" + for _, f := range this.PodIPs { + repeatedStringForPodIPs += strings.Replace(strings.Replace(f.String(), "PodIP", "PodIP", 1), `&`, ``, 1) + 
"," + } + repeatedStringForPodIPs += "}" + repeatedStringForEphemeralContainerStatuses := "[]ContainerStatus{" + for _, f := range this.EphemeralContainerStatuses { + repeatedStringForEphemeralContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainerStatuses += "}" + s := strings.Join([]string{`&PodStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`, + `ContainerStatuses:` + repeatedStringForContainerStatuses + `,`, + `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, + `InitContainerStatuses:` + repeatedStringForInitContainerStatuses + `,`, + `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, + `PodIPs:` + repeatedStringForPodIPs + `,`, + `EphemeralContainerStatuses:` + repeatedStringForEphemeralContainerStatuses + `,`, + `}`, + }, "") + return s +} +func (this *PodStatusResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodStatusResult{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PortworxVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortworxVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Preconditions) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *PreferAvoidPodsEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferAvoidPodsEntry{`, + `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`, + `EvictionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvictionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PreferredSchedulingTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferredSchedulingTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Probe) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Probe{`, + `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, + `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, + `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, + `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, + `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, + `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectedVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForSources := "[]VolumeProjection{" + for _, f := range this.Sources { + repeatedStringForSources += strings.Replace(strings.Replace(f.String(), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + "," + } + repeatedStringForSources += "}" + s := strings.Join([]string{`&ProjectedVolumeSource{`, + `Sources:` + repeatedStringForSources + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *QuobyteVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuobyteVolumeSource{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`, + `}`, + }, "") + return s +} +func (this *RBDPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDPersistentVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RBDVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDVolumeSource{`, + `CephMonitors:` + 
fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RangeAllocation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RangeAllocation{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationController) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationController{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ReplicationController{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ReplicationControllerList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ReplicationControllerSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `Template:` + strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, 
+ }, "") + return s +} +func (this *ReplicationControllerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ReplicationControllerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ReplicationControllerStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ResourceFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceFieldSelector{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Divisor:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Divisor), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceQuota{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaSpec) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + s := strings.Join([]string{`&ResourceQuotaSpec{`, + `Hard:` + mapStringForHard + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `ScopeSelector:` + strings.Replace(this.ScopeSelector.String(), "ScopeSelector", "ScopeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaStatus) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + 
mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + keysForUsed := make([]string, 0, len(this.Used)) + for k := range this.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + mapStringForUsed := "ResourceList{" + for _, k := range keysForUsed { + mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) + } + mapStringForUsed += "}" + s := strings.Join([]string{`&ResourceQuotaStatus{`, + `Hard:` + mapStringForHard + `,`, + `Used:` + mapStringForUsed + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRequirements) String() string { + if this == nil { + return "nil" + } + keysForLimits := make([]string, 0, len(this.Limits)) + for k := range this.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + mapStringForLimits := "ResourceList{" + for _, k := range keysForLimits { + mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + } + mapStringForLimits += "}" + keysForRequests := make([]string, 0, len(this.Requests)) + for k := range this.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + mapStringForRequests := "ResourceList{" + for _, k := range keysForRequests { + mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + } + mapStringForRequests += "}" + s := strings.Join([]string{`&ResourceRequirements{`, + `Limits:` + mapStringForLimits + `,`, + `Requests:` + mapStringForRequests + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxOptions{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleIOPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleIOVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", 
this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ScopeSelector) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchExpressions := "[]ScopedResourceSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" + s := strings.Join([]string{`&ScopeSelector{`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, + `}`, + }, "") + return s +} +func (this *ScopedResourceSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`, + `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *Secret) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string][]byte{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + keysForStringData := make([]string, 0, len(this.StringData)) + for k := range this.StringData { + keysForStringData = append(keysForStringData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + mapStringForStringData := "map[string]string{" + for _, k := range keysForStringData { + mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) + } + mapStringForStringData += "}" + s := strings.Join([]string{`&Secret{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `StringData:` + mapStringForStringData + `,`, + `}`, + }, "") + return s +} +func (this *SecretEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Secret{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Secret", "Secret", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretList{`, + `ListMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *SecretProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *SecretVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecurityContext{`, + `Capabilities:` + strings.Replace(this.Capabilities.String(), "Capabilities", "Capabilities", 1) + `,`, + `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, + `SELinuxOptions:` + strings.Replace(this.SELinuxOptions.String(), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, + `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, + `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SerializedReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SerializedReference{`, + `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Service{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + 
strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccount) String() string { + if this == nil { + return "nil" + } + repeatedStringForSecrets := "[]ObjectReference{" + for _, f := range this.Secrets { + repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + "," + } + repeatedStringForSecrets += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + "," + } + repeatedStringForImagePullSecrets += "}" + s := strings.Join([]string{`&ServiceAccount{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Secrets:` + repeatedStringForSecrets + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ServiceAccount{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceAccountList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountTokenProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountTokenProjection{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Service{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Service", "Service", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServicePort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServicePort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec) String() string { + if this == nil { + 
return "nil" + } + repeatedStringForPorts := "[]ServicePort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ServicePort", "ServicePort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ServiceSpec{`, + `Ports:` + repeatedStringForPorts + `,`, + `Selector:` + mapStringForSelector + `,`, + `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, + `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, + `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, + `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, + `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, + `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, + `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, + `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, + `SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, + `IPFamily:` + valueToStringGenerated(this.IPFamily) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SessionAffinityConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionAffinityConfig{`, + `ClientIP:` + strings.Replace(this.ClientIP.String(), "ClientIPConfig", "ClientIPConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageOSVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Sysctl) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sysctl{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + 
`Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *TCPSocketAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TCPSocketAction{`, + `Port:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *Taint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Taint{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Toleration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Toleration{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *TopologySelectorLabelRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologySelectorLabelRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *TopologySelectorTerm) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchLabelExpressions := "[]TopologySelectorLabelRequirement{" + for _, f := range this.MatchLabelExpressions { + repeatedStringForMatchLabelExpressions += strings.Replace(strings.Replace(f.String(), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchLabelExpressions += "}" + s := strings.Join([]string{`&TopologySelectorTerm{`, + `MatchLabelExpressions:` + repeatedStringForMatchLabelExpressions + `,`, + `}`, + }, "") + return s +} +func (this *TopologySpreadConstraint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologySpreadConstraint{`, + `MaxSkew:` + fmt.Sprintf("%v", this.MaxSkew) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + `WhenUnsatisfiable:` + fmt.Sprintf("%v", this.WhenUnsatisfiable) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TypedLocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypedLocalObjectReference{`, + `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Volume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeDevice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeDevice{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", 
this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeMount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeMount{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, + `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, + `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeNodeAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeAffinity{`, + `Required:` + strings.Replace(this.Required.String(), "NodeSelector", "NodeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeProjection{`, + `Secret:` + strings.Replace(this.Secret.String(), "SecretProjection", "SecretProjection", 1) + `,`, + `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, + `ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeSource{`, + `HostPath:` + strings.Replace(this.HostPath.String(), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `EmptyDir:` + strings.Replace(this.EmptyDir.String(), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, + `GCEPersistentDisk:` + strings.Replace(this.GCEPersistentDisk.String(), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(this.AWSElasticBlockStore.String(), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `GitRepo:` + strings.Replace(this.GitRepo.String(), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, + `Secret:` + strings.Replace(this.Secret.String(), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(this.NFS.String(), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(this.ISCSI.String(), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(this.Glusterfs.String(), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `PersistentVolumeClaim:` + strings.Replace(this.PersistentVolumeClaim.String(), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(this.RBD.String(), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(this.FlexVolume.String(), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(this.Cinder.String(), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(this.CephFS.String(), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(this.Flocker.String(), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, + `FC:` + 
strings.Replace(this.FC.String(), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(this.AzureFile.String(), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(this.VsphereVolume.String(), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(this.Quobyte.String(), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(this.AzureDisk.String(), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(this.PhotonPersistentDisk.String(), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(this.PortworxVolume.String(), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(this.ScaleIO.String(), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `Projected:` + strings.Replace(this.Projected.String(), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(this.CSI.String(), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VsphereVirtualDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, + `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, + `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, + `}`, + }, "") + return s +} +func (this *WeightedPodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WeightedPodAffinityTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WindowsSecurityContextOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WindowsSecurityContextOptions{`, + `GMSACredentialSpecName:` + valueToStringGenerated(this.GMSACredentialSpecName) + `,`, + `GMSACredentialSpec:` + valueToStringGenerated(this.GMSACredentialSpec) + `,`, + `RunAsUserName:` + valueToStringGenerated(this.RunAsUserName) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Partition |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Affinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Affinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeAffinity == nil { + m.NodeAffinity = &NodeAffinity{} + } + if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAffinity == nil { + m.PodAffinity = &PodAffinity{} + } + if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAntiAffinity == nil { + m.PodAntiAffinity = &PodAntiAffinity{} + } + if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachedVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AvoidPods) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) + if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DiskName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataDiskURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) + m.CachingMode = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + 
m.ReadOnly = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) + m.Kind = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.SecretNamespace = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Binding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Binding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17669,7 +28017,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17677,15 +28025,15 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17697,7 +28045,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17707,14 +28055,17 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17726,7 +28077,7 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17736,16 +28087,19 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.VolumeHandle = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - m.Partition = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17755,16 +28109,17 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Partition |= (int32(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.ReadOnly = bool(v != 0) case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17774,65 +28129,154 @@ func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - if skippy < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return 
ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Affinity) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) } - if iNdEx >= l { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if m.VolumeAttributes == nil { + m.VolumeAttributes = make(map[string]string) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Affinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) 
> postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.VolumeAttributes[mapkey] = mapvalue + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17844,7 +28288,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -17853,19 +28297,22 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeAffinity == nil { - m.NodeAffinity = &NodeAffinity{} + if m.ControllerPublishSecretRef == nil { + m.ControllerPublishSecretRef = &SecretReference{} } - if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17877,7 +28324,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -17886,19 +28333,22 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodAffinity == nil { - m.PodAffinity = &PodAffinity{} + if m.NodeStageSecretRef == nil { + m.NodeStageSecretRef = &SecretReference{} } - if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17910,7 +28360,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -17919,13 +28369,52 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodAntiAffinity == nil { - m.PodAntiAffinity = &PodAntiAffinity{} + if m.NodePublishSecretRef == nil { + m.NodePublishSecretRef = &SecretReference{} } - if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ControllerExpandSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ControllerExpandSecretRef == nil { + m.ControllerExpandSecretRef = &SecretReference{} + } + if err := m.ControllerExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17938,6 +28427,9 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -17950,7 +28442,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } return nil } -func (m *AttachedVolume) Unmarshal(dAtA []byte) error { +func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17965,7 +28457,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -17973,15 +28465,15 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") + return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17993,7 +28485,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18003,14 +28495,38 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18022,7 +28538,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18032,10 +28548,177 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.DevicePath = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeAttributes == nil { + m.VolumeAttributes = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.VolumeAttributes[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodePublishSecretRef == nil { + m.NodePublishSecretRef = &LocalObjectReference{} + } + if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -18046,6 +28729,9 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18058,7 +28744,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } return nil } -func (m *AvoidPods) Unmarshal(dAtA []byte) error { +func (m *Capabilities) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18073,7 +28759,7 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18081,17 +28767,17 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") + return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18101,22 +28787,55 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) - if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -18127,6 +28846,9 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if 
(iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18139,7 +28861,7 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { } return nil } -func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18154,7 +28876,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18162,15 +28884,15 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18182,7 +28904,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18192,14 +28914,17 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.DiskName = string(dAtA[iNdEx:postIndex]) + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18211,7 +28936,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18221,14 +28946,17 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.DataDiskURI = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18240,7 +28968,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18250,15 +28978,17 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - s := 
AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) - m.CachingMode = &s + m.User = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18270,7 +29000,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18280,17 +29010,19 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s + m.SecretFile = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18300,18 +29032,33 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.ReadOnly = &b + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18321,22 +29068,12 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -18346,6 +29083,9 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18358,7 +29098,7 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18373,7 +29113,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift 
if b < 0x80 { break } @@ -18381,15 +29121,15 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18401,7 +29141,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18411,14 +29151,17 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18430,7 +29173,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18440,16 +29183,19 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ShareName = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18459,15 +29205,27 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18479,7 +29237,7 @@ func (m 
*AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18489,12 +29247,70 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.SecretNamespace = &s + m.SecretFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -18504,6 +29320,9 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18516,7 +29335,7 @@ func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18531,7 +29350,7 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18539,15 +29358,15 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18559,7 +29378,7 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18569,14 +29388,17 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18588,7 +29410,7 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18598,10 +29420,13 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ShareName = string(dAtA[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -18617,65 +29442,15 @@ func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Binding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Binding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18687,7 +29462,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18696,40 +29471,16 @@ func (m *Binding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18742,6 +29493,9 @@ func (m *Binding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -18754,7 +29508,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } return nil } -func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18769,7 +29523,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18777,15 +29531,15 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18797,7 +29551,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18807,14 +29561,17 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Driver = string(dAtA[iNdEx:postIndex]) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18826,7 +29583,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -18836,10 +29593,13 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeHandle = string(dAtA[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -18855,7 +29615,7 @@ func (m 
*CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -18863,9 +29623,9 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { m.ReadOnly = bool(v != 0) case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18875,26 +29635,86 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var msglen int + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18904,115 +29724,102 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + m.TimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.VolumeAttributes == nil { - m.VolumeAttributes = make(map[string]string) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentCondition) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } } - m.VolumeAttributes[mapkey] = mapvalue + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var msglen 
int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19022,30 +29829,61 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.ControllerPublishSecretRef == nil { - m.ControllerPublishSecretRef = &SecretReference{} + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19055,28 +29893,80 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeStageSecretRef == nil { - m.NodeStageSecretRef = &SecretReference{} - } - if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 8: + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
ComponentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19088,7 +29978,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -19097,19 +29987,19 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodePublishSecretRef == nil { - m.NodePublishSecretRef = &SecretReference{} - } - if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ControllerExpandSecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19121,7 +30011,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -19130,13 +30020,14 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ControllerExpandSecretRef == nil { - m.ControllerExpandSecretRef = &SecretReference{} - } - if err := m.ControllerExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, ComponentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -19149,6 +30040,9 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19161,7 +30055,7 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19176,7 +30070,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19184,17 +30078,17 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type 
%d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19204,26 +30098,30 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Driver = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19233,18 +30131,84 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.ReadOnly = &b - case 3: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ComponentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMap) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19254,25 +30218,28 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if 
intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19284,7 +30251,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -19293,11 +30260,14 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.VolumeAttributes == nil { - m.VolumeAttributes = make(map[string]string) + if m.Data == nil { + m.Data = make(map[string]string) } var mapkey string var mapvalue string @@ -19313,7 +30283,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19330,7 +30300,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19340,6 +30310,9 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -19356,7 +30329,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19366,6 +30339,9 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -19386,11 +30362,11 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.VolumeAttributes[mapkey] = mapvalue + m.Data[mapkey] = mapvalue iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19402,7 +30378,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -19411,15 +30387,110 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodePublishSecretRef == nil { - m.NodePublishSecretRef = &LocalObjectReference{} + if 
m.BinaryData == nil { + m.BinaryData = make(map[string][]byte) } - if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGenerated + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.BinaryData[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -19430,6 +30501,9 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19442,7 +30516,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Capabilities) Unmarshal(dAtA []byte) error { +func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19457,7 +30531,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19465,17 +30539,17 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19485,26 +30559,30 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19514,21 +30592,13 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19538,6 +30608,9 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19550,7 +30623,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19565,7 +30638,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19573,17 +30646,17 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19593,24 +30666,28 @@ func (m 
*CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19622,7 +30699,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19632,16 +30709,19 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19651,26 +30731,71 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for 
shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19680,24 +30805,28 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretFile = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19709,7 +30838,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -19718,36 +30847,17 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ConfigMap{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19757,6 +30867,9 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19769,7 +30882,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19784,7 +30897,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19792,15 +30905,15 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapNodeConfigSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapNodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 
1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19812,7 +30925,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19822,14 +30935,17 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19841,7 +30957,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19851,14 +30967,17 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19870,7 +30989,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19880,14 +30999,17 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.User = string(dAtA[iNdEx:postIndex]) + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19899,7 +31021,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -19909,16 +31031,19 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretFile = string(dAtA[iNdEx:postIndex]) + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubeletConfigKey", wireType) } - var 
msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19928,45 +31053,24 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.KubeletConfigKey = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19976,6 +31080,9 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -19988,7 +31095,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20003,7 +31110,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20011,17 +31118,17 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20031,26 +31138,30 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + if err := 
m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20060,24 +31171,29 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -20089,45 +31205,13 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20137,6 +31221,9 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -20149,7 +31236,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20164,7 +31251,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20172,17 +31259,17 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderVolumeSource: illegal 
tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20192,26 +31279,30 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20221,26 +31312,31 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) } - var v int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20250,17 +31346,17 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) + m.DefaultMode = &v case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20270,25 +31366,13 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := 
bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20298,6 +31382,9 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -20310,7 +31397,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { +func (m *Container) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20325,7 +31412,7 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20333,17 +31420,17 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") + return fmt.Errorf("proto: Container: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20353,65 +31440,27 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.TimeoutSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20423,7 +31472,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20433,14 +31482,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20452,7 +31504,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20462,14 +31514,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20481,7 +31536,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20491,14 +31546,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20510,7 +31568,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -20520,64 +31578,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.WorkingDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20589,7 +31600,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20598,16 +31609,20 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, ContainerPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20619,7 +31634,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20628,67 +31643,20 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, ComponentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20700,7 +31668,7 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20709,16 +31677,19 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20730,7 +31701,7 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20739,67 +31710,56 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ComponentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMap) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20811,7 +31771,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20820,16 +31780,22 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20841,7 +31807,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -20850,106 +31816,24 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Data == nil { - m.Data = make(map[string]string) + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Data[mapkey] = mapvalue iNdEx = postIndex - case 3: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20959,164 +31843,59 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.BinaryData == nil { - m.BinaryData = make(map[string][]byte) + m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) } - var mapkey string - mapvalue := []byte{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } } - m.BinaryData[mapkey] = mapvalue - iNdEx = postIndex - 
default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21128,7 +31907,7 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21137,16 +31916,22 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 16: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -21158,66 +31943,55 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.TTY = bool(v != 0) + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21229,7 +32003,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21238,16 +32012,20 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21259,7 +32037,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21269,16 +32047,53 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := 
m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21288,13 +32103,28 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartupProbe == nil { + m.StartupProbe = &Probe{} + } + if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21304,6 +32134,9 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21316,7 +32149,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapList) Unmarshal(dAtA []byte) error { +func (m *ContainerImage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21331,7 +32164,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21339,17 +32172,17 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21359,27 +32192,29 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
SizeBytes", wireType) } - var msglen int + m.SizeBytes = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21389,23 +32224,11 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.SizeBytes |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ConfigMap{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21415,6 +32238,9 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21427,7 +32253,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { +func (m *ContainerPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21442,7 +32268,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21450,15 +32276,15 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapNodeConfigSource: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapNodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21470,7 +32296,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21480,16 +32306,19 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) } - var stringLen uint64 + m.HostPort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21499,26 +32328,16 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.HostPort |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) } - var stringLen uint64 + m.ContainerPort = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21528,24 +32347,14 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.ContainerPort |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21557,7 +32366,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21567,14 +32376,17 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeletConfigKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21586,7 +32398,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21596,10 +32408,13 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.KubeletConfigKey = string(dAtA[iNdEx:postIndex]) + m.HostIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -21610,6 +32425,9 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21622,7 +32440,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { +func (m *ContainerState) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21637,7 +32455,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21645,15 +32463,15 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) 
wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21665,7 +32483,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21674,16 +32492,22 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Waiting == nil { + m.Waiting = &ContainerStateWaiting{} + } + if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21695,7 +32519,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21704,19 +32528,24 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Running == nil { + m.Running = &ContainerStateRunning{} + } + if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21726,13 +32555,28 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Terminated == nil { + m.Terminated = &ContainerStateTerminated{} + } + if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21742,6 +32586,9 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21754,7 +32601,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21769,7 +32616,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21777,15 +32624,15 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21797,7 +32644,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -21806,85 +32653,16 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DefaultMode = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Optional = &b default: iNdEx = 
preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21894,6 +32672,9 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -21906,7 +32687,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Container) Unmarshal(dAtA []byte) error { +func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21921,7 +32702,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -21929,17 +32710,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Container: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) } - var stringLen uint64 + m.ExitCode = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21949,26 +32730,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.ExitCode |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) } - var stringLen uint64 + m.Signal = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21978,24 +32749,14 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.Signal |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22007,7 +32768,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22017,14 +32778,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22036,7 +32800,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22046,43 +32810,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.WorkingDir = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22094,7 +32832,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22103,48 +32841,19 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, ContainerPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22156,7 +32865,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if 
b < 0x80 { break } @@ -22165,49 +32874,21 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 10: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22217,63 +32898,82 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.LivenessProbe == nil { - m.LivenessProbe = &Probe{} - } - if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - if msglen < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.ReadinessProbe == nil { - m.ReadinessProbe = &Probe{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 12: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22283,28 +32983,27 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Lifecycle == nil { - m.Lifecycle = &Lifecycle{} - } - if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 13: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22316,7 +33015,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22326,14 +33025,70 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 14: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22345,7 +33100,7 @@ func (m *Container) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22355,14 +33110,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 15: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22374,7 +33132,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22383,21 +33141,21 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecurityContext == nil { - m.SecurityContext = &SecurityContext{} - } - if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22407,15 +33165,28 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Stdin = bool(v != 0) - case 17: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTerminationState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -22427,17 +33198,17 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - m.StdinOnce = bool(v != 0) - case 18: + m.Ready = bool(v != 0) + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) } - var v int + m.RestartCount = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22447,17 +33218,16 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + m.RestartCount |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.TTY = bool(v != 0) - case 19: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field Image", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22467,26 +33237,27 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 20: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22498,7 +33269,7 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22508,16 +33279,19 @@ func (m *Container) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + m.ImageID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 21: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22527,23 +33301,45 @@ func (m *Container) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) - if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Started = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22553,6 +33349,9 @@ func (m *Container) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22565,7 +33364,7 @@ func (m 
*Container) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerImage) Unmarshal(dAtA []byte) error { +func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22580,7 +33379,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22588,46 +33387,17 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - m.SizeBytes = 0 + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22637,7 +33407,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SizeBytes |= (int64(b) & 0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -22651,6 +33421,9 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22663,7 +33436,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerPort) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22678,7 +33451,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -22686,17 +33459,17 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } 
- var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22706,116 +33479,25 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) - } - m.HostPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HostPort |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) - } - m.ContainerPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ContainerPort |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.HostIP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -22826,6 +33508,9 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22838,7 +33523,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerState) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22853,7 +33538,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 
0x80 { break } @@ -22861,17 +33546,17 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22881,28 +33566,27 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Waiting == nil { - m.Waiting = &ContainerStateWaiting{} - } - if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22914,7 +33598,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22923,19 +33607,22 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Running == nil { - m.Running = &ContainerStateRunning{} + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} } - if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22947,7 +33634,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -22956,16 +33643,39 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Terminated == nil { - m.Terminated = &ContainerStateTerminated{} + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} } - if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := 
m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22975,6 +33685,9 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -22987,7 +33700,7 @@ func (m *ContainerState) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23002,7 +33715,7 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23010,15 +33723,15 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23030,7 +33743,7 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23039,13 +33752,37 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23055,6 +33792,9 @@ func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23067,7 +33807,7 @@ func (m 
*ContainerStateRunning) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { +func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23082,7 +33822,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23090,17 +33830,17 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") + return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) } - m.ExitCode = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23110,35 +33850,29 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExitCode |= (int32(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.Signal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Signal |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 3: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23148,24 +33882,84 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SizeLimit == nil { + m.SizeLimit = &resource.Quantity{} + } + if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23177,7 +33971,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23187,14 +33981,17 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23206,7 +34003,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23215,18 +34012,24 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.TargetRef == nil { + m.TargetRef = &ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23236,25 +34039,27 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23266,7 +34071,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23276,10 +34081,14 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerID = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -23290,6 +34099,9 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23302,7 +34114,7 @@ func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { +func (m *EndpointPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23317,7 +34129,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23325,15 +34137,15 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23345,7 +34157,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23355,14 +34167,36 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23374,7 +34208,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23384,10 +34218,13 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -23398,6 +34235,9 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23410,7 +34250,7 @@ func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerStatus) Unmarshal(dAtA []byte) error { +func (m *EndpointSubset) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23425,7 +34265,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23433,17 +34273,17 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23453,24 +34293,29 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Addresses = append(m.Addresses, EndpointAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23482,7 +34327,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 
0x80 { break } @@ -23491,16 +34336,20 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23512,7 +34361,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23521,86 +34370,75 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTerminationState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Ready = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - m.RestartCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RestartCount |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } + if skippy < 0 { + return ErrInvalidLengthGenerated } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23610,26 +34448,30 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ImageID = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 8: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23639,20 +34481,25 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerID = string(dAtA[iNdEx:postIndex]) + m.Subsets = append(m.Subsets, EndpointSubset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -23663,6 +34510,9 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23675,7 +34525,7 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { +func (m *EndpointsList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23690,7 +34540,7 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23698,17 +34548,17 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointsList: 
illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - m.Port = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23718,62 +34568,26 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } @@ -23787,7 +34601,7 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23796,10 +34610,13 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) + m.Items = append(m.Items, Endpoints{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -23813,6 +34630,9 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23825,7 +34645,7 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { } return nil } -func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { +func (m *EnvFromSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23840,7 +34660,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23848,15 +34668,15 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
DownwardAPIVolumeFile: wiretype end group for non-group") + return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23868,7 +34688,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -23878,14 +34698,17 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Prefix = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23897,7 +34720,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23906,19 +34729,22 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} + if m.ConfigMapRef == nil { + m.ConfigMapRef = &ConfigMapEnvSource{} } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23930,7 +34756,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -23939,36 +34765,19 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} + if m.SecretRef == nil { + m.SecretRef = &SecretEnvSource{} } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= 
(int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23978,6 +34787,9 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -23990,7 +34802,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } return nil } -func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { +func (m *EnvVar) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24005,7 +34817,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24013,17 +34825,17 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24033,96 +34845,27 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DefaultMode = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - 
if wireType == 4 { - return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24134,7 +34877,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24144,14 +34887,17 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24163,7 +34909,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24172,13 +34918,16 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SizeLimit == nil { - m.SizeLimit = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if m.ValueFrom == nil { + m.ValueFrom = &EnvVarSource{} } - if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24191,6 +34940,9 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -24203,7 +34955,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointAddress) Unmarshal(dAtA []byte) error { +func (m *EnvVarSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24218,7 +34970,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24226,17 +34978,17 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) 
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24246,24 +34998,31 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24275,7 +35034,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24284,21 +35043,24 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TargetRef == nil { - m.TargetRef = &ObjectReference{} + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} } - if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24308,26 +35070,33 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + if m.ConfigMapKeyRef == nil { + m.ConfigMapKeyRef = &ConfigMapKeySelector{} + } + if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24337,21 +35106,27 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if 
intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.NodeName = &s + if m.SecretKeyRef == nil { + m.SecretKeyRef = &SecretKeySelector{} + } + if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -24362,6 +35137,9 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -24374,7 +35152,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointPort) Unmarshal(dAtA []byte) error { +func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24389,7 +35167,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24397,17 +35175,17 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + return fmt.Errorf("proto: EphemeralContainer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EphemeralContainer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerCommon", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24417,43 +35195,28 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.EphemeralContainerCommon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetContainerName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24465,7 +35228,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 
0x80 { break } @@ -24475,10 +35238,13 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + m.TargetContainerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -24489,6 +35255,9 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -24501,7 +35270,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointSubset) Unmarshal(dAtA []byte) error { +func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24516,7 +35285,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24524,17 +35293,17 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") + return fmt.Errorf("proto: EphemeralContainerCommon: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EphemeralContainerCommon: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24544,28 +35313,29 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Addresses = append(m.Addresses, EndpointAddress{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24575,24 +35345,121 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.NotReadyAddresses = 
append(m.NotReadyAddresses, EndpointAddress{}) - if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Image = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkingDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } @@ -24606,7 +35473,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24615,67 +35482,54 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, EndpointPort{}) + m.Ports = append(m.Ports, ContainerPort{}) if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Endpoints) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24687,7 +35541,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24696,16 +35550,19 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24717,7 +35574,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24726,67 +35583,56 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Subsets = append(m.Subsets, EndpointSubset{}) - if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointsList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24798,7 +35644,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24807,16 +35653,22 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24828,7 +35680,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24837,67 +35689,54 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Endpoints{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} + } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvFromSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24909,7 +35748,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -24919,14 +35758,17 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Prefix = string(dAtA[iNdEx:postIndex]) + m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24938,7 +35780,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24947,19 +35789,82 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConfigMapRef == nil { - m.ConfigMapRef = &ConfigMapEnvSource{} + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} } - if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24971,7 +35876,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -24980,69 +35885,20 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &SecretEnvSource{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvVar) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25054,7 +35910,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25064,16 +35920,19 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + 
m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 21: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25083,24 +35942,29 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25112,7 +35976,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25121,13 +35985,16 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ValueFrom == nil { - m.ValueFrom = &EnvVarSource{} + if m.StartupProbe == nil { + m.StartupProbe = &Probe{} } - if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25140,6 +36007,9 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25152,7 +36022,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } return nil } -func (m *EnvVarSource) Unmarshal(dAtA []byte) error { +func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25167,7 +36037,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25175,15 +36045,15 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") + return fmt.Errorf("proto: EphemeralContainers: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EphemeralContainers: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } 
var msglen int for shift := uint(0); ; shift += 7 { @@ -25195,7 +36065,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25204,52 +36074,19 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} - } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} - } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25261,7 +36098,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25270,46 +36107,14 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapKeyRef == nil { - m.ConfigMapKeyRef = &ConfigMapKeySelector{} - } - if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretKeyRef == nil { - m.SecretKeyRef = &SecretKeySelector{} - } - if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) + if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25322,6 +36127,9 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25349,7 +36157,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 
0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25377,7 +36185,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25386,6 +36194,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25407,7 +36218,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25416,6 +36227,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25437,7 +36251,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25447,6 +36261,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25466,7 +36283,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25476,6 +36293,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25495,7 +36315,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25504,6 +36324,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25525,7 +36348,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25534,6 +36357,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25555,7 +36381,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25564,6 +36390,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25585,7 +36414,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift + m.Count |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -25604,7 +36433,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25614,6 +36443,9 @@ 
func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25633,7 +36465,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25642,6 +36474,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25663,7 +36498,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25672,6 +36507,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25696,7 +36534,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25706,6 +36544,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25725,7 +36566,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25734,6 +36575,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25758,7 +36602,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25768,6 +36612,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25787,7 +36634,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25797,6 +36644,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25811,6 +36661,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25838,7 +36691,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25866,7 +36719,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25875,6 +36728,9 @@ func (m *EventList) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25896,7 +36752,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -25905,6 +36761,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -25922,6 +36781,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -25949,7 +36811,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -25977,7 +36839,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift + m.Count |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -25996,7 +36858,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26005,6 +36867,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26026,7 +36891,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26036,6 +36901,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26050,6 +36918,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26077,7 +36948,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26105,7 +36976,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26115,6 +36986,9 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26134,7 +37008,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26144,6 +37018,9 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -26158,6 +37035,9 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26185,7 +37065,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26213,7 +37093,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26223,6 +37103,9 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26237,6 +37120,9 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26264,7 +37150,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26292,7 +37178,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26302,6 +37188,9 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26321,7 +37210,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -26341,7 +37230,176 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlexPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlexPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26351,11 +37409,50 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) @@ -26370,7 +37467,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26378,144 +37475,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { m.ReadOnly = bool(v != 0) case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FlexPersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FlexPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26527,7 +37487,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26536,59 +37496,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } @@ -26609,7 +37519,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26626,7 +37536,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26636,6 +37546,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -26652,7 +37565,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26662,6 +37575,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -26693,6 +37609,9 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26720,7 +37639,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26748,7 +37667,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26758,6 +37677,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26777,7 +37699,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26787,6 +37709,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26806,7 +37731,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26815,6 +37740,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26839,7 +37767,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26859,7 +37787,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -26868,6 +37796,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -26888,7 +37819,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26905,7 +37836,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26915,6 +37846,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -26931,7 +37865,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -26941,6 +37875,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -26972,6 +37909,9 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -26999,7 +37939,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27027,7 +37967,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27037,6 +37977,9 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27056,7 +37999,7 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27066,6 +38009,9 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27080,6 +38026,9 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27107,7 +38056,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27135,7 +38084,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27145,6 +38094,9 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27164,7 +38116,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27174,6 +38126,9 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27193,7 +38148,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Partition |= (int32(b) & 0x7F) << shift + m.Partition |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -27212,7 +38167,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27227,6 +38182,9 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27254,7 +38212,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27282,7 +38240,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27292,6 +38250,9 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27311,7 +38272,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27321,6 +38282,9 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27340,7 +38304,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27350,6 +38314,9 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27364,6 +38331,9 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27391,7 +38361,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27419,7 +38389,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27429,6 +38399,9 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27448,7 +38421,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27458,6 +38431,9 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27477,7 +38453,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27497,7 +38473,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27507,6 +38483,9 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27522,6 +38501,9 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27549,7 +38531,7 @@ func (m *GlusterfsVolumeSource) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27577,7 +38559,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27587,6 +38569,9 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27606,7 +38591,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27616,6 +38601,9 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27635,7 +38623,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27650,6 +38638,9 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27677,7 +38668,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27705,7 +38696,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27715,6 +38706,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27734,7 +38728,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27743,6 +38737,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27764,7 +38761,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27774,6 +38771,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27793,7 +38793,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27803,6 +38803,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27822,7 +38825,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -27831,6 +38834,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27848,6 +38854,9 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27875,7 +38884,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27903,7 +38912,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27913,6 +38922,9 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27932,7 +38944,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -27942,6 +38954,9 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -27956,6 +38971,9 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -27983,7 +39001,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28011,7 +39029,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28020,6 +39038,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28044,7 +39065,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28053,6 +39074,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28077,7 +39101,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28086,6 +39110,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28105,6 +39132,9 @@ func (m *Handler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28132,7 +39162,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28160,7 +39190,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28170,6 +39200,9 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28189,7 +39222,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28199,6 +39232,9 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28213,6 +39249,9 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28240,7 +39279,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28268,7 +39307,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28278,6 +39317,9 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28297,7 +39339,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28307,6 +39349,9 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28322,6 +39367,9 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28349,7 +39397,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28377,7 +39425,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -28387,6 +39435,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28406,7 +39457,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28416,6 +39467,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28435,7 +39489,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lun |= (int32(b) & 0x7F) << shift + m.Lun |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -28454,7 +39508,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28464,6 +39518,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28483,7 +39540,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28493,6 +39550,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28512,7 +39572,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28532,7 +39592,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28542,6 +39602,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28561,7 +39624,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28581,7 +39644,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28590,6 +39653,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28614,7 +39680,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-28634,7 +39700,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28644,6 +39710,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28659,6 +39728,9 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28686,7 +39758,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28714,7 +39786,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28724,6 +39796,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28743,7 +39818,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28753,6 +39828,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28772,7 +39850,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lun |= (int32(b) & 0x7F) << shift + m.Lun |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -28791,7 +39869,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28801,6 +39879,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28820,7 +39901,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28830,6 +39911,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28849,7 +39933,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28869,7 +39953,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28879,6 +39963,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28898,7 +39985,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28918,7 +40005,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28927,6 +40014,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28951,7 +40041,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -28971,7 +40061,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -28981,6 +40071,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -28996,6 +40089,9 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29023,7 +40119,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29051,7 +40147,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29061,6 +40157,9 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29080,7 +40179,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29090,6 +40189,9 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29109,7 +40211,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -29124,6 +40226,9 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29151,7 +40256,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29179,7 +40284,7 @@ func (m *Lifecycle) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29188,6 +40293,9 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29212,7 +40320,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29221,6 +40329,9 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29240,6 +40351,9 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29267,7 +40381,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29295,7 +40409,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29304,6 +40418,9 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29325,7 +40442,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29334,6 +40451,9 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29350,6 +40470,9 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29377,7 +40500,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29405,7 +40528,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29415,6 +40538,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29434,7 +40560,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29443,6 +40569,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29450,7 +40579,7 @@ func (m 
*LimitRangeItem) Unmarshal(dAtA []byte) error { m.Max = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29463,7 +40592,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29480,7 +40609,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29490,6 +40619,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29506,7 +40638,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29515,13 +40647,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29557,7 +40689,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29566,6 +40698,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29573,7 +40708,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { m.Min = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29586,7 +40721,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29603,7 +40738,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29613,6 +40748,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29629,7 +40767,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29638,13 +40776,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } 
if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29680,7 +40818,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29689,6 +40827,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29696,7 +40837,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { m.Default = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29709,7 +40850,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29726,7 +40867,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29736,6 +40877,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29752,7 +40896,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29761,13 +40905,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29803,7 +40947,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29812,6 +40956,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29819,7 +40966,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { m.DefaultRequest = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29832,7 +40979,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29849,7 +40996,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-29859,6 +41006,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29875,7 +41025,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29884,13 +41034,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -29926,7 +41076,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -29935,6 +41085,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -29942,7 +41095,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { m.MaxLimitRequestRatio = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -29955,7 +41108,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29972,7 +41125,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -29982,6 +41135,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -29998,7 +41154,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30007,13 +41163,13 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -30044,6 +41200,9 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30071,7 +41230,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 
{ break } @@ -30099,7 +41258,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30108,6 +41267,9 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30129,7 +41291,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30138,6 +41300,9 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30155,6 +41320,9 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30182,7 +41350,7 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30210,7 +41378,7 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30219,6 +41387,9 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30236,6 +41407,9 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30263,7 +41437,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30291,7 +41465,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30300,6 +41474,9 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30321,7 +41498,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30330,10 +41507,13 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.Items = append(m.Items, runtime.RawExtension{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -30347,6 +41527,9 @@ func (m *List) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l 
{ return io.ErrUnexpectedEOF } @@ -30374,7 +41557,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30402,7 +41585,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30412,6 +41595,9 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30431,7 +41617,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30441,6 +41627,9 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30455,6 +41644,9 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30482,7 +41674,7 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30510,7 +41702,7 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30519,6 +41711,9 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30536,6 +41731,9 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30563,7 +41761,7 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30591,7 +41789,7 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30601,6 +41799,9 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30615,6 +41816,9 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30642,7 +41846,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30670,7 +41874,157 @@ func (m 
*LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Server = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30680,16 +42034,19 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30699,22 +42056,12 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s - iNdEx = postIndex + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30724,6 +42071,9 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30736,7 +42086,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *Namespace) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30751,7 +42101,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30759,17 +42109,17 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: Namespace: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30779,26 +42129,30 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Server = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30808,26 +42162,30 @@ func (m 
*NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30837,12 +42195,25 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30852,6 +42223,9 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30864,7 +42238,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Namespace) Unmarshal(dAtA []byte) error { +func (m *NamespaceCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30879,7 +42253,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -30887,17 +42261,17 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Namespace: wiretype end group for non-group") + return fmt.Errorf("proto: NamespaceCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NamespaceCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30907,25 +42281,59 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return 
io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = NamespaceConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30937,7 +42345,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -30946,18 +42354,21 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30967,21 +42378,55 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -30992,6 +42437,9 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31019,7 +42467,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31047,7 +42495,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31056,6 +42504,9 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31077,7 +42528,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31086,6 +42537,9 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31103,6 +42557,9 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31130,7 +42587,7 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31158,7 +42615,7 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31168,6 +42625,9 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31182,6 +42642,9 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31209,7 +42672,7 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31237,7 +42700,7 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31247,11 +42710,48 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Phase = NamespacePhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, NamespaceCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31261,6 +42761,9 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31288,7 +42791,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31316,7 +42819,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31325,6 +42828,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31346,7 +42852,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31355,6 +42861,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31376,7 +42885,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31385,6 +42894,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31401,6 +42913,9 @@ func (m *Node) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31428,7 +42943,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31456,7 +42971,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31466,6 +42981,9 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31485,7 +43003,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31495,6 +43013,9 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31509,6 +43030,9 @@ func (m *NodeAddress) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31536,7 +43060,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31564,7 +43088,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31573,6 +43097,9 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31597,7 +43124,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31606,6 +43133,9 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31623,6 +43153,9 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31650,7 +43183,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31678,7 +43211,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31688,6 +43221,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31707,7 +43243,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31717,6 +43253,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31736,7 +43275,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31745,6 +43284,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31766,7 +43308,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31775,6 +43317,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -31796,7 +43341,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31806,6 +43351,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31825,7 +43373,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31835,6 +43383,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31849,6 +43400,9 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31876,7 +43430,7 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31904,7 +43458,7 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31913,6 +43467,9 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -31932,6 +43489,9 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31959,7 +43519,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -31987,7 +43547,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -31996,6 +43556,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32020,7 +43583,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32029,6 +43592,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32053,7 +43619,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32062,6 +43628,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32086,7 +43655,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32096,6 +43665,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32110,6 +43682,9 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32137,7 +43712,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32165,7 +43740,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32174,6 +43749,9 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32190,6 +43768,9 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32217,7 +43798,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32245,7 +43826,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32254,6 +43835,9 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32275,7 +43859,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32284,6 +43868,9 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32301,6 +43888,9 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32328,7 +43918,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32356,7 +43946,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32366,6 +43956,9 @@ func (m 
*NodeProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32380,6 +43973,9 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32407,7 +44003,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32435,7 +44031,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32444,6 +44040,9 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32451,7 +44050,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { m.Capacity = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -32464,7 +44063,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32481,7 +44080,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32491,6 +44090,9 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -32507,7 +44109,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32516,13 +44118,13 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -32553,6 +44155,9 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32580,7 +44185,7 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32608,7 +44213,7 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32617,6 +44222,9 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32634,6 +44242,9 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32661,7 +44272,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32689,7 +44300,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32699,6 +44310,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32718,7 +44332,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32728,6 +44342,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32747,7 +44364,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32757,6 +44374,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32771,6 +44391,9 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32798,7 +44421,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32826,7 +44449,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32835,6 +44458,9 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32857,7 +44483,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -32866,6 +44492,9 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32883,6 +44512,9 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { 
return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -32910,7 +44542,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32938,7 +44570,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32948,6 +44580,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32967,7 +44602,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -32977,6 +44612,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -32996,7 +44634,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33006,6 +44644,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33025,7 +44666,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33045,7 +44686,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33054,6 +44695,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33076,7 +44720,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33085,6 +44729,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33095,6 +44742,38 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodCIDRs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodCIDRs = append(m.PodCIDRs, 
string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33104,6 +44783,9 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33131,7 +44813,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33159,7 +44841,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33168,6 +44850,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33175,7 +44860,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { m.Capacity = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -33188,7 +44873,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33205,7 +44890,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33215,6 +44900,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -33231,7 +44919,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33240,13 +44928,13 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -33282,7 +44970,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33291,6 +44979,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33298,7 +44989,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { m.Allocatable = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -33311,7 +45002,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + 
wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33328,7 +45019,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33338,6 +45029,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -33354,7 +45048,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33363,13 +45057,13 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -33405,7 +45099,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33415,6 +45109,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33434,7 +45131,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33443,6 +45140,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33465,7 +45165,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33474,6 +45174,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33496,7 +45199,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33505,6 +45208,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33526,7 +45232,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33535,6 +45241,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33556,7 +45265,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33565,6 +45274,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33587,7 +45299,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33597,6 +45309,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33616,7 +45331,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33625,6 +45340,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33647,7 +45365,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -33656,6 +45374,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33675,6 +45396,9 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33702,7 +45426,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33730,7 +45454,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33740,6 +45464,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33759,7 +45486,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33769,6 +45496,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33788,7 +45518,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33798,6 +45528,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-33817,7 +45550,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33827,6 +45560,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33846,7 +45582,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33856,6 +45592,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33875,7 +45614,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33885,6 +45624,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33904,7 +45646,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33914,6 +45656,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33933,7 +45678,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33943,6 +45688,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33962,7 +45710,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -33972,6 +45720,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -33991,7 +45742,7 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34001,6 +45752,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34015,6 +45769,9 @@ func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34042,7 +45799,7 @@ func 
(m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34070,7 +45827,7 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34080,6 +45837,9 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34099,7 +45859,7 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34109,6 +45869,9 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34123,6 +45886,9 @@ func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34150,7 +45916,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34178,7 +45944,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34188,6 +45954,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34207,7 +45976,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34217,6 +45986,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34236,7 +46008,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34246,6 +46018,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34265,7 +46040,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34275,6 +46050,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34294,7 +46072,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { 
} b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34304,6 +46082,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34323,7 +46104,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34333,6 +46114,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34352,7 +46136,7 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34362,6 +46146,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34376,6 +46163,9 @@ func (m *ObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34403,7 +46193,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34431,7 +46221,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34440,6 +46230,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34461,7 +46254,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34470,6 +46263,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34491,7 +46287,7 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34500,6 +46296,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34516,6 +46315,9 @@ func (m *PersistentVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34543,7 +46345,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift 
+ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34571,7 +46373,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34580,6 +46382,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34601,7 +46406,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34610,6 +46415,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34631,7 +46439,7 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34640,6 +46448,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34656,6 +46467,9 @@ func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34683,7 +46497,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34711,7 +46525,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34721,6 +46535,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34740,7 +46557,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34750,6 +46567,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34769,7 +46589,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34778,6 +46598,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34799,7 +46622,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 
0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34808,6 +46631,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34829,7 +46655,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34839,6 +46665,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34858,7 +46687,7 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34868,6 +46697,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34882,6 +46714,9 @@ func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34909,7 +46744,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -34937,7 +46772,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34946,6 +46781,9 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34967,7 +46805,7 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -34976,6 +46814,9 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -34993,6 +46834,9 @@ func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35020,7 +46864,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35048,7 +46892,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35058,6 +46902,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35077,7 +46924,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35086,6 +46933,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35107,7 +46957,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35117,6 +46967,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35136,7 +46989,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35145,11 +46998,14 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -35169,7 +47025,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35179,6 +47035,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35199,7 +47058,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35209,6 +47068,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35229,7 +47091,7 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35238,6 +47100,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35257,6 +47122,9 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35284,7 
+47152,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35312,7 +47180,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35322,6 +47190,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35341,7 +47212,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35351,6 +47222,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35370,7 +47244,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35379,6 +47253,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35386,7 +47263,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { m.Capacity = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -35399,7 +47276,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35416,7 +47293,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35426,6 +47303,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -35442,7 +47322,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35451,13 +47331,13 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -35493,7 +47373,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35502,6 +47382,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35519,6 +47402,9 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35546,7 +47432,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35574,7 +47460,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35584,6 +47470,9 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35603,7 +47492,7 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35618,6 +47507,9 @@ func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35645,7 +47537,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35673,7 +47565,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35682,6 +47574,9 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35703,7 +47598,7 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35712,6 +47607,9 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35729,6 +47627,9 @@ func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35756,7 +47657,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -35784,7 +47685,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35793,6 +47694,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35817,7 +47721,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35826,6 +47730,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35850,7 +47757,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35859,6 +47766,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35883,7 +47793,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35892,6 +47802,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35916,7 +47829,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35925,6 +47838,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35949,7 +47865,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35958,6 +47874,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -35982,7 +47901,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -35991,6 +47910,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36015,7 +47937,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36024,6 +47946,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
if postIndex > l { return io.ErrUnexpectedEOF } @@ -36048,7 +47973,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36057,6 +47982,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36081,7 +48009,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36090,6 +48018,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36114,7 +48045,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36123,6 +48054,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36147,7 +48081,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36156,6 +48090,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36180,7 +48117,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36189,6 +48126,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36213,7 +48153,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36222,6 +48162,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36246,7 +48189,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36255,6 +48198,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36279,7 +48225,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36288,6 +48234,9 @@ func (m 
*PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36312,7 +48261,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36321,6 +48270,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36345,7 +48297,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36354,6 +48306,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36378,7 +48333,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36387,6 +48342,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36411,7 +48369,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36420,6 +48378,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36444,7 +48405,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36453,6 +48414,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36477,7 +48441,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36486,6 +48450,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36505,6 +48472,9 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36532,7 +48502,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36560,7 +48530,7 @@ func (m *PersistentVolumeSpec) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36569,6 +48539,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36576,7 +48549,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { m.Capacity = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -36589,7 +48562,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36606,7 +48579,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36616,6 +48589,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -36632,7 +48608,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36641,13 +48617,13 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -36683,7 +48659,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36692,6 +48668,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36713,7 +48692,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36723,6 +48702,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36742,7 +48724,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36751,6 +48733,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36775,7 +48760,7 
@@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36785,6 +48770,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36804,7 +48792,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36814,6 +48802,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36833,7 +48824,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36843,6 +48834,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36862,7 +48856,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36872,6 +48866,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36892,7 +48889,7 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -36901,6 +48898,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36920,6 +48920,9 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36947,7 +48950,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36975,7 +48978,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -36985,6 +48988,9 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37004,7 +49010,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37014,6 +49020,9 
@@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37033,7 +49042,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37043,6 +49052,9 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37057,6 +49069,9 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37084,7 +49099,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37112,7 +49127,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37122,6 +49137,9 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37141,7 +49159,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37151,6 +49169,9 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37165,6 +49186,9 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37192,7 +49216,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37220,7 +49244,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37229,6 +49253,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37250,7 +49277,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37259,6 +49286,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37280,7 +49310,7 
@@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37289,6 +49319,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37305,6 +49338,9 @@ func (m *Pod) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37332,7 +49368,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37360,7 +49396,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37369,6 +49405,9 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37391,7 +49430,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37400,6 +49439,9 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37417,6 +49459,9 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37444,7 +49489,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37472,7 +49517,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37481,11 +49526,14 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.LabelSelector == nil { - m.LabelSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.LabelSelector = &v1.LabelSelector{} } if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -37505,7 +49553,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37515,6 +49563,9 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37534,7 +49585,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-37544,6 +49595,9 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37558,6 +49612,9 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37585,7 +49642,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37613,7 +49670,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37622,6 +49679,9 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37644,7 +49704,7 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37653,6 +49713,9 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37670,6 +49733,9 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37697,7 +49763,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37725,7 +49791,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37745,7 +49811,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37765,7 +49831,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37785,7 +49851,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37805,7 +49871,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37815,6 +49881,9 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37829,6 +49898,9 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -37856,7 +49928,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37884,7 +49956,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37894,6 +49966,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37913,7 +49988,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -37923,6 +49998,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37942,7 +50020,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37951,6 +50029,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37972,7 +50053,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -37981,6 +50062,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38002,7 +50086,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38012,6 +50096,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38031,7 +50118,7 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38041,6 +50128,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38055,6 +50145,9 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38082,7 +50175,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38110,7 +50203,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38120,6 +50213,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38139,7 +50235,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38149,6 +50245,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38168,7 +50267,7 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38177,6 +50276,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38194,6 +50296,9 @@ func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38221,7 +50326,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38249,7 +50354,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38259,6 +50364,9 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38278,7 +50386,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38288,6 +50396,9 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38303,6 +50414,9 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38330,7 +50444,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38358,7 +50472,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38378,7 +50492,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38398,7 +50512,7 @@ func (m *PodExecOptions) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38418,7 +50532,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38438,7 +50552,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38448,6 +50562,9 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38467,7 +50584,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38477,6 +50594,9 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38491,6 +50611,94 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38518,7 +50726,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38546,7 +50754,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 
0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38555,6 +50763,9 @@ func (m *PodList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38576,7 +50787,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38585,6 +50796,9 @@ func (m *PodList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38602,6 +50816,9 @@ func (m *PodList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38629,7 +50846,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38657,7 +50874,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38667,6 +50884,9 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38686,7 +50906,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38706,7 +50926,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38726,7 +50946,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38746,7 +50966,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38755,11 +50975,14 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.SinceTime == nil { - m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.SinceTime = &v1.Time{} } if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -38779,7 +51002,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38799,7 +51022,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38819,7 +51042,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -38834,6 +51057,9 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38861,7 +51087,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -38887,7 +51113,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -38904,7 +51130,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - packedLen |= (int(b) & 0x7F) << shift + packedLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -38913,9 +51139,23 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ports) == 0 { + m.Ports = make([]int32, 0, elementCount) + } for iNdEx < postIndex { var v int32 for shift := uint(0); ; shift += 7 { @@ -38927,7 +51167,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -38946,6 +51186,9 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38973,7 +51216,7 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39001,7 +51244,7 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39011,6 +51254,9 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39025,6 +51271,9 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39052,7 +51301,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39080,7 +51329,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39090,6 +51339,9 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39104,6 +51356,9 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx 
+ skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39131,7 +51386,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39159,7 +51414,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39168,6 +51423,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39192,7 +51450,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39212,7 +51470,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39231,7 +51489,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39248,7 +51506,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - packedLen |= (int(b) & 0x7F) << shift + packedLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39257,9 +51515,23 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.SupplementalGroups) == 0 { + m.SupplementalGroups = make([]int64, 0, elementCount) + } for iNdEx < postIndex { var v int64 for shift := uint(0); ; shift += 7 { @@ -39271,7 +51543,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39295,7 +51567,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39315,7 +51587,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39335,7 +51607,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39344,6 +51616,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39366,7 +51641,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39375,6 +51650,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39394,6 +51672,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39421,7 +51702,7 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39449,7 +51730,7 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39458,11 +51739,14 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodController == nil { - m.PodController = &k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{} + m.PodController = &v1.OwnerReference{} } if err := m.PodController.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -39477,6 +51761,9 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39504,7 +51791,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39532,7 +51819,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39541,6 +51828,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39563,7 +51853,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39572,6 +51862,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39594,7 +51887,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39604,6 +51897,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39623,7 +51919,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39643,7 +51939,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -39663,7 +51959,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen 
|= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39673,6 +51969,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39692,7 +51991,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39701,6 +52000,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39721,7 +52023,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39738,7 +52040,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39748,6 +52050,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -39764,7 +52069,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39774,6 +52079,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -39810,7 +52118,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39820,6 +52128,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39839,7 +52150,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39849,6 +52160,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39868,7 +52182,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -39878,6 +52192,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39897,7 +52214,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39917,7 +52234,7 @@ func (m *PodSpec) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39937,7 +52254,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39957,7 +52274,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39966,6 +52283,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39990,7 +52310,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -39999,6 +52319,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40021,7 +52344,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40031,6 +52354,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40050,7 +52376,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40060,6 +52386,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40079,7 +52408,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40088,6 +52417,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40112,7 +52444,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40122,6 +52454,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40141,7 +52476,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40150,6 +52485,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40172,7 +52510,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << 
shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40193,7 +52531,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40202,6 +52540,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40224,7 +52565,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40233,6 +52574,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40255,7 +52599,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40265,6 +52609,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40284,7 +52631,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -40304,7 +52651,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40313,6 +52660,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40337,7 +52687,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40358,7 +52708,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40367,6 +52717,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40389,7 +52742,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40399,6 +52752,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40419,7 +52775,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40440,7 +52796,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40450,12 +52806,212 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } s := PreemptionPolicy(dAtA[iNdEx:postIndex]) m.PreemptionPolicy = &s iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Overhead[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologySpreadConstraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.TopologySpreadConstraints = append(m.TopologySpreadConstraints, TopologySpreadConstraint{}) + if err := m.TopologySpreadConstraints[len(m.TopologySpreadConstraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) + if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -40465,6 +53021,9 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40492,7 +53051,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40520,7 +53079,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40530,6 +53089,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40549,7 +53111,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40558,6 +53120,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40580,7 +53145,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40590,6 +53155,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40609,7 +53177,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40619,6 +53187,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40638,7 +53209,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << 
shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40648,6 +53219,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40667,7 +53241,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40677,6 +53251,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40696,7 +53273,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40705,11 +53282,14 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.StartTime = &v1.Time{} } if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -40729,7 +53309,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40738,6 +53318,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40760,7 +53343,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40770,6 +53353,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40789,7 +53375,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40798,6 +53384,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40820,7 +53409,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40830,11 +53419,82 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.NominatedNodeName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodIPs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodIPs = append(m.PodIPs, PodIP{}) + if err := m.PodIPs[len(m.PodIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EphemeralContainerStatuses = append(m.EphemeralContainerStatuses, ContainerStatus{}) + if err := m.EphemeralContainerStatuses[len(m.EphemeralContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -40844,6 +53504,9 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40871,7 +53534,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -40899,7 +53562,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40908,6 +53571,9 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40929,7 +53595,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -40938,6 +53604,9 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40954,6 +53623,9 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40981,7 +53653,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41009,7 +53681,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41018,6 +53690,9 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41039,7 +53714,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41048,6 +53723,9 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41064,6 +53742,9 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41091,7 +53772,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41119,7 +53800,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41128,6 +53809,9 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41149,7 +53833,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41158,6 +53842,9 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41175,6 +53862,9 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41202,7 +53892,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41230,7 +53920,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41239,6 +53929,9 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41260,7 +53953,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41269,6 +53962,9 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41285,6 +53981,9 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41312,7 +54011,7 @@ func (m *PortworxVolumeSource) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41340,7 +54039,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41350,6 +54049,9 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41369,7 +54071,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41379,6 +54081,9 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41398,7 +54103,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41413,6 +54118,9 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41440,7 +54148,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41468,7 +54176,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41478,6 +54186,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41493,6 +54204,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41520,7 +54234,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41548,7 +54262,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41557,6 +54271,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41578,7 +54295,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41587,6 +54304,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41608,7 +54328,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41618,6 +54338,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41637,7 +54360,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41647,6 +54370,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41661,6 +54387,9 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41688,7 +54417,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41716,7 +54445,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Weight |= (int32(b) & 0x7F) << shift + m.Weight |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41735,7 +54464,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41744,6 +54473,9 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41760,6 +54492,9 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41787,7 +54522,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41815,7 +54550,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41824,6 +54559,9 @@ func (m *Probe) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41845,7 +54583,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.InitialDelaySeconds |= (int32(b) & 0x7F) << shift + m.InitialDelaySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41864,7 +54602,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeoutSeconds |= (int32(b) & 0x7F) << shift + m.TimeoutSeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41883,7 +54621,7 @@ func (m *Probe) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PeriodSeconds |= (int32(b) & 0x7F) << shift + m.PeriodSeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41902,7 +54640,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SuccessThreshold |= (int32(b) & 0x7F) << shift + m.SuccessThreshold |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41921,7 +54659,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FailureThreshold |= (int32(b) & 0x7F) << shift + m.FailureThreshold |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -41935,6 +54673,9 @@ func (m *Probe) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41962,7 +54703,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -41990,7 +54731,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -41999,6 +54740,9 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42021,7 +54765,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -42036,6 +54780,9 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42063,7 +54810,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42091,7 +54838,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42101,6 +54848,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42120,7 +54870,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42130,6 +54880,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42149,7 +54902,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42169,7 +54922,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break 
} @@ -42179,6 +54932,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42198,7 +54954,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42208,6 +54964,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42227,7 +54986,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42237,6 +54996,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42251,6 +55013,9 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42278,7 +55043,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42306,7 +55071,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42316,6 +55081,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42335,7 +55103,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42345,6 +55113,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42364,7 +55135,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42374,6 +55145,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42393,7 +55167,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42403,6 +55177,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42422,7 +55199,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42432,6 +55209,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42451,7 +55231,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42461,6 +55241,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42480,7 +55263,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42489,6 +55272,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42513,7 +55299,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42528,6 +55314,9 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42555,7 +55344,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42583,7 +55372,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42593,6 +55382,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42612,7 +55404,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42622,6 +55414,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42641,7 +55436,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42651,6 +55446,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42670,7 +55468,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42680,6 +55478,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42699,7 +55500,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42709,6 +55510,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42728,7 +55532,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42738,6 +55542,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42757,7 +55564,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42766,6 +55573,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42790,7 +55600,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42805,6 +55615,9 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42832,7 +55645,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42860,7 +55673,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42869,6 +55682,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42890,7 +55706,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -42900,6 +55716,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42919,7 +55738,7 @@ func (m 
*RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -42928,6 +55747,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42945,6 +55767,9 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42972,7 +55797,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43000,7 +55825,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43009,6 +55834,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43030,7 +55858,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43039,6 +55867,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43060,7 +55891,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43069,6 +55900,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43085,6 +55919,9 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43112,7 +55949,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43140,7 +55977,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43150,6 +55987,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43169,7 +56009,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43179,6 +56019,9 @@ func (m *ReplicationControllerCondition) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43198,7 +56041,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43207,6 +56050,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43228,7 +56074,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43238,6 +56084,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43257,7 +56106,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43267,6 +56116,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43281,6 +56133,9 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43308,7 +56163,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43336,7 +56191,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43345,6 +56200,9 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43366,7 +56224,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43375,6 +56233,9 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43392,6 +56253,9 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43419,7 +56283,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43447,7 +56311,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43467,7 +56331,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43476,6 +56340,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43496,7 +56363,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43513,7 +56380,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43523,6 +56390,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -43539,7 +56409,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43549,6 +56419,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -43585,7 +56458,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43594,6 +56467,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43618,7 +56494,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43632,6 +56508,9 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43659,7 +56538,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43687,7 +56566,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43706,7 +56585,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43725,7 +56604,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -43744,7 +56623,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43763,7 +56642,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -43782,7 +56661,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43791,6 +56670,9 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43808,6 +56690,9 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43835,7 +56720,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43863,7 +56748,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43873,6 +56758,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43892,7 +56780,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -43902,6 +56790,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43921,7 +56812,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -43930,6 +56821,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43946,6 +56840,9 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43973,7 +56870,7 @@ func (m *ResourceQuota) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44001,7 +56898,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44010,6 +56907,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44031,7 +56931,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44040,6 +56940,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44061,7 +56964,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44070,6 +56973,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44086,6 +56992,9 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44113,7 +57022,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44141,7 +57050,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44150,6 +57059,9 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44171,7 +57083,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44180,6 +57092,9 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44197,6 +57112,9 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44224,7 +57142,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44252,7 +57170,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44261,6 +57179,9 @@ func 
(m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44268,7 +57189,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { m.Hard = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44281,7 +57202,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44298,7 +57219,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44308,6 +57229,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44324,7 +57248,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44333,13 +57257,13 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44375,7 +57299,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44385,6 +57309,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44404,7 +57331,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44413,6 +57340,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44432,6 +57362,9 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44459,7 +57392,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44487,7 +57420,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44496,6 +57429,9 @@ func (m 
*ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44503,7 +57439,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { m.Hard = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44516,7 +57452,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44533,7 +57469,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44543,6 +57479,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44559,7 +57498,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44568,13 +57507,13 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44610,7 +57549,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44619,6 +57558,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44626,7 +57568,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { m.Used = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44639,7 +57581,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44656,7 +57598,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44666,6 +57608,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44682,7 +57627,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44691,13 +57636,13 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44728,6 +57673,9 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44755,7 +57703,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44783,7 +57731,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44792,6 +57740,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44799,7 +57750,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { m.Limits = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44812,7 +57763,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44829,7 +57780,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44839,6 +57790,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44855,7 +57809,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44864,13 +57818,13 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -44906,7 +57860,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44915,6 +57869,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx 
+ msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44922,7 +57879,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { m.Requests = make(ResourceList) } var mapkey ResourceName - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -44935,7 +57892,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44952,7 +57909,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -44962,6 +57919,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -44978,7 +57938,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -44987,13 +57947,13 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + mapvalue = &resource.Quantity{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -45024,6 +57984,9 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45051,7 +58014,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45079,7 +58042,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45089,6 +58052,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45108,7 +58074,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45118,6 +58084,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45137,7 +58106,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45147,6 +58116,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45166,7 +58138,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45176,6 +58148,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45190,6 +58165,9 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45217,7 +58195,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45245,7 +58223,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45255,6 +58233,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45274,7 +58255,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45284,6 +58265,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45303,7 +58287,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45312,6 +58296,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45336,7 +58323,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45356,7 +58343,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45366,6 +58353,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45385,7 +58375,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45395,6 +58385,9 @@ func (m 
*ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45414,7 +58407,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45424,6 +58417,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45443,7 +58439,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45453,6 +58449,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45472,7 +58471,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45482,6 +58481,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45501,7 +58503,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45516,6 +58518,9 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45543,7 +58548,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45571,7 +58576,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45581,6 +58586,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45600,7 +58608,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45610,6 +58618,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45629,7 +58640,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b 
< 0x80 { break } @@ -45638,6 +58649,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45662,7 +58676,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45682,7 +58696,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45692,6 +58706,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45711,7 +58728,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45721,6 +58738,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45740,7 +58760,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45750,6 +58770,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45769,7 +58792,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45779,6 +58802,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45798,7 +58824,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45808,6 +58834,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45827,7 +58856,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45842,6 +58871,9 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45869,7 +58901,7 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45897,7 +58929,7 @@ func (m 
*ScopeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -45906,6 +58938,9 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45923,6 +58958,9 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -45950,7 +58988,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45978,7 +59016,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -45988,6 +59026,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46007,7 +59048,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46017,6 +59058,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46036,7 +59080,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46046,6 +59090,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46060,6 +59107,9 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46087,7 +59137,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46115,7 +59165,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46124,6 +59174,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46145,7 +59198,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46154,6 +59207,9 @@ func (m *Secret) Unmarshal(dAtA 
[]byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46174,7 +59230,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46191,7 +59247,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46201,6 +59257,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -46217,7 +59276,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift + mapbyteLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46227,6 +59286,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGenerated + } if postbytesIndex > l { return io.ErrUnexpectedEOF } @@ -46264,7 +59326,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46274,6 +59336,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46293,7 +59358,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46302,6 +59367,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46322,7 +59390,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46339,7 +59407,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46349,6 +59417,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -46365,7 +59436,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46375,6 +59446,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -46406,6 +59480,9 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46433,7 +59510,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46461,7 +59538,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46470,6 +59547,9 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46491,7 +59571,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46507,6 +59587,9 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46534,7 +59617,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46562,7 +59645,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46571,6 +59654,9 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46592,7 +59678,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46602,6 +59688,9 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46621,7 +59710,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46637,6 +59726,9 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46664,7 +59756,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46692,7 +59784,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46701,6 +59793,9 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46722,7 +59817,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46731,6 +59826,9 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46748,6 +59846,9 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46775,7 +59876,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46803,7 +59904,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46812,6 +59913,9 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46833,7 +59937,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46842,6 +59946,9 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46864,7 +59971,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -46880,6 +59987,9 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -46907,7 +60017,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46935,7 +60045,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46945,6 +60055,9 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46964,7 +60077,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -46974,6 +60087,9 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -46988,6 +60104,9 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -47015,7 +60134,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47043,7 +60162,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47053,6 +60172,9 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47072,7 +60194,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47081,6 +60203,9 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47103,7 +60228,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -47123,7 +60248,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47139,6 +60264,9 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47166,7 +60294,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47194,7 +60322,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47203,6 +60331,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47227,7 +60358,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47248,7 +60379,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47257,6 +60388,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47281,7 +60415,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -47301,7 +60435,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47322,7 +60456,7 @@ func (m *SecurityContext) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47343,7 +60477,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47364,7 +60498,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -47384,7 +60518,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47394,6 +60528,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47414,7 +60551,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47423,6 +60560,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47442,6 +60582,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47469,7 +60612,7 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47497,7 +60640,7 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47506,6 +60649,9 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47522,6 +60668,9 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47549,7 +60698,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47577,7 +60726,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47586,6 +60735,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47607,7 +60759,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47616,6 +60768,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47637,7 +60792,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47646,6 +60801,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47662,6 +60820,9 @@ func (m *Service) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47689,7 +60850,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47717,7 +60878,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47726,6 +60887,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47747,7 +60911,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47756,6 +60920,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47778,7 +60945,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47787,6 +60954,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47809,7 +60979,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47825,6 +60995,9 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47852,7 +61025,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47880,7 +61053,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47889,6 +61062,9 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47910,7 +61086,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -47919,6 +61095,9 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -47936,6 +61115,9 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -47963,7 +61145,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -47991,7 +61173,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48001,6 +61183,9 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48020,7 +61205,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -48040,7 +61225,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48050,6 +61235,9 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48064,6 +61252,9 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48091,7 +61282,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48119,7 +61310,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48128,6 +61319,9 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48149,7 +61343,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48158,6 +61352,9 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48175,6 +61372,9 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { 
+ return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48202,7 +61402,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48230,7 +61430,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48240,6 +61440,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48259,7 +61462,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48269,6 +61472,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48288,7 +61494,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -48307,7 +61513,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48316,6 +61522,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48337,7 +61546,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NodePort |= (int32(b) & 0x7F) << shift + m.NodePort |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -48351,6 +61560,9 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48378,7 +61590,7 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48406,7 +61618,7 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48416,6 +61628,9 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48430,6 +61645,9 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48457,7 +61675,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48485,7 +61703,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48494,6 +61712,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48516,7 +61737,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48525,6 +61746,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48545,7 +61769,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48562,7 +61786,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48572,6 +61796,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -48588,7 +61815,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48598,6 +61825,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -48634,7 +61864,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48644,6 +61874,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48663,7 +61896,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48673,6 +61906,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48692,7 +61928,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48702,6 +61938,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48721,7 +61960,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48731,6 +61970,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48750,7 +61992,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48760,6 +62002,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48779,7 +62024,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48789,6 +62034,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48808,7 +62056,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48818,6 +62066,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48837,7 +62088,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48847,6 +62098,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48866,7 +62120,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.HealthCheckNodePort |= (int32(b) & 0x7F) << shift + m.HealthCheckNodePort |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -48885,7 +62139,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48905,7 +62159,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48914,6 +62168,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -48924,6 +62181,39 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPFamily", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := IPFamily(dAtA[iNdEx:postIndex]) + m.IPFamily = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -48933,6 +62223,9 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -48960,7 +62253,7 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -48988,7 +62281,7 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -48997,6 +62290,9 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49013,6 +62309,9 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49040,7 +62339,7 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49068,7 +62367,7 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49077,6 +62376,9 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49096,6 +62398,9 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49123,7 +62428,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49151,7 +62456,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49161,6 +62466,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49180,7 +62488,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49190,6 +62498,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx 
+ intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49209,7 +62520,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49219,6 +62530,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49238,7 +62552,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49258,7 +62572,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49267,6 +62581,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49286,6 +62603,9 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49313,7 +62633,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49341,7 +62661,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49351,6 +62671,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49370,7 +62693,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49380,6 +62703,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49399,7 +62725,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49409,6 +62735,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49428,7 +62757,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49448,7 +62777,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49457,6 +62786,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49476,6 +62808,9 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49503,7 +62838,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49531,7 +62866,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49541,6 +62876,9 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49560,7 +62898,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49570,6 +62908,9 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49584,6 +62925,9 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49611,7 +62955,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49639,7 +62983,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49648,6 +62992,9 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49669,7 +63016,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49679,6 +63026,9 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49693,6 +63043,9 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49720,7 +63073,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49748,7 +63101,7 @@ func (m 
*Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49758,6 +63111,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49777,7 +63133,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49787,6 +63143,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49806,7 +63165,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49816,6 +63175,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49835,7 +63197,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -49844,11 +63206,14 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.TimeAdded == nil { - m.TimeAdded = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.TimeAdded = &v1.Time{} } if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -49863,6 +63228,9 @@ func (m *Taint) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -49890,7 +63258,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49918,7 +63286,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49928,6 +63296,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49947,7 +63318,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49957,6 +63328,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -49976,7 +63350,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -49986,6 +63360,9 @@ func (m 
*Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50005,7 +63382,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50015,6 +63392,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50034,7 +63414,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -50049,6 +63429,9 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50076,7 +63459,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50104,7 +63487,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50114,6 +63497,9 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50133,7 +63519,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50143,6 +63529,9 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50157,6 +63546,9 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50184,7 +63576,7 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50212,7 +63604,7 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50221,6 +63613,9 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50238,6 +63633,181 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologySpreadConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologySpreadConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSkew", wireType) + } + m.MaxSkew = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxSkew |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenUnsatisfiable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenUnsatisfiable = UnsatisfiableConstraintAction(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50265,7 +63835,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50293,7 +63863,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50303,6 +63873,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50323,7 +63896,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50333,6 +63906,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50352,7 +63928,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50362,6 +63938,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50376,6 +63955,9 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50403,7 +63985,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50431,7 +64013,125 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.VolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50441,115 +64141,9 @@ func (m *Volume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.VolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeDevice) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeDevice: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeDevice: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } @@ -50569,7 +64163,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50579,6 +64173,9 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50593,6 +64190,9 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50620,7 +64220,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50648,7 +64248,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50658,6 +64258,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50677,7 +64280,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50697,7 +64300,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50707,6 +64310,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50726,7 +64332,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50736,6 +64342,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50755,7 +64364,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50765,6 +64374,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50785,7 +64397,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50795,6 +64407,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50809,6 +64424,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50836,7 +64454,7 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50864,7 +64482,7 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50873,6 +64491,9 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50892,6 +64513,9 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50919,7 +64543,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -50947,7 +64571,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50956,6 +64580,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50980,7 +64607,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -50989,6 +64616,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51013,7 +64643,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51022,6 +64652,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51046,7 +64679,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51055,6 +64688,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51074,6 +64710,9 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -51101,7 +64740,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -51129,7 +64768,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51138,6 +64777,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51162,7 +64804,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51171,6 +64813,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51195,7 +64840,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51204,6 +64849,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51228,7 +64876,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51237,6 +64885,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51261,7 +64912,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51270,6 +64921,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51294,7 +64948,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51303,6 +64957,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51327,7 +64984,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51336,6 +64993,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51360,7 +65020,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51369,6 
+65029,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51393,7 +65056,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51402,6 +65065,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51426,7 +65092,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51435,6 +65101,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51459,7 +65128,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51468,6 +65137,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51492,7 +65164,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51501,6 +65173,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51525,7 +65200,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51534,6 +65209,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51558,7 +65236,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51567,6 +65245,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51591,7 +65272,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51600,6 +65281,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51624,7 +65308,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break 
} @@ -51633,6 +65317,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51657,7 +65344,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51666,6 +65353,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51690,7 +65380,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51699,6 +65389,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51723,7 +65416,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51732,6 +65425,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51756,7 +65452,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51765,6 +65461,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51789,7 +65488,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51798,6 +65497,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51822,7 +65524,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51831,6 +65533,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51855,7 +65560,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51864,6 +65569,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51888,7 +65596,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 
0x80 { break } @@ -51897,6 +65605,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51921,7 +65632,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51930,6 +65641,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51954,7 +65668,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51963,6 +65677,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51987,7 +65704,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -51996,6 +65713,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52020,7 +65740,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -52029,6 +65749,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52048,6 +65771,9 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -52075,7 +65801,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -52103,7 +65829,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -52113,6 +65839,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52132,7 +65861,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -52142,6 +65871,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52161,7 +65893,7 @@ func (m 
*VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -52171,6 +65903,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52190,7 +65925,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -52200,6 +65935,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52214,6 +65952,9 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -52241,7 +65982,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -52269,7 +66010,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Weight |= (int32(b) & 0x7F) << shift + m.Weight |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -52288,7 +66029,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -52297,6 +66038,9 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52313,6 +66057,9 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -52340,7 +66087,7 @@ func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -52368,7 +66115,7 @@ func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -52378,6 +66125,9 @@ func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52398,7 +66148,7 @@ func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -52408,12 +66158,48 @@ func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated 
+ } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.GMSACredentialSpec = &s iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.RunAsUserName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -52423,6 +66209,9 @@ func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -52489,10 +66278,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -52521,6 +66313,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -52539,829 +66334,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 13088 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7d, 0x70, 0x64, 0x57, - 0x56, 0x18, 0xbe, 0xaf, 0x5b, 0x1f, 0xdd, 0x47, 0xdf, 0x77, 0x3e, 0xac, 0x91, 0x67, 0xa6, 0xc7, - 0xcf, 0xbb, 0xe3, 0xf1, 0xda, 0xd6, 0xac, 0xc7, 0xf6, 0xda, 0xac, 0xbd, 0x06, 0x49, 0x2d, 0xcd, - 0xc8, 0x33, 0xd2, 0xb4, 0x6f, 0x6b, 0x66, 0x76, 0x8d, 0x77, 0xf1, 0x53, 0xbf, 0x2b, 0xe9, 0x59, - 0xad, 0xf7, 0xda, 0xef, 0xbd, 0xd6, 0x8c, 0xfc, 0x83, 0xfa, 0x91, 0x25, 0x10, 0xb6, 0x20, 0xa9, - 0xad, 0x84, 0xca, 0x07, 0x50, 0xa4, 0x8a, 0x90, 0x02, 0x02, 0x49, 0x85, 0x40, 0x80, 0xb0, 0x24, - 0x21, 0x90, 0x54, 0x91, 0xfc, 0xb1, 0x21, 0xa9, 0x4a, 0x2d, 0x55, 0x54, 0x14, 0x10, 0xa9, 0x50, - 0xa4, 0x2a, 0x90, 0x0a, 0xf9, 0x07, 0x85, 0x0a, 0xa9, 0xfb, 0xf9, 0xee, 0x7d, 0xfd, 0x5e, 0x77, - 0x6b, 0xac, 0x91, 0xcd, 0xd6, 0xfe, 0xd7, 0x7d, 0xcf, 0xb9, 0xe7, 0xde, 0x77, 0x3f, 0xcf, 0x39, - 0xf7, 0x7c, 0xc0, 0xab, 0xdb, 0xaf, 0x44, 0xb3, 0x5e, 0x70, 0x75, 0xbb, 0xbd, 0x4e, 0x42, 0x9f, - 0xc4, 0x24, 0xba, 0xba, 0x4b, 0x7c, 0x37, 0x08, 0xaf, 0x0a, 0x80, 0xd3, 0xf2, 0xae, 0x36, 0x82, - 0x90, 0x5c, 0xdd, 0x7d, 0xfe, 0xea, 0x26, 0xf1, 0x49, 0xe8, 0xc4, 0xc4, 0x9d, 0x6d, 0x85, 0x41, - 0x1c, 0x20, 0xc4, 0x71, 0x66, 0x9d, 0x96, 0x37, 0x4b, 0x71, 0x66, 0x77, 0x9f, 0x9f, 0x79, 0x6e, - 0xd3, 0x8b, 0xb7, 0xda, 0xeb, 0xb3, 0x8d, 0x60, 0xe7, 0xea, 0x66, 0xb0, 0x19, 0x5c, 0x65, 0xa8, - 0xeb, 0xed, 0x0d, 0xf6, 0x8f, 0xfd, 0x61, 0xbf, 0x38, 0x89, 0x99, 0x17, 0x93, 0x66, 0x76, 0x9c, - 0xc6, 0x96, 0xe7, 
0x93, 0x70, 0xef, 0x6a, 0x6b, 0x7b, 0x93, 0xb5, 0x1b, 0x92, 0x28, 0x68, 0x87, - 0x0d, 0x92, 0x6e, 0xb8, 0x6b, 0xad, 0xe8, 0xea, 0x0e, 0x89, 0x9d, 0x8c, 0xee, 0xce, 0x5c, 0xcd, - 0xab, 0x15, 0xb6, 0xfd, 0xd8, 0xdb, 0xe9, 0x6c, 0xe6, 0xd3, 0xbd, 0x2a, 0x44, 0x8d, 0x2d, 0xb2, - 0xe3, 0x74, 0xd4, 0x7b, 0x21, 0xaf, 0x5e, 0x3b, 0xf6, 0x9a, 0x57, 0x3d, 0x3f, 0x8e, 0xe2, 0x30, - 0x5d, 0xc9, 0xfe, 0xba, 0x05, 0x97, 0xe6, 0xee, 0xd5, 0x17, 0x9b, 0x4e, 0x14, 0x7b, 0x8d, 0xf9, - 0x66, 0xd0, 0xd8, 0xae, 0xc7, 0x41, 0x48, 0xee, 0x06, 0xcd, 0xf6, 0x0e, 0xa9, 0xb3, 0x81, 0x40, - 0xcf, 0x42, 0x69, 0x97, 0xfd, 0x5f, 0xae, 0x4e, 0x5b, 0x97, 0xac, 0x2b, 0xe5, 0xf9, 0xc9, 0xdf, - 0xdc, 0xaf, 0x7c, 0xec, 0x60, 0xbf, 0x52, 0xba, 0x2b, 0xca, 0xb1, 0xc2, 0x40, 0x97, 0x61, 0x68, - 0x23, 0x5a, 0xdb, 0x6b, 0x91, 0xe9, 0x02, 0xc3, 0x1d, 0x17, 0xb8, 0x43, 0x4b, 0x75, 0x5a, 0x8a, - 0x05, 0x14, 0x5d, 0x85, 0x72, 0xcb, 0x09, 0x63, 0x2f, 0xf6, 0x02, 0x7f, 0xba, 0x78, 0xc9, 0xba, - 0x32, 0x38, 0x3f, 0x25, 0x50, 0xcb, 0x35, 0x09, 0xc0, 0x09, 0x0e, 0xed, 0x46, 0x48, 0x1c, 0xf7, - 0xb6, 0xdf, 0xdc, 0x9b, 0x1e, 0xb8, 0x64, 0x5d, 0x29, 0x25, 0xdd, 0xc0, 0xa2, 0x1c, 0x2b, 0x0c, - 0xfb, 0x87, 0x0b, 0x50, 0x9a, 0xdb, 0xd8, 0xf0, 0x7c, 0x2f, 0xde, 0x43, 0x77, 0x61, 0xd4, 0x0f, - 0x5c, 0x22, 0xff, 0xb3, 0xaf, 0x18, 0xb9, 0x76, 0x69, 0xb6, 0x73, 0x29, 0xcd, 0xae, 0x6a, 0x78, - 0xf3, 0x93, 0x07, 0xfb, 0x95, 0x51, 0xbd, 0x04, 0x1b, 0x74, 0x10, 0x86, 0x91, 0x56, 0xe0, 0x2a, - 0xb2, 0x05, 0x46, 0xb6, 0x92, 0x45, 0xb6, 0x96, 0xa0, 0xcd, 0x4f, 0x1c, 0xec, 0x57, 0x46, 0xb4, - 0x02, 0xac, 0x13, 0x41, 0xeb, 0x30, 0x41, 0xff, 0xfa, 0xb1, 0xa7, 0xe8, 0x16, 0x19, 0xdd, 0x27, - 0xf3, 0xe8, 0x6a, 0xa8, 0xf3, 0xa7, 0x0e, 0xf6, 0x2b, 0x13, 0xa9, 0x42, 0x9c, 0x26, 0x68, 0xbf, - 0x0f, 0xe3, 0x73, 0x71, 0xec, 0x34, 0xb6, 0x88, 0xcb, 0x67, 0x10, 0xbd, 0x08, 0x03, 0xbe, 0xb3, - 0x43, 0xc4, 0xfc, 0x5e, 0x12, 0x03, 0x3b, 0xb0, 0xea, 0xec, 0x90, 0xc3, 0xfd, 0xca, 0xe4, 0x1d, - 0xdf, 0x7b, 0xaf, 0x2d, 0x56, 0x05, 0x2d, 0xc3, 0x0c, 0x1b, 0x5d, 0x03, 0x70, 0xc9, 0xae, 0xd7, - 0x20, 0x35, 0x27, 0xde, 0x12, 0xf3, 0x8d, 0x44, 0x5d, 0xa8, 0x2a, 0x08, 0xd6, 0xb0, 0xec, 0x07, - 0x50, 0x9e, 0xdb, 0x0d, 0x3c, 0xb7, 0x16, 0xb8, 0x11, 0xda, 0x86, 0x89, 0x56, 0x48, 0x36, 0x48, - 0xa8, 0x8a, 0xa6, 0xad, 0x4b, 0xc5, 0x2b, 0x23, 0xd7, 0xae, 0x64, 0x7e, 0xac, 0x89, 0xba, 0xe8, - 0xc7, 0xe1, 0xde, 0xfc, 0x63, 0xa2, 0xbd, 0x89, 0x14, 0x14, 0xa7, 0x29, 0xdb, 0xff, 0xba, 0x00, - 0x67, 0xe6, 0xde, 0x6f, 0x87, 0xa4, 0xea, 0x45, 0xdb, 0xe9, 0x15, 0xee, 0x7a, 0xd1, 0xf6, 0x6a, - 0x32, 0x02, 0x6a, 0x69, 0x55, 0x45, 0x39, 0x56, 0x18, 0xe8, 0x39, 0x18, 0xa6, 0xbf, 0xef, 0xe0, - 0x65, 0xf1, 0xc9, 0xa7, 0x04, 0xf2, 0x48, 0xd5, 0x89, 0x9d, 0x2a, 0x07, 0x61, 0x89, 0x83, 0x56, - 0x60, 0xa4, 0xc1, 0x36, 0xe4, 0xe6, 0x4a, 0xe0, 0x12, 0x36, 0x99, 0xe5, 0xf9, 0x67, 0x28, 0xfa, - 0x42, 0x52, 0x7c, 0xb8, 0x5f, 0x99, 0xe6, 0x7d, 0x13, 0x24, 0x34, 0x18, 0xd6, 0xeb, 0x23, 0x5b, - 0xed, 0xaf, 0x01, 0x46, 0x09, 0x32, 0xf6, 0xd6, 0x15, 0x6d, 0xab, 0x0c, 0xb2, 0xad, 0x32, 0x9a, - 0xbd, 0x4d, 0xd0, 0xf3, 0x30, 0xb0, 0xed, 0xf9, 0xee, 0xf4, 0x10, 0xa3, 0x75, 0x81, 0xce, 0xf9, - 0x4d, 0xcf, 0x77, 0x0f, 0xf7, 0x2b, 0x53, 0x46, 0x77, 0x68, 0x21, 0x66, 0xa8, 0xf6, 0x9f, 0x58, - 0x50, 0x61, 0xb0, 0x25, 0xaf, 0x49, 0x6a, 0x24, 0x8c, 0xbc, 0x28, 0x26, 0x7e, 0x6c, 0x0c, 0xe8, - 0x35, 0x80, 0x88, 0x34, 0x42, 0x12, 0x6b, 0x43, 0xaa, 0x16, 0x46, 0x5d, 0x41, 0xb0, 0x86, 0x45, - 0x0f, 0x84, 0x68, 0xcb, 0x09, 0xd9, 0xfa, 0x12, 0x03, 0xab, 0x0e, 0x84, 0xba, 0x04, 0xe0, 0x04, - 0xc7, 0x38, 0x10, 0x8a, 0xbd, 0x0e, 0x04, 
0xf4, 0x59, 0x98, 0x48, 0x1a, 0x8b, 0x5a, 0x4e, 0x43, - 0x0e, 0x20, 0xdb, 0x32, 0x75, 0x13, 0x84, 0xd3, 0xb8, 0xf6, 0x3f, 0xb0, 0xc4, 0xe2, 0xa1, 0x5f, - 0xfd, 0x11, 0xff, 0x56, 0xfb, 0x97, 0x2d, 0x18, 0x9e, 0xf7, 0x7c, 0xd7, 0xf3, 0x37, 0xd1, 0x3b, - 0x50, 0xa2, 0x77, 0x93, 0xeb, 0xc4, 0x8e, 0x38, 0xf7, 0x3e, 0xa5, 0xed, 0x2d, 0x75, 0x55, 0xcc, - 0xb6, 0xb6, 0x37, 0x69, 0x41, 0x34, 0x4b, 0xb1, 0xe9, 0x6e, 0xbb, 0xbd, 0xfe, 0x2e, 0x69, 0xc4, - 0x2b, 0x24, 0x76, 0x92, 0xcf, 0x49, 0xca, 0xb0, 0xa2, 0x8a, 0x6e, 0xc2, 0x50, 0xec, 0x84, 0x9b, - 0x24, 0x16, 0x07, 0x60, 0xe6, 0x41, 0xc5, 0x6b, 0x62, 0xba, 0x23, 0x89, 0xdf, 0x20, 0xc9, 0xb5, - 0xb0, 0xc6, 0xaa, 0x62, 0x41, 0xc2, 0xfe, 0xab, 0xc3, 0x70, 0x6e, 0xa1, 0xbe, 0x9c, 0xb3, 0xae, - 0x2e, 0xc3, 0x90, 0x1b, 0x7a, 0xbb, 0x24, 0x14, 0xe3, 0xac, 0xa8, 0x54, 0x59, 0x29, 0x16, 0x50, - 0xf4, 0x0a, 0x8c, 0xf2, 0x0b, 0xe9, 0x86, 0xe3, 0xbb, 0x4d, 0x39, 0xc4, 0xa7, 0x05, 0xf6, 0xe8, - 0x5d, 0x0d, 0x86, 0x0d, 0xcc, 0x23, 0x2e, 0xaa, 0xcb, 0xa9, 0xcd, 0x98, 0x77, 0xd9, 0x7d, 0xd9, - 0x82, 0x49, 0xde, 0xcc, 0x5c, 0x1c, 0x87, 0xde, 0x7a, 0x3b, 0x26, 0xd1, 0xf4, 0x20, 0x3b, 0xe9, - 0x16, 0xb2, 0x46, 0x2b, 0x77, 0x04, 0x66, 0xef, 0xa6, 0xa8, 0xf0, 0x43, 0x70, 0x5a, 0xb4, 0x3b, - 0x99, 0x06, 0xe3, 0x8e, 0x66, 0xd1, 0xf7, 0x58, 0x30, 0xd3, 0x08, 0xfc, 0x38, 0x0c, 0x9a, 0x4d, - 0x12, 0xd6, 0xda, 0xeb, 0x4d, 0x2f, 0xda, 0xe2, 0xeb, 0x14, 0x93, 0x0d, 0x76, 0x12, 0xe4, 0xcc, - 0xa1, 0x42, 0x12, 0x73, 0x78, 0xf1, 0x60, 0xbf, 0x32, 0xb3, 0x90, 0x4b, 0x0a, 0x77, 0x69, 0x06, - 0x6d, 0x03, 0xa2, 0x57, 0x69, 0x3d, 0x76, 0x36, 0x49, 0xd2, 0xf8, 0x70, 0xff, 0x8d, 0x9f, 0x3d, - 0xd8, 0xaf, 0xa0, 0xd5, 0x0e, 0x12, 0x38, 0x83, 0x2c, 0x7a, 0x0f, 0x4e, 0xd3, 0xd2, 0x8e, 0x6f, - 0x2d, 0xf5, 0xdf, 0xdc, 0xf4, 0xc1, 0x7e, 0xe5, 0xf4, 0x6a, 0x06, 0x11, 0x9c, 0x49, 0x1a, 0x7d, - 0xb7, 0x05, 0xe7, 0x92, 0xcf, 0x5f, 0x7c, 0xd0, 0x72, 0x7c, 0x37, 0x69, 0xb8, 0xdc, 0x7f, 0xc3, - 0xf4, 0x4c, 0x3e, 0xb7, 0x90, 0x47, 0x09, 0xe7, 0x37, 0x32, 0xb3, 0x00, 0x67, 0x32, 0x57, 0x0b, - 0x9a, 0x84, 0xe2, 0x36, 0xe1, 0x5c, 0x50, 0x19, 0xd3, 0x9f, 0xe8, 0x34, 0x0c, 0xee, 0x3a, 0xcd, - 0xb6, 0xd8, 0x28, 0x98, 0xff, 0xf9, 0x4c, 0xe1, 0x15, 0xcb, 0xfe, 0x37, 0x45, 0x98, 0x58, 0xa8, - 0x2f, 0x3f, 0xd4, 0x2e, 0xd4, 0xaf, 0xa1, 0x42, 0xd7, 0x6b, 0x28, 0xb9, 0xd4, 0x8a, 0xb9, 0x97, - 0xda, 0xff, 0x9f, 0xb1, 0x85, 0x06, 0xd8, 0x16, 0xfa, 0x96, 0x9c, 0x2d, 0x74, 0xcc, 0x1b, 0x67, - 0x37, 0x67, 0x15, 0x0d, 0xb2, 0xc9, 0xcc, 0xe4, 0x58, 0x6e, 0x05, 0x0d, 0xa7, 0x99, 0x3e, 0xfa, - 0x8e, 0xb8, 0x94, 0x8e, 0x67, 0x1e, 0x1b, 0x30, 0xba, 0xe0, 0xb4, 0x9c, 0x75, 0xaf, 0xe9, 0xc5, - 0x1e, 0x89, 0xd0, 0x53, 0x50, 0x74, 0x5c, 0x97, 0x71, 0x5b, 0xe5, 0xf9, 0x33, 0x07, 0xfb, 0x95, - 0xe2, 0x9c, 0x4b, 0xaf, 0x7d, 0x50, 0x58, 0x7b, 0x98, 0x62, 0xa0, 0x4f, 0xc2, 0x80, 0x1b, 0x06, - 0xad, 0xe9, 0x02, 0xc3, 0xa4, 0xbb, 0x6e, 0xa0, 0x1a, 0x06, 0xad, 0x14, 0x2a, 0xc3, 0xb1, 0x7f, - 0xad, 0x00, 0xe7, 0x17, 0x48, 0x6b, 0x6b, 0xa9, 0x9e, 0x73, 0x7e, 0x5f, 0x81, 0xd2, 0x4e, 0xe0, - 0x7b, 0x71, 0x10, 0x46, 0xa2, 0x69, 0xb6, 0x22, 0x56, 0x44, 0x19, 0x56, 0x50, 0x74, 0x09, 0x06, - 0x5a, 0x09, 0x53, 0x39, 0x2a, 0x19, 0x52, 0xc6, 0x4e, 0x32, 0x08, 0xc5, 0x68, 0x47, 0x24, 0x14, - 0x2b, 0x46, 0x61, 0xdc, 0x89, 0x48, 0x88, 0x19, 0x24, 0xb9, 0x99, 0xe9, 0x9d, 0x2d, 0x4e, 0xe8, - 0xd4, 0xcd, 0x4c, 0x21, 0x58, 0xc3, 0x42, 0x35, 0x28, 0x47, 0xa9, 0x99, 0xed, 0x6b, 0x9b, 0x8e, - 0xb1, 0xab, 0x5b, 0xcd, 0x64, 0x42, 0xc4, 0xb8, 0x51, 0x86, 0x7a, 0x5e, 0xdd, 0x5f, 0x2d, 0x00, - 0xe2, 0x43, 0xf8, 0x17, 0x6c, 0xe0, 0xee, 0x74, 0x0e, 0x5c, 0xff, 
0x5b, 0xe2, 0xb8, 0x46, 0xef, - 0x7f, 0x5b, 0x70, 0x7e, 0xc1, 0xf3, 0x5d, 0x12, 0xe6, 0x2c, 0xc0, 0x47, 0x23, 0xcb, 0x1e, 0x8d, - 0x69, 0x30, 0x96, 0xd8, 0xc0, 0x31, 0x2c, 0x31, 0xfb, 0x8f, 0x2d, 0x40, 0xfc, 0xb3, 0x3f, 0x72, - 0x1f, 0x7b, 0xa7, 0xf3, 0x63, 0x8f, 0x61, 0x59, 0xd8, 0xb7, 0x60, 0x7c, 0xa1, 0xe9, 0x11, 0x3f, - 0x5e, 0xae, 0x2d, 0x04, 0xfe, 0x86, 0xb7, 0x89, 0x3e, 0x03, 0xe3, 0xb1, 0xb7, 0x43, 0x82, 0x76, - 0x5c, 0x27, 0x8d, 0xc0, 0x67, 0x92, 0xa4, 0x75, 0x65, 0x70, 0x1e, 0x1d, 0xec, 0x57, 0xc6, 0xd7, - 0x0c, 0x08, 0x4e, 0x61, 0xda, 0xbf, 0x43, 0xc7, 0x2f, 0xd8, 0x69, 0x05, 0x3e, 0xf1, 0xe3, 0x85, - 0xc0, 0x77, 0xb9, 0xc6, 0xe1, 0x33, 0x30, 0x10, 0xd3, 0xf1, 0xe0, 0x63, 0x77, 0x59, 0x6e, 0x14, - 0x3a, 0x0a, 0x87, 0xfb, 0x95, 0xb3, 0x9d, 0x35, 0xd8, 0x38, 0xb1, 0x3a, 0xe8, 0x5b, 0x60, 0x28, - 0x8a, 0x9d, 0xb8, 0x1d, 0x89, 0xd1, 0x7c, 0x42, 0x8e, 0x66, 0x9d, 0x95, 0x1e, 0xee, 0x57, 0x26, - 0x54, 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x34, 0x0c, 0xef, 0x90, 0x28, 0x72, 0x36, 0xe5, 0x6d, - 0x38, 0x21, 0xea, 0x0e, 0xaf, 0xf0, 0x62, 0x2c, 0xe1, 0xe8, 0x49, 0x18, 0x24, 0x61, 0x18, 0x84, - 0x62, 0x8f, 0x8e, 0x09, 0xc4, 0xc1, 0x45, 0x5a, 0x88, 0x39, 0xcc, 0xfe, 0xf7, 0x16, 0x4c, 0xa8, - 0xbe, 0xf2, 0xb6, 0x4e, 0x40, 0x2a, 0x78, 0x0b, 0xa0, 0x21, 0x3f, 0x30, 0x62, 0xb7, 0xc7, 0xc8, - 0xb5, 0xcb, 0x99, 0x17, 0x75, 0xc7, 0x30, 0x26, 0x94, 0x55, 0x51, 0x84, 0x35, 0x6a, 0xf6, 0x3f, - 0xb7, 0xe0, 0x54, 0xea, 0x8b, 0x6e, 0x79, 0x51, 0x8c, 0xde, 0xee, 0xf8, 0xaa, 0xd9, 0xfe, 0xbe, - 0x8a, 0xd6, 0x66, 0xdf, 0xa4, 0x96, 0xb2, 0x2c, 0xd1, 0xbe, 0xe8, 0x06, 0x0c, 0x7a, 0x31, 0xd9, - 0x91, 0x1f, 0xf3, 0x64, 0xd7, 0x8f, 0xe1, 0xbd, 0x4a, 0x66, 0x64, 0x99, 0xd6, 0xc4, 0x9c, 0x80, - 0xfd, 0x37, 0x8a, 0x50, 0xe6, 0xcb, 0x76, 0xc5, 0x69, 0x9d, 0xc0, 0x5c, 0x2c, 0xc3, 0x00, 0xa3, - 0xce, 0x3b, 0xfe, 0x54, 0x76, 0xc7, 0x45, 0x77, 0x66, 0xa9, 0xc8, 0xcf, 0x99, 0x23, 0x75, 0x35, - 0xd0, 0x22, 0xcc, 0x48, 0x20, 0x07, 0x60, 0xdd, 0xf3, 0x9d, 0x70, 0x8f, 0x96, 0x4d, 0x17, 0x19, - 0xc1, 0xe7, 0xba, 0x13, 0x9c, 0x57, 0xf8, 0x9c, 0xac, 0xea, 0x6b, 0x02, 0xc0, 0x1a, 0xd1, 0x99, - 0x97, 0xa1, 0xac, 0x90, 0x8f, 0xc2, 0xe3, 0xcc, 0x7c, 0x16, 0x26, 0x52, 0x6d, 0xf5, 0xaa, 0x3e, - 0xaa, 0xb3, 0x48, 0xbf, 0xc2, 0x4e, 0x01, 0xd1, 0xeb, 0x45, 0x7f, 0x57, 0x9c, 0xa2, 0xef, 0xc3, - 0xe9, 0x66, 0xc6, 0xe1, 0x24, 0xa6, 0xaa, 0xff, 0xc3, 0xec, 0xbc, 0xf8, 0xec, 0xd3, 0x59, 0x50, - 0x9c, 0xd9, 0x06, 0xbd, 0xf6, 0x83, 0x16, 0x5d, 0xf3, 0x4e, 0x53, 0xe7, 0xa0, 0x6f, 0x8b, 0x32, - 0xac, 0xa0, 0xf4, 0x08, 0x3b, 0xad, 0x3a, 0x7f, 0x93, 0xec, 0xd5, 0x49, 0x93, 0x34, 0xe2, 0x20, - 0xfc, 0x50, 0xbb, 0x7f, 0x81, 0x8f, 0x3e, 0x3f, 0x01, 0x47, 0x04, 0x81, 0xe2, 0x4d, 0xb2, 0xc7, - 0xa7, 0x42, 0xff, 0xba, 0x62, 0xd7, 0xaf, 0xfb, 0x39, 0x0b, 0xc6, 0xd4, 0xd7, 0x9d, 0xc0, 0x56, - 0x9f, 0x37, 0xb7, 0xfa, 0x85, 0xae, 0x0b, 0x3c, 0x67, 0x93, 0x7f, 0xb5, 0x00, 0xe7, 0x14, 0x0e, - 0x65, 0xf7, 0xf9, 0x1f, 0xb1, 0xaa, 0xae, 0x42, 0xd9, 0x57, 0x8a, 0x28, 0xcb, 0xd4, 0x00, 0x25, - 0x6a, 0xa8, 0x04, 0x87, 0x72, 0x6d, 0x7e, 0xa2, 0x2d, 0x1a, 0xd5, 0x35, 0xb4, 0x42, 0x1b, 0x3b, - 0x0f, 0xc5, 0xb6, 0xe7, 0x8a, 0x3b, 0xe3, 0x53, 0x72, 0xb4, 0xef, 0x2c, 0x57, 0x0f, 0xf7, 0x2b, - 0x4f, 0xe4, 0xbd, 0x0e, 0xd0, 0xcb, 0x2a, 0x9a, 0xbd, 0xb3, 0x5c, 0xc5, 0xb4, 0x32, 0x9a, 0x83, - 0x09, 0xf9, 0x00, 0x72, 0x97, 0x72, 0x50, 0x81, 0x2f, 0xae, 0x16, 0xa5, 0x66, 0xc5, 0x26, 0x18, - 0xa7, 0xf1, 0x51, 0x15, 0x26, 0xb7, 0xdb, 0xeb, 0xa4, 0x49, 0x62, 0xfe, 0xc1, 0x37, 0x09, 0x57, - 0x42, 0x96, 0x13, 0x61, 0xeb, 0x66, 0x0a, 0x8e, 0x3b, 0x6a, 0xd8, 0x7f, 0xce, 0x8e, 0x78, 
0x31, - 0x7a, 0xb5, 0x30, 0xa0, 0x0b, 0x8b, 0x52, 0xff, 0x30, 0x97, 0x73, 0x3f, 0xab, 0xe2, 0x26, 0xd9, - 0x5b, 0x0b, 0x28, 0xb3, 0x9d, 0xbd, 0x2a, 0x8c, 0x35, 0x3f, 0xd0, 0x75, 0xcd, 0xff, 0x42, 0x01, - 0xce, 0xa8, 0x11, 0x30, 0xf8, 0xba, 0xbf, 0xe8, 0x63, 0xf0, 0x3c, 0x8c, 0xb8, 0x64, 0xc3, 0x69, - 0x37, 0x63, 0xa5, 0x11, 0x1f, 0xe4, 0xaf, 0x22, 0xd5, 0xa4, 0x18, 0xeb, 0x38, 0x47, 0x18, 0xb6, - 0x9f, 0x18, 0x61, 0x77, 0x6b, 0xec, 0xd0, 0x35, 0xae, 0x76, 0x8d, 0x95, 0xbb, 0x6b, 0x9e, 0x84, - 0x41, 0x6f, 0x87, 0xf2, 0x5a, 0x05, 0x93, 0x85, 0x5a, 0xa6, 0x85, 0x98, 0xc3, 0xd0, 0x27, 0x60, - 0xb8, 0x11, 0xec, 0xec, 0x38, 0xbe, 0xcb, 0xae, 0xbc, 0xf2, 0xfc, 0x08, 0x65, 0xc7, 0x16, 0x78, - 0x11, 0x96, 0x30, 0x74, 0x1e, 0x06, 0x9c, 0x70, 0x93, 0xab, 0x25, 0xca, 0xf3, 0x25, 0xda, 0xd2, - 0x5c, 0xb8, 0x19, 0x61, 0x56, 0x4a, 0xa5, 0xaa, 0xfb, 0x41, 0xb8, 0xed, 0xf9, 0x9b, 0x55, 0x2f, - 0x14, 0x5b, 0x42, 0xdd, 0x85, 0xf7, 0x14, 0x04, 0x6b, 0x58, 0x68, 0x09, 0x06, 0x5b, 0x41, 0x18, - 0x47, 0xd3, 0x43, 0x6c, 0xb8, 0x9f, 0xc8, 0x39, 0x88, 0xf8, 0xd7, 0xd6, 0x82, 0x30, 0x4e, 0x3e, - 0x80, 0xfe, 0x8b, 0x30, 0xaf, 0x8e, 0xbe, 0x05, 0x8a, 0xc4, 0xdf, 0x9d, 0x1e, 0x66, 0x54, 0x66, - 0xb2, 0xa8, 0x2c, 0xfa, 0xbb, 0x77, 0x9d, 0x30, 0x39, 0xa5, 0x17, 0xfd, 0x5d, 0x4c, 0xeb, 0xa0, - 0xcf, 0x43, 0x59, 0x6e, 0xf1, 0x48, 0x68, 0xcc, 0x32, 0x97, 0x98, 0x3c, 0x18, 0x30, 0x79, 0xaf, - 0xed, 0x85, 0x64, 0x87, 0xf8, 0x71, 0x94, 0x9c, 0x69, 0x12, 0x1a, 0xe1, 0x84, 0x1a, 0xfa, 0xbc, - 0x54, 0xd3, 0xae, 0x04, 0x6d, 0x3f, 0x8e, 0xa6, 0xcb, 0xac, 0x7b, 0x99, 0x0f, 0x68, 0x77, 0x13, - 0xbc, 0xb4, 0x1e, 0x97, 0x57, 0xc6, 0x06, 0x29, 0x84, 0x61, 0xac, 0xe9, 0xed, 0x12, 0x9f, 0x44, - 0x51, 0x2d, 0x0c, 0xd6, 0xc9, 0x34, 0xb0, 0x9e, 0x9f, 0xcb, 0x7e, 0x57, 0x0a, 0xd6, 0xc9, 0xfc, - 0xd4, 0xc1, 0x7e, 0x65, 0xec, 0x96, 0x5e, 0x07, 0x9b, 0x24, 0xd0, 0x1d, 0x18, 0xa7, 0x72, 0x8d, - 0x97, 0x10, 0x1d, 0xe9, 0x45, 0x94, 0x49, 0x1f, 0xd8, 0xa8, 0x84, 0x53, 0x44, 0xd0, 0x1b, 0x50, - 0x6e, 0x7a, 0x1b, 0xa4, 0xb1, 0xd7, 0x68, 0x92, 0xe9, 0x51, 0x46, 0x31, 0x73, 0x5b, 0xdd, 0x92, - 0x48, 0x5c, 0x2e, 0x52, 0x7f, 0x71, 0x52, 0x1d, 0xdd, 0x85, 0xb3, 0x31, 0x09, 0x77, 0x3c, 0xdf, - 0xa1, 0xdb, 0x41, 0xc8, 0x0b, 0xec, 0x75, 0x6e, 0x8c, 0xad, 0xb7, 0x8b, 0x62, 0xe8, 0xce, 0xae, - 0x65, 0x62, 0xe1, 0x9c, 0xda, 0xe8, 0x36, 0x4c, 0xb0, 0x9d, 0x50, 0x6b, 0x37, 0x9b, 0xb5, 0xa0, - 0xe9, 0x35, 0xf6, 0xa6, 0xc7, 0x19, 0xc1, 0x4f, 0xc8, 0x7b, 0x61, 0xd9, 0x04, 0x1f, 0xee, 0x57, - 0x20, 0xf9, 0x87, 0xd3, 0xb5, 0xd1, 0x3a, 0x7b, 0x8e, 0x69, 0x87, 0x5e, 0xbc, 0x47, 0xd7, 0x2f, - 0x79, 0x10, 0x4f, 0x4f, 0x74, 0x15, 0x85, 0x75, 0x54, 0xf5, 0x66, 0xa3, 0x17, 0xe2, 0x34, 0x41, - 0xba, 0xb5, 0xa3, 0xd8, 0xf5, 0xfc, 0xe9, 0x49, 0x76, 0x62, 0xa8, 0x9d, 0x51, 0xa7, 0x85, 0x98, - 0xc3, 0xd8, 0x53, 0x0c, 0xfd, 0x71, 0x9b, 0x9e, 0xa0, 0x53, 0x0c, 0x31, 0x79, 0x8a, 0x91, 0x00, - 0x9c, 0xe0, 0x50, 0xa6, 0x26, 0x8e, 0xf7, 0xa6, 0x11, 0x43, 0x55, 0xdb, 0x65, 0x6d, 0xed, 0xf3, - 0x98, 0x96, 0xa3, 0x5b, 0x30, 0x4c, 0xfc, 0xdd, 0xa5, 0x30, 0xd8, 0x99, 0x3e, 0x95, 0xbf, 0x67, - 0x17, 0x39, 0x0a, 0x3f, 0xd0, 0x13, 0x01, 0x4f, 0x14, 0x63, 0x49, 0x02, 0x3d, 0x80, 0xe9, 0x8c, - 0x19, 0xe1, 0x13, 0x70, 0x9a, 0x4d, 0xc0, 0x6b, 0xa2, 0xee, 0xf4, 0x5a, 0x0e, 0xde, 0x61, 0x17, - 0x18, 0xce, 0xa5, 0x8e, 0xbe, 0x00, 0x63, 0x7c, 0x43, 0xf1, 0x77, 0xdc, 0x68, 0xfa, 0x0c, 0xfb, - 0x9a, 0x4b, 0xf9, 0x9b, 0x93, 0x23, 0xce, 0x9f, 0x11, 0x1d, 0x1a, 0xd3, 0x4b, 0x23, 0x6c, 0x52, - 0xb3, 0xd7, 0x61, 0x5c, 0x9d, 0x5b, 0x6c, 0xe9, 0xa0, 0x0a, 0x0c, 0x32, 0x6e, 0x47, 0xe8, 0xb7, - 0xca, 0x74, 0xa6, 
0x18, 0x27, 0x84, 0x79, 0x39, 0x9b, 0x29, 0xef, 0x7d, 0x32, 0xbf, 0x17, 0x13, - 0x2e, 0x55, 0x17, 0xb5, 0x99, 0x92, 0x00, 0x9c, 0xe0, 0xd8, 0xff, 0x97, 0x73, 0x8d, 0xc9, 0xe1, - 0xd8, 0xc7, 0x75, 0xf0, 0x2c, 0x94, 0xb6, 0x82, 0x28, 0xa6, 0xd8, 0xac, 0x8d, 0xc1, 0x84, 0x4f, - 0xbc, 0x21, 0xca, 0xb1, 0xc2, 0x40, 0xaf, 0xc2, 0x58, 0x43, 0x6f, 0x40, 0xdc, 0x65, 0x6a, 0x08, - 0x8c, 0xd6, 0xb1, 0x89, 0x8b, 0x5e, 0x81, 0x12, 0xb3, 0xc2, 0x68, 0x04, 0x4d, 0xc1, 0x64, 0xc9, - 0x0b, 0xb9, 0x54, 0x13, 0xe5, 0x87, 0xda, 0x6f, 0xac, 0xb0, 0xd1, 0x65, 0x18, 0xa2, 0x5d, 0x58, - 0xae, 0x89, 0x5b, 0x44, 0xa9, 0x6a, 0x6e, 0xb0, 0x52, 0x2c, 0xa0, 0xf6, 0x5f, 0x2f, 0x68, 0xa3, - 0x4c, 0x25, 0x52, 0x82, 0x6a, 0x30, 0x7c, 0xdf, 0xf1, 0x62, 0xcf, 0xdf, 0x14, 0xec, 0xc2, 0xd3, - 0x5d, 0xaf, 0x14, 0x56, 0xe9, 0x1e, 0xaf, 0xc0, 0x2f, 0x3d, 0xf1, 0x07, 0x4b, 0x32, 0x94, 0x62, - 0xd8, 0xf6, 0x7d, 0x4a, 0xb1, 0xd0, 0x2f, 0x45, 0xcc, 0x2b, 0x70, 0x8a, 0xe2, 0x0f, 0x96, 0x64, - 0xd0, 0xdb, 0x00, 0x72, 0x59, 0x12, 0x57, 0x58, 0x3f, 0x3c, 0xdb, 0x9b, 0xe8, 0x9a, 0xaa, 0x33, - 0x3f, 0x4e, 0xaf, 0xd4, 0xe4, 0x3f, 0xd6, 0xe8, 0xd9, 0x31, 0x63, 0xab, 0x3a, 0x3b, 0x83, 0xbe, - 0x9d, 0x9e, 0x04, 0x4e, 0x18, 0x13, 0x77, 0x2e, 0x16, 0x83, 0xf3, 0xc9, 0xfe, 0x64, 0x8a, 0x35, - 0x6f, 0x87, 0xe8, 0xa7, 0x86, 0x20, 0x82, 0x13, 0x7a, 0xf6, 0x2f, 0x15, 0x61, 0x3a, 0xaf, 0xbb, - 0x74, 0xd1, 0x91, 0x07, 0x5e, 0xbc, 0x40, 0xb9, 0x21, 0xcb, 0x5c, 0x74, 0x8b, 0xa2, 0x1c, 0x2b, - 0x0c, 0x3a, 0xfb, 0x91, 0xb7, 0x29, 0x45, 0xc2, 0xc1, 0x64, 0xf6, 0xeb, 0xac, 0x14, 0x0b, 0x28, - 0xc5, 0x0b, 0x89, 0x13, 0x09, 0xf3, 0x1a, 0x6d, 0x95, 0x60, 0x56, 0x8a, 0x05, 0x54, 0xd7, 0x37, - 0x0d, 0xf4, 0xd0, 0x37, 0x19, 0x43, 0x34, 0x78, 0xbc, 0x43, 0x84, 0xbe, 0x08, 0xb0, 0xe1, 0xf9, - 0x5e, 0xb4, 0xc5, 0xa8, 0x0f, 0x1d, 0x99, 0xba, 0xe2, 0xa5, 0x96, 0x14, 0x15, 0xac, 0x51, 0x44, - 0x2f, 0xc1, 0x88, 0xda, 0x80, 0xcb, 0x55, 0xf6, 0xd6, 0xa8, 0xd9, 0x6e, 0x24, 0xa7, 0x51, 0x15, - 0xeb, 0x78, 0xf6, 0xbb, 0xe9, 0xf5, 0x22, 0x76, 0x80, 0x36, 0xbe, 0x56, 0xbf, 0xe3, 0x5b, 0xe8, - 0x3e, 0xbe, 0xf6, 0xaf, 0x17, 0x61, 0xc2, 0x68, 0xac, 0x1d, 0xf5, 0x71, 0x66, 0x5d, 0xa7, 0xf7, - 0x9c, 0x13, 0x13, 0xb1, 0xff, 0xec, 0xde, 0x5b, 0x45, 0xbf, 0x0b, 0xe9, 0x0e, 0xe0, 0xf5, 0xd1, - 0x17, 0xa1, 0xdc, 0x74, 0x22, 0xa6, 0xbb, 0x22, 0x62, 0xdf, 0xf5, 0x43, 0x2c, 0x91, 0x23, 0x9c, - 0x28, 0xd6, 0xae, 0x1a, 0x4e, 0x3b, 0x21, 0x49, 0x2f, 0x64, 0xca, 0xfb, 0x48, 0xfb, 0x2d, 0xd5, - 0x09, 0xca, 0x20, 0xed, 0x61, 0x0e, 0x43, 0xaf, 0xc0, 0x68, 0x48, 0xd8, 0xaa, 0x58, 0xa0, 0xac, - 0x1c, 0x5b, 0x66, 0x83, 0x09, 0xcf, 0x87, 0x35, 0x18, 0x36, 0x30, 0x13, 0x56, 0x7e, 0xa8, 0x0b, - 0x2b, 0xff, 0x34, 0x0c, 0xb3, 0x1f, 0x6a, 0x05, 0xa8, 0xd9, 0x58, 0xe6, 0xc5, 0x58, 0xc2, 0xd3, - 0x0b, 0xa6, 0xd4, 0xe7, 0x82, 0xf9, 0x24, 0x8c, 0x57, 0x1d, 0xb2, 0x13, 0xf8, 0x8b, 0xbe, 0xdb, - 0x0a, 0x3c, 0x3f, 0x46, 0xd3, 0x30, 0xc0, 0x6e, 0x07, 0xbe, 0xb7, 0x07, 0x28, 0x05, 0x3c, 0x40, - 0x19, 0x73, 0x7b, 0x13, 0xce, 0x54, 0x83, 0xfb, 0xfe, 0x7d, 0x27, 0x74, 0xe7, 0x6a, 0xcb, 0x9a, - 0x9c, 0xbb, 0x2a, 0xe5, 0x2c, 0x6e, 0x0f, 0x95, 0x79, 0xa6, 0x6a, 0x35, 0xf9, 0x5d, 0xbb, 0xe4, - 0x35, 0x49, 0x8e, 0x36, 0xe2, 0x6f, 0x15, 0x8c, 0x96, 0x12, 0x7c, 0xf5, 0x60, 0x64, 0xe5, 0x3e, - 0x18, 0xbd, 0x09, 0xa5, 0x0d, 0x8f, 0x34, 0x5d, 0x4c, 0x36, 0xc4, 0x12, 0x7b, 0x2a, 0xdf, 0xc4, - 0x63, 0x89, 0x62, 0x4a, 0xed, 0x13, 0x97, 0xd2, 0x96, 0x44, 0x65, 0xac, 0xc8, 0xa0, 0x6d, 0x98, - 0x94, 0x62, 0x80, 0x84, 0x8a, 0x05, 0xf7, 0x74, 0x37, 0xd9, 0xc2, 0x24, 0x7e, 0xfa, 0x60, 0xbf, - 0x32, 0x89, 0x53, 0x64, 0x70, 0x07, 0x61, 
0x2a, 0x96, 0xed, 0xd0, 0xa3, 0x75, 0x80, 0x0d, 0x3f, - 0x13, 0xcb, 0x98, 0x84, 0xc9, 0x4a, 0xed, 0x1f, 0xb5, 0xe0, 0xb1, 0x8e, 0x91, 0x11, 0x92, 0xf6, - 0x31, 0xcf, 0x42, 0x5a, 0xf2, 0x2d, 0xf4, 0x96, 0x7c, 0xed, 0x9f, 0xb5, 0xe0, 0xf4, 0xe2, 0x4e, - 0x2b, 0xde, 0xab, 0x7a, 0xe6, 0xeb, 0xce, 0xcb, 0x30, 0xb4, 0x43, 0x5c, 0xaf, 0xbd, 0x23, 0x66, - 0xae, 0x22, 0x8f, 0x9f, 0x15, 0x56, 0x7a, 0xb8, 0x5f, 0x19, 0xab, 0xc7, 0x41, 0xe8, 0x6c, 0x12, - 0x5e, 0x80, 0x05, 0x3a, 0x3b, 0xc4, 0xbd, 0xf7, 0xc9, 0x2d, 0x6f, 0xc7, 0x93, 0x26, 0x3b, 0x5d, - 0x75, 0x67, 0xb3, 0x72, 0x40, 0x67, 0xdf, 0x6c, 0x3b, 0x7e, 0xec, 0xc5, 0x7b, 0xe2, 0x61, 0x46, - 0x12, 0xc1, 0x09, 0x3d, 0xfb, 0xeb, 0x16, 0x4c, 0xc8, 0x75, 0x3f, 0xe7, 0xba, 0x21, 0x89, 0x22, - 0x34, 0x03, 0x05, 0xaf, 0x25, 0x7a, 0x09, 0xa2, 0x97, 0x85, 0xe5, 0x1a, 0x2e, 0x78, 0x2d, 0x54, - 0x83, 0x32, 0xb7, 0xfc, 0x49, 0x16, 0x57, 0x5f, 0xf6, 0x43, 0xac, 0x07, 0x6b, 0xb2, 0x26, 0x4e, - 0x88, 0x48, 0x0e, 0x8e, 0x9d, 0x99, 0x45, 0xf3, 0xd5, 0xeb, 0x86, 0x28, 0xc7, 0x0a, 0x03, 0x5d, - 0x81, 0x92, 0x1f, 0xb8, 0xdc, 0x10, 0x8b, 0xdf, 0x7e, 0x6c, 0xc9, 0xae, 0x8a, 0x32, 0xac, 0xa0, - 0xf6, 0x0f, 0x5a, 0x30, 0x2a, 0xbf, 0xac, 0x4f, 0x66, 0x92, 0x6e, 0xad, 0x84, 0x91, 0x4c, 0xb6, - 0x16, 0x65, 0x06, 0x19, 0xc4, 0xe0, 0x01, 0x8b, 0x47, 0xe1, 0x01, 0xed, 0x1f, 0x29, 0xc0, 0xb8, - 0xec, 0x4e, 0xbd, 0xbd, 0x1e, 0x91, 0x18, 0xad, 0x41, 0xd9, 0xe1, 0x43, 0x4e, 0xe4, 0x8a, 0x7d, - 0x32, 0x5b, 0xf8, 0x30, 0xe6, 0x27, 0xb9, 0x96, 0xe7, 0x64, 0x6d, 0x9c, 0x10, 0x42, 0x4d, 0x98, - 0xf2, 0x83, 0x98, 0x1d, 0xd1, 0x0a, 0xde, 0xed, 0x09, 0x24, 0x4d, 0xfd, 0x9c, 0xa0, 0x3e, 0xb5, - 0x9a, 0xa6, 0x82, 0x3b, 0x09, 0xa3, 0x45, 0xa9, 0xf0, 0x28, 0xe6, 0x8b, 0x1b, 0xfa, 0x2c, 0x64, - 0xeb, 0x3b, 0xec, 0x5f, 0xb5, 0xa0, 0x2c, 0xd1, 0x4e, 0xe2, 0xb5, 0x6b, 0x05, 0x86, 0x23, 0x36, - 0x09, 0x72, 0x68, 0xec, 0x6e, 0x1d, 0xe7, 0xf3, 0x95, 0xdc, 0x3c, 0xfc, 0x7f, 0x84, 0x25, 0x0d, - 0xa6, 0xef, 0x56, 0xdd, 0xff, 0x88, 0xe8, 0xbb, 0x55, 0x7f, 0x72, 0x6e, 0x98, 0x3f, 0x60, 0x7d, - 0xd6, 0xc4, 0x5a, 0xca, 0x20, 0xb5, 0x42, 0xb2, 0xe1, 0x3d, 0x48, 0x33, 0x48, 0x35, 0x56, 0x8a, - 0x05, 0x14, 0xbd, 0x0d, 0xa3, 0x0d, 0xa9, 0xe8, 0x4c, 0x8e, 0x81, 0xcb, 0x5d, 0x95, 0xee, 0xea, - 0x7d, 0x86, 0x1b, 0x69, 0x2f, 0x68, 0xf5, 0xb1, 0x41, 0xcd, 0x7c, 0x6e, 0x2f, 0xf6, 0x7a, 0x6e, - 0x4f, 0xe8, 0xe6, 0x3f, 0x3e, 0xff, 0x98, 0x05, 0x43, 0x5c, 0x5d, 0xd6, 0x9f, 0x7e, 0x51, 0x7b, - 0xae, 0x4a, 0xc6, 0xee, 0x2e, 0x2d, 0x14, 0xcf, 0x4f, 0x68, 0x05, 0xca, 0xec, 0x07, 0x53, 0x1b, - 0x14, 0xf3, 0xad, 0xd3, 0x79, 0xab, 0x7a, 0x07, 0xef, 0xca, 0x6a, 0x38, 0xa1, 0x60, 0xff, 0x50, - 0x91, 0x1e, 0x55, 0x09, 0xaa, 0x71, 0x83, 0x5b, 0x8f, 0xee, 0x06, 0x2f, 0x3c, 0xaa, 0x1b, 0x7c, - 0x13, 0x26, 0x1a, 0xda, 0xe3, 0x56, 0x32, 0x93, 0x57, 0xba, 0x2e, 0x12, 0xed, 0x1d, 0x8c, 0xab, - 0x8c, 0x16, 0x4c, 0x22, 0x38, 0x4d, 0x15, 0x7d, 0x3b, 0x8c, 0xf2, 0x79, 0x16, 0xad, 0x70, 0x8b, - 0x85, 0x4f, 0xe4, 0xaf, 0x17, 0xbd, 0x09, 0xb6, 0x12, 0xeb, 0x5a, 0x75, 0x6c, 0x10, 0xb3, 0x7f, - 0xa9, 0x04, 0x83, 0x8b, 0xbb, 0xc4, 0x8f, 0x4f, 0xe0, 0x40, 0x6a, 0xc0, 0xb8, 0xe7, 0xef, 0x06, - 0xcd, 0x5d, 0xe2, 0x72, 0xf8, 0x51, 0x2e, 0xd7, 0xb3, 0x82, 0xf4, 0xf8, 0xb2, 0x41, 0x02, 0xa7, - 0x48, 0x3e, 0x0a, 0x09, 0xf3, 0x3a, 0x0c, 0xf1, 0xb9, 0x17, 0xe2, 0x65, 0xa6, 0x32, 0x98, 0x0d, - 0xa2, 0xd8, 0x05, 0x89, 0xf4, 0xcb, 0xb5, 0xcf, 0xa2, 0x3a, 0x7a, 0x17, 0xc6, 0x37, 0xbc, 0x30, - 0x8a, 0xa9, 0x68, 0x18, 0xc5, 0xce, 0x4e, 0xeb, 0x21, 0x24, 0x4a, 0x35, 0x0e, 0x4b, 0x06, 0x25, - 0x9c, 0xa2, 0x8c, 0x36, 0x61, 0x8c, 0x0a, 0x39, 0x49, 0x53, 0xc3, 
0x47, 0x6e, 0x4a, 0xa9, 0x8c, - 0x6e, 0xe9, 0x84, 0xb0, 0x49, 0x97, 0x1e, 0x26, 0x0d, 0x26, 0x14, 0x95, 0x18, 0x47, 0xa1, 0x0e, - 0x13, 0x2e, 0x0d, 0x71, 0x18, 0x3d, 0x93, 0x98, 0xd9, 0x4a, 0xd9, 0x3c, 0x93, 0x34, 0xe3, 0x94, - 0x77, 0xa0, 0x4c, 0xe8, 0x10, 0x52, 0xc2, 0x42, 0x31, 0x7e, 0xb5, 0xbf, 0xbe, 0xae, 0x78, 0x8d, - 0x30, 0x30, 0x65, 0xf9, 0x45, 0x49, 0x09, 0x27, 0x44, 0xd1, 0x02, 0x0c, 0x45, 0x24, 0xf4, 0x48, - 0x24, 0x54, 0xe4, 0x5d, 0xa6, 0x91, 0xa1, 0x71, 0x8b, 0x4f, 0xfe, 0x1b, 0x8b, 0xaa, 0x74, 0x79, - 0x39, 0x4c, 0x1a, 0x62, 0x5a, 0x71, 0x6d, 0x79, 0xcd, 0xb1, 0x52, 0x2c, 0xa0, 0xe8, 0x0d, 0x18, - 0x0e, 0x49, 0x93, 0x29, 0x8b, 0xc6, 0xfa, 0x5f, 0xe4, 0x5c, 0xf7, 0xc4, 0xeb, 0x61, 0x49, 0x00, - 0xdd, 0x04, 0x14, 0x12, 0xca, 0x43, 0x78, 0xfe, 0xa6, 0x32, 0xe6, 0x10, 0xba, 0xee, 0xc7, 0x45, - 0xfb, 0xa7, 0x70, 0x82, 0x21, 0x8d, 0x6f, 0x71, 0x46, 0x35, 0x74, 0x1d, 0xa6, 0x54, 0xe9, 0xb2, - 0x1f, 0xc5, 0x8e, 0xdf, 0x20, 0x4c, 0xcd, 0x5d, 0x4e, 0xb8, 0x22, 0x9c, 0x46, 0xc0, 0x9d, 0x75, - 0xec, 0x9f, 0xa6, 0xec, 0x0c, 0x1d, 0xad, 0x13, 0xe0, 0x05, 0x5e, 0x37, 0x79, 0x81, 0x73, 0xb9, - 0x33, 0x97, 0xc3, 0x07, 0x1c, 0x58, 0x30, 0xa2, 0xcd, 0x6c, 0xb2, 0x66, 0xad, 0x2e, 0x6b, 0xb6, - 0x0d, 0x93, 0x74, 0xa5, 0xdf, 0x5e, 0x8f, 0x48, 0xb8, 0x4b, 0x5c, 0xb6, 0x30, 0x0b, 0x0f, 0xb7, - 0x30, 0xd5, 0x2b, 0xf3, 0xad, 0x14, 0x41, 0xdc, 0xd1, 0x04, 0x7a, 0x59, 0x6a, 0x4e, 0x8a, 0x86, - 0x91, 0x16, 0xd7, 0x8a, 0x1c, 0xee, 0x57, 0x26, 0xb5, 0x0f, 0xd1, 0x35, 0x25, 0xf6, 0x3b, 0xf2, - 0x1b, 0xd5, 0x6b, 0x7e, 0x43, 0x2d, 0x96, 0xd4, 0x6b, 0xbe, 0x5a, 0x0e, 0x38, 0xc1, 0xa1, 0x7b, - 0x94, 0x8a, 0x20, 0xe9, 0xd7, 0x7c, 0x2a, 0xa0, 0x60, 0x06, 0xb1, 0x5f, 0x00, 0x58, 0x7c, 0x40, - 0x1a, 0x7c, 0xa9, 0xeb, 0x0f, 0x90, 0x56, 0xfe, 0x03, 0xa4, 0xfd, 0x1f, 0x2d, 0x18, 0x5f, 0x5a, - 0x30, 0xc4, 0xc4, 0x59, 0x00, 0x2e, 0x1b, 0xdd, 0xbb, 0xb7, 0x2a, 0x75, 0xeb, 0x5c, 0x3d, 0xaa, - 0x4a, 0xb1, 0x86, 0x81, 0xce, 0x41, 0xb1, 0xd9, 0xf6, 0x85, 0xc8, 0x32, 0x7c, 0xb0, 0x5f, 0x29, - 0xde, 0x6a, 0xfb, 0x98, 0x96, 0x69, 0x16, 0x82, 0xc5, 0xbe, 0x2d, 0x04, 0x7b, 0x7a, 0xea, 0xa1, - 0x0a, 0x0c, 0xde, 0xbf, 0xef, 0xb9, 0xdc, 0x1f, 0x42, 0xe8, 0xfd, 0xef, 0xdd, 0x5b, 0xae, 0x46, - 0x98, 0x97, 0xdb, 0x5f, 0x29, 0xc2, 0xcc, 0x52, 0x93, 0x3c, 0xf8, 0x80, 0x3e, 0x21, 0xfd, 0xda, - 0x37, 0x1e, 0x8d, 0x5f, 0x3c, 0xaa, 0x0d, 0x6b, 0xef, 0xf1, 0xd8, 0x80, 0x61, 0xfe, 0x98, 0x2d, - 0x3d, 0x44, 0x5e, 0xcd, 0x6a, 0x3d, 0x7f, 0x40, 0x66, 0xf9, 0xa3, 0xb8, 0x30, 0x70, 0x57, 0x37, - 0xad, 0x28, 0xc5, 0x92, 0xf8, 0xcc, 0x67, 0x60, 0x54, 0xc7, 0x3c, 0x92, 0x35, 0xf9, 0x5f, 0x2a, - 0xc2, 0x24, 0xed, 0xc1, 0x23, 0x9d, 0x88, 0x3b, 0x9d, 0x13, 0x71, 0xdc, 0x16, 0xc5, 0xbd, 0x67, - 0xe3, 0xed, 0xf4, 0x6c, 0x3c, 0x9f, 0x37, 0x1b, 0x27, 0x3d, 0x07, 0xdf, 0x63, 0xc1, 0xa9, 0xa5, - 0x66, 0xd0, 0xd8, 0x4e, 0x59, 0xfd, 0xbe, 0x04, 0x23, 0xf4, 0x1c, 0x8f, 0x0c, 0x87, 0x34, 0xc3, - 0x45, 0x51, 0x80, 0xb0, 0x8e, 0xa7, 0x55, 0xbb, 0x73, 0x67, 0xb9, 0x9a, 0xe5, 0xd9, 0x28, 0x40, - 0x58, 0xc7, 0xb3, 0xbf, 0x66, 0xc1, 0x85, 0xeb, 0x0b, 0x8b, 0xc9, 0x52, 0xec, 0x70, 0xae, 0xa4, - 0x52, 0xa0, 0xab, 0x75, 0x25, 0x91, 0x02, 0xab, 0xac, 0x17, 0x02, 0xfa, 0x51, 0x71, 0x1c, 0xfe, - 0x29, 0x0b, 0x4e, 0x5d, 0xf7, 0x62, 0x7a, 0x2d, 0xa7, 0xdd, 0xfc, 0xe8, 0xbd, 0x1c, 0x79, 0x71, - 0x10, 0xee, 0xa5, 0xdd, 0xfc, 0xb0, 0x82, 0x60, 0x0d, 0x8b, 0xb7, 0xbc, 0xeb, 0x31, 0x33, 0xaa, - 0x82, 0xa9, 0x8a, 0xc2, 0xa2, 0x1c, 0x2b, 0x0c, 0xfa, 0x61, 0xae, 0x17, 0x32, 0x51, 0x62, 0x4f, - 0x9c, 0xb0, 0xea, 0xc3, 0xaa, 0x12, 0x80, 0x13, 0x1c, 0xfb, 0x8f, 0x2c, 0xa8, 0x5c, 0x6f, 
0xb6, - 0xa3, 0x98, 0x84, 0x1b, 0x51, 0xce, 0xe9, 0xf8, 0x02, 0x94, 0x89, 0x14, 0xdc, 0x45, 0xaf, 0x15, - 0xab, 0xa9, 0x24, 0x7a, 0xee, 0x6d, 0xa8, 0xf0, 0xfa, 0xf0, 0x21, 0x38, 0x9a, 0x11, 0xf8, 0x12, - 0x20, 0xa2, 0xb7, 0xa5, 0xbb, 0x5f, 0x32, 0x3f, 0xae, 0xc5, 0x0e, 0x28, 0xce, 0xa8, 0x61, 0xff, - 0xa8, 0x05, 0x67, 0xd4, 0x07, 0x7f, 0xe4, 0x3e, 0xd3, 0xfe, 0xf9, 0x02, 0x8c, 0xdd, 0x58, 0x5b, - 0xab, 0x5d, 0x27, 0xb1, 0xb8, 0xb6, 0x7b, 0xeb, 0xd6, 0xb1, 0xa6, 0x22, 0xec, 0x26, 0x05, 0xb6, - 0x63, 0xaf, 0x39, 0xcb, 0xbd, 0xf8, 0x67, 0x97, 0xfd, 0xf8, 0x76, 0x58, 0x8f, 0x43, 0xcf, 0xdf, - 0xcc, 0x54, 0x2a, 0x4a, 0xe6, 0xa2, 0x98, 0xc7, 0x5c, 0xa0, 0x17, 0x60, 0x88, 0x85, 0x11, 0x90, - 0x93, 0xf0, 0xb8, 0x12, 0xa2, 0x58, 0xe9, 0xe1, 0x7e, 0xa5, 0x7c, 0x07, 0x2f, 0xf3, 0x3f, 0x58, - 0xa0, 0xa2, 0x3b, 0x30, 0xb2, 0x15, 0xc7, 0xad, 0x1b, 0xc4, 0x71, 0x49, 0x28, 0x8f, 0xc3, 0x8b, - 0x59, 0xc7, 0x21, 0x1d, 0x04, 0x8e, 0x96, 0x9c, 0x20, 0x49, 0x59, 0x84, 0x75, 0x3a, 0x76, 0x1d, - 0x20, 0x81, 0x1d, 0x93, 0x42, 0xc5, 0xfe, 0x7d, 0x0b, 0x86, 0xb9, 0x47, 0x67, 0x88, 0x5e, 0x83, - 0x01, 0xf2, 0x80, 0x34, 0x04, 0xab, 0x9c, 0xd9, 0xe1, 0x84, 0xd3, 0xe2, 0xcf, 0x03, 0xf4, 0x3f, - 0x66, 0xb5, 0xd0, 0x0d, 0x18, 0xa6, 0xbd, 0xbd, 0xae, 0xdc, 0x5b, 0x9f, 0xc8, 0xfb, 0x62, 0x35, - 0xed, 0x9c, 0x39, 0x13, 0x45, 0x58, 0x56, 0x67, 0xaa, 0xee, 0x46, 0xab, 0x4e, 0x4f, 0xec, 0xb8, - 0x1b, 0x63, 0xb1, 0xb6, 0x50, 0xe3, 0x48, 0x82, 0x1a, 0x57, 0x75, 0xcb, 0x42, 0x9c, 0x10, 0xb1, - 0xd7, 0xa0, 0x4c, 0x27, 0x75, 0xae, 0xe9, 0x39, 0xdd, 0xb5, 0xec, 0xcf, 0x40, 0x59, 0x6a, 0xbc, - 0x23, 0xe1, 0xc9, 0xc5, 0xa8, 0x4a, 0x85, 0x78, 0x84, 0x13, 0xb8, 0xbd, 0x01, 0xa7, 0x99, 0xa9, - 0x83, 0x13, 0x6f, 0x19, 0x7b, 0xac, 0xf7, 0x62, 0x7e, 0x56, 0x48, 0x9e, 0x7c, 0x66, 0xa6, 0x35, - 0x67, 0x89, 0x51, 0x49, 0x31, 0x91, 0x42, 0xed, 0x3f, 0x1c, 0x80, 0xc7, 0x97, 0xeb, 0xf9, 0xce, - 0xbe, 0xaf, 0xc0, 0x28, 0xe7, 0x4b, 0xe9, 0xd2, 0x76, 0x9a, 0xa2, 0x5d, 0xf5, 0x10, 0xb8, 0xa6, - 0xc1, 0xb0, 0x81, 0x89, 0x2e, 0x40, 0xd1, 0x7b, 0xcf, 0x4f, 0xdb, 0x1d, 0x2f, 0xbf, 0xb9, 0x8a, - 0x69, 0x39, 0x05, 0x53, 0x16, 0x97, 0xdf, 0x1d, 0x0a, 0xac, 0xd8, 0xdc, 0xd7, 0x61, 0xdc, 0x8b, - 0x1a, 0x91, 0xb7, 0xec, 0xd3, 0x73, 0x46, 0x3b, 0xa9, 0x94, 0x56, 0x84, 0x76, 0x5a, 0x41, 0x71, - 0x0a, 0x5b, 0xbb, 0xc8, 0x06, 0xfb, 0x66, 0x93, 0x7b, 0xba, 0x36, 0x51, 0x09, 0xa0, 0xc5, 0xbe, - 0x2e, 0x62, 0x56, 0x7c, 0x42, 0x02, 0xe0, 0x1f, 0x1c, 0x61, 0x09, 0xa3, 0x22, 0x67, 0x63, 0xcb, - 0x69, 0xcd, 0xb5, 0xe3, 0xad, 0xaa, 0x17, 0x35, 0x82, 0x5d, 0x12, 0xee, 0x31, 0x6d, 0x41, 0x29, - 0x11, 0x39, 0x15, 0x60, 0xe1, 0xc6, 0x5c, 0x8d, 0x62, 0xe2, 0xce, 0x3a, 0x26, 0x1b, 0x0c, 0xc7, - 0xc1, 0x06, 0xcf, 0xc1, 0x84, 0x6c, 0xa6, 0x4e, 0x22, 0x76, 0x29, 0x8e, 0xb0, 0x8e, 0x29, 0xdb, - 0x62, 0x51, 0xac, 0xba, 0x95, 0xc6, 0x47, 0x2f, 0xc3, 0x98, 0xe7, 0x7b, 0xb1, 0xe7, 0xc4, 0x41, - 0xc8, 0x58, 0x0a, 0xae, 0x18, 0x60, 0xa6, 0x7b, 0xcb, 0x3a, 0x00, 0x9b, 0x78, 0xf6, 0x7f, 0x1d, - 0x80, 0x29, 0x36, 0x6d, 0xdf, 0x5c, 0x61, 0x1f, 0x99, 0x15, 0x76, 0xa7, 0x73, 0x85, 0x1d, 0x07, - 0x7f, 0xff, 0x61, 0x2e, 0xb3, 0x77, 0xa1, 0xac, 0x8c, 0x9f, 0xa5, 0xf7, 0x83, 0x95, 0xe3, 0xfd, - 0xd0, 0x9b, 0xfb, 0x90, 0xef, 0xd6, 0xc5, 0xcc, 0x77, 0xeb, 0xbf, 0x63, 0x41, 0x62, 0x03, 0x8a, - 0x6e, 0x40, 0xb9, 0x15, 0x30, 0x3b, 0x8b, 0x50, 0x1a, 0x2f, 0x3d, 0x9e, 0x79, 0x51, 0xf1, 0x4b, - 0x91, 0x8f, 0x5f, 0x4d, 0xd6, 0xc0, 0x49, 0x65, 0x34, 0x0f, 0xc3, 0xad, 0x90, 0xd4, 0x63, 0xe6, - 0xf3, 0xdb, 0x93, 0x0e, 0x5f, 0x23, 0x1c, 0x1f, 0xcb, 0x8a, 0xf6, 0x2f, 0x58, 0x00, 0xfc, 0x69, - 0xd8, 0xf1, 0x37, 
0xc9, 0x09, 0xa8, 0xbb, 0xab, 0x30, 0x10, 0xb5, 0x48, 0xa3, 0x9b, 0x05, 0x4c, - 0xd2, 0x9f, 0x7a, 0x8b, 0x34, 0x92, 0x01, 0xa7, 0xff, 0x30, 0xab, 0x6d, 0x7f, 0x2f, 0xc0, 0x78, - 0x82, 0xb6, 0x1c, 0x93, 0x1d, 0xf4, 0x9c, 0xe1, 0x03, 0x78, 0x2e, 0xe5, 0x03, 0x58, 0x66, 0xd8, - 0x9a, 0x66, 0xf5, 0x5d, 0x28, 0xee, 0x38, 0x0f, 0x84, 0xea, 0xec, 0x99, 0xee, 0xdd, 0xa0, 0xf4, - 0x67, 0x57, 0x9c, 0x07, 0x5c, 0x48, 0x7c, 0x46, 0x2e, 0x90, 0x15, 0xe7, 0xc1, 0x21, 0xb7, 0x73, - 0x61, 0x87, 0xd4, 0x2d, 0x2f, 0x8a, 0xbf, 0xf4, 0x5f, 0x92, 0xff, 0x6c, 0xd9, 0xd1, 0x46, 0x58, - 0x5b, 0x9e, 0x2f, 0x1e, 0x4a, 0xfb, 0x6a, 0xcb, 0xf3, 0xd3, 0x6d, 0x79, 0x7e, 0x1f, 0x6d, 0x79, - 0x3e, 0x7a, 0x1f, 0x86, 0x85, 0x51, 0x82, 0xf0, 0xb9, 0xbf, 0xda, 0x47, 0x7b, 0xc2, 0xa6, 0x81, - 0xb7, 0x79, 0x55, 0x0a, 0xc1, 0xa2, 0xb4, 0x67, 0xbb, 0xb2, 0x41, 0xf4, 0x37, 0x2d, 0x18, 0x17, - 0xbf, 0x31, 0x79, 0xaf, 0x4d, 0xa2, 0x58, 0xf0, 0x9e, 0x9f, 0xee, 0xbf, 0x0f, 0xa2, 0x22, 0xef, - 0xca, 0xa7, 0xe5, 0x31, 0x6b, 0x02, 0x7b, 0xf6, 0x28, 0xd5, 0x0b, 0xf4, 0x8f, 0x2c, 0x38, 0xbd, - 0xe3, 0x3c, 0xe0, 0x2d, 0xf2, 0x32, 0xec, 0xc4, 0x5e, 0x20, 0x8c, 0xf5, 0x5f, 0xeb, 0x6f, 0xfa, - 0x3b, 0xaa, 0xf3, 0x4e, 0x4a, 0xbb, 0xde, 0xd3, 0x59, 0x28, 0x3d, 0xbb, 0x9a, 0xd9, 0xaf, 0x99, - 0x0d, 0x28, 0xc9, 0xf5, 0x96, 0xa1, 0x6a, 0xa8, 0xea, 0x8c, 0xf5, 0x91, 0x6d, 0x42, 0x74, 0x47, - 0x3c, 0xda, 0x8e, 0x58, 0x6b, 0x8f, 0xb4, 0x9d, 0x77, 0x61, 0x54, 0x5f, 0x63, 0x8f, 0xb4, 0xad, - 0xf7, 0xe0, 0x54, 0xc6, 0x5a, 0x7a, 0xa4, 0x4d, 0xde, 0x87, 0x73, 0xb9, 0xeb, 0xe3, 0x51, 0x36, - 0x6c, 0xff, 0xbc, 0xa5, 0x9f, 0x83, 0x27, 0xf0, 0xe6, 0xb0, 0x60, 0xbe, 0x39, 0x5c, 0xec, 0xbe, - 0x73, 0x72, 0x1e, 0x1e, 0xde, 0xd6, 0x3b, 0x4d, 0x4f, 0x75, 0xf4, 0x06, 0x0c, 0x35, 0x69, 0x89, - 0xb4, 0x86, 0xb1, 0x7b, 0xef, 0xc8, 0x84, 0x97, 0x62, 0xe5, 0x11, 0x16, 0x14, 0xec, 0x5f, 0xb6, - 0x60, 0xe0, 0x04, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x5c, 0x2e, 0x69, 0x11, 0x0e, 0x70, 0x16, 0x3b, - 0xf7, 0x17, 0x1f, 0xc4, 0xc4, 0x8f, 0x98, 0xa8, 0x98, 0x39, 0x30, 0xdf, 0x01, 0xa7, 0x6e, 0x05, - 0x8e, 0x3b, 0xef, 0x34, 0x1d, 0xbf, 0x41, 0xc2, 0x65, 0x7f, 0xb3, 0xa7, 0x59, 0x96, 0x6e, 0x44, - 0x55, 0xe8, 0x65, 0x44, 0x65, 0x6f, 0x01, 0xd2, 0x1b, 0x10, 0x86, 0xab, 0x18, 0x86, 0x3d, 0xde, - 0x94, 0x18, 0xfe, 0xa7, 0xb2, 0xb9, 0xbb, 0x8e, 0x9e, 0x69, 0x26, 0x99, 0xbc, 0x00, 0x4b, 0x42, - 0xf6, 0x2b, 0x90, 0xe9, 0xac, 0xd6, 0x5b, 0x6d, 0x60, 0x7f, 0x1e, 0xa6, 0x58, 0xcd, 0x23, 0x8a, - 0xb4, 0x76, 0x4a, 0x2b, 0x99, 0x11, 0x99, 0xc6, 0xfe, 0xb2, 0x05, 0x13, 0xab, 0xa9, 0x80, 0x1d, - 0x97, 0xd9, 0x03, 0x68, 0x86, 0x32, 0xbc, 0xce, 0x4a, 0xb1, 0x80, 0x1e, 0xbb, 0x0e, 0xea, 0xcf, - 0x2d, 0x48, 0xfc, 0x47, 0x4f, 0x80, 0xf1, 0x5a, 0x30, 0x18, 0xaf, 0x4c, 0xdd, 0x88, 0xea, 0x4e, - 0x1e, 0xdf, 0x85, 0x6e, 0xaa, 0x60, 0x09, 0x5d, 0xd4, 0x22, 0x09, 0x19, 0xee, 0x5a, 0x3f, 0x6e, - 0x46, 0x54, 0x90, 0xe1, 0x13, 0x98, 0xed, 0x94, 0xc2, 0xfd, 0x88, 0xd8, 0x4e, 0xa9, 0xfe, 0xe4, - 0xec, 0xd0, 0x9a, 0xd6, 0x65, 0x76, 0x72, 0x7d, 0x2b, 0xb3, 0x85, 0x77, 0x9a, 0xde, 0xfb, 0x44, - 0x45, 0x7c, 0xa9, 0x08, 0xdb, 0x76, 0x51, 0x7a, 0xb8, 0x5f, 0x19, 0x53, 0xff, 0x78, 0x84, 0xb9, - 0xa4, 0x8a, 0x7d, 0x03, 0x26, 0x52, 0x03, 0x86, 0x5e, 0x82, 0xc1, 0xd6, 0x96, 0x13, 0x91, 0x94, - 0xbd, 0xe8, 0x60, 0x8d, 0x16, 0x1e, 0xee, 0x57, 0xc6, 0x55, 0x05, 0x56, 0x82, 0x39, 0xb6, 0xfd, - 0x3f, 0x2d, 0x18, 0x58, 0x0d, 0xdc, 0x93, 0x58, 0x4c, 0xaf, 0x1b, 0x8b, 0xe9, 0x7c, 0x5e, 0x7c, - 0xce, 0xdc, 0x75, 0xb4, 0x94, 0x5a, 0x47, 0x17, 0x73, 0x29, 0x74, 0x5f, 0x42, 0x3b, 0x30, 0xc2, - 0xa2, 0x7e, 0x0a, 0xfb, 0xd5, 0x17, 0x0c, 
0x19, 0xa0, 0x92, 0x92, 0x01, 0x26, 0x34, 0x54, 0x4d, - 0x12, 0x78, 0x1a, 0x86, 0x85, 0x0d, 0x65, 0xda, 0xea, 0x5f, 0xe0, 0x62, 0x09, 0xb7, 0x7f, 0xac, - 0x08, 0x46, 0x94, 0x51, 0xf4, 0xab, 0x16, 0xcc, 0x86, 0xdc, 0x8d, 0xd2, 0xad, 0xb6, 0x43, 0xcf, - 0xdf, 0xac, 0x37, 0xb6, 0x88, 0xdb, 0x6e, 0x7a, 0xfe, 0xe6, 0xf2, 0xa6, 0x1f, 0xa8, 0xe2, 0xc5, - 0x07, 0xa4, 0xd1, 0x66, 0x0f, 0x21, 0x3d, 0x42, 0x9a, 0x2a, 0x1b, 0xa5, 0x6b, 0x07, 0xfb, 0x95, - 0x59, 0x7c, 0x24, 0xda, 0xf8, 0x88, 0x7d, 0x41, 0x5f, 0xb3, 0xe0, 0x2a, 0x0f, 0xbe, 0xd9, 0x7f, - 0xff, 0xbb, 0x48, 0x4c, 0x35, 0x49, 0x2a, 0x21, 0xb2, 0x46, 0xc2, 0x9d, 0xf9, 0x97, 0xc5, 0x80, - 0x5e, 0xad, 0x1d, 0xad, 0x2d, 0x7c, 0xd4, 0xce, 0xd9, 0xff, 0xaa, 0x08, 0x63, 0xc2, 0x83, 0x5f, - 0x84, 0x86, 0x79, 0xc9, 0x58, 0x12, 0x4f, 0xa4, 0x96, 0xc4, 0x94, 0x81, 0x7c, 0x3c, 0x51, 0x61, - 0x22, 0x98, 0x6a, 0x3a, 0x51, 0x7c, 0x83, 0x38, 0x61, 0xbc, 0x4e, 0x1c, 0x6e, 0xbb, 0x53, 0x3c, - 0xb2, 0x9d, 0x91, 0x52, 0xd1, 0xdc, 0x4a, 0x13, 0xc3, 0x9d, 0xf4, 0xd1, 0x2e, 0x20, 0x66, 0x80, - 0x14, 0x3a, 0x7e, 0xc4, 0xbf, 0xc5, 0x13, 0x6f, 0x06, 0x47, 0x6b, 0x75, 0x46, 0xb4, 0x8a, 0x6e, - 0x75, 0x50, 0xc3, 0x19, 0x2d, 0x68, 0x86, 0x65, 0x83, 0xfd, 0x1a, 0x96, 0x0d, 0xf5, 0x70, 0xad, - 0xf1, 0x61, 0xb2, 0x23, 0x08, 0xc3, 0x5b, 0x50, 0x56, 0x06, 0x80, 0xe2, 0xd0, 0xe9, 0x1e, 0xcb, - 0x24, 0x4d, 0x81, 0xab, 0x51, 0x12, 0xe3, 0xd3, 0x84, 0x9c, 0xfd, 0x8f, 0x0b, 0x46, 0x83, 0x7c, - 0x12, 0x57, 0xa1, 0xe4, 0x44, 0x91, 0xb7, 0xe9, 0x13, 0x57, 0xec, 0xd8, 0x8f, 0xe7, 0xed, 0x58, - 0xa3, 0x19, 0x66, 0x84, 0x39, 0x27, 0x6a, 0x62, 0x45, 0x03, 0xdd, 0xe0, 0x16, 0x52, 0xbb, 0x92, - 0xe7, 0xef, 0x8f, 0x1a, 0x48, 0x1b, 0xaa, 0x5d, 0x82, 0x45, 0x7d, 0xf4, 0x05, 0x6e, 0xc2, 0x76, - 0xd3, 0x0f, 0xee, 0xfb, 0xd7, 0x83, 0x40, 0xba, 0xdd, 0xf5, 0x47, 0x70, 0x4a, 0x1a, 0xae, 0xa9, - 0xea, 0xd8, 0xa4, 0xd6, 0x5f, 0xa0, 0xa2, 0xef, 0x84, 0x53, 0x94, 0xb4, 0xe9, 0x3c, 0x13, 0x21, - 0x02, 0x13, 0x22, 0x3c, 0x84, 0x2c, 0x13, 0x63, 0x97, 0xc9, 0xce, 0x9b, 0xb5, 0x13, 0xa5, 0xdf, - 0x4d, 0x93, 0x04, 0x4e, 0xd3, 0xb4, 0x7f, 0xd2, 0x02, 0x66, 0xf6, 0x7f, 0x02, 0x2c, 0xc3, 0x67, - 0x4d, 0x96, 0x61, 0x3a, 0x6f, 0x90, 0x73, 0xb8, 0x85, 0x17, 0xf9, 0xca, 0xaa, 0x85, 0xc1, 0x83, - 0x3d, 0x61, 0x3e, 0xd0, 0x9b, 0x93, 0xb5, 0xff, 0x8f, 0xc5, 0x0f, 0x31, 0xe5, 0x89, 0x8f, 0xbe, - 0x0b, 0x4a, 0x0d, 0xa7, 0xe5, 0x34, 0x78, 0x48, 0xec, 0x5c, 0xad, 0x8e, 0x51, 0x69, 0x76, 0x41, - 0xd4, 0xe0, 0x5a, 0x0a, 0x19, 0x66, 0xa4, 0x24, 0x8b, 0x7b, 0x6a, 0x26, 0x54, 0x93, 0x33, 0xdb, - 0x30, 0x66, 0x10, 0x7b, 0xa4, 0x22, 0xed, 0x77, 0xf1, 0x2b, 0x56, 0x85, 0xc5, 0xd9, 0x81, 0x29, - 0x5f, 0xfb, 0x4f, 0x2f, 0x14, 0x29, 0xa6, 0x7c, 0xbc, 0xd7, 0x25, 0xca, 0x6e, 0x1f, 0xcd, 0xad, - 0x21, 0x45, 0x06, 0x77, 0x52, 0xb6, 0x7f, 0xdc, 0x82, 0xc7, 0x74, 0x44, 0x2d, 0x48, 0x42, 0x2f, - 0x3d, 0x71, 0x15, 0x4a, 0x41, 0x8b, 0x84, 0x4e, 0x1c, 0x84, 0xe2, 0xd6, 0xb8, 0x22, 0x07, 0xfd, - 0xb6, 0x28, 0x3f, 0x14, 0x01, 0x25, 0x25, 0x75, 0x59, 0x8e, 0x55, 0x4d, 0x2a, 0xc7, 0xb0, 0xc1, - 0x88, 0x44, 0x00, 0x0b, 0x76, 0x06, 0xb0, 0x27, 0xd3, 0x08, 0x0b, 0x88, 0xfd, 0x87, 0x16, 0x5f, - 0x58, 0x7a, 0xd7, 0xd1, 0x7b, 0x30, 0xb9, 0xe3, 0xc4, 0x8d, 0xad, 0xc5, 0x07, 0xad, 0x90, 0xab, - 0xc7, 0xe5, 0x38, 0x3d, 0xd3, 0x6b, 0x9c, 0xb4, 0x8f, 0x4c, 0xac, 0xf2, 0x56, 0x52, 0xc4, 0x70, - 0x07, 0x79, 0xb4, 0x0e, 0x23, 0xac, 0x8c, 0x99, 0x7f, 0x47, 0xdd, 0x58, 0x83, 0xbc, 0xd6, 0xd4, - 0xab, 0xf3, 0x4a, 0x42, 0x07, 0xeb, 0x44, 0xed, 0x2f, 0x15, 0xf9, 0x6e, 0x67, 0xdc, 0xf6, 0xd3, - 0x30, 0xdc, 0x0a, 0xdc, 0x85, 0xe5, 0x2a, 0x16, 0xb3, 0xa0, 0xae, 
0x91, 0x1a, 0x2f, 0xc6, 0x12, - 0x8e, 0x5e, 0x05, 0x20, 0x0f, 0x62, 0x12, 0xfa, 0x4e, 0x53, 0x59, 0xc9, 0x28, 0xbb, 0xd0, 0x6a, - 0xb0, 0x1a, 0xc4, 0x77, 0x22, 0xf2, 0x1d, 0x8b, 0x0a, 0x05, 0x6b, 0xe8, 0xe8, 0x1a, 0x40, 0x2b, - 0x0c, 0x76, 0x3d, 0x97, 0xf9, 0x13, 0x16, 0x4d, 0x1b, 0x92, 0x9a, 0x82, 0x60, 0x0d, 0x0b, 0xbd, - 0x0a, 0x63, 0x6d, 0x3f, 0xe2, 0x1c, 0x8a, 0xb3, 0x2e, 0xc2, 0x31, 0x96, 0x12, 0xeb, 0x86, 0x3b, - 0x3a, 0x10, 0x9b, 0xb8, 0x68, 0x0e, 0x86, 0x62, 0x87, 0xd9, 0x44, 0x0c, 0xe6, 0x1b, 0x73, 0xae, - 0x51, 0x0c, 0x3d, 0x20, 0x33, 0xad, 0x80, 0x45, 0x45, 0xf4, 0x96, 0x74, 0xce, 0xe0, 0x67, 0xbd, - 0xb0, 0xa2, 0xee, 0xef, 0x5e, 0xd0, 0x5c, 0x33, 0x84, 0x75, 0xb6, 0x41, 0xcb, 0xfe, 0x5a, 0x19, - 0x20, 0x61, 0xc7, 0xd1, 0xfb, 0x1d, 0xe7, 0xd1, 0xb3, 0xdd, 0x19, 0xf8, 0xe3, 0x3b, 0x8c, 0xd0, - 0xf7, 0x59, 0x30, 0xe2, 0x34, 0x9b, 0x41, 0xc3, 0x89, 0xd9, 0x28, 0x17, 0xba, 0x9f, 0x87, 0xa2, - 0xfd, 0xb9, 0xa4, 0x06, 0xef, 0xc2, 0x0b, 0x72, 0xe1, 0x69, 0x90, 0x9e, 0xbd, 0xd0, 0x1b, 0x46, - 0x9f, 0x92, 0x52, 0x1a, 0x5f, 0x1e, 0x33, 0x69, 0x29, 0xad, 0xcc, 0x8e, 0x7e, 0x4d, 0x40, 0x43, - 0x77, 0x8c, 0x48, 0x7b, 0x03, 0xf9, 0x41, 0x27, 0x0c, 0xae, 0xb4, 0x57, 0x90, 0x3d, 0x54, 0xd3, - 0xbd, 0xc9, 0x06, 0xf3, 0x23, 0xb3, 0x68, 0xe2, 0x4f, 0x0f, 0x4f, 0xb2, 0x77, 0x61, 0xc2, 0x35, - 0xef, 0x76, 0xb1, 0x9a, 0x9e, 0xca, 0xa3, 0x9b, 0x62, 0x05, 0x92, 0xdb, 0x3c, 0x05, 0xc0, 0x69, - 0xc2, 0xa8, 0xc6, 0xfd, 0xfa, 0x96, 0xfd, 0x8d, 0x40, 0x58, 0xe3, 0xdb, 0xb9, 0x73, 0xb9, 0x17, - 0xc5, 0x64, 0x87, 0x62, 0x26, 0x97, 0xf6, 0xaa, 0xa8, 0x8b, 0x15, 0x15, 0xf4, 0x06, 0x0c, 0x31, - 0xc7, 0xe0, 0x68, 0xba, 0x94, 0xaf, 0x4c, 0x34, 0x63, 0x5a, 0x24, 0x9b, 0x8a, 0xfd, 0x8d, 0xb0, - 0xa0, 0x80, 0x6e, 0xc8, 0xc0, 0x37, 0xd1, 0xb2, 0x7f, 0x27, 0x22, 0x2c, 0xf0, 0x4d, 0x79, 0xfe, - 0xe3, 0x49, 0x4c, 0x1b, 0x5e, 0x9e, 0x99, 0x7a, 0xc1, 0xa8, 0x49, 0x99, 0x23, 0xf1, 0x5f, 0x66, - 0x74, 0x98, 0x86, 0xfc, 0xee, 0x99, 0x59, 0x1f, 0x92, 0xe1, 0xbc, 0x6b, 0x92, 0xc0, 0x69, 0x9a, - 0x94, 0xd1, 0xe4, 0x3b, 0x57, 0xd8, 0xf3, 0xf7, 0xda, 0xff, 0x5c, 0xbe, 0x66, 0x97, 0x0c, 0x2f, - 0xc1, 0xa2, 0xfe, 0x89, 0xde, 0xfa, 0x33, 0x3e, 0x4c, 0xa6, 0xb7, 0xe8, 0x23, 0xe5, 0x32, 0x7e, - 0x7f, 0x00, 0xc6, 0xcd, 0x25, 0x85, 0xae, 0x42, 0x59, 0x10, 0x51, 0x51, 0x58, 0xd5, 0x2e, 0x59, - 0x91, 0x00, 0x9c, 0xe0, 0xb0, 0xe0, 0xbb, 0xac, 0xba, 0x66, 0x87, 0x99, 0x04, 0xdf, 0x55, 0x10, - 0xac, 0x61, 0x51, 0x79, 0x69, 0x3d, 0x08, 0x62, 0x75, 0xa9, 0xa8, 0x75, 0x37, 0xcf, 0x4a, 0xb1, - 0x80, 0xd2, 0xcb, 0x64, 0x9b, 0x84, 0x3e, 0x69, 0x9a, 0xc1, 0xdd, 0xd4, 0x65, 0x72, 0x53, 0x07, - 0x62, 0x13, 0x97, 0xde, 0x92, 0x41, 0xc4, 0x16, 0xb2, 0x90, 0xca, 0x12, 0xbb, 0xd6, 0x3a, 0x77, - 0xb1, 0x97, 0x70, 0xf4, 0x79, 0x78, 0x4c, 0x79, 0xc4, 0x63, 0xae, 0xa8, 0x96, 0x2d, 0x0e, 0x19, - 0x4a, 0x94, 0xc7, 0x16, 0xb2, 0xd1, 0x70, 0x5e, 0x7d, 0xf4, 0x3a, 0x8c, 0x0b, 0xce, 0x5d, 0x52, - 0x1c, 0x36, 0x6d, 0x27, 0x6e, 0x1a, 0x50, 0x9c, 0xc2, 0x96, 0xe1, 0xe9, 0x18, 0xf3, 0x2c, 0x29, - 0x94, 0x3a, 0xc3, 0xd3, 0xe9, 0x70, 0xdc, 0x51, 0x03, 0xcd, 0xc1, 0x04, 0x67, 0xad, 0x3c, 0x7f, - 0x93, 0xcf, 0x89, 0x70, 0xb7, 0x51, 0x5b, 0xea, 0xb6, 0x09, 0xc6, 0x69, 0x7c, 0xf4, 0x0a, 0x8c, - 0x3a, 0x61, 0x63, 0xcb, 0x8b, 0x49, 0x23, 0x6e, 0x87, 0xdc, 0x0f, 0x47, 0x33, 0x3e, 0x99, 0xd3, - 0x60, 0xd8, 0xc0, 0xb4, 0xdf, 0x87, 0x53, 0x19, 0x9e, 0x7a, 0x74, 0xe1, 0x38, 0x2d, 0x4f, 0x7e, - 0x53, 0xca, 0x42, 0x75, 0xae, 0xb6, 0x2c, 0xbf, 0x46, 0xc3, 0xa2, 0xab, 0x93, 0x79, 0xf4, 0x69, - 0x09, 0x5c, 0xd4, 0xea, 0x5c, 0x92, 0x00, 0x9c, 0xe0, 0xd8, 0xff, 0xab, 0x00, 0x13, 0x19, 
0xca, - 0x77, 0x96, 0x44, 0x24, 0x25, 0x7b, 0x24, 0x39, 0x43, 0xcc, 0x68, 0x87, 0x85, 0x23, 0x44, 0x3b, - 0x2c, 0xf6, 0x8a, 0x76, 0x38, 0xf0, 0x41, 0xa2, 0x1d, 0x9a, 0x23, 0x36, 0xd8, 0xd7, 0x88, 0x65, - 0x44, 0x48, 0x1c, 0x3a, 0x62, 0x84, 0x44, 0x63, 0xd0, 0x87, 0xfb, 0x18, 0xf4, 0x1f, 0x2a, 0xc0, - 0x64, 0xda, 0x48, 0xee, 0x04, 0xd4, 0xb1, 0x6f, 0x18, 0xea, 0xd8, 0xec, 0x94, 0x3c, 0x69, 0xd3, - 0xbd, 0x3c, 0xd5, 0x2c, 0x4e, 0xa9, 0x66, 0x3f, 0xd9, 0x17, 0xb5, 0xee, 0x6a, 0xda, 0xbf, 0x57, - 0x80, 0x33, 0xe9, 0x2a, 0x0b, 0x4d, 0xc7, 0xdb, 0x39, 0x81, 0xb1, 0xb9, 0x6d, 0x8c, 0xcd, 0x73, - 0xfd, 0x7c, 0x0d, 0xeb, 0x5a, 0xee, 0x00, 0xdd, 0x4b, 0x0d, 0xd0, 0xd5, 0xfe, 0x49, 0x76, 0x1f, - 0xa5, 0xaf, 0x17, 0xe1, 0x62, 0x66, 0xbd, 0x44, 0x9b, 0xb9, 0x64, 0x68, 0x33, 0xaf, 0xa5, 0xb4, - 0x99, 0x76, 0xf7, 0xda, 0xc7, 0xa3, 0xde, 0x14, 0x2e, 0x94, 0x2c, 0x22, 0xde, 0x43, 0xaa, 0x36, - 0x0d, 0x17, 0x4a, 0x45, 0x08, 0x9b, 0x74, 0xbf, 0x91, 0x54, 0x9a, 0xff, 0xd6, 0x82, 0x73, 0x99, - 0x73, 0x73, 0x02, 0x2a, 0xac, 0x55, 0x53, 0x85, 0xf5, 0x74, 0xdf, 0xab, 0x35, 0x47, 0xa7, 0xf5, - 0x1b, 0x03, 0x39, 0xdf, 0xc2, 0x04, 0xf4, 0xdb, 0x30, 0xe2, 0x34, 0x1a, 0x24, 0x8a, 0x56, 0x02, - 0x57, 0x45, 0x88, 0x7b, 0x8e, 0xc9, 0x59, 0x49, 0xf1, 0xe1, 0x7e, 0x65, 0x26, 0x4d, 0x22, 0x01, - 0x63, 0x9d, 0x82, 0x19, 0xd4, 0xb2, 0x70, 0xac, 0x41, 0x2d, 0xaf, 0x01, 0xec, 0x2a, 0x6e, 0x3d, - 0x2d, 0xe4, 0x6b, 0x7c, 0xbc, 0x86, 0x85, 0xbe, 0x00, 0xa5, 0x48, 0x5c, 0xe3, 0x62, 0x29, 0xbe, - 0xd0, 0xe7, 0x5c, 0x39, 0xeb, 0xa4, 0x69, 0xfa, 0xea, 0x2b, 0x7d, 0x88, 0x22, 0x89, 0xbe, 0x0d, - 0x26, 0x23, 0x1e, 0x0a, 0x66, 0xa1, 0xe9, 0x44, 0xcc, 0x0f, 0x42, 0xac, 0x42, 0xe6, 0x80, 0x5f, - 0x4f, 0xc1, 0x70, 0x07, 0x36, 0x5a, 0x92, 0x1f, 0xc5, 0xe2, 0xd6, 0xf0, 0x85, 0x79, 0x39, 0xf9, - 0x20, 0x91, 0xc2, 0xec, 0x74, 0x7a, 0xf8, 0xd9, 0xc0, 0x6b, 0x35, 0xd1, 0x17, 0x00, 0xe8, 0xf2, - 0x11, 0xba, 0x84, 0xe1, 0xfc, 0xc3, 0x93, 0x9e, 0x2a, 0x6e, 0xa6, 0xe5, 0x27, 0x73, 0x5e, 0xac, - 0x2a, 0x22, 0x58, 0x23, 0x68, 0xff, 0xd0, 0x00, 0x3c, 0xde, 0xe5, 0x8c, 0x44, 0x73, 0xe6, 0x13, - 0xe8, 0x33, 0x69, 0xe1, 0x7a, 0x26, 0xb3, 0xb2, 0x21, 0x6d, 0xa7, 0x96, 0x62, 0xe1, 0x03, 0x2f, - 0xc5, 0x1f, 0xb0, 0x34, 0xb5, 0x07, 0x37, 0xe6, 0xfb, 0xec, 0x11, 0xcf, 0xfe, 0x63, 0xd4, 0x83, - 0x6c, 0x64, 0x28, 0x13, 0xae, 0xf5, 0xdd, 0x9d, 0xbe, 0xb5, 0x0b, 0x27, 0xab, 0xfc, 0xfd, 0x92, - 0x05, 0x4f, 0x64, 0xf6, 0xd7, 0x30, 0xd9, 0xb8, 0x0a, 0xe5, 0x06, 0x2d, 0xd4, 0x7c, 0xd5, 0x12, - 0x27, 0x5e, 0x09, 0xc0, 0x09, 0x8e, 0x61, 0x99, 0x51, 0xe8, 0x69, 0x99, 0xf1, 0x2f, 0x2d, 0xe8, - 0xd8, 0x1f, 0x27, 0x70, 0x50, 0x2f, 0x9b, 0x07, 0xf5, 0xc7, 0xfb, 0x99, 0xcb, 0x9c, 0x33, 0xfa, - 0x8f, 0x27, 0xe0, 0x6c, 0x8e, 0xaf, 0xc6, 0x2e, 0x4c, 0x6d, 0x36, 0x88, 0xe9, 0x05, 0x28, 0x3e, - 0x26, 0xd3, 0x61, 0xb2, 0xab, 0xcb, 0x20, 0xcb, 0x47, 0x34, 0xd5, 0x81, 0x82, 0x3b, 0x9b, 0x40, - 0x5f, 0xb2, 0xe0, 0xb4, 0x73, 0x3f, 0xea, 0x48, 0x60, 0x2a, 0xd6, 0xcc, 0x8b, 0x99, 0x4a, 0x90, - 0x1e, 0x09, 0x4f, 0x79, 0x82, 0xa6, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, 0xc2, 0x22, 0x66, 0x28, 0x65, - 0xe7, 0xbb, 0xf8, 0xa9, 0x66, 0x39, 0xd5, 0xf0, 0x23, 0x5b, 0x42, 0xb0, 0xa2, 0x83, 0xde, 0x81, - 0xf2, 0xa6, 0xf4, 0x74, 0xcb, 0xb8, 0x12, 0x92, 0x81, 0xec, 0xee, 0xff, 0xc7, 0x1f, 0x28, 0x15, - 0x12, 0x4e, 0x88, 0xa2, 0xd7, 0xa1, 0xe8, 0x6f, 0x44, 0xdd, 0x72, 0x1c, 0xa5, 0x6c, 0x9a, 0xb8, - 0x37, 0xf8, 0xea, 0x52, 0x1d, 0xd3, 0x8a, 0xe8, 0x06, 0x14, 0xc3, 0x75, 0x57, 0x68, 0xf0, 0x32, - 0xcf, 0x70, 0x3c, 0x5f, 0xcd, 0xe9, 0x15, 0xa3, 0x84, 0xe7, 0xab, 0x98, 0x92, 0x40, 0x35, 0x18, - 0x64, 0x0e, 0x0e, 
0xe2, 0x3e, 0xc8, 0xe4, 0x7c, 0xbb, 0x38, 0x0a, 0x71, 0x97, 0x71, 0x86, 0x80, - 0x39, 0x21, 0xb4, 0x06, 0x43, 0x0d, 0x96, 0x0f, 0x47, 0x04, 0xac, 0xfe, 0x54, 0xa6, 0xae, 0xae, - 0x4b, 0xa2, 0x20, 0xa1, 0xba, 0x62, 0x18, 0x58, 0xd0, 0x62, 0x54, 0x49, 0x6b, 0x6b, 0x23, 0x12, - 0xf9, 0xdb, 0xb2, 0xa9, 0x76, 0xc9, 0x7f, 0x25, 0xa8, 0x32, 0x0c, 0x2c, 0x68, 0xa1, 0xcf, 0x40, - 0x61, 0xa3, 0x21, 0xfc, 0x1f, 0x32, 0x95, 0x76, 0xa6, 0x43, 0xff, 0xfc, 0xd0, 0xc1, 0x7e, 0xa5, - 0xb0, 0xb4, 0x80, 0x0b, 0x1b, 0x0d, 0xb4, 0x0a, 0xc3, 0x1b, 0xdc, 0x05, 0x58, 0xe8, 0xe5, 0x9e, - 0xca, 0xf6, 0x4e, 0xee, 0xf0, 0x12, 0xe6, 0x76, 0xfb, 0x02, 0x80, 0x25, 0x11, 0x16, 0x82, 0x53, - 0xb9, 0x32, 0x8b, 0x58, 0xd4, 0xb3, 0x47, 0x73, 0x3f, 0xe7, 0xf7, 0x73, 0xe2, 0x10, 0x8d, 0x35, - 0x8a, 0x74, 0x55, 0x3b, 0x32, 0x89, 0xa6, 0x88, 0xd5, 0x91, 0xb9, 0xaa, 0x7b, 0xe4, 0x17, 0xe5, - 0xab, 0x5a, 0x21, 0xe1, 0x84, 0x28, 0xda, 0x86, 0xb1, 0xdd, 0xa8, 0xb5, 0x45, 0xe4, 0x96, 0x66, - 0xa1, 0x3b, 0x72, 0xae, 0xb0, 0xbb, 0x02, 0xd1, 0x0b, 0xe3, 0xb6, 0xd3, 0xec, 0x38, 0x85, 0xd8, - 0xab, 0xf6, 0x5d, 0x9d, 0x18, 0x36, 0x69, 0xd3, 0xe1, 0x7f, 0xaf, 0x1d, 0xac, 0xef, 0xc5, 0x44, - 0x04, 0xaf, 0xce, 0x1c, 0xfe, 0x37, 0x39, 0x4a, 0xe7, 0xf0, 0x0b, 0x00, 0x96, 0x44, 0xd0, 0x5d, - 0x31, 0x3c, 0xec, 0xf4, 0x9c, 0xcc, 0x0f, 0xa6, 0x94, 0x99, 0xc5, 0x56, 0x1b, 0x14, 0x76, 0x5a, - 0x26, 0xa4, 0xd8, 0x29, 0xd9, 0xda, 0x0a, 0xe2, 0xc0, 0x4f, 0x9d, 0xd0, 0x53, 0xf9, 0xa7, 0x64, - 0x2d, 0x03, 0xbf, 0xf3, 0x94, 0xcc, 0xc2, 0xc2, 0x99, 0x6d, 0x21, 0x17, 0xc6, 0x5b, 0x41, 0x18, - 0xdf, 0x0f, 0x42, 0xb9, 0xbe, 0x50, 0x17, 0xbd, 0x82, 0x81, 0x29, 0x5a, 0x64, 0xc1, 0xd4, 0x4d, - 0x08, 0x4e, 0xd1, 0x44, 0x9f, 0x83, 0xe1, 0xa8, 0xe1, 0x34, 0xc9, 0xf2, 0xed, 0xe9, 0x53, 0xf9, - 0xd7, 0x4f, 0x9d, 0xa3, 0xe4, 0xac, 0x2e, 0x36, 0x39, 0x02, 0x05, 0x4b, 0x72, 0x68, 0x09, 0x06, - 0x59, 0x46, 0x04, 0x16, 0x77, 0x3b, 0x27, 0x26, 0x54, 0x87, 0x85, 0x29, 0x3f, 0x9b, 0x58, 0x31, - 0xe6, 0xd5, 0xe9, 0x1e, 0x10, 0xec, 0x75, 0x10, 0x4d, 0x9f, 0xc9, 0xdf, 0x03, 0x82, 0x2b, 0xbf, - 0x5d, 0xef, 0xb6, 0x07, 0x14, 0x12, 0x4e, 0x88, 0xd2, 0x93, 0x99, 0x9e, 0xa6, 0x67, 0xbb, 0x18, - 0xb4, 0xe4, 0x9e, 0xa5, 0xec, 0x64, 0xa6, 0x27, 0x29, 0x25, 0x61, 0xff, 0xee, 0x70, 0x27, 0xcf, - 0xc2, 0x04, 0xb2, 0xbf, 0x6c, 0x75, 0xbc, 0xd5, 0x7d, 0xba, 0x5f, 0xfd, 0xd0, 0x31, 0x72, 0xab, - 0x5f, 0xb2, 0xe0, 0x6c, 0x2b, 0xf3, 0x43, 0x04, 0x03, 0xd0, 0x9f, 0x9a, 0x89, 0x7f, 0xba, 0x8a, - 0x8d, 0x9f, 0x0d, 0xc7, 0x39, 0x2d, 0xa5, 0x25, 0x82, 0xe2, 0x07, 0x96, 0x08, 0x56, 0xa0, 0xc4, - 0x98, 0xcc, 0x1e, 0xf9, 0xe1, 0xd2, 0x82, 0x11, 0x63, 0x25, 0x16, 0x44, 0x45, 0xac, 0x48, 0xa0, - 0x1f, 0xb4, 0xe0, 0x42, 0xba, 0xeb, 0x98, 0x30, 0xb0, 0x88, 0x24, 0xcf, 0x65, 0xc1, 0x25, 0xf1, - 0xfd, 0x17, 0x6a, 0xdd, 0x90, 0x0f, 0x7b, 0x21, 0xe0, 0xee, 0x8d, 0xa1, 0x6a, 0x86, 0x30, 0x3a, - 0x64, 0x2a, 0xe0, 0xfb, 0x10, 0x48, 0x5f, 0x84, 0xd1, 0x9d, 0xa0, 0xed, 0xc7, 0xc2, 0xfe, 0x45, - 0x78, 0x2c, 0xb2, 0x07, 0xe7, 0x15, 0xad, 0x1c, 0x1b, 0x58, 0x29, 0x31, 0xb6, 0xf4, 0xd0, 0x62, - 0xec, 0xdb, 0xa9, 0x84, 0xf2, 0xe5, 0xfc, 0x88, 0x85, 0x42, 0xe2, 0x3f, 0x42, 0x5a, 0xf9, 0x93, - 0x95, 0x8d, 0x7e, 0xda, 0xca, 0x60, 0xea, 0xb9, 0xb4, 0xfc, 0x9a, 0x29, 0x2d, 0x5f, 0x4e, 0x4b, - 0xcb, 0x1d, 0xca, 0x57, 0x43, 0x50, 0xee, 0x3f, 0xec, 0x75, 0xbf, 0x71, 0xe4, 0xec, 0x26, 0x5c, - 0xea, 0x75, 0x2d, 0x31, 0x43, 0x28, 0x57, 0x3d, 0xb5, 0x25, 0x86, 0x50, 0xee, 0x72, 0x15, 0x33, - 0x48, 0xbf, 0x81, 0x46, 0xec, 0xff, 0x61, 0x41, 0xb1, 0x16, 0xb8, 0x27, 0xa0, 0x4c, 0xfe, 0xac, - 0xa1, 0x4c, 0x7e, 0x3c, 0x27, 0xd1, 0x7f, 
0xae, 0xea, 0x78, 0x31, 0xa5, 0x3a, 0xbe, 0x90, 0x47, - 0xa0, 0xbb, 0xa2, 0xf8, 0x27, 0x8a, 0x30, 0x52, 0x0b, 0x5c, 0x65, 0x85, 0xfc, 0x1b, 0x0f, 0x63, - 0x85, 0x9c, 0x1b, 0x16, 0x56, 0xa3, 0xcc, 0xec, 0xa7, 0xa4, 0x13, 0xde, 0x5f, 0x30, 0x63, 0xe4, - 0x7b, 0xc4, 0xdb, 0xdc, 0x8a, 0x89, 0x9b, 0xfe, 0x9c, 0x93, 0x33, 0x46, 0xfe, 0x6f, 0x16, 0x4c, - 0xa4, 0x5a, 0x47, 0x4d, 0x18, 0x6b, 0xea, 0x9a, 0x40, 0xb1, 0x4e, 0x1f, 0x4a, 0x89, 0x28, 0x8c, - 0x39, 0xb5, 0x22, 0x6c, 0x12, 0x47, 0xb3, 0x00, 0xea, 0xa5, 0x4e, 0x6a, 0xc0, 0x18, 0xd7, 0xaf, - 0x9e, 0xf2, 0x22, 0xac, 0x61, 0xa0, 0x97, 0x60, 0x24, 0x0e, 0x5a, 0x41, 0x33, 0xd8, 0xdc, 0xbb, - 0x49, 0x64, 0x68, 0x1b, 0x65, 0xa2, 0xb5, 0x96, 0x80, 0xb0, 0x8e, 0x67, 0xff, 0x54, 0x91, 0x7f, - 0xa8, 0x1f, 0x7b, 0xdf, 0x5c, 0x93, 0x1f, 0xed, 0x35, 0xf9, 0x75, 0x0b, 0x26, 0x69, 0xeb, 0xcc, - 0x5c, 0x44, 0x5e, 0xb6, 0x2a, 0xfd, 0x8e, 0xd5, 0x25, 0xfd, 0xce, 0x65, 0x7a, 0x76, 0xb9, 0x41, - 0x3b, 0x16, 0x1a, 0x34, 0xed, 0x70, 0xa2, 0xa5, 0x58, 0x40, 0x05, 0x1e, 0x09, 0x43, 0xe1, 0x03, - 0xa5, 0xe3, 0x91, 0x30, 0xc4, 0x02, 0x2a, 0xb3, 0xf3, 0x0c, 0xe4, 0x64, 0xe7, 0x61, 0x81, 0xfa, - 0x84, 0x61, 0x81, 0x60, 0x7b, 0xb4, 0x40, 0x7d, 0xd2, 0xe2, 0x20, 0xc1, 0xb1, 0x7f, 0xbe, 0x08, - 0xa3, 0xb5, 0xc0, 0x4d, 0xde, 0xca, 0x5e, 0x34, 0xde, 0xca, 0x2e, 0xa5, 0xde, 0xca, 0x26, 0x75, - 0xdc, 0x6f, 0xbe, 0x8c, 0x7d, 0x58, 0x2f, 0x63, 0xff, 0xc2, 0x62, 0xb3, 0x56, 0x5d, 0xad, 0x8b, - 0xec, 0xc0, 0xcf, 0xc3, 0x08, 0x3b, 0x90, 0x98, 0xd3, 0x9d, 0x7c, 0x40, 0x62, 0x81, 0xf7, 0x57, - 0x93, 0x62, 0xac, 0xe3, 0xa0, 0x2b, 0x50, 0x8a, 0x88, 0x13, 0x36, 0xb6, 0xd4, 0x19, 0x27, 0x9e, - 0x57, 0x78, 0x19, 0x56, 0x50, 0xf4, 0x66, 0x12, 0x23, 0xae, 0x98, 0x9f, 0xe7, 0x56, 0xef, 0x0f, - 0xdf, 0x22, 0xf9, 0x81, 0xe1, 0xec, 0x7b, 0x80, 0x3a, 0xf1, 0xfb, 0x08, 0x8e, 0x54, 0x31, 0x83, - 0x23, 0x95, 0x3b, 0x02, 0x23, 0xfd, 0x99, 0x05, 0xe3, 0xb5, 0xc0, 0xa5, 0x5b, 0xf7, 0x1b, 0x69, - 0x9f, 0xea, 0x01, 0x32, 0x87, 0xba, 0x04, 0xc8, 0xfc, 0xfb, 0x16, 0x0c, 0xd7, 0x02, 0xf7, 0x04, - 0xf4, 0xee, 0xaf, 0x99, 0x7a, 0xf7, 0xc7, 0x72, 0x96, 0x44, 0x8e, 0xaa, 0xfd, 0x17, 0x8b, 0x30, - 0x46, 0xfb, 0x19, 0x6c, 0xca, 0x59, 0x32, 0x46, 0xc4, 0xea, 0x63, 0x44, 0x28, 0x9b, 0x1b, 0x34, - 0x9b, 0xc1, 0xfd, 0xf4, 0x8c, 0x2d, 0xb1, 0x52, 0x2c, 0xa0, 0xe8, 0x59, 0x28, 0xb5, 0x42, 0xb2, - 0xeb, 0x05, 0x82, 0x7f, 0xd4, 0x5e, 0x31, 0x6a, 0xa2, 0x1c, 0x2b, 0x0c, 0x2a, 0x77, 0x45, 0x9e, - 0xdf, 0x20, 0x32, 0xc9, 0xf6, 0x00, 0xcb, 0xc3, 0xc5, 0x23, 0x5f, 0x6b, 0xe5, 0xd8, 0xc0, 0x42, - 0xf7, 0xa0, 0xcc, 0xfe, 0xb3, 0x13, 0xe5, 0xe8, 0x79, 0x83, 0x44, 0xba, 0x09, 0x41, 0x00, 0x27, - 0xb4, 0xd0, 0x35, 0x80, 0x58, 0x46, 0x47, 0x8e, 0x44, 0x8c, 0x1b, 0xc5, 0x6b, 0xab, 0xb8, 0xc9, - 0x11, 0xd6, 0xb0, 0xd0, 0x33, 0x50, 0x8e, 0x1d, 0xaf, 0x79, 0xcb, 0xf3, 0x49, 0xc4, 0x54, 0xce, - 0x45, 0x99, 0x4d, 0x42, 0x14, 0xe2, 0x04, 0x4e, 0x79, 0x1d, 0xe6, 0x00, 0xce, 0xb3, 0x8e, 0x95, - 0x18, 0x36, 0xe3, 0x75, 0x6e, 0xa9, 0x52, 0xac, 0x61, 0xd8, 0xaf, 0xc0, 0x99, 0x5a, 0xe0, 0xd6, - 0x82, 0x30, 0x5e, 0x0a, 0xc2, 0xfb, 0x4e, 0xe8, 0xca, 0xf9, 0xab, 0xc8, 0xc4, 0x06, 0xf4, 0xec, - 0x19, 0xe4, 0x3b, 0xd3, 0x48, 0x59, 0xf0, 0x02, 0xe3, 0x76, 0x8e, 0xe8, 0xd4, 0xd1, 0x60, 0xf7, - 0xae, 0x4a, 0x30, 0x78, 0xdd, 0x89, 0x09, 0xba, 0xcd, 0x92, 0x92, 0x25, 0x57, 0x90, 0xa8, 0xfe, - 0xb4, 0x96, 0x94, 0x2c, 0x01, 0x66, 0xde, 0x59, 0x66, 0x7d, 0xfb, 0x67, 0x07, 0xd8, 0x69, 0x94, - 0xca, 0xb7, 0x87, 0xbe, 0x08, 0xe3, 0x11, 0xb9, 0xe5, 0xf9, 0xed, 0x07, 0x52, 0x08, 0xef, 0xe2, - 0x96, 0x53, 0x5f, 0xd4, 0x31, 0xb9, 0x2a, 0xcf, 0x2c, 0xc3, 0x29, 
0x6a, 0x74, 0x9e, 0xc2, 0xb6, - 0x3f, 0x17, 0xdd, 0x89, 0x48, 0x28, 0xf2, 0xbd, 0xb1, 0x79, 0xc2, 0xb2, 0x10, 0x27, 0x70, 0xba, - 0x2e, 0xd9, 0x9f, 0xd5, 0xc0, 0xc7, 0x41, 0x10, 0xcb, 0x95, 0xcc, 0x32, 0x06, 0x69, 0xe5, 0xd8, - 0xc0, 0x42, 0x4b, 0x80, 0xa2, 0x76, 0xab, 0xd5, 0x64, 0x0f, 0xfb, 0x4e, 0xf3, 0x7a, 0x18, 0xb4, - 0x5b, 0xfc, 0xd5, 0xb3, 0xc8, 0x03, 0x13, 0xd6, 0x3b, 0xa0, 0x38, 0xa3, 0x06, 0x3d, 0x7d, 0x36, - 0x22, 0xf6, 0x9b, 0xad, 0xee, 0xa2, 0x50, 0xaf, 0xd7, 0x59, 0x11, 0x96, 0x30, 0xba, 0x98, 0x58, - 0xf3, 0x1c, 0x73, 0x28, 0x59, 0x4c, 0x58, 0x95, 0x62, 0x0d, 0x03, 0x2d, 0xc2, 0x70, 0xb4, 0x17, - 0x35, 0x62, 0x11, 0x91, 0x29, 0x27, 0x73, 0x67, 0x9d, 0xa1, 0x68, 0xd9, 0x24, 0x78, 0x15, 0x2c, - 0xeb, 0xa2, 0x1d, 0x18, 0xbf, 0xef, 0xf9, 0x6e, 0x70, 0x3f, 0x92, 0x13, 0x55, 0xca, 0x57, 0x8d, - 0xde, 0xe3, 0x98, 0xa9, 0xc9, 0x36, 0xe6, 0xed, 0x9e, 0x41, 0x0c, 0xa7, 0x88, 0xdb, 0xdf, 0xc5, - 0xee, 0x5e, 0x96, 0x8c, 0x2c, 0x6e, 0x87, 0x04, 0xed, 0xc0, 0x58, 0x8b, 0xad, 0x30, 0x11, 0x2a, - 0x5b, 0x2c, 0x93, 0x17, 0xfb, 0x14, 0xa2, 0xef, 0xd3, 0x73, 0x4d, 0x29, 0xb9, 0x98, 0x74, 0x52, - 0xd3, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0xdf, 0x11, 0x3b, 0xe2, 0xeb, 0x5c, 0x32, 0x1e, 0x16, 0x96, - 0xcc, 0x42, 0x0c, 0x98, 0xc9, 0x57, 0xd1, 0x24, 0x03, 0x28, 0xac, 0xa1, 0xb1, 0xac, 0x8b, 0xde, - 0x64, 0x8f, 0xe2, 0xfc, 0x5c, 0xed, 0x95, 0x13, 0x9a, 0x63, 0x19, 0xef, 0xdf, 0xa2, 0x22, 0xd6, - 0x88, 0xa0, 0x5b, 0x30, 0x26, 0x72, 0x57, 0x09, 0x1d, 0x5c, 0xd1, 0xd0, 0xb1, 0x8c, 0x61, 0x1d, - 0x78, 0x98, 0x2e, 0xc0, 0x66, 0x65, 0xb4, 0x09, 0x17, 0xb4, 0x44, 0x8e, 0xd7, 0x43, 0x87, 0x3d, - 0x94, 0x7a, 0x6c, 0xcf, 0x6a, 0xc7, 0xf4, 0x13, 0x07, 0xfb, 0x95, 0x0b, 0x6b, 0xdd, 0x10, 0x71, - 0x77, 0x3a, 0xe8, 0x36, 0x9c, 0xe1, 0x0e, 0x83, 0x55, 0xe2, 0xb8, 0x4d, 0xcf, 0x57, 0xf7, 0x00, - 0x5f, 0xf6, 0xe7, 0x0e, 0xf6, 0x2b, 0x67, 0xe6, 0xb2, 0x10, 0x70, 0x76, 0x3d, 0xf4, 0x1a, 0x94, - 0x5d, 0x3f, 0x12, 0x63, 0x30, 0x64, 0xe4, 0x28, 0x2d, 0x57, 0x57, 0xeb, 0xea, 0xfb, 0x93, 0x3f, - 0x38, 0xa9, 0x80, 0x36, 0xb9, 0x1e, 0x4e, 0x89, 0xbd, 0xc3, 0xf9, 0xf9, 0xe8, 0xc5, 0x92, 0x30, - 0x5c, 0x86, 0xb8, 0x02, 0x5a, 0x99, 0xdc, 0x1a, 0xde, 0x44, 0x06, 0x61, 0xf4, 0x06, 0x20, 0xca, - 0x17, 0x7a, 0x0d, 0x32, 0xd7, 0x60, 0x11, 0xcb, 0x99, 0xda, 0xb2, 0x64, 0xb8, 0x68, 0xa0, 0x7a, - 0x07, 0x06, 0xce, 0xa8, 0x85, 0x6e, 0xd0, 0x73, 0x53, 0x2f, 0x15, 0xa6, 0xc3, 0x52, 0x96, 0x98, - 0xae, 0x92, 0x56, 0x48, 0x1a, 0x4e, 0x4c, 0x5c, 0x93, 0x22, 0x4e, 0xd5, 0xa3, 0x57, 0xb7, 0x4a, - 0x5e, 0x04, 0x66, 0x94, 0x8e, 0xce, 0x04, 0x46, 0x54, 0x0c, 0xdf, 0x0a, 0xa2, 0x78, 0x95, 0xc4, - 0xf7, 0x83, 0x70, 0x5b, 0x04, 0x45, 0x4b, 0xe2, 0x73, 0x26, 0x20, 0xac, 0xe3, 0x51, 0xb6, 0x9b, - 0xbd, 0x4a, 0x2f, 0x57, 0xd9, 0x83, 0x60, 0x29, 0xd9, 0x27, 0x37, 0x78, 0x31, 0x96, 0x70, 0x89, - 0xba, 0x5c, 0x5b, 0x60, 0x8f, 0x7b, 0x29, 0xd4, 0xe5, 0xda, 0x02, 0x96, 0x70, 0x44, 0x3a, 0xf3, - 0xbf, 0x8e, 0xe7, 0x2b, 0x51, 0x3b, 0x6f, 0x9f, 0x3e, 0x53, 0xc0, 0xfa, 0x30, 0xa9, 0x32, 0xcf, - 0xf2, 0x68, 0x71, 0xd1, 0xf4, 0x04, 0x5b, 0x24, 0xfd, 0x87, 0x9a, 0x53, 0x6a, 0xe9, 0xe5, 0x14, - 0x25, 0xdc, 0x41, 0xdb, 0x88, 0x9b, 0x32, 0xd9, 0x33, 0xf9, 0xd4, 0x55, 0x28, 0x47, 0xed, 0x75, - 0x37, 0xd8, 0x71, 0x3c, 0x9f, 0xbd, 0xc5, 0x69, 0x3c, 0x5d, 0x5d, 0x02, 0x70, 0x82, 0x83, 0x96, - 0xa0, 0xe4, 0x48, 0x9d, 0x33, 0xca, 0x0f, 0x92, 0xa0, 0x34, 0xcd, 0xdc, 0x6f, 0x58, 0x6a, 0x99, - 0x55, 0x5d, 0xf4, 0x2a, 0x8c, 0x09, 0x37, 0x31, 0x1e, 0x3a, 0x82, 0xbd, 0x95, 0x69, 0x7e, 0x00, - 0x75, 0x1d, 0x88, 0x4d, 0x5c, 0xf4, 0x05, 0x18, 0xa7, 0x54, 0x92, 0x83, 0x6d, 0xfa, 0x74, 
0x3f, - 0x27, 0xa2, 0x96, 0x54, 0x44, 0xaf, 0x8c, 0x53, 0xc4, 0x90, 0x0b, 0xe7, 0x9d, 0x76, 0x1c, 0x30, - 0xbd, 0xbd, 0xb9, 0xfe, 0xd7, 0x82, 0x6d, 0xe2, 0xb3, 0x27, 0xb3, 0xd2, 0xfc, 0xa5, 0x83, 0xfd, - 0xca, 0xf9, 0xb9, 0x2e, 0x78, 0xb8, 0x2b, 0x15, 0x74, 0x07, 0x46, 0xe2, 0xa0, 0xc9, 0x2c, 0xf2, - 0xe9, 0x85, 0x78, 0x36, 0x3f, 0xee, 0xd0, 0x9a, 0x42, 0xd3, 0x75, 0x56, 0xaa, 0x2a, 0xd6, 0xe9, - 0xa0, 0x35, 0xbe, 0xc7, 0x58, 0x44, 0x56, 0x12, 0x4d, 0x3f, 0x96, 0x3f, 0x30, 0x2a, 0x70, 0xab, - 0xb9, 0x05, 0x45, 0x4d, 0xac, 0x93, 0x41, 0xd7, 0x61, 0xaa, 0x15, 0x7a, 0x01, 0x5b, 0xd8, 0xea, - 0xcd, 0x64, 0xda, 0xcc, 0x23, 0x51, 0x4b, 0x23, 0xe0, 0xce, 0x3a, 0x54, 0xa6, 0x95, 0x85, 0xd3, - 0xe7, 0x78, 0x52, 0x32, 0xce, 0xe7, 0xf3, 0x32, 0xac, 0xa0, 0x68, 0x85, 0x9d, 0xcb, 0x5c, 0xfa, - 0x9c, 0x9e, 0xc9, 0x0f, 0x2e, 0xa1, 0x4b, 0xa9, 0x9c, 0x3d, 0x53, 0x7f, 0x71, 0x42, 0x81, 0xde, - 0x1b, 0xd1, 0x96, 0x13, 0x92, 0x5a, 0x18, 0x34, 0x48, 0xa4, 0x05, 0x81, 0x7e, 0x9c, 0x07, 0x8e, - 0xa4, 0xf7, 0x46, 0x3d, 0x0b, 0x01, 0x67, 0xd7, 0x43, 0xae, 0x96, 0x8b, 0x9b, 0x72, 0xbd, 0xd1, - 0xf4, 0xf9, 0x2e, 0xf6, 0x4d, 0x29, 0x16, 0x39, 0x59, 0x8b, 0x46, 0x71, 0x84, 0x53, 0x34, 0xd1, - 0xb7, 0xc1, 0xa4, 0x88, 0xb3, 0x94, 0x8c, 0xfb, 0x85, 0xc4, 0x70, 0x12, 0xa7, 0x60, 0xb8, 0x03, - 0x9b, 0x87, 0xbe, 0x76, 0xd6, 0x9b, 0x44, 0x2c, 0xc2, 0x5b, 0x9e, 0xbf, 0x1d, 0x4d, 0x5f, 0x64, - 0x5f, 0x2d, 0x42, 0x5f, 0xa7, 0xa1, 0x38, 0xa3, 0x06, 0x5a, 0x83, 0xc9, 0x56, 0x48, 0xc8, 0x0e, - 0xe3, 0xb1, 0xc4, 0x75, 0x59, 0xe1, 0xde, 0xc0, 0xb4, 0x27, 0xb5, 0x14, 0xec, 0x30, 0xa3, 0x0c, - 0x77, 0x50, 0x98, 0xf9, 0x56, 0x98, 0xea, 0xb8, 0x0f, 0x8f, 0x14, 0x84, 0xfe, 0x4f, 0x07, 0xa1, - 0xac, 0x5e, 0x16, 0xd0, 0x55, 0xf3, 0xc1, 0xe8, 0x5c, 0xfa, 0xc1, 0xa8, 0x44, 0x05, 0x1c, 0xfd, - 0x8d, 0x68, 0xcd, 0xb0, 0x36, 0x2c, 0xe4, 0xa7, 0x7c, 0xd3, 0x45, 0x94, 0x9e, 0x9e, 0x8b, 0x9a, - 0xa2, 0xa8, 0xd8, 0xf7, 0xcb, 0xd3, 0x40, 0x57, 0xdd, 0x53, 0x9f, 0x19, 0x97, 0xd1, 0x93, 0x54, - 0xca, 0x73, 0x97, 0x6b, 0xe9, 0x14, 0xa4, 0x35, 0x5a, 0x88, 0x39, 0x8c, 0x49, 0xc3, 0x94, 0x79, - 0x63, 0xd2, 0xf0, 0xf0, 0x43, 0x4a, 0xc3, 0x92, 0x00, 0x4e, 0x68, 0xa1, 0x26, 0x4c, 0x35, 0xcc, - 0xec, 0xb1, 0xca, 0x5b, 0xf1, 0xc9, 0x9e, 0x79, 0x5c, 0xdb, 0x5a, 0xaa, 0xbe, 0x85, 0x34, 0x15, - 0xdc, 0x49, 0x18, 0xbd, 0x0a, 0xa5, 0xf7, 0x82, 0x88, 0x2d, 0x75, 0xc1, 0xc1, 0x48, 0xaf, 0xae, - 0xd2, 0x9b, 0xb7, 0xeb, 0xac, 0xfc, 0x70, 0xbf, 0x32, 0x52, 0x0b, 0x5c, 0xf9, 0x17, 0xab, 0x0a, - 0xe8, 0x01, 0x9c, 0x31, 0xce, 0x7d, 0xd5, 0x5d, 0xe8, 0xbf, 0xbb, 0x17, 0x44, 0x73, 0x67, 0x96, - 0xb3, 0x28, 0xe1, 0xec, 0x06, 0xe8, 0x61, 0xea, 0x07, 0x22, 0xf3, 0xb2, 0xe4, 0x92, 0x18, 0x33, - 0x54, 0xd6, 0x7d, 0xfa, 0x53, 0x08, 0xb8, 0xb3, 0x8e, 0xfd, 0x2b, 0xfc, 0x21, 0x46, 0xa8, 0x6b, - 0x49, 0xd4, 0x6e, 0x9e, 0x44, 0x62, 0xaf, 0x45, 0x43, 0x93, 0xfc, 0xd0, 0x8f, 0x7d, 0xbf, 0x6e, - 0xb1, 0xc7, 0xbe, 0x35, 0xb2, 0xd3, 0x6a, 0x3a, 0xf1, 0x49, 0x78, 0x13, 0xbd, 0x09, 0xa5, 0x58, - 0xb4, 0xd6, 0x2d, 0x17, 0x99, 0xd6, 0x29, 0xf6, 0xe0, 0xa9, 0xf8, 0x27, 0x59, 0x8a, 0x15, 0x19, - 0xfb, 0x9f, 0xf2, 0x19, 0x90, 0x90, 0x13, 0xd0, 0xea, 0x55, 0x4d, 0xad, 0x5e, 0xa5, 0xc7, 0x17, - 0xe4, 0x68, 0xf7, 0xfe, 0x89, 0xd9, 0x6f, 0x26, 0xaa, 0x7e, 0xd4, 0x5f, 0x99, 0xed, 0x1f, 0xb6, - 0xe0, 0x74, 0x96, 0x59, 0x16, 0xe5, 0x79, 0xb9, 0xa0, 0xac, 0x5e, 0xdd, 0xd5, 0x08, 0xde, 0x15, - 0xe5, 0x58, 0x61, 0xf4, 0x9d, 0xe6, 0xe3, 0x68, 0x61, 0xef, 0x6e, 0xc3, 0x58, 0x2d, 0x24, 0xda, - 0x1d, 0xf0, 0x3a, 0x77, 0x0f, 0xe4, 0xfd, 0x79, 0xf6, 0xc8, 0xae, 0x81, 0xf6, 0xcf, 0x14, 0xe0, - 0x34, 0x7f, 0x36, 
0x9b, 0xdb, 0x0d, 0x3c, 0xb7, 0x16, 0xb8, 0x22, 0x45, 0xcb, 0x5b, 0x30, 0xda, - 0xd2, 0xb4, 0x1b, 0xdd, 0x02, 0x6f, 0xe9, 0x5a, 0x90, 0x44, 0xca, 0xd4, 0x4b, 0xb1, 0x41, 0x0b, - 0xb9, 0x30, 0x4a, 0x76, 0xbd, 0x86, 0x7a, 0x7b, 0x29, 0x1c, 0xf9, 0x6e, 0x50, 0xad, 0x2c, 0x6a, - 0x74, 0xb0, 0x41, 0xf5, 0x11, 0x64, 0xed, 0xb3, 0x7f, 0xc4, 0x82, 0xc7, 0x72, 0xc2, 0x74, 0xd1, - 0xe6, 0xee, 0xb3, 0x07, 0x4a, 0x91, 0x00, 0x4c, 0x35, 0xc7, 0x9f, 0x2d, 0xb1, 0x80, 0xa2, 0xcf, - 0x01, 0xf0, 0x67, 0x47, 0x2a, 0x74, 0xf5, 0x8a, 0x67, 0x64, 0x84, 0x62, 0xd1, 0x42, 0x68, 0xc8, - 0xfa, 0x58, 0xa3, 0x65, 0xff, 0x64, 0x11, 0x06, 0xd9, 0x33, 0x17, 0x5a, 0x82, 0xe1, 0x2d, 0x1e, - 0xb8, 0xba, 0x9f, 0x18, 0xd9, 0x89, 0xf4, 0xca, 0x0b, 0xb0, 0xac, 0x8c, 0x56, 0xe0, 0x14, 0x0f, - 0xfc, 0xdd, 0xac, 0x92, 0xa6, 0xb3, 0x27, 0x95, 0x20, 0x3c, 0x69, 0x96, 0x0a, 0x07, 0xb2, 0xdc, - 0x89, 0x82, 0xb3, 0xea, 0xa1, 0xd7, 0x61, 0x9c, 0x72, 0x8d, 0x41, 0x3b, 0x96, 0x94, 0x78, 0xc8, - 0x6f, 0xc5, 0xa6, 0xae, 0x19, 0x50, 0x9c, 0xc2, 0xa6, 0xe2, 0x5c, 0xab, 0x43, 0xdd, 0x33, 0x98, - 0x88, 0x73, 0xa6, 0x8a, 0xc7, 0xc4, 0x65, 0xf6, 0x58, 0x6d, 0x66, 0x7d, 0xb6, 0xb6, 0x15, 0x92, - 0x68, 0x2b, 0x68, 0xba, 0x22, 0xe7, 0x7a, 0x62, 0x8f, 0x95, 0x82, 0xe3, 0x8e, 0x1a, 0x94, 0xca, - 0x86, 0xe3, 0x35, 0xdb, 0x21, 0x49, 0xa8, 0x0c, 0x99, 0x54, 0x96, 0x52, 0x70, 0xdc, 0x51, 0x83, - 0xae, 0xa3, 0x33, 0x22, 0x09, 0xba, 0x0c, 0x52, 0xa0, 0x8c, 0xec, 0x86, 0xa5, 0xbb, 0x56, 0x97, - 0x28, 0x3d, 0xc2, 0x0c, 0x49, 0xa5, 0x51, 0xd7, 0x94, 0xa2, 0xc2, 0x51, 0x4b, 0x52, 0x79, 0x98, - 0x54, 0xdc, 0xdf, 0x5f, 0x80, 0x53, 0x19, 0xc6, 0xbc, 0xfc, 0xa8, 0xda, 0xf4, 0xa2, 0x58, 0x25, - 0x06, 0xd2, 0x8e, 0x2a, 0x5e, 0x8e, 0x15, 0x06, 0xdd, 0x0f, 0xfc, 0x30, 0x4c, 0x1f, 0x80, 0xc2, - 0x58, 0x4e, 0x40, 0x8f, 0x98, 0x62, 0xe7, 0x12, 0x0c, 0xb4, 0x23, 0x22, 0xe3, 0x6b, 0xa9, 0xf3, - 0x9b, 0xa9, 0xc9, 0x19, 0x84, 0xb2, 0xa6, 0x9b, 0x4a, 0x43, 0xad, 0xb1, 0xa6, 0x5c, 0xed, 0xcc, - 0x61, 0xb4, 0x73, 0x31, 0xf1, 0x1d, 0x3f, 0x16, 0x0c, 0x6c, 0x12, 0x15, 0x86, 0x95, 0x62, 0x01, - 0xb5, 0xbf, 0x52, 0x84, 0x73, 0xb9, 0xe6, 0xfd, 0xb4, 0xeb, 0x3b, 0x81, 0xef, 0xc5, 0x81, 0x7a, - 0x6a, 0xe5, 0x91, 0x60, 0x48, 0x6b, 0x6b, 0x45, 0x94, 0x63, 0x85, 0x81, 0x2e, 0xcb, 0xb4, 0xfd, - 0xe9, 0x14, 0x49, 0xf3, 0x55, 0x23, 0x73, 0x7f, 0xbf, 0xe9, 0xe7, 0x9e, 0x84, 0x81, 0x56, 0x10, - 0x34, 0xd3, 0x87, 0x16, 0xed, 0x6e, 0x10, 0x34, 0x31, 0x03, 0xa2, 0x4f, 0x88, 0xf1, 0x4a, 0xbd, - 0x2d, 0x62, 0xc7, 0x0d, 0x22, 0x6d, 0xd0, 0x9e, 0x86, 0xe1, 0x6d, 0xb2, 0x17, 0x7a, 0xfe, 0x66, - 0xfa, 0xcd, 0xf9, 0x26, 0x2f, 0xc6, 0x12, 0x6e, 0x26, 0xcc, 0x18, 0x3e, 0xee, 0xbc, 0x71, 0xa5, - 0x9e, 0x57, 0xe0, 0x0f, 0x14, 0x61, 0x02, 0xcf, 0x57, 0xbf, 0x39, 0x11, 0x77, 0x3a, 0x27, 0xe2, - 0xb8, 0xf3, 0xc6, 0xf5, 0x9e, 0x8d, 0x5f, 0xb4, 0x60, 0x82, 0x05, 0x95, 0x16, 0xf1, 0x47, 0xbc, - 0xc0, 0x3f, 0x01, 0x16, 0xef, 0x49, 0x18, 0x0c, 0x69, 0xa3, 0xe9, 0xdc, 0x48, 0xac, 0x27, 0x98, - 0xc3, 0xd0, 0x79, 0x18, 0x60, 0x5d, 0xa0, 0x93, 0x37, 0xca, 0xd3, 0x4a, 0x54, 0x9d, 0xd8, 0xc1, - 0xac, 0x94, 0x39, 0xd5, 0x63, 0xd2, 0x6a, 0x7a, 0xbc, 0xd3, 0xc9, 0xc3, 0xca, 0x47, 0xc3, 0xa9, - 0x3e, 0xb3, 0x6b, 0x1f, 0xcc, 0xa9, 0x3e, 0x9b, 0x64, 0x77, 0xf1, 0xe9, 0x8f, 0x0a, 0x70, 0x31, - 0xb3, 0x5e, 0xdf, 0x4e, 0xf5, 0xdd, 0x6b, 0x1f, 0x8f, 0xe9, 0x50, 0xb6, 0x45, 0x4f, 0xf1, 0x04, - 0x2d, 0x7a, 0x06, 0xfa, 0xe5, 0x30, 0x07, 0xfb, 0xf0, 0x75, 0xcf, 0x1c, 0xb2, 0x8f, 0x88, 0xaf, - 0x7b, 0x66, 0xdf, 0x72, 0xc4, 0xbf, 0x3f, 0x2f, 0xe4, 0x7c, 0x0b, 0x13, 0x04, 0xaf, 0xd0, 0x73, - 0x86, 0x01, 0x23, 0xc1, 0x31, 0x8f, 0xf2, 
0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 0x9e, 0xe6, 0x35, - 0x5e, 0xc8, 0x4f, 0x15, 0x9a, 0xdb, 0xd4, 0xac, 0xf9, 0x0e, 0xa6, 0x86, 0x20, 0xc3, 0x83, 0x7c, - 0x45, 0x13, 0xde, 0x8b, 0xfd, 0x0b, 0xef, 0xa3, 0xd9, 0x82, 0x3b, 0x9a, 0x83, 0x89, 0x1d, 0xcf, - 0xa7, 0xc7, 0xe6, 0x9e, 0xc9, 0xb2, 0xaa, 0x20, 0x2a, 0x2b, 0x26, 0x18, 0xa7, 0xf1, 0x67, 0x5e, - 0x85, 0xb1, 0x87, 0x57, 0x5b, 0x7e, 0xbd, 0x08, 0x8f, 0x77, 0xd9, 0xf6, 0xfc, 0xac, 0x37, 0xe6, - 0x40, 0x3b, 0xeb, 0x3b, 0xe6, 0xa1, 0x06, 0xa7, 0x37, 0xda, 0xcd, 0xe6, 0x1e, 0x33, 0x9a, 0x25, - 0xae, 0xc4, 0x10, 0x3c, 0xe5, 0x79, 0x99, 0xc8, 0x63, 0x29, 0x03, 0x07, 0x67, 0xd6, 0x44, 0x6f, - 0x00, 0x0a, 0x44, 0x9e, 0xe2, 0xeb, 0xc4, 0x17, 0xaf, 0x0b, 0x6c, 0xe0, 0x8b, 0xc9, 0x66, 0xbc, - 0xdd, 0x81, 0x81, 0x33, 0x6a, 0x51, 0xe1, 0x80, 0xde, 0x4a, 0x7b, 0xaa, 0x5b, 0x29, 0xe1, 0x00, - 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0xeb, 0x30, 0xe5, 0xec, 0x3a, 0x1e, 0x0f, 0x2e, 0x28, 0x09, 0x70, - 0xe9, 0x40, 0x29, 0xcb, 0xe6, 0xd2, 0x08, 0xb8, 0xb3, 0x4e, 0xca, 0xaf, 0x7c, 0x28, 0xdf, 0xaf, - 0xbc, 0xfb, 0xb9, 0xd8, 0x4b, 0xf7, 0x6b, 0xff, 0x67, 0x8b, 0x5e, 0x5f, 0x9c, 0xc9, 0x37, 0xc3, - 0x23, 0xbd, 0xca, 0xcc, 0x62, 0xb8, 0x32, 0x50, 0x73, 0xf1, 0x3e, 0xa3, 0x99, 0xc5, 0x24, 0x40, - 0x6c, 0xe2, 0xf2, 0x05, 0x11, 0x25, 0x9e, 0x45, 0x06, 0x8b, 0x2f, 0x42, 0x44, 0x28, 0x0c, 0xf4, - 0x79, 0x18, 0x76, 0xbd, 0x5d, 0x2f, 0x0a, 0x42, 0xb1, 0x59, 0x8e, 0xe8, 0x9f, 0x91, 0x9c, 0x83, - 0x55, 0x4e, 0x06, 0x4b, 0x7a, 0xf6, 0x0f, 0x14, 0x60, 0x4c, 0xb6, 0xf8, 0x66, 0x3b, 0x88, 0x9d, - 0x13, 0xb8, 0x96, 0xaf, 0x1b, 0xd7, 0xf2, 0x27, 0xba, 0xc5, 0xc9, 0x60, 0x5d, 0xca, 0xbd, 0x8e, - 0x6f, 0xa7, 0xae, 0xe3, 0xa7, 0x7a, 0x93, 0xea, 0x7e, 0x0d, 0xff, 0x33, 0x0b, 0xa6, 0x0c, 0xfc, - 0x13, 0xb8, 0x0d, 0x96, 0xcc, 0xdb, 0xe0, 0x89, 0x9e, 0xdf, 0x90, 0x73, 0x0b, 0x7c, 0x6f, 0x31, - 0xd5, 0x77, 0x76, 0xfa, 0xbf, 0x07, 0x03, 0x5b, 0x4e, 0xe8, 0x76, 0x8b, 0xc7, 0xdb, 0x51, 0x69, - 0xf6, 0x86, 0x13, 0xba, 0xfc, 0x0c, 0x7f, 0x56, 0x25, 0xfb, 0x74, 0x42, 0xb7, 0xa7, 0x23, 0x1d, - 0x6b, 0x0a, 0xbd, 0x02, 0x43, 0x51, 0x23, 0x68, 0x29, 0x33, 0xd7, 0x4b, 0x3c, 0x11, 0x28, 0x2d, - 0x39, 0xdc, 0xaf, 0x20, 0xb3, 0x39, 0x5a, 0x8c, 0x05, 0x3e, 0x7a, 0x0b, 0xc6, 0xd8, 0x2f, 0x65, - 0x7f, 0x51, 0xcc, 0xcf, 0x02, 0x51, 0xd7, 0x11, 0xb9, 0x19, 0x8f, 0x51, 0x84, 0x4d, 0x52, 0x33, - 0x9b, 0x50, 0x56, 0x9f, 0xf5, 0x48, 0x1d, 0xa0, 0xfe, 0x43, 0x11, 0x4e, 0x65, 0xac, 0x39, 0x14, - 0x19, 0x33, 0xf1, 0x7c, 0x9f, 0x4b, 0xf5, 0x03, 0xce, 0x45, 0xc4, 0xa4, 0x21, 0x57, 0xac, 0xad, - 0xbe, 0x1b, 0xbd, 0x13, 0x91, 0x74, 0xa3, 0xb4, 0xa8, 0x77, 0xa3, 0xb4, 0xb1, 0x13, 0x1b, 0x6a, - 0xda, 0x90, 0xea, 0xe9, 0x23, 0x9d, 0xd3, 0x3f, 0x29, 0xc2, 0xe9, 0xac, 0xd0, 0x3d, 0xe8, 0x3b, - 0x53, 0x19, 0x81, 0x5e, 0xec, 0x37, 0xe8, 0x0f, 0x4f, 0x13, 0x24, 0x12, 0x7a, 0xcf, 0x9a, 0x39, - 0x82, 0x7a, 0x0e, 0xb3, 0x68, 0x93, 0x79, 0xcd, 0x86, 0x3c, 0x93, 0x93, 0x3c, 0x3e, 0x3e, 0xdd, - 0x77, 0x07, 0x44, 0x0a, 0xa8, 0x28, 0xe5, 0x35, 0x2b, 0x8b, 0x7b, 0x7b, 0xcd, 0xca, 0x96, 0x67, - 0x3c, 0x18, 0xd1, 0xbe, 0xe6, 0x91, 0xce, 0xf8, 0x36, 0xbd, 0xad, 0xb4, 0x7e, 0x3f, 0xd2, 0x59, - 0xff, 0x11, 0x0b, 0x52, 0x36, 0xa5, 0x4a, 0x2d, 0x66, 0xe5, 0xaa, 0xc5, 0x2e, 0xc1, 0x40, 0x18, - 0x34, 0x49, 0x3a, 0x01, 0x0f, 0x0e, 0x9a, 0x04, 0x33, 0x08, 0xc5, 0x88, 0x13, 0x65, 0xc7, 0xa8, - 0x2e, 0xc8, 0x09, 0x11, 0xed, 0x49, 0x18, 0x6c, 0x92, 0x5d, 0xd2, 0x4c, 0x47, 0xb7, 0xbf, 0x45, - 0x0b, 0x31, 0x87, 0xd9, 0xbf, 0x38, 0x00, 0x17, 0xba, 0xfa, 0x9d, 0x53, 0x71, 0x68, 0xd3, 0x89, - 0xc9, 0x7d, 0x67, 0x2f, 0x1d, 0x86, 0xfa, 0x3a, 0x2f, 0xc6, 0x12, 
0xce, 0xcc, 0xec, 0x79, 0xd8, - 0xc9, 0x94, 0x12, 0x51, 0x44, 0x9b, 0x14, 0x50, 0x53, 0x29, 0x55, 0x3c, 0x0e, 0xa5, 0xd4, 0x35, - 0x80, 0x28, 0x6a, 0x72, 0xab, 0x05, 0x57, 0xd8, 0xef, 0x27, 0xe1, 0x49, 0xeb, 0xb7, 0x04, 0x04, - 0x6b, 0x58, 0xa8, 0x0a, 0x93, 0xad, 0x30, 0x88, 0xb9, 0x4e, 0xb6, 0xca, 0xcd, 0x9d, 0x06, 0x4d, - 0x97, 0xdf, 0x5a, 0x0a, 0x8e, 0x3b, 0x6a, 0xa0, 0x97, 0x60, 0x44, 0xb8, 0x01, 0xd7, 0x82, 0xa0, - 0x29, 0xd4, 0x40, 0xca, 0x78, 0xa6, 0x9e, 0x80, 0xb0, 0x8e, 0xa7, 0x55, 0x63, 0x8a, 0xde, 0xe1, - 0xcc, 0x6a, 0x5c, 0xd9, 0xab, 0xe1, 0xa5, 0xc2, 0x78, 0x95, 0xfa, 0x0a, 0xe3, 0x95, 0x28, 0xc6, - 0xca, 0x7d, 0xbf, 0x6d, 0x41, 0x4f, 0x55, 0xd2, 0xcf, 0x0d, 0xc0, 0x29, 0xb1, 0x70, 0x1e, 0xf5, - 0x72, 0xb9, 0xd3, 0xb9, 0x5c, 0x8e, 0x43, 0x75, 0xf6, 0xcd, 0x35, 0x73, 0xd2, 0x6b, 0xe6, 0x07, - 0x2d, 0x30, 0xd9, 0x2b, 0xf4, 0xff, 0xe5, 0xc6, 0xf1, 0x7f, 0x29, 0x97, 0x5d, 0x73, 0xe5, 0x05, - 0xf2, 0x01, 0x23, 0xfa, 0xdb, 0xff, 0xc9, 0x82, 0x27, 0x7a, 0x52, 0x44, 0x8b, 0x50, 0x66, 0x3c, - 0xa0, 0x26, 0x9d, 0x3d, 0xa5, 0xcc, 0x21, 0x25, 0x20, 0x87, 0x25, 0x4d, 0x6a, 0xa2, 0xc5, 0x8e, - 0x84, 0x09, 0x4f, 0x67, 0x24, 0x4c, 0x38, 0x63, 0x0c, 0xcf, 0x43, 0x66, 0x4c, 0xf8, 0x95, 0x22, - 0x0c, 0xf1, 0x15, 0x7f, 0x02, 0x62, 0xd8, 0x92, 0xd0, 0xdb, 0x76, 0x09, 0xe4, 0xc5, 0xfb, 0x32, - 0x5b, 0x75, 0x62, 0x87, 0xb3, 0x09, 0xea, 0xb6, 0x4a, 0x34, 0xbc, 0x68, 0xd6, 0xb8, 0xcf, 0x66, - 0x52, 0x8a, 0x49, 0xe0, 0x34, 0xb4, 0xdb, 0xed, 0x8b, 0x00, 0x51, 0x1c, 0x7a, 0xfe, 0x26, 0xa5, - 0x21, 0x42, 0xc2, 0x7d, 0xb2, 0x4b, 0xeb, 0x75, 0x85, 0xcc, 0xfb, 0x90, 0xec, 0x74, 0x05, 0xc0, - 0x1a, 0xc5, 0x99, 0x97, 0xa1, 0xac, 0x90, 0x7b, 0x69, 0x71, 0x46, 0x75, 0xe6, 0xe2, 0xb3, 0x30, - 0x91, 0x6a, 0xeb, 0x48, 0x4a, 0xa0, 0x5f, 0xb2, 0x60, 0x82, 0x77, 0x79, 0xd1, 0xdf, 0x15, 0x67, - 0xea, 0xfb, 0x70, 0xba, 0x99, 0x71, 0xb6, 0x89, 0x19, 0xed, 0xff, 0x2c, 0x54, 0x4a, 0x9f, 0x2c, - 0x28, 0xce, 0x6c, 0x03, 0x5d, 0xa1, 0xeb, 0x96, 0x9e, 0x5d, 0x4e, 0x53, 0xb8, 0x6c, 0x8d, 0xf2, - 0x35, 0xcb, 0xcb, 0xb0, 0x82, 0xda, 0xbf, 0x6d, 0xc1, 0x14, 0xef, 0xf9, 0x4d, 0xb2, 0xa7, 0x76, - 0xf8, 0x87, 0xd9, 0x77, 0x91, 0xc3, 0xa4, 0x90, 0x93, 0xc3, 0x44, 0xff, 0xb4, 0x62, 0xd7, 0x4f, - 0xfb, 0x19, 0x0b, 0xc4, 0x0a, 0x3c, 0x01, 0x51, 0xfe, 0x5b, 0x4d, 0x51, 0x7e, 0x26, 0x7f, 0x51, - 0xe7, 0xc8, 0xf0, 0x7f, 0x66, 0xc1, 0x24, 0x47, 0x48, 0xde, 0x9c, 0x3f, 0xd4, 0x79, 0xe8, 0x27, - 0x19, 0xa1, 0xca, 0x50, 0x9e, 0xfd, 0x51, 0xc6, 0x64, 0x0d, 0x74, 0x9d, 0x2c, 0x57, 0x6e, 0xa0, - 0x23, 0x24, 0xe2, 0x3c, 0x72, 0x2c, 0x70, 0xfb, 0x0f, 0x2d, 0x40, 0xbc, 0x19, 0x83, 0xfd, 0xa1, - 0x4c, 0x05, 0x2b, 0xd5, 0xae, 0x8b, 0xe4, 0xa8, 0x51, 0x10, 0xac, 0x61, 0x1d, 0xcb, 0xf0, 0xa4, - 0x0c, 0x07, 0x8a, 0xbd, 0x0d, 0x07, 0x8e, 0x30, 0xa2, 0x7f, 0x30, 0x08, 0x69, 0xa7, 0x06, 0x74, - 0x17, 0x46, 0x1b, 0x4e, 0xcb, 0x59, 0xf7, 0x9a, 0x5e, 0xec, 0x91, 0xa8, 0x9b, 0xc5, 0xd1, 0x82, - 0x86, 0x27, 0x9e, 0x7a, 0xb5, 0x12, 0x6c, 0xd0, 0x41, 0xb3, 0x00, 0xad, 0xd0, 0xdb, 0xf5, 0x9a, - 0x64, 0x93, 0x69, 0x1c, 0x98, 0x93, 0x28, 0x37, 0xa3, 0x91, 0xa5, 0x58, 0xc3, 0xc8, 0xf0, 0xf7, - 0x2b, 0x3e, 0x3a, 0x7f, 0xbf, 0x81, 0x23, 0xfa, 0xfb, 0x0d, 0xf6, 0xe5, 0xef, 0x87, 0xe1, 0xac, - 0x64, 0x91, 0xe8, 0xff, 0x25, 0xaf, 0x49, 0x04, 0x5f, 0xcc, 0x5d, 0x47, 0x67, 0x0e, 0xf6, 0x2b, - 0x67, 0x71, 0x26, 0x06, 0xce, 0xa9, 0x89, 0x3e, 0x07, 0xd3, 0x4e, 0xb3, 0x19, 0xdc, 0x57, 0xa3, - 0xb6, 0x18, 0x35, 0x9c, 0x26, 0xd7, 0xd8, 0x0f, 0x33, 0xaa, 0xe7, 0x0f, 0xf6, 0x2b, 0xd3, 0x73, - 0x39, 0x38, 0x38, 0xb7, 0x76, 0xca, 0x5d, 0xb0, 0xd4, 0xd3, 0x5d, 0xf0, 0x35, 0x28, 0xb7, 
0xc2, - 0xa0, 0xb1, 0xa2, 0xf9, 0x14, 0x5d, 0x64, 0x69, 0xfe, 0x65, 0xe1, 0xe1, 0x7e, 0x65, 0x4c, 0xfd, - 0x61, 0x37, 0x7c, 0x52, 0x21, 0xc3, 0x4b, 0x10, 0x1e, 0xa5, 0x97, 0xe0, 0x36, 0x9c, 0xaa, 0x93, - 0xd0, 0x63, 0xf9, 0x4a, 0xdd, 0xe4, 0xfc, 0x58, 0x83, 0x72, 0x98, 0x3a, 0x31, 0xfb, 0x0a, 0x7e, - 0xa5, 0xc5, 0x64, 0x96, 0x27, 0x64, 0x42, 0xc8, 0xfe, 0x53, 0x0b, 0x86, 0x85, 0x39, 0xfd, 0x09, - 0x30, 0x6a, 0x73, 0x86, 0xbe, 0xbc, 0x92, 0x7d, 0xab, 0xb0, 0xce, 0xe4, 0x6a, 0xca, 0x97, 0x53, - 0x9a, 0xf2, 0x27, 0xba, 0x11, 0xe9, 0xae, 0x23, 0xff, 0xdb, 0x45, 0x18, 0x37, 0x3d, 0x60, 0x4e, - 0x60, 0x08, 0x56, 0x61, 0x38, 0x12, 0xee, 0x56, 0x85, 0x7c, 0x83, 0xee, 0xf4, 0x24, 0x26, 0xd6, - 0x5a, 0xc2, 0xc1, 0x4a, 0x12, 0xc9, 0xf4, 0xe3, 0x2a, 0x3e, 0x42, 0x3f, 0xae, 0x5e, 0x4e, 0x48, - 0x03, 0xc7, 0xe1, 0x84, 0x64, 0x7f, 0x95, 0xdd, 0x6c, 0x7a, 0xf9, 0x09, 0x30, 0x3d, 0xd7, 0xcd, - 0x3b, 0xd0, 0xee, 0xb2, 0xb2, 0x44, 0xa7, 0x72, 0x98, 0x9f, 0x5f, 0xb0, 0xe0, 0x42, 0xc6, 0x57, - 0x69, 0x9c, 0xd0, 0xb3, 0x50, 0x72, 0xda, 0xae, 0xa7, 0xf6, 0xb2, 0xf6, 0x6a, 0x36, 0x27, 0xca, - 0xb1, 0xc2, 0x40, 0x0b, 0x30, 0x45, 0x1e, 0xb4, 0x3c, 0xfe, 0x6c, 0xa9, 0x9b, 0x54, 0x16, 0x79, - 0x40, 0xe0, 0xc5, 0x34, 0x10, 0x77, 0xe2, 0x2b, 0x97, 0xf9, 0x62, 0xae, 0xcb, 0xfc, 0x3f, 0xb4, - 0x60, 0x44, 0xb9, 0xd6, 0x3c, 0xf2, 0xd1, 0xfe, 0x36, 0x73, 0xb4, 0x1f, 0xef, 0x32, 0xda, 0x39, - 0xc3, 0xfc, 0x77, 0x0b, 0xaa, 0xbf, 0xb5, 0x20, 0x8c, 0xfb, 0xe0, 0xb0, 0x5e, 0x81, 0x52, 0x2b, - 0x0c, 0xe2, 0xa0, 0x11, 0x34, 0x05, 0x83, 0x75, 0x3e, 0x89, 0xe8, 0xc0, 0xcb, 0x0f, 0xb5, 0xdf, - 0x58, 0x61, 0xb3, 0xd1, 0x0b, 0xc2, 0x58, 0x30, 0x35, 0xc9, 0xe8, 0x05, 0x61, 0x8c, 0x19, 0x04, - 0xb9, 0x00, 0xb1, 0x13, 0x6e, 0x92, 0x98, 0x96, 0x89, 0xe0, 0x30, 0xf9, 0x87, 0x47, 0x3b, 0xf6, - 0x9a, 0xb3, 0x9e, 0x1f, 0x47, 0x71, 0x38, 0xbb, 0xec, 0xc7, 0xb7, 0x43, 0x2e, 0xaf, 0x69, 0x21, - 0x1a, 0x14, 0x2d, 0xac, 0xd1, 0x95, 0x8e, 0xad, 0xac, 0x8d, 0x41, 0xf3, 0xfd, 0x7d, 0x55, 0x94, - 0x63, 0x85, 0x61, 0xbf, 0xcc, 0xae, 0x12, 0x36, 0x40, 0x47, 0x8b, 0x9e, 0xf0, 0xb5, 0x92, 0x1a, - 0x5a, 0xf6, 0xf8, 0x56, 0xd5, 0x63, 0x34, 0x74, 0x3f, 0xb9, 0x69, 0xc3, 0xba, 0x7b, 0x4f, 0x12, - 0xc8, 0x01, 0x7d, 0x7b, 0x87, 0x59, 0xc6, 0x73, 0x3d, 0xae, 0x80, 0x23, 0x18, 0x62, 0xb0, 0x20, - 0xe5, 0x2c, 0x84, 0xf3, 0x72, 0x4d, 0x2c, 0x72, 0x2d, 0x48, 0xb9, 0x00, 0xe0, 0x04, 0x07, 0x5d, - 0x15, 0xd2, 0xfe, 0x80, 0x91, 0xaa, 0x50, 0x4a, 0xfb, 0xf2, 0xf3, 0x35, 0x71, 0xff, 0x79, 0x18, - 0x51, 0x29, 0x0b, 0x6b, 0x3c, 0xf3, 0x9b, 0x08, 0x95, 0xb3, 0x98, 0x14, 0x63, 0x1d, 0x07, 0xad, - 0xc1, 0x44, 0xc4, 0x55, 0x3d, 0x2a, 0x22, 0x22, 0x57, 0x99, 0x7d, 0x52, 0x9a, 0x73, 0xd4, 0x4d, - 0xf0, 0x21, 0x2b, 0xe2, 0x47, 0x87, 0xf4, 0x4e, 0x4d, 0x93, 0x40, 0xaf, 0xc3, 0x78, 0x33, 0x70, - 0xdc, 0x79, 0xa7, 0xe9, 0xf8, 0x0d, 0xf6, 0xbd, 0x25, 0x33, 0xd3, 0xd3, 0x2d, 0x03, 0x8a, 0x53, - 0xd8, 0x94, 0x31, 0xd3, 0x4b, 0x44, 0x14, 0x4f, 0xc7, 0xdf, 0x24, 0x91, 0x48, 0xb8, 0xc6, 0x18, - 0xb3, 0x5b, 0x39, 0x38, 0x38, 0xb7, 0x36, 0x7a, 0x05, 0x46, 0xe5, 0xe7, 0x6b, 0xbe, 0xd7, 0x89, - 0xed, 0xbd, 0x06, 0xc3, 0x06, 0x26, 0xba, 0x0f, 0x67, 0xe4, 0xff, 0xb5, 0xd0, 0xd9, 0xd8, 0xf0, - 0x1a, 0xc2, 0x97, 0x8f, 0x3b, 0x20, 0xcd, 0x49, 0x8f, 0xa6, 0xc5, 0x2c, 0xa4, 0xc3, 0xfd, 0xca, - 0x25, 0x31, 0x6a, 0x99, 0x70, 0x36, 0x89, 0xd9, 0xf4, 0xd1, 0x0a, 0x9c, 0xda, 0x22, 0x4e, 0x33, - 0xde, 0x5a, 0xd8, 0x22, 0x8d, 0x6d, 0xb9, 0x89, 0x98, 0x47, 0xb7, 0x66, 0xb1, 0x7e, 0xa3, 0x13, - 0x05, 0x67, 0xd5, 0x43, 0x6f, 0xc3, 0x74, 0xab, 0xbd, 0xde, 0xf4, 0xa2, 0xad, 0xd5, 0x20, 0x66, - 0x16, 0x24, 0x2a, 
0xe3, 0x9f, 0x70, 0xfd, 0x56, 0xde, 0xec, 0xb5, 0x1c, 0x3c, 0x9c, 0x4b, 0x01, - 0xbd, 0x0f, 0x67, 0x52, 0x8b, 0x41, 0x38, 0xa2, 0x8e, 0xe7, 0xc7, 0x44, 0xae, 0x67, 0x55, 0x10, - 0x8e, 0xa5, 0x59, 0x20, 0x9c, 0xdd, 0xc4, 0x07, 0xb3, 0x2b, 0x7a, 0x8f, 0x56, 0xd6, 0x98, 0x32, - 0xf4, 0x0e, 0x8c, 0xea, 0xab, 0x48, 0x5c, 0x30, 0x97, 0xb3, 0x79, 0x16, 0x6d, 0xb5, 0x71, 0x96, - 0x4e, 0xad, 0x28, 0x1d, 0x86, 0x0d, 0x8a, 0x36, 0x81, 0xec, 0xef, 0x43, 0xb7, 0xa0, 0xd4, 0x68, - 0x7a, 0xc4, 0x8f, 0x97, 0x6b, 0xdd, 0x02, 0xb3, 0x2c, 0x08, 0x1c, 0x31, 0x60, 0x22, 0x88, 0x2c, - 0x2f, 0xc3, 0x8a, 0x82, 0xfd, 0x6b, 0x05, 0xa8, 0xf4, 0x88, 0x48, 0x9c, 0x52, 0x7f, 0x5b, 0x7d, - 0xa9, 0xbf, 0xe7, 0x64, 0xfe, 0xc2, 0xd5, 0x94, 0x4e, 0x20, 0x95, 0x9b, 0x30, 0xd1, 0x0c, 0xa4, - 0xf1, 0xfb, 0x36, 0x47, 0xd6, 0x35, 0xe8, 0x03, 0x3d, 0x0d, 0xea, 0x8d, 0x97, 0xb3, 0xc1, 0xfe, - 0x05, 0x91, 0xdc, 0x57, 0x10, 0xfb, 0xab, 0x05, 0x38, 0xa3, 0x86, 0xf0, 0x1b, 0x77, 0xe0, 0xee, - 0x74, 0x0e, 0xdc, 0x31, 0xbc, 0x21, 0xd9, 0xb7, 0x61, 0x88, 0x07, 0xb6, 0xe9, 0x83, 0x01, 0x7a, - 0xd2, 0x8c, 0x82, 0xa6, 0xae, 0x69, 0x23, 0x12, 0xda, 0x5f, 0xb1, 0x60, 0x62, 0x6d, 0xa1, 0x56, - 0x0f, 0x1a, 0xdb, 0x24, 0x9e, 0xe3, 0x0c, 0x2b, 0x16, 0xfc, 0x8f, 0xf5, 0x90, 0x7c, 0x4d, 0x16, - 0xc7, 0x74, 0x09, 0x06, 0xb6, 0x82, 0x28, 0x4e, 0x3f, 0x30, 0xdf, 0x08, 0xa2, 0x18, 0x33, 0x88, - 0xfd, 0x3b, 0x16, 0x0c, 0xb2, 0xac, 0xbb, 0xbd, 0x52, 0x41, 0xf7, 0xf3, 0x5d, 0xe8, 0x25, 0x18, - 0x22, 0x1b, 0x1b, 0xa4, 0x11, 0x8b, 0x59, 0x95, 0x5e, 0xb2, 0x43, 0x8b, 0xac, 0x94, 0x5e, 0xfa, - 0xac, 0x31, 0xfe, 0x17, 0x0b, 0x64, 0x74, 0x0f, 0xca, 0xb1, 0xb7, 0x43, 0xe6, 0x5c, 0x57, 0x3c, - 0xd1, 0x3d, 0x84, 0x53, 0xf2, 0x9a, 0x24, 0x80, 0x13, 0x5a, 0xf6, 0x57, 0x0a, 0x00, 0x49, 0xbc, - 0x84, 0x5e, 0x9f, 0x38, 0xdf, 0xf1, 0x78, 0x73, 0x39, 0xe3, 0xf1, 0x06, 0x25, 0x04, 0x33, 0x5e, - 0x6e, 0xd4, 0x30, 0x15, 0xfb, 0x1a, 0xa6, 0x81, 0xa3, 0x0c, 0xd3, 0x02, 0x4c, 0x25, 0xf1, 0x1e, - 0xcc, 0xe0, 0x37, 0x4c, 0x48, 0x59, 0x4b, 0x03, 0x71, 0x27, 0xbe, 0x4d, 0xe0, 0x92, 0x8c, 0x7a, - 0x2a, 0xef, 0x1a, 0x66, 0x01, 0x7a, 0x84, 0xac, 0xe0, 0xc9, 0xeb, 0x54, 0x21, 0xf7, 0x75, 0xea, - 0xc7, 0x2d, 0x38, 0x9d, 0x6e, 0x87, 0xb9, 0xe4, 0x7d, 0xd9, 0x82, 0x33, 0xec, 0x8d, 0x8e, 0xb5, - 0xda, 0xf9, 0x22, 0xf8, 0x62, 0x76, 0x1c, 0x8c, 0xee, 0x3d, 0x4e, 0xdc, 0xb1, 0x57, 0xb2, 0x48, - 0xe3, 0xec, 0x16, 0xed, 0x2f, 0x5b, 0x70, 0x2e, 0x37, 0xd9, 0x13, 0xba, 0x02, 0x25, 0xa7, 0xe5, - 0x71, 0x05, 0x98, 0xd8, 0xef, 0x4c, 0x7a, 0xac, 0x2d, 0x73, 0xf5, 0x97, 0x82, 0xaa, 0x24, 0x94, - 0x85, 0xdc, 0x24, 0x94, 0x3d, 0x73, 0x4a, 0xda, 0xdf, 0x67, 0x81, 0xf0, 0xc2, 0xea, 0xe3, 0x90, - 0x79, 0x4b, 0xe6, 0xf0, 0x35, 0x02, 0xce, 0x5f, 0xca, 0x77, 0x4b, 0x13, 0x61, 0xe6, 0xd5, 0xa5, - 0x6e, 0x04, 0x97, 0x37, 0x68, 0xd9, 0x2e, 0x08, 0x68, 0x95, 0x30, 0x9d, 0x55, 0xef, 0xde, 0x5c, - 0x03, 0x70, 0x19, 0xae, 0x96, 0xc9, 0x53, 0x5d, 0x21, 0x55, 0x05, 0xc1, 0x1a, 0x96, 0xfd, 0xef, - 0x0a, 0x30, 0x22, 0x03, 0x9c, 0xb7, 0xfd, 0x7e, 0x24, 0xcb, 0x23, 0x65, 0x3c, 0x62, 0xa9, 0x6f, - 0x29, 0xe1, 0x5a, 0x22, 0x90, 0x27, 0xa9, 0x6f, 0x25, 0x00, 0x27, 0x38, 0xe8, 0x69, 0x18, 0x8e, - 0xda, 0xeb, 0x0c, 0x3d, 0xe5, 0x33, 0x54, 0xe7, 0xc5, 0x58, 0xc2, 0xd1, 0xe7, 0x60, 0x92, 0xd7, - 0x0b, 0x83, 0x96, 0xb3, 0xc9, 0xb5, 0xad, 0x83, 0xca, 0xd9, 0x77, 0x72, 0x25, 0x05, 0x3b, 0xdc, - 0xaf, 0x9c, 0x4e, 0x97, 0x31, 0x3d, 0x7d, 0x07, 0x15, 0xf6, 0xf6, 0xcf, 0x1b, 0xa1, 0xcb, 0xb4, - 0xc3, 0x64, 0x20, 0x01, 0x61, 0x1d, 0xcf, 0x7e, 0x07, 0x50, 0x67, 0xa8, 0x77, 0xf4, 0x06, 0x37, - 0xf8, 0xf2, 0x42, 0xe2, 0x76, 0xd3, 0xdb, 
0xeb, 0x2e, 0xad, 0xd2, 0xdc, 0x9f, 0xd7, 0xc2, 0xaa, - 0xbe, 0xfd, 0xd7, 0x8a, 0x30, 0x99, 0x76, 0x70, 0x44, 0x37, 0x60, 0x88, 0xdf, 0x91, 0x82, 0x7c, - 0x97, 0x67, 0x61, 0xcd, 0x2d, 0x92, 0x9d, 0x16, 0xe2, 0x9a, 0x15, 0xf5, 0xd1, 0xdb, 0x30, 0xe2, - 0x06, 0xf7, 0xfd, 0xfb, 0x4e, 0xe8, 0xce, 0xd5, 0x96, 0xc5, 0x72, 0xce, 0x64, 0xb5, 0xab, 0x09, - 0x9a, 0xee, 0x6a, 0xc9, 0x9e, 0x40, 0x12, 0x10, 0xd6, 0xc9, 0xa1, 0x35, 0x16, 0xbe, 0x72, 0xc3, - 0xdb, 0x5c, 0x71, 0x5a, 0xdd, 0xac, 0x7f, 0x17, 0x24, 0x92, 0x46, 0x79, 0x4c, 0xc4, 0xb8, 0xe4, - 0x00, 0x9c, 0x10, 0x42, 0xdf, 0x09, 0xa7, 0xa2, 0x1c, 0xed, 0x5c, 0x5e, 0xe6, 0x8f, 0x6e, 0x0a, - 0xab, 0xf9, 0xc7, 0xa8, 0x10, 0x94, 0xa5, 0xc7, 0xcb, 0x6a, 0xc6, 0xfe, 0xf5, 0x53, 0x60, 0x6c, - 0x62, 0x23, 0x11, 0x94, 0x75, 0x4c, 0x89, 0xa0, 0x30, 0x94, 0xc8, 0x4e, 0x2b, 0xde, 0xab, 0x7a, - 0x61, 0xb7, 0x44, 0x85, 0x8b, 0x02, 0xa7, 0x93, 0xa6, 0x84, 0x60, 0x45, 0x27, 0x3b, 0x5b, 0x57, - 0xf1, 0x43, 0xcc, 0xd6, 0x35, 0x70, 0x82, 0xd9, 0xba, 0x56, 0x61, 0x78, 0xd3, 0x8b, 0x31, 0x69, - 0x05, 0x82, 0x3b, 0xcd, 0x5c, 0x87, 0xd7, 0x39, 0x4a, 0x67, 0x5e, 0x18, 0x01, 0xc0, 0x92, 0x08, - 0x7a, 0x43, 0xed, 0xc0, 0xa1, 0x7c, 0xe1, 0xae, 0xf3, 0xfd, 0x32, 0x73, 0x0f, 0x8a, 0x9c, 0x5c, - 0xc3, 0x0f, 0x9b, 0x93, 0x6b, 0x49, 0x66, 0xd2, 0x2a, 0xe5, 0x9b, 0xea, 0xb3, 0x44, 0x59, 0x3d, - 0xf2, 0x67, 0xdd, 0xd5, 0xb3, 0x8f, 0x95, 0xf3, 0x4f, 0x02, 0x95, 0x58, 0xac, 0xcf, 0x9c, 0x63, - 0xdf, 0x67, 0xc1, 0x99, 0x56, 0x56, 0x22, 0x3e, 0xf1, 0xd6, 0xf4, 0x52, 0xdf, 0x99, 0x06, 0x8d, - 0x06, 0x99, 0x94, 0x9f, 0x89, 0x86, 0xb3, 0x9b, 0xa3, 0x03, 0x1d, 0xae, 0xbb, 0x22, 0x69, 0xd6, - 0x93, 0x39, 0xc9, 0xcb, 0xba, 0xa4, 0x2c, 0x5b, 0xcb, 0x48, 0x94, 0xf5, 0xf1, 0xbc, 0x44, 0x59, - 0x7d, 0xa7, 0xc7, 0x7a, 0x43, 0xa5, 0x2d, 0x1b, 0xcb, 0x5f, 0x4a, 0x3c, 0x29, 0x59, 0xcf, 0x64, - 0x65, 0x6f, 0xa8, 0x64, 0x65, 0x5d, 0xe2, 0xea, 0xf1, 0x54, 0x64, 0x3d, 0x53, 0x94, 0x69, 0x69, - 0xc6, 0x26, 0x8e, 0x27, 0xcd, 0x98, 0x71, 0xd5, 0xf0, 0x4c, 0x57, 0xcf, 0xf4, 0xb8, 0x6a, 0x0c, - 0xba, 0xdd, 0x2f, 0x1b, 0x9e, 0x52, 0x6d, 0xea, 0xa1, 0x52, 0xaa, 0xdd, 0xd5, 0x53, 0x94, 0xa1, - 0x1e, 0x39, 0xb8, 0x28, 0x52, 0x9f, 0x89, 0xc9, 0xee, 0xea, 0x17, 0xe0, 0xa9, 0x7c, 0xba, 0xea, - 0x9e, 0xeb, 0xa4, 0x9b, 0x79, 0x05, 0x76, 0x24, 0x3c, 0x3b, 0x7d, 0x32, 0x09, 0xcf, 0xce, 0x1c, - 0x7b, 0xc2, 0xb3, 0xb3, 0x27, 0x90, 0xf0, 0xec, 0xb1, 0x0f, 0x35, 0xe1, 0xd9, 0xf4, 0x23, 0x48, - 0x78, 0xb6, 0x9a, 0x24, 0x3c, 0x3b, 0x97, 0x3f, 0x25, 0x19, 0xf6, 0xc3, 0x39, 0x69, 0xce, 0xee, - 0x32, 0x23, 0x02, 0x1e, 0x81, 0x43, 0x04, 0xfe, 0xcb, 0x4e, 0xee, 0x9c, 0x15, 0xa6, 0x83, 0x4f, - 0x89, 0x02, 0xe1, 0x84, 0x14, 0xa5, 0x9b, 0xa4, 0x3d, 0x7b, 0xbc, 0x8b, 0x1e, 0x37, 0x4b, 0x43, - 0xd6, 0x25, 0xd9, 0xd9, 0xeb, 0x3c, 0xd9, 0xd9, 0xf9, 0xfc, 0x93, 0x3c, 0x7d, 0xdd, 0x99, 0x29, - 0xce, 0xbe, 0xbf, 0x00, 0x17, 0xbb, 0xef, 0x8b, 0x44, 0x3d, 0x57, 0x4b, 0x9e, 0x93, 0x52, 0xea, - 0x39, 0x2e, 0x5b, 0x25, 0x58, 0x7d, 0x87, 0x39, 0xba, 0x0e, 0x53, 0xca, 0xf0, 0xb8, 0xe9, 0x35, - 0xf6, 0xb4, 0xa4, 0xd1, 0xca, 0xc1, 0xb2, 0x9e, 0x46, 0xc0, 0x9d, 0x75, 0xd0, 0x1c, 0x4c, 0x18, - 0x85, 0xcb, 0x55, 0x21, 0x43, 0x29, 0x7d, 0x60, 0xdd, 0x04, 0xe3, 0x34, 0xbe, 0xfd, 0xd3, 0x16, - 0x3c, 0x96, 0x93, 0x4b, 0xa4, 0xef, 0x28, 0x3e, 0x1b, 0x30, 0xd1, 0x32, 0xab, 0xf6, 0x08, 0xf6, - 0x65, 0x64, 0x2c, 0x51, 0x7d, 0x4d, 0x01, 0x70, 0x9a, 0xa8, 0xfd, 0x55, 0x0b, 0x2e, 0x74, 0x35, - 0x42, 0x41, 0x18, 0xce, 0x6e, 0xee, 0x44, 0xce, 0x42, 0x48, 0x5c, 0xe2, 0xc7, 0x9e, 0xd3, 0xac, - 0xb7, 0x48, 0x43, 0x53, 0xb0, 0x32, 0x5b, 0x9f, 0xeb, 0x2b, 0xf5, 
0xb9, 0x4e, 0x0c, 0x9c, 0x53, - 0x13, 0x2d, 0x01, 0xea, 0x84, 0x88, 0x19, 0x66, 0xd1, 0x1c, 0x3b, 0xe9, 0xe1, 0x8c, 0x1a, 0xf3, - 0x57, 0x7e, 0xf3, 0xf7, 0x2e, 0x7e, 0xec, 0xb7, 0x7e, 0xef, 0xe2, 0xc7, 0x7e, 0xfb, 0xf7, 0x2e, - 0x7e, 0xec, 0xbb, 0x0f, 0x2e, 0x5a, 0xbf, 0x79, 0x70, 0xd1, 0xfa, 0xad, 0x83, 0x8b, 0xd6, 0x6f, - 0x1f, 0x5c, 0xb4, 0x7e, 0xf7, 0xe0, 0xa2, 0xf5, 0x95, 0xdf, 0xbf, 0xf8, 0xb1, 0xb7, 0x0a, 0xbb, - 0xcf, 0xff, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe3, 0xc5, 0xa7, 0xa5, 0x2c, 0xed, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index bb88fb27cf72d..b99d104427767 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -161,7 +161,7 @@ message AzureFileVolumeSource { // Deprecated in 1.7, please use the bindings subresource of pods instead. message Binding { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -275,7 +275,7 @@ message Capabilities { // Cephfs volumes do not support ownership management or SELinux relabeling. message CephFSPersistentVolumeSource { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it repeated string monitors = 1; // Optional: Used as the mounted root, rather than the full Ceph tree, default is / @@ -283,23 +283,23 @@ message CephFSPersistentVolumeSource { optional string path = 2; // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string user = 3; // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string secretFile = 4; // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional SecretReference secretRef = 5; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional bool readOnly = 6; } @@ -308,7 +308,7 @@ message CephFSPersistentVolumeSource { // Cephfs volumes do not support ownership management or SELinux relabeling. 
message CephFSVolumeSource { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it repeated string monitors = 1; // Optional: Used as the mounted root, rather than the full Ceph tree, default is / @@ -316,23 +316,23 @@ message CephFSVolumeSource { optional string path = 2; // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string user = 3; // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string secretFile = 4; // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 5; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional bool readOnly = 6; } @@ -342,20 +342,20 @@ message CephFSVolumeSource { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. message CinderPersistentVolumeSource { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md optional string volumeID = 1; // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional string fsType = 2; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional bool readOnly = 3; @@ -370,20 +370,20 @@ message CinderPersistentVolumeSource { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. message CinderVolumeSource { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md optional string volumeID = 1; // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional string fsType = 2; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional bool readOnly = 3; @@ -426,7 +426,7 @@ message ComponentCondition { // ComponentStatus (and ComponentStatusList) holds the cluster validation info. message ComponentStatus { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -440,7 +440,7 @@ message ComponentStatus { // Status of all the conditions for the component as a list of ComponentStatus objects. message ComponentStatusList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -451,7 +451,7 @@ message ComponentStatusList { // ConfigMap holds configuration data for pods to consume. message ConfigMap { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -503,7 +503,7 @@ message ConfigMapKeySelector { // ConfigMapList is a resource containing a list of ConfigMap objects. message ConfigMapList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -701,6 +701,17 @@ message Container { // +optional optional Probe readinessProbe = 11; + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. + // This is an alpha feature enabled by the StartupProbe feature flag. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional Probe startupProbe = 22; + // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. // +optional @@ -901,6 +912,13 @@ message ContainerStatus { // Container's ID in the format 'docker://'. // +optional optional string containerID = 8; + + // Specifies whether the container has passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered successful. 
+ // Resets to false when the container is restarted, or if kubelet loses state temporarily. + // Is always true when no startupProbe is defined. + // +optional + optional bool started = 9; } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -1001,7 +1019,8 @@ message EndpointAddress { // EndpointPort is a tuple that describes a single port. message EndpointPort { - // The name of this port (corresponds to ServicePort.Name). + // The name of this port. This must match the 'name' field in the + // corresponding ServicePort. // Must be a DNS_LABEL. // Optional only if one port is defined. // +optional @@ -1058,7 +1077,7 @@ message EndpointSubset { // ] message Endpoints { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -1076,7 +1095,7 @@ message Endpoints { // EndpointsList is a list of endpoints. message EndpointsList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1141,10 +1160,197 @@ message EnvVarSource { optional SecretKeySelector secretKeyRef = 4; } +// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// user-initiated activities such as debugging. Ephemeral containers have no resource or +// scheduling guarantees, and they will not be restarted when they exit or when a pod is +// removed or restarted. If an ephemeral container causes a pod to exceed its resource +// allocation, the pod may be evicted. +// Ephemeral containers may not be added by directly updating the pod spec. They must be added +// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec +// once added. +// This is an alpha feature enabled by the EphemeralContainers feature flag. +message EphemeralContainer { + // Ephemeral containers have all of the fields of Container, plus additional fields + // specific to ephemeral containers. Fields in common with Container are in the + // following inlined struct so than an EphemeralContainer may easily be converted + // to a Container. + optional EphemeralContainerCommon ephemeralContainerCommon = 1; + + // If set, the name of the container from PodSpec that this ephemeral container targets. + // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + // If not set then the ephemeral container is run in whatever namespaces are shared + // for the pod. Note that the container runtime must support this feature. + // +optional + optional string targetContainerName = 2; +} + +// EphemeralContainerCommon is a copy of all fields in Container to be inlined in +// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer +// to Container and allows separate documentation for the fields of EphemeralContainer. +// When a new field is added to Container it must be added here as well. +message EphemeralContainerCommon { + // Name of the ephemeral container specified as a DNS_LABEL. + // This name must be unique among all containers, init containers and ephemeral containers. 
+ optional string name = 1; + + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + optional string image = 2; + + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string command = 3; + + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string args = 4; + + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + optional string workingDir = 5; + + // Ports are not allowed for ephemeral containers. + repeated ContainerPort ports = 6; + + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + repeated EnvFromSource envFrom = 19; + + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated EnvVar env = 7; + + // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + // already allocated to the pod. + // +optional + optional ResourceRequirements resources = 8; + + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + repeated VolumeMount volumeMounts = 9; + + // volumeDevices is the list of block devices to be used by the container. + // This is a beta feature. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + repeated VolumeDevice volumeDevices = 21; + + // Probes are not allowed for ephemeral containers. + // +optional + optional Probe livenessProbe = 10; + + // Probes are not allowed for ephemeral containers. + // +optional + optional Probe readinessProbe = 11; + + // Probes are not allowed for ephemeral containers. + // +optional + optional Probe startupProbe = 22; + + // Lifecycle is not allowed for ephemeral containers. 
+ // +optional + optional Lifecycle lifecycle = 12; + + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + optional string terminationMessagePath = 13; + + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + optional string terminationMessagePolicy = 20; + + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + optional string imagePullPolicy = 14; + + // SecurityContext is not allowed for ephemeral containers. + // +optional + optional SecurityContext securityContext = 15; + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + optional bool stdin = 16; + + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + optional bool stdinOnce = 17; + + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + optional bool tty = 18; +} + +// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. +message EphemeralContainers { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // A list of ephemeral containers associated with this pod. New ephemeral containers + // may be appended to this list, but existing ephemeral containers may not be removed + // or modified. + // +patchMergeKey=name + // +patchStrategy=merge + repeated EphemeralContainer ephemeralContainers = 2; +} + // Event is a report of an event somewhere in the cluster. message Event { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The object that this event is about. 
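The hunks above introduce three related API additions: a startupProbe field on Container, a started field on ContainerStatus, and the EphemeralContainer/EphemeralContainerCommon types that can only be set through the pod's ephemeralcontainers subresource. The following sketch is not part of the vendored diff; it is a minimal illustration of how these fields surface in the Go types generated from this proto, assuming the k8s.io/api/core/v1 package at the revision being vendored (where Probe still embeds Handler). The image and container names are hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// A container that may take a long time to warm up: the startup probe
	// suppresses the other probes until it succeeds once, allowing up to
	// FailureThreshold * PeriodSeconds before the kubelet restarts it.
	app := corev1.Container{
		Name:  "app",
		Image: "example.com/app:latest", // hypothetical image
		StartupProbe: &corev1.Probe{
			Handler: corev1.Handler{ // embedded handler in this API revision
				Exec: &corev1.ExecAction{Command: []string{"cat", "/tmp/warmed-up"}},
			},
			PeriodSeconds:    10,
			FailureThreshold: 30,
		},
		LivenessProbe: &corev1.Probe{
			Handler: corev1.Handler{
				Exec: &corev1.ExecAction{Command: []string{"cat", "/tmp/healthy"}},
			},
			PeriodSeconds: 10,
		},
	}

	// An ephemeral debug container targeting the namespaces of "app".
	// Per the comments above, it cannot be added by updating the pod spec
	// directly; it is submitted via the pod's ephemeralcontainers subresource.
	debug := corev1.EphemeralContainer{
		EphemeralContainerCommon: corev1.EphemeralContainerCommon{
			Name:  "debugger",
			Image: "busybox", // hypothetical image
			Stdin: true,
			TTY:   true,
		},
		TargetContainerName: app.Name,
	}

	fmt.Println(app.Name, debug.Name)

	// When watching pod status, the new started field reports whether the
	// startup probe has passed; it is a *bool in the generated Go type.
	var st corev1.ContainerStatus
	if st.Started != nil && *st.Started {
		fmt.Println("startup probe succeeded; liveness and readiness probes are now active")
	}
}

With FailureThreshold set to 30 and PeriodSeconds to 10, the container gets roughly five minutes to finish warming up before the kubelet considers the startup probe failed and restarts it, which is the slow-start pattern the startupProbe comment above describes.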
@@ -1209,7 +1415,7 @@ message Event { // EventList is a list of events. message EventList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1418,22 +1624,22 @@ message GitRepoVolumeSource { // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsPersistentVolumeSource { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string path = 2; // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional optional bool readOnly = 3; // EndpointsNamespace is the namespace that contains Glusterfs endpoint. // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional optional string endpointsNamespace = 4; } @@ -1442,16 +1648,16 @@ message GlusterfsPersistentVolumeSource { // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsVolumeSource { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string path = 2; // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional optional bool readOnly = 3; } @@ -1678,7 +1884,7 @@ message Lifecycle { optional Handler postStart = 1; // PreStop is called immediately before a container is terminated due to an - // API request or management event such as liveness probe failure, + // API request or management event such as liveness/startup probe failure, // preemption, resource contention, etc. The handler is not called if the // container crashes or exits. The reason for termination is passed to the // handler. The Pod's termination grace period countdown begins before the @@ -1694,12 +1900,12 @@ message Lifecycle { // LimitRange sets resource usage limits for each kind of resource in a Namespace. 
message LimitRange { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the limits enforced. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LimitRangeSpec spec = 2; } @@ -1734,7 +1940,7 @@ message LimitRangeItem { // LimitRangeList is a list of LimitRange items. message LimitRangeList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1752,7 +1958,7 @@ message LimitRangeSpec { // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1829,25 +2035,43 @@ message NFSVolumeSource { // Use of multiple namespaces is optional. message Namespace { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NamespaceSpec spec = 2; // Status describes the current status of a Namespace. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NamespaceStatus status = 3; } +// NamespaceCondition contains details about state of namespace. +message NamespaceCondition { + // Type of namespace controller condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // +optional + optional string reason = 5; + + // +optional + optional string message = 6; +} + // NamespaceList is a list of Namespaces. message NamespaceList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1870,25 +2094,31 @@ message NamespaceStatus { // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional optional string phase = 1; + + // Represents the latest available observations of a namespace's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated NamespaceCondition conditions = 2; } // Node is a worker node in Kubernetes. // Each node will have a unique identifier in the cache (i.e. in etcd). message Node { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a node. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NodeSpec spec = 2; // Most recently observed status of the node. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NodeStatus status = 3; } @@ -2016,7 +2246,7 @@ message NodeDaemonEndpoints { // NodeList is the whole list of all Nodes which have been registered with master. message NodeList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2084,6 +2314,13 @@ message NodeSpec { // +optional optional string podCIDR = 1; + // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this + // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for + // each of IPv4 and IPv6. + // +optional + // +patchStrategy=merge + repeated string podCIDRs = 7; + // ID of the node assigned by the cloud provider in the format: :// // +optional optional string providerID = 3; @@ -2136,6 +2373,9 @@ message NodeStatus { // List of addresses reachable to the node. // Queried from cloud provider, if available. // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses + // Note: This field is declared as mergeable, but the merge key is not sufficiently + // unique, which can cause data corruption when it is merged. Callers should instead + // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -2218,7 +2458,7 @@ message ObjectFieldSelector { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message ObjectReference { // Kind of the referent. 
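The podCIDRs hunk above adds dual-stack pod ranges alongside the existing podCIDR field, with the rule that entry 0 must match podCIDR and that at most one range per IP family is allowed. A minimal sketch of populating both fields consistently; the CIDR values are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Placeholder IPv4 and IPv6 ranges; at most one value per family is allowed.
	cidrs := []string{"10.0.1.0/24", "fd00:10:0:1::/64"}

	spec := corev1.NodeSpec{
		// Per the comment above, PodCIDRs[0] must match the legacy PodCIDR field.
		PodCIDR:  cidrs[0],
		PodCIDRs: cidrs,
	}
	fmt.Println(spec.PodCIDR, spec.PodCIDRs)
}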
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional string kind = 1; @@ -2242,7 +2482,7 @@ message ObjectReference { optional string apiVersion = 5; // Specific resourceVersion to which this reference is made, if any. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 6; @@ -2263,7 +2503,7 @@ message ObjectReference { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes message PersistentVolume { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -2284,7 +2524,7 @@ message PersistentVolume { // PersistentVolumeClaim is a user's request for and claim to a persistent volume message PersistentVolumeClaim { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -2328,7 +2568,7 @@ message PersistentVolumeClaimCondition { // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. message PersistentVolumeClaimList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2422,7 +2662,7 @@ message PersistentVolumeClaimVolumeSource { // PersistentVolumeList is a list of PersistentVolume items. message PersistentVolumeList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2456,7 +2696,7 @@ message PersistentVolumeSource { // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsPersistentVolumeSource glusterfs = 4; @@ -2466,7 +2706,7 @@ message PersistentVolumeSource { optional NFSVolumeSource nfs = 5; // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDPersistentVolumeSource rbd = 6; @@ -2475,8 +2715,8 @@ message PersistentVolumeSource { // +optional optional ISCSIPersistentVolumeSource iscsi = 7; - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional CinderPersistentVolumeSource cinder = 8; @@ -2529,7 +2769,7 @@ message PersistentVolumeSource { optional LocalVolumeSource local = 20; // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional optional StorageOSPersistentVolumeSource storageos = 21; @@ -2623,12 +2863,12 @@ message PhotonPersistentDiskVolumeSource { // by clients and scheduled onto hosts. message Pod { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodSpec spec = 2; @@ -2636,7 +2876,7 @@ message Pod { // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodStatus status = 3; } @@ -2842,15 +3082,23 @@ message PodExecOptions { repeated string command = 6; } +// IP address information for entries in the (plural) PodIPs field. +// Each entry includes: +// IP: An IP address allocated to the pod. Routable at least within the cluster. +message PodIP { + // ip is an IP address (IPv4 or IPv6) assigned to the pod + optional string ip = 1; +} + // PodList is a list of Pods. message PodList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pods. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md repeated Pod items = 2; } @@ -2937,7 +3185,9 @@ message PodSecurityContext { // +optional optional SELinuxOptions seLinuxOptions = 1; - // Windows security options. + // The Windows specific settings applied to all containers. + // If unspecified, the options within a container's SecurityContext will be used. 
+ // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional optional WindowsSecurityContextOptions windowsOptions = 8; @@ -3012,7 +3262,7 @@ message PodSpec { // init container fails, the pod is considered to have failed and is handled according // to its restartPolicy. The name for an init container or normal container must be // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. // The resourceRequirements of an init container are taken into account during scheduling // by finding the highest request/limit for each resource type, and then using the max of // of that value or the sum of the normal containers. Limits are applied to init containers @@ -3032,6 +3282,16 @@ message PodSpec { // +patchStrategy=merge repeated Container containers = 2; + // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + // pod to perform user-initiated actions such as debugging. This list cannot be specified when + // creating a pod, and it cannot be modified by updating the pod spec. In order to add an + // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated EphemeralContainer ephemeralContainers = 34; + // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. @@ -3215,6 +3475,30 @@ message PodSpec { // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. // +optional optional string preemptionPolicy = 31; + + // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + // This field will be autopopulated at admission time by the RuntimeClass admission controller. If + // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + // The RuntimeClass admission controller will reject Pod create requests which have the overhead already + // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. + // +optional + map overhead = 32; + + // TopologySpreadConstraints describes how a group of pods ought to spread across topology + // domains. Scheduler will schedule pods in a way which abides by the constraints. + // This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread + // feature. + // All topologySpreadConstraints are ANDed. + // +optional + // +patchMergeKey=topologyKey + // +patchStrategy=merge + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + repeated TopologySpreadConstraint topologySpreadConstraints = 33; } // PodStatus represents information about the status of a pod. 
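The PodSpec hunks above add three fields: ephemeralContainers (settable only through the pod's ephemeralcontainers subresource, never at create time), overhead (a map from resource name to quantity, populated from the selected RuntimeClass at admission), and topologySpreadConstraints (illustrated after the full TopologySpreadConstraint message further below). The sketch shows what an Overhead entry looks like on the Go side; the quantities are illustrative, and in practice the RuntimeClass admission controller sets this field rather than the client.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	spec := corev1.PodSpec{
		Containers: []corev1.Container{{
			Name:  "app",                 // hypothetical
			Image: "example.com/app:1.0", // hypothetical
		}},
		// Overhead is normally filled in at admission time from the RuntimeClass;
		// the per-pod sandbox cost below is an illustrative value only.
		Overhead: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("250m"),
			corev1.ResourceMemory: resource.MustParse("120Mi"),
		},
	}

	// Scheduling and quota account for Overhead on top of the container requests.
	cpu := spec.Overhead[corev1.ResourceCPU]
	fmt.Println(cpu.String())
}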
Status may trail the actual @@ -3277,6 +3561,14 @@ message PodStatus { // +optional optional string podIP = 6; + // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must + // match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list + // is empty if no IPs have been allocated yet. + // +optional + // +patchStrategy=merge + // +patchMergeKey=ip + repeated PodIP podIPs = 12; + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional @@ -3299,12 +3591,17 @@ message PodStatus { // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md // +optional optional string qosClass = 9; + + // Status for any ephemeral containers that have run in this pod. + // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. + // +optional + repeated ContainerStatus ephemeralContainerStatuses = 13; } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded message PodStatusResult { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -3312,7 +3609,7 @@ message PodStatusResult { // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodStatus status = 2; } @@ -3320,12 +3617,12 @@ message PodStatusResult { // PodTemplate describes a template for creating copies of a predefined pod. message PodTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Template defines the pods that will be created from this pod template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodTemplateSpec template = 2; } @@ -3333,7 +3630,7 @@ message PodTemplate { // PodTemplateList is a list of PodTemplates. message PodTemplateList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3344,12 +3641,12 @@ message PodTemplateList { // PodTemplateSpec describes the data a pod should have when created from a template message PodTemplateSpec { // Standard object's metadata. 
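The podIPs hunk above (together with the PodIP message earlier in this file) exposes dual-stack pod addresses while keeping the singular podIP field equal to entry 0. A small helper sketch that prefers the plural field and falls back to the legacy one, which is one way a client could stay compatible with servers that do not populate podIPs yet; this is not code from the diff.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// podIPs returns every IP assigned to the pod, preferring the dual-stack
// PodIPs list and falling back to the legacy singular PodIP field.
func podIPs(pod *corev1.Pod) []string {
	if len(pod.Status.PodIPs) > 0 {
		ips := make([]string, 0, len(pod.Status.PodIPs))
		for _, ip := range pod.Status.PodIPs {
			ips = append(ips, ip.IP)
		}
		return ips
	}
	if pod.Status.PodIP != "" {
		return []string{pod.Status.PodIP}
	}
	return nil // no IPs allocated yet
}

func main() {
	pod := corev1.Pod{Status: corev1.PodStatus{
		PodIP:  "10.0.1.5",
		PodIPs: []corev1.PodIP{{IP: "10.0.1.5"}, {IP: "fd00:10:0:1::5"}},
	}}
	fmt.Println(podIPs(&pod))
}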
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional PodSpec spec = 2; } @@ -3429,7 +3726,7 @@ message Probe { optional int32 periodSeconds = 4; // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. // +optional optional int32 successThreshold = 5; @@ -3489,11 +3786,11 @@ message QuobyteVolumeSource { // RBD volumes support ownership management and SELinux relabeling. message RBDPersistentVolumeSource { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it repeated string monitors = 1; // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it optional string image = 2; // Filesystem type of the volume that you want to mount. @@ -3506,32 +3803,32 @@ message RBDPersistentVolumeSource { // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string pool = 4; // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string user = 5; // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string keyring = 6; // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional SecretReference secretRef = 7; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional bool readOnly = 8; } @@ -3540,11 +3837,11 @@ message RBDPersistentVolumeSource { // RBD volumes support ownership management and SELinux relabeling. message RBDVolumeSource { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it repeated string monitors = 1; // The rados image name. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it optional string image = 2; // Filesystem type of the volume that you want to mount. @@ -3557,32 +3854,32 @@ message RBDVolumeSource { // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string pool = 4; // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string user = 5; // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional string keyring = 6; // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 7; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional bool readOnly = 8; } @@ -3590,7 +3887,7 @@ message RBDVolumeSource { // RangeAllocation is not a public type. message RangeAllocation { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -3605,12 +3902,12 @@ message RangeAllocation { message ReplicationController { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the replication controller. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicationControllerSpec spec = 2; @@ -3618,7 +3915,7 @@ message ReplicationController { // This data may be out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicationControllerStatus status = 3; } @@ -3647,7 +3944,7 @@ message ReplicationControllerCondition { // ReplicationControllerList is a collection of replication controllers. message ReplicationControllerList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3733,17 +4030,17 @@ message ResourceFieldSelector { // ResourceQuota sets aggregate quota restrictions enforced per namespace message ResourceQuota { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired quota. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ResourceQuotaSpec spec = 2; // Status defines the actual enforced quota and its current usage. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ResourceQuotaStatus status = 3; } @@ -3751,7 +4048,7 @@ message ResourceQuota { // ResourceQuotaList is a list of ResourceQuota items. message ResourceQuotaList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3947,7 +4244,7 @@ message ScopedResourceSelectorRequirement { // the Data field must be less than MaxSecretSize bytes. message Secret { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -4001,7 +4298,7 @@ message SecretKeySelector { // SecretList is a list of Secret. message SecretList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4102,7 +4399,9 @@ message SecurityContext { // +optional optional SELinuxOptions seLinuxOptions = 3; - // Windows security options. + // The Windows specific settings applied to all containers. + // If unspecified, the options from the PodSecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
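The two windowsOptions hunks (PodSecurityContext earlier in this file and SecurityContext here) spell out the precedence rule: a field set on the container's SecurityContext wins over the same field on the PodSecurityContext. The sketch below resolves effective options the way that rule describes, including the RunAsUserName field that a later hunk in this diff adds to WindowsSecurityContextOptions; treat it as an illustration of the documented precedence, not as the kubelet's actual merge code.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// effectiveWindowsOptions applies the documented precedence: a field set in the
// container's SecurityContext overrides the same field in the PodSecurityContext.
func effectiveWindowsOptions(pod *corev1.PodSecurityContext, ctr *corev1.SecurityContext) *corev1.WindowsSecurityContextOptions {
	out := &corev1.WindowsSecurityContextOptions{}
	merge := func(src *corev1.WindowsSecurityContextOptions) {
		if src == nil {
			return
		}
		if src.GMSACredentialSpecName != nil {
			out.GMSACredentialSpecName = src.GMSACredentialSpecName
		}
		if src.GMSACredentialSpec != nil {
			out.GMSACredentialSpec = src.GMSACredentialSpec
		}
		if src.RunAsUserName != nil {
			out.RunAsUserName = src.RunAsUserName
		}
	}
	if pod != nil {
		merge(pod.WindowsOptions) // pod-level defaults first
	}
	if ctr != nil {
		merge(ctr.WindowsOptions) // container-level values take precedence
	}
	return out
}

func main() {
	podUser, ctrUser := "NT AUTHORITY\\SYSTEM", "ContainerUser" // illustrative user names
	opts := effectiveWindowsOptions(
		&corev1.PodSecurityContext{WindowsOptions: &corev1.WindowsSecurityContextOptions{RunAsUserName: &podUser}},
		&corev1.SecurityContext{WindowsOptions: &corev1.WindowsSecurityContextOptions{RunAsUserName: &ctrUser}},
	)
	fmt.Println(*opts.RunAsUserName) // the container-level "ContainerUser" wins
}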
// +optional optional WindowsSecurityContextOptions windowsOptions = 10; @@ -4163,19 +4462,19 @@ message SerializedReference { // will answer requests sent through the proxy. message Service { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a service. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ServiceSpec spec = 2; // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ServiceStatus status = 3; } @@ -4186,7 +4485,7 @@ message Service { // * a set of secrets message ServiceAccount { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -4213,7 +4512,7 @@ message ServiceAccount { // ServiceAccountList is a list of ServiceAccount objects message ServiceAccountList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4251,7 +4550,7 @@ message ServiceAccountTokenProjection { // ServiceList holds a list of services. message ServiceList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4262,8 +4561,9 @@ message ServiceList { // ServicePort contains information on service's port. message ServicePort { // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. + // All ports within a ServiceSpec must have unique names. When considering + // the endpoints for a Service, this must match the 'name' field in the + // EndpointPort. // Optional if only one ServicePort is defined on this service. // +optional optional string name = 1; @@ -4423,6 +4723,16 @@ message ServiceSpec { // sessionAffinityConfig contains the configurations of session affinity. // +optional optional SessionAffinityConfig sessionAffinityConfig = 14; + + // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. + // IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is + // available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. 
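The ipFamily comment that begins above (and continues in the following hunk lines) introduces a per-Service IP family preference for dual-stack clusters. A sketch of requesting an IPv6 clusterIP; the IPFamily Go type and the IPv6Protocol constant are taken from the generated Go package of this API version and should be treated as assumptions if your vendored copy differs, and the selector and ports are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	family := corev1.IPv6Protocol // assumed constant name for the "IPv6" family
	svc := corev1.Service{
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{"app": "demo"}, // hypothetical selector
			Ports: []corev1.ServicePort{{
				Name:       "http",
				Port:       80,
				TargetPort: intstr.FromInt(8080),
			}},
			// clusterIP will be allocated from this family if the cluster supports it;
			// the field is immutable after creation.
			IPFamily: &family,
		},
	}
	fmt.Println(*svc.Spec.IPFamily)
}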
+ // Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which + // allocate external load-balancers should use the same IP family. Endpoints for this Service will be of + // this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the + // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. + // +optional + optional string ipFamily = 15; } // ServiceStatus represents the current status of a service. @@ -4601,6 +4911,59 @@ message TopologySelectorTerm { repeated TopologySelectorLabelRequirement matchLabelExpressions = 1; } +// TopologySpreadConstraint specifies how to spread matching pods among the given topology. +message TopologySpreadConstraint { + // MaxSkew describes the degree to which pods may be unevenly distributed. + // It's the maximum permitted difference between the number of matching pods in + // any two topology domains of a given topology type. + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 1/1/0: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P | P | | + // +-------+-------+-------+ + // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) + // violate MaxSkew(1). + // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + // It's a required field. Default value is 1 and 0 is not allowed. + optional int32 maxSkew = 1; + + // TopologyKey is the key of node labels. Nodes that have a label with this key + // and identical values are considered to be in the same topology. + // We consider each as a "bucket", and try to put balanced number + // of pods into each bucket. + // It's a required field. + optional string topologyKey = 2; + + // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + // the spread constraint. + // - DoNotSchedule (default) tells the scheduler not to schedule it + // - ScheduleAnyway tells the scheduler to still schedule it + // It's considered as "Unsatisfiable" if and only if placing incoming pod on any + // topology violates "MaxSkew". + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 3/1/1: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P P P | P | P | + // +-------+-------+-------+ + // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + // won't make it *more* imbalanced. + // It's a required field. + optional string whenUnsatisfiable = 3; + + // LabelSelector is used to find matching pods. + // Pods that match this label selector are counted to determine the number of pods + // in their corresponding topology domain. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4; +} + // TypedLocalObjectReference contains enough information to let you locate the // typed referenced object inside the same namespace. message TypedLocalObjectReference { @@ -4749,12 +5112,12 @@ message VolumeSource { // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. 
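The TopologySpreadConstraint message above is the core of the alpha EvenPodsSpread feature referenced earlier in PodSpec. The sketch below expresses the zone example from its comment (maxSkew of 1 across zones, hard constraint) in Go; the zone label key, selector labels, and image are illustrative assumptions.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	spec := corev1.PodSpec{
		Containers: []corev1.Container{{
			Name:  "app",
			Image: "example.com/app:1.0", // hypothetical
		}},
		TopologySpreadConstraints: []corev1.TopologySpreadConstraint{{
			// Allow at most a difference of 1 matching pod between any two zones.
			MaxSkew:     1,
			TopologyKey: "topology.kubernetes.io/zone", // typical zone label; adjust to your cluster
			// DoNotSchedule makes the constraint hard; ScheduleAnyway keeps it best-effort.
			WhenUnsatisfiable: corev1.DoNotSchedule,
			// Only pods matching this selector are counted per topology domain.
			LabelSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "demo"},
			},
		}},
	}
	fmt.Println(spec.TopologySpreadConstraints[0].TopologyKey)
}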
- // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // More info: https://examples.k8s.io/volumes/iscsi/README.md // +optional optional ISCSIVolumeSource iscsi = 8; // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsVolumeSource glusterfs = 9; @@ -4765,7 +5128,7 @@ message VolumeSource { optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDVolumeSource rbd = 11; @@ -4774,8 +5137,8 @@ message VolumeSource { // +optional optional FlexVolumeSource flexVolume = 12; - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional CinderVolumeSource cinder = 13; @@ -4881,5 +5244,13 @@ message WindowsSecurityContextOptions { // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. // +optional optional string gmsaCredentialSpec = 2; + + // The UserName in Windows to run the entrypoint of the container process. + // Defaults to the user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag. + // +optional + optional string runAsUserName = 3; } diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go index 1aac0cb41e0de..8da1ddeade1fc 100644 --- a/vendor/k8s.io/api/core/v1/register.go +++ b/vendor/k8s.io/api/core/v1/register.go @@ -88,6 +88,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &RangeAllocation{}, &ConfigMap{}, &ConfigMapList{}, + &EphemeralContainers{}, ) // Add common types diff --git a/vendor/k8s.io/api/core/v1/taint.go b/vendor/k8s.io/api/core/v1/taint.go index 7b606a30904f8..db71bd2fd0b60 100644 --- a/vendor/k8s.io/api/core/v1/taint.go +++ b/vendor/k8s.io/api/core/v1/taint.go @@ -24,8 +24,14 @@ func (t *Taint) MatchTaint(taintToMatch *Taint) bool { return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect } -// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +// taint.ToString() converts taint struct to string in format '=:', '=:', ':', or ''. 
func (t *Taint) ToString() string { + if len(t.Effect) == 0 { + if len(t.Value) == 0 { + return fmt.Sprintf("%v", t.Key) + } + return fmt.Sprintf("%v=%v:", t.Key, t.Value) + } if len(t.Value) == 0 { return fmt.Sprintf("%v:%v", t.Key, t.Effect) } diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index d014d0baf3d50..fcd4554029029 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -87,11 +87,11 @@ type VolumeSource struct { NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // More info: https://examples.k8s.io/volumes/iscsi/README.md // +optional ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` // PersistentVolumeClaimVolumeSource represents a reference to a @@ -100,15 +100,15 @@ type VolumeSource struct { // +optional PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. // +optional FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime @@ -192,7 +192,7 @@ type PersistentVolumeSource struct { HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` // NFS represents an NFS mount on the host. Provisioned by an admin. @@ -200,15 +200,15 @@ type PersistentVolumeSource struct { // +optional NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
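The ToString change above adds the two effect-less output formats. The rewritten doc comment for it lost its placeholders in extraction; per the function body, the four formats are '<key>=<value>:<effect>', '<key>=<value>:', '<key>:<effect>', and '<key>'. A quick sketch exercising all four branches against the vendored type; the key, value, and effect are illustrative.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// One example per output format of Taint.ToString:
	taints := []corev1.Taint{
		{Key: "dedicated", Value: "gpu", Effect: corev1.TaintEffectNoSchedule}, // dedicated=gpu:NoSchedule
		{Key: "dedicated", Value: "gpu"},                                       // dedicated=gpu:
		{Key: "dedicated", Effect: corev1.TaintEffectNoSchedule},               // dedicated:NoSchedule
		{Key: "dedicated"},                                                     // dedicated
	}
	for _, t := range taints {
		fmt.Println(t.ToString())
	}
}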
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime @@ -248,7 +248,7 @@ type PersistentVolumeSource struct { // +optional Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` // CSI represents storage that is handled by an external CSI driver (Beta feature). @@ -275,7 +275,7 @@ const ( type PersistentVolume struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -390,7 +390,7 @@ type PersistentVolumeStatus struct { type PersistentVolumeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of persistent volumes. @@ -405,7 +405,7 @@ type PersistentVolumeList struct { type PersistentVolumeClaim struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -427,7 +427,7 @@ type PersistentVolumeClaim struct { type PersistentVolumeClaimList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // A list of persistent volume claims. 
@@ -625,16 +625,16 @@ type EmptyDirVolumeSource struct { // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } @@ -643,22 +643,22 @@ type GlusterfsVolumeSource struct { // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsPersistentVolumeSource struct { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` // EndpointsNamespace is the namespace that contains Glusterfs endpoint. // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"` } @@ -667,10 +667,10 @@ type GlusterfsPersistentVolumeSource struct { // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` // Filesystem type of the volume that you want to mount. 
// Tip: Ensure that the filesystem type is supported by the host operating system. @@ -681,28 +681,28 @@ type RBDVolumeSource struct { FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } @@ -711,10 +711,10 @@ type RBDVolumeSource struct { // RBD volumes support ownership management and SELinux relabeling. type RBDPersistentVolumeSource struct { // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. @@ -725,28 +725,28 @@ type RBDPersistentVolumeSource struct { FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // The rados pool name. // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // The rados user name. // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } @@ -756,18 +756,18 @@ type RBDPersistentVolumeSource struct { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. type CinderVolumeSource struct { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` // Optional: points to a secret object containing parameters used to connect @@ -781,18 +781,18 @@ type CinderVolumeSource struct { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. type CinderPersistentVolumeSource struct { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. 
- // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` // Optional: points to a secret object containing parameters used to connect @@ -805,26 +805,26 @@ type CinderPersistentVolumeSource struct { // Cephfs volumes do not support ownership management or SELinux relabeling. type CephFSVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } @@ -844,26 +844,26 @@ type SecretReference struct { // Cephfs volumes do not support ownership management or SELinux relabeling. 
type CephFSPersistentVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } @@ -2025,7 +2025,7 @@ type Probe struct { // +optional PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"` // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. // +optional SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"` // Minimum consecutive failures for the probe to be considered failed after having succeeded. @@ -2196,6 +2196,16 @@ type Container struct { // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. + // This is an alpha feature enabled by the StartupProbe feature flag. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. // +optional @@ -2282,7 +2292,7 @@ type Lifecycle struct { // +optional PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` // PreStop is called immediately before a container is terminated due to an - // API request or management event such as liveness probe failure, + // API request or management event such as liveness/startup probe failure, // preemption, resource contention, etc. The handler is not called if the // container crashes or exits. The reason for termination is passed to the // handler. The Pod's termination grace period countdown begins before the @@ -2390,6 +2400,12 @@ type ContainerStatus struct { // Container's ID in the format 'docker://'. // +optional ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` + // Specifies whether the container has passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered successful. + // Resets to false when the container is restarted, or if kubelet loses state temporarily. + // Is always true when no startupProbe is defined. + // +optional + Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"` } // PodPhase is a label for the condition of a pod at the current time. @@ -2825,7 +2841,7 @@ type PodSpec struct { // init container fails, the pod is considered to have failed and is handled according // to its restartPolicy. The name for an init container or normal container must be // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. // The resourceRequirements of an init container are taken into account during scheduling // by finding the highest request/limit for each resource type, and then using the max of // of that value or the sum of the normal containers. Limits are applied to init containers @@ -2843,6 +2859,15 @@ type PodSpec struct { // +patchMergeKey=name // +patchStrategy=merge Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` + // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + // pod to perform user-initiated actions such as debugging. This list cannot be specified when + // creating a pod, and it cannot be modified by updating the pod spec. In order to add an + // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"` // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. @@ -3001,6 +3026,89 @@ type PodSpec struct { // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. 
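A rough sketch of how the startupProbe field added above might be populated. The image, command, and thresholds are invented for illustration; it assumes the alpha StartupProbe feature gate is enabled and that Probe at this vendored revision still embeds Handler.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// A slow-starting container: the startup probe gets up to 30 * 10s before the
	// kubelet gives up and restarts the container, and no other probes run until it succeeds.
	c := corev1.Container{
		Name:  "slow-app",
		Image: "example.com/slow-app:latest", // placeholder image
		StartupProbe: &corev1.Probe{
			Handler: corev1.Handler{
				Exec: &corev1.ExecAction{Command: []string{"cat", "/tmp/started"}},
			},
			PeriodSeconds:    10,
			FailureThreshold: 30,
			SuccessThreshold: 1, // must be 1 for liveness and startup probes
		},
	}
	fmt.Printf("%+v\n", c)
}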
// +optional PreemptionPolicy *PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"` + // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + // This field will be autopopulated at admission time by the RuntimeClass admission controller. If + // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + // The RuntimeClass admission controller will reject Pod create requests which have the overhead already + // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"` + // TopologySpreadConstraints describes how a group of pods ought to spread across topology + // domains. Scheduler will schedule pods in a way which abides by the constraints. + // This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread + // feature. + // All topologySpreadConstraints are ANDed. + // +optional + // +patchMergeKey=topologyKey + // +patchStrategy=merge + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"` +} + +type UnsatisfiableConstraintAction string + +const ( + // DoNotSchedule instructs the scheduler not to schedule the pod + // when constraints are not satisfied. + DoNotSchedule UnsatisfiableConstraintAction = "DoNotSchedule" + // ScheduleAnyway instructs the scheduler to schedule the pod + // even if constraints are not satisfied. + ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway" +) + +// TopologySpreadConstraint specifies how to spread matching pods among the given topology. +type TopologySpreadConstraint struct { + // MaxSkew describes the degree to which pods may be unevenly distributed. + // It's the maximum permitted difference between the number of matching pods in + // any two topology domains of a given topology type. + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 1/1/0: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P | P | | + // +-------+-------+-------+ + // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) + // violate MaxSkew(1). + // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + // It's a required field. Default value is 1 and 0 is not allowed. + MaxSkew int32 `json:"maxSkew" protobuf:"varint,1,opt,name=maxSkew"` + // TopologyKey is the key of node labels. Nodes that have a label with this key + // and identical values are considered to be in the same topology. + // We consider each as a "bucket", and try to put balanced number + // of pods into each bucket. + // It's a required field. 
+ TopologyKey string `json:"topologyKey" protobuf:"bytes,2,opt,name=topologyKey"` + // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + // the spread constraint. + // - DoNotSchedule (default) tells the scheduler not to schedule it + // - ScheduleAnyway tells the scheduler to still schedule it + // It's considered as "Unsatisfiable" if and only if placing incoming pod on any + // topology violates "MaxSkew". + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 3/1/1: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P P P | P | P | + // +-------+-------+-------+ + // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + // won't make it *more* imbalanced. + // It's a required field. + WhenUnsatisfiable UnsatisfiableConstraintAction `json:"whenUnsatisfiable" protobuf:"bytes,3,opt,name=whenUnsatisfiable,casttype=UnsatisfiableConstraintAction"` + // LabelSelector is used to find matching pods. + // Pods that match this label selector are counted to determine the number of pods + // in their corresponding topology domain. + // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,4,opt,name=labelSelector"` } const ( @@ -3028,7 +3136,9 @@ type PodSecurityContext struct { // takes precedence for that container. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` - // Windows security options. + // The Windows specific settings applied to all containers. + // If unspecified, the options within a container's SecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,8,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. @@ -3116,6 +3226,175 @@ type PodDNSConfigOption struct { Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` } +// IP address information for entries in the (plural) PodIPs field. +// Each entry includes: +// IP: An IP address allocated to the pod. Routable at least within the cluster. +type PodIP struct { + // ip is an IP address (IPv4 or IPv6) assigned to the pod + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` +} + +// EphemeralContainerCommon is a copy of all fields in Container to be inlined in +// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer +// to Container and allows separate documentation for the fields of EphemeralContainer. +// When a new field is added to Container it must be added here as well. +type EphemeralContainerCommon struct { + // Name of the ephemeral container specified as a DNS_LABEL. + // This name must be unique among all containers, init containers and ephemeral containers. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. 
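A minimal sketch of the TopologySpreadConstraint type defined above, spreading matching pods across zones. The label key/value and the zone topology key are illustrative, and the alpha EvenPodsSpread feature gate is assumed to be enabled.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Keep pods matching app=web within a skew of 1 across zones; refuse to schedule otherwise.
	spec := corev1.PodSpec{
		Containers: []corev1.Container{{Name: "web", Image: "example.com/web:1.0"}}, // placeholder
		TopologySpreadConstraints: []corev1.TopologySpreadConstraint{{
			MaxSkew:           1,
			TopologyKey:       "topology.kubernetes.io/zone", // any node label key works
			WhenUnsatisfiable: corev1.DoNotSchedule,
			LabelSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "web"},
			},
		}},
	}
	fmt.Printf("%+v\n", spec.TopologySpreadConstraints)
}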
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // Ports are not allowed for ephemeral containers. + Ports []ContainerPort `json:"ports,omitempty" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + // already allocated to the pod. + // +optional + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // This is a beta feature. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Probes are not allowed for ephemeral containers. 
+ // +optional + LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Probes are not allowed for ephemeral containers. + // +optional + ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // Probes are not allowed for ephemeral containers. + // +optional + StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` + // Lifecycle is not allowed for ephemeral containers. + // +optional + Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext is not allowed for ephemeral containers. + // +optional + SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. 
+ // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` +} + +// EphemeralContainerCommon converts to Container. All fields must be kept in sync between +// these two types. +var _ = Container(EphemeralContainerCommon{}) + +// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// user-initiated activities such as debugging. Ephemeral containers have no resource or +// scheduling guarantees, and they will not be restarted when they exit or when a pod is +// removed or restarted. If an ephemeral container causes a pod to exceed its resource +// allocation, the pod may be evicted. +// Ephemeral containers may not be added by directly updating the pod spec. They must be added +// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec +// once added. +// This is an alpha feature enabled by the EphemeralContainers feature flag. +type EphemeralContainer struct { + // Ephemeral containers have all of the fields of Container, plus additional fields + // specific to ephemeral containers. Fields in common with Container are in the + // following inlined struct so than an EphemeralContainer may easily be converted + // to a Container. + EphemeralContainerCommon `json:",inline" protobuf:"bytes,1,req"` + + // If set, the name of the container from PodSpec that this ephemeral container targets. + // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + // If not set then the ephemeral container is run in whatever namespaces are shared + // for the pod. Note that the container runtime must support this feature. + // +optional + TargetContainerName string `json:"targetContainerName,omitempty" protobuf:"bytes,2,opt,name=targetContainerName"` +} + // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system, especially if the node that hosts the pod cannot contact the control // plane. @@ -3171,6 +3450,14 @@ type PodStatus struct { // +optional PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` + // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must + // match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list + // is empty if no IPs have been allocated yet. + // +optional + // +patchStrategy=merge + // +patchMergeKey=ip + PodIPs []PodIP `json:"podIPs,omitempty" protobuf:"bytes,12,rep,name=podIPs" patchStrategy:"merge" patchMergeKey:"ip"` + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional @@ -3192,6 +3479,10 @@ type PodStatus struct { // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` + // Status for any ephemeral containers that have run in this pod. + // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. 
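To make the ephemeral-container fields above concrete, here is a rough sketch of a debug container object; per the comments it cannot be set at pod creation and would have to be submitted through the pod's ephemeralcontainers subresource. The image, container names, and shell command are hypothetical, and the alpha EphemeralContainers feature gate is assumed.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// An interactive debug container that shares the namespaces of the "app" container.
	ec := corev1.EphemeralContainer{
		EphemeralContainerCommon: corev1.EphemeralContainerCommon{
			Name:    "debugger",
			Image:   "busybox", // placeholder image
			Command: []string{"sh"},
			Stdin:   true,
			TTY:     true,
		},
		TargetContainerName: "app", // hypothetical container in the target pod
	}
	fmt.Printf("%+v\n", ec)
}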
+ // +optional + EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -3200,19 +3491,21 @@ type PodStatus struct { type PodStatusResult struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` } // +genclient +// +genclient:method=GetEphemeralContainers,verb=get,subresource=ephemeralcontainers,result=EphemeralContainers +// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers,input=EphemeralContainers,result=EphemeralContainers // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Pod is a collection of containers that can run on a host. This resource is created @@ -3220,12 +3513,12 @@ type PodStatusResult struct { type Pod struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -3233,7 +3526,7 @@ type Pod struct { // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3244,24 +3537,24 @@ type Pod struct { type PodList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of pods. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` } // PodTemplateSpec describes the data a pod should have when created from a template type PodTemplateSpec struct { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -3273,12 +3566,12 @@ type PodTemplateSpec struct { type PodTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Template defines the pods that will be created from this pod template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -3289,7 +3582,7 @@ type PodTemplate struct { type PodTemplateList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3401,12 +3694,12 @@ type ReplicationController struct { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the replication controller. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -3414,7 +3707,7 @@ type ReplicationController struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3425,7 +3718,7 @@ type ReplicationController struct { type ReplicationControllerList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3526,6 +3819,17 @@ type LoadBalancerIngress struct { Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` } +// IPFamily represents the IP Family (IPv4 or IPv6). This type is used +// to express the family of an IP expressed by a type (i.e. service.Spec.IPFamily) +type IPFamily string + +const ( + // IPv4Protocol indicates that this IP is IPv4 protocol + IPv4Protocol IPFamily = "IPv4" + // IPv6Protocol indicates that this IP is IPv6 protocol + IPv6Protocol IPFamily = "IPv6" +) + // ServiceSpec describes the attributes that a user creates on a service. type ServiceSpec struct { // The list of ports that are exposed by this service. @@ -3641,13 +3945,24 @@ type ServiceSpec struct { // sessionAffinityConfig contains the configurations of session affinity. // +optional SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` + + // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. + // IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is + // available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. + // Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which + // allocate external load-balancers should use the same IP family. Endpoints for this Service will be of + // this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the + // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. + // +optional + IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` } // ServicePort contains information on service's port. type ServicePort struct { // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. + // All ports within a ServiceSpec must have unique names. When considering + // the endpoints for a Service, this must match the 'name' field in the + // EndpointPort. // Optional if only one ServicePort is defined on this service. // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` @@ -3690,19 +4005,19 @@ type ServicePort struct { type Service struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
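A small sketch of the new ipFamily field on ServiceSpec shown above, requesting an IPv6 clusterIP. The service name, selector, and port are placeholders, and, as the field docs note, assigning a family the cluster does not offer will fail during clusterIP assignment.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	family := corev1.IPv6Protocol // request an IPv6 clusterIP, if the cluster supports it
	svc := corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "web-v6"}, // placeholder name
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{"app": "web"},
			Ports:    []corev1.ServicePort{{Name: "http", Port: 80}},
			IPFamily: &family,
		},
	}
	fmt.Println(*svc.Spec.IPFamily)
}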
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a service. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3719,7 +4034,7 @@ const ( type ServiceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3737,7 +4052,7 @@ type ServiceList struct { type ServiceAccount struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3767,7 +4082,7 @@ type ServiceAccount struct { type ServiceAccountList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3794,7 +4109,7 @@ type ServiceAccountList struct { type Endpoints struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3856,7 +4171,8 @@ type EndpointAddress struct { // EndpointPort is a tuple that describes a single port. type EndpointPort struct { - // The name of this port (corresponds to ServicePort.Name). + // The name of this port. This must match the 'name' field in the + // corresponding ServicePort. // Must be a DNS_LABEL. // Optional only if one port is defined. // +optional @@ -3878,7 +4194,7 @@ type EndpointPort struct { type EndpointsList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3891,6 +4207,14 @@ type NodeSpec struct { // PodCIDR represents the pod IP range assigned to the node. // +optional PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"` + + // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this + // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for + // each of IPv4 and IPv6. + // +optional + // +patchStrategy=merge + PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"` + // ID of the node assigned by the cloud provider in the format: :// // +optional ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` @@ -4072,6 +4396,9 @@ type NodeStatus struct { // List of addresses reachable to the node. // Queried from cloud provider, if available. // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses + // Note: This field is declared as mergeable, but the merge key is not sufficiently + // unique, which can cause data corruption when it is merged. Callers should instead + // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -4171,9 +4498,6 @@ type NodeConditionType string const ( // NodeReady means kubelet is healthy and ready to accept pods. NodeReady NodeConditionType = "Ready" - // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk - // space on the node. - NodeOutOfDisk NodeConditionType = "OutOfDisk" // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. NodeMemoryPressure NodeConditionType = "MemoryPressure" // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. @@ -4264,19 +4588,19 @@ type ResourceList map[ResourceName]resource.Quantity type Node struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a node. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the node. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4287,7 +4611,7 @@ type Node struct { type NodeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
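A short sketch of the dual-stack podCIDRs field on NodeSpec described above; per the comment, the 0th entry must mirror the legacy podCIDR field. The CIDR values are invented for illustration.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Dual-stack node: podCIDRs[0] must equal the singular podCIDR field.
	spec := corev1.NodeSpec{
		PodCIDR:  "10.244.1.0/24",
		PodCIDRs: []string{"10.244.1.0/24", "fd00:10:244:1::/64"}, // at most one IPv4 and one IPv6 range
	}
	fmt.Println(spec.PodCIDRs)
}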
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4318,6 +4642,12 @@ type NamespaceStatus struct { // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` + + // Represents the latest available observations of a namespace's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } type NamespacePhase string @@ -4330,6 +4660,32 @@ const ( NamespaceTerminating NamespacePhase = "Terminating" ) +type NamespaceConditionType string + +// These are valid conditions of a namespace. +const ( + // NamespaceDeletionDiscoveryFailure contains information about namespace deleter errors during resource discovery. + NamespaceDeletionDiscoveryFailure NamespaceConditionType = "NamespaceDeletionDiscoveryFailure" + // NamespaceDeletionContentFailure contains information about namespace deleter errors during deletion of resources. + NamespaceDeletionContentFailure NamespaceConditionType = "NamespaceDeletionContentFailure" + // NamespaceDeletionGVParsingFailure contains information about namespace deleter errors parsing GV for legacy types. + NamespaceDeletionGVParsingFailure NamespaceConditionType = "NamespaceDeletionGroupVersionParsingFailure" +) + +// NamespaceCondition contains details about state of namespace. +type NamespaceCondition struct { + // Type of namespace controller condition. + Type NamespaceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NamespaceConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + // +genclient // +genclient:nonNamespaced // +genclient:skipVerbs=deleteCollection @@ -4340,17 +4696,17 @@ const ( type Namespace struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of the Namespace. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status describes the current status of a Namespace. 
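The namespace deletion conditions introduced above can be inspected along these lines. This is only a sketch over an already-fetched Namespace object; no client-go call is shown, and the helper name is made up.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// reportDeletionProblems prints any namespace-deleter failures recorded in status.conditions.
func reportDeletionProblems(ns corev1.Namespace) {
	for _, cond := range ns.Status.Conditions {
		switch cond.Type {
		case corev1.NamespaceDeletionDiscoveryFailure,
			corev1.NamespaceDeletionContentFailure,
			corev1.NamespaceDeletionGVParsingFailure:
			if cond.Status == corev1.ConditionTrue {
				fmt.Printf("%s: %s (%s)\n", cond.Type, cond.Message, cond.Reason)
			}
		}
	}
}

func main() {
	reportDeletionProblems(corev1.Namespace{}) // empty object; real code would fetch one from the API server
}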
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4361,7 +4717,7 @@ type Namespace struct { type NamespaceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4377,7 +4733,7 @@ type NamespaceList struct { type Binding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4385,6 +4741,22 @@ type Binding struct { Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. +type EphemeralContainers struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // A list of ephemeral containers associated with this pod. New ephemeral containers + // may be appended to this list, but existing ephemeral containers may not be removed + // or modified. + // +patchMergeKey=name + // +patchStrategy=merge + EphemeralContainers []EphemeralContainer `json:"ephemeralContainers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=ephemeralContainers"` +} + // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. // +k8s:openapi-gen=false type Preconditions struct { @@ -4568,7 +4940,7 @@ type ServiceProxyOptions struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ObjectReference struct { // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // Namespace of the referent. @@ -4587,7 +4959,7 @@ type ObjectReference struct { // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` // Specific resourceVersion to which this reference is made, if any. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` @@ -4662,7 +5034,7 @@ const ( type Event struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // The object that this event is about. @@ -4750,7 +5122,7 @@ const ( type EventList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4810,12 +5182,12 @@ type LimitRangeSpec struct { type LimitRange struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the limits enforced. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -4826,7 +5198,7 @@ type LimitRange struct { type LimitRangeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4966,17 +5338,17 @@ type ResourceQuotaStatus struct { type ResourceQuota struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired quota. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status defines the actual enforced quota and its current usage. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4987,7 +5359,7 @@ type ResourceQuota struct { type ResourceQuotaList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5004,7 +5376,7 @@ type ResourceQuotaList struct { type Secret struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5121,7 +5493,7 @@ const ( type SecretList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5137,7 +5509,7 @@ type SecretList struct { type ConfigMap struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5166,7 +5538,7 @@ type ConfigMap struct { type ConfigMapList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5208,7 +5580,7 @@ type ComponentCondition struct { type ComponentStatus struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5225,7 +5597,7 @@ type ComponentStatus struct { type ComponentStatusList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5299,7 +5671,9 @@ type SecurityContext struct { // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` - // Windows security options. + // The Windows specific settings applied to all containers. + // If unspecified, the options from the PodSecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
// +optional WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,10,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. @@ -5385,6 +5759,14 @@ type WindowsSecurityContextOptions struct { // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag. // +optional GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"` + + // The UserName in Windows to run the entrypoint of the container process. + // Defaults to the user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag. + // +optional + RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -5393,7 +5775,7 @@ type WindowsSecurityContextOptions struct { type RangeAllocation struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index c0489ca1706d0..35b8389a7506a 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -108,7 +108,7 @@ func (AzureFileVolumeSource) SwaggerDoc() map[string]string { var map_Binding = map[string]string{ "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "target": "The target object that you want to bind to the standard object.", } @@ -158,12 +158,12 @@ func (Capabilities) SwaggerDoc() map[string]string { var map_CephFSPersistentVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", } func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -172,12 +172,12 @@ func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { var map_CephFSVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. 
More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", } func (CephFSVolumeSource) SwaggerDoc() map[string]string { @@ -186,9 +186,9 @@ func (CephFSVolumeSource) SwaggerDoc() map[string]string { var map_CinderPersistentVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", } @@ -198,9 +198,9 @@ func (CinderPersistentVolumeSource) SwaggerDoc() map[string]string { var map_CinderVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", } @@ -231,7 +231,7 @@ func (ComponentCondition) SwaggerDoc() map[string]string { var map_ComponentStatus = map[string]string{ "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "conditions": "List of component conditions observed", } @@ -241,7 +241,7 @@ func (ComponentStatus) SwaggerDoc() map[string]string { var map_ComponentStatusList = map[string]string{ "": "Status of all the conditions for the component as a list of ComponentStatus objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ComponentStatus objects.", } @@ -251,7 +251,7 @@ func (ComponentStatusList) SwaggerDoc() map[string]string { var map_ConfigMap = map[string]string{ "": "ConfigMap holds configuration data for pods to consume.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.", "binaryData": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. 
Using this field will require 1.10+ apiserver and kubelet.", } @@ -281,7 +281,7 @@ func (ConfigMapKeySelector) SwaggerDoc() map[string]string { var map_ConfigMapList = map[string]string{ "": "ConfigMapList is a resource containing a list of ConfigMap objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of ConfigMaps.", } @@ -338,6 +338,7 @@ var map_Container = map[string]string{ "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is an alpha feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", @@ -430,6 +431,7 @@ var map_ContainerStatus = map[string]string{ "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images", "imageID": "ImageID of the container's image.", "containerID": "Container's ID in the format 'docker://'.", + "started": "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. 
Is always true when no startupProbe is defined.", } func (ContainerStatus) SwaggerDoc() map[string]string { @@ -500,7 +502,7 @@ func (EndpointAddress) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort is a tuple that describes a single port.", - "name": "The name of this port (corresponds to ServicePort.Name). Must be a DNS_LABEL. Optional only if one port is defined.", + "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", } @@ -522,7 +524,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string { var map_Endpoints = map[string]string{ "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", } @@ -532,7 +534,7 @@ func (Endpoints) SwaggerDoc() map[string]string { var map_EndpointsList = map[string]string{ "": "EndpointsList is a list of endpoints.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of endpoints.", } @@ -574,9 +576,57 @@ func (EnvVarSource) SwaggerDoc() map[string]string { return map_EnvVarSource } +var map_EphemeralContainer = map[string]string{ + "": "An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.", + "targetContainerName": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. 
If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.", +} + +func (EphemeralContainer) SwaggerDoc() map[string]string { + return map_EphemeralContainer +} + +var map_EphemeralContainerCommon = map[string]string{ + "": "EphemeralContainerCommon is a copy of all fields in Container to be inlined in EphemeralContainer. This separate type allows easy conversion from EphemeralContainer to Container and allows separate documentation for the fields of EphemeralContainer. When a new field is added to Container it must be added here as well.", + "name": "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.", + "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "ports": "Ports are not allowed for ephemeral containers.", + "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "env": "List of environment variables to set in the container. Cannot be updated.", + "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", + "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. 
This is a beta feature.", + "livenessProbe": "Probes are not allowed for ephemeral containers.", + "readinessProbe": "Probes are not allowed for ephemeral containers.", + "startupProbe": "Probes are not allowed for ephemeral containers.", + "lifecycle": "Lifecycle is not allowed for ephemeral containers.", + "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "securityContext": "SecurityContext is not allowed for ephemeral containers.", + "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", +} + +func (EphemeralContainerCommon) SwaggerDoc() map[string]string { + return map_EphemeralContainerCommon +} + +var map_EphemeralContainers = map[string]string{ + "": "A list of ephemeral containers used with the Pod ephemeralcontainers subresource.", + "ephemeralContainers": "A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified.", +} + +func (EphemeralContainers) SwaggerDoc() map[string]string { + return map_EphemeralContainers +} + var map_Event = map[string]string{ "": "Event is a report of an event somewhere in the cluster.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "involvedObject": "The object that this event is about.", "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", "message": "A human-readable description of the status of this operation.", @@ -599,7 +649,7 @@ func (Event) SwaggerDoc() map[string]string { var map_EventList = map[string]string{ "": "EventList is a list of events.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of events", } @@ -711,10 +761,10 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string { var map_GlusterfsPersistentVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", } func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -723,9 +773,9 @@ func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", } func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { @@ -838,7 +888,7 @@ func (KeyToPath) SwaggerDoc() map[string]string { var map_Lifecycle = map[string]string{ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", - "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", } func (Lifecycle) SwaggerDoc() map[string]string { @@ -847,8 +897,8 @@ func (Lifecycle) SwaggerDoc() map[string]string { var map_LimitRange = map[string]string{ "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (LimitRange) SwaggerDoc() map[string]string { @@ -871,7 +921,7 @@ func (LimitRangeItem) SwaggerDoc() map[string]string { var map_LimitRangeList = map[string]string{ "": "LimitRangeList is a list of LimitRange items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", } @@ -939,18 +989,28 @@ func (NFSVolumeSource) SwaggerDoc() map[string]string { var map_Namespace = map[string]string{ "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Namespace) SwaggerDoc() map[string]string { return map_Namespace } +var map_NamespaceCondition = map[string]string{ + "": "NamespaceCondition contains details about state of namespace.", + "type": "Type of namespace controller condition.", + "status": "Status of the condition, one of True, False, Unknown.", +} + +func (NamespaceCondition) SwaggerDoc() map[string]string { + return map_NamespaceCondition +} + var map_NamespaceList = map[string]string{ "": "NamespaceList is a list of Namespaces.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", } @@ -968,8 +1028,9 @@ func (NamespaceSpec) SwaggerDoc() map[string]string { } var map_NamespaceStatus = map[string]string{ - "": "NamespaceStatus is information about the current status of a Namespace.", - "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + "": "NamespaceStatus is information about the current status of a Namespace.", + "phase": "Phase is the current lifecycle phase of the namespace. 
More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + "conditions": "Represents the latest available observations of a namespace's current state.", } func (NamespaceStatus) SwaggerDoc() map[string]string { @@ -978,9 +1039,9 @@ func (NamespaceStatus) SwaggerDoc() map[string]string { var map_Node = map[string]string{ "": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Node) SwaggerDoc() map[string]string { @@ -1053,7 +1114,7 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { var map_NodeList = map[string]string{ "": "NodeList is the whole list of all Nodes which have been registered with master.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of nodes", } @@ -1112,6 +1173,7 @@ func (NodeSelectorTerm) SwaggerDoc() map[string]string { var map_NodeSpec = map[string]string{ "": "NodeSpec describes the attributes that a node is created with.", "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", + "podCIDRs": "podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for each of IPv4 and IPv6.", "providerID": "ID of the node assigned by the cloud provider in the format: ://", "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", "taints": "If specified, the node's taints.", @@ -1129,7 +1191,7 @@ var map_NodeStatus = map[string]string{ "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses", + "addresses": "List of addresses reachable to the node. 
Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example.", "daemonEndpoints": "Endpoints of daemons running on the Node.", "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", "images": "List of container images on this node", @@ -1172,12 +1234,12 @@ func (ObjectFieldSelector) SwaggerDoc() map[string]string { var map_ObjectReference = map[string]string{ "": "ObjectReference contains enough information to let you inspect or modify the referred object.", - "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "namespace": "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids", "apiVersion": "API version of the referent.", - "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", } @@ -1187,7 +1249,7 @@ func (ObjectReference) SwaggerDoc() map[string]string { var map_PersistentVolume = map[string]string{ "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", } @@ -1198,7 +1260,7 @@ func (PersistentVolume) SwaggerDoc() map[string]string { var map_PersistentVolumeClaim = map[string]string{ "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } @@ -1221,7 +1283,7 @@ func (PersistentVolumeClaimCondition) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimList = map[string]string{ "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "A list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } @@ -1268,7 +1330,7 @@ func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { var map_PersistentVolumeList = map[string]string{ "": "PersistentVolumeList is a list of PersistentVolume items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", } @@ -1281,11 +1343,11 @@ var map_PersistentVolumeSource = map[string]string{ "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md", "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", @@ -1298,7 +1360,7 @@ var map_PersistentVolumeSource = map[string]string{ "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "local": "Local represents directly-attached storage with node affinity", - "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md", + "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", "csi": "CSI represents storage that is handled by an external CSI driver (Beta feature).", } @@ -1345,9 +1407,9 @@ func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string { var map_Pod = map[string]string{ "": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Pod) SwaggerDoc() map[string]string { @@ -1446,10 +1508,19 @@ func (PodExecOptions) SwaggerDoc() map[string]string { return map_PodExecOptions } +var map_PodIP = map[string]string{ + "": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. Routable at least within the cluster.", + "ip": "ip is an IP address (IPv4 or IPv6) assigned to the pod", +} + +func (PodIP) SwaggerDoc() map[string]string { + return map_PodIP +} + var map_PodList = map[string]string{ "": "PodList is a list of Pods.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of pods. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md", } func (PodList) SwaggerDoc() map[string]string { @@ -1502,7 +1573,7 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { var map_PodSecurityContext = map[string]string{ "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "windowsOptions": "Windows security options.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", @@ -1527,8 +1598,9 @@ func (PodSignature) SwaggerDoc() map[string]string { var map_PodSpec = map[string]string{ "": "PodSpec is a description of a pod.", "volumes": "List of volumes that can be mounted by containers belonging to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes", - "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", + "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.", "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. 
Value must be a positive integer.", @@ -1557,6 +1629,8 @@ var map_PodSpec = map[string]string{ "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.", "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.", + "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1564,18 +1638,20 @@ func (PodSpec) SwaggerDoc() map[string]string { } var map_PodStatus = map[string]string{ - "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", - "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. 
The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", - "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", - "message": "A human readable message indicating details about why the pod is in this condition.", - "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", - "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", - "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", - "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", - "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", + "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", + "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. 
The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", + "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "message": "A human readable message indicating details about why the pod is in this condition.", + "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", + "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", + "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", + "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + "podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", + "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", + "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", + "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature.", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1584,8 +1660,8 @@ func (PodStatus) SwaggerDoc() map[string]string { var map_PodStatusResult = map[string]string{ "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (PodStatusResult) SwaggerDoc() map[string]string { @@ -1594,8 +1670,8 @@ func (PodStatusResult) SwaggerDoc() map[string]string { var map_PodTemplate = map[string]string{ "": "PodTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (PodTemplate) SwaggerDoc() map[string]string { @@ -1604,7 +1680,7 @@ func (PodTemplate) SwaggerDoc() map[string]string { var map_PodTemplateList = map[string]string{ "": "PodTemplateList is a list of PodTemplates.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of pod templates", } @@ -1614,8 +1690,8 @@ func (PodTemplateList) SwaggerDoc() map[string]string { var map_PodTemplateSpec = map[string]string{ "": "PodTemplateSpec describes the data a pod should have when created from a template", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (PodTemplateSpec) SwaggerDoc() map[string]string { @@ -1669,7 +1745,7 @@ var map_Probe = map[string]string{ "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", - "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", + "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. 
Must be 1 for liveness and startup. Minimum value is 1.", "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", } @@ -1703,14 +1779,14 @@ func (QuobyteVolumeSource) SwaggerDoc() map[string]string { var map_RBDPersistentVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "monitors": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "pool": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", } func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1719,14 +1795,14 @@ func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { var map_RBDVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "monitors": "A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "pool": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", } func (RBDVolumeSource) SwaggerDoc() map[string]string { @@ -1735,7 +1811,7 @@ func (RBDVolumeSource) SwaggerDoc() map[string]string { var map_RangeAllocation = map[string]string{ "": "RangeAllocation is not a public type.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "range": "Range is string that identifies the range represented by 'data'.", "data": "Data is a bit array containing all allocated addresses in the previous segment.", } @@ -1746,9 +1822,9 @@ func (RangeAllocation) SwaggerDoc() map[string]string { var map_ReplicationController = map[string]string{ "": "ReplicationController represents the configuration of a replication controller.", - "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the replication controller. 
This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ReplicationController) SwaggerDoc() map[string]string { @@ -1770,7 +1846,7 @@ func (ReplicationControllerCondition) SwaggerDoc() map[string]string { var map_ReplicationControllerList = map[string]string{ "": "ReplicationControllerList is a collection of replication controllers.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } @@ -1817,9 +1893,9 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string { var map_ResourceQuota = map[string]string{ "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ResourceQuota) SwaggerDoc() map[string]string { @@ -1828,7 +1904,7 @@ func (ResourceQuota) SwaggerDoc() map[string]string { var map_ResourceQuotaList = map[string]string{ "": "ResourceQuotaList is a list of ResourceQuota items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is a list of ResourceQuota objects. 
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", } @@ -1937,7 +2013,7 @@ func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string { var map_Secret = map[string]string{ "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", "stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.", "type": "Used to facilitate programmatic handling of secret data.", @@ -1968,7 +2044,7 @@ func (SecretKeySelector) SwaggerDoc() map[string]string { var map_SecretList = map[string]string{ "": "SecretList is a list of Secret.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret", } @@ -2013,7 +2089,7 @@ var map_SecurityContext = map[string]string{ "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "windowsOptions": "Windows security options.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsNonRoot": "Indicates that the container must run as a non-root user. 
If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", @@ -2037,9 +2113,9 @@ func (SerializedReference) SwaggerDoc() map[string]string { var map_Service = map[string]string{ "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Service) SwaggerDoc() map[string]string { @@ -2048,7 +2124,7 @@ func (Service) SwaggerDoc() map[string]string { var map_ServiceAccount = map[string]string{ "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret", "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", @@ -2060,7 +2136,7 @@ func (ServiceAccount) SwaggerDoc() map[string]string { var map_ServiceAccountList = map[string]string{ "": "ServiceAccountList is a list of ServiceAccount objects", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", } @@ -2081,7 +2157,7 @@ func (ServiceAccountTokenProjection) SwaggerDoc() map[string]string { var map_ServiceList = map[string]string{ "": "ServiceList holds a list of services.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of services", } @@ -2091,7 +2167,7 @@ func (ServiceList) SwaggerDoc() map[string]string { var map_ServicePort = map[string]string{ "": "ServicePort contains information on service's port.", - "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", + "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", @@ -2126,6 +2202,7 @@ var map_ServiceSpec = map[string]string{ "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.", "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", + "ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. 
Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2240,6 +2317,18 @@ func (TopologySelectorTerm) SwaggerDoc() map[string]string { return map_TopologySelectorTerm } +var map_TopologySpreadConstraint = map[string]string{ + "": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", + "maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: ", + "topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.", + "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as \"Unsatisfiable\" if and only if placing incoming pod on any topology violates \"MaxSkew\". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", + "labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", +} + +func (TopologySpreadConstraint) SwaggerDoc() map[string]string { + return map_TopologySpreadConstraint +} + var map_TypedLocalObjectReference = map[string]string{ "": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", @@ -2314,12 +2403,12 @@ var map_VolumeSource = map[string]string{ "gitRepo": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "secret": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", - "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. 
More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", @@ -2367,6 +2456,7 @@ var map_WindowsSecurityContextOptions = map[string]string{ "": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", + "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.", } func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go index 4497760d3f6b4..3287fb51fc2b2 100644 --- a/vendor/k8s.io/api/core/v1/well_known_labels.go +++ b/vendor/k8s.io/api/core/v1/well_known_labels.go @@ -33,4 +33,10 @@ const ( // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io" + + // IsHeadlessService is added by Controller to an Endpoint denoting if its parent + // Service is Headless. 
The existence of this label can be used further by other + // controllers and kube-proxy to check if the Endpoint objects should be replicated when + // using Headless Services + IsHeadlessService = "service.kubernetes.io/headless" ) diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 114e1974cd012..fd47019c0344e 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -773,6 +773,11 @@ func (in *Container) DeepCopyInto(out *Container) { *out = new(Probe) (*in).DeepCopyInto(*out) } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } if in.Lifecycle != nil { in, out := &in.Lifecycle, &out.Lifecycle *out = new(Lifecycle) @@ -920,6 +925,11 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { *out = *in in.State.DeepCopyInto(&out.State) in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) + if in.Started != nil { + in, out := &in.Started, &out.Started + *out = new(bool) + **out = **in + } return } @@ -1278,6 +1288,139 @@ func (in *EnvVarSource) DeepCopy() *EnvVarSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralContainer) DeepCopyInto(out *EphemeralContainer) { + *out = *in + in.EphemeralContainerCommon.DeepCopyInto(&out.EphemeralContainerCommon) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainer. +func (in *EphemeralContainer) DeepCopy() *EphemeralContainer { + if in == nil { + return nil + } + out := new(EphemeralContainer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainerCommon. +func (in *EphemeralContainerCommon) DeepCopy() *EphemeralContainerCommon { + if in == nil { + return nil + } + out := new(EphemeralContainerCommon) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralContainers) DeepCopyInto(out *EphemeralContainers) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.EphemeralContainers != nil { + in, out := &in.EphemeralContainers, &out.EphemeralContainers + *out = make([]EphemeralContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainers. +func (in *EphemeralContainers) DeepCopy() *EphemeralContainers { + if in == nil { + return nil + } + out := new(EphemeralContainers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EphemeralContainers) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Event) DeepCopyInto(out *Event) { *out = *in @@ -2061,7 +2204,7 @@ func (in *Namespace) DeepCopyInto(out *Namespace) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -2083,6 +2226,23 @@ func (in *Namespace) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceCondition) DeepCopyInto(out *NamespaceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceCondition. +func (in *NamespaceCondition) DeepCopy() *NamespaceCondition { + if in == nil { + return nil + } + out := new(NamespaceCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamespaceList) DeepCopyInto(out *NamespaceList) { *out = *in @@ -2140,6 +2300,13 @@ func (in *NamespaceSpec) DeepCopy() *NamespaceSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NamespaceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -2470,6 +2637,11 @@ func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { *out = *in + if in.PodCIDRs != nil { + in, out := &in.PodCIDRs, &out.PodCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.Taints != nil { in, out := &in.Taints, &out.Taints *out = make([]Taint, len(*in)) @@ -3298,6 +3470,22 @@ func (in *PodExecOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodIP) DeepCopyInto(out *PodIP) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodIP. +func (in *PodIP) DeepCopy() *PodIP { + if in == nil { + return nil + } + out := new(PodIP) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodList) DeepCopyInto(out *PodList) { *out = *in @@ -3547,6 +3735,13 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.EphemeralContainers != nil { + in, out := &in.EphemeralContainers, &out.EphemeralContainers + *out = make([]EphemeralContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.TerminationGracePeriodSeconds != nil { in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds *out = new(int64) @@ -3633,6 +3828,20 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(PreemptionPolicy) **out = **in } + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3656,6 +3865,11 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.PodIPs != nil { + in, out := &in.PodIPs, &out.PodIPs + *out = make([]PodIP, len(*in)) + copy(*out, *in) + } if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime *out = (*in).DeepCopy() @@ -3674,6 +3888,13 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.EphemeralContainerStatuses != nil { + in, out := &in.EphemeralContainerStatuses, &out.EphemeralContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -4960,6 +5181,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(SessionAffinityConfig) (*in).DeepCopyInto(*out) } + if in.IPFamily != nil { + in, out := &in.IPFamily, &out.IPFamily + *out = new(IPFamily) + **out = **in + } return } @@ -5171,6 +5397,27 @@ func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint. +func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint { + if in == nil { + return nil + } + out := new(TopologySpreadConstraint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { *out = *in @@ -5505,6 +5752,11 @@ func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContex *out = new(string) **out = **in } + if in.RunAsUserName != nil { + in, out := &in.RunAsUserName, &out.RunAsUserName + *out = new(string) + **out = **in + } return } diff --git a/vendor/k8s.io/api/discovery/v1alpha1/BUILD.bazel b/vendor/k8s.io/api/discovery/v1alpha1/BUILD.bazel new file mode 100644 index 0000000000000..bbed8f0e31684 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "well_known_labels.go", + "zz_generated.deepcopy.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/api/discovery/v1alpha1", + importpath = "k8s.io/api/discovery/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) diff --git a/vendor/k8s.io/api/discovery/v1alpha1/doc.go b/vendor/k8s.io/api/discovery/v1alpha1/doc.go new file mode 100644 index 0000000000000..ffd6b0b54d451 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=discovery.k8s.io + +package v1alpha1 // import "k8s.io/api/discovery/v1alpha1" diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go new file mode 100644 index 0000000000000..34f6b28d71fd3 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go @@ -0,0 +1,1689 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{0} +} +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(m, src) +} +func (m *Endpoint) XXX_Size() int { + return m.Size() +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *EndpointConditions) Reset() { *m = EndpointConditions{} } +func (*EndpointConditions) ProtoMessage() {} +func (*EndpointConditions) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{1} +} +func (m *EndpointConditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointConditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointConditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointConditions.Merge(m, src) +} +func (m *EndpointConditions) XXX_Size() int { + return m.Size() +} +func (m *EndpointConditions) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointConditions.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo + +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{2} +} +func (m *EndpointPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointPort.Merge(m, src) +} +func (m *EndpointPort) XXX_Size() int { + return m.Size() +} +func (m *EndpointPort) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointPort.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointPort proto.InternalMessageInfo + +func (m *EndpointSlice) Reset() { *m = EndpointSlice{} } +func (*EndpointSlice) ProtoMessage() {} +func (*EndpointSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{3} +} 
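For orientation while reading this generated marshalling code, the following is a minimal, illustrative sketch (not part of the vendored upstream patch) of constructing the discovery/v1alpha1 EndpointSlice that these Marshal/Size methods serialize. The field names (ObjectMeta, AddressType, Endpoints, Addresses, Conditions, Ready, Topology, Ports, Name, Protocol, Port) mirror the generated code in this file; the literal values and the "IP" address type are assumptions for the example.

// endpointslice_sketch.go (illustrative only; assumes the vendored
// k8s.io/api/discovery/v1alpha1 package shown in this diff)
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/api/discovery/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ready := true
	name, proto, port := "https", v1.ProtocolTCP, int32(443)
	// "IP" is an assumed address-type value for this alpha API.
	addrType := v1alpha1.AddressType("IP")

	slice := v1alpha1.EndpointSlice{
		ObjectMeta:  metav1.ObjectMeta{Name: "example-svc-abc12", Namespace: "default"},
		AddressType: &addrType,
		Endpoints: []v1alpha1.Endpoint{{
			Addresses:  []string{"10.1.2.3"},
			Conditions: v1alpha1.EndpointConditions{Ready: &ready},
			Topology:   map[string]string{"topology.kubernetes.io/zone": "zone-a"},
		}},
		Ports: []v1alpha1.EndpointPort{{Name: &name, Protocol: &proto, Port: &port}},
	}
	fmt.Println(slice.Name, len(slice.Endpoints), len(slice.Ports))
}

The pointer-typed fields in the sketch correspond to the nil checks in the generated MarshalToSizedBuffer and Size methods above, which only emit those protobuf fields when they are set.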
+func (m *EndpointSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSlice.Merge(m, src) +} +func (m *EndpointSlice) XXX_Size() int { + return m.Size() +} +func (m *EndpointSlice) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo + +func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} } +func (*EndpointSliceList) ProtoMessage() {} +func (*EndpointSliceList) Descriptor() ([]byte, []int) { + return fileDescriptor_772f83c5b34e07a5, []int{4} +} +func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSliceList.Merge(m, src) +} +func (m *EndpointSliceList) XXX_Size() int { + return m.Size() +} +func (m *EndpointSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Endpoint)(nil), "k8s.io.api.discovery.v1alpha1.Endpoint") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.discovery.v1alpha1.Endpoint.TopologyEntry") + proto.RegisterType((*EndpointConditions)(nil), "k8s.io.api.discovery.v1alpha1.EndpointConditions") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1alpha1.EndpointPort") + proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1alpha1.EndpointSlice") + proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1alpha1.EndpointSliceList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1alpha1/generated.proto", fileDescriptor_772f83c5b34e07a5) +} + +var fileDescriptor_772f83c5b34e07a5 = []byte{ + // 728 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4b, 0x6f, 0xd3, 0x4a, + 0x14, 0x8e, 0x9b, 0x5a, 0xb2, 0x27, 0x8d, 0xd4, 0x8e, 0xee, 0x22, 0xca, 0xbd, 0xd7, 0x8e, 0xc2, + 0x82, 0x48, 0x85, 0x31, 0xa9, 0x28, 0xaa, 0x60, 0x43, 0x8d, 0xca, 0x43, 0xe2, 0x11, 0x86, 0x2e, + 0x10, 0x62, 0xc1, 0xc4, 0x9e, 0x3a, 0x26, 0x89, 0xc7, 0xb2, 0x27, 0x91, 0xb2, 0xe3, 0x27, 0x20, + 0xf1, 0x77, 0x58, 0xb2, 0xe8, 0xb2, 0xcb, 0xae, 0x0c, 0x35, 0xff, 0xa2, 0x2b, 0x34, 0xe3, 0x57, + 0x4a, 0x78, 0x64, 0x37, 0xe7, 0x9b, 0xf3, 0x7d, 0xe7, 0x9c, 0x6f, 0xce, 0x80, 0x87, 0xe3, 0x83, + 0x18, 0xf9, 0xcc, 0x1a, 0xcf, 0x86, 0x34, 0x0a, 0x28, 0xa7, 0xb1, 0x35, 0xa7, 0x81, 0xcb, 0x22, + 0x2b, 0xbf, 0x20, 0xa1, 0x6f, 0xb9, 0x7e, 0xec, 0xb0, 0x39, 0x8d, 0x16, 0xd6, 0xbc, 0x4f, 0x26, + 0xe1, 0x88, 0xf4, 0x2d, 0x8f, 0x06, 0x34, 0x22, 0x9c, 0xba, 0x28, 0x8c, 0x18, 0x67, 0xf0, 0xff, + 0x2c, 0x1d, 0x91, 0xd0, 0x47, 0x65, 0x3a, 0x2a, 0xd2, 0xdb, 0x37, 0x3d, 0x9f, 0x8f, 0x66, 0x43, + 0xe4, 0xb0, 0xa9, 0xe5, 0x31, 0x8f, 0x59, 0x92, 0x35, 0x9c, 0x9d, 0xc8, 0x48, 0x06, 0xf2, 0x94, + 0xa9, 0xb5, 0xbb, 0x4b, 0xc5, 0x1d, 0x16, 0x51, 0x6b, 0xbe, 0x52, 0xb1, 0x7d, 0xbb, 0xca, 0x99, + 0x12, 0x67, 0xe4, 0x07, 0xa2, 0xbf, 0x70, 0xec, 
0x09, 0x20, 0xb6, 0xa6, 0x94, 0x93, 0x5f, 0xb1, + 0xac, 0xdf, 0xb1, 0xa2, 0x59, 0xc0, 0xfd, 0x29, 0x5d, 0x21, 0xdc, 0xf9, 0x1b, 0x21, 0x76, 0x46, + 0x74, 0x4a, 0x7e, 0xe6, 0x75, 0x3f, 0xd7, 0x81, 0x76, 0x14, 0xb8, 0x21, 0xf3, 0x03, 0x0e, 0x77, + 0x81, 0x4e, 0x5c, 0x37, 0xa2, 0x71, 0x4c, 0xe3, 0x96, 0xd2, 0xa9, 0xf7, 0x74, 0xbb, 0x99, 0x26, + 0xa6, 0x7e, 0x58, 0x80, 0xb8, 0xba, 0x87, 0x14, 0x00, 0x87, 0x05, 0xae, 0xcf, 0x7d, 0x16, 0xc4, + 0xad, 0x8d, 0x8e, 0xd2, 0x6b, 0xec, 0xf5, 0xd1, 0x1f, 0xfd, 0x45, 0x45, 0xa5, 0x07, 0x25, 0xd1, + 0x86, 0xa7, 0x89, 0x59, 0x4b, 0x13, 0x13, 0x54, 0x18, 0x5e, 0x12, 0x86, 0x3d, 0xa0, 0x8d, 0x58, + 0xcc, 0x03, 0x32, 0xa5, 0xad, 0x7a, 0x47, 0xe9, 0xe9, 0xf6, 0x56, 0x9a, 0x98, 0xda, 0xe3, 0x1c, + 0xc3, 0xe5, 0x2d, 0x1c, 0x00, 0x9d, 0x93, 0xc8, 0xa3, 0x1c, 0xd3, 0x93, 0xd6, 0xa6, 0xec, 0xe7, + 0xda, 0x72, 0x3f, 0xe2, 0x85, 0xd0, 0xbc, 0x8f, 0x5e, 0x0c, 0xdf, 0x53, 0x47, 0x24, 0xd1, 0x88, + 0x06, 0x0e, 0xcd, 0x46, 0x3c, 0x2e, 0x98, 0xb8, 0x12, 0x81, 0x0e, 0xd0, 0x38, 0x0b, 0xd9, 0x84, + 0x79, 0x8b, 0x96, 0xda, 0xa9, 0xf7, 0x1a, 0x7b, 0xfb, 0x6b, 0x0e, 0x88, 0x8e, 0x73, 0xde, 0x51, + 0xc0, 0xa3, 0x85, 0xbd, 0x9d, 0x0f, 0xa9, 0x15, 0x30, 0x2e, 0x85, 0xdb, 0xf7, 0x40, 0xf3, 0x4a, + 0x32, 0xdc, 0x06, 0xf5, 0x31, 0x5d, 0xb4, 0x14, 0x31, 0x2c, 0x16, 0x47, 0xf8, 0x0f, 0x50, 0xe7, + 0x64, 0x32, 0xa3, 0xd2, 0x65, 0x1d, 0x67, 0xc1, 0xdd, 0x8d, 0x03, 0xa5, 0xbb, 0x0f, 0xe0, 0xaa, + 0xa7, 0xd0, 0x04, 0x6a, 0x44, 0x89, 0x9b, 0x69, 0x68, 0xb6, 0x9e, 0x26, 0xa6, 0x8a, 0x05, 0x80, + 0x33, 0xbc, 0xfb, 0x49, 0x01, 0x5b, 0x05, 0x6f, 0xc0, 0x22, 0x0e, 0xff, 0x03, 0x9b, 0xd2, 0x61, + 0x59, 0xd4, 0xd6, 0xd2, 0xc4, 0xdc, 0x7c, 0x2e, 0xdc, 0x95, 0x28, 0x7c, 0x04, 0x34, 0xb9, 0x2d, + 0x0e, 0x9b, 0x64, 0x2d, 0xd8, 0xbb, 0x62, 0x98, 0x41, 0x8e, 0x5d, 0x26, 0xe6, 0xbf, 0xab, 0x3f, + 0x01, 0x15, 0xd7, 0xb8, 0x24, 0x8b, 0x32, 0x21, 0x8b, 0xb8, 0x7c, 0x48, 0x35, 0x2b, 0x23, 0xca, + 0x63, 0x89, 0x76, 0xbf, 0x6e, 0x80, 0x66, 0xd1, 0xd5, 0xab, 0x89, 0xef, 0x50, 0xf8, 0x0e, 0x68, + 0xe2, 0x87, 0xb8, 0x84, 0x13, 0xd9, 0x5a, 0x63, 0xef, 0xd6, 0xd2, 0x03, 0x94, 0x8b, 0x8e, 0xc2, + 0xb1, 0x27, 0x80, 0x18, 0x89, 0xec, 0xea, 0x8d, 0x9f, 0x51, 0x4e, 0xaa, 0x05, 0xab, 0x30, 0x5c, + 0xaa, 0xc2, 0xfb, 0xa0, 0x91, 0xaf, 0xf4, 0xf1, 0x22, 0xa4, 0x72, 0x6d, 0x74, 0xdb, 0x48, 0x13, + 0xb3, 0x71, 0x58, 0xc1, 0x97, 0x57, 0x43, 0xbc, 0x4c, 0x81, 0xaf, 0x81, 0x4e, 0xf3, 0xa6, 0xc5, + 0x37, 0x10, 0x5b, 0x72, 0x7d, 0xcd, 0x2d, 0xb1, 0x77, 0xf2, 0xde, 0xf4, 0x02, 0x89, 0x71, 0x25, + 0x06, 0x07, 0x40, 0x15, 0xbe, 0xc4, 0xad, 0xba, 0x54, 0xdd, 0x5d, 0x53, 0x55, 0x38, 0x6a, 0x37, + 0x73, 0x65, 0x55, 0x44, 0x31, 0xce, 0x84, 0xba, 0x5f, 0x14, 0xb0, 0x73, 0xc5, 0xe1, 0xa7, 0x7e, + 0xcc, 0xe1, 0xdb, 0x15, 0x97, 0xd1, 0x7a, 0x2e, 0x0b, 0xb6, 0xf4, 0xb8, 0xdc, 0xef, 0x02, 0x59, + 0x72, 0xf8, 0x25, 0x50, 0x7d, 0x4e, 0xa7, 0x85, 0x37, 0x37, 0xd6, 0x9c, 0x42, 0xb6, 0x57, 0x8d, + 0xf1, 0x44, 0x48, 0xe0, 0x4c, 0xc9, 0x46, 0xa7, 0x17, 0x46, 0xed, 0xec, 0xc2, 0xa8, 0x9d, 0x5f, + 0x18, 0xb5, 0x0f, 0xa9, 0xa1, 0x9c, 0xa6, 0x86, 0x72, 0x96, 0x1a, 0xca, 0x79, 0x6a, 0x28, 0xdf, + 0x52, 0x43, 0xf9, 0xf8, 0xdd, 0xa8, 0xbd, 0xd1, 0x0a, 0xcd, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x04, 0x9d, 0x1a, 0x33, 0x3e, 0x06, 0x00, 0x00, +} + +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Topology) > 0 { + keysForTopology := make([]string, 0, len(m.Topology)) + for k := range m.Topology { + keysForTopology = append(keysForTopology, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + for iNdEx := len(keysForTopology) - 1; iNdEx >= 0; iNdEx-- { + v := m.Topology[string(keysForTopology[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForTopology[iNdEx]) + copy(dAtA[i:], keysForTopology[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForTopology[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.TargetRef != nil { + { + size, err := m.TargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Hostname != nil { + i -= len(*m.Hostname) + copy(dAtA[i:], *m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Hostname))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Conditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addresses[iNdEx]) + copy(dAtA[i:], m.Addresses[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EndpointConditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointConditions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Ready != nil { + i-- + if *m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x18 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0x12 + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*EndpointSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AddressType != nil { + i -= len(*m.AddressType) + copy(dAtA[i:], *m.AddressType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AddressType))) + i-- + dAtA[i] = 0x22 + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndpointSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Endpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Conditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Hostname != nil { + l = len(*m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Topology) > 0 { + for k, v := range m.Topology { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *EndpointConditions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ready != nil { + n += 2 + } + return n +} + +func (m *EndpointPort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l 
+ sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *EndpointSlice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AddressType != nil { + l = len(*m.AddressType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Endpoint) String() string { + if this == nil { + return "nil" + } + keysForTopology := make([]string, 0, len(this.Topology)) + for k := range this.Topology { + keysForTopology = append(keysForTopology, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTopology) + mapStringForTopology := "map[string]string{" + for _, k := range keysForTopology { + mapStringForTopology += fmt.Sprintf("%v: %v,", k, this.Topology[k]) + } + mapStringForTopology += "}" + s := strings.Join([]string{`&Endpoint{`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `Conditions:` + strings.Replace(strings.Replace(this.Conditions.String(), "EndpointConditions", "EndpointConditions", 1), `&`, ``, 1) + `,`, + `Hostname:` + valueToStringGenerated(this.Hostname) + `,`, + `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "v1.ObjectReference", 1) + `,`, + `Topology:` + mapStringForTopology + `,`, + `}`, + }, "") + return s +} +func (this *EndpointConditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointConditions{`, + `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + valueToStringGenerated(this.Name) + `,`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSlice) String() string { + if this == nil { + return "nil" + } + repeatedStringForEndpoints := "[]Endpoint{" + for _, f := range this.Endpoints { + repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + "," + } + repeatedStringForEndpoints += "}" + repeatedStringForPorts := "[]EndpointPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&EndpointSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Endpoints:` + repeatedStringForEndpoints + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `AddressType:` + valueToStringGenerated(this.AddressType) + `,`, + 
`}`, + }, "") + return s +} +func (this *EndpointSliceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]EndpointSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EndpointSlice", "EndpointSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EndpointSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Conditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + 
m.Hostname = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetRef == nil { + m.TargetRef = &v1.ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Topology", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Topology == nil { + m.Topology = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Topology[mapkey] = mapvalue + iNdEx = postIndex + default: 
+ iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointConditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointConditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointConditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Ready = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoints = append(m.Endpoints, Endpoint{}) + if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddressType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AddressType(dAtA[iNdEx:postIndex]) + m.AddressType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = 
append(m.Items, EndpointSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.proto b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto new file mode 100644 index 0000000000000..f14f37fd0462d --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.proto @@ -0,0 +1,148 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = 'proto2'; + +package k8s.io.api.discovery.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// Endpoint represents a single logical "backend" implementing a service. +message Endpoint { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. This + // allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers + // (e.g. kube-proxy) must handle different types of addresses in the context + // of their own capabilities. This must contain at least one address but no + // more than 100. + // +listType=set + repeated string addresses = 1; + + // conditions contains information about the current status of the endpoint. + optional EndpointConditions conditions = 2; + + // hostname of this endpoint. This field may be used by consumers of + // endpoints to distinguish endpoints from each other (e.g. in DNS names). + // Multiple endpoints which use the same hostname should be considered + // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) + // validation. + // +optional + optional string hostname = 3; + + // targetRef is a reference to a Kubernetes object that represents this + // endpoint. + // +optional + optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. + // +optional + map topology = 5; +} + +// EndpointConditions represents the current condition of an endpoint. +message EndpointConditions { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. + // +optional + optional bool ready = 1; +} + +// EndpointPort represents a Port used by an EndpointSlice +message EndpointPort { + // The name of this port. All ports in an EndpointSlice must have a unique + // name. If the EndpointSlice is dervied from a Kubernetes service, this + // corresponds to the Service.ports[].name. + // Name must either be an empty string or pass IANA_SVC_NAME validation: + // * must be no more than 15 characters long + // * may contain only [-a-z0-9] + // * must contain at least one letter [a-z] + // * it must not start or end with a hyphen, nor contain adjacent hyphens + // Default is empty string. + optional string name = 1; + + // The IP protocol for this port. 
+ // Must be UDP, TCP, or SCTP. + // Default is TCP. + optional string protocol = 2; + + // The port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + optional int32 port = 3; +} + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +message EndpointSlice { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. + // Default is IP + // +optional + optional string addressType = 4; + + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + repeated Endpoint endpoints = 2; + + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + repeated EndpointPort ports = 3; +} + +// EndpointSliceList represents a list of endpoint slices +message EndpointSliceList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of endpoint slices + // +listType=set + repeated EndpointSlice items = 2; +} + diff --git a/vendor/k8s.io/api/discovery/v1alpha1/register.go b/vendor/k8s.io/api/discovery/v1alpha1/register.go new file mode 100644 index 0000000000000..55b73f992f86b --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "discovery.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &EndpointSlice{}, + &EndpointSliceList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types.go b/vendor/k8s.io/api/discovery/v1alpha1/types.go new file mode 100644 index 0000000000000..0de6082aeef00 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/types.go @@ -0,0 +1,144 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSlice represents a subset of the endpoints that implement a service. +// For a given service there may be multiple EndpointSlice objects, selected by +// labels, which must be joined to produce the full set of endpoints. +type EndpointSlice struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. + // All addresses in this slice must be the same type. + // Default is IP + // +optional + AddressType *AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may + // include a maximum of 1000 endpoints. + // +listType=atomic + Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in + // this slice. Each port must have a unique name. When ports is empty, it + // indicates that there are no defined ports. 
When a port is defined with a + // nil port value, it indicates "all ports". Each slice may include a + // maximum of 100 ports. + // +optional + // +listType=atomic + Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"` +} + +// AddressType represents the type of address referred to by an endpoint. +type AddressType string + +const ( + // AddressTypeIP represents an IP Address. + AddressTypeIP = AddressType("IP") +) + +// Endpoint represents a single logical "backend" implementing a service. +type Endpoint struct { + // addresses of this endpoint. The contents of this field are interpreted + // according to the corresponding EndpointSlice addressType field. This + // allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers + // (e.g. kube-proxy) must handle different types of addresses in the context + // of their own capabilities. This must contain at least one address but no + // more than 100. + // +listType=set + Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. + Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of + // endpoints to distinguish endpoints from each other (e.g. in DNS names). + // Multiple endpoints which use the same hostname should be considered + // fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) + // validation. + // +optional + Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this + // endpoint. + // +optional + TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"` + // topology contains arbitrary topology information associated with the + // endpoint. These key/value pairs must conform with the label format. + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // Topology may include a maximum of 16 key/value pairs. This includes, but + // is not limited to the following well known keys: + // * kubernetes.io/hostname: the value indicates the hostname of the node + // where the endpoint is located. This should match the corresponding + // node label. + // * topology.kubernetes.io/zone: the value indicates the zone where the + // endpoint is located. This should match the corresponding node label. + // * topology.kubernetes.io/region: the value indicates the region where the + // endpoint is located. This should match the corresponding node label. + // +optional + Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` +} + +// EndpointConditions represents the current condition of an endpoint. +type EndpointConditions struct { + // ready indicates that this endpoint is prepared to receive traffic, + // according to whatever system is managing the endpoint. A nil value + // indicates an unknown state. In most cases consumers should interpret this + // unknown state as ready. + // +optional + Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` +} + +// EndpointPort represents a Port used by an EndpointSlice +type EndpointPort struct { + // The name of this port. All ports in an EndpointSlice must have a unique + // name. If the EndpointSlice is dervied from a Kubernetes service, this + // corresponds to the Service.ports[].name. 
+ // Name must either be an empty string or pass IANA_SVC_NAME validation: + // * must be no more than 15 characters long + // * may contain only [-a-z0-9] + // * must contain at least one letter [a-z] + // * it must not start or end with a hyphen, nor contain adjacent hyphens + // Default is empty string. + Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` + // The IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` + // The port number of the endpoint. + // If this is not specified, ports are not restricted and must be + // interpreted in the context of the specific consumer. + Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointSliceList represents a list of endpoint slices +type EndpointSliceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of endpoint slices + // +listType=set + Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000000..a524bcd68e5cf --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,85 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Endpoint = map[string]string{ + "": "Endpoint represents a single logical \"backend\" implementing a service.", + "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. This allows for cases like dual-stack (IPv4 and IPv6) networking. Consumers (e.g. kube-proxy) must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", + "conditions": "conditions contains information about the current status of the endpoint.", + "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). 
Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must pass DNS Label (RFC 1123) validation.", + "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", + "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.", +} + +func (Endpoint) SwaggerDoc() map[string]string { + return map_Endpoint +} + +var map_EndpointConditions = map[string]string{ + "": "EndpointConditions represents the current condition of an endpoint.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready.", +} + +func (EndpointConditions) SwaggerDoc() map[string]string { + return map_EndpointConditions +} + +var map_EndpointPort = map[string]string{ + "": "EndpointPort represents a Port used by an EndpointSlice", + "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass IANA_SVC_NAME validation: * must be no more than 15 characters long * may contain only [-a-z0-9] * must contain at least one letter [a-z] * it must not start or end with a hyphen, nor contain adjacent hyphens Default is empty string.", + "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", +} + +func (EndpointPort) SwaggerDoc() map[string]string { + return map_EndpointPort +} + +var map_EndpointSlice = map[string]string{ + "": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.", + "metadata": "Standard object's metadata.", + "addressType": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. Default is IP", + "endpoints": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.", + "ports": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". 
Each slice may include a maximum of 100 ports.", +} + +func (EndpointSlice) SwaggerDoc() map[string]string { + return map_EndpointSlice +} + +var map_EndpointSliceList = map[string]string{ + "": "EndpointSliceList represents a list of endpoint slices", + "metadata": "Standard list metadata.", + "items": "List of endpoint slices", +} + +func (EndpointSliceList) SwaggerDoc() map[string]string { + return map_EndpointSliceList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go b/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go similarity index 58% rename from vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go rename to vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go index de20a5d77b7fd..850cd205925c7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go +++ b/vendor/k8s.io/api/discovery/v1alpha1/well_known_labels.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,19 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kubectl +package v1alpha1 -import ( - "k8s.io/apimachinery/pkg/types" - client "k8s.io/client-go/rest" +const ( + // LabelServiceName is used to indicate the name of a Kubernetes service. + LabelServiceName = "kubernetes.io/service-name" ) - -// RESTClient is a client helper for dealing with RESTful resources -// in a generic way. -type RESTClient interface { - Get() *client.Request - Post() *client.Request - Patch(types.PatchType) *client.Request - Delete() *client.Request - Put() *client.Request -} diff --git a/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..e424fccf3b369 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,195 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Conditions.DeepCopyInto(&out.Conditions) + if in.Hostname != nil { + in, out := &in.Hostname, &out.Hostname + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) { + *out = *in + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions. +func (in *EndpointConditions) DeepCopy() *EndpointConditions { + if in == nil { + return nil + } + out := new(EndpointConditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(v1.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. +func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.AddressType != nil { + in, out := &in.AddressType, &out.AddressType + *out = new(AddressType) + **out = **in + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice. +func (in *EndpointSlice) DeepCopy() *EndpointSlice { + if in == nil { + return nil + } + out := new(EndpointSlice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *EndpointSlice) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EndpointSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList. +func (in *EndpointSliceList) DeepCopy() *EndpointSliceList { + if in == nil { + return nil + } + out := new(EndpointSliceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointSliceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go index bbaa909aa421d..0e9a8e7830372 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -17,33 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto - - It has these top-level messages: - Event - EventList - EventSeries -*/ package v1beta1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" math "math" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -57,27 +44,159 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{0} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{1} +} +func (m *EventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) +} +func (m *EventList) XXX_Size() int { + return m.Size() +} +func (m *EventList) XXX_DiscardUnknown() { + xxx_messageInfo_EventList.DiscardUnknown(m) +} + +var xxx_messageInfo_EventList proto.InternalMessageInfo -func (m *EventList) Reset() { *m = EventList{} } -func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{2} +} +func (m *EventSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSeries.Merge(m, src) +} +func (m *EventSeries) XXX_Size() int { + return m.Size() +} +func (m *EventSeries) XXX_DiscardUnknown() { + xxx_messageInfo_EventSeries.DiscardUnknown(m) +} -func (m *EventSeries) Reset() { *m = EventSeries{} } -func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_EventSeries proto.InternalMessageInfo func init() { proto.RegisterType((*Event)(nil), "k8s.io.api.events.v1beta1.Event") proto.RegisterType((*EventList)(nil), "k8s.io.api.events.v1beta1.EventList") proto.RegisterType((*EventSeries)(nil), "k8s.io.api.events.v1beta1.EventSeries") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptor_4f97f691c32a5ac8) +} + +var fileDescriptor_4f97f691c32a5ac8 = []byte{ + // 801 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x16, 0x13, 0x4b, 0xb2, 0x56, 0x49, 0x2c, 
0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x2a, 0x40, + 0x20, 0x14, 0x08, 0x59, 0x07, 0x45, 0xdb, 0x6b, 0x18, 0xb9, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, + 0x15, 0x3d, 0x64, 0x45, 0x4d, 0x68, 0x56, 0xe2, 0x2e, 0xb1, 0xbb, 0x12, 0xe0, 0x5b, 0x2f, 0x05, + 0x7a, 0xec, 0x33, 0xf4, 0x09, 0xfa, 0x18, 0x3e, 0xe6, 0x98, 0x93, 0x50, 0xb3, 0x6f, 0xd1, 0x53, + 0xc1, 0xe5, 0x4a, 0x94, 0xf5, 0x83, 0xa8, 0xe8, 0x4d, 0x9c, 0xf9, 0x7e, 0x66, 0x66, 0x47, 0x83, + 0x82, 0xd1, 0xb7, 0xca, 0x8b, 0x85, 0x3f, 0x9a, 0x0c, 0x40, 0x72, 0xd0, 0xa0, 0xfc, 0x29, 0xf0, + 0xa1, 0x90, 0xbe, 0x4d, 0xb0, 0x34, 0xf6, 0x61, 0x0a, 0x5c, 0x2b, 0x7f, 0x7a, 0x32, 0x00, 0xcd, + 0x4e, 0xfc, 0x08, 0x38, 0x48, 0xa6, 0x61, 0xe8, 0xa5, 0x52, 0x68, 0x81, 0x9f, 0x14, 0x50, 0x8f, + 0xa5, 0xb1, 0x57, 0x40, 0x3d, 0x0b, 0x3d, 0x7e, 0x1e, 0xc5, 0xfa, 0x6a, 0x32, 0xf0, 0x42, 0x91, + 0xf8, 0x91, 0x88, 0x84, 0x6f, 0x18, 0x83, 0xc9, 0x7b, 0xf3, 0x65, 0x3e, 0xcc, 0xaf, 0x42, 0xe9, + 0xb8, 0xbb, 0x64, 0x1a, 0x0a, 0x09, 0xfe, 0x74, 0xcd, 0xed, 0xf8, 0xab, 0x12, 0x93, 0xb0, 0xf0, + 0x2a, 0xe6, 0x20, 0xaf, 0xfd, 0x74, 0x14, 0xe5, 0x01, 0xe5, 0x27, 0xa0, 0xd9, 0x26, 0x96, 0xbf, + 0x8d, 0x25, 0x27, 0x5c, 0xc7, 0x09, 0xac, 0x11, 0xbe, 0xfe, 0x14, 0x41, 0x85, 0x57, 0x90, 0xb0, + 0x55, 0x5e, 0xf7, 0x8f, 0x06, 0xaa, 0x9e, 0xe6, 0x43, 0xc0, 0xef, 0xd0, 0x7e, 0x5e, 0xcd, 0x90, + 0x69, 0x46, 0x9c, 0x8e, 0xd3, 0x6b, 0xbe, 0xf8, 0xd2, 0x2b, 0x27, 0xb5, 0x10, 0xf5, 0xd2, 0x51, + 0x94, 0x07, 0x94, 0x97, 0xa3, 0xbd, 0xe9, 0x89, 0xf7, 0x76, 0xf0, 0x33, 0x84, 0xfa, 0x1c, 0x34, + 0x0b, 0xf0, 0xcd, 0xac, 0x5d, 0xc9, 0x66, 0x6d, 0x54, 0xc6, 0xe8, 0x42, 0x15, 0xbf, 0x43, 0x0d, + 0x33, 0xef, 0xcb, 0x38, 0x01, 0x72, 0xcf, 0x58, 0xf8, 0xbb, 0x59, 0x9c, 0xc7, 0xa1, 0x14, 0x39, + 0x2d, 0x38, 0xb4, 0x0e, 0x8d, 0xd3, 0xb9, 0x12, 0x2d, 0x45, 0xf1, 0x1b, 0x54, 0x53, 0x20, 0x63, + 0x50, 0xe4, 0xbe, 0x91, 0x7f, 0xe6, 0x6d, 0x7d, 0x6b, 0xcf, 0x08, 0x5c, 0x18, 0x74, 0x80, 0xb2, + 0x59, 0xbb, 0x56, 0xfc, 0xa6, 0x56, 0x01, 0x9f, 0xa3, 0xc7, 0x12, 0x52, 0x21, 0x75, 0xcc, 0xa3, + 0x57, 0x82, 0x6b, 0x29, 0xc6, 0x63, 0x90, 0x64, 0xaf, 0xe3, 0xf4, 0x1a, 0xc1, 0x67, 0xb6, 0x8c, + 0xc7, 0x74, 0x1d, 0x42, 0x37, 0xf1, 0xf0, 0xf7, 0xe8, 0x70, 0x11, 0x7e, 0xcd, 0x95, 0x66, 0x3c, + 0x04, 0x52, 0x35, 0x62, 0x4f, 0xac, 0xd8, 0x21, 0x5d, 0x05, 0xd0, 0x75, 0x0e, 0x7e, 0x86, 0x6a, + 0x2c, 0xd4, 0xb1, 0xe0, 0xa4, 0x66, 0xd8, 0x8f, 0x2c, 0xbb, 0xf6, 0xd2, 0x44, 0xa9, 0xcd, 0xe6, + 0x38, 0x09, 0x4c, 0x09, 0x4e, 0xea, 0x77, 0x71, 0xd4, 0x44, 0xa9, 0xcd, 0xe2, 0x4b, 0xd4, 0x90, + 0x10, 0x31, 0x39, 0x8c, 0x79, 0x44, 0xf6, 0xcd, 0xd8, 0x9e, 0x2e, 0x8f, 0x2d, 0x5f, 0xec, 0xf2, + 0x99, 0x29, 0xbc, 0x07, 0x09, 0x3c, 0x5c, 0x7a, 0x09, 0x3a, 0x67, 0xd3, 0x52, 0x08, 0xbf, 0x41, + 0x75, 0x09, 0xe3, 0x7c, 0xd1, 0x48, 0x63, 0x77, 0xcd, 0x66, 0x36, 0x6b, 0xd7, 0x69, 0xc1, 0xa3, + 0x73, 0x01, 0xdc, 0x41, 0x7b, 0x5c, 0x68, 0x20, 0xc8, 0xf4, 0xf1, 0xc0, 0xfa, 0xee, 0xfd, 0x20, + 0x34, 0x50, 0x93, 0xc9, 0x11, 0xfa, 0x3a, 0x05, 0xd2, 0xbc, 0x8b, 0xb8, 0xbc, 0x4e, 0x81, 0x9a, + 0x0c, 0x06, 0xd4, 0x1a, 0x42, 0x2a, 0x21, 0xcc, 0x15, 0x2f, 0xc4, 0x44, 0x86, 0x40, 0x1e, 0x98, + 0xc2, 0xda, 0x9b, 0x0a, 0x2b, 0x96, 0xc3, 0xc0, 0x02, 0x62, 0xe5, 0x5a, 0xfd, 0x15, 0x01, 0xba, + 0x26, 0x89, 0x7f, 0x73, 0x10, 0x29, 0x83, 0xdf, 0xc5, 0x52, 0x99, 0xc5, 0x54, 0x9a, 0x25, 0x29, + 0x79, 0x68, 0xfc, 0xbe, 0xd8, 0x6d, 0xe5, 0xcd, 0xb6, 0x77, 0xac, 0x35, 0xe9, 0x6f, 0xd1, 0xa4, + 0x5b, 0xdd, 0xf0, 0xaf, 0x0e, 0x3a, 0x2a, 0x93, 0x67, 0x6c, 0xb9, 0x92, 0x47, 0xff, 0xb9, 0x92, + 0xb6, 0xad, 0xe4, 0xa8, 0xbf, 0x59, 0x92, 0x6e, 0xf3, 0xc2, 0x2f, 0xd1, 
0x41, 0x99, 0x7a, 0x25, + 0x26, 0x5c, 0x93, 0x83, 0x8e, 0xd3, 0xab, 0x06, 0x47, 0x56, 0xf2, 0xa0, 0x7f, 0x37, 0x4d, 0x57, + 0xf1, 0xdd, 0x3f, 0x1d, 0x54, 0xfc, 0xdf, 0xcf, 0x62, 0xa5, 0xf1, 0x4f, 0x6b, 0x87, 0xca, 0xdb, + 0xad, 0x91, 0x9c, 0x6d, 0xce, 0x54, 0xcb, 0x3a, 0xef, 0xcf, 0x23, 0x4b, 0x47, 0xea, 0x14, 0x55, + 0x63, 0x0d, 0x89, 0x22, 0xf7, 0x3a, 0xf7, 0x7b, 0xcd, 0x17, 0x9d, 0x4f, 0x5d, 0x90, 0xe0, 0xa1, + 0x15, 0xab, 0xbe, 0xce, 0x69, 0xb4, 0x60, 0x77, 0x33, 0x07, 0x35, 0x97, 0x2e, 0x0c, 0x7e, 0x8a, + 0xaa, 0xa1, 0xe9, 0xdd, 0x31, 0xbd, 0x2f, 0x48, 0x45, 0xc7, 0x45, 0x0e, 0x4f, 0x50, 0x6b, 0xcc, + 0x94, 0x7e, 0x3b, 0x50, 0x20, 0xa7, 0x30, 0xfc, 0x3f, 0x77, 0x72, 0xb1, 0xb4, 0x67, 0x2b, 0x82, + 0x74, 0xcd, 0x02, 0x7f, 0x83, 0xaa, 0x4a, 0x33, 0x0d, 0xe6, 0x68, 0x36, 0x82, 0xcf, 0xe7, 0xb5, + 0x5d, 0xe4, 0xc1, 0x7f, 0x66, 0xed, 0xd6, 0x52, 0x23, 0x26, 0x46, 0x0b, 0x7c, 0xf0, 0xfc, 0xe6, + 0xd6, 0xad, 0x7c, 0xb8, 0x75, 0x2b, 0x1f, 0x6f, 0xdd, 0xca, 0x2f, 0x99, 0xeb, 0xdc, 0x64, 0xae, + 0xf3, 0x21, 0x73, 0x9d, 0x8f, 0x99, 0xeb, 0xfc, 0x95, 0xb9, 0xce, 0xef, 0x7f, 0xbb, 0x95, 0x1f, + 0xeb, 0x76, 0x5e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x25, 0x9b, 0x14, 0x4d, 0xbd, 0x07, 0x00, + 0x00, +} + func (m *Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -85,112 +204,139 @@ func (m *Event) Marshal() (dAtA []byte, err error) { } func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) - n2, err := m.EventTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) + i-- + dAtA[i] = 0x78 + { + size, err := m.DeprecatedLastTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - if m.Series != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) - n3, err := m.Series.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x72 + { + size, err := m.DeprecatedFirstTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n3 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) - i += copy(dAtA[i:], m.ReportingController) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) - i += copy(dAtA[i:], m.ReportingInstance) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) - i += copy(dAtA[i:], m.Action) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Regarding.Size())) - n4, err := m.Regarding.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x6a + { + size, err := m.DeprecatedSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x62 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x5a + i -= len(m.Note) + copy(dAtA[i:], m.Note) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) + i-- + dAtA[i] = 0x52 if m.Related != nil { + { + size, err := m.Related.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) - n5, err := m.Related.MarshalTo(dAtA[i:]) + } + { + size, err := m.Regarding.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n5 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) - i += copy(dAtA[i:], m.Note) - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedSource.Size())) - n6, err := m.DeprecatedSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x42 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x3a + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0x32 + i -= len(m.ReportingInstance) + copy(dAtA[i:], m.ReportingInstance) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i-- + dAtA[i] = 0x2a + i -= len(m.ReportingController) + copy(dAtA[i:], m.ReportingController) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i-- + dAtA[i] = 0x22 + if m.Series != nil { + { + size, err := m.Series.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n6 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedFirstTimestamp.Size())) - n7, err := m.DeprecatedFirstTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.EventTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedLastTimestamp.Size())) - n8, err := m.DeprecatedLastTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - dAtA[i] = 0x78 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *EventList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -198,37 +344,46 @@ func (m *EventList) Marshal() (dAtA []byte, err error) { } func (m *EventList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i 
= encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *EventSeries) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -236,38 +391,51 @@ func (m *EventSeries) Marshal() (dAtA []byte, err error) { } func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) - n10, err := m.LastObservedTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x1a - i++ + i -= len(m.State) + copy(dAtA[i:], m.State) i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) - i += copy(dAtA[i:], m.State) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.LastObservedTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Event) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -307,6 +475,9 @@ func (m *Event) Size() (n int) { } func (m *EventList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -321,6 +492,9 @@ func (m *EventList) Size() (n int) { } func (m *EventSeries) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Count)) @@ -332,14 +506,7 @@ func (m *EventSeries) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -349,20 +516,20 @@ func (this *Event) String() string { return "nil" } s := strings.Join([]string{`&Event{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", 
"k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EventTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EventTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(this.Series.String(), "EventSeries", "EventSeries", 1) + `,`, `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, `Action:` + fmt.Sprintf("%v", this.Action) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Regarding:` + strings.Replace(strings.Replace(this.Regarding.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1) + `,`, + `Regarding:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Regarding), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "v11.ObjectReference", 1) + `,`, `Note:` + fmt.Sprintf("%v", this.Note) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `DeprecatedSource:` + strings.Replace(strings.Replace(this.DeprecatedSource.String(), "EventSource", "k8s_io_api_core_v1.EventSource", 1), `&`, ``, 1) + `,`, - `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedFirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedLastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedSource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedSource), "EventSource", "v11.EventSource", 1), `&`, ``, 1) + `,`, + `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedFirstTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedLastTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `DeprecatedCount:` + fmt.Sprintf("%v", this.DeprecatedCount) + `,`, `}`, }, "") @@ -372,9 +539,14 @@ func (this *EventList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&EventList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -385,7 +557,7 @@ func (this *EventSeries) String() string { } s := strings.Join([]string{`&EventSeries{`, `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `LastObservedTime:` + 
strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastObservedTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, `State:` + fmt.Sprintf("%v", this.State) + `,`, `}`, }, "") @@ -414,7 +586,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -442,7 +614,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -451,6 +623,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -472,7 +647,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -481,6 +656,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -502,7 +680,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -511,6 +689,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -535,7 +716,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -545,6 +726,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -564,7 +748,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -574,6 +758,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -593,7 +780,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -603,6 +790,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -622,7 +812,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -632,6 +822,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > 
l { return io.ErrUnexpectedEOF } @@ -651,7 +844,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -660,6 +853,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -681,7 +877,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -690,11 +886,14 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Related == nil { - m.Related = &k8s_io_api_core_v1.ObjectReference{} + m.Related = &v11.ObjectReference{} } if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -714,7 +913,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -724,6 +923,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -743,7 +945,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -753,6 +955,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -772,7 +977,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -781,6 +986,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -802,7 +1010,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -811,6 +1019,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -832,7 +1043,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -841,6 +1052,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -862,7 +1076,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DeprecatedCount |= (int32(b) & 0x7F) << shift + m.DeprecatedCount |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -876,6 +1090,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -903,7 +1120,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -931,7 +1148,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -940,6 +1157,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -961,7 +1181,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -970,6 +1190,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -987,6 +1210,9 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1014,7 +1240,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1042,7 +1268,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift + m.Count |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -1061,7 +1287,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1070,6 +1296,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1091,7 +1320,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1101,6 +1330,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1115,6 +1347,9 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1181,10 +1416,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1213,6 +1451,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1231,62 +1472,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative 
length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 801 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0x16, 0x13, 0x4b, 0xb2, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x2a, 0x40, - 0x20, 0x14, 0x08, 0x59, 0x07, 0x45, 0xdb, 0x6b, 0x18, 0xb9, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, - 0x15, 0x3d, 0x64, 0x45, 0x4d, 0x68, 0x56, 0xe2, 0x2e, 0xb1, 0xbb, 0x12, 0xe0, 0x5b, 0x2f, 0x05, - 0x7a, 0xec, 0x33, 0xf4, 0x09, 0xfa, 0x18, 0x3e, 0xe6, 0x98, 0x93, 0x50, 0xb3, 0x6f, 0xd1, 0x53, - 0xc1, 0xe5, 0x4a, 0x94, 0xf5, 0x83, 0xa8, 0xe8, 0x4d, 0x9c, 0xf9, 0x7e, 0x66, 0x66, 0x47, 0x83, - 0x82, 0xd1, 0xb7, 0xca, 0x8b, 0x85, 0x3f, 0x9a, 0x0c, 0x40, 0x72, 0xd0, 0xa0, 0xfc, 0x29, 0xf0, - 0xa1, 0x90, 0xbe, 0x4d, 0xb0, 0x34, 0xf6, 0x61, 0x0a, 0x5c, 0x2b, 0x7f, 0x7a, 0x32, 0x00, 0xcd, - 0x4e, 0xfc, 0x08, 0x38, 0x48, 0xa6, 0x61, 0xe8, 0xa5, 0x52, 0x68, 0x81, 0x9f, 0x14, 0x50, 0x8f, - 0xa5, 0xb1, 0x57, 0x40, 0x3d, 0x0b, 0x3d, 0x7e, 0x1e, 0xc5, 0xfa, 0x6a, 0x32, 0xf0, 0x42, 0x91, - 0xf8, 0x91, 0x88, 0x84, 0x6f, 0x18, 0x83, 0xc9, 0x7b, 0xf3, 0x65, 0x3e, 0xcc, 0xaf, 0x42, 0xe9, - 0xb8, 0xbb, 0x64, 0x1a, 0x0a, 0x09, 0xfe, 0x74, 0xcd, 0xed, 0xf8, 0xab, 0x12, 0x93, 0xb0, 0xf0, - 0x2a, 0xe6, 0x20, 0xaf, 0xfd, 0x74, 0x14, 0xe5, 0x01, 0xe5, 0x27, 0xa0, 0xd9, 0x26, 0x96, 0xbf, - 0x8d, 0x25, 0x27, 0x5c, 0xc7, 0x09, 0xac, 0x11, 0xbe, 0xfe, 0x14, 0x41, 0x85, 0x57, 0x90, 0xb0, - 0x55, 0x5e, 0xf7, 0x8f, 0x06, 0xaa, 0x9e, 0xe6, 0x43, 0xc0, 0xef, 0xd0, 0x7e, 0x5e, 0xcd, 0x90, - 0x69, 0x46, 0x9c, 0x8e, 0xd3, 0x6b, 0xbe, 0xf8, 0xd2, 0x2b, 0x27, 0xb5, 0x10, 0xf5, 0xd2, 0x51, - 0x94, 0x07, 0x94, 0x97, 0xa3, 0xbd, 0xe9, 0x89, 0xf7, 0x76, 0xf0, 0x33, 0x84, 0xfa, 0x1c, 0x34, - 0x0b, 0xf0, 0xcd, 0xac, 0x5d, 0xc9, 0x66, 0x6d, 0x54, 0xc6, 0xe8, 0x42, 0x15, 0xbf, 0x43, 0x0d, - 0x33, 0xef, 0xcb, 0x38, 0x01, 0x72, 0xcf, 0x58, 0xf8, 0xbb, 0x59, 0x9c, 0xc7, 0xa1, 0x14, 0x39, - 0x2d, 0x38, 0xb4, 0x0e, 0x8d, 0xd3, 0xb9, 0x12, 0x2d, 0x45, 0xf1, 0x1b, 0x54, 0x53, 0x20, 0x63, - 0x50, 0xe4, 0xbe, 0x91, 0x7f, 0xe6, 0x6d, 0x7d, 0x6b, 0xcf, 0x08, 0x5c, 0x18, 0x74, 0x80, 0xb2, - 0x59, 0xbb, 0x56, 0xfc, 0xa6, 0x56, 0x01, 0x9f, 0xa3, 0xc7, 0x12, 0x52, 0x21, 0x75, 0xcc, 0xa3, - 0x57, 0x82, 0x6b, 0x29, 0xc6, 0x63, 0x90, 0x64, 0xaf, 0xe3, 0xf4, 0x1a, 0xc1, 0x67, 0xb6, 0x8c, - 0xc7, 0x74, 0x1d, 0x42, 0x37, 0xf1, 0xf0, 0xf7, 0xe8, 0x70, 0x11, 0x7e, 0xcd, 0x95, 0x66, 0x3c, - 0x04, 0x52, 0x35, 0x62, 0x4f, 0xac, 0xd8, 0x21, 0x5d, 0x05, 0xd0, 0x75, 0x0e, 0x7e, 0x86, 0x6a, - 0x2c, 0xd4, 0xb1, 0xe0, 0xa4, 0x66, 0xd8, 0x8f, 0x2c, 0xbb, 0xf6, 0xd2, 0x44, 0xa9, 0xcd, 0xe6, - 0x38, 0x09, 0x4c, 0x09, 0x4e, 0xea, 0x77, 0x71, 0xd4, 0x44, 0xa9, 0xcd, 0xe2, 0x4b, 0xd4, 0x90, - 0x10, 0x31, 0x39, 0x8c, 0x79, 0x44, 0xf6, 0xcd, 0xd8, 0x9e, 0x2e, 0x8f, 0x2d, 0x5f, 0xec, 0xf2, - 0x99, 0x29, 0xbc, 0x07, 0x09, 0x3c, 0x5c, 0x7a, 0x09, 0x3a, 0x67, 0xd3, 0x52, 0x08, 0xbf, 0x41, - 0x75, 0x09, 0xe3, 0x7c, 0xd1, 0x48, 0x63, 0x77, 0xcd, 0x66, 0x36, 0x6b, 0xd7, 0x69, 0xc1, 0xa3, - 0x73, 0x01, 0xdc, 0x41, 0x7b, 0x5c, 0x68, 0x20, 0xc8, 0xf4, 0xf1, 0xc0, 0xfa, 0xee, 0xfd, 0x20, - 0x34, 0x50, 0x93, 0xc9, 0x11, 0xfa, 0x3a, 0x05, 0xd2, 0xbc, 0x8b, 0xb8, 0xbc, 0x4e, 0x81, 0x9a, - 0x0c, 0x06, 0xd4, 0x1a, 0x42, 0x2a, 0x21, 0xcc, 0x15, 0x2f, 0xc4, 0x44, 0x86, 0x40, 0x1e, 
0x98, - 0xc2, 0xda, 0x9b, 0x0a, 0x2b, 0x96, 0xc3, 0xc0, 0x02, 0x62, 0xe5, 0x5a, 0xfd, 0x15, 0x01, 0xba, - 0x26, 0x89, 0x7f, 0x73, 0x10, 0x29, 0x83, 0xdf, 0xc5, 0x52, 0x99, 0xc5, 0x54, 0x9a, 0x25, 0x29, - 0x79, 0x68, 0xfc, 0xbe, 0xd8, 0x6d, 0xe5, 0xcd, 0xb6, 0x77, 0xac, 0x35, 0xe9, 0x6f, 0xd1, 0xa4, - 0x5b, 0xdd, 0xf0, 0xaf, 0x0e, 0x3a, 0x2a, 0x93, 0x67, 0x6c, 0xb9, 0x92, 0x47, 0xff, 0xb9, 0x92, - 0xb6, 0xad, 0xe4, 0xa8, 0xbf, 0x59, 0x92, 0x6e, 0xf3, 0xc2, 0x2f, 0xd1, 0x41, 0x99, 0x7a, 0x25, - 0x26, 0x5c, 0x93, 0x83, 0x8e, 0xd3, 0xab, 0x06, 0x47, 0x56, 0xf2, 0xa0, 0x7f, 0x37, 0x4d, 0x57, - 0xf1, 0xdd, 0x3f, 0x1d, 0x54, 0xfc, 0xdf, 0xcf, 0x62, 0xa5, 0xf1, 0x4f, 0x6b, 0x87, 0xca, 0xdb, - 0xad, 0x91, 0x9c, 0x6d, 0xce, 0x54, 0xcb, 0x3a, 0xef, 0xcf, 0x23, 0x4b, 0x47, 0xea, 0x14, 0x55, - 0x63, 0x0d, 0x89, 0x22, 0xf7, 0x3a, 0xf7, 0x7b, 0xcd, 0x17, 0x9d, 0x4f, 0x5d, 0x90, 0xe0, 0xa1, - 0x15, 0xab, 0xbe, 0xce, 0x69, 0xb4, 0x60, 0x77, 0x33, 0x07, 0x35, 0x97, 0x2e, 0x0c, 0x7e, 0x8a, - 0xaa, 0xa1, 0xe9, 0xdd, 0x31, 0xbd, 0x2f, 0x48, 0x45, 0xc7, 0x45, 0x0e, 0x4f, 0x50, 0x6b, 0xcc, - 0x94, 0x7e, 0x3b, 0x50, 0x20, 0xa7, 0x30, 0xfc, 0x3f, 0x77, 0x72, 0xb1, 0xb4, 0x67, 0x2b, 0x82, - 0x74, 0xcd, 0x02, 0x7f, 0x83, 0xaa, 0x4a, 0x33, 0x0d, 0xe6, 0x68, 0x36, 0x82, 0xcf, 0xe7, 0xb5, - 0x5d, 0xe4, 0xc1, 0x7f, 0x66, 0xed, 0xd6, 0x52, 0x23, 0x26, 0x46, 0x0b, 0x7c, 0xf0, 0xfc, 0xe6, - 0xd6, 0xad, 0x7c, 0xb8, 0x75, 0x2b, 0x1f, 0x6f, 0xdd, 0xca, 0x2f, 0x99, 0xeb, 0xdc, 0x64, 0xae, - 0xf3, 0x21, 0x73, 0x9d, 0x8f, 0x99, 0xeb, 0xfc, 0x95, 0xb9, 0xce, 0xef, 0x7f, 0xbb, 0x95, 0x1f, - 0xeb, 0x76, 0x5e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x25, 0x9b, 0x14, 0x4d, 0xbd, 0x07, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto index 04eacbb280c70..58f5aa422f638 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.proto +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -98,7 +98,7 @@ message Event { // EventList is a list of Event objects. message EventList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go index eef45645323a2..0571fbb2e826c 100644 --- a/vendor/k8s.io/api/events/v1beta1/types.go +++ b/vendor/k8s.io/api/events/v1beta1/types.go @@ -43,11 +43,11 @@ type Event struct { // ID of the controller instance, e.g. `kubelet-xyzf`. // +optional - ReportingInstance string `json:"reportingInstance,omitemtpy" protobuf:"bytes,5,opt,name=reportingInstance"` + ReportingInstance string `json:"reportingInstance,omitempty" protobuf:"bytes,5,opt,name=reportingInstance"` // What action was taken/failed regarding to the regarding object. // +optional - Action string `json:"action,omitemtpy" protobuf:"bytes,6,name=action"` + Action string `json:"action,omitempty" protobuf:"bytes,6,name=action"` // Why the action was taken. Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"` @@ -114,7 +114,7 @@ const ( type EventList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index bbc91ed9b3a70..639daca6daef1 100644 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -51,7 +51,7 @@ func (Event) SwaggerDoc() map[string]string { var map_EventList = map[string]string{ "": "EventList is a list of Event objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index ed7a9e6a88029..65b47eabd57e0 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -17,93 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto - - It has these top-level messages: - AllowedCSIDriver - AllowedFlexVolume - AllowedHostPath - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentRollback - DeploymentSpec - DeploymentStatus - DeploymentStrategy - FSGroupStrategyOptions - HTTPIngressPath - HTTPIngressRuleValue - HostPortRange - IDRange - IPBlock - Ingress - IngressBackend - IngressList - IngressRule - IngressRuleValue - IngressSpec - IngressStatus - IngressTLS - NetworkPolicy - NetworkPolicyEgressRule - NetworkPolicyIngressRule - NetworkPolicyList - NetworkPolicyPeer - NetworkPolicyPort - NetworkPolicySpec - PodSecurityPolicy - PodSecurityPolicyList - PodSecurityPolicySpec - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - ReplicationControllerDummy - RollbackConfig - RollingUpdateDaemonSet - RollingUpdateDeployment - RunAsGroupStrategyOptions - RunAsUserStrategyOptions - RuntimeClassStrategyOptions - SELinuxStrategyOptions - Scale - ScaleSpec - ScaleStatus - SupplementalGroupsStrategyOptions -*/ package v1beta1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" + strings "strings" - io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // 
Reference imports to suppress errors if they are not otherwise used. @@ -117,250 +49,1602 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } +func (*AllowedCSIDriver) ProtoMessage() {} +func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{0} +} +func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedCSIDriver.Merge(m, src) +} +func (m *AllowedCSIDriver) XXX_Size() int { + return m.Size() +} +func (m *AllowedCSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo + +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{1} +} +func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedFlexVolume.Merge(m, src) +} +func (m *AllowedFlexVolume) XXX_Size() int { + return m.Size() +} +func (m *AllowedFlexVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo + +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{2} +} +func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedHostPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedHostPath.Merge(m, src) +} +func (m *AllowedHostPath) XXX_Size() int { + return m.Size() +} +func (m *AllowedHostPath) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{3} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{4} +} +func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m *DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{5} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{6} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{7} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + 
xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{8} +} +func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{9} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} + +var xxx_messageInfo_Deployment proto.InternalMessageInfo + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{10} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{11} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} + +var 
xxx_messageInfo_DeploymentList proto.InternalMessageInfo + +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{12} +} +func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentRollback.Merge(m, src) +} +func (m *DeploymentRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentRollback) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentRollback.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{13} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{14} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{15} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m 
*FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{16} +} +func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) +} +func (m *FSGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo + +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{17} +} +func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressPath.Merge(m, src) +} +func (m *HTTPIngressPath) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressPath) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo + +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{18} +} +func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src) +} +func (m *HTTPIngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{19} +} +func (m *HostPortRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPortRange.Merge(m, src) +} +func (m *HostPortRange) XXX_Size() int { + return m.Size() +} +func (m *HostPortRange) XXX_DiscardUnknown() { + xxx_messageInfo_HostPortRange.DiscardUnknown(m) +} + +var xxx_messageInfo_HostPortRange proto.InternalMessageInfo + +func (m *IDRange) Reset() { *m 
= IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{20} +} +func (m *IDRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IDRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDRange.Merge(m, src) +} +func (m *IDRange) XXX_Size() int { + return m.Size() +} +func (m *IDRange) XXX_DiscardUnknown() { + xxx_messageInfo_IDRange.DiscardUnknown(m) +} + +var xxx_messageInfo_IDRange proto.InternalMessageInfo + +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{21} +} +func (m *IPBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPBlock.Merge(m, src) +} +func (m *IPBlock) XXX_Size() int { + return m.Size() +} +func (m *IPBlock) XXX_DiscardUnknown() { + xxx_messageInfo_IPBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_IPBlock proto.InternalMessageInfo + +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{22} +} +func (m *Ingress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Ingress) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ingress.Merge(m, src) +} +func (m *Ingress) XXX_Size() int { + return m.Size() +} +func (m *Ingress) XXX_DiscardUnknown() { + xxx_messageInfo_Ingress.DiscardUnknown(m) +} + +var xxx_messageInfo_Ingress proto.InternalMessageInfo + +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{23} +} +func (m *IngressBackend) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressBackend) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressBackend.Merge(m, src) +} +func (m *IngressBackend) XXX_Size() int { + return m.Size() +} +func (m *IngressBackend) XXX_DiscardUnknown() { + xxx_messageInfo_IngressBackend.DiscardUnknown(m) +} -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_IngressBackend proto.InternalMessageInfo -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *IngressList) Reset() { *m = IngressList{} } +func 
(*IngressList) ProtoMessage() {} +func (*IngressList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{24} +} +func (m *IngressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressList.Merge(m, src) +} +func (m *IngressList) XXX_Size() int { + return m.Size() +} +func (m *IngressList) XXX_DiscardUnknown() { + xxx_messageInfo_IngressList.DiscardUnknown(m) +} -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_IngressList proto.InternalMessageInfo -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{25} +} +func (m *IngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRule.Merge(m, src) +} +func (m *IngressRule) XXX_Size() int { + return m.Size() +} +func (m *IngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRule proto.InternalMessageInfo -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func (*IngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{26} +} +func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRuleValue.Merge(m, src) +} +func (m *IngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *IngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRuleValue.DiscardUnknown(m) +} -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { + return 
fileDescriptor_cdc93917efc28165, []int{27} +} +func (m *IngressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressSpec.Merge(m, src) +} +func (m *IngressSpec) XXX_Size() int { + return m.Size() +} +func (m *IngressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IngressSpec.DiscardUnknown(m) +} -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_IngressSpec proto.InternalMessageInfo -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{28} +} +func (m *IngressStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressStatus.Merge(m, src) +} +func (m *IngressStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressStatus.DiscardUnknown(m) +} -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_IngressStatus proto.InternalMessageInfo -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{29} +} +func (m *IngressTLS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressTLS) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressTLS.Merge(m, src) +} +func (m *IngressTLS) XXX_Size() int { + return m.Size() +} +func (m *IngressTLS) XXX_DiscardUnknown() { + xxx_messageInfo_IngressTLS.DiscardUnknown(m) +} -func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } -func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_IngressTLS proto.InternalMessageInfo -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (m *NetworkPolicy) Reset() 
{ *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{30} +} +func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicy.Merge(m, src) +} +func (m *NetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) +} -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } +func (*NetworkPolicyEgressRule) ProtoMessage() {} +func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{31} +} +func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src) +} +func (m *NetworkPolicyEgressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m) +} -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo -func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } -func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } +func (*NetworkPolicyIngressRule) ProtoMessage() {} +func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{32} +} +func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src) +} +func (m *NetworkPolicyIngressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m) +} -func (m *HTTPIngressRuleValue) Reset() { *m = 
HTTPIngressRuleValue{} } -func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{33} +} +func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyList.Merge(m, src) +} +func (m *NetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m) +} -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo -func (m *IPBlock) Reset() { *m = IPBlock{} } -func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{34} +} +func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPeer.Merge(m, src) +} +func (m *NetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m) +} -func (m *Ingress) Reset() { *m = Ingress{} } -func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo -func (m *IngressBackend) Reset() { *m = IngressBackend{} } -func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{35} +} +func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPort.Merge(m, src) +} +func (m 
*NetworkPolicyPort) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPort) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m) +} -func (m *IngressList) Reset() { *m = IngressList{} } -func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo -func (m *IngressRule) Reset() { *m = IngressRule{} } -func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{36} +} +func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicySpec.Merge(m, src) +} +func (m *NetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m) +} -func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } -func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo -func (m *IngressSpec) Reset() { *m = IngressSpec{} } -func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{37} +} +func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicy.Merge(m, src) +} +func (m *PodSecurityPolicy) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) +} -func (m *IngressStatus) Reset() { *m = IngressStatus{} } -func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo -func (m *IngressTLS) Reset() { *m = IngressTLS{} } -func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{38} +} +func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) +} +func (m *PodSecurityPolicyList) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) +} -func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } -func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo -func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } -func (*NetworkPolicyEgressRule) ProtoMessage() {} -func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{31} +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{39} } - -func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } -func (*NetworkPolicyIngressRule) ProtoMessage() {} -func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{32} +func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) +} +func (m *PodSecurityPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) } -func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } -func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } - -func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } -func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo -func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } -func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{40} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} -func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } -func 
(*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{41} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{42} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{43} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{44} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } func (*ReplicationControllerDummy) ProtoMessage() {} func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{45} + return fileDescriptor_cdc93917efc28165, []int{45} +} +func (m *ReplicationControllerDummy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerDummy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicationControllerDummy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerDummy.Merge(m, src) +} +func (m *ReplicationControllerDummy) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerDummy) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerDummy.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationControllerDummy proto.InternalMessageInfo + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{46} +} +func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollbackConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackConfig.Merge(m, src) +} +func (m *RollbackConfig) XXX_Size() int { + return m.Size() +} +func (m *RollbackConfig) XXX_DiscardUnknown() { + 
xxx_messageInfo_RollbackConfig.DiscardUnknown(m) } -func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } -func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{47} +} +func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{48} + return fileDescriptor_cdc93917efc28165, []int{48} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) } +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo + func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } func (*RunAsGroupStrategyOptions) ProtoMessage() {} func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{49} + return fileDescriptor_cdc93917efc28165, []int{49} +} +func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +} +func (m *RunAsGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo func (m *RunAsUserStrategyOptions) Reset() { *m = 
RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{50} + return fileDescriptor_cdc93917efc28165, []int{50} +} +func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +} +func (m *RunAsUserStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) } +var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo + func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } func (*RuntimeClassStrategyOptions) ProtoMessage() {} func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{51} + return fileDescriptor_cdc93917efc28165, []int{51} +} +func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) +} +func (m *RuntimeClassStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{52} +} +func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) +} +func (m *SELinuxStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{53} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { 
+ xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{54} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{55} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{56} + return fileDescriptor_cdc93917efc28165, []int{56} +} +func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) } +var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo + 
func init() { proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") @@ -375,6 +1659,7 @@ func init() { proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.extensions.v1beta1.DeploymentCondition") proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.extensions.v1beta1.DeploymentList") proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry") proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") @@ -418,12 +1703,253 @@ func init() { proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus.SelectorEntry") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptor_cdc93917efc28165) +} + +var fileDescriptor_cdc93917efc28165 = []byte{ + // 3684 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4f, 0x6c, 0x1b, 0x47, + 0x77, 0xf7, 0x92, 0x94, 0x48, 0x3d, 0xfd, 0x1f, 0xc9, 0x12, 0x3f, 0x3b, 0x16, 0xfd, 0x6d, 0x00, + 0xd7, 0x49, 0x6d, 0x32, 0x76, 0x6c, 0x7f, 0xae, 0x8d, 0x7e, 0x89, 0x28, 0x59, 0xb6, 0x52, 0xfd, + 0x61, 0x86, 0x92, 0x1b, 0x04, 0x4d, 0x9a, 0x15, 0x39, 0xa2, 0xd6, 0x5a, 0xee, 0x6e, 0x76, 0x87, + 0x8a, 0x08, 0xf4, 0xd0, 0x43, 0x51, 0xa0, 0x40, 0x8b, 0xf6, 0x92, 0xb6, 0xc7, 0x06, 0x05, 0x7a, + 0x6a, 0xd1, 0xde, 0xda, 0x43, 0x10, 0xa0, 0x40, 0x0a, 0x18, 0x45, 0x5a, 0xe4, 0xd6, 0x9c, 0x84, + 0x46, 0x39, 0x15, 0x3d, 0xf5, 0x56, 0xf8, 0x50, 0x14, 0x33, 0x3b, 0xfb, 0x7f, 0x57, 0x5c, 0x29, + 0xb6, 0xd0, 0x00, 0xbd, 0x89, 0xf3, 0xde, 0xfb, 0xbd, 0x37, 0x33, 0x6f, 0xde, 0x7b, 0x33, 0xfb, + 0x04, 0x2b, 0xfb, 0xf7, 0xed, 0xaa, 0x6a, 0xd4, 0xf6, 0x7b, 0x3b, 0xc4, 0xd2, 0x09, 0x25, 0x76, + 0xed, 0x80, 0xe8, 0x6d, 0xc3, 0xaa, 0x09, 0x82, 0x62, 0xaa, 0x35, 0x72, 0x48, 0x89, 0x6e, 0xab, + 0x86, 0x6e, 0xd7, 0x0e, 0x6e, 0xed, 0x10, 0xaa, 0xdc, 0xaa, 0x75, 0x88, 0x4e, 0x2c, 0x85, 0x92, + 0x76, 0xd5, 0xb4, 0x0c, 0x6a, 0xa0, 0x2b, 0x0e, 0x7b, 0x55, 0x31, 0xd5, 0xaa, 0xcf, 0x5e, 0x15, + 0xec, 0x97, 0x6e, 0x76, 0x54, 0xba, 0xd7, 0xdb, 0xa9, 0xb6, 0x8c, 0x6e, 0xad, 0x63, 0x74, 0x8c, + 0x1a, 0x97, 0xda, 0xe9, 0xed, 0xf2, 0x5f, 0xfc, 0x07, 0xff, 0xcb, 0x41, 0xbb, 0x24, 0x07, 0x94, + 0xb7, 0x0c, 0x8b, 0xd4, 0x0e, 0x62, 0x1a, 0x2f, 0xdd, 0xf1, 0x79, 0xba, 0x4a, 0x6b, 0x4f, 0xd5, + 0x89, 0xd5, 0xaf, 0x99, 0xfb, 0x1d, 0x36, 0x60, 0xd7, 0xba, 0x84, 0x2a, 0x49, 0x52, 0xb5, 0x34, + 0x29, 0xab, 0xa7, 0x53, 0xb5, 0x4b, 0x62, 0x02, 0xf7, 0x06, 0x09, 0xd8, 0xad, 0x3d, 0xd2, 0x55, + 0x62, 0x72, 0x6f, 0xa7, 0xc9, 0xf5, 0xa8, 0xaa, 0xd5, 0x54, 0x9d, 0xda, 0xd4, 0x8a, 0x0a, 0xc9, + 0x77, 0x60, 0x6a, 0x51, 0xd3, 0x8c, 0xcf, 0x48, 0x7b, 0xa9, 0xb9, 0xba, 0x6c, 
0xa9, 0x07, 0xc4, + 0x42, 0x57, 0xa1, 0xa0, 0x2b, 0x5d, 0x52, 0x96, 0xae, 0x4a, 0xd7, 0x47, 0xea, 0x63, 0xcf, 0x8f, + 0x2a, 0x17, 0x8e, 0x8f, 0x2a, 0x85, 0x0d, 0xa5, 0x4b, 0x30, 0xa7, 0xc8, 0x0f, 0x61, 0x5a, 0x48, + 0xad, 0x68, 0xe4, 0xf0, 0xa9, 0xa1, 0xf5, 0xba, 0x04, 0x5d, 0x83, 0xe1, 0x36, 0x07, 0x10, 0x82, + 0x13, 0x42, 0x70, 0xd8, 0x81, 0xc5, 0x82, 0x2a, 0xdb, 0x30, 0x29, 0x84, 0x9f, 0x18, 0x36, 0x6d, + 0x28, 0x74, 0x0f, 0xdd, 0x06, 0x30, 0x15, 0xba, 0xd7, 0xb0, 0xc8, 0xae, 0x7a, 0x28, 0xc4, 0x91, + 0x10, 0x87, 0x86, 0x47, 0xc1, 0x01, 0x2e, 0x74, 0x03, 0x4a, 0x16, 0x51, 0xda, 0x9b, 0xba, 0xd6, + 0x2f, 0xe7, 0xae, 0x4a, 0xd7, 0x4b, 0xf5, 0x29, 0x21, 0x51, 0xc2, 0x62, 0x1c, 0x7b, 0x1c, 0xf2, + 0xe7, 0x39, 0x18, 0x59, 0x56, 0x48, 0xd7, 0xd0, 0x9b, 0x84, 0xa2, 0x4f, 0xa0, 0xc4, 0xb6, 0xab, + 0xad, 0x50, 0x85, 0x6b, 0x1b, 0xbd, 0xfd, 0x56, 0xd5, 0x77, 0x27, 0x6f, 0xf5, 0xaa, 0xe6, 0x7e, + 0x87, 0x0d, 0xd8, 0x55, 0xc6, 0x5d, 0x3d, 0xb8, 0x55, 0xdd, 0xdc, 0x79, 0x46, 0x5a, 0x74, 0x9d, + 0x50, 0xc5, 0xb7, 0xcf, 0x1f, 0xc3, 0x1e, 0x2a, 0xda, 0x80, 0x82, 0x6d, 0x92, 0x16, 0xb7, 0x6c, + 0xf4, 0xf6, 0x8d, 0xea, 0x89, 0xce, 0x5a, 0xf5, 0x2c, 0x6b, 0x9a, 0xa4, 0xe5, 0xaf, 0x38, 0xfb, + 0x85, 0x39, 0x0e, 0x7a, 0x0a, 0xc3, 0x36, 0x55, 0x68, 0xcf, 0x2e, 0xe7, 0x39, 0x62, 0x35, 0x33, + 0x22, 0x97, 0xf2, 0x37, 0xc3, 0xf9, 0x8d, 0x05, 0x9a, 0xfc, 0x1f, 0x39, 0x40, 0x1e, 0xef, 0x92, + 0xa1, 0xb7, 0x55, 0xaa, 0x1a, 0x3a, 0x7a, 0x00, 0x05, 0xda, 0x37, 0x5d, 0x17, 0xb8, 0xe6, 0x1a, + 0xb4, 0xd5, 0x37, 0xc9, 0x8b, 0xa3, 0xca, 0x5c, 0x5c, 0x82, 0x51, 0x30, 0x97, 0x41, 0x6b, 0x9e, + 0xa9, 0x39, 0x2e, 0x7d, 0x27, 0xac, 0xfa, 0xc5, 0x51, 0x25, 0xe1, 0xb0, 0x55, 0x3d, 0xa4, 0xb0, + 0x81, 0xe8, 0x00, 0x90, 0xa6, 0xd8, 0x74, 0xcb, 0x52, 0x74, 0xdb, 0xd1, 0xa4, 0x76, 0x89, 0x58, + 0x84, 0x37, 0xb3, 0x6d, 0x1a, 0x93, 0xa8, 0x5f, 0x12, 0x56, 0xa0, 0xb5, 0x18, 0x1a, 0x4e, 0xd0, + 0xc0, 0xbc, 0xd9, 0x22, 0x8a, 0x6d, 0xe8, 0xe5, 0x42, 0xd8, 0x9b, 0x31, 0x1f, 0xc5, 0x82, 0x8a, + 0xde, 0x80, 0x62, 0x97, 0xd8, 0xb6, 0xd2, 0x21, 0xe5, 0x21, 0xce, 0x38, 0x29, 0x18, 0x8b, 0xeb, + 0xce, 0x30, 0x76, 0xe9, 0xf2, 0x97, 0x12, 0x8c, 0x7b, 0x2b, 0xb7, 0xa6, 0xda, 0x14, 0xfd, 0x56, + 0xcc, 0x0f, 0xab, 0xd9, 0xa6, 0xc4, 0xa4, 0xb9, 0x17, 0x7a, 0x3e, 0xef, 0x8e, 0x04, 0x7c, 0x70, + 0x1d, 0x86, 0x54, 0x4a, 0xba, 0x6c, 0x1f, 0xf2, 0xd7, 0x47, 0x6f, 0x5f, 0xcf, 0xea, 0x32, 0xf5, + 0x71, 0x01, 0x3a, 0xb4, 0xca, 0xc4, 0xb1, 0x83, 0x22, 0xff, 0x69, 0x21, 0x60, 0x3e, 0x73, 0x4d, + 0xf4, 0x11, 0x94, 0x6c, 0xa2, 0x91, 0x16, 0x35, 0x2c, 0x61, 0xfe, 0xdb, 0x19, 0xcd, 0x57, 0x76, + 0x88, 0xd6, 0x14, 0xa2, 0xf5, 0x31, 0x66, 0xbf, 0xfb, 0x0b, 0x7b, 0x90, 0xe8, 0x7d, 0x28, 0x51, + 0xd2, 0x35, 0x35, 0x85, 0x12, 0x71, 0x8e, 0x5e, 0x0f, 0x4e, 0x81, 0x79, 0x0e, 0x03, 0x6b, 0x18, + 0xed, 0x2d, 0xc1, 0xc6, 0x8f, 0x8f, 0xb7, 0x24, 0xee, 0x28, 0xf6, 0x60, 0xd0, 0x01, 0x4c, 0xf4, + 0xcc, 0x36, 0xe3, 0xa4, 0x2c, 0x0a, 0x76, 0xfa, 0xc2, 0x93, 0xee, 0x65, 0x5d, 0x9b, 0xed, 0x90, + 0x74, 0x7d, 0x4e, 0xe8, 0x9a, 0x08, 0x8f, 0xe3, 0x88, 0x16, 0xb4, 0x08, 0x93, 0x5d, 0x55, 0x67, + 0x71, 0xa9, 0xdf, 0x24, 0x2d, 0x43, 0x6f, 0xdb, 0xdc, 0xad, 0x86, 0xea, 0xf3, 0x02, 0x60, 0x72, + 0x3d, 0x4c, 0xc6, 0x51, 0x7e, 0xf4, 0x1e, 0x20, 0x77, 0x1a, 0x8f, 0x9d, 0x20, 0xae, 0x1a, 0x3a, + 0xf7, 0xb9, 0xbc, 0xef, 0xdc, 0x5b, 0x31, 0x0e, 0x9c, 0x20, 0x85, 0xd6, 0x60, 0xd6, 0x22, 0x07, + 0x2a, 0x9b, 0xe3, 0x13, 0xd5, 0xa6, 0x86, 0xd5, 0x5f, 0x53, 0xbb, 0x2a, 0x2d, 0x0f, 0x73, 0x9b, + 0xca, 0xc7, 0x47, 0x95, 0x59, 0x9c, 0x40, 0xc7, 0x89, 0x52, 0xf2, 0x9f, 0x0d, 0xc3, 0x64, 0x24, + 0xde, 
0xa0, 0xa7, 0x30, 0xd7, 0xea, 0x59, 0x16, 0xd1, 0xe9, 0x46, 0xaf, 0xbb, 0x43, 0xac, 0x66, + 0x6b, 0x8f, 0xb4, 0x7b, 0x1a, 0x69, 0x73, 0x47, 0x19, 0xaa, 0x2f, 0x08, 0x8b, 0xe7, 0x96, 0x12, + 0xb9, 0x70, 0x8a, 0x34, 0x5b, 0x05, 0x9d, 0x0f, 0xad, 0xab, 0xb6, 0xed, 0x61, 0xe6, 0x38, 0xa6, + 0xb7, 0x0a, 0x1b, 0x31, 0x0e, 0x9c, 0x20, 0xc5, 0x6c, 0x6c, 0x13, 0x5b, 0xb5, 0x48, 0x3b, 0x6a, + 0x63, 0x3e, 0x6c, 0xe3, 0x72, 0x22, 0x17, 0x4e, 0x91, 0x46, 0x77, 0x61, 0xd4, 0xd1, 0xc6, 0xf7, + 0x4f, 0x6c, 0xf4, 0x8c, 0x00, 0x1b, 0xdd, 0xf0, 0x49, 0x38, 0xc8, 0xc7, 0xa6, 0x66, 0xec, 0xd8, + 0xc4, 0x3a, 0x20, 0xed, 0xf4, 0x0d, 0xde, 0x8c, 0x71, 0xe0, 0x04, 0x29, 0x36, 0x35, 0xc7, 0x03, + 0x63, 0x53, 0x1b, 0x0e, 0x4f, 0x6d, 0x3b, 0x91, 0x0b, 0xa7, 0x48, 0x33, 0x3f, 0x76, 0x4c, 0x5e, + 0x3c, 0x50, 0x54, 0x4d, 0xd9, 0xd1, 0x48, 0xb9, 0x18, 0xf6, 0xe3, 0x8d, 0x30, 0x19, 0x47, 0xf9, + 0xd1, 0x63, 0x98, 0x76, 0x86, 0xb6, 0x75, 0xc5, 0x03, 0x29, 0x71, 0x90, 0x9f, 0x09, 0x90, 0xe9, + 0x8d, 0x28, 0x03, 0x8e, 0xcb, 0xa0, 0x07, 0x30, 0xd1, 0x32, 0x34, 0x8d, 0xfb, 0xe3, 0x92, 0xd1, + 0xd3, 0x69, 0x79, 0x84, 0xa3, 0x20, 0x76, 0x1e, 0x97, 0x42, 0x14, 0x1c, 0xe1, 0x44, 0x04, 0xa0, + 0xe5, 0x26, 0x1c, 0xbb, 0x0c, 0x3c, 0x3e, 0xde, 0xca, 0x1a, 0x03, 0xbc, 0x54, 0xe5, 0xd7, 0x00, + 0xde, 0x90, 0x8d, 0x03, 0xc0, 0xf2, 0x3f, 0x4b, 0x30, 0x9f, 0x12, 0x3a, 0xd0, 0x3b, 0xa1, 0x14, + 0xfb, 0xab, 0x91, 0x14, 0x7b, 0x39, 0x45, 0x2c, 0x90, 0x67, 0x75, 0x18, 0xb7, 0xd8, 0xac, 0xf4, + 0x8e, 0xc3, 0x22, 0x62, 0xe4, 0xdd, 0x01, 0xd3, 0xc0, 0x41, 0x19, 0x3f, 0xe6, 0x4f, 0x1f, 0x1f, + 0x55, 0xc6, 0x43, 0x34, 0x1c, 0x86, 0x97, 0xff, 0x3c, 0x07, 0xb0, 0x4c, 0x4c, 0xcd, 0xe8, 0x77, + 0x89, 0x7e, 0x1e, 0x35, 0xd4, 0x66, 0xa8, 0x86, 0xba, 0x39, 0x68, 0x7b, 0x3c, 0xd3, 0x52, 0x8b, + 0xa8, 0xdf, 0x8c, 0x14, 0x51, 0xb5, 0xec, 0x90, 0x27, 0x57, 0x51, 0xff, 0x96, 0x87, 0x19, 0x9f, + 0xd9, 0x2f, 0xa3, 0x1e, 0x86, 0xf6, 0xf8, 0x57, 0x22, 0x7b, 0x3c, 0x9f, 0x20, 0xf2, 0xca, 0xea, + 0xa8, 0x67, 0x30, 0xc1, 0xaa, 0x1c, 0x67, 0x2f, 0x79, 0x0d, 0x35, 0x7c, 0xea, 0x1a, 0xca, 0xcb, + 0x76, 0x6b, 0x21, 0x24, 0x1c, 0x41, 0x4e, 0xa9, 0xd9, 0x8a, 0x3f, 0xc5, 0x9a, 0xed, 0x2b, 0x09, + 0x26, 0xfc, 0x6d, 0x3a, 0x87, 0xa2, 0x6d, 0x23, 0x5c, 0xb4, 0xbd, 0x91, 0xd9, 0x45, 0x53, 0xaa, + 0xb6, 0xff, 0x66, 0x05, 0xbe, 0xc7, 0xc4, 0x0e, 0xf8, 0x8e, 0xd2, 0xda, 0x1f, 0x7c, 0xc7, 0x43, + 0x9f, 0x4b, 0x80, 0x44, 0x16, 0x58, 0xd4, 0x75, 0x83, 0x2a, 0x4e, 0xac, 0x74, 0xcc, 0x5a, 0xcd, + 0x6c, 0x96, 0xab, 0xb1, 0xba, 0x1d, 0xc3, 0x7a, 0xa4, 0x53, 0xab, 0xef, 0x6f, 0x72, 0x9c, 0x01, + 0x27, 0x18, 0x80, 0x14, 0x00, 0x4b, 0x60, 0x6e, 0x19, 0xe2, 0x20, 0xdf, 0xcc, 0x10, 0xf3, 0x98, + 0xc0, 0x92, 0xa1, 0xef, 0xaa, 0x1d, 0x3f, 0xec, 0x60, 0x0f, 0x08, 0x07, 0x40, 0x2f, 0x3d, 0x82, + 0xf9, 0x14, 0x6b, 0xd1, 0x14, 0xe4, 0xf7, 0x49, 0xdf, 0x59, 0x36, 0xcc, 0xfe, 0x44, 0xb3, 0x30, + 0x74, 0xa0, 0x68, 0x3d, 0x27, 0xfc, 0x8e, 0x60, 0xe7, 0xc7, 0x83, 0xdc, 0x7d, 0x49, 0xfe, 0x72, + 0x28, 0xe8, 0x3b, 0xbc, 0x62, 0xbe, 0xce, 0x2e, 0xad, 0xa6, 0xa6, 0xb6, 0x14, 0x5b, 0x14, 0x42, + 0x63, 0xce, 0x85, 0xd5, 0x19, 0xc3, 0x1e, 0x35, 0x54, 0x5b, 0xe7, 0x5e, 0x6d, 0x6d, 0x9d, 0x7f, + 0x39, 0xb5, 0xf5, 0x6f, 0x43, 0xc9, 0x76, 0xab, 0xea, 0x02, 0x87, 0xbc, 0x75, 0x8a, 0xf8, 0x2a, + 0x0a, 0x6a, 0x4f, 0x81, 0x57, 0x4a, 0x7b, 0xa0, 0x49, 0x45, 0xf4, 0xd0, 0x29, 0x8b, 0xe8, 0x97, + 0x5a, 0xf8, 0xb2, 0x78, 0x63, 0x2a, 0x3d, 0x9b, 0xb4, 0x79, 0x6c, 0x2b, 0xf9, 0xf1, 0xa6, 0xc1, + 0x47, 0xb1, 0xa0, 0xa2, 0x8f, 0x42, 0x2e, 0x5b, 0x3a, 0x8b, 0xcb, 0x4e, 0xa4, 0xbb, 0x2b, 0xda, + 0x86, 0x79, 0xd3, 0x32, 0x3a, 
0x16, 0xb1, 0xed, 0x65, 0xa2, 0xb4, 0x35, 0x55, 0x27, 0xee, 0xfa, + 0x38, 0x15, 0xd1, 0xe5, 0xe3, 0xa3, 0xca, 0x7c, 0x23, 0x99, 0x05, 0xa7, 0xc9, 0xca, 0xcf, 0x0b, + 0x30, 0x15, 0xcd, 0x80, 0x29, 0x45, 0xaa, 0x74, 0xa6, 0x22, 0xf5, 0x46, 0xe0, 0x30, 0x38, 0x15, + 0x7c, 0xe0, 0x05, 0x27, 0x76, 0x20, 0x16, 0x61, 0x52, 0x44, 0x03, 0x97, 0x28, 0xca, 0x74, 0x6f, + 0xf7, 0xb7, 0xc3, 0x64, 0x1c, 0xe5, 0x47, 0x0f, 0x61, 0xdc, 0xe2, 0x75, 0xb7, 0x0b, 0xe0, 0xd4, + 0xae, 0x17, 0x05, 0xc0, 0x38, 0x0e, 0x12, 0x71, 0x98, 0x97, 0xd5, 0xad, 0x7e, 0x39, 0xea, 0x02, + 0x14, 0xc2, 0x75, 0xeb, 0x62, 0x94, 0x01, 0xc7, 0x65, 0xd0, 0x3a, 0xcc, 0xf4, 0xf4, 0x38, 0x94, + 0xe3, 0xca, 0x97, 0x05, 0xd4, 0xcc, 0x76, 0x9c, 0x05, 0x27, 0xc9, 0xa1, 0xdd, 0x50, 0x29, 0x3b, + 0xcc, 0xc3, 0xf3, 0xed, 0xcc, 0x07, 0x2f, 0x73, 0x2d, 0x9b, 0x50, 0x6e, 0x97, 0xb2, 0x96, 0xdb, + 0xf2, 0x3f, 0x4a, 0xc1, 0x24, 0xe4, 0x95, 0xc0, 0x83, 0x5e, 0x99, 0x62, 0x12, 0x81, 0xea, 0xc8, + 0x48, 0xae, 0x7e, 0xef, 0x9d, 0xaa, 0xfa, 0xf5, 0x93, 0xe7, 0xe0, 0xf2, 0xf7, 0x0b, 0x09, 0xe6, + 0x56, 0x9a, 0x8f, 0x2d, 0xa3, 0x67, 0xba, 0xe6, 0x6c, 0x9a, 0xce, 0xd2, 0xfc, 0x02, 0x0a, 0x56, + 0x4f, 0x73, 0xe7, 0xf1, 0xba, 0x3b, 0x0f, 0xdc, 0xd3, 0xd8, 0x3c, 0x66, 0x22, 0x52, 0xce, 0x24, + 0x98, 0x00, 0xda, 0x80, 0x61, 0x4b, 0xd1, 0x3b, 0xc4, 0x4d, 0xab, 0xd7, 0x06, 0x58, 0xbf, 0xba, + 0x8c, 0x19, 0x7b, 0xa0, 0xb0, 0xe1, 0xd2, 0x58, 0xa0, 0xc8, 0x7f, 0x24, 0xc1, 0xe4, 0x93, 0xad, + 0xad, 0xc6, 0xaa, 0xce, 0x4f, 0x34, 0x7f, 0x5b, 0xbd, 0x0a, 0x05, 0x53, 0xa1, 0x7b, 0xd1, 0x4c, + 0xcf, 0x68, 0x98, 0x53, 0xd0, 0x07, 0x50, 0x64, 0x91, 0x84, 0xe8, 0xed, 0x8c, 0xa5, 0xb6, 0x80, + 0xaf, 0x3b, 0x42, 0x7e, 0xf5, 0x24, 0x06, 0xb0, 0x0b, 0x27, 0xef, 0xc3, 0x6c, 0xc0, 0x1c, 0xb6, + 0x1e, 0x4f, 0x59, 0x76, 0x44, 0x4d, 0x18, 0x62, 0x9a, 0x59, 0x0e, 0xcc, 0x67, 0x78, 0xcc, 0x8c, + 0x4c, 0xc9, 0xaf, 0x74, 0xd8, 0x2f, 0x1b, 0x3b, 0x58, 0xf2, 0x3a, 0x8c, 0xf3, 0x07, 0x65, 0xc3, + 0xa2, 0x7c, 0x59, 0xd0, 0x15, 0xc8, 0x77, 0x55, 0x5d, 0xe4, 0xd9, 0x51, 0x21, 0x93, 0x67, 0x39, + 0x82, 0x8d, 0x73, 0xb2, 0x72, 0x28, 0x22, 0x8f, 0x4f, 0x56, 0x0e, 0x31, 0x1b, 0x97, 0x1f, 0x43, + 0x51, 0x2c, 0x77, 0x10, 0x28, 0x7f, 0x32, 0x50, 0x3e, 0x01, 0x68, 0x13, 0x8a, 0xab, 0x8d, 0xba, + 0x66, 0x38, 0x55, 0x57, 0x4b, 0x6d, 0x5b, 0xd1, 0xbd, 0x58, 0x5a, 0x5d, 0xc6, 0x98, 0x53, 0x90, + 0x0c, 0xc3, 0xe4, 0xb0, 0x45, 0x4c, 0xca, 0x3d, 0x62, 0xa4, 0x0e, 0x6c, 0x97, 0x1f, 0xf1, 0x11, + 0x2c, 0x28, 0xf2, 0x1f, 0xe7, 0xa0, 0x28, 0x96, 0xe3, 0x1c, 0x6e, 0x61, 0x6b, 0xa1, 0x5b, 0xd8, + 0x9b, 0xd9, 0x5c, 0x23, 0xf5, 0x0a, 0xb6, 0x15, 0xb9, 0x82, 0xdd, 0xc8, 0x88, 0x77, 0xf2, 0xfd, + 0xeb, 0xef, 0x24, 0x98, 0x08, 0x3b, 0x25, 0xba, 0x0b, 0xa3, 0x2c, 0xe1, 0xa8, 0x2d, 0xb2, 0xe1, + 0xd7, 0xb9, 0xde, 0x23, 0x4c, 0xd3, 0x27, 0xe1, 0x20, 0x1f, 0xea, 0x78, 0x62, 0xcc, 0x8f, 0xc4, + 0xa4, 0xd3, 0x97, 0xb4, 0x47, 0x55, 0xad, 0xea, 0x7c, 0x5a, 0xa9, 0xae, 0xea, 0x74, 0xd3, 0x6a, + 0x52, 0x4b, 0xd5, 0x3b, 0x31, 0x45, 0xdc, 0x29, 0x83, 0xc8, 0xf2, 0x3f, 0x48, 0x30, 0x2a, 0x4c, + 0x3e, 0x87, 0x5b, 0xc5, 0x6f, 0x84, 0x6f, 0x15, 0xd7, 0x32, 0x1e, 0xf0, 0xe4, 0x2b, 0xc5, 0x5f, + 0xf9, 0xa6, 0xb3, 0x23, 0xcd, 0xbc, 0x7a, 0xcf, 0xb0, 0x69, 0xd4, 0xab, 0xd9, 0x61, 0xc4, 0x9c, + 0x82, 0x7a, 0x30, 0xa5, 0x46, 0x62, 0x80, 0x58, 0xda, 0x5a, 0x36, 0x4b, 0x3c, 0xb1, 0x7a, 0x59, + 0xc0, 0x4f, 0x45, 0x29, 0x38, 0xa6, 0x42, 0x26, 0x10, 0xe3, 0x42, 0xef, 0x43, 0x61, 0x8f, 0x52, + 0x33, 0xe1, 0xbd, 0x7a, 0x40, 0xe4, 0xf1, 0x4d, 0x28, 0xf1, 0xd9, 0x6d, 0x6d, 0x35, 0x30, 0x87, + 0x92, 0xff, 0xc7, 0x5f, 0x8f, 0xa6, 0xe3, 0xe3, 0x5e, 
0x3c, 0x95, 0xce, 0x12, 0x4f, 0x47, 0x93, + 0x62, 0x29, 0x7a, 0x02, 0x79, 0xaa, 0x65, 0xbd, 0x16, 0x0a, 0xc4, 0xad, 0xb5, 0xa6, 0x1f, 0x90, + 0xb6, 0xd6, 0x9a, 0x98, 0x41, 0xa0, 0x4d, 0x18, 0x62, 0xd9, 0x87, 0x1d, 0xc1, 0x7c, 0xf6, 0x23, + 0xcd, 0xe6, 0xef, 0x3b, 0x04, 0xfb, 0x65, 0x63, 0x07, 0x47, 0xfe, 0x14, 0xc6, 0x43, 0xe7, 0x14, + 0x7d, 0x02, 0x63, 0x9a, 0xa1, 0xb4, 0xeb, 0x8a, 0xa6, 0xe8, 0x2d, 0xe2, 0x7e, 0x1c, 0xb8, 0x96, + 0x74, 0xc3, 0x58, 0x0b, 0xf0, 0x89, 0x53, 0x3e, 0x2b, 0x94, 0x8c, 0x05, 0x69, 0x38, 0x84, 0x28, + 0x2b, 0x00, 0xfe, 0x1c, 0x51, 0x05, 0x86, 0x98, 0x9f, 0x39, 0xf9, 0x64, 0xa4, 0x3e, 0xc2, 0x2c, + 0x64, 0xee, 0x67, 0x63, 0x67, 0x1c, 0xdd, 0x06, 0xb0, 0x49, 0xcb, 0x22, 0x94, 0x07, 0x83, 0x5c, + 0xf8, 0x03, 0x63, 0xd3, 0xa3, 0xe0, 0x00, 0x97, 0xfc, 0x4f, 0x12, 0x8c, 0x6f, 0x10, 0xfa, 0x99, + 0x61, 0xed, 0x37, 0x0c, 0x4d, 0x6d, 0xf5, 0xcf, 0x21, 0xd8, 0xe2, 0x50, 0xb0, 0x7d, 0x6b, 0xc0, + 0xce, 0x84, 0xac, 0x4b, 0x0b, 0xb9, 0xf2, 0x57, 0x12, 0xcc, 0x87, 0x38, 0x1f, 0xf9, 0x47, 0x77, + 0x1b, 0x86, 0x4c, 0xc3, 0xa2, 0x6e, 0x22, 0x3e, 0x95, 0x42, 0x16, 0xc6, 0x02, 0xa9, 0x98, 0xc1, + 0x60, 0x07, 0x0d, 0xad, 0x41, 0x8e, 0x1a, 0xc2, 0x55, 0x4f, 0x87, 0x49, 0x88, 0x55, 0x07, 0x81, + 0x99, 0xdb, 0x32, 0x70, 0x8e, 0x1a, 0x6c, 0x23, 0xca, 0x21, 0xae, 0x60, 0xf0, 0x79, 0x45, 0x33, + 0xc0, 0x50, 0xd8, 0xb5, 0x8c, 0xee, 0x99, 0xe7, 0xe0, 0x6d, 0xc4, 0x8a, 0x65, 0x74, 0x31, 0xc7, + 0x92, 0xbf, 0x96, 0x60, 0x3a, 0xc4, 0x79, 0x0e, 0x81, 0xff, 0xfd, 0x70, 0xe0, 0xbf, 0x71, 0x9a, + 0x89, 0xa4, 0x84, 0xff, 0xaf, 0x73, 0x91, 0x69, 0xb0, 0x09, 0xa3, 0x5d, 0x18, 0x35, 0x8d, 0x76, + 0xf3, 0x25, 0x7c, 0x0e, 0x9c, 0x64, 0x79, 0xb3, 0xe1, 0x63, 0xe1, 0x20, 0x30, 0x3a, 0x84, 0x69, + 0x5d, 0xe9, 0x12, 0xdb, 0x54, 0x5a, 0xa4, 0xf9, 0x12, 0x1e, 0x48, 0x2e, 0xf2, 0xef, 0x0d, 0x51, + 0x44, 0x1c, 0x57, 0x82, 0xd6, 0xa1, 0xa8, 0x9a, 0xbc, 0x8e, 0x13, 0xb5, 0xcb, 0xc0, 0x2c, 0xea, + 0x54, 0x7d, 0x4e, 0x3c, 0x17, 0x3f, 0xb0, 0x8b, 0x21, 0xff, 0x75, 0xd4, 0x1b, 0x98, 0xff, 0xa1, + 0xc7, 0x50, 0xe2, 0x8d, 0x19, 0x2d, 0x43, 0x73, 0xbf, 0x0c, 0xb0, 0x9d, 0x6d, 0x88, 0xb1, 0x17, + 0x47, 0x95, 0xcb, 0x09, 0x8f, 0xbe, 0x2e, 0x19, 0x7b, 0xc2, 0x68, 0x03, 0x0a, 0xe6, 0x8f, 0xa9, + 0x60, 0x78, 0x92, 0xe3, 0x65, 0x0b, 0xc7, 0x91, 0x7f, 0x2f, 0x1f, 0x31, 0x97, 0xa7, 0xba, 0x67, + 0x2f, 0x6d, 0xd7, 0xbd, 0x8a, 0x29, 0x75, 0xe7, 0x77, 0xa0, 0x28, 0x32, 0xbc, 0x70, 0xe6, 0x5f, + 0x9c, 0xc6, 0x99, 0x83, 0x59, 0xcc, 0xbb, 0xb0, 0xb8, 0x83, 0x2e, 0x30, 0xfa, 0x18, 0x86, 0x89, + 0xa3, 0xc2, 0xc9, 0x8d, 0xf7, 0x4e, 0xa3, 0xc2, 0x8f, 0xab, 0x7e, 0xa1, 0x2a, 0xc6, 0x04, 0x2a, + 0x7a, 0x87, 0xad, 0x17, 0xe3, 0x65, 0x97, 0x40, 0xbb, 0x5c, 0xe0, 0xe9, 0xea, 0x8a, 0x33, 0x6d, + 0x6f, 0xf8, 0xc5, 0x51, 0x05, 0xfc, 0x9f, 0x38, 0x28, 0x21, 0xff, 0x8b, 0x04, 0xd3, 0x7c, 0x85, + 0x5a, 0x3d, 0x4b, 0xa5, 0xfd, 0x73, 0x4b, 0x4c, 0x4f, 0x43, 0x89, 0xe9, 0xce, 0x80, 0x65, 0x89, + 0x59, 0x98, 0x9a, 0x9c, 0xbe, 0x91, 0xe0, 0x62, 0x8c, 0xfb, 0x1c, 0xe2, 0xe2, 0x76, 0x38, 0x2e, + 0xbe, 0x75, 0xda, 0x09, 0xa5, 0xc4, 0xc6, 0xff, 0x9a, 0x4e, 0x98, 0x0e, 0x3f, 0x29, 0xb7, 0x01, + 0x4c, 0x4b, 0x3d, 0x50, 0x35, 0xd2, 0x11, 0x1f, 0xc1, 0x4b, 0x81, 0x16, 0x27, 0x8f, 0x82, 0x03, + 0x5c, 0xc8, 0x86, 0xb9, 0x36, 0xd9, 0x55, 0x7a, 0x1a, 0x5d, 0x6c, 0xb7, 0x97, 0x14, 0x53, 0xd9, + 0x51, 0x35, 0x95, 0xaa, 0xe2, 0xb9, 0x60, 0xa4, 0xfe, 0xd0, 0xf9, 0x38, 0x9d, 0xc4, 0xf1, 0xe2, + 0xa8, 0x72, 0x25, 0xe9, 0xeb, 0x90, 0xcb, 0xd2, 0xc7, 0x29, 0xd0, 0xa8, 0x0f, 0x65, 0x8b, 0x7c, + 0xda, 0x53, 0x2d, 0xd2, 0x5e, 0xb6, 0x0c, 0x33, 0xa4, 0x36, 0xcf, 0xd5, 0xfe, 
0xfa, 0xf1, 0x51, + 0xa5, 0x8c, 0x53, 0x78, 0x06, 0x2b, 0x4e, 0x85, 0x47, 0xcf, 0x60, 0x46, 0x11, 0xcd, 0x68, 0x41, + 0xad, 0xce, 0x29, 0xb9, 0x7f, 0x7c, 0x54, 0x99, 0x59, 0x8c, 0x93, 0x07, 0x2b, 0x4c, 0x02, 0x45, + 0x35, 0x28, 0x1e, 0xf0, 0xbe, 0x35, 0xbb, 0x3c, 0xc4, 0xf1, 0x59, 0x22, 0x28, 0x3a, 0xad, 0x6c, + 0x0c, 0x73, 0x78, 0xa5, 0xc9, 0x4f, 0x9f, 0xcb, 0xc5, 0x2e, 0x94, 0xac, 0x96, 0x14, 0x27, 0x9e, + 0xbf, 0x18, 0x97, 0xfc, 0xa8, 0xf5, 0xc4, 0x27, 0xe1, 0x20, 0x1f, 0xfa, 0x08, 0x46, 0xf6, 0xc4, + 0xab, 0x84, 0x5d, 0x2e, 0x66, 0x4a, 0xc2, 0xa1, 0x57, 0x8c, 0xfa, 0xb4, 0x50, 0x31, 0xe2, 0x0e, + 0xdb, 0xd8, 0x47, 0x44, 0x6f, 0x40, 0x91, 0xff, 0x58, 0x5d, 0xe6, 0xcf, 0x71, 0x25, 0x3f, 0xb6, + 0x3d, 0x71, 0x86, 0xb1, 0x4b, 0x77, 0x59, 0x57, 0x1b, 0x4b, 0xfc, 0x59, 0x38, 0xc2, 0xba, 0xda, + 0x58, 0xc2, 0x2e, 0x1d, 0x7d, 0x02, 0x45, 0x9b, 0xac, 0xa9, 0x7a, 0xef, 0xb0, 0x0c, 0x99, 0x3e, + 0x2a, 0x37, 0x1f, 0x71, 0xee, 0xc8, 0xc3, 0x98, 0xaf, 0x41, 0xd0, 0xb1, 0x0b, 0x8b, 0xf6, 0x60, + 0xc4, 0xea, 0xe9, 0x8b, 0xf6, 0xb6, 0x4d, 0xac, 0xf2, 0x28, 0xd7, 0x31, 0x28, 0x9c, 0x63, 0x97, + 0x3f, 0xaa, 0xc5, 0x5b, 0x21, 0x8f, 0x03, 0xfb, 0xe0, 0x68, 0x0f, 0x80, 0xff, 0xe0, 0x6f, 0x70, + 0xe5, 0x39, 0xae, 0xea, 0x7e, 0x16, 0x55, 0x49, 0x4f, 0x7d, 0xe2, 0x1d, 0xde, 0x23, 0xe3, 0x00, + 0x36, 0xfa, 0x43, 0x09, 0x90, 0xdd, 0x33, 0x4d, 0x8d, 0x74, 0x89, 0x4e, 0x15, 0x8d, 0x8f, 0xda, + 0xe5, 0x31, 0xae, 0xf2, 0xdd, 0x41, 0x2b, 0x18, 0x13, 0x8c, 0xaa, 0xf6, 0x9e, 0xd7, 0xe3, 0xac, + 0x38, 0x41, 0x2f, 0xdb, 0xc4, 0x5d, 0x31, 0xeb, 0xf1, 0x4c, 0x9b, 0x98, 0xfc, 0xba, 0xe9, 0x6f, + 0xa2, 0xa0, 0x63, 0x17, 0x16, 0x3d, 0x85, 0x39, 0xb7, 0xc1, 0x12, 0x1b, 0x06, 0x5d, 0x51, 0x35, + 0x62, 0xf7, 0x6d, 0x4a, 0xba, 0xe5, 0x09, 0xee, 0x60, 0x5e, 0x97, 0x09, 0x4e, 0xe4, 0xc2, 0x29, + 0xd2, 0xa8, 0x0b, 0x15, 0x37, 0x38, 0xb1, 0x93, 0xeb, 0x45, 0xc7, 0x47, 0x76, 0x4b, 0xd1, 0x9c, + 0x2f, 0x0e, 0x93, 0x5c, 0xc1, 0xeb, 0xc7, 0x47, 0x95, 0xca, 0xf2, 0xc9, 0xac, 0x78, 0x10, 0x16, + 0xfa, 0x00, 0xca, 0x4a, 0x9a, 0x9e, 0x29, 0xae, 0xe7, 0x35, 0x16, 0xf1, 0x52, 0x15, 0xa4, 0x4a, + 0x23, 0x0a, 0x53, 0x4a, 0xb8, 0xd5, 0xd5, 0x2e, 0x4f, 0x67, 0x7a, 0xf2, 0x8c, 0x74, 0xc8, 0xfa, + 0xcf, 0x1e, 0x11, 0x82, 0x8d, 0x63, 0x1a, 0xd0, 0xef, 0x00, 0x52, 0xa2, 0xdd, 0xb9, 0x76, 0x19, + 0x65, 0x4a, 0x74, 0xb1, 0xb6, 0x5e, 0xdf, 0xed, 0x62, 0x24, 0x1b, 0x27, 0xe8, 0x61, 0x05, 0xba, + 0x12, 0xe9, 0x28, 0xb6, 0xcb, 0xf3, 0x5c, 0x79, 0x2d, 0x9b, 0x72, 0x4f, 0x2e, 0xf0, 0x61, 0x25, + 0x8a, 0x88, 0xe3, 0x4a, 0xd0, 0x1a, 0xcc, 0x8a, 0xc1, 0x6d, 0xdd, 0x56, 0x76, 0x49, 0xb3, 0x6f, + 0xb7, 0xa8, 0x66, 0x97, 0x67, 0x78, 0x7c, 0xe7, 0x1f, 0xf7, 0x16, 0x13, 0xe8, 0x38, 0x51, 0x0a, + 0xbd, 0x0b, 0x53, 0xbb, 0x86, 0xb5, 0xa3, 0xb6, 0xdb, 0x44, 0x77, 0x91, 0x66, 0x39, 0xd2, 0x2c, + 0xdb, 0x87, 0x95, 0x08, 0x0d, 0xc7, 0xb8, 0x91, 0x0d, 0x17, 0x05, 0x72, 0xc3, 0x32, 0x5a, 0xeb, + 0x46, 0x4f, 0xa7, 0x4e, 0xd9, 0x77, 0xd1, 0x4b, 0xa3, 0x17, 0x17, 0x93, 0x18, 0x5e, 0x1c, 0x55, + 0xae, 0x26, 0x57, 0xf9, 0x3e, 0x13, 0x4e, 0xc6, 0x46, 0x26, 0x8c, 0x89, 0x3e, 0xf1, 0x25, 0x4d, + 0xb1, 0xed, 0x72, 0x99, 0x1f, 0xfd, 0x07, 0x83, 0x03, 0x9e, 0x27, 0x12, 0x3d, 0xff, 0x53, 0xc7, + 0x47, 0x95, 0xb1, 0x20, 0x03, 0x0e, 0x69, 0xe0, 0x7d, 0x41, 0xe2, 0x6b, 0xd4, 0xf9, 0xf4, 0x56, + 0x9f, 0xae, 0x2f, 0xc8, 0x37, 0xed, 0xa5, 0xf5, 0x05, 0x05, 0x20, 0x4f, 0x7e, 0x97, 0xfe, 0xcf, + 0x1c, 0xcc, 0xf8, 0xcc, 0x99, 0xfb, 0x82, 0x12, 0x44, 0xfe, 0xbf, 0xbf, 0x3a, 0x5b, 0xaf, 0x8e, + 0xbf, 0x74, 0xff, 0xf7, 0x7a, 0x75, 0x7c, 0xdb, 0x52, 0x6e, 0x0f, 0x7f, 0x9b, 0x0b, 0x4e, 0xe0, + 0x94, 
0x0d, 0x23, 0x2f, 0xa1, 0xc5, 0xf8, 0x27, 0xd7, 0x73, 0x22, 0x7f, 0x93, 0x87, 0xa9, 0xe8, + 0x69, 0x0c, 0xf5, 0x15, 0x48, 0x03, 0xfb, 0x0a, 0x1a, 0x30, 0xbb, 0xdb, 0xd3, 0xb4, 0x3e, 0x9f, + 0x43, 0xa0, 0xb9, 0xc0, 0xf9, 0x2e, 0xf8, 0x9a, 0x90, 0x9c, 0x5d, 0x49, 0xe0, 0xc1, 0x89, 0x92, + 0xf1, 0x36, 0x83, 0xc2, 0x8f, 0x6d, 0x33, 0x18, 0x3a, 0x43, 0x9b, 0x41, 0x72, 0xa7, 0x46, 0xfe, + 0x4c, 0x9d, 0x1a, 0x67, 0xe9, 0x31, 0x48, 0x08, 0x62, 0x03, 0xfb, 0x65, 0x5f, 0x83, 0x4b, 0x42, + 0x8c, 0xf2, 0xde, 0x01, 0x9d, 0x5a, 0x86, 0xa6, 0x11, 0x6b, 0xb9, 0xd7, 0xed, 0xf6, 0xe5, 0x5f, + 0xc2, 0x44, 0xb8, 0x2b, 0xc6, 0xd9, 0x69, 0xa7, 0x31, 0x47, 0x7c, 0x9d, 0x0d, 0xec, 0xb4, 0x33, + 0x8e, 0x3d, 0x0e, 0xf9, 0xf7, 0x25, 0x98, 0x4b, 0xee, 0x7e, 0x45, 0x1a, 0x4c, 0x74, 0x95, 0xc3, + 0x60, 0x47, 0xb2, 0x74, 0xc6, 0x77, 0x33, 0xde, 0x0e, 0xb1, 0x1e, 0xc2, 0xc2, 0x11, 0x6c, 0xf9, + 0x07, 0x09, 0xe6, 0x53, 0x1a, 0x11, 0xce, 0xd7, 0x12, 0xf4, 0x21, 0x94, 0xba, 0xca, 0x61, 0xb3, + 0x67, 0x75, 0xc8, 0x99, 0x5f, 0x0a, 0xf9, 0x71, 0x5f, 0x17, 0x28, 0xd8, 0xc3, 0x93, 0xff, 0x52, + 0x82, 0x9f, 0xa5, 0x5e, 0xa4, 0xd0, 0xbd, 0x50, 0xcf, 0x84, 0x1c, 0xe9, 0x99, 0x40, 0x71, 0xc1, + 0x57, 0xd4, 0x32, 0xf1, 0x85, 0x04, 0xe5, 0xb4, 0x9b, 0x25, 0xba, 0x1b, 0x32, 0xf2, 0xe7, 0x11, + 0x23, 0xa7, 0x63, 0x72, 0xaf, 0xc8, 0xc6, 0x7f, 0x95, 0xe0, 0xf2, 0x09, 0x15, 0x9a, 0x77, 0x81, + 0x21, 0xed, 0x20, 0x17, 0x7f, 0xd4, 0x16, 0x5f, 0xc4, 0xfc, 0x0b, 0x4c, 0x02, 0x0f, 0x4e, 0x95, + 0x46, 0xdb, 0x30, 0x2f, 0x6e, 0x4f, 0x51, 0x9a, 0x28, 0x3e, 0x78, 0x6b, 0xd9, 0x72, 0x32, 0x0b, + 0x4e, 0x93, 0x95, 0xff, 0x46, 0x82, 0xb9, 0xe4, 0x27, 0x03, 0xf4, 0x76, 0x68, 0xc9, 0x2b, 0x91, + 0x25, 0x9f, 0x8c, 0x48, 0x89, 0x05, 0xff, 0x18, 0x26, 0xc4, 0xc3, 0x82, 0x80, 0x11, 0xce, 0x2c, + 0x27, 0xe5, 0x17, 0x01, 0xe1, 0x96, 0xb7, 0xfc, 0x98, 0x84, 0xc7, 0x70, 0x04, 0x4d, 0xfe, 0x83, + 0x1c, 0x0c, 0x35, 0x5b, 0x8a, 0x46, 0xce, 0xa1, 0xba, 0x7d, 0x2f, 0x54, 0xdd, 0x0e, 0xfa, 0xa7, + 0x2d, 0x6e, 0x55, 0x6a, 0x61, 0x8b, 0x23, 0x85, 0xed, 0x9b, 0x99, 0xd0, 0x4e, 0xae, 0x69, 0x7f, + 0x0d, 0x46, 0x3c, 0xa5, 0xa7, 0x4b, 0xb5, 0xf2, 0x5f, 0xe4, 0x60, 0x34, 0xa0, 0xe2, 0x94, 0x89, + 0x7a, 0x37, 0x54, 0x9d, 0xe4, 0x33, 0x3c, 0xe3, 0x04, 0x74, 0x55, 0xdd, 0x7a, 0xc4, 0x69, 0x3a, + 0xf6, 0xdb, 0x4c, 0xe3, 0x65, 0xca, 0x2f, 0x61, 0x82, 0x2a, 0x56, 0x87, 0x50, 0xef, 0xb3, 0x46, + 0x9e, 0xfb, 0xa2, 0xd7, 0xfd, 0xbe, 0x15, 0xa2, 0xe2, 0x08, 0xf7, 0xa5, 0x87, 0x30, 0x1e, 0x52, + 0x76, 0xaa, 0x9e, 0xe1, 0xbf, 0x97, 0xe0, 0xe7, 0x03, 0x9f, 0x82, 0x50, 0x3d, 0x74, 0x48, 0xaa, + 0x91, 0x43, 0xb2, 0x90, 0x0e, 0xf0, 0xea, 0x7a, 0xcf, 0xea, 0x37, 0x9f, 0x7f, 0xbf, 0x70, 0xe1, + 0xdb, 0xef, 0x17, 0x2e, 0x7c, 0xf7, 0xfd, 0xc2, 0x85, 0xdf, 0x3d, 0x5e, 0x90, 0x9e, 0x1f, 0x2f, + 0x48, 0xdf, 0x1e, 0x2f, 0x48, 0xdf, 0x1d, 0x2f, 0x48, 0xff, 0x7e, 0xbc, 0x20, 0xfd, 0xc9, 0x0f, + 0x0b, 0x17, 0x3e, 0x2c, 0x0a, 0xb8, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x62, 0xda, 0xf9, + 0x07, 0x3e, 0x00, 0x00, +} + func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -431,21 +1957,27 @@ func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { } func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ 
+ i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -453,21 +1985,27 @@ func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { } func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -475,29 +2013,35 @@ func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { } func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i += copy(dAtA[i:], m.PathPrefix) - dAtA[i] = 0x10 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x10 + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -505,41 +2049,52 @@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } 
- i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -547,41 +2102,52 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n4, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -589,37 +2155,46 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - 
n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -627,54 +2202,65 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n6, err := m.Selector.MarshalTo(dAtA[i:]) + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n6 - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n7, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n8, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -682,58 +2268,65 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - 
i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x52 } } - return i, nil + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -741,31 +2334,39 @@ func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n9, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -773,41 +2374,52 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n10, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n11, err := 
m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n12, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -815,49 +2427,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n13, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n14, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - return i, nil + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -865,37 +2490,46 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, 
error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n15, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -903,51 +2537,61 @@ func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { } func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a if len(m.UpdatedAnnotations) > 0 { keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) for k := range m.UpdatedAnnotations { keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for _, k := range keysForUpdatedAnnotations { + for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.UpdatedAnnotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForUpdatedAnnotations[iNdEx]) + copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n16, err := m.RollbackTo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - 
i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -955,79 +2599,92 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n17, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.RollbackTo != nil { + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n17 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n18, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n19, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x42 } - dAtA[i] = 0x38 - i++ + i-- if m.Paused { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.RollbackTo != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n20, err := m.RollbackTo.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n20 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x22 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1035,52 +2692,59 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) 
(int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1088,31 +2752,39 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n21, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n21 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1120,33 +2792,41 @@ func (m 
*FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1154,29 +2834,37 @@ func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n22, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n22 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1184,29 +2872,36 @@ func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Paths) > 0 { - for _, msg := range m.Paths { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *HostPortRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1214,23 +2909,28 @@ func (m *HostPortRange) Marshal() (dAtA []byte, err error) { } func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *IDRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1238,23 +2938,28 @@ func (m *IDRange) Marshal() (dAtA []byte, err error) { } func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1262,36 +2967,36 @@ func (m *IPBlock) Marshal() (dAtA []byte, err error) { } func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i += copy(dAtA[i:], m.CIDR) if len(m.Except) > 0 { - for _, s := range m.Except { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1299,41 +3004,52 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) { } func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n24 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressBackend) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1341,29 +3057,37 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) { } func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n26, err := m.ServicePort.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n26 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1371,37 +3095,46 @@ func (m *IngressList) Marshal() (dAtA []byte, err error) { } func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m 
*IngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1409,29 +3142,37 @@ func (m *IngressRule) Marshal() (dAtA []byte, err error) { } func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n28, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n28 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1439,79 +3180,97 @@ func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.HTTP != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n29, err := m.HTTP.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n29 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *IngressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Backend != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n30, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n30 } if len(m.TLS) > 0 { - for _, msg := range m.TLS { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Backend != nil { + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *IngressStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1519,25 +3278,32 @@ func (m *IngressStatus) Marshal() (dAtA []byte, err error) { } func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n31, err := m.LoadBalancer.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n31 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressTLS) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1545,36 +3311,36 @@ func (m *IngressTLS) Marshal() (dAtA []byte, err error) { } func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0x12 if len(m.Hosts) > 0 { - for _, s := range m.Hosts { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1582,33 +3348,42 @@ func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n32, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := 
m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n32 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n33, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n33 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1616,41 +3391,50 @@ func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.To) > 0 { + for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.To) > 0 { - for _, msg := range m.To { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1658,41 +3442,50 @@ func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.From) > 0 { + for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.From) > 0 { - for _, msg := range m.From { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1700,37 +3493,46 @@ func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n34, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1738,47 +3540,58 @@ func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.PodSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n35, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.IPBlock != nil { + { + size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n35 + i-- + dAtA[i] = 0x1a } if m.NamespaceSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n36, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n36 + i-- + dAtA[i] = 0x12 } - if m.IPBlock != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) - n37, err := m.IPBlock.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PodSelector != nil { + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n37 + i-- + dAtA[i] = 0xa } - return i, nil + return 
len(dAtA) - i, nil } func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1786,33 +3599,41 @@ func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Protocol != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) - i += copy(dAtA[i:], *m.Protocol) - } if m.Port != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n38, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n38 + i-- + dAtA[i] = 0x12 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1820,64 +3641,69 @@ func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n39, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.PolicyTypes) > 0 { + for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PolicyTypes[iNdEx]) + copy(dAtA[i:], m.PolicyTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Egress) > 0 { - for _, msg := range m.Egress { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 + } 
+ } + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1885,33 +3711,42 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n40, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n40 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n41, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n41 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1919,338 +3754,330 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n42, err := m.ListMeta.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n42 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RuntimeClass != nil { + { + size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) 
+ if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.AllowedCSIDrivers) > 0 { + for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } + if m.RunAsGroup != nil { + { + size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - return i, nil -} - -func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.AllowedProcMountTypes) > 0 { + for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedProcMountTypes[iNdEx]) + copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } } - i++ - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ForbiddenSysctls) > 0 { + for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ForbiddenSysctls[iNdEx]) + copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.AllowedUnsafeSysctls) > 0 { + for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedUnsafeSysctls[iNdEx]) + copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a } } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.AllowedFlexVolumes) > 0 { + for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 } } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.AllowedHostPaths) > 0 { + for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a } } - dAtA[i] = 0x30 - i++ - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.HostPorts) > 0 { - for _, msg := range m.HostPorts { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 } - dAtA[i] = 0x40 - i++ - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DefaultAllowPrivilegeEscalation != nil { + i-- + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 } - i++ - dAtA[i] = 0x48 - i++ - if m.HostIPC { + i-- + if m.ReadOnlyRootFilesystem { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) - n43, err := m.SELinux.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x70 + { + size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n43 - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n44, err := m.RunAsUser.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x6a + { + size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n44 + i-- dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) - n45, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n45 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n46, err := m.FSGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x5a + { + size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n46 - dAtA[i] = 0x70 - i++ - if m.ReadOnlyRootFilesystem { + i-- + dAtA[i] = 0x52 + i-- + if m.HostIPC { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.DefaultAllowPrivilegeEscalation != nil { - dAtA[i] = 0x78 - i++ - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.AllowPrivilegeEscalation != nil { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.AllowedHostPaths) > 0 { - for _, msg := range m.AllowedHostPaths { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + i-- + dAtA[i] = 0x48 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.AllowedFlexVolumes) > 0 { - for _, msg := range m.AllowedFlexVolumes { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + dAtA[i] = 0x40 + if len(m.HostPorts) > 0 { + for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x3a } } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x30 + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.AllowedCapabilities) > 0 { + for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedCapabilities[iNdEx]) + copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x22 } } - if m.RunAsGroup != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsGroup.Size())) - n47, err := m.RunAsGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.RequiredDropCapabilities) > 0 { + for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequiredDropCapabilities[iNdEx]) + copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x1a } - i += n47 } - if len(m.AllowedCSIDrivers) > 0 { - for _, msg := range m.AllowedCSIDrivers { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.DefaultAddCapabilities) > 0 { + for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DefaultAddCapabilities[iNdEx]) + copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if m.RuntimeClass != nil { - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RuntimeClass.Size())) - n48, err := m.RuntimeClass.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n48 + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2258,41 +4085,52 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n49, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n49 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n50, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n50 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n51, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n51 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2300,41 +4138,52 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n52, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n52 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2342,37 +4191,46 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n53, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n53 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2380,43 +4238,52 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n54, err := m.Selector.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n54 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n55, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - i += n55 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2424,44 +4291,51 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x32 } } - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ReplicationControllerDummy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2469,17 +4343,22 @@ func (m *ReplicationControllerDummy) Marshal() (dAtA []byte, err error) { } func (m *ReplicationControllerDummy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerDummy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2487,20 +4366,25 @@ func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { } func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2508,27 +4392,34 @@ func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.MaxUnavailable.Size())) - n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n56 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2536,37 +4427,46 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n57, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n57 - } if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n58, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n58 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2574,33 +4474,41 @@ func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2608,33 +4516,41 @@ func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *RunAsUserStrategyOptions) 
MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2642,38 +4558,38 @@ func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.DefaultRuntimeClassName != nil { + i -= len(*m.DefaultRuntimeClassName) + copy(dAtA[i:], *m.DefaultRuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) + i-- + dAtA[i] = 0x12 + } if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { + for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedRuntimeClassNames[iNdEx]) + copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if m.DefaultRuntimeClassName != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i += copy(dAtA[i:], *m.DefaultRuntimeClassName) - } - return i, nil + return len(dAtA) - i, nil } func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2681,31 +4597,39 @@ func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if m.SELinuxOptions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n59, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := 
m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n59 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2713,41 +4637,52 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n60, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n60 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n61, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n61 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n62, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n62 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2755,20 +4690,25 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2776,46 +4716,54 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k 
:= range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2823,39 +4771,52 @@ func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AllowedCSIDriver) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2864,6 +4825,9 @@ func (m *AllowedCSIDriver) Size() (n int) { } func (m *AllowedFlexVolume) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Driver) @@ -2872,6 +4836,9 @@ func (m *AllowedFlexVolume) Size() (n int) { } func (m *AllowedHostPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.PathPrefix) @@ -2881,6 +4848,9 @@ func (m *AllowedHostPath) Size() (n int) { } func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -2893,6 
+4863,9 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2909,6 +4882,9 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2923,6 +4899,9 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Selector != nil { @@ -2942,6 +4921,9 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -2965,6 +4947,9 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2977,6 +4962,9 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -2989,6 +4977,9 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -3007,6 +4998,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3021,6 +5015,9 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -3039,6 +5036,9 @@ func (m *DeploymentRollback) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -3068,6 +5068,9 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -3089,6 +5092,9 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -3101,6 +5107,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *FSGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3115,6 +5124,9 @@ func (m *FSGroupStrategyOptions) Size() (n int) { } func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -3125,6 +5137,9 @@ func (m *HTTPIngressPath) Size() (n int) { } func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Paths) > 0 { @@ -3137,6 +5152,9 @@ func (m *HTTPIngressRuleValue) Size() (n int) { } func (m *HostPortRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -3145,6 +5163,9 @@ func (m *HostPortRange) Size() (n int) { } func (m *IDRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -3153,6 +5174,9 @@ func (m *IDRange) Size() (n int) { } func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.CIDR) @@ -3167,6 +5191,9 @@ func (m *IPBlock) Size() (n int) { } func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3179,6 +5206,9 @@ func (m *Ingress) Size() (n int) { } func (m *IngressBackend) Size() (n int) { + if m == nil { 
+ return 0 + } var l int _ = l l = len(m.ServiceName) @@ -3189,6 +5219,9 @@ func (m *IngressBackend) Size() (n int) { } func (m *IngressList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3203,6 +5236,9 @@ func (m *IngressList) Size() (n int) { } func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Host) @@ -3213,6 +5249,9 @@ func (m *IngressRule) Size() (n int) { } func (m *IngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HTTP != nil { @@ -3223,6 +5262,9 @@ func (m *IngressRuleValue) Size() (n int) { } func (m *IngressSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Backend != nil { @@ -3245,6 +5287,9 @@ func (m *IngressSpec) Size() (n int) { } func (m *IngressStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.LoadBalancer.Size() @@ -3253,6 +5298,9 @@ func (m *IngressStatus) Size() (n int) { } func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Hosts) > 0 { @@ -3267,6 +5315,9 @@ func (m *IngressTLS) Size() (n int) { } func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3277,6 +5328,9 @@ func (m *NetworkPolicy) Size() (n int) { } func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -3295,6 +5349,9 @@ func (m *NetworkPolicyEgressRule) Size() (n int) { } func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -3313,6 +5370,9 @@ func (m *NetworkPolicyIngressRule) Size() (n int) { } func (m *NetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3327,6 +5387,9 @@ func (m *NetworkPolicyList) Size() (n int) { } func (m *NetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PodSelector != nil { @@ -3345,6 +5408,9 @@ func (m *NetworkPolicyPeer) Size() (n int) { } func (m *NetworkPolicyPort) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Protocol != nil { @@ -3359,6 +5425,9 @@ func (m *NetworkPolicyPort) Size() (n int) { } func (m *NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.PodSelector.Size() @@ -3385,6 +5454,9 @@ func (m *NetworkPolicySpec) Size() (n int) { } func (m *PodSecurityPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3395,6 +5467,9 @@ func (m *PodSecurityPolicy) Size() (n int) { } func (m *PodSecurityPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3409,6 +5484,9 @@ func (m *PodSecurityPolicyList) Size() (n int) { } func (m *PodSecurityPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -3508,6 +5586,9 @@ func (m *PodSecurityPolicySpec) Size() (n int) { } func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3520,6 +5601,9 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -3536,6 +5620,9 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -3550,6 +5637,9 @@ func (m *ReplicaSetList) Size() (n int) { } func (m 
*ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Replicas != nil { @@ -3566,6 +5656,9 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3583,12 +5676,18 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *ReplicationControllerDummy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func (m *RollbackConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Revision)) @@ -3596,6 +5695,9 @@ func (m *RollbackConfig) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -3606,6 +5708,9 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MaxUnavailable != nil { @@ -3620,6 +5725,9 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RunAsGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3634,6 +5742,9 @@ func (m *RunAsGroupStrategyOptions) Size() (n int) { } func (m *RunAsUserStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3648,6 +5759,9 @@ func (m *RunAsUserStrategyOptions) Size() (n int) { } func (m *RuntimeClassStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.AllowedRuntimeClassNames) > 0 { @@ -3664,6 +5778,9 @@ func (m *RuntimeClassStrategyOptions) Size() (n int) { } func (m *SELinuxStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3676,6 +5793,9 @@ func (m *SELinuxStrategyOptions) Size() (n int) { } func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -3688,6 +5808,9 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3695,6 +5818,9 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3712,6 +5838,9 @@ func (m *ScaleStatus) Size() (n int) { } func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -3726,14 +5855,7 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -3774,7 +5896,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ 
-3788,7 +5910,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -3799,9 +5921,14 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -3811,8 +5938,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, @@ -3825,6 +5952,11 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -3835,7 +5967,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, 
}, "") return s @@ -3846,7 +5978,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -3856,7 +5988,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3872,8 +6004,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3882,9 +6014,14 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -3917,13 +6054,13 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), 
"PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, `}`, }, "") @@ -3933,13 +6070,18 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -3952,7 +6094,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -3961,9 +6103,14 @@ func (this *FSGroupStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&FSGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -3983,8 +6130,13 @@ func (this *HTTPIngressRuleValue) String() string { if this == nil { return "nil" } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + `,`, + `Paths:` + repeatedStringForPaths + `,`, `}`, }, "") return s 
@@ -4027,7 +6179,7 @@ func (this *Ingress) String() string { return "nil" } s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -4040,7 +6192,7 @@ func (this *IngressBackend) String() string { } s := strings.Join([]string{`&IngressBackend{`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4049,9 +6201,14 @@ func (this *IngressList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -4072,7 +6229,7 @@ func (this *IngressRuleValue) String() string { return "nil" } s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, `}`, }, "") return s @@ -4081,10 +6238,20 @@ func (this *IngressSpec) String() string { if this == nil { return "nil" } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," + } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, 
"") return s @@ -4094,7 +6261,7 @@ func (this *IngressStatus) String() string { return "nil" } s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_api_core_v1.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4115,7 +6282,7 @@ func (this *NetworkPolicy) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -4125,9 +6292,19 @@ func (this *NetworkPolicyEgressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForTo += "}" s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, `}`, }, "") return s @@ -4136,9 +6313,19 @@ func (this *NetworkPolicyIngressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForFrom += "}" s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, `}`, }, "") return s @@ -4147,9 +6334,14 @@ func (this *NetworkPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := 
strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -4159,9 +6351,9 @@ func (this *NetworkPolicyPeer) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(fmt.Sprintf("%v", this.IPBlock), "IPBlock", "IPBlock", 1) + `,`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, `}`, }, "") return s @@ -4172,7 +6364,7 @@ func (this *NetworkPolicyPort) String() string { } s := strings.Join([]string{`&NetworkPolicyPort{`, `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -4181,10 +6373,20 @@ func (this *NetworkPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, - `Egress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Egress), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + `,`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, `}`, }, "") @@ -4195,7 +6397,7 @@ func (this *PodSecurityPolicy) String() string { return "nil" } s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -4205,9 +6407,14 @@ func (this *PodSecurityPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PodSecurityPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -4216,6 +6423,26 @@ func (this *PodSecurityPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForHostPorts := "[]HostPortRange{" + for _, f := range this.HostPorts { + repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," + } + repeatedStringForHostPorts += "}" + repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" + for _, f := range this.AllowedHostPaths { + repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedHostPaths += "}" + repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" + for _, f := range this.AllowedFlexVolumes { + repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedFlexVolumes += "}" + repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" + for _, f := range this.AllowedCSIDrivers { + repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedCSIDrivers += "}" s := strings.Join([]string{`&PodSecurityPolicySpec{`, `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, @@ -4223,7 +6450,7 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, + `HostPorts:` + repeatedStringForHostPorts + `,`, `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, @@ -4233,14 +6460,14 @@ func (this 
*PodSecurityPolicySpec) String() string { `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, - `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, + `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, + `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedCSIDrivers), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + `,`, - `RuntimeClass:` + strings.Replace(fmt.Sprintf("%v", this.RuntimeClass), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, + `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, + `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, `}`, }, "") return s @@ -4250,7 +6477,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -4264,7 +6491,7 @@ func (this *ReplicaSetCondition) String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -4275,9 +6502,14 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -4288,8 +6520,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -4299,13 +6531,18 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -4334,7 +6571,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -4344,8 +6581,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -4354,9 +6591,14 @@ func (this *RunAsGroupStrategyOptions) String() string { if this == nil { return "nil" } + 
repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -4365,9 +6607,14 @@ func (this *RunAsUserStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsUserStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -4389,7 +6636,7 @@ func (this *SELinuxStrategyOptions) String() string { } s := strings.Join([]string{`&SELinuxStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, `}`, }, "") return s @@ -4399,7 +6646,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -4442,9 +6689,14 @@ func (this *SupplementalGroupsStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -4472,7 +6724,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4500,7 +6752,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4510,6 +6762,9 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -4524,6 +6779,9 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4551,7 +6809,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4579,7 +6837,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4589,6 +6847,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4603,6 +6864,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4630,7 +6894,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4658,7 +6922,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4668,6 +6932,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4687,7 +6954,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4702,6 +6969,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4729,7 +6999,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4757,7 +7027,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4766,6 +7036,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4787,7 +7060,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4796,6 +7069,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4817,7 +7093,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -4826,6 +7102,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4842,6 +7121,9 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4869,7 +7151,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4897,7 +7179,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4907,6 +7189,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4926,7 +7211,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4936,6 +7221,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4955,7 +7243,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4964,6 +7252,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4985,7 +7276,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4995,6 +7286,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5014,7 +7308,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5024,6 +7318,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5038,6 +7335,9 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5065,7 +7365,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5093,7 +7393,7 @@ 
func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5102,6 +7402,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5123,7 +7426,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5132,6 +7435,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5149,6 +7455,9 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5176,7 +7485,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5204,7 +7513,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5213,11 +7522,14 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5237,7 +7549,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5246,6 +7558,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5267,7 +7582,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5276,6 +7591,9 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5297,7 +7615,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5316,7 +7634,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TemplateGeneration |= (int64(b) & 0x7F) << shift + m.TemplateGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5335,7 +7653,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5350,6 +7668,9 @@ func (m *DaemonSetSpec) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5377,7 +7698,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5405,7 +7726,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5424,7 +7745,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5443,7 +7764,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + m.DesiredNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5462,7 +7783,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift + m.NumberReady |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5481,7 +7802,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5500,7 +7821,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5519,7 +7840,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift + m.NumberAvailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5538,7 +7859,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift + m.NumberUnavailable |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5557,7 +7878,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -5577,7 +7898,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5586,6 +7907,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5603,6 +7927,9 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5630,7 +7957,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5658,7 +7985,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 
{ break } @@ -5668,6 +7995,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5687,7 +8017,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5696,6 +8026,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5715,6 +8048,9 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5742,7 +8078,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5770,7 +8106,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5779,6 +8115,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5800,7 +8139,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5809,6 +8148,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5830,7 +8172,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5839,6 +8181,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5855,6 +8200,9 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5882,7 +8230,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5910,7 +8258,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5920,6 +8268,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5939,7 +8290,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen 
|= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5949,6 +8300,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5968,7 +8322,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5978,6 +8332,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5997,7 +8354,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6007,6 +8364,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6026,7 +8386,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6035,6 +8395,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6056,7 +8419,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6065,6 +8428,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6081,6 +8447,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6108,7 +8477,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6136,7 +8505,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6145,6 +8514,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6166,7 +8538,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6175,6 +8547,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -6192,6 +8567,9 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6219,7 +8597,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6247,7 +8625,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6257,6 +8635,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6276,7 +8657,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6285,6 +8666,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6305,7 +8689,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6322,7 +8706,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6332,6 +8716,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6348,7 +8735,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6358,6 +8745,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -6394,7 +8784,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6403,6 +8793,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6419,6 +8812,9 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6446,7 +8842,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift 
if b < 0x80 { break } @@ -6474,7 +8870,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6494,7 +8890,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6503,11 +8899,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6527,7 +8926,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6536,6 +8935,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6557,7 +8959,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6566,6 +8968,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6587,7 +8992,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6606,7 +9011,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6626,7 +9031,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6646,7 +9051,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6655,6 +9060,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6679,7 +9087,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6694,6 +9102,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6721,7 +9132,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6749,7 +9160,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6768,7 +9179,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6787,7 +9198,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6806,7 +9217,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6825,7 +9236,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6844,7 +9255,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6853,6 +9264,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6875,7 +9289,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6894,7 +9308,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -6909,6 +9323,9 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6936,7 +9353,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6964,7 +9381,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6974,6 +9391,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6993,7 +9413,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7002,6 +9422,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7021,6 +9444,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7048,7 +9474,7 @@ 
func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7076,7 +9502,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7086,6 +9512,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7105,7 +9534,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7114,6 +9543,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7131,6 +9563,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7158,7 +9593,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7186,7 +9621,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7196,6 +9631,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7215,7 +9653,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7224,6 +9662,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7240,6 +9681,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7267,7 +9711,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7295,7 +9739,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7304,6 +9748,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7321,6 +9768,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if 
(iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7348,7 +9798,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7376,7 +9826,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int32(b) & 0x7F) << shift + m.Min |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7395,7 +9845,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int32(b) & 0x7F) << shift + m.Max |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7409,6 +9859,9 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7436,7 +9889,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7464,7 +9917,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int64(b) & 0x7F) << shift + m.Min |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7483,7 +9936,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int64(b) & 0x7F) << shift + m.Max |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7497,6 +9950,9 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7524,7 +9980,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7552,7 +10008,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7562,6 +10018,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7581,7 +10040,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7591,6 +10050,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7605,6 +10067,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7632,7 +10097,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7660,7 +10125,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7669,6 +10134,9 @@ func (m *Ingress) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7690,7 +10158,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7699,6 +10167,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7720,7 +10191,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7729,6 +10200,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7745,6 +10219,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7772,7 +10249,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7800,7 +10277,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7810,6 +10287,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7829,7 +10309,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7838,6 +10318,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7854,6 +10337,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7881,7 +10367,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7909,7 +10395,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7918,6 +10404,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7939,7 +10428,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7948,6 +10437,9 @@ func (m 
*IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7965,6 +10457,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7992,7 +10487,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8020,7 +10515,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8030,6 +10525,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8049,7 +10547,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8058,6 +10556,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8074,6 +10575,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8101,7 +10605,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8129,7 +10633,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8138,6 +10642,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8157,6 +10664,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8184,7 +10694,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8212,7 +10722,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8221,6 +10731,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8245,7 +10758,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } 
@@ -8254,6 +10767,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8276,7 +10792,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8285,6 +10801,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8302,6 +10821,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8329,7 +10851,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8357,7 +10879,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8366,6 +10888,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8382,6 +10907,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8409,7 +10937,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8437,7 +10965,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8447,6 +10975,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8466,7 +10997,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8476,6 +11007,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8490,6 +11024,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8517,7 +11054,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8545,7 +11082,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -8554,6 +11091,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8575,7 +11115,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8584,6 +11124,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8600,6 +11143,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8627,7 +11173,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8655,7 +11201,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8664,6 +11210,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8686,7 +11235,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8695,6 +11244,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8712,6 +11264,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8739,7 +11294,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8767,7 +11322,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8776,6 +11331,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8798,7 +11356,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8807,6 +11365,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8824,6 
+11385,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8851,7 +11415,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8879,7 +11443,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8888,6 +11452,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8909,7 +11476,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8918,6 +11485,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8935,6 +11505,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8962,7 +11535,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8990,7 +11563,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8999,11 +11572,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodSelector == nil { - m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.PodSelector = &v1.LabelSelector{} } if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -9023,7 +11599,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9032,11 +11608,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.NamespaceSelector = &v1.LabelSelector{} } if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -9056,7 +11635,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9065,6 +11644,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9084,6 +11666,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9111,7 +11696,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9139,7 +11724,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9149,6 +11734,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9169,7 +11757,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9178,11 +11766,14 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Port == nil { - m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.Port = &intstr.IntOrString{} } if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -9197,6 +11788,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9224,7 +11818,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9252,7 +11846,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9261,6 +11855,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9282,7 +11879,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9291,6 +11888,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9313,7 +11913,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9322,6 +11922,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9344,7 +11947,7 @@ func (m *NetworkPolicySpec) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9354,6 +11957,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9368,6 +11974,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9395,7 +12004,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9423,7 +12032,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9432,6 +12041,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9453,7 +12065,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9462,6 +12074,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9478,6 +12093,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9505,7 +12123,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9533,7 +12151,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9542,6 +12160,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9563,7 +12184,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9572,6 +12193,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9589,6 +12213,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9616,7 +12243,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9644,7 +12271,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9664,7 +12291,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9674,6 +12301,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9693,7 +12323,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9703,6 +12333,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9722,7 +12355,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9732,6 +12365,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9751,7 +12387,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9761,6 +12397,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9780,7 +12419,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9800,7 +12439,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9809,6 +12448,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9831,7 +12473,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9851,7 +12493,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9871,7 +12513,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9880,6 +12522,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9901,7 +12546,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9910,6 +12555,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9931,7 +12579,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9940,6 +12588,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9961,7 +12612,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9970,6 +12621,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9991,7 +12645,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10011,7 +12665,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10032,7 +12686,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10053,7 +12707,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10062,6 +12716,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10084,7 +12741,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10093,6 +12750,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10115,7 +12775,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10125,6 +12785,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ 
-10144,7 +12807,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10154,6 +12817,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10173,7 +12839,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10183,6 +12849,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10202,7 +12871,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10211,6 +12880,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10235,7 +12907,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10244,6 +12916,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10266,7 +12941,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10275,6 +12950,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10294,6 +12972,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10321,7 +13002,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10349,7 +13030,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10358,6 +13039,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10379,7 +13063,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10388,6 +13072,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10409,7 +13096,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10418,6 +13105,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10434,6 +13124,9 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10461,7 +13154,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10489,7 +13182,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10499,6 +13192,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10518,7 +13214,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10528,6 +13224,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10547,7 +13246,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10556,6 +13255,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10577,7 +13279,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10587,6 +13289,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10606,7 +13311,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10616,6 +13321,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10630,6 +13338,9 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10657,7 +13368,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10685,7 +13396,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10694,6 +13405,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10715,7 +13429,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10724,6 +13438,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10741,6 +13458,9 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10768,7 +13488,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10796,7 +13516,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10816,7 +13536,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10825,11 +13545,14 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -10849,7 +13572,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -10858,6 +13581,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10879,7 +13605,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10893,6 +13619,9 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10920,7 +13649,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -10948,7 +13677,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10967,7 +13696,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + m.FullyLabeledReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -10986,7 +13715,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -11005,7 +13734,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -11024,7 +13753,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -11043,7 +13772,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11052,6 +13781,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11069,6 +13801,9 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11096,7 +13831,7 @@ func (m *ReplicationControllerDummy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11119,6 +13854,9 @@ func (m *ReplicationControllerDummy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11146,7 +13884,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11174,7 +13912,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -11188,6 +13926,9 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11215,7 +13956,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11243,7 +13984,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11252,11 
+13993,14 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11271,6 +14015,9 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11298,7 +14045,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11326,7 +14073,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11335,11 +14082,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11359,7 +14109,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11368,11 +14118,14 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11387,6 +14140,9 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11414,7 +14170,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11442,7 +14198,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11452,6 +14208,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11471,7 +14230,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11480,6 +14239,9 @@ func (m 
*RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11497,6 +14259,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11524,7 +14289,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11552,7 +14317,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11562,6 +14327,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11581,7 +14349,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11590,6 +14358,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11607,6 +14378,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11634,7 +14408,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11662,7 +14436,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11672,6 +14446,9 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11691,7 +14468,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11701,6 +14478,9 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11716,6 +14496,9 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11743,7 +14526,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire 
|= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11771,7 +14554,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11781,6 +14564,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11800,7 +14586,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11809,11 +14595,14 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.SELinuxOptions == nil { - m.SELinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} + m.SELinuxOptions = &v11.SELinuxOptions{} } if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11828,6 +14617,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11855,7 +14647,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -11883,7 +14675,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11892,6 +14684,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11913,7 +14708,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11922,6 +14717,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11943,7 +14741,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -11952,6 +14750,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11968,6 +14769,9 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11995,7 +14799,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12023,7 +14827,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + 
m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -12037,6 +14841,9 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12064,7 +14871,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12092,7 +14899,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -12111,7 +14918,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -12120,6 +14927,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12140,7 +14950,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12157,7 +14967,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12167,6 +14977,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -12183,7 +14996,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12193,6 +15006,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -12229,7 +15045,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12239,6 +15055,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12253,6 +15072,9 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12280,7 +15102,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -12308,7 +15130,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -12318,6 +15140,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12337,7 +15162,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -12346,6 +15171,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12363,6 +15191,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12429,10 +15260,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -12461,6 +15295,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -12479,242 +15316,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 3695 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4f, 0x6c, 0x1b, 0x47, - 0x77, 0xf7, 0x92, 0x94, 0x48, 0x3d, 0xfd, 0x1f, 0xc9, 0x12, 0x3f, 0x3b, 0x16, 0xfd, 0x6d, 0x00, - 0xd7, 0x49, 0x6d, 0x32, 0x76, 0x6c, 0x7f, 0xae, 0x8d, 0x26, 0x11, 0x25, 0xcb, 0x56, 0xaa, 0x3f, - 0xcc, 0x50, 0x72, 0x83, 0xa0, 0x49, 0xb3, 0x22, 0x47, 0xd4, 0x5a, 0xcb, 0xdd, 0xcd, 0xce, 0x52, - 0x11, 0x81, 0x1e, 0x7a, 0x28, 0x0a, 0x14, 0x68, 0xd1, 0x5e, 0xd2, 0xf6, 0xd8, 0xa0, 0x40, 0x4f, - 0x2d, 0xda, 0x5b, 0x7b, 0x08, 0x02, 0x14, 0x48, 0x01, 0xa3, 0x48, 0x8b, 0xdc, 0x9a, 0x93, 0xd0, - 0x28, 0xa7, 0xa2, 0xa7, 0xde, 0x0a, 0x1f, 0x8a, 0x62, 0x66, 0x67, 0xff, 0xef, 0x8a, 0x2b, 0xc5, - 0x16, 0x8a, 0xe2, 0xbb, 0x89, 0xf3, 0xde, 0xfb, 0xbd, 0x37, 0x33, 0x6f, 0xde, 0x7b, 0x33, 0xfb, - 0x04, 0x2b, 0xfb, 0xf7, 0x69, 0x55, 0x35, 0x6a, 0xfb, 0xbd, 0x1d, 0x62, 0xe9, 0xc4, 0x26, 0xb4, - 0x76, 0x40, 0xf4, 0xb6, 0x61, 0xd5, 0x04, 0x41, 0x31, 0xd5, 0x1a, 0x39, 0xb4, 0x89, 0x4e, 0x55, - 0x43, 0xa7, 0xb5, 0x83, 0x5b, 0x3b, 0xc4, 0x56, 0x6e, 0xd5, 0x3a, 0x44, 0x27, 0x96, 0x62, 0x93, - 0x76, 0xd5, 0xb4, 0x0c, 0xdb, 0x40, 0x57, 0x1c, 0xf6, 0xaa, 0x62, 0xaa, 0x55, 0x9f, 0xbd, 0x2a, - 0xd8, 0x2f, 0xdd, 0xec, 0xa8, 0xf6, 0x5e, 0x6f, 0xa7, 0xda, 0x32, 0xba, 0xb5, 0x8e, 0xd1, 0x31, - 0x6a, 0x5c, 0x6a, 0xa7, 0xb7, 0xcb, 0x7f, 0xf1, 0x1f, 0xfc, 0x2f, 0x07, 0xed, 0x92, 0x1c, 0x50, - 0xde, 0x32, 0x2c, 0x52, 0x3b, 0x88, 0x69, 0xbc, 0x74, 0xc7, 0xe7, 0xe9, 0x2a, 0xad, 0x3d, 0x55, - 0x27, 0x56, 0xbf, 0x66, 0xee, 0x77, 0xd8, 0x00, 0xad, 0x75, 0x89, 0xad, 0x24, 0x49, 0xd5, 0xd2, - 0xa4, 0xac, 0x9e, 0x6e, 0xab, 0x5d, 0x12, 0x13, 
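The Unmarshal hunks above (and the matching ones further down) apply two mechanical changes from the newer gogo/protobuf generator: the varint accumulation is rewritten as uint64(b&0x7F) << shift, and every index derived from an untrusted length (postIndex, iNdEx + skippy) gains a < 0 guard so that a huge declared length cannot wrap a signed int and slip past the existing bounds test. A minimal, self-contained sketch of that decode-with-guards pattern follows; the names readUvarint and readBytes are illustrative only and do not appear in the vendored files.

package main

import (
	"errors"
	"fmt"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// readUvarint mirrors the generated decode loop: accumulate 7 bits per byte
// into shift positions 0, 7, 14, ... until the continuation bit is clear.
func readUvarint(data []byte, index int) (value uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		if index >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[index]
		index++
		value |= uint64(b&0x7F) << shift // same form as the regenerated code
		if b < 0x80 {
			return value, index, nil
		}
	}
}

// readBytes shows why the new `postIndex < 0` guard matters: msglen comes
// from untrusted input, and adding it to the cursor could wrap a signed int.
func readBytes(data []byte, index int) ([]byte, int, error) {
	v, index, err := readUvarint(data, index)
	if err != nil {
		return nil, 0, err
	}
	msglen := int(v)
	if msglen < 0 {
		return nil, 0, errInvalidLength
	}
	postIndex := index + msglen
	if postIndex < 0 { // overflow guard added by the regeneration
		return nil, 0, errInvalidLength
	}
	if postIndex > len(data) {
		return nil, 0, errors.New("unexpected EOF")
	}
	return data[index:postIndex], postIndex, nil
}

func main() {
	// 0x03 length prefix followed by three payload bytes.
	payload, _, err := readBytes([]byte{0x03, 'a', 'b', 'c'}, 0)
	fmt.Println(string(payload), err) // abc <nil>
}

Without the postIndex guard, a length close to the maximum int value could make index + msglen wrap negative and bypass the postIndex > len(data) check entirely.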
0xb8, 0x37, 0x48, 0x80, 0xb6, 0xf6, 0x48, 0x57, - 0x89, 0xc9, 0xbd, 0x9d, 0x26, 0xd7, 0xb3, 0x55, 0xad, 0xa6, 0xea, 0x36, 0xb5, 0xad, 0xa8, 0x90, - 0x7c, 0x07, 0xa6, 0x16, 0x35, 0xcd, 0xf8, 0x9c, 0xb4, 0x97, 0x9a, 0xab, 0xcb, 0x96, 0x7a, 0x40, - 0x2c, 0x74, 0x15, 0x0a, 0xba, 0xd2, 0x25, 0x65, 0xe9, 0xaa, 0x74, 0x7d, 0xa4, 0x3e, 0xf6, 0xfc, - 0xa8, 0x72, 0xe1, 0xf8, 0xa8, 0x52, 0xd8, 0x50, 0xba, 0x04, 0x73, 0x8a, 0xfc, 0x10, 0xa6, 0x85, - 0xd4, 0x8a, 0x46, 0x0e, 0x9f, 0x1a, 0x5a, 0xaf, 0x4b, 0xd0, 0x35, 0x18, 0x6e, 0x73, 0x00, 0x21, - 0x38, 0x21, 0x04, 0x87, 0x1d, 0x58, 0x2c, 0xa8, 0x32, 0x85, 0x49, 0x21, 0xfc, 0xc4, 0xa0, 0x76, - 0x43, 0xb1, 0xf7, 0xd0, 0x6d, 0x00, 0x53, 0xb1, 0xf7, 0x1a, 0x16, 0xd9, 0x55, 0x0f, 0x85, 0x38, - 0x12, 0xe2, 0xd0, 0xf0, 0x28, 0x38, 0xc0, 0x85, 0x6e, 0x40, 0xc9, 0x22, 0x4a, 0x7b, 0x53, 0xd7, - 0xfa, 0xe5, 0xdc, 0x55, 0xe9, 0x7a, 0xa9, 0x3e, 0x25, 0x24, 0x4a, 0x58, 0x8c, 0x63, 0x8f, 0x43, - 0xfe, 0x22, 0x07, 0x23, 0xcb, 0x0a, 0xe9, 0x1a, 0x7a, 0x93, 0xd8, 0xe8, 0x53, 0x28, 0xb1, 0xed, - 0x6a, 0x2b, 0xb6, 0xc2, 0xb5, 0x8d, 0xde, 0x7e, 0xab, 0xea, 0xbb, 0x93, 0xb7, 0x7a, 0x55, 0x73, - 0xbf, 0xc3, 0x06, 0x68, 0x95, 0x71, 0x57, 0x0f, 0x6e, 0x55, 0x37, 0x77, 0x9e, 0x91, 0x96, 0xbd, - 0x4e, 0x6c, 0xc5, 0xb7, 0xcf, 0x1f, 0xc3, 0x1e, 0x2a, 0xda, 0x80, 0x02, 0x35, 0x49, 0x8b, 0x5b, - 0x36, 0x7a, 0xfb, 0x46, 0xf5, 0x44, 0x67, 0xad, 0x7a, 0x96, 0x35, 0x4d, 0xd2, 0xf2, 0x57, 0x9c, - 0xfd, 0xc2, 0x1c, 0x07, 0x3d, 0x85, 0x61, 0x6a, 0x2b, 0x76, 0x8f, 0x96, 0xf3, 0x1c, 0xb1, 0x9a, - 0x19, 0x91, 0x4b, 0xf9, 0x9b, 0xe1, 0xfc, 0xc6, 0x02, 0x4d, 0xfe, 0x8f, 0x1c, 0x20, 0x8f, 0x77, - 0xc9, 0xd0, 0xdb, 0xaa, 0xad, 0x1a, 0x3a, 0x7a, 0x00, 0x05, 0xbb, 0x6f, 0xba, 0x2e, 0x70, 0xcd, - 0x35, 0x68, 0xab, 0x6f, 0x92, 0x17, 0x47, 0x95, 0xb9, 0xb8, 0x04, 0xa3, 0x60, 0x2e, 0x83, 0xd6, - 0x3c, 0x53, 0x73, 0x5c, 0xfa, 0x4e, 0x58, 0xf5, 0x8b, 0xa3, 0x4a, 0xc2, 0x61, 0xab, 0x7a, 0x48, - 0x61, 0x03, 0xd1, 0x01, 0x20, 0x4d, 0xa1, 0xf6, 0x96, 0xa5, 0xe8, 0xd4, 0xd1, 0xa4, 0x76, 0x89, - 0x58, 0x84, 0x37, 0xb3, 0x6d, 0x1a, 0x93, 0xa8, 0x5f, 0x12, 0x56, 0xa0, 0xb5, 0x18, 0x1a, 0x4e, - 0xd0, 0xc0, 0xbc, 0xd9, 0x22, 0x0a, 0x35, 0xf4, 0x72, 0x21, 0xec, 0xcd, 0x98, 0x8f, 0x62, 0x41, - 0x45, 0x6f, 0x40, 0xb1, 0x4b, 0x28, 0x55, 0x3a, 0xa4, 0x3c, 0xc4, 0x19, 0x27, 0x05, 0x63, 0x71, - 0xdd, 0x19, 0xc6, 0x2e, 0x5d, 0xfe, 0x4a, 0x82, 0x71, 0x6f, 0xe5, 0xd6, 0x54, 0x6a, 0xa3, 0xdf, - 0x8a, 0xf9, 0x61, 0x35, 0xdb, 0x94, 0x98, 0x34, 0xf7, 0x42, 0xcf, 0xe7, 0xdd, 0x91, 0x80, 0x0f, - 0xae, 0xc3, 0x90, 0x6a, 0x93, 0x2e, 0xdb, 0x87, 0xfc, 0xf5, 0xd1, 0xdb, 0xd7, 0xb3, 0xba, 0x4c, - 0x7d, 0x5c, 0x80, 0x0e, 0xad, 0x32, 0x71, 0xec, 0xa0, 0xc8, 0x7f, 0x5a, 0x08, 0x98, 0xcf, 0x5c, - 0x13, 0x7d, 0x0c, 0x25, 0x4a, 0x34, 0xd2, 0xb2, 0x0d, 0x4b, 0x98, 0xff, 0x76, 0x46, 0xf3, 0x95, - 0x1d, 0xa2, 0x35, 0x85, 0x68, 0x7d, 0x8c, 0xd9, 0xef, 0xfe, 0xc2, 0x1e, 0x24, 0xfa, 0x00, 0x4a, - 0x36, 0xe9, 0x9a, 0x9a, 0x62, 0x13, 0x71, 0x8e, 0x5e, 0x0f, 0x4e, 0x81, 0x79, 0x0e, 0x03, 0x6b, - 0x18, 0xed, 0x2d, 0xc1, 0xc6, 0x8f, 0x8f, 0xb7, 0x24, 0xee, 0x28, 0xf6, 0x60, 0xd0, 0x01, 0x4c, - 0xf4, 0xcc, 0x36, 0xe3, 0xb4, 0x59, 0x14, 0xec, 0xf4, 0x85, 0x27, 0xdd, 0xcb, 0xba, 0x36, 0xdb, - 0x21, 0xe9, 0xfa, 0x9c, 0xd0, 0x35, 0x11, 0x1e, 0xc7, 0x11, 0x2d, 0x68, 0x11, 0x26, 0xbb, 0xaa, - 0xce, 0xe2, 0x52, 0xbf, 0x49, 0x5a, 0x86, 0xde, 0xa6, 0xdc, 0xad, 0x86, 0xea, 0xf3, 0x02, 0x60, - 0x72, 0x3d, 0x4c, 0xc6, 0x51, 0x7e, 0xf4, 0x3e, 0x20, 0x77, 0x1a, 0x8f, 0x9d, 0x20, 0xae, 0x1a, - 0x3a, 0xf7, 0xb9, 0xbc, 0xef, 0xdc, 0x5b, 0x31, 0x0e, 0x9c, 0x20, 0x85, 
0xd6, 0x60, 0xd6, 0x22, - 0x07, 0x2a, 0x9b, 0xe3, 0x13, 0x95, 0xda, 0x86, 0xd5, 0x5f, 0x53, 0xbb, 0xaa, 0x5d, 0x1e, 0xe6, - 0x36, 0x95, 0x8f, 0x8f, 0x2a, 0xb3, 0x38, 0x81, 0x8e, 0x13, 0xa5, 0xe4, 0x3f, 0x1b, 0x86, 0xc9, - 0x48, 0xbc, 0x41, 0x4f, 0x61, 0xae, 0xd5, 0xb3, 0x2c, 0xa2, 0xdb, 0x1b, 0xbd, 0xee, 0x0e, 0xb1, - 0x9a, 0xad, 0x3d, 0xd2, 0xee, 0x69, 0xa4, 0xcd, 0x1d, 0x65, 0xa8, 0xbe, 0x20, 0x2c, 0x9e, 0x5b, - 0x4a, 0xe4, 0xc2, 0x29, 0xd2, 0x6c, 0x15, 0x74, 0x3e, 0xb4, 0xae, 0x52, 0xea, 0x61, 0xe6, 0x38, - 0xa6, 0xb7, 0x0a, 0x1b, 0x31, 0x0e, 0x9c, 0x20, 0xc5, 0x6c, 0x6c, 0x13, 0xaa, 0x5a, 0xa4, 0x1d, - 0xb5, 0x31, 0x1f, 0xb6, 0x71, 0x39, 0x91, 0x0b, 0xa7, 0x48, 0xa3, 0xbb, 0x30, 0xea, 0x68, 0xe3, - 0xfb, 0x27, 0x36, 0x7a, 0x46, 0x80, 0x8d, 0x6e, 0xf8, 0x24, 0x1c, 0xe4, 0x63, 0x53, 0x33, 0x76, - 0x28, 0xb1, 0x0e, 0x48, 0x3b, 0x7d, 0x83, 0x37, 0x63, 0x1c, 0x38, 0x41, 0x8a, 0x4d, 0xcd, 0xf1, - 0xc0, 0xd8, 0xd4, 0x86, 0xc3, 0x53, 0xdb, 0x4e, 0xe4, 0xc2, 0x29, 0xd2, 0xcc, 0x8f, 0x1d, 0x93, - 0x17, 0x0f, 0x14, 0x55, 0x53, 0x76, 0x34, 0x52, 0x2e, 0x86, 0xfd, 0x78, 0x23, 0x4c, 0xc6, 0x51, - 0x7e, 0xf4, 0x18, 0xa6, 0x9d, 0xa1, 0x6d, 0x5d, 0xf1, 0x40, 0x4a, 0x1c, 0xe4, 0x67, 0x02, 0x64, - 0x7a, 0x23, 0xca, 0x80, 0xe3, 0x32, 0xe8, 0x01, 0x4c, 0xb4, 0x0c, 0x4d, 0xe3, 0xfe, 0xb8, 0x64, - 0xf4, 0x74, 0xbb, 0x3c, 0xc2, 0x51, 0x10, 0x3b, 0x8f, 0x4b, 0x21, 0x0a, 0x8e, 0x70, 0x22, 0x02, - 0xd0, 0x72, 0x13, 0x0e, 0x2d, 0x03, 0x8f, 0x8f, 0xb7, 0xb2, 0xc6, 0x00, 0x2f, 0x55, 0xf9, 0x35, - 0x80, 0x37, 0x44, 0x71, 0x00, 0x58, 0xfe, 0x67, 0x09, 0xe6, 0x53, 0x42, 0x07, 0x7a, 0x37, 0x94, - 0x62, 0x7f, 0x35, 0x92, 0x62, 0x2f, 0xa7, 0x88, 0x05, 0xf2, 0xac, 0x0e, 0xe3, 0x16, 0x9b, 0x95, - 0xde, 0x71, 0x58, 0x44, 0x8c, 0xbc, 0x3b, 0x60, 0x1a, 0x38, 0x28, 0xe3, 0xc7, 0xfc, 0xe9, 0xe3, - 0xa3, 0xca, 0x78, 0x88, 0x86, 0xc3, 0xf0, 0xf2, 0x9f, 0xe7, 0x00, 0x96, 0x89, 0xa9, 0x19, 0xfd, - 0x2e, 0xd1, 0xcf, 0xa3, 0x86, 0xda, 0x0c, 0xd5, 0x50, 0x37, 0x07, 0x6d, 0x8f, 0x67, 0x5a, 0x6a, - 0x11, 0xf5, 0x9b, 0x91, 0x22, 0xaa, 0x96, 0x1d, 0xf2, 0xe4, 0x2a, 0xea, 0xdf, 0xf2, 0x30, 0xe3, - 0x33, 0xfb, 0x65, 0xd4, 0xc3, 0xd0, 0x1e, 0xff, 0x4a, 0x64, 0x8f, 0xe7, 0x13, 0x44, 0x5e, 0x59, - 0x1d, 0xf5, 0xf2, 0xeb, 0x19, 0xf4, 0x0c, 0x26, 0x58, 0xe1, 0xe4, 0xb8, 0x07, 0x2f, 0xcb, 0x86, - 0x4f, 0x5d, 0x96, 0x79, 0x09, 0x74, 0x2d, 0x84, 0x84, 0x23, 0xc8, 0x29, 0x65, 0x60, 0xf1, 0x55, - 0x97, 0x81, 0xf2, 0xd7, 0x12, 0x4c, 0xf8, 0xdb, 0x74, 0x0e, 0x45, 0xdb, 0x46, 0xb8, 0x68, 0x7b, - 0x23, 0xb3, 0x8b, 0xa6, 0x54, 0x6d, 0xff, 0xcd, 0x0a, 0x7c, 0x8f, 0x89, 0x1d, 0xf0, 0x1d, 0xa5, - 0xb5, 0x3f, 0xf8, 0x8e, 0x87, 0xbe, 0x90, 0x00, 0x89, 0x2c, 0xb0, 0xa8, 0xeb, 0x86, 0xad, 0x38, - 0xb1, 0xd2, 0x31, 0x6b, 0x35, 0xb3, 0x59, 0xae, 0xc6, 0xea, 0x76, 0x0c, 0xeb, 0x91, 0x6e, 0x5b, - 0x7d, 0x7f, 0x47, 0xe2, 0x0c, 0x38, 0xc1, 0x00, 0xa4, 0x00, 0x58, 0x02, 0x73, 0xcb, 0x10, 0x07, - 0xf9, 0x66, 0x86, 0x98, 0xc7, 0x04, 0x96, 0x0c, 0x7d, 0x57, 0xed, 0xf8, 0x61, 0x07, 0x7b, 0x40, - 0x38, 0x00, 0x7a, 0xe9, 0x11, 0xcc, 0xa7, 0x58, 0x8b, 0xa6, 0x20, 0xbf, 0x4f, 0xfa, 0xce, 0xb2, - 0x61, 0xf6, 0x27, 0x9a, 0x85, 0xa1, 0x03, 0x45, 0xeb, 0x39, 0xe1, 0x77, 0x04, 0x3b, 0x3f, 0x1e, - 0xe4, 0xee, 0x4b, 0xf2, 0x57, 0x43, 0x41, 0xdf, 0xe1, 0x15, 0xf3, 0x75, 0x76, 0x69, 0x35, 0x35, - 0xb5, 0xa5, 0x50, 0x51, 0x08, 0x8d, 0x39, 0x17, 0x56, 0x67, 0x0c, 0x7b, 0xd4, 0x50, 0x6d, 0x9d, - 0x7b, 0xb5, 0xb5, 0x75, 0xfe, 0xe5, 0xd4, 0xd6, 0xbf, 0x0d, 0x25, 0xea, 0x56, 0xd5, 0x05, 0x0e, - 0x79, 0xeb, 0x14, 0xf1, 0x55, 0x14, 0xd4, 0x9e, 0x02, 0xaf, 0x94, 0xf6, 0x40, 0x93, 0x8a, 0xe8, - 
0xa1, 0x53, 0x16, 0xd1, 0x2f, 0xb5, 0xf0, 0x65, 0x31, 0xd5, 0x54, 0x7a, 0x94, 0xb4, 0x79, 0x20, - 0x2a, 0xf9, 0x31, 0xb5, 0xc1, 0x47, 0xb1, 0xa0, 0xa2, 0x8f, 0x43, 0x2e, 0x5b, 0x3a, 0x8b, 0xcb, - 0x4e, 0xa4, 0xbb, 0x2b, 0xda, 0x86, 0x79, 0xd3, 0x32, 0x3a, 0x16, 0xa1, 0x74, 0x99, 0x28, 0x6d, - 0x4d, 0xd5, 0x89, 0xbb, 0x3e, 0x4e, 0x45, 0x74, 0xf9, 0xf8, 0xa8, 0x32, 0xdf, 0x48, 0x66, 0xc1, - 0x69, 0xb2, 0xf2, 0xf3, 0x02, 0x4c, 0x45, 0x33, 0x60, 0x4a, 0x91, 0x2a, 0x9d, 0xa9, 0x48, 0xbd, - 0x11, 0x38, 0x0c, 0x4e, 0x05, 0x1f, 0x78, 0xc1, 0x89, 0x1d, 0x88, 0x45, 0x98, 0x14, 0xd1, 0xc0, - 0x25, 0x8a, 0x32, 0xdd, 0xdb, 0xfd, 0xed, 0x30, 0x19, 0x47, 0xf9, 0x59, 0xe9, 0xe9, 0x57, 0x94, - 0x2e, 0x48, 0x21, 0x5c, 0x7a, 0x2e, 0x46, 0x19, 0x70, 0x5c, 0x06, 0xad, 0xc3, 0x4c, 0x4f, 0x8f, - 0x43, 0x39, 0xde, 0x78, 0x59, 0x40, 0xcd, 0x6c, 0xc7, 0x59, 0x70, 0x92, 0x1c, 0xda, 0x0d, 0x55, - 0xa3, 0xc3, 0x3c, 0xc2, 0xde, 0xce, 0x7c, 0x76, 0x32, 0x97, 0xa3, 0xe8, 0x21, 0x8c, 0x5b, 0xfc, - 0xde, 0xe1, 0x1a, 0xec, 0xd4, 0xee, 0x17, 0x85, 0xd8, 0x38, 0x0e, 0x12, 0x71, 0x98, 0x37, 0xa1, - 0xdc, 0x2e, 0x65, 0x2d, 0xb7, 0xe5, 0x7f, 0x94, 0x82, 0x49, 0xc8, 0x2b, 0x81, 0x07, 0xbd, 0x32, - 0xc5, 0x24, 0x02, 0xd5, 0x91, 0x91, 0x5c, 0xfd, 0xde, 0x3b, 0x55, 0xf5, 0xeb, 0x27, 0xcf, 0xc1, - 0xe5, 0xef, 0x97, 0x12, 0xcc, 0xad, 0x34, 0x1f, 0x5b, 0x46, 0xcf, 0x74, 0xcd, 0xd9, 0x34, 0x9d, - 0x75, 0xfd, 0x05, 0x14, 0xac, 0x9e, 0xe6, 0xce, 0xe3, 0x75, 0x77, 0x1e, 0xb8, 0xa7, 0xb1, 0x79, - 0xcc, 0x44, 0xa4, 0x9c, 0x49, 0x30, 0x01, 0xb4, 0x01, 0xc3, 0x96, 0xa2, 0x77, 0x88, 0x9b, 0x56, - 0xaf, 0x0d, 0xb0, 0x7e, 0x75, 0x19, 0x33, 0xf6, 0x40, 0xf1, 0xc6, 0xa5, 0xb1, 0x40, 0x91, 0xff, - 0x48, 0x82, 0xc9, 0x27, 0x5b, 0x5b, 0x8d, 0x55, 0x9d, 0x9f, 0x68, 0xfe, 0xb6, 0x7a, 0x15, 0x0a, - 0xa6, 0x62, 0xef, 0x45, 0x33, 0x3d, 0xa3, 0x61, 0x4e, 0x41, 0x1f, 0x42, 0x91, 0x45, 0x12, 0xa2, - 0xb7, 0x33, 0x96, 0xda, 0x02, 0xbe, 0xee, 0x08, 0xf9, 0x15, 0xa2, 0x18, 0xc0, 0x2e, 0x9c, 0xbc, - 0x0f, 0xb3, 0x01, 0x73, 0xd8, 0x7a, 0x3c, 0x65, 0xd9, 0x11, 0x35, 0x61, 0x88, 0x69, 0x66, 0x39, - 0x30, 0x9f, 0xe1, 0x31, 0x33, 0x32, 0x25, 0xbf, 0xd2, 0x61, 0xbf, 0x28, 0x76, 0xb0, 0xe4, 0x75, - 0x18, 0xe7, 0x0f, 0xca, 0x86, 0x65, 0xf3, 0x65, 0x41, 0x57, 0x20, 0xdf, 0x55, 0x75, 0x91, 0x67, - 0x47, 0x85, 0x4c, 0x9e, 0xe5, 0x08, 0x36, 0xce, 0xc9, 0xca, 0xa1, 0x88, 0x3c, 0x3e, 0x59, 0x39, - 0xc4, 0x6c, 0x5c, 0x7e, 0x0c, 0x45, 0xb1, 0xdc, 0x41, 0xa0, 0xfc, 0xc9, 0x40, 0xf9, 0x04, 0xa0, - 0x4d, 0x28, 0xae, 0x36, 0xea, 0x9a, 0xe1, 0x54, 0x5d, 0x2d, 0xb5, 0x6d, 0x45, 0xf7, 0x62, 0x69, - 0x75, 0x19, 0x63, 0x4e, 0x41, 0x32, 0x0c, 0x93, 0xc3, 0x16, 0x31, 0x6d, 0xee, 0x11, 0x23, 0x75, - 0x60, 0xbb, 0xfc, 0x88, 0x8f, 0x60, 0x41, 0x91, 0xff, 0x38, 0x07, 0x45, 0xb1, 0x1c, 0xe7, 0x70, - 0x0b, 0x5b, 0x0b, 0xdd, 0xc2, 0xde, 0xcc, 0xe6, 0x1a, 0xa9, 0x57, 0xb0, 0xad, 0xc8, 0x15, 0xec, - 0x46, 0x46, 0xbc, 0x93, 0xef, 0x5f, 0x7f, 0x27, 0xc1, 0x44, 0xd8, 0x29, 0xd1, 0x5d, 0x18, 0x65, - 0x09, 0x47, 0x6d, 0x91, 0x0d, 0xbf, 0xce, 0xf5, 0x1e, 0x61, 0x9a, 0x3e, 0x09, 0x07, 0xf9, 0x50, - 0xc7, 0x13, 0x63, 0x7e, 0x24, 0x26, 0x9d, 0xbe, 0xa4, 0x3d, 0x5b, 0xd5, 0xaa, 0xce, 0xa7, 0x95, - 0xea, 0xaa, 0x6e, 0x6f, 0x5a, 0x4d, 0xdb, 0x52, 0xf5, 0x4e, 0x4c, 0x11, 0x77, 0xca, 0x20, 0xb2, - 0xfc, 0x0f, 0x12, 0x8c, 0x0a, 0x93, 0xcf, 0xe1, 0x56, 0xf1, 0x1b, 0xe1, 0x5b, 0xc5, 0xb5, 0x8c, - 0x07, 0x3c, 0xf9, 0x4a, 0xf1, 0x57, 0xbe, 0xe9, 0xec, 0x48, 0x33, 0xaf, 0xde, 0x33, 0xa8, 0x1d, - 0xf5, 0x6a, 0x76, 0x18, 0x31, 0xa7, 0xa0, 0x1e, 0x4c, 0xa9, 0x91, 0x18, 0x20, 0x96, 0xb6, 0x96, - 0xcd, 0x12, 0x4f, 0xac, 
0x5e, 0x16, 0xf0, 0x53, 0x51, 0x0a, 0x8e, 0xa9, 0x90, 0x09, 0xc4, 0xb8, - 0xd0, 0x07, 0x50, 0xd8, 0xb3, 0x6d, 0x33, 0xe1, 0xbd, 0x7a, 0x40, 0xe4, 0xf1, 0x4d, 0x28, 0xf1, - 0xd9, 0x6d, 0x6d, 0x35, 0x30, 0x87, 0x92, 0xff, 0xc7, 0x5f, 0x8f, 0xa6, 0xe3, 0xe3, 0x5e, 0x3c, - 0x95, 0xce, 0x12, 0x4f, 0x47, 0x93, 0x62, 0x29, 0x7a, 0x02, 0x79, 0x5b, 0xcb, 0x7a, 0x2d, 0x14, - 0x88, 0x5b, 0x6b, 0x4d, 0x3f, 0x20, 0x6d, 0xad, 0x35, 0x31, 0x83, 0x40, 0x9b, 0x30, 0xc4, 0xb2, - 0x0f, 0x3b, 0x82, 0xf9, 0xec, 0x47, 0x9a, 0xcd, 0xdf, 0x77, 0x08, 0xf6, 0x8b, 0x62, 0x07, 0x47, - 0xfe, 0x0c, 0xc6, 0x43, 0xe7, 0x14, 0x7d, 0x0a, 0x63, 0x9a, 0xa1, 0xb4, 0xeb, 0x8a, 0xa6, 0xe8, - 0x2d, 0xe2, 0x7e, 0x1c, 0xb8, 0x96, 0x74, 0xc3, 0x58, 0x0b, 0xf0, 0x89, 0x53, 0x3e, 0x2b, 0x94, - 0x8c, 0x05, 0x69, 0x38, 0x84, 0x28, 0x2b, 0x00, 0xfe, 0x1c, 0x51, 0x05, 0x86, 0x98, 0x9f, 0x39, - 0xf9, 0x64, 0xa4, 0x3e, 0xc2, 0x2c, 0x64, 0xee, 0x47, 0xb1, 0x33, 0x8e, 0x6e, 0x03, 0x50, 0xd2, - 0xb2, 0x88, 0xcd, 0x83, 0x41, 0x2e, 0xfc, 0x81, 0xb1, 0xe9, 0x51, 0x70, 0x80, 0x4b, 0xfe, 0x27, - 0x09, 0xc6, 0x37, 0x88, 0xfd, 0xb9, 0x61, 0xed, 0x37, 0x0c, 0x4d, 0x6d, 0xf5, 0xcf, 0x21, 0xd8, - 0xe2, 0x50, 0xb0, 0x7d, 0x6b, 0xc0, 0xce, 0x84, 0xac, 0x4b, 0x0b, 0xb9, 0xf2, 0xd7, 0x12, 0xcc, - 0x87, 0x38, 0x1f, 0xf9, 0x47, 0x77, 0x1b, 0x86, 0x4c, 0xc3, 0xb2, 0xdd, 0x44, 0x7c, 0x2a, 0x85, - 0x2c, 0x8c, 0x05, 0x52, 0x31, 0x83, 0xc1, 0x0e, 0x1a, 0x5a, 0x83, 0x9c, 0x6d, 0x08, 0x57, 0x3d, - 0x1d, 0x26, 0x21, 0x56, 0x1d, 0x04, 0x66, 0x6e, 0xcb, 0xc0, 0x39, 0xdb, 0x60, 0x1b, 0x51, 0x0e, - 0x71, 0x05, 0x83, 0xcf, 0x2b, 0x9a, 0x01, 0x86, 0xc2, 0xae, 0x65, 0x74, 0xcf, 0x3c, 0x07, 0x6f, - 0x23, 0x56, 0x2c, 0xa3, 0x8b, 0x39, 0x96, 0xfc, 0x8d, 0x04, 0xd3, 0x21, 0xce, 0x73, 0x08, 0xfc, - 0x1f, 0x84, 0x03, 0xff, 0x8d, 0xd3, 0x4c, 0x24, 0x25, 0xfc, 0x7f, 0x93, 0x8b, 0x4c, 0x83, 0x4d, - 0x18, 0xed, 0xc2, 0xa8, 0x69, 0xb4, 0x9b, 0x2f, 0xe1, 0x73, 0xe0, 0x24, 0xcb, 0x9b, 0x0d, 0x1f, - 0x0b, 0x07, 0x81, 0xd1, 0x21, 0x4c, 0xeb, 0x4a, 0x97, 0x50, 0x53, 0x69, 0x91, 0xe6, 0x4b, 0x78, - 0x20, 0xb9, 0xc8, 0xbf, 0x37, 0x44, 0x11, 0x71, 0x5c, 0x09, 0x5a, 0x87, 0xa2, 0x6a, 0xf2, 0x3a, - 0x4e, 0xd4, 0x2e, 0x03, 0xb3, 0xa8, 0x53, 0xf5, 0x39, 0xf1, 0x5c, 0xfc, 0xc0, 0x2e, 0x86, 0xfc, - 0xd7, 0x51, 0x6f, 0x60, 0xfe, 0x87, 0x1e, 0x43, 0x89, 0x37, 0x66, 0xb4, 0x0c, 0xcd, 0xfd, 0x32, - 0xc0, 0x76, 0xb6, 0x21, 0xc6, 0x5e, 0x1c, 0x55, 0x2e, 0x27, 0x3c, 0xfa, 0xba, 0x64, 0xec, 0x09, - 0xa3, 0x0d, 0x28, 0x98, 0x3f, 0xa5, 0x82, 0xe1, 0x49, 0x8e, 0x97, 0x2d, 0x1c, 0x47, 0xfe, 0xbd, - 0x7c, 0xc4, 0x5c, 0x9e, 0xea, 0x9e, 0xbd, 0xb4, 0x5d, 0xf7, 0x2a, 0xa6, 0xd4, 0x9d, 0xdf, 0x81, - 0xa2, 0xc8, 0xf0, 0xc2, 0x99, 0x7f, 0x71, 0x1a, 0x67, 0x0e, 0x66, 0x31, 0xef, 0xc2, 0xe2, 0x0e, - 0xba, 0xc0, 0xe8, 0x13, 0x18, 0x26, 0x8e, 0x0a, 0x27, 0x37, 0xde, 0x3b, 0x8d, 0x0a, 0x3f, 0xae, - 0xfa, 0x85, 0xaa, 0x18, 0x13, 0xa8, 0xe8, 0x5d, 0xb6, 0x5e, 0x8c, 0x97, 0x5d, 0x02, 0x69, 0xb9, - 0xc0, 0xd3, 0xd5, 0x15, 0x67, 0xda, 0xde, 0xf0, 0x8b, 0xa3, 0x0a, 0xf8, 0x3f, 0x71, 0x50, 0x42, - 0xfe, 0x17, 0x09, 0xa6, 0xf9, 0x0a, 0xb5, 0x7a, 0x96, 0x6a, 0xf7, 0xcf, 0x2d, 0x31, 0x3d, 0x0d, - 0x25, 0xa6, 0x3b, 0x03, 0x96, 0x25, 0x66, 0x61, 0x6a, 0x72, 0xfa, 0x56, 0x82, 0x8b, 0x31, 0xee, - 0x73, 0x88, 0x8b, 0xdb, 0xe1, 0xb8, 0xf8, 0xd6, 0x69, 0x27, 0x94, 0x12, 0x1b, 0xff, 0x6b, 0x3a, - 0x61, 0x3a, 0xfc, 0xa4, 0xdc, 0x06, 0x30, 0x2d, 0xf5, 0x40, 0xd5, 0x48, 0x47, 0x7c, 0x04, 0x2f, - 0x05, 0x5a, 0x9c, 0x3c, 0x0a, 0x0e, 0x70, 0x21, 0x0a, 0x73, 0x6d, 0xb2, 0xab, 0xf4, 0x34, 0x7b, - 0xb1, 0xdd, 0x5e, 0x52, 0x4c, 0x65, 0x47, 0xd5, 
0x54, 0x5b, 0x15, 0xcf, 0x05, 0x23, 0xf5, 0x87, - 0xce, 0xc7, 0xe9, 0x24, 0x8e, 0x17, 0x47, 0x95, 0x2b, 0x49, 0x5f, 0x87, 0x5c, 0x96, 0x3e, 0x4e, - 0x81, 0x46, 0x7d, 0x28, 0x5b, 0xe4, 0xb3, 0x9e, 0x6a, 0x91, 0xf6, 0xb2, 0x65, 0x98, 0x21, 0xb5, - 0x79, 0xae, 0xf6, 0xd7, 0x8f, 0x8f, 0x2a, 0x65, 0x9c, 0xc2, 0x33, 0x58, 0x71, 0x2a, 0x3c, 0x7a, - 0x06, 0x33, 0x8a, 0x68, 0x46, 0x0b, 0x6a, 0x75, 0x4e, 0xc9, 0xfd, 0xe3, 0xa3, 0xca, 0xcc, 0x62, - 0x9c, 0x3c, 0x58, 0x61, 0x12, 0x28, 0xaa, 0x41, 0xf1, 0x80, 0xf7, 0xad, 0xd1, 0xf2, 0x10, 0xc7, - 0x67, 0x89, 0xa0, 0xe8, 0xb4, 0xb2, 0x31, 0xcc, 0xe1, 0x95, 0x26, 0x3f, 0x7d, 0x2e, 0x17, 0xbb, - 0x50, 0xb2, 0x5a, 0x52, 0x9c, 0x78, 0xfe, 0x62, 0x5c, 0xf2, 0xa3, 0xd6, 0x13, 0x9f, 0x84, 0x83, - 0x7c, 0xe8, 0x63, 0x18, 0xd9, 0x13, 0xaf, 0x12, 0xb4, 0x5c, 0xcc, 0x94, 0x84, 0x43, 0xaf, 0x18, - 0xf5, 0x69, 0xa1, 0x62, 0xc4, 0x1d, 0xa6, 0xd8, 0x47, 0x44, 0x6f, 0x40, 0x91, 0xff, 0x58, 0x5d, - 0xe6, 0xcf, 0x71, 0x25, 0x3f, 0xb6, 0x3d, 0x71, 0x86, 0xb1, 0x4b, 0x77, 0x59, 0x57, 0x1b, 0x4b, - 0xfc, 0x59, 0x38, 0xc2, 0xba, 0xda, 0x58, 0xc2, 0x2e, 0x1d, 0x7d, 0x0a, 0x45, 0x4a, 0xd6, 0x54, - 0xbd, 0x77, 0x58, 0x86, 0x4c, 0x1f, 0x95, 0x9b, 0x8f, 0x38, 0x77, 0xe4, 0x61, 0xcc, 0xd7, 0x20, - 0xe8, 0xd8, 0x85, 0x45, 0x7b, 0x30, 0x62, 0xf5, 0xf4, 0x45, 0xba, 0x4d, 0x89, 0x55, 0x1e, 0xe5, - 0x3a, 0x06, 0x85, 0x73, 0xec, 0xf2, 0x47, 0xb5, 0x78, 0x2b, 0xe4, 0x71, 0x60, 0x1f, 0x1c, 0xfd, - 0xa1, 0x04, 0x88, 0xf6, 0x4c, 0x53, 0x23, 0x5d, 0xa2, 0xdb, 0x8a, 0xc6, 0xdf, 0xe2, 0x68, 0x79, - 0x8c, 0xeb, 0x7c, 0x6f, 0xd0, 0xbc, 0x62, 0x82, 0x51, 0xe5, 0xde, 0xa3, 0x77, 0x9c, 0x15, 0x27, - 0xe8, 0x65, 0x4b, 0xbb, 0x4b, 0xf9, 0xdf, 0xe5, 0xf1, 0x4c, 0x4b, 0x9b, 0xfc, 0xe6, 0xe8, 0x2f, - 0xad, 0xa0, 0x63, 0x17, 0x16, 0x3d, 0x85, 0x39, 0xb7, 0xed, 0x11, 0x1b, 0x86, 0xbd, 0xa2, 0x6a, - 0x84, 0xf6, 0xa9, 0x4d, 0xba, 0xe5, 0x09, 0xbe, 0xed, 0x5e, 0xef, 0x07, 0x4e, 0xe4, 0xc2, 0x29, - 0xd2, 0xa8, 0x0b, 0x15, 0x37, 0x64, 0xb0, 0xf3, 0xe4, 0xc5, 0xac, 0x47, 0xb4, 0xa5, 0x68, 0xce, - 0x77, 0x80, 0x49, 0xae, 0xe0, 0xf5, 0xe3, 0xa3, 0x4a, 0x65, 0xf9, 0x64, 0x56, 0x3c, 0x08, 0x0b, - 0x7d, 0x08, 0x65, 0x25, 0x4d, 0xcf, 0x14, 0xd7, 0xf3, 0x1a, 0x8b, 0x43, 0xa9, 0x0a, 0x52, 0xa5, - 0x91, 0x0d, 0x53, 0x4a, 0xb8, 0x01, 0x95, 0x96, 0xa7, 0x33, 0x3d, 0x44, 0x46, 0xfa, 0x56, 0xfd, - 0xc7, 0x88, 0x08, 0x81, 0xe2, 0x98, 0x06, 0xf4, 0x3b, 0x80, 0x94, 0x68, 0xcf, 0x2c, 0x2d, 0xa3, - 0x4c, 0xe9, 0x27, 0xd6, 0x6c, 0xeb, 0xbb, 0x5d, 0x8c, 0x44, 0x71, 0x82, 0x1e, 0xb4, 0x06, 0xb3, - 0x62, 0x74, 0x5b, 0xa7, 0xca, 0x2e, 0x69, 0xf6, 0x69, 0xcb, 0xd6, 0x68, 0x79, 0x86, 0xc7, 0x3e, - 0xfe, 0xe1, 0x6b, 0x31, 0x81, 0x8e, 0x13, 0xa5, 0xd0, 0x7b, 0x30, 0xb5, 0x6b, 0x58, 0x3b, 0x6a, - 0xbb, 0x4d, 0x74, 0x17, 0x69, 0x96, 0x23, 0xcd, 0xb2, 0xd5, 0x58, 0x89, 0xd0, 0x70, 0x8c, 0x1b, - 0x51, 0xb8, 0x28, 0x90, 0x1b, 0x96, 0xd1, 0x5a, 0x37, 0x7a, 0xba, 0xed, 0x94, 0x44, 0x17, 0xbd, - 0x14, 0x73, 0x71, 0x31, 0x89, 0xe1, 0xc5, 0x51, 0xe5, 0x6a, 0x72, 0x05, 0xec, 0x33, 0xe1, 0x64, - 0x6c, 0xb4, 0x07, 0xc0, 0xe3, 0x82, 0x73, 0xfc, 0xe6, 0xf8, 0xf1, 0xbb, 0x9f, 0x25, 0xea, 0x24, - 0x9e, 0x40, 0xe7, 0x93, 0x9c, 0x47, 0xc6, 0x01, 0x6c, 0x76, 0x4b, 0x51, 0x22, 0x6d, 0xd5, 0xb4, - 0x3c, 0xcf, 0xf7, 0xba, 0x96, 0x6d, 0xaf, 0x3d, 0xb9, 0xc0, 0xa7, 0xa9, 0x28, 0x22, 0x8e, 0x2b, - 0x41, 0x26, 0x8c, 0x89, 0x3e, 0xf1, 0x25, 0x4d, 0xa1, 0xb4, 0x5c, 0xe6, 0xb3, 0x7c, 0x30, 0x78, - 0x96, 0x9e, 0x48, 0x74, 0x9e, 0x53, 0xc7, 0x47, 0x95, 0xb1, 0x20, 0x03, 0x0e, 0x69, 0xe0, 0x7d, - 0x41, 0xe2, 0x2b, 0xd1, 0xf9, 0xf4, 0x56, 0x9f, 0xae, 0x2f, 0xc8, 0x37, 
0xed, 0xa5, 0xf5, 0x05, - 0x05, 0x20, 0x4f, 0x7e, 0x97, 0xfe, 0xcf, 0x1c, 0xcc, 0xf8, 0xcc, 0x99, 0xfb, 0x82, 0x12, 0x44, - 0x7e, 0xd9, 0x5f, 0x3d, 0xb8, 0xbf, 0xfa, 0x6b, 0x09, 0x26, 0xfc, 0xa5, 0xfb, 0xbf, 0xd7, 0xab, - 0xe3, 0xdb, 0x96, 0x72, 0x7b, 0xf8, 0xdb, 0x5c, 0x70, 0x02, 0xff, 0xef, 0x1b, 0x46, 0x7e, 0x7a, - 0x53, 0xb4, 0xfc, 0x6d, 0x1e, 0xa6, 0xa2, 0xa7, 0x31, 0xd4, 0x57, 0x20, 0x0d, 0xec, 0x2b, 0x68, - 0xc0, 0xec, 0x6e, 0x4f, 0xd3, 0xfa, 0x7c, 0x19, 0x02, 0xcd, 0x05, 0xce, 0x77, 0xc1, 0xd7, 0x84, - 0xe4, 0xec, 0x4a, 0x02, 0x0f, 0x4e, 0x94, 0x4c, 0xe9, 0x91, 0xc8, 0x9f, 0xa9, 0x47, 0x22, 0xf6, - 0xc9, 0xbe, 0x70, 0x8a, 0x4f, 0xf6, 0x89, 0xfd, 0x0e, 0x43, 0x67, 0xe8, 0x77, 0x38, 0x4b, 0x83, - 0x42, 0x42, 0x10, 0x1b, 0xd8, 0x2f, 0xfb, 0x1a, 0x5c, 0x12, 0x62, 0x36, 0xef, 0x1d, 0xd0, 0x6d, - 0xcb, 0xd0, 0x34, 0x62, 0x2d, 0xf7, 0xba, 0xdd, 0xbe, 0xfc, 0x0e, 0x4c, 0x84, 0xbb, 0x62, 0x9c, - 0x9d, 0x76, 0x1a, 0x73, 0xc4, 0xd7, 0xd9, 0xc0, 0x4e, 0x3b, 0xe3, 0xd8, 0xe3, 0x90, 0x7f, 0x5f, - 0x82, 0xb9, 0xe4, 0xee, 0x57, 0xa4, 0xc1, 0x44, 0x57, 0x39, 0x0c, 0x76, 0x24, 0x4b, 0x67, 0x7c, - 0x37, 0xe3, 0xed, 0x10, 0xeb, 0x21, 0x2c, 0x1c, 0xc1, 0x96, 0x7f, 0x94, 0x60, 0x3e, 0xa5, 0x11, - 0xe1, 0x7c, 0x2d, 0x41, 0x1f, 0x41, 0xa9, 0xab, 0x1c, 0x36, 0x7b, 0x56, 0x87, 0x9c, 0xf9, 0xa5, - 0x90, 0x47, 0x8c, 0x75, 0x81, 0x82, 0x3d, 0x3c, 0xf9, 0x2f, 0x25, 0xf8, 0x59, 0x6a, 0xf5, 0x84, - 0xee, 0x85, 0x7a, 0x26, 0xe4, 0x48, 0xcf, 0x04, 0x8a, 0x0b, 0xbe, 0xa2, 0x96, 0x89, 0x2f, 0x25, - 0x28, 0xa7, 0xdd, 0x2c, 0xd1, 0xdd, 0x90, 0x91, 0x3f, 0x8f, 0x18, 0x39, 0x1d, 0x93, 0x7b, 0x45, - 0x36, 0xfe, 0xab, 0x04, 0x97, 0x4f, 0xa8, 0xd0, 0xbc, 0xab, 0x12, 0x69, 0x07, 0xb9, 0xf8, 0xa3, - 0xb6, 0xf8, 0x22, 0xe6, 0x5f, 0x95, 0x12, 0x78, 0x70, 0xaa, 0x34, 0xda, 0x86, 0x79, 0x71, 0x4f, - 0x8b, 0xd2, 0x44, 0xf1, 0xc1, 0x5b, 0xcb, 0x96, 0x93, 0x59, 0x70, 0x9a, 0xac, 0xfc, 0x37, 0x12, - 0xcc, 0x25, 0x3f, 0x19, 0xa0, 0xb7, 0x43, 0x4b, 0x5e, 0x89, 0x2c, 0xf9, 0x64, 0x44, 0x4a, 0x2c, - 0xf8, 0x27, 0x30, 0x21, 0x1e, 0x16, 0x04, 0x8c, 0x70, 0x66, 0x39, 0x29, 0x45, 0x09, 0x08, 0xb7, - 0xbc, 0xe5, 0xc7, 0x24, 0x3c, 0x86, 0x23, 0x68, 0xf2, 0x1f, 0xe4, 0x60, 0xa8, 0xd9, 0x52, 0x34, - 0x72, 0x0e, 0xd5, 0xed, 0xfb, 0xa1, 0xea, 0x76, 0xd0, 0x3f, 0x6d, 0x71, 0xab, 0x52, 0x0b, 0x5b, - 0x1c, 0x29, 0x6c, 0xdf, 0xcc, 0x84, 0x76, 0x72, 0x4d, 0xfb, 0x6b, 0x30, 0xe2, 0x29, 0x3d, 0x5d, - 0xaa, 0x95, 0xff, 0x22, 0x07, 0xa3, 0x01, 0x15, 0xa7, 0x4c, 0xd4, 0xbb, 0xa1, 0x02, 0x27, 0x9f, - 0xe1, 0xee, 0x16, 0xd0, 0x55, 0x75, 0x4b, 0x1a, 0xa7, 0xe9, 0xd8, 0x6f, 0x33, 0x8d, 0x57, 0x3a, - 0xef, 0xc0, 0x84, 0xad, 0x58, 0x1d, 0x62, 0x7b, 0x9f, 0x35, 0xf2, 0xdc, 0x17, 0xbd, 0x56, 0xf5, - 0xad, 0x10, 0x15, 0x47, 0xb8, 0x2f, 0x3d, 0x84, 0xf1, 0x90, 0xb2, 0x53, 0xf5, 0x0c, 0xff, 0xbd, - 0x04, 0x3f, 0x1f, 0xf8, 0xe8, 0x84, 0xea, 0xa1, 0x43, 0x52, 0x8d, 0x1c, 0x92, 0x85, 0x74, 0x80, - 0x57, 0xd7, 0x7b, 0x56, 0xbf, 0xf9, 0xfc, 0x87, 0x85, 0x0b, 0xdf, 0xfd, 0xb0, 0x70, 0xe1, 0xfb, - 0x1f, 0x16, 0x2e, 0xfc, 0xee, 0xf1, 0x82, 0xf4, 0xfc, 0x78, 0x41, 0xfa, 0xee, 0x78, 0x41, 0xfa, - 0xfe, 0x78, 0x41, 0xfa, 0xf7, 0xe3, 0x05, 0xe9, 0x4f, 0x7e, 0x5c, 0xb8, 0xf0, 0x51, 0x51, 0xc0, - 0xfd, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd3, 0xd0, 0x60, 0xbe, 0x07, 0x3e, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index d67d30ab18d1f..6c90cb3c453fb 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -649,7 +649,7 
@@ message NetworkPolicyIngressRule { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). - // If this field is present and contains at least on item, this rule allows traffic only if the + // If this field is present and contains at least one item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional repeated NetworkPolicyPeer from = 2; @@ -873,7 +873,6 @@ message PodSecurityPolicySpec { // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. // +optional repeated AllowedCSIDriver allowedCSIDrivers = 23; diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index 7d802b9446749..8f581ef1d598d 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -930,7 +930,6 @@ type PodSecurityPolicySpec struct { AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. // +optional AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. @@ -1297,7 +1296,7 @@ type NetworkPolicyIngressRule struct { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). - // If this field is present and contains at least on item, this rule allows traffic only if the + // If this field is present and contains at least one item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 91632260c1603..a7eb2ec907ee3 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -379,7 +379,7 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. 
If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -470,7 +470,7 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate.", + "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. 
This requires the ProcMountType feature flag to be enabled.", diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go index de6e358e74353..912a93ecb0e61 100644 --- a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go @@ -17,34 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto - - It has these top-level messages: - ImageReview - ImageReviewContainerSpec - ImageReviewSpec - ImageReviewStatus -*/ package v1alpha1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -58,34 +44,177 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ImageReview) Reset() { *m = ImageReview{} } -func (*ImageReview) ProtoMessage() {} -func (*ImageReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ImageReview) Reset() { *m = ImageReview{} } +func (*ImageReview) ProtoMessage() {} +func (*ImageReview) Descriptor() ([]byte, []int) { + return fileDescriptor_834793af728657a5, []int{0} +} +func (m *ImageReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReview.Merge(m, src) +} +func (m *ImageReview) XXX_Size() int { + return m.Size() +} +func (m *ImageReview) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReview.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReview proto.InternalMessageInfo func (m *ImageReviewContainerSpec) Reset() { *m = ImageReviewContainerSpec{} } func (*ImageReviewContainerSpec) ProtoMessage() {} func (*ImageReviewContainerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptor_834793af728657a5, []int{1} +} +func (m *ImageReviewContainerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewContainerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewContainerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewContainerSpec.Merge(m, src) +} +func (m *ImageReviewContainerSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewContainerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewContainerSpec.DiscardUnknown(m) } -func (m *ImageReviewSpec) Reset() { *m = ImageReviewSpec{} } -func (*ImageReviewSpec) ProtoMessage() {} -func (*ImageReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ImageReviewContainerSpec 
proto.InternalMessageInfo -func (m *ImageReviewStatus) Reset() { *m = ImageReviewStatus{} } -func (*ImageReviewStatus) ProtoMessage() {} -func (*ImageReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *ImageReviewSpec) Reset() { *m = ImageReviewSpec{} } +func (*ImageReviewSpec) ProtoMessage() {} +func (*ImageReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_834793af728657a5, []int{2} +} +func (m *ImageReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewSpec.Merge(m, src) +} +func (m *ImageReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewSpec proto.InternalMessageInfo + +func (m *ImageReviewStatus) Reset() { *m = ImageReviewStatus{} } +func (*ImageReviewStatus) ProtoMessage() {} +func (*ImageReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_834793af728657a5, []int{3} +} +func (m *ImageReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewStatus.Merge(m, src) +} +func (m *ImageReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewStatus proto.InternalMessageInfo func init() { proto.RegisterType((*ImageReview)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReview") proto.RegisterType((*ImageReviewContainerSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewContainerSpec") proto.RegisterType((*ImageReviewSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec.AnnotationsEntry") proto.RegisterType((*ImageReviewStatus)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus.AuditAnnotationsEntry") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto", fileDescriptor_834793af728657a5) +} + +var fileDescriptor_834793af728657a5 = []byte{ + // 607 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xcf, 0x6e, 0xd3, 0x4c, + 0x14, 0xc5, 0xe3, 0xa4, 0xff, 0x32, 0xf9, 0x3e, 0x9a, 0x0e, 0x20, 0x59, 0x59, 0xb8, 0x55, 0x90, + 0x50, 0x59, 0x30, 0x43, 0x2b, 0x84, 0x0a, 0x0b, 0x50, 0x5c, 0x21, 0x95, 0x05, 0x20, 0x0d, 0xbb, + 0xae, 0x98, 0x38, 0x17, 0xc7, 0x24, 0x9e, 0xb1, 0x3c, 0xe3, 0x94, 0xec, 0x78, 0x02, 0xc4, 0x1b, + 0xf0, 0x22, 0x3c, 0x40, 0x97, 0x5d, 0x76, 0x55, 0x51, 0xb3, 0xe4, 0x25, 0x90, 0xc7, 0x4e, 0x6c, + 0x92, 0x22, 0x94, 0x9d, 0xef, 0xbd, 0x73, 0x7e, 0xf7, 0xcc, 0xf1, 0xa0, 0x93, 0xd1, 0x91, 0x22, + 0x81, 0xa4, 0xa3, 0xa4, 0x0f, 0xb1, 0x00, 0x0d, 0x8a, 0x4e, 0x40, 0x0c, 0x64, 0x4c, 0x8b, 0x01, + 
0x8f, 0x02, 0x1a, 0x84, 0xdc, 0x87, 0x48, 0x8e, 0x03, 0x6f, 0x4a, 0x27, 0x07, 0x7c, 0x1c, 0x0d, + 0xf9, 0x01, 0xf5, 0x41, 0x40, 0xcc, 0x35, 0x0c, 0x48, 0x14, 0x4b, 0x2d, 0xf1, 0x6e, 0x2e, 0x20, + 0x3c, 0x0a, 0x48, 0x45, 0x40, 0x66, 0x82, 0xce, 0x43, 0x3f, 0xd0, 0xc3, 0xa4, 0x4f, 0x3c, 0x19, + 0x52, 0x5f, 0xfa, 0x92, 0x1a, 0x5d, 0x3f, 0xf9, 0x60, 0x2a, 0x53, 0x98, 0xaf, 0x9c, 0xd7, 0x79, + 0x5c, 0x1a, 0x08, 0xb9, 0x37, 0x0c, 0x04, 0xc4, 0x53, 0x1a, 0x8d, 0xfc, 0xac, 0xa1, 0x68, 0x08, + 0x9a, 0xd3, 0xc9, 0x92, 0x8b, 0x0e, 0xfd, 0x9b, 0x2a, 0x4e, 0x84, 0x0e, 0x42, 0x58, 0x12, 0x3c, + 0xf9, 0x97, 0x40, 0x79, 0x43, 0x08, 0xf9, 0xa2, 0xae, 0xfb, 0xad, 0x8e, 0x5a, 0xaf, 0xb2, 0x6b, + 0x32, 0x98, 0x04, 0x70, 0x86, 0xdf, 0xa3, 0xad, 0xcc, 0xd3, 0x80, 0x6b, 0x6e, 0x5b, 0x7b, 0xd6, + 0x7e, 0xeb, 0xf0, 0x11, 0x29, 0x13, 0x99, 0xa3, 0x49, 0x34, 0xf2, 0xb3, 0x86, 0x22, 0xd9, 0x69, + 0x32, 0x39, 0x20, 0x6f, 0xfb, 0x1f, 0xc1, 0xd3, 0xaf, 0x41, 0x73, 0x17, 0x9f, 0x5f, 0xed, 0xd6, + 0xd2, 0xab, 0x5d, 0x54, 0xf6, 0xd8, 0x9c, 0x8a, 0x19, 0x5a, 0x53, 0x11, 0x78, 0x76, 0x7d, 0x89, + 0x7e, 0x63, 0xde, 0xa4, 0xe2, 0xee, 0x5d, 0x04, 0x9e, 0xfb, 0x5f, 0x41, 0x5f, 0xcb, 0x2a, 0x66, + 0x58, 0xf8, 0x14, 0x6d, 0x28, 0xcd, 0x75, 0xa2, 0xec, 0x86, 0xa1, 0x1e, 0xae, 0x44, 0x35, 0x4a, + 0xf7, 0x56, 0xc1, 0xdd, 0xc8, 0x6b, 0x56, 0x10, 0xbb, 0x2f, 0x90, 0x5d, 0x39, 0x7c, 0x2c, 0x85, + 0xe6, 0x59, 0x04, 0xd9, 0x76, 0x7c, 0x0f, 0xad, 0x1b, 0xba, 0x89, 0xaa, 0xe9, 0xfe, 0x5f, 0x20, + 0xd6, 0x73, 0x41, 0x3e, 0xeb, 0xfe, 0xaa, 0xa3, 0xed, 0x85, 0x4b, 0xe0, 0x10, 0x21, 0x6f, 0x46, + 0x52, 0xb6, 0xb5, 0xd7, 0xd8, 0x6f, 0x1d, 0x3e, 0x5d, 0xc5, 0xf4, 0x1f, 0x3e, 0xca, 0xc4, 0xe7, + 0x6d, 0xc5, 0x2a, 0x0b, 0xf0, 0x27, 0xd4, 0xe2, 0x42, 0x48, 0xcd, 0x75, 0x20, 0x85, 0xb2, 0xeb, + 0x66, 0x5f, 0x6f, 0xd5, 0xe8, 0x49, 0xaf, 0x64, 0xbc, 0x14, 0x3a, 0x9e, 0xba, 0xb7, 0x8b, 0xbd, + 0xad, 0xca, 0x84, 0x55, 0x57, 0x61, 0x8a, 0x9a, 0x82, 0x87, 0xa0, 0x22, 0xee, 0x81, 0xf9, 0x39, + 0x4d, 0x77, 0xa7, 0x10, 0x35, 0xdf, 0xcc, 0x06, 0xac, 0x3c, 0xd3, 0x79, 0x8e, 0xda, 0x8b, 0x6b, + 0x70, 0x1b, 0x35, 0x46, 0x30, 0xcd, 0x43, 0x66, 0xd9, 0x27, 0xbe, 0x83, 0xd6, 0x27, 0x7c, 0x9c, + 0x80, 0x79, 0x45, 0x4d, 0x96, 0x17, 0xcf, 0xea, 0x47, 0x56, 0xf7, 0x7b, 0x1d, 0xed, 0x2c, 0xfd, + 0x5c, 0xfc, 0x00, 0x6d, 0xf2, 0xf1, 0x58, 0x9e, 0xc1, 0xc0, 0x50, 0xb6, 0xdc, 0xed, 0xc2, 0xc4, + 0x66, 0x2f, 0x6f, 0xb3, 0xd9, 0x1c, 0xdf, 0x47, 0x1b, 0x31, 0x70, 0x25, 0x45, 0xce, 0x2e, 0xdf, + 0x05, 0x33, 0x5d, 0x56, 0x4c, 0xf1, 0x17, 0x0b, 0xb5, 0x79, 0x32, 0x08, 0x74, 0xc5, 0xae, 0xdd, + 0x30, 0xc9, 0x9e, 0xac, 0xfe, 0xfc, 0x48, 0x6f, 0x01, 0x95, 0x07, 0x6c, 0x17, 0xcb, 0xdb, 0x8b, + 0x63, 0xb6, 0xb4, 0xbb, 0x73, 0x8c, 0xee, 0xde, 0x08, 0x59, 0x25, 0x3e, 0x97, 0x9c, 0x5f, 0x3b, + 0xb5, 0x8b, 0x6b, 0xa7, 0x76, 0x79, 0xed, 0xd4, 0x3e, 0xa7, 0x8e, 0x75, 0x9e, 0x3a, 0xd6, 0x45, + 0xea, 0x58, 0x97, 0xa9, 0x63, 0xfd, 0x48, 0x1d, 0xeb, 0xeb, 0x4f, 0xa7, 0x76, 0xba, 0x35, 0xbb, + 0xc8, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x44, 0x16, 0x48, 0xa2, 0x79, 0x05, 0x00, 0x00, +} + func (m *ImageReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -93,41 +222,52 @@ func (m *ImageReview) Marshal() (dAtA []byte, err error) { } func (m *ImageReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ImageReviewContainerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -135,21 +275,27 @@ func (m *ImageReviewContainerSpec) Marshal() (dAtA []byte, err error) { } func (m *ImageReviewContainerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewContainerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Image) + copy(dAtA[i:], m.Image) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i += copy(dAtA[i:], m.Image) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ImageReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -157,55 +303,65 @@ func (m *ImageReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *ImageReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Containers) > 0 { - for _, msg := range m.Containers { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a if len(m.Annotations) > 0 { keysForAnnotations := make([]string, 0, len(m.Annotations)) for k := range m.Annotations { keysForAnnotations = append(keysForAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - for _, k := range keysForAnnotations { + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.Annotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + 
sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *ImageReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -213,57 +369,70 @@ func (m *ImageReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *ImageReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Allowed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) if len(m.AuditAnnotations) > 0 { keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) for k := range m.AuditAnnotations { keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) - for _, k := range keysForAuditAnnotations { - dAtA[i] = 0x1a - i++ - v := m.AuditAnnotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ImageReview) Size() (n int) { + if m == nil { + return 0 + } var l int 
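These Marshal hunks switch the generator's strategy: instead of appending forward with a running cursor, Marshal now allocates exactly Size() bytes and MarshalToSizedBuffer fills the buffer from the end backwards, writing each field's payload first and its length prefix and tag last. That is also why encodeVarintGenerated now subtracts sovGenerated(v) from the offset and why map keys are iterated in reverse order. The sketch below shows the back-to-front idea for a single string field; Msg, varintSize, and putVarintBackward are invented for illustration and are not part of the vendored API.

package main

import (
	"fmt"
	"math/bits"
)

// varintSize reports how many bytes a uvarint occupies (cf. sovGenerated).
func varintSize(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// putVarintBackward writes v so that it ends just before offset and returns
// the new offset, mirroring the regenerated encodeVarintGenerated helper.
func putVarintBackward(buf []byte, offset int, v uint64) int {
	offset -= varintSize(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7F | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

// Msg is a stand-in message with one string field (field number 1).
type Msg struct{ Name string }

func (m *Msg) Size() int {
	l := len(m.Name)
	return 1 + varintSize(uint64(l)) + l // tag + length prefix + payload
}

// MarshalToSizedBuffer fills buf from the end and returns the byte count,
// in the same style as the regenerated gogo code.
func (m *Msg) MarshalToSizedBuffer(buf []byte) (int, error) {
	i := len(buf)
	i -= len(m.Name)
	copy(buf[i:], m.Name)
	i = putVarintBackward(buf, i, uint64(len(m.Name)))
	i--
	buf[i] = 0xa // field 1, wire type 2 (length-delimited)
	return len(buf) - i, nil
}

func (m *Msg) Marshal() ([]byte, error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}

func main() {
	out, _ := (&Msg{Name: "kops"}).Marshal()
	fmt.Printf("% x\n", out) // 0a 04 6b 6f 70 73
}

Filling from the back lets length prefixes for composite entries be derived from the cursor delta (baseI - i) rather than precomputed, which is exactly what the map-entry hunks above now do in place of the old mapSize arithmetic.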
_ = l l = m.ObjectMeta.Size() @@ -276,6 +445,9 @@ func (m *ImageReview) Size() (n int) { } func (m *ImageReviewContainerSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Image) @@ -284,6 +456,9 @@ func (m *ImageReviewContainerSpec) Size() (n int) { } func (m *ImageReviewSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Containers) > 0 { @@ -306,6 +481,9 @@ func (m *ImageReviewSpec) Size() (n int) { } func (m *ImageReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -323,14 +501,7 @@ func (m *ImageReviewStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -340,7 +511,7 @@ func (this *ImageReview) String() string { return "nil" } s := strings.Join([]string{`&ImageReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageReviewSpec", "ImageReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageReviewStatus", "ImageReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -361,6 +532,11 @@ func (this *ImageReviewSpec) String() string { if this == nil { return "nil" } + repeatedStringForContainers := "[]ImageReviewContainerSpec{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ImageReviewContainerSpec", "ImageReviewContainerSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" keysForAnnotations := make([]string, 0, len(this.Annotations)) for k := range this.Annotations { keysForAnnotations = append(keysForAnnotations, k) @@ -372,7 +548,7 @@ func (this *ImageReviewSpec) String() string { } mapStringForAnnotations += "}" s := strings.Join([]string{`&ImageReviewSpec{`, - `Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "ImageReviewContainerSpec", "ImageReviewContainerSpec", 1), `&`, ``, 1) + `,`, + `Containers:` + repeatedStringForContainers + `,`, `Annotations:` + mapStringForAnnotations + `,`, `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `}`, @@ -424,7 +600,7 @@ func (m *ImageReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -452,7 +628,7 @@ func (m *ImageReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -461,6 +637,9 @@ func (m *ImageReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -482,7 +661,7 @@ func (m *ImageReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -491,6 +670,9 @@ func (m *ImageReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -512,7 +694,7 @@ func (m *ImageReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -521,6 +703,9 @@ func (m *ImageReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -537,6 +722,9 @@ func (m *ImageReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -564,7 +752,7 @@ func (m *ImageReviewContainerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -592,7 +780,7 @@ func (m *ImageReviewContainerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -602,6 +790,9 @@ func (m *ImageReviewContainerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -616,6 +807,9 @@ func (m *ImageReviewContainerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -643,7 +837,7 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -671,7 +865,7 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -680,6 +874,9 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -702,7 +899,7 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -711,6 +908,9 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -731,7 +931,7 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -748,7 +948,7 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -758,6 +958,9 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -774,7 +977,7 @@ 
func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -784,6 +987,9 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -820,7 +1026,7 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -830,6 +1036,9 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -844,6 +1053,9 @@ func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -871,7 +1083,7 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -899,7 +1111,7 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -919,7 +1131,7 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -929,6 +1141,9 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -948,7 +1163,7 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -957,6 +1172,9 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -977,7 +1195,7 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -994,7 +1212,7 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1004,6 +1222,9 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1020,7 +1241,7 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1030,6 +1251,9 @@ func (m *ImageReviewStatus) Unmarshal(dAtA 
[]byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -1061,6 +1285,9 @@ func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1127,10 +1354,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1159,6 +1389,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1177,49 +1410,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 607 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xcf, 0x6e, 0xd3, 0x4c, - 0x14, 0xc5, 0xe3, 0xa4, 0xff, 0x32, 0xf9, 0x3e, 0x9a, 0x0e, 0x20, 0x59, 0x59, 0xb8, 0x55, 0x90, - 0x50, 0x59, 0x30, 0x43, 0x2b, 0x84, 0x0a, 0x0b, 0x50, 0x5c, 0x21, 0x95, 0x05, 0x20, 0x0d, 0xbb, - 0xae, 0x98, 0x38, 0x17, 0xc7, 0x24, 0x9e, 0xb1, 0x3c, 0xe3, 0x94, 0xec, 0x78, 0x02, 0xc4, 0x1b, - 0xf0, 0x22, 0x3c, 0x40, 0x97, 0x5d, 0x76, 0x55, 0x51, 0xb3, 0xe4, 0x25, 0x90, 0xc7, 0x4e, 0x6c, - 0x92, 0x22, 0x94, 0x9d, 0xef, 0xbd, 0x73, 0x7e, 0xf7, 0xcc, 0xf1, 0xa0, 0x93, 0xd1, 0x91, 0x22, - 0x81, 0xa4, 0xa3, 0xa4, 0x0f, 0xb1, 0x00, 0x0d, 0x8a, 0x4e, 0x40, 0x0c, 0x64, 0x4c, 0x8b, 0x01, - 0x8f, 0x02, 0x1a, 0x84, 0xdc, 0x87, 0x48, 0x8e, 0x03, 0x6f, 0x4a, 0x27, 0x07, 0x7c, 0x1c, 0x0d, - 0xf9, 0x01, 0xf5, 0x41, 0x40, 0xcc, 0x35, 0x0c, 0x48, 0x14, 0x4b, 0x2d, 0xf1, 0x6e, 0x2e, 0x20, - 0x3c, 0x0a, 0x48, 0x45, 0x40, 0x66, 0x82, 0xce, 0x43, 0x3f, 0xd0, 0xc3, 0xa4, 0x4f, 0x3c, 0x19, - 0x52, 0x5f, 0xfa, 0x92, 0x1a, 0x5d, 0x3f, 0xf9, 0x60, 0x2a, 0x53, 0x98, 0xaf, 0x9c, 0xd7, 0x79, - 0x5c, 0x1a, 0x08, 0xb9, 0x37, 0x0c, 0x04, 0xc4, 0x53, 0x1a, 0x8d, 0xfc, 0xac, 0xa1, 0x68, 0x08, - 0x9a, 0xd3, 0xc9, 0x92, 0x8b, 0x0e, 0xfd, 0x9b, 0x2a, 0x4e, 0x84, 0x0e, 0x42, 0x58, 0x12, 0x3c, - 0xf9, 0x97, 0x40, 0x79, 0x43, 0x08, 0xf9, 0xa2, 0xae, 0xfb, 0xad, 0x8e, 0x5a, 0xaf, 0xb2, 0x6b, - 0x32, 0x98, 0x04, 0x70, 0x86, 0xdf, 0xa3, 0xad, 0xcc, 0xd3, 0x80, 0x6b, 0x6e, 0x5b, 0x7b, 0xd6, - 0x7e, 0xeb, 0xf0, 0x11, 0x29, 0x13, 0x99, 0xa3, 0x49, 0x34, 0xf2, 0xb3, 0x86, 0x22, 0xd9, 0x69, - 0x32, 0x39, 0x20, 0x6f, 0xfb, 0x1f, 0xc1, 0xd3, 0xaf, 0x41, 0x73, 0x17, 0x9f, 0x5f, 0xed, 0xd6, - 0xd2, 0xab, 0x5d, 0x54, 0xf6, 0xd8, 0x9c, 0x8a, 0x19, 0x5a, 0x53, 0x11, 0x78, 0x76, 0x7d, 0x89, - 0x7e, 0x63, 0xde, 0xa4, 0xe2, 0xee, 0x5d, 0x04, 0x9e, 0xfb, 0x5f, 0x41, 0x5f, 0xcb, 0x2a, 0x66, - 0x58, 0xf8, 0x14, 0x6d, 0x28, 0xcd, 0x75, 0xa2, 0xec, 0x86, 0xa1, 0x1e, 0xae, 0x44, 0x35, 0x4a, - 0xf7, 0x56, 0xc1, 0xdd, 0xc8, 0x6b, 0x56, 0x10, 0xbb, 0x2f, 0x90, 0x5d, 0x39, 0x7c, 0x2c, 0x85, - 0xe6, 0x59, 0x04, 0xd9, 0x76, 0x7c, 0x0f, 0xad, 0x1b, 0xba, 0x89, 0xaa, 0xe9, 0xfe, 0x5f, 0x20, - 0xd6, 0x73, 
0x41, 0x3e, 0xeb, 0xfe, 0xaa, 0xa3, 0xed, 0x85, 0x4b, 0xe0, 0x10, 0x21, 0x6f, 0x46, - 0x52, 0xb6, 0xb5, 0xd7, 0xd8, 0x6f, 0x1d, 0x3e, 0x5d, 0xc5, 0xf4, 0x1f, 0x3e, 0xca, 0xc4, 0xe7, - 0x6d, 0xc5, 0x2a, 0x0b, 0xf0, 0x27, 0xd4, 0xe2, 0x42, 0x48, 0xcd, 0x75, 0x20, 0x85, 0xb2, 0xeb, - 0x66, 0x5f, 0x6f, 0xd5, 0xe8, 0x49, 0xaf, 0x64, 0xbc, 0x14, 0x3a, 0x9e, 0xba, 0xb7, 0x8b, 0xbd, - 0xad, 0xca, 0x84, 0x55, 0x57, 0x61, 0x8a, 0x9a, 0x82, 0x87, 0xa0, 0x22, 0xee, 0x81, 0xf9, 0x39, - 0x4d, 0x77, 0xa7, 0x10, 0x35, 0xdf, 0xcc, 0x06, 0xac, 0x3c, 0xd3, 0x79, 0x8e, 0xda, 0x8b, 0x6b, - 0x70, 0x1b, 0x35, 0x46, 0x30, 0xcd, 0x43, 0x66, 0xd9, 0x27, 0xbe, 0x83, 0xd6, 0x27, 0x7c, 0x9c, - 0x80, 0x79, 0x45, 0x4d, 0x96, 0x17, 0xcf, 0xea, 0x47, 0x56, 0xf7, 0x7b, 0x1d, 0xed, 0x2c, 0xfd, - 0x5c, 0xfc, 0x00, 0x6d, 0xf2, 0xf1, 0x58, 0x9e, 0xc1, 0xc0, 0x50, 0xb6, 0xdc, 0xed, 0xc2, 0xc4, - 0x66, 0x2f, 0x6f, 0xb3, 0xd9, 0x1c, 0xdf, 0x47, 0x1b, 0x31, 0x70, 0x25, 0x45, 0xce, 0x2e, 0xdf, - 0x05, 0x33, 0x5d, 0x56, 0x4c, 0xf1, 0x17, 0x0b, 0xb5, 0x79, 0x32, 0x08, 0x74, 0xc5, 0xae, 0xdd, - 0x30, 0xc9, 0x9e, 0xac, 0xfe, 0xfc, 0x48, 0x6f, 0x01, 0x95, 0x07, 0x6c, 0x17, 0xcb, 0xdb, 0x8b, - 0x63, 0xb6, 0xb4, 0xbb, 0x73, 0x8c, 0xee, 0xde, 0x08, 0x59, 0x25, 0x3e, 0x97, 0x9c, 0x5f, 0x3b, - 0xb5, 0x8b, 0x6b, 0xa7, 0x76, 0x79, 0xed, 0xd4, 0x3e, 0xa7, 0x8e, 0x75, 0x9e, 0x3a, 0xd6, 0x45, - 0xea, 0x58, 0x97, 0xa9, 0x63, 0xfd, 0x48, 0x1d, 0xeb, 0xeb, 0x4f, 0xa7, 0x76, 0xba, 0x35, 0xbb, - 0xc8, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x44, 0x16, 0x48, 0xa2, 0x79, 0x05, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go index 52a86e36ff0c3..c9b22fc79478e 100644 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -17,42 +17,24 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto - - It has these top-level messages: - IPBlock - NetworkPolicy - NetworkPolicyEgressRule - NetworkPolicyIngressRule - NetworkPolicyList - NetworkPolicyPeer - NetworkPolicyPort - NetworkPolicySpec -*/ package v1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" - k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" + strings "strings" - io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // Reference imports to suppress errors if they are not otherwise used. @@ -66,39 +48,229 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *IPBlock) Reset() { *m = IPBlock{} } -func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{0} +} +func (m *IPBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPBlock.Merge(m, src) +} +func (m *IPBlock) XXX_Size() int { + return m.Size() +} +func (m *IPBlock) XXX_DiscardUnknown() { + xxx_messageInfo_IPBlock.DiscardUnknown(m) +} -func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } -func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_IPBlock proto.InternalMessageInfo -func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } -func (*NetworkPolicyEgressRule) ProtoMessage() {} -func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{1} +} +func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicy.Merge(m, src) +} +func (m *NetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo + +func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } +func (*NetworkPolicyEgressRule) ProtoMessage() {} +func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{2} +} +func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src) +} +func (m *NetworkPolicyEgressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} + return fileDescriptor_1c72867a70a7cc90, []int{3} +} +func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b 
[]byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src) +} +func (m *NetworkPolicyIngressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo + +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{4} +} +func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyList.Merge(m, src) +} +func (m *NetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m) } -func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } -func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo -func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } -func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{5} +} +func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPeer.Merge(m, src) +} +func (m *NetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m) +} -func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } -func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo -func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } -func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{6} +} +func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPort.Merge(m, src) +} +func (m *NetworkPolicyPort) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPort) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo + +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{7} +} +func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicySpec.Merge(m, src) +} +func (m *NetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo func init() { proto.RegisterType((*IPBlock)(nil), "k8s.io.api.networking.v1.IPBlock") @@ -110,10 +282,70 @@ func init() { proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptor_1c72867a70a7cc90) +} + +var fileDescriptor_1c72867a70a7cc90 = []byte{ + // 804 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0x8e, 0x9d, 0x6c, 0x92, 0x4e, 0x28, 0x65, 0x07, 0x21, 0xac, 0x45, 0xd8, 0xc1, 0x17, 0x56, + 0xaa, 0x18, 0x93, 0x16, 0x21, 0x6e, 0x08, 0x43, 0x29, 0x91, 0xba, 0xbb, 0xd1, 0x6c, 0x2f, 0x20, + 0x90, 0x70, 0x9c, 0x59, 0xef, 0x34, 0xb1, 0xc7, 0x1a, 0x4f, 0x42, 0xf7, 0xc6, 0x9f, 0xc0, 0x1f, + 0xc2, 0x91, 0x1b, 0x87, 0x72, 0xdc, 0x63, 0x8f, 0x3d, 0x59, 0xac, 0xf9, 0x2f, 0xf6, 0x84, 0x66, + 0x3c, 0x89, 0xf3, 0xa3, 0x11, 0xd9, 0x15, 0xbd, 0x65, 0xde, 0xbc, 0xef, 0x7b, 0xf3, 0xde, 0xfb, + 0xf2, 0x19, 0x7c, 0x35, 0xfe, 0x22, 0x43, 0x94, 0x79, 0xe3, 0xe9, 0x90, 0xf0, 0x84, 0x08, 0x92, + 0x79, 0x33, 0x92, 0x8c, 0x18, 0xf7, 0xf4, 0x45, 0x90, 0x52, 0x2f, 0x21, 0xe2, 0x17, 0xc6, 0xc7, + 0x34, 0x89, 0xbc, 0x59, 0xcf, 0x8b, 0x48, 0x42, 0x78, 0x20, 0xc8, 0x08, 0xa5, 0x9c, 0x09, 0x06, + 0xad, 0x32, 0x13, 0x05, 0x29, 0x45, 0x55, 0x26, 0x9a, 0xf5, 0x0e, 0x3e, 0x89, 0xa8, 0x38, 0x9f, + 0x0e, 0x51, 0xc8, 0x62, 0x2f, 0x62, 0x11, 0xf3, 0x14, 0x60, 0x38, 0x3d, 0x53, 0x27, 0x75, 0x50, + 0xbf, 0x4a, 0xa2, 0x03, 0x77, 0xa9, 0x64, 0xc8, 0x38, 0x79, 0x4d, 0xb1, 0x83, 0xcf, 0xaa, 0x9c, + 0x38, 0x08, 0xcf, 0x69, 0x42, 0xf8, 0x85, 0x97, 0x8e, 0x23, 0x19, 0xc8, 0xbc, 0x98, 0x88, 0xe0, + 0x75, 0x28, 0x6f, 0x1b, 0x8a, 0x4f, 0x13, 0x41, 0x63, 0xb2, 0x01, 0xf8, 0xfc, 0xbf, 0x00, 0x59, + 0x78, 0x4e, 0xe2, 0x60, 0x03, 0xf7, 0x70, 0x1b, 0x6e, 0x2a, 0xe8, 0xc4, 0xa3, 0x89, 0xc8, 0x04, + 0x5f, 0x07, 0xb9, 0x27, 0xa0, 0xd5, 0x1f, 0xf8, 0x13, 0x16, 0x8e, 0x61, 0x17, 
0x34, 0x42, 0x3a, + 0xe2, 0x96, 0xd1, 0x35, 0x0e, 0xef, 0xf8, 0x6f, 0x5d, 0xe6, 0x4e, 0xad, 0xc8, 0x9d, 0xc6, 0xd7, + 0xfd, 0x6f, 0x30, 0x56, 0x37, 0xd0, 0x05, 0x4d, 0xf2, 0x3c, 0x24, 0xa9, 0xb0, 0xcc, 0x6e, 0xfd, + 0xf0, 0x8e, 0x0f, 0x8a, 0xdc, 0x69, 0x3e, 0x52, 0x11, 0xac, 0x6f, 0xdc, 0xbf, 0x0c, 0x70, 0xf7, + 0xb8, 0xdc, 0xc4, 0x80, 0x4d, 0x68, 0x78, 0x01, 0x7f, 0x06, 0x6d, 0x39, 0x9b, 0x51, 0x20, 0x02, + 0xc5, 0xdd, 0x79, 0xf0, 0x29, 0xaa, 0xd6, 0xb6, 0x78, 0x2a, 0x4a, 0xc7, 0x91, 0x0c, 0x64, 0x48, + 0x66, 0xa3, 0x59, 0x0f, 0x9d, 0x0c, 0x9f, 0x91, 0x50, 0x1c, 0x11, 0x11, 0xf8, 0x50, 0xbf, 0x06, + 0x54, 0x31, 0xbc, 0x60, 0x85, 0x47, 0xa0, 0x91, 0xa5, 0x24, 0xb4, 0x4c, 0xc5, 0x7e, 0x1f, 0x6d, + 0x13, 0x05, 0x5a, 0x79, 0xd8, 0x69, 0x4a, 0xc2, 0xaa, 0x4d, 0x79, 0xc2, 0x8a, 0xc6, 0xfd, 0xc3, + 0x00, 0xef, 0xaf, 0x64, 0x3e, 0x8a, 0x38, 0xc9, 0x32, 0x3c, 0x9d, 0x10, 0x38, 0x00, 0x7b, 0x29, + 0xe3, 0x22, 0xb3, 0x8c, 0x6e, 0xfd, 0x06, 0xb5, 0x06, 0x8c, 0x0b, 0xff, 0xae, 0xae, 0xb5, 0x27, + 0x4f, 0x19, 0x2e, 0x89, 0xe0, 0x63, 0x60, 0x0a, 0xa6, 0x06, 0x7a, 0x03, 0x3a, 0x42, 0xb8, 0x0f, + 0x34, 0x9d, 0xf9, 0x94, 0x61, 0x53, 0x30, 0xf7, 0x4f, 0x03, 0x58, 0x2b, 0x59, 0xfd, 0xe4, 0x4d, + 0xbe, 0xfb, 0x08, 0x34, 0xce, 0x38, 0x8b, 0x6f, 0xf3, 0xf2, 0xc5, 0xd0, 0xbf, 0xe5, 0x2c, 0xc6, + 0x8a, 0xc6, 0x7d, 0x61, 0x80, 0xfd, 0x95, 0xcc, 0x27, 0x34, 0x13, 0xf0, 0xc7, 0x0d, 0xed, 0xa0, + 0xdd, 0xb4, 0x23, 0xd1, 0x4a, 0x39, 0xef, 0xe8, 0x5a, 0xed, 0x79, 0x64, 0x49, 0x37, 0x4f, 0xc0, + 0x1e, 0x15, 0x24, 0xce, 0x74, 0x0f, 0x1f, 0xef, 0xd8, 0x43, 0x35, 0x90, 0xbe, 0x44, 0xe3, 0x92, + 0xc4, 0x7d, 0x61, 0xae, 0x75, 0x20, 0x7b, 0x85, 0x67, 0xa0, 0x93, 0xb2, 0xd1, 0x29, 0x99, 0x90, + 0x50, 0x30, 0xae, 0x9b, 0x78, 0xb8, 0x63, 0x13, 0xc1, 0x90, 0x4c, 0xe6, 0x50, 0xff, 0x5e, 0x91, + 0x3b, 0x9d, 0x41, 0xc5, 0x85, 0x97, 0x89, 0xe1, 0x73, 0xb0, 0x9f, 0x04, 0x31, 0xc9, 0xd2, 0x20, + 0x24, 0x8b, 0x6a, 0xe6, 0xed, 0xab, 0xbd, 0x57, 0xe4, 0xce, 0xfe, 0xf1, 0x3a, 0x23, 0xde, 0x2c, + 0x02, 0xbf, 0x03, 0x2d, 0x9a, 0x2a, 0x0b, 0xb1, 0xea, 0xaa, 0xde, 0x47, 0xdb, 0xe7, 0xa8, 0xbd, + 0xc6, 0xef, 0x14, 0xb9, 0x33, 0x37, 0x1e, 0x3c, 0x87, 0xbb, 0xbf, 0xaf, 0x6b, 0x40, 0x0a, 0x0e, + 0x3e, 0x06, 0x6d, 0xe5, 0x55, 0x21, 0x9b, 0x68, 0x6f, 0xba, 0x2f, 0xf7, 0x39, 0xd0, 0xb1, 0xeb, + 0xdc, 0xf9, 0x60, 0xd3, 0xbc, 0xd1, 0xfc, 0x1a, 0x2f, 0xc0, 0xf0, 0x18, 0x34, 0xa4, 0x74, 0xf5, + 0x54, 0xb6, 0x9b, 0x90, 0xf4, 0x4b, 0x54, 0xfa, 0x25, 0xea, 0x27, 0xe2, 0x84, 0x9f, 0x0a, 0x4e, + 0x93, 0xc8, 0x6f, 0x4b, 0xc9, 0xca, 0x27, 0x61, 0xc5, 0xe3, 0x5e, 0xaf, 0x2f, 0x5c, 0x7a, 0x08, + 0x7c, 0xf6, 0xbf, 0x2d, 0xfc, 0x5d, 0x2d, 0xb3, 0xed, 0x4b, 0xff, 0x09, 0xb4, 0x68, 0xf9, 0x27, + 0xd7, 0x12, 0x7e, 0xb0, 0xa3, 0x84, 0x97, 0xac, 0xc1, 0xbf, 0xa7, 0xcb, 0xb4, 0xe6, 0xc1, 0x39, + 0x27, 0xfc, 0x1e, 0x34, 0x49, 0xc9, 0x5e, 0x57, 0xec, 0xbd, 0x1d, 0xd9, 0x2b, 0xbf, 0xf4, 0xdf, + 0xd6, 0xe4, 0x4d, 0x1d, 0xd3, 0x84, 0xf0, 0x4b, 0x39, 0x25, 0x99, 0xfb, 0xf4, 0x22, 0x25, 0x99, + 0xd5, 0x50, 0xdf, 0x93, 0x0f, 0xcb, 0x66, 0x17, 0xe1, 0xeb, 0xdc, 0x01, 0xd5, 0x11, 0x2f, 0x23, + 0xfc, 0xc3, 0xcb, 0x2b, 0xbb, 0xf6, 0xf2, 0xca, 0xae, 0xbd, 0xba, 0xb2, 0x6b, 0xbf, 0x16, 0xb6, + 0x71, 0x59, 0xd8, 0xc6, 0xcb, 0xc2, 0x36, 0x5e, 0x15, 0xb6, 0xf1, 0x77, 0x61, 0x1b, 0xbf, 0xfd, + 0x63, 0xd7, 0x7e, 0x30, 0x67, 0xbd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7b, 0xc9, 0x59, + 0x67, 0x08, 0x00, 0x00, +} + func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -121,36 +353,36 @@ func (m *IPBlock) Marshal() (dAtA []byte, err error) { } func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i += copy(dAtA[i:], m.CIDR) if len(m.Except) > 0 { - for _, s := range m.Except { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -158,33 +390,42 @@ func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -192,41 +433,50 @@ func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.To) > 0 { + for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.To) > 0 { - for _, msg := range m.To { - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -234,41 +484,50 @@ func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.From) > 0 { + for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.From) > 0 { - for _, msg := range m.From { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -276,37 +535,46 @@ func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *NetworkPolicyPeer) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -314,47 +582,58 @@ func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.PodSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n4, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.IPBlock != nil { + { + size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + dAtA[i] = 0x1a } if m.NamespaceSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n5, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- + dAtA[i] = 0x12 } - if m.IPBlock != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) - n6, err := m.IPBlock.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PodSelector != nil { + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -362,33 +641,41 @@ func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Protocol != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) - i += copy(dAtA[i:], *m.Protocol) - } if m.Port != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n7, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- + dAtA[i] = 0x12 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -396,70 +683,80 @@ func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n8, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.PolicyTypes) > 0 { + for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PolicyTypes[iNdEx]) + copy(dAtA[i:], m.PolicyTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Egress) > 0 { - for _, msg := range m.Egress { + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + } + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.CIDR) @@ -474,6 +771,9 @@ func (m *IPBlock) Size() (n int) { } func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -484,6 +784,9 @@ func (m *NetworkPolicy) Size() (n int) { } func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -502,6 +805,9 @@ func (m *NetworkPolicyEgressRule) Size() (n int) { } func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Ports) > 0 { @@ -520,6 +826,9 @@ func (m *NetworkPolicyIngressRule) Size() (n int) { } func (m *NetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -534,6 +843,9 @@ func (m *NetworkPolicyList) Size() (n int) { } func (m *NetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PodSelector != nil { @@ -552,6 +864,9 @@ func (m *NetworkPolicyPeer) Size() (n int) { } func (m *NetworkPolicyPort) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Protocol != nil { @@ -566,6 +881,9 @@ func (m *NetworkPolicyPort) Size() (n int) { } func (m 
*NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.PodSelector.Size() @@ -592,14 +910,7 @@ func (m *NetworkPolicySpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -620,7 +931,7 @@ func (this *NetworkPolicy) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -630,9 +941,19 @@ func (this *NetworkPolicyEgressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForTo += "}" s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, `}`, }, "") return s @@ -641,9 +962,19 @@ func (this *NetworkPolicyIngressRule) String() string { if this == nil { return "nil" } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForFrom += "}" s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, `}`, }, "") return s @@ -652,9 +983,14 @@ func (this *NetworkPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -664,9 +1000,9 @@ func (this *NetworkPolicyPeer) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(fmt.Sprintf("%v", this.IPBlock), "IPBlock", "IPBlock", 1) + `,`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, `}`, }, "") return s @@ -677,7 +1013,7 @@ func (this *NetworkPolicyPort) String() string { } s := strings.Join([]string{`&NetworkPolicyPort{`, `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -686,10 +1022,20 @@ func (this *NetworkPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, - `Egress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Egress), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + `,`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, `}`, }, "") @@ -718,7 +1064,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -746,7 +1092,7 @@ func (m 
*IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -756,6 +1102,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -775,7 +1124,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -785,6 +1134,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -799,6 +1151,9 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -826,7 +1181,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -854,7 +1209,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -863,6 +1218,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -884,7 +1242,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -893,6 +1251,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -909,6 +1270,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -936,7 +1300,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -964,7 +1328,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -973,6 +1337,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -995,7 +1362,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1004,6 +1371,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -1021,6 +1391,9 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1048,7 +1421,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1076,7 +1449,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1085,6 +1458,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1107,7 +1483,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1116,6 +1492,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1133,6 +1512,9 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1160,7 +1542,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1188,7 +1570,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1197,6 +1579,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1218,7 +1603,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1227,6 +1612,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1244,6 +1632,9 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1271,7 +1662,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1299,7 +1690,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1308,11 +1699,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodSelector == nil { - m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.PodSelector = &v1.LabelSelector{} } if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1332,7 +1726,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1341,11 +1735,14 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.NamespaceSelector = &v1.LabelSelector{} } if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1365,7 +1762,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1374,6 +1771,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1393,6 +1793,9 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1420,7 +1823,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1448,7 +1851,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1458,6 +1861,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1478,7 +1884,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1487,11 +1893,14 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Port == nil { - m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.Port = &intstr.IntOrString{} } if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1506,6 +1915,9 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1533,7 +1945,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { 
break } @@ -1561,7 +1973,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1570,6 +1982,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1591,7 +2006,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1600,6 +2015,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1622,7 +2040,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1631,6 +2049,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1653,7 +2074,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1663,6 +2084,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1677,6 +2101,9 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1743,10 +2170,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1775,6 +2205,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1793,62 +2226,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 804 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0x8e, 0x9d, 0x6c, 0x92, 0x4e, 0x28, 0x65, 0x07, 0x21, 0xac, 0x45, 0xd8, 0xc1, 0x17, 0x56, - 0xaa, 0x18, 0x93, 0x16, 0x21, 0x6e, 0x08, 0x43, 0x29, 0x91, 0xba, 0xbb, 0xd1, 0x6c, 0x2f, 0x20, - 0x90, 0x70, 0x9c, 0x59, 0xef, 0x34, 0xb1, 0xc7, 0x1a, 0x4f, 0x42, 0xf7, 0xc6, 0x9f, 0xc0, 0x1f, - 0xc2, 0x91, 0x1b, 0x87, 0x72, 0xdc, 0x63, 0x8f, 0x3d, 0x59, 0xac, 0xf9, 0x2f, 0xf6, 0x84, 0x66, - 0x3c, 0x89, 0xf3, 0xa3, 0x11, 0xd9, 0x15, 0xbd, 0x65, 0xde, 0xbc, 0xef, 0x7b, 0xf3, 0xde, 0xfb, - 
0xf2, 0x19, 0x7c, 0x35, 0xfe, 0x22, 0x43, 0x94, 0x79, 0xe3, 0xe9, 0x90, 0xf0, 0x84, 0x08, 0x92, - 0x79, 0x33, 0x92, 0x8c, 0x18, 0xf7, 0xf4, 0x45, 0x90, 0x52, 0x2f, 0x21, 0xe2, 0x17, 0xc6, 0xc7, - 0x34, 0x89, 0xbc, 0x59, 0xcf, 0x8b, 0x48, 0x42, 0x78, 0x20, 0xc8, 0x08, 0xa5, 0x9c, 0x09, 0x06, - 0xad, 0x32, 0x13, 0x05, 0x29, 0x45, 0x55, 0x26, 0x9a, 0xf5, 0x0e, 0x3e, 0x89, 0xa8, 0x38, 0x9f, - 0x0e, 0x51, 0xc8, 0x62, 0x2f, 0x62, 0x11, 0xf3, 0x14, 0x60, 0x38, 0x3d, 0x53, 0x27, 0x75, 0x50, - 0xbf, 0x4a, 0xa2, 0x03, 0x77, 0xa9, 0x64, 0xc8, 0x38, 0x79, 0x4d, 0xb1, 0x83, 0xcf, 0xaa, 0x9c, - 0x38, 0x08, 0xcf, 0x69, 0x42, 0xf8, 0x85, 0x97, 0x8e, 0x23, 0x19, 0xc8, 0xbc, 0x98, 0x88, 0xe0, - 0x75, 0x28, 0x6f, 0x1b, 0x8a, 0x4f, 0x13, 0x41, 0x63, 0xb2, 0x01, 0xf8, 0xfc, 0xbf, 0x00, 0x59, - 0x78, 0x4e, 0xe2, 0x60, 0x03, 0xf7, 0x70, 0x1b, 0x6e, 0x2a, 0xe8, 0xc4, 0xa3, 0x89, 0xc8, 0x04, - 0x5f, 0x07, 0xb9, 0x27, 0xa0, 0xd5, 0x1f, 0xf8, 0x13, 0x16, 0x8e, 0x61, 0x17, 0x34, 0x42, 0x3a, - 0xe2, 0x96, 0xd1, 0x35, 0x0e, 0xef, 0xf8, 0x6f, 0x5d, 0xe6, 0x4e, 0xad, 0xc8, 0x9d, 0xc6, 0xd7, - 0xfd, 0x6f, 0x30, 0x56, 0x37, 0xd0, 0x05, 0x4d, 0xf2, 0x3c, 0x24, 0xa9, 0xb0, 0xcc, 0x6e, 0xfd, - 0xf0, 0x8e, 0x0f, 0x8a, 0xdc, 0x69, 0x3e, 0x52, 0x11, 0xac, 0x6f, 0xdc, 0xbf, 0x0c, 0x70, 0xf7, - 0xb8, 0xdc, 0xc4, 0x80, 0x4d, 0x68, 0x78, 0x01, 0x7f, 0x06, 0x6d, 0x39, 0x9b, 0x51, 0x20, 0x02, - 0xc5, 0xdd, 0x79, 0xf0, 0x29, 0xaa, 0xd6, 0xb6, 0x78, 0x2a, 0x4a, 0xc7, 0x91, 0x0c, 0x64, 0x48, - 0x66, 0xa3, 0x59, 0x0f, 0x9d, 0x0c, 0x9f, 0x91, 0x50, 0x1c, 0x11, 0x11, 0xf8, 0x50, 0xbf, 0x06, - 0x54, 0x31, 0xbc, 0x60, 0x85, 0x47, 0xa0, 0x91, 0xa5, 0x24, 0xb4, 0x4c, 0xc5, 0x7e, 0x1f, 0x6d, - 0x13, 0x05, 0x5a, 0x79, 0xd8, 0x69, 0x4a, 0xc2, 0xaa, 0x4d, 0x79, 0xc2, 0x8a, 0xc6, 0xfd, 0xc3, - 0x00, 0xef, 0xaf, 0x64, 0x3e, 0x8a, 0x38, 0xc9, 0x32, 0x3c, 0x9d, 0x10, 0x38, 0x00, 0x7b, 0x29, - 0xe3, 0x22, 0xb3, 0x8c, 0x6e, 0xfd, 0x06, 0xb5, 0x06, 0x8c, 0x0b, 0xff, 0xae, 0xae, 0xb5, 0x27, - 0x4f, 0x19, 0x2e, 0x89, 0xe0, 0x63, 0x60, 0x0a, 0xa6, 0x06, 0x7a, 0x03, 0x3a, 0x42, 0xb8, 0x0f, - 0x34, 0x9d, 0xf9, 0x94, 0x61, 0x53, 0x30, 0xf7, 0x4f, 0x03, 0x58, 0x2b, 0x59, 0xfd, 0xe4, 0x4d, - 0xbe, 0xfb, 0x08, 0x34, 0xce, 0x38, 0x8b, 0x6f, 0xf3, 0xf2, 0xc5, 0xd0, 0xbf, 0xe5, 0x2c, 0xc6, - 0x8a, 0xc6, 0x7d, 0x61, 0x80, 0xfd, 0x95, 0xcc, 0x27, 0x34, 0x13, 0xf0, 0xc7, 0x0d, 0xed, 0xa0, - 0xdd, 0xb4, 0x23, 0xd1, 0x4a, 0x39, 0xef, 0xe8, 0x5a, 0xed, 0x79, 0x64, 0x49, 0x37, 0x4f, 0xc0, - 0x1e, 0x15, 0x24, 0xce, 0x74, 0x0f, 0x1f, 0xef, 0xd8, 0x43, 0x35, 0x90, 0xbe, 0x44, 0xe3, 0x92, - 0xc4, 0x7d, 0x61, 0xae, 0x75, 0x20, 0x7b, 0x85, 0x67, 0xa0, 0x93, 0xb2, 0xd1, 0x29, 0x99, 0x90, - 0x50, 0x30, 0xae, 0x9b, 0x78, 0xb8, 0x63, 0x13, 0xc1, 0x90, 0x4c, 0xe6, 0x50, 0xff, 0x5e, 0x91, - 0x3b, 0x9d, 0x41, 0xc5, 0x85, 0x97, 0x89, 0xe1, 0x73, 0xb0, 0x9f, 0x04, 0x31, 0xc9, 0xd2, 0x20, - 0x24, 0x8b, 0x6a, 0xe6, 0xed, 0xab, 0xbd, 0x57, 0xe4, 0xce, 0xfe, 0xf1, 0x3a, 0x23, 0xde, 0x2c, - 0x02, 0xbf, 0x03, 0x2d, 0x9a, 0x2a, 0x0b, 0xb1, 0xea, 0xaa, 0xde, 0x47, 0xdb, 0xe7, 0xa8, 0xbd, - 0xc6, 0xef, 0x14, 0xb9, 0x33, 0x37, 0x1e, 0x3c, 0x87, 0xbb, 0xbf, 0xaf, 0x6b, 0x40, 0x0a, 0x0e, - 0x3e, 0x06, 0x6d, 0xe5, 0x55, 0x21, 0x9b, 0x68, 0x6f, 0xba, 0x2f, 0xf7, 0x39, 0xd0, 0xb1, 0xeb, - 0xdc, 0xf9, 0x60, 0xd3, 0xbc, 0xd1, 0xfc, 0x1a, 0x2f, 0xc0, 0xf0, 0x18, 0x34, 0xa4, 0x74, 0xf5, - 0x54, 0xb6, 0x9b, 0x90, 0xf4, 0x4b, 0x54, 0xfa, 0x25, 0xea, 0x27, 0xe2, 0x84, 0x9f, 0x0a, 0x4e, - 0x93, 0xc8, 0x6f, 0x4b, 0xc9, 0xca, 0x27, 0x61, 0xc5, 0xe3, 0x5e, 0xaf, 0x2f, 0x5c, 0x7a, 0x08, - 0x7c, 0xf6, 0xbf, 0x2d, 
0xfc, 0x5d, 0x2d, 0xb3, 0xed, 0x4b, 0xff, 0x09, 0xb4, 0x68, 0xf9, 0x27, - 0xd7, 0x12, 0x7e, 0xb0, 0xa3, 0x84, 0x97, 0xac, 0xc1, 0xbf, 0xa7, 0xcb, 0xb4, 0xe6, 0xc1, 0x39, - 0x27, 0xfc, 0x1e, 0x34, 0x49, 0xc9, 0x5e, 0x57, 0xec, 0xbd, 0x1d, 0xd9, 0x2b, 0xbf, 0xf4, 0xdf, - 0xd6, 0xe4, 0x4d, 0x1d, 0xd3, 0x84, 0xf0, 0x4b, 0x39, 0x25, 0x99, 0xfb, 0xf4, 0x22, 0x25, 0x99, - 0xd5, 0x50, 0xdf, 0x93, 0x0f, 0xcb, 0x66, 0x17, 0xe1, 0xeb, 0xdc, 0x01, 0xd5, 0x11, 0x2f, 0x23, - 0xfc, 0xc3, 0xcb, 0x2b, 0xbb, 0xf6, 0xf2, 0xca, 0xae, 0xbd, 0xba, 0xb2, 0x6b, 0xbf, 0x16, 0xb6, - 0x71, 0x59, 0xd8, 0xc6, 0xcb, 0xc2, 0x36, 0x5e, 0x15, 0xb6, 0xf1, 0x77, 0x61, 0x1b, 0xbf, 0xfd, - 0x63, 0xd7, 0x7e, 0x30, 0x67, 0xbd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7b, 0xc9, 0x59, - 0x67, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index cbbb265289cd0..3cb73804526e3 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -92,7 +92,7 @@ message NetworkPolicyIngressRule { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by - // source). If this field is present and contains at least on item, this rule + // source). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional repeated NetworkPolicyPeer from = 2; diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index 59331111f4630..38a640f0b9c5f 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -107,7 +107,7 @@ type NetworkPolicyIngressRule struct { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by - // source). If this field is present and contains at least on item, this rule + // source). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index cfcd0c54c5960..188b72a1cac2a 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -60,7 +60,7 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). 
If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go index 90b9434ca215c..8ed560090339d 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -17,38 +17,19 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto - - It has these top-level messages: - HTTPIngressPath - HTTPIngressRuleValue - Ingress - IngressBackend - IngressList - IngressRule - IngressRuleValue - IngressSpec - IngressStatus - IngressTLS -*/ package v1beta1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" math "math" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -62,45 +43,285 @@ var _ = math.Inf // proto package needs to be updated. 
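The import hunk above adds math_bits "math/bits" because the regenerated sovGenerated further down in this file computes varint sizes from the bit length instead of a shift loop. A minimal standalone sketch, assuming illustrative helper names sovLoop and sovBits (not from the generated file), showing that the two forms agree:

package main

import (
	"fmt"
	"math/bits"
)

// sovLoop is the old shape of sovGenerated: count 7-bit groups by shifting.
func sovLoop(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// sovBits is the regenerated shape: ceil(bitlen(x)/7); x|1 makes zero take one byte.
func sovBits(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1} {
		fmt.Println(x, sovLoop(x), sovBits(x)) // both columns agree
	}
}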
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } -func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{0} +} +func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressPath.Merge(m, src) +} +func (m *HTTPIngressPath) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressPath) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo + +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{1} +} +func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src) +} +func (m *HTTPIngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo + +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{2} +} +func (m *Ingress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Ingress) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ingress.Merge(m, src) +} +func (m *Ingress) XXX_Size() int { + return m.Size() +} +func (m *Ingress) XXX_DiscardUnknown() { + xxx_messageInfo_Ingress.DiscardUnknown(m) +} + +var xxx_messageInfo_Ingress proto.InternalMessageInfo + +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{3} +} +func (m *IngressBackend) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressBackend) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressBackend.Merge(m, src) +} +func (m *IngressBackend) XXX_Size() int { + return m.Size() +} +func (m *IngressBackend) XXX_DiscardUnknown() { + 
xxx_messageInfo_IngressBackend.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressBackend proto.InternalMessageInfo + +func (m *IngressList) Reset() { *m = IngressList{} } +func (*IngressList) ProtoMessage() {} +func (*IngressList) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{4} +} +func (m *IngressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressList.Merge(m, src) +} +func (m *IngressList) XXX_Size() int { + return m.Size() +} +func (m *IngressList) XXX_DiscardUnknown() { + xxx_messageInfo_IngressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressList proto.InternalMessageInfo -func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } -func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{5} +} +func (m *IngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRule.Merge(m, src) +} +func (m *IngressRule) XXX_Size() int { + return m.Size() +} +func (m *IngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRule.DiscardUnknown(m) +} -func (m *Ingress) Reset() { *m = Ingress{} } -func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_IngressRule proto.InternalMessageInfo -func (m *IngressBackend) Reset() { *m = IngressBackend{} } -func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func (*IngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{6} +} +func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRuleValue.Merge(m, src) +} +func (m *IngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *IngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRuleValue.DiscardUnknown(m) +} -func (m *IngressList) Reset() { *m = IngressList{} } -func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo -func (m *IngressRule) Reset() { *m = IngressRule{} } -func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } 
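The wrappers above route XXX_Marshal through MarshalToSizedBuffer, which the regenerated marshalers below implement by writing fields into the tail of a pre-sized buffer, last field first. A minimal sketch of that technique using a hypothetical one-field Item message (not one of the generated types):

package main

import "fmt"

// sizeVarint plays the role of sovGenerated: bytes needed to encode v as a varint.
func sizeVarint(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

// encodeVarint mirrors the regenerated encodeVarintGenerated: it receives the
// offset just past where the value ends, writes the varint backwards from
// there, and returns the new, smaller offset.
func encodeVarint(buf []byte, offset int, v uint64) int {
	offset -= sizeVarint(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

// Item is a hypothetical message with a single field: string path = 1.
type Item struct{ Path string }

func (m *Item) Size() int {
	l := len(m.Path)
	return 1 + sizeVarint(uint64(l)) + l // tag + length varint + bytes
}

// MarshalToSizedBuffer fills buf from the end toward the front, last field
// first, and returns how many bytes it wrote, the same contract as the
// generated MarshalToSizedBuffer methods in this diff.
func (m *Item) MarshalToSizedBuffer(buf []byte) (int, error) {
	i := len(buf)
	i -= len(m.Path)
	copy(buf[i:], m.Path)
	i = encodeVarint(buf, i, uint64(len(m.Path)))
	i--
	buf[i] = 0xa // field 1, wire type 2 (length-delimited)
	return len(buf) - i, nil
}

// Marshal allocates an exactly sized buffer, so the backward write ends at
// index 0 and buf[:n] is the whole encoding.
func (m *Item) Marshal() ([]byte, error) {
	size := m.Size()
	buf := make([]byte, size)
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}

func main() {
	b, _ := (&Item{Path: "/healthz"}).Marshal()
	fmt.Printf("% x\n", b) // 0a 08 2f 68 65 61 6c 74 68 7a
}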
+func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{7} +} +func (m *IngressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressSpec.Merge(m, src) +} +func (m *IngressSpec) XXX_Size() int { + return m.Size() +} +func (m *IngressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IngressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressSpec proto.InternalMessageInfo -func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } -func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{8} +} +func (m *IngressStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressStatus.Merge(m, src) +} +func (m *IngressStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressStatus.DiscardUnknown(m) +} -func (m *IngressSpec) Reset() { *m = IngressSpec{} } -func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_IngressStatus proto.InternalMessageInfo -func (m *IngressStatus) Reset() { *m = IngressStatus{} } -func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{9} +} +func (m *IngressTLS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressTLS) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressTLS.Merge(m, src) +} +func (m *IngressTLS) XXX_Size() int { + return m.Size() +} +func (m *IngressTLS) XXX_DiscardUnknown() { + xxx_messageInfo_IngressTLS.DiscardUnknown(m) +} -func (m *IngressTLS) Reset() { *m = IngressTLS{} } -func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func init() { proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath") @@ -114,10 +335,70 @@ func init() { proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus") proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS") } + +func init() { + 
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto", fileDescriptor_5bea11de0ceb8f53) +} + +var fileDescriptor_5bea11de0ceb8f53 = []byte{ + // 812 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcf, 0x6e, 0xfb, 0x44, + 0x10, 0x8e, 0xf3, 0xa7, 0x69, 0xd7, 0xfd, 0xa7, 0xa5, 0x87, 0xa8, 0x12, 0x6e, 0xe4, 0x03, 0x2a, + 0x88, 0xae, 0x69, 0x0a, 0x88, 0xb3, 0x0f, 0xa8, 0x15, 0x81, 0x86, 0x75, 0x84, 0x10, 0xe2, 0xd0, + 0x8d, 0xb3, 0x38, 0x26, 0x89, 0x6d, 0x76, 0xd7, 0x41, 0xdc, 0x78, 0x01, 0x04, 0x4f, 0xc1, 0x99, + 0x23, 0x8f, 0xd0, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0xbc, 0x07, 0x42, 0xbb, 0xde, 0xda, 0x4e, 0xd2, + 0xfe, 0x6a, 0xfd, 0x6e, 0xde, 0x9d, 0xf9, 0xbe, 0xd9, 0x99, 0xf9, 0x66, 0x0c, 0x3e, 0x9f, 0x7e, + 0xc6, 0x51, 0x18, 0x3b, 0xd3, 0x74, 0x44, 0x59, 0x44, 0x05, 0xe5, 0xce, 0x82, 0x46, 0xe3, 0x98, + 0x39, 0xda, 0x40, 0x92, 0xd0, 0x89, 0xa8, 0xf8, 0x39, 0x66, 0xd3, 0x30, 0x0a, 0x9c, 0xc5, 0xf9, + 0x88, 0x0a, 0x72, 0xee, 0x04, 0x34, 0xa2, 0x8c, 0x08, 0x3a, 0x46, 0x09, 0x8b, 0x45, 0x0c, 0xdf, + 0xcd, 0xdc, 0x11, 0x49, 0x42, 0x54, 0xb8, 0x23, 0xed, 0x7e, 0x7c, 0x16, 0x84, 0x62, 0x92, 0x8e, + 0x90, 0x1f, 0xcf, 0x9d, 0x20, 0x0e, 0x62, 0x47, 0xa1, 0x46, 0xe9, 0x0f, 0xea, 0xa4, 0x0e, 0xea, + 0x2b, 0x63, 0x3b, 0xb6, 0x4b, 0xc1, 0xfd, 0x98, 0x51, 0x67, 0xb1, 0x11, 0xf1, 0xf8, 0xe3, 0xc2, + 0x67, 0x4e, 0xfc, 0x49, 0x18, 0x51, 0xf6, 0x8b, 0x93, 0x4c, 0x03, 0x79, 0xc1, 0x9d, 0x39, 0x15, + 0xe4, 0x39, 0x94, 0xf3, 0x12, 0x8a, 0xa5, 0x91, 0x08, 0xe7, 0x74, 0x03, 0xf0, 0xe9, 0x6b, 0x00, + 0xee, 0x4f, 0xe8, 0x9c, 0x6c, 0xe0, 0x2e, 0x5e, 0xc2, 0xa5, 0x22, 0x9c, 0x39, 0x61, 0x24, 0xb8, + 0x60, 0xeb, 0x20, 0xfb, 0x37, 0x03, 0x1c, 0x5c, 0x0e, 0x87, 0x83, 0xab, 0x28, 0x60, 0x94, 0xf3, + 0x01, 0x11, 0x13, 0xd8, 0x05, 0xcd, 0x84, 0x88, 0x49, 0xc7, 0xe8, 0x1a, 0xa7, 0x3b, 0xee, 0xee, + 0xed, 0xc3, 0x49, 0x6d, 0xf9, 0x70, 0xd2, 0x94, 0x36, 0xac, 0x2c, 0xf0, 0x5b, 0xd0, 0x1e, 0x11, + 0x7f, 0x4a, 0xa3, 0x71, 0xa7, 0xde, 0x35, 0x4e, 0xcd, 0xde, 0x19, 0x7a, 0x63, 0x37, 0x90, 0xa6, + 0x77, 0x33, 0x90, 0x7b, 0xa0, 0x39, 0xdb, 0xfa, 0x02, 0x3f, 0xd1, 0xd9, 0x53, 0x70, 0x54, 0x7a, + 0x0e, 0x4e, 0x67, 0xf4, 0x1b, 0x32, 0x4b, 0x29, 0xf4, 0x40, 0x4b, 0x46, 0xe6, 0x1d, 0xa3, 0xdb, + 0x38, 0x35, 0x7b, 0xe8, 0x95, 0x78, 0x6b, 0x29, 0xb9, 0x7b, 0x3a, 0x60, 0x4b, 0x9e, 0x38, 0xce, + 0xb8, 0xec, 0xdf, 0xeb, 0xa0, 0xad, 0xbd, 0xe0, 0x0d, 0xd8, 0x96, 0x1d, 0x1c, 0x13, 0x41, 0x54, + 0xe2, 0x66, 0xef, 0xa3, 0x52, 0x8c, 0xbc, 0xa0, 0x28, 0x99, 0x06, 0xf2, 0x82, 0x23, 0xe9, 0x8d, + 0x16, 0xe7, 0xe8, 0x7a, 0xf4, 0x23, 0xf5, 0xc5, 0x97, 0x54, 0x10, 0x17, 0xea, 0x28, 0xa0, 0xb8, + 0xc3, 0x39, 0x2b, 0xec, 0x83, 0x26, 0x4f, 0xa8, 0xaf, 0x2b, 0xf6, 0x41, 0xb5, 0x8a, 0x79, 0x09, + 0xf5, 0x8b, 0x16, 0xc8, 0x13, 0x56, 0x2c, 0x70, 0x08, 0xb6, 0xb8, 0x20, 0x22, 0xe5, 0x9d, 0x86, + 0xe2, 0xfb, 0xb0, 0x22, 0x9f, 0xc2, 0xb8, 0xfb, 0x9a, 0x71, 0x2b, 0x3b, 0x63, 0xcd, 0x65, 0xff, + 0x65, 0x80, 0xfd, 0xd5, 0x5e, 0xc1, 0x4f, 0x80, 0xc9, 0x29, 0x5b, 0x84, 0x3e, 0xfd, 0x8a, 0xcc, + 0xa9, 0x16, 0xc5, 0x3b, 0x1a, 0x6f, 0x7a, 0x85, 0x09, 0x97, 0xfd, 0x60, 0x90, 0xc3, 0x06, 0x31, + 0x13, 0x3a, 0xe9, 0x97, 0x4b, 0x2a, 0x35, 0x8a, 0x32, 0x8d, 0xa2, 0xab, 0x48, 0x5c, 0x33, 0x4f, + 0xb0, 0x30, 0x0a, 0x36, 0x02, 0x49, 0x32, 0x5c, 0x66, 0xb6, 0xff, 0x36, 0x80, 0xa9, 0x9f, 0xdc, + 0x0f, 0xb9, 0x80, 0xdf, 0x6f, 0x34, 0x12, 0x55, 0x6b, 0xa4, 0x44, 0xab, 0x36, 0x1e, 0xea, 0x98, + 0xdb, 0x4f, 0x37, 0xa5, 0x26, 0x7e, 0x01, 0x5a, 0xa1, 0xa0, 0x73, 0xde, 0xa9, 0x2b, 0x1d, 
0xbe, + 0x57, 0x51, 0xf7, 0xb9, 0xfe, 0xae, 0x24, 0x18, 0x67, 0x1c, 0xf6, 0x9f, 0xc5, 0xd3, 0xa5, 0xd2, + 0xe5, 0xe0, 0x4d, 0x62, 0x2e, 0xd6, 0x07, 0xef, 0x32, 0xe6, 0x02, 0x2b, 0x0b, 0x4c, 0xc1, 0x61, + 0xb8, 0x36, 0x1a, 0xba, 0xb4, 0x4e, 0xb5, 0x97, 0xe4, 0x30, 0xb7, 0xa3, 0xe9, 0x0f, 0xd7, 0x2d, + 0x78, 0x23, 0x84, 0x4d, 0xc1, 0x86, 0x17, 0xfc, 0x1a, 0x34, 0x27, 0x42, 0x24, 0xba, 0xc6, 0x17, + 0xd5, 0x07, 0xb2, 0x78, 0xc2, 0xb6, 0xca, 0x6e, 0x38, 0x1c, 0x60, 0x45, 0x65, 0xff, 0x57, 0xd4, + 0xc3, 0xcb, 0x34, 0x9e, 0xaf, 0x19, 0xe3, 0x6d, 0xd6, 0x8c, 0xf9, 0xdc, 0x8a, 0x81, 0x97, 0xa0, + 0x21, 0x66, 0x4f, 0x0d, 0x7c, 0xbf, 0x1a, 0xe3, 0xb0, 0xef, 0xb9, 0xa6, 0x2e, 0x58, 0x63, 0xd8, + 0xf7, 0xb0, 0xa4, 0x80, 0xd7, 0xa0, 0xc5, 0xd2, 0x19, 0x95, 0x23, 0xd8, 0xa8, 0x3e, 0xd2, 0x32, + 0xff, 0x42, 0x10, 0xf2, 0xc4, 0x71, 0xc6, 0x63, 0xff, 0x04, 0xf6, 0x56, 0xe6, 0x14, 0xde, 0x80, + 0xdd, 0x59, 0x4c, 0xc6, 0x2e, 0x99, 0x91, 0xc8, 0xa7, 0x4c, 0x97, 0x61, 0x45, 0x75, 0xf2, 0x6f, + 0xa5, 0xe4, 0x5b, 0xf2, 0xd3, 0x53, 0x7e, 0xa4, 0x83, 0xec, 0x96, 0x6d, 0x78, 0x85, 0xd1, 0x26, + 0x00, 0x14, 0x39, 0xc2, 0x13, 0xd0, 0x92, 0x3a, 0xcb, 0xd6, 0xec, 0x8e, 0xbb, 0x23, 0x5f, 0x28, + 0xe5, 0xc7, 0x71, 0x76, 0x0f, 0x7b, 0x00, 0x70, 0xea, 0x33, 0x2a, 0xd4, 0x32, 0xa8, 0x2b, 0xa1, + 0xe6, 0x6b, 0xcf, 0xcb, 0x2d, 0xb8, 0xe4, 0xe5, 0x9e, 0xdd, 0x3e, 0x5a, 0xb5, 0xbb, 0x47, 0xab, + 0x76, 0xff, 0x68, 0xd5, 0x7e, 0x5d, 0x5a, 0xc6, 0xed, 0xd2, 0x32, 0xee, 0x96, 0x96, 0x71, 0xbf, + 0xb4, 0x8c, 0x7f, 0x96, 0x96, 0xf1, 0xc7, 0xbf, 0x56, 0xed, 0xbb, 0xb6, 0x2e, 0xd3, 0xff, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xdb, 0x8a, 0xe4, 0xd8, 0x21, 0x08, 0x00, 0x00, +} + func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -125,29 +406,37 @@ func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n1, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -155,29 +444,36 @@ func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Paths) > 0 { - for _, msg := range m.Paths { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -185,41 +481,52 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) { } func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n3, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n4, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressBackend) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -227,29 +534,37 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) { } func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n5, err := m.ServicePort.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -257,37 +572,46 @@ func (m *IngressList) Marshal() (dAtA 
[]byte, err error) { } func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -295,29 +619,37 @@ func (m *IngressRule) Marshal() (dAtA []byte, err error) { } func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n7, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -325,27 +657,34 @@ func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.HTTP != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n8, err := m.HTTP.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *IngressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return 
nil, err } @@ -353,51 +692,62 @@ func (m *IngressSpec) Marshal() (dAtA []byte, err error) { } func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Backend != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n9, err := m.Backend.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i += n9 } if len(m.TLS) > 0 { - for _, msg := range m.TLS { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Backend != nil { + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *IngressStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -405,25 +755,32 @@ func (m *IngressStatus) Marshal() (dAtA []byte, err error) { } func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n10, err := m.LoadBalancer.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *IngressTLS) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -431,42 +788,47 @@ func (m *IngressTLS) Marshal() (dAtA []byte, err error) { } func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0x12 if len(m.Hosts) > 0 { - for _, s := range m.Hosts { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -477,6 +839,9 @@ func (m *HTTPIngressPath) Size() (n int) { } func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Paths) > 0 { @@ -489,6 +854,9 @@ func (m *HTTPIngressRuleValue) Size() (n int) { } func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -501,6 +869,9 @@ func (m *Ingress) Size() (n int) { } func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ServiceName) @@ -511,6 +882,9 @@ func (m *IngressBackend) Size() (n int) { } func (m *IngressList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -525,6 +899,9 @@ func (m *IngressList) Size() (n int) { } func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Host) @@ -535,6 +912,9 @@ func (m *IngressRule) Size() (n int) { } func (m *IngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.HTTP != nil { @@ -545,6 +925,9 @@ func (m *IngressRuleValue) Size() (n int) { } func (m *IngressSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Backend != nil { @@ -567,6 +950,9 @@ func (m *IngressSpec) Size() (n int) { } func (m *IngressStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.LoadBalancer.Size() @@ -575,6 +961,9 @@ func (m *IngressStatus) Size() (n int) { } func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Hosts) > 0 { @@ -589,14 +978,7 @@ func (m *IngressTLS) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -616,8 +998,13 @@ func (this *HTTPIngressRuleValue) String() string { if this == nil { return "nil" } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + `,`, + `Paths:` + repeatedStringForPaths + `,`, `}`, }, "") return s @@ -627,7 +1014,7 @@ func (this *Ingress) String() string { return "nil" } s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -640,7 +1027,7 @@ func (this *IngressBackend) String() string { } s := strings.Join([]string{`&IngressBackend{`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -649,9 +1036,14 @@ func (this *IngressList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -672,7 +1064,7 @@ func (this *IngressRuleValue) String() string { return "nil" } s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, `}`, }, "") return s @@ -681,10 +1073,20 @@ func (this *IngressSpec) String() string { if this == nil { return "nil" } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," + } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -694,7 +1096,7 @@ func (this *IngressStatus) String() string { return "nil" } s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_api_core_v1.LoadBalancerStatus", 1), `&`, ``, 1) + 
`,`, + `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -733,7 +1135,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -761,7 +1163,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -771,6 +1173,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -790,7 +1195,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -799,6 +1204,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -815,6 +1223,9 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -842,7 +1253,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -870,7 +1281,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -879,6 +1290,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -896,6 +1310,9 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -923,7 +1340,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -951,7 +1368,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -960,6 +1377,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -981,7 +1401,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -990,6 +1410,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1011,7 +1434,7 @@ func (m *Ingress) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1020,6 +1443,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1036,6 +1462,9 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1063,7 +1492,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1091,7 +1520,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1101,6 +1530,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1120,7 +1552,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1129,6 +1561,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1145,6 +1580,9 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1172,7 +1610,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1200,7 +1638,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1209,6 +1647,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1230,7 +1671,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1239,6 +1680,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1256,6 +1700,9 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1283,7 +1730,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1311,7 +1758,7 @@ func (m 
*IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1321,6 +1768,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1340,7 +1790,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1349,6 +1799,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1365,6 +1818,9 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1392,7 +1848,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1420,7 +1876,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1429,6 +1885,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1448,6 +1907,9 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1475,7 +1937,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1503,7 +1965,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1512,6 +1974,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1536,7 +2001,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1545,6 +2010,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1567,7 +2035,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1576,6 +2044,9 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1593,6 +2064,9 
@@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1620,7 +2094,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1648,7 +2122,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1657,6 +2131,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1673,6 +2150,9 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1700,7 +2180,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1728,7 +2208,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1738,6 +2218,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1757,7 +2240,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1767,6 +2250,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1781,6 +2267,9 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1847,10 +2336,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1879,6 +2371,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1897,62 +2392,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 812 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcf, 0x6e, 0xfb, 0x44, - 0x10, 0x8e, 0xf3, 0xa7, 0x69, 0xd7, 0xfd, 0xa7, 0xa5, 0x87, 0xa8, 0x12, 0x6e, 0xe4, 0x03, 0x2a, - 
0x88, 0xae, 0x69, 0x0a, 0x88, 0xb3, 0x0f, 0xa8, 0x15, 0x81, 0x86, 0x75, 0x84, 0x10, 0xe2, 0xd0, - 0x8d, 0xb3, 0x38, 0x26, 0x89, 0x6d, 0x76, 0xd7, 0x41, 0xdc, 0x78, 0x01, 0x04, 0x4f, 0xc1, 0x99, - 0x23, 0x8f, 0xd0, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0xbc, 0x07, 0x42, 0xbb, 0xde, 0xda, 0x4e, 0xd2, - 0xfe, 0x6a, 0xfd, 0x6e, 0xde, 0x9d, 0xf9, 0xbe, 0xd9, 0x99, 0xf9, 0x66, 0x0c, 0x3e, 0x9f, 0x7e, - 0xc6, 0x51, 0x18, 0x3b, 0xd3, 0x74, 0x44, 0x59, 0x44, 0x05, 0xe5, 0xce, 0x82, 0x46, 0xe3, 0x98, - 0x39, 0xda, 0x40, 0x92, 0xd0, 0x89, 0xa8, 0xf8, 0x39, 0x66, 0xd3, 0x30, 0x0a, 0x9c, 0xc5, 0xf9, - 0x88, 0x0a, 0x72, 0xee, 0x04, 0x34, 0xa2, 0x8c, 0x08, 0x3a, 0x46, 0x09, 0x8b, 0x45, 0x0c, 0xdf, - 0xcd, 0xdc, 0x11, 0x49, 0x42, 0x54, 0xb8, 0x23, 0xed, 0x7e, 0x7c, 0x16, 0x84, 0x62, 0x92, 0x8e, - 0x90, 0x1f, 0xcf, 0x9d, 0x20, 0x0e, 0x62, 0x47, 0xa1, 0x46, 0xe9, 0x0f, 0xea, 0xa4, 0x0e, 0xea, - 0x2b, 0x63, 0x3b, 0xb6, 0x4b, 0xc1, 0xfd, 0x98, 0x51, 0x67, 0xb1, 0x11, 0xf1, 0xf8, 0xe3, 0xc2, - 0x67, 0x4e, 0xfc, 0x49, 0x18, 0x51, 0xf6, 0x8b, 0x93, 0x4c, 0x03, 0x79, 0xc1, 0x9d, 0x39, 0x15, - 0xe4, 0x39, 0x94, 0xf3, 0x12, 0x8a, 0xa5, 0x91, 0x08, 0xe7, 0x74, 0x03, 0xf0, 0xe9, 0x6b, 0x00, - 0xee, 0x4f, 0xe8, 0x9c, 0x6c, 0xe0, 0x2e, 0x5e, 0xc2, 0xa5, 0x22, 0x9c, 0x39, 0x61, 0x24, 0xb8, - 0x60, 0xeb, 0x20, 0xfb, 0x37, 0x03, 0x1c, 0x5c, 0x0e, 0x87, 0x83, 0xab, 0x28, 0x60, 0x94, 0xf3, - 0x01, 0x11, 0x13, 0xd8, 0x05, 0xcd, 0x84, 0x88, 0x49, 0xc7, 0xe8, 0x1a, 0xa7, 0x3b, 0xee, 0xee, - 0xed, 0xc3, 0x49, 0x6d, 0xf9, 0x70, 0xd2, 0x94, 0x36, 0xac, 0x2c, 0xf0, 0x5b, 0xd0, 0x1e, 0x11, - 0x7f, 0x4a, 0xa3, 0x71, 0xa7, 0xde, 0x35, 0x4e, 0xcd, 0xde, 0x19, 0x7a, 0x63, 0x37, 0x90, 0xa6, - 0x77, 0x33, 0x90, 0x7b, 0xa0, 0x39, 0xdb, 0xfa, 0x02, 0x3f, 0xd1, 0xd9, 0x53, 0x70, 0x54, 0x7a, - 0x0e, 0x4e, 0x67, 0xf4, 0x1b, 0x32, 0x4b, 0x29, 0xf4, 0x40, 0x4b, 0x46, 0xe6, 0x1d, 0xa3, 0xdb, - 0x38, 0x35, 0x7b, 0xe8, 0x95, 0x78, 0x6b, 0x29, 0xb9, 0x7b, 0x3a, 0x60, 0x4b, 0x9e, 0x38, 0xce, - 0xb8, 0xec, 0xdf, 0xeb, 0xa0, 0xad, 0xbd, 0xe0, 0x0d, 0xd8, 0x96, 0x1d, 0x1c, 0x13, 0x41, 0x54, - 0xe2, 0x66, 0xef, 0xa3, 0x52, 0x8c, 0xbc, 0xa0, 0x28, 0x99, 0x06, 0xf2, 0x82, 0x23, 0xe9, 0x8d, - 0x16, 0xe7, 0xe8, 0x7a, 0xf4, 0x23, 0xf5, 0xc5, 0x97, 0x54, 0x10, 0x17, 0xea, 0x28, 0xa0, 0xb8, - 0xc3, 0x39, 0x2b, 0xec, 0x83, 0x26, 0x4f, 0xa8, 0xaf, 0x2b, 0xf6, 0x41, 0xb5, 0x8a, 0x79, 0x09, - 0xf5, 0x8b, 0x16, 0xc8, 0x13, 0x56, 0x2c, 0x70, 0x08, 0xb6, 0xb8, 0x20, 0x22, 0xe5, 0x9d, 0x86, - 0xe2, 0xfb, 0xb0, 0x22, 0x9f, 0xc2, 0xb8, 0xfb, 0x9a, 0x71, 0x2b, 0x3b, 0x63, 0xcd, 0x65, 0xff, - 0x65, 0x80, 0xfd, 0xd5, 0x5e, 0xc1, 0x4f, 0x80, 0xc9, 0x29, 0x5b, 0x84, 0x3e, 0xfd, 0x8a, 0xcc, - 0xa9, 0x16, 0xc5, 0x3b, 0x1a, 0x6f, 0x7a, 0x85, 0x09, 0x97, 0xfd, 0x60, 0x90, 0xc3, 0x06, 0x31, - 0x13, 0x3a, 0xe9, 0x97, 0x4b, 0x2a, 0x35, 0x8a, 0x32, 0x8d, 0xa2, 0xab, 0x48, 0x5c, 0x33, 0x4f, - 0xb0, 0x30, 0x0a, 0x36, 0x02, 0x49, 0x32, 0x5c, 0x66, 0xb6, 0xff, 0x36, 0x80, 0xa9, 0x9f, 0xdc, - 0x0f, 0xb9, 0x80, 0xdf, 0x6f, 0x34, 0x12, 0x55, 0x6b, 0xa4, 0x44, 0xab, 0x36, 0x1e, 0xea, 0x98, - 0xdb, 0x4f, 0x37, 0xa5, 0x26, 0x7e, 0x01, 0x5a, 0xa1, 0xa0, 0x73, 0xde, 0xa9, 0x2b, 0x1d, 0xbe, - 0x57, 0x51, 0xf7, 0xb9, 0xfe, 0xae, 0x24, 0x18, 0x67, 0x1c, 0xf6, 0x9f, 0xc5, 0xd3, 0xa5, 0xd2, - 0xe5, 0xe0, 0x4d, 0x62, 0x2e, 0xd6, 0x07, 0xef, 0x32, 0xe6, 0x02, 0x2b, 0x0b, 0x4c, 0xc1, 0x61, - 0xb8, 0x36, 0x1a, 0xba, 0xb4, 0x4e, 0xb5, 0x97, 0xe4, 0x30, 0xb7, 0xa3, 0xe9, 0x0f, 0xd7, 0x2d, - 0x78, 0x23, 0x84, 0x4d, 0xc1, 0x86, 0x17, 0xfc, 0x1a, 0x34, 0x27, 0x42, 0x24, 0xba, 0xc6, 0x17, - 0xd5, 0x07, 0xb2, 0x78, 
0xc2, 0xb6, 0xca, 0x6e, 0x38, 0x1c, 0x60, 0x45, 0x65, 0xff, 0x57, 0xd4, - 0xc3, 0xcb, 0x34, 0x9e, 0xaf, 0x19, 0xe3, 0x6d, 0xd6, 0x8c, 0xf9, 0xdc, 0x8a, 0x81, 0x97, 0xa0, - 0x21, 0x66, 0x4f, 0x0d, 0x7c, 0xbf, 0x1a, 0xe3, 0xb0, 0xef, 0xb9, 0xa6, 0x2e, 0x58, 0x63, 0xd8, - 0xf7, 0xb0, 0xa4, 0x80, 0xd7, 0xa0, 0xc5, 0xd2, 0x19, 0x95, 0x23, 0xd8, 0xa8, 0x3e, 0xd2, 0x32, - 0xff, 0x42, 0x10, 0xf2, 0xc4, 0x71, 0xc6, 0x63, 0xff, 0x04, 0xf6, 0x56, 0xe6, 0x14, 0xde, 0x80, - 0xdd, 0x59, 0x4c, 0xc6, 0x2e, 0x99, 0x91, 0xc8, 0xa7, 0x4c, 0x97, 0x61, 0x45, 0x75, 0xf2, 0x6f, - 0xa5, 0xe4, 0x5b, 0xf2, 0xd3, 0x53, 0x7e, 0xa4, 0x83, 0xec, 0x96, 0x6d, 0x78, 0x85, 0xd1, 0x26, - 0x00, 0x14, 0x39, 0xc2, 0x13, 0xd0, 0x92, 0x3a, 0xcb, 0xd6, 0xec, 0x8e, 0xbb, 0x23, 0x5f, 0x28, - 0xe5, 0xc7, 0x71, 0x76, 0x0f, 0x7b, 0x00, 0x70, 0xea, 0x33, 0x2a, 0xd4, 0x32, 0xa8, 0x2b, 0xa1, - 0xe6, 0x6b, 0xcf, 0xcb, 0x2d, 0xb8, 0xe4, 0xe5, 0x9e, 0xdd, 0x3e, 0x5a, 0xb5, 0xbb, 0x47, 0xab, - 0x76, 0xff, 0x68, 0xd5, 0x7e, 0x5d, 0x5a, 0xc6, 0xed, 0xd2, 0x32, 0xee, 0x96, 0x96, 0x71, 0xbf, - 0xb4, 0x8c, 0x7f, 0x96, 0x96, 0xf1, 0xc7, 0xbf, 0x56, 0xed, 0xbb, 0xb6, 0x2e, 0xd3, 0xff, 0x01, - 0x00, 0x00, 0xff, 0xff, 0xdb, 0x8a, 0xe4, 0xd8, 0x21, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index 7df19138e242c..a72545c81342c 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -64,17 +64,17 @@ message HTTPIngressRuleValue { // based virtual hosting etc. message Ingress { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; } @@ -91,7 +91,7 @@ message IngressBackend { // IngressList is a collection of Ingress. message IngressList { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go index 63bf2d52a3d1f..37277bf8169a9 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -32,17 +32,17 @@ import ( type Ingress struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -53,7 +53,7 @@ type Ingress struct { type IngressList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index 9e05b7f1bbfbf..4ae5e32d01413 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -48,9 +48,9 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -69,7 +69,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of Ingress.", } diff --git a/vendor/k8s.io/api/node/v1alpha1/BUILD.bazel b/vendor/k8s.io/api/node/v1alpha1/BUILD.bazel index 04a5fd92de47e..52c9c7d5293ff 100644 --- a/vendor/k8s.io/api/node/v1alpha1/BUILD.bazel +++ b/vendor/k8s.io/api/node/v1alpha1/BUILD.bazel @@ -15,6 +15,9 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go index e9c502a4eafde..34f4dd6deca87 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go @@ -17,31 +17,24 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto - - It has these top-level messages: - RuntimeClass - RuntimeClassList - RuntimeClassSpec -*/ package v1alpha1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" math "math" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -55,27 +48,264 @@ var _ = math.Inf // proto package needs to be updated. 
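The regenerated gogo/protobuf code in the hunks above and below this point follows two mechanical patterns: marshaling now fills a pre-sized buffer from the end via MarshalToSizedBuffer, and every unmarshal path accumulates varints as uint64(b&0x7F) << shift and then re-checks the derived offsets (postIndex, iNdEx + skippy) for negative values, so a hostile length prefix can no longer wrap the index past the end of the buffer. The standalone Go sketch below is illustrative only (none of its identifiers come from this diff); it applies the same decode-and-guard shape to a hand-rolled length-delimited field.

package main

import (
	"errors"
	"fmt"
)

var (
	errOverflow      = errors.New("varint overflows a 64-bit integer")
	errInvalidLength = errors.New("negative or out-of-range length")
)

// readUvarint mirrors the generated style: cap the shift at 64 bits and mask
// each byte before accumulating it into the result.
func readUvarint(data []byte, index int) (value uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errOverflow
		}
		if index >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[index]
		index++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return value, index, nil
}

// readLengthPrefixed applies the two checks this vendor bump adds after every
// length computation: the length and the resulting end offset must both be
// non-negative before they are compared against len(data).
func readLengthPrefixed(data []byte, index int) ([]byte, int, error) {
	v, index, err := readUvarint(data, index)
	if err != nil {
		return nil, 0, err
	}
	length := int(v)
	if length < 0 {
		return nil, 0, errInvalidLength
	}
	end := index + length
	if end < 0 { // a huge length can overflow the addition and wrap negative
		return nil, 0, errInvalidLength
	}
	if end > len(data) {
		return nil, 0, errors.New("unexpected EOF")
	}
	return data[index:end], end, nil
}

func main() {
	payload := append([]byte{0x05}, []byte("hello")...) // length prefix 5 + body
	field, _, err := readLengthPrefixed(payload, 0)
	fmt.Println(string(field), err)
}

The same ordering concern explains the reordered skipGenerated in both generated files: the length is validated before iNdEx += length, and the resulting index is checked again afterwards.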
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } -func (*RuntimeClass) ProtoMessage() {} -func (*RuntimeClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Overhead) Reset() { *m = Overhead{} } +func (*Overhead) ProtoMessage() {} +func (*Overhead) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{0} +} +func (m *Overhead) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Overhead) XXX_Merge(src proto.Message) { + xxx_messageInfo_Overhead.Merge(m, src) +} +func (m *Overhead) XXX_Size() int { + return m.Size() +} +func (m *Overhead) XXX_DiscardUnknown() { + xxx_messageInfo_Overhead.DiscardUnknown(m) +} + +var xxx_messageInfo_Overhead proto.InternalMessageInfo + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{1} +} +func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClass.Merge(m, src) +} +func (m *RuntimeClass) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClass) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClass.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{2} +} +func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassList.Merge(m, src) +} +func (m *RuntimeClassList) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassList) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo + +func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } +func (*RuntimeClassSpec) ProtoMessage() {} +func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{3} +} +func (m *RuntimeClassSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassSpec.Merge(m, src) +} +func (m *RuntimeClassSpec) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassSpec 
proto.InternalMessageInfo -func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } -func (*RuntimeClassList) ProtoMessage() {} -func (*RuntimeClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *Scheduling) Reset() { *m = Scheduling{} } +func (*Scheduling) ProtoMessage() {} +func (*Scheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{4} +} +func (m *Scheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scheduling.Merge(m, src) +} +func (m *Scheduling) XXX_Size() int { + return m.Size() +} +func (m *Scheduling) XXX_DiscardUnknown() { + xxx_messageInfo_Scheduling.DiscardUnknown(m) +} -func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } -func (*RuntimeClassSpec) ProtoMessage() {} -func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_Scheduling proto.InternalMessageInfo func init() { + proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1alpha1.Overhead") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1alpha1.Overhead.PodFixedEntry") proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1alpha1.RuntimeClass") proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassList") proto.RegisterType((*RuntimeClassSpec)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassSpec") + proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1alpha1.Scheduling") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1alpha1.Scheduling.NodeSelectorEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptor_82a78945ab308218) +} + +var fileDescriptor_82a78945ab308218 = []byte{ + // 698 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xbf, 0x6f, 0xd3, 0x4e, + 0x14, 0xcf, 0xa5, 0xad, 0x94, 0x5e, 0xd2, 0xaa, 0x5f, 0x7f, 0x2b, 0x14, 0x65, 0x70, 0x2a, 0x0b, + 0xa1, 0x0a, 0xa9, 0x67, 0x5a, 0xa1, 0xaa, 0x62, 0x00, 0x61, 0x7e, 0x08, 0x44, 0x69, 0xc1, 0x2d, + 0x0b, 0x62, 0xe0, 0x62, 0x3f, 0x1c, 0x13, 0xdb, 0x67, 0xd9, 0xe7, 0x88, 0x6c, 0x88, 0x05, 0x89, + 0x89, 0x89, 0xff, 0x06, 0xe6, 0x8e, 0x9d, 0x50, 0xa7, 0x96, 0x86, 0xff, 0x81, 0x81, 0x09, 0x9d, + 0x7d, 0x4e, 0x9c, 0xa4, 0xa1, 0x61, 0xf3, 0xdd, 0x7d, 0x7e, 0xdc, 0xfb, 0xbc, 0x7b, 0xc6, 0x77, + 0x3b, 0x3b, 0x31, 0x71, 0x99, 0xde, 0x49, 0x5a, 0x10, 0x05, 0xc0, 0x21, 0xd6, 0xbb, 0x10, 0xd8, + 0x2c, 0xd2, 0xe5, 0x01, 0x0d, 0x5d, 0x3d, 0x60, 0x36, 0xe8, 0xdd, 0x4d, 0xea, 0x85, 0x6d, 0xba, + 0xa9, 0x3b, 0x10, 0x40, 0x44, 0x39, 0xd8, 0x24, 0x8c, 0x18, 0x67, 0x4a, 0x3d, 0x43, 0x12, 0x1a, + 0xba, 0x44, 0x20, 0x49, 0x8e, 0x6c, 0x6c, 0x38, 0x2e, 0x6f, 0x27, 0x2d, 0x62, 0x31, 0x5f, 0x77, + 0x98, 0xc3, 0xf4, 0x94, 0xd0, 0x4a, 0xde, 0xa4, 0xab, 0x74, 0x91, 0x7e, 0x65, 0x42, 0x0d, 0xad, + 0x60, 0x69, 0xb1, 0x48, 0x58, 0x8e, 0x9b, 0x35, 0x6e, 0x0e, 0x31, 0x3e, 0xb5, 0xda, 0x6e, 0x00, + 0x51, 0x4f, 0x0f, 0x3b, 0x4e, 0x4a, 0x8a, 0x20, 0x66, 0x49, 0x64, 0xc1, 0x3f, 0xb1, 0x62, 0xdd, + 0x07, 0x4e, 0x2f, 0xf2, 0xd2, 0xa7, 0xb1, 0xa2, 0x24, 0xe0, 0xae, 0x3f, 0x69, 0xb3, 0x7d, 0x19, + 0x21, 0xb6, 0xda, 0xe0, 0xd3, 0x71, 0x9e, 0x76, 0x5c, 
0xc6, 0x95, 0xfd, 0x2e, 0x44, 0x6d, 0xa0, + 0xb6, 0xf2, 0x1d, 0xe1, 0x4a, 0xc8, 0xec, 0x87, 0xee, 0x3b, 0xb0, 0xeb, 0x68, 0x6d, 0x6e, 0xbd, + 0xba, 0x75, 0x83, 0x4c, 0x8b, 0x98, 0xe4, 0x34, 0xf2, 0x4c, 0x52, 0x1e, 0x04, 0x3c, 0xea, 0x19, + 0x1f, 0xd1, 0xd1, 0x69, 0xb3, 0xd4, 0x3f, 0x6d, 0x56, 0xf2, 0xfd, 0xdf, 0xa7, 0xcd, 0xe6, 0x64, + 0xbe, 0xc4, 0x94, 0x91, 0xed, 0xba, 0x31, 0xff, 0x70, 0xf6, 0x57, 0xc8, 0x1e, 0xf5, 0xe1, 0xd3, + 0x59, 0x73, 0x63, 0x96, 0x0e, 0x90, 0xe7, 0x09, 0x0d, 0xb8, 0xcb, 0x7b, 0xe6, 0xa0, 0x96, 0x46, + 0x07, 0x2f, 0x8d, 0x5c, 0x52, 0x59, 0xc1, 0x73, 0x1d, 0xe8, 0xd5, 0xd1, 0x1a, 0x5a, 0x5f, 0x34, + 0xc5, 0xa7, 0x72, 0x1f, 0x2f, 0x74, 0xa9, 0x97, 0x40, 0xbd, 0xbc, 0x86, 0xd6, 0xab, 0x5b, 0xa4, + 0x50, 0xf7, 0xc0, 0x8b, 0x84, 0x1d, 0x27, 0x0d, 0x62, 0xd2, 0x2b, 0x23, 0xdf, 0x2a, 0xef, 0x20, + 0xed, 0x1b, 0xc2, 0x35, 0x33, 0x4b, 0xfd, 0x9e, 0x47, 0xe3, 0x58, 0x79, 0x8d, 0x2b, 0xa2, 0xcf, + 0x36, 0xe5, 0x34, 0x75, 0x1c, 0x4d, 0x75, 0x42, 0x3d, 0x26, 0x02, 0x4d, 0xba, 0x9b, 0x64, 0xbf, + 0xf5, 0x16, 0x2c, 0xfe, 0x14, 0x38, 0x35, 0x14, 0x19, 0x2a, 0x1e, 0xee, 0x99, 0x03, 0x55, 0x65, + 0x17, 0xcf, 0xc7, 0x21, 0x58, 0xf2, 0xee, 0xd7, 0xa7, 0xf7, 0xac, 0x78, 0xaf, 0x83, 0x10, 0x2c, + 0xa3, 0x26, 0x75, 0xe7, 0xc5, 0xca, 0x4c, 0x55, 0xb4, 0xaf, 0x08, 0xaf, 0x14, 0x81, 0xa2, 0x41, + 0xca, 0xab, 0x89, 0x22, 0xc8, 0x6c, 0x45, 0x08, 0x76, 0x5a, 0xc2, 0x4a, 0xfe, 0x2e, 0xf2, 0x9d, + 0x42, 0x01, 0x4f, 0xf0, 0x82, 0xcb, 0xc1, 0x8f, 0xeb, 0xe5, 0xf4, 0xd5, 0x5d, 0x9b, 0xad, 0x02, + 0x63, 0x49, 0x4a, 0x2e, 0x3c, 0x16, 0x64, 0x33, 0xd3, 0xd0, 0x7e, 0x8d, 0xdd, 0x5f, 0x94, 0xa6, + 0xdc, 0xc6, 0xcb, 0x72, 0x14, 0x1e, 0xd1, 0xc0, 0xf6, 0x20, 0xca, 0x9a, 0x6f, 0x5c, 0x91, 0x12, + 0xcb, 0xe6, 0xc8, 0xa9, 0x39, 0x86, 0x56, 0x76, 0x71, 0x85, 0xc9, 0x07, 0x2f, 0x63, 0xd6, 0x2e, + 0x1f, 0x0d, 0xa3, 0x26, 0xea, 0xcd, 0x57, 0xe6, 0x40, 0x41, 0x39, 0xc4, 0x58, 0x0c, 0xa4, 0x9d, + 0x78, 0x6e, 0xe0, 0xd4, 0xe7, 0x52, 0xbd, 0xab, 0xd3, 0xf5, 0x0e, 0x06, 0x58, 0x63, 0x59, 0x3c, + 0x82, 0xe1, 0xda, 0x2c, 0xe8, 0x68, 0x5f, 0xca, 0xb8, 0x70, 0xa4, 0x84, 0xb8, 0x26, 0x64, 0x0e, + 0xc0, 0x03, 0x8b, 0xb3, 0x48, 0x4e, 0xf4, 0xf6, 0x2c, 0x36, 0x64, 0xaf, 0x40, 0xcc, 0xe6, 0x7a, + 0x55, 0x06, 0x55, 0x2b, 0x1e, 0x99, 0x23, 0x0e, 0xca, 0x0b, 0x5c, 0xe5, 0xcc, 0x13, 0x3f, 0x18, + 0x97, 0x05, 0x79, 0x33, 0xd5, 0xa2, 0xa1, 0x98, 0x6c, 0xf1, 0x2a, 0x0e, 0x07, 0x30, 0xe3, 0x7f, + 0x29, 0x5c, 0x1d, 0xee, 0xc5, 0x66, 0x51, 0xa7, 0x71, 0x07, 0xff, 0x37, 0x71, 0x9f, 0x0b, 0x46, + 0x78, 0xb5, 0x38, 0xc2, 0x8b, 0x85, 0x91, 0x34, 0xc8, 0xd1, 0xb9, 0x5a, 0x3a, 0x3e, 0x57, 0x4b, + 0x27, 0xe7, 0x6a, 0xe9, 0x7d, 0x5f, 0x45, 0x47, 0x7d, 0x15, 0x1d, 0xf7, 0x55, 0x74, 0xd2, 0x57, + 0xd1, 0x8f, 0xbe, 0x8a, 0x3e, 0xff, 0x54, 0x4b, 0x2f, 0x2b, 0x79, 0x10, 0x7f, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xa8, 0x77, 0xef, 0x80, 0x9b, 0x06, 0x00, 0x00, +} + +func (m *Overhead) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodFixed) > 0 { + keysForPodFixed := make([]string, 0, len(m.PodFixed)) + for k := range m.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + for 
iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPodFixed[iNdEx]) + copy(dAtA[i:], keysForPodFixed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } + func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -83,33 +313,42 @@ func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -117,65 +356,188 @@ func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *RuntimeClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n3 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + return dAtA[:n], nil +} + +func (m 
*RuntimeClassSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.RuntimeHandler) + copy(dAtA[i:], m.RuntimeHandler) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RuntimeHandler))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RuntimeClassSpec) Marshal() (dAtA []byte, err error) { +func (m *Scheduling) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RuntimeClassSpec) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RuntimeHandler))) - i += copy(dAtA[i:], m.RuntimeHandler) - return i, nil + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base +} +func (m *Overhead) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodFixed) > 0 { + for k, v := range m.PodFixed { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n } + func (m *RuntimeClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -186,6 +548,9 @@ func (m *RuntimeClass) Size() (n int) { 
} func (m *RuntimeClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -200,32 +565,79 @@ func (m *RuntimeClassList) Size() (n int) { } func (m *RuntimeClassSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.RuntimeHandler) n += 1 + l + sovGenerated(uint64(l)) + if m.Overhead != nil { + l = m.Overhead.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break +func (m *Scheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } return n } + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *Overhead) String() string { + if this == nil { + return "nil" + } + keysForPodFixed := make([]string, 0, len(this.PodFixed)) + for k := range this.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForPodFixed { + mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForPodFixed += "}" + s := strings.Join([]string{`&Overhead{`, + `PodFixed:` + mapStringForPodFixed + `,`, + `}`, + }, "") + return s +} func (this *RuntimeClass) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&RuntimeClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RuntimeClassSpec", "RuntimeClassSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -235,9 +647,14 @@ func (this *RuntimeClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RuntimeClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RuntimeClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -246,19 +663,229 @@ func (this *RuntimeClassSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&RuntimeClassSpec{`, - `RuntimeHandler:` + 
fmt.Sprintf("%v", this.RuntimeHandler) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&RuntimeClassSpec{`, + `RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, + `Scheduling:` + strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scheduling) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Scheduling{`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Overhead) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overhead: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFixed == nil { + m.PodFixed = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } func (m *RuntimeClass) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -275,7 +902,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -303,7 +930,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -312,6 +939,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -333,7 +963,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -342,6 +972,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -358,6 +991,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -385,7 +1021,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) 
<< shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -413,7 +1049,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -422,6 +1058,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -443,7 +1082,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -452,6 +1091,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -469,6 +1111,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -496,7 +1141,7 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -524,7 +1169,7 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -534,11 +1179,300 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = &Overhead{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &Scheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 
{ + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v11.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -548,6 +1482,9 @@ func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -614,10 +1551,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -646,6 +1586,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -664,38 +1607,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 421 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x6b, 0xd4, 0x40, - 0x14, 0xc7, 0x33, 0xb5, 0x85, 0x75, 0x5a, 0x4b, 0xc9, 0x41, 0xc2, 0x1e, 0xa6, 0x65, 0x0f, 0x52, - 0x04, 0x67, 0xdc, 0x22, 0xe2, 0x49, 0x30, 0x5e, 0x14, 0x2b, 0x42, 0xbc, 0x89, 0x07, 0x27, 0xc9, - 0x33, 0x19, 0xb3, 0xc9, 0x0c, 0x99, 0x49, 0xc0, 0x9b, 0x1f, 0xc1, 0x2f, 0xa4, 0xe7, 0x3d, 0xf6, - 0xd8, 0x53, 0x71, 0xe3, 0x17, 0x91, 0x99, 0x64, 0xbb, 0xdb, 0x2e, 0xc5, 0xbd, 0xe5, 0xbd, 0xf9, - 0xff, 0x7f, 0xef, 0xfd, 0x5f, 0xf0, 0xab, 0xe2, 0x85, 0xa6, 0x42, 0xb2, 0xa2, 0x89, 0xa1, 0xae, - 0xc0, 0x80, 0x66, 0x2d, 0x54, 0xa9, 0xac, 0xd9, 0xf0, 0xc0, 0x95, 0x60, 0x95, 0x4c, 0x81, 0xb5, - 0x53, 0x3e, 0x53, 0x39, 0x9f, 0xb2, 0x0c, 0x2a, 0xa8, 0xb9, 0x81, 0x94, 0xaa, 0x5a, 0x1a, 0xe9, - 0x07, 0xbd, 0x92, 0x72, 0x25, 0xa8, 0x55, 0xd2, 0xa5, 0x72, 0xfc, 0x24, 0x13, 0x26, 0x6f, 0x62, - 0x9a, 0xc8, 0x92, 0x65, 0x32, 0x93, 0xcc, 0x19, 0xe2, 0xe6, 0xab, 0xab, 0x5c, 0xe1, 0xbe, 0x7a, - 0xd0, 0xf8, 0xd9, 0x6a, 0x64, 0xc9, 0x93, 0x5c, 0x54, 0x50, 0x7f, 0x67, 0xaa, 0xc8, 0x6c, 0x43, - 0xb3, 0x12, 0x0c, 0x67, 0xed, 0xc6, 0xf8, 0x31, 0xbb, 0xcb, 0x55, 0x37, 0x95, 0x11, 0x25, 0x6c, - 0x18, 0x9e, 0xff, 0xcf, 0xa0, 0x93, 0x1c, 0x4a, 0x7e, 0xdb, 0x37, 0xf9, 0x8d, 0xf0, 0x41, 0xd4, - 0x4b, 0x5e, 0xcf, 0xb8, 0xd6, 0xfe, 0x17, 0x3c, 0xb2, 0x4b, 0xa5, 0xdc, 0xf0, 0x00, 0x9d, 0xa0, - 0xd3, 0xfd, 0xb3, 0xa7, 0x74, 0x75, 
0x8b, 0x6b, 0x36, 0x55, 0x45, 0x66, 0x1b, 0x9a, 0x5a, 0x35, - 0x6d, 0xa7, 0xf4, 0x43, 0xfc, 0x0d, 0x12, 0xf3, 0x1e, 0x0c, 0x0f, 0xfd, 0xf9, 0xd5, 0xb1, 0xd7, - 0x5d, 0x1d, 0xe3, 0x55, 0x2f, 0xba, 0xa6, 0xfa, 0xe7, 0x78, 0x57, 0x2b, 0x48, 0x82, 0x1d, 0x47, - 0x7f, 0x4c, 0xef, 0xba, 0x34, 0x5d, 0xdf, 0xeb, 0xa3, 0x82, 0x24, 0x3c, 0x18, 0xb8, 0xbb, 0xb6, - 0x8a, 0x1c, 0x65, 0xf2, 0x0b, 0xe1, 0xa3, 0x75, 0xe1, 0xb9, 0xd0, 0xc6, 0xff, 0xbc, 0x11, 0x82, - 0x6e, 0x17, 0xc2, 0xba, 0x5d, 0x84, 0xa3, 0x61, 0xd4, 0x68, 0xd9, 0x59, 0x0b, 0xf0, 0x0e, 0xef, - 0x09, 0x03, 0xa5, 0x0e, 0x76, 0x4e, 0xee, 0x9d, 0xee, 0x9f, 0x3d, 0xda, 0x2e, 0x41, 0xf8, 0x60, - 0x40, 0xee, 0xbd, 0xb5, 0xe6, 0xa8, 0x67, 0x4c, 0xa2, 0x9b, 0xeb, 0xdb, 0x64, 0xfe, 0x4b, 0x7c, - 0x38, 0xfc, 0xb6, 0x37, 0xbc, 0x4a, 0x67, 0x50, 0xbb, 0x10, 0xf7, 0xc3, 0x87, 0x03, 0xe1, 0x30, - 0xba, 0xf1, 0x1a, 0xdd, 0x52, 0x87, 0x74, 0xbe, 0x20, 0xde, 0xc5, 0x82, 0x78, 0x97, 0x0b, 0xe2, - 0xfd, 0xe8, 0x08, 0x9a, 0x77, 0x04, 0x5d, 0x74, 0x04, 0x5d, 0x76, 0x04, 0xfd, 0xe9, 0x08, 0xfa, - 0xf9, 0x97, 0x78, 0x9f, 0x46, 0xcb, 0x35, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x94, 0x34, 0x0e, - 0xef, 0x30, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index ca4e5e53506a5..ac05f839eba80 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -21,6 +21,8 @@ syntax = 'proto2'; package k8s.io.api.node.v1alpha1; +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -28,6 +30,13 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; +// Overhead structure represents the resource overhead associated with running a pod. +message Overhead { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + map podFixed = 1; +} + // RuntimeClass defines a class of container runtime supported in the cluster. // The RuntimeClass is used to determine which container runtime is used to run // all containers in a pod. RuntimeClasses are (currently) manually defined by a @@ -36,19 +45,19 @@ option go_package = "v1alpha1"; // pod. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md message RuntimeClass { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the RuntimeClass - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status optional RuntimeClassSpec spec = 2; } // RuntimeClassList is a list of RuntimeClass objects. message RuntimeClassList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -72,5 +81,38 @@ message RuntimeClassSpec { // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements // and is immutable. optional string runtimeHandler = 1; + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + optional Overhead overhead = 2; + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + optional Scheduling scheduling = 3; +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +message Scheduling { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + map nodeSelector = 1; + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + repeated k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go index 2ce67c116fcd9..b59767107c455 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1alpha1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,12 +34,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md type RuntimeClass struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the RuntimeClass - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` } @@ -58,6 +59,46 @@ type RuntimeClassSpec struct { // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements // and is immutable. RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. 
For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"` + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,3,opt,name=scheduling"` +} + +// Overhead structure represents the resource overhead associated with running a pod. +type Overhead struct { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +type Scheduling struct { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -66,7 +107,7 @@ type RuntimeClassSpec struct { type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go index a51fa525df386..3900001723580 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -27,10 +27,19 @@ package v1alpha1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Overhead = map[string]string{ + "": "Overhead structure represents the resource overhead associated with running a pod.", + "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", +} + +func (Overhead) SwaggerDoc() map[string]string { + return map_Overhead +} + var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. 
The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -39,7 +48,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -50,10 +59,22 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { var map_RuntimeClassSpec = map[string]string{ "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements and is immutable.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", + "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClassSpec) SwaggerDoc() map[string]string { return map_RuntimeClassSpec } +var map_Scheduling = map[string]string{ + "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. 
Any conflicts will cause the pod to be rejected in admission.", + "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", +} + +func (Scheduling) SwaggerDoc() map[string]string { + return map_Scheduling +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go index 91b8d80168df4..20f8051835c93 100644 --- a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go @@ -21,15 +21,39 @@ limitations under the License. package v1alpha1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Overhead) DeepCopyInto(out *Overhead) { + *out = *in + if in.PodFixed != nil { + in, out := &in.PodFixed, &out.PodFixed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. +func (in *Overhead) DeepCopy() *Overhead { + if in == nil { + return nil + } + out := new(Overhead) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) return } @@ -87,6 +111,16 @@ func (in *RuntimeClassList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RuntimeClassSpec) DeepCopyInto(out *RuntimeClassSpec) { *out = *in + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(Overhead) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(Scheduling) + (*in).DeepCopyInto(*out) + } return } @@ -99,3 +133,33 @@ func (in *RuntimeClassSpec) DeepCopy() *RuntimeClassSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scheduling) DeepCopyInto(out *Scheduling) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. 
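Taken together, the v1alpha1 additions above (the Overhead and Scheduling types plus the new RuntimeClassSpec fields) give cluster provisioners a place to declare per-pod overhead and placement constraints for a runtime. A short sketch of how a client could populate them, assuming the vendored k8s.io/api and k8s.io/apimachinery packages in this tree; the handler name, label key, toleration, and quantities are illustrative values only:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/api/node/v1alpha1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	rc := v1alpha1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "kata"},
		Spec: v1alpha1.RuntimeClassSpec{
			RuntimeHandler: "kata",
			// Fixed per-pod overhead charged on top of container requests.
			Overhead: &v1alpha1.Overhead{
				PodFixed: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("250m"),
					corev1.ResourceMemory: resource.MustParse("120Mi"),
				},
			},
			// Constraints merged into pods that reference this RuntimeClass.
			Scheduling: &v1alpha1.Scheduling{
				NodeSelector: map[string]string{"runtime": "kata"},
				Tolerations: []corev1.Toleration{{
					Key:      "runtime",
					Operator: corev1.TolerationOpEqual,
					Value:    "kata",
					Effect:   corev1.TaintEffectNoSchedule,
				}},
			},
		},
	}
	q := rc.Spec.Overhead.PodFixed[corev1.ResourceCPU]
	fmt.Println(q.String()) // 250m
}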
+func (in *Scheduling) DeepCopy() *Scheduling { + if in == nil { + return nil + } + out := new(Scheduling) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/node/v1beta1/BUILD.bazel b/vendor/k8s.io/api/node/v1beta1/BUILD.bazel index 19297b7c007bc..bd254f38f1bac 100644 --- a/vendor/k8s.io/api/node/v1beta1/BUILD.bazel +++ b/vendor/k8s.io/api/node/v1beta1/BUILD.bazel @@ -15,6 +15,9 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go index a2f213be26bb3..63992f436236d 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1beta1/generated.pb.go @@ -17,30 +17,24 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto - - It has these top-level messages: - RuntimeClass - RuntimeClassList -*/ package v1beta1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" math "math" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -54,22 +48,233 @@ var _ = math.Inf // proto package needs to be updated. 
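The zz_generated.deepcopy.go changes earlier in this diff switch RuntimeClassSpec from a shallow assignment to DeepCopyInto, because Overhead and Scheduling now carry maps and slices. A small sketch of the behaviour this buys, with illustrative values only:

package main

import (
	"fmt"

	"k8s.io/api/node/v1alpha1"
)

func main() {
	orig := &v1alpha1.Scheduling{NodeSelector: map[string]string{"runtime": "kata"}}
	copied := orig.DeepCopy()

	// The generated DeepCopyInto allocates a fresh map, so mutating the copy
	// leaves the original untouched.
	copied.NodeSelector["runtime"] = "gvisor"
	fmt.Println(orig.NodeSelector["runtime"], copied.NodeSelector["runtime"]) // kata gvisor
}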
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } -func (*RuntimeClass) ProtoMessage() {} -func (*RuntimeClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Overhead) Reset() { *m = Overhead{} } +func (*Overhead) ProtoMessage() {} +func (*Overhead) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{0} +} +func (m *Overhead) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Overhead) XXX_Merge(src proto.Message) { + xxx_messageInfo_Overhead.Merge(m, src) +} +func (m *Overhead) XXX_Size() int { + return m.Size() +} +func (m *Overhead) XXX_DiscardUnknown() { + xxx_messageInfo_Overhead.DiscardUnknown(m) +} + +var xxx_messageInfo_Overhead proto.InternalMessageInfo + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{1} +} +func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClass.Merge(m, src) +} +func (m *RuntimeClass) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClass) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClass.DiscardUnknown(m) +} -func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } -func (*RuntimeClassList) ProtoMessage() {} -func (*RuntimeClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{2} +} +func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassList.Merge(m, src) +} +func (m *RuntimeClassList) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassList) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo + +func (m *Scheduling) Reset() { *m = Scheduling{} } +func (*Scheduling) ProtoMessage() {} +func (*Scheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{3} +} +func (m *Scheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scheduling.Merge(m, src) +} +func (m *Scheduling) XXX_Size() int { + return 
m.Size() +} +func (m *Scheduling) XXX_DiscardUnknown() { + xxx_messageInfo_Scheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_Scheduling proto.InternalMessageInfo func init() { + proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1beta1.Overhead") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1beta1.Overhead.PodFixedEntry") proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1beta1.RuntimeClass") proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1beta1.RuntimeClassList") + proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1beta1.Scheduling") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1beta1.Scheduling.NodeSelectorEntry") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptor_f977b0dddc93b4ec) +} + +var fileDescriptor_f977b0dddc93b4ec = []byte{ + // 666 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xbd, 0x6f, 0xd3, 0x40, + 0x14, 0xcf, 0xa5, 0x54, 0x4d, 0x2f, 0x29, 0x14, 0x53, 0x89, 0x28, 0x83, 0x53, 0x82, 0x90, 0xca, + 0xd0, 0x33, 0xad, 0x00, 0x55, 0x2c, 0x20, 0xf3, 0x21, 0x3e, 0x5b, 0x70, 0x61, 0x41, 0x0c, 0x5c, + 0xec, 0x87, 0x63, 0x12, 0xfb, 0xa2, 0xf3, 0x39, 0x22, 0x1b, 0x62, 0x41, 0x62, 0x62, 0xe1, 0xbf, + 0x81, 0xbd, 0x1b, 0x5d, 0x90, 0x3a, 0xb5, 0x34, 0xfc, 0x17, 0x4c, 0xe8, 0xec, 0x73, 0x72, 0x6d, + 0x9a, 0xb6, 0x6c, 0xbe, 0xf3, 0xef, 0xe3, 0xbd, 0xdf, 0xbb, 0x87, 0xef, 0xb4, 0xd7, 0x62, 0x12, + 0x30, 0xab, 0x9d, 0x34, 0x81, 0x47, 0x20, 0x20, 0xb6, 0x7a, 0x10, 0x79, 0x8c, 0x5b, 0xea, 0x07, + 0xed, 0x06, 0x56, 0xc4, 0x3c, 0xb0, 0x7a, 0x2b, 0x4d, 0x10, 0x74, 0xc5, 0xf2, 0x21, 0x02, 0x4e, + 0x05, 0x78, 0xa4, 0xcb, 0x99, 0x60, 0xc6, 0xc5, 0x0c, 0x48, 0x68, 0x37, 0x20, 0x12, 0x48, 0x14, + 0xb0, 0xb6, 0xec, 0x07, 0xa2, 0x95, 0x34, 0x89, 0xcb, 0x42, 0xcb, 0x67, 0x3e, 0xb3, 0x52, 0x7c, + 0x33, 0x79, 0x97, 0x9e, 0xd2, 0x43, 0xfa, 0x95, 0xe9, 0xd4, 0x1a, 0x9a, 0xa1, 0xcb, 0xb8, 0x34, + 0x3c, 0xec, 0x55, 0xbb, 0x3e, 0xc2, 0x84, 0xd4, 0x6d, 0x05, 0x11, 0xf0, 0xbe, 0xd5, 0x6d, 0xfb, + 0x29, 0x89, 0x43, 0xcc, 0x12, 0xee, 0xc2, 0x7f, 0xb1, 0x62, 0x2b, 0x04, 0x41, 0x8f, 0xf2, 0xb2, + 0x26, 0xb1, 0x78, 0x12, 0x89, 0x20, 0x1c, 0xb7, 0xb9, 0x79, 0x12, 0x21, 0x76, 0x5b, 0x10, 0xd2, + 0xc3, 0xbc, 0xc6, 0xcf, 0x22, 0x2e, 0x6d, 0xf4, 0x80, 0xb7, 0x80, 0x7a, 0xc6, 0x2f, 0x84, 0x4b, + 0x5d, 0xe6, 0x3d, 0x08, 0x3e, 0x80, 0x57, 0x45, 0x8b, 0x53, 0x4b, 0xe5, 0x55, 0x8b, 0x4c, 0x48, + 0x98, 0xe4, 0x2c, 0xf2, 0x5c, 0x31, 0xee, 0x47, 0x82, 0xf7, 0xed, 0xcf, 0x68, 0x6b, 0xb7, 0x5e, + 0x18, 0xec, 0xd6, 0x4b, 0xf9, 0xfd, 0xdf, 0xdd, 0x7a, 0x7d, 0x3c, 0x5e, 0xe2, 0xa8, 0xc4, 0x9e, + 0x06, 0xb1, 0xf8, 0xb4, 0x77, 0x2c, 0x64, 0x9d, 0x86, 0xf0, 0x65, 0xaf, 0xbe, 0x7c, 0x9a, 0x01, + 0x90, 0x17, 0x09, 0x8d, 0x44, 0x20, 0xfa, 0xce, 0xb0, 0x95, 0x5a, 0x1b, 0xcf, 0x1d, 0x28, 0xd2, + 0x98, 0xc7, 0x53, 0x6d, 0xe8, 0x57, 0xd1, 0x22, 0x5a, 0x9a, 0x75, 0xe4, 0xa7, 0x71, 0x0f, 0x4f, + 0xf7, 0x68, 0x27, 0x81, 0x6a, 0x71, 0x11, 0x2d, 0x95, 0x57, 0x89, 0xd6, 0xf6, 0xd0, 0x8b, 0x74, + 0xdb, 0x7e, 0x9a, 0xc3, 0xb8, 0x57, 0x46, 0xbe, 0x55, 0x5c, 0x43, 0x8d, 0x1f, 0x45, 0x5c, 0x71, + 0xb2, 0xd0, 0xef, 0x76, 0x68, 0x1c, 0x1b, 0x6f, 0x71, 0x49, 0x8e, 0xd9, 0xa3, 0x82, 0xa6, 0x8e, + 0xe5, 0xd5, 0x6b, 0xc7, 0xa9, 0xc7, 0x44, 0xa2, 0x49, 0x6f, 0x85, 0x6c, 0x34, 0xdf, 0x83, 0x2b, + 0x9e, 0x81, 0xa0, 0xb6, 0xa1, 0x42, 0xc5, 0xa3, 0x3b, 0x67, 0xa8, 0x6a, 0x5c, 0xc5, 0x33, 0x2d, + 0x1a, 0x79, 0x1d, 0xe0, 0x69, 0xf9, 
0xb3, 0xf6, 0x39, 0x05, 0x9f, 0x79, 0x98, 0x5d, 0x3b, 0xf9, + 0x7f, 0xe3, 0x09, 0x2e, 0x31, 0x35, 0xb8, 0xea, 0x54, 0x5a, 0xcc, 0xa5, 0x13, 0x27, 0x6c, 0x57, + 0xe4, 0x38, 0xf3, 0x93, 0x33, 0x14, 0x30, 0x36, 0x31, 0x96, 0xcf, 0xca, 0x4b, 0x3a, 0x41, 0xe4, + 0x57, 0xcf, 0xa4, 0x72, 0x97, 0x27, 0xca, 0x6d, 0x0e, 0xa1, 0xf6, 0x59, 0xd9, 0xca, 0xe8, 0xec, + 0x68, 0x32, 0x8d, 0xef, 0x08, 0xcf, 0xeb, 0xf9, 0xc9, 0xf7, 0x61, 0xbc, 0x19, 0xcb, 0x90, 0x9c, + 0x2e, 0x43, 0xc9, 0x4e, 0x13, 0x9c, 0xcf, 0x9f, 0x65, 0x7e, 0xa3, 0xe5, 0xf7, 0x18, 0x4f, 0x07, + 0x02, 0xc2, 0xb8, 0x5a, 0x4c, 0xdf, 0xfc, 0x95, 0x89, 0x2d, 0xe8, 0x75, 0xd9, 0x73, 0x4a, 0x71, + 0xfa, 0x91, 0xe4, 0x3a, 0x99, 0x44, 0xe3, 0x5b, 0x11, 0x6b, 0x9d, 0x19, 0x0c, 0x57, 0xa4, 0xc2, + 0x26, 0x74, 0xc0, 0x15, 0x8c, 0xab, 0xad, 0xba, 0x71, 0x8a, 0x90, 0xc8, 0xba, 0xc6, 0xcb, 0x76, + 0x6b, 0x41, 0x39, 0x56, 0xf4, 0x5f, 0xce, 0x01, 0x03, 0xe3, 0x15, 0x2e, 0x0b, 0xd6, 0x91, 0x3b, + 0x1e, 0xb0, 0x28, 0xef, 0xc8, 0xd4, 0xfd, 0xe4, 0x76, 0xc9, 0x68, 0x5e, 0x0e, 0x61, 0xf6, 0x05, + 0x25, 0x5c, 0x1e, 0xdd, 0xc5, 0x8e, 0xae, 0x53, 0xbb, 0x8d, 0xcf, 0x8f, 0xd5, 0x73, 0xc4, 0x1a, + 0x2d, 0xe8, 0x6b, 0x34, 0xab, 0xad, 0x85, 0xbd, 0xbc, 0xb5, 0x6f, 0x16, 0xb6, 0xf7, 0xcd, 0xc2, + 0xce, 0xbe, 0x59, 0xf8, 0x38, 0x30, 0xd1, 0xd6, 0xc0, 0x44, 0xdb, 0x03, 0x13, 0xed, 0x0c, 0x4c, + 0xf4, 0x7b, 0x60, 0xa2, 0xaf, 0x7f, 0xcc, 0xc2, 0xeb, 0x19, 0x95, 0xc3, 0xbf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x5b, 0xcf, 0x13, 0x0c, 0x1b, 0x06, 0x00, 0x00, +} + +func (m *Overhead) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodFixed) > 0 { + keysForPodFixed := make([]string, 0, len(m.PodFixed)) + for k := range m.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPodFixed[iNdEx]) + copy(dAtA[i:], keysForPodFixed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -77,29 +282,61 @@ func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, 
err + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i += n1 - dAtA[i] = 0x12 - i++ + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Handler) + copy(dAtA[i:], m.Handler) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Handler))) - i += copy(dAtA[i:], m.Handler) - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -107,53 +344,157 @@ func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *Scheduling) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, 
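The regenerated marshal code above replaces the old front-to-back MarshalTo bodies with MarshalToSizedBuffer, which fills the buffer from the end so each length prefix is written only once the inner data's size is known. A stripped-down sketch of the pattern, using a simplified varint encoder in place of the generated encodeVarintGenerated:

package main

import "fmt"

// encodeVarint writes v immediately before offset and returns the new offset,
// mimicking the contract of the generated encodeVarintGenerated helper.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	size := 0
	for x := v; ; x >>= 7 {
		size++
		if x < 0x80 {
			break
		}
	}
	offset -= size
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func main() {
	// Marshal a single string field (field number 2, wire type 2) backwards,
	// the way the generated RuntimeClass.MarshalToSizedBuffer writes Handler.
	handler := "runc"
	dAtA := make([]byte, 16)
	i := len(dAtA)

	i -= len(handler)
	copy(dAtA[i:], handler)
	i = encodeVarint(dAtA, i, uint64(len(handler)))
	i--
	dAtA[i] = 0x12 // tag byte: (2 << 3) | 2

	fmt.Printf("% x\n", dAtA[i:]) // 12 04 72 75 6e 63
}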
uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base +} +func (m *Overhead) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodFixed) > 0 { + for k, v := range m.PodFixed { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n } + func (m *RuntimeClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) l = len(m.Handler) n += 1 + l + sovGenerated(uint64(l)) + if m.Overhead != nil { + l = m.Overhead.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *RuntimeClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -167,26 +508,64 @@ func (m *RuntimeClassList) Size() (n int) { return n } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break +func (m *Scheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } return n } + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *Overhead) String() string { + if this == nil { + return "nil" + } + keysForPodFixed := make([]string, 0, len(this.PodFixed)) + for k := range this.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForPodFixed { + mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForPodFixed += "}" + s := strings.Join([]string{`&Overhead{`, + `PodFixed:` + mapStringForPodFixed + `,`, + `}`, + }, "") + return s +} func (this *RuntimeClass) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&RuntimeClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Handler:` + fmt.Sprintf("%v", this.Handler) + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, + `Scheduling:` + 
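sovGenerated above is rewritten from a shift loop into a closed form over math/bits. A quick sketch showing the two formulations agree; the helper names here are illustrative, not part of the vendored package:

package main

import (
	"fmt"
	"math/bits"
)

// loopSize is the old shift-loop implementation of sovGenerated.
func loopSize(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// bitsSize is the new closed form: a varint stores 7 payload bits per byte,
// and x|1 makes Len64 return at least 1 so zero still costs one byte.
func bitsSize(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1} {
		fmt.Println(x, loopSize(x), bitsSize(x)) // the two sizes always match
	}
}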
strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, `}`, }, "") return s @@ -195,9 +574,40 @@ func (this *RuntimeClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RuntimeClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RuntimeClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Scheduling) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Scheduling{`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, `}`, }, "") return s @@ -210,6 +620,188 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *Overhead) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overhead: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFixed == nil { + m.PodFixed = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *RuntimeClass) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -225,7 +817,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -253,7 +845,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -262,6 +854,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -283,7 +878,7 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -293,11 +888,86 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Handler = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = &Overhead{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &Scheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -307,6 +977,9 @@ func (m *RuntimeClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -334,7 +1007,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -362,7 +1035,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -371,6 +1044,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -392,7 +1068,7 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -401,6 +1077,9 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -418,6 +1097,223 @@ func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
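The new Overhead.Unmarshal above hand-decodes each podFixed map entry back into a core/v1 ResourceList keyed by ResourceName with resource.Quantity values. A minimal round-trip sketch against the generated v1beta1 code; the quantity is an illustrative value and error handling is kept deliberately blunt:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/api/node/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	in := &v1beta1.Overhead{
		PodFixed: corev1.ResourceList{
			corev1.ResourceMemory: resource.MustParse("64Mi"),
		},
	}

	// Marshal with the generated code, then decode into a fresh value; the
	// map-entry handling above reconstructs the ResourceName/Quantity pair.
	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}
	out := &v1beta1.Overhead{}
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	q := out.PodFixed[corev1.ResourceMemory]
	fmt.Println(q.String()) // 64Mi
}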
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break 
+ } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v11.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -484,10 +1380,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -516,6 +1415,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -534,36 +1436,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 389 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x6a, 0xdb, 0x40, - 0x14, 0x85, 0x35, 0x2e, 0xc6, 0xae, 0xdc, 0x52, 0xa3, 0x4d, 0x8d, 0x17, 0x63, 0x63, 0x28, 0xb8, - 0x0b, 0xcf, 0xd4, 0xa6, 0x94, 0x2e, 0x8b, 0xba, 0x69, 0x4b, 0x4b, 0x41, 0xcb, 0x90, 0x45, 0x46, - 0xd2, 0x8d, 0x34, 0x91, 0xa5, 0x11, 0x9a, 0x91, 0x20, 0xbb, 0x3c, 0x42, 0xf6, 0x79, 0x95, 0x3c, - 0x80, 0x97, 0x5e, 0x7a, 0x65, 0x62, 0xe5, 0x45, 0x82, 0x7e, 0xfc, 0x43, 0x8c, 0x49, 0x76, 0xba, - 0xe7, 0x9e, 0x73, 0xee, 0x87, 0x18, 0xfd, 0x47, 0xf0, 0x5d, 0x12, 0x2e, 0x68, 0x90, 0xda, 0x90, - 0x44, 0xa0, 0x40, 0xd2, 0x0c, 0x22, 0x57, 0x24, 0xb4, 0x5e, 0xb0, 0x98, 0xd3, 0x48, 0xb8, 0x40, - 0xb3, 0xa9, 0x0d, 0x8a, 0x4d, 0xa9, 0x07, 0x11, 0x24, 0x4c, 0x81, 0x4b, 0xe2, 0x44, 0x28, 0x61, - 0x7c, 0xac, 0x8c, 0x84, 0xc5, 0x9c, 0x14, 0x46, 0x52, 0x1b, 0xfb, 0x13, 0x8f, 0x2b, 0x3f, 0xb5, - 0x89, 0x23, 0x42, 0xea, 0x09, 0x4f, 0xd0, 0xd2, 0x6f, 0xa7, 0x97, 0xe5, 0x54, 0x0e, 0xe5, 0x57, - 0xd5, 0xd3, 0xff, 0xba, 0x3f, 0x18, 0x32, 0xc7, 0xe7, 0x11, 0x24, 0xd7, 0x34, 0x0e, 0xbc, 0x42, - 0x90, 0x34, 0x04, 0xc5, 0x68, 0x76, 0x74, 0xbd, 0x4f, 0x4f, 0xa5, 0x92, 0x34, 0x52, 0x3c, 0x84, - 0xa3, 0xc0, 0xb7, 0x97, 0x02, 0xd2, 0xf1, 0x21, 0x64, 0xcf, 0x73, 0xa3, 0x3b, 0xa4, 0xbf, 0xb3, - 0x2a, 0xcb, 0xcf, 0x39, 0x93, 0xd2, 0xb8, 0xd0, 0xdb, 0x05, 0x94, 0xcb, 0x14, 0xeb, 0xa1, 0x21, - 0x1a, 0x77, 0x66, 0x5f, 0xc8, 0xfe, 0x57, 0xec, 0xba, 0x49, 0x1c, 0x78, 0x85, 0x20, 0x49, 0xe1, - 0x26, 0xd9, 0x94, 0xfc, 0xb7, 0xaf, 0xc0, 0x51, 0xff, 0x40, 0x31, 0xd3, 0x58, 0xac, 0x07, 0x5a, - 0xbe, 0x1e, 0xe8, 0x7b, 0xcd, 0xda, 0xb5, 0x1a, 0x9f, 0xf5, 0x96, 0xcf, 0x22, 0x77, 0x0e, 0x49, - 0xaf, 0x31, 0x44, 0xe3, 0xb7, 0xe6, 0x87, 0xda, 0xde, 0xfa, 0x55, 0xc9, 0xd6, 0x76, 0x3f, 0xba, - 0x47, 0x7a, 0xf7, 0x90, 0xee, 0x2f, 0x97, 0xca, 0x38, 0x3f, 0x22, 0x24, 0xaf, 0x23, 0x2c, 0xd2, - 0x25, 0x5f, 0xb7, 0x3e, 0xd8, 0xde, 0x2a, 0x07, 0x74, 0x7f, 0xf4, 0x26, 0x57, 0x10, 0xca, 0x5e, - 0x63, 0xf8, 0x66, 0xdc, 0x99, 
0x7d, 0x22, 0x27, 0xde, 0x01, 0x39, 0xe4, 0x32, 0xdf, 0xd7, 0x8d, - 0xcd, 0xdf, 0x45, 0xd6, 0xaa, 0x2a, 0xcc, 0xc9, 0x62, 0x83, 0xb5, 0xe5, 0x06, 0x6b, 0xab, 0x0d, - 0xd6, 0x6e, 0x72, 0x8c, 0x16, 0x39, 0x46, 0xcb, 0x1c, 0xa3, 0x55, 0x8e, 0xd1, 0x43, 0x8e, 0xd1, - 0xed, 0x23, 0xd6, 0xce, 0x5a, 0x75, 0xe3, 0x53, 0x00, 0x00, 0x00, 0xff, 0xff, 0x93, 0x68, 0xe5, - 0x0d, 0xb5, 0x02, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto index 9082fbd3344bd..49166798dad57 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -21,6 +21,8 @@ syntax = 'proto2'; package k8s.io.api.node.v1beta1; +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -28,6 +30,13 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// Overhead structure represents the resource overhead associated with running a pod. +message Overhead { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + map podFixed = 1; +} + // RuntimeClass defines a class of container runtime supported in the cluster. // The RuntimeClass is used to determine which container runtime is used to run // all containers in a pod. RuntimeClasses are (currently) manually defined by a @@ -36,7 +45,7 @@ option go_package = "v1beta1"; // pod. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md message RuntimeClass { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -51,12 +60,26 @@ message RuntimeClass { // The Handler must conform to the DNS Label (RFC 1123) requirements, and is // immutable. optional string handler = 2; + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + optional Overhead overhead = 3; + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + optional Scheduling scheduling = 4; } // RuntimeClassList is a list of RuntimeClass objects. message RuntimeClassList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -64,3 +87,22 @@ message RuntimeClassList { repeated RuntimeClass items = 2; } +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. 
+message Scheduling { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + map<string, string> nodeSelector = 1; + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + repeated k8s.io.api.core.v1.Toleration tolerations = 2; +} + diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go index 993c6e5066081..793a48f62b64f 100644 --- a/vendor/k8s.io/api/node/v1beta1/types.go +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,7 +34,7 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md type RuntimeClass struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -48,6 +49,46 @@ type RuntimeClass struct { // The Handler must conform to the DNS Label (RFC 1123) requirements, and is // immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,4,opt,name=scheduling"` +} + +// Overhead structure represents the resource overhead associated with running a pod. +type Overhead struct { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +type Scheduling struct { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission.
+ // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -56,7 +97,7 @@ type RuntimeClass struct { type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go index 8bfa304e78d6e..681f73f23c832 100644 --- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -27,10 +27,21 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Overhead = map[string]string{ + "": "Overhead structure represents the resource overhead associated with running a pod.", + "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", +} + +func (Overhead) SwaggerDoc() map[string]string { + return map_Overhead +} + var map_RuntimeClass = map[string]string{ - "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", + "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -39,7 +50,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -47,4 +58,14 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { return map_RuntimeClassList } +var map_Scheduling = map[string]string{ + "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.", + "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", +} + +func (Scheduling) SwaggerDoc() map[string]string { + return map_Scheduling +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go index f211e84994038..c3989528b2af6 100644 --- a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go @@ -21,14 +21,48 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Overhead) DeepCopyInto(out *Overhead) { + *out = *in + if in.PodFixed != nil { + in, out := &in.PodFixed, &out.PodFixed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. +func (in *Overhead) DeepCopy() *Overhead { + if in == nil { + return nil + } + out := new(Overhead) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(Overhead) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(Scheduling) + (*in).DeepCopyInto(*out) + } return } @@ -82,3 +116,33 @@ func (in *RuntimeClassList) DeepCopyObject() runtime.Object { } return nil } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scheduling) DeepCopyInto(out *Scheduling) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. +func (in *Scheduling) DeepCopy() *Scheduling { + if in == nil { + return nil + } + out := new(Scheduling) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index a2b8398656a3f..5b57f699c343a 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -17,55 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto - - It has these top-level messages: - AllowedCSIDriver - AllowedFlexVolume - AllowedHostPath - Eviction - FSGroupStrategyOptions - HostPortRange - IDRange - PodDisruptionBudget - PodDisruptionBudgetList - PodDisruptionBudgetSpec - PodDisruptionBudgetStatus - PodSecurityPolicy - PodSecurityPolicyList - PodSecurityPolicySpec - RunAsGroupStrategyOptions - RunAsUserStrategyOptions - RuntimeClassStrategyOptions - SELinuxStrategyOptions - SupplementalGroupsStrategyOptions -*/ package v1beta1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" + strings "strings" - io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // Reference imports to suppress errors if they are not otherwise used. @@ -79,91 +49,537 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } +func (*AllowedCSIDriver) ProtoMessage() {} +func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{0} +} +func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedCSIDriver.Merge(m, src) +} +func (m *AllowedCSIDriver) XXX_Size() int { + return m.Size() +} +func (m *AllowedCSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo + +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{1} +} +func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedFlexVolume.Merge(m, src) +} +func (m *AllowedFlexVolume) XXX_Size() int { + return m.Size() +} +func (m *AllowedFlexVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo + +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func 
(*AllowedHostPath) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{2} +} +func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedHostPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedHostPath.Merge(m, src) +} +func (m *AllowedHostPath) XXX_Size() int { + return m.Size() +} +func (m *AllowedHostPath) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo + +func (m *Eviction) Reset() { *m = Eviction{} } +func (*Eviction) ProtoMessage() {} +func (*Eviction) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{3} +} +func (m *Eviction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Eviction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Eviction) XXX_Merge(src proto.Message) { + xxx_messageInfo_Eviction.Merge(m, src) +} +func (m *Eviction) XXX_Size() int { + return m.Size() +} +func (m *Eviction) XXX_DiscardUnknown() { + xxx_messageInfo_Eviction.DiscardUnknown(m) +} + +var xxx_messageInfo_Eviction proto.InternalMessageInfo + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{4} +} +func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) +} +func (m *FSGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{5} +} +func (m *HostPortRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPortRange.Merge(m, src) +} +func (m *HostPortRange) XXX_Size() int { + return m.Size() +} +func (m *HostPortRange) XXX_DiscardUnknown() { + xxx_messageInfo_HostPortRange.DiscardUnknown(m) +} -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_HostPortRange proto.InternalMessageInfo -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func 
(*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{6} +} +func (m *IDRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IDRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDRange.Merge(m, src) +} +func (m *IDRange) XXX_Size() int { + return m.Size() +} +func (m *IDRange) XXX_DiscardUnknown() { + xxx_messageInfo_IDRange.DiscardUnknown(m) +} -func (m *Eviction) Reset() { *m = Eviction{} } -func (*Eviction) ProtoMessage() {} -func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_IDRange proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } +func (*PodDisruptionBudget) ProtoMessage() {} +func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{7} +} +func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudget) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudget.Merge(m, src) +} +func (m *PodDisruptionBudget) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudget) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudget.DiscardUnknown(m) +} -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } +func (*PodDisruptionBudgetList) ProtoMessage() {} +func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{8} +} +func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetList.Merge(m, src) +} +func (m *PodDisruptionBudgetList) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetList) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudgetList.DiscardUnknown(m) +} -func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } -func (*PodDisruptionBudget) ProtoMessage() {} 
-func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo -func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } -func (*PodDisruptionBudgetList) ProtoMessage() {} -func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } +func (*PodDisruptionBudgetSpec) ProtoMessage() {} +func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{9} +} +func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetSpec.Merge(m, src) +} +func (m *PodDisruptionBudgetSpec) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudgetSpec.DiscardUnknown(m) +} -func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } -func (*PodDisruptionBudgetSpec) ProtoMessage() {} -func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptor_014060e454a820dc, []int{10} +} +func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetStatus.Merge(m, src) +} +func (m *PodDisruptionBudgetStatus) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudgetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo + +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{11} +} +func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicy.Merge(m, src) +} +func (m *PodSecurityPolicy) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo + +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } 
+func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{12} +} +func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) +} +func (m *PodSecurityPolicyList) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) } -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{13} +} +func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) +} +func (m *PodSecurityPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) +} -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } func (*RunAsGroupStrategyOptions) ProtoMessage() {} func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} + return fileDescriptor_014060e454a820dc, []int{14} +} +func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +} +func (m *RunAsGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) } +var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo + func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func 
(*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} + return fileDescriptor_014060e454a820dc, []int{15} +} +func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +} +func (m *RunAsUserStrategyOptions) XXX_Size() int { + return m.Size() } +func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } func (*RuntimeClassStrategyOptions) ProtoMessage() {} func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{16} + return fileDescriptor_014060e454a820dc, []int{16} +} +func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) +} +func (m *RuntimeClassStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{17} +} +func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) +} +func (m *SELinuxStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) } -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{18} + return fileDescriptor_014060e454a820dc, []int{18} +} +func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
+func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { + return m.Size() } +func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo func init() { proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.policy.v1beta1.AllowedCSIDriver") @@ -177,6 +593,7 @@ func init() { proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList") proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec") proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus") + proto.RegisterMapType((map[string]v1.Time)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus.DisruptedPodsEntry") proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList") proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") @@ -186,10 +603,137 @@ func init() { proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptor_014060e454a820dc) +} + +var fileDescriptor_014060e454a820dc = []byte{ + // 1883 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x6e, 0x1b, 0xc7, + 0x15, 0xd6, 0x9a, 0xfa, 0xa1, 0x46, 0x3f, 0x16, 0x47, 0x3f, 0x5e, 0x2b, 0x35, 0xd7, 0xd9, 0x00, + 0x85, 0x9b, 0x26, 0xcb, 0x58, 0x76, 0x5c, 0xa3, 0x69, 0x8b, 0x68, 0x45, 0xc9, 0x56, 0x60, 0x59, + 0xec, 0xd0, 0x0e, 0xda, 0xc2, 0x2d, 0x3a, 0xe4, 0x8e, 0xa8, 0x8d, 0x96, 0xbb, 0xdb, 0x99, 0x59, + 0x46, 0xbc, 0xeb, 0x45, 0x2f, 0x7a, 0xd9, 0x17, 0x08, 0xfa, 0x00, 0x45, 0xaf, 0xfa, 0x12, 0x0e, + 0x50, 0x04, 0xb9, 0x0c, 0x7a, 0x41, 0xd4, 0x2c, 0xfa, 0x12, 0xbe, 0x0a, 0x76, 0x38, 0xbb, 0xe4, + 0xfe, 0x91, 0x76, 0x00, 0xfb, 0x8e, 0x3b, 0xe7, 0xfb, 0xbe, 0x33, 0x73, 0xe6, 0xcc, 0x99, 0xc3, + 0x01, 0xe6, 0xc5, 0x7d, 0x66, 0xd8, 0x5e, 0xed, 0x22, 0x68, 0x11, 0xea, 0x12, 0x4e, 0x58, 0xad, + 0x47, 0x5c, 0xcb, 0xa3, 0x35, 0x69, 0xc0, 0xbe, 0x5d, 0xf3, 0x3d, 0xc7, 0x6e, 0xf7, 0x6b, 0xbd, + 0xdb, 0x2d, 0xc2, 0xf1, 0xed, 0x5a, 0x87, 0xb8, 0x84, 0x62, 0x4e, 0x2c, 0xc3, 0xa7, 0x1e, 0xf7, + 0xe0, 0xf5, 0x11, 0xd4, 0xc0, 0xbe, 0x6d, 0x8c, 0xa0, 0x86, 0x84, 0xee, 0x7e, 0xd8, 0xb1, 0xf9, + 0x79, 0xd0, 0x32, 0xda, 0x5e, 0xb7, 0xd6, 0xf1, 0x3a, 0x5e, 0x4d, 0x30, 0x5a, 0xc1, 0x99, 0xf8, + 0x12, 0x1f, 0xe2, 0xd7, 0x48, 0x69, 0x57, 0x9f, 0x70, 0xda, 0xf6, 0x28, 0xa9, 0xf5, 0x32, 0xde, + 0x76, 0xef, 0x8e, 0x31, 0x5d, 0xdc, 0x3e, 0xb7, 0x5d, 0x42, 0xfb, 0x35, 0xff, 0xa2, 0x13, 0x0e, + 0xb0, 0x5a, 0x97, 0x70, 0x9c, 0xc7, 0xaa, 0x15, 0xb1, 0x68, 0xe0, 0x72, 0xbb, 0x4b, 0x32, 0x84, + 0x7b, 0xb3, 0x08, 
0xac, 0x7d, 0x4e, 0xba, 0x38, 0xc3, 0xbb, 0x53, 0xc4, 0x0b, 0xb8, 0xed, 0xd4, + 0x6c, 0x97, 0x33, 0x4e, 0xd3, 0x24, 0xfd, 0x2e, 0xd8, 0xd8, 0x77, 0x1c, 0xef, 0x4b, 0x62, 0x1d, + 0x34, 0x8f, 0xeb, 0xd4, 0xee, 0x11, 0x0a, 0x6f, 0x82, 0x79, 0x17, 0x77, 0x89, 0xaa, 0xdc, 0x54, + 0x6e, 0x2d, 0x9b, 0xab, 0xcf, 0x07, 0xda, 0xdc, 0x70, 0xa0, 0xcd, 0x3f, 0xc6, 0x5d, 0x82, 0x84, + 0x45, 0xff, 0x04, 0x54, 0x24, 0xeb, 0xc8, 0x21, 0x97, 0x9f, 0x7b, 0x4e, 0xd0, 0x25, 0xf0, 0xc7, + 0x60, 0xd1, 0x12, 0x02, 0x92, 0xb8, 0x2e, 0x89, 0x8b, 0x23, 0x59, 0x24, 0xad, 0x3a, 0x03, 0x57, + 0x25, 0xf9, 0xa1, 0xc7, 0x78, 0x03, 0xf3, 0x73, 0xb8, 0x07, 0x80, 0x8f, 0xf9, 0x79, 0x83, 0x92, + 0x33, 0xfb, 0x52, 0xd2, 0xa1, 0xa4, 0x83, 0x46, 0x6c, 0x41, 0x13, 0x28, 0xf8, 0x01, 0x28, 0x53, + 0x82, 0xad, 0x53, 0xd7, 0xe9, 0xab, 0x57, 0x6e, 0x2a, 0xb7, 0xca, 0xe6, 0x86, 0x64, 0x94, 0x91, + 0x1c, 0x47, 0x31, 0x42, 0xff, 0x8f, 0x02, 0xca, 0x87, 0x3d, 0xbb, 0xcd, 0x6d, 0xcf, 0x85, 0x7f, + 0x04, 0xe5, 0x70, 0xb7, 0x2c, 0xcc, 0xb1, 0x70, 0xb6, 0xb2, 0xf7, 0x91, 0x31, 0xce, 0xa4, 0x38, + 0x78, 0x86, 0x7f, 0xd1, 0x09, 0x07, 0x98, 0x11, 0xa2, 0x8d, 0xde, 0x6d, 0xe3, 0xb4, 0xf5, 0x05, + 0x69, 0xf3, 0x13, 0xc2, 0xf1, 0x78, 0x7a, 0xe3, 0x31, 0x14, 0xab, 0x42, 0x07, 0xac, 0x59, 0xc4, + 0x21, 0x9c, 0x9c, 0xfa, 0xa1, 0x47, 0x26, 0x66, 0xb8, 0xb2, 0x77, 0xe7, 0xd5, 0xdc, 0xd4, 0x27, + 0xa9, 0x66, 0x65, 0x38, 0xd0, 0xd6, 0x12, 0x43, 0x28, 0x29, 0xae, 0x7f, 0xa5, 0x80, 0x9d, 0xa3, + 0xe6, 0x03, 0xea, 0x05, 0x7e, 0x93, 0x87, 0xbb, 0xdb, 0xe9, 0x4b, 0x13, 0xfc, 0x19, 0x98, 0xa7, + 0x81, 0x13, 0xed, 0xe5, 0x7b, 0xd1, 0x5e, 0xa2, 0xc0, 0x21, 0x2f, 0x07, 0xda, 0x66, 0x8a, 0xf5, + 0xa4, 0xef, 0x13, 0x24, 0x08, 0xf0, 0x33, 0xb0, 0x48, 0xb1, 0xdb, 0x21, 0xe1, 0xd4, 0x4b, 0xb7, + 0x56, 0xf6, 0x74, 0xa3, 0xf0, 0xac, 0x19, 0xc7, 0x75, 0x14, 0x42, 0xc7, 0x3b, 0x2e, 0x3e, 0x19, + 0x92, 0x0a, 0xfa, 0x09, 0x58, 0x13, 0x5b, 0xed, 0x51, 0x2e, 0x2c, 0xf0, 0x06, 0x28, 0x75, 0x6d, + 0x57, 0x4c, 0x6a, 0xc1, 0x5c, 0x91, 0xac, 0xd2, 0x89, 0xed, 0xa2, 0x70, 0x5c, 0x98, 0xf1, 0xa5, + 0x88, 0xd9, 0xa4, 0x19, 0x5f, 0xa2, 0x70, 0x5c, 0x7f, 0x00, 0x96, 0xa4, 0xc7, 0x49, 0xa1, 0xd2, + 0x74, 0xa1, 0x52, 0x8e, 0xd0, 0x3f, 0xae, 0x80, 0xcd, 0x86, 0x67, 0xd5, 0x6d, 0x46, 0x03, 0x11, + 0x2f, 0x33, 0xb0, 0x3a, 0x84, 0xbf, 0x85, 0xfc, 0x78, 0x02, 0xe6, 0x99, 0x4f, 0xda, 0x32, 0x2d, + 0xf6, 0xa6, 0xc4, 0x36, 0x67, 0x7e, 0x4d, 0x9f, 0xb4, 0xc7, 0xc7, 0x32, 0xfc, 0x42, 0x42, 0x0d, + 0x3e, 0x03, 0x8b, 0x8c, 0x63, 0x1e, 0x30, 0xb5, 0x24, 0x74, 0xef, 0xbe, 0xa6, 0xae, 0xe0, 0x8e, + 0x77, 0x71, 0xf4, 0x8d, 0xa4, 0xa6, 0xfe, 0x6f, 0x05, 0x5c, 0xcb, 0x61, 0x3d, 0xb2, 0x19, 0x87, + 0xcf, 0x32, 0x11, 0x33, 0x5e, 0x2d, 0x62, 0x21, 0x5b, 0xc4, 0x2b, 0x3e, 0xbc, 0xd1, 0xc8, 0x44, + 0xb4, 0x9a, 0x60, 0xc1, 0xe6, 0xa4, 0x1b, 0xa5, 0xa2, 0xf1, 0x7a, 0xcb, 0x32, 0xd7, 0xa4, 0xf4, + 0xc2, 0x71, 0x28, 0x82, 0x46, 0x5a, 0xfa, 0x37, 0x57, 0x72, 0x97, 0x13, 0x86, 0x13, 0x9e, 0x81, + 0xd5, 0xae, 0xed, 0xee, 0xf7, 0xb0, 0xed, 0xe0, 0x96, 0x3c, 0x3d, 0xd3, 0x92, 0x20, 0xac, 0xb0, + 0xc6, 0xa8, 0xc2, 0x1a, 0xc7, 0x2e, 0x3f, 0xa5, 0x4d, 0x4e, 0x6d, 0xb7, 0x63, 0x6e, 0x0c, 0x07, + 0xda, 0xea, 0xc9, 0x84, 0x12, 0x4a, 0xe8, 0xc2, 0xdf, 0x83, 0x32, 0x23, 0x0e, 0x69, 0x73, 0x8f, + 0xbe, 0x5e, 0x85, 0x78, 0x84, 0x5b, 0xc4, 0x69, 0x4a, 0xaa, 0xb9, 0x1a, 0xc6, 0x2d, 0xfa, 0x42, + 0xb1, 0x24, 0x74, 0xc0, 0x7a, 0x17, 0x5f, 0x3e, 0x75, 0x71, 0xbc, 0x90, 0xd2, 0x0f, 0x5c, 0x08, + 0x1c, 0x0e, 0xb4, 0xf5, 0x93, 0x84, 0x16, 0x4a, 0x69, 0xeb, 0xff, 0x9f, 0x07, 0xd7, 0x0b, 0xb3, + 0x0a, 0x7e, 0x06, 0xa0, 0xd7, 0x62, 0x84, 
0xf6, 0x88, 0xf5, 0x60, 0x74, 0x07, 0xd9, 0x5e, 0x74, + 0x70, 0x77, 0xe5, 0x06, 0xc1, 0xd3, 0x0c, 0x02, 0xe5, 0xb0, 0xe0, 0x5f, 0x14, 0xb0, 0x66, 0x8d, + 0xdc, 0x10, 0xab, 0xe1, 0x59, 0x51, 0x62, 0x3c, 0xf8, 0x21, 0xf9, 0x6e, 0xd4, 0x27, 0x95, 0x0e, + 0x5d, 0x4e, 0xfb, 0xe6, 0xb6, 0x9c, 0xd0, 0x5a, 0xc2, 0x86, 0x92, 0x4e, 0xe1, 0x09, 0x80, 0x56, + 0x2c, 0xc9, 0xe4, 0x9d, 0x26, 0x42, 0xbc, 0x60, 0xde, 0x90, 0x0a, 0xdb, 0x09, 0xbf, 0x11, 0x08, + 0xe5, 0x10, 0xe1, 0xaf, 0xc0, 0x7a, 0x3b, 0xa0, 0x94, 0xb8, 0xfc, 0x21, 0xc1, 0x0e, 0x3f, 0xef, + 0xab, 0xf3, 0x42, 0x6a, 0x47, 0x4a, 0xad, 0x1f, 0x24, 0xac, 0x28, 0x85, 0x0e, 0xf9, 0x16, 0x61, + 0x36, 0x25, 0x56, 0xc4, 0x5f, 0x48, 0xf2, 0xeb, 0x09, 0x2b, 0x4a, 0xa1, 0xe1, 0x7d, 0xb0, 0x4a, + 0x2e, 0x7d, 0xd2, 0x8e, 0x62, 0xba, 0x28, 0xd8, 0x5b, 0x92, 0xbd, 0x7a, 0x38, 0x61, 0x43, 0x09, + 0xe4, 0xae, 0x03, 0x60, 0x36, 0x88, 0x70, 0x03, 0x94, 0x2e, 0x48, 0x7f, 0x74, 0xf3, 0xa0, 0xf0, + 0x27, 0xfc, 0x14, 0x2c, 0xf4, 0xb0, 0x13, 0x10, 0x99, 0xeb, 0xef, 0xbf, 0x5a, 0xae, 0x3f, 0xb1, + 0xbb, 0x04, 0x8d, 0x88, 0x3f, 0xbf, 0x72, 0x5f, 0xd1, 0xbf, 0x56, 0x40, 0xa5, 0xe1, 0x59, 0x4d, + 0xd2, 0x0e, 0xa8, 0xcd, 0xfb, 0x0d, 0xb1, 0xcf, 0x6f, 0xa1, 0x66, 0xa3, 0x44, 0xcd, 0xfe, 0x68, + 0x7a, 0xae, 0x25, 0x67, 0x57, 0x54, 0xb1, 0xf5, 0xe7, 0x0a, 0xd8, 0xce, 0xa0, 0xdf, 0x42, 0x45, + 0xfd, 0x75, 0xb2, 0xa2, 0x7e, 0xf0, 0x3a, 0x8b, 0x29, 0xa8, 0xa7, 0x5f, 0x57, 0x72, 0x96, 0x22, + 0xaa, 0x69, 0xd8, 0xdd, 0x51, 0xbb, 0x67, 0x3b, 0xa4, 0x43, 0x2c, 0xb1, 0x98, 0xf2, 0x44, 0x77, + 0x17, 0x5b, 0xd0, 0x04, 0x0a, 0x32, 0xb0, 0x63, 0x91, 0x33, 0x1c, 0x38, 0x7c, 0xdf, 0xb2, 0x0e, + 0xb0, 0x8f, 0x5b, 0xb6, 0x63, 0x73, 0x5b, 0xb6, 0x23, 0xcb, 0xe6, 0x27, 0xc3, 0x81, 0xb6, 0x53, + 0xcf, 0x45, 0xbc, 0x1c, 0x68, 0x37, 0xb2, 0xdd, 0xbc, 0x11, 0x43, 0xfa, 0xa8, 0x40, 0x1a, 0xf6, + 0x81, 0x4a, 0xc9, 0x9f, 0x82, 0xf0, 0x50, 0xd4, 0xa9, 0xe7, 0x27, 0xdc, 0x96, 0x84, 0xdb, 0x5f, + 0x0e, 0x07, 0x9a, 0x8a, 0x0a, 0x30, 0xb3, 0x1d, 0x17, 0xca, 0xc3, 0x2f, 0xc0, 0x26, 0x96, 0x7d, + 0xf8, 0xa4, 0xd7, 0x79, 0xe1, 0xf5, 0xfe, 0x70, 0xa0, 0x6d, 0xee, 0x67, 0xcd, 0xb3, 0x1d, 0xe6, + 0x89, 0xc2, 0x1a, 0x58, 0xea, 0x89, 0x96, 0x9d, 0xa9, 0x0b, 0x42, 0x7f, 0x7b, 0x38, 0xd0, 0x96, + 0x46, 0x5d, 0x7c, 0xa8, 0xb9, 0x78, 0xd4, 0x14, 0x8d, 0x60, 0x84, 0x82, 0x1f, 0x83, 0x95, 0x73, + 0x8f, 0xf1, 0xc7, 0x84, 0x7f, 0xe9, 0xd1, 0x0b, 0x51, 0x18, 0xca, 0xe6, 0xa6, 0xdc, 0xc1, 0x95, + 0x87, 0x63, 0x13, 0x9a, 0xc4, 0xc1, 0xdf, 0x82, 0xe5, 0x73, 0xd9, 0xf6, 0x31, 0x75, 0x49, 0x24, + 0xda, 0xad, 0x29, 0x89, 0x96, 0x68, 0x11, 0xcd, 0x8a, 0x94, 0x5f, 0x8e, 0x86, 0x19, 0x1a, 0xab, + 0xc1, 0x9f, 0x80, 0x25, 0xf1, 0x71, 0x5c, 0x57, 0xcb, 0x62, 0x36, 0x57, 0x25, 0x7c, 0xe9, 0xe1, + 0x68, 0x18, 0x45, 0xf6, 0x08, 0x7a, 0xdc, 0x38, 0x50, 0x97, 0xb3, 0xd0, 0xe3, 0xc6, 0x01, 0x8a, + 0xec, 0xf0, 0x19, 0x58, 0x62, 0xe4, 0x91, 0xed, 0x06, 0x97, 0x2a, 0x10, 0x47, 0xee, 0xf6, 0x94, + 0xe9, 0x36, 0x0f, 0x05, 0x32, 0xd5, 0x70, 0x8f, 0xd5, 0xa5, 0x1d, 0x45, 0x92, 0xd0, 0x02, 0xcb, + 0x34, 0x70, 0xf7, 0xd9, 0x53, 0x46, 0xa8, 0xba, 0x92, 0xb9, 0xed, 0xd3, 0xfa, 0x28, 0xc2, 0xa6, + 0x3d, 0xc4, 0x91, 0x89, 0x11, 0x68, 0x2c, 0x0c, 0x2d, 0x00, 0xc4, 0x87, 0xe8, 0xeb, 0xd5, 0x9d, + 0x99, 0x7d, 0x20, 0x8a, 0xc1, 0x69, 0x3f, 0xeb, 0xe1, 0xf1, 0x1c, 0x9b, 0xd1, 0x84, 0x2e, 0xfc, + 0xab, 0x02, 0x20, 0x0b, 0x7c, 0xdf, 0x21, 0x5d, 0xe2, 0x72, 0xec, 0x88, 0x51, 0xa6, 0xae, 0x0a, + 0x77, 0xbf, 0x98, 0x16, 0xb5, 0x0c, 0x29, 0xed, 0x36, 0x6e, 0x06, 0xb2, 0x50, 0x94, 0xe3, 0x33, + 0xdc, 0xb4, 0x33, 0xb9, 0xda, 0xb5, 0x99, 0x9b, 0x96, 0xff, 0x2f, 
0x69, 0xbc, 0x69, 0xd2, 0x8e, + 0x22, 0x49, 0xf8, 0x39, 0xd8, 0x89, 0xfe, 0x43, 0x22, 0xcf, 0xe3, 0x47, 0xb6, 0x43, 0x58, 0x9f, + 0x71, 0xd2, 0x55, 0xd7, 0x45, 0x32, 0x55, 0x25, 0x73, 0x07, 0xe5, 0xa2, 0x50, 0x01, 0x1b, 0x76, + 0x81, 0x16, 0x15, 0xa1, 0xf0, 0x84, 0xc6, 0x55, 0xf0, 0x90, 0xb5, 0xb1, 0x33, 0xea, 0x8d, 0xae, + 0x0a, 0x07, 0xef, 0x0d, 0x07, 0x9a, 0x56, 0x9f, 0x0e, 0x45, 0xb3, 0xb4, 0xe0, 0x6f, 0x80, 0x8a, + 0x8b, 0xfc, 0x6c, 0x08, 0x3f, 0x3f, 0x0a, 0x2b, 0x5b, 0xa1, 0x83, 0x42, 0x36, 0xf4, 0xc1, 0x06, + 0x4e, 0xfe, 0x9b, 0x67, 0x6a, 0x45, 0x9c, 0xf5, 0xf7, 0xa7, 0xec, 0x43, 0xea, 0x01, 0xc0, 0x54, + 0x65, 0x18, 0x37, 0x52, 0x06, 0x86, 0x32, 0xea, 0xf0, 0x12, 0x40, 0x9c, 0x7e, 0x7c, 0x60, 0x2a, + 0x9c, 0x79, 0x91, 0x65, 0x5e, 0x2c, 0xc6, 0xa9, 0x96, 0x31, 0x31, 0x94, 0xe3, 0x03, 0x72, 0x50, + 0xc1, 0xa9, 0xc7, 0x12, 0xa6, 0x5e, 0x13, 0x8e, 0x7f, 0x3a, 0xdb, 0x71, 0xcc, 0x31, 0xaf, 0x4b, + 0xbf, 0x95, 0xb4, 0x85, 0xa1, 0xac, 0x03, 0xf8, 0x08, 0x6c, 0xc9, 0xc1, 0xa7, 0x2e, 0xc3, 0x67, + 0xa4, 0xd9, 0x67, 0x6d, 0xee, 0x30, 0x75, 0x53, 0xd4, 0x6e, 0x75, 0x38, 0xd0, 0xb6, 0xf6, 0x73, + 0xec, 0x28, 0x97, 0x05, 0x3f, 0x05, 0x1b, 0x67, 0x1e, 0x6d, 0xd9, 0x96, 0x45, 0xdc, 0x48, 0x69, + 0x4b, 0x28, 0x6d, 0x85, 0xf1, 0x3f, 0x4a, 0xd9, 0x50, 0x06, 0x0d, 0x19, 0xd8, 0x96, 0xca, 0x0d, + 0xea, 0xb5, 0x4f, 0xbc, 0xc0, 0xe5, 0xe1, 0x75, 0xc1, 0xd4, 0xed, 0xf8, 0x8a, 0xdc, 0xde, 0xcf, + 0x03, 0xbc, 0x1c, 0x68, 0x37, 0x73, 0xae, 0xab, 0x04, 0x08, 0xe5, 0x6b, 0x43, 0x07, 0xac, 0xca, + 0xe7, 0xaf, 0x03, 0x07, 0x33, 0xa6, 0xaa, 0xe2, 0xa8, 0xdf, 0x9b, 0x5e, 0xd8, 0x62, 0x78, 0xfa, + 0xbc, 0x8b, 0xff, 0x65, 0x93, 0x00, 0x94, 0x50, 0xd7, 0xff, 0xae, 0x80, 0xeb, 0x85, 0x85, 0x11, + 0xde, 0x4b, 0xbc, 0xa9, 0xe8, 0xa9, 0x37, 0x15, 0x98, 0x25, 0xbe, 0x81, 0x27, 0x95, 0xaf, 0x14, + 0xa0, 0x16, 0xdd, 0x10, 0xf0, 0xe3, 0xc4, 0x04, 0xdf, 0x4d, 0x4d, 0xb0, 0x92, 0xe1, 0xbd, 0x81, + 0xf9, 0x7d, 0xa3, 0x80, 0x77, 0xa6, 0xec, 0x40, 0x5c, 0x90, 0x88, 0x35, 0x89, 0x7a, 0x8c, 0xc3, + 0xa3, 0xac, 0x88, 0x3c, 0x1a, 0x17, 0xa4, 0x1c, 0x0c, 0x2a, 0x64, 0xc3, 0xa7, 0xe0, 0x9a, 0xac, + 0x86, 0x69, 0x9b, 0xe8, 0xdc, 0x97, 0xcd, 0x77, 0x86, 0x03, 0xed, 0x5a, 0x3d, 0x1f, 0x82, 0x8a, + 0xb8, 0xfa, 0x3f, 0x15, 0xb0, 0x93, 0x7f, 0xe5, 0xc3, 0x3b, 0x89, 0x70, 0x6b, 0xa9, 0x70, 0x5f, + 0x4d, 0xb1, 0x64, 0xb0, 0xff, 0x00, 0xd6, 0x65, 0x63, 0x90, 0x7c, 0x22, 0x4c, 0x04, 0x3d, 0x3c, + 0x22, 0x61, 0x4f, 0x2f, 0x25, 0xa2, 0xf4, 0x15, 0xff, 0xc6, 0x93, 0x63, 0x28, 0xa5, 0xa6, 0xff, + 0x4b, 0x01, 0xef, 0xce, 0xbc, 0x6c, 0xa1, 0x99, 0x98, 0xba, 0x91, 0x9a, 0x7a, 0xb5, 0x58, 0xe0, + 0xcd, 0xbc, 0x14, 0x9a, 0x1f, 0x3e, 0x7f, 0x51, 0x9d, 0xfb, 0xf6, 0x45, 0x75, 0xee, 0xbb, 0x17, + 0xd5, 0xb9, 0x3f, 0x0f, 0xab, 0xca, 0xf3, 0x61, 0x55, 0xf9, 0x76, 0x58, 0x55, 0xbe, 0x1b, 0x56, + 0x95, 0xff, 0x0e, 0xab, 0xca, 0xdf, 0xfe, 0x57, 0x9d, 0xfb, 0xdd, 0x92, 0x94, 0xfb, 0x3e, 0x00, + 0x00, 0xff, 0xff, 0x48, 0x23, 0x7b, 0x0e, 0x44, 0x18, 0x00, 0x00, +} + func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -197,21 +741,27 @@ func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { } func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Name) 
+ copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -219,21 +769,27 @@ func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { } func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -241,29 +797,35 @@ func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { } func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i += copy(dAtA[i:], m.PathPrefix) - dAtA[i] = 0x10 - i++ + i-- if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x10 + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Eviction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -271,35 +833,44 @@ func (m *Eviction) Marshal() (dAtA []byte, err error) { } func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 if m.DeleteOptions != nil { + { + size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeleteOptions.Size())) - n2, err := m.DeleteOptions.MarshalTo(dAtA[i:]) + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -307,33 +878,41 @@ func (m *FSGroupStrategyOptions) 
Marshal() (dAtA []byte, err error) { } func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *HostPortRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -341,23 +920,28 @@ func (m *HostPortRange) Marshal() (dAtA []byte, err error) { } func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *IDRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -365,23 +949,28 @@ func (m *IDRange) Marshal() (dAtA []byte, err error) { } func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -389,41 +978,52 @@ func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudget) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudget) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -431,37 +1031,46 @@ func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -469,47 +1078,58 @@ func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.MinAvailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinAvailable.Size())) - n7, err := m.MinAvailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- + dAtA[i] = 0x1a } if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, 
err + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x12 } - if m.MaxUnavailable != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n9, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MinAvailable != nil { + { + size, err := m.MinAvailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -517,63 +1137,66 @@ func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpectedPods)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredHealthy)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.PodDisruptionsAllowed)) + i-- + dAtA[i] = 0x18 if len(m.DisruptedPods) > 0 { keysForDisruptedPods := make([]string, 0, len(m.DisruptedPods)) for k := range m.DisruptedPods { keysForDisruptedPods = append(keysForDisruptedPods, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - for _, k := range keysForDisruptedPods { + for iNdEx := len(keysForDisruptedPods) - 1; iNdEx >= 0; iNdEx-- { + v := m.DisruptedPods[string(keysForDisruptedPods[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - v := m.DisruptedPods[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForDisruptedPods[iNdEx]) + copy(dAtA[i:], keysForDisruptedPods[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDisruptedPods[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n10, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 } } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodDisruptionsAllowed)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredHealthy)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExpectedPods)) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + 
i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -581,33 +1204,42 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n11, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n12, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -615,37 +1247,46 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n13, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -653,300 +1294,283 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if 
len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.RuntimeClass != nil { + { + size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.AllowedCSIDrivers) > 0 { + for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } } - dAtA[i] = 0x30 - i++ - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.HostPorts) > 0 { - for _, msg := range m.HostPorts { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.RunAsGroup != nil { + { + size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - dAtA[i] = 0x40 - i++ - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x48 - i++ - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) - n14, err := m.SELinux.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllowedProcMountTypes) > 0 { + for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedProcMountTypes[iNdEx]) + copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } } - i += n14 - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n15, err := m.RunAsUser.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.ForbiddenSysctls) > 0 { + for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ForbiddenSysctls[iNdEx]) + copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } } - i += n15 - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) - n16, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllowedUnsafeSysctls) > 0 { + for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { 
+ i -= len(m.AllowedUnsafeSysctls[iNdEx]) + copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } } - i += n16 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n17, err := m.FSGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllowedFlexVolumes) > 0 { + for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } } - i += n17 - dAtA[i] = 0x70 - i++ - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.AllowedHostPaths) > 0 { + for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } } - i++ - if m.DefaultAllowPrivilegeEscalation != nil { - dAtA[i] = 0x78 - i++ - if *m.DefaultAllowPrivilegeEscalation { + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.AllowPrivilegeEscalation != nil { - dAtA[i] = 0x80 - i++ + i-- dAtA[i] = 0x1 - i++ - if *m.AllowPrivilegeEscalation { + i-- + dAtA[i] = 0x80 + } + if m.DefaultAllowPrivilegeEscalation != nil { + i-- + if *m.DefaultAllowPrivilegeEscalation { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x78 } - if len(m.AllowedHostPaths) > 0 { - for _, msg := range m.AllowedHostPaths { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + if m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + { + size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.AllowedFlexVolumes) > 0 { - for _, msg := range m.AllowedFlexVolumes { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + dAtA[i] = 0x6a + { + size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x62 + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x5a + { + size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i-- + dAtA[i] = 0x52 + i-- + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.HostPorts) > 0 { + for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x3a } } - if m.RunAsGroup != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsGroup.Size())) - n18, err := m.RunAsGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x2a } - i += n18 } - if len(m.AllowedCSIDrivers) > 0 { - for _, msg := range m.AllowedCSIDrivers { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.AllowedCapabilities) > 0 { + for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedCapabilities[iNdEx]) + copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x22 } } - if m.RuntimeClass != nil { - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RuntimeClass.Size())) - n19, err := m.RuntimeClass.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.RequiredDropCapabilities) > 0 { + for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequiredDropCapabilities[iNdEx]) + copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DefaultAddCapabilities) > 0 { + for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DefaultAddCapabilities[iNdEx]) + copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i += n19 } - return i, nil + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -954,33 +1578,41 @@ func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -988,33 +1620,41 @@ func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1022,38 +1662,38 @@ func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.DefaultRuntimeClassName != nil { + i -= len(*m.DefaultRuntimeClassName) + copy(dAtA[i:], *m.DefaultRuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) + i-- + dAtA[i] = 0x12 + } if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { + for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedRuntimeClassNames[iNdEx]) + copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if m.DefaultRuntimeClassName != nil { - 
dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i += copy(dAtA[i:], *m.DefaultRuntimeClassName) - } - return i, nil + return len(dAtA) - i, nil } func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1061,31 +1701,39 @@ func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if m.SELinuxOptions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n20, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n20 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1093,39 +1741,52 @@ func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AllowedCSIDriver) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1134,6 +1795,9 @@ func (m *AllowedCSIDriver) Size() (n int) { } func (m *AllowedFlexVolume) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Driver) @@ -1142,6 +1806,9 @@ func (m *AllowedFlexVolume) Size() (n int) { } func (m *AllowedHostPath) Size() (n int) { + if m == nil { + return 0 + } var l int 
_ = l l = len(m.PathPrefix) @@ -1151,6 +1818,9 @@ func (m *AllowedHostPath) Size() (n int) { } func (m *Eviction) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1163,6 +1833,9 @@ func (m *Eviction) Size() (n int) { } func (m *FSGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1177,6 +1850,9 @@ func (m *FSGroupStrategyOptions) Size() (n int) { } func (m *HostPortRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -1185,6 +1861,9 @@ func (m *HostPortRange) Size() (n int) { } func (m *IDRange) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -1193,6 +1872,9 @@ func (m *IDRange) Size() (n int) { } func (m *PodDisruptionBudget) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1205,6 +1887,9 @@ func (m *PodDisruptionBudget) Size() (n int) { } func (m *PodDisruptionBudgetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1219,6 +1904,9 @@ func (m *PodDisruptionBudgetList) Size() (n int) { } func (m *PodDisruptionBudgetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MinAvailable != nil { @@ -1237,6 +1925,9 @@ func (m *PodDisruptionBudgetSpec) Size() (n int) { } func (m *PodDisruptionBudgetStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1257,6 +1948,9 @@ func (m *PodDisruptionBudgetStatus) Size() (n int) { } func (m *PodSecurityPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1267,6 +1961,9 @@ func (m *PodSecurityPolicy) Size() (n int) { } func (m *PodSecurityPolicyList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1281,6 +1978,9 @@ func (m *PodSecurityPolicyList) Size() (n int) { } func (m *PodSecurityPolicySpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1380,6 +2080,9 @@ func (m *PodSecurityPolicySpec) Size() (n int) { } func (m *RunAsGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1394,6 +2097,9 @@ func (m *RunAsGroupStrategyOptions) Size() (n int) { } func (m *RunAsUserStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1408,6 +2114,9 @@ func (m *RunAsUserStrategyOptions) Size() (n int) { } func (m *RuntimeClassStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.AllowedRuntimeClassNames) > 0 { @@ -1424,6 +2133,9 @@ func (m *RuntimeClassStrategyOptions) Size() (n int) { } func (m *SELinuxStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1436,6 +2148,9 @@ func (m *SELinuxStrategyOptions) Size() (n int) { } func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Rule) @@ -1450,14 +2165,7 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1498,8 +2206,8 @@ func (this *Eviction) String() string { return "nil" } s := strings.Join([]string{`&Eviction{`, - 
`ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, `}`, }, "") return s @@ -1508,9 +2216,14 @@ func (this *FSGroupStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&FSGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -1542,7 +2255,7 @@ func (this *PodDisruptionBudget) String() string { return "nil" } s := strings.Join([]string{`&PodDisruptionBudget{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1553,9 +2266,14 @@ func (this *PodDisruptionBudgetList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PodDisruptionBudget{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodDisruptionBudgetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1565,9 +2283,9 @@ func (this *PodDisruptionBudgetSpec) String() string { return "nil" } s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, - `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", 
this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -1581,7 +2299,7 @@ func (this *PodDisruptionBudgetStatus) String() string { keysForDisruptedPods = append(keysForDisruptedPods, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - mapStringForDisruptedPods := "map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time{" + mapStringForDisruptedPods := "map[string]v1.Time{" for _, k := range keysForDisruptedPods { mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) } @@ -1602,7 +2320,7 @@ func (this *PodSecurityPolicy) String() string { return "nil" } s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1612,9 +2330,14 @@ func (this *PodSecurityPolicyList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PodSecurityPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1623,6 +2346,26 @@ func (this *PodSecurityPolicySpec) String() string { if this == nil { return "nil" } + repeatedStringForHostPorts := "[]HostPortRange{" + for _, f := range this.HostPorts { + repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," + } + repeatedStringForHostPorts += "}" + repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" + for _, f := range this.AllowedHostPaths { + repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedHostPaths += "}" + repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" + for _, f := range this.AllowedFlexVolumes { + repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedFlexVolumes += "}" + repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" + for _, f := range this.AllowedCSIDrivers { + repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedCSIDrivers += 
"}" s := strings.Join([]string{`&PodSecurityPolicySpec{`, `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, @@ -1630,7 +2373,7 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, + `HostPorts:` + repeatedStringForHostPorts + `,`, `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, @@ -1640,14 +2383,14 @@ func (this *PodSecurityPolicySpec) String() string { `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, - `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, + `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, + `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedCSIDrivers), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + `,`, - `RuntimeClass:` + strings.Replace(fmt.Sprintf("%v", this.RuntimeClass), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, + `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, + `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, `}`, }, "") return s @@ -1656,9 +2399,14 @@ func (this *RunAsGroupStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -1667,9 +2415,14 @@ func (this *RunAsUserStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range 
this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsUserStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -1691,7 +2444,7 @@ func (this *SELinuxStrategyOptions) String() string { } s := strings.Join([]string{`&SELinuxStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, `}`, }, "") return s @@ -1700,9 +2453,14 @@ func (this *SupplementalGroupsStrategyOptions) String() string { if this == nil { return "nil" } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -1730,7 +2488,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1758,7 +2516,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1768,6 +2526,9 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1782,6 +2543,9 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1809,7 +2573,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1837,7 +2601,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1847,6 +2611,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1861,6 +2628,9 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1888,7 +2658,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1916,7 +2686,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1926,6 +2696,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1945,7 +2718,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1960,6 +2733,9 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1987,7 +2763,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2015,7 +2791,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2024,6 +2800,9 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2045,7 +2824,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2054,11 +2833,14 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.DeleteOptions == nil { - m.DeleteOptions = &k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions{} + m.DeleteOptions = &v1.DeleteOptions{} } if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2073,6 +2855,9 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2100,7 +2885,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2128,7 +2913,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2138,6 +2923,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2157,7 +2945,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2166,6 +2954,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2183,6 +2974,9 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2210,7 +3004,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2238,7 +3032,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int32(b) & 0x7F) << shift + m.Min |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2257,7 +3051,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int32(b) & 0x7F) << shift + m.Max |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2271,6 +3065,9 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2298,7 +3095,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2326,7 +3123,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= (int64(b) & 0x7F) << shift + m.Min |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2345,7 +3142,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= (int64(b) & 0x7F) << shift + m.Max |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2359,6 +3156,9 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2386,7 +3186,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2414,7 +3214,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2423,6 +3223,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2444,7 +3247,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2453,6 +3256,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2474,7 +3280,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2483,6 +3289,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2499,6 +3308,9 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2526,7 +3338,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2554,7 +3366,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2563,6 +3375,9 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2584,7 +3399,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2593,6 +3408,9 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2610,6 +3428,9 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2637,7 +3458,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2665,7 +3486,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2674,11 +3495,14 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MinAvailable == nil { - m.MinAvailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MinAvailable = &intstr.IntOrString{} } if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2698,7 +3522,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2707,11 +3531,14 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Selector = &v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2731,7 +3558,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2740,11 +3567,14 @@ func (m 
*PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2759,6 +3589,9 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2786,7 +3619,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2814,7 +3647,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -2833,7 +3666,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2842,14 +3675,17 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.DisruptedPods == nil { - m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) + m.DisruptedPods = make(map[string]v1.Time) } var mapkey string - mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + mapvalue := &v1.Time{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -2862,7 +3698,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2879,7 +3715,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2889,6 +3725,9 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2905,7 +3744,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2914,13 +3753,13 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + mapvalue = &v1.Time{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -2956,7 +3795,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PodDisruptionsAllowed |= (int32(b) & 0x7F) << shift + 
m.PodDisruptionsAllowed |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2975,7 +3814,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentHealthy |= (int32(b) & 0x7F) << shift + m.CurrentHealthy |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -2994,7 +3833,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredHealthy |= (int32(b) & 0x7F) << shift + m.DesiredHealthy |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3013,7 +3852,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExpectedPods |= (int32(b) & 0x7F) << shift + m.ExpectedPods |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3027,6 +3866,9 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3054,7 +3896,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3082,7 +3924,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3091,6 +3933,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3112,7 +3957,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3121,6 +3966,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3137,6 +3985,9 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3164,7 +4015,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3192,7 +4043,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3201,6 +4052,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3222,7 +4076,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3231,6 +4085,9 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3248,6 +4105,9 @@ func (m 
*PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3275,7 +4135,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3303,7 +4163,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3323,7 +4183,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3333,6 +4193,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3352,7 +4215,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3362,6 +4225,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3381,7 +4247,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3391,6 +4257,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3410,7 +4279,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3420,6 +4289,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3439,7 +4311,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3459,7 +4331,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3468,6 +4340,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3490,7 +4365,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3510,7 +4385,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << 
shift if b < 0x80 { break } @@ -3530,7 +4405,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3539,6 +4414,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3560,7 +4438,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3569,6 +4447,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3590,7 +4471,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3599,6 +4480,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3620,7 +4504,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3629,6 +4513,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3650,7 +4537,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3670,7 +4557,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3691,7 +4578,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3712,7 +4599,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3721,6 +4608,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3743,7 +4633,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3752,6 +4642,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3774,7 +4667,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) 
<< shift if b < 0x80 { break } @@ -3784,6 +4677,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3803,7 +4699,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3813,6 +4709,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3832,7 +4731,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3842,6 +4741,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3861,7 +4763,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3870,6 +4772,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3894,7 +4799,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3903,6 +4808,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3925,7 +4833,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3934,6 +4842,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3953,6 +4864,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3980,7 +4894,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4008,7 +4922,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4018,6 +4932,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -4037,7 +4954,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4046,6 +4963,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4063,6 +4983,9 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4090,7 +5013,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4118,7 +5041,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4128,6 +5051,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4147,7 +5073,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4156,6 +5082,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4173,6 +5102,9 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4200,7 +5132,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4228,7 +5160,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4238,6 +5170,9 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4257,7 +5192,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4267,6 +5202,9 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4282,6 +5220,9 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4309,7 +5250,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4337,7 +5278,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4347,6 +5288,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4366,7 +5310,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4375,11 +5319,14 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.SELinuxOptions == nil { - m.SELinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} + m.SELinuxOptions = &v11.SELinuxOptions{} } if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4394,6 +5341,9 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4421,7 +5371,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4449,7 +5399,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4459,6 +5409,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4478,7 +5431,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4487,6 +5440,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4504,6 +5460,9 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4570,10 +5529,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -4602,6 +5564,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = 
start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -4620,129 +5585,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1886 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x8e, 0xdb, 0xc6, - 0x15, 0x5e, 0x5a, 0xfb, 0xa3, 0x9d, 0xfd, 0xf1, 0x6a, 0xf6, 0xc7, 0xf4, 0xa6, 0x16, 0x1d, 0x06, - 0x28, 0xdc, 0x34, 0xa1, 0xe2, 0xb5, 0xe3, 0x1a, 0x4d, 0x5b, 0x64, 0xb9, 0xda, 0xb5, 0x37, 0xf0, - 0x7a, 0xd5, 0x91, 0x1d, 0xb4, 0x85, 0x5b, 0x74, 0x24, 0xce, 0x6a, 0x99, 0xa5, 0x48, 0x76, 0x66, - 0xa8, 0xac, 0xee, 0x7a, 0xd1, 0x8b, 0x5e, 0xf6, 0x05, 0x82, 0x3e, 0x40, 0xd1, 0xab, 0xbe, 0x84, - 0x03, 0x14, 0x41, 0x2e, 0x83, 0x5e, 0x08, 0xb5, 0x8a, 0xbe, 0x84, 0xaf, 0x02, 0x8e, 0x86, 0x94, - 0xf8, 0x27, 0xd9, 0x01, 0xec, 0x3b, 0x72, 0xce, 0xf7, 0x7d, 0x67, 0xe6, 0x9c, 0x99, 0x33, 0x87, - 0x04, 0xe6, 0xc5, 0x7d, 0x66, 0xd8, 0x5e, 0xed, 0x22, 0x68, 0x11, 0xea, 0x12, 0x4e, 0x58, 0xad, - 0x47, 0x5c, 0xcb, 0xa3, 0x35, 0x69, 0xc0, 0xbe, 0x5d, 0xf3, 0x3d, 0xc7, 0x6e, 0xf7, 0x6b, 0xbd, - 0xdb, 0x2d, 0xc2, 0xf1, 0xed, 0x5a, 0x87, 0xb8, 0x84, 0x62, 0x4e, 0x2c, 0xc3, 0xa7, 0x1e, 0xf7, - 0xe0, 0xf5, 0x11, 0xd4, 0xc0, 0xbe, 0x6d, 0x8c, 0xa0, 0x86, 0x84, 0xee, 0x7e, 0xd8, 0xb1, 0xf9, - 0x79, 0xd0, 0x32, 0xda, 0x5e, 0xb7, 0xd6, 0xf1, 0x3a, 0x5e, 0x4d, 0x30, 0x5a, 0xc1, 0x99, 0x78, - 0x13, 0x2f, 0xe2, 0x69, 0xa4, 0xb4, 0xab, 0x4f, 0x38, 0x6d, 0x7b, 0x94, 0xd4, 0x7a, 0x19, 0x6f, - 0xbb, 0x77, 0xc7, 0x98, 0x2e, 0x6e, 0x9f, 0xdb, 0x2e, 0xa1, 0xfd, 0x9a, 0x7f, 0xd1, 0x09, 0x07, - 0x58, 0xad, 0x4b, 0x38, 0xce, 0x63, 0xd5, 0x8a, 0x58, 0x34, 0x70, 0xb9, 0xdd, 0x25, 0x19, 0xc2, - 0xbd, 0x59, 0x04, 0xd6, 0x3e, 0x27, 0x5d, 0x9c, 0xe1, 0xdd, 0x29, 0xe2, 0x05, 0xdc, 0x76, 0x6a, - 0xb6, 0xcb, 0x19, 0xa7, 0x69, 0x92, 0x7e, 0x17, 0x6c, 0xec, 0x3b, 0x8e, 0xf7, 0x25, 0xb1, 0x0e, - 0x9a, 0xc7, 0x75, 0x6a, 0xf7, 0x08, 0x85, 0x37, 0xc1, 0xbc, 0x8b, 0xbb, 0x44, 0x55, 0x6e, 0x2a, - 0xb7, 0x96, 0xcd, 0xd5, 0xe7, 0x03, 0x6d, 0x6e, 0x38, 0xd0, 0xe6, 0x1f, 0xe3, 0x2e, 0x41, 0xc2, - 0xa2, 0x7f, 0x02, 0x2a, 0x92, 0x75, 0xe4, 0x90, 0xcb, 0xcf, 0x3d, 0x27, 0xe8, 0x12, 0xf8, 0x63, - 0xb0, 0x68, 0x09, 0x01, 0x49, 0x5c, 0x97, 0xc4, 0xc5, 0x91, 0x2c, 0x92, 0x56, 0x9d, 0x81, 0xab, - 0x92, 0xfc, 0xd0, 0x63, 0xbc, 0x81, 0xf9, 0x39, 0xdc, 0x03, 0xc0, 0xc7, 0xfc, 0xbc, 0x41, 0xc9, - 0x99, 0x7d, 0x29, 0xe9, 0x50, 0xd2, 0x41, 0x23, 0xb6, 0xa0, 0x09, 0x14, 0xfc, 0x00, 0x94, 0x29, - 0xc1, 0xd6, 0xa9, 0xeb, 0xf4, 0xd5, 0x2b, 0x37, 0x95, 0x5b, 0x65, 0x73, 0x43, 0x32, 0xca, 0x48, - 0x8e, 0xa3, 0x18, 0xa1, 0xff, 0x47, 0x01, 0xe5, 0xc3, 0x9e, 0xdd, 0xe6, 0xb6, 0xe7, 0xc2, 0x3f, - 0x82, 0x72, 0x98, 0x2d, 0x0b, 0x73, 0x2c, 0x9c, 0xad, 0xec, 0x7d, 0x64, 0x8c, 0x77, 0x52, 0x1c, - 0x3c, 0xc3, 0xbf, 0xe8, 0x84, 0x03, 0xcc, 0x08, 0xd1, 0x46, 0xef, 0xb6, 0x71, 0xda, 0xfa, 0x82, - 0xb4, 0xf9, 0x09, 0xe1, 0x78, 0x3c, 0xbd, 0xf1, 0x18, 0x8a, 0x55, 0xa1, 0x03, 0xd6, 0x2c, 0xe2, - 0x10, 0x4e, 0x4e, 0xfd, 0xd0, 0x23, 0x13, 0x33, 0x5c, 0xd9, 0xbb, 0xf3, 0x6a, 0x6e, 0xea, 0x93, - 0x54, 0xb3, 0x32, 0x1c, 0x68, 0x6b, 0x89, 0x21, 0x94, 0x14, 0xd7, 0xbf, 0x52, 0xc0, 0xce, 0x51, - 0xf3, 0x01, 0xf5, 0x02, 0xbf, 0xc9, 0xc3, 0xec, 0x76, 0xfa, 0xd2, 0x04, 0x7f, 0x06, 0xe6, 0x69, - 
0xe0, 0x44, 0xb9, 0x7c, 0x2f, 0xca, 0x25, 0x0a, 0x1c, 0xf2, 0x72, 0xa0, 0x6d, 0xa6, 0x58, 0x4f, - 0xfa, 0x3e, 0x41, 0x82, 0x00, 0x3f, 0x03, 0x8b, 0x14, 0xbb, 0x1d, 0x12, 0x4e, 0xbd, 0x74, 0x6b, - 0x65, 0x4f, 0x37, 0x0a, 0xcf, 0x9a, 0x71, 0x5c, 0x47, 0x21, 0x74, 0x9c, 0x71, 0xf1, 0xca, 0x90, - 0x54, 0xd0, 0x4f, 0xc0, 0x9a, 0x48, 0xb5, 0x47, 0xb9, 0xb0, 0xc0, 0x1b, 0xa0, 0xd4, 0xb5, 0x5d, - 0x31, 0xa9, 0x05, 0x73, 0x45, 0xb2, 0x4a, 0x27, 0xb6, 0x8b, 0xc2, 0x71, 0x61, 0xc6, 0x97, 0x22, - 0x66, 0x93, 0x66, 0x7c, 0x89, 0xc2, 0x71, 0xfd, 0x01, 0x58, 0x92, 0x1e, 0x27, 0x85, 0x4a, 0xd3, - 0x85, 0x4a, 0x39, 0x42, 0xff, 0xb8, 0x02, 0x36, 0x1b, 0x9e, 0x55, 0xb7, 0x19, 0x0d, 0x44, 0xbc, - 0xcc, 0xc0, 0xea, 0x10, 0xfe, 0x16, 0xf6, 0xc7, 0x13, 0x30, 0xcf, 0x7c, 0xd2, 0x96, 0xdb, 0x62, - 0x6f, 0x4a, 0x6c, 0x73, 0xe6, 0xd7, 0xf4, 0x49, 0x7b, 0x7c, 0x2c, 0xc3, 0x37, 0x24, 0xd4, 0xe0, - 0x33, 0xb0, 0xc8, 0x38, 0xe6, 0x01, 0x53, 0x4b, 0x42, 0xf7, 0xee, 0x6b, 0xea, 0x0a, 0xee, 0x38, - 0x8b, 0xa3, 0x77, 0x24, 0x35, 0xf5, 0x7f, 0x2b, 0xe0, 0x5a, 0x0e, 0xeb, 0x91, 0xcd, 0x38, 0x7c, - 0x96, 0x89, 0x98, 0xf1, 0x6a, 0x11, 0x0b, 0xd9, 0x22, 0x5e, 0xf1, 0xe1, 0x8d, 0x46, 0x26, 0xa2, - 0xd5, 0x04, 0x0b, 0x36, 0x27, 0xdd, 0x68, 0x2b, 0x1a, 0xaf, 0xb7, 0x2c, 0x73, 0x4d, 0x4a, 0x2f, - 0x1c, 0x87, 0x22, 0x68, 0xa4, 0xa5, 0x7f, 0x73, 0x25, 0x77, 0x39, 0x61, 0x38, 0xe1, 0x19, 0x58, - 0xed, 0xda, 0xee, 0x7e, 0x0f, 0xdb, 0x0e, 0x6e, 0xc9, 0xd3, 0x33, 0x6d, 0x13, 0x84, 0x15, 0xd6, - 0x18, 0x55, 0x58, 0xe3, 0xd8, 0xe5, 0xa7, 0xb4, 0xc9, 0xa9, 0xed, 0x76, 0xcc, 0x8d, 0xe1, 0x40, - 0x5b, 0x3d, 0x99, 0x50, 0x42, 0x09, 0x5d, 0xf8, 0x7b, 0x50, 0x66, 0xc4, 0x21, 0x6d, 0xee, 0xd1, - 0xd7, 0xab, 0x10, 0x8f, 0x70, 0x8b, 0x38, 0x4d, 0x49, 0x35, 0x57, 0xc3, 0xb8, 0x45, 0x6f, 0x28, - 0x96, 0x84, 0x0e, 0x58, 0xef, 0xe2, 0xcb, 0xa7, 0x2e, 0x8e, 0x17, 0x52, 0xfa, 0x81, 0x0b, 0x81, - 0xc3, 0x81, 0xb6, 0x7e, 0x92, 0xd0, 0x42, 0x29, 0x6d, 0xfd, 0xff, 0xf3, 0xe0, 0x7a, 0xe1, 0xae, - 0x82, 0x9f, 0x01, 0xe8, 0xb5, 0x18, 0xa1, 0x3d, 0x62, 0x3d, 0x18, 0xdd, 0x41, 0xb6, 0x17, 0x1d, - 0xdc, 0x5d, 0x99, 0x20, 0x78, 0x9a, 0x41, 0xa0, 0x1c, 0x16, 0xfc, 0x8b, 0x02, 0xd6, 0xac, 0x91, - 0x1b, 0x62, 0x35, 0x3c, 0x2b, 0xda, 0x18, 0x0f, 0x7e, 0xc8, 0x7e, 0x37, 0xea, 0x93, 0x4a, 0x87, - 0x2e, 0xa7, 0x7d, 0x73, 0x5b, 0x4e, 0x68, 0x2d, 0x61, 0x43, 0x49, 0xa7, 0xf0, 0x04, 0x40, 0x2b, - 0x96, 0x64, 0xf2, 0x4e, 0x13, 0x21, 0x5e, 0x30, 0x6f, 0x48, 0x85, 0xed, 0x84, 0xdf, 0x08, 0x84, - 0x72, 0x88, 0xf0, 0x57, 0x60, 0xbd, 0x1d, 0x50, 0x4a, 0x5c, 0xfe, 0x90, 0x60, 0x87, 0x9f, 0xf7, - 0xd5, 0x79, 0x21, 0xb5, 0x23, 0xa5, 0xd6, 0x0f, 0x12, 0x56, 0x94, 0x42, 0x87, 0x7c, 0x8b, 0x30, - 0x9b, 0x12, 0x2b, 0xe2, 0x2f, 0x24, 0xf9, 0xf5, 0x84, 0x15, 0xa5, 0xd0, 0xf0, 0x3e, 0x58, 0x25, - 0x97, 0x3e, 0x69, 0x47, 0x31, 0x5d, 0x14, 0xec, 0x2d, 0xc9, 0x5e, 0x3d, 0x9c, 0xb0, 0xa1, 0x04, - 0x72, 0xd7, 0x01, 0x30, 0x1b, 0x44, 0xb8, 0x01, 0x4a, 0x17, 0xa4, 0x3f, 0xba, 0x79, 0x50, 0xf8, - 0x08, 0x3f, 0x05, 0x0b, 0x3d, 0xec, 0x04, 0x44, 0xee, 0xf5, 0xf7, 0x5f, 0x6d, 0xaf, 0x3f, 0xb1, - 0xbb, 0x04, 0x8d, 0x88, 0x3f, 0xbf, 0x72, 0x5f, 0xd1, 0xbf, 0x56, 0x40, 0xa5, 0xe1, 0x59, 0x4d, - 0xd2, 0x0e, 0xa8, 0xcd, 0xfb, 0x0d, 0x91, 0xe7, 0xb7, 0x50, 0xb3, 0x51, 0xa2, 0x66, 0x7f, 0x34, - 0x7d, 0xaf, 0x25, 0x67, 0x57, 0x54, 0xb1, 0xf5, 0xe7, 0x0a, 0xd8, 0xce, 0xa0, 0xdf, 0x42, 0x45, - 0xfd, 0x75, 0xb2, 0xa2, 0x7e, 0xf0, 0x3a, 0x8b, 0x29, 0xa8, 0xa7, 0x5f, 0x57, 0x72, 0x96, 0x22, - 0xaa, 0x69, 0xd8, 0xdd, 0x51, 0xbb, 0x67, 0x3b, 0xa4, 0x43, 0x2c, 0xb1, 0x98, 0xf2, 0x44, 0x77, - 0x17, 0x5b, 0xd0, 0x04, 
0x0a, 0x32, 0xb0, 0x63, 0x91, 0x33, 0x1c, 0x38, 0x7c, 0xdf, 0xb2, 0x0e, - 0xb0, 0x8f, 0x5b, 0xb6, 0x63, 0x73, 0x5b, 0xb6, 0x23, 0xcb, 0xe6, 0x27, 0xc3, 0x81, 0xb6, 0x53, - 0xcf, 0x45, 0xbc, 0x1c, 0x68, 0x37, 0xb2, 0xdd, 0xbc, 0x11, 0x43, 0xfa, 0xa8, 0x40, 0x1a, 0xf6, - 0x81, 0x4a, 0xc9, 0x9f, 0x82, 0xf0, 0x50, 0xd4, 0xa9, 0xe7, 0x27, 0xdc, 0x96, 0x84, 0xdb, 0x5f, - 0x0e, 0x07, 0x9a, 0x8a, 0x0a, 0x30, 0xb3, 0x1d, 0x17, 0xca, 0xc3, 0x2f, 0xc0, 0x26, 0x96, 0x7d, - 0xf8, 0xa4, 0xd7, 0x79, 0xe1, 0xf5, 0xfe, 0x70, 0xa0, 0x6d, 0xee, 0x67, 0xcd, 0xb3, 0x1d, 0xe6, - 0x89, 0xc2, 0x1a, 0x58, 0xea, 0x89, 0x96, 0x9d, 0xa9, 0x0b, 0x42, 0x7f, 0x7b, 0x38, 0xd0, 0x96, - 0x46, 0x5d, 0x7c, 0xa8, 0xb9, 0x78, 0xd4, 0x14, 0x8d, 0x60, 0x84, 0x82, 0x1f, 0x83, 0x95, 0x73, - 0x8f, 0xf1, 0xc7, 0x84, 0x7f, 0xe9, 0xd1, 0x0b, 0x51, 0x18, 0xca, 0xe6, 0xa6, 0xcc, 0xe0, 0xca, - 0xc3, 0xb1, 0x09, 0x4d, 0xe2, 0xe0, 0x6f, 0xc1, 0xf2, 0xb9, 0x6c, 0xfb, 0x98, 0xba, 0x24, 0x36, - 0xda, 0xad, 0x29, 0x1b, 0x2d, 0xd1, 0x22, 0x9a, 0x15, 0x29, 0xbf, 0x1c, 0x0d, 0x33, 0x34, 0x56, - 0x83, 0x3f, 0x01, 0x4b, 0xe2, 0xe5, 0xb8, 0xae, 0x96, 0xc5, 0x6c, 0xae, 0x4a, 0xf8, 0xd2, 0xc3, - 0xd1, 0x30, 0x8a, 0xec, 0x11, 0xf4, 0xb8, 0x71, 0xa0, 0x2e, 0x67, 0xa1, 0xc7, 0x8d, 0x03, 0x14, - 0xd9, 0xe1, 0x33, 0xb0, 0xc4, 0xc8, 0x23, 0xdb, 0x0d, 0x2e, 0x55, 0x20, 0x8e, 0xdc, 0xed, 0x29, - 0xd3, 0x6d, 0x1e, 0x0a, 0x64, 0xaa, 0xe1, 0x1e, 0xab, 0x4b, 0x3b, 0x8a, 0x24, 0xa1, 0x05, 0x96, - 0x69, 0xe0, 0xee, 0xb3, 0xa7, 0x8c, 0x50, 0x75, 0x25, 0x73, 0xdb, 0xa7, 0xf5, 0x51, 0x84, 0x4d, - 0x7b, 0x88, 0x23, 0x13, 0x23, 0xd0, 0x58, 0x18, 0xfe, 0x55, 0x01, 0x90, 0x05, 0xbe, 0xef, 0x90, - 0x2e, 0x71, 0x39, 0x76, 0x44, 0x7f, 0xcf, 0xd4, 0x55, 0xe1, 0xef, 0x17, 0xd3, 0xd6, 0x93, 0x21, - 0xa5, 0x1d, 0xc7, 0xd7, 0x74, 0x16, 0x8a, 0x72, 0x7c, 0x86, 0xe1, 0x3c, 0x63, 0xe2, 0x59, 0x5d, - 0x9b, 0x19, 0xce, 0xfc, 0xef, 0x97, 0x71, 0x38, 0xa5, 0x1d, 0x45, 0x92, 0xf0, 0x73, 0xb0, 0x13, - 0x7d, 0xdd, 0x21, 0xcf, 0xe3, 0x47, 0xb6, 0x43, 0x58, 0x9f, 0x71, 0xd2, 0x55, 0xd7, 0x45, 0x9a, - 0xab, 0x92, 0xb9, 0x83, 0x72, 0x51, 0xa8, 0x80, 0x0d, 0xbb, 0x40, 0x8b, 0xca, 0x43, 0x78, 0x76, - 0xe2, 0xfa, 0x74, 0xc8, 0xda, 0xd8, 0x19, 0x75, 0x2d, 0x57, 0x85, 0x83, 0xf7, 0x86, 0x03, 0x4d, - 0xab, 0x4f, 0x87, 0xa2, 0x59, 0x5a, 0xf0, 0x37, 0x40, 0xc5, 0x45, 0x7e, 0x36, 0x84, 0x9f, 0x1f, - 0x85, 0x35, 0xa7, 0xd0, 0x41, 0x21, 0x1b, 0xfa, 0x60, 0x03, 0x27, 0xbf, 0xb3, 0x99, 0x5a, 0x11, - 0xa7, 0xf0, 0xfd, 0x29, 0x79, 0x48, 0x7d, 0x9a, 0x9b, 0xaa, 0x0c, 0xe3, 0x46, 0xca, 0xc0, 0x50, - 0x46, 0x1d, 0x5e, 0x02, 0x88, 0xd3, 0xbf, 0x05, 0x98, 0x0a, 0x67, 0x5e, 0x31, 0x99, 0x7f, 0x09, - 0xe3, 0xad, 0x96, 0x31, 0x31, 0x94, 0xe3, 0x03, 0x3e, 0x02, 0x5b, 0x72, 0xf4, 0xa9, 0xcb, 0xf0, - 0x19, 0x69, 0xf6, 0x59, 0x9b, 0x3b, 0x4c, 0xdd, 0x14, 0xf5, 0x4d, 0x1d, 0x0e, 0xb4, 0xad, 0xfd, - 0x1c, 0x3b, 0xca, 0x65, 0xc1, 0x4f, 0xc1, 0xc6, 0x99, 0x47, 0x5b, 0xb6, 0x65, 0x11, 0x37, 0x52, - 0xda, 0x12, 0x4a, 0x5b, 0x61, 0x24, 0x8e, 0x52, 0x36, 0x94, 0x41, 0x43, 0x06, 0xb6, 0xa5, 0x72, - 0x83, 0x7a, 0xed, 0x13, 0x2f, 0x70, 0x79, 0x58, 0x52, 0x99, 0xba, 0x1d, 0x5f, 0x23, 0xdb, 0xfb, - 0x79, 0x80, 0x97, 0x03, 0xed, 0x66, 0x4e, 0x49, 0x4f, 0x80, 0x50, 0xbe, 0x36, 0xb4, 0x00, 0x10, - 0x75, 0x60, 0x74, 0xe4, 0x76, 0x66, 0x7e, 0x02, 0xa2, 0x18, 0x9c, 0x3e, 0x75, 0xeb, 0xe1, 0xcd, - 0x3c, 0x36, 0xa3, 0x09, 0x5d, 0xc8, 0x41, 0x05, 0xa7, 0xfe, 0x18, 0x31, 0xf5, 0x9a, 0xc8, 0xf1, - 0x4f, 0x67, 0xe7, 0x38, 0xe6, 0x98, 0xd7, 0x65, 0x8a, 0x2b, 0x69, 0x0b, 0x43, 0x59, 0x07, 0xd0, - 0x01, 0xab, 0xf2, 0xf7, 0xd7, 0x81, 0x83, 0x19, 
0x53, 0x55, 0xb1, 0xba, 0x7b, 0xd3, 0x57, 0x17, - 0xc3, 0xd3, 0xeb, 0x13, 0xdf, 0x65, 0x93, 0x00, 0x94, 0x50, 0xd7, 0xff, 0xae, 0x80, 0xeb, 0x85, - 0xd1, 0x81, 0xf7, 0x12, 0xff, 0x54, 0xf4, 0xd4, 0x3f, 0x15, 0x98, 0x25, 0xbe, 0x81, 0x5f, 0x2a, - 0x5f, 0x29, 0x40, 0x2d, 0xba, 0x21, 0xe0, 0xc7, 0x89, 0x09, 0xbe, 0x9b, 0x9a, 0x60, 0x25, 0xc3, - 0x7b, 0x03, 0xf3, 0xfb, 0x46, 0x01, 0xef, 0x4c, 0xc9, 0x40, 0x5c, 0xf6, 0x88, 0x35, 0x89, 0x7a, - 0x8c, 0xc3, 0x82, 0xa1, 0x88, 0x33, 0x32, 0x2e, 0x7b, 0x39, 0x18, 0x54, 0xc8, 0x86, 0x4f, 0xc1, - 0x35, 0x59, 0x73, 0xd3, 0x36, 0xd1, 0xb9, 0x2f, 0x9b, 0xef, 0x0c, 0x07, 0xda, 0xb5, 0x7a, 0x3e, - 0x04, 0x15, 0x71, 0xf5, 0x7f, 0x2a, 0x60, 0x27, 0xff, 0xca, 0x87, 0x77, 0x12, 0xe1, 0xd6, 0x52, - 0xe1, 0xbe, 0x9a, 0x62, 0xc9, 0x60, 0xff, 0x01, 0xac, 0xcb, 0xc6, 0x20, 0xf9, 0x8b, 0x30, 0x11, - 0xf4, 0xf0, 0xf8, 0x87, 0x3d, 0xbd, 0x94, 0x88, 0xb6, 0xaf, 0xf8, 0x1a, 0x4f, 0x8e, 0xa1, 0x94, - 0x9a, 0xfe, 0x2f, 0x05, 0xbc, 0x3b, 0xf3, 0x4a, 0x87, 0x66, 0x62, 0xea, 0x46, 0x6a, 0xea, 0xd5, - 0x62, 0x81, 0x37, 0xf3, 0xa7, 0xd0, 0xfc, 0xf0, 0xf9, 0x8b, 0xea, 0xdc, 0xb7, 0x2f, 0xaa, 0x73, - 0xdf, 0xbd, 0xa8, 0xce, 0xfd, 0x79, 0x58, 0x55, 0x9e, 0x0f, 0xab, 0xca, 0xb7, 0xc3, 0xaa, 0xf2, - 0xdd, 0xb0, 0xaa, 0xfc, 0x77, 0x58, 0x55, 0xfe, 0xf6, 0xbf, 0xea, 0xdc, 0xef, 0x96, 0xa4, 0xdc, - 0xf7, 0x01, 0x00, 0x00, 0xff, 0xff, 0x56, 0x4d, 0xc9, 0x62, 0x44, 0x18, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index a1173a61c6f85..7be0a7f7a40cb 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -186,7 +186,7 @@ message PodDisruptionBudgetStatus { // that will be applied to a pod and container. message PodSecurityPolicy { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -198,7 +198,7 @@ message PodSecurityPolicy { // PodSecurityPolicyList is a list of PodSecurityPolicy objects. message PodSecurityPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index a59df9840d546..b3001bb5c0dd9 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -134,7 +134,7 @@ type Eviction struct { type PodSecurityPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -480,7 +480,7 @@ const AllowAllRuntimeClassNames = "*" type PodSecurityPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index eb2eec9333f1c..0bbc48f9f8ad3 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -140,7 +140,7 @@ func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { var map_PodSecurityPolicy = map[string]string{ "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec defines the policy enforced.", } @@ -150,7 +150,7 @@ func (PodSecurityPolicy) SwaggerDoc() map[string]string { var map_PodSecurityPolicyList = map[string]string{ "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index 3236cee884dea..9bb48fc31235f 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -17,42 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ package v1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -66,53 +44,341 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return 
fileDescriptor_979ffd7b30c07419, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_Role proto.InternalMessageInfo -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *RoleRef) 
Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleRef proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1.AggregationRule") @@ -128,10 +394,70 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1.Subject") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptor_979ffd7b30c07419) +} + +var fileDescriptor_979ffd7b30c07419 = []byte{ + // 807 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xce, 0xa4, 0x89, 0x1a, 0x4f, 0x88, 0x42, 0x87, 0x0a, 0x59, 0x05, 0x39, 0x95, 0x91, 0x50, + 0x25, 0xc0, 0x26, 0x05, 0x01, 0x12, 0xea, 0xa1, 0x2e, 0x02, 0x55, 0x2d, 0xa5, 0x9a, 0x0a, 0x0e, + 0x88, 0x03, 0x63, 0x67, 0xea, 0x0e, 0xf1, 0x2f, 0xcd, 0xd8, 0x91, 0x2a, 0x2e, 0x08, 0x89, 0x03, + 0xb7, 0x3d, 0xee, 0xfe, 0x05, 0x7b, 0xd9, 0x3d, 0xee, 0x5f, 0xb0, 0x97, 0x1e, 0x7b, 0xec, 0x29, + 0xda, 0x7a, 0xff, 0x90, 0x5d, 0xf9, 0x57, 0x9c, 0x1f, 0xee, 0x36, 0xa7, 0x48, 0xab, 0x3d, 0xb5, + 0xf3, 0xde, 0xf7, 0xbe, 0xf7, 0xcd, 0xe7, 0x79, 0x2f, 0xf0, 0xfb, 0xe1, 0x77, 0x42, 0x63, 0xbe, + 0x3e, 0x8c, 0x4c, 0xca, 0x3d, 0x1a, 0x52, 0xa1, 0x8f, 0xa8, 0x37, 0xf0, 0xb9, 0x9e, 0x27, 0x48, + 0xc0, 0x74, 0x6e, 0x12, 0x4b, 0x1f, 0xf5, 0x75, 0x9b, 0x7a, 0x94, 0x93, 0x90, 0x0e, 0xb4, 0x80, + 0xfb, 0xa1, 0x8f, 0x50, 0x86, 0xd1, 0x48, 0xc0, 0xb4, 0x04, 0xa3, 0x8d, 0xfa, 0x5b, 0x5f, 0xd8, + 0x2c, 0xbc, 0x88, 0x4c, 0xcd, 0xf2, 0x5d, 0xdd, 0xf6, 0x6d, 0x5f, 0x4f, 0xa1, 0x66, 0x74, 0x9e, + 0x9e, 0xd2, 0x43, 0xfa, 0x5f, 0x46, 0xb1, 0xf5, 0x75, 0xd9, 0xc6, 0x25, 0xd6, 0x05, 0xf3, 0x28, + 0xbf, 0xd4, 0x83, 0xa1, 0x9d, 0x04, 0x84, 0xee, 0xd2, 0x90, 0x54, 0x34, 0xde, 0xd2, 0xef, 0xaa, + 0xe2, 0x91, 0x17, 0x32, 0x97, 0x2e, 0x14, 0x7c, 0x73, 0x5f, 0x81, 0xb0, 0x2e, 0xa8, 0x4b, 0xe6, + 0xeb, 0xd4, 0x47, 0x00, 0x76, 0xf7, 0x6d, 0x9b, 0x53, 0x9b, 0x84, 0xcc, 0xf7, 0x70, 0xe4, 0x50, + 0xf4, 0x1f, 0x80, 0x9b, 0x96, 0x13, 0x89, 0x90, 0x72, 0xec, 0x3b, 0xf4, 0x8c, 0x3a, 0xd4, 
0x0a, + 0x7d, 0x2e, 0x64, 0xb0, 0xbd, 0xb6, 0xd3, 0xde, 0xfd, 0x4a, 0x2b, 0x5d, 0x99, 0xf4, 0xd2, 0x82, + 0xa1, 0x9d, 0x04, 0x84, 0x96, 0x5c, 0x49, 0x1b, 0xf5, 0xb5, 0x63, 0x62, 0x52, 0xa7, 0xa8, 0x35, + 0x3e, 0xbe, 0x1a, 0xf7, 0x6a, 0xf1, 0xb8, 0xb7, 0x79, 0x50, 0x41, 0x8c, 0x2b, 0xdb, 0xa9, 0x0f, + 0xeb, 0xb0, 0x3d, 0x05, 0x47, 0x7f, 0xc2, 0x56, 0x42, 0x3e, 0x20, 0x21, 0x91, 0xc1, 0x36, 0xd8, + 0x69, 0xef, 0x7e, 0xb9, 0x9c, 0x94, 0x5f, 0xcc, 0xbf, 0xa8, 0x15, 0xfe, 0x4c, 0x43, 0x62, 0xa0, + 0x5c, 0x07, 0x2c, 0x63, 0x78, 0xc2, 0x8a, 0x0e, 0x60, 0x93, 0x47, 0x0e, 0x15, 0x72, 0x3d, 0xbd, + 0xa9, 0xa2, 0x2d, 0x7e, 0x7f, 0xed, 0xd4, 0x77, 0x98, 0x75, 0x99, 0x18, 0x65, 0x74, 0x72, 0xb2, + 0x66, 0x72, 0x12, 0x38, 0xab, 0x45, 0x26, 0xec, 0x92, 0x59, 0x47, 0xe5, 0xb5, 0x54, 0xed, 0x27, + 0x55, 0x74, 0x73, 0xe6, 0x1b, 0x1f, 0xc4, 0xe3, 0xde, 0xfc, 0x17, 0xc1, 0xf3, 0x84, 0xea, 0xff, + 0x75, 0x88, 0xa6, 0xac, 0x31, 0x98, 0x37, 0x60, 0x9e, 0xbd, 0x02, 0x87, 0x0e, 0x61, 0x4b, 0x44, + 0x69, 0xa2, 0x30, 0xe9, 0xa3, 0xaa, 0x5b, 0x9d, 0x65, 0x18, 0xe3, 0xfd, 0x9c, 0xac, 0x95, 0x07, + 0x04, 0x9e, 0x94, 0xa3, 0x1f, 0xe1, 0x3a, 0xf7, 0x1d, 0x8a, 0xe9, 0x79, 0xee, 0x4f, 0x25, 0x13, + 0xce, 0x20, 0x46, 0x37, 0x67, 0x5a, 0xcf, 0x03, 0xb8, 0x28, 0x56, 0x9f, 0x03, 0xf8, 0xe1, 0xa2, + 0x17, 0xc7, 0x4c, 0x84, 0xe8, 0x8f, 0x05, 0x3f, 0xb4, 0x25, 0x1f, 0x2f, 0x13, 0x99, 0x1b, 0x93, + 0x0b, 0x14, 0x91, 0x29, 0x2f, 0x8e, 0x60, 0x93, 0x85, 0xd4, 0x2d, 0x8c, 0xf8, 0xb4, 0x4a, 0xfe, + 0xa2, 0xb0, 0xf2, 0xd5, 0x1c, 0x26, 0xc5, 0x38, 0xe3, 0x50, 0x9f, 0x01, 0xd8, 0x9d, 0x02, 0xaf, + 0x40, 0xfe, 0x0f, 0xb3, 0xf2, 0x7b, 0xf7, 0xc9, 0xaf, 0xd6, 0xfd, 0x0a, 0x40, 0x58, 0x8e, 0x04, + 0xea, 0xc1, 0xe6, 0x88, 0x72, 0x33, 0xdb, 0x15, 0x92, 0x21, 0x25, 0xf8, 0xdf, 0x92, 0x00, 0xce, + 0xe2, 0xe8, 0x33, 0x28, 0x91, 0x80, 0xfd, 0xc4, 0xfd, 0x28, 0xc8, 0x3a, 0x4b, 0x46, 0x27, 0x1e, + 0xf7, 0xa4, 0xfd, 0xd3, 0xc3, 0x2c, 0x88, 0xcb, 0x7c, 0x02, 0xe6, 0x54, 0xf8, 0x11, 0xb7, 0xa8, + 0x90, 0xd7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x6f, 0x61, 0xa7, 0x38, 0x9c, 0x10, 0x97, + 0x0a, 0xb9, 0x91, 0x16, 0x6c, 0xc4, 0xe3, 0x5e, 0x07, 0x4f, 0x27, 0xf0, 0x2c, 0x0e, 0xed, 0xc1, + 0xae, 0xe7, 0x7b, 0x05, 0xe4, 0x57, 0x7c, 0x2c, 0xe4, 0x66, 0x5a, 0x9a, 0xce, 0xe2, 0xc9, 0x6c, + 0x0a, 0xcf, 0x63, 0xd5, 0xa7, 0x00, 0x36, 0xde, 0xa2, 0xfd, 0xa4, 0xfe, 0x5b, 0x87, 0xed, 0x77, + 0x7e, 0x69, 0x24, 0xe3, 0xb6, 0xda, 0x6d, 0xb1, 0xcc, 0xb8, 0xdd, 0xbf, 0x26, 0x1e, 0x03, 0xd8, + 0x5a, 0xd1, 0x7e, 0xd8, 0x9b, 0x15, 0x2c, 0xdf, 0x29, 0xb8, 0x5a, 0xe9, 0xdf, 0xb0, 0x70, 0x1d, + 0x7d, 0x0e, 0x5b, 0xc5, 0x4c, 0xa7, 0x3a, 0xa5, 0xb2, 0x6f, 0x31, 0xf6, 0x78, 0x82, 0x40, 0xdb, + 0xb0, 0x31, 0x64, 0xde, 0x40, 0xae, 0xa7, 0xc8, 0xf7, 0x72, 0x64, 0xe3, 0x88, 0x79, 0x03, 0x9c, + 0x66, 0x12, 0x84, 0x47, 0xdc, 0xec, 0x67, 0x75, 0x0a, 0x91, 0x4c, 0x33, 0x4e, 0x33, 0xea, 0x13, + 0x00, 0xd7, 0xf3, 0xd7, 0x33, 0xe1, 0x03, 0x77, 0xf2, 0x4d, 0xeb, 0xab, 0x2f, 0xa3, 0xef, 0xcd, + 0xdd, 0x91, 0x0e, 0xa5, 0xe4, 0xaf, 0x08, 0x88, 0x45, 0xe5, 0x46, 0x0a, 0xdb, 0xc8, 0x61, 0xd2, + 0x49, 0x91, 0xc0, 0x25, 0xc6, 0xd8, 0xb9, 0xba, 0x55, 0x6a, 0xd7, 0xb7, 0x4a, 0xed, 0xe6, 0x56, + 0xa9, 0xfd, 0x13, 0x2b, 0xe0, 0x2a, 0x56, 0xc0, 0x75, 0xac, 0x80, 0x9b, 0x58, 0x01, 0x2f, 0x62, + 0x05, 0x3c, 0x78, 0xa9, 0xd4, 0x7e, 0xaf, 0x8f, 0xfa, 0xaf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, + 0xa1, 0x47, 0x98, 0xcf, 0x0a, 0x00, 0x00, +} + func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -139,29 +465,36 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -169,47 +502,58 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AggregationRule != nil { + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -217,45 +561,56 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + 
size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -263,37 +618,46 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -301,37 +665,46 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, 
err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -339,92 +712,67 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -432,37 +780,46 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -470,45 +827,56 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -516,37 +884,46 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -554,37 +931,46 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -592,29 +978,37 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -622,39 +1016,53 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -667,6 +1075,9 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -685,6 +1096,9 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -701,6 +1115,9 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -715,6 +1132,9 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -729,6 +1149,9 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -765,6 +1188,9 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -779,6 +1205,9 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -795,6 +1224,9 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -809,6 +1241,9 @@ func (m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -823,6 +1258,9 @@ func (m 
*RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIGroup) @@ -835,6 +1273,9 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -849,14 +1290,7 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -865,8 +1299,13 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, `}`, }, "") return s @@ -875,10 +1314,15 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -887,9 +1331,14 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -899,9 +1348,14 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, 
f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -910,9 +1364,14 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -935,9 +1394,14 @@ func (this *Role) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -946,9 +1410,14 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -958,9 +1427,14 @@ func (this *RoleBindingList) String() string { if this == nil { return 
"nil" } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -969,9 +1443,14 @@ func (this *RoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1024,7 +1503,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1052,7 +1531,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1061,10 +1540,13 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1078,6 +1560,9 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1105,7 +1590,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1133,7 +1618,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1142,6 +1627,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1163,7 +1651,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1172,6 +1660,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1194,7 +1685,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1203,6 +1694,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1222,6 +1716,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1249,7 +1746,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1277,7 +1774,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1286,6 +1783,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1307,7 +1807,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1316,6 +1816,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1338,7 +1841,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1347,6 +1850,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1363,6 +1869,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1390,7 +1899,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1418,7 +1927,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1427,6 +1936,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1448,7 +1960,7 @@ func (m 
*ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1457,6 +1969,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1474,6 +1989,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1501,7 +2019,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1529,7 +2047,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1538,6 +2056,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1559,7 +2080,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1568,6 +2089,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1585,6 +2109,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1612,7 +2139,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1640,7 +2167,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1650,6 +2177,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1669,7 +2199,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1679,6 +2209,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1698,7 +2231,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1708,6 +2241,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } 
if postIndex > l { return io.ErrUnexpectedEOF } @@ -1727,7 +2263,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1737,6 +2273,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1756,7 +2295,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1766,6 +2305,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1780,6 +2322,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1807,7 +2352,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1835,7 +2380,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1844,6 +2389,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1865,7 +2413,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1874,6 +2422,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1891,6 +2442,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1918,7 +2472,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1946,7 +2500,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1955,6 +2509,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1976,7 +2533,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1985,6 +2542,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -2007,7 +2567,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2016,6 +2576,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2032,6 +2595,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2059,7 +2625,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2087,7 +2653,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2096,6 +2662,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2117,7 +2686,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2126,6 +2695,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2143,6 +2715,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2170,7 +2745,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2198,7 +2773,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2207,6 +2782,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2228,7 +2806,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2237,6 +2815,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2254,6 +2835,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2281,7 +2865,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break 
} @@ -2309,7 +2893,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2319,6 +2903,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2338,7 +2925,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2348,6 +2935,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2367,7 +2957,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2377,6 +2967,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2391,6 +2984,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2418,7 +3014,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2446,7 +3042,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2456,6 +3052,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2475,7 +3074,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2485,6 +3084,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2504,7 +3106,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2514,6 +3116,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2533,7 +3138,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2543,6 +3148,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > 
l { return io.ErrUnexpectedEOF } @@ -2557,6 +3165,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2623,10 +3234,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2655,6 +3269,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2673,62 +3290,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 807 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xce, 0xa4, 0x89, 0x1a, 0x4f, 0x88, 0x42, 0x87, 0x0a, 0x59, 0x05, 0x39, 0x95, 0x91, 0x50, - 0x25, 0xc0, 0x26, 0x05, 0x01, 0x12, 0xea, 0xa1, 0x2e, 0x02, 0x55, 0x2d, 0xa5, 0x9a, 0x0a, 0x0e, - 0x88, 0x03, 0x63, 0x67, 0xea, 0x0e, 0xf1, 0x2f, 0xcd, 0xd8, 0x91, 0x2a, 0x2e, 0x08, 0x89, 0x03, - 0xb7, 0x3d, 0xee, 0xfe, 0x05, 0x7b, 0xd9, 0x3d, 0xee, 0x5f, 0xb0, 0x97, 0x1e, 0x7b, 0xec, 0x29, - 0xda, 0x7a, 0xff, 0x90, 0x5d, 0xf9, 0x57, 0x9c, 0x1f, 0xee, 0x36, 0xa7, 0x48, 0xab, 0x3d, 0xb5, - 0xf3, 0xde, 0xf7, 0xbe, 0xf7, 0xcd, 0xe7, 0x79, 0x2f, 0xf0, 0xfb, 0xe1, 0x77, 0x42, 0x63, 0xbe, - 0x3e, 0x8c, 0x4c, 0xca, 0x3d, 0x1a, 0x52, 0xa1, 0x8f, 0xa8, 0x37, 0xf0, 0xb9, 0x9e, 0x27, 0x48, - 0xc0, 0x74, 0x6e, 0x12, 0x4b, 0x1f, 0xf5, 0x75, 0x9b, 0x7a, 0x94, 0x93, 0x90, 0x0e, 0xb4, 0x80, - 0xfb, 0xa1, 0x8f, 0x50, 0x86, 0xd1, 0x48, 0xc0, 0xb4, 0x04, 0xa3, 0x8d, 0xfa, 0x5b, 0x5f, 0xd8, - 0x2c, 0xbc, 0x88, 0x4c, 0xcd, 0xf2, 0x5d, 0xdd, 0xf6, 0x6d, 0x5f, 0x4f, 0xa1, 0x66, 0x74, 0x9e, - 0x9e, 0xd2, 0x43, 0xfa, 0x5f, 0x46, 0xb1, 0xf5, 0x75, 0xd9, 0xc6, 0x25, 0xd6, 0x05, 0xf3, 0x28, - 0xbf, 0xd4, 0x83, 0xa1, 0x9d, 0x04, 0x84, 0xee, 0xd2, 0x90, 0x54, 0x34, 0xde, 0xd2, 0xef, 0xaa, - 0xe2, 0x91, 0x17, 0x32, 0x97, 0x2e, 0x14, 0x7c, 0x73, 0x5f, 0x81, 0xb0, 0x2e, 0xa8, 0x4b, 0xe6, - 0xeb, 0xd4, 0x47, 0x00, 0x76, 0xf7, 0x6d, 0x9b, 0x53, 0x9b, 0x84, 0xcc, 0xf7, 0x70, 0xe4, 0x50, - 0xf4, 0x1f, 0x80, 0x9b, 0x96, 0x13, 0x89, 0x90, 0x72, 0xec, 0x3b, 0xf4, 0x8c, 0x3a, 0xd4, 0x0a, - 0x7d, 0x2e, 0x64, 0xb0, 0xbd, 0xb6, 0xd3, 0xde, 0xfd, 0x4a, 0x2b, 0x5d, 0x99, 0xf4, 0xd2, 0x82, - 0xa1, 0x9d, 0x04, 0x84, 0x96, 0x5c, 0x49, 0x1b, 0xf5, 0xb5, 0x63, 0x62, 0x52, 0xa7, 0xa8, 0x35, - 0x3e, 0xbe, 0x1a, 0xf7, 0x6a, 0xf1, 0xb8, 0xb7, 0x79, 0x50, 0x41, 0x8c, 0x2b, 0xdb, 0xa9, 0x0f, - 0xeb, 0xb0, 0x3d, 0x05, 0x47, 0x7f, 0xc2, 0x56, 0x42, 0x3e, 0x20, 0x21, 0x91, 0xc1, 0x36, 0xd8, - 0x69, 0xef, 0x7e, 0xb9, 0x9c, 0x94, 0x5f, 0xcc, 0xbf, 0xa8, 0x15, 0xfe, 0x4c, 0x43, 0x62, 0xa0, - 0x5c, 0x07, 0x2c, 0x63, 0x78, 0xc2, 0x8a, 0x0e, 0x60, 0x93, 0x47, 0x0e, 0x15, 0x72, 0x3d, 0xbd, - 0xa9, 0xa2, 0x2d, 0x7e, 0x7f, 0xed, 0xd4, 0x77, 0x98, 0x75, 0x99, 0x18, 0x65, 0x74, 0x72, 0xb2, - 0x66, 0x72, 0x12, 0x38, 0xab, 0x45, 0x26, 0xec, 0x92, 0x59, 0x47, 0xe5, 0xb5, 0x54, 0xed, 0x27, - 0x55, 0x74, 0x73, 0xe6, 0x1b, 0x1f, 0xc4, 0xe3, 
0xde, 0xfc, 0x17, 0xc1, 0xf3, 0x84, 0xea, 0xff, - 0x75, 0x88, 0xa6, 0xac, 0x31, 0x98, 0x37, 0x60, 0x9e, 0xbd, 0x02, 0x87, 0x0e, 0x61, 0x4b, 0x44, - 0x69, 0xa2, 0x30, 0xe9, 0xa3, 0xaa, 0x5b, 0x9d, 0x65, 0x18, 0xe3, 0xfd, 0x9c, 0xac, 0x95, 0x07, - 0x04, 0x9e, 0x94, 0xa3, 0x1f, 0xe1, 0x3a, 0xf7, 0x1d, 0x8a, 0xe9, 0x79, 0xee, 0x4f, 0x25, 0x13, - 0xce, 0x20, 0x46, 0x37, 0x67, 0x5a, 0xcf, 0x03, 0xb8, 0x28, 0x56, 0x9f, 0x03, 0xf8, 0xe1, 0xa2, - 0x17, 0xc7, 0x4c, 0x84, 0xe8, 0x8f, 0x05, 0x3f, 0xb4, 0x25, 0x1f, 0x2f, 0x13, 0x99, 0x1b, 0x93, - 0x0b, 0x14, 0x91, 0x29, 0x2f, 0x8e, 0x60, 0x93, 0x85, 0xd4, 0x2d, 0x8c, 0xf8, 0xb4, 0x4a, 0xfe, - 0xa2, 0xb0, 0xf2, 0xd5, 0x1c, 0x26, 0xc5, 0x38, 0xe3, 0x50, 0x9f, 0x01, 0xd8, 0x9d, 0x02, 0xaf, - 0x40, 0xfe, 0x0f, 0xb3, 0xf2, 0x7b, 0xf7, 0xc9, 0xaf, 0xd6, 0xfd, 0x0a, 0x40, 0x58, 0x8e, 0x04, - 0xea, 0xc1, 0xe6, 0x88, 0x72, 0x33, 0xdb, 0x15, 0x92, 0x21, 0x25, 0xf8, 0xdf, 0x92, 0x00, 0xce, - 0xe2, 0xe8, 0x33, 0x28, 0x91, 0x80, 0xfd, 0xc4, 0xfd, 0x28, 0xc8, 0x3a, 0x4b, 0x46, 0x27, 0x1e, - 0xf7, 0xa4, 0xfd, 0xd3, 0xc3, 0x2c, 0x88, 0xcb, 0x7c, 0x02, 0xe6, 0x54, 0xf8, 0x11, 0xb7, 0xa8, - 0x90, 0xd7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x6f, 0x61, 0xa7, 0x38, 0x9c, 0x10, 0x97, - 0x0a, 0xb9, 0x91, 0x16, 0x6c, 0xc4, 0xe3, 0x5e, 0x07, 0x4f, 0x27, 0xf0, 0x2c, 0x0e, 0xed, 0xc1, - 0xae, 0xe7, 0x7b, 0x05, 0xe4, 0x57, 0x7c, 0x2c, 0xe4, 0x66, 0x5a, 0x9a, 0xce, 0xe2, 0xc9, 0x6c, - 0x0a, 0xcf, 0x63, 0xd5, 0xa7, 0x00, 0x36, 0xde, 0xa2, 0xfd, 0xa4, 0xfe, 0x5b, 0x87, 0xed, 0x77, - 0x7e, 0x69, 0x24, 0xe3, 0xb6, 0xda, 0x6d, 0xb1, 0xcc, 0xb8, 0xdd, 0xbf, 0x26, 0x1e, 0x03, 0xd8, - 0x5a, 0xd1, 0x7e, 0xd8, 0x9b, 0x15, 0x2c, 0xdf, 0x29, 0xb8, 0x5a, 0xe9, 0xdf, 0xb0, 0x70, 0x1d, - 0x7d, 0x0e, 0x5b, 0xc5, 0x4c, 0xa7, 0x3a, 0xa5, 0xb2, 0x6f, 0x31, 0xf6, 0x78, 0x82, 0x40, 0xdb, - 0xb0, 0x31, 0x64, 0xde, 0x40, 0xae, 0xa7, 0xc8, 0xf7, 0x72, 0x64, 0xe3, 0x88, 0x79, 0x03, 0x9c, - 0x66, 0x12, 0x84, 0x47, 0xdc, 0xec, 0x67, 0x75, 0x0a, 0x91, 0x4c, 0x33, 0x4e, 0x33, 0xea, 0x13, - 0x00, 0xd7, 0xf3, 0xd7, 0x33, 0xe1, 0x03, 0x77, 0xf2, 0x4d, 0xeb, 0xab, 0x2f, 0xa3, 0xef, 0xcd, - 0xdd, 0x91, 0x0e, 0xa5, 0xe4, 0xaf, 0x08, 0x88, 0x45, 0xe5, 0x46, 0x0a, 0xdb, 0xc8, 0x61, 0xd2, - 0x49, 0x91, 0xc0, 0x25, 0xc6, 0xd8, 0xb9, 0xba, 0x55, 0x6a, 0xd7, 0xb7, 0x4a, 0xed, 0xe6, 0x56, - 0xa9, 0xfd, 0x13, 0x2b, 0xe0, 0x2a, 0x56, 0xc0, 0x75, 0xac, 0x80, 0x9b, 0x58, 0x01, 0x2f, 0x62, - 0x05, 0x3c, 0x78, 0xa9, 0xd4, 0x7e, 0xaf, 0x8f, 0xfa, 0xaf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, - 0xa1, 0x47, 0x98, 0xcf, 0x0a, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 355196bceaabe..1933624772279 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -17,42 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ package v1alpha1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -66,53 +44,341 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} 
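// Editor's note — an illustrative sketch, not part of the vendored diff. The regenerated
// gogo/protobuf code in these hunks replaces the old forward-writing MarshalTo methods with
// MarshalToSizedBuffer, which fills the buffer from the end toward the front, and it sizes
// varints with math/bits (see sovGenerated and encodeVarintGenerated above). The standalone
// program below reproduces that back-to-front pattern for a hypothetical one-field message;
// the `toy` type, its field tag, and the helper names are invented for illustration and are
// assumptions, not symbols taken from these files.
package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors the regenerated sovGenerated: the number of bytes needed to
// varint-encode x (x|1 keeps Len64 from returning 0 for x == 0).
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarint mirrors the regenerated encodeVarintGenerated: step back by the
// varint width, write the varint forward, and return the new write offset.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// toy is a hypothetical message with a single string field: tag 1, wire type 2 (0x0a).
type toy struct{ Name string }

// size is the serialized length: one tag byte, the length varint, then the bytes.
func (m *toy) size() int {
	l := len(m.Name)
	return 1 + sov(uint64(l)) + l
}

// marshalToSizedBuffer writes the field back to front, the same pattern the
// regenerated MarshalToSizedBuffer methods use, and returns the bytes written.
func (m *toy) marshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	i -= len(m.Name)
	copy(dAtA[i:], m.Name)
	i = encodeVarint(dAtA, i, uint64(len(m.Name)))
	i--
	dAtA[i] = 0x0a
	return len(dAtA) - i, nil
}

func main() {
	m := &toy{Name: "admin"}
	buf := make([]byte, m.size())
	n, _ := m.marshalToSizedBuffer(buf)
	// Prints the standard protobuf encoding of field 1 = "admin":
	// 0a 05 61 64 6d 69 6e (n=7)
	fmt.Printf("% x (n=%d)\n", buf[len(buf)-n:], n)
}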
-func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_Role proto.InternalMessageInfo -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList 
proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleRef proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1alpha1.AggregationRule") @@ -128,10 +394,71 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1alpha1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1alpha1.Subject") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptor_b59b0bd5e7cb9590) +} + +var fileDescriptor_b59b0bd5e7cb9590 = []byte{ + // 830 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, + 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x14, 0xf1, 0x15, 0x28, 0x10, 0xf6, 0x96, 0x39, 0x71, + 0x05, 0xa2, 0x60, 0xe2, 0xcc, 0x39, 0x43, 0x6c, 0x8f, 0x35, 0x63, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, + 0x22, 0x1a, 0x0a, 0x7a, 0x5a, 0x1a, 0x28, 0xf9, 0x07, 0x96, 0xee, 0xca, 0xad, 0x22, 0xd6, 0xfc, + 0x21, 0x20, 0x8f, 0xed, 0xd8, 0xf9, 0x45, 0x52, 0x45, 0x42, 0xba, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, + 0xbd, 0xf7, 0xbe, 0x99, 0xf7, 0x0c, 0xfb, 0xd3, 0x0f, 0xa5, 0xc9, 0xb8, 0x35, 0x8d, 0x47, 0x54, + 0x04, 0x34, 0xa2, 0xd2, 0x9a, 
0xd1, 0x60, 0xcc, 0x85, 0x95, 0x3b, 0x48, 0xc8, 0x2c, 0x31, 0x22, + 0x8e, 0x35, 0x3b, 0x27, 0x5e, 0x38, 0x21, 0xe7, 0x96, 0x4b, 0x03, 0x2a, 0x48, 0x44, 0xc7, 0x66, + 0x28, 0x78, 0xc4, 0x91, 0x96, 0x21, 0x4d, 0x12, 0x32, 0x33, 0x45, 0x9a, 0x05, 0xf2, 0xf4, 0x6d, + 0x97, 0x45, 0x93, 0x78, 0x64, 0x3a, 0xdc, 0xb7, 0x5c, 0xee, 0x72, 0x4b, 0x05, 0x8c, 0xe2, 0x27, + 0xea, 0xa4, 0x0e, 0xea, 0x5f, 0x46, 0x74, 0xfa, 0x5e, 0x99, 0xd2, 0x27, 0xce, 0x84, 0x05, 0x54, + 0x3c, 0xb5, 0xc2, 0xa9, 0x9b, 0x1a, 0xa4, 0xe5, 0xd3, 0x88, 0x58, 0xb3, 0xb5, 0xf4, 0xa7, 0xd6, + 0xb6, 0x28, 0x11, 0x07, 0x11, 0xf3, 0xe9, 0x5a, 0xc0, 0xfb, 0xbb, 0x02, 0xa4, 0x33, 0xa1, 0x3e, + 0x59, 0x8d, 0x33, 0x7e, 0x06, 0xb0, 0xdb, 0x77, 0x5d, 0x41, 0x5d, 0x12, 0x31, 0x1e, 0xe0, 0xd8, + 0xa3, 0xe8, 0x7b, 0x00, 0xef, 0x3a, 0x5e, 0x2c, 0x23, 0x2a, 0x30, 0xf7, 0xe8, 0x23, 0xea, 0x51, + 0x27, 0xe2, 0x42, 0x6a, 0xe0, 0xec, 0xe8, 0xde, 0xc9, 0xc5, 0xbb, 0x66, 0xa9, 0xcd, 0x22, 0x97, + 0x19, 0x4e, 0xdd, 0xd4, 0x20, 0xcd, 0xb4, 0x25, 0x73, 0x76, 0x6e, 0x0e, 0xc9, 0x88, 0x7a, 0x45, + 0xac, 0xfd, 0xea, 0xf5, 0xbc, 0x57, 0x4b, 0xe6, 0xbd, 0xbb, 0x0f, 0x36, 0x10, 0xe3, 0x8d, 0xe9, + 0x8c, 0x5f, 0xea, 0xf0, 0xa4, 0x02, 0x47, 0x5f, 0xc3, 0x56, 0x4a, 0x3e, 0x26, 0x11, 0xd1, 0xc0, + 0x19, 0xb8, 0x77, 0x72, 0xf1, 0xce, 0x7e, 0xa5, 0x3c, 0x1c, 0x7d, 0x43, 0x9d, 0xe8, 0x33, 0x1a, + 0x11, 0x1b, 0xe5, 0x75, 0xc0, 0xd2, 0x86, 0x17, 0xac, 0x68, 0x00, 0x9b, 0x22, 0xf6, 0xa8, 0xd4, + 0xea, 0xaa, 0xd3, 0xd7, 0xcd, 0x6d, 0xaf, 0xc0, 0xbc, 0xe2, 0x1e, 0x73, 0x9e, 0xa6, 0x72, 0xd9, + 0x9d, 0x9c, 0xb2, 0x99, 0x9e, 0x24, 0xce, 0x18, 0xd0, 0x04, 0x76, 0xc9, 0xb2, 0xae, 0xda, 0x91, + 0xaa, 0xf9, 0xcd, 0xed, 0xa4, 0x2b, 0x17, 0x61, 0xbf, 0x9c, 0xcc, 0x7b, 0xab, 0xb7, 0x83, 0x57, + 0x69, 0x8d, 0x9f, 0xea, 0x10, 0x55, 0x64, 0xb2, 0x59, 0x30, 0x66, 0x81, 0x7b, 0x00, 0xb5, 0x1e, + 0xc2, 0x96, 0x8c, 0x95, 0xa3, 0x10, 0xec, 0xb5, 0xed, 0xbd, 0x3d, 0xca, 0x90, 0xf6, 0x4b, 0x39, + 0x65, 0x2b, 0x37, 0x48, 0xbc, 0x20, 0x41, 0x43, 0x78, 0x2c, 0xb8, 0x47, 0x31, 0x7d, 0x92, 0x6b, + 0xf5, 0x1f, 0x7c, 0x38, 0x03, 0xda, 0xdd, 0x9c, 0xef, 0x38, 0x37, 0xe0, 0x82, 0xc2, 0xf8, 0x13, + 0xc0, 0x57, 0xd6, 0x75, 0x19, 0x32, 0x19, 0xa1, 0xaf, 0xd6, 0xb4, 0x31, 0xf7, 0x7c, 0xd4, 0x4c, + 0x66, 0xca, 0x2c, 0xda, 0x28, 0x2c, 0x15, 0x5d, 0x3e, 0x87, 0x4d, 0x16, 0x51, 0xbf, 0x10, 0xe5, + 0xfe, 0xf6, 0x26, 0xd6, 0xcb, 0x2b, 0x5f, 0xd3, 0x20, 0xa5, 0xc0, 0x19, 0x93, 0xf1, 0x07, 0x80, + 0xdd, 0x0a, 0xf8, 0x00, 0x4d, 0x7c, 0xb2, 0xdc, 0xc4, 0x1b, 0xfb, 0x35, 0xb1, 0xb9, 0xfa, 0x7f, + 0x00, 0x84, 0xe5, 0xc0, 0xa0, 0x1e, 0x6c, 0xce, 0xa8, 0x18, 0x65, 0xfb, 0xa4, 0x6d, 0xb7, 0x53, + 0xfc, 0xe3, 0xd4, 0x80, 0x33, 0x3b, 0x7a, 0x0b, 0xb6, 0x49, 0xc8, 0x3e, 0x16, 0x3c, 0x0e, 0xa5, + 0x76, 0xa4, 0x40, 0x9d, 0x64, 0xde, 0x6b, 0xf7, 0xaf, 0x06, 0x99, 0x11, 0x97, 0xfe, 0x14, 0x2c, + 0xa8, 0xe4, 0xb1, 0x70, 0xa8, 0xd4, 0x1a, 0x25, 0x18, 0x17, 0x46, 0x5c, 0xfa, 0xd1, 0x07, 0xb0, + 0x53, 0x1c, 0x2e, 0x89, 0x4f, 0xa5, 0xd6, 0x54, 0x01, 0x77, 0x92, 0x79, 0xaf, 0x83, 0xab, 0x0e, + 0xbc, 0x8c, 0x43, 0x1f, 0xc1, 0x6e, 0xc0, 0x83, 0x02, 0xf2, 0x05, 0x1e, 0x4a, 0xed, 0x05, 0x15, + 0xaa, 0x66, 0xf4, 0x72, 0xd9, 0x85, 0x57, 0xb1, 0xc6, 0xef, 0x00, 0x36, 0xfe, 0x77, 0x3b, 0xcc, + 0xf8, 0xa1, 0x0e, 0x4f, 0x9e, 0xaf, 0x94, 0xca, 0x4a, 0x49, 0xc7, 0xf0, 0xb0, 0xbb, 0x64, 0xff, + 0x31, 0xdc, 0xbd, 0x44, 0x7e, 0x05, 0xb0, 0x75, 0xa0, 0xed, 0xf1, 0x60, 0xb9, 0x6c, 0x7d, 0x47, + 0xd9, 0x9b, 0xeb, 0xfd, 0x16, 0x16, 0x37, 0x80, 0xee, 0xc3, 0x56, 0x31, 0xf1, 0xaa, 0xda, 0x76, + 0x99, 0xbd, 0x58, 0x0a, 0x78, 0x81, 0x40, 0x67, 0xb0, 
0x31, 0x65, 0xc1, 0x58, 0xab, 0x2b, 0xe4, + 0x8b, 0x39, 0xb2, 0xf1, 0x29, 0x0b, 0xc6, 0x58, 0x79, 0x52, 0x44, 0x40, 0xfc, 0xec, 0x93, 0x5c, + 0x41, 0xa4, 0xb3, 0x8e, 0x95, 0xc7, 0xf8, 0x0d, 0xc0, 0xe3, 0xfc, 0x3d, 0x2d, 0xf8, 0xc0, 0x56, + 0xbe, 0x0b, 0x08, 0x49, 0xc8, 0x1e, 0x53, 0x21, 0x19, 0x0f, 0xf2, 0xbc, 0x8b, 0x97, 0xde, 0xbf, + 0x1a, 0xe4, 0x1e, 0x5c, 0x41, 0xed, 0xae, 0x01, 0x59, 0xb0, 0x9d, 0xfe, 0xca, 0x90, 0x38, 0x54, + 0x6b, 0x28, 0xd8, 0x9d, 0x1c, 0xd6, 0xbe, 0x2c, 0x1c, 0xb8, 0xc4, 0xd8, 0xe6, 0xf5, 0xad, 0x5e, + 0x7b, 0x76, 0xab, 0xd7, 0x6e, 0x6e, 0xf5, 0xda, 0x77, 0x89, 0x0e, 0xae, 0x13, 0x1d, 0x3c, 0x4b, + 0x74, 0x70, 0x93, 0xe8, 0xe0, 0xaf, 0x44, 0x07, 0x3f, 0xfe, 0xad, 0xd7, 0xbe, 0x6c, 0x15, 0xe2, + 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x73, 0x15, 0x10, 0x29, 0x0b, 0x00, 0x00, +} + func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -139,29 +466,36 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -169,47 +503,58 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AggregationRule != nil { + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -217,45 +562,56 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -263,37 +619,46 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleList) Marshal() (dAtA 
[]byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -301,37 +666,46 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -339,92 +713,67 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x32 } } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x2a } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - 
i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -432,37 +781,46 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -470,45 +828,56 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -516,37 +885,46 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -554,37 +932,46 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err 
} @@ -592,29 +979,37 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -622,39 +1017,53 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -667,6 +1076,9 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -685,6 +1097,9 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -701,6 +1116,9 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -715,6 +1133,9 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m 
*ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -729,6 +1150,9 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -765,6 +1189,9 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -779,6 +1206,9 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -795,6 +1225,9 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -809,6 +1242,9 @@ func (m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -823,6 +1259,9 @@ func (m *RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIGroup) @@ -835,6 +1274,9 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -849,14 +1291,7 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -865,8 +1300,13 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, `}`, }, "") return s @@ -875,10 +1315,15 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -887,9 +1332,14 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + 
repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -899,9 +1349,14 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -910,9 +1365,14 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -935,9 +1395,14 @@ func (this *Role) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -946,9 +1411,14 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } + 
repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -958,9 +1428,14 @@ func (this *RoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -969,9 +1444,14 @@ func (this *RoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1024,7 +1504,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1052,7 +1532,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1061,10 +1541,13 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1078,6 +1561,9 
@@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1105,7 +1591,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1133,7 +1619,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1142,6 +1628,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1163,7 +1652,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1172,6 +1661,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1194,7 +1686,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1203,6 +1695,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1222,6 +1717,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1249,7 +1747,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1277,7 +1775,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1286,6 +1784,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1307,7 +1808,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1316,6 +1817,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1338,7 +1842,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1347,6 +1851,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -1363,6 +1870,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1390,7 +1900,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1418,7 +1928,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1427,6 +1937,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1448,7 +1961,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1457,6 +1970,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1474,6 +1990,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1501,7 +2020,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1529,7 +2048,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1538,6 +2057,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1559,7 +2081,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1568,6 +2090,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1585,6 +2110,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1612,7 +2140,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1640,7 +2168,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1650,6 +2178,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1669,7 +2200,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1679,6 +2210,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1698,7 +2232,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1708,6 +2242,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1727,7 +2264,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1737,6 +2274,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1756,7 +2296,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1766,6 +2306,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1780,6 +2323,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1807,7 +2353,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1835,7 +2381,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1844,6 +2390,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1865,7 +2414,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1874,6 +2423,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1891,6 +2443,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1918,7 +2473,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b 
:= dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1946,7 +2501,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1955,6 +2510,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1976,7 +2534,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1985,6 +2543,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2007,7 +2568,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2016,6 +2577,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2032,6 +2596,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2059,7 +2626,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2087,7 +2654,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2096,6 +2663,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2117,7 +2687,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2126,6 +2696,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2143,6 +2716,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2170,7 +2746,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2198,7 +2774,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2207,6 +2783,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2228,7 +2807,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2237,6 +2816,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2254,6 +2836,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2281,7 +2866,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2309,7 +2894,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2319,6 +2904,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2338,7 +2926,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2348,6 +2936,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2367,7 +2958,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2377,6 +2968,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2391,6 +2985,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2418,7 +3015,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2446,7 +3043,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2456,6 +3053,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2475,7 +3075,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2485,6 +3085,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen 
+ if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2504,7 +3107,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2514,6 +3117,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2533,7 +3139,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2543,6 +3149,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2557,6 +3166,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2623,10 +3235,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2655,6 +3270,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2673,63 +3291,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 830 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, - 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, - 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x14, 0xf1, 0x15, 0x28, 0x10, 0xf6, 0x96, 0x39, 0x71, - 0x05, 0xa2, 0x60, 0xe2, 0xcc, 0x39, 0x43, 0x6c, 0x8f, 0x35, 0x63, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, - 0x22, 0x1a, 0x0a, 0x7a, 0x5a, 0x1a, 0x28, 0xf9, 0x07, 0x96, 0xee, 0xca, 0xad, 0x22, 0xd6, 0xfc, - 0x21, 0x20, 0x8f, 0xed, 0xd8, 0xf9, 0x45, 0x52, 0x45, 0x42, 0xba, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, - 0xbd, 0xf7, 0xbe, 0x99, 0xf7, 0x0c, 0xfb, 0xd3, 0x0f, 0xa5, 0xc9, 0xb8, 0x35, 0x8d, 0x47, 0x54, - 0x04, 0x34, 0xa2, 0xd2, 0x9a, 0xd1, 0x60, 0xcc, 0x85, 0x95, 0x3b, 0x48, 0xc8, 0x2c, 0x31, 0x22, - 0x8e, 0x35, 0x3b, 0x27, 0x5e, 0x38, 0x21, 0xe7, 0x96, 0x4b, 0x03, 0x2a, 0x48, 0x44, 0xc7, 0x66, - 0x28, 0x78, 0xc4, 0x91, 0x96, 0x21, 0x4d, 0x12, 0x32, 0x33, 0x45, 0x9a, 0x05, 0xf2, 0xf4, 0x6d, - 0x97, 0x45, 0x93, 0x78, 0x64, 0x3a, 0xdc, 0xb7, 0x5c, 0xee, 0x72, 0x4b, 0x05, 0x8c, 0xe2, 0x27, - 0xea, 0xa4, 0x0e, 0xea, 0x5f, 0x46, 0x74, 0xfa, 0x5e, 0x99, 0xd2, 0x27, 0xce, 0x84, 0x05, 0x54, - 0x3c, 0xb5, 0xc2, 0xa9, 0x9b, 0x1a, 0xa4, 0xe5, 0xd3, 0x88, 0x58, 0xb3, 0xb5, 0xf4, 0xa7, 0xd6, - 0xb6, 0x28, 0x11, 0x07, 0x11, 0xf3, 0xe9, 0x5a, 0xc0, 0xfb, 0xbb, 0x02, 0xa4, 0x33, 0xa1, 0x3e, - 0x59, 0x8d, 0x33, 0x7e, 0x06, 0xb0, 0xdb, 0x77, 
0x5d, 0x41, 0x5d, 0x12, 0x31, 0x1e, 0xe0, 0xd8, - 0xa3, 0xe8, 0x7b, 0x00, 0xef, 0x3a, 0x5e, 0x2c, 0x23, 0x2a, 0x30, 0xf7, 0xe8, 0x23, 0xea, 0x51, - 0x27, 0xe2, 0x42, 0x6a, 0xe0, 0xec, 0xe8, 0xde, 0xc9, 0xc5, 0xbb, 0x66, 0xa9, 0xcd, 0x22, 0x97, - 0x19, 0x4e, 0xdd, 0xd4, 0x20, 0xcd, 0xb4, 0x25, 0x73, 0x76, 0x6e, 0x0e, 0xc9, 0x88, 0x7a, 0x45, - 0xac, 0xfd, 0xea, 0xf5, 0xbc, 0x57, 0x4b, 0xe6, 0xbd, 0xbb, 0x0f, 0x36, 0x10, 0xe3, 0x8d, 0xe9, - 0x8c, 0x5f, 0xea, 0xf0, 0xa4, 0x02, 0x47, 0x5f, 0xc3, 0x56, 0x4a, 0x3e, 0x26, 0x11, 0xd1, 0xc0, - 0x19, 0xb8, 0x77, 0x72, 0xf1, 0xce, 0x7e, 0xa5, 0x3c, 0x1c, 0x7d, 0x43, 0x9d, 0xe8, 0x33, 0x1a, - 0x11, 0x1b, 0xe5, 0x75, 0xc0, 0xd2, 0x86, 0x17, 0xac, 0x68, 0x00, 0x9b, 0x22, 0xf6, 0xa8, 0xd4, - 0xea, 0xaa, 0xd3, 0xd7, 0xcd, 0x6d, 0xaf, 0xc0, 0xbc, 0xe2, 0x1e, 0x73, 0x9e, 0xa6, 0x72, 0xd9, - 0x9d, 0x9c, 0xb2, 0x99, 0x9e, 0x24, 0xce, 0x18, 0xd0, 0x04, 0x76, 0xc9, 0xb2, 0xae, 0xda, 0x91, - 0xaa, 0xf9, 0xcd, 0xed, 0xa4, 0x2b, 0x17, 0x61, 0xbf, 0x9c, 0xcc, 0x7b, 0xab, 0xb7, 0x83, 0x57, - 0x69, 0x8d, 0x9f, 0xea, 0x10, 0x55, 0x64, 0xb2, 0x59, 0x30, 0x66, 0x81, 0x7b, 0x00, 0xb5, 0x1e, - 0xc2, 0x96, 0x8c, 0x95, 0xa3, 0x10, 0xec, 0xb5, 0xed, 0xbd, 0x3d, 0xca, 0x90, 0xf6, 0x4b, 0x39, - 0x65, 0x2b, 0x37, 0x48, 0xbc, 0x20, 0x41, 0x43, 0x78, 0x2c, 0xb8, 0x47, 0x31, 0x7d, 0x92, 0x6b, - 0xf5, 0x1f, 0x7c, 0x38, 0x03, 0xda, 0xdd, 0x9c, 0xef, 0x38, 0x37, 0xe0, 0x82, 0xc2, 0xf8, 0x13, - 0xc0, 0x57, 0xd6, 0x75, 0x19, 0x32, 0x19, 0xa1, 0xaf, 0xd6, 0xb4, 0x31, 0xf7, 0x7c, 0xd4, 0x4c, - 0x66, 0xca, 0x2c, 0xda, 0x28, 0x2c, 0x15, 0x5d, 0x3e, 0x87, 0x4d, 0x16, 0x51, 0xbf, 0x10, 0xe5, - 0xfe, 0xf6, 0x26, 0xd6, 0xcb, 0x2b, 0x5f, 0xd3, 0x20, 0xa5, 0xc0, 0x19, 0x93, 0xf1, 0x07, 0x80, - 0xdd, 0x0a, 0xf8, 0x00, 0x4d, 0x7c, 0xb2, 0xdc, 0xc4, 0x1b, 0xfb, 0x35, 0xb1, 0xb9, 0xfa, 0x7f, - 0x00, 0x84, 0xe5, 0xc0, 0xa0, 0x1e, 0x6c, 0xce, 0xa8, 0x18, 0x65, 0xfb, 0xa4, 0x6d, 0xb7, 0x53, - 0xfc, 0xe3, 0xd4, 0x80, 0x33, 0x3b, 0x7a, 0x0b, 0xb6, 0x49, 0xc8, 0x3e, 0x16, 0x3c, 0x0e, 0xa5, - 0x76, 0xa4, 0x40, 0x9d, 0x64, 0xde, 0x6b, 0xf7, 0xaf, 0x06, 0x99, 0x11, 0x97, 0xfe, 0x14, 0x2c, - 0xa8, 0xe4, 0xb1, 0x70, 0xa8, 0xd4, 0x1a, 0x25, 0x18, 0x17, 0x46, 0x5c, 0xfa, 0xd1, 0x07, 0xb0, - 0x53, 0x1c, 0x2e, 0x89, 0x4f, 0xa5, 0xd6, 0x54, 0x01, 0x77, 0x92, 0x79, 0xaf, 0x83, 0xab, 0x0e, - 0xbc, 0x8c, 0x43, 0x1f, 0xc1, 0x6e, 0xc0, 0x83, 0x02, 0xf2, 0x05, 0x1e, 0x4a, 0xed, 0x05, 0x15, - 0xaa, 0x66, 0xf4, 0x72, 0xd9, 0x85, 0x57, 0xb1, 0xc6, 0xef, 0x00, 0x36, 0xfe, 0x77, 0x3b, 0xcc, - 0xf8, 0xa1, 0x0e, 0x4f, 0x9e, 0xaf, 0x94, 0xca, 0x4a, 0x49, 0xc7, 0xf0, 0xb0, 0xbb, 0x64, 0xff, - 0x31, 0xdc, 0xbd, 0x44, 0x7e, 0x05, 0xb0, 0x75, 0xa0, 0xed, 0xf1, 0x60, 0xb9, 0x6c, 0x7d, 0x47, - 0xd9, 0x9b, 0xeb, 0xfd, 0x16, 0x16, 0x37, 0x80, 0xee, 0xc3, 0x56, 0x31, 0xf1, 0xaa, 0xda, 0x76, - 0x99, 0xbd, 0x58, 0x0a, 0x78, 0x81, 0x40, 0x67, 0xb0, 0x31, 0x65, 0xc1, 0x58, 0xab, 0x2b, 0xe4, - 0x8b, 0x39, 0xb2, 0xf1, 0x29, 0x0b, 0xc6, 0x58, 0x79, 0x52, 0x44, 0x40, 0xfc, 0xec, 0x93, 0x5c, - 0x41, 0xa4, 0xb3, 0x8e, 0x95, 0xc7, 0xf8, 0x0d, 0xc0, 0xe3, 0xfc, 0x3d, 0x2d, 0xf8, 0xc0, 0x56, - 0xbe, 0x0b, 0x08, 0x49, 0xc8, 0x1e, 0x53, 0x21, 0x19, 0x0f, 0xf2, 0xbc, 0x8b, 0x97, 0xde, 0xbf, - 0x1a, 0xe4, 0x1e, 0x5c, 0x41, 0xed, 0xae, 0x01, 0x59, 0xb0, 0x9d, 0xfe, 0xca, 0x90, 0x38, 0x54, - 0x6b, 0x28, 0xd8, 0x9d, 0x1c, 0xd6, 0xbe, 0x2c, 0x1c, 0xb8, 0xc4, 0xd8, 0xe6, 0xf5, 0xad, 0x5e, - 0x7b, 0x76, 0xab, 0xd7, 0x6e, 0x6e, 0xf5, 0xda, 0x77, 0x89, 0x0e, 0xae, 0x13, 0x1d, 0x3c, 0x4b, - 0x74, 0x70, 0x93, 0xe8, 0xe0, 0xaf, 0x44, 0x07, 0x3f, 0xfe, 0xad, 0xd7, 
0xbe, 0x6c, 0x15, 0xe2, - 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x73, 0x15, 0x10, 0x29, 0x0b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index b1e98390b1ce2..6c80f52f93c10 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -17,42 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ package v1beta1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -66,53 +44,341 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return 
fileDescriptor_99f6bec96facc83d, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo -func (m *Role) Reset() { *m = Role{} } -func 
(*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_Role proto.InternalMessageInfo -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{8} +} +func 
(m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleRef proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1beta1.AggregationRule") @@ -128,10 +394,70 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1beta1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1beta1.Subject") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptor_99f6bec96facc83d) +} + +var fileDescriptor_99f6bec96facc83d = []byte{ + // 808 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6f, 0xfb, 0x44, + 0x18, 0xce, 0xa5, 0x89, 
0x12, 0x5f, 0x88, 0xc2, 0xef, 0xa8, 0xc0, 0xaa, 0xc0, 0x89, 0x02, 0x43, + 0xa5, 0x52, 0x9b, 0x16, 0x04, 0x2c, 0x48, 0xd4, 0x0c, 0x50, 0xb5, 0x84, 0xea, 0x2a, 0x18, 0x10, + 0x03, 0x67, 0xe7, 0xea, 0x1e, 0xf1, 0x97, 0xee, 0xec, 0x48, 0x15, 0x0b, 0x0b, 0x1b, 0x03, 0x12, + 0x13, 0x2b, 0x33, 0x13, 0x23, 0x7f, 0x41, 0xc6, 0x8e, 0x9d, 0x22, 0x6a, 0xfe, 0x10, 0xd0, 0xf9, + 0x23, 0xce, 0x67, 0x9b, 0x29, 0x12, 0x12, 0x53, 0x7b, 0xef, 0xfb, 0xbc, 0xcf, 0xfb, 0xbc, 0x8f, + 0xef, 0xde, 0xc0, 0x8f, 0x47, 0x1f, 0x0a, 0x9d, 0x05, 0xc6, 0x28, 0xb6, 0x28, 0xf7, 0x69, 0x44, + 0x85, 0x31, 0xa6, 0xfe, 0x30, 0xe0, 0x46, 0x9e, 0x20, 0x21, 0x33, 0xb8, 0x45, 0x6c, 0x63, 0x7c, + 0x62, 0xd1, 0x88, 0x9c, 0x18, 0x0e, 0xf5, 0x29, 0x27, 0x11, 0x1d, 0xea, 0x21, 0x0f, 0xa2, 0x00, + 0xbd, 0x96, 0x01, 0x75, 0x12, 0x32, 0x5d, 0x02, 0xf5, 0x1c, 0x78, 0x70, 0xec, 0xb0, 0xe8, 0x36, + 0xb6, 0x74, 0x3b, 0xf0, 0x0c, 0x27, 0x70, 0x02, 0x23, 0xc5, 0x5b, 0xf1, 0x4d, 0x7a, 0x4a, 0x0f, + 0xe9, 0x7f, 0x19, 0xcf, 0xc1, 0x7b, 0x65, 0x43, 0x8f, 0xd8, 0xb7, 0xcc, 0xa7, 0xfc, 0xce, 0x08, + 0x47, 0x8e, 0x0c, 0x08, 0xc3, 0xa3, 0x11, 0x31, 0xc6, 0x2b, 0xdd, 0x0f, 0x8c, 0x4d, 0x55, 0x3c, + 0xf6, 0x23, 0xe6, 0xd1, 0x95, 0x82, 0xf7, 0x9f, 0x2b, 0x10, 0xf6, 0x2d, 0xf5, 0xc8, 0x72, 0x5d, + 0xff, 0x57, 0x00, 0x3b, 0x67, 0x8e, 0xc3, 0xa9, 0x43, 0x22, 0x16, 0xf8, 0x38, 0x76, 0x29, 0xfa, + 0x11, 0xc0, 0x7d, 0xdb, 0x8d, 0x45, 0x44, 0x39, 0x0e, 0x5c, 0x7a, 0x4d, 0x5d, 0x6a, 0x47, 0x01, + 0x17, 0x2a, 0xe8, 0xed, 0x1d, 0xb6, 0x4e, 0xdf, 0xd5, 0x4b, 0x6b, 0x66, 0xbd, 0xf4, 0x70, 0xe4, + 0xc8, 0x80, 0xd0, 0xe5, 0x48, 0xfa, 0xf8, 0x44, 0xbf, 0x24, 0x16, 0x75, 0x8b, 0x5a, 0xf3, 0xf5, + 0xc9, 0xb4, 0x5b, 0x49, 0xa6, 0xdd, 0xfd, 0x4f, 0xd6, 0x10, 0xe3, 0xb5, 0xed, 0xfa, 0xbf, 0x55, + 0x61, 0x6b, 0x0e, 0x8e, 0xbe, 0x85, 0x4d, 0x49, 0x3e, 0x24, 0x11, 0x51, 0x41, 0x0f, 0x1c, 0xb6, + 0x4e, 0xdf, 0xd9, 0x4e, 0xca, 0x17, 0xd6, 0x77, 0xd4, 0x8e, 0x3e, 0xa7, 0x11, 0x31, 0x51, 0xae, + 0x03, 0x96, 0x31, 0x3c, 0x63, 0x45, 0x9f, 0xc1, 0x3a, 0x8f, 0x5d, 0x2a, 0xd4, 0x6a, 0x3a, 0xe9, + 0x9b, 0xfa, 0x86, 0x4b, 0xa0, 0x5f, 0x05, 0x2e, 0xb3, 0xef, 0xa4, 0x5b, 0x66, 0x3b, 0x67, 0xac, + 0xcb, 0x93, 0xc0, 0x19, 0x01, 0x72, 0x60, 0x87, 0x2c, 0xda, 0xaa, 0xee, 0xa5, 0x92, 0x0f, 0x37, + 0x72, 0x2e, 0x7d, 0x06, 0xf3, 0x95, 0x64, 0xda, 0x5d, 0xfe, 0x36, 0x78, 0x99, 0xb5, 0xff, 0x4b, + 0x15, 0xa2, 0x39, 0x93, 0x4c, 0xe6, 0x0f, 0x99, 0xef, 0xec, 0xc0, 0xab, 0x01, 0x6c, 0x8a, 0x38, + 0x4d, 0x14, 0x76, 0xf5, 0x36, 0x8e, 0x76, 0x9d, 0x01, 0xcd, 0x97, 0x73, 0xc6, 0x66, 0x1e, 0x10, + 0x78, 0xc6, 0x81, 0x2e, 0x60, 0x83, 0x07, 0x2e, 0xc5, 0xf4, 0x26, 0x77, 0x6a, 0x33, 0x1d, 0xce, + 0x70, 0x66, 0x27, 0xa7, 0x6b, 0xe4, 0x01, 0x5c, 0x30, 0xf4, 0x27, 0x00, 0xbe, 0xba, 0xea, 0xca, + 0x25, 0x13, 0x11, 0xfa, 0x66, 0xc5, 0x19, 0x7d, 0xcb, 0x0b, 0xcd, 0x44, 0xe6, 0xcb, 0x6c, 0x8a, + 0x22, 0x32, 0xe7, 0xca, 0x15, 0xac, 0xb3, 0x88, 0x7a, 0x85, 0x25, 0x47, 0x1b, 0x67, 0x58, 0x55, + 0x57, 0xde, 0xa4, 0x73, 0xc9, 0x80, 0x33, 0xa2, 0xfe, 0x9f, 0x00, 0x76, 0xe6, 0xc0, 0x3b, 0x98, + 0xe1, 0x7c, 0x71, 0x86, 0xb7, 0xb6, 0x9a, 0x61, 0xbd, 0xf8, 0x7f, 0x00, 0x84, 0xe5, 0x5b, 0x41, + 0x5d, 0x58, 0x1f, 0x53, 0x6e, 0x65, 0x9b, 0x44, 0x31, 0x15, 0x89, 0xff, 0x4a, 0x06, 0x70, 0x16, + 0x47, 0x47, 0x50, 0x21, 0x21, 0xfb, 0x94, 0x07, 0x71, 0x98, 0xb5, 0x57, 0xcc, 0x76, 0x32, 0xed, + 0x2a, 0x67, 0x57, 0xe7, 0x59, 0x10, 0x97, 0x79, 0x09, 0xe6, 0x54, 0x04, 0x31, 0xb7, 0xa9, 0x50, + 0xf7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x0f, 0x60, 0xbb, 0x38, 0x0c, 0x88, 0x47, 0x85, + 0x5a, 0x4b, 0x0b, 0x5e, 0x24, 0xd3, 0x6e, 0x1b, 
0xcf, 0x27, 0xf0, 0x22, 0x0e, 0x7d, 0x04, 0x3b, + 0x7e, 0xe0, 0x17, 0x90, 0x2f, 0xf1, 0xa5, 0x50, 0xeb, 0x69, 0x69, 0xfa, 0x3e, 0x07, 0x8b, 0x29, + 0xbc, 0x8c, 0xed, 0xff, 0x01, 0x60, 0xed, 0xbf, 0xb6, 0xbd, 0xfa, 0x3f, 0x55, 0x61, 0xeb, 0xff, + 0x6d, 0x32, 0xdb, 0x26, 0xf2, 0x09, 0xee, 0x76, 0x8d, 0x6c, 0xfd, 0x04, 0x9f, 0xdf, 0x1f, 0xbf, + 0x03, 0xd8, 0xdc, 0xd1, 0xe2, 0x30, 0x17, 0x55, 0xbf, 0xf1, 0xb4, 0xea, 0xf5, 0x72, 0xbf, 0x87, + 0x85, 0xff, 0xe8, 0x6d, 0xd8, 0x2c, 0x1e, 0x7b, 0x2a, 0x56, 0x29, 0x9b, 0x17, 0xfb, 0x00, 0xcf, + 0x10, 0xa8, 0x07, 0x6b, 0x23, 0xe6, 0x0f, 0xd5, 0x6a, 0x8a, 0x7c, 0x29, 0x47, 0xd6, 0x2e, 0x98, + 0x3f, 0xc4, 0x69, 0x46, 0x22, 0x7c, 0xe2, 0x65, 0x3f, 0xc4, 0x73, 0x08, 0xf9, 0xcc, 0x71, 0x9a, + 0x91, 0x5e, 0x35, 0xf2, 0xcb, 0x34, 0xe3, 0x03, 0x1b, 0xf9, 0xe6, 0xf5, 0x55, 0xb7, 0xd1, 0xf7, + 0x74, 0x77, 0x64, 0x40, 0x45, 0xfe, 0x15, 0x21, 0xb1, 0xa9, 0x5a, 0x4b, 0x61, 0x2f, 0x72, 0x98, + 0x32, 0x28, 0x12, 0xb8, 0xc4, 0x98, 0xc7, 0x93, 0x47, 0xad, 0x72, 0xff, 0xa8, 0x55, 0x1e, 0x1e, + 0xb5, 0xca, 0x0f, 0x89, 0x06, 0x26, 0x89, 0x06, 0xee, 0x13, 0x0d, 0x3c, 0x24, 0x1a, 0xf8, 0x2b, + 0xd1, 0xc0, 0xcf, 0x7f, 0x6b, 0x95, 0xaf, 0x1b, 0xb9, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, + 0x37, 0x8f, 0x77, 0xcd, 0x15, 0x0b, 0x00, 0x00, +} + func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -139,29 +465,36 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -169,47 +502,58 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AggregationRule != nil 
{ + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -217,45 +561,56 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -263,37 +618,46 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -301,37 +665,46 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -339,92 +712,67 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if len(m.Resources) > 0 { - for _, s := range m.Resources { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= 
len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -432,37 +780,46 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -470,45 +827,56 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x1a if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -516,37 +884,46 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -554,37 +931,46 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } 
- i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -592,29 +978,37 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ + i -= len(m.Name) + copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -622,39 +1016,53 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -667,6 +1075,9 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = 
m.ObjectMeta.Size() @@ -685,6 +1096,9 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -701,6 +1115,9 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -715,6 +1132,9 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -729,6 +1149,9 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Verbs) > 0 { @@ -765,6 +1188,9 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -779,6 +1205,9 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -795,6 +1224,9 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -809,6 +1241,9 @@ func (m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -823,6 +1258,9 @@ func (m *RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIGroup) @@ -835,6 +1273,9 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -849,14 +1290,7 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -865,8 +1299,13 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, `}`, }, "") return s @@ -875,10 +1314,15 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -887,9 +1331,14 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -899,9 +1348,14 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -910,9 +1364,14 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -935,9 +1394,14 @@ func (this *Role) String() string { if this == nil { return "nil" } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, `}`, }, "") return s @@ -946,9 +1410,14 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -958,9 +1427,14 @@ func (this *RoleBindingList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -969,9 +1443,14 @@ func (this *RoleList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1024,7 +1503,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1052,7 +1531,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1061,10 +1540,13 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1078,6 +1560,9 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1105,7 +1590,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1133,7 +1618,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1142,6 +1627,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1163,7 +1651,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1172,6 +1660,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1194,7 +1685,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1203,6 +1694,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1222,6 +1716,9 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1249,7 +1746,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1277,7 +1774,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1286,6 +1783,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1307,7 +1807,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1316,6 +1816,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1338,7 +1841,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1347,6 +1850,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1363,6 +1869,9 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1390,7 +1899,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1418,7 +1927,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1427,6 +1936,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1448,7 +1960,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1457,6 +1969,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1474,6 +1989,9 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1501,7 +2019,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1529,7 +2047,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1538,6 +2056,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1559,7 +2080,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1568,6 +2089,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1585,6 +2109,9 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1612,7 +2139,7 @@ func (m 
*PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1640,7 +2167,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1650,6 +2177,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1669,7 +2199,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1679,6 +2209,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1698,7 +2231,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1708,6 +2241,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1727,7 +2263,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1737,6 +2273,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1756,7 +2295,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1766,6 +2305,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1780,6 +2322,9 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1807,7 +2352,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1835,7 +2380,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1844,6 +2389,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1865,7 +2413,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1874,6 +2422,9 @@ func (m *Role) Unmarshal(dAtA []byte) 
error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1891,6 +2442,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1918,7 +2472,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1946,7 +2500,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1955,6 +2509,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1976,7 +2533,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1985,6 +2542,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2007,7 +2567,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2016,6 +2576,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2032,6 +2595,9 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2059,7 +2625,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2087,7 +2653,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2096,6 +2662,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2117,7 +2686,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2126,6 +2695,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2143,6 +2715,9 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2170,7 +2745,7 @@ 
func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2198,7 +2773,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2207,6 +2782,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2228,7 +2806,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2237,6 +2815,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2254,6 +2835,9 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2281,7 +2865,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2309,7 +2893,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2319,6 +2903,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2338,7 +2925,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2348,6 +2935,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2367,7 +2957,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2377,6 +2967,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2391,6 +2984,9 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2418,7 +3014,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2446,7 +3042,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2456,6 +3052,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2475,7 +3074,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2485,6 +3084,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2504,7 +3106,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2514,6 +3116,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2533,7 +3138,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2543,6 +3148,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2557,6 +3165,9 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2623,10 +3234,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2655,6 +3269,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2673,62 +3290,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 808 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6f, 0xfb, 0x44, - 0x18, 0xce, 0xa5, 0x89, 0x12, 0x5f, 0x88, 0xc2, 0xef, 0xa8, 0xc0, 0xaa, 0xc0, 0x89, 0x02, 0x43, - 0xa5, 0x52, 0x9b, 0x16, 0x04, 0x2c, 0x48, 0xd4, 0x0c, 0x50, 0xb5, 0x84, 0xea, 0x2a, 0x18, 0x10, - 0x03, 0x67, 0xe7, 0xea, 0x1e, 0xf1, 0x97, 0xee, 0xec, 0x48, 0x15, 0x0b, 0x0b, 0x1b, 0x03, 0x12, - 0x13, 0x2b, 0x33, 0x13, 0x23, 0x7f, 0x41, 0xc6, 0x8e, 0x9d, 0x22, 0x6a, 0xfe, 0x10, 0xd0, 0xf9, - 0x23, 0xce, 0x67, 0x9b, 0x29, 0x12, 0x12, 0x53, 0x7b, 0xef, 0xfb, 0xbc, 0xcf, 0xfb, 0xbc, 0x8f, - 0xef, 0xde, 0xc0, 0x8f, 0x47, 0x1f, 0x0a, 0x9d, 0x05, 0xc6, 0x28, 0xb6, 0x28, 0xf7, 0x69, 0x44, - 0x85, 0x31, 0xa6, 0xfe, 0x30, 0xe0, 0x46, 0x9e, 0x20, 0x21, 0x33, 0xb8, 0x45, 0x6c, 0x63, 0x7c, - 0x62, 0xd1, 0x88, 0x9c, 0x18, 0x0e, 0xf5, 0x29, 0x27, 0x11, 0x1d, 0xea, 0x21, 0x0f, 0xa2, 0x00, - 0xbd, 0x96, 0x01, 0x75, 0x12, 
0x32, 0x5d, 0x02, 0xf5, 0x1c, 0x78, 0x70, 0xec, 0xb0, 0xe8, 0x36, - 0xb6, 0x74, 0x3b, 0xf0, 0x0c, 0x27, 0x70, 0x02, 0x23, 0xc5, 0x5b, 0xf1, 0x4d, 0x7a, 0x4a, 0x0f, - 0xe9, 0x7f, 0x19, 0xcf, 0xc1, 0x7b, 0x65, 0x43, 0x8f, 0xd8, 0xb7, 0xcc, 0xa7, 0xfc, 0xce, 0x08, - 0x47, 0x8e, 0x0c, 0x08, 0xc3, 0xa3, 0x11, 0x31, 0xc6, 0x2b, 0xdd, 0x0f, 0x8c, 0x4d, 0x55, 0x3c, - 0xf6, 0x23, 0xe6, 0xd1, 0x95, 0x82, 0xf7, 0x9f, 0x2b, 0x10, 0xf6, 0x2d, 0xf5, 0xc8, 0x72, 0x5d, - 0xff, 0x57, 0x00, 0x3b, 0x67, 0x8e, 0xc3, 0xa9, 0x43, 0x22, 0x16, 0xf8, 0x38, 0x76, 0x29, 0xfa, - 0x11, 0xc0, 0x7d, 0xdb, 0x8d, 0x45, 0x44, 0x39, 0x0e, 0x5c, 0x7a, 0x4d, 0x5d, 0x6a, 0x47, 0x01, - 0x17, 0x2a, 0xe8, 0xed, 0x1d, 0xb6, 0x4e, 0xdf, 0xd5, 0x4b, 0x6b, 0x66, 0xbd, 0xf4, 0x70, 0xe4, - 0xc8, 0x80, 0xd0, 0xe5, 0x48, 0xfa, 0xf8, 0x44, 0xbf, 0x24, 0x16, 0x75, 0x8b, 0x5a, 0xf3, 0xf5, - 0xc9, 0xb4, 0x5b, 0x49, 0xa6, 0xdd, 0xfd, 0x4f, 0xd6, 0x10, 0xe3, 0xb5, 0xed, 0xfa, 0xbf, 0x55, - 0x61, 0x6b, 0x0e, 0x8e, 0xbe, 0x85, 0x4d, 0x49, 0x3e, 0x24, 0x11, 0x51, 0x41, 0x0f, 0x1c, 0xb6, - 0x4e, 0xdf, 0xd9, 0x4e, 0xca, 0x17, 0xd6, 0x77, 0xd4, 0x8e, 0x3e, 0xa7, 0x11, 0x31, 0x51, 0xae, - 0x03, 0x96, 0x31, 0x3c, 0x63, 0x45, 0x9f, 0xc1, 0x3a, 0x8f, 0x5d, 0x2a, 0xd4, 0x6a, 0x3a, 0xe9, - 0x9b, 0xfa, 0x86, 0x4b, 0xa0, 0x5f, 0x05, 0x2e, 0xb3, 0xef, 0xa4, 0x5b, 0x66, 0x3b, 0x67, 0xac, - 0xcb, 0x93, 0xc0, 0x19, 0x01, 0x72, 0x60, 0x87, 0x2c, 0xda, 0xaa, 0xee, 0xa5, 0x92, 0x0f, 0x37, - 0x72, 0x2e, 0x7d, 0x06, 0xf3, 0x95, 0x64, 0xda, 0x5d, 0xfe, 0x36, 0x78, 0x99, 0xb5, 0xff, 0x4b, - 0x15, 0xa2, 0x39, 0x93, 0x4c, 0xe6, 0x0f, 0x99, 0xef, 0xec, 0xc0, 0xab, 0x01, 0x6c, 0x8a, 0x38, - 0x4d, 0x14, 0x76, 0xf5, 0x36, 0x8e, 0x76, 0x9d, 0x01, 0xcd, 0x97, 0x73, 0xc6, 0x66, 0x1e, 0x10, - 0x78, 0xc6, 0x81, 0x2e, 0x60, 0x83, 0x07, 0x2e, 0xc5, 0xf4, 0x26, 0x77, 0x6a, 0x33, 0x1d, 0xce, - 0x70, 0x66, 0x27, 0xa7, 0x6b, 0xe4, 0x01, 0x5c, 0x30, 0xf4, 0x27, 0x00, 0xbe, 0xba, 0xea, 0xca, - 0x25, 0x13, 0x11, 0xfa, 0x66, 0xc5, 0x19, 0x7d, 0xcb, 0x0b, 0xcd, 0x44, 0xe6, 0xcb, 0x6c, 0x8a, - 0x22, 0x32, 0xe7, 0xca, 0x15, 0xac, 0xb3, 0x88, 0x7a, 0x85, 0x25, 0x47, 0x1b, 0x67, 0x58, 0x55, - 0x57, 0xde, 0xa4, 0x73, 0xc9, 0x80, 0x33, 0xa2, 0xfe, 0x9f, 0x00, 0x76, 0xe6, 0xc0, 0x3b, 0x98, - 0xe1, 0x7c, 0x71, 0x86, 0xb7, 0xb6, 0x9a, 0x61, 0xbd, 0xf8, 0x7f, 0x00, 0x84, 0xe5, 0x5b, 0x41, - 0x5d, 0x58, 0x1f, 0x53, 0x6e, 0x65, 0x9b, 0x44, 0x31, 0x15, 0x89, 0xff, 0x4a, 0x06, 0x70, 0x16, - 0x47, 0x47, 0x50, 0x21, 0x21, 0xfb, 0x94, 0x07, 0x71, 0x98, 0xb5, 0x57, 0xcc, 0x76, 0x32, 0xed, - 0x2a, 0x67, 0x57, 0xe7, 0x59, 0x10, 0x97, 0x79, 0x09, 0xe6, 0x54, 0x04, 0x31, 0xb7, 0xa9, 0x50, - 0xf7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x0f, 0x60, 0xbb, 0x38, 0x0c, 0x88, 0x47, 0x85, - 0x5a, 0x4b, 0x0b, 0x5e, 0x24, 0xd3, 0x6e, 0x1b, 0xcf, 0x27, 0xf0, 0x22, 0x0e, 0x7d, 0x04, 0x3b, - 0x7e, 0xe0, 0x17, 0x90, 0x2f, 0xf1, 0xa5, 0x50, 0xeb, 0x69, 0x69, 0xfa, 0x3e, 0x07, 0x8b, 0x29, - 0xbc, 0x8c, 0xed, 0xff, 0x01, 0x60, 0xed, 0xbf, 0xb6, 0xbd, 0xfa, 0x3f, 0x55, 0x61, 0xeb, 0xff, - 0x6d, 0x32, 0xdb, 0x26, 0xf2, 0x09, 0xee, 0x76, 0x8d, 0x6c, 0xfd, 0x04, 0x9f, 0xdf, 0x1f, 0xbf, - 0x03, 0xd8, 0xdc, 0xd1, 0xe2, 0x30, 0x17, 0x55, 0xbf, 0xf1, 0xb4, 0xea, 0xf5, 0x72, 0xbf, 0x87, - 0x85, 0xff, 0xe8, 0x6d, 0xd8, 0x2c, 0x1e, 0x7b, 0x2a, 0x56, 0x29, 0x9b, 0x17, 0xfb, 0x00, 0xcf, - 0x10, 0xa8, 0x07, 0x6b, 0x23, 0xe6, 0x0f, 0xd5, 0x6a, 0x8a, 0x7c, 0x29, 0x47, 0xd6, 0x2e, 0x98, - 0x3f, 0xc4, 0x69, 0x46, 0x22, 0x7c, 0xe2, 0x65, 0x3f, 0xc4, 0x73, 0x08, 0xf9, 0xcc, 0x71, 0x9a, - 0x91, 0x5e, 0x35, 0xf2, 0xcb, 0x34, 0xe3, 0x03, 0x1b, 
0xf9, 0xe6, 0xf5, 0x55, 0xb7, 0xd1, 0xf7, - 0x74, 0x77, 0x64, 0x40, 0x45, 0xfe, 0x15, 0x21, 0xb1, 0xa9, 0x5a, 0x4b, 0x61, 0x2f, 0x72, 0x98, - 0x32, 0x28, 0x12, 0xb8, 0xc4, 0x98, 0xc7, 0x93, 0x47, 0xad, 0x72, 0xff, 0xa8, 0x55, 0x1e, 0x1e, - 0xb5, 0xca, 0x0f, 0x89, 0x06, 0x26, 0x89, 0x06, 0xee, 0x13, 0x0d, 0x3c, 0x24, 0x1a, 0xf8, 0x2b, - 0xd1, 0xc0, 0xcf, 0x7f, 0x6b, 0x95, 0xaf, 0x1b, 0xb9, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, - 0x37, 0x8f, 0x77, 0xcd, 0x15, 0x0b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go index 9a29909184ebe..7e9764f122992 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1/generated.pb.go @@ -17,32 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto - - It has these top-level messages: - PriorityClass - PriorityClassList -*/ package v1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" + io "io" - math "math" + proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -56,22 +45,110 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_277b2f43b72fffd5, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_277b2f43b72fffd5, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} -func (m *PriorityClassList) Reset() { *m 
= PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo func init() { proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1.PriorityClass") proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1.PriorityClassList") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto", fileDescriptor_277b2f43b72fffd5) +} + +var fileDescriptor_277b2f43b72fffd5 = []byte{ + // 488 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, + 0x18, 0xc6, 0xeb, 0x1e, 0x95, 0x0e, 0x57, 0x95, 0x4a, 0x10, 0x52, 0xd4, 0x21, 0xad, 0x7a, 0x03, + 0x59, 0xb0, 0xe9, 0x09, 0x10, 0xd2, 0x4d, 0x84, 0x93, 0x10, 0xd2, 0x21, 0xaa, 0x0c, 0x0c, 0x88, + 0x01, 0x27, 0x79, 0x2f, 0x35, 0x4d, 0xe2, 0xc8, 0x76, 0x22, 0x75, 0xe3, 0x23, 0xf0, 0x8d, 0x58, + 0x3b, 0xde, 0x78, 0x53, 0x45, 0xc3, 0x47, 0x60, 0x63, 0x42, 0x49, 0xc3, 0xa5, 0x7f, 0xee, 0x04, + 0x5b, 0xfc, 0x3e, 0xcf, 0xef, 0xb1, 0xfd, 0x24, 0xc1, 0xaf, 0xe6, 0x2f, 0x15, 0xe1, 0x82, 0xce, + 0x33, 0x0f, 0x64, 0x02, 0x1a, 0x14, 0xcd, 0x21, 0x09, 0x84, 0xa4, 0xb5, 0xc0, 0x52, 0x4e, 0x95, + 0x3f, 0x83, 0x20, 0x8b, 0x78, 0x12, 0xd2, 0x7c, 0x42, 0x43, 0x48, 0x40, 0x32, 0x0d, 0x01, 0x49, + 0xa5, 0xd0, 0xc2, 0x30, 0x37, 0x4e, 0xc2, 0x52, 0x4e, 0x1a, 0x27, 0xc9, 0x27, 0x83, 0x27, 0x21, + 0xd7, 0xb3, 0xcc, 0x23, 0xbe, 0x88, 0x69, 0x28, 0x42, 0x41, 0x2b, 0xc0, 0xcb, 0x2e, 0xab, 0x55, + 0xb5, 0xa8, 0x9e, 0x36, 0x41, 0x83, 0xf1, 0xd6, 0x96, 0xbe, 0x90, 0x70, 0xcb, 0x66, 0x83, 0x67, + 0x8d, 0x27, 0x66, 0xfe, 0x8c, 0x27, 0x20, 0x17, 0x34, 0x9d, 0x87, 0xe5, 0x40, 0xd1, 0x18, 0x34, + 0xbb, 0x8d, 0xa2, 0x77, 0x51, 0x32, 0x4b, 0x34, 0x8f, 0xe1, 0x00, 0x78, 0xf1, 0x2f, 0xa0, 0xbc, + 0x68, 0xcc, 0xf6, 0xb9, 0xf1, 0xaf, 0x36, 0xee, 0x4d, 0x25, 0x17, 0x92, 0xeb, 0xc5, 0xeb, 0x88, + 0x29, 0x65, 0x7c, 0xc6, 0xc7, 0xe5, 0xa9, 0x02, 0xa6, 0x99, 0x89, 0x46, 0xc8, 0xee, 0x9e, 0x3e, + 0x25, 0x4d, 0x61, 0x37, 0xe1, 0x24, 0x9d, 0x87, 0xe5, 0x40, 0x91, 0xd2, 0x4d, 0xf2, 0x09, 0x79, + 0xef, 0x7d, 0x01, 0x5f, 0xbf, 0x03, 0xcd, 0x1c, 0x63, 0xb9, 0x1a, 0xb6, 0x8a, 0xd5, 0x10, 0x37, + 0x33, 0xf7, 0x26, 0xd5, 0x38, 0xc1, 0x9d, 0x9c, 0x45, 0x19, 0x98, 0xed, 0x11, 0xb2, 0x3b, 0x4e, + 0xaf, 0x36, 0x77, 0x3e, 0x94, 0x43, 0x77, 0xa3, 0x19, 0x67, 0xb8, 0x17, 0x46, 0xc2, 0x63, 0xd1, + 0x39, 0x5c, 0xb2, 0x2c, 0xd2, 0xe6, 0xd1, 0x08, 0xd9, 0xc7, 0xce, 0xa3, 0xda, 0xdc, 0x7b, 0xb3, + 0x2d, 0xba, 0xbb, 0x5e, 0xe3, 0x39, 0xee, 0x06, 0xa0, 0x7c, 0xc9, 0x53, 0xcd, 0x45, 0x62, 0xde, + 0x1b, 0x21, 0xfb, 0xbe, 0xf3, 0xb0, 0x46, 0xbb, 0xe7, 0x8d, 0xe4, 0x6e, 0xfb, 0x8c, 0x10, 0xf7, + 0x53, 0x09, 0x10, 0x57, 0xab, 0xa9, 0x88, 0xb8, 0xbf, 0x30, 0x3b, 0x15, 0x7b, 0x56, 0xac, 0x86, + 0xfd, 0xe9, 0x9e, 0xf6, 0x7b, 0x35, 0x3c, 0x39, 0xfc, 0x02, 0xc8, 0xbe, 0xcd, 0x3d, 0x08, 0x1d, + 0x7f, 0x47, 0xf8, 0xc1, 0x4e, 0xeb, 0x17, 0x5c, 0x69, 0xe3, 0xd3, 0x41, 0xf3, 0xe4, 0xff, 0x9a, + 0x2f, 0xe9, 0xaa, 0xf7, 0x7e, 0x7d, 0xc5, 0xe3, 0xbf, 0x93, 0xad, 0xd6, 0x2f, 0x70, 0x87, 0x6b, + 0x88, 0x95, 0xd9, 0x1e, 0x1d, 0xd9, 0xdd, 0xd3, 0xc7, 0xe4, 0xae, 0xbf, 0x80, 0xec, 0x9c, 0xac, + 0x79, 0x3d, 0x6f, 0x4b, 0xda, 0xdd, 0x84, 0x38, 0xf6, 0x72, 0x6d, 0xb5, 0xae, 0xd6, 0x56, 0xeb, + 0x7a, 0x6d, 0xb5, 0xbe, 0x16, 0x16, 0x5a, 0x16, 0x16, 0xba, 0x2a, 0x2c, 0x74, 0x5d, 0x58, 0xe8, + 0x47, 0x61, 0xa1, 0x6f, 0x3f, 0xad, 0xd6, 
0xc7, 0x76, 0x3e, 0xf9, 0x13, 0x00, 0x00, 0xff, 0xff, + 0x53, 0xd9, 0x28, 0x30, 0xb1, 0x03, 0x00, 0x00, +} + func (m *PriorityClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -79,46 +156,55 @@ func (m *PriorityClass) Marshal() (dAtA []byte, err error) { } func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- if m.GlobalDefault { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - if m.PreemptionPolicy != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) - i += copy(dAtA[i:], *m.PreemptionPolicy) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -126,43 +212,57 @@ func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { } func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 
1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -179,6 +279,9 @@ func (m *PriorityClass) Size() (n int) { } func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -193,14 +296,7 @@ func (m *PriorityClassList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -210,7 +306,7 @@ func (this *PriorityClass) String() string { return "nil" } s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, @@ -223,9 +319,14 @@ func (this *PriorityClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PriorityClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -253,7 +354,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -281,7 +382,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -290,6 +391,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -311,7 +415,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift + m.Value |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -330,7 +434,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -350,7 +454,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -360,6 +464,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
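The Unmarshal hunks in this diff, for rbac/v1beta1 above and for the scheduling packages here, repeat the same two mechanical edits: the varint accumulation drops its redundant parentheses (int(b&0x7F) << shift), and every computed end offset gains a negative-value check so that a length which overflows int cannot wrap around the start of the buffer. The following is a hedged, self-contained sketch of that decode pattern, not the vendored code itself; the helper names readUvarint and readBytesField and the surrounding main are invented for illustration.

package main

import (
	"errors"
	"fmt"
	"io"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")
var errIntOverflow = errors.New("proto: integer overflow")

// readUvarint mirrors the generated decode loop: accumulate 7 bits per byte
// and reject shifts that would overflow a 64-bit value.
func readUvarint(data []byte, idx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if idx >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[idx]
		idx++
		v |= uint64(b&0x7F) << shift // the form this diff switches to
		if b < 0x80 {
			break
		}
	}
	return v, idx, nil
}

// readBytesField shows the added guards: after computing postIndex the code
// now rejects negative values before the usual postIndex > len(data) check.
func readBytesField(data []byte, idx int) ([]byte, int, error) {
	length, idx, err := readUvarint(data, idx)
	if err != nil {
		return nil, 0, err
	}
	intLen := int(length)
	if intLen < 0 {
		return nil, 0, errInvalidLength
	}
	postIndex := idx + intLen
	if postIndex < 0 { // guard added throughout this diff: the addition overflowed
		return nil, 0, errInvalidLength
	}
	if postIndex > len(data) {
		return nil, 0, io.ErrUnexpectedEOF
	}
	return data[idx:postIndex], postIndex, nil
}

func main() {
	// 0x03 = length 3, followed by "abc"
	field, next, err := readBytesField([]byte{0x03, 'a', 'b', 'c'}, 0)
	fmt.Println(string(field), next, err)
}

On 64-bit builds the postIndex < 0 branch only triggers when idx + intLen overflows int, which is exactly the wrap-around the added guards exist to reject.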
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -379,7 +486,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -389,6 +496,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -404,6 +514,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -431,7 +544,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -459,7 +572,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -468,6 +581,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -489,7 +605,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -498,6 +614,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -515,6 +634,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -581,10 +703,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -613,6 +738,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -631,42 +759,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 488 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, - 0x18, 0xc6, 0xeb, 0x1e, 0x95, 0x0e, 0x57, 0x95, 0x4a, 0x10, 0x52, 0xd4, 0x21, 0xad, 0x7a, 0x03, - 0x59, 0xb0, 0xe9, 0x09, 0x10, 0xd2, 0x4d, 0x84, 0x93, 0x10, 0xd2, 0x21, 0xaa, 0x0c, 0x0c, 0x88, - 0x01, 0x27, 0x79, 0x2f, 0x35, 0x4d, 0xe2, 0xc8, 0x76, 0x22, 0x75, 0xe3, 0x23, 0xf0, 0x8d, 0x58, - 0x3b, 0xde, 0x78, 0x53, 0x45, 0xc3, 0x47, 0x60, 0x63, 0x42, 0x49, 0xc3, 0xa5, 0x7f, 0xee, 
0x04, - 0x5b, 0xfc, 0x3e, 0xcf, 0xef, 0xb1, 0xfd, 0x24, 0xc1, 0xaf, 0xe6, 0x2f, 0x15, 0xe1, 0x82, 0xce, - 0x33, 0x0f, 0x64, 0x02, 0x1a, 0x14, 0xcd, 0x21, 0x09, 0x84, 0xa4, 0xb5, 0xc0, 0x52, 0x4e, 0x95, - 0x3f, 0x83, 0x20, 0x8b, 0x78, 0x12, 0xd2, 0x7c, 0x42, 0x43, 0x48, 0x40, 0x32, 0x0d, 0x01, 0x49, - 0xa5, 0xd0, 0xc2, 0x30, 0x37, 0x4e, 0xc2, 0x52, 0x4e, 0x1a, 0x27, 0xc9, 0x27, 0x83, 0x27, 0x21, - 0xd7, 0xb3, 0xcc, 0x23, 0xbe, 0x88, 0x69, 0x28, 0x42, 0x41, 0x2b, 0xc0, 0xcb, 0x2e, 0xab, 0x55, - 0xb5, 0xa8, 0x9e, 0x36, 0x41, 0x83, 0xf1, 0xd6, 0x96, 0xbe, 0x90, 0x70, 0xcb, 0x66, 0x83, 0x67, - 0x8d, 0x27, 0x66, 0xfe, 0x8c, 0x27, 0x20, 0x17, 0x34, 0x9d, 0x87, 0xe5, 0x40, 0xd1, 0x18, 0x34, - 0xbb, 0x8d, 0xa2, 0x77, 0x51, 0x32, 0x4b, 0x34, 0x8f, 0xe1, 0x00, 0x78, 0xf1, 0x2f, 0xa0, 0xbc, - 0x68, 0xcc, 0xf6, 0xb9, 0xf1, 0xaf, 0x36, 0xee, 0x4d, 0x25, 0x17, 0x92, 0xeb, 0xc5, 0xeb, 0x88, - 0x29, 0x65, 0x7c, 0xc6, 0xc7, 0xe5, 0xa9, 0x02, 0xa6, 0x99, 0x89, 0x46, 0xc8, 0xee, 0x9e, 0x3e, - 0x25, 0x4d, 0x61, 0x37, 0xe1, 0x24, 0x9d, 0x87, 0xe5, 0x40, 0x91, 0xd2, 0x4d, 0xf2, 0x09, 0x79, - 0xef, 0x7d, 0x01, 0x5f, 0xbf, 0x03, 0xcd, 0x1c, 0x63, 0xb9, 0x1a, 0xb6, 0x8a, 0xd5, 0x10, 0x37, - 0x33, 0xf7, 0x26, 0xd5, 0x38, 0xc1, 0x9d, 0x9c, 0x45, 0x19, 0x98, 0xed, 0x11, 0xb2, 0x3b, 0x4e, - 0xaf, 0x36, 0x77, 0x3e, 0x94, 0x43, 0x77, 0xa3, 0x19, 0x67, 0xb8, 0x17, 0x46, 0xc2, 0x63, 0xd1, - 0x39, 0x5c, 0xb2, 0x2c, 0xd2, 0xe6, 0xd1, 0x08, 0xd9, 0xc7, 0xce, 0xa3, 0xda, 0xdc, 0x7b, 0xb3, - 0x2d, 0xba, 0xbb, 0x5e, 0xe3, 0x39, 0xee, 0x06, 0xa0, 0x7c, 0xc9, 0x53, 0xcd, 0x45, 0x62, 0xde, - 0x1b, 0x21, 0xfb, 0xbe, 0xf3, 0xb0, 0x46, 0xbb, 0xe7, 0x8d, 0xe4, 0x6e, 0xfb, 0x8c, 0x10, 0xf7, - 0x53, 0x09, 0x10, 0x57, 0xab, 0xa9, 0x88, 0xb8, 0xbf, 0x30, 0x3b, 0x15, 0x7b, 0x56, 0xac, 0x86, - 0xfd, 0xe9, 0x9e, 0xf6, 0x7b, 0x35, 0x3c, 0x39, 0xfc, 0x02, 0xc8, 0xbe, 0xcd, 0x3d, 0x08, 0x1d, - 0x7f, 0x47, 0xf8, 0xc1, 0x4e, 0xeb, 0x17, 0x5c, 0x69, 0xe3, 0xd3, 0x41, 0xf3, 0xe4, 0xff, 0x9a, - 0x2f, 0xe9, 0xaa, 0xf7, 0x7e, 0x7d, 0xc5, 0xe3, 0xbf, 0x93, 0xad, 0xd6, 0x2f, 0x70, 0x87, 0x6b, - 0x88, 0x95, 0xd9, 0x1e, 0x1d, 0xd9, 0xdd, 0xd3, 0xc7, 0xe4, 0xae, 0xbf, 0x80, 0xec, 0x9c, 0xac, - 0x79, 0x3d, 0x6f, 0x4b, 0xda, 0xdd, 0x84, 0x38, 0xf6, 0x72, 0x6d, 0xb5, 0xae, 0xd6, 0x56, 0xeb, - 0x7a, 0x6d, 0xb5, 0xbe, 0x16, 0x16, 0x5a, 0x16, 0x16, 0xba, 0x2a, 0x2c, 0x74, 0x5d, 0x58, 0xe8, - 0x47, 0x61, 0xa1, 0x6f, 0x3f, 0xad, 0xd6, 0xc7, 0x76, 0x3e, 0xf9, 0x13, 0x00, 0x00, 0xff, 0xff, - 0x53, 0xd9, 0x28, 0x30, 0xb1, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto index ada9eaf85b038..82f6e0a21a54b 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -33,7 +33,7 @@ option go_package = "v1"; // integer value. The value can be any valid integer. message PriorityClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -65,7 +65,7 @@ message PriorityClass { // PriorityClassList is a collection of priority classes. 
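Two helper rewrites in the regenerated scheduling/v1 file above are easy to miss in the noise: sovGenerated now computes the varint width arithmetically via math/bits instead of a shift loop, and encodeVarintGenerated subtracts that width from the target offset so it can be called while filling a buffer from the tail. A standalone sketch of those two helpers follows; the function bodies track the diff, but the surrounding main and buffer setup are illustrative only.

package main

import (
	"fmt"
	"math/bits"
)

// sovGenerated returns the number of bytes needed to varint-encode x.
// (bits.Len64(x|1)+6)/7 rounds the bit length up to whole 7-bit groups;
// the |1 makes x = 0 still report one byte.
func sovGenerated(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarintGenerated writes v ending just before offset, matching the
// tail-first layout of the regenerated marshalers, and returns the new offset.
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	offset -= sovGenerated(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 8)
	i := len(buf)
	i = encodeVarintGenerated(buf, i, 300) // 300 needs 2 bytes: 0xAC 0x02
	fmt.Println(i, buf[i:])                // 6 [172 2]
}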
message PriorityClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go index e91842ec4da26..087ee10d83390 100644 --- a/vendor/k8s.io/api/scheduling/v1/types.go +++ b/vendor/k8s.io/api/scheduling/v1/types.go @@ -30,7 +30,7 @@ import ( type PriorityClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -65,7 +65,7 @@ type PriorityClass struct { type PriorityClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go index 853f255d52610..4cfb9d3e35343 100644 --- a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. 
However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", @@ -42,7 +42,7 @@ func (PriorityClass) SwaggerDoc() map[string]string { var map_PriorityClassList = map[string]string{ "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of PriorityClasses", } diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go index 59dbb5eab5631..05bff0ff477cf 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -17,32 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto - - It has these top-level messages: - PriorityClass - PriorityClassList -*/ package v1alpha1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" + io "io" - math "math" + proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -56,22 +45,110 @@ var _ = math.Inf // proto package needs to be updated. 
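The larger structural change repeated in each regenerated package (scheduling/v1 above, and the v1alpha1 copy that this hunk begins) is that Marshal no longer appends fields front to back: the buffer is sized first and MarshalToSizedBuffer fills it from the end, emitting fields in descending field-number order, which is why PreemptionPolicy now appears before ObjectMeta in the generated body. A minimal, hedged sketch of that shape, using an invented two-field message rather than the real PriorityClass:

package main

import "fmt"

// toy message: field 1 = Name (string), field 2 = Value (varint)
type toyMsg struct {
	Name  string
	Value int32
}

func sov(x uint64) int {
	n := 1
	for x >= 0x80 {
		x >>= 7
		n++
	}
	return n
}

func (m *toyMsg) size() int {
	n := 0
	n += 1 + sov(uint64(len(m.Name))) + len(m.Name) // tag + len + bytes
	n += 1 + sov(uint64(m.Value))                   // tag + varint
	return n
}

// putVarint writes v ending just before offset and returns the new offset.
func putVarint(b []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 0x80 {
		b[offset] = byte(v) | 0x80
		v >>= 7
		offset++
	}
	b[offset] = byte(v)
	return base
}

// marshalToSizedBuffer mirrors the generated pattern: i starts at len(b) and
// moves toward 0 as fields are written back to front, highest tag first.
func (m *toyMsg) marshalToSizedBuffer(b []byte) int {
	i := len(b)
	i = putVarint(b, i, uint64(m.Value))
	i--
	b[i] = 0x10 // field 2, varint wire type
	i -= len(m.Name)
	copy(b[i:], m.Name)
	i = putVarint(b, i, uint64(len(m.Name)))
	i--
	b[i] = 0x0a // field 1, length-delimited wire type
	return i
}

func (m *toyMsg) marshal() []byte {
	b := make([]byte, m.size())
	n := m.marshalToSizedBuffer(b)
	return b[n:]
}

func main() {
	fmt.Printf("% x\n", (&toyMsg{Name: "hi", Value: 3}).marshal())
	// expected: 0a 02 68 69 10 03
}

Writing tail-first means the length prefix of a nested message can be emitted after its body is already in place, which is why the regenerated code above no longer needs the n1/n2 temporaries of the old front-to-back MarshalTo version.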
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_f033641dd0b95dce, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_f033641dd0b95dce, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo func init() { proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClass") proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClassList") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptor_f033641dd0b95dce) +} + +var fileDescriptor_f033641dd0b95dce = []byte{ + // 494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x4f, 0x8b, 0xd3, 0x40, + 0x18, 0xc6, 0x3b, 0x5d, 0x0b, 0x75, 0x4a, 0xa1, 0x46, 0x84, 0xd0, 0xc3, 0xb4, 0x74, 0x2f, 0xbd, + 0xec, 0x8c, 0x5d, 0x54, 0x84, 0xbd, 0xd5, 0x85, 0x45, 0x50, 0x2c, 0x39, 0x78, 0x10, 0x0f, 0x4e, + 0xd3, 0x77, 0xd3, 0xb1, 0x49, 0x26, 0xcc, 0x4c, 0x02, 0xbd, 0xf9, 0x11, 0xfc, 0x52, 0x42, 0x8f, + 0x7b, 0xdc, 0x53, 0xb1, 0xf1, 0x23, 0x78, 0xf3, 0x24, 0x49, 0xd3, 0x4d, 0xdb, 0xf8, 0x67, 0x6f, + 0x99, 0xf7, 0xf9, 0x3d, 0xcf, 0xcc, 0x3c, 0x49, 0xf0, 0xd5, 0xe2, 0xa5, 0xa6, 0x42, 0xb2, 0x45, + 0x3c, 0x05, 0x15, 0x82, 0x01, 0xcd, 0x12, 0x08, 0x67, 0x52, 0xb1, 0x42, 0xe0, 0x91, 0x60, 0xda, + 0x9d, 0xc3, 0x2c, 0xf6, 0x45, 0xe8, 0xb1, 0x64, 0xc4, 0xfd, 0x68, 0xce, 0x47, 0xcc, 0x83, 0x10, + 0x14, 0x37, 0x30, 0xa3, 0x91, 0x92, 0x46, 0x5a, 0x64, 0xcb, 0x53, 0x1e, 0x09, 0x5a, 0xf2, 0x74, + 0xc7, 0x77, 0xcf, 0x3c, 0x61, 
0xe6, 0xf1, 0x94, 0xba, 0x32, 0x60, 0x9e, 0xf4, 0x24, 0xcb, 0x6d, + 0xd3, 0xf8, 0x3a, 0x5f, 0xe5, 0x8b, 0xfc, 0x69, 0x1b, 0xd7, 0x1d, 0xec, 0x6d, 0xef, 0x4a, 0x05, + 0x2c, 0xa9, 0x6c, 0xd9, 0x7d, 0x56, 0x32, 0x01, 0x77, 0xe7, 0x22, 0x04, 0xb5, 0x64, 0xd1, 0xc2, + 0xcb, 0x06, 0x9a, 0x05, 0x60, 0xf8, 0x9f, 0x5c, 0xec, 0x6f, 0x2e, 0x15, 0x87, 0x46, 0x04, 0x50, + 0x31, 0xbc, 0xf8, 0x9f, 0x21, 0xbb, 0x6e, 0xc0, 0x8f, 0x7d, 0x83, 0x9f, 0x75, 0xdc, 0x9e, 0x28, + 0x21, 0x95, 0x30, 0xcb, 0x57, 0x3e, 0xd7, 0xda, 0xfa, 0x84, 0x9b, 0xd9, 0xa9, 0x66, 0xdc, 0x70, + 0x1b, 0xf5, 0xd1, 0xb0, 0x75, 0xfe, 0x94, 0x96, 0xb5, 0xdd, 0x85, 0xd3, 0x68, 0xe1, 0x65, 0x03, + 0x4d, 0x33, 0x9a, 0x26, 0x23, 0xfa, 0x6e, 0xfa, 0x19, 0x5c, 0xf3, 0x16, 0x0c, 0x1f, 0x5b, 0xab, + 0x75, 0xaf, 0x96, 0xae, 0x7b, 0xb8, 0x9c, 0x39, 0x77, 0xa9, 0xd6, 0x29, 0x6e, 0x24, 0xdc, 0x8f, + 0xc1, 0xae, 0xf7, 0xd1, 0xb0, 0x31, 0x6e, 0x17, 0x70, 0xe3, 0x7d, 0x36, 0x74, 0xb6, 0x9a, 0x75, + 0x81, 0xdb, 0x9e, 0x2f, 0xa7, 0xdc, 0xbf, 0x84, 0x6b, 0x1e, 0xfb, 0xc6, 0x3e, 0xe9, 0xa3, 0x61, + 0x73, 0xfc, 0xa4, 0x80, 0xdb, 0x57, 0xfb, 0xa2, 0x73, 0xc8, 0x5a, 0xcf, 0x71, 0x6b, 0x06, 0xda, + 0x55, 0x22, 0x32, 0x42, 0x86, 0xf6, 0x83, 0x3e, 0x1a, 0x3e, 0x1c, 0x3f, 0x2e, 0xac, 0xad, 0xcb, + 0x52, 0x72, 0xf6, 0x39, 0xcb, 0xc3, 0x9d, 0x48, 0x01, 0x04, 0xf9, 0x6a, 0x22, 0x7d, 0xe1, 0x2e, + 0xed, 0x46, 0xee, 0xbd, 0x48, 0xd7, 0xbd, 0xce, 0xe4, 0x48, 0xfb, 0xb5, 0xee, 0x9d, 0x56, 0xbf, + 0x00, 0x7a, 0x8c, 0x39, 0x95, 0xd0, 0xc1, 0x37, 0x84, 0x1f, 0x1d, 0xb4, 0xfe, 0x46, 0x68, 0x63, + 0x7d, 0xac, 0x34, 0x4f, 0xef, 0xd7, 0x7c, 0xe6, 0xce, 0x7b, 0xef, 0x14, 0x57, 0x6c, 0xee, 0x26, + 0x7b, 0xad, 0x3b, 0xb8, 0x21, 0x0c, 0x04, 0xda, 0xae, 0xf7, 0x4f, 0x86, 0xad, 0xf3, 0x33, 0xfa, + 0xef, 0x7f, 0x81, 0x1e, 0x9c, 0xaf, 0x7c, 0x49, 0xaf, 0xb3, 0x0c, 0x67, 0x1b, 0x35, 0xa6, 0xab, + 0x0d, 0xa9, 0xdd, 0x6c, 0x48, 0xed, 0x76, 0x43, 0x6a, 0x5f, 0x52, 0x82, 0x56, 0x29, 0x41, 0x37, + 0x29, 0x41, 0xb7, 0x29, 0x41, 0xdf, 0x53, 0x82, 0xbe, 0xfe, 0x20, 0xb5, 0x0f, 0xcd, 0x5d, 0xe6, + 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x5c, 0x1a, 0x39, 0xc9, 0x03, 0x00, 0x00, +} + func (m *PriorityClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -79,46 +156,55 @@ func (m *PriorityClass) Marshal() (dAtA []byte, err error) { } func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- if m.GlobalDefault { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - if m.PreemptionPolicy != nil { - dAtA[i] = 0x2a - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) - i += copy(dAtA[i:], *m.PreemptionPolicy) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -126,43 +212,57 @@ func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { } func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -179,6 +279,9 @@ func (m *PriorityClass) Size() (n int) { } func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -193,14 +296,7 @@ func (m *PriorityClassList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -210,7 +306,7 @@ func (this *PriorityClass) String() string { return "nil" } s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, @@ -223,9 +319,14 @@ func (this *PriorityClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PriorityClass{" + for _, f := range this.Items { + repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -253,7 +354,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -281,7 +382,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -290,6 +391,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -311,7 +415,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift + m.Value |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -330,7 +434,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -350,7 +454,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -360,6 +464,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -379,7 +486,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -389,6 +496,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -404,6 +514,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -431,7 +544,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -459,7 +572,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -468,6 +581,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -489,7 +605,7 @@ func (m *PriorityClassList) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -498,6 +614,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -515,6 +634,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -581,10 +703,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -613,6 +738,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -631,42 +759,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 494 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x4f, 0x8b, 0xd3, 0x40, - 0x18, 0xc6, 0x3b, 0x5d, 0x0b, 0x75, 0x4a, 0xa1, 0x46, 0x84, 0xd0, 0xc3, 0xb4, 0x74, 0x2f, 0xbd, - 0xec, 0x8c, 0x5d, 0x54, 0x84, 0xbd, 0xd5, 0x85, 0x45, 0x50, 0x2c, 0x39, 0x78, 0x10, 0x0f, 0x4e, - 0xd3, 0x77, 0xd3, 0xb1, 0x49, 0x26, 0xcc, 0x4c, 0x02, 0xbd, 0xf9, 0x11, 0xfc, 0x52, 0x42, 0x8f, - 0x7b, 0xdc, 0x53, 0xb1, 0xf1, 0x23, 0x78, 0xf3, 0x24, 0x49, 0xd3, 0x4d, 0xdb, 0xf8, 0x67, 0x6f, - 0x99, 0xf7, 0xf9, 0x3d, 0xcf, 0xcc, 0x3c, 0x49, 0xf0, 0xd5, 0xe2, 0xa5, 0xa6, 0x42, 0xb2, 0x45, - 0x3c, 0x05, 0x15, 0x82, 0x01, 0xcd, 0x12, 0x08, 0x67, 0x52, 0xb1, 0x42, 0xe0, 0x91, 0x60, 0xda, - 0x9d, 0xc3, 0x2c, 0xf6, 0x45, 0xe8, 0xb1, 0x64, 0xc4, 0xfd, 0x68, 0xce, 0x47, 0xcc, 0x83, 0x10, - 0x14, 0x37, 0x30, 0xa3, 0x91, 0x92, 0x46, 0x5a, 0x64, 0xcb, 0x53, 0x1e, 0x09, 0x5a, 0xf2, 0x74, - 0xc7, 0x77, 0xcf, 0x3c, 0x61, 0xe6, 0xf1, 0x94, 0xba, 0x32, 0x60, 0x9e, 0xf4, 0x24, 0xcb, 0x6d, - 0xd3, 0xf8, 0x3a, 0x5f, 0xe5, 0x8b, 0xfc, 0x69, 0x1b, 0xd7, 0x1d, 0xec, 0x6d, 0xef, 0x4a, 0x05, - 0x2c, 0xa9, 0x6c, 0xd9, 0x7d, 0x56, 0x32, 0x01, 0x77, 0xe7, 0x22, 0x04, 0xb5, 0x64, 0xd1, 0xc2, - 0xcb, 0x06, 0x9a, 0x05, 0x60, 0xf8, 0x9f, 0x5c, 0xec, 0x6f, 0x2e, 0x15, 0x87, 0x46, 0x04, 0x50, - 0x31, 0xbc, 0xf8, 0x9f, 0x21, 0xbb, 0x6e, 0xc0, 0x8f, 0x7d, 0x83, 0x9f, 0x75, 0xdc, 0x9e, 0x28, - 0x21, 0x95, 0x30, 0xcb, 0x57, 0x3e, 0xd7, 0xda, 0xfa, 0x84, 0x9b, 0xd9, 0xa9, 0x66, 0xdc, 0x70, - 0x1b, 0xf5, 0xd1, 0xb0, 0x75, 0xfe, 0x94, 0x96, 0xb5, 0xdd, 0x85, 0xd3, 0x68, 0xe1, 0x65, 0x03, - 0x4d, 0x33, 0x9a, 0x26, 0x23, 0xfa, 0x6e, 0xfa, 0x19, 0x5c, 0xf3, 0x16, 0x0c, 0x1f, 0x5b, 0xab, - 0x75, 0xaf, 0x96, 0xae, 0x7b, 0xb8, 0x9c, 0x39, 0x77, 0xa9, 0xd6, 0x29, 0x6e, 0x24, 0xdc, 0x8f, - 0xc1, 0xae, 0xf7, 0xd1, 0xb0, 0x31, 0x6e, 0x17, 0x70, 0xe3, 0x7d, 0x36, 0x74, 0xb6, 0x9a, 0x75, - 0x81, 0xdb, 0x9e, 0x2f, 0xa7, 0xdc, 0xbf, 0x84, 0x6b, 0x1e, 0xfb, 0xc6, 0x3e, 0xe9, 0xa3, 0x61, - 0x73, 0xfc, 0xa4, 0x80, 0xdb, 0x57, 0xfb, 0xa2, 0x73, 0xc8, 
0x5a, 0xcf, 0x71, 0x6b, 0x06, 0xda, - 0x55, 0x22, 0x32, 0x42, 0x86, 0xf6, 0x83, 0x3e, 0x1a, 0x3e, 0x1c, 0x3f, 0x2e, 0xac, 0xad, 0xcb, - 0x52, 0x72, 0xf6, 0x39, 0xcb, 0xc3, 0x9d, 0x48, 0x01, 0x04, 0xf9, 0x6a, 0x22, 0x7d, 0xe1, 0x2e, - 0xed, 0x46, 0xee, 0xbd, 0x48, 0xd7, 0xbd, 0xce, 0xe4, 0x48, 0xfb, 0xb5, 0xee, 0x9d, 0x56, 0xbf, - 0x00, 0x7a, 0x8c, 0x39, 0x95, 0xd0, 0xc1, 0x37, 0x84, 0x1f, 0x1d, 0xb4, 0xfe, 0x46, 0x68, 0x63, - 0x7d, 0xac, 0x34, 0x4f, 0xef, 0xd7, 0x7c, 0xe6, 0xce, 0x7b, 0xef, 0x14, 0x57, 0x6c, 0xee, 0x26, - 0x7b, 0xad, 0x3b, 0xb8, 0x21, 0x0c, 0x04, 0xda, 0xae, 0xf7, 0x4f, 0x86, 0xad, 0xf3, 0x33, 0xfa, - 0xef, 0x7f, 0x81, 0x1e, 0x9c, 0xaf, 0x7c, 0x49, 0xaf, 0xb3, 0x0c, 0x67, 0x1b, 0x35, 0xa6, 0xab, - 0x0d, 0xa9, 0xdd, 0x6c, 0x48, 0xed, 0x76, 0x43, 0x6a, 0x5f, 0x52, 0x82, 0x56, 0x29, 0x41, 0x37, - 0x29, 0x41, 0xb7, 0x29, 0x41, 0xdf, 0x53, 0x82, 0xbe, 0xfe, 0x20, 0xb5, 0x0f, 0xcd, 0x5d, 0xe6, - 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x5c, 0x1a, 0x39, 0xc9, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 584a2918a263c..682fb873636ce 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -34,7 +34,7 @@ option go_package = "v1alpha1"; // integer value. The value can be any valid integer. message PriorityClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -66,7 +66,7 @@ message PriorityClass { // PriorityClassList is a collection of priority classes. message PriorityClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go index c1a09bce8eb09..86a2c5130e997 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -31,7 +31,7 @@ import ( type PriorityClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -66,7 +66,7 @@ type PriorityClass struct { type PriorityClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go index f9880922a13df..63a9a353cbe44 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", @@ -42,7 +42,7 @@ func (PriorityClass) SwaggerDoc() map[string]string { var map_PriorityClassList = map[string]string{ "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of PriorityClasses", } diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go index d5d9e5a09d985..198fcd0293a91 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -17,32 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto - - It has these top-level messages: - PriorityClass - PriorityClassList -*/ package v1beta1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" + io "io" - math "math" + proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -56,22 +45,110 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd406dede2d3f42, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd406dede2d3f42, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo func init() { proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClass") proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClassList") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptor_6cd406dede2d3f42) +} + +var fileDescriptor_6cd406dede2d3f42 = []byte{ + // 494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, + 0x18, 0xc6, 0xeb, 0x1e, 0x15, 0xc5, 0x55, 0xa5, 0x12, 0x84, 0x14, 0x55, 0x22, 0xad, 0x7a, 0x4b, + 0x07, 0xce, 0xa6, 0x27, 0x40, 0x48, 0xb7, 0x95, 0x13, 0x08, 0x09, 0x44, 0xc9, 0xc0, 0x80, 
0x18, + 0x70, 0x92, 0xf7, 0x52, 0xd3, 0x24, 0x8e, 0x6c, 0x27, 0x52, 0x37, 0x3e, 0x02, 0x1f, 0x8a, 0xa1, + 0xe3, 0x8d, 0x37, 0x55, 0x34, 0x7c, 0x04, 0x36, 0x26, 0x94, 0x34, 0x5c, 0xda, 0x86, 0x7f, 0x5b, + 0xfc, 0x3e, 0xbf, 0xe7, 0xb1, 0xfd, 0x24, 0xc1, 0xcf, 0x16, 0x4f, 0x14, 0xe1, 0x82, 0x2e, 0x12, + 0x07, 0x64, 0x04, 0x1a, 0x14, 0x4d, 0x21, 0xf2, 0x84, 0xa4, 0xa5, 0xc0, 0x62, 0x4e, 0x95, 0x3b, + 0x07, 0x2f, 0x09, 0x78, 0xe4, 0xd3, 0x74, 0xe2, 0x80, 0x66, 0x13, 0xea, 0x43, 0x04, 0x92, 0x69, + 0xf0, 0x48, 0x2c, 0x85, 0x16, 0xc6, 0xbd, 0x2d, 0x4e, 0x58, 0xcc, 0x49, 0x85, 0x93, 0x12, 0xef, + 0x9f, 0xf8, 0x5c, 0xcf, 0x13, 0x87, 0xb8, 0x22, 0xa4, 0xbe, 0xf0, 0x05, 0x2d, 0x5c, 0x4e, 0x72, + 0x51, 0xac, 0x8a, 0x45, 0xf1, 0xb4, 0x4d, 0xeb, 0x8f, 0x76, 0x36, 0x77, 0x85, 0x04, 0x9a, 0xd6, + 0x76, 0xec, 0x3f, 0xac, 0x98, 0x90, 0xb9, 0x73, 0x1e, 0x81, 0x5c, 0xd2, 0x78, 0xe1, 0xe7, 0x03, + 0x45, 0x43, 0xd0, 0xec, 0x77, 0x2e, 0xfa, 0x27, 0x97, 0x4c, 0x22, 0xcd, 0x43, 0xa8, 0x19, 0x1e, + 0xff, 0xcb, 0x90, 0xdf, 0x36, 0x64, 0x87, 0xbe, 0xd1, 0xf7, 0x26, 0xee, 0xce, 0x24, 0x17, 0x92, + 0xeb, 0xe5, 0xd3, 0x80, 0x29, 0x65, 0x7c, 0xc0, 0xed, 0xfc, 0x54, 0x1e, 0xd3, 0xcc, 0x44, 0x43, + 0x34, 0xee, 0x9c, 0x3e, 0x20, 0x55, 0x6b, 0xd7, 0xe1, 0x24, 0x5e, 0xf8, 0xf9, 0x40, 0x91, 0x9c, + 0x26, 0xe9, 0x84, 0xbc, 0x76, 0x3e, 0x82, 0xab, 0x5f, 0x81, 0x66, 0x53, 0x63, 0xb5, 0x1e, 0x34, + 0xb2, 0xf5, 0x00, 0x57, 0x33, 0xfb, 0x3a, 0xd5, 0x38, 0xc6, 0xad, 0x94, 0x05, 0x09, 0x98, 0xcd, + 0x21, 0x1a, 0xb7, 0xa6, 0xdd, 0x12, 0x6e, 0xbd, 0xcd, 0x87, 0xf6, 0x56, 0x33, 0xce, 0x70, 0xd7, + 0x0f, 0x84, 0xc3, 0x82, 0x73, 0xb8, 0x60, 0x49, 0xa0, 0xcd, 0xa3, 0x21, 0x1a, 0xb7, 0xa7, 0x77, + 0x4b, 0xb8, 0xfb, 0x7c, 0x57, 0xb4, 0xf7, 0x59, 0xe3, 0x11, 0xee, 0x78, 0xa0, 0x5c, 0xc9, 0x63, + 0xcd, 0x45, 0x64, 0xde, 0x18, 0xa2, 0xf1, 0xad, 0xe9, 0x9d, 0xd2, 0xda, 0x39, 0xaf, 0x24, 0x7b, + 0x97, 0x33, 0x7c, 0xdc, 0x8b, 0x25, 0x40, 0x58, 0xac, 0x66, 0x22, 0xe0, 0xee, 0xd2, 0x6c, 0x15, + 0xde, 0xb3, 0x6c, 0x3d, 0xe8, 0xcd, 0x0e, 0xb4, 0x1f, 0xeb, 0xc1, 0x71, 0xfd, 0x0b, 0x20, 0x87, + 0x98, 0x5d, 0x0b, 0x1d, 0x7d, 0x41, 0xf8, 0xf6, 0x5e, 0xeb, 0x2f, 0xb9, 0xd2, 0xc6, 0xfb, 0x5a, + 0xf3, 0xe4, 0xff, 0x9a, 0xcf, 0xdd, 0x45, 0xef, 0xbd, 0xf2, 0x8a, 0xed, 0x5f, 0x93, 0x9d, 0xd6, + 0xdf, 0xe0, 0x16, 0xd7, 0x10, 0x2a, 0xb3, 0x39, 0x3c, 0x1a, 0x77, 0x4e, 0xef, 0x93, 0xbf, 0xfe, + 0x0a, 0x64, 0xef, 0x78, 0xd5, 0x3b, 0x7a, 0x91, 0x47, 0xd8, 0xdb, 0xa4, 0xe9, 0xc9, 0x6a, 0x63, + 0x35, 0x2e, 0x37, 0x56, 0xe3, 0x6a, 0x63, 0x35, 0x3e, 0x65, 0x16, 0x5a, 0x65, 0x16, 0xba, 0xcc, + 0x2c, 0x74, 0x95, 0x59, 0xe8, 0x6b, 0x66, 0xa1, 0xcf, 0xdf, 0xac, 0xc6, 0xbb, 0x9b, 0x65, 0xe4, + 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0xc2, 0xc0, 0x1f, 0xc5, 0x03, 0x00, 0x00, +} + func (m *PriorityClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -79,46 +156,55 @@ func (m *PriorityClass) Marshal() (dAtA []byte, err error) { } func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], 
*m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- if m.GlobalDefault { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - if m.PreemptionPolicy != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) - i += copy(dAtA[i:], *m.PreemptionPolicy) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -126,43 +212,57 @@ func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { } func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -179,6 +279,9 @@ func (m *PriorityClass) Size() (n int) { } func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -193,14 +296,7 @@ func (m *PriorityClassList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -210,7 +306,7 @@ func (this *PriorityClass) String() string { return "nil" } s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, @@ -223,9 +319,14 @@ func (this *PriorityClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PriorityClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -253,7 +354,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -281,7 +382,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -290,6 +391,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -311,7 +415,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift + m.Value |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -330,7 +434,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -350,7 +454,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -360,6 +464,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -379,7 +486,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -389,6 +496,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -404,6 +514,9 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -431,7 +544,7 @@ func (m 
*PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -459,7 +572,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -468,6 +581,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -489,7 +605,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -498,6 +614,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -515,6 +634,9 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -581,10 +703,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -613,6 +738,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -631,42 +759,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 494 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, - 0x18, 0xc6, 0xeb, 0x1e, 0x15, 0xc5, 0x55, 0xa5, 0x12, 0x84, 0x14, 0x55, 0x22, 0xad, 0x7a, 0x4b, - 0x07, 0xce, 0xa6, 0x27, 0x40, 0x48, 0xb7, 0x95, 0x13, 0x08, 0x09, 0x44, 0xc9, 0xc0, 0x80, 0x18, - 0x70, 0x92, 0xf7, 0x52, 0xd3, 0x24, 0x8e, 0x6c, 0x27, 0x52, 0x37, 0x3e, 0x02, 0x1f, 0x8a, 0xa1, - 0xe3, 0x8d, 0x37, 0x55, 0x34, 0x7c, 0x04, 0x36, 0x26, 0x94, 0x34, 0x5c, 0xda, 0x86, 0x7f, 0x5b, - 0xfc, 0x3e, 0xbf, 0xe7, 0xb1, 0xfd, 0x24, 0xc1, 0xcf, 0x16, 0x4f, 0x14, 0xe1, 0x82, 0x2e, 0x12, - 0x07, 0x64, 0x04, 0x1a, 0x14, 0x4d, 0x21, 0xf2, 0x84, 0xa4, 0xa5, 0xc0, 0x62, 0x4e, 0x95, 0x3b, - 0x07, 0x2f, 0x09, 0x78, 0xe4, 0xd3, 0x74, 0xe2, 0x80, 0x66, 0x13, 0xea, 0x43, 0x04, 0x92, 0x69, - 0xf0, 0x48, 0x2c, 0x85, 0x16, 0xc6, 0xbd, 0x2d, 0x4e, 0x58, 0xcc, 0x49, 0x85, 0x93, 0x12, 0xef, - 0x9f, 0xf8, 0x5c, 0xcf, 0x13, 0x87, 0xb8, 0x22, 0xa4, 0xbe, 0xf0, 0x05, 0x2d, 0x5c, 0x4e, 0x72, - 0x51, 0xac, 0x8a, 0x45, 0xf1, 0xb4, 0x4d, 0xeb, 0x8f, 0x76, 0x36, 0x77, 0x85, 0x04, 0x9a, 0xd6, - 0x76, 0xec, 0x3f, 0xac, 0x98, 0x90, 0xb9, 0x73, 0x1e, 0x81, 0x5c, 0xd2, 0x78, 0xe1, 0xe7, 0x03, - 0x45, 0x43, 0xd0, 0xec, 0x77, 0x2e, 0xfa, 0x27, 0x97, 0x4c, 0x22, 0xcd, 0x43, 0xa8, 0x19, 0x1e, - 0xff, 0xcb, 0x90, 0xdf, 0x36, 0x64, 0x87, 0xbe, 0xd1, 0xf7, 0x26, 0xee, 0xce, 
0x24, 0x17, 0x92, - 0xeb, 0xe5, 0xd3, 0x80, 0x29, 0x65, 0x7c, 0xc0, 0xed, 0xfc, 0x54, 0x1e, 0xd3, 0xcc, 0x44, 0x43, - 0x34, 0xee, 0x9c, 0x3e, 0x20, 0x55, 0x6b, 0xd7, 0xe1, 0x24, 0x5e, 0xf8, 0xf9, 0x40, 0x91, 0x9c, - 0x26, 0xe9, 0x84, 0xbc, 0x76, 0x3e, 0x82, 0xab, 0x5f, 0x81, 0x66, 0x53, 0x63, 0xb5, 0x1e, 0x34, - 0xb2, 0xf5, 0x00, 0x57, 0x33, 0xfb, 0x3a, 0xd5, 0x38, 0xc6, 0xad, 0x94, 0x05, 0x09, 0x98, 0xcd, - 0x21, 0x1a, 0xb7, 0xa6, 0xdd, 0x12, 0x6e, 0xbd, 0xcd, 0x87, 0xf6, 0x56, 0x33, 0xce, 0x70, 0xd7, - 0x0f, 0x84, 0xc3, 0x82, 0x73, 0xb8, 0x60, 0x49, 0xa0, 0xcd, 0xa3, 0x21, 0x1a, 0xb7, 0xa7, 0x77, - 0x4b, 0xb8, 0xfb, 0x7c, 0x57, 0xb4, 0xf7, 0x59, 0xe3, 0x11, 0xee, 0x78, 0xa0, 0x5c, 0xc9, 0x63, - 0xcd, 0x45, 0x64, 0xde, 0x18, 0xa2, 0xf1, 0xad, 0xe9, 0x9d, 0xd2, 0xda, 0x39, 0xaf, 0x24, 0x7b, - 0x97, 0x33, 0x7c, 0xdc, 0x8b, 0x25, 0x40, 0x58, 0xac, 0x66, 0x22, 0xe0, 0xee, 0xd2, 0x6c, 0x15, - 0xde, 0xb3, 0x6c, 0x3d, 0xe8, 0xcd, 0x0e, 0xb4, 0x1f, 0xeb, 0xc1, 0x71, 0xfd, 0x0b, 0x20, 0x87, - 0x98, 0x5d, 0x0b, 0x1d, 0x7d, 0x41, 0xf8, 0xf6, 0x5e, 0xeb, 0x2f, 0xb9, 0xd2, 0xc6, 0xfb, 0x5a, - 0xf3, 0xe4, 0xff, 0x9a, 0xcf, 0xdd, 0x45, 0xef, 0xbd, 0xf2, 0x8a, 0xed, 0x5f, 0x93, 0x9d, 0xd6, - 0xdf, 0xe0, 0x16, 0xd7, 0x10, 0x2a, 0xb3, 0x39, 0x3c, 0x1a, 0x77, 0x4e, 0xef, 0x93, 0xbf, 0xfe, - 0x0a, 0x64, 0xef, 0x78, 0xd5, 0x3b, 0x7a, 0x91, 0x47, 0xd8, 0xdb, 0xa4, 0xe9, 0xc9, 0x6a, 0x63, - 0x35, 0x2e, 0x37, 0x56, 0xe3, 0x6a, 0x63, 0x35, 0x3e, 0x65, 0x16, 0x5a, 0x65, 0x16, 0xba, 0xcc, - 0x2c, 0x74, 0x95, 0x59, 0xe8, 0x6b, 0x66, 0xa1, 0xcf, 0xdf, 0xac, 0xc6, 0xbb, 0x9b, 0x65, 0xe4, - 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0xc2, 0xc0, 0x1f, 0xc5, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go index 6cb51a8518c39..7469f74a44b9a 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go @@ -17,33 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto - - It has these top-level messages: - PodPreset - PodPresetList - PodPresetSpec -*/ package v1alpha1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" math "math" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -57,27 +44,142 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PodPreset) Reset() { *m = PodPreset{} } -func (*PodPreset) ProtoMessage() {} -func (*PodPreset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *PodPreset) Reset() { *m = PodPreset{} } +func (*PodPreset) ProtoMessage() {} +func (*PodPreset) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{0} +} +func (m *PodPreset) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPreset) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPreset.Merge(m, src) +} +func (m *PodPreset) XXX_Size() int { + return m.Size() +} +func (m *PodPreset) XXX_DiscardUnknown() { + xxx_messageInfo_PodPreset.DiscardUnknown(m) +} + +var xxx_messageInfo_PodPreset proto.InternalMessageInfo + +func (m *PodPresetList) Reset() { *m = PodPresetList{} } +func (*PodPresetList) ProtoMessage() {} +func (*PodPresetList) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{1} +} +func (m *PodPresetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPresetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPresetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPresetList.Merge(m, src) +} +func (m *PodPresetList) XXX_Size() int { + return m.Size() +} +func (m *PodPresetList) XXX_DiscardUnknown() { + xxx_messageInfo_PodPresetList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodPresetList proto.InternalMessageInfo -func (m *PodPresetList) Reset() { *m = PodPresetList{} } -func (*PodPresetList) ProtoMessage() {} -func (*PodPresetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } +func (*PodPresetSpec) ProtoMessage() {} +func (*PodPresetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{2} +} +func (m *PodPresetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPresetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPresetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPresetSpec.Merge(m, src) +} +func (m *PodPresetSpec) XXX_Size() int { + return m.Size() +} +func (m *PodPresetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodPresetSpec.DiscardUnknown(m) +} -func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } -func (*PodPresetSpec) ProtoMessage() {} -func (*PodPresetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_PodPresetSpec proto.InternalMessageInfo func init() { proto.RegisterType((*PodPreset)(nil), "k8s.io.api.settings.v1alpha1.PodPreset") proto.RegisterType((*PodPresetList)(nil), "k8s.io.api.settings.v1alpha1.PodPresetList") proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.api.settings.v1alpha1.PodPresetSpec") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptor_48fab0a6ea4b79ce) +} + +var fileDescriptor_48fab0a6ea4b79ce = []byte{ + // 
542 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, + 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, + 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, + 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, + 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, + 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, + 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, + 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, + 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, + 0xed, 0x47, 0x61, 0x24, 0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, + 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, + 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, + 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, + 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, + 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, + 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, + 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, + 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, + 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, + 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, + 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, + 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, + 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, + 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, + 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, + 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, + 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, + 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, + 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, + 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, + 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, + 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, + 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 0x1b, 0xe5, 0x7b, + 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, +} + func (m *PodPreset) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err 
!= nil { return nil, err } @@ -85,33 +187,42 @@ func (m *PodPreset) Marshal() (dAtA []byte, err error) { } func (m *PodPreset) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPreset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodPresetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -119,37 +230,46 @@ func (m *PodPresetList) Marshal() (dAtA []byte, err error) { } func (m *PodPresetList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPresetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -157,79 +277,99 @@ func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { } func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPresetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n4, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } } - i 
+= n4 - if len(m.Env) > 0 { - for _, msg := range m.Env { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } if len(m.EnvFrom) > 0 { - for _, msg := range m.EnvFrom { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - if len(m.Volumes) > 0 { - for _, msg := range m.Volumes { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - if len(m.VolumeMounts) > 0 { - for _, msg := range m.VolumeMounts { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PodPreset) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -240,6 +380,9 @@ func (m *PodPreset) Size() (n int) { } func (m *PodPresetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -254,6 +397,9 @@ func (m *PodPresetList) Size() (n int) { } func (m *PodPresetSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Selector.Size() @@ -286,14 +432,7 @@ func (m *PodPresetSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -303,7 +442,7 @@ func (this *PodPreset) String() string { return "nil" } s := strings.Join([]string{`&PodPreset{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodPresetSpec", "PodPresetSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -313,9 +452,14 @@ func (this *PodPresetList) String() string { if this == 
nil { return "nil" } + repeatedStringForItems := "[]PodPreset{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodPreset", "PodPreset", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PodPresetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodPreset", "PodPreset", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -324,12 +468,32 @@ func (this *PodPresetSpec) String() string { if this == nil { return "nil" } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeMounts += "}" s := strings.Join([]string{`&PodPresetSpec{`, - `Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_api_core_v1.EnvVar", 1), `&`, ``, 1) + `,`, - `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "k8s_io_api_core_v1.EnvFromSource", 1), `&`, ``, 1) + `,`, - `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "k8s_io_api_core_v1.Volume", 1), `&`, ``, 1) + `,`, - `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "k8s_io_api_core_v1.VolumeMount", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, `}`, }, "") return s @@ -357,7 +521,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -385,7 +549,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -394,6 +558,9 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -415,7 +582,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-424,6 +591,9 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -440,6 +610,9 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -467,7 +640,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -495,7 +668,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -504,6 +677,9 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -525,7 +701,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -534,6 +710,9 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -551,6 +730,9 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -578,7 +760,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -606,7 +788,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -615,6 +797,9 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -636,7 +821,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -645,10 +830,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, k8s_io_api_core_v1.EnvVar{}) + m.Env = append(m.Env, v11.EnvVar{}) if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -667,7 +855,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -676,10 +864,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, 
k8s_io_api_core_v1.EnvFromSource{}) + m.EnvFrom = append(m.EnvFrom, v11.EnvFromSource{}) if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -698,7 +889,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -707,10 +898,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, k8s_io_api_core_v1.Volume{}) + m.Volumes = append(m.Volumes, v11.Volume{}) if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -729,7 +923,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -738,10 +932,13 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, k8s_io_api_core_v1.VolumeMount{}) + m.VolumeMounts = append(m.VolumeMounts, v11.VolumeMount{}) if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -755,6 +952,9 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -821,10 +1021,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -853,6 +1056,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -871,45 +1077,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 542 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, - 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, - 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, - 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, - 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, - 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, - 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, - 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, - 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, - 0xed, 0x47, 0x61, 0x24, 
0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, - 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, - 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, - 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, - 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, - 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, - 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, - 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, - 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, - 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, - 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, - 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, - 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, - 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, - 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, - 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, - 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, - 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, - 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, - 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, - 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, - 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, - 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, - 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 0x1b, 0xe5, 0x7b, - 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index da9d91740e075..782f50693f3ff 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -17,40 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto - - It has these top-level messages: - StorageClass - StorageClassList - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError -*/ package v1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -64,52 +46,319 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{0} +} +func (m *StorageClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClass.Merge(m, src) +} +func (m *StorageClass) XXX_Size() int { + return m.Size() +} +func (m *StorageClass) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClass.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageClass proto.InternalMessageInfo + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{1} +} +func (m *StorageClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClassList.Merge(m, src) +} +func (m *StorageClassList) XXX_Size() int { + return m.Size() +} +func (m *StorageClassList) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageClassList proto.InternalMessageInfo -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{2} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil 
+} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{3} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{4} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{5} +} +func 
(m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{6} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{7} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeError proto.InternalMessageInfo func init() { proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1.VolumeAttachment") proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1.VolumeAttachmentList") proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSource") proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus.AttachmentMetadataEntry") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1.VolumeError") } 
+ +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptor_3b530c1983504d8d) +} + +var fileDescriptor_3b530c1983504d8d = []byte{ + // 1018 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x23, 0xc5, + 0x1b, 0xcf, 0xc6, 0x79, 0x71, 0xc6, 0xc9, 0xff, 0x9c, 0xf9, 0x07, 0x30, 0x2e, 0xec, 0xc8, 0x14, + 0x98, 0x83, 0xdb, 0xbd, 0x84, 0x03, 0x9d, 0x90, 0x40, 0xf2, 0x82, 0x25, 0x4e, 0x8a, 0xef, 0xa2, + 0x49, 0x38, 0x21, 0x44, 0xc1, 0x64, 0xf7, 0x61, 0xb3, 0x67, 0xef, 0xce, 0x32, 0x33, 0x36, 0xa4, + 0xa3, 0xa2, 0x43, 0x82, 0x96, 0x8f, 0x42, 0x49, 0x15, 0xba, 0x13, 0xd5, 0x55, 0x16, 0x59, 0x6a, + 0xbe, 0x40, 0x2a, 0x34, 0xb3, 0x13, 0x7b, 0x63, 0x6f, 0xc0, 0x69, 0xae, 0xf3, 0xf3, 0xf2, 0xfb, + 0x3d, 0xef, 0xb3, 0x46, 0x1f, 0xf5, 0x1f, 0x0a, 0x3b, 0x64, 0x4e, 0x7f, 0x78, 0x02, 0x3c, 0x06, + 0x09, 0xc2, 0x19, 0x41, 0xec, 0x33, 0xee, 0x18, 0x03, 0x4d, 0x42, 0x47, 0x48, 0xc6, 0x69, 0x00, + 0xce, 0x68, 0xcf, 0x09, 0x20, 0x06, 0x4e, 0x25, 0xf8, 0x76, 0xc2, 0x99, 0x64, 0xf8, 0x95, 0xcc, + 0xcd, 0xa6, 0x49, 0x68, 0x1b, 0x37, 0x7b, 0xb4, 0x57, 0xbf, 0x17, 0x84, 0xf2, 0x74, 0x78, 0x62, + 0x7b, 0x2c, 0x72, 0x02, 0x16, 0x30, 0x47, 0x7b, 0x9f, 0x0c, 0xbf, 0xd6, 0x92, 0x16, 0xf4, 0xaf, + 0x8c, 0xa5, 0xde, 0xca, 0x05, 0xf3, 0x18, 0x2f, 0x8a, 0x54, 0x7f, 0x30, 0xf5, 0x89, 0xa8, 0x77, + 0x1a, 0xc6, 0xc0, 0xcf, 0x9c, 0xa4, 0x1f, 0x28, 0x85, 0x70, 0x22, 0x90, 0xb4, 0x08, 0xe5, 0xdc, + 0x84, 0xe2, 0xc3, 0x58, 0x86, 0x11, 0xcc, 0x01, 0xde, 0xff, 0x2f, 0x80, 0xf0, 0x4e, 0x21, 0xa2, + 0xb3, 0xb8, 0xd6, 0x8f, 0x6b, 0x68, 0xf3, 0x28, 0x6b, 0xc0, 0xc7, 0x03, 0x2a, 0x04, 0xfe, 0x0a, + 0x95, 0x55, 0x52, 0x3e, 0x95, 0xb4, 0x66, 0xed, 0x5a, 0xed, 0xca, 0xfe, 0x7d, 0x7b, 0xda, 0xac, + 0x09, 0xb7, 0x9d, 0xf4, 0x03, 0xa5, 0x10, 0xb6, 0xf2, 0xb6, 0x47, 0x7b, 0xf6, 0x93, 0x93, 0x67, + 0xe0, 0xc9, 0x1e, 0x48, 0xea, 0xe2, 0xf3, 0x71, 0x73, 0x29, 0x1d, 0x37, 0xd1, 0x54, 0x47, 0x26, + 0xac, 0xf8, 0x3d, 0x54, 0x49, 0x38, 0x1b, 0x85, 0x22, 0x64, 0x31, 0xf0, 0xda, 0xf2, 0xae, 0xd5, + 0xde, 0x70, 0xff, 0x6f, 0x20, 0x95, 0xc3, 0xa9, 0x89, 0xe4, 0xfd, 0x70, 0x80, 0x50, 0x42, 0x39, + 0x8d, 0x40, 0x02, 0x17, 0xb5, 0xd2, 0x6e, 0xa9, 0x5d, 0xd9, 0x7f, 0xd7, 0x2e, 0x9c, 0xa3, 0x9d, + 0xaf, 0xc8, 0x3e, 0x9c, 0xa0, 0xba, 0xb1, 0xe4, 0x67, 0xd3, 0xec, 0xa6, 0x06, 0x92, 0xa3, 0xc6, + 0x7d, 0xb4, 0xc5, 0xc1, 0x1b, 0xd0, 0x30, 0x3a, 0x64, 0x83, 0xd0, 0x3b, 0xab, 0xad, 0xe8, 0x0c, + 0xbb, 0xe9, 0xb8, 0xb9, 0x45, 0xf2, 0x86, 0xcb, 0x71, 0xf3, 0xfe, 0xfc, 0x06, 0xd8, 0x87, 0xc0, + 0x45, 0x28, 0x24, 0xc4, 0xf2, 0x29, 0x1b, 0x0c, 0x23, 0xb8, 0x86, 0x21, 0xd7, 0xb9, 0xf1, 0x03, + 0xb4, 0x19, 0xb1, 0x61, 0x2c, 0x9f, 0x24, 0x32, 0x64, 0xb1, 0xa8, 0xad, 0xee, 0x96, 0xda, 0x1b, + 0x6e, 0x35, 0x1d, 0x37, 0x37, 0x7b, 0x39, 0x3d, 0xb9, 0xe6, 0x85, 0x0f, 0xd0, 0x0e, 0x1d, 0x0c, + 0xd8, 0xb7, 0x59, 0x80, 0xee, 0x77, 0x09, 0x8d, 0x55, 0x97, 0x6a, 0x6b, 0xbb, 0x56, 0xbb, 0xec, + 0xd6, 0xd2, 0x71, 0x73, 0xa7, 0x53, 0x60, 0x27, 0x85, 0x28, 0xfc, 0x39, 0xda, 0x1e, 0x69, 0x95, + 0x1b, 0xc6, 0x7e, 0x18, 0x07, 0x3d, 0xe6, 0x43, 0x6d, 0x5d, 0x17, 0x7d, 0x37, 0x1d, 0x37, 0xb7, + 0x9f, 0xce, 0x1a, 0x2f, 0x8b, 0x94, 0x64, 0x9e, 0x04, 0x7f, 0x83, 0xb6, 0x75, 0x44, 0xf0, 0x8f, + 0x59, 0xc2, 0x06, 0x2c, 0x08, 0x41, 0xd4, 0xca, 0x7a, 0x74, 0xed, 0xfc, 0xe8, 0x54, 0xeb, 0xd4, + 0xdc, 0x8c, 0xd7, 0xd9, 0x11, 0x0c, 0xc0, 0x93, 0x8c, 0x1f, 0x03, 0x8f, 0xdc, 0xd7, 0xcd, 0xbc, + 0xb6, 0x3b, 0xb3, 0x54, 0x64, 0x9e, 0xbd, 0xfe, 0x21, 0xba, 0x33, 0x33, 0x70, 
0x5c, 0x45, 0xa5, + 0x3e, 0x9c, 0xe9, 0x6d, 0xde, 0x20, 0xea, 0x27, 0xde, 0x41, 0xab, 0x23, 0x3a, 0x18, 0x42, 0xb6, + 0x7c, 0x24, 0x13, 0x3e, 0x58, 0x7e, 0x68, 0xb5, 0x7e, 0xb5, 0x50, 0x35, 0xbf, 0x3d, 0x07, 0xa1, + 0x90, 0xf8, 0xcb, 0xb9, 0x9b, 0xb0, 0x17, 0xbb, 0x09, 0x85, 0xd6, 0x17, 0x51, 0x35, 0x35, 0x94, + 0xaf, 0x34, 0xb9, 0x7b, 0xf8, 0x14, 0xad, 0x86, 0x12, 0x22, 0x51, 0x5b, 0xd6, 0x8d, 0x79, 0x63, + 0x81, 0x9d, 0x76, 0xb7, 0x0c, 0xdf, 0xea, 0x23, 0x85, 0x24, 0x19, 0x41, 0xeb, 0x97, 0x65, 0x54, + 0xcd, 0xe6, 0xd2, 0x91, 0x92, 0x7a, 0xa7, 0x11, 0xc4, 0xf2, 0x25, 0x1c, 0x74, 0x0f, 0xad, 0x88, + 0x04, 0x3c, 0xdd, 0xcc, 0xca, 0xfe, 0xdb, 0x37, 0xe4, 0x3f, 0x9b, 0xd8, 0x51, 0x02, 0x9e, 0xbb, + 0x69, 0x88, 0x57, 0x94, 0x44, 0x34, 0x0d, 0xfe, 0x0c, 0xad, 0x09, 0x49, 0xe5, 0x50, 0x1d, 0xb9, + 0x22, 0xbc, 0xb7, 0x28, 0xa1, 0x06, 0xb9, 0xff, 0x33, 0x94, 0x6b, 0x99, 0x4c, 0x0c, 0x59, 0xeb, + 0x37, 0x0b, 0xed, 0xcc, 0x42, 0x5e, 0xc2, 0x74, 0x0f, 0xae, 0x4f, 0xf7, 0xcd, 0x05, 0x8b, 0xb9, + 0x61, 0xc2, 0x7f, 0x58, 0xe8, 0xd5, 0xb9, 0xba, 0xd9, 0x90, 0x7b, 0xa0, 0xde, 0x84, 0x64, 0xe6, + 0xe5, 0x79, 0x4c, 0x23, 0xc8, 0xd6, 0x3e, 0x7b, 0x13, 0x0e, 0x0b, 0xec, 0xa4, 0x10, 0x85, 0x9f, + 0xa1, 0x6a, 0x18, 0x0f, 0xc2, 0x18, 0x32, 0xdd, 0xd1, 0x74, 0xbe, 0x85, 0x87, 0x3b, 0xcb, 0xac, + 0x87, 0xbb, 0x93, 0x8e, 0x9b, 0xd5, 0x47, 0x33, 0x2c, 0x64, 0x8e, 0xb7, 0xf5, 0x7b, 0xc1, 0x64, + 0x94, 0x01, 0xbf, 0x83, 0xca, 0x54, 0x6b, 0x80, 0x9b, 0x32, 0x26, 0x9d, 0xee, 0x18, 0x3d, 0x99, + 0x78, 0xe8, 0xbd, 0xd1, 0xad, 0x30, 0x89, 0x2e, 0xbc, 0x37, 0x1a, 0x94, 0xdb, 0x1b, 0x2d, 0x13, + 0x43, 0xa6, 0x92, 0x88, 0x99, 0x9f, 0xf5, 0xb2, 0x74, 0x3d, 0x89, 0xc7, 0x46, 0x4f, 0x26, 0x1e, + 0xad, 0xbf, 0x4b, 0x05, 0x03, 0xd2, 0x0b, 0x98, 0xab, 0xc6, 0xd7, 0xd5, 0x94, 0xe7, 0xaa, 0xf1, + 0x27, 0xd5, 0xf8, 0xf8, 0x67, 0x0b, 0x61, 0x3a, 0xa1, 0xe8, 0x5d, 0x2d, 0x68, 0xb6, 0x45, 0xdd, + 0x5b, 0x9d, 0x84, 0xdd, 0x99, 0xe3, 0xc9, 0xbe, 0x84, 0x75, 0x13, 0x1f, 0xcf, 0x3b, 0x90, 0x82, + 0xe0, 0xd8, 0x47, 0x95, 0x4c, 0xdb, 0xe5, 0x9c, 0x71, 0x73, 0x9e, 0xad, 0x7f, 0xcd, 0x45, 0x7b, + 0xba, 0x0d, 0xf5, 0x65, 0xef, 0x4c, 0xa1, 0x97, 0xe3, 0x66, 0x25, 0x67, 0x27, 0x79, 0x5a, 0x15, + 0xc5, 0x87, 0x69, 0x94, 0x95, 0xdb, 0x45, 0xf9, 0x04, 0x6e, 0x8e, 0x92, 0xa3, 0xad, 0x77, 0xd1, + 0x6b, 0x37, 0xb4, 0xe5, 0x56, 0xdf, 0x8b, 0x1f, 0x2c, 0x94, 0x8f, 0x81, 0x0f, 0xd0, 0x8a, 0xfa, + 0xbb, 0x65, 0x1e, 0x92, 0xbb, 0x8b, 0x3d, 0x24, 0xc7, 0x61, 0x04, 0xd3, 0xa7, 0x50, 0x49, 0x44, + 0xb3, 0xe0, 0xb7, 0xd0, 0x7a, 0x04, 0x42, 0xd0, 0xc0, 0x44, 0x76, 0xef, 0x18, 0xa7, 0xf5, 0x5e, + 0xa6, 0x26, 0x57, 0x76, 0xb7, 0x7d, 0x7e, 0xd1, 0x58, 0x7a, 0x7e, 0xd1, 0x58, 0x7a, 0x71, 0xd1, + 0x58, 0xfa, 0x3e, 0x6d, 0x58, 0xe7, 0x69, 0xc3, 0x7a, 0x9e, 0x36, 0xac, 0x17, 0x69, 0xc3, 0xfa, + 0x33, 0x6d, 0x58, 0x3f, 0xfd, 0xd5, 0x58, 0xfa, 0x62, 0x79, 0xb4, 0xf7, 0x4f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xe2, 0xd4, 0x42, 0x3d, 0x3c, 0x0b, 0x00, 0x00, +} + func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -117,100 +366,108 @@ func (m *StorageClass) Marshal() (dAtA []byte, err error) { } func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) - i += copy(dAtA[i:], m.Provisioner) - if len(m.Parameters) > 0 { - keysForParameters := make([]string, 0, len(m.Parameters)) - for k := range m.Parameters { - keysForParameters = append(keysForParameters, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - for _, k := range keysForParameters { - dAtA[i] = 0x1a - i++ - v := m.Parameters[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + if len(m.AllowedTopologies) > 0 { + for iNdEx := len(m.AllowedTopologies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedTopologies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } } - if m.ReclaimPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) - i += copy(dAtA[i:], *m.ReclaimPolicy) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if m.VolumeBindingMode != nil { + i -= len(*m.VolumeBindingMode) + copy(dAtA[i:], *m.VolumeBindingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i-- + dAtA[i] = 0x3a } if m.AllowVolumeExpansion != nil { - dAtA[i] = 0x30 - i++ + i-- if *m.AllowVolumeExpansion { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x30 } - if m.VolumeBindingMode != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) - i += copy(dAtA[i:], *m.VolumeBindingMode) + if len(m.MountOptions) > 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } - if len(m.AllowedTopologies) > 0 { - for _, msg := range m.AllowedTopologies { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if m.ReclaimPolicy != nil { + i -= len(*m.ReclaimPolicy) + copy(dAtA[i:], *m.ReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + 
dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.Provisioner) + copy(dAtA[i:], m.Provisioner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StorageClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -218,37 +475,46 @@ func (m *StorageClassList) Marshal() (dAtA []byte, err error) { } func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -256,41 +522,52 @@ func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -298,37 +575,46 @@ func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -336,33 +622,41 @@ func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) - } if m.InlineVolumeSpec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InlineVolumeSpec.Size())) - n7, err := m.InlineVolumeSpec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- + dAtA[i] = 0x12 + } + if m.PersistentVolumeName != nil { + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -370,33 +664,42 @@ func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - 
dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n8, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x1a - i++ + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -404,67 +707,78 @@ func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i++ if len(m.AttachmentMetadata) > 0 { keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) for k := range m.AttachmentMetadata { keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n9, err := m.AttachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err } - i += n9 } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.DetachError.Size())) - n10, err := m.DetachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 + i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -472,35 +786,48 @@ func (m *VolumeError) Marshal() (dAtA []byte, err error) { } func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n11, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - dAtA[i] = 0x12 - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *StorageClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -542,6 +869,9 @@ func (m *StorageClass) Size() (n int) { } func (m *StorageClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -556,6 +886,9 @@ func (m *StorageClassList) Size() (n int) { } func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -568,6 +901,9 @@ func (m *VolumeAttachment) Size() (n int) { } func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -582,6 +918,9 @@ func (m *VolumeAttachmentList) Size() (n int) { } func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PersistentVolumeName != nil { @@ -596,6 +935,9 @@ func (m *VolumeAttachmentSource) Size() (n int) { } func (m *VolumeAttachmentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Attacher) @@ -608,6 +950,9 @@ func (m *VolumeAttachmentSpec) Size() (n int) { } func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -631,6 +976,9 @@ func (m *VolumeAttachmentStatus) Size() (n int) { } func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Time.Size() @@ -641,14 +989,7 @@ func (m *VolumeError) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -657,6 +998,11 @@ func (this *StorageClass) String() string { if this == nil { return "nil" } + repeatedStringForAllowedTopologies := "[]TopologySelectorTerm{" + for _, f := range this.AllowedTopologies { + 
repeatedStringForAllowedTopologies += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAllowedTopologies += "}" keysForParameters := make([]string, 0, len(this.Parameters)) for k := range this.Parameters { keysForParameters = append(keysForParameters, k) @@ -668,14 +1014,14 @@ func (this *StorageClass) String() string { } mapStringForParameters += "}" s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, `Parameters:` + mapStringForParameters + `,`, `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, - `AllowedTopologies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedTopologies), "TopologySelectorTerm", "k8s_io_api_core_v1.TopologySelectorTerm", 1), `&`, ``, 1) + `,`, + `AllowedTopologies:` + repeatedStringForAllowedTopologies + `,`, `}`, }, "") return s @@ -684,9 +1030,14 @@ func (this *StorageClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StorageClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageClass", "StorageClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -696,7 +1047,7 @@ func (this *VolumeAttachment) String() string { return "nil" } s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -707,9 +1058,14 @@ func (this *VolumeAttachmentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -720,7 +1076,7 @@ func (this *VolumeAttachmentSource) String() string { } s := strings.Join([]string{`&VolumeAttachmentSource{`, `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, - `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "k8s_io_api_core_v1.PersistentVolumeSpec", 1) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, `}`, }, "") return s @@ -754,8 +1110,8 @@ func (this *VolumeAttachmentStatus) String() string { s := strings.Join([]string{`&VolumeAttachmentStatus{`, `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, `}`, }, "") return s @@ -765,7 +1121,7 @@ func (this *VolumeError) String() string { return "nil" } s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, }, "") @@ -794,7 +1150,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -822,7 +1178,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -831,6 +1187,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -852,7 +1211,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -862,6 +1221,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -881,7 +1243,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -890,6 +1252,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -910,7 +1275,7 @@ 
func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -927,7 +1292,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -937,6 +1302,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -953,7 +1321,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -963,6 +1331,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -999,7 +1370,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1009,6 +1380,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1029,7 +1403,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1039,6 +1413,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1058,7 +1435,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1079,7 +1456,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1089,6 +1466,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1109,7 +1489,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1118,10 +1498,13 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) + m.AllowedTopologies = append(m.AllowedTopologies, v11.TopologySelectorTerm{}) if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1135,6 +1518,9 @@ func (m 
*StorageClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1162,7 +1548,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1190,7 +1576,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1199,6 +1585,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1220,7 +1609,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1229,6 +1618,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1246,6 +1638,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1273,7 +1668,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1301,7 +1696,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1310,6 +1705,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1331,7 +1729,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1340,6 +1738,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1361,7 +1762,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1370,6 +1771,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1386,6 +1790,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1413,7 +1820,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << 
shift if b < 0x80 { break } @@ -1441,7 +1848,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1450,6 +1857,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1471,7 +1881,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1480,6 +1890,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1497,6 +1910,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1524,7 +1940,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1552,7 +1968,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1562,6 +1978,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1582,7 +2001,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1591,11 +2010,14 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.InlineVolumeSpec == nil { - m.InlineVolumeSpec = &k8s_io_api_core_v1.PersistentVolumeSpec{} + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} } if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1610,6 +2032,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1637,7 +2062,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1665,7 +2090,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1675,6 +2100,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1694,7 +2122,7 @@ 
func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1703,6 +2131,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1724,7 +2155,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1734,6 +2165,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1748,6 +2182,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1775,7 +2212,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1803,7 +2240,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1823,7 +2260,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1832,6 +2269,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1852,7 +2292,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1869,7 +2309,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1879,6 +2319,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1895,7 +2338,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1905,6 +2348,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -1941,7 +2387,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1950,6 +2396,9 @@ func (m 
*VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1974,7 +2423,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1983,6 +2432,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2002,6 +2454,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2029,7 +2484,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2057,7 +2512,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2066,6 +2521,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2087,7 +2545,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2097,6 +2555,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2111,6 +2572,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2177,10 +2641,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -2209,6 +2676,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -2227,75 +2697,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1018 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x23, 0xc5, - 0x1b, 0xcf, 0xc6, 0x79, 0x71, 0xc6, 0xc9, 0xff, 0x9c, 0xf9, 0x07, 0x30, 0x2e, 0xec, 0xc8, 0x14, - 0x98, 0x83, 0xdb, 0xbd, 0x84, 0x03, 0x9d, 0x90, 0x40, 0xf2, 0x82, 0x25, 0x4e, 0x8a, 0xef, 0xa2, - 0x49, 0x38, 0x21, 0x44, 0xc1, 0x64, 0xf7, 0x61, 0xb3, 0x67, 0xef, 0xce, 0x32, 0x33, 0x36, 
0xa4, - 0xa3, 0xa2, 0x43, 0x82, 0x96, 0x8f, 0x42, 0x49, 0x15, 0xba, 0x13, 0xd5, 0x55, 0x16, 0x59, 0x6a, - 0xbe, 0x40, 0x2a, 0x34, 0xb3, 0x13, 0x7b, 0x63, 0x6f, 0xc0, 0x69, 0xae, 0xf3, 0xf3, 0xf2, 0xfb, - 0x3d, 0xef, 0xb3, 0x46, 0x1f, 0xf5, 0x1f, 0x0a, 0x3b, 0x64, 0x4e, 0x7f, 0x78, 0x02, 0x3c, 0x06, - 0x09, 0xc2, 0x19, 0x41, 0xec, 0x33, 0xee, 0x18, 0x03, 0x4d, 0x42, 0x47, 0x48, 0xc6, 0x69, 0x00, - 0xce, 0x68, 0xcf, 0x09, 0x20, 0x06, 0x4e, 0x25, 0xf8, 0x76, 0xc2, 0x99, 0x64, 0xf8, 0x95, 0xcc, - 0xcd, 0xa6, 0x49, 0x68, 0x1b, 0x37, 0x7b, 0xb4, 0x57, 0xbf, 0x17, 0x84, 0xf2, 0x74, 0x78, 0x62, - 0x7b, 0x2c, 0x72, 0x02, 0x16, 0x30, 0x47, 0x7b, 0x9f, 0x0c, 0xbf, 0xd6, 0x92, 0x16, 0xf4, 0xaf, - 0x8c, 0xa5, 0xde, 0xca, 0x05, 0xf3, 0x18, 0x2f, 0x8a, 0x54, 0x7f, 0x30, 0xf5, 0x89, 0xa8, 0x77, - 0x1a, 0xc6, 0xc0, 0xcf, 0x9c, 0xa4, 0x1f, 0x28, 0x85, 0x70, 0x22, 0x90, 0xb4, 0x08, 0xe5, 0xdc, - 0x84, 0xe2, 0xc3, 0x58, 0x86, 0x11, 0xcc, 0x01, 0xde, 0xff, 0x2f, 0x80, 0xf0, 0x4e, 0x21, 0xa2, - 0xb3, 0xb8, 0xd6, 0x8f, 0x6b, 0x68, 0xf3, 0x28, 0x6b, 0xc0, 0xc7, 0x03, 0x2a, 0x04, 0xfe, 0x0a, - 0x95, 0x55, 0x52, 0x3e, 0x95, 0xb4, 0x66, 0xed, 0x5a, 0xed, 0xca, 0xfe, 0x7d, 0x7b, 0xda, 0xac, - 0x09, 0xb7, 0x9d, 0xf4, 0x03, 0xa5, 0x10, 0xb6, 0xf2, 0xb6, 0x47, 0x7b, 0xf6, 0x93, 0x93, 0x67, - 0xe0, 0xc9, 0x1e, 0x48, 0xea, 0xe2, 0xf3, 0x71, 0x73, 0x29, 0x1d, 0x37, 0xd1, 0x54, 0x47, 0x26, - 0xac, 0xf8, 0x3d, 0x54, 0x49, 0x38, 0x1b, 0x85, 0x22, 0x64, 0x31, 0xf0, 0xda, 0xf2, 0xae, 0xd5, - 0xde, 0x70, 0xff, 0x6f, 0x20, 0x95, 0xc3, 0xa9, 0x89, 0xe4, 0xfd, 0x70, 0x80, 0x50, 0x42, 0x39, - 0x8d, 0x40, 0x02, 0x17, 0xb5, 0xd2, 0x6e, 0xa9, 0x5d, 0xd9, 0x7f, 0xd7, 0x2e, 0x9c, 0xa3, 0x9d, - 0xaf, 0xc8, 0x3e, 0x9c, 0xa0, 0xba, 0xb1, 0xe4, 0x67, 0xd3, 0xec, 0xa6, 0x06, 0x92, 0xa3, 0xc6, - 0x7d, 0xb4, 0xc5, 0xc1, 0x1b, 0xd0, 0x30, 0x3a, 0x64, 0x83, 0xd0, 0x3b, 0xab, 0xad, 0xe8, 0x0c, - 0xbb, 0xe9, 0xb8, 0xb9, 0x45, 0xf2, 0x86, 0xcb, 0x71, 0xf3, 0xfe, 0xfc, 0x06, 0xd8, 0x87, 0xc0, - 0x45, 0x28, 0x24, 0xc4, 0xf2, 0x29, 0x1b, 0x0c, 0x23, 0xb8, 0x86, 0x21, 0xd7, 0xb9, 0xf1, 0x03, - 0xb4, 0x19, 0xb1, 0x61, 0x2c, 0x9f, 0x24, 0x32, 0x64, 0xb1, 0xa8, 0xad, 0xee, 0x96, 0xda, 0x1b, - 0x6e, 0x35, 0x1d, 0x37, 0x37, 0x7b, 0x39, 0x3d, 0xb9, 0xe6, 0x85, 0x0f, 0xd0, 0x0e, 0x1d, 0x0c, - 0xd8, 0xb7, 0x59, 0x80, 0xee, 0x77, 0x09, 0x8d, 0x55, 0x97, 0x6a, 0x6b, 0xbb, 0x56, 0xbb, 0xec, - 0xd6, 0xd2, 0x71, 0x73, 0xa7, 0x53, 0x60, 0x27, 0x85, 0x28, 0xfc, 0x39, 0xda, 0x1e, 0x69, 0x95, - 0x1b, 0xc6, 0x7e, 0x18, 0x07, 0x3d, 0xe6, 0x43, 0x6d, 0x5d, 0x17, 0x7d, 0x37, 0x1d, 0x37, 0xb7, - 0x9f, 0xce, 0x1a, 0x2f, 0x8b, 0x94, 0x64, 0x9e, 0x04, 0x7f, 0x83, 0xb6, 0x75, 0x44, 0xf0, 0x8f, - 0x59, 0xc2, 0x06, 0x2c, 0x08, 0x41, 0xd4, 0xca, 0x7a, 0x74, 0xed, 0xfc, 0xe8, 0x54, 0xeb, 0xd4, - 0xdc, 0x8c, 0xd7, 0xd9, 0x11, 0x0c, 0xc0, 0x93, 0x8c, 0x1f, 0x03, 0x8f, 0xdc, 0xd7, 0xcd, 0xbc, - 0xb6, 0x3b, 0xb3, 0x54, 0x64, 0x9e, 0xbd, 0xfe, 0x21, 0xba, 0x33, 0x33, 0x70, 0x5c, 0x45, 0xa5, - 0x3e, 0x9c, 0xe9, 0x6d, 0xde, 0x20, 0xea, 0x27, 0xde, 0x41, 0xab, 0x23, 0x3a, 0x18, 0x42, 0xb6, - 0x7c, 0x24, 0x13, 0x3e, 0x58, 0x7e, 0x68, 0xb5, 0x7e, 0xb5, 0x50, 0x35, 0xbf, 0x3d, 0x07, 0xa1, - 0x90, 0xf8, 0xcb, 0xb9, 0x9b, 0xb0, 0x17, 0xbb, 0x09, 0x85, 0xd6, 0x17, 0x51, 0x35, 0x35, 0x94, - 0xaf, 0x34, 0xb9, 0x7b, 0xf8, 0x14, 0xad, 0x86, 0x12, 0x22, 0x51, 0x5b, 0xd6, 0x8d, 0x79, 0x63, - 0x81, 0x9d, 0x76, 0xb7, 0x0c, 0xdf, 0xea, 0x23, 0x85, 0x24, 0x19, 0x41, 0xeb, 0x97, 0x65, 0x54, - 0xcd, 0xe6, 0xd2, 0x91, 0x92, 0x7a, 0xa7, 0x11, 0xc4, 0xf2, 0x25, 0x1c, 0x74, 0x0f, 0xad, 0x88, - 0x04, 0x3c, 0xdd, 
0xcc, 0xca, 0xfe, 0xdb, 0x37, 0xe4, 0x3f, 0x9b, 0xd8, 0x51, 0x02, 0x9e, 0xbb, - 0x69, 0x88, 0x57, 0x94, 0x44, 0x34, 0x0d, 0xfe, 0x0c, 0xad, 0x09, 0x49, 0xe5, 0x50, 0x1d, 0xb9, - 0x22, 0xbc, 0xb7, 0x28, 0xa1, 0x06, 0xb9, 0xff, 0x33, 0x94, 0x6b, 0x99, 0x4c, 0x0c, 0x59, 0xeb, - 0x37, 0x0b, 0xed, 0xcc, 0x42, 0x5e, 0xc2, 0x74, 0x0f, 0xae, 0x4f, 0xf7, 0xcd, 0x05, 0x8b, 0xb9, - 0x61, 0xc2, 0x7f, 0x58, 0xe8, 0xd5, 0xb9, 0xba, 0xd9, 0x90, 0x7b, 0xa0, 0xde, 0x84, 0x64, 0xe6, - 0xe5, 0x79, 0x4c, 0x23, 0xc8, 0xd6, 0x3e, 0x7b, 0x13, 0x0e, 0x0b, 0xec, 0xa4, 0x10, 0x85, 0x9f, - 0xa1, 0x6a, 0x18, 0x0f, 0xc2, 0x18, 0x32, 0xdd, 0xd1, 0x74, 0xbe, 0x85, 0x87, 0x3b, 0xcb, 0xac, - 0x87, 0xbb, 0x93, 0x8e, 0x9b, 0xd5, 0x47, 0x33, 0x2c, 0x64, 0x8e, 0xb7, 0xf5, 0x7b, 0xc1, 0x64, - 0x94, 0x01, 0xbf, 0x83, 0xca, 0x54, 0x6b, 0x80, 0x9b, 0x32, 0x26, 0x9d, 0xee, 0x18, 0x3d, 0x99, - 0x78, 0xe8, 0xbd, 0xd1, 0xad, 0x30, 0x89, 0x2e, 0xbc, 0x37, 0x1a, 0x94, 0xdb, 0x1b, 0x2d, 0x13, - 0x43, 0xa6, 0x92, 0x88, 0x99, 0x9f, 0xf5, 0xb2, 0x74, 0x3d, 0x89, 0xc7, 0x46, 0x4f, 0x26, 0x1e, - 0xad, 0xbf, 0x4b, 0x05, 0x03, 0xd2, 0x0b, 0x98, 0xab, 0xc6, 0xd7, 0xd5, 0x94, 0xe7, 0xaa, 0xf1, - 0x27, 0xd5, 0xf8, 0xf8, 0x67, 0x0b, 0x61, 0x3a, 0xa1, 0xe8, 0x5d, 0x2d, 0x68, 0xb6, 0x45, 0xdd, - 0x5b, 0x9d, 0x84, 0xdd, 0x99, 0xe3, 0xc9, 0xbe, 0x84, 0x75, 0x13, 0x1f, 0xcf, 0x3b, 0x90, 0x82, - 0xe0, 0xd8, 0x47, 0x95, 0x4c, 0xdb, 0xe5, 0x9c, 0x71, 0x73, 0x9e, 0xad, 0x7f, 0xcd, 0x45, 0x7b, - 0xba, 0x0d, 0xf5, 0x65, 0xef, 0x4c, 0xa1, 0x97, 0xe3, 0x66, 0x25, 0x67, 0x27, 0x79, 0x5a, 0x15, - 0xc5, 0x87, 0x69, 0x94, 0x95, 0xdb, 0x45, 0xf9, 0x04, 0x6e, 0x8e, 0x92, 0xa3, 0xad, 0x77, 0xd1, - 0x6b, 0x37, 0xb4, 0xe5, 0x56, 0xdf, 0x8b, 0x1f, 0x2c, 0x94, 0x8f, 0x81, 0x0f, 0xd0, 0x8a, 0xfa, - 0xbb, 0x65, 0x1e, 0x92, 0xbb, 0x8b, 0x3d, 0x24, 0xc7, 0x61, 0x04, 0xd3, 0xa7, 0x50, 0x49, 0x44, - 0xb3, 0xe0, 0xb7, 0xd0, 0x7a, 0x04, 0x42, 0xd0, 0xc0, 0x44, 0x76, 0xef, 0x18, 0xa7, 0xf5, 0x5e, - 0xa6, 0x26, 0x57, 0x76, 0xb7, 0x7d, 0x7e, 0xd1, 0x58, 0x7a, 0x7e, 0xd1, 0x58, 0x7a, 0x71, 0xd1, - 0x58, 0xfa, 0x3e, 0x6d, 0x58, 0xe7, 0x69, 0xc3, 0x7a, 0x9e, 0x36, 0xac, 0x17, 0x69, 0xc3, 0xfa, - 0x33, 0x6d, 0x58, 0x3f, 0xfd, 0xd5, 0x58, 0xfa, 0x62, 0x79, 0xb4, 0xf7, 0x4f, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xe2, 0xd4, 0x42, 0x3d, 0x3c, 0x0b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index df7823593e3c1..4087b7e5d3bf6 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -36,7 +36,7 @@ option go_package = "v1"; // according to etcd is in ObjectMeta.Name. message StorageClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -80,7 +80,7 @@ message StorageClass { // StorageClassList is a collection of storage classes. message StorageClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -94,7 +94,7 @@ message StorageClassList { // VolumeAttachment objects are non-namespaced. 
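The regenerated unmarshalers above (and throughout this diff) add the same guard after every decoded length: besides rejecting a negative length, they now also reject a `postIndex` or `iNdEx + skippy` that has wrapped negative from integer overflow. A minimal standalone sketch of that pattern, assuming nothing from the generated types; the function and error names below are illustrative, not part of the gogo/protobuf API:

```go
package main

import (
	"errors"
	"fmt"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

// readLengthPrefixed decodes a varint length at data[idx:] and returns the
// bytes it delimits. It mirrors the guards added in the regenerated code:
// both the decoded length and idx+length must be non-negative before the
// usual bounds check against len(data).
func readLengthPrefixed(data []byte, idx int) (field []byte, next int, err error) {
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errors.New("proto: integer overflow")
		}
		if idx >= len(data) {
			return nil, 0, errors.New("unexpected EOF")
		}
		b := data[idx]
		idx++
		msglen |= int(b&0x7F) << shift // same form as the regenerated decoders
		if b < 0x80 {
			break
		}
	}
	if msglen < 0 {
		return nil, 0, errInvalidLength
	}
	postIndex := idx + msglen
	if postIndex < 0 { // new guard: idx+msglen overflowed int
		return nil, 0, errInvalidLength
	}
	if postIndex > len(data) {
		return nil, 0, errors.New("unexpected EOF")
	}
	return data[idx:postIndex], postIndex, nil
}

func main() {
	// 0x03 = length 3, followed by three payload bytes.
	field, next, err := readLengthPrefixed([]byte{0x03, 'a', 'b', 'c'}, 0)
	fmt.Println(string(field), next, err) // abc 4 <nil>
}
```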
message VolumeAttachment { // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -112,7 +112,7 @@ message VolumeAttachment { // VolumeAttachmentList is a collection of VolumeAttachment objects. message VolumeAttachmentList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 21531c9e14657..e42ee5e9c59fb 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -33,7 +33,7 @@ import ( type StorageClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type StorageClass struct { type StorageClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -115,7 +115,7 @@ type VolumeAttachment struct { metav1.TypeMeta `json:",inline"` // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -136,7 +136,7 @@ type VolumeAttachment struct { type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index e31dd7f712b47..78ff19c21126c 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "provisioner": "Provisioner indicates the type of the provisioner.", "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", @@ -45,7 +45,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of StorageClasses", } @@ -55,7 +55,7 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } @@ -66,7 +66,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of VolumeAttachments", } diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 8630937a29bcb..423243521ce52 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -17,38 +17,21 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto -/* - Package v1alpha1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto - - It has these top-level messages: - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError -*/ package v1alpha1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/core/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -62,29 +45,173 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{0} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{1} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{2} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) 
+} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{3} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{4} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{5} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeError proto.InternalMessageInfo func init() { proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachment") @@ -92,12 +219,69 @@ func init() { proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSource") proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus.AttachmentMetadataEntry") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptor_10f856db1e670dc4) +} + +var fileDescriptor_10f856db1e670dc4 = []byte{ + // 745 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0x0d, 0x1f, 0xd1, 0x2a, 0x82, 0x28, 0x48, 0x4e, 0x95, 0x53, + 0x40, 0x74, 0x4d, 0x0a, 0x42, 0x15, 0xb7, 0x58, 0xed, 0xa1, 0xa2, 0x2d, 0x68, 0x8b, 0x38, 0x00, + 0x07, 0x36, 0xf6, 0xe2, 0xb8, 0x89, 0x3f, 0xe4, 0x5d, 0x47, 0xea, 0x8d, 0x13, 0x67, 0x6e, 0xbc, + 0x01, 0xcf, 0x92, 0x1b, 0x15, 0xa7, 0x9e, 0x22, 0x6a, 0xde, 0x82, 0x0b, 0x68, 0xd7, 0x9b, 0xc4, + 0x24, 0x29, 0xb4, 0xbd, 0x79, 0x66, 0x67, 0x7e, 0x33, 0xf3, 0xdf, 0xf1, 0x82, 0x9d, 0xfe, 0x36, + 0x43, 0x6e, 0x60, 0xf4, 0xe3, 0x2e, 0x8d, 0x7c, 0xca, 0x29, 0x33, 0x86, 0xd4, 0xb7, 0x83, 0xc8, + 0x50, 0x07, 0x24, 0x74, 0x0d, 0xc6, 0x83, 0x88, 0x38, 0xd4, 0x18, 0xb6, 0xc9, 0x20, 0xec, 0x91, + 0xb6, 0xe1, 0x50, 0x9f, 0x46, 0x84, 0x53, 0x1b, 0x85, 0x51, 0xc0, 0x03, 0x78, 0x2f, 0x0d, 0x46, + 0x24, 0x74, 0x91, 0x0a, 0x46, 0x93, 0xe0, 0xfa, 0xa6, 0xe3, 0xf2, 0x5e, 0xdc, 0x45, 0x56, 0xe0, + 0x19, 0x4e, 0xe0, 0x04, 0x86, 0xcc, 0xe9, 0xc6, 0x1f, 0xa4, 0x25, 0x0d, 0xf9, 0x95, 0xb2, 0xea, + 0xcd, 0x4c, 0x61, 0x2b, 0x88, 0x44, 0xd5, 0xf9, 0x7a, 0xf5, 0x27, 0xb3, 0x18, 0x8f, 0x58, 0x3d, + 0xd7, 0xa7, 0xd1, 0x89, 0x11, 0xf6, 0x1d, 0xe1, 0x60, 0x86, 0x47, 0x39, 0x59, 0x96, 0x65, 0x5c, + 0x94, 0x15, 0xc5, 0x3e, 0x77, 0x3d, 0xba, 0x90, 0xf0, 0xf4, 0x7f, 0x09, 0xcc, 0xea, 0x51, 0x8f, + 0xcc, 0xe7, 0x35, 0xbf, 0xe6, 0x41, 0xe5, 0x75, 0x30, 0x88, 0x3d, 0xda, 0xe1, 0x9c, 0x58, 0x3d, + 0x8f, 0xfa, 0x1c, 0xbe, 0x07, 0x25, 0xd1, 0x98, 0x4d, 0x38, 0xa9, 0x69, 0x1b, 0x5a, 0xab, 0xbc, + 0xf5, 0x08, 0xcd, 0x64, 0x9b, 0xf2, 0x51, 0xd8, 0x77, 0x84, 0x83, 0x21, 0x11, 0x8d, 0x86, 0x6d, + 0xf4, 0xa2, 0x7b, 0x4c, 0x2d, 0x7e, 0x40, 0x39, 0x31, 0xe1, 0x68, 0xdc, 0xc8, 0x25, 0xe3, 0x06, + 0x98, 0xf9, 0xf0, 0x94, 0x0a, 0x8f, 0x40, 0x91, 0x85, 0xd4, 0xaa, 0xe5, 0x25, 0xbd, 0x8d, 0xfe, + 0x71, 0x29, 0x68, 0xbe, 0xbd, 0xa3, 0x90, 0x5a, 0xe6, 0x0d, 0x85, 0x2f, 0x0a, 0x0b, 0x4b, 0x18, + 0x7c, 0x0b, 0x56, 0x19, 0x27, 0x3c, 0x66, 0xb5, 0x82, 0xc4, 0x3e, 0xbe, 0x1a, 0x56, 0xa6, 0x9a, + 0xb7, 0x14, 0x78, 0x35, 0xb5, 0xb1, 0x42, 0x36, 0x47, 0x1a, 0xa8, 0xce, 0xa7, 0xec, 0xbb, 0x8c, + 0xc3, 0x77, 0x0b, 0x62, 0xa1, 
0xcb, 0x89, 0x25, 0xb2, 0xa5, 0x54, 0x15, 0x55, 0xb2, 0x34, 0xf1, + 0x64, 0x84, 0xc2, 0x60, 0xc5, 0xe5, 0xd4, 0x63, 0xb5, 0xfc, 0x46, 0xa1, 0x55, 0xde, 0xda, 0xbc, + 0xd2, 0x48, 0xe6, 0x4d, 0x45, 0x5e, 0xd9, 0x13, 0x0c, 0x9c, 0xa2, 0x9a, 0xdf, 0x35, 0x70, 0x67, + 0x61, 0xfa, 0x20, 0x8e, 0x2c, 0x0a, 0xf7, 0x41, 0x35, 0xa4, 0x11, 0x73, 0x19, 0xa7, 0x3e, 0x4f, + 0x63, 0x0e, 0x89, 0x47, 0xe5, 0x60, 0xeb, 0x66, 0x2d, 0x19, 0x37, 0xaa, 0x2f, 0x97, 0x9c, 0xe3, + 0xa5, 0x59, 0xf0, 0x18, 0x54, 0x5c, 0x7f, 0xe0, 0xfa, 0x34, 0xf5, 0x1d, 0xcd, 0x6e, 0xbc, 0x95, + 0x9d, 0x43, 0xfc, 0x3a, 0x42, 0x90, 0x79, 0xb2, 0xbc, 0xe8, 0x6a, 0x32, 0x6e, 0x54, 0xf6, 0xe6, + 0x28, 0x78, 0x81, 0xdb, 0xfc, 0xb6, 0xe4, 0x7e, 0xc4, 0x01, 0x7c, 0x08, 0x4a, 0x44, 0x7a, 0x68, + 0xa4, 0xc6, 0x98, 0xea, 0xdd, 0x51, 0x7e, 0x3c, 0x8d, 0x90, 0x3b, 0x24, 0xa5, 0x50, 0x8d, 0x5e, + 0x71, 0x87, 0x64, 0x6a, 0x66, 0x87, 0xa4, 0x8d, 0x15, 0x52, 0xb4, 0xe2, 0x07, 0x76, 0xaa, 0x68, + 0xe1, 0xef, 0x56, 0x0e, 0x95, 0x1f, 0x4f, 0x23, 0x9a, 0xbf, 0x0b, 0x4b, 0xae, 0x49, 0x2e, 0x63, + 0x66, 0x26, 0x5b, 0xce, 0x54, 0x5a, 0x98, 0xc9, 0x9e, 0xce, 0x64, 0xc3, 0x2f, 0x1a, 0x80, 0x64, + 0x8a, 0x38, 0x98, 0x2c, 0x6b, 0xba, 0x51, 0xcf, 0xaf, 0xf1, 0x93, 0xa0, 0xce, 0x02, 0x6d, 0xd7, + 0xe7, 0xd1, 0x89, 0x59, 0x57, 0x5d, 0xc0, 0xc5, 0x00, 0xbc, 0xa4, 0x05, 0x78, 0x0c, 0xca, 0xa9, + 0x77, 0x37, 0x8a, 0x82, 0x48, 0xfd, 0xb6, 0xad, 0x4b, 0x74, 0x24, 0xe3, 0x4d, 0x3d, 0x19, 0x37, + 0xca, 0x9d, 0x19, 0xe0, 0xd7, 0xb8, 0x51, 0xce, 0x9c, 0xe3, 0x2c, 0x5c, 0xd4, 0xb2, 0xe9, 0xac, + 0x56, 0xf1, 0x3a, 0xb5, 0x76, 0xe8, 0xc5, 0xb5, 0x32, 0xf0, 0xfa, 0x2e, 0xb8, 0x7b, 0x81, 0x44, + 0xb0, 0x02, 0x0a, 0x7d, 0x7a, 0x92, 0x6e, 0x22, 0x16, 0x9f, 0xb0, 0x0a, 0x56, 0x86, 0x64, 0x10, + 0xa7, 0x1b, 0xb7, 0x8e, 0x53, 0xe3, 0x59, 0x7e, 0x5b, 0x6b, 0x7e, 0xd2, 0x40, 0xb6, 0x06, 0xdc, + 0x07, 0x45, 0xf1, 0x96, 0xab, 0x67, 0xe6, 0xc1, 0xe5, 0x9e, 0x99, 0x57, 0xae, 0x47, 0x67, 0xcf, + 0xa5, 0xb0, 0xb0, 0xa4, 0xc0, 0xfb, 0x60, 0xcd, 0xa3, 0x8c, 0x11, 0x47, 0x55, 0x36, 0x6f, 0xab, + 0xa0, 0xb5, 0x83, 0xd4, 0x8d, 0x27, 0xe7, 0x26, 0x1a, 0x9d, 0xeb, 0xb9, 0xd3, 0x73, 0x3d, 0x77, + 0x76, 0xae, 0xe7, 0x3e, 0x26, 0xba, 0x36, 0x4a, 0x74, 0xed, 0x34, 0xd1, 0xb5, 0xb3, 0x44, 0xd7, + 0x7e, 0x24, 0xba, 0xf6, 0xf9, 0xa7, 0x9e, 0x7b, 0x53, 0x9a, 0x08, 0xf7, 0x27, 0x00, 0x00, 0xff, + 0xff, 0xe8, 0x45, 0xe3, 0xba, 0xab, 0x07, 0x00, 0x00, +} + func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -105,41 +289,52 @@ func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - 
n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -147,37 +342,46 @@ func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -185,33 +389,41 @@ func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) - } if m.InlineVolumeSpec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InlineVolumeSpec.Size())) - n5, err := m.InlineVolumeSpec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 + i-- + dAtA[i] = 0x12 } - return i, nil + if m.PersistentVolumeName != nil { + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -219,33 +431,42 @@ func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n6, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - dAtA[i] = 0x1a - i++ + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -253,67 +474,78 @@ func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i++ if len(m.AttachmentMetadata) > 0 { keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) for k := range m.AttachmentMetadata { keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = 
encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) } } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n7, err := m.AttachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n8, err := m.DetachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 + i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -321,35 +553,48 @@ func (m *VolumeError) Marshal() (dAtA []byte, err error) { } func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n9, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -362,6 +607,9 @@ func (m *VolumeAttachment) Size() (n int) { } func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -376,6 +624,9 @@ func (m *VolumeAttachmentList) Size() (n int) { } func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PersistentVolumeName != nil { @@ -390,6 +641,9 @@ func (m *VolumeAttachmentSource) Size() (n int) { } func (m *VolumeAttachmentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Attacher) @@ -402,6 +656,9 @@ func (m *VolumeAttachmentSpec) Size() (n int) { } func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -425,6 +682,9 @@ func (m *VolumeAttachmentStatus) Size() (n int) { } func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Time.Size() @@ -435,14 +695,7 @@ func (m *VolumeError) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -452,7 +705,7 @@ func (this *VolumeAttachment) String() string { return "nil" } s := 
strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -463,9 +716,14 @@ func (this *VolumeAttachmentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -476,7 +734,7 @@ func (this *VolumeAttachmentSource) String() string { } s := strings.Join([]string{`&VolumeAttachmentSource{`, `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, - `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "k8s_io_api_core_v1.PersistentVolumeSpec", 1) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, `}`, }, "") return s @@ -510,8 +768,8 @@ func (this *VolumeAttachmentStatus) String() string { s := strings.Join([]string{`&VolumeAttachmentStatus{`, `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, `}`, }, "") return s @@ -521,7 +779,7 @@ func (this *VolumeError) String() string { return "nil" } s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, }, "") @@ -550,7 +808,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -578,7 +836,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen 
|= int(b&0x7F) << shift if b < 0x80 { break } @@ -587,6 +845,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -608,7 +869,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -617,6 +878,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -638,7 +902,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -647,6 +911,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -663,6 +930,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -690,7 +960,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -718,7 +988,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -727,6 +997,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -748,7 +1021,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -757,6 +1030,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -774,6 +1050,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -801,7 +1080,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -829,7 +1108,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -839,6 +1118,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -859,7 +1141,7 @@ func (m *VolumeAttachmentSource) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -868,11 +1150,14 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.InlineVolumeSpec == nil { - m.InlineVolumeSpec = &k8s_io_api_core_v1.PersistentVolumeSpec{} + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} } if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -887,6 +1172,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -914,7 +1202,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -942,7 +1230,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -952,6 +1240,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -971,7 +1262,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -980,6 +1271,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1001,7 +1295,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1011,6 +1305,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1025,6 +1322,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1052,7 +1352,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1080,7 +1380,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1100,7 +1400,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1109,6 +1409,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1129,7 +1432,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1146,7 +1449,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1156,6 +1459,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -1172,7 +1478,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1182,6 +1488,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -1218,7 +1527,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1227,6 +1536,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1251,7 +1563,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1260,6 +1572,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1279,6 +1594,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1306,7 +1624,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1334,7 +1652,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1343,6 +1661,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1364,7 +1685,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1374,6 +1695,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1388,6 +1712,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1454,10 +1781,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -1486,6 +1816,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -1504,58 +1837,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 745 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0x0d, 0x1f, 0xd1, 0x2a, 0x82, 0x28, 0x48, 0x4e, 0x95, 0x53, - 0x40, 0x74, 0x4d, 0x0a, 0x42, 0x15, 0xb7, 0x58, 0xed, 0xa1, 0xa2, 0x2d, 0x68, 0x8b, 0x38, 0x00, - 0x07, 0x36, 0xf6, 0xe2, 0xb8, 0x89, 0x3f, 0xe4, 0x5d, 0x47, 0xea, 0x8d, 0x13, 0x67, 0x6e, 0xbc, - 0x01, 0xcf, 0x92, 0x1b, 0x15, 0xa7, 0x9e, 0x22, 0x6a, 0xde, 0x82, 0x0b, 0x68, 0xd7, 0x9b, 0xc4, - 0x24, 0x29, 0xb4, 0xbd, 0x79, 0x66, 0x67, 0x7e, 0x33, 0xf3, 0xdf, 0xf1, 0x82, 0x9d, 0xfe, 0x36, - 0x43, 0x6e, 0x60, 0xf4, 0xe3, 0x2e, 0x8d, 0x7c, 0xca, 0x29, 0x33, 0x86, 0xd4, 0xb7, 0x83, 0xc8, - 0x50, 0x07, 0x24, 0x74, 0x0d, 0xc6, 0x83, 0x88, 0x38, 0xd4, 0x18, 0xb6, 0xc9, 0x20, 0xec, 0x91, - 0xb6, 0xe1, 0x50, 0x9f, 0x46, 0x84, 0x53, 0x1b, 0x85, 0x51, 0xc0, 0x03, 0x78, 0x2f, 0x0d, 0x46, - 0x24, 0x74, 0x91, 0x0a, 0x46, 0x93, 0xe0, 0xfa, 0xa6, 0xe3, 0xf2, 0x5e, 0xdc, 0x45, 0x56, 0xe0, - 0x19, 0x4e, 0xe0, 0x04, 0x86, 0xcc, 0xe9, 0xc6, 0x1f, 0xa4, 0x25, 0x0d, 0xf9, 0x95, 0xb2, 0xea, - 0xcd, 0x4c, 0x61, 0x2b, 0x88, 0x44, 0xd5, 0xf9, 0x7a, 0xf5, 0x27, 0xb3, 0x18, 0x8f, 0x58, 0x3d, - 0xd7, 0xa7, 0xd1, 0x89, 0x11, 0xf6, 0x1d, 0xe1, 0x60, 0x86, 0x47, 0x39, 0x59, 0x96, 0x65, 0x5c, - 0x94, 0x15, 0xc5, 0x3e, 0x77, 0x3d, 0xba, 0x90, 0xf0, 0xf4, 0x7f, 0x09, 0xcc, 0xea, 0x51, 0x8f, - 0xcc, 0xe7, 0x35, 0xbf, 0xe6, 0x41, 0xe5, 0x75, 0x30, 0x88, 0x3d, 0xda, 0xe1, 0x9c, 0x58, 0x3d, - 0x8f, 0xfa, 0x1c, 0xbe, 0x07, 0x25, 0xd1, 0x98, 0x4d, 0x38, 0xa9, 0x69, 0x1b, 0x5a, 0xab, 0xbc, - 0xf5, 0x08, 0xcd, 0x64, 0x9b, 0xf2, 0x51, 0xd8, 0x77, 0x84, 0x83, 0x21, 0x11, 0x8d, 0x86, 0x6d, - 0xf4, 0xa2, 0x7b, 0x4c, 0x2d, 0x7e, 0x40, 0x39, 0x31, 0xe1, 0x68, 0xdc, 0xc8, 0x25, 0xe3, 0x06, - 0x98, 0xf9, 0xf0, 0x94, 0x0a, 0x8f, 0x40, 0x91, 0x85, 0xd4, 0xaa, 0xe5, 0x25, 0xbd, 0x8d, 0xfe, - 0x71, 0x29, 0x68, 0xbe, 0xbd, 0xa3, 0x90, 0x5a, 0xe6, 0x0d, 0x85, 0x2f, 0x0a, 0x0b, 0x4b, 0x18, - 0x7c, 0x0b, 0x56, 0x19, 0x27, 0x3c, 0x66, 0xb5, 0x82, 0xc4, 0x3e, 0xbe, 0x1a, 0x56, 0xa6, 0x9a, - 0xb7, 0x14, 0x78, 0x35, 0xb5, 0xb1, 0x42, 0x36, 0x47, 0x1a, 0xa8, 0xce, 0xa7, 0xec, 0xbb, 0x8c, - 0xc3, 0x77, 0x0b, 0x62, 0xa1, 0xcb, 0x89, 0x25, 0xb2, 0xa5, 0x54, 0x15, 0x55, 0xb2, 0x34, 0xf1, - 0x64, 0x84, 0xc2, 0x60, 0xc5, 0xe5, 0xd4, 0x63, 0xb5, 0xfc, 0x46, 
0xa1, 0x55, 0xde, 0xda, 0xbc, - 0xd2, 0x48, 0xe6, 0x4d, 0x45, 0x5e, 0xd9, 0x13, 0x0c, 0x9c, 0xa2, 0x9a, 0xdf, 0x35, 0x70, 0x67, - 0x61, 0xfa, 0x20, 0x8e, 0x2c, 0x0a, 0xf7, 0x41, 0x35, 0xa4, 0x11, 0x73, 0x19, 0xa7, 0x3e, 0x4f, - 0x63, 0x0e, 0x89, 0x47, 0xe5, 0x60, 0xeb, 0x66, 0x2d, 0x19, 0x37, 0xaa, 0x2f, 0x97, 0x9c, 0xe3, - 0xa5, 0x59, 0xf0, 0x18, 0x54, 0x5c, 0x7f, 0xe0, 0xfa, 0x34, 0xf5, 0x1d, 0xcd, 0x6e, 0xbc, 0x95, - 0x9d, 0x43, 0xfc, 0x3a, 0x42, 0x90, 0x79, 0xb2, 0xbc, 0xe8, 0x6a, 0x32, 0x6e, 0x54, 0xf6, 0xe6, - 0x28, 0x78, 0x81, 0xdb, 0xfc, 0xb6, 0xe4, 0x7e, 0xc4, 0x01, 0x7c, 0x08, 0x4a, 0x44, 0x7a, 0x68, - 0xa4, 0xc6, 0x98, 0xea, 0xdd, 0x51, 0x7e, 0x3c, 0x8d, 0x90, 0x3b, 0x24, 0xa5, 0x50, 0x8d, 0x5e, - 0x71, 0x87, 0x64, 0x6a, 0x66, 0x87, 0xa4, 0x8d, 0x15, 0x52, 0xb4, 0xe2, 0x07, 0x76, 0xaa, 0x68, - 0xe1, 0xef, 0x56, 0x0e, 0x95, 0x1f, 0x4f, 0x23, 0x9a, 0xbf, 0x0b, 0x4b, 0xae, 0x49, 0x2e, 0x63, - 0x66, 0x26, 0x5b, 0xce, 0x54, 0x5a, 0x98, 0xc9, 0x9e, 0xce, 0x64, 0xc3, 0x2f, 0x1a, 0x80, 0x64, - 0x8a, 0x38, 0x98, 0x2c, 0x6b, 0xba, 0x51, 0xcf, 0xaf, 0xf1, 0x93, 0xa0, 0xce, 0x02, 0x6d, 0xd7, - 0xe7, 0xd1, 0x89, 0x59, 0x57, 0x5d, 0xc0, 0xc5, 0x00, 0xbc, 0xa4, 0x05, 0x78, 0x0c, 0xca, 0xa9, - 0x77, 0x37, 0x8a, 0x82, 0x48, 0xfd, 0xb6, 0xad, 0x4b, 0x74, 0x24, 0xe3, 0x4d, 0x3d, 0x19, 0x37, - 0xca, 0x9d, 0x19, 0xe0, 0xd7, 0xb8, 0x51, 0xce, 0x9c, 0xe3, 0x2c, 0x5c, 0xd4, 0xb2, 0xe9, 0xac, - 0x56, 0xf1, 0x3a, 0xb5, 0x76, 0xe8, 0xc5, 0xb5, 0x32, 0xf0, 0xfa, 0x2e, 0xb8, 0x7b, 0x81, 0x44, - 0xb0, 0x02, 0x0a, 0x7d, 0x7a, 0x92, 0x6e, 0x22, 0x16, 0x9f, 0xb0, 0x0a, 0x56, 0x86, 0x64, 0x10, - 0xa7, 0x1b, 0xb7, 0x8e, 0x53, 0xe3, 0x59, 0x7e, 0x5b, 0x6b, 0x7e, 0xd2, 0x40, 0xb6, 0x06, 0xdc, - 0x07, 0x45, 0xf1, 0x96, 0xab, 0x67, 0xe6, 0xc1, 0xe5, 0x9e, 0x99, 0x57, 0xae, 0x47, 0x67, 0xcf, - 0xa5, 0xb0, 0xb0, 0xa4, 0xc0, 0xfb, 0x60, 0xcd, 0xa3, 0x8c, 0x11, 0x47, 0x55, 0x36, 0x6f, 0xab, - 0xa0, 0xb5, 0x83, 0xd4, 0x8d, 0x27, 0xe7, 0x26, 0x1a, 0x9d, 0xeb, 0xb9, 0xd3, 0x73, 0x3d, 0x77, - 0x76, 0xae, 0xe7, 0x3e, 0x26, 0xba, 0x36, 0x4a, 0x74, 0xed, 0x34, 0xd1, 0xb5, 0xb3, 0x44, 0xd7, - 0x7e, 0x24, 0xba, 0xf6, 0xf9, 0xa7, 0x9e, 0x7b, 0x53, 0x9a, 0x08, 0xf7, 0x27, 0x00, 0x00, 0xff, - 0xff, 0xe8, 0x45, 0xe3, 0xba, 0xab, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index 57a8357384731..7601963924071 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -35,7 +35,7 @@ option go_package = "v1alpha1"; // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -53,7 +53,7 @@ message VolumeAttachment { // VolumeAttachmentList is a collection of VolumeAttachment objects. 
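The regenerated marshalers earlier in this diff replace the forward-writing `MarshalTo` with `MarshalToSizedBuffer`, which fills a pre-sized buffer from the tail: each field's payload is copied first, then its length varint and tag byte are written in front of it, and `encodeVarintGenerated` now takes the position a varint should end at and returns where it begins. A rough, self-contained sketch of that scheme for a single string field; apart from the varint logic, which mirrors the generated helpers, the names are invented for illustration:

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov reports how many bytes the varint encoding of x needs,
// using the same bit-length formula as the regenerated sovGenerated.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// putVarint writes the varint for v so that it ends at offset and
// returns the new, smaller offset — the shape of the new encodeVarintGenerated.
func putVarint(buf []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

// marshalStringField emits tag, length, payload for a string field by
// working backwards: copy the payload, then its length, then the tag byte.
func marshalStringField(buf []byte, i int, tag byte, s string) int {
	i -= len(s)
	copy(buf[i:], s)
	i = putVarint(buf, i, uint64(len(s)))
	i--
	buf[i] = tag
	return i
}

func main() {
	msg := "hello"
	size := 1 + sov(uint64(len(msg))) + len(msg) // tag + length varint + payload
	buf := make([]byte, size)
	i := marshalStringField(buf, len(buf), 0x0a, msg) // field 1, wire type 2
	fmt.Printf("% x (bytes %d..%d used)\n", buf, i, len(buf))
}
```

Writing tail-first this way lets nested messages be emitted without recomputing their sizes before each prefix, since the length of a child is simply how far the index moved while marshaling it.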
message VolumeAttachmentList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index 76ad6dc0dd8af..39408857c26bf 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -33,7 +33,7 @@ type VolumeAttachment struct { metav1.TypeMeta `json:",inline"` // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -54,7 +54,7 @@ type VolumeAttachment struct { type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index 3701b08640d53..2e821616649d0 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } @@ -40,7 +40,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of VolumeAttachments", } diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index 802f05a775cd2..cd35af34f8621 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -17,47 +17,22 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto - - It has these top-level messages: - CSIDriver - CSIDriverList - CSIDriverSpec - CSINode - CSINodeDriver - CSINodeList - CSINodeSpec - StorageClass - StorageClassList - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError -*/ package v1beta1 import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -71,65 +46,453 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *CSIDriver) Reset() { *m = CSIDriver{} } -func (*CSIDriver) ProtoMessage() {} -func (*CSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *CSIDriver) Reset() { *m = CSIDriver{} } +func (*CSIDriver) ProtoMessage() {} +func (*CSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{0} +} +func (m *CSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriver.Merge(m, src) +} +func (m *CSIDriver) XXX_Size() int { + return m.Size() +} +func (m *CSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriver.DiscardUnknown(m) +} -func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } -func (*CSIDriverList) ProtoMessage() {} -func (*CSIDriverList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CSIDriver proto.InternalMessageInfo -func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } -func (*CSIDriverSpec) ProtoMessage() {} -func (*CSIDriverSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } +func (*CSIDriverList) ProtoMessage() {} +func (*CSIDriverList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{1} +} +func (m *CSIDriverList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriverList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIDriverList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriverList.Merge(m, src) +} +func (m *CSIDriverList) XXX_Size() int { + return m.Size() +} +func (m *CSIDriverList) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriverList.DiscardUnknown(m) +} -func (m *CSINode) Reset() { *m = CSINode{} } -func (*CSINode) ProtoMessage() {} -func (*CSINode) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_CSIDriverList 
proto.InternalMessageInfo -func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } -func (*CSINodeDriver) ProtoMessage() {} -func (*CSINodeDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } +func (*CSIDriverSpec) ProtoMessage() {} +func (*CSIDriverSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{2} +} +func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIDriverSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriverSpec.Merge(m, src) +} +func (m *CSIDriverSpec) XXX_Size() int { + return m.Size() +} +func (m *CSIDriverSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriverSpec.DiscardUnknown(m) +} -func (m *CSINodeList) Reset() { *m = CSINodeList{} } -func (*CSINodeList) ProtoMessage() {} -func (*CSINodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo -func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } -func (*CSINodeSpec) ProtoMessage() {} -func (*CSINodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *CSINode) Reset() { *m = CSINode{} } +func (*CSINode) ProtoMessage() {} +func (*CSINode) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{3} +} +func (m *CSINode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINode) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINode.Merge(m, src) +} +func (m *CSINode) XXX_Size() int { + return m.Size() +} +func (m *CSINode) XXX_DiscardUnknown() { + xxx_messageInfo_CSINode.DiscardUnknown(m) +} -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_CSINode proto.InternalMessageInfo -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } +func (*CSINodeDriver) ProtoMessage() {} +func (*CSINodeDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{4} +} +func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeDriver.Merge(m, src) +} +func (m *CSINodeDriver) XXX_Size() int { + return m.Size() +} +func (m *CSINodeDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeDriver.DiscardUnknown(m) +} -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var 
xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *CSINodeList) Reset() { *m = CSINodeList{} } +func (*CSINodeList) ProtoMessage() {} +func (*CSINodeList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{5} +} +func (m *CSINodeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeList.Merge(m, src) +} +func (m *CSINodeList) XXX_Size() int { + return m.Size() +} +func (m *CSINodeList) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeList.DiscardUnknown(m) +} -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_CSINodeList proto.InternalMessageInfo -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } +func (*CSINodeSpec) ProtoMessage() {} +func (*CSINodeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{6} +} +func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeSpec.Merge(m, src) +} +func (m *CSINodeSpec) XXX_Size() int { + return m.Size() +} +func (m *CSINodeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeSpec.DiscardUnknown(m) +} -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{7} +} +func (m *StorageClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClass.Merge(m, src) +} +func (m *StorageClass) XXX_Size() int { + return m.Size() +} +func (m *StorageClass) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClass.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageClass 
proto.InternalMessageInfo + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{8} +} +func (m *StorageClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClassList.Merge(m, src) +} +func (m *StorageClassList) XXX_Size() int { + return m.Size() +} +func (m *StorageClassList) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageClassList proto.InternalMessageInfo + +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{9} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{10} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{11} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} + +var 
xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{12} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{13} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{14} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeError proto.InternalMessageInfo + +func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } +func (*VolumeNodeResources) ProtoMessage() {} +func (*VolumeNodeResources) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{15} +} +func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeNodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeNodeResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeNodeResources.Merge(m, src) +} +func (m *VolumeNodeResources) XXX_Size() int { + return m.Size() +} +func (m *VolumeNodeResources) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeNodeResources.DiscardUnknown(m) +} + 
+var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo func init() { proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1beta1.CSIDriver") @@ -140,18 +503,114 @@ func init() { proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1beta1.CSINodeList") proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1beta1.CSINodeSpec") proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentList") proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource") proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus.AttachmentMetadataEntry") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") + proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1beta1.VolumeNodeResources") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptor_7d2980599fd0de80) +} + +var fileDescriptor_7d2980599fd0de80 = []byte{ + // 1344 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xbd, 0x6f, 0xdb, 0x46, + 0x1b, 0x37, 0x2d, 0x7f, 0x9e, 0xec, 0x44, 0xbe, 0x18, 0xef, 0xab, 0x57, 0x83, 0x64, 0xe8, 0x45, + 0x1b, 0x27, 0x48, 0xc8, 0x24, 0x48, 0x83, 0x20, 0x40, 0x07, 0xd3, 0x31, 0x50, 0x25, 0x96, 0xe3, + 0x9e, 0x8d, 0xa0, 0x08, 0x3a, 0xf4, 0x44, 0x3e, 0x91, 0x19, 0x93, 0x3c, 0x86, 0x3c, 0xa9, 0xd5, + 0xd6, 0xa9, 0x73, 0xd1, 0xa1, 0x7f, 0x41, 0xff, 0x85, 0x16, 0x68, 0x97, 0x8e, 0xcd, 0x54, 0x04, + 0x9d, 0x32, 0x09, 0x0d, 0xbb, 0x76, 0xeb, 0x66, 0x74, 0x28, 0xee, 0x78, 0x12, 0x29, 0x89, 0x8a, + 0xed, 0x0e, 0xde, 0x78, 0xcf, 0xc7, 0xef, 0xf9, 0x7e, 0xee, 0x88, 0xb6, 0x8f, 0xef, 0x47, 0xba, + 0xc3, 0x8c, 0xe3, 0x4e, 0x0b, 0x42, 0x1f, 0x38, 0x44, 0x46, 0x17, 0x7c, 0x9b, 0x85, 0x86, 0x62, + 0xd0, 0xc0, 0x31, 0x22, 0xce, 0x42, 0xda, 0x06, 0xa3, 0x7b, 0xbb, 0x05, 0x9c, 0xde, 0x36, 0xda, + 0xe0, 0x43, 0x48, 0x39, 0xd8, 0x7a, 0x10, 0x32, 0xce, 0x70, 0x25, 0x91, 0xd5, 0x69, 0xe0, 0xe8, + 0x4a, 0x56, 0x57, 0xb2, 0x95, 0x9b, 0x6d, 0x87, 0x1f, 0x75, 0x5a, 0xba, 0xc5, 0x3c, 0xa3, 0xcd, + 0xda, 0xcc, 0x90, 0x2a, 0xad, 0xce, 0x73, 0x79, 0x92, 0x07, 0xf9, 0x95, 0x40, 0x55, 0xea, 0x19, + 0xb3, 0x16, 0x0b, 0x85, 0xcd, 0x71, 0x73, 0x95, 0xbb, 0xa9, 0x8c, 0x47, 0xad, 0x23, 0xc7, 0x87, + 0xb0, 0x67, 0x04, 0xc7, 0x6d, 0x41, 0x88, 0x0c, 0x0f, 0x38, 0xcd, 0xd3, 0x32, 0xa6, 0x69, 0x85, + 0x1d, 0x9f, 0x3b, 0x1e, 0x4c, 0x28, 0xdc, 0x3b, 0x4d, 0x21, 0xb2, 0x8e, 0xc0, 0xa3, 0xe3, 0x7a, + 0xf5, 0x9f, 0x34, 0xb4, 0xbc, 0x7d, 0xd0, 0x78, 0x18, 0x3a, 0x5d, 0x08, 0xf1, 0x67, 0x68, 0x49, + 0x78, 0x64, 0x53, 0x4e, 0xcb, 0xda, 0x86, 0xb6, 0x59, 0xbc, 0x73, 0x4b, 0x4f, 0xd3, 0x35, 0x04, + 0xd6, 0x83, 0xe3, 0xb6, 0x20, 0x44, 0xba, 0x90, 0xd6, 0xbb, 0xb7, 0xf5, 0x27, 0xad, 0x17, 0x60, + 0xf1, 0x26, 0x70, 0x6a, 0xe2, 0x57, 0xfd, 0xda, 0x4c, 0xdc, 0xaf, 0xa1, 0x94, 0x46, 
0x86, 0xa8, + 0xf8, 0x31, 0x9a, 0x8b, 0x02, 0xb0, 0xca, 0xb3, 0x12, 0xfd, 0x9a, 0x3e, 0xbd, 0x18, 0xfa, 0xd0, + 0xad, 0x83, 0x00, 0x2c, 0x73, 0x45, 0xc1, 0xce, 0x89, 0x13, 0x91, 0x20, 0xf5, 0x1f, 0x35, 0xb4, + 0x3a, 0x94, 0xda, 0x75, 0x22, 0x8e, 0x3f, 0x9d, 0x08, 0x40, 0x3f, 0x5b, 0x00, 0x42, 0x5b, 0xba, + 0x5f, 0x52, 0x76, 0x96, 0x06, 0x94, 0x8c, 0xf3, 0x8f, 0xd0, 0xbc, 0xc3, 0xc1, 0x8b, 0xca, 0xb3, + 0x1b, 0x85, 0xcd, 0xe2, 0x9d, 0xf7, 0xce, 0xe4, 0xbd, 0xb9, 0xaa, 0x10, 0xe7, 0x1b, 0x42, 0x97, + 0x24, 0x10, 0xf5, 0x3f, 0xb3, 0xbe, 0x8b, 0x98, 0xf0, 0x03, 0x74, 0x89, 0x72, 0x4e, 0xad, 0x23, + 0x02, 0x2f, 0x3b, 0x4e, 0x08, 0xb6, 0x8c, 0x60, 0xc9, 0xc4, 0x71, 0xbf, 0x76, 0x69, 0x6b, 0x84, + 0x43, 0xc6, 0x24, 0x85, 0x6e, 0xc0, 0xec, 0x86, 0xff, 0x9c, 0x3d, 0xf1, 0x9b, 0xac, 0xe3, 0x73, + 0x99, 0x60, 0xa5, 0xbb, 0x3f, 0xc2, 0x21, 0x63, 0x92, 0xd8, 0x42, 0xeb, 0x5d, 0xe6, 0x76, 0x3c, + 0xd8, 0x75, 0x9e, 0x83, 0xd5, 0xb3, 0x5c, 0x68, 0x32, 0x1b, 0xa2, 0x72, 0x61, 0xa3, 0xb0, 0xb9, + 0x6c, 0x1a, 0x71, 0xbf, 0xb6, 0xfe, 0x34, 0x87, 0x7f, 0xd2, 0xaf, 0x5d, 0xc9, 0xa1, 0x93, 0x5c, + 0xb0, 0xfa, 0x0f, 0x1a, 0x5a, 0xdc, 0x3e, 0x68, 0xec, 0x31, 0x1b, 0x2e, 0xa0, 0xcb, 0x1a, 0x23, + 0x5d, 0x76, 0xf5, 0x94, 0x3a, 0x09, 0xa7, 0xa6, 0xf6, 0xd8, 0x5f, 0x49, 0x9d, 0x84, 0x8c, 0x1a, + 0x92, 0x0d, 0x34, 0xe7, 0x53, 0x0f, 0xa4, 0xeb, 0xcb, 0xa9, 0xce, 0x1e, 0xf5, 0x80, 0x48, 0x0e, + 0x7e, 0x1f, 0x2d, 0xf8, 0xcc, 0x86, 0xc6, 0x43, 0xe9, 0xc0, 0xb2, 0x79, 0x49, 0xc9, 0x2c, 0xec, + 0x49, 0x2a, 0x51, 0x5c, 0x7c, 0x17, 0xad, 0x70, 0x16, 0x30, 0x97, 0xb5, 0x7b, 0x8f, 0xa1, 0x37, + 0xc8, 0x78, 0x29, 0xee, 0xd7, 0x56, 0x0e, 0x33, 0x74, 0x32, 0x22, 0x85, 0x5b, 0xa8, 0x48, 0x5d, + 0x97, 0x59, 0x94, 0xd3, 0x96, 0x0b, 0xe5, 0x39, 0x19, 0xa3, 0xf1, 0xae, 0x18, 0x93, 0x32, 0x09, + 0xe3, 0x04, 0x22, 0xd6, 0x09, 0x2d, 0x88, 0xcc, 0xcb, 0x71, 0xbf, 0x56, 0xdc, 0x4a, 0x71, 0x48, + 0x16, 0xb4, 0xfe, 0xbd, 0x86, 0x8a, 0x2a, 0xea, 0x0b, 0x98, 0xab, 0x8f, 0x46, 0xe7, 0xea, 0xff, + 0x67, 0xa8, 0xd7, 0x94, 0xa9, 0xb2, 0x86, 0x6e, 0xcb, 0x91, 0x3a, 0x44, 0x8b, 0xb6, 0x2c, 0x5a, + 0x54, 0xd6, 0x24, 0xf4, 0xb5, 0x33, 0x40, 0xab, 0xb1, 0xbd, 0xac, 0x0c, 0x2c, 0x26, 0xe7, 0x88, + 0x0c, 0xa0, 0xea, 0xdf, 0x2c, 0xa0, 0x95, 0x83, 0x44, 0x77, 0xdb, 0xa5, 0x51, 0x74, 0x01, 0x0d, + 0xfd, 0x01, 0x2a, 0x06, 0x21, 0xeb, 0x3a, 0x91, 0xc3, 0x7c, 0x08, 0x55, 0x5b, 0x5d, 0x51, 0x2a, + 0xc5, 0xfd, 0x94, 0x45, 0xb2, 0x72, 0xd8, 0x45, 0x28, 0xa0, 0x21, 0xf5, 0x80, 0x8b, 0x14, 0x14, + 0x64, 0x0a, 0xee, 0xbf, 0x2b, 0x05, 0xd9, 0xb0, 0xf4, 0xfd, 0xa1, 0xea, 0x8e, 0xcf, 0xc3, 0x5e, + 0xea, 0x62, 0xca, 0x20, 0x19, 0x7c, 0x7c, 0x8c, 0x56, 0x43, 0xb0, 0x5c, 0xea, 0x78, 0xfb, 0xcc, + 0x75, 0xac, 0x9e, 0x6c, 0xcd, 0x65, 0x73, 0x27, 0xee, 0xd7, 0x56, 0x49, 0x96, 0x71, 0xd2, 0xaf, + 0xdd, 0x9a, 0xbc, 0x3a, 0xf5, 0x7d, 0x08, 0x23, 0x27, 0xe2, 0xe0, 0xf3, 0xa4, 0x61, 0x47, 0x74, + 0xc8, 0x28, 0xb6, 0x98, 0x1d, 0x4f, 0xac, 0xaf, 0x27, 0x01, 0x77, 0x98, 0x1f, 0x95, 0xe7, 0xd3, + 0xd9, 0x69, 0x66, 0xe8, 0x64, 0x44, 0x0a, 0xef, 0xa2, 0x75, 0xd1, 0xe6, 0x9f, 0x27, 0x06, 0x76, + 0xbe, 0x08, 0xa8, 0x2f, 0x52, 0x55, 0x5e, 0x90, 0xdb, 0xb2, 0x2c, 0x76, 0xdd, 0x56, 0x0e, 0x9f, + 0xe4, 0x6a, 0xe1, 0x4f, 0xd0, 0x5a, 0xb2, 0xec, 0x4c, 0xc7, 0xb7, 0x1d, 0xbf, 0x2d, 0x56, 0x5d, + 0x79, 0x51, 0x06, 0x7d, 0x3d, 0xee, 0xd7, 0xd6, 0x9e, 0x8e, 0x33, 0x4f, 0xf2, 0x88, 0x64, 0x12, + 0x04, 0xbf, 0x44, 0x6b, 0xd2, 0x22, 0xd8, 0x6a, 0x11, 0x38, 0x10, 0x95, 0x97, 0x64, 0xfd, 0x36, + 0xb3, 0xf5, 0x13, 0xa9, 0x13, 0x8d, 0x34, 0x58, 0x17, 0x07, 0xe0, 0x82, 0xc5, 0x59, 0x78, 0x08, + 0xa1, 0x67, 
0xfe, 0x4f, 0xd5, 0x6b, 0x6d, 0x6b, 0x1c, 0x8a, 0x4c, 0xa2, 0x57, 0x3e, 0x44, 0x97, + 0xc7, 0x0a, 0x8e, 0x4b, 0xa8, 0x70, 0x0c, 0xbd, 0x64, 0xd1, 0x11, 0xf1, 0x89, 0xd7, 0xd1, 0x7c, + 0x97, 0xba, 0x1d, 0x48, 0x3a, 0x90, 0x24, 0x87, 0x07, 0xb3, 0xf7, 0xb5, 0xfa, 0xcf, 0x1a, 0x2a, + 0x65, 0xbb, 0xe7, 0x02, 0xd6, 0x46, 0x73, 0x74, 0x6d, 0x6c, 0x9e, 0xb5, 0xb1, 0xa7, 0xec, 0x8e, + 0xef, 0x66, 0x51, 0x29, 0x29, 0x4e, 0x72, 0xd9, 0x7a, 0xe0, 0xf3, 0x0b, 0x18, 0x6d, 0x32, 0x72, + 0x57, 0xdd, 0x3a, 0x7d, 0x8f, 0xa7, 0xde, 0x4d, 0xbb, 0xb4, 0xf0, 0x33, 0xb4, 0x10, 0x71, 0xca, + 0x3b, 0x62, 0xe6, 0x05, 0xea, 0x9d, 0x73, 0xa1, 0x4a, 0xcd, 0xf4, 0xd2, 0x4a, 0xce, 0x44, 0x21, + 0xd6, 0x7f, 0xd1, 0xd0, 0xfa, 0xb8, 0xca, 0x05, 0x14, 0xfb, 0xe3, 0xd1, 0x62, 0xdf, 0x38, 0x4f, + 0x44, 0x53, 0x0a, 0xfe, 0x9b, 0x86, 0xfe, 0x33, 0x11, 0xbc, 0xbc, 0x1e, 0xc5, 0x9e, 0x08, 0xc6, + 0xb6, 0xd1, 0x5e, 0x7a, 0xe7, 0xcb, 0x3d, 0xb1, 0x9f, 0xc3, 0x27, 0xb9, 0x5a, 0xf8, 0x05, 0x2a, + 0x39, 0xbe, 0xeb, 0xf8, 0x90, 0xd0, 0x0e, 0xd2, 0x72, 0xe7, 0x0e, 0xf3, 0x38, 0xb2, 0x2c, 0xf3, + 0x7a, 0xdc, 0xaf, 0x95, 0x1a, 0x63, 0x28, 0x64, 0x02, 0xb7, 0xfe, 0x6b, 0x4e, 0x79, 0xe4, 0x5d, + 0x78, 0x03, 0x2d, 0x25, 0x8f, 0x46, 0x08, 0x55, 0x18, 0xc3, 0x74, 0x6f, 0x29, 0x3a, 0x19, 0x4a, + 0xc8, 0x0e, 0x92, 0xa9, 0x50, 0x8e, 0x9e, 0xaf, 0x83, 0xa4, 0x66, 0xa6, 0x83, 0xe4, 0x99, 0x28, + 0x44, 0xe1, 0x89, 0x78, 0x00, 0xc9, 0x84, 0x16, 0x46, 0x3d, 0xd9, 0x53, 0x74, 0x32, 0x94, 0xa8, + 0xff, 0x5d, 0xc8, 0xa9, 0x92, 0x6c, 0xc5, 0x4c, 0x48, 0x83, 0xb7, 0xf2, 0x78, 0x48, 0xf6, 0x30, + 0x24, 0x1b, 0x7f, 0xab, 0x21, 0x4c, 0x87, 0x10, 0xcd, 0x41, 0xab, 0x26, 0xfd, 0xf4, 0xe8, 0xfc, + 0x13, 0xa2, 0x6f, 0x4d, 0x80, 0x25, 0xf7, 0x64, 0x45, 0x39, 0x81, 0x27, 0x05, 0x48, 0x8e, 0x07, + 0xd8, 0x41, 0xc5, 0x84, 0xba, 0x13, 0x86, 0x2c, 0x54, 0x23, 0x7b, 0xf5, 0x74, 0x87, 0xa4, 0xb8, + 0x59, 0x95, 0x0f, 0xb9, 0x54, 0xff, 0xa4, 0x5f, 0x2b, 0x66, 0xf8, 0x24, 0x8b, 0x2d, 0x4c, 0xd9, + 0x90, 0x9a, 0x9a, 0xfb, 0x17, 0xa6, 0x1e, 0xc2, 0x74, 0x53, 0x19, 0xec, 0xca, 0x0e, 0xfa, 0xef, + 0x94, 0x04, 0x9d, 0xeb, 0x5e, 0xf9, 0x4a, 0x43, 0x59, 0x1b, 0x78, 0x17, 0xcd, 0x89, 0xff, 0x59, + 0xb5, 0x61, 0xae, 0x9f, 0x6d, 0xc3, 0x1c, 0x3a, 0x1e, 0xa4, 0x8b, 0x52, 0x9c, 0x88, 0x44, 0xc1, + 0xd7, 0xd0, 0xa2, 0x07, 0x51, 0x44, 0xdb, 0xca, 0x72, 0xfa, 0xea, 0x6b, 0x26, 0x64, 0x32, 0xe0, + 0xd7, 0xef, 0xa1, 0x2b, 0x39, 0xef, 0x68, 0x5c, 0x43, 0xf3, 0x96, 0xfc, 0xe1, 0x12, 0x0e, 0xcd, + 0x9b, 0xcb, 0x62, 0xcb, 0x6c, 0xcb, 0xff, 0xac, 0x84, 0x6e, 0xde, 0x7c, 0xf5, 0xb6, 0x3a, 0xf3, + 0xfa, 0x6d, 0x75, 0xe6, 0xcd, 0xdb, 0xea, 0xcc, 0x97, 0x71, 0x55, 0x7b, 0x15, 0x57, 0xb5, 0xd7, + 0x71, 0x55, 0x7b, 0x13, 0x57, 0xb5, 0xdf, 0xe3, 0xaa, 0xf6, 0xf5, 0x1f, 0xd5, 0x99, 0x67, 0x8b, + 0x2a, 0xdf, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x72, 0xff, 0xde, 0x2e, 0xe4, 0x10, 0x00, 0x00, } + func (m *CSIDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -159,33 +618,42 @@ func (m *CSIDriver) Marshal() (dAtA []byte, err error) { } func (m *CSIDriver) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, 
err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -193,37 +661,46 @@ func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { } func (m *CSIDriverList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriverList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -231,37 +708,51 @@ func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { } func (m *CSIDriverSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.AttachRequired != nil { - dAtA[i] = 0x8 - i++ - if *m.AttachRequired { + if len(m.VolumeLifecycleModes) > 0 { + for iNdEx := len(m.VolumeLifecycleModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VolumeLifecycleModes[iNdEx]) + copy(dAtA[i:], m.VolumeLifecycleModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeLifecycleModes[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.PodInfoOnMount != nil { + i-- + if *m.PodInfoOnMount { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.PodInfoOnMount != nil { + i-- dAtA[i] = 0x10 - i++ - if *m.PodInfoOnMount { + } + if m.AttachRequired != nil { + i-- + if *m.AttachRequired { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *CSINode) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -269,33 +760,42 @@ func (m 
*CSINode) Marshal() (dAtA []byte, err error) { } func (m *CSINode) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -303,40 +803,53 @@ func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { } func (m *CSINodeDriver) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) - i += copy(dAtA[i:], m.NodeID) + if m.Allocatable != nil { + { + size, err := m.Allocatable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } if len(m.TopologyKeys) > 0 { - for _, s := range m.TopologyKeys { + for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TopologyKeys[iNdEx]) + copy(dAtA[i:], m.TopologyKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.NodeID) + copy(dAtA[i:], m.NodeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSINodeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -344,37 +857,46 @@ func (m *CSINodeList) Marshal() (dAtA []byte, err error) { } func (m *CSINodeList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg 
:= range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -382,130 +904,145 @@ func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { } func (m *CSINodeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Drivers) > 0 { - for _, msg := range m.Drivers { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Drivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Drivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } - return dAtA[:n], nil -} - -func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) - i += copy(dAtA[i:], m.Provisioner) - if len(m.Parameters) > 0 { - keysForParameters := make([]string, 0, len(m.Parameters)) - for k := range m.Parameters { - keysForParameters = append(keysForParameters, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - for _, k := range keysForParameters { - dAtA[i] = 0x1a - i++ - v := m.Parameters[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.ReclaimPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) - i += copy(dAtA[i:], *m.ReclaimPolicy) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + return dAtA[:n], nil +} + +func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AllowedTopologies) > 0 { + for iNdEx := len(m.AllowedTopologies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedTopologies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x42 } } + if m.VolumeBindingMode != nil { + i -= len(*m.VolumeBindingMode) + copy(dAtA[i:], *m.VolumeBindingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i-- + dAtA[i] = 0x3a + } if m.AllowVolumeExpansion != nil { - dAtA[i] = 0x30 - i++ + i-- if *m.AllowVolumeExpansion { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x30 } - if m.VolumeBindingMode != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) - i += copy(dAtA[i:], *m.VolumeBindingMode) + if len(m.MountOptions) > 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } - if len(m.AllowedTopologies) > 0 { - for _, msg := range m.AllowedTopologies { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if m.ReclaimPolicy != nil { + i -= len(*m.ReclaimPolicy) + copy(dAtA[i:], *m.ReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Provisioner) + copy(dAtA[i:], m.Provisioner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StorageClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -513,37 +1050,46 @@ func (m *StorageClassList) Marshal() (dAtA []byte, err error) { } func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err 
:= m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -551,41 +1097,52 @@ func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -593,37 +1150,46 @@ func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n12, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if 
err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -631,33 +1197,41 @@ func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) - } if m.InlineVolumeSpec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InlineVolumeSpec.Size())) - n13, err := m.InlineVolumeSpec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n13 + i-- + dAtA[i] = 0x12 } - return i, nil + if m.PersistentVolumeName != nil { + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -665,33 +1239,42 @@ func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n14, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - dAtA[i] = 0x1a - i++ + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -699,67 +1282,78 @@ func (m *VolumeAttachmentStatus) 
Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - i++ if len(m.AttachmentMetadata) > 0 { keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) for k := range m.AttachmentMetadata { keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n15, err := m.AttachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err } - i += n15 } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n16, err := m.DetachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 + i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -767,35 +1361,76 @@ func (m *VolumeError) Marshal() (dAtA []byte, err error) { } func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n17, err := m.Time.MarshalTo(dAtA[i:]) + return len(dAtA) - i, nil +} + +func (m *VolumeNodeResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { - return 0, err + return nil, err } - i += n17 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + return dAtA[:n], nil +} + +func (m *VolumeNodeResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeNodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Count)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CSIDriver) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -806,6 +1441,9 @@ func (m *CSIDriver) Size() (n int) { } func (m *CSIDriverList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -820,6 +1458,9 @@ func (m *CSIDriverList) Size() (n int) { } func (m *CSIDriverSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.AttachRequired != nil { @@ -828,10 +1469,19 @@ func (m *CSIDriverSpec) Size() (n int) { if m.PodInfoOnMount != nil { n += 2 } + if len(m.VolumeLifecycleModes) > 0 { + for _, s := range m.VolumeLifecycleModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } func (m *CSINode) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -842,6 +1492,9 @@ func (m *CSINode) Size() (n int) { } func (m *CSINodeDriver) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -854,10 +1507,17 @@ func (m *CSINodeDriver) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.Allocatable != nil { + l = m.Allocatable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *CSINodeList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -872,6 +1532,9 @@ func (m *CSINodeList) Size() (n int) { } func (m *CSINodeSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Drivers) > 0 { @@ -884,6 +1547,9 @@ func (m *CSINodeSpec) Size() (n int) { } func (m *StorageClass) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -925,6 +1591,9 @@ func (m *StorageClass) Size() (n int) { } func (m *StorageClassList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -939,6 +1608,9 @@ func (m *StorageClassList) Size() (n int) { } func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -951,6 +1623,9 @@ func (m *VolumeAttachment) Size() (n int) { } func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -965,6 +1640,9 @@ func (m *VolumeAttachmentList) Size() (n int) { } func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.PersistentVolumeName != nil { @@ 
-979,6 +1657,9 @@ func (m *VolumeAttachmentSource) Size() (n int) { } func (m *VolumeAttachmentSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Attacher) @@ -991,6 +1672,9 @@ func (m *VolumeAttachmentSpec) Size() (n int) { } func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1014,6 +1698,9 @@ func (m *VolumeAttachmentStatus) Size() (n int) { } func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Time.Size() @@ -1023,16 +1710,21 @@ func (m *VolumeError) Size() (n int) { return n } -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } +func (m *VolumeNodeResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += 1 + sovGenerated(uint64(*m.Count)) } return n } + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } @@ -1041,7 +1733,7 @@ func (this *CSIDriver) String() string { return "nil" } s := strings.Join([]string{`&CSIDriver{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSIDriverSpec", "CSIDriverSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1051,9 +1743,14 @@ func (this *CSIDriverList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CSIDriver{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSIDriver", "CSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CSIDriverList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CSIDriver", "CSIDriver", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1065,6 +1762,7 @@ func (this *CSIDriverSpec) String() string { s := strings.Join([]string{`&CSIDriverSpec{`, `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, + `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, `}`, }, "") return s @@ -1074,7 +1772,7 @@ func (this *CSINode) String() string { return "nil" } s := strings.Join([]string{`&CSINode{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSINodeSpec", "CSINodeSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1088,6 +1786,7 @@ func (this *CSINodeDriver) String() string { `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, 
`TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `Allocatable:` + strings.Replace(this.Allocatable.String(), "VolumeNodeResources", "VolumeNodeResources", 1) + `,`, `}`, }, "") return s @@ -1096,9 +1795,14 @@ func (this *CSINodeList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CSINode{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSINode", "CSINode", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CSINodeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CSINode", "CSINode", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1107,8 +1811,13 @@ func (this *CSINodeSpec) String() string { if this == nil { return "nil" } + repeatedStringForDrivers := "[]CSINodeDriver{" + for _, f := range this.Drivers { + repeatedStringForDrivers += strings.Replace(strings.Replace(f.String(), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForDrivers += "}" s := strings.Join([]string{`&CSINodeSpec{`, - `Drivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Drivers), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + `,`, + `Drivers:` + repeatedStringForDrivers + `,`, `}`, }, "") return s @@ -1117,6 +1826,11 @@ func (this *StorageClass) String() string { if this == nil { return "nil" } + repeatedStringForAllowedTopologies := "[]TopologySelectorTerm{" + for _, f := range this.AllowedTopologies { + repeatedStringForAllowedTopologies += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAllowedTopologies += "}" keysForParameters := make([]string, 0, len(this.Parameters)) for k := range this.Parameters { keysForParameters = append(keysForParameters, k) @@ -1128,14 +1842,14 @@ func (this *StorageClass) String() string { } mapStringForParameters += "}" s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, `Parameters:` + mapStringForParameters + `,`, `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, - `AllowedTopologies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedTopologies), "TopologySelectorTerm", "k8s_io_api_core_v1.TopologySelectorTerm", 1), `&`, ``, 1) + `,`, + `AllowedTopologies:` + repeatedStringForAllowedTopologies + `,`, `}`, }, "") return s @@ -1144,9 +1858,14 @@ func (this *StorageClassList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]StorageClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageClass", "StorageClass", 1), `&`, ``, 1) + "," + } + 
repeatedStringForItems += "}" s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1156,7 +1875,7 @@ func (this *VolumeAttachment) String() string { return "nil" } s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1167,9 +1886,14 @@ func (this *VolumeAttachmentList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1180,7 +1904,7 @@ func (this *VolumeAttachmentSource) String() string { } s := strings.Join([]string{`&VolumeAttachmentSource{`, `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, - `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "k8s_io_api_core_v1.PersistentVolumeSpec", 1) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, `}`, }, "") return s @@ -1214,8 +1938,8 @@ func (this *VolumeAttachmentStatus) String() string { s := strings.Join([]string{`&VolumeAttachmentStatus{`, `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, `}`, }, "") return s @@ -1225,12 +1949,22 @@ func (this *VolumeError) String() string { return "nil" } s := strings.Join([]string{`&VolumeError{`, - `Time:` 
+ strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, }, "") return s } +func (this *VolumeNodeResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeResources{`, + `Count:` + valueToStringGenerated(this.Count) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -1254,7 +1988,7 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1282,7 +2016,7 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1291,6 +2025,9 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1312,7 +2049,7 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1321,6 +2058,9 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1337,6 +2077,9 @@ func (m *CSIDriver) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1364,7 +2107,7 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1392,7 +2135,7 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1401,6 +2144,9 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1422,7 +2168,7 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1431,6 +2177,9 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1448,6 +2197,9 @@ func (m *CSIDriverList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1475,7 +2227,7 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1503,7 +2255,7 @@ func (m *CSIDriverSpec) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1524,13 +2276,45 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } b := bool(v != 0) m.PodInfoOnMount = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeLifecycleModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeLifecycleModes = append(m.VolumeLifecycleModes, VolumeLifecycleMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1540,6 +2324,9 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1567,7 +2354,7 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1595,7 +2382,7 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1604,6 +2391,9 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1625,7 +2415,7 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1634,6 +2424,9 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1650,6 +2443,9 @@ func (m *CSINode) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1677,7 +2473,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1705,7 +2501,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1715,6 +2511,9 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1734,7 +2533,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1744,6 +2543,9 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1763,7 +2565,7 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1773,11 +2575,50 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocatable == nil { + m.Allocatable = &VolumeNodeResources{} + } + if err := m.Allocatable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1787,6 +2628,9 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1814,7 +2658,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1842,7 +2686,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1851,6 +2695,9 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1872,7 +2719,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1881,6 +2728,9 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1898,6 +2748,9 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1925,7 +2778,7 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1953,7 +2806,7 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1962,6 +2815,9 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1979,6 +2835,9 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2006,7 +2865,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2034,7 +2893,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2043,6 +2902,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2064,7 +2926,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2074,6 +2936,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2093,7 +2958,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2102,6 +2967,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2122,7 +2990,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2139,7 +3007,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2149,6 +3017,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2165,7 +3036,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2175,6 +3046,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -2211,7 +3085,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2221,6 +3095,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2241,7 +3118,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2251,6 +3128,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2270,7 +3150,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2291,7 +3171,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2301,6 +3181,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2321,7 +3204,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2330,10 +3213,13 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) + m.AllowedTopologies = append(m.AllowedTopologies, v11.TopologySelectorTerm{}) if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2347,6 +3233,9 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2374,7 +3263,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2402,7 +3291,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2411,6 +3300,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2432,7 +3324,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2441,6 +3333,9 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2458,6 +3353,9 @@ func (m *StorageClassList) Unmarshal(dAtA 
[]byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2485,7 +3383,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2513,7 +3411,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2522,6 +3420,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2543,7 +3444,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2552,6 +3453,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2573,7 +3477,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2582,6 +3486,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2598,6 +3505,9 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2625,7 +3535,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2653,7 +3563,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2662,6 +3572,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2683,7 +3596,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2692,6 +3605,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2709,6 +3625,9 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2736,7 +3655,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << 
shift if b < 0x80 { break } @@ -2764,7 +3683,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2774,6 +3693,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2794,7 +3716,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2803,11 +3725,14 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.InlineVolumeSpec == nil { - m.InlineVolumeSpec = &k8s_io_api_core_v1.PersistentVolumeSpec{} + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} } if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2822,6 +3747,9 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2849,7 +3777,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2877,7 +3805,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2887,6 +3815,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2906,7 +3837,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2915,6 +3846,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2936,7 +3870,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2946,6 +3880,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2960,6 +3897,9 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2987,7 +3927,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-3015,7 +3955,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3035,7 +3975,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3044,6 +3984,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3064,7 +4007,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3081,7 +4024,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3091,6 +4034,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -3107,7 +4053,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3117,6 +4063,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -3153,7 +4102,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3162,6 +4111,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3186,7 +4138,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3195,6 +4147,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3214,6 +4169,9 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3241,7 +4199,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3269,7 +4227,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3278,6 +4236,9 @@ func (m 
*VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3299,7 +4260,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3309,6 +4270,9 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3323,6 +4287,82 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeNodeResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeNodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3389,10 +4429,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -3421,6 +4464,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -3439,89 +4485,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1247 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4d, 0x6f, 0x1b, 0x45, - 0x18, 0xce, 0xc6, 0xf9, 0x1c, 0x27, 0xad, 0x33, 0x44, 0x60, 0x7c, 0xb0, 0x23, 0x23, 0x68, 0x5a, - 0xb5, 0xeb, 0xb6, 0x2a, 0xa8, 0xaa, 0xc4, 0x21, 0x4e, 0x23, 0xe1, 0xb6, 0x4e, 0xc3, 
0x24, 0xaa, - 0x50, 0xc5, 0x81, 0xc9, 0xee, 0x5b, 0x67, 0x1b, 0xef, 0xce, 0x76, 0x76, 0x6c, 0xf0, 0x8d, 0x13, - 0x1c, 0x41, 0x1c, 0xf8, 0x05, 0xfc, 0x05, 0x90, 0xe0, 0xc2, 0x91, 0x9e, 0x50, 0xc5, 0xa9, 0x27, - 0x8b, 0x2e, 0xff, 0xa2, 0xe2, 0x80, 0x66, 0x76, 0xec, 0xfd, 0xb0, 0xdd, 0x38, 0x1c, 0x7c, 0xf3, - 0xbc, 0x1f, 0xcf, 0xfb, 0xf5, 0xcc, 0x3b, 0x6b, 0xb4, 0x7b, 0x7a, 0x3b, 0x30, 0x1d, 0x56, 0x3b, - 0xed, 0x1c, 0x03, 0xf7, 0x40, 0x40, 0x50, 0xeb, 0x82, 0x67, 0x33, 0x5e, 0xd3, 0x0a, 0xea, 0x3b, - 0xb5, 0x40, 0x30, 0x4e, 0x5b, 0x50, 0xeb, 0xde, 0x38, 0x06, 0x41, 0x6f, 0xd4, 0x5a, 0xe0, 0x01, - 0xa7, 0x02, 0x6c, 0xd3, 0xe7, 0x4c, 0x30, 0x5c, 0x8a, 0x6c, 0x4d, 0xea, 0x3b, 0xa6, 0xb6, 0x35, - 0xb5, 0x6d, 0xe9, 0x5a, 0xcb, 0x11, 0x27, 0x9d, 0x63, 0xd3, 0x62, 0x6e, 0xad, 0xc5, 0x5a, 0xac, - 0xa6, 0x5c, 0x8e, 0x3b, 0x4f, 0xd4, 0x49, 0x1d, 0xd4, 0xaf, 0x08, 0xaa, 0x54, 0x4d, 0x84, 0xb5, - 0x18, 0x97, 0x31, 0xb3, 0xe1, 0x4a, 0xb7, 0x62, 0x1b, 0x97, 0x5a, 0x27, 0x8e, 0x07, 0xbc, 0x57, - 0xf3, 0x4f, 0x5b, 0x52, 0x10, 0xd4, 0x5c, 0x10, 0x74, 0x9c, 0x57, 0x6d, 0x92, 0x17, 0xef, 0x78, - 0xc2, 0x71, 0x61, 0xc4, 0xe1, 0xa3, 0xb3, 0x1c, 0x02, 0xeb, 0x04, 0x5c, 0x9a, 0xf5, 0xab, 0xfe, - 0x66, 0xa0, 0xd5, 0xdd, 0xc3, 0xc6, 0x5d, 0xee, 0x74, 0x81, 0xe3, 0x2f, 0xd0, 0x8a, 0xcc, 0xc8, - 0xa6, 0x82, 0x16, 0x8d, 0x2d, 0x63, 0x3b, 0x7f, 0xf3, 0xba, 0x19, 0xb7, 0x6b, 0x08, 0x6c, 0xfa, - 0xa7, 0x2d, 0x29, 0x08, 0x4c, 0x69, 0x6d, 0x76, 0x6f, 0x98, 0x0f, 0x8f, 0x9f, 0x82, 0x25, 0x9a, - 0x20, 0x68, 0x1d, 0x3f, 0xef, 0x57, 0xe6, 0xc2, 0x7e, 0x05, 0xc5, 0x32, 0x32, 0x44, 0xc5, 0xf7, - 0xd1, 0x42, 0xe0, 0x83, 0x55, 0x9c, 0x57, 0xe8, 0x97, 0xcd, 0xc9, 0xc3, 0x30, 0x87, 0x69, 0x1d, - 0xfa, 0x60, 0xd5, 0xd7, 0x34, 0xec, 0x82, 0x3c, 0x11, 0x05, 0x52, 0xfd, 0xd5, 0x40, 0xeb, 0x43, - 0xab, 0x07, 0x4e, 0x20, 0xf0, 0xe7, 0x23, 0x05, 0x98, 0xd3, 0x15, 0x20, 0xbd, 0x55, 0xfa, 0x05, - 0x1d, 0x67, 0x65, 0x20, 0x49, 0x24, 0x7f, 0x0f, 0x2d, 0x3a, 0x02, 0xdc, 0xa0, 0x38, 0xbf, 0x95, - 0xdb, 0xce, 0xdf, 0x7c, 0x7f, 0xaa, 0xec, 0xeb, 0xeb, 0x1a, 0x71, 0xb1, 0x21, 0x7d, 0x49, 0x04, - 0x51, 0xfd, 0x36, 0x99, 0xbb, 0xac, 0x09, 0xdf, 0x41, 0x17, 0xa8, 0x10, 0xd4, 0x3a, 0x21, 0xf0, - 0xac, 0xe3, 0x70, 0xb0, 0x55, 0x05, 0x2b, 0x75, 0x1c, 0xf6, 0x2b, 0x17, 0x76, 0x52, 0x1a, 0x92, - 0xb1, 0x94, 0xbe, 0x3e, 0xb3, 0x1b, 0xde, 0x13, 0xf6, 0xd0, 0x6b, 0xb2, 0x8e, 0x27, 0x54, 0x83, - 0xb5, 0xef, 0x41, 0x4a, 0x43, 0x32, 0x96, 0xd5, 0x5f, 0x0c, 0xb4, 0xbc, 0x7b, 0xd8, 0xd8, 0x67, - 0x36, 0xcc, 0x80, 0x00, 0x8d, 0x14, 0x01, 0x2e, 0x9d, 0xd1, 0x42, 0x99, 0xd4, 0xc4, 0xf1, 0x7f, - 0x17, 0xb5, 0x50, 0xda, 0x68, 0xfe, 0x6e, 0xa1, 0x05, 0x8f, 0xba, 0xa0, 0x52, 0x5f, 0x8d, 0x7d, - 0xf6, 0xa9, 0x0b, 0x44, 0x69, 0xf0, 0x07, 0x68, 0xc9, 0x63, 0x36, 0x34, 0xee, 0xaa, 0x04, 0x56, - 0xeb, 0x17, 0xb4, 0xcd, 0xd2, 0xbe, 0x92, 0x12, 0xad, 0xc5, 0xb7, 0xd0, 0x9a, 0x60, 0x3e, 0x6b, - 0xb3, 0x56, 0xef, 0x3e, 0xf4, 0x82, 0x62, 0x6e, 0x2b, 0xb7, 0xbd, 0x5a, 0x2f, 0x84, 0xfd, 0xca, - 0xda, 0x51, 0x42, 0x4e, 0x52, 0x56, 0xd5, 0x9f, 0x0d, 0x94, 0xd7, 0x19, 0xcd, 0x80, 0x8e, 0x9f, - 0xa4, 0xe9, 0xf8, 0xde, 0x14, 0xbd, 0x9c, 0x40, 0x46, 0x6b, 0x98, 0xb6, 0x62, 0xe2, 0x11, 0x5a, - 0xb6, 0x55, 0x43, 0x83, 0xa2, 0xa1, 0xa0, 0x2f, 0x4f, 0x01, 0xad, 0xd9, 0x7e, 0x51, 0x07, 0x58, - 0x8e, 0xce, 0x01, 0x19, 0x40, 0x55, 0x7f, 0x58, 0x42, 0x6b, 0x87, 0x91, 0xef, 0x6e, 0x9b, 0x06, - 0xc1, 0x0c, 0xc8, 0xf6, 0x21, 0xca, 0xfb, 0x9c, 0x75, 0x9d, 0xc0, 0x61, 0x1e, 0x70, 0x3d, 0xf2, - 0xb7, 0xb4, 0x4b, 0xfe, 0x20, 0x56, 0x91, 0xa4, 0x1d, 0x6e, 0x23, 0xe4, 0x53, 0x4e, 0x5d, 0x10, - 0xb2, 0x05, 
0x39, 0xd5, 0x82, 0xdb, 0x6f, 0x6a, 0x41, 0xb2, 0x2c, 0xf3, 0x60, 0xe8, 0xba, 0xe7, - 0x09, 0xde, 0x8b, 0x53, 0x8c, 0x15, 0x24, 0x81, 0x8f, 0x4f, 0xd1, 0x3a, 0x07, 0xab, 0x4d, 0x1d, - 0xf7, 0x80, 0xb5, 0x1d, 0xab, 0x57, 0x5c, 0x50, 0x69, 0xee, 0x85, 0xfd, 0xca, 0x3a, 0x49, 0x2a, - 0x5e, 0xf7, 0x2b, 0xd7, 0x47, 0x5f, 0x1c, 0xf3, 0x00, 0x78, 0xe0, 0x04, 0x02, 0x3c, 0xf1, 0x88, - 0xb5, 0x3b, 0x2e, 0xa4, 0x7c, 0x48, 0x1a, 0x5b, 0xf2, 0xda, 0x95, 0xb7, 0xfe, 0xa1, 0x2f, 0x1c, - 0xe6, 0x05, 0xc5, 0xc5, 0x98, 0xd7, 0xcd, 0x84, 0x9c, 0xa4, 0xac, 0xf0, 0x03, 0xb4, 0x49, 0xdb, - 0x6d, 0xf6, 0x65, 0x14, 0x60, 0xef, 0x2b, 0x9f, 0x7a, 0xb2, 0x55, 0xc5, 0x25, 0xb5, 0x64, 0x8a, - 0x61, 0xbf, 0xb2, 0xb9, 0x33, 0x46, 0x4f, 0xc6, 0x7a, 0xe1, 0xcf, 0xd0, 0x46, 0x57, 0x89, 0xea, - 0x8e, 0x67, 0x3b, 0x5e, 0xab, 0xc9, 0x6c, 0x28, 0x2e, 0xab, 0xa2, 0xaf, 0x84, 0xfd, 0xca, 0xc6, - 0xa3, 0xac, 0xf2, 0xf5, 0x38, 0x21, 0x19, 0x05, 0xc1, 0xcf, 0xd0, 0x86, 0x8a, 0x08, 0xb6, 0xbe, - 0xa4, 0x0e, 0x04, 0xc5, 0x15, 0x35, 0xbf, 0xed, 0xe4, 0xfc, 0x64, 0xeb, 0x24, 0x91, 0x06, 0x57, - 0xf9, 0x10, 0xda, 0x60, 0x09, 0xc6, 0x8f, 0x80, 0xbb, 0xf5, 0x77, 0xf5, 0xbc, 0x36, 0x76, 0xb2, - 0x50, 0x64, 0x14, 0xbd, 0xf4, 0x31, 0xba, 0x98, 0x19, 0x38, 0x2e, 0xa0, 0xdc, 0x29, 0xf4, 0xa2, - 0x25, 0x44, 0xe4, 0x4f, 0xbc, 0x89, 0x16, 0xbb, 0xb4, 0xdd, 0x81, 0x88, 0x81, 0x24, 0x3a, 0xdc, - 0x99, 0xbf, 0x6d, 0x54, 0x7f, 0x37, 0x50, 0x21, 0xc9, 0x9e, 0x19, 0xac, 0x8d, 0x66, 0x7a, 0x6d, - 0x6c, 0x4f, 0x4b, 0xec, 0x09, 0xbb, 0xe3, 0xa7, 0x79, 0x54, 0x88, 0x86, 0x13, 0xbd, 0x51, 0x2e, - 0x78, 0x62, 0x06, 0x57, 0x9b, 0xa4, 0xde, 0x91, 0xeb, 0x6f, 0x2a, 0x22, 0x9b, 0xdd, 0xa4, 0x07, - 0x05, 0x3f, 0x46, 0x4b, 0x81, 0xa0, 0xa2, 0x23, 0xef, 0xbc, 0x44, 0xbd, 0x79, 0x2e, 0x54, 0xe5, - 0x19, 0x3f, 0x28, 0xd1, 0x99, 0x68, 0xc4, 0xea, 0x1f, 0x06, 0xda, 0xcc, 0xba, 0xcc, 0x60, 0xd8, - 0x9f, 0xa6, 0x87, 0x7d, 0xf5, 0x3c, 0x15, 0x4d, 0x18, 0xf8, 0x5f, 0x06, 0x7a, 0x7b, 0xa4, 0x78, - 0xd6, 0xe1, 0x16, 0xc8, 0x3d, 0xe1, 0x67, 0xb6, 0xd1, 0x7e, 0xfc, 0x1e, 0xab, 0x3d, 0x71, 0x30, - 0x46, 0x4f, 0xc6, 0x7a, 0xe1, 0xa7, 0xa8, 0xe0, 0x78, 0x6d, 0xc7, 0x83, 0x48, 0x76, 0x18, 0x8f, - 0x7b, 0xec, 0x65, 0xce, 0x22, 0xab, 0x31, 0x6f, 0x86, 0xfd, 0x4a, 0xa1, 0x91, 0x41, 0x21, 0x23, - 0xb8, 0xd5, 0x3f, 0xc7, 0x8c, 0x47, 0xbd, 0x85, 0x57, 0xd1, 0x4a, 0xf4, 0xad, 0x05, 0x5c, 0x97, - 0x31, 0x6c, 0xf7, 0x8e, 0x96, 0x93, 0xa1, 0x85, 0x62, 0x90, 0x6a, 0x85, 0x4e, 0xf4, 0x7c, 0x0c, - 0x52, 0x9e, 0x09, 0x06, 0xa9, 0x33, 0xd1, 0x88, 0x32, 0x13, 0xf9, 0x71, 0xa2, 0x1a, 0x9a, 0x4b, - 0x67, 0xb2, 0xaf, 0xe5, 0x64, 0x68, 0x51, 0xfd, 0x37, 0x37, 0x66, 0x4a, 0x8a, 0x8a, 0x89, 0x92, - 0x06, 0x9f, 0x98, 0xd9, 0x92, 0xec, 0x61, 0x49, 0x36, 0xfe, 0xd1, 0x40, 0x98, 0x0e, 0x21, 0x9a, - 0x03, 0xaa, 0x46, 0x7c, 0xba, 0x77, 0xfe, 0x1b, 0x62, 0xee, 0x8c, 0x80, 0x45, 0xef, 0x64, 0x49, - 0x27, 0x81, 0x47, 0x0d, 0xc8, 0x98, 0x0c, 0xb0, 0x83, 0xf2, 0x91, 0x74, 0x8f, 0x73, 0xc6, 0xf5, - 0x95, 0xbd, 0x74, 0x76, 0x42, 0xca, 0xbc, 0x5e, 0x96, 0x5f, 0x00, 0x3b, 0xb1, 0xff, 0xeb, 0x7e, - 0x25, 0x9f, 0xd0, 0x93, 0x24, 0xb6, 0x0c, 0x65, 0x43, 0x1c, 0x6a, 0xe1, 0x7f, 0x84, 0xba, 0x0b, - 0x93, 0x43, 0x25, 0xb0, 0x4b, 0x7b, 0xe8, 0x9d, 0x09, 0x0d, 0x3a, 0xd7, 0xbb, 0xf2, 0x8d, 0x81, - 0x92, 0x31, 0xf0, 0x03, 0xb4, 0x20, 0xff, 0x06, 0xea, 0x0d, 0x73, 0x65, 0xba, 0x0d, 0x73, 0xe4, - 0xb8, 0x10, 0x2f, 0x4a, 0x79, 0x22, 0x0a, 0x05, 0x5f, 0x46, 0xcb, 0x2e, 0x04, 0x01, 0x6d, 0xe9, - 0xc8, 0xf1, 0x57, 0x5f, 0x33, 0x12, 0x93, 0x81, 0xbe, 0x7e, 0xed, 0xf9, 0xab, 0xf2, 0xdc, 0x8b, - 0x57, 0xe5, 0xb9, 0x97, 0xaf, 0xca, 
0x73, 0x5f, 0x87, 0x65, 0xe3, 0x79, 0x58, 0x36, 0x5e, 0x84, - 0x65, 0xe3, 0x65, 0x58, 0x36, 0xfe, 0x0e, 0xcb, 0xc6, 0xf7, 0xff, 0x94, 0xe7, 0x1e, 0x2f, 0xeb, - 0xbe, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xfc, 0xf7, 0xf5, 0xe3, 0x0f, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index b78d59aa58afa..bb2cf3450b366 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -45,7 +45,7 @@ message CSIDriver { // The driver name must be 63 characters or less, beginning and ending with // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics between. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the CSI Driver. @@ -55,7 +55,7 @@ message CSIDriver { // CSIDriverList is a collection of CSIDriver objects. message CSIDriverList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -93,8 +93,32 @@ message CSIDriverSpec { // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume + // defined by a CSIVolumeSource, otherwise "false" + // + // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only + // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. + // Other drivers can leave pod info disabled and/or ignore this field. + // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when + // deployed on such a cluster and the deployment determines which mode that is, for example + // via a command line parameter of the driver. // +optional optional bool podInfoOnMount = 2; + + // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage + // defined by the CSI specification and implemented in Kubernetes via the usual + // PV/PVC mechanism. + // The other mode is "Ephemeral". In this mode, volumes are defined inline + // inside the pod spec with CSIVolumeSource and their lifecycle is tied to + // the lifecycle of that pod. A driver has to be aware of this + // because it is only going to get a NodePublishVolume call for such a volume. + // For more information about implementing this mode, see + // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html + // A driver can support one or more of these modes and + // more modes may be added in the future. + // +optional + repeated string volumeLifecycleModes = 3; } // CSINode holds information about all CSI drivers installed on a node. @@ -144,12 +168,16 @@ message CSINodeDriver { // This can be empty if driver does not support topology. // +optional repeated string topologyKeys = 3; + + // allocatable represents the volume resources of a node that are available for scheduling. 
+ // +optional + optional VolumeNodeResources allocatable = 4; } // CSINodeList is a collection of CSINode objects. message CSINodeList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -173,7 +201,7 @@ message CSINodeSpec { // according to etcd is in ObjectMeta.Name. message StorageClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -217,7 +245,7 @@ message StorageClass { // StorageClassList is a collection of storage classes. message StorageClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -231,7 +259,7 @@ message StorageClassList { // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -249,7 +277,7 @@ message VolumeAttachment { // VolumeAttachmentList is a collection of VolumeAttachment objects. message VolumeAttachmentList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -330,3 +358,13 @@ message VolumeError { optional string message = 2; } +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +message VolumeNodeResources { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is nil, then the supported number of volumes on this node is unbounded. + // +optional + optional int32 count = 1; +} + diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index cca50d8209571..fa1bae1d8d5d5 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,7 +33,7 @@ import ( type StorageClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type StorageClass struct { type StorageClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -115,7 +115,7 @@ type VolumeAttachment struct { metav1.TypeMeta `json:",inline"` // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -136,7 +136,7 @@ type VolumeAttachment struct { type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -239,7 +239,7 @@ type CSIDriver struct { // The driver name must be 63 characters or less, beginning and ending with // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics between. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the CSI Driver. @@ -253,7 +253,7 @@ type CSIDriverList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -291,10 +291,59 @@ type CSIDriverSpec struct { // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume + // defined by a CSIVolumeSource, otherwise "false" + // + // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only + // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. + // Other drivers can leave pod info disabled and/or ignore this field. + // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when + // deployed on such a cluster and the deployment determines which mode that is, for example + // via a command line parameter of the driver. // +optional PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` + + // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. 
+ // The default if the list is empty is "Persistent", which is the usage + // defined by the CSI specification and implemented in Kubernetes via the usual + // PV/PVC mechanism. + // The other mode is "Ephemeral". In this mode, volumes are defined inline + // inside the pod spec with CSIVolumeSource and their lifecycle is tied to + // the lifecycle of that pod. A driver has to be aware of this + // because it is only going to get a NodePublishVolume call for such a volume. + // For more information about implementing this mode, see + // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html + // A driver can support one or more of these modes and + // more modes may be added in the future. + // +optional + VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` } +// VolumeLifecycleMode is an enumeration of possible usage modes for a volume +// provided by a CSI driver. More modes may be added in the future. +type VolumeLifecycleMode string + +const ( + // VolumeLifecyclePersistent explicitly confirms that the driver implements + // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not + // set. Such volumes are managed in Kubernetes via the persistent volume + // claim mechanism and have a lifecycle that is independent of the pods which + // use them. + VolumeLifecyclePersistent VolumeLifecycleMode = "Persistent" + + // VolumeLifecycleEphemeral indicates that the driver can be used for + // ephemeral inline volumes. Such volumes are specified inside the pod + // spec with a CSIVolumeSource and, as far as Kubernetes is concerned, have + // a lifecycle that is tied to the lifecycle of the pod. For example, such + // a volume might contain data that gets created specifically for that pod, + // like secrets. + // But how the volume actually gets created and managed is entirely up to + // the driver. It might also use reference counting to share the same volume + // instance among different pods if the CSIVolumeSource of those pods is + // identical. + VolumeLifecycleEphemeral VolumeLifecycleMode = "Ephemeral" +) + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -357,6 +406,20 @@ type CSINodeDriver struct { // This can be empty if driver does not support topology. // +optional TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` + + // allocatable represents the volume resources of a node that are available for scheduling. + // +optional + Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"` +} + +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +type VolumeNodeResources struct { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is nil, then the supported number of volumes on this node is unbounded. 
+ // +optional + Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -366,7 +429,7 @@ type CSINodeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index ec741ecf7021c..fe80a8e50749c 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", - "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the CSI Driver.", } @@ -39,7 +39,7 @@ func (CSIDriver) SwaggerDoc() map[string]string { var map_CSIDriverList = map[string]string{ "": "CSIDriverList is a collection of CSIDriver objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CSIDriver", } @@ -48,9 +48,10 @@ func (CSIDriverList) SwaggerDoc() map[string]string { } var map_CSIDriverSpec = map[string]string{ - "": "CSIDriverSpec is the specification of a CSIDriver.", - "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID)", + "": "CSIDriverSpec is the specification of a CSIDriver.", + "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.", + "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. 
The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" iff the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.", + "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -72,6 +73,7 @@ var map_CSINodeDriver = map[string]string{ "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. 
This can be empty if driver does not support topology.", + "allocatable": "allocatable represents the volume resources of a node that are available for scheduling.", } func (CSINodeDriver) SwaggerDoc() map[string]string { @@ -80,7 +82,7 @@ func (CSINodeDriver) SwaggerDoc() map[string]string { var map_CSINodeList = map[string]string{ "": "CSINodeList is a collection of CSINode objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of CSINode", } @@ -99,7 +101,7 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "provisioner": "Provisioner indicates the type of the provisioner.", "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", @@ -115,7 +117,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of StorageClasses", } @@ -125,7 +127,7 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. 
the external-attacher.", } @@ -136,7 +138,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of VolumeAttachments", } @@ -186,4 +188,13 @@ func (VolumeError) SwaggerDoc() map[string]string { return map_VolumeError } +var map_VolumeNodeResources = map[string]string{ + "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", +} + +func (VolumeNodeResources) SwaggerDoc() map[string]string { + return map_VolumeNodeResources +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index 3059423320740..52433fcdf2c77 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -98,6 +98,11 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { *out = new(bool) **out = **in } + if in.VolumeLifecycleModes != nil { + in, out := &in.VolumeLifecycleModes, &out.VolumeLifecycleModes + *out = make([]VolumeLifecycleMode, len(*in)) + copy(*out, *in) + } return } @@ -146,6 +151,11 @@ func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = new(VolumeNodeResources) + (*in).DeepCopyInto(*out) + } return } @@ -461,3 +471,24 @@ func (in *VolumeError) DeepCopy() *VolumeError { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources. 
+func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources { + if in == nil { + return nil + } + out := new(VolumeNodeResources) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go index 51fb72df3cfc8..3c7ac0060f10f 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go @@ -268,5 +268,21 @@ func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { } } + if in.XListMapKeys != nil { + in, out := &in.XListMapKeys, &out.XListMapKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.XListType != nil { + in, out := &in.XListType, &out.XListType + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return out } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go index 964a6190baa63..52d6ea8663ee6 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go @@ -27,18 +27,19 @@ var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() // SetCRDCondition sets the status condition. It either overwrites the existing one or creates a new one. func SetCRDCondition(crd *CustomResourceDefinition, newCondition CustomResourceDefinitionCondition) { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + existingCondition := FindCRDCondition(crd, newCondition.Type) if existingCondition == nil { - newCondition.LastTransitionTime = metav1.NewTime(time.Now()) crd.Status.Conditions = append(crd.Status.Conditions, newCondition) return } - if existingCondition.Status != newCondition.Status { - existingCondition.Status = newCondition.Status + if existingCondition.Status != newCondition.Status || existingCondition.LastTransitionTime.IsZero() { existingCondition.LastTransitionTime = newCondition.LastTransitionTime } + existingCondition.Status = newCondition.Status existingCondition.Reason = newCondition.Reason existingCondition.Message = newCondition.Message } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go index 9c2798f03076a..8f502e8b15ae4 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go @@ -289,6 +289,11 @@ const ( NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. Terminating CustomResourceDefinitionConditionType = "Terminating" + // KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs + // outside those groups, this condition will not be set. For CRDs inside those groups, the condition will + // be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false. + // See https://github.com/kubernetes/enhancements/pull/1111 for more details. 
+ KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant" ) // CustomResourceDefinitionCondition contains details for the current condition of this pod. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go index 7822393462846..77abe9e36d01c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go @@ -86,6 +86,29 @@ type JSONSchemaProps struct { // - type: string // - ... zero or more XIntOrString bool + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + XListMapKeys []string + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar (or another atomic type). + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + XListType *string } // JSON represents any valid JSON value. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go index f67f44181251d..a4560dc5f6c21 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go @@ -244,5 +244,21 @@ func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { } } + if in.XListMapKeys != nil { + in, out := &in.XListMapKeys, &out.XListMapKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + + if in.XListType != nil { + in, out := &in.XListType, &out.XListType + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return out } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index 54bedf529b6f7..c28384c22a9b1 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -17,60 +17,24 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto - - It has these top-level messages: - ConversionRequest - ConversionResponse - ConversionReview - CustomResourceColumnDefinition - CustomResourceConversion - CustomResourceDefinition - CustomResourceDefinitionCondition - CustomResourceDefinitionList - CustomResourceDefinitionNames - CustomResourceDefinitionSpec - CustomResourceDefinitionStatus - CustomResourceDefinitionVersion - CustomResourceSubresourceScale - CustomResourceSubresourceStatus - CustomResourceSubresources - CustomResourceValidation - ExternalDocumentation - JSON - JSONSchemaProps - JSONSchemaPropsOrArray - JSONSchemaPropsOrBool - JSONSchemaPropsOrStringArray - ServiceReference - WebhookClientConfig -*/ package v1beta1 import ( + encoding_binary "encoding/binary" fmt "fmt" - proto "github.com/gogo/protobuf/proto" - - math "math" - - k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + io "io" + proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" - encoding_binary "encoding/binary" - - strings "strings" - + math "math" + math_bits "math/bits" reflect "reflect" + strings "strings" - io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they are not otherwise used. @@ -84,129 +48,677 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } -func (*ConversionRequest) ProtoMessage() {} -func (*ConversionRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } +func (*ConversionRequest) ProtoMessage() {} +func (*ConversionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{0} +} +func (m *ConversionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionRequest.Merge(m, src) +} +func (m *ConversionRequest) XXX_Size() int { + return m.Size() +} +func (m *ConversionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionRequest proto.InternalMessageInfo + +func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } +func (*ConversionResponse) ProtoMessage() {} +func (*ConversionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{1} +} +func (m *ConversionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionResponse.Merge(m, src) +} +func (m *ConversionResponse) XXX_Size() int { + return m.Size() +} +func (m *ConversionResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_ConversionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ConversionResponse proto.InternalMessageInfo -func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } -func (*ConversionResponse) ProtoMessage() {} -func (*ConversionResponse) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *ConversionReview) Reset() { *m = ConversionReview{} } +func (*ConversionReview) ProtoMessage() {} +func (*ConversionReview) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{2} +} +func (m *ConversionReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConversionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConversionReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConversionReview.Merge(m, src) +} +func (m *ConversionReview) XXX_Size() int { + return m.Size() +} +func (m *ConversionReview) XXX_DiscardUnknown() { + xxx_messageInfo_ConversionReview.DiscardUnknown(m) +} -func (m *ConversionReview) Reset() { *m = ConversionReview{} } -func (*ConversionReview) ProtoMessage() {} -func (*ConversionReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_ConversionReview proto.InternalMessageInfo func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } func (*CustomResourceColumnDefinition) ProtoMessage() {} func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} + return fileDescriptor_98a4cc6918394e53, []int{3} +} +func (m *CustomResourceColumnDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceColumnDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceColumnDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceColumnDefinition.Merge(m, src) +} +func (m *CustomResourceColumnDefinition) XXX_Size() int { + return m.Size() } +func (m *CustomResourceColumnDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceColumnDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceColumnDefinition proto.InternalMessageInfo func (m *CustomResourceConversion) Reset() { *m = CustomResourceConversion{} } func (*CustomResourceConversion) ProtoMessage() {} func (*CustomResourceConversion) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptor_98a4cc6918394e53, []int{4} +} +func (m *CustomResourceConversion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceConversion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceConversion.Merge(m, src) +} +func (m *CustomResourceConversion) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceConversion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceConversion.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceConversion proto.InternalMessageInfo + func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } 
func (*CustomResourceDefinition) ProtoMessage() {} func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptor_98a4cc6918394e53, []int{5} +} +func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *CustomResourceDefinition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinition.Merge(m, src) +} +func (m *CustomResourceDefinition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinition.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } func (*CustomResourceDefinitionCondition) ProtoMessage() {} func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_98a4cc6918394e53, []int{6} +} +func (m *CustomResourceDefinitionCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionCondition.Merge(m, src) +} +func (m *CustomResourceDefinitionCondition) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionCondition) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionCondition.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceDefinitionCondition proto.InternalMessageInfo + func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } func (*CustomResourceDefinitionList) ProtoMessage() {} func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_98a4cc6918394e53, []int{7} +} +func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionList.Merge(m, src) +} +func (m *CustomResourceDefinitionList) XXX_Size() int { + return m.Size() } +func (m *CustomResourceDefinitionList) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionList.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } func (*CustomResourceDefinitionNames) ProtoMessage() {} func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} + return fileDescriptor_98a4cc6918394e53, []int{8} +} +func (m *CustomResourceDefinitionNames) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionNames) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionNames) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionNames.Merge(m, src) +} +func (m *CustomResourceDefinitionNames) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionNames) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionNames.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceDefinitionNames proto.InternalMessageInfo + func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } func (*CustomResourceDefinitionSpec) ProtoMessage() {} func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptor_98a4cc6918394e53, []int{9} +} +func (m *CustomResourceDefinitionSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } +func (m *CustomResourceDefinitionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionSpec.Merge(m, src) +} +func (m *CustomResourceDefinitionSpec) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionSpec proto.InternalMessageInfo func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } func (*CustomResourceDefinitionStatus) ProtoMessage() {} func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptor_98a4cc6918394e53, []int{10} +} +func (m *CustomResourceDefinitionStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionStatus.Merge(m, src) +} +func (m *CustomResourceDefinitionStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceDefinitionStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionStatus.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceDefinitionStatus proto.InternalMessageInfo + func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } func (*CustomResourceDefinitionVersion) ProtoMessage() {} func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptor_98a4cc6918394e53, []int{11} +} +func (m *CustomResourceDefinitionVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceDefinitionVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceDefinitionVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceDefinitionVersion.Merge(m, src) +} +func (m *CustomResourceDefinitionVersion) XXX_Size() int { + return 
m.Size() } +func (m *CustomResourceDefinitionVersion) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceDefinitionVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceDefinitionVersion proto.InternalMessageInfo func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } func (*CustomResourceSubresourceScale) ProtoMessage() {} func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} + return fileDescriptor_98a4cc6918394e53, []int{12} +} +func (m *CustomResourceSubresourceScale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresourceScale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresourceScale) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceScale.Merge(m, src) +} +func (m *CustomResourceSubresourceScale) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceScale) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceScale.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceSubresourceScale proto.InternalMessageInfo + func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } func (*CustomResourceSubresourceStatus) ProtoMessage() {} func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptor_98a4cc6918394e53, []int{13} +} +func (m *CustomResourceSubresourceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } +func (m *CustomResourceSubresourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresourceStatus.Merge(m, src) +} +func (m *CustomResourceSubresourceStatus) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresourceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresourceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceSubresourceStatus proto.InternalMessageInfo func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } func (*CustomResourceSubresources) ProtoMessage() {} func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} + return fileDescriptor_98a4cc6918394e53, []int{14} +} +func (m *CustomResourceSubresources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceSubresources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceSubresources) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceSubresources.Merge(m, src) +} +func (m *CustomResourceSubresources) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceSubresources) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceSubresources.DiscardUnknown(m) } +var xxx_messageInfo_CustomResourceSubresources proto.InternalMessageInfo + func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } func (*CustomResourceValidation) ProtoMessage() {} func 
(*CustomResourceValidation) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} + return fileDescriptor_98a4cc6918394e53, []int{15} +} +func (m *CustomResourceValidation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomResourceValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomResourceValidation) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomResourceValidation.Merge(m, src) +} +func (m *CustomResourceValidation) XXX_Size() int { + return m.Size() +} +func (m *CustomResourceValidation) XXX_DiscardUnknown() { + xxx_messageInfo_CustomResourceValidation.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomResourceValidation proto.InternalMessageInfo + +func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } +func (*ExternalDocumentation) ProtoMessage() {} +func (*ExternalDocumentation) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{16} +} +func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalDocumentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalDocumentation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalDocumentation.Merge(m, src) +} +func (m *ExternalDocumentation) XXX_Size() int { + return m.Size() +} +func (m *ExternalDocumentation) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalDocumentation.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo + +func (m *JSON) Reset() { *m = JSON{} } +func (*JSON) ProtoMessage() {} +func (*JSON) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{17} +} +func (m *JSON) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSON) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSON) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSON.Merge(m, src) +} +func (m *JSON) XXX_Size() int { + return m.Size() +} +func (m *JSON) XXX_DiscardUnknown() { + xxx_messageInfo_JSON.DiscardUnknown(m) +} + +var xxx_messageInfo_JSON proto.InternalMessageInfo + +func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } +func (*JSONSchemaProps) ProtoMessage() {} +func (*JSONSchemaProps) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{18} +} +func (m *JSONSchemaProps) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaProps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaProps) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaProps.Merge(m, src) +} +func (m *JSONSchemaProps) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaProps) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaProps.DiscardUnknown(m) } -func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } -func (*ExternalDocumentation) ProtoMessage() {} -func (*ExternalDocumentation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +var 
xxx_messageInfo_JSONSchemaProps proto.InternalMessageInfo -func (m *JSON) Reset() { *m = JSON{} } -func (*JSON) ProtoMessage() {} -func (*JSON) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } +func (*JSONSchemaPropsOrArray) ProtoMessage() {} +func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{19} +} +func (m *JSONSchemaPropsOrArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrArray) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrArray.DiscardUnknown(m) +} -func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } -func (*JSONSchemaProps) ProtoMessage() {} -func (*JSONSchemaProps) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +var xxx_messageInfo_JSONSchemaPropsOrArray proto.InternalMessageInfo -func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } -func (*JSONSchemaPropsOrArray) ProtoMessage() {} -func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } +func (*JSONSchemaPropsOrBool) ProtoMessage() {} +func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{20} +} +func (m *JSONSchemaPropsOrBool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrBool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrBool) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrBool.Merge(m, src) +} +func (m *JSONSchemaPropsOrBool) XXX_Size() int { + return m.Size() +} +func (m *JSONSchemaPropsOrBool) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrBool.DiscardUnknown(m) +} -func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } -func (*JSONSchemaPropsOrBool) ProtoMessage() {} -func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +var xxx_messageInfo_JSONSchemaPropsOrBool proto.InternalMessageInfo func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} + return fileDescriptor_98a4cc6918394e53, []int{21} +} +func (m *JSONSchemaPropsOrStringArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONSchemaPropsOrStringArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONSchemaPropsOrStringArray.Merge(m, src) +} +func (m *JSONSchemaPropsOrStringArray) XXX_Size() 
int { + return m.Size() +} +func (m *JSONSchemaPropsOrStringArray) XXX_DiscardUnknown() { + xxx_messageInfo_JSONSchemaPropsOrStringArray.DiscardUnknown(m) } -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +var xxx_messageInfo_JSONSchemaPropsOrStringArray proto.InternalMessageInfo -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{22} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_98a4cc6918394e53, []int{23} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { proto.RegisterType((*ConversionRequest)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionRequest") @@ -228,16 +740,214 @@ func init() { proto.RegisterType((*ExternalDocumentation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ExternalDocumentation") proto.RegisterType((*JSON)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSON") proto.RegisterType((*JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps") + proto.RegisterMapType((JSONSchemaDefinitions)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.DefinitionsEntry") + proto.RegisterMapType((JSONSchemaDependencies)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.DependenciesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.PatternPropertiesEntry") + proto.RegisterMapType((map[string]JSONSchemaProps)(nil), 
"k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.PropertiesEntry") proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrArray") proto.RegisterType((*JSONSchemaPropsOrBool)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrBool") proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrStringArray") proto.RegisterType((*ServiceReference)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ServiceReference") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.WebhookClientConfig") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto", fileDescriptor_98a4cc6918394e53) +} + +var fileDescriptor_98a4cc6918394e53 = []byte{ + // 2955 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcf, 0x73, 0x23, 0x47, + 0xf5, 0xdf, 0x91, 0x2c, 0x5b, 0x6e, 0xdb, 0x6b, 0xbb, 0x77, 0xed, 0xcc, 0x3a, 0x1b, 0xcb, 0xab, + 0x7c, 0xb3, 0x5f, 0x27, 0xec, 0xca, 0xc9, 0x92, 0x90, 0x90, 0x2a, 0x8a, 0xb2, 0x6c, 0x27, 0x38, + 0x59, 0x5b, 0xa6, 0xb5, 0x9b, 0x18, 0xf2, 0xb3, 0xad, 0x69, 0xc9, 0xb3, 0x9e, 0x5f, 0x3b, 0x3d, + 0x23, 0xdb, 0x15, 0xa0, 0xf8, 0x51, 0x29, 0x28, 0x0a, 0x08, 0x45, 0x72, 0xa1, 0x0a, 0x0e, 0x81, + 0xe2, 0xc2, 0x01, 0x0e, 0x70, 0x83, 0x3f, 0x20, 0xc7, 0x14, 0xa7, 0x1c, 0x28, 0x15, 0xab, 0x5c, + 0x39, 0x52, 0x05, 0xe5, 0x13, 0xd5, 0x3f, 0xa6, 0x67, 0x34, 0x92, 0x76, 0x5d, 0x59, 0x29, 0xcb, + 0xcd, 0x7a, 0xbf, 0x3e, 0xaf, 0x5f, 0xbf, 0x7e, 0xfd, 0xfa, 0x8d, 0x41, 0xfd, 0xe0, 0x39, 0x5a, + 0x32, 0xdd, 0x95, 0x83, 0x70, 0x8f, 0xf8, 0x0e, 0x09, 0x08, 0x5d, 0x69, 0x12, 0xc7, 0x70, 0xfd, + 0x15, 0xc9, 0xc0, 0x9e, 0x49, 0x8e, 0x02, 0xe2, 0x50, 0xd3, 0x75, 0xe8, 0x55, 0xec, 0x99, 0x94, + 0xf8, 0x4d, 0xe2, 0xaf, 0x78, 0x07, 0x0d, 0xc6, 0xa3, 0x9d, 0x02, 0x2b, 0xcd, 0xa7, 0xf6, 0x48, + 0x80, 0x9f, 0x5a, 0x69, 0x10, 0x87, 0xf8, 0x38, 0x20, 0x46, 0xc9, 0xf3, 0xdd, 0xc0, 0x85, 0x5f, + 0x11, 0xe6, 0x4a, 0x1d, 0xd2, 0x6f, 0x29, 0x73, 0x25, 0xef, 0xa0, 0xc1, 0x78, 0xb4, 0x53, 0xa0, + 0x24, 0xcd, 0x2d, 0x5c, 0x6d, 0x98, 0xc1, 0x7e, 0xb8, 0x57, 0xaa, 0xb9, 0xf6, 0x4a, 0xc3, 0x6d, + 0xb8, 0x2b, 0xdc, 0xea, 0x5e, 0x58, 0xe7, 0xbf, 0xf8, 0x0f, 0xfe, 0x97, 0x40, 0x5b, 0x78, 0x3a, + 0x76, 0xde, 0xc6, 0xb5, 0x7d, 0xd3, 0x21, 0xfe, 0x71, 0xec, 0xb1, 0x4d, 0x02, 0xbc, 0xd2, 0xec, + 0xf2, 0x71, 0x61, 0xa5, 0x9f, 0x96, 0x1f, 0x3a, 0x81, 0x69, 0x93, 0x2e, 0x85, 0x2f, 0xdd, 0x4b, + 0x81, 0xd6, 0xf6, 0x89, 0x8d, 0xd3, 0x7a, 0xc5, 0x13, 0x0d, 0xcc, 0xae, 0xb9, 0x4e, 0x93, 0xf8, + 0x6c, 0x95, 0x88, 0xdc, 0x0e, 0x09, 0x0d, 0x60, 0x19, 0x64, 0x43, 0xd3, 0xd0, 0xb5, 0x25, 0x6d, + 0x79, 0xbc, 0xfc, 0xe4, 0x47, 0xad, 0xc2, 0x99, 0x76, 0xab, 0x90, 0xbd, 0xb9, 0xb9, 0x7e, 0xd2, + 0x2a, 0x5c, 0xea, 0x87, 0x14, 0x1c, 0x7b, 0x84, 0x96, 0x6e, 0x6e, 0xae, 0x23, 0xa6, 0x0c, 0x5f, + 0x04, 0xb3, 0x06, 0xa1, 0xa6, 0x4f, 0x8c, 0xd5, 0x9d, 0xcd, 0x57, 0x84, 0x7d, 0x3d, 0xc3, 0x2d, + 0x5e, 0x90, 0x16, 0x67, 0xd7, 0xd3, 0x02, 0xa8, 0x5b, 0x07, 0xee, 0x82, 0x31, 0x77, 0xef, 0x16, + 0xa9, 0x05, 0x54, 0xcf, 0x2e, 0x65, 0x97, 0x27, 0xae, 0x5d, 0x2d, 0xc5, 0x3b, 0xa8, 0x5c, 0xe0, + 0xdb, 0x26, 0x17, 0x5b, 0x42, 0xf8, 0x70, 0x23, 0xda, 0xb9, 0xf2, 0xb4, 0x44, 0x1b, 0xab, 0x08, + 0x2b, 0x28, 0x32, 0x57, 0xfc, 0x6d, 
0x06, 0xc0, 0xe4, 0xe2, 0xa9, 0xe7, 0x3a, 0x94, 0x0c, 0x64, + 0xf5, 0x14, 0xcc, 0xd4, 0xb8, 0xe5, 0x80, 0x18, 0x12, 0x57, 0xcf, 0x7c, 0x16, 0xef, 0x75, 0x89, + 0x3f, 0xb3, 0x96, 0x32, 0x87, 0xba, 0x00, 0xe0, 0x0d, 0x30, 0xea, 0x13, 0x1a, 0x5a, 0x81, 0x9e, + 0x5d, 0xd2, 0x96, 0x27, 0xae, 0x5d, 0xe9, 0x0b, 0xc5, 0xf3, 0x9b, 0x25, 0x5f, 0xa9, 0xf9, 0x54, + 0xa9, 0x1a, 0xe0, 0x20, 0xa4, 0xe5, 0xb3, 0x12, 0x69, 0x14, 0x71, 0x1b, 0x48, 0xda, 0x2a, 0xfe, + 0x28, 0x03, 0x66, 0x92, 0x51, 0x6a, 0x9a, 0xe4, 0x10, 0x1e, 0x82, 0x31, 0x5f, 0x24, 0x0b, 0x8f, + 0xd3, 0xc4, 0xb5, 0x9d, 0xd2, 0x7d, 0x1d, 0xab, 0x52, 0x57, 0x12, 0x96, 0x27, 0xd8, 0x9e, 0xc9, + 0x1f, 0x28, 0x42, 0x83, 0xef, 0x80, 0xbc, 0x2f, 0x37, 0x8a, 0x67, 0xd3, 0xc4, 0xb5, 0xaf, 0x0f, + 0x10, 0x59, 0x18, 0x2e, 0x4f, 0xb6, 0x5b, 0x85, 0x7c, 0xf4, 0x0b, 0x29, 0xc0, 0xe2, 0xfb, 0x19, + 0xb0, 0xb8, 0x16, 0xd2, 0xc0, 0xb5, 0x11, 0xa1, 0x6e, 0xe8, 0xd7, 0xc8, 0x9a, 0x6b, 0x85, 0xb6, + 0xb3, 0x4e, 0xea, 0xa6, 0x63, 0x06, 0x2c, 0x5b, 0x97, 0xc0, 0x88, 0x83, 0x6d, 0x22, 0xb3, 0x67, + 0x52, 0xc6, 0x74, 0x64, 0x1b, 0xdb, 0x04, 0x71, 0x0e, 0x93, 0x60, 0xc9, 0x22, 0xcf, 0x82, 0x92, + 0xb8, 0x71, 0xec, 0x11, 0xc4, 0x39, 0xf0, 0x32, 0x18, 0xad, 0xbb, 0xbe, 0x8d, 0xc5, 0x3e, 0x8e, + 0xc7, 0x3b, 0xf3, 0x02, 0xa7, 0x22, 0xc9, 0x85, 0xcf, 0x80, 0x09, 0x83, 0xd0, 0x9a, 0x6f, 0x7a, + 0x0c, 0x5a, 0x1f, 0xe1, 0xc2, 0xe7, 0xa4, 0xf0, 0xc4, 0x7a, 0xcc, 0x42, 0x49, 0x39, 0x78, 0x05, + 0xe4, 0x3d, 0xdf, 0x74, 0x7d, 0x33, 0x38, 0xd6, 0x73, 0x4b, 0xda, 0x72, 0xae, 0x3c, 0x23, 0x75, + 0xf2, 0x3b, 0x92, 0x8e, 0x94, 0x04, 0x5c, 0x02, 0xf9, 0x97, 0xaa, 0x95, 0xed, 0x1d, 0x1c, 0xec, + 0xeb, 0xa3, 0x1c, 0x61, 0x84, 0x49, 0xa3, 0xfc, 0x2d, 0x49, 0x2d, 0xfe, 0x3d, 0x03, 0xf4, 0x74, + 0x54, 0xa2, 0x90, 0xc2, 0x17, 0x40, 0x9e, 0x06, 0xac, 0xe2, 0x34, 0x8e, 0x65, 0x4c, 0x9e, 0x88, + 0xc0, 0xaa, 0x92, 0x7e, 0xd2, 0x2a, 0xcc, 0xc7, 0x1a, 0x11, 0x95, 0xc7, 0x43, 0xe9, 0xc2, 0x5f, + 0x6b, 0xe0, 0xdc, 0x21, 0xd9, 0xdb, 0x77, 0xdd, 0x83, 0x35, 0xcb, 0x24, 0x4e, 0xb0, 0xe6, 0x3a, + 0x75, 0xb3, 0x21, 0x73, 0x00, 0xdd, 0x67, 0x0e, 0xbc, 0xda, 0x6d, 0xb9, 0xfc, 0x50, 0xbb, 0x55, + 0x38, 0xd7, 0x83, 0x81, 0x7a, 0xf9, 0x01, 0x77, 0x81, 0x5e, 0x4b, 0x1d, 0x12, 0x59, 0xc0, 0x44, + 0xd9, 0x1a, 0x2f, 0x5f, 0x6c, 0xb7, 0x0a, 0xfa, 0x5a, 0x1f, 0x19, 0xd4, 0x57, 0xbb, 0xf8, 0x83, + 0x6c, 0x3a, 0xbc, 0x89, 0x74, 0x7b, 0x1b, 0xe4, 0xd9, 0x31, 0x36, 0x70, 0x80, 0xe5, 0x41, 0x7c, + 0xf2, 0x74, 0x87, 0x5e, 0xd4, 0x8c, 0x2d, 0x12, 0xe0, 0x32, 0x94, 0x1b, 0x02, 0x62, 0x1a, 0x52, + 0x56, 0xe1, 0xb7, 0xc1, 0x08, 0xf5, 0x48, 0x4d, 0x06, 0xfa, 0xb5, 0xfb, 0x3d, 0x6c, 0x7d, 0x16, + 0x52, 0xf5, 0x48, 0x2d, 0x3e, 0x0b, 0xec, 0x17, 0xe2, 0xb0, 0xf0, 0x5d, 0x0d, 0x8c, 0x52, 0x5e, + 0xa0, 0x64, 0x51, 0x7b, 0x63, 0x58, 0x1e, 0xa4, 0xaa, 0xa0, 0xf8, 0x8d, 0x24, 0x78, 0xf1, 0x5f, + 0x19, 0x70, 0xa9, 0x9f, 0xea, 0x9a, 0xeb, 0x18, 0x62, 0x3b, 0x36, 0xe5, 0xd9, 0x16, 0x99, 0xfe, + 0x4c, 0xf2, 0x6c, 0x9f, 0xb4, 0x0a, 0x8f, 0xdd, 0xd3, 0x40, 0xa2, 0x08, 0x7c, 0x59, 0xad, 0x5b, + 0x14, 0x8a, 0x4b, 0x9d, 0x8e, 0x9d, 0xb4, 0x0a, 0xd3, 0x4a, 0xad, 0xd3, 0x57, 0xd8, 0x04, 0xd0, + 0xc2, 0x34, 0xb8, 0xe1, 0x63, 0x87, 0x0a, 0xb3, 0xa6, 0x4d, 0x64, 0xf8, 0x9e, 0x38, 0x5d, 0x7a, + 0x30, 0x8d, 0xf2, 0x82, 0x84, 0x84, 0xd7, 0xbb, 0xac, 0xa1, 0x1e, 0x08, 0xac, 0x6e, 0xf9, 0x04, + 0x53, 0x55, 0x8a, 0x12, 0x37, 0x0a, 0xa3, 0x22, 0xc9, 0x85, 0x8f, 0x83, 0x31, 0x9b, 0x50, 0x8a, + 0x1b, 0x84, 0xd7, 0x9f, 0xf1, 0xf8, 0x8a, 0xde, 0x12, 0x64, 0x14, 0xf1, 0x59, 0x7f, 0x72, 0xb1, + 0x5f, 0xd4, 0xae, 0x9b, 0x34, 0x80, 0xaf, 0x77, 0x1d, 0x80, 
0xd2, 0xe9, 0x56, 0xc8, 0xb4, 0x79, + 0xfa, 0xab, 0xe2, 0x17, 0x51, 0x12, 0xc9, 0xff, 0x2d, 0x90, 0x33, 0x03, 0x62, 0x47, 0x77, 0xf7, + 0xab, 0x43, 0xca, 0xbd, 0xf2, 0x94, 0xf4, 0x21, 0xb7, 0xc9, 0xd0, 0x90, 0x00, 0x2d, 0xfe, 0x2e, + 0x03, 0x1e, 0xe9, 0xa7, 0xc2, 0x2e, 0x14, 0xca, 0x22, 0xee, 0x59, 0xa1, 0x8f, 0x2d, 0x99, 0x71, + 0x2a, 0xe2, 0x3b, 0x9c, 0x8a, 0x24, 0x97, 0x95, 0x7c, 0x6a, 0x3a, 0x8d, 0xd0, 0xc2, 0xbe, 0x4c, + 0x27, 0xb5, 0xea, 0xaa, 0xa4, 0x23, 0x25, 0x01, 0x4b, 0x00, 0xd0, 0x7d, 0xd7, 0x0f, 0x38, 0x86, + 0xac, 0x5e, 0x67, 0x59, 0x81, 0xa8, 0x2a, 0x2a, 0x4a, 0x48, 0xb0, 0x1b, 0xed, 0xc0, 0x74, 0x0c, + 0xb9, 0xeb, 0xea, 0x14, 0xbf, 0x6c, 0x3a, 0x06, 0xe2, 0x1c, 0x86, 0x6f, 0x99, 0x34, 0x60, 0x14, + 0xb9, 0xe5, 0x1d, 0x51, 0xe7, 0x92, 0x4a, 0x82, 0xe1, 0xd7, 0x58, 0xd5, 0x77, 0x7d, 0x93, 0x50, + 0x7d, 0x34, 0xc6, 0x5f, 0x53, 0x54, 0x94, 0x90, 0x28, 0xfe, 0x33, 0xdf, 0x3f, 0x49, 0x58, 0x29, + 0x81, 0x8f, 0x82, 0x5c, 0xc3, 0x77, 0x43, 0x4f, 0x46, 0x49, 0x45, 0xfb, 0x45, 0x46, 0x44, 0x82, + 0xc7, 0xb2, 0xb2, 0xd9, 0xd1, 0xa6, 0xaa, 0xac, 0x8c, 0x9a, 0xd3, 0x88, 0x0f, 0xbf, 0xa7, 0x81, + 0x9c, 0x23, 0x83, 0xc3, 0x52, 0xee, 0xf5, 0x21, 0xe5, 0x05, 0x0f, 0x6f, 0xec, 0xae, 0x88, 0xbc, + 0x40, 0x86, 0x4f, 0x83, 0x1c, 0xad, 0xb9, 0x1e, 0x91, 0x51, 0x5f, 0x8c, 0x84, 0xaa, 0x8c, 0x78, + 0xd2, 0x2a, 0x4c, 0x45, 0xe6, 0x38, 0x01, 0x09, 0x61, 0xf8, 0x43, 0x0d, 0x80, 0x26, 0xb6, 0x4c, + 0x03, 0xf3, 0x96, 0x21, 0xc7, 0xdd, 0x1f, 0x6c, 0x5a, 0xbf, 0xa2, 0xcc, 0x8b, 0x4d, 0x8b, 0x7f, + 0xa3, 0x04, 0x34, 0x7c, 0x4f, 0x03, 0x93, 0x34, 0xdc, 0xf3, 0xa5, 0x16, 0xe5, 0xcd, 0xc5, 0xc4, + 0xb5, 0x6f, 0x0c, 0xd4, 0x97, 0x6a, 0x02, 0xa0, 0x3c, 0xd3, 0x6e, 0x15, 0x26, 0x93, 0x14, 0xd4, + 0xe1, 0x00, 0xfc, 0x89, 0x06, 0xf2, 0xcd, 0xe8, 0xce, 0x1e, 0xe3, 0x07, 0xfe, 0xcd, 0x21, 0x6d, + 0xac, 0xcc, 0xa8, 0xf8, 0x14, 0xa8, 0x3e, 0x40, 0x79, 0x00, 0xff, 0xa2, 0x01, 0x1d, 0x1b, 0xa2, + 0xc0, 0x63, 0x6b, 0xc7, 0x37, 0x9d, 0x80, 0xf8, 0xa2, 0xdf, 0xa4, 0x7a, 0x9e, 0xbb, 0x37, 0xd8, + 0xbb, 0x30, 0xdd, 0xcb, 0x96, 0x97, 0xa4, 0x77, 0xfa, 0x6a, 0x1f, 0x37, 0x50, 0x5f, 0x07, 0x79, + 0xa2, 0xc5, 0x2d, 0x8d, 0x3e, 0x3e, 0x84, 0x44, 0x8b, 0x7b, 0x29, 0x59, 0x1d, 0xe2, 0x0e, 0x2a, + 0x01, 0x0d, 0x2b, 0x60, 0xce, 0xf3, 0x09, 0x07, 0xb8, 0xe9, 0x1c, 0x38, 0xee, 0xa1, 0xf3, 0x82, + 0x49, 0x2c, 0x83, 0xea, 0x60, 0x49, 0x5b, 0xce, 0x97, 0x2f, 0xb4, 0x5b, 0x85, 0xb9, 0x9d, 0x5e, + 0x02, 0xa8, 0xb7, 0x5e, 0xf1, 0xbd, 0x6c, 0xfa, 0x15, 0x90, 0xee, 0x22, 0xe0, 0x07, 0x62, 0xf5, + 0x22, 0x36, 0x54, 0xd7, 0xf8, 0x6e, 0xbd, 0x3d, 0xa4, 0x64, 0x52, 0x6d, 0x40, 0xdc, 0xc9, 0x29, + 0x12, 0x45, 0x09, 0x3f, 0xe0, 0x2f, 0x35, 0x30, 0x85, 0x6b, 0x35, 0xe2, 0x05, 0xc4, 0x10, 0xc5, + 0x3d, 0xf3, 0x39, 0xd4, 0xaf, 0x39, 0xe9, 0xd5, 0xd4, 0x6a, 0x12, 0x1a, 0x75, 0x7a, 0x02, 0x9f, + 0x07, 0x67, 0x69, 0xe0, 0xfa, 0xc4, 0x48, 0xb5, 0xcd, 0xb0, 0xdd, 0x2a, 0x9c, 0xad, 0x76, 0x70, + 0x50, 0x4a, 0xb2, 0xf8, 0xe9, 0x08, 0x28, 0xdc, 0xe3, 0xa8, 0x9d, 0xe2, 0x61, 0x76, 0x19, 0x8c, + 0xf2, 0xe5, 0x1a, 0x3c, 0x2a, 0xf9, 0x44, 0x2b, 0xc8, 0xa9, 0x48, 0x72, 0xd9, 0x45, 0xc1, 0xf0, + 0x59, 0xfb, 0x92, 0xe5, 0x82, 0xea, 0xa2, 0xa8, 0x0a, 0x32, 0x8a, 0xf8, 0xf0, 0x1d, 0x30, 0x2a, + 0x06, 0x2f, 0xbc, 0x4a, 0x0f, 0xb1, 0xd2, 0x02, 0xee, 0x27, 0x87, 0x42, 0x12, 0xb2, 0xbb, 0xc2, + 0xe6, 0x1e, 0x74, 0x85, 0xbd, 0x6b, 0x49, 0x1b, 0xfd, 0x1f, 0x2f, 0x69, 0xc5, 0x7f, 0x6b, 0xe9, + 0x73, 0x9f, 0x58, 0x6a, 0xb5, 0x86, 0x2d, 0x02, 0xd7, 0xc1, 0x0c, 0x7b, 0xb5, 0x20, 0xe2, 0x59, + 0x66, 0x0d, 0x53, 0xfe, 0x68, 0x16, 0x09, 0xa7, 0xe6, 0x38, 0xd5, 0x14, 0x1f, 0x75, 
0x69, 0xc0, + 0x97, 0x00, 0x14, 0x9d, 0x7c, 0x87, 0x1d, 0xd1, 0x94, 0xa8, 0x9e, 0xbc, 0xda, 0x25, 0x81, 0x7a, + 0x68, 0xc1, 0x35, 0x30, 0x6b, 0xe1, 0x3d, 0x62, 0x55, 0x89, 0x45, 0x6a, 0x81, 0xeb, 0x73, 0x53, + 0x62, 0xac, 0x30, 0xd7, 0x6e, 0x15, 0x66, 0xaf, 0xa7, 0x99, 0xa8, 0x5b, 0xbe, 0x78, 0x29, 0x7d, + 0xbc, 0x92, 0x0b, 0x17, 0xef, 0xa3, 0x0f, 0x33, 0x60, 0xa1, 0x7f, 0x66, 0xc0, 0xef, 0xc7, 0xcf, + 0x38, 0xd1, 0xa5, 0xbf, 0x39, 0xac, 0x2c, 0x94, 0xef, 0x38, 0xd0, 0xfd, 0x86, 0x83, 0xdf, 0x61, + 0x2d, 0x13, 0xb6, 0xa2, 0xc1, 0xd1, 0x1b, 0x43, 0x73, 0x81, 0x81, 0x94, 0xc7, 0x45, 0x37, 0x86, + 0x2d, 0xde, 0x7c, 0x61, 0x8b, 0x14, 0x7f, 0xaf, 0xa5, 0x5f, 0xf2, 0xf1, 0x09, 0x86, 0x3f, 0xd5, + 0xc0, 0xb4, 0xeb, 0x11, 0x67, 0x75, 0x67, 0xf3, 0x95, 0x2f, 0x8a, 0x93, 0x2c, 0x43, 0xb5, 0x7d, + 0x9f, 0x7e, 0xbe, 0x54, 0xad, 0x6c, 0x0b, 0x83, 0x3b, 0xbe, 0xeb, 0xd1, 0xf2, 0xb9, 0x76, 0xab, + 0x30, 0x5d, 0xe9, 0x84, 0x42, 0x69, 0xec, 0xa2, 0x0d, 0xe6, 0x36, 0x8e, 0x02, 0xe2, 0x3b, 0xd8, + 0x5a, 0x77, 0x6b, 0xa1, 0x4d, 0x9c, 0x40, 0x38, 0x9a, 0x9a, 0x3a, 0x69, 0xa7, 0x9c, 0x3a, 0x3d, + 0x02, 0xb2, 0xa1, 0x6f, 0xc9, 0x2c, 0x9e, 0x50, 0x53, 0x55, 0x74, 0x1d, 0x31, 0x7a, 0xf1, 0x12, + 0x18, 0x61, 0x7e, 0xc2, 0x0b, 0x20, 0xeb, 0xe3, 0x43, 0x6e, 0x75, 0xb2, 0x3c, 0xc6, 0x44, 0x10, + 0x3e, 0x44, 0x8c, 0x56, 0xfc, 0x4f, 0x01, 0x4c, 0xa7, 0xd6, 0x02, 0x17, 0x40, 0x46, 0x8d, 0x6a, + 0x81, 0x34, 0x9a, 0xd9, 0x5c, 0x47, 0x19, 0xd3, 0x80, 0xcf, 0xaa, 0xe2, 0x2b, 0x40, 0x0b, 0xaa, + 0x9e, 0x73, 0x2a, 0xeb, 0x91, 0x63, 0x73, 0xcc, 0x91, 0xa8, 0x70, 0x32, 0x1f, 0x48, 0x5d, 0x9e, + 0x12, 0xe1, 0x03, 0xa9, 0x23, 0x46, 0xfb, 0xac, 0x23, 0xb7, 0x68, 0xe6, 0x97, 0x3b, 0xc5, 0xcc, + 0x6f, 0xf4, 0xae, 0x33, 0xbf, 0x47, 0x41, 0x2e, 0x30, 0x03, 0x8b, 0xe8, 0x63, 0x9d, 0x4f, 0x99, + 0x1b, 0x8c, 0x88, 0x04, 0x0f, 0xde, 0x02, 0x63, 0x06, 0xa9, 0xe3, 0xd0, 0x0a, 0xf4, 0x3c, 0x4f, + 0xa1, 0xb5, 0x01, 0xa4, 0x90, 0x18, 0xc8, 0xae, 0x0b, 0xbb, 0x28, 0x02, 0x80, 0x8f, 0x81, 0x31, + 0x1b, 0x1f, 0x99, 0x76, 0x68, 0xf3, 0x26, 0x4f, 0x13, 0x62, 0x5b, 0x82, 0x84, 0x22, 0x1e, 0xab, + 0x8c, 0xe4, 0xa8, 0x66, 0x85, 0xd4, 0x6c, 0x12, 0xc9, 0x94, 0x0d, 0x98, 0xaa, 0x8c, 0x1b, 0x29, + 0x3e, 0xea, 0xd2, 0xe0, 0x60, 0xa6, 0xc3, 0x95, 0x27, 0x12, 0x60, 0x82, 0x84, 0x22, 0x5e, 0x27, + 0x98, 0x94, 0x9f, 0xec, 0x07, 0x26, 0x95, 0xbb, 0x34, 0xe0, 0x17, 0xc0, 0xb8, 0x8d, 0x8f, 0xae, + 0x13, 0xa7, 0x11, 0xec, 0xeb, 0x53, 0x4b, 0xda, 0x72, 0xb6, 0x3c, 0xd5, 0x6e, 0x15, 0xc6, 0xb7, + 0x22, 0x22, 0x8a, 0xf9, 0x5c, 0xd8, 0x74, 0xa4, 0xf0, 0xd9, 0x84, 0x70, 0x44, 0x44, 0x31, 0x9f, + 0x75, 0x10, 0x1e, 0x0e, 0xd8, 0xe1, 0xd2, 0xa7, 0x3b, 0x9f, 0x9a, 0x3b, 0x82, 0x8c, 0x22, 0x3e, + 0x5c, 0x06, 0x79, 0x1b, 0x1f, 0xf1, 0xb1, 0x80, 0x3e, 0xc3, 0xcd, 0xf2, 0xe1, 0xf4, 0x96, 0xa4, + 0x21, 0xc5, 0xe5, 0x92, 0xa6, 0x23, 0x24, 0x67, 0x13, 0x92, 0x92, 0x86, 0x14, 0x97, 0x25, 0x71, + 0xe8, 0x98, 0xb7, 0x43, 0x22, 0x84, 0x21, 0x8f, 0x8c, 0x4a, 0xe2, 0x9b, 0x31, 0x0b, 0x25, 0xe5, + 0xd8, 0xb3, 0xdc, 0x0e, 0xad, 0xc0, 0xf4, 0x2c, 0x52, 0xa9, 0xeb, 0xe7, 0x78, 0xfc, 0x79, 0xe3, + 0xbd, 0xa5, 0xa8, 0x28, 0x21, 0x01, 0x09, 0x18, 0x21, 0x4e, 0x68, 0xeb, 0xe7, 0xf9, 0xc5, 0x3e, + 0x90, 0x14, 0x54, 0x27, 0x67, 0xc3, 0x09, 0x6d, 0xc4, 0xcd, 0xc3, 0x67, 0xc1, 0x94, 0x8d, 0x8f, + 0x58, 0x39, 0x20, 0x7e, 0x60, 0x12, 0xaa, 0xcf, 0xf1, 0xc5, 0xcf, 0xb2, 0x8e, 0x73, 0x2b, 0xc9, + 0x40, 0x9d, 0x72, 0x5c, 0xd1, 0x74, 0x12, 0x8a, 0xf3, 0x09, 0xc5, 0x24, 0x03, 0x75, 0xca, 0xb1, + 0x48, 0xfb, 0xe4, 0x76, 0x68, 0xfa, 0xc4, 0xd0, 0x1f, 0xe2, 0x4d, 0xaa, 0xfc, 0x60, 0x20, 0x68, + 0x48, 0x71, 
0x61, 0x33, 0x9a, 0x1f, 0xe9, 0xfc, 0x18, 0xde, 0x1c, 0x6c, 0x25, 0xaf, 0xf8, 0xab, + 0xbe, 0x8f, 0x8f, 0xc5, 0x4d, 0x93, 0x9c, 0x1c, 0x41, 0x0a, 0x72, 0xd8, 0xb2, 0x2a, 0x75, 0xfd, + 0x02, 0x8f, 0xfd, 0xa0, 0x6f, 0x10, 0x55, 0x75, 0x56, 0x19, 0x08, 0x12, 0x58, 0x0c, 0xd4, 0x75, + 0x58, 0x6a, 0x2c, 0x0c, 0x17, 0xb4, 0xc2, 0x40, 0x90, 0xc0, 0xe2, 0x2b, 0x75, 0x8e, 0x2b, 0x75, + 0xfd, 0xe1, 0x21, 0xaf, 0x94, 0x81, 0x20, 0x81, 0x05, 0x4d, 0x90, 0x75, 0xdc, 0x40, 0xbf, 0x38, + 0x94, 0xeb, 0x99, 0x5f, 0x38, 0xdb, 0x6e, 0x80, 0x18, 0x06, 0xfc, 0x85, 0x06, 0x80, 0x17, 0xa7, + 0xe8, 0x23, 0x03, 0x19, 0x4b, 0xa4, 0x20, 0x4b, 0x71, 0x6e, 0x6f, 0x38, 0x81, 0x7f, 0x1c, 0xbf, + 0x23, 0x13, 0x67, 0x20, 0xe1, 0x05, 0xfc, 0x8d, 0x06, 0xce, 0x27, 0xdb, 0x64, 0xe5, 0xde, 0x22, + 0x8f, 0xc8, 0x8d, 0x41, 0xa7, 0x79, 0xd9, 0x75, 0xad, 0xb2, 0xde, 0x6e, 0x15, 0xce, 0xaf, 0xf6, + 0x40, 0x45, 0x3d, 0x7d, 0x81, 0x7f, 0xd0, 0xc0, 0xac, 0xac, 0xa2, 0x09, 0x0f, 0x0b, 0x3c, 0x80, + 0x64, 0xd0, 0x01, 0x4c, 0xe3, 0x88, 0x38, 0xaa, 0x0f, 0xdd, 0x5d, 0x7c, 0xd4, 0xed, 0x1a, 0xfc, + 0xb3, 0x06, 0x26, 0x0d, 0xe2, 0x11, 0xc7, 0x20, 0x4e, 0x8d, 0xf9, 0xba, 0x34, 0x90, 0xb1, 0x41, + 0xda, 0xd7, 0xf5, 0x04, 0x84, 0x70, 0xb3, 0x24, 0xdd, 0x9c, 0x4c, 0xb2, 0x4e, 0x5a, 0x85, 0xf9, + 0x58, 0x35, 0xc9, 0x41, 0x1d, 0x5e, 0xc2, 0xf7, 0x35, 0x30, 0x1d, 0x6f, 0x80, 0xb8, 0x52, 0x2e, + 0x0d, 0x31, 0x0f, 0x78, 0xfb, 0xba, 0xda, 0x09, 0x88, 0xd2, 0x1e, 0xc0, 0x3f, 0x6a, 0xac, 0x53, + 0x8b, 0xde, 0x7d, 0x54, 0x2f, 0xf2, 0x58, 0xbe, 0x35, 0xf0, 0x58, 0x2a, 0x04, 0x11, 0xca, 0x2b, + 0x71, 0x2b, 0xa8, 0x38, 0x27, 0xad, 0xc2, 0x5c, 0x32, 0x92, 0x8a, 0x81, 0x92, 0x1e, 0xc2, 0x1f, + 0x6b, 0x60, 0x92, 0xc4, 0x1d, 0x37, 0xd5, 0x1f, 0x1d, 0x48, 0x10, 0x7b, 0x36, 0xf1, 0xe2, 0xa5, + 0x9e, 0x60, 0x51, 0xd4, 0x81, 0xcd, 0x3a, 0x48, 0x72, 0x84, 0x6d, 0xcf, 0x22, 0xfa, 0xff, 0x0d, + 0xb8, 0x83, 0xdc, 0x10, 0x76, 0x51, 0x04, 0x00, 0xaf, 0x80, 0xbc, 0x13, 0x5a, 0x16, 0xde, 0xb3, + 0x88, 0xfe, 0x18, 0xef, 0x45, 0xd4, 0x58, 0x74, 0x5b, 0xd2, 0x91, 0x92, 0x80, 0x75, 0xb0, 0x74, + 0xf4, 0xb2, 0xfa, 0x17, 0xa1, 0x9e, 0x83, 0x3b, 0xfd, 0x32, 0xb7, 0xb2, 0xd0, 0x6e, 0x15, 0xe6, + 0x77, 0x7b, 0x8f, 0xf6, 0xee, 0x69, 0x03, 0xbe, 0x06, 0x1e, 0x4e, 0xc8, 0x6c, 0xd8, 0x7b, 0xc4, + 0x30, 0x88, 0x11, 0x3d, 0xdc, 0xf4, 0xff, 0x17, 0xc3, 0xc3, 0xe8, 0x80, 0xef, 0xa6, 0x05, 0xd0, + 0xdd, 0xb4, 0xe1, 0x75, 0x30, 0x9f, 0x60, 0x6f, 0x3a, 0x41, 0xc5, 0xaf, 0x06, 0xbe, 0xe9, 0x34, + 0xf4, 0x65, 0x6e, 0xf7, 0x7c, 0x74, 0x22, 0x77, 0x13, 0x3c, 0xd4, 0x47, 0x07, 0x7e, 0xad, 0xc3, + 0x1a, 0xff, 0x8c, 0x85, 0xbd, 0x97, 0xc9, 0x31, 0xd5, 0x1f, 0xe7, 0xdd, 0x09, 0xdf, 0xec, 0xdd, + 0x04, 0x1d, 0xf5, 0x91, 0x87, 0x5f, 0x05, 0xe7, 0x52, 0x1c, 0xf6, 0x44, 0xd1, 0x9f, 0x10, 0x6f, + 0x0d, 0xd6, 0xcf, 0xee, 0x46, 0x44, 0xd4, 0x4b, 0x72, 0x81, 0xbd, 0x62, 0x53, 0x55, 0x10, 0xce, + 0x80, 0xec, 0x01, 0x91, 0x5f, 0xff, 0x11, 0xfb, 0x13, 0x1a, 0x20, 0xd7, 0xc4, 0x56, 0x18, 0x3d, + 0xc4, 0x07, 0x7c, 0x83, 0x22, 0x61, 0xfc, 0xf9, 0xcc, 0x73, 0xda, 0xc2, 0x07, 0x1a, 0x98, 0xef, + 0x5d, 0x9c, 0x1f, 0xa8, 0x5b, 0xbf, 0xd2, 0xc0, 0x6c, 0x57, 0x1d, 0xee, 0xe1, 0xd1, 0xed, 0x4e, + 0x8f, 0x5e, 0x1b, 0x74, 0x41, 0x15, 0x09, 0xc4, 0xbb, 0xc8, 0xa4, 0x7b, 0x3f, 0xd3, 0xc0, 0x4c, + 0xba, 0xb4, 0x3d, 0xc8, 0x78, 0x15, 0x3f, 0xc8, 0x80, 0xf9, 0xde, 0xcd, 0x2f, 0xf4, 0xd5, 0x2b, + 0x7f, 0x38, 0xd3, 0x92, 0x5e, 0x93, 0xd5, 0x77, 0x35, 0x30, 0x71, 0x4b, 0xc9, 0x45, 0x5f, 0x87, + 0x07, 0x3e, 0xa7, 0x89, 0xee, 0x92, 0x98, 0x41, 0x51, 0x12, 0xb7, 0xf8, 0x27, 0x0d, 0xcc, 0xf5, + 0xbc, 0x24, 0xe1, 0x65, 0x30, 0x8a, 
0x2d, 0xcb, 0x3d, 0x14, 0xe3, 0xb6, 0xc4, 0x2c, 0x7b, 0x95, + 0x53, 0x91, 0xe4, 0x26, 0xa2, 0x97, 0xf9, 0xbc, 0xa2, 0x57, 0xfc, 0xab, 0x06, 0x2e, 0xde, 0x2d, + 0x13, 0x1f, 0xc8, 0x96, 0x2e, 0x83, 0xbc, 0x6c, 0x70, 0x8f, 0xf9, 0x76, 0xca, 0x37, 0x9d, 0x2c, + 0x1a, 0xfc, 0x1f, 0xa2, 0xc4, 0x5f, 0xc5, 0x0f, 0x35, 0x30, 0x53, 0x25, 0x7e, 0xd3, 0xac, 0x11, + 0x44, 0xea, 0xc4, 0x27, 0x4e, 0x8d, 0xc0, 0x15, 0x30, 0xce, 0x3f, 0xcb, 0x7a, 0xb8, 0x16, 0x7d, + 0x62, 0x98, 0x95, 0x21, 0x1f, 0xdf, 0x8e, 0x18, 0x28, 0x96, 0x51, 0x9f, 0x23, 0x32, 0x7d, 0x3f, + 0x47, 0x5c, 0x04, 0x23, 0x5e, 0x3c, 0xac, 0xcd, 0x33, 0x2e, 0x9f, 0xcf, 0x72, 0x2a, 0xe7, 0xba, + 0x7e, 0xc0, 0x27, 0x50, 0x39, 0xc9, 0x75, 0xfd, 0x00, 0x71, 0x6a, 0xf1, 0x6f, 0x1a, 0xe8, 0xf5, + 0xaf, 0x4b, 0xf0, 0x82, 0x18, 0xc2, 0x25, 0x26, 0x5b, 0xd1, 0x00, 0x0e, 0x36, 0xc1, 0x18, 0x15, + 0xab, 0x92, 0x51, 0xaf, 0xdc, 0x67, 0xd4, 0xd3, 0x31, 0x12, 0xb7, 0x7f, 0x44, 0x8d, 0xc0, 0x58, + 0xe0, 0x6b, 0xb8, 0x1c, 0x3a, 0x86, 0x9c, 0xcb, 0x4e, 0x8a, 0xc0, 0xaf, 0xad, 0x0a, 0x1a, 0x52, + 0xdc, 0xf2, 0xd5, 0x8f, 0xee, 0x2c, 0x9e, 0xf9, 0xf8, 0xce, 0xe2, 0x99, 0x4f, 0xee, 0x2c, 0x9e, + 0xf9, 0x6e, 0x7b, 0x51, 0xfb, 0xa8, 0xbd, 0xa8, 0x7d, 0xdc, 0x5e, 0xd4, 0x3e, 0x69, 0x2f, 0x6a, + 0xff, 0x68, 0x2f, 0x6a, 0x3f, 0xff, 0x74, 0xf1, 0xcc, 0x37, 0xc7, 0x24, 0xfe, 0x7f, 0x03, 0x00, + 0x00, 0xff, 0xff, 0x80, 0x3e, 0x52, 0x72, 0x50, 0x2c, 0x00, 0x00, +} + func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -245,37 +955,46 @@ func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { } func (m *ConversionRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredAPIVersion))) - i += copy(dAtA[i:], m.DesiredAPIVersion) if len(m.Objects) > 0 { - for _, msg := range m.Objects { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.DesiredAPIVersion) + copy(dAtA[i:], m.DesiredAPIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredAPIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -283,41 +1002,51 @@ func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { } func (m *ConversionResponse) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionResponse) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.ConvertedObjects) > 0 { - for _, msg := range m.ConvertedObjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n1, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.ConvertedObjects) > 0 { + for iNdEx := len(m.ConvertedObjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConvertedObjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - i += n1 - return i, nil + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ConversionReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -325,37 +1054,46 @@ func (m *ConversionReview) Marshal() (dAtA []byte, err error) { } func (m *ConversionReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConversionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Request != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Request.Size())) - n2, err := m.Request.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Response.Size())) - n3, err := m.Response.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -363,40 +1101,50 @@ func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceColumnDefinition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceColumnDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) - i 
+= copy(dAtA[i:], m.Format) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Priority)) - dAtA[i] = 0x32 - i++ + i -= len(m.JSONPath) + copy(dAtA[i:], m.JSONPath) i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) - i += copy(dAtA[i:], m.JSONPath) - return i, nil + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x28 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x1a + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceConversion) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -404,46 +1152,48 @@ func (m *CustomResourceConversion) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceConversion) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceConversion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) - i += copy(dAtA[i:], m.Strategy) - if m.WebhookClientConfig != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.WebhookClientConfig.Size())) - n4, err := m.WebhookClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } if len(m.ConversionReviewVersions) > 0 { - for _, s := range m.ConversionReviewVersions { + for iNdEx := len(m.ConversionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ConversionReviewVersions[iNdEx]) + copy(dAtA[i:], m.ConversionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConversionReviewVersions[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + } + } + if m.WebhookClientConfig != nil { + { + size, err := m.WebhookClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -451,41 +1201,52 @@ func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n6, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n6 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n7, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -493,41 +1254,52 @@ func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ + i -= len(m.Message) + copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -535,37 +1307,46 @@ func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) { - var i int 
+ size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -573,63 +1354,60 @@ func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionNames) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Plural))) - i += copy(dAtA[i:], m.Plural) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Singular))) - i += copy(dAtA[i:], m.Singular) - if len(m.ShortNames) > 0 { - for _, s := range m.ShortNames { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x32 } } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x2a - i++ + i -= len(m.ListKind) + copy(dAtA[i:], m.ListKind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ListKind))) - i += copy(dAtA[i:], m.ListKind) - if len(m.Categories) > 0 { - for _, s := range m.Categories { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x2a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x22 + if len(m.ShortNames) > 0 { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.Singular) + copy(dAtA[i:], m.Singular) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Singular))) + i-- + dAtA[i] = 0x12 + i -= len(m.Plural) + copy(dAtA[i:], m.Plural) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Plural))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -637,101 +1415,121 @@ func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Names.Size())) - n10, err := m.Names.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.PreserveUnknownFields != nil { + i-- + if *m.PreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 } - i += n10 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) - i += copy(dAtA[i:], m.Scope) - if m.Validation != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Validation.Size())) - n11, err := m.Validation.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Conversion != nil { + { + size, err := m.Conversion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n11 + i-- + dAtA[i] = 0x4a } - if m.Subresources != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Subresources.Size())) - n12, err := m.Subresources.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AdditionalPrinterColumns) > 0 { + for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } - i += n12 } if len(m.Versions) > 0 { - for _, msg := range m.Versions { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + if m.Subresources != nil { + { + size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x32 } - if len(m.AdditionalPrinterColumns) > 0 { - for _, msg := range m.AdditionalPrinterColumns { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Validation != nil { + { + size, err := m.Validation.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if m.Conversion != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Conversion.Size())) - n13, 
err := m.Conversion.MarshalTo(dAtA[i:]) + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Names.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n13 - } - if m.PreserveUnknownFields != nil { - dAtA[i] = 0x50 - i++ - if *m.PreserveUnknownFields { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -739,52 +1537,55 @@ func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.StoredVersions) > 0 { + for iNdEx := len(m.StoredVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StoredVersions[iNdEx]) + copy(dAtA[i:], m.StoredVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoredVersions[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AcceptedNames.Size())) - n14, err := m.AcceptedNames.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.AcceptedNames.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - if len(m.StoredVersions) > 0 { - for _, s := range m.StoredVersions { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + i-- + dAtA[i] = 0x12 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -792,69 +1593,81 @@ func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceDefinitionVersion) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceDefinitionVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - if m.Served { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x18 - i++ - if m.Storage { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.Schema != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n15, err := m.Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AdditionalPrinterColumns) > 0 { + for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - i += n15 } if m.Subresources != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Subresources.Size())) - n16, err := m.Subresources.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 + i-- + dAtA[i] = 0x2a } - if len(m.AdditionalPrinterColumns) > 0 { - for _, msg := range m.AdditionalPrinterColumns { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 + } + i-- + if m.Storage { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Served { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -862,31 +1675,39 @@ func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceSubresourceScale) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceScale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath))) - i += copy(dAtA[i:], m.SpecReplicasPath) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath))) - i += copy(dAtA[i:], m.StatusReplicasPath) if m.LabelSelectorPath != nil { - dAtA[i] = 0x1a - i++ + i -= len(*m.LabelSelectorPath) + copy(dAtA[i:], *m.LabelSelectorPath) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LabelSelectorPath))) - i += copy(dAtA[i:], *m.LabelSelectorPath) + i-- + dAtA[i] = 0x1a } - return i, nil + i -= len(m.StatusReplicasPath) + copy(dAtA[i:], m.StatusReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath))) + i-- + dAtA[i] = 0x12 + i -= len(m.SpecReplicasPath) + copy(dAtA[i:], m.SpecReplicasPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *CustomResourceSubresourceStatus) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -894,17 +1715,22 @@ func (m *CustomResourceSubresourceStatus) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceSubresourceStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -912,37 +1738,46 @@ func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceSubresources) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceSubresources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Status != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n17, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } if m.Scale != nil { + { + size, err := m.Scale.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Scale.Size())) - n18, err := m.Scale.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n18 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -950,27 +1785,34 @@ func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { } func (m *CustomResourceValidation) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomResourceValidation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.OpenAPIV3Schema != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.OpenAPIV3Schema.Size())) - n19, err := m.OpenAPIV3Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.OpenAPIV3Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n19 + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -978,25 +1820,32 @@ func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { } func (m *ExternalDocumentation) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalDocumentation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - dAtA[i] = 0x12 - i++ + i -= len(m.URL) + copy(dAtA[i:], m.URL) i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) - i += copy(dAtA[i:], m.URL) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JSON) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1004,23 +1853,29 @@ func (m *JSON) Marshal() (dAtA []byte, err error) { } func (m *JSON) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSON) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Raw != nil { - dAtA[i] = 0xa - i++ + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1028,234 +1883,225 @@ func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { } func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaProps) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schema))) - i += copy(dAtA[i:], m.Schema) - if m.Ref != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref))) - i += copy(dAtA[i:], *m.Ref) - } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) - i += copy(dAtA[i:], m.Format) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Title))) - i += copy(dAtA[i:], m.Title) - if m.Default != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Default.Size())) - n20, err := m.Default.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.XListType != nil { + i -= len(*m.XListType) + copy(dAtA[i:], *m.XListType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XListType))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd2 + } + if len(m.XListMapKeys) > 0 { + for iNdEx := len(m.XListMapKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.XListMapKeys[iNdEx]) + copy(dAtA[i:], m.XListMapKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.XListMapKeys[iNdEx]))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca } - i += n20 - } - if m.Maximum != nil { - dAtA[i] = 0x49 - i++ - 
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) - i += 8 } - dAtA[i] = 0x50 - i++ - if m.ExclusiveMaximum { + i-- + if m.XIntOrString { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.Minimum != nil { - dAtA[i] = 0x59 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) - i += 8 - } - dAtA[i] = 0x60 - i++ - if m.ExclusiveMinimum { + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc0 + i-- + if m.XEmbeddedResource { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.MaxLength != nil { - dAtA[i] = 0x68 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength)) - } - if m.MinLength != nil { - dAtA[i] = 0x70 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength)) - } - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pattern))) - i += copy(dAtA[i:], m.Pattern) - if m.MaxItems != nil { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems)) - } - if m.MinItems != nil { - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + if m.XPreserveUnknownFields != nil { + i-- + if *m.XPreserveUnknownFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb0 } - dAtA[i] = 0x90 - i++ - dAtA[i] = 0x1 - i++ - if m.UniqueItems { + i-- + if m.Nullable { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.MultipleOf != nil { - dAtA[i] = 0x99 - i++ - dAtA[i] = 0x1 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) - i += 8 - } - if len(m.Enum) > 0 { - for _, msg := range m.Enum { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + if m.Example != nil { + { + size, err := m.Example.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 } - if m.MaxProperties != nil { - dAtA[i] = 0xa8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties)) - } - if m.MinProperties != nil { - dAtA[i] = 0xb0 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties)) - } - if len(m.Required) > 0 { - for _, s := range m.Required { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.ExternalDocs != nil { + { + size, err := m.ExternalDocs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a } - if m.Items != nil { - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Items.Size())) - n21, err := m.Items.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Definitions) > 0 { + keysForDefinitions := make([]string, 0, len(m.Definitions)) + for k := range m.Definitions { + keysForDefinitions = append(keysForDefinitions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) + for iNdEx := len(keysForDefinitions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Definitions[string(keysForDefinitions[iNdEx])] + baseI := i + { + size, err := 
(&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefinitions[iNdEx]) + copy(dAtA[i:], keysForDefinitions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefinitions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 } - i += n21 } - if len(m.AllOf) > 0 { - for _, msg := range m.AllOf { - dAtA[i] = 0xca - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.AdditionalItems != nil { + { + size, err := m.AdditionalItems.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a } - if len(m.OneOf) > 0 { - for _, msg := range m.OneOf { - dAtA[i] = 0xd2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Dependencies) > 0 { + keysForDependencies := make([]string, 0, len(m.Dependencies)) + for k := range m.Dependencies { + keysForDependencies = append(keysForDependencies, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) + for iNdEx := len(keysForDependencies) - 1; iNdEx >= 0; iNdEx-- { + v := m.Dependencies[string(keysForDependencies[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + i -= len(keysForDependencies[iNdEx]) + copy(dAtA[i:], keysForDependencies[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDependencies[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 } } - if len(m.AnyOf) > 0 { - for _, msg := range m.AnyOf { - dAtA[i] = 0xda - i++ + if len(m.PatternProperties) > 0 { + keysForPatternProperties := make([]string, 0, len(m.PatternProperties)) + for k := range m.PatternProperties { + keysForPatternProperties = append(keysForPatternProperties, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) + for iNdEx := len(keysForPatternProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.PatternProperties[string(keysForPatternProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPatternProperties[iNdEx]) + copy(dAtA[i:], keysForPatternProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPatternProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + i-- + dAtA[i] = 0xfa + } + } + if m.AdditionalProperties != nil { + { + size, err := m.AdditionalProperties.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.Not != nil { - dAtA[i] = 0xe2 - i++ + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Not.Size())) - n22, err := m.Not.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i 
+= n22 + i-- + dAtA[i] = 0xf2 } if len(m.Properties) > 0 { keysForProperties := make([]string, 0, len(m.Properties)) @@ -1263,229 +2109,279 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { keysForProperties = append(keysForProperties, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) - for _, k := range keysForProperties { - dAtA[i] = 0xea - i++ - dAtA[i] = 0x1 - i++ - v := m.Properties[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + for iNdEx := len(keysForProperties) - 1; iNdEx >= 0; iNdEx-- { + v := m.Properties[string(keysForProperties[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n23, err := (&v).MarshalTo(dAtA[i:]) + i -= len(keysForProperties[iNdEx]) + copy(dAtA[i:], keysForProperties[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForProperties[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + } + if m.Not != nil { + { + size, err := m.Not.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n23 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - } - if m.AdditionalProperties != nil { - dAtA[i] = 0xf2 - i++ + i-- dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AdditionalProperties.Size())) - n24, err := m.AdditionalProperties.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 + i-- + dAtA[i] = 0xe2 } - if len(m.PatternProperties) > 0 { - keysForPatternProperties := make([]string, 0, len(m.PatternProperties)) - for k := range m.PatternProperties { - keysForPatternProperties = append(keysForPatternProperties, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) - for _, k := range keysForPatternProperties { - dAtA[i] = 0xfa - i++ - dAtA[i] = 0x1 - i++ - v := m.PatternProperties[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n25, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AnyOf) > 0 { + for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AnyOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda } } - if len(m.Dependencies) > 0 { - keysForDependencies := make([]string, 0, len(m.Dependencies)) - for k := range m.Dependencies { - keysForDependencies = append(keysForDependencies, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) - for _, k := range keysForDependencies { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x2 - i++ - v := m.Dependencies[string(k)] - 
msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n26, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.OneOf) > 0 { + for iNdEx := len(m.OneOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OneOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n26 + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 } } - if m.AdditionalItems != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AdditionalItems.Size())) - n27, err := m.AdditionalItems.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.AllOf) > 0 { + for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca } - i += n27 } - if len(m.Definitions) > 0 { - keysForDefinitions := make([]string, 0, len(m.Definitions)) - for k := range m.Definitions { - keysForDefinitions = append(keysForDefinitions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) - for _, k := range keysForDefinitions { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x2 - i++ - v := m.Definitions[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n28, err := (&v).MarshalTo(dAtA[i:]) + if m.Items != nil { + { + size, err := m.Items.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n28 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - if m.ExternalDocs != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExternalDocs.Size())) - n29, err := m.ExternalDocs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Required) > 0 { + for iNdEx := len(m.Required) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Required[iNdEx]) + copy(dAtA[i:], m.Required[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Required[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } - i += n29 } - if m.Example != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Example.Size())) - n30, err := m.Example.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MinProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.MaxProperties != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if len(m.Enum) > 0 { + for iNdEx := len(m.Enum) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Enum[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - i += n30 } - dAtA[i] = 0xa8 - i++ - dAtA[i] = 0x2 - i++ - if m.Nullable { + if m.MultipleOf != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x99 + } + i-- + if m.UniqueItems { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.XPreserveUnknownFields != nil { - dAtA[i] = 0xb0 - i++ - dAtA[i] = 0x2 - i++ - if *m.XPreserveUnknownFields { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + if m.MinItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 } - dAtA[i] = 0xb8 - i++ - dAtA[i] = 0x2 - i++ - if m.XEmbeddedResource { + if m.MaxItems != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + i -= len(m.Pattern) + copy(dAtA[i:], m.Pattern) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pattern))) + i-- + dAtA[i] = 0x7a + if m.MinLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength)) + i-- + dAtA[i] = 0x70 + } + if m.MaxLength != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength)) + i-- + dAtA[i] = 0x68 + } + i-- + if m.ExclusiveMinimum { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0xc0 - i++ - dAtA[i] = 0x2 - i++ - if m.XIntOrString { + i-- + dAtA[i] = 0x60 + if m.Minimum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) + i-- + dAtA[i] = 0x59 + } + i-- + if m.ExclusiveMaximum { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x50 + if m.Maximum != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) + i-- + dAtA[i] = 0x49 + } + if m.Default != nil { + { + size, err := m.Default.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + i -= len(m.Title) + copy(dAtA[i:], m.Title) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Title))) + i-- + dAtA[i] = 0x3a + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x32 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + if m.Ref != nil { + i -= len(*m.Ref) + copy(dAtA[i:], *m.Ref) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Schema) + copy(dAtA[i:], m.Schema) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schema))) + i-- + dAtA[i] = 0x12 + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1493,39 +2389,48 @@ func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) { } func (m *JSONSchemaPropsOrArray) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Schema != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n31, err := m.Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } if len(m.JSONSchemas) > 0 { - for _, msg := range m.JSONSchemas { + for iNdEx := len(m.JSONSchemas) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.JSONSchemas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1533,35 +2438,42 @@ func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) { } func (m *JSONSchemaPropsOrBool) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrBool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i-- if m.Allows { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.Schema != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n32, err := m.Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 - } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *JSONSchemaPropsOrStringArray) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1569,42 +2481,43 @@ func (m *JSONSchemaPropsOrStringArray) Marshal() (dAtA []byte, err error) { } func (m *JSONSchemaPropsOrStringArray) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONSchemaPropsOrStringArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Schema != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n33, err := m.Schema.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - } if len(m.Property) > 0 { - for _, s := range m.Property { + for iNdEx := len(m.Property) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Property[iNdEx]) + copy(dAtA[i:], m.Property[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Property[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + } + } + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - 
i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1612,36 +2525,44 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) { } func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.Path != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i += copy(dAtA[i:], *m.Path) - } if m.Port != nil { - dAtA[i] = 0x20 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 } - return i, nil + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1649,45 +2570,59 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Service != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n34, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a } if m.CABundle != nil { - dAtA[i] = 0x12 - i++ + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) + i-- + dAtA[i] = 0x12 } - if m.URL != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i += copy(dAtA[i:], *m.URL) + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *ConversionRequest) Size() (n int) { + if m == nil { + return 0 + } var l int 
_ = l l = len(m.UID) @@ -1704,6 +2639,9 @@ func (m *ConversionRequest) Size() (n int) { } func (m *ConversionResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.UID) @@ -1720,6 +2658,9 @@ func (m *ConversionResponse) Size() (n int) { } func (m *ConversionReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Request != nil { @@ -1734,6 +2675,9 @@ func (m *ConversionReview) Size() (n int) { } func (m *CustomResourceColumnDefinition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1751,6 +2695,9 @@ func (m *CustomResourceColumnDefinition) Size() (n int) { } func (m *CustomResourceConversion) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Strategy) @@ -1769,6 +2716,9 @@ func (m *CustomResourceConversion) Size() (n int) { } func (m *CustomResourceDefinition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1781,6 +2731,9 @@ func (m *CustomResourceDefinition) Size() (n int) { } func (m *CustomResourceDefinitionCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1797,6 +2750,9 @@ func (m *CustomResourceDefinitionCondition) Size() (n int) { } func (m *CustomResourceDefinitionList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1811,6 +2767,9 @@ func (m *CustomResourceDefinitionList) Size() (n int) { } func (m *CustomResourceDefinitionNames) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Plural) @@ -1837,6 +2796,9 @@ func (m *CustomResourceDefinitionNames) Size() (n int) { } func (m *CustomResourceDefinitionSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -1878,6 +2840,9 @@ func (m *CustomResourceDefinitionSpec) Size() (n int) { } func (m *CustomResourceDefinitionStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Conditions) > 0 { @@ -1898,6 +2863,9 @@ func (m *CustomResourceDefinitionStatus) Size() (n int) { } func (m *CustomResourceDefinitionVersion) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1922,6 +2890,9 @@ func (m *CustomResourceDefinitionVersion) Size() (n int) { } func (m *CustomResourceSubresourceScale) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.SpecReplicasPath) @@ -1936,12 +2907,18 @@ func (m *CustomResourceSubresourceScale) Size() (n int) { } func (m *CustomResourceSubresourceStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func (m *CustomResourceSubresources) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Status != nil { @@ -1956,6 +2933,9 @@ func (m *CustomResourceSubresources) Size() (n int) { } func (m *CustomResourceValidation) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.OpenAPIV3Schema != nil { @@ -1966,6 +2946,9 @@ func (m *CustomResourceValidation) Size() (n int) { } func (m *ExternalDocumentation) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Description) @@ -1976,6 +2959,9 @@ func (m *ExternalDocumentation) Size() (n int) { } func (m *JSON) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Raw != nil { @@ -1986,6 +2972,9 @@ func (m *JSON) Size() (n int) { } func (m *JSONSchemaProps) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ID) @@ -2136,10 +3125,23 @@ func (m *JSONSchemaProps) Size() (n int) { } n += 3 n += 3 + 
if len(m.XListMapKeys) > 0 { + for _, s := range m.XListMapKeys { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.XListType != nil { + l = len(*m.XListType) + n += 2 + l + sovGenerated(uint64(l)) + } return n } func (m *JSONSchemaPropsOrArray) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Schema != nil { @@ -2156,6 +3158,9 @@ func (m *JSONSchemaPropsOrArray) Size() (n int) { } func (m *JSONSchemaPropsOrBool) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -2167,6 +3172,9 @@ func (m *JSONSchemaPropsOrBool) Size() (n int) { } func (m *JSONSchemaPropsOrStringArray) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Schema != nil { @@ -2183,6 +3191,9 @@ func (m *JSONSchemaPropsOrStringArray) Size() (n int) { } func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Namespace) @@ -2200,6 +3211,9 @@ func (m *ServiceReference) Size() (n int) { } func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Service != nil { @@ -2218,14 +3232,7 @@ func (m *WebhookClientConfig) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2234,10 +3241,15 @@ func (this *ConversionRequest) String() string { if this == nil { return "nil" } + repeatedStringForObjects := "[]RawExtension{" + for _, f := range this.Objects { + repeatedStringForObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForObjects += "}" s := strings.Join([]string{`&ConversionRequest{`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `DesiredAPIVersion:` + fmt.Sprintf("%v", this.DesiredAPIVersion) + `,`, - `Objects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Objects), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Objects:` + repeatedStringForObjects + `,`, `}`, }, "") return s @@ -2246,10 +3258,15 @@ func (this *ConversionResponse) String() string { if this == nil { return "nil" } + repeatedStringForConvertedObjects := "[]RawExtension{" + for _, f := range this.ConvertedObjects { + repeatedStringForConvertedObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConvertedObjects += "}" s := strings.Join([]string{`&ConversionResponse{`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `ConvertedObjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ConvertedObjects), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `Result:` + strings.Replace(strings.Replace(this.Result.String(), "Status", "k8s_io_apimachinery_pkg_apis_meta_v1.Status", 1), `&`, ``, 1) + `,`, + `ConvertedObjects:` + repeatedStringForConvertedObjects + `,`, + `Result:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2259,8 +3276,8 @@ func (this *ConversionReview) String() string { return "nil" } s := strings.Join([]string{`&ConversionReview{`, - `Request:` + strings.Replace(fmt.Sprintf("%v", this.Request), "ConversionRequest", "ConversionRequest", 1) + `,`, - `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "ConversionResponse", "ConversionResponse", 1) + `,`, + `Request:` + strings.Replace(this.Request.String(), "ConversionRequest", "ConversionRequest", 1) + `,`, + `Response:` + 
strings.Replace(this.Response.String(), "ConversionResponse", "ConversionResponse", 1) + `,`, `}`, }, "") return s @@ -2286,7 +3303,7 @@ func (this *CustomResourceConversion) String() string { } s := strings.Join([]string{`&CustomResourceConversion{`, `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, - `WebhookClientConfig:` + strings.Replace(fmt.Sprintf("%v", this.WebhookClientConfig), "WebhookClientConfig", "WebhookClientConfig", 1) + `,`, + `WebhookClientConfig:` + strings.Replace(this.WebhookClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1) + `,`, `ConversionReviewVersions:` + fmt.Sprintf("%v", this.ConversionReviewVersions) + `,`, `}`, }, "") @@ -2297,7 +3314,7 @@ func (this *CustomResourceDefinition) String() string { return "nil" } s := strings.Join([]string{`&CustomResourceDefinition{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CustomResourceDefinitionSpec", "CustomResourceDefinitionSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CustomResourceDefinitionStatus", "CustomResourceDefinitionStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2311,7 +3328,7 @@ func (this *CustomResourceDefinitionCondition) String() string { s := strings.Join([]string{`&CustomResourceDefinitionCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2322,9 +3339,14 @@ func (this *CustomResourceDefinitionList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]CustomResourceDefinition{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinition", "CustomResourceDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&CustomResourceDefinitionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomResourceDefinition", "CustomResourceDefinition", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2348,16 +3370,26 @@ func (this *CustomResourceDefinitionSpec) String() string { if this == nil { return "nil" } + repeatedStringForVersions := "[]CustomResourceDefinitionVersion{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + repeatedStringForAdditionalPrinterColumns := 
"[]CustomResourceColumnDefinition{" + for _, f := range this.AdditionalPrinterColumns { + repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," + } + repeatedStringForAdditionalPrinterColumns += "}" s := strings.Join([]string{`&CustomResourceDefinitionSpec{`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, `Names:` + strings.Replace(strings.Replace(this.Names.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, - `Validation:` + strings.Replace(fmt.Sprintf("%v", this.Validation), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, - `Subresources:` + strings.Replace(fmt.Sprintf("%v", this.Subresources), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + `,`, - `AdditionalPrinterColumns:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AdditionalPrinterColumns), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + `,`, - `Conversion:` + strings.Replace(fmt.Sprintf("%v", this.Conversion), "CustomResourceConversion", "CustomResourceConversion", 1) + `,`, + `Validation:` + strings.Replace(this.Validation.String(), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, + `Subresources:` + strings.Replace(this.Subresources.String(), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, + `Conversion:` + strings.Replace(this.Conversion.String(), "CustomResourceConversion", "CustomResourceConversion", 1) + `,`, `PreserveUnknownFields:` + valueToStringGenerated(this.PreserveUnknownFields) + `,`, `}`, }, "") @@ -2367,8 +3399,13 @@ func (this *CustomResourceDefinitionStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]CustomResourceDefinitionCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionCondition", "CustomResourceDefinitionCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&CustomResourceDefinitionStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CustomResourceDefinitionCondition", "CustomResourceDefinitionCondition", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `AcceptedNames:` + strings.Replace(strings.Replace(this.AcceptedNames.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, `StoredVersions:` + fmt.Sprintf("%v", this.StoredVersions) + `,`, `}`, @@ -2379,13 +3416,18 @@ func (this *CustomResourceDefinitionVersion) String() string { if this == nil { return "nil" } + repeatedStringForAdditionalPrinterColumns := "[]CustomResourceColumnDefinition{" + for _, f := range this.AdditionalPrinterColumns { + repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," + } + 
repeatedStringForAdditionalPrinterColumns += "}" s := strings.Join([]string{`&CustomResourceDefinitionVersion{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Served:` + fmt.Sprintf("%v", this.Served) + `,`, `Storage:` + fmt.Sprintf("%v", this.Storage) + `,`, - `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, - `Subresources:` + strings.Replace(fmt.Sprintf("%v", this.Subresources), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, - `AdditionalPrinterColumns:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AdditionalPrinterColumns), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, + `Subresources:` + strings.Replace(this.Subresources.String(), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, + `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, `}`, }, "") return s @@ -2416,8 +3458,8 @@ func (this *CustomResourceSubresources) String() string { return "nil" } s := strings.Join([]string{`&CustomResourceSubresources{`, - `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "CustomResourceSubresourceStatus", "CustomResourceSubresourceStatus", 1) + `,`, - `Scale:` + strings.Replace(fmt.Sprintf("%v", this.Scale), "CustomResourceSubresourceScale", "CustomResourceSubresourceScale", 1) + `,`, + `Status:` + strings.Replace(this.Status.String(), "CustomResourceSubresourceStatus", "CustomResourceSubresourceStatus", 1) + `,`, + `Scale:` + strings.Replace(this.Scale.String(), "CustomResourceSubresourceScale", "CustomResourceSubresourceScale", 1) + `,`, `}`, }, "") return s @@ -2427,7 +3469,7 @@ func (this *CustomResourceValidation) String() string { return "nil" } s := strings.Join([]string{`&CustomResourceValidation{`, - `OpenAPIV3Schema:` + strings.Replace(fmt.Sprintf("%v", this.OpenAPIV3Schema), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `OpenAPIV3Schema:` + strings.Replace(this.OpenAPIV3Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, `}`, }, "") return s @@ -2457,6 +3499,26 @@ func (this *JSONSchemaProps) String() string { if this == nil { return "nil" } + repeatedStringForEnum := "[]JSON{" + for _, f := range this.Enum { + repeatedStringForEnum += strings.Replace(strings.Replace(f.String(), "JSON", "JSON", 1), `&`, ``, 1) + "," + } + repeatedStringForEnum += "}" + repeatedStringForAllOf := "[]JSONSchemaProps{" + for _, f := range this.AllOf { + repeatedStringForAllOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAllOf += "}" + repeatedStringForOneOf := "[]JSONSchemaProps{" + for _, f := range this.OneOf { + repeatedStringForOneOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForOneOf += "}" + repeatedStringForAnyOf := "[]JSONSchemaProps{" + for _, f := range this.AnyOf { + repeatedStringForAnyOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForAnyOf += "}" keysForProperties := make([]string, 0, len(this.Properties)) for k := range this.Properties { keysForProperties = append(keysForProperties, k) @@ -2505,7 +3567,7 @@ func (this *JSONSchemaProps) String() string { `Type:` + fmt.Sprintf("%v", this.Type) + `,`, 
`Format:` + fmt.Sprintf("%v", this.Format) + `,`, `Title:` + fmt.Sprintf("%v", this.Title) + `,`, - `Default:` + strings.Replace(fmt.Sprintf("%v", this.Default), "JSON", "JSON", 1) + `,`, + `Default:` + strings.Replace(this.Default.String(), "JSON", "JSON", 1) + `,`, `Maximum:` + valueToStringGenerated(this.Maximum) + `,`, `ExclusiveMaximum:` + fmt.Sprintf("%v", this.ExclusiveMaximum) + `,`, `Minimum:` + valueToStringGenerated(this.Minimum) + `,`, @@ -2517,27 +3579,29 @@ func (this *JSONSchemaProps) String() string { `MinItems:` + valueToStringGenerated(this.MinItems) + `,`, `UniqueItems:` + fmt.Sprintf("%v", this.UniqueItems) + `,`, `MultipleOf:` + valueToStringGenerated(this.MultipleOf) + `,`, - `Enum:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Enum), "JSON", "JSON", 1), `&`, ``, 1) + `,`, + `Enum:` + repeatedStringForEnum + `,`, `MaxProperties:` + valueToStringGenerated(this.MaxProperties) + `,`, `MinProperties:` + valueToStringGenerated(this.MinProperties) + `,`, `Required:` + fmt.Sprintf("%v", this.Required) + `,`, - `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "JSONSchemaPropsOrArray", "JSONSchemaPropsOrArray", 1) + `,`, - `AllOf:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllOf), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + `,`, - `OneOf:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OneOf), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + `,`, - `AnyOf:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AnyOf), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + `,`, - `Not:` + strings.Replace(fmt.Sprintf("%v", this.Not), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Items:` + strings.Replace(this.Items.String(), "JSONSchemaPropsOrArray", "JSONSchemaPropsOrArray", 1) + `,`, + `AllOf:` + repeatedStringForAllOf + `,`, + `OneOf:` + repeatedStringForOneOf + `,`, + `AnyOf:` + repeatedStringForAnyOf + `,`, + `Not:` + strings.Replace(this.Not.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, `Properties:` + mapStringForProperties + `,`, - `AdditionalProperties:` + strings.Replace(fmt.Sprintf("%v", this.AdditionalProperties), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `AdditionalProperties:` + strings.Replace(this.AdditionalProperties.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, `PatternProperties:` + mapStringForPatternProperties + `,`, `Dependencies:` + mapStringForDependencies + `,`, - `AdditionalItems:` + strings.Replace(fmt.Sprintf("%v", this.AdditionalItems), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, + `AdditionalItems:` + strings.Replace(this.AdditionalItems.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, `Definitions:` + mapStringForDefinitions + `,`, - `ExternalDocs:` + strings.Replace(fmt.Sprintf("%v", this.ExternalDocs), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, - `Example:` + strings.Replace(fmt.Sprintf("%v", this.Example), "JSON", "JSON", 1) + `,`, + `ExternalDocs:` + strings.Replace(this.ExternalDocs.String(), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, + `Example:` + strings.Replace(this.Example.String(), "JSON", "JSON", 1) + `,`, `Nullable:` + fmt.Sprintf("%v", this.Nullable) + `,`, `XPreserveUnknownFields:` + valueToStringGenerated(this.XPreserveUnknownFields) + `,`, `XEmbeddedResource:` + fmt.Sprintf("%v", this.XEmbeddedResource) + `,`, `XIntOrString:` + fmt.Sprintf("%v", this.XIntOrString) + `,`, + `XListMapKeys:` + fmt.Sprintf("%v", this.XListMapKeys) + `,`, 
+ `XListType:` + valueToStringGenerated(this.XListType) + `,`, `}`, }, "") return s @@ -2546,9 +3610,14 @@ func (this *JSONSchemaPropsOrArray) String() string { if this == nil { return "nil" } + repeatedStringForJSONSchemas := "[]JSONSchemaProps{" + for _, f := range this.JSONSchemas { + repeatedStringForJSONSchemas += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," + } + repeatedStringForJSONSchemas += "}" s := strings.Join([]string{`&JSONSchemaPropsOrArray{`, - `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, - `JSONSchemas:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.JSONSchemas), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `JSONSchemas:` + repeatedStringForJSONSchemas + `,`, `}`, }, "") return s @@ -2559,7 +3628,7 @@ func (this *JSONSchemaPropsOrBool) String() string { } s := strings.Join([]string{`&JSONSchemaPropsOrBool{`, `Allows:` + fmt.Sprintf("%v", this.Allows) + `,`, - `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, `}`, }, "") return s @@ -2569,7 +3638,7 @@ func (this *JSONSchemaPropsOrStringArray) String() string { return "nil" } s := strings.Join([]string{`&JSONSchemaPropsOrStringArray{`, - `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, + `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, `Property:` + fmt.Sprintf("%v", this.Property) + `,`, `}`, }, "") @@ -2593,7 +3662,7 @@ func (this *WebhookClientConfig) String() string { return "nil" } s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, `URL:` + valueToStringGenerated(this.URL) + `,`, `}`, @@ -2623,7 +3692,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2651,7 +3720,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2661,6 +3730,9 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2680,7 +3752,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2690,6 +3762,9 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2709,7 +3784,7 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2718,10 +3793,13 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Objects = append(m.Objects, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.Objects = append(m.Objects, runtime.RawExtension{}) if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2735,6 +3813,9 @@ func (m *ConversionRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2762,7 +3843,7 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2790,7 +3871,7 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2800,6 +3881,9 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2819,7 +3903,7 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2828,10 +3912,13 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.ConvertedObjects = append(m.ConvertedObjects, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.ConvertedObjects = append(m.ConvertedObjects, runtime.RawExtension{}) if err := m.ConvertedObjects[len(m.ConvertedObjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2850,7 +3937,7 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2859,6 +3946,9 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2875,6 +3965,9 @@ func (m *ConversionResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2902,7 +3995,7 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2930,7 +4023,7 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2939,6 +4032,9 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -2963,7 +4059,7 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2972,6 +4068,9 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2991,6 +4090,9 @@ func (m *ConversionReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3018,7 +4120,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3046,7 +4148,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3056,6 +4158,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3075,7 +4180,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3085,6 +4190,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3104,7 +4212,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3114,6 +4222,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3133,7 +4244,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3143,6 +4254,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3162,7 +4276,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Priority |= (int32(b) & 0x7F) << shift + m.Priority |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3181,7 +4295,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3191,6 +4305,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3205,6 +4322,9 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3232,7 +4352,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3260,7 +4380,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3270,6 +4390,9 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3289,7 +4412,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3298,6 +4421,9 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3322,7 +4448,7 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3332,6 +4458,9 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3346,6 +4475,9 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3373,7 +4505,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3401,7 +4533,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3410,6 +4542,9 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3431,7 +4566,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3440,6 +4575,9 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3461,7 +4599,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 
0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3470,6 +4608,9 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3486,6 +4627,9 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3513,7 +4657,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3541,7 +4685,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3551,6 +4695,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3570,7 +4717,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3580,6 +4727,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3599,7 +4749,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3608,6 +4758,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3629,7 +4782,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3639,6 +4792,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3658,7 +4814,7 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3668,6 +4824,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3682,6 +4841,9 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -3709,7 +4871,7 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3737,7 +4899,7 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3746,6 +4908,9 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3767,7 +4932,7 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3776,6 +4941,9 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3790,7 +4958,10 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3820,7 +4991,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3848,7 +5019,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3858,6 +5029,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3877,7 +5051,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3887,6 +5061,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3906,7 +5083,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3916,6 +5093,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3935,7 +5115,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3945,6 +5125,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3964,7 +5147,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3974,6 +5157,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3993,7 +5179,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4003,6 +5189,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4017,6 +5206,9 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4044,7 +5236,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4072,7 +5264,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4082,6 +5274,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4101,7 +5296,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4111,6 +5306,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4130,7 +5328,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4139,6 +5337,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4160,7 +5361,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4170,6 +5371,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4189,7 +5393,7 @@ func (m 
*CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4198,6 +5402,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4222,7 +5429,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4231,6 +5438,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4255,7 +5465,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4264,6 +5474,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4286,7 +5499,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4295,6 +5508,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4317,7 +5533,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4326,6 +5542,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4350,7 +5569,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4366,6 +5585,9 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4393,7 +5615,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4421,7 +5643,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4430,6 +5652,9 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4452,7 +5677,7 @@ func (m 
*CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4461,6 +5686,9 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4482,7 +5710,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4492,6 +5720,9 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4506,6 +5737,9 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4533,7 +5767,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4561,7 +5795,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4571,6 +5805,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4590,7 +5827,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4610,7 +5847,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4630,7 +5867,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4639,6 +5876,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4663,7 +5903,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4672,6 +5912,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4696,7 +5939,7 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4705,6 +5948,9 @@ func (m 
*CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4722,6 +5968,9 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4749,7 +5998,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4777,7 +6026,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4787,6 +6036,9 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4806,7 +6058,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4816,6 +6068,9 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4835,7 +6090,7 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4845,6 +6100,9 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4860,6 +6118,9 @@ func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4887,7 +6148,7 @@ func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4910,6 +6171,9 @@ func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4937,7 +6201,7 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4965,7 +6229,7 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4974,6 +6238,9 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4998,7 +6265,7 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5007,6 +6274,9 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5026,6 +6296,9 @@ func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5053,7 +6326,7 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5081,7 +6354,7 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5090,6 +6363,9 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5109,6 +6385,9 @@ func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5136,7 +6415,7 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5164,7 +6443,7 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5174,6 +6453,9 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5193,7 +6475,7 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5203,6 +6485,9 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5217,6 +6502,9 @@ func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5244,7 +6532,7 @@ func (m *JSON) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5272,7 +6560,7 @@ func (m *JSON) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-5281,6 +6569,9 @@ func (m *JSON) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5298,6 +6589,9 @@ func (m *JSON) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5325,7 +6619,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5353,7 +6647,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5363,6 +6657,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5382,7 +6679,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5392,6 +6689,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5411,7 +6711,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5421,6 +6721,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5441,7 +6744,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5451,6 +6754,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5470,7 +6776,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5480,6 +6786,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5499,7 +6808,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5509,6 +6818,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5528,7 +6840,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } 
b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5538,6 +6850,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5557,7 +6872,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5566,6 +6881,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5602,7 +6920,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5634,7 +6952,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5654,7 +6972,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5674,7 +6992,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5694,7 +7012,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5704,6 +7022,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5723,7 +7044,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5743,7 +7064,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5763,7 +7084,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5795,7 +7116,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5804,6 +7125,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5826,7 +7150,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5846,7 +7170,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5866,7 +7190,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5876,6 +7200,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5895,7 +7222,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5904,6 +7231,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5928,7 +7258,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5937,6 +7267,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5959,7 +7292,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5968,6 +7301,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5990,7 +7326,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5999,6 +7335,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6021,7 +7360,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6030,6 +7369,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6054,7 +7396,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6063,6 +7405,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6083,7 +7428,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6100,7 +7445,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6110,6 +7455,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA 
[]byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6126,7 +7474,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6135,7 +7483,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -6177,7 +7525,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6186,6 +7534,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6210,7 +7561,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6219,6 +7570,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6239,7 +7593,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6256,7 +7610,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6266,6 +7620,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6282,7 +7639,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6291,7 +7648,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -6333,7 +7690,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6342,6 +7699,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6362,7 +7722,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6379,7 +7739,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) 
<< shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6389,6 +7749,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6405,7 +7768,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6414,7 +7777,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -6456,7 +7819,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6465,6 +7828,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6489,7 +7855,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6498,6 +7864,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6518,7 +7887,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6535,7 +7904,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6545,6 +7914,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6561,7 +7933,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6570,7 +7942,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -6612,7 +7984,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6621,6 +7993,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6645,7 +8020,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6654,6 
+8029,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6678,7 +8056,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6698,7 +8076,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6719,7 +8097,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6739,12 +8117,77 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } m.XIntOrString = bool(v != 0) + case 41: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListMapKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.XListMapKeys = append(m.XListMapKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 42: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XListType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.XListType = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6754,6 +8197,9 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6781,7 +8227,7 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6809,7 +8255,7 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6818,6 +8264,9 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6842,7 +8291,7 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6851,6 +8300,9 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6868,6 +8320,9 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6895,7 +8350,7 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6923,7 +8378,7 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6943,7 +8398,7 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6952,6 +8407,9 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6971,6 +8429,9 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6998,7 +8459,7 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7026,7 +8487,7 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7035,6 +8496,9 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7059,7 +8523,7 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7069,6 +8533,9 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7083,6 +8550,9 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7110,7 +8580,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7138,7 +8608,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7148,6 +8618,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7167,7 +8640,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7177,6 +8650,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7196,7 +8672,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7206,6 +8682,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7226,7 +8705,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -7241,6 +8720,9 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7268,7 +8750,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7296,7 +8778,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7305,6 +8787,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7329,7 +8814,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7338,6 +8823,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7360,7 +8848,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7370,6 +8858,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7385,6 +8876,9 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7451,10 +8945,13 
@@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -7483,6 +8980,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -7501,193 +9001,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2908 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0x23, 0x47, - 0x15, 0xdf, 0x91, 0x2c, 0x5b, 0x6e, 0xdb, 0x6b, 0xbb, 0x77, 0xed, 0xcc, 0x3a, 0x1b, 0xc9, 0xab, - 0x90, 0x60, 0xc2, 0xae, 0x9c, 0x2c, 0x09, 0x09, 0xa9, 0xe2, 0x60, 0xd9, 0x4e, 0xca, 0xc9, 0xda, - 0x32, 0xad, 0xdd, 0x64, 0x21, 0x9f, 0x6d, 0x4d, 0x4b, 0x9e, 0xf5, 0x7c, 0xed, 0xf4, 0x8c, 0x6c, - 0x57, 0x80, 0xe2, 0xa3, 0x52, 0x50, 0x14, 0x10, 0x8a, 0xe4, 0x42, 0x15, 0x1c, 0x02, 0xc5, 0x85, - 0x03, 0x1c, 0xe0, 0x06, 0x7f, 0x40, 0x8e, 0x29, 0x4e, 0x39, 0x50, 0x2a, 0x56, 0xb9, 0xc2, 0x8d, - 0x2a, 0xaa, 0x7c, 0xa2, 0xfa, 0x63, 0x7a, 0x46, 0x23, 0x69, 0xd7, 0x95, 0x95, 0xb2, 0xdc, 0xac, - 0xf7, 0xf5, 0x7b, 0xfd, 0xfa, 0xf5, 0xeb, 0xd7, 0x6f, 0x0c, 0x1a, 0x07, 0xcf, 0xd1, 0xb2, 0xe9, - 0xae, 0x1e, 0x84, 0x7b, 0xc4, 0x77, 0x48, 0x40, 0xe8, 0x6a, 0x8b, 0x38, 0x86, 0xeb, 0xaf, 0x4a, - 0x06, 0xf6, 0x4c, 0x72, 0x14, 0x10, 0x87, 0x9a, 0xae, 0x43, 0xaf, 0x60, 0xcf, 0xa4, 0xc4, 0x6f, - 0x11, 0x7f, 0xd5, 0x3b, 0x68, 0x32, 0x1e, 0xed, 0x16, 0x58, 0x6d, 0x3d, 0xb5, 0x47, 0x02, 0xfc, - 0xd4, 0x6a, 0x93, 0x38, 0xc4, 0xc7, 0x01, 0x31, 0xca, 0x9e, 0xef, 0x06, 0x2e, 0xfc, 0xba, 0x30, - 0x57, 0xee, 0x92, 0x7e, 0x4b, 0x99, 0x2b, 0x7b, 0x07, 0x4d, 0xc6, 0xa3, 0xdd, 0x02, 0x65, 0x69, - 0x6e, 0xe9, 0x4a, 0xd3, 0x0c, 0xf6, 0xc3, 0xbd, 0x72, 0xdd, 0xb5, 0x57, 0x9b, 0x6e, 0xd3, 0x5d, - 0xe5, 0x56, 0xf7, 0xc2, 0x06, 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x04, 0xda, 0xd2, 0xd3, 0xb1, 0xf3, - 0x36, 0xae, 0xef, 0x9b, 0x0e, 0xf1, 0x8f, 0x63, 0x8f, 0x6d, 0x12, 0xe0, 0xd5, 0x56, 0x8f, 0x8f, - 0x4b, 0xab, 0x83, 0xb4, 0xfc, 0xd0, 0x09, 0x4c, 0x9b, 0xf4, 0x28, 0x7c, 0xf5, 0x5e, 0x0a, 0xb4, - 0xbe, 0x4f, 0x6c, 0x9c, 0xd6, 0x2b, 0x9d, 0x68, 0x60, 0x7e, 0xdd, 0x75, 0x5a, 0xc4, 0x67, 0xab, - 0x44, 0xe4, 0x76, 0x48, 0x68, 0x00, 0x2b, 0x20, 0x1b, 0x9a, 0x86, 0xae, 0x2d, 0x6b, 0x2b, 0x93, - 0x95, 0x27, 0x3f, 0x6a, 0x17, 0xcf, 0x74, 0xda, 0xc5, 0xec, 0x8d, 0xad, 0x8d, 0x93, 0x76, 0xf1, - 0xd2, 0x20, 0xa4, 0xe0, 0xd8, 0x23, 0xb4, 0x7c, 0x63, 0x6b, 0x03, 0x31, 0x65, 0xf8, 0x22, 0x98, - 0x37, 0x08, 0x35, 0x7d, 0x62, 0xac, 0xed, 0x6e, 0xbd, 0x22, 0xec, 0xeb, 0x19, 0x6e, 0xf1, 0x82, - 0xb4, 0x38, 0xbf, 0x91, 0x16, 0x40, 0xbd, 0x3a, 0xf0, 0x26, 0x98, 0x70, 0xf7, 0x6e, 0x91, 0x7a, - 0x40, 0xf5, 0xec, 0x72, 0x76, 0x65, 0xea, 0xea, 0x95, 0x72, 0xbc, 0x83, 0xca, 0x05, 0xbe, 0x6d, - 0x72, 0xb1, 0x65, 0x84, 0x0f, 0x37, 0xa3, 0x9d, 0xab, 0xcc, 0x4a, 0xb4, 0x89, 0xaa, 0xb0, 0x82, - 0x22, 0x73, 0xa5, 0xdf, 0x65, 0x00, 0x4c, 0x2e, 0x9e, 0x7a, 0xae, 0x43, 0xc9, 0x50, 0x56, 0x4f, - 0xc1, 0x5c, 0x9d, 0x5b, 0x0e, 0x88, 0x21, 0x71, 0xf5, 0xcc, 0x67, 0xf1, 0x5e, 0x97, 0xf8, 0x73, - 0xeb, 0x29, 
0x73, 0xa8, 0x07, 0x00, 0x5e, 0x07, 0xe3, 0x3e, 0xa1, 0xa1, 0x15, 0xe8, 0xd9, 0x65, - 0x6d, 0x65, 0xea, 0xea, 0xe5, 0x81, 0x50, 0x3c, 0xbf, 0x59, 0xf2, 0x95, 0x5b, 0x4f, 0x95, 0x6b, - 0x01, 0x0e, 0x42, 0x5a, 0x39, 0x2b, 0x91, 0xc6, 0x11, 0xb7, 0x81, 0xa4, 0xad, 0xd2, 0x8f, 0x33, - 0x60, 0x2e, 0x19, 0xa5, 0x96, 0x49, 0x0e, 0xe1, 0x21, 0x98, 0xf0, 0x45, 0xb2, 0xf0, 0x38, 0x4d, - 0x5d, 0xdd, 0x2d, 0xdf, 0xd7, 0xb1, 0x2a, 0xf7, 0x24, 0x61, 0x65, 0x8a, 0xed, 0x99, 0xfc, 0x81, - 0x22, 0x34, 0xf8, 0x0e, 0xc8, 0xfb, 0x72, 0xa3, 0x78, 0x36, 0x4d, 0x5d, 0xfd, 0xc6, 0x10, 0x91, - 0x85, 0xe1, 0xca, 0x74, 0xa7, 0x5d, 0xcc, 0x47, 0xbf, 0x90, 0x02, 0x2c, 0xbd, 0x9f, 0x01, 0x85, - 0xf5, 0x90, 0x06, 0xae, 0x8d, 0x08, 0x75, 0x43, 0xbf, 0x4e, 0xd6, 0x5d, 0x2b, 0xb4, 0x9d, 0x0d, - 0xd2, 0x30, 0x1d, 0x33, 0x60, 0xd9, 0xba, 0x0c, 0xc6, 0x1c, 0x6c, 0x13, 0x99, 0x3d, 0xd3, 0x32, - 0xa6, 0x63, 0x3b, 0xd8, 0x26, 0x88, 0x73, 0x98, 0x04, 0x4b, 0x16, 0x79, 0x16, 0x94, 0xc4, 0xf5, - 0x63, 0x8f, 0x20, 0xce, 0x81, 0x8f, 0x83, 0xf1, 0x86, 0xeb, 0xdb, 0x58, 0xec, 0xe3, 0x64, 0xbc, - 0x33, 0x2f, 0x70, 0x2a, 0x92, 0x5c, 0xf8, 0x0c, 0x98, 0x32, 0x08, 0xad, 0xfb, 0xa6, 0xc7, 0xa0, - 0xf5, 0x31, 0x2e, 0x7c, 0x4e, 0x0a, 0x4f, 0x6d, 0xc4, 0x2c, 0x94, 0x94, 0x83, 0x97, 0x41, 0xde, - 0xf3, 0x4d, 0xd7, 0x37, 0x83, 0x63, 0x3d, 0xb7, 0xac, 0xad, 0xe4, 0x2a, 0x73, 0x52, 0x27, 0xbf, - 0x2b, 0xe9, 0x48, 0x49, 0xc0, 0x65, 0x90, 0x7f, 0xa9, 0x56, 0xdd, 0xd9, 0xc5, 0xc1, 0xbe, 0x3e, - 0xce, 0x11, 0xc6, 0x98, 0x34, 0xca, 0xdf, 0x92, 0xd4, 0xd2, 0x3f, 0x32, 0x40, 0x4f, 0x47, 0x25, - 0x0a, 0x29, 0x7c, 0x01, 0xe4, 0x69, 0xc0, 0x2a, 0x4e, 0xf3, 0x58, 0xc6, 0xe4, 0x89, 0x08, 0xac, - 0x26, 0xe9, 0x27, 0xed, 0xe2, 0x62, 0xac, 0x11, 0x51, 0x79, 0x3c, 0x94, 0x2e, 0xfc, 0x8d, 0x06, - 0xce, 0x1d, 0x92, 0xbd, 0x7d, 0xd7, 0x3d, 0x58, 0xb7, 0x4c, 0xe2, 0x04, 0xeb, 0xae, 0xd3, 0x30, - 0x9b, 0x32, 0x07, 0xd0, 0x7d, 0xe6, 0xc0, 0xab, 0xbd, 0x96, 0x2b, 0x0f, 0x75, 0xda, 0xc5, 0x73, - 0x7d, 0x18, 0xa8, 0x9f, 0x1f, 0xf0, 0x26, 0xd0, 0xeb, 0xa9, 0x43, 0x22, 0x0b, 0x98, 0x28, 0x5b, - 0x93, 0x95, 0x8b, 0x9d, 0x76, 0x51, 0x5f, 0x1f, 0x20, 0x83, 0x06, 0x6a, 0x97, 0x7e, 0x98, 0x4d, - 0x87, 0x37, 0x91, 0x6e, 0x6f, 0x83, 0x3c, 0x3b, 0xc6, 0x06, 0x0e, 0xb0, 0x3c, 0x88, 0x4f, 0x9e, - 0xee, 0xd0, 0x8b, 0x9a, 0xb1, 0x4d, 0x02, 0x5c, 0x81, 0x72, 0x43, 0x40, 0x4c, 0x43, 0xca, 0x2a, - 0xfc, 0x0e, 0x18, 0xa3, 0x1e, 0xa9, 0xcb, 0x40, 0xbf, 0x76, 0xbf, 0x87, 0x6d, 0xc0, 0x42, 0x6a, - 0x1e, 0xa9, 0xc7, 0x67, 0x81, 0xfd, 0x42, 0x1c, 0x16, 0xbe, 0xab, 0x81, 0x71, 0xca, 0x0b, 0x94, - 0x2c, 0x6a, 0x6f, 0x8c, 0xca, 0x83, 0x54, 0x15, 0x14, 0xbf, 0x91, 0x04, 0x2f, 0xfd, 0x27, 0x03, - 0x2e, 0x0d, 0x52, 0x5d, 0x77, 0x1d, 0x43, 0x6c, 0xc7, 0x96, 0x3c, 0xdb, 0x22, 0xd3, 0x9f, 0x49, - 0x9e, 0xed, 0x93, 0x76, 0xf1, 0xb1, 0x7b, 0x1a, 0x48, 0x14, 0x81, 0xaf, 0xa9, 0x75, 0x8b, 0x42, - 0x71, 0xa9, 0xdb, 0xb1, 0x93, 0x76, 0x71, 0x56, 0xa9, 0x75, 0xfb, 0x0a, 0x5b, 0x00, 0x5a, 0x98, - 0x06, 0xd7, 0x7d, 0xec, 0x50, 0x61, 0xd6, 0xb4, 0x89, 0x0c, 0xdf, 0x13, 0xa7, 0x4b, 0x0f, 0xa6, - 0x51, 0x59, 0x92, 0x90, 0xf0, 0x5a, 0x8f, 0x35, 0xd4, 0x07, 0x81, 0xd5, 0x2d, 0x9f, 0x60, 0xaa, - 0x4a, 0x51, 0xe2, 0x46, 0x61, 0x54, 0x24, 0xb9, 0xf0, 0x4b, 0x60, 0xc2, 0x26, 0x94, 0xe2, 0x26, - 0xe1, 0xf5, 0x67, 0x32, 0xbe, 0xa2, 0xb7, 0x05, 0x19, 0x45, 0x7c, 0xd6, 0x9f, 0x5c, 0x1c, 0x14, - 0xb5, 0x6b, 0x26, 0x0d, 0xe0, 0xeb, 0x3d, 0x07, 0xa0, 0x7c, 0xba, 0x15, 0x32, 0x6d, 0x9e, 0xfe, - 0xaa, 0xf8, 0x45, 0x94, 0x44, 0xf2, 0x7f, 0x1b, 0xe4, 0xcc, 0x80, 0xd8, 0xd1, 0xdd, 0xfd, 0xea, - 0x88, 0x72, 0xaf, 0x32, 0x23, 0x7d, 
0xc8, 0x6d, 0x31, 0x34, 0x24, 0x40, 0x4b, 0xbf, 0xcf, 0x80, - 0x47, 0x06, 0xa9, 0xb0, 0x0b, 0x85, 0xb2, 0x88, 0x7b, 0x56, 0xe8, 0x63, 0x4b, 0x66, 0x9c, 0x8a, - 0xf8, 0x2e, 0xa7, 0x22, 0xc9, 0x65, 0x25, 0x9f, 0x9a, 0x4e, 0x33, 0xb4, 0xb0, 0x2f, 0xd3, 0x49, - 0xad, 0xba, 0x26, 0xe9, 0x48, 0x49, 0xc0, 0x32, 0x00, 0x74, 0xdf, 0xf5, 0x03, 0x8e, 0x21, 0xab, - 0xd7, 0x59, 0x56, 0x20, 0x6a, 0x8a, 0x8a, 0x12, 0x12, 0xec, 0x46, 0x3b, 0x30, 0x1d, 0x43, 0xee, - 0xba, 0x3a, 0xc5, 0x2f, 0x9b, 0x8e, 0x81, 0x38, 0x87, 0xe1, 0x5b, 0x26, 0x0d, 0x18, 0x45, 0x6e, - 0x79, 0x57, 0xd4, 0xb9, 0xa4, 0x92, 0x60, 0xf8, 0x75, 0x56, 0xf5, 0x5d, 0xdf, 0x24, 0x54, 0x1f, - 0x8f, 0xf1, 0xd7, 0x15, 0x15, 0x25, 0x24, 0x4a, 0xff, 0xca, 0x0f, 0x4e, 0x12, 0x56, 0x4a, 0xe0, - 0xa3, 0x20, 0xd7, 0xf4, 0xdd, 0xd0, 0x93, 0x51, 0x52, 0xd1, 0x7e, 0x91, 0x11, 0x91, 0xe0, 0xb1, - 0xac, 0x6c, 0x75, 0xb5, 0xa9, 0x2a, 0x2b, 0xa3, 0xe6, 0x34, 0xe2, 0xc3, 0xef, 0x6b, 0x20, 0xe7, - 0xc8, 0xe0, 0xb0, 0x94, 0x7b, 0x7d, 0x44, 0x79, 0xc1, 0xc3, 0x1b, 0xbb, 0x2b, 0x22, 0x2f, 0x90, - 0xe1, 0xd3, 0x20, 0x47, 0xeb, 0xae, 0x47, 0x64, 0xd4, 0x0b, 0x91, 0x50, 0x8d, 0x11, 0x4f, 0xda, - 0xc5, 0x99, 0xc8, 0x1c, 0x27, 0x20, 0x21, 0x0c, 0x7f, 0xa4, 0x01, 0xd0, 0xc2, 0x96, 0x69, 0x60, - 0xde, 0x32, 0xe4, 0xb8, 0xfb, 0xc3, 0x4d, 0xeb, 0x57, 0x94, 0x79, 0xb1, 0x69, 0xf1, 0x6f, 0x94, - 0x80, 0x86, 0xef, 0x69, 0x60, 0x9a, 0x86, 0x7b, 0xbe, 0xd4, 0xa2, 0xbc, 0xb9, 0x98, 0xba, 0xfa, - 0xcd, 0xa1, 0xfa, 0x52, 0x4b, 0x00, 0x54, 0xe6, 0x3a, 0xed, 0xe2, 0x74, 0x92, 0x82, 0xba, 0x1c, - 0x80, 0x3f, 0xd5, 0x40, 0xbe, 0x15, 0xdd, 0xd9, 0x13, 0xfc, 0xc0, 0xbf, 0x39, 0xa2, 0x8d, 0x95, - 0x19, 0x15, 0x9f, 0x02, 0xd5, 0x07, 0x28, 0x0f, 0xe0, 0x5f, 0x35, 0xa0, 0x63, 0x43, 0x14, 0x78, - 0x6c, 0xed, 0xfa, 0xa6, 0x13, 0x10, 0x5f, 0xf4, 0x9b, 0x54, 0xcf, 0x73, 0xf7, 0x86, 0x7b, 0x17, - 0xa6, 0x7b, 0xd9, 0xca, 0xb2, 0xf4, 0x4e, 0x5f, 0x1b, 0xe0, 0x06, 0x1a, 0xe8, 0x20, 0x4f, 0xb4, - 0xb8, 0xa5, 0xd1, 0x27, 0x47, 0x90, 0x68, 0x71, 0x2f, 0x25, 0xab, 0x43, 0xdc, 0x41, 0x25, 0xa0, - 0x61, 0x15, 0x2c, 0x78, 0x3e, 0xe1, 0x00, 0x37, 0x9c, 0x03, 0xc7, 0x3d, 0x74, 0x5e, 0x30, 0x89, - 0x65, 0x50, 0x1d, 0x2c, 0x6b, 0x2b, 0xf9, 0xca, 0x85, 0x4e, 0xbb, 0xb8, 0xb0, 0xdb, 0x4f, 0x00, - 0xf5, 0xd7, 0x2b, 0xbd, 0x97, 0x4d, 0xbf, 0x02, 0xd2, 0x5d, 0x04, 0xfc, 0x40, 0xac, 0x5e, 0xc4, - 0x86, 0xea, 0x1a, 0xdf, 0xad, 0xb7, 0x47, 0x94, 0x4c, 0xaa, 0x0d, 0x88, 0x3b, 0x39, 0x45, 0xa2, - 0x28, 0xe1, 0x07, 0xfc, 0x95, 0x06, 0x66, 0x70, 0xbd, 0x4e, 0xbc, 0x80, 0x18, 0xa2, 0xb8, 0x67, - 0x3e, 0x87, 0xfa, 0xb5, 0x20, 0xbd, 0x9a, 0x59, 0x4b, 0x42, 0xa3, 0x6e, 0x4f, 0xe0, 0xf3, 0xe0, - 0x2c, 0x0d, 0x5c, 0x9f, 0x18, 0xa9, 0xb6, 0x19, 0x76, 0xda, 0xc5, 0xb3, 0xb5, 0x2e, 0x0e, 0x4a, - 0x49, 0x96, 0x3e, 0x1d, 0x03, 0xc5, 0x7b, 0x1c, 0xb5, 0x53, 0x3c, 0xcc, 0x1e, 0x07, 0xe3, 0x7c, - 0xb9, 0x06, 0x8f, 0x4a, 0x3e, 0xd1, 0x0a, 0x72, 0x2a, 0x92, 0x5c, 0x76, 0x51, 0x30, 0x7c, 0xd6, - 0xbe, 0x64, 0xb9, 0xa0, 0xba, 0x28, 0x6a, 0x82, 0x8c, 0x22, 0x3e, 0x7c, 0x07, 0x8c, 0x8b, 0xc1, - 0x0b, 0xaf, 0xd2, 0x23, 0xac, 0xb4, 0x80, 0xfb, 0xc9, 0xa1, 0x90, 0x84, 0xec, 0xad, 0xb0, 0xb9, - 0x07, 0x5d, 0x61, 0xef, 0x5a, 0xd2, 0xc6, 0xff, 0xcf, 0x4b, 0x5a, 0xe9, 0xbf, 0x5a, 0xfa, 0xdc, - 0x27, 0x96, 0x5a, 0xab, 0x63, 0x8b, 0xc0, 0x0d, 0x30, 0xc7, 0x5e, 0x2d, 0x88, 0x78, 0x96, 0x59, - 0xc7, 0x94, 0x3f, 0x9a, 0x45, 0xc2, 0xa9, 0x39, 0x4e, 0x2d, 0xc5, 0x47, 0x3d, 0x1a, 0xf0, 0x25, - 0x00, 0x45, 0x27, 0xdf, 0x65, 0x47, 0x34, 0x25, 0xaa, 0x27, 0xaf, 0xf5, 0x48, 0xa0, 0x3e, 0x5a, - 0x70, 0x1d, 0xcc, 0x5b, 0x78, 0x8f, 0x58, 0x35, 0x62, 0x91, 
0x7a, 0xe0, 0xfa, 0xdc, 0x94, 0x18, - 0x2b, 0x2c, 0x74, 0xda, 0xc5, 0xf9, 0x6b, 0x69, 0x26, 0xea, 0x95, 0x2f, 0x5d, 0x4a, 0x1f, 0xaf, - 0xe4, 0xc2, 0xc5, 0xfb, 0xe8, 0xc3, 0x0c, 0x58, 0x1a, 0x9c, 0x19, 0xf0, 0x07, 0xf1, 0x33, 0x4e, - 0x74, 0xe9, 0x6f, 0x8e, 0x2a, 0x0b, 0xe5, 0x3b, 0x0e, 0xf4, 0xbe, 0xe1, 0xe0, 0x77, 0x59, 0xcb, - 0x84, 0xad, 0x68, 0x70, 0xf4, 0xc6, 0xc8, 0x5c, 0x60, 0x20, 0x95, 0x49, 0xd1, 0x8d, 0x61, 0x8b, - 0x37, 0x5f, 0xd8, 0x22, 0xa5, 0x3f, 0x68, 0xe9, 0x97, 0x7c, 0x7c, 0x82, 0xe1, 0xcf, 0x34, 0x30, - 0xeb, 0x7a, 0xc4, 0x59, 0xdb, 0xdd, 0x7a, 0xe5, 0x2b, 0xe2, 0x24, 0xcb, 0x50, 0xed, 0xdc, 0xa7, - 0x9f, 0x2f, 0xd5, 0xaa, 0x3b, 0xc2, 0xe0, 0xae, 0xef, 0x7a, 0xb4, 0x72, 0xae, 0xd3, 0x2e, 0xce, - 0x56, 0xbb, 0xa1, 0x50, 0x1a, 0xbb, 0x64, 0x83, 0x85, 0xcd, 0xa3, 0x80, 0xf8, 0x0e, 0xb6, 0x36, - 0xdc, 0x7a, 0x68, 0x13, 0x27, 0x10, 0x8e, 0xa6, 0xa6, 0x4e, 0xda, 0x29, 0xa7, 0x4e, 0x8f, 0x80, - 0x6c, 0xe8, 0x5b, 0x32, 0x8b, 0xa7, 0xd4, 0x54, 0x15, 0x5d, 0x43, 0x8c, 0x5e, 0xba, 0x04, 0xc6, - 0x98, 0x9f, 0xf0, 0x02, 0xc8, 0xfa, 0xf8, 0x90, 0x5b, 0x9d, 0xae, 0x4c, 0x30, 0x11, 0x84, 0x0f, - 0x11, 0xa3, 0x95, 0xfe, 0x5d, 0x00, 0xb3, 0xa9, 0xb5, 0xc0, 0x25, 0x90, 0x51, 0xa3, 0x5a, 0x20, - 0x8d, 0x66, 0xb6, 0x36, 0x50, 0xc6, 0x34, 0xe0, 0xb3, 0xaa, 0xf8, 0x0a, 0xd0, 0xa2, 0xaa, 0xe7, - 0x9c, 0xca, 0x7a, 0xe4, 0xd8, 0x1c, 0x73, 0x24, 0x2a, 0x9c, 0xcc, 0x07, 0xd2, 0x90, 0xa7, 0x44, - 0xf8, 0x40, 0x1a, 0x88, 0xd1, 0x3e, 0xeb, 0xc8, 0x2d, 0x9a, 0xf9, 0xe5, 0x4e, 0x31, 0xf3, 0x1b, - 0xbf, 0xeb, 0xcc, 0xef, 0x51, 0x90, 0x0b, 0xcc, 0xc0, 0x22, 0xfa, 0x44, 0xf7, 0x53, 0xe6, 0x3a, - 0x23, 0x22, 0xc1, 0x83, 0xb7, 0xc0, 0x84, 0x41, 0x1a, 0x38, 0xb4, 0x02, 0x3d, 0xcf, 0x53, 0x68, - 0x7d, 0x08, 0x29, 0x24, 0x06, 0xb2, 0x1b, 0xc2, 0x2e, 0x8a, 0x00, 0xe0, 0x63, 0x60, 0xc2, 0xc6, - 0x47, 0xa6, 0x1d, 0xda, 0xbc, 0xc9, 0xd3, 0x84, 0xd8, 0xb6, 0x20, 0xa1, 0x88, 0xc7, 0x2a, 0x23, - 0x39, 0xaa, 0x5b, 0x21, 0x35, 0x5b, 0x44, 0x32, 0x65, 0x03, 0xa6, 0x2a, 0xe3, 0x66, 0x8a, 0x8f, - 0x7a, 0x34, 0x38, 0x98, 0xe9, 0x70, 0xe5, 0xa9, 0x04, 0x98, 0x20, 0xa1, 0x88, 0xd7, 0x0d, 0x26, - 0xe5, 0xa7, 0x07, 0x81, 0x49, 0xe5, 0x1e, 0x0d, 0xf8, 0x65, 0x30, 0x69, 0xe3, 0xa3, 0x6b, 0xc4, - 0x69, 0x06, 0xfb, 0xfa, 0xcc, 0xb2, 0xb6, 0x92, 0xad, 0xcc, 0x74, 0xda, 0xc5, 0xc9, 0xed, 0x88, - 0x88, 0x62, 0x3e, 0x17, 0x36, 0x1d, 0x29, 0x7c, 0x36, 0x21, 0x1c, 0x11, 0x51, 0xcc, 0x67, 0x1d, - 0x84, 0x87, 0x03, 0x76, 0xb8, 0xf4, 0xd9, 0xee, 0xa7, 0xe6, 0xae, 0x20, 0xa3, 0x88, 0x0f, 0x57, - 0x40, 0xde, 0xc6, 0x47, 0x7c, 0x2c, 0xa0, 0xcf, 0x71, 0xb3, 0x7c, 0x38, 0xbd, 0x2d, 0x69, 0x48, - 0x71, 0xb9, 0xa4, 0xe9, 0x08, 0xc9, 0xf9, 0x84, 0xa4, 0xa4, 0x21, 0xc5, 0x65, 0x49, 0x1c, 0x3a, - 0xe6, 0xed, 0x90, 0x08, 0x61, 0xc8, 0x23, 0xa3, 0x92, 0xf8, 0x46, 0xcc, 0x42, 0x49, 0x39, 0xf6, - 0x2c, 0xb7, 0x43, 0x2b, 0x30, 0x3d, 0x8b, 0x54, 0x1b, 0xfa, 0x39, 0x1e, 0x7f, 0xde, 0x78, 0x6f, - 0x2b, 0x2a, 0x4a, 0x48, 0x40, 0x02, 0xc6, 0x88, 0x13, 0xda, 0xfa, 0x79, 0x7e, 0xb1, 0x0f, 0x25, - 0x05, 0xd5, 0xc9, 0xd9, 0x74, 0x42, 0x1b, 0x71, 0xf3, 0xf0, 0x59, 0x30, 0x63, 0xe3, 0x23, 0x56, - 0x0e, 0x88, 0x1f, 0x98, 0x84, 0xea, 0x0b, 0x7c, 0xf1, 0xf3, 0xac, 0xe3, 0xdc, 0x4e, 0x32, 0x50, - 0xb7, 0x1c, 0x57, 0x34, 0x9d, 0x84, 0xe2, 0x62, 0x42, 0x31, 0xc9, 0x40, 0xdd, 0x72, 0x2c, 0xd2, - 0x3e, 0xb9, 0x1d, 0x9a, 0x3e, 0x31, 0xf4, 0x87, 0x78, 0x93, 0x2a, 0x3f, 0x18, 0x08, 0x1a, 0x52, - 0x5c, 0xd8, 0x8a, 0xe6, 0x47, 0x3a, 0x3f, 0x86, 0x37, 0x86, 0x5b, 0xc9, 0xab, 0xfe, 0x9a, 0xef, - 0xe3, 0x63, 0x71, 0xd3, 0x24, 0x27, 0x47, 0x90, 0x82, 0x1c, 0xb6, 0xac, 0x6a, 0x43, 
0xbf, 0xc0, - 0x63, 0x3f, 0xec, 0x1b, 0x44, 0x55, 0x9d, 0x35, 0x06, 0x82, 0x04, 0x16, 0x03, 0x75, 0x1d, 0x96, - 0x1a, 0x4b, 0xa3, 0x05, 0xad, 0x32, 0x10, 0x24, 0xb0, 0xf8, 0x4a, 0x9d, 0xe3, 0x6a, 0x43, 0x7f, - 0x78, 0xc4, 0x2b, 0x65, 0x20, 0x48, 0x60, 0x41, 0x13, 0x64, 0x1d, 0x37, 0xd0, 0x2f, 0x8e, 0xe4, - 0x7a, 0xe6, 0x17, 0xce, 0x8e, 0x1b, 0x20, 0x86, 0x01, 0x7f, 0xa9, 0x01, 0xe0, 0xc5, 0x29, 0xfa, - 0xc8, 0x50, 0xc6, 0x12, 0x29, 0xc8, 0x72, 0x9c, 0xdb, 0x9b, 0x4e, 0xe0, 0x1f, 0xc7, 0xef, 0xc8, - 0xc4, 0x19, 0x48, 0x78, 0x01, 0x7f, 0xab, 0x81, 0xf3, 0xc9, 0x36, 0x59, 0xb9, 0x57, 0xe0, 0x11, - 0xb9, 0x3e, 0xec, 0x34, 0xaf, 0xb8, 0xae, 0x55, 0xd1, 0x3b, 0xed, 0xe2, 0xf9, 0xb5, 0x3e, 0xa8, - 0xa8, 0xaf, 0x2f, 0xf0, 0x8f, 0x1a, 0x98, 0x97, 0x55, 0x34, 0xe1, 0x61, 0x91, 0x07, 0x90, 0x0c, - 0x3b, 0x80, 0x69, 0x1c, 0x11, 0x47, 0xf5, 0xa1, 0xbb, 0x87, 0x8f, 0x7a, 0x5d, 0x83, 0x7f, 0xd1, - 0xc0, 0xb4, 0x41, 0x3c, 0xe2, 0x18, 0xc4, 0xa9, 0x33, 0x5f, 0x97, 0x87, 0x32, 0x36, 0x48, 0xfb, - 0xba, 0x91, 0x80, 0x10, 0x6e, 0x96, 0xa5, 0x9b, 0xd3, 0x49, 0xd6, 0x49, 0xbb, 0xb8, 0x18, 0xab, - 0x26, 0x39, 0xa8, 0xcb, 0x4b, 0xf8, 0xbe, 0x06, 0x66, 0xe3, 0x0d, 0x10, 0x57, 0xca, 0xa5, 0x11, - 0xe6, 0x01, 0x6f, 0x5f, 0xd7, 0xba, 0x01, 0x51, 0xda, 0x03, 0xf8, 0x27, 0x8d, 0x75, 0x6a, 0xd1, - 0xbb, 0x8f, 0xea, 0x25, 0x1e, 0xcb, 0xb7, 0x86, 0x1e, 0x4b, 0x85, 0x20, 0x42, 0x79, 0x39, 0x6e, - 0x05, 0x15, 0xe7, 0xa4, 0x5d, 0x5c, 0x48, 0x46, 0x52, 0x31, 0x50, 0xd2, 0x43, 0xf8, 0x13, 0x0d, - 0x4c, 0x93, 0xb8, 0xe3, 0xa6, 0xfa, 0xa3, 0x43, 0x09, 0x62, 0xdf, 0x26, 0x5e, 0xbc, 0xd4, 0x13, - 0x2c, 0x8a, 0xba, 0xb0, 0x59, 0x07, 0x49, 0x8e, 0xb0, 0xed, 0x59, 0x44, 0xff, 0xc2, 0x90, 0x3b, - 0xc8, 0x4d, 0x61, 0x17, 0x45, 0x00, 0xf0, 0x32, 0xc8, 0x3b, 0xa1, 0x65, 0xe1, 0x3d, 0x8b, 0xe8, - 0x8f, 0xf1, 0x5e, 0x44, 0x8d, 0x45, 0x77, 0x24, 0x1d, 0x29, 0x09, 0xd8, 0x00, 0xcb, 0x47, 0x2f, - 0xab, 0x7f, 0x11, 0xea, 0x3b, 0xb8, 0xd3, 0x1f, 0xe7, 0x56, 0x96, 0x3a, 0xed, 0xe2, 0xe2, 0xcd, - 0xfe, 0xa3, 0xbd, 0x7b, 0xda, 0x80, 0xaf, 0x81, 0x87, 0x13, 0x32, 0x9b, 0xf6, 0x1e, 0x31, 0x0c, - 0x62, 0x44, 0x0f, 0x37, 0xfd, 0x8b, 0x62, 0x78, 0x18, 0x1d, 0xf0, 0x9b, 0x69, 0x01, 0x74, 0x37, - 0x6d, 0x78, 0x0d, 0x2c, 0x26, 0xd8, 0x5b, 0x4e, 0x50, 0xf5, 0x6b, 0x81, 0x6f, 0x3a, 0x4d, 0x7d, - 0x85, 0xdb, 0x3d, 0x1f, 0x9d, 0xc8, 0x9b, 0x09, 0x1e, 0x1a, 0xa0, 0xb3, 0xc4, 0x9e, 0x8e, 0xa9, - 0xd2, 0x03, 0xe7, 0x40, 0xf6, 0x80, 0xc8, 0x4f, 0xee, 0x88, 0xfd, 0x09, 0x0d, 0x90, 0x6b, 0x61, - 0x2b, 0x8c, 0x5e, 0xbf, 0x43, 0xbe, 0xb6, 0x90, 0x30, 0xfe, 0x7c, 0xe6, 0x39, 0x6d, 0xe9, 0x03, - 0x0d, 0x2c, 0xf6, 0xaf, 0x88, 0x0f, 0xd4, 0xad, 0x5f, 0x6b, 0x60, 0xbe, 0xa7, 0xf8, 0xf5, 0xf1, - 0xe8, 0x76, 0xb7, 0x47, 0xaf, 0x0d, 0xbb, 0x8a, 0x89, 0x5d, 0xe3, 0xad, 0x5b, 0xd2, 0xbd, 0x9f, - 0x6b, 0x60, 0x2e, 0x5d, 0x4f, 0x1e, 0x64, 0xbc, 0x4a, 0x1f, 0x64, 0xc0, 0x62, 0xff, 0x8e, 0x13, - 0xfa, 0xea, 0x69, 0x3d, 0x9a, 0x11, 0x45, 0xbf, 0x71, 0xe6, 0xbb, 0x1a, 0x98, 0xba, 0xa5, 0xe4, - 0xa2, 0x4f, 0xb2, 0x43, 0x1f, 0x8e, 0x44, 0x05, 0x3c, 0x66, 0x50, 0x94, 0xc4, 0x2d, 0xfd, 0x59, - 0x03, 0x0b, 0x7d, 0x6f, 0x26, 0xf6, 0x86, 0xc7, 0x96, 0xe5, 0x1e, 0x8a, 0x19, 0x57, 0x62, 0x80, - 0xbc, 0xc6, 0xa9, 0x48, 0x72, 0x13, 0xd1, 0xcb, 0x7c, 0x5e, 0xd1, 0x2b, 0xfd, 0x4d, 0x03, 0x17, - 0xef, 0x96, 0x89, 0x0f, 0x64, 0x4b, 0x57, 0x40, 0x5e, 0x76, 0x95, 0xc7, 0x7c, 0x3b, 0xe5, 0x43, - 0x4a, 0x16, 0x0d, 0xfe, 0x5f, 0x48, 0xe2, 0xaf, 0xd2, 0x87, 0x1a, 0x98, 0xab, 0x11, 0xbf, 0x65, - 0xd6, 0x09, 0x22, 0x0d, 0xe2, 0x13, 0xa7, 0x4e, 0xe0, 0x2a, 0x98, 0xe4, 0xdf, 0x42, 0x3d, 0x5c, - 0x8f, 0xe6, 
0xfa, 0xf3, 0x32, 0xe4, 0x93, 0x3b, 0x11, 0x03, 0xc5, 0x32, 0xea, 0x1b, 0x40, 0x66, - 0xe0, 0x37, 0x80, 0x8b, 0x60, 0xcc, 0x8b, 0x27, 0xa4, 0x79, 0xc6, 0xe5, 0x43, 0x51, 0x4e, 0xe5, - 0x5c, 0xd7, 0x0f, 0xf8, 0xd8, 0x27, 0x27, 0xb9, 0xae, 0x1f, 0x20, 0x4e, 0x2d, 0xfd, 0x5d, 0x03, - 0xfd, 0xfe, 0x5f, 0x08, 0xb6, 0xc0, 0x04, 0x15, 0xae, 0xcb, 0xd0, 0x56, 0xef, 0x33, 0xb4, 0xe9, - 0x40, 0x88, 0x7b, 0x35, 0xa2, 0x46, 0x60, 0x2c, 0xba, 0x75, 0x5c, 0x09, 0x1d, 0x43, 0x4e, 0x3c, - 0xa7, 0x45, 0x74, 0xd7, 0xd7, 0x04, 0x0d, 0x29, 0x2e, 0xbc, 0x20, 0x66, 0x73, 0x89, 0x81, 0x57, - 0x34, 0x97, 0xab, 0x5c, 0xf9, 0xe8, 0x4e, 0xe1, 0xcc, 0xc7, 0x77, 0x0a, 0x67, 0x3e, 0xb9, 0x53, - 0x38, 0xf3, 0xbd, 0x4e, 0x41, 0xfb, 0xa8, 0x53, 0xd0, 0x3e, 0xee, 0x14, 0xb4, 0x4f, 0x3a, 0x05, - 0xed, 0x9f, 0x9d, 0x82, 0xf6, 0x8b, 0x4f, 0x0b, 0x67, 0xbe, 0x35, 0x21, 0x5d, 0xfb, 0x5f, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x10, 0x75, 0x48, 0x06, 0xc5, 0x2b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 47c06a296adb8..7f0902efa83ca 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -30,33 +30,33 @@ option go_package = "v1beta1"; // ConversionRequest describes the conversion request parameters. message ConversionRequest { - // `uid` is an identifier for the individual request/response. It allows us to distinguish instances of requests which are - // otherwise identical (parallel requests, requests when earlier requests did not modify etc) - // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. optional string uid = 1; - // `desiredAPIVersion` is the version to convert given objects to. e.g. "myapi.example.com/v1" + // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" optional string desiredAPIVersion = 2; - // `objects` is the list of CR objects to be converted. + // objects is the list of custom resource objects to be converted. repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; } // ConversionResponse describes a conversion response. message ConversionResponse { - // `uid` is an identifier for the individual request/response. - // This should be copied over from the corresponding ConversionRequest. + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. optional string uid = 1; - // `convertedObjects` is the list of converted version of `request.objects` if the `result` is successful otherwise empty. - // The webhook is expected to set apiVersion of these objects to the ConversionRequest.desiredAPIVersion. The list - // must also have the same size as the input list with the same objects in the same order (equal kind, UID, name and namespace). 
+ // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; - // `result` contains the result of conversion with extra details if the conversion failed. `result.status` determines if - // the conversion failed or succeeded. The `result.status` field is required and represent the success or failure of the + // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` // will be used to construct an error message for the end user. @@ -65,11 +65,11 @@ message ConversionResponse { // ConversionReview describes a conversion request/response. message ConversionReview { - // `request` describes the attributes for the conversion request. + // request describes the attributes for the conversion request. // +optional optional ConversionRequest request = 1; - // `response` describes the attributes for the conversion response. + // response describes the attributes for the conversion response. // +optional optional ConversionResponse response = 2; } @@ -80,12 +80,12 @@ message CustomResourceColumnDefinition { optional string name = 1; // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. optional string type = 2; // format is an optional OpenAPI type definition for this column. The 'name' format is applied // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. // +optional optional string format = 3; @@ -95,69 +95,71 @@ message CustomResourceColumnDefinition { // priority is an integer defining the relative importance of this column compared to others. Lower // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a higher priority. + // should be given a priority greater than 0. // +optional optional int32 priority = 5; - // JSONPath is a simple JSON path, i.e. with array notation. + // JSONPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. optional string JSONPath = 6; } // CustomResourceConversion describes how to convert different versions of a CR. message CustomResourceConversion { - // `strategy` specifies the conversion strategy. 
Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the CR. + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information - // is needed for this option. This requires spec.preserveUnknownFields to be false. + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhookClientConfig to be set. optional string strategy = 1; - // `webhookClientConfig` is the instructions for how to call the webhook if strategy is `Webhook`. This field is - // alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // webhookClientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + // Required when `strategy` is set to `Webhook`. // +optional optional WebhookClientConfig webhookClientConfig = 2; - // ConversionReviewVersions is an ordered list of preferred `ConversionReview` - // versions the Webhook expects. API server will try to use first version in + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in // the list which it supports. If none of the versions specified in this list - // supported by API server, conversion will fail for this object. + // are supported by API server, conversion will fail for the custom resource. // If a persisted Webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail. - // Default to `['v1beta1']`. + // Defaults to `["v1beta1"]`. // +optional repeated string conversionReviewVersions = 3; } // CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format // <.spec.name>.<.spec.group>. +// Deprecated in v1.16, planned for removal in v1.19. Use apiextensions.k8s.io/v1 CustomResourceDefinition instead. message CustomResourceDefinition { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec describes how the user wants the resources to appear + // spec describes how the user wants the resources to appear optional CustomResourceDefinitionSpec spec = 2; - // Status indicates the actual state of the CustomResourceDefinition + // status indicates the actual state of the CustomResourceDefinition // +optional optional CustomResourceDefinitionStatus status = 3; } // CustomResourceDefinitionCondition contains details for the current condition of this pod. message CustomResourceDefinitionCondition { - // Type is the type of the condition. Types include Established, NamesAccepted and Terminating. + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. optional string type = 1; - // Status is the status of the condition. + // status is the status of the condition. // Can be True, False, Unknown. optional string status = 2; - // Last time the condition transitioned from one status to another. + // lastTransitionTime last time the condition transitioned from one status to another. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - // Unique, one-word, CamelCase reason for the condition's last transition. 
+ // reason is a unique, one-word, CamelCase reason for the condition's last transition. // +optional optional string reason = 4; - // Human-readable message indicating details about last transition. + // message is a human-readable message indicating details about last transition. // +optional optional string message = 5; } @@ -166,71 +168,81 @@ message CustomResourceDefinitionCondition { message CustomResourceDefinitionList { optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items individual CustomResourceDefinitions + // items list individual CustomResourceDefinition objects repeated CustomResourceDefinition items = 2; } // CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition message CustomResourceDefinitionNames { - // Plural is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration - // too: plural.group and it must be all lowercase. + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis///.../`. + // Must match the name of the CustomResourceDefinition (in the form `.`). + // Must be all lowercase. optional string plural = 1; - // Singular is the singular name of the resource. It must be all lowercase Defaults to lowercased + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. // +optional optional string singular = 2; - // ShortNames are short names for the resource. It must be all lowercase. + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get `. + // It must be all lowercase. // +optional repeated string shortNames = 3; - // Kind is the serialized kind of the resource. It is normally CamelCase and singular. + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. optional string kind = 4; - // ListKind is the serialized kind of the list for this resource. Defaults to List. + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". // +optional optional string listKind = 5; - // Categories is a list of grouped resources custom resources belong to (e.g. 'all') + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. // +optional repeated string categories = 6; } // CustomResourceDefinitionSpec describes how a user wants their resource to appear message CustomResourceDefinitionSpec { - // Group is the group this resource belongs in + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis//...`. + // Must match the name of the CustomResourceDefinition (in the form `.`). optional string group = 1; - // Version is the version this resource belongs in - // Should be always first item in Versions field if provided. - // Optional, but at least one of Version or Versions must be set. - // Deprecated: Please use `Versions`. + // version is the API version of the defined custom resource. + // The custom resources are served under `/apis///...`. + // Must match the name of the first item in the `versions` list if `version` and `versions` are both specified. + // Optional if `versions` is specified. 
+ // Deprecated: use `versions` instead. // +optional optional string version = 2; - // Names are the names used to describe this custom resource + // names specify the resource and kind names for the custom resource. optional CustomResourceDefinitionNames names = 3; - // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. optional string scope = 4; - // Validation describes the validation methods for CustomResources - // Optional, the global validation schema for all versions. + // validation describes the schema used for validation and pruning of the custom resource. + // If present, this validation schema is used to validate all versions. // Top-level and per-version schemas are mutually exclusive. // +optional optional CustomResourceValidation validation = 5; - // Subresources describes the subresources for CustomResource - // Optional, the global subresources for all versions. + // subresources specify what subresources the defined custom resource has. + // If present, this field configures subresources for all versions. // Top-level and per-version subresources are mutually exclusive. // +optional optional CustomResourceSubresources subresources = 6; - // Versions is the list of all supported versions for this resource. - // If Version field is provided, this field is optional. - // Validation: All versions must use the same validation schema for now. i.e., top - // level Validation field is applied to all of these versions. - // Order: The version name will be used to compute the order. + // versions is the list of all API versions of the defined custom resource. + // Optional if `version` is specified. + // The name of the first item in the `versions` list must match the `version` field if `version` and `versions` are both specified. + // Version names are used to compute the order in which served versions are listed in API discovery. // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first @@ -240,101 +252,106 @@ message CustomResourceDefinitionSpec { // +optional repeated CustomResourceDefinitionVersion versions = 7; - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. - // Optional, the global columns for all versions. + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If present, this field configures columns for all versions. // Top-level and per-version columns are mutually exclusive. + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional repeated CustomResourceColumnDefinition additionalPrinterColumns = 8; - // `conversion` defines conversion settings for the CRD. + // conversion defines conversion settings for the CRD. // +optional optional CustomResourceConversion conversion = 9; - // preserveUnknownFields disables pruning of object fields which are not - // specified in the OpenAPI schema. 
apiVersion, kind, metadata and known - // fields inside metadata are always preserved. - // Defaults to true in v1beta and will default to false in v1. + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // If false, schemas must be defined for all versions. + // Defaults to true in v1beta for backwards compatibility. + // Deprecated: will be required to be false in v1. Preservation of unknown fields can be specified + // in the validation schema using the `x-kubernetes-preserve-unknown-fields: true` extension. + // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. + // +optional optional bool preserveUnknownFields = 10; } // CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition message CustomResourceDefinitionStatus { - // Conditions indicate state for particular aspects of a CustomResourceDefinition + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional repeated CustomResourceDefinitionCondition conditions = 1; - // AcceptedNames are the names that are actually being used to serve discovery + // acceptedNames are the names that are actually being used to serve discovery. // They may be different than the names in spec. optional CustomResourceDefinitionNames acceptedNames = 2; - // StoredVersions are all versions of CustomResources that were ever persisted. Tracking these + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these // versions allows a migration path for stored versions in etcd. The field is mutable - // so the migration controller can first finish a migration to another version (i.e. - // that no old objects are left in the storage), and then remove the rest of the + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the // versions from this list. - // None of the versions in this list can be removed from the spec.Versions field. + // Versions may not be removed from `spec.versions` while they exist in this list. repeated string storedVersions = 3; } // CustomResourceDefinitionVersion describes a version for CRD. message CustomResourceDefinitionVersion { - // Name is the version name, e.g. “v1”, “v2beta1”, etc. + // name is the version name, e.g. “v1”, “v2beta1”, etc. + // The custom resources are served under this version at `/apis///...` if `served` is true. optional string name = 1; - // Served is a flag enabling/disabling this version from being served via REST APIs + // served is a flag enabling/disabling this version from being served via REST APIs optional bool served = 2; - // Storage flags the version as storage version. There must be exactly one - // flagged as storage version. + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. optional bool storage = 3; - // Schema describes the schema for CustomResource used in validation, pruning, and defaulting. + // schema describes the schema used for validation and pruning of this version of the custom resource. // Top-level and per-version schemas are mutually exclusive. 
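For readers following the versioning fields above, a minimal illustrative sketch (not part of the vendored code; the group, kind, and schemas are hypothetical) of a CustomResourceDefinition built with these v1beta1 Go types, showing a `versions` list with exactly one storage version and non-identical per-version schemas:

package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical CRD "crontabs.example.com": two served versions, exactly one
	// of which (v1) is flagged as the storage version, each with its own
	// (non-identical) per-version validation schema.
	crd := apiextensionsv1beta1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{Name: "crontabs.example.com"},
		Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
			Group: "example.com",
			// Allowed values per the field docs are "Cluster" and "Namespaced".
			Scope: apiextensionsv1beta1.ResourceScope("Namespaced"),
			Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
				Plural:   "crontabs",
				Singular: "crontab",
				Kind:     "CronTab",
				ListKind: "CronTabList",
			},
			Versions: []apiextensionsv1beta1.CustomResourceDefinitionVersion{
				{
					Name:   "v1beta1", // still served, no longer used for storage
					Served: true,
					Schema: &apiextensionsv1beta1.CustomResourceValidation{
						OpenAPIV3Schema: &apiextensionsv1beta1.JSONSchemaProps{Type: "object"},
					},
				},
				{
					Name:    "v1",
					Served:  true,
					Storage: true, // exactly one version may set storage=true
					Schema: &apiextensionsv1beta1.CustomResourceValidation{
						OpenAPIV3Schema: &apiextensionsv1beta1.JSONSchemaProps{
							Type: "object",
							Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{
								"spec": {Type: "object"},
							},
						},
					},
				},
			},
		},
	}
	fmt.Printf("%s: %d versions defined\n", crd.Name, len(crd.Spec.Versions))
}

The per-version schemas differ on purpose: the field comments above note that identical per-version schemas should be replaced by a single top-level validation schema.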
- // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead). // +optional optional CustomResourceValidation schema = 4; - // Subresources describes the subresources for CustomResource + // subresources specify what subresources this version of the defined custom resource have. // Top-level and per-version subresources are mutually exclusive. - // Per-version subresources must not all be set to identical values (top-level subresources should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // Per-version subresources must not all be set to identical values (top-level subresources should be used instead). // +optional optional CustomResourceSubresources subresources = 5; - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. // Top-level and per-version columns are mutually exclusive. - // Per-version columns must not all be set to identical values (top-level columns should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. - // NOTE: CRDs created prior to 1.13 populated the top-level additionalPrinterColumns field by default. To apply an - // update that changes to per-version additionalPrinterColumns, the top-level additionalPrinterColumns field must - // be explicitly set to null + // Per-version columns must not all be set to identical values (top-level columns should be used instead). + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional repeated CustomResourceColumnDefinition additionalPrinterColumns = 6; } // CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. message CustomResourceSubresourceScale { - // SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .spec. - // If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET. + // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. optional string specReplicasPath = 1; - // StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status. - // If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource + // Must be a JSON Path under `.status`. 
+ // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource // will default to 0. optional string statusReplicasPath = 2; - // LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status or .spec. - // Must be set to work with HPA. + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. // The field pointed by this JSON path must be a string field (not a complex selector struct) // which contains a serialized label selector in string form. // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource - // If there is no value under the given path in the CustomResource, the status label selector value in the /scale + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` // subresource will default to the empty string. // +optional optional string labelSelectorPath = 3; @@ -350,18 +367,21 @@ message CustomResourceSubresourceStatus { // CustomResourceSubresources defines the status and scale subresources for CustomResources. message CustomResourceSubresources { - // Status denotes the status subresource for CustomResources + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. // +optional optional CustomResourceSubresourceStatus status = 1; - // Scale denotes the scale subresource for CustomResources + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. // +optional optional CustomResourceSubresourceScale scale = 2; } // CustomResourceValidation is a list of validation methods for CustomResources. message CustomResourceValidation { - // OpenAPIV3Schema is the OpenAPI v3 schema to be validated against. + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. // +optional optional JSONSchemaProps openAPIV3Schema = 1; } @@ -396,8 +416,8 @@ message JSONSchemaProps { optional string title = 7; // default is a default value for undefined object fields. - // Defaulting is an alpha feature under the CustomResourceDefaulting feature gate. - // Defaulting requires spec.preserveUnknownFields to be false. + // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API. optional JSON default = 8; optional double maximum = 9; @@ -489,6 +509,33 @@ message JSONSchemaProps { // - type: string // - ... zero or more optional bool xKubernetesIntOrString = 40; + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". 
Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // +optional + repeated string xKubernetesListMapKeys = 41; + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar (or another atomic type). + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + optional string xKubernetesListType = 42; } // JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps @@ -516,30 +563,28 @@ message JSONSchemaPropsOrStringArray { // ServiceReference holds a reference to Service.legacy.k8s.io message ServiceReference { - // `namespace` is the namespace of the service. + // namespace is the namespace of the service. // Required optional string namespace = 1; - // `name` is the name of the service. + // name is the name of the service. // Required optional string name = 2; - // `path` is an optional URL path which will be sent in any request to - // this service. + // path is an optional URL path at which the webhook will be contacted. // +optional optional string path = 3; - // If specified, the port on the service that hosting webhook. - // Default to 443 for backward compatibility. + // port is an optional service port at which the webhook will be contacted. // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. // +optional optional int32 port = 4; } -// WebhookClientConfig contains the information to make a TLS -// connection with the webhook. It has the same field as admissionregistration.v1beta1.WebhookClientConfig. +// WebhookClientConfig contains the information to make a TLS connection with the webhook. message WebhookClientConfig { - // `url` gives the location of the webhook, in standard URL form + // url gives the location of the webhook, in standard URL form // (`scheme://host:port/path`). Exactly one of `url` or `service` // must be specified. // @@ -568,15 +613,15 @@ message WebhookClientConfig { // +optional optional string url = 3; - // `service` is a reference to the service for this webhook. Either - // `service` or `url` must be specified. + // service is a reference to the service for this webhook. Either + // service or url must be specified. // // If the webhook is running within the cluster, then you should use `service`. // // +optional optional ServiceReference service = 1; - // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. // If unspecified, system trust roots on the apiserver are used. 
// +optional optional bytes caBundle = 2; diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go index 6d931ca95c1f5..f6c260b686885 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -26,6 +26,11 @@ import ( type ConversionStrategyType string const ( + // KubeAPIApprovedAnnotation is an annotation that must be set to create a CRD for the k8s.io, *.k8s.io, kubernetes.io, or *.kubernetes.io namespaces. + // The value should be a link to a URL where the current spec was approved, so updates to the spec should also update the URL. + // If the API is unapproved, you may set the annotation to a string starting with `"unapproved"`. For instance, `"unapproved, temporarily squatting"` or `"unapproved, experimental-only"`. This is discouraged. + KubeAPIApprovedAnnotation = "api-approved.kubernetes.io" + // NoneConverter is a converter that only sets apiversion of the CR and leave everything else unchanged. NoneConverter ConversionStrategyType = "None" // WebhookConverter is a converter that calls to an external webhook to convert the CR. @@ -34,33 +39,36 @@ const ( // CustomResourceDefinitionSpec describes how a user wants their resource to appear type CustomResourceDefinitionSpec struct { - // Group is the group this resource belongs in + // group is the API group of the defined custom resource. + // The custom resources are served under `/apis//...`. + // Must match the name of the CustomResourceDefinition (in the form `.`). Group string `json:"group" protobuf:"bytes,1,opt,name=group"` - // Version is the version this resource belongs in - // Should be always first item in Versions field if provided. - // Optional, but at least one of Version or Versions must be set. - // Deprecated: Please use `Versions`. + // version is the API version of the defined custom resource. + // The custom resources are served under `/apis///...`. + // Must match the name of the first item in the `versions` list if `version` and `versions` are both specified. + // Optional if `versions` is specified. + // Deprecated: use `versions` instead. // +optional Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` - // Names are the names used to describe this custom resource + // names specify the resource and kind names for the custom resource. Names CustomResourceDefinitionNames `json:"names" protobuf:"bytes,3,opt,name=names"` - // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced + // scope indicates whether the defined custom resource is cluster- or namespace-scoped. + // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"` - // Validation describes the validation methods for CustomResources - // Optional, the global validation schema for all versions. + // validation describes the schema used for validation and pruning of the custom resource. + // If present, this validation schema is used to validate all versions. // Top-level and per-version schemas are mutually exclusive. 
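As a brief hedged sketch of how the new KubeAPIApprovedAnnotation constant above is intended to be used on a CRD whose group sits under *.k8s.io (the CRD name and URL here are hypothetical placeholders):

package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical CRD metadata for a group under *.k8s.io; the annotation value
	// below is a placeholder URL standing in for the real approval link, or it
	// could be a string beginning with "unapproved" as described above.
	meta := metav1.ObjectMeta{
		Name: "widgets.example.k8s.io",
		Annotations: map[string]string{
			apiextensionsv1beta1.KubeAPIApprovedAnnotation: "https://github.com/kubernetes/kubernetes/pull/00000",
		},
	}
	fmt.Println(meta.Annotations[apiextensionsv1beta1.KubeAPIApprovedAnnotation])
}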
// +optional Validation *CustomResourceValidation `json:"validation,omitempty" protobuf:"bytes,5,opt,name=validation"` - // Subresources describes the subresources for CustomResource - // Optional, the global subresources for all versions. + // subresources specify what subresources the defined custom resource has. + // If present, this field configures subresources for all versions. // Top-level and per-version subresources are mutually exclusive. // +optional Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,6,opt,name=subresources"` - // Versions is the list of all supported versions for this resource. - // If Version field is provided, this field is optional. - // Validation: All versions must use the same validation schema for now. i.e., top - // level Validation field is applied to all of these versions. - // Order: The version name will be used to compute the order. + // versions is the list of all API versions of the defined custom resource. + // Optional if `version` is specified. + // The name of the first item in the `versions` list must match the `version` field if `version` and `versions` are both specified. + // Version names are used to compute the order in which served versions are listed in API discovery. // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first @@ -69,51 +77,57 @@ type CustomResourceDefinitionSpec struct { // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. // +optional Versions []CustomResourceDefinitionVersion `json:"versions,omitempty" protobuf:"bytes,7,rep,name=versions"` - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. - // Optional, the global columns for all versions. + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. + // If present, this field configures columns for all versions. // Top-level and per-version columns are mutually exclusive. + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,8,rep,name=additionalPrinterColumns"` - // `conversion` defines conversion settings for the CRD. + // conversion defines conversion settings for the CRD. // +optional Conversion *CustomResourceConversion `json:"conversion,omitempty" protobuf:"bytes,9,opt,name=conversion"` - // preserveUnknownFields disables pruning of object fields which are not - // specified in the OpenAPI schema. apiVersion, kind, metadata and known - // fields inside metadata are always preserved. - // Defaults to true in v1beta and will default to false in v1. + // preserveUnknownFields indicates that object fields which are not specified + // in the OpenAPI schema should be preserved when persisting to storage. + // apiVersion, kind, metadata and known fields inside metadata are always preserved. + // If false, schemas must be defined for all versions. + // Defaults to true in v1beta for backwards compatibility. 
+ // Deprecated: will be required to be false in v1. Preservation of unknown fields can be specified + // in the validation schema using the `x-kubernetes-preserve-unknown-fields: true` extension. + // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. + // +optional PreserveUnknownFields *bool `json:"preserveUnknownFields,omitempty" protobuf:"varint,10,opt,name=preserveUnknownFields"` } // CustomResourceConversion describes how to convert different versions of a CR. type CustomResourceConversion struct { - // `strategy` specifies the conversion strategy. Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the CR. + // strategy specifies how custom resources are converted between versions. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information - // is needed for this option. This requires spec.preserveUnknownFields to be false. + // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhookClientConfig to be set. Strategy ConversionStrategyType `json:"strategy" protobuf:"bytes,1,name=strategy"` - // `webhookClientConfig` is the instructions for how to call the webhook if strategy is `Webhook`. This field is - // alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // webhookClientConfig is the instructions for how to call the webhook if strategy is `Webhook`. + // Required when `strategy` is set to `Webhook`. // +optional WebhookClientConfig *WebhookClientConfig `json:"webhookClientConfig,omitempty" protobuf:"bytes,2,name=webhookClientConfig"` - // ConversionReviewVersions is an ordered list of preferred `ConversionReview` - // versions the Webhook expects. API server will try to use first version in + // conversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. The API server will use the first version in // the list which it supports. If none of the versions specified in this list - // supported by API server, conversion will fail for this object. + // are supported by API server, conversion will fail for the custom resource. // If a persisted Webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail. - // Default to `['v1beta1']`. + // Defaults to `["v1beta1"]`. // +optional ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty" protobuf:"bytes,3,rep,name=conversionReviewVersions"` } -// WebhookClientConfig contains the information to make a TLS -// connection with the webhook. It has the same field as admissionregistration.v1beta1.WebhookClientConfig. +// WebhookClientConfig contains the information to make a TLS connection with the webhook. type WebhookClientConfig struct { - // `url` gives the location of the webhook, in standard URL form + // url gives the location of the webhook, in standard URL form // (`scheme://host:port/path`). Exactly one of `url` or `service` // must be specified. 
// @@ -142,15 +156,15 @@ type WebhookClientConfig struct { // +optional URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` - // `service` is a reference to the service for this webhook. Either - // `service` or `url` must be specified. + // service is a reference to the service for this webhook. Either + // service or url must be specified. // // If the webhook is running within the cluster, then you should use `service`. // // +optional Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` - // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. // If unspecified, system trust roots on the apiserver are used. // +optional CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` @@ -158,53 +172,49 @@ type WebhookClientConfig struct { // ServiceReference holds a reference to Service.legacy.k8s.io type ServiceReference struct { - // `namespace` is the namespace of the service. + // namespace is the namespace of the service. // Required Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - // `name` is the name of the service. + // name is the name of the service. // Required Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // `path` is an optional URL path which will be sent in any request to - // this service. + // path is an optional URL path at which the webhook will be contacted. // +optional Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` - // If specified, the port on the service that hosting webhook. - // Default to 443 for backward compatibility. + // port is an optional service port at which the webhook will be contacted. // `port` should be a valid port number (1-65535, inclusive). + // Defaults to 443 for backward compatibility. // +optional Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } // CustomResourceDefinitionVersion describes a version for CRD. type CustomResourceDefinitionVersion struct { - // Name is the version name, e.g. “v1”, “v2beta1”, etc. + // name is the version name, e.g. “v1”, “v2beta1”, etc. + // The custom resources are served under this version at `/apis///...` if `served` is true. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Served is a flag enabling/disabling this version from being served via REST APIs + // served is a flag enabling/disabling this version from being served via REST APIs Served bool `json:"served" protobuf:"varint,2,opt,name=served"` - // Storage flags the version as storage version. There must be exactly one - // flagged as storage version. + // storage indicates this version should be used when persisting custom resources to storage. + // There must be exactly one version with storage=true. Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"` - // Schema describes the schema for CustomResource used in validation, pruning, and defaulting. + // schema describes the schema used for validation and pruning of this version of the custom resource. // Top-level and per-version schemas are mutually exclusive. - // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. 
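To tie the conversion-related fields above together, here is a hedged sketch (the namespace, service name, and path are hypothetical) of a `Webhook` conversion configuration using these v1beta1 types; note that `spec.preserveUnknownFields` must be false when this strategy is used:

package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	path := "/convert"             // hypothetical URL path served by the webhook
	port := int32(443)             // defaults to 443 when omitted
	preserveUnknownFields := false // required to be false for the Webhook strategy

	conversion := &apiextensionsv1beta1.CustomResourceConversion{
		Strategy: apiextensionsv1beta1.WebhookConverter,
		WebhookClientConfig: &apiextensionsv1beta1.WebhookClientConfig{
			// Exactly one of URL or Service may be set; in-cluster webhooks
			// should use a Service reference.
			Service: &apiextensionsv1beta1.ServiceReference{
				Namespace: "crd-conversion",     // hypothetical
				Name:      "conversion-webhook", // hypothetical
				Path:      &path,
				Port:      &port,
			},
			// CABundle would hold the PEM-encoded CA used to verify the
			// webhook's serving certificate; omitted in this sketch.
		},
		// The API server uses the first version in this list that it supports.
		ConversionReviewVersions: []string{"v1beta1"},
	}

	spec := apiextensionsv1beta1.CustomResourceDefinitionSpec{
		Conversion:            conversion,
		PreserveUnknownFields: &preserveUnknownFields,
	}
	fmt.Println(spec.Conversion.Strategy)
}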
+ // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead). // +optional Schema *CustomResourceValidation `json:"schema,omitempty" protobuf:"bytes,4,opt,name=schema"` - // Subresources describes the subresources for CustomResource + // subresources specify what subresources this version of the defined custom resource have. // Top-level and per-version subresources are mutually exclusive. - // Per-version subresources must not all be set to identical values (top-level subresources should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // Per-version subresources must not all be set to identical values (top-level subresources should be used instead). // +optional Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,5,opt,name=subresources"` - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + // additionalPrinterColumns specifies additional columns returned in Table output. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. // Top-level and per-version columns are mutually exclusive. - // Per-version columns must not all be set to identical values (top-level columns should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. - // NOTE: CRDs created prior to 1.13 populated the top-level additionalPrinterColumns field by default. To apply an - // update that changes to per-version additionalPrinterColumns, the top-level additionalPrinterColumns field must - // be explicitly set to null + // Per-version columns must not all be set to identical values (top-level columns should be used instead). + // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. // +optional AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,6,rep,name=additionalPrinterColumns"` } @@ -214,11 +224,11 @@ type CustomResourceColumnDefinition struct { // name is a human readable name for the column. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. Type string `json:"type" protobuf:"bytes,2,opt,name=type"` // format is an optional OpenAPI type definition for this column. The 'name' format is applied // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. // +optional Format string `json:"format,omitempty" protobuf:"bytes,3,opt,name=format"` // description is a human readable description of this column. @@ -226,31 +236,38 @@ type CustomResourceColumnDefinition struct { Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` // priority is an integer defining the relative importance of this column compared to others. 
Lower // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a higher priority. + // should be given a priority greater than 0. // +optional Priority int32 `json:"priority,omitempty" protobuf:"bytes,5,opt,name=priority"` - - // JSONPath is a simple JSON path, i.e. with array notation. + // JSONPath is a simple JSON path (i.e. with array notation) which is evaluated against + // each custom resource to produce the value for this column. JSONPath string `json:"JSONPath" protobuf:"bytes,6,opt,name=JSONPath"` } // CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition type CustomResourceDefinitionNames struct { - // Plural is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration - // too: plural.group and it must be all lowercase. + // plural is the plural name of the resource to serve. + // The custom resources are served under `/apis///.../`. + // Must match the name of the CustomResourceDefinition (in the form `.`). + // Must be all lowercase. Plural string `json:"plural" protobuf:"bytes,1,opt,name=plural"` - // Singular is the singular name of the resource. It must be all lowercase Defaults to lowercased + // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. // +optional Singular string `json:"singular,omitempty" protobuf:"bytes,2,opt,name=singular"` - // ShortNames are short names for the resource. It must be all lowercase. + // shortNames are short names for the resource, exposed in API discovery documents, + // and used by clients to support invocations like `kubectl get `. + // It must be all lowercase. // +optional ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"` - // Kind is the serialized kind of the resource. It is normally CamelCase and singular. + // kind is the serialized kind of the resource. It is normally CamelCase and singular. + // Custom resource instances will use this value as the `kind` attribute in API calls. Kind string `json:"kind" protobuf:"bytes,4,opt,name=kind"` - // ListKind is the serialized kind of the list for this resource. Defaults to List. + // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". // +optional ListKind string `json:"listKind,omitempty" protobuf:"bytes,5,opt,name=listKind"` - // Categories is a list of grouped resources custom resources belong to (e.g. 'all') + // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). + // This is published in API discovery documents, and used by clients to support invocations like + // `kubectl get all`. // +optional Categories []string `json:"categories,omitempty" protobuf:"bytes,6,rep,name=categories"` } @@ -304,41 +321,47 @@ const ( NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. Terminating CustomResourceDefinitionConditionType = "Terminating" + // KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs + // outside those groups, this condition will not be set. For CRDs inside those groups, the condition will + // be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false. + // See https://github.com/kubernetes/enhancements/pull/1111 for more details. 
+ KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant" ) // CustomResourceDefinitionCondition contains details for the current condition of this pod. type CustomResourceDefinitionCondition struct { - // Type is the type of the condition. Types include Established, NamesAccepted and Terminating. + // type is the type of the condition. Types include Established, NamesAccepted and Terminating. Type CustomResourceDefinitionConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=CustomResourceDefinitionConditionType"` - // Status is the status of the condition. + // status is the status of the condition. // Can be True, False, Unknown. Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // Last time the condition transitioned from one status to another. + // lastTransitionTime last time the condition transitioned from one status to another. // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // Unique, one-word, CamelCase reason for the condition's last transition. + // reason is a unique, one-word, CamelCase reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // Human-readable message indicating details about last transition. + // message is a human-readable message indicating details about last transition. // +optional Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition type CustomResourceDefinitionStatus struct { - // Conditions indicate state for particular aspects of a CustomResourceDefinition + // conditions indicate state for particular aspects of a CustomResourceDefinition + // +optional Conditions []CustomResourceDefinitionCondition `json:"conditions" protobuf:"bytes,1,opt,name=conditions"` - // AcceptedNames are the names that are actually being used to serve discovery + // acceptedNames are the names that are actually being used to serve discovery. // They may be different than the names in spec. AcceptedNames CustomResourceDefinitionNames `json:"acceptedNames" protobuf:"bytes,2,opt,name=acceptedNames"` - // StoredVersions are all versions of CustomResources that were ever persisted. Tracking these + // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these // versions allows a migration path for stored versions in etcd. The field is mutable - // so the migration controller can first finish a migration to another version (i.e. - // that no old objects are left in the storage), and then remove the rest of the + // so a migration controller can finish a migration to another version (ensuring + // no old objects are left in storage), and then remove the rest of the // versions from this list. - // None of the versions in this list can be removed from the spec.Versions field. + // Versions may not be removed from `spec.versions` while they exist in this list. StoredVersions []string `json:"storedVersions" protobuf:"bytes,3,rep,name=storedVersions"` } @@ -352,13 +375,14 @@ const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s. // CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format // <.spec.name>.<.spec.group>. +// Deprecated in v1.16, planned for removal in v1.19. 
Use apiextensions.k8s.io/v1 CustomResourceDefinition instead. type CustomResourceDefinition struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec describes how the user wants the resources to appear + // spec describes how the user wants the resources to appear Spec CustomResourceDefinitionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status indicates the actual state of the CustomResourceDefinition + // status indicates the actual state of the CustomResourceDefinition // +optional Status CustomResourceDefinitionStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -370,23 +394,26 @@ type CustomResourceDefinitionList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items individual CustomResourceDefinitions + // items list individual CustomResourceDefinition objects Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"` } // CustomResourceValidation is a list of validation methods for CustomResources. type CustomResourceValidation struct { - // OpenAPIV3Schema is the OpenAPI v3 schema to be validated against. + // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. // +optional OpenAPIV3Schema *JSONSchemaProps `json:"openAPIV3Schema,omitempty" protobuf:"bytes,1,opt,name=openAPIV3Schema"` } // CustomResourceSubresources defines the status and scale subresources for CustomResources. type CustomResourceSubresources struct { - // Status denotes the status subresource for CustomResources + // status indicates the custom resource should serve a `/status` subresource. + // When enabled: + // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. + // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. // +optional Status *CustomResourceSubresourceStatus `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"` - // Scale denotes the scale subresource for CustomResources + // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. // +optional Scale *CustomResourceSubresourceScale `json:"scale,omitempty" protobuf:"bytes,2,opt,name=scale"` } @@ -400,25 +427,25 @@ type CustomResourceSubresourceStatus struct{} // CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. type CustomResourceSubresourceScale struct { - // SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. + // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .spec. - // If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET. + // Must be a JSON Path under `.spec`. + // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. SpecReplicasPath string `json:"specReplicasPath" protobuf:"bytes,1,name=specReplicasPath"` - // StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. + // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. 
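As a hedged illustration of the subresource fields documented in this file (the JSON paths are the conventional `.spec.replicas`/`.status.*` choices, not values required by the API):

package main

import (
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	labelSelectorPath := ".status.labelSelector" // must point at a string field holding a serialized label selector

	subresources := &apiextensionsv1beta1.CustomResourceSubresources{
		// Enabling /status splits updates: the main endpoint ignores status
		// changes, and /status ignores everything except the status stanza.
		Status: &apiextensionsv1beta1.CustomResourceSubresourceStatus{},
		// Enabling /scale exposes an autoscaling/v1 Scale object backed by the
		// JSON paths below; labelSelectorPath is what lets a
		// HorizontalPodAutoscaler target the custom resource.
		Scale: &apiextensionsv1beta1.CustomResourceSubresourceScale{
			SpecReplicasPath:   ".spec.replicas",
			StatusReplicasPath: ".status.replicas",
			LabelSelectorPath:  &labelSelectorPath,
		},
	}
	fmt.Println(subresources.Scale.SpecReplicasPath)
}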
// Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status. - // If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource + // Must be a JSON Path under `.status`. + // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource // will default to 0. StatusReplicasPath string `json:"statusReplicasPath" protobuf:"bytes,2,opt,name=statusReplicasPath"` - // LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. + // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status or .spec. - // Must be set to work with HPA. + // Must be a JSON Path under `.status` or `.spec`. + // Must be set to work with HorizontalPodAutoscaler. // The field pointed by this JSON path must be a string field (not a complex selector struct) // which contains a serialized label selector in string form. // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource - // If there is no value under the given path in the CustomResource, the status label selector value in the /scale + // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` // subresource will default to the empty string. // +optional LabelSelectorPath *string `json:"labelSelectorPath,omitempty" protobuf:"bytes,3,opt,name=labelSelectorPath"` @@ -429,39 +456,39 @@ type CustomResourceSubresourceScale struct { // ConversionReview describes a conversion request/response. type ConversionReview struct { metav1.TypeMeta `json:",inline"` - // `request` describes the attributes for the conversion request. + // request describes the attributes for the conversion request. // +optional Request *ConversionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` - // `response` describes the attributes for the conversion response. + // response describes the attributes for the conversion response. // +optional Response *ConversionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` } // ConversionRequest describes the conversion request parameters. type ConversionRequest struct { - // `uid` is an identifier for the individual request/response. It allows us to distinguish instances of requests which are - // otherwise identical (parallel requests, requests when earlier requests did not modify etc) - // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are + // otherwise identical (parallel requests, etc). + // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` - // `desiredAPIVersion` is the version to convert given objects to. e.g. "myapi.example.com/v1" + // desiredAPIVersion is the version to convert given objects to. e.g. 
"myapi.example.com/v1" DesiredAPIVersion string `json:"desiredAPIVersion" protobuf:"bytes,2,name=desiredAPIVersion"` - // `objects` is the list of CR objects to be converted. + // objects is the list of custom resource objects to be converted. Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` } // ConversionResponse describes a conversion response. type ConversionResponse struct { - // `uid` is an identifier for the individual request/response. - // This should be copied over from the corresponding ConversionRequest. + // uid is an identifier for the individual request/response. + // This should be copied over from the corresponding `request.uid`. UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` - // `convertedObjects` is the list of converted version of `request.objects` if the `result` is successful otherwise empty. - // The webhook is expected to set apiVersion of these objects to the ConversionRequest.desiredAPIVersion. The list - // must also have the same size as the input list with the same objects in the same order (equal kind, UID, name and namespace). + // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. + // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list + // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. ConvertedObjects []runtime.RawExtension `json:"convertedObjects" protobuf:"bytes,2,rep,name=convertedObjects"` - // `result` contains the result of conversion with extra details if the conversion failed. `result.status` determines if - // the conversion failed or succeeded. The `result.status` field is required and represent the success or failure of the + // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` // will be used to construct an error message for the end user. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go index ed893bdff57a7..ec1f8022d7be5 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go @@ -26,8 +26,8 @@ type JSONSchemaProps struct { Format string `json:"format,omitempty" protobuf:"bytes,6,opt,name=format"` Title string `json:"title,omitempty" protobuf:"bytes,7,opt,name=title"` // default is a default value for undefined object fields. - // Defaulting is an alpha feature under the CustomResourceDefaulting feature gate. - // Defaulting requires spec.preserveUnknownFields to be false. + // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. + // CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API. 
Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"` Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"` ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"` @@ -90,6 +90,33 @@ type JSONSchemaProps struct { // - type: string // - ... zero or more XIntOrString bool `json:"x-kubernetes-int-or-string,omitempty" protobuf:"bytes,40,opt,name=xKubernetesIntOrString"` + + // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used + // as the index of the map. + // + // This tag MUST only be used on lists that have the "x-kubernetes-list-type" + // extension set to "map". Also, the values specified for this attribute must + // be a scalar typed field of the child structure (no nesting is supported). + // + // +optional + XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty" protobuf:"bytes,41,rep,name=xKubernetesListMapKeys"` + + // x-kubernetes-list-type annotates an array to further describe its topology. + // This extension must only be used on lists and may have 3 possible values: + // + // 1) `atomic`: the list is treated as a single entity, like a scalar. + // Atomic lists will be entirely replaced when updated. This extension + // may be used on any type of list (struct, scalar, ...). + // 2) `set`: + // Sets are lists that must not have multiple items with the same value. Each + // value must be a scalar (or another atomic type). + // 3) `map`: + // These lists are like maps in that their elements have a non-index key + // used to identify them. Order is preserved upon merge. The map tag + // must only be used on a list with elements of type object. + // Defaults to atomic for arrays. + // +optional + XListType *string `json:"x-kubernetes-list-type,omitempty" protobuf:"bytes,42,opt,name=xKubernetesListType"` } // JSON represents any valid JSON value. 
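The types_jsonschema.go hunk above adds the `x-kubernetes-list-map-keys` and `x-kubernetes-list-type` extensions to JSONSchemaProps. As a rough sketch (not part of the vendored diff) of how those new fields could be populated from Go, assuming only the v1beta1 types shown in this hunk; the `ports` array and its `containerPort` key are hypothetical names chosen for illustration:

package main

import (
	"fmt"

	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	// "map" marks the list as keyed: elements are identified by the fields
	// named in XListMapKeys rather than by their position in the array.
	listType := "map"

	// Hypothetical schema for a "ports" array keyed by "containerPort".
	ports := v1beta1.JSONSchemaProps{
		Type:         "array",
		XListType:    &listType,                 // serialized as x-kubernetes-list-type
		XListMapKeys: []string{"containerPort"}, // serialized as x-kubernetes-list-map-keys
		Items: &v1beta1.JSONSchemaPropsOrArray{
			Schema: &v1beta1.JSONSchemaProps{
				Type:     "object",
				Required: []string{"containerPort"},
				Properties: map[string]v1beta1.JSONSchemaProps{
					"containerPort": {Type: "integer"},
					"protocol":      {Type: "string"},
				},
			},
		},
	}

	fmt.Printf("x-kubernetes-list-type=%s keys=%v\n", *ports.XListType, ports.XListMapKeys)
}

Serialized to JSON, a schema like this carries the `x-kubernetes-list-type: map` and `x-kubernetes-list-map-keys: [containerPort]` extensions, which is also why the zz_generated.conversion.go hunk below starts copying XListMapKeys and XListType between the internal and v1beta1 representations.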
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index 90be856c7f9d1..64073df08f507 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -943,6 +943,8 @@ func autoConvert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JS out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) out.XEmbeddedResource = in.XEmbeddedResource out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) return nil } @@ -1128,6 +1130,8 @@ func autoConvert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *ap out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) out.XEmbeddedResource = in.XEmbeddedResource out.XIntOrString = in.XIntOrString + out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) + out.XListType = (*string)(unsafe.Pointer(in.XListType)) return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index f4201eb691036..95d5c7a355330 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -32,7 +32,9 @@ import ( const ( // StatusTooManyRequests means the server experienced too many requests within a // given window and that the client must wait to perform the action again. - StatusTooManyRequests = 429 + // DEPRECATED: please use http.StatusTooManyRequests, this will be removed in + // the future version. + StatusTooManyRequests = http.StatusTooManyRequests ) // StatusError is an error intended for consumption by a REST API server; it can also be @@ -349,7 +351,7 @@ func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { func NewTooManyRequestsError(message string) *StatusError { return &StatusError{metav1.Status{ Status: metav1.StatusFailure, - Code: StatusTooManyRequests, + Code: http.StatusTooManyRequests, Reason: metav1.StatusReasonTooManyRequests, Message: fmt.Sprintf("Too many requests: %s", message), }} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 086bce04b0ac0..fa4b767314cc8 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -135,7 +135,6 @@ func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata { OwnerReferences: m.GetOwnerReferences(), Finalizers: m.GetFinalizers(), ClusterName: m.GetClusterName(), - Initializers: m.GetInitializers(), ManagedFields: m.GetManagedFields(), }, } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 1b2cb05554fe8..9fca2e165deb7 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -17,23 +17,14 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto -/* -Package resource is a generated protocol buffer package. 
- -It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto - -It has these top-level messages: - Quantity -*/ package resource import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - math "math" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. @@ -47,19 +38,38 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Quantity) Reset() { *m = Quantity{} } -func (*Quantity) ProtoMessage() {} -func (*Quantity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Quantity) Reset() { *m = Quantity{} } +func (*Quantity) ProtoMessage() {} +func (*Quantity) Descriptor() ([]byte, []int) { + return fileDescriptor_612bba87bd70906c, []int{0} +} +func (m *Quantity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantity.Unmarshal(m, b) +} +func (m *Quantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantity.Marshal(b, m, deterministic) +} +func (m *Quantity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantity.Merge(m, src) +} +func (m *Quantity) XXX_Size() int { + return xxx_messageInfo_Quantity.Size(m) +} +func (m *Quantity) XXX_DiscardUnknown() { + xxx_messageInfo_Quantity.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantity proto.InternalMessageInfo func init() { proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_612bba87bd70906c) } -var fileDescriptorGenerated = []byte{ +var fileDescriptor_612bba87bd70906c = []byte{ // 237 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0xc3, 0x30, 0x10, 0x40, 0xcf, 0x0b, 0x2a, 0x19, 0x2b, 0x84, 0x10, 0xc3, 0xa5, 0x42, 0x0c, 0x2c, 0xd8, 0x6b, diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index acc9044452287..18a6c7cd6812a 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -26,7 +26,7 @@ option go_package = "resource"; // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. +// in addition to String() and AsInt64() accessors. // // The serialization format is: // diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 93a6c0c5004ca..516d041dafdda 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -29,7 +29,7 @@ import ( // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. +// in addition to String() and AsInt64() accessors. 
// // The serialization format is: // @@ -697,7 +697,9 @@ func (q *Quantity) MilliValue() int64 { return q.ScaledValue(Milli) } -// ScaledValue returns the value of ceil(q * 10^scale); this could overflow an int64. +// ScaledValue returns the value of ceil(q / 10^scale). +// For example, NewQuantity(1, DecimalSI).ScaledValue(Milli) returns 1000. +// This could overflow an int64. // To detect overflow, call Value() first and verify the expected magnitude. func (q *Quantity) ScaledValue(scale Scale) int64 { if q.d.Dec == nil { @@ -724,21 +726,3 @@ func (q *Quantity) SetScaled(value int64, scale Scale) { q.d.Dec = nil q.i = int64Amount{value: value, scale: scale} } - -// Copy is a convenience function that makes a deep copy for you. Non-deep -// copies of quantities share pointers and you will regret that. -func (q *Quantity) Copy() *Quantity { - if q.d.Dec == nil { - return &Quantity{ - s: q.s, - i: q.i, - Format: q.Format, - } - } - tmp := &inf.Dec{} - return &Quantity{ - s: q.s, - d: infDecAmount{tmp.Set(q.d.Dec)}, - Format: q.Format, - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go index 74dfb4e4b7c91..f89ca163cd39a 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go @@ -19,6 +19,7 @@ package resource import ( "fmt" "io" + "math/bits" "github.com/gogo/protobuf/proto" ) @@ -28,7 +29,7 @@ var _ proto.Sizer = &Quantity{} func (m *Quantity) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) - n, err := m.MarshalTo(data) + n, err := m.MarshalToSizedBuffer(data[:size]) if err != nil { return nil, err } @@ -38,30 +39,40 @@ func (m *Quantity) Marshal() (data []byte, err error) { // MarshalTo is a customized version of the generated Protobuf unmarshaler for a struct // with a single string field. func (m *Quantity) MarshalTo(data []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(data[:size]) +} + +// MarshalToSizedBuffer is a customized version of the generated +// Protobuf unmarshaler for a struct with a single string field. 
+func (m *Quantity) MarshalToSizedBuffer(data []byte) (int, error) { + i := len(data) _ = i var l int _ = l - data[i] = 0xa - i++ // BEGIN CUSTOM MARSHAL out := m.String() + i -= len(out) + copy(data[i:], out) i = encodeVarintGenerated(data, i, uint64(len(out))) - i += copy(data[i:], out) // END CUSTOM MARSHAL + i-- + data[i] = 0xa - return i, nil + return len(data) - i, nil } func encodeVarintGenerated(data []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { data[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } data[offset] = uint8(v) - return offset + 1 + return base } func (m *Quantity) Size() (n int) { @@ -77,14 +88,7 @@ func (m *Quantity) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (bits.Len64(x|1) + 6) / 7 } // Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go index cf668c7c81649..90f566b14f5c2 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go @@ -184,6 +184,7 @@ func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, name allErrs = append(allErrs, ValidateAnnotations(meta.GetAnnotations(), fldPath.Child("annotations"))...) allErrs = append(allErrs, ValidateOwnerReferences(meta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...) allErrs = append(allErrs, ValidateFinalizers(meta.GetFinalizers(), fldPath.Child("finalizers"))...) + allErrs = append(allErrs, v1validation.ValidateManagedFields(meta.GetManagedFields(), fldPath.Child("managedFields"))...) return allErrs } @@ -256,6 +257,7 @@ func ValidateObjectMetaAccessorUpdate(newMeta, oldMeta metav1.Object, fldPath *f allErrs = append(allErrs, v1validation.ValidateLabels(newMeta.GetLabels(), fldPath.Child("labels"))...) allErrs = append(allErrs, ValidateAnnotations(newMeta.GetAnnotations(), fldPath.Child("annotations"))...) allErrs = append(allErrs, ValidateOwnerReferences(newMeta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...) + allErrs = append(allErrs, v1validation.ValidateManagedFields(newMeta.GetManagedFields(), fldPath.Child("managedFields"))...) return allErrs } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 034eaaa8ae267..31b1d955ececd 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -17,81 +17,24 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto - - It has these top-level messages: - APIGroup - APIGroupList - APIResource - APIResourceList - APIVersions - CreateOptions - DeleteOptions - Duration - ExportOptions - Fields - GetOptions - GroupKind - GroupResource - GroupVersion - GroupVersionForDiscovery - GroupVersionKind - GroupVersionResource - Initializer - Initializers - LabelSelector - LabelSelectorRequirement - List - ListMeta - ListOptions - ManagedFieldsEntry - MicroTime - ObjectMeta - OwnerReference - PartialObjectMetadata - PartialObjectMetadataList - Patch - PatchOptions - Preconditions - RootPaths - ServerAddressByClientCIDR - Status - StatusCause - StatusDetails - TableOptions - Time - Timestamp - TypeMeta - UpdateOptions - Verbs - WatchEvent -*/ package v1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" math "math" - - k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" - + math_bits "math/bits" + reflect "reflect" + strings "strings" time "time" k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" - - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - - strings "strings" - - reflect "reflect" - - io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -106,193 +49,1199 @@ var _ = time.Kitchen // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *APIGroup) Reset() { *m = APIGroup{} } -func (*APIGroup) ProtoMessage() {} -func (*APIGroup) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *APIGroup) Reset() { *m = APIGroup{} } +func (*APIGroup) ProtoMessage() {} +func (*APIGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{0} +} +func (m *APIGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroup.Merge(m, src) +} +func (m *APIGroup) XXX_Size() int { + return m.Size() +} +func (m *APIGroup) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroup proto.InternalMessageInfo + +func (m *APIGroupList) Reset() { *m = APIGroupList{} } +func (*APIGroupList) ProtoMessage() {} +func (*APIGroupList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{1} +} +func (m *APIGroupList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupList.Merge(m, src) +} +func (m *APIGroupList) XXX_Size() int { + return m.Size() +} +func (m *APIGroupList) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroupList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroupList proto.InternalMessageInfo + +func (m *APIResource) Reset() { *m = APIResource{} } +func (*APIResource) ProtoMessage() {} +func (*APIResource) Descriptor() ([]byte, []int) { + return 
fileDescriptor_cf52fa777ced5367, []int{2} +} +func (m *APIResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResource.Merge(m, src) +} +func (m *APIResource) XXX_Size() int { + return m.Size() +} +func (m *APIResource) XXX_DiscardUnknown() { + xxx_messageInfo_APIResource.DiscardUnknown(m) +} + +var xxx_messageInfo_APIResource proto.InternalMessageInfo + +func (m *APIResourceList) Reset() { *m = APIResourceList{} } +func (*APIResourceList) ProtoMessage() {} +func (*APIResourceList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{3} +} +func (m *APIResourceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResourceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResourceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResourceList.Merge(m, src) +} +func (m *APIResourceList) XXX_Size() int { + return m.Size() +} +func (m *APIResourceList) XXX_DiscardUnknown() { + xxx_messageInfo_APIResourceList.DiscardUnknown(m) +} -func (m *APIGroupList) Reset() { *m = APIGroupList{} } -func (*APIGroupList) ProtoMessage() {} -func (*APIGroupList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_APIResourceList proto.InternalMessageInfo -func (m *APIResource) Reset() { *m = APIResource{} } -func (*APIResource) ProtoMessage() {} -func (*APIResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *APIVersions) Reset() { *m = APIVersions{} } +func (*APIVersions) ProtoMessage() {} +func (*APIVersions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{4} +} +func (m *APIVersions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIVersions) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIVersions.Merge(m, src) +} +func (m *APIVersions) XXX_Size() int { + return m.Size() +} +func (m *APIVersions) XXX_DiscardUnknown() { + xxx_messageInfo_APIVersions.DiscardUnknown(m) +} -func (m *APIResourceList) Reset() { *m = APIResourceList{} } -func (*APIResourceList) ProtoMessage() {} -func (*APIResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_APIVersions proto.InternalMessageInfo -func (m *APIVersions) Reset() { *m = APIVersions{} } -func (*APIVersions) ProtoMessage() {} -func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *CreateOptions) Reset() { *m = CreateOptions{} } +func (*CreateOptions) ProtoMessage() {} +func (*CreateOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{5} +} +func (m *CreateOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return 
b[:n], nil +} +func (m *CreateOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateOptions.Merge(m, src) +} +func (m *CreateOptions) XXX_Size() int { + return m.Size() +} +func (m *CreateOptions) XXX_DiscardUnknown() { + xxx_messageInfo_CreateOptions.DiscardUnknown(m) +} -func (m *CreateOptions) Reset() { *m = CreateOptions{} } -func (*CreateOptions) ProtoMessage() {} -func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_CreateOptions proto.InternalMessageInfo -func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } -func (*DeleteOptions) ProtoMessage() {} -func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{6} +} +func (m *DeleteOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeleteOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteOptions.Merge(m, src) +} +func (m *DeleteOptions) XXX_Size() int { + return m.Size() +} +func (m *DeleteOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteOptions.DiscardUnknown(m) +} -func (m *Duration) Reset() { *m = Duration{} } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (*ExportOptions) ProtoMessage() {} -func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{7} +} +func (m *Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} -func (m *Fields) Reset() { *m = Fields{} } -func (*Fields) ProtoMessage() {} -func (*Fields) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_Duration proto.InternalMessageInfo -func (m *GetOptions) Reset() { *m = GetOptions{} } -func (*GetOptions) ProtoMessage() {} -func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (*ExportOptions) ProtoMessage() {} +func (*ExportOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{8} +} +func (m *ExportOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*ExportOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportOptions.Merge(m, src) +} +func (m *ExportOptions) XXX_Size() int { + return m.Size() +} +func (m *ExportOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExportOptions.DiscardUnknown(m) +} -func (m *GroupKind) Reset() { *m = GroupKind{} } -func (*GroupKind) ProtoMessage() {} -func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_ExportOptions proto.InternalMessageInfo -func (m *GroupResource) Reset() { *m = GroupResource{} } -func (*GroupResource) ProtoMessage() {} -func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *FieldsV1) Reset() { *m = FieldsV1{} } +func (*FieldsV1) ProtoMessage() {} +func (*FieldsV1) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{9} +} +func (m *FieldsV1) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldsV1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldsV1) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldsV1.Merge(m, src) +} +func (m *FieldsV1) XXX_Size() int { + return m.Size() +} +func (m *FieldsV1) XXX_DiscardUnknown() { + xxx_messageInfo_FieldsV1.DiscardUnknown(m) +} -func (m *GroupVersion) Reset() { *m = GroupVersion{} } -func (*GroupVersion) ProtoMessage() {} -func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo + +func (m *GetOptions) Reset() { *m = GetOptions{} } +func (*GetOptions) ProtoMessage() {} +func (*GetOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{10} +} +func (m *GetOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GetOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetOptions.Merge(m, src) +} +func (m *GetOptions) XXX_Size() int { + return m.Size() +} +func (m *GetOptions) XXX_DiscardUnknown() { + xxx_messageInfo_GetOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_GetOptions proto.InternalMessageInfo + +func (m *GroupKind) Reset() { *m = GroupKind{} } +func (*GroupKind) ProtoMessage() {} +func (*GroupKind) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{11} +} +func (m *GroupKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupKind.Merge(m, src) +} +func (m *GroupKind) XXX_Size() int { + return m.Size() +} +func (m *GroupKind) XXX_DiscardUnknown() { + xxx_messageInfo_GroupKind.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupKind proto.InternalMessageInfo + +func (m *GroupResource) Reset() { *m = GroupResource{} } +func (*GroupResource) ProtoMessage() {} +func (*GroupResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{12} +} +func (m *GroupResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupResource) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupResource.Merge(m, src) +} +func (m *GroupResource) XXX_Size() int { + return m.Size() +} +func (m *GroupResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupResource.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupResource proto.InternalMessageInfo + +func (m *GroupVersion) Reset() { *m = GroupVersion{} } +func (*GroupVersion) ProtoMessage() {} +func (*GroupVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{13} +} +func (m *GroupVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersion.Merge(m, src) +} +func (m *GroupVersion) XXX_Size() int { + return m.Size() +} +func (m *GroupVersion) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupVersion proto.InternalMessageInfo func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func (*GroupVersionForDiscovery) ProtoMessage() {} func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} + return fileDescriptor_cf52fa777ced5367, []int{14} +} +func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionForDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionForDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionForDiscovery.Merge(m, src) +} +func (m *GroupVersionForDiscovery) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionForDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionForDiscovery.DiscardUnknown(m) } -func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } -func (*GroupVersionKind) ProtoMessage() {} -func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo -func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } -func (*GroupVersionResource) ProtoMessage() {} -func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } +func (*GroupVersionKind) ProtoMessage() {} +func (*GroupVersionKind) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{15} +} +func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionKind.Merge(m, src) +} +func (m *GroupVersionKind) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionKind) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionKind.DiscardUnknown(m) +} 
-func (m *Initializer) Reset() { *m = Initializer{} } -func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo -func (m *Initializers) Reset() { *m = Initializers{} } -func (*Initializers) ProtoMessage() {} -func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} +func (*GroupVersionResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{16} +} +func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionResource.Merge(m, src) +} +func (m *GroupVersionResource) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionResource.DiscardUnknown(m) +} -func (m *LabelSelector) Reset() { *m = LabelSelector{} } -func (*LabelSelector) ProtoMessage() {} -func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo + +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (*LabelSelector) ProtoMessage() {} +func (*LabelSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{17} +} +func (m *LabelSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelector.Merge(m, src) +} +func (m *LabelSelector) XXX_Size() int { + return m.Size() +} +func (m *LabelSelector) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelSelector proto.InternalMessageInfo func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_cf52fa777ced5367, []int{18} +} +func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelectorRequirement.Merge(m, src) +} +func (m *LabelSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *LabelSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo + +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{19} +} 
+func (m *List) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *List) XXX_Merge(src proto.Message) { + xxx_messageInfo_List.Merge(m, src) +} +func (m *List) XXX_Size() int { + return m.Size() +} +func (m *List) XXX_DiscardUnknown() { + xxx_messageInfo_List.DiscardUnknown(m) +} + +var xxx_messageInfo_List proto.InternalMessageInfo + +func (m *ListMeta) Reset() { *m = ListMeta{} } +func (*ListMeta) ProtoMessage() {} +func (*ListMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{20} +} +func (m *ListMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ListMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMeta.Merge(m, src) +} +func (m *ListMeta) XXX_Size() int { + return m.Size() +} +func (m *ListMeta) XXX_DiscardUnknown() { + xxx_messageInfo_ListMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMeta proto.InternalMessageInfo + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func (*ListOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{21} +} +func (m *ListOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ListOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListOptions.Merge(m, src) +} +func (m *ListOptions) XXX_Size() int { + return m.Size() +} +func (m *ListOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ListOptions.DiscardUnknown(m) } -func (m *List) Reset() { *m = List{} } -func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +var xxx_messageInfo_ListOptions proto.InternalMessageInfo -func (m *ListMeta) Reset() { *m = ListMeta{} } -func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } +func (*ManagedFieldsEntry) ProtoMessage() {} +func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{22} +} +func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManagedFieldsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ManagedFieldsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedFieldsEntry.Merge(m, src) +} +func (m *ManagedFieldsEntry) XXX_Size() int { + return m.Size() +} +func (m *ManagedFieldsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedFieldsEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo -func (m *ListOptions) Reset() { *m = ListOptions{} } -func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (m *MicroTime) 
Reset() { *m = MicroTime{} } +func (*MicroTime) ProtoMessage() {} +func (*MicroTime) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{23} +} +func (m *MicroTime) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MicroTime.Unmarshal(m, b) +} +func (m *MicroTime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MicroTime.Marshal(b, m, deterministic) +} +func (m *MicroTime) XXX_Merge(src proto.Message) { + xxx_messageInfo_MicroTime.Merge(m, src) +} +func (m *MicroTime) XXX_Size() int { + return xxx_messageInfo_MicroTime.Size(m) +} +func (m *MicroTime) XXX_DiscardUnknown() { + xxx_messageInfo_MicroTime.DiscardUnknown(m) +} + +var xxx_messageInfo_MicroTime proto.InternalMessageInfo + +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{24} +} +func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMeta.Merge(m, src) +} +func (m *ObjectMeta) XXX_Size() int { + return m.Size() +} +func (m *ObjectMeta) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMeta.DiscardUnknown(m) +} -func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } -func (*ManagedFieldsEntry) ProtoMessage() {} -func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo -func (m *MicroTime) Reset() { *m = MicroTime{} } -func (*MicroTime) ProtoMessage() {} -func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (m *OwnerReference) Reset() { *m = OwnerReference{} } +func (*OwnerReference) ProtoMessage() {} +func (*OwnerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{25} +} +func (m *OwnerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OwnerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OwnerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_OwnerReference.Merge(m, src) +} +func (m *OwnerReference) XXX_Size() int { + return m.Size() +} +func (m *OwnerReference) XXX_DiscardUnknown() { + xxx_messageInfo_OwnerReference.DiscardUnknown(m) +} -func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } -func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +var xxx_messageInfo_OwnerReference proto.InternalMessageInfo -func (m *OwnerReference) Reset() { *m = OwnerReference{} } -func (*OwnerReference) ProtoMessage() {} -func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } +func (*PartialObjectMetadata) ProtoMessage() {} +func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{26} +} +func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadata) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadata.Merge(m, src) +} +func (m *PartialObjectMetadata) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadata.DiscardUnknown(m) +} -func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } -func (*PartialObjectMetadata) ProtoMessage() {} -func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{29} + return fileDescriptor_cf52fa777ced5367, []int{27} +} +func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) +} +func (m *PartialObjectMetadataList) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) +} + +var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo + +func (m *Patch) Reset() { *m = Patch{} } +func (*Patch) ProtoMessage() {} +func (*Patch) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{28} +} +func (m *Patch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Patch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Patch) XXX_Merge(src proto.Message) { + xxx_messageInfo_Patch.Merge(m, src) +} +func (m *Patch) XXX_Size() int { + return m.Size() +} +func (m *Patch) XXX_DiscardUnknown() { + xxx_messageInfo_Patch.DiscardUnknown(m) } -func (m *Patch) Reset() { *m = Patch{} } -func (*Patch) ProtoMessage() {} -func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +var xxx_messageInfo_Patch proto.InternalMessageInfo + +func (m *PatchOptions) Reset() { *m = PatchOptions{} } +func (*PatchOptions) ProtoMessage() {} +func (*PatchOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{29} +} +func (m *PatchOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PatchOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PatchOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PatchOptions.Merge(m, src) +} +func (m *PatchOptions) XXX_Size() int { + return m.Size() +} +func (m *PatchOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PatchOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_PatchOptions proto.InternalMessageInfo + +func (m *Preconditions) Reset() { *m = Preconditions{} } 
+func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{30} +} +func (m *Preconditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Preconditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Preconditions.Merge(m, src) +} +func (m *Preconditions) XXX_Size() int { + return m.Size() +} +func (m *Preconditions) XXX_DiscardUnknown() { + xxx_messageInfo_Preconditions.DiscardUnknown(m) +} -func (m *PatchOptions) Reset() { *m = PatchOptions{} } -func (*PatchOptions) ProtoMessage() {} -func (*PatchOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +var xxx_messageInfo_Preconditions proto.InternalMessageInfo -func (m *Preconditions) Reset() { *m = Preconditions{} } -func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (m *RootPaths) Reset() { *m = RootPaths{} } +func (*RootPaths) ProtoMessage() {} +func (*RootPaths) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{31} +} +func (m *RootPaths) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RootPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RootPaths) XXX_Merge(src proto.Message) { + xxx_messageInfo_RootPaths.Merge(m, src) +} +func (m *RootPaths) XXX_Size() int { + return m.Size() +} +func (m *RootPaths) XXX_DiscardUnknown() { + xxx_messageInfo_RootPaths.DiscardUnknown(m) +} -func (m *RootPaths) Reset() { *m = RootPaths{} } -func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +var xxx_messageInfo_RootPaths proto.InternalMessageInfo func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{34} + return fileDescriptor_cf52fa777ced5367, []int{32} +} +func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerAddressByClientCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServerAddressByClientCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerAddressByClientCIDR.Merge(m, src) +} +func (m *ServerAddressByClientCIDR) XXX_Size() int { + return m.Size() +} +func (m *ServerAddressByClientCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServerAddressByClientCIDR.DiscardUnknown(m) } -func (m *Status) Reset() { *m = Status{} } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo -func (m *StatusCause) Reset() { *m = StatusCause{} } -func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (m *Status) Reset() { *m = Status{} } +func (*Status) 
ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{33} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return m.Size() +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} -func (m *StatusDetails) Reset() { *m = StatusDetails{} } -func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +var xxx_messageInfo_Status proto.InternalMessageInfo -func (m *TableOptions) Reset() { *m = TableOptions{} } -func (*TableOptions) ProtoMessage() {} -func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (m *StatusCause) Reset() { *m = StatusCause{} } +func (*StatusCause) ProtoMessage() {} +func (*StatusCause) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{34} +} +func (m *StatusCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatusCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusCause.Merge(m, src) +} +func (m *StatusCause) XXX_Size() int { + return m.Size() +} +func (m *StatusCause) XXX_DiscardUnknown() { + xxx_messageInfo_StatusCause.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusCause proto.InternalMessageInfo + +func (m *StatusDetails) Reset() { *m = StatusDetails{} } +func (*StatusDetails) ProtoMessage() {} +func (*StatusDetails) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{35} +} +func (m *StatusDetails) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatusDetails) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusDetails.Merge(m, src) +} +func (m *StatusDetails) XXX_Size() int { + return m.Size() +} +func (m *StatusDetails) XXX_DiscardUnknown() { + xxx_messageInfo_StatusDetails.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusDetails proto.InternalMessageInfo + +func (m *TableOptions) Reset() { *m = TableOptions{} } +func (*TableOptions) ProtoMessage() {} +func (*TableOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{36} +} +func (m *TableOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TableOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TableOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_TableOptions.Merge(m, src) +} +func (m *TableOptions) XXX_Size() int { + return m.Size() +} +func (m *TableOptions) XXX_DiscardUnknown() { + xxx_messageInfo_TableOptions.DiscardUnknown(m) +} -func (m *Time) Reset() { *m = Time{} } -func (*Time) ProtoMessage() {} -func (*Time) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +var xxx_messageInfo_TableOptions proto.InternalMessageInfo -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (m *Time) Reset() { *m = Time{} } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{37} +} +func (m *Time) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Time.Unmarshal(m, b) +} +func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Time.Marshal(b, m, deterministic) +} +func (m *Time) XXX_Merge(src proto.Message) { + xxx_messageInfo_Time.Merge(m, src) +} +func (m *Time) XXX_Size() int { + return xxx_messageInfo_Time.Size(m) +} +func (m *Time) XXX_DiscardUnknown() { + xxx_messageInfo_Time.DiscardUnknown(m) +} -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +var xxx_messageInfo_Time proto.InternalMessageInfo -func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } -func (*UpdateOptions) ProtoMessage() {} -func (*UpdateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{38} +} +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo -func (m *Verbs) Reset() { *m = Verbs{} } -func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{39} +} +func (m *TypeMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeMeta.Merge(m, src) +} +func (m *TypeMeta) XXX_Size() int { + return m.Size() +} +func (m *TypeMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TypeMeta.DiscardUnknown(m) +} -func (m *WatchEvent) Reset() { *m = WatchEvent{} } -func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +var xxx_messageInfo_TypeMeta proto.InternalMessageInfo + +func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } +func (*UpdateOptions) ProtoMessage() {} +func (*UpdateOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{40} +} +func (m *UpdateOptions) XXX_Unmarshal(b []byte) error { 
+ return m.Unmarshal(b) +} +func (m *UpdateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UpdateOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateOptions.Merge(m, src) +} +func (m *UpdateOptions) XXX_Size() int { + return m.Size() +} +func (m *UpdateOptions) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo + +func (m *Verbs) Reset() { *m = Verbs{} } +func (*Verbs) ProtoMessage() {} +func (*Verbs) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{41} +} +func (m *Verbs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Verbs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Verbs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Verbs.Merge(m, src) +} +func (m *Verbs) XXX_Size() int { + return m.Size() +} +func (m *Verbs) XXX_DiscardUnknown() { + xxx_messageInfo_Verbs.DiscardUnknown(m) +} + +var xxx_messageInfo_Verbs proto.InternalMessageInfo + +func (m *WatchEvent) Reset() { *m = WatchEvent{} } +func (*WatchEvent) ProtoMessage() {} +func (*WatchEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{42} +} +func (m *WatchEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WatchEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchEvent.Merge(m, src) +} +func (m *WatchEvent) XXX_Size() int { + return m.Size() +} +func (m *WatchEvent) XXX_DiscardUnknown() { + xxx_messageInfo_WatchEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchEvent proto.InternalMessageInfo func init() { proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") @@ -304,7 +1253,7 @@ func init() { proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") - proto.RegisterType((*Fields)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Fields") + proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") @@ -312,9 +1261,8 @@ func init() { proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") - proto.RegisterType((*Initializer)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializer") - proto.RegisterType((*Initializers)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializers") proto.RegisterType((*LabelSelector)(nil), 
"k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector.MatchLabelsEntry") proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List") proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") @@ -322,6 +1270,8 @@ func init() { proto.RegisterType((*ManagedFieldsEntry)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry") proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.LabelsEntry") proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata") proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList") @@ -341,10 +1291,189 @@ func init() { proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_cf52fa777ced5367) +} + +var fileDescriptor_cf52fa777ced5367 = []byte{ + // 2713 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0xcd, 0x6f, 0x1b, 0x59, + 0x3d, 0x63, 0xc7, 0x8e, 0xfd, 0x73, 0x9c, 0x8f, 0x97, 0x16, 0xdc, 0x00, 0x71, 0x76, 0x16, 0xad, + 0x52, 0xe8, 0x3a, 0x9b, 0x02, 0xab, 0xd2, 0x65, 0x0b, 0x71, 0x9c, 0x74, 0xc3, 0x36, 0x4d, 0xf4, + 0xd2, 0x16, 0x28, 0x15, 0xea, 0x64, 0xe6, 0xc5, 0x19, 0x32, 0x9e, 0xf1, 0xbe, 0x19, 0x27, 0x35, + 0x1c, 0xd8, 0x03, 0x08, 0x90, 0x60, 0xd5, 0x23, 0xe2, 0x80, 0xb6, 0x82, 0xbf, 0x80, 0x13, 0x7f, + 0x00, 0x12, 0xbd, 0x20, 0xad, 0xc4, 0x65, 0x25, 0x90, 0xb5, 0x0d, 0x07, 0x8e, 0x88, 0x6b, 0x4e, + 0xe8, 0x7d, 0xcd, 0x87, 0x1d, 0x37, 0x63, 0xba, 0xac, 0xf6, 0xe6, 0xf9, 0x7d, 0xff, 0xde, 0xfb, + 0xbd, 0xdf, 0x97, 0x61, 0xeb, 0xf0, 0x9a, 0x5f, 0xb3, 0xbd, 0xe5, 0xc3, 0xce, 0x1e, 0xa1, 0x2e, + 0x09, 0x88, 0xbf, 0x7c, 0x44, 0x5c, 0xcb, 0xa3, 0xcb, 0x12, 0x61, 0xb4, 0xed, 0x96, 0x61, 0x1e, + 0xd8, 0x2e, 0xa1, 0xdd, 0xe5, 0xf6, 0x61, 0x93, 0x01, 0xfc, 0xe5, 0x16, 0x09, 0x8c, 0xe5, 0xa3, + 0x95, 0xe5, 0x26, 0x71, 0x09, 0x35, 0x02, 0x62, 0xd5, 0xda, 0xd4, 0x0b, 0x3c, 0xf4, 0x45, 0xc1, + 0x55, 0x8b, 0x73, 0xd5, 0xda, 0x87, 0x4d, 0x06, 0xf0, 0x6b, 0x8c, 0xab, 0x76, 0xb4, 0x32, 0xff, + 0x6a, 0xd3, 0x0e, 0x0e, 0x3a, 0x7b, 0x35, 0xd3, 0x6b, 0x2d, 0x37, 0xbd, 0xa6, 0xb7, 0xcc, 0x99, + 0xf7, 0x3a, 0xfb, 0xfc, 0x8b, 0x7f, 0xf0, 0x5f, 0x42, 0xe8, 0xfc, 0x50, 0x53, 0x68, 0xc7, 0x0d, + 0xec, 0x16, 0xe9, 0xb7, 0x62, 0xfe, 0xf5, 0xf3, 0x18, 0x7c, 0xf3, 0x80, 0xb4, 0x8c, 0x7e, 0x3e, + 0xfd, 0x2f, 0x59, 0x28, 0xac, 0xee, 0x6c, 0xde, 0xa4, 0x5e, 0xa7, 0x8d, 0x16, 0x61, 0xdc, 0x35, + 0x5a, 0xa4, 0xa2, 0x2d, 0x6a, 0x4b, 0xc5, 0xfa, 0xe4, 0xd3, 0x5e, 0x75, 0xec, 0xa4, 0x57, 0x1d, + 0xbf, 0x6d, 0xb4, 0x08, 0xe6, 0x18, 0xe4, 0x40, 0xe1, 0x88, 0x50, 0xdf, 0xf6, 0x5c, 
0xbf, 0x92, + 0x59, 0xcc, 0x2e, 0x95, 0xae, 0xde, 0xa8, 0xa5, 0xf1, 0xbf, 0xc6, 0x15, 0xdc, 0x13, 0xac, 0x1b, + 0x1e, 0x6d, 0xd8, 0xbe, 0xe9, 0x1d, 0x11, 0xda, 0xad, 0xcf, 0x48, 0x2d, 0x05, 0x89, 0xf4, 0x71, + 0xa8, 0x01, 0xfd, 0x54, 0x83, 0x99, 0x36, 0x25, 0xfb, 0x84, 0x52, 0x62, 0x49, 0x7c, 0x25, 0xbb, + 0xa8, 0x7d, 0x0c, 0x6a, 0x2b, 0x52, 0xed, 0xcc, 0x4e, 0x9f, 0x7c, 0x3c, 0xa0, 0x11, 0xfd, 0x5e, + 0x83, 0x79, 0x9f, 0xd0, 0x23, 0x42, 0x57, 0x2d, 0x8b, 0x12, 0xdf, 0xaf, 0x77, 0xd7, 0x1c, 0x9b, + 0xb8, 0xc1, 0xda, 0x66, 0x03, 0xfb, 0x95, 0x71, 0x7e, 0x0e, 0xdf, 0x4c, 0x67, 0xd0, 0xee, 0x30, + 0x39, 0x75, 0x5d, 0x5a, 0x34, 0x3f, 0x94, 0xc4, 0xc7, 0xcf, 0x31, 0x43, 0xdf, 0x87, 0x49, 0x75, + 0x91, 0xb7, 0x6c, 0x3f, 0x40, 0xf7, 0x20, 0xdf, 0x64, 0x1f, 0x7e, 0x45, 0xe3, 0x06, 0xd6, 0xd2, + 0x19, 0xa8, 0x64, 0xd4, 0xa7, 0xa4, 0x3d, 0x79, 0xfe, 0xe9, 0x63, 0x29, 0x4d, 0xff, 0xe5, 0x38, + 0x94, 0x56, 0x77, 0x36, 0x31, 0xf1, 0xbd, 0x0e, 0x35, 0x49, 0x8a, 0xa0, 0xb9, 0x06, 0x93, 0xbe, + 0xed, 0x36, 0x3b, 0x8e, 0x41, 0x19, 0xb4, 0x92, 0xe7, 0x94, 0x17, 0x24, 0xe5, 0xe4, 0x6e, 0x0c, + 0x87, 0x13, 0x94, 0xe8, 0x2a, 0x00, 0x93, 0xe0, 0xb7, 0x0d, 0x93, 0x58, 0x95, 0xcc, 0xa2, 0xb6, + 0x54, 0xa8, 0x23, 0xc9, 0x07, 0xb7, 0x43, 0x0c, 0x8e, 0x51, 0xa1, 0x97, 0x21, 0xc7, 0x2d, 0xad, + 0x14, 0xb8, 0x9a, 0xb2, 0x24, 0xcf, 0x71, 0x37, 0xb0, 0xc0, 0xa1, 0xcb, 0x30, 0x21, 0xa3, 0xac, + 0x52, 0xe4, 0x64, 0xd3, 0x92, 0x6c, 0x42, 0x85, 0x81, 0xc2, 0x33, 0xff, 0x0e, 0x6d, 0xd7, 0xe2, + 0x71, 0x17, 0xf3, 0xef, 0x6d, 0xdb, 0xb5, 0x30, 0xc7, 0xa0, 0x5b, 0x90, 0x3b, 0x22, 0x74, 0x8f, + 0x45, 0x02, 0x0b, 0xcd, 0x2f, 0xa7, 0x3b, 0xe8, 0x7b, 0x8c, 0xa5, 0x5e, 0x64, 0xa6, 0xf1, 0x9f, + 0x58, 0x08, 0x41, 0x35, 0x00, 0xff, 0xc0, 0xa3, 0x01, 0x77, 0xaf, 0x92, 0x5b, 0xcc, 0x2e, 0x15, + 0xeb, 0x53, 0xcc, 0xdf, 0xdd, 0x10, 0x8a, 0x63, 0x14, 0x8c, 0xde, 0x34, 0x02, 0xd2, 0xf4, 0xa8, + 0x4d, 0xfc, 0xca, 0x44, 0x44, 0xbf, 0x16, 0x42, 0x71, 0x8c, 0x02, 0x7d, 0x1b, 0x90, 0x1f, 0x78, + 0xd4, 0x68, 0x12, 0xe9, 0xea, 0x5b, 0x86, 0x7f, 0x50, 0x01, 0xee, 0xdd, 0xbc, 0xf4, 0x0e, 0xed, + 0x0e, 0x50, 0xe0, 0x33, 0xb8, 0xf4, 0x3f, 0x6a, 0x30, 0x1d, 0x8b, 0x05, 0x1e, 0x77, 0xd7, 0x60, + 0xb2, 0x19, 0x7b, 0x75, 0x32, 0x2e, 0xc2, 0xdb, 0x8e, 0xbf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a, + 0x54, 0x4a, 0x52, 0xd9, 0x65, 0x25, 0x75, 0xd0, 0x2a, 0x1b, 0x22, 0x4d, 0x31, 0xa0, 0x8f, 0x23, + 0xc9, 0xfa, 0xbf, 0x34, 0x1e, 0xc0, 0x2a, 0xdf, 0xa0, 0xa5, 0x58, 0x4e, 0xd3, 0xf8, 0xf1, 0x4d, + 0x0e, 0xc9, 0x47, 0xe7, 0x24, 0x82, 0xcc, 0xa7, 0x22, 0x11, 0x5c, 0x2f, 0xfc, 0xe6, 0xfd, 0xea, + 0xd8, 0xbb, 0xff, 0x58, 0x1c, 0xd3, 0x5b, 0x50, 0x5e, 0xa3, 0xc4, 0x08, 0xc8, 0x76, 0x3b, 0xe0, + 0x0e, 0xe8, 0x90, 0xb7, 0x68, 0x17, 0x77, 0x5c, 0xe9, 0x28, 0xb0, 0xf7, 0xdd, 0xe0, 0x10, 0x2c, + 0x31, 0xec, 0xfe, 0xf6, 0x6d, 0xe2, 0x58, 0x5b, 0x86, 0x6b, 0x34, 0x09, 0x95, 0x71, 0x1f, 0x9e, + 0xea, 0x46, 0x0c, 0x87, 0x13, 0x94, 0xfa, 0xcf, 0xb3, 0x50, 0x6e, 0x10, 0x87, 0x44, 0xfa, 0x36, + 0x00, 0x35, 0xa9, 0x61, 0x92, 0x1d, 0x42, 0x6d, 0xcf, 0xda, 0x25, 0xa6, 0xe7, 0x5a, 0x3e, 0x8f, + 0x88, 0x6c, 0xfd, 0x33, 0x2c, 0xce, 0x6e, 0x0e, 0x60, 0xf1, 0x19, 0x1c, 0xc8, 0x81, 0x72, 0x9b, + 0xf2, 0xdf, 0x76, 0x20, 0x6b, 0x0f, 0x7b, 0x69, 0x5f, 0x49, 0x77, 0xd4, 0x3b, 0x71, 0xd6, 0xfa, + 0xec, 0x49, 0xaf, 0x5a, 0x4e, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x16, 0xcc, 0x78, 0xb4, 0x7d, 0x60, + 0xb8, 0x0d, 0xd2, 0x26, 0xae, 0x45, 0xdc, 0xc0, 0xe7, 0xa7, 0x50, 0xa8, 0x5f, 0x60, 0x15, 0x63, + 0xbb, 0x0f, 0x87, 0x07, 0xa8, 0xd1, 0x7d, 0x98, 0x6d, 0x53, 0xaf, 0x6d, 0x34, 0x0d, 0x26, 0x71, + 0xc7, 0x73, 
0x6c, 0xb3, 0xcb, 0xb3, 0x43, 0xb1, 0x7e, 0xe5, 0xa4, 0x57, 0x9d, 0xdd, 0xe9, 0x47, + 0x9e, 0xf6, 0xaa, 0x73, 0xfc, 0xe8, 0x18, 0x24, 0x42, 0xe2, 0x41, 0x31, 0xb1, 0x3b, 0xcc, 0x0d, + 0xbb, 0x43, 0x7d, 0x13, 0x0a, 0x8d, 0x0e, 0xe5, 0x5c, 0xe8, 0x4d, 0x28, 0x58, 0xf2, 0xb7, 0x3c, + 0xf9, 0x97, 0x54, 0xc9, 0x55, 0x34, 0xa7, 0xbd, 0x6a, 0x99, 0x35, 0x09, 0x35, 0x05, 0xc0, 0x21, + 0x8b, 0xfe, 0x00, 0xca, 0xeb, 0x8f, 0xda, 0x1e, 0x0d, 0xd4, 0x9d, 0xbe, 0x02, 0x79, 0xc2, 0x01, + 0x5c, 0x5a, 0x21, 0xaa, 0x13, 0x82, 0x0c, 0x4b, 0x2c, 0xcb, 0xc3, 0xe4, 0x91, 0x61, 0x06, 0x32, + 0x6d, 0x87, 0x79, 0x78, 0x9d, 0x01, 0xb1, 0xc0, 0xe9, 0x9f, 0x87, 0x02, 0x0f, 0x28, 0xff, 0xde, + 0x0a, 0x9a, 0x81, 0x2c, 0x36, 0x8e, 0xb9, 0xd4, 0x49, 0x9c, 0xa5, 0xc6, 0xb1, 0xbe, 0x0d, 0x70, + 0x93, 0x84, 0x8a, 0x57, 0x61, 0x5a, 0x3d, 0xe2, 0x64, 0x6e, 0xf9, 0xac, 0x14, 0x3d, 0x8d, 0x93, + 0x68, 0xdc, 0x4f, 0xaf, 0x3f, 0x80, 0x22, 0xcf, 0x3f, 0x2c, 0x79, 0x47, 0x85, 0x42, 0x7b, 0x4e, + 0xa1, 0x50, 0xd9, 0x3f, 0x33, 0x2c, 0xfb, 0xc7, 0x9e, 0x9b, 0x03, 0x65, 0xc1, 0xab, 0x4a, 0x63, + 0x2a, 0x0d, 0x57, 0xa0, 0xa0, 0xcc, 0x94, 0x5a, 0xc2, 0x96, 0x48, 0x09, 0xc2, 0x21, 0x45, 0x4c, + 0xdb, 0x01, 0x24, 0x72, 0x69, 0x3a, 0x65, 0xb1, 0xba, 0x97, 0x79, 0x7e, 0xdd, 0x8b, 0x69, 0xfa, + 0x09, 0x54, 0x86, 0xf5, 0x51, 0x2f, 0x90, 0xed, 0xd3, 0x9b, 0xa2, 0xbf, 0xa7, 0xc1, 0x4c, 0x5c, + 0x52, 0xfa, 0xeb, 0x4b, 0xaf, 0xe4, 0xfc, 0x3a, 0x1f, 0x3b, 0x91, 0xdf, 0x69, 0x70, 0x21, 0xe1, + 0xda, 0x48, 0x37, 0x3e, 0x82, 0x51, 0xf1, 0xe0, 0xc8, 0x8e, 0x10, 0x1c, 0x7f, 0xcb, 0x40, 0xf9, + 0x96, 0xb1, 0x47, 0x9c, 0x5d, 0xe2, 0x10, 0x33, 0xf0, 0x28, 0xfa, 0x31, 0x94, 0x5a, 0x46, 0x60, + 0x1e, 0x70, 0xa8, 0xea, 0x09, 0x1b, 0xe9, 0x12, 0x68, 0x42, 0x52, 0x6d, 0x2b, 0x12, 0xb3, 0xee, + 0x06, 0xb4, 0x5b, 0x9f, 0x93, 0x26, 0x95, 0x62, 0x18, 0x1c, 0xd7, 0xc6, 0x1b, 0x79, 0xfe, 0xbd, + 0xfe, 0xa8, 0xcd, 0x0a, 0xd6, 0xe8, 0xf3, 0x43, 0xc2, 0x04, 0x4c, 0xde, 0xe9, 0xd8, 0x94, 0xb4, + 0x88, 0x1b, 0x44, 0x8d, 0xfc, 0x56, 0x9f, 0x7c, 0x3c, 0xa0, 0x71, 0xfe, 0x06, 0xcc, 0xf4, 0x1b, + 0xcf, 0xb2, 0xce, 0x21, 0xe9, 0x8a, 0xfb, 0xc2, 0xec, 0x27, 0xba, 0x00, 0xb9, 0x23, 0xc3, 0xe9, + 0xc8, 0xd7, 0x88, 0xc5, 0xc7, 0xf5, 0xcc, 0x35, 0x4d, 0xff, 0x83, 0x06, 0x95, 0x61, 0x86, 0xa0, + 0x2f, 0xc4, 0x04, 0xd5, 0x4b, 0xd2, 0xaa, 0xec, 0xdb, 0xa4, 0x2b, 0xa4, 0xae, 0x43, 0xc1, 0x6b, + 0xb3, 0xd1, 0xcb, 0xa3, 0xf2, 0xd6, 0x2f, 0xab, 0x9b, 0xdc, 0x96, 0xf0, 0xd3, 0x5e, 0xf5, 0x62, + 0x42, 0xbc, 0x42, 0xe0, 0x90, 0x95, 0x65, 0x7f, 0x6e, 0x0f, 0xab, 0x48, 0x61, 0xf6, 0xbf, 0xc7, + 0x21, 0x58, 0x62, 0xf4, 0x3f, 0x69, 0x30, 0xce, 0x5b, 0xb1, 0x07, 0x50, 0x60, 0xe7, 0x67, 0x19, + 0x81, 0xc1, 0xed, 0x4a, 0x3d, 0x04, 0x30, 0xee, 0x2d, 0x12, 0x18, 0x51, 0xb4, 0x29, 0x08, 0x0e, + 0x25, 0x22, 0x0c, 0x39, 0x3b, 0x20, 0x2d, 0x75, 0x91, 0xaf, 0x0e, 0x15, 0x2d, 0x47, 0xd0, 0x1a, + 0x36, 0x8e, 0xd7, 0x1f, 0x05, 0xc4, 0x65, 0x97, 0x11, 0x3d, 0x8d, 0x4d, 0x26, 0x03, 0x0b, 0x51, + 0xfa, 0x7f, 0x34, 0x08, 0x55, 0xb1, 0xe0, 0xf7, 0x89, 0xb3, 0x7f, 0xcb, 0x76, 0x0f, 0xe5, 0xb1, + 0x86, 0xe6, 0xec, 0x4a, 0x38, 0x0e, 0x29, 0xce, 0x2a, 0x0f, 0x99, 0xd1, 0xca, 0x03, 0x53, 0x68, + 0x7a, 0x6e, 0x60, 0xbb, 0x9d, 0x81, 0xd7, 0xb6, 0x26, 0xe1, 0x38, 0xa4, 0x60, 0xcd, 0x0d, 0x25, + 0x2d, 0xc3, 0x76, 0x6d, 0xb7, 0xc9, 0x9c, 0x58, 0xf3, 0x3a, 0x6e, 0xc0, 0xab, 0xbc, 0x6c, 0x6e, + 0xf0, 0x00, 0x16, 0x9f, 0xc1, 0xa1, 0xff, 0x35, 0x0b, 0x25, 0xe6, 0xb3, 0xaa, 0x73, 0x6f, 0x40, + 0xd9, 0x89, 0x47, 0x81, 0xf4, 0xfd, 0xa2, 0x34, 0x25, 0xf9, 0xae, 0x71, 0x92, 0x96, 0x31, 0xf3, + 0x9e, 0x2c, 0x64, 0xce, 0x24, 0x99, 
0x37, 0xe2, 0x48, 0x9c, 0xa4, 0x65, 0xd9, 0xeb, 0x98, 0xbd, + 0x0f, 0xd9, 0xed, 0x84, 0x57, 0xf4, 0x1d, 0x06, 0xc4, 0x02, 0x87, 0xb6, 0x60, 0xce, 0x70, 0x1c, + 0xef, 0x98, 0x03, 0xeb, 0x9e, 0x77, 0xd8, 0x32, 0xe8, 0xa1, 0xcf, 0xc7, 0xa8, 0x42, 0xfd, 0x73, + 0x92, 0x65, 0x6e, 0x75, 0x90, 0x04, 0x9f, 0xc5, 0x77, 0xd6, 0xb5, 0x8d, 0x8f, 0x78, 0x6d, 0xd7, + 0x61, 0x8a, 0xc5, 0x97, 0xd7, 0x09, 0x54, 0x87, 0x99, 0xe3, 0x97, 0x80, 0x4e, 0x7a, 0xd5, 0xa9, + 0x3b, 0x09, 0x0c, 0xee, 0xa3, 0x64, 0x2e, 0x3b, 0x76, 0xcb, 0x0e, 0x2a, 0x13, 0x9c, 0x25, 0x74, + 0xf9, 0x16, 0x03, 0x62, 0x81, 0x4b, 0xc4, 0x45, 0xe1, 0xbc, 0xb8, 0xd0, 0x7f, 0x9b, 0x05, 0x24, + 0x5a, 0x62, 0x4b, 0xf4, 0x36, 0x22, 0xd1, 0x5c, 0x86, 0x89, 0x96, 0x6c, 0xa9, 0xb5, 0x64, 0xd6, + 0x57, 0xdd, 0xb4, 0xc2, 0xa3, 0x2d, 0x28, 0x8a, 0x07, 0x1f, 0x05, 0xf1, 0xb2, 0x24, 0x2e, 0x6e, + 0x2b, 0xc4, 0x69, 0xaf, 0x3a, 0x9f, 0x50, 0x13, 0x62, 0xee, 0x74, 0xdb, 0x04, 0x47, 0x12, 0xd8, + 0x14, 0x6d, 0xb4, 0xed, 0xf8, 0xfe, 0xa4, 0x18, 0x4d, 0xd1, 0xd1, 0x24, 0x84, 0x63, 0x54, 0xe8, + 0x2d, 0x18, 0x67, 0x27, 0x25, 0x47, 0xda, 0x2f, 0xa5, 0x4b, 0x1b, 0xec, 0xac, 0xeb, 0x05, 0x56, + 0x35, 0xd9, 0x2f, 0xcc, 0x25, 0x30, 0xed, 0x3c, 0xca, 0x7c, 0x66, 0x96, 0x9c, 0xfd, 0x43, 0xed, + 0x1b, 0x21, 0x06, 0xc7, 0xa8, 0xd0, 0x77, 0xa1, 0xb0, 0x2f, 0xdb, 0x42, 0x7e, 0x31, 0xa9, 0x13, + 0x97, 0x6a, 0x26, 0xc5, 0x08, 0xa7, 0xbe, 0x70, 0x28, 0x4d, 0x7f, 0x07, 0x8a, 0x5b, 0xb6, 0x49, + 0x3d, 0x66, 0x20, 0xbb, 0x12, 0x3f, 0x31, 0x93, 0x84, 0x57, 0xa2, 0xc2, 0x45, 0xe1, 0x59, 0x9c, + 0xb8, 0x86, 0xeb, 0x89, 0xc9, 0x23, 0x17, 0xc5, 0xc9, 0x6d, 0x06, 0xc4, 0x02, 0x77, 0xfd, 0x02, + 0xab, 0xbf, 0xbf, 0x78, 0x52, 0x1d, 0x7b, 0xfc, 0xa4, 0x3a, 0xf6, 0xfe, 0x13, 0x59, 0x8b, 0x4f, + 0x01, 0x60, 0x7b, 0xef, 0x87, 0xc4, 0x14, 0x59, 0x2d, 0xd5, 0xbe, 0x44, 0xad, 0xe9, 0xf8, 0xbe, + 0x24, 0xd3, 0xd7, 0x53, 0xc5, 0x70, 0x38, 0x41, 0x89, 0x96, 0xa1, 0x18, 0x6e, 0x42, 0xe4, 0x45, + 0xcf, 0xaa, 0xc0, 0x09, 0xd7, 0x25, 0x38, 0xa2, 0x49, 0xa4, 0xd8, 0xf1, 0x73, 0x53, 0x6c, 0x1d, + 0xb2, 0x1d, 0xdb, 0xe2, 0xaf, 0xab, 0x58, 0x7f, 0x4d, 0x95, 0xb8, 0xbb, 0x9b, 0x8d, 0xd3, 0x5e, + 0xf5, 0xa5, 0x61, 0x0b, 0xc8, 0xa0, 0xdb, 0x26, 0x7e, 0xed, 0xee, 0x66, 0x03, 0x33, 0xe6, 0xb3, + 0xde, 0x7b, 0x7e, 0xc4, 0xf7, 0x7e, 0x15, 0x40, 0x7a, 0xcd, 0xb8, 0xc5, 0xc3, 0x0d, 0x23, 0xea, + 0x66, 0x88, 0xc1, 0x31, 0x2a, 0xe4, 0xc3, 0xac, 0xc9, 0x46, 0x61, 0xf6, 0x3c, 0xec, 0x16, 0xf1, + 0x03, 0xa3, 0x25, 0x36, 0x44, 0xa3, 0x05, 0xf7, 0x25, 0xa9, 0x66, 0x76, 0xad, 0x5f, 0x18, 0x1e, + 0x94, 0x8f, 0x3c, 0x98, 0xb5, 0xe4, 0x50, 0x17, 0x29, 0x2d, 0x8e, 0xac, 0xf4, 0x22, 0x53, 0xd8, + 0xe8, 0x17, 0x84, 0x07, 0x65, 0xa3, 0x1f, 0xc0, 0xbc, 0x02, 0x0e, 0x4e, 0xd6, 0x7c, 0xc7, 0x93, + 0xad, 0x2f, 0x9c, 0xf4, 0xaa, 0xf3, 0x8d, 0xa1, 0x54, 0xf8, 0x39, 0x12, 0x90, 0x05, 0x79, 0x47, + 0xf4, 0x8f, 0x25, 0x5e, 0xf3, 0xbf, 0x91, 0xce, 0x8b, 0x28, 0xfa, 0x6b, 0xf1, 0xbe, 0x31, 0x9c, + 0x1c, 0x65, 0xcb, 0x28, 0x65, 0xa3, 0x47, 0x50, 0x32, 0x5c, 0xd7, 0x0b, 0x0c, 0x31, 0xeb, 0x4f, + 0x72, 0x55, 0xab, 0x23, 0xab, 0x5a, 0x8d, 0x64, 0xf4, 0xf5, 0xa9, 0x31, 0x0c, 0x8e, 0xab, 0x42, + 0xc7, 0x30, 0xed, 0x1d, 0xbb, 0x84, 0x62, 0xb2, 0x4f, 0x28, 0x71, 0x4d, 0xe2, 0x57, 0xca, 0x5c, + 0xfb, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47, 0x21, 0x9d, 0x84, 0xfb, 0xb8, 0x5f, 0x0b, 0xaa, 0xb1, + 0x24, 0xe9, 0x1a, 0x8e, 0xfd, 0x23, 0x42, 0xfd, 0xca, 0x54, 0xb4, 0xc4, 0xdb, 0x08, 0xa1, 0x38, + 0x46, 0x81, 0xbe, 0x06, 0x25, 0xd3, 0xe9, 0xf8, 0x01, 0x11, 0x1b, 0xd5, 0x69, 0xfe, 0x82, 0x42, + 0xff, 0xd6, 0x22, 0x14, 0x8e, 0xd3, 0xa1, 0x0e, 0x94, 0x5b, 
0xf1, 0x92, 0x51, 0x99, 0xe5, 0xde, + 0x5d, 0x4b, 0xe7, 0xdd, 0x60, 0x51, 0x8b, 0xfa, 0x8a, 0x04, 0x0e, 0x27, 0xb5, 0xcc, 0x7f, 0x1d, + 0x4a, 0xff, 0x63, 0xcb, 0xcd, 0x5a, 0xf6, 0xfe, 0x7b, 0x1c, 0xa9, 0x65, 0xff, 0x73, 0x06, 0xa6, + 0x92, 0xa7, 0xdf, 0x57, 0x0e, 0x73, 0xa9, 0xca, 0xa1, 0x1a, 0x0e, 0xb5, 0xa1, 0x4b, 0x60, 0x95, + 0xd6, 0xb3, 0x43, 0xd3, 0xba, 0xcc, 0x9e, 0xe3, 0x2f, 0x92, 0x3d, 0x6b, 0x00, 0xac, 0xcf, 0xa0, + 0x9e, 0xe3, 0x10, 0xca, 0x13, 0x67, 0x41, 0x2e, 0x7b, 0x43, 0x28, 0x8e, 0x51, 0xb0, 0x1e, 0x75, + 0xcf, 0xf1, 0xcc, 0x43, 0x7e, 0x04, 0xea, 0xd1, 0xf3, 0x94, 0x59, 0x10, 0x3d, 0x6a, 0x7d, 0x00, + 0x8b, 0xcf, 0xe0, 0xd0, 0xbb, 0x70, 0x71, 0xc7, 0xa0, 0x81, 0x6d, 0x38, 0xd1, 0x03, 0xe3, 0x43, + 0xc0, 0xc3, 0x81, 0x11, 0xe3, 0xb5, 0x51, 0x1f, 0x6a, 0x74, 0xf8, 0x11, 0x2c, 0x1a, 0x33, 0xf4, + 0xbf, 0x6b, 0x70, 0xe9, 0x4c, 0xdd, 0x9f, 0xc0, 0x88, 0xf3, 0x30, 0x39, 0xe2, 0xbc, 0x91, 0x72, + 0xdf, 0x78, 0x96, 0xb5, 0x43, 0x06, 0x9e, 0x09, 0xc8, 0xed, 0xb0, 0x86, 0x58, 0xff, 0xb5, 0x06, + 0x93, 0xfc, 0xd7, 0x28, 0xbb, 0xda, 0x2a, 0xe4, 0xf6, 0x3d, 0xb5, 0x38, 0x2a, 0x88, 0x3f, 0x13, + 0x36, 0x18, 0x00, 0x0b, 0xf8, 0x0b, 0x2c, 0x73, 0xdf, 0xd3, 0x20, 0xb9, 0x25, 0x45, 0x37, 0x44, + 0xfc, 0x6a, 0xe1, 0x1a, 0x73, 0xc4, 0xd8, 0x7d, 0x73, 0xd8, 0x80, 0x36, 0x97, 0x6a, 0x77, 0x77, + 0x05, 0x8a, 0xd8, 0xf3, 0x82, 0x1d, 0x23, 0x38, 0xf0, 0x99, 0xe3, 0x6d, 0xf6, 0x43, 0x9e, 0x0d, + 0x77, 0x9c, 0x63, 0xb0, 0x80, 0xeb, 0xbf, 0xd2, 0xe0, 0xd2, 0xd0, 0xfd, 0x39, 0x4b, 0x01, 0x66, + 0xf8, 0x25, 0x3d, 0x0a, 0xa3, 0x30, 0xa2, 0xc3, 0x31, 0x2a, 0x36, 0x59, 0x25, 0x96, 0xee, 0xfd, + 0x93, 0x55, 0x42, 0x1b, 0x4e, 0xd2, 0xea, 0xff, 0xce, 0x40, 0x7e, 0x37, 0x30, 0x82, 0x8e, 0xff, + 0x7f, 0x8e, 0xd8, 0x57, 0x20, 0xef, 0x73, 0x3d, 0xd2, 0xbc, 0xb0, 0xc6, 0x0a, 0xed, 0x58, 0x62, + 0xf9, 0x34, 0x42, 0x7c, 0xdf, 0x68, 0xaa, 0x8c, 0x15, 0x4d, 0x23, 0x02, 0x8c, 0x15, 0x1e, 0xbd, + 0x0e, 0x79, 0x4a, 0x0c, 0x3f, 0x1c, 0xcc, 0x16, 0x94, 0x48, 0xcc, 0xa1, 0xa7, 0xbd, 0xea, 0xa4, + 0x14, 0xce, 0xbf, 0xb1, 0xa4, 0x46, 0xf7, 0x61, 0xc2, 0x22, 0x81, 0x61, 0x3b, 0x62, 0x1e, 0x4b, + 0xbd, 0xae, 0x17, 0xc2, 0x1a, 0x82, 0xb5, 0x5e, 0x62, 0x36, 0xc9, 0x0f, 0xac, 0x04, 0xb2, 0x6c, + 0x6b, 0x7a, 0x96, 0x18, 0x27, 0x72, 0x51, 0xb6, 0x5d, 0xf3, 0x2c, 0x82, 0x39, 0x46, 0x7f, 0xac, + 0x41, 0x49, 0x48, 0x5a, 0x33, 0x3a, 0x3e, 0x41, 0x2b, 0xa1, 0x17, 0xe2, 0xba, 0x55, 0x27, 0x37, + 0xce, 0x06, 0x8e, 0xd3, 0x5e, 0xb5, 0xc8, 0xc9, 0xf8, 0x24, 0xa2, 0x1c, 0x88, 0x9d, 0x51, 0xe6, + 0x9c, 0x33, 0x7a, 0x19, 0x72, 0xfc, 0xf5, 0xc8, 0xc3, 0x0c, 0xdf, 0x3a, 0x7f, 0x60, 0x58, 0xe0, + 0xf4, 0x8f, 0x32, 0x50, 0x4e, 0x38, 0x97, 0x62, 0x16, 0x08, 0x17, 0x8a, 0x99, 0x14, 0x4b, 0xea, + 0xe1, 0x7f, 0x51, 0xca, 0xda, 0x93, 0x7f, 0x91, 0xda, 0xf3, 0x3d, 0xc8, 0x9b, 0xec, 0x8c, 0xd4, + 0x3f, 0xde, 0x2b, 0xa3, 0x5c, 0x27, 0x3f, 0xdd, 0x28, 0x1a, 0xf9, 0xa7, 0x8f, 0xa5, 0x40, 0x74, + 0x13, 0x66, 0x29, 0x09, 0x68, 0x77, 0x75, 0x3f, 0x20, 0x34, 0x3e, 0xc4, 0xe7, 0xa2, 0x8e, 0x1b, + 0xf7, 0x13, 0xe0, 0x41, 0x1e, 0x7d, 0x0f, 0x26, 0xef, 0x18, 0x7b, 0x4e, 0xf8, 0x07, 0x14, 0x86, + 0xb2, 0xed, 0x9a, 0x4e, 0xc7, 0x22, 0x22, 0x1b, 0xab, 0xec, 0xa5, 0x1e, 0xed, 0x66, 0x1c, 0x79, + 0xda, 0xab, 0xce, 0x25, 0x00, 0xe2, 0x1f, 0x17, 0x9c, 0x14, 0xa1, 0x3b, 0x30, 0xfe, 0x09, 0x4e, + 0x8f, 0xdf, 0x87, 0x62, 0xd4, 0xdf, 0x7f, 0xcc, 0x2a, 0xf5, 0x87, 0x50, 0x60, 0x11, 0xaf, 0xe6, + 0xd2, 0x73, 0x5a, 0x9c, 0x64, 0xe3, 0x94, 0x49, 0xd3, 0x38, 0xe9, 0x2d, 0x28, 0xdf, 0x6d, 0x5b, + 0x2f, 0xf8, 0x17, 0x64, 0x26, 0x75, 0xd5, 0xba, 0x0a, 0xe2, 0xcf, 0x74, 0x56, 0x20, 
0x44, 0xe5, + 0x8e, 0x15, 0x88, 0x78, 0xe1, 0x8d, 0xed, 0xca, 0x7f, 0xa6, 0x01, 0xf0, 0xa5, 0xd4, 0xfa, 0x11, + 0x71, 0x03, 0x76, 0x0e, 0x2c, 0xf0, 0xfb, 0xcf, 0x81, 0x67, 0x06, 0x8e, 0x41, 0x77, 0x21, 0xef, + 0x89, 0x68, 0x12, 0x7f, 0x43, 0x8e, 0xb8, 0xf9, 0x0c, 0x1f, 0x81, 0x88, 0x27, 0x2c, 0x85, 0xd5, + 0x97, 0x9e, 0x3e, 0x5b, 0x18, 0xfb, 0xe0, 0xd9, 0xc2, 0xd8, 0x87, 0xcf, 0x16, 0xc6, 0xde, 0x3d, + 0x59, 0xd0, 0x9e, 0x9e, 0x2c, 0x68, 0x1f, 0x9c, 0x2c, 0x68, 0x1f, 0x9e, 0x2c, 0x68, 0x1f, 0x9d, + 0x2c, 0x68, 0x8f, 0xff, 0xb9, 0x30, 0x76, 0x3f, 0x73, 0xb4, 0xf2, 0xdf, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x61, 0xb7, 0xc5, 0x7c, 0xc2, 0x24, 0x00, 0x00, +} + func (m *APIGroup) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -352,53 +1481,65 @@ func (m *APIGroup) Marshal() (dAtA []byte, err error) { } func (m *APIGroup) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.ServerAddressByClientCIDRs) > 0 { + for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PreferredVersion.Size())) - n1, err := m.PreferredVersion.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.PreferredVersion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIGroupList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -406,29 +1547,36 @@ func (m *APIGroupList) Marshal() (dAtA []byte, err error) { } func (m *APIGroupList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Groups) > 0 { - for _, 
msg := range m.Groups { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *APIResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -436,89 +1584,90 @@ func (m *APIResource) Marshal() (dAtA []byte, err error) { } func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - if m.Namespaced { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - if m.Verbs != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Verbs.Size())) - n2, err := m.Verbs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.StorageVersionHash) + copy(dAtA[i:], m.StorageVersionHash) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) + i-- + dAtA[i] = 0x52 + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x4a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x42 + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x3a } - i += n2 } + i -= len(m.SingularName) + copy(dAtA[i:], m.SingularName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) + i-- + dAtA[i] = 0x32 if len(m.ShortNames) > 0 { - for _, s := range m.ShortNames { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) - i += copy(dAtA[i:], m.SingularName) - if len(m.Categories) > 0 { - for _, s := range m.Categories { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Verbs != nil { + { + size, err := m.Verbs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) - i += copy(dAtA[i:], m.StorageVersionHash) - return i, nil + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i-- + if m.Namespaced { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIResourceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -526,33 +1675,41 @@ func (m *APIResourceList) Marshal() (dAtA []byte, err error) { } func (m *APIResourceList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResourceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) - i += copy(dAtA[i:], m.GroupVersion) if len(m.APIResources) > 0 { - for _, msg := range m.APIResources { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.APIResources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.APIResources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.GroupVersion) + copy(dAtA[i:], m.GroupVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIVersions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -560,44 +1717,45 @@ func (m *APIVersions) Marshal() (dAtA []byte, err error) { } func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIVersions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Versions) > 0 { - for _, s := range m.Versions { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.ServerAddressByClientCIDRs) > 0 { + for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 } } - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Versions[iNdEx]) + copy(dAtA[i:], m.Versions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Versions[iNdEx]))) + 
i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *CreateOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -605,36 +1763,36 @@ func (m *CreateOptions) Marshal() (dAtA []byte, err error) { } func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x1a if len(m.DryRun) > 0 { - for _, s := range m.DryRun { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], m.FieldManager) - return i, nil + return len(dAtA) - i, nil } func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -642,63 +1800,65 @@ func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { } func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.GracePeriodSeconds != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) - } - if m.Preconditions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Preconditions.Size())) - n3, err := m.Preconditions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0x2a } - i += n3 + } + if m.PropagationPolicy != nil { + i -= len(*m.PropagationPolicy) + copy(dAtA[i:], *m.PropagationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) + i-- + dAtA[i] = 0x22 } if m.OrphanDependents != nil { - dAtA[i] = 0x18 - i++ + i-- if *m.OrphanDependents { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.PropagationPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) - i += copy(dAtA[i:], *m.PropagationPolicy) + i-- + dAtA[i] = 0x18 } - if len(m.DryRun) > 0 { - for _, s := range m.DryRun { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Preconditions != nil { + { + size, err := m.Preconditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 + } + if m.GracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, 
uint64(*m.GracePeriodSeconds)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Duration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -706,20 +1866,25 @@ func (m *Duration) Marshal() (dAtA []byte, err error) { } func (m *Duration) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Duration)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ExportOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -727,82 +1892,68 @@ func (m *ExportOptions) Marshal() (dAtA []byte, err error) { } func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Export { + i-- + if m.Exact { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- dAtA[i] = 0x10 - i++ - if m.Exact { + i-- + if m.Export { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *Fields) Marshal() (dAtA []byte, err error) { +func (m *FieldsV1) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Fields) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FieldsV1) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldsV1) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Map) > 0 { - keysForMap := make([]string, 0, len(m.Map)) - for k := range m.Map { - keysForMap = append(keysForMap, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMap) - for _, k := range keysForMap { - dAtA[i] = 0xa - i++ - v := m.Map[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n4, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } + if m.Raw != nil { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *GetOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -810,21 +1961,27 @@ func (m *GetOptions) Marshal() (dAtA []byte, err error) { } func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *GroupKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -832,25 +1989,32 @@ func (m *GroupKind) Marshal() (dAtA []byte, err error) { } func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *GroupResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -858,25 +2022,32 @@ func (m *GroupResource) Marshal() (dAtA []byte, err error) { } func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *GroupVersion) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -884,173 +2055,141 @@ func (m *GroupVersion) Marshal() (dAtA []byte, err error) { } func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - return i, nil -} - -func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) - i += copy(dAtA[i:], m.GroupVersion) - dAtA[i] = 0x12 - i++ + i -= len(m.Version) + copy(dAtA[i:], m.Version) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - return i, nil -} - -func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionForDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ + i -= len(m.Version) + copy(dAtA[i:], m.Version) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.GroupVersion) + copy(dAtA[i:], m.GroupVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Initializer) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) 
- return i, nil + return len(dAtA) - i, nil } -func (m *Initializers) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Initializers) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Pending) > 0 { - for _, msg := range m.Pending { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Result != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n5, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - return i, nil + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LabelSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1058,51 +2197,60 @@ func (m *LabelSelector) Marshal() (dAtA []byte, err error) { } func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } if len(m.MatchLabels) > 0 { keysForMatchLabels := make([]string, 0, len(m.MatchLabels)) for k := range m.MatchLabels { keysForMatchLabels = append(keysForMatchLabels, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) - for _, k := range keysForMatchLabels { - dAtA[i] = 0xa - i++ - v := m.MatchLabels[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ + for iNdEx := len(keysForMatchLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.MatchLabels[string(keysForMatchLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i -= 
len(keysForMatchLabels[iNdEx]) + copy(dAtA[i:], keysForMatchLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMatchLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1110,40 +2258,41 @@ func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { } func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) if len(m.Values) > 0 { - for _, s := range m.Values { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1151,37 +2300,46 @@ func (m *List) Marshal() (dAtA []byte, err error) { } func (m *List) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ListMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1189,34 +2347,42 @@ func (m *ListMeta) Marshal() (dAtA []byte, err error) { } func (m 
*ListMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) - i += copy(dAtA[i:], m.SelfLink) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) - i += copy(dAtA[i:], m.Continue) if m.RemainingItemCount != nil { - dAtA[i] = 0x20 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.RemainingItemCount)) + i-- + dAtA[i] = 0x20 } - return i, nil + i -= len(m.Continue) + copy(dAtA[i:], m.Continue) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i-- + dAtA[i] = 0x1a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.SelfLink) + copy(dAtA[i:], m.SelfLink) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ListOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1224,57 +2390,66 @@ func (m *ListOptions) Marshal() (dAtA []byte, err error) { } func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) - i += copy(dAtA[i:], m.LabelSelector) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) - i += copy(dAtA[i:], m.FieldSelector) - dAtA[i] = 0x18 - i++ - if m.Watch { + i-- + if m.AllowWatchBookmarks { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) + i-- + dAtA[i] = 0x48 + i -= len(m.Continue) + copy(dAtA[i:], m.Continue) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i-- + dAtA[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x38 if m.TimeoutSeconds != nil { - dAtA[i] = 0x28 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x28 } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) - i += copy(dAtA[i:], m.Continue) - dAtA[i] = 0x48 - i++ - if m.AllowWatchBookmarks { + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x22 + i-- + if m.Watch { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x18 + i -= len(m.FieldSelector) + copy(dAtA[i:], m.FieldSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) + i-- + dAtA[i] = 0x12 + i -= len(m.LabelSelector) + copy(dAtA[i:], m.LabelSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ManagedFieldsEntry) Marshal() (dAtA 
[]byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1282,49 +2457,66 @@ func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) { } func (m *ManagedFieldsEntry) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ManagedFieldsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager))) - i += copy(dAtA[i:], m.Manager) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i += copy(dAtA[i:], m.Operation) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - if m.Time != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n7, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.FieldsV1 != nil { + { + size, err := m.FieldsV1.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- + dAtA[i] = 0x3a } - if m.Fields != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Fields.Size())) - n8, err := m.Fields.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.FieldsType) + copy(dAtA[i:], m.FieldsType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldsType))) + i-- + dAtA[i] = 0x32 + if m.Time != nil { + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x22 } - return i, nil + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x12 + i -= len(m.Manager) + copy(dAtA[i:], m.Manager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1332,80 +2524,57 @@ func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { } func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) - i += copy(dAtA[i:], m.GenerateName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) - i += copy(dAtA[i:], m.SelfLink) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], 
m.ResourceVersion) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n9, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - if m.DeletionTimestamp != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n10, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.ManagedFields) > 0 { + for iNdEx := len(m.ManagedFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ManagedFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a } - i += n10 - } - if m.DeletionGracePeriodSeconds != nil { - dAtA[i] = 0x50 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) } - if len(m.Labels) > 0 { - keysForLabels := make([]string, 0, len(m.Labels)) - for k := range m.Labels { - keysForLabels = append(keysForLabels, string(k)) + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0x7a + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0x72 } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - for _, k := range keysForLabels { - dAtA[i] = 0x5a - i++ - v := m.Labels[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + } + if len(m.OwnerReferences) > 0 { + for iNdEx := len(m.OwnerReferences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OwnerReferences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a } } if len(m.Annotations) > 0 { @@ -1414,86 +2583,115 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { keysForAnnotations = append(keysForAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - for _, k := range keysForAnnotations { - dAtA[i] = 0x62 - i++ - v := m.Annotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 } } - if len(m.OwnerReferences) > 0 { - for _, msg := range 
m.OwnerReferences { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) } - } - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - dAtA[i] = 0x72 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.Labels[string(keysForLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForLabels[iNdEx]) + copy(dAtA[i:], keysForLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a } } - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) - i += copy(dAtA[i:], m.ClusterName) - if m.Initializers != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) - n11, err := m.Initializers.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 + if m.DeletionGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) + i-- + dAtA[i] = 0x50 } - if len(m.ManagedFields) > 0 { - for _, msg := range m.ManagedFields { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.DeletionTimestamp != nil { + { + size, err := m.DeletionTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + { + size, err := m.CreationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x38 + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.SelfLink) + copy(dAtA[i:], m.SelfLink) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.GenerateName) + copy(dAtA[i:], m.GenerateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *OwnerReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1501,53 +2699,62 @@ func (m *OwnerReference) Marshal() (dAtA []byte, err error) { } func (m *OwnerReference) 
MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OwnerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - if m.Controller != nil { - dAtA[i] = 0x30 - i++ - if *m.Controller { + if m.BlockOwnerDeletion != nil { + i-- + if *m.BlockOwnerDeletion { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.BlockOwnerDeletion != nil { + i-- dAtA[i] = 0x38 - i++ - if *m.BlockOwnerDeletion { + } + if m.Controller != nil { + i-- + if *m.Controller { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x30 } - return i, nil + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1555,25 +2762,32 @@ func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { } func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1581,37 +2795,46 @@ func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { } func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n13, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Patch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1619,17 +2842,22 @@ func (m *Patch) Marshal() (dAtA []byte, err error) { } func (m *Patch) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Patch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *PatchOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1637,46 +2865,46 @@ func (m *PatchOptions) Marshal() (dAtA []byte, err error) { } func (m *PatchOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PatchOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.DryRun) > 0 { - for _, s := range m.DryRun { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x1a if m.Force != nil { - dAtA[i] = 0x10 - i++ + i-- if *m.Force { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], m.FieldManager) - return i, nil + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1684,29 +2912,36 @@ func (m *Preconditions) Marshal() (dAtA []byte, err error) { } func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Preconditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.UID != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) - i += copy(dAtA[i:], *m.UID) - } if m.ResourceVersion != nil { - dAtA[i] = 0x12 - i++ + i -= len(*m.ResourceVersion) + copy(dAtA[i:], *m.ResourceVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) - i += copy(dAtA[i:], 
*m.ResourceVersion) + i-- + dAtA[i] = 0x12 + } + if m.UID != nil { + i -= len(*m.UID) + copy(dAtA[i:], *m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RootPaths) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1714,32 +2949,31 @@ func (m *RootPaths) Marshal() (dAtA []byte, err error) { } func (m *RootPaths) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RootPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Paths) > 0 { - for _, s := range m.Paths { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1747,25 +2981,32 @@ func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { } func (m *ServerAddressByClientCIDR) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServerAddressByClientCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) - i += copy(dAtA[i:], m.ClientCIDR) - dAtA[i] = 0x12 - i++ + i -= len(m.ServerAddress) + copy(dAtA[i:], m.ServerAddress) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddress))) - i += copy(dAtA[i:], m.ServerAddress) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.ClientCIDR) + copy(dAtA[i:], m.ClientCIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Status) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1773,50 +3014,62 @@ func (m *Status) Marshal() (dAtA []byte, err error) { } func (m *Status) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n14, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x30 if m.Details != nil { + { + size, err := 
m.Details.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) - n15, err := m.Details.MarshalTo(dAtA[i:]) + } + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n15 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatusCause) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1824,29 +3077,37 @@ func (m *StatusCause) Marshal() (dAtA []byte, err error) { } func (m *StatusCause) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x1a - i++ + i -= len(m.Field) + copy(dAtA[i:], m.Field) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Field))) - i += copy(dAtA[i:], m.Field) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatusDetails) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1854,48 +3115,59 @@ func (m *StatusDetails) Marshal() (dAtA []byte, err error) { } func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) + i-- + dAtA[i] = 0x28 if len(m.Causes) > 0 { - for _, msg := range m.Causes { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - 
return 0, err + for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TableOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1903,21 +3175,27 @@ func (m *TableOptions) Marshal() (dAtA []byte, err error) { } func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TableOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.IncludeObject) + copy(dAtA[i:], m.IncludeObject) i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) - i += copy(dAtA[i:], m.IncludeObject) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Timestamp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1925,23 +3203,28 @@ func (m *Timestamp) Marshal() (dAtA []byte, err error) { } func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1949,25 +3232,32 @@ func (m *TypeMeta) Marshal() (dAtA []byte, err error) { } func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil } func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1975,36 +3265,36 @@ func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { } func (m *UpdateOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x12 if len(m.DryRun) > 0 { - for _, s := range m.DryRun { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], m.FieldManager) - return i, nil + return len(dAtA) - i, nil } func (m Verbs) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2012,32 +3302,31 @@ func (m Verbs) Marshal() (dAtA []byte, err error) { } func (m Verbs) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m Verbs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *WatchEvent) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -2045,35 +3334,48 @@ func (m *WatchEvent) Marshal() (dAtA []byte, err error) { } func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n16, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n16 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset 
for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *APIGroup) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2096,6 +3398,9 @@ func (m *APIGroup) Size() (n int) { } func (m *APIGroupList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Groups) > 0 { @@ -2108,6 +3413,9 @@ func (m *APIGroupList) Size() (n int) { } func (m *APIResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2143,6 +3451,9 @@ func (m *APIResource) Size() (n int) { } func (m *APIResourceList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.GroupVersion) @@ -2157,6 +3468,9 @@ func (m *APIResourceList) Size() (n int) { } func (m *APIVersions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Versions) > 0 { @@ -2175,6 +3489,9 @@ func (m *APIVersions) Size() (n int) { } func (m *CreateOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -2189,6 +3506,9 @@ func (m *CreateOptions) Size() (n int) { } func (m *DeleteOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.GracePeriodSeconds != nil { @@ -2215,6 +3535,9 @@ func (m *DeleteOptions) Size() (n int) { } func (m *Duration) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Duration)) @@ -2222,6 +3545,9 @@ func (m *Duration) Size() (n int) { } func (m *ExportOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -2229,22 +3555,23 @@ func (m *ExportOptions) Size() (n int) { return n } -func (m *Fields) Size() (n int) { +func (m *FieldsV1) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Map) > 0 { - for k, v := range m.Map { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) } return n } func (m *GetOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ResourceVersion) @@ -2253,6 +3580,9 @@ func (m *GetOptions) Size() (n int) { } func (m *GroupKind) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2263,6 +3593,9 @@ func (m *GroupKind) Size() (n int) { } func (m *GroupResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2273,6 +3606,9 @@ func (m *GroupResource) Size() (n int) { } func (m *GroupVersion) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2283,6 +3619,9 @@ func (m *GroupVersion) Size() (n int) { } func (m *GroupVersionForDiscovery) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.GroupVersion) @@ -2293,6 +3632,9 @@ func (m *GroupVersionForDiscovery) Size() (n int) { } func (m *GroupVersionKind) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2305,6 +3647,9 @@ func (m *GroupVersionKind) Size() (n int) { } func (m *GroupVersionResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2316,31 +3661,10 @@ func (m *GroupVersionResource) Size() (n int) { return n } -func (m *Initializer) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Initializers) 
Size() (n int) { - var l int - _ = l - if len(m.Pending) > 0 { - for _, e := range m.Pending { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *LabelSelector) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.MatchLabels) > 0 { @@ -2361,6 +3685,9 @@ func (m *LabelSelector) Size() (n int) { } func (m *LabelSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Key) @@ -2377,6 +3704,9 @@ func (m *LabelSelectorRequirement) Size() (n int) { } func (m *List) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2391,6 +3721,9 @@ func (m *List) Size() (n int) { } func (m *ListMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.SelfLink) @@ -2406,6 +3739,9 @@ func (m *ListMeta) Size() (n int) { } func (m *ListOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.LabelSelector) @@ -2426,6 +3762,9 @@ func (m *ListOptions) Size() (n int) { } func (m *ManagedFieldsEntry) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Manager) @@ -2438,14 +3777,19 @@ func (m *ManagedFieldsEntry) Size() (n int) { l = m.Time.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Fields != nil { - l = m.Fields.Size() + l = len(m.FieldsType) + n += 1 + l + sovGenerated(uint64(l)) + if m.FieldsV1 != nil { + l = m.FieldsV1.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } func (m *ObjectMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2500,10 +3844,6 @@ func (m *ObjectMeta) Size() (n int) { } l = len(m.ClusterName) n += 1 + l + sovGenerated(uint64(l)) - if m.Initializers != nil { - l = m.Initializers.Size() - n += 2 + l + sovGenerated(uint64(l)) - } if len(m.ManagedFields) > 0 { for _, e := range m.ManagedFields { l = e.Size() @@ -2514,6 +3854,9 @@ func (m *ObjectMeta) Size() (n int) { } func (m *OwnerReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -2534,6 +3877,9 @@ func (m *OwnerReference) Size() (n int) { } func (m *PartialObjectMetadata) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -2542,6 +3888,9 @@ func (m *PartialObjectMetadata) Size() (n int) { } func (m *PartialObjectMetadataList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2556,12 +3905,18 @@ func (m *PartialObjectMetadataList) Size() (n int) { } func (m *Patch) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func (m *PatchOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -2579,6 +3934,9 @@ func (m *PatchOptions) Size() (n int) { } func (m *Preconditions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.UID != nil { @@ -2593,6 +3951,9 @@ func (m *Preconditions) Size() (n int) { } func (m *RootPaths) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Paths) > 0 { @@ -2605,6 +3966,9 @@ func (m *RootPaths) Size() (n int) { } func (m *ServerAddressByClientCIDR) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ClientCIDR) @@ -2615,6 +3979,9 @@ func (m *ServerAddressByClientCIDR) Size() (n int) { } func (m *Status) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2634,6 +4001,9 @@ func (m *Status) Size() 
(n int) { } func (m *StatusCause) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2646,6 +4016,9 @@ func (m *StatusCause) Size() (n int) { } func (m *StatusDetails) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2667,6 +4040,9 @@ func (m *StatusDetails) Size() (n int) { } func (m *TableOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.IncludeObject) @@ -2675,6 +4051,9 @@ func (m *TableOptions) Size() (n int) { } func (m *Timestamp) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Seconds)) @@ -2683,6 +4062,9 @@ func (m *Timestamp) Size() (n int) { } func (m *TypeMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -2693,6 +4075,9 @@ func (m *TypeMeta) Size() (n int) { } func (m *UpdateOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -2707,6 +4092,9 @@ func (m *UpdateOptions) Size() (n int) { } func (m Verbs) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -2719,6 +4107,9 @@ func (m Verbs) Size() (n int) { } func (m *WatchEvent) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2729,14 +4120,7 @@ func (m *WatchEvent) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2745,11 +4129,21 @@ func (this *APIGroup) String() string { if this == nil { return "nil" } + repeatedStringForVersions := "[]GroupVersionForDiscovery{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + repeatedStringForServerAddressByClientCIDRs := "[]ServerAddressByClientCIDR{" + for _, f := range this.ServerAddressByClientCIDRs { + repeatedStringForServerAddressByClientCIDRs += strings.Replace(strings.Replace(f.String(), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForServerAddressByClientCIDRs += "}" s := strings.Join([]string{`&APIGroup{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `Versions:` + repeatedStringForVersions + `,`, `PreferredVersion:` + strings.Replace(strings.Replace(this.PreferredVersion.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, - `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, + `ServerAddressByClientCIDRs:` + repeatedStringForServerAddressByClientCIDRs + `,`, `}`, }, "") return s @@ -2758,8 +4152,13 @@ func (this *APIGroupList) String() string { if this == nil { return "nil" } + repeatedStringForGroups := "[]APIGroup{" + for _, f := range this.Groups { + repeatedStringForGroups += strings.Replace(strings.Replace(f.String(), "APIGroup", "APIGroup", 1), `&`, ``, 1) + "," + } + repeatedStringForGroups += "}" s := strings.Join([]string{`&APIGroupList{`, - `Groups:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Groups), "APIGroup", "APIGroup", 1), `&`, ``, 1) + `,`, + `Groups:` + repeatedStringForGroups + `,`, `}`, }, "") return s @@ -2787,9 +4186,14 @@ func (this *APIResourceList) String() string { if this == nil { return "nil" } + repeatedStringForAPIResources := "[]APIResource{" + for _, f := range this.APIResources { + repeatedStringForAPIResources += strings.Replace(strings.Replace(f.String(), "APIResource", "APIResource", 1), `&`, ``, 1) + "," + } + repeatedStringForAPIResources += "}" s := strings.Join([]string{`&APIResourceList{`, `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, - `APIResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.APIResources), "APIResource", "APIResource", 1), `&`, ``, 1) + `,`, + `APIResources:` + repeatedStringForAPIResources + `,`, `}`, }, "") return s @@ -2811,7 +4215,7 @@ func (this *DeleteOptions) String() string { } s := strings.Join([]string{`&DeleteOptions{`, `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, - `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, + `Preconditions:` + strings.Replace(this.Preconditions.String(), "Preconditions", "Preconditions", 1) + `,`, `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, @@ -2840,22 +4244,12 @@ func (this *ExportOptions) String() string { }, "") return s } -func (this *Fields) String() string { +func (this *FieldsV1) String() string { if this == nil { return "nil" } - keysForMap := make([]string, 0, len(this.Map)) - for k := range this.Map { - keysForMap = append(keysForMap, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMap) - mapStringForMap := "map[string]Fields{" - for _, k := range keysForMap { - mapStringForMap += fmt.Sprintf("%v: %v,", k, this.Map[k]) - } - mapStringForMap += "}" - s := strings.Join([]string{`&Fields{`, - `Map:` + mapStringForMap + `,`, + s := strings.Join([]string{`&FieldsV1{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, `}`, }, "") return s @@ -2881,31 +4275,15 @@ func (this *GroupVersionForDiscovery) String() string { }, "") return s } -func (this *Initializer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Initializer{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *Initializers) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Initializers{`, - `Pending:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Pending), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, - `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "Status", 1) + `,`, - `}`, - }, "") - return s -} func (this *LabelSelector) String() string { if this == nil { return "nil" } + repeatedStringForMatchExpressions := "[]LabelSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" keysForMatchLabels := make([]string, 0, len(this.MatchLabels)) for k := range this.MatchLabels { keysForMatchLabels = append(keysForMatchLabels, k) @@ -2918,7 +4296,7 @@ func (this *LabelSelector) String() string { mapStringForMatchLabels 
+= "}" s := strings.Join([]string{`&LabelSelector{`, `MatchLabels:` + mapStringForMatchLabels + `,`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + `,`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, `}`, }, "") return s @@ -2939,9 +4317,14 @@ func (this *List) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RawExtension{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&List{`, `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2985,7 +4368,8 @@ func (this *ManagedFieldsEntry) String() string { `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, `Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`, - `Fields:` + strings.Replace(fmt.Sprintf("%v", this.Fields), "Fields", "Fields", 1) + `,`, + `FieldsType:` + fmt.Sprintf("%v", this.FieldsType) + `,`, + `FieldsV1:` + strings.Replace(this.FieldsV1.String(), "FieldsV1", "FieldsV1", 1) + `,`, `}`, }, "") return s @@ -2994,6 +4378,16 @@ func (this *ObjectMeta) String() string { if this == nil { return "nil" } + repeatedStringForOwnerReferences := "[]OwnerReference{" + for _, f := range this.OwnerReferences { + repeatedStringForOwnerReferences += strings.Replace(strings.Replace(f.String(), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + "," + } + repeatedStringForOwnerReferences += "}" + repeatedStringForManagedFields := "[]ManagedFieldsEntry{" + for _, f := range this.ManagedFields { + repeatedStringForManagedFields += strings.Replace(strings.Replace(f.String(), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForManagedFields += "}" keysForLabels := make([]string, 0, len(this.Labels)) for k := range this.Labels { keysForLabels = append(keysForLabels, k) @@ -3022,16 +4416,15 @@ func (this *ObjectMeta) String() string { `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, - `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "Time", 1), `&`, ``, 1) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreationTimestamp), "Time", "Time", 1), `&`, ``, 1) + `,`, `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "Time", 1) + `,`, `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, `Labels:` + mapStringForLabels + `,`, `Annotations:` + mapStringForAnnotations + `,`, - `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + `,`, + `OwnerReferences:` + repeatedStringForOwnerReferences + `,`, `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, - `Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", 
"Initializers", 1) + `,`, - `ManagedFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ManagedFields), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) + `,`, + `ManagedFields:` + repeatedStringForManagedFields + `,`, `}`, }, "") return s @@ -3065,9 +4458,14 @@ func (this *PartialObjectMetadataList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PartialObjectMetadata{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PartialObjectMetadata", "PartialObjectMetadata", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PartialObjectMetadataList{`, `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -3134,7 +4532,7 @@ func (this *Status) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "StatusDetails", "StatusDetails", 1) + `,`, + `Details:` + strings.Replace(this.Details.String(), "StatusDetails", "StatusDetails", 1) + `,`, `Code:` + fmt.Sprintf("%v", this.Code) + `,`, `}`, }, "") @@ -3156,11 +4554,16 @@ func (this *StatusDetails) String() string { if this == nil { return "nil" } + repeatedStringForCauses := "[]StatusCause{" + for _, f := range this.Causes { + repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "StatusCause", "StatusCause", 1), `&`, ``, 1) + "," + } + repeatedStringForCauses += "}" s := strings.Join([]string{`&StatusDetails{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "StatusCause", "StatusCause", 1), `&`, ``, 1) + `,`, + `Causes:` + repeatedStringForCauses + `,`, `RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `}`, @@ -3216,7 +4619,7 @@ func (this *WatchEvent) String() string { } s := strings.Join([]string{`&WatchEvent{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3244,7 +4647,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3272,7 +4675,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3282,6 +4685,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3301,7 +4707,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3310,6 +4716,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3332,7 +4741,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3341,6 +4750,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3362,7 +4774,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3371,6 +4783,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3388,6 +4803,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3415,7 +4833,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3443,7 +4861,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3452,6 +4870,9 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3469,6 +4890,9 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3496,7 +4920,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3524,7 +4948,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3534,6 +4958,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3553,7 +4980,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3573,7 +5000,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3583,6 +5010,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3602,7 +5032,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3611,6 +5041,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3635,7 +5068,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3645,6 +5078,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3664,7 +5100,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3674,6 +5110,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3693,7 +5132,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3703,6 +5142,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3722,7 +5164,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3732,6 +5174,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3751,7 +5196,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3761,6 +5206,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3780,7 +5228,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3790,6 +5238,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3804,6 +5255,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3831,7 +5285,7 @@ func (m 
*APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3859,7 +5313,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3869,6 +5323,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3888,7 +5345,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3897,6 +5354,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3914,6 +5374,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3941,7 +5404,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3969,7 +5432,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3979,6 +5442,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3998,7 +5464,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4007,6 +5473,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4024,6 +5493,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4051,7 +5523,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4079,7 +5551,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4089,6 +5561,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4108,7 +5583,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-4118,6 +5593,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4132,6 +5610,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4159,7 +5640,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4187,7 +5668,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4207,7 +5688,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4216,6 +5697,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4240,7 +5724,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4261,7 +5745,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4271,6 +5755,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4291,7 +5778,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4301,6 +5788,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4315,6 +5805,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4342,7 +5835,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4370,7 +5863,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Duration |= (time.Duration(b) & 0x7F) << shift + m.Duration |= time.Duration(b&0x7F) << shift if b < 0x80 { break } @@ -4384,6 +5877,9 @@ func (m *Duration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4411,7 +5907,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ 
-4439,7 +5935,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4459,7 +5955,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4474,6 +5970,9 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4486,7 +5985,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *Fields) Unmarshal(dAtA []byte) error { +func (m *FieldsV1) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4501,7 +6000,7 @@ func (m *Fields) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4509,17 +6008,17 @@ func (m *Fields) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Fields: wiretype end group for non-group") + return fmt.Errorf("proto: FieldsV1: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Fields: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FieldsV1: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Map", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4529,114 +6028,25 @@ func (m *Fields) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Map == nil { - m.Map = make(map[string]Fields) - } - var mapkey string - mapvalue := &Fields{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break 
- } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &Fields{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} } - m.Map[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -4647,6 +6057,9 @@ func (m *Fields) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4674,7 +6087,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4702,7 +6115,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4712,6 +6125,9 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4726,6 +6142,9 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4753,7 +6172,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4781,7 +6200,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4791,6 +6210,9 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4810,7 +6232,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4820,6 +6242,9 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4834,6 +6259,9 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4861,7 +6289,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << 
shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4889,7 +6317,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4899,6 +6327,9 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4918,7 +6349,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4928,6 +6359,9 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4942,6 +6376,9 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4969,7 +6406,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4997,7 +6434,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5007,6 +6444,9 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5026,7 +6466,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5036,6 +6476,9 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5050,6 +6493,9 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5077,7 +6523,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5105,7 +6551,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5115,6 +6561,9 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5134,7 +6583,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5144,6 +6593,9 @@ func (m 
*GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5158,6 +6610,9 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5185,7 +6640,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5213,7 +6668,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5223,6 +6678,9 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5242,7 +6700,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5252,6 +6710,9 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5271,7 +6732,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5281,6 +6742,9 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5295,6 +6759,9 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5322,7 +6789,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5350,7 +6817,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5360,6 +6827,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5379,7 +6849,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5389,6 +6859,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5408,7 +6881,7 @@ func (m 
*GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5418,89 +6891,13 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Initializer) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Initializer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Resource = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5511,118 +6908,7 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Initializers) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Initializers: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Initializers: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pending", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pending = append(m.Pending, Initializer{}) - if err := m.Pending[len(m.Pending)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &Status{} - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -5652,7 +6938,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5680,7 +6966,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5689,6 +6975,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5709,7 +6998,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5726,7 +7015,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5736,6 +7025,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -5752,7 +7044,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5762,6 +7054,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -5798,7 +7093,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5807,6 +7102,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5824,6 +7122,9 @@ func (m *LabelSelector) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5851,7 +7152,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5879,7 +7180,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5889,6 +7190,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5908,7 +7212,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5918,6 +7222,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5937,7 +7244,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5947,6 +7254,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5961,6 +7271,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5988,7 +7301,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6016,7 +7329,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6025,6 +7338,9 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6046,7 +7362,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6055,10 +7371,13 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.Items = append(m.Items, runtime.RawExtension{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -6072,6 +7391,9 @@ func (m *List) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6099,7 +7421,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6127,7 +7449,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6137,6 +7459,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6156,7 +7481,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6166,6 +7491,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6185,7 +7513,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6195,6 +7523,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6214,7 +7545,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6229,6 +7560,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6256,7 +7590,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6284,7 +7618,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6294,6 +7628,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6313,7 +7650,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6323,6 +7660,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6342,7 +7682,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6362,7 +7702,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 
0x80 { break } @@ -6372,6 +7712,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6391,7 +7734,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6411,7 +7754,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Limit |= (int64(b) & 0x7F) << shift + m.Limit |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6430,7 +7773,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6440,6 +7783,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6459,7 +7805,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6474,6 +7820,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6501,7 +7850,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6529,7 +7878,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6539,6 +7888,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6558,7 +7910,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6568,6 +7920,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6587,7 +7942,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6597,6 +7952,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6616,7 +7974,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6625,6 +7983,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6635,9 +7996,41 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldsType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldsType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldsV1", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6649,7 +8042,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6658,13 +8051,16 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Fields == nil { - m.Fields = &Fields{} + if m.FieldsV1 == nil { + m.FieldsV1 = &FieldsV1{} } - if err := m.Fields.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FieldsV1.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6677,6 +8073,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6704,7 +8103,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6732,7 +8131,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6742,6 +8141,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6761,7 +8163,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6771,6 +8173,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6790,7 +8195,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6800,6 +8205,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6819,7 +8227,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6829,6 +8237,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6848,7 +8259,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6858,6 +8269,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6877,7 +8291,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6887,6 +8301,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6906,7 +8323,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Generation |= (int64(b) & 0x7F) << shift + m.Generation |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6925,7 +8342,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6934,6 +8351,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6955,7 +8375,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6964,6 +8384,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6988,7 +8411,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -7008,7 +8431,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7017,6 +8440,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7037,7 +8463,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7054,7 +8480,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + 
stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7064,6 +8490,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -7080,7 +8509,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7090,6 +8519,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -7126,7 +8558,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7135,6 +8567,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7155,7 +8590,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7172,7 +8607,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7182,6 +8617,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -7198,7 +8636,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7208,6 +8646,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -7244,7 +8685,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7253,6 +8694,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7275,7 +8719,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7285,6 +8729,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7304,7 +8751,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= 
(uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7314,43 +8761,13 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClusterName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Initializers == nil { - m.Initializers = &Initializers{} - } - if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ClusterName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 17: if wireType != 2 { @@ -7366,7 +8783,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7375,6 +8792,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7392,6 +8812,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7419,7 +8842,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7447,7 +8870,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7457,6 +8880,9 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7476,7 +8902,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7486,6 +8912,9 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7505,7 +8934,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7515,6 +8944,9 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7534,7 +8966,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7544,6 +8976,9 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7563,7 +8998,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7584,7 +9019,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7600,6 +9035,9 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7627,7 +9065,7 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7655,7 +9093,7 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7664,6 +9102,9 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7680,6 +9121,9 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7707,7 +9151,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7735,7 +9179,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7744,6 +9188,9 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7765,7 +9212,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7774,6 +9221,9 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7791,6 +9241,9 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7818,7 +9271,7 @@ func (m *Patch) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7841,6 +9294,9 @@ func (m *Patch) Unmarshal(dAtA []byte) error { if 
skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7868,7 +9324,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7896,7 +9352,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7906,6 +9362,9 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7925,7 +9384,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7946,7 +9405,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7956,6 +9415,9 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7970,6 +9432,9 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7997,7 +9462,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8025,7 +9490,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8035,6 +9500,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8055,7 +9523,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8065,6 +9533,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8080,6 +9551,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8107,7 +9581,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8135,7 +9609,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8145,6 +9619,9 @@ func (m *RootPaths) Unmarshal(dAtA 
[]byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8159,6 +9636,9 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8186,7 +9666,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8214,7 +9694,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8224,6 +9704,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8243,7 +9726,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8253,6 +9736,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8267,6 +9753,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8294,7 +9783,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8322,7 +9811,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8331,6 +9820,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8352,7 +9844,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8362,6 +9854,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8381,7 +9876,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8391,6 +9886,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8410,7 +9908,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8420,6 +9918,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8439,7 +9940,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8448,6 +9949,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8472,7 +9976,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Code |= (int32(b) & 0x7F) << shift + m.Code |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -8486,6 +9990,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8513,7 +10020,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8541,7 +10048,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8551,6 +10058,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8570,7 +10080,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8580,6 +10090,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8599,7 +10112,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8609,6 +10122,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8623,6 +10139,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8650,7 +10169,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8678,7 +10197,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8688,6 +10207,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex 
< 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8707,7 +10229,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8717,6 +10239,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8736,7 +10261,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8746,6 +10271,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8765,7 +10293,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8774,6 +10302,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8796,7 +10327,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift + m.RetryAfterSeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -8815,7 +10346,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8825,6 +10356,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8839,6 +10373,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8866,7 +10403,7 @@ func (m *TableOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8894,7 +10431,7 @@ func (m *TableOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8904,6 +10441,9 @@ func (m *TableOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8918,6 +10458,9 @@ func (m *TableOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8945,7 +10488,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8973,7 +10516,7 @@ func (m 
*Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Seconds |= (int64(b) & 0x7F) << shift + m.Seconds |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -8992,7 +10535,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Nanos |= (int32(b) & 0x7F) << shift + m.Nanos |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -9006,6 +10549,9 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9033,7 +10579,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9061,7 +10607,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9071,6 +10617,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9090,7 +10639,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9100,6 +10649,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9114,6 +10666,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9141,7 +10696,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9169,7 +10724,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9179,6 +10734,9 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9198,7 +10756,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9208,6 +10766,9 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9222,6 +10783,9 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9249,7 +10813,7 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9277,7 +10841,7 
@@ func (m *Verbs) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9287,6 +10851,9 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9301,6 +10868,9 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9328,7 +10898,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9356,7 +10926,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -9366,6 +10936,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9385,7 +10958,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -9394,6 +10967,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9410,6 +10986,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9476,10 +11055,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -9508,6 +11090,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -9526,188 +11111,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2820 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0xcf, 0x6f, 0x1c, 0x57, - 0xd9, 0xb3, 0xeb, 0x5d, 0xef, 0x7e, 0xeb, 0x4d, 0xec, 0x97, 0x04, 0xb6, 0x46, 0x78, 0xdd, 0x29, - 0xaa, 0x52, 0x48, 0xd7, 0x4d, 0x4a, 0xab, 0x90, 0xd2, 0x82, 0xd7, 0x76, 0x52, 0xd3, 0xb8, 0xb1, - 0x9e, 0x93, 0x20, 0x42, 0x84, 0x3a, 0xde, 0x79, 0x5e, 0x0f, 0x9e, 0x9d, 0x99, 0xbe, 0x37, 0xeb, - 0xc4, 0x70, 0xa0, 0x07, 0x10, 0x20, 0x41, 0xd5, 0x23, 0x27, 0xd4, 0x0a, 0xfe, 0x02, 0x4e, 0x9c, - 0x38, 0x55, 0xa2, 0x17, 0xa4, 0x4a, 0x5c, 0x2a, 0x81, 0x56, 0xad, 0x41, 0x82, 0x1b, 0xe2, 0xea, - 0x13, 0x7a, 0xbf, 0x66, 0xde, 0xec, 0x7a, 0xe3, 0x59, 
0x52, 0x2a, 0x4e, 0x3b, 0xf3, 0xfd, 0x7e, - 0xef, 0x7d, 0xef, 0xfb, 0x35, 0x0b, 0x9b, 0xfb, 0x57, 0x59, 0xcb, 0x0b, 0x97, 0xf7, 0xfb, 0x3b, - 0x84, 0x06, 0x24, 0x26, 0x6c, 0xf9, 0x80, 0x04, 0x6e, 0x48, 0x97, 0x15, 0xc2, 0x89, 0xbc, 0x9e, - 0xd3, 0xd9, 0xf3, 0x02, 0x42, 0x0f, 0x97, 0xa3, 0xfd, 0x2e, 0x07, 0xb0, 0xe5, 0x1e, 0x89, 0x9d, - 0xe5, 0x83, 0xcb, 0xcb, 0x5d, 0x12, 0x10, 0xea, 0xc4, 0xc4, 0x6d, 0x45, 0x34, 0x8c, 0x43, 0xf4, - 0x25, 0xc9, 0xd5, 0x32, 0xb9, 0x5a, 0xd1, 0x7e, 0x97, 0x03, 0x58, 0x8b, 0x73, 0xb5, 0x0e, 0x2e, - 0x2f, 0x3c, 0xdb, 0xf5, 0xe2, 0xbd, 0xfe, 0x4e, 0xab, 0x13, 0xf6, 0x96, 0xbb, 0x61, 0x37, 0x5c, - 0x16, 0xcc, 0x3b, 0xfd, 0x5d, 0xf1, 0x26, 0x5e, 0xc4, 0x93, 0x14, 0xba, 0x30, 0xd6, 0x14, 0xda, - 0x0f, 0x62, 0xaf, 0x47, 0x86, 0xad, 0x58, 0x78, 0xf1, 0x34, 0x06, 0xd6, 0xd9, 0x23, 0x3d, 0x67, - 0x98, 0xcf, 0xfe, 0x63, 0x11, 0x2a, 0x2b, 0x5b, 0x1b, 0x37, 0x68, 0xd8, 0x8f, 0xd0, 0x12, 0x4c, - 0x07, 0x4e, 0x8f, 0x34, 0xac, 0x25, 0xeb, 0x62, 0xb5, 0x3d, 0xfb, 0xc1, 0xa0, 0x39, 0x75, 0x34, - 0x68, 0x4e, 0xbf, 0xee, 0xf4, 0x08, 0x16, 0x18, 0xe4, 0x43, 0xe5, 0x80, 0x50, 0xe6, 0x85, 0x01, - 0x6b, 0x14, 0x96, 0x8a, 0x17, 0x6b, 0x57, 0x5e, 0x69, 0xe5, 0x59, 0x7f, 0x4b, 0x28, 0xb8, 0x2b, - 0x59, 0xaf, 0x87, 0x74, 0xcd, 0x63, 0x9d, 0xf0, 0x80, 0xd0, 0xc3, 0xf6, 0x9c, 0xd2, 0x52, 0x51, - 0x48, 0x86, 0x13, 0x0d, 0xe8, 0xc7, 0x16, 0xcc, 0x45, 0x94, 0xec, 0x12, 0x4a, 0x89, 0xab, 0xf0, - 0x8d, 0xe2, 0x92, 0xf5, 0x29, 0xa8, 0x6d, 0x28, 0xb5, 0x73, 0x5b, 0x43, 0xf2, 0xf1, 0x88, 0x46, - 0xf4, 0x1b, 0x0b, 0x16, 0x18, 0xa1, 0x07, 0x84, 0xae, 0xb8, 0x2e, 0x25, 0x8c, 0xb5, 0x0f, 0x57, - 0x7d, 0x8f, 0x04, 0xf1, 0xea, 0xc6, 0x1a, 0x66, 0x8d, 0x69, 0xb1, 0x0f, 0xdf, 0xc8, 0x67, 0xd0, - 0xf6, 0x38, 0x39, 0x6d, 0x5b, 0x59, 0xb4, 0x30, 0x96, 0x84, 0xe1, 0x47, 0x98, 0x61, 0xef, 0xc2, - 0xac, 0x3e, 0xc8, 0x9b, 0x1e, 0x8b, 0xd1, 0x5d, 0x28, 0x77, 0xf9, 0x0b, 0x6b, 0x58, 0xc2, 0xc0, - 0x56, 0x3e, 0x03, 0xb5, 0x8c, 0xf6, 0x19, 0x65, 0x4f, 0x59, 0xbc, 0x32, 0xac, 0xa4, 0xd9, 0x3f, - 0x9f, 0x86, 0xda, 0xca, 0xd6, 0x06, 0x26, 0x2c, 0xec, 0xd3, 0x0e, 0xc9, 0xe1, 0x34, 0x57, 0x00, - 0xf8, 0x2f, 0x8b, 0x9c, 0x0e, 0x71, 0x1b, 0x85, 0x25, 0xeb, 0x62, 0xa5, 0x8d, 0x14, 0x1d, 0xbc, - 0x9e, 0x60, 0xb0, 0x41, 0xc5, 0xa5, 0xee, 0x7b, 0x81, 0x2b, 0x4e, 0xdb, 0x90, 0xfa, 0x9a, 0x17, - 0xb8, 0x58, 0x60, 0xd0, 0x4d, 0x28, 0x1d, 0x10, 0xba, 0xc3, 0xf7, 0x9f, 0x3b, 0xc4, 0x57, 0xf2, - 0x2d, 0xef, 0x2e, 0x67, 0x69, 0x57, 0x8f, 0x06, 0xcd, 0x92, 0x78, 0xc4, 0x52, 0x08, 0x6a, 0x01, - 0xb0, 0xbd, 0x90, 0xc6, 0xc2, 0x9c, 0x46, 0x69, 0xa9, 0x78, 0xb1, 0xda, 0x3e, 0xc3, 0xed, 0xdb, - 0x4e, 0xa0, 0xd8, 0xa0, 0x40, 0x57, 0x61, 0x96, 0x79, 0x41, 0xb7, 0xef, 0x3b, 0x94, 0x03, 0x1a, - 0x65, 0x61, 0xe7, 0x79, 0x65, 0xe7, 0xec, 0xb6, 0x81, 0xc3, 0x19, 0x4a, 0xae, 0xa9, 0xe3, 0xc4, - 0xa4, 0x1b, 0x52, 0x8f, 0xb0, 0xc6, 0x4c, 0xaa, 0x69, 0x35, 0x81, 0x62, 0x83, 0x02, 0x3d, 0x05, - 0x25, 0xb1, 0xf3, 0x8d, 0x8a, 0x50, 0x51, 0x57, 0x2a, 0x4a, 0xe2, 0x58, 0xb0, 0xc4, 0xa1, 0x67, - 0x60, 0x46, 0xdd, 0x9a, 0x46, 0x55, 0x90, 0x9d, 0x55, 0x64, 0x33, 0xda, 0xad, 0x35, 0x1e, 0x7d, - 0x0b, 0x10, 0x8b, 0x43, 0xea, 0x74, 0x89, 0x42, 0xbd, 0xea, 0xb0, 0xbd, 0x06, 0x08, 0xae, 0x05, - 0xc5, 0x85, 0xb6, 0x47, 0x28, 0xf0, 0x09, 0x5c, 0xf6, 0xef, 0x2c, 0x38, 0x6b, 0xf8, 0x82, 0xf0, - 0xbb, 0xab, 0x30, 0xdb, 0x35, 0x6e, 0x9d, 0xf2, 0x8b, 0x64, 0x67, 0xcc, 0x1b, 0x89, 0x33, 0x94, - 0x88, 0x40, 0x95, 0x2a, 0x49, 0x3a, 0xba, 0x5c, 0xce, 0xed, 0xb4, 0xda, 0x86, 0x54, 0x93, 0x01, - 0x64, 0x38, 0x95, 0x6c, 0xff, 0xc3, 0x12, 0x0e, 0xac, 0xe3, 0x0d, 0xba, 0x68, 
0xc4, 0x34, 0x4b, - 0x1c, 0xc7, 0xec, 0x98, 0x78, 0x74, 0x4a, 0x20, 0x28, 0xfc, 0x5f, 0x04, 0x82, 0x6b, 0x95, 0x5f, - 0xbd, 0xdb, 0x9c, 0x7a, 0xeb, 0xaf, 0x4b, 0x53, 0x76, 0x0f, 0xea, 0xab, 0x94, 0x38, 0x31, 0xb9, - 0x15, 0xc5, 0x62, 0x01, 0x36, 0x94, 0x5d, 0x7a, 0x88, 0xfb, 0x81, 0x5a, 0x28, 0xf0, 0xfb, 0xbd, - 0x26, 0x20, 0x58, 0x61, 0xf8, 0xf9, 0xed, 0x7a, 0xc4, 0x77, 0x37, 0x9d, 0xc0, 0xe9, 0x12, 0xaa, - 0x6e, 0x60, 0xb2, 0xab, 0xd7, 0x0d, 0x1c, 0xce, 0x50, 0xda, 0x3f, 0x2d, 0x42, 0x7d, 0x8d, 0xf8, - 0x24, 0xd5, 0x77, 0x1d, 0x50, 0x97, 0x3a, 0x1d, 0xb2, 0x45, 0xa8, 0x17, 0xba, 0xdb, 0xa4, 0x13, - 0x06, 0x2e, 0x13, 0x1e, 0x51, 0x6c, 0x7f, 0x8e, 0xfb, 0xd9, 0x8d, 0x11, 0x2c, 0x3e, 0x81, 0x03, - 0xf9, 0x50, 0x8f, 0xa8, 0x78, 0xf6, 0x62, 0x95, 0x7b, 0xf8, 0x9d, 0x7f, 0x3e, 0xdf, 0x56, 0x6f, - 0x99, 0xac, 0xed, 0xf9, 0xa3, 0x41, 0xb3, 0x9e, 0x01, 0xe1, 0xac, 0x70, 0xf4, 0x4d, 0x98, 0x0b, - 0x69, 0xb4, 0xe7, 0x04, 0x6b, 0x24, 0x22, 0x81, 0x4b, 0x82, 0x98, 0x89, 0x5d, 0xa8, 0xb4, 0xcf, - 0xf3, 0x8c, 0x71, 0x6b, 0x08, 0x87, 0x47, 0xa8, 0xd1, 0x3d, 0x98, 0x8f, 0x68, 0x18, 0x39, 0x5d, - 0x87, 0x4b, 0xdc, 0x0a, 0x7d, 0xaf, 0x73, 0x28, 0xe2, 0x54, 0xb5, 0x7d, 0xe9, 0x68, 0xd0, 0x9c, - 0xdf, 0x1a, 0x46, 0x1e, 0x0f, 0x9a, 0xe7, 0xc4, 0xd6, 0x71, 0x48, 0x8a, 0xc4, 0xa3, 0x62, 0x8c, - 0x33, 0x2c, 0x8d, 0x3b, 0x43, 0x7b, 0x03, 0x2a, 0x6b, 0x7d, 0x2a, 0xb8, 0xd0, 0xcb, 0x50, 0x71, - 0xd5, 0xb3, 0xda, 0xf9, 0x27, 0x75, 0xca, 0xd5, 0x34, 0xc7, 0x83, 0x66, 0x9d, 0x17, 0x09, 0x2d, - 0x0d, 0xc0, 0x09, 0x8b, 0x7d, 0x1f, 0xea, 0xeb, 0x0f, 0xa3, 0x90, 0xc6, 0xfa, 0x4c, 0x9f, 0x86, - 0x32, 0x11, 0x00, 0x21, 0xad, 0x92, 0xe6, 0x09, 0x49, 0x86, 0x15, 0x96, 0xc7, 0x2d, 0xf2, 0xd0, - 0xe9, 0xc4, 0x2a, 0xe0, 0x27, 0x71, 0x6b, 0x9d, 0x03, 0xb1, 0xc4, 0xd9, 0xef, 0x5b, 0x50, 0x16, - 0x1e, 0xc5, 0xd0, 0x6d, 0x28, 0xf6, 0x9c, 0x48, 0x25, 0xab, 0x17, 0xf2, 0x9d, 0xac, 0x64, 0x6d, - 0x6d, 0x3a, 0xd1, 0x7a, 0x10, 0xd3, 0xc3, 0x76, 0x4d, 0x29, 0x29, 0x6e, 0x3a, 0x11, 0xe6, 0xe2, - 0x16, 0x5c, 0xa8, 0x68, 0x2c, 0x9a, 0x83, 0xe2, 0x3e, 0x39, 0x94, 0x01, 0x09, 0xf3, 0x47, 0xd4, - 0x86, 0xd2, 0x81, 0xe3, 0xf7, 0x89, 0xf2, 0xa7, 0x4b, 0x93, 0x68, 0xc5, 0x92, 0xf5, 0x5a, 0xe1, - 0xaa, 0x65, 0xdf, 0x02, 0xb8, 0x41, 0x92, 0x1d, 0x5a, 0x81, 0xb3, 0x3a, 0xda, 0x64, 0x83, 0xe0, - 0xe7, 0x95, 0x79, 0x67, 0x71, 0x16, 0x8d, 0x87, 0xe9, 0xed, 0xfb, 0x50, 0x15, 0x81, 0x92, 0xe7, - 0xbb, 0x34, 0x03, 0x58, 0x8f, 0xc8, 0x00, 0x3a, 0x61, 0x16, 0xc6, 0x25, 0x4c, 0x23, 0x2e, 0xf8, - 0x50, 0x97, 0xbc, 0x3a, 0x87, 0xe7, 0xd2, 0x70, 0x09, 0x2a, 0xda, 0x4c, 0xa5, 0x25, 0xa9, 0xdd, - 0xb4, 0x20, 0x9c, 0x50, 0x18, 0xda, 0xf6, 0x20, 0x13, 0xf4, 0xf3, 0x29, 0x33, 0x12, 0x5a, 0xe1, - 0xd1, 0x09, 0xcd, 0xd0, 0xf4, 0x23, 0x68, 0x8c, 0x2b, 0xf8, 0x1e, 0x23, 0x2d, 0xe5, 0x37, 0xc5, - 0x7e, 0xdb, 0x82, 0x39, 0x53, 0x52, 0xfe, 0xe3, 0xcb, 0xaf, 0xe4, 0xf4, 0xd2, 0xc8, 0xd8, 0x91, - 0x5f, 0x5b, 0x70, 0x3e, 0xb3, 0xb4, 0x89, 0x4e, 0x7c, 0x02, 0xa3, 0x4c, 0xe7, 0x28, 0x4e, 0xe0, - 0x1c, 0xcb, 0x50, 0xdb, 0x08, 0xbc, 0xd8, 0x73, 0x7c, 0xef, 0x07, 0x84, 0x9e, 0x5e, 0x4c, 0xda, - 0x7f, 0xb0, 0x60, 0xd6, 0xe0, 0x60, 0xe8, 0x3e, 0xcc, 0xf0, 0xb8, 0xeb, 0x05, 0x5d, 0x15, 0x3b, - 0x72, 0xd6, 0x0c, 0x86, 0x90, 0x74, 0x5d, 0x5b, 0x52, 0x12, 0xd6, 0x22, 0xd1, 0x16, 0x94, 0x29, - 0x61, 0x7d, 0x3f, 0x9e, 0x2c, 0x44, 0x6c, 0xc7, 0x4e, 0xdc, 0x67, 0x32, 0x36, 0x63, 0xc1, 0x8f, - 0x95, 0x1c, 0xfb, 0xcf, 0x05, 0xa8, 0xdf, 0x74, 0x76, 0x88, 0xbf, 0x4d, 0x7c, 0xd2, 0x89, 0x43, - 0x8a, 0x7e, 0x08, 0xb5, 0x9e, 0x13, 0x77, 0xf6, 0x04, 0x54, 0x97, 0xeb, 0x6b, 0xf9, 0x14, 0x65, - 0x24, 
0xb5, 0x36, 0x53, 0x31, 0x32, 0x20, 0x9e, 0x53, 0x0b, 0xab, 0x19, 0x18, 0x6c, 0x6a, 0x13, - 0x3d, 0x96, 0x78, 0x5f, 0x7f, 0x18, 0xf1, 0x5a, 0x62, 0xf2, 0xd6, 0x2e, 0x63, 0x02, 0x26, 0x6f, - 0xf6, 0x3d, 0x4a, 0x7a, 0x24, 0x88, 0xd3, 0x1e, 0x6b, 0x73, 0x48, 0x3e, 0x1e, 0xd1, 0xb8, 0xf0, - 0x0a, 0xcc, 0x0d, 0x1b, 0x7f, 0x42, 0xbc, 0x3e, 0x6f, 0xc6, 0xeb, 0xaa, 0x19, 0x81, 0x7f, 0x6b, - 0x41, 0x63, 0x9c, 0x21, 0xe8, 0x8b, 0x86, 0xa0, 0x34, 0x47, 0xbc, 0x46, 0x0e, 0xa5, 0xd4, 0x75, - 0xa8, 0x84, 0x11, 0xef, 0x8a, 0x43, 0xaa, 0xfc, 0xfc, 0x19, 0xed, 0xbb, 0xb7, 0x14, 0xfc, 0x78, - 0xd0, 0xbc, 0x90, 0x11, 0xaf, 0x11, 0x38, 0x61, 0xe5, 0x89, 0x59, 0xd8, 0xc3, 0x8b, 0x85, 0x24, - 0x31, 0xdf, 0x15, 0x10, 0xac, 0x30, 0xf6, 0xef, 0x2d, 0x98, 0x16, 0x55, 0xf2, 0x7d, 0xa8, 0xf0, - 0xfd, 0x73, 0x9d, 0xd8, 0x11, 0x76, 0xe5, 0xee, 0xcf, 0x38, 0xf7, 0x26, 0x89, 0x9d, 0xf4, 0x7e, - 0x69, 0x08, 0x4e, 0x24, 0x22, 0x0c, 0x25, 0x2f, 0x26, 0x3d, 0x7d, 0x90, 0xcf, 0x8e, 0x15, 0xad, - 0xa6, 0x03, 0x2d, 0xec, 0x3c, 0x58, 0x7f, 0x18, 0x93, 0x80, 0x1f, 0x46, 0x1a, 0x0c, 0x36, 0xb8, - 0x0c, 0x2c, 0x45, 0xd9, 0xff, 0xb6, 0x20, 0x51, 0xc5, 0xaf, 0x3b, 0x23, 0xfe, 0xee, 0x4d, 0x2f, - 0xd8, 0x57, 0xdb, 0x9a, 0x98, 0xb3, 0xad, 0xe0, 0x38, 0xa1, 0x38, 0x29, 0x21, 0x16, 0x26, 0x4b, - 0x88, 0x5c, 0x61, 0x27, 0x0c, 0x62, 0x2f, 0xe8, 0x8f, 0xc4, 0x97, 0x55, 0x05, 0xc7, 0x09, 0x05, - 0xaf, 0x3b, 0x29, 0xe9, 0x39, 0x5e, 0xe0, 0x05, 0x5d, 0xbe, 0x88, 0xd5, 0xb0, 0x1f, 0xc4, 0xa2, - 0x00, 0x53, 0x75, 0x27, 0x1e, 0xc1, 0xe2, 0x13, 0x38, 0xec, 0x3f, 0x15, 0xa1, 0xc6, 0xd7, 0xac, - 0x33, 0xfb, 0x4b, 0x50, 0xf7, 0x4d, 0x2f, 0x50, 0x6b, 0xbf, 0xa0, 0x4c, 0xc9, 0xde, 0x6b, 0x9c, - 0xa5, 0xe5, 0xcc, 0xa2, 0x5c, 0x4e, 0x98, 0x0b, 0x59, 0xe6, 0xeb, 0x26, 0x12, 0x67, 0x69, 0x79, - 0xbc, 0x7e, 0xc0, 0xef, 0x87, 0x2a, 0x44, 0x93, 0x23, 0xfa, 0x36, 0x07, 0x62, 0x89, 0x3b, 0x69, - 0x9f, 0xa7, 0x27, 0xdc, 0xe7, 0x6b, 0x70, 0x86, 0x3b, 0x44, 0xd8, 0x8f, 0x75, 0xb5, 0x5e, 0x12, - 0xbb, 0x86, 0x8e, 0x06, 0xcd, 0x33, 0xb7, 0x33, 0x18, 0x3c, 0x44, 0xc9, 0x6d, 0xf4, 0xbd, 0x9e, - 0x17, 0x37, 0x66, 0x04, 0x4b, 0x62, 0xe3, 0x4d, 0x0e, 0xc4, 0x12, 0x97, 0x39, 0xc8, 0xca, 0xa9, - 0x07, 0xb9, 0x09, 0xe7, 0x1c, 0xdf, 0x0f, 0x1f, 0x88, 0x65, 0xb6, 0xc3, 0x70, 0xbf, 0xe7, 0xd0, - 0x7d, 0x26, 0x7a, 0xdc, 0x4a, 0xfb, 0x0b, 0x8a, 0xf1, 0xdc, 0xca, 0x28, 0x09, 0x3e, 0x89, 0xcf, - 0xfe, 0x67, 0x01, 0x90, 0xec, 0x56, 0x5c, 0x59, 0xc4, 0xc9, 0x40, 0xf3, 0x0c, 0xcc, 0xf4, 0x54, - 0xb7, 0x63, 0x65, 0xf3, 0x9c, 0x6e, 0x74, 0x34, 0x1e, 0x6d, 0x42, 0x55, 0x5e, 0xf8, 0xd4, 0x89, - 0x97, 0x15, 0x71, 0xf5, 0x96, 0x46, 0x1c, 0x0f, 0x9a, 0x0b, 0x19, 0x35, 0x09, 0xe6, 0xf6, 0x61, - 0x44, 0x70, 0x2a, 0x01, 0x5d, 0x01, 0x70, 0x22, 0xcf, 0x1c, 0x6d, 0x55, 0xd3, 0xd1, 0x48, 0xda, - 0xa4, 0x62, 0x83, 0x0a, 0xbd, 0x0a, 0xd3, 0x7c, 0xe3, 0xd5, 0xdc, 0xe3, 0xcb, 0xf9, 0xc2, 0x06, - 0x3f, 0xba, 0x76, 0x85, 0xe7, 0x52, 0xfe, 0x84, 0x85, 0x04, 0x74, 0x0f, 0xca, 0xc2, 0xcb, 0xe4, - 0x21, 0x4f, 0x58, 0xff, 0x8a, 0x66, 0x48, 0x15, 0xef, 0xc7, 0xc9, 0x13, 0x56, 0x12, 0xed, 0x37, - 0xa1, 0xba, 0xe9, 0x75, 0x68, 0xc8, 0xd5, 0xf1, 0x0d, 0x66, 0x99, 0xe6, 0x2f, 0xd9, 0x60, 0xed, - 0x4b, 0x1a, 0xcf, 0x9d, 0x28, 0x70, 0x82, 0x50, 0xb6, 0x78, 0xa5, 0xd4, 0x89, 0x5e, 0xe7, 0x40, - 0x2c, 0x71, 0xd7, 0xce, 0xf3, 0xfa, 0xe1, 0x67, 0xef, 0x35, 0xa7, 0xde, 0x79, 0xaf, 0x39, 0xf5, - 0xee, 0x7b, 0xaa, 0x96, 0xf8, 0x7b, 0x0d, 0xe0, 0xd6, 0xce, 0xf7, 0x49, 0x47, 0xc6, 0xa8, 0xd3, - 0x07, 0x53, 0xbc, 0x26, 0x54, 0xf3, 0x50, 0x31, 0xc4, 0x29, 0x0c, 0xd5, 0x84, 0x06, 0x0e, 0x67, - 0x28, 0xd1, 0x32, 0x54, 0x93, 
0x61, 0x95, 0x3a, 0xb6, 0x79, 0xed, 0x06, 0xc9, 0x44, 0x0b, 0xa7, - 0x34, 0x99, 0x80, 0x39, 0x7d, 0x6a, 0xc0, 0x6c, 0x43, 0xb1, 0xef, 0xb9, 0xe2, 0x54, 0xaa, 0xed, - 0xe7, 0x74, 0xc2, 0xba, 0xb3, 0xb1, 0x76, 0x3c, 0x68, 0x3e, 0x39, 0x6e, 0xd2, 0x1b, 0x1f, 0x46, - 0x84, 0xb5, 0xee, 0x6c, 0xac, 0x61, 0xce, 0x7c, 0x52, 0x30, 0x28, 0x4f, 0x18, 0x0c, 0xae, 0x00, - 0xa8, 0x55, 0x73, 0x6e, 0x79, 0xab, 0x13, 0xef, 0xbc, 0x91, 0x60, 0xb0, 0x41, 0x85, 0x18, 0xcc, - 0x77, 0x28, 0x91, 0xce, 0xee, 0xf5, 0x08, 0x8b, 0x9d, 0x9e, 0x1c, 0x5d, 0x4d, 0xe6, 0xaa, 0x4f, - 0x28, 0x35, 0xf3, 0xab, 0xc3, 0xc2, 0xf0, 0xa8, 0x7c, 0x14, 0xc2, 0xbc, 0xab, 0xba, 0xe7, 0x54, - 0x69, 0x75, 0x62, 0xa5, 0x17, 0xb8, 0xc2, 0xb5, 0x61, 0x41, 0x78, 0x54, 0x36, 0xfa, 0x1e, 0x2c, - 0x68, 0xe0, 0xe8, 0x08, 0x43, 0x0c, 0xd3, 0x8a, 0xed, 0xc5, 0xa3, 0x41, 0x73, 0x61, 0x6d, 0x2c, - 0x15, 0x7e, 0x84, 0x04, 0xe4, 0x42, 0xd9, 0x97, 0xd5, 0x60, 0x4d, 0x64, 0xf0, 0xaf, 0xe7, 0x5b, - 0x45, 0xea, 0xfd, 0x2d, 0xb3, 0x0a, 0x4c, 0x5a, 0x74, 0x55, 0x00, 0x2a, 0xd9, 0xe8, 0x21, 0xd4, - 0x9c, 0x20, 0x08, 0x63, 0x47, 0x0e, 0x55, 0x66, 0x85, 0xaa, 0x95, 0x89, 0x55, 0xad, 0xa4, 0x32, - 0x86, 0xaa, 0x4e, 0x03, 0x83, 0x4d, 0x55, 0xe8, 0x01, 0x9c, 0x0d, 0x1f, 0x04, 0x84, 0x62, 0xb2, - 0x4b, 0x28, 0x09, 0x3a, 0x84, 0x35, 0xea, 0x42, 0xfb, 0x57, 0x73, 0x6a, 0xcf, 0x30, 0xa7, 0x2e, - 0x9d, 0x85, 0x33, 0x3c, 0xac, 0x05, 0xb5, 0x00, 0x76, 0xbd, 0x40, 0xf5, 0x0e, 0x8d, 0x33, 0xe9, - 0xf4, 0xf5, 0x7a, 0x02, 0xc5, 0x06, 0x05, 0x7a, 0x01, 0x6a, 0x1d, 0xbf, 0xcf, 0x62, 0x22, 0xc7, - 0xbc, 0x67, 0xc5, 0x0d, 0x4a, 0xd6, 0xb7, 0x9a, 0xa2, 0xb0, 0x49, 0x87, 0xf6, 0x60, 0xd6, 0x33, - 0x9a, 0x94, 0xc6, 0x9c, 0xf0, 0xc5, 0x2b, 0x13, 0x77, 0x26, 0xac, 0x3d, 0xc7, 0x23, 0x91, 0x09, - 0xc1, 0x19, 0xc9, 0xa8, 0x0f, 0xf5, 0x9e, 0x99, 0x6a, 0x1a, 0xf3, 0x62, 0x1f, 0xaf, 0xe6, 0x53, - 0x35, 0x9a, 0x0c, 0xd3, 0x7a, 0x24, 0x83, 0xc3, 0x59, 0x2d, 0x0b, 0x5f, 0x83, 0xda, 0x7f, 0x59, - 0xaa, 0xf3, 0x52, 0x7f, 0xd8, 0x63, 0x26, 0x2a, 0xf5, 0xdf, 0x2f, 0xc0, 0x99, 0xec, 0x39, 0x27, - 0x2d, 0xb1, 0x35, 0xf6, 0x6b, 0x81, 0x4e, 0x06, 0xc5, 0xb1, 0xc9, 0x40, 0xc5, 0xdc, 0xe9, 0xc7, - 0x89, 0xb9, 0xd9, 0x74, 0x5e, 0xca, 0x95, 0xce, 0x5b, 0x00, 0xbc, 0xdc, 0xa1, 0xa1, 0xef, 0x13, - 0x2a, 0x42, 0x74, 0x45, 0x7d, 0x0f, 0x48, 0xa0, 0xd8, 0xa0, 0xe0, 0xb5, 0xed, 0x8e, 0x1f, 0x76, - 0xf6, 0xc5, 0x16, 0xe8, 0xf0, 0x22, 0x82, 0x73, 0x45, 0xd6, 0xb6, 0xed, 0x11, 0x2c, 0x3e, 0x81, - 0xc3, 0x3e, 0x84, 0x0b, 0x5b, 0x0e, 0xe5, 0x8e, 0x94, 0x5e, 0x65, 0xd1, 0x3c, 0xbc, 0x31, 0xd2, - 0x9a, 0x3c, 0x37, 0x69, 0x48, 0x48, 0x17, 0x9d, 0xc2, 0xd2, 0xf6, 0xc4, 0xfe, 0x8b, 0x05, 0x4f, - 0x9c, 0xa8, 0xfb, 0x33, 0x68, 0x8d, 0xde, 0xc8, 0xb6, 0x46, 0x2f, 0xe5, 0x1c, 0x21, 0x9f, 0x64, - 0xed, 0x98, 0x46, 0x69, 0x06, 0x4a, 0x5b, 0xbc, 0xec, 0xb4, 0x7f, 0x69, 0xc1, 0xac, 0x78, 0x9a, - 0x64, 0xfc, 0xde, 0x84, 0xd2, 0x6e, 0xa8, 0x47, 0x6c, 0x15, 0xf9, 0xa5, 0xea, 0x3a, 0x07, 0x60, - 0x09, 0x7f, 0x8c, 0xf9, 0xfc, 0xdb, 0x16, 0x64, 0x07, 0xdf, 0xe8, 0x15, 0xe9, 0xf3, 0x56, 0x32, - 0x99, 0x9e, 0xd0, 0xdf, 0x5f, 0x1e, 0xd7, 0xd8, 0x9d, 0xcb, 0x35, 0xe5, 0xbc, 0x04, 0x55, 0x1c, - 0x86, 0xf1, 0x96, 0x13, 0xef, 0x31, 0xbe, 0xf0, 0x88, 0x3f, 0xa8, 0xbd, 0x11, 0x0b, 0x17, 0x18, - 0x2c, 0xe1, 0xf6, 0x2f, 0x2c, 0x78, 0x62, 0xec, 0x27, 0x11, 0x7e, 0xf5, 0x3a, 0xc9, 0x9b, 0x5a, - 0x51, 0xe2, 0x85, 0x29, 0x1d, 0x36, 0xa8, 0x78, 0x47, 0x96, 0xf9, 0x8e, 0x32, 0xdc, 0x91, 0x65, - 0xb4, 0xe1, 0x2c, 0xad, 0xfd, 0xaf, 0x02, 0x94, 0xe5, 0x98, 0xe7, 0x7f, 0xec, 0xb1, 0x4f, 0x43, - 0x99, 0x09, 0x3d, 0xca, 0xbc, 0x24, 0x9b, 0x4b, 0xed, 
0x58, 0x61, 0x45, 0x17, 0x43, 0x18, 0x73, - 0xba, 0x3a, 0xca, 0xa5, 0x5d, 0x8c, 0x04, 0x63, 0x8d, 0x47, 0x2f, 0x42, 0x99, 0x12, 0x87, 0x25, - 0xfd, 0xe1, 0xa2, 0x16, 0x89, 0x05, 0xf4, 0x78, 0xd0, 0x9c, 0x55, 0xc2, 0xc5, 0x3b, 0x56, 0xd4, - 0xe8, 0x1e, 0xcc, 0xb8, 0x24, 0x76, 0x3c, 0x5f, 0x77, 0x0c, 0xcf, 0x4f, 0x32, 0x0e, 0x5b, 0x93, - 0xac, 0xed, 0x1a, 0xb7, 0x49, 0xbd, 0x60, 0x2d, 0x90, 0x47, 0xe8, 0x4e, 0xe8, 0xca, 0x2f, 0xa9, - 0xa5, 0x34, 0x42, 0xaf, 0x86, 0x2e, 0xc1, 0x02, 0x63, 0xbf, 0x63, 0x41, 0x4d, 0x4a, 0x5a, 0x75, - 0xfa, 0x8c, 0xa0, 0xcb, 0xc9, 0x2a, 0xe4, 0x71, 0xeb, 0x9a, 0x71, 0x9a, 0x77, 0x59, 0xc7, 0x83, - 0x66, 0x55, 0x90, 0x89, 0x96, 0x4b, 0x2f, 0xc0, 0xd8, 0xa3, 0xc2, 0x29, 0x7b, 0xf4, 0x14, 0x94, - 0xc4, 0xed, 0x51, 0x9b, 0x99, 0xdc, 0x75, 0x71, 0xc1, 0xb0, 0xc4, 0xd9, 0x1f, 0x17, 0xa0, 0x9e, - 0x59, 0x5c, 0x8e, 0xae, 0x23, 0x19, 0xbd, 0x16, 0x72, 0x8c, 0xf3, 0xc7, 0x7f, 0xff, 0xfe, 0x0e, - 0x94, 0x3b, 0x7c, 0x7d, 0xfa, 0x0f, 0x08, 0x97, 0x27, 0x39, 0x0a, 0xb1, 0x33, 0xa9, 0x27, 0x89, - 0x57, 0x86, 0x95, 0x40, 0x74, 0x03, 0xe6, 0x29, 0x89, 0xe9, 0xe1, 0xca, 0x6e, 0x4c, 0xa8, 0x39, - 0x07, 0x28, 0xa5, 0x75, 0x39, 0x1e, 0x26, 0xc0, 0xa3, 0x3c, 0x3a, 0xa7, 0x96, 0x1f, 0x23, 0xa7, - 0xda, 0x3b, 0x30, 0x7b, 0xdb, 0xd9, 0xf1, 0x93, 0x6f, 0x8a, 0x18, 0xea, 0x5e, 0xd0, 0xf1, 0xfb, - 0x2e, 0x91, 0xd1, 0x58, 0x47, 0x2f, 0x7d, 0x69, 0x37, 0x4c, 0xe4, 0xf1, 0xa0, 0x79, 0x2e, 0x03, - 0x90, 0x1f, 0xd1, 0x70, 0x56, 0x84, 0xed, 0xc3, 0xf4, 0x67, 0xd8, 0xa7, 0x7e, 0x17, 0xaa, 0x69, - 0x27, 0xf1, 0x29, 0xab, 0xb4, 0xdf, 0x80, 0x0a, 0xf7, 0x78, 0xdd, 0x01, 0x9f, 0x52, 0x16, 0x65, - 0x0b, 0x96, 0x42, 0x9e, 0x82, 0xc5, 0xee, 0x41, 0xfd, 0x4e, 0xe4, 0x3e, 0xe6, 0x57, 0xe5, 0x42, - 0xee, 0xac, 0x75, 0x05, 0xe4, 0x3f, 0x35, 0x78, 0x82, 0x90, 0x99, 0xdb, 0x48, 0x10, 0x66, 0xe2, - 0x35, 0xbe, 0x2a, 0xfc, 0xc4, 0x02, 0x10, 0xa3, 0x9f, 0xf5, 0x03, 0x12, 0xc4, 0x7c, 0x1f, 0xb8, - 0x53, 0x0d, 0xef, 0x83, 0x88, 0x0c, 0x02, 0x83, 0xee, 0x40, 0x39, 0x94, 0xde, 0x24, 0xc7, 0xfc, - 0x13, 0x4e, 0x4c, 0x93, 0x8b, 0x24, 0xfd, 0x09, 0x2b, 0x61, 0xed, 0x8b, 0x1f, 0x7c, 0xb2, 0x38, - 0xf5, 0xe1, 0x27, 0x8b, 0x53, 0x1f, 0x7d, 0xb2, 0x38, 0xf5, 0xd6, 0xd1, 0xa2, 0xf5, 0xc1, 0xd1, - 0xa2, 0xf5, 0xe1, 0xd1, 0xa2, 0xf5, 0xd1, 0xd1, 0xa2, 0xf5, 0xf1, 0xd1, 0xa2, 0xf5, 0xce, 0xdf, - 0x16, 0xa7, 0xee, 0x15, 0x0e, 0x2e, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, 0xe5, 0xe0, 0x33, 0x2e, - 0x95, 0x26, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index cc9099a6570c4..605505e190384 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -212,21 +212,20 @@ message ExportOptions { optional bool exact = 2; } -// Fields stores a set of fields in a data structure like a Trie. -// To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff -message Fields { - // Map stores a set of fields in a data structure like a Trie. - // - // Each key is either a '.' representing the field itself, and will always map to an empty set, - // or a string representing a sub-field or item. 
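The generated.pb.go hunks above do two things: they drop the redundant parentheses in the varint accumulation (wire |= uint64(b&0x7F) << shift) and, more importantly, they add postIndex < 0 and (iNdEx + skippy) < 0 guards so that a corrupted length prefix that overflows int can no longer yield a negative slice index. Below is a minimal, self-contained sketch of that decode-and-guard pattern, written for illustration only; it is not the generated code itself.

package main

import (
	"errors"
	"fmt"
)

var (
	errInvalidLength = errors.New("proto: negative length found during unmarshaling")
	errIntOverflow   = errors.New("proto: integer overflow")
)

// readLengthDelimited decodes a base-128 varint length at data[i:] and returns
// the field bytes it delimits, guarding against negative lengths and integer
// overflow in the same way the regenerated unmarshalers do.
func readLengthDelimited(data []byte, i int) (field []byte, next int, err error) {
	var length int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errIntOverflow
		}
		if i >= len(data) {
			return nil, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		length |= int(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	if length < 0 {
		return nil, 0, errInvalidLength
	}
	post := i + length
	if post < 0 { // i + length wrapped around; treat it like a negative length
		return nil, 0, errInvalidLength
	}
	if post > len(data) {
		return nil, 0, errors.New("unexpected EOF")
	}
	return data[i:post], post, nil
}

func main() {
	// 0x03 is the varint length 3; "abc" is the field payload.
	field, next, err := readLengthDelimited([]byte{0x03, 'a', 'b', 'c'}, 0)
	fmt.Println(string(field), next, err) // abc 4 <nil>
}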
The string will follow one of these four formats: - // 'f:', where is the name of a field in a struct, or key in a map - // 'v:', where is the exact json formatted value of a list item - // 'i:', where is position of a item in a list - // 'k:', where is a map of a list item's key fields to their unique values - // If a key maps to an empty Fields value, the field that key represents is part of the set. - // - // The exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal - map map = 1; +// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. +// +// Each key is either a '.' representing the field itself, and will always map to an empty set, +// or a string representing a sub-field or item. The string will follow one of these four formats: +// 'f:', where is the name of a field in a struct, or key in a map +// 'v:', where is the exact json formatted value of a list item +// 'i:', where is position of a item in a list +// 'k:', where is a map of a list item's key fields to their unique values +// If a key maps to an empty Fields value, the field that key represents is part of the set. +// +// The exact format is defined in sigs.k8s.io/structured-merge-diff +message FieldsV1 { + // Raw is the underlying serialization of this object. + optional bytes Raw = 1; } // GetOptions is the standard query options to the standard REST get call. @@ -302,27 +301,6 @@ message GroupVersionResource { optional string resource = 3; } -// Initializer is information about an initializer that has not yet completed. -message Initializer { - // name of the process that is responsible for initializing this object. - optional string name = 1; -} - -// Initializers tracks the progress of initialization. -message Initializers { - // Pending is a list of initializers that must execute in order before this object is visible. - // When the last pending initializer is removed, and no failing result is set, the initializers - // struct will be set to nil and the object is considered as initialized and visible to all - // clients. - // +patchMergeKey=name - // +patchStrategy=merge - repeated Initializer pending = 1; - - // If result is set with the Failure field, the object will be persisted to storage and then deleted, - // ensuring that other clients can observe the deletion. - optional Status result = 2; -} - // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. @@ -361,7 +339,7 @@ message LabelSelectorRequirement { // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; @@ -375,6 +353,10 @@ message ListMeta { // selfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional optional string selfLink = 1; @@ -383,7 +365,7 @@ message ListMeta { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 2; @@ -405,9 +387,6 @@ message ListMeta { // Servers older than v1.15 do not set this field. // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients // should not rely on the remainingItemCount to be set or to be exact. - // - // This field is alpha and can be changed or removed without notice. - // // +optional optional int64 remainingItemCount = 4; } @@ -438,7 +417,7 @@ message ListOptions { // If the feature gate WatchBookmarks is not enabled in apiserver, // this field is ignored. // - // This field is alpha and can be changed or removed without notice. + // This field is beta. // // +optional optional bool allowWatchBookmarks = 9; @@ -512,9 +491,13 @@ message ManagedFieldsEntry { // +optional optional Time time = 4; - // Fields identifies a set of fields. + // FieldsType is the discriminator for the different fields format and version. + // There is currently only one possible value: "FieldsV1" + optional string fieldsType = 6; + + // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. // +optional - optional Fields fields = 5; + optional FieldsV1 fieldsV1 = 7; } // MicroTime is version of Time with microsecond level precision. @@ -561,7 +544,7 @@ message ObjectMeta { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency // +optional optional string generateName = 2; @@ -579,6 +562,10 @@ message ObjectMeta { // SelfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional optional string selfLink = 4; @@ -601,7 +588,7 @@ message ObjectMeta { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 6; @@ -617,7 +604,7 @@ message ObjectMeta { // Populated by the system. // Read-only. // Null for lists. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional Time creationTimestamp = 8; @@ -638,7 +625,7 @@ message ObjectMeta { // // Populated by the system when a graceful deletion is requested. // Read-only. 
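Alongside the new FieldsV1 message, the ManagedFieldsEntry hunks in this file tombstone the old fields member (tag 5) and add a fieldsType discriminator (tag 6) plus a fieldsV1 payload (tag 7). A hedged sketch of how a client might read the new shape, assuming the metav1 types shown in this diff and the usual Manager/Operation members of ManagedFieldsEntry; the manager name and field keys below are invented for illustration.

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// dumpManagedFields prints each manager's field set, checking the fieldsType
// discriminator before touching the raw FieldsV1 JSON.
func dumpManagedFields(entries []metav1.ManagedFieldsEntry) {
	for _, e := range entries {
		if e.FieldsType != "FieldsV1" || e.FieldsV1 == nil {
			continue // unknown or empty fields format: skip rather than guess
		}
		var set map[string]interface{}
		if err := json.Unmarshal(e.FieldsV1.Raw, &set); err != nil {
			fmt.Printf("%s: bad fieldsV1: %v\n", e.Manager, err)
			continue
		}
		fmt.Printf("%s (%s): %v\n", e.Manager, e.Operation, set)
	}
}

func main() {
	raw := []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`) // hypothetical field set
	dumpManagedFields([]metav1.ManagedFieldsEntry{{
		Manager:    "example-controller",
		Operation:  metav1.ManagedFieldsOperationUpdate,
		FieldsType: "FieldsV1",
		FieldsV1:   &metav1.FieldsV1{Raw: raw},
	}})
}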
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional Time deletionTimestamp = 9; @@ -672,19 +659,6 @@ message ObjectMeta { // +patchStrategy=merge repeated OwnerReference ownerReferences = 13; - // An initializer is a controller which enforces some system invariant at object creation time. - // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - // - // DEPRECATED - initializers are an alpha field and will be removed in v1.15. - optional Initializers initializers = 16; - // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries @@ -707,8 +681,6 @@ message ObjectMeta { // "ci-cd". The set of fields is always in the version that the // workflow used when modifying the object. // - // This field is alpha and can be changed or removed without notice. - // // +optional repeated ManagedFieldsEntry managedFields = 17; } @@ -721,7 +693,7 @@ message OwnerReference { optional string apiVersion = 5; // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; // Name of the referent. @@ -751,7 +723,7 @@ message OwnerReference { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message PartialObjectMetadata { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional ObjectMeta metadata = 1; } @@ -760,7 +732,7 @@ message PartialObjectMetadata { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message PartialObjectMetadataList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; @@ -831,13 +803,13 @@ message ServerAddressByClientCIDR { // Status is a return value for calls that don't return other objects. message Status { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; // Status of the operation. // One of: "Success" or "Failure". 
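The ObjectMeta hunks in this region mostly retarget documentation links, but the fields they touch carry semantics worth spelling out: creationTimestamp and deletionTimestamp are populated by the system, and a non-nil deletionTimestamp means a graceful deletion has been requested. A small illustrative sketch of the usual client-side check; the object name is invented.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// beingDeleted reports whether an object has been marked for graceful deletion.
// Clients never set deletionTimestamp themselves; the apiserver does.
func beingDeleted(meta metav1.ObjectMeta) bool {
	return meta.DeletionTimestamp != nil
}

func main() {
	now := metav1.NewTime(time.Now())
	obj := metav1.ObjectMeta{Name: "example", DeletionTimestamp: &now} // hypothetical object
	fmt.Println(beingDeleted(obj))                                     // true
}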
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional string status = 2; @@ -908,7 +880,7 @@ message StatusDetails { // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional string kind = 3; @@ -986,14 +958,14 @@ message TypeMeta { // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional string kind = 1; // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources // +optional optional string apiVersion = 2; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index b4dc78b3eaa66..ec016fd3c8da4 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -17,7 +17,9 @@ limitations under the License. package v1 import ( + "bytes" "encoding/json" + "errors" "fmt" "k8s.io/apimachinery/pkg/fields" @@ -254,14 +256,25 @@ func ResetObjectMetaForStatus(meta, existingMeta Object) { } // MarshalJSON implements json.Marshaler -func (f Fields) MarshalJSON() ([]byte, error) { - return json.Marshal(&f.Map) +// MarshalJSON may get called on pointers or values, so implement MarshalJSON on value. +// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (f FieldsV1) MarshalJSON() ([]byte, error) { + if f.Raw == nil { + return []byte("null"), nil + } + return f.Raw, nil } // UnmarshalJSON implements json.Unmarshaler -func (f *Fields) UnmarshalJSON(b []byte) error { - return json.Unmarshal(b, &f.Map) +func (f *FieldsV1) UnmarshalJSON(b []byte) error { + if f == nil { + return errors.New("metav1.Fields: UnmarshalJSON on nil pointer") + } + if !bytes.Equal(b, []byte("null")) { + f.Raw = append(f.Raw[0:0], b...) 
+ } + return nil } -var _ json.Marshaler = Fields{} -var _ json.Unmarshaler = &Fields{} +var _ json.Marshaler = FieldsV1{} +var _ json.Unmarshaler = &FieldsV1{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go index 37141bd5dac98..912cf2036b0a8 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -55,8 +55,6 @@ type Object interface { SetLabels(labels map[string]string) GetAnnotations() map[string]string SetAnnotations(annotations map[string]string) - GetInitializers() *Initializers - SetInitializers(initializers *Initializers) GetFinalizers() []string SetFinalizers(finalizers []string) GetOwnerReferences() []OwnerReference @@ -166,8 +164,6 @@ func (meta *ObjectMeta) GetLabels() map[string]string { return m func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } -func (meta *ObjectMeta) GetInitializers() *Initializers { return meta.Initializers } -func (meta *ObjectMeta) SetInitializers(initializers *Initializers) { meta.Initializers = initializers } func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return meta.OwnerReferences } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go index 14841be512ae5..6dd6d8999f742 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go @@ -70,3 +70,11 @@ func (m *MicroTime) MarshalTo(data []byte) (int, error) { } return m.ProtoMicroTime().MarshalTo(data) } + +// MarshalToSizedBuffer implements the protobuf marshalling interface. +func (m *MicroTime) MarshalToSizedBuffer(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoMicroTime().MarshalToSizedBuffer(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go index ed72186b49fec..eac8d965890a3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go @@ -75,7 +75,7 @@ func (m *Time) Unmarshal(data []byte) error { return nil } -// Marshal implements the protobuf marshalling interface. +// Marshal implements the protobuf marshaling interface. func (m *Time) Marshal() (data []byte, err error) { if m == nil || m.Time.IsZero() { return nil, nil @@ -83,10 +83,18 @@ func (m *Time) Marshal() (data []byte, err error) { return m.ProtoTime().Marshal() } -// MarshalTo implements the protobuf marshalling interface. +// MarshalTo implements the protobuf marshaling interface. func (m *Time) MarshalTo(data []byte) (int, error) { if m == nil || m.Time.IsZero() { return 0, nil } return m.ProtoTime().MarshalTo(data) } + +// MarshalToSizedBuffer implements the protobuf reverse marshaling interface. 
+func (m *Time) MarshalToSizedBuffer(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoTime().MarshalToSizedBuffer(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 46ef65f457fd3..76b275589f22b 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -43,14 +43,14 @@ type TypeMeta struct { // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` } @@ -61,6 +61,10 @@ type ListMeta struct { // selfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` @@ -69,7 +73,7 @@ type ListMeta struct { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` @@ -91,9 +95,6 @@ type ListMeta struct { // Servers older than v1.15 do not set this field. // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients // should not rely on the remainingItemCount to be set or to be exact. - // - // This field is alpha and can be changed or removed without notice. - // // +optional RemainingItemCount *int64 `json:"remainingItemCount,omitempty" protobuf:"bytes,4,opt,name=remainingItemCount"` } @@ -130,7 +131,7 @@ type ObjectMeta struct { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency // +optional GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` @@ -148,6 +149,10 @@ type ObjectMeta struct { // SelfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. 
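The MarshalToSizedBuffer methods added above for MicroTime and Time keep those types compatible with the newer gogo/protobuf marshaling style, which, as far as the generated callers are concerned, fills a pre-sized buffer from the tail towards the front and reports how many bytes were written. A rough, non-authoritative illustration of that calling convention, not the generated implementation:

package main

import "fmt"

// writeTailFirst copies payload into the end of buf and returns the number of
// bytes written, imitating the tail-first contract described above.
func writeTailFirst(buf, payload []byte) (int, error) {
	if len(payload) > len(buf) {
		return 0, fmt.Errorf("buffer too small: need %d, have %d", len(payload), len(buf))
	}
	i := len(buf) - len(payload) // index where this field starts
	copy(buf[i:], payload)
	return len(buf) - i, nil // bytes written, counted from the tail
}

func main() {
	buf := make([]byte, 8)
	n, err := writeTailFirst(buf, []byte("time"))
	fmt.Printf("wrote %d bytes, err=%v, tail=%q\n", n, err, buf[len(buf)-n:])
}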
// +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` @@ -170,7 +175,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` @@ -186,7 +191,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Null for lists. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` @@ -207,7 +212,7 @@ type ObjectMeta struct { // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` @@ -241,19 +246,6 @@ type ObjectMeta struct { // +patchStrategy=merge OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` - // An initializer is a controller which enforces some system invariant at object creation time. - // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - // - // DEPRECATED - initializers are an alpha field and will be removed in v1.15. - Initializers *Initializers `json:"initializers,omitempty" protobuf:"bytes,16,opt,name=initializers"` - // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries @@ -276,32 +268,10 @@ type ObjectMeta struct { // "ci-cd". The set of fields is always in the version that the // workflow used when modifying the object. // - // This field is alpha and can be changed or removed without notice. - // // +optional ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"` } -// Initializers tracks the progress of initialization. -type Initializers struct { - // Pending is a list of initializers that must execute in order before this object is visible. - // When the last pending initializer is removed, and no failing result is set, the initializers - // struct will be set to nil and the object is considered as initialized and visible to all - // clients. 
- // +patchMergeKey=name - // +patchStrategy=merge - Pending []Initializer `json:"pending" protobuf:"bytes,1,rep,name=pending" patchStrategy:"merge" patchMergeKey:"name"` - // If result is set with the Failure field, the object will be persisted to storage and then deleted, - // ensuring that other clients can observe the deletion. - Result *Status `json:"result,omitempty" protobuf:"bytes,2,opt,name=result"` -} - -// Initializer is information about an initializer that has not yet completed. -type Initializer struct { - // name of the process that is responsible for initializing this object. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - const ( // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients NamespaceDefault string = "default" @@ -322,7 +292,7 @@ type OwnerReference struct { // API version of the referent. APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. // More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -373,7 +343,7 @@ type ListOptions struct { // If the feature gate WatchBookmarks is not enabled in apiserver, // this field is ignored. // - // This field is alpha and can be changed or removed without notice. + // This field is beta. // // +optional AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"` @@ -615,13 +585,13 @@ type Preconditions struct { type Status struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Status of the operation. // One of: "Success" or "Failure". - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` // A human-readable description of the status of this operation. @@ -660,7 +630,7 @@ type StatusDetails struct { Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` // UID of the resource. @@ -792,11 +762,13 @@ const ( // doesn't make any sense, for example deleting a read-only object. This is different than // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the // data was invalid. API calls that return BadRequest can never succeed. 
+ // Status code 400 StatusReasonBadRequest StatusReason = "BadRequest" // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the // resource was not supported by the code - for instance, attempting to delete a resource that // can only be created. API calls that return MethodNotAllowed can never succeed. + // Status code 405 StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" // StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable @@ -895,7 +867,7 @@ const ( type List struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -1128,9 +1100,16 @@ type ManagedFieldsEntry struct { // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' // +optional Time *Time `json:"time,omitempty" protobuf:"bytes,4,opt,name=time"` - // Fields identifies a set of fields. + + // Fields is tombstoned to show why 5 is a reserved protobuf tag. + //Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"` + + // FieldsType is the discriminator for the different fields format and version. + // There is currently only one possible value: "FieldsV1" + FieldsType string `json:"fieldsType,omitempty" protobuf:"bytes,6,opt,name=fieldsType"` + // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. // +optional - Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"` + FieldsV1 *FieldsV1 `json:"fieldsV1,omitempty" protobuf:"bytes,7,opt,name=fieldsV1"` } // ManagedFieldsOperationType is the type of operation which lead to a ManagedFieldsEntry being created. @@ -1141,21 +1120,20 @@ const ( ManagedFieldsOperationUpdate ManagedFieldsOperationType = "Update" ) -// Fields stores a set of fields in a data structure like a Trie. -// To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff -type Fields struct { - // Map stores a set of fields in a data structure like a Trie. - // - // Each key is either a '.' representing the field itself, and will always map to an empty set, - // or a string representing a sub-field or item. The string will follow one of these four formats: - // 'f:', where is the name of a field in a struct, or key in a map - // 'v:', where is the exact json formatted value of a list item - // 'i:', where is position of a item in a list - // 'k:', where is a map of a list item's key fields to their unique values - // If a key maps to an empty Fields value, the field that key represents is part of the set. - // - // The exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal - Map map[string]Fields `json:",inline" protobuf:"bytes,1,rep,name=map"` +// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. +// +// Each key is either a '.' representing the field itself, and will always map to an empty set, +// or a string representing a sub-field or item. 
The string will follow one of these four formats: +// 'f:<name>', where <name> is the name of a field in a struct, or key in a map +// 'v:<value>', where <value> is the exact json formatted value of a list item +// 'i:<index>', where <index> is position of a item in a list +// 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values +// If a key maps to an empty Fields value, the field that key represents is part of the set. +// +// The exact format is defined in sigs.k8s.io/structured-merge-diff +type FieldsV1 struct { + // Raw is the underlying serialization of this object. + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=Raw"` } // TODO: Table does not generate to protobuf because of the interface{} - fix protobuf @@ -1170,7 +1148,7 @@ type Fields struct { type Table struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty"` @@ -1301,7 +1279,7 @@ type TableOptions struct { type PartialObjectMetadata struct { TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` } @@ -1311,7 +1289,7 @@ type PartialObjectMetadata struct { type PartialObjectMetadataList struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index f35c22bf1623e..07e6cc12654ff 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -119,12 +119,12 @@ func (ExportOptions) SwaggerDoc() map[string]string { return map_ExportOptions } -var map_Fields = map[string]string{ - "": "Fields stores a set of fields in a data structure like a Trie. To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff", +var map_FieldsV1 = map[string]string{ + "": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item.
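A minimal sketch of what the FieldsV1 wire form looks like (illustration only; the field names below are invented): the Raw bytes are ordinary JSON whose keys use the 'f:<name>'/'k:<keys>' forms described above, so they can be inspected with nothing more than encoding/json:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A managed-fields set as an apiserver might record it: the manager owns
	// metadata.labels.app and spec.replicas.
	raw := []byte(`{
	  "f:metadata": {"f:labels": {"f:app": {}}},
	  "f:spec": {"f:replicas": {}}
	}`)

	var set map[string]interface{}
	if err := json.Unmarshal(raw, &set); err != nil {
		panic(err)
	}
	fmt.Println(set["f:spec"]) // map[f:replicas:map[]]
}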
The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", } -func (Fields) SwaggerDoc() map[string]string { - return map_Fields +func (FieldsV1) SwaggerDoc() map[string]string { + return map_FieldsV1 } var map_GetOptions = map[string]string{ @@ -146,25 +146,6 @@ func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { return map_GroupVersionForDiscovery } -var map_Initializer = map[string]string{ - "": "Initializer is information about an initializer that has not yet completed.", - "name": "name of the process that is responsible for initializing this object.", -} - -func (Initializer) SwaggerDoc() map[string]string { - return map_Initializer -} - -var map_Initializers = map[string]string{ - "": "Initializers tracks the progress of initialization.", - "pending": "Pending is a list of initializers that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.", - "result": "If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.", -} - -func (Initializers) SwaggerDoc() map[string]string { - return map_Initializers -} - var map_LabelSelector = map[string]string{ "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", @@ -188,7 +169,7 @@ func (LabelSelectorRequirement) SwaggerDoc() map[string]string { var map_List = map[string]string{ "": "List holds a list of objects, which may not be known by the server.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of objects", } @@ -198,10 +179,10 @@ func (List) SwaggerDoc() map[string]string { var map_ListMeta = map[string]string{ "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", - "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.", - "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", - "remainingItemCount": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.\n\nThis field is alpha and can be changed or removed without notice.", + "remainingItemCount": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.", } func (ListMeta) SwaggerDoc() map[string]string { @@ -213,7 +194,7 @@ var map_ListOptions = map[string]string{ "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is alpha and can be changed or removed without notice.", + "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.", "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", @@ -230,7 +211,8 @@ var map_ManagedFieldsEntry = map[string]string{ "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", "time": "Time is timestamp of when these fields were set. 
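The limit/continue contract described above lends itself to a simple pagination loop. A sketch under stated assumptions: listPods stands in for whatever list call the client actually uses (it is not an API in this diff), and k8s.io/api/core/v1 supplies the PodList type whose embedded ListMeta carries the continue token:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// listAll drains a chunked list 500 items at a time.
func listAll(listPods func(metav1.ListOptions) (*corev1.PodList, error)) ([]corev1.Pod, error) {
	var out []corev1.Pod
	opts := metav1.ListOptions{Limit: 500}
	for {
		page, err := listPods(opts)
		if err != nil {
			return nil, err
		}
		out = append(out, page.Items...)
		// An empty continue token means the chunked list is complete.
		if page.Continue == "" {
			return out, nil
		}
		opts.Continue = page.Continue
	}
}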
It should always be empty if Operation is 'Apply'", - "fields": "Fields identifies a set of fields.", + "fieldsType": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", + "fieldsV1": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.", } func (ManagedFieldsEntry) SwaggerDoc() map[string]string { @@ -240,22 +222,21 @@ func (ManagedFieldsEntry) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", - "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. 
Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. 
Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", - "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. 
Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.\n\nDEPRECATED - initializers are an alpha field and will be removed in v1.15.", "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", - "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.\n\nThis field is alpha and can be changed or removed without notice.", + "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", } func (ObjectMeta) SwaggerDoc() map[string]string { @@ -265,7 +246,7 @@ func (ObjectMeta) SwaggerDoc() map[string]string { var map_OwnerReference = map[string]string{ "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", "apiVersion": "API version of the referent.", - "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "controller": "If true, this reference points to the managing controller.", @@ -278,7 +259,7 @@ func (OwnerReference) SwaggerDoc() map[string]string { var map_PartialObjectMetadata = map[string]string{ "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
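The finalizer contract documented above (entries may only be removed once deletionTimestamp is set) is usually implemented as a small cleanup step in a controller. A hedged sketch, with the finalizer name invented and persistence of the update left to the caller:

package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

const myFinalizer = "example.io/cleanup" // hypothetical finalizer name

// removeFinalizer drops myFinalizer from the object's finalizer list after
// deletion has been requested; the caller is assumed to persist the change.
func removeFinalizer(meta *metav1.ObjectMeta) {
	if meta.DeletionTimestamp == nil {
		return // deletion has not been requested yet
	}
	kept := meta.Finalizers[:0]
	for _, f := range meta.Finalizers {
		if f != myFinalizer {
			kept = append(kept, f)
		}
	}
	meta.Finalizers = kept
}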
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (PartialObjectMetadata) SwaggerDoc() map[string]string { @@ -287,7 +268,7 @@ func (PartialObjectMetadata) SwaggerDoc() map[string]string { var map_PartialObjectMetadataList = map[string]string{ "": "PartialObjectMetadataList contains a list of objects containing only their metadata", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "items contains each of the included items.", } @@ -345,8 +326,8 @@ func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { var map_Status = map[string]string{ "": "Status is a return value for calls that don't return other objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", "message": "A human-readable description of the status of this operation.", "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", @@ -372,7 +353,7 @@ var map_StatusDetails = map[string]string{ "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", - "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "causes": "The Causes array includes more details associated with the StatusReason failure. 
Not all StatusReasons may provide detailed causes.", "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", @@ -384,7 +365,7 @@ func (StatusDetails) SwaggerDoc() map[string]string { var map_Table = map[string]string{ "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", "rows": "rows is the list of items in the table.", } @@ -440,8 +421,8 @@ func (TableRowCondition) SwaggerDoc() map[string]string { var map_TypeMeta = map[string]string{ "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", - "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", } func (TypeMeta) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 3b07e86db8f3c..7ea0986f35885 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -32,6 +32,9 @@ import ( // NestedFieldCopy returns a deep copy of the value of a nested field. // Returns false if the value is missing. // No error is returned for a nil field. +// +// Note: fields passed to this function are treated as keys within the passed +// object; no array/slice syntax is supported. func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { val, found, err := NestedFieldNoCopy(obj, fields...) 
if !found || err != nil { @@ -43,6 +46,9 @@ func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, // NestedFieldNoCopy returns a reference to a nested field. // Returns false if value is not found and an error if unable // to traverse obj. +// +// Note: fields passed to this function are treated as keys within the passed +// object; no array/slice syntax is supported. func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { var val interface{} = obj diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 0ba18d45d8dff..d1903394d7f2e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -431,31 +431,6 @@ func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { return gvk } -func (u *Unstructured) GetInitializers() *metav1.Initializers { - m, found, err := nestedMapNoCopy(u.Object, "metadata", "initializers") - if !found || err != nil { - return nil - } - out := &metav1.Initializers{} - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, out); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) - return nil - } - return out -} - -func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) { - if initializers == nil { - RemoveNestedField(u.Object, "metadata", "initializers") - return - } - out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(initializers) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) - } - u.setNestedField(out, "metadata", "initializers") -} - func (u *Unstructured) GetFinalizers() []string { val, _, _ := NestedStringSlice(u.Object, "metadata", "finalizers") return val diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go index eeb73999f9b31..2743793dde2c4 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -169,3 +169,18 @@ func ValidateTableOptions(opts *metav1.TableOptions) field.ErrorList { } return allErrs } + +func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for _, fields := range fieldsList { + switch fields.Operation { + case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate: + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("operation"), fields.Operation, "must be `Apply` or `Update`")) + } + if fields.FieldsType != "FieldsV1" { + allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldsType"), fields.FieldsType, "must be `FieldsV1`")) + } + } + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index fa179ac7b109d..b82fdf202f2cd 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -313,24 +313,22 @@ func (in *ExportOptions) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
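A minimal sketch of the note added to NestedFieldCopy/NestedFieldNoCopy above (the obj literal is invented): every argument is treated as a map key, so list indices cannot be addressed this way:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	obj := map[string]interface{}{
		"spec": map[string]interface{}{
			"replicas": int64(3),
		},
	}
	// Each argument is a key of a nested map.
	val, found, err := unstructured.NestedFieldNoCopy(obj, "spec", "replicas")
	fmt.Println(val, found, err) // 3 true <nil>

	// "containers[0]" is looked up literally as a key; there is no array/slice syntax.
	_, found, _ = unstructured.NestedFieldNoCopy(obj, "spec", "containers[0]")
	fmt.Println(found) // false
}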
in must be non-nil. -func (in *Fields) DeepCopyInto(out *Fields) { +func (in *FieldsV1) DeepCopyInto(out *FieldsV1) { *out = *in - if in.Map != nil { - in, out := &in.Map, &out.Map - *out = make(map[string]Fields, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fields. -func (in *Fields) DeepCopy() *Fields { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsV1. +func (in *FieldsV1) DeepCopy() *FieldsV1 { if in == nil { return nil } - out := new(Fields) + out := new(FieldsV1) in.DeepCopyInto(out) return out } @@ -456,48 +454,6 @@ func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Initializer) DeepCopyInto(out *Initializer) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. -func (in *Initializer) DeepCopy() *Initializer { - if in == nil { - return nil - } - out := new(Initializer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Initializers) DeepCopyInto(out *Initializers) { - *out = *in - if in.Pending != nil { - in, out := &in.Pending, &out.Pending - *out = make([]Initializer, len(*in)) - copy(*out, *in) - } - if in.Result != nil { - in, out := &in.Result, &out.Result - *out = new(Status) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializers. -func (in *Initializers) DeepCopy() *Initializers { - if in == nil { - return nil - } - out := new(Initializers) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InternalEvent) DeepCopyInto(out *InternalEvent) { *out = *in @@ -659,9 +615,9 @@ func (in *ManagedFieldsEntry) DeepCopyInto(out *ManagedFieldsEntry) { in, out := &in.Time, &out.Time *out = (*in).DeepCopy() } - if in.Fields != nil { - in, out := &in.Fields, &out.Fields - *out = new(Fields) + if in.FieldsV1 != nil { + in, out := &in.FieldsV1, &out.FieldsV1 + *out = new(FieldsV1) (*in).DeepCopyInto(*out) } return @@ -721,11 +677,6 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Initializers != nil { - in, out := &in.Initializers, &out.Initializers - *out = new(Initializers) - (*in).DeepCopyInto(*out) - } if in.Finalizers != nil { in, out := &in.Finalizers, &out.Finalizers *out = make([]string, len(*in)) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index 56cb9d8268b37..5fae30ae81de3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -17,31 +17,20 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto -/* - Package v1beta1 is a generated protocol buffer package. 
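For the ValidateManagedFields helper added a little earlier in this diff, a hedged usage sketch (the manager name is invented): a well-formed entry yields no errors, while an unknown Operation or a FieldsType other than "FieldsV1" produces field.Invalid errors:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func validateEntries() field.ErrorList {
	entries := []metav1.ManagedFieldsEntry{{
		Manager:    "kubectl", // hypothetical manager name
		Operation:  metav1.ManagedFieldsOperationApply,
		FieldsType: "FieldsV1",
	}}
	// Empty for the entry above; invalid values would append field.Invalid errors.
	return validation.ValidateManagedFields(entries, field.NewPath("metadata", "managedFields"))
}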
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto - - It has these top-level messages: - PartialObjectMetadataList -*/ package v1beta1 import ( fmt "fmt" + io "io" + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" - - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - strings "strings" - + math_bits "math/bits" reflect "reflect" - - io "io" + strings "strings" ) // Reference imports to suppress errors if they are not otherwise used. @@ -58,16 +47,68 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_90ec10f86b91f9a8, []int{0} +} +func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) } +func (m *PartialObjectMetadataList) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) +} + +var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo func init() { proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_90ec10f86b91f9a8) +} + +var fileDescriptor_90ec10f86b91f9a8 = []byte{ + // 321 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xf3, 0x30, + 0x18, 0xc7, 0x9b, 0xf7, 0x65, 0x38, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xf3, 0xb0, + 0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0xa2, 0xec, 0x28, 0x1e, 0x4c, 0xbb, 0xc7, 0x2e, 0xd6, + 0x34, 0x25, 0x79, 0x3a, 0xf0, 0xe6, 0x47, 0xf0, 0x63, 0xed, 0xb8, 0xe3, 0x40, 0x18, 0xae, 0x7e, + 0x11, 0x49, 0x57, 0x45, 0xa6, 0x62, 0x6f, 0x79, 0xfe, 0xe1, 0xf7, 0xcb, 0x3f, 0x89, 0x3f, 0x8e, + 0x4f, 0x2c, 0x93, 0x9a, 0xc7, 0x59, 0x00, 0x26, 0x01, 0x04, 0xcb, 0x67, 0x90, 0x4c, 0xb4, 0xe1, + 0xe5, 0x86, 0x48, 0xa5, 0x12, 0xe1, 0x54, 0x26, 0x60, 0x1e, 0x79, 0x1a, 0x47, 0x2e, 0xb0, 0x5c, + 0x01, 0x0a, 0x3e, 0x1b, 0x04, 0x80, 0x62, 0xc0, 0x23, 0x48, 0xc0, 0x08, 0x84, 0x09, 0x4b, 0x8d, + 0x46, 0xdd, 0x3c, 0xdc, 0xa0, 0xec, 0x2b, 0xca, 0xd2, 0x38, 0x72, 0x81, 0x65, 0x0e, 0x65, 0x25, + 0xda, 0xee, 0x47, 0x12, 0xa7, 0x59, 0xc0, 0x42, 0xad, 0x78, 0xa4, 0x23, 0xcd, 0x0b, 0x43, 0x90, + 0xdd, 0x15, 0x53, 0x31, 0x14, 0xab, 0x8d, 0xb9, 0x7d, 0x54, 0xa5, 0xd4, 0x76, 0x9f, 0xf6, 0xaf, + 0x57, 0x31, 0x59, 0x82, 0x52, 0xc1, 0x37, 0xe0, 0xf8, 0x2f, 0xc0, 0x86, 0x53, 0x50, 0x62, 0x9b, + 0x3b, 0x78, 0x21, 0xfe, 0xfe, 0x95, 0x30, 0x28, 0xc5, 0xc3, 0x65, 0x70, 0x0f, 0x21, 0x5e, 0x00, + 0x8a, 0x89, 0x40, 0x71, 0x2e, 0x2d, 0x36, 0x6f, 0xfc, 0xba, 0x2a, 0xe7, 0xd6, 0xbf, 0x2e, 0xe9, + 0x35, 0x86, 0x8c, 0x55, 0x79, 0x29, 0xe6, 0x68, 0x67, 0x1a, 0xed, 0xcd, 0x57, 0x1d, 
0x2f, 0x5f, + 0x75, 0xea, 0x1f, 0xc9, 0xf8, 0xd3, 0xd8, 0xbc, 0xf5, 0x6b, 0x12, 0x41, 0xd9, 0x16, 0xe9, 0xfe, + 0xef, 0x35, 0x86, 0xa7, 0xd5, 0xd4, 0x3f, 0xb6, 0x1d, 0xed, 0x96, 0xe7, 0xd4, 0xce, 0x9c, 0x71, + 0xbc, 0x11, 0x8f, 0xfa, 0xf3, 0x35, 0xf5, 0x16, 0x6b, 0xea, 0x2d, 0xd7, 0xd4, 0x7b, 0xca, 0x29, + 0x99, 0xe7, 0x94, 0x2c, 0x72, 0x4a, 0x96, 0x39, 0x25, 0xaf, 0x39, 0x25, 0xcf, 0x6f, 0xd4, 0xbb, + 0xde, 0x29, 0xbf, 0xf6, 0x3d, 0x00, 0x00, 0xff, 0xff, 0xc6, 0x7e, 0x00, 0x08, 0x5a, 0x02, 0x00, + 0x00, +} + func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -75,43 +116,57 @@ func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { } func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n1, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - i += n1 - return i, nil + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PartialObjectMetadataList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Items) > 0 { @@ -126,14 +181,7 @@ func (m *PartialObjectMetadataList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -142,9 +190,14 @@ func (this *PartialObjectMetadataList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]PartialObjectMetadata{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&PartialObjectMetadataList{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "k8s_io_apimachinery_pkg_apis_meta_v1.PartialObjectMetadata", 1), `&`, ``, 1) + `,`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, 
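As an aside on the regenerated helpers above: the new sovGenerated computes a varint's encoded length without a loop, using the value's bit length (7 payload bits per byte, with x|1 guarding x == 0). A small sketch with an invented name to show the arithmetic:

package main

import (
	"fmt"
	"math/bits"
)

// varintLen mirrors the generated sovGenerated helper.
func varintLen(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	for _, x := range []uint64{0, 127, 128, 1 << 63} {
		fmt.Println(x, varintLen(x)) // prints 1, 1, 2 and 10 bytes respectively
	}
}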
`}`, }, "") return s @@ -172,7 +225,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -200,7 +253,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -209,10 +262,13 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, k8s_io_apimachinery_pkg_apis_meta_v1.PartialObjectMetadata{}) + m.Items = append(m.Items, v1.PartialObjectMetadata{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -231,7 +287,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -240,6 +296,9 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -256,6 +315,9 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -322,10 +384,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -354,6 +419,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -372,32 +440,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 322 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xf3, 0x30, - 0x18, 0xc7, 0x9b, 0xf7, 0x65, 0x38, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xf3, 0xb0, - 0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0xa2, 0xec, 0x28, 0x1e, 0x4c, 0xbb, 0xc7, 0x2e, 0xd6, - 0x34, 0x25, 0x79, 0x3a, 0xf0, 0xe6, 0x47, 0xf0, 0x63, 0xed, 0xb8, 0xe3, 0x40, 0x18, 0xae, 0x7e, - 0x11, 0x49, 0x57, 0x45, 0xa6, 0x62, 0x6f, 0x7d, 0xfe, 0xcd, 0xef, 0x97, 0x7f, 0x12, 0x7f, 0x1c, - 0x9f, 0x58, 0x26, 0x35, 0x8f, 0xb3, 0x00, 0x4c, 0x02, 0x08, 0x96, 0xcf, 0x20, 0x99, 0x68, 0xc3, - 0xcb, 0x1f, 0x22, 0x95, 0x4a, 0x84, 0x53, 0x99, 0x80, 0x79, 0xe4, 0x69, 0x1c, 0xb9, 0xc0, 0x72, - 0x05, 0x28, 0xf8, 0x6c, 0x10, 0x00, 0x8a, 0x01, 0x8f, 0x20, 0x01, 0x23, 0x10, 0x26, 0x2c, 0x35, - 0x1a, 0x75, 0xf3, 0x70, 0x83, 0xb2, 0xaf, 0x28, 0x4b, 0xe3, 0xc8, 0x05, 0x96, 0x39, 0x94, 0x95, - 0x68, 0xbb, 0x1f, 0x49, 0x9c, 0x66, 0x01, 0x0b, 0xb5, 0xe2, 0x91, 0x8e, 0x34, 0x2f, 0x0c, 0x41, - 0x76, 0x57, 0x4c, 0xc5, 
0x50, 0x7c, 0x6d, 0xcc, 0xed, 0xa3, 0x2a, 0xa5, 0xb6, 0xfb, 0xb4, 0x7f, - 0x3d, 0x8a, 0xc9, 0x12, 0x94, 0x0a, 0xbe, 0x01, 0xc7, 0x7f, 0x01, 0x36, 0x9c, 0x82, 0x12, 0xdb, - 0xdc, 0xc1, 0x0b, 0xf1, 0xf7, 0xaf, 0x84, 0x41, 0x29, 0x1e, 0x2e, 0x83, 0x7b, 0x08, 0xf1, 0x02, - 0x50, 0x4c, 0x04, 0x8a, 0x73, 0x69, 0xb1, 0x79, 0xeb, 0xd7, 0x24, 0x82, 0xb2, 0x2d, 0xd2, 0xfd, - 0xdf, 0x6b, 0x0c, 0x4f, 0x59, 0x95, 0x6b, 0x62, 0x3f, 0xfa, 0x46, 0xbb, 0xf3, 0x55, 0xc7, 0xcb, - 0x57, 0x9d, 0xda, 0x99, 0x33, 0x8e, 0x37, 0xe2, 0xe6, 0x8d, 0x5f, 0x57, 0xe5, 0x8a, 0xd6, 0xbf, - 0x2e, 0xe9, 0x35, 0x86, 0xac, 0xda, 0x26, 0xae, 0x9f, 0x73, 0x8f, 0xf6, 0x4a, 0x6f, 0xfd, 0x23, - 0x19, 0x7f, 0x1a, 0x47, 0xfd, 0xf9, 0x9a, 0x7a, 0x8b, 0x35, 0xf5, 0x96, 0x6b, 0xea, 0x3d, 0xe5, - 0x94, 0xcc, 0x73, 0x4a, 0x16, 0x39, 0x25, 0xcb, 0x9c, 0x92, 0xd7, 0x9c, 0x92, 0xe7, 0x37, 0xea, - 0x5d, 0xef, 0x94, 0x4f, 0xfb, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x10, 0x2f, 0x48, 0xbd, 0x5a, 0x02, - 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index 6339e719ad3a0..19606666f8a3c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v1beta1"; // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message PartialObjectMetadataList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go index 87895a5b5f270..f16170a37b2e7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go @@ -63,7 +63,7 @@ type PartialObjectMetadata = v1.PartialObjectMetadata type PartialObjectMetadataList struct { v1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"` diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go index 26d13f5d91c4b..ef7e7c1e9017a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PartialObjectMetadataList = map[string]string{ "": "PartialObjectMetadataList contains a list of objects containing only their metadata.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "items contains each of the included items.", } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 80343081f52d1..b3e8a53b332dc 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -94,7 +94,7 @@ func parseBool(key string) bool { } value, err := strconv.ParseBool(key) if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't parse '%s' as bool for unstructured mismatch detection", key)) + utilruntime.HandleError(fmt.Errorf("couldn't parse '%s' as bool for unstructured mismatch detection", key)) } return value } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 5781b2c9ad058..af2f076b881a9 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -17,31 +17,18 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto - - It has these top-level messages: - RawExtension - TypeMeta - Unknown -*/ package runtime import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - + io "io" math "math" - - strings "strings" - + math_bits "math/bits" reflect "reflect" + strings "strings" - io "io" + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. @@ -55,27 +42,132 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *RawExtension) Reset() { *m = RawExtension{} } -func (*RawExtension) ProtoMessage() {} -func (*RawExtension) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *RawExtension) Reset() { *m = RawExtension{} } +func (*RawExtension) ProtoMessage() {} +func (*RawExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{0} +} +func (m *RawExtension) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RawExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawExtension.Merge(m, src) +} +func (m *RawExtension) XXX_Size() int { + return m.Size() +} +func (m *RawExtension) XXX_DiscardUnknown() { + xxx_messageInfo_RawExtension.DiscardUnknown(m) +} -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_RawExtension proto.InternalMessageInfo + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{1} +} +func (m *TypeMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeMeta.Merge(m, src) +} +func (m *TypeMeta) XXX_Size() int { + return m.Size() +} +func (m *TypeMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TypeMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeMeta proto.InternalMessageInfo + +func (m *Unknown) Reset() { *m = Unknown{} } +func (*Unknown) ProtoMessage() {} +func (*Unknown) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{2} +} +func (m *Unknown) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Unknown) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Unknown) XXX_Merge(src proto.Message) { + xxx_messageInfo_Unknown.Merge(m, src) +} +func (m *Unknown) XXX_Size() int { + return m.Size() +} +func (m *Unknown) XXX_DiscardUnknown() { + xxx_messageInfo_Unknown.DiscardUnknown(m) +} -func (m *Unknown) Reset() { *m = Unknown{} } -func (*Unknown) ProtoMessage() {} -func (*Unknown) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_Unknown proto.InternalMessageInfo func init() { proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension") proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_9d3c45d7f546725c) +} + +var fileDescriptor_9d3c45d7f546725c = []byte{ + // 378 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 
0x13, 0x31, + 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, + 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, + 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, + 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, + 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, + 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, + 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, + 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, + 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, + 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, + 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, + 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, + 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, + 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, + 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, + 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, + 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, + 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, + 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, + 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, + 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, + 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, + 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, +} + func (m *RawExtension) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -83,23 +175,29 @@ func (m *RawExtension) Marshal() (dAtA []byte, err error) { } func (m *RawExtension) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RawExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Raw != nil { - dAtA[i] = 0xa - i++ + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -107,25 +205,32 @@ func (m *TypeMeta) Marshal() (dAtA []byte, err error) { } func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int 
_ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x12 - i++ + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Unknown) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -133,45 +238,60 @@ func (m *Unknown) Marshal() (dAtA []byte, err error) { } func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Unknown) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 + i -= len(m.ContentType) + copy(dAtA[i:], m.ContentType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) + i-- + dAtA[i] = 0x22 + i -= len(m.ContentEncoding) + copy(dAtA[i:], m.ContentEncoding) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) + i-- + dAtA[i] = 0x1a if m.Raw != nil { - dAtA[i] = 0x12 - i++ + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) - i += copy(dAtA[i:], m.ContentEncoding) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) - i += copy(dAtA[i:], m.ContentType) - return i, nil + { + size, err := m.TypeMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *RawExtension) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Raw != nil { @@ -182,6 +302,9 @@ func (m *RawExtension) Size() (n int) { } func (m *TypeMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIVersion) @@ -192,6 +315,9 @@ func (m *TypeMeta) Size() (n int) { } func (m *Unknown) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.TypeMeta.Size() @@ -208,14 +334,7 @@ func (m *Unknown) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -277,7 +396,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -305,7 +424,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 
{ break } @@ -314,6 +433,9 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -331,6 +453,9 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -358,7 +483,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -386,7 +511,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -396,6 +521,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -415,7 +543,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -425,6 +553,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -439,6 +570,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -466,7 +600,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -494,7 +628,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -503,6 +637,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -524,7 +661,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -533,6 +670,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -555,7 +695,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -565,6 +705,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -584,7 +727,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -594,6 +737,9 @@ func 
(m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -608,6 +754,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -674,10 +823,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -706,6 +858,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -724,35 +879,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 378 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31, - 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, - 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, - 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, - 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, - 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, - 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, - 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, - 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, - 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, - 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, - 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, - 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, - 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, - 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, - 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, - 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, - 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, - 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, - 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, - 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, - 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, - 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 
0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, - 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index c93861c527c40..a7276649f4a2a 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -17,22 +17,14 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto -/* -Package schema is a generated protocol buffer package. - -It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto - -It has these top-level messages: -*/ package schema import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - math "math" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. @@ -47,10 +39,10 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d) } -var fileDescriptorGenerated = []byte{ +var fileDescriptor_0462724132518e0d = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30, 0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50, diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 01f56c9871e05..f21b0ef19dfb7 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -48,28 +48,40 @@ type serializerType struct { StreamSerializer runtime.Serializer } -func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []serializerType { - jsonSerializer := json.NewSerializer(mf, scheme, scheme, false) - jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true) - yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme) - serializer := protobuf.NewSerializer(scheme, scheme) - raw := protobuf.NewRawSerializer(scheme, scheme) +func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType { + jsonSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict}, + ) + jsonSerializerType := serializerType{ + AcceptContentTypes: []string{runtime.ContentTypeJSON}, + ContentType: runtime.ContentTypeJSON, + FileExtensions: []string{"json"}, + EncodesAsText: true, + Serializer: jsonSerializer, + + Framer: json.Framer, + StreamSerializer: jsonSerializer, + } + if options.Pretty { + jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: true, Strict: options.Strict}, + ) + } - serializers := []serializerType{ - { - AcceptContentTypes: []string{"application/json"}, - ContentType: "application/json", - FileExtensions: []string{"json"}, - EncodesAsText: true, - 
Serializer: jsonSerializer, - PrettySerializer: jsonPrettySerializer, + yamlSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict}, + ) + protoSerializer := protobuf.NewSerializer(scheme, scheme) + protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme) - Framer: json.Framer, - StreamSerializer: jsonSerializer, - }, + serializers := []serializerType{ + jsonSerializerType, { - AcceptContentTypes: []string{"application/yaml"}, - ContentType: "application/yaml", + AcceptContentTypes: []string{runtime.ContentTypeYAML}, + ContentType: runtime.ContentTypeYAML, FileExtensions: []string{"yaml"}, EncodesAsText: true, Serializer: yamlSerializer, @@ -78,10 +90,10 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []seri AcceptContentTypes: []string{runtime.ContentTypeProtobuf}, ContentType: runtime.ContentTypeProtobuf, FileExtensions: []string{"pb"}, - Serializer: serializer, + Serializer: protoSerializer, Framer: protobuf.LengthDelimitedFramer, - StreamSerializer: raw, + StreamSerializer: protoRawSerializer, }, } @@ -104,14 +116,56 @@ type CodecFactory struct { legacySerializer runtime.Serializer } +// CodecFactoryOptions holds the options for configuring CodecFactory behavior +type CodecFactoryOptions struct { + // Strict configures all serializers in strict mode + Strict bool + // Pretty includes a pretty serializer along with the non-pretty one + Pretty bool +} + +// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it. +// Functions implementing this type can be passed to the NewCodecFactory() constructor. +type CodecFactoryOptionsMutator func(*CodecFactoryOptions) + +// EnablePretty enables including a pretty serializer along with the non-pretty one +func EnablePretty(options *CodecFactoryOptions) { + options.Pretty = true +} + +// DisablePretty disables including a pretty serializer along with the non-pretty one +func DisablePretty(options *CodecFactoryOptions) { + options.Pretty = false +} + +// EnableStrict enables configuring all serializers in strict mode +func EnableStrict(options *CodecFactoryOptions) { + options.Strict = true +} + +// DisableStrict disables configuring all serializers in strict mode +func DisableStrict(options *CodecFactoryOptions) { + options.Strict = false +} + // NewCodecFactory provides methods for retrieving serializers for the supported wire formats // and conversion wrappers to define preferred internal and external versions. In the future, // as the internal version is used less, callers may instead use a defaulting serializer and // only convert objects which are shared internally (Status, common API machinery). +// +// Mutators can be passed to change the CodecFactoryOptions before construction of the factory. +// It is recommended to explicitly pass mutators instead of relying on defaults. +// By default, Pretty is enabled -- this is conformant with previously supported behavior. +// // TODO: allow other codecs to be compiled in? 
// TODO: accept a scheme interface -func NewCodecFactory(scheme *runtime.Scheme) CodecFactory { - serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory) +func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMutator) CodecFactory { + options := CodecFactoryOptions{Pretty: true} + for _, fn := range mutators { + fn(&options) + } + + serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory, options) return newCodecFactory(scheme, serializers) } @@ -268,7 +322,3 @@ func (f WithoutConversionCodecFactory) DecoderToVersion(serializer runtime.Decod Decoder: serializer, } } - -// DirectCodecFactory was renamed to WithoutConversionCodecFactory in 1.15. -// TODO: remove in 1.16. -type DirectCodecFactory = WithoutConversionCodecFactory diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 69ada8ecf9c5d..de1a7d677f885 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -122,7 +122,27 @@ func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { } iter.ReportError("DecodeNumber", err.Error()) default: + // init depth, if needed + if iter.Attachment == nil { + iter.Attachment = int(1) + } + + // remember current depth + originalAttachment := iter.Attachment + + // increment depth before descending + if i, ok := iter.Attachment.(int); ok { + iter.Attachment = i + 1 + if i > 10000 { + iter.ReportError("parse", "exceeded max depth") + return + } + } + *(*interface{})(ptr) = iter.Read() + + // restore current depth + iter.Attachment = originalAttachment } } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index 8af889d35ba18..0f33e1d821fac 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -203,7 +203,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { case bufferedMarshaller: // this path performs a single allocation during write but requires the caller to implement - // the more efficient Size and MarshalTo methods + // the more efficient Size and MarshalToSizedBuffer methods encodedSize := uint64(t.Size()) estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize) data := make([]byte, estimatedSize) @@ -283,6 +283,12 @@ type bufferedMarshaller interface { runtime.ProtobufMarshaller } +// Like bufferedMarshaller, but is able to marshal backwards, which is more efficient since it doesn't call Size() as frequently. +type bufferedReverseMarshaller interface { + proto.Sizer + runtime.ProtobufReverseMarshaller +} + // estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown // object with a nil RawJSON struct and the expected size of the provided buffer. The // returned size will not be correct if RawJSOn is set on unk. @@ -414,6 +420,19 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, // Encode serializes the provided object to the given writer. Overrides is ignored. 
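As a usage note for the options-aware NewCodecFactory above: a caller that wants strict JSON/YAML decoding and no pretty serializer can pass the corresponding mutators at construction time. A minimal sketch (the newStrictCodecs helper and the scheme argument are illustrative; the constructor and mutators are the ones introduced in this file):

package codecs

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// newStrictCodecs builds a CodecFactory whose JSON/YAML serializers run in
// strict mode and which omits the pretty JSON serializer.
func newStrictCodecs(scheme *runtime.Scheme) serializer.CodecFactory {
	return serializer.NewCodecFactory(scheme, serializer.EnableStrict, serializer.DisablePretty)
}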
func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { + case bufferedReverseMarshaller: + // this path performs a single allocation during write but requires the caller to implement + // the more efficient Size and MarshalToSizedBuffer methods + encodedSize := uint64(t.Size()) + data := make([]byte, encodedSize) + + n, err := t.MarshalToSizedBuffer(data) + if err != nil { + return err + } + _, err = w.Write(data[:n]) + return err + case bufferedMarshaller: // this path performs a single allocation during write but requires the caller to implement // the more efficient Size and MarshalTo methods diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index a04a2e98bff13..ee5cb86f7e640 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -230,11 +230,3 @@ func (c *codec) Encode(obj runtime.Object, w io.Writer) error { // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object return c.encoder.Encode(out, w) } - -// DirectEncoder was moved and renamed to runtime.WithVersionEncoder in 1.15. -// TODO: remove in 1.16. -type DirectEncoder = runtime.WithVersionEncoder - -// DirectDecoder was moved and renamed to runtime.WithoutVersionDecoder in 1.15. -// TODO: remove in 1.16. -type DirectDecoder = runtime.WithoutVersionDecoder diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index 3d3ebe5f9d1b4..2f0b6c9e55b4b 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -95,7 +95,7 @@ type RawExtension struct { // Raw is the underlying serialization of this object. // // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. - Raw []byte `protobuf:"bytes,1,opt,name=raw"` + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"` // Object can hold a representation of this extension - useful for working with versioned // structs. Object Object `json:"-"` diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go index ead96ee055432..a82227b239af5 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go @@ -24,46 +24,66 @@ type ProtobufMarshaller interface { MarshalTo(data []byte) (int, error) } +type ProtobufReverseMarshaller interface { + MarshalToSizedBuffer(data []byte) (int, error) +} + // NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown -// that will contain an object that implements ProtobufMarshaller. +// that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller. func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err + // Calculate the full size of the message. + msgSize := m.Size() + if b != nil { + msgSize += int(size) + sovGenerated(size) + 1 } - i += n1 + // Reverse marshal the fields of m. 
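// Reading note on the reverse-marshal contract used in this function and
// throughout the patch (obj and buf below are illustrative names):
// MarshalToSizedBuffer encodes backwards, starting at the end of the buffer,
// and returns how many bytes it wrote, so a caller that sizes the buffer
// exactly can hand it straight to a writer:
//
//	size := obj.Size()
//	buf := make([]byte, size)
//	n, err := obj.MarshalToSizedBuffer(buf) // fills buf from the back
//	// on success (and with a correct Size), n == size and buf holds the whole message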
+ i := msgSize + i -= len(m.ContentType) + copy(data[i:], m.ContentType) + i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) + i-- + data[i] = 0x22 + i -= len(m.ContentEncoding) + copy(data[i:], m.ContentEncoding) + i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) + i-- + data[i] = 0x1a if b != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, size) - n2, err := b.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - if uint64(n2) != size { - // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto - // struct returned would be wrong. - return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n2) + if r, ok := b.(ProtobufReverseMarshaller); ok { + n1, err := r.MarshalToSizedBuffer(data[:i]) + if err != nil { + return 0, err + } + i -= int(size) + if uint64(n1) != size { + // programmer error: the Size() method for protobuf does not match the results of LashramOt, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) + } + } else { + i -= int(size) + n1, err := b.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + if uint64(n1) != size { + // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) + } } - i += n2 + i = encodeVarintGenerated(data, i, size) + i-- + data[i] = 0x12 } - - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) - i += copy(data[i:], m.ContentEncoding) - - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) - i += copy(data[i:], m.ContentType) - return i, nil + n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i]) + if err != nil { + return 0, err + } + i -= n2 + i = encodeVarintGenerated(data, i, uint64(n2)) + i-- + data[i] = 0xa + return msgSize - i, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go index 9567f90060d99..1689e62e82912 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go @@ -21,11 +21,18 @@ import ( "time" ) +// PassiveClock allows for injecting fake or real clocks into code +// that needs to read the current time but does not support scheduling +// activity in the future. +type PassiveClock interface { + Now() time.Time + Since(time.Time) time.Duration +} + // Clock allows for injecting fake or real clocks into code that // needs to do arbitrary things based on time. type Clock interface { - Now() time.Time - Since(time.Time) time.Duration + PassiveClock After(time.Duration) <-chan time.Time NewTimer(time.Duration) Timer Sleep(time.Duration) @@ -66,10 +73,15 @@ func (RealClock) Sleep(d time.Duration) { time.Sleep(d) } -// FakeClock implements Clock, but returns an arbitrary time. -type FakeClock struct { +// FakePassiveClock implements PassiveClock, but returns an arbitrary time. +type FakePassiveClock struct { lock sync.RWMutex time time.Time +} + +// FakeClock implements Clock, but returns an arbitrary time. 
+type FakeClock struct { + FakePassiveClock // waiters are waiting for the fake time to pass their specified time waiters []fakeClockWaiter @@ -80,29 +92,41 @@ type fakeClockWaiter struct { stepInterval time.Duration skipIfBlocked bool destChan chan time.Time - fired bool +} + +func NewFakePassiveClock(t time.Time) *FakePassiveClock { + return &FakePassiveClock{ + time: t, + } } func NewFakeClock(t time.Time) *FakeClock { return &FakeClock{ - time: t, + FakePassiveClock: *NewFakePassiveClock(t), } } // Now returns f's time. -func (f *FakeClock) Now() time.Time { +func (f *FakePassiveClock) Now() time.Time { f.lock.RLock() defer f.lock.RUnlock() return f.time } // Since returns time since the time in f. -func (f *FakeClock) Since(ts time.Time) time.Duration { +func (f *FakePassiveClock) Since(ts time.Time) time.Duration { f.lock.RLock() defer f.lock.RUnlock() return f.time.Sub(ts) } +// Sets the time. +func (f *FakePassiveClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.time = t +} + // Fake version of time.After(d). func (f *FakeClock) After(d time.Duration) <-chan time.Time { f.lock.Lock() @@ -175,12 +199,10 @@ func (f *FakeClock) setTimeLocked(t time.Time) { if w.skipIfBlocked { select { case w.destChan <- t: - w.fired = true default: } } else { w.destChan <- t - w.fired = true } if w.stepInterval > 0 { @@ -287,36 +309,50 @@ func (f *fakeTimer) C() <-chan time.Time { return f.waiter.destChan } -// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise. +// Stop conditionally stops the timer. If the timer has neither fired +// nor been stopped then this call stops the timer and returns true, +// otherwise this call returns false. This is like time.Timer::Stop. func (f *fakeTimer) Stop() bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() - - newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters)) - for i := range f.fakeClock.waiters { - w := &f.fakeClock.waiters[i] - if w != &f.waiter { - newWaiters = append(newWaiters, *w) + // The timer has already fired or been stopped, unless it is found + // among the clock's waiters. + stopped := false + oldWaiters := f.fakeClock.waiters + newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters)) + seekChan := f.waiter.destChan + for i := range oldWaiters { + // Identify the timer's fakeClockWaiter by the identity of the + // destination channel, nothing else is necessarily unique and + // constant since the timer's creation. + if oldWaiters[i].destChan == seekChan { + stopped = true + } else { + newWaiters = append(newWaiters, oldWaiters[i]) } } f.fakeClock.waiters = newWaiters - return !f.waiter.fired + return stopped } -// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet -// fired, or false otherwise. +// Reset conditionally updates the firing time of the timer. If the +// timer has neither fired nor been stopped then this call resets the +// timer to the fake clock's "now" + d and returns true, otherwise +// this call returns false. This is like time.Timer::Reset. 
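The conditional Stop and Reset semantics documented above mirror time.Timer: both report whether the timer was still pending. A small sketch of the fake clock in use (the import path and constructors are from this package; Step, which advances the fake time, is not shown in these hunks, and the durations are illustrative):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	fc := clock.NewFakeClock(time.Now())
	timer := fc.NewTimer(time.Minute)

	// Advancing the fake clock past the timer's target fires it.
	fc.Step(2 * time.Minute)
	fmt.Println("fired at", <-timer.C())

	// The timer has already fired, so Stop and Reset both report false.
	fmt.Println("stopped:", timer.Stop())
	fmt.Println("reset:", timer.Reset(time.Second))
}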
func (f *fakeTimer) Reset(d time.Duration) bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() - - active := !f.waiter.fired - - f.waiter.fired = false - f.waiter.targetTime = f.fakeClock.time.Add(d) - - return active + waiters := f.fakeClock.waiters + seekChan := f.waiter.destChan + for i := range waiters { + if waiters[i].destChan == seekChan { + waiters[i].targetTime = f.fakeClock.time.Add(d) + return true + } + } + return false } type Ticker interface { diff --git a/vendor/k8s.io/apimachinery/pkg/util/duration/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/duration/BUILD.bazel deleted file mode 100644 index 1a929062b1d5c..0000000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/duration/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["duration.go"], - importmap = "k8s.io/kops/vendor/k8s.io/apimachinery/pkg/util/duration", - importpath = "k8s.io/apimachinery/pkg/util/duration", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go b/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go deleted file mode 100644 index 961ec5ed8b2db..0000000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package duration - -import ( - "fmt" - "time" -) - -// ShortHumanDuration returns a succint representation of the provided duration -// with limited precision for consumption by humans. -func ShortHumanDuration(d time.Duration) string { - // Allow deviation no more than 2 seconds(excluded) to tolerate machine time - // inconsistence, it can be considered as almost now. - if seconds := int(d.Seconds()); seconds < -1 { - return fmt.Sprintf("") - } else if seconds < 0 { - return fmt.Sprintf("0s") - } else if seconds < 60 { - return fmt.Sprintf("%ds", seconds) - } else if minutes := int(d.Minutes()); minutes < 60 { - return fmt.Sprintf("%dm", minutes) - } else if hours := int(d.Hours()); hours < 24 { - return fmt.Sprintf("%dh", hours) - } else if hours < 24*365 { - return fmt.Sprintf("%dd", hours/24) - } - return fmt.Sprintf("%dy", int(d.Hours()/24/365)) -} - -// HumanDuration returns a succint representation of the provided duration -// with limited precision for consumption by humans. It provides ~2-3 significant -// figures of duration. -func HumanDuration(d time.Duration) string { - // Allow deviation no more than 2 seconds(excluded) to tolerate machine time - // inconsistence, it can be considered as almost now. 
- if seconds := int(d.Seconds()); seconds < -1 { - return fmt.Sprintf("") - } else if seconds < 0 { - return fmt.Sprintf("0s") - } else if seconds < 60*2 { - return fmt.Sprintf("%ds", seconds) - } - minutes := int(d / time.Minute) - if minutes < 10 { - s := int(d/time.Second) % 60 - if s == 0 { - return fmt.Sprintf("%dm", minutes) - } - return fmt.Sprintf("%dm%ds", minutes, s) - } else if minutes < 60*3 { - return fmt.Sprintf("%dm", minutes) - } - hours := int(d / time.Hour) - if hours < 8 { - m := int(d/time.Minute) % 60 - if m == 0 { - return fmt.Sprintf("%dh", hours) - } - return fmt.Sprintf("%dh%dm", hours, m) - } else if hours < 48 { - return fmt.Sprintf("%dh", hours) - } else if hours < 24*8 { - h := hours % 24 - if h == 0 { - return fmt.Sprintf("%dd", hours/24) - } - return fmt.Sprintf("%dd%dh", hours/24, h) - } else if hours < 24*365*2 { - return fmt.Sprintf("%dd", hours/24) - } else if hours < 24*365*8 { - return fmt.Sprintf("%dy%dd", hours/24/365, (hours/24)%365) - } - return fmt.Sprintf("%dy", int(hours/24/365)) -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 1ac96c9eb6ea9..64cbc77033d15 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -17,25 +17,16 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto -/* - Package intstr is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto - - It has these top-level messages: - IntOrString -*/ package intstr import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - + io "io" math "math" + math_bits "math/bits" - io "io" + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. @@ -49,17 +40,69 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *IntOrString) Reset() { *m = IntOrString{} } -func (*IntOrString) ProtoMessage() {} -func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} +func (*IntOrString) Descriptor() ([]byte, []int) { + return fileDescriptor_94e046ae3ce6121c, []int{0} +} +func (m *IntOrString) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IntOrString) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntOrString.Merge(m, src) +} +func (m *IntOrString) XXX_Size() int { + return m.Size() +} +func (m *IntOrString) XXX_DiscardUnknown() { + xxx_messageInfo_IntOrString.DiscardUnknown(m) +} + +var xxx_messageInfo_IntOrString proto.InternalMessageInfo func init() { proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c) +} + +var fileDescriptor_94e046ae3ce6121c = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, + 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, + 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, + 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, + 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, + 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, + 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, + 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, + 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, + 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, + 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, + 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, + 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, + 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, + 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, + 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, + 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, + 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, + 0x64, 0x01, 0x00, 0x00, +} + func (m *IntOrString) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -67,33 +110,44 @@ func (m *IntOrString) Marshal() (dAtA []byte, err error) { } func (m *IntOrString) MarshalTo(dAtA 
[]byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) - dAtA[i] = 0x1a - i++ + i -= len(m.StrVal) + copy(dAtA[i:], m.StrVal) i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) - i += copy(dAtA[i:], m.StrVal) - return i, nil + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *IntOrString) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Type)) @@ -104,14 +158,7 @@ func (m *IntOrString) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -131,7 +178,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -159,7 +206,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= (Type(b) & 0x7F) << shift + m.Type |= Type(b&0x7F) << shift if b < 0x80 { break } @@ -178,7 +225,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.IntVal |= (int32(b) & 0x7F) << shift + m.IntVal |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -197,7 +244,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -207,6 +254,9 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -221,6 +271,9 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -287,10 +340,13 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } return iNdEx, nil case 3: for { @@ -319,6 +375,9 @@ func skipGenerated(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } } return iNdEx, nil case 4: @@ -337,30 +396,3 @@ var ( ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - 
// 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, - 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, - 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, - 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, - 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, - 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, - 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, - 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, - 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, - 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, - 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, - 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, - 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, - 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, - 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, - 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, - 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, - 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, - 0x64, 0x01, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 5b26ed262631b..2df62955538f8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -45,7 +45,7 @@ type IntOrString struct { } // Type represents the stored type of IntOrString. -type Type int +type Type int64 const ( Int Type = iota // The IntOrString holds an int. @@ -122,11 +122,11 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { // the OpenAPI spec of this type. // // See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ IntOrString) OpenAPISchemaType() []string { return []string{"string"} } +func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } // OpenAPISchemaFormat is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. 
-func (_ IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } +func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } func (intstr *IntOrString) Fuzz(c fuzz.Continue) { if intstr == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 10c8cb837ed5f..0e2e301754763 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -19,6 +19,7 @@ package json import ( "bytes" "encoding/json" + "fmt" "io" ) @@ -34,6 +35,9 @@ func Marshal(v interface{}) ([]byte, error) { return json.Marshal(v) } +// limit recursive depth to prevent stack overflow errors +const maxDepth = 10000 + // Unmarshal unmarshals the given data // If v is a *map[string]interface{}, numbers are converted to int64 or float64 func Unmarshal(data []byte, v interface{}) error { @@ -48,7 +52,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertMapNumbers(*v) + return convertMapNumbers(*v, 0) case *[]interface{}: // Build a decoder from the given data @@ -60,7 +64,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertSliceNumbers(*v) + return convertSliceNumbers(*v, 0) default: return json.Unmarshal(data, v) @@ -69,16 +73,20 @@ func Unmarshal(data []byte, v interface{}) error { // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited -func convertMapNumbers(m map[string]interface{}) error { +func convertMapNumbers(m map[string]interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + var err error for k, v := range m { switch v := v.(type) { case json.Number: m[k], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v) + err = convertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v) + err = convertSliceNumbers(v, depth+1) } if err != nil { return err @@ -89,16 +97,20 @@ func convertMapNumbers(m map[string]interface{}) error { // convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. 
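The wrapper's observable behavior, as its doc comments above state, is that numbers decoded into untyped maps or slices come back as int64 or float64 rather than json.Number, and the traversal is now depth-bounded by maxDepth. A short sketch (the input literal and variable names are illustrative):

package main

import (
	"fmt"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	var obj map[string]interface{}
	data := []byte(`{"replicas": 3, "resources": {"cpu": 0.5}}`)
	if err := utiljson.Unmarshal(data, &obj); err != nil {
		panic(err)
	}
	// After the post-processing pass, whole numbers are int64 and the rest float64.
	fmt.Printf("%T %T\n", obj["replicas"], obj["resources"].(map[string]interface{})["cpu"])
	// Output: int64 float64
}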
// values which are map[string]interface{} or []interface{} are recursively visited -func convertSliceNumbers(s []interface{}) error { +func convertSliceNumbers(s []interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + var err error for i, v := range s { switch v := v.(type) { case json.Number: s[i], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v) + err = convertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v) + err = convertSliceNumbers(v, depth+1) } if err != nil { return err diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 078f00d9b979d..f9540c63bb25b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -101,6 +101,9 @@ func SetOldTransportDefaults(t *http.Transport) *http.Transport { if t.TLSHandshakeTimeout == 0 { t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout } + if t.IdleConnTimeout == 0 { + t.IdleConnTimeout = defaultTransport.IdleConnTimeout + } return t } @@ -111,7 +114,7 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { // Allow clients to disable http2 if needed. if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { klog.Infof("HTTP2 has been explicitly disabled") - } else { + } else if allowsHTTP2(t) { if err := http2.ConfigureTransport(t); err != nil { klog.Warningf("Transport failed http2 configuration: %v", err) } @@ -119,6 +122,21 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { return t } +func allowsHTTP2(t *http.Transport) bool { + if t.TLSClientConfig == nil || len(t.TLSClientConfig.NextProtos) == 0 { + // the transport expressed no NextProto preference, allow + return true + } + for _, p := range t.TLSClientConfig.NextProtos { + if p == http2.NextProtoTLS { + // the transport explicitly allowed http/2 + return true + } + } + // the transport explicitly set NextProtos and excluded http/2 + return false +} + type RoundTripperWrapper interface { http.RoundTripper WrappedRoundTripper() http.RoundTripper diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go index 8344d10c83ae9..2e7cb9499465e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -54,3 +54,20 @@ func IsConnectionReset(err error) bool { } return false } + +// Returns if the given err is "connection refused" error +func IsConnectionRefused(err error) bool { + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + if opErr, ok := err.(*net.OpError); ok { + err = opErr.Err + } + if osErr, ok := err.(*os.SyscallError); ok { + err = osErr.Err + } + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED { + return true + } + return false +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 3c886f46c3f19..1428443f54491 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -18,6 +18,7 @@ package runtime import ( "fmt" + "net/http" "runtime" "sync" "time" @@ -40,11 +41,7 @@ var PanicHandlers = []func(interface{}){logPanic} // called in case of panic. HandleCrash actually crashes, after calling the // handlers and logging the panic message. // -// TODO: remove this function. 
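The new IsConnectionRefused helper added above peels off the *url.Error, *net.OpError and *os.SyscallError wrappers that the standard HTTP client layers around a raw syscall.ECONNREFUSED. A hedged usage sketch (the port and retry policy are illustrative only) of treating just that case as retryable:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// Nothing is expected to listen on this port, so the GET should fail with
	// a wrapped ECONNREFUSED that IsConnectionRefused can recognize.
	const url = "http://127.0.0.1:59999/healthz"

	for attempt := 1; attempt <= 3; attempt++ {
		resp, err := http.Get(url)
		if err == nil {
			resp.Body.Close()
			fmt.Println("endpoint is up")
			return
		}
		if !utilnet.IsConnectionRefused(err) {
			// Anything other than "connection refused" is treated as fatal.
			fmt.Println("giving up:", err)
			return
		}
		fmt.Printf("attempt %d: connection refused, retrying\n", attempt)
		time.Sleep(500 * time.Millisecond)
	}
}
```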
We are switching to a world where it's safe for -// apiserver to panic, since it will be restarted by kubelet. At the beginning -// of the Kubernetes project, nothing was going to restart apiserver and so -// catching panics was important. But it's actually much simpler for monitoring -// software if we just exit when an unexpected panic happens. +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { for _, fn := range PanicHandlers { @@ -60,8 +57,16 @@ func HandleCrash(additionalHandlers ...func(interface{})) { } } -// logPanic logs the caller tree when a panic occurs. +// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). func logPanic(r interface{}) { + if r == http.ErrAbortHandler { + // honor the http.ErrAbortHandler sentinel panic value: + // ErrAbortHandler is a sentinel panic value to abort a handler. + // While any panic from ServeHTTP aborts the response to the client, + // panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log. + return + } + // Same as stdlib http server code. Manually allocate stack trace buffer size // to prevent excessively large logs const size = 64 << 10 diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go index 766f4501e0f28..9bfa85d43d411 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -46,17 +46,19 @@ func ByteKeySet(theMap interface{}) Byte { } // Insert adds items to the set. -func (s Byte) Insert(items ...byte) { +func (s Byte) Insert(items ...byte) Byte { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Byte) Delete(items ...byte) { +func (s Byte) Delete(items ...byte) Byte { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go index a0a513cd9b511..88bd7096791e6 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -46,17 +46,19 @@ func IntKeySet(theMap interface{}) Int { } // Insert adds items to the set. -func (s Int) Insert(items ...int) { +func (s Int) Insert(items ...int) Int { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Int) Delete(items ...int) { +func (s Int) Delete(items ...int) Int { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go index 584eabc8b7618..96a48555426f3 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go @@ -46,17 +46,19 @@ func Int32KeySet(theMap interface{}) Int32 { } // Insert adds items to the set. -func (s Int32) Insert(items ...int32) { +func (s Int32) Insert(items ...int32) Int32 { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. 
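The typed set changes in this run of hunks (Byte, Int, Int32 here, and Int64 and String just below) make Insert and Delete return the receiver, so construction and mutation can be chained. A minimal sketch with the String variant:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Insert and Delete now return the set, so calls can be chained off the
	// constructor instead of needing separate statements.
	nodes := sets.NewString("node-a", "node-b").
		Insert("node-c", "node-d").
		Delete("node-b")

	fmt.Println(nodes.Has("node-b")) // false
	fmt.Println(nodes.List())        // [node-a node-c node-d]
}
```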
-func (s Int32) Delete(items ...int32) { +func (s Int32) Delete(items ...int32) Int32 { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go index 9ca9af0c59187..b375a1b065c9c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -46,17 +46,19 @@ func Int64KeySet(theMap interface{}) Int64 { } // Insert adds items to the set. -func (s Int64) Insert(items ...int64) { +func (s Int64) Insert(items ...int64) Int64 { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Int64) Delete(items ...int64) { +func (s Int64) Delete(items ...int64) Int64 { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go index ba00ad7df4e7e..e6f37db8874b9 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -46,17 +46,19 @@ func StringKeySet(theMap interface{}) String { } // Insert adds items to the set. -func (s String) Insert(items ...string) { +func (s String) Insert(items ...string) String { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s String) Delete(items ...string) { +func (s String) Delete(items ...string) String { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index 4767fd1dda104..1d372a5259cbc 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -116,6 +116,10 @@ const ( // This is similar to ErrorTypeInvalid, but the error will not include the // too-long value. See TooLong(). ErrorTypeTooLong ErrorType = "FieldValueTooLong" + // ErrorTypeTooMany is used to report "too many". This is used to + // report that a given list has too many items. This is similar to FieldValueTooLong, + // but the error indicates quantity instead of length. + ErrorTypeTooMany ErrorType = "FieldValueTooMany" // ErrorTypeInternal is used to report other errors that are not related // to user input. See InternalError(). ErrorTypeInternal ErrorType = "InternalError" @@ -138,6 +142,8 @@ func (t ErrorType) String() string { return "Forbidden" case ErrorTypeTooLong: return "Too long" + case ErrorTypeTooMany: + return "Too many" case ErrorTypeInternal: return "Internal error" default: @@ -201,6 +207,13 @@ func TooLong(field *Path, value interface{}, maxLength int) *Error { return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} } +// TooMany returns a *Error indicating "too many". This is used to +// report that a given list has too many items. This is similar to TooLong, +// but the returned error indicates quantity instead of length. 
+func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { + return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)} +} + // InternalError returns a *Error indicating "internal error". This is used // to signal that an error was found that was not directly related to user // input. The err argument must be non-nil. diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go index 9bc928a4e824a..8946926aac3ed 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/version/version.go +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -183,6 +183,14 @@ func (v *Version) WithPreRelease(preRelease string) *Version { return &result } +// WithBuildMetadata returns copy of the version object with requested buildMetadata +func (v *Version) WithBuildMetadata(buildMetadata string) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), v.Patch()} + result.buildMetadata = buildMetadata + return &result +} + // String converts a Version back to a string; note that for versions parsed with // ParseGeneric, this will not include the trailing uninterpreted portion of the version // number. diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index bc6b18d2b4630..386c3e7ea0252 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -207,23 +207,31 @@ type ConditionFunc func() (done bool, err error) type Backoff struct { // The initial duration. Duration time.Duration - // Duration is multiplied by factor each iteration. Must be greater - // than or equal to zero. + // Duration is multiplied by factor each iteration, if factor is not zero + // and the limits imposed by Steps and Cap have not been reached. + // Should not be negative. + // The jitter does not contribute to the updates to the duration parameter. Factor float64 - // The amount of jitter applied each iteration. Jitter is applied after - // cap. + // The sleep at each iteration is the duration plus an additional + // amount chosen uniformly at random from the interval between + // zero and `jitter*duration`. Jitter float64 - // The number of steps before duration stops changing. If zero, initial - // duration is always used. Used for exponential backoff in combination - // with Factor. + // The remaining number of iterations in which the duration + // parameter may change (but progress can be stopped earlier by + // hitting the cap). If not positive, the duration is not + // changed. Used for exponential backoff in combination with + // Factor and Cap. Steps int - // The returned duration will never be greater than cap *before* jitter - // is applied. The actual maximum cap is `cap * (1.0 + jitter)`. + // A limit on revised values of the duration parameter. If a + // multiplication by the factor parameter would make the duration + // exceed the cap then the duration is set to the cap and the + // steps parameter is set to zero. Cap time.Duration } -// Step returns the next interval in the exponential backoff. This method -// will mutate the provided backoff. +// Step (1) returns an amount of time to sleep determined by the +// original Duration and Jitter and (2) mutates the provided Backoff +// to update its Steps and Duration. 
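The reworked Backoff documentation above, together with the Step and ExponentialBackoff implementations that follow, spells out how Duration, Factor, Jitter, Steps and Cap interact and when the retry loop stops. A usage sketch, where the condition is only a stand-in for a real readiness probe:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{
		Duration: 100 * time.Millisecond, // first sleep
		Factor:   2.0,                    // double the duration after each check
		Jitter:   0.1,                    // each sleep gets up to +10% random noise
		Steps:    5,                      // at most 5 condition checks
		Cap:      1 * time.Second,        // the base duration never grows past this
	}

	attempts := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		// Stand-in for a real readiness check; succeeds on the third try.
		return attempts >= 3, nil
	})
	if err == wait.ErrWaitTimeout {
		fmt.Println("condition never became true within", backoff.Steps, "checks")
		return
	}
	fmt.Println("succeeded after", attempts, "attempts, err =", err)
}
```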
func (b *Backoff) Step() time.Duration { if b.Steps < 1 { if b.Jitter > 0 { @@ -271,14 +279,14 @@ func contextForChannel(parentCh <-chan struct{}) (context.Context, context.Cance // ExponentialBackoff repeats a condition check with exponential backoff. // -// It checks the condition up to Steps times, increasing the wait by multiplying -// the previous duration by Factor. -// -// If Jitter is greater than zero, a random amount of each duration is added -// (between duration and duration*(1+jitter)). -// -// If the condition never returns true, ErrWaitTimeout is returned. All other -// errors terminate immediately. +// It repeatedly checks the condition and then sleeps, using `backoff.Step()` +// to determine the length of the sleep and adjust Duration and Steps. +// Stops and returns as soon as: +// 1. the condition check returns true or an error, +// 2. `backoff.Steps` checks of the condition have been done, or +// 3. a sleep truncated by the cap on duration has been completed. +// In case (1) the returned error is what the condition function returned. +// In all other cases, ErrWaitTimeout is returned. func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { for backoff.Steps > 0 { if ok, err := condition(); err != nil || ok { diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go index 8db95f927c984..f695fb5f9bd91 100644 --- a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go @@ -130,6 +130,10 @@ func (o *ResourceBuilderFlags) ToBuilder(restClientGetter RESTClientGetter, reso builder := resource.NewBuilder(restClientGetter). NamespaceParam(namespace).DefaultNamespace() + if o.AllNamespaces != nil { + builder.AllNamespaces(*o.AllNamespaces) + } + if o.Scheme != nil { builder.WithScheme(o.Scheme, o.Scheme.PrioritizedVersionsAllGroups()...) 
} else { diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/BUILD.bazel b/vendor/k8s.io/cli-runtime/pkg/printers/BUILD.bazel index 57c5dd09b9b46..db5a5c4222989 100644 --- a/vendor/k8s.io/cli-runtime/pkg/printers/BUILD.bazel +++ b/vendor/k8s.io/cli-runtime/pkg/printers/BUILD.bazel @@ -18,6 +18,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/json.go b/vendor/k8s.io/cli-runtime/pkg/printers/json.go index bb5bec7488cdc..1c35b97d7358c 100644 --- a/vendor/k8s.io/cli-runtime/pkg/printers/json.go +++ b/vendor/k8s.io/cli-runtime/pkg/printers/json.go @@ -22,7 +22,9 @@ import ( "fmt" "io" "reflect" + "sync/atomic" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/yaml" @@ -41,6 +43,20 @@ func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error { } switch obj := obj.(type) { + case *metav1.WatchEvent: + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) + } + data, err := json.Marshal(obj) + if err != nil { + return err + } + _, err = w.Write(data) + if err != nil { + return err + } + _, err = w.Write([]byte{'\n'}) + return err case *runtime.Unknown: var buf bytes.Buffer err := json.Indent(&buf, obj.Raw, "", " ") @@ -68,7 +84,10 @@ func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error { // YAMLPrinter is an implementation of ResourcePrinter which outputs an object as YAML. // The input object is assumed to be in the internal version of an API and is converted // to the given version first. -type YAMLPrinter struct{} +// If PrintObj() is called multiple times, objects are separated with a '---' separator. +type YAMLPrinter struct { + printCount int64 +} // PrintObj prints the data as YAML. 
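The YAMLPrinter now keeps a print count so that repeated PrintObj calls (implementation follows below) emit standard multi-document YAML separated by "---". A hedged sketch of that behaviour; the ConfigMap contents are illustrative:

```go
package main

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/cli-runtime/pkg/printers"
)

func main() {
	printer := &printers.YAMLPrinter{}

	for _, name := range []string{"first", "second"} {
		cm := &corev1.ConfigMap{
			TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
			ObjectMeta: metav1.ObjectMeta{Name: name},
			Data:       map[string]string{"key": "value"},
		}
		// The second and later calls are preceded by a "---" document separator.
		if err := printer.PrintObj(cm, os.Stdout); err != nil {
			panic(err)
		}
	}
}
```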
func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { @@ -79,7 +98,28 @@ func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { return fmt.Errorf(InternalObjectPrinterErr) } + count := atomic.AddInt64(&p.printCount, 1) + if count > 1 { + if _, err := w.Write([]byte("---\n")); err != nil { + return err + } + } + switch obj := obj.(type) { + case *metav1.WatchEvent: + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) + } + data, err := json.Marshal(obj) + if err != nil { + return err + } + data, err = yaml.JSONToYAML(data) + if err != nil { + return err + } + _, err = w.Write(data) + return err case *runtime.Unknown: data, err := yaml.JSONToYAML(obj.Raw) if err != nil { diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/name.go b/vendor/k8s.io/cli-runtime/pkg/printers/name.go index d04c5c6bbc79f..086166af27204 100644 --- a/vendor/k8s.io/cli-runtime/pkg/printers/name.go +++ b/vendor/k8s.io/cli-runtime/pkg/printers/name.go @@ -23,6 +23,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -42,6 +43,11 @@ type NamePrinter struct { // PrintObj is an implementation of ResourcePrinter.PrintObj which decodes the object // and print "resource/name" pair. If the object is a List, print all items in it. func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error { + switch castObj := obj.(type) { + case *metav1.WatchEvent: + obj = castObj.Object.Object + } + // we use reflect.Indirect here in order to obtain the actual value from a pointer. // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. // we need an actual value in order to retrieve the package path for an object. diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go b/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go index 99a2f1f753790..946016c134921 100644 --- a/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go +++ b/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go @@ -372,10 +372,9 @@ func (v ContinueOnErrorVisitor) Visit(fn VisitorFunc) error { // FlattenListVisitor flattens any objects that runtime.ExtractList recognizes as a list // - has an "Items" public field that is a slice of runtime.Objects or objects satisfying -// that interface - into multiple Infos. An error on any sub item (for instance, if a List -// contains an object that does not have a registered client or resource) will terminate -// the visit. -// TODO: allow errors to be aggregated? +// that interface - into multiple Infos. Returns nil in the case of no errors. +// When an error is hit on sub items (for instance, if a List contains an object that does +// not have a registered client or resource), returns an aggregate error. 
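The FlattenListVisitor described above (its Visit implementation follows) now collects per-item failures instead of bailing on the first one; the property it relies on is that utilerrors.NewAggregate collapses an empty error list to nil. A small sketch of that behaviour, assuming k8s.io/apimachinery/pkg/util/errors:

```go
package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// No failures: the aggregate is nil, so callers can return it directly.
	fmt.Println(utilerrors.NewAggregate(nil) == nil) // true

	// Some failures: the aggregate is non-nil and carries every message,
	// so one bad list item no longer hides the rest.
	errs := []error{
		errors.New("no kind registered for item 2"),
		errors.New("visitor callback failed for item 5"),
	}
	fmt.Println(utilerrors.NewAggregate(errs))
}
```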
type FlattenListVisitor struct { visitor Visitor typer runtime.ObjectTyper @@ -425,20 +424,22 @@ func (v FlattenListVisitor) Visit(fn VisitorFunc) error { if info.Mapping != nil && !info.Mapping.GroupVersionKind.Empty() { preferredGVKs = append(preferredGVKs, info.Mapping.GroupVersionKind) } - + errs := []error{} for i := range items { item, err := v.mapper.infoForObject(items[i], v.typer, preferredGVKs) if err != nil { - return err + errs = append(errs, err) + continue } if len(info.ResourceVersion) != 0 { item.ResourceVersion = info.ResourceVersion } if err := fn(item, nil); err != nil { - return err + errs = append(errs, err) } } - return nil + return utilerrors.NewAggregate(errs) + }) } diff --git a/vendor/k8s.io/client-go/discovery/doc.go b/vendor/k8s.io/client-go/discovery/doc.go index 76495588ef7d7..6baa1ef2a51ca 100644 --- a/vendor/k8s.io/client-go/discovery/doc.go +++ b/vendor/k8s.io/client-go/discovery/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package discovery provides ways to discover server-supported // API groups, versions and resources. -package discovery +package discovery // import "k8s.io/client-go/discovery" diff --git a/vendor/k8s.io/client-go/informers/BUILD.bazel b/vendor/k8s.io/client-go/informers/BUILD.bazel index 7abfb695ff03c..521cd6c00a14a 100644 --- a/vendor/k8s.io/client-go/informers/BUILD.bazel +++ b/vendor/k8s.io/client-go/informers/BUILD.bazel @@ -10,6 +10,7 @@ go_library( importpath = "k8s.io/client-go/informers", visibility = ["//visibility:public"], deps = [ + "//vendor/k8s.io/api/admissionregistration/v1:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", @@ -25,6 +26,7 @@ go_library( "//vendor/k8s.io/api/coordination/v1:go_default_library", "//vendor/k8s.io/api/coordination/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/discovery/v1alpha1:go_default_library", "//vendor/k8s.io/api/events/v1beta1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/api/networking/v1:go_default_library", @@ -53,6 +55,7 @@ go_library( "//vendor/k8s.io/client-go/informers/certificates:go_default_library", "//vendor/k8s.io/client-go/informers/coordination:go_default_library", "//vendor/k8s.io/client-go/informers/core:go_default_library", + "//vendor/k8s.io/client-go/informers/discovery:go_default_library", "//vendor/k8s.io/client-go/informers/events:go_default_library", "//vendor/k8s.io/client-go/informers/extensions:go_default_library", "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/BUILD.bazel b/vendor/k8s.io/client-go/informers/admissionregistration/BUILD.bazel index 9fe5ab5dfaa1b..9bb51b49f85f0 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/BUILD.bazel +++ b/vendor/k8s.io/client-go/informers/admissionregistration/BUILD.bazel @@ -7,6 +7,7 @@ go_library( importpath = "k8s.io/client-go/informers/admissionregistration", visibility = ["//visibility:public"], deps = [ + "//vendor/k8s.io/client-go/informers/admissionregistration/v1:go_default_library", "//vendor/k8s.io/client-go/informers/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", ], diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go 
b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go index f56fe31e24b21..14a6db438d5ad 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go @@ -19,12 +19,15 @@ limitations under the License. package admissionregistration import ( + v1 "k8s.io/client-go/informers/admissionregistration/v1" v1beta1 "k8s.io/client-go/informers/admissionregistration/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) // Interface provides access to each of this group's versions. type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -40,6 +43,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/BUILD.bazel b/vendor/k8s.io/client-go/informers/admissionregistration/v1/BUILD.bazel new file mode 100644 index 0000000000000..258ba4a3ee56f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "interface.go", + "mutatingwebhookconfiguration.go", + "validatingwebhookconfiguration.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/client-go/informers/admissionregistration/v1", + importpath = "k8s.io/client-go/informers/admissionregistration/v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/admissionregistration/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/listers/admissionregistration/v1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go new file mode 100644 index 0000000000000..1ecae9ecf70ab --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. + MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer + // ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. +func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer { + return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. +func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer { + return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go new file mode 100644 index 0000000000000..4fadd9a216b67 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/admissionregistration/v1" + cache "k8s.io/client-go/tools/cache" +) + +// MutatingWebhookConfigurationInformer provides access to a shared informer and lister for +// MutatingWebhookConfigurations. +type MutatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.MutatingWebhookConfigurationLister +} + +type mutatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistrationv1.MutatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistrationv1.MutatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *mutatingWebhookConfigurationInformer) Lister() v1.MutatingWebhookConfigurationLister { + return v1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go new file mode 100644 index 0000000000000..1c648e6081a0b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + time "time" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/admissionregistration/v1" + cache "k8s.io/client-go/tools/cache" +) + +// ValidatingWebhookConfigurationInformer provides access to a shared informer and lister for +// ValidatingWebhookConfigurations. +type ValidatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ValidatingWebhookConfigurationLister +} + +type validatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistrationv1.ValidatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistrationv1.ValidatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *validatingWebhookConfigurationInformer) Lister() v1.ValidatingWebhookConfigurationLister { + return v1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/discovery/BUILD.bazel b/vendor/k8s.io/client-go/informers/discovery/BUILD.bazel new file mode 100644 index 0000000000000..535bec72cccf5 --- /dev/null +++ 
b/vendor/k8s.io/client-go/informers/discovery/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["interface.go"], + importmap = "k8s.io/kops/vendor/k8s.io/client-go/informers/discovery", + importpath = "k8s.io/client-go/informers/discovery", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/client-go/informers/discovery/v1alpha1:go_default_library", + "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", + ], +) diff --git a/vendor/k8s.io/client-go/informers/discovery/interface.go b/vendor/k8s.io/client-go/informers/discovery/interface.go new file mode 100644 index 0000000000000..eb8062feec7ea --- /dev/null +++ b/vendor/k8s.io/client-go/informers/discovery/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package discovery + +import ( + v1alpha1 "k8s.io/client-go/informers/discovery/v1alpha1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. 
+func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/discovery/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/BUILD.bazel new file mode 100644 index 0000000000000..6d5780e41a768 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "endpointslice.go", + "interface.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/client-go/informers/discovery/v1alpha1", + importpath = "k8s.io/client-go/informers/discovery/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/discovery/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/listers/discovery/v1alpha1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) diff --git a/vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go new file mode 100644 index 0000000000000..a545ce155147d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/endpointslice.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/discovery/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// EndpointSliceInformer provides access to a shared informer and lister for +// EndpointSlices. +type EndpointSliceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.EndpointSliceLister +} + +type endpointSliceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEndpointSliceInformer constructs a new informer for EndpointSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEndpointSliceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEndpointSliceInformer constructs a new informer for EndpointSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.DiscoveryV1alpha1().EndpointSlices(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.DiscoveryV1alpha1().EndpointSlices(namespace).Watch(options) + }, + }, + &discoveryv1alpha1.EndpointSlice{}, + resyncPeriod, + indexers, + ) +} + +func (f *endpointSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEndpointSliceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *endpointSliceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&discoveryv1alpha1.EndpointSlice{}, f.defaultInformer) +} + +func (f *endpointSliceInformer) Lister() v1alpha1.EndpointSliceLister { + return v1alpha1.NewEndpointSliceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/discovery/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/interface.go new file mode 100644 index 0000000000000..711dcae52cca4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/discovery/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // EndpointSlices returns a EndpointSliceInformer. + EndpointSlices() EndpointSliceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// EndpointSlices returns a EndpointSliceInformer. +func (v *version) EndpointSlices() EndpointSliceInformer { + return &endpointSliceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index b3a043009a18d..c5230a491d33a 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -34,6 +34,7 @@ import ( certificates "k8s.io/client-go/informers/certificates" coordination "k8s.io/client-go/informers/coordination" core "k8s.io/client-go/informers/core" + discovery "k8s.io/client-go/informers/discovery" events "k8s.io/client-go/informers/events" extensions "k8s.io/client-go/informers/extensions" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" @@ -196,6 +197,7 @@ type SharedInformerFactory interface { Certificates() certificates.Interface Coordination() coordination.Interface Core() core.Interface + Discovery() discovery.Interface Events() events.Interface Extensions() extensions.Interface Networking() networking.Interface @@ -239,6 +241,10 @@ func (f *sharedInformerFactory) Core() core.Interface { return core.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Discovery() discovery.Interface { + return discovery.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Events() events.Interface { return events.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index 8b986a963df32..58f39460c2665 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -21,8 +21,9 @@ package informers import ( "fmt" + v1 "k8s.io/api/admissionregistration/v1" v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" v1beta2 "k8s.io/api/apps/v1beta2" v1alpha1 "k8s.io/api/auditregistration/v1alpha1" @@ -36,6 +37,7 @@ import ( coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" + discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" @@ -83,22 +85,28 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=admissionregistration.k8s.io, Version=v1beta1 + // Group=admissionregistration.k8s.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().MutatingWebhookConfigurations().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer()}, nil + + // Group=admissionregistration.k8s.io, 
Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().MutatingWebhookConfigurations().Informer()}, nil case v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations().Informer()}, nil // Group=apps, Version=v1 - case v1.SchemeGroupVersion.WithResource("controllerrevisions"): + case appsv1.SchemeGroupVersion.WithResource("controllerrevisions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ControllerRevisions().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("daemonsets"): + case appsv1.SchemeGroupVersion.WithResource("daemonsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().DaemonSets().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("deployments"): + case appsv1.SchemeGroupVersion.WithResource("deployments"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().Deployments().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("replicasets"): + case appsv1.SchemeGroupVersion.WithResource("replicasets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ReplicaSets().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("statefulsets"): + case appsv1.SchemeGroupVersion.WithResource("statefulsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().StatefulSets().Informer()}, nil // Group=apps, Version=v1beta1 @@ -195,6 +203,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case corev1.SchemeGroupVersion.WithResource("serviceaccounts"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ServiceAccounts().Informer()}, nil + // Group=discovery.k8s.io, Version=v1alpha1 + case discoveryv1alpha1.SchemeGroupVersion.WithResource("endpointslices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Discovery().V1alpha1().EndpointSlices().Informer()}, nil + // Group=events.k8s.io, Version=v1beta1 case eventsv1beta1.SchemeGroupVersion.WithResource("events"): return &genericInformer{resource: resource.GroupResource(), informer: f.Events().V1beta1().Events().Informer()}, nil diff --git a/vendor/k8s.io/client-go/kubernetes/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/BUILD.bazel index e1b106fabf02f..448891c3b3216 100644 --- a/vendor/k8s.io/client-go/kubernetes/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/BUILD.bazel @@ -12,6 +12,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library", @@ -31,6 +32,7 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/typed/coordination/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + 
"//vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/networking/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index fb889e6df5392..f8a237f614d82 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -19,7 +19,10 @@ limitations under the License. package kubernetes import ( + "fmt" + discovery "k8s.io/client-go/discovery" + admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" @@ -39,6 +42,7 @@ import ( coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + discoveryv1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" @@ -62,6 +66,7 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface + AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface AppsV1() appsv1.AppsV1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface @@ -81,6 +86,7 @@ type Interface interface { CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface + DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Interface EventsV1beta1() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface NetworkingV1() networkingv1.NetworkingV1Interface @@ -104,6 +110,7 @@ type Interface interface { // version included in a Clientset. 
type Clientset struct { *discovery.DiscoveryClient + admissionregistrationV1 *admissionregistrationv1.AdmissionregistrationV1Client admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client appsV1 *appsv1.AppsV1Client appsV1beta1 *appsv1beta1.AppsV1beta1Client @@ -123,6 +130,7 @@ type Clientset struct { coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client coordinationV1 *coordinationv1.CoordinationV1Client coreV1 *corev1.CoreV1Client + discoveryV1alpha1 *discoveryv1alpha1.DiscoveryV1alpha1Client eventsV1beta1 *eventsv1beta1.EventsV1beta1Client extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client networkingV1 *networkingv1.NetworkingV1Client @@ -142,6 +150,11 @@ type Clientset struct { storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client } +// AdmissionregistrationV1 retrieves the AdmissionregistrationV1Client +func (c *Clientset) AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface { + return c.admissionregistrationV1 +} + // AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { return c.admissionregistrationV1beta1 @@ -237,6 +250,11 @@ func (c *Clientset) CoreV1() corev1.CoreV1Interface { return c.coreV1 } +// DiscoveryV1alpha1 retrieves the DiscoveryV1alpha1Client +func (c *Clientset) DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Interface { + return c.discoveryV1alpha1 +} + // EventsV1beta1 retrieves the EventsV1beta1Client func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { return c.eventsV1beta1 @@ -331,13 +349,22 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { } // NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } var cs Clientset var err error + cs.admissionregistrationV1, err = admissionregistrationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -414,6 +441,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.discoveryV1alpha1, err = discoveryv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -494,6 +525,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. 
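NewForConfig above now fails fast when QPS is set without a positive Burst, because it cannot build a token-bucket rate limiter otherwise. A hedged sketch showing the two fields being set together; the host and values are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Host, QPS and Burst values are illustrative.
	config := &rest.Config{
		Host:  "https://kubernetes.example.com",
		QPS:   50,  // client-side request rate
		Burst: 100, // must be > 0 whenever QPS is set and no RateLimiter is provided
	}

	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		// With QPS set and Burst <= 0, NewForConfig now returns an error here
		// instead of silently building an unusable token-bucket limiter.
		panic(err)
	}
	fmt.Println("client ready:", clientset != nil)
}
```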
func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset + cs.admissionregistrationV1 = admissionregistrationv1.NewForConfigOrDie(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) @@ -513,6 +545,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.coordinationV1beta1 = coordinationv1beta1.NewForConfigOrDie(c) cs.coordinationV1 = coordinationv1.NewForConfigOrDie(c) cs.coreV1 = corev1.NewForConfigOrDie(c) + cs.discoveryV1alpha1 = discoveryv1alpha1.NewForConfigOrDie(c) cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) @@ -538,6 +571,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. func New(c rest.Interface) *Clientset { var cs Clientset + cs.admissionregistrationV1 = admissionregistrationv1.New(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) cs.appsV1 = appsv1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) @@ -557,6 +591,7 @@ func New(c rest.Interface) *Clientset { cs.coordinationV1beta1 = coordinationv1beta1.New(c) cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) + cs.discoveryV1alpha1 = discoveryv1alpha1.New(c) cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.networkingV1 = networkingv1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/fake/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/fake/BUILD.bazel index 67cb08eeea2f1..f14886eb8a87e 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/fake/BUILD.bazel @@ -11,6 +11,7 @@ go_library( importpath = "k8s.io/client-go/kubernetes/fake", visibility = ["//visibility:public"], deps = [ + "//vendor/k8s.io/api/admissionregistration/v1:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", @@ -30,6 +31,7 @@ go_library( "//vendor/k8s.io/api/coordination/v1:go_default_library", "//vendor/k8s.io/api/coordination/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/discovery/v1alpha1:go_default_library", "//vendor/k8s.io/api/events/v1beta1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/api/networking/v1:go_default_library", @@ -56,6 +58,8 @@ go_library( "//vendor/k8s.io/client-go/discovery:go_default_library", "//vendor/k8s.io/client-go/discovery/fake:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", @@ -94,6 +98,8 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake:go_default_library", + 
"//vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index 0c73d00abe9dd..e94ed73feaed6 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -24,6 +24,8 @@ import ( "k8s.io/client-go/discovery" fakediscovery "k8s.io/client-go/discovery/fake" clientset "k8s.io/client-go/kubernetes" + admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" + fakeadmissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" fakeadmissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" @@ -62,6 +64,8 @@ import ( fakecoordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake" + discoveryv1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1" + fakediscoveryv1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" fakeeventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" @@ -146,6 +150,11 @@ func (c *Clientset) Tracker() testing.ObjectTracker { var _ clientset.Interface = &Clientset{} +// AdmissionregistrationV1 retrieves the AdmissionregistrationV1Client +func (c *Clientset) AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface { + return &fakeadmissionregistrationv1.FakeAdmissionregistrationV1{Fake: &c.Fake} +} + // AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { return &fakeadmissionregistrationv1beta1.FakeAdmissionregistrationV1beta1{Fake: &c.Fake} @@ -241,6 +250,11 @@ func (c *Clientset) CoreV1() corev1.CoreV1Interface { return &fakecorev1.FakeCoreV1{Fake: &c.Fake} } +// DiscoveryV1alpha1 retrieves the DiscoveryV1alpha1Client +func (c *Clientset) DiscoveryV1alpha1() discoveryv1alpha1.DiscoveryV1alpha1Interface { + return &fakediscoveryv1alpha1.FakeDiscoveryV1alpha1{Fake: &c.Fake} +} + // EventsV1beta1 retrieves the EventsV1beta1Client func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { return &fakeeventsv1beta1.FakeEventsV1beta1{Fake: &c.Fake} diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go b/vendor/k8s.io/client-go/kubernetes/fake/register.go index 62e07afbadf61..07d4456522054 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -19,6 +19,7 @@ limitations under the License. 
package fake import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -38,6 +39,7 @@ import ( coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" + discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" @@ -66,6 +68,7 @@ var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) var parameterCodec = runtime.NewParameterCodec(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ + admissionregistrationv1.AddToScheme, admissionregistrationv1beta1.AddToScheme, appsv1.AddToScheme, appsv1beta1.AddToScheme, @@ -85,6 +88,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, + discoveryv1alpha1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, networkingv1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel index 730183a92e3bc..cba17c3640c89 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel +++ b/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel @@ -10,6 +10,7 @@ go_library( importpath = "k8s.io/client-go/kubernetes/scheme", visibility = ["//visibility:public"], deps = [ + "//vendor/k8s.io/api/admissionregistration/v1:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", @@ -29,6 +30,7 @@ go_library( "//vendor/k8s.io/api/coordination/v1:go_default_library", "//vendor/k8s.io/api/coordination/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/discovery/v1alpha1:go_default_library", "//vendor/k8s.io/api/events/v1beta1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/api/networking/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 8346d26a5c540..c1a2c5198a0a1 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -19,6 +19,7 @@ limitations under the License. 
package scheme import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -38,6 +39,7 @@ import ( coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" + discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" @@ -66,6 +68,7 @@ var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ + admissionregistrationv1.AddToScheme, admissionregistrationv1beta1.AddToScheme, appsv1.AddToScheme, appsv1beta1.AddToScheme, @@ -85,6 +88,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, + discoveryv1alpha1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, networkingv1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/BUILD.bazel new file mode 100644 index 0000000000000..16bbe3b95eab4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "admissionregistration_client.go", + "doc.go", + "generated_expansion.go", + "mutatingwebhookconfiguration.go", + "validatingwebhookconfiguration.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1", + importpath = "k8s.io/client-go/kubernetes/typed/admissionregistration/v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/admissionregistration/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + ], +) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go new file mode 100644 index 0000000000000..751273159e5f5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AdmissionregistrationV1Interface interface { + RESTClient() rest.Interface + MutatingWebhookConfigurationsGetter + ValidatingWebhookConfigurationsGetter +} + +// AdmissionregistrationV1Client is used to interact with features provided by the admissionregistration.k8s.io group. +type AdmissionregistrationV1Client struct { + restClient rest.Interface +} + +func (c *AdmissionregistrationV1Client) MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface { + return newMutatingWebhookConfigurations(c) +} + +func (c *AdmissionregistrationV1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { + return newValidatingWebhookConfigurations(c) +} + +// NewForConfig creates a new AdmissionregistrationV1Client for the given config. +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AdmissionregistrationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AdmissionregistrationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AdmissionregistrationV1Client for the given RESTClient. +func New(c rest.Interface) *AdmissionregistrationV1Client { + return &AdmissionregistrationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AdmissionregistrationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go new file mode 100644 index 0000000000000..3af5d054f1026 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/BUILD.bazel new file mode 100644 index 0000000000000..94f9bfc4e3307 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "fake_admissionregistration_client.go", + "fake_mutatingwebhookconfiguration.go", + "fake_validatingwebhookconfiguration.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake", + importpath = "k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/admissionregistration/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/testing:go_default_library", + ], +) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/doc.go new file mode 100644 index 0000000000000..16f44399065ed --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go new file mode 100644 index 0000000000000..a5a570ad15507 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAdmissionregistrationV1 struct { + *testing.Fake +} + +func (c *FakeAdmissionregistrationV1) MutatingWebhookConfigurations() v1.MutatingWebhookConfigurationInterface { + return &FakeMutatingWebhookConfigurations{c} +} + +func (c *FakeAdmissionregistrationV1) ValidatingWebhookConfigurations() v1.ValidatingWebhookConfigurationInterface { + return &FakeValidatingWebhookConfigurations{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAdmissionregistrationV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go new file mode 100644 index 0000000000000..6e09faf110270 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeMutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type FakeMutatingWebhookConfigurations struct { + Fake *FakeAdmissionregistrationV1 +} + +var mutatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1", Resource: "mutatingwebhookconfigurations"} + +var mutatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1", Kind: "MutatingWebhookConfiguration"} + +// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. +func (c *FakeMutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &admissionregistrationv1.MutatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*admissionregistrationv1.MutatingWebhookConfiguration), err +} + +// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. 
+func (c *FakeMutatingWebhookConfigurations) List(opts v1.ListOptions) (result *admissionregistrationv1.MutatingWebhookConfigurationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &admissionregistrationv1.MutatingWebhookConfigurationList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &admissionregistrationv1.MutatingWebhookConfigurationList{ListMeta: obj.(*admissionregistrationv1.MutatingWebhookConfigurationList).ListMeta} + for _, item := range obj.(*admissionregistrationv1.MutatingWebhookConfigurationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. +func (c *FakeMutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts)) +} + +// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *FakeMutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &admissionregistrationv1.MutatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*admissionregistrationv1.MutatingWebhookConfiguration), err +} + +// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *FakeMutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &admissionregistrationv1.MutatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*admissionregistrationv1.MutatingWebhookConfiguration), err +} + +// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *FakeMutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(mutatingwebhookconfigurationsResource, name), &admissionregistrationv1.MutatingWebhookConfiguration{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeMutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOptions) + + _, err := c.Fake.Invokes(action, &admissionregistrationv1.MutatingWebhookConfigurationList{}) + return err +} + +// Patch applies the patch and returns the patched mutatingWebhookConfiguration. 
+func (c *FakeMutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &admissionregistrationv1.MutatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*admissionregistrationv1.MutatingWebhookConfiguration), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go new file mode 100644 index 0000000000000..a5f8d20456019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeValidatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type FakeValidatingWebhookConfigurations struct { + Fake *FakeAdmissionregistrationV1 +} + +var validatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1", Resource: "validatingwebhookconfigurations"} + +var validatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1", Kind: "ValidatingWebhookConfiguration"} + +// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. +func (c *FakeValidatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &admissionregistrationv1.ValidatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*admissionregistrationv1.ValidatingWebhookConfiguration), err +} + +// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. +func (c *FakeValidatingWebhookConfigurations) List(opts v1.ListOptions) (result *admissionregistrationv1.ValidatingWebhookConfigurationList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &admissionregistrationv1.ValidatingWebhookConfigurationList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &admissionregistrationv1.ValidatingWebhookConfigurationList{ListMeta: obj.(*admissionregistrationv1.ValidatingWebhookConfigurationList).ListMeta} + for _, item := range obj.(*admissionregistrationv1.ValidatingWebhookConfigurationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. +func (c *FakeValidatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts)) +} + +// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *FakeValidatingWebhookConfigurations) Create(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &admissionregistrationv1.ValidatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*admissionregistrationv1.ValidatingWebhookConfiguration), err +} + +// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *FakeValidatingWebhookConfigurations) Update(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &admissionregistrationv1.ValidatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*admissionregistrationv1.ValidatingWebhookConfiguration), err +} + +// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *FakeValidatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(validatingwebhookconfigurationsResource, name), &admissionregistrationv1.ValidatingWebhookConfiguration{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeValidatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOptions) + + _, err := c.Fake.Invokes(action, &admissionregistrationv1.ValidatingWebhookConfigurationList{}) + return err +} + +// Patch applies the patch and returns the patched validatingWebhookConfiguration. 
+func (c *FakeValidatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &admissionregistrationv1.ValidatingWebhookConfiguration{}) + if obj == nil { + return nil, err + } + return obj.(*admissionregistrationv1.ValidatingWebhookConfiguration), err +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/sorted_event_list.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go similarity index 52% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/event/sorted_event_list.go rename to vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go index 9967f953e68f5..a5b062e37fde1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/sorted_event_list.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,23 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package event +// Code generated by client-gen. DO NOT EDIT. -import ( - corev1 "k8s.io/api/core/v1" -) +package v1 -// SortableEvents implements sort.Interface for []api.Event based on the Timestamp field -type SortableEvents []corev1.Event +type MutatingWebhookConfigurationExpansion interface{} -func (list SortableEvents) Len() int { - return len(list) -} - -func (list SortableEvents) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} - -func (list SortableEvents) Less(i, j int) bool { - return list[i].LastTimestamp.Time.Before(list[j].LastTimestamp.Time) -} +type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go new file mode 100644 index 0000000000000..1f5e5e38035d4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. 
+// A group's client should implement this interface. +type MutatingWebhookConfigurationsGetter interface { + MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface +} + +// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. +type MutatingWebhookConfigurationInterface interface { + Create(*v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error) + Update(*v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.MutatingWebhookConfiguration, error) + List(opts metav1.ListOptions) (*v1.MutatingWebhookConfigurationList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) + MutatingWebhookConfigurationExpansion +} + +// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type mutatingWebhookConfigurations struct { + client rest.Interface +} + +// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations +func newMutatingWebhookConfigurations(c *AdmissionregistrationV1Client) *mutatingWebhookConfigurations { + return &mutatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. +func (c *mutatingWebhookConfigurations) Get(name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. +func (c *mutatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.MutatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. +func (c *mutatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Post(). 
+ Resource("mutatingwebhookconfigurations"). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Put(). + Resource("mutatingwebhookconfigurations"). + Name(mutatingWebhookConfiguration.Name). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *mutatingWebhookConfigurations) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *mutatingWebhookConfigurations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched mutatingWebhookConfiguration. +func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { + result = &v1.MutatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("mutatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go new file mode 100644 index 0000000000000..7987b6e308497 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. +// A group's client should implement this interface. 
+type ValidatingWebhookConfigurationsGetter interface { + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface +} + +// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. +type ValidatingWebhookConfigurationInterface interface { + Create(*v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error) + Update(*v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ValidatingWebhookConfiguration, error) + List(opts metav1.ListOptions) (*v1.ValidatingWebhookConfigurationList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) + ValidatingWebhookConfigurationExpansion +} + +// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type validatingWebhookConfigurations struct { + client rest.Interface +} + +// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations +func newValidatingWebhookConfigurations(c *AdmissionregistrationV1Client) *validatingWebhookConfigurations { + return &validatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. +func (c *validatingWebhookConfigurations) Get(name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. +func (c *validatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ValidatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. +func (c *validatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Post(). 
+ Resource("validatingwebhookconfigurations"). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Put(). + Resource("validatingwebhookconfigurations"). + Name(validatingWebhookConfiguration.Name). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *validatingWebhookConfigurations) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingWebhookConfigurations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched validatingWebhookConfiguration. +func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { + result = &v1.ValidatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("validatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go index 2dbecbbaaccf3..ebd473ac758b2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go @@ -138,3 +138,25 @@ func (c *FakePods) Patch(name string, pt types.PatchType, data []byte, subresour } return obj.(*corev1.Pod), err } + +// GetEphemeralContainers takes name of the pod, and returns the corresponding ephemeralContainers object, and an error if there is any. +func (c *FakePods) GetEphemeralContainers(podName string, options v1.GetOptions) (result *corev1.EphemeralContainers, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(podsResource, c.ns, "ephemeralcontainers", podName), &corev1.EphemeralContainers{}) + + if obj == nil { + return nil, err + } + return obj.(*corev1.EphemeralContainers), err +} + +// UpdateEphemeralContainers takes the representation of a ephemeralContainers and updates it. Returns the server's representation of the ephemeralContainers, and an error, if there is any. +func (c *FakePods) UpdateEphemeralContainers(podName string, ephemeralContainers *corev1.EphemeralContainers) (result *corev1.EphemeralContainers, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(podsResource, "ephemeralcontainers", c.ns, ephemeralContainers), &corev1.EphemeralContainers{}) + + if obj == nil { + return nil, err + } + return obj.(*corev1.EphemeralContainers), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index 8d6b6e879632d..feacd307f5509 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -46,6 +46,9 @@ type PodInterface interface { List(opts metav1.ListOptions) (*v1.PodList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) + GetEphemeralContainers(podName string, options metav1.GetOptions) (*v1.EphemeralContainers, error) + UpdateEphemeralContainers(podName string, ephemeralContainers *v1.EphemeralContainers) (*v1.EphemeralContainers, error) + PodExpansion } @@ -189,3 +192,31 @@ func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources Into(result) return } + +// GetEphemeralContainers takes name of the pod, and returns the corresponding v1.EphemeralContainers object, and an error if there is any. +func (c *pods) GetEphemeralContainers(podName string, options metav1.GetOptions) (result *v1.EphemeralContainers, err error) { + result = &v1.EphemeralContainers{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(podName). + SubResource("ephemeralcontainers"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateEphemeralContainers takes the top resource name and the representation of a ephemeralContainers and updates it. Returns the server's representation of the ephemeralContainers, and an error, if there is any. +func (c *pods) UpdateEphemeralContainers(podName string, ephemeralContainers *v1.EphemeralContainers) (result *v1.EphemeralContainers, err error) { + result = &v1.EphemeralContainers{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(podName). + SubResource("ephemeralcontainers"). + Body(ephemeralContainers). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/BUILD.bazel new file mode 100644 index 0000000000000..32ff85c1cbb9f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "discovery_client.go", + "doc.go", + "endpointslice.go", + "generated_expansion.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1", + importpath = "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/discovery/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + ], +) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go new file mode 100644 index 0000000000000..e65a0988d8f81 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/discovery_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/discovery/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type DiscoveryV1alpha1Interface interface { + RESTClient() rest.Interface + EndpointSlicesGetter +} + +// DiscoveryV1alpha1Client is used to interact with features provided by the discovery.k8s.io group. +type DiscoveryV1alpha1Client struct { + restClient rest.Interface +} + +func (c *DiscoveryV1alpha1Client) EndpointSlices(namespace string) EndpointSliceInterface { + return newEndpointSlices(c, namespace) +} + +// NewForConfig creates a new DiscoveryV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*DiscoveryV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &DiscoveryV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new DiscoveryV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *DiscoveryV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new DiscoveryV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *DiscoveryV1alpha1Client { + return &DiscoveryV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *DiscoveryV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go new file mode 100644 index 0000000000000..df51baa4d4c17 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go new file mode 100644 index 0000000000000..417514761f22c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/discovery/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// EndpointSlicesGetter has a method to return a EndpointSliceInterface. +// A group's client should implement this interface. +type EndpointSlicesGetter interface { + EndpointSlices(namespace string) EndpointSliceInterface +} + +// EndpointSliceInterface has methods to work with EndpointSlice resources. 
+type EndpointSliceInterface interface { + Create(*v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error) + Update(*v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.EndpointSlice, error) + List(opts v1.ListOptions) (*v1alpha1.EndpointSliceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) + EndpointSliceExpansion +} + +// endpointSlices implements EndpointSliceInterface +type endpointSlices struct { + client rest.Interface + ns string +} + +// newEndpointSlices returns a EndpointSlices +func newEndpointSlices(c *DiscoveryV1alpha1Client, namespace string) *endpointSlices { + return &endpointSlices{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. +func (c *endpointSlices) Get(name string, options v1.GetOptions) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. +func (c *endpointSlices) List(opts v1.ListOptions) (result *v1alpha1.EndpointSliceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.EndpointSliceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpointSlices. +func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Create(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpointslices"). + Body(endpointSlice). + Do(). + Into(result) + return +} + +// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *endpointSlices) Update(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpointslices"). + Name(endpointSlice.Name). + Body(endpointSlice). + Do(). + Into(result) + return +} + +// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. 
+func (c *endpointSlices) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("endpointslices"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched endpointSlice. +func (c *endpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) { + result = &v1alpha1.EndpointSlice{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("endpointslices"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/BUILD.bazel new file mode 100644 index 0000000000000..8f2d6d19dcbda --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/BUILD.bazel @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "fake_discovery_client.go", + "fake_endpointslice.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake", + importpath = "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/discovery/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/testing:go_default_library", + ], +) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/doc.go new file mode 100644 index 0000000000000..16f44399065ed --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
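For orientation, a minimal sketch of how the generated EndpointSlice typed client above might be consumed; the kubeconfig path and the "default" namespace are illustrative assumptions, and error handling is reduced to panics.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	discoveryv1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a *rest.Config from a kubeconfig file (the path is an example, not a real location).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// NewForConfig is the constructor generated above.
	dc, err := discoveryv1alpha1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List EndpointSlices in an example namespace; at this client-go vintage the
	// methods take no context and ListOptions is passed by value.
	slices, err := dc.EndpointSlices("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, s := range slices.Items {
		fmt.Println(s.Name)
	}
}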
+package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_discovery_client.go new file mode 100644 index 0000000000000..532a10756d05a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_discovery_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/discovery/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeDiscoveryV1alpha1 struct { + *testing.Fake +} + +func (c *FakeDiscoveryV1alpha1) EndpointSlices(namespace string) v1alpha1.EndpointSliceInterface { + return &FakeEndpointSlices{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeDiscoveryV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_endpointslice.go new file mode 100644 index 0000000000000..9d17306a46d67 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake/fake_endpointslice.go @@ -0,0 +1,128 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/api/discovery/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeEndpointSlices implements EndpointSliceInterface +type FakeEndpointSlices struct { + Fake *FakeDiscoveryV1alpha1 + ns string +} + +var endpointslicesResource = schema.GroupVersionResource{Group: "discovery.k8s.io", Version: "v1alpha1", Resource: "endpointslices"} + +var endpointslicesKind = schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1alpha1", Kind: "EndpointSlice"} + +// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. 
+func (c *FakeEndpointSlices) Get(name string, options v1.GetOptions) (result *v1alpha1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1alpha1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.EndpointSlice), err +} + +// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. +func (c *FakeEndpointSlices) List(opts v1.ListOptions) (result *v1alpha1.EndpointSliceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1alpha1.EndpointSliceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.EndpointSliceList{ListMeta: obj.(*v1alpha1.EndpointSliceList).ListMeta} + for _, item := range obj.(*v1alpha1.EndpointSliceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested endpointSlices. +func (c *FakeEndpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts)) + +} + +// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *FakeEndpointSlices) Create(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1alpha1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.EndpointSlice), err +} + +// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. +func (c *FakeEndpointSlices) Update(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1alpha1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.EndpointSlice), err +} + +// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. +func (c *FakeEndpointSlices) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(endpointslicesResource, c.ns, name), &v1alpha1.EndpointSlice{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeEndpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.EndpointSliceList{}) + return err +} + +// Patch applies the patch and returns the patched endpointSlice. +func (c *FakeEndpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1alpha1.EndpointSlice{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.EndpointSlice), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000000..e8ceb59a48c72 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type EndpointSliceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event_expansion.go index 778843acd8ff5..19c1b441550eb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/BUILD.bazel b/vendor/k8s.io/client-go/listers/admissionregistration/v1/BUILD.bazel new file mode 100644 index 0000000000000..2e33c10177f72 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "expansion_generated.go", + "mutatingwebhookconfiguration.go", + "validatingwebhookconfiguration.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/client-go/listers/admissionregistration/v1", + importpath = "k8s.io/client-go/listers/admissionregistration/v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/admissionregistration/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go new file mode 100644 index 0000000000000..e121ae41a3ef2 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// MutatingWebhookConfigurationListerExpansion allows custom methods to be added to +// MutatingWebhookConfigurationLister. +type MutatingWebhookConfigurationListerExpansion interface{} + +// ValidatingWebhookConfigurationListerExpansion allows custom methods to be added to +// ValidatingWebhookConfigurationLister. +type ValidatingWebhookConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go new file mode 100644 index 0000000000000..e2b5da0982e6e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// MutatingWebhookConfigurationLister helps list MutatingWebhookConfigurations. +type MutatingWebhookConfigurationLister interface { + // List lists all MutatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1.MutatingWebhookConfiguration, err error) + // Get retrieves the MutatingWebhookConfiguration from the index for a given name. + Get(name string) (*v1.MutatingWebhookConfiguration, error) + MutatingWebhookConfigurationListerExpansion +} + +// mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. +type mutatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. +func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { + return &mutatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all MutatingWebhookConfigurations in the indexer. +func (s *mutatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1.MutatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.MutatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the MutatingWebhookConfiguration from the index for a given name. 
+func (s *mutatingWebhookConfigurationLister) Get(name string) (*v1.MutatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("mutatingwebhookconfiguration"), name) + } + return obj.(*v1.MutatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go new file mode 100644 index 0000000000000..33d55e08b4c59 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ValidatingWebhookConfigurationLister helps list ValidatingWebhookConfigurations. +type ValidatingWebhookConfigurationLister interface { + // List lists all ValidatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1.ValidatingWebhookConfiguration, err error) + // Get retrieves the ValidatingWebhookConfiguration from the index for a given name. + Get(name string) (*v1.ValidatingWebhookConfiguration, error) + ValidatingWebhookConfigurationListerExpansion +} + +// validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. +type validatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. +func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { + return &validatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all ValidatingWebhookConfigurations in the indexer. +func (s *validatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1.ValidatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ValidatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the ValidatingWebhookConfiguration from the index for a given name. 
+func (s *validatingWebhookConfigurationLister) Get(name string) (*v1.ValidatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("validatingwebhookconfiguration"), name) + } + return obj.(*v1.ValidatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/discovery/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/BUILD.bazel new file mode 100644 index 0000000000000..c2bb07c9a3b25 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/BUILD.bazel @@ -0,0 +1,18 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "endpointslice.go", + "expansion_generated.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/client-go/listers/discovery/v1alpha1", + importpath = "k8s.io/client-go/listers/discovery/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/discovery/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) diff --git a/vendor/k8s.io/client-go/listers/discovery/v1alpha1/endpointslice.go b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/endpointslice.go new file mode 100644 index 0000000000000..706beecfd1491 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/endpointslice.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/discovery/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EndpointSliceLister helps list EndpointSlices. +type EndpointSliceLister interface { + // List lists all EndpointSlices in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.EndpointSlice, err error) + // EndpointSlices returns an object that can list and get EndpointSlices. + EndpointSlices(namespace string) EndpointSliceNamespaceLister + EndpointSliceListerExpansion +} + +// endpointSliceLister implements the EndpointSliceLister interface. +type endpointSliceLister struct { + indexer cache.Indexer +} + +// NewEndpointSliceLister returns a new EndpointSliceLister. +func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister { + return &endpointSliceLister{indexer: indexer} +} + +// List lists all EndpointSlices in the indexer. +func (s *endpointSliceLister) List(selector labels.Selector) (ret []*v1alpha1.EndpointSlice, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.EndpointSlice)) + }) + return ret, err +} + +// EndpointSlices returns an object that can list and get EndpointSlices. 
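A small sketch of the EndpointSlice lister above, assuming a hand-populated cache.Indexer in place of an informer-backed one; the object name is a placeholder, and the namespace-scoped lister it can return is defined immediately below.

package main

import (
	"fmt"

	discoveryv1alpha1 "k8s.io/api/discovery/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	listers "k8s.io/client-go/listers/discovery/v1alpha1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Indexer keyed by namespace/name, as informers use; filled by hand here for brevity.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	_ = indexer.Add(&discoveryv1alpha1.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "example-abc12"}, // placeholder object
	})

	lister := listers.NewEndpointSliceLister(indexer)
	all, err := lister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, es := range all {
		fmt.Println(es.Namespace + "/" + es.Name)
	}
}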
+func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister { + return endpointSliceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EndpointSliceNamespaceLister helps list and get EndpointSlices. +type EndpointSliceNamespaceLister interface { + // List lists all EndpointSlices in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.EndpointSlice, err error) + // Get retrieves the EndpointSlice from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.EndpointSlice, error) + EndpointSliceNamespaceListerExpansion +} + +// endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister +// interface. +type endpointSliceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all EndpointSlices in the indexer for a given namespace. +func (s endpointSliceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.EndpointSlice, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.EndpointSlice)) + }) + return ret, err +} + +// Get retrieves the EndpointSlice from the indexer for a given namespace and name. +func (s endpointSliceNamespaceLister) Get(name string) (*v1alpha1.EndpointSlice, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("endpointslice"), name) + } + return obj.(*v1alpha1.EndpointSlice), nil +} diff --git a/vendor/k8s.io/client-go/listers/discovery/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/expansion_generated.go new file mode 100644 index 0000000000000..d47af59aa82ca --- /dev/null +++ b/vendor/k8s.io/client-go/listers/discovery/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// EndpointSliceListerExpansion allows custom methods to be added to +// EndpointSliceLister. +type EndpointSliceListerExpansion interface{} + +// EndpointSliceNamespaceListerExpansion allows custom methods to be added to +// EndpointSliceNamespaceLister. +type EndpointSliceNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index c75825ec53fa6..fb81fb7b1cf44 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -94,6 +94,10 @@ type Config struct { // UserAgent is an optional field that specifies the caller of this request. UserAgent string + // DisableCompression bypasses automatic GZip compression requests to the + // server. + DisableCompression bool + // Transport may be used for custom HTTP behavior. This attribute may not // be specified with the TLS client certificate options. 
Use WrapTransport // to provide additional per-server middleware behavior. @@ -207,6 +211,12 @@ type TLSClientConfig struct { // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). // CAData takes precedence over CAFile CAData []byte + + // NextProtos is a list of supported application level protocols, in order of preference. + // Used to populate tls.Config.NextProtos. + // To indicate to the server http/1.1 is preferred over http/2, set to ["http/1.1", "h2"] (though the server is free to ignore that preference). + // To use only http/1.1, set to ["http/1.1"]. + NextProtos []string } var _ fmt.Stringer = TLSClientConfig{} @@ -232,6 +242,7 @@ func (c TLSClientConfig) String() string { CertData: c.CertData, KeyData: c.KeyData, CAData: c.CAData, + NextProtos: c.NextProtos, } // Explicitly mark non-empty credential fields as redacted. if len(cc.CertData) != 0 { @@ -499,13 +510,15 @@ func AnonymousClientConfig(config *Config) *Config { ServerName: config.ServerName, CAFile: config.TLSClientConfig.CAFile, CAData: config.TLSClientConfig.CAData, + NextProtos: config.TLSClientConfig.NextProtos, }, - RateLimiter: config.RateLimiter, - UserAgent: config.UserAgent, - QPS: config.QPS, - Burst: config.Burst, - Timeout: config.Timeout, - Dial: config.Dial, + RateLimiter: config.RateLimiter, + UserAgent: config.UserAgent, + DisableCompression: config.DisableCompression, + QPS: config.QPS, + Burst: config.Burst, + Timeout: config.Timeout, + Dial: config.Dial, } } @@ -536,14 +549,16 @@ func CopyConfig(config *Config) *Config { CertData: config.TLSClientConfig.CertData, KeyData: config.TLSClientConfig.KeyData, CAData: config.TLSClientConfig.CAData, + NextProtos: config.TLSClientConfig.NextProtos, }, - UserAgent: config.UserAgent, - Transport: config.Transport, - WrapTransport: config.WrapTransport, - QPS: config.QPS, - Burst: config.Burst, - RateLimiter: config.RateLimiter, - Timeout: config.Timeout, - Dial: config.Dial, + UserAgent: config.UserAgent, + DisableCompression: config.DisableCompression, + Transport: config.Transport, + WrapTransport: config.WrapTransport, + QPS: config.QPS, + Burst: config.Burst, + RateLimiter: config.RateLimiter, + Timeout: config.Timeout, + Dial: config.Dial, } } diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go index 83ef5ae320fea..0bc2d03f6f9d9 100644 --- a/vendor/k8s.io/client-go/rest/plugin.go +++ b/vendor/k8s.io/client-go/rest/plugin.go @@ -55,7 +55,7 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error { pluginsLock.Lock() defer pluginsLock.Unlock() if _, found := plugins[name]; found { - return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) + return fmt.Errorf("auth Provider Plugin %q was registered twice", name) } klog.V(4).Infof("Registered Auth Provider Plugin %q", name) plugins[name] = plugin @@ -67,7 +67,7 @@ func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig defer pluginsLock.Unlock() p, ok := plugins[apc.Name] if !ok { - return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name) + return nil, fmt.Errorf("no Auth Provider found for name %q", apc.Name) } return p(clusterAddress, apc.Config, persister) } diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 0570615fcc3c0..491f8bbd1e366 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -521,14 +521,24 @@ func (r Request) finalURLTemplate() url.URL { return *url } -func (r 
*Request) tryThrottle() { +func (r *Request) tryThrottle() error { + if r.throttle == nil { + return nil + } + now := time.Now() - if r.throttle != nil { + var err error + if r.ctx != nil { + err = r.throttle.Wait(r.ctx) + } else { r.throttle.Accept() } + if latency := time.Since(now); latency > longThrottleLatency { klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) } + + return err } // Watch attempts to begin watching the requested location. @@ -630,13 +640,18 @@ func (r *Request) Stream() (io.ReadCloser, error) { return nil, r.err } - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return nil, err + } url := r.URL().String() req, err := http.NewRequest(r.verb, url, nil) if err != nil { return nil, err } + if r.body != nil { + req.Body = ioutil.NopCloser(r.body) + } if r.ctx != nil { req = req.WithContext(r.ctx) } @@ -732,7 +747,9 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { // We are retrying the request that we already send to apiserver // at least once before. // This request should also be throttled with the client-internal throttler. - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return err + } } resp, err := client.Do(req) updateURLMetrics(r, resp, err) @@ -803,7 +820,9 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { // * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError // * http.Client.Do errors are returned directly. func (r *Request) Do() Result { - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return Result{err: err} + } var result Result err := r.request(func(req *http.Request, resp *http.Response) { @@ -817,7 +836,9 @@ func (r *Request) Do() Result { // DoRaw executes the request but does not process the response body. func (r *Request) DoRaw() ([]byte, error) { - r.tryThrottle() + if err := r.tryThrottle(); err != nil { + return nil, err + } var result Result err := r.request(func(req *http.Request, resp *http.Response) { @@ -850,13 +871,13 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu // 3. Apiserver closes connection. // 4. client-go should catch this and return an error. klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) - streamErr := fmt.Errorf("Stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err) + streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err) return Result{ err: streamErr, } default: klog.Errorf("Unexpected error when reading response body: %v", err) - unexpectedErr := fmt.Errorf("Unexpected error when reading response body. Please retry. Original error: %v", err) + unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %v", err) return Result{ err: unexpectedErr, } diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index de33ecbfc360f..0800e4ec74796 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -61,9 +61,10 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip // TransportConfig converts a client config to an appropriate transport config. 
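A brief sketch of the two rest.Config fields introduced above, with example values; both settings are carried into transport.Config by TransportConfig just below.

package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // example path
	if err != nil {
		panic(err)
	}

	// Ask the apiserver not to gzip responses (useful, for example, when another layer already compresses).
	cfg.DisableCompression = true

	// Prefer HTTP/1.1 over HTTP/2; the server is free to ignore the preference.
	cfg.TLSClientConfig.NextProtos = []string{"http/1.1", "h2"}

	if _, err := kubernetes.NewForConfig(cfg); err != nil {
		panic(err)
	}
}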
func (c *Config) TransportConfig() (*transport.Config, error) { conf := &transport.Config{ - UserAgent: c.UserAgent, - Transport: c.Transport, - WrapTransport: c.WrapTransport, + UserAgent: c.UserAgent, + Transport: c.Transport, + WrapTransport: c.WrapTransport, + DisableCompression: c.DisableCompression, TLS: transport.TLSConfig{ Insecure: c.Insecure, ServerName: c.ServerName, @@ -73,6 +74,7 @@ func (c *Config) TransportConfig() (*transport.Config, error) { CertData: c.CertData, KeyFile: c.KeyFile, KeyData: c.KeyData, + NextProtos: c.NextProtos, }, Username: c.Username, Password: c.Password, diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go index c1ab45f337e74..da4a1624e81aa 100644 --- a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go @@ -38,6 +38,11 @@ func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) { *out = make([]byte, len(*in)) copy(*out, *in) } + if in.NextProtos != nil { + in, out := &in.NextProtos, &out.NextProtos + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/client-go/scale/BUILD.bazel b/vendor/k8s.io/client-go/scale/BUILD.bazel index 769f6feef160b..4dbd46b22a537 100644 --- a/vendor/k8s.io/client-go/scale/BUILD.bazel +++ b/vendor/k8s.io/client-go/scale/BUILD.bazel @@ -17,6 +17,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/client-go/discovery:go_default_library", "//vendor/k8s.io/client-go/dynamic:go_default_library", diff --git a/vendor/k8s.io/client-go/scale/client.go b/vendor/k8s.io/client-go/scale/client.go index 00e597523b1a2..17519345ebe15 100644 --- a/vendor/k8s.io/client-go/scale/client.go +++ b/vendor/k8s.io/client-go/scale/client.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" restclient "k8s.io/client-go/rest" ) @@ -75,6 +76,19 @@ func New(baseClient restclient.Interface, mapper PreferredResourceMapper, resolv } } +// apiPathFor returns the absolute api path for the given GroupVersion +func (c *scaleClient) apiPathFor(groupVer schema.GroupVersion) string { + // we need to set the API path based on GroupVersion (defaulting to the legacy path if none is set) + // TODO: we "cheat" here since the API path really only depends on group ATM, but this should + // *probably* take GroupVersionResource and not GroupVersionKind. 
+ apiPath := c.apiPathResolverFunc(groupVer.WithKind("")) + if apiPath == "" { + apiPath = "/api" + } + + return restclient.DefaultVersionedAPIPath(apiPath, groupVer) +} + // pathAndVersionFor returns the appropriate base path and the associated full GroupVersionResource // for the given GroupResource func (c *scaleClient) pathAndVersionFor(resource schema.GroupResource) (string, schema.GroupVersionResource, error) { @@ -85,17 +99,7 @@ func (c *scaleClient) pathAndVersionFor(resource schema.GroupResource) (string, groupVer := gvr.GroupVersion() - // we need to set the API path based on GroupVersion (defaulting to the legacy path if none is set) - // TODO: we "cheat" here since the API path really only depends on group ATM, but this should - // *probably* take GroupVersionResource and not GroupVersionKind. - apiPath := c.apiPathResolverFunc(groupVer.WithKind("")) - if apiPath == "" { - apiPath = "/api" - } - - path := restclient.DefaultVersionedAPIPath(apiPath, groupVer) - - return path, gvr, nil + return c.apiPathFor(groupVer), gvr, nil } // namespacedScaleClient is an ScaleInterface for fetching @@ -105,6 +109,27 @@ type namespacedScaleClient struct { namespace string } +// convertToScale converts the response body to autoscaling/v1.Scale +func convertToScale(result *restclient.Result) (*autoscaling.Scale, error) { + scaleBytes, err := result.Raw() + if err != nil { + return nil, err + } + decoder := scaleConverter.codecs.UniversalDecoder(scaleConverter.ScaleVersions()...) + rawScaleObj, err := runtime.Decode(decoder, scaleBytes) + if err != nil { + return nil, err + } + + // convert whatever this is to autoscaling/v1.Scale + scaleObj, err := scaleConverter.ConvertToVersion(rawScaleObj, autoscaling.SchemeGroupVersion) + if err != nil { + return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err) + } + + return scaleObj.(*autoscaling.Scale), nil +} + func (c *scaleClient) Scales(namespace string) ScaleInterface { return &namespacedScaleClient{ client: c, @@ -125,7 +150,7 @@ func (c *namespacedScaleClient) Get(resource schema.GroupResource, name string) result := c.client.clientBase.Get(). AbsPath(path). - Namespace(c.namespace). + NamespaceIfScoped(c.namespace, c.namespace != ""). Resource(gvr.Resource). Name(name). SubResource("scale"). @@ -134,23 +159,7 @@ func (c *namespacedScaleClient) Get(resource schema.GroupResource, name string) return nil, err } - scaleBytes, err := result.Raw() - if err != nil { - return nil, err - } - decoder := scaleConverter.codecs.UniversalDecoder(scaleConverter.ScaleVersions()...) - rawScaleObj, err := runtime.Decode(decoder, scaleBytes) - if err != nil { - return nil, err - } - - // convert whatever this is to autoscaling/v1.Scale - scaleObj, err := scaleConverter.ConvertToVersion(rawScaleObj, autoscaling.SchemeGroupVersion) - if err != nil { - return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err) - } - - return scaleObj.(*autoscaling.Scale), nil + return convertToScale(&result) } func (c *namespacedScaleClient) Update(resource schema.GroupResource, scale *autoscaling.Scale) (*autoscaling.Scale, error) { @@ -182,7 +191,7 @@ func (c *namespacedScaleClient) Update(resource schema.GroupResource, scale *aut result := c.client.clientBase.Put(). AbsPath(path). - Namespace(c.namespace). + NamespaceIfScoped(c.namespace, c.namespace != ""). Resource(gvr.Resource). Name(scale.Name). SubResource("scale"). 
@@ -195,21 +204,22 @@ func (c *namespacedScaleClient) Update(resource schema.GroupResource, scale *aut return nil, err } - scaleBytes, err := result.Raw() - if err != nil { - return nil, err - } - decoder := scaleConverter.codecs.UniversalDecoder(scaleConverter.ScaleVersions()...) - rawScaleObj, err := runtime.Decode(decoder, scaleBytes) - if err != nil { - return nil, err - } + return convertToScale(&result) +} - // convert whatever this is back to autoscaling/v1.Scale - scaleObj, err := scaleConverter.ConvertToVersion(rawScaleObj, autoscaling.SchemeGroupVersion) - if err != nil { - return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err) +func (c *namespacedScaleClient) Patch(gvr schema.GroupVersionResource, name string, pt types.PatchType, data []byte) (*autoscaling.Scale, error) { + groupVersion := gvr.GroupVersion() + result := c.client.clientBase.Patch(pt). + AbsPath(c.client.apiPathFor(groupVersion)). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(gvr.Resource). + Name(name). + SubResource("scale"). + Body(data). + Do() + if err := result.Error(); err != nil { + return nil, err } - return scaleObj.(*autoscaling.Scale), err + return convertToScale(&result) } diff --git a/vendor/k8s.io/client-go/scale/doc.go b/vendor/k8s.io/client-go/scale/doc.go index 59fd39146cae9..b6fa3f5f209be 100644 --- a/vendor/k8s.io/client-go/scale/doc.go +++ b/vendor/k8s.io/client-go/scale/doc.go @@ -18,4 +18,4 @@ limitations under the License. // and updating Scale for any resource which implements the `scale` subresource, // as long as that subresource operates on a version of scale convertable to // autoscaling.Scale. -package scale +package scale // import "k8s.io/client-go/scale" diff --git a/vendor/k8s.io/client-go/scale/interfaces.go b/vendor/k8s.io/client-go/scale/interfaces.go index 13f2cfb8e786c..16c20d79f174b 100644 --- a/vendor/k8s.io/client-go/scale/interfaces.go +++ b/vendor/k8s.io/client-go/scale/interfaces.go @@ -19,11 +19,13 @@ package scale import ( autoscalingapi "k8s.io/api/autoscaling/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" ) // ScalesGetter can produce a ScaleInterface -// for a particular namespace. type ScalesGetter interface { + // Scales produces a ScaleInterface for a particular namespace. + // Set namespace to the empty string for non-namespaced resources. Scales(namespace string) ScaleInterface } @@ -36,4 +38,7 @@ type ScaleInterface interface { // Update updates the scale of the given scalable resource. Update(resource schema.GroupResource, scale *autoscalingapi.Scale) (*autoscalingapi.Scale, error) + + // Patch patches the scale of the given scalable resource. + Patch(gvr schema.GroupVersionResource, name string, pt types.PatchType, data []byte) (*autoscalingapi.Scale, error) } diff --git a/vendor/k8s.io/client-go/scale/scheme/types.go b/vendor/k8s.io/client-go/scale/scheme/types.go index 13aec2b3c3cdf..5c5d0a6f237af 100644 --- a/vendor/k8s.io/client-go/scale/scheme/types.go +++ b/vendor/k8s.io/client-go/scale/scheme/types.go @@ -28,15 +28,15 @@ import ( // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
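A short sketch of the new Patch method on ScaleInterface, assuming a ScalesGetter built elsewhere (for instance via scale.NewForConfig); the Deployment name and replica count are placeholders.

package scalingexample

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/scale"
)

func patchReplicas(scales scale.ScalesGetter) error {
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	patch := []byte(`{"spec":{"replicas":3}}`) // placeholder replica count

	// Patch hits the /scale subresource; pass the empty namespace for cluster-scoped resources.
	result, err := scales.Scales("default").Patch(gvr, "example-deployment", types.MergePatchType, patch)
	if err != nil {
		return err
	}
	fmt.Println("observed replicas:", result.Status.Replicas)
	return nil
}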
// +optional metav1.ObjectMeta - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus } diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go index e6db578ed8fe0..f56b34ee874e8 100644 --- a/vendor/k8s.io/client-go/testing/actions.go +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -439,8 +439,8 @@ func (a ActionImpl) GetSubresource() string { return a.Subresource } func (a ActionImpl) Matches(verb, resource string) bool { - return strings.ToLower(verb) == strings.ToLower(a.Verb) && - strings.ToLower(resource) == strings.ToLower(a.Resource.Resource) + return strings.EqualFold(verb, a.Verb) && + strings.EqualFold(resource, a.Resource.Resource) } func (a ActionImpl) DeepCopy() Action { ret := a diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go index 993fcf6a1bebc..98f82326730e5 100644 --- a/vendor/k8s.io/client-go/testing/fixture.go +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -18,9 +18,11 @@ package testing import ( "fmt" + "reflect" "sync" jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -139,6 +141,11 @@ func ObjectReaction(tracker ObjectTracker) ReactionFunc { return true, nil, err } + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + switch action.GetPatchType() { case types.JSONPatchType: patch, err := jsonpatch.DecodePatch(action.GetPatch()) @@ -149,6 +156,7 @@ func ObjectReaction(tracker ObjectTracker) ReactionFunc { if err != nil { return true, nil, err } + if err = json.Unmarshal(modified, obj); err != nil { return true, nil, err } @@ -310,6 +318,11 @@ func (t *tracker) Add(obj runtime.Object) error { if err != nil { return err } + + if partial, ok := obj.(*metav1.PartialObjectMetadata); ok && len(partial.TypeMeta.APIVersion) > 0 { + gvks = []schema.GroupVersionKind{partial.TypeMeta.GroupVersionKind()} + } + if len(gvks) == 0 { return fmt.Errorf("no registered kinds for %v", obj) } diff --git a/vendor/k8s.io/client-go/tools/cache/BUILD.bazel b/vendor/k8s.io/client-go/tools/cache/BUILD.bazel index 3cf5851bec158..7cea71e09062b 100644 --- a/vendor/k8s.io/client-go/tools/cache/BUILD.bazel +++ b/vendor/k8s.io/client-go/tools/cache/BUILD.bazel @@ -38,6 +38,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/naming:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index b5d392520b937..27a1c52cdfc6e 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -79,6 +79,7 @@ type controller struct { clock clock.Clock } +// Controller is a generic controller framework. type Controller interface { Run(stopCh <-chan struct{}) HasSynced() bool @@ -130,6 +131,8 @@ func (c *controller) HasSynced() bool { } func (c *controller) LastSyncResourceVersion() string { + c.reflectorMutex.RLock() + defer c.reflectorMutex.RUnlock() if c.reflector == nil { return "" } @@ -149,7 +152,7 @@ func (c *controller) processLoop() { for { obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process)) if err != nil { - if err == FIFOClosedError { + if err == ErrFIFOClosed { return } if c.config.RetryOnError { diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index f24eec254173f..b832b4bbece64 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -160,7 +160,7 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { return f.keyFunc(obj) } -// Return true if an Add/Update/Delete/AddIfNotPresent are called first, +// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first, // or an Update called first but the first batch of items inserted by Replace() has been popped func (f *DeltaFIFO) HasSynced() bool { f.lock.Lock() @@ -295,13 +295,6 @@ func isDeletionDup(a, b *Delta) *Delta { return b } -// willObjectBeDeletedLocked returns true only if the last delta for the -// given object is Delete. Caller must lock first. -func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool { - deltas := f.items[id] - return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted -} - // queueActionLocked appends to the delta list for the object. // Caller must lock first. func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { @@ -310,13 +303,6 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err return KeyError{obj, err} } - // If object is supposed to be deleted (last event is Deleted), - // then we should ignore Sync events, because it would result in - // recreation of this object. - if actionType == Sync && f.willObjectBeDeletedLocked(id) { - return nil - } - newDeltas := append(f.items[id], Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) @@ -389,7 +375,7 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err return d, exists, nil } -// Checks if the queue is closed +// IsClosed checks if the queue is closed func (f *DeltaFIFO) IsClosed() bool { f.closedLock.Lock() defer f.closedLock.Unlock() @@ -417,7 +403,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { // When Close() is called, the f.closed is set and the condition is broadcasted. // Which causes this loop to continue and return from the Pop(). 
if f.IsClosed() { - return nil, FIFOClosedError + return nil, ErrFIFOClosed } f.cond.Wait() @@ -593,6 +579,7 @@ type KeyGetter interface { // DeltaType is the type of a change (addition, deletion, etc) type DeltaType string +// Change type definition const ( Added DeltaType = "Added" Updated DeltaType = "Updated" diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go index 4b00544fc1fe4..14ad492ecc835 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -55,7 +55,7 @@ type ExpirationPolicy interface { type TTLPolicy struct { // >0: Expire entries with an age > ttl // <=0: Don't expire any entry - Ttl time.Duration + TTL time.Duration // Clock used to calculate ttl expiration Clock clock.Clock @@ -64,7 +64,7 @@ type TTLPolicy struct { // IsExpired returns true if the given object is older than the ttl, or it can't // determine its age. func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool { - return p.Ttl > 0 && p.Clock.Since(obj.Timestamp) > p.Ttl + return p.TTL > 0 && p.Clock.Since(obj.Timestamp) > p.TTL } // TimestampedEntry is the only type allowed in a ExpirationCache. @@ -74,6 +74,7 @@ func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool { type TimestampedEntry struct { Obj interface{} Timestamp time.Time + key string } // getTimestampedEntry returns the TimestampedEntry stored under the given key. @@ -129,10 +130,8 @@ func (c *ExpirationCache) List() []interface{} { list := make([]interface{}, 0, len(items)) for _, item := range items { - obj := item.(*TimestampedEntry).Obj - if key, err := c.keyFunc(obj); err != nil { - list = append(list, obj) - } else if obj, exists := c.getOrExpire(key); exists { + key := item.(*TimestampedEntry).key + if obj, exists := c.getOrExpire(key); exists { list = append(list, obj) } } @@ -154,7 +153,7 @@ func (c *ExpirationCache) Add(obj interface{}) error { c.expirationLock.Lock() defer c.expirationLock.Unlock() - c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now()}) + c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now(), key}) return nil } @@ -187,7 +186,7 @@ func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) er if err != nil { return KeyError{item, err} } - items[key] = &TimestampedEntry{item, ts} + items[key] = &TimestampedEntry{item, ts, key} } c.expirationLock.Lock() defer c.expirationLock.Unlock() diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go index d61db3d519444..33afd32c867c7 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go @@ -33,16 +33,19 @@ func (c *fakeThreadSafeMap) Delete(key string) { } } +// FakeExpirationPolicy keeps the list for keys which never expires. type FakeExpirationPolicy struct { NeverExpire sets.String RetrieveKeyFunc KeyFunc } +// IsExpired used to check if object is expired. func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool { key, _ := p.RetrieveKeyFunc(obj) return !p.NeverExpire.Has(key) } +// NewFakeExpirationStore creates a new instance for the ExpirationCache. 
func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store { cacheStorage := NewThreadSafeStore(Indexers{}, Indices{}) return &ExpirationCache{ diff --git a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go index b59e2eb272469..462d22660c3c7 100644 --- a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go +++ b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go @@ -16,7 +16,7 @@ limitations under the License. package cache -// FakeStore lets you define custom functions for store operations +// FakeCustomStore lets you define custom functions for store operations. type FakeCustomStore struct { AddFunc func(obj interface{}) error UpdateFunc func(obj interface{}) error @@ -25,7 +25,7 @@ type FakeCustomStore struct { ListKeysFunc func() []string GetFunc func(obj interface{}) (item interface{}, exists bool, err error) GetByKeyFunc func(key string) (item interface{}, exists bool, err error) - ReplaceFunc func(list []interface{}, resourceVerion string) error + ReplaceFunc func(list []interface{}, resourceVersion string) error ResyncFunc func() error } diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index 508c5530c36fb..7a3bc3d3018fd 100644 --- a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -34,7 +34,8 @@ type ErrRequeue struct { Err error } -var FIFOClosedError error = errors.New("DeltaFIFO: manipulating with closed queue") +// ErrFIFOClosed used when FIFO is closed +var ErrFIFOClosed = errors.New("DeltaFIFO: manipulating with closed queue") func (e ErrRequeue) Error() string { if e.Err == nil { @@ -66,7 +67,7 @@ type Queue interface { Close() } -// Helper function for popping from Queue. +// Pop is helper function for popping from Queue. // WARNING: Do NOT use this function in non-test code to avoid races // unless you really really really really know what you are doing. func Pop(queue Queue) interface{} { @@ -126,7 +127,7 @@ func (f *FIFO) Close() { f.cond.Broadcast() } -// Return true if an Add/Update/Delete/AddIfNotPresent are called first, +// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first, // or an Update called first but the first batch of items inserted by Replace() has been popped func (f *FIFO) HasSynced() bool { f.lock.Lock() @@ -242,7 +243,7 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) { return item, exists, nil } -// Checks if the queue is closed +// IsClosed checks if the queue is closed func (f *FIFO) IsClosed() bool { f.closedLock.Lock() defer f.closedLock.Unlock() @@ -267,7 +268,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { // When Close() is called, the f.closed is set and the condition is broadcasted. // Which causes this loop to continue and return from the Pop(). if f.IsClosed() { - return nil, FIFOClosedError + return nil, ErrFIFOClosed } f.cond.Wait() diff --git a/vendor/k8s.io/client-go/tools/cache/heap.go b/vendor/k8s.io/client-go/tools/cache/heap.go index 7357ff97a1f4c..e503a45a467f4 100644 --- a/vendor/k8s.io/client-go/tools/cache/heap.go +++ b/vendor/k8s.io/client-go/tools/cache/heap.go @@ -28,7 +28,9 @@ const ( closedMsg = "heap is closed" ) +// LessFunc is used to compare two objects in the heap. 
type LessFunc func(interface{}, interface{}) bool + type heapItem struct { obj interface{} // The object which is stored in the heap. index int // The index of the object's key in the Heap.queue. @@ -158,7 +160,7 @@ func (h *Heap) Add(obj interface{}) error { return nil } -// Adds all the items in the list to the queue and then signals the condition +// BulkAdd adds all the items in the list to the queue and then signals the condition // variable. It is useful when the caller would like to add all of the items // to the queue before consumer starts processing them. func (h *Heap) BulkAdd(list []interface{}) error { @@ -249,11 +251,11 @@ func (h *Heap) Pop() (interface{}, error) { h.cond.Wait() } obj := heap.Pop(h.data) - if obj != nil { - return obj, nil - } else { + if obj == nil { return nil, fmt.Errorf("object was removed from heap data") } + + return obj, nil } // List returns a list of all the items. diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go index 15acb168ef2af..9fa4068386399 100644 --- a/vendor/k8s.io/client-go/tools/cache/index.go +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -23,17 +23,27 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) -// Indexer is a storage interface that lets you list objects using multiple indexing functions +// Indexer is a storage interface that lets you list objects using multiple indexing functions. +// There are three kinds of strings here. +// One is a storage key, as defined in the Store interface. +// Another kind is a name of an index. +// The third kind of string is an "indexed value", which is produced by an +// IndexFunc and can be a field value or any other string computed from the object. type Indexer interface { Store - // Retrieve list of objects that match on the named indexing function + // Index returns the stored objects whose set of indexed values + // intersects the set of indexed values of the given object, for + // the named index Index(indexName string, obj interface{}) ([]interface{}, error) - // IndexKeys returns the set of keys that match on the named indexing function. - IndexKeys(indexName, indexKey string) ([]string, error) - // ListIndexFuncValues returns the list of generated values of an Index func + // IndexKeys returns the storage keys of the stored objects whose + // set of indexed values for the named index includes the given + // indexed value + IndexKeys(indexName, indexedValue string) ([]string, error) + // ListIndexFuncValues returns all the indexed values of the given index ListIndexFuncValues(indexName string) []string - // ByIndex lists object that match on the named indexing function with the exact key - ByIndex(indexName, indexKey string) ([]interface{}, error) + // ByIndex returns the stored objects whose set of indexed values + // for the named index includes the given indexed value + ByIndex(indexName, indexedValue string) ([]interface{}, error) // GetIndexer return the indexers GetIndexers() Indexers @@ -42,7 +52,7 @@ type Indexer interface { AddIndexers(newIndexers Indexers) error } -// IndexFunc knows how to provide an indexed value for an object. +// IndexFunc knows how to compute the set of indexed values for an object. type IndexFunc func(obj interface{}) ([]string, error) // IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. 
This is only useful if your index function returns @@ -65,6 +75,7 @@ func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { } const ( + // NamespaceIndex is the lookup name for the most comment index function, which is to index by the namespace field. NamespaceIndex string = "namespace" ) diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go index 311ff8c49310e..d649cd735e248 100644 --- a/vendor/k8s.io/client-go/tools/cache/listers.go +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -30,6 +30,7 @@ import ( // AppendFunc is used to add a matching item to whatever list the caller is using type AppendFunc func(interface{}) +// ListAll calls appendFn with each value retrieved from store which matches the selector. func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { selectAll := selector.Empty() for _, m := range store.List() { @@ -50,6 +51,7 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { return nil } +// ListAllByNamespace used to list items belongs to namespace from Indexer. func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { selectAll := selector.Empty() if namespace == metav1.NamespaceAll { @@ -124,6 +126,7 @@ type GenericNamespaceLister interface { Get(name string) (runtime.Object, error) } +// NewGenericLister creates a new instance for the genericLister. func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister { return &genericLister{indexer: indexer, resource: resource} } diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go index 4c6686e918c14..5d3245a600c49 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go @@ -42,6 +42,7 @@ type MutationCache interface { Mutation(interface{}) } +// ResourceVersionComparator is able to compare object versions. type ResourceVersionComparator interface { CompareResourceVersion(lhs, rhs runtime.Object) int } diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go index adb5b8be8af46..9903b2928e8a2 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -36,12 +36,14 @@ func init() { mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR")) } -type CacheMutationDetector interface { +// MutationDetector is able to monitor if the object be modified outside. +type MutationDetector interface { AddObject(obj interface{}) Run(stopCh <-chan struct{}) } -func NewCacheMutationDetector(name string) CacheMutationDetector { +// NewCacheMutationDetector creates a new instance for the defaultCacheMutationDetector. 
+func NewCacheMutationDetector(name string) MutationDetector { if !mutationDetectionEnabled { return dummyMutationDetector{} } @@ -114,7 +116,7 @@ func (d *defaultCacheMutationDetector) CompareObjects() { altered := false for i, obj := range d.cachedObjs { if !reflect.DeepEqual(obj.cached, obj.copied) { - fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectDiff(obj.cached, obj.copied)) + fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied)) altered = true } } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 2daa44ba54fce..8abde7131a389 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -22,11 +22,8 @@ import ( "fmt" "io" "math/rand" - "net" - "net/url" "reflect" "sync" - "syscall" "time" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -35,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/naming" + utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" @@ -166,7 +164,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { options := metav1.ListOptions{ResourceVersion: "0"} if err := func() error { - initTrace := trace.New("Reflector " + r.name + " ListAndWatch") + initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name}) defer initTrace.LogIfLong(10 * time.Second) var list runtime.Object var err error @@ -268,8 +266,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. // Reflector doesn't assume bookmarks are returned at all (if the server do not support // watch bookmarks, it will ignore this field). - // Disabled in Alpha release of watch bookmarks feature. - AllowWatchBookmarks: false, + AllowWatchBookmarks: true, } w, err := r.listerWatcher.Watch(options) @@ -286,20 +283,21 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // It doesn't make sense to re-list all objects because most likely we will be able to restart // watch where we ended. // If that's the case wait and resend watch request. 
- if urlError, ok := err.(*url.Error); ok { - if opError, ok := urlError.Err.(*net.OpError); ok { - if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED { - time.Sleep(time.Second) - continue - } - } + if utilnet.IsConnectionRefused(err) { + time.Sleep(time.Second) + continue } return nil } if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil { if err != errorStopRequested { - klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + switch { + case apierrs.IsResourceExpired(err): + klog.V(4).Infof("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + default: + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + } } return nil } diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 3271d959f05b3..f59a0852fe0a5 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -34,56 +34,90 @@ import ( // SharedInformer provides eventually consistent linkage of its // clients to the authoritative state of a given collection of // objects. An object is identified by its API group, kind/resource, -// namespace, and name. One SharedInfomer provides linkage to objects -// of a particular API group and kind/resource. The linked object -// collection of a SharedInformer may be further restricted to one -// namespace and/or by label selector and/or field selector. +// namespace, and name; the `ObjectMeta.UID` is not part of an +// object's ID as far as this contract is concerned. One +// SharedInformer provides linkage to objects of a particular API +// group and kind/resource. The linked object collection of a +// SharedInformer may be further restricted to one namespace and/or by +// label selector and/or field selector. // // The authoritative state of an object is what apiservers provide // access to, and an object goes through a strict sequence of states. -// A state is either "absent" or present with a ResourceVersion and -// other appropriate content. +// An object state is either "absent" or present with a +// ResourceVersion and other appropriate content. // -// A SharedInformer maintains a local cache, exposed by Store(), of -// the state of each relevant object. This cache is eventually -// consistent with the authoritative state. This means that, unless -// prevented by persistent communication problems, if ever a -// particular object ID X is authoritatively associated with a state S -// then for every SharedInformer I whose collection includes (X, S) -// eventually either (1) I's cache associates X with S or a later -// state of X, (2) I is stopped, or (3) the authoritative state -// service for X terminates. To be formally complete, we say that the -// absent state meets any restriction by label selector or field -// selector. +// A SharedInformer gets object states from apiservers using a +// sequence of LIST and WATCH operations. Through this sequence the +// apiservers provide a sequence of "collection states" to the +// informer, where each collection state defines the state of every +// object of the collection. No promise --- beyond what is implied by +// other remarks here --- is made about how one informer's sequence of +// collection states relates to a different informer's sequence of +// collection states. 
+// +// A SharedInformer maintains a local cache, exposed by GetStore() and +// by GetIndexer() in the case of an indexed informer, of the state of +// each relevant object. This cache is eventually consistent with the +// authoritative state. This means that, unless prevented by +// persistent communication problems, if ever a particular object ID X +// is authoritatively associated with a state S then for every +// SharedInformer I whose collection includes (X, S) eventually either +// (1) I's cache associates X with S or a later state of X, (2) I is +// stopped, or (3) the authoritative state service for X terminates. +// To be formally complete, we say that the absent state meets any +// restriction by label selector or field selector. +// +// The local cache starts out empty, and gets populated and updated +// during `Run()`. // // As a simple example, if a collection of objects is henceforeth -// unchanging and a SharedInformer is created that links to that -// collection then that SharedInformer's cache eventually holds an -// exact copy of that collection (unless it is stopped too soon, the -// authoritative state service ends, or communication problems between -// the two persistently thwart achievement). +// unchanging, a SharedInformer is created that links to that +// collection, and that SharedInformer is `Run()` then that +// SharedInformer's cache eventually holds an exact copy of that +// collection (unless it is stopped too soon, the authoritative state +// service ends, or communication problems between the two +// persistently thwart achievement). // // As another simple example, if the local cache ever holds a // non-absent state for some object ID and the object is eventually // removed from the authoritative state then eventually the object is // removed from the local cache (unless the SharedInformer is stopped -// too soon, the authoritative state service emnds, or communication +// too soon, the authoritative state service ends, or communication // problems persistently thwart the desired result). // -// The keys in Store() are of the form namespace/name for namespaced +// The keys in the Store are of the form namespace/name for namespaced // objects, and are simply the name for non-namespaced objects. +// Clients can use `MetaNamespaceKeyFunc(obj)` to extract the key for +// a given object, and `SplitMetaNamespaceKey(key)` to split a key +// into its constituent parts. // // A client is identified here by a ResourceEventHandler. For every -// update to the SharedInformer's local cache and for every client, -// eventually either the SharedInformer is stopped or the client is -// notified of the update. These notifications happen after the -// corresponding cache update and, in the case of a -// SharedIndexInformer, after the corresponding index updates. It is -// possible that additional cache and index updates happen before such -// a prescribed notification. For a given SharedInformer and client, -// all notifications are delivered sequentially. For a given -// SharedInformer, client, and object ID, the notifications are -// delivered in order. +// update to the SharedInformer's local cache and for every client +// added before `Run()`, eventually either the SharedInformer is +// stopped or the client is notified of the update. 
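The rewritten contract (continued below) stresses that handlers must be prompt and that lengthy work belongs on a workqueue; a hedged sketch of that standard pattern, with the informer and queue supplied by the caller:

package informerexample

import (
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
)

// enqueueKeys registers a handler that only extracts namespace/name keys
// and defers the real processing to a rate-limited workqueue.
func enqueueKeys(informer cache.SharedInformer, queue workqueue.RateLimitingInterface) {
    enqueue := func(obj interface{}) {
        if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
            queue.Add(key)
        }
    }
    informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc:    enqueue,
        UpdateFunc: func(oldObj, newObj interface{}) { enqueue(newObj) },
        DeleteFunc: func(obj interface{}) {
            // Deletes may deliver a DeletedFinalStateUnknown tombstone;
            // this helper unwraps it before computing the key.
            if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
                queue.Add(key)
            }
        },
    })
}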
A client added +// after `Run()` starts gets a startup batch of notifications of +// additions of the object existing in the cache at the time that +// client was added; also, for every update to the SharedInformer's +// local cache after that client was added, eventually either the +// SharedInformer is stopped or that client is notified of that +// update. Client notifications happen after the corresponding cache +// update and, in the case of a SharedIndexInformer, after the +// corresponding index updates. It is possible that additional cache +// and index updates happen before such a prescribed notification. +// For a given SharedInformer and client, the notifications are +// delivered sequentially. For a given SharedInformer, client, and +// object ID, the notifications are delivered in order. +// +// A client must process each notification promptly; a SharedInformer +// is not engineered to deal well with a large backlog of +// notifications to deliver. Lengthy processing should be passed off +// to something else, for example through a +// `client-go/util/workqueue`. +// +// Each query to an informer's local cache --- whether a single-object +// lookup, a list operation, or a use of one of its indices --- is +// answered entirely from one of the collection states received by +// that informer. // // A delete notification exposes the last locally known non-absent // state, except that its ResourceVersion is replaced with a @@ -116,6 +150,7 @@ type SharedInformer interface { LastSyncResourceVersion() string } +// SharedIndexInformer provides add and get Indexers ability based on SharedInformer. type SharedIndexInformer interface { SharedInformer // AddIndexers add indexers to the informer before it starts. @@ -155,10 +190,26 @@ const ( initialBufferSize = 1024 ) +// WaitForNamedCacheSync is a wrapper around WaitForCacheSync that generates log messages +// indicating that the caller identified by name is waiting for syncs, followed by +// either a successful or failed sync. +func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { + klog.Infof("Waiting for caches to sync for %s", controllerName) + + if !WaitForCacheSync(stopCh, cacheSyncs...) { + utilruntime.HandleError(fmt.Errorf("unable to sync caches for %s", controllerName)) + return false + } + + klog.Infof("Caches are synced for %s ", controllerName) + return true +} + // WaitForCacheSync waits for caches to populate. 
It returns true if it was successful, false // if the controller should shutdown +// callers should prefer WaitForNamedCacheSync() func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { - err := wait.PollUntil(syncedPollPeriod, + err := wait.PollImmediateUntil(syncedPollPeriod, func() (bool, error) { for _, syncFunc := range cacheSyncs { if !syncFunc() { @@ -182,7 +233,7 @@ type sharedIndexInformer struct { controller Controller processor *sharedProcessor - cacheMutationDetector CacheMutationDetector + cacheMutationDetector MutationDetector // This block is tracked to handle late initialization of the controller listerWatcher ListerWatcher @@ -222,7 +273,7 @@ func (v *dummyController) HasSynced() bool { return v.informer.HasSynced() } -func (c *dummyController) LastSyncResourceVersion() string { +func (v *dummyController) LastSyncResourceVersion() string { return "" } diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index b74faa019c50a..33e6239a69a5a 100644 --- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -302,6 +302,7 @@ func (c *threadSafeMap) Resync() error { return nil } +// NewThreadSafeStore creates a new instance of ThreadSafeStore. func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore { return &threadSafeMap{ items: map[string]interface{}{}, diff --git a/vendor/k8s.io/client-go/tools/cache/undelta_store.go b/vendor/k8s.io/client-go/tools/cache/undelta_store.go index 117df46c48d26..220845dd3a024 100644 --- a/vendor/k8s.io/client-go/tools/cache/undelta_store.go +++ b/vendor/k8s.io/client-go/tools/cache/undelta_store.go @@ -31,6 +31,7 @@ type UndeltaStore struct { // Assert that it implements the Store interface. var _ Store = &UndeltaStore{} +// Add inserts an object into the store and sends complete state by calling PushFunc. // Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods. // In the functions below, the lock gets released and reacquired betweend the {Add,Delete,etc} // and the List. So, the following can happen, resulting in two identical calls to PushFunc. @@ -41,7 +42,6 @@ var _ Store = &UndeltaStore{} // 3 Store.Add(b) // 4 Store.List() -> [a,b] // 5 Store.List() -> [a,b] - func (u *UndeltaStore) Add(obj interface{}) error { if err := u.Store.Add(obj); err != nil { return err @@ -50,6 +50,7 @@ func (u *UndeltaStore) Add(obj interface{}) error { return nil } +// Update sets an item in the cache to its updated state and sends complete state by calling PushFunc. func (u *UndeltaStore) Update(obj interface{}) error { if err := u.Store.Update(obj); err != nil { return err @@ -58,6 +59,7 @@ func (u *UndeltaStore) Update(obj interface{}) error { return nil } +// Delete removes an item from the cache and sends complete state by calling PushFunc. func (u *UndeltaStore) Delete(obj interface{}) error { if err := u.Store.Delete(obj); err != nil { return err @@ -66,6 +68,10 @@ func (u *UndeltaStore) Delete(obj interface{}) error { return nil } +// Replace will delete the contents of current store, using instead the given list. +// 'u' takes ownership of the list, you should not reference the list again +// after calling this function. +// The new contents complete state will be sent by calling PushFunc after replacement. 
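A brief usage note for the new WaitForNamedCacheSync wrapper above (the controller name and worker are placeholders): callers block on it before starting workers and bail out if the caches never sync.

package syncexample

import "k8s.io/client-go/tools/cache"

// runController waits for the caches, logging under the controller's
// name via the new wrapper, then starts a single worker until stop.
func runController(stopCh <-chan struct{}, worker func(), synced ...cache.InformerSynced) {
    if !cache.WaitForNamedCacheSync("example-controller", stopCh, synced...) {
        return // the wrapper has already logged the failure
    }
    go worker()
    <-stopCh
}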
func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error { if err := u.Store.Replace(list, resourceVersion); err != nil { return err diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go index e00ea38272d11..4e37e79283949 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -127,6 +127,10 @@ type ClientConfigLoadingRules struct { // DefaultClientConfig is an optional field indicating what rules to use to calculate a default configuration. // This should match the overrides passed in to ClientConfig loader. DefaultClientConfig ClientConfig + + // WarnIfAllMissing indicates whether the configuration files pointed by KUBECONFIG environment variable are present or not. + // In case of missing files, it warns the user about the missing files. + WarnIfAllMissing bool } // ClientConfigLoadingRules implements the ClientConfigLoader interface. @@ -136,20 +140,23 @@ var _ ClientConfigLoader = &ClientConfigLoadingRules{} // use this constructor func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules { chain := []string{} + warnIfAllMissing := false envVarFiles := os.Getenv(RecommendedConfigPathEnvVar) if len(envVarFiles) != 0 { fileList := filepath.SplitList(envVarFiles) // prevent the same path load multiple times chain = append(chain, deduplicate(fileList)...) + warnIfAllMissing = true } else { chain = append(chain, RecommendedHomeFile) } return &ClientConfigLoadingRules{ - Precedence: chain, - MigrationRules: currentMigrationRules(), + Precedence: chain, + MigrationRules: currentMigrationRules(), + WarnIfAllMissing: warnIfAllMissing, } } @@ -172,6 +179,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { } errlist := []error{} + missingList := []string{} kubeConfigFiles := []string{} @@ -195,18 +203,26 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { } config, err := LoadFromFile(filename) + if os.IsNotExist(err) { // skip missing files + // Add to the missing list to produce a warning + missingList = append(missingList, filename) continue } + if err != nil { - errlist = append(errlist, fmt.Errorf("Error loading config file \"%s\": %v", filename, err)) + errlist = append(errlist, fmt.Errorf("error loading config file \"%s\": %v", filename, err)) continue } kubeconfigs = append(kubeconfigs, config) } + if rules.WarnIfAllMissing && len(missingList) > 0 && len(kubeconfigs) == 0 { + klog.Warningf("Config not found: %s", strings.Join(missingList, ", ")) + } + // first merge all of our maps mapConfig := clientcmdapi.NewConfig() @@ -467,7 +483,7 @@ func ResolveLocalPaths(config *clientcmdapi.Config) error { } base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) if err != nil { - return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) + return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) } if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { @@ -480,7 +496,7 @@ func ResolveLocalPaths(config *clientcmdapi.Config) error { } base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) if err != nil { - return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) + return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) } 
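For context on the loader.go change above: WarnIfAllMissing only warns when every file named in KUBECONFIG is absent, and the default rules now set it automatically. A minimal sketch of the standard loading path that picks this up (no overrides applied):

package kubeconfigexample

import (
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
)

// loadRESTConfig resolves kubeconfig via the default loading rules, so
// the new warning fires only if none of the KUBECONFIG files exist.
func loadRESTConfig() (*rest.Config, error) {
    rules := clientcmd.NewDefaultClientConfigLoadingRules()
    return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{}).ClientConfig()
}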
if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/vendor/k8s.io/client-go/tools/clientcmd/validation.go index 629c0b30a074d..2f927072bdee8 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/validation.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/validation.go @@ -185,9 +185,10 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) [ } if len(clusterInfo.CertificateAuthority) != 0 { clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) - defer clientCertCA.Close() if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + } else { + defer clientCertCA.Close() } } @@ -223,16 +224,18 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err if len(authInfo.ClientCertificate) != 0 { clientCertFile, err := os.Open(authInfo.ClientCertificate) - defer clientCertFile.Close() if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + } else { + defer clientCertFile.Close() } } if len(authInfo.ClientKey) != 0 { clientKeyFile, err := os.Open(authInfo.ClientKey) - defer clientKeyFile.Close() if err != nil { validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + } else { + defer clientKeyFile.Close() } } } @@ -250,8 +253,6 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err for _, v := range authInfo.Exec.Env { if len(v.Name) == 0 { validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName)) - } else if len(v.Value) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("env variable %s value must be specified for %v to use exec authentication plugin", v.Name, authInfoName)) } } } diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index 53523ddddc3c2..4be650c0c8253 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -89,6 +89,12 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) { if lec.RetryPeriod < 1 { return nil, fmt.Errorf("retryPeriod must be greater than zero") } + if lec.Callbacks.OnStartedLeading == nil { + return nil, fmt.Errorf("OnStartedLeading callback must not be nil") + } + if lec.Callbacks.OnStoppedLeading == nil { + return nil, fmt.Errorf("OnStoppedLeading callback must not be nil") + } if lec.Lock == nil { return nil, fmt.Errorf("Lock must not be nil.") diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index 65e48c0238a32..51ba5be3cb700 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -129,6 +129,25 @@ type EventBroadcaster interface { NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder } +// EventRecorderAdapter is a wrapper around EventRecorder implementing the +// new EventRecorder interface. 
+type EventRecorderAdapter struct { + recorder EventRecorder +} + +// NewEventRecorderAdapter returns an adapter implementing new EventRecorder +// interface. +func NewEventRecorderAdapter(recorder EventRecorder) *EventRecorderAdapter { + return &EventRecorderAdapter{ + recorder: recorder, + } +} + +// Eventf is a wrapper around v1 Eventf +func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + a.recorder.Eventf(regarding, eventtype, reason, note, args...) +} + // Creates a new event broadcaster. func NewBroadcaster() EventBroadcaster { return &eventBroadcasterImpl{ @@ -162,17 +181,14 @@ type eventBroadcasterImpl struct { // The return value can be ignored or used to stop recording, if desired. // TODO: make me an object with parameterizable queue length and retry interval func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { - // The default math/rand package functions aren't thread safe, so create a - // new Rand object for each StartRecording call. - randGen := rand.New(rand.NewSource(time.Now().UnixNano())) eventCorrelator := NewEventCorrelatorWithOptions(eventBroadcaster.options) return eventBroadcaster.StartEventWatcher( func(event *v1.Event) { - recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) + recordToSink(sink, event, eventCorrelator, eventBroadcaster.sleepDuration) }) } -func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { +func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, sleepDuration time.Duration) { // Make a copy before modification, because there could be multiple listeners. // Events are safe to copy like this. eventCopy := *event @@ -197,7 +213,7 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela // Randomize the first sleep so that various clients won't all be // synced up if the master goes down. 
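A hedged sketch of wiring up the EventRecorderAdapter introduced above (the component name and event text are illustrative only); the adapter simply drops the extra related-object and action arguments of the newer interface:

package eventexample

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/tools/record"
)

// newAdapter wraps a classic v1 EventRecorder so callers written against
// the newer Eventf signature can keep using it.
func newAdapter() *record.EventRecorderAdapter {
    broadcaster := record.NewBroadcaster()
    recorder := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "example-controller"})
    return record.NewEventRecorderAdapter(recorder)
}

// emit uses the newer-style call; the nil "related" object and the
// "Sync" action are discarded by the adapter.
func emit(a *record.EventRecorderAdapter, regarding runtime.Object) {
    a.Eventf(regarding, nil, corev1.EventTypeNormal, "Synced", "Sync", "object synced")
}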
if tries == 1 { - time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64())) + time.Sleep(time.Duration(float64(sleepDuration) * rand.Float64())) } else { time.Sleep(sleepDuration) } diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go index 7cffe2a5faf3f..980d36ae1289b 100644 --- a/vendor/k8s.io/client-go/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -20,6 +20,7 @@ import ( "fmt" "net" "net/http" + "strings" "sync" "time" @@ -39,13 +40,15 @@ const idleConnsPerHost = 25 var tlsCache = &tlsTransportCache{transports: make(map[tlsCacheKey]*http.Transport)} type tlsCacheKey struct { - insecure bool - caData string - certData string - keyData string - getCert string - serverName string - dial string + insecure bool + caData string + certData string + keyData string + getCert string + serverName string + nextProtos string + dial string + disableCompression bool } func (t tlsCacheKey) String() string { @@ -53,7 +56,7 @@ func (t tlsCacheKey) String() string { if len(t.keyData) > 0 { keyText = "" } - return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial) + return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s disableCompression:%t", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial, t.disableCompression) } func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { @@ -95,6 +98,7 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { TLSClientConfig: tlsConfig, MaxIdleConnsPerHost: idleConnsPerHost, DialContext: dial, + DisableCompression: config.DisableCompression, }) return c.transports[key], nil } @@ -106,12 +110,14 @@ func tlsConfigKey(c *Config) (tlsCacheKey, error) { return tlsCacheKey{}, err } return tlsCacheKey{ - insecure: c.TLS.Insecure, - caData: string(c.TLS.CAData), - certData: string(c.TLS.CertData), - keyData: string(c.TLS.KeyData), - getCert: fmt.Sprintf("%p", c.TLS.GetCert), - serverName: c.TLS.ServerName, - dial: fmt.Sprintf("%p", c.Dial), + insecure: c.TLS.Insecure, + caData: string(c.TLS.CAData), + certData: string(c.TLS.CertData), + keyData: string(c.TLS.KeyData), + getCert: fmt.Sprintf("%p", c.TLS.GetCert), + serverName: c.TLS.ServerName, + nextProtos: strings.Join(c.TLS.NextProtos, ","), + dial: fmt.Sprintf("%p", c.Dial), + disableCompression: c.DisableCompression, }, nil } diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 5de0a2cb1012d..9e18d11d38ae7 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -47,6 +47,10 @@ type Config struct { // Impersonate is the config that this Config will impersonate using Impersonate ImpersonationConfig + // DisableCompression bypasses automatic GZip compression requests to the + // server. + DisableCompression bool + // Transport may be used for custom HTTP behavior. This attribute may // not be specified with the TLS client certificate options. Use // WrapTransport for most client level operations. @@ -122,5 +126,11 @@ type TLSConfig struct { CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile. KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile. + // NextProtos is a list of supported application level protocols, in order of preference. 
+ // Used to populate tls.Config.NextProtos. + // To indicate to the server http/1.1 is preferred over http/2, set to ["http/1.1", "h2"] (though the server is free to ignore that preference). + // To use only http/1.1, set to ["http/1.1"]. + NextProtos []string + GetCert func() (*tls.Certificate, error) // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supercede this field. } diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index 117a9c8c4de41..a272753ae2dbf 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -80,10 +80,6 @@ func DebugWrappers(rt http.RoundTripper) http.RoundTripper { return rt } -type requestCanceler interface { - CancelRequest(*http.Request) -} - type authProxyRoundTripper struct { username string groups []string @@ -140,11 +136,7 @@ func SetAuthProxyHeaders(req *http.Request, username string, groups []string, ex } func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *authProxyRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -168,11 +160,7 @@ func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -199,11 +187,7 @@ func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, e } func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -259,11 +243,7 @@ func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Respons } func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.delegate.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.delegate) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate } @@ -318,11 +298,7 @@ func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, } func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) - } + tryCancelRequest(rt.WrappedRoundTripper(), req) } func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } @@ -402,11 +378,39 @@ func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debug } func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { - 
canceler.CancelRequest(req) + tryCancelRequest(rt.WrappedRoundTripper(), req) +} + +var knownAuthTypes = map[string]bool{ + "bearer": true, + "basic": true, + "negotiate": true, +} + +// maskValue masks credential content from authorization headers +// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +func maskValue(key string, value string) string { + if !strings.EqualFold(key, "Authorization") { + return value + } + if len(value) == 0 { + return "" + } + var authType string + if i := strings.Index(value, " "); i > 0 { + authType = value[0:i] + } else { + authType = value + } + if !knownAuthTypes[strings.ToLower(authType)] { + return "" + } + if len(value) > len(authType)+1 { + value = authType + " " } else { - klog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) + value = authType } + return value } func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -423,6 +427,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e klog.Infof("Request Headers:") for key, values := range reqInfo.RequestHeaders { for _, value := range values { + value = maskValue(key, value) klog.Infof(" %s: %s", key, value) } } diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go index b8cadd382af8a..bb32c3b4df533 100644 --- a/vendor/k8s.io/client-go/transport/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -25,6 +25,7 @@ import ( "time" "golang.org/x/oauth2" + "k8s.io/klog" ) @@ -81,6 +82,14 @@ func (tst *tokenSourceTransport) RoundTrip(req *http.Request) (*http.Response, e return tst.ort.RoundTrip(req) } +func (tst *tokenSourceTransport) CancelRequest(req *http.Request) { + if req.Header.Get("Authorization") != "" { + tryCancelRequest(tst.base, req) + return + } + tryCancelRequest(tst.ort, req) +} + type fileTokenSource struct { path string period time.Duration diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index 2a145c971a31c..cd8de98285995 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -23,6 +23,9 @@ import ( "fmt" "io/ioutil" "net/http" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/klog" ) // New returns an http.RoundTripper that will provide the authentication @@ -53,7 +56,7 @@ func New(config *Config) (http.RoundTripper, error) { // TLSConfigFor returns a tls.Config that will provide the transport level security defined // by the provided Config. Will return nil if no transport level security is requested. 
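The new TLS.NextProtos knob (see the config.go hunk above and TLSConfigFor below) lets a client steer ALPN; a rough sketch that forces HTTP/1.1 through the transport package, with the CA bytes and token as placeholder inputs:

package transportexample

import (
    "net/http"

    "k8s.io/client-go/transport"
)

// newHTTP1RoundTripper advertises only http/1.1 during TLS negotiation,
// so the resulting client never upgrades to HTTP/2.
func newHTTP1RoundTripper(caData []byte, token string) (http.RoundTripper, error) {
    cfg := &transport.Config{
        BearerToken: token,
        TLS: transport.TLSConfig{
            CAData:     caData,
            NextProtos: []string{"http/1.1"},
        },
    }
    return transport.New(cfg)
}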
func TLSConfigFor(c *Config) (*tls.Config, error) { - if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0) { + if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0 || len(c.TLS.NextProtos) > 0) { return nil, nil } if c.HasCA() && c.TLS.Insecure { @@ -70,6 +73,7 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { MinVersion: tls.VersionTLS12, InsecureSkipVerify: c.TLS.Insecure, ServerName: c.TLS.ServerName, + NextProtos: c.TLS.NextProtos, } if c.HasCA() { @@ -225,3 +229,17 @@ func (b *contextCanceller) RoundTrip(req *http.Request) (*http.Response, error) return b.rt.RoundTrip(req) } } + +func tryCancelRequest(rt http.RoundTripper, req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + switch rt := rt.(type) { + case canceler: + rt.CancelRequest(req) + case utilnet.RoundTripperWrapper: + tryCancelRequest(rt.WrappedRoundTripper(), req) + default: + klog.Warningf("Unable to cancel request for %T", rt) + } +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index 39cd72f95361d..c48ba03e8cd1d 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -30,7 +30,7 @@ type backoffEntry struct { } type Backoff struct { - sync.Mutex + sync.RWMutex Clock clock.Clock defaultDuration time.Duration maxDuration time.Duration @@ -57,8 +57,8 @@ func NewBackOff(initial, max time.Duration) *Backoff { // Get the current backoff Duration func (p *Backoff) Get(id string) time.Duration { - p.Lock() - defer p.Unlock() + p.RLock() + defer p.RUnlock() var delay time.Duration entry, ok := p.perItemBackoff[id] if ok { @@ -90,8 +90,8 @@ func (p *Backoff) Reset(id string) { // Returns True if the elapsed time since eventTime is smaller than the current backoff window func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { - p.Lock() - defer p.Unlock() + p.RLock() + defer p.RUnlock() entry, ok := p.perItemBackoff[id] if !ok { return false @@ -104,8 +104,8 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { // Returns True if time since lastupdate is less than the current backoff window. func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool { - p.Lock() - defer p.Unlock() + p.RLock() + defer p.RUnlock() entry, ok := p.perItemBackoff[id] if !ok { return false diff --git a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go index e671c044d004d..ffd912c56022e 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go @@ -17,6 +17,8 @@ limitations under the License. package flowcontrol import ( + "context" + "errors" "sync" "time" @@ -33,6 +35,8 @@ type RateLimiter interface { Stop() // QPS returns QPS of this rate limiter QPS() float32 + // Wait returns nil if a token is taken before the Context is done. 
+ Wait(ctx context.Context) error } type tokenBucketRateLimiter struct { @@ -98,6 +102,10 @@ func (t *tokenBucketRateLimiter) QPS() float32 { return t.qps } +func (t *tokenBucketRateLimiter) Wait(ctx context.Context) error { + return t.limiter.Wait(ctx) +} + type fakeAlwaysRateLimiter struct{} func NewFakeAlwaysRateLimiter() RateLimiter { @@ -116,6 +124,10 @@ func (t *fakeAlwaysRateLimiter) QPS() float32 { return 1 } +func (t *fakeAlwaysRateLimiter) Wait(ctx context.Context) error { + return nil +} + type fakeNeverRateLimiter struct { wg sync.WaitGroup } @@ -141,3 +153,7 @@ func (t *fakeNeverRateLimiter) Accept() { func (t *fakeNeverRateLimiter) QPS() float32 { return 1 } + +func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error { + return errors.New("can not be accept") +} diff --git a/vendor/k8s.io/client-go/util/homedir/homedir.go b/vendor/k8s.io/client-go/util/homedir/homedir.go index 816db57f599b8..3fdbeb8cf1fef 100644 --- a/vendor/k8s.io/client-go/util/homedir/homedir.go +++ b/vendor/k8s.io/client-go/util/homedir/homedir.go @@ -18,30 +18,75 @@ package homedir import ( "os" + "path/filepath" "runtime" ) -// HomeDir returns the home directory for the current user +// HomeDir returns the home directory for the current user. +// On Windows: +// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned. +// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned. +// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned. +// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned. func HomeDir() string { if runtime.GOOS == "windows" { + home := os.Getenv("HOME") + homeDriveHomePath := "" + if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { + homeDriveHomePath = homeDrive + homePath + } + userProfile := os.Getenv("USERPROFILE") - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); len(home) > 0 { - if _, err := os.Stat(home); err == nil { - return home + // Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file. + // %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility. 
+ for _, p := range []string{home, homeDriveHomePath, userProfile} { + if len(p) == 0 { + continue } - } - if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { - homeDir := homeDrive + homePath - if _, err := os.Stat(homeDir); err == nil { - return homeDir + if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil { + continue } + return p } - if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 { - if _, err := os.Stat(userProfile); err == nil { - return userProfile + + firstSetPath := "" + firstExistingPath := "" + + // Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools + for _, p := range []string{home, userProfile, homeDriveHomePath} { + if len(p) == 0 { + continue + } + if len(firstSetPath) == 0 { + // remember the first path that is set + firstSetPath = p + } + info, err := os.Stat(p) + if err != nil { + continue } + if len(firstExistingPath) == 0 { + // remember the first path that exists + firstExistingPath = p + } + if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 { + // return first path that is writeable + return p + } + } + + // If none are writeable, return first location that exists + if len(firstExistingPath) > 0 { + return firstExistingPath } + + // If none exist, return first location that is set + if len(firstSetPath) > 0 { + return firstSetPath + } + + // We've got nothing + return "" } return os.Getenv("HOME") } diff --git a/vendor/k8s.io/client-go/util/jsonpath/parser.go b/vendor/k8s.io/client-go/util/jsonpath/parser.go index 1af8f269f7ee0..e1aab6804f81f 100644 --- a/vendor/k8s.io/client-go/util/jsonpath/parser.go +++ b/vendor/k8s.io/client-go/util/jsonpath/parser.go @@ -37,7 +37,6 @@ type Parser struct { Name string Root *ListNode input string - cur *ListNode pos int start int width int @@ -186,8 +185,7 @@ func (p *Parser) parseInsideAction(cur *ListNode) error { func (p *Parser) parseRightDelim(cur *ListNode) error { p.pos += len(rightDelim) p.consumeText() - cur = p.Root - return p.parseText(cur) + return p.parseText(p.Root) } // parseIdentifier scans build-in keywords, like "range" "end" @@ -231,7 +229,7 @@ func (p *Parser) parseRecursive(cur *ListNode) error { func (p *Parser) parseNumber(cur *ListNode) error { r := p.peek() if r == '+' || r == '-' { - r = p.next() + p.next() } for { r = p.next() diff --git a/vendor/k8s.io/client-go/util/retry/util.go b/vendor/k8s.io/client-go/util/retry/util.go index 3ac0840ad0c22..c80ff0877fa3d 100644 --- a/vendor/k8s.io/client-go/util/retry/util.go +++ b/vendor/k8s.io/client-go/util/retry/util.go @@ -42,12 +42,12 @@ var DefaultBackoff = wait.Backoff{ Jitter: 0.1, } -// RetryConflict executes the provided function repeatedly, retrying if the server returns a conflicting -// write. Callers should preserve previous executions if they wish to retry changes. It performs an +// OnError executes the provided function repeatedly, retrying if the server returns a specified +// error. Callers should preserve previous executions if they wish to retry changes. It performs an // exponential backoff. // // var pod *api.Pod -// err := RetryOnConflict(DefaultBackoff, func() (err error) { +// err := retry.OnError(DefaultBackoff, errors.IsConflict, func() (err error) { // pod, err = c.Pods("mynamespace").UpdateStatus(podStatus) // return // }) @@ -58,14 +58,14 @@ var DefaultBackoff = wait.Backoff{ // ... // // TODO: Make Backoff an interface? 
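With RetryOnConflict generalized into OnError below, any predicate can drive the backoff; a hedged sketch retrying on server timeouts instead of conflicts (the predicate choice is just an example):

package retryexample

import (
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/client-go/util/retry"
)

// updateWithTimeoutRetry retries the caller's update under the default
// backoff whenever the apiserver reports a server timeout.
func updateWithTimeoutRetry(update func() error) error {
    return retry.OnError(retry.DefaultBackoff, apierrors.IsServerTimeout, update)
}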
-func RetryOnConflict(backoff wait.Backoff, fn func() error) error { +func OnError(backoff wait.Backoff, errorFunc func(error) bool, fn func() error) error { var lastConflictErr error err := wait.ExponentialBackoff(backoff, func() (bool, error) { err := fn() switch { case err == nil: return true, nil - case errors.IsConflict(err): + case errorFunc(err): lastConflictErr = err return false, nil default: @@ -77,3 +77,8 @@ func RetryOnConflict(backoff wait.Backoff, fn func() error) error { } return err } + +// RetryOnConflict executes the function function repeatedly, retrying if the server returns a conflicting +func RetryOnConflict(backoff wait.Backoff, fn func() error) error { + return OnError(backoff, errors.IsConflict, fn) +} diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index 6c9e944715829..0732798862517 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -44,13 +44,12 @@ func NewNamedDelayingQueue(name string) DelayingInterface { func newDelayingQueue(clock clock.Clock, name string) DelayingInterface { ret := &delayingType{ - Interface: NewNamed(name), - clock: clock, - heartbeat: clock.NewTicker(maxWait), - stopCh: make(chan struct{}), - waitingForAddCh: make(chan *waitFor, 1000), - metrics: newRetryMetrics(name), - deprecatedMetrics: newDeprecatedRetryMetrics(name), + Interface: NewNamed(name), + clock: clock, + heartbeat: clock.NewTicker(maxWait), + stopCh: make(chan struct{}), + waitingForAddCh: make(chan *waitFor, 1000), + metrics: newRetryMetrics(name), } go ret.waitingLoop() @@ -77,8 +76,7 @@ type delayingType struct { waitingForAddCh chan *waitFor // metrics counts the number of retries - metrics retryMetrics - deprecatedMetrics retryMetrics + metrics retryMetrics } // waitFor holds the data to add and the time it should be added @@ -154,7 +152,6 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { } q.metrics.retry() - q.deprecatedMetrics.retry() // immediately add things with no delay if duration <= 0 { diff --git a/vendor/k8s.io/client-go/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go index 2a00c74ac5f57..a5c976e0f9cab 100644 --- a/vendor/k8s.io/client-go/util/workqueue/doc.go +++ b/vendor/k8s.io/client-go/util/workqueue/doc.go @@ -23,4 +23,4 @@ limitations under the License. // * Multiple consumers and producers. In particular, it is allowed for an // item to be reenqueued while it is being processed. // * Shutdown notifications. -package workqueue +package workqueue // import "k8s.io/client-go/util/workqueue" diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go index be23ddd05f7c7..a3911bf2d6359 100644 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -87,14 +87,6 @@ type defaultQueueMetrics struct { // how long have current threads been working? 
unfinishedWorkSeconds SettableGaugeMetric longestRunningProcessor SettableGaugeMetric - - // TODO(danielqsj): Remove the following metrics, they are deprecated - deprecatedDepth GaugeMetric - deprecatedAdds CounterMetric - deprecatedLatency SummaryMetric - deprecatedWorkDuration SummaryMetric - deprecatedUnfinishedWorkSeconds SettableGaugeMetric - deprecatedLongestRunningProcessor SettableGaugeMetric } func (m *defaultQueueMetrics) add(item t) { @@ -103,9 +95,7 @@ func (m *defaultQueueMetrics) add(item t) { } m.adds.Inc() - m.deprecatedAdds.Inc() m.depth.Inc() - m.deprecatedDepth.Inc() if _, exists := m.addTimes[item]; !exists { m.addTimes[item] = m.clock.Now() } @@ -117,11 +107,9 @@ func (m *defaultQueueMetrics) get(item t) { } m.depth.Dec() - m.deprecatedDepth.Dec() m.processingStartTimes[item] = m.clock.Now() if startTime, exists := m.addTimes[item]; exists { m.latency.Observe(m.sinceInSeconds(startTime)) - m.deprecatedLatency.Observe(m.sinceInMicroseconds(startTime)) delete(m.addTimes, item) } } @@ -133,7 +121,6 @@ func (m *defaultQueueMetrics) done(item t) { if startTime, exists := m.processingStartTimes[item]; exists { m.workDuration.Observe(m.sinceInSeconds(startTime)) - m.deprecatedWorkDuration.Observe(m.sinceInMicroseconds(startTime)) delete(m.processingStartTimes, item) } } @@ -153,9 +140,7 @@ func (m *defaultQueueMetrics) updateUnfinishedWork() { // Convert to seconds; microseconds is unhelpfully granular for this. total /= 1000000 m.unfinishedWorkSeconds.Set(total) - m.deprecatedUnfinishedWorkSeconds.Set(total) m.longestRunningProcessor.Set(oldest / 1000000) - m.deprecatedLongestRunningProcessor.Set(oldest) // in microseconds. } type noMetrics struct{} @@ -200,13 +185,6 @@ type MetricsProvider interface { NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric NewRetriesMetric(name string) CounterMetric - NewDeprecatedDepthMetric(name string) GaugeMetric - NewDeprecatedAddsMetric(name string) CounterMetric - NewDeprecatedLatencyMetric(name string) SummaryMetric - NewDeprecatedWorkDurationMetric(name string) SummaryMetric - NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric - NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric - NewDeprecatedRetriesMetric(name string) CounterMetric } type noopMetricsProvider struct{} @@ -239,34 +217,6 @@ func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { return noopMetric{} } -func (_ noopMetricsProvider) NewDeprecatedDepthMetric(name string) GaugeMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedAddsMetric(name string) CounterMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedLatencyMetric(name string) SummaryMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedWorkDurationMetric(name string) SummaryMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric { - return noopMetric{} -} - -func (_ noopMetricsProvider) NewDeprecatedRetriesMetric(name string) CounterMetric { - return noopMetric{} -} - var globalMetricsFactory = queueMetricsFactory{ metricsProvider: noopMetricsProvider{}, } @@ -289,21 +239,15 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock 
clock.Clock) qu return noMetrics{} } return &defaultQueueMetrics{ - clock: clock, - depth: mp.NewDepthMetric(name), - adds: mp.NewAddsMetric(name), - latency: mp.NewLatencyMetric(name), - workDuration: mp.NewWorkDurationMetric(name), - unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), - longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), - deprecatedDepth: mp.NewDeprecatedDepthMetric(name), - deprecatedAdds: mp.NewDeprecatedAddsMetric(name), - deprecatedLatency: mp.NewDeprecatedLatencyMetric(name), - deprecatedWorkDuration: mp.NewDeprecatedWorkDurationMetric(name), - deprecatedUnfinishedWorkSeconds: mp.NewDeprecatedUnfinishedWorkSecondsMetric(name), - deprecatedLongestRunningProcessor: mp.NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name), - addTimes: map[t]time.Time{}, - processingStartTimes: map[t]time.Time{}, + clock: clock, + depth: mp.NewDepthMetric(name), + adds: mp.NewAddsMetric(name), + latency: mp.NewLatencyMetric(name), + workDuration: mp.NewWorkDurationMetric(name), + unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), + longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), + addTimes: map[t]time.Time{}, + processingStartTimes: map[t]time.Time{}, } } @@ -317,16 +261,6 @@ func newRetryMetrics(name string) retryMetrics { } } -func newDeprecatedRetryMetrics(name string) retryMetrics { - var ret *defaultRetryMetrics - if len(name) == 0 { - return ret - } - return &defaultRetryMetrics{ - retries: globalMetricsFactory.metricsProvider.NewDeprecatedRetriesMetric(name), - } -} - // SetProvider sets the metrics provider for all subsequently created work // queues. Only the first call has an effect. func SetProvider(metricsProvider MetricsProvider) { diff --git a/vendor/k8s.io/cloud-provider/OWNERS b/vendor/k8s.io/cloud-provider/OWNERS index 8ce19864150e3..866b3cc8cf277 100644 --- a/vendor/k8s.io/cloud-provider/OWNERS +++ b/vendor/k8s.io/cloud-provider/OWNERS @@ -23,9 +23,7 @@ reviewers: - zmerlynn - luxas - justinsb -- roberthbailey - eparis -- jlowdermilk - piosz - jsafrane - dims diff --git a/vendor/k8s.io/cloud-provider/SECURITY_CONTACTS b/vendor/k8s.io/cloud-provider/SECURITY_CONTACTS index c8282d6bd6950..68e73edaff6ce 100644 --- a/vendor/k8s.io/cloud-provider/SECURITY_CONTACTS +++ b/vendor/k8s.io/cloud-provider/SECURITY_CONTACTS @@ -13,3 +13,8 @@ cheftako andrewsykim dims +cjcullen +joelsmith +liggitt +philips +tallclair diff --git a/vendor/k8s.io/cloud-provider/cloud.go b/vendor/k8s.io/cloud-provider/cloud.go index 6db0219520bfd..38703f10302cf 100644 --- a/vendor/k8s.io/cloud-provider/cloud.go +++ b/vendor/k8s.io/cloud-provider/cloud.go @@ -104,6 +104,21 @@ func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types. } // LoadBalancer is an abstract, pluggable interface for load balancers. +// +// Cloud provider may chose to implement the logic for +// constructing/destroying specific kinds of load balancers in a +// controller separate from the ServiceController. If this is the case, +// then {Ensure,Update}LoadBalancer must return the ImplementedElsewhere error. +// For the given LB service, the GetLoadBalancer must return "exists=True" if +// there exists a LoadBalancer instance created by ServiceController. +// In all other cases, GetLoadBalancer must return a NotFound error. +// EnsureLoadBalancerDeleted must not return ImplementedElsewhere to ensure +// proper teardown of resources that were allocated by the ServiceController. 
+// This can happen if a user changes the type of LB via an update to the resource +// or when migrating from ServiceController to alternate implementation. +// The finalizer on the service will be added and removed by ServiceController +// irrespective of the ImplementedElsewhere error. Additional finalizers for +// LB services must be managed in the alternate implementation. type LoadBalancer interface { // TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service // GetLoadBalancer returns whether the specified load balancer exists, and @@ -199,9 +214,10 @@ type Routes interface { } var ( - InstanceNotFound = errors.New("instance not found") - DiskNotFound = errors.New("disk is not found") - NotImplemented = errors.New("unimplemented") + DiskNotFound = errors.New("disk is not found") + ImplementedElsewhere = errors.New("implemented by alternate to cloud provider") + InstanceNotFound = errors.New("instance not found") + NotImplemented = errors.New("unimplemented") ) // Zone represents the location of a particular machine. diff --git a/vendor/k8s.io/cloud-provider/go.mod b/vendor/k8s.io/cloud-provider/go.mod index bc8d84f3da3c7..10beea7469291 100644 --- a/vendor/k8s.io/cloud-provider/go.mod +++ b/vendor/k8s.io/cloud-provider/go.mod @@ -5,18 +5,22 @@ module k8s.io/cloud-provider go 1.12 require ( - k8s.io/api v0.0.0-20190819141258-3544db3b9e44 - k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d - k8s.io/client-go v0.0.0-20190819141724-e14f31a72a77 - k8s.io/klog v0.3.1 - k8s.io/utils v0.0.0-20190221042446-c2654d5206da + k8s.io/api v0.0.0-20191114100352-16d7abae0d2a + k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb + k8s.io/client-go v0.0.0-20191114101535-6c5935290e33 + k8s.io/klog v0.4.0 + k8s.io/utils v0.0.0-20190801114015-581e00157fb1 ) replace ( + golang.org/x/crypto => golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 + golang.org/x/lint => golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1 + golang.org/x/oauth2 => golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 - golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 - k8s.io/api => k8s.io/api v0.0.0-20190819141258-3544db3b9e44 - k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d - k8s.io/client-go => k8s.io/client-go v0.0.0-20190819141724-e14f31a72a77 + golang.org/x/text => golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db + golang.org/x/time => golang.org/x/time v0.0.0-20161028155119-f51c12702a4d + k8s.io/api => k8s.io/api v0.0.0-20191114100352-16d7abae0d2a + k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb + k8s.io/client-go => k8s.io/client-go v0.0.0-20191114101535-6c5935290e33 ) diff --git a/vendor/k8s.io/cloud-provider/go.sum b/vendor/k8s.io/cloud-provider/go.sum index cfccd2c74af82..402413d5264bd 100644 --- a/vendor/k8s.io/cloud-provider/go.sum +++ b/vendor/k8s.io/cloud-provider/go.sum @@ -1,57 +1,122 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod 
h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM= -github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d 
h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE= -github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3 h1:EooPXg51Tn+xmWPXJUGCnJhJSpeuMlBmfJVcqIRmmv8= -github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega 
v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68= golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= @@ -60,17 +125,29 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTm golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= @@ -79,14 +156,24 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/api v0.0.0-20190819141258-3544db3b9e44/go.mod h1:AOxZTnaXR/xiarlQL0JUfwQPxjmKDvVYoRp58cA7lUo= -k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d/go.mod h1:3jediapYqJ2w1BFw7lAZPCx7scubsTfosqHkhXCWJKw= -k8s.io/client-go v0.0.0-20190819141724-e14f31a72a77/go.mod h1:DmkJD5UDP87MVqUQ5VJ6Tj9Oen8WzXPhk3la4qpyG4g= -k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68= 
-k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= -k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= -k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.0.0-20191114100352-16d7abae0d2a/go.mod h1:qetVJgs5i8jwdFIdoOZ70ks0ecgU+dYwqZ2uD1srwOU= +k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb/go.mod h1:llRdnznGEAqC3DcNm6yEj472xaFVfLM7hnYofMb12tQ= +k8s.io/client-go v0.0.0-20191114101535-6c5935290e33/go.mod h1:4L/zQOBkEf4pArQJ+CMk1/5xjA30B5oyWv+Bzb44DOw= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= +k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ= +k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/k8s.io/cloud-provider/plugins.go b/vendor/k8s.io/cloud-provider/plugins.go index 9fc6aff8cd255..e8a41d87aac6c 100644 --- a/vendor/k8s.io/cloud-provider/plugins.go +++ b/vendor/k8s.io/cloud-provider/plugins.go @@ -42,11 +42,8 @@ var ( }{ {"aws", false, "The AWS provider is deprecated and will be removed in a future release"}, {"azure", false, "The Azure provider is deprecated and will be removed in a future release"}, - {"cloudstack", false, "The CloudStack Controller project is no longer maintained."}, {"gce", false, "The GCE provider is deprecated and will be removed in a future release"}, {"openstack", true, "https://github.com/kubernetes/cloud-provider-openstack"}, - {"ovirt", false, "The ovirt Controller project is no longer maintained."}, - {"photon", false, "The Photon Controller project is no longer maintained."}, {"vsphere", false, "The vSphere provider is deprecated and will be removed in a future release"}, } ) diff --git a/vendor/k8s.io/component-base/LICENSE b/vendor/k8s.io/component-base/LICENSE new file mode 100644 index 0000000000000..d645695673349 
--- /dev/null +++ b/vendor/k8s.io/component-base/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/component-base/metrics/BUILD.bazel b/vendor/k8s.io/component-base/metrics/BUILD.bazel new file mode 100644 index 0000000000000..d278f22738b50 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/BUILD.bazel @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "collector.go", + "counter.go", + "desc.go", + "gauge.go", + "histogram.go", + "http.go", + "labels.go", + "metric.go", + "opts.go", + "processstarttime.go", + "registry.go", + "summary.go", + "value.go", + "version.go", + "version_parser.go", + "wrappers.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/component-base/metrics", + importpath = "k8s.io/component-base/metrics", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/blang/semver:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library", + "//vendor/github.com/prometheus/client_model/go:go_default_library", + "//vendor/github.com/prometheus/procfs:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", + "//vendor/k8s.io/component-base/version:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) diff --git a/vendor/k8s.io/component-base/metrics/OWNERS b/vendor/k8s.io/component-base/metrics/OWNERS new file mode 100644 index 0000000000000..46c94893ab57a --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- sig-instrumentation-approvers +- logicalhan +reviewers: +- sig-instrumentation-reviewers +labels: +- sig/instrumentation diff --git a/vendor/k8s.io/component-base/metrics/collector.go b/vendor/k8s.io/component-base/metrics/collector.go new file mode 100644 index 0000000000000..f720249068c13 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/collector.go @@ -0,0 +1,148 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" +) + +// StableCollector extends the prometheus.Collector interface to allow customization of the +// metric registration process, it's especially intend to be used in scenario of custom collector. +type StableCollector interface { + prometheus.Collector + + // DescribeWithStability sends the super-set of all possible metrics.Desc collected + // by this StableCollector to the provided channel. 
+ DescribeWithStability(chan<- *Desc) + + // CollectWithStability sends each collected metrics.Metric via the provide channel. + CollectWithStability(chan<- Metric) + + // Create will initialize all Desc and it intends to be called by registry. + Create(version *semver.Version, self StableCollector) bool +} + +// BaseStableCollector which implements almost all of the methods defined by StableCollector +// is a convenient assistant for custom collectors. +// It is recommend that inherit BaseStableCollector when implementing custom collectors. +type BaseStableCollector struct { + descriptors []*Desc // stores all Desc collected from DescribeWithStability(). + registrable []*Desc // stores registrable Desc(not be hidden), is a subset of descriptors. + self StableCollector +} + +// DescribeWithStability sends all descriptors to the provided channel. +// Every custom collector should over-write this method. +func (bsc *BaseStableCollector) DescribeWithStability(ch chan<- *Desc) { + panic(fmt.Errorf("custom collector should over-write DescribeWithStability method")) +} + +// Describe sends all descriptors to the provided channel. +// It intend to be called by prometheus registry. +func (bsc *BaseStableCollector) Describe(ch chan<- *prometheus.Desc) { + for _, d := range bsc.registrable { + ch <- d.toPrometheusDesc() + } +} + +// CollectWithStability sends all metrics to the provided channel. +// Every custom collector should over-write this method. +func (bsc *BaseStableCollector) CollectWithStability(ch chan<- Metric) { + panic(fmt.Errorf("custom collector should over-write CollectWithStability method")) +} + +// Collect is called by the Prometheus registry when collecting metrics. +func (bsc *BaseStableCollector) Collect(ch chan<- prometheus.Metric) { + mch := make(chan Metric) + + go func() { + bsc.self.CollectWithStability(mch) + close(mch) + }() + + for m := range mch { + // nil Metric usually means hidden metrics + if m == nil { + continue + } + + ch <- prometheus.Metric(m) + } +} + +func (bsc *BaseStableCollector) add(d *Desc) { + bsc.descriptors = append(bsc.descriptors, d) +} + +// Init intends to be called by registry. +func (bsc *BaseStableCollector) init(self StableCollector) { + bsc.self = self + + dch := make(chan *Desc) + + // collect all possible descriptions from custom side + go func() { + bsc.self.DescribeWithStability(dch) + close(dch) + }() + + for d := range dch { + bsc.add(d) + } +} + +// Create intends to be called by registry. +// Create will return true as long as there is one or more metrics not be hidden. +// Otherwise return false, that means the whole collector will be ignored by registry. 
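To make the StableCollector/BaseStableCollector contract described above concrete, a minimal custom-collector sketch follows; podCountDesc, podCollector and the metric name are invented for illustration, and it assumes the k8s.io/component-base/metrics package as vendored here (registration would go through legacyregistry).

    import "k8s.io/component-base/metrics"

    var podCountDesc = metrics.NewDesc(
        "myapp_pods",                     // fqName
        "Number of pods known to myapp.", // help
        nil, nil,                         // variable and constant labels
        metrics.ALPHA, "",                // stability level, deprecated version
    )

    type podCollector struct {
        metrics.BaseStableCollector
        count func() float64 // hypothetical source of the value
    }

    // DescribeWithStability overrides the panicking default on BaseStableCollector.
    func (c *podCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
        ch <- podCountDesc
    }

    // CollectWithStability emits the current value; hidden metrics are dropped by the framework.
    func (c *podCollector) CollectWithStability(ch chan<- metrics.Metric) {
        ch <- metrics.NewLazyConstMetric(podCountDesc, metrics.GaugeValue, c.count())
    }

    // Registering the collector (e.g. legacyregistry.CustomMustRegister(&podCollector{...}))
    // is what invokes Create and wires Describe/Collect to Prometheus.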
+func (bsc *BaseStableCollector) Create(version *semver.Version, self StableCollector) bool { + bsc.init(self) + + for _, d := range bsc.descriptors { + if version != nil { + d.determineDeprecationStatus(*version) + } + + d.createOnce.Do(func() { + d.createLock.Lock() + defer d.createLock.Unlock() + + if d.IsHidden() { + // do nothing for hidden metrics + } else if d.IsDeprecated() { + d.initializeDeprecatedDesc() + bsc.registrable = append(bsc.registrable, d) + d.isCreated = true + } else { + d.initialize() + bsc.registrable = append(bsc.registrable, d) + d.isCreated = true + } + }) + } + + if len(bsc.registrable) > 0 { + return true + } + + return false +} + +// Check if our BaseStableCollector implements necessary interface +var _ StableCollector = &BaseStableCollector{} diff --git a/vendor/k8s.io/component-base/metrics/counter.go b/vendor/k8s.io/component-base/metrics/counter.go new file mode 100644 index 0000000000000..d641b07829934 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/counter.go @@ -0,0 +1,170 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" +) + +// Counter is our internal representation for our wrapping struct around prometheus +// counters. Counter implements both kubeCollector and CounterMetric. +type Counter struct { + CounterMetric + *CounterOpts + lazyMetric + selfCollector +} + +// NewCounter returns an object which satisfies the kubeCollector and CounterMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewCounter(opts *CounterOpts) *Counter { + opts.StabilityLevel.setDefaults() + + kc := &Counter{ + CounterOpts: opts, + lazyMetric: lazyMetric{}, + } + kc.setPrometheusCounter(noop) + kc.lazyInit(kc) + return kc +} + +// setPrometheusCounter sets the underlying CounterMetric object, i.e. the thing that does the measurement. +func (c *Counter) setPrometheusCounter(counter prometheus.Counter) { + c.CounterMetric = counter + c.initSelfCollection(counter) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (c *Counter) DeprecatedVersion() *semver.Version { + return parseSemver(c.CounterOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying Counter. Until this method is called +// the underlying counter is a no-op. +func (c *Counter) initializeMetric() { + c.CounterOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus counter. + c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts())) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) Counter. Until this method +// is called the underlying counter is a no-op. 
+func (c *Counter) initializeDeprecatedMetric() { + c.CounterOpts.markDeprecated() + c.initializeMetric() +} + +// CounterVec is the internal representation of our wrapping struct around prometheus +// counterVecs. CounterVec implements both kubeCollector and CounterVecMetric. +type CounterVec struct { + *prometheus.CounterVec + *CounterOpts + lazyMetric + originalLabels []string +} + +// NewCounterVec returns an object which satisfies the kubeCollector and CounterVecMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewCounterVec(opts *CounterOpts, labels []string) *CounterVec { + opts.StabilityLevel.setDefaults() + + cv := &CounterVec{ + CounterVec: noopCounterVec, + CounterOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{}, + } + cv.lazyInit(cv) + return cv +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *CounterVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.CounterOpts.DeprecatedVersion) + +} + +// initializeMetric invocation creates the actual underlying CounterVec. Until this method is called +// the underlying counterVec is a no-op. +func (v *CounterVec) initializeMetric() { + v.CounterOpts.annotateStabilityLevel() + v.CounterVec = prometheus.NewCounterVec(v.CounterOpts.toPromCounterOpts(), v.originalLabels) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) CounterVec. Until this method is called +// the underlying counterVec is a no-op. +func (v *CounterVec) initializeDeprecatedMetric() { + v.CounterOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus behavior actually results in the creation of a new metric +// if a metric with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/counter.go#L179-L197 + +// WithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created IFF the counterVec +// has been registered to a metrics registry. +func (v *CounterVec) WithLabelValues(lvs ...string) CounterMetric { + if !v.IsCreated() { + return noop // return no-op counter + } + return v.CounterVec.WithLabelValues(lvs...) +} + +// With returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created IFF the counterVec has +// been registered to a metrics registry. +func (v *CounterVec) With(labels map[string]string) CounterMetric { + if !v.IsCreated() { + return noop // return no-op counter + } + return v.CounterVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. 
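As a usage note for the lazy-instantiation behaviour documented above: a vector from this package measures nothing until it is registered, so the usual pattern is to declare it once, register it, and only then use WithLabelValues at call sites. The sketch assumes the k8s.io/component-base/metrics and legacyregistry packages as vendored here; requestCounter and the metric name are invented.

    import (
        "k8s.io/component-base/metrics"
        "k8s.io/component-base/metrics/legacyregistry"
    )

    var requestCounter = metrics.NewCounterVec(
        &metrics.CounterOpts{
            Name:           "myapp_requests_total",
            Help:           "Requests handled by myapp, partitioned by verb.",
            StabilityLevel: metrics.ALPHA,
        },
        []string{"verb"},
    )

    func init() {
        // Before this registration, WithLabelValues below returns a no-op counter.
        legacyregistry.MustRegister(requestCounter)
    }

    func recordRequest(verb string) {
        requestCounter.WithLabelValues(verb).Inc()
    }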
+func (v *CounterVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.CounterVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *CounterVec) Reset() { + if !v.IsCreated() { + return + } + + v.CounterVec.Reset() +} diff --git a/vendor/k8s.io/component-base/metrics/desc.go b/vendor/k8s.io/component-base/metrics/desc.go new file mode 100644 index 0000000000000..2d4b0793ec161 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/desc.go @@ -0,0 +1,173 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "strings" + "sync" + + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/klog" +) + +// Desc is a prometheus.Desc extension. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabels is the label names. Their label values are variable. + constLabels Labels + // variableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + + // promDesc is the descriptor used by every Prometheus Metric. + promDesc *prometheus.Desc + + // stabilityLevel represents the API guarantees for a given defined metric. + stabilityLevel StabilityLevel + // deprecatedVersion represents in which version this metric be deprecated. + deprecatedVersion string + + isDeprecated bool + isHidden bool + isCreated bool + createLock sync.RWMutex + markDeprecationOnce sync.Once + createOnce sync.Once + deprecateOnce sync.Once + hideOnce sync.Once + annotateOnce sync.Once +} + +// NewDesc extends prometheus.NewDesc with stability support. +// +// The stabilityLevel should be valid stability label, such as "metrics.ALPHA" +// and "metrics.STABLE"(Maybe "metrics.BETA" in future). Default value "metrics.ALPHA" +// will be used in case of empty or invalid stability label. +// +// The deprecatedVersion represents in which version this Metric be deprecated. +// The deprecation policy outlined by the control plane metrics stability KEP. +func NewDesc(fqName string, help string, variableLabels []string, constLabels Labels, + stabilityLevel StabilityLevel, deprecatedVersion string) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + constLabels: constLabels, + stabilityLevel: stabilityLevel, + deprecatedVersion: deprecatedVersion, + } + d.stabilityLevel.setDefaults() + + return d +} + +// String formats the Desc as a string. +// The stability metadata maybe annotated in 'HELP' section if called after registry, +// otherwise not. 
+func (d *Desc) String() string { + if d.isCreated { + return d.promDesc.String() + } + + return prometheus.NewDesc(d.fqName, d.help, d.variableLabels, prometheus.Labels(d.constLabels)).String() +} + +// toPrometheusDesc transform self to prometheus.Desc +func (d *Desc) toPrometheusDesc() *prometheus.Desc { + return d.promDesc +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (d *Desc) DeprecatedVersion() *semver.Version { + return parseSemver(d.deprecatedVersion) + +} + +func (d *Desc) determineDeprecationStatus(version semver.Version) { + selfVersion := d.DeprecatedVersion() + if selfVersion == nil { + return + } + d.markDeprecationOnce.Do(func() { + if selfVersion.LTE(version) { + d.isDeprecated = true + } + if ShouldShowHidden() { + klog.Warningf("Hidden metrics(%s) have been manually overridden, showing this very deprecated metric.", d.fqName) + return + } + if shouldHide(&version, selfVersion) { + // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369 + // klog.Warningf("This metric(%s) has been deprecated for more than one release, hiding.", d.fqName) + d.isHidden = true + } + }) +} + +// IsHidden returns if metric will be hidden +func (d *Desc) IsHidden() bool { + return d.isHidden +} + +// IsDeprecated returns if metric has been deprecated +func (d *Desc) IsDeprecated() bool { + return d.isDeprecated +} + +func (d *Desc) markDeprecated() { + d.deprecateOnce.Do(func() { + d.help = fmt.Sprintf("(Deprecated since %s) %s", d.deprecatedVersion, d.help) + }) +} + +func (d *Desc) annotateStabilityLevel() { + d.annotateOnce.Do(func() { + d.help = fmt.Sprintf("[%v] %v", d.stabilityLevel, d.help) + }) +} + +func (d *Desc) initialize() { + d.annotateStabilityLevel() + // this actually creates the underlying prometheus desc. + d.promDesc = prometheus.NewDesc(d.fqName, d.help, d.variableLabels, prometheus.Labels(d.constLabels)) +} + +func (d *Desc) initializeDeprecatedDesc() { + d.markDeprecated() + d.initialize() +} + +// GetRawDesc will returns a new *Desc with original parameters provided to NewDesc(). +// +// It will be useful in testing scenario that the same Desc be registered to different registry. +// 1. Desc `D` is registered to registry 'A' in TestA (Note: `D` maybe created) +// 2. Desc `D` is registered to registry 'B' in TestB (Note: since 'D' has been created once, thus will be ignored by registry 'B') +func (d *Desc) GetRawDesc() *Desc { + // remove stability from help if any + stabilityStr := fmt.Sprintf("[%v] ", d.stabilityLevel) + rawHelp := strings.Replace(d.help, stabilityStr, "", -1) + + return NewDesc(d.fqName, rawHelp, d.variableLabels, d.constLabels, d.stabilityLevel, d.deprecatedVersion) +} diff --git a/vendor/k8s.io/component-base/metrics/gauge.go b/vendor/k8s.io/component-base/metrics/gauge.go new file mode 100644 index 0000000000000..c9c187b7b0044 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/gauge.go @@ -0,0 +1,193 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/component-base/version" +) + +// Gauge is our internal representation for our wrapping struct around prometheus +// gauges. kubeGauge implements both kubeCollector and KubeGauge. +type Gauge struct { + GaugeMetric + *GaugeOpts + lazyMetric + selfCollector +} + +// NewGauge returns an object which satisfies the kubeCollector and KubeGauge interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewGauge(opts *GaugeOpts) *Gauge { + opts.StabilityLevel.setDefaults() + + kc := &Gauge{ + GaugeOpts: opts, + lazyMetric: lazyMetric{}, + } + kc.setPrometheusGauge(noop) + kc.lazyInit(kc) + return kc +} + +// setPrometheusGauge sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (g *Gauge) setPrometheusGauge(gauge prometheus.Gauge) { + g.GaugeMetric = gauge + g.initSelfCollection(gauge) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (g *Gauge) DeprecatedVersion() *semver.Version { + return parseSemver(g.GaugeOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying Gauge. Until this method is called +// the underlying gauge is a no-op. +func (g *Gauge) initializeMetric() { + g.GaugeOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + g.setPrometheusGauge(prometheus.NewGauge(g.GaugeOpts.toPromGaugeOpts())) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) Gauge. Until this method +// is called the underlying gauge is a no-op. +func (g *Gauge) initializeDeprecatedMetric() { + g.GaugeOpts.markDeprecated() + g.initializeMetric() +} + +// GaugeVec is the internal representation of our wrapping struct around prometheus +// gaugeVecs. kubeGaugeVec implements both kubeCollector and KubeGaugeVec. +type GaugeVec struct { + *prometheus.GaugeVec + *GaugeOpts + lazyMetric + originalLabels []string +} + +// NewGaugeVec returns an object which satisfies the kubeCollector and KubeGaugeVec interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec { + opts.StabilityLevel.setDefaults() + + cv := &GaugeVec{ + GaugeVec: noopGaugeVec, + GaugeOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{}, + } + cv.lazyInit(cv) + return cv +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *GaugeVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.GaugeOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying GaugeVec. Until this method is called +// the underlying gaugeVec is a no-op. +func (v *GaugeVec) initializeMetric() { + v.GaugeOpts.annotateStabilityLevel() + v.GaugeVec = prometheus.NewGaugeVec(v.GaugeOpts.toPromGaugeOpts(), v.originalLabels) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) GaugeVec. Until this method is called +// the underlying gaugeVec is a no-op. 
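To make the lazy-registration behavior above concrete, here is a hedged sketch (metric and queue names are made up) of defining a GaugeVec and registering it through the legacy global registry before use:

import (
    "k8s.io/component-base/metrics"
    "k8s.io/component-base/metrics/legacyregistry"
)

var queueDepth = metrics.NewGaugeVec(
    &metrics.GaugeOpts{
        Name:           "example_queue_depth", // hypothetical name
        Help:           "Current depth of the work queue.",
        StabilityLevel: metrics.ALPHA,
    },
    []string{"queue"},
)

func init() {
    // Until this registration happens, WithLabelValues below returns the no-op gauge.
    legacyregistry.MustRegister(queueDepth)
}

func recordDepth(n int) {
    queueDepth.WithLabelValues("example").Set(float64(n))
}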
+func (v *GaugeVec) initializeDeprecatedMetric() { + v.GaugeOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus behavior actually results in the creation of a new metric +// if a metric with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208 + +// WithLabelValues returns the GaugeMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new GaugeMetric is created IFF the gaugeVec +// has been registered to a metrics registry. +func (v *GaugeVec) WithLabelValues(lvs ...string) GaugeMetric { + if !v.IsCreated() { + return noop // return no-op gauge + } + return v.GaugeVec.WithLabelValues(lvs...) +} + +// With returns the GaugeMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new GaugeMetric is created IFF the gaugeVec has +// been registered to a metrics registry. +func (v *GaugeVec) With(labels map[string]string) GaugeMetric { + if !v.IsCreated() { + return noop // return no-op gauge + } + return v.GaugeVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *GaugeVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.GaugeVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *GaugeVec) Reset() { + if !v.IsCreated() { + return + } + + v.GaugeVec.Reset() +} + +func newGaugeFunc(opts GaugeOpts, function func() float64, v semver.Version) GaugeFunc { + g := NewGauge(&opts) + + if !g.Create(&v) { + return nil + } + + return prometheus.NewGaugeFunc(g.GaugeOpts.toPromGaugeOpts(), function) +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + v := parseVersion(version.Get()) + + return newGaugeFunc(opts, function, v) +} diff --git a/vendor/k8s.io/component-base/metrics/histogram.go b/vendor/k8s.io/component-base/metrics/histogram.go new file mode 100644 index 0000000000000..1a4bbda813efe --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/histogram.go @@ -0,0 +1,177 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" +) + +// DefBuckets is a wrapper for prometheus.DefBuckets +var DefBuckets = prometheus.DefBuckets + +// LinearBuckets is a wrapper for prometheus.LinearBuckets. +func LinearBuckets(start, width float64, count int) []float64 { + return prometheus.LinearBuckets(start, width, count) +} + +// ExponentialBuckets is a wrapper for prometheus.ExponentialBuckets. +func ExponentialBuckets(start, factor float64, count int) []float64 { + return prometheus.ExponentialBuckets(start, factor, count) +} + +// Histogram is our internal representation for our wrapping struct around prometheus +// histograms. Summary implements both kubeCollector and ObserverMetric +type Histogram struct { + ObserverMetric + *HistogramOpts + lazyMetric + selfCollector +} + +// NewHistogram returns an object which is Histogram-like. However, nothing +// will be measured until the histogram is registered somewhere. +func NewHistogram(opts *HistogramOpts) *Histogram { + opts.StabilityLevel.setDefaults() + + h := &Histogram{ + HistogramOpts: opts, + lazyMetric: lazyMetric{}, + } + h.setPrometheusHistogram(noopMetric{}) + h.lazyInit(h) + return h +} + +// setPrometheusHistogram sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (h *Histogram) setPrometheusHistogram(histogram prometheus.Histogram) { + h.ObserverMetric = histogram + h.initSelfCollection(histogram) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (h *Histogram) DeprecatedVersion() *semver.Version { + return parseSemver(h.HistogramOpts.DeprecatedVersion) +} + +// initializeMetric invokes the actual prometheus.Histogram object instantiation +// and stores a reference to it +func (h *Histogram) initializeMetric() { + h.HistogramOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + h.setPrometheusHistogram(prometheus.NewHistogram(h.HistogramOpts.toPromHistogramOpts())) +} + +// initializeDeprecatedMetric invokes the actual prometheus.Histogram object instantiation +// but modifies the Help description prior to object instantiation. +func (h *Histogram) initializeDeprecatedMetric() { + h.HistogramOpts.markDeprecated() + h.initializeMetric() +} + +// HistogramVec is the internal representation of our wrapping struct around prometheus +// histogramVecs. +type HistogramVec struct { + *prometheus.HistogramVec + *HistogramOpts + lazyMetric + originalLabels []string +} + +// NewHistogramVec returns an object which satisfies kubeCollector and wraps the +// prometheus.HistogramVec object. However, the object returned will not measure +// anything unless the collector is first registered, since the metric is lazily instantiated. 
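A hedged sketch of the same pattern for histograms (the metric name and bucket layout are illustrative assumptions only):

import "k8s.io/component-base/metrics"

var requestLatency = metrics.NewHistogramVec(
    &metrics.HistogramOpts{
        Name:    "example_request_duration_seconds", // hypothetical
        Help:    "Request latency in seconds.",
        Buckets: metrics.ExponentialBuckets(0.001, 2, 12), // 1ms up to roughly 2s
    },
    []string{"verb"},
)

// Once registered, observations are recorded per label value:
//    requestLatency.WithLabelValues("GET").Observe(0.042)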
+func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec { + opts.StabilityLevel.setDefaults() + + v := &HistogramVec{ + HistogramVec: noopHistogramVec, + HistogramOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{}, + } + v.lazyInit(v) + return v +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *HistogramVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.HistogramOpts.DeprecatedVersion) +} + +func (v *HistogramVec) initializeMetric() { + v.HistogramOpts.annotateStabilityLevel() + v.HistogramVec = prometheus.NewHistogramVec(v.HistogramOpts.toPromHistogramOpts(), v.originalLabels) +} + +func (v *HistogramVec) initializeDeprecatedMetric() { + v.HistogramOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus behavior actually results in the creation of a new metric +// if a metric with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470 + +// WithLabelValues returns the ObserverMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new ObserverMetric is created IFF the HistogramVec +// has been registered to a metrics registry. +func (v *HistogramVec) WithLabelValues(lvs ...string) ObserverMetric { + if !v.IsCreated() { + return noop + } + return v.HistogramVec.WithLabelValues(lvs...) +} + +// With returns the ObserverMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new ObserverMetric is created IFF the HistogramVec has +// been registered to a metrics registry. +func (v *HistogramVec) With(labels map[string]string) ObserverMetric { + if !v.IsCreated() { + return noop + } + return v.HistogramVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *HistogramVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.HistogramVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *HistogramVec) Reset() { + if !v.IsCreated() { + return + } + + v.HistogramVec.Reset() +} diff --git a/vendor/k8s.io/component-base/metrics/http.go b/vendor/k8s.io/component-base/metrics/http.go new file mode 100644 index 0000000000000..ecf722e914446 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/http.go @@ -0,0 +1,63 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. + HTTPErrorOnError promhttp.HandlerErrorHandling = iota + + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. + ContinueOnError + + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts promhttp.HandlerOpts + +func (ho *HandlerOpts) toPromhttpHandlerOpts() promhttp.HandlerOpts { + return promhttp.HandlerOpts(*ho) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg Gatherer, opts HandlerOpts) http.Handler { + return promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts()) +} diff --git a/vendor/k8s.io/component-base/metrics/labels.go b/vendor/k8s.io/component-base/metrics/labels.go new file mode 100644 index 0000000000000..11af3ae4249c7 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/labels.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Labels represents a collection of label name -> value mappings. 
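Putting the HandlerFor and HandlerOpts wrappers above to use, a minimal serving sketch might look like the following (the URL path and error-handling choice are assumptions, not prescribed by the package):

import (
    "net/http"

    "k8s.io/component-base/metrics"
)

func serveMetrics(reg metrics.KubeRegistry) {
    // KubeRegistry satisfies Gatherer, so it can be handed to HandlerFor directly.
    http.Handle("/metrics", metrics.HandlerFor(reg, metrics.HandlerOpts{
        ErrorHandling: metrics.ContinueOnError,
    }))
}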
+type Labels prometheus.Labels diff --git a/vendor/k8s.io/component-base/metrics/legacyregistry/BUILD.bazel b/vendor/k8s.io/component-base/metrics/legacyregistry/BUILD.bazel new file mode 100644 index 0000000000000..514abba74fae3 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/legacyregistry/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["registry.go"], + importmap = "k8s.io/kops/vendor/k8s.io/component-base/metrics/legacyregistry", + importpath = "k8s.io/component-base/metrics/legacyregistry", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library", + "//vendor/k8s.io/component-base/metrics:go_default_library", + ], +) diff --git a/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go new file mode 100644 index 0000000000000..e48a4a771f884 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package legacyregistry + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "k8s.io/component-base/metrics" +) + +var ( + defaultRegistry = metrics.NewKubeRegistry() + // DefaultGatherer exposes the global registry gatherer + DefaultGatherer metrics.Gatherer = defaultRegistry +) + +func init() { + RawMustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{})) + RawMustRegister(prometheus.NewGoCollector()) +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. You might want to consider using promhttp.Handler instead. +func Handler() http.Handler { + return promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, promhttp.HandlerFor(defaultRegistry, promhttp.HandlerOpts{})) +} + +// Register registers a collectable metric but uses the global registry +func Register(c metrics.Registerable) error { + err := defaultRegistry.Register(c) + return err +} + +// MustRegister registers registerable metrics but uses the global registry. +func MustRegister(cs ...metrics.Registerable) { + defaultRegistry.MustRegister(cs...) +} + +// RawMustRegister registers prometheus collectors but uses the global registry, this +// bypasses the metric stability framework +// +// Deprecated +func RawMustRegister(cs ...prometheus.Collector) { + defaultRegistry.RawMustRegister(cs...) 
+} + +// RawRegister registers a prometheus collector but uses the global registry, this +// bypasses the metric stability framework +// +// Deprecated +func RawRegister(c prometheus.Collector) error { + err := defaultRegistry.RawRegister(c) + return err +} + +// CustomRegister registers a custom collector but uses the global registry. +func CustomRegister(c metrics.StableCollector) error { + err := defaultRegistry.CustomRegister(c) + + //TODO(RainbowMango): Maybe we can wrap this error by error wrapping.(Golang 1.13) + _ = prometheus.Register(c) + + return err +} + +// CustomMustRegister registers custom collectors but uses the global registry. +func CustomMustRegister(cs ...metrics.StableCollector) { + defaultRegistry.CustomMustRegister(cs...) + + for _, c := range cs { + prometheus.MustRegister(c) + } +} diff --git a/vendor/k8s.io/component-base/metrics/metric.go b/vendor/k8s.io/component-base/metrics/metric.go new file mode 100644 index 0000000000000..015627b599071 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/metric.go @@ -0,0 +1,222 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + + "k8s.io/klog" +) + +/* +kubeCollector extends the prometheus.Collector interface to allow customization of the metric +registration process. Defer metric initialization until Create() is called, which then +delegates to the underlying metric's initializeMetric or initializeDeprecatedMetric +method call depending on whether the metric is deprecated or not. +*/ +type kubeCollector interface { + Collector + lazyKubeMetric + DeprecatedVersion() *semver.Version + // Each collector metric should provide an initialization function + // for both deprecated and non-deprecated variants of a metric. This + // is necessary since metric instantiation will be deferred + // until the metric is actually registered somewhere. + initializeMetric() + initializeDeprecatedMetric() +} + +/* +lazyKubeMetric defines our metric registration interface. lazyKubeMetric objects are expected +to lazily instantiate metrics (i.e defer metric instantiation until when +the Create() function is explicitly called). +*/ +type lazyKubeMetric interface { + Create(*semver.Version) bool + IsCreated() bool + IsHidden() bool + IsDeprecated() bool +} + +/* +lazyMetric implements lazyKubeMetric. A lazy metric is lazy because it waits until metric +registration time before instantiation. Add it as an anonymous field to a struct that +implements kubeCollector to get deferred registration behavior. You must call lazyInit +with the kubeCollector itself as an argument. 
+*/ +type lazyMetric struct { + isDeprecated bool + isHidden bool + isCreated bool + createLock sync.RWMutex + markDeprecationOnce sync.Once + createOnce sync.Once + self kubeCollector +} + +func (r *lazyMetric) IsCreated() bool { + r.createLock.RLock() + defer r.createLock.RUnlock() + return r.isCreated +} + +// lazyInit provides the lazyMetric with a reference to the kubeCollector it is supposed +// to allow lazy initialization for. It should be invoked in the factory function which creates new +// kubeCollector type objects. +func (r *lazyMetric) lazyInit(self kubeCollector) { + r.self = self +} + +// determineDeprecationStatus figures out whether the lazy metric should be deprecated or not. +// This method takes a Version argument which should be the version of the binary in which +// this code is currently being executed. +func (r *lazyMetric) determineDeprecationStatus(version semver.Version) { + selfVersion := r.self.DeprecatedVersion() + if selfVersion == nil { + return + } + r.markDeprecationOnce.Do(func() { + if selfVersion.LTE(version) { + r.isDeprecated = true + } + if ShouldShowHidden() { + klog.Warningf("Hidden metrics have been manually overridden, showing this very deprecated metric.") + return + } + if shouldHide(&version, selfVersion) { + // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369 + // klog.Warningf("This metric has been deprecated for more than one release, hiding.") + r.isHidden = true + } + }) +} + +func (r *lazyMetric) IsHidden() bool { + return r.isHidden +} + +func (r *lazyMetric) IsDeprecated() bool { + return r.isDeprecated +} + +// Create forces the initialization of metric which has been deferred until +// the point at which this method is invoked. This method will determine whether +// the metric is deprecated or hidden, no-opting if the metric should be considered +// hidden. Furthermore, this function no-opts and returns true if metric is already +// created. +func (r *lazyMetric) Create(version *semver.Version) bool { + if version != nil { + r.determineDeprecationStatus(*version) + } + // let's not create if this metric is slated to be hidden + if r.IsHidden() { + return false + } + r.createOnce.Do(func() { + r.createLock.Lock() + defer r.createLock.Unlock() + r.isCreated = true + if r.IsDeprecated() { + r.self.initializeDeprecatedMetric() + } else { + r.self.initializeMetric() + } + }) + return r.IsCreated() +} + +// ClearState will clear all the states marked by Create. +// It intends to be used for re-register a hidden metric. +func (r *lazyMetric) ClearState() { + r.createLock.Lock() + defer r.createLock.Unlock() + + r.isDeprecated = false + r.isHidden = false + r.isCreated = false + r.markDeprecationOnce = *(new(sync.Once)) + r.createOnce = *(new(sync.Once)) +} + +/* +This code is directly lifted from the prometheus codebase. It's a convenience struct which +allows you satisfy the Collector interface automatically if you already satisfy the Metric interface. 
+ +For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/collector.go#L98-L120 +*/ +type selfCollector struct { + metric prometheus.Metric +} + +func (c *selfCollector) initSelfCollection(m prometheus.Metric) { + c.metric = m +} + +func (c *selfCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.metric.Desc() +} + +func (c *selfCollector) Collect(ch chan<- prometheus.Metric) { + ch <- c.metric +} + +// no-op vecs for convenience +var noopCounterVec = &prometheus.CounterVec{} +var noopHistogramVec = &prometheus.HistogramVec{} +var noopSummaryVec = &prometheus.SummaryVec{} +var noopGaugeVec = &prometheus.GaugeVec{} +var noopObserverVec = &noopObserverVector{} + +// just use a convenience struct for all the no-ops +var noop = &noopMetric{} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Add(float64) {} +func (noopMetric) Dec() {} +func (noopMetric) Set(float64) {} +func (noopMetric) Sub(float64) {} +func (noopMetric) Observe(float64) {} +func (noopMetric) SetToCurrentTime() {} +func (noopMetric) Desc() *prometheus.Desc { return nil } +func (noopMetric) Write(*dto.Metric) error { return nil } +func (noopMetric) Describe(chan<- *prometheus.Desc) {} +func (noopMetric) Collect(chan<- prometheus.Metric) {} + +type noopObserverVector struct{} + +func (noopObserverVector) GetMetricWith(prometheus.Labels) (prometheus.Observer, error) { + return noop, nil +} +func (noopObserverVector) GetMetricWithLabelValues(...string) (prometheus.Observer, error) { + return noop, nil +} +func (noopObserverVector) With(prometheus.Labels) prometheus.Observer { return noop } +func (noopObserverVector) WithLabelValues(...string) prometheus.Observer { return noop } +func (noopObserverVector) CurryWith(prometheus.Labels) (prometheus.ObserverVec, error) { + return noopObserverVec, nil +} +func (noopObserverVector) MustCurryWith(prometheus.Labels) prometheus.ObserverVec { + return noopObserverVec +} +func (noopObserverVector) Describe(chan<- *prometheus.Desc) {} +func (noopObserverVector) Collect(chan<- prometheus.Metric) {} diff --git a/vendor/k8s.io/component-base/metrics/opts.go b/vendor/k8s.io/component-base/metrics/opts.go new file mode 100644 index 0000000000000..6b4a290d27d4a --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/opts.go @@ -0,0 +1,245 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// KubeOpts is superset struct for prometheus.Opts. The prometheus Opts structure +// is purposefully not embedded here because that would change struct initialization +// in the manner which people are currently accustomed. +// +// Name must be set to a non-empty string. DeprecatedVersion is defined only +// if the metric for which this options applies is, in fact, deprecated. 
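For the DeprecatedVersion behavior described above, a hedged sketch (hypothetical metric name) of a gauge slated for deprecation:

import "k8s.io/component-base/metrics"

// In a 1.17 binary this gauge's help text gains a "(Deprecated since 1.17.0)"
// prefix; one minor release later it is hidden unless
// --show-hidden-metrics-for-version is set.
var legacyQueueSize = metrics.NewGauge(&metrics.GaugeOpts{
    Name:              "example_legacy_queue_size",
    Help:              "Kept for backward compatibility.",
    DeprecatedVersion: "1.17.0",
    StabilityLevel:    metrics.ALPHA,
})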
+type KubeOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + return prometheus.BuildFQName(namespace, subsystem, name) +} + +// StabilityLevel represents the API guarantees for a given defined metric. +type StabilityLevel string + +const ( + // ALPHA metrics have no stability guarantees, as such, labels may + // be arbitrarily added/removed and the metric may be deleted at any time. + ALPHA StabilityLevel = "ALPHA" + // STABLE metrics are guaranteed not be mutated and removal is governed by + // the deprecation policy outlined in by the control plane metrics stability KEP. + STABLE StabilityLevel = "STABLE" +) + +// setDefaults takes 'ALPHA' in case of empty. +func (sl *StabilityLevel) setDefaults() { + switch *sl { + case "": + *sl = ALPHA + default: + // no-op, since we have a StabilityLevel already + } +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts KubeOpts + +// Modify help description on the metric description. +func (o *CounterOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *CounterOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *CounterOpts) toPromCounterOpts() prometheus.CounterOpts { + return prometheus.CounterOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + } +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts KubeOpts + +// Modify help description on the metric description. +func (o *GaugeOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *GaugeOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o GaugeOpts) toPromGaugeOpts() prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + } +} + +// HistogramOpts bundles the options for creating a Histogram metric. 
It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Buckets []float64 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel +} + +// Modify help description on the metric description. +func (o *HistogramOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *HistogramOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o HistogramOpts) toPromHistogramOpts() prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Buckets: o.Buckets, + } +} + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v0.10 of the library. +type SummaryOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Objectives map[float64]float64 + MaxAge time.Duration + AgeBuckets uint32 + BufCap uint32 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel +} + +// Modify help description on the metric description. +func (o *SummaryOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *SummaryOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// Deprecated: DefObjectives will not be used as the default objectives in +// v1.0.0 of the library. The default Summary will have no quantiles then. +var ( + defObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} +) + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o SummaryOpts) toPromSummaryOpts() prometheus.SummaryOpts { + // we need to retain existing quantile behavior for backwards compatibility, + // so let's do what prometheus used to do prior to v1. 
+ objectives := o.Objectives + if objectives == nil { + objectives = defObjectives + } + return prometheus.SummaryOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Objectives: objectives, + MaxAge: o.MaxAge, + AgeBuckets: o.AgeBuckets, + BufCap: o.BufCap, + } +} diff --git a/vendor/k8s.io/component-base/metrics/processstarttime.go b/vendor/k8s.io/component-base/metrics/processstarttime.go new file mode 100644 index 0000000000000..56c5a36d63e1e --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/processstarttime.go @@ -0,0 +1,66 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "os" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs" + + "k8s.io/klog" +) + +var processStartTime = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "process_start_time_seconds", + Help: "Start time of the process since unix epoch in seconds.", + }, + []string{}, +) + +// Registerer is an interface expected by RegisterProcessStartTime in order to register the metric +type Registerer interface { + Register(prometheus.Collector) error +} + +// RegisterProcessStartTime registers the process_start_time_seconds to +// a prometheus registry. This metric needs to be included to ensure counter +// data fidelity. +func RegisterProcessStartTime(registrationFunc func(prometheus.Collector) error) error { + start, err := getProcessStart() + if err != nil { + klog.Errorf("Could not get process start time, %v", err) + start = float64(time.Now().Unix()) + } + processStartTime.WithLabelValues().Set(start) + return registrationFunc(processStartTime) +} + +func getProcessStart() (float64, error) { + pid := os.Getpid() + p, err := procfs.NewProc(pid) + if err != nil { + return 0, err + } + + if stat, err := p.NewStat(); err == nil { + return stat.StartTime() + } + return 0, err +} diff --git a/vendor/k8s.io/component-base/metrics/registry.go b/vendor/k8s.io/component-base/metrics/registry.go new file mode 100644 index 0000000000000..ed018332c7336 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/registry.go @@ -0,0 +1,265 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package metrics
+
+import (
+    "fmt"
+    "sync"
+    "sync/atomic"
+
+    "github.com/blang/semver"
+    "github.com/prometheus/client_golang/prometheus"
+    dto "github.com/prometheus/client_model/go"
+
+    apimachineryversion "k8s.io/apimachinery/pkg/version"
+    "k8s.io/component-base/version"
+)
+
+var (
+    showHiddenOnce sync.Once
+    showHidden     atomic.Value
+    registries     []*kubeRegistry // stores all registries created by NewKubeRegistry()
+    registriesLock sync.RWMutex
+)
+
+// shouldHide is used to check whether a metric with the given deprecated version should be hidden
+// according to the metrics deprecation lifecycle.
+func shouldHide(currentVersion *semver.Version, deprecatedVersion *semver.Version) bool {
+    guardVersion, err := semver.Make(fmt.Sprintf("%d.%d.0", currentVersion.Major, currentVersion.Minor))
+    if err != nil {
+        panic("failed to make version from current version")
+    }
+
+    if deprecatedVersion.LT(guardVersion) {
+        return true
+    }
+
+    return false
+}
+
+func validateShowHiddenMetricsVersion(currentVersion semver.Version, targetVersionStr string) error {
+    if targetVersionStr == "" {
+        return nil
+    }
+
+    validVersionStr := fmt.Sprintf("%d.%d", currentVersion.Major, currentVersion.Minor-1)
+    if targetVersionStr != validVersionStr {
+        return fmt.Errorf("--show-hidden-metrics-for-version must be omitted or have the value '%v'. Only the previous minor version is allowed", validVersionStr)
+    }
+
+    return nil
+}
+
+// ValidateShowHiddenMetricsVersion checks that the version for which hidden metrics should be shown is valid.
+func ValidateShowHiddenMetricsVersion(v string) []error {
+    err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), v)
+    if err != nil {
+        return []error{err}
+    }
+
+    return nil
+}
+
+// SetShowHidden will enable showing hidden metrics. This is a no-op
+// after the initial call.
+func SetShowHidden() {
+    showHiddenOnce.Do(func() {
+        showHidden.Store(true)
+
+        // re-register collectors that were hidden during the last registration phase.
+        for _, r := range registries {
+            r.enableHiddenCollectors()
+        }
+    })
+}
+
+// ShouldShowHidden returns whether showing hidden deprecated metrics
+// is enabled. While the primary use case for this is internal (to determine
+// registration behavior), this can also be used to introspect the current state.
+func ShouldShowHidden() bool {
+    return showHidden.Load() != nil && showHidden.Load().(bool)
+}
+
+// Registerable is an interface for a collector metric which we
+// will register with KubeRegistry.
+type Registerable interface {
+    prometheus.Collector
+
+    // Create will mark deprecated state for the collector
+    Create(version *semver.Version) bool
+
+    // ClearState will clear all the states marked by Create.
+    ClearState()
+}
+
+// KubeRegistry is an interface which implements a subset of prometheus.Registerer and
+// prometheus.Gatherer interfaces
+type KubeRegistry interface {
+    // Deprecated
+    RawRegister(prometheus.Collector) error
+    // Deprecated
+    RawMustRegister(...prometheus.Collector)
+    CustomRegister(c StableCollector) error
+    CustomMustRegister(cs ...StableCollector)
+    Register(Registerable) error
+    MustRegister(...Registerable)
+    Unregister(Registerable) bool
+    Gather() ([]*dto.MetricFamily, error)
+}
+
+// kubeRegistry is a wrapper around a prometheus registry-type object. Upon initialization
+// the kubernetes binary version information is loaded into the registry object, so that
+// automatic behavior can be configured for metric versioning.
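A sketch of how a component might wire the flag named in the error message above to these helpers (the wrapper function itself is hypothetical):

import "k8s.io/component-base/metrics"

func applyShowHiddenFlag(value string) error {
    if errs := metrics.ValidateShowHiddenMetricsVersion(value); len(errs) > 0 {
        return errs[0]
    }
    if value != "" {
        // Re-registers collectors that were hidden when their registry was created.
        metrics.SetShowHidden()
    }
    return nil
}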
+type kubeRegistry struct { + PromRegistry + version semver.Version + hiddenCollectors []Registerable // stores all collectors that has been hidden + hiddenCollectorsLock sync.RWMutex +} + +// Register registers a new Collector to be included in metrics +// collection. It returns an error if the descriptors provided by the +// Collector are invalid or if they — in combination with descriptors of +// already registered Collectors — do not fulfill the consistency and +// uniqueness criteria described in the documentation of metric.Desc. +func (kr *kubeRegistry) Register(c Registerable) error { + if c.Create(&kr.version) { + return kr.PromRegistry.Register(c) + } + + kr.trackHiddenCollector(c) + + return nil +} + +// MustRegister works like Register but registers any number of +// Collectors and panics upon the first registration that causes an +// error. +func (kr *kubeRegistry) MustRegister(cs ...Registerable) { + metrics := make([]prometheus.Collector, 0, len(cs)) + for _, c := range cs { + if c.Create(&kr.version) { + metrics = append(metrics, c) + } else { + kr.trackHiddenCollector(c) + } + } + kr.PromRegistry.MustRegister(metrics...) +} + +// CustomRegister registers a new custom collector. +func (kr *kubeRegistry) CustomRegister(c StableCollector) error { + if c.Create(&kr.version, c) { + return kr.PromRegistry.Register(c) + } + + return nil +} + +// CustomMustRegister works like CustomRegister but registers any number of +// StableCollectors and panics upon the first registration that causes an +// error. +func (kr *kubeRegistry) CustomMustRegister(cs ...StableCollector) { + collectors := make([]prometheus.Collector, 0, len(cs)) + for _, c := range cs { + if c.Create(&kr.version, c) { + collectors = append(collectors, c) + } + } + + kr.PromRegistry.MustRegister(collectors...) +} + +// RawRegister takes a native prometheus.Collector and registers the collector +// to the registry. This bypasses metrics safety checks, so should only be used +// to register custom prometheus collectors. +// +// Deprecated +func (kr *kubeRegistry) RawRegister(c prometheus.Collector) error { + return kr.PromRegistry.Register(c) +} + +// RawMustRegister takes a native prometheus.Collector and registers the collector +// to the registry. This bypasses metrics safety checks, so should only be used +// to register custom prometheus collectors. +// +// Deprecated +func (kr *kubeRegistry) RawMustRegister(cs ...prometheus.Collector) { + kr.PromRegistry.MustRegister(cs...) +} + +// Unregister unregisters the Collector that equals the Collector passed +// in as an argument. (Two Collectors are considered equal if their +// Describe method yields the same set of descriptors.) The function +// returns whether a Collector was unregistered. Note that an unchecked +// Collector cannot be unregistered (as its Describe method does not +// yield any descriptor). +func (kr *kubeRegistry) Unregister(collector Registerable) bool { + return kr.PromRegistry.Unregister(collector) +} + +// Gather calls the Collect method of the registered Collectors and then +// gathers the collected metrics into a lexicographically sorted slice +// of uniquely named MetricFamily protobufs. Gather ensures that the +// returned slice is valid and self-consistent so that it can be used +// for valid exposition. As an exception to the strict consistency +// requirements described for metric.Desc, Gather will tolerate +// different sets of label names for metrics of the same metric family. 
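As a small usage sketch of Gather (the helper below is hypothetical), callers typically iterate over the returned MetricFamily slice:

import "k8s.io/component-base/metrics"

func metricNames(r metrics.KubeRegistry) ([]string, error) {
    families, err := r.Gather()
    if err != nil {
        return nil, err
    }
    names := make([]string, 0, len(families))
    for _, mf := range families {
        names = append(names, mf.GetName())
    }
    return names, nil
}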
+func (kr *kubeRegistry) Gather() ([]*dto.MetricFamily, error) { + return kr.PromRegistry.Gather() +} + +// trackHiddenCollector stores all hidden collectors. +func (kr *kubeRegistry) trackHiddenCollector(c Registerable) { + kr.hiddenCollectorsLock.Lock() + defer kr.hiddenCollectorsLock.Unlock() + + kr.hiddenCollectors = append(kr.hiddenCollectors, c) +} + +// enableHiddenCollectors will re-register all of the hidden collectors. +func (kr *kubeRegistry) enableHiddenCollectors() { + kr.hiddenCollectorsLock.Lock() + defer kr.hiddenCollectorsLock.Unlock() + + for _, c := range kr.hiddenCollectors { + c.ClearState() + kr.MustRegister(c) + } + kr.hiddenCollectors = nil +} + +func newKubeRegistry(v apimachineryversion.Info) *kubeRegistry { + r := &kubeRegistry{ + PromRegistry: prometheus.NewRegistry(), + version: parseVersion(v), + } + + registriesLock.Lock() + defer registriesLock.Unlock() + registries = append(registries, r) + + return r +} + +// NewKubeRegistry creates a new vanilla Registry without any Collectors +// pre-registered. +func NewKubeRegistry() KubeRegistry { + r := newKubeRegistry(version.Get()) + + return r +} diff --git a/vendor/k8s.io/component-base/metrics/summary.go b/vendor/k8s.io/component-base/metrics/summary.go new file mode 100644 index 0000000000000..3d8da0064597e --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/summary.go @@ -0,0 +1,171 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/blang/semver" + "github.com/prometheus/client_golang/prometheus" +) + +// Summary is our internal representation for our wrapping struct around prometheus +// summaries. Summary implements both kubeCollector and ObserverMetric +// +// DEPRECATED: as per the metrics overhaul KEP +type Summary struct { + ObserverMetric + *SummaryOpts + lazyMetric + selfCollector +} + +// NewSummary returns an object which is Summary-like. However, nothing +// will be measured until the summary is registered somewhere. +// +// DEPRECATED: as per the metrics overhaul KEP +func NewSummary(opts *SummaryOpts) *Summary { + opts.StabilityLevel.setDefaults() + + s := &Summary{ + SummaryOpts: opts, + lazyMetric: lazyMetric{}, + } + s.setPrometheusSummary(noopMetric{}) + s.lazyInit(s) + return s +} + +// setPrometheusSummary sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (s *Summary) setPrometheusSummary(summary prometheus.Summary) { + s.ObserverMetric = summary + s.initSelfCollection(summary) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (s *Summary) DeprecatedVersion() *semver.Version { + return parseSemver(s.SummaryOpts.DeprecatedVersion) +} + +// initializeMetric invokes the actual prometheus.Summary object instantiation +// and stores a reference to it +func (s *Summary) initializeMetric() { + s.SummaryOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. 
+ s.setPrometheusSummary(prometheus.NewSummary(s.SummaryOpts.toPromSummaryOpts())) +} + +// initializeDeprecatedMetric invokes the actual prometheus.Summary object instantiation +// but modifies the Help description prior to object instantiation. +func (s *Summary) initializeDeprecatedMetric() { + s.SummaryOpts.markDeprecated() + s.initializeMetric() +} + +// SummaryVec is the internal representation of our wrapping struct around prometheus +// summaryVecs. +// +// DEPRECATED: as per the metrics overhaul KEP +type SummaryVec struct { + *prometheus.SummaryVec + *SummaryOpts + lazyMetric + originalLabels []string +} + +// NewSummaryVec returns an object which satisfies kubeCollector and wraps the +// prometheus.SummaryVec object. However, the object returned will not measure +// anything unless the collector is first registered, since the metric is lazily instantiated. +// +// DEPRECATED: as per the metrics overhaul KEP +func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec { + opts.StabilityLevel.setDefaults() + + v := &SummaryVec{ + SummaryOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{}, + } + v.lazyInit(v) + return v +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *SummaryVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.SummaryOpts.DeprecatedVersion) +} + +func (v *SummaryVec) initializeMetric() { + v.SummaryOpts.annotateStabilityLevel() + v.SummaryVec = prometheus.NewSummaryVec(v.SummaryOpts.toPromSummaryOpts(), v.originalLabels) +} + +func (v *SummaryVec) initializeDeprecatedMetric() { + v.SummaryOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus behavior actually results in the creation of a new metric +// if a metric with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/summary.go#L485-L495 + +// WithLabelValues returns the ObserverMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new ObserverMetric is created IFF the summaryVec +// has been registered to a metrics registry. +func (v *SummaryVec) WithLabelValues(lvs ...string) ObserverMetric { + if !v.IsCreated() { + return noop + } + return v.SummaryVec.WithLabelValues(lvs...) +} + +// With returns the ObserverMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new ObserverMetric is created IFF the summaryVec has +// been registered to a metrics registry. +func (v *SummaryVec) With(labels map[string]string) ObserverMetric { + if !v.IsCreated() { + return noop + } + return v.SummaryVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. 
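A hedged sketch of the (deprecated) summary path, with made-up names:

import "k8s.io/component-base/metrics"

var admissionLatency = metrics.NewSummaryVec(
    &metrics.SummaryOpts{
        Name: "example_admission_duration_seconds",
        Help: "Admission latency summary, labeled by plugin.",
        // Objectives left nil: the package substitutes its pre-1.0 defaults.
    },
    []string{"plugin"},
)

// After registration: admissionLatency.WithLabelValues("limits").Observe(0.003)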
+func (v *SummaryVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.SummaryVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *SummaryVec) Reset() { + if !v.IsCreated() { + return + } + + v.SummaryVec.Reset() +} diff --git a/vendor/k8s.io/component-base/metrics/value.go b/vendor/k8s.io/component-base/metrics/value.go new file mode 100644 index 0000000000000..4a19aaa3bf9b4 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/value.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +func (vt *ValueType) toPromValueType() prometheus.ValueType { + return prometheus.ValueType(*vt) +} + +// NewLazyConstMetric is a helper of MustNewConstMetric. +// +// Note: If the metrics described by the desc is hidden, the metrics will not be created. +func NewLazyConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + if desc.IsHidden() { + return nil + } + return prometheus.MustNewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...) +} + +// NewLazyMetricWithTimestamp is a helper of NewMetricWithTimestamp. +// +// Warning: the Metric 'm' must be the one created by NewLazyConstMetric(), +// otherwise, no stability guarantees would be offered. +func NewLazyMetricWithTimestamp(t time.Time, m Metric) Metric { + if m == nil { + return nil + } + + return prometheus.NewMetricWithTimestamp(t, m) +} diff --git a/vendor/k8s.io/component-base/metrics/version.go b/vendor/k8s.io/component-base/metrics/version.go new file mode 100644 index 0000000000000..63cc606e4c180 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/version.go @@ -0,0 +1,37 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import "k8s.io/component-base/version" + +var ( + buildInfo = NewGaugeVec( + &GaugeOpts{ + Name: "kubernetes_build_info", + Help: "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.", + StabilityLevel: ALPHA, + }, + []string{"major", "minor", "gitVersion", "gitCommit", "gitTreeState", "buildDate", "goVersion", "compiler", "platform"}, + ) +) + +// RegisterBuildInfo registers the build and version info in a metadata metric in prometheus +func RegisterBuildInfo(r KubeRegistry) { + info := version.Get() + r.MustRegister(buildInfo) + buildInfo.WithLabelValues(info.Major, info.Minor, info.GitVersion, info.GitCommit, info.GitTreeState, info.BuildDate, info.GoVersion, info.Compiler, info.Platform).Set(1) +} diff --git a/vendor/k8s.io/component-base/metrics/version_parser.go b/vendor/k8s.io/component-base/metrics/version_parser.go new file mode 100644 index 0000000000000..d52bd74bdfd0a --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/version_parser.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "regexp" + + "github.com/blang/semver" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +const ( + versionRegexpString = `^v(\d+\.\d+\.\d+)` +) + +var ( + versionRe = regexp.MustCompile(versionRegexpString) +) + +func parseSemver(s string) *semver.Version { + if s != "" { + sv := semver.MustParse(s) + return &sv + } + return nil +} +func parseVersion(ver apimachineryversion.Info) semver.Version { + matches := versionRe.FindAllStringSubmatch(ver.String(), -1) + + if len(matches) != 1 { + panic(fmt.Sprintf("version string \"%v\" doesn't match expected regular expression: \"%v\"", ver.String(), versionRe.String())) + } + return semver.MustParse(matches[0][1]) +} diff --git a/vendor/k8s.io/component-base/metrics/wrappers.go b/vendor/k8s.io/component-base/metrics/wrappers.go new file mode 100644 index 0000000000000..6ae8a458acb7c --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/wrappers.go @@ -0,0 +1,95 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// This file contains a series of interfaces which we explicitly define for +// integrating with prometheus. 
We redefine the interfaces explicitly here +// so that we can prevent breakage if methods are ever added to prometheus +// variants of them. + +// Collector defines a subset of prometheus.Collector interface methods +type Collector interface { + Describe(chan<- *prometheus.Desc) + Collect(chan<- prometheus.Metric) +} + +// Metric defines a subset of prometheus.Metric interface methods +type Metric interface { + Desc() *prometheus.Desc + Write(*dto.Metric) error +} + +// CounterMetric is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. + +// CounterMetric is an interface which defines a subset of the interface provided by prometheus.Counter +type CounterMetric interface { + Inc() + Add(float64) +} + +// CounterVecMetric is an interface which prometheus.CounterVec satisfies. +type CounterVecMetric interface { + WithLabelValues(...string) CounterMetric + With(prometheus.Labels) CounterMetric +} + +// GaugeMetric is an interface which defines a subset of the interface provided by prometheus.Gauge +type GaugeMetric interface { + Set(float64) + Inc() + Dec() + Add(float64) + Write(out *dto.Metric) error + SetToCurrentTime() +} + +// ObserverMetric captures individual observations. +type ObserverMetric interface { + Observe(float64) +} + +// PromRegistry is an interface which implements a subset of prometheus.Registerer and +// prometheus.Gatherer interfaces +type PromRegistry interface { + Register(prometheus.Collector) error + MustRegister(...prometheus.Collector) + Unregister(prometheus.Collector) bool + Gather() ([]*dto.MetricFamily, error) +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. +type Gatherer interface { + prometheus.Gatherer +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. 
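Editorial note: because these wrapper interfaces deliberately expose only a subset of the prometheus types, instrumentation code can be written against them and stay oblivious to whether it receives a real collector or a noop one. A minimal sketch, with a hypothetical helper name that is not part of this package:

    package metricsexample // hypothetical package, not part of the diff

    import "k8s.io/component-base/metrics"

    // incrementRetries depends only on the narrow CounterMetric interface above,
    // so it behaves identically with a registered counter or the noop implementation.
    func incrementRetries(c metrics.CounterMetric, attempts int) {
        for i := 0; i < attempts; i++ {
            c.Inc()
        }
    }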
+type GaugeFunc interface { + Metric + Collector +} diff --git a/vendor/k8s.io/component-base/version/.gitattributes b/vendor/k8s.io/component-base/version/.gitattributes new file mode 100644 index 0000000000000..7e349eff60bd6 --- /dev/null +++ b/vendor/k8s.io/component-base/version/.gitattributes @@ -0,0 +1 @@ +base.go export-subst diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/version/BUILD.bazel b/vendor/k8s.io/component-base/version/BUILD.bazel similarity index 67% rename from vendor/k8s.io/kubernetes/pkg/kubectl/version/BUILD.bazel rename to vendor/k8s.io/component-base/version/BUILD.bazel index 7322b4d6909a9..47807cd4fc507 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/version/BUILD.bazel +++ b/vendor/k8s.io/component-base/version/BUILD.bazel @@ -6,8 +6,8 @@ go_library( "base.go", "version.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/version", - importpath = "k8s.io/kubernetes/pkg/kubectl/version", + importmap = "k8s.io/kops/vendor/k8s.io/component-base/version", + importpath = "k8s.io/component-base/version", visibility = ["//visibility:public"], deps = ["//vendor/k8s.io/apimachinery/pkg/version:go_default_library"], ) diff --git a/vendor/k8s.io/component-base/version/base.go b/vendor/k8s.io/component-base/version/base.go new file mode 100644 index 0000000000000..e13678c305fdd --- /dev/null +++ b/vendor/k8s.io/component-base/version/base.go @@ -0,0 +1,63 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +// Base version information. +// +// This is the fallback data used when version information from git is not +// provided via go ldflags. It provides an approximation of the Kubernetes +// version for ad-hoc builds (e.g. `go build`) that cannot get the version +// information from git. +// +// If you are looking at these fields in the git tree, they look +// strange. They are modified on the fly by the build process. The +// in-tree values are dummy values used for "git archive", which also +// works for GitHub tar downloads. +// +// When releasing a new Kubernetes version, this file is updated by +// build/mark_new_version.sh to reflect the new version, and then a +// git annotated tag (using format vX.Y where X == Major version and Y +// == Minor version) is created to point to the commit that updates +// component-base/version/base.go +var ( + // TODO: Deprecate gitMajor and gitMinor, use only gitVersion + // instead. First step in deprecation, keep the fields but make + // them irrelevant. (Next we'll take it out, which may muck with + // scripts consuming the kubectl version output - but most of + // these should be looking at gitVersion already anyways.) 
+ gitMajor string // major version, always numeric + gitMinor string // minor version, numeric possibly followed by "+" + + // semantic version, derived by build scripts (see + // https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md + // for a detailed discussion of this field) + // + // TODO: This field is still called "gitVersion" for legacy + // reasons. For prerelease versions, the build metadata on the + // semantic version is a git hash, but the version itself is no + // longer the direct output of "git describe", but a slight + // translation to be semver compliant. + + // NOTE: The $Format strings are replaced during 'git archive' thanks to the + // companion .gitattributes file containing 'export-subst' in this same + // directory. See also https://git-scm.com/docs/gitattributes + gitVersion = "v0.0.0-master+$Format:%h$" + gitCommit = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState = "" // state of git tree, either "clean" or "dirty" + + buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') +) diff --git a/vendor/k8s.io/component-base/version/def.bzl b/vendor/k8s.io/component-base/version/def.bzl new file mode 100644 index 0000000000000..77edcbc89bcb8 --- /dev/null +++ b/vendor/k8s.io/component-base/version/def.bzl @@ -0,0 +1,39 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Implements hack/lib/version.sh's kube::version::ldflags() for Bazel. +def version_x_defs(): + # This should match the list of packages in kube::version::ldflag + stamp_pkgs = [ + "k8s.io/kubernetes/vendor/k8s.io/component-base/version", + "k8s.io/kubernetes/vendor/k8s.io/client-go/pkg/version", + ] + + # This should match the list of vars in kube::version::ldflags + # It should also match the list of vars set in hack/print-workspace-status.sh. + stamp_vars = [ + "buildDate", + "gitCommit", + "gitMajor", + "gitMinor", + "gitTreeState", + "gitVersion", + ] + + # Generate the cross-product. 
+ x_defs = {} + for pkg in stamp_pkgs: + for var in stamp_vars: + x_defs["%s.%s" % (pkg, var)] = "{%s}" % var + return x_defs diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/version/version.go b/vendor/k8s.io/component-base/version/version.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/version/version.go rename to vendor/k8s.io/component-base/version/version.go diff --git a/vendor/k8s.io/csi-translation-lib/plugins/BUILD.bazel b/vendor/k8s.io/csi-translation-lib/plugins/BUILD.bazel index c607afec45500..91ab19c677cec 100644 --- a/vendor/k8s.io/csi-translation-lib/plugins/BUILD.bazel +++ b/vendor/k8s.io/csi-translation-lib/plugins/BUILD.bazel @@ -16,6 +16,7 @@ go_library( deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/cloud-provider/volume:go_default_library", ], diff --git a/vendor/k8s.io/csi-translation-lib/plugins/aws_ebs.go b/vendor/k8s.io/csi-translation-lib/plugins/aws_ebs.go index 97c264f965e62..573214032ddaf 100644 --- a/vendor/k8s.io/csi-translation-lib/plugins/aws_ebs.go +++ b/vendor/k8s.io/csi-translation-lib/plugins/aws_ebs.go @@ -25,6 +25,7 @@ import ( "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -57,6 +58,10 @@ func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeInlineVolumeToCSI(vol } ebsSource := volume.AWSElasticBlockStore pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + // A.K.A InnerVolumeSpecName required to match for Unmount + Name: volume.Name, + }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ CSI: &v1.CSIPersistentVolumeSource{ @@ -122,7 +127,7 @@ func (t *awsElasticBlockStoreCSITranslator) TranslateCSIPVToInTree(pv *v1.Persis if partition, ok := csiSource.VolumeAttributes["partition"]; ok { partValue, err := strconv.Atoi(partition) if err != nil { - return nil, fmt.Errorf("Failed to convert partition %v to integer: %v", partition, err) + return nil, fmt.Errorf("failed to convert partition %v to integer: %v", partition, err) } ebsSource.Partition = int32(partValue) } diff --git a/vendor/k8s.io/csi-translation-lib/plugins/azure_disk.go b/vendor/k8s.io/csi-translation-lib/plugins/azure_disk.go index 7b44c658b55fc..b1e91bdef9ccb 100644 --- a/vendor/k8s.io/csi-translation-lib/plugins/azure_disk.go +++ b/vendor/k8s.io/csi-translation-lib/plugins/azure_disk.go @@ -23,6 +23,7 @@ import ( "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -33,6 +34,7 @@ const ( // Parameter names defined in azure disk CSI driver, refer to // https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/driver-parameters.md + azureDiskKind = "kind" azureDiskCachingMode = "cachingMode" azureDiskFSType = "fsType" ) @@ -67,6 +69,10 @@ func (t *azureDiskCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Vol azureSource := volume.AzureDisk pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + // A.K.A InnerVolumeSpecName required to match for Unmount + Name: volume.Name, + }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ CSI: &v1.CSIPersistentVolumeSource{ @@ -74,7 +80,7 @@ func (t *azureDiskCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Vol VolumeHandle: azureSource.DataDiskURI, ReadOnly: 
*azureSource.ReadOnly, FSType: *azureSource.FSType, - VolumeAttributes: map[string]string{}, + VolumeAttributes: map[string]string{azureDiskKind: "Managed"}, }, }, AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, @@ -87,6 +93,9 @@ func (t *azureDiskCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Vol if *azureSource.FSType != "" { pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes[azureDiskFSType] = *azureSource.FSType } + if azureSource.Kind != nil { + pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes[azureDiskKind] = string(*azureSource.Kind) + } return pv, nil } @@ -106,17 +115,21 @@ func (t *azureDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) VolumeHandle: azureSource.DataDiskURI, ReadOnly: *azureSource.ReadOnly, FSType: *azureSource.FSType, - VolumeAttributes: map[string]string{}, + VolumeAttributes: map[string]string{azureDiskKind: "Managed"}, } - if *azureSource.CachingMode != "" { + if azureSource.CachingMode != nil { csiSource.VolumeAttributes[azureDiskCachingMode] = string(*azureSource.CachingMode) } - if *azureSource.FSType != "" { + if azureSource.FSType != nil { csiSource.VolumeAttributes[azureDiskFSType] = *azureSource.FSType } + if azureSource.Kind != nil { + csiSource.VolumeAttributes[azureDiskKind] = string(*azureSource.Kind) + } + pv.Spec.PersistentVolumeSource.AzureDisk = nil pv.Spec.PersistentVolumeSource.CSI = csiSource pv.Spec.AccessModes = backwardCompatibleAccessModes(pv.Spec.AccessModes) @@ -139,11 +152,13 @@ func (t *azureDiskCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume) } // refer to https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/driver-parameters.md + managed := v1.AzureManagedDisk azureSource := &v1.AzureDiskVolumeSource{ DiskName: diskName, DataDiskURI: diskURI, FSType: &csiSource.FSType, ReadOnly: &csiSource.ReadOnly, + Kind: &managed, } if csiSource.VolumeAttributes != nil { @@ -155,6 +170,11 @@ func (t *azureDiskCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume) if fsType, ok := csiSource.VolumeAttributes[azureDiskFSType]; ok && fsType != "" { azureSource.FSType = &fsType } + + if kind, ok := csiSource.VolumeAttributes[azureDiskKind]; ok && kind != "" { + diskKind := v1.AzureDataDiskKind(kind) + azureSource.Kind = &diskKind + } } pv.Spec.CSI = nil diff --git a/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go b/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go index 2b58dbda76537..b78a4f83fda79 100644 --- a/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go +++ b/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go @@ -22,6 +22,7 @@ import ( "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -63,6 +64,10 @@ func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Vol azureSource := volume.AzureFile pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + // A.K.A InnerVolumeSpecName required to match for Unmount + Name: volume.Name, + }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ CSI: &v1.CSIPersistentVolumeSource{ diff --git a/vendor/k8s.io/csi-translation-lib/plugins/gce_pd.go b/vendor/k8s.io/csi-translation-lib/plugins/gce_pd.go index 1a9b11a13ab76..ed9d079b5ebba 100644 --- a/vendor/k8s.io/csi-translation-lib/plugins/gce_pd.go +++ b/vendor/k8s.io/csi-translation-lib/plugins/gce_pd.go @@ -23,6 +23,7 @@ import ( "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" cloudvolume "k8s.io/cloud-provider/volume" ) @@ -141,19 +142,44 @@ func (g *gcePersistentDiskCSITranslator) TranslateInTreeStorageClassToCSI(sc *st // plugin never supported ReadWriteMany but also did not validate or enforce // this access mode for pre-provisioned volumes. The GCE PD CSI Driver validates // and enforces (fails) ReadWriteMany. Therefore we treat all in-tree -// ReadWriteMany as ReadWriteOnce volumes to not break legacy volumes. +// ReadWriteMany as ReadWriteOnce volumes to not break legacy volumes. It also +// takes [ReadWriteOnce, ReadOnlyMany] and makes it ReadWriteOnce. This is +// because the in-tree plugin does not enforce access modes and just attaches +// the disk in ReadWriteOnce mode; however, the CSI external-attacher will fail +// this combination because technically [ReadWriteOnce, ReadOnlyMany] is not +// supportable on an attached volume +// See: https://github.com/kubernetes-csi/external-attacher/issues/153 func backwardCompatibleAccessModes(ams []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode { if ams == nil { return nil } - newAM := []v1.PersistentVolumeAccessMode{} + + s := map[v1.PersistentVolumeAccessMode]bool{} + var newAM []v1.PersistentVolumeAccessMode + for _, am := range ams { if am == v1.ReadWriteMany { - newAM = append(newAM, v1.ReadWriteOnce) + // ReadWriteMany is unsupported in CSI, but in-tree did no + // validation and treated it as ReadWriteOnce + s[v1.ReadWriteOnce] = true } else { - newAM = append(newAM, am) + s[am] = true } } + + switch { + case s[v1.ReadOnlyMany] && s[v1.ReadWriteOnce]: + // ROX,RWO is unsupported in CSI, but in-tree did not validation and + // treated it as ReadWriteOnce + newAM = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} + case s[v1.ReadWriteOnce]: + newAM = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} + case s[v1.ReadOnlyMany]: + newAM = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany} + default: + newAM = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} + } + return newAM } @@ -172,6 +198,10 @@ func (g *gcePersistentDiskCSITranslator) TranslateInTreeInlineVolumeToCSI(volume } pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + // A.K.A InnerVolumeSpecName required to match for Unmount + Name: volume.Name, + }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ CSI: &v1.CSIPersistentVolumeSource{ diff --git a/vendor/k8s.io/csi-translation-lib/plugins/openstack_cinder.go b/vendor/k8s.io/csi-translation-lib/plugins/openstack_cinder.go index 0573ebc05260d..9965eacf596cd 100644 --- a/vendor/k8s.io/csi-translation-lib/plugins/openstack_cinder.go +++ b/vendor/k8s.io/csi-translation-lib/plugins/openstack_cinder.go @@ -21,6 +21,7 @@ import ( "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -54,6 +55,10 @@ func (t *osCinderCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volu cinderSource := volume.Cinder pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + // A.K.A InnerVolumeSpecName required to match for Unmount + Name: volume.Name, + }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ CSI: &v1.CSIPersistentVolumeSource{ diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml index 0f508dae66ed7..5677664c21836 100644 --- a/vendor/k8s.io/klog/.travis.yml +++ b/vendor/k8s.io/klog/.travis.yml @@ -5,11 +5,12 @@ go: - 1.9.x - 1.10.x - 1.11.x + - 
1.12.x script: - go get -t -v ./... - diff -u <(echo -n) <(gofmt -d .) - diff -u <(echo -n) <(golint $(go list -e ./...)) - - go tool vet . + - go tool vet . || go vet . - go test -v -race ./... install: - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md index bee306f398fbd..841468b4b65c4 100644 --- a/vendor/k8s.io/klog/README.md +++ b/vendor/k8s.io/klog/README.md @@ -31,7 +31,7 @@ How to use klog - Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags - You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) -- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md)) +- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) ### Coexisting with glog This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. diff --git a/vendor/k8s.io/klog/go.mod b/vendor/k8s.io/klog/go.mod new file mode 100644 index 0000000000000..3877d8546ab97 --- /dev/null +++ b/vendor/k8s.io/klog/go.mod @@ -0,0 +1,5 @@ +module k8s.io/klog + +go 1.12 + +require github.com/go-logr/logr v0.1.0 diff --git a/vendor/k8s.io/klog/go.sum b/vendor/k8s.io/klog/go.sum new file mode 100644 index 0000000000000..fb64d277a7afd --- /dev/null +++ b/vendor/k8s.io/klog/go.sum @@ -0,0 +1,2 @@ +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go index a3dddeadfcfcc..2712ce0afc11e 100644 --- a/vendor/k8s.io/klog/klog.go +++ b/vendor/k8s.io/klog/klog.go @@ -20,26 +20,26 @@ // // Basic examples: // -// glog.Info("Prepare to repel boarders") +// klog.Info("Prepare to repel boarders") // -// glog.Fatalf("Initialization failed: %s", err) +// klog.Fatalf("Initialization failed: %s", err) // // See the documentation for the V function for an explanation of these examples: // -// if glog.V(2) { -// glog.Info("Starting transaction...") +// if klog.V(2) { +// klog.Info("Starting transaction...") // } // -// glog.V(2).Infoln("Processed", nItems, "elements") +// klog.V(2).Infoln("Processed", nItems, "elements") // // Log output is buffered and written periodically using Flush. Programs // should call Flush before exiting to guarantee all log output is written. // -// By default, all log statements write to files in a temporary directory. +// By default, all log statements write to standard error. // This package provides several flags that modify this behavior. // As a result, flag.Parse must be called before any logging is done. // -// -logtostderr=false +// -logtostderr=true // Logs are written to standard error instead of to files. // -alsologtostderr=false // Logs are written to standard error as well as to files. 
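Editorial note: the documentation changes above reflect klog's actual defaults (logging to standard error, with flags registered explicitly rather than in init()). A minimal program using only the entry points documented in this diff looks like this:

    package main

    import (
        "flag"

        "k8s.io/klog"
    )

    func main() {
        klog.InitFlags(nil) // registers klog's flags on flag.CommandLine
        flag.Parse()        // must be called before any logging is done
        defer klog.Flush()  // output is buffered; flush before exiting

        klog.Info("Prepare to repel boarders")
        if klog.V(2) {
            klog.Info("Starting transaction...")
        }
    }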
@@ -142,7 +142,7 @@ func (s *severity) Set(value string) error { if v, ok := severityByName(value); ok { threshold = v } else { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -226,7 +226,7 @@ func (l *Level) Get() interface{} { // Set is part of the flag.Value interface. func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -294,7 +294,7 @@ func (m *moduleSpec) Set(value string) error { return errVmoduleSyntax } pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) + v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { return errors.New("syntax error: expect comma-separated list of filename=N") } @@ -396,30 +396,23 @@ type flushSyncWriter interface { io.Writer } +// init sets up the defaults and runs flushDaemon. func init() { - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - + logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. logging.setVState(0, nil, false) + logging.logDir = "" + logging.logFile = "" + logging.logFileMaxSizeMB = 1800 + logging.toStderr = true + logging.alsoToStderr = false + logging.skipHeaders = false + logging.addDirHeader = false + logging.skipLogHeaders = false go logging.flushDaemon() } -var initDefaultsOnce sync.Once - // InitFlags is for explicitly initializing the flags. func InitFlags(flagset *flag.FlagSet) { - - // Initialize defaults. - initDefaultsOnce.Do(func() { - logging.logDir = "" - logging.logFile = "" - logging.logFileMaxSizeMB = 1800 - logging.toStderr = true - logging.alsoToStderr = false - logging.skipHeaders = false - logging.skipLogHeaders = false - }) - if flagset == nil { flagset = flag.CommandLine } @@ -432,6 +425,7 @@ func InitFlags(flagset *flag.FlagSet) { flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") + flagset.BoolVar(&logging.skipHeaders, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header") flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") @@ -500,6 +494,9 @@ type loggingT struct { // If true, do not add the headers to log files skipLogHeaders bool + + // If true, add the file directory to the header + addDirHeader bool } // buffer holds a byte Buffer for reuse. The zero value is ready for use. @@ -585,9 +582,14 @@ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { file = "???" 
line = 1 } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] + if slash := strings.LastIndex(file, "/"); slash >= 0 { + path := file + file = path[slash+1:] + if l.addDirHeader { + if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { + file = path[dirsep+1:] + } + } } } return l.formatHeader(s, file, line), file, line @@ -736,6 +738,8 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() for s := fatalLog; s >= infoLog; s-- { rb := &redirectBuffer{ w: w, @@ -746,6 +750,8 @@ func SetOutput(w io.Writer) { // SetOutputBySeverity sets the output destination for specific severity func SetOutputBySeverity(name string, w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() sev, ok := severityByName(name) if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) @@ -771,24 +777,38 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { os.Stderr.Write(data) } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) + + if logging.logFile != "" { + // Since we are using a single log file, all of the items in l.file array + // will point to the same file, so just use one of them to write data. + if l.file[infoLog] == nil { + if err := l.createFiles(infoLog); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: l.file[infoLog].Write(data) + } else { + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } } } if s == fatalLog { @@ -827,7 +847,7 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo // timeoutFlush calls Flush and returns when it completes or after timeout // elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds +// by Flush may deadlock when klog.Fatal is called from a hook that holds // a lock. func timeoutFlush(timeout time.Duration) { done := make(chan bool, 1) @@ -838,7 +858,7 @@ func timeoutFlush(timeout time.Duration) { select { case <-done: case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) } } @@ -1094,9 +1114,9 @@ type Verbose bool // The returned value is a boolean of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. 
// Thus, one may write either -// if glog.V(2) { glog.Info("log this") } +// if klog.V(2) { klog.Info("log this") } // or -// glog.V(2).Info("log this") +// klog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // @@ -1170,7 +1190,7 @@ func InfoDepth(depth int, args ...interface{}) { } // Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { logging.println(infoLog, args...) } @@ -1194,7 +1214,7 @@ func WarningDepth(depth int, args ...interface{}) { } // Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { logging.println(warningLog, args...) } @@ -1218,7 +1238,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { logging.println(errorLog, args...) } @@ -1244,7 +1264,7 @@ func FatalDepth(depth int, args ...interface{}) { // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { logging.println(fatalLog, args...) } diff --git a/vendor/k8s.io/klog/klogr/klogr.go b/vendor/k8s.io/klog/klogr/klogr.go index f63557daacd5c..e145950867fa5 100644 --- a/vendor/k8s.io/klog/klogr/klogr.go +++ b/vendor/k8s.io/klog/klogr/klogr.go @@ -140,12 +140,11 @@ func pretty(value interface{}) string { func (l klogger) Info(msg string, kvList ...interface{}) { if l.Enabled() { - lvlStr := flatten("level", l.level) msgStr := flatten("msg", msg) trimmed := trimDuplicates(l.values, kvList) fixedStr := flatten(trimmed[0]...) userStr := flatten(trimmed[1]...) - klog.InfoDepth(framesToCaller(), l.prefix, " ", lvlStr, " ", msgStr, " ", fixedStr, " ", userStr) + klog.InfoDepth(framesToCaller(), l.prefix, " ", msgStr, " ", fixedStr, " ", userStr) } } diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index 7d5534b24ecdf..f1c87c30086c0 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -24,6 +24,12 @@ import ( "github.com/go-openapi/spec" ) +const ( + // TODO: Make this configurable. + ExtensionPrefix = "x-kubernetes-" + ExtensionV2Schema = ExtensionPrefix + "v2-schema" +) + // OpenAPIDefinition describes single type. Normally these definitions are auto-generated using gen-openapi. 
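Editorial note: the new ExtensionV2Schema constant ("x-kubernetes-" + "v2-schema") is consumed by the EmbedOpenAPIDefinitionIntoV2Extension helper added further down in this file. A rough sketch of how a caller might stash a v2 schema inside another definition, with illustrative names only:

    package openapiexample // hypothetical package, not part of the diff

    import (
        "github.com/go-openapi/spec"

        "k8s.io/kube-openapi/pkg/common"
    )

    // embedV2 wraps two hand-built definitions to show where the embedded schema ends up.
    func embedV2(mainSchema, v2Schema spec.Schema) common.OpenAPIDefinition {
        mainDef := common.OpenAPIDefinition{Schema: mainSchema}
        embeddedDef := common.OpenAPIDefinition{Schema: v2Schema}
        out := common.EmbedOpenAPIDefinitionIntoV2Extension(mainDef, embeddedDef)
        // out.Schema.Extensions["x-kubernetes-v2-schema"] now carries v2Schema.
        return out
    }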
type OpenAPIDefinition struct { Schema spec.Schema @@ -43,6 +49,10 @@ type OpenAPIDefinitionGetter interface { OpenAPIDefinition() *OpenAPIDefinition } +type OpenAPIV3DefinitionGetter interface { + OpenAPIV3Definition() *OpenAPIDefinition +} + type PathHandler interface { Handle(path string, handler http.Handler) } @@ -172,3 +182,11 @@ func EscapeJsonPointer(p string) string { p = strings.Replace(p, "/", "~1", -1) return p } + +func EmbedOpenAPIDefinitionIntoV2Extension(main OpenAPIDefinition, embedded OpenAPIDefinition) OpenAPIDefinition { + if main.Schema.Extensions == nil { + main.Schema.Extensions = make(map[string]interface{}) + } + main.Schema.Extensions[ExtensionV2Schema] = embedded.Schema + return main +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS new file mode 100644 index 0000000000000..9621a6a3a4acf --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS @@ -0,0 +1,2 @@ +approvers: +- apelisse diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 890a39399f668..5eb957affbce4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -92,13 +92,16 @@ func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) { // We believe the schema is a reference, verify that and returns a new // Schema func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) { + // TODO(wrong): a schema with a $ref can have properties. We can ignore them (would be incomplete), but we cannot return an error. if len(s.GetProperties().GetAdditionalProperties()) > 0 { return nil, newSchemaError(path, "unallowed embedded type definition") } + // TODO(wrong): a schema with a $ref can have a type. We can ignore it (would be incomplete), but we cannot return an error. if len(s.GetType().GetValue()) > 0 { return nil, newSchemaError(path, "definition reference can't have a type") } + // TODO(wrong): $refs outside of the definitions are completely valid. We can ignore them (would be incomplete), but we cannot return an error. if !strings.HasPrefix(s.GetXRef(), "#/definitions/") { return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef()) } @@ -127,6 +130,7 @@ func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) return nil, newSchemaError(path, "invalid object type") } var sub Schema + // TODO(incomplete): this misses the boolean case as AdditionalProperties is a bool+schema sum type. if s.GetAdditionalProperties().GetSchema() == nil { sub = &Arbitrary{ BaseSchema: d.parseBaseSchema(s, path), @@ -157,6 +161,7 @@ func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, case Number: // do nothing case Integer: // do nothing case Boolean: // do nothing + // TODO(wrong): this misses "null". Would skip the null case (would be incomplete), but we cannot return an error. default: return nil, newSchemaError(path, "Unknown primitive type: %q", t) } @@ -175,6 +180,8 @@ func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, erro return nil, newSchemaError(path, `array should have type "array"`) } if len(s.GetItems().GetSchema()) != 1 { + // TODO(wrong): Items can have multiple elements. We can ignore Items then (would be incomplete), but we cannot return an error. + // TODO(wrong): "type: array" witohut any items at all is completely valid. 
return nil, newSchemaError(path, "array should have exactly one sub-item") } sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path) @@ -227,6 +234,8 @@ func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, // this function is public, it doesn't leak through the interface. func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { if s.GetXRef() != "" { + // TODO(incomplete): ignoring the rest of s is wrong. As long as there are no conflict, everything from s must be considered + // Reference: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#path-item-object return d.parseReference(s, path) } objectTypes := s.GetType().GetValue() @@ -234,11 +243,15 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err case 0: // in the OpenAPI schema served by older k8s versions, object definitions created from structs did not include // the type:object property (they only included the "properties" property), so we need to handle this case + // TODO: validate that we ever published empty, non-nil properties. JSON roundtripping nils them. if s.GetProperties() != nil { + // TODO(wrong): when verifying a non-object later against this, it will be rejected as invalid type. + // TODO(CRD validation schema publishing): we have to filter properties (empty or not) if type=object is not given return d.parseKind(s, path) } else { // Definition has no type and no properties. Treat it as an arbitrary value - // TODO: what if it has additionalProperties or patternProperties? + // TODO(incomplete): what if it has additionalProperties=false or patternProperties? + // ANSWER: parseArbitrary is less strict than it has to be with patternProperties (which is ignored). So this is correct (of course not complete). return d.parseArbitrary(s, path) } case 1: @@ -256,6 +269,8 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err return d.parsePrimitive(s, path) default: // the OpenAPI generator never generates (nor it ever did in the past) OpenAPI type definitions with multiple types + // TODO(wrong): this is rejecting a completely valid OpenAPI spec + // TODO(CRD validation schema publishing): filter these out return nil, newSchemaError(path, "definitions with multiple types aren't supported") } } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go index bbbdd4f61c969..6a9f68c0dbd2f 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go @@ -216,6 +216,7 @@ func (item *primitiveItem) VisitPrimitive(schema *proto.Primitive) { case proto.String: return } + // TODO(wrong): this misses "null" item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: item.Kind}) } diff --git a/vendor/k8s.io/kubectl/LICENSE b/vendor/k8s.io/kubectl/LICENSE new file mode 100644 index 0000000000000..8dada3edaf50d --- /dev/null +++ b/vendor/k8s.io/kubectl/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/cmd/util/BUILD.bazel similarity index 75% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD.bazel rename to vendor/k8s.io/kubectl/pkg/cmd/util/BUILD.bazel index 0529a702c8809..f6fd29964e2d9 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD.bazel +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/BUILD.bazel @@ -10,8 +10,8 @@ go_library( "kubectl_match_version.go", "printing.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util", - importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util", + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/cmd/util", + importpath = "k8s.io/kubectl/pkg/cmd/util", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/evanphx/json-patch:go_default_library", @@ -36,12 +36,12 @@ go_library( "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/klog:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/templates:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/validation:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/version:go_default_library", + "//vendor/k8s.io/kubectl/pkg/scheme:go_default_library", + "//vendor/k8s.io/kubectl/pkg/util/openapi:go_default_library", + "//vendor/k8s.io/kubectl/pkg/util/openapi/validation:go_default_library", + "//vendor/k8s.io/kubectl/pkg/util/templates:go_default_library", + "//vendor/k8s.io/kubectl/pkg/validation:go_default_library", + "//vendor/k8s.io/kubectl/pkg/version:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/crdfinder.go b/vendor/k8s.io/kubectl/pkg/cmd/util/crdfinder.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/crdfinder.go rename to vendor/k8s.io/kubectl/pkg/cmd/util/crdfinder.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/BUILD.bazel similarity index 73% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/BUILD.bazel rename to vendor/k8s.io/kubectl/pkg/cmd/util/editor/BUILD.bazel index 46e5b81011b1a..8500207ad14e2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/BUILD.bazel +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/BUILD.bazel @@ -6,8 +6,8 @@ go_library( "editoptions.go", "editor.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor", - importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor", + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/cmd/util/editor", + importpath = "k8s.io/kubectl/pkg/cmd/util/editor", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/evanphx/json-patch:go_default_library", @@ -27,10 +27,10 @@ go_library( "//vendor/k8s.io/cli-runtime/pkg/printers:go_default_library", "//vendor/k8s.io/cli-runtime/pkg/resource:go_default_library", "//vendor/k8s.io/klog:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", - 
"//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/term:go_default_library", + "//vendor/k8s.io/kubectl/pkg/cmd/util:go_default_library", + "//vendor/k8s.io/kubectl/pkg/cmd/util/editor/crlf:go_default_library", + "//vendor/k8s.io/kubectl/pkg/scheme:go_default_library", + "//vendor/k8s.io/kubectl/pkg/util:go_default_library", + "//vendor/k8s.io/kubectl/pkg/util/term:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/crlf/BUILD.bazel similarity index 51% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf/BUILD.bazel rename to vendor/k8s.io/kubectl/pkg/cmd/util/editor/crlf/BUILD.bazel index 0aae88a33bebc..a41dce68f4c8a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf/BUILD.bazel +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/crlf/BUILD.bazel @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["crlf.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf", - importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf", + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/cmd/util/editor/crlf", + importpath = "k8s.io/kubectl/pkg/cmd/util/editor/crlf", visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf/crlf.go b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/crlf/crlf.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf/crlf.go rename to vendor/k8s.io/kubectl/pkg/cmd/util/editor/crlf/crlf.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editoptions.go b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editoptions.go rename to vendor/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go index f9b869eb6948b..6e0412ec3f94f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editoptions.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editoptions.go @@ -46,10 +46,10 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/printers" "k8s.io/cli-runtime/pkg/resource" - "k8s.io/kubernetes/pkg/kubectl" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf" - "k8s.io/kubernetes/pkg/kubectl/scheme" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/cmd/util/editor/crlf" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" ) // EditOptions contains all the options for running edit cli command. 
@@ -408,7 +408,7 @@ func (o *EditOptions) Run() error { } var annotationInfos []*resource.Info for i := range infos { - data, err := kubectl.GetOriginalConfiguration(infos[i].Object) + data, err := util.GetOriginalConfiguration(infos[i].Object) if err != nil { return err } @@ -663,7 +663,7 @@ func (o *EditOptions) visitAnnotation(annotationVisitor resource.Visitor) error err := annotationVisitor.Visit(func(info *resource.Info, incomingErr error) error { // put configuration annotation in "updates" if o.ApplyAnnotation { - if err := kubectl.CreateOrUpdateAnnotation(true, info.Object, scheme.DefaultJSONEncoder()); err != nil { + if err := util.CreateOrUpdateAnnotation(true, info.Object, scheme.DefaultJSONEncoder()); err != nil { return err } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor.go b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editor.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor.go rename to vendor/k8s.io/kubectl/pkg/cmd/util/editor/editor.go index ef7831ca110ee..0f124ec07dc1f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/editor/editor.go @@ -29,7 +29,7 @@ import ( "k8s.io/klog" - "k8s.io/kubernetes/pkg/kubectl/util/term" + "k8s.io/kubectl/pkg/util/term" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go b/vendor/k8s.io/kubectl/pkg/cmd/util/factory.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go rename to vendor/k8s.io/kubectl/pkg/cmd/util/factory.go index 73c88a69e8e34..d9df0bf3b5190 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/factory.go @@ -23,8 +23,8 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" - "k8s.io/kubernetes/pkg/kubectl/validation" + "k8s.io/kubectl/pkg/util/openapi" + "k8s.io/kubectl/pkg/validation" ) // Factory provides abstractions that allow the Kubectl command to be extended across multiple types diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go b/vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go rename to vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go index 85a5cdf02f51a..e4cf2da07349f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go @@ -30,9 +30,9 @@ import ( "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" - openapivalidation "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation" - "k8s.io/kubernetes/pkg/kubectl/validation" + "k8s.io/kubectl/pkg/util/openapi" + openapivalidation "k8s.io/kubectl/pkg/util/openapi/validation" + "k8s.io/kubectl/pkg/validation" ) type factoryImpl struct { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go b/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go rename to vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go index 9d27003e43f76..0659eabf30d69 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go @@ -26,7 +26,7 
@@ import ( "strings" "time" - "github.com/evanphx/json-patch" + jsonpatch "github.com/evanphx/json-patch" "github.com/spf13/cobra" "github.com/spf13/pflag" kerrors "k8s.io/apimachinery/pkg/api/errors" @@ -409,9 +409,9 @@ func AddDryRunFlag(cmd *cobra.Command) { } func AddServerSideApplyFlags(cmd *cobra.Command) { - cmd.Flags().Bool("experimental-server-side", false, "If true, apply runs in the server instead of the client. This is an alpha feature and flag.") - cmd.Flags().Bool("experimental-force-conflicts", false, "If true, server-side apply will force the changes against conflicts. This is an alpha feature and flag.") - cmd.Flags().String("experimental-field-manager", "kubectl", "Name of the manager used to track field ownership. This is an alpha feature and flag.") + cmd.Flags().Bool("server-side", false, "If true, apply runs in the server instead of the client.") + cmd.Flags().Bool("force-conflicts", false, "If true, server-side apply will force the changes against conflicts.") + cmd.Flags().String("field-manager", "kubectl", "Name of the manager used to track field ownership.") } func AddIncludeUninitializedFlag(cmd *cobra.Command) { @@ -488,15 +488,15 @@ func DumpReaderToFile(reader io.Reader, filename string) error { } func GetServerSideApplyFlag(cmd *cobra.Command) bool { - return GetFlagBool(cmd, "experimental-server-side") + return GetFlagBool(cmd, "server-side") } func GetForceConflictsFlag(cmd *cobra.Command) bool { - return GetFlagBool(cmd, "experimental-force-conflicts") + return GetFlagBool(cmd, "force-conflicts") } func GetFieldManagerFlag(cmd *cobra.Command) string { - return GetFlagString(cmd, "experimental-field-manager") + return GetFlagString(cmd, "field-manager") } func GetDryRunFlag(cmd *cobra.Command) bool { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/kubectl_match_version.go b/vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/kubectl_match_version.go rename to vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go index 25bbf268bbd15..0c3f6e04917d4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/kubectl_match_version.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go @@ -26,10 +26,10 @@ import ( "k8s.io/client-go/discovery" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/pkg/kubectl/scheme" + "k8s.io/kubectl/pkg/scheme" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/version" + "k8s.io/kubectl/pkg/version" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go b/vendor/k8s.io/kubectl/pkg/cmd/util/printing.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go rename to vendor/k8s.io/kubectl/pkg/cmd/util/printing.go index f093940d8a455..ebd228821edbf 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/printing.go @@ -19,7 +19,7 @@ package util import ( "fmt" - "k8s.io/kubernetes/pkg/kubectl/util/templates" + "k8s.io/kubectl/pkg/util/templates" ) // SuggestAPIResources returns a suggestion to use the "api-resources" command diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/generated/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/generated/BUILD.bazel similarity index 55% rename from vendor/k8s.io/kubernetes/pkg/kubectl/generated/BUILD.bazel rename to vendor/k8s.io/kubectl/pkg/generated/BUILD.bazel index 5e080fc044b7a..99dc472fe67ac 100644 --- 
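The AddServerSideApplyFlags hunk above drops the experimental- prefix and the "alpha feature" wording from the server-side apply flags, so callers now read --server-side, --force-conflicts, and --field-manager. A sketch of how a cobra command would wire these helpers after the rename; the command itself is hypothetical, only the cmdutil calls come from this diff:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
)

func main() {
	cmd := &cobra.Command{
		Use: "apply",
		Run: func(cmd *cobra.Command, args []string) {
			// Flag names after this change: --server-side, --force-conflicts, --field-manager.
			serverSide := cmdutil.GetServerSideApplyFlag(cmd)
			force := cmdutil.GetForceConflictsFlag(cmd)
			manager := cmdutil.GetFieldManagerFlag(cmd)
			fmt.Println(serverSide, force, manager)
		},
	}
	cmdutil.AddServerSideApplyFlags(cmd)
	cmd.Execute()
}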
a/vendor/k8s.io/kubernetes/pkg/kubectl/generated/BUILD.bazel +++ b/vendor/k8s.io/kubectl/pkg/generated/BUILD.bazel @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["bindata.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/generated", - importpath = "k8s.io/kubernetes/pkg/kubectl/generated", + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/generated", + importpath = "k8s.io/kubectl/pkg/generated", visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/generated/bindata.go b/vendor/k8s.io/kubectl/pkg/generated/bindata.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/kubectl/generated/bindata.go rename to vendor/k8s.io/kubectl/pkg/generated/bindata.go index 2806b9b65369d..6d49fa1f6d30e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/generated/bindata.go +++ b/vendor/k8s.io/kubectl/pkg/generated/bindata.go @@ -1,4 +1,4 @@ -// Code generated by go-bindata. +// Package generated Code generated by go-bindata. (@generated) DO NOT EDIT. // sources: // translations/OWNERS // translations/extract.py @@ -26,8 +26,6 @@ // translations/test/default/LC_MESSAGES/k8s.po // translations/test/en_US/LC_MESSAGES/k8s.mo // translations/test/en_US/LC_MESSAGES/k8s.po -// DO NOT EDIT! - package generated import ( @@ -51,21 +49,32 @@ type bindataFileInfo struct { modTime time.Time } +// Name return file name func (fi bindataFileInfo) Name() string { return fi.name } + +// Size return file size func (fi bindataFileInfo) Size() int64 { return fi.size } + +// Mode return file mode func (fi bindataFileInfo) Mode() os.FileMode { return fi.mode } + +// Mode return file modify time func (fi bindataFileInfo) ModTime() time.Time { return fi.modTime } + +// IsDir return file whether a directory func (fi bindataFileInfo) IsDir() bool { - return false + return fi.mode&os.ModeDir != 0 } + +// Sys return file is sys mode func (fi bindataFileInfo) Sys() interface{} { return nil } @@ -141,9 +150,9 @@ def import_replace(match, file, line_number): Doesn't try to be smart and detect if it's already present, assumes a gofmt round wil fix things. 
""" - sys.stdout.write('{}\n"k8s.io/kubernetes/pkg/util/i18n"\n'.format(match.group(1))) + sys.stdout.write('{}\n"k8s.io/kubectl/pkg/util/i18n"\n'.format(match.group(1))) -IMPORT_MATCH = MatchHandler('(.*"k8s.io/kubernetes/pkg/kubectl/cmd/util")', import_replace) +IMPORT_MATCH = MatchHandler('(.*"k8s.io/kubectl/pkg/cmd/util")', import_replace) def string_flag_replace(match, file, line_number): diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/scheme/BUILD.bazel similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/kubectl/scheme/BUILD.bazel rename to vendor/k8s.io/kubectl/pkg/scheme/BUILD.bazel index 247e2c70484c5..b926e5ed8ce0f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/BUILD.bazel +++ b/vendor/k8s.io/kubectl/pkg/scheme/BUILD.bazel @@ -6,11 +6,13 @@ go_library( "install.go", "scheme.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/scheme", - importpath = "k8s.io/kubernetes/pkg/kubectl/scheme", + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/scheme", + importpath = "k8s.io/kubectl/pkg/scheme", visibility = ["//visibility:public"], deps = [ + "//vendor/k8s.io/api/admission/v1:go_default_library", "//vendor/k8s.io/api/admission/v1beta1:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/install.go b/vendor/k8s.io/kubectl/pkg/scheme/install.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/kubectl/scheme/install.go rename to vendor/k8s.io/kubectl/pkg/scheme/install.go index 76a0578dc1937..ffd15bf1b660b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/install.go +++ b/vendor/k8s.io/kubectl/pkg/scheme/install.go @@ -17,7 +17,9 @@ limitations under the License. 
package scheme import ( - admissionv1alpha1 "k8s.io/api/admission/v1beta1" + admissionv1 "k8s.io/api/admission/v1" + admissionv1beta1 "k8s.io/api/admission/v1beta1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -62,8 +64,8 @@ func init() { utilruntime.Must(scheme.AddToScheme(Scheme)) utilruntime.Must(Scheme.SetVersionPriority(corev1.SchemeGroupVersion)) - utilruntime.Must(Scheme.SetVersionPriority(admissionv1alpha1.SchemeGroupVersion)) - utilruntime.Must(Scheme.SetVersionPriority(admissionregistrationv1beta1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(admissionv1beta1.SchemeGroupVersion, admissionv1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(admissionregistrationv1beta1.SchemeGroupVersion, admissionregistrationv1.SchemeGroupVersion)) utilruntime.Must(Scheme.SetVersionPriority(appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion, appsv1.SchemeGroupVersion)) utilruntime.Must(Scheme.SetVersionPriority(authenticationv1.SchemeGroupVersion, authenticationv1beta1.SchemeGroupVersion)) utilruntime.Must(Scheme.SetVersionPriority(authorizationv1.SchemeGroupVersion, authorizationv1beta1.SchemeGroupVersion)) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go b/vendor/k8s.io/kubectl/pkg/scheme/scheme.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go rename to vendor/k8s.io/kubectl/pkg/scheme/scheme.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/util/BUILD.bazel similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD.bazel rename to vendor/k8s.io/kubectl/pkg/util/BUILD.bazel index a97be3827b319..c6ba98560e783 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD.bazel +++ b/vendor/k8s.io/kubectl/pkg/util/BUILD.bazel @@ -3,17 +3,19 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "apply.go", "pod_port.go", "service_port.go", "umask.go", "umask_windows.go", "util.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util", - importpath = "k8s.io/kubernetes/pkg/kubectl/util", + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/util", + importpath = "k8s.io/kubectl/pkg/util", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go b/vendor/k8s.io/kubectl/pkg/util/apply.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/kubectl/apply.go rename to vendor/k8s.io/kubectl/pkg/util/apply.go index 78447d498cf06..77ea59384261f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go +++ b/vendor/k8s.io/kubectl/pkg/util/apply.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
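The install.go hunk above begins registering the GA admission/v1 and admissionregistration/v1 groups alongside their v1beta1 predecessors in the shared kubectl scheme. A small check of that registration, assuming the vendored k8s.io/api and client-go already ship the v1 types, which is what the new BUILD.bazel dependencies above imply:

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	"k8s.io/kubectl/pkg/scheme"
)

func main() {
	// ObjectKinds resolves the GroupVersionKind for a registered type; after this
	// change the admissionregistration/v1 webhook kinds are known to the scheme.
	gvks, unversioned, err := scheme.Scheme.ObjectKinds(&admissionregistrationv1.ValidatingWebhookConfiguration{})
	fmt.Println(gvks, unversioned, err)
}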
*/ -package kubectl +package util import ( "k8s.io/api/core/v1" @@ -116,9 +116,9 @@ func GetModifiedConfiguration(obj runtime.Object, annotate bool, codec runtime.E return modified, nil } -// UpdateApplyAnnotation calls CreateApplyAnnotation if the last applied +// updateApplyAnnotation calls CreateApplyAnnotation if the last applied // configuration annotation is already present. Otherwise, it does nothing. -func UpdateApplyAnnotation(obj runtime.Object, codec runtime.Encoder) error { +func updateApplyAnnotation(obj runtime.Object, codec runtime.Encoder) error { if original, err := GetOriginalConfiguration(obj); err != nil || len(original) <= 0 { return err } @@ -142,5 +142,5 @@ func CreateOrUpdateAnnotation(createAnnotation bool, obj runtime.Object, codec r if createAnnotation { return CreateApplyAnnotation(obj, codec) } - return UpdateApplyAnnotation(obj, codec) + return updateApplyAnnotation(obj, codec) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/util/i18n/BUILD.bazel similarity index 59% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/BUILD.bazel rename to vendor/k8s.io/kubectl/pkg/util/i18n/BUILD.bazel index e49afa71546b3..dbd08ace4bf2a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/BUILD.bazel +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/BUILD.bazel @@ -3,12 +3,12 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["i18n.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/i18n", + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/util/i18n", + importpath = "k8s.io/kubectl/pkg/util/i18n", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/chai2010/gettext-go/gettext:go_default_library", "//vendor/k8s.io/klog:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/generated:go_default_library", + "//vendor/k8s.io/kubectl/pkg/generated:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/i18n.go b/vendor/k8s.io/kubectl/pkg/util/i18n/i18n.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/i18n.go rename to vendor/k8s.io/kubectl/pkg/util/i18n/i18n.go index a4ff9ac036ded..2e2bc2ff983bb 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/i18n/i18n.go +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/i18n.go @@ -24,7 +24,7 @@ import ( "os" "strings" - "k8s.io/kubernetes/pkg/kubectl/generated" + "k8s.io/kubectl/pkg/generated" "github.com/chai2010/gettext-go/gettext" "k8s.io/klog" diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/interrupt/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/util/interrupt/BUILD.bazel similarity index 53% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/interrupt/BUILD.bazel rename to vendor/k8s.io/kubectl/pkg/util/interrupt/BUILD.bazel index 245fcd0a87de5..84c9c5e687c83 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/interrupt/BUILD.bazel +++ b/vendor/k8s.io/kubectl/pkg/util/interrupt/BUILD.bazel @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["interrupt.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/interrupt", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/interrupt", + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/util/interrupt", + importpath = "k8s.io/kubectl/pkg/util/interrupt", visibility = ["//visibility:public"], ) diff --git 
a/vendor/k8s.io/kubernetes/pkg/kubectl/util/interrupt/interrupt.go b/vendor/k8s.io/kubectl/pkg/util/interrupt/interrupt.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/interrupt/interrupt.go rename to vendor/k8s.io/kubectl/pkg/util/interrupt/interrupt.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/util/openapi/BUILD.bazel similarity index 82% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD.bazel rename to vendor/k8s.io/kubectl/pkg/util/openapi/BUILD.bazel index 4772e9242cbf3..a2f6a8ef84ed9 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD.bazel +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/BUILD.bazel @@ -9,8 +9,8 @@ go_library( "openapi.go", "openapi_getter.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi", - importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi", + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/util/openapi", + importpath = "k8s.io/kubectl/pkg/util/openapi", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/OWNERS b/vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/OWNERS rename to vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/doc.go b/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/doc.go rename to vendor/k8s.io/kubectl/pkg/util/openapi/doc.go index 7fff1fa87c3a3..08194d58084cc 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/doc.go +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go @@ -18,4 +18,4 @@ limitations under the License. // from a Kubernetes server and then indexing the type definitions. // The openapi spec contains the object model definitions and extensions metadata // such as the patchStrategy and patchMergeKey for creating patches. 
-package openapi // k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi +package openapi // k8s.io/kubectl/pkg/util/openapi diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/dryrun.go b/vendor/k8s.io/kubectl/pkg/util/openapi/dryrun.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/dryrun.go rename to vendor/k8s.io/kubectl/pkg/util/openapi/dryrun.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/extensions.go b/vendor/k8s.io/kubectl/pkg/util/openapi/extensions.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/extensions.go rename to vendor/k8s.io/kubectl/pkg/util/openapi/extensions.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/openapi.go b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/openapi.go rename to vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/openapi_getter.go b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/openapi_getter.go rename to vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/util/openapi/validation/BUILD.bazel similarity index 68% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/BUILD.bazel rename to vendor/k8s.io/kubectl/pkg/util/openapi/validation/BUILD.bazel index e873236893a53..84650c099161d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/BUILD.bazel +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/validation/BUILD.bazel @@ -3,8 +3,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["validation.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation", - importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation", + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/util/openapi/validation", + importpath = "k8s.io/kubectl/pkg/util/openapi/validation", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -12,6 +12,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/util/proto/validation:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi:go_default_library", + "//vendor/k8s.io/kubectl/pkg/util/openapi:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/validation.go b/vendor/k8s.io/kubectl/pkg/util/openapi/validation/validation.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/validation.go rename to vendor/k8s.io/kubectl/pkg/util/openapi/validation/validation.go index 0ac30d5f0f1e0..25aec97ed1fc2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/validation.go +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/validation/validation.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/kube-openapi/pkg/util/proto/validation" - "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" + 
"k8s.io/kubectl/pkg/util/openapi" ) // SchemaValidation validates the object against an OpenAPI schema. diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/pod_port.go b/vendor/k8s.io/kubectl/pkg/util/pod_port.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/pod_port.go rename to vendor/k8s.io/kubectl/pkg/util/pod_port.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/service_port.go b/vendor/k8s.io/kubectl/pkg/util/service_port.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/service_port.go rename to vendor/k8s.io/kubectl/pkg/util/service_port.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/util/templates/BUILD.bazel similarity index 71% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/BUILD.bazel rename to vendor/k8s.io/kubectl/pkg/util/templates/BUILD.bazel index 9523ec7d467d8..714548455e48b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/BUILD.bazel +++ b/vendor/k8s.io/kubectl/pkg/util/templates/BUILD.bazel @@ -9,14 +9,14 @@ go_library( "templater.go", "templates.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/templates", + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/util/templates", + importpath = "k8s.io/kubectl/pkg/util/templates", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/MakeNowJust/heredoc:go_default_library", "//vendor/github.com/russross/blackfriday:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/term:go_default_library", + "//vendor/k8s.io/kubectl/pkg/util/term:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/command_groups.go b/vendor/k8s.io/kubectl/pkg/util/templates/command_groups.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/command_groups.go rename to vendor/k8s.io/kubectl/pkg/util/templates/command_groups.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/markdown.go b/vendor/k8s.io/kubectl/pkg/util/templates/markdown.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/markdown.go rename to vendor/k8s.io/kubectl/pkg/util/templates/markdown.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/normalizers.go b/vendor/k8s.io/kubectl/pkg/util/templates/normalizers.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/normalizers.go rename to vendor/k8s.io/kubectl/pkg/util/templates/normalizers.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/templater.go b/vendor/k8s.io/kubectl/pkg/util/templates/templater.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/templater.go rename to vendor/k8s.io/kubectl/pkg/util/templates/templater.go index 9fcb2dbb58f6c..a7036f0ec6dd1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/templater.go +++ b/vendor/k8s.io/kubectl/pkg/util/templates/templater.go @@ -23,7 +23,7 @@ import ( "text/template" "unicode" - "k8s.io/kubernetes/pkg/kubectl/util/term" + "k8s.io/kubectl/pkg/util/term" "github.com/spf13/cobra" flag "github.com/spf13/pflag" diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/templates.go b/vendor/k8s.io/kubectl/pkg/util/templates/templates.go similarity 
index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/templates/templates.go rename to vendor/k8s.io/kubectl/pkg/util/templates/templates.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/util/term/BUILD.bazel similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/term/BUILD.bazel rename to vendor/k8s.io/kubectl/pkg/util/term/BUILD.bazel index e8887fd9faf6d..8fd1d6c1dfbaa 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/BUILD.bazel +++ b/vendor/k8s.io/kubectl/pkg/util/term/BUILD.bazel @@ -9,15 +9,15 @@ go_library( "term.go", "term_writer.go", ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/term", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/term", + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/util/term", + importpath = "k8s.io/kubectl/pkg/util/term", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/docker/docker/pkg/term:go_default_library", "//vendor/github.com/mitchellh/go-wordwrap:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/interrupt:go_default_library", + "//vendor/k8s.io/kubectl/pkg/util/interrupt:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ "//vendor/golang.org/x/sys/unix:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resize.go b/vendor/k8s.io/kubectl/pkg/util/term/resize.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resize.go rename to vendor/k8s.io/kubectl/pkg/util/term/resize.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resizeevents.go b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resizeevents.go rename to vendor/k8s.io/kubectl/pkg/util/term/resizeevents.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resizeevents_windows.go b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resizeevents_windows.go rename to vendor/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/term.go b/vendor/k8s.io/kubectl/pkg/util/term/term.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/term/term.go rename to vendor/k8s.io/kubectl/pkg/util/term/term.go index d1e32dac6f63e..18183c0c96704 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/term.go +++ b/vendor/k8s.io/kubectl/pkg/util/term/term.go @@ -22,7 +22,7 @@ import ( "github.com/docker/docker/pkg/term" - "k8s.io/kubernetes/pkg/kubectl/util/interrupt" + "k8s.io/kubectl/pkg/util/interrupt" ) // SafeFunc is a function to be invoked by TTY. 
diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/term_writer.go b/vendor/k8s.io/kubectl/pkg/util/term/term_writer.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/term/term_writer.go rename to vendor/k8s.io/kubectl/pkg/util/term/term_writer.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask.go b/vendor/k8s.io/kubectl/pkg/util/umask.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/umask.go rename to vendor/k8s.io/kubectl/pkg/util/umask.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/umask_windows.go b/vendor/k8s.io/kubectl/pkg/util/umask_windows.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/umask_windows.go rename to vendor/k8s.io/kubectl/pkg/util/umask_windows.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go b/vendor/k8s.io/kubectl/pkg/util/util.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go rename to vendor/k8s.io/kubectl/pkg/util/util.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/validation/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/validation/BUILD.bazel similarity index 70% rename from vendor/k8s.io/kubernetes/pkg/kubectl/validation/BUILD.bazel rename to vendor/k8s.io/kubectl/pkg/validation/BUILD.bazel index 1a26544e5a667..a78a5841cb5dd 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/validation/BUILD.bazel +++ b/vendor/k8s.io/kubectl/pkg/validation/BUILD.bazel @@ -3,8 +3,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["schema.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/validation", - importpath = "k8s.io/kubernetes/pkg/kubectl/validation", + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/validation", + importpath = "k8s.io/kubectl/pkg/validation", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/exponent-io/jsonpath:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/validation/schema.go b/vendor/k8s.io/kubectl/pkg/validation/schema.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/validation/schema.go rename to vendor/k8s.io/kubectl/pkg/validation/schema.go diff --git a/vendor/k8s.io/kubectl/pkg/version/BUILD.bazel b/vendor/k8s.io/kubectl/pkg/version/BUILD.bazel new file mode 100644 index 0000000000000..7551fec64bc38 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/version/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "base.go", + "version.go", + ], + importmap = "k8s.io/kops/vendor/k8s.io/kubectl/pkg/version", + importpath = "k8s.io/kubectl/pkg/version", + visibility = ["//visibility:public"], + deps = ["//vendor/k8s.io/apimachinery/pkg/version:go_default_library"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/version/base.go b/vendor/k8s.io/kubectl/pkg/version/base.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubectl/version/base.go rename to vendor/k8s.io/kubectl/pkg/version/base.go diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/version/def.bzl b/vendor/k8s.io/kubectl/pkg/version/def.bzl similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/kubectl/version/def.bzl rename to vendor/k8s.io/kubectl/pkg/version/def.bzl index 5bb17a0d00d45..8ca2ca7923346 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/version/def.bzl +++ b/vendor/k8s.io/kubectl/pkg/version/def.bzl @@ -18,7 +18,7 @@ # go install ... 
-ldflags -X 'k8s.io/kubernetes/pkg/kubectl/cmd/version.gitMinor=12+' ... def version_x_defs(): stamp_pkgs = [ - "k8s.io/kubernetes/pkg/kubectl/version", + "k8s.io/kubernetes/vendor/k8s.io/kubectl/pkg/version", ] # This should match the list of vars in kube::version::ldflags diff --git a/vendor/k8s.io/kubectl/pkg/version/version.go b/vendor/k8s.io/kubectl/pkg/version/version.go new file mode 100644 index 0000000000000..d1e76dc00e055 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/version/version.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "runtime" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +// Get returns the overall codebase version. It's for detecting +// what code a binary was built from. +func Get() apimachineryversion.Info { + // These variables typically come from -ldflags settings and in + // their absence fallback to the settings in ./base.go + return apimachineryversion.Info{ + Major: gitMajor, + Minor: gitMinor, + GitVersion: gitVersion, + GitCommit: gitCommit, + GitTreeState: gitTreeState, + BuildDate: buildDate, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/metrics/prometheus/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/client/metrics/prometheus/BUILD.bazel deleted file mode 100644 index abbf0f1b23a58..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/metrics/prometheus/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["prometheus.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/client/metrics/prometheus", - importpath = "k8s.io/kubernetes/pkg/client/metrics/prometheus", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/k8s.io/client-go/tools/metrics:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/client/metrics/prometheus/prometheus.go b/vendor/k8s.io/kubernetes/pkg/client/metrics/prometheus/prometheus.go deleted file mode 100644 index c7804dfc2c1f5..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/metrics/prometheus/prometheus.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
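The new k8s.io/kubectl/pkg/version package above mirrors the old in-tree version package: Get() reports whatever was stamped into the unexported variables in base.go via -ldflags -X, falling back to the defaults in base.go when nothing was stamped. A minimal sketch of reading it; the stamp values in the comment are hypothetical and only illustrate the mechanism referenced in def.bzl:

package main

import (
	"fmt"

	"k8s.io/kubectl/pkg/version"
)

// Built with, for example (hypothetical values; vendored builds prefix the package path):
//   go build -ldflags "-X k8s.io/kubectl/pkg/version.gitVersion=v1.16.0 -X k8s.io/kubectl/pkg/version.gitCommit=deadbeef"
func main() {
	info := version.Get()
	fmt.Printf("%s (commit %s, %s, %s)\n", info.GitVersion, info.GitCommit, info.GoVersion, info.Platform)
}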
-*/ - -// Package prometheus creates and registers prometheus metrics with -// rest clients. To use this package, you just have to import it. -package prometheus - -import ( - "net/url" - "time" - - "k8s.io/client-go/tools/metrics" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - // requestLatency is a Prometheus Summary metric type partitioned by - // "verb" and "url" labels. It is used for the rest client latency metrics. - requestLatency = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_request_duration_seconds", - Help: "Request latency in seconds. Broken down by verb and URL.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), - }, - []string{"verb", "url"}, - ) - - // deprecatedRequestLatency is deprecated, please use requestLatency. - deprecatedRequestLatency = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_request_latency_seconds", - Help: "(Deprecated) Request latency in seconds. Broken down by verb and URL.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), - }, - []string{"verb", "url"}, - ) - - requestResult = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "rest_client_requests_total", - Help: "Number of HTTP requests, partitioned by status code, method, and host.", - }, - []string{"code", "method", "host"}, - ) -) - -func init() { - prometheus.MustRegister(requestLatency) - prometheus.MustRegister(deprecatedRequestLatency) - prometheus.MustRegister(requestResult) - metrics.Register(&latencyAdapter{m: requestLatency, dm: deprecatedRequestLatency}, &resultAdapter{requestResult}) -} - -type latencyAdapter struct { - m *prometheus.HistogramVec - dm *prometheus.HistogramVec -} - -func (l *latencyAdapter) Observe(verb string, u url.URL, latency time.Duration) { - l.m.WithLabelValues(verb, u.String()).Observe(latency.Seconds()) - l.dm.WithLabelValues(verb, u.String()).Observe(latency.Seconds()) -} - -type resultAdapter struct { - m *prometheus.CounterVec -} - -func (r *resultAdapter) Increment(code, method, host string) { - r.m.WithLabelValues(code, method, host).Inc() -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions b/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions deleted file mode 100644 index cd0644cc10987..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions +++ /dev/null @@ -1,73 +0,0 @@ -{ - "Rules": [{ - "SelectorRegexp": "k8s[.]io/kubernetes/pkg", - "AllowedPrefixes": [ - "k8s.io/kubernetes/pkg/api", - "k8s.io/kubernetes/pkg/api/legacyscheme", - "k8s.io/kubernetes/pkg/apis/apps", - "k8s.io/kubernetes/pkg/apis/apps/install", - "k8s.io/kubernetes/pkg/apis/apps/v1", - "k8s.io/kubernetes/pkg/apis/apps/v1beta1", - "k8s.io/kubernetes/pkg/apis/apps/v1beta2", - "k8s.io/kubernetes/pkg/apis/authentication", - "k8s.io/kubernetes/pkg/apis/authentication/install", - "k8s.io/kubernetes/pkg/apis/authentication/v1", - "k8s.io/kubernetes/pkg/apis/authentication/v1beta1", - "k8s.io/kubernetes/pkg/apis/authorization", - "k8s.io/kubernetes/pkg/apis/authorization/install", - "k8s.io/kubernetes/pkg/apis/authorization/v1", - "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", - "k8s.io/kubernetes/pkg/apis/autoscaling", - "k8s.io/kubernetes/pkg/apis/autoscaling/install", - "k8s.io/kubernetes/pkg/apis/autoscaling/v1", - "k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1", - "k8s.io/kubernetes/pkg/apis/batch", - "k8s.io/kubernetes/pkg/apis/batch/install", - "k8s.io/kubernetes/pkg/apis/batch/v1", - "k8s.io/kubernetes/pkg/apis/batch/v1beta1", - 
"k8s.io/kubernetes/pkg/apis/batch/v2alpha1", - "k8s.io/kubernetes/pkg/apis/certificates", - "k8s.io/kubernetes/pkg/apis/certificates/install", - "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", - "k8s.io/kubernetes/pkg/apis/core", - "k8s.io/kubernetes/pkg/apis/core/helper", - "k8s.io/kubernetes/pkg/apis/core/install", - "k8s.io/kubernetes/pkg/apis/core/v1", - "k8s.io/kubernetes/pkg/apis/extensions", - "k8s.io/kubernetes/pkg/apis/extensions/install", - "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", - "k8s.io/kubernetes/pkg/apis/networking", - "k8s.io/kubernetes/pkg/apis/policy", - "k8s.io/kubernetes/pkg/apis/policy/install", - "k8s.io/kubernetes/pkg/apis/policy/v1beta1", - "k8s.io/kubernetes/pkg/apis/rbac", - "k8s.io/kubernetes/pkg/apis/rbac/install", - "k8s.io/kubernetes/pkg/apis/rbac/v1", - "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", - "k8s.io/kubernetes/pkg/apis/rbac/v1beta1", - "k8s.io/kubernetes/pkg/apis/scheduling", - "k8s.io/kubernetes/pkg/apis/scheduling/install", - "k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1", - "k8s.io/kubernetes/pkg/apis/settings", - "k8s.io/kubernetes/pkg/apis/settings/install", - "k8s.io/kubernetes/pkg/apis/settings/v1alpha1", - "k8s.io/kubernetes/pkg/apis/storage", - "k8s.io/kubernetes/pkg/apis/storage/install", - "k8s.io/kubernetes/pkg/apis/storage/util", - "k8s.io/kubernetes/pkg/apis/storage/v1", - "k8s.io/kubernetes/pkg/apis/storage/v1beta1", - "k8s.io/kubernetes/pkg/features", - "k8s.io/kubernetes/pkg/kubectl", - "k8s.io/kubernetes/pkg/printers", - "k8s.io/kubernetes/pkg/printers/internalversion", - "k8s.io/kubernetes/pkg/registry/rbac/reconciliation", - "k8s.io/kubernetes/pkg/registry/rbac/validation", - "k8s.io/kubernetes/pkg/util/interrupt", - "k8s.io/kubernetes/pkg/util/node", - "k8s.io/kubernetes/pkg/util/parsers", - "k8s.io/kubernetes/pkg/version", - "k8s.io/utils/pointer" - ], - "ForbiddenPrefixes": [] - }] -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD.bazel deleted file mode 100644 index b6109eec6c8af..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD.bazel +++ /dev/null @@ -1,53 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "apply.go", - "conditions.go", - "doc.go", - "history.go", - "interfaces.go", - "rollback.go", - "rolling_updater.go", - "rollout_status.go", - "scale.go", - ], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl", - importpath = "k8s.io/kubernetes/pkg/kubectl", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/apps/v1:go_default_library", - "//vendor/k8s.io/api/autoscaling/v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/scale:go_default_library", - "//vendor/k8s.io/client-go/util/retry:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/apps:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/slice:go_default_library", - "//vendor/k8s.io/utils/integer:go_default_library", - "//vendor/k8s.io/utils/pointer:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS deleted file mode 100644 index a1f6b7eee3089..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- sig-cli-maintainers -reviewers: -- sig-cli -labels: -- area/kubectl -- sig/cli diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apps/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/apps/BUILD.bazel deleted file mode 100644 index cebcb2847620a..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apps/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["kind_visitor.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/apps", - importpath = "k8s.io/kubernetes/pkg/kubectl/apps", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apps/kind_visitor.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apps/kind_visitor.go deleted file mode 100644 index 931c63b1834f6..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apps/kind_visitor.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package apps - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// KindVisitor is used with GroupKindElement to call a particular function depending on the -// Kind of a schema.GroupKind -type KindVisitor interface { - VisitDaemonSet(kind GroupKindElement) - VisitDeployment(kind GroupKindElement) - VisitJob(kind GroupKindElement) - VisitPod(kind GroupKindElement) - VisitReplicaSet(kind GroupKindElement) - VisitReplicationController(kind GroupKindElement) - VisitStatefulSet(kind GroupKindElement) - VisitCronJob(kind GroupKindElement) -} - -// GroupKindElement defines a Kubernetes API group elem -type GroupKindElement schema.GroupKind - -// Accept calls the Visit method on visitor that corresponds to elem's Kind -func (elem GroupKindElement) Accept(visitor KindVisitor) error { - switch { - case elem.GroupMatch("apps", "extensions") && elem.Kind == "DaemonSet": - visitor.VisitDaemonSet(elem) - case elem.GroupMatch("apps", "extensions") && elem.Kind == "Deployment": - visitor.VisitDeployment(elem) - case elem.GroupMatch("batch") && elem.Kind == "Job": - visitor.VisitJob(elem) - case elem.GroupMatch("", "core") && elem.Kind == "Pod": - visitor.VisitPod(elem) - case elem.GroupMatch("apps", "extensions") && elem.Kind == "ReplicaSet": - visitor.VisitReplicaSet(elem) - case elem.GroupMatch("", "core") && elem.Kind == "ReplicationController": - visitor.VisitReplicationController(elem) - case elem.GroupMatch("apps") && elem.Kind == "StatefulSet": - visitor.VisitStatefulSet(elem) - case elem.GroupMatch("batch") && elem.Kind == "CronJob": - visitor.VisitCronJob(elem) - default: - return fmt.Errorf("no visitor method exists for %v", elem) - } - return nil -} - -// GroupMatch returns true if and only if elem's group matches one -// of the group arguments -func (elem GroupKindElement) GroupMatch(groups ...string) bool { - for _, g := range groups { - if elem.Group == g { - return true - } - } - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go b/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go deleted file mode 100644 index 405bc6d4634b8..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/conditions.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" -) - -// ControllerHasDesiredReplicas returns a condition that will be true if and only if -// the desired replica count for a controller's ReplicaSelector equals the Replicas count. 
-func ControllerHasDesiredReplicas(rcClient corev1client.ReplicationControllersGetter, controller *corev1.ReplicationController) wait.ConditionFunc { - - // If we're given a controller where the status lags the spec, it either means that the controller is stale, - // or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case. - desiredGeneration := controller.Generation - - return func() (bool, error) { - ctrl, err := rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - // There's a chance a concurrent update modifies the Spec.Replicas causing this check to pass, - // or, after this check has passed, a modification causes the rc manager to create more pods. - // This will not be an issue once we've implemented graceful delete for rcs, but till then - // concurrent stop operations on the same rc might have unintended side effects. - return ctrl.Status.ObservedGeneration >= desiredGeneration && ctrl.Status.Replicas == valOrZero(ctrl.Spec.Replicas), nil - } -} - -// ErrPodCompleted is returned by PodRunning or PodContainerRunning to indicate that -// the pod has already reached completed state. -var ErrPodCompleted = fmt.Errorf("pod ran to completion") - -// PodCompleted returns true if the pod has run to completion, false if the pod has not yet -// reached running state, or an error in any other case. -func PodCompleted(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *corev1.Pod: - switch t.Status.Phase { - case corev1.PodFailed, corev1.PodSucceeded: - return true, nil - } - } - return false, nil -} - -// PodRunningAndReady returns true if the pod is running and ready, false if the pod has not -// yet reached those states, returns ErrPodCompleted if the pod has run to completion, or -// an error in any other case. 
-func PodRunningAndReady(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *corev1.Pod: - switch t.Status.Phase { - case corev1.PodFailed, corev1.PodSucceeded: - return false, ErrPodCompleted - case corev1.PodRunning: - conditions := t.Status.Conditions - if conditions == nil { - return false, nil - } - for i := range conditions { - if conditions[i].Type == corev1.PodReady && - conditions[i].Status == corev1.ConditionTrue { - return true, nil - } - } - } - } - return false, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/describe/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/describe/BUILD.bazel deleted file mode 100644 index eb1fefeef0dc0..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/describe/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/describe", - importpath = "k8s.io/kubernetes/pkg/kubectl/describe", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/describe/interface.go b/vendor/k8s.io/kubernetes/pkg/kubectl/describe/interface.go deleted file mode 100644 index 58429918787e9..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/describe/interface.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package describe - -import ( - "fmt" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/cli-runtime/pkg/genericclioptions" -) - -const ( - // LoadBalancerWidth is the width how we describe load balancer - LoadBalancerWidth = 16 - - // LabelNodeRolePrefix is a label prefix for node roles - // It's copied over to here until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112 - LabelNodeRolePrefix = "node-role.kubernetes.io/" - - // NodeLabelRole specifies the role of a node - NodeLabelRole = "kubernetes.io/role" -) - -// DescriberFunc gives a way to display the specified RESTMapping type -type DescriberFunc func(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (Describer, error) - -// Describer generates output for the named resource or an error -// if the output could not be generated. Implementers typically -// abstract the retrieval of the named object from a remote server. -type Describer interface { - Describe(namespace, name string, describerSettings DescriberSettings) (output string, err error) -} - -// DescriberSettings holds display configuration for each object -// describer to control what is printed. 
-type DescriberSettings struct { - ShowEvents bool -} - -// ObjectDescriber is an interface for displaying arbitrary objects with extra -// information. Use when an object is in hand (on disk, or already retrieved). -// Implementers may ignore the additional information passed on extra, or use it -// by default. ObjectDescribers may return ErrNoDescriber if no suitable describer -// is found. -type ObjectDescriber interface { - DescribeObject(object interface{}, extra ...interface{}) (output string, err error) -} - -// ErrNoDescriber is a structured error indicating the provided object or objects -// cannot be described. -type ErrNoDescriber struct { - Types []string -} - -// Error implements the error interface. -func (e ErrNoDescriber) Error() string { - return fmt.Sprintf("no describer has been defined for %v", e.Types) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/BUILD.bazel deleted file mode 100644 index 256ef6ca4dffc..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/BUILD.bazel +++ /dev/null @@ -1,55 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["describe.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned", - importpath = "k8s.io/kubernetes/pkg/kubectl/describe/versioned", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/fatih/camelcase:go_default_library", - "//vendor/k8s.io/api/apps/v1:go_default_library", - "//vendor/k8s.io/api/autoscaling/v1:go_default_library", - "//vendor/k8s.io/api/autoscaling/v2beta2:go_default_library", - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/api/batch/v1beta1:go_default_library", - "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/api/networking/v1:go_default_library", - "//vendor/k8s.io/api/networking/v1beta1:go_default_library", - "//vendor/k8s.io/api/policy/v1beta1:go_default_library", - "//vendor/k8s.io/api/rbac/v1:go_default_library", - "//vendor/k8s.io/api/scheduling/v1:go_default_library", - "//vendor/k8s.io/api/storage/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/duration:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", - "//vendor/k8s.io/client-go/dynamic:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/tools/reference:go_default_library", - "//vendor/k8s.io/klog:go_default_library", - 
"//vendor/k8s.io/kubernetes/pkg/kubectl/describe:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/scheme:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/event:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/qos:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/resource:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/slice:go_default_library", - "//vendor/k8s.io/kubernetes/pkg/kubectl/util/storage:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/describe.go b/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/describe.go deleted file mode 100644 index 0ad7e070b3b3c..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/describe/versioned/describe.go +++ /dev/null @@ -1,4555 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package versioned - -import ( - "bytes" - "crypto/x509" - "fmt" - "io" - "net" - "net/url" - "reflect" - "sort" - "strconv" - "strings" - "text/tabwriter" - "time" - "unicode" - - "github.com/fatih/camelcase" - - appsv1 "k8s.io/api/apps/v1" - autoscalingv1 "k8s.io/api/autoscaling/v1" - autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" - batchv1 "k8s.io/api/batch/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" - certificatesv1beta1 "k8s.io/api/certificates/v1beta1" - corev1 "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - networkingv1 "k8s.io/api/networking/v1" - networkingv1beta1 "k8s.io/api/networking/v1beta1" - policyv1beta1 "k8s.io/api/policy/v1beta1" - rbacv1 "k8s.io/api/rbac/v1" - schedulingv1 "k8s.io/api/scheduling/v1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/duration" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/client-go/dynamic" - clientset "k8s.io/client-go/kubernetes" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/reference" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/kubectl/describe" - "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/kubectl/util/certificate" - deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" - "k8s.io/kubernetes/pkg/kubectl/util/event" - "k8s.io/kubernetes/pkg/kubectl/util/fieldpath" - "k8s.io/kubernetes/pkg/kubectl/util/qos" 
- "k8s.io/kubernetes/pkg/kubectl/util/rbac" - resourcehelper "k8s.io/kubernetes/pkg/kubectl/util/resource" - "k8s.io/kubernetes/pkg/kubectl/util/slice" - storageutil "k8s.io/kubernetes/pkg/kubectl/util/storage" -) - -// Each level has 2 spaces for PrefixWriter -const ( - LEVEL_0 = iota - LEVEL_1 - LEVEL_2 - LEVEL_3 -) - -// DescriberFn gives a way to easily override the function for unit testing if needed -var DescriberFn describe.DescriberFunc = Describer - -// Describer returns a Describer for displaying the specified RESTMapping type or an error. -func Describer(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (describe.Describer, error) { - clientConfig, err := restClientGetter.ToRESTConfig() - if err != nil { - return nil, err - } - // try to get a describer - if describer, ok := DescriberFor(mapping.GroupVersionKind.GroupKind(), clientConfig); ok { - return describer, nil - } - // if this is a kind we don't have a describer for yet, go generic if possible - if genericDescriber, ok := GenericDescriberFor(mapping, clientConfig); ok { - return genericDescriber, nil - } - // otherwise return an unregistered error - return nil, fmt.Errorf("no description has been implemented for %s", mapping.GroupVersionKind.String()) -} - -// PrefixWriter can write text at various indentation levels. -type PrefixWriter interface { - // Write writes text with the specified indentation level. - Write(level int, format string, a ...interface{}) - // WriteLine writes an entire line with no indentation level. - WriteLine(a ...interface{}) - // Flush forces indentation to be reset. - Flush() -} - -// prefixWriter implements PrefixWriter -type prefixWriter struct { - out io.Writer -} - -var _ PrefixWriter = &prefixWriter{} - -// NewPrefixWriter creates a new PrefixWriter. -func NewPrefixWriter(out io.Writer) PrefixWriter { - return &prefixWriter{out: out} -} - -func (pw *prefixWriter) Write(level int, format string, a ...interface{}) { - levelSpace := " " - prefix := "" - for i := 0; i < level; i++ { - prefix += levelSpace - } - fmt.Fprintf(pw.out, prefix+format, a...) -} - -func (pw *prefixWriter) WriteLine(a ...interface{}) { - fmt.Fprintln(pw.out, a...) 
-} - -func (pw *prefixWriter) Flush() { - if f, ok := pw.out.(flusher); ok { - f.Flush() - } -} - -func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]describe.Describer, error) { - c, err := clientset.NewForConfig(clientConfig) - if err != nil { - return nil, err - } - - m := map[schema.GroupKind]describe.Describer{ - {Group: corev1.GroupName, Kind: "Pod"}: &PodDescriber{c}, - {Group: corev1.GroupName, Kind: "ReplicationController"}: &ReplicationControllerDescriber{c}, - {Group: corev1.GroupName, Kind: "Secret"}: &SecretDescriber{c}, - {Group: corev1.GroupName, Kind: "Service"}: &ServiceDescriber{c}, - {Group: corev1.GroupName, Kind: "ServiceAccount"}: &ServiceAccountDescriber{c}, - {Group: corev1.GroupName, Kind: "Node"}: &NodeDescriber{c}, - {Group: corev1.GroupName, Kind: "LimitRange"}: &LimitRangeDescriber{c}, - {Group: corev1.GroupName, Kind: "ResourceQuota"}: &ResourceQuotaDescriber{c}, - {Group: corev1.GroupName, Kind: "PersistentVolume"}: &PersistentVolumeDescriber{c}, - {Group: corev1.GroupName, Kind: "PersistentVolumeClaim"}: &PersistentVolumeClaimDescriber{c}, - {Group: corev1.GroupName, Kind: "Namespace"}: &NamespaceDescriber{c}, - {Group: corev1.GroupName, Kind: "Endpoints"}: &EndpointsDescriber{c}, - {Group: corev1.GroupName, Kind: "ConfigMap"}: &ConfigMapDescriber{c}, - {Group: corev1.GroupName, Kind: "PriorityClass"}: &PriorityClassDescriber{c}, - {Group: extensionsv1beta1.GroupName, Kind: "ReplicaSet"}: &ReplicaSetDescriber{c}, - {Group: extensionsv1beta1.GroupName, Kind: "NetworkPolicy"}: &NetworkPolicyDescriber{c}, - {Group: extensionsv1beta1.GroupName, Kind: "PodSecurityPolicy"}: &PodSecurityPolicyDescriber{c}, - {Group: autoscalingv2beta2.GroupName, Kind: "HorizontalPodAutoscaler"}: &HorizontalPodAutoscalerDescriber{c}, - {Group: extensionsv1beta1.GroupName, Kind: "DaemonSet"}: &DaemonSetDescriber{c}, - {Group: extensionsv1beta1.GroupName, Kind: "Deployment"}: &DeploymentDescriber{c}, - {Group: extensionsv1beta1.GroupName, Kind: "Ingress"}: &IngressDescriber{c}, - {Group: networkingv1beta1.GroupName, Kind: "Ingress"}: &IngressDescriber{c}, - {Group: batchv1.GroupName, Kind: "Job"}: &JobDescriber{c}, - {Group: batchv1.GroupName, Kind: "CronJob"}: &CronJobDescriber{c}, - {Group: appsv1.GroupName, Kind: "StatefulSet"}: &StatefulSetDescriber{c}, - {Group: appsv1.GroupName, Kind: "Deployment"}: &DeploymentDescriber{c}, - {Group: appsv1.GroupName, Kind: "DaemonSet"}: &DaemonSetDescriber{c}, - {Group: appsv1.GroupName, Kind: "ReplicaSet"}: &ReplicaSetDescriber{c}, - {Group: certificatesv1beta1.GroupName, Kind: "CertificateSigningRequest"}: &CertificateSigningRequestDescriber{c}, - {Group: storagev1.GroupName, Kind: "StorageClass"}: &StorageClassDescriber{c}, - {Group: policyv1beta1.GroupName, Kind: "PodDisruptionBudget"}: &PodDisruptionBudgetDescriber{c}, - {Group: rbacv1.GroupName, Kind: "Role"}: &RoleDescriber{c}, - {Group: rbacv1.GroupName, Kind: "ClusterRole"}: &ClusterRoleDescriber{c}, - {Group: rbacv1.GroupName, Kind: "RoleBinding"}: &RoleBindingDescriber{c}, - {Group: rbacv1.GroupName, Kind: "ClusterRoleBinding"}: &ClusterRoleBindingDescriber{c}, - {Group: networkingv1.GroupName, Kind: "NetworkPolicy"}: &NetworkPolicyDescriber{c}, - {Group: schedulingv1.GroupName, Kind: "PriorityClass"}: &PriorityClassDescriber{c}, - } - - return m, nil -} - -// DescriberFor returns the default describe functions for each of the standard -// Kubernetes types. 
-func DescriberFor(kind schema.GroupKind, clientConfig *rest.Config) (describe.Describer, bool) { - describers, err := describerMap(clientConfig) - if err != nil { - klog.V(1).Info(err) - return nil, false - } - - f, ok := describers[kind] - return f, ok -} - -// GenericDescriberFor returns a generic describer for the specified mapping -// that uses only information available from runtime.Unstructured -func GenericDescriberFor(mapping *meta.RESTMapping, clientConfig *rest.Config) (describe.Describer, bool) { - // used to fetch the resource - dynamicClient, err := dynamic.NewForConfig(clientConfig) - if err != nil { - return nil, false - } - - // used to get events for the resource - clientSet, err := clientset.NewForConfig(clientConfig) - if err != nil { - return nil, false - } - eventsClient := clientSet.CoreV1() - - return &genericDescriber{mapping, dynamicClient, eventsClient}, true -} - -type genericDescriber struct { - mapping *meta.RESTMapping - dynamic dynamic.Interface - events corev1client.EventsGetter -} - -func (g *genericDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (output string, err error) { - obj, err := g.dynamic.Resource(g.mapping.Resource).Namespace(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = g.events.Events(namespace).Search(scheme.Scheme, obj) - } - - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", obj.GetName()) - w.Write(LEVEL_0, "Namespace:\t%s\n", obj.GetNamespace()) - printLabelsMultiline(w, "Labels", obj.GetLabels()) - printAnnotationsMultiline(w, "Annotations", obj.GetAnnotations()) - printUnstructuredContent(w, LEVEL_0, obj.UnstructuredContent(), "", ".metadata.name", ".metadata.namespace", ".metadata.labels", ".metadata.annotations") - if events != nil { - DescribeEvents(events, w) - } - return nil - }) -} - -func printUnstructuredContent(w PrefixWriter, level int, content map[string]interface{}, skipPrefix string, skip ...string) { - fields := []string{} - for field := range content { - fields = append(fields, field) - } - sort.Strings(fields) - - for _, field := range fields { - value := content[field] - switch typedValue := value.(type) { - case map[string]interface{}: - skipExpr := fmt.Sprintf("%s.%s", skipPrefix, field) - if slice.ContainsString(skip, skipExpr, nil) { - continue - } - w.Write(level, "%s:\n", smartLabelFor(field)) - printUnstructuredContent(w, level+1, typedValue, skipExpr, skip...) - - case []interface{}: - skipExpr := fmt.Sprintf("%s.%s", skipPrefix, field) - if slice.ContainsString(skip, skipExpr, nil) { - continue - } - w.Write(level, "%s:\n", smartLabelFor(field)) - for _, child := range typedValue { - switch typedChild := child.(type) { - case map[string]interface{}: - printUnstructuredContent(w, level+1, typedChild, skipExpr, skip...) 
- default: - w.Write(level+1, "%v\n", typedChild) - } - } - - default: - skipExpr := fmt.Sprintf("%s.%s", skipPrefix, field) - if slice.ContainsString(skip, skipExpr, nil) { - continue - } - w.Write(level, "%s:\t%v\n", smartLabelFor(field), typedValue) - } - } -} - -func smartLabelFor(field string) string { - // skip creating smart label if field name contains - // special characters other than '-' - if strings.IndexFunc(field, func(r rune) bool { - return !unicode.IsLetter(r) && r != '-' - }) != -1 { - return field - } - - commonAcronyms := []string{"API", "URL", "UID", "OSB", "GUID"} - parts := camelcase.Split(field) - result := make([]string, 0, len(parts)) - for _, part := range parts { - if part == "_" { - continue - } - - if slice.ContainsString(commonAcronyms, strings.ToUpper(part), nil) { - part = strings.ToUpper(part) - } else { - part = strings.Title(part) - } - result = append(result, part) - } - - return strings.Join(result, " ") -} - -// DefaultObjectDescriber can describe the default Kubernetes objects. -var DefaultObjectDescriber describe.ObjectDescriber - -func init() { - d := &Describers{} - err := d.Add( - describeLimitRange, - describeQuota, - describePod, - describeService, - describeReplicationController, - describeDaemonSet, - describeNode, - describeNamespace, - ) - if err != nil { - klog.Fatalf("Cannot register describers: %v", err) - } - DefaultObjectDescriber = d -} - -// NamespaceDescriber generates information about a namespace -type NamespaceDescriber struct { - clientset.Interface -} - -func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - ns, err := d.CoreV1().Namespaces().Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - resourceQuotaList, err := d.CoreV1().ResourceQuotas(name).List(metav1.ListOptions{}) - if err != nil { - if errors.IsNotFound(err) { - // Server does not support resource quotas. - // Not an error, will not show resource quotas information. - resourceQuotaList = nil - } else { - return "", err - } - } - limitRangeList, err := d.CoreV1().LimitRanges(name).List(metav1.ListOptions{}) - if err != nil { - if errors.IsNotFound(err) { - // Server does not support limit ranges. - // Not an error, will not show limit ranges information. 
- limitRangeList = nil - } else { - return "", err - } - } - return describeNamespace(ns, resourceQuotaList, limitRangeList) -} - -func describeNamespace(namespace *corev1.Namespace, resourceQuotaList *corev1.ResourceQuotaList, limitRangeList *corev1.LimitRangeList) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", namespace.Name) - printLabelsMultiline(w, "Labels", namespace.Labels) - printAnnotationsMultiline(w, "Annotations", namespace.Annotations) - w.Write(LEVEL_0, "Status:\t%s\n", string(namespace.Status.Phase)) - if resourceQuotaList != nil { - w.Write(LEVEL_0, "\n") - DescribeResourceQuotas(resourceQuotaList, w) - } - if limitRangeList != nil { - w.Write(LEVEL_0, "\n") - DescribeLimitRanges(limitRangeList, w) - } - return nil - }) -} - -func describeLimitRangeSpec(spec corev1.LimitRangeSpec, prefix string, w PrefixWriter) { - for i := range spec.Limits { - item := spec.Limits[i] - maxResources := item.Max - minResources := item.Min - defaultLimitResources := item.Default - defaultRequestResources := item.DefaultRequest - ratio := item.MaxLimitRequestRatio - - set := map[corev1.ResourceName]bool{} - for k := range maxResources { - set[k] = true - } - for k := range minResources { - set[k] = true - } - for k := range defaultLimitResources { - set[k] = true - } - for k := range defaultRequestResources { - set[k] = true - } - for k := range ratio { - set[k] = true - } - - for k := range set { - // if no value is set, we output - - maxValue := "-" - minValue := "-" - defaultLimitValue := "-" - defaultRequestValue := "-" - ratioValue := "-" - - maxQuantity, maxQuantityFound := maxResources[k] - if maxQuantityFound { - maxValue = maxQuantity.String() - } - - minQuantity, minQuantityFound := minResources[k] - if minQuantityFound { - minValue = minQuantity.String() - } - - defaultLimitQuantity, defaultLimitQuantityFound := defaultLimitResources[k] - if defaultLimitQuantityFound { - defaultLimitValue = defaultLimitQuantity.String() - } - - defaultRequestQuantity, defaultRequestQuantityFound := defaultRequestResources[k] - if defaultRequestQuantityFound { - defaultRequestValue = defaultRequestQuantity.String() - } - - ratioQuantity, ratioQuantityFound := ratio[k] - if ratioQuantityFound { - ratioValue = ratioQuantity.String() - } - - msg := "%s%s\t%v\t%v\t%v\t%v\t%v\t%v\n" - w.Write(LEVEL_0, msg, prefix, item.Type, k, minValue, maxValue, defaultRequestValue, defaultLimitValue, ratioValue) - } - } -} - -// DescribeLimitRanges merges a set of limit range items into a single tabular description -func DescribeLimitRanges(limitRanges *corev1.LimitRangeList, w PrefixWriter) { - if len(limitRanges.Items) == 0 { - w.Write(LEVEL_0, "No resource limits.\n") - return - } - w.Write(LEVEL_0, "Resource Limits\n Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n") - w.Write(LEVEL_0, " ----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n") - for _, limitRange := range limitRanges.Items { - describeLimitRangeSpec(limitRange.Spec, " ", w) - } -} - -// DescribeResourceQuotas merges a set of quota items into a single tabular description of all quotas -func DescribeResourceQuotas(quotas *corev1.ResourceQuotaList, w PrefixWriter) { - if len(quotas.Items) == 0 { - w.Write(LEVEL_0, "No resource quota.\n") - return - } - sort.Sort(SortableResourceQuotas(quotas.Items)) - - w.Write(LEVEL_0, "Resource Quotas") - for _, q := range quotas.Items { - w.Write(LEVEL_0, "\n 
Name:\t%s\n", q.Name) - if len(q.Spec.Scopes) > 0 { - scopes := make([]string, 0, len(q.Spec.Scopes)) - for _, scope := range q.Spec.Scopes { - scopes = append(scopes, string(scope)) - } - sort.Strings(scopes) - w.Write(LEVEL_0, " Scopes:\t%s\n", strings.Join(scopes, ", ")) - for _, scope := range scopes { - helpText := helpTextForResourceQuotaScope(corev1.ResourceQuotaScope(scope)) - if len(helpText) > 0 { - w.Write(LEVEL_0, " * %s\n", helpText) - } - } - } - - w.Write(LEVEL_0, " Resource\tUsed\tHard\n") - w.Write(LEVEL_0, " --------\t---\t---\n") - - resources := make([]corev1.ResourceName, 0, len(q.Status.Hard)) - for resource := range q.Status.Hard { - resources = append(resources, resource) - } - sort.Sort(SortableResourceNames(resources)) - - for _, resource := range resources { - hardQuantity := q.Status.Hard[resource] - usedQuantity := q.Status.Used[resource] - w.Write(LEVEL_0, " %s\t%s\t%s\n", string(resource), usedQuantity.String(), hardQuantity.String()) - } - } -} - -// LimitRangeDescriber generates information about a limit range -type LimitRangeDescriber struct { - clientset.Interface -} - -func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - lr := d.CoreV1().LimitRanges(namespace) - - limitRange, err := lr.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - return describeLimitRange(limitRange) -} - -func describeLimitRange(limitRange *corev1.LimitRange) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", limitRange.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", limitRange.Namespace) - w.Write(LEVEL_0, "Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n") - w.Write(LEVEL_0, "----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n") - describeLimitRangeSpec(limitRange.Spec, "", w) - return nil - }) -} - -// ResourceQuotaDescriber generates information about a resource quota -type ResourceQuotaDescriber struct { - clientset.Interface -} - -func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - rq := d.CoreV1().ResourceQuotas(namespace) - - resourceQuota, err := rq.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - return describeQuota(resourceQuota) -} - -func helpTextForResourceQuotaScope(scope corev1.ResourceQuotaScope) string { - switch scope { - case corev1.ResourceQuotaScopeTerminating: - return "Matches all pods that have an active deadline. These pods have a limited lifespan on a node before being actively terminated by the system." - case corev1.ResourceQuotaScopeNotTerminating: - return "Matches all pods that do not have an active deadline. These pods usually include long running pods whose container command is not expected to terminate." - case corev1.ResourceQuotaScopeBestEffort: - return "Matches all pods that do not have resource requirements set. These pods have a best effort quality of service." - case corev1.ResourceQuotaScopeNotBestEffort: - return "Matches all pods that have at least one resource requirement set. These pods have a burstable or guaranteed quality of service." 
- default: - return "" - } -} -func describeQuota(resourceQuota *corev1.ResourceQuota) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", resourceQuota.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", resourceQuota.Namespace) - if len(resourceQuota.Spec.Scopes) > 0 { - scopes := make([]string, 0, len(resourceQuota.Spec.Scopes)) - for _, scope := range resourceQuota.Spec.Scopes { - scopes = append(scopes, string(scope)) - } - sort.Strings(scopes) - w.Write(LEVEL_0, "Scopes:\t%s\n", strings.Join(scopes, ", ")) - for _, scope := range scopes { - helpText := helpTextForResourceQuotaScope(corev1.ResourceQuotaScope(scope)) - if len(helpText) > 0 { - w.Write(LEVEL_0, " * %s\n", helpText) - } - } - } - w.Write(LEVEL_0, "Resource\tUsed\tHard\n") - w.Write(LEVEL_0, "--------\t----\t----\n") - - resources := make([]corev1.ResourceName, 0, len(resourceQuota.Status.Hard)) - for resource := range resourceQuota.Status.Hard { - resources = append(resources, resource) - } - sort.Sort(SortableResourceNames(resources)) - - msg := "%v\t%v\t%v\n" - for i := range resources { - resource := resources[i] - hardQuantity := resourceQuota.Status.Hard[resource] - usedQuantity := resourceQuota.Status.Used[resource] - w.Write(LEVEL_0, msg, resource, usedQuantity.String(), hardQuantity.String()) - } - return nil - }) -} - -// PodDescriber generates information about a pod and the replication controllers that -// create it. -type PodDescriber struct { - clientset.Interface -} - -func (d *PodDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - pod, err := d.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - if describerSettings.ShowEvents { - eventsInterface := d.CoreV1().Events(namespace) - selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil) - options := metav1.ListOptions{FieldSelector: selector.String()} - events, err2 := eventsInterface.List(options) - if describerSettings.ShowEvents && err2 == nil && len(events.Items) > 0 { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Pod '%v': error '%v', but found events.\n", name, err) - DescribeEvents(events, w) - return nil - }) - } - } - return "", err - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - if ref, err := reference.GetReference(scheme.Scheme, pod); err != nil { - klog.Errorf("Unable to construct reference to '%#v': %v", pod, err) - } else { - ref.Kind = "" - if _, isMirrorPod := pod.Annotations[corev1.MirrorPodAnnotationKey]; isMirrorPod { - ref.UID = types.UID(pod.Annotations[corev1.MirrorPodAnnotationKey]) - } - events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, ref) - } - } - - return describePod(pod, events) -} - -func describePod(pod *corev1.Pod, events *corev1.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", pod.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", pod.Namespace) - if pod.Spec.Priority != nil { - w.Write(LEVEL_0, "Priority:\t%d\n", *pod.Spec.Priority) - } - if len(pod.Spec.PriorityClassName) > 0 { - w.Write(LEVEL_0, "Priority Class Name:\t%s\n", stringOrNone(pod.Spec.PriorityClassName)) - } - if pod.Spec.NodeName == "" { - w.Write(LEVEL_0, "Node:\t\n") - } else { - w.Write(LEVEL_0, "Node:\t%s\n", pod.Spec.NodeName+"/"+pod.Status.HostIP) - } - if pod.Status.StartTime != nil { - w.Write(LEVEL_0, 
"Start Time:\t%s\n", pod.Status.StartTime.Time.Format(time.RFC1123Z)) - } - printLabelsMultiline(w, "Labels", pod.Labels) - printAnnotationsMultiline(w, "Annotations", pod.Annotations) - if pod.DeletionTimestamp != nil { - w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampSince(*pod.DeletionTimestamp)) - w.Write(LEVEL_0, "Termination Grace Period:\t%ds\n", *pod.DeletionGracePeriodSeconds) - } else { - w.Write(LEVEL_0, "Status:\t%s\n", string(pod.Status.Phase)) - } - if len(pod.Status.Reason) > 0 { - w.Write(LEVEL_0, "Reason:\t%s\n", pod.Status.Reason) - } - if len(pod.Status.Message) > 0 { - w.Write(LEVEL_0, "Message:\t%s\n", pod.Status.Message) - } - w.Write(LEVEL_0, "IP:\t%s\n", pod.Status.PodIP) - if controlledBy := printController(pod); len(controlledBy) > 0 { - w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy) - } - if len(pod.Status.NominatedNodeName) > 0 { - w.Write(LEVEL_0, "NominatedNodeName:\t%s\n", pod.Status.NominatedNodeName) - } - - if len(pod.Spec.InitContainers) > 0 { - describeContainers("Init Containers", pod.Spec.InitContainers, pod.Status.InitContainerStatuses, EnvValueRetriever(pod), w, "") - } - describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), w, "") - if len(pod.Spec.ReadinessGates) > 0 { - w.Write(LEVEL_0, "Readiness Gates:\n Type\tStatus\n") - for _, g := range pod.Spec.ReadinessGates { - status := "" - for _, c := range pod.Status.Conditions { - if c.Type == g.ConditionType { - status = fmt.Sprintf("%v", c.Status) - break - } - } - w.Write(LEVEL_1, "%v \t%v \n", - g.ConditionType, - status) - } - } - if len(pod.Status.Conditions) > 0 { - w.Write(LEVEL_0, "Conditions:\n Type\tStatus\n") - for _, c := range pod.Status.Conditions { - w.Write(LEVEL_1, "%v \t%v \n", - c.Type, - c.Status) - } - } - describeVolumes(pod.Spec.Volumes, w, "") - if pod.Status.QOSClass != "" { - w.Write(LEVEL_0, "QoS Class:\t%s\n", pod.Status.QOSClass) - } else { - w.Write(LEVEL_0, "QoS Class:\t%s\n", qos.GetPodQOS(pod)) - } - printLabelsMultiline(w, "Node-Selectors", pod.Spec.NodeSelector) - printPodTolerationsMultiline(w, "Tolerations", pod.Spec.Tolerations) - if events != nil { - DescribeEvents(events, w) - } - return nil - }) -} - -func printController(controllee metav1.Object) string { - if controllerRef := metav1.GetControllerOf(controllee); controllerRef != nil { - return fmt.Sprintf("%s/%s", controllerRef.Kind, controllerRef.Name) - } - return "" -} - -func describeVolumes(volumes []corev1.Volume, w PrefixWriter, space string) { - if volumes == nil || len(volumes) == 0 { - w.Write(LEVEL_0, "%sVolumes:\t\n", space) - return - } - w.Write(LEVEL_0, "%sVolumes:\n", space) - for _, volume := range volumes { - nameIndent := "" - if len(space) > 0 { - nameIndent = " " - } - w.Write(LEVEL_1, "%s%v:\n", nameIndent, volume.Name) - switch { - case volume.VolumeSource.HostPath != nil: - printHostPathVolumeSource(volume.VolumeSource.HostPath, w) - case volume.VolumeSource.EmptyDir != nil: - printEmptyDirVolumeSource(volume.VolumeSource.EmptyDir, w) - case volume.VolumeSource.GCEPersistentDisk != nil: - printGCEPersistentDiskVolumeSource(volume.VolumeSource.GCEPersistentDisk, w) - case volume.VolumeSource.AWSElasticBlockStore != nil: - printAWSElasticBlockStoreVolumeSource(volume.VolumeSource.AWSElasticBlockStore, w) - case volume.VolumeSource.GitRepo != nil: - printGitRepoVolumeSource(volume.VolumeSource.GitRepo, w) - case volume.VolumeSource.Secret != nil: - printSecretVolumeSource(volume.VolumeSource.Secret, w) 
- case volume.VolumeSource.ConfigMap != nil: - printConfigMapVolumeSource(volume.VolumeSource.ConfigMap, w) - case volume.VolumeSource.NFS != nil: - printNFSVolumeSource(volume.VolumeSource.NFS, w) - case volume.VolumeSource.ISCSI != nil: - printISCSIVolumeSource(volume.VolumeSource.ISCSI, w) - case volume.VolumeSource.Glusterfs != nil: - printGlusterfsVolumeSource(volume.VolumeSource.Glusterfs, w) - case volume.VolumeSource.PersistentVolumeClaim != nil: - printPersistentVolumeClaimVolumeSource(volume.VolumeSource.PersistentVolumeClaim, w) - case volume.VolumeSource.RBD != nil: - printRBDVolumeSource(volume.VolumeSource.RBD, w) - case volume.VolumeSource.Quobyte != nil: - printQuobyteVolumeSource(volume.VolumeSource.Quobyte, w) - case volume.VolumeSource.DownwardAPI != nil: - printDownwardAPIVolumeSource(volume.VolumeSource.DownwardAPI, w) - case volume.VolumeSource.AzureDisk != nil: - printAzureDiskVolumeSource(volume.VolumeSource.AzureDisk, w) - case volume.VolumeSource.VsphereVolume != nil: - printVsphereVolumeSource(volume.VolumeSource.VsphereVolume, w) - case volume.VolumeSource.Cinder != nil: - printCinderVolumeSource(volume.VolumeSource.Cinder, w) - case volume.VolumeSource.PhotonPersistentDisk != nil: - printPhotonPersistentDiskVolumeSource(volume.VolumeSource.PhotonPersistentDisk, w) - case volume.VolumeSource.PortworxVolume != nil: - printPortworxVolumeSource(volume.VolumeSource.PortworxVolume, w) - case volume.VolumeSource.ScaleIO != nil: - printScaleIOVolumeSource(volume.VolumeSource.ScaleIO, w) - case volume.VolumeSource.CephFS != nil: - printCephFSVolumeSource(volume.VolumeSource.CephFS, w) - case volume.VolumeSource.StorageOS != nil: - printStorageOSVolumeSource(volume.VolumeSource.StorageOS, w) - case volume.VolumeSource.FC != nil: - printFCVolumeSource(volume.VolumeSource.FC, w) - case volume.VolumeSource.AzureFile != nil: - printAzureFileVolumeSource(volume.VolumeSource.AzureFile, w) - case volume.VolumeSource.FlexVolume != nil: - printFlexVolumeSource(volume.VolumeSource.FlexVolume, w) - case volume.VolumeSource.Flocker != nil: - printFlockerVolumeSource(volume.VolumeSource.Flocker, w) - case volume.VolumeSource.Projected != nil: - printProjectedVolumeSource(volume.VolumeSource.Projected, w) - case volume.VolumeSource.CSI != nil: - printCSIVolumeSource(volume.VolumeSource.CSI, w) - default: - w.Write(LEVEL_1, "\n") - } - } -} - -func printHostPathVolumeSource(hostPath *corev1.HostPathVolumeSource, w PrefixWriter) { - hostPathType := "" - if hostPath.Type != nil { - hostPathType = string(*hostPath.Type) - } - w.Write(LEVEL_2, "Type:\tHostPath (bare host directory volume)\n"+ - " Path:\t%v\n"+ - " HostPathType:\t%v\n", - hostPath.Path, hostPathType) -} - -func printEmptyDirVolumeSource(emptyDir *corev1.EmptyDirVolumeSource, w PrefixWriter) { - var sizeLimit string - if emptyDir.SizeLimit != nil && emptyDir.SizeLimit.Cmp(resource.Quantity{}) > 0 { - sizeLimit = fmt.Sprintf("%v", emptyDir.SizeLimit) - } else { - sizeLimit = "" - } - w.Write(LEVEL_2, "Type:\tEmptyDir (a temporary directory that shares a pod's lifetime)\n"+ - " Medium:\t%v\n"+ - " SizeLimit:\t%v\n", - emptyDir.Medium, sizeLimit) -} - -func printGCEPersistentDiskVolumeSource(gce *corev1.GCEPersistentDiskVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tGCEPersistentDisk (a Persistent Disk resource in Google Compute Engine)\n"+ - " PDName:\t%v\n"+ - " FSType:\t%v\n"+ - " Partition:\t%v\n"+ - " ReadOnly:\t%v\n", - gce.PDName, gce.FSType, gce.Partition, gce.ReadOnly) -} - -func 
printAWSElasticBlockStoreVolumeSource(aws *corev1.AWSElasticBlockStoreVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tAWSElasticBlockStore (a Persistent Disk resource in AWS)\n"+ - " VolumeID:\t%v\n"+ - " FSType:\t%v\n"+ - " Partition:\t%v\n"+ - " ReadOnly:\t%v\n", - aws.VolumeID, aws.FSType, aws.Partition, aws.ReadOnly) -} - -func printGitRepoVolumeSource(git *corev1.GitRepoVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tGitRepo (a volume that is pulled from git when the pod is created)\n"+ - " Repository:\t%v\n"+ - " Revision:\t%v\n", - git.Repository, git.Revision) -} - -func printSecretVolumeSource(secret *corev1.SecretVolumeSource, w PrefixWriter) { - optional := secret.Optional != nil && *secret.Optional - w.Write(LEVEL_2, "Type:\tSecret (a volume populated by a Secret)\n"+ - " SecretName:\t%v\n"+ - " Optional:\t%v\n", - secret.SecretName, optional) -} - -func printConfigMapVolumeSource(configMap *corev1.ConfigMapVolumeSource, w PrefixWriter) { - optional := configMap.Optional != nil && *configMap.Optional - w.Write(LEVEL_2, "Type:\tConfigMap (a volume populated by a ConfigMap)\n"+ - " Name:\t%v\n"+ - " Optional:\t%v\n", - configMap.Name, optional) -} - -func printProjectedVolumeSource(projected *corev1.ProjectedVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tProjected (a volume that contains injected data from multiple sources)\n") - for _, source := range projected.Sources { - if source.Secret != nil { - w.Write(LEVEL_2, "SecretName:\t%v\n"+ - " SecretOptionalName:\t%v\n", - source.Secret.Name, source.Secret.Optional) - } else if source.DownwardAPI != nil { - w.Write(LEVEL_2, "DownwardAPI:\ttrue\n") - } else if source.ConfigMap != nil { - w.Write(LEVEL_2, "ConfigMapName:\t%v\n"+ - " ConfigMapOptional:\t%v\n", - source.ConfigMap.Name, source.ConfigMap.Optional) - } else if source.ServiceAccountToken != nil { - w.Write(LEVEL_2, "TokenExpirationSeconds:\t%d\n", - *source.ServiceAccountToken.ExpirationSeconds) - } - } -} - -func printNFSVolumeSource(nfs *corev1.NFSVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tNFS (an NFS mount that lasts the lifetime of a pod)\n"+ - " Server:\t%v\n"+ - " Path:\t%v\n"+ - " ReadOnly:\t%v\n", - nfs.Server, nfs.Path, nfs.ReadOnly) -} - -func printQuobyteVolumeSource(quobyte *corev1.QuobyteVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tQuobyte (a Quobyte mount on the host that shares a pod's lifetime)\n"+ - " Registry:\t%v\n"+ - " Volume:\t%v\n"+ - " ReadOnly:\t%v\n", - quobyte.Registry, quobyte.Volume, quobyte.ReadOnly) -} - -func printPortworxVolumeSource(pwxVolume *corev1.PortworxVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tPortworxVolume (a Portworx Volume resource)\n"+ - " VolumeID:\t%v\n", - pwxVolume.VolumeID) -} - -func printISCSIVolumeSource(iscsi *corev1.ISCSIVolumeSource, w PrefixWriter) { - initiator := "" - if iscsi.InitiatorName != nil { - initiator = *iscsi.InitiatorName - } - w.Write(LEVEL_2, "Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+ - " TargetPortal:\t%v\n"+ - " IQN:\t%v\n"+ - " Lun:\t%v\n"+ - " ISCSIInterface\t%v\n"+ - " FSType:\t%v\n"+ - " ReadOnly:\t%v\n"+ - " Portals:\t%v\n"+ - " DiscoveryCHAPAuth:\t%v\n"+ - " SessionCHAPAuth:\t%v\n"+ - " SecretRef:\t%v\n"+ - " InitiatorName:\t%v\n", - iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiator) -} - -func 
printISCSIPersistentVolumeSource(iscsi *corev1.ISCSIPersistentVolumeSource, w PrefixWriter) { - initiatorName := "" - if iscsi.InitiatorName != nil { - initiatorName = *iscsi.InitiatorName - } - w.Write(LEVEL_2, "Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+ - " TargetPortal:\t%v\n"+ - " IQN:\t%v\n"+ - " Lun:\t%v\n"+ - " ISCSIInterface\t%v\n"+ - " FSType:\t%v\n"+ - " ReadOnly:\t%v\n"+ - " Portals:\t%v\n"+ - " DiscoveryCHAPAuth:\t%v\n"+ - " SessionCHAPAuth:\t%v\n"+ - " SecretRef:\t%v\n"+ - " InitiatorName:\t%v\n", - iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiatorName) -} - -func printGlusterfsVolumeSource(glusterfs *corev1.GlusterfsVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+ - " EndpointsName:\t%v\n"+ - " Path:\t%v\n"+ - " ReadOnly:\t%v\n", - glusterfs.EndpointsName, glusterfs.Path, glusterfs.ReadOnly) -} - -func printGlusterfsPersistentVolumeSource(glusterfs *corev1.GlusterfsPersistentVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+ - " EndpointsName:\t%v\n"+ - " EndpointsNamespace:\t%v\n"+ - " Path:\t%v\n"+ - " ReadOnly:\t%v\n", - glusterfs.EndpointsName, glusterfs.EndpointsNamespace, glusterfs.Path, glusterfs.ReadOnly) -} - -func printPersistentVolumeClaimVolumeSource(claim *corev1.PersistentVolumeClaimVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tPersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)\n"+ - " ClaimName:\t%v\n"+ - " ReadOnly:\t%v\n", - claim.ClaimName, claim.ReadOnly) -} - -func printRBDVolumeSource(rbd *corev1.RBDVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+ - " CephMonitors:\t%v\n"+ - " RBDImage:\t%v\n"+ - " FSType:\t%v\n"+ - " RBDPool:\t%v\n"+ - " RadosUser:\t%v\n"+ - " Keyring:\t%v\n"+ - " SecretRef:\t%v\n"+ - " ReadOnly:\t%v\n", - rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly) -} - -func printRBDPersistentVolumeSource(rbd *corev1.RBDPersistentVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+ - " CephMonitors:\t%v\n"+ - " RBDImage:\t%v\n"+ - " FSType:\t%v\n"+ - " RBDPool:\t%v\n"+ - " RadosUser:\t%v\n"+ - " Keyring:\t%v\n"+ - " SecretRef:\t%v\n"+ - " ReadOnly:\t%v\n", - rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly) -} - -func printDownwardAPIVolumeSource(d *corev1.DownwardAPIVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tDownwardAPI (a volume populated by information about the pod)\n Items:\n") - for _, mapping := range d.Items { - if mapping.FieldRef != nil { - w.Write(LEVEL_3, "%v -> %v\n", mapping.FieldRef.FieldPath, mapping.Path) - } - if mapping.ResourceFieldRef != nil { - w.Write(LEVEL_3, "%v -> %v\n", mapping.ResourceFieldRef.Resource, mapping.Path) - } - } -} - -func printAzureDiskVolumeSource(d *corev1.AzureDiskVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tAzureDisk (an Azure Data Disk mount on the host and bind mount to the pod)\n"+ - " DiskName:\t%v\n"+ - " DiskURI:\t%v\n"+ - " Kind: \t%v\n"+ - " FSType:\t%v\n"+ 
- " CachingMode:\t%v\n"+ - " ReadOnly:\t%v\n", - d.DiskName, d.DataDiskURI, *d.Kind, *d.FSType, *d.CachingMode, *d.ReadOnly) -} - -func printVsphereVolumeSource(vsphere *corev1.VsphereVirtualDiskVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tvSphereVolume (a Persistent Disk resource in vSphere)\n"+ - " VolumePath:\t%v\n"+ - " FSType:\t%v\n"+ - " StoragePolicyName:\t%v\n", - vsphere.VolumePath, vsphere.FSType, vsphere.StoragePolicyName) -} - -func printPhotonPersistentDiskVolumeSource(photon *corev1.PhotonPersistentDiskVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tPhotonPersistentDisk (a Persistent Disk resource in photon platform)\n"+ - " PdID:\t%v\n"+ - " FSType:\t%v\n", - photon.PdID, photon.FSType) -} - -func printCinderVolumeSource(cinder *corev1.CinderVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+ - " VolumeID:\t%v\n"+ - " FSType:\t%v\n"+ - " ReadOnly:\t%v\n"+ - " SecretRef:\t%v\n", - cinder.VolumeID, cinder.FSType, cinder.ReadOnly, cinder.SecretRef) -} - -func printCinderPersistentVolumeSource(cinder *corev1.CinderPersistentVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+ - " VolumeID:\t%v\n"+ - " FSType:\t%v\n"+ - " ReadOnly:\t%v\n"+ - " SecretRef:\t%v\n", - cinder.VolumeID, cinder.FSType, cinder.ReadOnly, cinder.SecretRef) -} - -func printScaleIOVolumeSource(sio *corev1.ScaleIOVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n"+ - " Gateway:\t%v\n"+ - " System:\t%v\n"+ - " Protection Domain:\t%v\n"+ - " Storage Pool:\t%v\n"+ - " Storage Mode:\t%v\n"+ - " VolumeName:\t%v\n"+ - " FSType:\t%v\n"+ - " ReadOnly:\t%v\n", - sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, sio.FSType, sio.ReadOnly) -} - -func printScaleIOPersistentVolumeSource(sio *corev1.ScaleIOPersistentVolumeSource, w PrefixWriter) { - var secretNS, secretName string - if sio.SecretRef != nil { - secretName = sio.SecretRef.Name - secretNS = sio.SecretRef.Namespace - } - w.Write(LEVEL_2, "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n"+ - " Gateway:\t%v\n"+ - " System:\t%v\n"+ - " Protection Domain:\t%v\n"+ - " Storage Pool:\t%v\n"+ - " Storage Mode:\t%v\n"+ - " VolumeName:\t%v\n"+ - " SecretName:\t%v\n"+ - " SecretNamespace:\t%v\n"+ - " FSType:\t%v\n"+ - " ReadOnly:\t%v\n", - sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, secretName, secretNS, sio.FSType, sio.ReadOnly) -} - -func printLocalVolumeSource(ls *corev1.LocalVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tLocalVolume (a persistent volume backed by local storage on a node)\n"+ - " Path:\t%v\n", - ls.Path) -} - -func printCephFSVolumeSource(cephfs *corev1.CephFSVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n"+ - " Monitors:\t%v\n"+ - " Path:\t%v\n"+ - " User:\t%v\n"+ - " SecretFile:\t%v\n"+ - " SecretRef:\t%v\n"+ - " ReadOnly:\t%v\n", - cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly) -} - -func printCephFSPersistentVolumeSource(cephfs *corev1.CephFSPersistentVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tCephFS (a CephFS mount on the host that shares a pod's lifetime)\n"+ - " Monitors:\t%v\n"+ - " Path:\t%v\n"+ - " User:\t%v\n"+ - " SecretFile:\t%v\n"+ - " 
SecretRef:\t%v\n"+ - " ReadOnly:\t%v\n", - cephfs.Monitors, cephfs.Path, cephfs.User, cephfs.SecretFile, cephfs.SecretRef, cephfs.ReadOnly) -} - -func printStorageOSVolumeSource(storageos *corev1.StorageOSVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tStorageOS (a StorageOS Persistent Disk resource)\n"+ - " VolumeName:\t%v\n"+ - " VolumeNamespace:\t%v\n"+ - " FSType:\t%v\n"+ - " ReadOnly:\t%v\n", - storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly) -} - -func printStorageOSPersistentVolumeSource(storageos *corev1.StorageOSPersistentVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tStorageOS (a StorageOS Persistent Disk resource)\n"+ - " VolumeName:\t%v\n"+ - " VolumeNamespace:\t%v\n"+ - " FSType:\t%v\n"+ - " ReadOnly:\t%v\n", - storageos.VolumeName, storageos.VolumeNamespace, storageos.FSType, storageos.ReadOnly) -} - -func printFCVolumeSource(fc *corev1.FCVolumeSource, w PrefixWriter) { - lun := "" - if fc.Lun != nil { - lun = strconv.Itoa(int(*fc.Lun)) - } - w.Write(LEVEL_2, "Type:\tFC (a Fibre Channel disk)\n"+ - " TargetWWNs:\t%v\n"+ - " LUN:\t%v\n"+ - " FSType:\t%v\n"+ - " ReadOnly:\t%v\n", - strings.Join(fc.TargetWWNs, ", "), lun, fc.FSType, fc.ReadOnly) -} - -func printAzureFileVolumeSource(azureFile *corev1.AzureFileVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tAzureFile (an Azure File Service mount on the host and bind mount to the pod)\n"+ - " SecretName:\t%v\n"+ - " ShareName:\t%v\n"+ - " ReadOnly:\t%v\n", - azureFile.SecretName, azureFile.ShareName, azureFile.ReadOnly) -} - -func printAzureFilePersistentVolumeSource(azureFile *corev1.AzureFilePersistentVolumeSource, w PrefixWriter) { - ns := "" - if azureFile.SecretNamespace != nil { - ns = *azureFile.SecretNamespace - } - w.Write(LEVEL_2, "Type:\tAzureFile (an Azure File Service mount on the host and bind mount to the pod)\n"+ - " SecretName:\t%v\n"+ - " SecretNamespace:\t%v\n"+ - " ShareName:\t%v\n"+ - " ReadOnly:\t%v\n", - azureFile.SecretName, ns, azureFile.ShareName, azureFile.ReadOnly) -} - -func printFlexPersistentVolumeSource(flex *corev1.FlexPersistentVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+ - " Driver:\t%v\n"+ - " FSType:\t%v\n"+ - " SecretRef:\t%v\n"+ - " ReadOnly:\t%v\n"+ - " Options:\t%v\n", - flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options) -} - -func printFlexVolumeSource(flex *corev1.FlexVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tFlexVolume (a generic volume resource that is provisioned/attached using an exec based plugin)\n"+ - " Driver:\t%v\n"+ - " FSType:\t%v\n"+ - " SecretRef:\t%v\n"+ - " ReadOnly:\t%v\n"+ - " Options:\t%v\n", - flex.Driver, flex.FSType, flex.SecretRef, flex.ReadOnly, flex.Options) -} - -func printFlockerVolumeSource(flocker *corev1.FlockerVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tFlocker (a Flocker volume mounted by the Flocker agent)\n"+ - " DatasetName:\t%v\n"+ - " DatasetUUID:\t%v\n", - flocker.DatasetName, flocker.DatasetUUID) -} - -func printCSIVolumeSource(csi *corev1.CSIVolumeSource, w PrefixWriter) { - var readOnly bool - var fsType string - if csi.ReadOnly != nil && *csi.ReadOnly { - readOnly = true - } - if csi.FSType != nil { - fsType = *csi.FSType - } - w.Write(LEVEL_2, "Type:\tCSI (a Container Storage Interface (CSI) volume source)\n"+ - " Driver:\t%v\n"+ - " FSType:\t%v\n"+ - " ReadOnly:\t%v\n", - csi.Driver, fsType, readOnly) - 
printCSIPersistentVolumeAttributesMultiline(w, "VolumeAttributes", csi.VolumeAttributes) -} - -func printCSIPersistentVolumeSource(csi *corev1.CSIPersistentVolumeSource, w PrefixWriter) { - w.Write(LEVEL_2, "Type:\tCSI (a Container Storage Interface (CSI) volume source)\n"+ - " Driver:\t%v\n"+ - " VolumeHandle:\t%v\n"+ - " ReadOnly:\t%v\n", - csi.Driver, csi.VolumeHandle, csi.ReadOnly) - printCSIPersistentVolumeAttributesMultiline(w, "VolumeAttributes", csi.VolumeAttributes) -} - -func printCSIPersistentVolumeAttributesMultiline(w PrefixWriter, title string, annotations map[string]string) { - printCSIPersistentVolumeAttributesMultilineIndent(w, "", title, "\t", annotations, sets.NewString()) -} - -func printCSIPersistentVolumeAttributesMultilineIndent(w PrefixWriter, initialIndent, title, innerIndent string, attributes map[string]string, skip sets.String) { - w.Write(LEVEL_2, "%s%s:%s", initialIndent, title, innerIndent) - - if len(attributes) == 0 { - w.WriteLine("") - return - } - - // to print labels in the sorted order - keys := make([]string, 0, len(attributes)) - for key := range attributes { - if skip.Has(key) { - continue - } - keys = append(keys, key) - } - if len(attributes) == 0 { - w.WriteLine("") - return - } - sort.Strings(keys) - - for i, key := range keys { - if i != 0 { - w.Write(LEVEL_2, initialIndent) - w.Write(LEVEL_2, innerIndent) - } - line := fmt.Sprintf("%s=%s", key, attributes[key]) - if len(line) > maxAnnotationLen { - w.Write(LEVEL_2, "%s...\n", line[:maxAnnotationLen]) - } else { - w.Write(LEVEL_2, "%s\n", line) - } - i++ - } -} - -type PersistentVolumeDescriber struct { - clientset.Interface -} - -func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.CoreV1().PersistentVolumes() - - pv, err := c.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, pv) - } - - return describePersistentVolume(pv, events) -} - -func printVolumeNodeAffinity(w PrefixWriter, affinity *corev1.VolumeNodeAffinity) { - w.Write(LEVEL_0, "Node Affinity:\t") - if affinity == nil || affinity.Required == nil { - w.WriteLine("") - return - } - w.WriteLine("") - - if affinity.Required != nil { - w.Write(LEVEL_1, "Required Terms:\t") - if len(affinity.Required.NodeSelectorTerms) == 0 { - w.WriteLine("") - } else { - w.WriteLine("") - for i, term := range affinity.Required.NodeSelectorTerms { - printNodeSelectorTermsMultilineWithIndent(w, LEVEL_2, fmt.Sprintf("Term %v", i), "\t", term.MatchExpressions) - } - } - } -} - -// printLabelsMultiline prints multiple labels with a user-defined alignment. 
-func printNodeSelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []corev1.NodeSelectorRequirement) { - w.Write(indentLevel, "%s:%s", title, innerIndent) - - if len(reqs) == 0 { - w.WriteLine("") - return - } - - for i, req := range reqs { - if i != 0 { - w.Write(indentLevel, "%s", innerIndent) - } - exprStr := fmt.Sprintf("%s %s", req.Key, strings.ToLower(string(req.Operator))) - if len(req.Values) > 0 { - exprStr = fmt.Sprintf("%s [%s]", exprStr, strings.Join(req.Values, ", ")) - } - w.Write(LEVEL_0, "%s\n", exprStr) - } -} - -func describePersistentVolume(pv *corev1.PersistentVolume, events *corev1.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", pv.Name) - printLabelsMultiline(w, "Labels", pv.ObjectMeta.Labels) - printAnnotationsMultiline(w, "Annotations", pv.ObjectMeta.Annotations) - w.Write(LEVEL_0, "Finalizers:\t%v\n", pv.ObjectMeta.Finalizers) - w.Write(LEVEL_0, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClass(pv)) - if pv.ObjectMeta.DeletionTimestamp != nil { - w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampSince(*pv.ObjectMeta.DeletionTimestamp)) - } else { - w.Write(LEVEL_0, "Status:\t%v\n", pv.Status.Phase) - } - if pv.Spec.ClaimRef != nil { - w.Write(LEVEL_0, "Claim:\t%s\n", pv.Spec.ClaimRef.Namespace+"/"+pv.Spec.ClaimRef.Name) - } else { - w.Write(LEVEL_0, "Claim:\t%s\n", "") - } - w.Write(LEVEL_0, "Reclaim Policy:\t%v\n", pv.Spec.PersistentVolumeReclaimPolicy) - w.Write(LEVEL_0, "Access Modes:\t%s\n", storageutil.GetAccessModesAsString(pv.Spec.AccessModes)) - if pv.Spec.VolumeMode != nil { - w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pv.Spec.VolumeMode) - } - storage := pv.Spec.Capacity[corev1.ResourceStorage] - w.Write(LEVEL_0, "Capacity:\t%s\n", storage.String()) - printVolumeNodeAffinity(w, pv.Spec.NodeAffinity) - w.Write(LEVEL_0, "Message:\t%s\n", pv.Status.Message) - w.Write(LEVEL_0, "Source:\n") - - switch { - case pv.Spec.HostPath != nil: - printHostPathVolumeSource(pv.Spec.HostPath, w) - case pv.Spec.GCEPersistentDisk != nil: - printGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, w) - case pv.Spec.AWSElasticBlockStore != nil: - printAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, w) - case pv.Spec.NFS != nil: - printNFSVolumeSource(pv.Spec.NFS, w) - case pv.Spec.ISCSI != nil: - printISCSIPersistentVolumeSource(pv.Spec.ISCSI, w) - case pv.Spec.Glusterfs != nil: - printGlusterfsPersistentVolumeSource(pv.Spec.Glusterfs, w) - case pv.Spec.RBD != nil: - printRBDPersistentVolumeSource(pv.Spec.RBD, w) - case pv.Spec.Quobyte != nil: - printQuobyteVolumeSource(pv.Spec.Quobyte, w) - case pv.Spec.VsphereVolume != nil: - printVsphereVolumeSource(pv.Spec.VsphereVolume, w) - case pv.Spec.Cinder != nil: - printCinderPersistentVolumeSource(pv.Spec.Cinder, w) - case pv.Spec.AzureDisk != nil: - printAzureDiskVolumeSource(pv.Spec.AzureDisk, w) - case pv.Spec.PhotonPersistentDisk != nil: - printPhotonPersistentDiskVolumeSource(pv.Spec.PhotonPersistentDisk, w) - case pv.Spec.PortworxVolume != nil: - printPortworxVolumeSource(pv.Spec.PortworxVolume, w) - case pv.Spec.ScaleIO != nil: - printScaleIOPersistentVolumeSource(pv.Spec.ScaleIO, w) - case pv.Spec.Local != nil: - printLocalVolumeSource(pv.Spec.Local, w) - case pv.Spec.CephFS != nil: - printCephFSPersistentVolumeSource(pv.Spec.CephFS, w) - case pv.Spec.StorageOS != nil: - printStorageOSPersistentVolumeSource(pv.Spec.StorageOS, w) - case 
pv.Spec.FC != nil: - printFCVolumeSource(pv.Spec.FC, w) - case pv.Spec.AzureFile != nil: - printAzureFilePersistentVolumeSource(pv.Spec.AzureFile, w) - case pv.Spec.FlexVolume != nil: - printFlexPersistentVolumeSource(pv.Spec.FlexVolume, w) - case pv.Spec.Flocker != nil: - printFlockerVolumeSource(pv.Spec.Flocker, w) - case pv.Spec.CSI != nil: - printCSIPersistentVolumeSource(pv.Spec.CSI, w) - default: - w.Write(LEVEL_1, "\n") - } - - if events != nil { - DescribeEvents(events, w) - } - - return nil - }) -} - -type PersistentVolumeClaimDescriber struct { - clientset.Interface -} - -func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.CoreV1().PersistentVolumeClaims(namespace) - - pvc, err := c.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - pc := d.CoreV1().Pods(namespace) - - mountPods, err := getMountPods(pc, pvc.Name) - if err != nil { - return "", err - } - - events, _ := d.CoreV1().Events(namespace).Search(scheme.Scheme, pvc) - - return describePersistentVolumeClaim(pvc, events, mountPods) -} - -func getMountPods(c corev1client.PodInterface, pvcName string) ([]corev1.Pod, error) { - nsPods, err := c.List(metav1.ListOptions{}) - if err != nil { - return []corev1.Pod{}, err - } - - var pods []corev1.Pod - - for _, pod := range nsPods.Items { - pvcs := getPvcs(pod.Spec.Volumes) - - for _, pvc := range pvcs { - if pvc.PersistentVolumeClaim.ClaimName == pvcName { - pods = append(pods, pod) - } - } - } - - return pods, nil -} - -func getPvcs(volumes []corev1.Volume) []corev1.Volume { - var pvcs []corev1.Volume - - for _, volume := range volumes { - if volume.VolumeSource.PersistentVolumeClaim != nil { - pvcs = append(pvcs, volume) - } - } - - return pvcs -} - -func describePersistentVolumeClaim(pvc *corev1.PersistentVolumeClaim, events *corev1.EventList, mountPods []corev1.Pod) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", pvc.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", pvc.Namespace) - w.Write(LEVEL_0, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClaimClass(pvc)) - if pvc.ObjectMeta.DeletionTimestamp != nil { - w.Write(LEVEL_0, "Status:\tTerminating (lasts %s)\n", translateTimestampSince(*pvc.ObjectMeta.DeletionTimestamp)) - } else { - w.Write(LEVEL_0, "Status:\t%v\n", pvc.Status.Phase) - } - w.Write(LEVEL_0, "Volume:\t%s\n", pvc.Spec.VolumeName) - printLabelsMultiline(w, "Labels", pvc.Labels) - printAnnotationsMultiline(w, "Annotations", pvc.Annotations) - w.Write(LEVEL_0, "Finalizers:\t%v\n", pvc.ObjectMeta.Finalizers) - storage := pvc.Spec.Resources.Requests[corev1.ResourceStorage] - capacity := "" - accessModes := "" - if pvc.Spec.VolumeName != "" { - accessModes = storageutil.GetAccessModesAsString(pvc.Status.AccessModes) - storage = pvc.Status.Capacity[corev1.ResourceStorage] - capacity = storage.String() - } - w.Write(LEVEL_0, "Capacity:\t%s\n", capacity) - w.Write(LEVEL_0, "Access Modes:\t%s\n", accessModes) - if pvc.Spec.VolumeMode != nil { - w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pvc.Spec.VolumeMode) - } - printPodsMultiline(w, "Mounted By", mountPods) - - if len(pvc.Status.Conditions) > 0 { - w.Write(LEVEL_0, "Conditions:\n") - w.Write(LEVEL_1, "Type\tStatus\tLastProbeTime\tLastTransitionTime\tReason\tMessage\n") - w.Write(LEVEL_1, "----\t------\t-----------------\t------------------\t------\t-------\n") - for _, c := range pvc.Status.Conditions { - w.Write(LEVEL_1, 
"%v \t%v \t%s \t%s \t%v \t%v\n", - c.Type, - c.Status, - c.LastProbeTime.Time.Format(time.RFC1123Z), - c.LastTransitionTime.Time.Format(time.RFC1123Z), - c.Reason, - c.Message) - } - } - if events != nil { - DescribeEvents(events, w) - } - - return nil - }) -} - -func describeContainers(label string, containers []corev1.Container, containerStatuses []corev1.ContainerStatus, - resolverFn EnvVarResolverFunc, w PrefixWriter, space string) { - statuses := map[string]corev1.ContainerStatus{} - for _, status := range containerStatuses { - statuses[status.Name] = status - } - - describeContainersLabel(containers, label, space, w) - - for _, container := range containers { - status, ok := statuses[container.Name] - describeContainerBasicInfo(container, status, ok, space, w) - describeContainerCommand(container, w) - if ok { - describeContainerState(status, w) - } - describeContainerResource(container, w) - describeContainerProbe(container, w) - if len(container.EnvFrom) > 0 { - describeContainerEnvFrom(container, resolverFn, w) - } - describeContainerEnvVars(container, resolverFn, w) - describeContainerVolumes(container, w) - } -} - -func describeContainersLabel(containers []corev1.Container, label, space string, w PrefixWriter) { - none := "" - if len(containers) == 0 { - none = " " - } - w.Write(LEVEL_0, "%s%s:%s\n", space, label, none) -} - -func describeContainerBasicInfo(container corev1.Container, status corev1.ContainerStatus, ok bool, space string, w PrefixWriter) { - nameIndent := "" - if len(space) > 0 { - nameIndent = " " - } - w.Write(LEVEL_1, "%s%v:\n", nameIndent, container.Name) - if ok { - w.Write(LEVEL_2, "Container ID:\t%s\n", status.ContainerID) - } - w.Write(LEVEL_2, "Image:\t%s\n", container.Image) - if ok { - w.Write(LEVEL_2, "Image ID:\t%s\n", status.ImageID) - } - portString := describeContainerPorts(container.Ports) - if strings.Contains(portString, ",") { - w.Write(LEVEL_2, "Ports:\t%s\n", portString) - } else { - w.Write(LEVEL_2, "Port:\t%s\n", stringOrNone(portString)) - } - hostPortString := describeContainerHostPorts(container.Ports) - if strings.Contains(hostPortString, ",") { - w.Write(LEVEL_2, "Host Ports:\t%s\n", hostPortString) - } else { - w.Write(LEVEL_2, "Host Port:\t%s\n", stringOrNone(hostPortString)) - } -} - -func describeContainerPorts(cPorts []corev1.ContainerPort) string { - ports := make([]string, 0, len(cPorts)) - for _, cPort := range cPorts { - ports = append(ports, fmt.Sprintf("%d/%s", cPort.ContainerPort, cPort.Protocol)) - } - return strings.Join(ports, ", ") -} - -func describeContainerHostPorts(cPorts []corev1.ContainerPort) string { - ports := make([]string, 0, len(cPorts)) - for _, cPort := range cPorts { - ports = append(ports, fmt.Sprintf("%d/%s", cPort.HostPort, cPort.Protocol)) - } - return strings.Join(ports, ", ") -} - -func describeContainerCommand(container corev1.Container, w PrefixWriter) { - if len(container.Command) > 0 { - w.Write(LEVEL_2, "Command:\n") - for _, c := range container.Command { - for _, s := range strings.Split(c, "\n") { - w.Write(LEVEL_3, "%s\n", s) - } - } - } - if len(container.Args) > 0 { - w.Write(LEVEL_2, "Args:\n") - for _, arg := range container.Args { - for _, s := range strings.Split(arg, "\n") { - w.Write(LEVEL_3, "%s\n", s) - } - } - } -} - -func describeContainerResource(container corev1.Container, w PrefixWriter) { - resources := container.Resources - if len(resources.Limits) > 0 { - w.Write(LEVEL_2, "Limits:\n") - } - for _, name := range SortedResourceNames(resources.Limits) { - quantity := 
resources.Limits[name] - w.Write(LEVEL_3, "%s:\t%s\n", name, quantity.String()) - } - - if len(resources.Requests) > 0 { - w.Write(LEVEL_2, "Requests:\n") - } - for _, name := range SortedResourceNames(resources.Requests) { - quantity := resources.Requests[name] - w.Write(LEVEL_3, "%s:\t%s\n", name, quantity.String()) - } -} - -func describeContainerState(status corev1.ContainerStatus, w PrefixWriter) { - describeStatus("State", status.State, w) - if status.LastTerminationState.Terminated != nil { - describeStatus("Last State", status.LastTerminationState, w) - } - w.Write(LEVEL_2, "Ready:\t%v\n", printBool(status.Ready)) - w.Write(LEVEL_2, "Restart Count:\t%d\n", status.RestartCount) -} - -func describeContainerProbe(container corev1.Container, w PrefixWriter) { - if container.LivenessProbe != nil { - probe := DescribeProbe(container.LivenessProbe) - w.Write(LEVEL_2, "Liveness:\t%s\n", probe) - } - if container.ReadinessProbe != nil { - probe := DescribeProbe(container.ReadinessProbe) - w.Write(LEVEL_2, "Readiness:\t%s\n", probe) - } -} - -func describeContainerVolumes(container corev1.Container, w PrefixWriter) { - // Show volumeMounts - none := "" - if len(container.VolumeMounts) == 0 { - none = "\t" - } - w.Write(LEVEL_2, "Mounts:%s\n", none) - sort.Sort(SortableVolumeMounts(container.VolumeMounts)) - for _, mount := range container.VolumeMounts { - flags := []string{} - if mount.ReadOnly { - flags = append(flags, "ro") - } else { - flags = append(flags, "rw") - } - if len(mount.SubPath) > 0 { - flags = append(flags, fmt.Sprintf("path=%q", mount.SubPath)) - } - w.Write(LEVEL_3, "%s from %s (%s)\n", mount.MountPath, mount.Name, strings.Join(flags, ",")) - } - // Show volumeDevices if exists - if len(container.VolumeDevices) > 0 { - w.Write(LEVEL_2, "Devices:%s\n", none) - sort.Sort(SortableVolumeDevices(container.VolumeDevices)) - for _, device := range container.VolumeDevices { - w.Write(LEVEL_3, "%s from %s\n", device.DevicePath, device.Name) - } - } -} - -func describeContainerEnvVars(container corev1.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) { - none := "" - if len(container.Env) == 0 { - none = "\t" - } - w.Write(LEVEL_2, "Environment:%s\n", none) - - for _, e := range container.Env { - if e.ValueFrom == nil { - for i, s := range strings.Split(e.Value, "\n") { - if i == 0 { - w.Write(LEVEL_3, "%s:\t%s\n", e.Name, s) - } else { - w.Write(LEVEL_3, "\t%s\n", s) - } - } - continue - } - - switch { - case e.ValueFrom.FieldRef != nil: - var valueFrom string - if resolverFn != nil { - valueFrom = resolverFn(e) - } - w.Write(LEVEL_3, "%s:\t%s (%s:%s)\n", e.Name, valueFrom, e.ValueFrom.FieldRef.APIVersion, e.ValueFrom.FieldRef.FieldPath) - case e.ValueFrom.ResourceFieldRef != nil: - valueFrom, err := resourcehelper.ExtractContainerResourceValue(e.ValueFrom.ResourceFieldRef, &container) - if err != nil { - valueFrom = "" - } - resource := e.ValueFrom.ResourceFieldRef.Resource - if valueFrom == "0" && (resource == "limits.cpu" || resource == "limits.memory") { - valueFrom = "node allocatable" - } - w.Write(LEVEL_3, "%s:\t%s (%s)\n", e.Name, valueFrom, resource) - case e.ValueFrom.SecretKeyRef != nil: - optional := e.ValueFrom.SecretKeyRef.Optional != nil && *e.ValueFrom.SecretKeyRef.Optional - w.Write(LEVEL_3, "%s:\t\tOptional: %t\n", e.Name, e.ValueFrom.SecretKeyRef.Key, e.ValueFrom.SecretKeyRef.Name, optional) - case e.ValueFrom.ConfigMapKeyRef != nil: - optional := e.ValueFrom.ConfigMapKeyRef.Optional != nil && *e.ValueFrom.ConfigMapKeyRef.Optional - w.Write(LEVEL_3, 
"%s:\t\tOptional: %t\n", e.Name, e.ValueFrom.ConfigMapKeyRef.Key, e.ValueFrom.ConfigMapKeyRef.Name, optional) - } - } -} - -func describeContainerEnvFrom(container corev1.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) { - none := "" - if len(container.EnvFrom) == 0 { - none = "\t" - } - w.Write(LEVEL_2, "Environment Variables from:%s\n", none) - - for _, e := range container.EnvFrom { - from := "" - name := "" - optional := false - if e.ConfigMapRef != nil { - from = "ConfigMap" - name = e.ConfigMapRef.Name - optional = e.ConfigMapRef.Optional != nil && *e.ConfigMapRef.Optional - } else if e.SecretRef != nil { - from = "Secret" - name = e.SecretRef.Name - optional = e.SecretRef.Optional != nil && *e.SecretRef.Optional - } - if len(e.Prefix) == 0 { - w.Write(LEVEL_3, "%s\t%s\tOptional: %t\n", name, from, optional) - } else { - w.Write(LEVEL_3, "%s\t%s with prefix '%s'\tOptional: %t\n", name, from, e.Prefix, optional) - } - } -} - -// DescribeProbe is exported for consumers in other API groups that have probes -func DescribeProbe(probe *corev1.Probe) string { - attrs := fmt.Sprintf("delay=%ds timeout=%ds period=%ds #success=%d #failure=%d", probe.InitialDelaySeconds, probe.TimeoutSeconds, probe.PeriodSeconds, probe.SuccessThreshold, probe.FailureThreshold) - switch { - case probe.Exec != nil: - return fmt.Sprintf("exec %v %s", probe.Exec.Command, attrs) - case probe.HTTPGet != nil: - url := &url.URL{} - url.Scheme = strings.ToLower(string(probe.HTTPGet.Scheme)) - if len(probe.HTTPGet.Port.String()) > 0 { - url.Host = net.JoinHostPort(probe.HTTPGet.Host, probe.HTTPGet.Port.String()) - } else { - url.Host = probe.HTTPGet.Host - } - url.Path = probe.HTTPGet.Path - return fmt.Sprintf("http-get %s %s", url.String(), attrs) - case probe.TCPSocket != nil: - return fmt.Sprintf("tcp-socket %s:%s %s", probe.TCPSocket.Host, probe.TCPSocket.Port.String(), attrs) - } - return fmt.Sprintf("unknown %s", attrs) -} - -type EnvVarResolverFunc func(e corev1.EnvVar) string - -// EnvValueFrom is exported for use by describers in other packages -func EnvValueRetriever(pod *corev1.Pod) EnvVarResolverFunc { - return func(e corev1.EnvVar) string { - gv, err := schema.ParseGroupVersion(e.ValueFrom.FieldRef.APIVersion) - if err != nil { - return "" - } - gvk := gv.WithKind("Pod") - internalFieldPath, _, err := scheme.Scheme.ConvertFieldLabel(gvk, e.ValueFrom.FieldRef.FieldPath, "") - if err != nil { - return "" // pod validation should catch this on create - } - - valueFrom, err := fieldpath.ExtractFieldPathAsString(pod, internalFieldPath) - if err != nil { - return "" // pod validation should catch this on create - } - - return valueFrom - } -} - -func describeStatus(stateName string, state corev1.ContainerState, w PrefixWriter) { - switch { - case state.Running != nil: - w.Write(LEVEL_2, "%s:\tRunning\n", stateName) - w.Write(LEVEL_3, "Started:\t%v\n", state.Running.StartedAt.Time.Format(time.RFC1123Z)) - case state.Waiting != nil: - w.Write(LEVEL_2, "%s:\tWaiting\n", stateName) - if state.Waiting.Reason != "" { - w.Write(LEVEL_3, "Reason:\t%s\n", state.Waiting.Reason) - } - case state.Terminated != nil: - w.Write(LEVEL_2, "%s:\tTerminated\n", stateName) - if state.Terminated.Reason != "" { - w.Write(LEVEL_3, "Reason:\t%s\n", state.Terminated.Reason) - } - if state.Terminated.Message != "" { - w.Write(LEVEL_3, "Message:\t%s\n", state.Terminated.Message) - } - w.Write(LEVEL_3, "Exit Code:\t%d\n", state.Terminated.ExitCode) - if state.Terminated.Signal > 0 { - w.Write(LEVEL_3, "Signal:\t%d\n", 
state.Terminated.Signal) - } - w.Write(LEVEL_3, "Started:\t%s\n", state.Terminated.StartedAt.Time.Format(time.RFC1123Z)) - w.Write(LEVEL_3, "Finished:\t%s\n", state.Terminated.FinishedAt.Time.Format(time.RFC1123Z)) - default: - w.Write(LEVEL_2, "%s:\tWaiting\n", stateName) - } -} - -func describeVolumeClaimTemplates(templates []corev1.PersistentVolumeClaim, w PrefixWriter) { - if len(templates) == 0 { - w.Write(LEVEL_0, "Volume Claims:\t\n") - return - } - w.Write(LEVEL_0, "Volume Claims:\n") - for _, pvc := range templates { - w.Write(LEVEL_1, "Name:\t%s\n", pvc.Name) - w.Write(LEVEL_1, "StorageClass:\t%s\n", storageutil.GetPersistentVolumeClaimClass(&pvc)) - printLabelsMultilineWithIndent(w, " ", "Labels", "\t", pvc.Labels, sets.NewString()) - printLabelsMultilineWithIndent(w, " ", "Annotations", "\t", pvc.Annotations, sets.NewString()) - if capacity, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; ok { - w.Write(LEVEL_1, "Capacity:\t%s\n", capacity.String()) - } else { - w.Write(LEVEL_1, "Capacity:\t%s\n", "") - } - w.Write(LEVEL_1, "Access Modes:\t%s\n", pvc.Spec.AccessModes) - } -} - -func printBoolPtr(value *bool) string { - if value != nil { - return printBool(*value) - } - - return "" -} - -func printBool(value bool) string { - if value { - return "True" - } - - return "False" -} - -// ReplicationControllerDescriber generates information about a replication controller -// and the pods it has created. -type ReplicationControllerDescriber struct { - clientset.Interface -} - -func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - rc := d.CoreV1().ReplicationControllers(namespace) - pc := d.CoreV1().Pods(namespace) - - controller, err := rc.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - running, waiting, succeeded, failed, err := getPodStatusForController(pc, labels.SelectorFromSet(controller.Spec.Selector), controller.UID) - if err != nil { - return "", err - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, controller) - } - - return describeReplicationController(controller, events, running, waiting, succeeded, failed) -} - -func describeReplicationController(controller *corev1.ReplicationController, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", controller.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", controller.Namespace) - w.Write(LEVEL_0, "Selector:\t%s\n", labels.FormatLabels(controller.Spec.Selector)) - printLabelsMultiline(w, "Labels", controller.Labels) - printAnnotationsMultiline(w, "Annotations", controller.Annotations) - w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, *controller.Spec.Replicas) - w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - DescribePodTemplate(controller.Spec.Template, w) - if len(controller.Status.Conditions) > 0 { - w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tReason\n") - w.Write(LEVEL_1, "----\t------\t------\n") - for _, c := range controller.Status.Conditions { - w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason) - } - } - if events != nil { - DescribeEvents(events, w) - } - return nil - }) -} - -func DescribePodTemplate(template *corev1.PodTemplateSpec, w PrefixWriter) { 
- w.Write(LEVEL_0, "Pod Template:\n") - if template == nil { - w.Write(LEVEL_1, "") - return - } - printLabelsMultiline(w, " Labels", template.Labels) - if len(template.Annotations) > 0 { - printAnnotationsMultiline(w, " Annotations", template.Annotations) - } - if len(template.Spec.ServiceAccountName) > 0 { - w.Write(LEVEL_1, "Service Account:\t%s\n", template.Spec.ServiceAccountName) - } - if len(template.Spec.InitContainers) > 0 { - describeContainers("Init Containers", template.Spec.InitContainers, nil, nil, w, " ") - } - describeContainers("Containers", template.Spec.Containers, nil, nil, w, " ") - describeVolumes(template.Spec.Volumes, w, " ") - if len(template.Spec.PriorityClassName) > 0 { - w.Write(LEVEL_1, "Priority Class Name:\t%s\n", template.Spec.PriorityClassName) - } -} - -// ReplicaSetDescriber generates information about a ReplicaSet and the pods it has created. -type ReplicaSetDescriber struct { - clientset.Interface -} - -func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - rsc := d.AppsV1().ReplicaSets(namespace) - pc := d.CoreV1().Pods(namespace) - - rs, err := rsc.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) - if err != nil { - return "", err - } - - running, waiting, succeeded, failed, getPodErr := getPodStatusForController(pc, selector, rs.UID) - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, rs) - } - - return describeReplicaSet(rs, events, running, waiting, succeeded, failed, getPodErr) -} - -func describeReplicaSet(rs *appsv1.ReplicaSet, events *corev1.EventList, running, waiting, succeeded, failed int, getPodErr error) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", rs.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", rs.Namespace) - w.Write(LEVEL_0, "Selector:\t%s\n", metav1.FormatLabelSelector(rs.Spec.Selector)) - printLabelsMultiline(w, "Labels", rs.Labels) - printAnnotationsMultiline(w, "Annotations", rs.Annotations) - if controlledBy := printController(rs); len(controlledBy) > 0 { - w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy) - } - w.Write(LEVEL_0, "Replicas:\t%d current / %d desired\n", rs.Status.Replicas, *rs.Spec.Replicas) - w.Write(LEVEL_0, "Pods Status:\t") - if getPodErr != nil { - w.Write(LEVEL_0, "error in fetching pods: %s\n", getPodErr) - } else { - w.Write(LEVEL_0, "%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - } - DescribePodTemplate(&rs.Spec.Template, w) - if len(rs.Status.Conditions) > 0 { - w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tReason\n") - w.Write(LEVEL_1, "----\t------\t------\n") - for _, c := range rs.Status.Conditions { - w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason) - } - } - if events != nil { - DescribeEvents(events, w) - } - return nil - }) -} - -// JobDescriber generates information about a job and the pods it has created. 
-type JobDescriber struct { - clientset.Interface -} - -func (d *JobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - job, err := d.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, job) - } - - return describeJob(job, events) -} - -func describeJob(job *batchv1.Job, events *corev1.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", job.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", job.Namespace) - selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector) - w.Write(LEVEL_0, "Selector:\t%s\n", selector) - printLabelsMultiline(w, "Labels", job.Labels) - printAnnotationsMultiline(w, "Annotations", job.Annotations) - if controlledBy := printController(job); len(controlledBy) > 0 { - w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy) - } - w.Write(LEVEL_0, "Parallelism:\t%d\n", *job.Spec.Parallelism) - if job.Spec.Completions != nil { - w.Write(LEVEL_0, "Completions:\t%d\n", *job.Spec.Completions) - } else { - w.Write(LEVEL_0, "Completions:\t\n") - } - if job.Status.StartTime != nil { - w.Write(LEVEL_0, "Start Time:\t%s\n", job.Status.StartTime.Time.Format(time.RFC1123Z)) - } - if job.Status.CompletionTime != nil { - w.Write(LEVEL_0, "Completed At:\t%s\n", job.Status.CompletionTime.Time.Format(time.RFC1123Z)) - } - if job.Status.StartTime != nil && job.Status.CompletionTime != nil { - w.Write(LEVEL_0, "Duration:\t%s\n", duration.HumanDuration(job.Status.CompletionTime.Sub(job.Status.StartTime.Time))) - } - if job.Spec.ActiveDeadlineSeconds != nil { - w.Write(LEVEL_0, "Active Deadline Seconds:\t%ds\n", *job.Spec.ActiveDeadlineSeconds) - } - w.Write(LEVEL_0, "Pods Statuses:\t%d Running / %d Succeeded / %d Failed\n", job.Status.Active, job.Status.Succeeded, job.Status.Failed) - DescribePodTemplate(&job.Spec.Template, w) - if events != nil { - DescribeEvents(events, w) - } - return nil - }) -} - -// CronJobDescriber generates information about a cron job and the jobs it has created. 
-type CronJobDescriber struct { - client clientset.Interface - } - - func (d *CronJobDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - cronJob, err := d.client.BatchV1beta1().CronJobs(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, cronJob) - } - return describeCronJob(cronJob, events) - } - - func describeCronJob(cronJob *batchv1beta1.CronJob, events *corev1.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", cronJob.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", cronJob.Namespace) - printLabelsMultiline(w, "Labels", cronJob.Labels) - printAnnotationsMultiline(w, "Annotations", cronJob.Annotations) - w.Write(LEVEL_0, "Schedule:\t%s\n", cronJob.Spec.Schedule) - w.Write(LEVEL_0, "Concurrency Policy:\t%s\n", cronJob.Spec.ConcurrencyPolicy) - w.Write(LEVEL_0, "Suspend:\t%s\n", printBoolPtr(cronJob.Spec.Suspend)) - if cronJob.Spec.SuccessfulJobsHistoryLimit != nil { - w.Write(LEVEL_0, "Successful Job History Limit:\t%d\n", *cronJob.Spec.SuccessfulJobsHistoryLimit) - } else { - w.Write(LEVEL_0, "Successful Job History Limit:\t<unset>\n") - } - if cronJob.Spec.FailedJobsHistoryLimit != nil { - w.Write(LEVEL_0, "Failed Job History Limit:\t%d\n", *cronJob.Spec.FailedJobsHistoryLimit) - } else { - w.Write(LEVEL_0, "Failed Job History Limit:\t<unset>\n") - } - if cronJob.Spec.StartingDeadlineSeconds != nil { - w.Write(LEVEL_0, "Starting Deadline Seconds:\t%ds\n", *cronJob.Spec.StartingDeadlineSeconds) - } else { - w.Write(LEVEL_0, "Starting Deadline Seconds:\t<unset>\n") - } - describeJobTemplate(cronJob.Spec.JobTemplate, w) - if cronJob.Status.LastScheduleTime != nil { - w.Write(LEVEL_0, "Last Schedule Time:\t%s\n", cronJob.Status.LastScheduleTime.Time.Format(time.RFC1123Z)) - } else { - w.Write(LEVEL_0, "Last Schedule Time:\t<unset>\n") - } - printActiveJobs(w, "Active Jobs", cronJob.Status.Active) - if events != nil { - DescribeEvents(events, w) - } - return nil - }) - } - - func describeJobTemplate(jobTemplate batchv1beta1.JobTemplateSpec, w PrefixWriter) { - if jobTemplate.Spec.Selector != nil { - selector, _ := metav1.LabelSelectorAsSelector(jobTemplate.Spec.Selector) - w.Write(LEVEL_0, "Selector:\t%s\n", selector) - } else { - w.Write(LEVEL_0, "Selector:\t<unset>\n") - } - if jobTemplate.Spec.Parallelism != nil { - w.Write(LEVEL_0, "Parallelism:\t%d\n", *jobTemplate.Spec.Parallelism) - } else { - w.Write(LEVEL_0, "Parallelism:\t<unset>\n") - } - if jobTemplate.Spec.Completions != nil { - w.Write(LEVEL_0, "Completions:\t%d\n", *jobTemplate.Spec.Completions) - } else { - w.Write(LEVEL_0, "Completions:\t<unset>\n") - } - if jobTemplate.Spec.ActiveDeadlineSeconds != nil { - w.Write(LEVEL_0, "Active Deadline Seconds:\t%ds\n", *jobTemplate.Spec.ActiveDeadlineSeconds) - } - DescribePodTemplate(&jobTemplate.Spec.Template, w) - } - - func printActiveJobs(w PrefixWriter, title string, jobs []corev1.ObjectReference) { - w.Write(LEVEL_0, "%s:\t", title) - if len(jobs) == 0 { - w.WriteLine("<none>") - return - } - - for i, job := range jobs { - if i != 0 { - w.Write(LEVEL_0, ", ") - } - w.Write(LEVEL_0, "%s", job.Name) - } - w.WriteLine("") - } - - // DaemonSetDescriber generates information about a daemon set and the pods it has created. 
-type DaemonSetDescriber struct { - clientset.Interface -} - -func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - dc := d.AppsV1().DaemonSets(namespace) - pc := d.CoreV1().Pods(namespace) - - daemon, err := dc.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - selector, err := metav1.LabelSelectorAsSelector(daemon.Spec.Selector) - if err != nil { - return "", err - } - running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, daemon.UID) - if err != nil { - return "", err - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, daemon) - } - - return describeDaemonSet(daemon, events, running, waiting, succeeded, failed) -} - -func describeDaemonSet(daemon *appsv1.DaemonSet, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", daemon.Name) - selector, err := metav1.LabelSelectorAsSelector(daemon.Spec.Selector) - if err != nil { - // this shouldn't happen if LabelSelector passed validation - return err - } - w.Write(LEVEL_0, "Selector:\t%s\n", selector) - w.Write(LEVEL_0, "Node-Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector)) - printLabelsMultiline(w, "Labels", daemon.Labels) - printAnnotationsMultiline(w, "Annotations", daemon.Annotations) - w.Write(LEVEL_0, "Desired Number of Nodes Scheduled: %d\n", daemon.Status.DesiredNumberScheduled) - w.Write(LEVEL_0, "Current Number of Nodes Scheduled: %d\n", daemon.Status.CurrentNumberScheduled) - w.Write(LEVEL_0, "Number of Nodes Scheduled with Up-to-date Pods: %d\n", daemon.Status.UpdatedNumberScheduled) - w.Write(LEVEL_0, "Number of Nodes Scheduled with Available Pods: %d\n", daemon.Status.NumberAvailable) - w.Write(LEVEL_0, "Number of Nodes Misscheduled: %d\n", daemon.Status.NumberMisscheduled) - w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - DescribePodTemplate(&daemon.Spec.Template, w) - if events != nil { - DescribeEvents(events, w) - } - return nil - }) -} - -// SecretDescriber generates information about a secret -type SecretDescriber struct { - clientset.Interface -} - -func (d *SecretDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.CoreV1().Secrets(namespace) - - secret, err := c.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - return describeSecret(secret) -} - -func describeSecret(secret *corev1.Secret) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", secret.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", secret.Namespace) - printLabelsMultiline(w, "Labels", secret.Labels) - skipAnnotations := sets.NewString(corev1.LastAppliedConfigAnnotation) - printAnnotationsMultilineWithFilter(w, "Annotations", secret.Annotations, skipAnnotations) - - w.Write(LEVEL_0, "\nType:\t%s\n", secret.Type) - - w.Write(LEVEL_0, "\nData\n====\n") - for k, v := range secret.Data { - switch { - case k == corev1.ServiceAccountTokenKey && secret.Type == corev1.SecretTypeServiceAccountToken: - w.Write(LEVEL_0, "%s:\t%s\n", k, string(v)) - default: - w.Write(LEVEL_0, "%s:\t%d bytes\n", k, len(v)) - } - } - - return nil - }) -} - -type 
IngressDescriber struct { - clientset.Interface -} - -func (i *IngressDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := i.NetworkingV1beta1().Ingresses(namespace) - ing, err := c.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - return i.describeIngress(ing, describerSettings) -} - -func (i *IngressDescriber) describeBackend(ns string, backend *networkingv1beta1.IngressBackend) string { - endpoints, _ := i.CoreV1().Endpoints(ns).Get(backend.ServiceName, metav1.GetOptions{}) - service, _ := i.CoreV1().Services(ns).Get(backend.ServiceName, metav1.GetOptions{}) - spName := "" - for i := range service.Spec.Ports { - sp := &service.Spec.Ports[i] - switch backend.ServicePort.Type { - case intstr.String: - if backend.ServicePort.StrVal == sp.Name { - spName = sp.Name - } - case intstr.Int: - if int32(backend.ServicePort.IntVal) == sp.Port { - spName = sp.Name - } - } - } - return formatEndpoints(endpoints, sets.NewString(spName)) -} - -func (i *IngressDescriber) describeIngress(ing *networkingv1beta1.Ingress, describerSettings describe.DescriberSettings) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%v\n", ing.Name) - w.Write(LEVEL_0, "Namespace:\t%v\n", ing.Namespace) - w.Write(LEVEL_0, "Address:\t%v\n", loadBalancerStatusStringer(ing.Status.LoadBalancer, true)) - def := ing.Spec.Backend - ns := ing.Namespace - if def == nil { - // Ingresses that don't specify a default backend inherit the - // default backend in the kube-system namespace. - def = &networkingv1beta1.IngressBackend{ - ServiceName: "default-http-backend", - ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80}, - } - ns = metav1.NamespaceSystem - } - w.Write(LEVEL_0, "Default backend:\t%s (%s)\n", backendStringer(def), i.describeBackend(ns, def)) - if len(ing.Spec.TLS) != 0 { - describeIngressTLS(w, ing.Spec.TLS) - } - w.Write(LEVEL_0, "Rules:\n Host\tPath\tBackends\n") - w.Write(LEVEL_1, "----\t----\t--------\n") - count := 0 - for _, rules := range ing.Spec.Rules { - if rules.HTTP == nil { - continue - } - count++ - host := rules.Host - if len(host) == 0 { - host = "*" - } - w.Write(LEVEL_1, "%s\t\n", host) - for _, path := range rules.HTTP.Paths { - w.Write(LEVEL_2, "\t%s \t%s (%s)\n", path.Path, backendStringer(&path.Backend), i.describeBackend(ing.Namespace, &path.Backend)) - } - } - if count == 0 { - w.Write(LEVEL_1, "%s\t%s \t%s (%s)\n", "*", "*", backendStringer(def), i.describeBackend(ns, def)) - } - describeIngressAnnotations(w, ing.Annotations) - - if describerSettings.ShowEvents { - events, _ := i.CoreV1().Events(ing.Namespace).Search(scheme.Scheme, ing) - if events != nil { - DescribeEvents(events, w) - } - } - return nil - }) -} - -func describeIngressTLS(w PrefixWriter, ingTLS []networkingv1beta1.IngressTLS) { - w.Write(LEVEL_0, "TLS:\n") - for _, t := range ingTLS { - if t.SecretName == "" { - w.Write(LEVEL_1, "SNI routes %v\n", strings.Join(t.Hosts, ",")) - } else { - w.Write(LEVEL_1, "%v terminates %v\n", t.SecretName, strings.Join(t.Hosts, ",")) - } - } - return -} - -// TODO: Move from annotations into Ingress status. -func describeIngressAnnotations(w PrefixWriter, annotations map[string]string) { - w.Write(LEVEL_0, "Annotations:\n") - for k, v := range annotations { - w.Write(LEVEL_1, "%v:\t%s\n", k, v) - } - return -} - -// ServiceDescriber generates information about a service. 
-type ServiceDescriber struct { - clientset.Interface -} - -func (d *ServiceDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.CoreV1().Services(namespace) - - service, err := c.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - endpoints, _ := d.CoreV1().Endpoints(namespace).Get(name, metav1.GetOptions{}) - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, service) - } - return describeService(service, endpoints, events) -} - -func buildIngressString(ingress []corev1.LoadBalancerIngress) string { - var buffer bytes.Buffer - - for i := range ingress { - if i != 0 { - buffer.WriteString(", ") - } - if ingress[i].IP != "" { - buffer.WriteString(ingress[i].IP) - } else { - buffer.WriteString(ingress[i].Hostname) - } - } - return buffer.String() -} - -func describeService(service *corev1.Service, endpoints *corev1.Endpoints, events *corev1.EventList) (string, error) { - if endpoints == nil { - endpoints = &corev1.Endpoints{} - } - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", service.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", service.Namespace) - printLabelsMultiline(w, "Labels", service.Labels) - printAnnotationsMultiline(w, "Annotations", service.Annotations) - w.Write(LEVEL_0, "Selector:\t%s\n", labels.FormatLabels(service.Spec.Selector)) - w.Write(LEVEL_0, "Type:\t%s\n", service.Spec.Type) - w.Write(LEVEL_0, "IP:\t%s\n", service.Spec.ClusterIP) - if len(service.Spec.ExternalIPs) > 0 { - w.Write(LEVEL_0, "External IPs:\t%v\n", strings.Join(service.Spec.ExternalIPs, ",")) - } - if service.Spec.LoadBalancerIP != "" { - w.Write(LEVEL_0, "IP:\t%s\n", service.Spec.LoadBalancerIP) - } - if service.Spec.ExternalName != "" { - w.Write(LEVEL_0, "External Name:\t%s\n", service.Spec.ExternalName) - } - if len(service.Status.LoadBalancer.Ingress) > 0 { - list := buildIngressString(service.Status.LoadBalancer.Ingress) - w.Write(LEVEL_0, "LoadBalancer Ingress:\t%s\n", list) - } - for i := range service.Spec.Ports { - sp := &service.Spec.Ports[i] - - name := sp.Name - if name == "" { - name = "" - } - w.Write(LEVEL_0, "Port:\t%s\t%d/%s\n", name, sp.Port, sp.Protocol) - if sp.TargetPort.Type == intstr.Type(intstr.Int) { - w.Write(LEVEL_0, "TargetPort:\t%d/%s\n", sp.TargetPort.IntVal, sp.Protocol) - } else { - w.Write(LEVEL_0, "TargetPort:\t%s/%s\n", sp.TargetPort.StrVal, sp.Protocol) - } - if sp.NodePort != 0 { - w.Write(LEVEL_0, "NodePort:\t%s\t%d/%s\n", name, sp.NodePort, sp.Protocol) - } - w.Write(LEVEL_0, "Endpoints:\t%s\n", formatEndpoints(endpoints, sets.NewString(sp.Name))) - } - w.Write(LEVEL_0, "Session Affinity:\t%s\n", service.Spec.SessionAffinity) - if service.Spec.ExternalTrafficPolicy != "" { - w.Write(LEVEL_0, "External Traffic Policy:\t%s\n", service.Spec.ExternalTrafficPolicy) - } - if service.Spec.HealthCheckNodePort != 0 { - w.Write(LEVEL_0, "HealthCheck NodePort:\t%d\n", service.Spec.HealthCheckNodePort) - } - if len(service.Spec.LoadBalancerSourceRanges) > 0 { - w.Write(LEVEL_0, "LoadBalancer Source Ranges:\t%v\n", strings.Join(service.Spec.LoadBalancerSourceRanges, ",")) - } - if events != nil { - DescribeEvents(events, w) - } - return nil - }) -} - -// EndpointsDescriber generates information about an Endpoint. 
-type EndpointsDescriber struct { - clientset.Interface -} - -func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.CoreV1().Endpoints(namespace) - - ep, err := c.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, ep) - } - - return describeEndpoints(ep, events) -} - -func describeEndpoints(ep *corev1.Endpoints, events *corev1.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", ep.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", ep.Namespace) - printLabelsMultiline(w, "Labels", ep.Labels) - printAnnotationsMultiline(w, "Annotations", ep.Annotations) - - w.Write(LEVEL_0, "Subsets:\n") - for i := range ep.Subsets { - subset := &ep.Subsets[i] - - addresses := make([]string, 0, len(subset.Addresses)) - for _, addr := range subset.Addresses { - addresses = append(addresses, addr.IP) - } - addressesString := strings.Join(addresses, ",") - if len(addressesString) == 0 { - addressesString = "" - } - w.Write(LEVEL_1, "Addresses:\t%s\n", addressesString) - - notReadyAddresses := make([]string, 0, len(subset.NotReadyAddresses)) - for _, addr := range subset.NotReadyAddresses { - notReadyAddresses = append(notReadyAddresses, addr.IP) - } - notReadyAddressesString := strings.Join(notReadyAddresses, ",") - if len(notReadyAddressesString) == 0 { - notReadyAddressesString = "" - } - w.Write(LEVEL_1, "NotReadyAddresses:\t%s\n", notReadyAddressesString) - - if len(subset.Ports) > 0 { - w.Write(LEVEL_1, "Ports:\n") - w.Write(LEVEL_2, "Name\tPort\tProtocol\n") - w.Write(LEVEL_2, "----\t----\t--------\n") - for _, port := range subset.Ports { - name := port.Name - if len(name) == 0 { - name = "" - } - w.Write(LEVEL_2, "%s\t%d\t%s\n", name, port.Port, port.Protocol) - } - } - w.Write(LEVEL_0, "\n") - } - - if events != nil { - DescribeEvents(events, w) - } - return nil - }) -} - -// ServiceAccountDescriber generates information about a service. -type ServiceAccountDescriber struct { - clientset.Interface -} - -func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.CoreV1().ServiceAccounts(namespace) - - serviceAccount, err := c.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - tokens := []corev1.Secret{} - - // missingSecrets is the set of all secrets present in the - // serviceAccount but not present in the set of existing secrets. - missingSecrets := sets.NewString() - secrets, err := d.CoreV1().Secrets(namespace).List(metav1.ListOptions{}) - - // errors are tolerated here in order to describe the serviceAccount with all - // of the secrets that it references, even if those secrets cannot be fetched. - if err == nil { - // existingSecrets is the set of all secrets remaining on a - // service account that are not present in the "tokens" slice. 
- existingSecrets := sets.NewString() - - for _, s := range secrets.Items { - if s.Type == corev1.SecretTypeServiceAccountToken { - name, _ := s.Annotations[corev1.ServiceAccountNameKey] - uid, _ := s.Annotations[corev1.ServiceAccountUIDKey] - if name == serviceAccount.Name && uid == string(serviceAccount.UID) { - tokens = append(tokens, s) - } - } - existingSecrets.Insert(s.Name) - } - - for _, s := range serviceAccount.Secrets { - if !existingSecrets.Has(s.Name) { - missingSecrets.Insert(s.Name) - } - } - for _, s := range serviceAccount.ImagePullSecrets { - if !existingSecrets.Has(s.Name) { - missingSecrets.Insert(s.Name) - } - } - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = d.CoreV1().Events(namespace).Search(scheme.Scheme, serviceAccount) - } - - return describeServiceAccount(serviceAccount, tokens, missingSecrets, events) -} - -func describeServiceAccount(serviceAccount *corev1.ServiceAccount, tokens []corev1.Secret, missingSecrets sets.String, events *corev1.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", serviceAccount.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", serviceAccount.Namespace) - printLabelsMultiline(w, "Labels", serviceAccount.Labels) - printAnnotationsMultiline(w, "Annotations", serviceAccount.Annotations) - - var ( - emptyHeader = " " - pullHeader = "Image pull secrets:" - mountHeader = "Mountable secrets: " - tokenHeader = "Tokens: " - - pullSecretNames = []string{} - mountSecretNames = []string{} - tokenSecretNames = []string{} - ) - - for _, s := range serviceAccount.ImagePullSecrets { - pullSecretNames = append(pullSecretNames, s.Name) - } - for _, s := range serviceAccount.Secrets { - mountSecretNames = append(mountSecretNames, s.Name) - } - for _, s := range tokens { - tokenSecretNames = append(tokenSecretNames, s.Name) - } - - types := map[string][]string{ - pullHeader: pullSecretNames, - mountHeader: mountSecretNames, - tokenHeader: tokenSecretNames, - } - for _, header := range sets.StringKeySet(types).List() { - names := types[header] - if len(names) == 0 { - w.Write(LEVEL_0, "%s\t\n", header) - } else { - prefix := header - for _, name := range names { - if missingSecrets.Has(name) { - w.Write(LEVEL_0, "%s\t%s (not found)\n", prefix, name) - } else { - w.Write(LEVEL_0, "%s\t%s\n", prefix, name) - } - prefix = emptyHeader - } - } - } - - if events != nil { - DescribeEvents(events, w) - } - - return nil - }) -} - -// RoleDescriber generates information about a node. -type RoleDescriber struct { - clientset.Interface -} - -func (d *RoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - role, err := d.RbacV1().Roles(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - breakdownRules := []rbacv1.PolicyRule{} - for _, rule := range role.Rules { - breakdownRules = append(breakdownRules, rbac.BreakdownRule(rule)...) 
- } - - compactRules, err := rbac.CompactRules(breakdownRules) - if err != nil { - return "", err - } - sort.Stable(rbac.SortableRuleSlice(compactRules)) - - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", role.Name) - printLabelsMultiline(w, "Labels", role.Labels) - printAnnotationsMultiline(w, "Annotations", role.Annotations) - - w.Write(LEVEL_0, "PolicyRule:\n") - w.Write(LEVEL_1, "Resources\tNon-Resource URLs\tResource Names\tVerbs\n") - w.Write(LEVEL_1, "---------\t-----------------\t--------------\t-----\n") - for _, r := range compactRules { - w.Write(LEVEL_1, "%s\t%v\t%v\t%v\n", CombineResourceGroup(r.Resources, r.APIGroups), r.NonResourceURLs, r.ResourceNames, r.Verbs) - } - - return nil - }) -} - -// ClusterRoleDescriber generates information about a node. -type ClusterRoleDescriber struct { - clientset.Interface -} - -func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - role, err := d.RbacV1().ClusterRoles().Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - breakdownRules := []rbacv1.PolicyRule{} - for _, rule := range role.Rules { - breakdownRules = append(breakdownRules, rbac.BreakdownRule(rule)...) - } - - compactRules, err := rbac.CompactRules(breakdownRules) - if err != nil { - return "", err - } - sort.Stable(rbac.SortableRuleSlice(compactRules)) - - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", role.Name) - printLabelsMultiline(w, "Labels", role.Labels) - printAnnotationsMultiline(w, "Annotations", role.Annotations) - - w.Write(LEVEL_0, "PolicyRule:\n") - w.Write(LEVEL_1, "Resources\tNon-Resource URLs\tResource Names\tVerbs\n") - w.Write(LEVEL_1, "---------\t-----------------\t--------------\t-----\n") - for _, r := range compactRules { - w.Write(LEVEL_1, "%s\t%v\t%v\t%v\n", CombineResourceGroup(r.Resources, r.APIGroups), r.NonResourceURLs, r.ResourceNames, r.Verbs) - } - - return nil - }) -} - -func CombineResourceGroup(resource, group []string) string { - if len(resource) == 0 { - return "" - } - parts := strings.SplitN(resource[0], "/", 2) - combine := parts[0] - - if len(group) > 0 && group[0] != "" { - combine = combine + "." + group[0] - } - - if len(parts) == 2 { - combine = combine + "/" + parts[1] - } - return combine -} - -// RoleBindingDescriber generates information about a node. -type RoleBindingDescriber struct { - clientset.Interface -} - -func (d *RoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - binding, err := d.RbacV1().RoleBindings(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", binding.Name) - printLabelsMultiline(w, "Labels", binding.Labels) - printAnnotationsMultiline(w, "Annotations", binding.Annotations) - - w.Write(LEVEL_0, "Role:\n") - w.Write(LEVEL_1, "Kind:\t%s\n", binding.RoleRef.Kind) - w.Write(LEVEL_1, "Name:\t%s\n", binding.RoleRef.Name) - - w.Write(LEVEL_0, "Subjects:\n") - w.Write(LEVEL_1, "Kind\tName\tNamespace\n") - w.Write(LEVEL_1, "----\t----\t---------\n") - for _, s := range binding.Subjects { - w.Write(LEVEL_1, "%s\t%s\t%s\n", s.Kind, s.Name, s.Namespace) - } - - return nil - }) -} - -// ClusterRoleBindingDescriber generates information about a node. 
-type ClusterRoleBindingDescriber struct { - clientset.Interface -} - -func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - binding, err := d.RbacV1().ClusterRoleBindings().Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", binding.Name) - printLabelsMultiline(w, "Labels", binding.Labels) - printAnnotationsMultiline(w, "Annotations", binding.Annotations) - - w.Write(LEVEL_0, "Role:\n") - w.Write(LEVEL_1, "Kind:\t%s\n", binding.RoleRef.Kind) - w.Write(LEVEL_1, "Name:\t%s\n", binding.RoleRef.Name) - - w.Write(LEVEL_0, "Subjects:\n") - w.Write(LEVEL_1, "Kind\tName\tNamespace\n") - w.Write(LEVEL_1, "----\t----\t---------\n") - for _, s := range binding.Subjects { - w.Write(LEVEL_1, "%s\t%s\t%s\n", s.Kind, s.Name, s.Namespace) - } - - return nil - }) -} - -// NodeDescriber generates information about a node. -type NodeDescriber struct { - clientset.Interface -} - -func (d *NodeDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - mc := d.CoreV1().Nodes() - node, err := mc.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - fieldSelector, err := fields.ParseSelector("spec.nodeName=" + name + ",status.phase!=" + string(corev1.PodSucceeded) + ",status.phase!=" + string(corev1.PodFailed)) - if err != nil { - return "", err - } - // in a policy aware setting, users may have access to a node, but not all pods - // in that case, we note that the user does not have access to the pods - canViewPods := true - nodeNonTerminatedPodsList, err := d.CoreV1().Pods(namespace).List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) - if err != nil { - if !errors.IsForbidden(err) { - return "", err - } - canViewPods = false - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - if ref, err := reference.GetReference(scheme.Scheme, node); err != nil { - klog.Errorf("Unable to construct reference to '%#v': %v", node, err) - } else { - // TODO: We haven't decided the namespace for Node object yet. 
- ref.UID = types.UID(ref.Name) - events, _ = d.CoreV1().Events("").Search(scheme.Scheme, ref) - } - } - - return describeNode(node, nodeNonTerminatedPodsList, events, canViewPods) -} - -func describeNode(node *corev1.Node, nodeNonTerminatedPodsList *corev1.PodList, events *corev1.EventList, canViewPods bool) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", node.Name) - if roles := findNodeRoles(node); len(roles) > 0 { - w.Write(LEVEL_0, "Roles:\t%s\n", strings.Join(roles, ",")) - } else { - w.Write(LEVEL_0, "Roles:\t%s\n", "") - } - printLabelsMultiline(w, "Labels", node.Labels) - printAnnotationsMultiline(w, "Annotations", node.Annotations) - w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z)) - printNodeTaintsMultiline(w, "Taints", node.Spec.Taints) - w.Write(LEVEL_0, "Unschedulable:\t%v\n", node.Spec.Unschedulable) - if len(node.Status.Conditions) > 0 { - w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tLastHeartbeatTime\tLastTransitionTime\tReason\tMessage\n") - w.Write(LEVEL_1, "----\t------\t-----------------\t------------------\t------\t-------\n") - for _, c := range node.Status.Conditions { - w.Write(LEVEL_1, "%v \t%v \t%s \t%s \t%v \t%v\n", - c.Type, - c.Status, - c.LastHeartbeatTime.Time.Format(time.RFC1123Z), - c.LastTransitionTime.Time.Format(time.RFC1123Z), - c.Reason, - c.Message) - } - } - - w.Write(LEVEL_0, "Addresses:\n") - for _, address := range node.Status.Addresses { - w.Write(LEVEL_1, "%s:\t%s\n", address.Type, address.Address) - } - - printResourceList := func(resourceList corev1.ResourceList) { - resources := make([]corev1.ResourceName, 0, len(resourceList)) - for resource := range resourceList { - resources = append(resources, resource) - } - sort.Sort(SortableResourceNames(resources)) - for _, resource := range resources { - value := resourceList[resource] - w.Write(LEVEL_0, " %s:\t%s\n", resource, value.String()) - } - } - - if len(node.Status.Capacity) > 0 { - w.Write(LEVEL_0, "Capacity:\n") - printResourceList(node.Status.Capacity) - } - if len(node.Status.Allocatable) > 0 { - w.Write(LEVEL_0, "Allocatable:\n") - printResourceList(node.Status.Allocatable) - } - - w.Write(LEVEL_0, "System Info:\n") - w.Write(LEVEL_0, " Machine ID:\t%s\n", node.Status.NodeInfo.MachineID) - w.Write(LEVEL_0, " System UUID:\t%s\n", node.Status.NodeInfo.SystemUUID) - w.Write(LEVEL_0, " Boot ID:\t%s\n", node.Status.NodeInfo.BootID) - w.Write(LEVEL_0, " Kernel Version:\t%s\n", node.Status.NodeInfo.KernelVersion) - w.Write(LEVEL_0, " OS Image:\t%s\n", node.Status.NodeInfo.OSImage) - w.Write(LEVEL_0, " Operating System:\t%s\n", node.Status.NodeInfo.OperatingSystem) - w.Write(LEVEL_0, " Architecture:\t%s\n", node.Status.NodeInfo.Architecture) - w.Write(LEVEL_0, " Container Runtime Version:\t%s\n", node.Status.NodeInfo.ContainerRuntimeVersion) - w.Write(LEVEL_0, " Kubelet Version:\t%s\n", node.Status.NodeInfo.KubeletVersion) - w.Write(LEVEL_0, " Kube-Proxy Version:\t%s\n", node.Status.NodeInfo.KubeProxyVersion) - - if len(node.Spec.PodCIDR) > 0 { - w.Write(LEVEL_0, "PodCIDR:\t%s\n", node.Spec.PodCIDR) - } - if len(node.Spec.ProviderID) > 0 { - w.Write(LEVEL_0, "ProviderID:\t%s\n", node.Spec.ProviderID) - } - if canViewPods && nodeNonTerminatedPodsList != nil { - describeNodeResource(nodeNonTerminatedPodsList, node, w) - } else { - w.Write(LEVEL_0, "Pods:\tnot authorized\n") - } - if events != nil { - DescribeEvents(events, w) - } - return nil - }) -} - -type 
StatefulSetDescriber struct { - client clientset.Interface -} - -func (p *StatefulSetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - ps, err := p.client.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - pc := p.client.CoreV1().Pods(namespace) - - selector, err := metav1.LabelSelectorAsSelector(ps.Spec.Selector) - if err != nil { - return "", err - } - - running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector, ps.UID) - if err != nil { - return "", err - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = p.client.CoreV1().Events(namespace).Search(scheme.Scheme, ps) - } - - return describeStatefulSet(ps, selector, events, running, waiting, succeeded, failed) -} - -func describeStatefulSet(ps *appsv1.StatefulSet, selector labels.Selector, events *corev1.EventList, running, waiting, succeeded, failed int) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", ps.ObjectMeta.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", ps.ObjectMeta.Namespace) - w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", ps.CreationTimestamp.Time.Format(time.RFC1123Z)) - w.Write(LEVEL_0, "Selector:\t%s\n", selector) - printLabelsMultiline(w, "Labels", ps.Labels) - printAnnotationsMultiline(w, "Annotations", ps.Annotations) - w.Write(LEVEL_0, "Replicas:\t%d desired | %d total\n", *ps.Spec.Replicas, ps.Status.Replicas) - w.Write(LEVEL_0, "Update Strategy:\t%s\n", ps.Spec.UpdateStrategy.Type) - if ps.Spec.UpdateStrategy.RollingUpdate != nil { - ru := ps.Spec.UpdateStrategy.RollingUpdate - if ru.Partition != nil { - w.Write(LEVEL_1, "Partition:\t%d\n", ru.Partition) - } - } - - w.Write(LEVEL_0, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - DescribePodTemplate(&ps.Spec.Template, w) - describeVolumeClaimTemplates(ps.Spec.VolumeClaimTemplates, w) - if events != nil { - DescribeEvents(events, w) - } - - return nil - }) -} - -type CertificateSigningRequestDescriber struct { - client clientset.Interface -} - -func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - csr, err := p.client.CertificatesV1beta1().CertificateSigningRequests().Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - cr, err := certificate.ParseCSR(csr) - if err != nil { - return "", fmt.Errorf("Error parsing CSR: %v", err) - } - status, err := extractCSRStatus(csr) - if err != nil { - return "", err - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = p.client.CoreV1().Events(namespace).Search(scheme.Scheme, csr) - } - - return describeCertificateSigningRequest(csr, cr, status, events) -} - -func describeCertificateSigningRequest(csr *certificatesv1beta1.CertificateSigningRequest, cr *x509.CertificateRequest, status string, events *corev1.EventList) (string, error) { - printListHelper := func(w PrefixWriter, prefix, name string, values []string) { - if len(values) == 0 { - return - } - w.Write(LEVEL_0, prefix+name+":\t") - w.Write(LEVEL_0, strings.Join(values, "\n"+prefix+"\t")) - w.Write(LEVEL_0, "\n") - } - - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", csr.Name) - w.Write(LEVEL_0, "Labels:\t%s\n", labels.FormatLabels(csr.Labels)) - w.Write(LEVEL_0, 
"Annotations:\t%s\n", labels.FormatLabels(csr.Annotations)) - w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", csr.CreationTimestamp.Time.Format(time.RFC1123Z)) - w.Write(LEVEL_0, "Requesting User:\t%s\n", csr.Spec.Username) - w.Write(LEVEL_0, "Status:\t%s\n", status) - - w.Write(LEVEL_0, "Subject:\n") - w.Write(LEVEL_0, "\tCommon Name:\t%s\n", cr.Subject.CommonName) - w.Write(LEVEL_0, "\tSerial Number:\t%s\n", cr.Subject.SerialNumber) - printListHelper(w, "\t", "Organization", cr.Subject.Organization) - printListHelper(w, "\t", "Organizational Unit", cr.Subject.OrganizationalUnit) - printListHelper(w, "\t", "Country", cr.Subject.Country) - printListHelper(w, "\t", "Locality", cr.Subject.Locality) - printListHelper(w, "\t", "Province", cr.Subject.Province) - printListHelper(w, "\t", "StreetAddress", cr.Subject.StreetAddress) - printListHelper(w, "\t", "PostalCode", cr.Subject.PostalCode) - - if len(cr.DNSNames)+len(cr.EmailAddresses)+len(cr.IPAddresses) > 0 { - w.Write(LEVEL_0, "Subject Alternative Names:\n") - printListHelper(w, "\t", "DNS Names", cr.DNSNames) - printListHelper(w, "\t", "Email Addresses", cr.EmailAddresses) - var ipaddrs []string - for _, ipaddr := range cr.IPAddresses { - ipaddrs = append(ipaddrs, ipaddr.String()) - } - printListHelper(w, "\t", "IP Addresses", ipaddrs) - } - - if events != nil { - DescribeEvents(events, w) - } - - return nil - }) -} - -// HorizontalPodAutoscalerDescriber generates information about a horizontal pod autoscaler. -type HorizontalPodAutoscalerDescriber struct { - client clientset.Interface -} - -func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - var events *corev1.EventList - - // autoscaling/v2beta2 is introduced since v1.12 and autoscaling/v1 does not have full backward compatibility - // with autoscaling/v2beta2, so describer will try to get and describe hpa v2beta2 object firstly, if it fails, - // describer will fall back to do with hpa v1 object - hpaV2beta2, err := d.client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{}) - if err == nil { - if describerSettings.ShowEvents { - events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, hpaV2beta2) - } - return describeHorizontalPodAutoscalerV2beta2(hpaV2beta2, events, d) - } - - hpaV1, err := d.client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{}) - if err == nil { - if describerSettings.ShowEvents { - events, _ = d.client.CoreV1().Events(namespace).Search(scheme.Scheme, hpaV1) - } - return describeHorizontalPodAutoscalerV1(hpaV1, events, d) - } - - return "", err -} - -func describeHorizontalPodAutoscalerV2beta2(hpa *autoscalingv2beta2.HorizontalPodAutoscaler, events *corev1.EventList, d *HorizontalPodAutoscalerDescriber) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", hpa.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", hpa.Namespace) - printLabelsMultiline(w, "Labels", hpa.Labels) - printAnnotationsMultiline(w, "Annotations", hpa.Annotations) - w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z)) - w.Write(LEVEL_0, "Reference:\t%s/%s\n", - hpa.Spec.ScaleTargetRef.Kind, - hpa.Spec.ScaleTargetRef.Name) - w.Write(LEVEL_0, "Metrics:\t( current / target )\n") - for i, metric := range hpa.Spec.Metrics { - switch metric.Type { - case autoscalingv2beta2.ExternalMetricSourceType: - if 
metric.External.Target.AverageValue != nil { - current := "<unknown>" - if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].External != nil && - &hpa.Status.CurrentMetrics[i].External.Current.AverageValue != nil { - current = hpa.Status.CurrentMetrics[i].External.Current.AverageValue.String() - } - w.Write(LEVEL_1, "%q (target average value):\t%s / %s\n", metric.External.Metric.Name, current, metric.External.Target.AverageValue.String()) - } else { - current := "<unknown>" - if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].External != nil { - current = hpa.Status.CurrentMetrics[i].External.Current.Value.String() - } - w.Write(LEVEL_1, "%q (target value):\t%s / %s\n", metric.External.Metric.Name, current, metric.External.Target.Value.String()) - - } - case autoscalingv2beta2.PodsMetricSourceType: - current := "<unknown>" - if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Pods != nil { - current = hpa.Status.CurrentMetrics[i].Pods.Current.AverageValue.String() - } - w.Write(LEVEL_1, "%q on pods:\t%s / %s\n", metric.Pods.Metric.Name, current, metric.Pods.Target.AverageValue.String()) - case autoscalingv2beta2.ObjectMetricSourceType: - w.Write(LEVEL_1, "\"%s\" on %s/%s ", metric.Object.Metric.Name, metric.Object.DescribedObject.Kind, metric.Object.DescribedObject.Name) - if metric.Object.Target.Type == autoscalingv2beta2.AverageValueMetricType { - current := "<unknown>" - if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Object != nil { - current = hpa.Status.CurrentMetrics[i].Object.Current.AverageValue.String() - } - w.Write(LEVEL_0, "(target average value):\t%s / %s\n", current, metric.Object.Target.AverageValue.String()) - } else { - current := "<unknown>" - if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Object != nil { - current = hpa.Status.CurrentMetrics[i].Object.Current.Value.String() - } - w.Write(LEVEL_0, "(target value):\t%s / %s\n", current, metric.Object.Target.Value.String()) - } - case autoscalingv2beta2.ResourceMetricSourceType: - w.Write(LEVEL_1, "resource %s on pods", string(metric.Resource.Name)) - if metric.Resource.Target.AverageValue != nil { - current := "<unknown>" - if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Resource != nil { - current = hpa.Status.CurrentMetrics[i].Resource.Current.AverageValue.String() - } - w.Write(LEVEL_0, ":\t%s / %s\n", current, metric.Resource.Target.AverageValue.String()) - } else { - current := "<unknown>" - if len(hpa.Status.CurrentMetrics) > i && hpa.Status.CurrentMetrics[i].Resource != nil && hpa.Status.CurrentMetrics[i].Resource.Current.AverageUtilization != nil { - current = fmt.Sprintf("%d%% (%s)", *hpa.Status.CurrentMetrics[i].Resource.Current.AverageUtilization, hpa.Status.CurrentMetrics[i].Resource.Current.AverageValue.String()) - } - - target := "<auto>" - if metric.Resource.Target.AverageUtilization != nil { - target = fmt.Sprintf("%d%%", *metric.Resource.Target.AverageUtilization) - } - w.Write(LEVEL_1, "(as a percentage of request):\t%s / %s\n", current, target) - } - default: - w.Write(LEVEL_1, "<unknown metric type %q>\n", string(metric.Type)) - } - } - minReplicas := "<unset>" - if hpa.Spec.MinReplicas != nil { - minReplicas = fmt.Sprintf("%d", *hpa.Spec.MinReplicas) - } - w.Write(LEVEL_0, "Min replicas:\t%s\n", minReplicas) - w.Write(LEVEL_0, "Max replicas:\t%d\n", hpa.Spec.MaxReplicas) - w.Write(LEVEL_0, "%s pods:\t", hpa.Spec.ScaleTargetRef.Kind) - w.Write(LEVEL_0, "%d current / %d desired\n", hpa.Status.CurrentReplicas, hpa.Status.DesiredReplicas) - - if len(hpa.Status.Conditions) > 0 { - 
w.Write(LEVEL_0, "Conditions:\n") - w.Write(LEVEL_1, "Type\tStatus\tReason\tMessage\n") - w.Write(LEVEL_1, "----\t------\t------\t-------\n") - for _, c := range hpa.Status.Conditions { - w.Write(LEVEL_1, "%v\t%v\t%v\t%v\n", c.Type, c.Status, c.Reason, c.Message) - } - } - - if events != nil { - DescribeEvents(events, w) - } - - return nil - }) -} - -func describeHorizontalPodAutoscalerV1(hpa *autoscalingv1.HorizontalPodAutoscaler, events *corev1.EventList, d *HorizontalPodAutoscalerDescriber) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", hpa.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", hpa.Namespace) - printLabelsMultiline(w, "Labels", hpa.Labels) - printAnnotationsMultiline(w, "Annotations", hpa.Annotations) - w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z)) - w.Write(LEVEL_0, "Reference:\t%s/%s\n", - hpa.Spec.ScaleTargetRef.Kind, - hpa.Spec.ScaleTargetRef.Name) - - if hpa.Spec.TargetCPUUtilizationPercentage != nil { - w.Write(LEVEL_0, "Target CPU utilization:\t%d%%\n", *hpa.Spec.TargetCPUUtilizationPercentage) - current := "" - if hpa.Status.CurrentCPUUtilizationPercentage != nil { - current = fmt.Sprintf("%d", *hpa.Status.CurrentCPUUtilizationPercentage) - } - w.Write(LEVEL_0, "Current CPU utilization:\t%s%%\n", current) - } - - minReplicas := "" - if hpa.Spec.MinReplicas != nil { - minReplicas = fmt.Sprintf("%d", *hpa.Spec.MinReplicas) - } - w.Write(LEVEL_0, "Min replicas:\t%s\n", minReplicas) - w.Write(LEVEL_0, "Max replicas:\t%d\n", hpa.Spec.MaxReplicas) - w.Write(LEVEL_0, "%s pods:\t", hpa.Spec.ScaleTargetRef.Kind) - w.Write(LEVEL_0, "%d current / %d desired\n", hpa.Status.CurrentReplicas, hpa.Status.DesiredReplicas) - - if events != nil { - DescribeEvents(events, w) - } - - return nil - }) -} - -func describeNodeResource(nodeNonTerminatedPodsList *corev1.PodList, node *corev1.Node, w PrefixWriter) { - w.Write(LEVEL_0, "Non-terminated Pods:\t(%d in total)\n", len(nodeNonTerminatedPodsList.Items)) - w.Write(LEVEL_1, "Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\tAGE\n") - w.Write(LEVEL_1, "---------\t----\t\t------------\t----------\t---------------\t-------------\t---\n") - allocatable := node.Status.Capacity - if len(node.Status.Allocatable) > 0 { - allocatable = node.Status.Allocatable - } - - for _, pod := range nodeNonTerminatedPodsList.Items { - req, limit := resourcehelper.PodRequestsAndLimits(&pod) - cpuReq, cpuLimit, memoryReq, memoryLimit := req[corev1.ResourceCPU], limit[corev1.ResourceCPU], req[corev1.ResourceMemory], limit[corev1.ResourceMemory] - fractionCpuReq := float64(cpuReq.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 - fractionCpuLimit := float64(cpuLimit.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 - fractionMemoryReq := float64(memoryReq.Value()) / float64(allocatable.Memory().Value()) * 100 - fractionMemoryLimit := float64(memoryLimit.Value()) / float64(allocatable.Memory().Value()) * 100 - w.Write(LEVEL_1, "%s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s\n", pod.Namespace, pod.Name, - cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit), - memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit), translateTimestampSince(pod.CreationTimestamp)) - } - - w.Write(LEVEL_0, "Allocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n") - w.Write(LEVEL_1, 
"Resource\tRequests\tLimits\n") - w.Write(LEVEL_1, "--------\t--------\t------\n") - reqs, limits := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList) - cpuReqs, cpuLimits, memoryReqs, memoryLimits, ephemeralstorageReqs, ephemeralstorageLimits := - reqs[corev1.ResourceCPU], limits[corev1.ResourceCPU], reqs[corev1.ResourceMemory], limits[corev1.ResourceMemory], reqs[corev1.ResourceEphemeralStorage], limits[corev1.ResourceEphemeralStorage] - fractionCpuReqs := float64(0) - fractionCpuLimits := float64(0) - if allocatable.Cpu().MilliValue() != 0 { - fractionCpuReqs = float64(cpuReqs.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 - fractionCpuLimits = float64(cpuLimits.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 - } - fractionMemoryReqs := float64(0) - fractionMemoryLimits := float64(0) - if allocatable.Memory().Value() != 0 { - fractionMemoryReqs = float64(memoryReqs.Value()) / float64(allocatable.Memory().Value()) * 100 - fractionMemoryLimits = float64(memoryLimits.Value()) / float64(allocatable.Memory().Value()) * 100 - } - fractionEphemeralStorageReqs := float64(0) - fractionEphemeralStorageLimits := float64(0) - if allocatable.StorageEphemeral().Value() != 0 { - fractionEphemeralStorageReqs = float64(ephemeralstorageReqs.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100 - fractionEphemeralStorageLimits = float64(ephemeralstorageLimits.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100 - } - w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", - corev1.ResourceCPU, cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits)) - w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", - corev1.ResourceMemory, memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits)) - w.Write(LEVEL_1, "%s\t%s (%d%%)\t%s (%d%%)\n", - corev1.ResourceEphemeralStorage, ephemeralstorageReqs.String(), int64(fractionEphemeralStorageReqs), ephemeralstorageLimits.String(), int64(fractionEphemeralStorageLimits)) - extResources := make([]string, 0, len(allocatable)) - for resource := range allocatable { - if !resourcehelper.IsStandardContainerResourceName(string(resource)) && resource != corev1.ResourcePods { - extResources = append(extResources, string(resource)) - } - } - sort.Strings(extResources) - for _, ext := range extResources { - extRequests, extLimits := reqs[corev1.ResourceName(ext)], limits[corev1.ResourceName(ext)] - w.Write(LEVEL_1, "%s\t%s\t%s\n", ext, extRequests.String(), extLimits.String()) - } -} - -func getPodsTotalRequestsAndLimits(podList *corev1.PodList) (reqs map[corev1.ResourceName]resource.Quantity, limits map[corev1.ResourceName]resource.Quantity) { - reqs, limits = map[corev1.ResourceName]resource.Quantity{}, map[corev1.ResourceName]resource.Quantity{} - for _, pod := range podList.Items { - podReqs, podLimits := resourcehelper.PodRequestsAndLimits(&pod) - for podReqName, podReqValue := range podReqs { - if value, ok := reqs[podReqName]; !ok { - reqs[podReqName] = *podReqValue.Copy() - } else { - value.Add(podReqValue) - reqs[podReqName] = value - } - } - for podLimitName, podLimitValue := range podLimits { - if value, ok := limits[podLimitName]; !ok { - limits[podLimitName] = *podLimitValue.Copy() - } else { - value.Add(podLimitValue) - limits[podLimitName] = value - } - } - } - return -} - -func DescribeEvents(el *corev1.EventList, w PrefixWriter) { - if len(el.Items) == 0 { - w.Write(LEVEL_0, "Events:\t\n") - return - } - w.Flush() - 
sort.Sort(event.SortableEvents(el.Items)) - w.Write(LEVEL_0, "Events:\n Type\tReason\tAge\tFrom\tMessage\n") - w.Write(LEVEL_1, "----\t------\t----\t----\t-------\n") - for _, e := range el.Items { - var interval string - if e.Count > 1 { - interval = fmt.Sprintf("%s (x%d over %s)", translateTimestampSince(e.LastTimestamp), e.Count, translateTimestampSince(e.FirstTimestamp)) - } else { - interval = translateTimestampSince(e.FirstTimestamp) - } - w.Write(LEVEL_1, "%v\t%v\t%s\t%v\t%v\n", - e.Type, - e.Reason, - interval, - formatEventSource(e.Source), - strings.TrimSpace(e.Message), - ) - } -} - -// DeploymentDescriber generates information about a deployment. -type DeploymentDescriber struct { - client clientset.Interface -} - -func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - d, err := dd.client.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) - if err != nil { - return "", err - } - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = dd.client.CoreV1().Events(namespace).Search(scheme.Scheme, d) - } - - return describeDeployment(d, selector, d, events, dd) -} - -func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internalDeployment *appsv1.Deployment, events *corev1.EventList, dd *DeploymentDescriber) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", d.ObjectMeta.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", d.ObjectMeta.Namespace) - w.Write(LEVEL_0, "CreationTimestamp:\t%s\n", d.CreationTimestamp.Time.Format(time.RFC1123Z)) - printLabelsMultiline(w, "Labels", d.Labels) - printAnnotationsMultiline(w, "Annotations", d.Annotations) - w.Write(LEVEL_0, "Selector:\t%s\n", selector) - w.Write(LEVEL_0, "Replicas:\t%d desired | %d updated | %d total | %d available | %d unavailable\n", *(d.Spec.Replicas), d.Status.UpdatedReplicas, d.Status.Replicas, d.Status.AvailableReplicas, d.Status.UnavailableReplicas) - w.Write(LEVEL_0, "StrategyType:\t%s\n", d.Spec.Strategy.Type) - w.Write(LEVEL_0, "MinReadySeconds:\t%d\n", d.Spec.MinReadySeconds) - if d.Spec.Strategy.RollingUpdate != nil { - ru := d.Spec.Strategy.RollingUpdate - w.Write(LEVEL_0, "RollingUpdateStrategy:\t%s max unavailable, %s max surge\n", ru.MaxUnavailable.String(), ru.MaxSurge.String()) - } - DescribePodTemplate(&internalDeployment.Spec.Template, w) - if len(d.Status.Conditions) > 0 { - w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tReason\n") - w.Write(LEVEL_1, "----\t------\t------\n") - for _, c := range d.Status.Conditions { - w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason) - } - } - oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd.client.AppsV1()) - if err == nil { - w.Write(LEVEL_0, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs)) - var newRSs []*appsv1.ReplicaSet - if newRS != nil { - newRSs = append(newRSs, newRS) - } - w.Write(LEVEL_0, "NewReplicaSet:\t%s\n", printReplicaSetsByLabels(newRSs)) - } - if events != nil { - DescribeEvents(events, w) - } - - return nil - }) -} - -func printReplicaSetsByLabels(matchingRSs []*appsv1.ReplicaSet) string { - // Format the matching ReplicaSets into strings. 
- rsStrings := make([]string, 0, len(matchingRSs)) - for _, rs := range matchingRSs { - rsStrings = append(rsStrings, fmt.Sprintf("%s (%d/%d replicas created)", rs.Name, rs.Status.Replicas, *rs.Spec.Replicas)) - } - - list := strings.Join(rsStrings, ", ") - if list == "" { - return "" - } - return list -} - -func getPodStatusForController(c corev1client.PodInterface, selector labels.Selector, uid types.UID) (running, waiting, succeeded, failed int, err error) { - options := metav1.ListOptions{LabelSelector: selector.String()} - rcPods, err := c.List(options) - if err != nil { - return - } - for _, pod := range rcPods.Items { - controllerRef := metav1.GetControllerOf(&pod) - // Skip pods that are orphans or owned by other controllers. - if controllerRef == nil || controllerRef.UID != uid { - continue - } - switch pod.Status.Phase { - case corev1.PodRunning: - running++ - case corev1.PodPending: - waiting++ - case corev1.PodSucceeded: - succeeded++ - case corev1.PodFailed: - failed++ - } - } - return -} - -// ConfigMapDescriber generates information about a ConfigMap -type ConfigMapDescriber struct { - clientset.Interface -} - -func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.CoreV1().ConfigMaps(namespace) - - configMap, err := c.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", configMap.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", configMap.Namespace) - printLabelsMultiline(w, "Labels", configMap.Labels) - printAnnotationsMultiline(w, "Annotations", configMap.Annotations) - - w.Write(LEVEL_0, "\nData\n====\n") - for k, v := range configMap.Data { - w.Write(LEVEL_0, "%s:\n----\n", k) - w.Write(LEVEL_0, "%s\n", string(v)) - } - if describerSettings.ShowEvents { - events, err := d.CoreV1().Events(namespace).Search(scheme.Scheme, configMap) - if err != nil { - return err - } - if events != nil { - DescribeEvents(events, w) - } - } - return nil - }) -} - -// NetworkPolicyDescriber generates information about a networkingv1.NetworkPolicy -type NetworkPolicyDescriber struct { - clientset.Interface -} - -func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - c := d.NetworkingV1().NetworkPolicies(namespace) - - networkPolicy, err := c.Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - return describeNetworkPolicy(networkPolicy) -} - -func describeNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", networkPolicy.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", networkPolicy.Namespace) - w.Write(LEVEL_0, "Created on:\t%s\n", networkPolicy.CreationTimestamp) - printLabelsMultiline(w, "Labels", networkPolicy.Labels) - printAnnotationsMultiline(w, "Annotations", networkPolicy.Annotations) - describeNetworkPolicySpec(networkPolicy.Spec, w) - return nil - }) -} - -func describeNetworkPolicySpec(nps networkingv1.NetworkPolicySpec, w PrefixWriter) { - w.Write(LEVEL_0, "Spec:\n") - w.Write(LEVEL_1, "PodSelector: ") - if len(nps.PodSelector.MatchLabels) == 0 && len(nps.PodSelector.MatchExpressions) == 0 { - w.Write(LEVEL_2, " (Allowing the specific traffic to all pods in this namespace)\n") - } else { - w.Write(LEVEL_2, "%s\n", 
metav1.FormatLabelSelector(&nps.PodSelector)) - } - w.Write(LEVEL_1, "Allowing ingress traffic:\n") - printNetworkPolicySpecIngressFrom(nps.Ingress, " ", w) - w.Write(LEVEL_1, "Allowing egress traffic:\n") - printNetworkPolicySpecEgressTo(nps.Egress, " ", w) - w.Write(LEVEL_1, "Policy Types: %v\n", policyTypesToString(nps.PolicyTypes)) -} - -func printNetworkPolicySpecIngressFrom(npirs []networkingv1.NetworkPolicyIngressRule, initialIndent string, w PrefixWriter) { - if len(npirs) == 0 { - w.Write(LEVEL_0, "%s%s\n", initialIndent, " (Selected pods are isolated for ingress connectivity)") - return - } - for i, npir := range npirs { - if len(npir.Ports) == 0 { - w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: (traffic allowed to all ports)") - } else { - for _, port := range npir.Ports { - var proto corev1.Protocol - if port.Protocol != nil { - proto = *port.Protocol - } else { - proto = corev1.ProtocolTCP - } - w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto) - } - } - if len(npir.From) == 0 { - w.Write(LEVEL_0, "%s%s\n", initialIndent, "From: (traffic not restricted by source)") - } else { - for _, from := range npir.From { - w.Write(LEVEL_0, "%s%s\n", initialIndent, "From:") - if from.PodSelector != nil && from.NamespaceSelector != nil { - w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(from.NamespaceSelector)) - w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(from.PodSelector)) - } else if from.PodSelector != nil { - w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(from.PodSelector)) - } else if from.NamespaceSelector != nil { - w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(from.NamespaceSelector)) - } else if from.IPBlock != nil { - w.Write(LEVEL_1, "%sIPBlock:\n", initialIndent) - w.Write(LEVEL_2, "%sCIDR: %s\n", initialIndent, from.IPBlock.CIDR) - w.Write(LEVEL_2, "%sExcept: %v\n", initialIndent, strings.Join(from.IPBlock.Except, ", ")) - } - } - } - if i != len(npirs)-1 { - w.Write(LEVEL_0, "%s%s\n", initialIndent, "----------") - } - } -} - -func printNetworkPolicySpecEgressTo(npers []networkingv1.NetworkPolicyEgressRule, initialIndent string, w PrefixWriter) { - if len(npers) == 0 { - w.Write(LEVEL_0, "%s%s\n", initialIndent, " (Selected pods are isolated for egress connectivity)") - return - } - for i, nper := range npers { - if len(nper.Ports) == 0 { - w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: (traffic allowed to all ports)") - } else { - for _, port := range nper.Ports { - var proto corev1.Protocol - if port.Protocol != nil { - proto = *port.Protocol - } else { - proto = corev1.ProtocolTCP - } - w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto) - } - } - if len(nper.To) == 0 { - w.Write(LEVEL_0, "%s%s\n", initialIndent, "To: (traffic not restricted by source)") - } else { - for _, to := range nper.To { - w.Write(LEVEL_0, "%s%s\n", initialIndent, "To:") - if to.PodSelector != nil && to.NamespaceSelector != nil { - w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(to.NamespaceSelector)) - w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(to.PodSelector)) - } else if to.PodSelector != nil { - w.Write(LEVEL_1, "%s%s: %s\n", initialIndent, "PodSelector", metav1.FormatLabelSelector(to.PodSelector)) - } else if to.NamespaceSelector != nil { - w.Write(LEVEL_1, "%s%s: 
%s\n", initialIndent, "NamespaceSelector", metav1.FormatLabelSelector(to.NamespaceSelector)) - } else if to.IPBlock != nil { - w.Write(LEVEL_1, "%sIPBlock:\n", initialIndent) - w.Write(LEVEL_2, "%sCIDR: %s\n", initialIndent, to.IPBlock.CIDR) - w.Write(LEVEL_2, "%sExcept: %v\n", initialIndent, strings.Join(to.IPBlock.Except, ", ")) - } - } - } - if i != len(npers)-1 { - w.Write(LEVEL_0, "%s%s\n", initialIndent, "----------") - } - } -} - -type StorageClassDescriber struct { - clientset.Interface -} - -func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - sc, err := s.StorageV1().StorageClasses().Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = s.CoreV1().Events(namespace).Search(scheme.Scheme, sc) - } - - return describeStorageClass(sc, events) -} - -func describeStorageClass(sc *storagev1.StorageClass, events *corev1.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", sc.Name) - w.Write(LEVEL_0, "IsDefaultClass:\t%s\n", storageutil.IsDefaultAnnotationText(sc.ObjectMeta)) - w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(sc.Annotations)) - w.Write(LEVEL_0, "Provisioner:\t%s\n", sc.Provisioner) - w.Write(LEVEL_0, "Parameters:\t%s\n", labels.FormatLabels(sc.Parameters)) - w.Write(LEVEL_0, "AllowVolumeExpansion:\t%s\n", printBoolPtr(sc.AllowVolumeExpansion)) - if len(sc.MountOptions) == 0 { - w.Write(LEVEL_0, "MountOptions:\t\n") - } else { - w.Write(LEVEL_0, "MountOptions:\n") - for _, option := range sc.MountOptions { - w.Write(LEVEL_1, "%s\n", option) - } - } - if sc.ReclaimPolicy != nil { - w.Write(LEVEL_0, "ReclaimPolicy:\t%s\n", *sc.ReclaimPolicy) - } - if sc.VolumeBindingMode != nil { - w.Write(LEVEL_0, "VolumeBindingMode:\t%s\n", *sc.VolumeBindingMode) - } - if sc.AllowedTopologies != nil { - printAllowedTopologies(w, sc.AllowedTopologies) - } - if events != nil { - DescribeEvents(events, w) - } - - return nil - }) -} - -func printAllowedTopologies(w PrefixWriter, topologies []corev1.TopologySelectorTerm) { - w.Write(LEVEL_0, "AllowedTopologies:\t") - if len(topologies) == 0 { - w.WriteLine("") - return - } - w.WriteLine("") - for i, term := range topologies { - printTopologySelectorTermsMultilineWithIndent(w, LEVEL_1, fmt.Sprintf("Term %d", i), "\t", term.MatchLabelExpressions) - } -} - -func printTopologySelectorTermsMultilineWithIndent(w PrefixWriter, indentLevel int, title, innerIndent string, reqs []corev1.TopologySelectorLabelRequirement) { - w.Write(indentLevel, "%s:%s", title, innerIndent) - - if len(reqs) == 0 { - w.WriteLine("") - return - } - - for i, req := range reqs { - if i != 0 { - w.Write(indentLevel, "%s", innerIndent) - } - exprStr := fmt.Sprintf("%s %s", req.Key, "in") - if len(req.Values) > 0 { - exprStr = fmt.Sprintf("%s [%s]", exprStr, strings.Join(req.Values, ", ")) - } - w.Write(LEVEL_0, "%s\n", exprStr) - } -} - -type PodDisruptionBudgetDescriber struct { - clientset.Interface -} - -func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - pdb, err := p.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = p.CoreV1().Events(namespace).Search(scheme.Scheme, pdb) 
- } - - return describePodDisruptionBudget(pdb, events) -} - -func describePodDisruptionBudget(pdb *policyv1beta1.PodDisruptionBudget, events *corev1.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", pdb.Name) - w.Write(LEVEL_0, "Namespace:\t%s\n", pdb.Namespace) - - if pdb.Spec.MinAvailable != nil { - w.Write(LEVEL_0, "Min available:\t%s\n", pdb.Spec.MinAvailable.String()) - } else if pdb.Spec.MaxUnavailable != nil { - w.Write(LEVEL_0, "Max unavailable:\t%s\n", pdb.Spec.MaxUnavailable.String()) - } - - if pdb.Spec.Selector != nil { - w.Write(LEVEL_0, "Selector:\t%s\n", metav1.FormatLabelSelector(pdb.Spec.Selector)) - } else { - w.Write(LEVEL_0, "Selector:\t\n") - } - w.Write(LEVEL_0, "Status:\n") - w.Write(LEVEL_2, "Allowed disruptions:\t%d\n", pdb.Status.PodDisruptionsAllowed) - w.Write(LEVEL_2, "Current:\t%d\n", pdb.Status.CurrentHealthy) - w.Write(LEVEL_2, "Desired:\t%d\n", pdb.Status.DesiredHealthy) - w.Write(LEVEL_2, "Total:\t%d\n", pdb.Status.ExpectedPods) - if events != nil { - DescribeEvents(events, w) - } - - return nil - }) -} - -// PriorityClassDescriber generates information about a PriorityClass. -type PriorityClassDescriber struct { - clientset.Interface -} - -func (s *PriorityClassDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - pc, err := s.SchedulingV1().PriorityClasses().Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - var events *corev1.EventList - if describerSettings.ShowEvents { - events, _ = s.CoreV1().Events(namespace).Search(scheme.Scheme, pc) - } - - return describePriorityClass(pc, events) -} - -func describePriorityClass(pc *schedulingv1.PriorityClass, events *corev1.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", pc.Name) - w.Write(LEVEL_0, "Value:\t%v\n", pc.Value) - w.Write(LEVEL_0, "GlobalDefault:\t%v\n", pc.GlobalDefault) - w.Write(LEVEL_0, "Description:\t%s\n", pc.Description) - - w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(pc.Annotations)) - if events != nil { - DescribeEvents(events, w) - } - - return nil - }) -} - -// PodSecurityPolicyDescriber generates information about a PodSecuritypolicyv1beta1. 
-type PodSecurityPolicyDescriber struct { - clientset.Interface -} - -func (d *PodSecurityPolicyDescriber) Describe(namespace, name string, describerSettings describe.DescriberSettings) (string, error) { - psp, err := d.PolicyV1beta1().PodSecurityPolicies().Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - return describePodSecurityPolicy(psp) -} - -func describePodSecurityPolicy(psp *policyv1beta1.PodSecurityPolicy) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", psp.Name) - - w.Write(LEVEL_0, "\nSettings:\n") - - w.Write(LEVEL_1, "Allow Privileged:\t%t\n", psp.Spec.Privileged) - w.Write(LEVEL_1, "Allow Privilege Escalation:\t%v\n", psp.Spec.AllowPrivilegeEscalation) - w.Write(LEVEL_1, "Default Add Capabilities:\t%v\n", capsToString(psp.Spec.DefaultAddCapabilities)) - w.Write(LEVEL_1, "Required Drop Capabilities:\t%s\n", capsToString(psp.Spec.RequiredDropCapabilities)) - w.Write(LEVEL_1, "Allowed Capabilities:\t%s\n", capsToString(psp.Spec.AllowedCapabilities)) - w.Write(LEVEL_1, "Allowed Volume Types:\t%s\n", fsTypeToString(psp.Spec.Volumes)) - - if len(psp.Spec.AllowedFlexVolumes) > 0 { - w.Write(LEVEL_1, "Allowed FlexVolume Types:\t%s\n", flexVolumesToString(psp.Spec.AllowedFlexVolumes)) - } - - if len(psp.Spec.AllowedCSIDrivers) > 0 { - w.Write(LEVEL_1, "Allowed CSI Drivers:\t%s\n", csiDriversToString(psp.Spec.AllowedCSIDrivers)) - } - - if len(psp.Spec.AllowedUnsafeSysctls) > 0 { - w.Write(LEVEL_1, "Allowed Unsafe Sysctls:\t%s\n", sysctlsToString(psp.Spec.AllowedUnsafeSysctls)) - } - if len(psp.Spec.ForbiddenSysctls) > 0 { - w.Write(LEVEL_1, "Forbidden Sysctls:\t%s\n", sysctlsToString(psp.Spec.ForbiddenSysctls)) - } - w.Write(LEVEL_1, "Allow Host Network:\t%t\n", psp.Spec.HostNetwork) - w.Write(LEVEL_1, "Allow Host Ports:\t%s\n", hostPortRangeToString(psp.Spec.HostPorts)) - w.Write(LEVEL_1, "Allow Host PID:\t%t\n", psp.Spec.HostPID) - w.Write(LEVEL_1, "Allow Host IPC:\t%t\n", psp.Spec.HostIPC) - w.Write(LEVEL_1, "Read Only Root Filesystem:\t%v\n", psp.Spec.ReadOnlyRootFilesystem) - - w.Write(LEVEL_1, "SELinux Context Strategy: %s\t\n", string(psp.Spec.SELinux.Rule)) - var user, role, seLinuxType, level string - if psp.Spec.SELinux.SELinuxOptions != nil { - user = psp.Spec.SELinux.SELinuxOptions.User - role = psp.Spec.SELinux.SELinuxOptions.Role - seLinuxType = psp.Spec.SELinux.SELinuxOptions.Type - level = psp.Spec.SELinux.SELinuxOptions.Level - } - w.Write(LEVEL_2, "User:\t%s\n", stringOrNone(user)) - w.Write(LEVEL_2, "Role:\t%s\n", stringOrNone(role)) - w.Write(LEVEL_2, "Type:\t%s\n", stringOrNone(seLinuxType)) - w.Write(LEVEL_2, "Level:\t%s\n", stringOrNone(level)) - - w.Write(LEVEL_1, "Run As User Strategy: %s\t\n", string(psp.Spec.RunAsUser.Rule)) - w.Write(LEVEL_2, "Ranges:\t%s\n", idRangeToString(psp.Spec.RunAsUser.Ranges)) - - w.Write(LEVEL_1, "FSGroup Strategy: %s\t\n", string(psp.Spec.FSGroup.Rule)) - w.Write(LEVEL_2, "Ranges:\t%s\n", idRangeToString(psp.Spec.FSGroup.Ranges)) - - w.Write(LEVEL_1, "Supplemental Groups Strategy: %s\t\n", string(psp.Spec.SupplementalGroups.Rule)) - w.Write(LEVEL_2, "Ranges:\t%s\n", idRangeToString(psp.Spec.SupplementalGroups.Ranges)) - - return nil - }) -} - -func stringOrNone(s string) string { - return stringOrDefaultValue(s, "") -} - -func stringOrDefaultValue(s, defaultValue string) string { - if len(s) > 0 { - return s - } - return defaultValue -} - -func fsTypeToString(volumes []policyv1beta1.FSType) string { - strVolumes := 
[]string{} - for _, v := range volumes { - strVolumes = append(strVolumes, string(v)) - } - return stringOrNone(strings.Join(strVolumes, ",")) -} - -func flexVolumesToString(flexVolumes []policyv1beta1.AllowedFlexVolume) string { - volumes := []string{} - for _, flexVolume := range flexVolumes { - volumes = append(volumes, "driver="+flexVolume.Driver) - } - return stringOrDefaultValue(strings.Join(volumes, ","), "") -} - -func csiDriversToString(csiDrivers []policyv1beta1.AllowedCSIDriver) string { - drivers := []string{} - for _, csiDriver := range csiDrivers { - drivers = append(drivers, "driver="+csiDriver.Name) - } - return stringOrDefaultValue(strings.Join(drivers, ","), "") -} - -func sysctlsToString(sysctls []string) string { - return stringOrNone(strings.Join(sysctls, ",")) -} - -func hostPortRangeToString(ranges []policyv1beta1.HostPortRange) string { - formattedString := "" - if ranges != nil { - strRanges := []string{} - for _, r := range ranges { - strRanges = append(strRanges, fmt.Sprintf("%d-%d", r.Min, r.Max)) - } - formattedString = strings.Join(strRanges, ",") - } - return stringOrNone(formattedString) -} - -func idRangeToString(ranges []policyv1beta1.IDRange) string { - formattedString := "" - if ranges != nil { - strRanges := []string{} - for _, r := range ranges { - strRanges = append(strRanges, fmt.Sprintf("%d-%d", r.Min, r.Max)) - } - formattedString = strings.Join(strRanges, ",") - } - return stringOrNone(formattedString) -} - -func capsToString(caps []corev1.Capability) string { - formattedString := "" - if caps != nil { - strCaps := []string{} - for _, c := range caps { - strCaps = append(strCaps, string(c)) - } - formattedString = strings.Join(strCaps, ",") - } - return stringOrNone(formattedString) -} - -func policyTypesToString(pts []networkingv1.PolicyType) string { - formattedString := "" - if pts != nil { - strPts := []string{} - for _, p := range pts { - strPts = append(strPts, string(p)) - } - formattedString = strings.Join(strPts, ", ") - } - return stringOrNone(formattedString) -} - -// newErrNoDescriber creates a new ErrNoDescriber with the names of the provided types. -func newErrNoDescriber(types ...reflect.Type) error { - names := make([]string, 0, len(types)) - for _, t := range types { - names = append(names, t.String()) - } - return describe.ErrNoDescriber{Types: names} -} - -// Describers implements ObjectDescriber against functions registered via Add. Those functions can -// be strongly typed. Types are exactly matched (no conversion or assignable checks). -type Describers struct { - searchFns map[reflect.Type][]typeFunc -} - -// DescribeObject implements ObjectDescriber and will attempt to print the provided object to a string, -// if at least one describer function has been registered with the exact types passed, or if any -// describer can print the exact object in its first argument (the remainder will be provided empty -// values). If no function registered with Add can satisfy the passed objects, an ErrNoDescriber will -// be returned -// TODO: reorder and partial match extra. -func (d *Describers) DescribeObject(exact interface{}, extra ...interface{}) (string, error) { - exactType := reflect.TypeOf(exact) - fns, ok := d.searchFns[exactType] - if !ok { - return "", newErrNoDescriber(exactType) - } - if len(extra) == 0 { - for _, typeFn := range fns { - if len(typeFn.Extra) == 0 { - return typeFn.Describe(exact, extra...) 
- } - } - typeFn := fns[0] - for _, t := range typeFn.Extra { - v := reflect.New(t).Elem() - extra = append(extra, v.Interface()) - } - return fns[0].Describe(exact, extra...) - } - - types := make([]reflect.Type, 0, len(extra)) - for _, obj := range extra { - types = append(types, reflect.TypeOf(obj)) - } - for _, typeFn := range fns { - if typeFn.Matches(types) { - return typeFn.Describe(exact, extra...) - } - } - return "", newErrNoDescriber(append([]reflect.Type{exactType}, types...)...) -} - -// Add adds one or more describer functions to the describe.Describer. The passed function must -// match the signature: -// -// func(...) (string, error) -// -// Any number of arguments may be provided. -func (d *Describers) Add(fns ...interface{}) error { - for _, fn := range fns { - fv := reflect.ValueOf(fn) - ft := fv.Type() - if ft.Kind() != reflect.Func { - return fmt.Errorf("expected func, got: %v", ft) - } - numIn := ft.NumIn() - if numIn == 0 { - return fmt.Errorf("expected at least one 'in' params, got: %v", ft) - } - if ft.NumOut() != 2 { - return fmt.Errorf("expected two 'out' params - (string, error), got: %v", ft) - } - types := make([]reflect.Type, 0, numIn) - for i := 0; i < numIn; i++ { - types = append(types, ft.In(i)) - } - if ft.Out(0) != reflect.TypeOf(string("")) { - return fmt.Errorf("expected string return, got: %v", ft) - } - var forErrorType error - // This convolution is necessary, otherwise TypeOf picks up on the fact - // that forErrorType is nil. - errorType := reflect.TypeOf(&forErrorType).Elem() - if ft.Out(1) != errorType { - return fmt.Errorf("expected error return, got: %v", ft) - } - - exact := types[0] - extra := types[1:] - if d.searchFns == nil { - d.searchFns = make(map[reflect.Type][]typeFunc) - } - fns := d.searchFns[exact] - fn := typeFunc{Extra: extra, Fn: fv} - fns = append(fns, fn) - d.searchFns[exact] = fns - } - return nil -} - -// typeFunc holds information about a describer function and the types it accepts -type typeFunc struct { - Extra []reflect.Type - Fn reflect.Value -} - -// Matches returns true when the passed types exactly match the Extra list. -func (fn typeFunc) Matches(types []reflect.Type) bool { - if len(fn.Extra) != len(types) { - return false - } - // reorder the items in array types and fn.Extra - // convert the type into string and sort them, check if they are matched - varMap := make(map[reflect.Type]bool) - for i := range fn.Extra { - varMap[fn.Extra[i]] = true - } - for i := range types { - if _, found := varMap[types[i]]; !found { - return false - } - } - return true -} - -// Describe invokes the nested function with the exact number of arguments. -func (fn typeFunc) Describe(exact interface{}, extra ...interface{}) (string, error) { - values := []reflect.Value{reflect.ValueOf(exact)} - for _, obj := range extra { - values = append(values, reflect.ValueOf(obj)) - } - out := fn.Fn.Call(values) - s := out[0].Interface().(string) - var err error - if !out[1].IsNil() { - err = out[1].Interface().(error) - } - return s, err -} - -// printLabelsMultiline prints multiple labels with a proper alignment. -func printLabelsMultiline(w PrefixWriter, title string, labels map[string]string) { - printLabelsMultilineWithIndent(w, "", title, "\t", labels, sets.NewString()) -} - -// printLabelsMultiline prints multiple labels with a user-defined alignment. 
-func printLabelsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, labels map[string]string, skip sets.String) { - w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) - - if labels == nil || len(labels) == 0 { - w.WriteLine("") - return - } - - // to print labels in the sorted order - keys := make([]string, 0, len(labels)) - for key := range labels { - if skip.Has(key) { - continue - } - keys = append(keys, key) - } - if len(keys) == 0 { - w.WriteLine("") - return - } - sort.Strings(keys) - - for i, key := range keys { - if i != 0 { - w.Write(LEVEL_0, "%s", initialIndent) - w.Write(LEVEL_0, "%s", innerIndent) - } - w.Write(LEVEL_0, "%s=%s\n", key, labels[key]) - i++ - } -} - -// printTaintsMultiline prints multiple taints with a proper alignment. -func printNodeTaintsMultiline(w PrefixWriter, title string, taints []corev1.Taint) { - printTaintsMultilineWithIndent(w, "", title, "\t", taints) -} - -// printTaintsMultilineWithIndent prints multiple taints with a user-defined alignment. -func printTaintsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, taints []corev1.Taint) { - w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) - - if taints == nil || len(taints) == 0 { - w.WriteLine("") - return - } - - // to print taints in the sorted order - sort.Slice(taints, func(i, j int) bool { - cmpKey := func(taint corev1.Taint) string { - return string(taint.Effect) + "," + taint.Key - } - return cmpKey(taints[i]) < cmpKey(taints[j]) - }) - - for i, taint := range taints { - if i != 0 { - w.Write(LEVEL_0, "%s", initialIndent) - w.Write(LEVEL_0, "%s", innerIndent) - } - w.Write(LEVEL_0, "%s\n", taint.ToString()) - } -} - -// printPodsMultiline prints multiple pods with a proper alignment. -func printPodsMultiline(w PrefixWriter, title string, pods []corev1.Pod) { - printPodsMultilineWithIndent(w, "", title, "\t", pods) -} - -// printPodsMultilineWithIndent prints multiple pods with a user-defined alignment. -func printPodsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, pods []corev1.Pod) { - w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) - - if pods == nil || len(pods) == 0 { - w.WriteLine("") - return - } - - // to print pods in the sorted order - sort.Slice(pods, func(i, j int) bool { - cmpKey := func(pod corev1.Pod) string { - return pod.Name - } - return cmpKey(pods[i]) < cmpKey(pods[j]) - }) - - for i, pod := range pods { - if i != 0 { - w.Write(LEVEL_0, "%s", initialIndent) - w.Write(LEVEL_0, "%s", innerIndent) - } - w.Write(LEVEL_0, "%s\n", pod.Name) - } -} - -// printPodTolerationsMultiline prints multiple tolerations with a proper alignment. -func printPodTolerationsMultiline(w PrefixWriter, title string, tolerations []corev1.Toleration) { - printTolerationsMultilineWithIndent(w, "", title, "\t", tolerations) -} - -// printTolerationsMultilineWithIndent prints multiple tolerations with a user-defined alignment. 
-func printTolerationsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, tolerations []corev1.Toleration) { - w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) - - if tolerations == nil || len(tolerations) == 0 { - w.WriteLine("") - return - } - - // to print tolerations in the sorted order - sort.Slice(tolerations, func(i, j int) bool { - return tolerations[i].Key < tolerations[j].Key - }) - - for i, toleration := range tolerations { - if i != 0 { - w.Write(LEVEL_0, "%s", initialIndent) - w.Write(LEVEL_0, "%s", innerIndent) - } - w.Write(LEVEL_0, "%s", toleration.Key) - if len(toleration.Value) != 0 { - w.Write(LEVEL_0, "=%s", toleration.Value) - } - if len(toleration.Effect) != 0 { - w.Write(LEVEL_0, ":%s", toleration.Effect) - } - if toleration.TolerationSeconds != nil { - w.Write(LEVEL_0, " for %ds", *toleration.TolerationSeconds) - } - w.Write(LEVEL_0, "\n") - } -} - -type flusher interface { - Flush() -} - -func tabbedString(f func(io.Writer) error) (string, error) { - out := new(tabwriter.Writer) - buf := &bytes.Buffer{} - out.Init(buf, 0, 8, 2, ' ', 0) - - err := f(out) - if err != nil { - return "", err - } - - out.Flush() - str := string(buf.String()) - return str, nil -} - -type SortableResourceNames []corev1.ResourceName - -func (list SortableResourceNames) Len() int { - return len(list) -} - -func (list SortableResourceNames) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} - -func (list SortableResourceNames) Less(i, j int) bool { - return list[i] < list[j] -} - -// SortedResourceNames returns the sorted resource names of a resource list. -func SortedResourceNames(list corev1.ResourceList) []corev1.ResourceName { - resources := make([]corev1.ResourceName, 0, len(list)) - for res := range list { - resources = append(resources, res) - } - sort.Sort(SortableResourceNames(resources)) - return resources -} - -type SortableResourceQuotas []corev1.ResourceQuota - -func (list SortableResourceQuotas) Len() int { - return len(list) -} - -func (list SortableResourceQuotas) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} - -func (list SortableResourceQuotas) Less(i, j int) bool { - return list[i].Name < list[j].Name -} - -type SortableVolumeMounts []corev1.VolumeMount - -func (list SortableVolumeMounts) Len() int { - return len(list) -} - -func (list SortableVolumeMounts) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} - -func (list SortableVolumeMounts) Less(i, j int) bool { - return list[i].MountPath < list[j].MountPath -} - -type SortableVolumeDevices []corev1.VolumeDevice - -func (list SortableVolumeDevices) Len() int { - return len(list) -} - -func (list SortableVolumeDevices) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} - -func (list SortableVolumeDevices) Less(i, j int) bool { - return list[i].DevicePath < list[j].DevicePath -} - -var maxAnnotationLen = 140 - -// printAnnotationsMultilineWithFilter prints filtered multiple annotations with a proper alignment. -func printAnnotationsMultilineWithFilter(w PrefixWriter, title string, annotations map[string]string, skip sets.String) { - printAnnotationsMultilineWithIndent(w, "", title, "\t", annotations, skip) -} - -// printAnnotationsMultiline prints multiple annotations with a proper alignment. 
-func printAnnotationsMultiline(w PrefixWriter, title string, annotations map[string]string) { - printAnnotationsMultilineWithIndent(w, "", title, "\t", annotations, sets.NewString()) -} - -// printAnnotationsMultilineWithIndent prints multiple annotations with a user-defined alignment. -// If annotation string is too long, we omit chars more than 200 length. -func printAnnotationsMultilineWithIndent(w PrefixWriter, initialIndent, title, innerIndent string, annotations map[string]string, skip sets.String) { - - w.Write(LEVEL_0, "%s%s:%s", initialIndent, title, innerIndent) - - if len(annotations) == 0 { - w.WriteLine("") - return - } - - // to print labels in the sorted order - keys := make([]string, 0, len(annotations)) - for key := range annotations { - if skip.Has(key) { - continue - } - keys = append(keys, key) - } - if len(annotations) == 0 { - w.WriteLine("") - return - } - sort.Strings(keys) - indent := initialIndent + innerIndent - for i, key := range keys { - if i != 0 { - w.Write(LEVEL_0, indent) - } - value := strings.TrimSuffix(annotations[key], "\n") - if (len(value)+len(key)+2) > maxAnnotationLen || strings.Contains(value, "\n") { - w.Write(LEVEL_0, "%s:\n", key) - for _, s := range strings.Split(value, "\n") { - w.Write(LEVEL_0, "%s %s\n", indent, shorten(s, maxAnnotationLen-2)) - } - } else { - w.Write(LEVEL_0, "%s: %s\n", key, value) - } - i++ - } -} - -func shorten(s string, maxLength int) string { - if len(s) > maxLength { - return s[:maxLength] + "..." - } - return s -} - -// translateTimestampSince returns the elapsed time since timestamp in -// human-readable approximation. -func translateTimestampSince(timestamp metav1.Time) string { - if timestamp.IsZero() { - return "" - } - - return duration.HumanDuration(time.Since(timestamp.Time)) -} - -// formatEventSource formats EventSource as a comma separated string excluding Host when empty -func formatEventSource(es corev1.EventSource) string { - EventSourceString := []string{es.Component} - if len(es.Host) > 0 { - EventSourceString = append(EventSourceString, es.Host) - } - return strings.Join(EventSourceString, ", ") -} - -// Pass ports=nil for all ports. -func formatEndpoints(endpoints *corev1.Endpoints, ports sets.String) string { - if len(endpoints.Subsets) == 0 { - return "" - } - list := []string{} - max := 3 - more := false - count := 0 - for i := range endpoints.Subsets { - ss := &endpoints.Subsets[i] - if len(ss.Ports) == 0 { - // It's possible to have headless services with no ports. - for i := range ss.Addresses { - if len(list) == max { - more = true - } - if !more { - list = append(list, ss.Addresses[i].IP) - } - count++ - } - } else { - // "Normal" services with ports defined. 
- for i := range ss.Ports { - port := &ss.Ports[i] - if ports == nil || ports.Has(port.Name) { - for i := range ss.Addresses { - if len(list) == max { - more = true - } - addr := &ss.Addresses[i] - if !more { - hostPort := net.JoinHostPort(addr.IP, strconv.Itoa(int(port.Port))) - list = append(list, hostPort) - } - count++ - } - } - } - } - } - ret := strings.Join(list, ",") - if more { - return fmt.Sprintf("%s + %d more...", ret, count-max) - } - return ret -} - -func extractCSRStatus(csr *certificatesv1beta1.CertificateSigningRequest) (string, error) { - var approved, denied bool - for _, c := range csr.Status.Conditions { - switch c.Type { - case certificatesv1beta1.CertificateApproved: - approved = true - case certificatesv1beta1.CertificateDenied: - denied = true - default: - return "", fmt.Errorf("unknown csr condition %q", c) - } - } - var status string - // must be in order of presidence - if denied { - status += "Denied" - } else if approved { - status += "Approved" - } else { - status += "Pending" - } - if len(csr.Status.Certificate) > 0 { - status += ",Issued" - } - return status, nil -} - -// backendStringer behaves just like a string interface and converts the given backend to a string. -func backendStringer(backend *networkingv1beta1.IngressBackend) string { - if backend == nil { - return "" - } - return fmt.Sprintf("%v:%v", backend.ServiceName, backend.ServicePort.String()) -} - -// findNodeRoles returns the roles of a given node. -// The roles are determined by looking for: -// * a node-role.kubernetes.io/="" label -// * a kubernetes.io/role="" label -func findNodeRoles(node *corev1.Node) []string { - roles := sets.NewString() - for k, v := range node.Labels { - switch { - case strings.HasPrefix(k, describe.LabelNodeRolePrefix): - if role := strings.TrimPrefix(k, describe.LabelNodeRolePrefix); len(role) > 0 { - roles.Insert(role) - } - - case k == describe.NodeLabelRole && v != "": - roles.Insert(v) - } - } - return roles.List() -} - -// loadBalancerStatusStringer behaves mostly like a string interface and converts the given status to a string. -// `wide` indicates whether the returned value is meant for --o=wide output. If not, it's clipped to 16 bytes. -func loadBalancerStatusStringer(s corev1.LoadBalancerStatus, wide bool) string { - ingress := s.Ingress - result := sets.NewString() - for i := range ingress { - if ingress[i].IP != "" { - result.Insert(ingress[i].IP) - } else if ingress[i].Hostname != "" { - result.Insert(ingress[i].Hostname) - } - } - - r := strings.Join(result.List(), ",") - if !wide && len(r) > describe.LoadBalancerWidth { - r = r[0:(describe.LoadBalancerWidth-3)] + "..." - } - return r -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/history.go b/vendor/k8s.io/kubernetes/pkg/kubectl/history.go deleted file mode 100644 index a1a4829aeb6a7..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/history.go +++ /dev/null @@ -1,390 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "bytes" - "fmt" - "io" - "text/tabwriter" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/json" - "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/client-go/kubernetes" - clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" - kapps "k8s.io/kubernetes/pkg/kubectl/apps" - describe "k8s.io/kubernetes/pkg/kubectl/describe/versioned" - deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" - sliceutil "k8s.io/kubernetes/pkg/kubectl/util/slice" -) - -const ( - ChangeCauseAnnotation = "kubernetes.io/change-cause" -) - -// HistoryViewer provides an interface for resources have historical information. -type HistoryViewer interface { - ViewHistory(namespace, name string, revision int64) (string, error) -} - -type HistoryVisitor struct { - clientset kubernetes.Interface - result HistoryViewer -} - -func (v *HistoryVisitor) VisitDeployment(elem kapps.GroupKindElement) { - v.result = &DeploymentHistoryViewer{v.clientset} -} - -func (v *HistoryVisitor) VisitStatefulSet(kind kapps.GroupKindElement) { - v.result = &StatefulSetHistoryViewer{v.clientset} -} - -func (v *HistoryVisitor) VisitDaemonSet(kind kapps.GroupKindElement) { - v.result = &DaemonSetHistoryViewer{v.clientset} -} - -func (v *HistoryVisitor) VisitJob(kind kapps.GroupKindElement) {} -func (v *HistoryVisitor) VisitPod(kind kapps.GroupKindElement) {} -func (v *HistoryVisitor) VisitReplicaSet(kind kapps.GroupKindElement) {} -func (v *HistoryVisitor) VisitReplicationController(kind kapps.GroupKindElement) {} -func (v *HistoryVisitor) VisitCronJob(kind kapps.GroupKindElement) {} - -// HistoryViewerFor returns an implementation of HistoryViewer interface for the given schema kind -func HistoryViewerFor(kind schema.GroupKind, c kubernetes.Interface) (HistoryViewer, error) { - elem := kapps.GroupKindElement(kind) - visitor := &HistoryVisitor{ - clientset: c, - } - - // Determine which HistoryViewer we need here - err := elem.Accept(visitor) - - if err != nil { - return nil, fmt.Errorf("error retrieving history for %q, %v", kind.String(), err) - } - - if visitor.result == nil { - return nil, fmt.Errorf("no history viewer has been implemented for %q", kind.String()) - } - - return visitor.result, nil -} - -type DeploymentHistoryViewer struct { - c kubernetes.Interface -} - -// ViewHistory returns a revision-to-replicaset map as the revision history of a deployment -// TODO: this should be a describer -func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { - versionedAppsClient := h.c.AppsV1() - deployment, err := versionedAppsClient.Deployments(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", fmt.Errorf("failed to retrieve deployment %s: %v", name, err) - } - _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, versionedAppsClient) - if err != nil { - return "", fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", name, err) - } - allRSs := allOldRSs - if newRS != nil { - allRSs = append(allRSs, newRS) - } - - historyInfo := make(map[int64]*corev1.PodTemplateSpec) - for _, rs := range allRSs { - v, err := deploymentutil.Revision(rs) - if err != nil { - continue - } - historyInfo[v] = &rs.Spec.Template - changeCause := getChangeCause(rs) - if 
historyInfo[v].Annotations == nil { - historyInfo[v].Annotations = make(map[string]string) - } - if len(changeCause) > 0 { - historyInfo[v].Annotations[ChangeCauseAnnotation] = changeCause - } - } - - if len(historyInfo) == 0 { - return "No rollout history found.", nil - } - - if revision > 0 { - // Print details of a specific revision - template, ok := historyInfo[revision] - if !ok { - return "", fmt.Errorf("unable to find the specified revision") - } - return printTemplate(template) - } - - // Sort the revisionToChangeCause map by revision - revisions := make([]int64, 0, len(historyInfo)) - for r := range historyInfo { - revisions = append(revisions, r) - } - sliceutil.SortInts64(revisions) - - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "REVISION\tCHANGE-CAUSE\n") - for _, r := range revisions { - // Find the change-cause of revision r - changeCause := historyInfo[r].Annotations[ChangeCauseAnnotation] - if len(changeCause) == 0 { - changeCause = "" - } - fmt.Fprintf(out, "%d\t%s\n", r, changeCause) - } - return nil - }) -} - -func printTemplate(template *corev1.PodTemplateSpec) (string, error) { - buf := bytes.NewBuffer([]byte{}) - w := describe.NewPrefixWriter(buf) - describe.DescribePodTemplate(template, w) - return buf.String(), nil -} - -type DaemonSetHistoryViewer struct { - c kubernetes.Interface -} - -// ViewHistory returns a revision-to-history map as the revision history of a deployment -// TODO: this should be a describer -func (h *DaemonSetHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { - ds, history, err := daemonSetHistory(h.c.AppsV1(), namespace, name) - if err != nil { - return "", err - } - historyInfo := make(map[int64]*appsv1.ControllerRevision) - for _, history := range history { - // TODO: for now we assume revisions don't overlap, we may need to handle it - historyInfo[history.Revision] = history - } - if len(historyInfo) == 0 { - return "No rollout history found.", nil - } - - // Print details of a specific revision - if revision > 0 { - history, ok := historyInfo[revision] - if !ok { - return "", fmt.Errorf("unable to find the specified revision") - } - dsOfHistory, err := applyDaemonSetHistory(ds, history) - if err != nil { - return "", fmt.Errorf("unable to parse history %s", history.Name) - } - return printTemplate(&dsOfHistory.Spec.Template) - } - - // Print an overview of all Revisions - // Sort the revisionToChangeCause map by revision - revisions := make([]int64, 0, len(historyInfo)) - for r := range historyInfo { - revisions = append(revisions, r) - } - sliceutil.SortInts64(revisions) - - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "REVISION\tCHANGE-CAUSE\n") - for _, r := range revisions { - // Find the change-cause of revision r - changeCause := historyInfo[r].Annotations[ChangeCauseAnnotation] - if len(changeCause) == 0 { - changeCause = "" - } - fmt.Fprintf(out, "%d\t%s\n", r, changeCause) - } - return nil - }) -} - -type StatefulSetHistoryViewer struct { - c kubernetes.Interface -} - -// ViewHistory returns a list of the revision history of a statefulset -// TODO: this should be a describer -// TODO: needs to implement detailed revision view -func (h *StatefulSetHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { - _, history, err := statefulSetHistory(h.c.AppsV1(), namespace, name) - if err != nil { - return "", err - } - - if len(history) <= 0 { - return "No rollout history found.", nil - } - revisions := make([]int64, len(history)) - 
for _, revision := range history { - revisions = append(revisions, revision.Revision) - } - sliceutil.SortInts64(revisions) - - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "REVISION\n") - for _, r := range revisions { - fmt.Fprintf(out, "%d\n", r) - } - return nil - }) -} - -// controlledHistories returns all ControllerRevisions in namespace that selected by selector and owned by accessor -// TODO: Rename this to controllerHistory when other controllers have been upgraded -func controlledHistoryV1( - apps clientappsv1.AppsV1Interface, - namespace string, - selector labels.Selector, - accessor metav1.Object) ([]*appsv1.ControllerRevision, error) { - var result []*appsv1.ControllerRevision - historyList, err := apps.ControllerRevisions(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) - if err != nil { - return nil, err - } - for i := range historyList.Items { - history := historyList.Items[i] - // Only add history that belongs to the API object - if metav1.IsControlledBy(&history, accessor) { - result = append(result, &history) - } - } - return result, nil -} - -// controlledHistories returns all ControllerRevisions in namespace that selected by selector and owned by accessor -func controlledHistory( - apps clientappsv1.AppsV1Interface, - namespace string, - selector labels.Selector, - accessor metav1.Object) ([]*appsv1.ControllerRevision, error) { - var result []*appsv1.ControllerRevision - historyList, err := apps.ControllerRevisions(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) - if err != nil { - return nil, err - } - for i := range historyList.Items { - history := historyList.Items[i] - // Only add history that belongs to the API object - if metav1.IsControlledBy(&history, accessor) { - result = append(result, &history) - } - } - return result, nil -} - -// daemonSetHistory returns the DaemonSet named name in namespace and all ControllerRevisions in its history. -func daemonSetHistory( - apps clientappsv1.AppsV1Interface, - namespace, name string) (*appsv1.DaemonSet, []*appsv1.ControllerRevision, error) { - ds, err := apps.DaemonSets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return nil, nil, fmt.Errorf("failed to retrieve DaemonSet %s: %v", name, err) - } - selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) - if err != nil { - return nil, nil, fmt.Errorf("failed to create selector for DaemonSet %s: %v", ds.Name, err) - } - accessor, err := meta.Accessor(ds) - if err != nil { - return nil, nil, fmt.Errorf("failed to create accessor for DaemonSet %s: %v", ds.Name, err) - } - history, err := controlledHistory(apps, ds.Namespace, selector, accessor) - if err != nil { - return nil, nil, fmt.Errorf("unable to find history controlled by DaemonSet %s: %v", ds.Name, err) - } - return ds, history, nil -} - -// statefulSetHistory returns the StatefulSet named name in namespace and all ControllerRevisions in its history. 
-func statefulSetHistory( - apps clientappsv1.AppsV1Interface, - namespace, name string) (*appsv1.StatefulSet, []*appsv1.ControllerRevision, error) { - sts, err := apps.StatefulSets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return nil, nil, fmt.Errorf("failed to retrieve Statefulset %s: %s", name, err.Error()) - } - selector, err := metav1.LabelSelectorAsSelector(sts.Spec.Selector) - if err != nil { - return nil, nil, fmt.Errorf("failed to create selector for StatefulSet %s: %s", name, err.Error()) - } - accessor, err := meta.Accessor(sts) - if err != nil { - return nil, nil, fmt.Errorf("failed to obtain accessor for StatefulSet %s: %s", name, err.Error()) - } - history, err := controlledHistoryV1(apps, namespace, selector, accessor) - if err != nil { - return nil, nil, fmt.Errorf("unable to find history controlled by StatefulSet %s: %v", name, err) - } - return sts, history, nil -} - -// applyDaemonSetHistory returns a specific revision of DaemonSet by applying the given history to a copy of the given DaemonSet -func applyDaemonSetHistory(ds *appsv1.DaemonSet, history *appsv1.ControllerRevision) (*appsv1.DaemonSet, error) { - clone := ds.DeepCopy() - cloneBytes, err := json.Marshal(clone) - if err != nil { - return nil, err - } - patched, err := strategicpatch.StrategicMergePatch(cloneBytes, history.Data.Raw, clone) - if err != nil { - return nil, err - } - err = json.Unmarshal(patched, clone) - if err != nil { - return nil, err - } - return clone, nil -} - -// TODO: copied here until this becomes a describer -func tabbedString(f func(io.Writer) error) (string, error) { - out := new(tabwriter.Writer) - buf := &bytes.Buffer{} - out.Init(buf, 0, 8, 2, ' ', 0) - - err := f(out) - if err != nil { - return "", err - } - - out.Flush() - str := string(buf.String()) - return str, nil -} - -// getChangeCause returns the change-cause annotation of the input object -func getChangeCause(obj runtime.Object) string { - accessor, err := meta.Accessor(obj) - if err != nil { - return "" - } - return accessor.GetAnnotations()[ChangeCauseAnnotation] -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go deleted file mode 100644 index bd7e2c7c855f1..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go +++ /dev/null @@ -1,486 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "bytes" - "fmt" - "sort" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/json" - "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/client-go/kubernetes" - kapps "k8s.io/kubernetes/pkg/kubectl/apps" - "k8s.io/kubernetes/pkg/kubectl/scheme" - deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" -) - -const ( - rollbackSuccess = "rolled back" - rollbackSkipped = "skipped rollback" -) - -// Rollbacker provides an interface for resources that can be rolled back. -type Rollbacker interface { - Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) -} - -type RollbackVisitor struct { - clientset kubernetes.Interface - result Rollbacker -} - -func (v *RollbackVisitor) VisitDeployment(elem kapps.GroupKindElement) { - v.result = &DeploymentRollbacker{v.clientset} -} - -func (v *RollbackVisitor) VisitStatefulSet(kind kapps.GroupKindElement) { - v.result = &StatefulSetRollbacker{v.clientset} -} - -func (v *RollbackVisitor) VisitDaemonSet(kind kapps.GroupKindElement) { - v.result = &DaemonSetRollbacker{v.clientset} -} - -func (v *RollbackVisitor) VisitJob(kind kapps.GroupKindElement) {} -func (v *RollbackVisitor) VisitPod(kind kapps.GroupKindElement) {} -func (v *RollbackVisitor) VisitReplicaSet(kind kapps.GroupKindElement) {} -func (v *RollbackVisitor) VisitReplicationController(kind kapps.GroupKindElement) {} -func (v *RollbackVisitor) VisitCronJob(kind kapps.GroupKindElement) {} - -// RollbackerFor returns an implementation of Rollbacker interface for the given schema kind -func RollbackerFor(kind schema.GroupKind, c kubernetes.Interface) (Rollbacker, error) { - elem := kapps.GroupKindElement(kind) - visitor := &RollbackVisitor{ - clientset: c, - } - - err := elem.Accept(visitor) - - if err != nil { - return nil, fmt.Errorf("error retrieving rollbacker for %q, %v", kind.String(), err) - } - - if visitor.result == nil { - return nil, fmt.Errorf("no rollbacker has been implemented for %q", kind) - } - - return visitor.result, nil -} - -type DeploymentRollbacker struct { - c kubernetes.Interface -} - -func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) { - if toRevision < 0 { - return "", revisionNotFoundErr(toRevision) - } - accessor, err := meta.Accessor(obj) - if err != nil { - return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error()) - } - name := accessor.GetName() - namespace := accessor.GetNamespace() - - // TODO: Fix this after kubectl has been removed from core. It is not possible to convert the runtime.Object - // to the external appsv1 Deployment without round-tripping through an internal version of Deployment. We're - // currently getting rid of all internal versions of resources. So we specifically request the appsv1 version - // here. This follows the same pattern as for DaemonSet and StatefulSet. 
- deployment, err := r.c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return "", fmt.Errorf("failed to retrieve Deployment %s: %v", name, err) - } - - rsForRevision, err := deploymentRevision(deployment, r.c, toRevision) - if err != nil { - return "", err - } - if dryRun { - return printTemplate(&rsForRevision.Spec.Template) - } - if deployment.Spec.Paused { - return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/%s' and try again", name) - } - - // Skip if the revision already matches current Deployment - if equalIgnoreHash(&rsForRevision.Spec.Template, &deployment.Spec.Template) { - return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil - } - - // remove hash label before patching back into the deployment - delete(rsForRevision.Spec.Template.Labels, appsv1.DefaultDeploymentUniqueLabelKey) - - // compute deployment annotations - annotations := map[string]string{} - for k := range annotationsToSkip { - if v, ok := deployment.Annotations[k]; ok { - annotations[k] = v - } - } - for k, v := range rsForRevision.Annotations { - if !annotationsToSkip[k] { - annotations[k] = v - } - } - - // make patch to restore - patchType, patch, err := getDeploymentPatch(&rsForRevision.Spec.Template, annotations) - if err != nil { - return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) - } - - // Restore revision - if _, err = r.c.AppsV1().Deployments(namespace).Patch(name, patchType, patch); err != nil { - return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) - } - return rollbackSuccess, nil -} - -// equalIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] -// We ignore pod-template-hash because: -// 1. The hash result would be different upon podTemplateSpec API changes -// (e.g. the addition of a new field will cause the hash code to change) -// 2. The deployment template won't have hash labels -func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool { - t1Copy := template1.DeepCopy() - t2Copy := template2.DeepCopy() - // Remove hash labels from template.Labels before comparing - delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) - delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) - return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) -} - -// annotationsToSkip lists the annotations that should be preserved from the deployment and not -// copied from the replicaset when rolling a deployment back -var annotationsToSkip = map[string]bool{ - corev1.LastAppliedConfigAnnotation: true, - deploymentutil.RevisionAnnotation: true, - deploymentutil.RevisionHistoryAnnotation: true, - deploymentutil.DesiredReplicasAnnotation: true, - deploymentutil.MaxReplicasAnnotation: true, - appsv1.DeprecatedRollbackTo: true, -} - -// getPatch returns a patch that can be applied to restore a Deployment to a -// previous version. If the returned error is nil the patch is valid. 
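The restore described above is expressed as a two-operation JSON patch (replace /spec/template and /metadata/annotations), which getDeploymentPatch below assembles. A minimal, self-contained sketch of that patch shape, using placeholder values rather than a real PodTemplateSpec or the rollbacker's filtered annotation map:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative only: the real patch carries the revision's full
	// PodTemplateSpec and the annotation map computed by Rollback above.
	patch, err := json.Marshal([]interface{}{
		map[string]interface{}{
			"op":    "replace",
			"path":  "/spec/template",
			"value": map[string]string{"note": "full PodTemplateSpec goes here"},
		},
		map[string]interface{}{
			"op":    "replace",
			"path":  "/metadata/annotations",
			"value": map[string]string{"kubernetes.io/change-cause": "example change cause"},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
}
```

Submitted with types.JSONPatchType, as Rollback does above, this swaps both fields in one request.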
-func getDeploymentPatch(podTemplate *corev1.PodTemplateSpec, annotations map[string]string) (types.PatchType, []byte, error) { - // Create a patch of the Deployment that replaces spec.template - patch, err := json.Marshal([]interface{}{ - map[string]interface{}{ - "op": "replace", - "path": "/spec/template", - "value": podTemplate, - }, - map[string]interface{}{ - "op": "replace", - "path": "/metadata/annotations", - "value": annotations, - }, - }) - return types.JSONPatchType, patch, err -} - -func deploymentRevision(deployment *appsv1.Deployment, c kubernetes.Interface, toRevision int64) (revision *appsv1.ReplicaSet, err error) { - - _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1()) - if err != nil { - return nil, fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err) - } - allRSs := allOldRSs - if newRS != nil { - allRSs = append(allRSs, newRS) - } - - var ( - latestReplicaSet *appsv1.ReplicaSet - latestRevision = int64(-1) - previousReplicaSet *appsv1.ReplicaSet - previousRevision = int64(-1) - ) - for _, rs := range allRSs { - if v, err := deploymentutil.Revision(rs); err == nil { - if toRevision == 0 { - if latestRevision < v { - // newest one we've seen so far - previousRevision = latestRevision - previousReplicaSet = latestReplicaSet - latestRevision = v - latestReplicaSet = rs - } else if previousRevision < v { - // second newest one we've seen so far - previousRevision = v - previousReplicaSet = rs - } - } else if toRevision == v { - return rs, nil - } - } - } - - if toRevision > 0 { - return nil, revisionNotFoundErr(toRevision) - } - - if previousReplicaSet == nil { - return nil, fmt.Errorf("no rollout history found for deployment %q", deployment.Name) - } - return previousReplicaSet, nil -} - -type DaemonSetRollbacker struct { - c kubernetes.Interface -} - -func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) { - if toRevision < 0 { - return "", revisionNotFoundErr(toRevision) - } - accessor, err := meta.Accessor(obj) - if err != nil { - return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error()) - } - ds, history, err := daemonSetHistory(r.c.AppsV1(), accessor.GetNamespace(), accessor.GetName()) - if err != nil { - return "", err - } - if toRevision == 0 && len(history) <= 1 { - return "", fmt.Errorf("no last revision to roll back to") - } - - toHistory := findHistory(toRevision, history) - if toHistory == nil { - return "", revisionNotFoundErr(toRevision) - } - - if dryRun { - appliedDS, err := applyDaemonSetHistory(ds, toHistory) - if err != nil { - return "", err - } - return printPodTemplate(&appliedDS.Spec.Template) - } - - // Skip if the revision already matches current DaemonSet - done, err := daemonSetMatch(ds, toHistory) - if err != nil { - return "", err - } - if done { - return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil - } - - // Restore revision - if _, err = r.c.AppsV1().DaemonSets(accessor.GetNamespace()).Patch(accessor.GetName(), types.StrategicMergePatchType, toHistory.Data.Raw); err != nil { - return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) - } - - return rollbackSuccess, nil -} - -// daemonMatch check if the given DaemonSet's template matches the template stored in the given history. 
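deploymentRevision above returns either the exact revision requested or, when toRevision is 0, the second-newest revision seen across the deployment's ReplicaSets. A standalone sketch of that selection rule over plain revision numbers (the helper name is illustrative, not part of the removed package):

```go
package main

import "fmt"

// previousRevision returns the second-newest revision, mirroring the
// toRevision == 0 branch of deploymentRevision above (sketch only).
func previousRevision(revisions []int64) (int64, bool) {
	latest, previous := int64(-1), int64(-1)
	for _, v := range revisions {
		switch {
		case v > latest:
			previous, latest = latest, v // new maximum; old maximum becomes "previous"
		case v > previous:
			previous = v // second-newest so far
		}
	}
	return previous, previous >= 0
}

func main() {
	fmt.Println(previousRevision([]int64{1, 3, 2})) // 2 true: the revision before the newest (3)
}
```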
-func daemonSetMatch(ds *appsv1.DaemonSet, history *appsv1.ControllerRevision) (bool, error) { - patch, err := getDaemonSetPatch(ds) - if err != nil { - return false, err - } - return bytes.Equal(patch, history.Data.Raw), nil -} - -// getPatch returns a strategic merge patch that can be applied to restore a Daemonset to a -// previous version. If the returned error is nil the patch is valid. The current state that we save is just the -// PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously -// recorded patches. -func getDaemonSetPatch(ds *appsv1.DaemonSet) ([]byte, error) { - dsBytes, err := json.Marshal(ds) - if err != nil { - return nil, err - } - var raw map[string]interface{} - err = json.Unmarshal(dsBytes, &raw) - if err != nil { - return nil, err - } - objCopy := make(map[string]interface{}) - specCopy := make(map[string]interface{}) - - // Create a patch of the DaemonSet that replaces spec.template - spec := raw["spec"].(map[string]interface{}) - template := spec["template"].(map[string]interface{}) - specCopy["template"] = template - template["$patch"] = "replace" - objCopy["spec"] = specCopy - patch, err := json.Marshal(objCopy) - return patch, err -} - -type StatefulSetRollbacker struct { - c kubernetes.Interface -} - -// toRevision is a non-negative integer, with 0 being reserved to indicate rolling back to previous configuration -func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) { - if toRevision < 0 { - return "", revisionNotFoundErr(toRevision) - } - accessor, err := meta.Accessor(obj) - if err != nil { - return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error()) - } - sts, history, err := statefulSetHistory(r.c.AppsV1(), accessor.GetNamespace(), accessor.GetName()) - if err != nil { - return "", err - } - if toRevision == 0 && len(history) <= 1 { - return "", fmt.Errorf("no last revision to roll back to") - } - - toHistory := findHistory(toRevision, history) - if toHistory == nil { - return "", revisionNotFoundErr(toRevision) - } - - if dryRun { - appliedSS, err := applyRevision(sts, toHistory) - if err != nil { - return "", err - } - return printPodTemplate(&appliedSS.Spec.Template) - } - - // Skip if the revision already matches current StatefulSet - done, err := statefulsetMatch(sts, toHistory) - if err != nil { - return "", err - } - if done { - return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil - } - - // Restore revision - if _, err = r.c.AppsV1().StatefulSets(sts.Namespace).Patch(sts.Name, types.StrategicMergePatchType, toHistory.Data.Raw); err != nil { - return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) - } - - return rollbackSuccess, nil -} - -var appsCodec = scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion) - -// applyRevision returns a new StatefulSet constructed by restoring the state in revision to set. If the returned error -// is nil, the returned StatefulSet is valid. 
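getDaemonSetPatch above (and its StatefulSet counterpart below) persists only spec.template, tagged with "$patch": "replace" so a strategic merge swaps the whole template rather than merging it field by field. A small sketch of the resulting patch document, with illustrative label values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Shape of the saved/restore patch built above: only spec.template,
	// marked "$patch": "replace" (template contents here are examples).
	patch, err := json.Marshal(map[string]interface{}{
		"spec": map[string]interface{}{
			"template": map[string]interface{}{
				"$patch":   "replace",
				"metadata": map[string]interface{}{"labels": map[string]string{"app": "example"}},
			},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
}
```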
-func applyRevision(set *appsv1.StatefulSet, revision *appsv1.ControllerRevision) (*appsv1.StatefulSet, error) { - clone := set.DeepCopy() - patched, err := strategicpatch.StrategicMergePatch([]byte(runtime.EncodeOrDie(appsCodec, clone)), revision.Data.Raw, clone) - if err != nil { - return nil, err - } - err = json.Unmarshal(patched, clone) - if err != nil { - return nil, err - } - return clone, nil -} - -// statefulsetMatch check if the given StatefulSet's template matches the template stored in the given history. -func statefulsetMatch(ss *appsv1.StatefulSet, history *appsv1.ControllerRevision) (bool, error) { - patch, err := getStatefulSetPatch(ss) - if err != nil { - return false, err - } - return bytes.Equal(patch, history.Data.Raw), nil -} - -// getStatefulSetPatch returns a strategic merge patch that can be applied to restore a StatefulSet to a -// previous version. If the returned error is nil the patch is valid. The current state that we save is just the -// PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously -// recorded patches. -func getStatefulSetPatch(set *appsv1.StatefulSet) ([]byte, error) { - str, err := runtime.Encode(appsCodec, set) - if err != nil { - return nil, err - } - var raw map[string]interface{} - if err := json.Unmarshal([]byte(str), &raw); err != nil { - return nil, err - } - objCopy := make(map[string]interface{}) - specCopy := make(map[string]interface{}) - spec := raw["spec"].(map[string]interface{}) - template := spec["template"].(map[string]interface{}) - specCopy["template"] = template - template["$patch"] = "replace" - objCopy["spec"] = specCopy - patch, err := json.Marshal(objCopy) - return patch, err -} - -// findHistory returns a controllerrevision of a specific revision from the given controllerrevisions. -// It returns nil if no such controllerrevision exists. -// If toRevision is 0, the last previously used history is returned. -func findHistory(toRevision int64, allHistory []*appsv1.ControllerRevision) *appsv1.ControllerRevision { - if toRevision == 0 && len(allHistory) <= 1 { - return nil - } - - // Find the history to rollback to - var toHistory *appsv1.ControllerRevision - if toRevision == 0 { - // If toRevision == 0, find the latest revision (2nd max) - sort.Sort(historiesByRevision(allHistory)) - toHistory = allHistory[len(allHistory)-2] - } else { - for _, h := range allHistory { - if h.Revision == toRevision { - // If toRevision != 0, find the history with matching revision - return h - } - } - } - - return toHistory -} - -// printPodTemplate converts a given pod template into a human-readable string. 
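findHistory above applies the same "roll back to the previous revision" convention to ControllerRevisions: sort ascending by .Revision and take the second-newest entry when toRevision is 0. A self-contained sketch using a stand-in struct instead of appsv1.ControllerRevision:

```go
package main

import (
	"fmt"
	"sort"
)

// revision stands in for appsv1.ControllerRevision in this sketch.
type revision struct {
	Name     string
	Revision int64
}

// previousHistory mirrors the toRevision == 0 branch of findHistory above:
// sort ascending by revision and return the second-newest entry. Callers
// must guarantee len(all) > 1, as findHistory does.
func previousHistory(all []revision) revision {
	sort.Slice(all, func(i, j int) bool { return all[i].Revision < all[j].Revision })
	return all[len(all)-2]
}

func main() {
	fmt.Println(previousHistory([]revision{{"ds-abc", 2}, {"ds-def", 4}, {"ds-ghi", 3}})) // {ds-ghi 3}
}
```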
-func printPodTemplate(specTemplate *corev1.PodTemplateSpec) (string, error) { - podSpec, err := printTemplate(specTemplate) - if err != nil { - return "", err - } - return fmt.Sprintf("will roll back to %s", podSpec), nil -} - -func revisionNotFoundErr(r int64) error { - return fmt.Errorf("unable to find specified revision %v in history", r) -} - -// TODO: copied from daemon controller, should extract to a library -type historiesByRevision []*appsv1.ControllerRevision - -func (h historiesByRevision) Len() int { return len(h) } -func (h historiesByRevision) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h historiesByRevision) Less(i, j int) bool { - return h[i].Revision < h[j].Revision -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go deleted file mode 100644 index 54f94394db897..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go +++ /dev/null @@ -1,845 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "io" - "strconv" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - scaleclient "k8s.io/client-go/scale" - "k8s.io/client-go/util/retry" - "k8s.io/kubernetes/pkg/kubectl/util" - deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" - "k8s.io/kubernetes/pkg/kubectl/util/podutils" - "k8s.io/utils/integer" - utilpointer "k8s.io/utils/pointer" -) - -func valOrZero(val *int32) int32 { - if val == nil { - return int32(0) - } - return *val -} - -const ( - kubectlAnnotationPrefix = "kubectl.kubernetes.io/" - sourceIDAnnotation = kubectlAnnotationPrefix + "update-source-id" - desiredReplicasAnnotation = kubectlAnnotationPrefix + "desired-replicas" - originalReplicasAnnotation = kubectlAnnotationPrefix + "original-replicas" - nextControllerAnnotation = kubectlAnnotationPrefix + "next-controller-id" -) - -// RollingUpdaterConfig is the configuration for a rolling deployment process. -type RollingUpdaterConfig struct { - // Out is a writer for progress output. - Out io.Writer - // OldRC is an existing controller to be replaced. - OldRc *corev1.ReplicationController - // NewRc is a controller that will take ownership of updated pods (will be - // created if needed). - NewRc *corev1.ReplicationController - // UpdatePeriod is the time to wait between individual pod updates. - UpdatePeriod time.Duration - // Interval is the time to wait between polling controller status after - // update. - Interval time.Duration - // Timeout is the time to wait for controller updates before giving up. 
- Timeout time.Duration - // MinReadySeconds is the number of seconds to wait after the pods are ready - MinReadySeconds int32 - // CleanupPolicy defines the cleanup action to take after the deployment is - // complete. - CleanupPolicy RollingUpdaterCleanupPolicy - // MaxUnavailable is the maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding up. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. - // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. - MaxUnavailable intstr.IntOrString - // MaxSurge is the maximum number of pods that can be scheduled above the desired number of pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. - // Example: when this is set to 30%, the new RC can be scaled up immediately - // when the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, new RC can be scaled up - // further, ensuring that total number of pods running at any time during - // the update is at most 130% of desired pods. - MaxSurge intstr.IntOrString - // OnProgress is invoked if set during each scale cycle, to allow the caller to perform additional logic or - // abort the scale. If an error is returned the cleanup method will not be invoked. The percentage value - // is a synthetic "progress" calculation that represents the approximate percentage completion. - OnProgress func(oldRc, newRc *corev1.ReplicationController, percentage int) error -} - -// RollingUpdaterCleanupPolicy is a cleanup action to take after the -// deployment is complete. -type RollingUpdaterCleanupPolicy string - -const ( - // DeleteRollingUpdateCleanupPolicy means delete the old controller. - DeleteRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Delete" - // PreserveRollingUpdateCleanupPolicy means keep the old controller. - PreserveRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Preserve" - // RenameRollingUpdateCleanupPolicy means delete the old controller, and rename - // the new controller to the name of the old controller. - RenameRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Rename" -) - -// RollingUpdater provides methods for updating replicated pods in a predictable, -// fault-tolerant way. -type RollingUpdater struct { - rcClient corev1client.ReplicationControllersGetter - podClient corev1client.PodsGetter - scaleClient scaleclient.ScalesGetter - // Namespace for resources - ns string - // scaleAndWait scales a controller and returns its updated state. - scaleAndWait func(rc *corev1.ReplicationController, retry *RetryParams, wait *RetryParams) (*corev1.ReplicationController, error) - //getOrCreateTargetController gets and validates an existing controller or - //makes a new one. 
- getOrCreateTargetController func(controller *corev1.ReplicationController, sourceID string) (*corev1.ReplicationController, bool, error) - // cleanup performs post deployment cleanup tasks for newRc and oldRc. - cleanup func(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error - // getReadyPods returns the amount of old and new ready pods. - getReadyPods func(oldRc, newRc *corev1.ReplicationController, minReadySeconds int32) (int32, int32, error) - // nowFn returns the current time used to calculate the minReadySeconds - nowFn func() metav1.Time -} - -// NewRollingUpdater creates a RollingUpdater from a client. -func NewRollingUpdater(namespace string, rcClient corev1client.ReplicationControllersGetter, podClient corev1client.PodsGetter, sc scaleclient.ScalesGetter) *RollingUpdater { - updater := &RollingUpdater{ - rcClient: rcClient, - podClient: podClient, - scaleClient: sc, - ns: namespace, - } - // Inject real implementations. - updater.scaleAndWait = updater.scaleAndWaitWithScaler - updater.getOrCreateTargetController = updater.getOrCreateTargetControllerWithClient - updater.getReadyPods = updater.readyPods - updater.cleanup = updater.cleanupWithClients - updater.nowFn = func() metav1.Time { return metav1.Now() } - return updater -} - -// Update all pods for a ReplicationController (oldRc) by creating a new -// controller (newRc) with 0 replicas, and synchronously scaling oldRc and -// newRc until oldRc has 0 replicas and newRc has the original # of desired -// replicas. Cleanup occurs based on a RollingUpdaterCleanupPolicy. -// -// Each interval, the updater will attempt to make progress however it can -// without violating any availability constraints defined by the config. This -// means the amount scaled up or down each interval will vary based on the -// timeliness of readiness and the updater will always try to make progress, -// even slowly. -// -// If an update from newRc to oldRc is already in progress, we attempt to -// drive it to completion. If an error occurs at any step of the update, the -// error will be returned. -// -// A scaling event (either up or down) is considered progress; if no progress -// is made within the config.Timeout, an error is returned. -// -// TODO: make this handle performing a rollback of a partially completed -// rollout. -func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { - out := config.Out - oldRc := config.OldRc - scaleRetryParams := NewRetryParams(config.Interval, config.Timeout) - - // Find an existing controller (for continuing an interrupted update) or - // create a new one if necessary. - sourceID := fmt.Sprintf("%s:%s", oldRc.Name, oldRc.UID) - newRc, existed, err := r.getOrCreateTargetController(config.NewRc, sourceID) - if err != nil { - return err - } - if existed { - fmt.Fprintf(out, "Continuing update with existing controller %s.\n", newRc.Name) - } else { - fmt.Fprintf(out, "Created %s\n", newRc.Name) - } - // Extract the desired replica count from the controller. - desiredAnnotation, err := strconv.Atoi(newRc.Annotations[desiredReplicasAnnotation]) - if err != nil { - return fmt.Errorf("Unable to parse annotation for %s: %s=%s", - newRc.Name, desiredReplicasAnnotation, newRc.Annotations[desiredReplicasAnnotation]) - } - desired := int32(desiredAnnotation) - // Extract the original replica count from the old controller, adding the - // annotation if it doesn't yet exist. 
- _, hasOriginalAnnotation := oldRc.Annotations[originalReplicasAnnotation] - if !hasOriginalAnnotation { - existing, err := r.rcClient.ReplicationControllers(oldRc.Namespace).Get(oldRc.Name, metav1.GetOptions{}) - if err != nil { - return err - } - originReplicas := strconv.Itoa(int(valOrZero(existing.Spec.Replicas))) - applyUpdate := func(rc *corev1.ReplicationController) { - if rc.Annotations == nil { - rc.Annotations = map[string]string{} - } - rc.Annotations[originalReplicasAnnotation] = originReplicas - } - if oldRc, err = updateRcWithRetries(r.rcClient, existing.Namespace, existing, applyUpdate); err != nil { - return err - } - } - // maxSurge is the maximum scaling increment and maxUnavailable are the maximum pods - // that can be unavailable during a rollout. - maxSurge, maxUnavailable, err := deploymentutil.ResolveFenceposts(&config.MaxSurge, &config.MaxUnavailable, desired) - if err != nil { - return err - } - // Validate maximums. - if desired > 0 && maxUnavailable == 0 && maxSurge == 0 { - return fmt.Errorf("one of maxSurge or maxUnavailable must be specified") - } - // The minimum pods which must remain available throughout the update - // calculated for internal convenience. - minAvailable := int32(integer.IntMax(0, int(desired-maxUnavailable))) - // If the desired new scale is 0, then the max unavailable is necessarily - // the effective scale of the old RC regardless of the configuration - // (equivalent to 100% maxUnavailable). - if desired == 0 { - maxUnavailable = valOrZero(oldRc.Spec.Replicas) - minAvailable = 0 - } - - fmt.Fprintf(out, "Scaling up %s from %d to %d, scaling down %s from %d to 0 (keep %d pods available, don't exceed %d pods)\n", - newRc.Name, valOrZero(newRc.Spec.Replicas), desired, oldRc.Name, valOrZero(oldRc.Spec.Replicas), minAvailable, desired+maxSurge) - - // give a caller incremental notification and allow them to exit early - goal := desired - valOrZero(newRc.Spec.Replicas) - if goal < 0 { - goal = -goal - } - progress := func(complete bool) error { - if config.OnProgress == nil { - return nil - } - progress := desired - valOrZero(newRc.Spec.Replicas) - if progress < 0 { - progress = -progress - } - percentage := 100 - if !complete && goal > 0 { - percentage = int((goal - progress) * 100 / goal) - } - return config.OnProgress(oldRc, newRc, percentage) - } - - // Scale newRc and oldRc until newRc has the desired number of replicas and - // oldRc has 0 replicas. - progressDeadline := time.Now().UnixNano() + config.Timeout.Nanoseconds() - for valOrZero(newRc.Spec.Replicas) != desired || valOrZero(oldRc.Spec.Replicas) != 0 { - // Store the existing replica counts for progress timeout tracking. - newReplicas := valOrZero(newRc.Spec.Replicas) - oldReplicas := valOrZero(oldRc.Spec.Replicas) - - // Scale up as much as possible. - scaledRc, err := r.scaleUp(newRc, oldRc, desired, maxSurge, maxUnavailable, scaleRetryParams, config) - if err != nil { - return err - } - newRc = scaledRc - - // notify the caller if necessary - if err := progress(false); err != nil { - return err - } - - // Wait between scaling operations for things to settle. - time.Sleep(config.UpdatePeriod) - - // Scale down as much as possible. - scaledRc, err = r.scaleDown(newRc, oldRc, desired, minAvailable, maxUnavailable, maxSurge, config) - if err != nil { - return err - } - oldRc = scaledRc - - // notify the caller if necessary - if err := progress(false); err != nil { - return err - } - - // If we are making progress, continue to advance the progress deadline. 
- // Otherwise, time out with an error. - progressMade := (valOrZero(newRc.Spec.Replicas) != newReplicas) || (valOrZero(oldRc.Spec.Replicas) != oldReplicas) - if progressMade { - progressDeadline = time.Now().UnixNano() + config.Timeout.Nanoseconds() - } else if time.Now().UnixNano() > progressDeadline { - return fmt.Errorf("timed out waiting for any update progress to be made") - } - } - - // notify the caller if necessary - if err := progress(true); err != nil { - return err - } - - // Housekeeping and cleanup policy execution. - return r.cleanup(oldRc, newRc, config) -} - -// scaleUp scales up newRc to desired by whatever increment is possible given -// the configured surge threshold. scaleUp will safely no-op as necessary when -// it detects redundancy or other relevant conditions. -func (r *RollingUpdater) scaleUp(newRc, oldRc *corev1.ReplicationController, desired, maxSurge, maxUnavailable int32, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*corev1.ReplicationController, error) { - // If we're already at the desired, do nothing. - if valOrZero(newRc.Spec.Replicas) == desired { - return newRc, nil - } - - // Scale up as far as we can based on the surge limit. - increment := (desired + maxSurge) - (valOrZero(oldRc.Spec.Replicas) + valOrZero(newRc.Spec.Replicas)) - // If the old is already scaled down, go ahead and scale all the way up. - if valOrZero(oldRc.Spec.Replicas) == 0 { - increment = desired - valOrZero(newRc.Spec.Replicas) - } - // We can't scale up without violating the surge limit, so do nothing. - if increment <= 0 { - return newRc, nil - } - // Increase the replica count, and deal with fenceposts. - nextVal := valOrZero(newRc.Spec.Replicas) + increment - newRc.Spec.Replicas = &nextVal - if valOrZero(newRc.Spec.Replicas) > desired { - newRc.Spec.Replicas = &desired - } - // Perform the scale-up. - fmt.Fprintf(config.Out, "Scaling %s up to %d\n", newRc.Name, valOrZero(newRc.Spec.Replicas)) - scaledRc, err := r.scaleAndWait(newRc, scaleRetryParams, scaleRetryParams) - if err != nil { - return nil, err - } - return scaledRc, nil -} - -// scaleDown scales down oldRc to 0 at whatever decrement possible given the -// thresholds defined on the config. scaleDown will safely no-op as necessary -// when it detects redundancy or other relevant conditions. -func (r *RollingUpdater) scaleDown(newRc, oldRc *corev1.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int32, config *RollingUpdaterConfig) (*corev1.ReplicationController, error) { - // Already scaled down; do nothing. - if valOrZero(oldRc.Spec.Replicas) == 0 { - return oldRc, nil - } - // Get ready pods. We shouldn't block, otherwise in case both old and new - // pods are unavailable then the rolling update process blocks. - // Timeout-wise we are already covered by the progress check. - _, newAvailable, err := r.getReadyPods(oldRc, newRc, config.MinReadySeconds) - if err != nil { - return nil, err - } - // The old controller is considered as part of the total because we want to - // maintain minimum availability even with a volatile old controller. 
- // Scale down as much as possible while maintaining minimum availability - allPods := valOrZero(oldRc.Spec.Replicas) + valOrZero(newRc.Spec.Replicas) - newUnavailable := valOrZero(newRc.Spec.Replicas) - newAvailable - decrement := allPods - minAvailable - newUnavailable - // The decrement normally shouldn't drop below 0 because the available count - // always starts below the old replica count, but the old replica count can - // decrement due to externalities like pods death in the replica set. This - // will be considered a transient condition; do nothing and try again later - // with new readiness values. - // - // If the most we can scale is 0, it means we can't scale down without - // violating the minimum. Do nothing and try again later when conditions may - // have changed. - if decrement <= 0 { - return oldRc, nil - } - // Reduce the replica count, and deal with fenceposts. - nextOldVal := valOrZero(oldRc.Spec.Replicas) - decrement - oldRc.Spec.Replicas = &nextOldVal - if valOrZero(oldRc.Spec.Replicas) < 0 { - oldRc.Spec.Replicas = utilpointer.Int32Ptr(0) - } - // If the new is already fully scaled and available up to the desired size, go - // ahead and scale old all the way down. - if valOrZero(newRc.Spec.Replicas) == desired && newAvailable == desired { - oldRc.Spec.Replicas = utilpointer.Int32Ptr(0) - } - // Perform the scale-down. - fmt.Fprintf(config.Out, "Scaling %s down to %d\n", oldRc.Name, valOrZero(oldRc.Spec.Replicas)) - retryWait := &RetryParams{config.Interval, config.Timeout} - scaledRc, err := r.scaleAndWait(oldRc, retryWait, retryWait) - if err != nil { - return nil, err - } - return scaledRc, nil -} - -// scalerScaleAndWait scales a controller using a Scaler and a real client. -func (r *RollingUpdater) scaleAndWaitWithScaler(rc *corev1.ReplicationController, retry *RetryParams, wait *RetryParams) (*corev1.ReplicationController, error) { - scaler := NewScaler(r.scaleClient) - if err := scaler.Scale(rc.Namespace, rc.Name, uint(valOrZero(rc.Spec.Replicas)), &ScalePrecondition{-1, ""}, retry, wait, schema.GroupResource{Resource: "replicationcontrollers"}); err != nil { - return nil, err - } - return r.rcClient.ReplicationControllers(rc.Namespace).Get(rc.Name, metav1.GetOptions{}) -} - -// readyPods returns the old and new ready counts for their pods. -// If a pod is observed as being ready, it's considered ready even -// if it later becomes notReady. -func (r *RollingUpdater) readyPods(oldRc, newRc *corev1.ReplicationController, minReadySeconds int32) (int32, int32, error) { - controllers := []*corev1.ReplicationController{oldRc, newRc} - oldReady := int32(0) - newReady := int32(0) - if r.nowFn == nil { - r.nowFn = func() metav1.Time { return metav1.Now() } - } - - for i := range controllers { - controller := controllers[i] - selector := labels.Set(controller.Spec.Selector).AsSelector() - options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := r.podClient.Pods(controller.Namespace).List(options) - if err != nil { - return 0, 0, err - } - for _, v1Pod := range pods.Items { - // Do not count deleted pods as ready - if v1Pod.DeletionTimestamp != nil { - continue - } - if !podutils.IsPodAvailable(&v1Pod, minReadySeconds, r.nowFn()) { - continue - } - switch controller.Name { - case oldRc.Name: - oldReady++ - case newRc.Name: - newReady++ - } - } - } - return oldReady, newReady, nil -} - -// getOrCreateTargetControllerWithClient looks for an existing controller with -// sourceID. 
If found, the existing controller is returned with true -// indicating that the controller already exists. If the controller isn't -// found, a new one is created and returned along with false indicating the -// controller was created. -// -// Existing controllers are validated to ensure their sourceIDAnnotation -// matches sourceID; if there's a mismatch, an error is returned. -func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *corev1.ReplicationController, sourceID string) (*corev1.ReplicationController, bool, error) { - existingRc, err := r.existingController(controller) - if err != nil { - if !errors.IsNotFound(err) { - // There was an error trying to find the controller; don't assume we - // should create it. - return nil, false, err - } - if valOrZero(controller.Spec.Replicas) <= 0 { - return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d", controller.Name, valOrZero(controller.Spec.Replicas)) - } - // The controller wasn't found, so create it. - if controller.Annotations == nil { - controller.Annotations = map[string]string{} - } - controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", valOrZero(controller.Spec.Replicas)) - controller.Annotations[sourceIDAnnotation] = sourceID - controller.Spec.Replicas = utilpointer.Int32Ptr(0) - newRc, err := r.rcClient.ReplicationControllers(r.ns).Create(controller) - return newRc, false, err - } - // Validate and use the existing controller. - annotations := existingRc.Annotations - source := annotations[sourceIDAnnotation] - _, ok := annotations[desiredReplicasAnnotation] - if source != sourceID || !ok { - return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceID, annotations) - } - return existingRc, true, nil -} - -// existingController verifies if the controller already exists -func (r *RollingUpdater) existingController(controller *corev1.ReplicationController) (*corev1.ReplicationController, error) { - // without rc name but generate name, there's no existing rc - if len(controller.Name) == 0 && len(controller.GenerateName) > 0 { - return nil, errors.NewNotFound(corev1.Resource("replicationcontrollers"), controller.Name) - } - // controller name is required to get rc back - return r.rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name, metav1.GetOptions{}) -} - -// cleanupWithClients performs cleanup tasks after the rolling update. Update -// process related annotations are removed from oldRc and newRc. The -// CleanupPolicy on config is executed. 
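Putting the pieces above together, a rolling update is driven through NewRollingUpdater plus a RollingUpdaterConfig. A usage sketch against the API as it existed before this removal; it assumes oldRc and newRc were already prepared (for example via CreateNewControllerFromCurrentController and UpdateExistingReplicationController) and that clientset and scales are configured clients, and the function name is illustrative:

```go
package example

import (
	"os"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
	scaleclient "k8s.io/client-go/scale"
	"k8s.io/kubernetes/pkg/kubectl"
)

// rollingUpdate drives the Update flow documented above: scale newRc up and
// oldRc down in small steps while honoring the surge/unavailability limits.
func rollingUpdate(clientset kubernetes.Interface, scales scaleclient.ScalesGetter, oldRc, newRc *corev1.ReplicationController) error {
	updater := kubectl.NewRollingUpdater(oldRc.Namespace, clientset.CoreV1(), clientset.CoreV1(), scales)
	return updater.Update(&kubectl.RollingUpdaterConfig{
		Out:            os.Stdout,
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   3 * time.Second,
		Interval:       3 * time.Second,
		Timeout:        5 * time.Minute,
		CleanupPolicy:  kubectl.DeleteRollingUpdateCleanupPolicy,
		MaxUnavailable: intstr.FromString("25%"),
		MaxSurge:       intstr.FromString("25%"),
	})
}
```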
-func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error { - // Clean up annotations - var err error - newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name, metav1.GetOptions{}) - if err != nil { - return err - } - applyUpdate := func(rc *corev1.ReplicationController) { - delete(rc.Annotations, sourceIDAnnotation) - delete(rc.Annotations, desiredReplicasAnnotation) - } - if newRc, err = updateRcWithRetries(r.rcClient, r.ns, newRc, applyUpdate); err != nil { - return err - } - - if err = wait.Poll(config.Interval, config.Timeout, ControllerHasDesiredReplicas(r.rcClient, newRc)); err != nil { - return err - } - newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(newRc.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - switch config.CleanupPolicy { - case DeleteRollingUpdateCleanupPolicy: - // delete old rc - fmt.Fprintf(config.Out, "Update succeeded. Deleting %s\n", oldRc.Name) - return r.rcClient.ReplicationControllers(r.ns).Delete(oldRc.Name, nil) - case RenameRollingUpdateCleanupPolicy: - // delete old rc - fmt.Fprintf(config.Out, "Update succeeded. Deleting old controller: %s\n", oldRc.Name) - if err := r.rcClient.ReplicationControllers(r.ns).Delete(oldRc.Name, nil); err != nil { - return err - } - fmt.Fprintf(config.Out, "Renaming %s to %s\n", newRc.Name, oldRc.Name) - return Rename(r.rcClient, newRc, oldRc.Name) - case PreserveRollingUpdateCleanupPolicy: - return nil - default: - return nil - } -} - -func Rename(c corev1client.ReplicationControllersGetter, rc *corev1.ReplicationController, newName string) error { - oldName := rc.Name - rc.Name = newName - rc.ResourceVersion = "" - // First delete the oldName RC and orphan its pods. - policy := metav1.DeletePropagationOrphan - err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &metav1.DeleteOptions{PropagationPolicy: &policy}) - if err != nil && !errors.IsNotFound(err) { - return err - } - err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { - _, err := c.ReplicationControllers(rc.Namespace).Get(oldName, metav1.GetOptions{}) - if err == nil { - return false, nil - } else if errors.IsNotFound(err) { - return true, nil - } else { - return false, err - } - }) - if err != nil { - return err - } - // Then create the same RC with the new name. 
- _, err = c.ReplicationControllers(rc.Namespace).Create(rc) - return err -} - -func LoadExistingNextReplicationController(c corev1client.ReplicationControllersGetter, namespace, newName string) (*corev1.ReplicationController, error) { - if len(newName) == 0 { - return nil, nil - } - newRc, err := c.ReplicationControllers(namespace).Get(newName, metav1.GetOptions{}) - if err != nil && errors.IsNotFound(err) { - return nil, nil - } - return newRc, err -} - -type NewControllerConfig struct { - Namespace string - OldName, NewName string - Image string - Container string - DeploymentKey string - PullPolicy corev1.PullPolicy -} - -func CreateNewControllerFromCurrentController(rcClient corev1client.ReplicationControllersGetter, codec runtime.Codec, cfg *NewControllerConfig) (*corev1.ReplicationController, error) { - containerIndex := 0 - // load the old RC into the "new" RC - newRc, err := rcClient.ReplicationControllers(cfg.Namespace).Get(cfg.OldName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - - if len(cfg.Container) != 0 { - containerFound := false - - for i, c := range newRc.Spec.Template.Spec.Containers { - if c.Name == cfg.Container { - containerIndex = i - containerFound = true - break - } - } - - if !containerFound { - return nil, fmt.Errorf("container %s not found in pod", cfg.Container) - } - } - - if len(newRc.Spec.Template.Spec.Containers) > 1 && len(cfg.Container) == 0 { - return nil, fmt.Errorf("must specify container to update when updating a multi-container pod") - } - - if len(newRc.Spec.Template.Spec.Containers) == 0 { - return nil, fmt.Errorf("pod has no containers! (%v)", newRc) - } - newRc.Spec.Template.Spec.Containers[containerIndex].Image = cfg.Image - if len(cfg.PullPolicy) != 0 { - newRc.Spec.Template.Spec.Containers[containerIndex].ImagePullPolicy = cfg.PullPolicy - } - - newHash, err := util.HashObject(newRc, codec) - if err != nil { - return nil, err - } - - if len(cfg.NewName) == 0 { - cfg.NewName = fmt.Sprintf("%s-%s", newRc.Name, newHash) - } - newRc.Name = cfg.NewName - - newRc.Spec.Selector[cfg.DeploymentKey] = newHash - newRc.Spec.Template.Labels[cfg.DeploymentKey] = newHash - // Clear resource version after hashing so that identical updates get different hashes. - newRc.ResourceVersion = "" - return newRc, nil -} - -func AbortRollingUpdate(c *RollingUpdaterConfig) error { - // Swap the controllers - tmp := c.OldRc - c.OldRc = c.NewRc - c.NewRc = tmp - - if c.NewRc.Annotations == nil { - c.NewRc.Annotations = map[string]string{} - } - c.NewRc.Annotations[sourceIDAnnotation] = fmt.Sprintf("%s:%s", c.OldRc.Name, c.OldRc.UID) - - // Use the original value since the replica count change from old to new - // could be asymmetric. If we don't know the original count, we can't safely - // roll back to a known good size. 
- originalSize, foundOriginal := tmp.Annotations[originalReplicasAnnotation] - if !foundOriginal { - return fmt.Errorf("couldn't find original replica count of %q", tmp.Name) - } - fmt.Fprintf(c.Out, "Setting %q replicas to %s\n", c.NewRc.Name, originalSize) - c.NewRc.Annotations[desiredReplicasAnnotation] = originalSize - c.CleanupPolicy = DeleteRollingUpdateCleanupPolicy - return nil -} - -func GetNextControllerAnnotation(rc *corev1.ReplicationController) (string, bool) { - res, found := rc.Annotations[nextControllerAnnotation] - return res, found -} - -func SetNextControllerAnnotation(rc *corev1.ReplicationController, name string) { - if rc.Annotations == nil { - rc.Annotations = map[string]string{} - } - rc.Annotations[nextControllerAnnotation] = name -} - -func UpdateExistingReplicationController(rcClient corev1client.ReplicationControllersGetter, podClient corev1client.PodsGetter, oldRc *corev1.ReplicationController, namespace, newName, deploymentKey, deploymentValue string, out io.Writer) (*corev1.ReplicationController, error) { - if _, found := oldRc.Spec.Selector[deploymentKey]; !found { - SetNextControllerAnnotation(oldRc, newName) - return AddDeploymentKeyToReplicationController(oldRc, rcClient, podClient, deploymentKey, deploymentValue, namespace, out) - } - - // If we didn't need to update the controller for the deployment key, we still need to write - // the "next" controller. - applyUpdate := func(rc *corev1.ReplicationController) { - SetNextControllerAnnotation(rc, newName) - } - return updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate) -} - -func AddDeploymentKeyToReplicationController(oldRc *corev1.ReplicationController, rcClient corev1client.ReplicationControllersGetter, podClient corev1client.PodsGetter, deploymentKey, deploymentValue, namespace string, out io.Writer) (*corev1.ReplicationController, error) { - var err error - // First, update the template label. This ensures that any newly created pods will have the new label - applyUpdate := func(rc *corev1.ReplicationController) { - if rc.Spec.Template.Labels == nil { - rc.Spec.Template.Labels = map[string]string{} - } - rc.Spec.Template.Labels[deploymentKey] = deploymentValue - } - if oldRc, err = updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate); err != nil { - return nil, err - } - - // Update all pods managed by the rc to have the new hash label, so they are correctly adopted - // TODO: extract the code from the label command and re-use it here. 
- selector := labels.SelectorFromSet(oldRc.Spec.Selector) - options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := podClient.Pods(namespace).List(options) - if err != nil { - return nil, err - } - for ix := range podList.Items { - pod := &podList.Items[ix] - applyUpdate := func(p *corev1.Pod) { - if p.Labels == nil { - p.Labels = map[string]string{ - deploymentKey: deploymentValue, - } - } else { - p.Labels[deploymentKey] = deploymentValue - } - } - if pod, err = updatePodWithRetries(podClient, namespace, pod, applyUpdate); err != nil { - return nil, err - } - } - - if oldRc.Spec.Selector == nil { - oldRc.Spec.Selector = map[string]string{} - } - // Copy the old selector, so that we can scrub out any orphaned pods - selectorCopy := map[string]string{} - for k, v := range oldRc.Spec.Selector { - selectorCopy[k] = v - } - applyUpdate = func(rc *corev1.ReplicationController) { - rc.Spec.Selector[deploymentKey] = deploymentValue - } - // Update the selector of the rc so it manages all the pods we updated above - if oldRc, err = updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate); err != nil { - return nil, err - } - - // Clean up any orphaned pods that don't have the new label, this can happen if the rc manager - // doesn't see the update to its pod template and creates a new pod with the old labels after - // we've finished re-adopting existing pods to the rc. - selector = labels.SelectorFromSet(selectorCopy) - options = metav1.ListOptions{LabelSelector: selector.String()} - if podList, err = podClient.Pods(namespace).List(options); err != nil { - return nil, err - } - for ix := range podList.Items { - pod := &podList.Items[ix] - if value, found := pod.Labels[deploymentKey]; !found || value != deploymentValue { - if err := podClient.Pods(namespace).Delete(pod.Name, nil); err != nil { - return nil, err - } - } - } - - return oldRc, nil -} - -type updateRcFunc func(controller *corev1.ReplicationController) - -// updateRcWithRetries retries updating the given rc on conflict with the following steps: -// 1. Get latest resource -// 2. applyUpdate -// 3. Update the resource -func updateRcWithRetries(rcClient corev1client.ReplicationControllersGetter, namespace string, rc *corev1.ReplicationController, applyUpdate updateRcFunc) (*corev1.ReplicationController, error) { - // Deep copy the rc in case we failed on Get during retry loop - oldRc := rc.DeepCopy() - err := retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) { - // Apply the update, then attempt to push it to the apiserver. - applyUpdate(rc) - if rc, e = rcClient.ReplicationControllers(namespace).Update(rc); e == nil { - // rc contains the latest controller post update - return - } - updateErr := e - // Update the controller with the latest resource version, if the update failed we - // can't trust rc so use oldRc.Name. - if rc, e = rcClient.ReplicationControllers(namespace).Get(oldRc.Name, metav1.GetOptions{}); e != nil { - // The Get failed: Value in rc cannot be trusted. - rc = oldRc - } - // Only return the error from update - return updateErr - }) - // If the error is non-nil the returned controller cannot be trusted, if it is nil, the returned - // controller contains the applied update. - return rc, err -} - -type updatePodFunc func(controller *corev1.Pod) - -// updatePodWithRetries retries updating the given pod on conflict with the following steps: -// 1. Get latest resource -// 2. applyUpdate -// 3. 
Update the resource -func updatePodWithRetries(podClient corev1client.PodsGetter, namespace string, pod *corev1.Pod, applyUpdate updatePodFunc) (*corev1.Pod, error) { - // Deep copy the pod in case we failed on Get during retry loop - oldPod := pod.DeepCopy() - err := retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) { - // Apply the update, then attempt to push it to the apiserver. - applyUpdate(pod) - if pod, e = podClient.Pods(namespace).Update(pod); e == nil { - return - } - updateErr := e - if pod, e = podClient.Pods(namespace).Get(oldPod.Name, metav1.GetOptions{}); e != nil { - pod = oldPod - } - // Only return the error from update - return updateErr - }) - // If the error is non-nil the returned pod cannot be trusted, if it is nil, the returned - // controller contains the applied update. - return pod, err -} - -func FindSourceController(r corev1client.ReplicationControllersGetter, namespace, name string) (*corev1.ReplicationController, error) { - list, err := r.ReplicationControllers(namespace).List(metav1.ListOptions{}) - if err != nil { - return nil, err - } - for ix := range list.Items { - rc := &list.Items[ix] - if rc.Annotations != nil && strings.HasPrefix(rc.Annotations[sourceIDAnnotation], name) { - return rc, nil - } - } - return nil, fmt.Errorf("couldn't find a replication controller with source id == %s/%s", namespace, name) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go deleted file mode 100644 index 932e905175eff..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go +++ /dev/null @@ -1,153 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - - appsv1 "k8s.io/api/apps/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/kubectl/scheme" - deploymentutil "k8s.io/kubernetes/pkg/kubectl/util/deployment" -) - -// StatusViewer provides an interface for resources that have rollout status. -type StatusViewer interface { - Status(obj runtime.Unstructured, revision int64) (string, bool, error) -} - -// StatusViewerFor returns a StatusViewer for the resource specified by kind. -func StatusViewerFor(kind schema.GroupKind) (StatusViewer, error) { - switch kind { - case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), - appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(): - return &DeploymentStatusViewer{}, nil - case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(), - appsv1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(): - return &DaemonSetStatusViewer{}, nil - case appsv1.SchemeGroupVersion.WithKind("StatefulSet").GroupKind(): - return &StatefulSetStatusViewer{}, nil - } - return nil, fmt.Errorf("no status viewer has been implemented for %v", kind) -} - -// DeploymentStatusViewer implements the StatusViewer interface. 
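The StatusViewer API above is resolved per GroupKind and then polled until done is true. A compile-oriented usage sketch against the pre-removal package path; reportRollout and its caller-supplied obj are illustrative:

```go
package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/kubectl"
)

// reportRollout resolves a viewer for apps/v1 Deployments and reports the
// current rollout state of obj; callers typically poll until done is true.
func reportRollout(obj runtime.Unstructured) (bool, error) {
	viewer, err := kubectl.StatusViewerFor(schema.GroupKind{Group: "apps", Kind: "Deployment"})
	if err != nil {
		return false, err
	}
	msg, done, err := viewer.Status(obj, 0) // revision 0: don't pin to a specific revision
	if err != nil {
		return false, err
	}
	fmt.Print(msg)
	return done, nil
}
```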
-type DeploymentStatusViewer struct{} - -// DaemonSetStatusViewer implements the StatusViewer interface. -type DaemonSetStatusViewer struct{} - -// StatefulSetStatusViewer implements the StatusViewer interface. -type StatefulSetStatusViewer struct{} - -// Status returns a message describing deployment status, and a bool value indicating if the status is considered done. -func (s *DeploymentStatusViewer) Status(obj runtime.Unstructured, revision int64) (string, bool, error) { - deployment := &appsv1.Deployment{} - err := scheme.Scheme.Convert(obj, deployment, nil) - if err != nil { - return "", false, fmt.Errorf("failed to convert %T to %T: %v", obj, deployment, err) - } - - if revision > 0 { - deploymentRev, err := deploymentutil.Revision(deployment) - if err != nil { - return "", false, fmt.Errorf("cannot get the revision of deployment %q: %v", deployment.Name, err) - } - if revision != deploymentRev { - return "", false, fmt.Errorf("desired revision (%d) is different from the running revision (%d)", revision, deploymentRev) - } - } - if deployment.Generation <= deployment.Status.ObservedGeneration { - cond := deploymentutil.GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing) - if cond != nil && cond.Reason == deploymentutil.TimedOutReason { - return "", false, fmt.Errorf("deployment %q exceeded its progress deadline", deployment.Name) - } - if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas { - return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Name, deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas), false, nil - } - if deployment.Status.Replicas > deployment.Status.UpdatedReplicas { - return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d old replicas are pending termination...\n", deployment.Name, deployment.Status.Replicas-deployment.Status.UpdatedReplicas), false, nil - } - if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas { - return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d of %d updated replicas are available...\n", deployment.Name, deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas), false, nil - } - return fmt.Sprintf("deployment %q successfully rolled out\n", deployment.Name), true, nil - } - return fmt.Sprintf("Waiting for deployment spec update to be observed...\n"), false, nil -} - -// Status returns a message describing daemon set status, and a bool value indicating if the status is considered done. 
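DeploymentStatusViewer.Status above reports progress in a fixed order: updated vs. desired replicas, then old replicas pending termination, then availability of the updated replicas. A stand-in sketch of that ordering over plain counts (it omits the ObservedGeneration and Progressing-condition checks the real code performs, and the helper name is illustrative):

```go
package example

import "fmt"

// deploymentRolloutMessage mirrors the ordering of checks in
// DeploymentStatusViewer.Status above, using plain counts (sketch only).
func deploymentRolloutMessage(desired, updated, total, available int32) (string, bool) {
	switch {
	case updated < desired:
		return fmt.Sprintf("%d out of %d new replicas have been updated", updated, desired), false
	case total > updated:
		return fmt.Sprintf("%d old replicas are pending termination", total-updated), false
	case available < updated:
		return fmt.Sprintf("%d of %d updated replicas are available", available, updated), false
	default:
		return "successfully rolled out", true
	}
}
```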
-func (s *DaemonSetStatusViewer) Status(obj runtime.Unstructured, revision int64) (string, bool, error) { - //ignoring revision as DaemonSets does not have history yet - - daemon := &appsv1.DaemonSet{} - err := scheme.Scheme.Convert(obj, daemon, nil) - if err != nil { - return "", false, fmt.Errorf("failed to convert %T to %T: %v", obj, daemon, err) - } - - if daemon.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType { - return "", true, fmt.Errorf("rollout status is only available for %s strategy type", appsv1.RollingUpdateStatefulSetStrategyType) - } - if daemon.Generation <= daemon.Status.ObservedGeneration { - if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled { - return fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d out of %d new pods have been updated...\n", daemon.Name, daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled), false, nil - } - if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled { - return fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d of %d updated pods are available...\n", daemon.Name, daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled), false, nil - } - return fmt.Sprintf("daemon set %q successfully rolled out\n", daemon.Name), true, nil - } - return fmt.Sprintf("Waiting for daemon set spec update to be observed...\n"), false, nil -} - -// Status returns a message describing statefulset status, and a bool value indicating if the status is considered done. -func (s *StatefulSetStatusViewer) Status(obj runtime.Unstructured, revision int64) (string, bool, error) { - sts := &appsv1.StatefulSet{} - err := scheme.Scheme.Convert(obj, sts, nil) - if err != nil { - return "", false, fmt.Errorf("failed to convert %T to %T: %v", obj, sts, err) - } - - if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { - return "", true, fmt.Errorf("rollout status is only available for %s strategy type", appsv1.RollingUpdateStatefulSetStrategyType) - } - if sts.Status.ObservedGeneration == 0 || sts.Generation > sts.Status.ObservedGeneration { - return "Waiting for statefulset spec update to be observed...\n", false, nil - } - if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas { - return fmt.Sprintf("Waiting for %d pods to be ready...\n", *sts.Spec.Replicas-sts.Status.ReadyReplicas), false, nil - } - if sts.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil { - if sts.Spec.Replicas != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil { - if sts.Status.UpdatedReplicas < (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition) { - return fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...\n", - sts.Status.UpdatedReplicas, *sts.Spec.Replicas-*sts.Spec.UpdateStrategy.RollingUpdate.Partition), false, nil - } - } - return fmt.Sprintf("partitioned roll out complete: %d new pods have been updated...\n", - sts.Status.UpdatedReplicas), true, nil - } - if sts.Status.UpdateRevision != sts.Status.CurrentRevision { - return fmt.Sprintf("waiting for statefulset rolling update to complete %d pods at revision %s...\n", - sts.Status.UpdatedReplicas, sts.Status.UpdateRevision), false, nil - } - return fmt.Sprintf("statefulset rolling update complete %d pods at revision %s...\n", sts.Status.CurrentReplicas, sts.Status.CurrentRevision), true, nil - -} diff --git 
a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go deleted file mode 100644 index abe43d6e62c30..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "strconv" - "time" - - autoscalingv1 "k8s.io/api/autoscaling/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/wait" - scaleclient "k8s.io/client-go/scale" -) - -// Scaler provides an interface for resources that can be scaled. -type Scaler interface { - // Scale scales the named resource after checking preconditions. It optionally - // retries in the event of resource version mismatch (if retry is not nil), - // and optionally waits until the status of the resource matches newSize (if wait is not nil) - // TODO: Make the implementation of this watch-based (#56075) once #31345 is fixed. - Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams, gr schema.GroupResource) error - // ScaleSimple does a simple one-shot attempt at scaling - not useful on its own, but - // a necessary building block for Scale - ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint, gr schema.GroupResource) (updatedResourceVersion string, err error) -} - -// NewScaler get a scaler for a given resource -func NewScaler(scalesGetter scaleclient.ScalesGetter) Scaler { - return &genericScaler{scalesGetter} -} - -// ScalePrecondition describes a condition that must be true for the scale to take place -// If CurrentSize == -1, it is ignored. -// If CurrentResourceVersion is the empty string, it is ignored. -// Otherwise they must equal the values in the resource for it to be valid. -type ScalePrecondition struct { - Size int - ResourceVersion string -} - -// A PreconditionError is returned when a resource fails to match -// the scale preconditions passed to kubectl. -type PreconditionError struct { - Precondition string - ExpectedValue string - ActualValue string -} - -func (pe PreconditionError) Error() string { - return fmt.Sprintf("Expected %s to be %s, was %s", pe.Precondition, pe.ExpectedValue, pe.ActualValue) -} - -// RetryParams encapsulates the retry parameters used by kubectl's scaler. 
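The Scaler interface above is normally obtained via NewScaler and invoked with both preconditions disabled (Size -1, empty ResourceVersion), as described in the ScalePrecondition comment. A usage sketch against the pre-removal API; the namespace, resource name, target size, and timeouts are examples:

```go
package example

import (
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	scaleclient "k8s.io/client-go/scale"
	"k8s.io/kubernetes/pkg/kubectl"
)

// scaleDeployment scales "web" in "default" to 5 replicas, skipping both
// preconditions and waiting up to a minute for status to catch up.
// Assumes a configured scaleclient.ScalesGetter.
func scaleDeployment(sc scaleclient.ScalesGetter) error {
	scaler := kubectl.NewScaler(sc)
	retry := kubectl.NewRetryParams(time.Second, time.Minute)
	waitFor := kubectl.NewRetryParams(time.Second, time.Minute)
	return scaler.Scale("default", "web", 5,
		&kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""},
		retry, waitFor,
		schema.GroupResource{Group: "apps", Resource: "deployments"})
}
```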
-type RetryParams struct { - Interval, Timeout time.Duration -} - -func NewRetryParams(interval, timeout time.Duration) *RetryParams { - return &RetryParams{interval, timeout} -} - -// ScaleCondition is a closure around Scale that facilitates retries via util.wait -func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string, gr schema.GroupResource) wait.ConditionFunc { - return func() (bool, error) { - rv, err := r.ScaleSimple(namespace, name, precondition, count, gr) - if updatedResourceVersion != nil { - *updatedResourceVersion = rv - } - // Retry only on update conflicts. - if errors.IsConflict(err) { - return false, nil - } - if err != nil { - return false, err - } - return true, nil - } -} - -// validateGeneric ensures that the preconditions match. Returns nil if they are valid, otherwise an error -func (precondition *ScalePrecondition) validate(scale *autoscalingv1.Scale) error { - if precondition.Size != -1 && int(scale.Spec.Replicas) != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(scale.Spec.Replicas))} - } - if len(precondition.ResourceVersion) > 0 && scale.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, scale.ResourceVersion} - } - return nil -} - -// genericScaler can update scales for resources in a particular namespace -type genericScaler struct { - scaleNamespacer scaleclient.ScalesGetter -} - -var _ Scaler = &genericScaler{} - -// ScaleSimple updates a scale of a given resource. It returns the resourceVersion of the scale if the update was successful. -func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint, gr schema.GroupResource) (updatedResourceVersion string, err error) { - scale := &autoscalingv1.Scale{ - ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, - } - if preconditions != nil { - var err error - scale, err = s.scaleNamespacer.Scales(namespace).Get(gr, name) - if err != nil { - return "", err - } - if err := preconditions.validate(scale); err != nil { - return "", err - } - } - - scale.Spec.Replicas = int32(newSize) - updatedScale, err := s.scaleNamespacer.Scales(namespace).Update(gr, scale) - if err != nil { - return "", err - } - return updatedScale.ResourceVersion, nil -} - -// Scale updates a scale of a given resource to a new size, with optional precondition check (if preconditions is not nil), -// optional retries (if retry is not nil), and then optionally waits for the status to reach desired count. 
-func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams, gr schema.GroupResource) error { - if retry == nil { - // make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(s, preconditions, namespace, resourceName, newSize, nil, gr) - if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - return WaitForScaleHasDesiredReplicas(s.scaleNamespacer, gr, resourceName, namespace, newSize, waitForReplicas) - } - return nil -} - -// scaleHasDesiredReplicas returns a condition that will be true if and only if the desired replica -// count for a scale (Spec) equals its updated replicas count (Status) -func scaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupResource, resourceName string, namespace string, desiredReplicas int32) wait.ConditionFunc { - return func() (bool, error) { - actualScale, err := sClient.Scales(namespace).Get(gr, resourceName) - if err != nil { - return false, err - } - // this means the desired scale target has been reset by something else - if actualScale.Spec.Replicas != desiredReplicas { - return true, nil - } - return actualScale.Spec.Replicas == actualScale.Status.Replicas && - desiredReplicas == actualScale.Status.Replicas, nil - } -} - -// WaitForScaleHasDesiredReplicas waits until condition scaleHasDesiredReplicas is satisfied -// or returns error when timeout happens -func WaitForScaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupResource, resourceName string, namespace string, newSize uint, waitForReplicas *RetryParams) error { - if waitForReplicas == nil { - return fmt.Errorf("waitForReplicas parameter cannot be nil") - } - err := wait.PollImmediate( - waitForReplicas.Interval, - waitForReplicas.Timeout, - scaleHasDesiredReplicas(sClient, gr, resourceName, namespace, int32(newSize))) - if err == wait.ErrWaitTimeout { - return fmt.Errorf("timed out waiting for %q to be synced", resourceName) - } - return err -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/BUILD.bazel deleted file mode 100644 index 26de2dc908771..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["certificate.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/certificate", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/api/certificates/v1beta1:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/certificate.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/certificate.go deleted file mode 100644 index 201958c6fca74..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/certificate/certificate.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package certificate - -import ( - "crypto/x509" - "encoding/pem" - "errors" - - certificatesv1beta1 "k8s.io/api/certificates/v1beta1" -) - -// TODO(yue9944882): Remove this helper package once it's copied to k/api - -// ParseCSR extracts the CSR from the API object and decodes it. -func ParseCSR(obj *certificatesv1beta1.CertificateSigningRequest) (*x509.CertificateRequest, error) { - // extract PEM from request object - pemBytes := obj.Spec.Request - block, _ := pem.Decode(pemBytes) - if block == nil || block.Type != "CERTIFICATE REQUEST" { - return nil, errors.New("PEM block type must be CERTIFICATE REQUEST") - } - csr, err := x509.ParseCertificateRequest(block.Bytes) - if err != nil { - return nil, err - } - return csr, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/BUILD.bazel deleted file mode 100644 index b3b3bcc1a6e59..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["deployment.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/deployment", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/apps/v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/deployment.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/deployment.go deleted file mode 100644 index 49fd4ec9fdb3e..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/deployment/deployment.go +++ /dev/null @@ -1,229 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package deployment - -import ( - "sort" - "strconv" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - intstrutil "k8s.io/apimachinery/pkg/util/intstr" - appsclient "k8s.io/client-go/kubernetes/typed/apps/v1" -) - -const ( - // RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence - RevisionAnnotation = "deployment.kubernetes.io/revision" - // RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment. - RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history" - // DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation - // in its replica sets. Helps in separating scaling events from the rollout process and for - // determining if the new replica set for a deployment is really saturated. - DesiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas" - // MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which - // is deployment.spec.replicas + maxSurge. Used by the underlying replica sets to estimate their - // proportions in case the deployment has surge replicas. - MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas" - // RollbackRevisionNotFound is not found rollback event reason - RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound" - // RollbackTemplateUnchanged is the template unchanged rollback event reason - RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged" - // RollbackDone is the done rollback event reason - RollbackDone = "DeploymentRollback" - // TimedOutReason is added in a deployment when its newest replica set fails to show any progress - // within the given deadline (progressDeadlineSeconds). - TimedOutReason = "ProgressDeadlineExceeded" -) - -// GetDeploymentCondition returns the condition with the provided type. -func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition { - for i := range status.Conditions { - c := status.Conditions[i] - if c.Type == condType { - return &c - } - } - return nil -} - -// Revision returns the revision number of the input object. -func Revision(obj runtime.Object) (int64, error) { - acc, err := meta.Accessor(obj) - if err != nil { - return 0, err - } - v, ok := acc.GetAnnotations()[RevisionAnnotation] - if !ok { - return 0, nil - } - return strconv.ParseInt(v, 10, 64) -} - -// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and -// ReplicaSetList from client interface. Note that the first set of old replica sets doesn't include the ones -// with no pods, and the second set of old replica sets include all old replica sets. The third returned value -// is the new replica set, and it may be nil if it doesn't exist yet. 
-func GetAllReplicaSets(deployment *appsv1.Deployment, c appsclient.AppsV1Interface) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet, *appsv1.ReplicaSet, error) { - rsList, err := listReplicaSets(deployment, rsListFromClient(c)) - if err != nil { - return nil, nil, nil, err - } - oldRSes, allOldRSes := findOldReplicaSets(deployment, rsList) - newRS := findNewReplicaSet(deployment, rsList) - return oldRSes, allOldRSes, newRS, nil -} - -// RsListFromClient returns an rsListFunc that wraps the given client. -func rsListFromClient(c appsclient.AppsV1Interface) rsListFunc { - return func(namespace string, options metav1.ListOptions) ([]*appsv1.ReplicaSet, error) { - rsList, err := c.ReplicaSets(namespace).List(options) - if err != nil { - return nil, err - } - var ret []*appsv1.ReplicaSet - for i := range rsList.Items { - ret = append(ret, &rsList.Items[i]) - } - return ret, err - } -} - -// TODO: switch this to full namespacers -type rsListFunc func(string, metav1.ListOptions) ([]*appsv1.ReplicaSet, error) - -// listReplicaSets returns a slice of RSes the given deployment targets. -// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan), -// because only the controller itself should do that. -// However, it does filter out anything whose ControllerRef doesn't match. -func listReplicaSets(deployment *appsv1.Deployment, getRSList rsListFunc) ([]*appsv1.ReplicaSet, error) { - // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector - // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830. - namespace := deployment.Namespace - selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) - if err != nil { - return nil, err - } - options := metav1.ListOptions{LabelSelector: selector.String()} - all, err := getRSList(namespace, options) - if err != nil { - return nil, err - } - // Only include those whose ControllerRef matches the Deployment. - owned := make([]*appsv1.ReplicaSet, 0, len(all)) - for _, rs := range all { - if metav1.IsControlledBy(rs, deployment) { - owned = append(owned, rs) - } - } - return owned, nil -} - -// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] -// We ignore pod-template-hash because: -// 1. The hash result would be different upon podTemplateSpec API changes -// (e.g. the addition of a new field will cause the hash code to change) -// 2. The deployment template won't have hash labels -func equalIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool { - t1Copy := template1.DeepCopy() - t2Copy := template2.DeepCopy() - // Remove hash labels from template.Labels before comparing - delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) - delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey) - return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) -} - -// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template). 
-func findNewReplicaSet(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) *appsv1.ReplicaSet { - sort.Sort(replicaSetsByCreationTimestamp(rsList)) - for i := range rsList { - if equalIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) { - // In rare cases, such as after cluster upgrades, Deployment may end up with - // having more than one new ReplicaSets that have the same template as its template, - // see https://github.com/kubernetes/kubernetes/issues/40415 - // We deterministically choose the oldest new ReplicaSet. - return rsList[i] - } - } - // new ReplicaSet does not exist. - return nil -} - -// replicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker. -type replicaSetsByCreationTimestamp []*appsv1.ReplicaSet - -func (o replicaSetsByCreationTimestamp) Len() int { return len(o) } -func (o replicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } -func (o replicaSetsByCreationTimestamp) Less(i, j int) bool { - if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) { - return o[i].Name < o[j].Name - } - return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) -} - -// // FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes. -// // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. -func findOldReplicaSets(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet) { - var requiredRSs []*appsv1.ReplicaSet - var allRSs []*appsv1.ReplicaSet - newRS := findNewReplicaSet(deployment, rsList) - for _, rs := range rsList { - // Filter out new replica set - if newRS != nil && rs.UID == newRS.UID { - continue - } - allRSs = append(allRSs, rs) - if *(rs.Spec.Replicas) != 0 { - requiredRSs = append(requiredRSs, rs) - } - } - return requiredRSs, allRSs -} - -// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one -// step. For example: -// -// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1) -// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1) -// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) -// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) -// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) -// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1) -func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { - surge, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true) - if err != nil { - return 0, 0, err - } - unavailable, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false) - if err != nil { - return 0, 0, err - } - - if surge == 0 && unavailable == 0 { - // Validation should never allow the user to explicitly use zero values for both maxSurge - // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. - // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the - // theory that surge might not work due to quota. 
- unavailable = 1 - } - - return int32(surge), int32(unavailable), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/BUILD.bazel deleted file mode 100644 index d35758f3c742c..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/event/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["sorted_event_list.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/event", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/event", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/BUILD.bazel deleted file mode 100644 index 8b1a9ace34bac..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["fieldpath.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/fieldpath", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/fieldpath.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/fieldpath.go deleted file mode 100644 index efd08cde0f530..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/fieldpath/fieldpath.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fieldpath - -import ( - "fmt" - "strings" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" -) - -// TODO(yue9944882): Remove this helper package once it's copied to k/apimachinery - -// FormatMap formats map[string]string to a string. -func FormatMap(m map[string]string) (fmtStr string) { - // output with keys in sorted order to provide stable output - keys := sets.NewString() - for key := range m { - keys.Insert(key) - } - for _, key := range keys.List() { - fmtStr += fmt.Sprintf("%v=%q\n", key, m[key]) - } - fmtStr = strings.TrimSuffix(fmtStr, "\n") - - return -} - -// ExtractFieldPathAsString extracts the field from the given object -// and returns it as a string. The object must be a pointer to an -// API type. 
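[Editorial aside, not part of the diff hunks above or below: a minimal usage sketch of the fieldpath helper being removed here, written against the importpath recorded in the deleted BUILD.bazel (k8s.io/kubernetes/pkg/kubectl/util/fieldpath); the pod name and label are invented for illustration.]

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/kubectl/util/fieldpath" // path as vendored before this change
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:   "web-0",
		Labels: map[string]string{"app": "web"},
	}}
	// A subscripted path is split into ("metadata.labels", "app") and looked up.
	value, err := fieldpath.ExtractFieldPathAsString(pod, "metadata.labels['app']")
	fmt.Println(value, err) // web <nil>
}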
-func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) { - accessor, err := meta.Accessor(obj) - if err != nil { - return "", nil - } - - if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok { - switch path { - case "metadata.annotations": - if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 { - return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) - } - return accessor.GetAnnotations()[subscript], nil - case "metadata.labels": - if errs := validation.IsQualifiedName(subscript); len(errs) != 0 { - return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) - } - return accessor.GetLabels()[subscript], nil - default: - return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath) - } - } - - switch fieldPath { - case "metadata.annotations": - return FormatMap(accessor.GetAnnotations()), nil - case "metadata.labels": - return FormatMap(accessor.GetLabels()), nil - case "metadata.name": - return accessor.GetName(), nil - case "metadata.namespace": - return accessor.GetNamespace(), nil - case "metadata.uid": - return string(accessor.GetUID()), nil - } - - return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath) -} - -// SplitMaybeSubscriptedPath checks whether the specified fieldPath is -// subscripted, and -// - if yes, this function splits the fieldPath into path and subscript, and -// returns (path, subscript, true). -// - if no, this function returns (fieldPath, "", false). -// -// Example inputs and outputs: -// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) -// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) -// - "metadata.labels['']" --> ("metadata.labels", "", true) -// - "metadata.labels" --> ("metadata.labels", "", false) -func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) { - if !strings.HasSuffix(fieldPath, "']") { - return fieldPath, "", false - } - s := strings.TrimSuffix(fieldPath, "']") - parts := strings.SplitN(s, "['", 2) - if len(parts) < 2 { - return fieldPath, "", false - } - if len(parts[0]) == 0 { - return fieldPath, "", false - } - return parts[0], parts[1], true -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/BUILD.bazel deleted file mode 100644 index b0047a23a2818..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["podutils.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/podutils", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/utils/integer:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/podutils.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/podutils.go deleted file mode 100644 index 142b8879651fd..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/podutils/podutils.go +++ /dev/null @@ -1,190 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package podutils - -import ( - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/integer" -) - -// IsPodAvailable returns true if a pod is available; false otherwise. -// Precondition for an available pod is that it must be ready. On top -// of that, there are two cases when a pod can be considered available: -// 1. minReadySeconds == 0, or -// 2. LastTransitionTime (is set) + minReadySeconds < current time -func IsPodAvailable(pod *corev1.Pod, minReadySeconds int32, now metav1.Time) bool { - if !IsPodReady(pod) { - return false - } - - c := getPodReadyCondition(pod.Status) - minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second - if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { - return true - } - return false -} - -// IsPodReady returns true if a pod is ready; false otherwise. -func IsPodReady(pod *corev1.Pod) bool { - return isPodReadyConditionTrue(pod.Status) -} - -// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. -func isPodReadyConditionTrue(status corev1.PodStatus) bool { - condition := getPodReadyCondition(status) - return condition != nil && condition.Status == corev1.ConditionTrue -} - -// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. -// Returns nil if the condition is not present. -func getPodReadyCondition(status corev1.PodStatus) *corev1.PodCondition { - _, condition := getPodCondition(&status, corev1.PodReady) - return condition -} - -// GetPodCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the index of the located condition. -func getPodCondition(status *corev1.PodStatus, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) { - if status == nil { - return -1, nil - } - return getPodConditionFromList(status.Conditions, conditionType) -} - -// GetPodConditionFromList extracts the provided condition from the given list of condition and -// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present. -func getPodConditionFromList(conditions []corev1.PodCondition, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) { - if conditions == nil { - return -1, nil - } - for i := range conditions { - if conditions[i].Type == conditionType { - return i, &conditions[i] - } - } - return -1, nil -} - -// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs. -type ByLogging []*corev1.Pod - -func (s ByLogging) Len() int { return len(s) } -func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func (s ByLogging) Less(i, j int) bool { - // 1. assigned < unassigned - if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { - return len(s[i].Spec.NodeName) > 0 - } - // 2. 
PodRunning < PodUnknown < PodPending - m := map[corev1.PodPhase]int{corev1.PodRunning: 0, corev1.PodUnknown: 1, corev1.PodPending: 2} - if m[s[i].Status.Phase] != m[s[j].Status.Phase] { - return m[s[i].Status.Phase] < m[s[j].Status.Phase] - } - // 3. ready < not ready - if IsPodReady(s[i]) != IsPodReady(s[j]) { - return IsPodReady(s[i]) - } - // TODO: take availability into account when we push minReadySeconds information from deployment into pods, - // see https://github.com/kubernetes/kubernetes/issues/22065 - // 4. Been ready for more time < less time < empty time - if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { - return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i])) - } - // 5. Pods with containers with higher restart counts < lower restart counts - if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { - return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) - } - // 6. older pods < newer pods < empty timestamp pods - if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) { - return afterOrZero(&s[j].CreationTimestamp, &s[i].CreationTimestamp) - } - return false -} - -// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete. -type ActivePods []*corev1.Pod - -func (s ActivePods) Len() int { return len(s) } -func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func (s ActivePods) Less(i, j int) bool { - // 1. Unassigned < assigned - // If only one of the pods is unassigned, the unassigned one is smaller - if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { - return len(s[i].Spec.NodeName) == 0 - } - // 2. PodPending < PodUnknown < PodRunning - m := map[corev1.PodPhase]int{corev1.PodPending: 0, corev1.PodUnknown: 1, corev1.PodRunning: 2} - if m[s[i].Status.Phase] != m[s[j].Status.Phase] { - return m[s[i].Status.Phase] < m[s[j].Status.Phase] - } - // 3. Not ready < ready - // If only one of the pods is not ready, the not ready one is smaller - if IsPodReady(s[i]) != IsPodReady(s[j]) { - return !IsPodReady(s[i]) - } - // TODO: take availability into account when we push minReadySeconds information from deployment into pods, - // see https://github.com/kubernetes/kubernetes/issues/22065 - // 4. Been ready for empty time < less time < more time - // If both pods are ready, the latest ready one is smaller - if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { - return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j])) - } - // 5. Pods with containers with higher restart counts < lower restart counts - if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { - return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) - } - // 6. Empty creation time pods < newer pods < older pods - if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) { - return afterOrZero(&s[i].CreationTimestamp, &s[j].CreationTimestamp) - } - return false -} - -// afterOrZero checks if time t1 is after time t2; if one of them -// is zero, the zero time is seen as after non-zero time. 
-func afterOrZero(t1, t2 *metav1.Time) bool { - if t1.Time.IsZero() || t2.Time.IsZero() { - return t1.Time.IsZero() - } - return t1.After(t2.Time) -} - -func podReadyTime(pod *corev1.Pod) *metav1.Time { - if IsPodReady(pod) { - for _, c := range pod.Status.Conditions { - // we only care about pod ready conditions - if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue { - return &c.LastTransitionTime - } - } - } - return &metav1.Time{} -} - -func maxContainerRestarts(pod *corev1.Pod) int { - maxRestarts := 0 - for _, c := range pod.Status.ContainerStatuses { - maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount)) - } - return maxRestarts -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/BUILD.bazel deleted file mode 100644 index a85e6abd1e223..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["qos.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/qos", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/qos.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/qos.go deleted file mode 100644 index 73b25acac2d68..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/qos/qos.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package qos - -import ( - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/sets" -) - -var supportedQoSComputeResources = sets.NewString(string(corev1.ResourceCPU), string(corev1.ResourceMemory)) - -func isSupportedQoSComputeResource(name corev1.ResourceName) bool { - return supportedQoSComputeResources.Has(string(name)) -} - -// GetPodQOS returns the QoS class of a pod. -// A pod is besteffort if none of its containers have specified any requests or limits. -// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. -// A pod is burstable if limits and requests do not match across all containers. 
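[Editorial aside, not part of the diff: a minimal sketch of the QoS classification described in the comment above, using the importpath from the deleted BUILD.bazel (k8s.io/kubernetes/pkg/kubectl/util/qos) as it existed before this change; the pod values are invented for illustration.]

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/kubectl/util/qos" // path as vendored before this change
)

func main() {
	cpu, mem := resource.MustParse("100m"), resource.MustParse("128Mi")
	pod := &corev1.Pod{Spec: corev1.PodSpec{Containers: []corev1.Container{{
		Name: "app",
		Resources: corev1.ResourceRequirements{
			// Requests equal limits for CPU and memory in every container => Guaranteed.
			Requests: corev1.ResourceList{corev1.ResourceCPU: cpu, corev1.ResourceMemory: mem},
			Limits:   corev1.ResourceList{corev1.ResourceCPU: cpu, corev1.ResourceMemory: mem},
		},
	}}}}
	fmt.Println(qos.GetPodQOS(pod)) // Guaranteed; drop the limits and it becomes Burstable.
}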
-func GetPodQOS(pod *corev1.Pod) corev1.PodQOSClass { - requests := corev1.ResourceList{} - limits := corev1.ResourceList{} - zeroQuantity := resource.MustParse("0") - isGuaranteed := true - for _, container := range pod.Spec.Containers { - // process requests - for name, quantity := range container.Resources.Requests { - if !isSupportedQoSComputeResource(name) { - continue - } - if quantity.Cmp(zeroQuantity) == 1 { - delta := quantity.Copy() - if _, exists := requests[name]; !exists { - requests[name] = *delta - } else { - delta.Add(requests[name]) - requests[name] = *delta - } - } - } - // process limits - qosLimitsFound := sets.NewString() - for name, quantity := range container.Resources.Limits { - if !isSupportedQoSComputeResource(name) { - continue - } - if quantity.Cmp(zeroQuantity) == 1 { - qosLimitsFound.Insert(string(name)) - delta := quantity.Copy() - if _, exists := limits[name]; !exists { - limits[name] = *delta - } else { - delta.Add(limits[name]) - limits[name] = *delta - } - } - } - - if !qosLimitsFound.HasAll(string(corev1.ResourceMemory), string(corev1.ResourceCPU)) { - isGuaranteed = false - } - } - if len(requests) == 0 && len(limits) == 0 { - return corev1.PodQOSBestEffort - } - // Check is requests match limits for all resources. - if isGuaranteed { - for name, req := range requests { - if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 { - isGuaranteed = false - break - } - } - } - if isGuaranteed && - len(requests) == len(limits) { - return corev1.PodQOSGuaranteed - } - return corev1.PodQOSBurstable -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/BUILD.bazel deleted file mode 100644 index 031fff4788165..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["rbac.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/rbac", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/api/rbac/v1:go_default_library"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/rbac.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/rbac.go deleted file mode 100644 index a149a51afb080..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/rbac/rbac.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rbac - -import ( - rbacv1 "k8s.io/api/rbac/v1" - "reflect" - "strings" -) - -type simpleResource struct { - Group string - Resource string - ResourceNameExist bool - ResourceName string -} - -// CompactRules combines rules that contain a single APIGroup/Resource, differ only by verb, and contain no other attributes. -// this is a fast check, and works well with the decomposed "missing rules" list from a Covers check. 
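[Editorial aside, not part of the diff: a small sketch of the rule compaction described above, against the importpath from the deleted BUILD.bazel (k8s.io/kubernetes/pkg/kubectl/util/rbac); the example rules are invented.]

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/kubernetes/pkg/kubectl/util/rbac" // path as vendored before this change
)

func main() {
	rules := []rbacv1.PolicyRule{
		{APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"get"}},
		{APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"list", "watch"}},
	}
	// Both rules are "simple" (one group, one resource, no names or URLs), so they merge.
	compacted, err := rbac.CompactRules(rules)
	fmt.Println(compacted, err) // one rule for pods with verbs get, list, watch
}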
-func CompactRules(rules []rbacv1.PolicyRule) ([]rbacv1.PolicyRule, error) { - compacted := make([]rbacv1.PolicyRule, 0, len(rules)) - - simpleRules := map[simpleResource]*rbacv1.PolicyRule{} - for _, rule := range rules { - if resource, isSimple := isSimpleResourceRule(&rule); isSimple { - if existingRule, ok := simpleRules[resource]; ok { - // Add the new verbs to the existing simple resource rule - if existingRule.Verbs == nil { - existingRule.Verbs = []string{} - } - existingRule.Verbs = append(existingRule.Verbs, rule.Verbs...) - } else { - // Copy the rule to accumulate matching simple resource rules into - simpleRules[resource] = rule.DeepCopy() - } - } else { - compacted = append(compacted, rule) - } - } - - // Once we've consolidated the simple resource rules, add them to the compacted list - for _, simpleRule := range simpleRules { - compacted = append(compacted, *simpleRule) - } - - return compacted, nil -} - -// isSimpleResourceRule returns true if the given rule contains verbs, a single resource, a single API group, at most one Resource Name, and no other values -func isSimpleResourceRule(rule *rbacv1.PolicyRule) (simpleResource, bool) { - resource := simpleResource{} - - // If we have "complex" rule attributes, return early without allocations or expensive comparisons - if len(rule.ResourceNames) > 1 || len(rule.NonResourceURLs) > 0 { - return resource, false - } - // If we have multiple api groups or resources, return early - if len(rule.APIGroups) != 1 || len(rule.Resources) != 1 { - return resource, false - } - - // Test if this rule only contains APIGroups/Resources/Verbs/ResourceNames - simpleRule := &rbacv1.PolicyRule{APIGroups: rule.APIGroups, Resources: rule.Resources, Verbs: rule.Verbs, ResourceNames: rule.ResourceNames} - if !reflect.DeepEqual(simpleRule, rule) { - return resource, false - } - - if len(rule.ResourceNames) == 0 { - resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: false} - } else { - resource = simpleResource{Group: rule.APIGroups[0], Resource: rule.Resources[0], ResourceNameExist: true, ResourceName: rule.ResourceNames[0]} - } - - return resource, true -} - -// BreakdownRule takes a rule and builds an equivalent list of rules that each have at most one verb, one -// resource, and one resource name -func BreakdownRule(rule rbacv1.PolicyRule) []rbacv1.PolicyRule { - subrules := []rbacv1.PolicyRule{} - for _, group := range rule.APIGroups { - for _, resource := range rule.Resources { - for _, verb := range rule.Verbs { - if len(rule.ResourceNames) > 0 { - for _, resourceName := range rule.ResourceNames { - subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}, ResourceNames: []string{resourceName}}) - } - - } else { - subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}}) - } - - } - } - } - - // Non-resource URLs are unique because they only combine with verbs. 
- for _, nonResourceURL := range rule.NonResourceURLs { - for _, verb := range rule.Verbs { - subrules = append(subrules, rbacv1.PolicyRule{NonResourceURLs: []string{nonResourceURL}, Verbs: []string{verb}}) - } - } - - return subrules -} - -// SortableRuleSlice is used to sort rule slice -type SortableRuleSlice []rbacv1.PolicyRule - -func (s SortableRuleSlice) Len() int { return len(s) } -func (s SortableRuleSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s SortableRuleSlice) Less(i, j int) bool { - return strings.Compare(s[i].String(), s[j].String()) < 0 -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/BUILD.bazel deleted file mode 100644 index edceac6eebe7a..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["resource.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/resource", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/resource.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/resource.go deleted file mode 100644 index 0a6f177a64710..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/resource/resource.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "fmt" - "math" - "strconv" - "strings" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/sets" -) - -// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all -// containers of the pod. 
-func PodRequestsAndLimits(pod *corev1.Pod) (reqs, limits corev1.ResourceList) { - reqs, limits = corev1.ResourceList{}, corev1.ResourceList{} - for _, container := range pod.Spec.Containers { - addResourceList(reqs, container.Resources.Requests) - addResourceList(limits, container.Resources.Limits) - } - // init containers define the minimum of any resource - for _, container := range pod.Spec.InitContainers { - maxResourceList(reqs, container.Resources.Requests) - maxResourceList(limits, container.Resources.Limits) - } - return -} - -// addResourceList adds the resources in newList to list -func addResourceList(list, new corev1.ResourceList) { - for name, quantity := range new { - if value, ok := list[name]; !ok { - list[name] = *quantity.Copy() - } else { - value.Add(quantity) - list[name] = value - } - } -} - -// maxResourceList sets list to the greater of list/newList for every resource -// in either list -func maxResourceList(list, new corev1.ResourceList) { - for name, quantity := range new { - if value, ok := list[name]; !ok { - list[name] = *quantity.Copy() - continue - } else { - if quantity.Cmp(value) > 0 { - list[name] = *quantity.Copy() - } - } - } -} - -// ExtractContainerResourceValue extracts the value of a resource -// in an already known container -func ExtractContainerResourceValue(fs *corev1.ResourceFieldSelector, container *corev1.Container) (string, error) { - divisor := resource.Quantity{} - if divisor.Cmp(fs.Divisor) == 0 { - divisor = resource.MustParse("1") - } else { - divisor = fs.Divisor - } - - switch fs.Resource { - case "limits.cpu": - return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) - case "limits.memory": - return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) - case "limits.ephemeral-storage": - return convertResourceEphemeralStorageToString(container.Resources.Limits.StorageEphemeral(), divisor) - case "requests.cpu": - return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) - case "requests.memory": - return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) - case "requests.ephemeral-storage": - return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor) - } - - return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) -} - -// convertResourceCPUToString converts cpu value to the format of divisor and returns -// ceiling of the value. -func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { - c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) - return strconv.FormatInt(c, 10), nil -} - -// convertResourceMemoryToString converts memory value to the format of divisor and returns -// ceiling of the value. -func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { - m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) - return strconv.FormatInt(m, 10), nil -} - -// convertResourceEphemeralStorageToString converts ephemeral storage value to the format of divisor and returns -// ceiling of the value.
-func convertResourceEphemeralStorageToString(ephemeralStorage *resource.Quantity, divisor resource.Quantity) (string, error) { - m := int64(math.Ceil(float64(ephemeralStorage.Value()) / float64(divisor.Value()))) - return strconv.FormatInt(m, 10), nil -} - -var standardContainerResources = sets.NewString( - string(corev1.ResourceCPU), - string(corev1.ResourceMemory), - string(corev1.ResourceEphemeralStorage), -) - -// IsStandardContainerResourceName returns true if the container can make a resource request -// for the specified resource -func IsStandardContainerResourceName(str string) bool { - return standardContainerResources.Has(str) || IsHugePageResourceName(corev1.ResourceName(str)) -} - -// IsHugePageResourceName returns true if the resource name has the huge page -// resource prefix. -func IsHugePageResourceName(name corev1.ResourceName) bool { - return strings.HasPrefix(string(name), corev1.ResourceHugePagesPrefix) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/BUILD.bazel deleted file mode 100644 index 722b8648b312e..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["slice.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/slice", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/slice.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/slice.go deleted file mode 100644 index f997d5cb4059f..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/slice.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package slice - -import ( - "sort" -) - -// SortInts64 sorts []int64 in increasing order -func SortInts64(a []int64) { sort.Slice(a, func(i, j int) bool { return a[i] < a[j] }) } - -// ContainsString checks if a given slice of strings contains the provided string. -// If a modifier func is provided, it is called with the slice item before the comparison.
-func ContainsString(slice []string, s string, modifier func(s string) string) bool { - for _, item := range slice { - if item == s { - return true - } - if modifier != nil && modifier(item) == s { - return true - } - } - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/BUILD.bazel b/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/BUILD.bazel deleted file mode 100644 index 28b358b182eff..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["storage.go"], - importmap = "k8s.io/kops/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage", - importpath = "k8s.io/kubernetes/pkg/kubectl/util/storage", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/storage.go deleted file mode 100644 index b584a4da867b3..0000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/storage/storage.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "strings" -) - -// TODO(yue9944882): Remove this helper package once it's copied to k/api - -// IsDefaultStorageClassAnnotation represents a StorageClass annotation that -// marks a class as the default StorageClass -const IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class" - -// BetaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation. -const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" - -// IsDefaultAnnotationText returns a pretty Yes/No String if -// the annotation is set -func IsDefaultAnnotationText(obj metav1.ObjectMeta) string { - if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" { - return "Yes" - } - if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" { - return "Yes" - } - - return "No" -} - -// GetAccessModesAsString returns a string representation of an array of access modes. -// modes, when present, are always in the same order: RWO,ROX,RWX. 
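[Editorial aside, not part of the diff: a one-line sketch of the access-mode formatting described above, using the importpath from the deleted BUILD.bazel (k8s.io/kubernetes/pkg/kubectl/util/storage); the mode list is invented.]

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubectl/util/storage" // path as vendored before this change
)

func main() {
	modes := []v1.PersistentVolumeAccessMode{v1.ReadWriteMany, v1.ReadWriteOnce, v1.ReadWriteOnce}
	// Duplicates are dropped and the output order is fixed: RWO, then ROX, then RWX.
	fmt.Println(storage.GetAccessModesAsString(modes)) // RWO,RWX
}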
-func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string { - modes = removeDuplicateAccessModes(modes) - modesStr := []string{} - if containsAccessMode(modes, v1.ReadWriteOnce) { - modesStr = append(modesStr, "RWO") - } - if containsAccessMode(modes, v1.ReadOnlyMany) { - modesStr = append(modesStr, "ROX") - } - if containsAccessMode(modes, v1.ReadWriteMany) { - modesStr = append(modesStr, "RWX") - } - return strings.Join(modesStr, ",") -} - -// removeDuplicateAccessModes returns an array of access modes without any duplicates -func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode { - accessModes := []v1.PersistentVolumeAccessMode{} - for _, m := range modes { - if !containsAccessMode(accessModes, m) { - accessModes = append(accessModes, m) - } - } - return accessModes -} - -func containsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - -// GetPersistentVolumeClass returns StorageClassName. -func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { - // Use beta annotation first - if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found { - return class - } - - return volume.Spec.StorageClassName -} - -// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was -// requested, it returns "". -func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { - // Use beta annotation first - if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { - return class - } - - if claim.Spec.StorageClassName != nil { - return *claim.Spec.StorageClassName - } - - return "" -} diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/BUILD.bazel b/vendor/k8s.io/legacy-cloud-providers/aws/BUILD.bazel index a2720a9638b92..f2c1656293f4c 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/BUILD.bazel +++ b/vendor/k8s.io/legacy-cloud-providers/aws/BUILD.bazel @@ -11,6 +11,7 @@ go_library( "aws_routes.go", "aws_utils.go", "device_allocator.go", + "doc.go", "instances.go", "log_handler.go", "retry_handler.go", @@ -45,10 +46,13 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/pkg/version:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/cloud-provider:go_default_library", "//vendor/k8s.io/cloud-provider/node/helpers:go_default_library", @@ -56,6 +60,8 @@ go_library( "//vendor/k8s.io/cloud-provider/volume:go_default_library", "//vendor/k8s.io/cloud-provider/volume/errors:go_default_library", "//vendor/k8s.io/cloud-provider/volume/helpers:go_default_library", + "//vendor/k8s.io/component-base/metrics:go_default_library", + "//vendor/k8s.io/component-base/metrics/legacyregistry:go_default_library", "//vendor/k8s.io/csi-translation-lib/plugins:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], diff --git 
a/vendor/k8s.io/legacy-cloud-providers/aws/OWNERS b/vendor/k8s.io/legacy-cloud-providers/aws/OWNERS index 4f5220d2be1bd..4ea2cf3433633 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/OWNERS +++ b/vendor/k8s.io/legacy-cloud-providers/aws/OWNERS @@ -7,6 +7,7 @@ approvers: - jsafrane - micahhausler - mcrute +- m00nf1sh reviewers: - gnufied - jsafrane @@ -15,3 +16,4 @@ reviewers: - chrislovecnm - nckturner - micahhausler +- m00nf1sh diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/aws.go b/vendor/k8s.io/legacy-cloud-providers/aws/aws.go index c33a4892be0eb..3fc5cdcab659e 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/aws.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/aws.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2014 The Kubernetes Authors. @@ -23,6 +25,7 @@ import ( "io" "net" "path" + "sort" "strconv" "strings" "sync" @@ -44,18 +47,21 @@ import ( "github.com/aws/aws-sdk-go/service/kms" "github.com/aws/aws-sdk-go/service/sts" "gopkg.in/gcfg.v1" + "k8s.io/api/core/v1" "k8s.io/klog" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + informercorev1 "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/pkg/version" + "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/cloud-provider" nodehelpers "k8s.io/cloud-provider/node/helpers" @@ -199,6 +205,11 @@ const ServiceAnnotationLoadBalancerHCTimeout = "service.beta.kubernetes.io/aws-l // service to specify, in seconds, the interval between health checks. const ServiceAnnotationLoadBalancerHCInterval = "service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval" +// ServiceAnnotationLoadBalancerEIPAllocations is the annotation used on the +// service to specify a comma separated list of EIP allocations to use as +// static IP addresses for the NLB. Only supported on elbv2 (NLB) +const ServiceAnnotationLoadBalancerEIPAllocations = "service.beta.kubernetes.io/aws-load-balancer-eip-allocations" + // Event key when a volume is stuck on attaching state when being attached to a volume const volumeAttachmentStuck = "VolumeAttachmentStuck" @@ -225,12 +236,12 @@ const ( createTagFactor = 2.0 createTagSteps = 9 - // encryptedCheck* is configuration of poll for created volume to check - // it has not been silently removed by AWS. + // volumeCreate* is configuration of exponential backoff for created volume. // On a random AWS account (shared among several developers) it took 4s on - // average. - encryptedCheckInterval = 1 * time.Second - encryptedCheckTimeout = 30 * time.Second + // average, 8s max. + volumeCreateInitialDelay = 5 * time.Second + volumeCreateBackoffFactor = 1.2 + volumeCreateBackoffSteps = 10 // Number of node names that can be added to a filter. 
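The encryptedCheck* poll constants (1 second interval, 30 second timeout) are replaced above by the volumeCreate* backoff constants, which drive an exponential backoff while waiting for a freshly created volume to become visible. Below is a minimal, dependency-free sketch of the schedule those three values describe, assuming the usual wait.Backoff semantics where each step's delay is the previous delay multiplied by the factor; it is an illustration of the timing, not code from the patch.

package main

import (
	"fmt"
	"time"
)

// Values copied from the new volumeCreate* constants in aws.go.
const (
	volumeCreateInitialDelay  = 5 * time.Second
	volumeCreateBackoffFactor = 1.2
	volumeCreateBackoffSteps  = 10
)

func main() {
	// Print the wait before each attempt and the cumulative time, assuming
	// wait.Backoff semantics (delay multiplied by Factor after every step).
	delay := volumeCreateInitialDelay
	var total time.Duration
	for step := 1; step <= volumeCreateBackoffSteps; step++ {
		total += delay
		fmt.Printf("step %2d: wait %6.2fs (cumulative %6.2fs)\n", step, delay.Seconds(), total.Seconds())
		delay = time.Duration(float64(delay) * volumeCreateBackoffFactor)
	}
	// With these values the poll spans roughly two minutes in total, well above
	// the 4s average / 8s max creation times quoted in the comment.
}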
The AWS limit is 200 // but we are using a lower limit on purpose @@ -503,8 +514,13 @@ type Cloud struct { instanceCache instanceCache - clientBuilder cloudprovider.ControllerClientBuilder - kubeClient clientset.Interface + clientBuilder cloudprovider.ControllerClientBuilder + kubeClient clientset.Interface + + nodeInformer informercorev1.NodeInformer + // Extract the function out to make it easier to test + nodeInformerHasSynced cache.InformerSynced + eventBroadcaster record.EventBroadcaster eventRecorder record.EventRecorder @@ -521,6 +537,11 @@ type Cloud struct { var _ Volumes = &Cloud{} // CloudConfig wraps the settings for the AWS cloud provider. +// NOTE: Cloud config files should follow the same Kubernetes deprecation policy as +// flags or CLIs. Config fields should not change behavior in incompatible ways and +// should be deprecated for at least 2 release prior to removing. +// See https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-flag-or-cli +// for more details. type CloudConfig struct { Global struct { // TODO: Is there any use for this? We can get it from the instance metadata service @@ -742,6 +763,14 @@ func (p *awsSDKProvider) getCrossRequestRetryDelay(regionName string) *CrossRequ return delayer } +// SetInformers implements InformerUser interface by setting up informer-fed caches for aws lib to +// leverage Kubernetes API for caching +func (c *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) { + klog.Infof("Setting up informers for Cloud") + c.nodeInformer = informerFactory.Core().V1().Nodes() + c.nodeInformerHasSynced = c.nodeInformer.Informer().HasSynced +} + func (p *awsSDKProvider) Compute(regionName string) (EC2, error) { awsConfig := &aws.Config{ Region: ®ionName, @@ -1283,7 +1312,6 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { return nil, err } } - return awsCloud, nil } @@ -2176,13 +2204,13 @@ func wrapAttachError(err error, disk *awsDisk, instance string) error { if disk.awsID != EBSVolumeID(aws.StringValue(a.VolumeId)) { klog.Warningf("Expected to get attachment info of volume %q but instead got info of %q", disk.awsID, aws.StringValue(a.VolumeId)) } else if aws.StringValue(a.State) == "attached" { - return fmt.Errorf("Error attaching EBS volume %q to instance %q: %q. The volume is currently attached to instance %q", disk.awsID, instance, awsError, aws.StringValue(a.InstanceId)) + return fmt.Errorf("error attaching EBS volume %q to instance %q: %q. 
The volume is currently attached to instance %q", disk.awsID, instance, awsError, aws.StringValue(a.InstanceId)) } } } } } - return fmt.Errorf("Error attaching EBS volume %q to instance %q: %q", disk.awsID, instance, err) + return fmt.Errorf("error attaching EBS volume %q to instance %q: %q", disk.awsID, instance, err) } // AttachDisk implements Volumes.AttachDisk @@ -2442,8 +2470,13 @@ func (c *Cloud) waitUntilVolumeAvailable(volumeName KubernetesVolumeID) error { // Unreachable code return err } - - err = wait.Poll(encryptedCheckInterval, encryptedCheckTimeout, func() (done bool, err error) { + time.Sleep(5 * time.Second) + backoff := wait.Backoff{ + Duration: volumeCreateInitialDelay, + Factor: volumeCreateBackoffFactor, + Steps: volumeCreateBackoffSteps, + } + err = wait.ExponentialBackoff(backoff, func() (done bool, err error) { vol, err := disk.describeVolume() if err != nil { return true, err @@ -2769,7 +2802,7 @@ func (c *Cloud) describeLoadBalancerv2(name string) (*elbv2.LoadBalancer, error) return nil, nil } } - return nil, fmt.Errorf("Error describing load balancer: %q", err) + return nil, fmt.Errorf("error describing load balancer: %q", err) } // AWS will not return 2 load balancers with the same name _and_ type. @@ -2786,7 +2819,7 @@ func (c *Cloud) describeLoadBalancerv2(name string) (*elbv2.LoadBalancer, error) func (c *Cloud) findVPCID() (string, error) { macs, err := c.metadata.GetMetadata("network/interfaces/macs/") if err != nil { - return "", fmt.Errorf("Could not list interfaces of the instance: %q", err) + return "", fmt.Errorf("could not list interfaces of the instance: %q", err) } // loop over interfaces, first vpc id returned wins @@ -2801,7 +2834,7 @@ func (c *Cloud) findVPCID() (string, error) { } return vpcID, nil } - return "", fmt.Errorf("Could not find VPC ID in instance metadata") + return "", fmt.Errorf("could not find VPC ID in instance metadata") } // Retrieves the specified security group from the AWS API, or returns nil if not found @@ -3304,9 +3337,16 @@ func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) { continue } + var azNames []string + for key := range subnetsByAZ { + azNames = append(azNames, key) + } + + sort.Strings(azNames) + var subnetIDs []string - for _, subnet := range subnetsByAZ { - subnetIDs = append(subnetIDs, aws.StringValue(subnet.SubnetId)) + for _, key := range azNames { + subnetIDs = append(subnetIDs, aws.StringValue(subnetsByAZ[key].SubnetId)) } return subnetIDs, nil @@ -3339,7 +3379,7 @@ func isSubnetPublic(rt []*ec2.RouteTable, subnetID string) (bool, error) { } if subnetTable == nil { - return false, fmt.Errorf("Could not locate routing table for subnet %s", subnetID) + return false, fmt.Errorf("could not locate routing table for subnet %s", subnetID) } for _, route := range subnetTable.Routes { @@ -4019,7 +4059,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer loadBalancerSecurityGroupID = *securityGroup } if loadBalancerSecurityGroupID == "" { - return fmt.Errorf("Could not determine security group for load balancer: %s", aws.StringValue(lb.LoadBalancerName)) + return fmt.Errorf("could not determine security group for load balancer: %s", aws.StringValue(lb.LoadBalancerName)) } // Get the actual list of groups that allow ingress from the load-balancer @@ -4161,14 +4201,14 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin &elbv2.DescribeTargetGroupsInput{LoadBalancerArn: lb.LoadBalancerArn}, ) if err != nil { - return fmt.Errorf("Error 
listing target groups before deleting load balancer: %q", err) + return fmt.Errorf("error listing target groups before deleting load balancer: %q", err) } _, err = c.elbv2.DeleteLoadBalancer( &elbv2.DeleteLoadBalancerInput{LoadBalancerArn: lb.LoadBalancerArn}, ) if err != nil { - return fmt.Errorf("Error deleting load balancer %q: %v", loadBalancerName, err) + return fmt.Errorf("error deleting load balancer %q: %v", loadBalancerName, err) } for _, group := range targetGroups.TargetGroups { @@ -4176,7 +4216,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin &elbv2.DeleteTargetGroupInput{TargetGroupArn: group.TargetGroupArn}, ) if err != nil { - return fmt.Errorf("Error deleting target groups after deleting load balancer: %q", err) + return fmt.Errorf("error deleting target groups after deleting load balancer: %q", err) } } } @@ -4502,7 +4542,18 @@ func (c *Cloud) findInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, // Returns the instance with the specified node name // Like findInstanceByNodeName, but returns error if node not found func (c *Cloud) getInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, error) { - instance, err := c.findInstanceByNodeName(nodeName) + var instance *ec2.Instance + + // we leverage node cache to try to retrieve node's provider id first, as + // get instance by provider id is way more efficient than by filters in + // aws context + awsID, err := c.nodeNameToProviderID(nodeName) + if err != nil { + klog.V(3).Infof("Unable to convert node name %q to aws instanceID, fall back to findInstanceByNodeName: %v", nodeName, err) + instance, err = c.findInstanceByNodeName(nodeName) + } else { + instance, err = c.getInstanceByID(string(awsID)) + } if err == nil && instance == nil { return nil, cloudprovider.InstanceNotFound } @@ -4522,6 +4573,26 @@ func (c *Cloud) getFullInstance(nodeName types.NodeName) (*awsInstance, *ec2.Ins return awsInstance, instance, err } +func (c *Cloud) nodeNameToProviderID(nodeName types.NodeName) (InstanceID, error) { + if len(nodeName) == 0 { + return "", fmt.Errorf("no nodeName provided") + } + + if c.nodeInformerHasSynced == nil || !c.nodeInformerHasSynced() { + return "", fmt.Errorf("node informer has not synced yet") + } + + node, err := c.nodeInformer.Lister().Get(string(nodeName)) + if err != nil { + return "", err + } + if len(node.Spec.ProviderID) == 0 { + return "", fmt.Errorf("node has no providerID") + } + + return KubernetesInstanceID(node.Spec.ProviderID).MapToAWSInstanceID() +} + func setNodeDisk( nodeDiskMap map[types.NodeName]map[KubernetesVolumeID]bool, volumeID KubernetesVolumeID, diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/aws_fakes.go b/vendor/k8s.io/legacy-cloud-providers/aws/aws_fakes.go index 844a3a0c0af6c..05a2ad6c00e02 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/aws_fakes.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/aws_fakes.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/aws_instancegroups.go b/vendor/k8s.io/legacy-cloud-providers/aws/aws_instancegroups.go index 6c4c59b039e57..c97c96ed6f097 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/aws_instancegroups.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/aws_instancegroups.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2014 The Kubernetes Authors. 
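The new SetInformers hook and nodeNameToProviderID work as a pair: the cloud stores the NodeInformer and its Informer().HasSynced function, and getInstanceByNodeName then reads the node's spec.providerID from that cache before falling back to the filter-based findInstanceByNodeName. The sketch below reproduces that cache-first flow against a fake clientset so it runs without a cluster; the node object is illustrative, and the trailing-segment split is only a stand-in for what KubernetesInstanceID.MapToAWSInstanceID does in the real code.

package main

import (
	"fmt"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// A node with spec.providerID populated, as the cloud node controller would set it.
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "ip-10-0-0-1.ec2.internal"},
		Spec:       v1.NodeSpec{ProviderID: "aws:///us-east-1a/i-0123456789abcdef0"},
	}
	client := fake.NewSimpleClientset(node)

	// What Cloud.SetInformers stores: the NodeInformer and its HasSynced function.
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	nodeInformer := factory.Core().V1().Nodes()
	nodeInformerHasSynced := nodeInformer.Informer().HasSynced

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	if !cache.WaitForCacheSync(stopCh, nodeInformerHasSynced) {
		fmt.Println("cache never synced; would fall back to findInstanceByNodeName")
		return
	}

	// Cache-first lookup, mirroring nodeNameToProviderID: read the node from the
	// lister and take the instance ID from spec.providerID.
	cached, err := nodeInformer.Lister().Get(node.Name)
	if err != nil || cached.Spec.ProviderID == "" {
		fmt.Println("no providerID in cache; would fall back to findInstanceByNodeName")
		return
	}
	parts := strings.Split(cached.Spec.ProviderID, "/")
	fmt.Println("instance ID from node cache:", parts[len(parts)-1]) // i-0123456789abcdef0
}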
diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go b/vendor/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go index 2b189e5ff6c28..06374699ec937 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2014 The Kubernetes Authors. @@ -136,9 +138,17 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa createRequest.Scheme = aws.String("internal") } + var allocationIDs []string + if eipList, present := annotations[ServiceAnnotationLoadBalancerEIPAllocations]; present { + allocationIDs = strings.Split(eipList, ",") + if len(allocationIDs) != len(subnetIDs) { + return nil, fmt.Errorf("error creating load balancer: Must have same number of EIP AllocationIDs (%d) and SubnetIDs (%d)", len(allocationIDs), len(subnetIDs)) + } + } + // We are supposed to specify one subnet per AZ. // TODO: What happens if we have more than one subnet per AZ? - createRequest.SubnetMappings = createSubnetMappings(subnetIDs) + createRequest.SubnetMappings = createSubnetMappings(subnetIDs, allocationIDs) for k, v := range tags { createRequest.Tags = append(createRequest.Tags, &elbv2.Tag{ @@ -149,7 +159,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa klog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) createResponse, err := c.elbv2.CreateLoadBalancer(createRequest) if err != nil { - return nil, fmt.Errorf("Error creating load balancer: %q", err) + return nil, fmt.Errorf("error creating load balancer: %q", err) } loadBalancer = createResponse.LoadBalancers[0] @@ -158,7 +168,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa // duplicate target groups where the backend port is the same _, err := c.createListenerV2(createResponse.LoadBalancers[0].LoadBalancerArn, mappings[i], namespacedName, instanceIDs, *createResponse.LoadBalancers[0].VpcId, tags) if err != nil { - return nil, fmt.Errorf("Error creating listener: %q", err) + return nil, fmt.Errorf("error creating listener: %q", err) } } } else { @@ -172,7 +182,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa }, ) if err != nil { - return nil, fmt.Errorf("Error describing listeners: %q", err) + return nil, fmt.Errorf("error describing listeners: %q", err) } // actual maps FrontendPort to an elbv2.Listener @@ -187,7 +197,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa }, ) if err != nil { - return nil, fmt.Errorf("Error listing target groups: %q", err) + return nil, fmt.Errorf("error listing target groups: %q", err) } nodePortTargetGroup := map[int64]*elbv2.TargetGroup{} @@ -269,7 +279,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa } } if _, err := c.elbv2.ModifyListener(modifyListenerInput); err != nil { - return nil, fmt.Errorf("Error updating load balancer listener: %q", err) + return nil, fmt.Errorf("error updating load balancer listener: %q", err) } } @@ -278,7 +288,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa if _, err := c.elbv2.DeleteTargetGroup(&elbv2.DeleteTargetGroupInput{ TargetGroupArn: listener.DefaultActions[0].TargetGroupArn, }); err != nil { - return nil, fmt.Errorf("Error deleting old target group: %q", err) + return nil, fmt.Errorf("error deleting old target group: %q", err) } } else { // Run 
ensureTargetGroup to make sure instances in service are up-to-date @@ -337,7 +347,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa }, ) if err != nil { - return nil, fmt.Errorf("Error retrieving load balancer after update: %q", err) + return nil, fmt.Errorf("error retrieving load balancer after update: %q", err) } loadBalancer = loadBalancers.LoadBalancers[0] } @@ -494,7 +504,7 @@ func (c *Cloud) createListenerV2(loadBalancerArn *string, mapping nlbPortMapping klog.Infof("Creating load balancer listener for %v", namespacedName) createListenerOutput, err := c.elbv2.CreateListener(createListernerInput) if err != nil { - return nil, fmt.Errorf("Error creating load balancer listener: %q", err) + return nil, fmt.Errorf("error creating load balancer listener: %q", err) } return createListenerOutput.Listeners[0], nil } @@ -503,11 +513,11 @@ func (c *Cloud) createListenerV2(loadBalancerArn *string, mapping nlbPortMapping func (c *Cloud) deleteListenerV2(listener *elbv2.Listener) error { _, err := c.elbv2.DeleteListener(&elbv2.DeleteListenerInput{ListenerArn: listener.ListenerArn}) if err != nil { - return fmt.Errorf("Error deleting load balancer listener: %q", err) + return fmt.Errorf("error deleting load balancer listener: %q", err) } _, err = c.elbv2.DeleteTargetGroup(&elbv2.DeleteTargetGroupInput{TargetGroupArn: listener.DefaultActions[0].TargetGroupArn}) if err != nil { - return fmt.Errorf("Error deleting load balancer target group: %q", err) + return fmt.Errorf("error deleting load balancer target group: %q", err) } return nil } @@ -544,10 +554,10 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty result, err := c.elbv2.CreateTargetGroup(input) if err != nil { - return nil, fmt.Errorf("Error creating load balancer target group: %q", err) + return nil, fmt.Errorf("error creating load balancer target group: %q", err) } if len(result.TargetGroups) != 1 { - return nil, fmt.Errorf("Expected only one target group on CreateTargetGroup, got %d groups", len(result.TargetGroups)) + return nil, fmt.Errorf("expected only one target group on CreateTargetGroup, got %d groups", len(result.TargetGroups)) } if len(tags) != 0 { @@ -579,7 +589,7 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty _, err = c.elbv2.RegisterTargets(registerInput) if err != nil { - return nil, fmt.Errorf("Error registering targets for load balancer: %q", err) + return nil, fmt.Errorf("error registering targets for load balancer: %q", err) } return result.TargetGroups[0], nil @@ -589,7 +599,7 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty { healthResponse, err := c.elbv2.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{TargetGroupArn: targetGroup.TargetGroupArn}) if err != nil { - return nil, fmt.Errorf("Error describing target group health: %q", err) + return nil, fmt.Errorf("error describing target group health: %q", err) } actualIDs := []string{} for _, healthDescription := range healthResponse.TargetHealthDescriptions { @@ -623,7 +633,7 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty } _, err := c.elbv2.RegisterTargets(registerInput) if err != nil { - return nil, fmt.Errorf("Error registering new targets in target group: %q", err) + return nil, fmt.Errorf("error registering new targets in target group: %q", err) } dirty = true } @@ -641,7 +651,7 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty } _, err := 
c.elbv2.DeregisterTargets(deregisterInput) if err != nil { - return nil, fmt.Errorf("Error trying to deregister targets in target group: %q", err) + return nil, fmt.Errorf("error trying to deregister targets in target group: %q", err) } dirty = true } @@ -671,7 +681,7 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty if dirtyHealthCheck { _, err := c.elbv2.ModifyTargetGroup(input) if err != nil { - return nil, fmt.Errorf("Error modifying target group health check: %q", err) + return nil, fmt.Errorf("error modifying target group health check: %q", err) } dirty = true @@ -683,7 +693,7 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty TargetGroupArns: []*string{targetGroup.TargetGroupArn}, }) if err != nil { - return nil, fmt.Errorf("Error retrieving target group after creation/update: %q", err) + return nil, fmt.Errorf("error retrieving target group after creation/update: %q", err) } targetGroup = result.TargetGroups[0] } @@ -696,10 +706,10 @@ func (c *Cloud) getVpcCidrBlocks() ([]string, error) { VpcIds: []*string{aws.String(c.vpcID)}, }) if err != nil { - return nil, fmt.Errorf("Error querying VPC for ELB: %q", err) + return nil, fmt.Errorf("error querying VPC for ELB: %q", err) } if len(vpcs.Vpcs) != 1 { - return nil, fmt.Errorf("Error querying VPC for ELB, got %d vpcs for %s", len(vpcs.Vpcs), c.vpcID) + return nil, fmt.Errorf("error querying VPC for ELB, got %d vpcs for %s", len(vpcs.Vpcs), c.vpcID) } cidrBlocks := make([]string, 0, len(vpcs.Vpcs[0].CidrBlockAssociationSet)) @@ -1222,12 +1232,15 @@ func elbListenersAreEqual(actual, expected *elb.Listener) bool { return true } -func createSubnetMappings(subnetIDs []string) []*elbv2.SubnetMapping { +func createSubnetMappings(subnetIDs []string, allocationIDs []string) []*elbv2.SubnetMapping { response := []*elbv2.SubnetMapping{} - for _, id := range subnetIDs { - // Ignore AllocationId for now - response = append(response, &elbv2.SubnetMapping{SubnetId: aws.String(id)}) + for index, id := range subnetIDs { + sm := &elbv2.SubnetMapping{SubnetId: aws.String(id)} + if len(allocationIDs) > 0 { + sm.AllocationId = aws.String(allocationIDs[index]) + } + response = append(response, sm) } return response diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/aws_metrics.go b/vendor/k8s.io/legacy-cloud-providers/aws/aws_metrics.go index bb7958f04f08b..90153ec35ce7e 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/aws_metrics.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/aws_metrics.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. 
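Taken together, the ensureLoadBalancerv2 validation and the new createSubnetMappings signature turn the aws-load-balancer-eip-allocations annotation into per-subnet SubnetMapping entries: the comma separated allocation list must contain exactly one entry per subnet and is then paired with the subnets by index. A hedged, standalone sketch of that flow; the annotation value and subnet IDs are made up for illustration.

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

// subnetMappingsFromAnnotation mirrors the two new steps: validate that the
// annotation has one EIP allocation per subnet, then pair them by index.
// AllocationId is only set when allocations were supplied at all.
func subnetMappingsFromAnnotation(annotation string, subnetIDs []string) ([]*elbv2.SubnetMapping, error) {
	var allocationIDs []string
	if annotation != "" {
		allocationIDs = strings.Split(annotation, ",")
		if len(allocationIDs) != len(subnetIDs) {
			return nil, fmt.Errorf("must have same number of EIP AllocationIDs (%d) and SubnetIDs (%d)",
				len(allocationIDs), len(subnetIDs))
		}
	}
	mappings := make([]*elbv2.SubnetMapping, 0, len(subnetIDs))
	for i, id := range subnetIDs {
		sm := &elbv2.SubnetMapping{SubnetId: aws.String(id)}
		if len(allocationIDs) > 0 {
			sm.AllocationId = aws.String(allocationIDs[i])
		}
		mappings = append(mappings, sm)
	}
	return mappings, nil
}

func main() {
	// Example value for service.beta.kubernetes.io/aws-load-balancer-eip-allocations.
	mappings, err := subnetMappingsFromAnnotation(
		"eipalloc-111,eipalloc-222",
		[]string{"subnet-aaa", "subnet-bbb"},
	)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, sm := range mappings {
		fmt.Println(aws.StringValue(sm.SubnetId), "->", aws.StringValue(sm.AllocationId))
	}
}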
@@ -20,27 +22,33 @@ import ( "sync" "github.com/prometheus/client_golang/prometheus" + + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" ) var ( - awsAPIMetric = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "cloudprovider_aws_api_request_duration_seconds", - Help: "Latency of AWS API calls", + awsAPIMetric = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Name: "cloudprovider_aws_api_request_duration_seconds", + Help: "Latency of AWS API calls", + StabilityLevel: metrics.ALPHA, }, []string{"request"}) - awsAPIErrorMetric = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "cloudprovider_aws_api_request_errors", - Help: "AWS API errors", + awsAPIErrorMetric = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "cloudprovider_aws_api_request_errors", + Help: "AWS API errors", + StabilityLevel: metrics.ALPHA, }, []string{"request"}) - awsAPIThrottlesMetric = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "cloudprovider_aws_api_throttled_requests_total", - Help: "AWS API throttled requests", + awsAPIThrottlesMetric = metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "cloudprovider_aws_api_throttled_requests_total", + Help: "AWS API throttled requests", + StabilityLevel: metrics.ALPHA, }, []string{"operation_name"}) ) @@ -61,8 +69,8 @@ var registerOnce sync.Once func registerMetrics() { registerOnce.Do(func() { - prometheus.MustRegister(awsAPIMetric) - prometheus.MustRegister(awsAPIErrorMetric) - prometheus.MustRegister(awsAPIThrottlesMetric) + legacyregistry.MustRegister(awsAPIMetric) + legacyregistry.MustRegister(awsAPIErrorMetric) + legacyregistry.MustRegister(awsAPIThrottlesMetric) }) } diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/aws_routes.go b/vendor/k8s.io/legacy-cloud-providers/aws/aws_routes.go index 36e6696f2ca19..3c37a12928d8e 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/aws_routes.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/aws_routes.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2014 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/aws_utils.go b/vendor/k8s.io/legacy-cloud-providers/aws/aws_utils.go index bd373d64e5881..27ce55a92c6f7 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/aws_utils.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/aws_utils.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2014 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/device_allocator.go b/vendor/k8s.io/legacy-cloud-providers/aws/device_allocator.go index ae5b0275773af..084962b883888 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/device_allocator.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/device_allocator.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2016 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/doc.go b/vendor/k8s.io/legacy-cloud-providers/aws/doc.go new file mode 100644 index 0000000000000..94cbbd1fc88ea --- /dev/null +++ b/vendor/k8s.io/legacy-cloud-providers/aws/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package aws diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/instances.go b/vendor/k8s.io/legacy-cloud-providers/aws/instances.go index e18d559b5bb7b..53f2939d7cd40 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/instances.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/instances.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/log_handler.go b/vendor/k8s.io/legacy-cloud-providers/aws/log_handler.go index 9328fd284ac7a..15c13e11e3e2a 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/log_handler.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/log_handler.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2015 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/retry_handler.go b/vendor/k8s.io/legacy-cloud-providers/aws/retry_handler.go index 0fe6c2a575342..ddb1d0061b31f 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/retry_handler.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/retry_handler.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2015 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/sets_ippermissions.go b/vendor/k8s.io/legacy-cloud-providers/aws/sets_ippermissions.go index 201b8ac9cf00b..fc56ad45f7e98 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/sets_ippermissions.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/sets_ippermissions.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2016 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/tags.go b/vendor/k8s.io/legacy-cloud-providers/aws/tags.go index f2f74baf70257..b5509c042f277 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/tags.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/tags.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. @@ -65,7 +67,7 @@ type awsTagging struct { func (t *awsTagging) init(legacyClusterID string, clusterID string) error { if legacyClusterID != "" { if clusterID != "" && legacyClusterID != clusterID { - return fmt.Errorf("ClusterID tags did not match: %q vs %q", clusterID, legacyClusterID) + return fmt.Errorf("clusterID tags did not match: %q vs %q", clusterID, legacyClusterID) } t.usesLegacyTags = true clusterID = legacyClusterID diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/volumes.go b/vendor/k8s.io/legacy-cloud-providers/aws/volumes.go index 0c8a24fa5308f..099a377a2fabb 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/volumes.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/volumes.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2016 The Kubernetes Authors. 
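Back in aws_metrics.go, the move from the prometheus client to k8s.io/component-base/metrics plus legacyregistry keeps the same metric names but adds an explicit StabilityLevel and registers against the component's shared legacy registry rather than the global Prometheus registry. A small sketch of the same define-and-register pattern; the metric name here is hypothetical and not one added by this patch.

package main

import (
	"sync"

	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

var (
	// Hypothetical metric, declared the way the aws_metrics.go counters now are:
	// a pointer to the opts struct and an explicit ALPHA stability level.
	exampleAPIRequests = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Name:           "cloudprovider_example_api_requests_total",
			Help:           "Example cloud API requests",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"request"})

	registerOnce sync.Once
)

func registerMetrics() {
	// legacyregistry.MustRegister feeds the component's default /metrics
	// endpoint, which is what prometheus.MustRegister used to do here.
	registerOnce.Do(func() {
		legacyregistry.MustRegister(exampleAPIRequests)
	})
}

func main() {
	registerMetrics()
	exampleAPIRequests.WithLabelValues("describe_instances").Inc()
}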
@@ -101,7 +103,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type // This should never happen but if it does it could mean there was a race and instance // has been deleted if err != nil { - fetchErr := fmt.Errorf("Error fetching instance %s for volume %s", instanceID, diskName) + fetchErr := fmt.Errorf("error fetching instance %s for volume %s", instanceID, diskName) klog.Warning(fetchErr) return awsDiskInfo, false, fetchErr } diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/BUILD.bazel b/vendor/k8s.io/legacy-cloud-providers/gce/BUILD.bazel index 28575b03168fb..54ce7d642d4eb 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/BUILD.bazel +++ b/vendor/k8s.io/legacy-cloud-providers/gce/BUILD.bazel @@ -47,7 +47,6 @@ go_library( "//vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter:go_default_library", "//vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta:go_default_library", "//vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/golang.org/x/oauth2/google:go_default_library", "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", @@ -55,6 +54,7 @@ go_library( "//vendor/google.golang.org/api/compute/v1:go_default_library", "//vendor/google.golang.org/api/container/v1:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/google.golang.org/api/option:go_default_library", "//vendor/google.golang.org/api/tpu/v1:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -81,6 +81,8 @@ go_library( "//vendor/k8s.io/cloud-provider/volume:go_default_library", "//vendor/k8s.io/cloud-provider/volume/errors:go_default_library", "//vendor/k8s.io/cloud-provider/volume/helpers:go_default_library", + "//vendor/k8s.io/component-base/metrics:go_default_library", + "//vendor/k8s.io/component-base/metrics/legacyregistry:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/net:go_default_library", ], diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce.go index 0a22a6a59b651..1de777909c09a 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2014 The Kubernetes Authors. @@ -36,6 +38,7 @@ import ( computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" container "google.golang.org/api/container/v1" + "google.golang.org/api/option" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" @@ -186,6 +189,11 @@ type ConfigGlobal struct { } // ConfigFile is the struct used to parse the /etc/gce.conf configuration file. +// NOTE: Cloud config files should follow the same Kubernetes deprecation policy as +// flags or CLIs. Config fields should not change behavior in incompatible ways and +// should be deprecated for at least 2 release prior to removing. +// See https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-flag-or-cli +// for more details. 
type ConfigFile struct { Global ConfigGlobal `gcfg:"global"` } @@ -388,31 +396,19 @@ func CreateGCECloud(config *CloudConfig) (*Cloud, error) { config.NetworkProjectID = config.ProjectID } - client, err := newOauthClient(config.TokenSource) - if err != nil { - return nil, err - } - service, err := compute.New(client) + service, err := compute.NewService(context.Background(), option.WithTokenSource(config.TokenSource)) if err != nil { return nil, err } service.UserAgent = userAgent - client, err = newOauthClient(config.TokenSource) - if err != nil { - return nil, err - } - serviceBeta, err := computebeta.New(client) + serviceBeta, err := computebeta.NewService(context.Background(), option.WithTokenSource(config.TokenSource)) if err != nil { return nil, err } serviceBeta.UserAgent = userAgent - client, err = newOauthClient(config.TokenSource) - if err != nil { - return nil, err - } - serviceAlpha, err := computealpha.New(client) + serviceAlpha, err := computealpha.NewService(context.Background(), option.WithTokenSource(config.TokenSource)) if err != nil { return nil, err } @@ -428,7 +424,7 @@ func CreateGCECloud(config *CloudConfig) (*Cloud, error) { serviceAlpha.BasePath = fmt.Sprintf("%sprojects/", strings.Replace(config.APIEndpoint, "v1", "alpha", -1)) } - containerService, err := container.New(client) + containerService, err := container.NewService(context.Background(), option.WithTokenSource(config.TokenSource)) if err != nil { return nil, err } @@ -437,6 +433,10 @@ func CreateGCECloud(config *CloudConfig) (*Cloud, error) { containerService.BasePath = config.ContainerAPIEndpoint } + client, err := newOauthClient(config.TokenSource) + if err != nil { + return nil, err + } tpuService, err := newTPUService(client) if err != nil { return nil, err @@ -856,7 +856,7 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) { if tokenSource == nil { var err error tokenSource, err = google.DefaultTokenSource( - oauth2.NoContext, + context.Background(), compute.CloudPlatformScope, compute.ComputeScope) klog.Infof("Using DefaultTokenSource %#v", tokenSource) @@ -883,7 +883,7 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) { return nil, err } - return oauth2.NewClient(oauth2.NoContext, tokenSource), nil + return oauth2.NewClient(context.Background(), tokenSource), nil } func (manager *gceServiceManager) getProjectsAPIEndpoint() string { @@ -894,12 +894,3 @@ func (manager *gceServiceManager) getProjectsAPIEndpoint() string { return projectsAPIEndpoint } - -func (manager *gceServiceManager) getProjectsAPIEndpointBeta() string { - projectsAPIEndpoint := gceComputeAPIEndpointBeta + "projects/" - if manager.gce.service != nil { - projectsAPIEndpoint = manager.gce.serviceBeta.BasePath - } - - return projectsAPIEndpoint -} diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_address_manager.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_address_manager.go index 7674e36df60df..82466ea3fc53a 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_address_manager.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_address_manager.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. 
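CreateGCECloud no longer builds a separate oauth2 *http.Client per API: each service is constructed with the NewService constructors and option.WithTokenSource, and only the TPU client still goes through newOauthClient. The sketch below shows the same constructor shape in isolation; it assumes Application Default Credentials are available at runtime, and the user agent string is a placeholder rather than the provider's real userAgent.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// The patch hands the configured TokenSource straight to the service
	// constructor instead of wrapping it in an *http.Client first.
	ts, err := google.DefaultTokenSource(ctx, compute.CloudPlatformScope, compute.ComputeScope)
	if err != nil {
		log.Fatalf("no default token source: %v", err)
	}

	svc, err := compute.NewService(ctx, option.WithTokenSource(ts))
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}
	svc.UserAgent = "gce-cloudprovider-example"
}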
diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_addresses.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_addresses.go index 08ef981157e43..b161a4986522d 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_addresses.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_addresses.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_alpha.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_alpha.go index 598f1e336d3f3..70b84d35b11a8 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_alpha.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_alpha.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. @@ -16,16 +18,15 @@ limitations under the License. package gce -import ( - "fmt" -) - const ( // AlphaFeatureNetworkTiers allows Services backed by a GCP load balancer to choose // what network tier to use. Currently supports "Standard" and "Premium" (default). // // alpha: v1.8 (for Services) AlphaFeatureNetworkTiers = "NetworkTiers" + // AlphaFeatureILBSubsets allows InternalLoadBalancer services to include a subset + // of cluster nodes as backends instead of all nodes. + AlphaFeatureILBSubsets = "ILBSubsets" ) // AlphaFeatureGate contains a mapping of alpha features to whether they are enabled @@ -35,6 +36,9 @@ type AlphaFeatureGate struct { // Enabled returns true if the provided alpha feature is enabled func (af *AlphaFeatureGate) Enabled(key string) bool { + if af == nil || af.features == nil { + return false + } return af.features[key] } @@ -46,10 +50,3 @@ func NewAlphaFeatureGate(features []string) *AlphaFeatureGate { } return &AlphaFeatureGate{featureMap} } - -func (g *Cloud) alphaFeatureEnabled(feature string) error { - if !g.AlphaFeatureGate.Enabled(feature) { - return fmt.Errorf("alpha feature %q is not enabled", feature) - } - return nil -} diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_annotations.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_annotations.go index 43e11660f7838..c2a3602f71452 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_annotations.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_annotations.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. @@ -31,8 +33,11 @@ type LoadBalancerType string const ( // ServiceAnnotationLoadBalancerType is annotated on a service with type LoadBalancer // dictates what specific kind of GCP LB should be assembled. - // Currently, only "internal" is supported. - ServiceAnnotationLoadBalancerType = "cloud.google.com/load-balancer-type" + // Currently, only "Internal" is supported. + ServiceAnnotationLoadBalancerType = "networking.gke.io/load-balancer-type" + + // Deprecating the old-style naming of LoadBalancerType annotation + deprecatedServiceAnnotationLoadBalancerType = "cloud.google.com/load-balancer-type" // LBTypeInternal is the constant for the official internal type. LBTypeInternal LoadBalancerType = "Internal" @@ -48,6 +53,11 @@ const ( // This annotation did not correctly specify "alpha", so both annotations will be checked. deprecatedServiceAnnotationILBBackendShare = "cloud.google.com/load-balancer-backend-share" + // ServiceAnnotationILBAllowGlobalAccess is annotated on a service with "true" when users + // want to access the Internal LoadBalancer globally, and not restricted to the region it is + // created in. 
+ ServiceAnnotationILBAllowGlobalAccess = "networking.gke.io/internal-load-balancer-allow-global-access" + // NetworkTierAnnotationKey is annotated on a Service object to indicate which // network tier a GCP LB should use. The valid values are "Standard" and // "Premium" (default). @@ -58,29 +68,26 @@ const ( // NetworkTierAnnotationPremium is an annotation to indicate the Service is on the Premium network tier NetworkTierAnnotationPremium = cloud.NetworkTierPremium - - // NEGAnnotation is an annotation to indicate that the loadbalancer service is using NEGs instead of InstanceGroups - NEGAnnotation = "cloud.google.com/neg" ) // GetLoadBalancerAnnotationType returns the type of GCP load balancer which should be assembled. -func GetLoadBalancerAnnotationType(service *v1.Service) (LoadBalancerType, bool) { - v := LoadBalancerType("") - if service.Spec.Type != v1.ServiceTypeLoadBalancer { - return v, false - } - - l, ok := service.Annotations[ServiceAnnotationLoadBalancerType] - v = LoadBalancerType(l) - if !ok { - return v, false +func GetLoadBalancerAnnotationType(service *v1.Service) LoadBalancerType { + var lbType LoadBalancerType + for _, ann := range []string{ + ServiceAnnotationLoadBalancerType, + deprecatedServiceAnnotationLoadBalancerType, + } { + if v, ok := service.Annotations[ann]; ok { + lbType = LoadBalancerType(v) + break + } } - switch v { + switch lbType { case LBTypeInternal, deprecatedTypeInternalLowerCase: - return LBTypeInternal, true + return LBTypeInternal default: - return v, false + return lbType } } @@ -119,3 +126,16 @@ func GetServiceNetworkTier(service *v1.Service) (cloud.NetworkTier, error) { return cloud.NetworkTierDefault, fmt.Errorf("unsupported network tier: %q", v) } } + +// ILBOptions represents the extra options specified when creating a +// load balancer. +type ILBOptions struct { + // AllowGlobalAccess Indicates whether global access is allowed for the LoadBalancer + AllowGlobalAccess bool +} + +// GetLoadBalancerAnnotationAllowGlobalAccess returns if global access is enabled +// for the given loadbalancer service. +func GetLoadBalancerAnnotationAllowGlobalAccess(service *v1.Service) bool { + return service.Annotations[ServiceAnnotationILBAllowGlobalAccess] == "true" +} diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_backendservice.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_backendservice.go index aa0f52d212bef..3aca49cd835de 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_backendservice.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_backendservice.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_cert.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_cert.go index 8d9481945c02a..7350210acbcca 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_cert.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_cert.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go index 2f40167788d84..b2e2a1deaf99f 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_clusterid.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. 
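GetLoadBalancerAnnotationType now resolves the type from the new networking.gke.io key first and falls back to the deprecated cloud.google.com key, normalising both spellings of internal to LBTypeInternal, while the new global-access annotation is a plain string comparison against "true". A dependency-free sketch of both lookups; the lower-case "internal" value for the deprecated spelling is an assumption based on the deprecatedTypeInternalLowerCase constant referenced in the switch.

package main

import "fmt"

type LoadBalancerType string

const (
	// Annotation keys copied from gce_annotations.go in this patch.
	annoLBType            = "networking.gke.io/load-balancer-type"
	deprecatedAnnoLBType  = "cloud.google.com/load-balancer-type"
	annoAllowGlobalAccess = "networking.gke.io/internal-load-balancer-allow-global-access"

	LBTypeInternal LoadBalancerType = "Internal"
	// Assumed value of the deprecated lower-case spelling accepted for compatibility.
	lbTypeInternalLowerCase LoadBalancerType = "internal"
)

// lbTypeFromAnnotations mirrors the new GetLoadBalancerAnnotationType: the new
// key wins over the deprecated one, and both spellings of internal normalise to
// LBTypeInternal. Anything else (including no annotation) means external.
func lbTypeFromAnnotations(annotations map[string]string) LoadBalancerType {
	var lbType LoadBalancerType
	for _, key := range []string{annoLBType, deprecatedAnnoLBType} {
		if v, ok := annotations[key]; ok {
			lbType = LoadBalancerType(v)
			break
		}
	}
	if lbType == LBTypeInternal || lbType == lbTypeInternalLowerCase {
		return LBTypeInternal
	}
	return lbType
}

// allowGlobalAccess mirrors GetLoadBalancerAnnotationAllowGlobalAccess.
func allowGlobalAccess(annotations map[string]string) bool {
	return annotations[annoAllowGlobalAccess] == "true"
}

func main() {
	annotations := map[string]string{
		deprecatedAnnoLBType:  "internal",
		annoAllowGlobalAccess: "true",
	}
	fmt.Println(lbTypeFromAnnotations(annotations), allowGlobalAccess(annotations)) // Internal true
	fmt.Println(lbTypeFromAnnotations(nil), allowGlobalAccess(nil))                 // "" false (external)
}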
diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_clusters.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_clusters.go index 379f5396a253f..f313b3bac1ca7 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_clusters.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_clusters.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_disks.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_disks.go index c64e66cd1a069..ba18584fea8be 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_disks.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_disks.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_fake.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_fake.go index 2394cb47333ba..07c42477f9ecb 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_fake.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_fake.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2018 The Kubernetes Authors. @@ -17,8 +19,7 @@ limitations under the License. package gce import ( - "fmt" - "net/http" + "context" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" compute "google.golang.org/api/compute/v1" @@ -48,12 +49,6 @@ func DefaultTestClusterValues() TestClusterValues { } } -type fakeRoundTripper struct{} - -func (*fakeRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { - return nil, fmt.Errorf("err: test used fake http client") -} - // Stubs ClusterID so that ClusterID.getOrInitialize() does not require calling // gce.Initialize() func fakeClusterID(clusterID string) ClusterID { @@ -67,8 +62,7 @@ func fakeClusterID(clusterID string) ClusterID { // NewFakeGCECloud constructs a fake GCE Cloud from the cluster values. func NewFakeGCECloud(vals TestClusterValues) *Cloud { - client := &http.Client{Transport: &fakeRoundTripper{}} - service, _ := compute.New(client) + service, _ := compute.NewService(context.Background()) gce := &Cloud{ region: vals.Region, service: service, diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_firewall.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_firewall.go index d8b2ae749f30e..357f85d1fb2b9 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_firewall.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_firewall.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_forwardingrule.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_forwardingrule.go index af12aa47c2b3f..21b361f219c5e 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_forwardingrule.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_forwardingrule.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. @@ -21,6 +23,7 @@ import ( "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" computealpha "google.golang.org/api/compute/v0.alpha" + computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" ) @@ -100,6 +103,16 @@ func (g *Cloud) GetAlphaRegionForwardingRule(name, region string) (*computealpha return v, mc.Observe(err) } +// GetBetaRegionForwardingRule returns the Beta forwarding rule by name & region. 
+func (g *Cloud) GetBetaRegionForwardingRule(name, region string) (*computebeta.ForwardingRule, error) { + ctx, cancel := cloud.ContextWithCallTimeout() + defer cancel() + + mc := newForwardingRuleMetricContextWithVersion("get", region, computeBetaVersion) + v, err := g.c.BetaForwardingRules().Get(ctx, meta.RegionalKey(name, region)) + return v, mc.Observe(err) +} + // ListRegionForwardingRules lists all RegionalForwardingRules in the project & region. func (g *Cloud) ListRegionForwardingRules(region string) ([]*compute.ForwardingRule, error) { ctx, cancel := cloud.ContextWithCallTimeout() @@ -120,6 +133,16 @@ func (g *Cloud) ListAlphaRegionForwardingRules(region string) ([]*computealpha.F return v, mc.Observe(err) } +// ListBetaRegionForwardingRules lists all RegionalForwardingRules in the project & region. +func (g *Cloud) ListBetaRegionForwardingRules(region string) ([]*computebeta.ForwardingRule, error) { + ctx, cancel := cloud.ContextWithCallTimeout() + defer cancel() + + mc := newForwardingRuleMetricContextWithVersion("list", region, computeBetaVersion) + v, err := g.c.BetaForwardingRules().List(ctx, region, filter.None) + return v, mc.Observe(err) +} + // CreateRegionForwardingRule creates and returns a // RegionalForwardingRule that points to the given BackendService func (g *Cloud) CreateRegionForwardingRule(rule *compute.ForwardingRule, region string) error { @@ -131,7 +154,7 @@ func (g *Cloud) CreateRegionForwardingRule(rule *compute.ForwardingRule, region } // CreateAlphaRegionForwardingRule creates and returns an Alpha -// forwarding fule in the given region. +// forwarding rule in the given region. func (g *Cloud) CreateAlphaRegionForwardingRule(rule *computealpha.ForwardingRule, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -140,6 +163,16 @@ func (g *Cloud) CreateAlphaRegionForwardingRule(rule *computealpha.ForwardingRul return mc.Observe(g.c.AlphaForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule)) } +// CreateBetaRegionForwardingRule creates and returns a Beta +// forwarding rule in the given region. +func (g *Cloud) CreateBetaRegionForwardingRule(rule *computebeta.ForwardingRule, region string) error { + ctx, cancel := cloud.ContextWithCallTimeout() + defer cancel() + + mc := newForwardingRuleMetricContextWithVersion("create", region, computeBetaVersion) + return mc.Observe(g.c.BetaForwardingRules().Insert(ctx, meta.RegionalKey(rule.Name, region), rule)) +} + // DeleteRegionForwardingRule deletes the RegionalForwardingRule by name & region. func (g *Cloud) DeleteRegionForwardingRule(name, region string) error { ctx, cancel := cloud.ContextWithCallTimeout() diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_healthchecks.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_healthchecks.go index 7572fd7deb66e..7bb222b294196 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_healthchecks.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_healthchecks.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. 
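The Beta forwarding-rule wrappers added to gce_forwardingrule.go follow the Alpha ones exactly (call-timeout context, versioned metric context, RegionalKey) and exist so the internal load balancer path can read and create Beta rules. A caller of GetBetaRegionForwardingRule typically has to distinguish "rule does not exist yet" from a real API failure before deciding to create one; the sketch below shows that check, with isNotFound as a hypothetical stand-in for the provider's own not-found helpers rather than anything added by this patch.

package main

import (
	"fmt"
	"net/http"

	"google.golang.org/api/googleapi"
)

// isNotFound reports whether err is a googleapi 404, the condition under which
// a caller would go on to create the Beta forwarding rule instead of failing.
func isNotFound(err error) bool {
	apiErr, ok := err.(*googleapi.Error)
	return ok && apiErr.Code == http.StatusNotFound
}

func main() {
	err := &googleapi.Error{Code: http.StatusNotFound, Message: "forwarding rule not found"}
	fmt.Println(isNotFound(err)) // true: safe to create the rule
}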
@@ -26,7 +28,7 @@ import ( "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" utilversion "k8s.io/apimachinery/pkg/util/version" ) diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_instancegroup.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_instancegroup.go index 8fbdf8ae55d5d..a1dad3713d1fb 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_instancegroup.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_instancegroup.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_instances.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_instances.go index 5cd2679c66bcf..15d7b6f2e3f73 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_instances.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_instances.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. @@ -109,7 +111,7 @@ func (g *Cloud) NodeAddresses(_ context.Context, _ types.NodeName) ([]v1.NodeAdd // NodeAddressesByProviderID will not be called from the node that is requesting this ID. // i.e. metadata service and other local methods cannot be used here func (g *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) { - ctx, cancel := cloud.ContextWithCallTimeout() + timeoutCtx, cancel := cloud.ContextWithCallTimeout() defer cancel() _, zone, name, err := splitProviderID(providerID) @@ -117,7 +119,7 @@ func (g *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string return []v1.NodeAddress{}, err } - instance, err := g.c.Instances().Get(ctx, meta.ZonalKey(canonicalizeInstanceName(name), zone)) + instance, err := g.c.Instances().Get(timeoutCtx, meta.ZonalKey(canonicalizeInstanceName(name), zone)) if err != nil { return []v1.NodeAddress{}, fmt.Errorf("error while querying for providerID %q: %v", providerID, err) } diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_interfaces.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_interfaces.go index 43e1ccab9cfc0..7097ddd5a3339 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_interfaces.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_interfaces.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer.go index 7bc1ea1d1eb51..35e871d7e5f78 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. @@ -150,7 +152,11 @@ func (g *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, svc default: status, err = g.ensureExternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes) } - klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done ensuring loadbalancer. 
err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) + if err != nil { + klog.Errorf("Failed to EnsureLoadBalancer(%s, %s, %s, %s, %s), err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) + return status, err + } + klog.V(4).Infof("EnsureLoadBalancer(%s, %s, %s, %s, %s): done ensuring loadbalancer.", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region) return status, err } @@ -197,7 +203,7 @@ func (g *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin } func getSvcScheme(svc *v1.Service) cloud.LbScheme { - if typ, ok := GetLoadBalancerAnnotationType(svc); ok && typ == LBTypeInternal { + if t := GetLoadBalancerAnnotationType(svc); t == LBTypeInternal { return cloud.SchemeInternal } return cloud.SchemeExternal diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external.go index dedebe712556b..f9ff6853ac1f6 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. @@ -36,6 +38,10 @@ import ( "k8s.io/klog" ) +const ( + errStrLbNoHosts = "cannot EnsureLoadBalancer() with no hosts" +) + // ensureExternalLoadBalancer is the external implementation of LoadBalancer.EnsureLoadBalancer. // Our load balancers in GCE consist of four separate GCE resources - a static // IP address, a firewall rule, a target pool, and a forwarding rule. This @@ -46,7 +52,7 @@ import ( // each is needed. func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, apiService *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { if len(nodes) == 0 { - return nil, fmt.Errorf("Cannot EnsureLoadBalancer() with no hosts") + return nil, fmt.Errorf(errStrLbNoHosts) } hostNames := nodeNames(nodes) @@ -508,7 +514,7 @@ func (g *Cloud) ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation bool, klog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, len(hosts)) if hcToCreate != nil { if hc, err := g.ensureHTTPHealthCheck(hcToCreate.Name, hcToCreate.RequestPath, int32(hcToCreate.Port)); err != nil || hc == nil { - return fmt.Errorf("Failed to ensure health check for %v port %d path %v: %v", loadBalancerName, hcToCreate.Port, hcToCreate.RequestPath, err) + return fmt.Errorf("failed to ensure health check for %v port %d path %v: %v", loadBalancerName, hcToCreate.Port, hcToCreate.RequestPath, err) } } } else { @@ -538,7 +544,7 @@ func (g *Cloud) createTargetPoolAndHealthCheck(svc *v1.Service, name, serviceNam var err error hcRequestPath, hcPort := hc.RequestPath, hc.Port if hc, err = g.ensureHTTPHealthCheck(hc.Name, hc.RequestPath, int32(hc.Port)); err != nil || hc == nil { - return fmt.Errorf("Failed to ensure health check for %v port %d path %v: %v", name, hcPort, hcRequestPath, err) + return fmt.Errorf("failed to ensure health check for %v port %d path %v: %v", name, hcPort, hcRequestPath, err) } hcLinks = append(hcLinks, hc.SelfLink) } @@ -607,7 +613,7 @@ func (g *Cloud) updateTargetPool(loadBalancerName string, hosts []*gceInstance) if len(updatedPool.Instances) != len(hosts) { klog.Errorf("Unexpected number of instances (%d) in target pool %s after updating (expected %d). 
Instances in updated pool: %s", len(updatedPool.Instances), loadBalancerName, len(hosts), strings.Join(updatedPool.Instances, ",")) - return fmt.Errorf("Unexpected number of instances (%d) in target pool %s after update (expected %d)", len(updatedPool.Instances), loadBalancerName, len(hosts)) + return fmt.Errorf("unexpected number of instances (%d) in target pool %s after update (expected %d)", len(updatedPool.Instances), loadBalancerName, len(hosts)) } return nil } @@ -636,7 +642,7 @@ func makeHTTPHealthCheck(name, path string, port int32) *compute.HttpHealthCheck // The HC interval will be reconciled to 8 seconds. // If the existing health check is larger than the default interval, // the configuration will be kept. -func mergeHTTPHealthChecks(hc, newHC *compute.HttpHealthCheck) *compute.HttpHealthCheck { +func mergeHTTPHealthChecks(hc, newHC *compute.HttpHealthCheck) { if hc.CheckIntervalSec > newHC.CheckIntervalSec { newHC.CheckIntervalSec = hc.CheckIntervalSec } @@ -649,16 +655,23 @@ func mergeHTTPHealthChecks(hc, newHC *compute.HttpHealthCheck) *compute.HttpHeal if hc.HealthyThreshold > newHC.HealthyThreshold { newHC.HealthyThreshold = hc.HealthyThreshold } - return newHC } // needToUpdateHTTPHealthChecks checks whether the http healthcheck needs to be // updated. func needToUpdateHTTPHealthChecks(hc, newHC *compute.HttpHealthCheck) bool { - changed := hc.Port != newHC.Port || hc.RequestPath != newHC.RequestPath || hc.Description != newHC.Description - changed = changed || hc.CheckIntervalSec < newHC.CheckIntervalSec || hc.TimeoutSec < newHC.TimeoutSec - changed = changed || hc.UnhealthyThreshold < newHC.UnhealthyThreshold || hc.HealthyThreshold < newHC.HealthyThreshold - return changed + switch { + case + hc.Port != newHC.Port, + hc.RequestPath != newHC.RequestPath, + hc.Description != newHC.Description, + hc.CheckIntervalSec < newHC.CheckIntervalSec, + hc.TimeoutSec < newHC.TimeoutSec, + hc.UnhealthyThreshold < newHC.UnhealthyThreshold, + hc.HealthyThreshold < newHC.HealthyThreshold: + return true + } + return false } func (g *Cloud) ensureHTTPHealthCheck(name, path string, port int32) (hc *compute.HttpHealthCheck, err error) { @@ -681,7 +694,7 @@ func (g *Cloud) ensureHTTPHealthCheck(name, path string, port int32) (hc *comput klog.V(4).Infof("Checking http health check params %s", name) if needToUpdateHTTPHealthChecks(hc, newHC) { klog.Warningf("Health check %v exists but parameters have drifted - updating...", name) - newHC = mergeHTTPHealthChecks(hc, newHC) + mergeHTTPHealthChecks(hc, newHC) if err := g.UpdateHTTPHealthCheck(newHC); err != nil { klog.Warningf("Failed to reconcile http health check %v parameters", name) return nil, err @@ -790,7 +803,7 @@ func loadBalancerPortRange(ports []v1.ServicePort) (string, error) { // The service controller verified all the protocols match on the ports, just check and use the first one if ports[0].Protocol != v1.ProtocolTCP && ports[0].Protocol != v1.ProtocolUDP { - return "", fmt.Errorf("Invalid protocol %s, only TCP and UDP are supported", string(ports[0].Protocol)) + return "", fmt.Errorf("invalid protocol %s, only TCP and UDP are supported", string(ports[0].Protocol)) } minPort := int32(65536) diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal.go index 88560a9f9611c..31a8beb488d19 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal.go +++ 
b/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. @@ -18,39 +20,45 @@ package gce import ( "context" + "encoding/json" "fmt" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "strconv" "strings" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + cloudprovider "k8s.io/cloud-provider" servicehelpers "k8s.io/cloud-provider/service/helpers" "k8s.io/klog" ) const ( + // Used to list instances in all states(RUNNING and other) - https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroups/listInstances allInstances = "ALL" ) -func usesNEG(service *v1.Service) bool { - _, ok := service.GetAnnotations()[NEGAnnotation] - return ok -} - func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { - if usesNEG(svc) { - // Do not manage loadBalancer for services using NEGs - return &svc.Status.LoadBalancer, nil + if g.AlphaFeatureGate.Enabled(AlphaFeatureILBSubsets) { + return nil, cloudprovider.ImplementedElsewhere } + nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace} ports, protocol := getPortsAndProtocol(svc.Spec.Ports) if protocol != v1.ProtocolTCP && protocol != v1.ProtocolUDP { return nil, fmt.Errorf("Invalid protocol %s, only TCP and UDP are supported", string(protocol)) } scheme := cloud.SchemeInternal + options := getILBOptions(svc) + if g.isLegacyNetwork { + g.eventRecorder.Event(svc, v1.EventTypeWarning, "ILBOptionsIgnored", "Internal LoadBalancer options are not supported with Legacy Networks.") + options = ILBOptions{} + } + loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc) sharedBackend := shareBackendService(svc) backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, sharedBackend, scheme, protocol, svc.Spec.SessionAffinity) @@ -119,46 +127,31 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v return nil, err } - expectedFwdRule := &compute.ForwardingRule{ - Name: loadBalancerName, - Description: fmt.Sprintf(`{"kubernetes.io/service-name":"%s"}`, nm.String()), - IPAddress: ipToUse, - BackendService: backendServiceLink, - Ports: ports, - IPProtocol: string(protocol), - LoadBalancingScheme: string(scheme), - } - - // Given that CreateGCECloud will attempt to determine the subnet based off the network, - // the subnetwork should rarely be unknown. 
- if subnetworkURL != "" { - expectedFwdRule.Subnetwork = subnetworkURL - } else { - expectedFwdRule.Network = g.networkURL - } - - fwdRuleDeleted := false - if existingFwdRule != nil && !fwdRuleEqual(existingFwdRule, expectedFwdRule) { - klog.V(2).Infof("ensureInternalLoadBalancer(%v): deleting existing forwarding rule with IP address %v", loadBalancerName, existingFwdRule.IPAddress) - if err = ignoreNotFound(g.DeleteRegionForwardingRule(loadBalancerName, g.region)); err != nil { - return nil, err - } - fwdRuleDeleted = true - } - bsDescription := makeBackendServiceDescription(nm, sharedBackend) err = g.ensureInternalBackendService(backendServiceName, bsDescription, svc.Spec.SessionAffinity, scheme, protocol, igLinks, hc.SelfLink) if err != nil { return nil, err } - // If we previously deleted the forwarding rule or it never existed, finally create it. - if fwdRuleDeleted || existingFwdRule == nil { - klog.V(2).Infof("ensureInternalLoadBalancer(%v): creating forwarding rule", loadBalancerName) - if err = g.CreateRegionForwardingRule(expectedFwdRule, g.region); err != nil { - return nil, err - } - klog.V(2).Infof("ensureInternalLoadBalancer(%v): created forwarding rule", loadBalancerName) + newFRC := &forwardingRuleComposite{ + name: loadBalancerName, + description: &forwardingRuleDescription{ServiceName: nm.String()}, + ipAddress: ipToUse, + backendService: backendServiceLink, + ports: ports, + ipProtocol: string(protocol), + lbScheme: string(scheme), + // Given that CreateGCECloud will attempt to determine the subnet based off the network, + // the subnetwork should rarely be unknown. + subnetwork: subnetworkURL, + network: g.networkURL, + } + if options.AllowGlobalAccess { + newFRC.allowGlobalAccess = options.AllowGlobalAccess + newFRC.description.APIVersion = meta.VersionBeta + } + if err := g.ensureInternalForwardingRule(existingFwdRule, newFRC); err != nil { + return nil, err } // Delete the previous internal load balancer resources if necessary @@ -210,9 +203,8 @@ func (g *Cloud) clearPreviousInternalResources(svc *v1.Service, loadBalancerName // updateInternalLoadBalancer is called when the list of nodes has changed. Therefore, only the instance groups // and possibly the backend service need to be updated. func (g *Cloud) updateInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, nodes []*v1.Node) error { - if usesNEG(svc) { - // Do not manage loadBalancer for services using NEGs - return nil + if g.AlphaFeatureGate.Enabled(AlphaFeatureILBSubsets) { + return cloudprovider.ImplementedElsewhere } g.sharedResourceLock.Lock() defer g.sharedResourceLock.Unlock() @@ -420,7 +412,7 @@ func (g *Cloud) ensureInternalHealthCheck(name string, svcName types.NamespacedN if needToUpdateHealthChecks(hc, expectedHC) { klog.V(2).Infof("ensureInternalHealthCheck: health check %v exists but parameters have drifted - updating...", name) - expectedHC = mergeHealthChecks(hc, expectedHC) + mergeHealthChecks(hc, expectedHC) if err := g.UpdateHealthCheck(expectedHC); err != nil { klog.Warningf("Failed to reconcile http health check %v parameters", name) return nil, err @@ -642,7 +634,7 @@ func firewallRuleEqual(a, b *compute.Firewall) bool { // The HC interval will be reconciled to 8 seconds. // If the existing health check is larger than the default interval, // the configuration will be kept. 
-func mergeHealthChecks(hc, newHC *compute.HealthCheck) *compute.HealthCheck { +func mergeHealthChecks(hc, newHC *compute.HealthCheck) { if hc.CheckIntervalSec > newHC.CheckIntervalSec { newHC.CheckIntervalSec = hc.CheckIntervalSec } @@ -655,18 +647,24 @@ func mergeHealthChecks(hc, newHC *compute.HealthCheck) *compute.HealthCheck { if hc.HealthyThreshold > newHC.HealthyThreshold { newHC.HealthyThreshold = hc.HealthyThreshold } - return newHC } // needToUpdateHealthChecks checks whether the healthcheck needs to be updated. func needToUpdateHealthChecks(hc, newHC *compute.HealthCheck) bool { - if hc.HttpHealthCheck == nil || newHC.HttpHealthCheck == nil { + switch { + case + hc.HttpHealthCheck == nil, + newHC.HttpHealthCheck == nil, + hc.HttpHealthCheck.Port != newHC.HttpHealthCheck.Port, + hc.HttpHealthCheck.RequestPath != newHC.HttpHealthCheck.RequestPath, + hc.Description != newHC.Description, + hc.CheckIntervalSec < newHC.CheckIntervalSec, + hc.TimeoutSec < newHC.TimeoutSec, + hc.UnhealthyThreshold < newHC.UnhealthyThreshold, + hc.HealthyThreshold < newHC.HealthyThreshold: return true } - changed := hc.HttpHealthCheck.Port != newHC.HttpHealthCheck.Port || hc.HttpHealthCheck.RequestPath != newHC.HttpHealthCheck.RequestPath || hc.Description != newHC.Description - changed = changed || hc.CheckIntervalSec < newHC.CheckIntervalSec || hc.TimeoutSec < newHC.TimeoutSec - changed = changed || hc.UnhealthyThreshold < newHC.UnhealthyThreshold || hc.HealthyThreshold < newHC.HealthyThreshold - return changed + return false } // backendsListEqual asserts that backend lists are equal by instance group link only @@ -699,14 +697,6 @@ func backendSvcEqual(a, b *compute.BackendService) bool { backendsListEqual(a.Backends, b.Backends) } -func fwdRuleEqual(a, b *compute.ForwardingRule) bool { - return (a.IPAddress == "" || b.IPAddress == "" || a.IPAddress == b.IPAddress) && - a.IPProtocol == b.IPProtocol && - a.LoadBalancingScheme == b.LoadBalancingScheme && - equalStringSets(a.Ports, b.Ports) && - a.BackendService == b.BackendService -} - func getPortsAndProtocol(svcPorts []v1.ServicePort) (ports []string, protocol v1.Protocol) { if len(svcPorts) == 0 { return []string{}, v1.ProtocolUDP @@ -744,3 +734,205 @@ func determineRequestedIP(svc *v1.Service, fwdRule *compute.ForwardingRule) stri return "" } + +func getILBOptions(svc *v1.Service) ILBOptions { + return ILBOptions{AllowGlobalAccess: GetLoadBalancerAnnotationAllowGlobalAccess(svc)} +} + +// forwardingRuleComposite is a composite type encapsulating both the GA and Beta ForwardingRules. +// It exposes methods to compute the ForwardingRule object based on the given parameters and to compare 2 composite types +// based on the version string. 
+type forwardingRuleComposite struct { + allowGlobalAccess bool + name string + description *forwardingRuleDescription + ipAddress string + backendService string + ports []string + ipProtocol string + lbScheme string + subnetwork string + network string +} + +func (f *forwardingRuleComposite) Version() meta.Version { + return f.description.APIVersion +} + +func (f *forwardingRuleComposite) Equal(other *forwardingRuleComposite) bool { + return (f.ipAddress == "" || other.ipAddress == "" || f.ipAddress == other.ipAddress) && + f.ipProtocol == other.ipProtocol && + f.lbScheme == other.lbScheme && + equalStringSets(f.ports, other.ports) && + f.backendService == other.backendService && + f.allowGlobalAccess == other.allowGlobalAccess +} + +// toForwardingRuleComposite converts a compute beta or GA ForwardingRule into the composite type +func toForwardingRuleComposite(rule interface{}) (frc *forwardingRuleComposite, err error) { + switch fr := rule.(type) { + case *compute.ForwardingRule: + frc = &forwardingRuleComposite{ + name: fr.Name, + ipAddress: fr.IPAddress, + description: &forwardingRuleDescription{APIVersion: meta.VersionGA}, + backendService: fr.BackendService, + ports: fr.Ports, + ipProtocol: fr.IPProtocol, + lbScheme: fr.LoadBalancingScheme, + subnetwork: fr.Subnetwork, + network: fr.Network, + } + if fr.Description != "" { + err = frc.description.unmarshal(fr.Description) + } + return frc, err + case *computebeta.ForwardingRule: + frc = &forwardingRuleComposite{ + name: fr.Name, + ipAddress: fr.IPAddress, + description: &forwardingRuleDescription{APIVersion: meta.VersionBeta}, + backendService: fr.BackendService, + ports: fr.Ports, + ipProtocol: fr.IPProtocol, + lbScheme: fr.LoadBalancingScheme, + subnetwork: fr.Subnetwork, + network: fr.Network, + allowGlobalAccess: fr.AllowGlobalAccess, + } + if fr.Description != "" { + err = frc.description.unmarshal(fr.Description) + } + return frc, err + default: + return nil, fmt.Errorf("Invalid object type %T to compute ForwardingRuleComposite from", fr) + } +} + +// ToBeta returns a Beta ForwardingRule from the composite type. +func (f *forwardingRuleComposite) ToBeta() (*computebeta.ForwardingRule, error) { + descStr, err := f.description.marshal() + if err != nil { + return nil, fmt.Errorf("Failed to compute description for beta forwarding rule %s, err: %v", f.name, err) + } + return &computebeta.ForwardingRule{ + Name: f.name, + Description: descStr, + IPAddress: f.ipAddress, + BackendService: f.backendService, + Ports: f.ports, + IPProtocol: f.ipProtocol, + LoadBalancingScheme: f.lbScheme, + Subnetwork: f.subnetwork, + Network: f.network, + AllowGlobalAccess: f.allowGlobalAccess, + }, nil +} + +// ToGA returns a GA ForwardingRule from the composite type. +func (f *forwardingRuleComposite) ToGA() (*compute.ForwardingRule, error) { + descStr, err := f.description.marshal() + if err != nil { + return nil, fmt.Errorf("Failed to compute description for GA forwarding rule %s, err: %v", f.name, err) + } + return &compute.ForwardingRule{ + Name: f.name, + Description: descStr, + IPAddress: f.ipAddress, + BackendService: f.backendService, + Ports: f.ports, + IPProtocol: f.ipProtocol, + LoadBalancingScheme: f.lbScheme, + Subnetwork: f.subnetwork, + Network: f.network, + }, nil +} + +type forwardingRuleDescription struct { + ServiceName string `json:"kubernetes.io/service-name"` + APIVersion meta.Version `json:"kubernetes.io/api-version,omitempty"` +} + +// marshal the description as a JSON-encoded string. 
+func (d *forwardingRuleDescription) marshal() (string, error) { + out, err := json.Marshal(d) + if err != nil { + return "", err + } + return string(out), err +} + +// unmarshal desc JSON-encoded string into this structure. +func (d *forwardingRuleDescription) unmarshal(desc string) error { + return json.Unmarshal([]byte(desc), d) +} + +func getFwdRuleAPIVersion(rule *compute.ForwardingRule) (meta.Version, error) { + d := &forwardingRuleDescription{} + if rule.Description == "" { + return meta.VersionGA, nil + } + if err := d.unmarshal(rule.Description); err != nil { + return meta.VersionGA, fmt.Errorf("Failed to get APIVersion from Forwarding rule %s - %v", rule.Name, err) + } + if d.APIVersion == "" { + d.APIVersion = meta.VersionGA + } + return d.APIVersion, nil +} + +func (g *Cloud) ensureInternalForwardingRule(existingFwdRule *compute.ForwardingRule, newFRC *forwardingRuleComposite) (err error) { + if existingFwdRule != nil { + version, err := getFwdRuleAPIVersion(existingFwdRule) + if err != nil { + return err + } + var oldFRC *forwardingRuleComposite + switch version { + case meta.VersionBeta: + var betaRule *computebeta.ForwardingRule + betaRule, err = g.GetBetaRegionForwardingRule(existingFwdRule.Name, g.region) + if err != nil { + return err + } + oldFRC, err = toForwardingRuleComposite(betaRule) + case meta.VersionGA: + oldFRC, err = toForwardingRuleComposite(existingFwdRule) + default: + klog.Errorf("invalid version string for %s, assuming GA", existingFwdRule.Name) + oldFRC, err = toForwardingRuleComposite(existingFwdRule) + } + if err != nil { + return err + } + if oldFRC.Equal(newFRC) { + klog.V(4).Infof("oldFRC == newFRC, no updates needed (oldFRC == %+v)", oldFRC) + return nil + } + klog.V(2).Infof("ensureInternalLoadBalancer(%v): deleting existing forwarding rule with IP address %v", existingFwdRule.Name, existingFwdRule.IPAddress) + if err = ignoreNotFound(g.DeleteRegionForwardingRule(existingFwdRule.Name, g.region)); err != nil { + return err + } + } + // At this point, the existing rule has been deleted if required. + // Create the rule based on the api version determined + if newFRC.Version() == meta.VersionBeta { + klog.V(2).Infof("ensureInternalLoadBalancer(%v): creating beta forwarding rule", newFRC.name) + var betaRule *computebeta.ForwardingRule + betaRule, err = newFRC.ToBeta() + if err != nil { + return err + } + err = g.CreateBetaRegionForwardingRule(betaRule, g.region) + } else { + var gaRule *compute.ForwardingRule + klog.V(2).Infof("ensureInternalLoadBalancer(%v): creating ga forwarding rule", newFRC.name) + gaRule, err = newFRC.ToGA() + if err != nil { + return err + } + err = g.CreateRegionForwardingRule(gaRule, g.region) + } + klog.V(2).Infof("ensureInternalLoadBalancer(%v): created forwarding rule, err : %s", newFRC.name, err) + return err +} diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_naming.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_naming.go index f7c2e0ce289b1..07aebb3915505 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_naming.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_naming.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. 
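The internal load-balancer changes above drop the old fwdRuleEqual comparison in favour of a forwardingRuleComposite that can represent either the GA or the Beta ForwardingRule, and they record which API version produced the rule as JSON inside the rule's Description field (forwardingRuleDescription, getFwdRuleAPIVersion). A minimal, self-contained sketch of that description round-trip is below; it mirrors the struct added in the diff but substitutes a plain string for meta.Version so it compiles without the k8s-cloud-provider meta package. That substitution, and the example service name, are assumptions made only for illustration, not the vendored implementation itself.

package main

import (
	"encoding/json"
	"fmt"
)

// forwardingRuleDescription mirrors the struct added in the diff above.
// The real type uses meta.Version for APIVersion; a plain string is used
// here only so the sketch stays self-contained.
type forwardingRuleDescription struct {
	ServiceName string `json:"kubernetes.io/service-name"`
	APIVersion  string `json:"kubernetes.io/api-version,omitempty"`
}

func (d *forwardingRuleDescription) marshal() (string, error) {
	out, err := json.Marshal(d)
	if err != nil {
		return "", err
	}
	return string(out), nil
}

func (d *forwardingRuleDescription) unmarshal(desc string) error {
	return json.Unmarshal([]byte(desc), d)
}

func main() {
	// Encode the service name and API version into the Description string,
	// as the diff does when a beta-only option (AllowGlobalAccess) is set.
	in := forwardingRuleDescription{ServiceName: "default/my-svc", APIVersion: "beta"}
	desc, err := in.marshal()
	if err != nil {
		panic(err)
	}
	fmt.Println(desc)

	// Decode it back, which is how getFwdRuleAPIVersion decides whether an
	// existing rule must be re-fetched via the beta API before comparison.
	var out forwardingRuleDescription
	if err := out.unmarshal(desc); err != nil {
		panic(err)
	}
	fmt.Println(out.APIVersion)
}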
diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_networkendpointgroup.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_networkendpointgroup.go index 23503d2e007e8..4ea9fc5a34709 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_networkendpointgroup.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_networkendpointgroup.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_routes.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_routes.go index 62d9b0995f7ce..c585ca873ef4c 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_routes.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_routes.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. @@ -38,13 +40,13 @@ func newRoutesMetricContext(request string) *metricContext { // ListRoutes in the cloud environment. func (g *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { - ctx, cancel := cloud.ContextWithCallTimeout() + timeoutCtx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newRoutesMetricContext("list") prefix := truncateClusterName(clusterName) f := filter.Regexp("name", prefix+"-.*").AndRegexp("network", g.NetworkURL()).AndRegexp("description", k8sNodeRouteTag) - routes, err := g.c.Routes().List(ctx, f) + routes, err := g.c.Routes().List(timeoutCtx, f) if err != nil { return nil, mc.Observe(err) } @@ -64,7 +66,7 @@ func (g *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpro // CreateRoute in the cloud environment. func (g *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error { - ctx, cancel := cloud.ContextWithCallTimeout() + timeoutCtx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newRoutesMetricContext("create") @@ -74,6 +76,7 @@ func (g *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint st return mc.Observe(err) } cr := &compute.Route{ + // TODO(thockin): generate a unique name for node + route cidr. Don't depend on name hints. Name: truncateClusterName(clusterName) + "-" + nameHint, DestRange: route.DestinationCIDR, NextHopInstance: fmt.Sprintf("zones/%s/instances/%s", targetInstance.Zone, targetInstance.Name), @@ -81,7 +84,7 @@ func (g *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint st Priority: 1000, Description: k8sNodeRouteTag, } - err = g.c.Routes().Insert(ctx, meta.GlobalKey(cr.Name), cr) + err = g.c.Routes().Insert(timeoutCtx, meta.GlobalKey(cr.Name), cr) if isHTTPErrorCode(err, http.StatusConflict) { klog.Infof("Route %q already exists.", cr.Name) err = nil @@ -91,11 +94,11 @@ func (g *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint st // DeleteRoute from the cloud environment. 
func (g *Cloud) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error { - ctx, cancel := cloud.ContextWithCallTimeout() + timeoutCtx, cancel := cloud.ContextWithCallTimeout() defer cancel() mc := newRoutesMetricContext("delete") - return mc.Observe(g.c.Routes().Delete(ctx, meta.GlobalKey(route.Name))) + return mc.Observe(g.c.Routes().Delete(timeoutCtx, meta.GlobalKey(route.Name))) } func truncateClusterName(clusterName string) string { diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_securitypolicy.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_securitypolicy.go index bfb2cb96ebe2a..10e876de71c9c 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_securitypolicy.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_securitypolicy.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2018 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_targetpool.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_targetpool.go index 8f2f2cdff1b10..6d5d55e0d03b1 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_targetpool.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_targetpool.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_targetproxy.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_targetproxy.go index 1744e88e86a46..7c28d8fc4dbba 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_targetproxy.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_targetproxy.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_tpu.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_tpu.go index ff4217eb18f93..27f472fb0e304 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_tpu.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_tpu.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2018 The Kubernetes Authors. @@ -33,7 +35,7 @@ import ( // newTPUService returns a new tpuService using the client to communicate with // the Cloud TPU APIs. func newTPUService(client *http.Client) (*tpuService, error) { - s, err := tpuapi.New(client) + s, err := tpuapi.NewService(context.Background()) if err != nil { return nil, err } diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_urlmap.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_urlmap.go index 2ea04e4a2c406..f2bdda1cba0ee 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_urlmap.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_urlmap.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_util.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_util.go index 3b1d5353a4fab..a871c54d5ddfc 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_util.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_util.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/gce_zones.go b/vendor/k8s.io/legacy-cloud-providers/gce/gce_zones.go index 0862d6dea9f6a..4c5a6d0df6aaf 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/gce_zones.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/gce_zones.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. 
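Besides adding the !providerless build tag across the GCE provider files, the gce_routes.go hunks above stop shadowing the incoming ctx parameter: the per-call context returned by cloud.ContextWithCallTimeout() is now bound to a separate timeoutCtx, so the caller's context remains visible for the rest of the function. A rough sketch of that pattern follows; it uses the standard library's context.WithTimeout and a placeholder fakeAPICall in place of the vendored helper and the real GCE client, which is an assumption made only to keep the example self-contained.

package main

import (
	"context"
	"fmt"
	"time"
)

// listRoutes sketches the non-shadowing pattern from the diff: the caller's
// ctx stays in scope, while the cloud API call gets its own bounded context.
func listRoutes(ctx context.Context) error {
	// The vendored code calls cloud.ContextWithCallTimeout(); the standard
	// library equivalent is assumed here for illustration.
	timeoutCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	// The API call observes the bounded context ...
	if err := fakeAPICall(timeoutCtx); err != nil {
		return err
	}
	// ... while the original ctx (its deadline, values, and cancellation)
	// is still available to any later steps in the function.
	_ = ctx
	return nil
}

// fakeAPICall stands in for a GCE client call and respects cancellation.
func fakeAPICall(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	fmt.Println(listRoutes(context.Background()))
}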
diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/metrics.go b/vendor/k8s.io/legacy-cloud-providers/gce/metrics.go index 22f1c2dde6f5f..314195a3e2f38 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/metrics.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/metrics.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2014 The Kubernetes Authors. @@ -19,7 +21,8 @@ package gce import ( "time" - "github.com/prometheus/client_golang/prometheus" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" ) const ( @@ -30,8 +33,8 @@ const ( ) type apiCallMetrics struct { - latency *prometheus.HistogramVec - errors *prometheus.CounterVec + latency *metrics.HistogramVec + errors *metrics.CounterVec } var ( @@ -82,24 +85,26 @@ func newGenericMetricContext(prefix, request, region, zone, version string) *met // registerApiMetrics adds metrics definitions for a category of API calls. func registerAPIMetrics(attributes ...string) *apiCallMetrics { metrics := &apiCallMetrics{ - latency: prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "cloudprovider_gce_api_request_duration_seconds", - Help: "Latency of a GCE API call", + latency: metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Name: "cloudprovider_gce_api_request_duration_seconds", + Help: "Latency of a GCE API call", + StabilityLevel: metrics.ALPHA, }, attributes, ), - errors: prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "cloudprovider_gce_api_request_errors", - Help: "Number of errors for an API call", + errors: metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "cloudprovider_gce_api_request_errors", + Help: "Number of errors for an API call", + StabilityLevel: metrics.ALPHA, }, attributes, ), } - prometheus.MustRegister(metrics.latency) - prometheus.MustRegister(metrics.errors) + legacyregistry.MustRegister(metrics.latency) + legacyregistry.MustRegister(metrics.errors) return metrics } diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/support.go b/vendor/k8s.io/legacy-cloud-providers/gce/support.go index 9ad04439780d0..5413e78151d16 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/support.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/support.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/legacy-cloud-providers/gce/token_source.go b/vendor/k8s.io/legacy-cloud-providers/gce/token_source.go index e8434bd2a0c77..6eae376c10c7c 100644 --- a/vendor/k8s.io/legacy-cloud-providers/gce/token_source.go +++ b/vendor/k8s.io/legacy-cloud-providers/gce/token_source.go @@ -1,3 +1,5 @@ +// +build !providerless + /* Copyright 2015 The Kubernetes Authors. @@ -17,17 +19,19 @@ limitations under the License. 
package gce import ( + "context" "encoding/json" "net/http" "strings" "time" - "k8s.io/client-go/util/flowcontrol" - - "github.com/prometheus/client_golang/prometheus" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" + + "k8s.io/client-go/util/flowcontrol" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" ) const ( @@ -37,24 +41,34 @@ const ( tokenURLBurst = 3 ) +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ var ( - getTokenCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "get_token_count", - Help: "Counter of total Token() requests to the alternate token source", + getTokenCounter = metrics.NewCounter( + &metrics.CounterOpts{ + Name: "get_token_count", + Help: "Counter of total Token() requests to the alternate token source", + StabilityLevel: metrics.ALPHA, }, ) - getTokenFailCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "get_token_fail_count", - Help: "Counter of failed Token() requests to the alternate token source", + getTokenFailCounter = metrics.NewCounter( + &metrics.CounterOpts{ + Name: "get_token_fail_count", + Help: "Counter of failed Token() requests to the alternate token source", + StabilityLevel: metrics.ALPHA, }, ) ) func init() { - prometheus.MustRegister(getTokenCounter) - prometheus.MustRegister(getTokenFailCounter) + legacyregistry.MustRegister(getTokenCounter) + legacyregistry.MustRegister(getTokenFailCounter) } // AltTokenSource is the structure holding the data for the functionality needed to generates tokens @@ -104,7 +118,7 @@ func (a *AltTokenSource) token() (*oauth2.Token, error) { // NewAltTokenSource constructs a new alternate token source for generating tokens. 
func NewAltTokenSource(tokenURL, tokenBody string) oauth2.TokenSource { - client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource("")) + client := oauth2.NewClient(context.Background(), google.ComputeTokenSource("")) a := &AltTokenSource{ oauthClient: client, tokenURL: tokenURL, diff --git a/vendor/modules.txt b/vendor/modules.txt index 13b16b88bb426..3ff4030e1e3f9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,18 +1,22 @@ -# cloud.google.com/go v0.34.0 +# cloud.google.com/go v0.38.0 cloud.google.com/go/compute/metadata # github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm -# github.com/Azure/go-autorest v11.1.2+incompatible +# github.com/Azure/go-autorest/autorest v0.9.0 github.com/Azure/go-autorest/autorest -github.com/Azure/go-autorest/autorest/adal github.com/Azure/go-autorest/autorest/azure +# github.com/Azure/go-autorest/autorest/adal v0.5.0 +github.com/Azure/go-autorest/autorest/adal +# github.com/Azure/go-autorest/autorest/date v0.1.0 github.com/Azure/go-autorest/autorest/date +# github.com/Azure/go-autorest/logger v0.1.0 github.com/Azure/go-autorest/logger -github.com/Azure/go-autorest/version +# github.com/Azure/go-autorest/tracing v0.5.0 +github.com/Azure/go-autorest/tracing # github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml -# github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20181220005116-f8e995905100 +# github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534 github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta @@ -23,9 +27,10 @@ github.com/MakeNowJust/heredoc github.com/Masterminds/semver # github.com/Masterminds/sprig v2.17.1+incompatible github.com/Masterminds/sprig -# github.com/Microsoft/go-winio v0.4.11 +# github.com/Microsoft/go-winio v0.4.14 github.com/Microsoft/go-winio -# github.com/PuerkitoBio/purell v1.1.0 +github.com/Microsoft/go-winio/pkg/guid +# github.com/PuerkitoBio/purell v1.1.1 github.com/PuerkitoBio/purell # github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 github.com/PuerkitoBio/urlesc @@ -108,7 +113,7 @@ github.com/bazelbuild/bazel-gazelle/walk # github.com/bazelbuild/buildtools v0.0.0-20190731111112-f720930ceb60 github.com/bazelbuild/buildtools/build github.com/bazelbuild/buildtools/tables -# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 +# github.com/beorn7/perks v1.0.0 github.com/beorn7/perks/quantile # github.com/blang/semver v3.5.0+incompatible github.com/blang/semver @@ -117,16 +122,16 @@ github.com/chai2010/gettext-go/gettext github.com/chai2010/gettext-go/gettext/mo github.com/chai2010/gettext-go/gettext/plural github.com/chai2010/gettext-go/gettext/po -# github.com/client9/misspell v0.0.0-20170928000206-9ce5d979ffda +# github.com/client9/misspell v0.3.4 github.com/client9/misspell github.com/client9/misspell/cmd/misspell -# github.com/coreos/etcd v3.3.13+incompatible +# github.com/coreos/etcd v3.3.17+incompatible github.com/coreos/etcd/client github.com/coreos/etcd/pkg/pathutil github.com/coreos/etcd/pkg/srv github.com/coreos/etcd/pkg/types github.com/coreos/etcd/version -# github.com/coreos/go-semver v0.2.0 +# github.com/coreos/go-semver v0.3.0 github.com/coreos/go-semver/semver # github.com/cpuguy83/go-md2man v1.0.10 github.com/cpuguy83/go-md2man/md2man @@ -141,11 +146,11 @@ github.com/denverdino/aliyungo/oss 
github.com/denverdino/aliyungo/ram github.com/denverdino/aliyungo/slb github.com/denverdino/aliyungo/util -# github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda +# github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/dgrijalva/jwt-go # github.com/digitalocean/godo v1.19.0 github.com/digitalocean/godo -# github.com/docker/distribution v0.0.0-20170726174610-edc3ab29cdff +# github.com/docker/distribution v2.7.1+incompatible github.com/docker/distribution/digestset github.com/docker/distribution/reference # github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0 @@ -165,7 +170,7 @@ github.com/docker/engine-api/types/registry github.com/docker/engine-api/types/strslice github.com/docker/engine-api/types/time github.com/docker/engine-api/types/versions -# github.com/docker/go-connections v0.3.0 +# github.com/docker/go-connections v0.4.0 github.com/docker/go-connections/nat github.com/docker/go-connections/sockets github.com/docker/go-connections/tlsconfig @@ -174,15 +179,13 @@ github.com/docker/go-units # github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c github.com/docker/spdystream github.com/docker/spdystream/spdy -# github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 +# github.com/emicklei/go-restful v2.9.5+incompatible github.com/emicklei/go-restful github.com/emicklei/go-restful/log # github.com/evanphx/json-patch v4.5.0+incompatible github.com/evanphx/json-patch # github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d github.com/exponent-io/jsonpath -# github.com/fatih/camelcase v0.0.0-20160318181535-f6a740d52f96 -github.com/fatih/camelcase # github.com/fatih/color v1.7.0 github.com/fatih/color # github.com/fsnotify/fsnotify v1.4.7 @@ -195,17 +198,17 @@ github.com/ghodss/yaml github.com/go-ini/ini # github.com/go-logr/logr v0.1.0 github.com/go-logr/logr -# github.com/go-openapi/jsonpointer v0.19.0 +# github.com/go-openapi/jsonpointer v0.19.2 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.19.0 +# github.com/go-openapi/jsonreference v0.19.2 github.com/go-openapi/jsonreference -# github.com/go-openapi/spec v0.17.2 +# github.com/go-openapi/spec v0.19.2 github.com/go-openapi/spec -# github.com/go-openapi/swag v0.17.2 +# github.com/go-openapi/swag v0.19.2 github.com/go-openapi/swag # github.com/gobuffalo/flect v0.1.5 github.com/gobuffalo/flect -# github.com/gogo/protobuf v1.1.1 +# github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto github.com/gogo/protobuf/protoc-gen-gogo/descriptor @@ -228,15 +231,15 @@ github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value # github.com/google/go-querystring v1.0.0 github.com/google/go-querystring/query -# github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf +# github.com/google/gofuzz v1.0.0 github.com/google/gofuzz -# github.com/google/uuid v1.1.0 +# github.com/google/uuid v1.1.1 github.com/google/uuid # github.com/googleapis/gnostic v0.3.1 github.com/googleapis/gnostic/OpenAPIv2 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions -# github.com/gophercloud/gophercloud v0.0.0-20190216224116-dcc6e84aef1b +# github.com/gophercloud/gophercloud v0.1.0 github.com/gophercloud/gophercloud github.com/gophercloud/gophercloud/internal github.com/gophercloud/gophercloud/openstack @@ -291,7 +294,7 @@ github.com/hashicorp/go-msgpack/codec github.com/hashicorp/go-multierror # github.com/hashicorp/go-sockaddr v1.0.2 
github.com/hashicorp/go-sockaddr -# github.com/hashicorp/golang-lru v0.5.0 +# github.com/hashicorp/golang-lru v0.5.1 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru # github.com/hashicorp/hcl v1.0.0 @@ -320,18 +323,18 @@ github.com/jacksontj/memberlistmesh/clusterpb github.com/jmespath/go-jmespath # github.com/jpillora/backoff v0.0.0-20170918002102-8eab2debe79d github.com/jpillora/backoff -# github.com/json-iterator/go v1.1.5 +# github.com/json-iterator/go v1.1.7 github.com/json-iterator/go # github.com/jteeuwen/go-bindata v0.0.0-20151023091102-a0ff2567cfb7 github.com/jteeuwen/go-bindata github.com/jteeuwen/go-bindata/go-bindata # github.com/konsorten/go-windows-terminal-sequences v1.0.1 github.com/konsorten/go-windows-terminal-sequences -# github.com/kr/fs v0.0.0-20131111012553-2788f0dbd169 +# github.com/kr/fs v0.1.0 github.com/kr/fs # github.com/magiconair/properties v1.8.0 github.com/magiconair/properties -# github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 +# github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter @@ -343,7 +346,7 @@ github.com/mattn/go-isatty github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/miekg/coredns v0.0.0-20161111164017-20e25559d5ea github.com/miekg/coredns/middleware/etcd/msg -# github.com/miekg/dns v1.0.14 +# github.com/miekg/dns v1.1.4 github.com/miekg/dns # github.com/mitchellh/go-wordwrap v1.0.0 github.com/mitchellh/go-wordwrap @@ -355,7 +358,7 @@ github.com/modern-go/concurrent github.com/modern-go/reflect2 # github.com/oklog/ulid v1.3.1 github.com/oklog/ulid -# github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420 +# github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/go-digest # github.com/pborman/uuid v1.2.0 github.com/pborman/uuid @@ -369,28 +372,26 @@ github.com/pkg/errors github.com/pkg/sftp # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v0.9.2 +# github.com/prometheus/client_golang v1.0.0 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 +# github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 +# github.com/prometheus/common v0.4.1 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a +# github.com/prometheus/procfs v0.0.2 github.com/prometheus/procfs -github.com/prometheus/procfs/internal/util -github.com/prometheus/procfs/nfs -github.com/prometheus/procfs/xfs +github.com/prometheus/procfs/internal/fs # github.com/russross/blackfriday v1.5.2 github.com/russross/blackfriday # github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 github.com/sean-/seed # github.com/sergi/go-diff v0.0.0-20161102184045-552b4e9bbdca github.com/sergi/go-diff/diffmatchpatch -# github.com/sirupsen/logrus v1.2.0 +# github.com/sirupsen/logrus v1.4.2 github.com/sirupsen/logrus # github.com/spf13/afero v1.2.2 github.com/spf13/afero @@ -402,7 +403,7 @@ github.com/spf13/cobra github.com/spf13/cobra/doc # github.com/spf13/jwalterweatherman v1.0.0 
github.com/spf13/jwalterweatherman -# github.com/spf13/pflag v1.0.3 +# github.com/spf13/pflag v1.0.5 github.com/spf13/pflag # github.com/spf13/viper v1.3.2 github.com/spf13/viper @@ -446,6 +447,23 @@ github.com/vmware/govmomi/vim25/types github.com/vmware/govmomi/vim25/xml # github.com/weaveworks/mesh v0.0.0-20170419100114-1f158d31de55 github.com/weaveworks/mesh +# go.opencensus.io v0.21.0 +go.opencensus.io +go.opencensus.io/internal +go.opencensus.io/internal/tagencoding +go.opencensus.io/metric/metricdata +go.opencensus.io/metric/metricproducer +go.opencensus.io/plugin/ochttp +go.opencensus.io/plugin/ochttp/propagation/b3 +go.opencensus.io/resource +go.opencensus.io/stats +go.opencensus.io/stats/internal +go.opencensus.io/stats/view +go.opencensus.io/tag +go.opencensus.io/trace +go.opencensus.io/trace/internal +go.opencensus.io/trace/propagation +go.opencensus.io/trace/tracestate # go.uber.org/atomic v1.3.2 go.uber.org/atomic # go.uber.org/multierr v1.1.0 @@ -457,7 +475,7 @@ go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore -# golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 +# golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 golang.org/x/crypto/curve25519 golang.org/x/crypto/ed25519 golang.org/x/crypto/ed25519/internal/edwards25519 @@ -471,7 +489,7 @@ golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/scrypt golang.org/x/crypto/ssh golang.org/x/crypto/ssh/terminal -# golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc +# golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 golang.org/x/net/bpf golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -482,10 +500,12 @@ golang.org/x/net/idna golang.org/x/net/internal/iana golang.org/x/net/internal/socket golang.org/x/net/internal/socks +golang.org/x/net/internal/timeseries golang.org/x/net/ipv4 golang.org/x/net/ipv6 golang.org/x/net/proxy -# golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a +golang.org/x/net/trace +# golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 golang.org/x/oauth2 golang.org/x/oauth2/google golang.org/x/oauth2/internal @@ -493,11 +513,11 @@ golang.org/x/oauth2/jws golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e golang.org/x/sync/errgroup -# golang.org/x/sys v0.0.0-20190412213103-97732733099d +# golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db +# golang.org/x/text v0.3.2 golang.org/x/text/encoding golang.org/x/text/encoding/internal golang.org/x/text/encoding/internal/identifier @@ -509,7 +529,7 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 +# golang.org/x/time v0.0.0-20181108054448-85acf8d2951c golang.org/x/time/rate # golang.org/x/tools v0.0.0-20191124021906-f5828fc9a103 golang.org/x/tools/cmd/goimports @@ -531,9 +551,12 @@ golang.org/x/tools/internal/imports golang.org/x/tools/internal/module golang.org/x/tools/internal/semver golang.org/x/tools/internal/span +# golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 +golang.org/x/xerrors +golang.org/x/xerrors/internal # gomodules.xyz/jsonpatch/v2 v2.0.1 gomodules.xyz/jsonpatch/v2 -# google.golang.org/api v0.0.0-20181220000619-583d854617af +# google.golang.org/api v0.6.1-0.20190607001116-5213b8090861 google.golang.org/api/compute/v0.alpha google.golang.org/api/compute/v0.beta 
google.golang.org/api/compute/v1 @@ -542,10 +565,15 @@ google.golang.org/api/dns/v1 google.golang.org/api/gensupport google.golang.org/api/googleapi google.golang.org/api/googleapi/internal/uritemplates +google.golang.org/api/googleapi/transport google.golang.org/api/iam/v1 +google.golang.org/api/internal google.golang.org/api/oauth2/v2 +google.golang.org/api/option google.golang.org/api/storage/v1 google.golang.org/api/tpu/v1 +google.golang.org/api/transport/http +google.golang.org/api/transport/http/internal/propagation # google.golang.org/appengine v1.5.0 google.golang.org/appengine google.golang.org/appengine/internal @@ -557,6 +585,42 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch +# google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 +google.golang.org/genproto/googleapis/rpc/status +# google.golang.org/grpc v1.23.0 +google.golang.org/grpc +google.golang.org/grpc/balancer +google.golang.org/grpc/balancer/base +google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/codes +google.golang.org/grpc/connectivity +google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/internal +google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/proto +google.golang.org/grpc/grpclog +google.golang.org/grpc/internal +google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancerload +google.golang.org/grpc/internal/binarylog +google.golang.org/grpc/internal/channelz +google.golang.org/grpc/internal/envconfig +google.golang.org/grpc/internal/grpcrand +google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/syscall +google.golang.org/grpc/internal/transport +google.golang.org/grpc/keepalive +google.golang.org/grpc/metadata +google.golang.org/grpc/naming +google.golang.org/grpc/peer +google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns +google.golang.org/grpc/resolver/passthrough +google.golang.org/grpc/serviceconfig +google.golang.org/grpc/stats +google.golang.org/grpc/status +google.golang.org/grpc/tap # gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/fsnotify.v1 # gopkg.in/gcfg.v1 v1.2.0 @@ -568,7 +632,7 @@ gopkg.in/gcfg.v1/types gopkg.in/inf.v0 # gopkg.in/warnings.v0 v0.1.1 gopkg.in/warnings.v0 -# gopkg.in/yaml.v2 v2.2.2 +# gopkg.in/yaml.v2 v2.2.4 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966 gopkg.in/yaml.v3 @@ -598,8 +662,10 @@ honnef.co/go/tools/staticcheck/vrp honnef.co/go/tools/stylecheck honnef.co/go/tools/unused honnef.co/go/tools/version -# k8s.io/api v0.0.0 => k8s.io/api v0.0.0-20190819141258-3544db3b9e44 +# k8s.io/api v0.0.0 => k8s.io/api v0.0.0-20191114100352-16d7abae0d2a +k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 +k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1 k8s.io/api/apps/v1beta1 @@ -619,6 +685,7 @@ k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 k8s.io/api/core/v1 +k8s.io/api/discovery/v1alpha1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 k8s.io/api/imagepolicy/v1alpha1 @@ -637,10 +704,10 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.0.0-20190819143637-0dbe462fe92d +# k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783 => 
k8s.io/apiextensions-apiserver v0.0.0-20191114105449-027877536833 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 -# k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d +# k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -669,7 +736,6 @@ k8s.io/apimachinery/pkg/types k8s.io/apimachinery/pkg/util/cache k8s.io/apimachinery/pkg/util/clock k8s.io/apimachinery/pkg/util/diff -k8s.io/apimachinery/pkg/util/duration k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/httpstream @@ -694,7 +760,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/cli-runtime v0.0.0 => k8s.io/cli-runtime v0.0.0-20190819144027-541433d7ce35 +# k8s.io/cli-runtime v0.0.0 => k8s.io/cli-runtime v0.0.0-20191114110141-0a35778df828 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/kustomize k8s.io/cli-runtime/pkg/kustomize/k8sdeps @@ -707,13 +773,14 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => k8s.io/client-go v0.0.0-20190819141724-e14f31a72a77 +# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => k8s.io/client-go v0.0.0-20191114101535-6c5935290e33 k8s.io/client-go/discovery k8s.io/client-go/discovery/cached/disk k8s.io/client-go/discovery/fake k8s.io/client-go/dynamic k8s.io/client-go/informers k8s.io/client-go/informers/admissionregistration +k8s.io/client-go/informers/admissionregistration/v1 k8s.io/client-go/informers/admissionregistration/v1beta1 k8s.io/client-go/informers/apps k8s.io/client-go/informers/apps/v1 @@ -736,6 +803,8 @@ k8s.io/client-go/informers/coordination/v1 k8s.io/client-go/informers/coordination/v1beta1 k8s.io/client-go/informers/core k8s.io/client-go/informers/core/v1 +k8s.io/client-go/informers/discovery +k8s.io/client-go/informers/discovery/v1alpha1 k8s.io/client-go/informers/events k8s.io/client-go/informers/events/v1beta1 k8s.io/client-go/informers/extensions @@ -766,6 +835,8 @@ k8s.io/client-go/informers/storage/v1beta1 k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/fake k8s.io/client-go/kubernetes/scheme +k8s.io/client-go/kubernetes/typed/admissionregistration/v1 +k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake k8s.io/client-go/kubernetes/typed/apps/v1 @@ -804,6 +875,8 @@ k8s.io/client-go/kubernetes/typed/coordination/v1beta1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake k8s.io/client-go/kubernetes/typed/core/v1 k8s.io/client-go/kubernetes/typed/core/v1/fake +k8s.io/client-go/kubernetes/typed/discovery/v1alpha1 +k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/fake k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/events/v1beta1/fake k8s.io/client-go/kubernetes/typed/extensions/v1beta1 @@ -838,6 +911,7 @@ k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake 
k8s.io/client-go/kubernetes/typed/storage/v1beta1 k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake +k8s.io/client-go/listers/admissionregistration/v1 k8s.io/client-go/listers/admissionregistration/v1beta1 k8s.io/client-go/listers/apps/v1 k8s.io/client-go/listers/apps/v1beta1 @@ -853,6 +927,7 @@ k8s.io/client-go/listers/certificates/v1beta1 k8s.io/client-go/listers/coordination/v1 k8s.io/client-go/listers/coordination/v1beta1 k8s.io/client-go/listers/core/v1 +k8s.io/client-go/listers/discovery/v1alpha1 k8s.io/client-go/listers/events/v1beta1 k8s.io/client-go/listers/extensions/v1beta1 k8s.io/client-go/listers/networking/v1 @@ -918,55 +993,44 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.0.0-20190819145148-d91c85d212d5 +# k8s.io/cloud-provider v0.0.0-20191114112024-4bbba8331835 => k8s.io/cloud-provider v0.0.0-20191114112024-4bbba8331835 k8s.io/cloud-provider k8s.io/cloud-provider/node/helpers k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/csi-translation-lib v0.0.0 => k8s.io/csi-translation-lib v0.0.0-20190819145328-4831a4ced492 +# k8s.io/component-base v0.0.0-20191114102325-35a9586014f7 => k8s.io/component-base v0.0.0-20191204084121-18d14e17701e +k8s.io/component-base/metrics +k8s.io/component-base/metrics/legacyregistry +k8s.io/component-base/version +# k8s.io/csi-translation-lib v0.0.0-20191114112310-0da609c4ca2d => k8s.io/csi-translation-lib v0.0.0-20191114112310-0da609c4ca2d k8s.io/csi-translation-lib/plugins # k8s.io/helm v2.9.0+incompatible k8s.io/helm/pkg/strvals -# k8s.io/klog v0.3.1 +# k8s.io/klog v1.0.0 k8s.io/klog k8s.io/klog/klogr -# k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 +# k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/proto/validation -# k8s.io/kubernetes v1.15.3 => k8s.io/kubernetes v1.15.3 -k8s.io/kubernetes/pkg/client/metrics/prometheus -k8s.io/kubernetes/pkg/kubectl -k8s.io/kubernetes/pkg/kubectl/apps -k8s.io/kubernetes/pkg/kubectl/cmd/util -k8s.io/kubernetes/pkg/kubectl/cmd/util/editor -k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf -k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi -k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation -k8s.io/kubernetes/pkg/kubectl/describe -k8s.io/kubernetes/pkg/kubectl/describe/versioned -k8s.io/kubernetes/pkg/kubectl/generated -k8s.io/kubernetes/pkg/kubectl/scheme -k8s.io/kubernetes/pkg/kubectl/util -k8s.io/kubernetes/pkg/kubectl/util/certificate -k8s.io/kubernetes/pkg/kubectl/util/deployment -k8s.io/kubernetes/pkg/kubectl/util/event -k8s.io/kubernetes/pkg/kubectl/util/fieldpath -k8s.io/kubernetes/pkg/kubectl/util/i18n -k8s.io/kubernetes/pkg/kubectl/util/interrupt -k8s.io/kubernetes/pkg/kubectl/util/podutils -k8s.io/kubernetes/pkg/kubectl/util/qos -k8s.io/kubernetes/pkg/kubectl/util/rbac -k8s.io/kubernetes/pkg/kubectl/util/resource -k8s.io/kubernetes/pkg/kubectl/util/slice -k8s.io/kubernetes/pkg/kubectl/util/storage -k8s.io/kubernetes/pkg/kubectl/util/templates -k8s.io/kubernetes/pkg/kubectl/util/term -k8s.io/kubernetes/pkg/kubectl/validation -k8s.io/kubernetes/pkg/kubectl/version -# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.0.0-20190819145509-592c9a46fd00 +# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.0.0-20191114113550-6123e1c827f7 
+k8s.io/kubectl/pkg/cmd/util +k8s.io/kubectl/pkg/cmd/util/editor +k8s.io/kubectl/pkg/cmd/util/editor/crlf +k8s.io/kubectl/pkg/generated +k8s.io/kubectl/pkg/scheme +k8s.io/kubectl/pkg/util +k8s.io/kubectl/pkg/util/i18n +k8s.io/kubectl/pkg/util/interrupt +k8s.io/kubectl/pkg/util/openapi +k8s.io/kubectl/pkg/util/openapi/validation +k8s.io/kubectl/pkg/util/templates +k8s.io/kubectl/pkg/util/term +k8s.io/kubectl/pkg/validation +k8s.io/kubectl/pkg/version +# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.0.0-20191114112655-db9be3e678bb k8s.io/legacy-cloud-providers/aws k8s.io/legacy-cloud-providers/gce # k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 @@ -981,7 +1045,7 @@ k8s.io/utils/nsenter k8s.io/utils/path k8s.io/utils/pointer k8s.io/utils/trace -# sigs.k8s.io/controller-runtime v0.2.2 +# sigs.k8s.io/controller-runtime v0.4.0 sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder sigs.k8s.io/controller-runtime/pkg/cache @@ -994,10 +1058,10 @@ sigs.k8s.io/controller-runtime/pkg/controller/controllerutil sigs.k8s.io/controller-runtime/pkg/conversion sigs.k8s.io/controller-runtime/pkg/event sigs.k8s.io/controller-runtime/pkg/handler +sigs.k8s.io/controller-runtime/pkg/healthz sigs.k8s.io/controller-runtime/pkg/internal/controller sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics sigs.k8s.io/controller-runtime/pkg/internal/log -sigs.k8s.io/controller-runtime/pkg/internal/objectutil sigs.k8s.io/controller-runtime/pkg/internal/recorder sigs.k8s.io/controller-runtime/pkg/leaderelection sigs.k8s.io/controller-runtime/pkg/log diff --git a/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md b/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md index 063016c2b8430..98617fa75ea7c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md +++ b/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md @@ -18,8 +18,7 @@ Please see https://git.k8s.io/community/CLA.md for more info 1. Setup tools ```bash - $ go get -u github.com/golang/dep/cmd/dep - $ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.15.0 + $ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1 ``` 1. Test ```bash diff --git a/vendor/sigs.k8s.io/controller-runtime/Gopkg.lock b/vendor/sigs.k8s.io/controller-runtime/Gopkg.lock deleted file mode 100644 index d3d1f1dc802c9..0000000000000 --- a/vendor/sigs.k8s.io/controller-runtime/Gopkg.lock +++ /dev/null @@ -1,988 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57" - name = "cloud.google.com/go" - packages = ["compute/metadata"] - pruneopts = "UT" - revision = "64a2037ec6be8a4b0c1d1f706ed35b428b989239" - version = "v0.26.0" - -[[projects]] - digest = "1:327b9226c8ea5f1cd9952ba859bb7c335cab40fd8781c4a790ef259b0c5fbc40" - name = "github.com/Azure/go-autorest" - packages = [ - "autorest", - "autorest/adal", - "autorest/azure", - "autorest/date", - "logger", - "version", - ] - pruneopts = "UT" - revision = "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf" - version = "v10.15.2" - -[[projects]] - digest = "1:d1665c44bd5db19aaee18d1b6233c99b0b9a986e8bccb24ef54747547a48027f" - name = "github.com/PuerkitoBio/purell" - packages = ["."] - pruneopts = "UT" - revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:c739832d67eb1e9cc478a19cc1a1ccd78df0397bf8a32978b759152e205f644b" - name = "github.com/PuerkitoBio/urlesc" - packages = ["."] - pruneopts = "UT" - revision = "de5bf2ad457846296e2031421a34e2568e304e35" - -[[projects]] - branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "UT" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - pruneopts = "UT" - revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" - version = "v3.2.0" - -[[projects]] - digest = "1:899234af23e5793c34e06fd397f86ba33af5307b959b6a7afd19b63db065a9d7" - name = "github.com/emicklei/go-restful" - packages = [ - ".", - "log", - ] - pruneopts = "UT" - revision = "3eb9738c1697594ea6e71a7156a9bb32ed216cf0" - version = "v2.8.0" - -[[projects]] - digest = "1:f1f2bd73c025d24c3b93abf6364bccb802cf2fdedaa44360804c67800e8fab8d" - name = "github.com/evanphx/json-patch" - packages = ["."] - pruneopts = "UT" - revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5" - version = "v4.1.0" - -[[projects]] - branch = "master" - digest = "1:99f994f4c2f71aeea1810d6061f3b52f3d2ee0084cfcb1ebbf40e00a0d0666f6" - name = "github.com/go-logr/logr" - packages = [ - ".", - "testing", - ] - pruneopts = "UT" - revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" - -[[projects]] - digest = "1:ce43ad4015e7cdad3f0e8f2c8339439dd4470859a828d2a6988b0f713699e94a" - name = "github.com/go-logr/zapr" - packages = ["."] - pruneopts = "UT" - revision = "7536572e8d55209135cd5e7ccf7fce43dca217ab" - -[[projects]] - digest = "1:2997679181d901ac8aaf4330d11138ecf3974c6d3334995ff36f20cbd597daf8" - name = "github.com/go-openapi/jsonpointer" - packages = ["."] - pruneopts = "UT" - revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2" - version = "0.16.0" - -[[projects]] - digest = "1:1ae3f233d75a731b164ca9feafd8ed646cbedf1784095876ed6988ce8aa88b1f" - name = "github.com/go-openapi/jsonreference" - packages = ["."] - pruneopts = "UT" - revision = "3fb327e6747da3043567ee86abd02bb6376b6be2" - version = "0.16.0" - -[[projects]] - digest = "1:3e5bdbd2a071c72c778c28fd7b5dfde95cdfbcef412f364377e031877205e418" - name = "github.com/go-openapi/spec" - packages = ["."] - 
pruneopts = "UT" - revision = "384415f06ee238aae1df5caad877de6ceac3a5c4" - version = "0.16.0" - -[[projects]] - digest = "1:c80984d4a9bb79539743aff5af91b595d84f513700150b0ed73c1697d1200d54" - name = "github.com/go-openapi/swag" - packages = ["."] - pruneopts = "UT" - revision = "becd2f08beafcca035645a8a101e0e3e18140458" - version = "0.16.0" - -[[projects]] - digest = "1:34e709f36fd4f868fb00dbaf8a6cab4c1ae685832d392874ba9d7c5dec2429d1" - name = "github.com/gogo/protobuf" - packages = [ - "proto", - "sortkeys", - ] - pruneopts = "UT" - revision = "636bf0302bc95575d69441b25a2603156ffdddf1" - version = "v1.1.1" - -[[projects]] - branch = "master" - digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8" - name = "github.com/golang/groupcache" - packages = ["lru"] - pruneopts = "UT" - revision = "24b0969c4cb722950103eed87108c8d291a8df00" - -[[projects]] - digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" - version = "v1.2.0" - -[[projects]] - branch = "master" - digest = "1:3ee90c0d94da31b442dde97c99635aaafec68d0b8a3c12ee2075c6bdabeec6bb" - name = "github.com/google/gofuzz" - packages = ["."] - pruneopts = "UT" - revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" - -[[projects]] - digest = "1:65c4414eeb350c47b8de71110150d0ea8a281835b1f386eacaa3ad7325929c21" - name = "github.com/googleapis/gnostic" - packages = [ - "OpenAPIv2", - "compiler", - "extensions", - ] - pruneopts = "UT" - revision = "7c663266750e7d82587642f65e60bc4083f1f84e" - version = "v0.2.0" - -[[projects]] - branch = "master" - digest = "1:349090dc3e864fe83388be8e8949df6e22607c477644abcc3b530d1b2c7bb714" - name = "github.com/gophercloud/gophercloud" - packages = [ - ".", - "openstack", - "openstack/identity/v2/tenants", - "openstack/identity/v2/tokens", - "openstack/identity/v3/tokens", - "openstack/utils", - "pagination", - ] - pruneopts = "UT" - revision = "199db9a3a2bfa55802ab074ab8fed67fce2a2791" - -[[projects]] - branch = "master" - digest = "1:cf296baa185baae04a9a7004efee8511d08e2f5f51d4cbe5375da89722d681db" - name = "github.com/hashicorp/golang-lru" - packages = [ - ".", - "simplelru", - ] - pruneopts = "UT" - revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" - -[[projects]] - digest = "1:a1038ef593beb4771c8f0f9c26e8b00410acd800af5c6864651d9bf160ea1813" - name = "github.com/hpcloud/tail" - packages = [ - ".", - "ratelimiter", - "util", - "watch", - "winfile", - ] - pruneopts = "UT" - revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5" - version = "v1.0.0" - -[[projects]] - digest = "1:8eb1de8112c9924d59bf1d3e5c26f5eaa2bfc2a5fcbb92dc1c2e4546d695f277" - name = "github.com/imdario/mergo" - packages = ["."] - pruneopts = "UT" - revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4" - version = "v0.3.6" - -[[projects]] - digest = "1:3e551bbb3a7c0ab2a2bf4660e7fcad16db089fdcfbb44b0199e62838038623ea" - name = "github.com/json-iterator/go" - packages = ["."] - pruneopts = "UT" - revision = "1624edc4454b8682399def8740d46db5e4362ba4" - version = "1.1.5" - -[[projects]] - branch = "master" - digest = "1:84a5a2b67486d5d67060ac393aa255d05d24ed5ee41daecd5635ec22657b6492" - name = "github.com/mailru/easyjson" - packages = [ - "buffer", - "jlexer", - "jwriter", - ] - pruneopts = "UT" - revision = "60711f1a8329503b04e1c88535f419d0bb440bff" - -[[projects]] - digest = 
"1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "UT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" - name = "github.com/modern-go/concurrent" - packages = ["."] - pruneopts = "UT" - revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" - version = "1.0.3" - -[[projects]] - digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855" - name = "github.com/modern-go/reflect2" - packages = ["."] - pruneopts = "UT" - revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" - version = "1.0.1" - -[[projects]] - digest = "1:42e29deef12327a69123b9cb2cb45fee4af5c12c2a23c6e477338279a052703f" - name = "github.com/onsi/ginkgo" - packages = [ - ".", - "config", - "internal/codelocation", - "internal/containernode", - "internal/failer", - "internal/leafnodes", - "internal/remote", - "internal/spec", - "internal/spec_iterator", - "internal/specrunner", - "internal/suite", - "internal/testingtproxy", - "internal/writer", - "reporters", - "reporters/stenographer", - "reporters/stenographer/support/go-colorable", - "reporters/stenographer/support/go-isatty", - "types", - ] - pruneopts = "UT" - revision = "3774a09d95489ccaa16032e0770d08ea77ba6184" - version = "v1.6.0" - -[[projects]] - digest = "1:58418daa018f162c520512737c19ecd42582524b0f900f2eee7c212ce55cf0da" - name = "github.com/onsi/gomega" - packages = [ - ".", - "format", - "gbytes", - "gexec", - "internal/assertion", - "internal/asyncassertion", - "internal/oraclematcher", - "internal/testingtsupport", - "matchers", - "matchers/support/goraph/bipartitegraph", - "matchers/support/goraph/edge", - "matchers/support/goraph/node", - "matchers/support/goraph/util", - "types", - ] - pruneopts = "UT" - revision = "b6ea1ea48f981d0f615a154a45eabb9dd466556d" - version = "v1.4.1" - -[[projects]] - digest = "1:361de06aa7ae272616cbe71c3994a654cc6316324e30998e650f7765b20c5b33" - name = "github.com/pborman/uuid" - packages = ["."] - pruneopts = "UT" - revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53" - version = "v1.1" - -[[projects]] - digest = "1:20a8a18488bdef471795af0413d9a3c9c35dc24ca93342329b884ba3c15cbaab" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/internal", - "prometheus/promhttp", - ] - pruneopts = "UT" - revision = "1cafe34db7fdec6022e17e00e1c1ea501022f3e4" - version = "v0.9.0" - -[[projects]] - branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "UT" - revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "UT" - revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" - -[[projects]] - branch = "master" - digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs", - ] - pruneopts = "UT" - revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" - -[[projects]] - digest = "1:dab83a1bbc7ad3d7a6ba1a1cc1760f25ac38cdf7d96a5cdd55cd915a4f5ceaf9" - name = 
"github.com/spf13/pflag" - packages = ["."] - pruneopts = "UT" - revision = "9a97c102cda95a86cec2345a6f09f55a939babf5" - version = "v1.0.2" - -[[projects]] - digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d" - name = "go.uber.org/atomic" - packages = ["."] - pruneopts = "UT" - revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" - version = "v1.3.2" - -[[projects]] - digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" - name = "go.uber.org/multierr" - packages = ["."] - pruneopts = "UT" - revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" - version = "v1.1.0" - -[[projects]] - digest = "1:c52caf7bd44f92e54627a31b85baf06a68333a196b3d8d241480a774733dcf8b" - name = "go.uber.org/zap" - packages = [ - ".", - "buffer", - "internal/bufferpool", - "internal/color", - "internal/exit", - "zapcore", - ] - pruneopts = "UT" - revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982" - version = "v1.9.1" - -[[projects]] - branch = "master" - digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" - name = "golang.org/x/crypto" - packages = ["ssh/terminal"] - pruneopts = "UT" - revision = "614d502a4dac94afa3a6ce146bd1736da82514c6" - -[[projects]] - branch = "master" - digest = "1:ff58bc124488348ba2ca02f62de899d07ba3e7dce51da02557bfc9a8052608db" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - "html", - "html/atom", - "html/charset", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - ] - pruneopts = "UT" - revision = "922f4815f713f213882e8ef45e0d315b164d705c" - -[[projects]] - branch = "master" - digest = "1:f645667d687fc8bf228865a2c5455824ef05bad08841e673673ef2bb89ac5b90" - name = "golang.org/x/oauth2" - packages = [ - ".", - "google", - "internal", - "jws", - "jwt", - ] - pruneopts = "UT" - revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" - -[[projects]] - branch = "master" - digest = "1:7710ea76bd73d3154e86eaf2db8b36a6dffbd6114e4e37b516f1d232c03ddd8d" - name = "golang.org/x/sys" - packages = [ - "unix", - "windows", - ] - pruneopts = "UT" - revision = "11551d06cbcc94edc80a0facaccbda56473c19c1" - -[[projects]] - digest = "1:bb8277a2ca2bcad6ff7f413b939375924099be908cedd1314baa21ecd08df477" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "encoding", - "encoding/charmap", - "encoding/htmlindex", - "encoding/internal", - "encoding/internal/identifier", - "encoding/japanese", - "encoding/korean", - "encoding/simplifiedchinese", - "encoding/traditionalchinese", - "encoding/unicode", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "internal/utf8internal", - "language", - "runes", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - "width", - ] - pruneopts = "UT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4" - name = "golang.org/x/time" - packages = ["rate"] - pruneopts = "UT" - revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" - -[[projects]] - digest = "1:7d4fd782f2a710d08834b2c01742b3bba7fb0248383f9cc6c3dc95b3025689d7" - name = "gomodules.xyz/jsonpatch" - packages = ["v2"] - pruneopts = "UT" - revision = "e8422f09d27ee2c8cfb2c7f8089eb9eeb0764849" - version = "v2.0.0" - -[[projects]] - digest = "1:c8907869850adaa8bd7631887948d0684f3787d0912f1c01ab72581a6c34432e" - name = "google.golang.org/appengine" 
- packages = [ - ".", - "internal", - "internal/app_identity", - "internal/base", - "internal/datastore", - "internal/log", - "internal/modules", - "internal/remote_api", - "internal/urlfetch", - "urlfetch", - ] - pruneopts = "UT" - revision = "b1f26356af11148e710935ed1ac8a7f5702c7612" - version = "v1.1.0" - -[[projects]] - digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" - name = "gopkg.in/fsnotify.v1" - packages = ["."] - pruneopts = "UT" - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - source = "https://github.com/fsnotify/fsnotify.git" - version = "v1.4.7" - -[[projects]] - digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" - name = "gopkg.in/inf.v0" - packages = ["."] - pruneopts = "UT" - revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" - version = "v0.9.1" - -[[projects]] - branch = "v1" - digest = "1:0caa92e17bc0b65a98c63e5bc76a9e844cd5e56493f8fdbb28fad101a16254d9" - name = "gopkg.in/tomb.v1" - packages = ["."] - pruneopts = "UT" - revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" - -[[projects]] - digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "UT" - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[[projects]] - digest = "1:baf5a7da864b8f493ff372a9486c2d6a7fae8363699af2dc0ef6dba9a869928f" - name = "k8s.io/api" - packages = [ - "admission/v1beta1", - "admissionregistration/v1beta1", - "apps/v1", - "apps/v1beta1", - "apps/v1beta2", - "auditregistration/v1alpha1", - "authentication/v1", - "authentication/v1beta1", - "authorization/v1", - "authorization/v1beta1", - "autoscaling/v1", - "autoscaling/v2beta1", - "autoscaling/v2beta2", - "batch/v1", - "batch/v1beta1", - "batch/v2alpha1", - "certificates/v1beta1", - "coordination/v1", - "coordination/v1beta1", - "core/v1", - "events/v1beta1", - "extensions/v1beta1", - "networking/v1", - "networking/v1beta1", - "node/v1alpha1", - "node/v1beta1", - "policy/v1beta1", - "rbac/v1", - "rbac/v1alpha1", - "rbac/v1beta1", - "scheduling/v1", - "scheduling/v1alpha1", - "scheduling/v1beta1", - "settings/v1alpha1", - "storage/v1", - "storage/v1alpha1", - "storage/v1beta1", - ] - pruneopts = "UT" - revision = "6e4e0e4f393bf5e8bbff570acd13217aa5a770cd" - version = "kubernetes-1.14.1" - -[[projects]] - digest = "1:65226935ef0a84716e327aa46c5a11cf2158ca6d0c79b379202d0b2c2be0e9bc" - name = "k8s.io/apiextensions-apiserver" - packages = [ - "pkg/apis/apiextensions", - "pkg/apis/apiextensions/v1beta1", - "pkg/client/clientset/clientset", - "pkg/client/clientset/clientset/scheme", - "pkg/client/clientset/clientset/typed/apiextensions/v1beta1", - ] - pruneopts = "UT" - revision = "727a075fdec8319bf095330e344b3ccc668abc73" - version = "kubernetes-1.14.1" - -[[projects]] - digest = "1:2616411ee46c05b1488f127c171e9204695d15d1e654fe79ea2d204f96f28070" - name = "k8s.io/apimachinery" - packages = [ - "pkg/api/errors", - "pkg/api/meta", - "pkg/api/resource", - "pkg/apis/meta/internalversion", - "pkg/apis/meta/v1", - "pkg/apis/meta/v1/unstructured", - "pkg/apis/meta/v1beta1", - "pkg/conversion", - "pkg/conversion/queryparams", - "pkg/fields", - "pkg/labels", - "pkg/runtime", - "pkg/runtime/schema", - "pkg/runtime/serializer", - "pkg/runtime/serializer/json", - "pkg/runtime/serializer/protobuf", - "pkg/runtime/serializer/recognizer", - "pkg/runtime/serializer/streaming", - "pkg/runtime/serializer/versioning", - "pkg/selection", - "pkg/types", - "pkg/util/cache", - 
"pkg/util/clock", - "pkg/util/diff", - "pkg/util/errors", - "pkg/util/framer", - "pkg/util/intstr", - "pkg/util/json", - "pkg/util/mergepatch", - "pkg/util/naming", - "pkg/util/net", - "pkg/util/runtime", - "pkg/util/sets", - "pkg/util/strategicpatch", - "pkg/util/uuid", - "pkg/util/validation", - "pkg/util/validation/field", - "pkg/util/wait", - "pkg/util/yaml", - "pkg/version", - "pkg/watch", - "third_party/forked/golang/json", - "third_party/forked/golang/reflect", - ] - pruneopts = "UT" - revision = "6a84e37a896db9780c75367af8d2ed2bb944022e" - version = "kubernetes-1.14.1" - -[[projects]] - digest = "1:00f288473b016529887e152dad2465482cad64b233298d58f1526385505b27db" - name = "k8s.io/client-go" - packages = [ - "discovery", - "dynamic", - "informers", - "informers/admissionregistration", - "informers/admissionregistration/v1beta1", - "informers/apps", - "informers/apps/v1", - "informers/apps/v1beta1", - "informers/apps/v1beta2", - "informers/auditregistration", - "informers/auditregistration/v1alpha1", - "informers/autoscaling", - "informers/autoscaling/v1", - "informers/autoscaling/v2beta1", - "informers/autoscaling/v2beta2", - "informers/batch", - "informers/batch/v1", - "informers/batch/v1beta1", - "informers/batch/v2alpha1", - "informers/certificates", - "informers/certificates/v1beta1", - "informers/coordination", - "informers/coordination/v1", - "informers/coordination/v1beta1", - "informers/core", - "informers/core/v1", - "informers/events", - "informers/events/v1beta1", - "informers/extensions", - "informers/extensions/v1beta1", - "informers/internalinterfaces", - "informers/networking", - "informers/networking/v1", - "informers/networking/v1beta1", - "informers/node", - "informers/node/v1alpha1", - "informers/node/v1beta1", - "informers/policy", - "informers/policy/v1beta1", - "informers/rbac", - "informers/rbac/v1", - "informers/rbac/v1alpha1", - "informers/rbac/v1beta1", - "informers/scheduling", - "informers/scheduling/v1", - "informers/scheduling/v1alpha1", - "informers/scheduling/v1beta1", - "informers/settings", - "informers/settings/v1alpha1", - "informers/storage", - "informers/storage/v1", - "informers/storage/v1alpha1", - "informers/storage/v1beta1", - "kubernetes", - "kubernetes/scheme", - "kubernetes/typed/admissionregistration/v1beta1", - "kubernetes/typed/apps/v1", - "kubernetes/typed/apps/v1beta1", - "kubernetes/typed/apps/v1beta2", - "kubernetes/typed/auditregistration/v1alpha1", - "kubernetes/typed/authentication/v1", - "kubernetes/typed/authentication/v1beta1", - "kubernetes/typed/authorization/v1", - "kubernetes/typed/authorization/v1beta1", - "kubernetes/typed/autoscaling/v1", - "kubernetes/typed/autoscaling/v2beta1", - "kubernetes/typed/autoscaling/v2beta2", - "kubernetes/typed/batch/v1", - "kubernetes/typed/batch/v1beta1", - "kubernetes/typed/batch/v2alpha1", - "kubernetes/typed/certificates/v1beta1", - "kubernetes/typed/coordination/v1", - "kubernetes/typed/coordination/v1beta1", - "kubernetes/typed/core/v1", - "kubernetes/typed/events/v1beta1", - "kubernetes/typed/extensions/v1beta1", - "kubernetes/typed/networking/v1", - "kubernetes/typed/networking/v1beta1", - "kubernetes/typed/node/v1alpha1", - "kubernetes/typed/node/v1beta1", - "kubernetes/typed/policy/v1beta1", - "kubernetes/typed/rbac/v1", - "kubernetes/typed/rbac/v1alpha1", - "kubernetes/typed/rbac/v1beta1", - "kubernetes/typed/scheduling/v1", - "kubernetes/typed/scheduling/v1alpha1", - "kubernetes/typed/scheduling/v1beta1", - "kubernetes/typed/settings/v1alpha1", - "kubernetes/typed/storage/v1", 
- "kubernetes/typed/storage/v1alpha1", - "kubernetes/typed/storage/v1beta1", - "listers/admissionregistration/v1beta1", - "listers/apps/v1", - "listers/apps/v1beta1", - "listers/apps/v1beta2", - "listers/auditregistration/v1alpha1", - "listers/autoscaling/v1", - "listers/autoscaling/v2beta1", - "listers/autoscaling/v2beta2", - "listers/batch/v1", - "listers/batch/v1beta1", - "listers/batch/v2alpha1", - "listers/certificates/v1beta1", - "listers/coordination/v1", - "listers/coordination/v1beta1", - "listers/core/v1", - "listers/events/v1beta1", - "listers/extensions/v1beta1", - "listers/networking/v1", - "listers/networking/v1beta1", - "listers/node/v1alpha1", - "listers/node/v1beta1", - "listers/policy/v1beta1", - "listers/rbac/v1", - "listers/rbac/v1alpha1", - "listers/rbac/v1beta1", - "listers/scheduling/v1", - "listers/scheduling/v1alpha1", - "listers/scheduling/v1beta1", - "listers/settings/v1alpha1", - "listers/storage/v1", - "listers/storage/v1alpha1", - "listers/storage/v1beta1", - "pkg/apis/clientauthentication", - "pkg/apis/clientauthentication/v1alpha1", - "pkg/apis/clientauthentication/v1beta1", - "pkg/version", - "plugin/pkg/client/auth", - "plugin/pkg/client/auth/azure", - "plugin/pkg/client/auth/exec", - "plugin/pkg/client/auth/gcp", - "plugin/pkg/client/auth/oidc", - "plugin/pkg/client/auth/openstack", - "rest", - "rest/watch", - "restmapper", - "testing", - "third_party/forked/golang/template", - "tools/auth", - "tools/cache", - "tools/clientcmd", - "tools/clientcmd/api", - "tools/clientcmd/api/latest", - "tools/clientcmd/api/v1", - "tools/leaderelection", - "tools/leaderelection/resourcelock", - "tools/metrics", - "tools/pager", - "tools/record", - "tools/record/util", - "tools/reference", - "transport", - "util/cert", - "util/connrotation", - "util/flowcontrol", - "util/homedir", - "util/jsonpath", - "util/keyutil", - "util/retry", - "util/workqueue", - ] - pruneopts = "UT" - revision = "1a26190bd76a9017e289958b9fba936430aa3704" - version = "kubernetes-1.14.1" - -[[projects]] - digest = "1:e2999bf1bb6eddc2a6aa03fe5e6629120a53088926520ca3b4765f77d7ff7eab" - name = "k8s.io/klog" - packages = ["."] - pruneopts = "UT" - revision = "a5bc97fbc634d635061f3146511332c7e313a55a" - version = "v0.1.0" - -[[projects]] - branch = "master" - digest = "1:0dac10b488f2671c15738b662478d665b0c0ec7cae3362669dec172147331ce5" - name = "k8s.io/kube-openapi" - packages = [ - "pkg/common", - "pkg/util/proto", - ] - pruneopts = "UT" - revision = "e3762e86a74c878ffed47484592986685639c2cd" - -[[projects]] - branch = "master" - digest = "1:ee6063c66a2b3be97487a8df6f359cd5f38b3a0d1fc2442b49f214f24622d321" - name = "k8s.io/utils" - packages = [ - "buffer", - "integer", - "pointer", - "trace", - ] - pruneopts = "UT" - revision = "21c4ce38f2a793ec01e925ddc31216500183b773" - -[[projects]] - digest = "1:9070222ca967d09b3966552a161dd4420d62315964bf5e1efd8cc4c7c30ebca8" - name = "sigs.k8s.io/testing_frameworks" - packages = [ - "integration", - "integration/addr", - "integration/internal", - ] - pruneopts = "UT" - revision = "d348cb12705b516376e0c323bacca72b00a78425" - version = "v0.1.1" - -[[projects]] - digest = "1:7719608fe0b52a4ece56c2dde37bedd95b938677d1ab0f84b8a7852e4c59f849" - name = "sigs.k8s.io/yaml" - packages = ["."] - pruneopts = "UT" - revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" - version = "v1.1.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/emicklei/go-restful", - "github.com/evanphx/json-patch", - "github.com/go-logr/logr", - 
"github.com/go-logr/logr/testing", - "github.com/go-logr/zapr", - "github.com/go-openapi/spec", - "github.com/onsi/ginkgo", - "github.com/onsi/ginkgo/config", - "github.com/onsi/ginkgo/types", - "github.com/onsi/gomega", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "github.com/prometheus/client_model/go", - "github.com/spf13/pflag", - "go.uber.org/zap", - "go.uber.org/zap/buffer", - "go.uber.org/zap/zapcore", - "gomodules.xyz/jsonpatch/v2", - "gopkg.in/fsnotify.v1", - "k8s.io/api/admission/v1beta1", - "k8s.io/api/apps/v1", - "k8s.io/api/apps/v1beta1", - "k8s.io/api/autoscaling/v1", - "k8s.io/api/core/v1", - "k8s.io/api/extensions/v1beta1", - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", - "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/api/meta", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "k8s.io/apimachinery/pkg/fields", - "k8s.io/apimachinery/pkg/labels", - "k8s.io/apimachinery/pkg/runtime", - "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/runtime/serializer", - "k8s.io/apimachinery/pkg/selection", - "k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/json", - "k8s.io/apimachinery/pkg/util/runtime", - "k8s.io/apimachinery/pkg/util/sets", - "k8s.io/apimachinery/pkg/util/uuid", - "k8s.io/apimachinery/pkg/util/wait", - "k8s.io/apimachinery/pkg/util/yaml", - "k8s.io/apimachinery/pkg/watch", - "k8s.io/client-go/discovery", - "k8s.io/client-go/dynamic", - "k8s.io/client-go/informers", - "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/scheme", - "k8s.io/client-go/kubernetes/typed/core/v1", - "k8s.io/client-go/plugin/pkg/client/auth", - "k8s.io/client-go/plugin/pkg/client/auth/gcp", - "k8s.io/client-go/rest", - "k8s.io/client-go/restmapper", - "k8s.io/client-go/testing", - "k8s.io/client-go/tools/cache", - "k8s.io/client-go/tools/clientcmd", - "k8s.io/client-go/tools/clientcmd/api", - "k8s.io/client-go/tools/leaderelection", - "k8s.io/client-go/tools/leaderelection/resourcelock", - "k8s.io/client-go/tools/metrics", - "k8s.io/client-go/tools/record", - "k8s.io/client-go/tools/reference", - "k8s.io/client-go/util/workqueue", - "k8s.io/kube-openapi/pkg/common", - "k8s.io/utils/pointer", - "sigs.k8s.io/testing_frameworks/integration", - "sigs.k8s.io/testing_frameworks/integration/addr", - "sigs.k8s.io/yaml", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/sigs.k8s.io/controller-runtime/Gopkg.toml b/vendor/sigs.k8s.io/controller-runtime/Gopkg.toml deleted file mode 100644 index 81598f106bbe0..0000000000000 --- a/vendor/sigs.k8s.io/controller-runtime/Gopkg.toml +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2018 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Packages required by users -# NB(directxman12): be very careful how you word these -- -# dep considers them as a dependency on the package you list -# meaning that if there's a main.go in the root package -# (like in apiextensions-apiserver), you'll pull it's deps in. -required = ["sigs.k8s.io/testing_frameworks/integration", - "k8s.io/client-go/plugin/pkg/client/auth", - "github.com/spf13/pflag", - "github.com/emicklei/go-restful", - "github.com/go-openapi/spec", - "k8s.io/kube-openapi/pkg/common", - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", - "github.com/prometheus/client_golang/prometheus", - ] - -[[constraint]] - name = "k8s.io/api" - version = "kubernetes-1.14.1" - -[[constraint]] - name = "k8s.io/apiextensions-apiserver" - version = "kubernetes-1.14.1" - -[[constraint]] - name = "k8s.io/apimachinery" - version = "kubernetes-1.14.1" - -[[constraint]] - name = "k8s.io/client-go" - version = "kubernetes-1.14.1" - -[[constraint]] - name = "sigs.k8s.io/testing_frameworks" - version = "v0.1.1" - -[[constraint]] - name = "github.com/onsi/ginkgo" - version = "v1.5.0" - -[[constraint]] - name = "github.com/onsi/gomega" - version = "v1.4.0" - -[[constraint]] - name = "sigs.k8s.io/yaml" - version = "1.1.0" - -[[constraint]] - name = "go.uber.org/zap" - version = "1.8.0" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = "0.9.0" - -# these are not listed explicitly until we get version tags, -# since dep doesn't like bare revision dependencies - -# [[constraint]] -# name = "github.com/go-logr/logr" -# -# [[constraint]] -# name = "github.com/go-logr/zapr" - -# For dependency below: Refer to issue https://github.com/golang/dep/issues/1799 -[[override]] -name = "gopkg.in/fsnotify.v1" -source = "https://github.com/fsnotify/fsnotify.git" -version="v1.4.7" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md b/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md index 55fb5d7ff482d..3638aae60e922 100644 --- a/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md +++ b/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md @@ -75,10 +75,10 @@ filters and checks for changes tagged as breaking (see below). ### Tooling -* [release-notes.sh](hack/release-notes.sh): generate release notes +* [release-notes.sh](hack/release/release-notes.sh): generate release notes for a range of commits, and check for next version type (***TODO***) -* [verify-commit-messages.sh](hack/verify-commit-messages.sh): check that +* [verify-emoji.sh](hack/release/verify-emoji.sh): check that your PR and/or commit messages have the right versioning icon (***TODO***). 
diff --git a/vendor/sigs.k8s.io/controller-runtime/go.mod b/vendor/sigs.k8s.io/controller-runtime/go.mod
index 7cec2234d6729..e31d7a58ce5ea 100644
--- a/vendor/sigs.k8s.io/controller-runtime/go.mod
+++ b/vendor/sigs.k8s.io/controller-runtime/go.mod
@@ -1,50 +1,31 @@
 module sigs.k8s.io/controller-runtime
 
-go 1.11
+go 1.13
 
 require (
-	cloud.google.com/go v0.26.0 // indirect
-	github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect
 	github.com/evanphx/json-patch v4.5.0+incompatible
 	github.com/go-logr/logr v0.1.0
 	github.com/go-logr/zapr v0.1.0
-	github.com/gogo/protobuf v1.1.1 // indirect
 	github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 // indirect
-	github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect
 	github.com/googleapis/gnostic v0.3.1 // indirect
-	github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 // indirect
 	github.com/imdario/mergo v0.3.6 // indirect
-	github.com/json-iterator/go v1.1.5 // indirect
-	github.com/kr/pretty v0.1.0 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect
-	github.com/onsi/ginkgo v1.6.0
-	github.com/onsi/gomega v1.4.2
-	github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c // indirect
-	github.com/prometheus/client_golang v0.9.0
+	github.com/onsi/ginkgo v1.8.0
+	github.com/onsi/gomega v1.5.0
+	github.com/prometheus/client_golang v0.9.2
 	github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
-	github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e // indirect
-	github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 // indirect
-	github.com/spf13/pflag v1.0.2 // indirect
 	go.uber.org/atomic v1.3.2 // indirect
 	go.uber.org/multierr v1.1.0 // indirect
 	go.uber.org/zap v1.9.1
-	golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac // indirect
-	golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be // indirect
-	golang.org/x/sync v0.0.0-20190423024810-112230192c58 // indirect
-	golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 // indirect
+	golang.org/x/time v0.0.0-20181108054448-85acf8d2951c
+	golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7
 	gomodules.xyz/jsonpatch/v2 v2.0.1
-	google.golang.org/appengine v1.1.0 // indirect
-	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
 	gopkg.in/fsnotify.v1 v1.4.7
 	gopkg.in/inf.v0 v0.9.1 // indirect
-	k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b
-	k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8
-	k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d
-	k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
-	k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c // indirect
-	k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 // indirect
-	sigs.k8s.io/testing_frameworks v0.1.1
+	k8s.io/api v0.0.0-20190918155943-95b840bb6a1f
+	k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783
+	k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655
+	k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90
+	k8s.io/utils v0.0.0-20190801114015-581e00157fb1
+	sigs.k8s.io/testing_frameworks v0.1.2
 	sigs.k8s.io/yaml v1.1.0
 )
diff --git a/vendor/sigs.k8s.io/controller-runtime/go.sum b/vendor/sigs.k8s.io/controller-runtime/go.sum
index c45ebc20bb74a..8f6a90aac65ad 100644
--- a/vendor/sigs.k8s.io/controller-runtime/go.sum
+++ b/vendor/sigs.k8s.io/controller-runtime/go.sum
@@ -1,129 +1,387 @@
-cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors 
v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= -github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus 
v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= -github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= -github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY= 
-github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac h1:7d7lG9fHOLdL6jZPtnV4LpI41SbohIJ1Atq7U991dMg= -golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68= +golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313 h1:pczuHS43Cp2ktBEEmLwScxgjWsBSzdaQiKzUyf3DTTc= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo= -k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod 
h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA= -k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= -k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA= -k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ= -k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.0.0-20190918155943-95b840bb6a1f h1:8FRUST8oUkEI45WYKyD8ed7Ad0Kg5v11zHyPkEVb2xo= +k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= +k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783 h1:V6ndwCPoao1yZ52agqOKaUAl7DYWVGiXjV7ePA2i610= +k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= +k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655 h1:CS1tBQz3HOXiseWZu6ZicKX361CZLT97UFnnPx0aqBw= +k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= +k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= +k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 h1:mLmhKUm1X+pXu0zXMEzNsOF5E2kKFGe5o6BZBIIqA6A= +k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= +k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= +k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM= -k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y= -k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= -sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= +k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= +k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= 
+k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ= +k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= +sigs.k8s.io/testing_frameworks v0.1.2 h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH+UQM= +sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go index b906a2e08f9fe..786609bd16aa3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -38,6 +38,13 @@ var ( _ Cache = &informerCache{} ) +// ErrCacheNotStarted is returned when trying to read from the cache that wasn't started. +type ErrCacheNotStarted struct{} + +func (*ErrCacheNotStarted) Error() string { + return "the cache is not started, can not read objects" +} + // informerCache is a Kubernetes Object cache populated from InformersMap. informerCache wraps an InformersMap. type informerCache struct { *internal.InformersMap @@ -50,10 +57,14 @@ func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out runt return err } - cache, err := ip.InformersMap.Get(gvk, out) + started, cache, err := ip.InformersMap.Get(gvk, out) if err != nil { return err } + + if !started { + return &ErrCacheNotStarted{} + } return cache.Reader.Get(ctx, key, out) } @@ -90,11 +101,15 @@ func (ip *informerCache) List(ctx context.Context, out runtime.Object, opts ...c } } - cache, err := ip.InformersMap.Get(gvk, cacheTypeObj) + started, cache, err := ip.InformersMap.Get(gvk, cacheTypeObj) if err != nil { return err } + if !started { + return &ErrCacheNotStarted{} + } + return cache.Reader.List(ctx, out, opts...) 
} @@ -105,7 +120,7 @@ func (ip *informerCache) GetInformerForKind(gvk schema.GroupVersionKind) (Inform if err != nil { return nil, err } - i, err := ip.InformersMap.Get(gvk, obj) + _, i, err := ip.InformersMap.Get(gvk, obj) if err != nil { return nil, err } @@ -118,13 +133,19 @@ func (ip *informerCache) GetInformer(obj runtime.Object) (Informer, error) { if err != nil { return nil, err } - i, err := ip.InformersMap.Get(gvk, obj) + _, i, err := ip.InformersMap.Get(gvk, obj) if err != nil { return nil, err } return i.Informer, err } +// NeedLeaderElection implements the LeaderElectionRunnable interface +// to indicate that this can be started without requiring the leader lock +func (ip *informerCache) NeedLeaderElection() bool { + return false +} + // IndexField adds an indexer to the underlying cache, using extraction function to get // value(s) from the given field. This index can then be used by passing a field selector // to List. For one-to-one compatibility with "normal" field selectors, only return one value. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/BUILD.bazel b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/BUILD.bazel index 9767ceba6c2ba..e53d0fa986f1f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/BUILD.bazel +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/BUILD.bazel @@ -27,6 +27,5 @@ go_library( "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil:go_default_library", ], ) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go index a9b7ae347cf02..5c3fac9228cf7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -30,10 +30,9 @@ import ( "k8s.io/apimachinery/pkg/selection" "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" ) -// CacheReader is a CacheReader +// CacheReader is a client.Reader var _ client.Reader = &CacheReader{} // CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type @@ -125,15 +124,22 @@ func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client if !isObj { return fmt.Errorf("cache contained %T, which is not an Object", obj) } + meta, err := apimeta.Accessor(obj) + if err != nil { + return err + } + if labelSel != nil { + lbls := labels.Set(meta.GetLabels()) + if !labelSel.Matches(lbls) { + continue + } + } + outObj := obj.DeepCopyObject() outObj.GetObjectKind().SetGroupVersionKind(c.groupVersionKind) runtimeObjs = append(runtimeObjs, outObj) } - filteredItems, err := objectutil.FilterWithLabels(runtimeObjs, labelSel) - if err != nil { - return err - } - return apimeta.SetList(out, filteredItems) + return apimeta.SetList(out, runtimeObjs) } // objectKeyToStorageKey converts an object key to store key. 
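The informer cache changes above make InformersMap.Get report whether the underlying informer has been started, and surface that condition to callers as a typed ErrCacheNotStarted from Get and List instead of silently reading from an empty cache. A minimal sketch of how calling code might check for that condition is below; the package name, helper name, and object key are illustrative and not part of this diff.

```go
package cacheexample

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// readPod reads a Pod from a controller-runtime cache and distinguishes the
// "cache not started" case from other lookup failures.
func readPod(ctx context.Context, c cache.Cache) (*corev1.Pod, error) {
	pod := &corev1.Pod{}
	err := c.Get(ctx, types.NamespacedName{Namespace: "default", Name: "example"}, pod)
	if _, notStarted := err.(*cache.ErrCacheNotStarted); notStarted {
		// The informers have not been started yet (for example, the manager's
		// Start has not run); waiting for cache sync and retrying is appropriate.
		return nil, fmt.Errorf("cache not ready: %v", err)
	}
	return pod, err
}
```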
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go index 978002d9cf253..8e2c65f4bab83 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go @@ -73,7 +73,7 @@ func (m *InformersMap) WaitForCacheSync(stop <-chan struct{}) bool { // Get will create a new Informer and add it to the map of InformersMap if none exists. Returns // the Informer from the map. -func (m *InformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, error) { +func (m *InformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { _, isUnstructured := obj.(*unstructured.Unstructured) _, isUnstructuredList := obj.(*unstructured.UnstructuredList) isUnstructured = isUnstructured || isUnstructuredList diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go index b2787adfc8282..7345c54b494cb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go @@ -145,7 +145,7 @@ func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced { // Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns // the Informer from the map. -func (ip *specificInformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, error) { +func (ip *specificInformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { // Return the informer if it is found i, started, ok := func() (*MapEntry, bool, bool) { ip.mu.RLock() @@ -157,18 +157,18 @@ func (ip *specificInformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Obj if !ok { var err error if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { - return nil, err + return started, nil, err } } if started && !i.Informer.HasSynced() { // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. 
if !cache.WaitForCacheSync(ip.stop, i.Informer.HasSynced) { - return nil, fmt.Errorf("failed waiting for %T Informer to sync", obj) + return started, nil, fmt.Errorf("failed waiting for %T Informer to sync", obj) } } - return i, nil + return started, i, nil } func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/BUILD.bazel b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/BUILD.bazel index 5f364ff33b0c1..531cb74606aee 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/BUILD.bazel +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/BUILD.bazel @@ -2,11 +2,16 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", - srcs = ["apimachinery.go"], + srcs = [ + "apimachinery.go", + "dynamicrestmapper.go", + ], importmap = "k8s.io/kops/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil", importpath = "sigs.k8s.io/controller-runtime/pkg/client/apiutil", visibility = ["//visibility:public"], deps = [ + "//vendor/golang.org/x/time/rate:go_default_library", + "//vendor/golang.org/x/xerrors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index cf775c3dc89fc..157f11f7c32c2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -35,7 +35,10 @@ import ( // information fetched by a new client with the given config. func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { // Get a mapper - dc := discovery.NewDiscoveryClientForConfigOrDie(c) + dc, err := discovery.NewDiscoveryClientForConfig(c) + if err != nil { + return nil, err + } gr, err := restmapper.GetAPIGroupResources(dc) if err != nil { return nil, err @@ -71,7 +74,7 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi func RESTClientForGVK(gvk schema.GroupVersionKind, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { cfg := createRestConfig(gvk, baseConfig) if cfg.NegotiatedSerializer == nil { - cfg.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: codecs} + cfg.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: codecs} } return rest.RESTClientFor(cfg) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go new file mode 100644 index 0000000000000..4588fb06cb569 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go @@ -0,0 +1,322 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "sync" + "time" + + "golang.org/x/time/rate" + "golang.org/x/xerrors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +// ErrRateLimited is returned by a RESTMapper method if the number of API +// calls has exceeded a limit within a certain time period. +type ErrRateLimited struct { + // Duration to wait until the next API call can be made. + Delay time.Duration +} + +func (e ErrRateLimited) Error() string { + return "too many API calls to the RESTMapper within a timeframe" +} + +// DelayIfRateLimited returns the delay time until the next API call is +// allowed and true if err is of type ErrRateLimited. The zero +// time.Duration value and false are returned if err is not a ErrRateLimited. +func DelayIfRateLimited(err error) (time.Duration, bool) { + var rlerr ErrRateLimited + if xerrors.As(err, &rlerr) { + return rlerr.Delay, true + } + return 0, false +} + +// dynamicRESTMapper is a RESTMapper that dynamically discovers resource +// types at runtime. +type dynamicRESTMapper struct { + mu sync.RWMutex // protects the following fields + staticMapper meta.RESTMapper + limiter *dynamicLimiter + newMapper func() (meta.RESTMapper, error) + + lazy bool + // Used for lazy init. + initOnce sync.Once +} + +// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper +type DynamicRESTMapperOption func(*dynamicRESTMapper) error + +// WithLimiter sets the RESTMapper's underlying limiter to lim. +func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.limiter = &dynamicLimiter{lim} + return nil + } +} + +// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings +// until an API call is made. +var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { + drm.lazy = true + return nil +} + +// WithCustomMapper supports setting a custom RESTMapper refresher instead of +// the default method, which uses a discovery client. +// +// This exists mainly for testing, but can be useful if you need tighter control +// over how discovery is performed, which discovery endpoints are queried, etc. +func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.newMapper = newMapper + return nil + } +} + +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. opts +// configure the RESTMapper. 
+func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) { + client, err := discovery.NewDiscoveryClientForConfig(cfg) + if err != nil { + return nil, err + } + drm := &dynamicRESTMapper{ + limiter: &dynamicLimiter{ + rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), + }, + newMapper: func() (meta.RESTMapper, error) { + groupResources, err := restmapper.GetAPIGroupResources(client) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(groupResources), nil + }, + } + for _, opt := range opts { + if err = opt(drm); err != nil { + return nil, err + } + } + if !drm.lazy { + if err := drm.setStaticMapper(); err != nil { + return nil, err + } + } + return drm, nil +} + +var ( + // defaultRefilRate is the default rate at which potential calls are + // added back to the "bucket" of allowed calls. + defaultRefillRate = 5 + // defaultLimitSize is the default starting/max number of potential calls + // per second. Once a call is used, it's added back to the bucket at a rate + // of defaultRefillRate per second. + defaultLimitSize = 5 +) + +// setStaticMapper sets drm's staticMapper by querying its client, regardless +// of reload backoff. +func (drm *dynamicRESTMapper) setStaticMapper() error { + newMapper, err := drm.newMapper() + if err != nil { + return err + } + drm.staticMapper = newMapper + return nil +} + +// init initializes drm only once if drm is lazy. +func (drm *dynamicRESTMapper) init() (err error) { + drm.initOnce.Do(func() { + if drm.lazy { + err = drm.setStaticMapper() + } + }) + return err +} + +// checkAndReload attempts to call the given callback, which is assumed to be dependent +// on the data in the restmapper. +// +// If the callback returns a NoKindMatchError, it will attempt to reload +// the RESTMapper's data and re-call the callback once that's occurred. +// If the callback returns any other error, the function will return immediately regardless. +// +// It will take care +// ensuring that reloads are rate-limitted and that extraneous calls aren't made. +// It's thread-safe, and worries about thread-safety for the callback (so the callback does +// not need to attempt to lock the restmapper). +func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsReload func() error) error { + // first, check the common path -- data is fresh enough + // (use an IIFE for the lock's defer) + err := func() error { + drm.mu.RLock() + defer drm.mu.RUnlock() + + return checkNeedsReload() + }() + + // NB(directxman12): `Is` and `As` have a confusing relationship -- + // `Is` is like `== or does this implement .Is`, whereas `As` says + // `can I type-assert into` + needsReload := xerrors.As(err, &needsReloadErr) + if !needsReload { + return err + } + + // if the data wasn't fresh, we'll need to try and update it, so grab the lock... + drm.mu.Lock() + defer drm.mu.Unlock() + + // ... and double-check that we didn't reload in the meantime + err = checkNeedsReload() + needsReload = xerrors.As(err, &needsReloadErr) + if !needsReload { + return err + } + + // we're still stale, so grab a rate-limit token if we can... + if err := drm.limiter.checkRate(); err != nil { + return err + } + + // ...reload... + if err := drm.setStaticMapper(); err != nil { + return err + } + + // ...and return the results of the closure regardless + return checkNeedsReload() +} + +// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors. 
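The new dynamicrestmapper.go exposes NewDynamicRESTMapper with functional options and reports throttled discovery refreshes as ErrRateLimited, which DelayIfRateLimited can unwrap into a suggested wait. A minimal sketch of constructing a lazy, rate-limited mapper and backing off on that error, assuming a *rest.Config is already in hand; the package and helper names are illustrative.

```go
package mapperexample

import (
	"time"

	"golang.org/x/time/rate"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// newLazyMapper builds a dynamic RESTMapper that defers discovery until the
// first lookup and allows at most two discovery refreshes per second.
func newLazyMapper(cfg *rest.Config) (meta.RESTMapper, error) {
	return apiutil.NewDynamicRESTMapper(cfg,
		apiutil.WithLazyDiscovery,
		apiutil.WithLimiter(rate.NewLimiter(rate.Limit(2), 2)),
	)
}

// kindForWithBackoff retries a lookup when the mapper reports it is rate
// limited, sleeping for the delay suggested by the limiter.
func kindForWithBackoff(m meta.RESTMapper, gvr schema.GroupVersionResource) (schema.GroupVersionKind, error) {
	for {
		gvk, err := m.KindFor(gvr)
		if delay, limited := apiutil.DelayIfRateLimited(err); limited {
			time.Sleep(delay)
			continue
		}
		return gvk, err
	}
}
```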
+ +func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionKind{}, err + } + var gvk schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvk, err = drm.staticMapper.KindFor(resource) + return err + }) + return gvk, err +} + +func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvks []schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvks, err = drm.staticMapper.KindsFor(resource) + return err + }) + return gvks, err +} + +func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionResource{}, err + } + + var gvr schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvr, err = drm.staticMapper.ResourceFor(input) + return err + }) + return gvr, err +} + +func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvrs []schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvrs, err = drm.staticMapper.ResourcesFor(input) + return err + }) + return gvrs, err +} + +func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mapping *meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mapping, err = drm.staticMapper.RESTMapping(gk, versions...) + return err + }) + return mapping, err +} + +func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mappings []*meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mappings, err = drm.staticMapper.RESTMappings(gk, versions...) + return err + }) + return mappings, err +} + +func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) { + if err := drm.init(); err != nil { + return "", err + } + var singular string + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + singular, err = drm.staticMapper.ResourceSingularizer(resource) + return err + }) + return singular, err +} + +// dynamicLimiter holds a rate limiter used to throttle chatty RESTMapper users. +type dynamicLimiter struct { + *rate.Limiter +} + +// checkRate returns an ErrRateLimited if too many API calls have been made +// within the set limit. 
+func (b *dynamicLimiter) checkRate() error { + res := b.Reserve() + if res.Delay() == 0 { + return nil + } + return ErrRateLimited{res.Delay()} +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 8be5619410fed..a3a0ddc98f97e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -105,7 +105,7 @@ type client struct { } // resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. -// TODO(vincepri): Remove this function and its calls once controller-runtime dependencies are upgraded to 1.15. +// TODO(vincepri): Remove this function and its calls once controller-runtime dependencies are upgraded to 1.16? func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) { if gvk != schema.EmptyObjectKind.GroupVersionKind() { if v, ok := obj.(schema.ObjectKind); ok { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go index 25cd32ca181c1..fd75c68b89564 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go @@ -20,8 +20,6 @@ import ( "flag" "fmt" "os" - "os/user" - "path/filepath" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -95,35 +93,40 @@ func GetConfigWithContext(context string) (*rest.Config, error) { return cfg, nil } +// loadInClusterConfig is a function used to load the in-cluster +// Kubernetes client config. This variable makes is possible to +// test the precedence of loading the config. +var loadInClusterConfig = rest.InClusterConfig + // loadConfig loads a REST Config as per the rules specified in GetConfig func loadConfig(context string) (*rest.Config, error) { // If a flag is specified with the config location, use that if len(kubeconfig) > 0 { - return loadConfigWithContext(apiServerURL, kubeconfig, context) - } - // If an env variable is specified with the config location, use that - if len(os.Getenv("KUBECONFIG")) > 0 { - return loadConfigWithContext(apiServerURL, os.Getenv("KUBECONFIG"), context) - } - // If no explicit location, try the in-cluster config - if c, err := rest.InClusterConfig(); err == nil { - return c, nil + return loadConfigWithContext(apiServerURL, &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context) } - // If no in-cluster config, try the default location in the user's home directory - if usr, err := user.Current(); err == nil { - if c, err := loadConfigWithContext(apiServerURL, filepath.Join(usr.HomeDir, ".kube", "config"), - context); err == nil { + + // If the recommended kubeconfig env variable is not specified, + // try the in-cluster config. + kubeconfigPath := os.Getenv(clientcmd.RecommendedConfigPathEnvVar) + if len(kubeconfigPath) == 0 { + if c, err := loadInClusterConfig(); err == nil { return c, nil } } + // If the recommended kubeconfig env variable is set, or there + // is no in-cluster config, try the default recommended locations. 
+ if c, err := loadConfigWithContext(apiServerURL, clientcmd.NewDefaultClientConfigLoadingRules(), context); err == nil { + return c, nil + } + return nil, fmt.Errorf("could not locate a kubeconfig") } -func loadConfigWithContext(apiServerURL, kubeconfig, context string) (*rest.Config, error) { +func loadConfigWithContext(apiServerURL string, loader clientcmd.ClientConfigLoader, context string) (*rest.Config, error) { return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, + loader, &clientcmd.ConfigOverrides{ ClusterInfo: clientcmdapi.Cluster{ Server: apiServerURL, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index 0eb04baa99496..62b5a86242a30 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -303,9 +303,20 @@ type ListOptions struct { // non-namespaced objects, or to list across all namespaces. Namespace string + // Limit specifies the maximum number of results to return from the server. The server may + // not support this field on all resource types, but if it does and more results remain it + // will set the continue field on the returned list object. This field is not supported if watch + // is true in the Raw ListOptions. + Limit int64 + // Continue is a token returned by the server that lets a client retrieve chunks of results + // from the server by specifying limit. The server may reject requests for continuation tokens + // it does not recognize and will return a 410 error if the token can no longer be used because + // it has expired. This field is not supported if watch is true in the Raw ListOptions. + Continue string + // Raw represents raw ListOptions, as passed to the API server. Note // that these may not be respected by all implementations of interface, - // and the LabelSelector and FieldSelector fields are ignored. + // and the LabelSelector, FieldSelector, Limit and Continue fields are ignored. Raw *metav1.ListOptions } @@ -325,6 +336,12 @@ func (o *ListOptions) ApplyToList(lo *ListOptions) { if o.Raw != nil { lo.Raw = o.Raw } + if o.Limit > 0 { + lo.Limit = o.Limit + } + if o.Continue != "" { + lo.Continue = o.Continue + } } // AsListOptions returns these options as a flattened metav1.ListOptions. @@ -342,6 +359,10 @@ func (o *ListOptions) AsListOptions() *metav1.ListOptions { if o.FieldSelector != nil { o.Raw.FieldSelector = o.FieldSelector.String() } + if !o.Raw.Watch { + o.Raw.Limit = o.Limit + o.Raw.Continue = o.Continue + } return o.Raw } @@ -430,6 +451,24 @@ func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { n.ApplyToList(&opts.ListOptions) } +// Limit specifies the maximum number of results to return from the server. +// Limit does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Limit int64 + +func (l Limit) ApplyToList(opts *ListOptions) { + opts.Limit = int64(l) +} + +// Continue sets a continuation token to retrieve chunks of results when using limit. +// Continue does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. 
+type Continue string + +func (c Continue) ApplyToList(opts *ListOptions) { + opts.Continue = string(c) +} + // }}} // {{{ Update Options diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go index 3411e95f75e6c..a4e2a982d99c1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -80,13 +80,15 @@ func New(name string, mgr manager.Manager, options Options) (Controller, error) // Create controller with dependencies set c := &controller.Controller{ - Do: options.Reconciler, - Cache: mgr.GetCache(), - Config: mgr.GetConfig(), - Scheme: mgr.GetScheme(), - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor(name), - Queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), + Do: options.Reconciler, + Cache: mgr.GetCache(), + Config: mgr.GetConfig(), + Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor(name), + MakeQueue: func() workqueue.RateLimitingInterface { + return workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name) + }, MaxConcurrentReconciles: options.MaxConcurrentReconciles, Name: name, } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/BUILD.bazel b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/BUILD.bazel index 4062acf7642d3..21808e8988cf3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/BUILD.bazel +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/BUILD.bazel @@ -11,6 +11,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go index bb67c7037f5d7..d0a576c034c15 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -22,6 +22,7 @@ import ( "reflect" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -59,6 +60,17 @@ func SetControllerReference(owner, object metav1.Object, scheme *runtime.Scheme) return fmt.Errorf("%T is not a runtime.Object, cannot call SetControllerReference", owner) } + ownerNs := owner.GetNamespace() + if ownerNs != "" { + objNs := object.GetNamespace() + if objNs == "" { + return fmt.Errorf("cluster-scoped resource must not have a namespace-scoped owner, owner's namespace %s", ownerNs) + } + if ownerNs != objNs { + return fmt.Errorf("cross-namespace owner references are disallowed, owner's namespace %s, obj's namespace %s", owner.GetNamespace(), object.GetNamespace()) + } + } + gvk, err := apiutil.GVKForObject(ro, scheme) if err != nil { return err @@ -168,3 +180,47 @@ func mutate(f MutateFn, key client.ObjectKey, obj runtime.Object) error { // MutateFn is 
a function which mutates the existing object into it's desired state. type MutateFn func() error + +// AddFinalizer accepts a metav1 object and adds the provided finalizer if not present. +func AddFinalizer(o metav1.Object, finalizer string) { + f := o.GetFinalizers() + for _, e := range f { + if e == finalizer { + return + } + } + o.SetFinalizers(append(f, finalizer)) +} + +// AddFinalizerWithError tries to convert a runtime object to a metav1 object and add the provided finalizer. +// It returns an error if the provided object cannot provide an accessor. +func AddFinalizerWithError(o runtime.Object, finalizer string) error { + m, err := meta.Accessor(o) + if err != nil { + return err + } + AddFinalizer(m, finalizer) + return nil +} + +// RemoveFinalizer accepts a metav1 object and removes the provided finalizer if present. +func RemoveFinalizer(o metav1.Object, finalizer string) { + f := o.GetFinalizers() + for i, e := range f { + if e == finalizer { + f = append(f[:i], f[i+1:]...) + } + } + o.SetFinalizers(f) +} + +// RemoveFinalizerWithError tries to convert a runtime object to a metav1 object and remove the provided finalizer. +// It returns an error if the provided object cannot provide an accessor. +func RemoveFinalizerWithError(o runtime.Object, finalizer string) error { + m, err := meta.Accessor(o) + if err != nil { + return err + } + RemoveFinalizer(m, finalizer) + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/BUILD.bazel b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/BUILD.bazel new file mode 100644 index 0000000000000..7416f1e13795e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "healthz.go", + ], + importmap = "k8s.io/kops/vendor/sigs.k8s.io/controller-runtime/pkg/healthz", + importpath = "sigs.k8s.io/controller-runtime/pkg/healthz", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/internal/log:go_default_library", + ], +) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go new file mode 100644 index 0000000000000..9827eeafed3cf --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go @@ -0,0 +1,32 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package healthz contains helpers from supporting liveness and readiness endpoints. +// (often referred to as healthz and readyz, respectively). +// +// This package draws heavily from the apiserver's healthz package +// ( https://github.com/kubernetes/apiserver/tree/master/pkg/server/healthz ) +// but has some changes to bring it in line with controller-runtime's style. 
+// +// The main entrypoint is the Handler -- this serves both aggregated health status +// and individual health check endpoints. +package healthz + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("healthz") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go new file mode 100644 index 0000000000000..e7f4b1c2793ad --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go @@ -0,0 +1,207 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthz + +import ( + "fmt" + "net/http" + "path" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// Handler is an http.Handler that aggregates the results of the given +// checkers to the root path, and supports calling individual checkers on +// subpaths of the name of the checker. +// +// Adding checks on the fly is *not* threadsafe -- use a wrapper. +type Handler struct { + Checks map[string]Checker +} + +// checkStatus holds the output of a particular check +type checkStatus struct { + name string + healthy bool + excluded bool +} + +func (h *Handler) serveAggregated(resp http.ResponseWriter, req *http.Request) { + failed := false + excluded := getExcludedChecks(req) + + parts := make([]checkStatus, 0, len(h.Checks)) + + // calculate the results... + for checkName, check := range h.Checks { + // no-op the check if we've specified we want to exclude the check + if excluded.Has(checkName) { + excluded.Delete(checkName) + parts = append(parts, checkStatus{name: checkName, healthy: true, excluded: true}) + continue + } + if err := check(req); err != nil { + log.V(1).Info("healthz check failed", "checker", checkName, "error", err) + parts = append(parts, checkStatus{name: checkName, healthy: false}) + failed = true + } else { + parts = append(parts, checkStatus{name: checkName, healthy: true}) + } + } + + // ...default a check if none is present... + if len(h.Checks) == 0 { + parts = append(parts, checkStatus{name: "ping", healthy: true}) + } + + for _, c := range excluded.List() { + log.V(1).Info("cannot exclude health check, no matches for it", "checker", c) + } + + // ...sort to be consistent... + sort.Slice(parts, func(i, j int) bool { return parts[i].name < parts[j].name }) + + // ...and write out the result + // TODO(directxman12): this should also accept a request for JSON content (via a accept header) + _, forceVerbose := req.URL.Query()["verbose"] + writeStatusesAsText(resp, parts, excluded, failed, forceVerbose) +} + +// writeStatusAsText writes out the given check statuses in some semi-arbitrary +// bespoke text format that we copied from Kubernetes. unknownExcludes lists +// any checks that the user requested to have excluded, but weren't actually +// known checks. writeStatusAsText is always verbose on failure, and can be +// forced to be verbose on success using the given argument. 
+func writeStatusesAsText(resp http.ResponseWriter, parts []checkStatus, unknownExcludes sets.String, failed, forceVerbose bool) { + resp.Header().Set("Content-Type", "text/plain; charset=utf-8") + resp.Header().Set("X-Content-Type-Options", "nosniff") + + // always write status code first + if failed { + resp.WriteHeader(http.StatusInternalServerError) + } else { + resp.WriteHeader(http.StatusOK) + } + + // shortcut for easy non-verbose success + if !failed && !forceVerbose { + fmt.Fprint(resp, "ok") + return + } + + // we're always verbose on failure, so from this point on we're guaranteed to be verbose + + for _, checkOut := range parts { + switch { + case checkOut.excluded: + fmt.Fprintf(resp, "[+]%s excluded: ok\n", checkOut.name) + case checkOut.healthy: + fmt.Fprintf(resp, "[+]%s ok\n", checkOut.name) + default: + // don't include the error since this endpoint is public. If someone wants more detail + // they should have explicit permission to the detailed checks. + fmt.Fprintf(resp, "[-]%s failed: reason withheld\n", checkOut.name) + } + } + + if unknownExcludes.Len() > 0 { + fmt.Fprintf(resp, "warn: some health checks cannot be excluded: no matches for %s\n", formatQuoted(unknownExcludes.List()...)) + } + + if failed { + log.Info("healthz check failed", "statuses", parts) + fmt.Fprintf(resp, "healthz check failed\n") + } else { + fmt.Fprint(resp, "healthz check passed\n") + } +} + +func (h *Handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + // clean up the request (duplicating the internal logic of http.ServeMux a bit) + // clean up the path a bit + reqPath := req.URL.Path + if reqPath == "" || reqPath[0] != '/' { + reqPath = "/" + reqPath + } + // path.Clean removes the trailing slash except for root for us + // (which is fine, since we're only serving one layer of sub-paths) + reqPath = path.Clean(reqPath) + + // either serve the root endpoint... + if reqPath == "/" { + h.serveAggregated(resp, req) + return + } + + // ...the default check (if nothing else is present)... + if len(h.Checks) == 0 && reqPath[1:] == "ping" { + CheckHandler{Checker: Ping}.ServeHTTP(resp, req) + return + } + + // ...or an individual checker + checkName := reqPath[1:] // ignore the leading slash + checker, known := h.Checks[checkName] + if !known { + http.NotFoundHandler().ServeHTTP(resp, req) + return + } + + CheckHandler{Checker: checker}.ServeHTTP(resp, req) +} + +// CheckHandler is an http.Handler that serves a health check endpoint at the root path, +// based on its checker. +type CheckHandler struct { + Checker +} + +func (h CheckHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + err := h.Checker(req) + if err != nil { + http.Error(resp, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError) + } else { + fmt.Fprint(resp, "ok") + } +} + +// Checker knows how to perform a health check. +type Checker func(req *http.Request) error + +// Ping returns true automatically when checked +var Ping Checker = func(_ *http.Request) error { return nil } + +// getExcludedChecks extracts the health check names to be excluded from the query param +func getExcludedChecks(r *http.Request) sets.String { + checks, found := r.URL.Query()["exclude"] + if found { + return sets.NewString(checks...) + } + return sets.NewString() +} + +// formatQuoted returns a formatted string of the health check names, +// preserving the order passed in. 
+func formatQuoted(names ...string) string { + quoted := make([]string, 0, len(names)) + for _, name := range names { + quoted = append(quoted, fmt.Sprintf("%q", name)) + } + return strings.Join(quoted, ",") +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index 7f40a32a52171..0938fd4ec7385 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -68,6 +68,11 @@ type Controller struct { // specified, or the ~/.kube/Config. Config *rest.Config + // MakeQueue constructs the queue for this controller once the controller is ready to start. + // This exists because the standard Kubernetes workqueues start themselves immediately, which + // leads to goroutine leaks if something calls controller.New repeatedly. + MakeQueue func() workqueue.RateLimitingInterface + // Queue is an listeningQueue that listens for events from Informers and adds object keys to // the Queue for processing Queue workqueue.RateLimitingInterface @@ -93,6 +98,16 @@ type Controller struct { Recorder record.EventRecorder // TODO(community): Consider initializing a logger with the Controller Name as the tag + + // watches maintains a list of sources, handlers, and predicates to start when the controller is started. + watches []watchDescription +} + +// watchDescription contains all the information necessary to start a watch. +type watchDescription struct { + src source.Source + handler handler.EventHandler + predicates []predicate.Predicate } // Reconcile implements reconcile.Reconciler @@ -118,47 +133,72 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc } } - log.Info("Starting EventSource", "controller", c.Name, "source", src) - return src.Start(evthdler, c.Queue, prct...) + c.watches = append(c.watches, watchDescription{src: src, handler: evthdler, predicates: prct}) + if c.Started { + log.Info("Starting EventSource", "controller", c.Name, "source", src) + return src.Start(evthdler, c.Queue, prct...) 
+ } + + return nil } // Start implements controller.Controller func (c *Controller) Start(stop <-chan struct{}) error { + // use an IIFE to get proper lock handling + // but lock outside to get proper handling of the queue shutdown c.mu.Lock() - // TODO(pwittrock): Reconsider HandleCrash - defer utilruntime.HandleCrash() - defer c.Queue.ShutDown() + c.Queue = c.MakeQueue() + defer c.Queue.ShutDown() // needs to be outside the iife so that we shutdown after the stop channel is closed - // Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches - log.Info("Starting Controller", "controller", c.Name) + err := func() error { + defer c.mu.Unlock() - // Wait for the caches to be synced before starting workers - if c.WaitForCacheSync == nil { - c.WaitForCacheSync = c.Cache.WaitForCacheSync - } - if ok := c.WaitForCacheSync(stop); !ok { - // This code is unreachable right now since WaitForCacheSync will never return an error - // Leaving it here because that could happen in the future - err := fmt.Errorf("failed to wait for %s caches to sync", c.Name) - log.Error(err, "Could not wait for Cache to sync", "controller", c.Name) - c.mu.Unlock() - return err - } + // TODO(pwittrock): Reconsider HandleCrash + defer utilruntime.HandleCrash() - if c.JitterPeriod == 0 { - c.JitterPeriod = 1 * time.Second - } + // NB(directxman12): launch the sources *before* trying to wait for the + // caches to sync so that they have a chance to register their intendeded + // caches. + for _, watch := range c.watches { + log.Info("Starting EventSource", "controller", c.Name, "source", watch.src) + if err := watch.src.Start(watch.handler, c.Queue, watch.predicates...); err != nil { + return err + } + } - // Launch workers to process resources - log.Info("Starting workers", "controller", c.Name, "worker count", c.MaxConcurrentReconciles) - for i := 0; i < c.MaxConcurrentReconciles; i++ { - // Process work items - go wait.Until(c.worker, c.JitterPeriod, stop) - } + // Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches + log.Info("Starting Controller", "controller", c.Name) + + // Wait for the caches to be synced before starting workers + if c.WaitForCacheSync == nil { + c.WaitForCacheSync = c.Cache.WaitForCacheSync + } + if ok := c.WaitForCacheSync(stop); !ok { + // This code is unreachable right now since WaitForCacheSync will never return an error + // Leaving it here because that could happen in the future + err := fmt.Errorf("failed to wait for %s caches to sync", c.Name) + log.Error(err, "Could not wait for Cache to sync", "controller", c.Name) + return err + } + + if c.JitterPeriod == 0 { + c.JitterPeriod = 1 * time.Second + } + + // Launch workers to process resources + log.Info("Starting workers", "controller", c.Name, "worker count", c.MaxConcurrentReconciles) + for i := 0; i < c.MaxConcurrentReconciles; i++ { + // Process work items + go wait.Until(c.worker, c.JitterPeriod, stop) + } - c.Started = true - c.mu.Unlock() + c.Started = true + return nil + }() + if err != nil { + return err + } <-stop log.Info("Stopping workers", "controller", c.Name) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/BUILD.bazel b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/BUILD.bazel deleted file mode 100644 index b912628c0b47d..0000000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( 
- name = "go_default_library", - srcs = ["filter.go"], - importmap = "k8s.io/kops/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil", - importpath = "sigs.k8s.io/controller-runtime/pkg/internal/objectutil", - visibility = ["//vendor/sigs.k8s.io/controller-runtime/pkg:__subpackages__"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go deleted file mode 100644 index 8513846e2c173..0000000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package objectutil - -import ( - apimeta "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" -) - -// FilterWithLabels returns a copy of the items in objs matching labelSel -func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { - outItems := make([]runtime.Object, 0, len(objs)) - for _, obj := range objs { - meta, err := apimeta.Accessor(obj) - if err != nil { - return nil, err - } - if labelSel != nil { - lbls := labels.Set(meta.GetLabels()) - if !labelSel.Matches(lbls) { - continue - } - } - outItems = append(outItems, obj.DeepCopyObject()) - } - return outItems, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/BUILD.bazel b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/BUILD.bazel index ba2119bec68c3..083120c74aad8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/BUILD.bazel +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/BUILD.bazel @@ -24,6 +24,7 @@ go_library( "//vendor/sigs.k8s.io/controller-runtime/pkg/cache:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/healthz:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/internal/log:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection:go_default_library", diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index 5e5a2611e09f4..5ece5ef7eb3b7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -31,8 +31,10 @@ import ( "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/cache" 
"sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/healthz" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/recorder" @@ -45,6 +47,9 @@ const ( defaultLeaseDuration = 15 * time.Second defaultRenewDeadline = 10 * time.Second defaultRetryPeriod = 2 * time.Second + + defaultReadinessEndpoint = "/readyz" + defaultLivenessEndpoint = "/healthz" ) var log = logf.RuntimeLog.WithName("manager") @@ -90,10 +95,30 @@ type controllerManager struct { // metricsListener is used to serve prometheus metrics metricsListener net.Listener - mu sync.Mutex - started bool - startedLeader bool - errChan chan error + // healthProbeListener is used to serve liveness probe + healthProbeListener net.Listener + + // Readiness probe endpoint name + readinessEndpointName string + + // Liveness probe endpoint name + livenessEndpointName string + + // Readyz probe handler + readyzHandler *healthz.Handler + + // Healthz probe handler + healthzHandler *healthz.Handler + + mu sync.Mutex + started bool + startedLeader bool + healthzStarted bool + + // NB(directxman12): we don't just use an error channel here to avoid the situation where the + // error channel is too small and we end up blocking some goroutines waiting to report their errors. + // errSignal lets us track when we should stop because an error occurred + errSignal *errSignaler // internalStop is the stop channel *actually* used by everything involved // with the manager as a stop channel, so that we can pass a stop channel @@ -129,6 +154,51 @@ type controllerManager struct { retryPeriod time.Duration } +type errSignaler struct { + // errSignal indicates that an error occurred, when closed. It shouldn't + // be written to. + errSignal chan struct{} + + // err is the received error + err error + + mu sync.Mutex +} + +func (r *errSignaler) SignalError(err error) { + r.mu.Lock() + defer r.mu.Unlock() + + if err == nil { + // non-error, ignore + log.Error(nil, "SignalError called without an (with a nil) error, which should never happen, ignoring") + return + } + + if r.err != nil { + // we already have an error, don't try again + return + } + + // save the error and report it + r.err = err + close(r.errSignal) +} + +func (r *errSignaler) Error() error { + r.mu.Lock() + defer r.mu.Unlock() + + return r.err +} + +func (r *errSignaler) GotError() chan struct{} { + r.mu.Lock() + defer r.mu.Unlock() + + return r.errSignal +} + // Add sets dependencies on i, and adds it to the list of Runnables to start. 
func (cm *controllerManager) Add(r Runnable) error { cm.mu.Lock() @@ -153,7 +223,9 @@ func (cm *controllerManager) Add(r Runnable) error { if shouldStart { // If already started, start the controller go func() { - cm.errChan <- r.Start(cm.internalStop) + if err := r.Start(cm.internalStop); err != nil { + cm.errSignal.SignalError(err) + } }() } @@ -188,6 +260,40 @@ func (cm *controllerManager) SetFields(i interface{}) error { return nil } +// AddHealthzCheck allows you to add Healthz checker +func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error { + cm.mu.Lock() + defer cm.mu.Unlock() + + if cm.healthzStarted { + return fmt.Errorf("unable to add new checker because healthz endpoint has already been created") + } + + if cm.healthzHandler == nil { + cm.healthzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}} + } + + cm.healthzHandler.Checks[name] = check + return nil +} + +// AddReadyzCheck allows you to add Readyz checker +func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) error { + cm.mu.Lock() + defer cm.mu.Unlock() + + if cm.healthzStarted { + return fmt.Errorf("unable to add new checker because readyz endpoint has already been created") + } + + if cm.readyzHandler == nil { + cm.readyzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}} + } + + cm.readyzHandler.Checks[name] = check + return nil +} + func (cm *controllerManager) GetConfig() *rest.Config { return cm.config } @@ -249,15 +355,47 @@ func (cm *controllerManager) serveMetrics(stop <-chan struct{}) { go func() { log.Info("starting metrics server", "path", metricsPath) if err := server.Serve(cm.metricsListener); err != nil && err != http.ErrServerClosed { - cm.errChan <- err + cm.errSignal.SignalError(err) + } + }() + + // Shutdown the server when stop is closed + select { + case <-stop: + if err := server.Shutdown(context.Background()); err != nil { + cm.errSignal.SignalError(err) + } + } +} + +func (cm *controllerManager) serveHealthProbes(stop <-chan struct{}) { + cm.mu.Lock() + mux := http.NewServeMux() + + if cm.readyzHandler != nil { + mux.Handle(cm.readinessEndpointName, http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler)) + } + if cm.healthzHandler != nil { + mux.Handle(cm.livenessEndpointName, http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler)) + } + + server := http.Server{ + Handler: mux, + } + // Run server + go func() { + if err := server.Serve(cm.healthProbeListener); err != nil && err != http.ErrServerClosed { + cm.errSignal.SignalError(err) } }() + cm.healthzStarted = true + cm.mu.Unlock() // Shutdown the server when stop is closed select { case <-stop: if err := server.Shutdown(context.Background()); err != nil { - cm.errChan <- err + cm.errSignal.SignalError(err) } } } @@ -266,6 +404,9 @@ func (cm *controllerManager) Start(stop <-chan struct{}) error { // join the passed-in stop channel as an upstream feeding into cm.internalStopper defer close(cm.internalStopper) + // initialize this here so that we reset the signal channel state on every start + cm.errSignal = &errSignaler{errSignal: make(chan struct{})} + // Metrics should be served whether the controller is leader or not. 
// (If we don't serve metrics for non-leaders, prometheus will still scrape // the pod but will get a connection refused) @@ -273,6 +414,11 @@ func (cm *controllerManager) Start(stop <-chan struct{}) error { go cm.serveMetrics(cm.internalStop) } + // Serve health probes + if cm.healthProbeListener != nil { + go cm.serveHealthProbes(cm.internalStop) + } + go cm.startNonLeaderElectionRunnables() if cm.resourceLock != nil { @@ -288,9 +434,9 @@ func (cm *controllerManager) Start(stop <-chan struct{}) error { case <-stop: // We are done return nil - case err := <-cm.errChan: + case <-cm.errSignal.GotError(): // Error starting a controller - return err + return cm.errSignal.Error() } } @@ -306,7 +452,12 @@ func (cm *controllerManager) startNonLeaderElectionRunnables() { // Write any Start errors to a channel so we can return them ctrl := c go func() { - cm.errChan <- ctrl.Start(cm.internalStop) + if err := ctrl.Start(cm.internalStop); err != nil { + cm.errSignal.SignalError(err) + } + // we use %T here because we don't have a good stand-in for "name", + // and the full runnable might not serialize (mutexes, etc) + log.V(1).Info("non-leader-election runnable finished", "runnable type", fmt.Sprintf("%T", ctrl)) }() } } @@ -323,7 +474,12 @@ func (cm *controllerManager) startLeaderElectionRunnables() { // Write any Start errors to a channel so we can return them ctrl := c go func() { - cm.errChan <- ctrl.Start(cm.internalStop) + if err := ctrl.Start(cm.internalStop); err != nil { + cm.errSignal.SignalError(err) + } + // we use %T here because we don't have a good stand-in for "name", + // and the full runnable might not serialize (mutexes, etc) + log.V(1).Info("leader-election runnable finished", "runnable type", fmt.Sprintf("%T", ctrl)) }() } @@ -341,7 +497,7 @@ func (cm *controllerManager) waitForCache() { } go func() { if err := cm.startCache(cm.internalStop); err != nil { - cm.errChan <- err + cm.errSignal.SignalError(err) } }() @@ -365,7 +521,7 @@ func (cm *controllerManager) startLeaderElection() (err error) { // Most implementations of leader election log.Fatal() here. // Since Start is wrapped in log.Fatal when called, we can just return // an error here which will cause the program to exit. - cm.errChan <- fmt.Errorf("leader election lost") + cm.errSignal.SignalError(fmt.Errorf("leader election lost")) }, }, }) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 2d045fa91deaf..d30c4f0d37187 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/healthz" internalrecorder "sigs.k8s.io/controller-runtime/pkg/internal/recorder" "sigs.k8s.io/controller-runtime/pkg/leaderelection" "sigs.k8s.io/controller-runtime/pkg/metrics" @@ -53,6 +54,12 @@ type Manager interface { // interface - e.g. inject.Client. SetFields(interface{}) error + // AddHealthzCheck allows you to add Healthz checker + AddHealthzCheck(name string, check healthz.Checker) error + + // AddReadyzCheck allows you to add Readyz checker + AddReadyzCheck(name string, check healthz.Checker) error + // Start starts all registered Controllers and blocks until the Stop channel is closed. // Returns an error if there is an error starting any controller. 
Start(<-chan struct{}) error @@ -142,6 +149,16 @@ type Options struct { // It can be set to "0" to disable the metrics serving. MetricsBindAddress string + // HealthProbeBindAddress is the TCP address that the controller should bind to + // for serving health probes + HealthProbeBindAddress string + + // Readiness probe endpoint name, defaults to "readyz" + ReadinessEndpointName string + + // Liveness probe endpoint name, defaults to "healthz" + LivenessEndpointName string + // Port is the port that the webhook server serves at. // It is used to set webhook.Server.Port. Port int @@ -169,9 +186,10 @@ type Options struct { EventBroadcaster record.EventBroadcaster // Dependency injection for testing - newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, broadcaster record.EventBroadcaster) (recorder.Provider, error) - newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) - newMetricsListener func(addr string) (net.Listener, error) + newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, broadcaster record.EventBroadcaster) (recorder.Provider, error) + newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) + newMetricsListener func(addr string) (net.Listener, error) + newHealthProbeListener func(addr string) (net.Listener, error) } // NewClientFunc allows a user to define how to create a client @@ -261,28 +279,37 @@ func New(config *rest.Config, options Options) (Manager, error) { return nil, err } + // Create health probes listener. This will throw an error if the bind + // address is invalid or already in use. 
+ healthProbeListener, err := options.newHealthProbeListener(options.HealthProbeBindAddress) + if err != nil { + return nil, err + } + stop := make(chan struct{}) return &controllerManager{ - config: config, - scheme: options.Scheme, - errChan: make(chan error), - cache: cache, - fieldIndexes: cache, - client: writeObj, - apiReader: apiReader, - recorderProvider: recorderProvider, - resourceLock: resourceLock, - mapper: mapper, - metricsListener: metricsListener, - internalStop: stop, - internalStopper: stop, - port: options.Port, - host: options.Host, - certDir: options.CertDir, - leaseDuration: *options.LeaseDuration, - renewDeadline: *options.RenewDeadline, - retryPeriod: *options.RetryPeriod, + config: config, + scheme: options.Scheme, + cache: cache, + fieldIndexes: cache, + client: writeObj, + apiReader: apiReader, + recorderProvider: recorderProvider, + resourceLock: resourceLock, + mapper: mapper, + metricsListener: metricsListener, + internalStop: stop, + internalStopper: stop, + port: options.Port, + host: options.Host, + certDir: options.CertDir, + leaseDuration: *options.LeaseDuration, + renewDeadline: *options.RenewDeadline, + retryPeriod: *options.RetryPeriod, + healthProbeListener: healthProbeListener, + readinessEndpointName: options.ReadinessEndpointName, + livenessEndpointName: options.LivenessEndpointName, }, nil } @@ -304,6 +331,19 @@ func defaultNewClient(cache cache.Cache, config *rest.Config, options client.Opt }, nil } +// defaultHealthProbeListener creates the default health probes listener bound to the given address +func defaultHealthProbeListener(addr string) (net.Listener, error) { + if addr == "" || addr == "0" { + return nil, nil + } + + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, fmt.Errorf("error listening on %s: %v", addr, err) + } + return ln, nil +} + // setOptionsDefaults set default values for Options fields func setOptionsDefaults(options Options) Options { // Use the Kubernetes client-go scheme if none is specified @@ -312,7 +352,9 @@ func setOptionsDefaults(options Options) Options { } if options.MapperProvider == nil { - options.MapperProvider = apiutil.NewDiscoveryRESTMapper + options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) { + return apiutil.NewDynamicRESTMapper(c) + } } // Allow newClient to be mocked @@ -355,5 +397,17 @@ func setOptionsDefaults(options Options) Options { options.EventBroadcaster = record.NewBroadcaster() } + if options.ReadinessEndpointName == "" { + options.ReadinessEndpointName = defaultReadinessEndpoint + } + + if options.LivenessEndpointName == "" { + options.LivenessEndpointName = defaultLivenessEndpoint + } + + if options.newHealthProbeListener == nil { + options.newHealthProbeListener = defaultHealthProbeListener + } + return options } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go index 6381f0c14a459..4baf8170055d8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go @@ -122,52 +122,3 @@ func (workqueueMetricsProvider) NewRetriesMetric(queue string) workqueue.Counter registerWorkqueueMetric(m, name, queue) return m } - -// TODO(abursavich): Remove the following deprecated metrics when they are -// removed from k8s.io/client-go/util/workqueue. 
- -func (workqueueMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(queue string) workqueue.SettableGaugeMetric { - const name = "workqueue_longest_running_processor_microseconds" - m := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: name, - Help: "(Deprecated) How many microseconds has the longest running " + - "processor for workqueue been running.", - ConstLabels: prometheus.Labels{"name": queue}, - }) - registerWorkqueueMetric(m, name, queue) - return m -} - -// NOTE: The following deprecated metrics are noops because they were never -// included in controller-runtime. - -func (workqueueMetricsProvider) NewDeprecatedDepthMetric(queue string) workqueue.GaugeMetric { - return noopMetric{} -} - -func (workqueueMetricsProvider) NewDeprecatedAddsMetric(queue string) workqueue.CounterMetric { - return noopMetric{} -} - -func (workqueueMetricsProvider) NewDeprecatedLatencyMetric(queue string) workqueue.SummaryMetric { - return noopMetric{} -} - -func (workqueueMetricsProvider) NewDeprecatedWorkDurationMetric(queue string) workqueue.SummaryMetric { - return noopMetric{} -} - -func (workqueueMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(queue string) workqueue.SettableGaugeMetric { - return noopMetric{} -} - -func (workqueueMetricsProvider) NewDeprecatedRetriesMetric(queue string) workqueue.CounterMetric { - return noopMetric{} -} - -type noopMetric struct{} - -func (noopMetric) Inc() {} -func (noopMetric) Dec() {} -func (noopMetric) Set(float64) {} -func (noopMetric) Observe(float64) {} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go index 5e4e2d699b7ec..4b02e2170c5ef 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go @@ -81,6 +81,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { wh.writeResponse(w, reviewResponse) return } + wh.log.V(1).Info("received request", "UID", req.UID, "kind", req.Kind, "resource", req.Resource) // TODO: add panic-recovery for Handle reviewResponse = wh.Handle(r.Context(), req) @@ -96,5 +97,8 @@ func (wh *Webhook) writeResponse(w io.Writer, response Response) { if err != nil { wh.log.Error(err, "unable to encode the response") wh.writeResponse(w, Errored(http.StatusInternalServerError, err)) + } else { + res := responseAdmissionReview.Response + wh.log.V(1).Info("wrote response", "UID", res.UID, "allowed", res.Allowed, "result", res.Result) } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go index c7ac97abac696..2dcddded41e90 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -106,6 +106,7 @@ func (s *Server) Register(path string, hook http.Handler) { // TODO(directxman12): call setfields if we've already started the server s.webhooks[path] = hook s.WebhookMux.Handle(path, instrumentedHook(path, hook)) + log.Info("registering webhook", "path", path) } // instrumentedHook adds some instrumentation on top of the given webhook. 
@@ -125,6 +126,8 @@ func (s *Server) Start(stop <-chan struct{}) error { s.defaultingOnce.Do(s.setDefaults) baseHookLog := log.WithName("webhooks") + baseHookLog.Info("starting webhook server") + // inject fields here as opposed to in Register so that we're certain to have our setFields // function available. for hookPath, webhook := range s.webhooks { @@ -164,6 +167,8 @@ func (s *Server) Start(stop <-chan struct{}) error { return err } + log.Info("serving webhook server", "host", s.Host, "port", s.Port) + srv := &http.Server{ Handler: s.WebhookMux, } @@ -171,6 +176,7 @@ func (s *Server) Start(stop <-chan struct{}) error { idleConnsClosed := make(chan struct{}) go func() { <-stop + log.Info("shutting down webhook server") // TODO: use a context with reasonable timeout if err := srv.Shutdown(context.Background()); err != nil {
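For context on how the new health probe plumbing above is consumed, a minimal sketch follows. It assumes only the controller-runtime API introduced in this diff (manager.Options.HealthProbeBindAddress, Manager.AddHealthzCheck / AddReadyzCheck, and the healthz.Checker / healthz.Ping types); the main package, the :8081 bind address, and the check names are illustrative and are not part of this change.

package main

import (
    "log"
    "net/http"

    "sigs.k8s.io/controller-runtime/pkg/client/config"
    "sigs.k8s.io/controller-runtime/pkg/healthz"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/manager/signals"
)

func main() {
    // A non-empty HealthProbeBindAddress makes manager.New create the probe
    // listener; "" or "0" leaves probes disabled (see defaultHealthProbeListener).
    mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{
        HealthProbeBindAddress: ":8081", // illustrative address
    })
    if err != nil {
        log.Fatal(err)
    }

    // healthz.Ping is the always-healthy Checker from pkg/healthz.
    if err := mgr.AddHealthzCheck("ping", healthz.Ping); err != nil {
        log.Fatal(err)
    }

    // A custom Checker is any func(*http.Request) error; a non-nil error
    // marks the check (and the aggregated endpoint) as failed.
    cacheSynced := func(_ *http.Request) error {
        // real readiness logic would go here
        return nil
    }
    if err := mgr.AddReadyzCheck("cache-synced", cacheSynced); err != nil {
        log.Fatal(err)
    }

    // Checks must be registered before Start: once the probe server is up,
    // AddHealthzCheck/AddReadyzCheck return an error. The checks are then
    // served at the default "/healthz" and "/readyz" paths.
    if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
        log.Fatal(err)
    }
}

With this wiring, GET :8081/healthz and :8081/readyz return HTTP 200 (plain "ok" unless verbose output is requested) while every registered check passes, and HTTP 500 with per-check "[-]<name> failed: reason withheld" lines otherwise, as implemented by writeStatusesAsText above; individual checks can be skipped on the aggregated endpoints via the exclude query parameter handled by getExcludedChecks.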